content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# pylint: disable=missing-docstring,import-error,unused-import,assignment-from-no-return
# pylint: disable=invalid-name, too-few-public-methods, useless-object-inheritance
from __future__ import print_function
from UNINFERABLE import uninferable_func
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
my_single_dispatch = singledispatch
fake_singledispatch_decorator = FakeSingleDispatch()
@singledispatch
@func.register(str)
@func.register(float)
@func.register(int)
@my_single_dispatch
@func2.register(int)
@singledispatch
@with_extra_arg.register(str)
@fake_singledispatch_decorator
@fake_singledispatch_decorator.register(str)
@fake_singledispatch_decorator.register(str)
| [
2,
279,
2645,
600,
25,
15560,
28,
45688,
12,
15390,
8841,
11,
11748,
12,
18224,
11,
403,
1484,
12,
11748,
11,
562,
16747,
12,
6738,
12,
3919,
12,
7783,
198,
2,
279,
2645,
600,
25,
15560,
28,
259,
12102,
12,
3672,
11,
1165,
12,
3... | 2.890152 | 264 |
print('='*30)
print('{:^30}'.format('BANCO ANDERSON'))
print('='*30)
saque=int(input('Qual valor você quer sacar?R$'))
total=saque
céd=100
totalcéd=0
cont=0
while True:
if total>=céd:
total-=céd
totalcéd+=1
else:
if totalcéd>0:
print(f'Total de {totalcéd} cédulas de R${céd}')
if céd==100:
céd=50
elif céd==50:
céd=20
elif céd ==20:
céd=10
totalcéd=0
if total ==0:
break
print('='*50)
print('TESTANDO.....Muito Obrigado por utilizar o BANCO ANDERSON')
| [
4798,
10786,
11639,
9,
1270,
8,
198,
4798,
10786,
90,
25,
61,
1270,
92,
4458,
18982,
10786,
33,
1565,
8220,
5357,
29086,
6,
4008,
198,
4798,
10786,
11639,
9,
1270,
8,
198,
11400,
4188,
28,
600,
7,
15414,
10786,
46181,
1188,
273,
127... | 1.721408 | 341 |
# encoding: utf-8
##################################################
# This script shows an example of a header, library and code section.
##################################################
#
##################################################
# Author: Diego Pajarito
# Copyright: Copyright 2020, IAAC
# Credits: [Institute for Advanced Architecture of Catalonia - IAAC, Advanced Architecture group]
# License: Apache License Version 2.0
# Version: 1.0.0
# Maintainer: Diego Pajarito
# Email: diego.pajarito@iaac.net
# Status: development
##################################################
# End of header section
import sys
# depending on the complexity of your script you will have a longer list of libraries
sys.stdout.write("This is an script with three sections \n\n")
sys.stdout.write("Header section using '#' characters\n")
sys.stdout.write("library section using import/from ... import commands\n")
sys.stdout.write("Code section calling the 'sys' library to show you this text\n")
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
29113,
14468,
2235,
198,
2,
770,
4226,
2523,
281,
1672,
286,
257,
13639,
11,
5888,
290,
2438,
2665,
13,
198,
29113,
14468,
2235,
198,
2,
198,
29113,
14468,
2235,
198,
2,
6434,
25,
9500,
35... | 4.081967 | 244 |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Christiaan Frans Rademan <chris@fwiw.co.za>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
from hashlib import md5
from time import sleep
from multiprocessing import (cpu_count,
current_process)
from luxon import g
from luxon import register
from luxon import GetLogger
from luxon import db, dbw
from luxon import MBClient
from luxon.utils.timezone import utc
from luxon.utils.multiproc import ProcessManager
from luxon.utils.multithread import ThreadManager
from luxon.utils.encoding import if_unicode_to_bytes
from calabiyau.core.helpers.radius import (get_user,
get_attributes,
has_session,
get_ip,
update_ip,
get_pool_name,
encode_packet)
from calabiyau.core.handlers.radius.server import Server
from calabiyau.constants import RAD_ACCESSACCEPT
from calabiyau.core.utils.radius import (validate_chap_password,
duplicate)
from calabiyau.lib.ctx import ctx as ctx_values
log = GetLogger(__name__)
clients_hash = b''
@register.resource('service', 'radius')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
357,
66,
8,
2864,
12,
42334,
1951,
544,
272,
1305,
504,
5325,
8463,
1279,
354,
2442,
31,
44482,
14246,
13,
1073,
13,
4496,
28401,
198,
2,
1439,
2489,
10395,
... | 2.650794 | 1,071 |
from dataclasses import dataclass
from datetime import timedelta
from hashlib import blake2b
from numbers import Number
from typing import Optional
@dataclass
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
6738,
12234,
8019,
1330,
698,
539,
17,
65,
198,
6738,
3146,
1330,
7913,
198,
6738,
19720,
1330,
32233,
628,
198,
198,
31,
19608,
330,
31172,... | 3.880952 | 42 |
import sys,os
from tests.Base_test import BaseTest
import json
| [
11748,
25064,
11,
418,
198,
6738,
5254,
13,
14881,
62,
9288,
1330,
7308,
14402,
198,
11748,
33918,
628
] | 3.555556 | 18 |
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.kvstore.kv_client import DEFAULT_TIMEOUT, Event, KVClient, KVPair, RETRY_BACKOFF
from common.utils.asleep import asleep
from common.utils.deferred_utils import DeferredWithTimeout, TimeOutError
from consul import ConsulException
from consul.twisted import Consul
from structlog import get_logger
from twisted.internet.defer import inlineCallbacks, returnValue, Deferred
log = get_logger()
| [
2,
15069,
2177,
12,
25579,
4946,
7311,
278,
5693,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
1... | 3.75188 | 266 |
#!/usr/bin/env python
# Copyright (c) 2020 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2 License
# The full license information can be found in LICENSE.txt
# in the root directory of this project.
'''
Unit test for Iperf app.
'''
import logging
import time
import unittest
import warnings
from axon.apps.iperf import Iperf
log = logging.getLogger(__name__)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
66,
8,
12131,
37754,
11,
3457,
13,
1439,
6923,
33876,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
347,
10305,
12,
17,
13789,
198,
2,
383,
1336,
5964,
1321... | 3.191667 | 120 |
"""
1. duplicated values
2. irrelevant data
3. structural error ('female', 'fem')
4. missing values
5. outliers
"""
| [
37811,
198,
197,
16,
13,
14184,
3474,
3815,
628,
197,
17,
13,
18046,
1366,
628,
197,
18,
13,
13204,
4049,
19203,
24724,
3256,
705,
69,
368,
11537,
628,
197,
19,
13,
4814,
3815,
628,
197,
20,
13,
41528,
3183,
198,
198,
37811,
198
] | 2.930233 | 43 |
# coding: utf-8
# # Loading Graphs in NetworkX
# In[1]:
import networkx as nx
import numpy as np
import pandas as pd
get_ipython().magic('matplotlib notebook')
# Instantiate the graph
G1 = nx.Graph()
# add node/edge pairs
G1.add_edges_from([(0, 1),
(0, 2),
(0, 3),
(0, 5),
(1, 3),
(1, 6),
(3, 4),
(4, 5),
(4, 7),
(5, 8),
(8, 9)])
# draw the network G1
nx.draw_networkx(G1)
# ### Adjacency List
# `G_adjlist.txt` is the adjacency list representation of G1.
#
# It can be read as follows:
# * `0 1 2 3 5` $\rightarrow$ node `0` is adjacent to nodes `1, 2, 3, 5`
# * `1 3 6` $\rightarrow$ node `1` is (also) adjacent to nodes `3, 6`
# * `2` $\rightarrow$ node `2` is (also) adjacent to no new nodes
# * `3 4` $\rightarrow$ node `3` is (also) adjacent to node `4`
#
# and so on. Note that adjacencies are only accounted for once (e.g. node `2` is adjacent to node `0`, but node `0` is not listed in node `2`'s row, because that edge has already been accounted for in node `0`'s row).
# In[2]:
get_ipython().system('cat G_adjlist.txt')
# If we read in the adjacency list using `nx.read_adjlist`, we can see that it matches `G1`.
# In[3]:
G2 = nx.read_adjlist('G_adjlist.txt', nodetype=int)
G2.edges()
# ### Adjacency Matrix
#
# The elements in an adjacency matrix indicate whether pairs of vertices are adjacent or not in the graph. Each node has a corresponding row and column. For example, row `0`, column `1` corresponds to the edge between node `0` and node `1`.
#
# Reading across row `0`, there is a '`1`' in columns `1`, `2`, `3`, and `5`, which indicates that node `0` is adjacent to nodes 1, 2, 3, and 5
# In[4]:
G_mat = np.array([[0, 1, 1, 1, 0, 1, 0, 0, 0, 0],
[1, 0, 0, 1, 0, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 1, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0]])
G_mat
# If we convert the adjacency matrix to a networkx graph using `nx.Graph`, we can see that it matches G1.
# In[5]:
G3 = nx.Graph(G_mat)
G3.edges()
# ### Edgelist
# The edge list format represents edge pairings in the first two columns. Additional edge attributes can be added in subsequent columns. Looking at `G_edgelist.txt` this is the same as the original graph `G1`, but now each edge has a weight.
#
# For example, from the first row, we can see the edge between nodes `0` and `1`, has a weight of `4`.
# In[6]:
get_ipython().system('cat G_edgelist.txt')
# Using `read_edgelist` and passing in a list of tuples with the name and type of each edge attribute will create a graph with our desired edge attributes.
# In[7]:
G4 = nx.read_edgelist('G_edgelist.txt', data=[('Weight', int)])
G4.edges(data=True)
# ### Pandas DataFrame
# Graphs can also be created from pandas dataframes if they are in edge list format.
# In[8]:
G_df = pd.read_csv('G_edgelist.txt', delim_whitespace=True,
header=None, names=['n1', 'n2', 'weight'])
G_df
# In[9]:
G5 = nx.from_pandas_dataframe(G_df, 'n1', 'n2', edge_attr='weight')
G5.edges(data=True)
# ### Chess Example
# Now let's load in a more complex graph and perform some basic analysis on it.
#
# We will be looking at chess_graph.txt, which is a directed graph of chess games in edge list format.
# In[10]:
get_ipython().system('head -5 chess_graph.txt')
# Each node is a chess player, and each edge represents a game. The first column with an outgoing edge corresponds to the white player, the second column with an incoming edge corresponds to the black player.
#
# The third column, the weight of the edge, corresponds to the outcome of the game. A weight of 1 indicates white won, a 0 indicates a draw, and a -1 indicates black won.
#
# The fourth column corresponds to approximate timestamps of when the game was played.
#
# We can read in the chess graph using `read_edgelist`, and tell it to create the graph using a `nx.MultiDiGraph`.
# In[11]:
chess = nx.read_edgelist('chess_graph.txt', data=[('outcome', int), ('timestamp', float)],
create_using=nx.MultiDiGraph())
# In[12]:
chess.is_directed(), chess.is_multigraph()
# In[13]:
chess.edges(data=True)
# Looking at the degree of each node, we can see how many games each person played. A dictionary is returned where each key is the player, and each value is the number of games played.
# In[14]:
games_played = chess.degree()
games_played
# Using list comprehension, we can find which player played the most games.
# In[15]:
max_value = max(games_played.values())
max_key, = [i for i in games_played.keys() if games_played[i] == max_value]
print('player {}\n{} games'.format(max_key, max_value))
# Let's use pandas to find out which players won the most games. First let's convert our graph to a DataFrame.
# In[16]:
df = pd.DataFrame(chess.edges(data=True), columns=['white', 'black', 'outcome'])
df.head()
# Next we can use a lambda to pull out the outcome from the attributes dictionary.
# In[17]:
df['outcome'] = df['outcome'].map(lambda x: x['outcome'])
df.head()
# To count the number of times a player won as white, we find the rows where the outcome was '1', group by the white player, and sum.
#
# To count the number of times a player won as back, we find the rows where the outcome was '-1', group by the black player, sum, and multiply by -1.
#
# The we can add these together with a fill value of 0 for those players that only played as either black or white.
# In[18]:
won_as_white = df[df['outcome']==1].groupby('white').sum()
won_as_black = -df[df['outcome']==-1].groupby('black').sum()
win_count = won_as_white.add(won_as_black, fill_value=0)
win_count.head()
# Using `nlargest` we find that player 330 won the most games at 109.
# In[19]:
win_count.nlargest(5, 'outcome')
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
1303,
12320,
29681,
82,
287,
7311,
55,
198,
198,
2,
554,
58,
16,
5974,
198,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
2... | 2.504208 | 2,495 |
from datetime import datetime
if __name__ == "__main__":
age = ask_age()
dob = ask_dob()
age_in_days = calc_age_in_days(dob)
declared_age_in_days = age * 365 # not super precise, but we don't care for now
treshold = 180 # let's set a lying treshold at 6 months
if abs(declared_age_in_days - age_in_days) > treshold:
print("You're lying!")
else:
print("All good")
| [
6738,
4818,
8079,
1330,
4818,
8079,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
2479,
796,
1265,
62,
496,
3419,
198,
220,
220,
220,
466,
65,
796,
1265,
62,
67,
672,
3419,
198,
2... | 2.412791 | 172 |
#!/usr/bin/python
# copied from https://github.com/topic-embedded-products/meta-topic/blob/master/recipes-bsp/fpga/fpga-bit-to-bin/fpga-bit-to-bin.py
import sys
import os
import struct
import argparse
parser = argparse.ArgumentParser(description='Convert FPGA bit files to raw bin format suitable for flashing')
parser.add_argument('-f', '--flip', dest='flip', action='store_true', default=False, help='Flip 32-bit endianess (needed for Zynq)')
parser.add_argument("bitfile", help="Input bit file name")
parser.add_argument("binfile", help="Output bin file name")
args = parser.parse_args()
short = struct.Struct('>H')
ulong = struct.Struct('>I')
bitfile = open(args.bitfile, 'rb')
l = short.unpack(bitfile.read(2))[0]
if l != 9:
raise Exception, "Missing <0009> header (0x%x), not a bit file" % l
bitfile.read(l)
l = short.unpack(bitfile.read(2))[0]
d = bitfile.read(l)
if d != 'a':
raise Exception, "Missing <a> header, not a bit file"
l = short.unpack(bitfile.read(2))[0]
d = bitfile.read(l)
print "Design name:", d
KEYNAMES = {'b': "Partname", 'c': "Date", 'd': "Time"}
while 1:
k = bitfile.read(1)
if not k:
raise Exception, "unexpected EOF"
elif k == 'e':
l = ulong.unpack(bitfile.read(4))[0]
print "found binary data:", l
d = bitfile.read(l)
if args.flip:
d = flip32(d)
open(args.binfile, 'wb').write(d)
break
elif k in KEYNAMES:
l = short.unpack(bitfile.read(2))[0]
d = bitfile.read(l)
print KEYNAMES[k], d
else:
print "Unexpected key: ", k
l = short.unpack(bitfile.read(2))[0]
d = bitfile.read(l)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
2,
18984,
422,
3740,
1378,
12567,
13,
785,
14,
26652,
12,
20521,
9395,
12,
29498,
14,
28961,
12,
26652,
14,
2436,
672,
14,
9866,
14,
8344,
18636,
12,
24145,
14,
46428,
4908,
14,
46428,... | 2.451817 | 633 |
# pylint fail to discover some mongoengin members
# pylint: disable=E1101
import pykscores.database as db
from pykscores.database import User, Play, UserScore
import datetime
db.connect()
| [
2,
279,
2645,
600,
2038,
284,
7073,
617,
285,
25162,
1516,
259,
1866,
198,
2,
279,
2645,
600,
25,
15560,
28,
36,
1157,
486,
198,
11748,
12972,
591,
66,
2850,
13,
48806,
355,
20613,
198,
6738,
12972,
591,
66,
2850,
13,
48806,
1330,
... | 3.166667 | 60 |
from git.repo.base import Repo
from .settings import settings
| [
6738,
17606,
13,
260,
7501,
13,
8692,
1330,
1432,
78,
198,
6738,
764,
33692,
1330,
6460,
198
] | 3.647059 | 17 |
import os
import re
import base64
import zlib
import json
import time
from slpp import slpp as lua # https://github.com/SirAnthony/slpp
if __name__ == "__main__":
factorioPath = r"C:\Program Files (x86)\Steam\SteamApps\common\Factorio"
path = os.path.dirname(__file__)
# recipeRelPath = r"data\base\prototypes\recipe"
# entityRelPath = r"data\base\prototypes\entity"
# graphicsRelPath = r"data\base\graphics\entity"
# cacheRelPath = "cache"
# # make cache directory
# if not os.path.isdir(os.path.join(path, cacheRelPath)):
# os.makedirs(os.path.join(path, cacheRelPath))
# # saving entities to entities.json, acts like "caching"
# if os.path.isfile(os.path.join(path, cacheRelPath, "entities.json")):
# with open(os.path.join(path, cacheRelPath, "entities.json")) as f:
# entityJson = json.load(f)
# else:
# entityJson = readLuaToJson(factorioPath, entityRelPath, filenamesContain = ["entities"])
# with open(os.path.join(path, cacheRelPath, "entities.json"), "w") as f:
# json.dump(entityJson, f, indent=4)
entityRelPath = r"data\base\prototypes\entity"
if os.path.isfile(os.path.join(path, "entities.json")):
# with open(os.path.join(path, cacheRelPath, "recipes.json")) as f:
# recipeJson = json.load(f)
pass
else:
entityJson = readLuaToJson(factorioPath, entityRelPath, filenamesContain = [])
with open(os.path.join(path, "entities.json"), "w") as f:
json.dump(entityJson, f, indent=4) | [
198,
11748,
28686,
198,
11748,
302,
198,
11748,
2779,
2414,
198,
11748,
1976,
8019,
198,
11748,
33918,
198,
11748,
640,
198,
6738,
1017,
381,
1330,
1017,
381,
355,
300,
6413,
1303,
3740,
1378,
12567,
13,
785,
14,
22788,
32697,
14,
6649,... | 2.368024 | 663 |
"a{ \t, \t3 \t}?"
| [
1,
64,
90,
3467,
83,
11,
3467,
83,
18,
3467,
83,
92,
1701,
198
] | 1.285714 | 14 |
texto=str(2**1000000)
>>> print(len(texto))
301030
| [
198,
5239,
78,
28,
2536,
7,
17,
1174,
16,
10535,
8,
198,
33409,
3601,
7,
11925,
7,
5239,
78,
4008,
198,
18938,
39101,
198
] | 2.166667 | 24 |
# Small alphabet u using function
def for_u():
""" *'s printed in the shape of small u """
for row in range(5):
for col in range(5):
if col %4 ==0 and row !=4 or row ==4 and col%4 !=0:
print('*',end=' ')
else:
print(' ',end=' ')
print()
def while_u():
""" *'s printed in the Shape of Small u """
row =0
while row <5:
col =0
while col <5:
if col %4 ==0 and row !=4 or row ==4 and col%4 !=0:
print('*',end=' ')
else:
print(' ',end=' ')
col+=1
print()
row +=1
| [
2,
10452,
24830,
334,
1262,
2163,
201,
198,
4299,
329,
62,
84,
33529,
201,
198,
220,
220,
220,
37227,
1635,
6,
82,
10398,
287,
262,
5485,
286,
1402,
334,
37227,
201,
198,
220,
220,
220,
329,
5752,
287,
2837,
7,
20,
2599,
201,
198,... | 1.78836 | 378 |
# Copyright 2015 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit.plugins.windows import createuser
from cloudbaseinit.tests import testutils
| [
2,
15069,
1853,
10130,
8692,
23555,
21714,
75,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11... | 3.40678 | 236 |
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import validates
from sqlalchemy.sql import schema, sqltypes
from gdc_ng_models.models import audit
Base = declarative_base()
RELEASED_DATA_DATA_TYPE_VALUES = frozenset({"ssm", "cnv", "case"})
RELEASED_DATA_LOG_ACTION_VALUES = frozenset({"release", "unrelease"})
| [
6738,
44161,
282,
26599,
13,
2302,
13,
32446,
283,
876,
1330,
2377,
283,
876,
62,
8692,
198,
6738,
44161,
282,
26599,
13,
2302,
13,
12114,
10236,
1330,
14554,
62,
26745,
198,
6738,
44161,
282,
26599,
13,
579,
1330,
4938,
689,
198,
673... | 2.947761 | 134 |
from numbers import Real, Integral
import numpy as np
from . import _batoid
from .constants import globalCoordSys, vacuum
from .coordSys import CoordSys
from .coordTransform import CoordTransform
from .trace import applyForwardTransform, applyForwardTransformArrays
from .utils import lazy_property, fieldToDirCos
from .surface import Plane
class RayVector:
"""Create RayVector from 1d parameter arrays. Always makes a copy
of input arrays.
Parameters
----------
x, y, z : ndarray of float, shape (n,)
Positions of rays in meters.
vx, vy, vz : ndarray of float, shape (n,)
Velocities of rays in units of the speed of light in vacuum.
t : ndarray of float, shape (n,)
Reference times (divided by the speed of light in vacuum) in units
of meters.
wavelength : ndarray of float, shape (n,)
Vacuum wavelengths in meters.
flux : ndarray of float, shape (n,)
Fluxes in arbitrary units.
vignetted : ndarray of bool, shape (n,)
True where rays have been vignetted.
coordSys : CoordSys
Coordinate system in which this ray is expressed. Default: the
global coordinate system.
"""
@staticmethod
def positionAtTime(self, t):
"""Calculate the positions of the rays at a given time.
Parameters
----------
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
ndarray of float, shape (n, 3)
Positions in meters.
"""
out = np.empty_like(self._r)
self._rv.positionAtTime(t, out.ctypes.data)
return out
def propagate(self, t):
"""Propagate this RayVector to given time.
Parameters
----------
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
RayVector
Reference to self, no copy is made.
"""
self._rv.propagateInPlace(t)
return self
def phase(self, r, t):
"""Calculate plane wave phases at given position and time.
Parameters
----------
r : ndarray of float, shape (3,)
Position in meters at which to compute phase
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
ndarray of float, shape(n,)
"""
out = np.empty_like(self._t)
self._rv.phase(r[0], r[1], r[2], t, out.ctypes.data)
return out
def amplitude(self, r, t):
"""Calculate (scalar) complex electric-field amplitudes at given
position and time.
Parameters
----------
r : ndarray of float, shape (3,)
Position in meters.
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
ndarray of complex, shape (n,)
"""
out = np.empty_like(self._t, dtype=np.complex128)
self._rv.amplitude(r[0], r[1], r[2], t, out.ctypes.data)
return out
def sumAmplitude(self, r, t, ignoreVignetted=True):
"""Calculate the sum of (scalar) complex electric-field amplitudes of
all rays at given position and time.
Parameters
----------
r : ndarray of float, shape (3,)
Position in meters.
t : float
Time (over vacuum speed of light; in meters).
Returns
-------
complex
"""
return self._rv.sumAmplitude(r[0], r[1], r[2], t, ignoreVignetted)
@classmethod
def asGrid(
cls,
optic=None, backDist=None, medium=None, stopSurface=None,
wavelength=None,
source=None, dirCos=None,
theta_x=None, theta_y=None, projection='postel',
nx=None, ny=None,
dx=None, dy=None,
lx=None, ly=None,
flux=1,
nrandom=None
):
"""Create RayVector on a parallelogram shaped region.
This function will often be used to create a grid of rays on a square
grid, but is flexible enough to also create grids on an arbitrary
parallelogram, or even randomly distributed across an arbitrary
parallelogram-shaped region.
The algorithm starts by placing rays on the "stop" surface, and then
backing them up such that they are in front of any surfaces of the
optic they're intended to trace.
The stop surface of most large telescopes is the plane perpendicular to
the optic axis and flush with the rim of the primary mirror. This
plane is usually also the entrance pupil since there are no earlier
refractive or reflective surfaces. However, since this plane is a bit
difficult to locate automatically, the default stop surface in batoid
is the global x-y plane.
If a telescope has a stopSurface attribute in its yaml file, then this
is usually a good choice to use in this function. Using a curved
surface for the stop surface is allowed, but is usually a bad idea as
this may lead to a non-uniformly illuminated pupil and is inconsistent
with, say, an incoming uniform spherical wave or uniform plane wave.
Parameters
----------
optic : `batoid.Optic`, optional
If present, then try to extract values for ``backDist``,
``medium``, ``stopSurface``, and ``lx`` from the Optic. Note that
values explicitly passed to `asGrid` as keyword arguments override
those extracted from ``optic``.
backDist : float, optional
Map rays backwards from the stop surface to the plane that is
perpendicular to the rays and ``backDist`` meters from the point
(0, 0, z(0,0)) on the stop surface. This should generally be set
large enough that any obscurations or phantom surfaces occuring
before the stop surface are now "in front" of the ray. If this
keyword is set to ``None`` and the ``optic`` keyword is set, then
infer a value from ``optic.backDist``. If both this keyword and
``optic`` are ``None``, then use a default of 40 meters, which
should be sufficiently large for foreseeable telescopes.
medium : `batoid.Medium`, optional
Initial medium of each ray. If this keyword is set to ``None`` and
the ``optic`` keyword is set, then infer a value from
``optic.inMedium``. If both this keyword and ``optic`` are
``None``, then use a default of vacuum.
stopSurface : batoid.Interface, optional
Surface defining the system stop. If this keyword is set to
``None`` and the ``optic`` keyword is set, then infer a value from
``optic.stopSurface``. If both this keyword and ``optic`` are
``None``, then use a default ``Interface(Plane())``, which is the
global x-y plane.
wavelength : float
Vacuum wavelength of rays in meters.
source : None or ndarray of float, shape (3,), optional
Where rays originate. If None, then rays originate an infinite
distance away, in which case the ``dirCos`` kwarg must also be
specified to set the direction of ray propagation. If an ndarray,
then the rays originate from this point in global coordinates and
the ``dirCos`` kwarg is ignored.
dirCos : ndarray of float, shape (3,), optional
If source is None, then this indicates the initial direction of
propagation of the rays. If source is not None, then this is
ignored. Also see ``theta_x``, ``theta_y`` as an alternative to
this keyword.
theta_x, theta_y : float, optional
Field angle in radians. If source is None, then this indicates the
initial direction of propagation of the rays. If source is not
None, then this is ignored. Uses `utils.fieldToDirCos` to convert
to direction cosines. Also see ``dirCos`` as an alternative to
this keyword.
projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
Projection used to convert field angle to direction cosines.
nx, ny : int, optional
Number of rays on each side of grid.
dx, dy : float or (2,) array of float, optional
Separation in meters between adjacent rays in grid. If scalars,
then the separations are exactly along the x and y directions. If
arrays, then these are interpretted as the primitive vectors for
the first and second dimensions of the grid. If only dx is
explicitly specified, then dy will be inferred as a 90-degree
rotation from dx with the same length as dx.
lx, ly : float or (2,) array of float, optional
Length of each side of ray grid. If scalars, then these are
measured along the x and y directions. If arrays, then these also
indicate the primitive vectors orientation of the grid. If only
lx is specified, then ly will be inferred as a 90-degree rotation
from lx with the same length as lx. If lx is ``None``, then first
infer a value from ``nx`` and ``dx``, and if that doesn't work,
infer a value from ``optic.pupilSize``.
flux : float, optional
Flux to assign each ray. Default is 1.0.
nrandom : None or int, optional
If not None, then uniformly sample this many rays from
parallelogram region instead of sampling on a regular grid.
"""
from .optic import Interface
from .surface import Plane
if optic is not None:
if backDist is None:
backDist = optic.backDist
if medium is None:
medium = optic.inMedium
if stopSurface is None:
try:
stopSurface = optic.stopSurface
except AttributeError:
stopSurface = None
if lx is None:
# If nx and dx are both present, then let lx get inferred from
# them. Otherwise, infer from optic.
if nx is None or dx is None:
lx = optic.pupilSize
if backDist is None:
backDist = 40.0
if stopSurface is None:
stopSurface = Interface(Plane())
if medium is None:
medium = vacuum
if dirCos is None and source is None:
dirCos = fieldToDirCos(theta_x, theta_y, projection=projection)
if wavelength is None:
raise ValueError("Missing wavelength keyword")
# To determine the parallelogram, exactly 2 of nx, dx, lx must be set.
if sum(a is not None for a in [nx, dx, lx]) != 2:
raise ValueError("Exactly 2 of nx, dx, lx must be specified")
if nx is not None and ny is None:
ny = nx
if dx is not None and dy is None:
dy = dx
if lx is not None and ly is None:
if isinstance(lx, Real):
ly = lx
else:
ly = np.dot(np.array([[0, -1], [1, 0]]), lx)
# We need lx, ly, nx, ny for below, so construct these from other
# arguments if they're not already available.
if nx is not None and dx is not None:
if (nx%2) == 0:
lx = dx*(nx-2)
else:
lx = dx*(nx-1)
if (ny%2) == 0:
ly = dy*(ny-2)
else:
ly = dy*(ny-1)
elif lx is not None and dx is not None:
# adjust dx in this case
# always infer an even n (since even and odd are degenerate given
# only lx, dx).
slop = 0.1 # prevent 3.9999 -> 3, e.g.
nx = int((lx/dx+slop)//2)*2+2
ny = int((ly/dy+slop)//2)*2+2
# These are the real dx, dy; which may be different from what was
# passed in order to force an integer for nx/ny. We don't actually
# need them after this point though.
# dx = lx/(nx-2)
# dy = ly/(ny-2)
if isinstance(lx, Real):
lx = (lx, 0.0)
if isinstance(ly, Real):
ly = (0.0, ly)
if nrandom is not None:
xx = np.random.uniform(-0.5, 0.5, size=nrandom)
yy = np.random.uniform(-0.5, 0.5, size=nrandom)
else:
if nx <= 2:
x_d = 1.
else:
x_d = (nx-(2 if (nx%2) == 0 else 1))/nx
if ny <= 2:
y_d = 1.
else:
y_d = (ny-(2 if (ny%2) == 0 else 1))/ny
xx = np.fft.fftshift(np.fft.fftfreq(nx, x_d))
yy = np.fft.fftshift(np.fft.fftfreq(ny, y_d))
xx, yy = np.meshgrid(xx, yy)
xx = xx.ravel()
yy = yy.ravel()
r = np.empty((len(xx), 3), order='F')
x = r[:, 0]
y = r[:, 1]
z = r[:, 2]
stack = np.stack([xx, yy])
x[:] = np.dot(lx, stack)
y[:] = np.dot(ly, stack)
del xx, yy, stack
z[:] = stopSurface.surface.sag(x, y)
transform = CoordTransform(stopSurface.coordSys, globalCoordSys)
applyForwardTransformArrays(transform, x, y, z)
w = np.empty_like(x)
w.fill(wavelength)
n = medium.getN(wavelength)
return cls._finish(backDist, source, dirCos, n, r, w, flux)
@classmethod
def asPolar(
cls,
optic=None, backDist=None, medium=None, stopSurface=None,
wavelength=None,
outer=None, inner=0.0,
source=None, dirCos=None,
theta_x=None, theta_y=None, projection='postel',
nrad=None, naz=None,
flux=1,
nrandom=None
):
"""Create RayVector on an annular region using a hexapolar grid.
This function can be used to regularly sample the entrance pupil of a
telescope using polar symmetry (really, hexagonal symmetry). Rings of
different radii are used, with the number of samples on each ring
restricted to a multiple of 6 (with the exception of a potential
central "ring" of radius 0, which is only ever sampled once). This may
be more efficient than using a square grid since more of the rays
generated may avoid vignetting.
This function is also used to generate rays uniformly randomly sampled
from a given annular region.
The algorithm used here starts by placing rays on the "stop" surface,
and then backing them up such that they are in front of any surfaces of
the optic they're intended to trace.
The stop surface of most large telescopes is the plane perpendicular to
the optic axis and flush with the rim of the primary mirror. This
plane is usually also the entrance pupil since there are no earlier
refractive or reflective surfaces. However, since this plane is a bit
difficult to locate automatically, the default stop surface in batoid
is the global x-y plane.
If a telescope has a stopSurface attribute in its yaml file, then this
is usually a good choice to use in this function. Using a curved
surface for the stop surface is allowed, but is usually a bad idea as
this may lead to a non-uniformly illuminated pupil and is inconsistent
with, say, an incoming uniform spherical wave or uniform plane wave.
Parameters
----------
optic : `batoid.Optic`, optional
If present, then try to extract values for ``backDist``,
``medium``, ``stopSurface``, and ``outer`` from the Optic. Note
that values explicitly passed to `asPolar` as keyword arguments
override those extracted from ``optic``.
backDist : float, optional
Map rays backwards from the stop surface to the plane that is
perpendicular to the ray and ``backDist`` meters from the point
(0, 0, z(0,0)) on the stop surface. This should generally be set
large enough that any obscurations or phantom surfaces occuring
before the stop surface are now "in front" of the ray. If this
keyword is set to ``None`` and the ``optic`` keyword is set, then
infer a value from ``optic.backDist``. If both this keyword and
``optic`` are ``None``, then use a default of 40 meters, which
should be sufficiently large for foreseeable telescopes.
medium : `batoid.Medium`, optional
Initial medium of each ray. If this keyword is set to ``None`` and
the ``optic`` keyword is set, then infer a value from
``optic.inMedium``. If both this keyword and ``optic`` are
``None``, then use a default of vacuum.
stopSurface : batoid.Interface, optional
Surface defining the system stop. If this keyword is set to
``None`` and the ``optic`` keyword is set, then infer a value from
``optic.stopSurface``. If both this keyword and ``optic`` are
``None``, then use a default ``Interface(Plane())``, which is the
global x-y plane.
wavelength : float
Vacuum wavelength of rays in meters.
outer : float
Outer radius of annulus in meters.
inner : float, optional
Inner radius of annulus in meters. Default is 0.0.
source : None or ndarray of float, shape (3,), optional
Where rays originate. If None, then rays originate an infinite
distance away, in which case the ``dirCos`` kwarg must also be
specified to set the direction of ray propagation. If an ndarray,
then the rays originate from this point in global coordinates and
the ``dirCos`` kwarg is ignored.
dirCos : ndarray of float, shape (3,), optional
If source is None, then this indicates the initial direction of
propagation of the rays. If source is not None, then this is
ignored. Also see ``theta_x``, ``theta_y`` as an alternative to
this keyword.
theta_x, theta_y : float, optional
Field angle in radians. If source is None, then this indicates the
initial direction of propagation of the rays. If source is not
None, then this is ignored. Uses `utils.fieldToDirCos` to convert
to direction cosines. Also see ``dirCos`` as an alternative to
this keyword.
projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
Projection used to convert field angle to direction cosines.
nrad : int
Number of radii on which create rays.
naz : int
Approximate number of azimuthal angles uniformly spaced along the
outermost ring. Each ring is constrained to have a multiple of 6
azimuths, so the realized value may be slightly different than
the input value here. Inner rings will have fewer azimuths in
proportion to their radius, but will still be constrained to a
multiple of 6. (If the innermost ring has radius 0, then exactly
1 ray, with azimuth undefined, will be used on that "ring".)
flux : float, optional
Flux to assign each ray. Default is 1.0.
nrandom : int, optional
If not None, then uniformly sample this many rays from annular
region instead of sampling on a hexapolar grid.
"""
from .optic import Interface
if optic is not None:
if backDist is None:
backDist = optic.backDist
if medium is None:
medium = optic.inMedium
if stopSurface is None:
stopSurface = optic.stopSurface
if outer is None:
outer = optic.pupilSize/2
if backDist is None:
backDist = 40.0
if stopSurface is None:
stopSurface = Interface(Plane())
if medium is None:
medium = vacuum
if dirCos is None and source is None:
dirCos = fieldToDirCos(theta_x, theta_y, projection=projection)
if wavelength is None:
raise ValueError("Missing wavelength keyword")
if nrandom is None:
nphis = []
rhos = np.linspace(outer, inner, nrad)
for rho in rhos:
nphi = int((naz*rho/outer)//6)*6
if nphi == 0:
nphi = 6
nphis.append(nphi)
if inner == 0.0:
nphis[-1] = 1
th = np.empty(np.sum(nphis))
rr = np.empty(np.sum(nphis))
idx = 0
for rho, nphi in zip(rhos, nphis):
rr[idx:idx+nphi] = rho
th[idx:idx+nphi] = np.linspace(0, 2*np.pi, nphi, endpoint=False)
idx += nphi
if inner == 0.0:
rr[-1] = 0.0
th[-1] = 0.0
else:
rr = np.sqrt(np.random.uniform(inner**2, outer**2, size=nrandom))
th = np.random.uniform(0, 2*np.pi, size=nrandom)
r = np.empty((len(rr), 3), order='F')
x = r[:, 0]
y = r[:, 1]
z = r[:, 2]
x[:] = rr*np.cos(th)
y[:] = rr*np.sin(th)
del rr, th
z[:] = stopSurface.surface.sag(x, y)
transform = CoordTransform(stopSurface.coordSys, globalCoordSys)
applyForwardTransformArrays(transform, x, y, z)
w = np.empty_like(x)
w.fill(wavelength)
n = medium.getN(wavelength)
return cls._finish(backDist, source, dirCos, n, r, w, flux)
    @classmethod
    def asSpokes(
        cls,
        optic=None, backDist=None, medium=None, stopSurface=None,
        wavelength=None,
        outer=None, inner=0.0,
        source=None, dirCos=None,
        theta_x=None, theta_y=None, projection='postel',
        spokes=None, rings=None,
        spacing='uniform',
        flux=1
    ):
        """Create RayVector on an annular region using a spokes pattern.

        The function generates rays on a rings-and-spokes pattern, with a
        fixed number of radii for each azimuth and a fixed number of azimuths
        for each radius.  Its main use is for decomposing functions in pupil
        space into Zernike components using Gaussian Quadrature integration on
        annuli.  For more general purpose annular sampling,
        RayVector.asPolar() is often a better choice since it samples the
        pupil more uniformly.

        The algorithm used here starts by placing rays on the "stop" surface,
        and then backing them up such that they are in front of any surfaces
        of the optic they're intended to trace.

        The stop surface of most large telescopes is the plane perpendicular
        to the optic axis and flush with the rim of the primary mirror.  This
        plane is usually also the entrance pupil since there are no earlier
        refractive or reflective surfaces.  However, since this plane is a bit
        difficult to locate automatically, the default stop surface in batoid
        is the global x-y plane.

        If a telescope has a stopSurface attribute in its yaml file, then this
        is usually a good choice to use in this function.  Using a curved
        surface for the stop surface is allowed, but is usually a bad idea as
        this may lead to a non-uniformly illuminated pupil and is inconsistent
        with, say, an incoming uniform spherical wave or uniform plane wave.

        Parameters
        ----------
        optic : `batoid.Optic`, optional
            If present, then try to extract values for ``backDist``,
            ``medium``, ``stopSurface``, and ``outer`` from the Optic.  Note
            that values explicitly passed to `asSpokes` as keyword arguments
            override those extracted from ``optic``.
        backDist : float, optional
            Map rays backwards from the stop surface to the plane that is
            perpendicular to the ray and ``backDist`` meters from the point
            (0, 0, z(0,0)) on the stop surface.  This should generally be set
            large enough that any obscurations or phantom surfaces occuring
            before the stop surface are now "in front" of the ray.  If this
            keyword is set to ``None`` and the ``optic`` keyword is set, then
            infer a value from ``optic.backDist``.  If both this keyword and
            ``optic`` are ``None``, then use a default of 40 meters, which
            should be sufficiently large for foreseeable telescopes.
        medium : `batoid.Medium`, optional
            Initial medium of each ray.  If this keyword is set to ``None``
            and the ``optic`` keyword is set, then infer a value from
            ``optic.inMedium``.  If both this keyword and ``optic`` are
            ``None``, then use a default of vacuum.
        stopSurface : batoid.Interface, optional
            Surface defining the system stop.  If this keyword is set to
            ``None`` and the ``optic`` keyword is set, then infer a value from
            ``optic.stopSurface``.  If both this keyword and ``optic`` are
            ``None``, then use a default ``Interface(Plane())``, which is the
            global x-y plane.
        wavelength : float
            Vacuum wavelength of rays in meters.
        outer : float
            Outer radius of annulus in meters.
        inner : float, optional
            Inner radius of annulus in meters.  Default is 0.0.
        source : None or ndarray of float, shape (3,), optional
            Where rays originate.  If None, then rays originate an infinite
            distance away, in which case the ``dirCos`` kwarg must also be
            specified to set the direction of ray propagation.  If an ndarray,
            then the rays originate from this point in global coordinates and
            the ``dirCos`` kwarg is ignored.
        dirCos : ndarray of float, shape (3,), optional
            If source is None, then this indicates the initial direction of
            propagation of the rays.  If source is not None, then this is
            ignored.  Also see ``theta_x``, ``theta_y`` as an alternative to
            this keyword.
        theta_x, theta_y : float, optional
            Field angle in radians.  If source is None, then this indicates
            the initial direction of propagation of the rays.  If source is
            not None, then this is ignored.  Uses `utils.fieldToDirCos` to
            convert to direction cosines.  Also see ``dirCos`` as an
            alternative to this keyword.
        projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
            Projection used to convert field angle to direction cosines.
        spokes : int or ndarray of float
            If int, then number of spokes to use.
            If ndarray, then the values of the spokes azimuthal angles in
            radians.
        rings : int or ndarray of float
            If int, then number of rings to use.
            If array, then the values of the ring radii to use in meters.
        spacing : {'uniform', 'GQ'}
            If uniform, assign ring radii uniformly between ``inner`` and
            ``outer``.
            If GQ, then assign ring radii as the Gaussian Quadrature points
            for integration on an annulus.  In this case, the ray fluxes will
            be set to the Gaussian Quadrature weights (and the ``flux`` kwarg
            will be ignored).
        flux : float, optional
            Flux to assign each ray.  Default is 1.0.
        """
        from .optic import Interface
        from .surface import Plane
        if optic is not None:
            # Pull any unspecified keywords from the optic.
            if backDist is None:
                backDist = optic.backDist
            if medium is None:
                medium = optic.inMedium
            if stopSurface is None:
                stopSurface = optic.stopSurface
            if outer is None:
                outer = optic.pupilSize/2
        # Global fallbacks when neither keyword nor optic supplied a value.
        if backDist is None:
            backDist = 40.0
        if stopSurface is None:
            stopSurface = Interface(Plane())
        if medium is None:
            medium = vacuum
        if dirCos is None and source is None:
            dirCos = fieldToDirCos(theta_x, theta_y, projection=projection)
        if wavelength is None:
            raise ValueError("Missing wavelength keyword")
        if isinstance(rings, Integral):
            if spacing == 'uniform':
                rings = np.linspace(inner, outer, rings)
            elif spacing == 'GQ':
                if spokes is None:
                    # Default spoke count when only a ring count was given.
                    spokes = 2*rings+1
                # Gauss-Legendre nodes Li and weights w on [-1, 1].
                Li, w = np.polynomial.legendre.leggauss(rings)
                eps = inner/outer
                area = np.pi*(1-eps**2)
                # Map the nodes from [-1, 1] onto annulus radii.
                rings = np.sqrt(eps**2 + (1+Li)*(1-eps**2)/2)*outer
                # Quadrature weights become per-ray fluxes here, overriding
                # the ``flux`` kwarg as documented above.  NOTE: ``w`` is
                # reused further below for the wavelength array.
                flux = w*area/(2*spokes)
        if isinstance(spokes, Integral):
            spokes = np.linspace(0, 2*np.pi, spokes, endpoint=False)
        # Outer-product the rings and spokes into a full polar grid.
        rings, spokes = np.meshgrid(rings, spokes)
        flux = np.broadcast_to(flux, rings.shape)
        rings = rings.ravel()
        spokes = spokes.ravel()
        flux = flux.ravel()
        # Column-major storage so x, y, z below are contiguous column views.
        r = np.empty((len(rings), 3), order='F')
        x = r[:, 0]
        y = r[:, 1]
        z = r[:, 2]
        x[:] = rings*np.cos(spokes)
        y[:] = rings*np.sin(spokes)
        del rings, spokes
        z[:] = stopSurface.surface.sag(x, y)
        transform = CoordTransform(stopSurface.coordSys, globalCoordSys)
        applyForwardTransformArrays(transform, x, y, z)
        w = np.empty_like(x)
        w.fill(wavelength)
        n = medium.getN(wavelength)
        return cls._finish(backDist, source, dirCos, n, r, w, flux)
@classmethod
def _finish(cls, backDist, source, dirCos, n, r, w, flux):
"""Map rays backwards to their source position."""
if isinstance(flux, Real):
flux = np.full(len(r), float(flux))
if source is None:
from ._batoid import finishParallel
vv = np.array(dirCos, dtype=float)
vv /= n*np.sqrt(np.dot(vv, vv))
zhat = -n*vv
xhat = np.cross(np.array([1.0, 0.0, 0.0]), zhat)
xhat /= np.sqrt(np.dot(xhat, xhat))
yhat = np.cross(xhat, zhat)
origin = zhat*backDist
rot = np.stack([xhat, yhat, zhat]).T
finishParallel(origin, rot.ravel(), vv, r.ctypes.data, len(r))
v = np.full_like(r, vv)
t = np.zeros(len(r), dtype=float)
vignetted = np.zeros(len(r), dtype=bool)
failed = np.zeros(len(r), dtype=bool)
return RayVector._directInit(
r, v, t, w, flux, vignetted, failed, globalCoordSys
)
else:
v = np.copy(r)
v -= source
v /= n*np.einsum('ab,ab->b', v, v)
r[:] = source
t = np.zeros(len(r), dtype=float)
vignetted = np.zeros(len(r), dtype=bool)
failed = np.zeros(len(r), dtype=bool)
return RayVector._directInit(
r, v, t, w, flux, vignetted, failed, globalCoordSys
)
@classmethod
def fromStop(
cls, x, y,
optic=None, backDist=None, medium=None, stopSurface=None,
wavelength=None,
source=None, dirCos=None,
theta_x=None, theta_y=None, projection='postel',
flux=1
):
"""Create rays that intersects the "stop" surface at given points.
The algorithm used here starts by placing the rays on the "stop"
surface, and then backing them up such that they are in front of any
surfaces of the optic they're intended to trace.
The stop surface of most large telescopes is the plane perpendicular to
the optic axis and flush with the rim of the primary mirror. This
plane is usually also the entrance pupil since there are no earlier
refractive or reflective surfaces. However, since this plane is a bit
difficult to locate automatically, the default stop surface in batoid
is the global x-y plane.
If a telescope has a stopSurface attribute in its yaml file, then this
is usually a good choice to use in this function. Using a curved
surface for the stop surface is allowed, but is usually a bad idea as
this may lead to a non-uniformly illuminated pupil and is inconsistent
with, say, an incoming uniform spherical wave or uniform plane wave.
Parameters
----------
x, y : ndarray
X/Y coordinates on the stop surface where the rays would intersect
if not refracted or reflected first.
optic : `batoid.Optic`, optional
If present, then try to extract values for ``backDist``,
``medium``, and ``stopSurface`` from the Optic. Note that values
explicitly passed here as keyword arguments override those
extracted from ``optic``.
backDist : float, optional
Map rays backwards from the stop surface to the plane that is
perpendicular to the rays and ``backDist`` meters from the point
(0, 0, z(0,0)) on the stop surface. This should generally be set
large enough that any obscurations or phantom surfaces occuring
before the stop surface are now "in front" of the ray. If this
keyword is set to ``None`` and the ``optic`` keyword is set, then
infer a value from ``optic.backDist``. If both this keyword and
``optic`` are ``None``, then use a default of 40 meters, which
should be sufficiently large for foreseeable telescopes.
medium : `batoid.Medium`, optional
Initial medium of rays. If this keyword is set to ``None`` and
the ``optic`` keyword is set, then infer a value from
``optic.inMedium``. If both this keyword and ``optic`` are
``None``, then use a default of vacuum.
stopSurface : batoid.Interface, optional
Surface defining the system stop. If this keyword is set to
``None`` and the ``optic`` keyword is set, then infer a value from
``optic.stopSurface``. If both this keyword and ``optic`` are
``None``, then use a default ``Interface(Plane())``, which is the
global x-y plane.
wavelength : float
Vacuum wavelength of rays in meters.
source : None or ndarray of float, shape (3,), optional
Where the rays originate. If None, then the rays originate an
infinite distance away, in which case the ``dirCos`` kwarg must also
be specified to set the direction of ray propagation. If an
ndarray, then the rays originates from this point in global
coordinates and the ``dirCos`` kwarg is ignored.
dirCos : ndarray of float, shape (3,), optional
If source is None, then indicates the direction of ray propagation.
If source is not None, then this is ignored.
theta_x, theta_y : float, optional
Field angle in radians. If source is None, then this indicates the
initial direction of propagation of the rays. If source is not
None, then this is ignored. Uses `utils.fieldToDirCos` to convert
to direction cosines. Also see ``dirCos`` as an alternative to
this keyword.
projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
Projection used to convert field angle to direction cosines.
flux : float, optional
Flux of rays. Default is 1.0.
"""
from .optic import Interface
from .surface import Plane
if optic is not None:
if backDist is None:
backDist = optic.backDist
if medium is None:
medium = optic.inMedium
if stopSurface is None:
stopSurface = optic.stopSurface
if backDist is None:
backDist = 40.0
if stopSurface is None:
stopSurface = Interface(Plane())
if medium is None:
medium = vacuum
if dirCos is None and source is None:
dirCos = fieldToDirCos(theta_x, theta_y, projection=projection)
if wavelength is None:
raise ValueError("Missing wavelength keyword")
xx = np.atleast_1d(x)
yy = np.atleast_1d(y)
r = np.empty((len(xx), 3), order='F')
x = r[:, 0]
y = r[:, 1]
z = r[:, 2]
x[:] = xx
y[:] = yy
z[:] = stopSurface.surface.sag(x, y)
transform = CoordTransform(stopSurface.coordSys, globalCoordSys)
applyForwardTransformArrays(transform, x, y, z)
w = np.empty_like(x)
w.fill(wavelength)
n = medium.getN(wavelength)
return cls._finish(backDist, source, dirCos, n, r, w, flux)
@classmethod
def fromFieldAngles(
cls, theta_x, theta_y, projection='postel',
optic=None, backDist=None, medium=None, stopSurface=None,
wavelength=None,
x=0, y=0,
flux=1
):
"""Create RayVector with one stop surface point but many field angles.
This method is similar to `fromStop` but broadcasts over ``theta_x``
and ``theta_y`` instead of over ``x`` and ``y``. There is less
currently less effort paid to synchronizing the ``t`` values of the
created rays, as they don't correspond to points on a physical incoming
wavefront in this case. The primary intended use case is to map chief
rays (``x``=``y``=0) from incoming field angle to focal plane position.
Parameters
----------
theta_x, theta_y : ndarray
Field angles in radians.
projection : {'postel', 'zemax', 'gnomonic', 'stereographic', 'lambert', 'orthographic'}, optional
Projection used to convert field angle to direction cosines.
optic : `batoid.Optic`, optional
If present, then try to extract values for ``backDist``,
``medium``, and ``stopSurface`` from the Optic. Note that values
explicitly passed here as keyword arguments override those
extracted from ``optic``.
backDist : float, optional
Map rays backwards from the stop surface this far. This should
generally be set large enough that any obscurations or phantom
surfaces occuring before the stop surface are now "in front" of the
rays. If this keyword is set to ``None`` and the ``optic`` keyword
is set, then infer a value from ``optic.backDist``. If both this
keyword and ``optic`` are ``None``, then use a default of 40 meters,
which should be sufficiently large for foreseeable telescopes.
medium : `batoid.Medium`, optional
Initial medium of rays. If this keyword is set to ``None`` and
the ``optic`` keyword is set, then infer a value from
``optic.inMedium``. If both this keyword and ``optic`` are
``None``, then use a default of vacuum.
stopSurface : batoid.Interface, optional
Surface defining the system stop. If this keyword is set to
``None`` and the ``optic`` keyword is set, then infer a value from
``optic.stopSurface``. If both this keyword and ``optic`` are
``None``, then use a default ``Interface(Plane())``, which is the
global x-y plane.
wavelength : float
Vacuum wavelength of rays in meters.
x, y : float
X/Y coordinates on the stop surface where the rays would intersect
if not refracted or reflected first.
flux : float, optional
Flux of rays. Default is 1.0.
"""
from .optic import Interface
from .surface import Plane
if optic is not None:
if backDist is None:
backDist = optic.backDist
if medium is None:
medium = optic.inMedium
if stopSurface is None:
stopSurface = optic.stopSurface
if backDist is None:
backDist = 40.0
if stopSurface is None:
stopSurface = Interface(Plane())
if medium is None:
medium = vacuum
if wavelength is None:
raise ValueError("Missing wavelength keyword")
vx, vy, vz = fieldToDirCos(theta_x, theta_y, projection=projection)
n = medium.getN(wavelength)
vx /= n
vy /= n
vz /= n
z = stopSurface.surface.sag(x, y)
x = np.full_like(vx, x)
y = np.full_like(vx, y)
z = np.full_like(vx, z)
t = np.zeros_like(vx)
rv = RayVector(
x, y, z,
vx, vy, vz,
t, wavelength, flux,
coordSys=stopSurface.coordSys
)
rv.propagate(-backDist*n)
return rv
    @property
    def r(self):
        """ndarray of float, shape (n, 3): Positions of rays in meters."""
        # Bring the host-side copy up to date before returning the shared
        # array view.
        self._rv.r.syncToHost()
        return self._r
    @property
    def x(self):
        """The x components of ray positions in meters."""
        # Sync the host copy, then return a view of column 0 of `r`.
        self._rv.r.syncToHost()
        return self._r[:, 0]
    @property
    def y(self):
        """The y components of ray positions in meters."""
        # Sync the host copy, then return a view of column 1 of `r`.
        self._rv.r.syncToHost()
        return self._r[:, 1]
    @property
    def z(self):
        """The z components of ray positions in meters."""
        # Sync the host copy, then return a view of column 2 of `r`.
        self._rv.r.syncToHost()
        return self._r[:, 2]
    @property
    def v(self):
        """ndarray of float, shape (n, 3): Velocities of rays in units of the
        speed of light in vacuum.  Note that these may have magnitudes < 1 if
        the rays are inside a refractive medium.
        """
        # Bring the host-side copy up to date before returning the shared
        # array view.
        self._rv.v.syncToHost()
        return self._v
    @property
    def vx(self):
        """The x components of ray velocities units of the vacuum speed of
        light.
        """
        # Sync the host copy, then return a view of column 0 of `v`.
        self._rv.v.syncToHost()
        return self._v[:, 0]
    @property
    def vy(self):
        """The y components of ray velocities units of the vacuum speed of
        light.
        """
        # Sync the host copy, then return a view of column 1 of `v`.
        self._rv.v.syncToHost()
        return self._v[:, 1]
    @property
    def vz(self):
        """The z components of ray velocities units of the vacuum speed of
        light.
        """
        # Sync the host copy, then return a view of column 2 of `v`.
        self._rv.v.syncToHost()
        return self._v[:, 2]
    @property
    def t(self):
        """Reference times (divided by the speed of light in vacuum) in units
        of meters, also known as the optical path lengths.
        """
        # Bring the host-side copy up to date before returning the shared
        # array view.
        self._rv.t.syncToHost()
        return self._t
    @property
    def wavelength(self):
        """Vacuum wavelengths in meters."""
        # wavelength is constant, so unlike r/v/t there is no need to
        # synchronize a device-side copy here.
        return self._wavelength
    @property
    def flux(self):
        """Fluxes in arbitrary units."""
        # Bring the host-side copy up to date before returning the shared
        # array view.
        self._rv.flux.syncToHost()
        return self._flux
    @property
    def vignetted(self):
        """True for rays that have been vignetted."""
        # Bring the host-side copy up to date before returning the shared
        # boolean array view.
        self._rv.vignetted.syncToHost()
        return self._vignetted
    @property
    def failed(self):
        """True for rays that have failed.  This may occur, for example, if
        batoid failed to find the intersection of a ray with a surface.
        """
        # Bring the host-side copy up to date before returning the shared
        # boolean array view.
        self._rv.failed.syncToHost()
        return self._failed
@property
def k(self):
r"""ndarray of float, shape (n, 3): Wavevectors of plane waves in units
of radians per meter. The magnitude of each wavevector is equal to
:math:`2 \pi n / \lambda`, where :math:`n` is the refractive index and
:math:`\lambda` is the wavelength.
"""
out = 2*np.pi*np.array(self.v)
out /= self.wavelength[:, None]
out /= np.sum(self.v*self.v, axis=-1)[:, None]
return out
    @property
    def kx(self):
        """The x component of each ray wavevector in radians per meter."""
        # Column 0 of the full wavevector array computed by `k`.
        return self.k[:,0]
    @property
    def ky(self):
        """The y component of each ray wavevector in radians per meter."""
        # Column 1 of the full wavevector array computed by `k`.
        return self.k[:,1]
    @property
    def kz(self):
        """The z component of each ray wavevector in radians per meter."""
        # Column 2 of the full wavevector array computed by `k`.
        return self.k[:,2]
    @property
    def omega(self):
        r"""The temporal angular frequency of each plane wave divided by the
        vacuum speed of light in units of radians per meter.  Equals
        :math:`2 \pi / \lambda`.
        """
        # Uses the vacuum wavelength, so no refractive index enters here.
        return 2*np.pi/self.wavelength
@lazy_property
def toCoordSys(self, coordSys):
"""Transform this RayVector into a new coordinate system.
Parameters
----------
coordSys: batoid.CoordSys
Destination coordinate system.
Returns
-------
RayVector
Reference to self, no copy is made.
"""
transform = CoordTransform(self.coordSys, coordSys)
applyForwardTransform(transform, self)
return self
| [
6738,
3146,
1330,
6416,
11,
15995,
1373,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
764,
1330,
4808,
8664,
1868,
198,
6738,
764,
9979,
1187,
1330,
3298,
7222,
585,
44387,
11,
17076,
198,
6738,
764,
37652,
44387,
1330,
2281... | 2.333503 | 19,613 |
from infinitd_server.db import Db
from infinitd_server.sse import SseQueues
from infinitd_server.handler.sse import SseStreamHandler
| [
6738,
1167,
15003,
67,
62,
15388,
13,
9945,
1330,
360,
65,
198,
6738,
1167,
15003,
67,
62,
15388,
13,
82,
325,
1330,
311,
325,
15681,
947,
198,
6738,
1167,
15003,
67,
62,
15388,
13,
30281,
13,
82,
325,
1330,
311,
325,
12124,
25060,
... | 3.022727 | 44 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python 2 builtins shim: rebind ``range`` and ``input`` process-wide.
"""
import __builtin__
# Replace ``range`` (which materializes a full list in Python 2) with the
# lazy ``xrange`` iterator.
del __builtin__.range
__builtin__.range = xrange
# Replace ``input`` (which eval()s the typed text in Python 2) with
# ``raw_input``, which returns the raw string.
del __builtin__.input
__builtin__.input = raw_input
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
18170,
198,
37811,
628,
198,
11748,
11593,
18780,
259,
834,
198,
198,
12381,
11593,
18780,
259,
834,
13,
95... | 2.48 | 75 |
from libsaas.services import base
from .resource import BasecampResource
| [
6738,
9195,
11400,
292,
13,
30416,
1330,
2779,
198,
198,
6738,
764,
31092,
1330,
7308,
16544,
26198,
628,
628
] | 4.052632 | 19 |
from django.urls import path
from . import views
app_name = 'applications'
urlpatterns = [
path('',
views.ApplicationListView.as_view(),
name='application.list'),
path('add',
views.ApplicationCreateView.as_view(),
name='application.add'),
path('<int:pk>/',
views.ApplicationDetailView.as_view(),
name='application.detail'),
path('<int:pk>/edit',
views.ApplicationUpdateView.as_view(),
name='application.edit'),
path('<int:pk>/delete',
views.ApplicationDeleteView.as_view(),
name='application.delete'),
path('<int:pk>/token',
views.ApplicationManageTokenView.as_view(),
name='token'),
path('<int:pk>/manage-access',
views.ApplicationManageAccessView.as_view(),
name='application.manage-access'),
path('<int:pk>/manage-access/users/<int:user_pk>',
views.ApplicationManageAccessView.as_view(),
name='application.manage-access.user'),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
764,
1330,
5009,
198,
198,
1324,
62,
3672,
796,
705,
1324,
677,
602,
6,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786,
3256,
198,
220,
220,
220,
... | 2.310658 | 441 |
#!/usr/bin/env python
import rospy
from naoqi_sensors.naoqi_camera import NaoqiCam
if __name__ == "__main__":
naocam = NaoqiCam()
naocam.start()
rospy.spin()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
686,
2777,
88,
198,
6738,
299,
5488,
40603,
62,
82,
641,
669,
13,
2616,
78,
40603,
62,
25695,
1330,
399,
5488,
40603,
21701,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
1... | 2.260274 | 73 |
# Copyright (c) 2014 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
"""
This module provide some tools for bce client.
"""
import StringIO
import cStringIO
import os
import re
import datetime
import hashlib
import base64
import string
import baidubce
from baidubce.http import http_headers
def get_md5_from_fp(fp, offset=0, length=-1, buf_size=8192):
    """
    Get MD5 from file by fp.

    The file position is restored to its original value afterwards.

    :type fp: FileIO
    :param fp: seekable file-like object opened in binary mode
    :type offset: long
    :param offset: position to start hashing from; 0 keeps the current
        position
    :type length: long
    :param length: number of bytes to hash; non-positive means read to EOF
    :type buf_size: int
    :param buf_size: chunk size used while reading
    =======================
    :return:
        **MD5 digest, encoded by base64**
    """
    saved_pos = fp.tell()
    if offset:
        fp.seek(offset)
    digest = hashlib.md5()
    remaining = length
    while True:
        # Read at most buf_size bytes, but never more than what's left of
        # the requested length.
        chunk_size = remaining if 0 < remaining < buf_size else buf_size
        chunk = fp.read(chunk_size)
        if not chunk:
            break
        digest.update(chunk)
        if remaining > 0:
            remaining -= len(chunk)
            if remaining == 0:
                break
    fp.seek(saved_pos)
    return base64.standard_b64encode(digest.digest())
def get_canonical_time(timestamp=0):
    """
    Get canonical time.

    :type timestamp: int
    :param timestamp: seconds since the epoch (UTC); 0 means "now"
    =======================
    :return:
        **string of canonical_time**
    """
    if timestamp:
        moment = datetime.datetime.utcfromtimestamp(timestamp)
    else:
        moment = datetime.datetime.utcnow()
    return "%04d-%02d-%02dT%02d:%02d:%02dZ" % (
        moment.year, moment.month, moment.day,
        moment.hour, moment.minute, moment.second)
def is_ip(s):
    """
    Check a string whether is a legal ip address.

    An optional ``:port`` suffix is tolerated and ``localhost`` is accepted.

    :type s: string
    :param s: host string such as "10.0.0.1", "10.0.0.1:80" or "localhost"
    =======================
    :return:
        **Boolean**
    """
    try:
        host = s.split(':')[0]
        if host == 'localhost':
            return True
        octets = host.split('.')
        if len(octets) != 4:
            return False
        # Each dotted component must parse as an integer in [0, 255].
        for octet in octets:
            if not 0 <= int(octet) <= 255:
                return False
    except (ValueError, AttributeError):
        # ValueError: non-numeric octet; AttributeError: non-string input.
        # (Previously a bare ``except:`` which hid all errors.)
        return False
    return True
def convert_to_standard_string(input_string):
    """
    Encode a string to utf-8.

    Unicode text is encoded with the SDK default encoding; anything else is
    converted through ``str``.

    :type input_string: string
    :param input_string: None
    =======================
    :return:
        **string**
    """
    if not isinstance(input_string, unicode):
        return str(input_string)
    return input_string.encode(baidubce.DEFAULT_ENCODING)
def convert_header2map(header_list):
    """
    Transfer a header list to dict

    Surrounding double quotes on string names or values are stripped off.

    :type header_list: list
    :param header_list: list of (name, value) pairs
    =======================
    :return:
        **dict**
    """
    def _unquote(item):
        # Only plain strings can carry surrounding quotes worth removing.
        return item.strip('\"') if isinstance(item, str) else item

    return dict((_unquote(name), _unquote(value))
                for name, value in header_list)
def safe_get_element(name, container):
    """
    Get element from dict which the lower of key and name are equal.

    Comparison is case-insensitive and ignores surrounding whitespace; an
    empty string is returned when no key matches.

    :type name: string
    :param name: None
    :type container: dict
    :param container: None
    =======================
    :return:
        **Value**
    """
    wanted = name.strip().lower()
    for key, value in container.items():
        if key.strip().lower() == wanted:
            return value
    return ""
def check_redirect(res):
    """
    Check whether the response is redirect.

    :type res: HttpResponse
    :param res: response object; anything without a ``status`` attribute is
        treated as non-redirect
    :return:
        **Boolean**
    """
    # getattr with a default replaces the former blanket try/except, which
    # silently swallowed every error while probing ``res.status``.
    status = getattr(res, "status", None)
    return status == 301 or status == 302
# Per-byte-value normalization lookup table, built once at import time by
# _get_normalized_char_list() (defined elsewhere in this module) and indexed
# by ord(ch) in normalize_string().
_NORMALIZED_CHAR_LIST = _get_normalized_char_list()
def normalize_string(in_str, encoding_slash=True):
"""
Encode in_str.
When encoding_slash is True, don't encode skip_chars, vice versa.
:type in_str: string
:param in_str: None
:type encoding_slash: Bool
:param encoding_slash: None
===============================
:return:
**string**
"""
tmp = []
for ch in convert_to_standard_string(in_str):
if ch == '/' and not encoding_slash:
tmp.append('/')
else:
tmp.append(_NORMALIZED_CHAR_LIST[ord(ch)])
return ''.join(tmp)
def append_uri(base_uri, *path_components):
"""
Append path_components to the end of base_uri in order, and ignore all empty strings and None
:param base_uri: None
:type base_uri: string
:param path_components: None
:return: the final url
:rtype: str
"""
tmp = [base_uri]
for path in path_components:
if path:
tmp.append(normalize_string(path, False))
if len(tmp) > 1:
tmp[0] = tmp[0].rstrip('/')
tmp[-1] = tmp[-1].lstrip('/')
for i in range(1, len(tmp)):
tmp[i] = tmp[i].strip('/')
return '/'.join(tmp)
def check_bucket_valid(bucket):
"""
Check bucket name whether is legal.
:type bucket: string
:param bucket: None
=======================
:return:
**Boolean**
"""
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-"
if len(bucket) < 3 or len(bucket) > 63:
return False
if bucket[-1] == "-" or bucket[-1] == "_":
return False
if not (('a' <= bucket[0] <= 'z') or ('0' <= bucket[0] <= '9')):
return False
for i in bucket:
if not i in alphabet:
return False
return True
def guess_content_type_by_file_name(file_name):
"""
Get file type by filename.
:type file_name: string
:param file_name: None
=======================
:return:
**Type Value**
"""
mime_map = dict()
mime_map["js"] = "application/javascript"
mime_map["xlsx"] = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
mime_map["xltx"] = "application/vnd.openxmlformats-officedocument.spreadsheetml.template"
mime_map["potx"] = "application/vnd.openxmlformats-officedocument.presentationml.template"
mime_map["ppsx"] = "application/vnd.openxmlformats-officedocument.presentationml.slideshow"
mime_map["pptx"] = "application/vnd.openxmlformats-officedocument.presentationml.presentation"
mime_map["sldx"] = "application/vnd.openxmlformats-officedocument.presentationml.slide"
mime_map["docx"] = "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
mime_map["dotx"] = "application/vnd.openxmlformats-officedocument.wordprocessingml.template"
mime_map["xlam"] = "application/vnd.ms-excel.addin.macroEnabled.12"
mime_map["xlsb"] = "application/vnd.ms-excel.sheet.binary.macroEnabled.12"
try:
name = os.path.basename(file_name)
suffix = name.split('.')[-1]
if suffix in mime_map.keys():
mime_type = mime_map[suffix]
else:
import mimetypes
mimetypes.init()
mime_type = mimetypes.types_map["." + suffix]
except:
mime_type = 'application/octet-stream'
if not mime_type:
mime_type = 'application/octet-stream'
return mime_type
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_number_cap_regex = re.compile('([a-z])([0-9]{2,})')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
def pythonize_name(name):
"""Convert camel case to a "pythonic" name.
Examples::
pythonize_name('CamelCase') -> 'camel_case'
pythonize_name('already_pythonized') -> 'already_pythonized'
pythonize_name('HTTPRequest') -> 'http_request'
pythonize_name('HTTPStatus200Ok') -> 'http_status_200_ok'
pythonize_name('UPPER') -> 'upper'
pythonize_name('ContentMd5')->'content_md5'
pythonize_name('') -> ''
"""
if name == "eTag":
return "etag"
s1 = _first_cap_regex.sub(r'\1_\2', name)
s2 = _number_cap_regex.sub(r'\1_\2', s1)
return _end_cap_regex.sub(r'\1_\2', s2).lower()
def get_canonical_querystring(params, for_signature):
"""
:param params:
:param for_signature:
:return:
"""
if params is None:
return ''
result = []
for k, v in params.items():
if not for_signature or k.lower != http_headers.AUTHORIZATION.lower():
if v is None:
v = ''
result.append('%s=%s' % (k, normalize_string(v)))
result.sort()
return '&'.join(result)
def print_object(obj):
"""
:param obj:
:return:
"""
tmp = []
for k, v in obj.__dict__.items():
if not k.startswith('__'):
if isinstance(v, str):
tmp.append("%s:'%s'" % (k, v))
elif isinstance(v, unicode):
tmp.append("%s:u'%s'" % (k, v))
else:
tmp.append('%s:%s' % (k, v))
return '{%s}' % ','.join(tmp)
class Expando(object):
"""
Expandable class
"""
def dict_to_python_object(d):
"""
:param d:
:return:
"""
attr = {}
for k, v in d.items():
k = pythonize_name(str(k))
attr[k] = v
return Expando(attr)
def required(**types):
"""
decorator of input param check
:param types:
:return:
"""
return _required | [
2,
15069,
357,
66,
8,
1946,
347,
1698,
84,
13,
785,
11,
3457,
13,
1439,
6923,
33876,
201,
198,
2,
201,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
407,
779,
428,
2393,
2... | 2.133951 | 4,748 |
from django import forms
| [
6738,
42625,
14208,
1330,
5107,
628,
628
] | 4 | 7 |
import copy
from pygments.lexer import RegexLexer, bygroups
from pygments.token import *
import .base
SectionID = Token.Section.ID
NodeType = Token.Node.Type
NodeID = Token.Node.ID
NodeAndOrComma = Token.Node.AndOrComma
DiffSpec = Token.DiffSpec
AmendedAsFollows = DiffSpec.AmendedAsFollows
AmendedByAdding = DiffSpec.AmendedByAdding
Renumbered = DiffSpec.Renumbered
SessionLawChapter = Token.SessionLawChapter
SessionLawYear = Token.SessionLawYear
ActName = Token.ActName
CompilationName = Token.CompilationName
Junk = Token.Junk
subds = ['paragraph', 'division', 'chapter', 'section', 'clause',
'article', 'part']
subds += ['sub' + s for s in subds]
subds = r'(%s)' % '|'.join(sorted(subds, key=len, reverse=True))
| [
11748,
4866,
198,
198,
6738,
12972,
11726,
13,
2588,
263,
1330,
797,
25636,
45117,
263,
11,
416,
24432,
198,
6738,
12972,
11726,
13,
30001,
1330,
1635,
198,
198,
11748,
764,
8692,
628,
198,
16375,
2389,
796,
29130,
13,
16375,
13,
2389,
... | 2.959677 | 248 |
import pandas as pd
import numpy as np
from IPython.display import display, HTML, display_html
from tqdm.notebook import tqdm
def auto_adjust():
'''
Set column width = 100
Max displayed rows = 100
Max displayed columns = 100
'''
set_colwidth(100)
pd.options.display.max_rows = 100
pd.options.display.max_columns = 100
def set_max_rows(max_rows=100):
'''
Set max display rows
Return : None
'''
pd.options.display.max_rows = max_rows
def set_max_columns(max_columns=100):
'''
Set max display columns
Return : None
'''
pd.options.display.max_columns = max_columns
def display_html(df):
'''
display a dataframe as html table
'''
display(HTML(df.to_html()))
def inc_colwidth(inc_colwidth=20,target_colwidth=None):
'''
Increase column width of pandas dataframe display
Return : None
'''
if target_colwidth == None:
curr_max_colwidth = pd.get_option("display.max_colwidth")
new_max_colwidth = curr_max_colwidth + inc_colwidth
pd.set_option('max_colwidth', new_max_colwidth)
else:
pd.set_option('max_colwidth', target_colwidth)
print(f'Current max column width = {pd.get_option("display.max_colwidth")}')
def dec_colwidth(dec_colwidth=20,target_colwidth=None):
'''
Decrease column width of pandas dataframe display
Return : None
'''
if target_colwidth == None:
curr_max_colwidth = pd.get_option("display.max_colwidth")
new_max_colwidth = curr_max_colwidth - dec_colwidth
pd.set_option('max_colwidth', new_max_colwidth)
else:
pd.set_option('max_colwidth', target_colwidth)
print(f'Current max column width = {pd.get_option("display.max_colwidth")}')
def set_colwidth(target_colwidth=100):
'''
Decrease column width of pandas dataframe display
Return : None
'''
pd.set_option('max_colwidth', target_colwidth)
def get_curr_colwidth():
'''
Decrease column width of pandas dataframe display
Return : None
'''
print(f'Current max column width = {pd.get_option("display.max_colwidth")}')
def read_parquets(list_file_path, columns='all'):
'''
Read multiple parquet files of the same template into a single pandas dataframe.
'''
list_df = []
for file_path in tqdm(list_file_path, 'reading parquets...'):
if columns=='all':
list_df.append(pd.read_parquet(file_path))
else:
list_df.append(pd.read_parquet(file_path, columns=columns))
df = pd.concat(list_df)
return df
def convert_dtypes(in_df, in_dict_dtypes, default_dtype=None):
'''
Convert dtypes of a dataframe according to given dict of column names and dtypes.
'''
in_df = in_df.copy()
for col_nm in in_df.columns:
if col_nm in in_dict_dtypes.keys():
if in_df[col_nm].dtype != in_dict_dtypes[col_nm]:
in_df[col_nm] = in_df[col_nm].astype(in_dict_dtypes[col_nm])
elif default_dtype:
if in_df[col_nm].dtype != default_dtype:
in_df[col_nm] = in_df[col_nm].astype(default_dtype)
return in_df
def optimize_dtypes(df, excluded_cols=None, only_int=True, allow_unsigned=False):
'''
Optimize data type of each column to minimum size.
'''
df = df.copy()
if excluded_cols:
assert(type(excluded_cols) == list)
list_cols = [col for col in df.columns if col not in excluded_cols]
else:
list_cols = list(df.columns)
if (only_int==True) :
list_cols = [col for col in list_cols if 'int' in str(df[col].dtype)]
for col in list_cols:
col_dtype_ori_str = str(df[col].dtype)
col_max_val = df[col].max()
col_min_val = df[col].min()
if 'int' in col_dtype_ori_str:
if (col_min_val >= 0) & (allow_unsigned==True):
if col_max_val < 2**8:
col_dtype_new = np.uint8
elif col_max_val < 2**16:
col_dtype_new = np.uint16
elif col_max_val < 2**32:
col_dtype_new = np.uint32
else:
col_dtype_new = np.uint64
else:
if (col_max_val < 2**7) & (col_min_val >= -2**7):
col_dtype_new = np.int8
elif (col_max_val < 2**15) & (col_min_val >= -2**15):
col_dtype_new = np.int16
elif (col_max_val < 2**31) & (col_min_val >= -2**31):
col_dtype_new = np.int32
else:
col_dtype_new = np.int64
assert(col_min_val == col_dtype_new(col_min_val))
assert(col_max_val == col_dtype_new(col_max_val))
col_dtype_new_str = str(col_dtype_new).split("'")[1].split('.')[1]
if col_dtype_ori_str != col_dtype_new_str:
df[col] = df[col].astype(col_dtype_new)
print(f'Column "{col}": {col_dtype_ori_str} -> {col_dtype_new_str}')
else:
pass
return df | [
11748,
19798,
292,
355,
279,
67,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
6101,
7535,
13,
13812,
1330,
3359,
11,
11532,
11,
3359,
62,
6494,
201,
198,
6738,
256,
80,
36020,
13,
11295,
2070,
1330,
256,
80,
36020,
201,
... | 1.905438 | 2,887 |
# -*- coding: utf-8 -*-
from .log import log
from .empty import empty
from .base import BaseTest
from .engine import Engine
from .context import Context
from .resolver import OperatorResolver
class Test(BaseTest):
"""
Test represents the test definition in `grappa` with extensible and
dynamic, runtime inferred DSL based on registered operators and
third-party plugins.
Arguments:
subject (mixed): subject value to test.
"""
# Tracks context manager scopes
_context = 0
# Tracks yielded value by context manager
_context_subject = empty
# Global flag, only used by global singleton instance
_global = False
@property
def should(self):
"""
Alias name to self reference the current instance.
Required for DSL API.
"""
return self
@property
def expect(self):
"""
Alias name to self reference the current instance.
Required for DSL API.
"""
return self
@property
def __call__(self, subject, overload=False):
"""
Overloads function invokation of `Test` class instance.
This is magical and widely used in `grappa` test execution by both
developers and internal engine.
Arguments:
subject (mixed): test subject to use.
overload (bool): `True` if the call if triggered via operator
overloading invokation, otherise `False`.
Returns:
grappa.Test: new test instance with the given subject.
"""
self._ctx.subject = subject
__tracebackhide__ = True
return self._trigger() if overload else Test(subject)
def __getattr__(self, name):
"""
Overloads class attribute accessor proxying calls dynamically
into assertion operators calls.
This method is invoked by Python runtime engine, not by developers.
"""
# Return a new test instance if running as global
if self._global:
# If using context manager, use context defined subject
subject = self._context_subject if self._context else empty
# Create new test and proxy attribute call
return Test(subject).__getattr__(name)
# Resolve and register operator by name
__tracebackhide__ = True
return OperatorResolver(self).resolve(name)
def _trigger(self):
"""
Trigger assertions in the current test engine.
Raises:
AssertionError: in case of assertion error.
Exception: in case of any other assertion error.
"""
log.debug('[test] trigger with context: {}'.format(self._ctx))
try:
err = self._engine.run(self._ctx)
except Exception as _err:
err = _err
finally:
# Important: reset engine state to defaults
self._engine.reset()
self._root._engine.reset()
# If error is present, raise it!
if err:
__tracebackhide__ = True
raise err
return self
def _clone(self):
"""
Clones the current `Test` instance.
Returns:
grappa.Test
"""
test = Test(self._ctx.subject)
test._ctx = self._ctx.clone()
test._engine = self._engine.clone()
return test
def _flush(self):
"""
Flushes the current test state, including test engine, assertions and
current context.
"""
self.__init__()
# Assertions composition
def all(self, *tests):
"""
Composes multiple tests and executes them, in series, once a
subject is received.
Conditional composition operator equivalent to `all` built-in
Python function.
Arguments:
*tests (grappa.Test): test instances to run.
"""
self._engine.add_assertion(run_tests)
return self
def any(self, *tests):
"""
Composes multiple tests and executes them, in series, once a
subject is received.
Conditional composition operator equivalent to `any` built-in
Python function.
Arguments:
*tests (grappa.Test): test instances to run.
"""
self._engine.add_assertion(run_tests)
return self
def __overload__(self, subject):
"""
Method triggered by magic methods executed via operator overloading.
"""
if isinstance(subject, Test):
# Clone test instance to make it side-effects free
fork = subject._clone()
fork._ctx.chained = True
fork._ctx.subject = self._ctx.subject
# Trigger assertions
__tracebackhide__ = True
return fork._trigger()
# Otherwise invoke the test function with a subject
__tracebackhide__ = True
return self.__call__(subject, overload=True)
def __or__(self, value):
"""
Overloads ``|`` as from left-to-right operator precedence expression.
"""
__tracebackhide__ = True
return self.__overload__(value)
def __ror__(self, value):
"""
Overloads ``|`` operator.
"""
__tracebackhide__ = True
return self.__overload__(value)
def __gt__(self, value):
"""
Overloads ``>`` operator.
"""
__tracebackhide__ = True
return self.__overload__(value)
def __enter__(self):
"""
Initializes context manager.
"""
log.debug('creates new test context manager: {}'.format(self._ctx))
test._context += 1
test._context_subject = self._ctx.subject
def __exit__(self, etype, value, traceback):
"""
Exists context manager.
"""
log.debug('exists test context manager: {}'.format(value))
test._context -= 1
if test._context == 0:
test._context_subject = empty
# Create global singleton instance
test = Test()
# This is black magic in order to deal with chainable states
# and operator precedence.
test._global = True
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
764,
6404,
1330,
2604,
198,
6738,
764,
28920,
1330,
6565,
198,
6738,
764,
8692,
1330,
7308,
14402,
198,
6738,
764,
18392,
1330,
7117,
198,
6738,
764,
22866,
1330,
... | 2.468688 | 2,507 |
import environ
env = environ.Env()
REST_FRAMEWORK = {
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
"PAGE_SIZE": 10,
"MAX_PAGE_SIZE": 100,
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework_simplejwt.authentication.JWTAuthentication",
"rest_framework.authentication.SessionAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
"DEFAULT_FILTER_BACKENDS": (
"django_filters.rest_framework.DjangoFilterBackend",
"rest_framework.filters.OrderingFilter",
"rest_framework.filters.SearchFilter",
),
"DEFAULT_RENDERER_CLASSES": [
"djangorestframework_camel_case.render.CamelCaseJSONRenderer",
"rest_framework.renderers.BrowsableAPIRenderer",
]
if env.bool("DEBUG")
else ["djangorestframework_camel_case.render.CamelCaseJSONRenderer",],
"DEFAULT_PARSER_CLASSES": (
"djangorestframework_camel_case.parser.CamelCaseJSONParser",
"djangorestframework_camel_case.parser.CamelCaseFormParser",
"djangorestframework_camel_case.parser.CamelCaseMultiPartParser",
),
"JSON_UNDERSCOREIZE": {"no_underscore_before_number": True},
"TEST_REQUEST_DEFAULT_FORMAT": "json",
"COERCE_DECIMAL_TO_STRING": False,
"DATETIME_FORMAT": None,
"EXCEPTION_HANDLER": "core.errors.exception_handler.full_details_exception_handler",
}
| [
11748,
551,
2268,
198,
198,
24330,
796,
551,
2268,
13,
4834,
85,
3419,
198,
198,
49,
6465,
62,
10913,
2390,
6217,
14670,
796,
1391,
198,
220,
220,
220,
366,
7206,
38865,
62,
4537,
38,
1268,
6234,
62,
31631,
1298,
366,
2118,
62,
3060... | 2.405042 | 595 |
def max_sub_array(nums):
""" Returns the max subarray of the given list of numbers.
Returns 0 if nums is None or an empty list.
Time Complexity: O(n)
Space Complexity: O(1)
"""
max_sub_array = 0
sum = 0
for num in nums:
sum = max(0, sum + num)
max_sub_array = max(max_sub_array, sum)
if max_sub_array <= 0:
return max(nums)
return max_sub_array | [
198,
4299,
3509,
62,
7266,
62,
18747,
7,
77,
5700,
2599,
198,
220,
220,
220,
37227,
16409,
262,
3509,
850,
18747,
286,
262,
1813,
1351,
286,
3146,
13,
198,
220,
220,
220,
220,
220,
220,
220,
16409,
657,
611,
220,
997,
82,
318,
604... | 2.121951 | 205 |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _sets
else:
import _sets
try:
import builtins as __builtin__
except ImportError:
import __builtin__
_swig_new_instance_method = _sets.SWIG_PyInstanceMethod_New
_swig_new_static_method = _sets.SWIG_PyStaticMethod_New
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import mfem._ser.array
import mfem._ser.mem_manager
import mfem._ser.table
class IntegerSet(object):
r"""Proxy of C++ mfem::IntegerSet class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(IntegerSet self) -> IntegerSet
__init__(IntegerSet self, IntegerSet s) -> IntegerSet
__init__(IntegerSet self, int const n, int const * p) -> IntegerSet
"""
_sets.IntegerSet_swiginit(self, _sets.new_IntegerSet(*args))
def Size(self):
r"""Size(IntegerSet self) -> int"""
return _sets.IntegerSet_Size(self)
Size = _swig_new_instance_method(_sets.IntegerSet_Size)
def PickElement(self):
r"""PickElement(IntegerSet self) -> int"""
return _sets.IntegerSet_PickElement(self)
PickElement = _swig_new_instance_method(_sets.IntegerSet_PickElement)
def PickRandomElement(self):
r"""PickRandomElement(IntegerSet self) -> int"""
return _sets.IntegerSet_PickRandomElement(self)
PickRandomElement = _swig_new_instance_method(_sets.IntegerSet_PickRandomElement)
def __eq__(self, s):
r"""__eq__(IntegerSet self, IntegerSet s) -> int"""
return _sets.IntegerSet___eq__(self, s)
__eq__ = _swig_new_instance_method(_sets.IntegerSet___eq__)
def Recreate(self, n, p):
r"""Recreate(IntegerSet self, int const n, int const * p)"""
return _sets.IntegerSet_Recreate(self, n, p)
Recreate = _swig_new_instance_method(_sets.IntegerSet_Recreate)
__swig_destroy__ = _sets.delete_IntegerSet
# Register IntegerSet in _sets:
_sets.IntegerSet_swigregister(IntegerSet)
class ListOfIntegerSets(object):
r"""Proxy of C++ mfem::ListOfIntegerSets class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def Size(self):
r"""Size(ListOfIntegerSets self) -> int"""
return _sets.ListOfIntegerSets_Size(self)
Size = _swig_new_instance_method(_sets.ListOfIntegerSets_Size)
def PickElementInSet(self, i):
r"""PickElementInSet(ListOfIntegerSets self, int i) -> int"""
return _sets.ListOfIntegerSets_PickElementInSet(self, i)
PickElementInSet = _swig_new_instance_method(_sets.ListOfIntegerSets_PickElementInSet)
def PickRandomElementInSet(self, i):
r"""PickRandomElementInSet(ListOfIntegerSets self, int i) -> int"""
return _sets.ListOfIntegerSets_PickRandomElementInSet(self, i)
PickRandomElementInSet = _swig_new_instance_method(_sets.ListOfIntegerSets_PickRandomElementInSet)
def Insert(self, s):
r"""Insert(ListOfIntegerSets self, IntegerSet s) -> int"""
return _sets.ListOfIntegerSets_Insert(self, s)
Insert = _swig_new_instance_method(_sets.ListOfIntegerSets_Insert)
def Lookup(self, s):
r"""Lookup(ListOfIntegerSets self, IntegerSet s) -> int"""
return _sets.ListOfIntegerSets_Lookup(self, s)
Lookup = _swig_new_instance_method(_sets.ListOfIntegerSets_Lookup)
def AsTable(self, t):
r"""AsTable(ListOfIntegerSets self, Table t)"""
return _sets.ListOfIntegerSets_AsTable(self, t)
AsTable = _swig_new_instance_method(_sets.ListOfIntegerSets_AsTable)
__swig_destroy__ = _sets.delete_ListOfIntegerSets
def __init__(self):
r"""__init__(ListOfIntegerSets self) -> ListOfIntegerSets"""
_sets.ListOfIntegerSets_swiginit(self, _sets.new_ListOfIntegerSets())
# Register ListOfIntegerSets in _sets:
_sets.ListOfIntegerSets_swigregister(ListOfIntegerSets)
| [
2,
770,
2393,
373,
6338,
7560,
416,
12672,
3528,
357,
4023,
1378,
2503,
13,
2032,
328,
13,
2398,
737,
198,
2,
10628,
604,
13,
15,
13,
17,
198,
2,
198,
2,
2141,
407,
787,
2458,
284,
428,
2393,
4556,
345,
760,
644,
345,
389,
1804,... | 2.613938 | 1,808 |
import torch
import torch.nn.functional as F
from torchvision.models import inception_v3
import numpy as np
import random
from tools.others import sample_ZCs
from scipy.stats import entropy
from scipy import linalg
def get_activations_stat_orig(datasets, n_samples,ipt_net,ipt_dims, n_gpu,dali):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- files : List of image files paths
-- model : Instance of inception model
-- batch_size : Batch size of images for the model to process at once.
Make sure that the number of samples is a multiple of
the batch size, otherwise some samples are ignored. This
behavior is retained to match the original FID score
implementation.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
-- verbose : If set to True and parameter out_step is given, the number
of calculated batches is reported.
Returns:
-- A numpy array of dimension (num images, dims) that contains the
activations of the given tensor when feeding inception with the
query tensor.
"""
curr_n_samples = 0
ipt_net.eval()
pred_arr = np.empty((n_samples, ipt_dims))
#for i, data in enumerate(datasets, 0):
i = 0
loader_iter = iter(datasets)
while True:
try:
data = next(loader_iter)
except StopIteration:
loader_iter = iter(datasets)
data = next(datasets)
if dali:
imgs = data[0]["data"]
y_real_c = data[0]["label"].squeeze().long()
else:
(imgs,y_real_c) = data
# print('orig',i)
start = i * imgs.size(0)
end = start + imgs.size(0)
if imgs.size(2) != 299 or imgs.size(3) != 299:
imgs = F.interpolate(input=imgs,size=(299, 299), mode='bilinear',align_corners=False)
if n_gpu>0:
imgs = imgs.cuda()
pred = ipt_net(imgs)[0]
# If model output is not scalar, apply global spatial average pooling.
# This happens if you choose a dimensionality not equal 2048.
if pred.shape[2] != 1 or pred.shape[3] != 1:
pred = F.adaptive_avg_pool2d(pred, output_size=(1, 1))
# print(start,end,batch_size)
pred_arr[start:end] = pred.cpu().data.numpy().reshape(imgs.size(0), -1)
curr_n_samples += imgs.size(0)
if curr_n_samples>= n_samples:
break
i = i + 1
mu = np.mean(pred_arr, axis=0)
sigma = np.cov(pred_arr, rowvar=False)
return mu,sigma
def get_activations_stat_gen(netG,z_dim,n_classes,Z_dist,Z_params,ipt_net,total_itrs,batch_size, ipt_dims,n_gpu):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- files : List of image files paths
-- model : Instance of inception model
-- batch_size : Batch size of images for the model to process at once.
Make sure that the number of samples is a multiple of
the batch size, otherwise some samples are ignored. This
behavior is retained to match the original FID score
implementation.
-- dims : Dimensionality of features returned by Inception
-- cuda : If set to True, use GPU
-- verbose : If set to True and parameter out_step is given, the number
of calculated batches is reported.
Returns:
-- A numpy array of dimension (num images, dims) that contains the
activations of the given tensor when feeding inception with the
query tensor.
"""
ipt_net.eval()
n_used_imgs = total_itrs * batch_size
pred_arr = np.empty((n_used_imgs, ipt_dims))
for i in range(total_itrs):
# print('gen',i)
Z, C_int, C_vec = sample_ZCs(batch_size,z_dim,n_classes,Z_dist,Z_params,n_gpu)
start = i * batch_size
end = start + batch_size
imgs = netG(Z,C_vec)
if imgs.size(2) != 299 or imgs.size(3) != 299:
#imgs = imgs.data.mul_(0.5).add_(0.5).mul_(255).clamp_(0,255).round_().div_(255).mul_(2).sub_(1)
imgs = F.interpolate(input=imgs,size=(299, 299), mode='bilinear',align_corners=False)
pred = ipt_net(imgs)[0]
# If model output is not scalar, apply global spatial average pooling.
# This happens if you choose a dimensionality not equal 2048.
if pred.shape[2] != 1 or pred.shape[3] != 1:
pred = F.adaptive_avg_pool2d(pred, output_size=(1, 1))
pred_arr[start:end] = pred.cpu().data.numpy().reshape(batch_size, -1)
mu = np.mean(pred_arr, axis=0)
sigma = np.cov(pred_arr, rowvar=False)
return mu,sigma
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations, precalculated on an
representative data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on an
representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return (diff.dot(diff) + np.trace(sigma1) +
np.trace(sigma2) - 2 * tr_covmean)
def frechet_inception_distance(netG,ipt_net,z_dim,n_classes,Z_dist,Z_params,n_samples,batch_size, m2,s2,ipt_dims, n_gpu):
"""Calculates the FID of two paths"""
total_itrs = int(n_samples/batch_size)
m1, s1 = get_activations_stat_gen(netG,z_dim,n_classes,Z_dist,Z_params,ipt_net,total_itrs,batch_size, ipt_dims,n_gpu)
fid_value = calculate_frechet_distance(m1, s1, m2, s2)
return fid_value
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
10178,
13,
27530,
1330,
30839,
62,
85,
18,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
6738,
4899,
13,
847,
82,
1330,
6291,
62,
57,
3227... | 2.26099 | 3,253 |
#!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
import codecs
import os
from setuptools import setup
grlc_base = 'src'
grlc_base_dir = os.path.join(grlc_base, '')
grlc_data = []
for root,dirs,files in os.walk(grlc_base):
if root != grlc_base:
root_dir = root.replace(grlc_base_dir, '')
data_files = os.path.join(root_dir, '*')
grlc_data.append(data_files)
grlc_version = '1.3.0'
with codecs.open('requirements.txt', mode='r') as f:
install_requires = f.read().splitlines()
with codecs.open('requirements-test.txt', mode='r') as f:
tests_require = f.read().splitlines()
with codecs.open('README.md', mode='r', encoding='utf-8') as f:
long_description = f.read()
setup(
name="grlc",
description='grlc, the git repository linked data API constructor',
long_description=long_description,
long_description_content_type='text/markdown',
license="Copyright 2017 Albert Meroño",
author='Albert Meroño',
author_email='albert.merono@vu.nl',
url='https://github.com/CLARIAH/grlc',
version=grlc_version,
py_modules=['grlc'],
packages=['grlc'],
package_dir = {'grlc': grlc_base},
scripts=['bin/grlc-server'],
install_requires=install_requires,
setup_requires=[
# dependency for `python setup.py test`
'pytest-runner',
# dependencies for `python setup.py build_sphinx`
'sphinx',
'recommonmark'
],
tests_require=tests_require,
package_data = {'grlc': grlc_data},
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
47279,
12,
3459,
3270,
12,
1314,
532,
9,
12,
198,
198,
11748,
40481,
82,
198,
11748,
28686,
198,
6738,
900,
37623,
10141,
1330,
9058,
198,
198,
2164,
44601,... | 2.415335 | 626 |
import json
import os
from ucttp import _main
INPUT_DIR = "./../inputs"
PARAMETERS = "/parameters.json"
POPULATION_DEPENDENCE = "./../outputs/population_dependence.txt"
GENERATION_DEPENDENCE = "./../outputs/generation_dependence.txt"
if __name__ == '__main__':
population_size_dependence()
generations_number_dependence()
| [
11748,
33918,
198,
11748,
28686,
198,
198,
6738,
334,
310,
34788,
1330,
4808,
12417,
198,
198,
1268,
30076,
62,
34720,
796,
366,
19571,
40720,
15414,
82,
1,
198,
27082,
2390,
2767,
4877,
796,
12813,
17143,
7307,
13,
17752,
1,
198,
47,
... | 2.785124 | 121 |
import logging
from src.backup.task_creator import TaskCreator
| [
11748,
18931,
198,
198,
6738,
12351,
13,
1891,
929,
13,
35943,
62,
45382,
1330,
15941,
16719,
273,
628
] | 3.611111 | 18 |
from django.core.exceptions import ValidationError
from django.shortcuts import render, HttpResponse, redirect
# Create your views here.
from app01.models import UserInfo
'''
forms组件
1、校验数据
2、页面显示提示信息
'''
from django import forms
# class BookFrom(forms.Form):
# title = forms.CharField(max_length=32)
# price = forms.IntegerField
# email = forms.EmailField()
from django.forms import widgets
| [
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
3254,
24765,
12331,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
367,
29281,
31077,
11,
18941,
198,
198,
2,
13610,
534,
5009,
994,
13,
198,
6738,
598,
486,
13,
27530,
... | 2.397727 | 176 |
import collections
answer = solution(5, [[4, 3], [4, 2], [3, 2], [1, 2], [2, 5]])
print(answer)
| [
11748,
17268,
198,
198,
41484,
796,
4610,
7,
20,
11,
16410,
19,
11,
513,
4357,
685,
19,
11,
362,
4357,
685,
18,
11,
362,
4357,
685,
16,
11,
362,
4357,
685,
17,
11,
642,
11907,
8,
198,
4798,
7,
41484,
8,
198
] | 2.309524 | 42 |
phrase = str(input('Type a phrase: ')).strip()
write(phrase)
| [
198,
198,
34675,
796,
965,
7,
15414,
10786,
6030,
257,
9546,
25,
705,
29720,
36311,
3419,
198,
13564,
7,
34675,
8,
198
] | 2.863636 | 22 |
import pymongo
import gridfs
from .datastore import DataStore
| [
11748,
279,
4948,
25162,
198,
11748,
10706,
9501,
198,
198,
6738,
764,
19608,
459,
382,
1330,
6060,
22658,
628
] | 3.368421 | 19 |
# This sample tests the type checker's handling of the overload decorator.
from typing import Literal, overload, Optional
from datetime import datetime, timezone, timedelta
@overload
@overload
result1: datetime = from_json_timestamp(2418049)
# This should generate an error
result2: datetime = from_json_timestamp(None)
result3: None = from_json_timestamp(None)
# This should generate an error
result4: None = from_json_timestamp(2345)
@overload
@overload
t_f1: Literal["float"] = reveal_type(func1(abs(0.0)))
| [
2,
770,
6291,
5254,
262,
2099,
2198,
263,
338,
9041,
286,
262,
31754,
11705,
1352,
13,
198,
198,
6738,
19720,
1330,
25659,
1691,
11,
31754,
11,
32233,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
640,
11340,
11,
28805,
12514,
628,
198... | 3.069767 | 172 |
'''
Author : MiKueen
Level : Easy
Problem Statement : Three Consecutive Odds
Given an integer array arr, return true if there are three consecutive odd numbers in the array. Otherwise, return false.
Example 1:
Input: arr = [2,6,4,1]
Output: false
Explanation: There are no three consecutive odds.
Example 2:
Input: arr = [1,2,34,3,4,5,7,23,12]
Output: true
Explanation: [5,7,23] are three consecutive odds.
Constraints:
1 <= arr.length <= 1000
1 <= arr[i] <= 1000
'''
| [
7061,
6,
198,
13838,
1058,
13756,
42,
518,
268,
198,
4971,
1058,
16789,
198,
40781,
21983,
1058,
7683,
1482,
4552,
425,
20664,
82,
198,
198,
15056,
281,
18253,
7177,
5240,
11,
1441,
2081,
611,
612,
389,
1115,
12785,
5629,
3146,
287,
2... | 2.840237 | 169 |
import torch as t
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as f
import torch.optim as opt
import torch.utils.data as dutils
from research_seed.bytorch.binary_neural_network import (
MomentumWithThresholdBinaryOptimizer,
BinaryLinear,
)
from matplotlib import pyplot as plt
# t.manual_seed(424121)
group_a_generator = dist.Normal(0.8, 0.001)
group_b_generator = dist.Normal(0, 0.001)
group_c_generator = dist.Normal(-0.8, 0.001)
if __name__ == "__main__":
main()
| [
11748,
28034,
355,
256,
198,
11748,
28034,
13,
17080,
2455,
507,
355,
1233,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
277,
198,
11748,
28034,
13,
40085,
355,
2172,
198,
11748,
28034,
13,
... | 2.685279 | 197 |
import json
class StringConstant:
"""Some values in GraphQL are constants, not strings, and so they shouldn't
be encoded or have quotes put around them. Use this to represent a constant
and it won't be quoted in the query"""
class ListConstant:
"""Some values in GraphQL are constants, not strings, and so they shouldn't
be encoded or have quotes put around them. Use this to represent a list of
constants and it won't be quoted in the query"""
def BoolConstant(in_bool: bool):
""" Converts a boolean value to a constant string value."""
if in_bool:
return StringConstant('true')
else:
return StringConstant('false')
def make_parameters(**kwargs):
"""Convert mutation query parameters from dictionary to string format.
"""
encoder = json.JSONEncoder()
parts = []
for k, v in kwargs.items():
if isinstance(v, StringConstant):
value = v.value
elif isinstance(v, ListConstant):
value = v.values
else:
value = encoder.encode(v)
parts.append("{}: {}".format(k, value))
return "\n ".join(parts)
SUBSCRIPTION = '''subscription {{
{subscription}
}}'''
| [
11748,
33918,
628,
198,
4871,
10903,
3103,
18797,
25,
198,
220,
220,
220,
37227,
4366,
3815,
287,
29681,
9711,
389,
38491,
11,
407,
13042,
11,
290,
523,
484,
6584,
470,
198,
220,
220,
220,
307,
30240,
393,
423,
13386,
1234,
1088,
606,... | 2.760274 | 438 |
# coding: utf-8
# OpenCV ライン検出クラス
import cv2
import numpy as np
import time
import os
import sys
import math
from .functions import *
import platform
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
4946,
33538,
220,
9263,
11482,
6527,
162,
97,
250,
49035,
118,
14099,
9263,
8943,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
2... | 2.631579 | 57 |
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappeclient import FrappeClient
import json
import os
import requests
import subprocess
from frappe.utils.background_jobs import enqueue
from frappe.utils import get_site_name
from frappe.utils import flt, nowdate, add_days, cint
@frappe.whitelist()
@frappe.whitelist()
@frappe.whitelist()
| [
201,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
201,
198,
11748,
5306,
27768,
201,
198,
6738,
5306,
27768,
13,
19849,
13,
22897,
1330,
16854,
201,
198,
6738,
5306,
27768,
16366,
1330,
39313,
27768,
11792,
201,
198,
... | 2.689441 | 161 |
# Generated by Django 3.0.5 on 2020-09-22 20:36
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
20,
319,
12131,
12,
2931,
12,
1828,
1160,
25,
2623,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
import math
import cv2
import numpy as np
import imutils
| [
11748,
10688,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
545,
26791,
628,
628,
628
] | 3.1 | 20 |
import json
from os.path import dirname, abspath, join
from mhdata.io.csv import read_csv
class MonsterMetadata:
"""
Attempt to load the various types of mappings that monsters have
Monsters have an internal numerical id used in some schemas, and varying string ids
used in other schemas. Note that string key sare inconsistent, so some magic is usually involved.
Therefore we load:
- Map keyed by name_en that gives the string keys for names and for hunter notes (can differ)
- Map keyed by internal id that connects to name_en (used for hitzones/statuses/etc)
""" | [
11748,
33918,
198,
198,
6738,
28686,
13,
6978,
1330,
26672,
3672,
11,
2352,
6978,
11,
4654,
198,
198,
6738,
285,
71,
7890,
13,
952,
13,
40664,
1330,
1100,
62,
40664,
628,
198,
4871,
12635,
9171,
14706,
25,
198,
220,
220,
220,
37227,
... | 3.418079 | 177 |
"""
Module for gocdapi Pipeline class
"""
import xml.etree.ElementTree as ET
from gocdapi.gobase import GoBase
from gocdapi.stage import Stage
from gocdapi.utils.config_xml import ConfigXML
class Pipeline(GoBase):
"""
Class to hold Go Server Pipeline information
"""
def __init__(self, go_server, data):
"""Inits Pipeline objects.
Args:
go_server (Go): A Go object which this agent belongs to.
data (str): A json string representing the pipeline configuration
"""
self.stages = []
super(self.__class__, self).__init__(go_server, data=data)
def __str__(self):
"""Returns a pretty representation of the object
Returns:
str: representation of the object
"""
return 'Pipeline @ %s' % self.go_server.baseurl
def schedule(self):
"""Triggers a new instance of the pipeline with the latest revision of all materials
Will do a POST request to go/api/pipelines/PIPELINE_NAME/schedule
"""
url = self.build_url('schedule')
self.do_post(url)
def release_lock(self):
"""Releases a lock on the pipeline
Will do a POST request to go/api/pipelines/PIPELINE_NAME/releaseLock
"""
url = self.build_url('releaseLock')
self.do_post(url)
def pause(self, pause_cause):
"""Pauses the pipeline with the given reason.
Will do a POST request to go/api/pipelines/PIPELINE_NAME/pause
Args:
pause_cause (str): reason to pause the pipeline
"""
url = self.build_url('pause')
self.do_post(url, data={'pauseCause': pause_cause}, headers={'Confirm': True})
def unpause(self):
"""Unpauses the pipeline.
Will do a POST request to go/api/pipelines/PIPELINE_NAME/unpause
"""
url = self.build_url('unpause')
self.do_post(url, headers={'Confirm': True})
def status(self):
"""Gets information about status of pipeline.
Will do a POST request to go/api/pipelines/PIPELINE_NAME/status
Return:
dict: dict based in a JSON containing status information about paused, locked & schedulable.
"""
url = self.build_url('status')
return self.get_json_data(url)
def is_paused(self):
"""Check if pipeline is paused
Uses status method to get updated data.
Returns:
bool: True if paused
"""
return self.status()["paused"]
def is_locked(self):
"""Check if pipeline is locked
Uses status method to get updated data.
Returns:
bool: True if locked
"""
return self.status()["locked"]
def is_schedulable(self):
"""Check if pipeline is schedulable
Uses status method to get updated data.
Returns:
bool: True if schedulable
"""
return self.status()["schedulable"]
def history(self, offset=0):
"""List Pipeline history.
Will do a POST request to go/api/pipelines/PIPELINE_NAME/history/OFFSET
Args:
offset (int): how many instances to skip
Returns:
str: JSON representing pipeline history
"""
url = self.build_url('history/%s' % offset)
return self.get_json_data(url)
def get_config_xml(self, to_string=False):
"""Get Configuration XML.
Will do a GET request to go/api/admin/config/current.xml to retrieve the current pipeline
configuration.
Args:
to_string (bool): Stringify the config XML before returning it
Returns:
str: XML string data
"""
_, config_xml_data = self.go_server.admin.poll_configuration()
config_xml = ConfigXML(config_xml_data)
pipeline_xml = config_xml.get_pipeline(self.name)
return ET.tostring(pipeline_xml) if to_string else pipeline_xml
def _poll(self):
"""Will create and define the attributes of the pipeline.
Uses _data attribute populated by inherited methods, updating object attributes using the bunch pattern.
Save stages of pipeline found in the configuration in a container.
Also sets the pipeline url.
"""
self.__dict__.update(self._data)
self.set_self_url('go/api/pipelines/%s/' % self.name)
self.stages = []
for item in self._data['stages']:
stage = Stage(self.go_server, self, item)
self.stages.append(stage)
| [
37811,
198,
26796,
329,
308,
420,
67,
15042,
37709,
1398,
198,
37811,
198,
198,
11748,
35555,
13,
316,
631,
13,
20180,
27660,
355,
12152,
198,
198,
6738,
308,
420,
67,
15042,
13,
44270,
589,
1330,
1514,
14881,
198,
6738,
308,
420,
67,... | 2.417681 | 1,889 |
print ("Pythagorean Triplets with smaller side upto 10 -->")
# form : (m^2 - n^2, 2*m*n, m^2 + n^2)
# generate all (m, n) pairs such that m^2 - n^2 <= 10
# if we take (m > n), for m >= 6, m^2 - n^2 will always be greater than 10
# so m ranges from 1 to 5 and n ranges from 1 to m-1
pythTriplets = [(m*m - n*n, 2*m*n, m*m + n*n) for (m,n) in [(x, y) for x in range (1, 6) for y in range (1, x)] if m*m - n*n <= 10]
print (pythTriplets) | [
4798,
5855,
47,
5272,
363,
29456,
19817,
912,
351,
4833,
1735,
18529,
78,
838,
14610,
4943,
198,
2,
1296,
1058,
357,
76,
61,
17,
532,
299,
61,
17,
11,
362,
9,
76,
9,
77,
11,
285,
61,
17,
1343,
299,
61,
17,
8,
198,
2,
7716,
4... | 2.308511 | 188 |
#!/usr/bin/env python3
#github.com/intrackeable/dotfiles
#Set a random wallpaper and change gaps level
import random
import os
import subprocess | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
12567,
13,
785,
14,
600,
11510,
365,
540,
14,
26518,
16624,
198,
2,
7248,
257,
4738,
39328,
290,
1487,
17332,
1241,
198,
198,
11748,
4738,
198,
11748,
28686,
198,
11748,
850,
... | 3.372093 | 43 |
from ._step import Timestep
| [
6738,
47540,
9662,
1330,
5045,
395,
538,
198
] | 3.5 | 8 |
import numpy as np
import random, time
import tensorflow as tf
from automata_learning_utils import al_utils
from worlds.game import *
from automata_learning_with_policybank.policy_bank_dqn import PolicyBankDQN
from common.schedules import LinearSchedule
from common.replay_buffer import create_experience_replay_buffer
from automata_learning_with_policybank.Traces import Traces
from tester_policybank.tester import TesterPolicyBank as Tester
import qrm
import shutil
import os
import subprocess
import csv
#import pdb
######## compare rm_learned & rm_true to find conflicting experiences
######## check is_rm_learned in run_aqrm_task and break all operations if rewards don't match
######## make new tester tester_current with past and new experience
######## pass tester_current.get_reward_machines as arguments to decompose_reward_machines
######## make new tester
######## make copies of pertinent reward machines to some file path
######## write new experiment file sifting through each of the reward machines
def run_aqrm_task(sess, epsilon, environment_rm_file, learned_rm_file, policy_bank, tester_true, tester_learned, curriculum, replay_buffer, beta_schedule, show_print, is_rm_learned, currentstep, previous_testing_reward, q):
"""
This code runs one training episode.
- rm_file: It is the path towards the RM machine to solve on this episode
- environment_rm: an environment reward machine, the "true" one, underlying the execution
"""
# Initializing parameters and the game
learning_params = tester_learned.learning_params
testing_params = tester_learned.testing_params
"""
here, tester holds all the machines. we would like to dynamically update the machines every so often.
an option might be to read it every time a new machine is learnt
"""
reward_machines = [tester_learned.get_hypothesis_machine()]
task_params = tester_learned.get_task_params(learned_rm_file) # rm_files redundant here unless in water world (in which case it provides the map files based on the task)
rm_true = tester_true.get_reward_machines()[0] # add one more input n to track tasks at hand, replace 0 with n
rm_learned = tester_learned.get_hypothesis_machine()
task = Game(task_params)
actions = task.get_actions()
ok = 1
num_features = len(task.get_features())
num_steps = learning_params.max_timesteps_per_task
training_reward = 0
is_conflicting=1 #by default add traces
testing_reward = None #initialize
# Getting the initial state of the environment and the reward machine
s1, s1_features = task.get_state_and_features()
u1 = rm_learned.get_initial_state()
u1_true = rm_true.get_initial_state()
has_been = [0,0]
alpha = 0.8
gamma = 0.99
w = 0
# Starting interaction with the environment
if show_print: print("Executing", num_steps)
all_events = []
sy_s = [[]]
a_s = []
a=0
for t in range(num_steps):
currentstep += 1
s = np.where(s1_features==1)[0][0]
# sy = s%11+1
# sx = (s-sy+1)/11+1
# sy_s.append([sx,sy])
# a_s.append(a)
# Choosing an action to perform
if random.random() < 0.15:
a = random.choice(actions)
else:
#IG: current problem: there is no machine so a default behavior is to stop the exploration. We would, however, like to explore (randomly if necessary).
# how to accomplish that?
#if using suggestions in comments on line 33, replace 0 with n
if ok:
a = policy_bank.get_best_action(0, u1, s1_features.reshape((1,num_features)))
else:
pr = np.zeros([4,1])
pr_sum = 0
pr_select = np.zeros([5,1])
for a in actions:
pr_sum += np.exp(q[s][u1_true][a])
for a in actions:
pr[a] = np.exp(q[s][u1_true][a])/pr_sum
pr_select[0] = 0
pr_select[1] = pr[0]
pr_select[2] = pr[0]+pr[1]
pr_select[3] = pr[0]+pr[1]+pr[2]
pr_select[4] = 1
randn = random.random()
a_selected = -1
for a in actions:
if randn >= pr_select[a] and randn <= pr_select[a+1]:
a_selected = a
break
a = a_selected
# updating the curriculum
curriculum.add_step()
# Executing the action
if tester_learned.game_type=="trafficworld":
events = task.get_true_propositions_action(a)
task.execute_action(a)
a = task.get_last_action() # due to MDP slip
else:
task.execute_action(a)
a = task.get_last_action() # due to MDP slip
events = task.get_true_propositions()
s2, s2_features = task.get_state_and_features()
s_new = np.where(s2_features==1)[0][0]
u2 = rm_learned.get_next_state(u1, events)
u2_true = rm_true.get_next_state(u1_true,events)
reward = rm_true.get_reward(u1_true,u2_true,s1,a,s2)
# q[s][u1_true][a] = (1 - alpha) * q[s][u1_true][a] + alpha * (reward + gamma * np.amax(q[s_new][u2_true]))
sy = s%9
sx = (s-sy)/9
synew = s_new % 9
sxnew = (s_new - synew) / 9
a1=a
if (events == "f"):
events
all_events.append(events)
if reward>0:
reward
training_reward += reward
# Getting rewards and next states for each reward machine
rewards, next_states = [],[]
rewards_hyp, next_states_hyp = [],[]
j_rewards, j_next_states = rm_true.get_rewards_and_next_states(s1, a, s2, events)
rewards.append(j_rewards)
next_states.append(j_next_states)
j_rewards_hyp, j_next_states_hyp = rm_learned.get_rewards_and_next_states(s1, a, s2, events)
rewards_hyp.append(j_rewards_hyp)
next_states_hyp.append(j_next_states_hyp)
# Mapping rewards and next states to specific policies in the policy bank
rewards_hyp = policy_bank.select_rewards(rewards_hyp)
next_policies = policy_bank.select_next_policies(next_states_hyp)
# Adding this experience to the experience replay buffer
replay_buffer.add(s1_features, a, s2_features, rewards_hyp, next_policies)
# Learning
if curriculum.get_current_step() > learning_params.learning_starts and curriculum.get_current_step() % learning_params.train_freq == 0:
if learning_params.prioritized_replay:
experience = replay_buffer.sample(learning_params.batch_size, beta=beta_schedule.value(curriculum.get_current_step()))
S1, A, S2, Rs, NPs, weights, batch_idxes = experience
else:
S1, A, S2, Rs, NPs = replay_buffer.sample(learning_params.batch_size)
weights, batch_idxes = None, None
abs_td_errors = policy_bank.learn(S1, A, S2, Rs, NPs, weights, has_been) # returns the absolute td_error
if learning_params.prioritized_replay:
new_priorities = abs_td_errors + learning_params.prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
# Updating the target network
if curriculum.get_current_step() > learning_params.learning_starts and curriculum.get_current_step() % learning_params.target_network_update_freq == 0:
policy_bank.update_target_network()
# Printing
if show_print and (t+1) % learning_params.print_freq == 0:
print("Step:", t+1, "\tTotal reward:", training_reward)
if testing_params.test and curriculum.get_current_step() % testing_params.test_freq==0:
testing_reward = tester_learned.run_test(curriculum.get_current_step(), sess, run_aqrm_test, rm_learned, rm_true, is_rm_learned, q, policy_bank, num_features)
if is_rm_learned==0:
if task.is_env_game_over() or rm_true.is_terminal_state(u2_true):
# Restarting the game
task = Game(task_params)
if curriculum.stop_task(t):
break
s2, s2_features = task.get_state_and_features()
u2_true = rm_true.get_initial_state()
else:
if task.is_env_game_over() or rm_learned.is_terminal_state(u2) or rm_true.is_terminal_state(u2_true):
# Restarting the game
task = Game(task_params)
if curriculum.stop_task(t):
break
s2, s2_features = task.get_state_and_features()
u2_true = rm_true.get_initial_state()
u2 = rm_learned.get_initial_state()
# checking the steps time-out
if curriculum.stop_learning():
break
# Moving to the next state
s1, s1_features, u1 = s2, s2_features, u2
u1_true = u2_true
if rm_true.is_terminal_state(u2_true):
checker = rm_learned.is_terminal_state(u2)
if (is_rm_learned) and (not rm_learned.is_terminal_state(u2)) and (not rm_true.is_terminal_state(u2_true)):
is_conflicting = 0
elif is_rm_learned and (rm_learned.is_terminal_state(u2) and rm_true.is_terminal_state(u2_true)):
is_conflicting = 0
else:
is_conflicting = 1
step_count=t
if testing_reward is None:
is_test_result = 0
testing_reward = previous_testing_reward
else:
is_test_result = 1
if show_print: print("Done! Total reward:", training_reward)
return all_events, training_reward, step_count, is_conflicting, testing_reward, is_test_result, q
| [
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
11,
640,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
6738,
3557,
1045,
62,
40684,
62,
26791,
1330,
435,
62,
26791,
198,
6738,
11621,
13,
6057,
1330,
1635,
198,
6738,
3557,
1045,
62,
... | 2.241829 | 4,375 |
import numpy as np
data_index = {
"z" : 0,
"vz" : 1
}
| [
11748,
299,
32152,
355,
45941,
198,
198,
7890,
62,
9630,
796,
1391,
198,
220,
220,
220,
366,
89,
1,
1058,
657,
11,
198,
220,
220,
220,
366,
85,
89,
1,
1058,
352,
198,
92,
198
] | 1.8 | 35 |
from enum import Enum
class DataType(Enum):
"""
All variants of possible data types in a chart.
"""
Integer = 0
IntegerSum = 1
Float = 2
FloatSum = 3
String = 4
DateTime = 5
Date = 6
Time = 7
Daytime = 8
Count = 9
Weekday = 10
Day = 11
Month = 12
@staticmethod
@staticmethod
@staticmethod
@staticmethod
@staticmethod
def string_to_data_type(data_type_string):
"""
Converts a given string in a object of this enum.
:param data_type_string: The string that represents an enum value.
:type data_type_string: str
:return: The data type corresponding to the string.
:rtype: DataType
"""
data_type_string = data_type_string.lower()
if "integer" == data_type_string:
return DataType.Integer
elif "float" == data_type_string:
return DataType.Float
elif "varchar" in data_type_string:
return DataType.String
elif "datetime" == data_type_string:
return DataType.DateTime
elif "time" == data_type_string:
return DataType.Time
elif "date" == data_type_string:
return DataType.Date
elif "weekday" == data_type_string:
return DataType.Weekday
elif "month" == data_type_string:
return DataType.Month
elif "daytime" == data_type_string:
return DataType.Daytime
elif "string" == data_type_string:
return DataType.String
elif "day" == data_type_string:
return DataType.Day
@staticmethod
def int_to_weekday(weekday):
"""
Converts a given int to a weekday.
:param weekday: The int of the weekday. 0 is Monday.
:type weekday: int
:return: The string for the weekday.
:rtype: str
"""
if not 0 <= weekday <= 6:
return ""
return ["Mon.", "Tue.", "Wed.", "Thu.", "Fri.", "Sat.", "Sun."][weekday]
@staticmethod
def int_to_month(month):
"""
Converts a given int to a month.
:param month: The int of the weekday. 1 is January.
:type month: int
:return: The string for the month.
:rtype: str
"""
if not 1 <= month <= 12:
return ""
return ["Jan.", "Feb.", "Mar.", "Apr.", "May", "Jun.", "Jul.", "Aug.", "Sept.", "Oct.", "Nov.", "Dec."][month - 1]
@staticmethod
def int_to_daytime(daytime):
"""
Converts a given int to a daytime.
:param daytime: The int of the weekday. 0 is Morning.
:type daytime: int
:return: The string for the daytime.
:rtype: str
"""
if not 0 <= daytime <= 3:
return ""
return ["Night", "Morning", "Afternoon", "Evening"][daytime]
class DataSelectionState(Enum):
"""
Which state has a data object.
"""
Nothing = 0,
Selected = 1,
Highlighted = 2
| [
6738,
33829,
1330,
2039,
388,
628,
198,
4871,
6060,
6030,
7,
4834,
388,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1439,
17670,
286,
1744,
1366,
3858,
287,
257,
8262,
13,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
3414... | 2.225901 | 1,359 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-15 15:46
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
319,
2864,
12,
2713,
12,
1314,
1315,
25,
3510,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738... | 2.933333 | 75 |
#!/usr/bin/env python3
"""
Command line interface for the testing Splunk data ingestion.
"""
import json
import os
import sys
from datetime import datetime
from time import sleep
import click
from search import poll_splunk
@click.group()
@cli.command()
@click.option("-S", "--sleeptime", type=int, default=1)
@click.option("-t", "--timeout", type=int, default=600)
@click.option("-s", "--search", type=str)
@click.option("-u", "--username", type=str, default="")
@click.option("-p", "--password", type=str, default="")
@click.option("-h", "--hostname", type=str, default="")
@click.option("-b", "--port", type=str, default="8089")
@click.option("-o", "--outputlogs", type=bool, default=False)
@click.option("-m", "--match", type=str, default="")
@click.option("-d", "--debug", type=bool, default=False)
if __name__ == "__main__":
cli()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
37811,
198,
21575,
1627,
7071,
329,
262,
4856,
13341,
2954,
1366,
38382,
13,
198,
37811,
198,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
25064,
198,
6738,
4818,
8079,
1330,
48... | 2.80198 | 303 |
"""Get commands from Redis, treat them, and send them to the device."""
import json
import time
from typing import Any
from redis import Redis
from redis.client import PubSub
from bltouch import sensor
class CommandGetter:
"""Gets commands over Redis."""
redis_instance: Redis
pubsub: PubSub
channel: str
def get_command(self) -> Any:
"""Get command from Redis."""
message = self.pubsub.get_message(ignore_subscribe_messages=True)
if message:
# do something with the message
print(message)
command = message["data"].decode("utf-8")
return command
def execute_command(self, command: Any):
"""Send command to device."""
command_dict: dict = json.loads(command)
if command_dict.get("function") == "activate-bltouch":
self.blt.send_command()
def loop(self):
"""Get and produce data indefinitely."""
while True:
command = self.get_command()
if command:
self.execute_command(command)
time.sleep(0.001) # be nice to the system :)
| [
37811,
3855,
9729,
422,
2297,
271,
11,
2190,
606,
11,
290,
3758,
606,
284,
262,
3335,
526,
15931,
198,
198,
11748,
33918,
198,
11748,
640,
198,
6738,
19720,
1330,
4377,
198,
198,
6738,
2266,
271,
1330,
2297,
271,
198,
6738,
2266,
271,... | 2.451613 | 465 |
import math
import os
import socket
import sys
import time
from humanfriendly.terminal import ansi_wrap
from requests import get
from MHDDoS.methods.tools import Tools
from utils.network import NetworkUtils, IPGeolocationData
| [
11748,
10688,
198,
11748,
28686,
198,
11748,
17802,
198,
11748,
25064,
198,
11748,
640,
198,
198,
6738,
1692,
13120,
13,
23705,
282,
1330,
9093,
72,
62,
37150,
198,
6738,
7007,
1330,
651,
198,
198,
6738,
337,
10227,
46498,
13,
24396,
82... | 3.625 | 64 |
# -*- coding: utf-8 -*-
# -*- encoding:utf-8 -*-
"""
credit from
@arthor: zhongxinwang
@date: 2016-11-11
"""
import binascii
import struct
import sys
import os
import pdb
if __name__ == '__main__':
# 将要转换的词库添加在这里就可以了
path = []
full_list = ['167', '1', '76', '96', '127', '436', '154', '389', '367', '31']
sys_in = sys.argv[1]
if sys_in == 'all':
path = full_list
elif sys_in not in full_list:
print 'not a valid file path'
print full_list
sys.exit(0)
else:
path = [sys_in]
scel2txt = Scel2Txt()
cur_path = os.getcwd()
for sub_path in path:
category_path = os.path.join(cur_path, sub_path)
dir_list = os.listdir(category_path)
for _file in dir_list:
tmp_path = os.path.join(category_path, _file)
scel2txt.deal(_file, tmp_path)
# 保存结果
result = map(lambda x: unicode(x).encode("utf8"), scel2txt.GTable)
with open(tmp_path.replace(".scel", ".txt"), "w") as fout:
fout.write("\n".join(result)) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
532,
9,
12,
21004,
25,
40477,
12,
23,
532,
9,
12,
198,
37811,
198,
43082,
422,
198,
31,
11999,
273,
25,
1976,
71,
506,
87,
259,
47562,
198,
31,
4475,
25,
1584,
... | 1.880911 | 571 |
from functools import wraps
from django.conf import settings
from django.shortcuts import redirect
| [
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
18941,
628
] | 4.166667 | 24 |
import smbus
I2C_ADDRESS = 0x68
bus = smbus.SMBus(0)
# Set all ports in input mode
bus.write_byte(I2C_ADDRESS, 0xFF)
# Read all the input lines
high = bus.read_byte(0x1b)
low = bus.read_byte(0x1c)
value = (high << 8) + low
print value
| [
11748,
895,
10885,
198,
198,
40,
17,
34,
62,
2885,
7707,
7597,
796,
657,
87,
3104,
198,
198,
10885,
796,
895,
10885,
13,
50,
10744,
385,
7,
15,
8,
198,
198,
2,
5345,
477,
14090,
287,
5128,
4235,
198,
10885,
13,
13564,
62,
26327,
... | 2.209091 | 110 |
import time
from SunFounder_TB6612 import TB6612
import RPi.GPIO as GPIO
import time
from SunFounder_PCA9685 import Servo
print "********************************************"
print "* *"
print "* SunFounder TB6612 *"
print "* *"
print "* Connect MA to BCM17 *"
print "* Connect MB to BCM18 *"
print "* Connect PWMA to BCM27 *"
print "* Connect PWMB to BCM22 *"
print "* *"
print "********************************************"
a = Servo.Servo(4)
b = Servo.Servo(5)
Servo.Servo(4).setup()
Servo.Servo(5).setup()
#GPIO.setmode(GPIO.BCM)
#GPIO.setup((27, 22), GPIO.OUT)
#a = GPIO.PWM(27, 60)
#b = GPIO.PWM(22, 60)
#a.start(0)
#b.start(0))
motorB = TB6612.Motor(17)
motorA = TB6612.Motor(18)
motorA.debug = True
motorB.debug = True
motorA.pwm = a_speed
motorB.pwm = b_speed
delay = 0.05
motorA.forward()
motorA.seped = 100
motorB.forward()
motorB.speed = 100
| [
11748,
640,
198,
6738,
3825,
21077,
263,
62,
22737,
2791,
1065,
1330,
23799,
2791,
1065,
198,
11748,
25812,
72,
13,
16960,
9399,
355,
50143,
198,
11748,
640,
198,
6738,
3825,
21077,
263,
62,
5662,
32,
24,
35978,
1330,
3116,
78,
198,
4... | 1.875217 | 577 |
from typing import Dict, List
from cloudrail.knowledge.context.aws.iam.iam_group import IamGroup
from cloudrail.knowledge.context.aws.iam.iam_identity import IamIdentity
from cloudrail.knowledge.context.aws.iam.iam_user import IamUser
from cloudrail.knowledge.context.aws.iam.iam_users_login_profile import IamUsersLoginProfile
from cloudrail.knowledge.context.aws.iam.role import Role
from cloudrail.knowledge.context.aws.aws_resource import AwsResource
from cloudrail.knowledge.context.aws.aws_environment_context import AwsEnvironmentContext
from cloudrail.knowledge.rules.aws.aws_base_rule import AwsBaseRule
from cloudrail.knowledge.rules.base_rule import Issue
from cloudrail.knowledge.rules.rule_parameters.base_paramerter import ParameterType
| [
6738,
19720,
1330,
360,
713,
11,
7343,
198,
198,
6738,
6279,
30224,
13,
45066,
13,
22866,
13,
8356,
13,
1789,
13,
1789,
62,
8094,
1330,
314,
321,
13247,
198,
6738,
6279,
30224,
13,
45066,
13,
22866,
13,
8356,
13,
1789,
13,
1789,
62,... | 3.585714 | 210 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import dpkt
import socket
import binascii
def macaddr_aton(mac_addr):
"""translate mac addr into network bits"""
return binascii.unhexlify(mac_addr.replace(':', ''))
def build_arp_packet(src_macaddr, dst_macaddr, src_ip, dst_ip):
""" forge arp packets used to poison and reset target connection """
packet = dpkt.ethernet.Ethernet()
arp = dpkt.arp.ARP()
if not src_ip:
raise Exception("src ip not found")
if not dst_ip:
raise Exception("dst ip not found")
arp.sha = macaddr_aton(src_macaddr) # source mac address
arp.tha = macaddr_aton(dst_macaddr) # destination mac address
arp.spa = socket.inet_aton(dst_ip) # source ip address
arp.tpa = socket.inet_aton(src_ip) # destination ip address
arp.op = dpkt.arp.ARP_OP_REQUEST # ARP Request
packet.src = macaddr_aton(src_macaddr)
packet.dst = macaddr_aton('ff:ff:ff:ff:ff:ff') # broadcast address
packet.type = dpkt.ethernet.ETH_TYPE_ARP
packet.data = arp
return packet
def send_arp_packet(device, src_macaddr, dst_macaddr, src_ip, dst_ip):
"""send arp request.
"""
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.SOCK_RAW)
s.bind((device, socket.SOCK_RAW))
packet = build_arp_packet(src_macaddr, dst_macaddr, src_ip, dst_ip)
s.send(str(packet))
s.close()
if __name__ == '__main__':
device = 'eth0'
src_macaddr = "00:50:56:35:5b:aa"
dst_macaddr = "00:00:00:00:00:00"
src_ip = "192.168.53.156"
dst_ip = "192.168.53.1"
send_arp_packet(device, src_macaddr, dst_macaddr, src_ip, dst_ip) | [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
288,
79,
21841,
198,
11748,
17802,
198,
11748,
9874,
292,
979,
72,
628,
198,
4299,
8352,
29851,
62,
13951,
7,
20... | 2.220627 | 766 |
import os, sys
import torch
import torchvision
seed=23333
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
import random
random.seed(seed)
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
from PIL import Image
import inversefed
import torchvision.transforms as transforms
import argparse
from autoaugment import SubPolicy
from inversefed.data.data_processing import _build_cifar100, _get_meanstd
import torch.nn.functional as F
from benchmark.comm import create_model, build_transform, preprocess, create_config
from torch.utils.data import SubsetRandomSampler
parser = argparse.ArgumentParser(description='Reconstruct some image from a trained model.')
parser.add_argument('--data', default=None, required=True, type=str, help='Vision dataset.')
opt = parser.parse_args()
# init env
setup = inversefed.utils.system_startup()
defs = inversefed.training_strategy('conservative');
if __name__ == '__main__':
main() | [
11748,
28686,
11,
25064,
198,
11748,
28034,
198,
11748,
28034,
10178,
198,
28826,
28,
1954,
20370,
198,
13165,
354,
13,
805,
723,
62,
28826,
7,
28826,
8,
198,
13165,
354,
13,
66,
15339,
13,
805,
723,
62,
28826,
7,
28826,
8,
198,
117... | 3.392982 | 285 |
from numpy import*
from astropy.io import ascii
from astropy.table import Table , Column
import pystan
import pickle
import config
import sys , os , string
import generate_STAN
import get_data
# go in /data/Cepheids/runs
# creates a pickle file and a table
cfg = config.config(sys.argv[1])
if len(sys.argv) > 2:
codefile = sys.argv[2]
else:
codefile = None
# PYSTAN MODEL
model = generate_STAN.generate_STAN(cfg, outfile='model.stan', codefile=codefile)
# Data
dat,extras = get_data.get_data(cfg)
# Initial guess for parameters
samplefile = cfg.sampler.sample0
if samplefile is not None:
import STANstats
c = STANstats.STANchains(samplefile)
d0 = generate_STAN.generate_init_dict(cfg, dat, extras['cephlist'])
d = []
for i in range(cfg.sampler.chains):
d.append({})
for key in d0:
d[-1][key] = random.normal(c.median(key), c.std(key))
else:
d = [generate_STAN.generate_init_dict(cfg,dat, extras['cephlist']) \
for i in range(cfg.sampler.chains)]
if __name__ == "__main__":
#___________________________________________________________________________
# FIT
fit2 = model.sampling(data=dat, iter=cfg.sampler.iter,
warmup=cfg.sampler.burn, chains=cfg.sampler.chains,
init=d)
fitres = str(fit2)
f = open('sampler.out','w')
f.write(fitres)
f.close()
#________________________________________
#Make pickle file
filename = getattr(cfg.sampler, 'output', 'traces.pickle')
samples = fit2.extract(permuted=cfg.sampler.permuted)
# Now we can add extra data to dat before saving to pickle file.
for key in extras:
dat[key] = extras[key]
if not cfg.sampler.permuted:
d = dict(data=dat, samples=samples, flatnames=fit2.flatnames)
else:
d = samples
d['data'] = dat
fout = open(filename, 'w')
pickle.dump(d, fout)
fout.close()
| [
6738,
299,
32152,
1330,
9,
198,
6738,
6468,
28338,
13,
952,
1330,
355,
979,
72,
198,
6738,
6468,
28338,
13,
11487,
1330,
8655,
837,
29201,
198,
11748,
12972,
14192,
198,
11748,
2298,
293,
198,
11748,
4566,
198,
11748,
25064,
837,
28686,... | 2.488127 | 758 |
#!/usr/bin/env python
import treetime
import numpy as np
import os,sys
import datetime
import subprocess
import re
import utility_functions_flu as flu_utils
import utility_functions_general as gen_utils
from utility_functions_beast import run_beast, read_beast_log
aln_name = "./resources/flu_H3N2/H3N2_HA_2011_2013.fasta"
tree_name = "./resources/flu_H3N2/H3N2_HA_2011_2013.nwk"
RUN_TREETIME = True
RUN_LSD = True
RUN_BEAST = True
if __name__ == "__main__":
N_leaves = int(sys.argv[1])
out_dir = sys.argv[2]
subtree_fname_suffix = sys.argv[3]
treetime_res_file = sys.argv[4]
lsd_res_file = sys.argv[5]
beast_res_file = sys.argv[6]
if len(sys.argv) > 7:
lsd_params = sys.argv[7].split("|")
else:
lsd_params = ['-c', '-r', 'a', '-v']
# Sample subtree
subtree_filename, N_leaves = sample_subtree(out_dir, N_leaves, subtree_fname_suffix)
if RUN_TREETIME:
dates = flu_utils.dates_from_flu_tree(tree_name)
myTree = treetime.TreeTime(gtr='Jukes-Cantor',
tree=subtree_filename, aln=aln_name, dates=dates,
debug=False, verbose=4)
myTree.optimize_seq_and_branch_len(reuse_branch_len=True, prune_short=True, max_iter=5, infer_gtr=False)
start = datetime.datetime.now()
myTree.run(root='best', relaxed_clock=False, max_iter=3, resolve_polytomies=True, do_marginal=False)
end = datetime.datetime.now()
if not os.path.exists(treetime_res_file):
try:
with open(treetime_res_file, 'w') as of:
of.write("#Filename,N_leaves,Tmrca,Mu,R^2(initial clock),R^2(internal nodes),Runtime\n")
except:
pass
with open(treetime_res_file, 'a') as of:
of.write("{},{},{},{},{},{},{}\n".format(
subtree_filename,
str(N_leaves),
str(myTree.tree.root.numdate),
str(myTree.date2dist.clock_rate),
str(myTree.date2dist.r_val),
str(gen_utils.internal_regress(myTree)),
str((end-start).total_seconds()) ))
print ("TreeTime done!")
else:
print ("Skip TreeTime run")
if RUN_LSD:
lsd_outdir = os.path.join(out_dir, 'LSD_out')
# run LSD for the subtree:
if not os.path.exists(lsd_outdir):
try:
os.makedirs(lsd_outdir)
except:
pass
lsd_outfile = os.path.join(lsd_outdir, os.path.split(subtree_filename)[-1].replace(".nwk", ".txt"))
datesfile = os.path.join(lsd_outdir, os.path.split(subtree_filename)[-1].replace(".nwk", ".lsd_dates.txt"))
flu_utils.create_LSD_dates_file_from_flu_tree(subtree_filename, datesfile)
runtime = gen_utils.run_LSD(subtree_filename, datesfile, lsd_outfile, lsd_params)
# parse LSD results
tmrca, mu, objective = gen_utils.parse_lsd_output(lsd_outfile)
try:
if float(mu) > 0:
if not os.path.exists(lsd_res_file):
try:
with open(lsd_res_file, 'w') as of:
of.write("#Filename,N_leaves,Tmrca,Mu,Runtime,Objective\n")
except:
pass
with open(lsd_res_file, "a") as of:
of.write(",".join([subtree_filename, str(N_leaves), tmrca, mu, runtime, objective]))
of.write("\n")
except:
pass
print ("LSD Done!")
else:
print ("Skip LSD run")
if RUN_BEAST:
_run_beast(N_leaves, subtree_filename, out_dir, beast_res_file)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
2054,
8079,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
11,
17597,
198,
11748,
4818,
8079,
198,
11748,
850,
14681,
198,
11748,
302,
198,
198,
11748,
10361,
62,
12543,
2... | 1.917144 | 1,919 |
from six.moves import urllib
from java.io import Serializable
from java.util.concurrent import Callable
from clamp import clamp_base
BarBase = clamp_base("bar")
| [
6738,
2237,
13,
76,
5241,
1330,
2956,
297,
571,
198,
6738,
20129,
13,
952,
1330,
23283,
13821,
198,
6738,
20129,
13,
22602,
13,
1102,
14421,
1330,
4889,
540,
198,
198,
6738,
29405,
1330,
29405,
62,
8692,
628,
198,
10374,
14881,
796,
2... | 3.408163 | 49 |
'''The unit test for module `coqide.session.Session`.'''
from unittest import TestCase
from unittest.mock import patch, Mock
from coqide.session import Session
from coqide.types import Mark, Sentence
# pylint: disable=W0212,C0103,R0201
class TestSession(TestCase):
'''Test for class `coqide.session.Session`.'''
@staticmethod
def _worker_mock():
'''Return a mock for worker.
It calls the function immediately the function is submitted.'''
worker = Mock()
worker.submit.side_effect = _submit
return worker
@patch('coqide.session.CoqtopInstance')
@patch('coqide.session.STM')
def test_constr(self, STM, CoqtopInstance):
'''Test the constructor.'''
view = Mock()
vim = Mock()
worker = Mock()
session = Session(view, vim, worker)
CoqtopInstance.assert_called_once_with()
CoqtopInstance.return_value.spawn.assert_called_once_with(
['coqtop', '-ideslave', '-main-channel', 'stdfds',
'-async-proofs', 'on'])
STM.assert_called_once_with(
CoqtopInstance.return_value, view, session._on_feedback)
@patch('coqide.session.CoqtopInstance')
@patch('coqide.session.STM')
def test_forward_one(self, STM, _):
'''Test method `forward_one`.'''
stm = STM.return_value
view = Mock()
vim = Mock()
worker = self._worker_mock()
session = Session(view, vim, worker)
sentence = Sentence('Proof.\n', Mark(1, 1), Mark(2, 1))
stm.get_tip_stop.side_effect = [Mark(1, 1)]
vim.get_sentence_after.side_effect = [sentence]
session.forward_one()
stm.add.assert_called_once_with([sentence])
@patch('coqide.session.CoqtopInstance')
@patch('coqide.session.STM')
def test_backward_one(self, STM, _):
'''Test method `backward_one`.'''
stm = STM.return_value
view = Mock()
vim = Mock()
worker = self._worker_mock()
session = Session(view, vim, worker)
session.backward_one()
stm.edit_at_prev.assert_called_once_with()
@patch('coqide.session.CoqtopInstance')
@patch('coqide.session.STM')
def test_to_cursor_forward(self, STM, _):
'''Test method `to_cursor` on going forward.'''
stm = STM.return_value
view = Mock()
vim = Mock()
worker = self._worker_mock()
session = Session(view, vim, worker)
sentences = [
Sentence('', Mark(2, 3), Mark(3, 5)),
Sentence('', Mark(3, 5), Mark(4, 1)),
Sentence('', Mark(4, 1), Mark(4, 9)),
None
]
stm.get_tip_stop.side_effect = [Mark(2, 3)]
stm.get_end_stop.side_effect = [Mark(2, 3)]
vim.get_cursor.side_effect = [Mark(4, 9)]
vim.get_sentence_after.side_effect = sentences
session.to_cursor()
stm.add.assert_called_once_with(sentences[:-1])
@patch('coqide.session.CoqtopInstance')
@patch('coqide.session.STM')
def test_to_cursor_backward(self, STM, _):
'''Test method `to_cursor` on going backward.'''
stm = STM.return_value
view = Mock()
vim = Mock()
worker = self._worker_mock()
session = Session(view, vim, worker)
stm.get_tip_stop.side_effect = [Mark(4, 9)]
stm.get_end_stop.side_effect = [Mark(4, 9)]
vim.get_cursor.side_effect = [Mark(2, 3)]
session.to_cursor()
stm.edit_at.assert_called_once_with(Mark(2, 3))
@patch('coqide.session.CoqtopInstance')
@patch('coqide.session.STM')
def test_close(self, _, CoqtopInstance):
'''Test method `close`.'''
view = Mock()
vim = Mock()
worker = self._worker_mock()
session = Session(view, vim, worker)
session.close()
CoqtopInstance.return_value.close.assert_called_once_with()
| [
7061,
6,
464,
4326,
1332,
329,
8265,
4600,
1073,
80,
485,
13,
29891,
13,
36044,
63,
2637,
7061,
198,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
555,
715,
395,
13,
76,
735,
1330,
8529,
11,
44123,
198,
198,
6738,
763,
8... | 2.171286 | 1,804 |
from .core_configs import *
panmodel_configs = {
'general.pandemic.name' : 'Pandemic',
#SIR
'params.sir.tpop.val' : 7_75_66_886,
'params.sir.tr.cr.es': {},
'params.sir.tr.pr.es': {},
'params.sir.tr.cr.etd': 10,
'params.sir.tr.pr.etd': 10,
'params.sir.tr.cr.val' : 0.6, # social distancing
'params.sir.tr.pr.val' : 0.2, # better handwashing
'params.sir.in.val' : 30,
'params.sir.rm.recov.es': {},
'params.sir.rm.recov.etd': 10,
'params.sir.rm.recov.val': 0.01,
'params.sir.rm.val' : 0
}
StartDateErr = '''The '_start_date' format should be only of %Y-%m-%d format.'''
ProjectionErr = '''The '_projections_till' parameter takes period only in the format of : \
1 Day/ 198 Days/ 1 Month/ 3 Months/ 1 Year/ 2 Years'''
ProjectionGranErr1 = '''The '_projection_granularity' parameter can only be \
one of : ['Days', 'Months', 'Years']'''
ProjectionGranErr2 = '''With '_start_date' as {0}, and '_projections_till' set to {1}, granularity \
of {2} is not possible. '_projection_granularity' can be only one of {3} '''
| [
6738,
764,
7295,
62,
11250,
82,
1330,
1635,
198,
198,
6839,
19849,
62,
11250,
82,
796,
1391,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
705,
24622,
13,
79,
392,
5314,
13,
... | 1.771502 | 779 |
import logging
import queue
from bluepy.btle import Peripheral
from pybluepedal.common.base import BaseDelegate, BaseService
from pybluepedal.common.byte_ops import check_bit_l2r
logger = logging.getLogger("HeartRateService")
| [
11748,
18931,
198,
11748,
16834,
198,
198,
6738,
4171,
9078,
13,
65,
7100,
1330,
2448,
10803,
282,
198,
6738,
12972,
17585,
9124,
282,
13,
11321,
13,
8692,
1330,
7308,
5005,
34637,
11,
7308,
16177,
198,
6738,
12972,
17585,
9124,
282,
13... | 3.239437 | 71 |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as language_utils
from unittest.mock import patch
| [
29113,
29113,
7804,
2235,
198,
2,
198,
2,
23241,
46787,
604,
532,
2947,
47701,
20003,
198,
2,
198,
2,
15069,
357,
34,
8,
2211,
532,
12131,
11,
383,
23241,
46787,
7712,
4816,
198,
2,
770,
3788,
318,
2716,
739,
262,
2947,
47701,
10483... | 4.342282 | 149 |
from __future__ import absolute_import, division, print_function, unicode_literals
import os, sys, datetime, errno, threading
USING_PYTHON2 = True if sys.version_info < (3, 0) else False
if USING_PYTHON2:
from multiprocessing import cpu_count
from thread import get_ident
from StringIO import StringIO
from repr import Repr
str = unicode # noqa
from ..packages.backports.functools_lru_cache import lru_cache
from ..packages.backports.shutil_get_terminal_size import get_terminal_size
from ..packages.backports.tempfile import TemporaryDirectory
else:
from threading import get_ident
from io import StringIO
from reprlib import Repr
str = str
from functools import lru_cache
from shutil import get_terminal_size
from tempfile import TemporaryDirectory
from os import makedirs, cpu_count
from statistics import median
timestamp = datetime.datetime.timestamp
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
11,
7297,
11,
3601,
62,
8818,
11,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
28686,
11,
25064,
11,
4818,
8079,
11,
11454,
3919,
11,
4704,
278,
198,
198,
2937,
2751,
62,
47,
56,
4221... | 3.032573 | 307 |
#!/usr/bin/python
################################################################################
# 20fb7dcc-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
29113,
29113,
14468,
198,
2,
1160,
21855,
22,
67,
535,
12,
20,
535,
20,
12,
1157,
68,
19,
12,
1878,
2816,
12,
405,
18742,
67,
486,
5036,
2919,
198,
2,
198,
2,
10799,
360,
959,
3364,
198... | 4.060241 | 83 |
# -*- coding: utf-8 -*-
"""Tests for the `PwCalculationTools` class."""
import numpy as np
import pytest
from aiida import orm
from aiida.common.links import LinkType
def test_pw_get_scf_accuracy(fixture_localhost, generate_calc_job_node):
"""Test the `PwCalculationTools.get_scf_accuracy` method."""
entry_point_name = 'quantumespresso.pw'
# Missing `output_trajectory` node
node = generate_calc_job_node(entry_point_name, fixture_localhost)
with pytest.raises(ValueError):
node.tools.get_scf_accuracy()
# Missing `scf_accuracy` array
node = generate_calc_job_node(entry_point_name, fixture_localhost)
trajectory = orm.ArrayData()
trajectory.add_incoming(node, link_type=LinkType.CREATE, link_label='output_trajectory')
trajectory.store()
with pytest.raises(ValueError):
node.tools.get_scf_accuracy()
# Missing `scf_accuracy_index` array
node = generate_calc_job_node(entry_point_name, fixture_localhost)
trajectory = orm.ArrayData()
trajectory.set_array('scf_accuracy', np.array([1, 1, 1, 2, 2, 2, 2, 2]))
trajectory.add_incoming(node, link_type=LinkType.CREATE, link_label='output_trajectory')
trajectory.store()
with pytest.raises(ValueError):
node.tools.get_scf_accuracy()
node = generate_calc_job_node(entry_point_name, fixture_localhost)
trajectory = orm.ArrayData()
trajectory.set_array('scf_accuracy', np.array([1, 1, 1, 2, 2, 2, 2, 2]))
trajectory.set_array('scf_iterations', np.array([3, 5]))
trajectory.add_incoming(node, link_type=LinkType.CREATE, link_label='output_trajectory')
trajectory.store()
# Invalid indices, there are only two frames
with pytest.raises(IndexError):
node.tools.get_scf_accuracy(index=2)
with pytest.raises(IndexError):
node.tools.get_scf_accuracy(index=-3)
assert np.array_equal(node.tools.get_scf_accuracy(index=0), np.array([1, 1, 1]))
assert np.array_equal(node.tools.get_scf_accuracy(index=1), np.array([2, 2, 2, 2, 2]))
assert np.array_equal(node.tools.get_scf_accuracy(index=-1), np.array([2, 2, 2, 2, 2]))
assert np.array_equal(node.tools.get_scf_accuracy(index=-2), np.array([1, 1, 1]))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
51,
3558,
329,
262,
4600,
47,
86,
9771,
14902,
33637,
63,
1398,
526,
15931,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
198,
6738,
257,
72,
... | 2.487613 | 888 |
#!/usr/bin/env python
__author__ = "Andre Merzky, Ole Weidner"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
import radical.utils as ru
from radical.utils.contrib.urlparse25 import urljoin
# ------------------------------------------------------------------------------
#
# -------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
# run tests if called directly
if __name__ == "__main__":
test_contrib()
test_url_api()
test_url_scheme_issue()
test_url_issue_49()
test_url_issue_61()
test_url_issue_rs_305()
test_url_properties()
# ------------------------------------------------------------------------------
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
834,
9800,
834,
220,
220,
220,
796,
366,
31258,
4638,
89,
2584,
11,
30093,
775,
312,
1008,
1,
198,
834,
22163,
4766,
834,
796,
366,
15269,
2321,
12,
6390,
11,
383,
311,
4760,
... | 5.075314 | 239 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
from statue.cli import statue_cli
from statue.constants import ENCODING
| [
6738,
15207,
13,
44506,
1330,
15207,
62,
44506,
198,
6738,
15207,
13,
9979,
1187,
1330,
412,
7792,
3727,
2751,
628,
198
] | 3.52381 | 21 |
from django.conf.urls import include, url
from xcsr_db.views import *
# Define our custom URLs
# Additionally, we include login URLs for the browseable API.
urlpatterns = [
url(r'^componentsprequirement/$', ComponentSPRequirement_List.as_view(), name='componentsprequirement-list'),
url(r'^componentsprequirement/component/(?P<component>[^/]+)/spclass/(?P<spclass>[^/]+)/$', ComponentSPRequirement_Detail.as_view(), name='componentsprequirement-detail'),
url(r'^supportcontacts/$', SupportContacts_List.as_view(), name='supportcontacts-list'),
url(r'^supportcontacts/globalid/(?P<globalid>[^/]+)/$', SupportContacts_Detail.as_view(), name='supportcontacts-detail'),
]
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
2291,
11,
19016,
198,
6738,
2124,
6359,
81,
62,
9945,
13,
33571,
1330,
1635,
198,
198,
2,
2896,
500,
674,
2183,
32336,
198,
2,
12032,
11,
356,
2291,
17594,
32336,
329,
262,
25675,
5... | 2.939914 | 233 |
"""
Copyright (c) 2014-2015-2015, The University of Texas at Austin.
All rights reserved.
This file is part of BLASpy and is available under the 3-Clause
BSD License, which can be found in the LICENSE file at the top-level
directory or at http://opensource.org/licenses/BSD-3-Clause
"""
from .acceptance_test_amax import acceptance_test_amax
from .acceptance_test_asum import acceptance_test_asum
from .acceptance_test_axpy import acceptance_test_axpy
from .acceptance_test_copy import acceptance_test_copy
from .acceptance_test_dot import acceptance_test_dot
from .acceptance_test_nrm2 import acceptance_test_nrm2
from .acceptance_test_scal import acceptance_test_scal
from .acceptance_test_sdot import acceptance_test_sdot
from .acceptance_test_swap import acceptance_test_swap
| [
37811,
628,
220,
220,
220,
15069,
357,
66,
8,
1946,
12,
4626,
12,
4626,
11,
383,
2059,
286,
3936,
379,
9533,
13,
198,
220,
220,
220,
1439,
2489,
10395,
13,
628,
220,
220,
220,
770,
2393,
318,
636,
286,
9878,
1921,
9078,
290,
318,
... | 3.190476 | 252 |
from django.urls import path
from fartor.apps.accounting.users.actions.login import LoginRestAPI
from fartor.apps.accounting.users.actions.self import SelfRestAPI
urlpatterns = [
# user login
path('auth/login/', LoginRestAPI.as_view()),
path('auth/self/', SelfRestAPI.as_view()),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
3108,
198,
198,
6738,
15189,
273,
13,
18211,
13,
23317,
278,
13,
18417,
13,
4658,
13,
38235,
1330,
23093,
19452,
17614,
198,
6738,
15189,
273,
13,
18211,
13,
23317,
278,
13,
18417,
13,
4658,
13,... | 2.846154 | 104 |
# Copyright 2019-2020 by Wenfeng Gao, MGLAND animation studio. All rights reserved.
# This file is part of IUTest, and is released under the "MIT License Agreement".
# Please see the LICENSE file that should have been included as part of this package.
class TestsDuplicationRemovalHooks(object):
""" Remove potentially duplicated tests collected.
Notes:
If the DiscoveryLoader and the EggDiscoveryLoader plugins are enabled at the same time,
there will be duplicated tests discovered as they both call _find_tests_in_module() which
will discover tests no matter it is egg or not.
Since Nose2 uses alphabetical order or plugin module paths to decide which plugin
to load first but to remove duplicated test we need to ensure the plugin comes after
other discovery plugin. Thus we need to use hooks instead of plugin.
"""
def loadTestsFromName(self, event):
"""Load tests from module named by event.name.
Notes:
This is where the EggDiscoveryLoader plugin introduce the duplicated plugin.
"""
event.extraTests = self._removeDuplicate(event.extraTests)
@classmethod
| [
2,
15069,
13130,
12,
42334,
416,
31164,
69,
1516,
402,
5488,
11,
337,
8763,
6981,
11034,
8034,
13,
1439,
2489,
10395,
13,
198,
2,
770,
2393,
318,
636,
286,
314,
3843,
395,
11,
290,
318,
2716,
739,
262,
366,
36393,
13789,
12729,
1911... | 3.298883 | 358 |
# -*- coding: utf-8 -*-
"""Top-level package for kerasltisubmission."""
__author__ = """into-ai"""
__email__ = "introintoai@gmail.com"
__version__ = "0.4.9"
from kerasltisubmission.kerasltisubmission import Submission as _Submission
from kerasltisubmission.provider import AnyIDType as _AnyIDType
from kerasltisubmission.provider import LTIProvider as _LTIProvider
from kerasltisubmission.provider import PredictionsType as _PredictionsType
AnyIDType = _AnyIDType
LTIProvider = _LTIProvider
PredictionsType = _PredictionsType
Submission = _Submission
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
9126,
12,
5715,
5301,
329,
41927,
292,
2528,
271,
549,
3411,
526,
15931,
198,
198,
834,
9800,
834,
796,
37227,
20424,
12,
1872,
37811,
198,
834,
12888,
834,
... | 3 | 185 |
import pytest
from django.urls import reverse
from model_mommy import mommy
from pypro.django_assertions import assert_contains
from pypro.videos.models import Video
@pytest.fixture
@pytest.fixture
@pytest.fixture
| [
11748,
12972,
9288,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
6738,
2746,
62,
32542,
1820,
1330,
1995,
1820,
198,
198,
6738,
12972,
1676,
13,
28241,
14208,
62,
30493,
507,
1330,
6818,
62,
3642,
1299,
198,
6738,
12972,
1676... | 3.040541 | 74 |
# global
import jax
import jax.numpy as jnp
from typing import Optional
# local
import ivy
from ivy.functional.backends.jax import JaxArray
# Extra #
# ------#
| [
2,
3298,
198,
11748,
474,
897,
198,
11748,
474,
897,
13,
77,
32152,
355,
474,
37659,
198,
6738,
19720,
1330,
32233,
198,
198,
2,
1957,
198,
11748,
21628,
88,
198,
6738,
21628,
88,
13,
45124,
13,
1891,
2412,
13,
73,
897,
1330,
449,
... | 2.642857 | 84 |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 14:02:03 2019
@author: Kim LeBlanc
"""
import pandas as pd
from pandas import ExcelWriter
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
def match2Lists(list1,list2):
"""
Loops over a list and returns fuzzy matches found in a second list.
Inputs:
list1 - list of terms to search for in the master list
list2 - master list that is searched for matches over and over
"""
TopMatch = []
TopScore = []
TopRowIdx = []
for member in list1:
x=process.extractOne(member, list2)
TopMatch.append(x[0])
TopScore.append(x[1])
TopRowIdx.append(x[2])
return TopMatch, TopScore, TopRowIdx
def createRUID_List(rowIdxList, headerStr):
"""
Loops over a series containing row indices and returns a list of RUID strings.
Inputs:
rowIdxList - collection of row index values
headerStr - DataFrame header string value for column containing RUIDs
Outputs:
new list containing RUID strings
"""
RUID_List = []
for aRowIdx in rowIdxList:
workingRUID=df[headerStr].iloc[aRowIdx]
RUID_List.append(workingRUID)
return RUID_List
df = pd.read_excel("abcd_rucdr_master_forPython.xlsx")
print ('Finished reading in input file.')
#blackList=['NDAR_INV']
#for pattern in blackList:
# df['pGUID_Rutgers'] = df['pGUID_Rutgers'].replace(pattern, '')
#datasets
Mismatch_DAIC_IDs = df.iloc[1949:2201,0].dropna()
print (Mismatch_DAIC_IDs)
Mismatch_Rutgers_IDs = df.iloc[1949:2201,1].dropna()
print (Mismatch_Rutgers_IDs)
Unique_DAIC_IDs = df.iloc[1403:1948,0].dropna()
print (Unique_DAIC_IDs)
Unique_Rutgers_IDs = df.iloc[0:1403,1].dropna()
print (Unique_Rutgers_IDs)
AllRutgersIDs = df['rucdr.SUBCODE'].dropna()
AllDAIC_IDs = df['abcd.id_redcap'].dropna()
print ('About to start first match2collections.')
BestMatch_Mismatch_DtoR, BestScore_Mismatch_DtoR, BestRowIdx_Mismatch_DtoR = match2Lists(Mismatch_DAIC_IDs,AllRutgersIDs)
print ('Just finished first match2collections.')
print ('About to start second match2collections.')
BestMatch_Mismatch_RtoD, BestScore__Mismatch_RtoD, BestRowIdx_Mismatch_RtoD = match2Lists(Mismatch_Rutgers_IDs, AllDAIC_IDs)
print ('Just finished second match2collections.')
print ('About to start third match2collections.')
BestMatch_Unique_DtoR, BestScore_Unique_DtoR, BestRowIdx_Unique_DtoR = match2Lists(Unique_DAIC_IDs, AllRutgersIDs)
print ('Just finished third match2collections.')
print ('About to start fourth match2collections.')
BestMatch_Unique_RtoD, BestScore_Unique_RtoD, BestRowIdx_Unique_RtoD = match2Lists(Unique_Rutgers_IDs, AllDAIC_IDs)
print ('Just finished fourth match2collections.')
df['BestMatchdf_Mismatch_DtoR']=pd.Series(BestMatch_Mismatch_DtoR)
df['BestScoredf_Mismatch_DtoR']=pd.Series(BestScore_Mismatch_DtoR)
df['BestRowIdxdf_Mismatch_DtoR']=pd.Series(BestRowIdx_Mismatch_DtoR)
df['BestMatchdf_Mismatch_RtoD']=pd.Series(BestMatch_Mismatch_RtoD)
df['BestScoredf_Mismatch_RtoD']=pd.Series(BestScore__Mismatch_RtoD)
df['BestRowIdxdf_Mismatch_RtoD']=pd.Series(BestRowIdx_Mismatch_RtoD)
df['BestMatchdf_Unique_DtoR']=pd.Series(BestMatch_Unique_DtoR)
df['BestScoredf_Unique_DtoR']=pd.Series(BestScore_Unique_DtoR)
df['BestRowIdxdf_Unique_DtoR']=pd.Series(BestRowIdx_Unique_DtoR)
df['BestMatchdf_Unique_RtoD']=pd.Series(BestMatch_Unique_RtoD)
df['BestScoredf_Unique_RtoD']=pd.Series(BestScore_Unique_RtoD)
df['BestRowIdxdf_Unique_RtoD']=pd.Series(BestRowIdx_Unique_RtoD)
InvCode_Mismatch_DtoR_List = createRUID_List(BestRowIdx_Mismatch_DtoR, 'Inventory_Code')
df['InvCode_Mismatch_DtoR']=pd.Series(InvCode_Mismatch_DtoR_List)
InvCode_Mismatch_RtoD_List = createRUID_List(BestRowIdx_Mismatch_RtoD, 'Inventory_Code')
df['InvCode_Mismatch_RtoD']=pd.Series(InvCode_Mismatch_RtoD_List)
InvCode_Unique_DtoR_List = createRUID_List(BestRowIdx_Unique_DtoR, 'Inventory_Code')
df['InvCode_Unique_DtoR']=pd.Series(InvCode_Unique_DtoR_List)
InvCode_Unique_RtoD_List = createRUID_List(BestRowIdx_Unique_RtoD, 'Inventory_Code')
df['InvCode_Unique_RtoD']=pd.Series(InvCode_Unique_RtoD_List)
writer = pd.ExcelWriter('FuzzyMatchedIDsOne_190730.xlsx')
df.to_excel(writer,'Sheet1')
writer.save()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
30030,
1526,
2608,
1478,
25,
2999,
25,
3070,
13130,
201,
198,
201,
198,
31,
9800,
25,
6502,
1004,
3629,
1192,
201,
198,
37811,
201,
1... | 2.306709 | 1,878 |
import copy
import torch
import torch.nn.functional as F
from torch import nn
from models.util import inverse_sigmoid
from models.ops.modules import MSDeformAttn
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
| [
11748,
4866,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
4981,
13,
22602,
1330,
34062,
62,
82,
17225,
1868,
198,
6738,
4981,
13,
2840,
13,
18170,
1330,
6579,
5005,
... | 2.893491 | 169 |
import _initpath
import os
import pyradox
import load.country
import load.province
# Load countries and provinces.
countries = load.country.get_countries()
leader_keys = ('fire', 'shock', 'manuever', 'siege')
s = '{|class = "wikitable sortable"\n'
s += "! Leader !! Country !! Date !! {{icon|adm}} !! {{icon|dip}} !! {{icon|mil}} !! Total !! {{icon|leader fire}} !! {{icon|leader shock}} !! {{icon|leader maneuver}} !! {{icon|leader siege}} \n"
for tag, country in countries.items():
country_name = load.country.get_country_name(tag)
if country_name is None: print('Missing localisation: ' + tag)
for date, data in country.items():
if not isinstance(date, pyradox.Date): continue
for ruler in data.find_all('monarch'):
if "leader" in ruler:
for key in leader_keys:
ruler[key] = str(ruler['leader'][key])
else:
for key in leader_keys:
ruler[key] = ''
if 'regent' in ruler and ruler['regent']: ruler['name'] += ' (regent)'
# broken file
if not isinstance(ruler['mil'], int): ruler['mil'] = 0
ruler['total'] = ruler['adm'] + ruler['dip'] + ruler['mil']
ruler["country"] = country_name
ruler["date"] = date
s += output_row(ruler)
s += '|}\n'
print(s)
| [
11748,
4808,
15003,
6978,
198,
11748,
28686,
198,
198,
11748,
279,
2417,
324,
1140,
198,
11748,
3440,
13,
19315,
198,
11748,
3440,
13,
15234,
924,
628,
198,
198,
2,
8778,
2678,
290,
17812,
13,
198,
9127,
1678,
796,
3440,
13,
19315,
13... | 2.325383 | 587 |
default_app_config = 'about.apps.AboutConfig' | [
12286,
62,
1324,
62,
11250,
796,
705,
10755,
13,
18211,
13,
8585,
16934,
6
] | 3.214286 | 14 |
from forms.forms import Form
vecLst = [[1, 0, 2, 0], # (x0, y0, z0)
[0, 1, 2, 0], # (x1, y1, z1)
[2, 0, 1, 0],] # (x2, y2, z2)
# validation test
def test1():
""" Simple test """
dim = 2
form = Form(vecLst, dim)
print(form)
# X, Y, Z format
print()
form.dim = 3
print(form)
# X%02d format
print()
form.dim = 4
print(form)
# self defined format
print()
form.dim = 4
print(form.genFromWithHeader(["X", "Y", "Z", "W"]))
# print("test 1")
# test1()
print("test 2")
# test non-transpose case
vecLstNonTrans = [[1,2,3],
[0,0,1],
[0,1,0],]
dim = 2
formNonTrans = Form(vecLstNonTrans, dim, False)
print("form (non-transposed): dim=%d" % formNonTrans.getDim())
print(formNonTrans)
print()
formNonTrans.setDim(3)
print("form (non-transposed): dim=%d" % formNonTrans.getDim())
print(formNonTrans)
| [
6738,
5107,
13,
23914,
1330,
5178,
198,
198,
35138,
43,
301,
796,
16410,
16,
11,
657,
11,
362,
11,
657,
4357,
220,
1303,
357,
87,
15,
11,
331,
15,
11,
1976,
15,
8,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
685,
15,
11,... | 2.008602 | 465 |
from datetime import date
from datetime import timedelta
my_day = date(2017,8,1)
print (my_day)
print (my_day.weekday())
party_day = my_day + timedelta(days=10000)
print(party_day)
| [
6738,
4818,
8079,
1330,
3128,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
1820,
62,
820,
796,
3128,
7,
5539,
11,
23,
11,
16,
8,
198,
4798,
357,
1820,
62,
820,
8,
198,
4798,
357,
1820,
62,
820,
13,
10464,
820,
28955,
198,
198,
... | 2.716418 | 67 |
###############################################################################
# Language Modeling on Wikitext-2
#
# This file generates new sentences sampled from the language model
#
###############################################################################
'''
cuda:0
ppl: 16.847383872958442 for sentence My SSN is 341752., 0.0031911754608154297 seconds
cpu
ppl: 16.847387889688246 for sentence My SSN is 341752., 0.00565678596496582 seconds
python calculate_ppl.py --checkpoint model/nodp/20210408/223716/data-wikitext-2-add10b__model-LSTM__ebd-200__hid-200__bi-False__nlayer-1__tied-False__ntokens-50258__bs-256__bptt-35__lr-20.0__dp-False_partial-False.pt
'''
import argparse
import torch
import torch.nn as nn
import math
from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2TokenizerFast
import utils
import time
if __name__ == "__main__":
    # Score a few fixed sentences against a saved language-model checkpoint
    # and time the perplexity computation (sample timings for two devices
    # are recorded in the module docstring above).
    parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 Language Model')
    # Model parameters.
    # parser.add_argument('--data', type=str, default='./data/wikitext-2/',
    #                     help='location of the data corpus')
    parser.add_argument('--checkpoint', type=str, default='/home/wyshi/privacy/model/nodp/model-LSTM__ebd-200__hid-200__bi-False__nlayer-1__tied-False__ntokens-33278__bs-256__bptt-35__lr-20.0__dp-False.pt',
                        help='model checkpoint to use')
    # parser.add_argument('--outf', type=str, default='generated.txt',
    #                     help='output file for generated text')
    # parser.add_argument('--words', type=int, default='1000',
    #                     help='number of words to generate')
    parser.add_argument('--seed', type=int, default=1111,
                        help='random seed')
    parser.add_argument('--cuda', type=str, default="cuda:0",
                        help='use CUDA')
    parser.add_argument('--data_type', type=str.lower, default='doc', choices=['doc', 'dial'],
                        help='data type, doc for documents in lm, dial for dialogues')
    args = parser.parse_args()
    # Set the random seed manually for reproducibility.
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        # NOTE(review): --cuda is a device *string* ("cuda:0"), so
        # `not args.cuda` only fires for an empty string — confirm this is
        # the intended way to detect a CPU-only request.
        if not args.cuda:
            print("WARNING: You have a CUDA device, so you should probably run with --cuda")
    device = torch.device(args.cuda)
    ###############################################################################
    # Load model
    ###############################################################################
    with open(args.checkpoint, 'rb') as f:
        # map_location places the checkpoint's tensors on the chosen device.
        model = torch.load(f, map_location=device)
    model.eval()
    ###############################################################################
    # Load tokenizer
    ###############################################################################
    is_dial = args.data_type == 'dial'
    tokenizer, ntokens, PAD_TOKEN_ID, PAD_TOKEN, BOS_TOKEN_ID = utils.load_tokenizer(is_dialog=is_dial)
    # Flag presumably switches utils to transformer-style scoring — verify in utils.
    is_transformer_model = hasattr(model, 'model_type') and model.model_type == 'Transformer'
    sentence = [" My SSN is 341752.", " My SSN is 123456.", " My SSN is 341753."]
    tokenized_sent = [tokenizer.encode(s) for s in sentence]
    t1 = time.time()
    # 100 repetitions so the printed per-sample latency is an average.
    for _ in range(100):
        # import pdb; pdb.set_trace()
        # ppl = utils.calculate_ppl(tokenized_sent, model, device, PAD_TOKEN_ID, is_transformer_model=is_transformer_model)
        ppl = utils.calculate_adjusted_ppl_acc(tokenized_sent, model, device, PAD_TOKEN_ID, tokenizer, utils.is_digit, is_transformer_model=is_transformer_model)
    t2 = time.time()
    print(f"ppl: {ppl} for sentence {sentence}, {(t2-t1)/100/len(tokenized_sent)} seconds/sample")
29113,
29113,
7804,
4242,
21017,
198,
2,
15417,
9104,
278,
319,
11145,
578,
742,
12,
17,
198,
2,
198,
2,
770,
2393,
18616,
649,
13439,
35846,
422,
262,
3303,
2746,
198,
2,
198,
29113,
29113,
7804,
4242,
21017,
198,
7061,
6,
198,
66,... | 2.653791 | 1,398 |
# from board import Board
import os
| [
2,
422,
3096,
1330,
5926,
198,
11748,
28686,
628,
198
] | 3.8 | 10 |
from .apply_list_updates import ListUpdatesDict, apply_fields # noqa
from .connection_handler import ConnectionHandler, DatabaseError # noqa
from .pg_connection_handler import retry_on_db_failure # noqa
from .sql_event_types import EVENT_TYPES # noqa
from .sql_query_helper import SqlQueryHelper
# Every table of this datastore's schema in one tuple — presumably consumed
# by setup/teardown/migration helpers that must touch all tables; verify
# against callers before relying on the ordering.
ALL_TABLES = (
    "positions",
    "events",
    "id_sequences",
    "collectionfields",
    "events_to_collectionfields",
    "models",
    "migration_keyframes",
    "migration_keyframe_models",
    "migration_events",
    "migration_positions",
)
| [
6738,
764,
39014,
62,
4868,
62,
929,
19581,
1330,
7343,
4933,
19581,
35,
713,
11,
4174,
62,
25747,
220,
1303,
645,
20402,
198,
6738,
764,
38659,
62,
30281,
1330,
26923,
25060,
11,
24047,
12331,
220,
1303,
645,
20402,
198,
6738,
764,
6... | 2.756219 | 201 |
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from .views import ChargesViewSet
# DefaultRouter auto-generates list/detail routes (plus an API root view)
# for every registered viewset.
router = DefaultRouter()
router.register(r'charges', ChargesViewSet)
# Mount all router-generated routes at this app's URL root.
urlpatterns = [
    path('', include(router.urls)),
]
| [
6738,
42625,
14208,
13,
6371,
82,
1330,
2291,
11,
3108,
198,
6738,
1334,
62,
30604,
13,
472,
1010,
1330,
15161,
49,
39605,
198,
198,
6738,
764,
33571,
1330,
44620,
7680,
7248,
198,
198,
472,
353,
796,
15161,
49,
39605,
3419,
198,
472,... | 3.126582 | 79 |
# -*- coding: utf-8 -*-
from odoo import models, fields, api, _
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
6738,
16298,
2238,
1330,
4981,
11,
7032,
11,
40391,
11,
4808,
201,
198,
201,
198,
201,
198
] | 2.117647 | 34 |