content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import pymongo
from django.shortcuts import render
from main.secrets import connect_string
from django.http import HttpResponse
my_client = pymongo.MongoClient(connect_string)
dbname = my_client['plates']
glass = dbname["glass"]
archives = dbname["archives"]
| [
11748,
279,
4948,
25162,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
1388,
13,
2363,
8004,
1330,
2018,
62,
8841,
198,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
198,
1820,
62,
16366,
796,
279,... | 3.275 | 80 |
# -*- coding: utf-8 -*-
from pyfeedbacker.app import stage
from pyfeedbacker.app.controller import base
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
12972,
12363,
49978,
13,
1324,
1330,
3800,
198,
6738,
12972,
12363,
49978,
13,
1324,
13,
36500,
1330,
2779,
628,
198
] | 2.972222 | 36 |
#!/usr/bin/env python
#Create the rsrc/audio.swf containing all mp3s in the rsrc folder
import os, os.path, string, sys, shutil
print "Testing Flash"
os.system("bin/createImportAllForSwc.py src src/TestHydraxCompilation.hx")
os.system("haxe -main TestHydraxCompilation -cp src -swf build/delete.swf -lib polygonal -lib feffects -lib hsl-1 -lib gm2d")
print "Testing javascript"
os.system("bin/createImportAllForJs.py src src/TestHydraxCompilation.hx")
os.system("haxe -main TestHydraxCompilation -cp src -js build/test.js -cp ../libdamago/lib/bdog-redis -cp ../libdamago/lib/bdog-core --remap flash:jeash -lib polygonal -lib feffects -lib hsl-1 -lib html5 -lib jQueryExtern -lib gm2d -lib nodejs -lib nodejs_externs")
filesToDelete = ["build/delete.swf", "src/TestHydraxCompilation.hx" "build/test.js"]
for f in filesToDelete:
if os.path.exists(f):
os.remove(f)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
16447,
262,
374,
10677,
14,
24051,
13,
2032,
69,
7268,
477,
29034,
18,
82,
287,
262,
374,
10677,
9483,
198,
11748,
28686,
11,
28686,
13,
6978,
11,
4731,
11,
25064,
11,
4423,
... | 2.652439 | 328 |
import argparse
import paddle
import numpy as np
from model_pp import Generator
from tqdm import tqdm
from PIL import Image
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate samples from the generator")
parser.add_argument(
"--size", type=int, default=1024, help="output image size of the generator"
)
parser.add_argument(
"--sample",
type=int,
default=1,
help="number of samples to be generated for each image",
)
parser.add_argument(
"--pics", type=int, default=20, help="number of images to be generated"
)
parser.add_argument("--truncation", type=float, default=1, help="truncation ratio")
parser.add_argument(
"--truncation_mean",
type=int,
default=4096,
help="number of vectors to calculate mean for the truncation",
)
parser.add_argument(
"--ckpt",
type=str,
default="stylegan2-ffhq-config-f.g_ema",
help="path to the model checkpoint",
)
parser.add_argument(
"--channel_multiplier",
type=int,
default=2,
help="channel multiplier of the generator. config-f = 2, else = 1",
)
args = parser.parse_args()
args.latent = 512
args.n_mlp = 8
g_ema = Generator(
args.size, args.latent, args.n_mlp, channel_multiplier=args.channel_multiplier
)
state_dict = paddle.load(args.ckpt)
g_ema.set_state_dict(state_dict)
if args.truncation < 1:
with paddle.no_grad():
mean_latent = g_ema.mean_latent(args.truncation_mean)
else:
mean_latent = None
generate(args, g_ema, mean_latent) | [
11748,
1822,
29572,
198,
198,
11748,
39517,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2746,
62,
381,
1330,
35986,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
6738,
350,
4146,
1330,
7412,
628,
198,
198,
361,
11593,
3672,
... | 2.357043 | 717 |
import pytest
import os
import json
import shutil
# Getting absolute paths, names and regexes
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(TEST_DIR)
SERVICE_CONFIG_NAME = "service_manifest.yml"
SERVICE_CONFIG_PATH = os.path.join(ROOT_DIR, SERVICE_CONFIG_NAME)
TEMP_SERVICE_CONFIG_PATH = os.path.join("/tmp", SERVICE_CONFIG_NAME)
# Samples that we will be sending to the service
sample1 = dict(
sid=1,
metadata={},
service_name='espresso',
service_config={},
fileinfo=dict(
magic='Zip archive data, at least v2.0 to extract Zip archive data, made by v2.0, extract using at least '
'v2.0, last modified Fri Nov 22 13:25:57 2013, uncompressed size 239, method=deflate',
md5='762c340965c408900af83290a0c638b4',
mime='application/zip',
sha1='c727718ef0b7314979ddef22058c35022b7caedc',
sha256='121723c86cb7b24ad90f68dde901fe6dec0e337d8d3233cd5ef0d58f07d47487',
size=4092,
type='java/jar',
),
filename='121723c86cb7b24ad90f68dde901fe6dec0e337d8d3233cd5ef0d58f07d47487',
min_classification='TLP:W',
max_files=501, # TODO: get the actual value
ttl=3600,
)
@pytest.fixture
| [
11748,
12972,
9288,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
4423,
346,
198,
198,
2,
18067,
4112,
13532,
11,
3891,
290,
40364,
274,
198,
51,
6465,
62,
34720,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
... | 2.225275 | 546 |
from queryutils.databases import PostgresDB
l = "lupe"
p = PostgresDB(l, l, l)
#p.connect()
##cursor = p.execute("SELECT id, text, time, is_interactive, is_suspicious, search_type, \
## earliest_event, latest_event, range, is_realtime, \
## splunk_search_id, execution_time, saved_search_name, \
## user_id, session_id \
## FROM queries \
## WHERE text=%s" % p.wildcard, (text,))
#queries = []
#for idx, query in enumerate(p.get_interactive_queries()):
# if idx > 10: break
# queries.append(query)
#
#for query in queries:
# text = query.text
# print text
# cursor = p.execute("SELECT id, text, time, is_interactive, is_suspicious, search_type, \
# earliest_event, latest_event, range, is_realtime, \
# splunk_search_id, execution_time, saved_search_name, \
# user_id, session_id \
# FROM queries \
# WHERE is_interactive=%s AND text=%s" % (p.wildcard, p.wildcard), (True, text))
# rows = cursor.fetchall()
# print rows
#
#for q in p.get_interactive_queries_with_text(text):
# print q
for query_group in p.get_query_groups():
if query_group.query.text == "| metadata type=sourcetypes | search totalCount > 0":
print query_group
| [
6738,
12405,
26791,
13,
19608,
18826,
1330,
2947,
34239,
11012,
198,
75,
796,
366,
2290,
431,
1,
198,
79,
796,
2947,
34239,
11012,
7,
75,
11,
300,
11,
300,
8,
198,
198,
2,
79,
13,
8443,
3419,
198,
2235,
66,
21471,
796,
279,
13,
... | 2.181077 | 613 |
#!/usr/bin/env python3
import datetime
import argparse
from botocore.exceptions import ClientError
from copy import deepcopy
import time
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
4818,
8079,
198,
198,
11748,
1822,
29572,
198,
6738,
10214,
420,
382,
13,
1069,
11755,
1330,
20985,
12331,
198,
6738,
4866,
1330,
2769,
30073,
198,
11748,
640,
628
] | 3.564103 | 39 |
from .draw_card import *
from .draw_card_row import *
from .draw_card_stack import *
from .draw_sjuan_stack import *
| [
6738,
764,
19334,
62,
9517,
1330,
1635,
198,
6738,
764,
19334,
62,
9517,
62,
808,
1330,
1635,
198,
6738,
764,
19334,
62,
9517,
62,
25558,
1330,
1635,
198,
6738,
764,
19334,
62,
82,
73,
7258,
62,
25558,
1330,
1635,
198
] | 2.925 | 40 |
# Copyright 2009-2012 James P Goodwin ped tiny python editor
""" module that implements the keymapping and translation of keys to command identifiers """
import curses
import curses.ascii
import sys
import os
from ped_core import cmd_names
from ped_core import keytab
import pprint
import time
# default keymap for the editor manager
keymap_manager = {
keytab.KEYTAB_CTRLN: (cmd_names.CMD_NEXTEDITOR,keytab.KEYTAB_NOKEY),
keytab.KEYTAB_ALTN: (cmd_names.CMD_NEXTEDITOR,keytab.KEYTAB_NOKEY),
keytab.KEYTAB_RESIZE: (cmd_names.CMD_RESIZE,keytab.KEYTAB_NOKEY),
keytab.KEYTAB_REFRESH: (cmd_names.CMD_REFRESH,keytab.KEYTAB_NOKEY),
keytab.KEYTAB_F12: (cmd_names.CMD_REFRESH,keytab.KEYTAB_NOKEY),
keytab.KEYTAB_F04: (cmd_names.CMD_NEXTFRAME,keytab.KEYTAB_NOKEY),
keytab.KEYTAB_ALTB: (cmd_names.CMD_BUFFERLIST,keytab.KEYTAB_REFRESH),
keytab.KEYTAB_ALTF: (cmd_names.CMD_FILEFIND,keytab.KEYTAB_REFRESH),
keytab.KEYTAB_F22: (cmd_names.CMD_SFTP,keytab.KEYTAB_REFRESH),
keytab.KEYTAB_ALTX: (cmd_names.CMD_SAVEEXIT,keytab.KEYTAB_REFRESH),
keytab.KEYTAB_F01: (cmd_names.CMD_HELP, keytab.KEYTAB_NOKEY),
keytab.KEYTAB_ALTI: (cmd_names.CMD_HELP, keytab.KEYTAB_NOKEY),
keytab.KEYTAB_F10: (cmd_names.CMD_SHELLCMD, keytab.KEYTAB_REFRESH),
keytab.KEYTAB_CTRLO: (cmd_names.CMD_OPENEDITOR, keytab.KEYTAB_REFRESH),
keytab.KEYTAB_ALTE: (cmd_names.CMD_OPENEDITOR, keytab.KEYTAB_REFRESH),
keytab.KEYTAB_CTRLP: (cmd_names.CMD_PREVEDITOR, keytab.KEYTAB_NOKEY),
keytab.KEYTAB_ALTP: (cmd_names.CMD_PREVEDITOR, keytab.KEYTAB_NOKEY),
keytab.KEYTAB_ALTH: (cmd_names.CMD_HORZSPLIT, keytab.KEYTAB_NOKEY),
keytab.KEYTAB_ALTV: (cmd_names.CMD_VERTSPLIT, keytab.KEYTAB_NOKEY),
keytab.KEYTAB_ALTZ: (cmd_names.CMD_ZOOMFRAME, keytab.KEYTAB_NOKEY),
keytab.KEYTAB_ALTK: (cmd_names.CMD_KILLFRAME, keytab.KEYTAB_REFRESH),
keytab.KEYTAB_CTRLD: (cmd_names.CMD_DELEDITOR, keytab.KEYTAB_REFRESH),
keytab.KEYTAB_ALTD: (cmd_names.CMD_DELEDITOR, keytab.KEYTAB_NOKEY),
keytab.KEYTAB_ESC: (cmd_names.CMD_EXITNOSAVE, keytab.KEYTAB_REFRESH),
keytab.KEYTAB_DLGCANCEL: (cmd_names.CMD_EXITNOSAVE, keytab.KEYTAB_REFRESH),
keytab.KEYTAB_MOUSE: (cmd_names.CMD_MOUSE, keytab.KEYTAB_NOKEY),
}
# default keymap for the editor
keymap_editor = {
keytab.KEYTAB_CTRLK: (cmd_names.CMD_MARKSPAN, keytab.KEYTAB_NOKEY), # CTRL-K
keytab.KEYTAB_CTRLR: (cmd_names.CMD_MARKRECT, keytab.KEYTAB_NOKEY), # CTRL-R
keytab.KEYTAB_CTRLC: (cmd_names.CMD_COPYMARKED, keytab.KEYTAB_NOKEY), # CTRL-C
keytab.KEYTAB_CTRLG: (cmd_names.CMD_PRMTGOTO, keytab.KEYTAB_REFRESH), # CTRL-G
keytab.KEYTAB_BACKSPACE: (cmd_names.CMD_BACKSPACE, keytab.KEYTAB_NOKEY), # CTRL-H
keytab.KEYTAB_CTRLF: (cmd_names.CMD_FILENAME, keytab.KEYTAB_REFRESH), # CTRL-F
keytab.KEYTAB_CTRLX: (cmd_names.CMD_CUTMARKED, keytab.KEYTAB_NOKEY), # CTRL-X
keytab.KEYTAB_CTRLV: (cmd_names.CMD_PASTE, keytab.KEYTAB_NOKEY), # ctrl-v
keytab.KEYTAB_CTRLL: (cmd_names.CMD_MARKLINES, keytab.KEYTAB_NOKEY), # ctrl-l
keytab.KEYTAB_CR: (cmd_names.CMD_CR, keytab.KEYTAB_NOKEY), # ctrl-m (enter)
keytab.KEYTAB_TAB: (cmd_names.CMD_TAB, keytab.KEYTAB_NOKEY), # tab
keytab.KEYTAB_CTRLS: (cmd_names.CMD_SAVE, keytab.KEYTAB_NOKEY), # ctrl-s (save)
keytab.KEYTAB_CTRLW: (cmd_names.CMD_SAVEAS, keytab.KEYTAB_REFRESH), # ctrl-w (save as)
keytab.KEYTAB_ALTL: (cmd_names.CMD_MARKLINES, keytab.KEYTAB_NOKEY), # alt-L
keytab.KEYTAB_ALTM: (cmd_names.CMD_MARKSPAN, keytab.KEYTAB_NOKEY), # alt-M
keytab.KEYTAB_ALTC: (cmd_names.CMD_MARKRECT, keytab.KEYTAB_NOKEY), # alt-C
keytab.KEYTAB_ALTW: (cmd_names.CMD_SAVE, keytab.KEYTAB_REFRESH), # alt-W
keytab.KEYTAB_ALTo: (cmd_names.CMD_SAVEAS, keytab.KEYTAB_REFRESH), # alt-o
keytab.KEYTAB_ALTG: (cmd_names.CMD_PRMTGOTO, keytab.KEYTAB_REFRESH), # alt-G
keytab.KEYTAB_ALTU: (cmd_names.CMD_UNDO, keytab.KEYTAB_NOKEY), # alt-U
keytab.KEYTAB_ALTJ: (cmd_names.CMD_TOGGLEWRAP, keytab.KEYTAB_NOKEY), # alt-J
keytab.KEYTAB_KEYPADPLUS: (cmd_names.CMD_MARKCOPYLINE, keytab.KEYTAB_NOKEY), # keypad +
keytab.KEYTAB_KEYTPADMINUS:(cmd_names.CMD_MARKCUTLINE, keytab.KEYTAB_NOKEY), # keypad -
keytab.KEYTAB_ALTO: (cmd_names.CMD_SAVEAS, keytab.KEYTAB_REFRESH), # alt-O
keytab.KEYTAB_BACKTAB: (cmd_names.CMD_BTAB, keytab.KEYTAB_NOKEY), # BACKTAB
keytab.KEYTAB_CTRLLEFT: (cmd_names.CMD_PREVWORD, keytab.KEYTAB_NOKEY), # ctrl-leftarrow
keytab.KEYTAB_CTRLRIGHT: (cmd_names.CMD_NEXTWORD, keytab.KEYTAB_NOKEY), # ctrl-rightarrow
keytab.KEYTAB_CTRLHOME: (cmd_names.CMD_HOME1, keytab.KEYTAB_NOKEY), # home
keytab.KEYTAB_CTRLEND: (cmd_names.CMD_END1, keytab.KEYTAB_NOKEY), # end
keytab.KEYTAB_RESIZE: (cmd_names.CMD_RETURNKEY, keytab.KEYTAB_RESIZE), # resize
keytab.KEYTAB_UP: (cmd_names.CMD_UP, keytab.KEYTAB_NOKEY), # up
keytab.KEYTAB_DOWN: (cmd_names.CMD_DOWN, keytab.KEYTAB_NOKEY), # down
keytab.KEYTAB_LEFT: (cmd_names.CMD_LEFT, keytab.KEYTAB_NOKEY), # left
keytab.KEYTAB_RIGHT: (cmd_names.CMD_RIGHT, keytab.KEYTAB_NOKEY), # right
keytab.KEYTAB_DELC: (cmd_names.CMD_DELC, keytab.KEYTAB_NOKEY), # del
keytab.KEYTAB_BACKSPACE: (cmd_names.CMD_BACKSPACE, keytab.KEYTAB_NOKEY), # backspace
keytab.KEYTAB_HOME: (cmd_names.CMD_HOME, keytab.KEYTAB_NOKEY), # home
keytab.KEYTAB_END: (cmd_names.CMD_END, keytab.KEYTAB_NOKEY), # end
keytab.KEYTAB_PAGEUP: (cmd_names.CMD_PAGEUP, keytab.KEYTAB_NOKEY), # pageup
keytab.KEYTAB_PAGEDOWN: (cmd_names.CMD_PAGEDOWN, keytab.KEYTAB_NOKEY), # pagedown
keytab.KEYTAB_BTAB: (cmd_names.CMD_BTAB, keytab.KEYTAB_NOKEY), # backtab
keytab.KEYTAB_INSERT: (cmd_names.CMD_PASTE, keytab.KEYTAB_NOKEY), # paste
keytab.KEYTAB_F05: (cmd_names.CMD_PRMTSEARCH, keytab.KEYTAB_REFRESH), # F5
keytab.KEYTAB_F06: (cmd_names.CMD_PRMTREPLACE, keytab.KEYTAB_REFRESH), # F6
keytab.KEYTAB_F07: (cmd_names.CMD_TRANSFERCLIPIN, keytab.KEYTAB_NOKEY), # F7
keytab.KEYTAB_F08: (cmd_names.CMD_TRANSFERCLIPOUT, keytab.KEYTAB_NOKEY), # f8
keytab.KEYTAB_F17: (cmd_names.CMD_PRMTSEARCHBACK, keytab.KEYTAB_REFRESH), # SHIFT F5
keytab.KEYTAB_F03: (cmd_names.CMD_SEARCHAGAIN, keytab.KEYTAB_REFRESH), # F3
keytab.KEYTAB_F09: (cmd_names.CMD_TOGGLERECORD, keytab.KEYTAB_NOKEY), # F9
keytab.KEYTAB_ALTA: (cmd_names.CMD_PLAYBACK, keytab.KEYTAB_NOKEY), # alt-a
keytab.KEYTAB_F11: (cmd_names.CMD_PLAYBACK, keytab.KEYTAB_NOKEY), # F11
}
# default keymap for dialogs
keymap_dialog = {
keytab.KEYTAB_TAB: (cmd_names.CMD_DLGNEXT, keytab.KEYTAB_DLGNOP),
keytab.KEYTAB_CR: (cmd_names.CMD_DLGNEXT, keytab.KEYTAB_DLGNOP),
keytab.KEYTAB_BTAB: (cmd_names.CMD_DLGPREV, keytab.KEYTAB_DLGNOP),
keytab.KEYTAB_UP: (cmd_names.CMD_DLGUP, keytab.KEYTAB_DLGNOP ),
keytab.KEYTAB_ESC: (cmd_names.CMD_RETURNKEY, keytab.KEYTAB_DLGCANCEL ),
keytab.KEYTAB_RESIZE: (cmd_names.CMD_DLGRESIZE, keytab.KEYTAB_DLGNOP ),
keytab.KEYTAB_MOUSE: (cmd_names.CMD_DLGMOUSE, keytab.KEYTAB_NOKEY),
}
recording = False
playback = False
macro = []
macro_idx = 0
keydef_map = {}
def insert_keydef( km, oseq, kt ):
""" insert into the keydef_map an ordinal sequence and a keytab key to map it to """
try:
if len(oseq) == 1:
km[oseq[0]] = kt
else:
if oseq[0] not in km:
km[oseq[0]] = {}
insert_keydef( km[oseq[0]], oseq[1:], kt )
except:
raise
def compile_keydef():
""" compile ketab.keydef into keydef_map for lookup of ordinal key strings """
global keydef_map
keydef_map = {}
for kd in keytab.keydef:
insert_keydef( keydef_map, kd[0], kd[1] )
def start_recording():
""" start recording key sequences into macro list """
global recording, macro, macro_idx
recording = True
macro = []
macro_idx = 0
def stop_recording():
""" stop recording key sequences into macro list """
global recording
recording = False
def toggle_recording():
""" flip recording on or off """
global recording,macro
if recording:
macro.pop()
stop_recording()
else:
start_recording()
def is_recording():
""" return current state of recording key macro """
global recording
return recording
def start_playback():
""" start playback from macro """
global playback, macro_idx
playback = True
macro_idx = 0
if is_recording():
stop_recording()
def stop_playback():
""" stop playback from macro """
global playback, macro_index
playback = False
macro_index = 0
def record_seq( seq ):
""" record a key sequence into the buffer """
global macro
if (len(seq) == 1 and seq[0] == -1) or seq == '\x00':
return
macro.append(seq)
def playback_seq():
""" get the next sequence to play back """
global macro, macro_idx
if macro and macro_idx < len(macro):
seq = macro[macro_idx]
macro_idx += 1
return seq
stop_playback()
return keytab.KEYTAB_REFRESH
def is_playback():
""" return true if we are in playback mode """
global playback
return playback
def keypending( scr ):
""" return true if getch is going to return a real key """
ch = scr.getch()
if ch >= 0:
curses.ungetch(ch)
return (ch >= 0)
def getch( scr ):
""" wrapper to fetch keys from a curses screen or window """
global playback
if playback:
return 0
else:
time.sleep(0.01)
ch = scr.getch()
return ch
def get_keyseq( scr, ch ):
""" get the full key sequence to be mapped, parameter is the first key of the sequence """
global playback, recording
if playback:
return playback_seq()
if 0<ch<256 and curses.ascii.isprint(ch):
seq = chr(ch)
else:
map = keydef_map
while True:
if ch in map:
map = map[ch]
if not isinstance(map,dict):
seq = map
break
else:
seq = keytab.KEYTAB_NOKEY
break
if ch < 0:
while ch < 0:
ch = scr.getch()
else:
ch = scr.getch()
if recording:
record_seq( seq )
return seq
def mapkey( scr, keymap_xlate, ch ):
""" complete fetching the key sequence and get the command and return character as a tuple (cmd_id, retkey) """
seq = get_keyseq( scr, ch )
return mapseq( keymap_xlate, seq )
def mapseq( keymap_xlate, seq ):
""" map a key sequence from the KEYMAP_ constants to a command using the supplied keymap_xlate hash return (cmd,seq) tuple"""
ret = (cmd_names.CMD_RETURNKEY,seq)
if len(seq) == 1 and curses.ascii.isprint(ord(seq[0])):
return (cmd_names.CMD_INSERT,ord(seq[0]))
elif seq in keymap_xlate:
ret = keymap_xlate[seq]
return ret
def loadkeymap():
""" look for a file ~/.pedkeymap and load alternate key bindings from it """
# lines either are blank, are a comment starting with #
# or are KEYMAP={EDITOR,DIALOG,MANAGER} which selects a keymap to apply subsequent binding to also clears that keymap
# or MAP=key,cmd,ret which maps key to cmd and specifies the return key that should result after the cmd executes
global keymap_editor, keymap_dialog,keymap_manager
kmf = os.path.expanduser("~/.pedkeymap")
mapname = None
if os.path.exists(kmf):
for l in open(kmf,"r"):
l = l.strip()
if l and not l.startswith("#"):
if l.startswith("KEYMAP"):
keyword,mapname = l.split("=",1)
mapname = mapname.strip().upper()
if mapname == "EDITOR":
keymap_editor = {}
elif mapname == "DIALOG":
keymap_dialog = {}
elif mapname == "MANAGER":
keymap_manager = {}
elif l.startswith("MAP"):
keyword,mapping = l.split("=",1)
mapping = mapping.strip().upper()
key,cmd,ret = mapping.split(",",2)
if mapname == "EDITOR":
keymap_editor[keytab.name_to_key[key]] = ( cmd_names.name_to_cmd[cmd], keytab.name_to_key[ret] )
elif mapname == "DIALOG":
keymap_dialog[keytab.name_to_key[key]] = ( cmd_names.name_to_cmd[cmd], keytab.name_to_key[ret] )
elif mapname == "MANAGER":
keymap_manager[keytab.name_to_key[key]] = ( cmd_names.name_to_cmd[cmd], keytab.name_to_key[ret] )
def dumpkeymap():
""" create a default ~/.pedkeymap keybinding file """
kmf = os.path.expanduser("~/.pedkeymap")
f = open(kmf,"w")
keymaps = [ ("EDITOR",keymap_editor), ("DIALOG",keymap_dialog), ("MANAGER",keymap_manager) ]
for name,km in keymaps:
print("KEYMAP=%s"%name, file=f)
for key, mapping in list(km.items()):
print("MAP=%s,%s,%s"%(keytab.key_to_name[key],cmd_names.cmd_to_name[mapping[0]],keytab.key_to_name[mapping[1]]), file=f)
def loadkeydef():
""" look for a file ~/.pedkeydef and load alternate KEYTAB definitions and mappings from raw curses key codes to KEYTAB definitions """
# lines either are blank, are a comment starting with #
# or are KEYTAB_KEY="literal"
# or are KEYDEF=ord,[ord1...ordn],KEYTAB_KEY
#
kdf = os.path.expanduser("~/.pedkeydef")
if os.path.exists(kdf):
keytab.keydef = []
for l in open(kdf,"r"):
l = l.strip()
if l and not l.startswith("#"):
key,value = l.split("=",1)
key = key.strip()
value = value.strip()
if key == "KEYDEF":
parms = [f.strip() for f in value.split(",")]
ords = tuple([int(f) for f in parms[:-1]])
key = keytab.name_to_key[parms[-1]]
keytab.keydef.append((ords,key))
elif key.startswith("KEYTAB_"):
setattr(keytab,key,eval(value))
keytab.name_to_key[key] = eval(value)
keytab.key_to_name[eval(value)] = key
compile_keydef()
def dumpkeydef():
""" create a default ~/.pedkeydef keydef file """
kdf = os.path.expanduser("~/.pedkeydef")
fkdf = open(kdf,"w")
for attr_name in dir(keytab):
if attr_name.startswith("KEYTAB_"):
print("%s=%s"%(attr_name,repr(getattr(keytab,attr_name))), file=fkdf)
for k in keytab.keydef:
print("KEYDEF=%s,%s"%(",".join([str(f) for f in k[0]]),keytab.key_to_name[k[1]]), file=fkdf)
# force the keydef_map to be built on load of module
if not keydef_map:
compile_keydef()
# altch = stdscr.getch()
# stdscr.nodelay(1)
# rest = ""
# while altch > 0:
# stdscr.addstr(0,0, "curses[%d][%s]"%(altch,curses.keyname(altch)), curses.A_REVERSE)
# altch = stdscr.getch()
#
# stdscr.nodelay(0)
if __name__ == '__main__':
os.environ.setdefault('ESCDELAY','25')
curses.wrapper(main)
| [
2,
15069,
3717,
12,
6999,
3700,
350,
46653,
7190,
7009,
21015,
5464,
198,
37811,
8265,
326,
23986,
262,
1994,
76,
5912,
290,
11059,
286,
8251,
284,
3141,
42814,
37227,
198,
11748,
43878,
198,
11748,
43878,
13,
292,
979,
72,
198,
11748,
... | 1.95524 | 8,177 |
# Copyright Notice:
# Copyright 2017-2019 DMTF. All rights reserved.
# License: BSD 3-Clause License. For full text see link: https://github.com/DMTF/Redfish-Interface-Emulator/blob/master/LICENSE.md
# Manager Template File
import copy
import strgen
_TEMPLATE = \
{
"@Redfish.Copyright": "Copyright 2014-2019 DMTF. All rights reserved.",
"@odata.context": "{rb}$metadata#Manager.Manager",
"@odata.id": "{rb}Managers/{id}",
"@odata.type": "#Manager.v1_1_0.Manager",
"Id": "{id}",
"Name": "Manager",
"ManagerType": "BMC",
"Description": "BMC",
"ServiceEntryPointUUID": "92384634-2938-2342-8820-489239905423",
"UUID": "00000000-0000-0000-0000-000000000000",
"Model": "Joo Janta 200",
"DateTime": "2015-03-13T04:14:33+06:00",
"DateTimeLocalOffset": "+06:00",
"Status": {
"State": "Enabled",
"Health": "OK"
},
"GraphicalConsole": {
"ServiceEnabled": True,
"MaxConcurrentSessions": 2,
"ConnectTypesSupported": [
"KVMIP"
]
},
"SerialConsole": {
"ServiceEnabled": True,
"MaxConcurrentSessions": 1,
"ConnectTypesSupported": [
"Telnet",
"SSH",
"IPMI"
]
},
"CommandShell": {
"ServiceEnabled": True,
"MaxConcurrentSessions": 4,
"ConnectTypesSupported": [
"Telnet",
"SSH"
]
},
"FirmwareVersion": "1.00",
"NetworkProtocol": {
"@odata.id": "{rb}Managers/{id}/NetworkProtocol"
},
"EthernetInterfaces": {
"@odata.id": "{rb}Managers/{id}/EthernetInterfaces"
},
"SerialInterfaces": {
"@odata.id": "{rb}Managers/{id}/SerialInterfaces"
},
"LogServices": {
"@odata.id": "{rb}Managers/{id}/LogServices"
},
"VirtualMedia": {
"@odata.id": "{rb}Managers/{id}/VM1"
},
"Links": {
"ManagerForServers": [
{
"@odata.id": "{rb}Systems/{linkSystem}"
}
],
"ManagerForChassis": [
{
"@odata.id": "{rb}Chassis/{linkChassis}"
}
],
"ManagerInChassis": {
"@odata.id": "{rb}Chassis/{linkInChassis}"
}
},
"Actions": {
"#Manager.Reset": {
"target": "{rb}Managers/{id}/Actions/Manager.Reset",
"ResetType@Redfish.AllowableValues": [
"ForceRestart",
"GracefulRestart"
]
}
}
}
def get_Manager_instance(wildcards):
"""
Instantiate and format the template
Arguments:
wildcard - A dictionary of wildcards strings and their repalcement values
"""
c = copy.deepcopy(_TEMPLATE)
c['@odata.context'] = c['@odata.context'].format(**wildcards)
c['@odata.id'] = c['@odata.id'].format(**wildcards)
c['Id'] = c['Id'].format(**wildcards)
c['NetworkProtocol']['@odata.id'] = c['NetworkProtocol']['@odata.id'].format(**wildcards)
c['EthernetInterfaces']['@odata.id'] = c['EthernetInterfaces']['@odata.id'].format(**wildcards)
c['SerialInterfaces']['@odata.id'] = c['SerialInterfaces']['@odata.id'].format(**wildcards)
c['LogServices']['@odata.id'] = c['LogServices']['@odata.id'].format(**wildcards)
c['VirtualMedia']['@odata.id'] = c['VirtualMedia']['@odata.id'].format(**wildcards)
systems=wildcards['linkSystem']
if type(systems) is list:
mfs=[{'@odata.id':c['Links']['ManagerForServers'][0]['@odata.id'].format(rb=wildcards['rb'], linkSystem=x)}
for x in systems]
else:
mfs=[{'@odata.id':c['Links']['ManagerForServers'][0]['@odata.id'].format(**wildcards)}]
c['Links']['ManagerForServers'] = mfs
c['Links']['ManagerForChassis'][0]['@odata.id'] = c['Links']['ManagerForChassis'][0]['@odata.id'].format(**wildcards)
c['Links']['ManagerInChassis']['@odata.id'] = c['Links']['ManagerInChassis']['@odata.id'].format(**wildcards)
c['Actions']['#Manager.Reset']['target'] = c['Actions']['#Manager.Reset']['target'].format(**wildcards)
return c
| [
2,
15069,
17641,
25,
198,
2,
15069,
2177,
12,
23344,
14848,
10234,
13,
1439,
2489,
10395,
13,
198,
2,
13789,
25,
347,
10305,
513,
12,
2601,
682,
13789,
13,
1114,
1336,
2420,
766,
2792,
25,
3740,
1378,
12567,
13,
785,
14,
23127,
1023... | 2.089613 | 1,964 |
import networkx as nx
import pickle
import abc
import logging
| [
11748,
3127,
87,
355,
299,
87,
198,
11748,
2298,
293,
198,
11748,
450,
66,
198,
11748,
18931,
198
] | 3.444444 | 18 |
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class ActMoodIntent(GenericTypeCode):
"""
v3.ActMoodIntent
From: http://terminology.hl7.org/ValueSet/v3-ActMoodIntent in v3-codesystems.xml
An intention or plan to perform a service. Historical note: in previous RIM
versions, the intent mood was captured as a separate class hierarchy, called
Service_intent_or_order.
"""
"""
http://terminology.hl7.org/CodeSystem/v3-ActMood
"""
codeset: FhirUri = "http://terminology.hl7.org/CodeSystem/v3-ActMood"
class ActMoodIntentValues:
"""
These are moods describing activities as they progress in the business cycle,
from defined, through planned and ordered to completed.
From: http://terminology.hl7.org/CodeSystem/v3-ActMood in v3-codesystems.xml
"""
ActMoodCompletionTrack = ActMoodIntent("_ActMoodCompletionTrack")
"""
Definition: An act that expresses condition statements for other acts.
From: http://terminology.hl7.org/CodeSystem/v3-ActMood in v3-codesystems.xml
"""
ActMoodPredicate = ActMoodIntent("_ActMoodPredicate")
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
9009,
62,
23736,
62,
76,
11463,
62,
69,
71,
343,
13,
69,
71,
343,
62,
19199,
13,
9900,
1330,
376,
71,
343,
52,
380,
198,
198,
6738,
9009,
62,
23736,
62,
76,
11463,
62,
69,
71... | 2.865462 | 498 |
# from django import forms
from fitbox.cadastro_paciente.models import CadastroPaciente
from bootstrap_modal_forms.forms import BSModalModelForm
| [
2,
422,
42625,
14208,
1330,
5107,
198,
198,
6738,
4197,
3524,
13,
66,
324,
459,
305,
62,
33587,
1153,
68,
13,
27530,
1330,
20517,
459,
305,
18844,
1153,
68,
198,
6738,
6297,
26418,
62,
4666,
282,
62,
23914,
13,
23914,
1330,
24218,
5... | 3.12766 | 47 |
from .writer import MzIdentMLWriter, default_cv_list
__all__ = [
"MzIdentMLWriter",
"default_cv_list"
]
| [
6738,
764,
16002,
1330,
337,
89,
33234,
5805,
34379,
11,
4277,
62,
33967,
62,
4868,
628,
198,
834,
439,
834,
796,
685,
198,
220,
220,
220,
366,
44,
89,
33234,
5805,
34379,
1600,
198,
220,
220,
220,
366,
12286,
62,
33967,
62,
4868,
... | 2.425532 | 47 |
import pytest
import numpy as np
import scipy.sparse as sp
from joblib import cpu_count
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn import datasets
from sklearn.base import clone
from sklearn.datasets import make_classification
from sklearn.datasets import load_linnerud
from sklearn.ensemble import GradientBoostingRegressor, RandomForestClassifier
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import Lasso
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import Ridge
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import SGDRegressor
from sklearn.metrics import jaccard_score, mean_squared_error
from sklearn.multiclass import OneVsRestClassifier
from sklearn.multioutput import ClassifierChain, RegressorChain
from sklearn.multioutput import MultiOutputClassifier
from sklearn.multioutput import MultiOutputRegressor
from sklearn.svm import LinearSVC
from sklearn.base import ClassifierMixin
from sklearn.utils import shuffle
from sklearn.model_selection import GridSearchCV
from sklearn.dummy import DummyRegressor, DummyClassifier
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer
from sklearn.ensemble import StackingRegressor
# Import the data
iris = datasets.load_iris()
# create a multiple targets by randomized shuffling and concatenating y.
X = iris.data
y1 = iris.target
y2 = shuffle(y1, random_state=1)
y3 = shuffle(y1, random_state=2)
y = np.column_stack((y1, y2, y3))
n_samples, n_features = X.shape
n_outputs = y.shape[1]
n_classes = len(np.unique(y1))
classes = list(map(np.unique, (y1, y2, y3)))
# check multioutput has predict_proba
# check predict_proba passes
@pytest.mark.parametrize("response_method", ["predict_proba", "predict"])
def test_multi_output_not_fitted_error(response_method):
"""Check that we raise the proper error when the estimator is not fitted"""
moc = MultiOutputClassifier(LogisticRegression())
with pytest.raises(NotFittedError):
getattr(moc, response_method)(X)
def test_multi_output_delegate_predict_proba():
"""Check the behavior for the delegation of predict_proba to the underlying
estimator"""
# A base estimator with `predict_proba`should expose the method even before fit
moc = MultiOutputClassifier(LogisticRegression())
assert hasattr(moc, "predict_proba")
moc.fit(X, y)
assert hasattr(moc, "predict_proba")
# A base estimator without `predict_proba` should raise an AttributeError
moc = MultiOutputClassifier(LinearSVC())
assert not hasattr(moc, "predict_proba")
msg = "'LinearSVC' object has no attribute 'predict_proba'"
with pytest.raises(AttributeError, match=msg):
moc.predict_proba(X)
moc.fit(X, y)
assert not hasattr(moc, "predict_proba")
with pytest.raises(AttributeError, match=msg):
moc.predict_proba(X)
@pytest.mark.parametrize(
"estimator",
[
RandomForestClassifier(n_estimators=2),
MultiOutputClassifier(RandomForestClassifier(n_estimators=2)),
ClassifierChain(RandomForestClassifier(n_estimators=2)),
],
)
@pytest.mark.filterwarnings("ignore:`n_features_in_` is deprecated")
@pytest.mark.parametrize(
"estimator, dataset",
[
(
MultiOutputClassifier(DummyClassifierWithFitParams(strategy="prior")),
datasets.make_multilabel_classification(),
),
(
MultiOutputRegressor(DummyRegressorWithFitParams()),
datasets.make_regression(n_targets=3, random_state=0),
),
],
)
@pytest.mark.parametrize(
"MultiOutputEstimator, Estimator",
[(MultiOutputClassifier, LogisticRegression), (MultiOutputRegressor, Ridge)],
)
# FIXME: we should move this test in `estimator_checks` once we are able
# to construct meta-estimator instances
@pytest.mark.parametrize("order_type", [list, np.array, tuple])
def test_multioutputregressor_ducktypes_fitted_estimator():
"""Test that MultiOutputRegressor checks the fitted estimator for
predict. Non-regression test for #16549."""
X, y = load_linnerud(return_X_y=True)
stacker = StackingRegressor(
estimators=[("sgd", SGDRegressor(random_state=1))],
final_estimator=Ridge(),
cv=2,
)
reg = MultiOutputRegressor(estimator=stacker).fit(X, y)
# Does not raise
reg.predict(X)
| [
11748,
12972,
9288,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
629,
541,
88,
13,
82,
29572,
355,
599,
201,
198,
6738,
1693,
8019,
1330,
42804,
62,
9127,
201,
198,
201,
198,
6738,
1341,
35720,
13,
26791,
13557,
33407,
13... | 2.602941 | 1,836 |
import os
import filecmp
import unittest
from ccquery.data import json_controller
from ccquery.utils import io_utils
class TestJsonController(unittest.TestCase):
"""Test the json controller methods"""
def setUp(self):
"""Set up local variables"""
self.jsonl_file = os.path.join(
os.path.dirname(__file__), 'sample.jsonl')
self.txt_file = io_utils.change_extension(self.jsonl_file, 'txt')
self.copy_txt = io_utils.change_extension(self.jsonl_file, 'copy.txt')
io_utils.check_file_readable(self.jsonl_file)
def tearDown(self):
"""Remove temporary files"""
if os.path.exists(self.copy_txt):
os.remove(self.copy_txt)
def test_load_jsonl(self):
"""Load json-lines data"""
keys = [
'baths', 'beds', 'city', 'latitude', 'longitude',
'price', 'sale_date', 'sq__ft', 'state', 'type', 'zip']
cities = ['SACRAMENTO'] * 8 + ['RANCHO CORDOVA', 'RIO LINDA']
data = json_controller.load_fields(self.jsonl_file, keys)
self.assertEqual(keys, sorted(list(data.keys())))
self.assertEqual(10, len(data[keys[0]]))
self.assertEqual(cities, data['city'])
def test_load_field_jsonl(self):
"""Load json-lines data for one single field"""
cities = ['SACRAMENTO'] * 8 + ['RANCHO CORDOVA', 'RIO LINDA']
data = json_controller.load_field(self.jsonl_file, 'city')
self.assertEqual(cities, data)
def test_load_chunk(self):
"""Load partial data"""
# without shuffle
input_data, target_data = json_controller.load_chunk(
self.jsonl_file, 5, input_field='city', target_field='zip')
self.assertEqual(5, len(input_data))
self.assertEqual(5, len(target_data))
# with shuffle
input_data, target_data = json_controller.load_chunk(
self.jsonl_file, 5,
input_field='city', target_field='zip', to_shuffle=True)
self.assertEqual(5, len(input_data))
self.assertEqual(5, len(target_data))
def test_stream_jsonl(self):
"""Load one entry at a time"""
index = 0
input_data = ['SACRAMENTO'] * 8 + ['RANCHO CORDOVA', 'RIO LINDA']
target_data = [
95838, 95823, 95815, 95815, 95824,
95841, 95842, 95820, 95670, 95673]
for input_value, target_value in json_controller.stream(
self.jsonl_file, input_field='city', target_field='zip'):
self.assertEqual(input_value, input_data[index])
self.assertEqual(target_value, target_data[index])
index += 1
def test_stream_field_jsonl(self):
"""Load one field from one entry at a time"""
cities = ['SACRAMENTO'] * 8 + ['RANCHO CORDOVA', 'RIO LINDA']
values = list(json_controller.stream_field(self.jsonl_file, 'city'))
self.assertEqual(cities, values)
def test_stream_chunk_jsonl(self):
"""Load partial data"""
cities = ['SACRAMENTO'] * 5
zip_codes = [95838, 95823, 95815, 95815, 95824]
values = json_controller.stream_chunk(self.jsonl_file, 5, 'city', 'zip')
values = list(values)[0]
self.assertEqual(cities, values[0])
self.assertEqual(zip_codes, values[1])
def test_store_txt(self):
"""Store content to text file"""
# test correct request
data = json_controller.load_field(self.jsonl_file, 'city')
json_controller.store_text(data, self.copy_txt)
self.assertTrue(
filecmp.cmp(self.copy_txt, self.txt_file, shallow=False),
'Output file different from input file')
# test incorrect request: no output
with self.assertRaises(Exception) as context:
json_controller.store_text('error', 'file.txt')
self.assertTrue('Method expects list' in str(context.exception))
| [
11748,
28686,
198,
11748,
2393,
48991,
198,
11748,
555,
715,
395,
198,
6738,
36624,
22766,
13,
7890,
1330,
33918,
62,
36500,
198,
6738,
36624,
22766,
13,
26791,
1330,
33245,
62,
26791,
198,
198,
4871,
6208,
41,
1559,
22130,
7,
403,
715,... | 2.249425 | 1,740 |
# -*- coding: utf-8 -*-
# @Time : 2020/9/2 13:35
# @Author : KevinHoo
# @Site :
# @File : URLdecoder.py.py
# @Software: PyCharm
# @Email : hu.rui0530@gmail.com
from decodeAllinOne import jsdecode
decode_dict = {
' ': '%20',
'"': '%22',
'#': '%23',
'%': '%25',
'&': '%26',
'(': '%28',
')': '%29',
'+': '%2B',
',': '%2C',
'/': '%2F',
':': '%3A',
';': '%3B',
'<': '%3C',
'=': '%3D',
'>': '%3E',
'?': '%3F',
'@': '%40',
'\\': '%5C',
'|': '%7C',
}
# 百分号解码
if __name__ == '__main__':
# sigcipher = ""
# sigcipher = seperatorOff(sigcipher)
# s = ""
# s = decode(s)
print(seperatorOff('https://r2---sn-jxnj5-cjoe.googlevideo.com/videoplayback%3Fexpire%3D1599061097%26ei'
'%3DCWhPX_uJKs-uW__QpIgI%26ip%3D13.69.158.161%26id%3Do-AMptcNa7sVQHQBiiBb6KPrXw0T9-FZFvMtv6lY'
'-9GIEZ%26itag%3D18%26source%3Dyoutube%26requiressl%3Dyes%26mh%3DLh%26mm%3D31%252C29%26mn%3Dsn'
'-jxnj5-cjoe%252Csn-4g5e6nzl%26ms%3Dau%252Crdu%26mv%3Du%26mvi%3D2%26pl%3D23%26gcr%3Die%26vprv'
'%3D1%26mime%3Dvideo%252Fmp4%26gir%3Dyes%26clen%3D17793698%26ratebypass%3Dyes%26dur%3D295.404'
'%26lmt%3D1583110775970339%26mt%3D1599039073%26fvip%3D2%26c%3DWEB%26txp%3D5531432%26sparams'
'%3Dexpire%252Cei%252Cip%252Cid%252Citag%252Csource%252Crequiressl%252Cgcr%252Cvprv%252Cmime'
'%252Cgir%252Cclen%252Cratebypass%252Cdur%252Clmt%26lsparams%3Dmh%252Cmm%252Cmn%252Cms%252Cmv'
'%252Cmvi%252Cpl%26lsig'
'%3DAG3C_xAwRQIgXEnR8OzzFXGWYiKu9kokr7HfLvOQ1OQk5hSmTSk7UcICIQCtyWBGRuEDV1gnxRWg2ZYXt5ru0ES'
'-QfzK4deMU6pFDA%253D%253D'))
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
7575,
1058,
12131,
14,
24,
14,
17,
1511,
25,
2327,
198,
2,
2488,
13838,
1058,
7939,
39,
2238,
198,
2,
2488,
29123,
1058,
220,
198,
2,
2488,
8979,
1058,
1028... | 1.504992 | 1,202 |
# Copyright (C) 2016-2018 Alibaba Group Holding Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from xdl.python import pybind
from xdl.python.lib.tensorshape import TensorShape
from xdl.python.lib.datatype import DataType
| [
2,
15069,
357,
34,
8,
1584,
12,
7908,
41992,
4912,
31703,
15302,
198,
2,
220,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
35... | 3.946602 | 206 |
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 11 00:19:06 2022
@author: Moritz Hobein
"""
import argparse
import numpy as np
import pandas as pd
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import squareform
from os.path import commonprefix
if __name__ == '__main__':
main()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
19480,
3158,
1367,
3571,
25,
1129,
25,
3312,
33160,
198,
198,
31,
9800,
25,
3461,
4224,
367,
5910,
259,
198,
37811,
198,
11748,
1822,
29572,
198,... | 2.518519 | 135 |
import attr
@attr.s
class Ellipsoid:
"""Ellipsoid used for mesh calculations
Args:
a (float): semi-major axis
b (float): semi-minor axis
"""
a: float = attr.ib()
b: float = attr.ib()
e2: float = attr.ib(init=False)
| [
11748,
708,
81,
628,
198,
31,
35226,
13,
82,
198,
4871,
7122,
541,
568,
312,
25,
198,
220,
220,
220,
37227,
30639,
541,
568,
312,
973,
329,
19609,
16765,
628,
220,
220,
220,
943,
14542,
25,
198,
220,
220,
220,
220,
220,
220,
220,
... | 2.176471 | 119 |
from pyEyeTrack.EyeTracking.PupilTrackingClass import PupilTracking
from pyEyeTrack.EyeTracking.BlinkingClass import Blinking
from pyEyeTrack.EyeTracking.PupilBlinkingClass import PupilBlinking
from pyEyeTrack.AudioVideoRecording.VideoRecordingClass import VideoRecorder
from pyEyeTrack.AudioVideoRecording.AudioRecordingClass import AudioRecorder
import threading
import importlib
import sys
import os
class pyEyeTrack():
"""PyEyeTrack is a pupil tracking library, built on top of the
Python programming language. The library provides various
functionalities like pupil tracking, blink detection, video,
and audio recording on the user-specified UI.
"""
def pyEyeTrack_runner(
self,
UI=False,
UI_file_name="User_ImageUI_EscExit",
pupilTracking=False,
blinkDetection=False,
video_source=0,
eyeTrackingLog=True,
eyeTrackingFileName='EyeTrackLog',
videoRecorder=False,
videoName='video',
audioRecorder=False,
audioName='audio',
destinationPath='/Output'):
"""
This function enables the user to run the functionalities of the
library simultaneously.
Functionalities include running the UI specified by the user,
pupil tracking, blink detection, video recording and audio recording.
The user can set flags to run the combination of these functionalities.
The function also allows the user to name the output file.
Args:
UI (bool, optional): This parameter enables the user to run UI.
Default: False.
UI_file_name (str, optional): This parameter takes the file name
of the UI. Default: "User_ImageUI_EscExit".
pupilTracking (bool, optional): This parameter enables the user to
run pupil tracking. Default: False.
blinkDetection (bool, optional): This parameter enables the user
to run blink detection. Default: False.
video_source (int/str, optional): This parameter takes either
device index or a video file as input. Default: 0.
eyeTrackingLog (bool, optional): This parameter enables the user to
generate a CSV of pupil tracking/ blink detection. Default: True.
eyeTrackingFileName (str, optional): This parameter takes the file name
for the CSV. Default: 'EyeTrackLog'.
videoRecorder (bool, optional): This parameter enables the user to
record video. Default: False.
videoName (str, optional): This parameter enables the user to specify
the filename with which the recorded video is to be saved.
Default: 'video'.
audioRecorder (bool, optional): This parameter enables the user to
record audio. Default: False.
audioName (str, optional): This parameter enables the user to specify
the filename with which the recorded video is to be saved.
Default: 'audio'.
destinationPath (str, optional): The parameter enables the user to specify
the location of the output files. Default: ‘/Output’.
"""
startEyeTracking = False
outputPath = destinationPath
if os.access(
destinationPath,
os.W_OK) == False and destinationPath != '/Output':
print('You may not have write permission.Try changing the destination path.')
sys.exit()
if os.path.exists(
destinationPath) == False and destinationPath != '/Output':
os.mkdir(destinationPath)
elif destinationPath == '/Output':
currentPath = os.getcwd()
outputPath = currentPath + '/Output'
if os.path.exists(outputPath) == False:
os.mkdir(outputPath)
outputPath = outputPath + '/'
if (pupilTracking or blinkDetection) and videoRecorder:
print('Video Recording and Eye Tracking functionalities '
'require access to the webcam simultaneously and are therefore '
'recommended not to run these functionalities simultaneously.')
sys.exit()
if pupilTracking or blinkDetection:
startEyeTracking = True
if video_source != 0:
if os.path.exists(video_source) == False:
print('Please specify correct path for the video source.')
sys.exit()
if blinkDetection and pupilTracking:
eyeTracking = PupilBlinking(video_source)
eyeTrackingThread = threading.Thread(target=eyeTracking.start)
if blinkDetection and pupilTracking == False:
eyeTracking = Blinking(video_source)
eyeTrackingThread = threading.Thread(target=eyeTracking.start)
if pupilTracking and blinkDetection == False:
eyeTracking = PupilTracking(video_source)
eyeTrackingThread = threading.Thread(target=eyeTracking.start)
if videoRecorder:
videoOutputPath = outputPath + videoName
videoRecorder = VideoRecorder(videoOutputPath)
videoRecorderThread = threading.Thread(target=videoRecorder.main)
if audioRecorder:
audioOutputPath = outputPath + audioName
audioRecorder = AudioRecorder(outputPath + audioName)
audioRecorderThread = threading.Thread(target=audioRecorder.main)
if UI:
module = self.dynamic_import(UI_file_name)
if hasattr(module, 'main'):
uiThread = threading.Thread(target=module.main)
else:
print(
'UI needs a main method. Please Refer documentation for more information.')
sys.exit()
if UI:
uiThread.start()
if startEyeTracking:
eyeTrackingThread.start()
if videoRecorder:
videoRecorderThread.start()
if audioRecorder:
audioRecorderThread.start()
if UI:
uiThread.join()
if startEyeTracking:
eyeTrackingThread.join()
if eyeTrackingLog:
eyeTrackingOutput = outputPath + eyeTrackingFileName
eyeTracking.csv_writer(eyeTrackingOutput)
if videoRecorder:
videoRecorderThread.join()
videoRecorder.stop()
if audioRecorder:
audioRecorderThread.join()
audioRecorder.stop()
| [
6738,
12972,
24876,
24802,
13,
24876,
2898,
5430,
13,
47,
929,
346,
2898,
5430,
9487,
1330,
350,
929,
346,
2898,
5430,
198,
6738,
12972,
24876,
24802,
13,
24876,
2898,
5430,
13,
3629,
8040,
9487,
1330,
1086,
8040,
198,
6738,
12972,
2487... | 2.389504 | 2,801 |
"""-----------------------------------------------------------------------------
Name: thematic_accuracy.py
Purpose: Creates the themtic accuracy score from the feature level metadata.
Description: This tool scores the thematic accuracy of a TDS 6.1 curve feature
class. The tool uses global population data to infer an appropriate
collection scale for the data, compares that scale to the
scalability trigger, and generates a score based on that comparison.
The score represents whether we think the scale is appropriate.
Requirements: Python 2.7.x/Python3.x, ArcGIS 10.4+/Pro 1.2+
Author(s): Gregory Brunner, Contractor for National Geospatial-Intelligence
Agency (NGA) | Andrew Chapkowski, Contractor NGA
Program Manager: Derek Silva, NGA (Derek.A.Silva@nga.mil)
Created: August, 2015
Modified: August 12, 2016 | April, 2017
Copyright: Esri
License: TBD
-----------------------------------------------------------------------------"""
from __future__ import division
from __future__ import print_function
import os
import sys
from geodataset import SpatialDataFrame
import numpy as np
import pandas as pd
#Using the ArcGIS API for Python
import arcgis
from arcgis.gis import GIS
from arcgis.features import FeatureLayer
from arcgis.geometry import filters
from arcgis.geometry import Geometry
#Import logic to create layer selection
import sotd_config as config
import arcpy
from arcpy import env
from arcpy import da
FIELDS = ['DOM_SCALE',
'DOM_COUNT',
'DOM_PER',
'MIN_SCALE',
'MIN_PER',
'MAX_SCALE',
'MAX_PER',
'CNT_2500',
'CNT_5000',
'CNT_12500',
'CNT_25000',
'CNT_50000',
'CNT_100000',
'CNT_250000',
'CNT_500000',
'CNT_1000000',
'PER_2500',
'PER_5000',
'PER_12500',
'PER_25000',
'PER_50000',
'PER_100000',
'PER_250000',
'PER_500000',
'PER_1000000',
'COUNT_', #Add Underscore if AGOL
'MISSION_PLANNING',
'POPULATION_SCALE',
'THEM_ACC_SCORE']
#--------------------------------------------------------------------------
class FunctionError(Exception):
""" raised when a function fails to run """
pass
#--------------------------------------------------------------------------
def trace():
"""
trace finds the line, the filename
and error message and returns it
to the user
"""
import traceback
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
# script name + line number
line = tbinfo.split(", ")[1]
# Get Python syntax error
#
synerror = traceback.format_exc().splitlines()[-1]
return line, __file__, synerror
def get_equal_breaks_score(mean):
""""""
ratio = mean
if (ratio >= 0 and ratio <= 0.5):
return "G"
elif (ratio > 0.5 and ratio <= 1.0):
return "R"
elif (ratio > 1.0 and ratio <= 1.5):
return "L"
elif (ratio > 1.5 and ratio <= 2.0):
return "S/U"
else:
return 0
#--------------------------------------------------------------------------
#--------------------------------------------------------------------------
def extend_table(table, rows=None):
"""
Adds the required columns to the table and appends new records if
given.
"""
try:
if rows is None:
rows = []
dtypes = np.dtype(
[
('_ID', np.int),
('DOM_SCALE', np.float64),
('DOM_COUNT', np.int32),
('DOM_PER', np.float64),
('MIN_SCALE', np.float64),
('MIN_PER', np.float64),
('MAX_SCALE', np.float64),
('MAX_PER', np.float64),
('CNT_2500', np.int32),
('CNT_5000', np.int32),
('CNT_12500', np.int32),
('CNT_25000', np.int32),
('CNT_50000', np.int32),
('CNT_100000', np.int32),
('CNT_250000', np.int32),
('CNT_500000', np.int32),
('CNT_1000000', np.int32),
('PER_2500', np.float64),
('PER_5000', np.float64),
('PER_12500', np.float64),
('PER_25000', np.float64),
('PER_50000', np.float64),
('PER_100000', np.float64),
('PER_250000', np.float64),
('PER_500000', np.float64),
('PER_1000000', np.float64),
('COUNT', np.int32),
('MISSION_PLANNING', '|S1024'),
('POPULATION_SCALE', '|S1024'),
('THEM_ACC_SCORE', np.float64)
]
)
array = np.array(rows, dtypes)
da.ExtendTable(table, "OID@", array, "_ID", False)
return table
except:
line, filename, synerror = trace()
raise FunctionError(
{
"function": "extend_table",
"line": line,
"filename": filename,
"synerror": synerror,
"arc" : str(arcpy.GetMessages(2))
}
)
def create_grls(grid, population, output_features):
"""Creates a table to join to the grid dataset"""
try:
#output_features = os.path.join(env.scratchGDB, "temp_grid")
reclass_population = os.path.join(env.scratchFolder, "rast_temp.tif")
zonal_table = os.path.join(env.scratchGDB, 'zonalstats') #in_memory\\table"
if arcpy.Exists(reclass_population):
arcpy.Delete_management(reclass_population)
if arcpy.Exists(zonal_table):
arcpy.Delete_management(zonal_table)
output_features = arcpy.CopyFeatures_management(grid, output_features)#[0]
arcpy.AddMessage(output_features)
arcpy.AddMessage(reclass_population)
arcpy.AddMessage(zonal_table)
arcpy.gp.Reclassify_sa(population, "VALUE", "0 0;1 2;2 2;3 2;4 2;5 2;6 1;7 1;8 1;9 1;10 1", reclass_population, "DATA")
arcpy.gp.ZonalStatisticsAsTable_sa(output_features, "OBJECTID", reclass_population,zonal_table, "DATA", "ALL")
#zonal_oid = arcpy.Describe(zonal_table).OIDFieldName
arcpy.JoinField_management(output_features, "OBJECTID",
zonal_table, "OBJECTID_1",
"Count;Area;Min;Max;Range;Variety;Majority;Minority;Median;Mean;Std;Sum")
arcpy.Delete_management(reclass_population)
return output_features
except:
line, filename, synerror = trace()
raise FunctionError(
{
"function": "create_grls",
"line": line,
"filename": filename,
"synerror": synerror,
"arc" : str(arcpy.GetMessages(2))
}
)
#--------------------------------------------------------------------------
def thematic_accuracy(gis, df_current, output_features, grid_filter, geom, value_field):
""" main driver of program """
try:
out_fl = FeatureLayer(gis=gis, url=output_features)
out_sdf = out_fl.query(geometry_filter=grid_filter,return_geometry=True,
return_all_records=True).df
sq = df_current['SHAPE'].disjoint(geom) == False
df_current = df_current[sq].copy()
if len(df_current) > 0:
count = len(df_current)
max_val = df_current[value_field].max()
max_scale = 100 * (len(df_current[df_current[value_field] == max_val])/count)
min_val = df_current[value_field].min()
min_scale = 100 * (len(df_current[df_current[value_field] == min_val])/count)
vc = df_current[value_field].value_counts()
common = df_current[value_field].mode() # Used in MSP
if len(common) > 0:
common = common[0]
common_count = vc[common]
common_per = (vc[common]/count) * 100
else:
common = min_val
common_count = 1
common_per = 100
count_2500 = 0
count_5000 = 0
count_12500 = 0
count_25000 = 0
count_50000 = 0
count_100000 = 0
count_250000 = 0
count_500000 = 0
count_1000000 = 0
if 2500 in vc:
count_2500 = vc[2500]
if 5000 in vc:
count_5000 = vc[5000]
if 12500 in vc:
count_12500 = vc[12500]
if 25000 in vc:
count_25000 = vc[25000]
if 50000 in vc:
count_50000 = vc[50000]
if 100000 in vc:
count_100000 = vc[100000]
if 250000 in vc:
count_250000 = vc[250000]
if 500000 in vc:
count_500000 = vc[500000]
if 1000000 in vc:
count_1000000 = vc[1000000]
MSP = get_msp(scale=common) # SHOULD UPDATE MISSION_PLANNING FIELD
SCORE_VALUE = get_equal_breaks_score(mean=out_sdf['MEAN'][0])# get_equal_breaks_score(output_features, ['MEAN','EQUAL']) # PUT SCORE IN EQUAL
GRLS = SCORE_VALUE
domScale = common
# FIELD 1 is the source, Field 2 is the field to be updated
#df_current['EQUAL'] = SCORE_VALUE # ASSIGNS EQUAL TO LANSCAN_SCALE
#29 field
out_sdf[FIELDS[0]][0]=common# median
out_sdf[FIELDS[1]][0]=common_count # % common
out_sdf[FIELDS[2]][0]=round(common_per,1)
out_sdf[FIELDS[3]][0]=min_val
out_sdf[FIELDS[4]][0]=round(min_scale,1)
out_sdf[FIELDS[5]][0]=max_val
out_sdf[FIELDS[6]][0]=round(max_scale,1)
out_sdf[FIELDS[7]][0]=count_2500
out_sdf[FIELDS[8]][0]=count_5000
out_sdf[FIELDS[9]][0]=count_12500
out_sdf[FIELDS[10]][0]=count_25000
out_sdf[FIELDS[11]][0]=count_50000
out_sdf[FIELDS[12]][0]=count_100000
out_sdf[FIELDS[13]][0]=count_250000
out_sdf[FIELDS[14]][0]=count_500000
out_sdf[FIELDS[15]][0]=count_1000000
out_sdf[FIELDS[16]][0]=round(count_2500*100/count,1)
out_sdf[FIELDS[17]][0]=round(count_5000*100/count,1)
out_sdf[FIELDS[18]][0]=round(count_12500*100/count,1)
out_sdf[FIELDS[19]][0]=round(count_25000*100/count,1)
out_sdf[FIELDS[20]][0]=round(count_50000*100/count,1)
out_sdf[FIELDS[21]][0]=round(count_100000*100/count,1)
out_sdf[FIELDS[22]][0]=round(count_250000*100/count,1)
out_sdf[FIELDS[23]][0]=round(count_500000*100/count,1)
out_sdf[FIELDS[24]][0]=round(count_1000000*100/count,1)
out_sdf[FIELDS[25]][0]=count
out_sdf[FIELDS[26]][0]=str(MSP) #MISSION_PLANNING FIELD
out_sdf[FIELDS[27]][0]=SCORE_VALUE#), # THEMATIC SCALE VALUE
out_sdf[FIELDS[28]][0]=population_scale(common, SCORE_VALUE) # POPULATION_SCALE
else:
# results.append(tuple([oid] + [-1] * 25 + [0] + ['N/A']*2 + [0]))
out_sdf[FIELDS[0]][0]=-1
out_sdf[FIELDS[1]][0]=-1
out_sdf[FIELDS[2]][0]=-1
out_sdf[FIELDS[3]][0]=-1
out_sdf[FIELDS[4]][0]=-1
out_sdf[FIELDS[5]][0]=-1
out_sdf[FIELDS[6]][0]=-1
out_sdf[FIELDS[7]][0]=-1
out_sdf[FIELDS[8]][0]=-1
out_sdf[FIELDS[9]][0]=-1
out_sdf[FIELDS[10]][0]=-1
out_sdf[FIELDS[11]][0]=-1
out_sdf[FIELDS[12]][0]=-1
out_sdf[FIELDS[13]][0]=-1
out_sdf[FIELDS[14]][0]=-1
out_sdf[FIELDS[15]][0]=-1
out_sdf[FIELDS[16]][0]=-1
out_sdf[FIELDS[17]][0]=-1
out_sdf[FIELDS[18]][0]=-1
out_sdf[FIELDS[19]][0]=-1
out_sdf[FIELDS[20]][0]=-1
out_sdf[FIELDS[21]][0]=-1
out_sdf[FIELDS[22]][0]=-1
out_sdf[FIELDS[23]][0]=-1
out_sdf[FIELDS[24]][0]=-1
out_sdf[FIELDS[25]][0]=0
out_sdf[FIELDS[26]][0]='N/A'
out_sdf[FIELDS[27]][0]='N/A'
out_sdf[FIELDS[28]][0]=0
del geom
del sq
del df_current
return out_sdf, out_fl
except FunctionError as f_e:
messages = f_e.args[0]
## #arcpy.AddError("error in function: %s" % messages["function"])
## #arcpy.AddError("error on line: %s" % messages["line"])
## #arcpy.AddError("error in file name: %s" % messages["filename"])
## #arcpy.AddError("with error message: %s" % messages["synerror"])
## #arcpy.AddError("ArcPy Error Message: %s" % messages["arc"])
except:
line, filename, synerror = trace()
## #arcpy.AddError("error on line: %s" % line)
## #arcpy.AddError("error in file name: %s" % filename)
## #arcpy.AddError("with error message: %s" % synerror) | [
37811,
10097,
32501,
198,
5376,
25,
606,
1512,
62,
4134,
23843,
13,
9078,
198,
30026,
3455,
25,
7921,
274,
262,
606,
13370,
9922,
4776,
422,
262,
3895,
1241,
20150,
13,
198,
11828,
25,
770,
2891,
8198,
262,
606,
1512,
9922,
286,
257,
... | 1.941756 | 6,696 |
from rest_framework import serializers
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
628
] | 5 | 8 |
# Generated by Django 3.2 on 2021-04-19 15:20
import django.db.models.deletion
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
17,
319,
33448,
12,
3023,
12,
1129,
1315,
25,
1238,
201,
198,
201,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
201,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
... | 2.645833 | 48 |
from django.shortcuts import render, redirect
from users.models import User
from django.contrib.auth.hashers import make_password
from django.contrib.auth.decorators import login_required
import os
from django.conf import settings
from django.core.files.storage import default_storage
@login_required(login_url='/login/')
def create_user_view(request):
"""
This view Renders Create User Page
"""
if request.user.is_admin:
context = {
"title": "Create User"
}
return render(request, 'create_user.html', context)
else:
return redirect('/dashboard/')
@login_required(login_url='/login/')
def store_user(request):
"""
This view creates user and Store user in DB
"""
if request.user.is_admin:
formData = User()
name = request.POST.get("name")
email = request.POST.get("email")
password = request.POST.get("password")
phone = request.POST.get("phone")
designation = request.POST.get("designation")
isActive = request.POST.get("user_status")
role = request.POST.get("role_id")
if 'avatar' in request.FILES:
avatar_file = request.FILES["avatar"]
save_path = os.path.join(settings.MEDIA_ROOT, avatar_file.name)
default_storage.save(save_path, avatar_file)
formData.avatar = avatar_file.name
formData.name = name
formData.email = email
formData.password = make_password(password)
formData.phone = phone
formData.designation = designation
formData.status = isActive
if isActive == 'Active':
formData.is_active = True
else :
formData.is_active = False
if role == "s":
formData.is_staff = True
elif role == "a" :
formData.is_admin = True
elif role == "v" :
formData.is_viewer = True
# save_path = os.path.join(settings.MEDIA_ROOT, avatar_file.name)
# default_storage.save(save_path, avatar_file)
#
# formData.avatar = avatar_file.name
formData.save()
return redirect("/create-user/")
else:
return redirect('/dashboard/')
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
18941,
198,
6738,
2985,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
10134,
7084,
1330,
787,
62,
28712,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
... | 2.337526 | 954 |
import os
from tempfile import gettempdir
import zipfile
from urllib.request import urlretrieve
URL = "http://cdstar.shh.mpg.de//bitstreams/EAEA0-A662-A618-386E-0/ldh_description.bib.zip"
| [
11748,
28686,
198,
6738,
20218,
7753,
1330,
651,
29510,
15908,
198,
11748,
19974,
7753,
198,
6738,
2956,
297,
571,
13,
25927,
1330,
19016,
1186,
30227,
198,
198,
21886,
796,
366,
4023,
1378,
10210,
7364,
13,
1477,
71,
13,
3149,
70,
13,
... | 2.567568 | 74 |
import json
from pprint import pprint
with open('metric-1-list.json') as data_file:
data = json.load(data_file)
pprint(data["rows"]["panels"]["targets"][0]) | [
11748,
33918,
198,
6738,
279,
4798,
1330,
279,
4798,
198,
198,
4480,
1280,
10786,
4164,
1173,
12,
16,
12,
4868,
13,
17752,
11537,
355,
1366,
62,
7753,
25,
220,
220,
220,
220,
198,
220,
220,
220,
1366,
796,
33918,
13,
2220,
7,
7890,
... | 2.426471 | 68 |
# -*- coding: utf-8 -*-
"""
idfy_rest_client.models.konsern_link
This file was automatically generated for Idfy by APIMATIC v2.0 ( https://apimatic.io )
"""
class KonsernLink(object):
"""Implementation of the 'KonsernLink' model.
TODO: type model description here.
Attributes:
orgnr_overste_mor_field (int): TODO: type description here.
orgnr_naermeste_mor_field (int): TODO: type description here.
lopenr_field (int): TODO: type description here.
niva_deltagende_field (int): TODO: type description here.
landkode_deltagende_field (string): TODO: type description here.
orgnr_deltagende_field (int): TODO: type description here.
navn_deltagende_field (string): TODO: type description here.
eierandel_deltagende_field (float): TODO: type description here.
"""
# Create a mapping from Model property names to API property names
_names = {
"orgnr_overste_mor_field":'orgnrOversteMorField',
"orgnr_naermeste_mor_field":'orgnrNaermesteMorField',
"lopenr_field":'lopenrField',
"niva_deltagende_field":'nivaDeltagendeField',
"landkode_deltagende_field":'landkodeDeltagendeField',
"orgnr_deltagende_field":'orgnrDeltagendeField',
"navn_deltagende_field":'navnDeltagendeField',
"eierandel_deltagende_field":'eierandelDeltagendeField'
}
def __init__(self,
orgnr_overste_mor_field=None,
orgnr_naermeste_mor_field=None,
lopenr_field=None,
niva_deltagende_field=None,
landkode_deltagende_field=None,
orgnr_deltagende_field=None,
navn_deltagende_field=None,
eierandel_deltagende_field=None,
additional_properties = {}):
"""Constructor for the KonsernLink class"""
# Initialize members of the class
self.orgnr_overste_mor_field = orgnr_overste_mor_field
self.orgnr_naermeste_mor_field = orgnr_naermeste_mor_field
self.lopenr_field = lopenr_field
self.niva_deltagende_field = niva_deltagende_field
self.landkode_deltagende_field = landkode_deltagende_field
self.orgnr_deltagende_field = orgnr_deltagende_field
self.navn_deltagende_field = navn_deltagende_field
self.eierandel_deltagende_field = eierandel_deltagende_field
# Add additional model properties to the instance
self.additional_properties = additional_properties
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
orgnr_overste_mor_field = dictionary.get('orgnrOversteMorField')
orgnr_naermeste_mor_field = dictionary.get('orgnrNaermesteMorField')
lopenr_field = dictionary.get('lopenrField')
niva_deltagende_field = dictionary.get('nivaDeltagendeField')
landkode_deltagende_field = dictionary.get('landkodeDeltagendeField')
orgnr_deltagende_field = dictionary.get('orgnrDeltagendeField')
navn_deltagende_field = dictionary.get('navnDeltagendeField')
eierandel_deltagende_field = dictionary.get('eierandelDeltagendeField')
# Clean out expected properties from dictionary
for key in cls._names.values():
if key in dictionary:
del dictionary[key]
# Return an object of this model
return cls(orgnr_overste_mor_field,
orgnr_naermeste_mor_field,
lopenr_field,
niva_deltagende_field,
landkode_deltagende_field,
orgnr_deltagende_field,
navn_deltagende_field,
eierandel_deltagende_field,
dictionary)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
37811,
201,
198,
220,
220,
220,
4686,
24928,
62,
2118,
62,
16366,
13,
27530,
13,
74,
684,
1142,
62,
8726,
201,
198,
201,
198,
220,
220,
220,
770,
2393... | 2.103647 | 2,084 |
from NodeDefender.mqtt.command import fire, topic_format
| [
6738,
19081,
7469,
2194,
13,
76,
80,
926,
13,
21812,
1330,
2046,
11,
7243,
62,
18982,
198
] | 3.352941 | 17 |
from src.utils.helper_embedding import reshape_df
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
df = pd.read_csv(filepath_or_buffer='data/processed/content_store_clean.csv')
# convert dtype object to unicode string
df['text_clean'] = df['text_clean'].astype('U').values
df = df.sample(n=10000, random_state=42)
# compute term-frequency/normalised bag of word word vectors
tf_vec = TfidfVectorizer(use_idf=False,
norm='l2',
max_features=512)
tf_content = tf_vec.fit_transform(raw_documents=df['text_clean'])
tf_word_embeddings = pd.DataFrame(data=tf_content.toarray(),
columns=tf_vec.get_feature_names())
tf_word_embeddings = reshape_df(df=tf_word_embeddings,
col_name='bow_embeddings')
tf_word_embeddings.to_pickle(path='data/processed/tf_embeddings.pkl')
# compute tf-idf word vectors
tfidf_vec = TfidfVectorizer(use_idf=True,
lowercase=True,
max_features=512)
tfidf_content = tfidf_vec.fit_transform(raw_documents=df['text_clean'])
tfidf_word_embeddings = pd.DataFrame(data=tfidf_content.toarray(),
columns=tfidf_vec.get_feature_names())
tfidf_word_embeddings = reshape_df(df=tfidf_word_embeddings,
col_name='tfidf_embeddings')
tf_word_embeddings.to_pickle(path='data/processed/tfidf_embeddings.pkl')
| [
6738,
12351,
13,
26791,
13,
2978,
525,
62,
20521,
12083,
1330,
27179,
1758,
62,
7568,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
30053,
62,
2302,
7861,
13,
5239,
1330,
309,
69,
312,
69,
38469,
7509,
628,
... | 2.060942 | 722 |
import unittest
from helpers import absolute_sample_path
from pdfminer.high_level import extract_text
from pdfminer.layout import LAParams
test_strings = {
"simple1.pdf": "Hello \n\nWorld\n\nHello \n\nWorld\n\n"
"H e l l o \n\nW o r l d\n\n"
"H e l l o \n\nW o r l d\n\n\f",
"simple1.pdf_no_boxes_flow": "Hello \nWorld\nHello \nWorld\n"
"H e l l o \nW o r l d\n"
"H e l l o \nW o r l d\n\f",
"simple2.pdf": "\f",
"simple3.pdf": "Hello\n\nHello\n\nWorld\n\nWorld\n\n\f",
}
if __name__ == "__main__":
unittest.main()
| [
11748,
555,
715,
395,
198,
198,
6738,
49385,
1330,
4112,
62,
39873,
62,
6978,
198,
6738,
37124,
1084,
263,
13,
8929,
62,
5715,
1330,
7925,
62,
5239,
198,
6738,
37124,
1084,
263,
13,
39786,
1330,
406,
2969,
283,
4105,
628,
628,
198,
... | 1.778689 | 366 |
import lime
from lime import lime_image
from lime.wrappers.scikit_image import SegmentationAlgorithm
import tensorflow as tf
import keras.backend as K
import numpy as np
from smerf.models import *
# Uses LIME library to obtain feature attributions | [
11748,
28738,
198,
6738,
28738,
1330,
28738,
62,
9060,
198,
6738,
28738,
13,
29988,
11799,
13,
36216,
15813,
62,
9060,
1330,
1001,
5154,
341,
2348,
42289,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
11748,
41927,
292,
13,
1891,
437,
... | 3.594203 | 69 |
# -*- coding: utf-8 -*-
import enum
import collections
from typing import NamedTuple, Optional, Union, Collection
from mpl_events import MplEventDispatcher, MplObject_Type
class MouseButton(enum.Enum):
"""
"""
ANY = 0
LEFT = 1
MIDDLE = 2
RIGHT = 3
class AxisType(enum.Enum):
"""
"""
X = 'x'
Y = 'y'
ALL = 'xy'
class KeyModifier(enum.Flag):
"""
"""
NO = 0
CTRL = 2
ALT = 4
class Key(NamedTuple):
"""
"""
key: Optional[str]
modifier: KeyModifier
class InteractorBase(MplEventDispatcher):
"""The base class for all interactors
"""
def update(self):
"""Updates and redraw canvas
"""
self.figure.canvas.draw_idle()
@staticmethod
def parse_key(key: str) -> Key:
"""Parses key string that comes from mpl KeyEvent
"""
if not key:
return Key(key=None, modifier=KeyModifier.NO)
modifiers = collections.OrderedDict([
('ctrl+alt+', KeyModifier.CTRL | KeyModifier.ALT),
('ctrl+', KeyModifier.CTRL),
('alt+', KeyModifier.ALT),
('_none', KeyModifier.NO),
])
modifier = '_none'
for m in modifiers:
if m in key:
key = key.replace(m, '')
modifier = m
break
return Key(key=key, modifier=modifiers[modifier])
def check_key(self, key: Union[str, Key], key_set: Collection[str],
modifier: Optional[KeyModifier] = None):
"""Checks the key for given set of keys and optionally modifier
"""
if not isinstance(key, Key):
key = self.parse_key(key)
if not key:
return False
k_ok = key.key in key_set
m_ok = True if modifier is None else key.modifier == modifier
return k_ok and m_ok
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
33829,
198,
11748,
17268,
198,
6738,
19720,
1330,
34441,
51,
29291,
11,
32233,
11,
4479,
11,
12251,
198,
198,
6738,
285,
489,
62,
31534,
1330,
337,
489,
9237... | 2.15411 | 876 |
import io
from typing import Tuple, Callable, Sequence, Iterator, Optional, Any
import ipywidgets as widgets
from functools import wraps
from ipycanvas import Canvas
from PIL import Image
from palettable.colorbrewer.qualitative import Set2_8
def set_colors() -> Iterator[str]:
"""An infinite iterator over the Set2 hex colors.
Yields
-------
str
A valid hex-string from the Set2 colors. 8 unique colors available.
"""
while True:
yield from Set2_8.hex_colors
def dist(q: Sequence[float], p: Sequence[float]) -> float:
"""Euclidian distance between two points.
Parameters
----------
q : Sequence[float]
Point q
p : Sequence[float]
Point p
Returns
-------
float
The distance between point q and p.
"""
return (sum((px - qx) ** 2.0 for px, qx in zip(p, q))) ** 0.5
def trigger_redraw(fn: Callable) -> Callable:
"""Method decorator for functions that need to trigger a re-draw.
Parameters
----------
fn : Callable
The function that needs to trigger a re-draw, e.g. because it changes
the appearance of the canvas.
Returns
-------
Callable
A wrapped function that, when called, calls the input function and then
calls the re-draw method on the class.
"""
@wraps(fn)
return wrapped_fn
def only_inside_image(
fn: Callable[[Any, float, float], Optional[Any]]
) -> Callable:
"""Method decorator for function that needs to only work inside the image.
The input should be a method that accepts x and y.
Parameters
----------
fn : Callable
The method that accepts self, x and y.
Returns
-------
Callable
A wrapped function that, when called, returns None if x and y are not
inside the image (indicated by self.image_extent)
"""
@wraps(fn)
return wrapped_fn
| [
11748,
33245,
198,
6738,
19720,
1330,
309,
29291,
11,
4889,
540,
11,
45835,
11,
40806,
1352,
11,
32233,
11,
4377,
198,
11748,
20966,
88,
28029,
11407,
355,
40803,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
20966,
88,
5171,
110... | 2.777939 | 689 |
"""
Scene-related inference and estimation
"""
from .scene import estimate_scene_description | [
37811,
198,
36542,
12,
5363,
32278,
290,
31850,
198,
37811,
198,
198,
6738,
764,
29734,
1330,
8636,
62,
29734,
62,
11213
] | 4.428571 | 21 |
# type: ignore
from multiprocessing import Process, Queue
import pytest
import tempfile
import time
from click.testing import CliRunner
from chaosiqagent.cli import cli
cfg = b"""
DEBUG=0
LOG_FORMAT="plain"
AGENT_URL=https://console.chaosiq.dev
AGENT_ACCESS_TOKEN=1234
"""
invalid_cfg = b"""
"""
pytestmark = pytest.mark.skip
| [
2,
2099,
25,
8856,
198,
198,
6738,
18540,
305,
919,
278,
1330,
10854,
11,
4670,
518,
198,
11748,
12972,
9288,
198,
11748,
20218,
7753,
198,
11748,
640,
198,
198,
6738,
3904,
13,
33407,
1330,
1012,
72,
49493,
198,
198,
6738,
11918,
250... | 2.798319 | 119 |
"""
Coleção de funções para extração das características julgadas
relevantes para o terinamento do modelo ML
"""
import pandas as pd
import numpy as np
import scipy.stats as stats
from scipy import signal
from scipy import integrate
def extract_features(signals, fft, fft_amplitude, ratio = 10):
"""Função principal, chama as demais funções"""
# retira o sinal do microfone da análise
signals.pop('microfone')
fft.pop('microfone')
fft_amplitude.pop('microfone')
# encontra a rotacao_calc e o seu index
rotacao_calc = get_rotation(fft_amplitude)
index = fft_amplitude.index[fft_amplitude['freq_ax'] == rotacao_calc]
# remove colunas após a determinação da rotação
signals.pop('tacometro')
fft.pop('freq_ax')
fft_amplitude.pop('tacometro')
# gera o dicionário com as features do experimento
features = {'rotacao_calc': rotacao_calc}
features.update(get_n_harmonics(fft_amplitude, index))
features.update(get_phase_angles(fft, index))
features.update(get_time_statistics(signals))
features.update(get_freq_statistics(fft_amplitude))
# features.update(get_vel_rms(signals, ratio))
return features
def get_rotation(fft_amplitude_df):
"""Dentre os 3 maiores picos na fft do tacômetro, deve retornar o de menor frequência.
Assim, evita-se o mascaramento da rotacao_calc pelas harmonicas"""
# cópia que evita sobrescrição de valores no DataFrame original
tacometro_copy = fft_amplitude_df['tacometro'].copy()
candidates = [0, 0, 0]
for i in range(3):
index = tacometro_copy.argmax()
candidates[i] = fft_amplitude_df.freq_ax[index]
for j in range(-2, 3):
tacometro_copy[index+j] = 0
return min(candidates)
def get_n_harmonics(fft_amplitude_df, fund_index, n_harmonics=3):
"""Extrai todos os valores nos n primeiros harmônicos, exceto para o tacometro e freq_ax"""
fft_amplitude_df = fft_amplitude_df.drop(['freq_ax'], axis=1)
harmonic_features = {}
idx = fund_index[0]
for i in range(1, n_harmonics+1):
# resgata na frequência os valores na harmonica i
# a partir do maior valor encontrado em um intervalo de +/- 5 Hz em torno da posição i*rotacao_calc
harm_values = fft_amplitude_df.iloc[(idx-5)*i:(idx+5)*i].max()
# adiciona às features com o respectivo sulfixo do harmonico i
harmonic_features.update({k+'_{}h'.format(i): v for k, v in harm_values.items()})
return harmonic_features
def get_phase_angles(fft_df, fund_index):
"""extrai todos os valores nos n primeiros harmônicos, exceto para o tacometro e freq_ax"""
# resgata FFT na rotacao_calc para cada eixo
fft_df = fft_df.iloc[fund_index].squeeze()
# encontra a diferença do ângulo de fase de cada eixo
# em relação ao do tacômetro antes de descarta-lo
fft_df = fft_df / fft_df['tacometro']
fft_df.pop('tacometro')
# calcula o angulo de fase em radianos
angle = fft_df.apply(np.angle)
# retorna features com o respectivo sulfixo
return {k+'_phase': v for k, v in angle.items()}
def get_time_statistics(time_df):
"""extrai estatísticas do sinal no tempo"""
# valores auxiliares
absolute_max = time_df.abs().max()
absolute_average = time_df.abs().mean()
# valores extraídos
rms = time_df.pow(2).sum().pow(1/2)
sra = time_df.abs().pow(1/2).mean().pow(2)
kurtosis = time_df.kurtosis()
sqewness = time_df.skew()
peak_to_peak = time_df.max() - time_df.min()
crest = absolute_max / rms
impulse = absolute_max / absolute_average
margin = absolute_max / sra
shape = rms / absolute_average
kurtosis_f = kurtosis / rms.pow(4)
entropy = calc_entropy(time_df)
rms_dic = {k+'_timestat_rms':v for k, v in rms.to_dict().items()}
sra_dic = {k+'_timestat_sra':v for k, v in sra.to_dict().items()}
kurtosis_dic = {k+'_timestat_kurt':v for k, v in kurtosis.to_dict().items()}
sqewness_dic = {k+'_timestat_sqew':v for k, v in sqewness.to_dict().items()}
peak_to_peak_dic = {k+'_timestat_peak':v for k, v in peak_to_peak.to_dict().items()}
crest_dic = {k+'_timestat_crest':v for k, v in crest.to_dict().items()}
impulse_dic = {k+'_timestat_impulse':v for k, v in impulse.to_dict().items()}
margin_dic = {k+'_timestat_margin':v for k, v in margin.to_dict().items()}
shape_dic = {k+'_timestat_shape':v for k, v in shape.to_dict().items()}
kurtosis_f_dic = {k+'_timestat_kurt_f':v for k, v in kurtosis_f.to_dict().items()}
entropy_dic = {k+'_timestat_entropy':v for k, v in entropy.to_dict().items()}
time_statistics = {}
time_statistics.update(rms_dic)
time_statistics.update(sra_dic)
time_statistics.update(kurtosis_dic)
time_statistics.update(sqewness_dic)
time_statistics.update(peak_to_peak_dic)
time_statistics.update(crest_dic)
time_statistics.update(impulse_dic)
time_statistics.update(margin_dic)
time_statistics.update(shape_dic)
time_statistics.update(kurtosis_f_dic)
time_statistics.update(entropy_dic)
return time_statistics
def get_freq_statistics(fft_amplitude_df):
"""extrai estatísticas do sinal no tempo"""
freq_ax = fft_amplitude_df['freq_ax']
fft_amplitude_df = fft_amplitude_df.drop(['freq_ax'], axis=1)
sum_axis = fft_amplitude_df.sum()
freq_center = (freq_ax * fft_amplitude_df.T).T.sum() / sum_axis
rmsf = ((freq_ax**2 * fft_amplitude_df.T).T.sum() / sum_axis).pow(1/2)
rvf = ((np.subtract.outer(freq_ax.values, freq_center.values)**2 * fft_amplitude_df).sum() / sum_axis).pow(1/2)
freq_center_dict = {k+'_freqstat_fc':v for k, v in freq_center.to_dict().items()}
rmsf_dict = {k+'_freqstat_rmsf':v for k, v in rmsf.to_dict().items()}
rvf_dict = {k+'_freqstat_rvf':v for k, v in rvf.to_dict().items()}
freq_statistics = {}
freq_statistics.update(freq_center_dict)
freq_statistics.update(rmsf_dict)
freq_statistics.update(rvf_dict)
return freq_statistics
def get_vel_rms(time_df, ratio=10):
"""integra o sinal da aceleração para a velocidade e extrai o valor-eficaz"""
# define nova frequência de aquisição.
sampling_freq = 50000/ratio
# note: 50 kHz é a frequência de aquisição original dos dados
# instancia o filtro passa alta arbitrário em 10 Hz
sos = signal.butter(6, 10, 'highpass', fs=sampling_freq, output='sos')
# calcula velocidade pela integral (trapezoidal) dos sinais
velocity_filtered = pd.DataFrame()
dt = 1/sampling_freq
for col in time_df.columns:
velocity_filtered[col] = signal.sosfilt(sos, time_df[col])
velocity_filtered[col] = integrate.cumtrapz(y=np.array(velocity_filtered[col]), dx=dt, initial=0)
vel_rms = velocity_filtered.pow(2).sum().pow(1/2).to_dict()
return {k+'_vel_rms':v for k, v in vel_rms.items()}
| [
37811,
198,
46509,
16175,
28749,
390,
1257,
16175,
127,
113,
274,
31215,
3131,
16175,
28749,
288,
292,
1097,
7321,
8836,
11268,
292,
474,
377,
70,
38768,
198,
49659,
274,
31215,
267,
1059,
259,
3263,
78,
466,
2746,
78,
10373,
198,
37811... | 2.20538 | 3,160 |
import binascii
import urllib2
import json
from hashlib import sha1
import base64
import datetime
import calendar
import time
__author__ = 'DeStars'
| [
11748,
9874,
292,
979,
72,
198,
11748,
2956,
297,
571,
17,
198,
11748,
33918,
198,
6738,
12234,
8019,
1330,
427,
64,
16,
198,
11748,
2779,
2414,
198,
11748,
4818,
8079,
198,
11748,
11845,
198,
11748,
640,
198,
198,
834,
9800,
834,
796... | 3.212766 | 47 |
import cv2
import os
import numpy as np
# Load HAAR face classifier
face_classifier = cv2.CascadeClassifier(
cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
)
# Load functions
# Initialize Webcam
cap = cv2.VideoCapture(0)
while True:
name = input("Enter candidate name:")
check_make_dir(name)
check_make_dir(name, "val")
count = 0
if name != "Blank":
# Create dataset for unique faces
while True:
ret, frame = cap.read()
cv2.imshow("frame",frame)
if face_extractor(frame) is not None:
count += 1
face = cv2.resize(face_extractor(frame), (400, 400))
write_show_img(name, count, face)
else:
print("Face not found")
pass
if cv2.waitKey(1) == 13 or count == 450:
break
else:
# For ground truth
while True:
ret, frame = cap.read()
if face_extractor(frame) is None:
count += 1
bg = cv2.resize(frame, (400, 400))
write_show_img(name, count, bg)
else:
print("Face found")
pass
if cv2.waitKey(1) == 13 or count == 450:
break
break
cap.release()
cv2.destroyAllWindows()
print("Collecting Samples Complete")
| [
11748,
269,
85,
17,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
198,
2,
8778,
14558,
1503,
1986,
1398,
7483,
198,
2550,
62,
4871,
7483,
796,
269,
85,
17,
13,
34,
28966,
9487,
7483,
7,
198,
220,
220,
220,
269,
85,
1... | 1.942577 | 714 |
"""URLs to run the tests."""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('admin/', admin.site.urls),
path('hijack/', include('hijack.urls', namespace='hijack')),
path('hello/', include('hijack.tests.test_app.urls', 'test_app')),
]
| [
37811,
21886,
82,
284,
1057,
262,
5254,
526,
15931,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
2291,
11,
3108,
198,
198,
6371,
33279,
82,
796,
685,
198,
220,
220,
220,
3108,
10786... | 2.607143 | 112 |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
'''
Case Type: MOT
Case Name: 结合MOT表,以START TRANSACTION启动事务以END结束事务测试
Descption: 1.开启事务;2.创建普通MOT行存表; 3.插入数据; 4. 结束事务;5.清理数据;
'''
import unittest
import sys
sys.path.append(sys.path[0] + "/../")
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
logger = Logger()
| [
37811,
198,
15269,
357,
66,
8,
33160,
43208,
21852,
1766,
1539,
43,
8671,
13,
198,
198,
9654,
35389,
1046,
318,
11971,
739,
17996,
272,
6599,
43,
410,
17,
13,
198,
1639,
460,
779,
428,
3788,
1864,
284,
262,
2846,
290,
3403,
286,
262... | 2.226804 | 388 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['BucketArgs', 'Bucket']
@pulumi.input_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
26144,
35986,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760,
644,
345,
389,
1804,
0,
17202,
... | 3.5 | 126 |
# -*- encoding: utf-8 -*-
import yaml
from sysl.core import syslx
from sysl.util import datamodel
from sysl.util import java
from sysl.util import rex
from sysl.proto import sysl_pb2
TYPE_MAP = {
sysl_pb2.Type.ANY: {'type': 'object'},
sysl_pb2.Type.BOOL: {'type': 'boolean'},
sysl_pb2.Type.INT: {'type': 'number', 'format': 'integer'},
sysl_pb2.Type.FLOAT: {'type': 'number', 'format': 'double'},
sysl_pb2.Type.DECIMAL: {'type': 'number', 'format': 'double'},
sysl_pb2.Type.STRING: {'type': 'string'},
sysl_pb2.Type.BYTES: None,
sysl_pb2.Type.STRING_8: {'type': 'string'},
sysl_pb2.Type.DATE: {'type': 'string'},
sysl_pb2.Type.DATETIME: {'type': 'string'},
sysl_pb2.Type.XML: {'type': 'string'},
}
STATUS_MAP = {
# 1xx
100: 'Continue',
101: 'Switching Protocol',
# 2xx
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
207: 'Multi-Status',
208: 'Already Reported',
226: 'IM Used',
# 3xx
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
# 306: 'Switch Proxy', No longer used
307: 'Temporary Redirect',
308: 'Permanent Redirect',
# 4xx
400: 'Bad Request',
401: 'Unauthorised',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Payload Too Large',
414: 'URI Too Long',
415: 'Unsupported Media Type',
416: 'Range Not Satisfiable',
417: 'Expectation Failed',
418: 'I\'m a teapot',
421: 'Misdirected Request',
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
426: 'Upgrade Required',
428: 'Precondition Required',
429: 'Too Many Requests',
431: 'Request Header Fields Too Large',
451: 'Unavailable For Legal Reasons',
# 5xx
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
506: 'Variant Also Negotiates',
507: 'Insufficient Storage',
508: 'Loop Detected',
510: 'Not Extended',
511: 'Network Authentication Required',
}
| [
2,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
331,
43695,
198,
198,
6738,
827,
6649,
13,
7295,
1330,
827,
6649,
87,
198,
198,
6738,
827,
6649,
13,
22602,
1330,
4818,
321,
375,
417,
198,
6738,
827,
6649,
13,... | 2.374416 | 1,071 |
from pybuilder.core import Logger, Project
from pybuilder.reactor import Reactor
from pybuilder_integration.exec_utility import exec_command
| [
6738,
12972,
38272,
13,
7295,
1330,
5972,
1362,
11,
4935,
198,
6738,
12972,
38272,
13,
260,
11218,
1330,
797,
11218,
198,
198,
6738,
12972,
38272,
62,
18908,
1358,
13,
18558,
62,
315,
879,
1330,
2452,
62,
21812,
628,
628,
628
] | 3.675 | 40 |
"""
Unit tests for ``EpidemicRegressor``.
"""
import io
import os
import pickle
import unittest
import warnings
import numpy
from numpy.testing import assert_almost_equal
from pandas import read_csv
from aftercovid.models import EpidemicRegressor, CovidSIRD, CovidSIRDc
if __name__ == '__main__':
unittest.main()
| [
37811,
198,
26453,
5254,
329,
7559,
13807,
312,
5314,
8081,
44292,
15506,
13,
198,
37811,
198,
11748,
33245,
198,
11748,
28686,
198,
11748,
2298,
293,
198,
11748,
555,
715,
395,
198,
11748,
14601,
198,
11748,
299,
32152,
198,
6738,
299,
... | 3.028302 | 106 |
import cv2
##length 150
# 그림 그리는거
label_txt = open("dataset/integrated_label(validation).txt" , "r")
labels = label_txt.readlines()
head_batch_label, gaze_batch_label = loadLabel_gazetest(labels,[filename])
##end 포인트 가져오는것
image = cv2.imread(os.path.join(args.im_path, image_file_name))
output_image = np.copy(color_img)
cv2.arrowedLine(output_image, (int(center_x), int(center_y)), (int(GT_endpoint_x), int(GT_endpoint_y)), (0, 255, 0), 2)
cv2.arrowedLine(output_image, (int(center_x), int(center_y)), (int(endpoint_x), int(endpoint_y)), (255, 0, 0), 2)
cv2.imwrite(os.path.join(args.output_path, os.path.splitext(base_name)[0]+str(people_count) + '_headpose.jpg'), output_image)
| [
11748,
269,
85,
17,
628,
198,
2235,
13664,
6640,
198,
198,
2,
220,
166,
115,
116,
167,
99,
120,
220,
166,
115,
116,
167,
99,
105,
167,
232,
242,
166,
109,
108,
628,
198,
18242,
62,
14116,
796,
1280,
7203,
19608,
292,
316,
14,
18... | 2.149533 | 321 |
import unittest.mock as mock
from unittest import TestCase
from esrally import config
from esrally.mechanic import provisioner
| [
11748,
555,
715,
395,
13,
76,
735,
355,
15290,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
1658,
81,
453,
1330,
4566,
198,
6738,
1658,
81,
453,
13,
1326,
3147,
291,
1330,
8287,
263,
628
] | 3.394737 | 38 |
from .cloudpickle import load, loads, register_pickle_by_value # noqa
from .cloudpickle_fast import dumps, dump # noqa
| [
6738,
764,
17721,
27729,
293,
1330,
3440,
11,
15989,
11,
7881,
62,
27729,
293,
62,
1525,
62,
8367,
220,
1303,
645,
20402,
198,
6738,
764,
17721,
27729,
293,
62,
7217,
1330,
45514,
11,
10285,
220,
1303,
645,
20402,
628,
628
] | 3.1 | 40 |
#!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open('README.md') as readme_file:
readme = readme_file.read()
with open('HISTORY.md') as history_file:
history = history_file.read()
requirements = ['requests>=2.27.1']
test_requirements = ['pytest>=3', ]
setup(
author="Alex Ausch",
author_email='alex@ausch.name',
python_requires='>=3.9',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Environment :: Web Environment',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.9',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'
],
description="Python 3 wrapper for the NFTScan API",
install_requires=requirements,
license="MIT license",
long_description=readme + "\n\n" + history,
long_description_content_type="text/markdown",
include_package_data=True,
keywords=['nft', 'blockchain', 'nftscan'],
maintainer='Alex Ausch',
maintainer_email='alex@ausch.name',
name='nftscan-api',
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/nftscan2022/nftscan-api-python-sdk',
project_urls={
'Source': 'https://github.com/nftscan2022/nftscan-api-python-sdk',
},
version='0.1.3',
zip_safe=False,
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
464,
9058,
4226,
526,
15931,
198,
198,
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
4480,
1280,
10786,
15675,
11682,
13,
9132,
11537,
355,
1100,
1326,
... | 2.565809 | 661 |
"""Device tracker constants."""
from datetime import timedelta
import logging
LOGGER = logging.getLogger(__package__)
DOMAIN = 'device_tracker'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
PLATFORM_TYPE_LEGACY = 'legacy'
PLATFORM_TYPE_ENTITY = 'entity_platform'
SOURCE_TYPE_GPS = 'gps'
SOURCE_TYPE_ROUTER = 'router'
SOURCE_TYPE_BLUETOOTH = 'bluetooth'
SOURCE_TYPE_BLUETOOTH_LE = 'bluetooth_le'
CONF_SCAN_INTERVAL = 'interval_seconds'
SCAN_INTERVAL = timedelta(seconds=12)
CONF_TRACK_NEW = 'track_new_devices'
DEFAULT_TRACK_NEW = True
CONF_AWAY_HIDE = 'hide_if_away'
DEFAULT_AWAY_HIDE = False
CONF_CONSIDER_HOME = 'consider_home'
DEFAULT_CONSIDER_HOME = timedelta(seconds=180)
CONF_NEW_DEVICE_DEFAULTS = 'new_device_defaults'
ATTR_ATTRIBUTES = 'attributes'
ATTR_BATTERY = 'battery'
ATTR_DEV_ID = 'dev_id'
ATTR_GPS = 'gps'
ATTR_HOST_NAME = 'host_name'
ATTR_LOCATION_NAME = 'location_name'
ATTR_MAC = 'mac'
ATTR_SOURCE_TYPE = 'source_type'
ATTR_CONSIDER_HOME = 'consider_home'
| [
37811,
24728,
30013,
38491,
526,
15931,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
11748,
18931,
198,
198,
25294,
30373,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
26495,
834,
8,
198,
198,
39170,
29833,
796,
705,
25202,
62,
2213,
1... | 2.373171 | 410 |
from core.config.base import *
from core.config.rock import * | [
198,
6738,
4755,
13,
11250,
13,
8692,
1330,
1635,
198,
6738,
4755,
13,
11250,
13,
10823,
1330,
1635
] | 3.444444 | 18 |
import tkinter as tk #imports the GUI framework
| [
11748,
256,
74,
3849,
355,
256,
74,
1303,
320,
3742,
262,
25757,
9355,
201
] | 3.428571 | 14 |
from pyfirmata2 import Arduino
from config import ConfigArduino
| [
6738,
12972,
69,
2533,
1045,
17,
1330,
27634,
198,
6738,
4566,
1330,
17056,
3163,
24493,
198
] | 4 | 16 |
from PySide6 import QtGui, QtWidgets
app = QtWidgets.QApplication()
window1 = TestWindow()
window1.show()
app.exec()
| [
6738,
9485,
24819,
21,
1330,
33734,
8205,
72,
11,
33734,
54,
312,
11407,
201,
198,
201,
198,
201,
198,
201,
198,
1324,
796,
33734,
54,
312,
11407,
13,
48,
23416,
3419,
201,
198,
17497,
16,
796,
6208,
27703,
3419,
201,
198,
17497,
16... | 2.37037 | 54 |
"""
Tests communication with and data acquisition from a Measurement Computing
USB1608G DAQ.
The tests run by this script are very simple and are by no means exhaustive. It
just sets different numbers of channels and ensures the data received is the
correct shape.
"""
import argparse
from pymcc import MccDaq
SAMPLE_RATE = 2048
SAMPLES_PER_READ = 1024
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument(
'-d', '--device',
dest='device',
default='USB_1608G',
help="Type of device to test. Default is USB_1608G.")
args = parser.parse_args()
dev = MccDaq(SAMPLE_RATE, 1, (0, 0), SAMPLES_PER_READ, devname=args.device)
single_channel_test(dev)
multi_channel_test(dev)
| [
37811,
198,
51,
3558,
6946,
351,
290,
1366,
12673,
422,
257,
24291,
434,
38589,
198,
27155,
1433,
2919,
38,
17051,
48,
13,
198,
198,
464,
5254,
1057,
416,
428,
4226,
389,
845,
2829,
290,
389,
416,
645,
1724,
36049,
13,
632,
198,
313... | 2.759076 | 303 |
# Generated by Django 3.0.3 on 2020-05-14 15:51
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
18,
319,
12131,
12,
2713,
12,
1415,
1315,
25,
4349,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from django.conf import settings
from django.contrib.auth import get_user_model
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.settings import import_from_string
from rest_framework.authentication import get_authorization_header
from django.utils.translation import ugettext as _
from djangooidc.backends import OpenIdConnectBackend as DOIDCBackend
from bossoidc.models import Keycloak as KeycloakModel
from jwkest.jwt import JWT
import json
import logging
def load_user_roles(user, roles):
"""Default implementation of the LOAD_USER_ROLES callback
Args:
user (UserModel): Django user object for the user logging in
roles (list[str]): List of Keycloak roles assigned to the user
Note: Contains both realm roles and client roles
"""
pass
LOAD_USER_ROLES = getattr(settings, 'LOAD_USER_ROLES', None)
if LOAD_USER_ROLES is None:
# DP NOTE: had issues with import_from_string loading bossoidc.backend.load_user_roles
LOAD_USER_ROLES_FUNCTION = load_user_roles
else: # pragma: no cover
LOAD_USER_ROLES_FUNCTION = import_from_string(LOAD_USER_ROLES, 'LOAD_USER_ROLES')
def update_user_data(user, userinfo):
"""Default implementation of the UPDATE_USER_DATA callback
Args:
user (UserModel): Django user object for the user logging in
userinfo (dict): Dictionary of userinfo requested from Keycloak with the
user's profile data
"""
pass
UPDATE_USER_DATA = getattr(settings, 'UPDATE_USER_DATA', None)
if UPDATE_USER_DATA is None:
UPDATE_USER_DATA_FUNCTION = update_user_data
else: # pragma: no cover
UPDATE_USER_DATA_FUNCTION = import_from_string(UPDATE_USER_DATA, 'UPDATE_USER_DATA')
def check_username(username):
"""Ensure that the given username does exceed the current user models field
length
Args:
username (str): Username of the user logging in
Raises:
AuthenticationFailed: If the username length exceeds the fields max length
"""
username_field = get_user_model()._meta.get_field("username")
if len(username) > username_field.max_length:
raise AuthenticationFailed(_('Username is too long for Django'))
def get_user_by_id(request, userinfo):
"""Get or create the user object based on the user's information
Note: Taken from djangooidc.backends.OpenIdConnectBackend and made common for
drf-oidc-auth to make use of the same create user functionality
Note: The user's token is loaded from the request session or header to load_user_roles
the user's Keycloak roles
Args:
request (Request): Django request from the user
userinfo (dict): Dictionary of userinfo requested from Keycloak with the
user's profile data
Returns:
UserModel: user object for the requesting user
None: If the requesting user's token's audience is not valid
Raises:
AuthenticationFailed: If the requesting user's username is too long
"""
access_token = get_access_token(request)
audience = get_token_audience(access_token)
# if not token_audience_is_valid(audience):
# return None
subdomain = request.session["subdomain"]
UserModel = get_user_model()
uid = userinfo['sub']
preferred_username = userinfo['preferred_username']
prefix_username = ''
try:
prefix_username = preferred_username.encode('latin1').decode('utf-8')
except Exception:
prefix_username = preferred_username
username = prefix_username + "+" + subdomain
check_username(username)
# Some OP may actually choose to withhold some information, so we must test if it is present
openid_data = {'last_login': datetime.datetime.now()}
if 'first_name' in userinfo.keys():
openid_data['first_name'] = userinfo['first_name']
if 'given_name' in userinfo.keys():
openid_data['first_name'] = userinfo['given_name']
if 'christian_name' in userinfo.keys():
openid_data['first_name'] = userinfo['christian_name']
if 'family_name' in userinfo.keys():
openid_data['last_name'] = userinfo['family_name']
if 'last_name' in userinfo.keys():
openid_data['last_name'] = userinfo['last_name']
if 'email' in userinfo.keys():
openid_data['email'] = userinfo['email']
# DP NOTE: The thing that we are trying to prevent is the user account being
# deleted and recreated in Keycloak (all user data the same, but a
# different uid) and getting the application permissions of the old
# user account.
try: # try to lookup by keycloak UID first
kc_user = KeycloakModel.objects.get(UID = uid, subdomain=request.session['subdomain'])
user = kc_user.user
except KeycloakModel.DoesNotExist: # user doesn't exist with a keycloak UID and subdomain
try:
user = UserModel.objects.get_by_natural_key(username)
fmt = "Deleting user '{}' becuase it matches the authenticated Keycloak username"
_log('get_user_by_id').info(fmt.format(username))
# remove existing user account, so permissions are not transfered
# DP NOTE: required, as the username field is still a unique field,
# which doesn't allow multiple users in the table with the
# same username
user.delete()
except UserModel.DoesNotExist:
pass
args = {UserModel.USERNAME_FIELD: username, 'defaults': openid_data, }
user, created = UserModel.objects.update_or_create(**args)
kc_user = KeycloakModel.objects.create(user = user, UID = uid, subdomain = subdomain)
if kc_user:
kc_user.user_type = userinfo['https://www.openclinica.com/userContext']['userType']
kc_user.save()
roles = get_roles(access_token)
user.is_staff = 'admin' in roles or 'superuser' in roles
user.is_superuser = 'superuser' in roles
LOAD_USER_ROLES_FUNCTION(user, roles)
UPDATE_USER_DATA_FUNCTION(user, userinfo)
user.save()
return user
def get_roles(decoded_token):
    """Get roles declared in the input token.

    Note: returns both the realm roles and client roles.

    Args:
        decoded_token (dict): The user's decoded bearer token

    Returns:
        list[str]: List of role names
    """
    # Extract realm scoped roles
    try:
        # Session logins and Bearer tokens from password Grant Types.
        # BUG FIX: copy into a new list; the original code extended the list
        # object stored inside decoded_token, mutating the caller's token.
        if 'realm_access' in decoded_token:
            roles = list(decoded_token['realm_access']['roles'])
        else:  # Bearer tokens from authorization_code Grant Types
            # DP ???: a session login uses an authorization_code code, not sure
            #         about the difference
            roles = list(decoded_token['resource_access']['account']['roles'])
    except KeyError:
        roles = []

    # Extract all client scoped roles
    for name, client in decoded_token.get('resource_access', {}).items():
        # 'account' was already handled above; skip it here.
        # BUG FIX: was `if name is 'account'` — identity comparison with a
        # string literal is unreliable (and a SyntaxWarning in Python 3.8+).
        if name == 'account':
            continue
        try:
            roles.extend(client['roles'])
        except KeyError:  # pragma no cover
            pass

    return roles
def get_access_token(request):
    """Retrieve the access token carried by a request.

    The session is consulted first; when it holds no token, the raw bearer
    token is pulled from the request's ``Authorization`` header instead.

    Args:
        request (Request): Django request from the user

    Returns:
        dict: JWT payload of the bearer token
    """
    token = request.session.get("access_token")
    if token is None:
        # No session token, so this is a Bearer-token style login:
        # "Authorization: Bearer <token>" -> take the second field.
        token = get_authorization_header(request).split()[1]
    return JWT().unpack(token).payload()
def get_token_audience(token):
    """Retrieve the token's intended audience.

    According to the openid-connect spec `aud` may be a string or a list:
    http://openid.net/specs/openid-connect-basic-1_0.html#IDToken

    Args:
        token (dict): The user's decoded bearer token

    Returns:
        list[str]: The list of token audiences
    """
    aud = token.get("aud", [])
    if isinstance(aud, str):
        # Normalize the single-audience string form to a one-element list.
        return [aud]
    return aud
def token_audience_is_valid(audience):
    """Check if any of the input audiences is trusted.

    Args:
        audience (list[str]): List of token audiences

    Returns:
        bool: True if any audience is in the configured OIDC_AUDIENCES list
    """
    if not hasattr(settings, 'OIDC_AUTH'):
        # Don't assume that the bossoidc settings module was used
        return False

    trusted_audiences = settings.OIDC_AUTH.get('OIDC_AUDIENCES', [])
    # Idiomatic any() replaces the original manual flag/break loop;
    # behavior is unchanged (False for an empty audience list).
    return any(aud in trusted_audiences for aud in audience)
class OpenIdConnectBackend(DOIDCBackend): # pragma: no cover
"""Subclass of the Django OIDC Backend that makes use of our get_user_by_id
implementation
"""
| [
2,
15069,
1584,
383,
25824,
21183,
2059,
27684,
23123,
18643,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13... | 2.738823 | 3,534 |
##
# Copyright (c) 2010-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
OpenDirectory service tests.
"""
from twisted.trial import unittest
from ...idirectory import QueryNotSupportedError
from ...expression import (
CompoundExpression, ExistsExpression, MatchExpression, BooleanExpression,
Operand, MatchType, MatchFlags
)
from ...test.test_xml import UnknownConstant
from .._constants import LDAPOperand
from .._service import (
DirectoryService, RecordTypeSchema, DEFAULT_FIELDNAME_ATTRIBUTE_MAP
)
from .._util import (
ldapQueryStringFromQueryStrings,
ldapQueryStringFromBooleanExpression,
ldapQueryStringFromCompoundExpression,
ldapQueryStringFromExistsExpression,
ldapQueryStringFromMatchExpression,
ldapQueryStringFromExpression,
)
from ...idirectory import FieldName as BaseFieldName
from twisted.python.constants import Names, NamedConstant
TEST_FIELDNAME_MAP = dict(DEFAULT_FIELDNAME_ATTRIBUTE_MAP)
TEST_FIELDNAME_MAP[BaseFieldName.uid] = (u"__who_uid__",)
class LDAPQueryTestCase(unittest.TestCase):
"""
Tests for LDAP query generation.
"""
def fieldNameMap(self, service):
"""
Create a mapping from field names to LDAP attribute names.
The attribute names returned here are not real LDAP attribute names,
but we don't care for these tests, since we're not actually connecting
to LDAP.
"""
return dict([
(c, (c.name,))
for c in service.fieldName.iterconstants()
])
def recordTypeSchemas(self, service):
"""
Create a mapping from record types to LDAP object class names.
The object class names returned here are not real LDAP object class
names, but we don't care for these tests, since we're not actually
connecting to LDAP.
"""
return dict([
(
c,
RecordTypeSchema(
relativeDN=NotImplemented, # Don't expect this to be used
attributes=((u"recordTypeAttribute", c.name),)
)
)
for c in service.recordType.iterconstants()
])
def test_ldapQueryStringFromQueryStrings_empty(self):
"""
A single expression should just be returned as-is.
"""
return self._test_ldapQueryStringFromQueryStrings((), u"")
def test_ldapQueryStringFromQueryStrings_single(self):
"""
A single expression should just be returned as-is.
"""
queryStrings = (u"(x=yzzy)",)
return self._test_ldapQueryStringFromQueryStrings(
queryStrings, queryStrings[0]
)
def test_ldapQueryStringFromQueryStrings_multiple(self):
"""
Multiple expressions should just be combined with an operator.
"""
return self._test_ldapQueryStringFromQueryStrings(
(u"(x=yzzy)", u"(xy=zzy)"), u"({operand}(x=yzzy)(xy=zzy))"
)
def test_queryStringFromExistsExpression(self):
"""
Exists expressions produce the correct (attribute=*) string.
"""
service = self.service()
expression = ExistsExpression(service.fieldName.shortNames)
queryString = ldapQueryStringFromExistsExpression(
expression,
self.fieldNameMap(service),
self.recordTypeSchemas(service),
)
expected = u"(shortNames=*)"
self.assertEquals(queryString, expected)
def test_queryStringFromBooleanExpression(self):
"""
If a field is a boolean type and the fieldNameToAttributesMap
value for the field has an equals sign, the portion to the right
of the equals sign is the value that represents True. Make sure
the query string we generate includes that value.
"""
service = self.service()
testFieldNameMap = {
TestFieldName.isAwesome: ("awesome:totally",),
TestFieldName.isCool: ("cool",),
}
expression = BooleanExpression(TestFieldName.isAwesome)
queryString = ldapQueryStringFromBooleanExpression(
expression,
testFieldNameMap,
self.recordTypeSchemas(service),
)
expected = u"(awesome=totally)"
self.assertEquals(queryString, expected)
expression = BooleanExpression(TestFieldName.isCool)
queryString = ldapQueryStringFromBooleanExpression(
expression,
testFieldNameMap,
self.recordTypeSchemas(service),
)
expected = u"(cool=true)"
self.assertEquals(queryString, expected)
def test_queryStringFromMatchExpression_matchTypes(self):
"""
Match expressions with each match type produces the correct
operator=value string.
"""
service = self.service()
for matchType, expected in (
(MatchType.equals, u"=xyzzy"),
(MatchType.startsWith, u"=xyzzy*"),
(MatchType.endsWith, u"=*xyzzy"),
(MatchType.contains, u"=*xyzzy*"),
(MatchType.lessThan, u"<xyzzy"),
(MatchType.greaterThan, u">xyzzy"),
(MatchType.lessThanOrEqualTo, u"<=xyzzy"),
(MatchType.greaterThanOrEqualTo, u">=xyzzy"),
):
expression = MatchExpression(
service.fieldName.shortNames, u"xyzzy",
matchType=matchType
)
queryString = ldapQueryStringFromMatchExpression(
expression,
self.fieldNameMap(service), self.recordTypeSchemas(service),
)
expected = u"({attribute}{expected})".format(
attribute=u"shortNames", expected=expected
)
self.assertEquals(queryString, expected)
def test_queryStringFromMatchExpression_match_not(self):
"""
Match expression with the C{NOT} flag adds the C{!} operator.
"""
service = self.service()
expression = MatchExpression(
service.fieldName.shortNames, u"xyzzy",
flags=MatchFlags.NOT
)
queryString = ldapQueryStringFromMatchExpression(
expression,
self.fieldNameMap(service), self.recordTypeSchemas(service),
)
expected = u"(!{attribute}=xyzzy)".format(
attribute=u"shortNames",
)
self.assertEquals(queryString, expected)
def test_queryStringFromMatchExpression_match_caseInsensitive(self):
"""
Match expression with the C{caseInsensitive} flag adds the C{??????}
operator.
"""
service = self.service()
expression = MatchExpression(
service.fieldName.shortNames, u"xyzzy",
flags=MatchFlags.caseInsensitive
)
queryString = ldapQueryStringFromMatchExpression(
expression,
self.fieldNameMap(service), self.recordTypeSchemas(service),
)
expected = u"???????({attribute}=xyzzy)".format(
attribute=u"shortNames",
)
self.assertEquals(queryString, expected)
test_queryStringFromMatchExpression_match_caseInsensitive.todo = (
"unimplemented"
)
def test_queryStringFromMatchExpression_match_quoting(self):
"""
Special characters are quoted properly.
"""
service = self.service()
expression = MatchExpression(
service.fieldName.fullNames,
u"\\xyzzy: a/b/(c)* ~~ >=< ~~ &| \0!!"
)
queryString = ldapQueryStringFromMatchExpression(
expression,
self.fieldNameMap(service), self.recordTypeSchemas(service),
)
expected = u"({attribute}={expected})".format(
attribute=u"fullNames",
expected=(
u"\\5Cxyzzy: a\\2Fb\\2F\\28c\\29\\2A "
"\\7E\\7E \\3E\\3D\\3C \\7E\\7E \\26\\7C \\00!!"
)
)
self.assertEquals(queryString, expected)
def test_queryStringFromMatchExpression_unknownFieldName(self):
"""
Unknown expression.
"""
service = self.service()
expression = MatchExpression(
UnknownConstant.unknown, u"xyzzy",
)
self.assertRaises(
QueryNotSupportedError,
ldapQueryStringFromMatchExpression,
expression,
self.fieldNameMap(service), self.recordTypeSchemas(service),
)
def test_queryStringFromMatchExpression_unknownMatchType(self):
"""
Unknown expression.
"""
service = self.service()
expression = MatchExpression(
service.fieldName.shortNames, u"xyzzy",
matchType=object()
)
self.assertRaises(
QueryNotSupportedError,
ldapQueryStringFromMatchExpression,
expression,
self.fieldNameMap(service), self.recordTypeSchemas(service),
)
def test_queryStringFromMatchExpression_multipleAttribute(self):
"""
Match expression when the queried field name maps to multiple
attributes.
"""
# We want a match for either attribute.
expected = u"(|(mail=xyzzy)(alternateMail=xyzzy))"
return self._test_queryStringFromMatchExpression_multiAttribute(
MatchFlags.none, expected
)
def test_queryStringFromMatchExpression_multipleAttribute_not(self):
"""
Match expression when the queried field name maps to multiple
attributes and the NOT flag is set.
"""
# We want a NOT match for both attributes.
expected = u"(&(!mail=xyzzy)(!alternateMail=xyzzy))"
return self._test_queryStringFromMatchExpression_multiAttribute(
MatchFlags.NOT, expected
)
def test_queryStringFromMatchExpression_multipleRecordType(self):
"""
Match expression when the queried field name is the record type field,
which maps to multiple attributes.
"""
# We want a match for both values.
expected = (
u"(&({recordType}={type1})"
u"({recordType}={type2})"
u"({accountStatus}={status1}))"
)
return self._test_queryStringFromMatchExpression_multiRecordType(
MatchFlags.none, expected
)
def test_queryStringFromMatchExpression_multipleRecordType_not(self):
"""
Match expression when the queried field name is the record type field,
which maps to multiple attributes and the NOT flag is set.
"""
# We want a NOT match for either value.
expected = (
u"(|(!{recordType}={type1})"
u"(!{recordType}={type2})"
u"(!{accountStatus}={status1}))"
)
return self._test_queryStringFromMatchExpression_multiRecordType(
MatchFlags.NOT, expected
)
def test_queryStringFromCompoundExpression_single(
self, queryFunction=ldapQueryStringFromCompoundExpression
):
"""
Compound expression with a single sub-expression.
Should result in the same query string as just the sub-expression
would.
The Operand shouldn't make any difference here, so we test AND and OR,
expecting the same result.
"""
service = self.service()
for operand in (Operand.AND, Operand.OR):
matchExpression = MatchExpression(
service.fieldName.shortNames, u"xyzzy"
)
compoundExpression = CompoundExpression(
[matchExpression],
operand
)
queryString = queryFunction(
compoundExpression,
self.fieldNameMap(service), self.recordTypeSchemas(service),
)
expected = u"{match}".format(
match=ldapQueryStringFromMatchExpression(
matchExpression,
self.fieldNameMap(service),
self.recordTypeSchemas(service),
)
)
self.assertEquals(queryString, expected)
def test_queryStringFromCompoundExpression_multiple(
self, queryFunction=ldapQueryStringFromCompoundExpression
):
"""
Compound expression with multiple sub-expressions.
The sub-expressions should be grouped with the given operand.
"""
service = self.service()
for (operand, token) in ((Operand.AND, u"&"), (Operand.OR, u"|")):
matchExpression1 = MatchExpression(
service.fieldName.shortNames, u"xyzzy"
)
matchExpression2 = MatchExpression(
service.fieldName.shortNames, u"plugh"
)
compoundExpression = CompoundExpression(
[matchExpression1, matchExpression2],
operand
)
queryString = queryFunction(
compoundExpression,
self.fieldNameMap(service), self.recordTypeSchemas(service),
)
expected = u"({op}{match1}{match2})".format(
op=token,
match1=ldapQueryStringFromMatchExpression(
matchExpression1,
self.fieldNameMap(service),
self.recordTypeSchemas(service),
),
match2=ldapQueryStringFromMatchExpression(
matchExpression2,
self.fieldNameMap(service),
self.recordTypeSchemas(service),
),
)
self.assertEquals(queryString, expected)
def test_queryStringFromExpression_match(self):
"""
Match expression.
"""
service = self.service()
matchExpression = MatchExpression(
service.fieldName.shortNames, u"xyzzy"
)
queryString = ldapQueryStringFromExpression(
matchExpression,
self.fieldNameMap(service), self.recordTypeSchemas(service),
)
expected = ldapQueryStringFromMatchExpression(
matchExpression,
self.fieldNameMap(service), self.recordTypeSchemas(service),
)
self.assertEquals(queryString, expected)
def test_queryStringFromExpression_compound(self):
"""
Compound expression.
"""
self.test_queryStringFromCompoundExpression_single(
queryFunction=ldapQueryStringFromExpression
)
self.test_queryStringFromCompoundExpression_multiple(
queryFunction=ldapQueryStringFromExpression
)
def test_queryStringFromExpression_unknown(self):
"""
Unknown expression.
"""
service = self.service()
self.assertRaises(
QueryNotSupportedError,
ldapQueryStringFromExpression,
object(),
self.fieldNameMap(service), self.recordTypeSchemas(service)
)
| [
2235,
198,
2,
15069,
357,
66,
8,
3050,
12,
5539,
4196,
3457,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
... | 2.27721 | 6,854 |
#!/usr/bin/env python
# Regression-style script: renders a bold text label on top of a grayscale
# image produced by a vtkImageEllipsoidSource.
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
#
# display text over an image
#
# Image pipeline: ellipsoid source -> image mapper -> 2D actor.
ellipse = vtk.vtkImageEllipsoidSource()
mapImage = vtk.vtkImageMapper()
mapImage.SetInputConnection(ellipse.GetOutputPort())
# Window/level maps the full 0-255 scalar range linearly to display gray.
mapImage.SetColorWindow(255)
mapImage.SetColorLevel(127.5)
img = vtk.vtkActor2D()
img.SetMapper(mapImage)
# Text overlay: 15pt bold cyan with a drop shadow.
mapText = vtk.vtkTextMapper()
mapText.SetInput("Text Overlay")
mapText.GetTextProperty().SetFontSize(15)
mapText.GetTextProperty().SetColor(0,1,1)
mapText.GetTextProperty().BoldOn()
mapText.GetTextProperty().ShadowOn()
txt = vtk.vtkActor2D()
txt.SetMapper(mapText)
# Position is in display (pixel) coordinates — presumably near the center
# of the source's default image extent; confirm against the baseline image.
txt.SetPosition(138,128)
ren1 = vtk.vtkRenderer()
ren1.AddActor2D(img)
ren1.AddActor2D(txt)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
# Interactor lets the test window be driven interactively when run by hand.
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
renWin.Render()
iren.Initialize()
# --- end of script --
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
410,
30488,
198,
6738,
410,
30488,
13,
9288,
1330,
23983,
198,
6738,
410,
30488,
13,
22602,
13,
44374,
1330,
410,
30488,
3855,
6601,
30016,
198,
36392,
42,
62,
26947,
62,
13252,
... | 2.593923 | 362 |
import datetime
import os
import uuid
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse, HttpResponse, Http404
from django.conf import settings
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from accounts.models import UserModel
from accounts.viewmodels import UserModelSerializer
import logging
logger = logging.getLogger(__name__)
# Create your views here.
@csrf_exempt
| [
11748,
4818,
8079,
198,
11748,
28686,
198,
11748,
334,
27112,
198,
198,
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198,
6738,
42625,
14208,
13,
33571,
13,
12501,
273,
2024,
13,
6359,
41871,
1330,
269,
27891,
69,
62,
42679,
198,
6... | 3.546012 | 163 |
"""
Played.to urlresolver plugin
Copyright (C) 2013/2014 TheHighway
This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
| [
37811,
198,
11002,
276,
13,
1462,
19016,
411,
14375,
13877,
198,
15269,
357,
34,
8,
2211,
14,
4967,
383,
11922,
1014,
198,
198,
1212,
1430,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
340,
739,
262,
2846,
2... | 3.943966 | 232 |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: Contact
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
| [
2,
6338,
7560,
416,
262,
21939,
36474,
364,
17050,
11,
466,
407,
13096,
198,
198,
2,
25745,
25,
14039,
198,
198,
11748,
6228,
36873,
364,
198,
6738,
6228,
36873,
364,
13,
5589,
265,
1330,
1330,
62,
77,
32152,
198,
37659,
796,
1330,
... | 3.723404 | 47 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Uploads a TensorBoard logdir to TensorBoard.dev."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import time
import grpc
import six
from tensorboard.uploader.proto import write_service_pb2
from tensorboard.uploader import logdir_loader
from tensorboard.uploader import peekable_iterator
from tensorboard.uploader import util
from tensorboard import data_compat
from tensorboard.backend.event_processing import directory_loader
from tensorboard.backend.event_processing import event_file_loader
from tensorboard.backend.event_processing import io_wrapper
from tensorboard.plugins.scalar import metadata as scalar_metadata
from tensorboard.util import grpc_util
from tensorboard.util import tb_logging
from tensorboard.util import tensor_util
# Minimum length of an upload cycle in seconds; shorter cycles will sleep to
# use up the rest of the time to avoid sending write RPCs too quickly.
_MIN_UPLOAD_CYCLE_DURATION_SECS = 5
# Age in seconds of last write after which an event file is considered inactive.
# TODO(@nfelt): consolidate with TensorBoard --reload_multifile default logic.
_EVENT_FILE_INACTIVE_SECS = 4000
# Maximum length of a base-128 varint as used to encode a 64-bit value
# (without the "msb of last byte is bit 63" optimization, to be
# compatible with protobuf and golang varints).
_MAX_VARINT64_LENGTH_BYTES = 10
# Maximum outgoing request size. The server-side limit is 4 MiB [1]; we
# should pad a bit to mitigate any errors in our bookkeeping. Currently,
# we pad a lot, because using higher request sizes causes occasional
# Deadline Exceeded errors in the RPC server.
#
# [1]: https://github.com/grpc/grpc/blob/e70d8582b4b0eedc45e3d25a57b58a08b94a9f4a/include/grpc/impl/codegen/grpc_types.h#L447 # pylint: disable=line-too-long
_MAX_REQUEST_LENGTH_BYTES = 1024 * 128
logger = tb_logging.get_logger()
class TensorBoardUploader(object):
    """Uploads a TensorBoard logdir to TensorBoard.dev."""
    def __init__(self, writer_client, logdir, rate_limiter=None):
        """Constructs a TensorBoardUploader.
        Args:
          writer_client: a TensorBoardWriterService stub instance
          logdir: path of the log directory to upload
          rate_limiter: a `RateLimiter` to use to limit upload cycle frequency
        """
        self._api = writer_client
        self._logdir = logdir
        # Created lazily by create_experiment(); None means "no experiment yet".
        self._request_builder = None
        if rate_limiter is None:
            # Default limiter enforces the minimum cycle duration so write
            # RPCs are not sent too quickly (see _MIN_UPLOAD_CYCLE_DURATION_SECS).
            self._rate_limiter = util.RateLimiter(
                _MIN_UPLOAD_CYCLE_DURATION_SECS
            )
        else:
            self._rate_limiter = rate_limiter
        # Only event files written to within the inactivity window count as
        # active and keep being polled.
        active_filter = (
            lambda secs: secs + _EVENT_FILE_INACTIVE_SECS >= time.time()
        )
        directory_loader_factory = functools.partial(
            directory_loader.DirectoryLoader,
            loader_factory=event_file_loader.TimestampedEventFileLoader,
            path_filter=io_wrapper.IsTensorFlowEventsFile,
            active_filter=active_filter,
        )
        self._logdir_loader = logdir_loader.LogdirLoader(
            self._logdir, directory_loader_factory
        )
    def create_experiment(self):
        """Creates an Experiment for this upload session and returns the ID."""
        logger.info("Creating experiment")
        request = write_service_pb2.CreateExperimentRequest()
        response = grpc_util.call_with_retries(
            self._api.CreateExperiment, request
        )
        self._request_builder = _RequestBuilder(response.experiment_id)
        return response.experiment_id
    def start_uploading(self):
        """Blocks forever to continuously upload data from the logdir.
        Raises:
          RuntimeError: If `create_experiment` has not yet been called.
          ExperimentNotFoundError: If the experiment is deleted during the
            course of the upload.
        """
        if self._request_builder is None:
            raise RuntimeError(
                "Must call create_experiment() before start_uploading()"
            )
        while True:
            self._upload_once()
    def _upload_once(self):
        """Runs one upload cycle, sending zero or more RPCs."""
        logger.info("Starting an upload cycle")
        # Sleep if needed so cycles are at least the minimum duration apart.
        self._rate_limiter.tick()
        sync_start_time = time.time()
        self._logdir_loader.synchronize_runs()
        sync_duration_secs = time.time() - sync_start_time
        logger.info("Logdir sync took %.3f seconds", sync_duration_secs)
        run_to_events = self._logdir_loader.get_run_events()
        first_request = True
        for request in self._request_builder.build_requests(run_to_events):
            # The first request was already paced by the tick() above; each
            # subsequent request of the same cycle is paced individually.
            if not first_request:
                self._rate_limiter.tick()
            first_request = False
            upload_start_time = time.time()
            request_bytes = request.ByteSize()
            logger.info("Trying request of %d bytes", request_bytes)
            # NOTE(review): self._upload is not defined in this excerpt —
            # confirm the full class provides it (presumably the WriteScalar
            # RPC with retries).
            self._upload(request)
            upload_duration_secs = time.time() - upload_start_time
            logger.info(
                "Upload for %d runs (%d bytes) took %.3f seconds",
                len(request.runs),
                request_bytes,
                upload_duration_secs,
            )
def delete_experiment(writer_client, experiment_id):
    """Permanently deletes an experiment and all of its contents.

    Args:
      writer_client: a TensorBoardWriterService stub instance
      experiment_id: string ID of the experiment to delete

    Raises:
      ExperimentNotFoundError: If no such experiment exists.
      PermissionDeniedError: If the user is not authorized to delete this
        experiment.
      RuntimeError: On unexpected failure.
    """
    logger.info("Deleting experiment %r", experiment_id)
    request = write_service_pb2.DeleteExperimentRequest()
    request.experiment_id = experiment_id
    try:
        grpc_util.call_with_retries(writer_client.DeleteExperiment, request)
    except grpc.RpcError as rpc_error:
        # Map the two expected gRPC status codes onto domain exceptions;
        # anything else is unexpected and propagates as-is.
        status_code = rpc_error.code()
        if status_code == grpc.StatusCode.NOT_FOUND:
            raise ExperimentNotFoundError()
        elif status_code == grpc.StatusCode.PERMISSION_DENIED:
            raise PermissionDeniedError()
        raise
class _OutOfSpaceError(Exception):
"""Action could not proceed without overflowing request budget.
This is a signaling exception (like `StopIteration`) used internally
by `_RequestBuilder`; it does not mean that anything has gone wrong.
"""
pass
class _RequestBuilder(object):
    """Helper class for building requests that fit under a size limit.
    This class is not threadsafe. Use external synchronization if
    calling its methods concurrently.
    """
    # NOTE(review): no __init__ appears in this excerpt, yet the methods
    # read self._experiment_id and self._tag_metadata — confirm the full
    # class initializes both (experiment ID string and a per-time-series
    # metadata cache dict) before these methods run.
    _NON_SCALAR_TIME_SERIES = object()  # sentinel
    def _new_request(self):
        """Allocates a new request and refreshes the budget."""
        self._request = write_service_pb2.WriteScalarRequest()
        self._byte_budget = _MAX_REQUEST_LENGTH_BYTES
        # The experiment ID rides along in every request, so its encoded
        # size is charged against the budget up front.
        self._request.experiment_id = self._experiment_id
        self._byte_budget -= self._request.ByteSize()
        if self._byte_budget < 0:
            raise RuntimeError("Byte budget too small for experiment ID")
    def build_requests(self, run_to_events):
        """Converts a stream of TF events to a stream of outgoing requests.
        Each yielded request will be at most `_MAX_REQUEST_LENGTH_BYTES`
        bytes long.
        Args:
          run_to_events: Mapping from run name to generator of `tf.Event`
            values, as returned by `LogdirLoader.get_run_events`.
        Yields:
          A finite stream of `WriteScalarRequest` objects.
        Raises:
          RuntimeError: If no progress can be made because even a single
            point is too large (say, due to a gigabyte-long tag name).
        """
        self._new_request()
        runs = {}  # cache: map from run name to `Run` proto in request
        tags = (
            {}
        )  # cache: map from `(run, tag)` to `Tag` proto in run in request
        work_items = peekable_iterator.PeekableIterator(
            self._run_values(run_to_events)
        )
        while work_items.has_next():
            # Peek rather than advance: if _OutOfSpaceError fires below,
            # the same item is retried against the next (fresh) request.
            (run_name, event, orig_value) = work_items.peek()
            value = data_compat.migrate_value(orig_value)
            time_series_key = (run_name, value.tag)
            metadata = self._tag_metadata.get(time_series_key)
            if metadata is None:
                # First sighting of this time series: classify it once and
                # cache the decision (scalar vs. everything else).
                plugin_name = value.metadata.plugin_data.plugin_name
                if plugin_name == scalar_metadata.PLUGIN_NAME:
                    metadata = value.metadata
                else:
                    metadata = _RequestBuilder._NON_SCALAR_TIME_SERIES
                self._tag_metadata[time_series_key] = metadata
            if metadata is _RequestBuilder._NON_SCALAR_TIME_SERIES:
                next(work_items)
                continue
            try:
                run_proto = runs.get(run_name)
                if run_proto is None:
                    run_proto = self._create_run(run_name)
                    runs[run_name] = run_proto
                tag_proto = tags.get((run_name, value.tag))
                if tag_proto is None:
                    tag_proto = self._create_tag(run_proto, value.tag, metadata)
                    tags[(run_name, value.tag)] = tag_proto
                self._create_point(tag_proto, event, value)
                next(work_items)
            except _OutOfSpaceError:
                # Flush request and start a new one.
                request_to_emit = self._prune_request()
                if request_to_emit is None:
                    raise RuntimeError("Could not make progress uploading data")
                self._new_request()
                runs.clear()
                tags.clear()
                yield request_to_emit
        final_request = self._prune_request()
        if final_request is not None:
            yield final_request
    def _run_values(self, run_to_events):
        """Helper generator to create a single stream of work items."""
        # Note that each of these joins in principle has deletion anomalies:
        # if the input stream contains runs with no events, or events with
        # no values, we'll lose that information. This is not a problem: we
        # would need to prune such data from the request anyway.
        for (run_name, events) in six.iteritems(run_to_events):
            for event in events:
                for value in event.summary.value:
                    yield (run_name, event, value)
    def _prune_request(self):
        """Removes empty runs and tags from the active request.
        This does not refund `self._byte_budget`; it is assumed that the
        request will be emitted immediately, anyway.
        Returns:
          The active request, or `None` if after pruning the request
          contains no data.
        """
        request = self._request
        # Iterate in reverse so deleting by index does not shift the
        # positions of entries not yet visited.
        for (run_idx, run) in reversed(list(enumerate(request.runs))):
            for (tag_idx, tag) in reversed(list(enumerate(run.tags))):
                if not tag.points:
                    del run.tags[tag_idx]
            if not run.tags:
                del self._request.runs[run_idx]
        if not request.runs:
            request = None
        return request
    def _create_run(self, run_name):
        """Adds a run to the live request, if there's space.
        Args:
          run_name: String name of the run to add.
        Returns:
          The `WriteScalarRequest.Run` that was added to `request.runs`.
        Raises:
          _OutOfSpaceError: If adding the run would exceed the remaining
            request budget.
        """
        run_proto = self._request.runs.add(name=run_name)
        # We can't calculate the proto key cost exactly ahead of time, as
        # it depends on the total size of all tags. Be conservative.
        cost = run_proto.ByteSize() + _MAX_VARINT64_LENGTH_BYTES + 1
        if cost > self._byte_budget:
            raise _OutOfSpaceError()
        self._byte_budget -= cost
        return run_proto
    def _create_tag(self, run_proto, tag_name, metadata):
        """Adds a tag for the given value, if there's space.
        Args:
          run_proto: `WriteScalarRequest.Run` proto to which to add a tag.
          tag_name: String name of the tag to add (as `value.tag`).
          metadata: TensorBoard `SummaryMetadata` proto from the first
            occurrence of this time series.
        Returns:
          The `WriteScalarRequest.Tag` that was added to `run_proto.tags`.
        Raises:
          _OutOfSpaceError: If adding the tag would exceed the remaining
            request budget.
        """
        tag_proto = run_proto.tags.add(name=tag_name)
        tag_proto.metadata.CopyFrom(metadata)
        submessage_cost = tag_proto.ByteSize()
        # We can't calculate the proto key cost exactly ahead of time, as
        # it depends on the number of points. Be conservative.
        cost = submessage_cost + _MAX_VARINT64_LENGTH_BYTES + 1
        if cost > self._byte_budget:
            raise _OutOfSpaceError()
        self._byte_budget -= cost
        return tag_proto
    def _create_point(self, tag_proto, event, value):
        """Adds a scalar point to the given tag, if there's space.
        Args:
          tag_proto: `WriteScalarRequest.Tag` proto to which to add a point.
          event: Enclosing `Event` proto with the step and wall time data.
          value: Scalar `Summary.Value` proto with the actual scalar data.
        Returns:
          The `ScalarPoint` that was added to `tag_proto.points`.
        Raises:
          _OutOfSpaceError: If adding the point would exceed the remaining
            request budget.
        """
        point = tag_proto.points.add()
        point.step = event.step
        # TODO(@nfelt): skip tensor roundtrip for Value with simple_value set
        point.value = tensor_util.make_ndarray(value.tensor).item()
        util.set_timestamp(point.wall_time, event.wall_time)
        submessage_cost = point.ByteSize()
        cost = submessage_cost + _varint_cost(submessage_cost) + 1  # proto key
        if cost > self._byte_budget:
            # Unlike runs/tags, a too-big point is rolled back so the
            # request stays emittable as-is.
            tag_proto.points.pop()
            raise _OutOfSpaceError()
        self._byte_budget -= cost
        return point
def _varint_cost(n):
"""Computes the size of `n` encoded as an unsigned base-128 varint.
This should be consistent with the proto wire format:
<https://developers.google.com/protocol-buffers/docs/encoding#varints>
Args:
n: A non-negative integer.
Returns:
An integer number of bytes.
"""
result = 1
while n >= 128:
result += 1
n >>= 7
return result
| [
2,
15069,
13130,
383,
309,
22854,
37535,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,... | 2.438801 | 6,340 |
from io import StringIO
from html.parser import HTMLParser
| [
6738,
33245,
1330,
10903,
9399,
198,
6738,
27711,
13,
48610,
1330,
11532,
46677,
628,
198
] | 4.066667 | 15 |
from setuptools import setup, find_packages

# Collect runtime dependencies from the pinned requirements file.
with open("requirements.txt") as requirements_file:
    install_requires = requirements_file.read().strip().split("\n")

# get version from __version__ variable in flegeapp/__init__.py
from flegeapp import __version__ as version

setup(
    name="flegeapp",
    version=version,
    description="Healthcare App",
    author="Flege",
    author_email="flege@flege.com",
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    install_requires=install_requires,
)
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
4480,
1280,
7203,
8897,
18883,
13,
14116,
4943,
355,
277,
25,
198,
197,
17350,
62,
47911,
796,
277,
13,
961,
22446,
36311,
22446,
35312,
7203,
59,
77,
4943,
198,
198,... | 3.032051 | 156 |
"""empty message
Revision ID: 8c56a59f4f05
Revises: 8d8c816257ce
Create Date: 2021-02-11 21:01:07.818791
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '8c56a59f4f05'
down_revision = '8d8c816257ce'
branch_labels = None
depends_on = None
| [
37811,
28920,
3275,
198,
198,
18009,
1166,
4522,
25,
807,
66,
3980,
64,
3270,
69,
19,
69,
2713,
198,
18009,
2696,
25,
807,
67,
23,
66,
23,
1433,
28676,
344,
198,
16447,
7536,
25,
33448,
12,
2999,
12,
1157,
2310,
25,
486,
25,
2998,... | 2.453237 | 139 |
from typing import List
import ibis
from ibis.expr.operations import Literal
import pytest
from sql_to_ibis import query
from sql_to_ibis.tests.utils import assert_ibis_equal_show_diff, assert_state_not_change
@assert_state_not_change
def test_where_clause(forest_fires):
"""
Test where clause
:return:
"""
my_table = query("""select * from forest_fires where month = 'mar'""")
ibis_table = forest_fires[forest_fires.month == "mar"]
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_all_boolean_ops_clause(forest_fires):
"""
Test where clause
:return:
"""
my_table = query(
"""select * from forest_fires where month = 'mar' and temp > 8.0 and rain >= 0
and area != 0 and dc < 100 and ffmc <= 90.1
"""
)
ibis_table = forest_fires[
(forest_fires.month == "mar")
& (forest_fires.temp > 8.0)
& (forest_fires.rain >= 0)
& (forest_fires.area != ibis.literal(0))
& (forest_fires.DC < 100)
& (forest_fires.FFMC <= 90.1)
]
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_having_multiple_conditions(forest_fires):
"""
Test having clause
:return:
"""
my_table = query(
"select min(temp) from forest_fires having min(temp) > 2 and max(dc) < 200"
)
having_condition = (forest_fires.temp.min() > 2) & (forest_fires.DC.max() < 200)
ibis_table = forest_fires.aggregate(
metrics=forest_fires.temp.min().name("_col0"),
having=having_condition,
)
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_having_multiple_conditions_with_or(forest_fires):
"""
Test having clause
:return:
"""
my_table = query(
"select min(temp) from forest_fires having min(temp) > 2 and "
"max(dc) < 200 or max(dc) > 1000"
)
having_condition = (forest_fires.temp.min() > 2) & (forest_fires.DC.max() < 200) | (
(forest_fires.DC.max() > 1000)
)
ibis_table = forest_fires.aggregate(
metrics=forest_fires.temp.min().name("_col0"),
having=having_condition,
)
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_having_one_condition(forest_fires):
"""
Test having clause
:return:
"""
my_table = query("select min(temp) from forest_fires having min(temp) > 2")
min_aggregate = forest_fires.temp.min()
ibis_table = forest_fires.aggregate(
min_aggregate.name("_col0"), having=(min_aggregate > 2)
)
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_having_with_group_by(forest_fires):
"""
Test having clause
:return:
"""
my_table = query(
"select min(temp) from forest_fires group by day having min(temp) > 5"
)
ibis_table = (
forest_fires.groupby("day")
.having(forest_fires.temp.min() > 5)
.aggregate(forest_fires.temp.min().name("_col0"))
.drop(["day"])
)
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_between_operator(forest_fires):
"""
Test using between operator
:return:
"""
my_table = query(
"""
select * from forest_fires
where wind between 5 and 6
"""
)
ibis_table = forest_fires.filter(forest_fires.wind.between(5, 6))
assert_ibis_equal_show_diff(ibis_table, my_table)
in_list_params = pytest.mark.parametrize(
"sql,ibis_expr_list",
[
(
"('fri', 'sun')",
[ibis.literal("fri"), ibis.literal("sun")],
),
(
"('fri', 'sun', 'sat')",
[ibis.literal("fri"), ibis.literal("sun"), ibis.literal("sat")],
),
],
)
@assert_state_not_change
@in_list_params
def test_in_operator(forest_fires, sql: str, ibis_expr_list: List[Literal]):
"""
Test using in operator in a sql query
:return:
"""
my_table = query(
f"""
select * from forest_fires where day in {sql}
"""
)
ibis_table = forest_fires[forest_fires.day.isin(ibis_expr_list)]
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_in_operator_expression_numerical(forest_fires):
"""
Test using in operator in a sql query
:return:
"""
my_table = query(
"""
select * from forest_fires where X in (5, 9)
"""
)
ibis_table = forest_fires[forest_fires.X.isin((ibis.literal(5), ibis.literal(9)))]
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
@in_list_params
def test_not_in_operator(forest_fires, sql: str, ibis_expr_list: List[Literal]):
"""
Test using in operator in a sql query
:return:
"""
my_table = query(
f"""
select * from forest_fires where day not in {sql}
"""
)
ibis_table = forest_fires[forest_fires.day.notin(ibis_expr_list)]
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_case_statement_w_name(forest_fires):
"""
Test using case statements
:return:
"""
my_table = query(
"""
select case when wind > 5 then 'strong'
when wind = 5 then 'mid'
else 'weak' end as wind_strength
from
forest_fires
"""
)
ibis_table = forest_fires.projection(
ibis.case()
.when(forest_fires.wind > 5, "strong")
.when(forest_fires.wind == 5, "mid")
.else_("weak")
.end()
.name("wind_strength")
)
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_case_statement_w_no_name(forest_fires):
"""
Test using case statements
:return:
"""
my_table = query(
"""
select case when wind > 5 then 'strong' when wind = 5 then 'mid' else 'weak' end
from forest_fires
"""
)
ibis_table = forest_fires.projection(
ibis.case()
.when(forest_fires.wind > 5, "strong")
.when(forest_fires.wind == 5, "mid")
.else_("weak")
.end()
.name("_col0")
)
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_case_statement_w_other_columns_as_result(forest_fires):
"""
Test using case statements
:return:
"""
my_table = query(
"""
select case when wind > 5 then month when wind = 5 then 'mid' else day end
from forest_fires
"""
)
ibis_table = forest_fires.projection(
ibis.case()
.when(forest_fires.wind > 5, forest_fires.month)
.when(forest_fires.wind == 5, "mid")
.else_(forest_fires.day)
.end()
.name("_col0")
)
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
@assert_state_not_change
def test_boolean_order_of_operations_with_parens(forest_fires):
"""
Test boolean order of operations with parentheses
:return:
"""
my_table = query(
"select * from forest_fires "
"where (month = 'oct' and day = 'fri') or "
"(month = 'nov' and day = 'tue')"
)
ibis_table = forest_fires[
((forest_fires.month == "oct") & (forest_fires.day == "fri"))
| ((forest_fires.month == "nov") & (forest_fires.day == "tue"))
]
assert_ibis_equal_show_diff(ibis_table, my_table)
@assert_state_not_change
def test_case_statement_with_same_conditions(forest_fires):
"""
Test using case statements
:return:
"""
my_table = query(
"""
select case when wind > 5 then month when wind > 5 then 'mid' else day end
from forest_fires
"""
)
ibis_table = forest_fires.projection(
ibis.case()
.when(forest_fires.wind > 5, forest_fires.month)
.when(forest_fires.wind > 5, "mid")
.else_(forest_fires.day)
.end()
.name("_col0")
)
assert_ibis_equal_show_diff(ibis_table, my_table)
| [
6738,
19720,
1330,
7343,
198,
198,
11748,
24283,
271,
198,
6738,
24283,
271,
13,
31937,
13,
3575,
602,
1330,
25659,
1691,
198,
11748,
12972,
9288,
198,
198,
6738,
44161,
62,
1462,
62,
571,
271,
1330,
12405,
198,
6738,
44161,
62,
1462,
... | 2.265937 | 3,561 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.dispatch import Signal
topic_poll_pre_vote = Signal(providing_args=['poll, user'])
topic_poll_post_vote = Signal(providing_args=['poll, user'])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
6381,
17147,
1330,
26484,
628,
198,
26652,
62,
30393,
62,
3866,
62,
27... | 2.765432 | 81 |
"""Self contained util to collect number of forks of a github project."""
import json
import os
from github import Github
from cache import Cache
SCRIPT_DIR = os.path.dirname(__file__)
CONFIG_PATH = os.path.join(SCRIPT_DIR, './config.json')
CACHE = Cache(os.path.join(SCRIPT_DIR, "./forks_cache.json"))
def get_total_forks(user, project):
"""Get total number of forks of a project."""
cache_key = f"{user}/{project}"
if CACHE.has_key(cache_key):
return CACHE.get_value(cache_key)
cache_value = get_total_forks_no_cache(user, project)
CACHE.set_value(cache_key, cache_value)
return cache_value
def get_total_forks_no_cache(user, project):
"""Get total number of forks of a project without caching results."""
repo = get_repo(user, project)
if repo:
return repo.forks
return None
_GITHUB_API = None
def _get_github_api():
"""Init Github API client."""
# authenticate github api
global _GITHUB_API
if _GITHUB_API is None:
_GITHUB_API = Github(
login_or_token=config_get('github_token')
)
return _GITHUB_API
def get_repo(user, project):
"""Get repo of a repository."""
try:
return _get_github_api().get_user(user).get_repo(project)
except:
print(f"Error: could not find gh project {user}/{project}.")
return None
def config_get(config_key):
"""Get value of a configuration config_key."""
with open(CONFIG_PATH) as config_file:
config = json.load(config_file)
value = config.get(config_key)
if not value:
print("Warning: no config value found for {}.".format(config_key))
return value
| [
37811,
24704,
7763,
7736,
284,
2824,
1271,
286,
43378,
286,
257,
33084,
1628,
526,
15931,
198,
198,
11748,
33918,
198,
11748,
28686,
198,
6738,
33084,
1330,
38994,
198,
198,
6738,
12940,
1330,
34088,
198,
198,
6173,
46023,
62,
34720,
796,... | 2.502985 | 670 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from experiments.utils import get_observer
from xview.datasets import Cityscapes_GAN
from xview.datasets import get_dataset
from xview.models import get_model
from xview.settings import EXP_OUT
import os
import sacred as sc
from sacred.utils import apply_backspaces_and_linefeeds
import scipy.misc
import numpy as np
import shutil
import tensorflow as tf
import argparse
import json
import glob
import random
import collections
import math
import time
import scipy
import cv2
from copy import deepcopy
from sys import stdout
from skimage.measure import compare_ssim
a = Helper()
EPS = 1e-12
num_test_images = 20
def create_directories(run_id, experiment):
"""
Make sure directories for storing diagnostics are created and clean.
Args:
run_id: ID of the current sacred run, you can get it from _run._id in a captured
function.
experiment: The sacred experiment object
Returns:
The path to the created output directory you can store your diagnostics to.
"""
root = EXP_OUT
# create temporary directory for output files
if not os.path.exists(root):
os.makedirs(root)
# The id of this experiment is stored in the magical _run object we get from the
# decorator.
output_dir = '{}/{}'.format(root, run_id)
if os.path.exists(output_dir):
# Directory may already exist if run_id is None (in case of an unobserved
# test-run)
shutil.rmtree(output_dir)
os.mkdir(output_dir)
# Tell the experiment that this output dir is also used for tensorflow summaries
experiment.info.setdefault("tensorflow", {}).setdefault("logdirs", [])\
.append(output_dir)
return output_dir
ex = sc.Experiment()
# reduce output of progress bars
ex.captured_out_filter = apply_backspaces_and_linefeeds
ex.observers.append(get_observer())
@ex.main
if __name__ == '__main__':
ex.run_commandline()
# for some reason we have processes running in the background that won't stop
# this is the only way to kill them
os._exit(os.EX_OK)
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
25064,
198,
198,
6738,
10256,
13,
26791,
1330,
651,
62,
672,
15388,
198,
198,... | 2.938005 | 742 |
from pyramid.testing import DummyRequest
from dbas.tests.utils import TestCaseWithConfig, construct_dummy_request
from dbas.validators import core
from dbas.validators.core import validate
| [
6738,
27944,
13,
33407,
1330,
360,
13513,
18453,
198,
198,
6738,
288,
12093,
13,
41989,
13,
26791,
1330,
6208,
20448,
3152,
16934,
11,
5678,
62,
67,
13513,
62,
25927,
198,
6738,
288,
12093,
13,
12102,
2024,
1330,
4755,
198,
6738,
288,
... | 3.730769 | 52 |
import glob
import os
BASE_CONFIG = os.path.join(os.path.dirname(__file__), 'base.cfg')
| [
11748,
15095,
198,
11748,
28686,
198,
198,
33,
11159,
62,
10943,
16254,
796,
28686,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
705,
8692,
13,
37581,
11537,
628
] | 2.5 | 36 |
import torch
import torch.nn.functional as F
from torch.nn.modules.dropout import Dropout
from sig.nn.models.base_model import BaseClassifier
from torch_geometric.nn import GATConv, global_mean_pool
class GAT(BaseClassifier):
"""
Graph Attention Network.
Args:
input_size (int): Number of input node features.
output_size (int): Number of output node features.
hidden_conv_sizes (tuple of int): Output sizes for hidden convolution layers.
hidden_dropout_probs (tuple of float): Dropout probabilities after the hidden
convolution layers.
activation (torch.nn.functional): Non-linear activation function after the hidden
convolution layers.
classify_graph (boolean): Whether the model is a graph classifier. Default False
for node classifier.
lin_dropout_prob (None or float): Dropout probability after the hidden linear
layer for graph classifier. None for node classifier.
**kwargs: addtional keyword arguments for GATConv
"""
def forward(
self,
x,
edge_index,
return_all_attentions=False,
return_no_selfloop_attentions=True,
batch=None
):
"""
Forward pass.
Args:
x (torch.float): Node feature tensor with shape [num_nodes, num_node_feat].
edge_index (torch.long): Edges in COO format with shape [2, num_edges].
return_all_attentions (boolean): Whether to return attention weights across
all the layers.
return_no_selfloop_attentions (boolean): Whether to remove the self-loop
attention weights for returning all_atts.
batch (None or torch.long): Node assignment for a batch of graphs with shape
[num_nodes] for graph classification. None for node classification.
Return:
x (torch.float): Final output of the network with shape
[num_nodes, output_size].
all_atts (torch.float): Edge attention weights for all the layers with shape
[num_edges, num_layers] if return_no_selfloop_attentions is True, else with
shape [num_edges + num_nodes, num_layers].
"""
all_atts = []
for i in range(self.num_conv_layers):
x, att = self.convs[i](
x,
edge_index,
return_attention_weights=True
)
if self.classify_graph or i < self.num_conv_layers - 1:
# no activation for output conv layer in node classifier
x = self.activation(x)
x = self.dropouts[i](x)
if return_no_selfloop_attentions:
att = att[:-x.shape[0]]
all_atts.append(att)
all_atts = torch.cat(all_atts, dim=1)
if self.classify_graph:
x = global_mean_pool(x, batch)
x = self.activation(self.lin1(x))
x = self.lin_dropout(x)
x = self.lin2(x)
if return_all_attentions:
return x, all_atts
else:
return x | [
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
201,
198,
6738,
28034,
13,
20471,
13,
18170,
13,
14781,
448,
1330,
14258,
448,
201,
198,
6738,
43237,
13,
20471,
13,
27530,
13,
8692,
62,
19849,
1330,
7308,
9487,
7... | 2.152565 | 1,501 |
from filters.siat import SiatEntry, SiatListing, AvcListing, MmmListing, MmmEntry, NhlListing, LackhandEntry, LackhandListing
from filters.ssss import SsssEntry, SsssListing
from filters.veb import VebEntry, VebListing
ENTRY_FILTERS = [
LackhandEntry(),
MmmEntry(),
SiatEntry(),
SsssEntry(),
VebEntry(),
]
ENTRY_LISTINGS = [
AvcListing(),
LackhandListing(),
MmmListing(),
SiatListing(),
SsssListing(),
VebListing(),
NhlListing(), # Uses 'Next Page' for TOC, so must be after 'Previous'.
]
| [
6738,
16628,
13,
82,
5375,
1330,
311,
5375,
30150,
11,
311,
5375,
8053,
278,
11,
5184,
66,
8053,
278,
11,
337,
3020,
8053,
278,
11,
337,
3020,
30150,
11,
399,
18519,
8053,
278,
11,
38289,
4993,
30150,
11,
38289,
4993,
8053,
278,
198... | 2.627551 | 196 |
import connexion
import datetime
import json
import os
import sys
import time
import acme.challenges
import acme.client
import acme.jose
import click
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import ec, rsa
import boto3
import OpenSSL.crypto
import rfc3986
DEFAULT_ACME_DIRECTORY_URL = "https://acme-v01.api.letsencrypt.org/directory"
STAGE_ACME_DIRECTORY_URL = "https://acme-staging.api.letsencrypt.org/directory"
CERTIFICATE_EXPIRATION_THRESHOLD = datetime.timedelta(days=45)
# One day
PERSISTENT_SLEEP_INTERVAL = 60 * 60 * 24
DNS_TTL = 30
@click.group()
@cli.command(name="update-certificates")
@cli.command()
@click.argument("email")
@click.option(
"--out",
type=click.File("w"),
default="-",
help="Where to write the private key to. Defaults to stdout."
)
if __name__ == "__main__":
cli()
| [
11748,
369,
12413,
295,
198,
11748,
4818,
8079,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
640,
198,
198,
11748,
936,
1326,
13,
36747,
34120,
198,
11748,
936,
1326,
13,
16366,
198,
11748,
936,
1326,
13,
73,
577... | 2.742466 | 365 |
#!/usr/bin/env python
# coding: utf-8
# In[8]:
import pandas as pd
from sklearn import tree
# In[9]:
df = pd.read_csv("E:\GIT\-CSE-0408-Summer-2021\Final\Decision\Tahmina.csv")
# In[10]:
x = df.iloc[:,:-1]
# In[11]:
x
# In[12]:
y=df.iloc[:,3]
# In[13]:
y
# In[14]:
classify_ = tree.DecisionTreeClassifier()
# In[15]:
classify_ =classify_.fit(x,y)
# In[16]:
prediction_ = classify_.predict([[190,70,43]])
# In[17]:
prediction_
# In[ ]:
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
554,
58,
23,
5974,
628,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
1330,
5509,
628,
198,
2,
554,
58,
24,
5974,
628... | 2.060086 | 233 |
""" File to define Dataset and Dataloader"""
import os
from typing import Optional, Tuple, List, Dict
import pandas as pd
import torch
from loguru import logger
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from experiment.sentiment_tokenizer import SentimentTokenizer
DEFAULT_TRAIN_DATA_PATH: str = os.path.join(
"data", "train_clean.csv"
)
DEFAULT_VAL_DATA_PATH: str = os.path.join("data", "val_clean.csv")
LABEL_TO_ID: Dict = {"negative": 0, "neutral": 1, "positive": 2}
| [
37811,
9220,
284,
8160,
16092,
292,
316,
290,
360,
10254,
1170,
263,
37811,
198,
11748,
28686,
198,
6738,
19720,
1330,
32233,
11,
309,
29291,
11,
7343,
11,
360,
713,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
28034,
198,
6... | 2.968085 | 188 |
import numpy as np
import gym
from lagom.envs.spaces import Discrete
from lagom.envs.spaces import Box
from lagom.envs import Env
from lagom.envs import make_gym_env
from lagom.envs.wrappers import Wrapper
from lagom.envs.wrappers import GymWrapper
from lagom.envs.wrappers import FrameStack
from lagom.envs.wrappers import RewardScale
# FlattenDictWrapper requires Mujoco, so omitted from test
| [
11748,
299,
32152,
355,
45941,
198,
198,
11748,
11550,
198,
198,
6738,
19470,
296,
13,
268,
14259,
13,
2777,
2114,
1330,
8444,
8374,
198,
6738,
19470,
296,
13,
268,
14259,
13,
2777,
2114,
1330,
8315,
198,
198,
6738,
19470,
296,
13,
26... | 2.949275 | 138 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-02-06 09:32
from __future__ import unicode_literals
from django.db import migrations, models
"""
This migration was triggered by black formatting. It doesn't actually change anything
to our models, only text that was formatted differently. Please ignore this as it is
merely cosmetic (and the easiest way to get rid of those false migrations).
"""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
1157,
13,
1983,
319,
12131,
12,
2999,
12,
3312,
7769,
25,
2624,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198... | 3.654867 | 113 |
#Main Program for encryption.
from sys import argv
import os
import sys
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa, padding
from cryptography.hazmat.primitives import hashes
from cryptography.exceptions import InvalidSignature
script, e_or_d, public_key_file, private_key_file, plain_text_file, cipher_text_file = argv
backend = default_backend()
#Generating a random symmetric key and IV
#Symmetric Encryption function
#Symmetric Decryption function
#Asymmetric Encryption function (RSA)
#Assymmetric Decryption function (RSA)
#Digital Signing function
if __name__ == "__main__":
main(argv[1:]) | [
2,
13383,
6118,
329,
15835,
13,
201,
198,
201,
198,
6738,
25064,
1330,
1822,
85,
201,
198,
11748,
28686,
201,
198,
11748,
25064,
201,
198,
6738,
45898,
13,
71,
1031,
6759,
13,
19795,
20288,
13,
66,
541,
7084,
1330,
44334,
11,
16113,
... | 2.848387 | 310 |
import torch
import torch.nn as nn
import torch.nn.functional as F
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
628,
628,
628,
628,
198
] | 3.125 | 24 |
# test script to preform prediction on test images inside
# dataset/test/
# -- image_1.jpg
# -- image_2.jpg
# ...
# organize imports
from __future__ import print_function
# keras imports
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.applications.vgg19 import VGG19, preprocess_input
from keras.applications.xception import Xception, preprocess_input
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
from keras.applications.mobilenet import MobileNet, preprocess_input
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.preprocessing import image
from keras.models import Model
from keras.models import model_from_json
from keras.layers import Input
# other imports
from sklearn.linear_model import LogisticRegression
import numpy as np
import os
import json
import pickle
import cv2
# load the user configs
with open('conf/conf.json') as f:
config = json.load(f)
# config variables
model_name = config["model"]
weights = config["weights"]
include_top = config["include_top"]
train_path = config["train_path"]
test_path = config["test_path"]
features_path = config["features_path"]
labels_path = config["labels_path"]
test_size = config["test_size"]
results = config["results"]
model_path = config["model_path"]
seed = config["seed"]
classifier_path = config["classifier_path"]
# load the trained logistic regression classifier
print ("[INFO] loading the classifier...")
classifier = pickle.load(open(classifier_path, 'rb'))
# pretrained models needed to perform feature extraction on test data too!
if model_name == "vgg16":
base_model = VGG16(weights=weights)
model = Model(input=base_model.input, output=base_model.get_layer('fc1').output)
image_size = (224, 224)
elif model_name == "vgg19":
base_model = VGG19(weights=weights)
model = Model(input=base_model.input, output=base_model.get_layer('fc1').output)
image_size = (224, 224)
elif model_name == "resnet50":
base_model = ResNet50(weights=weights)
model = Model(input=base_model.input, output=base_model.get_layer('flatten').output)
image_size = (224, 224)
elif model_name == "inceptionv3":
base_model = InceptionV3(include_top=include_top, weights=weights, input_tensor=Input(shape=(299,299,3)))
model = Model(input=base_model.input, output=base_model.get_layer('custom').output)
image_size = (299, 299)
elif model_name == "inceptionresnetv2":
base_model = InceptionResNetV2(include_top=include_top, weights=weights, input_tensor=Input(shape=(299,299,3)))
model = Model(input=base_model.input, output=base_model.get_layer('custom').output)
image_size = (299, 299)
elif model_name == "mobilenet":
base_model = MobileNet(include_top=include_top, weights=weights, input_tensor=Input(shape=(224,224,3)), input_shape=(224,224,3))
model = Model(inputs=[base_model.input], outputs=[base_model.layers[-1].output])
image_size = (224, 224)
elif model_name == "xception":
base_model = Xception(weights=weights)
model = Model(input=[base_model.input], output=[base_model.get_layer('avg_pool').output])
image_size = (299, 299)
else:
base_model = None
# get all the train labels
train_labels = os.listdir(train_path)
print("done here"*50)
# get all the test images paths
test_images = os.listdir(test_path)
# loop through each image in the test data
for image_path in test_images:
path = test_path + "/" + image_path
img = image.load_img(path, target_size=image_size)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
feature = model.predict(x)
flat = feature.flatten()
flat = np.expand_dims(flat, axis=0)
preds = classifier.predict(flat)
prediction = train_labels[preds[0]]
# perform prediction on test image
print ("I think it is a " + train_labels[preds[0]])
#img_color = cv2.imread(path, 1)
#cv2.putText(img_color, "I think it is a " + prediction, (140,445), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
#cv2.imshow("test", img_color)
# key tracker
#key = cv2.waitKey(0) & 0xFF
#if (key == ord('q')):
# cv2.destroyAllWindows() | [
2,
1332,
4226,
284,
662,
687,
17724,
319,
1332,
4263,
2641,
220,
201,
198,
2,
27039,
14,
9288,
14,
201,
198,
2,
220,
220,
1377,
2939,
62,
16,
13,
9479,
201,
198,
2,
220,
220,
1377,
2939,
62,
17,
13,
9479,
201,
198,
2,
220,
220... | 2.626838 | 1,632 |
"""
Question Source:Leetcode
Level: Hard
Topic: Binary search
Solver: Tayyrov
Date: 31.03.2022
"""
from typing import List
| [
37811,
201,
198,
24361,
8090,
25,
3123,
316,
8189,
201,
198,
4971,
25,
6912,
201,
198,
33221,
25,
45755,
2989,
201,
198,
50,
14375,
25,
25569,
88,
18657,
201,
198,
10430,
25,
3261,
13,
3070,
13,
1238,
1828,
201,
198,
37811,
201,
198... | 2.64 | 50 |
import os
| [
11748,
28686,
628
] | 3.666667 | 3 |
from os.path import join
import matplotlib.pyplot as plt
from Tests import save_validation_path as save_path
from Tests import TEST_DATA_DIR
from pyleecan.Classes.Simu1 import Simu1
from pyleecan.Classes.DXFImport import DXFImport
from pyleecan.Classes.InputCurrent import InputCurrent
from pyleecan.Classes.MagFEMM import MagFEMM
import pytest
from pyleecan.Functions.load import load
from pyleecan.definitions import DATA_DIR
@pytest.mark.long
@pytest.mark.validation
@pytest.mark.FEMM
def test_FEMM_import_dxf():
"""Validation of the TOYOTA Prius 2004 interior magnet (V shape) with distributed winding
50 kW peak, 400 Nm peak at 1500 rpm from publication
from publication
Z. Yang, M. Krishnamurthy and I. P. Brown,
"Electromagnetic and vibrational characteristic of IPM over full torque-speed range,"
Electric Machines & Drives Conference (IEMDC), 2013 IEEE International, Chicago, IL, 2013, pp. 295-302.
Test compute the Flux in FEMM, with and without DXF Import
"""
IPMSM_A = load(join(DATA_DIR, "Machine", "IPMSM_A.json"))
simu = Simu1(name="FEMM_import_dxf", machine=IPMSM_A)
# Definition of the magnetic simulation (FEMM with symmetry and sliding band)
simu.mag = MagFEMM(
type_BH_stator=0,
type_BH_rotor=0,
is_periodicity_a=True,
Kgeo_fineness=0.75,
)
# Run only Magnetic module
simu.force = None
simu.struct = None
simu.input = InputCurrent()
simu.input.Id_ref = -100 # [A]
simu.input.Iq_ref = 200 # [A]
simu.input.Nt_tot = 1 # Number of time step
simu.input.Na_tot = 2048 # Spatial discretization
simu.input.N0 = 2000 # Rotor speed [rpm]
simu.input.rot_dir = 1 # To enforce the rotation direction
# DXF import setup
simu.mag.rotor_dxf = DXFImport(
file_path=join(TEST_DATA_DIR, "prius_test.dxf").replace("\\", "/")
)
# Set each surface name
surf_dict = dict()
surf_dict[0.0546 + 1j * 0.0224] = "Lamination_Rotor_Bore_Radius_Ext"
surf_dict[0.0763 + 0.00867j] = "Hole_Rotor_R0_T0_S0"
surf_dict[0.0669 + 0.01668j] = "HoleMagnet_Rotor_Parallel_N_R0_T0_S0"
surf_dict[0.0614 + 0.0254j] = "Hole_Rotor_R0_T1_S0"
surf_dict[0.0591 + 0.03555j] = "HoleMagnet_Rotor_Parallel_N_R0_T1_S0"
surf_dict[0.06009 + 0.0478j] = "Hole_Rotor_R0_T2_S0"
simu.mag.rotor_dxf.surf_dict = surf_dict
# Set every BC
BC_list = list()
BC_list.append((0.0489 + 1j * 0.0489, False, "bc_r1"))
BC_list.append((0, True, "bc_A0"))
BC_list.append((0.067, False, "bc_r1"))
simu.mag.rotor_dxf.BC_list = BC_list
# Run DXF simulation
out = simu.run()
# Run Normal simulation
simu2 = simu.copy()
simu2.mag.rotor_dxf = None
out2 = simu2.run()
# Plot/compare the flux
out.plot_2D_Data(
"mag.B",
"angle",
data_list=[out2.mag.B],
legend_list=["Rotor from DXF", "Rotor from pyleecan"],
save_path=join(save_path, "FEMM_import_dxf_B.png"),
)
| [
6738,
28686,
13,
6978,
1330,
4654,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
30307,
1330,
3613,
62,
12102,
341,
62,
6978,
355,
3613,
62,
6978,
198,
6738,
30307,
1330,
43001,
62,
26947,
62,
34720,
198,
67... | 2.26435 | 1,324 |
import datetime
from erpres import attempt | [
11748,
4818,
8079,
198,
6738,
1931,
18302,
1330,
2230
] | 4.666667 | 9 |
"""
Abero Runner
(c) 2020 Rodney Maniego Jr.
File analysis tool
"""
import sys
import argparse
import abero
from arkivist import Arkivist
if __name__ == "__main__":
runner() | [
37811,
198,
220,
220,
220,
27700,
78,
21529,
198,
220,
220,
220,
357,
66,
8,
12131,
34895,
1869,
494,
2188,
7504,
13,
198,
220,
220,
220,
9220,
3781,
2891,
198,
37811,
198,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
198,
11748,
... | 2.71831 | 71 |
import io
import time
import requests
import pandas as pd
import tensordata.gfile as gfile
from tensordata.utils._utils import assert_dirs
from linora.image import save_image, array_to_image
__all__ = ['mnist_tibetan']
def mnist_tibetan(root):
    """Tibetan-MNIST from https://github.com/bat67/TibetanMNIST.

    Tibetan-MNIST is a drop-in replacement for the MNIST dataset
    (28x28 grayscale, 70,000 images). The two csv shards hosted on the
    Hourout/datasets mirror are downloaded, decoded into 28x28 images and
    written out one png per sample, grouped by label:

        `root/mnist_tibetan/train/<label>/<index>.png`

    Attention: if dirs `root/mnist_tibetan` exist, api will delete and
    recreate them.

    Args:
        root: str, absolute path of the data directory,
              e.g. `/user/.../mydata`.
    Returns:
        str, absolute path of the dataset directory `root/mnist_tibetan`.
    """
    start = time.time()
    print('Downloading data from https://github.com/Hourout/datasets/tree/master/TibetanMNIST')
    task_path = assert_dirs(root, 'mnist_tibetan')
    url_list = ['https://raw.githubusercontent.com/Hourout/datasets/master/TibetanMNIST/TibetanMNIST_28_28_01.csv',
                'https://raw.githubusercontent.com/Hourout/datasets/master/TibetanMNIST/TibetanMNIST_28_28_02.csv']
    # Fetch both shards and stack them into a single frame; column 0 holds
    # the label, columns 1..784 hold the flattened pixels.
    data = pd.DataFrame()
    for url in url_list:
        payload = requests.get(url).content
        shard = pd.read_csv(io.StringIO(payload.decode('utf-8')), header=None, dtype='uint8')
        data = pd.concat([data, shard])
    images = data.loc[:, 1:].values.reshape(-1, 28, 28)
    labels = data.loc[:, 0].values
    # One directory per distinct label
    for label in set(labels):
        gfile.makedirs(gfile.path_join(task_path, 'train', str(label)))
    # One png per sample, named by its row index
    for idx, image in enumerate(images):
        target = gfile.path_join(task_path, 'train', str(labels[idx]), str(idx)+'.png')
        save_image(target, array_to_image(image.reshape(28, 28, 1)))
    print('mnist_tibetan dataset download completed, run time %d min %.2f sec' % divmod((time.time()-start), 60))
    return task_path
| [
11748,
33245,
198,
11748,
640,
198,
198,
11748,
7007,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
11192,
585,
1045,
13,
70,
7753,
355,
308,
7753,
198,
6738,
11192,
585,
1045,
13,
26791,
13557,
26791,
1330,
6818,
62,
15908,
82,
1... | 2.344037 | 1,090 |
from enum import Enum
import re
| [
6738,
33829,
1330,
2039,
388,
198,
11748,
302,
628
] | 3.666667 | 9 |
# https://leetcode.com/problems/open-the-lock
from typing import List
from collections import defaultdict, deque
| [
2,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
9654,
12,
1169,
12,
5354,
198,
198,
6738,
19720,
1330,
7343,
198,
6738,
17268,
1330,
4277,
11600,
11,
390,
4188,
628
] | 3.484848 | 33 |
# SPDX-FileCopyrightText: 2018 Kattni Rembor for Adafruit Industries
#
# SPDX-License-Identifier: MIT
# CircuitPython AnalogIn Demo
import time
import board
from analogio import AnalogIn
analog_in = AnalogIn(board.A1)  # ADC input on pin A1
# NOTE(review): get_voltage() is defined elsewhere in the original demo
# (not visible in this excerpt); presumably it scales the raw 16-bit ADC
# reading to volts — confirm.
while True:
    # printed as a 1-tuple; presumably so serial plotters can parse it
    print((get_voltage(analog_in),))
    time.sleep(0.1)  # sample roughly 10 times per second
| [
2,
30628,
55,
12,
8979,
15269,
8206,
25,
2864,
509,
1078,
8461,
3982,
2865,
329,
1215,
1878,
4872,
20171,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
198,
2,
13588,
37906,
50088,
818,
34588,
198,
198,
1174... | 2.773585 | 106 |
# Generated by Django 2.0.4 on 2018-05-25 08:19
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
19,
319,
2864,
12,
2713,
12,
1495,
8487,
25,
1129,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.84375 | 32 |
from PyQt5 import QtCore
from model import group
class GroupsModel(QtCore.QAbstractListModel):
    '''
    An item model for a groups combobox.

    Items are mostly represented as strings (captions).
    '''
    # a feedback signal for the window - if new filter rules means that inputs should change
    # this feedback will be used to notify the mainwindow
    feedback = QtCore.pyqtSignal(str, str)
    def favorite(self, row):
        '''
        Make or un-make the Group at the given list index a favourite.

        Returns the new index of the group within the list, or -1 if the
        index was illegal, i.e. it referred to the "all tracks" group.
        '''
        # NOTE(review): self.dbmodel is assigned outside this excerpt
        # (presumably in __init__); it provides canfavorite()/favorite()
        # — confirm.
        if not self.dbmodel.canfavorite(row):
            return -1
        # Wrap the change in begin/endResetModel so attached views reload
        # their entire contents (favouriting reorders the list).
        self.beginResetModel()
        newrow = self.dbmodel.favorite(row)
        self.endResetModel()
        return newrow
| [
6738,
9485,
48,
83,
20,
1330,
33734,
14055,
198,
198,
6738,
2746,
1330,
1448,
198,
198,
4871,
27441,
17633,
7,
48,
83,
14055,
13,
48,
23839,
8053,
17633,
2599,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
281,
2378,
2746,
329,... | 2.725552 | 317 |
# @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
import random
from stack.api import Call, ReturnCode
# Shared fixtures for the attribute scoping tests.
# NOTE(review): HOST and ENVIRONMENT are not referenced in this excerpt;
# presumably other tests in the original module use them — confirm.
HOST = 'backend-1000-0'
ENVIRONMENT = 'pytest'
def test_attr(table=None, owner=None):
	"""
	Tests the scoping rules for attributes. Called without arguments
	this tests the global attributes.

	table = os | environment | appliance | host
	owner = osname | environmentname | appliancename | hostname
	"""
	table = table if table else ''
	owner = owner if owner else ''

	attr = 'a.b.c.d'

	# the remove/list commands all take the same owner/attr argument list
	args = ('%s attr=%s' % (owner, attr)).split()

	# start from a clean slate
	result = Call('remove %s attr' % table, args)
	assert ReturnCode() == 0 and result == []

	# set the attribute to a random value in [0, 100]
	value = str(random.randint(0, 100))
	result = Call('set %s attr' % table,
		      ('%s attr=%s value=%s' % (owner, attr, value)).split())
	assert ReturnCode() == 0 and result == []

	# the attribute should now list exactly once with the value we set
	listing = Call('list %s attr' % table, args)
	assert ReturnCode() == 0 and len(listing) == 1
	assert listing[0]['attr'] == attr
	assert listing[0]['value'] == value

	# clean up after ourselves
	result = Call('remove %s attr' % table, args)
	assert ReturnCode() == 0 and result == []
| [
2,
2488,
22163,
4766,
31,
198,
2,
15069,
357,
66,
8,
4793,
532,
13130,
3813,
14706,
198,
2,
1439,
2489,
10395,
13,
23881,
72,
7,
81,
8,
410,
20,
13,
87,
8931,
72,
13,
785,
198,
2,
3740,
1378,
12567,
13,
785,
14,
15156,
14706,
... | 2.692308 | 468 |
"""Defines the Calc objects which are used to run calculations
The purpose of the concrete classes is to handle all of the specificities of
each program so as to be able to call a calculation and read its output in
the same way from the main program regardless of which program it is.
As such if any clunky code is necessary due to an external program's specific
preferences, it should be contained here in its Calc object member methods.
"""
import subprocess
import os
from fromage.utils.mol import Mol
from fromage.io import edit_file as ef
from fromage.io import read_file as rf
from fromage.utils import array_operations as ao
bohrconv = 1.88973 # Something in Angstrom * bohrconv = Something in Bohr
def setup_calc(calc_name, calc_type):
    """
    Return a calculation object of the correct subclass.

    Parameters
    ----------
    calc_name : str
        Name of the calculation, typically rl, ml, mh or mg
    calc_type : str
        Case-insensitive program keyword, e.g. "gaussian" or "turbomole"

    Returns
    -------
    Calc
        An instance of the Calc subclass matching calc_type

    Raises
    ------
    ValueError
        If calc_type does not name a known program
    """
    calc_type = calc_type.lower()
    calc_types = {"gaussian" : Gauss_calc,
                  "gaussian_cas" : Gauss_CAS_calc,
                  "molcas" : Molcas_calc,
                  "turbomole" : Turbo_calc,
                  "turbomole_scf" : Turbo_SCF_calc,
                  "turbomole_tddft" : Turbo_calc_TDDFT,
                  "dftb" : DFTB_calc}
    try:
        out_calc = calc_types[calc_type](calc_name)
    except KeyError:
        # The original printed a (typo'd) message and then crashed with an
        # UnboundLocalError on the return below; fail loudly instead.
        raise ValueError("Unrecognised program: " + calc_type)
    return out_calc
class Calc(object):
    """
    Abstract class for calculation objects

    Subclasses drive one specific external program and must implement
    run() and read_out().

    Attributes
    ----------
    calc_name : str
        Name of the calculation, typically rl, ml, mh or mg
    here : str
        Absolute path of the base directory the calculation runs from

    """

    def __init__(self, calc_name_in=None, in_here=None):
        """Set the calculation name and base directory.

        The base directory defaults to the current working directory at
        call time. (The original used `in_here=os.getcwd()` as the default,
        which is evaluated once at class-definition time and goes stale if
        the process changes directory after import.)
        """
        self.calc_name = calc_name_in
        self.here = os.getcwd() if in_here is None else in_here

    def run(self, atoms):
        """
        Write all of the variable inputs necessary for one calculation
        """
        raise NotImplementedError("Please Implement this method")

    def read_out(self, positions, in_mol=None, in_shell=None):
        """
        Read the output of the calculation and sometimes update the geom_*.xyz files
        """
        raise NotImplementedError("Please Implement this method")

    @staticmethod
    def _atom_line(atom):
        """Return one fixed-width xyz line ("  elem  x  y  z\\n") for an atom."""
        return "{:>6} {:10.6f} {:10.6f} {:10.6f}".format(
            atom.elem, atom.x, atom.y, atom.z) + "\n"

    def update_geom(self, positions, in_mol, in_shell):
        """
        Update the geom_mol.xyz and geom_cluster.xyz files

        Both files are opened in append mode, so one frame is added per
        call and the files serve as a geometry log.

        Parameters
        ----------
        positions : list of floats
            List of atomic coordinates
        in_mol : list of Atom objects
            Atoms in the inner region
        in_shell : list of Atom objects
            Atoms in the middle region

        """
        subdir = os.getcwd()
        os.chdir(self.here)
        # the inner region only
        with open("geom_mol.xyz", "a") as geom_m_file:
            geom_m_file.write(str(len(in_mol)) + "\n")
            geom_m_file.write(self.calc_name + "\n")
            for atom in ao.array2atom(in_mol, positions):
                geom_m_file.write(self._atom_line(atom))
        # the inner and middle regions
        with open("geom_cluster.xyz", "a") as geom_c_file:
            geom_c_file.write(
                str(int((len(positions) / 3) + len(in_shell))) + "\n")
            geom_c_file.write(self.calc_name + "\n")
            for atom in ao.array2atom(in_mol, positions):
                geom_c_file.write(self._atom_line(atom))
            for atom in in_shell:
                geom_c_file.write(self._atom_line(atom))
        os.chdir(subdir)
        return
class DFTB_calc(Calc):
    """
    Calculation of DFTB+ tested with v18.2
    """

    def run(self, atoms):
        """
        Write a DFTB .gen file and return a subprocess.Popen

        Parameters
        ----------
        atoms : list of Atom objects
            Atoms to be calculated with DFTB+
        Returns
        -------
        proc : subprocess.Popen object
            the object should have a .wait() method
        """
        dftb_path = os.path.join(self.here, self.calc_name)
        os.chdir(dftb_path)

        mol = Mol(atoms)
        # optionally append a second region kept in r2.xyz
        region_2_file = "r2.xyz"
        if os.path.exists(region_2_file):
            mol_r2 = rf.mol_from_file(region_2_file)
            mol += mol_r2
        mol.write_xyz("geom.xyz")
        # convert the xyz geometry to the DFTB+ .gen format
        subprocess.call("xyz2gen geom.xyz", shell=True)
        # Run DFTB+
        proc = subprocess.Popen("dftb+ > dftb_out", shell=True)
        os.chdir(self.here)

        return proc

    def read_out(self, positions, in_mol=None, in_shell=None):
        """
        Analyse a DFTB+ detailed.out file while printing geometry updates

        To update the geom files, include in_mol and in_shell

        Parameters
        ----------
        positions : list of floats
            List of atomic coordinates, important for truncation of gradients
            if too many are calculated
        in_mol : list of Atom objects, optional
            Atoms in the inner region. Include to write geom files
        in_shell : list of Atom objects, optional
            Atoms in the middle region. Include to write geom files
        Returns
        -------
        energy : float
            Energy calculated by DFTB+ in Hartree
        gradients : list of floats
            The gradients in form x1,y1,z1,x2,y2,z2 etc. in Hartree/Angstrom
        scf_energy : float
            The ground state energy in Hartree
        """
        dftb_path = os.path.join(self.here, self.calc_name)
        os.chdir(dftb_path)

        energy, gradients_b, scf_energy = rf.read_dftb_out("detailed.out")
        # fix gradients units to Hartree/Angstrom
        gradients = gradients_b * bohrconv
        # update the geometry log (identity test: `!= None` in the original
        # relied on Mol/list equality semantics)
        if in_mol is not None:
            self.update_geom(positions, in_mol, in_shell)

        # truncate gradients if too long
        gradients = gradients[:len(positions)]

        os.chdir(self.here)

        return (energy, gradients, scf_energy)
class Gauss_calc(Calc):
    """
    Calculation with Gaussian 09
    """

    def run(self, atoms):
        """
        Write a Gaussian input file and return a subprocess.Popen

        Parameters
        ----------
        atoms : list of Atom objects
            Atoms to be calculated with Gaussian
        Returns
        -------
        proc : subprocess.Popen object
            the object should have a .wait() method
        """
        gauss_path = os.path.join(self.here, self.calc_name)
        os.chdir(gauss_path)

        # <calc_name>.temp presumably supplies the fixed part of the input
        ef.write_gauss(self.calc_name + ".com", atoms,
                       [], self.calc_name + ".temp")
        # ${FRO_GAUSS} is expected to name the Gaussian executable
        proc = subprocess.Popen(
            "${FRO_GAUSS} " + self.calc_name + ".com", shell=True)
        os.chdir(self.here)

        return proc

    def read_out(self, positions, in_mol=None, in_shell=None):
        """
        Analyse a Gaussian .chk file while printing geometry updates

        To update the geom files, include in_mol and in_shell

        Parameters
        ----------
        positions : list of floats
            List of atomic coordinates, important for truncation of gradients
            if too many are calculated
        in_mol : list of Atom objects, optional
            Atoms in the inner region
        in_shell : list of Atom objects, optional
            Atoms in the middle region
        Returns
        -------
        energy : float
            Energy calculated by Gaussian in Hartree
        gradients : list of floats
            The gradients in form x1,y1,z1,x2,y2,z2 etc. in Hartree/Angstrom
        scf_energy : float
            The ground state energy in Hartree
        """
        gauss_path = os.path.join(self.here, self.calc_name)
        os.chdir(gauss_path)

        # stdout=FNULL to not have to read the output of formchk; the with
        # block also closes the devnull handle, which the original leaked
        with open(os.devnull, 'w') as FNULL:
            subprocess.call("formchk gck.chk", stdout=FNULL, shell=True)
        energy, gradients_b, scf_energy = rf.read_fchk("gck.fchk")
        # fix gradients units to Hartree/Angstrom
        gradients = gradients_b * bohrconv
        # update the geometry log
        if in_mol is not None:
            self.update_geom(positions, in_mol, in_shell)

        # truncate gradients if too long
        gradients = gradients[:len(positions)]

        os.chdir(self.here)

        return (energy, gradients, scf_energy)

    def read_out_mol(self, pop="EPS"):
        """Read the output log file and return Mol

        The pop argument is currently unused in this excerpt; it is kept
        to preserve the public signature.
        """
        gauss_path = os.path.join(self.here, self.calc_name)
        os.chdir(gauss_path)
        out_mol = rf.mol_from_gauss(self.calc_name + ".log")
        os.chdir(self.here)
        return out_mol
class Gauss_CAS_calc(Calc):
    """
    Calculation with Gaussian 09 for CAS calculations
    """

    def run(self, atoms):
        """
        Write a Gaussian input file and return a subprocess.Popen

        Parameters
        ----------
        atoms : list of Atom objects
            Atoms to be calculated with Gaussian
        Returns
        -------
        proc : subprocess.Popen object
            the object should have a .wait() method
        """
        gauss_path = os.path.join(self.here, self.calc_name)
        os.chdir(gauss_path)

        # <calc_name>.temp presumably supplies the fixed part of the input
        ef.write_gauss(self.calc_name + ".com", atoms,
                       [], self.calc_name + ".temp")
        # ${FRO_GAUSS} is expected to name the Gaussian executable
        proc = subprocess.Popen(
            "${FRO_GAUSS} " + self.calc_name + ".com", shell=True)
        os.chdir(self.here)

        return proc

    def read_out(self, positions, in_mol=None, in_shell=None):
        """
        Analyse a Gaussian CAS log file while printing geometry updates

        To update the geom files, include in_mol and in_shell

        Parameters
        ----------
        positions : list of floats
            List of atomic coordinates, important for truncation of gradients
            if too many are calculated
        in_mol : list of Atom objects, optional
            Atoms in the inner region
        in_shell : list of Atom objects, optional
            Atoms in the middle region
        Returns
        -------
        energy_e, grad_e, energy_g, grad_g : floats and lists of floats
            The two energies (Hartree) and gradient arrays
            (Hartree/Angstrom, form x1,y1,z1,x2,y2,z2 ...) returned by
            rf.read_g_cas; judging by the _e/_g naming these are the
            excited and ground states — confirm in fromage.io.read_file.
        """
        gauss_path = os.path.join(self.here, self.calc_name)
        os.chdir(gauss_path)

        energy_e, grad_e, energy_g, grad_g = rf.read_g_cas(
            self.calc_name + ".log")
        # fix gradients units to Hartree/Angstrom
        grad_e = grad_e * bohrconv
        grad_g = grad_g * bohrconv
        # update the geometry log
        if in_mol is not None:
            self.update_geom(positions, in_mol, in_shell)

        # truncate gradients if too long
        grad_e = grad_e[:len(positions)]
        grad_g = grad_g[:len(positions)]

        os.chdir(self.here)

        return (energy_e, grad_e, energy_g, grad_g)
def turbo_redefine(atoms):
    """Update Turbomole mos and run actual.

    Writes a fresh coord file, regenerates the molecular orbitals by
    feeding an extended Hueckel guess script to `define`, then refreshes
    the control file with `actual -r`.

    Parameters
    ----------
    atoms : list of Atom objects
        Geometry to write to the Turbomole coord file
    """
    ef.write_coord(atoms)
    # Update mos: remove the old orbitals so define regenerates them
    subprocess.call("rm -f mos", shell=True)
    with open("define_feed", "w") as tmp_def_in:
        # define input for Huckel guess
        tmp_def_in.write("\n\n\neht\n\n\n\n\n\n\n\n*\n\n")
    # The with block closes the devnull handle, which the original leaked
    with open(os.devnull, 'w') as FNULL:
        subprocess.call("define < define_feed", stdout=FNULL, shell=True)
    subprocess.call("rm -f define_feed", shell=True)
    subprocess.call("actual -r", shell=True)
    return
class Turbo_calc_TDDFT(Calc):
    """
    Calculation of TDDFT energy and gradients with Turbomole 7.0
    """

    def run(self, atoms):
        """
        Write a Turbomole coord file and return a subprocess.Popen

        Parameters
        ----------
        atoms : list of Atom objects
            Atoms to be calculated with Turbomole
        Returns
        -------
        proc : subprocess.Popen object
            the object should have a .wait() method
        """
        turbo_path = os.path.join(self.here, self.calc_name)
        os.chdir(turbo_path)

        turbo_redefine(atoms)

        # Run Turbomole: ground state SCF (dscf) then excited state
        # gradients (egrad). The with block closes the devnull handle,
        # which the original leaked; the child keeps its own copy.
        with open(os.devnull, 'w') as FNULL:
            proc = subprocess.Popen(
                "dscf > dscf.out && egrad > grad.out", stdout=FNULL, shell=True)
        os.chdir(self.here)

        return proc

    def read_out(self, positions, in_mol=None, in_shell=None):
        """
        Analyse a Turbomole grad.out file while printing geometry updates

        To update the geom files, include in_mol and in_shell

        Parameters
        ----------
        positions : list of floats
            List of atomic coordinates, important for truncation of gradients
            if too many are calculated
        in_mol : list of Atom objects, optional
            Atoms in the inner region. Include to write geom files
        in_shell : list of Atom objects, optional
            Atoms in the middle region. Include to write geom files
        Returns
        -------
        energy : float
            Energy calculated by Turbomole in Hartree
        gradients : list of floats
            The gradients in form x1,y1,z1,x2,y2,z2 etc. in Hartree/Angstrom
        scf_energy : float
            The ground state energy in Hartree
        """
        turbo_path = os.path.join(self.here, self.calc_name)
        os.chdir(turbo_path)

        energy, gradients_b, scf_energy = rf.read_tb_grout("grad.out")
        # fix gradients units to Hartree/Angstrom
        gradients = gradients_b * bohrconv
        # update the geometry log
        if in_mol is not None:
            self.update_geom(positions, in_mol, in_shell)

        # truncate gradients if too long
        gradients = gradients[:len(positions)]

        os.chdir(self.here)

        return (energy, gradients, scf_energy)
class Turbo_calc(Calc):
    """
    Calculation of CC2 with Turbomole 7.0
    """

    def run(self, atoms):
        """
        Write a Turbomole coord file and return a subprocess.Popen

        Parameters
        ----------
        atoms : list of Atom objects
            Atoms to be calculated with Turbomole
        Returns
        -------
        proc : subprocess.Popen object
            the object should have a .wait() method
        """
        turbo_path = os.path.join(self.here, self.calc_name)
        os.chdir(turbo_path)

        turbo_redefine(atoms)

        # Run Turbomole: ground state SCF (dscf) then the CC2 module
        # (ricc2). The with block closes the devnull handle, which the
        # original leaked; the child keeps its own copy.
        with open(os.devnull, 'w') as FNULL:
            proc = subprocess.Popen(
                "dscf > dscf.out && ricc2 > ricc2.out", stdout=FNULL, shell=True)
        os.chdir(self.here)

        return proc

    def read_out(self, positions, in_mol=None, in_shell=None):
        """
        Analyse a Turbomole ricc2.out file while printing geometry updates

        To update the geom files, include in_mol and in_shell

        Parameters
        ----------
        positions : list of floats
            List of atomic coordinates, important for truncation of gradients
            if too many are calculated
        in_mol : list of Atom objects, optional
            Atoms in the inner region. Include to write geom files
        in_shell : list of Atom objects, optional
            Atoms in the middle region. Include to write geom files
        Returns
        -------
        energy : float
            Energy calculated by Turbomole in Hartree
        gradients : list of floats
            The gradients in form x1,y1,z1,x2,y2,z2 etc. in Hartree/Angstrom
        scf_energy : float
            The ground state energy in Hartree
        """
        turbo_path = os.path.join(self.here, self.calc_name)
        os.chdir(turbo_path)

        energy, gradients_b, scf_energy = rf.read_ricc2("ricc2.out")
        # fix gradients units to Hartree/Angstrom
        gradients = gradients_b * bohrconv
        # update the geometry log
        if in_mol is not None:
            self.update_geom(positions, in_mol, in_shell)

        # truncate gradients if too long
        gradients = gradients[:len(positions)]

        os.chdir(self.here)

        return (energy, gradients, scf_energy)
class Turbo_SCF_calc(Calc):
    """
    Calculation of SCF like DFT or HF with Turbomole 7.0
    """

    def run(self, atoms):
        """
        Write a Turbomole coord file and return a subprocess.Popen

        Parameters
        ----------
        atoms : list of Atom objects
            Atoms to be calculated with Turbomole
        Returns
        -------
        proc : subprocess.Popen object
            the object should have a .wait() method
        """
        turbo_path = os.path.join(self.here, self.calc_name)
        os.chdir(turbo_path)

        turbo_redefine(atoms)

        # Run Turbomole: SCF (dscf) followed by ground state gradients
        # (grad). The with block closes the devnull handle, which the
        # original leaked; the child keeps its own copy.
        with open(os.devnull, 'w') as FNULL:
            proc = subprocess.Popen(
                "dscf > dscf.out && grad > grad.out", stdout=FNULL, shell=True)
        os.chdir(self.here)

        return proc

    def read_out(self, positions, in_mol=None, in_shell=None):
        """
        Analyse a Turbomole gradient file while printing geometry updates

        To update the geom files, include in_mol and in_shell

        Parameters
        ----------
        positions : list of floats
            List of atomic coordinates, important for truncation of gradients
            if too many are calculated
        in_mol : list of Atom objects, optional
            Atoms in the inner region. Include to write geom files
        in_shell : list of Atom objects, optional
            Atoms in the middle region. Include to write geom files
        Returns
        -------
        energy : float
            Energy calculated by Turbomole in Hartree
        gradients : list of floats
            The gradients in form x1,y1,z1,x2,y2,z2 etc. in Hartree/Angstrom
        scf_energy : float
            The ground state energy in Hartree
        """
        turbo_path = os.path.join(self.here, self.calc_name)
        os.chdir(turbo_path)

        energy, gradients_b = rf.read_tbgrad("gradient")
        # an SCF run has no separate excited state: the ground state
        # energy is the energy
        scf_energy = energy
        # fix gradients units to Hartree/Angstrom
        gradients = gradients_b * bohrconv
        # update the geometry log
        if in_mol is not None:
            self.update_geom(positions, in_mol, in_shell)

        # truncate gradients if too long
        gradients = gradients[:len(positions)]

        os.chdir(self.here)

        return (energy, gradients, scf_energy)
class Molcas_calc(Calc):
    """
    Calculation with Molcas 8.0
    """

    def run(self, atoms):
        """
        Write a Molcas input file and return a subprocess.Popen

        Make sure the input file is called [name of calculation].input
        e.g. mh.input and the geometry file in Gateway is called geom.xyz

        Parameters
        ----------
        atoms : list of Atom objects
            Atoms to be calculated with Molcas
        Returns
        -------
        proc : subprocess.Popen object
            the object should have a .wait() method
        """
        molcas_path = os.path.join(self.here, self.calc_name)
        os.chdir(molcas_path)

        # Write a temporary geom file for molcas to read
        ef.write_xyz("geom.xyz", atoms)

        proc = subprocess.Popen(
            "molcas molcas.input -f", shell=True)
        os.chdir(self.here)

        return proc

    def read_out(self, positions, in_mol=None, in_shell=None):
        """
        Analyse a Molcas log file while printing geometry updates

        To update the geom files, include in_mol and in_shell. Also removes
        molcas.*

        Parameters
        ----------
        positions : list of floats
            List of atomic coordinates, important for truncation of gradients
            if too many are calculated
        in_mol : list of Atom objects, optional
            Atoms in the inner region. Include to write geom files
        in_shell : list of Atom objects, optional
            Atoms in the middle region. Include to write geom files
        Returns
        -------
        energy : float
            Energy calculated by Molcas in Hartree
        gradients : list of floats
            The gradients in form x1,y1,z1,x2,y2,z2 etc. in Hartree/Angstrom
        scf_energy : float
            The ground state energy in Hartree
        """
        molcas_path = os.path.join(self.here, self.calc_name)
        os.chdir(molcas_path)

        energy, gradients_b, scf_energy = rf.read_molcas("molcas.log")
        # fix gradients units to Hartree/Angstrom
        gradients = gradients_b * bohrconv
        # update the geometry log
        if in_mol is not None:
            self.update_geom(positions, in_mol, in_shell)

        # truncate gradients if too long
        gradients = gradients[:len(positions)]

        os.chdir(self.here)
        # delete the (potentially large) molcas wavefunction information.
        # NOTE(review): this runs after chdir back to self.here, so it
        # removes molcas.* there rather than in molcas_path — confirm
        # that is the intended location.
        subprocess.call("rm -rf molcas.*", shell=True)

        return (energy, gradients, scf_energy)
| [
37811,
7469,
1127,
262,
2199,
66,
5563,
543,
389,
973,
284,
1057,
16765,
198,
198,
464,
4007,
286,
262,
10017,
6097,
318,
284,
5412,
477,
286,
262,
2176,
871,
286,
198,
27379,
1430,
523,
355,
284,
307,
1498,
284,
869,
257,
17952,
29... | 2.206414 | 9,573 |
import os
import sys
import time
import torch
import torch.nn.functional as F
import numpy as np
import pandas as pd
| [
11748,
28686,
201,
198,
11748,
25064,
201,
198,
11748,
640,
201,
198,
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
... | 2.844444 | 45 |
"""
Date: 2022.04.27 22:18
Description: Omit
LastEditors: Rustle Karl
LastEditTime: 2022.04.27 22:18
"""
from .render import render_templates
| [
37811,
198,
10430,
25,
33160,
13,
3023,
13,
1983,
2534,
25,
1507,
198,
11828,
25,
440,
2781,
198,
5956,
18378,
669,
25,
17103,
293,
15415,
198,
5956,
18378,
7575,
25,
33160,
13,
3023,
13,
1983,
2534,
25,
1507,
198,
37811,
198,
6738,
... | 2.86 | 50 |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
from geometry_msgs.msg import TwistStamped
import math
from data_types import ControlParameters, Velocity, ControlCommand
from twist_controller import Controller
"""
Keep in mind the status of `dbw_enabled`. While in the simulator, its enabled
all the time, in the real car, that will not be the case. This may cause the
PID controller to accumulate error because the car could temporarily be driven
by a human instead of your controller.
"""
# The DBW system expects messages at 50Hz, and will disengage (reverting
# control back to the driver) if control messages are published at less
# than 10hz.
FREQUENCY = 50  # Hz; publishing rate expected by the DBW system (see above)
"""
The car has an automatic transmission, which means the car will roll forward
if no brake and no throttle is applied. To prevent it from moving requires
about 700 Nm of torque.
"""
if __name__ == "__main__":
    # DBWNode is defined earlier in this module (outside this excerpt);
    # presumably constructing it starts the ROS node — confirm.
    DBWNode()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
686,
2777,
88,
198,
6738,
14367,
62,
907,
14542,
13,
19662,
1330,
347,
970,
198,
6738,
20613,
86,
62,
28015,
89,
62,
907,
14542,
13,
19662,
1330,
47854,
23296,
40109,
11,
... | 3.648352 | 273 |
from icemet_sensor import Context, version, homedir
from icemet_sensor.config import create_config_file, SensorConfig
from icemet_sensor.measure import Measure
from icemet_sensor.sender import Sender
from icemet_sensor.util import collect_garbage
import aioftp
import argparse
import asyncio
from datetime import datetime
import logging
import os
import sys
import time
_version_str = """ICEMET-sensor {version}
Copyright (C) 2019-2020 Eero Molkoselkä <eero.molkoselka@gmail.com>
""".format(version=version)
_default_config_file = os.path.join(homedir, "icemet-sensor.yaml")
| [
6738,
14158,
19261,
62,
82,
22854,
1330,
30532,
11,
2196,
11,
3488,
276,
343,
198,
6738,
14158,
19261,
62,
82,
22854,
13,
11250,
1330,
2251,
62,
11250,
62,
7753,
11,
35367,
16934,
198,
6738,
14158,
19261,
62,
82,
22854,
13,
1326,
5015... | 3.005181 | 193 |
from datetime import datetime as dt
import dash
import dash_html_components as html
import dash_core_components as dcc
# External CSS stylesheet used by the Dash tutorial examples
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div([
    # Selectable range is bounded to 1995-08-05 .. 2017-09-19; only the
    # end date is preselected
    dcc.DatePickerRange(
        id='my-date-picker-range',
        min_date_allowed=dt(1995, 8, 5),
        max_date_allowed=dt(2017, 9, 19),
        initial_visible_month=dt(2017, 8, 5),
        end_date=dt(2017, 8, 25)
    ),
    # Target of the @app.callback registered below this layout
    html.Div(id='output-container-date-picker-range')
])
@app.callback(
dash.dependencies.Output('output-container-date-picker-range', 'children'),
[dash.dependencies.Input('my-date-picker-range', 'start_date'),
dash.dependencies.Input('my-date-picker-range', 'end_date')])
if __name__ == '__main__':
app.run_server(debug=True)
| [
6738,
4818,
8079,
1330,
4818,
8079,
355,
288,
83,
198,
11748,
14470,
198,
11748,
14470,
62,
6494,
62,
5589,
3906,
355,
27711,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
198,
198,
22615,
62,
47720,
258,
1039,
796,
3725... | 2.421348 | 356 |