id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8133430 | <filename>questions/n-queens-ii/Solution.py
"""
The n-queens puzzle is the problem of placing n queens on an n x n chessboard such that no two queens attack each other.
Given an integer n, return the number of distinct solutions to the n-queens puzzle.
Example 1:
Input: n = 4
Output: 2
Explanation: There are two distinct solutions to the 4-queens puzzle as shown.
Example 2:
Input: n = 1
Output: 1
Constraints:
1 <= n <= 9
"""
class Solution:
    """Counter for the n-queens II problem (LeetCode 52)."""

    def totalNQueens(self, n: int) -> int:
        """Return the number of distinct placements of n non-attacking queens
        on an n x n board (1 <= n <= 9)."""
        used_cols = set()
        used_diag = set()   # row - col is constant along "\" diagonals
        used_anti = set()   # row + col is constant along "/" diagonals

        def place(row: int) -> int:
            # All rows filled: one complete solution found.
            if row == n:
                return 1
            total = 0
            for col in range(n):
                if col in used_cols or (row - col) in used_diag or (row + col) in used_anti:
                    continue
                used_cols.add(col)
                used_diag.add(row - col)
                used_anti.add(row + col)
                total += place(row + 1)
                used_cols.discard(col)
                used_diag.discard(row - col)
                used_anti.discard(row + col)
            return total

        return place(0)
8070834 | from datetime import timezone
from enum import unique
from . import db
from flask_login import UserMixin
from sqlalchemy.sql import func
class Note(db.Model):
    """A single user-authored note."""
    id = db.Column(db.Integer, primary_key = True)
    text = db.Column(db.String(4096))
    # Timestamp defaults to the DB server's now() at insert time.
    date = db.Column(db.DateTime(timezone = True), default = func.now())
    # Owning user; matches User's implicit "user" table name.
    user_id = db.Column(db.Integer, db.ForeignKey("user.id"))
class User(db.Model, UserMixin):
    """Login-capable account; UserMixin supplies the flask-login interface."""
    id = db.Column(db.Integer, primary_key = True)
    userName = db.Column(db.String(128), unique = True)
    # NOTE(review): plain String column -- ensure callers store a password
    # hash, never the raw password; can't tell from this file.
    password = db.Column(db.String(128))
    # One-to-many: all Note rows whose user_id points at this user.
    notes = db.relationship("Note")
303906 | #
# bgNukes.py
#
# v1.3
#
# <NAME> [<EMAIL>]
#
# A script for launching single-core command-line Nuke renderers
# in the background from inside the Nuke UI.
#
# Saves log files of each render instance's output to the same folder
# where the Nuke script lives.
#
# Thanks go to <NAME>. His localRender.py was (and continues
# to be) an excellent reference.
#
#
# edited by <NAME> for PC compatability
# From Python
import os
import subprocess
# From Nuke
import nuke
def launch_nukes(nodes=[]):  # NOTE(review): mutable default; only read here, but nodes=None would be safer
    """
    Launch single-core command-line Nuke renderers from inside the Nuke UI.

    Prompts the user for a frame range, the Write nodes to execute, and the
    number of background processes, deals the frames out round-robin, then
    spawns one command-line Nuke per process. Each process's output is
    redirected to a log file next to the .nk script.
    """
    # A saved script path is required to build the command line and log paths.
    if nuke.root().knob('name').value() == '':
        nuke.message('This script is not named. Please save it and try again.')
        return
    # Select Write nodes.
    nodelist = ''
    if nodes != []:
        nodelist = ','.join([n.name() for n in nodes if "Write" in n.Class()])
    start = int(nuke.knob("first_frame"))
    end = int(nuke.knob("last_frame"))
    # Default: one renderer per CPU core.
    instances = nuke.env['numCPUs']
    framerange = str(start) + "-" + str(end)
    p = nuke.Panel("Launch Nukes")
    p.addSingleLineInput("Frames to execute:", framerange)
    p.addSingleLineInput("Node(s) to execute:", nodelist)
    # NOTE(review): instances is an int where the other defaults are strings
    # -- confirm nuke.Panel accepts non-string defaults.
    p.addSingleLineInput("Number of background procs:", instances)
    p.addButton("Cancel")
    p.addButton("OK")
    result = p.show()
    if not result: return
    framerange = p.value("Frames to execute:")
    nodelist = p.value("Node(s) to execute:").replace(' ', '')
    inst = int(p.value("Number of background procs:"))
    if framerange is None:
        return
    if inst is None:
        return
    (scriptpath, scriptname) = os.path.split(nuke.value("root.name"))
    # Presumably: -i interactive license, -x execute, -m 1 single thread --
    # confirm against the Nuke command-line reference.
    flags = "-ixm 1"
    if nodelist != '': flags += " -X " + nodelist
    # Expand the user's range string into an explicit frame list.
    r = nuke.FrameRanges()
    r.add(framerange)
    r.compact()
    frame_list = r.toFrameList()
    print frame_list
    # Create lists of frames to render for each instance.
    inst_frame_list = []
    for i in range(inst): inst_frame_list.append([])
    print inst_frame_list
    cnt = 0
    # Deal frames round-robin so every process gets a similar load.
    for frame in frame_list:
        inst_frame_list[cnt].append(str(frame))
        cnt += 1
        if cnt == inst: cnt = 0
    print inst_frame_list
    print ">>> launching", inst, "nuke instances"
    # Launch each renderer
    logs = []
    for i in range(inst):
        instRange = ' '.join(inst_frame_list[i])
        print ">>> frame list for instance", i, "is:", instRange
        logFile = "%s/%s_log%02d.log" % (scriptpath, scriptname, i)
        logs.append(logFile)
        # NOTE(review): '&>' redirection is bash-specific; likely broken under
        # cmd.exe despite the "PC compatability" edit noted in the header.
        cmd = " ".join([nuke.env['ExecutablePath'], flags, '-F', '"' + instRange + '"', nuke.value("root.name"), '&>', logFile])
        print ">>> starting instance %d" % (i, )
        print "command: " + cmd
        subprocess.Popen(cmd, shell=True)
    nuke.message(str(inst) + ' renderers launched in the background.\nLog files: ' + ', '.join(logs))
# Add BG Render to the Render menu (runs at import time inside the Nuke UI).
menubar=nuke.menu("Nuke")
m = menubar.findItem('Render')
# Guard against double-registration when the module is re-imported.
if not m.findItem('BG Render'):
    m.addCommand('-', '')  # menu separator
    m.addCommand('BG Render', 'bgNukes.launch_nukes(nuke.selectedNodes())')
| StarcoderdataPython |
3505893 | <gh_stars>0
""" vault encrypt lambda """
from __future__ import print_function
import logging
import json
import boto3
from botocore.exceptions import ClientError
from botocore.vendored import requests
import ansible
from ansible.parsing.vault import VaultLib
from ansible.constants import DEFAULT_VAULT_ID_MATCH
from ansible.parsing.vault import VaultSecret
LOGGER = logging.getLogger()
LOGGER.setLevel(logging.INFO)
DEBUG_MODE = True
if DEBUG_MODE:
LOGGER.setLevel(logging.DEBUG)
# Set directories or ansible tries to set on it's own, which breaks in lambda
ansible.constants.DEFAULT_LOCAL_TMP = '/tmp/ansible'
ansible.constants.DEFAULT_REMOTE_TMP = '/tmp/ansible'
ansible.local_tmp = '/tmp/ansible'
SSM_CLIENT = boto3.client('ssm')
def make_secret(secret):
    """Wrap a raw vault password into the secrets list VaultLib expects."""
    vault_secret = VaultSecret(secret)
    return [(DEFAULT_VAULT_ID_MATCH, vault_secret)]
def get_vault_password(key_name):
    """Fetch the vault password from SSM Parameter Store, decrypted."""
    param = SSM_CLIENT.get_parameter(Name=key_name, WithDecryption=True)
    return param['Parameter']['Value']
def lambda_handler(event, context):
    """Encrypt ``event['secret']`` with the ansible-vault password stored in SSM.

    Parameters
    ----------
    event : dict
        Must contain 'key_name' (name of the SSM parameter holding the
        vault password) and 'secret' (plaintext to encrypt).
    context : object
        Lambda context object (unused).

    Returns
    -------
    The ansible-vault-encrypted payload from VaultLib.encrypt.
    """
    vault_pass = get_vault_password(event['key_name'])
    # SECURITY FIX: the previous debug statement logged the decrypted vault
    # password to CloudWatch whenever DEBUG_MODE was on. Never log secrets.
    vault = VaultLib(make_secret(vault_pass))
    secret = vault.encrypt(event["secret"])
    return secret
def main():
    """Local CLI stub; the real entry point is lambda_handler."""
    print("Main")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
300225 | import os
import argparse
import pandas as pd
ROLLUPS_DIR = 'rollups'
def get_eta(origin, dest):
    """Return the travel-time estimate between two adjacent nodes.

    Reads the newest rollup file (lexicographically last filename in
    ROLLUPS_DIR) and indexes its 'travel_time' column by the integer pair
    (origin, dest).
    """
    # NOTE: This only works for nodes which are adjacent; in order for full route eta, we
    # would need to either (1) pass in route plan or (2) do to-ultimate-destination calculations
    # Assumes rollup filenames sort chronologically (e.g. timestamped) -- TODO confirm.
    latest_rollup = sorted(os.listdir(ROLLUPS_DIR))[-1]
    rollup = pd.read_parquet(f'{ROLLUPS_DIR}/{latest_rollup}')['travel_time']
    eta = rollup[(int(origin), int(dest))]
    return eta
if __name__ == '__main__':
    # CLI: print the ETA between two adjacent nodes given on the command line.
    ap = argparse.ArgumentParser()
    ap.add_argument('-o', '--origin', required=True)
    ap.add_argument('-d', '--dest', required=True)
    args = ap.parse_args()
    origin = args.origin
    dest = args.dest
    print(f'ETA from {origin} to {dest}...')
    eta = get_eta(origin, dest)
    print(f'ETA: {eta}')
| StarcoderdataPython |
4837464 | import argparse
import sys
import time
import numpy as np
import pyqtgraph as pg
from sensapex import UMP
from sensapex.sensapex import LIBUM_DEF_BCAST_ADDRESS
from sensapex.utils import bytes_str
# Command-line interface for the random-move stress test.
parser = argparse.ArgumentParser(
    description="Test for sensapex devices; perform a series of random moves while rapidly polling the device position and state."
)
parser.add_argument("device", type=int, help="Device ID to test")
parser.add_argument(
    "--library-path", "-l", type=str, dest="library_path", default=None, help="Folder containing the umsdk library"
)
parser.add_argument("--address", "-a", type=bytes_str, default=LIBUM_DEF_BCAST_ADDRESS, help="Device network address")
parser.add_argument("--debug", "-d", action="store_true", help="Turn on debug logging")
parser.add_argument("--group", "-g", type=int, default=0, help="Device group number")
# Per-axis opt-in: axes left off keep their starting coordinate.
parser.add_argument(
    "--x", action="store_true", default=False, dest="x", help="True = Random X axis values. False = keep start position"
)
parser.add_argument(
    "--y", action="store_true", default=False, dest="y", help="True = Random Y axis values. False = keep start position"
)
parser.add_argument(
    "--z", action="store_true", default=False, dest="z", help="True = Random Z axis values. False = keep start position"
)
parser.add_argument("--speed", type=int, default=1000, help="Movement speed in um/sec")
parser.add_argument(
    "--distance", type=int, default=10, help="Max distance to travel in um (relative to current position)"
)
parser.add_argument("--iter", type=int, default=10, help="Number of positions to test")
parser.add_argument("--acceleration", type=int, default=0, help="Max speed acceleration")
parser.add_argument(
    "--high-res",
    action="store_true",
    default=False,
    dest="high_res",
    help="Use high-resolution time sampling rather than poller's schedule",
)
parser.add_argument(
    "--start-pos",
    type=str,
    default=None,
    dest="start_pos",
    help="x,y,z starting position (by default, the current position is used)",
)
parser.add_argument(
    "--test-pos",
    type=str,
    default=None,
    dest="test_pos",
    help="x,y,z position to test (by default, random steps from the starting position are used)",
)
args = parser.parse_args()
# Connect to the uMp network and optionally enable SDK debug logging.
UMP.set_library_path(args.library_path)
ump = UMP.get_ump(address=args.address, group=args.group)
if args.debug:
    try:
        ump.set_debug_mode(True)
    except Exception as e:
        print(f"Could not enable Sensapex debug mode: {e}")
# Wait before listing devices -- presumably to let broadcast discovery finish.
time.sleep(2)
devids = ump.list_devices()
devs = {i: ump.get_device(i) for i in devids}
print("SDK version:", ump.sdk_version())
print("Found device IDs:", devids)
dev = devs[args.device]
# Plot window: top row = per-axis position, bottom row = per-axis error.
app = pg.mkQApp()
win = pg.GraphicsLayoutWidget()
win.show()
plots = [
    win.addPlot(labels={"left": ("x position", "m"), "bottom": ("time", "s")}),
    win.addPlot(labels={"left": ("y position", "m"), "bottom": ("time", "s")}),
    win.addPlot(labels={"left": ("z position", "m"), "bottom": ("time", "s")}),
]
# Share axes so the three position plots pan/zoom together.
plots[1].setYLink(plots[0])
plots[2].setYLink(plots[0])
plots[1].setXLink(plots[0])
plots[2].setXLink(plots[0])
win.nextRow()
errplots = [
    win.addPlot(labels={"left": ("x error", "m"), "bottom": ("time", "s")}),
    win.addPlot(labels={"left": ("y error", "m"), "bottom": ("time", "s")}),
    win.addPlot(labels={"left": ("z error", "m"), "bottom": ("time", "s")}),
]
errplots[1].setYLink(errplots[0])
errplots[2].setYLink(errplots[0])
errplots[0].setXLink(plots[0])
errplots[1].setXLink(plots[0])
errplots[2].setXLink(plots[0])
# Global sample traces appended to by update().
# NOTE(review): pg.ptime is deprecated in recent pyqtgraph -- confirm the
# pinned version, or switch to time.perf_counter().
start = pg.ptime.time()
pos = [[], [], []]
tgt = [[], [], []]
err = [[], [], []]
bus = []
mov = []
times = []
lastupdate = pg.ptime.time()
def update(update_error=False):
    """Poll the device once; append position/target/busy/moving samples to the
    global traces.

    update_error: when True (used while dwelling after a move) record the
    position-vs-target error; otherwise store NaN so the error curve is
    broken between moves (plotted with connect="finite").
    """
    global lastupdate  # NOTE(review): declared global but never reassigned here -- dead statement?
    # timeout=-1 vs 0 selects high-resolution sampling vs the poller's
    # schedule (per the --high-res help text) -- confirm get_pos semantics.
    timeout = -1 if args.high_res else 0
    p = dev.get_pos(timeout=timeout)
    s = dev.is_busy()
    m = not move_req.finished
    bus.append(int(s))
    mov.append(int(m))
    now = pg.ptime.time() - start
    times.append(now)
    for i in range(3):
        # Positions stored relative to start_pos, scaled by 1e-6 (um -> m).
        pos[i].append((p[i] - start_pos[i]) * 1e-6)
        tgt[i].append((target[i] - start_pos[i]) * 1e-6)
        if update_error:
            err[i].append(pos[i][-1] - tgt[i][-1])
        else:
            err[i].append(np.nan)
def update_plots():
    """Redraw the position and error plots from the accumulated global traces."""
    for i in range(3):
        plots[i].clear()
        # Shaded step curves: green where the device reports busy, red where
        # the move request is still unfinished (times has one more sample
        # than a step-mode curve needs, hence [:-1]).
        plots[i].addItem(
            pg.PlotCurveItem(times, bus[:-1], stepMode=True, pen=None, brush=(0, 255, 0, 40), fillLevel=0),
            ignoreBounds=True,
        )
        plots[i].addItem(
            pg.PlotCurveItem(times, mov[:-1], stepMode=True, pen=None, brush=(255, 0, 0, 40), fillLevel=0),
            ignoreBounds=True,
        )
        # Red line = target trajectory; dots = measured positions.
        plots[i].plot(times, tgt[i], pen="r")
        plots[i].plot(times, pos[i], symbol="o", symbolSize=5)
        errplots[i].plot(times, err[i], clear=True, connect="finite")
# Resolve the starting position: current device position unless overridden.
if args.start_pos is None:
    start_pos = dev.get_pos()
else:
    start_pos = np.array(list(map(float, args.start_pos.split(","))))
print(start_pos)
diffs = []
errs = []
positions = []
if args.test_pos is None:
    # Random per-axis offsets from the start position (enabled axes only).
    # NOTE(review): the three [] initializers below are dead -- each variable
    # is unconditionally reassigned by its if/else.
    xmoves = []
    ymoves = []
    zmoves = []
    if args.x:
        xmoves = (np.random.random(size=(args.iter, 1)) * args.distance).astype(int)
    else:
        xmoves = np.zeros(args.iter)
    if args.y:
        ymoves = (np.random.random(size=(args.iter, 1)) * args.distance).astype(int)
    else:
        ymoves = np.zeros(args.iter)
    if args.z:
        zmoves = (np.random.random(size=(args.iter, 1)) * args.distance).astype(int)
    else:
        zmoves = np.zeros(args.iter)
    moves = np.column_stack((xmoves, ymoves, zmoves))
    # moves = (np.random.random(size=(args.iter, 3)) * args.distance*1000).astype(int)
    targets = np.array(start_pos)[np.newaxis, :] + moves
    print(moves)
    print(targets)
else:
    # just move back and forth between start and test position
    test_pos = np.array(list(map(float, args.test_pos.split(","))))
    targets = np.zeros((args.iter, 3))
    targets[::2] = start_pos[None, :]
    targets[1::2] = test_pos[None, :]
speeds = [args.speed] * args.iter
# Hard-coded scenarios kept for reference:
# targets = np.array([[15431718, 7349832, 17269820], [15432068, 7349816, 17249852]] * 5)
# speeds = [100, 2] * args.iter
# targets = np.array([[13073580, 13482162, 17228380], [9280157.0, 9121206.0, 12198605.]] * 5)
# speeds = [1000] * args.iter
# targets = np.array([[9335078, 10085446, 12197238], [14793665.0, 11658668.0, 17168934.]] * 5)
# speeds = [1000] * args.iter
dev.stop()
for i in range(args.iter):
    target = targets[i]
    # Start the move, then poll rapidly until it reports finished.
    move_req = dev.goto_pos(target, speed=speeds[i], linear=False, max_acceleration=args.acceleration)
    while not move_req.finished:
        update(update_error=False)
        time.sleep(0.002)
    # Dwell for 1 s after the move, recording settling error samples.
    waitstart = pg.ptime.time()
    while pg.ptime.time() - waitstart < 1.0:
        update(update_error=True)
        time.sleep(0.002)
    # time.sleep(0.05)
    # Final position reading (um); error reported in meters.
    p2 = dev.get_pos(timeout=200)
    positions.append(p2)
    diff = (p2 - target) * 1e-6
    diffs.append(diff)
    errs.append(np.linalg.norm(diff))
    print(i, diff, errs[-1])
    update_plots()
# Return home and report error statistics.
dev.goto_pos(start_pos, args.speed)
print("mean:", np.mean(errs), " max:", np.max(errs))
if sys.flags.interactive == 0:
    app.exec_()
| StarcoderdataPython |
3446817 | <reponame>Darkhunter9/python
from math import ceil, floor
import re
def text_formatting(text: str, width: int, style: str) -> str:
    """Greedily wrap *text* to *width* columns and align each line.

    style: 'l' left (no padding), 'r' right, 'c' centred (extra space on the
    right is omitted), 'j' justified -- every line except the last is padded
    to exactly *width*, widening the left-most gaps first.
    """
    remaining = text.split(' ')
    rows = []
    while remaining:
        # A word always starts a row, even when it is longer than width.
        row = remaining.pop(0)
        while remaining and len(row) + 1 + len(remaining[0]) <= width:
            row += ' ' + remaining.pop(0)
        rows.append(row)

    def justify(row):
        # Spread the missing columns over the gaps; the first `bump` gaps
        # get one extra space each.
        pieces = row.split(' ')
        base, bump = divmod(width - len(row), len(pieces) - 1)
        out = pieces[0]
        for gap, piece in enumerate(pieces[1:]):
            out += ' ' * (base + 1 + (1 if gap < bump else 0)) + piece
        return out

    aligned = []
    last = len(rows) - 1
    for idx, row in enumerate(rows):
        if style == 'r':
            aligned.append(' ' * (width - len(row)) + row)
        elif style == 'c':
            aligned.append(' ' * ((width - len(row)) // 2) + row)
        elif style == 'j' and idx != last:
            aligned.append(justify(row))
        else:
            aligned.append(row)
    return '\n'.join(aligned)
if __name__ == '__main__':
text_formatting("Oh, my sweet summer child, what do you know of fear? Fear is for the winter, my little lord, when the snows fall a hundred feet deep and the ice wind comes howling out of the north. Fear is for the long night, when the sun hides its face for years at a time, and little children are born and live and die all in darkness while the direwolves grow gaunt and hungry, and the white walkers move through the woods.",10,"c")
LINE = ('Lorem ipsum dolor sit amet, consectetur adipisicing elit. Iure '
'harum suscipit aperiam aliquam ad, perferendis ex molestias '
'reiciendis accusantium quos, tempore sunt quod veniam, eveniet '
'et necessitatibus mollitia. Quasi, culpa.')
print('Example:')
print(text_formatting(LINE, 38, 'l'))
assert text_formatting(LINE, 38, 'l') == \
'''Lorem ipsum dolor sit amet,
consectetur adipisicing elit. Iure
harum suscipit aperiam aliquam ad,
perferendis ex molestias reiciendis
accusantium quos, tempore sunt quod
veniam, eveniet et necessitatibus
mollitia. Quasi, culpa.''', 'Left 38'
assert text_formatting(LINE, 30, 'c') == \
''' Lorem ipsum dolor sit amet,
consectetur adipisicing elit.
Iure harum suscipit aperiam
aliquam ad, perferendis ex
molestias reiciendis
accusantium quos, tempore sunt
quod veniam, eveniet et
necessitatibus mollitia.
Quasi, culpa.''', 'Center 30'
assert text_formatting(LINE, 50, 'r') == \
''' Lorem ipsum dolor sit amet, consectetur
adipisicing elit. Iure harum suscipit aperiam
aliquam ad, perferendis ex molestias reiciendis
accusantium quos, tempore sunt quod veniam,
eveniet et necessitatibus mollitia. Quasi, culpa.''', 'Right 50'
assert text_formatting(LINE, 45, 'j') == \
'''Lorem ipsum dolor sit amet, consectetur
adipisicing elit. Iure harum suscipit aperiam
aliquam ad, perferendis ex molestias
reiciendis accusantium quos, tempore sunt
quod veniam, eveniet et necessitatibus
mollitia. Quasi, culpa.''', 'Justify 45' | StarcoderdataPython |
1896324 | <reponame>davidxiao93/Advent-of-Code
input = """+11
+14
+10
-8
-13
-2
+8
+14
-11
-4
+2
-17
-15
-12
-15
-16
-15
-13
-15
-4
-9
-4
-9
-12
+19
+1
+10
+15
-6
+15
+15
+19
-8
+16
+2
+12
+10
+20
-15
+16
-3
-2
+1
+8
+5
-6
+8
-22
-14
+13
-12
-6
+11
-1
+21
+11
+4
-6
-19
-27
-14
+8
-5
+1
-6
-16
-10
+13
-14
-4
-9
-5
+17
-4
-17
+2
+8
+14
+13
+5
+2
+3
-13
-18
-8
+12
+19
+14
-3
-15
+17
+14
-4
+1
+10
-13
+7
-21
+18
+4
+11
+7
+8
+14
+17
-6
+17
+7
+10
-14
+9
+15
+13
+6
+11
-8
+16
+18
-8
+6
+16
+1
+14
-1
-16
-6
+15
-4
+5
+9
+17
-18
+3
+14
-10
+21
-15
+14
+6
+18
-11
+4
-9
+17
+12
+15
+1
-17
+19
+3
+19
-14
-9
+7
+10
+3
+13
+2
-14
+16
+11
-3
+10
-13
+14
-2
-14
+18
-8
-19
+18
-6
+21
+4
+5
-4
+18
+16
-6
-18
-12
+17
-19
+13
+13
+9
+2
-3
+16
+2
-7
-6
+29
+14
+13
-1
+34
+19
-13
+23
+10
-2
-6
+10
-3
+18
-9
+2
+10
+17
-18
-14
+2
+6
-4
+12
-6
+11
+5
-7
+10
+4
+17
+15
-8
+7
-3
-2
+13
+22
+10
-16
+4
+15
-1
-3
+13
-8
-9
-3
+4
+5
+17
+10
+18
+11
+9
+1
+12
+11
+7
-8
-17
-19
+1
+9
-5
+19
-3
+17
+8
-23
-1
-4
-2
-17
-13
-14
+16
-18
-14
+12
+5
+4
-14
+22
-1
-15
+18
+10
-2
-13
-18
+2
-8
+3
-8
-17
-12
-8
+16
+6
+19
+39
+15
-19
-12
-4
+5
+17
+32
+4
-2
+15
-12
-41
+8
+18
-17
+24
+15
+13
+13
-14
+15
+14
-16
+4
-38
-59
-22
-35
+12
-8
-22
-14
+3
+6
-8
+5
-17
-18
-14
+10
+21
+8
-11
-4
+2
+9
+5
+14
+17
+7
+13
-23
-5
-13
+9
-18
-16
+17
+6
-3
-7
+26
-8
-48
-16
-2
-10
-21
+14
+9
-5
+8
-14
-3
-4
+3
-7
+10
-1
+12
+6
-13
+19
+9
+5
-29
+19
-6
-8
-21
-23
-25
-9
+5
+19
-2
-8
+100
+61
+1
+18
+108
-60
+291
-13
+118
-1098
-56117
+14
-15
-4
-2
-12
-18
+3
+5
+15
-10
+18
+12
-19
-7
-21
+16
-7
+15
+5
+3
-14
+7
+19
+10
+9
+12
+16
-10
-7
-2
+7
-18
-3
+11
+14
-17
-7
+6
-15
-11
+18
+14
-18
-7
+16
-6
-5
+22
+12
-8
+11
+5
+18
-5
-19
+8
+14
-19
+7
-3
-14
-4
-6
-6
+18
+7
+10
+23
-18
-8
+9
+8
+8
+4
+17
-16
+10
-9
-9
+17
-5
-10
+18
+9
+17
+12
-3
+13
+3
+16
-14
-1
-15
-4
+17
-4
+19
-4
+17
+20
+22
-43
+18
-9
+14
-12
-16
-16
-2
-11
+19
-13
+22
+2
+16
+17
-9
-23
+7
-6
+10
+20
+14
+3
-4
-48
-1
+14
-28
-16
-12
+4
+14
-20
-1
+4
-18
-17
+10
+14
+6
-2
+19
+4
+3
-27
-13
+8
-3
+2
+18
-30
+6
-22
+4
+11
+18
+17
-20
-17
+65
+20
+9
-6
-93
-22
+2
-10
-71
+17
-15
-10
-4
-6
-8
-14
-3
+13
-2
+12
-9
-12
+8
+47
-8
-23
-21
-23
-12
-8
-5
-17
-30
-25
-11
+6
-17
-13
+11
-14
-9
+17
+17
+10
-17
-8
-15
+2
+18
-6
-29
+1
-17
+19
+19
+22
+5
-1
+19
+13
+15
-5
-19
+6
-13
+15
+3
-23
-46
-16
-19
-16
-11
+4
-21
+16
+13
-20
-16
-1
-14
-6
-16
+9
-18
+19
+2
+15
+15
-16
-18
-15
+8
+6
-11
-11
-14
-18
+26
+15
-34
+17
-88
+1
-10
-8
+16
-10
-19
+10
-13
-10
-16
-16
+10
+16
+20
+15
-20
+12
-15
-10
-1
+7
+5
-16
-14
-18
+14
-1
-18
+16
-12
-5
+14
+12
+2
-10
-15
+10
-11
-5
-19
-4
+16
-18
+12
-3
-16
-2
+16
+16
-12
-14
-14
+5
+6
-20
+15
+14
-16
-17
-1
+10
-17
+6
-19
+12
-35
-10
+8
+111
+14
-11
-17
-11
-12
+1
+23
+15
+7
-8
+4
+24
-9
+19
+19
+13
-1
+3
+17
+5
-16
-17
-6
-5
+14
-24
+14
-12
-18
+1
+12
-16
+11
-5
+24
+15
+9
-17
+9
-8
+4
-16
-4
-14
-14
-12
-24
+9
-19
+159
-27
+11
+12
+11
+3
+16
-25
-23
+10
-24
+4
-70
-4
-32
-13
+103
+162
+41
-401
-56221
+8
+19
-3
-11
-6
-21
-7
+16
+4
-10
-8
-17
-10
+16
+8
+10
+10
+5
+12
+27
-2
-9
+12
-6
+13
+23
-54
-10
-4
+13
+21
-65
-14
+2
+13
-19
-11
+10
+10
-2
-4
+8
-15
-20
+2
-6
-11
-6
+7
+17
+13
+2
-7
-10
-18
-17
-16
+4
+4
+11
+8
+15
-8
+10
+20
-10
-17
+2
-19
+20
-17
+5
-2
-16
+14
-19
-15
+2
+17
-2
-11
-12
+7
-20
-19
+13
-17
+18
-17
+9
-2
+7
-6
+11
-9
+3
-19
-12
+15
+4
+10
+4
-11
+17
+19
+9
-19
+1
-19
+11
+21
-17
-9
+11
-3
+2
+8
+14
+18
-2
-1
-12
+10
-18
-8
-5
+14
+23
-7
-28
+18
+5
-19
+10
+16
+19
+13
+17
+14
+11
+7
+1
+5
-11
-3
+15
+4
-34
+8
+5
+7
-35
-9
-17
-5
+13
+11
+11
-12
-27
+1
+21
-2
+14
+3
+17
-6
+45
-16
+24
+91
+20
+23
-2
+9
+7
+12
+6
+9
+7
-12
-13
-15
-2
+14
-11
-4
-10
+17
-1
-14
+21
+11
+1
+13
-5
+14
+16
-10
+4
+17
+5
-1
-3
-7
-13
-7
-19
+6
+24
+5
-6
+9
-11
-14
+19
-15
+19
+12
+19
-1
-14
+2
-1
-16
+10
-5
-32
+13
-3
-15
+113294"""
# Advent of Code 2018 day 1, part 2: starting from 0, repeatedly apply the
# frequency changes (cycling through the list) and report the first
# cumulative frequency that is reached twice.
seen_frequencies = { 0 }
current_frequency = 0
current_instruction = 0
# NOTE(review): the module-level name `input` shadows the builtin.
instructions = input.splitlines()
seen = False
duplicate_frequency = None
while not seen:
    instruction = instructions[current_instruction]
    current_frequency += int(instruction)
    if current_frequency in seen_frequencies:
        seen = True
        duplicate_frequency = current_frequency
        break
    seen_frequencies.add(current_frequency)
    # Wrap around: the change list may need to be applied many times.
    current_instruction = (current_instruction + 1) % len(instructions)
print(duplicate_frequency)
1993133 | <reponame>LucasHelal/data-science
import numpy as np
# Demo: saving and loading NumPy arrays in binary, zipped, and text formats.
# Create an array
arr = np.arange(5)
# Save to disk in NumPy binary format (writes my_array.npy; extension added automatically)
np.save('my_array', arr)
# Rebind arr -- the saved file on disk is unaffected
arr = np.arange(10)
# Reload the original saved copy (return value intentionally discarded in this demo)
np.load('my_array.npy')
# Save multiple named arrays into one zip archive (writes two_arrays.npz)
np.savez('two_arrays.npz', x=arr, y=arr)
# Loading an .npz is lazy: arrays are read when accessed by key
archive_array = np.load('two_arrays.npz')
# Show the 'x' entry
archive_array['x']
# Save as comma-delimited plain text
arr = np.array([[1, 2, 3], [4, 5, 6]])
np.savetxt('my_test_text.txt', arr, delimiter=',')
# Load the text file back (values come back as floats)
arr = np.loadtxt('my_test_text.txt', delimiter=',')
| StarcoderdataPython |
8145453 | from setuptools import setup, find_packages
import codecs
import os
here = os.path.abspath(os.path.dirname(__file__))

# The PyPI long description is taken from the README, not LONG_DESCRIPTION.
with codecs.open(os.path.join(here, "README.md"), encoding="utf-8") as fh:
    long_description = "\n" + fh.read()

VERSION = '1.0.6'
DESCRIPTION = 'Python Chemical Thermodynamics for Process Modeling (PyCTPM)'
# NOTE(review): LONG_DESCRIPTION is defined but never used below.
LONG_DESCRIPTION = 'Python Chemical Thermodynamics for Process Modeling (PyCTPM) is an open-source package which can be used to estimate thermodynamic properties in a typical process modeling'

# Setting up
setup(
    name="PyCTPM",
    version=VERSION,
    author="<NAME>",
    # NOTE(review): placeholder left by data scrubbing -- fill in before release.
    author_email="<<EMAIL>>",
    description=DESCRIPTION,
    long_description_content_type="text/markdown",
    long_description=long_description,
    packages=find_packages(exclude=['tests', '*.tests', '*.tests.*']),
    license='MIT',
    include_package_data=True,
    install_requires=['numpy',
                      'scipy', 'matplotlib', 'pandas'],
    keywords=['python', 'chemical engineering', 'thermodynamics',
              'process modeling', 'process simulation'],
    classifiers=[
        "Development Status :: 1 - Planning",
        "Intended Audience :: Education",
        "Programming Language :: Python :: 3",
        "Operating System :: Unix",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: Microsoft :: Windows",
    ]
)
| StarcoderdataPython |
8006904 | <gh_stars>100-1000
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from typing import cast
from datadog_checks.base import OpenMetricsBaseCheck
from .config import Config
from .types import Instance
class AzureIoTEdgeCheck(OpenMetricsBaseCheck):
    """Datadog check that scrapes Azure IoT Edge Prometheus endpoints."""
    __NAMESPACE__ = 'azure.iot_edge'  # Child of `azure.` namespace.

    def __init__(self, name, init_config, instances):
        # Only the first configured instance is used; Config expands it into
        # one OpenMetrics instance per scraped endpoint.
        self._config = Config(cast(Instance, instances[0]))
        super(AzureIoTEdgeCheck, self).__init__(name, init_config, self._config.prometheus_instances)

    def check(self, _):
        """Run one collection cycle over every configured Prometheus endpoint."""
        for instance in self._config.prometheus_instances:
            scraper_config = self.get_scraper_config(instance)
            self.process(scraper_config)
| StarcoderdataPython |
352492 | <reponame>pwessels-uhh/sarkas<filename>sarkas/time_evolution/integrators.py
"""
Module of various types of time_evolution
"""
import numpy as np
from numba import njit
from IPython import get_ipython
if get_ipython().__class__.__name__ == 'ZMQInteractiveShell':
from tqdm import tqdm_notebook as tqdm
else:
from tqdm import tqdm
# import fmm3dpy as fmm
# from sarkas.potentials import force_pm, force_pp
class Integrator:
"""
Class used to assign integrator type.
Attributes
----------
dt: float
Timestep.
equilibration_steps: int
Total number of equilibration timesteps.
eq_dump_step: int
Equilibration dump interval.
kB: float
Boltzmann constant.
magnetized: bool
Magnetized simulation flag.
production_steps: int
Total number of production timesteps.
prod_dump_step: int
Production dump interval.
species_num: numpy.ndarray
Number of particles of each species. copy of ``parameters.species_num``.
box_lengths: numpy.ndarray
Length of each box side.
pbox_lengths: numpy.ndarray
Initial particle box sides' lengths.
verbose: bool
Verbose output flag.
type: str
Integrator type.
update: func
Integrator choice. 'verlet', 'verlet_langevin', 'magnetic_verlet' or 'magnetic_boris'.
update_accelerations: func
Link to the correct potential update function.
thermostate: func
Link to the correct thermostat function.
enforce_bc: func
Link to the function enforcing boundary conditions. 'periodic' or 'absorbing'.
"""
def __init__(self):
    # Integrator selection and timestep (filled from YAML/params in setup()).
    self.type = None
    self.dt = None
    self.kB = None
    # Magnetization flags.
    self.magnetized = False
    self.electrostatic_equilibration = False
    # Step counts for each simulation phase.
    self.production_steps = None
    self.equilibration_steps = None
    self.magnetization_steps = None
    # Dump intervals for each phase.
    self.prod_dump_step = None
    self.eq_dump_step = None
    self.mag_dump_steps = None
    # Bound in setup(): integrator routine and simulation geometry.
    self.update = None
    self.species_num = None
    self.box_lengths = None
    self.pbox_lengths = None
    self.boundary_conditions = None
    self.enforce_bc = None
    self.verbose = False
    # Accepted values, validated by asserts in setup().
    self.supported_boundary_conditions = ['periodic', 'absorbing']
    self.supported_integrators = ['verlet', 'verlet_langevin', 'magnetic_verlet', 'magnetic_boris']
# def __repr__(self):
# sortedDict = dict(sorted(self.__dict__.items(), key=lambda x: x[0].lower()))
# disp = 'Integrator( \n'
# for key, value in sortedDict.items():
# disp += "\t{} : {}\n".format(key, value)
# disp += ')'
# return disp
def from_dict(self, input_dict: dict):
    """
    Update attributes from input dictionary.

    Parameters
    ----------
    input_dict: dict
        Dictionary to be copied.
    """
    # Keys become attribute names; existing attributes are overwritten.
    self.__dict__.update(input_dict)
def setup(self, params, thermostat, potential):
    """
    Assign attributes from simulation's parameters and classes.

    Selects the integrator routine (self.update), the boundary-condition
    handler (self.enforce_bc) and the force-update routine
    (self.update_accelerations) based on the configured type.

    Parameters
    ----------
    params: sarkas.core.parameters
        Parameters class.

    thermostat: sarkas.time_evolution.thermostat
        Thermostat class

    potential: sarkas.potentials.core.Potential
        Potential class.
    """
    # Copy box/species information locally.
    self.box_lengths = np.copy(params.box_lengths)
    self.pbox_lengths = np.copy(params.pbox_lengths)
    self.kB = params.kB
    self.species_num = np.copy(params.species_num)
    self.boundary_conditions = params.boundary_conditions
    self.verbose = params.verbose
    # Fall back to global parameters when not set on this object;
    # dump intervals default to 10% of the phase length.
    if self.dt is None:
        self.dt = params.dt
    if self.production_steps is None:
        self.production_steps = params.production_steps
    if self.equilibration_steps is None:
        self.equilibration_steps = params.equilibration_steps
    if self.prod_dump_step is None:
        if hasattr(params, 'prod_dump_step'):
            self.prod_dump_step = params.prod_dump_step
        else:
            self.prod_dump_step = int(0.1 * self.production_steps)
    if self.eq_dump_step is None:
        if hasattr(params, 'eq_dump_step'):
            self.eq_dump_step = params.eq_dump_step
        else:
            self.eq_dump_step = int(0.1 * self.equilibration_steps)
    assert self.boundary_conditions.lower() in self.supported_boundary_conditions, 'Wrong choice of boundary condition.'
    # Assign integrator.enforce_bc to the correct method
    if self.boundary_conditions.lower() == "periodic":
        self.enforce_bc = self.periodic
    elif self.boundary_conditions.lower() == "absorbing":
        self.enforce_bc = self.absorbing
    assert self.type.lower() in self.supported_integrators, 'Wrong integrator choice.'
    # Assign integrator.update to the correct method
    if self.type.lower() == "verlet":
        self.update = self.verlet
    elif self.type.lower() == "verlet_langevin":
        # Langevin noise amplitude per species and half-step damping factors.
        self.sigma = np.sqrt(
            2. * self.langevin_gamma * params.kB * params.species_temperatures / params.species_masses)
        self.c1 = (1. - 0.5 * self.langevin_gamma * self.dt)
        self.c2 = 1. / (1. + 0.5 * self.langevin_gamma * self.dt)
        self.update = self.verlet_langevin
    elif self.type.lower() == "magnetic_verlet":
        # Create the unit vector of the magnetic field
        self.magnetic_field_uvector = params.magnetic_field / np.linalg.norm(params.magnetic_field)
        # Per-particle cyclotron frequency, filled species by species.
        self.omega_c = np.zeros((params.total_num_ptcls, params.dimensions))
        sp_start = 0
        sp_end = 0
        for ic, sp_np in enumerate(params.species_num):
            sp_end += sp_np
            self.omega_c[sp_start: sp_end, :] = params.species_cyclotron_frequencies[ic]
            sp_start += sp_np
        # Calculate functions for magnetic integrator
        # This could be used when the generalization to Forest-Ruth and MacLachlan algorithms will be implemented
        # In a magnetic Velocity-Verlet the coefficient is 1/2, see eq.~(78) in :cite:`Chin2008`
        self.magnetic_helpers(0.5)
        # array to temporary store velocities
        # Luciano: I have the vague doubt that allocating memory for these arrays is faster than calculating them
        # each time step
        self.v_B = np.zeros((params.total_num_ptcls, params.dimensions))
        self.v_F = np.zeros((params.total_num_ptcls, params.dimensions))
        # Fast path when B points exactly along z.
        if np.dot(self.magnetic_field_uvector, np.array([0.0, 0.0, 1.0])) == 1.0:
            self.update = self.magnetic_verlet_zdir
        else:
            self.update = self.magnetic_verlet
    elif self.type.lower() == "magnetic_boris":
        # Create the unit vector of the magnetic field
        self.magnetic_field_uvector = params.magnetic_field / np.linalg.norm(params.magnetic_field)
        self.omega_c = np.zeros((params.total_num_ptcls, params.dimensions))
        sp_start = 0
        sp_end = 0
        for ic, sp_np in enumerate(params.species_num):
            sp_end += sp_np
            self.omega_c[sp_start: sp_end, :] = params.species_cyclotron_frequencies[ic]
            sp_start += sp_np
        # In a leapfrog-type algorithm the coefficient is different for the acceleration and magnetic rotation
        # see eq.~(79) in :cite:`Chin2008`
        # self.magnetic_helpers(1.0)
        if np.dot(self.magnetic_field_uvector, np.array([0.0, 0.0, 1.0])) == 1.0:
            self.update = self.magnetic_boris_zdir
        else:
            self.update = self.magnetic_boris
        # array to temporary store velocities
        # Luciano: I have the vague doubt that allocating memory for these arrays is faster than calculating them
        # each time step
        self.v_B = np.zeros((params.total_num_ptcls, params.dimensions))
        self.v_F = np.zeros((params.total_num_ptcls, params.dimensions))
    if params.magnetized:
        self.magnetized = True
        if self.electrostatic_equilibration:
            # Equilibrate with plain Verlet first; switch to the magnetic
            # integrator during the magnetization phase (see magnetize()).
            params.electrostatic_equilibration = True
            self.magnetic_integrator = self.update
            self.update = self.verlet
            if self.magnetization_steps is None:
                self.magnetization_steps = params.magnetization_steps
            if self.prod_dump_step is None:
                if hasattr(params, 'mag_dump_step'):
                    self.mag_dump_step = params.mag_dump_step
                else:
                    self.mag_dump_step = int(0.1 * self.production_steps)
    # Choose the force-update routine matching the potential's algorithm.
    if not potential.method == 'FMM':
        if potential.pppm_on:
            self.update_accelerations = potential.update_pppm
        else:
            if potential.linked_list_on:
                self.update_accelerations = potential.update_linked_list
            else:
                self.update_accelerations = potential.update_brute
    else:
        self.update_accelerations = potential.update_fmm
    self.thermostate = thermostat.update
def equilibrate(self, it_start, ptcls, checkpoint):
    """
    Loop over the equilibration steps.

    Parameters
    ----------
    it_start: int
        Initial step of equilibration.

    ptcls: sarkas.core.Particles
        Particles' class.

    checkpoint: sarkas.utilities.InputOutput
        IO class for saving dumps.
    """
    for it in tqdm(range(it_start, self.equilibration_steps), disable=not self.verbose):
        # Calculate the Potential energy and update particles' data
        self.update(ptcls)
        if (it + 1) % self.eq_dump_step == 0:
            checkpoint.dump('equilibration', ptcls, it + 1)
        # Apply the thermostat, then remove any net drift of the system.
        self.thermostate(ptcls, it)
        ptcls.remove_drift()
def magnetize(self, it_start, ptcls, checkpoint):
    """
    Loop over the magnetization steps.

    Parameters
    ----------
    it_start: int
        Initial step of the magnetization phase.

    ptcls: sarkas.core.Particles
        Particles' class.

    checkpoint: sarkas.utilities.InputOutput
        IO class for saving dumps.
    """
    # Switch from the plain Verlet used for electrostatic equilibration to
    # the magnetic integrator selected in setup().
    self.update = self.magnetic_integrator
    for it in tqdm(range(it_start, self.magnetization_steps), disable=not self.verbose):
        # Calculate the Potential energy and update particles' data
        self.update(ptcls)
        if (it + 1) % self.mag_dump_step == 0:
            checkpoint.dump('magnetization', ptcls, it + 1)
        self.thermostate(ptcls, it)
def produce(self, it_start, ptcls, checkpoint):
    """
    Run the production phase of the simulation.

    Parameters
    ----------
    it_start: int
        Timestep at which production (re)starts.

    ptcls: sarkas.core.Particles
        Particles' class.

    checkpoint: sarkas.utilities.InputOutput
        IO class for saving dumps.
    """
    final_step = self.production_steps
    for step in tqdm(range(it_start, final_step), disable=(not self.verbose)):
        # Move the particles and calculate the potential.
        self.update(ptcls)
        completed = step + 1
        # Save particles' data for restart/analysis at the dump interval.
        if completed % self.prod_dump_step == 0:
            checkpoint.dump('production', ptcls, completed)
def verlet_langevin(self, ptcls):
    """
    Update particles class using the velocity verlet algorithm and Langevin damping.

    Parameters
    ----------
    ptcls: sarkas.core.Particles
        Particles data.

    Notes
    -----
    Fixes w.r.t. the previous implementation:

    * the position-update loop never advanced ``sp_start``, so every species
      iteration re-updated all particles from row 0 (species 0 was pushed
      once per species, with the wrong ``sigma``);
    * ``beta`` is now sliced per species, mirroring the deterministic terms
      (assumes ``ptcls.gaussian`` returns an (N, dim) array — TODO confirm).
    """
    # One Gaussian kick per particle and dimension.
    beta = ptcls.gaussian(0., 1., ptcls.pos.shape[0])
    sp_start = 0  # start index of the current species block
    sp_end = 0
    for ic, num in enumerate(self.species_num):
        sp_end += num
        # Langevin position update: damped drift + deterministic force
        # + stochastic kick, all restricted to this species' rows.
        ptcls.pos[sp_start:sp_end, :] += self.c1 * self.dt * ptcls.vel[sp_start:sp_end, :] \
            + 0.5 * self.dt ** 2 * ptcls.acc[sp_start:sp_end, :] \
            + 0.5 * self.sigma[ic] * self.dt ** 1.5 * beta[sp_start:sp_end, :]
        # BUG FIX: advance the window so the next species updates its own rows.
        sp_start = sp_end
    # Enforce boundary condition
    self.enforce_bc(ptcls)
    # Keep the pre-update accelerations for the trapezoidal velocity update.
    acc_old = np.copy(ptcls.acc)
    self.update_accelerations(ptcls)

    sp_start = 0
    sp_end = 0
    for ic, num in enumerate(self.species_num):
        sp_end += num
        # Damped velocity update with averaged old/new accelerations and the
        # species-specific stochastic term.
        ptcls.vel[sp_start:sp_end, :] = self.c1 * self.c2 * ptcls.vel[sp_start:sp_end, :] \
            + 0.5 * self.c2 * self.dt * (ptcls.acc[sp_start:sp_end, :]
                                         + acc_old[sp_start:sp_end, :]) \
            + self.c2 * self.sigma[ic] * np.sqrt(self.dt) * beta[sp_start:sp_end, :]
        sp_start = sp_end
def verlet(self, ptcls):
    """
    Advance ``ptcls`` by one timestep with the velocity Verlet scheme.

    More information can be found here:
    https://en.wikipedia.org/wiki/Verlet_integration or on the Sarkas website.

    Parameters
    ----------
    ptcls: sarkas.core.Particles
        Particles data.
    """
    half_dt = 0.5 * self.dt
    # Half-step velocity kick with the current accelerations.
    ptcls.vel += ptcls.acc * half_dt
    # Full-step position drift.
    ptcls.pos += ptcls.vel * self.dt
    # Fold/absorb particles according to the configured boundary conditions.
    self.enforce_bc(ptcls)
    # Recompute potential energy and accelerations at the new positions.
    self.update_accelerations(ptcls)
    # Second half-step velocity kick with the refreshed accelerations.
    ptcls.vel += ptcls.acc * half_dt
def magnetic_helpers(self, coefficient):
    """Precompute the trigonometric factors used by the magnetic integrators.

    Parameters
    ----------
    coefficient: float
        Fraction of the timestep over which the rotation angle is evaluated.

    Notes
    -----
    This is useful for the Leapfrog magnetic algorithm and future
    Forest-Ruth and MacLachlan algorithms.
    """
    # Rotation angle omega_c * dt scaled by the requested fraction.
    angle = self.omega_c * self.dt * coefficient
    self.sdt = np.sin(angle)
    self.cdt = np.cos(angle)
    # 1 - cos(theta) and 1 - sin(theta)/theta, the two combinations that
    # appear in the exact magnetic-rotation formulas.
    self.ccodt = 1.0 - self.cdt
    self.ssodt = 1.0 - self.sdt / angle
def magnetic_verlet_zdir(self, ptcls):
    """
    Update particles' class based on velocity verlet method in the case of a
    constant magnetic field along the :math:`z` axis. For more info see
    eq. (78) of Ref. :cite:`Chin2008`

    Parameters
    ----------
    ptcls: sarkas.core.Particles
        Particles data.

    Returns
    -------
    potential_energy : float
        Total potential energy.

    Notes
    -----
    This integrator is faster than `magnetic_verlet` but valid only for a
    magnetic field in the :math:`z`-direction. This is the preferred choice
    in this case.

    NOTE(review): the code mixes column indices (e.g. ``self.sdt[:, 0]`` vs
    ``self.cdt[:, 1]``); this is only equivalent if ``omega_c`` (and the
    derived ``sdt``/``cdt``/``ccodt`` arrays) carry the same value in every
    column — TODO confirm how ``omega_c`` is built.
    """
    # First half step of velocity update
    # # Magnetic rotation x - velocity
    # (B x v)_x = -v_y, (B x B x v)_x = -v_x
    self.v_B[:, 0] = ptcls.vel[:, 1] * self.sdt[:, 0] + ptcls.vel[:, 0] * self.cdt[:, 0]
    # Magnetic rotation y - velocity
    # (B x v)_y = v_x, (B x B x v)_y = -v_y
    self.v_B[:, 1] = - ptcls.vel[:, 0] * self.sdt[:, 0] + ptcls.vel[:, 1] * self.cdt[:, 1]
    # Magnetic + Const force field x - velocity
    # (B x a)_x = -a_y, (B x B x a)_x = -a_x
    self.v_F[:, 0] = self.ccodt[:, 1] / self.omega_c[:, 1] * ptcls.acc[:, 1] \
        + self.sdt[:, 0] / self.omega_c[:, 0] * ptcls.acc[:, 0]
    # Magnetic + Const force field y - velocity
    # (B x a)_y = a_x, (B x B x a)_y = -a_y
    self.v_F[:, 1] = - self.ccodt[:, 0] / self.omega_c[:, 0] * ptcls.acc[:, 0] \
        + self.sdt[:, 1] / self.omega_c[:, 1] * ptcls.acc[:, 1]
    # Combine rotation and force contributions; z is unaffected by the field.
    ptcls.vel[:, 0] = self.v_B[:, 0] + self.v_F[:, 0]
    ptcls.vel[:, 1] = self.v_B[:, 1] + self.v_F[:, 1]
    ptcls.vel[:, 2] += 0.5 * self.dt * ptcls.acc[:, 2]

    # Position update
    ptcls.pos += ptcls.vel * self.dt

    # Enforce boundary condition
    self.enforce_bc(ptcls)

    # Compute total potential energy and acceleration for second half step velocity update
    potential_energy = self.update_accelerations(ptcls)

    # Second half step: identical rotation + force update with the new accelerations.
    # # Magnetic rotation x - velocity
    # (B x v)_x = -v_y, (B x B x v)_x = -v_x
    self.v_B[:, 0] = ptcls.vel[:, 1] * self.sdt[:, 0] + ptcls.vel[:, 0] * self.cdt[:, 0]
    # Magnetic rotation y - velocity
    # (B x v)_y = v_x, (B x B x v)_y = -v_y
    self.v_B[:, 1] = - ptcls.vel[:, 0] * self.sdt[:, 0] + ptcls.vel[:, 1] * self.cdt[:, 1]
    # Magnetic + Const force field x - velocity
    # (B x a)_x = -a_y, (B x B x a)_x = -a_x
    self.v_F[:, 0] = self.ccodt[:, 1] / self.omega_c[:, 1] * ptcls.acc[:, 1] \
        + self.sdt[:, 0] / self.omega_c[:, 0] * ptcls.acc[:, 0]
    # Magnetic + Const force field y - velocity
    # (B x a)_y = a_x, (B x B x a)_y = -a_y
    self.v_F[:, 1] = - self.ccodt[:, 0] / self.omega_c[:, 0] * ptcls.acc[:, 0] \
        + self.sdt[:, 1] / self.omega_c[:, 1] * ptcls.acc[:, 1]
    ptcls.vel[:, 0] = self.v_B[:, 0] + self.v_F[:, 0]
    ptcls.vel[:, 1] = self.v_B[:, 1] + self.v_F[:, 1]
    ptcls.vel[:, 2] += 0.5 * self.dt * ptcls.acc[:, 2]

    return potential_energy
def magnetic_verlet(self, ptcls):
    r"""
    Update particles' class based on velocity verlet method in the case of an
    arbitrary direction of the constant magnetic field. For more info see
    eq. (78) of Ref. :cite:`Chin2008`

    Parameters
    ----------
    ptcls: sarkas.core.Particles
        Particles data.

    Returns
    -------
    potential_energy : float
        Total potential energy.

    Notes
    -----
    :cite:`Chin2008` equations are written for a negative charge. This allows
    him to write :math:`\dot{\mathbf v} = \omega_c \hat{B} \times \mathbf v`.
    In the case of positive charges we will have
    :math:`\dot{\mathbf v} = - \omega_c \hat{B} \times \mathbf v`. Hence the
    reason of the different signs in the formulas below compared to Chin's.

    Warnings
    --------
    This integrator is valid for a magnetic field in an arbitrary direction.
    However, while the integrator works for an arbitrary direction, methods in
    `sarkas.tool.observables` work only for a magnetic field in the
    :math:`z` - direction. Hence, if you choose to use this integrator
    remember to change your physical observables.
    """
    # Calculate the cross products
    b_cross_v = np.cross(self.magnetic_field_uvector, ptcls.vel)
    b_cross_b_cross_v = np.cross(self.magnetic_field_uvector, b_cross_v)
    b_cross_a = np.cross(self.magnetic_field_uvector, ptcls.acc)
    b_cross_b_cross_a = np.cross(self.magnetic_field_uvector, b_cross_a)

    # First half step of velocity update
    # Exact rotation about B, then the force contribution over half a step.
    ptcls.vel += - self.sdt * b_cross_v + self.ccodt * b_cross_b_cross_v
    ptcls.vel += 0.5 * ptcls.acc * self.dt - self.ccodt / self.omega_c * b_cross_a \
        + 0.5 * self.dt * self.ssodt * b_cross_b_cross_a

    # Position update
    ptcls.pos += ptcls.vel * self.dt

    # Enforce boundary condition
    self.enforce_bc(ptcls)

    # Compute total potential energy and acceleration for second half step velocity update
    potential_energy = self.update_accelerations(ptcls)

    # Re-calculate the cross products
    b_cross_v = np.cross(self.magnetic_field_uvector, ptcls.vel)
    b_cross_b_cross_v = np.cross(self.magnetic_field_uvector, b_cross_v)
    b_cross_a = np.cross(self.magnetic_field_uvector, ptcls.acc)
    b_cross_b_cross_a = np.cross(self.magnetic_field_uvector, b_cross_a)

    # Second half step velocity update
    ptcls.vel += - self.sdt * b_cross_v + self.ccodt * b_cross_b_cross_v
    ptcls.vel += 0.5 * ptcls.acc * self.dt - self.ccodt / self.omega_c * b_cross_a \
        + 0.5 * self.dt * self.ssodt * b_cross_b_cross_a

    return potential_energy
def magnetic_boris_zdir(self, ptcls):
    """
    Update particles' class using the Boris algorithm in the case of a
    constant magnetic field along the :math:`z` axis. For more info see
    eqs. (80) - (81) of Ref. :cite:`Chin2008`

    Parameters
    ----------
    ptcls: sarkas.core.Particles
        Particles data.

    Returns
    -------
    potential_energy : float
        Total potential energy.

    Notes
    -----
    The splitting is exp(eV_F/2) exp(eV_B) exp(eV_F/2): half force kick,
    full magnetic rotation, half force kick, then a full drift.
    ``magnetic_helpers`` is re-invoked to switch the trig factors between the
    half-step and full-step angles.
    """
    # First half step of velocity update: Apply exp(eV_BF)
    # epsilon/2 V_F
    self.magnetic_helpers(0.5)

    # Magnetic + Const force field x - velocity
    # (B x a)_x = -a_y, (B x B x a)_x = -a_x
    self.v_F[:, 0] = self.ccodt[:, 1] / self.omega_c[:, 1] * ptcls.acc[:, 1] \
        + self.sdt[:, 0] / self.omega_c[:, 0] * ptcls.acc[:, 0]
    # Magnetic + Const force field y - velocity
    # (B x a)_y = a_x, (B x B x a)_y = -a_y
    self.v_F[:, 1] = - self.ccodt[:, 0] / self.omega_c[:, 0] * ptcls.acc[:, 0] \
        + self.sdt[:, 1] / self.omega_c[:, 1] * ptcls.acc[:, 1]
    ptcls.vel[:, 0] += self.v_F[:, 0]
    ptcls.vel[:, 1] += self.v_F[:, 1]
    ptcls.vel[:, 2] += 0.5 * self.dt * ptcls.acc[:, 2]

    # epsilon V_B: full-step magnetic rotation in the xy-plane.
    self.magnetic_helpers(1.0)

    # Magnetic rotation x - velocity
    # (B x v)_x = -v_y, (B x B x v)_x = -v_x
    self.v_B[:, 0] = ptcls.vel[:, 1] * self.sdt[:, 0] + ptcls.vel[:, 0] * self.cdt[:, 0]
    # Magnetic rotation y - velocity
    # (B x v)_y = v_x, (B x B x v)_y = -v_y
    self.v_B[:, 1] = - ptcls.vel[:, 0] * self.sdt[:, 0] + ptcls.vel[:, 1] * self.cdt[:, 1]
    ptcls.vel[:, 0] = np.copy(self.v_B[:, 0])
    ptcls.vel[:, 1] = np.copy(self.v_B[:, 1])

    # # epsilon/2 V_F: second half force kick.
    self.magnetic_helpers(0.5)
    # Magnetic + Const force field x - velocity
    # (B x a)_x = -a_y, (B x B x a)_x = -a_x
    self.v_F[:, 0] = self.ccodt[:, 1] / self.omega_c[:, 1] * ptcls.acc[:, 1] \
        + self.sdt[:, 0] / self.omega_c[:, 0] * ptcls.acc[:, 0]
    # Magnetic + Const force field y - velocity
    # (B x a)_y = a_x, (B x B x a)_y = -a_y
    self.v_F[:, 1] = - self.ccodt[:, 0] / self.omega_c[:, 0] * ptcls.acc[:, 0] \
        + self.sdt[:, 1] / self.omega_c[:, 1] * ptcls.acc[:, 1]
    ptcls.vel[:, 0] += self.v_F[:, 0]
    ptcls.vel[:, 1] += self.v_F[:, 1]
    ptcls.vel[:, 2] += 0.5 * self.dt * ptcls.acc[:, 2]

    # Full step position update
    ptcls.pos += ptcls.vel * self.dt

    # Enforce boundary condition
    self.enforce_bc(ptcls)

    # Compute total potential energy and acceleration for second half step velocity update
    potential_energy = self.update_accelerations(ptcls)

    return potential_energy
def magnetic_boris(self, ptcls):
    """
    Update particles' class using the Boris algorithm. For more info see
    eqs. (80) - (81) of Ref. :cite:`Chin2008`

    NOTE(review): the original docstring claimed a field "along the z axis",
    but this implementation uses cross products with
    ``self.magnetic_field_uvector`` and therefore handles a constant field in
    an arbitrary direction (cf. ``magnetic_boris_zdir`` for the z-only
    variant) — confirm intended scope.

    Parameters
    ----------
    ptcls: sarkas.core.Particles
        Particles data.

    Returns
    -------
    potential_energy : float
        Total potential energy.
    """
    # First half step of velocity update: Apply exp(eV_BF)
    # epsilon/2 V_F: half-step force kick including magnetic corrections.
    self.magnetic_helpers(0.5)
    b_cross_a = np.cross(self.magnetic_field_uvector, ptcls.acc)
    b_cross_b_cross_a = np.cross(self.magnetic_field_uvector, b_cross_a)
    ptcls.vel += ptcls.acc * 0.5 * self.dt - self.ccodt/self.omega_c * b_cross_a \
        + 0.5 * self.dt * self.ssodt * b_cross_b_cross_a

    # epsilon V_B: full-step exact rotation about the field direction.
    self.magnetic_helpers(1.0)
    b_cross_v = np.cross(self.magnetic_field_uvector, ptcls.vel)
    b_cross_b_cross_v = np.cross(self.magnetic_field_uvector, b_cross_v)
    ptcls.vel += - self.sdt * b_cross_v + self.ccodt * b_cross_b_cross_v

    # # epsilon/2 V_F: second half-step force kick.
    self.magnetic_helpers(0.5)
    b_cross_a = np.cross(self.magnetic_field_uvector, ptcls.acc)
    b_cross_b_cross_a = np.cross(self.magnetic_field_uvector, b_cross_a)
    ptcls.vel += ptcls.acc * 0.5 * self.dt - self.ccodt/self.omega_c * b_cross_a \
        + 0.5 * self.dt * self.ssodt * b_cross_b_cross_a

    # Full step position update
    ptcls.pos += ptcls.vel * self.dt

    # Enforce boundary condition
    self.enforce_bc(ptcls)

    # Compute total potential energy and acceleration for second half step velocity update
    potential_energy = self.update_accelerations(ptcls)

    return potential_energy
def periodic(self, ptcls):
    """
    Apply periodic boundary conditions by calling :func:`enforce_pbc`.

    Positions are folded back into the main box and ``ptcls.pbc_cntr`` tracks
    how many times each particle crossed each boundary.

    Parameters
    ----------
    ptcls: sarkas.core.Particles
        Particles data.
    """
    enforce_pbc(ptcls.pos, ptcls.pbc_cntr, self.box_lengths)
def absorbing(self, ptcls):
    """
    Apply absorbing boundary conditions by calling :func:`enforce_abc`.

    Particles that reach a wall are pinned there with zero velocity,
    acceleration, and charge.

    Parameters
    ----------
    ptcls: sarkas.core.Particles
        Particles data.
    """
    enforce_abc(ptcls.pos, ptcls.vel, ptcls.acc, ptcls.charges, self.box_lengths)
def pretty_print(self, frequency, restart, restart_step):
    """Print integrator attributes in a user friendly way.

    Parameters
    ----------
    frequency: float
        Total plasma frequency [Hz]; times are also reported in units of
        ``w_p`` via ``wp_dt = frequency * dt``.

    restart: str
        Which phase is being restarted ('production_restart'/'prod_restart',
        'equilibration_restart'/'eq_restart',
        'magnetization_restart'/'mag_restart'); any other value is treated as
        a fresh run and all phases are reported.

    restart_step: int
        Timestep the restart begins at (printed only for restarts).

    Notes
    -----
    Bug fixes vs. the previous version: the equilibration- and
    magnetization-restart branches printed the total phase time in units of
    ``w_p`` using the *dump step* instead of the total number of phase steps.
    """
    if self.magnetized and self.electrostatic_equilibration:
        print("Type: {}".format(self.magnetic_integrator.__name__))
    else:
        print("Type: {}".format(self.type))
    wp_dt = frequency * self.dt
    print('Time step = {:.6e} [s]'.format(self.dt))
    print('Total plasma frequency = {:.6e} [Hz]'.format(frequency))
    print('w_p dt = {:.4f} ~ 1/{}'.format(wp_dt, int(1.0/wp_dt)))

    # Print Time steps information
    # Check for restart simulations: only the restarted phase is reported.
    if restart in ['production_restart', 'prod_restart']:
        print("Restart step: {}".format(restart_step))
        print('Total production steps = {} \n'
              'Total production time = {:.4e} [s] ~ {} w_p T_prod '.format(
                  self.production_steps,
                  self.production_steps * self.dt,
                  int(self.production_steps * wp_dt)))
        print('snapshot interval step = {} \n'
              'snapshot interval time = {:.4e} [s] = {:.4f} w_p T_snap'.format(
                  self.prod_dump_step,
                  self.prod_dump_step * self.dt,
                  self.prod_dump_step * wp_dt))
        print('Total number of snapshots = {} '.format(int(self.production_steps / self.prod_dump_step)))

    elif restart in ['equilibration_restart', 'eq_restart']:
        print("Restart step: {}".format(restart_step))
        print('Total equilibration steps = {} \n'
              'Total equilibration time = {:.4e} [s] ~ {} w_p T_eq'.format(
                  self.equilibration_steps,
                  self.equilibration_steps * self.dt,
                  # BUG FIX: was int(self.eq_dump_step * wp_dt)
                  int(self.equilibration_steps * wp_dt)))
        print('snapshot interval step = {} \n'
              'snapshot interval time = {:.4e} [s] = {:.4f} w_p T_snap'.format(
                  self.eq_dump_step,
                  self.eq_dump_step * self.dt,
                  self.eq_dump_step * wp_dt))
        print('Total number of snapshots = {} '.format(int(self.equilibration_steps / self.eq_dump_step)))

    elif restart in ['magnetization_restart', 'mag_restart']:
        print("Restart step: {}".format(restart_step))
        print('Total magnetization steps = {} \n'
              'Total magnetization time = {:.4e} [s] ~ {} w_p T_mag'.format(
                  self.magnetization_steps,
                  self.magnetization_steps * self.dt,
                  # BUG FIX: was int(self.mag_dump_step * wp_dt)
                  int(self.magnetization_steps * wp_dt)))
        print('snapshot interval step = {} \n'
              'snapshot interval time = {:.4e} [s] ~ {:.4f} w_p T_snap'.format(
                  self.mag_dump_step,
                  self.mag_dump_step * self.dt,
                  self.mag_dump_step * wp_dt))
        print('Total number of snapshots = {} '.format(int(self.magnetization_steps / self.mag_dump_step)))

    else:
        # Fresh run: report every phase.
        # Equilibration
        print('\nEquilibration: \nNo. of equilibration steps = {} \n'
              'Total equilibration time = {:.4e} [s] ~ {} w_p T_eq '.format(
                  self.equilibration_steps,
                  self.equilibration_steps * self.dt,
                  int(self.equilibration_steps * wp_dt)))
        print('snapshot interval step = {} \n'
              'snapshot interval time = {:.4e} [s] = {:.4f} w_p T_snap'.format(
                  self.eq_dump_step,
                  self.eq_dump_step * self.dt,
                  self.eq_dump_step * wp_dt))
        print('Total number of snapshots = {} '.format(int(self.equilibration_steps / self.eq_dump_step)))

        # Magnetization (only when an electrostatic equilibration precedes it)
        if self.electrostatic_equilibration:
            print('Electrostatic Equilibration Type: {}'.format(self.type))

            print('\nMagnetization: \nNo. of magnetization steps = {} \n'
                  'Total magnetization time = {:.4e} [s] ~ {} w_p T_mag '.format(
                      self.magnetization_steps,
                      self.magnetization_steps * self.dt,
                      int(self.magnetization_steps * wp_dt)))

            print('snapshot interval step = {} \n'
                  'snapshot interval time = {:.4e} [s] = {:.4f} w_p T_snap'.format(
                      self.mag_dump_step,
                      self.mag_dump_step * self.dt,
                      self.mag_dump_step * wp_dt))
            print('Total number of snapshots = {} '.format(int(self.magnetization_steps / self.mag_dump_step)))

        # Production
        print('\nProduction: \nNo. of production steps = {} \n'
              'Total production time = {:.4e} [s] ~ {} w_p T_prod '.format(
                  self.production_steps,
                  self.production_steps * self.dt,
                  int(self.production_steps * wp_dt)))
        print('snapshot interval step = {} \n'
              'snapshot interval time = {:.4e} [s] = {:.4f} w_p T_snap'.format(
                  self.prod_dump_step,
                  self.prod_dump_step * self.dt,
                  self.prod_dump_step * wp_dt))
        print('Total number of snapshots = {} '.format(int(self.production_steps / self.prod_dump_step)))
@njit
def enforce_pbc(pos, cntr, BoxVector):
    """
    Enforce Periodic Boundary conditions.

    Parameters
    ----------
    pos: numpy.ndarray
        Particles' positions.

    cntr: numpy.ndarray
        Counter for the number of times each particle get folded back into the main simulation box

    BoxVector: numpy.ndarray
        Box Dimensions.

    Notes
    -----
    Each coordinate is folded at most once per call; this assumes no particle
    moves more than one box length per timestep — TODO confirm that holds for
    the timesteps in use.
    """
    # Loop over all particles
    for p in np.arange(pos.shape[0]):
        for d in np.arange(pos.shape[1]):

            # If particle is outside of box in positive direction, wrap to negative side
            if pos[p, d] > BoxVector[d]:
                pos[p, d] -= BoxVector[d]
                cntr[p, d] += 1
            # If particle is outside of box in negative direction, wrap to positive side
            if pos[p, d] < 0.0:
                pos[p, d] += BoxVector[d]
                cntr[p, d] -= 1
@njit
def enforce_abc(pos, vel, acc, charges, BoxVector):
    """
    Enforce Absorbing Boundary conditions.

    A particle that reaches a wall is clamped to that wall and neutralized:
    its velocity, acceleration, and charge are set to zero (all dimensions at
    once, even though the check is per-coordinate).

    Parameters
    ----------
    pos: numpy.ndarray
        Particles' positions.

    vel : numpy.ndarray
        Particles' velocities.

    acc : numpy.ndarray
        Particles' accelerations.

    charges : numpy.ndarray
        Charge of each particle. Shape = (``total_num_ptcls``).

    BoxVector: numpy.ndarray
        Box Dimensions.
    """
    # Loop over all particles
    for p in np.arange(pos.shape[0]):
        for d in np.arange(pos.shape[1]):

            # If particle is outside of box in positive direction, remove charge, velocity and acceleration
            if pos[p, d] >= BoxVector[d]:
                pos[p, d] = BoxVector[d]
                vel[p, :] = np.zeros(3)
                acc[p, :] = np.zeros(3)
                charges[p] = 0.0
            # If particle is outside of box in negative direction, remove charge, velocity and acceleration
            if pos[p, d] <= 0.0:
                pos[p, d] = 0.0
                vel[p, :] = np.zeros(3)
                acc[p, :] = np.zeros(3)
                charges[p] = 0.0
@njit
def remove_drift(vel, nums, masses):
    """
    Enforce conservation of total linear momentum. Updates ``vel`` in place.

    Parameters
    ----------
    vel: numpy.ndarray
        Particles' velocities.

    nums: numpy.ndarray
        Number of particles of each species.

    masses: numpy.ndarray
        Mass of each species.

    Notes
    -----
    BUG FIX: the drift test now compares the *magnitude* of the total
    momentum against the threshold. The previous signed comparison
    (``np.sum(P[:, d]) > 1e-40``) silently skipped removal whenever the net
    drift along an axis was negative.
    """
    # Per-species total momentum: P[ic] = m_ic * sum of velocities.
    P = np.zeros((len(nums), vel.shape[1]))
    species_start = 0
    for ic in range(len(nums)):
        species_end = species_start + nums[ic]
        P[ic, :] = np.sum(vel[species_start:species_end, :], axis=0) * masses[ic]
        species_start = species_end

    if np.abs(np.sum(P[:, 0])) > 1e-40 or np.abs(np.sum(P[:, 1])) > 1e-40 or np.abs(np.sum(P[:, 2])) > 1e-40:
        # Remove tot momentum: subtract each species' mean drift velocity.
        species_start = 0
        for ic in range(len(nums)):
            species_end = species_start + nums[ic]
            vel[species_start:species_end, :] -= P[ic, :] / (float(nums[ic]) * masses[ic])
            species_start = species_end
| StarcoderdataPython |
1633707 | """Platform for Miele button integration."""
from __future__ import annotations
import logging
from dataclasses import dataclass
from typing import Any, Final
import aiohttp
from homeassistant.components.button import ButtonEntity, ButtonEntityDescription
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from . import get_coordinator
from .const import (
ACT_START,
ACT_STOP,
ACTIONS,
API,
DIALOG_OVEN,
DISHWASHER,
DOMAIN,
HOOD,
MICROWAVE,
OVEN,
OVEN_MICROWAVE,
POWER_OFF,
POWER_ON,
PROCESS_ACTION,
STEAM_OVEN,
STEAM_OVEN_COMBI,
STEAM_OVEN_MICRO,
TUMBLE_DRYER,
WASHER_DRYER,
WASHING_MACHINE,
)
_LOGGER = logging.getLogger(__name__)
@dataclass
class MieleButtonDescription(ButtonEntityDescription):
    """Class describing Miele button entities."""

    # Key into the coordinator's flattened device data used for naming
    # (e.g. "ident|type|value_localized").
    type_key: str | None = None
    # Payload sent to the Miele API when this button is pressed.
    press_data: dict[str, Any] | None = None
@dataclass
class MieleButtonDefinition:
    """Class for defining button entities."""

    # Raw Miele appliance type codes this button applies to.
    types: tuple[int, ...]
    # Annotation fixed: the field defaults to None, so it must be Optional.
    description: MieleButtonDescription | None = None
# Static catalogue of button entities created per appliance: one "Start" and
# one "Stop" button for each supported appliance type (hoods get Stop only).
# NOTE(review): `types` is annotated as tuple[int, ...] but populated with
# lists; membership tests work either way — confirm before tightening.
BUTTON_TYPES: Final[tuple[MieleButtonDefinition, ...]] = (
    MieleButtonDefinition(
        types=[
            WASHING_MACHINE,
            TUMBLE_DRYER,
            DISHWASHER,
            OVEN,
            OVEN_MICROWAVE,
            STEAM_OVEN,
            MICROWAVE,
            WASHER_DRYER,
            STEAM_OVEN_COMBI,
            STEAM_OVEN_MICRO,
            DIALOG_OVEN,
        ],
        description=MieleButtonDescription(
            key="start",
            type_key="ident|type|value_localized",
            name="Start",
            press_data={PROCESS_ACTION: ACT_START},
        ),
    ),
    MieleButtonDefinition(
        types=[
            WASHING_MACHINE,
            TUMBLE_DRYER,
            DISHWASHER,
            OVEN,
            OVEN_MICROWAVE,
            STEAM_OVEN,
            MICROWAVE,
            HOOD,
            WASHER_DRYER,
            STEAM_OVEN_COMBI,
            STEAM_OVEN_MICRO,
            DIALOG_OVEN,
        ],
        description=MieleButtonDescription(
            key="stop",
            type_key="ident|type|value_localized",
            name="Stop",
            press_data={PROCESS_ACTION: ACT_STOP},
        ),
    ),
)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigType,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the button platform."""
    coordinator = await get_coordinator(hass, config_entry)

    entities: list[MieleButton] = []
    for idx, ent in enumerate(coordinator.data):
        device_type = coordinator.data[ent]["ident|type|value_raw"]
        # One button entity per definition whose type list covers this device.
        entities.extend(
            MieleButton(coordinator, idx, ent, definition.description, hass, config_entry)
            for definition in BUTTON_TYPES
            if device_type in definition.types
        )

    async_add_entities(entities)
class MieleButton(CoordinatorEntity, ButtonEntity):
    """Representation of a Button."""

    entity_description: MieleButtonDescription

    def __init__(
        self,
        coordinator: DataUpdateCoordinator,
        idx,
        ent,
        description: MieleButtonDescription,
        hass: HomeAssistant,
        entry: ConfigType,
    ):
        """Initialize the button.

        Args:
            coordinator: Shared data-update coordinator for the integration.
            idx: Positional index of this device in the coordinator data.
            ent: Device identifier (key into ``coordinator.data``).
            description: Static description of this button type.
            hass: Home Assistant instance.
            entry: Config entry owning this integration instance.
        """
        super().__init__(coordinator)
        # API client and raw integration data stored by the component setup.
        self._api = hass.data[DOMAIN][entry.entry_id][API]
        self._api_data = hass.data[DOMAIN][entry.entry_id]
        self._idx = idx
        self._ent = ent
        self.entity_description = description
        _LOGGER.debug("init button %s", ent)
        # Entity name: "<localized appliance type> <button name>".
        self._attr_name = f"{self.coordinator.data[self._ent][self.entity_description.type_key]} {self.entity_description.name}"
        self._attr_unique_id = f"{self.entity_description.key}-{self._ent}"
        self._attr_device_info = DeviceInfo(
            identifiers={(DOMAIN, self._ent)},
            name=self.coordinator.data[self._ent][self.entity_description.type_key],
            manufacturer="Miele",
            model=self.coordinator.data[self._ent]["ident|deviceIdentLabel|techType"],
        )

    def _action_available(self, action) -> bool:
        """Check if action is available according to API."""
        # _LOGGER.debug("%s _action_available: %s", self.entity_description.name, action)
        if PROCESS_ACTION in action:
            # Process actions are available when the requested value appears
            # in the device's currently allowed process-action list.
            value = action.get(PROCESS_ACTION)
            action_data = (
                self._api_data.get(ACTIONS, {})
                .get(self._ent, {})
                .get(PROCESS_ACTION, {})
            )
            return value in action_data
        elif POWER_ON in action:
            # Power capabilities are reported as plain booleans.
            action_data = (
                self._api_data.get(ACTIONS, {}).get(self._ent, {}).get(POWER_ON, False)
            )
            return action_data
        elif POWER_OFF in action:
            action_data = (
                self._api_data.get(ACTIONS, {}).get(self._ent, {}).get(POWER_OFF, False)
            )
            return action_data
        _LOGGER.debug("Action not found: %s", action)
        return False

    @property
    def available(self):
        """Return the availability of the entity."""
        # Unavailable if the last coordinator refresh failed, if the device
        # reports raw status 255 (presumably "not connected" — confirm
        # against the Miele API docs), or if the API currently rejects this
        # button's action.
        if not self.coordinator.last_update_success:
            return False
        return self.coordinator.data[self._ent][
            "state|status|value_raw"
        ] != 255 and self._action_available(self.entity_description.press_data)

    async def async_press(self):
        """Press the button."""
        _LOGGER.debug("press: %s", self._attr_name)
        if self._action_available(self.entity_description.press_data):
            try:
                await self._api.send_action(
                    self._ent, self.entity_description.press_data
                )
            except aiohttp.ClientResponseError as ex:
                _LOGGER.error("Press: %s - %s", ex.status, ex.message)
            # TODO Consider removing accepted action from [ACTIONS] to block
            # further calls of async_press util API update arrives
        else:
            # Device state changed between availability check and press.
            _LOGGER.warning(
                "Device does not accept this action now: %s / %s",
                self._attr_name,
                self.entity_description.press_data,
            )
| StarcoderdataPython |
9677909 | #!/usr/bin/env python3
# <NAME>
# based off of Tdulcet's 'mprime.exp'
# Python3 exp.py <User ID> <Computer name> <Type of work>
# Note: * pexpect has quirks about nonempty ()s (e.g., (y)), *s, inline ''s, or +s.
import sys
from time import sleep
import subprocess

# Potentially installing dependency: bootstrap pexpect on a fresh machine.
# NOTE(review): shell=True with a fixed command string; acceptable here since
# no user input is interpolated.
try:
    import pexpect
except ImportError as error:
    print("Installing pexpect...")
    p = subprocess.run('pip install pexpect', shell=True)
    import pexpect

# Prerequisites, gained from mprime.py
USERID, COMPUTER, TYPE = sys.argv[1], sys.argv[2], sys.argv[3]

child = pexpect.spawn('./mprime -m')  # starts shell to interact with
child.logfile = sys.stdout.buffer  # enables output to screen (Python 3)

# Ordered (prompt substring, reply) pairs answering mprime's setup dialog.
expects = (("Join Gimps?", "y"), ("Use PrimeNet to get work and report results ()", "y"),
           ("Your user ID or", USERID), ("Optional computer name", COMPUTER),
           ("Type of work to get", TYPE), ("Your choice:", "5"))
index = 0
while 1:
    try:
        if index != len(expects):
            # Walk through the scripted dialog one prompt at a time.
            child.expect(expects[index][0], timeout=1)
            sleep(1)
            child.sendline(expects[index][1])
            index += 1
        else:
            # Dialog done: wait for server sync, send Ctrl-C to interrupt,
            # then choose menu option 5 to exit cleanly.
            child.expect("Done communicating with server.")
            child.sendline("\x03")
            sleep(10)
            child.expect("Choose Test/Continue to restart.")
            sleep(1)
            child.sendline("5")
            child.expect(pexpect.EOF)
            break
    except pexpect.TIMEOUT:
        # Prompt not seen within the timeout: nudge mprime with an empty line.
        child.sendline("")
| StarcoderdataPython |
4882661 | <filename>LeetCode-All-Solution/Python3/LC-0784-Letter-Case-Permutation.py<gh_stars>0
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0784-Letter-Case-Permutation.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-01-11
=================================================================="""
import sys
import time
from typing import List
"""
LeetCode - 0784 - (Medium) - Letter Case Permutation
https://leetcode.com/problems/letter-case-permutation/
Description & Requirement:
Given a string s, you can transform every letter individually
to be lowercase or uppercase to create another string.
Return a list of all possible strings we could create.
Return the output in any order.
Example 1:
Input: s = "a1b2"
Output: ["a1b2","a1B2","A1b2","A1B2"]
Example 2:
Input: s = "3z4"
Output: ["3z4","3Z4"]
Constraints:
1 <= s.length <= 12
s consists of lowercase English letters, uppercase English letters, and digits.
"""
class Solution:
    def letterCasePermutation(self, s: str) -> List[str]:
        """Return every string obtainable from ``s`` by toggling letter case.

        Digits are copied unchanged; each letter doubles the result count.
        Malformed input (non-string, empty, or non-alphanumeric) yields [].
        """
        # Guard against invalid input.
        if not isinstance(s, str) or len(s) <= 0 or not s.isalnum():
            return []
        # A purely numeric string has exactly one permutation: itself.
        if s.isdigit():
            return [s]
        return self._letterCasePermutation(s)

    def _letterCasePermutation(self, s: str) -> List[str]:
        """Iteratively expand prefixes, branching on each letter's two cases.

        Expansion order (lowercase before uppercase, leftmost character most
        significant) matches a depth-first enumeration.
        """
        prefixes = [""]
        for ch in s:
            if ch.isalpha():
                prefixes = [p + variant
                            for p in prefixes
                            for variant in (ch.lower(), ch.upper())]
            else:
                prefixes = [p + ch for p in prefixes]
        return prefixes
def main():
    """Run one sample of Solution.letterCasePermutation and report timing."""
    # Example 1: Output: ["a1b2","a1B2","A1b2","A1B2"]
    # s = "a1b2"
    # Example 2: Output: ["3z4","3Z4"]
    s = "3z4"

    solver = Solution()

    # Time only the algorithm call using process time.
    start = time.process_time()
    ans = solver.letterCasePermutation(s)
    end = time.process_time()

    print('\nAnswer:')
    print(ans)

    print('Running Time: %.5f ms' % ((end - start) * 1000))
if __name__ == "__main__":
sys.exit(main())
| StarcoderdataPython |
1762758 | <gh_stars>1-10
#!/usr/bin/env python
import math
import numpy as np
from tqdm import tqdm
import seaborn as sns
import pandas as pd
from subprocess import call
import matplotlib.pyplot as plt
from pushestpy.utils import utils
plt.rcParams.update({'font.size': 36})
def wrap_to_pi(arr):
    """Wrap angles (radians) element-wise into the interval [-pi, pi)."""
    two_pi = 2 * math.pi
    return (arr + math.pi) % two_pi - math.pi
def traj_error(xyh_gt, xyh_est, err_type):
    """
    Compute the error between ground-truth and estimated SE(2) trajectories.

    Parameters
    ----------
    xyh_gt, xyh_est : numpy.ndarray
        Trajectories of shape (N, 3) with columns (x, y, heading [rad]).
    err_type : str
        Error metric; only "rmse" is implemented.

    Returns
    -------
    tuple of float
        (translational RMSE, rotational RMSE). Heading differences are
        wrapped to [-pi, pi) before squaring.

    Raises
    ------
    NotImplementedError
        If ``err_type == "ate"`` (stub, not yet implemented).
    ValueError
        For any other unsupported ``err_type``. The previous version fell
        through to ``return error`` with ``error`` unbound, raising an
        uninformative UnboundLocalError.
    """
    if err_type == "rmse":
        diff = xyh_gt - xyh_est
        # Wrap heading residuals so pi vs -pi does not inflate the error.
        diff[:, 2] = wrap_to_pi(diff[:, 2])
        diff_sq = diff ** 2
        rmse_trans = np.sqrt(np.mean(diff_sq[:, 0:2].flatten()))
        rmse_rot = np.sqrt(np.mean(diff_sq[:, 2].flatten()))
        return (rmse_trans, rmse_rot)
    if err_type == "ate":
        raise NotImplementedError("traj_error: 'ate' metric is not implemented yet")
    raise ValueError("traj_error: unsupported err_type '{}'".format(err_type))
def compute_traj_errors(cfg):
    """
    Compute per-timestep trajectory RMSE for every logger over all sequences
    of the first dataset in ``cfg.dataset_names``.

    Parameters
    ----------
    cfg : config object
        Must provide ``BASE_PATH``, ``dataset_names`` and a ``logger``
        section (``names``, ``num_seqs`` (int), ``num_steps``, ``fields``).

    Returns
    -------
    dict
        Maps logger name -> ndarray of shape (num_seqs, num_steps, 2) with
        (translational RMSE, rotational RMSE) at every timestep.
    """
    errors_all = {}
    for logger_idx, logger_name in enumerate(cfg.logger.names):
        print("Computing errors for logger {0}".format(logger_name))
        error_mat = np.zeros((cfg.logger.num_seqs, cfg.logger.num_steps, 2))
        for seq_idx in tqdm(range(cfg.logger.num_seqs)):
            # Load the pickled logger object for this sequence.
            logger_filename = "{0}/local/outputs/{1}/seq_{2:03d}/{3}.obj".format(
                cfg.BASE_PATH, cfg.dataset_names[0], seq_idx, logger_name)
            logger = utils.load_pkl_obj(logger_filename)
            for tstep in range(cfg.logger.num_steps):
                # Ground-truth vs graph-estimated object poses at this step.
                poses_obj_gt = logger.data[tstep][cfg.logger.fields.poses_obj_gt]
                poses_obj_graph = logger.data[tstep][cfg.logger.fields.poses_obj_graph]
                error_mat[seq_idx, tstep, 0], error_mat[seq_idx, tstep, 1] = traj_error(
                    poses_obj_gt, poses_obj_graph, err_type="rmse")
        errors_all[logger_name] = error_mat
    return errors_all
def compute_traj_errors_multi_dataset(cfg):
    """
    Compute per-timestep trajectory RMSE for every logger, concatenating the
    sequences of all datasets in ``cfg.dataset_names`` along axis 0.

    Unlike :func:`compute_traj_errors`, here ``cfg.logger.num_seqs`` is a
    per-dataset list and ``cfg.logger.dir_prefix`` is part of the path.

    Parameters
    ----------
    cfg : config object
        Must provide ``BASE_PATH``, ``dataset_names`` and a ``logger``
        section (``names``, ``num_seqs`` (list), ``num_steps``,
        ``dir_prefix``, ``fields``).

    Returns
    -------
    dict
        Maps logger name -> ndarray of shape (sum(num_seqs), num_steps, 2)
        with (translational RMSE, rotational RMSE) at every timestep.
    """
    errors_all = {}
    num_seqs_total = sum(cfg.logger.num_seqs)
    for logger_idx, logger_name in enumerate(cfg.logger.names):
        print("Computing errors for logger {0}".format(logger_name))
        error_mat = np.zeros((num_seqs_total, cfg.logger.num_steps, 2))
        # Running row index across datasets.
        err_idx = 0
        for (ds_idx, dataset) in enumerate(cfg.dataset_names):
            for seq_idx in tqdm(range(cfg.logger.num_seqs[ds_idx])):
                # Load the pickled logger object for this dataset/sequence.
                logger_filename = "{0}/local/outputs/{1}/{2}/seq_{3:03d}/{4}.obj".format(
                    cfg.BASE_PATH, cfg.logger.dir_prefix, cfg.dataset_names[ds_idx], seq_idx, logger_name)
                logger = utils.load_pkl_obj(logger_filename)
                for tstep in range(cfg.logger.num_steps):
                    poses_obj_gt = logger.data[tstep][cfg.logger.fields.poses_obj_gt]
                    poses_obj_graph = logger.data[tstep][cfg.logger.fields.poses_obj_graph]
                    error_mat[err_idx, tstep, 0], error_mat[err_idx, tstep, 1] = traj_error(
                        poses_obj_gt, poses_obj_graph, err_type="rmse")
                err_idx = err_idx + 1
        errors_all[logger_name] = error_mat
    return errors_all
def get_subset(errors, cfg, num_subset):
    """Drop the worst outlier sequences, then keep a random subset.

    Outliers are ranked by the final-step rotational error of the third
    logger (``cfg.logger.names[2]``), matching the original criterion.

    Args:
        errors (dict): logger name -> ndarray of shape (nseq, nsteps, 2);
            mutated in place.
        cfg: Config providing ``logger.names``.
        num_subset (int): Number of sequences to keep (caller guarantees
            ``num_subset`` < total number of sequences).

    Returns:
        dict: The same mapping with every array reduced to ``num_subset`` rows.
    """
    # Remove up to 25% of sequences as outliers, but never so many that fewer
    # than num_subset sequences would remain.
    errors_logger = errors[cfg.logger.names[2]]  # nseq x nsteps x 2
    nseq = errors_logger.shape[0]
    nout = np.minimum(int(0.25 * nseq), nseq - num_subset)
    outlier_idxs = np.argsort(-errors_logger[:, -1, 1])[:nout]
    for logger_name in cfg.logger.names:
        errors[logger_name] = np.delete(errors[logger_name], outlier_idxs, 0)
    # Choose the random subset WITHOUT replacement: np.random.randint could
    # draw the same sequence twice, silently duplicating rows in the "subset".
    subset_idxs = np.random.choice(
        len(errors[cfg.logger.names[0]]), size=num_subset, replace=False)
    for logger_name in cfg.logger.names:
        errors[logger_name] = errors[logger_name][subset_idxs]
    return errors
def plot_traj_errors(cfg, errors):
    """Plot translational/rotational error shade plots and box plots.

    Produces four figures (trans/rot shade plots over time, trans/rot final
    box plots), converts errors to mm and degrees, and saves PNGs under
    ``cfg.BASE_PATH/local/visualizations``.

    Args:
        cfg: Config providing logger names/labels/colors, step count,
            frequency, ``BASE_PATH`` and ``obj_sdf_shape``.
        errors (dict): logger name -> ndarray of shape (nseq, nsteps, 2).
    """
    plt.ion()
    fig1 = plt.figure(constrained_layout=True, figsize=(12, 8))
    fig2 = plt.figure(constrained_layout=True, figsize=(12, 8))
    fig3 = plt.figure(constrained_layout=True, figsize=(12, 8))
    fig4 = plt.figure(constrained_layout=True, figsize=(12, 8))
    # fontsize_label = 22
    # Optionally subsample the sequences to keep the box plots readable.
    num_subset = 50
    errors = get_subset(errors, cfg, num_subset=num_subset) if (
        num_subset < sum(cfg.logger.num_seqs)) else errors
    num_seqs = np.minimum(num_subset, sum(cfg.logger.num_seqs))
    errors_final_bplot = np.zeros(
        (num_seqs, len(cfg.logger.names), 2))  # nseq x nloggers x 2
    for logger_idx, logger_name in enumerate(cfg.logger.names):
        errors_logger = errors[logger_name]  # nseq x nsteps x 2
        # Final-step errors, converted to mm (trans) and degrees (rot).
        errors_final_bplot[:, logger_idx, 0] = 1000 * errors_logger[:, -1, 0]
        errors_final_bplot[:, logger_idx, 1] = 180 / \
            math.pi * (errors_logger[:, -1, 1])
        # plotting data
        x = np.arange(cfg.logger.num_steps) / float(cfg.logger.freq)
        mean_error_time = np.mean(errors_logger, 0)
        std_error_time = np.std(errors_logger, 0)
        scale_vis_std = 0.3  # shrink the std band for visual clarity only
        # convert units
        mean_error_time[:, 0] = 1000 * mean_error_time[:, 0]
        std_error_time[:, 0] = 1000 * std_error_time[:, 0]
        mean_error_time[:, 1] = 180/math.pi * (mean_error_time[:, 1])
        std_error_time[:, 1] = 180/math.pi * (std_error_time[:, 1])
        # error shade plot (trans)
        plt.figure(fig1.number)
        plt.semilogy(x, mean_error_time[:, 0], color=cfg.logger.colors[logger_idx],
                     label=cfg.logger.labels[logger_idx], linewidth=2)
        plt.fill_between(x, mean_error_time[:, 0] - scale_vis_std * std_error_time[:, 0],
                         mean_error_time[:, 0] +
                         scale_vis_std * std_error_time[:, 0],
                         color=cfg.logger.colors[logger_idx], alpha=0.2)
        plt.xlabel("Time (s)")
        plt.ylabel("RMSE translational error (mm)")
        # plt.legend(loc='upper left')
        plt.ylim((10**-1, 10**2))
        plt.savefig("{0}/local/visualizations/quant_{1}_trans_err_shadeplot.png".format(
            cfg.BASE_PATH, cfg.obj_sdf_shape))
        # error shade plot (rot)
        plt.figure(fig2.number)
        plt.semilogy(x, mean_error_time[:, 1], color=cfg.logger.colors[logger_idx],
                     label=cfg.logger.labels[logger_idx], linewidth=2)
        plt.fill_between(x, mean_error_time[:, 1] - scale_vis_std * std_error_time[:, 1],
                         mean_error_time[:, 1] +
                         scale_vis_std * std_error_time[:, 1],
                         color=cfg.logger.colors[logger_idx], alpha=0.2)
        plt.xlabel("Time (s)")
        plt.ylabel("RMSE rotational error (deg)")
        # plt.legend(loc='upper left')
        plt.ylim((10**-2, 10**2))
        plt.savefig("{0}/local/visualizations/quant_{1}_rot_err_shadeplot.png".format(
            cfg.BASE_PATH, cfg.obj_sdf_shape))
    plt.show()
    line_props = dict(color="black", alpha=1.0, linewidth=2)
    kwargs = {'vert': True, 'notch': False, 'patch_artist': True,
              'medianprops': line_props, 'whiskerprops': line_props}
    # error box plot (trans)
    plt.figure(fig3.number)
    bplot1 = plt.gca().boxplot(
        errors_final_bplot[:, :, 0], widths=0.4, labels=cfg.logger.labels, **kwargs)
    plt.gca().set_yscale('log')
    # plt.title("RMSE translational error (mm)")
    plt.ylim((10**-1, 10**2))
    for patch, color in zip(bplot1['boxes'], cfg.logger.colors):
        # Add 50% alpha to each logger's RGB color.
        color = np.array([color[0], color[1], color[2], 0.5])
        patch.set_facecolor(color)
        patch.set_linewidth(2)
    plt.savefig("{0}/local/visualizations/quant_{1}_trans_err_boxplot.png".format(
        cfg.BASE_PATH, cfg.obj_sdf_shape))
    # error box plot (rot)
    plt.figure(fig4.number)
    bplot2 = plt.gca().boxplot(
        errors_final_bplot[:, :, 1], widths=0.4, labels=cfg.logger.labels, **kwargs)
    plt.gca().set_yscale('log')
    # plt.title("RMSE rotational error (deg)")
    plt.ylim((10**-2, 10**2))
    for patch, color in zip(bplot2['boxes'], cfg.logger.colors):
        color = np.array([color[0], color[1], color[2], 0.5])
        patch.set_facecolor(color)
        patch.set_linewidth(2)
    plt.savefig("{0}/local/visualizations/quant_{1}_rot_err_boxplot.png".format(
        cfg.BASE_PATH, cfg.obj_sdf_shape))
    plt.show(block=True)
def eval_traj_error(cfg):
    """Compute trajectory errors over all datasets, then plot them."""
    plot_traj_errors(cfg, compute_traj_errors_multi_dataset(cfg))
def runtime_plot(cfg):
    """Plot mean +/- std per-iteration runtime of the learnt-model logger.

    Loads pickled runtime logs for every sequence of the first dataset and
    shows an error-bar plot of runtime versus time.

    Args:
        cfg: Config providing logger names/colors, sequence/step counts,
            frequency, ``dataset_names`` and ``BASE_PATH``.
    """
    # learnt model is assumed to be the third configured logger
    logger_idx = 2
    logger_name = cfg.logger.names[logger_idx]
    num_seqs = cfg.logger.num_seqs[0]
    runtime_mat = np.zeros((num_seqs, cfg.logger.num_steps))  # nseq x nsteps
    for seq_idx in tqdm(range(num_seqs)):
        logger_filename = "{0}/local/outputs/runtime/{1}/seq_{2:03d}/{3}.obj".format(
            cfg.BASE_PATH, cfg.dataset_names[0], seq_idx, logger_name)
        logger = utils.load_pkl_obj(logger_filename)
        for tstep in range(cfg.logger.num_steps):
            runtime_mat[seq_idx, tstep] = logger.runtime[tstep]
    x = np.arange(cfg.logger.num_steps) / float(cfg.logger.freq)
    mean_runtimes = np.mean(runtime_mat, 0)
    std_runtimes = np.std(runtime_mat, 0)
    plt.errorbar(x, mean_runtimes, std_runtimes, linewidth=2,
                 color=cfg.logger.colors[logger_idx])
    plt.ylim(0, 0.05)
    plt.xlabel("Time (s)", fontsize=28)
    plt.ylabel("Runtime per iteration", fontsize=28)
    plt.show(block=True)
| StarcoderdataPython |
# Recipe-module dependencies resolved by the recipe engine at load time.
# (First line previously carried a fused dataset-id artifact "322308 | ",
# which made the file invalid Python.)
DEPS = [
  'archive',
  'depot_tools/bot_update',
  'chromium',
  'chromium_tests',
  'chromium_android',
  'commit_position',
  'file',
  'depot_tools/gclient',
  'isolate',
  'recipe_engine/path',
  'recipe_engine/platform',
  'recipe_engine/properties',
  'recipe_engine/python',
  'recipe_engine/step',
  'swarming',
  'test_utils',
  'trigger',
  'depot_tools/tryserver',
]
| StarcoderdataPython |
6459393 | from django.conf import settings
from django.contrib import messages
from django.core.mail import send_mail, mail_admins
from django.urls import reverse
from django.utils import timezone
from .models import *
| StarcoderdataPython |
62634 | #!/usr/bin/python -B
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
# asfindex.py - Pelican plugin that runs shell scripts during initialization
#
import sys
import subprocess
import shlex
import io
import os
import os.path
import traceback
import pelican.plugins.signals
import pelican.settings
from pelican.contents import Article, Page, Static
from pelican.generators import (ArticlesGenerator, # noqa: I100
PagesGenerator, SourceFileGenerator,
StaticGenerator, TemplatePagesGenerator)
# get setting
# Settings are for the whole pelican environment.
def get_setting(generators, setting):
    """Return a Pelican setting by name, or None when it is absent.

    Settings apply to the whole Pelican environment; the PagesGenerator is
    used merely as a convenient carrier of the settings dict.
    """
    for generator in generators:
        if isinstance(generator, PagesGenerator):
            # dict.get replaces the former blanket try/except that silently
            # swallowed every error, including genuine bugs.
            return generator.settings.get(setting)
    return None
# set context
# Context are the processed settings and other environment which is made available to the JINJA template.
# Changes to the settings have no effect as those are already copied to each generator's context.
def set_context(generators, setting, value):
    """Store *value* under *setting* in the PagesGenerator context.

    Context holds the processed settings made available to the Jinja
    templates; changing raw settings at this point would have no effect,
    since each generator has already copied them into its context.

    Returns the stored value, or None when no PagesGenerator is present.
    """
    pages_generators = (g for g in generators if isinstance(g, PagesGenerator))
    for generator in pages_generators:
        generator.context[setting] = value
        return value
    return None
# get pages
# The PagesGenerator has a list of pages. Retrieve a sorted array of page information
def get_pages(generators):
    """Collect a sorted list of (folder, url, title) tuples for all pages.

    URLs are absolute; ``.../index.html`` is shortened to the folder URL
    ending in ``/`` so that index pages sort first within their folder.
    """
    entries = []
    for generator in generators:
        if not isinstance(generator, PagesGenerator):
            continue
        for page in generator.pages:
            # Absolute path of the rendered page.
            url = '/' + page.save_as
            if url.endswith('/index.html'):
                # Keep the trailing slash so the folder page sorts first.
                url = url[:-len('index.html')]
            folder, _leaf = os.path.split(url)
            entries.append((folder, url, page.title))
    return sorted(entries)
# get site index
def get_index(site_index, scope):
    """Render an HTML site index restricted to *scope*.

    Args:
        site_index: Sorted list of (path, url, title) tuples from get_pages().
        scope: Path prefix to include, or the literal '**' for everything.
            A falsy scope yields None (nothing to render).

    Returns:
        str | None: Concatenated <h3>/<ol> HTML listing, '' when nothing
        matched, or None when *scope* is falsy.
    """
    if not scope:
        return
    match_all = scope == '**'
    parts = []
    open_list = False
    last_folder = None
    for entry_path, url, title in site_index:
        _parent, leaf = os.path.split(entry_path)
        folder = leaf.capitalize()
        if not (match_all or entry_path.startswith(scope)):
            continue
        if folder != last_folder:
            # New folder: close the previous list and open a fresh section.
            if open_list:
                parts.append('</ol>\n')
            open_list = True
            parts.append(f'<h3><a href="{url}">{title}</a></h3>\n')
            parts.append('<ol>\n')
            last_folder = folder
        else:
            # Subsequent page within the same folder.
            parts.append(f'<li><a href="{url}">{title}</a></li>\n')
    if open_list:
        parts.append('</ol>\n')
    return ''.join(parts)
# get site menu
# def get_menu(site_index, menus):
# currrent_menu = None
# site_menu = ''
# if menus:
# for f in menus:
# path, page = os.path.split(f)
# folder = page.capitalize()
# site_menu += '<li class="nav-item active dropdown">\n'
# site_menu += f'<a class="nav-link dropdown-toggle" href="#" id="dropdown{folder}" '
# site_menu += f'role="button" data-toggle="dropdown" aria-expanded="false">{folder}</a>\n'
# site_menu += f'<ul class="dropdown-menu" aria-labelledby="dropdown{folder}">\n'
# for p in site_index:
# if p[0] == f:
# # menu item for page
# site_menu += f'<li><a class="dropdownitem" href="{p[1]}">{p[2]}</a></li>\n'
# site_menu += '</ul></li>\n'
# return site_menu
#
#
# show pages
def show_pages(generators):
    """Expose the generated site index to templates via SITE_INDEX.

    Reads the ASF_INDEX setting; when it is absent the plugin does nothing
    (previously this raised TypeError subscripting None). Also removes a
    leftover debug print of the setting.
    """
    site_index = get_pages(generators)
    asf_index = get_setting(generators, 'ASF_INDEX')
    if not asf_index:
        # Setting not configured for this site; nothing to publish.
        return
    # Not currently interested in menus this way as it is not generalizable:
    # set_context(generators, 'SITE_MENU', get_menu(site_index, asf_index['menus']))
    set_context(generators, 'SITE_INDEX', get_index(site_index, asf_index['index']))
def tb_finalized(generators):
    """Run show_pages, printing any traceback before Pelican swallows it.

    Pelican reports plugin failures tersely; printing the full traceback to
    stderr first preserves the diagnostic. The exception is re-raised so a
    failure still stops the build.
    """
    try:
        show_pages(generators)
    except Exception:
        print('-----', file=sys.stderr)
        traceback.print_exc()
        # exceptions here stop the build
        raise
def register():
    """Pelican plugin entry point: run once all generators have finalized."""
    pelican.plugins.signals.all_generators_finalized.connect(tb_finalized)
| StarcoderdataPython |
9721598 | """Setup package"""
import os
import re
import subprocess
import sys
from setuptools import setup, find_packages
from setuptools.command.build_ext import build_ext as _build_ext
# Packages to include in the distribution
packages = find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"])
# Additional data required to install this package (Use MANIFEST.in to work with make sdist)
package_data = {
}
# Files that are data outside the package, e.g.:
# data_files=[('my_data', ['data/data_file'])],
# Runtime dependencies, read from requirements.in (skipping pip options and comments)
with open('requirements.in') as f:
    install_requires = [x for x in f.read().splitlines() if not x.startswith(('--', '#'))]
# Trove classifiers
classifiers = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'Natural Language :: English',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: Implementation :: PyPy'
]
# Keywords to help users find this package on PyPI
keywords = ['elgato', 'streamdeck', 'userfriendly', 'manager']
here = os.path.abspath(os.path.dirname(__file__))
meta = {}
readme = ''
# Read version metadata (exec'd from _meta.py) and the README for PyPI
with open(os.path.join(here, 'streamdeck_manager', '_meta.py'), 'r') as f:
    exec(f.read(), meta)
with open(os.path.join(here, 'README.md'), 'r') as f:
    readme = f.read()
setup(
    name=meta['__title__'],
    version=meta['__version__'],
    description=meta['__description__'],
    long_description=readme,
    long_description_content_type='text/markdown',
    url=meta['__url__'],
    author=meta['__author__'],
    author_email=meta['__email__'],
    license=meta['__license__'],
    classifiers=classifiers,
    keywords=keywords,
    platforms=['any'],
    packages=packages,
    install_requires=install_requires,
    package_data=package_data,
    include_package_data=True,
    # PEP 440: the ".*" wildcard is only valid with == / !=, so the previous
    # ">=3.6.*, <4" was an invalid specifier (setuptools warns / may reject).
    python_requires=">=3.6, <4",
    download_url='https://github.com/vgonisanz/streamdeck_manager/archive/refs/tags/v0.1.0.tar.gz'
)
| StarcoderdataPython |
8199700 | # Generated by Django 3.2.2 on 2021-07-20 06:17
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the obsolete `label` field from Product."""

    # Must run after the migration that added the field.
    dependencies = [
        ('shop', '0021_product_label'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='product',
            name='label',
        ),
    ]
| StarcoderdataPython |
3593268 | """
Functionality for getting and plotting SDSS image data.
"""
__all__=['sdss']
| StarcoderdataPython |
def django_ready(context):
    """Flag on *context* that Django has been initialised.

    (The `def` line previously carried a fused dataset-id artifact
    "188749 | ", which made the file invalid Python.)
    """
    context.django = True
4901832 | import mmcv
import pytest
def test_iter_cast():
    """list/tuple/iter casts convert each element via the given constructor."""
    assert mmcv.list_cast([1, 2, 3], int) == [1, 2, 3]
    assert mmcv.list_cast(['1.1', 2, '3'], float) == [1.1, 2.0, 3.0]
    assert mmcv.list_cast([1, 2, 3], str) == ['1', '2', '3']
    assert mmcv.tuple_cast((1, 2, 3), str) == ('1', '2', '3')
    assert next(mmcv.iter_cast([1, 2, 3], str)) == '1'
    # The destination type must be callable and the input must be iterable.
    with pytest.raises(TypeError):
        mmcv.iter_cast([1, 2, 3], '')
    with pytest.raises(TypeError):
        mmcv.iter_cast(1, str)
def test_is_seq_of():
    """is_seq_of checks element type, optionally restricting the sequence type."""
    assert mmcv.is_seq_of([1.0, 2.0, 3.0], float)
    assert mmcv.is_seq_of([(1, ), (2, ), (3, )], tuple)
    assert mmcv.is_seq_of((1.0, 2.0, 3.0), float)
    assert mmcv.is_list_of([1.0, 2.0, 3.0], float)
    # seq_type narrows which container kinds are accepted.
    assert not mmcv.is_seq_of((1.0, 2.0, 3.0), float, seq_type=list)
    assert not mmcv.is_tuple_of([1.0, 2.0, 3.0], float)
    # Mixed element types must be rejected.
    assert not mmcv.is_seq_of([1.0, 2, 3], int)
    assert not mmcv.is_seq_of((1.0, 2, 3), int)
def test_slice_list():
    """slice_list splits a list into chunks whose lengths must sum to len(in_list)."""
    in_list = [1, 2, 3, 4, 5, 6]
    assert mmcv.slice_list(in_list, [1, 2, 3]) == [[1], [2, 3], [4, 5, 6]]
    assert mmcv.slice_list(in_list, [len(in_list)]) == [in_list]
    # lens must be a list of ints ...
    with pytest.raises(TypeError):
        mmcv.slice_list(in_list, 2.0)
    # ... and must account for every element.
    with pytest.raises(ValueError):
        mmcv.slice_list(in_list, [1, 2])
def test_concat_list():
    """concat_list flattens one level of nesting."""
    assert mmcv.concat_list([[1, 2]]) == [1, 2]
    assert mmcv.concat_list([[1, 2], [3, 4, 5], [6]]) == [1, 2, 3, 4, 5, 6]
def test_requires_package(capsys):
    """Decorated functions raise RuntimeError and print a hint when any
    prerequisite package is missing; satisfied prerequisites pass through."""
    @mmcv.requires_package('nnn')
    def func_a():
        pass

    @mmcv.requires_package(['numpy', 'n1', 'n2'])
    def func_b():
        pass

    @mmcv.requires_package('six')
    def func_c():
        return 1

    with pytest.raises(RuntimeError):
        func_a()
    out, _ = capsys.readouterr()
    assert out == ('Prerequisites "nnn" are required in method "func_a" but '
                   'not found, please install them first.\n')

    # Only the missing packages (n1, n2) are named; numpy is installed.
    with pytest.raises(RuntimeError):
        func_b()
    out, _ = capsys.readouterr()
    assert out == (
        'Prerequisites "n1, n2" are required in method "func_b" but not found,'
        ' please install them first.\n')

    assert func_c() == 1
def test_requires_executable(capsys):
    """Same contract as requires_package, but for executables on PATH."""
    @mmcv.requires_executable('nnn')
    def func_a():
        pass

    @mmcv.requires_executable(['ls', 'n1', 'n2'])
    def func_b():
        pass

    @mmcv.requires_executable('mv')
    def func_c():
        return 1

    with pytest.raises(RuntimeError):
        func_a()
    out, _ = capsys.readouterr()
    assert out == ('Prerequisites "nnn" are required in method "func_a" but '
                   'not found, please install them first.\n')

    # Only the missing executables (n1, n2) are reported; `ls` exists.
    with pytest.raises(RuntimeError):
        func_b()
    out, _ = capsys.readouterr()
    assert out == (
        'Prerequisites "n1, n2" are required in method "func_b" but not found,'
        ' please install them first.\n')

    assert func_c() == 1
5072869 | <reponame>simonzsy/facedetector
import dlib
import os
# ResNet-based face embedding model, loaded once at import time from the
# bundled ./data directory next to this module.
_face_model = dlib.face_recognition_model_v1(
    os.path.join(os.path.join(os.path.dirname(__file__), "data"), "dlib_face_recognition_resnet_model_v1.dat"))
def describe(image, shape):
    """Return the face descriptor for the landmarks `shape` found in `image`.

    `shape` is a dlib full_object_detection (e.g. from a shape predictor);
    the model produces a 128-dimensional embedding vector.
    """
    return _face_model.compute_face_descriptor(image, shape)
| StarcoderdataPython |
103024 | """
百度分类API:
pip install baidu-aip
"""
import time
import os
import sys
import codecs
import json
import traceback
from tqdm import tqdm
from aip import AipNlp
sys.path.insert(0, './') # 定义搜索路径的优先顺序,序号从0开始,表示最大优先级
from data import baidu_config # noqa
""" 你的 APPID AK SK """
APP_ID = baidu_config.APP_ID # '你的 App ID'
API_KEY = baidu_config.API_KEY # '你的 Api Key'
SECRET_KEY = baidu_config.SECRET_KEY # '你的 Secret Key'
client = AipNlp(APP_ID, API_KEY, SECRET_KEY)
import myClue # noqa
print('myClue module path :{}'.format(myClue.__file__)) # 输出测试模块文件位置
from myClue.core import logger # noqa
from myClue.tools.file import read_file_texts # noqa
from myClue.tools.file import init_file_path # noqa
def get_baidu_topic(text):
    """Classify *text* with the Baidu topic API, retrying up to 20 times.

    Returns the API's ``item`` payload (a dict), or {} when every attempt
    fails or never yields an ``item`` key.
    """
    for i in range(20):
        try:
            # Drop characters GBK cannot represent; the API expects GBK-compatible text.
            text = text.encode('gbk', errors='ignore').decode('gbk', errors='ignore')
            cws_result = client.topic('凤凰新闻', text)
            if 'item' in cws_result:
                return cws_result['item']
            else:
                # No classification returned; retry immediately.
                continue
        except Exception as e:
            # Back off briefly on transient API/network failures, then retry.
            time.sleep(0.5)
            print('text:{}, i:{}, exception:{}'.format(text, i, e))
            traceback.print_exc()
    return {}
if __name__ == "__main__":
train_file_config = {
'dev': './data/UCAS_NLP_TC/devdata.txt',
'test': './data/UCAS_NLP_TC/testdata.txt',
'train': './data/UCAS_NLP_TC/traindata.txt',
}
output_path = './data/UCAS_NLP_TC/data_baidu_topic'
init_file_path(output_path)
for file_label, file_name in train_file_config.items():
logger.info('开始处理:{}'.format(file_label))
texts = read_file_texts(file_name)
output_file_name = os.path.join(output_path, '{}_topic.json'.format(file_label))
with codecs.open(output_file_name, mode='w', encoding='utf8') as fw:
for text in tqdm(texts):
label, news_content = text.split('\t')
news_content = news_content.replace(' ', '')
topic_item = get_baidu_topic(news_content)
row_json = {
'label': label,
'topic_item': topic_item,
'news_content': news_content
}
fw.write('{}\n'.format(json.dumps(row_json, ensure_ascii=False)))
time.sleep(0.3)
# break
| StarcoderdataPython |
9611590 | <gh_stars>0
from . import StorageEngineInterface
GLUE = "."
class DictionaryEngine(StorageEngineInterface):
    """In-memory storage engine backed by nested dictionaries.

    Composite keys such as ``"a.b.c"`` (split on GLUE) address successive
    levels of a tree of dicts.
    """

    # Sentinel so lookups can distinguish a missing key from a stored falsy value.
    _MISSING = object()

    def __init__(self):
        self.data = {}

    def set(self, key, value):
        """Store *value* under the (possibly nested) *key*.

        Intermediate levels that already exist are reused; the previous
        implementation replaced them with empty dicts, so setting "a.c"
        destroyed a previously stored "a.b".
        """
        *parents, leaf = key.split(GLUE)
        node = self.data
        for part in parents:
            child = node.get(part)
            if not isinstance(child, dict):
                # Create a new level (or replace a non-dict value blocking the path).
                child = {}
                node[part] = child
            node = child
        node[leaf] = value

    def _lookup(self, key):
        """Return the value stored at *key*, or _MISSING when the path is absent.

        Non-dict intermediates terminate the walk instead of raising TypeError.
        """
        node = self.data
        for part in key.split(GLUE):
            if isinstance(node, dict) and part in node:
                node = node[part]
            else:
                return self._MISSING
        return node

    def get(self, key):
        """Return the value stored at *key*, or None when it does not exist."""
        value = self._lookup(key)
        return None if value is self._MISSING else value

    def exists(self, key):
        """Return True when *key* resolves to a stored value, even a falsy one."""
        return self._lookup(key) is not self._MISSING
| StarcoderdataPython |
# Starting from 1, perform N operations, each time taking the cheaper of
# doubling or adding K: when result <= K, 2*result <= result + K, so doubling
# minimizes the final value. Print the minimal result after N operations.
# (The first line previously carried a fused dataset-id artifact "4882633 | ".)
N = int(input())
K = int(input())
result = 1
for _ in range(N):
    if result <= K:
        result *= 2
    else:
        result += K
print(result)
| StarcoderdataPython |
4886787 | <reponame>aloctavodia/kulprit
"""Continuous distribution families."""
from kulprit.data.data import ModelData
from kulprit.data.submodel import SubModelStructure
from kulprit.families import BaseFamily
import numpy as np
import torch
class GaussianFamily(BaseFamily):
    """Gaussian observation family with analytic projection solutions."""

    def __init__(self, data: ModelData) -> None:
        # initialise family object with necessary attributes
        self.data = data
        self.has_dispersion_parameters = True
        self.has_analytic_solution = True
        self.name = "gaussian"

    def solve_analytic(self, submodel_structure: SubModelStructure) -> torch.tensor:
        """Analytic solution to the reference model parameter projection.

        Args:
            submodel_structure (SubModelStructure): The submodel structure object

        Returns
            tuple: A tuple of the projection solution along with the final loss
            value of the gradient descent
        """

        def _analytic_proj(theta_ast: np.float32) -> np.float32:
            """Analytic solution to the point-wise parameter projection.

            We separate this solution from the primary method to allow for
            vectorisation of the projection across samples.

            Args:
                theta_ast (np.float32): The reference model posterior
                    parameter samples

            Returns:
                np.float32: Analytic solution to the posterior parameter projection problem
            """

            f = X_ast @ theta_ast
            # Jitter stabilises the normal-equations inverse.
            jitter = np.identity(submodel_structure.num_terms) * 1e-5
            theta_perp = np.linalg.inv(X_perp.T @ X_perp + jitter) @ X_perp.T @ f
            return theta_perp

        def _kld(theta_ast: np.float32, theta_perp: np.float32) -> np.float32:
            """Compute the analytic KL divergence between two posterior samples.

            We separate this solution from the primary method to allow for
            vectorisation of the projection across samples.

            Args:
                theta_ast (np.float32): The reference model posterior
                    parameter samples
                theta_perp (np.float32): The submodel posterior parameter samples

            Returns:
                np.float32: Analytic Kullback-Leibler divergence between the two
                    samples
            """

            f = X_ast @ theta_ast
            # BUG FIX: the submodel fit must use the submodel design matrix and
            # the *projected* parameters. The previous code recomputed the
            # reference fit (f_perp == f), which made the divergence — and thus
            # the reported loss — identically zero.
            f_perp = X_perp @ theta_perp
            kld = (f - f_perp).T @ (f - f_perp)
            return kld

        # extract submodel and reference model design matrices
        X_perp = submodel_structure.X.numpy()
        X_ast = self.data.structure.X.numpy()

        # extract reference model posterior parameter samples
        theta_ast = (
            self.data.idata.posterior.stack(samples=("chain", "draw"))[
                self.data.structure.term_names
            ]
            .to_array()
            .transpose(*("samples", "variable"))
            .values
        )

        # vectorise the analytic solution function
        vec_analytic_proj = np.vectorize(
            _analytic_proj,
            signature="(n)->(m)",
            doc="Vectorised `_analytic_proj` method",
        )

        # project the reference model posterior parameter samples
        theta_perp = vec_analytic_proj(theta_ast)

        # compute the Kullback-Leibler divergence between projection and truth
        vec_kld = np.vectorize(
            _kld,
            signature="(n),(m)->()",
            doc="Vectorised `_kld` method",
        )
        total_kld = torch.from_numpy(vec_kld(theta_ast, theta_perp)).float()
        loss = torch.mean(total_kld).item()

        # convert the projected parameters to a tensor and return
        theta_perp = torch.from_numpy(theta_perp).float()
        return theta_perp, loss

    def solve_dispersion(self, theta_perp: torch.tensor, X_perp: torch.tensor):
        """Analytic projection of the model dispersion parameters.

        Args:
            theta_perp (torch.tensor): A PyTorch tensor of the restricted
                parameter draws
            X_perp (np.ndarray): The design matrix of the restricted model we
                are projecting onto

        Returns:
            torch.tensor: The restricted projections of the dispersion parameters
        """

        def _dispersion_proj(
            theta_ast: torch.tensor,
            theta_perp: torch.tensor,
            sigma_ast: torch.tensor,
        ) -> np.ndarray:
            """Analytic solution to the point-wise dispersion projection.

            We separate this solution from the primary method to allow for
            vectorisation of the projection across samples.

            Args:
                theta_ast (torch.tensor): Reference model posterior parameter
                    sample
                theta_perp (torch.tensor): Submodel projected parameter sample
                sigma_ast (torch.tensor): Reference model posterior dispersion
                    parameter sample

            Returns:
                np.ndarray: The sample projection of the dispersion parameter in
                    a Gaussian model according to the analytic solution
            """

            f = X_ast @ theta_ast
            f_perp = X_perp @ theta_perp
            # Inflate sigma by the mean squared fit discrepancy (analytic solution).
            sigma_perp = torch.sqrt(
                sigma_ast**2
                + 1 / self.data.structure.num_obs * (f - f_perp).T @ (f - f_perp)
            )
            sigma_perp = sigma_perp.numpy()
            return sigma_perp

        # extract parameter draws from both models
        theta_ast = torch.from_numpy(
            self.data.idata.posterior.stack(samples=("chain", "draw"))[
                self.data.structure.term_names
            ]
            .to_array()
            .transpose(*("samples", "variable"))
            .values
        ).float()
        sigma_ast = torch.from_numpy(
            self.data.idata.posterior.stack(samples=("chain", "draw"))[
                self.data.structure.response_name + "_sigma"
            ]
            .transpose()
            .values
        ).float()
        X_ast = self.data.structure.X

        # project the dispersion parameter
        vec_dispersion_proj = np.vectorize(
            _dispersion_proj,
            signature="(n),(m),()->()",
            doc="Vectorised `_dispersion_proj` method",
        )
        sigma_perp = (
            torch.from_numpy(vec_dispersion_proj(theta_ast, theta_perp, sigma_ast))
            .flatten()
            .float()
        )

        # assure correct shape
        assert sigma_perp.shape == sigma_ast.shape
        return sigma_perp
| StarcoderdataPython |
8124465 | <gh_stars>0
from rest_framework import serializers
from .models import Account
class AccountSerializer(serializers.ModelSerializer):
    """Serializer for Account, adding a read-only absolute-URL field."""
    # Derived from Account.get_absolute_url(); read-only so writes ignore it.
    url=serializers.CharField(source='get_absolute_url',read_only=True)
    class Meta:
        model = Account
        fields = ['id', 'name', 'url_link', 'username', 'password', 'created','url']
| StarcoderdataPython |
5049626 | from random import randint
print("""As opções são:
1 - Tesoura
2 - Pedra
3 - Papel\n""")
es = int(input("Digite sua escolha: "))
esc = randint(1, 3)
print("JO")
print("KEN")
print("PO")
if esc == 1:
mostrar = "Tesoura"
elif esc == 2:
mostrar = "Pedra"
elif esc == 3:
mostrar = "Papel"
else:
mostrar = "{}".format(esc)
print("Computador jogou {}\n".format(mostrar))
if es == esc:
print("Empate")
elif es == 1 and esc == 2 or es == 2 and esc == 3 or es == 3 and esc == 1:
print("Você perdeu, o computador ganhou!!!!!!!!!!")
elif es == 1 and esc == 3 or es == 2 and esc == 1 or es == 3 and esc == 2:
print("Parabens, você ganhou!!!!!!!!!!!!")
else:
print("Número digitado é inválido")
| StarcoderdataPython |
1851452 | '''Pusher environment.'''
import os
from gym.envs.mujoco import mujoco_env
import numpy as np
class PusherEnv(mujoco_env.MujocoEnv):
    '''PusherEnv.

    MuJoCo arm-pushing environment with two tasks sharing one scene:
    "forward" pushes the puck to the goal, "reset" pushes it back to the
    start. Only the reward differs between tasks.
    '''
    # Zero positions taken from assets/pusher.xml.
    GOAL_ZERO_POS = [0.45, -0.05, -0.3230]  # from xml
    OBJ_ZERO_POS = [0.45, -0.05, -0.275]  # from xml

    def __init__(self, task='forward'):
        # task: 'forward' (push puck to goal) or 'reset' (push it back).
        self._task = task
        envs_folder = os.path.dirname(os.path.abspath(__file__))
        xml_filename = os.path.join(envs_folder,
                                    'assets/pusher.xml')
        super(PusherEnv, self).__init__(xml_filename, 5)
        # Note: self._goal is the same for the forward and reset tasks. Only
        # the reward function changes.
        (self._goal, self._start) = self._get_goal_start()

    def viewer_setup(self):
        # Free camera, pulled back for a full view of the table.
        self.viewer.cam.trackbodyid = -1
        self.viewer.cam.distance = 4.0

    def _get_goal_start(self):
        """Read goal/object world positions at the zero configuration."""
        # NOTE(review): qpos/qvel alias self.init_qpos/init_qvel, so the
        # in-place zeroing mutates those arrays — presumably benign; confirm.
        qpos = self.init_qpos
        qvel = self.init_qvel
        qpos[:] = 0
        qvel[:] = 0
        self.set_state(qpos, qvel)
        goal = self.get_body_com('goal').copy()
        start = self.get_body_com('object').copy()
        return (goal, start)

    def _step(self, a):
        # Old gym API: returns (obs, reward, done, info); episodes never end here.
        self.do_simulation(a, self.frame_skip)
        obs = self._get_obs()
        done = False
        (forward_shaped_reward, reset_shaped_reward) = self._get_rewards(obs, a)
        if self._task == 'forward':
            r = forward_shaped_reward
        elif self._task == 'reset':
            r = reset_shaped_reward
        else:
            raise ValueError('Unknown task: %s', self._task)
        return (obs, r, done, {})

    def _get_obs(self):
        # Arm joint positions (5), joint velocities (3) and fingertip xy.
        obs = np.concatenate([self.model.data.qpos.flat[:5],
                              self.model.data.qvel.flat[:3],
                              self.get_body_com('tips_arm')[:2]])
        return obs

    def reset_model(self):
        # NOTE(review): qpos aliases self.init_qpos and is edited in place —
        # presumably intended; confirm against MujocoEnv's reset contract.
        qpos = self.init_qpos
        qpos[:] = 0
        # Randomly jitter the puck position.
        qpos[-4:-2] += self.np_random.uniform(-0.05, 0.05, 2)
        qvel = self.init_qvel + self.np_random.uniform(
            low=-0.005, high=0.005, size=self.model.nv)
        qvel[-4:] = 0
        # For the reset task, flip the initial positions of the goal and puck
        if self._task == 'reset':
            qpos[-3] -= 0.7
            qpos[-1] += 0.7
        self.set_state(qpos, qvel)
        return self._get_obs()

    def _huber(self, x, bound, delta=0.2):
        """Huber loss: quadratic below delta, linear above."""
        assert delta < bound
        if x < delta:
            loss = 0.5 * x * x
        else:
            loss = delta * (x - 0.5 * delta)
        return loss

    def _reward_fn(self, x, bound=0.7):
        """Map a distance x to a reward in [0, 1] via a normalized Huber loss."""
        # Using bound = 0.7 because that's the initial puck-goal distance.
        x = np.clip(x, 0, bound)
        loss = self._huber(x, bound)
        loss /= self._huber(bound, bound)
        reward = 1 - loss
        assert 0 <= loss <= 1
        return reward

    def _get_rewards(self, s, a):
        """Return shaped (forward, reset) rewards for the current state.

        Each is a fixed-weight blend of task distance, arm-to-puck distance
        and a control-magnitude penalty, all mapped through _reward_fn.
        """
        del s
        if not hasattr(self, '_goal'):
            # Called during base-class __init__ before goal/start are known.
            print('Warning: goal or start has not been set')
            return (0, 0)
        obj_to_arm = self.get_body_com('object') - self.get_body_com('tips_arm')
        obj_to_goal = self.get_body_com('object') - self._goal
        obj_to_start = self.get_body_com('object') - self._start
        obj_to_arm_dist = np.linalg.norm(obj_to_arm)
        obj_to_goal_dist = np.linalg.norm(obj_to_goal)
        obj_to_start_dist = np.linalg.norm(obj_to_start)
        control_dist = np.linalg.norm(a)

        forward_reward = self._reward_fn(obj_to_goal_dist)
        reset_reward = self._reward_fn(obj_to_start_dist)
        obj_to_arm_reward = self._reward_fn(obj_to_arm_dist)
        # The control_dist is between 0 and sqrt(2^2 + 2^2 + 2^2) = 3.464
        control_reward = self._reward_fn(control_dist, bound=3.464)
        forward_reward_vec = [forward_reward, obj_to_arm_reward, control_reward]
        reset_reward_vec = [reset_reward, obj_to_arm_reward, control_reward]
        reward_coefs = (0.5, 0.375, 0.125)
        forward_shaped_reward = sum(
            [coef * r for (coef, r) in zip(reward_coefs, forward_reward_vec)])
        reset_shaped_reward = sum(
            [coef * r for (coef, r) in zip(reward_coefs, reset_reward_vec)])
        assert 0 <= forward_shaped_reward <= 1
        assert 0 <= reset_shaped_reward <= 1
        return (forward_shaped_reward, reset_shaped_reward)
if __name__ == '__main__':
    # Manual smoke test: drive the reset-task environment with random
    # actions and render at ~100 fps.
    import time
    env = PusherEnv(task='reset')
    env.reset()
    for _ in range(10000):
        action = env.action_space.sample()
        env.step(action)
        env.render()
        time.sleep(0.01)
| StarcoderdataPython |
12829711 | <reponame>FGtatsuro/myatcoder
import sys
# Competitive-programming boilerplate: fast input, deep recursion allowed.
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
n = int(input())
ans = 0
# For each i, the multiples of i up to n are i, 2i, ..., j*i with j = n//i,
# summing to i * j*(j+1)/2. Accumulated over all i, this equals
# sum_{k=1..n} k * d(k), where d(k) is the number of divisors of k.
for i in range(1, n+1):
    j = n // i
    ans += (j * (j+1) * i) // 2
print(ans)
| StarcoderdataPython |
9712059 | <reponame>cloudblue/product-sync
import click
import pytest
from connect.cli.plugins.translation.activate import activate_translation
def test_activate_translation(
    mocked_responses,
    mocked_translation_response,
):
    """Activating a translation returns it with status 'active'."""
    mocked_responses.add(
        method='POST',
        url='https://localhost/public/v1/localization/translations/TRN-8100-3865-4869/activate',
        json=mocked_translation_response,
        status=200,
    )
    translation = activate_translation(
        api_url='https://localhost/public/v1',
        api_key='<KEY>',
        translation_id='TRN-8100-3865-4869',
    )
    assert translation['status'] == 'active'
def test_translation_already_activated(mocked_responses):
    """A 400/TRE_003 API response surfaces as a ClickException with the reason."""
    mocked_responses.add(
        method='POST',
        url='https://localhost/public/v1/localization/translations/TRN-8100-3865-4869/activate',
        json={
            "error_code": "TRE_003",
            "errors": [
                "This translation is already activated.",
            ],
        },
        status=400,
    )
    with pytest.raises(click.ClickException) as e:
        activate_translation(
            api_url='https://localhost/public/v1',
            api_key='<KEY>',
            translation_id='TRN-8100-3865-4869',
        )
    assert str(e.value) == '400 - Bad Request: TRE_003 - This translation is already activated.'
def test_activate_translation_not_exists(mocked_responses):
    """A 404 API response surfaces as a ClickException naming the translation."""
    mocked_responses.add(
        method='POST',
        url='https://localhost/public/v1/localization/translations/TRN-0000-0000-0000/activate',
        status=404,
    )
    with pytest.raises(click.ClickException) as e:
        activate_translation(
            api_url='https://localhost/public/v1',
            api_key='<KEY>',
            translation_id='TRN-0000-0000-0000',
        )
    assert str(e.value) == '404 - Not Found: Translation TRN-0000-0000-0000 not found.'
| StarcoderdataPython |
8053546 | <reponame>MinisterioPublicoRJ/api-cadg<filename>dominio/tests/test_dao.py<gh_stars>1-10
from unittest import mock
import pytest
from rest_framework import serializers
from django.conf import settings
from dominio.exceptions import APIEmptyResultError
from dominio.dao import (
GenericDAO,
SingleDataObjectDAO,
)
QUERIES_DIR = settings.BASE_DIR.child("dominio", "tests", "queries")
class TestGenericDAO:
    """Unit tests for GenericDAO's query/execute/serialize/get pipeline."""

    @mock.patch("dominio.dao.GenericDAO.QUERIES_DIR",
                new_callable=mock.PropertyMock)
    @mock.patch("dominio.dao.GenericDAO.table_namespaces",
                new_callable=mock.PropertyMock)
    @mock.patch("dominio.dao.GenericDAO.query_file",
                new_callable=mock.PropertyMock)
    def test_query_method(self, _query_file, _namespaces, _queries_dir):
        # query() must read the SQL file and format in the table namespaces.
        _query_file.return_value = "test_query.sql"
        _namespaces.return_value = {"schema": "test_schema"}
        _queries_dir.return_value = QUERIES_DIR

        with open(QUERIES_DIR.child("test_query.sql")) as fobj:
            query = fobj.read()

        expected_query = query.format(schema="test_schema")
        output = GenericDAO.query()

        assert output == expected_query

    @mock.patch.object(GenericDAO, "query")
    @mock.patch("dominio.dao.impala_execute")
    def test_execute_method(self, _impala_execute, _query):
        # execute() passes the rendered query plus kwargs to impala_execute.
        _query.return_value = "SELECT * FROM dual"
        orgao_id = "12345"

        GenericDAO.execute(orgao_id=orgao_id)

        _impala_execute.assert_called_once_with(
            "SELECT * FROM dual", {"orgao_id": orgao_id}
        )

    @mock.patch("dominio.dao.GenericDAO.columns",
                new_callable=mock.PropertyMock)
    def test_serialize_result_no_serializer(self, _columns):
        # Without a serializer, rows are zipped with column names as-is.
        _columns.return_value = ["col1", "col2", "col3"]
        result_set = [
            ("1", "2", "3"),
            ("4", "5", "6"),
            ("7", "8", "9"),
        ]

        ser_data = GenericDAO.serialize(result_set)
        expected_data = [
            {"col1": "1", "col2": "2", "col3": "3"},
            {"col1": "4", "col2": "5", "col3": "6"},
            {"col1": "7", "col2": "8", "col3": "9"},
        ]

        assert ser_data == expected_data

    @mock.patch("dominio.dao.GenericDAO.serializer",
                new_callable=mock.PropertyMock)
    @mock.patch("dominio.dao.GenericDAO.columns",
                new_callable=mock.PropertyMock)
    def test_serialize_result_with_serializer(self, _columns, _serializer):
        # With a serializer, field types are coerced (strings -> ints here).
        class TestSerializer(serializers.Serializer):
            col1 = serializers.IntegerField()
            col2 = serializers.IntegerField()
            col3 = serializers.IntegerField()

        _serializer.return_value = TestSerializer
        _columns.return_value = ["col1", "col2", "col3"]
        result_set = [
            ("1", "2", "3"),
            ("4", "5", "6"),
            ("7", "8", "9"),
        ]

        ser_data = GenericDAO.serialize(result_set)
        expected_data = [
            {"col1": 1, "col2": 2, "col3": 3},
            {"col1": 4, "col2": 5, "col3": 6},
            {"col1": 7, "col2": 8, "col3": 9},
        ]

        assert ser_data == expected_data

    @mock.patch.object(GenericDAO, "execute")
    @mock.patch.object(GenericDAO, "serialize")
    def test_get_data(self, _serialize, _execute):
        # get() = execute() then serialize().
        result_set = [("0.133")]
        _execute.return_value = result_set
        _serialize.return_value = {"data": 1}
        orgao_id = "12345"

        data = GenericDAO.get(orgao_id=orgao_id)

        _execute.assert_called_once_with(orgao_id=orgao_id)
        _serialize.assert_called_once_with(result_set)
        assert data == {"data": 1}

    @mock.patch.object(GenericDAO, "execute")
    @mock.patch.object(GenericDAO, "serialize")
    def test_get_data_404_exception(self, _serialize, _execute):
        # An empty result set must raise before serialization is attempted.
        result_set = []
        _execute.return_value = result_set
        orgao_id = "12345"

        with pytest.raises(APIEmptyResultError):
            GenericDAO.get(orgao_id=orgao_id)

        _execute.assert_called_once_with(orgao_id=orgao_id)
        _serialize.assert_not_called()
class TestSingleDataObjectDAO:
    """Tests for SingleDataObjectDAO, which serializes a single-row result."""

    @mock.patch("dominio.dao.SingleDataObjectDAO.columns",
                new_callable=mock.PropertyMock)
    def test_serialize(self, _columns):
        """A one-row result set serializes to a single dict, not a list."""
        _columns.return_value = ["col1", "col2", "col3"]
        result_set = [
            ("1", "2", "3")
        ]
        ser_data = SingleDataObjectDAO.serialize(result_set)
        expected_data = {"col1": "1", "col2": "2", "col3": "3"}
        assert ser_data == expected_data
| StarcoderdataPython |
301181 | <reponame>landdafku11/cryptocurrencybot<gh_stars>1-10
import re

# re.match() anchors at the *start* of the string, so 'Analytics' does not
# match 'AV Analytics ESET AV' (which begins with 'AV') and `result` is None.
# Use re.search() instead if the word should be found anywhere in the string.
result = re.match(r'Analytics', 'AV Analytics ESET AV')
# BUG FIX: removed stray trailing token `ad`, which raised NameError on import.
5195223 | # encoding: utf-8
from ..handlers import BaseRequestHandler
import setting
from worker import REQUESTS_PER_MINUTE, LOCAL_OBJECT_DURATION, BROADCAST_ACTIVE_DURATION, BROADCAST_INCREMENTAL_BACKUP, IMAGE_LOCAL_CACHE
class General(BaseRequestHandler):
    """
    Settings: general worker options (rate limit, retention, caching).
    """

    def get(self, flash=''):
        """Render the general-settings form with current values.

        Durations are stored in seconds but shown to the user in days.
        """
        requests_per_minute = setting.get('worker.requests-per-minute', int, REQUESTS_PER_MINUTE)
        local_object_duration = setting.get('worker.local-object-duration', int, LOCAL_OBJECT_DURATION)
        broadcast_active_duration = setting.get('worker.broadcast-active-duration', int, BROADCAST_ACTIVE_DURATION)
        broadcast_incremental_backup = setting.get('worker.broadcast-incremental-backup', bool, BROADCAST_INCREMENTAL_BACKUP)
        image_local_cache = setting.get('worker.image-local-cache', bool, IMAGE_LOCAL_CACHE)
        self.render(
            'settings/general.html',
            requests_per_minute=requests_per_minute,
            # seconds -> days for display
            local_object_duration=int(local_object_duration / (60 * 60 *24)),
            broadcast_active_duration=int(broadcast_active_duration / (60 * 60 *24)),
            broadcast_incremental_backup=broadcast_incremental_backup,
            image_local_cache=image_local_cache,
            flash=flash
        )

    def post(self):
        """Persist the submitted settings and re-render with a flash message.

        Day values from the form are converted back to seconds before saving.
        """
        requests_per_minute = self.get_argument('requests-per-minute')
        setting.set('worker.requests-per-minute', requests_per_minute, int)
        local_object_duration_days = int(self.get_argument('local-object-duration'))
        local_object_duration = local_object_duration_days * 60 * 60 *24
        setting.set('worker.local-object-duration', local_object_duration, int)
        broadcast_active_duration_days = int(self.get_argument('broadcast-active-duration'))
        broadcast_active_duration = broadcast_active_duration_days * 60 * 60 *24
        setting.set('worker.broadcast-active-duration', broadcast_active_duration, int)
        broadcast_incremental_backup = int(self.get_argument('broadcast-incremental-backup'))
        setting.set('worker.broadcast-incremental-backup', broadcast_incremental_backup, bool)
        image_local_cache = int(self.get_argument('image-local-cache'))
        setting.set('worker.image-local-cache', image_local_cache, bool)
        # Flash message (Chinese): "restart the worker process or program for
        # the settings to take effect" -- kept verbatim, it is user-facing text.
        return self.get('需要重启工作进程或者程序才能使设置生效')
class Network(BaseRequestHandler):
    """
    Settings: network options (one proxy URL per line).
    """

    def get(self, flash=''):
        """Render the proxy list as a newline-joined textarea value."""
        proxies = setting.get('worker.proxies', 'json', [])
        self.render('settings/network.html', proxies='\n'.join(proxies), flash=flash)

    def post(self):
        """Parse, strip, and de-duplicate the submitted proxy list, then save."""
        proxies = self.get_argument('proxies').split('\n')
        # set() removes duplicates; the comprehension drops blank lines.
        proxies = [proxy.strip() for proxy in list(set(proxies)) if proxy.strip()]
        setting.set('worker.proxies', proxies, 'json')
        # Flash message (Chinese): "restart required for settings to take
        # effect" -- user-facing text, kept verbatim.
        return self.get('需要重启工作进程或者程序才能使设置生效')
| StarcoderdataPython |
3300374 | #!/usr/bin/env python
import os.path
import logging
import uuid
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.escape
import tornado.web
from tornado.escape import json_encode
from tornado.options import define, options
define("port", default=8080, help="run on the given port", type=int)
# The MainHandler returns the index.html to the client
class MainHandler(tornado.web.RequestHandler):
    """Serves the single-page client (index.html)."""

    def get(self):
        self.render("index.html")
# The IdHandler generates a specific UUID for the client to track its state
# TODO: the UUID should be saved in Redis to persist state for the client during the game and for 30 minutes after the game ended
class IdHandler(tornado.web.RequestHandler):
    """Hands out a fresh UUID so the client can track its own state."""

    def get(self):
        # Response shape: {"id": "<uuid4>"} serialized as JSON.
        clientid = { 'id': str(uuid.uuid4()) }
        self.set_header('Content-Type', 'application/javascript')
        self.write(json_encode(clientid))
# The ColorHandler receives the color value selected by the client and saves it with the client Id in the Redis database
class ColorHandler(tornado.web.RequestHandler):
    """Receives the color value selected by the client.

    The decoded payload is intended to be stored in Redis alongside the
    client id (not yet implemented, see TODO below).
    """

    def post(self):
        # BUG FIX: replaced a leftover debug print() with a log call; the
        # `logging` module is already imported at the top of this file.
        logging.debug("SetColor payload: %r", self.request.body)
        data_json = tornado.escape.json_decode(self.request.body)
        # TODO: parsing works, Redis code goes here
def main():
    """Parse command-line options, wire up the routes, and start the IOLoop."""
    tornado.options.parse_command_line()
    # Define request routes to be available to the client
    application = tornado.web.Application([
        (r"/", MainHandler),
        (r"/GetId", IdHandler),
        (r"/SetColor", ColorHandler),
        (r'/static/(.*)', tornado.web.StaticFileHandler, {'path': "static"}),
    ],
    )
    http_server = tornado.httpserver.HTTPServer(application)
    # Port comes from the --port option (default 8080, defined above).
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
| StarcoderdataPython |
1800486 | <reponame>Mishrasubha/napari
import numpy as np
import numpy.testing as npt
import pytest
from scipy.stats import special_ortho_group
from napari.utils.transforms import Affine, CompositeAffine, ScaleTranslate
transform_types = [Affine, CompositeAffine, ScaleTranslate]
@pytest.mark.parametrize('Transform', transform_types)
def test_scale_translate(Transform):
    """Applying a transform computes y = scale * x + translate, elementwise,
    and the `name` kwarg is retained."""
    coord = [10, 13]
    transform = Transform(scale=[2, 3], translate=[8, -5], name='st')
    new_coord = transform(coord)
    target_coord = [2 * 10 + 8, 3 * 13 - 5]
    assert transform.name == 'st'
    npt.assert_allclose(new_coord, target_coord)
@pytest.mark.parametrize('Transform', transform_types)
def test_scale_translate_broadcast_scale(Transform):
coord = [1, 10, 13]
transform = Transform(scale=[4, 2, 3], translate=[8, -5], name='st')
new_coord = transform(coord)
target_coord = [4, 2 * 10 + 8, 3 * 13 - 5]
assert transform.name == 'st'
npt.assert_allclose(transform.scale, [4, 2, 3])
npt.assert_allclose(transform.translate, [0, 8, -5])
npt.assert_allclose(new_coord, target_coord)
@pytest.mark.parametrize('Transform', transform_types)
def test_scale_translate_broadcast_translate(Transform):
coord = [1, 10, 13]
transform = Transform(scale=[2, 3], translate=[5, 8, -5], name='st')
new_coord = transform(coord)
target_coord = [6, 2 * 10 + 8, 3 * 13 - 5]
assert transform.name == 'st'
npt.assert_allclose(transform.scale, [1, 2, 3])
npt.assert_allclose(transform.translate, [5, 8, -5])
npt.assert_allclose(new_coord, target_coord)
@pytest.mark.parametrize('Transform', transform_types)
def test_scale_translate_inverse(Transform):
coord = [10, 13]
transform = Transform(scale=[2, 3], translate=[8, -5])
new_coord = transform(coord)
target_coord = [2 * 10 + 8, 3 * 13 - 5]
npt.assert_allclose(new_coord, target_coord)
inverted_new_coord = transform.inverse(new_coord)
npt.assert_allclose(inverted_new_coord, coord)
@pytest.mark.parametrize('Transform', transform_types)
def test_scale_translate_compose(Transform):
coord = [10, 13]
transform_a = Transform(scale=[2, 3], translate=[8, -5])
transform_b = Transform(scale=[0.3, 1.4], translate=[-2.2, 3])
transform_c = transform_b.compose(transform_a)
new_coord_1 = transform_c(coord)
new_coord_2 = transform_b(transform_a(coord))
npt.assert_allclose(new_coord_1, new_coord_2)
@pytest.mark.parametrize('Transform', transform_types)
def test_scale_translate_slice(Transform):
transform_a = Transform(scale=[2, 3], translate=[8, -5])
transform_b = Transform(scale=[2, 1, 3], translate=[8, 3, -5], name='st')
npt.assert_allclose(transform_b.set_slice([0, 2]).scale, transform_a.scale)
npt.assert_allclose(
transform_b.set_slice([0, 2]).translate, transform_a.translate
)
assert transform_b.set_slice([0, 2]).name == 'st'
@pytest.mark.parametrize('Transform', transform_types)
def test_scale_translate_expand_dims(Transform):
transform_a = Transform(scale=[2, 3], translate=[8, -5], name='st')
transform_b = Transform(scale=[2, 1, 3], translate=[8, 0, -5])
npt.assert_allclose(transform_a.expand_dims([1]).scale, transform_b.scale)
npt.assert_allclose(
transform_a.expand_dims([1]).translate, transform_b.translate
)
assert transform_a.expand_dims([1]).name == 'st'
@pytest.mark.parametrize('Transform', transform_types)
def test_scale_translate_identity_default(Transform):
    """A default-constructed transform acts as the identity."""
    coord = [10, 13]
    transform = Transform()
    new_coord = transform(coord)
    npt.assert_allclose(new_coord, coord)
def test_affine_properties():
    """Constructor kwargs round-trip through the Affine properties."""
    transform = Affine(scale=[2, 3], translate=[8, -5], rotate=90, shear=[1])
    npt.assert_allclose(transform.translate, [8, -5])
    npt.assert_allclose(transform.scale, [2, 3])
    # rotate=90 (degrees) is stored as the 2x2 rotation matrix below.
    npt.assert_almost_equal(transform.rotate, [[0, -1], [1, 0]])
    npt.assert_almost_equal(transform.shear, [1])
def test_affine_properties_setters():
transform = Affine()
transform.translate = [8, -5]
npt.assert_allclose(transform.translate, [8, -5])
transform.scale = [2, 3]
npt.assert_allclose(transform.scale, [2, 3])
transform.rotate = 90
npt.assert_almost_equal(transform.rotate, [[0, -1], [1, 0]])
transform.shear = [1]
npt.assert_almost_equal(transform.shear, [1])
def test_rotate():
    """A pure 90-degree rotation maps [x, y] to [-y, x]."""
    coord = [10, 13]
    transform = Affine(rotate=90)
    new_coord = transform(coord)
    # As rotate by 90 degrees, can use [-y, x]
    target_coord = [-coord[1], coord[0]]
    npt.assert_allclose(new_coord, target_coord)
def test_scale_translate_rotate():
coord = [10, 13]
transform = Affine(scale=[2, 3], translate=[8, -5], rotate=90)
new_coord = transform(coord)
post_scale = np.multiply(coord, [2, 3])
# As rotate by 90 degrees, can use [-y, x]
post_rotate = [-post_scale[1], post_scale[0]]
target_coord = np.add(post_rotate, [8, -5])
npt.assert_allclose(new_coord, target_coord)
def test_scale_translate_rotate_inverse():
coord = [10, 13]
transform = Affine(scale=[2, 3], translate=[8, -5], rotate=90)
new_coord = transform(coord)
post_scale = np.multiply(coord, [2, 3])
# As rotate by 90 degrees, can use [-y, x]
post_rotate = [-post_scale[1], post_scale[0]]
target_coord = np.add(post_rotate, [8, -5])
npt.assert_allclose(new_coord, target_coord)
inverted_new_coord = transform.inverse(new_coord)
npt.assert_allclose(inverted_new_coord, coord)
def test_scale_translate_rotate_compose():
coord = [10, 13]
transform_a = Affine(scale=[2, 3], translate=[8, -5], rotate=25)
transform_b = Affine(scale=[0.3, 1.4], translate=[-2.2, 3], rotate=65)
transform_c = transform_b.compose(transform_a)
new_coord_1 = transform_c(coord)
new_coord_2 = transform_b(transform_a(coord))
npt.assert_allclose(new_coord_1, new_coord_2)
def test_scale_translate_rotate_shear_compose():
coord = [10, 13]
transform_a = Affine(scale=[2, 3], translate=[8, -5], rotate=25, shear=[1])
transform_b = Affine(
scale=[0.3, 1.4],
translate=[-2.2, 3],
rotate=65,
shear=[-0.5],
)
transform_c = transform_b.compose(transform_a)
new_coord_1 = transform_c(coord)
new_coord_2 = transform_b(transform_a(coord))
npt.assert_allclose(new_coord_1, new_coord_2)
@pytest.mark.parametrize('dimensionality', [2, 3])
def test_affine_matrix(dimensionality):
np.random.seed(0)
N = dimensionality
A = np.eye(N + 1)
A[:-1, :-1] = np.random.random((N, N))
A[:-1, -1] = np.random.random(N)
# Create transform
transform = Affine(affine_matrix=A)
# Check affine was passed correctly
np.testing.assert_almost_equal(transform.affine_matrix, A)
# Create input vector
x = np.ones(N + 1)
x[:-1] = np.random.random(N)
# Apply transform and direct matrix multiplication
result_transform = transform(x[:-1])
result_mat_multiply = (A @ x)[:-1]
np.testing.assert_almost_equal(result_transform, result_mat_multiply)
@pytest.mark.parametrize('dimensionality', [2, 3])
def test_affine_matrix_compose(dimensionality):
    """Composing two Affine transforms equals multiplying their matrices."""
    np.random.seed(0)
    N = dimensionality
    # Build two random homogeneous (N+1 x N+1) affine matrices.
    A = np.eye(N + 1)
    A[:-1, :-1] = np.random.random((N, N))
    A[:-1, -1] = np.random.random(N)
    B = np.eye(N + 1)
    B[:-1, :-1] = np.random.random((N, N))
    B[:-1, -1] = np.random.random(N)
    # Create transform
    transform_A = Affine(affine_matrix=A)
    transform_B = Affine(affine_matrix=B)
    # Check affine was passed correctly
    np.testing.assert_almost_equal(transform_A.affine_matrix, A)
    np.testing.assert_almost_equal(transform_B.affine_matrix, B)
    # Compose transforms and directly matrix multiply
    transform_C = transform_B.compose(transform_A)
    C = B @ A
    np.testing.assert_almost_equal(transform_C.affine_matrix, C)
@pytest.mark.parametrize('dimensionality', [2, 3])
def test_numpy_array_protocol(dimensionality):
N = dimensionality
A = np.eye(N + 1)
A[:-1] = np.random.random((N, N + 1))
transform = Affine(affine_matrix=A)
np.testing.assert_almost_equal(transform.affine_matrix, A)
np.testing.assert_almost_equal(np.asarray(transform), A)
coords = np.random.random((20, N + 1)) * 20
coords[:, -1] = 1
np.testing.assert_almost_equal(
(transform @ coords.T).T[:, :-1], transform(coords[:, :-1])
)
@pytest.mark.parametrize('dimensionality', [2, 3])
def test_affine_matrix_inverse(dimensionality):
    """`Affine.inverse` yields the matrix inverse of the affine matrix."""
    np.random.seed(0)
    N = dimensionality
    # Random homogeneous affine matrix.
    A = np.eye(N + 1)
    A[:-1, :-1] = np.random.random((N, N))
    A[:-1, -1] = np.random.random(N)
    # Create transform
    transform = Affine(affine_matrix=A)
    # Check affine was passed correctly
    np.testing.assert_almost_equal(transform.affine_matrix, A)
    # Check inverse is create correctly
    np.testing.assert_almost_equal(
        transform.inverse.affine_matrix, np.linalg.inv(A)
    )
def test_repeat_shear_setting():
    """Test repeatedly setting shear with a lower triangular matrix."""
    # Note this test is needed to check lower triangular
    # decomposition of shear is working
    mat = np.eye(3)
    mat[2, 0] = 0.5
    transform = Affine(shear=mat.copy())
    # Check shear decomposed into lower triangular
    np.testing.assert_almost_equal(mat, transform.shear)
    # Set shear to same value (twice) -- a regression check that repeated
    # assignment does not re-decompose into a different representation.
    transform.shear = mat.copy()
    # Check shear still decomposed into lower triangular
    np.testing.assert_almost_equal(mat, transform.shear)
    # Set shear to same value
    transform.shear = mat.copy()
    # Check shear still decomposed into lower triangular
    np.testing.assert_almost_equal(mat, transform.shear)
@pytest.mark.parametrize('dimensionality', [2, 3])
def test_composite_affine_equiv_to_affine(dimensionality):
np.random.seed(0)
translate = np.random.randn(dimensionality)
scale = np.random.randn(dimensionality)
rotate = special_ortho_group.rvs(dimensionality)
shear = np.random.randn((dimensionality * (dimensionality - 1)) // 2)
composite = CompositeAffine(
translate=translate, scale=scale, rotate=rotate, shear=shear
)
affine = Affine(
translate=translate, scale=scale, rotate=rotate, shear=shear
)
np.testing.assert_almost_equal(
composite.affine_matrix, affine.affine_matrix
)
| StarcoderdataPython |
9654074 | import string
import random
def unique_strings(k: int, ntokens: int,
                   pool: str = string.ascii_letters) -> set:
    """Return a set of ``ntokens`` distinct random strings of length ``k``.

    Characters are drawn with replacement from ``pool``.

    Raises:
        ValueError: if more unique strings are requested than the pool can
            produce (there are only ``len(set(pool)) ** k`` possibilities).
            The original implementation looped forever in that case.
    """
    # BUG FIX: guard against an infinite loop when the request is impossible.
    distinct_chars = len(set(pool))
    if ntokens > distinct_chars ** k:
        raise ValueError(
            "cannot generate %d unique strings of length %d from %d distinct "
            "characters" % (ntokens, k, distinct_chars)
        )
    seen = set()
    join = ''.join
    add = seen.add
    while len(seen) < ntokens:
        add(join(random.choices(pool, k=k)))
    return seen
def unique_number() -> int:
    """Pick a random year-like integer in the inclusive range 1800..2021."""
    # randrange(a, b) excludes b, so this matches randint(1800, 2021).
    return random.randrange(1800, 2022)
def inProgram() -> str:
    """Randomly answer 'da' (yes) or 'nu' (no)."""
    return random.choice(('da', 'nu'))
| StarcoderdataPython |
from distutils.core import setup
# NOTE(review): distutils.core.setup() silently ignores the setuptools-only
# `install_requires` keyword below, and distutils itself was removed in
# Python 3.12 -- consider migrating to `from setuptools import setup`.

setup(
    name='momap',
    version='0.01.c',
    packages=['momap'],
    install_requires=['sortedcontainers'],
    platforms='any',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    url='https://github.com/Endle/momap',
    description='Multi Ordered Map for python',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
    ]
)
| StarcoderdataPython |
5013351 | <reponame>FHU-yezi/JianshuAvailabilityMonitor
from peewee import (BooleanField, CharField, DateTimeField, IntegerField,
Model, SqliteDatabase)
class RunLog(Model):
    """Application run-log entries, persisted to the local ``log.db`` SQLite file."""
    id = IntegerField(primary_key=True)
    time = DateTimeField()      # when the event was recorded
    level = IntegerField()      # numeric severity
    message = CharField()       # human-readable log text

    class Meta:
        # NOTE(review): each model opens its own SqliteDatabase("log.db");
        # sharing one database object between models is the usual peewee idiom.
        database = SqliteDatabase("log.db")
class MonitorLog(Model):
    """Per-check results for each availability monitor, stored in ``log.db``."""
    id = IntegerField(primary_key=True)
    time = DateTimeField()          # when the check ran
    monitor_name = CharField()      # which monitor produced this record
    successed = BooleanField()      # whether the check passed (sic: "successed")
    status_code = IntegerField()    # HTTP status (or equivalent) observed
    message = CharField()           # extra detail about the outcome

    class Meta:
        database = SqliteDatabase("log.db")
def init_db() -> None:
    """Initialize the database by creating both log tables.

    NOTE(review): assumes peewee's ``create_table()`` defaults to
    ``safe=True`` so re-running is harmless -- verify against the pinned
    peewee version.
    """
    RunLog.create_table()
    MonitorLog.create_table()
| StarcoderdataPython |
3325669 | <gh_stars>0
# Get data from comprehend.rightcall and from dynamodb database,
# combine it and load it into elasticsearch
import elasticsearch_tools
import s3 as s3py
import logging
import boto3
import dynamodb_tools
from requests_aws4auth import AWS4Auth
LOGLEVEL = 'DEBUG'
# Logging
levels = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
if LOGLEVEL not in levels:
raise ValueError(f"Invalid log level choice {LOGLEVEL}")
logger = logging.getLogger('rightcall')
logger.setLevel(LOGLEVEL)
ch = logging.StreamHandler()
ch.setLevel(LOGLEVEL)
formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def get_reference_number_from_object_name(object_name_string):
    """Extract the call reference number from an s3 object name.

    'e23413582523--QUIDP.json' -> 'e23413582523' (prefix before '--')
    'e23413582523P.json'       -> 'e23413582523P' (prefix before first '.')
    anything else is returned unchanged.

    NOTE(review): the original docstring claimed the trailing 'P' is also
    stripped, but the code keeps it -- confirm which is intended.

    Raises ValueError if the extracted value still contains '--' or '.json'.
    """
    logger.debug(f"Received: {object_name_string}")
    if '--' in object_name_string:
        reference_number = object_name_string.partition('--')[0]
    elif '.json' in object_name_string:
        reference_number = object_name_string.partition('.')[0]
    else:
        reference_number = object_name_string
    logger.debug(f"Ref Num: {reference_number}")
    if '--' in reference_number or '.json' in reference_number:
        raise ValueError(f"Invalid characters detected in reference number: {object_name_string}")
    return reference_number
def get_all_refs_from_s3_objects(bucket_name):
    """Given an s3 bucket name, returns a list of the reference numbers
    contained in the names of all objects in that bucket.

    Input: <string> 'comprehend.rightcall'
    Output: <list> [{'Name': 'b310f08130r3'}, {'Name': 'c210935j22239'}, ...]
    """
    logger.info(f"get_all_refs_from_s3_objects: Getting objects from {bucket_name}")
    # BUG FIX: a single list_objects_v2 call returns at most 1000 keys and
    # KeyErrors on an empty bucket (no 'Contents'); paginate and default to
    # an empty page instead.
    list_of_reference_numbers = []
    paginator = s3.get_paginator('list_objects_v2')
    for page in paginator.paginate(Bucket=bucket_name):
        for key in page.get('Contents', []):
            ref = get_reference_number_from_object_name(key['Key'])
            list_of_reference_numbers.append({'Name': ref})
    logger.info(f"Received {len(list_of_reference_numbers)} objects from {bucket_name}")
    return list_of_reference_numbers
def add_new_or_incomplete_items(bucket_name, es, table):
    """Ensure the elasticsearch index mirrors the objects in `bucket_name`,
    fully populated with metadata where available.

    For each object reference in the bucket:
      * if it is already in the index with all fields -> skip;
      * if it is missing or incomplete -> look up metadata in `table`
        (dynamodb), merge it with the s3 payload and index the result;
      * if no metadata exists -> collect the reference for later follow-up.

    INPUTS:
        bucket_name (str): source bucket of transcribed-call json files.
        es (elasticsearch_tools.Elasticsearch): destination index wrapper.
        table (dynamodb_tools.RightcallTable): metadata source.
    Returns:
        list[str]: references for which no metadata could be found.

    BUG FIX: the metadata lookups previously used the module-global `rtable`
    instead of the `table` parameter, silently ignoring the caller's table.
    """
    refs = get_all_refs_from_s3_objects(bucket_name)
    get_meta_data = []
    # For each reference number:
    for i, call_record in enumerate(refs):
        s3_item = None
        db_item = None
        logger.info('---------------------------------------')
        logger.info(f"Working on {i} : {call_record['Name']}")
        ref = call_record['Name']
        # Check if it is in the ES index
        logger.info(f"Checking if {ref} is in {es.index}")
        if es.exists(ref):
            logger.info(f"{ref} already in {es.index} index")
            logger.info(f"Checking if {ref} fully populated in {es.index} index")
            # Check if es document has all its fields
            if es.fully_populated_in_elasticsearch(ref):
                logger.info(f"{ref} fully populated in {es.index}")
                continue
            else:
                # Exists in ES but missing fields
                logger.info(f"{ref} missing metadata")
        # If not, get object from s3 in order to construct object to index
        else:
            logger.info(f"{ref} not in {es.index} index")
            logger.info(f"Checking {es.index} database for missing metadata")
            db_item = table.get_db_item(ref)
            if not db_item:
                logger.info(f"Adding {ref} to 'get_meta_data'")
                get_meta_data.append(ref)
                continue
            else:
                logger.info(f"Data present in {es.index} database: {db_item}")
        # Upload to elasticsearch
        if s3_item is None:
            logger.info(f"Ensuring object is downloaded from {bucket_name}")
            s3_item = s3py.get_first_matching_item(ref, bucket_name)
            logger.info(f"{ref} found in {bucket_name}")
        logger.info(f"Preparing data: old data: {s3_item.keys()}")
        s3_item = es.rename(s3_item)
        logger.info(f"Cleaned data: {s3_item.keys()}")
        logger.info(f"Combining data for {ref} from {table} and {bucket_name} and adding to {es.index} index")
        result = es.load_call_record(db_item, s3_item)
        if result:
            logger.info(f"{ref} successfully added to {es.index} index")
        else:
            logger.error(f"Couldn't upload to elasticsearch: {result}")
    return get_meta_data
if __name__ == '__main__':
region = 'eu-west-1'
dynamodb_table_name = 'rightcall_metadata'
BUCKET = 'comprehend.rightcall'
s3 = boto3.client('s3')
credentials = boto3.Session().get_credentials()
awsauth = AWS4Auth(
credentials.access_key,
credentials.secret_key,
region,
'es',
session_token=credentials.token)
es = elasticsearch_tools.Elasticsearch(
'search-rightcall-445kqimzhyim4r44blgwlq532y.eu-west-1.es.amazonaws.com',
region,
index='rightcall',
auth=awsauth)
rtable = dynamodb_tools.RightcallTable(region, dynamodb_table_name)
mapping = {
"mappings": {
"_doc": {
"properties": {
"referenceNumber": {"type": "keyword"},
"text": {"type": "text"},
"sentiment": {"type": "keyword"},
"promotion": {"type": "keyword"},
"entities": {"type": "keyword"},
"keyPhrases": {"type": "keyword"},
"date": {"type": "date", "format": "yyyy-MM-dd HH:mm:ss"},
"country": {"type": "keyword"},
"length": {"type": "integer"}
}
}
}
}
# es.create_index('demo', mapping=mapping, set_as_current_index=True)
# calls_missing_meta_data = add_new_or_incomplete_items(BUCKET, es, rtable)
# logger.info(f"Refs without metadata {calls_missing_meta_data}")
| StarcoderdataPython |
374454 | <filename>arxiv_html/renders/tests/test_integration.py
import time
import os
from django.conf import settings
from django.test import override_settings
from rest_framework.test import APITestCase
import timeout_decorator
@override_settings(
    ARXIV_SOURCE_URL_FORMAT="/code/arxiv_html/renders/tests/fixtures/{source_id}.tex"
)
class IntegrationTest(APITestCase):
    """
    Tests the entire rendering system using a local file.
    """

    @timeout_decorator.timeout(10)
    def test_creating_a_render(self):
        """A valid source renders asynchronously to SUCCESS with HTML output."""
        # First PUT creates the render job.
        response = self.client.put("/renders?source_type=arxiv&source_id=helloworld")
        self.assertEqual(response.status_code, 201)
        self.assertEqual(response.data["source_type"], "arxiv")
        self.assertEqual(response.data["source_id"], "helloworld")
        self.assertEqual(response.data["state"], "PENDING")
        self.assertEqual(response.data["output_url"], None)
        # Poll (bounded by the 10s timeout decorator) until the job settles.
        while response.data["state"] in ("PENDING", "STARTED"):
            response = self.client.put("/renders?source_type=arxiv&source_id=helloworld")
            self.assertEqual(response.status_code, 200)
            time.sleep(0.1)
        render_id = response.data["id"]
        self.assertEqual(response.data["state"], "SUCCESS")
        self.assertEqual(
            response.data["output_url"], f"/media/render-output/{render_id}"
        )
        self.assertIn("No obvious problems", response.data["logs"])
        self.assertIn("Document successfully rendered", response.data["logs"])
        # Verify the rendered HTML actually landed on disk.
        output_path = os.path.join(
            settings.MEDIA_ROOT, "render-output", str(render_id), "index.html"
        )
        with open(output_path) as fh:
            html = fh.read()
        self.assertIn("Generated by LaTeXML", html)
        self.assertIn('<p class="ltx_p">Hello world</p>', html)

    @timeout_decorator.timeout(10)
    def test_creating_a_failing_render(self):
        """A broken source ends in FAILURE with the LaTeXML error in the logs."""
        response = self.client.put("/renders?source_type=arxiv&source_id=broken")
        self.assertEqual(response.status_code, 201)
        self.assertEqual(response.data["source_type"], "arxiv")
        self.assertEqual(response.data["source_id"], "broken")
        self.assertEqual(response.data["state"], "PENDING")
        self.assertEqual(response.data["output_url"], None)
        # Poll until the job leaves the in-progress states.
        while response.data["state"] in ("PENDING", "STARTED"):
            response = self.client.put("/renders?source_type=arxiv&source_id=broken")
            self.assertEqual(response.status_code, 200)
            time.sleep(0.1)
        self.assertEqual(response.data["state"], "FAILURE")
        self.assertIn("1 fatal error", response.data["logs"])
| StarcoderdataPython |
5156920 | from flask_restplus import Namespace, Resource, fields
from werkzeug.datastructures import FileStorage
from flask import make_response
from PIL import Image
import io
import glob
import os
import shutil
from flask import abort
from config import MODEL_META_DATA
from core.backend import ModelWrapper
from api.pre_process import alignMain
import re
api = Namespace('model', description='Model information and inference operations')
model_meta = api.model('ModelMetadata', {
'id': fields.String(required=True, description='Model identifier'),
'name': fields.String(required=True, description='Model name'),
'description': fields.String(required=True, description='Model description'),
'license': fields.String(required=False, description='Model license')
})
@api.route('/metadata')
class Model(Resource):
    """Exposes the model's static metadata (id, name, description, license)."""

    @api.doc('get_metadata')
    @api.marshal_with(model_meta)
    def get(self):
        """Return the metadata associated with the model"""
        return MODEL_META_DATA
# Creating a JSON response model: https://flask-restplus.readthedocs.io/en/stable/marshalling.html#the-api-model-factory
label_prediction = api.model('LabelPrediction', {
'label_id': fields.String(required=False, description='Label identifier'),
'label': fields.String(required=True, description='Class label'),
'probability': fields.Float(required=True)
})
# Set up parser for input data (http://flask-restplus.readthedocs.io/en/stable/parsing.html)
input_parser = api.parser()
# Example parser for file input
input_parser.add_argument('file', type=FileStorage, location='files', required=True, help='An image file (encoded as PNG or JPG/JPEG)')
input_parser.add_argument('mask_type', type=str, default='center', required=True,
choices=('random', 'center', 'left', 'grid'),
help='Available options for mask_type are random, center, left and grid. ')
@api.route('/predict')
class Predict(Resource):
    """Face-inpainting endpoint: masks an uploaded face image and returns a
    5x4 collage of the model's completion frames as a JPEG attachment."""

    model_wrapper = ModelWrapper()

    @api.doc('predict')
    @api.expect(input_parser)
    def post(self):
        """Make a prediction given input data.

        Pipeline: validate upload -> save into the input work dir ->
        detect/align the face with openface -> run the inpainting model for
        the requested mask type -> build the collage -> return it with the
        face bounding box in the ``coordinates`` response header.
        """
        args = input_parser.parse_args()
        if not args['file'].mimetype.endswith(('jpg', 'jpeg', 'png')):
            abort(400, 'Invalid file type/extension. Please provide an image in JPEG or PNG format.')
        image_input_read = Image.open(args['file'])
        image_mask_type = args['mask_type']

        # Creating directory for storing input.
        input_directory = '/workspace/assets/input/file'
        if not os.path.exists(input_directory):
            os.mkdir(input_directory)

        # Clear leftovers from previous requests (best effort).
        input_folder = '/workspace/assets/input/file/'
        for leftover in glob.glob(input_folder + '*'):
            try:
                try:
                    os.remove(leftover)
                except OSError:
                    # Not a plain file: remove it as a directory tree.
                    shutil.rmtree(leftover)
            except OSError:
                # BUG FIX: was a bare `except:`; skip undeletable entries.
                continue

        # Save input image as RGB JPEG (model expects 3 channels).
        image_input_read = image_input_read.convert('RGB')
        image_input_read.save('/workspace/assets/input/file/input.jpg')

        # Face detection, alignment and resize using openface.
        align_args = {
            'inputDir': input_directory,
            'outputDir': '/workspace/assets/input/file/align',
            'landmarks': 'innerEyesAndBottomLip',
            'dlibFacePredictor': '/workspace/openface/models/dlib/shape_predictor_68_face_landmarks.dat',
            'verbose': True,
            'size': 64,
            'skipMulti': False,
            'fallbackLfw': None,
            'mode': 'align'
        }
        final_coordinates = None
        try:
            coordinates = alignMain(align_args)
            coordinates_string = str(coordinates)
            # Parse "[(x1, y1) (x2, y2)]" into "[[x1,y1],[x2,y2]]".
            pattern = r'^\s*\[\s*\(\s*(\d+)\s*,\s*(\d+)\s*\)\s*\(\s*(\d+)\s*,\s*(\d+)\s*\)\s*\]\s*$'
            m = re.match(pattern, coordinates_string)
            if m:
                final_coordinates = '[[{},{}],[{},{}]]'.format(m.group(1), m.group(2), m.group(3), m.group(4))
        except Exception:
            # Abort if no face was detected.
            abort(400, 'No face was detected in the image.')
        if final_coordinates is None:
            # BUG FIX: previously `final_coordinates` was left unbound here and
            # crashed with a NameError when the response header was set.
            abort(500, 'Could not parse the detected face coordinates.')

        # Aligned input produced by openface.
        input_data = '/workspace/assets/input/file/align/file/input.png'
        image_path = self.model_wrapper.predict(input_data, image_mask_type)

        collage_path = self._build_collage(image_path)
        return self._image_response(collage_path, final_coordinates)

    @staticmethod
    def _build_collage(image_path):
        """Paste the generated frames (sorted by filename) into a 320x256
        collage of 5 columns x 4 rows and return its path."""
        new_collage_path = "/workspace/assets/center_mask/completed/Collage.jpg"
        img_columns = 5
        img_rows = 4
        img_width = 320
        img_height = 256
        thumbnail_width = img_width // img_columns
        thumbnail_height = img_height // img_rows
        size = thumbnail_width, thumbnail_height
        new_collage = Image.new('RGB', (img_width, img_height))
        images_list = []
        for filename in sorted(glob.glob(image_path)):
            im = Image.open(filename)
            im.thumbnail(size)
            images_list.append(im)
        # Fill column by column, top to bottom.
        i = 0
        x = 0
        for _col in range(img_columns):
            y = 0
            for _row in range(img_rows):
                new_collage.paste(images_list[i], (x, y))
                i += 1
                y += thumbnail_height
            x += thumbnail_width
        new_collage.save(new_collage_path)
        return new_collage_path

    @staticmethod
    def _image_response(collage_path, final_coordinates):
        """Wrap the collage JPEG in a download response, attaching the face
        bounding box in the ``coordinates`` header."""
        img = Image.open(collage_path, mode='r')
        img_byte_arr = io.BytesIO()
        img.save(img_byte_arr, format='JPEG')
        response = make_response(img_byte_arr.getvalue())
        response.headers.set('Content-Type', 'image/jpeg')
        response.headers.set('Content-Disposition', 'attachment', filename='result.jpg')
        response.headers.set('coordinates', final_coordinates)
        return response
| StarcoderdataPython |
6462604 | <reponame>faribas/RMG-Py<filename>external/cclib/bridge/__init__.py
"""
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
"""
__revision__ = "$Revision: 839 $"
# Optional bridges: each backend's converter is only exposed when its
# third-party dependency imports cleanly.
try:
    import openbabel
except Exception:
    # NOTE(review): deliberately broader than ImportError? openbabel can fail
    # at import time with other errors -- confirm before narrowing.
    pass
else:
    from cclib2openbabel import makeopenbabel

try:
    import PyQuante
except ImportError:
    pass
else:
    from cclib2pyquante import makepyquante

try:
    from cclib2biopython import makebiopython
except ImportError:
    pass
| StarcoderdataPython |
1683809 | <filename>modules/templates/CERT/controllers.py
# -*- coding: utf-8 -*-
from os import path
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
from gluon.storage import Storage
from s3 import *
# =============================================================================
class index():
    """ Custom Home Page """

    def __call__(self):
        """Render the CERT custom home page with its dashboard menus.

        Loads the template as an open file (required in compiled mode),
        applies the CERT stylesheet, and builds the menu definitions
        consumed by the view.
        """
        request = current.request
        response = current.response
        view = path.join(request.folder, "modules", "templates",
                         "CERT", "views", "index.html")
        try:
            # Pass view as file not str to work in compiled mode
            response.view = open(view, "rb")
        except IOError:
            from gluon.http import HTTP
            raise HTTP(404, "Unable to open Custom View: %s" % view)

        current.response.s3.stylesheets.append("../themes/CERT/homepage.css")
        settings = current.deployment_settings
        title = settings.get_system_name()
        response.title = title
        T = current.T
        # NOTE: the original also read auth/session role state here
        # (roles, system_roles, AUTHENTICATED, appname) but never used it;
        # those dead lookups have been removed.
        # @ToDo: Add event/human_resource - but this requires extending event_human_resource to link to event.
        # NOTE(review): "certicates" typo below is preserved -- it is a
        # translation key; fix it together with the translation files.
        menus = [{"title":T("Volunteers"),
                  "icon":"user",
                  "description":T("Manage people who have volunteered for your organization, their contact details, certicates and trainings."),
                  "module":"vol",
                  "function":"volunteer",
                  "buttons":[{"args":"summary",
                              "icon":"list",
                              "label":T("View"),
                              },
                             {"args":"create",
                              "icon":"plus-sign",
                              "label":T("Create"),
                              }]
                  },
                 {"title":T("Trainings"),
                  "icon":"book",
                  "description":T("Catalog of Training Courses which your Volunteers can attend."),
                  "module":"vol",
                  "function":"course",
                  "buttons":[{"args":"summary",
                              "icon":"list",
                              "label":T("View"),
                              },
                             {"args":"create",
                              "icon":"plus-sign",
                              "label":T("Create"),
                              }]
                  },
                 {"title":T("Certificates"),
                  "icon":"certificate",
                  "description":T("Catalog of Certificates which your Volunteers can get."),
                  "module":"vol",
                  "function":"certificate",
                  "buttons":[{"args":"summary",
                              "icon":"list",
                              "label":T("View"),
                              },
                             {"args":"create",
                              "icon":"plus-sign",
                              "label":T("Create"),
                              }]
                  },
                 {"title":T("Messaging"),
                  "icon":"envelope-alt",
                  "description":T("Send Email, SMS and Twitter messages to your Volunteers."),
                  "module":"msg",
                  "function":"Index",
                  "args":None,
                  "buttons":[{"function":"inbox",
                              "args":None,
                              "icon":"inbox",
                              "label":T("Inbox"),
                              },
                             {"function":"compose",
                              "args":None,
                              "icon":"plus-sign",
                              "label":T("Compose"),
                              }]
                  },
                 ]

        return dict(title = title,
                    menus=menus,
                    )
# END =========================================================================
| StarcoderdataPython |
9609252 | """ Contains SQL Dialects """
def dialect_selector(s):
    """Return the dialect class registered under the name *s*.

    Raises KeyError when the requested dialect is not registered.
    """
    registry = {
        'ansi': AnsiSQLDialiect,
    }
    return registry[s]
class AnsiSQLDialiect(object):
    """ANSI SQL dialect definition.

    Currently only carries the dialect name; the commented-out match
    patterns below document the lexer rules that were (or are planned to
    be) associated with this dialect.  NOTE(review): the class name keeps
    the original "Dialiect" misspelling because external code looks it up
    by this name.
    """
    name = 'ansi'
    # Whitespace is what divides other bits of syntax
    # # whitespace_regex = RegexMatchPattern(r'\s+', 'whitespace')
    # Anything after an inline comment gets chunked together as not code
    # Priority required here, because it's potentially ambiguous with the operator regex
    # # inline_comment_regex = RegexMatchPattern(r'(--|#)[^\n]*', 'comment', priority=2)  # In MySQL, we need a space after the '--'
    # Anything between the first and last part of this tuple counts as not code
    # Priority required because of divide operator or multiply operator
    # # closed_block_comment = RegexMatchPattern(r'/\*[^\n]*\*/', 'closed_block_comment', priority=3)
    # # open_block_comment_start = RegexMatchPattern(r'/\*[^\n]*', 'open_block_comment_start', priority=2)
    # # open_block_comment_end = RegexMatchPattern(r'[^\n]*\*/', 'open_block_comment_end', priority=3)
    # String Quote Characters
    # # string_quote_characters = MatcherBag(CharMatchPattern("'", 'string_literal'))  # NB in Mysql this should also include "
    # Identifier Quote Characters
    # # identifier_quote_characters = MatcherBag(CharMatchPattern('"', 'object_literal'))  # NB in Mysql this should be `
    # Singleton Match Patterns
    # # comma_characters = SingleCharMatchPattern(',', 'comma')
    # NB: A Star and a mulitply look the same to a lexer!!!! We'll have to
    # resolve that ambiguity later, for now it's a multiply
    # star_characters = SingleCharMatchPattern('*', 'star')
    # Operator Match Patterns (A slightly larger supserset of ansi)
    # # operator_regex = RegexMatchPattern(r'(\+|-|\*|/)', 'operator')
    # These are case insensitive but require spaces to distinguish from words
    # # text_operator_regex = RegexMatchPattern(r'(?i)(or|and)', 'operator')
    # Number matchers, and these must have a higher priority than the operator one
    # to make sure we deal with minus signs correctly
    # # number_regex = RegexMatchPattern(r'-?[0-9]+\.?[0-9]*', 'number', priority=2)
    # Bracket matchers
    # # open_bracket_matcher = SingleCharMatchPattern('(', 'open_bracket')
    # # close_bracket_matcher = SingleCharMatchPattern(')', 'close_bracket')
    # # outside_block_comment_matchers = MatcherBag(
    # #    whitespace_regex, inline_comment_regex, closed_block_comment,
    # #    open_block_comment_start, string_quote_characters, identifier_quote_characters,
    # #    comma_characters, operator_regex, open_bracket_matcher, close_bracket_matcher,
    # #    number_regex)
    # # inside_block_comment_matchers = MatcherBag(open_block_comment_end)
| StarcoderdataPython |
327603 | <gh_stars>10-100
"""Test kytos.core.connection module."""
from socket import error as SocketError
from unittest import TestCase
from unittest.mock import MagicMock
from kytos.core.connection import Connection, ConnectionState
class TestConnection(TestCase):
    """Connection tests.

    Each test builds on the Connection created in setUp(); several tests
    mutate connection state, so the order of assertions inside a test is
    significant.
    """
    def setUp(self):
        """Instantiate a Connection."""
        socket = MagicMock()
        switch = MagicMock()
        self.connection = Connection('addr', 123, socket, switch)
        # Link switch back to the connection, mirroring production wiring.
        switch.connection = self.connection
    def test__str__(self):
        """Test __str__ method."""
        self.assertEqual(str(self.connection), "Connection('addr', 123)")
    def test__repr__(self):
        """Test __repr__ method."""
        self.connection.socket = 'socket'
        self.connection.switch = 'switch'
        expected = "Connection('addr', 123, 'socket', 'switch', " + \
                   "<ConnectionState.NEW: 0>)"
        self.assertEqual(repr(self.connection), expected)
    def test_state(self):
        """Test state property."""
        self.assertEqual(self.connection.state.value, 0)
        self.connection.state = ConnectionState.FINISHED
        self.assertEqual(self.connection.state.value, 4)
    def test_state__error(self):
        """Test state property to error case."""
        # Setting an arbitrary int (not a ConnectionState) must raise.
        with self.assertRaises(Exception):
            self.connection.state = 1000
    def test_id(self):
        """Test id property."""
        self.assertEqual(self.connection.id, ('addr', 123))
    def test_send(self):
        """Test send method."""
        self.connection.send(b'data')
        self.connection.socket.sendall.assert_called_with(b'data')
    def test_send__error(self):
        """Test send method to error case."""
        # A socket error during send must close/clear the socket.
        self.connection.socket.sendall.side_effect = SocketError
        self.connection.send(b'data')
        self.assertIsNone(self.connection.socket)
    def test_close(self):
        """Test close method."""
        self.connection.close()
        self.assertIsNone(self.connection.socket)
    def test_close__os_error(self):
        """Test close method to OSError case."""
        # shutdown() failing with OSError propagates and keeps the socket.
        self.connection.socket.shutdown.side_effect = OSError
        with self.assertRaises(OSError):
            self.connection.close()
        self.assertIsNotNone(self.connection.socket)
    def test_close__attribute_error(self):
        """Test close method to AttributeError case."""
        # Closing an already-closed connection must be a no-op.
        self.connection.socket = None
        self.connection.close()
        self.assertIsNone(self.connection.socket)
    def test_is_alive(self):
        """Test is_alive method to True and False returns."""
        self.assertTrue(self.connection.is_alive())
        self.connection.state = ConnectionState.FINISHED
        self.assertFalse(self.connection.is_alive())
    def test_is_new(self):
        """Test is_new method."""
        self.assertTrue(self.connection.is_new())
    def test_established_state(self):
        """Test set_established_state and is_established methods."""
        self.connection.set_established_state()
        self.assertTrue(self.connection.is_established())
    def test_setup_state(self):
        """Test set_setup_state and is_during_setup methods."""
        self.connection.set_setup_state()
        self.assertTrue(self.connection.is_during_setup())
    def test_update_switch(self):
        """Test update_switch method."""
        switch = MagicMock()
        self.connection.update_switch(switch)
        self.assertEqual(self.connection.switch, switch)
        self.assertEqual(switch.connection, self.connection)
| StarcoderdataPython |
9751630 | # -*- coding: UTF-8 -*-
"""
Copyright 2020 Tianshu AI Platform. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=============================================================
"""
import threading
import time
import json
from io import BytesIO
from pathlib import Path
from tbparser import SummaryReader
from tbparser import Projector_Reader
from utils.cache_io import CacheIO
from utils.path_utils import path_parser
from utils.redis_utils import RedisInstance
import pickle
class Trace_Thread(threading.Thread):
    """Worker thread that tails one TensorBoard-style log file.

    Parses event/projector/model files with tbparser, writes parsed items
    into the local cache via CacheIO/pickle, and records the cache file
    locations in Redis so the web layer can find them.
    """
    def __init__(self, runname, filename, current_size, uid, cache_path):
        threading.Thread.__init__(self, name=filename.name)
        self.uid = uid
        self.runname = runname
        self.cache_path = cache_path
        self.filename = filename
        self.current_size = current_size
        self.r = RedisInstance
        # Whether this log contains hyperparameters.
        self.has_hparams = False
        self.first_write = False
        self.metrics = []
        # Whether initialization has finished (0/1).
        self._finish_init = 0
        # Redis keys already written, to avoid duplicate SETs.
        self.redis_tag = []
    def run(self):
        print('监听文件 %s' % self.filename)
        self.trace(self.current_size)
    def trace(self, current_size):
        """Parse the file once, then keep polling event files for new data.

        NOTE(review): for event files this loops forever (2s sleep between
        polls) — the thread only stops with the process.
        """
        filename = Path(self.filename)
        if filename.suffix == ".json":
            self.load_model_file(filename)
            self.finish_init = 1
            return
        f = open(filename, "rb")
        # for event file
        if "event" in filename.name:
            _io = BytesIO(
                f.read(current_size)
            )
            self.load_event_file(_io)
            # Mark initialization as finished.
            self.finish_init = 1
            while True:
                rest = f.read()
                if not rest:
                    time.sleep(2)
                    continue
                _io = BytesIO(rest)
                self.load_event_file(_io)
        # for projector file
        elif "projector" in filename.name:
            self.load_projector_file(f)
            # Mark initialization as finished.
            self.finish_init = 1
    @property
    def finish_init(self):
        return self._finish_init
    # Setter also publishes the "finished" flag to Redis.
    @finish_init.setter
    def finish_init(self, is_finish):
        self.r.set("{}_{}_{}_is_finish".format(self.uid, self.runname,
                                               self.filename.name), 1)
        print(self.name + " is finish")
        self._finish_init = is_finish
    def set_redis_key(self, type, tag, file_path):
        """Record a cache-file location in Redis once per (type, tag)."""
        _key = self.uid + '_' + self.runname + '_' + type + '_' + tag
        if _key in self.redis_tag:
            pass
        else:
            self.r.set(_key, str(file_path))
            self.redis_tag.append(_key)
    def set_cache(self, file_name, data):
        """Append a pickled record to *file_name*, creating parent dirs."""
        if not file_name.parent.exists():
            file_name.parent.mkdir(parents=True, exist_ok=True)
        with open(file_name, 'ab') as f:
            pickle.dump(data, f)
        # NOTE(review): redundant — the `with` block already closed f.
        f.close()
    def load_event_file(self, fileIO):
        """Read every summary item from *fileIO* and cache it by type/tag."""
        reader = SummaryReader(fileIO, types=[
            'scalar',
            'graph',
            'hist',
            'text',
            'image',
            'audio',
            'hparams'
        ])
        for items in reader:
            if items.type == "graph":
                # Graphs are cached whole under the fixed 'c_graph' tag.
                file_path = path_parser(self.cache_path, self.runname,
                                        items.type, tag='c_graph')
                CacheIO(file_path).set_cache(data=items.value)
                self.set_redis_key(items.type, tag='c_graph',
                                   file_path=file_path)
                continue
            elif items.type == "hparams":
                file_path = path_parser(self.cache_path, self.runname,
                                        type='hyperparm',
                                        tag='hparams')
                self.set_cache(file_name=file_path, data=items.value)
                self.set_redis_key(type='hyperparm',
                                   tag='hparams',
                                   file_path=file_path)
                continue
            # All other item types share one record layout.
            item_data = {
                'step': items.step,
                'wall_time': items.wall_time,
                'value': items.value,
                'type': items.type
            }
            file_path = path_parser(self.cache_path, self.runname,
                                    type=items.type,
                                    tag=items.tag)
            CacheIO(file_path).set_cache(data=item_data)
            self.set_redis_key(type=items.type, tag=items.tag,
                               file_path=file_path)
    def load_projector_file(self, fileIO):
        """Cache embedding/projector records (values flattened to 2-D)."""
        p_reader = Projector_Reader(fileIO).read()
        for items in p_reader.projectors:
            item_data = {
                'step': items.step,
                'wall_time': items.wall_time,
                # Flatten higher-rank tensors to (N, -1) for the frontend.
                'value': items.value.reshape(items.value.shape[0], -1)
                if items.value.ndim > 2 else items.value,
                'label': items.label,
            }
            file_path = path_parser(self.cache_path, self.runname,
                                    type=p_reader.metadata.type,
                                    tag=items.tag)
            CacheIO(file_path).set_cache(data=item_data)
            self.set_redis_key(type=p_reader.metadata.type, tag=items.tag,
                               file_path=file_path)
            if p_reader.sample:
                file_path = path_parser(self.cache_path, self.runname,
                                        type="embedding",
                                        tag="sample_" + items.tag)
                CacheIO(file_path).set_cache(data=p_reader.sample)
                self.set_redis_key(type="embedding", tag="sample_" + items.tag,
                                   file_path=file_path)
    def filter_graph(self, file):
        """Return *file* (a model-graph JSON string) with 'variable' layers
        and references to them stripped out."""
        variable_names = {}
        graph = json.loads(file)
        for sub_graph in graph:
            cfg = sub_graph["config"]
            # Work on a copy so we can remove from the original while looping.
            cfg_copy = cfg["layers"].copy()
            for layer in cfg_copy:
                if layer["class_name"] == "variable":
                    _name = layer["name"]
                    variable_names[_name] = layer
                    cfg["layers"].remove(layer)
        # Second pass: drop names collected in `variable_names` from every
        # layer's `inbound_nodes`.
        for sub_graph in graph:
            cfg = sub_graph["config"]
            for layer in cfg["layers"]:
                in_nodes = layer["inbound_nodes"]
                in_nodes_copy = in_nodes.copy()
                for node in in_nodes_copy:
                    # Remove the node if it referred to a stripped variable.
                    if node in variable_names.keys():
                        in_nodes.remove(node)
        graph_str = json.dumps(graph)
        return graph_str
    def load_model_file(self, file):
        """Cache both the raw ("c_graph") and filtered ("s_graph") model
        graphs parsed from a model JSON file."""
        with open(file, "r") as f:
            # Raw structure-graph content as written by the training run.
            _cg_content = f.read()
            _sg_content = self.filter_graph(_cg_content)
            # caclulate_graph.json
            sg_file_path = path_parser(self.cache_path, self.runname,
                                       type="graph",
                                       tag="s_graph")
            cg_file_path = path_parser(self.cache_path, self.runname,
                                       type="graph",
                                       tag="c_graph")
            CacheIO(sg_file_path).set_cache(data=_sg_content)
            CacheIO(cg_file_path).set_cache(data=_cg_content)
            self.set_redis_key(type="graph", tag="s_graph",
                               file_path=sg_file_path)
            self.set_redis_key(type="graph", tag="c_graph",
                               file_path=cg_file_path)
| StarcoderdataPython |
6565044 | <gh_stars>0
import setuptools

# Long description shown on the package's PyPI page; sourced from the README.
with open("README.md", "r") as f:
    long_description = f.read()

setuptools.setup(
    name='erg',
    version='0.0.1',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Package for unpacking erg data.',
    long_description=long_description,
    # BUG FIX: was the invalid value 'ext/markdown'; PyPI requires
    # 'text/markdown' for Markdown-rendered descriptions.
    long_description_content_type='text/markdown',
    packages=setuptools.find_packages(),
    install_requires=["numpy", "pandas", "scipy", "matplotlib",
                      "holoviews", "bokeh>=1.4.0"],
    # setuptools expects classifiers as a list (a tuple triggers a warning).
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
)
5015947 | <reponame>keeplerteam/thekpi
from network.ipfs import Ipfs
# Canned responses that the mocked IPFS HTTP API is expected to return;
# the tests below compare Ipfs results against these verbatim.
MOCK_SERVER_VERSION = {"Version": "0.9.1", "Commit": "", "Repo": "11", "System": "amd64/windows", "Golang": "go1.16.6"}
PUT_BLOCK_RESPONSE = {'Key': '<KEY>', 'Size': 14}
GET_BLOCK_RESPONSE = {"test": True}
def test_load_version():
    """A fresh node should expose the daemon version reported by the API."""
    assert Ipfs().ipfs_version == MOCK_SERVER_VERSION
def test_create_cid():
    """Storing a block should return the expected key/size response."""
    assert Ipfs().create_cid({"test": True}) == PUT_BLOCK_RESPONSE
def test_load_cid():
    """Loading a known CID should return the stored block content."""
    cid = "QmX24kz2ykEuXHd3ojWFniok9peNyJDsAz4XCJfvurob8B"
    assert Ipfs().load_cid(cid) == GET_BLOCK_RESPONSE
| StarcoderdataPython |
3328102 | <filename>packaging/setup/plugins/ovirt-engine-remove/base/files/simple.py<gh_stars>1-10
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Simple plugin."""
import configparser
import gettext
import glob
import hashlib
import os
from otopi import constants as otopicons
from otopi import filetransaction
from otopi import plugin
from otopi import util
from ovirt_engine_setup import constants as osetupcons
from ovirt_setup_lib import dialog
def _(m):
    """Translate *m* using the ovirt-engine-setup gettext domain."""
    return gettext.dgettext('ovirt-engine-setup', m)
@util.export
class Plugin(plugin.PluginBase):
    """Simple plugin.

    Drives removal of files recorded by setup's uninstall info files
    (*.conf under OVIRT_SETUP_UNINSTALL_DIR) and reverts recorded
    line-level changes in files that cannot be removed wholesale.
    """
    def _digestFile(self, filename):
        """Return the hex MD5 digest of *filename* (matches the digests
        stored in the uninstall info files)."""
        md5 = hashlib.new('md5')
        # Read file in chunks of 10KB
        with open(filename, 'rb') as f:
            while True:
                data = f.read(10240)
                if not data:
                    break
                md5.update(data)
        return md5.hexdigest()
    def _safeDelete(self, filename):
        """Delete *filename*; log (never raise) on failure."""
        try:
            os.unlink(filename)
        except OSError as e:
            self.logger.debug(
                "Cannot delete '%s'",
                filename,
                exc_info=True,
            )
            self.logger.error(
                _("Cannot delete '{file}': {error}").format(
                    file=filename,
                    error=e,
                )
            )
    def _revertChanges(self, filename, changes):
        """Revert recorded line changes in *filename*.

        *changes* is a list of dicts with optional 'added'/'removed' keys:
        only-'added' lines are dropped, only-'removed' lines are appended
        back, and added+removed pairs are replaced by the old line.  The
        rewrite is queued on the main transaction rather than applied
        immediately.
        """
        new_content = []
        with open(filename, 'r') as f:
            old_content = f.read().splitlines()
        just_remove = []
        just_add = []
        replace = {}
        for c in changes:
            if 'removed' not in c:
                just_remove.append(c['added'])
            elif 'added' not in c:
                just_add.append(c['removed'])
            else:
                replace[c['added']] = c['removed']
        # For checking if remove/replace lines were found, we work on copies,
        # because there might be duplicate lines in the file.
        remove_unremoved = just_remove[:]
        replace_unremoved = replace.copy()
        for line in old_content:
            if line in just_remove:
                if line in remove_unremoved:
                    remove_unremoved.remove(line)
            else:
                # should be updated or added
                if line in replace:
                    orig_line = line
                    line = replace[line]
                    if orig_line in replace_unremoved:
                        del replace_unremoved[orig_line]
                new_content.append(line)
        new_content.extend(just_add)
        # Anything left in the "unremoved" trackers was expected in the
        # file but not found — warn and log the details.
        if remove_unremoved or replace_unremoved:
            self.logger.warning(
                _(
                    'Some changes to {file} could not be reverted. More '
                    'details can be found in the log.'
                ).format(
                    file=filename,
                )
            )
        if remove_unremoved:
            self.logger.debug(
                (
                    'The following lines were not found in {file} and so '
                    'were not removed:\n{lines}'
                ).format(
                    file=filename,
                    lines='\n'.join(
                        [
                            '\t{line}'.format(line=newline)
                            for newline in remove_unremoved
                        ]
                    ),
                )
            )
        if replace_unremoved:
            self.logger.debug(
                (
                    'The following lines were not found in {file} and so '
                    'were not reverted to their old content:\n{lines}'
                ).format(
                    file=filename,
                    lines='\n'.join(
                        [
                            '\tnew:\t{new}\n\told:\t{old}\n'.format(
                                new=new,
                                old=old,
                            )
                            for new, old in replace_unremoved.items()
                        ]
                    ),
                )
            )
        if new_content != old_content:
            self.environment[otopicons.CoreEnv.MAIN_TRANSACTION].append(
                filetransaction.FileTransaction(
                    name=filename,
                    content=new_content
                )
            )
    def __init__(self, context):
        super(Plugin, self).__init__(context=context)
        # Populated during init/customization stages.
        self._infos = None
        self._files = {}
        self._toremove = None
        self._lines = {}
        self._descriptions = {}
    @plugin.event(
        stage=plugin.Stages.STAGE_INIT,
    )
    def _init(self):
        """Seed environment defaults and collect the uninstall info files."""
        self.environment.setdefault(
            osetupcons.RemoveEnv.REMOVE_GROUPS,
            ''
        )
        self.environment.setdefault(
            osetupcons.RemoveEnv.ASK_GROUPS,
            True
        )
        self.environment.setdefault(
            osetupcons.RemoveEnv.FILES_TO_REMOVE,
            []
        )
        # TODO: check if we need to allow to override this by answer file.
        # Using a list here won't allow you to override this
        self.environment.setdefault(
            osetupcons.RemoveEnv.REMOVE_SPEC_OPTION_GROUP_LIST,
            []
        )
        self.environment.setdefault(
            osetupcons.RemoveEnv.REMOVE_CHANGED,
            None
        )
        self._infos = sorted(
            glob.glob(
                os.path.join(
                    osetupcons.FileLocations.OVIRT_SETUP_UNINSTALL_DIR,
                    '*.conf',
                )
            )
        )
    @plugin.event(
        stage=plugin.Stages.STAGE_CUSTOMIZATION,
        name=osetupcons.Stages.REMOVE_CUSTOMIZATION_GROUPS,
        after=(
            osetupcons.Stages.REMOVE_CUSTOMIZATION_COMMON,
        ),
    )
    def _customization(self):
        """Select file groups to remove (optionally interactively) and
        compute the final removal/revert sets."""
        interactive = self.environment[
            osetupcons.RemoveEnv.ASK_GROUPS
        ]
        unremovable = {}
        already_asked = []
        for info in self._infos:
            config = configparser.ConfigParser()
            config.optionxform = str
            config.read([info])
            for section in config.sections():
                if section.startswith(
                    osetupcons.Const.FILE_GROUP_SECTION_PREFIX
                ):
                    group = section[
                        len(osetupcons.Const.FILE_GROUP_SECTION_PREFIX):
                    ]
                    description = config.get(section, 'description')
                    # Prefer a translated description when one exists.
                    template = "%s.description" % group
                    msg = gettext.dgettext(
                        message=template,
                        domain='ovirt-engine-setup'
                    )
                    if msg == template:
                        msg = description
                    self._descriptions[group] = msg
                    # NOTE(review): REMOVE_ALL is assumed to be set by an
                    # earlier plugin; it has no setdefault here.
                    add_group = self.environment[
                        osetupcons.RemoveEnv.REMOVE_ALL
                    ]
                    if not add_group:
                        if group in self.environment[
                            osetupcons.RemoveEnv.REMOVE_SPEC_OPTION_GROUP_LIST
                        ]:
                            add_group = True
                    if (
                        not add_group and
                        interactive and
                        group not in already_asked
                    ):
                        if group not in self.environment[
                            osetupcons.RemoveEnv.REMOVE_SPEC_OPTION_GROUP_LIST
                        ]:
                            already_asked.append(group)
                            add_group = dialog.queryBoolean(
                                dialog=self.dialog,
                                name='OVESETUP_REMOVE_GROUP/' + group,
                                note=_(
                                    'Do you want to remove {description}? '
                                    '(@VALUES@) [@DEFAULT@]: '
                                ).format(
                                    description=msg,
                                ),
                                prompt=True,
                                true=_('Yes'),
                                false=_('No'),
                                default=False,
                            )
                    if add_group:
                        self.environment[
                            osetupcons.RemoveEnv.
                            REMOVE_GROUPS
                        ] += ',' + group
            def getFiles(section):
                """Return {file name: md5} for file.* entries in *section*."""
                files = {}
                for name, value in config.items(section):
                    comps = name.split('.')
                    if comps[0] == 'file':
                        files.setdefault(comps[1], {})[comps[2]] = value
                # python 2.6 doesn't support dict comprehensions.
                # TODO: we may move to it when minimal python version
                # available is 2.7+
                return dict((f['name'], f['md5']) for f in files.values())
            def getLines(section):
                """Return {file name: [change dicts]} for line.* entries."""
                associated_lines = {}
                aggregated_lines = {}
                # line.{file_index:03}{line_index:03}.name
                # line.{file_index:03}{line_index:03}.content.added
                # line.{file_index:03}{line_index:03}.content.removed
                for name, value in config.items(section):
                    comps = name.split('.')
                    if comps[0] == 'line':
                        index = comps[1]  # '00001', '00002', etc
                        line_type = comps[2]  # 'name' or 'content'
                        if len(comps) == 3 and line_type == 'content':
                            comps.append('added')
                        if line_type == 'content':
                            action = comps[3]  # 'added' or 'removed'
                        associated_lines.setdefault(index, {})
                        if line_type == 'name':
                            associated_lines[index][line_type] = value
                        elif line_type == 'content':
                            associated_lines[index].setdefault(line_type, {})[
                                action
                            ] = value
                for f in associated_lines.values():
                    aggregated_lines.setdefault(
                        f['name'], []
                    ).append(f['content'])
                self.logger.debug(
                    'getLines: aggregated_lines = %s',
                    aggregated_lines,
                )
                return aggregated_lines
            for uninstall_group in [
                x.strip()
                for x in self.environment[
                    osetupcons.RemoveEnv.REMOVE_GROUPS
                ].split(',')
                if x.strip()
            ]:
                uninstall_section = (
                    osetupcons.Const.FILE_GROUP_SECTION_PREFIX +
                    uninstall_group
                )
                if config.has_section(uninstall_section):
                    # section could be missing in a conf file, for example if
                    # PKI config was not done because already existing
                    self._files.update(
                        getFiles(uninstall_section)
                    )
                    self._lines.update(
                        getLines(uninstall_section)
                    )
            if config.has_section('unremovable'):
                unremovable.update(getFiles('unremovable'))
        self._toremove = set(self._files.keys()) - set(unremovable.keys())
        # Detect files modified since setup (digest mismatch) and confirm
        # with the user before removing them.
        changed = []
        for f in self._toremove:
            if os.path.exists(f):
                if self._digestFile(f) != self._files[f]:
                    changed.append(f)
        self.logger.debug('changed=%s', changed)
        if changed:
            if self.environment[osetupcons.RemoveEnv.REMOVE_CHANGED] is None:
                self.environment[
                    osetupcons.RemoveEnv.REMOVE_CHANGED
                ] = dialog.queryBoolean(
                    dialog=self.dialog,
                    name='OVESETUP_ENGINE_REMOVE_CHANGED',
                    note=_(
                        'The following files were changed since setup:\n'
                        '{files}\n'
                        'Remove them anyway? '
                        '(@VALUES@) [@DEFAULT@]: '
                    ).format(
                        files='\n'.join(changed),
                    ),
                    prompt=True,
                    true=_('Yes'),
                    false=_('No'),
                    default=True,
                )
            if not self.environment[osetupcons.RemoveEnv.REMOVE_CHANGED]:
                self._toremove -= set(changed)
        self._tomodifylines = self._lines.keys()
        self.logger.debug('tomodifylines=%s', self._tomodifylines)
        self.logger.debug('files=%s', self._files)
        self.logger.debug('unremovable=%s', unremovable)
        self.logger.debug('toremove=%s', self._toremove)
        self.environment[
            osetupcons.RemoveEnv.FILES_TO_REMOVE
        ] = self._toremove
    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        priority=plugin.Stages.PRIORITY_LOW,
    )
    def _misc(self):
        """Perform the removal/revert computed during customization."""
        self.logger.info(_('Removing files'))
        for f in self._toremove:
            if os.path.exists(f):
                self._safeDelete(f)
            elif os.path.islink(f):
                # dead link
                self._safeDelete(f)
        self.logger.info(_('Reverting changes to files'))
        for f in self._tomodifylines:
            if os.path.exists(f):
                self._revertChanges(f, self._lines[f])
        # Finally drop the uninstall info files themselves.
        for info in self._infos:
            self._safeDelete(info)
    @plugin.event(
        stage=plugin.Stages.STAGE_CLOSEUP,
        before=(
            osetupcons.Stages.DIALOG_TITLES_E_SUMMARY,
        ),
        after=(
            osetupcons.Stages.DIALOG_TITLES_S_SUMMARY,
        ),
    )
    def _closeup(self):
        """Report the groups whose files were left untouched."""
        all_groups = set(self._descriptions.keys())
        uninstalled_groups = set([
            x.strip()
            for x in self.environment[
                osetupcons.RemoveEnv.REMOVE_GROUPS
            ].split(',')
            if x.strip()
        ])
        not_uninstalled = set(all_groups - uninstalled_groups)
        for group in not_uninstalled:
            self.dialog.note(
                text=_(
                    '{description} files not removed'
                ).format(
                    description=self._descriptions[group],
                ),
            )
# vim: expandtab tabstop=4 shiftwidth=4
| StarcoderdataPython |
8114709 | <reponame>fekblom/critic<filename>testing/tests/001-main/004-extensions/002-tests/004-TestExtension/008-processcommits.py
import os
import re
def to(name):
    """Mailbox criterion matching mails addressed to the given user."""
    recipient = "<EMAIL>" % name
    return testing.mailbox.ToRecipient(recipient)
def about(subject):
    """Mailbox criterion matching mails with the given subject line."""
    criterion = testing.mailbox.WithSubject(subject)
    return criterion
# Scenario: push a review branch, then follow-up fixup commits, and verify
# that the TestExtension's processcommits hook reports the expected commit
# ranges for both the initial and the updated review.
FILENAME = "008-processcommits.txt"
SUMMARY = "Added %s" % FILENAME
SETTINGS = { "review.createViaPush": True }
review_id = None
with testing.utils.settings("alice", SETTINGS), frontend.signin("alice"):
    with repository.workcopy() as work:
        base_sha1 = work.run(["rev-parse", "HEAD"]).strip()
        work.run(["remote", "add", "critic",
                  "alice@%s:/var/git/critic.git" % instance.hostname])
        def commit(fixup_message=None):
            # Stage FILENAME and commit; fixup commits reference SUMMARY.
            if fixup_message:
                full_message = "fixup! %s\n\n%s" % (SUMMARY, fixup_message)
            else:
                full_message = SUMMARY
            work.run(["add", FILENAME])
            work.run(["commit", "-m", full_message],
                     GIT_AUTHOR_NAME="<NAME>",
                     GIT_AUTHOR_EMAIL="<EMAIL>",
                     GIT_COMMITTER_NAME="<NAME>",
                     GIT_COMMITTER_EMAIL="<EMAIL>")
            return work.run(["rev-parse", "HEAD"]).strip()
        def push():
            # Push to the review branch and split the server-side output
            # into all "remote:" lines and the extension's own lines.
            output = work.run(
                ["push", "-q", "critic",
                 "HEAD:refs/heads/r/008-processcommits"])
            all_lines = []
            for line in output.splitlines():
                if not line.startswith("remote:"):
                    continue
                # Strip ANSI escape sequences appended by the server.
                all_lines.append(line[len("remote:"):].split("\x1b", 1)[0].strip())
            extension_lines = []
            for line in all_lines:
                if line.startswith("[TestExtension] "):
                    extension_lines.append(line[len("[TestExtension] "):])
            return all_lines, extension_lines
        with open(os.path.join(work.path, FILENAME), "w") as text_file:
            print >>text_file, "First line."
        first_commit = commit()
        all_lines, extension_lines = push()
        # The first push creates the review; pick its id out of the output.
        next_is_review_url = False
        for line in all_lines:
            if line == "Submitted review:":
                next_is_review_url = True
            elif next_is_review_url:
                review_id = int(re.search(r"/r/(\d+)$", line).group(1))
                break
        testing.expect.check(["processcommits.js::processcommits()",
                              "===================================",
                              "r/%d" % review_id,
                              "%s..%s" % (base_sha1[:8], first_commit[:8]),
                              "%s" % first_commit[:8]],
                             extension_lines)
        mailbox.pop(accept=[to("alice"),
                            about("New Review: %s" % SUMMARY)])
        # Three follow-up fixup commits, pushed together.
        with open(os.path.join(work.path, FILENAME), "a") as text_file:
            print >>text_file, "Second line."
        second_commit = commit("Added second line")
        with open(os.path.join(work.path, FILENAME), "a") as text_file:
            print >>text_file, "Third line."
        third_commit = commit("Added third line")
        with open(os.path.join(work.path, FILENAME), "a") as text_file:
            print >>text_file, "Fourth line."
        fourth_commit = commit("Added fourth line")
        all_lines, extension_lines = push()
        testing.expect.check(["processcommits.js::processcommits()",
                              "===================================",
                              "r/%d" % review_id,
                              "%s..%s" % (first_commit[:8], fourth_commit[:8]),
                              "%s,%s,%s" % (fourth_commit[:8],
                                            third_commit[:8],
                                            second_commit[:8])],
                             extension_lines)
        mailbox.pop(accept=[to("alice"),
                            about("Updated Review: %s" % SUMMARY)])
| StarcoderdataPython |
6614285 | import os
from vgazer.command import RunCommand
from vgazer.config.cmake import ConfigCmake
from vgazer.exceptions import CommandError
from vgazer.exceptions import InstallError
from vgazer.platform import GetArFullPath
from vgazer.platform import GetCc
from vgazer.platform import GetInstallPrefix
from vgazer.platform import GetSoPrefix
from vgazer.platform import GetSoFilename
from vgazer.store.temp import StoreTemp
from vgazer.working_dir import WorkingDir
def GetVersionFromSource(filename):
    """Parse an SDL_gpu.h-style header and return "major.minor.patch".

    The version number is taken as the third space-separated token of
    each "#define SDL_GPU_VERSION_*" line; the last occurrence wins.
    """
    with open(filename) as header:
        content = header.read()
    for line in content.splitlines():
        if "#define SDL_GPU_VERSION_MAJOR" in line:
            versionMajor = line.split(" ")[2]
        if "#define SDL_GPU_VERSION_MINOR" in line:
            versionMinor = line.split(" ")[2]
        if "#define SDL_GPU_VERSION_PATCH" in line:
            versionPatch = line.split(" ")[2]
    return "{major}.{minor}.{patch}".format(major=versionMajor,
                                            minor=versionMinor,
                                            patch=versionPatch)
def Install(auth, software, platform, platformData, mirrors, verbose):
    """Clone, build and install SDL2_gpu for the target platform.

    Generates a CMake cross file for the target, builds the bundled
    stb_image helper libraries, configures and builds SDL_gpu, then
    relocates the resulting shared/static libraries under the install
    prefix. Raises InstallError when any build step fails.
    """
    configCmake = ConfigCmake(platformData)
    configCmake.GenerateCrossFile()
    installPrefix = GetInstallPrefix(platformData)
    ar = GetArFullPath(platformData["target"])
    cc = GetCc(platformData["target"])
    # BUG FIX: this value was previously bound to a local named "os",
    # shadowing the imported os module and making every os.path.join()
    # call below fail with AttributeError.
    targetOs = platformData["target"].GetOs()
    soPrefix = GetSoPrefix(platformData)
    soFilename = GetSoFilename(platformData["target"], "SDL2_gpu")
    storeTemp = StoreTemp()
    storeTemp.ResolveEmptySubdirectory(software)
    tempPath = storeTemp.GetSubdirectoryPath(software)
    try:
        with WorkingDir(tempPath):
            RunCommand(
             ["git", "clone", "https://github.com/grimfang4/sdl-gpu.git",
              "sdl2-gpu"],
             verbose)
        clonedDir = os.path.join(tempPath, "sdl2-gpu")
        with WorkingDir(clonedDir):
            # Force static GLEW before link_libraries in upstream CMake.
            RunCommand(
             ["sed", "-i",
              "-e", '/\t\t\tlink_libraries (${GLEW_LIBRARIES})/i \t\t\tadd_definitions("-DGLEW_STATIC")',
              "./CMakeLists.txt"],
             verbose)
            RunCommand(["mkdir", "build"], verbose)
        sdlGpuHeader = os.path.join(clonedDir, "include/SDL_gpu.h")
        version = GetVersionFromSource(sdlGpuHeader)
        # NOTE(review): only linux/windows targets are handled; any other
        # target OS leaves soLibname/installedLibPrefix unset (NameError).
        if targetOs == "linux":
            soLibname = "libSDL2_gpu.so"
            installedLibPrefix = installPrefix + "/SDL_gpu-" + version + "/lib"
        elif targetOs == "windows":
            soLibname = "libSDL2_gpu.dll"
            installedLibPrefix = installPrefix + "/SDL_gpu-MINGW-" + version + "/lib"
        buildDir = os.path.join(clonedDir, "build")
        with WorkingDir(buildDir):
            # Build the bundled stb_image / stb_image_write helpers as
            # static libraries for CMake to pick up.
            RunCommand(
             [cc, "-c", "../src/externals/stb_image/stb_image.c",
              "-o", "../src/externals/stb_image/stb_image.o", "-O2", "-Wall",
              "-mmmx", "-msse", "-msse2", "-mfpmath=sse", "-fPIC",
              "-I" + installPrefix + "/include"],
             verbose)
            RunCommand(
             [ar, "rcs", "../src/externals/stb_image/libstbi.a",
              "../src/externals/stb_image/stb_image.o"],
             verbose)
            RunCommand(
             [cc, "-c", "../src/externals/stb_image_write/stb_image_write.c",
              "-o", "../src/externals/stb_image_write/stb_image_write.o",
              "-O2", "-Wall", "-mmmx", "-msse", "-msse2", "-mfpmath=sse",
              "-fPIC", "-I" + installPrefix + "/include"],
             verbose)
            RunCommand(
             [ar, "rcs", "../src/externals/stb_image_write/libstbi_write.a",
              "../src/externals/stb_image_write/stb_image_write.o"],
             verbose)
            RunCommand(
             [
              "cmake", "..", "-G", "Unix Makefiles",
              "-DCMAKE_TOOLCHAIN_FILE=" + configCmake.GetCrossFileName(),
              "-DCMAKE_INSTALL_PREFIX=" + installPrefix,
              "-DSDL_gpu_INSTALL=ON", "-DSDL_gpu_BUILD_DEMOS=OFF",
              "-DSDL_gpu_USE_SYSTEM_GLEW=ON",
              "-DSTBI_INCLUDE_DIR=" + installPrefix + "/include",
              "-DSTBI_LIBRARY=" + buildDir
               + "/../src/externals/stb_image/libstbi.a",
              "-DSTBI_FOUND=TRUE",
              "-DSTBI_WRITE_INCLUDE_DIR=" + installPrefix + "/include",
              "-DSTBI_WRITE_LIBRARY=" + buildDir
               + "/../src/externals/stb_image_write/libstbi_write.a",
              "-DSTBI_WRITE_FOUND=TRUE",
              "-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON", "-DCMAKE_AR=" + ar
             ],
             verbose)
            RunCommand(["make"], verbose)
            RunCommand(["make", "install"], verbose)
            # Relocate libraries to the common lib directories.
            RunCommand(
             ["mv", installedLibPrefix + "/" + soLibname,
              soPrefix + "/" + soFilename],
             verbose)
            RunCommand(
             ["mv", installedLibPrefix + "/libSDL2_gpu.a",
              installPrefix + "/lib/libSDL2_gpu.a"],
             verbose)
            # NOTE(review): this cleanup path matches the linux install
            # directory only (SDL_gpu-<version>) — confirm for windows.
            RunCommand(
             ["rm", "-rf", installPrefix + "/SDL_gpu-" + version],
             verbose)
    except CommandError:
        print("VGAZER: Unable to install", software)
        raise InstallError(software + " not installed")
    print("VGAZER:", software, "installed")
| StarcoderdataPython |
3494375 | import pytest
from mappgene import cli
def test_vpipe():
    # Placeholder smoke test: the real check is that importing mappgene.cli
    # at module load succeeded; the assertion itself always passes.
    assert True
1946340 | <filename>altgraph_tests/test_graph.py
import unittest
from altgraph import GraphError
from altgraph.Graph import Graph
class TestGraph(unittest.TestCase):
    def test_nodes(self):
        """Exercise node add/hide/restore bookkeeping on a Graph."""
        graph = Graph()
        self.assertEqual(graph.node_list(), [])
        o1 = object()
        o1b = object()
        o2 = object()
        graph.add_node(1, o1)
        # Re-adding an existing node must keep the original data object.
        graph.add_node(1, o1b)
        graph.add_node(2, o2)
        graph.add_node(3)
        # Unhashable node keys are rejected.
        self.assertRaises(TypeError, graph.add_node, [])
        self.assertTrue(graph.node_data(1) is o1)
        self.assertTrue(graph.node_data(2) is o2)
        self.assertTrue(graph.node_data(3) is None)
        self.assertTrue(1 in graph)
        self.assertTrue(2 in graph)
        self.assertTrue(3 in graph)
        self.assertEqual(graph.number_of_nodes(), 3)
        self.assertEqual(graph.number_of_hidden_nodes(), 0)
        self.assertEqual(graph.hidden_node_list(), [])
        self.assertEqual(list(sorted(graph)), [1, 2, 3])
        # Hiding moves nodes out of the visible graph without deleting them.
        graph.hide_node(1)
        graph.hide_node(2)
        graph.hide_node(3)
        self.assertEqual(graph.number_of_nodes(), 0)
        self.assertEqual(graph.number_of_hidden_nodes(), 3)
        self.assertEqual(list(sorted(graph.hidden_node_list())), [1, 2, 3])
        self.assertFalse(1 in graph)
        self.assertFalse(2 in graph)
        self.assertFalse(3 in graph)
        # Adding a hidden node does not make it visible again.
        graph.add_node(1)
        self.assertFalse(1 in graph)
        graph.restore_node(1)
        self.assertTrue(1 in graph)
        self.assertFalse(2 in graph)
        self.assertFalse(3 in graph)
        graph.restore_all_nodes()
        self.assertTrue(1 in graph)
        self.assertTrue(2 in graph)
        self.assertTrue(3 in graph)
        self.assertEqual(list(sorted(graph.node_list())), [1, 2, 3])
        v = graph.describe_node(1)
        self.assertEqual(v, (1, o1, [], []))
def test_edges(self):
graph = Graph()
graph.add_node(1)
graph.add_node(2)
graph.add_node(3)
graph.add_node(4)
graph.add_node(5)
self.assertTrue(isinstance(graph.edge_list(), list))
graph.add_edge(1, 2)
graph.add_edge(4, 5, "a")
self.assertRaises(GraphError, graph.add_edge, "a", "b", create_nodes=False)
self.assertEqual(graph.number_of_hidden_edges(), 0)
self.assertEqual(graph.number_of_edges(), 2)
self.assertEqual(graph.hidden_edge_list(), [])
e = graph.edge_by_node(1, 2)
self.assertTrue(isinstance(e, int))
graph.hide_edge(e)
self.assertEqual(graph.number_of_hidden_edges(), 1)
self.assertEqual(graph.number_of_edges(), 1)
self.assertEqual(graph.hidden_edge_list(), [e])
e2 = graph.edge_by_node(1, 2)
self.assertTrue(e2 is None)
graph.restore_edge(e)
e2 = graph.edge_by_node(1, 2)
self.assertEqual(e, e2)
self.assertEqual(graph.number_of_hidden_edges(), 0)
self.assertEqual(graph.number_of_edges(), 2)
e1 = graph.edge_by_node(1, 2)
e2 = graph.edge_by_node(4, 5)
graph.hide_edge(e1)
graph.hide_edge(e2)
self.assertEqual(graph.number_of_edges(), 0)
graph.restore_all_edges()
self.assertEqual(graph.number_of_edges(), 2)
self.assertEqual(graph.edge_by_id(e1), (1, 2))
self.assertRaises(GraphError, graph.edge_by_id, (e1 + 1) * (e2 + 1) + 1)
self.assertEqual(list(sorted(graph.edge_list())), [e1, e2])
self.assertEqual(graph.describe_edge(e1), (e1, 1, 1, 2))
self.assertEqual(graph.describe_edge(e2), (e2, "a", 4, 5))
self.assertEqual(graph.edge_data(e1), 1)
self.assertEqual(graph.edge_data(e2), "a")
self.assertEqual(graph.head(e2), 4)
self.assertEqual(graph.tail(e2), 5)
graph.add_edge(1, 3)
graph.add_edge(1, 5)
graph.add_edge(4, 1)
self.assertEqual(list(sorted(graph.out_nbrs(1))), [2, 3, 5])
self.assertEqual(list(sorted(graph.inc_nbrs(1))), [4])
self.assertEqual(list(sorted(graph.inc_nbrs(5))), [1, 4])
self.assertEqual(list(sorted(graph.all_nbrs(1))), [2, 3, 4, 5])
graph.add_edge(5, 1)
self.assertEqual(list(sorted(graph.all_nbrs(5))), [1, 4])
self.assertEqual(graph.out_degree(1), 3)
self.assertEqual(graph.inc_degree(2), 1)
self.assertEqual(graph.inc_degree(5), 2)
self.assertEqual(graph.all_degree(5), 3)
v = graph.out_edges(4)
self.assertTrue(isinstance(v, list))
self.assertEqual(graph.edge_by_id(v[0]), (4, 5))
v = graph.out_edges(1)
for e in v:
self.assertEqual(graph.edge_by_id(e)[0], 1)
v = graph.inc_edges(1)
self.assertTrue(isinstance(v, list))
self.assertEqual(graph.edge_by_id(v[0]), (4, 1))
v = graph.inc_edges(5)
for e in v:
self.assertEqual(graph.edge_by_id(e)[1], 5)
v = graph.all_edges(5)
for e in v:
self.assertTrue(graph.edge_by_id(e)[1] == 5 or graph.edge_by_id(e)[0] == 5)
e1 = graph.edge_by_node(1, 2)
self.assertTrue(isinstance(e1, int))
graph.hide_node(1)
self.assertRaises(GraphError, graph.edge_by_node, 1, 2)
graph.restore_node(1)
e2 = graph.edge_by_node(1, 2)
self.assertEqual(e1, e2)
self.assertRaises(GraphError, graph.hide_edge, "foo")
self.assertRaises(GraphError, graph.hide_node, "foo")
self.assertRaises(GraphError, graph.inc_edges, "foo")
self.assertEqual(repr(graph), "<Graph: 5 nodes, 6 edges>")
def test_toposort(self):
graph = Graph()
graph.add_node(1)
graph.add_node(2)
graph.add_node(3)
graph.add_node(4)
graph.add_node(5)
graph.add_edge(1, 2)
graph.add_edge(1, 3)
graph.add_edge(2, 4)
graph.add_edge(3, 5)
ok, result = graph.forw_topo_sort()
self.assertTrue(ok)
for idx in range(1, 6):
self.assertTrue(idx in result)
self.assertTrue(result.index(1) < result.index(2))
self.assertTrue(result.index(1) < result.index(3))
self.assertTrue(result.index(2) < result.index(4))
self.assertTrue(result.index(3) < result.index(5))
ok, result = graph.back_topo_sort()
self.assertTrue(ok)
for idx in range(1, 6):
self.assertTrue(idx in result)
self.assertTrue(result.index(2) < result.index(1))
self.assertTrue(result.index(3) < result.index(1))
self.assertTrue(result.index(4) < result.index(2))
self.assertTrue(result.index(5) < result.index(3))
# Same graph as before, but with edges
# reversed, which means we should get
# the same results as before if using
# back_topo_sort rather than forw_topo_sort
# (and v.v.)
graph = Graph()
graph.add_node(1)
graph.add_node(2)
graph.add_node(3)
graph.add_node(4)
graph.add_node(5)
graph.add_edge(2, 1)
graph.add_edge(3, 1)
graph.add_edge(4, 2)
graph.add_edge(5, 3)
ok, result = graph.back_topo_sort()
self.assertTrue(ok)
for idx in range(1, 6):
self.assertTrue(idx in result)
self.assertTrue(result.index(1) < result.index(2))
self.assertTrue(result.index(1) < result.index(3))
self.assertTrue(result.index(2) < result.index(4))
self.assertTrue(result.index(3) < result.index(5))
ok, result = graph.forw_topo_sort()
self.assertTrue(ok)
for idx in range(1, 6):
self.assertTrue(idx in result)
self.assertTrue(result.index(2) < result.index(1))
self.assertTrue(result.index(3) < result.index(1))
self.assertTrue(result.index(4) < result.index(2))
self.assertTrue(result.index(5) < result.index(3))
# Create a cycle
graph.add_edge(1, 5)
ok, result = graph.forw_topo_sort()
self.assertFalse(ok)
ok, result = graph.back_topo_sort()
self.assertFalse(ok)
def test_bfs_subgraph(self):
graph = Graph()
graph.add_edge(1, 2)
graph.add_edge(1, 4)
graph.add_edge(2, 4)
graph.add_edge(4, 8)
graph.add_edge(4, 9)
graph.add_edge(4, 10)
graph.add_edge(8, 10)
subgraph = graph.forw_bfs_subgraph(10)
self.assertTrue(isinstance(subgraph, Graph))
self.assertEqual(subgraph.number_of_nodes(), 1)
self.assertTrue(10 in subgraph)
self.assertEqual(subgraph.number_of_edges(), 0)
subgraph = graph.forw_bfs_subgraph(4)
self.assertTrue(isinstance(subgraph, Graph))
self.assertEqual(subgraph.number_of_nodes(), 4)
self.assertTrue(4 in subgraph)
self.assertTrue(8 in subgraph)
self.assertTrue(9 in subgraph)
self.assertTrue(10 in subgraph)
self.assertEqual(subgraph.number_of_edges(), 4)
e = subgraph.edge_by_node(4, 8)
e = subgraph.edge_by_node(4, 9)
e = subgraph.edge_by_node(4, 10)
e = subgraph.edge_by_node(8, 10)
# same graph as before, but switch around
# edges. This results in the same test results
# but now for back_bfs_subgraph rather than
# forw_bfs_subgraph
graph = Graph()
graph.add_edge(2, 1)
graph.add_edge(4, 1)
graph.add_edge(4, 2)
graph.add_edge(8, 4)
graph.add_edge(9, 4)
graph.add_edge(10, 4)
graph.add_edge(10, 8)
subgraph = graph.back_bfs_subgraph(10)
self.assertTrue(isinstance(subgraph, Graph))
self.assertEqual(subgraph.number_of_nodes(), 1)
self.assertTrue(10 in subgraph)
self.assertEqual(subgraph.number_of_edges(), 0)
subgraph = graph.back_bfs_subgraph(4)
self.assertTrue(isinstance(subgraph, Graph))
self.assertEqual(subgraph.number_of_nodes(), 4)
self.assertTrue(4 in subgraph)
self.assertTrue(8 in subgraph)
self.assertTrue(9 in subgraph)
self.assertTrue(10 in subgraph)
self.assertEqual(subgraph.number_of_edges(), 4)
e = subgraph.edge_by_node(4, 8)
e = subgraph.edge_by_node(4, 9)
e = subgraph.edge_by_node(4, 10)
e = subgraph.edge_by_node(8, 10)
def test_bfs_subgraph_does_not_reverse_egde_direction(self):
graph = Graph()
graph.add_node("A")
graph.add_node("B")
graph.add_node("C")
graph.add_edge("A", "B")
graph.add_edge("B", "C")
whole_graph = graph.forw_topo_sort()
subgraph_backward = graph.back_bfs_subgraph("C")
subgraph_backward = subgraph_backward.forw_topo_sort()
self.assertEqual(whole_graph, subgraph_backward)
subgraph_forward = graph.forw_bfs_subgraph("A")
subgraph_forward = subgraph_forward.forw_topo_sort()
self.assertEqual(whole_graph, subgraph_forward)
def test_iterdfs(self):
graph = Graph()
graph.add_edge("1", "1.1")
graph.add_edge("1", "1.2")
graph.add_edge("1", "1.3")
graph.add_edge("1.1", "1.1.1")
graph.add_edge("1.1", "1.1.2")
graph.add_edge("1.2", "1.2.1")
graph.add_edge("1.2", "1.2.2")
graph.add_edge("1.2.2", "1.2.2.1")
graph.add_edge("1.2.2", "1.2.2.2")
graph.add_edge("1.2.2", "1.2.2.3")
result = list(graph.iterdfs("1"))
self.assertEqual(
result,
[
"1",
"1.3",
"1.2",
"1.2.2",
"1.2.2.3",
"1.2.2.2",
"1.2.2.1",
"1.2.1",
"1.1",
"1.1.2",
"1.1.1",
],
)
result = list(graph.iterdfs("1", "1.2.1"))
self.assertEqual(
result,
["1", "1.3", "1.2", "1.2.2", "1.2.2.3", "1.2.2.2", "1.2.2.1", "1.2.1"],
)
result = graph.forw_dfs("1")
self.assertEqual(
result,
[
"1",
"1.3",
"1.2",
"1.2.2",
"1.2.2.3",
"1.2.2.2",
"1.2.2.1",
"1.2.1",
"1.1",
"1.1.2",
"1.1.1",
],
)
result = graph.forw_dfs("1", "1.2.1")
self.assertEqual(
result,
["1", "1.3", "1.2", "1.2.2", "1.2.2.3", "1.2.2.2", "1.2.2.1", "1.2.1"],
)
graph = Graph()
graph.add_edge("1.1", "1")
graph.add_edge("1.2", "1")
graph.add_edge("1.3", "1")
graph.add_edge("1.1.1", "1.1")
graph.add_edge("1.1.2", "1.1")
graph.add_edge("1.2.1", "1.2")
graph.add_edge("1.2.2", "1.2")
graph.add_edge("1.2.2.1", "1.2.2")
graph.add_edge("1.2.2.2", "1.2.2")
graph.add_edge("1.2.2.3", "1.2.2")
result = list(graph.iterdfs("1", forward=False))
self.assertEqual(
result,
[
"1",
"1.3",
"1.2",
"1.2.2",
"1.2.2.3",
"1.2.2.2",
"1.2.2.1",
"1.2.1",
"1.1",
"1.1.2",
"1.1.1",
],
)
result = list(graph.iterdfs("1", "1.2.1", forward=False))
self.assertEqual(
result,
["1", "1.3", "1.2", "1.2.2", "1.2.2.3", "1.2.2.2", "1.2.2.1", "1.2.1"],
)
result = graph.back_dfs("1")
self.assertEqual(
result,
[
"1",
"1.3",
"1.2",
"1.2.2",
"1.2.2.3",
"1.2.2.2",
"1.2.2.1",
"1.2.1",
"1.1",
"1.1.2",
"1.1.1",
],
)
result = graph.back_dfs("1", "1.2.1")
self.assertEqual(
result,
["1", "1.3", "1.2", "1.2.2", "1.2.2.3", "1.2.2.2", "1.2.2.1", "1.2.1"],
)
# Introduce cyle:
graph.add_edge("1", "1.2")
result = list(graph.iterdfs("1", forward=False))
self.assertEqual(
result,
[
"1",
"1.3",
"1.2",
"1.2.2",
"1.2.2.3",
"1.2.2.2",
"1.2.2.1",
"1.2.1",
"1.1",
"1.1.2",
"1.1.1",
],
)
result = graph.back_dfs("1")
self.assertEqual(
result,
[
"1",
"1.3",
"1.2",
"1.2.2",
"1.2.2.3",
"1.2.2.2",
"1.2.2.1",
"1.2.1",
"1.1",
"1.1.2",
"1.1.1",
],
)
def test_iterdata(self):
graph = Graph()
graph.add_node("1", "I")
graph.add_node("1.1", "I.I")
graph.add_node("1.2", "I.II")
graph.add_node("1.3", "I.III")
graph.add_node("1.1.1", "I.I.I")
graph.add_node("1.1.2", "I.I.II")
graph.add_node("1.2.1", "I.II.I")
graph.add_node("1.2.2", "I.II.II")
graph.add_node("1.2.2.1", "I.II.II.I")
graph.add_node("1.2.2.2", "I.II.II.II")
graph.add_node("1.2.2.3", "I.II.II.III")
graph.add_edge("1", "1.1")
graph.add_edge("1", "1.2")
graph.add_edge("1", "1.3")
graph.add_edge("1.1", "1.1.1")
graph.add_edge("1.1", "1.1.2")
graph.add_edge("1.2", "1.2.1")
graph.add_edge("1.2", "1.2.2")
graph.add_edge("1.2.2", "1.2.2.1")
graph.add_edge("1.2.2", "1.2.2.2")
graph.add_edge("1.2.2", "1.2.2.3")
result = list(graph.iterdata("1", forward=True))
self.assertEqual(
result,
[
"I",
"I.III",
"I.II",
"I.II.II",
"I.II.II.III",
"I.II.II.II",
"I.II.II.I",
"I.II.I",
"I.I",
"I.I.II",
"I.I.I",
],
)
result = list(graph.iterdata("1", end="1.2.1", forward=True))
self.assertEqual(
result,
[
"I",
"I.III",
"I.II",
"I.II.II",
"I.II.II.III",
"I.II.II.II",
"I.II.II.I",
"I.II.I",
],
)
result = list(graph.iterdata("1", condition=lambda n: len(n) < 6, forward=True))
self.assertEqual(result, ["I", "I.III", "I.II", "I.I", "I.I.I"])
# And the revese option:
graph = Graph()
graph.add_node("1", "I")
graph.add_node("1.1", "I.I")
graph.add_node("1.2", "I.II")
graph.add_node("1.3", "I.III")
graph.add_node("1.1.1", "I.I.I")
graph.add_node("1.1.2", "I.I.II")
graph.add_node("1.2.1", "I.II.I")
graph.add_node("1.2.2", "I.II.II")
graph.add_node("1.2.2.1", "I.II.II.I")
graph.add_node("1.2.2.2", "I.II.II.II")
graph.add_node("1.2.2.3", "I.II.II.III")
graph.add_edge("1.1", "1")
graph.add_edge("1.2", "1")
graph.add_edge("1.3", "1")
graph.add_edge("1.1.1", "1.1")
graph.add_edge("1.1.2", "1.1")
graph.add_edge("1.2.1", "1.2")
graph.add_edge("1.2.2", "1.2")
graph.add_edge("1.2.2.1", "1.2.2")
graph.add_edge("1.2.2.2", "1.2.2")
graph.add_edge("1.2.2.3", "1.2.2")
result = list(graph.iterdata("1", forward=False))
self.assertEqual(
result,
[
"I",
"I.III",
"I.II",
"I.II.II",
"I.II.II.III",
"I.II.II.II",
"I.II.II.I",
"I.II.I",
"I.I",
"I.I.II",
"I.I.I",
],
)
result = list(graph.iterdata("1", end="1.2.1", forward=False))
self.assertEqual(
result,
[
"I",
"I.III",
"I.II",
"I.II.II",
"I.II.II.III",
"I.II.II.II",
"I.II.II.I",
"I.II.I",
],
)
result = list(
graph.iterdata("1", condition=lambda n: len(n) < 6, forward=False)
)
self.assertEqual(result, ["I", "I.III", "I.II", "I.I", "I.I.I"])
def test_bfs(self):
graph = Graph()
graph.add_edge("1", "1.1")
graph.add_edge("1.1", "1.1.1")
graph.add_edge("1.1", "1.1.2")
graph.add_edge("1.1.2", "1.1.2.1")
graph.add_edge("1.1.2", "1.1.2.2")
graph.add_edge("1", "1.2")
graph.add_edge("1", "1.3")
graph.add_edge("1.2", "1.2.1")
self.assertEqual(
graph.forw_bfs("1"),
["1", "1.1", "1.2", "1.3", "1.1.1", "1.1.2", "1.2.1", "1.1.2.1", "1.1.2.2"],
)
self.assertEqual(
graph.forw_bfs("1", "1.1.1"), ["1", "1.1", "1.2", "1.3", "1.1.1"]
)
# And the "reverse" graph
graph = Graph()
graph.add_edge("1.1", "1")
graph.add_edge("1.1.1", "1.1")
graph.add_edge("1.1.2", "1.1")
graph.add_edge("1.1.2.1", "1.1.2")
graph.add_edge("1.1.2.2", "1.1.2")
graph.add_edge("1.2", "1")
graph.add_edge("1.3", "1")
graph.add_edge("1.2.1", "1.2")
self.assertEqual(
graph.back_bfs("1"),
["1", "1.1", "1.2", "1.3", "1.1.1", "1.1.2", "1.2.1", "1.1.2.1", "1.1.2.2"],
)
self.assertEqual(
graph.back_bfs("1", "1.1.1"), ["1", "1.1", "1.2", "1.3", "1.1.1"]
)
# check cycle handling
graph.add_edge("1", "1.2.1")
self.assertEqual(
graph.back_bfs("1"),
["1", "1.1", "1.2", "1.3", "1.1.1", "1.1.2", "1.2.1", "1.1.2.1", "1.1.2.2"],
)
def test_connected(self):
graph = Graph()
graph.add_node(1)
graph.add_node(2)
graph.add_node(3)
graph.add_node(4)
self.assertFalse(graph.connected())
graph.add_edge(1, 2)
graph.add_edge(3, 4)
self.assertFalse(graph.connected())
graph.add_edge(2, 3)
graph.add_edge(4, 1)
self.assertTrue(graph.connected())
def test_edges_complex(self):
g = Graph()
g.add_edge(1, 2)
e = g.edge_by_node(1, 2)
g.hide_edge(e)
g.hide_node(2)
self.assertRaises(GraphError, g.restore_edge, e)
g.restore_all_edges()
self.assertRaises(GraphError, g.edge_by_id, e)
def test_clust_coef(self):
g = Graph()
g.add_edge(1, 2)
g.add_edge(1, 3)
g.add_edge(1, 4)
self.assertEqual(g.clust_coef(1), 0)
g.add_edge(2, 5)
g.add_edge(3, 5)
g.add_edge(4, 5)
self.assertEqual(g.clust_coef(1), 0)
g.add_edge(2, 3)
self.assertEqual(g.clust_coef(1), 1.0 / 6)
g.add_edge(2, 4)
self.assertEqual(g.clust_coef(1), 2.0 / 6)
g.add_edge(4, 2)
self.assertEqual(g.clust_coef(1), 3.0 / 6)
g.add_edge(2, 3)
g.add_edge(2, 4)
g.add_edge(3, 4)
g.add_edge(3, 2)
g.add_edge(4, 2)
g.add_edge(4, 3)
self.assertEqual(g.clust_coef(1), 1)
g.add_edge(1, 1)
self.assertEqual(g.clust_coef(1), 1)
g.add_edge(2, 2)
self.assertEqual(g.clust_coef(1), 1)
g.add_edge(99, 99)
self.assertEqual(g.clust_coef(99), 0.0)
def test_get_hops(self):
graph = Graph()
graph.add_edge(1, 2)
graph.add_edge(1, 3)
graph.add_edge(2, 4)
graph.add_edge(4, 5)
graph.add_edge(5, 7)
graph.add_edge(7, 8)
self.assertEqual(
graph.get_hops(1), [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)]
)
self.assertEqual(graph.get_hops(1, 5), [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3)])
graph.add_edge(5, 1)
graph.add_edge(7, 1)
graph.add_edge(7, 4)
self.assertEqual(
graph.get_hops(1), [(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)]
)
# And the reverse graph
graph = Graph()
graph.add_edge(2, 1)
graph.add_edge(3, 1)
graph.add_edge(4, 2)
graph.add_edge(5, 4)
graph.add_edge(7, 5)
graph.add_edge(8, 7)
self.assertEqual(
graph.get_hops(1, forward=False),
[(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)],
)
self.assertEqual(
graph.get_hops(1, 5, forward=False),
[(1, 0), (2, 1), (3, 1), (4, 2), (5, 3)],
)
graph.add_edge(1, 5)
graph.add_edge(1, 7)
graph.add_edge(4, 7)
self.assertEqual(
graph.get_hops(1, forward=False),
[(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)],
)
def test_constructor(self):
graph = Graph(iter([(1, 2), (2, 3, "a"), (1, 3), (3, 4)]))
self.assertEqual(graph.number_of_nodes(), 4)
self.assertEqual(graph.number_of_edges(), 4)
try:
graph.edge_by_node(1, 2)
graph.edge_by_node(2, 3)
graph.edge_by_node(1, 3)
graph.edge_by_node(3, 4)
except GraphError:
self.fail("Incorrect graph")
self.assertEqual(graph.edge_data(graph.edge_by_node(2, 3)), "a")
self.assertRaises(GraphError, Graph, [(1, 2, 3, 4)])
if __name__ == "__main__":  # pragma: no cover
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| StarcoderdataPython |
4917609 | ''' probables utilitites tests '''
from __future__ import (unicode_literals, absolute_import, print_function)
import unittest
import os
from probables.utilities import (is_hex_string, is_valid_file, get_x_bits)
from . utilities import (different_hash)
class TestProbablesUtilities(unittest.TestCase):
    ''' test the utilities for pyprobables '''

    def test_is_hex(self):
        ''' test the is valid hex function '''
        self.assertTrue(is_hex_string('1234678909abcdef'))
        self.assertTrue(is_hex_string('1234678909ABCDEF'))
        # 'q' is not a hex digit, in either case
        self.assertFalse(is_hex_string('1234678909abcdfq'))
        self.assertFalse(is_hex_string('1234678909ABCDEFQ'))

    def test_is_valid_file(self):
        ''' test the is valid file function '''
        self.assertFalse(is_valid_file(None))
        self.assertFalse(is_valid_file('./file_doesnt_exist.txt'))
        filename = './create_this_file.txt'
        # create an empty file to satisfy the positive case, remove it after
        with open(filename, 'w'):
            pass
        self.assertTrue(is_valid_file(filename))
        os.remove(filename)

    def test_get_x_bits(self):
        ''' test the get x bits function '''
        # per these assertions, the boolean selects which half of the 4-bit
        # value is returned: True -> low 2 bits, False -> high 2 bits
        for i in range(8):
            res = get_x_bits(i, 4, 2, True)
            self.assertEqual(res, i % 4)
        for i in range(8):
            res = get_x_bits(i, 4, 2, False)
            if i < 4:
                self.assertEqual(res, 0)
            else:
                self.assertEqual(res, 1)

    def test_get_x_bits_large(self):
        ''' test it on much larger numbers '''
        res = different_hash('this is a test', 1)[0]
        # binary representation of the 64-bit hash under test:
        # 1010100101011011100100010101010011110000001010011010000101001011
        tmp1 = get_x_bits(res, 64, 32, True)
        tmp2 = get_x_bits(res, 64, 32, False)
        self.assertEqual(4029260107, tmp1)
        self.assertEqual(2841350484, tmp2)
        tmp1 = get_x_bits(res, 64, 16, True)
        tmp2 = get_x_bits(res, 64, 16, False)
        self.assertEqual(41291, tmp1)
        self.assertEqual(43355, tmp2)
        tmp1 = get_x_bits(res, 64, 8, True)
        tmp2 = get_x_bits(res, 64, 8, False)
        self.assertEqual(75, tmp1)
        self.assertEqual(169, tmp2)
        tmp1 = get_x_bits(res, 64, 4, True)
        tmp2 = get_x_bits(res, 64, 4, False)
        self.assertEqual(11, tmp1)
        self.assertEqual(10, tmp2)
        tmp1 = get_x_bits(res, 64, 2, True)
        tmp2 = get_x_bits(res, 64, 2, False)
        self.assertEqual(3, tmp1)
        self.assertEqual(2, tmp2)
        tmp1 = get_x_bits(res, 64, 1, True)
        tmp2 = get_x_bits(res, 64, 1, False)
        self.assertEqual(1, tmp1)
        self.assertEqual(1, tmp2)
| StarcoderdataPython |
9783131 | <reponame>Thanaporn09/Cancer_self_transformer
# Copyright (c) OpenMMLab. All rights reserved.
import math
from collections import OrderedDict
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import build_activation_layer
from mmcv.cnn.utils.weight_init import trunc_normal_
from mmcv.runner import Sequential
from ..builder import HEADS
from .cls_head import ClsHead
@HEADS.register_module()
class VisionTransformerClsHead(ClsHead):
    """Vision Transformer classifier head.

    Classifies from the cls token produced by a ViT backbone, optionally
    passing it through a hidden "pre-logits" layer first (pre-training mode).

    Args:
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
        hidden_dim (int): Number of the dimensions for hidden layer. Only
            available during pre-training. Default None.
        act_cfg (dict): The activation config. Only available during
            pre-training. Defaults to Tanh.
    """

    def __init__(self,
                 num_classes,
                 in_channels,
                 hidden_dim=None,
                 act_cfg=dict(type='Tanh'),
                 init_cfg=dict(type='Constant', layer='Linear', val=0),
                 *args,
                 **kwargs):
        super(VisionTransformerClsHead, self).__init__(
            init_cfg=init_cfg, *args, **kwargs)
        self.in_channels = in_channels
        self.num_classes = num_classes
        self.hidden_dim = hidden_dim
        self.act_cfg = act_cfg

        if self.num_classes <= 0:
            raise ValueError(
                f'num_classes={num_classes} must be a positive integer')

        self._init_layers()

    def _init_layers(self):
        """Build a bare linear head, or pre_logits + activation + head."""
        if self.hidden_dim is None:
            layers = [('head', nn.Linear(self.in_channels, self.num_classes))]
        else:
            layers = [
                ('pre_logits', nn.Linear(self.in_channels, self.hidden_dim)),
                ('act', build_activation_layer(self.act_cfg)),
                ('head', nn.Linear(self.hidden_dim, self.num_classes)),
            ]
        self.layers = Sequential(OrderedDict(layers))

    def init_weights(self):
        super(VisionTransformerClsHead, self).init_weights()
        # Modified from ClassyVision
        if hasattr(self.layers, 'pre_logits'):
            # Lecun norm
            trunc_normal_(
                self.layers.pre_logits.weight,
                std=math.sqrt(1 / self.layers.pre_logits.in_features))
            nn.init.zeros_(self.layers.pre_logits.bias)

    def pre_logits(self, x):
        """Extract the cls token (of the last stage) and optionally apply the
        pre-logits hidden layer + activation."""
        if isinstance(x, tuple):
            # multi-stage input: classify from the last stage only
            x = x[-1]
        _, cls_token = x
        # fix: removed leftover debug `print(cls_token.shape)` that ran on
        # every forward pass
        if self.hidden_dim is None:
            return cls_token
        else:
            x = self.layers.pre_logits(cls_token)
            return self.layers.act(x)

    def simple_test(self, x, softmax=True, post_process=True):
        """Inference without augmentation.

        Args:
            x (tuple[tuple[tensor, tensor]]): The input features.
                Multi-stage inputs are acceptable but only the last stage will
                be used to classify. Every item should be a tuple which
                includes patch token and cls token. The cls token will be used
                to classify and the shape of it should be
                ``(num_samples, in_channels)``.
            softmax (bool): Whether to softmax the classification score.
            post_process (bool): Whether to do post processing the
                inference results. It will convert the output to a list.

        Returns:
            Tensor | list: The inference results.

                - If no post processing, the output is a tensor with shape
                  ``(num_samples, num_classes)``.
                - If post processing, the output is a multi-dimensional list of
                  float and the dimensions are ``(num_samples, num_classes)``.
        """
        x = self.pre_logits(x)
        cls_score = self.layers.head(x)

        if softmax:
            pred = (
                F.softmax(cls_score, dim=1) if cls_score is not None else None)
        else:
            pred = cls_score

        if post_process:
            return self.post_process(pred)
        else:
            return pred

    def forward_train(self, x, gt_label, **kwargs):
        x = self.pre_logits(x)
        cls_score = self.layers.head(x)
        losses = self.loss(cls_score, gt_label, **kwargs)
        return losses
| StarcoderdataPython |
6490707 | <filename>testdata/simple.py<gh_stars>100-1000
from foo import bar
def f(self, a, b):
    # Minimal fixture function (testdata/simple.py) used by analysis tests;
    # it only prints a greeting.
    print('hello!')
| StarcoderdataPython |
37284 | <reponame>wangyibin/biowy
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time: 2019/1/8 16:41
from bioway.apps.base import dmain
if __name__ == "__main__":
    # Delegate to the bioway CLI dispatcher when executed as a script.
    # Fix: the original line had the dataset marker "| StarcoderdataPython |"
    # fused onto it, which made the call a NameError at runtime.
    dmain()
3500323 | """Linked List Cycle II
https://www.lintcode.com/problem/linked-list-cycle-ii/description
"""
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
    """
    @param head: The first node of linked list.
    @return: The node where the cycle begins. if there is no cycle, return null
    """

    def detectCycle(self, head):
        """Floyd's tortoise-and-hare cycle detection.

        Phase 1: advance a slow pointer by one and a fast pointer by two; they
        meet inside the cycle iff one exists.  Phase 2: restart one pointer
        from the head; moving both one step at a time, they meet exactly at
        the node where the cycle begins.
        """
        tortoise = hare = head
        met = False
        while hare and hare.next:
            hare = hare.next.next
            tortoise = tortoise.next
            if hare == tortoise:
                met = True
                break
        if not met:
            # fast pointer fell off the end: the list is acyclic
            return None
        walker = head
        while walker != tortoise:
            walker = walker.next
            tortoise = tortoise.next
        return walker
| StarcoderdataPython |
212831 | #!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: <NAME>
# Date: 2016-09-29 15:00:36 +0100 (Thu, 29 Sep 2016)
#
# https://github.com/harisekhon/devops-python-tools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help steer this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
r"""
Tool to return the first available healthy server or active master from a given list
Configurable test criteria: TCP socket, HTTP, HTTPS, Ping, URL with optional Regex content match.
Can mix and match between a comma separated list of hosts (--host server1,server2,server3... or contents of the $HOST
environment variable if not specified) and general free-form space separated arguments, which is useful if piping
a host list through xargs.
Multi-threaded for speed and exits upon first available host response to minimize delay to ~ 1 second or less.
Useful for pre-determining a server to be passed to tools that only take a single --host argument but for which the
technology has later added multi-master support or active-standby masters (eg. Hadoop, HBase) or where you want to
query cluster wide information available from any online peer (eg. Elasticsearch).
Examples:
Return first web server to respond:
cat host_list.txt | xargs ./find_active_server.py --http
More specific examples follow, and they have more specialised subclassed programs in each example so you don't need
to use all the switches from find_active_server.py
Target a Nagios Plugin to first available cluster node, eg. Elasticsearch check from Advanced Nagios Plugins Collection:
./check_elasticsearch_cluster_status.pl --host $(./find_active_server.py -v --http --port 9200 node1 node2 node3)
./find_active_elasticsearch_node.py node1 node2 node3
Find a SolrCloud node:
./find_active_server.py --http --url /solr/ --regex 'Solr Admin' node1 node2
./find_active_solrcloud_node.py node1 node2
Find the active Hadoop NameNode in a High Availability cluster:
./find_active_server.py --http --port 50070 \
--url 'jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus' \
--regex '"State"\s*:\s*"active"' \
namenode1 namenode2
./find_active_hadoop_namenode.py namenode1 namenode2
Find the active Hadoop Yarn Resource Manager in a High Availability cluster:
./find_active_server.py --http --port 8088 \
--url /ws/v1/cluster \
--regex '"haState"\s*:\s*"ACTIVE"' \
resourcemanager1 resourcemanager2
./find_active_hadoop_yarn_resource_manager.py resourcemanager1 resourcemanager2
Find the active HBase Master in a High Availability cluster:
./find_active_server.py --http --port 16010 \
--url '/jmx?qry=Hadoop:service=HBase,name=Master,sub=Server' \
--regex '"tag.isActiveMaster" : "true"' \
hmaster1 hmaster2
./find_active_hbase_master.py hmaster1 hmaster2
By default checks the same --port on all servers. Hosts may have optional :<port> suffixes added to individually
override each one.
Exits with return code 1 and NO_AVAILABLE_SERVER if none of the supplied servers pass the test criteria,
--quiet mode will return blank output and exit code 1 in that case.
See also Advanced HAProxy configurations (part of the Advanced Nagios Plugins Collection) at:
https://github.com/harisekhon/haproxy-configs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#from __future__ import unicode_literals
import os
import platform
import re
import socket
import subprocess
import sys
#from threading import Thread
from multiprocessing.pool import ThreadPool
from multiprocessing import cpu_count
# prefer blocking semantics of que.get() rather than handling deque.popleft() => 'IndexError: pop from an empty deque'
#from collections import deque
import traceback
from random import shuffle
# Python 2 Queue vs Python 3 queue module :-/
if sys.version[0] == '2':
import Queue as queue # pylint: disable=import-error
else:
import queue as queue # pylint: disable=import-error
try:
# false positive from pylint, queue is imported first
import requests # pylint: disable=wrong-import-order
except ImportError:
print(traceback.format_exc(), end='')
sys.exit(4)
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
from harisekhon.utils import log, log_option, die, code_error, uniq_list_ordered
from harisekhon.utils import validate_hostport_list, validate_port, validate_int, validate_regex
from harisekhon.utils import isPort, isInt, isStr, isTuple, UnknownError
from harisekhon import CLI
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = '<NAME>'
__version__ = '0.8.6'
class FindActiveServer(CLI):
    def __init__(self):
        """Initialise defaults: port 80, protocol unset, result queue and
        thread pool sizing (4x cores, capped at 100)."""
        # Python 2.x
        super(FindActiveServer, self).__init__()
        # Python 3.x
        # super().__init__()
        self.host_list = []
        self.default_port = 80
        self.port = self.default_port
        self.protocol = None  # resolved later to 'http' / 'https' / 'ping' or left None for raw socket test
        self.url_path = None
        self.regex = None
        self.request_timeout = None
        # cap so very large boxes don't spawn hundreds of threads
        self.default_num_threads = min(cpu_count() * 4, 100)
        self.num_threads = None
        self.queue = queue.Queue()  # worker threads push their check results here
        self.pool = None  # ThreadPool, created in run()
    def add_options(self):
        """Define CLI options: host/port plus the check type (--ping / --http /
        --https) and optional --url / --regex content matching."""
        self.add_hostoption(name='', default_port=self.default_port)
        self.add_opt('-p', '--ping', action='store_true', help='Ping the server only, no socket connection')
        self.add_opt('-w', '--http', action='store_true',
                     help='Fetch web page over HTTP protocol instead of doing a socket test')
        self.add_opt('-s', '--https', action='store_true',
                     help='Fetch web page over HTTPS protocol instead of doing a socket test ' +
                     '(overrides --http, changes port 80 to 443)')
        self.add_opt('-u', '--url', help='URL path to fetch (implies --http)')
        self.add_opt('-r', '--regex',
                     help='Regex to search for in http content (optional). Case sensitive by default ' + \
                          'for better targeting, wrap with (?i:...) modifier for case insensitivity')
        self.add_common_opts()
# only here for subclassed programs convenience
    def add_ssl_opt(self):
        """Define the --ssl switch (intended for subclassed finder programs)."""
        self.add_opt('-S', '--ssl', action='store_true', help='Use SSL')
def add_common_opts(self):
if self.is_option_defined('ssl'):
if self.get_opt('ssl'):
self.protocol = 'https'
log_option('SSL', 'true')
else:
log_option('SSL', 'false')
self.add_opt('-q', '--quiet', action='store_true', help='Returns no output instead of NO_AVAILABLE_SERVER '\
+ '(convenience for scripting)')
self.add_opt('-n', '--num-threads', default=self.default_num_threads, type='int',
help='Number or parallel threads to speed up processing ' + \
'(default is 4 times number of cores: {}), '.format(self.default_num_threads) + \
'use -n=1 for deterministic host preference order [slower])')
self.add_opt('-T', '--request-timeout', metavar='secs', type='int', default=os.getenv('REQUEST_TIMEOUT', 2),
help='Timeout for each individual server request in seconds ($REQUEST_TIMEOUT, default: 2 secs)')
self.add_opt('-R', '--random', action='store_true', help='Randomize order of hosts tested ' +
'(for use with --num-threads=1)')
    def process_options(self):
        """Validate all parsed options (hosts, port, protocol, misc)."""
        self.validate_common_opts()
def validate_common_opts(self):
hosts = self.get_opt('host')
self.port = self.get_opt('port')
if hosts:
self.host_list = [host.strip() for host in hosts.split(',') if host]
self.host_list += self.args
self.host_list = uniq_list_ordered(self.host_list)
if not self.host_list:
self.usage('no hosts specified')
validate_hostport_list(self.host_list, port_optional=True)
validate_port(self.port)
self.port = int(self.port)
self.validate_protocol_opts()
self.validate_misc_opts()
    def validate_protocol_opts(self):
        """Resolve --https / --http / --url / --ping into self.protocol,
        adjusting the default port and rejecting conflicting combinations."""
        if self.is_option_defined('https') and self.get_opt('https'):
            self.protocol = 'https'
            # optparse returns string, even though default we gave from __init__ was int
            # comparison would fail without this cast
            if str(self.port) == '80':
                log.info('overriding port 80 => 443 for https')
                self.port = 443
        elif self.is_option_defined('http') and self.get_opt('http'):
            self.protocol = 'http'
            if not self.port:
                self.port = 80
        if self.is_option_defined('url') and self.get_opt('url'):
            self.url_path = self.get_opt('url')
        if self.url_path:
            # a URL implies an HTTP check unless ping was explicitly requested
            if self.protocol is None:
                self.protocol = 'http'
            elif self.protocol == 'ping':
                self.usage('cannot specify --url-path with --ping, mutually exclusive options!')
        if self.is_option_defined('ping') and self.get_opt('ping'):
            if self.protocol:
                self.usage('cannot specify --ping with --http / --https, mutually exclusive tests!')
            elif self.port != self.default_port:
                self.usage('cannot specify --port with --ping, mutually exclusive options!')
            self.protocol = 'ping'
        if self.protocol and self.protocol not in ('http', 'https', 'ping'):
            code_error('invalid protocol, must be one of http / https / ping')
    def validate_misc_opts(self):
        """Validate and normalise --regex, --num-threads, --request-timeout
        and apply --random host shuffling."""
        if self.is_option_defined('regex') and self.get_opt('regex'):
            self.regex = self.get_opt('regex')
        if self.regex:
            if not self.protocol:
                self.usage('--regex cannot be used without --http / --https')
            validate_regex(self.regex)
            # pre-compile once; reused for every host's content check
            self.regex = re.compile(self.regex)
        self.num_threads = self.get_opt('num_threads')
        validate_int(self.num_threads, 'num threads', 1, 100)
        self.num_threads = int(self.num_threads)
        self.request_timeout = self.get_opt('request_timeout')
        validate_int(self.request_timeout, 'request timeout', 1, 60)
        self.request_timeout = int(self.request_timeout)
        if self.get_opt('random'):
            log_option('random', True)
            shuffle(self.host_list)
    def run(self):
        """Dispatch one worker per host on a thread pool, then collect the
        first positive result. Exits 1 (printing NO_AVAILABLE_SERVER unless
        --quiet) when no host passes its check.
        """
        self.pool = ThreadPool(processes=self.num_threads)
        if self.protocol in ('http', 'https'):
            for host in self.host_list:
                (host, port) = self.port_override(host)
                self.launch_thread(self.check_http, host, port, self.url_path)
        elif self.protocol == 'ping':
            for host in self.host_list:
                # this strips the :port from host
                (host, port) = self.port_override(host)
                self.launch_thread(self.check_ping, host, 1, self.request_timeout)
        else:
            # default: plain TCP socket connect check
            for host in self.host_list:
                (host, port) = self.port_override(host)
                self.launch_thread(self.check_socket, host, port)
        self.collect_results()
        # collect_results() exits 0 via finish() on success, so reaching here
        # means every check failed
        if not self.get_opt('quiet'):
            print('NO_AVAILABLE_SERVER')
        sys.exit(1)
def launch_thread(self, func, *args):
# works but no tunable concurrency
#_ = Thread(target=lambda q, arg1: q.put(self.check_ping(arg1)), args=(que, host))
#_ = Thread(target=lambda: que.put(self.check_ping(host)))
#_.daemon = True
#_.start()
#if self.num_threads == 1:
# _.join()
#
# blocks and prevents concurrency, use que instead
#async_result = pool.apply_async(self.check_ping, (host,))
#return_val = async_result.get()
#
self.pool.apply_async(lambda *args: self.queue.put(func(*args)), args)
def collect_results(self):
return_val = None
for _ in self.host_list:
return_val = self.queue.get()
if return_val:
break
if return_val:
if isTuple(return_val):
self.finish(*return_val)
elif isStr(return_val):
self.finish(return_val)
else:
code_error('collect_results() found non-tuple / non-string on queue')
def port_override(self, host):
port = self.port
if ':' in host:
parts = host.split(':')
if len(parts) == 2:
port = parts[1]
if not isPort(port):
die('error in host definition, not a valid port number: \'{0}\''.format(host))
else:
die('error in host definition, contains more than one colon: \'{0}\''.format(host))
host = parts[0]
return (host, port)
def finish(self, host, port=None):
print(host, end='')
if port is not None and port != self.port:
print(':{0}'.format(port), end='')
print()
sys.exit(0)
@staticmethod
def check_ping(host, count=None, wait=None):
if count is None:
count = 1
if wait is None:
wait = 3
if not isInt(count):
raise UnknownError("passed invalid count '{0}' to check_ping method, must be a valid integer!"\
.format(count))
if not isInt(wait):
raise UnknownError("passed invalid wait '{0}' to check_ping method, must be a valid integer!"\
.format(wait))
log.info("pinging host '%s' (count=%s, wait=%s)", host, count, wait)
count_switch = '-c'
if platform.system().lower() == 'windows':
count_switch = '-n'
wait_switch = '-w'
if platform.system().lower() == 'darwin':
wait_switch = '-W'
# causes hang if count / wait are not cast to string
cmd = ['ping', count_switch, '{0}'.format(count), wait_switch, '{0}'.format(wait), host]
log.debug('cmd: %s', ' '.join(cmd))
#log.debug('args: %s', cmd)
try:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#log.debug('communicating')
(stdout, stderr) = process.communicate()
#log.debug('waiting for child process')
process.wait()
exitcode = process.returncode
log.debug('stdout: %s', stdout)
log.debug('stderr: %s', stderr)
log.debug('exitcode: %s', exitcode)
if exitcode == 0:
log.info("host '%s' responded to ping", host)
return host
except subprocess.CalledProcessError as _:
log.warn('ping failed: %s', _.output)
except OSError as _:
die('error calling ping: {0}'.format(_))
return None
def check_socket(self, host, port):
log.info("checking host '%s' port '%s' socket", host, port)
try:
#log.debug('creating socket')
#sock = socket.socket()
#log.info("connecting to '%s:%s'", host, port)
#sock.connect((host, int(port)))
socket.create_connection((host, int(port)), self.request_timeout)
#sock.close()
log.info("socket connected to host '%s' port '%s'", host, port)
return (host, port)
except IOError:
return None
    def check_http(self, host, port, url_path=''):
        """HTTP(S) GET check against <protocol>://host:port/url_path.

        Returns (host, port) on a 200 response (and, when --regex is set,
        a matching body); returns a falsy value (False or None) otherwise.
        """
        if not isStr(url_path):
            # None may be passed through from run() when --url-path is unset
            url_path = ''
        url = '{protocol}://{host}:{port}/{url_path}'.format(protocol=self.protocol,
                                                             host=host,
                                                             port=port,
                                                             url_path=url_path.lstrip('/'))
        log.info('GET %s', url)
        try:
            # timeout here isn't total timeout, it's response time
            req = requests.get(url, timeout=self.request_timeout)
        except requests.exceptions.RequestException as _:
            log.info('%s - returned exception: %s', url, _)
            return False
        except IOError as _:
            log.info('%s - returned IOError: %s', url, _)
            return False
        log.debug("%s - response: %s %s", url, req.status_code, req.reason)
        log.debug("%s - content:\n%s\n%s\n%s", url, '='*80, req.content.strip(), '='*80)
        if req.status_code != 200:
            log.info('%s - status code %s != 200', url, req.status_code)
            return None
        if self.regex:
            log.info('%s - checking regex against content', url)
            # if this ends up not being processed properly and remains a string instead
            # of the expected compiled regex, then .search() will hang
            if isStr(self.regex):
                die('string found instead of expected compiled regex!')
            # NOTE(review): on Python 3 req.content is bytes while the regex is
            # compiled from a str option - confirm the regex/content types agree
            # or that this path only runs under Python 2
            if self.regex.search(req.content):
                log.info('%s - regex matched http output', url)
            else:
                log.info('%s - regex did not match http output', url)
                return None
        log.info("%s - passed all checks", url)
        return (host, port)
if __name__ == '__main__':
    # CLI entry point: parses options, runs the checks, prints the first
    # available server (or NO_AVAILABLE_SERVER) and sets the exit code
    FindActiveServer().main()
| StarcoderdataPython |
4836388 | <gh_stars>0
""" A Queue using a linked list like structure """
from typing import Any
class Node:
    """Single linked-list node holding one queue item."""

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None  # next node towards the rear, or None at the rear

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedQueue:
    """FIFO queue backed by a singly linked list.

    >>> queue = LinkedQueue()
    >>> queue.is_empty()
    True
    >>> queue.put(5)
    >>> queue.put(9)
    >>> queue.put('python')
    >>> queue.is_empty();
    False
    >>> queue.get()
    5
    >>> queue.put('algorithms')
    >>> queue.get()
    9
    >>> queue.get()
    'python'
    >>> queue.get()
    'algorithms'
    >>> queue.is_empty()
    True
    >>> queue.get()
    Traceback (most recent call last):
    ...
    IndexError: dequeue from empty queue
    """

    def __init__(self) -> None:
        # front: dequeue end; rear: enqueue end; both None when empty
        self.front = self.rear = None

    def __iter__(self):
        node = self.front
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        """
        >>> queue = LinkedQueue()
        >>> for i in range(1, 6):
        ...     queue.put(i)
        >>> len(queue)
        5
        >>> for i in range(1, 6):
        ...     assert len(queue) == 6 - i
        ...     _ = queue.get()
        >>> len(queue)
        0
        """
        # count lazily instead of materializing a tuple of all items
        return sum(1 for _ in self)

    def __str__(self) -> str:
        """
        >>> queue = LinkedQueue()
        >>> for i in range(1, 4):
        ...     queue.put(i)
        >>> queue.put("Python")
        >>> queue.put(3.14)
        >>> queue.put(True)
        >>> str(queue)
        '1 <- 2 <- 3 <- Python <- 3.14 <- True'
        """
        return " <- ".join(str(item) for item in self)

    def is_empty(self) -> bool:
        """
        >>> queue = LinkedQueue()
        >>> queue.is_empty()
        True
        >>> for i in range(1, 6):
        ...     queue.put(i)
        >>> queue.is_empty()
        False
        """
        # O(1): the queue is empty exactly when there is no front node
        # (previously this computed len(self), an O(n) traversal)
        return self.front is None

    def put(self, item) -> None:
        """
        >>> queue = LinkedQueue()
        >>> queue.get()
        Traceback (most recent call last):
        ...
        IndexError: dequeue from empty queue
        >>> for i in range(1, 6):
        ...     queue.put(i)
        >>> str(queue)
        '1 <- 2 <- 3 <- 4 <- 5'
        """
        node = Node(item)
        if self.is_empty():
            self.front = self.rear = node
        else:
            assert isinstance(self.rear, Node)
            self.rear.next = node
            self.rear = node

    def get(self) -> Any:
        """
        >>> queue = LinkedQueue()
        >>> queue.get()
        Traceback (most recent call last):
        ...
        IndexError: dequeue from empty queue
        >>> queue = LinkedQueue()
        >>> for i in range(1, 6):
        ...     queue.put(i)
        >>> for i in range(1, 6):
        ...     assert queue.get() == i
        >>> len(queue)
        0
        """
        if self.is_empty():
            raise IndexError("dequeue from empty queue")
        assert isinstance(self.front, Node)
        node = self.front
        self.front = self.front.next
        if self.front is None:
            # queue became empty: drop the stale rear reference too
            self.rear = None
        return node.data

    def clear(self) -> None:
        """
        >>> queue = LinkedQueue()
        >>> for i in range(1, 6):
        ...     queue.put(i)
        >>> queue.clear()
        >>> len(queue)
        0
        >>> str(queue)
        ''
        """
        self.front = self.rear = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| StarcoderdataPython |
6619564 | # -*- coding: utf-8 -*-
# Settings for running the test application testcases
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
import os
import sys
# Settings for running the test application testcases.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Directory containing this settings file; used to build all other paths.
PROJECT_ROOT = os.path.normpath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(PROJECT_ROOT, '..'))
ADMINS = (
    # ('<NAME>', '<EMAIL>'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'personnel', # Or path to database file if using sqlite3.
        'USER': 'personnel', # Not used with sqlite3.
        'PASSWORD': '<PASSWORD>', # Not used with sqlite3.
        'PORT': '5432',
        'HOST': 'localhost',
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/Helsinki'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = PROJECT_ROOT + '/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/admin/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '<KEY>'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'personnel.urls'
'''
TEMPLATE_DIRS = (
    PROJECT_ROOT + "/personnel/templates/"
)
'''
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'DIRS': [
            # BUGFIX: the path component must be relative - a leading '/'
            # makes os.path.join discard PROJECT_ROOT entirely and look up
            # '/personnel/templates/' at the filesystem root.
            os.path.join(PROJECT_ROOT, 'personnel/templates/'),
        ],
        'OPTIONS': {
            # 'debug': DEBUG,
            'context_processors':
                (
                    'django.template.context_processors.debug',
                    'django.template.context_processors.request',
                    'django.contrib.auth.context_processors.auth',
                    'django.template.context_processors.i18n',
                    'django.template.context_processors.media',
                    # not needed? 'django.template.context_processors.static',
                    # not needed? 'django.template.context_processors.tz',
                    # not needed? 'django.template.context_processors.csrf',
                    'django.contrib.messages.context_processors.messages',
                    'ws.dws.context_processors.django_conf_settings',
                )
        }
    },
]
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.admin',
    'personnel',
    'transhistory',
    'cli_query', # for querying objects from command line
    'django_nose', # testing nose test runner
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.auth',
    'django.core.context_processors.i18n',
    'django.core.context_processors.media',
    'django.core.context_processors.request',
)
LOGIN_URL = "/login"
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        'postgres':{
            'level':'DEBUG',
            'class':'transhistory.db_log_util.PostgresLogHandler',
            'formatter': 'simple'
        },
        'console':{
            'level':'DEBUG',
            'class':'logging.StreamHandler',
            'formatter': 'simple'
        },
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'personnel': {
            'handlers': ['console', 'postgres'], # change in real use
            'level': 'DEBUG',
            'propagate': True,
        },
        'transhistory': {
            'handlers': ['console', 'postgres'], # change in real use
            'level': 'DEBUG',
            'propagate': True,
        },
    }
}
| StarcoderdataPython |
4904070 | <reponame>Nyrio/beat-saber-mapping-scripts<gh_stars>10-100
import json
import imageio
import numpy as np
FILES = ["ExpertStandard.dat"]
def fix_time(t):
    """Remap a beat time to compensate for the mid-song BPM change.

    The change of BPM causes a mess with Noodle Extensions: beats before 320
    pass through unchanged, later beats are rescaled by 122/136 after the
    change point.
    """
    if t < 320:
        return t
    return 320 + 122 * (t - 320) / 136
def load_fonts(lyrics_json):
    """Load every font referenced by lyrics_json["fonts"].

    Each font is read from "<name>.png"; letter boundaries are marked by
    blue pixels on the image's first row. Every letter bitmap is then
    greedily decomposed into axis-aligned 1-pixel-thick rectangles (walls).

    Returns: {font_name: (glyph_height_px,
                          {letter: (width_px, [(x, y, w, h), ...])})}
    """
    fonts = {}
    for name, alphabet in lyrics_json["fonts"].items():
        font_img = imageio.imread("{}.png".format(name))
        img_dim = font_img.shape[:2]
        # Separate letters
        letter_offsets = []
        letter_widths = []
        start = 0
        for i in range(1, img_dim[1]):
            # blue channel > 100 on the top row marks a letter separator column
            if font_img[0, i, 2] > 100:
                if i > start:
                    letter_offsets.append(start)
                    letter_widths.append(i - start)
                start = i + 1
        assert len(letter_offsets) == len(alphabet)
        # Convert the letters to walls (1 pixel = 1 unit)
        font = {}
        for k in range(len(alphabet)):
            letter = alphabet[k]
            letter_img = font_img[
                :, letter_offsets[k]:letter_offsets[k] + letter_widths[k], :3]
            # a pixel is "on" when its RGB sum exceeds the threshold
            letter_matrix = (
                (letter_img[:, :, 0]
                 + letter_img[:, :, 1]
                 + letter_img[:, :, 2]) > 100)
            letter_walls = []  # list of x, y, w, h
            # Greedy cover: repeatedly take the longest remaining run of
            # lit pixels (horizontal or vertical) and turn it into a wall.
            while True:
                h_lines = []
                v_lines = []
                # Horizontal lines
                for i in range(img_dim[0]):
                    start_j = None
                    for j in range(letter_widths[k]):
                        if letter_matrix[i, j] and start_j is None:
                            start_j = j
                        if not letter_matrix[i, j] and start_j is not None:
                            h_lines.append((j - start_j, i, start_j))
                            start_j = None
                    if start_j is not None:
                        h_lines.append(
                            (letter_widths[k] - start_j, i, start_j))
                # Vertical lines
                for j in range(letter_widths[k]):
                    start_i = None
                    for i in range(img_dim[0]):
                        if letter_matrix[i, j] and start_i is None:
                            start_i = i
                        if not letter_matrix[i, j] and start_i is not None:
                            v_lines.append((i - start_i, start_i, j))
                            start_i = None
                    if start_i is not None:
                        v_lines.append(
                            (img_dim[0] - start_i, start_i, j))
                # Choose the longest line, add it and clear the pixels
                h_lines.sort(key=lambda x: x[0], reverse=True)
                v_lines.sort(key=lambda x: x[0], reverse=True)
                if not h_lines and not v_lines:
                    break
                elif not h_lines or v_lines[0][0] >= h_lines[0][0]:
                    h, i, j = v_lines[0]
                    # y axis is flipped: wall y counts up from the image bottom
                    letter_walls.append((j, img_dim[0] - i - h, 1, h))
                    for r in range(i, i + h):
                        letter_matrix[r, j] = False
                else:
                    w, i, j = h_lines[0]
                    letter_walls.append((j, img_dim[0] - i - 1, w, 1))
                    for c in range(j, j + w):
                        letter_matrix[i, c] = False
            font[letter] = (letter_widths[k], letter_walls)
        fonts[name] = (img_dim[0], font)
    return fonts
def main():
    """Generate lyric walls from lyrics.json and write them into each
    difficulty file's "_obstacles" array.
    """
    # raw_unicode_escape round-trip: the file may contain escaped non-ASCII
    with open("lyrics.json", encoding='raw_unicode_escape') as json_file:
        lyrics_json = json.loads(
            json_file.read().encode('raw_unicode_escape').decode())
    fonts = load_fonts(lyrics_json)
    scale = lyrics_json["pixel_scale"]
    word_spacing = lyrics_json["word_spacing"]
    letter_spacing = lyrics_json["letter_spacing"]
    duration = lyrics_json["text_thickness"]
    time_offset = lyrics_json["time_offset"]
    walls = []
    for group in lyrics_json["lyrics"]:
        font_height, font = fonts[group["font"]]
        # total pixel width of the group's text, used to center it
        text_width = sum(font[letter][0]
                         for word in group["text"]
                         for letter in word["word"])
        text_width += word_spacing * (len(group["text"]) - 1)
        text_width += letter_spacing * sum(
            len(word["word"]) - 1 for word in group["text"])
        offset = [lyrics_json["side_offsets"][group["side"]] - text_width / 2,
                  lyrics_json["line_offsets"][group["line"]]]
        # side 1 renders words in reverse order
        text = reversed(group["text"]) if group["side"] == 1 else group["text"]
        for word in text:
            for letter in word["word"]:
                for letter_wall in font[letter][1]:
                    walls.append({
                        "_time": fix_time(word["time"] + time_offset),
                        "_lineIndex": 0,
                        "_type": 0,
                        "_duration": duration,
                        "_width": 0,
                        "_customData": {
                            "_position": [scale * (letter_wall[0] + offset[0]),
                                          scale * (letter_wall[1] + offset[1])],
                            "_scale": [scale * letter_wall[2],
                                       scale * letter_wall[3]],
                            "_rotation": lyrics_json["side_angles"][group["side"]],
                            "_localRotation": [0, 0, 0],
                        }
                    })
                # advance the cursor past this letter
                offset[0] += font[letter][0] + letter_spacing
            offset[0] += word_spacing - letter_spacing
    # the game expects obstacles sorted by time
    walls.sort(key=lambda x: x["_time"])
    for filename in FILES:
        with open(filename, "r") as json_file:
            song_json = json.load(json_file)
        song_json["_obstacles"] = walls
        with open(filename, "w") as json_file:
            json.dump(song_json, json_file)
main()
| StarcoderdataPython |
285437 | from django.db import models
from chemreg.lists.models import List, Record
from chemreg.lists.utils import build_rid
from chemreg.substance.models import Substance
def test_record():
    """Tests the validity of the Record Model's attributes"""
    # Primitives
    # id: auto-generated via build_rid, unique, mandatory
    assert type(Record.id.field) is models.CharField
    assert Record.id.field.max_length == 50
    assert not Record.id.field.blank
    assert not Record.id.field.null
    assert Record.id.field.unique
    assert Record.id.field.default is build_rid
    # external_id: mandatory identifier from the source list
    assert type(Record.external_id.field) is models.CharField
    assert Record.external_id.field.max_length == 500
    assert not Record.external_id.field.blank
    assert not Record.external_id.field.null
    # message: optional free text
    assert type(Record.message.field) is models.CharField
    assert Record.message.field.max_length == 500
    assert Record.message.field.blank
    assert not Record.message.field.null
    # score: optional float
    assert type(Record.score.field) is models.FloatField
    assert Record.score.field.null
    assert type(Record.is_validated.field) is models.BooleanField
    assert not Record.is_validated.field.null
    # Foreign Keys
    assert type(Record.list.field) is models.ForeignKey
    assert Record.list.field.related_model is List
    assert not Record.list.field.null
    assert type(Record.substance.field) is models.ForeignKey
    assert Record.substance.field.related_model is Substance
    assert Record.substance.field.null
| StarcoderdataPython |
1944459 | <filename>test/test_cherrypy.py
# -*- coding: utf-8 -*-
import os
import socket
import time
import unittest
from mock import MagicMock, call
import cherrypy
from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool
from ws4py.websocket import EchoWebSocket
from ws4py.framing import Frame, OPCODE_TEXT, OPCODE_CLOSE
class FakePoller(object):
    """Deterministic stand-in for the websocket manager's socket poller.

    Every registered fd is reported as ready by poll(); *timeout* is
    accepted for interface compatibility but ignored.
    """
    def __init__(self, timeout=0.1):
        self._fds = []

    def release(self):
        """Forget every registered fd."""
        self._fds = []

    def register(self, fd):
        """Start tracking *fd*; duplicates are ignored."""
        if fd in self._fds:
            return
        self._fds.append(fd)

    def unregister(self, fd):
        """Stop tracking *fd*; a no-op when it was never registered."""
        if fd in self._fds:
            self._fds.remove(fd)

    def poll(self):
        """Report every registered fd as ready."""
        return self._fds
class App(object):
    """Minimal CherryPy application exposing a single websocket endpoint."""
    @cherrypy.expose
    def ws(self):
        # by the time this handler runs, WebSocketTool must have attached a
        # ws_handler to the request
        assert cherrypy.request.ws_handler != None
def setup_engine():
    """Configure and start a CherryPy engine with the websocket plugin,
    a FakePoller (so tests control socket readiness) and no HTTP server.
    """
    # we don't need a HTTP server for this test
    cherrypy.server.unsubscribe()
    cherrypy.config.update({'log.screen': False})
    cherrypy.engine.websocket = WebSocketPlugin(cherrypy.engine)
    cherrypy.engine.websocket.subscribe()
    # swap in the fake poller before anything registers real sockets
    cherrypy.engine.websocket.manager.poller = FakePoller()
    cherrypy.tools.websocket = WebSocketTool()
    config={'/ws': {'tools.websocket.on': True,
                    'tools.websocket.handler_cls': EchoWebSocket}}
    cherrypy.tree.mount(App(), '/', config)
    cherrypy.engine.start()
def teardown_engine():
    """Stop the CherryPy engine started by setup_engine()."""
    cherrypy.engine.exit()
class CherryPyTest(unittest.TestCase):
    """End-to-end test of the ws4py CherryPy plugin using mocked sockets."""
    def setUp(self):
        setup_engine()
    def tearDown(self):
        teardown_engine()
    def test_plugin(self):
        """A handed-off websocket is tracked by the manager and removed
        again once the closing handshake completes.
        """
        manager = cherrypy.engine.websocket.manager
        self.assertEqual(len(manager), 0)
        s = MagicMock(spec=socket.socket)
        # first recv(): a masked text frame, as a real client would send
        s.recv.return_value = Frame(opcode=OPCODE_TEXT, body=b'hello',
                                    fin=1, masking_key=os.urandom(4)).build()
        h = EchoWebSocket(s, [], [])
        cherrypy.engine.publish('handle-websocket', h, ('127.0.0.1', 0))
        self.assertEqual(len(manager), 1)
        self.assertTrue(h in manager)
        # the following call to .close() on the
        # websocket object will initiate
        # the closing handshake
        # This next line mocks the response
        # from the client to actually
        # complete the handshake.
        # The manager will then remove the websocket
        # from its pool
        s.recv.return_value = Frame(opcode=OPCODE_CLOSE, body=b"ok we're done",
                                    fin=1, masking_key=os.urandom(4)).build()
        h.close()
        # the poller runs a thread, give it time to get there
        # just wait up to 5 seconds.
        left_iteration = 50
        while left_iteration:
            left_iteration -= 1
            time.sleep(.1)
            if len(manager) == 0:
                break
        self.assertEqual(len(manager), 0)
if __name__ == '__main__':
    # build the suite manually so further TestCases can be added to the list
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for testcase in [CherryPyTest]:
        tests = loader.loadTestsFromTestCase(testcase)
        suite.addTests(tests)
    unittest.TextTestRunner(verbosity=2).run(suite)
| StarcoderdataPython |
11382746 | # this the interface to create your own data source
# this class pings a private / public blockchain to get the balance and code information
from web3 import Web3, KeepAliveRPCProvider
class EthereumData:
    """Data source that queries a private/public Ethereum blockchain node
    over JSON-RPC for balance and contract-code information.

    The node endpoint is now configurable; the previous hard-coded values
    remain the defaults for backward compatibility.
    """
    def __init__(self, host='x.x.x.x', port='8545'):
        # host/port of the Ethereum JSON-RPC endpoint
        self.host = host
        self.port = port
        self.web3 = Web3(KeepAliveRPCProvider(host=self.host, port=self.port))

    def getBalance(self, address):
        """Return the balance of *address* as reported by the node."""
        return self.web3.eth.getBalance(address)

    def getCode(self, address):
        """Return the code deployed at *address* as reported by the node."""
        return self.web3.eth.getCode(address)
1912043 | import linecache
from threading import Thread
import sys
from logging import Handler, Formatter
from logging import CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET
import time
try:
    # Python 2 module name
    from Queue import Queue
except ImportError:
    # Python 3 renamed the module; catching ImportError specifically avoids
    # masking unrelated failures the previous bare 'except' swallowed
    from queue import Queue
import requests
import json
# Map stdlib logging level numbers to Lofka/log4j-style level names
_levelToName = {
    CRITICAL: 'FATAL',
    ERROR: 'ERROR',
    WARNING: 'WARN',
    INFO: 'INFO',
    DEBUG: 'DEBUG',
    NOTSET: 'TRACE',
}
class LoggerJsonFormatter(Formatter):
    """
    Format record in LoggerJson format.

    Converts a logging.LogRecord into a plain dictionary with a
    Lofka/log4j-style layout: epoch-millisecond timestamp, level names such
    as WARN/FATAL, nested location info and (optionally) exception details.
    """
    def format(self, record):
        """Formats LogRecord into python dictionary.

        NOTE(review): unlike logging.Formatter.format this returns a dict,
        not a str; the Lofka handlers JSON-encode it themselves.
        """
        # Standard document
        document = {
            'timestamp': time.time() * 1000.0,  # epoch milliseconds
            'level': _levelToName[record.levelno],
            'thread': record.threadName,
            'thread_id': record.thread,
            'message': record.getMessage(),
            'logger': record.name,
            'location': {
                'filename': record.pathname,
                'class': record.module,
                'method': record.funcName,
                'line': record.lineno
            }
        }
        # Standard document decorated with exception info
        if record.exc_info is not None:
            document.update({
                'throwable': {
                    'message': str(record.exc_info[1]),
                    'stack_trace': [
                        {
                            "line": stack[1],
                            "filename": stack[0],
                            "method": stack[2],
                            "line_code": stack[3]
                        }
                        for stack in LoggerJsonFormatter.extract_tb(record.exc_info[2])
                    ]
                }
            })
        return document
    @staticmethod
    def extract_tb(tb, limit=None):
        """Return list of up to limit pre-processed entries from traceback.
        This is useful for alternate formatting of stack traces. If
        'limit' is omitted or None, all entries are extracted. A
        pre-processed stack trace entry is a quadruple (filename, line
        number, function name, text) representing the information that is
        usually printed for a stack trace. The text is a string with
        leading and trailing whitespace stripped; if the source is not
        available it is None.
        """
        if limit is None:
            if hasattr(sys, 'tracebacklimit'):
                limit = sys.tracebacklimit
        # NOTE(review): 'list' shadows the builtin here; kept as-is
        list = []
        n = 0
        while tb is not None and (limit is None or n < limit):
            f = tb.tb_frame
            lineno = tb.tb_lineno
            co = f.f_code
            filename = co.co_filename
            name = co.co_name
            linecache.checkcache(filename)
            line = linecache.getline(filename, lineno, f.f_globals)
            if line:
                line = line.strip()
            else:
                line = None
            list.append((filename, lineno, name, line))
            tb = tb.tb_next
            n = n + 1
        return list
class LofkaHandler(Handler):
    """Synchronous logging handler that POSTs each record as JSON to a
    Lofka collector.

    Constructor defaults may be overridden by an optional ``lofka.json``
    file ({"target_url": ..., "app_name": ...}) in the working directory.
    """
    def __init__(self, target_url="http://logger.example.com/", app_name="default_python_application"):
        super(LofkaHandler, self).__init__()
        try:
            with open("lofka.json", "r") as fp:
                obj = json.load(fp)
                target_url = obj['target_url']
                app_name = obj['app_name']
        except (IOError, OSError, ValueError, KeyError):
            # missing or malformed lofka.json: keep the constructor
            # arguments. The previous bare 'except' also swallowed
            # KeyboardInterrupt/SystemExit.
            pass
        self.target_url = target_url + "lofka/service/push"
        self.app_name = app_name
        self.formatter = LoggerJsonFormatter()

    def emit(self, record):
        """
        Commit record to server
        :param record: LogRecord to serialize and POST
        :return: None
        """
        record_object = self.formatter.format(record)
        record_object["app_name"] = self.app_name
        requests.post(self.target_url, data=json.dumps(record_object))
class LofkaAsyncHandler(Handler):
    """Buffered logging handler that POSTs batches of records to a Lofka
    collector from a background thread.

    Records are queued locally and flushed either every *interval*
    milliseconds by the background thread, or synchronously as soon as
    more than *max_buffer_size* records are pending.
    """
    def __init__(self,
                 target_url="http://logger.example.com/",
                 app_name="default_python_application",
                 interval=1000,
                 max_buffer_size=1000
                 ):
        super(LofkaAsyncHandler, self).__init__()
        try:
            with open("lofka.json", "r") as fp:
                obj = json.load(fp)
                target_url = obj['target_url']
                app_name = obj['app_name']
        except (IOError, OSError, ValueError, KeyError):
            # missing or malformed lofka.json: keep the constructor
            # arguments (previously a bare 'except')
            pass
        self.target_url = target_url + "lofka/service/push/batch"
        self.app_name = app_name
        self.formatter = LoggerJsonFormatter()
        # extra 30% headroom over the flush threshold
        self.message_queue = Queue(int(max_buffer_size * 1.3))  # type: Queue
        self.max_buffer_size = max_buffer_size

        def push_data_periodically():
            # Background flush loop: drain the buffer every `interval` ms.
            while True:
                if self.message_queue.qsize() > 0:
                    print("Pushing:\n" + json.dumps(list(self.message_queue.queue), indent=2))
                    self.__submit_batch(list(self.message_queue.queue))
                    self.message_queue.queue.clear()
                    print("Pushed")
                else:
                    time.sleep(interval / 1000.0)

        # BUGFIX: the flush thread must be a daemon - a non-daemon thread in
        # an infinite loop would keep the interpreter alive forever at exit.
        flusher = Thread(target=push_data_periodically)
        flusher.daemon = True
        flusher.start()

    def __submit_batch(self, data):
        """
        Submit messages
        :type data: list
        :param data: messages
        :return: None
        """
        requests.post(self.target_url, data=json.dumps(data))

    def emit(self, record):
        """
        Commit record to server
        :param record: LogRecord to buffer (flushed asynchronously)
        :return: None
        """
        record_object = self.formatter.format(record)
        record_object["app_name"] = self.app_name
        self.message_queue.put(record_object, timeout=1)
        if self.message_queue.qsize() > self.max_buffer_size:
            # buffer overflow: flush synchronously from the caller's thread
            self.__submit_batch(list(self.message_queue.queue))
            self.message_queue.queue.clear()
| StarcoderdataPython |
6687051 | """
cpymadtools package
~~~~~~~~~~~~~~~~~~~
cpymadtools is a collection of utilities that integrate within my workflow with the `cpymad` library.
:copyright: (c) 2019 by <NAME>.
:license: MIT, see LICENSE for more details.
"""
from .errors import misalign_lhc_ir_quadrupoles, misalign_lhc_triplets, switch_magnetic_errors
from .generators import LatticeGenerator
from .matching import get_closest_tune_approach, get_lhc_tune_and_chroma_knobs, match_tunes_and_chromaticities
from .orbit import correct_lhc_orbit, get_current_orbit_setup, lhc_orbit_variables, setup_lhc_orbit
from .parameters import query_beam_attributes
from .plotters import (
AperturePlotter,
BeamEnvelopePlotter,
CrossingSchemePlotter,
DynamicAperturePlotter,
LatticePlotter,
PhaseSpacePlotter,
TuneDiagramPlotter,
)
from .ptc import get_amplitude_detuning, get_rdts
from .special import (
apply_lhc_colinearity_knob,
apply_lhc_coupling_knob,
apply_lhc_rigidity_waist_shift_knob,
deactivate_lhc_arc_sextupoles,
install_ac_dipole_as_kicker,
install_ac_dipole_as_matrix,
make_lhc_beams,
make_lhc_thin,
make_sixtrack_output,
power_landau_octupoles,
re_cycle_sequence,
vary_independent_ir_quadrupoles,
)
from .track import track_single_particle
from .tune import make_footprint_table
from .twiss import get_ips_twiss, get_ir_twiss, get_twiss_tfs
from .utils import get_table_tfs
| StarcoderdataPython |
6468671 | <gh_stars>0
from urllib import error as error
import json
import requests
from ansible.module_utils.basic import AnsibleModule
def __api_call(module):
    """Issue the PUT request described by the module parameters and finish
    the Ansible module run.

    Exits via module.exit_json() on success or module.fail_json() on any
    request/serialization error (both terminate the module).
    """
    base_url = module.params.get('url')
    data = module.params.get('data')
    access_token = module.params.get('access_token')
    url = str(base_url)
    header = {"Authorization": 'Bearer {0}'.format(access_token),
              "Content-Type": 'application/json'}
    try:
        # the request itself must be inside the try so connection/HTTP
        # errors are reported through fail_json instead of crashing
        request = requests.put(url, headers=header, data=json.dumps(data))
        response = {"json": str(request.content)}
        module.exit_json(changed=True, json=response)
    except Exception as e:
        module.fail_json(
            msg="Error, token is invalid or error in request: {0}".format(e)
        )
def main():
    """Ansible entry point: declare the argument spec, build the module and
    delegate to __api_call(), which exits the run itself.
    """
    fields = dict(
        url=dict(required=True, type='str'),
        data=dict(required=True, type='dict'),
        # no_log keeps the bearer token out of Ansible's logs
        access_token=dict(required=True, type='str', no_log=True),
    )
    required_together = [["url", "access_token"]]
    module = AnsibleModule(
        argument_spec=fields,
        required_together=required_together,
        supports_check_mode=True
    )
    # __api_call() terminates the module via exit_json/fail_json, so there
    # is no return value to unpack (the old code unpacked a non-existent
    # tuple and shadowed the json module)
    __api_call(module)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
9777866 | <reponame>qtson/pyjanitor
from .functions import * # noqa: F403, F401
# from .dataframe import JanitorDataFrame as DataFrame # noqa: F401
# from .dataframe import JanitorSeries as Series # noqa: F401
__version__ = "0.17.2"
| StarcoderdataPython |
1644470 | <reponame>ranok92/deepirl
import numpy as np
from numba import njit
import pdb
def angle_between(v1, v2):
    """Return the unsigned angle in radians (range [0, pi]) between the
    2-D vectors *v1* and *v2*.
    """
    a = v1.astype(np.dtype('float'))
    b = v2.astype(np.dtype('float'))
    cross = np.linalg.det(np.stack((a, b)))
    dot = np.dot(a, b)
    return np.abs(np.arctan2(cross, dot))
@njit
def unit_vector(vector):
    """Rescale *vector* to unit length (numba-compiled)."""
    length = np.linalg.norm(vector)
    return vector / length
@njit
def get_norm(vector):
    """Euclidean norm of *vector* (numba-compiled)."""
    length = np.linalg.norm(vector)
    return length
def get_rot_matrix(theta):
    '''
    returns the standard 2x2 rotation matrix for an angle theta (radians)
    '''
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    return np.asarray([[cos_t, -sin_t], [sin_t, cos_t]])
def deg_to_rad(val):
    '''
    converts degree to radians
    '''
    return val * np.pi / 180
def rad_to_deg(val):
    '''
    converts radians to degrees
    '''
    return val * 180 / np.pi
class SocialForcesController():
    """Controller that steers an agent with a Social Forces pedestrian model:
    an attractive force towards the goal plus repulsive forces from nearby
    pedestrians, mapped on to a discretized speed/orientation action space.
    """
    def __init__(self,
                 speed_div,
                 orient_div,
                 orient_quant,
                 field_of_view=np.pi/2,
                 desired_speed=1,
                 relax_time=2,
                 max_force=2
                 ):
        # action space (filled in by define_action_array below)
        self.orientation_array = []
        self.speed_array = []
        self.field_of_view = field_of_view
        self.desired_speed = desired_speed
        self.relaxation_time = relax_time
        self.max_force = max_force
        self.define_action_array(speed_div, orient_div, orient_quant)
    def define_action_array(self, speed_div, orient_div, orient_quant):
        """Build the discretized action space.

        speed_array: speed_div evenly spaced magnitudes in [0, max_force).
        orientation_array: orient_div unit vectors obtained by rotating the
        base vector [-1, 0] by multiples of orient_quant degrees, symmetric
        around 0 (orient_div is assumed odd - TODO confirm).
        """
        force_lim = self.max_force
        self.speed_array = [i/speed_div*force_lim for i in range(speed_div)]
        orient_range = int((orient_div-1)/2)
        deg_array = [i*orient_quant for i in range(-orient_range, orient_range+1, 1)]
        unit_v = np.array([-1, 0])
        rot_matrix_list = [get_rot_matrix(deg_to_rad(deg)) for deg in deg_array]
        self.orientation_array = [np.matmul(rot_matrix, unit_v) for rot_matrix in rot_matrix_list]
        #self.orientation_array = deg_array
def calculate_attractive_force(self, agent_state, goal_state):
'''
calculates the force exerted on the agent because of the
goal, current position of the goal and the desired speed
of the agent
input: agent_state, goal_state
output: force exerted on the agent (2 dim vector)
'''
desired_velocity = goal_state - agent_state['position']
current_velocity = agent_state['orientation']*agent_state['speed']
return 1/self.relaxation_time*(desired_velocity-current_velocity)
    def calculate_repulsive_force_btw_points(self, agent_state, obstacle):
        '''
        calculates the repulsive forces acting on the agent due to a single
        nearby pedestrian(obstacle) [following the formulation as shown in
        the paper IRL algorithms and feature representations for robot navigation
        vasquez et al]
        input: agent_state, obstacle
        output: repulsive force (2 dim vector)
        '''
        agent_pos = agent_state['position']
        obs_pos = obstacle['position']
        rel_position = obs_pos - agent_pos
        rel_distance = np.linalg.norm(rel_position)
        agent_velocity = agent_state['speed']*agent_state['orientation']
        # angle_between produces only positive angles
        angle = angle_between(rel_position, agent_velocity)
        # NOTE(review): agent radius hard-coded to 5 environment units - TODO
        # confirm this matches the environment's agent size
        agent_radius = 5
        normalized_rel_positions = rel_position / rel_distance
        # force decays exponentially with distance beyond the combined radii
        exp_multiplier = np.exp(2 * agent_radius - rel_distance)
        # scale down obstacles outside the direction of travel
        anisotropic_term = (2.0 - 0.5 * (1.0 + np.cos(angle)))
        social_forces = (
            exp_multiplier * normalized_rel_positions * anisotropic_term
        )
        return social_forces
def calculate_repulsive_forces(self, agent_state, obstacle_list):
    """Total repulsive force on the agent from all nearby pedestrians.

    Sums the pairwise repulsive force over every entry of
    ``obstacle_list``.

    input: agent_state, obstacle_list
    output: repulsive force exerted on the agent (2 dim vector)
    """
    total = np.zeros(2)
    for single_obstacle in obstacle_list:
        total += self.calculate_repulsive_force_btw_points(agent_state,
                                                           single_obstacle)
    return total
def calculate_social_force(self, attractive_forces, repulsive_forces):
    """Net social force acting on the agent.

    Currently a plain superposition (sum) of the attractive and
    repulsive contributions.

    input: different forces acting on the agent
    output: final social forces being acted on the agent (2 dim vector)
    """
    net_force = attractive_forces + repulsive_forces
    return net_force
def calculate_action_from_force(self, agent_state, cur_heading_dir, force_vector):
    '''
    given the current force being acted on the agent returns the most
    appropriate set of action for the agent
    input: force vector
    output: action integer(containing both change in orientation and speed)
    '''
    # Express the world-frame force in the agent's local frame by rotating
    # it by the current heading (degrees -> radians -> rotation matrix).
    rotated_force_vector = np.matmul(get_rot_matrix(deg_to_rad(cur_heading_dir)),
                            np.transpose(force_vector))
    # NOTE(review): argmin selects the orientation whose dot product with
    # the rotated force is smallest (most anti-aligned). Aligning with the
    # force would use argmax; this may be deliberate given the (-1, 0)
    # base direction used in define_action_array -- confirm.
    orientation_action = np.argmin([np.dot(self.orientation_array[i],
                            rotated_force_vector) for i in range(len(self.orientation_array))])
    # The speed bucket is currently fixed at index 3 regardless of the
    # force magnitude.
    speed_action = 3
    # Actions are flattened row-major: speed_index * |orientations| + orientation_index.
    return speed_action*len(self.orientation_array)+orientation_action
def eval_action(self, state):
    """Select the agent's action for the current state.

    Combines goal attraction and pedestrian repulsion into a net social
    force, then maps that force onto the discrete action space.

    input: the current state as seen by the agent
    output: the action taken by the agent in response
            to the state as stated by social forces
    """
    agent = state['agent_state']
    nearby = state['obstacles']
    goal = state['goal_state']
    heading = state['agent_head_dir']

    attraction = self.calculate_attractive_force(agent, goal)
    repulsion = self.calculate_repulsive_forces(agent, nearby)
    total_force = self.calculate_social_force(attraction, repulsion)
    return self.calculate_action_from_force(agent, heading, total_force)
| StarcoderdataPython |
1799150 | <reponame>AvirityInformation/chatbot_Jullie
from enum import Enum
class Payload(Enum):
    """Postback payload identifiers exchanged with the chat client.

    Each member's value is the raw string carried in the messaging
    platform's payload field.
    """
    MESSAGE_WITHOUT_BUTTON_IN_INTRO = 'MESSAGE_WITHOUT_BUTTON_IN_INTRO_PAYLOAD'
    SUICIDAL_THOUGHT_DURING_CONVERSATION = 'SUICIDAL_THOUGHT_DURING_CONVERSATION_PAYLOAD'
    GREETING = 'GREETING'
    # Conversation-tree steps and session markers.
    CCT_1 = "CCT_1"
    CCT_2 = "CCT_2"
    CCT_3 = "CCT_3"
    CCT_4 = "CCT_4"
    CCT_5 = "CCT_5"
    SESSION_1 = "SESSION_1"
    SESSION_2 = "SESSION_2"
    ASK_SUICIDE_ILLNESS = "ASK_SUICIDE_ILLNESS"
    HAVE_SUICIDE_ILLNESS = "HAVE_SUICIDE_ILLNESS"
    GET_STARTED = "GET_STARTED_PAYLOAD"
    REMIND = 'REMIND_PAYLOAD'
    SESSION_END = 'PAYLOAD_SESSION_END'
    ASK_MOOD_END = "PAYLOAD_ASK_MOOD_END"

    # NOTE(review): because this assignment sits inside the Enum body, the
    # names below refer to the plain strings (the members do not exist yet
    # at class-construction time), and intro_payload_list itself becomes an
    # Enum *member* whose value is this list of raw strings -- access it as
    # Payload.intro_payload_list.value. Confirm this is intended rather
    # than a module-level constant.
    intro_payload_list = [
        GREETING,
        CCT_1,
        CCT_2,
        CCT_3,
        CCT_4,
        CCT_5,
        SESSION_1,
        SESSION_2,
        MESSAGE_WITHOUT_BUTTON_IN_INTRO,
        GET_STARTED,
        ASK_SUICIDE_ILLNESS,
        HAVE_SUICIDE_ILLNESS
    ]
| StarcoderdataPython |
6706230 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import subprocess
import shlex
import pandas as pd
import numpy as np
from astropy.table import Table
from astropy.table import Column
import os
import glob2
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import SNID_Analysis
import numpy as np
import gc
import glob
import threading
import logging
# In[2]:
sample = Table.read("/home/xhall/Documents/NewZTF/ML_sample.ascii", format = "ascii")
# In[3]:
sample_location = "/home/xhall/Documents/NewZTF/spectra/"
source = "/home/xhall/Documents/NewZTF/SNIDoutput/"
image_output = "/home/xhall/Documents/NewZTF/Images/"
snid = "/home/xhall/Documents/SNID/snid-5.0/snid"
# In[4]:
def SNID_run(i):
    """Run SNID on a single spectrum file and post-process its output.

    Parameters
    ----------
    i : str
        Spectrum file name (with extension), relative to ``sample_location``.

    Side effects: writes SNID output under ``source`` and plots under
    ``image_output``. Failures are reported but never raised, so a batch
    run can continue past individual bad files.
    """
    bashCommand = None  # defined up front so the error path cannot hit UnboundLocalError
    try:
        filenoascii = i.split(".")[0]
        fnamelist = sample_location + str(i)
        outputname = str(filenoascii)
        sample_location_temp = sample_location + str(filenoascii)
        output, error, bashCommand = SNID_Analysis.run_files(sample_location_temp, fnamelist, source, filenoascii)
        # Only analyze runs where SNID actually produced usable correlations.
        if(not("No template meets" in str(output)) and not("Correlation function is all zero!" in str(output))):
            SNID_Analysis.plot_best_15(sample_location_temp, outputname, source, image_output)
            SNID_Analysis.parse_output(sample_location_temp, source, outputname, filenoascii)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # still abort the batch; report which file and command failed.
        print(i, bashCommand)
# In[ ]:
if __name__ == "__main__":
    # Sequentially run SNID over every unique spectrum file name in the
    # sample table, forcing a GC pass per file and logging progress every
    # 100 files.
    counter = 0
    for i in np.unique(sample["col8"]):
        SNID_run(i)
        counter += 1
        gc.collect()
        if(counter%100 == 0):
            print(counter)
    # Dead code below: an abandoned threading experiment kept as a bare
    # string literal (no-op). NOTE(review): if revived, it reuses the loop
    # variable ``i`` for both the batch ranges and the threads, which
    # would be a bug.
    """
    threads = []
    for i in np.unique(sample["col8"]):
        threads.append(threading.Thread(target=SNID_run, args=(i,)))
    print("Threads Created")
    length = (len(np.unique(sample["col8"])))
    ranges = np.linspace(0,length,500)
    for i in range(len(ranges)-1):
        for i in threads[int(ranges[i]):int(ranges[i+1])]:
            i.start()
        for i in threads[int(ranges[i]):int(ranges[i+1])]:
            i.join()
        print("Round" + str(i))
        break
    """
| StarcoderdataPython |
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
from django.views.decorators.csrf import csrf_exempt

from linebot import LineBotApi, WebhookParser
from linebot.exceptions import InvalidSignatureError, LineBotApiError
from linebot.models import (
    MessageEvent,
    StickerMessage,
    StickerSendMessage,
    TextMessage,
    TextSendMessage,
)
line_bot_api = LineBotApi(settings.LINE_CHANNEL_ACCESS_TOKEN)
parser = WebhookParser(settings.LINE_CHANNEL_SECRET)
@csrf_exempt
def callback(request):
    """LINE webhook endpoint.

    Verifies the ``X-Line-Signature`` header, parses the webhook events
    and, for each incoming sticker message, echoes the sticker back
    followed by a greeting text.

    Returns 200 on success, 403 on an invalid signature, 400 on a
    malformed request or a non-POST method.
    """
    if request.method == 'POST':
        signature = request.META.get('HTTP_X_LINE_SIGNATURE')
        if signature is None:
            # Missing signature header: reject with 400 instead of letting
            # a KeyError bubble up as a 500.
            return HttpResponseBadRequest()
        body = request.body.decode('utf-8')

        try:
            events = parser.parse(body, signature)
        except InvalidSignatureError:
            return HttpResponseForbidden()
        except LineBotApiError:
            return HttpResponseBadRequest()

        for event in events:
            if isinstance(event, MessageEvent):
                if isinstance(event.message, StickerMessage):
                    messages = []
                    package_id = event.message.package_id
                    sticker_id = event.message.sticker_id
                    # Replies must use the *Send* message models;
                    # StickerMessage/TextMessage are receive-side models and
                    # are not valid reply payloads.
                    sticker = StickerSendMessage(package_id=package_id, sticker_id=sticker_id)
                    messages.append(sticker)
                    text = TextSendMessage(text='Hi我是詹姆士')
                    messages.append(text)
                    try:
                        line_bot_api.reply_message(
                            event.reply_token,
                            messages
                        )
                    except LineBotApiError:
                        # Narrowed from a bare ``except:``: only swallow
                        # LINE API errors; keep serving remaining events.
                        print('fail to reply')
        return HttpResponse(status=200)
    else:
        return HttpResponseBadRequest()
| StarcoderdataPython |
6410157 | <filename>profiles/migrations/0019_auto_20180514_2128.py
# Generated by Django 2.0.3 on 2018-05-14 21:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (2018-05-14).

    Adds a ``demo`` flag to Profile and converts several ``url`` columns
    to FileFields with per-model upload directories.
    """

    dependencies = [
        ('profiles', '0018_auto_20180514_2106'),
    ]

    operations = [
        # New boolean flag marking demo profiles; existing rows get False.
        migrations.AddField(
            model_name='profile',
            name='demo',
            field=models.BooleanField(default=False),
        ),
        # The four alterations below switch the url columns to file
        # uploads, each with its own upload_to directory.
        migrations.AlterField(
            model_name='basemap',
            name='url',
            field=models.FileField(upload_to='basemaps/'),
        ),
        migrations.AlterField(
            model_name='otherfiles',
            name='url',
            field=models.FileField(upload_to='otherfiles/'),
        ),
        migrations.AlterField(
            model_name='spatialitedbs',
            name='url',
            field=models.FileField(upload_to='spatialitedbs/'),
        ),
        migrations.AlterField(
            model_name='tag',
            name='url',
            field=models.FileField(upload_to='tags/'),
        ),
    ]
| StarcoderdataPython |
1644831 | """Computation of the dissimilarity representation of a set of objects
(streamlines) from a set of prototypes (streamlines) given a distance
function. Some prototype selection algorithms are available.
See <NAME>., <NAME>., <NAME>., The Approximation of
the Dissimilarity Projection, http://dx.doi.org/10.1109/PRNI.2012.13
Copyright 2017 <NAME>
MIT License
"""
from __future__ import division
import numpy as np
from dipy.tracking.distances import bundles_distances_mam
# joblib enables multicore computation of the dissimilarity matrix; it is
# optional, so fall back to single-core mode when it is not installed.
try:
    from joblib import Parallel, delayed, cpu_count
    joblib_available = True
except ImportError:
    # Narrowed from a bare ``except:`` so only a missing/broken joblib
    # disables parallelism (not, e.g., KeyboardInterrupt at import time).
    joblib_available = False
def furthest_first_traversal(tracks, k, distance, permutation=True):
    """This is the farthest first traversal (fft) algorithm which
    selects k streamlines out of a set of streamlines (tracks). This
    algorithms is known to be a good sub-optimal solution to the
    k-center problem, i.e. the k streamlines are sequentially selected
    in order to be far away from each other.

    Parameters
    ----------
    tracks : list or array of objects
        an iterable of streamlines.
    k : int
        the number of streamlines to select.
    distance : function
        a distance function between groups of streamlines, like
        dipy.tracking.distances.bundles_distances_mam
    permutation : bool
        True if you want to shuffle the streamlines first. No
        side-effect.

    Return
    ------
    idx : array of int
        an array of k indices of the k selected streamlines.

    Notes
    -----
    - Hochbaum, Dorit S. and Shmoys, David B., A Best Possible
      Heuristic for the k-Center Problem, Mathematics of Operations
      Research, 1985.
    - http://en.wikipedia.org/wiki/Metric_k-center

    See Also
    --------
    subset_furthest_first
    """
    if permutation:
        idx = np.random.permutation(len(tracks))
        tracks = tracks[idx]
    else:
        # BUGFIX: dtype was np.int, an alias removed in NumPy 1.24; the
        # builtin int is the documented replacement.
        idx = np.arange(len(tracks), dtype=int)
    # Greedily grow the selection: always add the streamline farthest
    # from its nearest already-selected one.
    T = [0]
    while len(T) < k:
        z = distance(tracks, tracks[T]).min(1).argmax()
        T.append(z)
    return idx[T]
def subset_furthest_first(tracks, k, distance, permutation=True, c=2.0):
    """The subset furthest first (sff) algorithm is a stochastic
    version of the furthest first traversal (fft) algorithm. Sff
    scales well on large set of objects (streamlines) because it
    does not depend on len(tracks).

    Parameters
    ----------
    tracks : list or array of objects
        an iterable of streamlines.
    k : int
        the number of streamlines to select.
    distance : function
        a distance function between groups of streamlines, like
        dipy.tracking.distances.bundles_distances_mam
    permutation : bool
        True if you want to shuffle the streamlines first. No
        side-effect.
    c : float
        Parameter to tune the probability that the random subset of
        streamlines is sufficiently representive of tracks. Typically
        2.0-3.0.

    Return
    ------
    idx : array of int
        an array of k indices of the k selected streamlines.

    See Also
    --------
    furthest_first_traversal

    Notes
    -----
    See: E. Olivetti, T.B. Nguyen, E. Garyfallidis, The Approximation
    of the Dissimilarity Projection, Proceedings of the 2012
    International Workshop on Pattern Recognition in NeuroImaging
    (PRNI), pp.85,88, 2-4 July 2012 doi:10.1109/PRNI.2012.13
    """
    # Subset size c*k*log(k), clamped to at least 1 (log(1) == 0).
    size = int(max(1, np.ceil(c * k * np.log(k))))
    if permutation:
        idx = np.random.permutation(len(tracks))[:size]
    else:
        # BUGFIX: was ``range(size)``, which cannot be fancy-indexed by the
        # integer array returned from furthest_first_traversal (TypeError).
        idx = np.arange(size)
    return idx[furthest_first_traversal(tracks[idx],
                                        k, distance,
                                        permutation=False)]
def dissimilarity(tracks, prototypes, distance, n_jobs=-1, verbose=False):
    """Compute the dissimilarity (distance) matrix between tracks and
    given prototypes. This function supports parallel (multicore)
    computation.

    Parameters
    ----------
    tracks : list or array of objects
        an iterable of streamlines.
    prototypes : iterable of objects
        The prototypes.
    distance : function
        Distance function between groups of streamlines.
    n_jobs : int
        If joblib is available, split the dissimilarity computation
        in n_jobs. If n_jobs is -1, then all available cpus/cores
        are used. The default value is -1.
    verbose : bool
        If true prints some messages. Deafault is True.

    Return
    ------
    dissimilarity_matrix : array (N, num_prototypes)

    See Also
    --------
    furthest_first_traversal, subset_furthest_first
    """
    if verbose:
        print("Computing the dissimilarity matrix.")
    if joblib_available and n_jobs != 1:
        if n_jobs is None or n_jobs == -1:
            n_jobs = cpu_count()
        if verbose:
            print("Parallel computation of the dissimilarity matrix: %s cpus." % n_jobs)
        if n_jobs > 1:
            # BUGFIX: .astype() used np.int, an alias removed in
            # NumPy 1.24; the builtin int is the documented replacement.
            tmp = np.linspace(0, len(tracks), n_jobs + 1).astype(int)
        else:  # corner case: joblib detected 1 cpu only.
            tmp = (0, len(tracks))
        # Consecutive (start, stop) row ranges, one per worker.
        chunks = zip(tmp[:-1], tmp[1:])
        dissimilarity_matrix = np.vstack(Parallel(n_jobs=n_jobs)(delayed(distance)(tracks[start:stop], prototypes) for start, stop in chunks))
    else:
        dissimilarity_matrix = distance(tracks, prototypes)
    if verbose:
        print("Done.")
    return dissimilarity_matrix
def compute_dissimilarity(tracks, num_prototypes=40,
                          distance=bundles_distances_mam,
                          prototype_policy='sff',
                          n_jobs=-1,
                          verbose=False):
    """Compute the dissimilarity (distance) matrix between tracks and
    prototypes, where prototypes are selected among the tracks with a
    given policy.

    Parameters
    ----------
    tracks : list or array of objects
        an iterable of streamlines.
    num_prototypes : int
        The number of prototypes. In most cases 40 is enough, which
        is the default value.
    distance : function
        Distance function between groups of streamlines. The
        default is bundles_distances_mam
    prototype_policy : string
        Shortname for the prototype selection policy
        ('random', 'fft' or 'sff'). The default value is 'sff'.
    n_jobs : int
        If joblib is available, split the dissimilarity computation
        in n_jobs. If n_jobs is -1, then all available cpus/cores
        are used. The default value is -1.
    verbose : bool
        If true prints some messages. Deafault is True.

    Return
    ------
    dissimilarity_matrix : array (N, num_prototypes)
    prototype_idx : array of int
        indices of the selected prototypes within ``tracks``.

    Raises
    ------
    ValueError
        If ``prototype_policy`` is not one of the supported policies.

    See Also
    --------
    furthest_first_traversal, subset_furthest_first
    """
    if verbose:
        print("Generating %s prototypes with policy %s." % (num_prototypes, prototype_policy))
    if prototype_policy == 'random':
        prototype_idx = np.random.permutation(len(tracks))[:num_prototypes]
    elif prototype_policy == 'fft':
        prototype_idx = furthest_first_traversal(tracks,
                                                 num_prototypes, distance)
    elif prototype_policy == 'sff':
        prototype_idx = subset_furthest_first(tracks, num_prototypes, distance)
    else:
        # BUGFIX: was a bare ``raise Exception`` with the message only
        # printed when verbose; ValueError subclasses Exception, so
        # existing callers catching Exception are unaffected.
        raise ValueError(
            "Prototype selection policy not supported: %s" % prototype_policy)

    prototypes = [tracks[i] for i in prototype_idx]
    dissimilarity_matrix = dissimilarity(tracks, prototypes, distance,
                                         n_jobs=n_jobs, verbose=verbose)
    return dissimilarity_matrix, prototype_idx
| StarcoderdataPython |
5031830 | import time
import os
from multiprocessing import Process, Queue
from cache import set_cache, string_cache, set_global_root
from Queue import Empty
import secondary_functions
from json_socket import make_socket, send_json, get_socket
# Module-level shared state: per-run session data, the active messaging
# socket (owned by the listener process), and the (worker, listener)
# process pair created by start_procs().
session = {}
socket = None
procs = None
def main():
    # Entry point: spin up the worker loop and the socket listener.
    init_thread()
def init_thread(*args, **kw):
    """Create the shared queue, start both processes, return (queue, procs)."""
    q = Queue()
    procs = start_procs(q, *args, **kw)
    return (q, procs,)
def start_procs(q, *args, **kw):
    """Create and start the worker and socket-listener processes.

    Also publishes the pair through the module-level ``procs`` global so
    kill_processes() can find them later.
    """
    global procs
    procs = create_procs(q, *args, **kw)
    proc, socket_proc = procs
    print 'starting procs'
    proc.start()
    socket_proc.start()
    return procs
def create_procs(q, *args, **kw):
    """Build (but do not start) the worker and listener Process objects.

    Both receive the shared queue as their first positional argument.
    """
    proc = Process(target=init, args=(q, ) + args, kwargs=kw)
    socket_proc = Process(target=socket_init, args=(q, ) + args, kwargs=kw)
    return (proc, socket_proc,)
def kill_processes():
    """Join both child processes.

    NOTE(review): join() blocks until each process exits on its own; the
    'kill' pill must already have been delivered (via the socket/queue)
    or this will hang -- confirm the shutdown ordering.
    """
    for proc in procs:
        print 'Joining', proc
        proc.join()
def socket_init(queue, *a, **kw):
    """Socket-listener process body.

    Opens a socket at ``kw['socket_uri']`` and blocks on recv. Only the
    literal 'kill' message is forwarded to the worker queue (and ends the
    loop); any other payload is logged by length and dropped.
    """
    global socket
    print 'socket_init', kw['socket_uri']
    socket_uri = kw.get('socket_uri', None)
    print 'init socket'
    socket = make_socket(socket_uri)
    run = 1
    while run:
        print '?',
        message = socket.recv()
        if message is not None:
            print '!', len(message)
        if message == 'kill':
            print 'socket kill pill'
            queue.put_nowait(message)
            run = 0
            continue
    print 'Finish socket'
    return socket
def init(queue, **kw):
    '''
    A initialization function for anything requiring a first boot.

    Worker process body: configures the cache root, initializes the
    secondary-function context, then enters the blocking start() loop.
    Keyword args: socket_uri (peer address), cache_path (cache root dir).
    '''
    socket_uri = kw.get('socket_uri', None)
    cache_path = kw.get('cache_path', None)
    # global socket
    # if socket_uri is not None:
    #     print 'Making loop socket'
    #     socket = make_socket(socket_uri)
    print 'Init main loop'
    # NOTE(review): basepath is computed but never used -- confirm it can go.
    basepath = os.path.abspath(os.path.dirname(__file__))
    set_global_root(cache_path)
    secondary_functions.init(socket_uri=socket_uri)
    start(queue, socket_uri=socket_uri)
def start(queue, socket_uri=None):
    """Worker loop: poll the queue, dispatch messages to step().

    Polls with a 200 ms sleep when the queue is empty. A 'kill' message
    stops the loop and, when a socket_uri is known, echoes the kill pill
    back over a fresh socket so the listener process also shuts down.
    """
    run = 1
    while run:
        message = None
        try:
            message = queue.get_nowait()
            print '.',
        except Empty:
            time.sleep(.2)
        if message == 'kill':
            run = 0
            if socket_uri is not None:
                socket = make_socket(socket_uri)
                socket.send('kill')
            continue
        run = step(message)
    print 'End Stepper'
def step(given_data=None):
    '''continuous call by a loop

    Applies any received payload to the secondary-function context;
    idle ticks (given_data is None) do nothing.
    '''
    if given_data is not None:
        print 'Loop react', given_data
        secondary_functions.apply_to_context(given_data)
    # Always signal the caller to keep looping.
    return 1
if __name__ == '__main__':
    # Script entry point.
    main()
| StarcoderdataPython |
3552386 | # Copyright (c) WFDetection, Inc. and its affiliates. All Rights Reserved
import logging
import numpy as np
import torch
from torch import nn
from cvpods.layers import (
Conv1d,
DeformConv,
FrozenBatchNorm2d,
ModulatedDeformConv,
ShapeSpec,
get_activation,
get_norm
)
from cvpods.modeling.nn_utils import weight_init
from .backbone import Backbone
__all__ = [
"ResNetBlockBase1d",
"BottleneckBlock1d",
"DeformBottleneckBlock1d",
"BasicStem1d",
"ResNet1d",
"make_stage1d",
"build_resnet1d_backbone",
]
class ResNetBlockBase1d(nn.Module):
    """Base class for 1-D ResNet blocks: records channel counts and stride
    and provides parameter freezing."""

    def __init__(self, in_channels, out_channels, stride):
        """
        The `__init__` method of any subclass should also contain these arguments.

        Args:
            in_channels (int):
            out_channels (int):
            stride (1,int):
        """
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # NOTE(review): the stride is stored as the tuple (1, stride) even
        # though this is a 1-D block. ResNet1d later folds these tuples
        # through np.prod, where the leading 1s are harmless -- confirm
        # nothing else expects a plain int here.
        self.stride = (1,stride)

    def freeze(self):
        """Freeze all parameters and convert BN to frozen BN; returns self."""
        for p in self.parameters():
            p.requires_grad = False
        # NOTE(review): applies the 2-D FrozenBatchNorm conversion to 1-D
        # modules -- confirm it also converts BatchNorm1d children.
        FrozenBatchNorm2d.convert_frozen_batchnorm(self)
        return self
class BasicBlock1d(ResNetBlockBase1d):
    """Two 3-tap convs with a residual connection (ResNet-18/34 style), 1-D."""

    def __init__(self, in_channels, out_channels, *, stride=1, norm="BN1d", activation=None,
                 **kwargs):
        """
        The standard block type for ResNet18 and ResNet34.

        Args:
            in_channels (int): Number of input channels.
            out_channels (int): Number of output channels.
            stride (int): Stride for the first conv.
            norm (str or callable): A callable that takes the number of
                channels and returns a `nn.Module`, or a pre-defined string
                (one of {"FrozenBN1d", "BN1d","GN"}).
        """
        super().__init__(in_channels, out_channels, stride)

        # Projection shortcut only when the channel count changes;
        # otherwise the identity is used.
        if in_channels != out_channels:
            self.shortcut = Conv1d(
                in_channels,
                out_channels,
                kernel_size=1,
                stride=stride,
                bias=False,
                norm=get_norm(norm, out_channels),
            )
        else:
            self.shortcut = None

        self.activation = get_activation(activation)

        self.conv1 = Conv1d(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=stride,
            padding=1,
            bias=False,
            norm=get_norm(norm, out_channels),
        )

        self.conv2 = Conv1d(
            out_channels,
            out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False,
            norm=get_norm(norm, out_channels),
        )

        # Caffe2-style MSRA (He) initialization for every conv.
        for layer in [self.conv1, self.conv2, self.shortcut]:
            if layer is not None:  # shortcut can be None
                weight_init.c2_msra_fill(layer)

    def forward(self, x):
        # conv1 -> act -> conv2, then add the (possibly projected) input
        # and apply the final activation.
        out = self.conv1(x)
        out = self.activation(out)
        out = self.conv2(out)

        if self.shortcut is not None:
            shortcut = self.shortcut(x)
        else:
            shortcut = x

        out += shortcut
        out = self.activation(out)
        return out
class BottleneckBlock1d(ResNetBlockBase1d):
    """1x1 -> 3 -> 1x1 bottleneck residual block (ResNet-50+ style), 1-D."""

    def __init__(
        self,
        in_channels,
        out_channels,
        *,
        bottleneck_channels,
        stride=1,
        num_groups=1,
        norm="BN1d",
        activation=None,
        stride_in_1x1=False,
        dilation=1,
    ):
        """
        Args:
            norm (str or callable): a callable that takes the number of
                channels and return a `nn.Module`, or a pre-defined string
                (one of {"FrozenBN1d", "BN1d","GN"}).
            stride_in_1x1 (bool): when stride==2, whether to put stride in the
                first 1x1 convolution or the bottleneck 3x3 convolution.
        """
        super().__init__(in_channels, out_channels, stride)

        # Projection shortcut only when the channel count changes.
        if in_channels != out_channels:
            self.shortcut = Conv1d(
                in_channels,
                out_channels,
                kernel_size=1,
                stride=stride,
                bias=False,
                norm=get_norm(norm, out_channels),
            )
        else:
            self.shortcut = None

        # The original MSRA ResNet models have stride in the first 1x1 conv
        # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
        # stride in the 3x3 conv
        stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)

        self.activation = get_activation(activation)

        # Reduce channels.
        self.conv1 = Conv1d(
            in_channels,
            bottleneck_channels,
            kernel_size=1,
            stride=stride_1x1,
            bias=False,
            norm=get_norm(norm, bottleneck_channels),
        )

        # (Grouped, possibly dilated) spatial conv at reduced width.
        self.conv2 = Conv1d(
            bottleneck_channels,
            bottleneck_channels,
            kernel_size=3,
            stride=stride_3x3,
            padding=(1 * dilation),
            bias=False,
            groups=num_groups,
            dilation=dilation,
            norm=get_norm(norm, bottleneck_channels),
        )

        # Restore channels.
        self.conv3 = Conv1d(
            bottleneck_channels,
            out_channels,
            kernel_size=1,
            bias=False,
            norm=get_norm(norm, out_channels),
        )

        for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
            if layer is not None:  # shortcut can be None
                weight_init.c2_msra_fill(layer)

    def forward(self, x):
        out = self.conv1(x)
        out = self.activation(out)

        out = self.conv2(out)
        out = self.activation(out)

        out = self.conv3(out)

        if self.shortcut is not None:
            shortcut = self.shortcut(x)
        else:
            shortcut = x

        out += shortcut
        out = self.activation(out)
        return out
class DeformBottleneckBlock1d(ResNetBlockBase1d):
    """Bottleneck block whose middle conv is a deformable convolution."""

    def __init__(
        self,
        in_channels,
        out_channels,
        *,
        bottleneck_channels,
        stride=1,
        num_groups=1,
        norm="BN1d",
        activation=None,
        stride_in_1x1=False,
        dilation=1,
        deform_modulated=False,
        deform_num_groups=1,
    ):
        """
        Similar to :class:`BottleneckBlock`, but with deformable conv in the 3x3 convolution.
        """
        super().__init__(in_channels, out_channels, stride)
        self.deform_modulated = deform_modulated

        if in_channels != out_channels:
            self.shortcut = Conv1d(
                in_channels,
                out_channels,
                kernel_size=1,
                stride=stride,
                bias=False,
                norm=get_norm(norm, out_channels),
            )
        else:
            self.shortcut = None

        stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)

        self.activation = get_activation(activation)

        self.conv1 = Conv1d(
            in_channels,
            bottleneck_channels,
            kernel_size=1,
            stride=stride_1x1,
            bias=False,
            norm=get_norm(norm, bottleneck_channels),
        )

        if deform_modulated:
            deform_conv_op = ModulatedDeformConv
            # offset channels are 2 or 3 (if with modulated) * kernel_size * kernel_size
            # NOTE(review): 27/18 = 3 (or 2) * 3 * 3 is the 2-D 3x3-kernel
            # count; for a 1-D kernel_size=3 one would expect 9/6. Confirm
            # these deform ops really operate on 2-D offsets here.
            offset_channels = 27
        else:
            deform_conv_op = DeformConv
            offset_channels = 18

        # Plain conv that predicts the sampling offsets (and modulation
        # masks when deform_modulated) for conv2.
        self.conv2_offset = Conv1d(
            bottleneck_channels,
            offset_channels * deform_num_groups,
            kernel_size=3,
            stride=stride_3x3,
            padding=(1 * dilation),
            dilation=dilation,
        )
        self.conv2 = deform_conv_op(
            bottleneck_channels,
            bottleneck_channels,
            kernel_size=3,
            stride=stride_3x3,
            padding=(1 * dilation),
            bias=False,
            groups=num_groups,
            dilation=dilation,
            deformable_groups=deform_num_groups,
            norm=get_norm(norm, bottleneck_channels),
        )

        self.conv3 = Conv1d(
            bottleneck_channels,
            out_channels,
            kernel_size=1,
            bias=False,
            norm=get_norm(norm, out_channels),
        )

        for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
            if layer is not None:  # shortcut can be None
                weight_init.c2_msra_fill(layer)

        # Offsets start at zero so the block initially behaves like a
        # regular convolution.
        nn.init.constant_(self.conv2_offset.weight, 0)
        nn.init.constant_(self.conv2_offset.bias, 0)

    def forward(self, x):
        out = self.conv1(x)
        out = self.activation(out)

        if self.deform_modulated:
            # Split the offset prediction into x/y offsets and the
            # sigmoid-activated modulation mask.
            offset_mask = self.conv2_offset(out)
            offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1)
            offset = torch.cat((offset_x, offset_y), dim=1)
            mask = mask.sigmoid()
            out = self.conv2(out, offset, mask)
        else:
            offset = self.conv2_offset(out)
            out = self.conv2(out, offset)
        out = self.activation(out)

        out = self.conv3(out)

        if self.shortcut is not None:
            shortcut = self.shortcut(x)
        else:
            shortcut = x

        out += shortcut
        out = self.activation(out)
        return out
def make_stage1d(block_class, num_blocks, first_stride, **kwargs):
    """
    Create a resnet stage by creating many blocks.

    Args:
        block_class (class): a subclass of ResNetBlockBase
        num_blocks (int):
        first_stride (int): the stride of the first block. The other blocks will have stride=1.
            A `stride` argument will be passed to the block constructor.
        kwargs: other arguments passed to the block constructor.

    Returns:
        list[nn.Module]: a list of block module.
    """
    stage_blocks = []
    for block_idx in range(num_blocks):
        # Only the first block downsamples; later blocks keep stride 1 and
        # take the previous block's output channels as input.
        stride = first_stride if block_idx == 0 else 1
        stage_blocks.append(block_class(stride=stride, **kwargs))
        kwargs["in_channels"] = kwargs["out_channels"]
    return stage_blocks
class BasicStem1d(nn.Module):
    """ResNet stem for 1-D inputs: either a single 7-tap conv or a deep
    3-conv stem, followed by activation and stride-2 max pooling."""

    def __init__(self, in_channels=3, out_channels=64, norm="BN1d", activation=None,
                 deep_stem=False, stem_width=32):
        """
        Args:
            norm (str or callable): a callable that takes the number of
                channels and return a `nn.Module`, or a pre-defined string
                (one of {"FrozenBN1d", "BN1d", "GN"}).
        """
        super().__init__()
        self.deep_stem = deep_stem

        if self.deep_stem:
            # NOTE(review): the deep stem hardcodes 3 input channels and
            # ignores the in_channels argument (matches the upstream 2-D
            # code) -- confirm this is correct for 1-D inputs.
            self.conv1_1 = Conv1d(
                3,
                stem_width,
                kernel_size=3,
                stride=2,
                padding=1,
                bias=False,
                norm=get_norm(norm, stem_width),
            )
            self.conv1_2 = Conv1d(
                stem_width,
                stem_width,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=False,
                norm=get_norm(norm, stem_width),
            )
            self.conv1_3 = Conv1d(
                stem_width,
                stem_width * 2,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=False,
                norm=get_norm(norm, stem_width * 2),
            )
            for layer in [self.conv1_1, self.conv1_2, self.conv1_3]:
                if layer is not None:
                    weight_init.c2_msra_fill(layer)
        else:
            self.conv1 = Conv1d(
                in_channels,
                out_channels,
                kernel_size=7,
                stride=2,
                padding=3,
                bias=False,
                norm=get_norm(norm, out_channels),
            )
            weight_init.c2_msra_fill(self.conv1)
        self.activation = get_activation(activation)
        self.max_pool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

    def forward(self, x):
        if self.deep_stem:
            x = self.conv1_1(x)
            x = self.activation(x)
            x = self.conv1_2(x)
            x = self.activation(x)
            x = self.conv1_3(x)
            x = self.activation(x)
        else:
            x = self.conv1(x)
            x = self.activation(x)
        x = self.max_pool(x)
        return x

    @property
    def out_channels(self):
        # Report the channel count of whichever final conv is active.
        if self.deep_stem:
            return self.conv1_3.out_channels
        else:
            return self.conv1.out_channels

    @property
    def stride(self):
        return 4  # = stride 2 conv -> stride 2 max pool
class ResNet1d(Backbone):
    """1-D ResNet backbone assembled from a stem and residual stages,
    optionally topped by a global-average-pool + linear classifier."""

    def __init__(self, stem, stages, num_classes=None, out_features=None, zero_init_residual=False):
        """
        Args:
            stem (nn.Module): a stem module
            stages (list[list[ResNetBlock]]): several (typically 4) stages,
                each contains multiple :class:`ResNetBlockBase`.
            num_classes (None or int): if None, will not perform classification.
            out_features (list[str]): name of the layers whose outputs should
                be returned in forward. Can be anything in "stem", "linear", or "res2" ...
                If None, will return the output of the last layer.
            zero_init_residual (bool): zero-init the last norm of each
                residual branch so blocks start as identity mappings.
        """
        super(ResNet1d, self).__init__()
        self.stem = stem
        self.num_classes = num_classes

        current_stride = self.stem.stride
        self._out_feature_strides = {"stem": current_stride}
        self._out_feature_channels = {"stem": self.stem.out_channels}

        self.stages_and_names = []
        for i, blocks in enumerate(stages):
            for block in blocks:
                assert isinstance(block, ResNetBlockBase1d), block
                curr_channels = block.out_channels
            stage = nn.Sequential(*blocks)
            # Stages are conventionally named res2..res5.
            name = "res" + str(i + 2)
            self.add_module(name, stage)
            self.stages_and_names.append((stage, name))
            # Cumulative stride: np.prod flattens the (1, stride) tuples
            # stored by ResNetBlockBase1d, so the leading 1s are harmless.
            self._out_feature_strides[name] = current_stride = int(
                current_stride * np.prod([k.stride for k in blocks])
            )
            self._out_feature_channels[name] = blocks[-1].out_channels

        if num_classes is not None:
            self.avgpool = nn.AdaptiveAvgPool1d(1)
            self.linear = nn.Linear(curr_channels, num_classes)
            nn.init.normal_(self.linear.weight, std=0.01)
            name = "linear"

        # Default to returning only the last layer's output.
        if out_features is None:
            out_features = [name]
        self._out_features = out_features
        assert len(self._out_features)
        children = [x[0] for x in self.named_children()]
        for out_feature in self._out_features:
            assert out_feature in children, "Available children: {}".format(", ".join(children))

        if zero_init_residual:
            # Zero the last norm of each residual branch (conv3 for
            # bottlenecks, conv2 for basic blocks).
            for m in self.modules():
                if isinstance(m, BottleneckBlock1d):
                    nn.init.constant_(m.conv3.norm.weight, 0)
                elif isinstance(m, BasicBlock1d):
                    nn.init.constant_(m.conv2.norm.weight, 0)

    def forward(self, x):
        # Collect the outputs of every requested feature level.
        outputs = {}
        x = self.stem(x)
        if "stem" in self._out_features:
            outputs["stem"] = x
        for stage, name in self.stages_and_names:
            x = stage(x)
            if name in self._out_features:
                outputs[name] = x
        if self.num_classes is not None:
            x = self.avgpool(x)
            x = torch.flatten(x, 1)
            x = self.linear(x)
            if "linear" in self._out_features:
                outputs["linear"] = x
        return outputs

    def output_shape(self):
        # Channel count and cumulative stride per requested feature.
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
            )
            for name in self._out_features
        }
def build_resnet1d_backbone(cfg, input_shape):
    """
    Create a ResNet instance from config.

    Returns:
        ResNet: a :class:`ResNet` instance.

    NOTE(review): for depth 18/34 this function mutates ``cfg`` in place
    (RES2_OUT_CHANNELS, RES5_DILATION) -- callers reusing the cfg object
    afterwards will observe the changed values; confirm that is intended.
    """
    depth = cfg.MODEL.RESNETS.DEPTH
    stem_width = {18: 32, 34: 32, 50: 32, 101: 64, 152: 64, 200: 64, 269: 64}[depth]
    deep_stem = cfg.MODEL.RESNETS.DEEP_STEM
    if not deep_stem:
        # ResNeSt-style radix splits require the deep stem.
        assert getattr(cfg.MODEL.RESNETS, "RADIX", 1) <= 1, \
            "cfg.MODEL.RESNETS.RADIX: {} > 1".format(cfg.MODEL.RESNETS.RADIX)
    # need registration of new blocks/stems?
    norm = cfg.MODEL.RESNETS.NORM
    activation = cfg.MODEL.RESNETS.ACTIVATION

    stem = BasicStem1d(
        in_channels=input_shape.channels,
        out_channels=cfg.MODEL.RESNETS.STEM_OUT_CHANNELS,
        norm=norm,
        activation=activation,
        deep_stem=deep_stem,
        stem_width=stem_width,
    )
    # NOTE(review): freeze_at is hardcoded to 0 here (the cfg read is
    # commented out), so nothing is ever frozen regardless of config --
    # confirm this override is deliberate.
    # freeze_at = cfg.MODEL.BACKBONE.FREEZE_AT
    freeze_at = 0

    if freeze_at >= 1:
        for p in stem.parameters():
            p.requires_grad = False
        stem = FrozenBatchNorm2d.convert_frozen_batchnorm(stem)

    # fmt: off
    out_features = cfg.MODEL.RESNETS.OUT_FEATURES
    num_groups = cfg.MODEL.RESNETS.NUM_GROUPS
    width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP
    bottleneck_channels = num_groups * width_per_group
    in_channels = cfg.MODEL.RESNETS.STEM_OUT_CHANNELS
    out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
    stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1
    res5_dilation = cfg.MODEL.RESNETS.RES5_DILATION
    num_classes = cfg.MODEL.RESNETS.NUM_CLASSES
    zero_init_residual = cfg.MODEL.RESNETS.ZERO_INIT_RESIDUAL
    # fmt: on
    assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)

    # Number of residual blocks per stage for each supported depth.
    num_blocks_per_stage = {
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
        269: [3, 30, 48, 8],
    }[depth]

    # Avoid creating variables without gradients
    # which consume extra memory and may cause allreduce to fail
    out_stage_idx = [
        {"res2": 2, "res3": 3, "res4": 4, "res5": 5, "linear": 5}[f] for f in out_features]
    max_stage_idx = max(out_stage_idx)

    # Apply Deformable Convolution in stages
    # Specify if apply deform_conv on Res2, Res3, Res4, Res5
    deform_on_per_stage = getattr(cfg.MODEL.RESNETS,
                                  "DEFORM_ON_PER_STAGE",
                                  [False] * (max_stage_idx - 1))

    '''[WFD 2022]'''
    # R18/R34 use BasicBlock and therefore fixed 64-channel res2 output and
    # no res5 dilation; the cfg is overwritten so downstream consumers see
    # consistent values.
    if depth in [18, 34]:
        cfg.MODEL.RESNETS.RES2_OUT_CHANNELS = 64
        out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS
        cfg.MODEL.RESNETS.RES5_DILATION = 1
        res5_dilation = 1

    if depth in [18, 34]:
        assert out_channels == 64, "Must set MODEL.RESNETS.RES2_OUT_CHANNELS = 64 for R18/R34"
        assert not any(
            deform_on_per_stage
        ), "MODEL.RESNETS.DEFORM_ON_PER_STAGE unsupported for R18/R34"
        assert res5_dilation == 1, "Must set MODEL.RESNETS.RES5_DILATION = 1 for R18/R34"
        assert num_groups == 1, "Must set MODEL.RESNETS.NUM_GROUPS = 1 for R18/R34"

    stages = []
    in_channels = 2 * stem_width if deep_stem else in_channels
    for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)):
        # When res5 is dilated (dilation==2) it keeps stride 1 to preserve
        # resolution; otherwise every stage after the first downsamples.
        dilation = res5_dilation if stage_idx == 5 else 1
        first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
        stage_kargs = {
            "num_blocks": num_blocks_per_stage[idx],
            "first_stride": first_stride,
            "in_channels": in_channels,
            "out_channels": out_channels,
            "norm": norm,
            "activation": activation,
        }
        # Use BasicBlock for R18 and R34.
        if depth in [18, 34]:
            stage_kargs["block_class"] = BasicBlock1d
        else:
            stage_kargs["bottleneck_channels"] = bottleneck_channels
            stage_kargs["stride_in_1x1"] = stride_in_1x1
            stage_kargs["dilation"] = dilation
            stage_kargs["num_groups"] = num_groups
            if deform_on_per_stage[idx]:
                stage_kargs["block_class"] = DeformBottleneckBlock1d
                # Use True to use modulated deform_conv (DeformableV2);
                # Use False for DeformableV1.
                stage_kargs["deform_modulated"] = cfg.MODEL.RESNETS.DEFORM_MODULATED
                # Number of groups in deformable conv.
                stage_kargs["deform_num_groups"] = cfg.MODEL.RESNETS.DEFORM_NUM_GROUPS
            else:
                stage_kargs["block_class"] = BottleneckBlock1d
        blocks = make_stage1d(**stage_kargs)
        # Each subsequent stage doubles the channel widths.
        in_channels = out_channels
        out_channels *= 2
        bottleneck_channels *= 2

        if freeze_at >= stage_idx:
            for block in blocks:
                block.freeze()
        stages.append(blocks)
    return ResNet1d(stem,
                    stages,
                    num_classes=num_classes,
                    out_features=out_features,
                    zero_init_residual=zero_init_residual)
| StarcoderdataPython |
3292950 | <filename>bitbot/services/__init__.py<gh_stars>0
from .service import ServiceInterface, Order, OrderDirection, OrderType, CandleInterval, TimeInForce, now_milliseconds, date_time_milliseconds, printProgressBar
from .bittrex import BitTrex | StarcoderdataPython |
216489 | <gh_stars>1-10
import sample_simulations.basic_sim
if __name__ == "__main__":
    # NOTE(review): this is a bare attribute reference -- it does not call
    # anything; the simulation only runs through import side effects (if
    # any). Possibly a missing call such as
    # sample_simulations.basic_sim.main() -- confirm.
    sample_simulations.basic_sim
5033401 | <filename>data_structures/sorting_algos/mergesort/test_mergesort.py
import pytest
from mergesort import mergesort, merge
def test_empty_list_returns_empty_list():
    """Test mergesort on an empty list returns an empty list."""
    empty = []
    assert mergesort(empty) == []
def test_list_with_one_value():
    """A single-element list is returned unchanged."""
    assert mergesort([8]) == [8]
def test_list_with_two_values():
    """Two out-of-order values come back sorted."""
    assert mergesort([8, 3]) == [3, 8]
def test_list_with_odd_number_of_values():
    """An odd-length list sorts correctly despite uneven halves."""
    assert mergesort([8, 3, 7, 9, 5]) == [3, 5, 7, 8, 9]
def test_list_with_unbalanced_halves():
    """A list whose values are skewed toward one half still sorts."""
    assert mergesort([2, 4, 3, 8, 1, 9, 10, 13]) == [1, 2, 3, 4, 8, 9, 10, 13]
def test_merge_merges_two_pairs():
    """merge() interleaves two equal-length sorted lists."""
    assert merge([1, 3, 5], [2, 4, 6]) == [1, 2, 3, 4, 5, 6]
def test_merge_merges_uneven_lists():
    """merge() handles sorted inputs of different lengths."""
    assert merge([1, 3, 5], [2, 4]) == [1, 2, 3, 4, 5]
def test_merge_on_unbalanced_lists():
    """merge() handles halves whose value ranges overlap unevenly."""
    assert merge([2, 3, 4, 8], [1, 9, 10, 13]) == [1, 2, 3, 4, 8, 9, 10, 13]
| StarcoderdataPython |
3450344 | <filename>BunqWebApp/urls.py
"""MoneyWebApp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from Manager.views import ManagerView, ManagerFormView
from BunqAPI.views import GenerateView, MyBunqView, APIView, RedirectView
from BunqWebApp import views
from filecreator.views import APIView as filecreator
from filecreator.views import FileDownloaderView as file_downlaoder
from bunq_bot.views import WebHook
# from django.contrib.auth import views as auth_views
'''
Each app needs to get its own URL conf. It works fine this way but its not
ideal.
'''
# URL routing table: auth/account pages, Manager UI, the bunq API proxy
# (several positional-capture variants), file creation/download, the
# Telegram bot webhook, and captcha.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', views.HomeView.as_view(), name='home'),
    url(r'^account/register/$', views.RegisterView.as_view(), name='register'),
    url(r'^account/logout/$', views.LogOutView.as_view(), name='logout'),
    url(r'^accounts/login/$', views.LogInView.as_view(), name='login'),
    url(r'^accounts/login/old$', views.MigrationService.as_view()),
    # NOTE(review): the route name 'my_bunq' is used twice (here and below);
    # Django's reverse() will resolve only one of them -- confirm intended.
    url(r'^accounts/profile/$', RedirectView.as_view(), name='my_bunq'),
    # NOTE(review): the inline (?i) flag in the middle of a pattern is
    # rejected/deprecated by newer Python `re` versions -- confirm it still
    # works on the targeted Python version.
    url(r'^Manager/(?i)$', ManagerView.as_view(), name='Manager'),
    url(r'^Manager/form/(?i)$', ManagerFormView.as_view(), name='managerForm'),
    url(r'^generate/$', GenerateView.as_view(), name='generate'),
    url(r'^my_bunq/$', MyBunqView.as_view(), name='my_bunq'),
    url(r'^API/(?P<selector>[\w-]+)$', APIView.as_view()), # noqa,
    url(r'^API/(?P<selector>[\w-]+)/(?P<user_id>\d*)$', APIView.as_view()),
    url(r'^API/(?P<selector>[\w-]+)/(?P<user_id>\d*)/(?P<account_id>\d*)$',
        APIView.as_view()),
    url(r'^API/(?P<selector>[\w-]+)/(?P<user_id>\d*)/(?P<account_id>\d*)/'
        r'(?P<payment_id>\d*)$', APIView.as_view()),
    url(r'^API/(?P<selector>[\w-]+)/(?P<user_id>\d*)/(?P<account_id>\d*)/'
        r'(?P<statement_format>[\w-]+)/(?P<date_start>[\w-]+)/'
        r'(?P<date_end>[\w-]+)/(?P<regional_format>[\w-]+)$',
        APIView.as_view()),
    url(r'^API/filecreator/(?P<selector>[\w-]+)/(?P<extension>[\w-]+)$',
        filecreator.as_view(), name='API'),
    url(r'^filecreator/download$', file_downlaoder.as_view(),
        name='filecreator'),
    url(r'bot/%s$' % settings.TELEGRAM_TOKEN, WebHook.as_view(), name='bot'),
    url(r'^captcha/', include('captcha.urls')),
    # url(r'^.*$', views.RedirectView.as_view(), name='home'),
    # NOTE: this redirect is not working properly
] + static(settings.STATIC_URL)
| StarcoderdataPython |
4867341 | <reponame>timvideos/slidelint_site
import unittest
from pyramid import testing
from testfixtures import compare
class CounterModelTests(unittest.TestCase):
    def test_constructor(self):
        """A fresh Counter starts at zero and increment() returns 1."""
        from slidelint_site.models import Counter
        counter = Counter()
        compare(counter.count, 0)
        result = counter.increment()
        compare(result, 1)
        compare(counter.count, 1)
class AppmakerTests(unittest.TestCase):
    def _callFUT(self, zodb_root):
        """Invoke the function under test with the given ZODB root."""
        from slidelint_site.models import appmaker
        return appmaker(zodb_root)

    def test_it(self):
        """appmaker seeds an 'app_root' Counter starting at zero."""
        zodb_root = {}
        self._callFUT(zodb_root)
        compare(zodb_root['app_root'].count, 0)
class ViewMainTests(unittest.TestCase):
    def test_it(self):
        """main_view returns the counter value taken from the context."""
        from slidelint_site.views import main_view
        ctx = testing.DummyResource()
        ctx.count = 0
        req = testing.DummyRequest()
        compare(main_view(ctx, req), {'count': 0})
| StarcoderdataPython |
230800 | <gh_stars>0
import markdown as markdown_library
from django import template
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter
def markdown(value):
    """Render *value* (Markdown source) to HTML marked safe for templates."""
    return mark_safe(markdown_library.markdown(value))
| StarcoderdataPython |
3350190 | <reponame>bbrighttaer/DCCA_demo<filename>objective.py
# Author: bbsipingsoft
# Project: DCCA_demo
# Date: 4/24/19
# Time: 1:16 PM
# File: objective.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
def dcca_objective(pred1, pred2, reg):
    """Deep CCA objective (total-correlation maximization) and its gradients.

    Assumes ``pred1`` and ``pred2`` are row-major network outputs of shape
    [n_samples, n_output_nodes]; each view is transposed before the centered
    data matrices are formed.

    :param pred1: predictions from the view-1 model
    :param pred2: predictions from the view-2 model
    :param reg: regularization added to each view's auto-covariance. Must be > 0
    :return: tuple ``(-corr, dH1, dH2, A, B)`` -- the *negated* total
        correlation (so callers can minimize it), the gradients w.r.t. each
        view's outputs (transposed back to row-major), and the two CCA
        projection matrices for view 1 and view 2.
    """
    # NOTE(review): torch.svd is deprecated in recent PyTorch in favor of
    # torch.linalg.svd -- confirm the torch version this targets.
    H1 = torch.Tensor(pred1).t()
    H2 = torch.Tensor(pred2).t()
    o = H1.shape[0]  # latent dimension
    m = H1.shape[1]  # number of training samples
    ones = torch.ones(m, m)
    I = torch.eye(o, o)  # identity matrix
    # centered data matrices (subtract per-row means via the ones matrix)
    H1_hat = H1.sub(torch.mul(H1.mm(ones), (1.0 / m)))
    H2_hat = H2.sub(torch.mul(H2.mm(ones), (1.0 / m)))
    # cross-covariance and regularized auto-covariance estimates
    SigmaHat12 = torch.mul(H1_hat.mm(torch.t(H2_hat)), 1.0 / (m - 1))
    SigmaHat11 = torch.add(torch.mul(H1_hat.mm(torch.t(H1_hat)), 1.0 / (m - 1)), torch.mul(reg, I))
    SigmaHat22 = torch.add(torch.mul(H2_hat.mm(torch.t(H2_hat)), 1.0 / (m - 1)), torch.mul(reg, I))
    # SVD decomposition for square root calculation
    U1, D1, V1 = torch.svd(SigmaHat11)
    U2, D2, V2 = torch.svd(SigmaHat22)
    # D1, V1 = torch.symeig(SigmaHat11, eigenvectors=True)
    # epsilon = 1e-12
    # D1_indices = torch.nonzero(D1.squeeze() > epsilon)
    # D1 = D1[D1_indices].squeeze()
    # V1 = V1[D1_indices].squeeze()
    #
    # D2, V2 = torch.symeig(SigmaHat22, eigenvectors=True)
    # D2_indices = torch.nonzero(D2.squeeze() > epsilon)
    # D2 = D2[D2_indices].squeeze()
    # V2 = V2[D2_indices].squeeze()
    #
    # # calculate root inverse of correlation matrices
    # SigmaHat11RootInv = V1.mm(torch.diag(torch.pow(D1, -0.5))).mm(torch.t(V1))
    # SigmaHat22RootInv = V2.mm(torch.diag(torch.pow(D2, -0.5))).mm(torch.t(V2))
    # inverse square roots Sigma^(-1/2) built from the SVD factors
    SigmaHat11RootInv = U1.mm(torch.diag(torch.pow(D1, -0.5))).mm(torch.t(V1))
    SigmaHat22RootInv = U2.mm(torch.diag(torch.pow(D2, -0.5))).mm(torch.t(V2))
    # Total correlation
    T = SigmaHat11RootInv.mm(SigmaHat12).mm(SigmaHat22RootInv)
    corr = torch.sqrt(torch.trace(torch.t(T).mm(T)))
    # corr = torch.trace(torch.sqrt(torch.t(T).mm(T)))
    # Gradient calculations
    U, D, V = torch.svd(T)
    Delta12 = SigmaHat11RootInv.mm(U).mm(torch.t(V)).mm(SigmaHat22RootInv)
    Delta11 = torch.mul(SigmaHat11RootInv.mm(U).mm(torch.diag(D)).mm(U.t()).mm(SigmaHat11RootInv), -0.5)
    Delta22 = torch.mul(SigmaHat22RootInv.mm(U).mm(torch.diag(D)).mm(U.t()).mm(SigmaHat22RootInv), -0.5)
    # dcorr(H1, H2) / dH1 and dcorr(H1, H2) / dH2
    dH1 = torch.mul(torch.mul(Delta11.mm(H1_hat), 2.0) + Delta12.mm(H2_hat), 1.0 / (m - 1))
    dH2 = torch.mul(torch.mul(Delta22.mm(H2_hat), 2.0) + Delta12.mm(H1_hat), 1.0 / (m - 1))
    return -corr, dH1.t(), dH2.t(), SigmaHat11RootInv.mm(U), SigmaHat22RootInv.mm(V)
| StarcoderdataPython |
5034854 | <reponame>comscope/comsuite<gh_stars>10-100
import os
import glob
import warnings
def get_wiencase():
    """Determine the WIEN2k case name for the current working directory.

    The case is normally the directory name (``<case>.struct`` must exist).
    When it does not (e.g. when the job was submitted from a cluster with a
    different directory name), fall back to the ``.struct`` files present:

    * no ``.struct`` file  -> raise ``Exception``
    * exactly one          -> its basename is the case
    * several              -> heuristically pick the head shared by the most
                              files in the directory, and warn

    :return: the case name (str)
    :raises Exception: if no ``.struct`` file is present
    """
    case = os.path.basename(os.getcwd())
    # directory name is not case (happens when submitting to cluster)
    if not os.path.isfile(case + '.struct'):
        files = glob.glob('*.struct')
        if len(files) < 1:
            # parenthesized raise works on both Python 2 and 3
            # (the original `raise Exception, '...'` was Python-2-only syntax)
            raise Exception('No struct file present.')
        elif len(files) > 1:
            # heuristic algorithm to determine case:
            # the struct file whose head is the same as the most files in this
            # directory is most likely the case
            candidates = [os.path.splitext(f)[0] for f in files]
            allheads = [os.path.splitext(f)[0] for f in os.listdir('.')]
            counts = [len([1 for f in allheads if f == candidate])
                      for candidate in candidates]
            index = counts.index(max(counts))
            case = candidates[index]
            warnings.warn(
                " case heuristically selected from multiple possibilities!")
        else:
            case, ext = os.path.splitext(os.path.basename(files[0]))
    return case
| StarcoderdataPython |
class Jogador():
    """A card-game player holding a hand of cards and a name."""

    def __init__(self):
        # Hand starts empty; name stays blank until setNome() is called.
        self.cartas = []
        self.nome = ""

    def adicionar(self, cartas):
        """Append the given cards to the player's hand."""
        self.cartas += cartas

    def retirar(self, indexs):
        """Remove the cards at the given hand positions.

        The selected slots are first overwritten with an "X" sentinel so the
        indices keep referring to the original positions, then every marked
        slot is dropped in a single pass.
        """
        for posicao in indexs:
            self.cartas[posicao] = "X"
        self.cartas = [carta for carta in self.cartas if carta != "X"]

    def verCartas(self):
        """Return the current hand."""
        return self.cartas

    def setNome(self, nome):
        """Set the player's display name."""
        self.nome = nome
3277749 | from math import ceil
import matplotlib.pyplot as plt
# One fixed matplotlib color per compression algorithm.
colors = {
    'uncompressed': 'g',
    'fastlz': 'b',
    'zlib': 'r',
}
# Benchmark results per comparison type.  For each algorithm the value is
# [compression_levels, measurements] -- two parallel lists plotted as x/y.
data = {
    'indexing': {
        'algo': {
            'uncompressed': [[0], [70.49318158458]],
            'fastlz': [[1, 2], [89.33131507, 155.041745]],
            'zlib': [[0, 1, 3, 6, 9], [88.74085122, 107.4996464, 116.3000386, 122.0017465, 226.4432716]],
        },
        'y_label': 'Time (s)'
    },
    'space': {
        'algo': {
            'uncompressed': [[0], [1.452596008]],
            'fastlz': [[1, 2], [0.681222584, 0.68060137]],
            'zlib': [[0, 1, 3, 6, 9], [1.459414745, 0.433347471, 0.428881416, 0.405332483, 0.40376754]],
        },
        'y_label': 'Space (GB)'
    },
    'search1': {
        'algo': {
            'uncompressed': [[0], [45.5873693]],
            'fastlz': [[1, 2], [59.18466978, 56.41575026]],
            'zlib': [[0, 1, 3, 6, 9], [54.34283227, 74.10076984, 73.39287504, 66.19744482, 67.62356376]],
        },
        'y_label': 'Time (ms)'
    },
    'search2': {
        'algo': {
            'uncompressed': [[0], [9.189936953]],
            'fastlz': [[1, 2], [12.98639579, 12.19613769]],
            'zlib': [[0, 1, 3, 6, 9], [11.95358581, 12.96278894, 12.6532173, 12.20321933, 12.62452388]],
        },
        'y_label': 'Time (ms)'
    }
}
# Render one line+marker chart per comparison type and save it as a PNG
# named compression-<comparison_type>.png in the working directory.
for comparison_type in data:
    comparison = data[comparison_type]
    plt.figure()
    comparison_algo = comparison['algo']
    for algo in comparison_algo:
        color = colors[algo]
        algo_values = comparison_algo[algo]
        plt.plot(*algo_values, label=algo, color=color)
        plt.scatter(*algo_values, marker='v', color=color)
    plt.xlabel('Compression Level')
    plt.ylabel(comparison['y_label'])
    plt.xticks(range(10))
    plt.gca().set_ylim(bottom=0)
    plt.title(f'Compression Performance- {comparison_type}')
    plt.legend()
    plt.savefig(f'compression-{comparison_type}.png')
| StarcoderdataPython |
11280453 | import pytest
import allure
from domain_model.group import Group
import random
@allure.severity(allure.severity_level.CRITICAL)
@allure.title('Test modify group')
@pytest.mark.regression
def test_modify_group(app, json_groups: Group):
    """Modify a random existing group and verify the list is updated in place.

    Uses the ``app`` fixture for application access and ``json_groups`` as
    the modification payload.  Ensures at least one group exists first.
    """
    if app.group.count() == 0:
        app.group.create(Group(name='test'))
    old_groups = app.group.get_group_list()
    # pick an arbitrary group to modify
    group = random.choice(old_groups)
    app.group.modify_group_by_id(group.id, json_groups)
    new_groups = app.group.get_group_list()
    with allure.step('comparison of the number of groups before and after modify'):
        assert len(old_groups) == len(new_groups)
    # apply the same modification to the local copy, then compare sets
    # (sorted, since ordering from the app is not guaranteed)
    old_groups[old_groups.index(group)] = group + json_groups
    with allure.step('comparison of the list of groups before and after modify'):
        assert sorted(old_groups) == sorted(new_groups)
273874 | <gh_stars>0
from distortion.distortion import (
init,
Distorter,
Combined,
Identity,
Dropout,
Cropout,
Crop,
Resize,
GaussianBlur,
JPEGBase,
JPEGCompression,
JPEGMask,
JPEGDrop,
) | StarcoderdataPython |
3385544 | from argparse import ArgumentParser
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
import sys
from tqdm import tqdm
def get_tusou_data(fpath='/home/jovyan/fast-data/tusou_test/script/valid.lst'):
    """Parse a "<image_path> <label>" listing file into {class_name: [paths]}.

    Each line holds an image path and an integer label indexing PORN6.
    Two legacy path prefixes are rewritten to the current data layout.

    :param fpath: path to the listing file
    :return: dict mapping class name to the list of (rewritten) image paths
    """
    # `with` guarantees the file handle is closed (the original leaked it)
    with open(fpath, 'r') as listing:
        lines = listing.readlines()
    PORN6 = ['hentai', 'anime', 'norm', 'porn', 'sexy', 'very_sexy']
    dat = {}
    print(len(lines))
    for line in lines:
        parts = line.rstrip().split(' ')
        file = parts[0]
        file = file.replace('home/jovyan/', '')
        file = file.replace('fast-data/tusou_data_440/train', 'fast-data/tusou')
        annt = PORN6[int(parts[1])]
        # setdefault replaces the original duplicated if/else append branches
        dat.setdefault(annt, []).append(file)
    return dat
def check_result(result):
    """Return True if any of the first 7 per-class box arrays is non-empty."""
    return any(result[cls].shape[0] > 0 for cls in range(7))
def process_image(model, imageList, gt_is_porn=True, grp=4, pt=0):
    """Run detection on every grp-th image (offset pt) and collect mismatches.

    :param model: initialized detector passed to ``inference_detector``
    :param imageList: image file paths to scan
    :param gt_is_porn: ground-truth flag for every image in the list
    :param grp: number of parallel worker groups
    :param pt: this worker's index within the groups (processes idx % grp == pt)
    :return: list of image paths whose detection outcome disagrees with
        ``gt_is_porn`` (an image counts as "porn" when any box is detected)
    """
    dat_list = []
    for idx, imgfil in tqdm(enumerate(imageList)):
        if idx % grp != pt:
            continue
        try:
            result = inference_detector(model, imgfil)
            is_porn = check_result(result)
        except Exception:
            # inference can fail on unreadable/corrupt images; skip them.
            # (was a bare `except:`, which also swallowed KeyboardInterrupt)
            continue
        if is_porn != gt_is_porn:
            dat_list.append(imgfil)
    return dat_list
def main():
    """Batch-scan one class of the tusou training set and record mismatches.

    Parses CLI options (config/checkpoint/device, worker split via
    --groups/--pt, and the class via --category), runs the detector over that
    class's images, and writes the mismatching image paths to
    ./tusou_train_det/<category>_<groups>_<pt>_<device>.txt.
    """
    parser = ArgumentParser()
    #parser.add_argument('img', help='Image file')
    parser.add_argument('--config',
                default = '../configs/censor/faster_rcnn_x101_32x4d_fpn_1x_censor.py',
                help='Config file')
    parser.add_argument('--checkpoint',
                default = '../work_dirs/faster_rcnn_x101_32x4d_fpn_1x_censor/epoch_48.pth',
                help='Checkpoint file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--score-thr', type=float, default=0.3, help='bbox score threshold')
    parser.add_argument(
        '--groups', type=int, default=4, help='groups of threads')
    parser.add_argument(
        '--pt', type=int, default=0, help='current thread index')
    parser.add_argument(
        '--category', default='porn', help='class category to test')
    args = parser.parse_args()
    print(args.groups, args.pt)
    # build the model from a config file and a checkpoint file
    model = init_detector(args.config, args.checkpoint, device=args.device)
    # test a single image
    img_dat = get_tusou_data('/fast-data/tusou/train.lst')
    false_list = process_image(model,img_dat[args.category], grp=args.groups, pt=args.pt)
    #fid = open('/fast-data/tusou/train_det/%s_%d_%d_%s.txt'%(args.category,args.groups,args.pt,args.device),'w')
    # NOTE(review): the output file is opened/closed manually; an exception
    # while writing would leak the handle -- a `with` block would be safer.
    fid = open('./tusou_train_det/%s_%d_%d_%s.txt'%(args.category,args.groups,args.pt,args.device),'w')
    for img in false_list:
        fid.write(img+'\n')
    fid.close()
    # show the results
    #show_result_pyplot(model, args.img, result, score_thr=args.score_thr)
if __name__ == '__main__':
main()
| StarcoderdataPython |
3543597 | from __future__ import unicode_literals, print_function, absolute_import
from scrapel.http import FormRequest
from scrapel.http import HtmlResponse
from scrapel.http import Request
from scrapel.http import Response
from scrapel.http import TextResponse
from scrapel.http import XmlResponse
from .providers import Scrapel
__author__ = '<NAME>'
__all__ = ['Scrapel', 'Request', 'FormRequest', 'Response', 'HtmlResponse', 'TextResponse', 'XmlResponse']
| StarcoderdataPython |
21883 | <gh_stars>1-10
# ================================================================================
# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
"""generic class to keep get real time info about the current process"""
import gc
import sys
import threading
import traceback
from functools import wraps
import psutil
def safe_operation(func):
    """Decorator that converts any raised exception into a result dict.

    On failure the wrapped call returns ``{ExceptionClassName: message}``
    instead of propagating.  A falsy callable yields ``None``, mirroring the
    guard below.
    """
    if not func:
        return
    @wraps(func)
    def guarded(*args, **kwargs):
        """Invoke the wrapped function, trapping any exception."""
        try:
            return func(*args, **kwargs)
        except Exception as err:
            return {type(err).__name__ : str(err)}
    return guarded
class ProcessInfo(object):
    """Static helper reporting live diagnostics about the current process:
    memory usage, active threads with stacks, and garbage-collector state.
    All public accessors are wrapped in @safe_operation, so they return an
    {ExceptionName: message} dict instead of raising."""
    _BIBYTES_SYMBOLS = ('KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB')
    _BIBYTES_VALS = {}
    _inited = False
    _lock = threading.Lock()
    @staticmethod
    def init():
        """Lazily build the symbol->byte-value table (thread-safe).

        The _inited flag is re-checked under the lock so concurrent callers
        initialize at most once; afterwards the symbols are reversed so that
        bytes_to_bibytes can scan from the largest unit downwards."""
        if ProcessInfo._inited:
            return
        with ProcessInfo._lock:
            if ProcessInfo._inited:
                return
            for i, bibytes_symbol in enumerate(ProcessInfo._BIBYTES_SYMBOLS):
                ProcessInfo._BIBYTES_VALS[bibytes_symbol] = 1 << (i + 1) * 10
            ProcessInfo._BIBYTES_SYMBOLS = list(reversed(ProcessInfo._BIBYTES_SYMBOLS))
            ProcessInfo._inited = True
    @staticmethod
    def bytes_to_bibytes(byte_count):
        """Convert a byte count to a human-readable kibi/mebi/gibi... string.

        None becomes "unknown"; zero and non-int values are passed through
        unchanged; anything below 1 KiB is rendered as "<n> B"."""
        if byte_count is None:
            return "unknown"
        if not byte_count or not isinstance(byte_count, int):
            return byte_count
        ProcessInfo.init()
        for bibytes_symbol in ProcessInfo._BIBYTES_SYMBOLS:
            bibytes_value = ProcessInfo._BIBYTES_VALS[bibytes_symbol]
            if byte_count >= bibytes_value:
                value = float(byte_count) / bibytes_value
                return '%.2f %s' % (value, bibytes_symbol)
        return "%s B" % byte_count
    @staticmethod
    @safe_operation
    def process_memory():
        """Return this process's memory_full_info fields, humanized."""
        process = psutil.Process()
        with process.oneshot():
            return dict((k, ProcessInfo.bytes_to_bibytes(v))
                        for k, v in process.memory_full_info()._asdict().items())
    @staticmethod
    @safe_operation
    def virtual_memory():
        """Return system-wide virtual-memory stats, humanized."""
        return dict((k, ProcessInfo.bytes_to_bibytes(v))
                    for k, v in psutil.virtual_memory()._asdict().items())
    @staticmethod
    @safe_operation
    def active_threads():
        """Return a sorted list of "name(ident)" strings for live threads."""
        return sorted([thr.name + "(" + str(thr.ident) + ")" for thr in threading.enumerate()])
    @staticmethod
    @safe_operation
    def thread_stacks():
        """Return each thread's id, name, and current stack frames.

        Uses sys._current_frames(), which may also include threads that
        threading.enumerate() does not know about (name resolves to None)."""
        thread_names = dict((thr.ident, thr.name) for thr in threading.enumerate())
        return [
            {
                "thread_id" : thread_id,
                "thread_name" : thread_names.get(thread_id),
                "thread_stack" : [
                    {
                        "filename" : filename,
                        "lineno" : lineno,
                        "function" : function_name,
                        "line" : line.strip() if line else None
                    }
                    for filename, lineno, function_name, line in traceback.extract_stack(stack)
                ]
            }
            for thread_id, stack in sys._current_frames().items()
        ]
    @staticmethod
    @safe_operation
    def gc_info(full=False):
        """Return garbage-collector counters; include uncollectable objects
        (their reprs if *full*, else just the count) when any exist."""
        gc_info = {
            "gc_count" : str(gc.get_count()),
            "gc_threshold" : str(gc.get_threshold())
        }
        if gc.garbage:
            gc_info["gc_garbage"] = ([repr(stuck) for stuck in gc.garbage]
                                     if full else len(gc.garbage))
        return gc_info
    @staticmethod
    def get_all():
        """Return every diagnostic section in a single dict."""
        return {
            "active_threads" : ProcessInfo.active_threads(),
            "gc" : ProcessInfo.gc_info(full=True),
            "process_memory" : ProcessInfo.process_memory(),
            "virtual_memory" : ProcessInfo.virtual_memory(),
            "thread_stacks" : ProcessInfo.thread_stacks()
        }
| StarcoderdataPython |
4822954 | <reponame>briqr/CSPN<filename>argmax_layer_CSPN.py<gh_stars>10-100
import sys
sys.path.insert(0,'/home/briq/libs/caffe/python')
import caffe
import random
import numpy as np
import scipy.misc
class ArgmaxLayer(caffe.Layer):
    """Caffe Python layer: channel-wise argmax over the bottom blob."""
    def setup(self, bottom, top):
        # No layer parameters to parse.
        pass
    def reshape(self, bottom, top):
        # Output drops the channel axis: (num, height, width).
        top[0].reshape(bottom[0].num, bottom[0].shape[2], bottom[0].shape[3])
    def forward(self, bottom, top):
        # Index of the maximum along axis 1 (channels) for every position.
        top[0].data[...] = np.argmax(bottom[0].data[...], axis=1)
    def backward(self, top, propagate_down, bottom):
        # argmax has no useful gradient, so nothing is propagated.
        # NOTE(review): this assignment only rebinds the local parameter name
        # and has no effect on the caller -- confirm that was the intent.
        propagate_down = False
        pass
| StarcoderdataPython |
11372676 | # Generated from C:/Users/karaznie/apiql/apiql/grammar\ApiQL.g4 by ANTLR 4.7.2
from antlr4 import *
if __name__ is not None and "." in __name__:
from .ApiQLParser import ApiQLParser
else:
from ApiQLParser import ApiQLParser
# This class defines a complete generic visitor for a parse tree produced by ApiQLParser.
class ApiQLVisitor(ParseTreeVisitor):
    """Generic visitor for ApiQL parse trees.

    NOTE(review): generated by ANTLR 4.7.2 from ApiQL.g4 -- regenerate from
    the grammar rather than editing by hand.  Every method simply delegates
    to visitChildren; subclass and override to implement behavior.
    """

    # Visit a parse tree produced by ApiQLParser#criteria.
    def visitCriteria(self, ctx:ApiQLParser.CriteriaContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by ApiQLParser#criterion.
    def visitCriterion(self, ctx:ApiQLParser.CriterionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by ApiQLParser#conjunction.
    def visitConjunction(self, ctx:ApiQLParser.ConjunctionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by ApiQLParser#disjunction.
    def visitDisjunction(self, ctx:ApiQLParser.DisjunctionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by ApiQLParser#predicate.
    def visitPredicate(self, ctx:ApiQLParser.PredicateContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by ApiQLParser#basic_predicate.
    def visitBasic_predicate(self, ctx:ApiQLParser.Basic_predicateContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by ApiQLParser#compound_predicate.
    def visitCompound_predicate(self, ctx:ApiQLParser.Compound_predicateContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by ApiQLParser#keyword.
    def visitKeyword(self, ctx:ApiQLParser.KeywordContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by ApiQLParser#basic_operator.
    def visitBasic_operator(self, ctx:ApiQLParser.Basic_operatorContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by ApiQLParser#compound_operator.
    def visitCompound_operator(self, ctx:ApiQLParser.Compound_operatorContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by ApiQLParser#values.
    def visitValues(self, ctx:ApiQLParser.ValuesContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by ApiQLParser#value.
    def visitValue(self, ctx:ApiQLParser.ValueContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by ApiQLParser#boolean.
    def visitBoolean(self, ctx:ApiQLParser.BooleanContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by ApiQLParser#nil.
    def visitNil(self, ctx:ApiQLParser.NilContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by ApiQLParser#number.
    def visitNumber(self, ctx:ApiQLParser.NumberContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by ApiQLParser#datetime.
    def visitDatetime(self, ctx:ApiQLParser.DatetimeContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by ApiQLParser#string.
    def visitString(self, ctx:ApiQLParser.StringContext):
        return self.visitChildren(ctx)
del ApiQLParser | StarcoderdataPython |
3360258 | <gh_stars>0
import os
import time
import typing
from functools import reduce
from capi.src.implementation.datastructures.graph_file_paths import GraphFilePaths
from capi.src.implementation.shapefiles.shapefile_reader import ShapefileReader
from capi.src.implementation.visibility_graphs import (
VisGraphCoord,
VisGraphPolygon,
generate_visgraph,
generate_visgraph_with_shuffled_range,
save_graph_to_file,
)
from capi.src.interfaces.graph_generator import IGraphGenerator
from capi.src.interfaces.shapefiles.shapefile_reader import IShapefileReader
class GraphGenerator(IGraphGenerator):
    """Builds visibility graphs from shapefile polygons and saves them to disk."""
    def __init__(self, shapefile_reader: typing.Optional[IShapefileReader] = None):
        # Default to the concrete reader when no reader is injected.
        self._shapefile_reader = ShapefileReader() if shapefile_reader is None else ShapefileReader() if False else shapefile_reader
    def generate(self, shape_file_path: str, output_path: str) -> None:
        """Generate a full visibility graph and write it under output_path.

        Creates output_path as a new directory (fails if it already exists).
        """
        os.mkdir(output_path)
        graph_file = GraphFilePaths(output_path)
        curr_file_output_path = graph_file.default_graph_path
        polygons = self._read_polygons_from_shapefile(shape_file_path)
        graph = generate_visgraph(polygons)
        save_graph_to_file(graph, curr_file_output_path)
    def generate_for_vertex_range(
        self, shape_file_path: str, output_path: str, current_split_num: int, num_splits: int, seed: int
    ) -> None:
        """Generate the graph for one shuffled vertex split out of num_splits.

        The vertex list is divided into num_splits contiguous ranges; the last
        split absorbs the remainder.  `seed` fixes the shuffle so splits from
        separate runs partition the same ordering.
        """
        os.mkdir(output_path)
        graph_file = GraphFilePaths(output_path)
        curr_file_output_path = graph_file.default_graph_path
        polygons = self._read_polygons_from_shapefile(shape_file_path)
        num_vertices = self._get_num_vertices_in_polygons(polygons)
        split_size = num_vertices // num_splits
        split_start = split_size * current_split_num
        split_end = (split_size * (current_split_num + 1)) if current_split_num < num_splits - 1 else num_vertices
        graph = generate_visgraph_with_shuffled_range(polygons, split_start, split_end, seed)
        save_graph_to_file(graph, curr_file_output_path)
    def _read_polygons_from_shapefile(self, shape_file_path: str) -> typing.Sequence[VisGraphPolygon]:
        """Read the shapefile and convert each polygon to VisGraph types
        (longitude/latitude become the VisGraphCoord x/y)."""
        read_polygons = self._shapefile_reader.read(shape_file_path)
        unadjusted_polygons = [
            VisGraphPolygon([VisGraphCoord(vertex.longitude, vertex.latitude) for vertex in polygon.vertices])
            for polygon in read_polygons
        ]
        return unadjusted_polygons
    @staticmethod
    def _get_num_vertices_in_polygons(polygons: typing.Sequence[VisGraphPolygon]) -> int:
        """Total vertex count across all polygons."""
        return reduce(lambda a, b: a + b, map(lambda polygon: len(polygon.vertices), polygons))
if __name__ == "__main__":
    # Ad-hoc benchmark/smoke-test driver: times full-graph generation on two
    # shapefiles, then exercises the two-way split variant.  Note that
    # generate() calls os.mkdir, so re-running against existing output
    # directories will raise FileExistsError.
    from capi.test.test_files.test_files_dir import TEST_FILES_DIR
    gen = GraphGenerator()
    start_time = time.time()
    gen.generate(
        os.path.join(TEST_FILES_DIR, "smaller.shp"),
        os.path.join(TEST_FILES_DIR, "smaller_graph"),
    )
    end_time = time.time()
    print(f"Time taken for smaller: {end_time - start_time}")
    start_time = time.time()
    gen.generate(
        os.path.join(TEST_FILES_DIR, "GSHHS_c_L1.shp"),
        os.path.join(TEST_FILES_DIR, "graph"),
    )
    end_time = time.time()
    print(f"Time taken for larger: {end_time - start_time}")
    # Two complementary halves of the same shuffled vertex ordering (seed 42).
    gen.generate_for_vertex_range(
        os.path.join(TEST_FILES_DIR, "smaller.shp"),
        os.path.join(TEST_FILES_DIR, "smaller_graph_range_1"),
        0,
        2,
        42,
    )
    gen.generate_for_vertex_range(
        os.path.join(TEST_FILES_DIR, "smaller.shp"),
        os.path.join(TEST_FILES_DIR, "smaller_graph_range_2"),
        1,
        2,
        42,
    )
| StarcoderdataPython |
3261796 | <filename>assets/hangman_words.py<gh_stars>0
# Word bank of animal names -- presumably answer words for the hangman game
# (the file name suggests so); all lowercase, no spaces.
animals = [
    'aardvark',
    'aardwolf',
    'abyssinian',
    'addax',
    'ainu',
    'ayeaye',
    'aussiedor',
    'arcticfox',
    'amurleopard',
    'anchovies',
    'babirusa',
    'baboon',
    'beabull',
    'bananaspider',
    'barb',
    'barnowl',
    'beaski',
    'bergamasco',
    'bichpoo',
    'bison',
    'capelion',
    'caribou',
    'carp',
    'chameleon',
    'chihuahua',
    'crane',
    'cricket',
    'cuscus',
    'corgipoo',
    'chicken',
    'dingo',
    'dog',
    'deer',
    'dragonfish',
    'doxle',
    'dunker',
    'dolphin',
    'dugong',
    'dorgi',
    'dodo',
    'elephant',
    'emu',
    'elk',
    'electriceel',
    'echidna',
    'earwig',
    'emperorpenguin',
    'eskipoo',
    'eagle',
    'earthworm',
    'falcon',
    'fly',
    'firefly',
    'fox',
    'frog',
    'frogfish',
]
# Word bank of (human) language and nationality names; all lowercase,
# multi-word names concatenated.  NOTE(review): 'german' and 'deutsch' both
# appear, and 'roman'/'singapore' look like loose entries -- confirm intended.
languages = [
    'english',
    'hindi',
    'german',
    'french',
    'spanish',
    'turkish',
    'roman',
    'italian',
    'russian',
    'chinese',
    'japanese',
    'thai',
    'singapore',
    'arabic',
    'algerian',
    'ancientgreek',
    'egyptian',
    'latin',
    'bulgarian',
    'dutch',
    'deutsch',
    'greek',
    'indonesian',
    'irish',
    'korean',
    'mandarin',
    'mongolian',
    'nepali',
    'persian',
    'portuguese',
    'romanian',
    'serbian',
    'scottishgaelic',
    'swedish',
    'susuami',
    'tibetan',
    'ukrainian',
    'vietnamese',
]
# Word bank of programming-language names.
# FIX: the original list had a duplicated 'html' entry missing its trailing
# comma, so Python's implicit string concatenation silently produced a bogus
# 'htmljava' entry and dropped 'java' -- corrected to single 'html' + 'java'.
# NOTE(review): 'fortan', 'gortan', 'mercurry' and 'pearl' look like typos of
# fortran/mercury/perl -- left as-is; confirm whether intentional.
programming_langs = [
    'a.net',
    'ampl',
    'angelscript',
    'angular',
    'assembly',
    'apl',
    'abap',
    'aimms',
    'apache',
    'autolisp',
    'bash',
    'basic',
    'beanshell',
    'beta',
    'bliss',
    'bosque',
    'bcpl',
    'blockly',
    'ballerina',
    'babbage',
    'c',
    'c minus minus',
    'c plus plus',
    'cow',
    'csharp',
    'cobol',
    'charm',
    'coffeescript',
    'clojure',
    'cython',
    'cryptol',
    'css',
    'dart',
    'dbase',
    'darwin',
    'datalog',
    'datatrieve',
    'dynamo',
    'dylan',
    'drakon',
    'delphi',
    'dibol',
    'ecmascript',
    'ease',
    'elan',
    'epl',
    'elm',
    'eiffel',
    'espol',
    'erlang',
    'escher',
    'esterel',
    'fsharp',
    'fantom',
    'faust',
    'flagship',
    'focal',
    'foil',
    'fortan',
    'gamemonkey script',
    'gap',
    'gdscript',
    'gdl',
    'go',
    'golang',
    'google apps script',
    'gortan',
    'graphql',
    'gom',
    'html',
    'java',
    'javascript',
    'jython',
    'jscript',
    'julia',
    'kotlin',
    'kojo',
    'kivy',
    'krypton',
    'kornshell',
    'lava',
    'lua',
    'lisa',
    'logo',
    'livescript',
    'machinecode',
    'matlab',
    'mdl',
    'mercurry',
    'mortan',
    'mivascript',
    'mutan',
    'objective c',
    'objective python',
    'opencl',
    'opencv',
    'pascal',
    'pdl',
    'pearl',
    'pharo',
    'pico',
    'python',
    'php',
    'powershell',
    'postcss',
    'r',
    'ruby',
    'react',
    'rapid',
    'rust',
    'sql',
    'scala',
    'sas',
    'squirrel',
    'swift',
    'typescript',
    'tkinter',
    'visual',
    'vim script',
    'vue',
    'webassembly',
    'webdna',
]
No community queries yet
The top public SQL queries from the community will appear here once available.