text string | size int64 | token_count int64 |
|---|---|---|
#!/usr/bin/env python3
# NZART Exam Trainer
# Written by Richard Walmsley <richwalm+nzarttrainer@gmail.com> (ZL1RSW)
from flask import Flask, request, render_template, redirect, url_for, Response, abort
import random
import string
import json
import sys
# Static files are served from the short "s" folder to keep URLs small.
app = Flask(__name__, static_folder='s')

# Constants.
Needed = 40       # Correct answers required to pass the exam.
MaxSeedSize = 8   # Maximum length of a user-supplied exam seed.

# Load the database and ensure it's valid.
# Also create a cache for the answers.
with app.open_resource('questions.json') as InputFile:
    Data = json.load(InputFile)

Answers = []          # Flat cache of every question's correct answer, in bank order.
Required = Total = 0  # Questions per generated exam / total questions in the bank.
for Block in Data:
    # A block cannot ask for more questions than it actually contains.
    if Block['RequiredAnswers'] > len(Block['Questions']):
        sys.exit(1)
    for Q in Block['Questions']:
        # The answer must point at one of the available choices.
        # NOTE(review): uses > rather than >=, so Answer appears to be 1-based — confirm.
        if Q['Answer'] > len(Q['Choices']):
            sys.exit(1)
        Answers.append(Q['Answer'])
    Required += Block['RequiredAnswers']
    Total += len(Block['Questions'])
if Required > Total:
    sys.exit(1)
# Common
def GenerateExam(Seed):
    """Seed the RNG and deterministically pick each block's question indexes.

    Returns one sorted list of question indexes per block, containing that
    block's RequiredAnswers distinct entries.
    """
    random.seed(Seed)
    Blocks = []
    for Block in Data:
        Indexes = []
        # Rejection-sample until the block has enough distinct questions.
        while len(Indexes) < Block['RequiredAnswers']:
            Pick = random.randrange(len(Block['Questions']))
            if Pick not in Indexes:
                Indexes.append(Pick)
        Indexes.sort()
        Blocks.append(Indexes)
    return Blocks
def GenerateFullExam():
    """Return every question index of every block — the complete question bank."""
    return [list(range(len(Block['Questions']))) for Block in Data]
# Views
@app.errorhandler(404)
def PageNotFound(e):
    """Serve the custom 404 template with the matching status code."""
    return (render_template('404.html'), 404)
@app.route('/')
def Index():
    """Landing page: explains the trainer and shows the question-bank size."""
    context = {'MaxSeedSize': MaxSeedSize, 'Total': Total}
    return render_template('index.html', **context)
@app.route('/exam', methods = [ 'GET', 'POST' ])
def Exam():
    """Display a generated exam (GET) or grade a submitted one (POST).

    Exam selection:
      * 'aq' (all questions) set -> serve/grade the full question bank.
      * otherwise a seed 's' (form or query string) deterministically selects
        RequiredAnswers questions per block. A missing seed on GET gets a
        fresh random one; POST without a seed is rejected (403) since the
        submission cannot be regenerated for grading.
    """
    AllQuestions = request.form.get('aq') or request.args.get('aq')
    if not AllQuestions:
        Seed = request.form.get('s') or request.args.get('s')
        if not Seed:
            # A graded submission must carry the seed it was generated with.
            if request.method == 'POST':
                abort(403)
            Seed = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(MaxSeedSize))
        else:
            Seed = Seed[:MaxSeedSize]
        Blocks = GenerateExam(Seed)
        T = Required
    else:
        Blocks = GenerateFullExam()
        Seed = None
        T = Total
    if request.method == 'POST':
        # Grading. Form field names are the questions' flat (global) indexes.
        BlockAnswers = []
        Offset = Correct = BlockIndex = 0
        for B in Blocks:
            BCorrect = 0
            for I in B:
                A = request.form.get(str(Offset + I))
                if A:
                    try:
                        A = int(A)
                    except Exception:
                        # Non-numeric submissions never match and count as wrong.
                        pass
                    if A == Answers[Offset + I]:
                        Correct += 1
                        BCorrect += 1
            BlockAnswers.append(BCorrect)
            # The offset advances by the whole block, even when only a
            # subset of its questions was asked.
            Offset += len(Data[BlockIndex]['Questions'])
            BlockIndex += 1
        return render_template('results.html', Seed = Seed, Blocks = Blocks, Data = Data, Needed = Needed, Correct = Correct, Answers = Answers, BlockAnswers = BlockAnswers, Total = T)
    return render_template('exam.html', Seed = Seed, Blocks = Blocks, Data = Data, Needed = Needed, Total = T)
"""
@app.route('/answer/<int:ID>')
def Answer(ID):
if ID >= len(Answers):
abort(404)
return Response(str(Answers[ID]), mimetype='text/plain')
"""
if __name__ == '__main__':
    # Development entry point: listen on all interfaces.
    app.run(debug = False, host='0.0.0.0')
| 3,587 | 1,174 |
from typing import Dict
import logging
from src.middleware.profiler import do_cprofile
logger = logging.getLogger(__name__)
@do_cprofile
def health() -> Dict[str, str]:
    """Liveness-probe payload, wrapped in the cProfile middleware."""
    status: Dict[str, str] = {"health": "ok"}
    return status
def health_sync() -> Dict[str, str]:
    """Synchronous wrapper that simply delegates to health()."""
    result = health()
    return result
async def health_async() -> Dict[str, str]:
    """Async wrapper that simply delegates to health()."""
    result = health()
    return result
| 325 | 110 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from os.path import dirname, exists, join
import sys, subprocess
from setuptools import find_packages, setup
setup_dir = dirname(__file__)
git_dir = join(setup_dir, ".git")
version_file = join(setup_dir, "cxxheaderparser", "version.py")

# Automatically generate a version.py based on the git version
if exists(git_dir):
    p = subprocess.Popen(
        ["git", "describe", "--tags", "--long", "--dirty=-dirty"],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    out, err = p.communicate()
    # Make sure the git version has at least one tag
    if err:
        print("Error: You need to create a tag for this repo to use the builder")
        sys.exit(1)

    # Convert git version to PEP440 compliant version
    # - Older versions of pip choke on local identifiers, so we can't include the git commit
    v, commits, local = out.decode("utf-8").rstrip().split("-", 2)
    if commits != "0" or "-dirty" in local:
        v = "%s.post0.dev%s" % (v, commits)

    # Create the version.py file
    with open(version_file, "w") as fp:
        fp.write("# Autogenerated by setup.py\n__version__ = '{0}'".format(v))

# Read __version__ back in, whether freshly generated (git checkout) or
# shipped in the sdist.
with open(version_file, "r") as fp:
    exec(fp.read(), globals())
DESCRIPTION = (
    "Parse C++ header files and generate a data structure representing the class"
)

CLASSIFIERS = [
    "Development Status :: 4 - Beta",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Programming Language :: Python :: 3",
    "Programming Language :: C++",
    "License :: OSI Approved :: BSD License",
    "Intended Audience :: Developers",
    "Topic :: Software Development",
    "Topic :: Software Development :: Code Generators",
    "Topic :: Software Development :: Compilers",
]

# Read the long description up front with an explicit encoding; the original
# bare open("README.md").read() inside the setup() call left the file handle
# to be closed by the GC and decoded with the platform default encoding.
with open("README.md", encoding="utf-8") as readme:
    LONG_DESCRIPTION = readme.read()

setup(
    name="cxxheaderparser",
    version=__version__,
    author="Dustin Spicuzza",
    author_email="dustin@virtualroadside.com",
    maintainer="RobotPy Development Team",
    maintainer_email="robotpy@googlegroups.com",
    url="https://github.com/robotpy/cxxheaderparser",
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    install_requires=["dataclasses; python_version < '3.7'"],
    license="BSD",
    platforms="Platform Independent",
    packages=find_packages(),
    keywords="c++ header parser ply",
    python_requires=">= 3.6",
    classifiers=CLASSIFIERS,
)
| 2,498 | 808 |
import json
from flask import Blueprint, request
from RobotCode import BehaviorDB
from RobotCode.RobotModel import getRobotModel
Robot_Api_BluePrint = Blueprint('Robot_Api_BluePrint', __name__, url_prefix="/robot_api")
@Robot_Api_BluePrint.route('/getBehaviorMenu')
def getBehaviorMenu():
    """Serialize the behavior database's menu as JSON."""
    wMenu = BehaviorDB.getBehaviorDB().getBehaviorMenu()
    return json.dumps(wMenu)
@Robot_Api_BluePrint.route('/getCurrentBehavior')
def getCurrentBehavior():
    """Serialize the robot's currently selected behavior set as JSON."""
    wCurrent = getRobotModel().getBehavior().getCurrentBahaviorSet()
    return json.dumps(wCurrent)
@Robot_Api_BluePrint.route('/getBehaviorForm/<iBehaviorType>/<iBehaviorName>')
def getBehaviorForm(iBehaviorType, iBehaviorName):
    """Return the parameter form of the requested behavior as JSON.

    Falls back to a plain-text error message when the behavior is unknown.
    """
    wBehavior = BehaviorDB.getBehaviorDB().getBehavior(iBehaviorType, iBehaviorName)
    # Idiomatic identity test (was `None != wBehavior`).
    if wBehavior is not None:
        return json.dumps(wBehavior.getParametersForm())
    # Build the message once; log it server-side and echo it to the caller.
    wMessage = "Unable to find Behavior [{}] - [{}]".format(iBehaviorType, iBehaviorName)
    print(wMessage)
    return wMessage
@Robot_Api_BluePrint.route('/setBehavior', methods=["POST"])
def setbehavior():
    """Select the robot's active behavior from a JSON POST body.

    Expected body: {"Type": ..., "Name": ..., "Parameters": ... (optional)}.
    Returns a short status string describing the outcome.
    """
    wData = request.get_json()
    # Guard clauses replace the original nested ifs; get_json() yields None
    # for a missing or non-JSON body, which the original did not handle.
    if not wData or "Type" not in wData or "Name" not in wData:
        return "Behavior not defined"
    wBehavior = getRobotModel().getBehavior()
    if "Parameters" in wData:
        wSelected = wBehavior.selectBehavior(wData["Type"], wData["Name"], wData["Parameters"])
    else:
        wSelected = wBehavior.selectBehavior(wData["Type"], wData["Name"])
    return "Behavior Set" if wSelected else "Behavior not found"
| 1,563 | 471 |
import pygame
import random
import math
pygame.init()
def newRain(width, rainlength):
    """Create one raindrop tuple: (x, y, depth, fall speed).

    x is anywhere across (slightly left of) the screen, y starts above the
    top edge, depth (1..3) scales streak size/brightness, speed starts at 0.
    """
    # Integer division here: `rainlength / 2` yields a float, which
    # random.randint rejects on modern Python versions.
    return (random.randint(-50, width),
            random.randint(-rainlength, -(rainlength // 2)),
            random.randint(1, 3),
            0)
def endpoint(start, length, angle):
    """Project a rain streak of the given length from `start`.

    The x offset follows the slant angle (degrees); the y offset is simply
    the full length, matching the original rendering behaviour.
    """
    slant = (angle / 180) * math.pi
    x = start[0] + length * math.cos(slant)
    y = start[1] + length
    return (x, y)
class Main:
    """Rain simulation: spawns drops, advances them, and draws each frame."""

    elements = []     # Kept for backward compatibility; instances use their own list.
    gravity = 70      # Basis for the per-depth fall speed (farther drops fall slower).
    rainlength = 150  # Base streak length at depth 1.
    angle = 80        # Slant angle of the rain, in degrees.

    def __init__(self, windows, win_height, win_width):
        self.win = windows
        self.width = win_width
        self.height = win_height
        # Per-instance list: appending to the class-level list would share
        # drops between every Main instance.
        self.elements = []
        for _ in range(50):
            self.elements.append(newRain(self.width, self.rainlength))

    def Update(self):
        # Draw on the window this instance was given (the original used the
        # global `win`, which breaks for any other surface).
        self.win.fill((0, 0, 0))
        # Spawn a few new drops every frame.
        for _ in range(4):
            self.elements.append(newRain(self.width, self.rainlength))
        # Advance every drop: x drifts by the slant, y falls by the current
        # speed, and the speed is (re)derived from gravity and depth.
        slant = math.cos((self.angle / 180) * math.pi)
        for i in range(len(self.elements)):
            x, y, depth, speed = self.elements[i]
            self.elements[i] = (x + speed * slant,
                                y + speed,
                                depth,
                                self.gravity / depth)
        # Keep only on-screen drops. (The original removed elements while
        # iterating the same list, silently skipping the next drop each time.)
        survivors = [e for e in self.elements if e[1] < self.height + 20]
        self.elements = survivors
        # Draw rain: deeper drops are shorter and dimmer.
        for e in survivors:
            length = self.rainlength / e[2]
            color = (240 / e[2], 240 / e[2], 240 / e[2])
            pygame.draw.line(self.win, color, (e[0], e[1]),
                             endpoint((e[0], e[1]), length, self.angle))
        pygame.display.update()
win_width = 1200
win_height = 500
limitFPS = 40

win = pygame.display.set_mode((win_width, win_height))
pygame.display.set_caption("It's heavy rain now")
main = Main(win, win_height, win_width)

# One clock for the whole loop; recreating it every frame (as the original
# did) defeats tick()'s frame-time accounting.
clock = pygame.time.Clock()
run = True
while run:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    main.Update()
    clock.tick(limitFPS)
pygame.quit()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- coding: utf8 -*-
from django.http import Http404
from django.contrib.auth.decorators import user_passes_test
from django.utils.decorators import method_decorator
from diogenis.teachers.models import Teacher
def user_is_teacher(user):
    """True when `user` is authenticated and has a Teacher profile flagged as teacher."""
    try:
        teacher = Teacher.objects.get(user=user)
    # Narrowed from a bare except: no profile, or a non-model user such as
    # AnonymousUser, simply means "not a teacher". Programming errors now surface.
    except (Teacher.DoesNotExist, TypeError, ValueError):
        return False
    # is_authenticated is a *method* on this (older) Django version.
    return user.is_authenticated() and teacher.is_teacher
class AuthenticatedTeacherMixin(object):
    """Restrict a view to the authenticated teacher named in the URL."""

    @method_decorator(user_passes_test(user_is_teacher, login_url='/login/'))
    def dispatch(self, request, *args, **kwargs):
        # A teacher may only reach pages under their own username.
        if kwargs.get('username') != request.user.username:
            raise Http404
        return super(AuthenticatedTeacherMixin, self).dispatch(request, *args, **kwargs)
| 818 | 266 |
Import("env")
import platform

# Build LittleFS images with mklittlefs instead of the default SPIFFS tool;
# pick the platform-appropriate binary.
if platform.system() == 'Windows':
    print("Replace MKSPIFFSTOOL with mklittlefs.exe")
    tool_path = "tools/mklittlefs.exe"
else:
    print("Replace MKSPIFFSTOOL with mklittlefs")
    tool_path = "tools/mklittlefs"
env.Replace(MKSPIFFSTOOL = tool_path)
# importing the required libraries
import pyautogui, time

# delay to switch windows
time.sleep(5)

# send the same message five times
for _ in range(5):
    # fetch and type each word from the file
    pyautogui.write('Random Annoying Spam Words')
    # press enter to send the message
    pyautogui.press('enter')
| 371 | 118 |
import binascii
import midstate
import pytest
import struct
def test_initial_state():
    """A fresh ShaState must equal the SHA-256 initial hash constants H0."""
    SHA256_H0 = bytes([
        # Byte-swap 32-bit words
        0x6a, 0x09, 0xe6, 0x67,
        0xbb, 0x67, 0xae, 0x85,
        0x3c, 0x6e, 0xf3, 0x72,
        0xa5, 0x4f, 0xf5, 0x3a,
        0x51, 0x0e, 0x52, 0x7f,
        0x9b, 0x05, 0x68, 0x8c,
        0x1f, 0x83, 0xd9, 0xab,
        0x5b, 0xe0, 0xcd, 0x19
    ])
    s = midstate.ShaState()
    assert s.as_bin(True) == SHA256_H0
def test_btc_four_zeroes():
    """Midstate after hashing the first 64 bytes of a Bitcoin block header.

    Builds an 80-byte header (version, prev hash, merkle root, time/bits),
    checks the 16-byte tail, then verifies the midstate after one block.
    """
    expected_state = bytes([
        0xdc, 0x6a, 0x3b, 0x8d, 0x0c, 0x69, 0x42, 0x1a,
        0xcb, 0x1a, 0x54, 0x34, 0xe5, 0x36, 0xf7, 0xd5,
        0xc3, 0xc1, 0xb9, 0xe4, 0x4c, 0xbb, 0x9b, 0x8f,
        0x95, 0xf0, 0x17, 0x2e, 0xfc, 0x48, 0xd2, 0xdf,
    ])
    expected_second_block = bytes([
        0xdc, 0x14, 0x17, 0x87,
        0x35, 0x8b, 0x05, 0x53,
        0x53, 0x5f, 0x01, 0x19
    ])
    # Hash fields are hex big-endian; [::-1] flips them to wire byte order.
    message = struct.pack("<L", 2) + \
        binascii.a2b_hex("000000000000000117c80378b8da0e33559b5997f2ad55e2f7d18ec1975b9717")[::-1] + \
        binascii.a2b_hex("871714dcbae6c8193a2bb9b2a69fe1c0440399f38d94b3a0f1b447275a29978a")[::-1] + \
        struct.pack("<LL", 0x53058b35, 0x19015f53)
    assert message[64:] == expected_second_block
    s = midstate.ShaState()
    s.update(message[:64])
    assert s.as_bin(True) == expected_state
class Solution:
    def sortArrayByParityII(self, a):
        """
        :type A: List[int]
        :rtype: List[int]

        Interleave the even and odd values of `a` so that
        result[i] % 2 == i % 2 (LeetCode 922), which guarantees the input
        contains equally many even and odd numbers.
        """
        # Slice assignment removes the original's dependency on an itertools
        # import that this module never performed (NameError at call time).
        result = [0] * len(a)
        result[::2] = [x for x in a if x % 2 == 0]
        result[1::2] = [x for x in a if x % 2 == 1]
        return result
| 289 | 108 |
import numpy as np
import cv2
import pyrealsense2 as rs
import time, sys, glob
# Camera model constants. NOTE(review): neither value is used below — confirm
# before removing.
focal = 0.0021
baseline = 0.08

# Build a virtual (software) RealSense device with a single "Depth" sensor.
sd = rs.software_device()
depth_sensor = sd.add_sensor("Depth")

# Intrinsics shared by every registered stream.
intr = rs.intrinsics()
intr.width = 848
intr.height = 480
intr.ppx = 637.951293945312
intr.ppy = 360.783233642578
intr.fx = 638.864135742188
intr.fy = 638.864135742188

# Stream 0: left infrared image (8-bit grayscale).
vs = rs.video_stream()
vs.type = rs.stream.infrared
vs.fmt = rs.format.y8
vs.index = 1
vs.uid = 1
vs.width = intr.width
vs.height = intr.height
vs.fps = 30
vs.bpp = 1
vs.intrinsics = intr
depth_sensor.add_video_stream(vs)

# Streams 1-3: three 16-bit depth streams (result, ground truth, denoised).
# The same descriptor object is mutated and re-registered for each one.
vs.type = rs.stream.depth
vs.fmt = rs.format.z16
vs.index = 1
vs.uid = 3
vs.bpp = 2
depth_sensor.add_video_stream(vs)

vs.type = rs.stream.depth
vs.fmt = rs.format.z16
vs.index = 2
vs.uid = 4
vs.bpp = 2
depth_sensor.add_video_stream(vs)

vs.type = rs.stream.depth
vs.fmt = rs.format.z16
vs.index = 3
vs.uid = 5
vs.bpp = 2
depth_sensor.add_video_stream(vs)

depth_sensor.add_read_only_option(rs.option.depth_units, 0.001)

# Register the device with the librealsense context under a findable name.
name = "virtual camera"
sd.register_info(rs.camera_info.name, name)
ctx = rs.context()
sd.add_to(ctx)
dev = ctx.query_devices()[0]
for d in ctx.query_devices():
    if d.get_info(rs.camera_info.name) == name:
        dev = d
# Directory holding the input images; may be overridden by argv[1].
images_path = "."
if (len(sys.argv) > 1):
    images_path = str(sys.argv[1])

# Record everything pushed through the virtual device into a .bag file.
rec = rs.recorder(images_path + "/1.bag", dev)
sensor = rec.query_sensors()[0]
q = rs.frame_queue()
sensor.open(sensor.get_stream_profiles())
sensor.start(q)

# Frame indexes are recovered from the ground-truth file names: gt-<idx>.png
files = glob.glob1(images_path, "gt*")
index = []
for f in files:
    idx = (f.split('-')[1]).split('.')[0]
    index.append(int(idx))

for i in index:
    left_name = images_path + "/left-" + str(i) + ".png"
    depth_name = images_path + "/gt-" + str(i) + ".png"
    result_name = images_path + "/res-" + str(i) + ".png"
    denoised_name = images_path + "/res_denoised-" + str(i) + ".png"

    # Left IR image -> stream profile 0 (8-bit grayscale).
    img = cv2.imread(left_name)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    f = rs.software_video_frame()
    f.stride = intr.width
    f.bpp = 1
    f.pixels = np.asarray(img, dtype="byte")
    f.timestamp = i * 0.01
    f.frame_number = i
    f.profile = sensor.get_stream_profiles()[0].as_video_stream_profile()
    depth_sensor.on_video_frame(f)
    time.sleep(0.01)

    # Result depth map -> stream profile 1 (16-bit).
    f3 = rs.software_video_frame()
    img = cv2.imread(result_name, cv2.IMREAD_ANYDEPTH)
    f3.stride = 2 * intr.width
    f3.bpp = 2
    px = np.asarray(img, dtype="ushort")
    f3.pixels = px
    f3.timestamp = i * 0.01
    f3.frame_number = i
    f3.profile = sensor.get_stream_profiles()[1].as_video_stream_profile()
    depth_sensor.on_video_frame(f3)
    time.sleep(0.01)

    # Ground-truth depth map -> stream profile 2.
    f4 = rs.software_video_frame()
    img = cv2.imread(depth_name, cv2.IMREAD_ANYDEPTH)
    f4.stride = 2 * intr.width
    f4.bpp = 2
    px = np.asarray(img, dtype="ushort")
    f4.pixels = px
    f4.timestamp = i * 0.01
    f4.frame_number = i
    f4.profile = sensor.get_stream_profiles()[2].as_video_stream_profile()
    depth_sensor.on_video_frame(f4)
    time.sleep(0.01)

    # Denoised result depth map -> stream profile 3.
    f5 = rs.software_video_frame()
    img = cv2.imread(denoised_name, cv2.IMREAD_ANYDEPTH)
    f5.stride = 2 * intr.width
    f5.bpp = 2
    px = np.asarray(img, dtype="ushort")
    f5.pixels = px
    f5.timestamp = i * 0.01
    f5.frame_number = i
    f5.profile = sensor.get_stream_profiles()[3].as_video_stream_profile()
    depth_sensor.on_video_frame(f5)
    time.sleep(0.01)

time.sleep(1)
print("a")
# Drain one frame before shutdown.
# NOTE(review): presumably a synchronisation aid so the recorder flushes — confirm.
f = q.wait_for_frame()
print("b")
time.sleep(1)
sensor.stop()
sensor.close()
| 3,485 | 1,595 |
from argparse import ArgumentParser
from evidencegraph.argtree import RELATION_SETS_BY_NAME
from evidencegraph.corpus import CORPORA
from evidencegraph.evaluation import evaluate_setting
if __name__ == "__main__":
    arg_parser = ArgumentParser(
        description="""Evaluate argumentation parsing predictions"""
    )
    arg_parser.add_argument(
        "--corpus",
        "-c",
        choices=CORPORA,
        default="m112en",
        help="the corpus to evaluate the predictions of",
    )
    cli_args = arg_parser.parse_args()

    corpus_name = cli_args.corpus
    language = CORPORA[corpus_name]["language"]

    # Single evaluation setting: ADU segmentation with the simple relation set.
    settings = {
        ("adu", "SIMPLE_RELATION_SET"): [
            "{}-test-adu-simple-noop|equal".format(corpus_name)
        ]
    }

    for (segmentation, relation_set_name), conditions in settings.items():
        relationset = RELATION_SETS_BY_NAME.get(relation_set_name)
        evaluate_setting(
            language,
            segmentation,
            relationset,
            conditions,
            corpus_id=corpus_name,
        )
| 1,043 | 313 |
from common.dataaccess import MongoAccessLayer
from common.timeutil import now
import numpy as np
import os
import sys
from classifier import workload_comparision as wc
# data = []
# data.append({'metric': metric, 'mean': query_mean[0], 'std': query_std[0]})
# data = {
# 'metrics': {'n_samples': QUERY_STEP // SCRAP_INTERVAL, 'values': data},
# 'histogram': histogram,
# 'start': end - QUERY_STEP,
# 'end': end,
# 'step': QUERY_STEP
# }
class HistClassifier:
    """Groups workload histograms stored in MongoDB by similarity.

    Histograms whose pairwise Hellinger distance is at most a threshold are
    considered the same workload and grouped together.
    """

    def __init__(self, application, mongo_url, mongo_port, mongo_db, histogram_collection, tuning_collection):
        self.application = application
        self.mongo = MongoAccessLayer(mongo_url, mongo_port, mongo_db)
        self.histogram_collection = self.mongo.collection(histogram_collection)
        self.tuning_collection = self.mongo.collection(tuning_collection)

    def close(self):
        """Release the MongoDB connection."""
        self.mongo.close()

    def tunings(self, start, end):
        """Fetch tuning documents whose [start, end] lies within the window."""
        return self.mongo.find({'start': {'$gte': start}, 'end': {'$lte': end}},
                               self.tuning_collection)

    def histograms(self, start, end):
        """Fetch this application's histogram documents within the window."""
        return self.mongo.find({'application': self.application, 'start': {'$gte': start}, 'end': {'$lte': end}},
                               self.histogram_collection)

    def join_tuning_histogram(self, start, end):
        """Attach to each tuning the histograms recorded during its interval."""
        _tunings = self.tunings(start, end)
        _histograms = self.histograms(start, end)
        processed_tunings = []
        for tuning in _tunings:
            # NOTE(review): shadows the window arguments from here on.
            start = tuning['start']
            end = tuning['end']
            filtered_histograms = []
            for histogram in _histograms:
                if histogram['start'] >= start and histogram['end'] <= end:
                    filtered_histograms.append(histogram)
            tuning.update({'histograms': filtered_histograms})
            processed_tunings.append(tuning)
        return processed_tunings

    def fetch(self, start, end):
        """Return {document id -> histogram bin counts as np.array} for the window."""
        result_set = self.mongo.find({'application': self.application, 'start': {'$gte': start}, 'end': {'$lte': end}},
                                     self.histogram_collection)
        simple_histogram = {}
        for result in result_set:
            id = str(result['_id'])
            simple_histogram[id] = np.array(list(result['histogram'].values()))
        return simple_histogram

    def compare(self, histograms, threshould=0):
        """Group ids whose pairwise Hellinger distance is <= threshould.

        (Parameter name kept as-is for keyword-argument compatibility.)
        Returns {representative id -> set of grouped ids}.
        """
        from collections import defaultdict
        workflows_group = defaultdict(set)
        memory = set()
        # O(n^2) pairwise comparison, including each id with itself.
        for i, hist1 in histograms.items():
            for j, hist2 in histograms.items():
                distance = wc.hellinger(hist1, hist2)
                if distance <= threshould:
                    self._group(i, j, workflows_group, memory)
        return workflows_group

    # TODO: optimize this in the future
    def _group(self, a, b, table, memory):
        """Union-find-like merge of ids a and b into the grouping table.

        `memory` records ids that have already been placed in some group.
        """
        if a not in memory and b not in memory:
            # Neither seen before: a becomes the representative of a new group.
            table[a].add(b)
        elif a in memory and b not in memory:
            if a in table:
                table[a].add(b)
            else:
                # a belongs to someone else's group; retry with roles swapped.
                return self._group(b, a, table, memory)
        elif a not in memory and b in memory:
            # Add a to whichever existing group contains b.
            for key, value in table.items():
                if b in value:
                    value.add(a)
                    break
        memory.add(a)
        memory.add(b)
def main():
    """Fetch the last two days of acmeair histograms and print their grouping."""
    from common.timeutil import minute, day
    start = now(past=day(2))
    end = now()
    classifier = HistClassifier('acmeair', 'localhost', 27017, 'acmeair_db_experiments', 'acmeair_collection_histogram',
                                'acmeair_collection_tuning')
    histograms = classifier.fetch(start, end)
    # Threshold 0: only identical histograms end up grouped together.
    print(classifier.compare(histograms, 0))
    #for hist in classifier.join_tuning_histogram(start, end):
    #    print(hist)
    # results = classifier.fetch(start, end)
    # print(len(results))
    # print(results)
    # print(classifier.compare(results, threshould=0))
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print('Interrupted')
        try:
            sys.exit(0)
        except SystemExit:
            # os._exit skips cleanup handlers — used when sys.exit is trapped.
            os._exit(0)
| 4,221 | 1,284 |
'''
Write a program that maps a list of words into a list of integers representing the lengths of the corresponding words.
Write it in three different ways:
1) using a for-loop,
2) using the higher order function map(), and
3) using list comprehensions.
'''
l = ['apple', 'orange', 'cat']

# 1 - plain for-loop (the variant the exercise asked for but was missing)
lengths = []
for word in l:
    lengths.append(len(word))
print(lengths)

# 2 - map(); wrapping in list() prints the values on Python 3 as well
# (the original Python-2-only `print map(...)` is a SyntaxError on 3.x)
print(list(map(lambda x: len(x), l)))
print(list(map(len, l)))

# 3 - list comprehension
print([len(i) for i in l])
| 400 | 139 |
import filecmp
import os
import shutil
import tempfile
from datetime import date, datetime
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from unittest import TestCase
import sec_certs.constants as constants
import sec_certs.helpers as helpers
from sec_certs.dataset.common_criteria import CCDataset
from sec_certs.sample.common_criteria import CommonCriteriaCert
from sec_certs.sample.protection_profile import ProtectionProfile
class TestCommonCriteriaOOP(TestCase):
    """Round-trip and dataset-building tests for CommonCriteriaCert/CCDataset.

    setUp builds two real-world sample certificates, one fully fictional one,
    and a small template dataset that the JSON fixtures under
    data/test_cc_oop are compared against. Several tests download live data
    from commoncriteriaportal.org.
    """

    def setUp(self):
        """Construct the sample certificates, template dataset and expected hashes."""
        self.test_data_dir = Path(__file__).parent / "data" / "test_cc_oop"
        self.crt_one = CommonCriteriaCert(
            "active",
            "Access Control Devices and Systems",
            "NetIQ Identity Manager 4.7",
            "NetIQ Corporation",
            "SE",
            {"ALC_FLR.2", "EAL3+"},
            date(2020, 6, 15),
            date(2025, 6, 15),
            "https://www.commoncriteriaportal.org/files/epfiles/Certification%20Report%20-%20NetIQ®%20Identity%20Manager%204.7.pdf",
            "https://www.commoncriteriaportal.org/files/epfiles/ST%20-%20NetIQ%20Identity%20Manager%204.7.pdf",
            "https://www.commoncriteriaportal.org/files/epfiles/Certifikat%20CCRA%20-%20NetIQ%20Identity%20Manager%204.7_signed.pdf",
            "https://www.netiq.com/",
            set(),
            set(),
            None,
            None,
            None,
        )
        self.crt_two = CommonCriteriaCert(
            "active",
            "Access Control Devices and Systems",
            "Magic SSO V4.0",
            "Dreamsecurity Co., Ltd.",
            "KR",
            set(),
            date(2019, 11, 15),
            date(2024, 11, 15),
            "https://www.commoncriteriaportal.org/files/epfiles/KECS-CR-19-70%20Magic%20SSO%20V4.0(eng)%20V1.0.pdf",
            "https://www.commoncriteriaportal.org/files/epfiles/Magic_SSO_V4.0-ST-v1.4_EN.pdf",
            None,
            "https://www.dreamsecurity.com/",
            {
                ProtectionProfile(
                    "Korean National Protection Profile for Single Sign On V1.0",
                    "https://www.commoncriteriaportal.org/files/ppfiles/KECS-PP-0822-2017%20Korean%20National%20PP%20for%20Single%20Sign%20On%20V1.0(eng).pdf",
                )
            },
            set(),
            None,
            None,
            None,
        )
        # Fully synthetic certificate (with a maintenance update) used for the
        # single-certificate JSON round-trip tests.
        pp = ProtectionProfile("sample_pp", "https://sample.pp")
        update = CommonCriteriaCert.MaintenanceReport(
            date(1900, 1, 1), "Sample maintenance", "https://maintenance.up", "https://maintenance.up"
        )
        self.fictional_cert = CommonCriteriaCert(
            "archived",
            "Sample category",
            "Sample certificate name",
            "Sample manufacturer",
            "Sample scheme",
            {"Sample security level"},
            date(1900, 1, 2),
            date(1900, 1, 3),
            "https://path.to/report/link",
            "https://path.to/st/link",
            "https://path.to/cert/link",
            "https://path.to/manufacturer/web",
            {pp},
            {update},
            None,
            None,
            None,
        )
        self.template_dataset = CCDataset(
            {self.crt_one.dgst: self.crt_one, self.crt_two.dgst: self.crt_two},
            Path("/fictional/path/to/dataset"),
            "toy dataset",
            "toy dataset description",
        )
        # Fixed timestamp/state so serialization matches the stored fixture.
        self.template_dataset.timestamp = datetime(2020, 11, 16, hour=17, minute=4, second=14, microsecond=770153)
        self.template_dataset.state.meta_sources_parsed = True
        # Expected SHA-256 digests of the downloaded PDFs, keyed by cert digest.
        self.template_report_pdf_hashes = {
            "309ac2fd7f2dcf17": "774c41fbba980191ca40ae610b2f61484c5997417b3325b6fd68b345173bde52",
            "8cf86948f02f047d": "533a5995ef8b736cc48cfda30e8aafec77d285511471e0e5a9e8007c8750203a",
        }
        self.template_target_pdf_hashes = {
            "309ac2fd7f2dcf17": "b9a45995d9e40b2515506bbf5945e806ef021861820426c6d0a6a074090b47a9",
            "8cf86948f02f047d": "3c8614338899d956e9e56f1aa88d90e37df86f3310b875d9d14ec0f71e4759be",
        }
        self.template_report_txt_path = self.test_data_dir / "report_869415cc4b91282e.txt"
        self.template_target_txt_path = self.test_data_dir / "target_869415cc4b91282e.txt"

    def test_certificate_input_sanity(self):
        """URL-escaped characters in the report link must survive construction."""
        self.assertEqual(
            self.crt_one.report_link,
            "https://www.commoncriteriaportal.org/files/epfiles/Certification%20Report%20-%20NetIQ®%20Identity%20Manager%204.7.pdf",
            "Report link contains some improperly escaped characters.",
        )

    def test_download_and_convert_pdfs(self):
        """Download the toy dataset's PDFs and verify hashes and txt conversion.

        Requires network access to commoncriteriaportal.org; txt sizes are
        compared with a tolerance since pdf->txt conversion is not exact.
        """
        dset = CCDataset.from_json(self.test_data_dir / "toy_dataset.json")
        with TemporaryDirectory() as td:
            dset.root_dir = Path(td)
            dset.download_all_pdfs()
            dset.convert_all_pdfs()
            actual_report_pdf_hashes = {
                key: helpers.get_sha256_filepath(val.state.report_pdf_path) for key, val in dset.certs.items()
            }
            actual_target_pdf_hashes = {
                key: helpers.get_sha256_filepath(val.state.st_pdf_path) for key, val in dset.certs.items()
            }
            self.assertEqual(
                actual_report_pdf_hashes,
                self.template_report_pdf_hashes,
                "Hashes of downloaded pdfs (sample report) do not the template",
            )
            self.assertEqual(
                actual_target_pdf_hashes,
                self.template_target_pdf_hashes,
                "Hashes of downloaded pdfs (security target) do not match the template",
            )
            self.assertTrue(dset["309ac2fd7f2dcf17"].state.report_txt_path.exists())
            self.assertTrue(dset["309ac2fd7f2dcf17"].state.st_txt_path.exists())
            self.assertAlmostEqual(
                dset["309ac2fd7f2dcf17"].state.st_txt_path.stat().st_size,
                self.template_target_txt_path.stat().st_size,
                delta=1000,
            )
            self.assertAlmostEqual(
                dset["309ac2fd7f2dcf17"].state.report_txt_path.stat().st_size,
                self.template_report_txt_path.stat().st_size,
                delta=1000,
            )

    def test_cert_to_json(self):
        """Serializing the fictional certificate matches the stored fixture."""
        with NamedTemporaryFile("w") as tmp:
            self.fictional_cert.to_json(tmp.name)
            self.assertTrue(
                filecmp.cmp(self.test_data_dir / "fictional_cert.json", tmp.name),
                "The sample serialized to json differs from a template.",
            )

    def test_dataset_to_json(self):
        """Serializing the template dataset matches the stored fixture."""
        with NamedTemporaryFile("w") as tmp:
            self.template_dataset.to_json(tmp.name)
            self.assertTrue(
                filecmp.cmp(self.test_data_dir / "toy_dataset.json", tmp.name),
                "The dataset serialized to json differs from a template.",
            )

    def test_cert_from_json(self):
        """Deserializing the fixture yields the fictional certificate."""
        self.assertEqual(
            self.fictional_cert,
            CommonCriteriaCert.from_json(self.test_data_dir / "fictional_cert.json"),
            "The sample serialized from json differs from a template.",
        )

    def test_dataset_from_json(self):
        """Deserializing the fixture yields the template dataset."""
        self.assertEqual(
            self.template_dataset,
            CCDataset.from_json(self.test_data_dir / "toy_dataset.json"),
            "The dataset serialized from json differs from a template.",
        )

    def test_build_empty_dataset(self):
        """Building with all download/processing flags off yields an empty dataset."""
        with TemporaryDirectory() as tmp_dir:
            dset = CCDataset({}, Path(tmp_dir), "sample_dataset", "sample dataset description")
            dset.get_certs_from_web(to_download=False, get_archived=False, get_active=False)
        self.assertEqual(len(dset), 0, "The dataset should contain 0 files.")

    def test_build_dataset(self):
        """Build a dataset from pre-downloaded csv/html metadata and compare it."""
        with TemporaryDirectory() as tmp_dir:
            dataset_path = Path(tmp_dir)
            os.mkdir(dataset_path / "web")
            shutil.copyfile(
                self.test_data_dir / "cc_products_active.csv", dataset_path / "web" / "cc_products_active.csv"
            )
            shutil.copyfile(
                self.test_data_dir / "cc_products_active.html", dataset_path / "web" / "cc_products_active.html"
            )
            dset = CCDataset({}, dataset_path, "sample_dataset", "sample dataset description")
            dset.get_certs_from_web(
                keep_metadata=False, to_download=False, get_archived=False, get_active=True, update_json=False
            )
            # keep_metadata=False must remove the csv/html inputs again.
            self.assertEqual(
                len(os.listdir(dataset_path)),
                0,
                "Meta files (csv, html) were not deleted properly albeit this was explicitly required.",
            )
        self.assertEqual(len(dset), 2, "The dataset should contain 2 files.")
        self.assertTrue(self.crt_one in dset, "The dataset does not contain the template sample.")
        self.assertEqual(dset, self.template_dataset, "The loaded dataset does not match the template dataset.")

    def test_download_csv_html_files(self):
        """Download the active csv/html metadata and sanity-check their sizes (network)."""
        with TemporaryDirectory() as tmp_dir:
            dataset_path = Path(tmp_dir)
            dset = CCDataset({}, dataset_path, "sample_dataset", "sample dataset description")
            dset.download_csv_html_resources(get_active=True, get_archived=False)
            for x in dset.active_html_tuples:
                self.assertTrue(x[1].exists())
                self.assertGreaterEqual(x[1].stat().st_size, constants.MIN_CC_HTML_SIZE)
            for x in dset.active_csv_tuples:
                self.assertTrue(x[1].exists())
                self.assertGreaterEqual(x[1].stat().st_size, constants.MIN_CC_CSV_SIZE)

    def test_download_pp_dataset(self):
        """Process protection profiles and check the PP dataset file appears (network)."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            self.template_dataset.root_dir = tmp_dir
            self.template_dataset.process_protection_profiles()
            self.assertTrue(self.template_dataset.pp_dataset_path.exists())
            self.assertGreaterEqual(
                self.template_dataset.pp_dataset_path.stat().st_size, constants.MIN_CC_PP_DATASET_SIZE
            )
| 10,199 | 3,416 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import inkyphat
import time
import sys
# Usage banner. NOTE(review): .format() is called but the string contains no
# placeholder, so argv[0] is never substituted here — confirm intent.
print("""Inky pHAT: Clean
Displays solid blocks of red, black, and white to clean the Inky pHAT
display of any screen burn.
""".format(sys.argv[0]))

if len(sys.argv) < 2:
    print("""Usage: {} <colour> <number of cycles>
Valid colours: red, yellow, black
""".format(sys.argv[0]))
    sys.exit(0)

colour = sys.argv[1].lower()
try:
    inkyphat.set_colour(colour)
except ValueError:
    print('Invalid colour "{}" for V{}\n'.format(colour, inkyphat.get_version()))
    # V2 panels require a valid colour; V1 panels fall back to red.
    if inkyphat.get_version() == 2:
        sys.exit(1)
    print('Defaulting to "red"')

# Optional second argument: number of cleaning cycles (default 3).
if len(sys.argv) > 2:
    cycles = int(sys.argv[2])
else:
    cycles = 3

colours = (inkyphat.RED, inkyphat.BLACK, inkyphat.WHITE)
colour_names = (colour, "black", "white")

for i in range(cycles):
    print("Cleaning cycle %i\n" % (i + 1))
    for j, c in enumerate(colours):
        print("- updating with %s" % colour_names[j])
        inkyphat.set_border(c)
        # Fill every pixel with the current colour, then refresh the panel.
        for x in range(inkyphat.WIDTH):
            for y in range(inkyphat.HEIGHT):
                inkyphat.putpixel((x, y), c)
        inkyphat.show()
        time.sleep(1)
    print("\n")

print("Cleaning complete!")
# Python 3 version of index.py
import math
# Input -> 10 20 monkey
# Input -> 10 20 monkey
a, b, c = input().split(' ')
print(a, b, c)

# Input with map -> 10 20
a, b = map(int, input().split(' '))
print(a, b)

# Input with map -> 10, 20
a, b = map(int, input().split(','))
print(a, b)

# Python has dynamic data type so integers, float are same as variables

# Mathematics
# Sum Formulas -> 1 + 2 + 3 ... n = n(n+1)/2
# Sum Formulas (k = 2) -> 1^2 + 2^2 + 3^2 ... n^2 = n(n+1)(2n+1)/6
# AP -> a + .. + b = n(a+b)/2
# GP -> a + ak + ak^2 + .. + b = (bk - a) / (k - 1)
ap = [3, 7, 11, 15]
print(4*(3+15)/2)  # n(a+b)/2 with n=4, a=3, b=15
gp = [3, 6, 12, 24]  # k is 2 here
print((24*2 - 3)/(2-1))  # (bk - a)/(k - 1)

# Set
x = set([2, 4, 7])
a = set([1, 2, 5])
b = set([2, 4])
print(x)
print(a.intersection(b))
print(a.union(b))
print(a.difference(b))

# Functions
print(3/2)  # true division -> 1.5
print(min([1, 2, 3]))
print(max([1, 2, 3]))
print(math.factorial(10))
# Iterative version: the naive double recursion is O(phi^n); this is O(n).
def fibo(n):
    """Return the n-th Fibonacci number (fibo(0) == 0, fibo(1) == 1)."""
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
print(fibo(10))  # 55

# Logarithms (product rule shown; original wrote logk(a, b))
# logk(a*b) = logk(a) + logk(b)
# logk(x^n) = n * logk(x)
# logk(a/b) = logk(a) - logk(b)
# logu(x) = logk(x) / logk(u)   (change of base)
print(math.log(32, 2))  # 5.0
| 1,165 | 607 |
# Project 1 - The Scope!
# Scenario: Congrats, your Penetration testing company Red Planet has
# landed an external assessment for Microsoft! Your point of contact has
# give you a few IP addresses for you to test. Like with any test you
# should always verify the scope given to you to make sure there wasn't
# a mistake.
## Beginner Task: Write a script that will have the user input an IP
## address. The script should output the ownership and geolocation of the
## IP. The output should be presented in a way that is clean and organized
## in order to be added to your report.
# Resources:
# https://ipgeolocation.io/
# Get geolocation for an IPv4 IP Address = 8.8.8.8
# $ curl 'https://api.ipgeolocation.io/ipgeo?apiKey=API_KEY&ip=8.8.8.8'
# https://ipgeolocation.io/documentation/ip-geolocation-api.html
import requests
import configparser

# store the API key in an external file and make sure to add the file
# to .gitignore
cfg = configparser.ConfigParser()
cfg.read('ipgeo.cfg')
# ConfigParser.get's `raw` flag expects a bool; the original passed raw=''
# (merely falsy, i.e. the default behaviour). Read the key plainly.
IPGEO_KEY = cfg.get('KEYS', 'api_key')
IPGEO_URL = "https://api.ipgeolocation.io/ipgeo"
def locate(ip):
    '''Query IP Geo database for given IP and print Owner.

    Raises requests.HTTPError on a non-2xx response and
    requests.Timeout when the API does not answer in time.
    '''
    # Let requests URL-encode the parameters (the user-supplied IP went into
    # the query string verbatim before), and don't hang forever on a dead API.
    resp = requests.get(IPGEO_URL,
                        params={'apiKey': IPGEO_KEY, 'ip': ip},
                        timeout=10)
    resp.raise_for_status()
    location_info = resp.json()
    print(f'{ip} is owned by {location_info["isp"]}, located in '
          f'{location_info["city"]}, {location_info["state_prov"]}.')
if __name__ == '__main__':
    # Interactive entry point: look up a single user-supplied IP.
    target_ip = input("Enter an IP to look up: ")
    locate(target_ip)
| 1,507 | 492 |
/home/runner/.cache/pip/pool/75/74/1f/cd550c3fd39c07a88abf9ca8d462c4c05077809e3ca61220a3837e78cd | 96 | 73 |
import itertools
import logging
import random
from collections import defaultdict
from concurrent.futures import wait
from concurrent.futures.thread import ThreadPoolExecutor
from bot import RedditBot
from utils import rand_wait_min, rand_wait_sec
class BotOrchestrator:
    def __init__(self, all_credentials: dict, executor=None):
        """Spin up one RedditBot per credential set plus a shared thread pool.

        Broken (e.g. suspended) bots are dropped up front so pool workers are
        only allocated for usable accounts.
        """
        all_usernames = {cred['username'] for cred in all_credentials}
        self.bots = [RedditBot(creds, all_bot_names=all_usernames) for creds in all_credentials]
        self.bots = [bot for bot in self.bots if not bot.is_broken]  # filter out suspended bots
        # NOTE(review): max_workers uses len(self.bots) after filtering; if every
        # bot is broken, ThreadPoolExecutor(max_workers=0) raises — confirm intended.
        self.executor = executor if executor else ThreadPoolExecutor(max_workers=len(self.bots),
                                                                     thread_name_prefix="RedditBot")
    def __enter__(self):
        """Allow use as a context manager; no extra setup needed."""
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Block until all queued bot work finishes before releasing the pool.
        self.executor.shutdown(wait=True)
    # This will cause bots to process same submissions
    # You will get suspended for it pretty quickly once detected
    def parse_subreddit(self, subreddit: str, **kwargs):
        """Have every bot work the same subreddit concurrently (detection risk)."""
        self._submit_to_executor_for_all(lambda bot: bot.work_on_subreddit(subreddit, **kwargs), to_wait=True)
def upvote_other_bot_comments(self, iterations=1, comment_sample_size=3):
session_upvotes = defaultdict(set)
def do_fetch_comment_ids(bot):
return [(c_id, bot.username) for c_id in bot.fetch_new_comments(limit=comment_sample_size)]
def do_upvote_comment(bot, comment_id):
bot.upvote_comment(comment_id)
session_upvotes[bot.username].add(comment_id)
for i_n in range(iterations):
comment_with_owner = {}
futures = []
for bot in self.bots:
futures.append(self.executor.submit(do_fetch_comment_ids, bot))
wait(futures)
for future in futures:
result = future.result()
for entry in result:
c_id, author = entry[0], entry[1]
comment_with_owner[c_id] = author
comment_ids = list(comment_with_owner.keys())
random.shuffle(comment_ids)
comment_id_iter = itertools.cycle(comment_ids)
futures = []
for bot in self.bots:
bot_name = bot.username
loop_passed = 0
while True:
comment_id = next(comment_id_iter)
owner = comment_with_owner[comment_id]
# skip your own comments and comments already upvoted
if owner != bot_name and comment_id not in session_upvotes[bot_name]:
break
loop_passed += 1
# guard from infinite loop if all comments very upvoted
if loop_passed > len(self.bots) + 1:
logging.warning(f"All comments have been already upvoted by {bot_name}")
break
futures.append(self.executor.submit(do_upvote_comment, bot, comment_id))
wait(futures)
if i_n != iterations - 1:
logging.info("Waiting between iterations")
rand_wait_sec(25, 35)
def parse_different_submissions(self, subreddit, **kwargs):
submissions = list(random.choice(self.bots).fetch_submission_ids(subreddit, **kwargs))
submission_ids = iter(submissions)
futures = []
bots_iter = itertools.cycle(self.bots)
for bot in bots_iter:
submission_id = next(submission_ids, None)
if not submission_id:
break
futures.append(self.executor.submit(bot.parse_submission, submission_id))
wait(futures)
def execute_custom_func(self, fn):
self._submit_to_executor_for_all(fn)
def log_karma(self):
self._submit_to_executor_for_all(lambda bot: bot.log_comment_karma())
def upvote_comment_sequentially_with_wait(self, comment_id=None, url=None):
for bot in self.bots:
bot.upvote_comment(comment_id, url)
rand_wait_min(1, 2)
def _submit_to_executor_for_all(self, func, to_wait=False):
futures = []
for bot in self.bots:
futures.append(self.executor.submit(func, bot))
if to_wait:
wait(futures)
| 4,408 | 1,306 |
import numpy as np
def normalize(v):
    """Scale 3-vector *v* to unit length in place (zero vector is left alone).

    Returns the same object that was passed in.
    """
    length = np.linalg.norm(v)
    if length > 0.0:
        for i in range(3):
            v[i] = v[i] / length
    return v
def skew_symmetric(v):
    """Skew symmetric (cross-product) matrix of vector v = [p, q, r]."""
    p, q, r = v[0], v[1], v[2]
    return np.array([[0, -r, q],
                     [r, 0, -p],
                     [-q, p, 0]])
def rotation_rpy(roll, pitch, yaw):
    """Rotation matrix Rz(yaw) * Ry(pitch) * Rx(roll) (ZYX convention)."""
    sr, cr = np.sin(roll), np.cos(roll)
    sp, cp = np.sin(pitch), np.cos(pitch)
    sy, cy = np.sin(yaw), np.cos(yaw)
    top = [cy*cp, cy*sp*sr - sy*cr, cy*sp*cr + sy*sr]
    mid = [sy*cp, sy*sp*sr + cy*cr, sy*sp*cr - cy*sr]
    bot = [-sp, cp*sr, cp*cr]
    return np.array([top, mid, bot])
def quaternion_rpy(roll, pitch, yaw):
    """Unit quaternion [x, y, z, w] (scalar last) from ZYX roll-pitch-yaw."""
    sr, cr = np.sin(roll/2.0), np.cos(roll/2.0)
    sp, cp = np.sin(pitch/2.0), np.cos(pitch/2.0)
    sy, cy = np.sin(yaw/2.0), np.cos(yaw/2.0)
    q = np.array([sr * cp * cy - cr * sp * sy,
                  cr * sp * cy + sr * cp * sy,
                  cr * cp * sy - sr * sp * cy,
                  cr * cp * cy + sr * sp * sy])
    # Normalize against rounding drift before returning.
    nq = np.sqrt(q[0]*q[0] + q[1]*q[1] + q[2]*q[2] + q[3]*q[3])
    return q / nq
def T_rpy(displacement, roll, pitch, yaw):
    """4x4 homogeneous transform from an RPY rotation plus a translation."""
    T = np.zeros([4, 4])
    T[3, 3] = 1.0
    T[:3, :3] = rotation_rpy(roll, pitch, yaw)
    T[:3, 3] = displacement
    return T
def quaternion_product(quat0, quat1):
    """Hamilton product quat0 * quat1; quaternions are [x, y, z, w]."""
    x0, y0, z0, w0 = quat0[0], quat0[1], quat0[2], quat0[3]
    x1, y1, z1, w1 = quat1[0], quat1[1], quat1[2], quat1[3]
    return np.array([w0*x1 + x0*w1 + y0*z1 - z0*y1,
                     w0*y1 - x0*z1 + y0*w1 + z0*x1,
                     w0*z1 + x0*y1 - y0*x1 + z0*w1,
                     w0*w1 - x0*x1 - y0*y1 - z0*z1], dtype=float)
def dual_quaternion_product(Q, P):
    """Dual-quaternion product of two 8-vectors.

    Layout is [x, y, z, w, x', y', z', w']: the first four elements are the
    real quaternion, the last four the dual quaternion.
    """
    def _hamilton(a, b):
        # Hamilton product of two [x, y, z, w] quadruples.
        x0, y0, z0, w0 = a
        x1, y1, z1, w1 = b
        return [w0*x1 + x0*w1 + y0*z1 - z0*y1,
                w0*y1 - x0*z1 + y0*w1 + z0*x1,
                w0*z1 + x0*y1 - y0*x1 + z0*w1,
                w0*w1 - x0*x1 - y0*y1 - z0*z1]

    Qr = (Q[0], Q[1], Q[2], Q[3])
    Qd = (Q[4], Q[5], Q[6], Q[7])
    Pr = (P[0], P[1], P[2], P[3])
    Pd = (P[4], P[5], P[6], P[7])
    real = _hamilton(Qr, Pr)
    # Dual part: Qr*Pd + Qd*Pr (epsilon squared vanishes).
    dual_a = _hamilton(Qr, Pd)
    dual_b = _hamilton(Qd, Pr)
    res = np.zeros(8)
    res[0:4] = real
    for i in range(4):
        res[4 + i] = dual_a[i] + dual_b[i]
    return res
def dual_quaternion_conj(Q):
    """Conjugate of a dual quaternion: quaternion conjugation applied to
    both the real and the dual part."""
    return np.array([-Q[0], -Q[1], -Q[2], Q[3],
                     -Q[4], -Q[5], -Q[6], Q[7]], dtype=float)
def dual_quaternion_norm2(Q):
    """Squared dual-number norm of a dual quaternion.

    Returns the pair (real, dual) where real = |q_real|^2 and
    dual = 2 * <q_real, q_dual>.
    Based on: https://github.com/bobbens/libdq/blob/master/dq.c
    """
    real_part = Q[0]*Q[0] + Q[1]*Q[1] + Q[2]*Q[2] + Q[3]*Q[3]
    dual_part = 2.*(Q[3]*Q[7] + Q[0]*Q[4] + Q[1]*Q[5] + Q[2]*Q[6])
    return real_part, dual_part
def dual_quaternion_inv(Q):
    """Inverse of a dual quaternion (conjugate scaled by the norm components).

    Based on: https://github.com/bobbens/libdq/blob/master/dq.c
    """
    real, dual = dual_quaternion_norm2(Q)
    res = np.zeros(8)
    # Vector components: negate real part, scale dual part by (dual - real).
    for i in range(3):
        res[i] = -Q[i] * real
        res[4 + i] = Q[4 + i] * (dual - real)
    # Scalar components keep their sign, with the opposite dual scaling.
    res[3] = Q[3] * real
    res[7] = Q[7] * (real - dual)
    return res
def dual_quaternion_to_transformation_matrix(Q):
    """Convert a dual quaternion ([x,y,z,w] real, [x',y',z',w'] dual) into a
    4x4 homogeneous transformation matrix."""
    xr, yr, zr, wr = Q[0], Q[1], Q[2], Q[3]
    xd, yd, zd, wd = Q[4], Q[5], Q[6], Q[7]
    res = np.zeros((4, 4))
    # Rotation block (row by row) plus the displacement column.
    res[0] = [wr*wr + xr*xr - yr*yr - zr*zr,
              2.*(xr*yr - wr*zr),
              2.*(xr*zr + wr*yr),
              2.*(-wd*xr + xd*wr - yd*zr + zd*yr)]
    res[1] = [2.*(xr*yr + wr*zr),
              wr*wr - xr*xr + yr*yr - zr*zr,
              2.*(yr*zr - wr*xr),
              2.*(-wd*yr + xd*zr + yd*wr - zd*xr)]
    res[2] = [2.*(xr*zr - wr*yr),
              2.*(yr*zr + wr*xr),
              wr*wr - xr*xr - yr*yr + zr*zr,
              2.*(-wd*zr - xd*yr + yd*xr + zd*wr)]
    res[3, 3] = 1.0
    return res
def dual_quaternion_rpy(rpy):
    """Dual quaternion for a pure ZYX roll-pitch-yaw rotation.

    Layout is [x, y, z, w, x', y', z', w']: w + xyz is the real quaternion and
    w' + x'y'z' the dual quaternion.  A pure rotation has a zero dual part.
    """
    roll, pitch, yaw = rpy
    # Half-angle sines/cosines for the ZYX convention.
    sr, cr = np.sin(roll/2.0), np.cos(roll/2.0)
    sp, cp = np.sin(pitch/2.0), np.cos(pitch/2.0)
    sy, cy = np.sin(yaw/2.0), np.cos(yaw/2.0)
    res = np.zeros(8)
    # Real quaternion (quat = w + ix + jy + kz).
    res[0] = cy*sr*cp - sy*cr*sp
    res[1] = cy*cr*sp + sy*sr*cp
    res[2] = sy*cr*cp - cy*sr*sp
    res[3] = cy*cr*cp + sy*sr*sp
    return res
def dual_quaternion_translation(xyz):
    """Dual quaternion for a pure translation by *xyz* (identity rotation)."""
    res = np.zeros(8)
    res[3] = 1.0
    # Dual part holds half the displacement.
    for i in range(3):
        res[4 + i] = xyz[i] / 2.0
    return res
def dual_quaternion_axis_translation(axis, qi):
    """Dual quaternion translating a distance *qi* along *axis*."""
    res = np.zeros(8)
    res[3] = 1.0
    for i in range(3):
        res[4 + i] = qi * axis[i] / 2.0
    return res
def dual_quaternion_axis_rotation(axis, qi):
    """Dual quaternion rotating by angle *qi* about *axis*.

    AXIS MUST BE NORMALIZED!
    """
    half_sin = np.sin(qi/2.0)
    res = np.zeros(8)
    for i in range(3):
        res[i] = axis[i] * half_sin
    res[3] = np.cos(qi/2.0)
    return res
def dual_quaternion_prismatic(xyz, rpy, axis, qi):
    """Dual quaternion for a prismatic joint: the fixed origin transform
    (xyz translation + ZYX rpy rotation) followed by a slide of *qi* along
    *axis*."""
    roll, pitch, yaw = rpy
    sr, cr = np.sin(roll/2.0), np.cos(roll/2.0)
    sp, cp = np.sin(pitch/2.0), np.cos(pitch/2.0)
    sy, cy = np.sin(yaw/2.0), np.cos(yaw/2.0)
    # Origin orientation quaternion (quat = w + ix + jy + kz).
    x_or = cy*sr*cp - sy*cr*sp
    y_or = cy*cr*sp + sy*sr*cp
    z_or = sy*cr*cp - cy*sr*sp
    w_or = cy*cr*cp + sy*sr*sp
    # Dual (translation) part of the origin transform: 0.5 * t * q_rot.
    x_ot = 0.5*xyz[0]*w_or + 0.5*xyz[1]*z_or - 0.5*xyz[2]*y_or
    y_ot = - 0.5*xyz[0]*z_or + 0.5*xyz[1]*w_or + 0.5*xyz[2]*x_or
    z_ot = 0.5*xyz[0]*y_or - 0.5*xyz[1]*x_or + 0.5*xyz[2]*w_or
    w_ot = - 0.5*xyz[0]*x_or - 0.5*xyz[1]*y_or - 0.5*xyz[2]*z_or
    origin = [x_or, y_or, z_or, w_or, x_ot, y_ot, z_ot, w_ot]
    # Joint motion: identity rotation plus a translation of qi along axis.
    joint = [0.0, 0.0, 0.0, 1.0,
             qi*axis[0]/2.0, qi*axis[1]/2.0, qi*axis[2]/2.0, 0.0]
    return dual_quaternion_product(origin, joint)
def dual_quaternion_revolute(xyz, rpy, axis, qi):
    """Dual quaternion for a revolute joint: the fixed origin transform
    (xyz translation + ZYX rpy rotation) followed by a rotation of *qi*
    about *axis*.

    AXIS MUST BE NORMALIZED!
    """
    roll, pitch, yaw = rpy
    sr, cr = np.sin(roll/2.0), np.cos(roll/2.0)
    sp, cp = np.sin(pitch/2.0), np.cos(pitch/2.0)
    sy, cy = np.sin(yaw/2.0), np.cos(yaw/2.0)
    # Origin orientation quaternion (quat = w + ix + jy + kz).
    x_or = cy*sr*cp - sy*cr*sp
    y_or = cy*cr*sp + sy*sr*cp
    z_or = sy*cr*cp - cy*sr*sp
    w_or = cy*cr*cp + sy*sr*sp
    # Dual (translation) part of the origin transform: 0.5 * t * q_rot.
    x_ot = 0.5*xyz[0]*w_or + 0.5*xyz[1]*z_or - 0.5*xyz[2]*y_or
    y_ot = - 0.5*xyz[0]*z_or + 0.5*xyz[1]*w_or + 0.5*xyz[2]*x_or
    z_ot = 0.5*xyz[0]*y_or - 0.5*xyz[1]*x_or + 0.5*xyz[2]*w_or
    w_ot = - 0.5*xyz[0]*x_or - 0.5*xyz[1]*y_or - 0.5*xyz[2]*z_or
    origin = [x_or, y_or, z_or, w_or, x_ot, y_ot, z_ot, w_ot]
    # Joint motion: axis-angle rotation, no translation.
    sqi = np.sin(qi/2.0)
    joint = [axis[0]*sqi, axis[1]*sqi, axis[2]*sqi, np.cos(qi/2.0),
             0.0, 0.0, 0.0, 0.0]
    return dual_quaternion_product(origin, joint)
def quaternion_ravani_roth_dist(q1, q2):
    """Ravani-Roth quaternion distance: minimum over the q / -q double cover.
    See comparisons at: https://link.springer.com/content/pdf/10.1007%2Fs10851-009-0161-2.pdf"""
    d_same = np.linalg.norm(q1 - q2)
    d_flipped = np.linalg.norm(q1 + q2)
    return d_flipped if d_flipped < d_same else d_same
def quaternion_inner_product_dist(q1, q2):
    """Quaternion distance 1 - |<q1, q2>| (sign-invariant inner product).
    See comparisons at: https://link.springer.com/content/pdf/10.1007%2Fs10851-009-0161-2.pdf"""
    dot = q1[0]*q2[0] + q1[1]*q2[1] + q1[2]*q2[2] + q1[3]*q2[3]
    return 1.0 - abs(dot)
def rotation_distance_from_identity(R1, R2):
    """Rotation matrix distance ||I - R1 @ R2.T||_F.
    See comparisons at: https://link.springer.com/content/pdf/10.1007%2Fs10851-009-0161-2.pdf

    BUG FIX: the original subtracted np.eye(1) (a 1x1 matrix that broadcasts
    a 1 into every entry) instead of the 3x3 identity, so identical rotations
    reported a nonzero distance.
    """
    return np.linalg.norm(np.eye(3) - np.dot(R1, R2.T))
| 10,476 | 5,166 |
#!/usr/bin/env python3
import os
import re
import sys
import operator
import csv
# Running totals, populated by search_file().
# NOTE: the bottom of the script rebinds all three to sorted lists of
# (key, value) tuples before write_csv() runs.
error_counter = {}  # error message -> occurrence count
error_user = {}  # username -> number of ERROR lines
info_user = {}  # username -> number of INFO lines
def search_file():
    """Read syslog.log line by line, tallying ERROR messages and per-user
    ERROR/INFO counts into the module-level dictionaries."""
    with open('syslog.log', "r") as log_file:
        for entry in log_file:
            if " ERROR " in entry:
                find_error(entry)
                add_user_list(entry, 1)
            elif " INFO " in entry:
                add_user_list(entry, 2)
    return
def find_error(line):
    """Extract the ERROR message from *line* and bump its count in the
    module-level error_counter dict.

    Parameter renamed from `str`, which shadowed the builtin; every caller
    passes it positionally, so the rename is call-compatible.
    """
    match = re.search(r"(ERROR [\w \[]*) ", line)
    if match is not None:
        aux = match.group(0).replace("ERROR ", "").strip()
        # The \w pattern stops at the apostrophe, truncating
        # "Ticket doesn't exist" down to "Ticket"; restore the full message.
        if aux == "Ticket":
            aux = "Ticket doesn't exist"
        error_counter[aux] = error_counter.get(aux, 0) + 1
    return
def add_user_list(line, op):
    """Extract the "(username)" field from *line* and increment that user's
    ERROR count (op == 1) or INFO count (op == 2).

    Parameter renamed from `str`, which shadowed the builtin; callers pass it
    positionally.  BUG FIX: the original called match.group(0) without
    checking for a failed match, raising AttributeError on malformed lines;
    such lines are now skipped.
    """
    match = re.search(r'\(.*?\)', line)
    if match is None:
        return  # no "(user)" field present; ignore the line
    userA = match.group(0).strip("()")
    if op == 1:
        error_user[userA] = error_user.get(userA, 0) + 1
    elif op == 2:
        info_user[userA] = info_user.get(userA, 0) + 1
    return
def sort_list(op, data):
    """Return the dict's items sorted by value descending (op == 1) or by
    key ascending (op == 2) as a list of (key, value) tuples.

    Parameter renamed from `list`, which shadowed the builtin; callers pass
    it positionally.  BUG FIX: any other *op* used to raise a confusing
    UnboundLocalError; it now raises a descriptive ValueError.
    """
    if op == 1:
        return sorted(data.items(), key=operator.itemgetter(1), reverse=True)
    if op == 2:
        return sorted(data.items(), key=operator.itemgetter(0))
    raise ValueError("op must be 1 (by value desc) or 2 (by key asc), got %r" % (op,))
def getErrValue(keyV):
    """Return the ERROR count recorded for user *keyV*, or 0 if absent.

    By the time this runs, error_user has been rebound to a sorted list of
    (user, count) tuples (see the bottom of the script), so we iterate pairs.
    BUG FIX: the original compared with `is`, which tests string *identity*
    and only worked by accident of interning; use `==` equality.
    """
    for key, value in error_user:
        if key == keyV:
            return value
    return 0
def write_csv(op):
    """Write user_statistics.csv (op == 1) or error_message.csv (op == 2)
    from the module-level sorted (key, value) tuple lists."""
    if op == 1:
        with open('user_statistics.csv', 'w', newline='') as output:
            writer = csv.DictWriter(output, fieldnames=['Username', 'INFO', 'ERROR'])
            writer.writeheader()
            for username, info_count in info_user:
                writer.writerow({'Username': username,
                                 'INFO': info_count,
                                 'ERROR': getErrValue(username)})
    if op == 2:
        with open('error_message.csv', 'w', newline='') as output:
            writer = csv.DictWriter(output, fieldnames=['Error', 'Count'])
            writer.writeheader()
            for message, count in error_counter:
                writer.writerow({'Error': message, 'Count': count})
    return
def add_zeros():
    """Make error_user and info_user share the same key set: any user present
    in one dict but missing from the other gets a 0 entry there."""
    for user in error_user.keys():
        info_user.setdefault(user, 0)
    for user in info_user.keys():
        error_user.setdefault(user, 0)
    return
#This will execute the functions
search_file()
add_zeros()
# NOTE: sort_list() returns lists of (key, value) tuples, so these three
# assignments rebind the module-level dicts to sorted tuple lists;
# write_csv() and getErrValue() depend on that shape.
error_counter = sort_list(1, error_counter)
error_user = sort_list(2, error_user)
info_user = sort_list(2, info_user)
write_csv(1)
write_csv(2)
from math import log10
from itertools import tee
class Corpus:
    """Character n-gram frequency model for one labelled corpus.

    Counts live in a nested-dict trie: ``frequencies`` maps a character to
    ``[count, children]`` where ``children`` is the same structure one level
    deeper.
    """

    # Total observation count shared across *all* Corpus instances
    # (class attribute, name-mangled to _Corpus__global_corpus_frequency);
    # serves as the prior denominator in score().
    __global_corpus_frequency = 0.0

    def __init__(self, size, depth, smoothing_value, label):
        # size: alphabet size used in the add-k smoothing denominator.
        # depth: n-gram depth; stored but not consulted directly here.
        # smoothing_value: additive (add-k) smoothing constant.
        # label: identifier for this corpus; also defines the hash.
        self.depth = depth
        self.size = size
        self.smoothing_value = smoothing_value
        self.label = label
        self.frequencies = {}
        # NOTE(review): total_frequencies is initialized here but never
        # updated or read within this class — confirm external usage.
        self.total_frequencies = float(size * smoothing_value)
        self.local_corpus_frequency = 0.0

    def items(self):
        # Dict-like view of the top-level (first-character) counts.
        return self.frequencies.items()

    def keys(self):
        # Dict-like view of the top-level (first-character) keys.
        return self.frequencies.keys()

    def values(self):
        # Dict-like view of the top-level (first-character) values.
        return self.frequencies.values()

    def update(self, iterator):
        """Count every item (a character sequence, e.g. an n-gram) into the trie."""
        for item in iterator:
            target = self.frequencies
            for character in item:
                # Each trie node is [count, children]; create on first sight.
                new_target = target.setdefault(character, [0, {}])
                new_target[0] += 1
                target = new_target[1]
            self.local_corpus_frequency += 1
            Corpus.__global_corpus_frequency += 1

    def score(self, iterator):
        """Log10 score of the items under this corpus.

        Starts from the corpus prior log10(local/global) and adds a smoothed
        conditional log-probability of each item's final character given its
        prefix.  Unseen contexts fall back to a tiny probability floor.
        """
        results = log10(self.local_corpus_frequency / Corpus.__global_corpus_frequency)
        for item in iterator:
            # Walk the trie; previous_target ends at the node whose children
            # hold the counts for the item's last character.
            previous_target, target = None, self.frequencies
            for character in item:
                new_target = target.get(character, [0, {}])
                previous_target, target = target, new_target[1]
            numerator = previous_target.get(item[-1], [0, {}])[0] + self.smoothing_value
            denominator = sum([value for value, __ in previous_target.values()]) + self.smoothing_value * self.size
            try:
                results += log10(numerator / denominator)
            except (ZeroDivisionError, ValueError):
                # Empty context or zero smoothing: use a hard floor instead.
                results += log10(1e-64)
        return results

    def __hash__(self):
        # Hash by label so corpora can live in sets/dicts keyed per language.
        return hash(self.label)
class CorpusController:
    """Owns one Corpus per label; trains them and classifies new input by
    picking the highest-scoring corpus."""

    def __init__(self, size, depth, smoothing_value, *labels):
        self.languages = labels
        self.smoothing_value = smoothing_value
        self.size = size
        self.depth = depth
        self.corpora = {label: Corpus(size, depth, smoothing_value, label)
                        for label in labels}

    def train(self, iterator, label):
        """Feed *iterator*'s items into the corpus registered under *label*."""
        self.corpora[label].update(iterator)

    def classify(self, iterator):
        """Score *iterator* against every corpus and return the winning
        (score, label) pair (ties broken by label ordering via max)."""
        copies = iter(tee(iterator, len(self.corpora)))
        scored = []
        for label, corpus in self.corpora.items():
            scored.append((corpus.score(next(copies)), label))
        return max(scored)
| 2,487 | 748 |
"""
Writer for amber
"""
import time
import pandas as pd
import math
import re
import numpy as np
from collections import Counter
# Python 2/3 compat
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import eex
import logging
# AMBER local imports
from . import amber_metadata as amd
logger = logging.getLogger(__name__)
def _write_1d(file_handle, data, ncols, fmt):
    """Write a flat numeric array to *file_handle* (binary mode) as fixed-width
    rows of *ncols* values; a short final row holds any remainder.  An empty
    array produces a single blank line.  Flushes the handle before returning."""
    data = data.ravel()
    leftover = data.size % ncols
    if data.size == 0:
        file_handle.write("\n".encode())
    elif leftover == 0:
        np.savetxt(file_handle, data.reshape(-1, ncols), fmt=fmt, delimiter="")
    else:
        body = data[:-leftover].reshape(-1, ncols)
        tail = data[-leftover:].reshape(1, -1)
        np.savetxt(file_handle, body, fmt=fmt, delimiter="")
        np.savetxt(file_handle, tail, fmt=fmt, delimiter="")
    # Push buffered rows to the file before the caller writes the next FLAG.
    file_handle.flush()
def _write_amber_data(file_handle, data, category):
    """Write one %FLAG section for *category* to *file_handle* (binary mode),
    using the column count and format declared in amber_metadata."""
    fmt_string = amd.data_labels[category][1]
    fmt_data = amd.parse_format(fmt_string)
    file_handle.write(f"%FLAG {category}\n".encode())
    file_handle.write(f"{fmt_string}\n".encode())
    _write_1d(file_handle, np.array(data), fmt_data[0], amd.build_format(fmt_data))
def _check_dl_compatibility(dl):
    """
    This function examines a datalayer to determine if it is compatible with Amber.
    Conversions between functional forms and pairwise interaction mixing are performed (if possible).

    Raises TypeError for incompatible functional forms and KeyError when a
    required atom property is missing and no default can be filled in.
    """
    # Loop over force field information - check functional form compatibility
    for k, v in amd.forcefield_parameters.items():
        if k != "nonbond":
            terms = dl.list_term_parameters(v["order"])
            for j in terms.values():
                if j[0] != v["form"]:
                    # Will need to insert check to see if these can be easily converted (ex OPLS dihedral <-> charmmfsw)
                    raise TypeError("Functional form %s stored in datalayer is not compatible with Amber.\n" %(j[0]) )
        else:
            # handle non bonds here
            pass
    stored_properties = dl.list_atom_properties()
    required_properties = list(amd.atom_property_names.values())
    diff = np.setdiff1d(required_properties, stored_properties)
    natoms = dl.get_atom_count()
    index = np.arange(1, natoms + 1)
    # Build and curate the data
    df = pd.DataFrame({'atom_index': index})
    df.dropna(axis=0, how="any", inplace=True)
    df.set_index('atom_index', inplace=True)
    add_properties = []
    # Fill in default or raise error
    for req in diff:
        if req == 'atom_name':
            atom_names = ['A'] * natoms
            df[req] = atom_names
            add_properties.append(req)
        elif req == 'atomic_number':
            # Just say it's carbon...doesn't seem like this matters too much for amber
            atomic_numbers = [6] * natoms
            df[req] = atomic_numbers
            add_properties.append(req)
        elif req == "mass":
            # BUG FIX: this used a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt and hid the root cause; catch
            # Exception and chain the original error instead.
            try:
                dl.get_atoms(properties=["mass"])
            except Exception as exc:
                raise KeyError("No masses stored in datalayer") from exc
        else:
            raise KeyError("Atom property %s is missing from datalayer" %(req))
    # Check for residue_index
    if "residue_index" not in stored_properties:
        # If molecule_index is set, set residue index to this.
        # Otherwise, set all to 1.0
        if "molecule_index" in stored_properties:
            df["residue_index"] = dl.get_atoms(properties=["molecule_index"])
            df["residue_name"] = ["BLA"] * natoms
            add_properties.append("residue_index")
    if len(add_properties) > 0:
        dl.add_atoms(df, by_value=True)
def write_amber_file(dl, filename, inpcrd=None):
    """
    Write an Amber prmtop file (and a matching inpcrd file) from a datalayer.

    Parameters
    ------------
    dl : eex.DataLayer
        The datalayer containing information about the system to write
    filename : str
        The name of the file to write
    inpcrd : str, optional
        If None, attempts to read the file filename.replace("prmtop", "inpcrd") otherwise passes. #

    Returns 0 on success.

    NOTE(review): the *inpcrd* argument is never consulted below — the inpcrd
    path is always derived from *filename*; confirm this is intended.
    """
    ### First get information into Amber pointers. All keys are initially filled with zero.
    # Ones that are currently 0, but should be implemented eventually are marked with
    _check_dl_compatibility(dl)
    ### Figure out what is hydrogen for the header
    num_H_list = []
    inc_hydrogen = {}
    without_hydrogen = {}
    # Boolean mask over atoms, True where atomic_number == 1 (hydrogen).
    # NOTE(review): the mask is consumed via Series.isin(hidx) below, which
    # tests membership against the mask's True/False *values* rather than
    # hydrogen atom indices — verify against the datalayer's atom ids.
    hidx = (dl.get_atoms("atomic_number") == 1).values.ravel()
    for term_type, term_name in zip([2, 3, 4], ["bonds", "angles", "dihedrals"]):
        term = dl.get_terms(term_type)
        if term.shape[0] == 0:
            num_H_list.append(0)
            continue
        # Build up an index of what is in hydrogen or not
        inc_hydrogen_mask = term["atom1"].isin(hidx)
        for n in range(term_type - 1):
            name = "atom" + str(n + 2)
            inc_hydrogen_mask |= term[name].isin(hidx)
        num_H_list.append(len(term.loc[inc_hydrogen_mask].values))
        inc_hydrogen[term_name] = term.loc[inc_hydrogen_mask].values
        without_hydrogen[term_name] = term.loc[~inc_hydrogen_mask].values
    output_sizes = {k: 0 for k in amd.size_keys}
    output_sizes['NATOM'] = dl.get_atom_count()  # Number of atoms
    output_sizes["NBONH"] = num_H_list[0]  # Number of bonds containing hydrogen
    output_sizes["MBONA"] = dl.get_term_count(2, "total") - output_sizes["NBONH"]  # Number of bonds not containing hydrogen
    output_sizes['NBONA'] = output_sizes["MBONA"]  # MBONA + number of constraint bonds (MBONA = NBONA always)
    output_sizes["NTHETH"] = num_H_list[1]  # Number of angles containing hydrogen
    output_sizes["MTHETA"] = dl.get_term_count(3, "total") - output_sizes["NTHETH"]  # Number of angles not containing hydrogen
    output_sizes['NTHETA'] = output_sizes["MTHETA"]  # MTHETA + number of constraint angles (NTHETA = MTHETA always)
    output_sizes["NPHIH"] = num_H_list[2]  # Number of torsions containing hydrogen
    output_sizes["MPHIA"] = dl.get_term_count(4, "total") - output_sizes["NPHIH"]  # Number of torsions not containing hydrogen
    output_sizes["NPHIA"] = output_sizes["MPHIA"]
    output_sizes["NUMBND"] = len(dl.list_term_uids(2))  # Number of unique bond types
    output_sizes["NUMANG"] = len(dl.list_term_uids(3))  # Number of unique angle types
    output_sizes["NPTRA"] = len(dl.list_term_uids(4))  # Number of unique torsion types
    output_sizes["NRES"] = len(dl.list_atom_uids("residue_name"))  # Number of residues (not stable)
    output_sizes["NTYPES"] = len(np.unique(dl.get_atoms("atom_type")))  # Number of distinct LJ atom types
    output_sizes["NPARM"] = 0  # Used to determine if this is a LES-compatible prmtop (??)
    output_sizes["NNB"] = dl.get_atom_count(
    )  # Number of excluded atoms - Set to num atoms for our test cases. Amber will not run with 0
    # 0 - no box, 1 - orthorhombic box, 2 - truncated octahedron
    output_sizes["NMXRS"] = 0  # Number of atoms in the largest residue
    output_sizes["IFCAP"] = 0  # Set to 1 if a solvent CAP is being used
    output_sizes["NUMEXTRA"] = 0  # Number of extra points in the topology file
    ## Needs check for orthorhomibic box (1) or truncated octahedron (2). Currently just 0 or 1
    output_sizes["IFBOX"] = [0 if dl.get_box_size() == {} else 1][0]  # Flag indicating whether a periodic box is present
    written_categories = []
    # Figure out size each section should be based on metadata
    label_sizes = {}
    for k, v in amd.data_labels.items():
        if isinstance(v[0], int):
            label_sizes[k] = v[0]
        elif v[0] in list(output_sizes):
            label_sizes[k] = output_sizes[v[0]]
        else:
            # The metadata size is an arithmetic expression over pointer names;
            # evaluate it with the pointer dict as the namespace.
            # print("%30s %40s %d" % (k, v[0], int(eval(v[0], sizes_dict))))
            label_sizes[k] = int(eval(v[0], output_sizes))
    ### Write title and version information
    f = open(filename, "w")
    f.write('%%VERSION VERSION_STAMP = V0001.000 DATE = %s %s\n' % (time.strftime("%x"), time.strftime("%H:%M:%S")))
    f.write("%FLAG TITLE\n%FORMAT(20a4)\n")
    f.write("prmtop generated by MolSSI EEX\n")
    ## Write pointers section
    f.write("%%FLAG POINTERS\n%s\n" % (amd.data_labels["POINTERS"][1]))
    ncols, dtype, width = amd.parse_format(amd.data_labels["POINTERS"][1])
    format_string = "%%%sd" % width
    count = 0
    for k in amd.size_keys:
        f.write(format_string % output_sizes[k])
        count += 1
        if count % ncols == 0:
            f.write("\n")
    f.write("\n")
    f.close()
    written_categories.append("POINTERS")
    ### Write atom properties sections
    # Reopen in append-binary mode; all remaining sections write bytes.
    file_handle = open(filename, "ab")
    for k in amd.atom_property_names:
        # Get data
        data = dl.get_atoms(amd.atom_property_names[k], by_value=True, utype=amd.atom_data_units).values.ravel()
        _write_amber_data(file_handle, data, k)
        written_categories.append(k)
    ### Handle residues
    # We assume these are sorted WRT to atom and itself at the moment... not great
    res_data = dl.get_atoms(["residue_index", "residue_name"], by_value=True)
    uvals, uidx, ucnts = np.unique(res_data["residue_index"], return_index=True, return_counts=True)
    labels = res_data["residue_name"].iloc[uidx].values
    _write_amber_data(file_handle, labels, "RESIDUE_LABEL")
    written_categories.append("RESIDUE_LABEL")
    # 1-based atom index at which each residue starts (cumulative sizes).
    starts = np.concatenate(([1], np.cumsum(ucnts) + 1))[:-1]
    _write_amber_data(file_handle, starts, "RESIDUE_POINTER")
    written_categories.append("RESIDUE_POINTER")
    ### Write out term parameters
    for term_type in ["bond", "angle", "dihedral"]:
        uids = sorted(dl.list_term_uids(term_type))
        if len(uids) == 0: continue
        term_md = amd.forcefield_parameters[term_type]
        tmps = {k: [] for k in term_md["column_names"].keys()}
        utype = term_md["units"]
        order = term_md["order"]
        inv_lookup = {v: k for k, v in term_md["column_names"].items()}
        # Build lists of data since AMBER holds this as 1D
        for uid in uids:
            params = dl.get_term_parameter(order, uid, utype=utype)
            for k, v in params[1].items():
                tmps[inv_lookup[k]].append(v)
        # Write out FLAGS
        for k, v in tmps.items():
            _write_amber_data(file_handle, v, k)
            written_categories.append(k)
    for term_type, term_name in zip([2, 3, 4], ["bonds", "angles", "dihedrals"]):
        term = dl.get_terms(term_type)
        if term.shape[0] == 0: continue
        # Build up an index of what is in hydrogen or not
        # NOTE(review): this mask is recomputed but never used below — the
        # inc_hydrogen/without_hydrogen arrays from the first loop are used.
        inc_hydrogen_mask = term["atom1"].isin(hidx)
        # Scale by weird AMBER factors
        inc_hydrogen[term_name][:, :-1] = (inc_hydrogen[term_name][:, :-1] - 1) * 3
        without_hydrogen[term_name][:, :-1] = (without_hydrogen[term_name][:, :-1] - 1) * 3
        inc_h_name = term_name.upper() + "_INC_HYDROGEN"
        without_h_name = term_name.upper() + "_WITHOUT_HYDROGEN"
        _write_amber_data(file_handle, inc_hydrogen[term_name], inc_h_name)
        written_categories.append(inc_h_name)
        _write_amber_data(file_handle, without_hydrogen[term_name], without_h_name)
        written_categories.append(without_h_name)
    # Handle SOLVENT_POINTERS, ATOMS_PER_MOLECULE and BOX_DIMENSIONS. Only present if IFBOX>0.
    if output_sizes["IFBOX"] > 0:
        #Solvent pointers section
        # There are three numbers here - IPTRES, NSPM, NSPSOL
        # where
        # IPTRES = final residue part of solute, NSPM = total number of molecules, NSPSOL = first solvent molecule
        # Just say everything is solute for now.
        iptres = dl.get_atoms(["residue_index"]).values[-1]
        nspm = len(np.unique(dl.get_atoms(["molecule_index"]).values))
        solvent_pointers = [iptres, nspm, nspm]
        _write_amber_data(file_handle, solvent_pointers, "SOLVENT_POINTERS")
        # Handle atoms per molecule
        molecule_list = dl.get_atoms(["molecule_index"]).values.ravel()
        count_atoms_per_molecule = Counter(molecule_list)
        atoms_per_molecule = []
        for x in range(1, nspm+1):
            atoms_per_molecule.append(count_atoms_per_molecule[x])
        _write_amber_data(file_handle, atoms_per_molecule, "ATOMS_PER_MOLECULE")
        # Write box dimensions section
        box_dimensions = dl.get_box_size(utype={"a": amd.box_units["length"], "b": amd.box_units["length"],
                                                "c" : amd.box_units["length"], "alpha": amd.box_units["angle"],
                                                "beta": amd.box_units["angle"], "gamma": amd.box_units["angle"]})
        # Section order here is (beta, a, b, c), matching what is written.
        write_box = [box_dimensions["beta"], box_dimensions["a"], box_dimensions["b"], box_dimensions["c"]]
        _write_amber_data(file_handle, write_box, "BOX_DIMENSIONS")
        written_categories.append("BOX_DIMENSIONS")
        written_categories.append("SOLVENT_POINTERS")
        written_categories.append("ATOMS_PER_MOLECULE")
    # Quick fix for radius set will be one line string description in files prepared by xleap
    _write_amber_data(file_handle, ["Place holder - EEX"], "RADIUS_SET")
    written_categories.append("RADIUS_SET")
    # Handle NB data
    # Relevant headers = NONBOND_PARM_INDEX, LENNARD_JONES_ACOEF, LENNARD_JONES_BCOEF
    stored_atom_types = dl.get_unique_atom_types()
    ntypes = len(stored_atom_types)
    nb_forms = dl.list_stored_nb_types()
    # This can be removed if compatibility check inserted at beginning
    if set(nb_forms) != set(["LJ"]):
        # Write better message here
        raise KeyError("Nonbond forms stored in datalayer are not compatible with Amber - %s" % nb_forms)
    # Get parameters from datalayer using correct amber units
    stored_nb_parameters = dl.list_nb_parameters(
        nb_name="LJ", nb_model="AB", utype=amd.forcefield_parameters["nonbond"]["units"], itype="pair")
    nonbonded_parm_index = np.zeros(ntypes * ntypes)
    lj_a_coeff = []
    lj_b_coeff = []
    # Build a_coeff, b_coeff, and nb_parm lists
    for key, value in stored_nb_parameters.items():
        lj_a_coeff.append(value['A'])
        lj_b_coeff.append(value['B'])
        # Symmetric (i, j) and (j, i) entries both point at this coefficient.
        index_to_nb = ntypes * (key[0] - 1) + key[1]
        index_to_nb2 = ntypes * (key[1] - 1) + key[0]
        nonbonded_parm_index[index_to_nb - 1] = len(lj_a_coeff)
        nonbonded_parm_index[index_to_nb2 - 1] = len(lj_a_coeff)
    _write_amber_data(file_handle, nonbonded_parm_index, "NONBONDED_PARM_INDEX")
    _write_amber_data(file_handle, lj_a_coeff, "LENNARD_JONES_ACOEF")
    _write_amber_data(file_handle, lj_b_coeff, "LENNARD_JONES_BCOEF")
    for category in amd.forcefield_parameters["nonbond"]["column_names"]:
        written_categories.append(category)
    ### Write headers for other sections (file will not work in AMBER without these)
    for k in amd.data_labels:
        if k not in written_categories:
            if label_sizes[k] > 0:
                data = np.zeros(label_sizes[k])
                _write_amber_data(file_handle, data, k)
            else:
                file_handle.write(("%%FLAG %s\n%s\n\n" % (k, amd.data_labels[k][1])).encode())
            written_categories.append(k)
    file_handle.close()
    # Now we need to write out the INPCRD
    if '.prmtop' in filename:
        inpcrd_file = filename.replace('.prmtop', '.inpcrd')
    else:
        inpcrd_file = filename + '.inpcrd'
    file_handle = open(inpcrd_file, "wb")
    xyz = dl.get_atoms("XYZ", utype={"XYZ": "angstrom"})
    file_handle.write("default_name\n".encode())
    file_handle.write(("%6d\n" % xyz.shape[0]).encode())
    _write_1d(file_handle, xyz.values.ravel(), 6, "%12.6f")
    if output_sizes["IFBOX"] > 0:
        box = pd.DataFrame(box_dimensions, index=[0])
        box = box[['a', 'b', 'c', 'alpha', 'beta', 'gamma']]
        _write_1d(file_handle, box.values.ravel(), 6, "%12.6f")
    file_handle.close()
    return 0
| 16,117 | 5,754 |
import galaxy.model
from galaxy.model.orm import *
from galaxy.model.mapping import context as sa_session
from base.twilltestcase import *
import sys
def delete_obj( obj ):
    """Delete *obj* from the session and flush immediately."""
    sa_session.delete( obj )
    sa_session.flush()
def delete_request_type_permissions( id ):
    """Delete every RequestTypePermissions row for request type *id*."""
    rtp_model = galaxy.model.RequestTypePermissions
    rows = sa_session.query( rtp_model ) \
                     .filter( and_( rtp_model.table.c.request_type_id==id ) ) \
                     .order_by( desc( rtp_model.table.c.create_time ) )
    for row in rows:
        sa_session.delete( row )
    sa_session.flush()
def delete_user_roles( user ):
    """Remove every role association attached to *user*."""
    for association in user.roles:
        sa_session.delete( association )
    sa_session.flush()
def flush( obj ):
    """Add *obj* to the session and flush it to the database."""
    sa_session.add( obj )
    sa_session.flush()
def get_all_histories_for_user( user ):
    """Every non-deleted History owned by *user*."""
    history = galaxy.model.History
    return sa_session.query( history ) \
                     .filter( and_( history.table.c.user_id==user.id,
                                    history.table.c.deleted==False ) ) \
                     .all()
def get_dataset_permissions_by_dataset( dataset ):
    """All DatasetPermissions rows attached to *dataset*."""
    perms = galaxy.model.DatasetPermissions
    return sa_session.query( perms ) \
                     .filter( perms.table.c.dataset_id==dataset.id ) \
                     .all()
def get_dataset_permissions_by_role( role ):
    """First DatasetPermissions row granted to *role*, or None."""
    perms = galaxy.model.DatasetPermissions
    return sa_session.query( perms ) \
                     .filter( perms.table.c.role_id == role.id ) \
                     .first()
def get_default_history_permissions_by_history( history ):
    """All DefaultHistoryPermissions rows for *history*."""
    perms = galaxy.model.DefaultHistoryPermissions
    return sa_session.query( perms ) \
                     .filter( perms.table.c.history_id==history.id ) \
                     .all()
def get_default_history_permissions_by_role( role ):
    """All DefaultHistoryPermissions rows granted to *role*."""
    perms = galaxy.model.DefaultHistoryPermissions
    return sa_session.query( perms ) \
                     .filter( perms.table.c.role_id == role.id ) \
                     .all()
def get_default_user_permissions_by_role( role ):
return sa_session.query( galaxy.model.DefaultUserPermissions ) \
.filter( galaxy.model.DefaultUserPermissions.table.c.role_id == role.id ) \
.all()
def get_default_user_permissions_by_user( user ):
return sa_session.query( galaxy.model.DefaultUserPermissions ) \
.filter( galaxy.model.DefaultUserPermissions.table.c.user_id==user.id ) \
.all()
def get_form( name ):
    """Return the latest revision of the non-deleted form named *name*,
    or None when no such form exists."""
    fdc_list = sa_session.query( galaxy.model.FormDefinitionCurrent ) \
                         .filter( galaxy.model.FormDefinitionCurrent.table.c.deleted == False ) \
                         .order_by( galaxy.model.FormDefinitionCurrent.table.c.create_time.desc() )
    for fdc in fdc_list:
        # Refresh from the database so latest_form reflects edits made
        # earlier in the test run.
        sa_session.refresh( fdc )
        sa_session.refresh( fdc.latest_form )
        if fdc.latest_form.name == name:
            return fdc.latest_form
    return None

def get_folder( parent_id, name, description ):
    """Return the first library folder matching parent id, name and description."""
    return sa_session.query( galaxy.model.LibraryFolder ) \
                     .filter( and_( galaxy.model.LibraryFolder.table.c.parent_id==parent_id,
                                    galaxy.model.LibraryFolder.table.c.name==name,
                                    galaxy.model.LibraryFolder.table.c.description==description ) ) \
                     .first()
def get_group_by_name( name ):
    """Return the Group named *name*, or None."""
    return sa_session.query( galaxy.model.Group ).filter( galaxy.model.Group.table.c.name==name ).first()

def get_group_role_associations_by_group( group ):
    """Return all role associations attached to *group*."""
    return sa_session.query( galaxy.model.GroupRoleAssociation ) \
                     .filter( galaxy.model.GroupRoleAssociation.table.c.group_id == group.id ) \
                     .all()

def get_group_role_associations_by_role( role ):
    """Return all group associations attached to *role*."""
    return sa_session.query( galaxy.model.GroupRoleAssociation ) \
                     .filter( galaxy.model.GroupRoleAssociation.table.c.role_id == role.id ) \
                     .all()

def get_latest_dataset():
    """Return the most recently created Dataset."""
    return sa_session.query( galaxy.model.Dataset ) \
                     .order_by( desc( galaxy.model.Dataset.table.c.create_time ) ) \
                     .first()

def get_latest_hda():
    """Return the most recently created HistoryDatasetAssociation."""
    return sa_session.query( galaxy.model.HistoryDatasetAssociation ) \
                     .order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ) \
                     .first()

def get_latest_history_for_user( user ):
    """Return the newest non-deleted history owned by *user*."""
    return sa_session.query( galaxy.model.History ) \
                     .filter( and_( galaxy.model.History.table.c.deleted==False,
                                    galaxy.model.History.table.c.user_id==user.id ) ) \
                     .order_by( desc( galaxy.model.History.table.c.create_time ) ) \
                     .first()

def get_latest_ldda_by_name( name ):
    """Return the newest non-deleted LibraryDatasetDatasetAssociation named *name*."""
    return sa_session.query( galaxy.model.LibraryDatasetDatasetAssociation ) \
                     .filter( and_( galaxy.model.LibraryDatasetDatasetAssociation.table.c.name==name,
                                    galaxy.model.LibraryDatasetDatasetAssociation.table.c.deleted == False ) ) \
                     .order_by( desc( galaxy.model.LibraryDatasetDatasetAssociation.table.c.create_time ) ) \
                     .first()

def get_latest_lddas( limit ):
    """Return a query for up to *limit* most recently updated LDDAs."""
    return sa_session.query( galaxy.model.LibraryDatasetDatasetAssociation ) \
                     .order_by( desc( galaxy.model.LibraryDatasetDatasetAssociation.table.c.update_time ) ) \
                     .limit( limit )
def get_library( name, description, synopsis ):
    """Return the first non-deleted library matching all three fields."""
    return sa_session.query( galaxy.model.Library ) \
                     .filter( and_( galaxy.model.Library.table.c.name==name,
                                    galaxy.model.Library.table.c.description==description,
                                    galaxy.model.Library.table.c.synopsis==synopsis,
                                    galaxy.model.Library.table.c.deleted==False ) ) \
                     .first()

def get_private_role( user ):
    """Return *user*'s private role; raise AssertionError if it is missing."""
    for role in user.all_roles():
        # A private role is named after the user's email with a fixed description.
        if role.name == user.email and role.description == 'Private Role for %s' % user.email:
            return role
    raise AssertionError( "Private role not found for user '%s'" % user.email )

def get_request_by_name( name ):
    """Return the first non-deleted Request named *name*."""
    return sa_session.query( galaxy.model.Request ) \
                     .filter( and_( galaxy.model.Request.table.c.name==name,
                                    galaxy.model.Request.table.c.deleted==False ) ) \
                     .first()

def get_request_type_by_name( name ):
    """Return the newest RequestType named *name*."""
    return sa_session.query( galaxy.model.RequestType ) \
                     .filter( and_( galaxy.model.RequestType.table.c.name==name ) ) \
                     .order_by( desc( galaxy.model.RequestType.table.c.create_time ) ) \
                     .first()

def get_role_by_name( name ):
    """Return the Role named *name*, or None."""
    return sa_session.query( galaxy.model.Role ).filter( galaxy.model.Role.table.c.name==name ).first()

def get_user( email ):
    """Return the User with the given email, or None."""
    return sa_session.query( galaxy.model.User ) \
                     .filter( galaxy.model.User.table.c.email==email ) \
                     .first()
def get_user_address( user, short_desc ):
    """Return the newest non-deleted address of *user* whose desc is *short_desc*."""
    return sa_session.query( galaxy.model.UserAddress ) \
                     .filter( and_( galaxy.model.UserAddress.table.c.user_id==user.id,
                                    galaxy.model.UserAddress.table.c.desc==short_desc,
                                    galaxy.model.UserAddress.table.c.deleted==False ) ) \
                     .order_by( desc( galaxy.model.UserAddress.table.c.create_time ) ) \
                     .first()

def get_user_group_associations_by_group( group ):
    """Return all user associations attached to *group*."""
    return sa_session.query( galaxy.model.UserGroupAssociation ) \
                     .filter( galaxy.model.UserGroupAssociation.table.c.group_id == group.id ) \
                     .all()

def get_user_info_form_definition():
    """Return the form-definition type constant used for user-info forms."""
    return galaxy.model.FormDefinition.types.USER_INFO

def get_user_role_associations_by_role( role ):
    """Return all user associations attached to *role*."""
    return sa_session.query( galaxy.model.UserRoleAssociation ) \
                     .filter( galaxy.model.UserRoleAssociation.table.c.role_id == role.id ) \
                     .all()

def mark_obj_deleted( obj ):
    """Flag *obj* as deleted and commit (rows are soft-deleted, not removed)."""
    obj.deleted = True
    sa_session.add( obj )
    sa_session.flush()

def refresh( obj ):
    """Re-load *obj*'s state from the database."""
    sa_session.refresh( obj )
| 8,457 | 2,538 |
from helpers.Logger import Logger
from calendar import Calendar
from employee import Employee
import json
import sys
import prettytable
import os
import time
import re
# Logger setup: a realm-scoped project logger for this script.
# NOTE(review): the name `logging` shadows the stdlib logging module.
logging = Logger()
log = logging.realm('Shift Manager')
def main():
    """Build the monthly shift plan from the JSON configuration and absence
    files, log a configuration report, and write the resulting overview
    tables into the ../vystup/ output directory."""
    # 1] Load the main configuration file.
    with open('../data/konfigurace.json') as json_file1:
        config_data = json.load(json_file1)
    # 2] Load the vacations/days-off (absences) file.
    with open('../data/dovolene_volna.json') as json_file2:
        absences_data = json.load(json_file2)
    # 3] Count public holidays in the configured month.
    vychozi_mesic = config_data['datumova_konfigurace']['mesic']
    vychozi_rok = config_data['datumova_konfigurace']['rok']
    vychozi_svatky = []
    # 3.1] Holiday counter: keep holidays whose month part matches.
    for unit in config_data['kalendar']['svatky'][vychozi_rok]:
        # NOTE(review): "\." should be a raw string r"\." to avoid an
        # invalid-escape-sequence DeprecationWarning.
        search_re = re.split("\.", unit)
        if search_re[1] == vychozi_mesic:
            vychozi_svatky.append(unit)
    # 3.2] Holiday binder: comma-separated list used in reports and allocation.
    public_holidays_list = ", ".join(vychozi_svatky)
    # 3.3] Vacation counter: collect per-shift-type absence lists.
    day_list = config_data["zamestnanci"]["denni"]
    night_list = config_data["zamestnanci"]["nocni"]
    day_vacation = []
    day_absence = []
    night_vacation = []
    night_absence = []
    # 3.4.1] Day-shift employees: vacations and days off.
    for employee in day_list:
        for vacation_d in absences_data["dovolene"][employee]:
            day_vacation.append(vacation_d)
        for absence_d in absences_data["volna"][employee]:
            day_absence.append(absence_d)
    # 3.4.2] Night-shift employees: vacations and days off.
    for employee in night_list:
        for vacation_n in absences_data["dovolene"][employee]:
            night_vacation.append(vacation_n)
        for absence_n in absences_data["volna"][employee]:
            night_absence.append(absence_n)
    # 4] Log the configuration & absence report (log messages are in Czech).
    log.info('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*')
    log.info('Konfiguracni report')
    log.info(' Vychozi mesic a rok: ' + vychozi_mesic + '/' + vychozi_rok)
    log.info(' Denni smeny - interval: ' + config_data['intervaly_smen']['denni_smena'])
    log.info(' Nocni smeny - interval: ' + config_data['intervaly_smen']['nocni_smena'])
    log.info(' Pocet svatku v akt. mesici: ' + repr(len(vychozi_svatky)))
    log.info(' Data svatku v akt. mesici: ' + public_holidays_list)
    log.info('Report nahlasenych absenci')
    log.info(' Nahlasene dovolene (D): ' + repr(len(day_vacation)))
    log.info(' Nahlasene dovolene (N): ' + repr(len(night_vacation)))
    log.info(' Nahlasene volno (D): ' + repr(len(day_absence)))
    log.info(' Nahlasene volno (N): ' + repr(len(night_absence)))
    log.info('Report smen')
    log.info(' Zahajujici smena (D): ' + config_data['zahajeni_smen']['denni'])
    log.info(' Pocet navazujicich smen (D): ' + repr(config_data['zahajeni_smen']['navazujici_denni']))
    log.info(' Zahajujici smena (N): ' + config_data['zahajeni_smen']['nocni'])
    log.info(' Pocet navazujicich smen (N): ' + repr(config_data['zahajeni_smen']['navazujici_nocni']))
    log.info('*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*')
    # Check modules availability.
    # NOTE(review): this check is ineffective -- the imports at the top of
    # the file already succeeded, so a truly missing module would have
    # raised ImportError before main() ever ran; 'logging' here refers to
    # the stdlib module name in sys.modules, not this file's Logger binding.
    modules = ['json', 'sys', 'logging', 'os', 'time', 'prettytable']
    missing_module = False
    for module in modules:
        if module not in sys.modules:
            log.critical('Pozadovany modul <' + module + '> nebyl nalezen.')
            missing_module = True
    if missing_module:
        sys.exit(1)
    log.debug('Pozadovane moduly jsou pritomny [json, sys, logging, os, time, prettytable].')
    # 5] TODO: Calendar Builder
    # Create calendar based on conf. data
    calendar = Calendar(vychozi_mesic, vychozi_rok)
    last_day = int(calendar.determine_last_day())
    calendar.initialize_calendar_month(last_day)
    # print(calendar.calendar_list)
    log.info('Delka kalendare (denni + nocni): {}'.format(len(calendar.calendar_list)))
    log.info('Delka kalendaroveho elementu (ocekavano 4): {}'.format(len(calendar.calendar_list[0])))
    #calendar.allocate_vacations(absences_data)
    # 6] TODO: Initialize Employees
    employee_list = []
    for shift_type in config_data["zamestnanci"].values():
        for employee_name in shift_type:
            # Employee card for every employee: schedule, shift type, absences.
            employee_card = Employee(employee_name)
            employee_card.initialize_schedule(last_day)
            employee_card.determine_shift_type(config_data["zamestnanci"])
            employee_card.process_absences(absences_data)
            employee_card.process_vacations(absences_data)
            # Generate a list object of employee cards
            employee_list.append(employee_card)
    # Checkpoint - Show employees' cards
    # for employee in employee_list:
    #     employee.show()
    # 7] TODO: Calendar - Allocate Public Holiday
    calendar.allocate_public_holiday(employee_list, public_holidays_list, config_data)
    # Checkpoint - Show employees' cards
    # for employee in employee_list:
    #     employee.show()
    # 8] TODO: Calendar - Shifts starter
    # Finish night-shift from the last night
    calendar.allocate_starting_night_shift(employee_list, config_data)
    # Loop shift starters
    calendar.allocate_shift_starters(employee_list, config_data)
    # Checkpoint - Show employees' cards
    # for employee in employee_list:
    #     employee.show()
    #
    # calendar.show()
    # 8] TODO: Calendar Looper
    # Loop through whole calendar and provide equal hours across employees (first shot with expected errors)
    calendar.loop_calendar(employee_list, config_data, last_day)
    # Fill in empties
    # calendar.fill_empty_calendar_sequence(employee_list)
    # Clean Overview files.
    # NOTE(review): truncating here is redundant -- the same files are
    # re-opened with mode 'w' (which truncates) just below.
    open('../vystup/Prehled_zamestnancu.txt', 'w').close()
    open('../vystup/Mesicni_rozpis.txt', 'w').close()
    open('../vystup/Smeny_zamestnancu.txt', 'w').close()
    employee_overview_table = calendar.note_employees_overview(employees=employee_list)
    employees_schedule_table = calendar.note_employee_schedule(employees=employee_list, calc_month=vychozi_mesic, calc_year=vychozi_rok)
    monthly_calendar_table = calendar.note_calendar_allocation(calc_month=vychozi_mesic, calc_year=vychozi_rok,
                                                              publ_holiday=public_holidays_list)
    # 8] TODO: Results Review
    # BIIIIG TODOOOOOS HERE :-(((((((
    # 9] TODO: Results Export (Calendar, Employees)
    with open('../vystup/Prehled_zamestnancu.txt', 'w') as employees_overview:
        employees_overview.write(str(employee_overview_table))
    with open('../vystup/Smeny_zamestnancu.txt', 'w') as employees_schedule_overview:
        employees_schedule_overview.write(str(employees_schedule_table))
    with open('../vystup/Mesicni_rozpis.txt', 'w') as shifts_plan:
        shifts_plan.write(str(monthly_calendar_table))

if __name__ == "__main__":
    main()
| 7,085 | 2,492 |
"""Helpers for polyphemus.
Utilities API
=============
"""
from __future__ import print_function
import os
import io
import re
import sys
import glob
import tempfile
import functools
import subprocess
from copy import deepcopy
from pprint import pformat
from collections import Mapping, Iterable, Hashable, Sequence, namedtuple, \
MutableMapping
from hashlib import md5
from warnings import warn
try:
import cPickle as pickle
except ImportError:
import pickle
if sys.version_info[0] >= 3:
basestring = str
DEFAULT_RC_FILE = "polyphemusrc.py"
"""Default run control file name."""
DEFAULT_PLUGINS = ('polyphemus.base', 'polyphemus.githubhook', 'polyphemus.batlabrun',
'polyphemus.batlabstat', 'polyphemus.githubstat',
'polyphemus.dashboard')
"""Default list of plugin module names."""
FORBIDDEN_NAMES = frozenset(['del', 'global'])
def warn_forbidden_name(forname, inname=None, rename=None):
    """Emit a RuntimeWarning telling the user a forbidden name was found.

    Parameters
    ----------
    forname : str
        The forbidden name that was encountered.
    inname : str, optional
        Where the name was found, if known.
    rename : str, optional
        The replacement name, if a rename will be performed.
    """
    parts = ["found forbidden name {0!r}".format(forname)]
    if inname is not None:
        parts.append(" in {0!r}".format(inname))
    if rename is not None:
        parts.append(", renaming to {0!r}".format(rename))
    warn("".join(parts), RuntimeWarning)
def indent(s, n=4, join=True):
    """Indent every line of *s* (a string or an iterable of lines) by n spaces.

    When join is True the indented lines are returned as one newline-joined
    string; otherwise the list of indented lines is returned.  None entries
    are dropped either way.
    """
    pad = " " * n
    lines = s.splitlines() if isinstance(s, basestring) else s
    padded = [pad + line for line in (lines or ()) if line is not None]
    return '\n'.join(padded) if join else padded
class indentstr(str):
    """str subclass whose ``indentN`` attributes return a copy of the string
    with every line indented by N spaces (e.g. ``s.indent8``) -- convenient
    for indenting whole strings inside of format strings."""

    def __getattr__(self, key):
        # Anything that is not an indentN request falls through to str.
        if not key.startswith('indent'):
            return getattr(super(indentstr, self), key)
        return indent(self, n=int(key[6:]))
def expand_default_args(methods):
    """Expand default arguments in a collection of method tuples.

    Each element of *methods* is ``(mkey, mrtn)`` where ``mkey`` is
    ``(name, arg1, arg2, ...)`` and an argument tuple of length 3 carries a
    default value.  Returns the set of every call signature obtainable by
    successively dropping trailing defaulted arguments.
    """
    expanded = set()
    for mkey, mrtn in methods:
        mname = mkey[0]
        margs = mkey[1:]
        has_default = [len(arg) == 3 for arg in margs]
        if not any(has_default):
            # No defaults: the signature stands as-is.
            expanded.add((mkey, mrtn))
            continue
        # Signatures from "all defaults dropped" up to "all args present".
        first = has_default.index(True)
        expanded.add(((mname,) + tuple(margs[:first]), mrtn))
        for i in range(first + 1, len(margs) + 1):
            expanded.add(((mname,) + tuple(margs[:i]), mrtn))
    return expanded
def newoverwrite(s, filename, verbose=False):
    """Write *s* to *filename* only if the contents would actually change.

    Useful for not forcing re-compiles and thus playing nicely with the
    build system: if the file already holds exactly this content, it is
    left untouched (preserving its mtime).

    Parameters
    ----------
    s : str
        String contents to write.
    filename : str
        Path to file; missing parent directories are created.
    verbose : bool, optional
        Print an extra message when the file is written.
    """
    encoded = s.encode()
    if os.path.isfile(filename):
        with io.open(filename, 'rb') as f:
            old = f.read()
        # BUG FIX: compare bytes with bytes.  The original compared the str
        # `s` against the bytes read back, which is never equal on Python 3,
        # so the no-op optimization never fired.
        if encoded == old:
            return
    else:
        dirname = os.path.dirname(filename)
        # Guard against an empty dirname for bare filenames in the cwd.
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
    with io.open(filename, 'wb') as f:
        f.write(encoded)
    if verbose:
        print(" wrote " + filename)
def newcopyover(f1, f2, verbose=False):
    """Copy file *f1* over *f2*, touching *f2* only if its contents differ.

    Useful for not forcing re-compiles and thus playing nicely with the
    build system.

    Parameters
    ----------
    f1 : str
        Path of the file to copy from; missing files are silently skipped.
    f2 : str
        Path of the file to copy over.
    verbose : bool, optional
        Print an extra message when a write happens.
    """
    if not os.path.isfile(f1):
        return
    with io.open(f1, 'r') as src:
        contents = src.read()
    return newoverwrite(contents, f2, verbose)
def writenewonly(s, filename, verbose=False):
    """Write *s* to *filename* only if the file does not already exist.

    Useful for not touching files that are already in place.

    Parameters
    ----------
    s : str
        String contents to write.
    filename : str
        Path to file.
    verbose : bool, optional
        Print an extra message when the file is written.
    """
    if os.path.isfile(filename):
        return
    with open(filename, 'w') as out:
        out.write(str(s))
    if verbose:
        print(" wrote " + filename)
def ensuredirs(f):
    """For a file path *f*, ensure that its directory path exists."""
    parent = os.path.dirname(f)
    if not os.path.isdir(parent):
        os.makedirs(parent)
def touch(filename):
    """Create *filename* if needed and update its mtime, like POSIX touch."""
    # Opening in append mode creates the file without clobbering contents.
    with io.open(filename, 'a'):
        os.utime(filename, None)
def exec_file(filename, glb=None, loc=None):
    """Python-2-``execfile`` equivalent: run a file in the given namespaces."""
    with io.open(filename, 'r') as f:
        source = f.read()
    code = compile(source, filename, "exec")
    exec(code, glb, loc)
#
# Run Control
#
class NotSpecified(object):
    """A helper singleton for run control meaning that a 'real' value
    has not been given."""

    def __repr__(self):
        return "NotSpecified"


# Replace the class with its single instance so the name acts as a sentinel.
NotSpecified = NotSpecified()
"""A helper singleton for run control meaning that a 'real' value
has not been given."""
class RunControl(object):
    """A composable configuration class. Unlike argparse.Namespace,
    this keeps the object dictionary (__dict__) separate from the run
    control attributes dictionary (_dict).  Underscore-prefixed names are
    real instance attributes; everything else lives in _dict."""

    def __init__(self, **kwargs):
        """Parameters
        -------------
        kwargs : optional
            Items to place into run control.
        """
        self._dict = {}
        for k, v in kwargs.items():
            setattr(self, k, v)
        # Optional per-key merge callables consulted by _update().
        self._updaters = {}

    def __getattr__(self, key):
        # Resolution order: run-control dict, then instance dict, then class.
        if key in self._dict:
            return self._dict[key]
        elif key in self.__dict__:
            return self.__dict__[key]
        elif key in self.__class__.__dict__:
            return self.__class__.__dict__[key]
        else:
            msg = "RunControl object has no attribute {0!r}.".format(key)
            raise AttributeError(msg)

    def __setattr__(self, key, value):
        # Underscore names go to the real instance dict; NotSpecified never
        # overwrites an existing run-control value.
        if key.startswith('_'):
            self.__dict__[key] = value
        else:
            if value is NotSpecified and key in self:
                return
            self._dict[key] = value

    def __delattr__(self, key):
        # Mirror __getattr__'s resolution order for deletion.
        if key in self._dict:
            del self._dict[key]
        elif key in self.__dict__:
            del self.__dict__[key]
        elif key in self.__class__.__dict__:
            del self.__class__.__dict__[key]
        else:
            msg = "RunControl object has no attribute {0!r}.".format(key)
            raise AttributeError(msg)

    def __iter__(self):
        return iter(self._dict)

    def __repr__(self):
        keys = sorted(self._dict.keys())
        s = ", ".join(["{0!s}={1!r}".format(k, self._dict[k]) for k in keys])
        return "{0}({1})".format(self.__class__.__name__, s)

    def _pformat(self):
        """Multi-line pretty-printed variant of __repr__."""
        keys = sorted(self._dict.keys())
        f = lambda k: "{0!s}={1}".format(k, pformat(self._dict[k], indent=2))
        s = ",\n ".join(map(f, keys))
        return "{0}({1})".format(self.__class__.__name__, s)

    def __contains__(self, key):
        return key in self._dict or key in self.__dict__ or \
               key in self.__class__.__dict__

    def __eq__(self, other):
        # Compare against other RunControls or plain mappings.
        if hasattr(other, '_dict'):
            return self._dict == other._dict
        elif isinstance(other, Mapping):
            return self._dict == other
        else:
            return NotImplemented

    def __ne__(self, other):
        if hasattr(other, '_dict'):
            return self._dict != other._dict
        elif isinstance(other, Mapping):
            return self._dict != other
        else:
            return NotImplemented

    def _update(self, other):
        """Update this rc in place with values from another mapping.

        NotSpecified values in *other* are ignored.  When a key is present
        in self, in other, and in self._updaters, the registered updater is
        called as ``updater(old, new)`` to produce the merged value; the
        updater should return a copy rather than mutating in place.
        """
        if hasattr(other, '_dict'):
            other = other._dict
        elif not hasattr(other, 'items'):
            other = dict(other)
        for k, v in other.items():
            if v is NotSpecified:
                pass
            elif k in self._updaters and k in self:
                v = self._updaters[k](getattr(self, k), v)
            setattr(self, k, v)
def infer_format(filename, format):
    """Figure out a file format from an explicit value or filename suffix.

    An explicit string *format* wins; otherwise the suffix of *filename*
    decides ('pkl.gz' or 'pkl').  Raises ValueError when undeterminable.
    """
    if isinstance(format, basestring):
        return format
    if filename.endswith('.pkl.gz'):
        return 'pkl.gz'
    if filename.endswith('.pkl'):
        return 'pkl'
    raise ValueError("file format could not be determined.")
def sortedbytype(iterable):
    """Sort an iterable by type name first, then by value within each type."""
    bytype = {}
    for value in iterable:
        bytype.setdefault(type(value).__name__, []).append(value)
    result = []
    for tname in sorted(bytype):
        result.extend(sorted(bytype[tname]))
    return result
nyansep = r'~\_/' * 17 + '~=[,,_,,]:3'
"""WAT?!"""
def flatten(iterable):
    """Generator yielding a flattened version of nested sequences.

    Strings are treated as atoms and yielded whole, never recursed into.
    """
    for item in iterable:
        if isinstance(item, Iterable) and not isinstance(item, basestring):
            for sub in flatten(item):
                yield sub
        else:
            yield item
#
# Memoization
#
def ishashable(x):
    """Whether *x* -- and, for non-string iterables, all of its elements --
    is hashable."""
    if not isinstance(x, Hashable):
        return False
    if isinstance(x, basestring):
        return True
    if isinstance(x, Iterable):
        # A hashable container is only usable if its members hash too.
        return all(map(ishashable, x))
    return True
def memoize(obj):
    """Generic memoization decorator, based on the pattern from
    http://wiki.python.org/moin/PythonDecoratorLibrary .

    Unhashable call signatures bypass the cache.  Not suitable for
    method caching (it would key on and retain instances).
    """
    cache = obj.cache = {}

    @functools.wraps(obj)
    def wrapper(*args, **kwargs):
        key = (args, tuple(sorted(kwargs.items())))
        if not ishashable(key):
            return obj(*args, **kwargs)
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]

    return wrapper
class memoize_method(object):
    """Decorator suitable for memoizing methods, rather than functions and
    classes.  Based on ActiveState recipe 577452 (originally MIT licensed);
    results are cached per instance in ``obj._cache``.
    """

    def __init__(self, meth):
        self.meth = meth

    def __get__(self, obj, objtype=None):
        # Accessed on the class: hand back the raw method.
        if obj is None:
            return self.meth
        bound = functools.partial(self, obj)
        bound.__doc__ = self.meth.__doc__
        bound.__name__ = self.meth.__name__
        return bound

    def __call__(self, *args, **kwargs):
        instance = args[0]
        cache = instance._cache = getattr(instance, '_cache', {})
        key = (self.meth, args[1:], tuple(sorted(kwargs.items())))
        if not ishashable(key):
            # Unhashable signatures bypass the cache entirely.
            return self.meth(*args, **kwargs)
        if key not in cache:
            cache[key] = self.meth(*args, **kwargs)
        return cache[key]
def check_cmd(args):
    """Run *args* in a subprocess and capture its result.

    Returns
    -------
    (rtn, out) : tuple
        The process return code and its combined stdout/stderr as bytes.
    """
    with tempfile.NamedTemporaryFile() as capture:
        rtn = subprocess.call(args, stdout=capture, stderr=capture)
        capture.seek(0)
        out = capture.read()
    return rtn, out
#
# Persisted Cache
#
class PersistentCache(MutableMapping):
    """A quick persistent cache: an in-memory dict mirrored to a pickle file
    on every mutation."""

    def __init__(self, cachefile='cache.pkl'):
        """Parameters
        -------------
        cachefile : str, optional
            Path to the pickle file backing this cache.
        """
        self.cachefile = cachefile
        self.cache = {}
        if os.path.isfile(cachefile):
            with io.open(cachefile, 'rb') as f:
                self.cache = pickle.load(f)

    def __len__(self):
        return len(self.cache)

    def __contains__(self, key):
        return key in self.cache

    def __getitem__(self, key):
        # Reads hit only the in-memory dict, never the disk.
        return self.cache[key]

    def __setitem__(self, key, value):
        self.cache[key] = value
        self.dump()

    def __delitem__(self, key):
        del self.cache[key]
        self.dump()

    def __iter__(self):
        return iter(self.cache)

    def dump(self):
        """Write the cache out to the filesystem."""
        pardir = os.path.split(os.path.abspath(self.cachefile))[0]
        if not os.path.exists(pardir):
            os.makedirs(pardir)
        with io.open(self.cachefile, 'wb') as f:
            pickle.dump(self.cache, f, pickle.HIGHEST_PROTOCOL)

    def __str__(self):
        return pformat(self.cache)
| 13,792 | 4,244 |
# Output targets and remote-service configuration for the scrape.
output_filename = 'kings_mountain_taxes.csv'
output_pfilename = 'kmes_taxes.p'
# ArcGIS "identify" endpoint for San Mateo County parcels.
base_url = "https://gis.smcgov.org/maps/rest/services/WEBAPPS/COUNTY_SAN_MATEO_TKNS/MapServer/identify"
# NOTE(review): hard-coded API token committed to source -- move to an
# environment variable or config file and rotate the token.
token = "fytmg9tR2rSx-1Yp0SWJ_qkAExGi-ftZoK7h4wk91UY."
polygon = [(-13622312.48,4506393.674),
(-13622866.64,4504129.241),
(-13622054.51,4501702.363),
(-13622081.51,4500703.546),
(-13622336.7,4500699.901),
(-13622209.69,4500208.989),
(-13620628.37,4498576.899),
(-13620855.91,4496456.415),
(-13621178.77,4496056.135),
(-13620850.69,4493901.594),
(-13619861.84,4493897.488),
(-13619569,4490187.675),
(-13619905.81,4489530.952),
(-13619314.07,4488339.813),
(-13618317.52,4488701.441),
(-13618258.04,4488474.013),
(-13615983.9,4488310.236),
(-13615652.87,4488885.978),
(-13615005.01,4489100.013),
(-13613917.21,4488836.71),
(-13613914.39,4488379.079),
(-13612911.75,4488366.21),
(-13612888.41,4494314.066),
(-13615097.8,4495888.695),
(-13615271.32,4496853.081),
(-13616508.99,4497514.873),
(-13616383.3,4498273.144),
(-13615602.16,4498927.021),
(-13616669.94,4499925.725),
(-13617650.42,4501543.218),
(-13618538.41,4501849.55),
(-13619271.78,4503718.206),
(-13620684.15,4505168.724),
(-13620959.32,4506823.444),
(-13622312.48,4506393.674)]
sr = 102100
tax_base_url = 'https://sanmateo-ca.county-taxes.com/public/search'
tax_bill_url = 'https://sanmateo-ca.county-taxes.com'
tax_link_contents = '2019 Secured Annual Bill'
tax_key_bond = 'Cabrillo Usd Bond'
tax_key_B = 'CAB USD MEAS B 2015-20'
tax_key_countywide = 'Countywide Tax (Secured)'
def get_apns_and_tras(extent, plot_boundaries = True):
    """Query the county GIS "identify" endpoint for parcels inside *extent*.

    Parameters
    ----------
    extent : tuple of float
        (xmin, ymin, xmax, ymax) in the map's spatial reference (see `sr`).
    plot_boundaries : bool, optional
        If True, draw the clipped polygon boundary with matplotlib.

    Returns
    -------
    list of [APN, TRA] pairs, one per parcel found in the clipped region.
    """
    (xmin, ymin, xmax, ymax) = extent
    from shapely.geometry import Polygon
    # Clip the global area-of-interest polygon to this tile's rectangle.
    p1 = Polygon(polygon)
    p2 = Polygon([(xmin, ymax), (xmax, ymax), (xmax, ymin), (xmin, ymin), (xmin, ymax)])
    p3 = p1.intersection(p2)
    # The intersection can be empty or a degenerate geometry without an
    # exterior ring; that is the only expected failure here.  (The original
    # bare `except:` also hid unrelated errors.)
    try:
        coords = list(p3.exterior.coords)
    except AttributeError:
        coords = []
    if plot_boundaries:
        from matplotlib import pylab as plt
        plt.ion()
        plt.figure(1)
        for i in range(len(coords) - 1):
            plt.plot([coords[i][0], coords[i + 1][0]], [coords[i][1], coords[i + 1][1]], 'b-')
        plt.axis('equal')
    # Build the ESRI JSON "rings" geometry from the clipped coordinates.
    geometry = '{"rings": [[' + ','.join(['[' + str(c[0]) + ',' + str(c[1]) + ']' for c in coords]) + ']]}'
    import requests
    payload = {"token": token,
               "f": "json",
               "tolerance": 0,
               "returnGeometry": "false",
               "geometry": geometry,
               "geometryType": "esriGeometryPolygon",
               "sr": sr,
               "mapExtent": "{xmin}, {ymin}, {xmax}, {ymax}".format(xmin = xmin, ymin = ymin, xmax = xmax, ymax = ymax),
               "layers": "visible:0",
               "imageDisplay": "572%2C774%2C96"}
    r = requests.get(base_url, params=payload)
    records = r.json()
    if records.get('exceededTransferLimit', None) is not None:
        print('WARNING: Transfer limit exceeded. Reduce square size')
    return [[s['attributes']['NOGEOMAPN'], s['attributes']['TRA']] for s in records['results']]
def collect_all_apns_and_tras(square_size = 5000, plot_boundaries = True):
    """Tile the polygon's bounding box into squares and query each tile.

    Parameters
    ----------
    square_size : float, optional
        Tile edge length in map units, sized to keep each identify request
        under the server's transfer limit.
    plot_boundaries : bool, optional
        Forwarded to get_apns_and_tras().

    Returns
    -------
    list of [APN, TRA] pairs, possibly containing duplicates across tiles.
    """
    import math
    xs, ys = zip(*polygon)
    (minx, maxx, miny, maxy) = (min(xs), max(xs), min(ys), max(ys))
    apns_and_tras = list()
    for i in range(math.ceil((maxy - miny) / square_size)):
        tile_y_min = miny + square_size * float(i)
        # Clamp the last row of tiles to the bounding box edge.
        tile_y_max = min(miny + square_size * float(i + 1), maxy)
        for j in range(math.ceil((maxx - minx) / square_size)):
            tile_x_min = minx + square_size * float(j)
            # BUG FIX: the original wrote
            #   `... if square_size*float(j+1)+minx else maxx`
            # dropping the `< maxx` comparison, so the rightmost column of
            # tiles was never clamped to the bounding box edge.
            tile_x_max = min(minx + square_size * float(j + 1), maxx)
            extent = (tile_x_min, tile_y_min, tile_x_max, tile_y_max)
            this_apns_and_tras = get_apns_and_tras(extent, plot_boundaries=plot_boundaries)
            apns_and_tras += this_apns_and_tras
    return apns_and_tras
def get_tax_record(apn):
    """Look up the tax bill line items for one parcel (APN) on the county site.

    Parameters
    ----------
    apn : str
        Parcel number to search for.

    Returns
    -------
    (countywide_tax, bond_tax, b_tax) : tuple of float
        The three line-item amounts; 0 for any item not found on the bill.
    """
    # BUG FIX: the original passed the *builtin* function `all` as the
    # category, which serializes as "<built-in function all>"; the site
    # expects the literal string "all".
    payload = {"search_query": apn,
               "category": "all"}
    import requests
    r = requests.get(tax_base_url, params=payload)
    from bs4 import BeautifulSoup
    soup = BeautifulSoup(r.content, features="html.parser")
    # Find the link to the current year's secured annual bill, if present.
    clickthrough = None
    for tag in soup.find_all('a'):
        if tax_link_contents in tag.contents[0]:
            clickthrough = tag['href']
    bond_tax = 0
    b_tax = 0
    countywide_tax = 0
    if clickthrough is not None:
        r = requests.get(tax_bill_url + clickthrough)
        soup = BeautifulSoup(r.content, features="html.parser")

        def _amount_after(label_td, hops):
            # Each dollar amount sits `hops` <td> siblings right of its label.
            cell = label_td
            for _ in range(hops):
                cell = cell.find_next_sibling("td")
            return float(cell.text.replace('$', '').replace(',', ''))

        td_countywide = soup.find("td", text=tax_key_countywide)
        if td_countywide is not None:
            countywide_tax = _amount_after(td_countywide, 5)
        td_tax_bond = soup.find("td", text=tax_key_bond)
        if td_tax_bond is not None:
            bond_tax = _amount_after(td_tax_bond, 5)
        td_B_tax = soup.find("td", text=tax_key_B)
        if td_B_tax is not None:
            b_tax = _amount_after(td_B_tax, 1)
    return countywide_tax, bond_tax, b_tax
# -- Script body: scrape every parcel, then write CSV and pickle outputs. --
# NOTE(review): this runs at import time and performs one HTTP request per
# parcel; consider guarding it with `if __name__ == "__main__":`.
data = list()
APNs_and_TRAs = list(collect_all_apns_and_tras())
# Map APN -> TRA (duplicate APNs collapse to the last TRA seen).
APN_dictionary = {a[0]:a[1] for a in APNs_and_TRAs}
APNs = list(APN_dictionary.keys())
APNs.sort()
total = len(APNs)
counter = 1
for APN in APNs:
    # Progress indicator, e.g. "12 / 340".
    print('{counter} / {total}'.format(counter = counter, total = total))
    countywide_tax, bond_tax, b_tax = get_tax_record(APN)
    # Reformat the 9-digit APN as XXX-XXX-XXX for the output rows.
    this_APN = APN[0:3] + '-' + APN[3:6] + '-' + APN[6:]
    data.append([this_APN, APN_dictionary[APN], countywide_tax, bond_tax, b_tax])
    counter += 1
import csv
with open(output_filename, 'w') as csvfile:
    writer = csv.writer(csvfile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL)
    for row in data:
        writer.writerow(row)
import pickle as p
with open(output_pfilename,'wb') as pfile:
    p.dump(data, pfile)
| 6,698 | 2,940 |
from django.urls import path, re_path
import multiprocessing
import sys
from . import views
# URL routes for this app.
# NOTE(review): the `multiprocessing` and `sys` imports above appear unused
# in this module -- confirm and remove.
urlpatterns = [
    # "/" and "/create/" share the same view; create mode is passed via kwargs.
    path("", views.index, name='index'),
    path("create/", views.index, {"create": True}, name="create"),
    # Worker-scoped log and rule views keyed by a numeric worker id.
    re_path(r"^logs/(?P<wid>[0-9]+)$", views.logs, name='logs'),
    re_path(r"^rules/(?P<wid>[0-9]+)$", views.rules, name='rules')
]
| 351 | 128 |
from views import dump_result
from django.http import HttpResponse
class ErrorHandlingMiddleware(object):
    """Middleware that converts unhandled view exceptions into a JSON
    error response via dump_result."""

    def process_exception(self, request, exception):
        """Return an HttpResponse carrying a JSON error payload for any
        exception raised by a view."""
        # BUG FIX: the `mimetype` keyword was removed from HttpResponse in
        # Django 1.7; `content_type` is the supported spelling.
        response = HttpResponse(content_type="application/json")
        error = {'error': True, 'error_message': str(exception)}
        dump_result(request, response, [], error)
        return response
| 344 | 105 |
class Solution:
    def XXX(self, root: TreeNode) -> List[int]:
        """Iterative in-order traversal of a binary tree.

        The stack holds (value, node) pairs: a non-None value means "emit
        this value now"; the node, if any, is expanded by deferring its own
        value and right subtree behind its left subtree.
        """
        stack = [(None, root)]
        result = []
        while stack:
            val, node = stack.pop()
            # Idiom fix: compare to None with `is not`, not `!=`.
            if val is not None:
                result.append(val)
            if node:
                # Emit node.val after the left subtree, before the right.
                stack.append((node.val, node.right))
                if node.left:
                    stack.append((None, node.left))
        return result
# NOTE(review): removed two stray scrape artifacts that followed this class
# (a bare `undefined` and a pasted JavaScript for-loop) -- they made this
# module a SyntaxError.
#!/usr/bin/env python
# Copyright (c) 2019 Nerian Vision GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
##############
#
# This script is called by CMake in order to generate C++ initialization
# and update code for the parameter server and dynamic_reconfigure
# (it reuses the .cfg file the dynamic_reconfigure build operates on).
#
import sys
CODE_TEMPLATE = '''// This file is AUTOGENERATED CODE produced by generate_nerian_config_cpp.py
#include <ros/ros.h>
#include <dynamic_reconfigure/server.h>
#include <nerian_stereo/NerianStereoConfig.h>
#include <visiontransfer/deviceparameters.h>
#include "nerian_stereo_node_base.h"
using namespace visiontransfer;
namespace nerian_stereo {
// Callback that receives an updated config from ROS
void StereoNodeBase::autogen_dynamicReconfigureCallback(nerian_stereo::NerianStereoConfig &config, uint32_t level) {
std::stringstream ss;
// == START of autogenerated parameter blocks ==
%s
// == END of autogenerated parameter blocks ==
}
// Obtain current parameter values from device and copy them to parameter server
void StereoNodeBase::autogen_updateParameterServerFromDevice(std::map<std::string, ParameterInfo>& cfg) {
ROS_INFO("Setting initial parameters in the parameter server");
std::string node_name = ros::this_node::getName();
// Publish reboot flag to definitely be set to false in the parameter server
getNH().setParam(node_name + "/reboot", false);
// Publish the current config to the parameter server
// == START of autogenerated parameter blocks ==
%s
// == END of autogenerated parameter blocks ==
}
// Override the default parameter bounds with current (run-time) config
void StereoNodeBase::autogen_updateDynamicReconfigureFromDevice(std::map<std::string, ParameterInfo>& cfg) {
nerian_stereo::NerianStereoConfig config_default, config_min, config_max;
ROS_INFO("Updating dynamic_reconfigure defaults and limits");
// Set defaults and min/max values according to Nerian stereo device API
// == START of autogenerated parameter blocks ==
%s
// == END of autogenerated parameter blocks ==
// Publish them
dynReconfServer->setConfigMin(config_min);
dynReconfServer->setConfigMax(config_max);
dynReconfServer->setConfigDefault(config_default);
}
} // namespace
'''
TEMPLATE_PARAMETER_CHANGE = ''' if (config.{varname} != lastKnownConfig.{varname}) {{
ROS_INFO("Request to set {varname} = %s", std::to_string(config.{varname}).c_str());
deviceParameters->setNamedParameter("{varname}", config.{varname});
}}'''
TEMPLATE_SETPARAM = ''' getNH().setParam(node_name + "/{varname}", cfg["{varname}"].getValue<{typ}>());'''
TEMPLATE_SETDEFAULTS = '''
config_default.{varname} = cfg["{varname}"].getValue<{typ}>();
config_min.{varname} = cfg["{varname}"].getMin<{typ}>();
config_max.{varname} = cfg["{varname}"].getMax<{typ}>();'''
if __name__ == '__main__':
    # Default to filter mode: read the .cfg from stdin, write C++ to stdout.
    infile = sys.stdin
    outfile = sys.stdout
    if len(sys.argv) >= 3:
        # Explicit input/output file arguments given by CMake.
        infile = open(sys.argv[1], 'r')
        outfile = open(sys.argv[2], 'w')
    try:
        # Parse cfg file contents and extract the parameter lines (name + type).
        # A line looks like: gen.add("param_name", double_t, ...)
        varnames_and_types = []
        for line in infile:
            if line.startswith('gen.add('):
                varname = line.split('"')[1]
                # 'double_t' -> 'double', 'int_t' -> 'int', etc.
                typ = line.split(',')[1].strip().split('_')[0]
                varnames_and_types.append([varname, typ])
        # Dump code for each parameter. 'reboot' is handled specially
        # (must not be True initially), so it is excluded from the
        # parameter-server and defaults blocks.
        paramchange = '\n'.join(TEMPLATE_PARAMETER_CHANGE.format(varname=vt[0]) for vt in varnames_and_types)
        setparam = '\n'.join(TEMPLATE_SETPARAM.format(varname=vt[0], typ=vt[1]) for vt in varnames_and_types if vt[0] != 'reboot')
        setdefaults = '\n'.join(TEMPLATE_SETDEFAULTS.format(varname=vt[0], typ=vt[1]) for vt in varnames_and_types if vt[0] != 'reboot')
        outfile.write(CODE_TEMPLATE % (paramchange, setparam, setdefaults))
    finally:
        # The original leaked both handles; close named files (never the std
        # streams) so the generated C++ is flushed to disk even on error.
        if infile is not sys.stdin:
            infile.close()
        if outfile is not sys.stdout:
            outfile.close()
| 4,552 | 1,431 |
from micarraylib.arraycoords.core import micarray
from micarraylib.arraycoords import array_shapes_raw
from micarraylib.arraycoords.array_shapes_utils import _polar2cart
import pytest
import numpy as np
def test_micarray_init():
    """Constructor stores the raw shape, form, and name; bad forms raise."""
    mic = micarray(array_shapes_raw.cube2l_raw, "cartesian", None, "foo")
    assert mic.name == "foo"
    assert mic.capsule_names == list(array_shapes_raw.cube2l_raw.keys())
    assert mic.coords_dict == array_shapes_raw.cube2l_raw
    assert mic.coords_form == "cartesian"
    assert mic.angle_units == None
    # omitting the coordinates form is an error
    with pytest.raises(ValueError):
        micarray(array_shapes_raw.ambeovr_raw)
    # cartesian coordinates must not carry angle units
    with pytest.raises(ValueError):
        micarray(array_shapes_raw.cube2l_raw, "cartesian", "degree")
def test_micarray_center_coords():
    """center_coords() recenters the capsules around the origin and
    normalizes the representation to unit-less cartesian."""
    cart_mic = micarray(array_shapes_raw.cube2l_raw, "cartesian")
    cart_mic.center_coords()
    centroid = np.mean(np.array([c for c in cart_mic.coords_dict.values()]), axis=0)
    assert np.allclose(centroid, [0, 0, 0])

    polar_mic = micarray(array_shapes_raw.ambeovr_raw, "polar", "degrees")
    polar_mic.center_coords()
    centroid = np.mean(np.array([c for c in polar_mic.coords_dict.values()]), axis=0)
    assert np.allclose(centroid, [0, 0, 0])
    # a polar array is converted to cartesian as a side effect
    assert polar_mic.coords_form == "cartesian"
    assert polar_mic.angle_units == None
def test_micarray_standard_coords():
    """standard_coords() round-trips between centered cartesian and
    radian polar forms, keeping angles in their canonical ranges."""
    mic = micarray(array_shapes_raw.eigenmike_raw, "polar", "degrees")

    mic.standard_coords("cartesian")
    centroid = np.mean(np.array([c for c in mic.coords_dict.values()]), axis=0)
    assert np.allclose(centroid, [0, 0, 0])

    mic.standard_coords("polar")
    assert mic.coords_form == "polar"
    assert mic.angle_units == "radians"
    # sanity check: colatitude in (0, 180), azimuth in [-180, 180]
    assert all([0 < c[0] < 180 for c in mic.coords_dict.values()])
    assert all([-180 <= c[1] <= 180 for c in mic.coords_dict.values()])

    # converting back to cartesian must again be centered around zero
    back_to_cart = _polar2cart(mic.coords_dict, "radians")
    centroid = np.mean(np.array([v for v in back_to_cart.values()]), axis=0)
    assert np.allclose(centroid, [0, 0, 0])

    # calling without a target form is an error
    with pytest.raises(ValueError):
        mic.standard_coords()
| 2,263 | 834 |
import ocr.utils as utils
def toDataURL(img, mime = None):
    """Build a Data URL for an image.

    Args:
        img: CV2 image object, or a path to an image file.
        mime: Media type for the Data URL. Required when ``img`` is a
            CV2 image object; inferred from the file otherwise.

    Returns:
        A ``(data_url, mime)`` tuple.

    Raises:
        ValueError: Image types other than png, jpg, jpeg.
    """
    # Delegate the base64 encoding (and MIME detection) to the utils helper.
    encoded, mime = utils.toBase64(img, mime)
    # Assemble the RFC 2397 style Data URL from the detected MIME type.
    return f'data:image/{mime};base64,{encoded}', mime
import math
import heapq
class Solution:
    """LeetCode 1962: Remove Stones to Minimize the Total."""

    def minStoneSum(self, piles, k: int) -> int:
        """Apply k operations, each removing floor(max_pile / 2) stones
        from the current largest pile, and return the minimal total.

        Args:
            piles: iterable of positive pile sizes.
            k: number of halving operations to apply (k >= 0).

        Returns:
            The minimum possible total number of remaining stones.
        """
        # BUG FIX: the original pushed onto a heapq min-heap but read q[-1]
        # (the last leaf, NOT the maximum) and popped from the list tail,
        # so it halved arbitrary piles. Simulate a max-heap by negating.
        heap = [-p for p in piles]
        heapq.heapify(heap)
        for _ in range(k):
            largest = -heapq.heappop(heap)
            # Removing floor(largest/2) leaves ceil(largest/2) stones.
            heapq.heappush(heap, -(largest - largest // 2))
        return -sum(heap)
if __name__ == '__main__':
    # Quick manual run against the LeetCode sample input.
    solver = Solution()
    print(solver.minStoneSum([4, 3, 6, 7], 3))
from .element import Element
class Navigation(Element):
    """Navigation link section element."""

    def __str__(self):
        # The string form of an element is its HTML tag name.
        return "nav"
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Leap Motion + Trello
A plain Trello view with Leap Motion UI.
"""
import Leap, sys, os, math, time
import ConfigParser, argparse
from collections import deque
from PyQt4 import QtGui, QtCore
from LeapListener import LeapListener
from trolly.client import Client
from trolly.board import Board
from trolly.list import List
from trolly.card import Card
from trolly.member import Member
# from trolly.organisation import Organisation
# from trolly.checklist import Checklist
# from trolly import ResourceUnavailable
class ScrollEventMixin(object):
    # Mixin that auto-scrolls the board when the pointer enters a margin
    # ("scroll zone") near any edge of the board widget. Mixed into widgets
    # that receive mouse-move events; expects self.board and self.config.

    def mouseMoveEvent(self, event):
        # Board geometry (x, y, width, height) and pointer global position.
        bx, by, w, h = self.board.geometry().getRect()
        x,y = event.globalPos().x(), event.globalPos().y()
        scrollZone = self.config.getint('TrelloBoard', 'scroll_zone_size')
        scrollSpeed = self.config.getint('TrelloBoard', 'scroll_speed')
        # Scroll step: at least scrollSpeed, growing with distance into the zone.
        def maxi(i): return max(i, scrollSpeed)
        # horizontal scroll
        wDiff = x - bx
        isLeft = wDiff < scrollZone
        isRight = (w - wDiff) < scrollZone
        sb = self.board.scrollArea.horizontalScrollBar()
        if (isLeft): sb.setValue(sb.value() - maxi(wDiff))
        if (isRight): sb.setValue(sb.value() + maxi(w - wDiff))
        # vertical scroll
        hDiff = y - by
        isTop = hDiff < scrollZone
        isBottom = (h - hDiff) < scrollZone
        sb = self.board.scrollArea.verticalScrollBar()
        if (isTop): sb.setValue(sb.value() - maxi(hDiff))
        if (isBottom): sb.setValue(sb.value() + maxi((h - hDiff)))

    def dragEnterEvent(self, e):
        # Accept all drags so dragMoveEvent/dropEvent fire on subclasses.
        e.accept()
class TrelloBoard(QtGui.QMainWindow):
    # Main window: renders one Trello board as a horizontal row of
    # TrelloList columns inside a scroll area, driven by a Leap Motion
    # pointer (no visible OS cursor).

    # Emitted when the window size changes so the Leap listener can rescale
    # pointer movement to the new window diagonal.
    pointingMultiplier = QtCore.pyqtSignal(int)

    def __init__(self, client, app, boardId, config):
        QtGui.QMainWindow.__init__(self)
        self.lists = []
        self.app = app
        self.client = client          # trolly API client
        self.boardId = boardId
        self.config = config          # ConfigParser instance
        self.trelloBoardStyle = self.config.get('TrelloBoard', 'style')
        self.board = Board(client, boardId)
        self.screen = QtGui.QDesktopWidget().screenGeometry()
        self.setMouseTracking(True)
        self.updatePointingMultiplier()
        self.style()
        self.show()

    def style(self):
        # Builds the whole widget tree; note this shadows QWidget.style().
        self.window = QtGui.QWidget();
        hbox = QtGui.QHBoxLayout()
        self.window.setLayout(hbox)
        self.setCentralWidget(self.window)
        hbox.setSpacing(0)
        # One TrelloList column per Trello list (fetched live from the API).
        lists = self.board.getLists()
        for rawlist in lists:
            cards = rawlist.getCards()
            hbox.addWidget( TrelloList( self.config, self, self.client, rawlist.id, rawlist.name, cards ) )
        # Wrap the row of lists in a scroll area with hidden scrollbars;
        # scrolling is done programmatically by ScrollEventMixin.
        self.scrollArea = QtGui.QScrollArea()
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setEnabled(True)
        self.scrollArea.setWidget(self.window)
        self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.setCentralWidget(self.scrollArea)
        # currentCard tracks the card under the pointer (hover selection).
        self.currentCard = None
        self.shadowCard = None
        self.setWindowTitle(self.config.get('main', 'title'))
        self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
        self.setStyleSheet(self.trelloBoardStyle)
        self.logo = QtGui.QLabel(self)
        self.logo.setPixmap(QtGui.QPixmap(os.getcwd() + self.config.get('resources', 'trellol_logo_small')))
        #self.cursorImg = QtGui.QPixmap(os.getcwd() + self.config.get('resources', 'ball_cursor'))
        # Invisible cursor image: the Leap pointer replaces the OS cursor.
        self.cursorImg = QtGui.QPixmap(os.getcwd() + self.config.get('resources', 'null_cursor'))
        self.setCursor(QtGui.QCursor(self.cursorImg, -1, -1))
        self.center()

    def center(self):
        # Size from config, then center on the screen (py2 integer division).
        mainposx = self.config.getint('main', 'pos_x')
        mainposy = self.config.getint('main', 'pos_y')
        mainwidth = self.config.getint('main', 'width')
        mainheight = self.config.getint('main', 'height')
        self.setGeometry(mainposx, mainposy, mainwidth, mainheight)
        size = self.geometry()
        self.move((self.screen.width() - size.width()) / 2, (self.screen.height() - size.height()) / 2)

    def keyPressEvent(self, event):
        # Esc quits; F toggles fullscreen (and rescales the Leap pointer).
        key = event.key()
        if key == QtCore.Qt.Key_Escape:
            self.close()
        elif key == QtCore.Qt.Key_F:
            if self.windowState() & QtCore.Qt.WindowFullScreen:
                self.showNormal()
            else:
                self.showFullScreen()
            self.updatePointingMultiplier()
        return QtGui.QWidget.keyPressEvent(self, event)

    def resizeEvent(self, e):
        # Keep the logo pinned to the bottom-left corner.
        # NOTE(review): 'logo_pos_x' is reused for the vertical margin —
        # presumably intentional (square margin), but confirm.
        logo_h = self.config.getint('TrelloBoard', 'logo_height')
        logo_w = self.config.getint('TrelloBoard', 'logo_width')
        logo_x = self.config.getint('TrelloBoard', 'logo_pos_x')
        logo_y = self.height() - logo_h - self.config.getint('TrelloBoard', 'logo_pos_x')
        self.logo.setGeometry(logo_x, logo_y, logo_w, logo_h)

    def mouseMoveEvent(self, event):
        # Forward moves to the hovered card so drag/selection keeps working
        # when the pointer leaves the card widget itself.
        if (self.currentCard is not None):
            TrelloCard.mouseMoveEvent(self.currentCard, event)

    def mousePressEvent(self, event):
        # Clicking anywhere while a card is selected opens its details dialog.
        if (self.currentCard is not None):
            self.cardDetails = TrelloCardDescription(self, self.currentCard.id)
            self.cardDetails.exec_()

    def updatePointingMultiplier(self):
        # Scale the Leap pointer sensitivity by the window diagonal,
        # clamped to [5, 20]; emitted to the LeapListener.
        diagonal = math.sqrt( (math.pow(self.width(), 2) + math.pow(self.height(), 2)))
        multiplier = max(min(diagonal / 100, 20), 5)
        self.pointingMultiplier.emit(multiplier)
class TrelloCardDescription(QtGui.QDialog):
    # Modal dialog showing a card's name, id and description, fetched
    # live from the Trello API.

    def __init__(self, board, cardId):
        QtGui.QDialog.__init__(self, board)
        self.id = cardId
        self.card = Card(board.client, self.id).getCardInformation()
        # NOTE(review): these assignments shadow QWidget.width()/height()
        # methods with plain ints; works because only this class reads them.
        self.width = 800
        self.height = 600
        self.style()
        self.center()
        self.render()  # NOTE(review): also shadows QWidget.render()

    def style(self):
        self.setFixedWidth(self.width)
        self.setFixedHeight(self.height)

    def center(self):
        # Center the dialog on the screen (py2 integer division).
        screen = QtGui.QDesktopWidget().screenGeometry()
        self.move((screen.width() - self.width) / 2, (screen.height() - self.height) / 2)

    def render(self):
        # Three read-only labels: name, id, description.
        vbox = QtGui.QFormLayout()
        vbox.addWidget(QtGui.QLabel("Card Label: %s" % self.card['name']))
        vbox.addWidget(QtGui.QLabel("Id: %s" % self.id))
        vbox.addWidget(QtGui.QLabel("Description: %s" % self.card['desc']))
        self.setLayout(vbox)
class TrelloList(QtGui.QWidget, ScrollEventMixin):
    # One board column: a header label (TrelloListHeader) on top of a
    # vertical stack of cards (TrelloListCards). Accepts card drops.

    def __init__(self, config, board, client, listId, name, cards):
        QtGui.QWidget.__init__(self, board)
        self.config = config
        self.board = board
        self.client = client
        self.id = listId      # Trello list id
        self.name = name
        layout = QtGui.QFormLayout()
        self.head = TrelloListHeader(config, self, self.name)
        layout.addWidget(self.head)
        self.tail = TrelloListCards(config, self, cards)
        layout.addWidget(self.tail)
        self.setLayout(layout)
        self.style()
        self.show()
        self.setAcceptDrops(True)
        self.setMouseTracking(True)

    def style(self):
        # Shadows QWidget.style(); removes spacing between head and tail.
        self.layout().setHorizontalSpacing(0)
        self.layout().setContentsMargins(0,0,0,0)

    def __str__(self):
        return "TrelloList|'%s'" % (self.name)

    def __repr__(self):
        return self.__str__()

    def dragEnterEvent(self, e):
        e.accept()

    def dragMoveEvent(self, e):
        # HACK: we compute a limit for the end of the card list and
        # consider a tail move only below that point
        h = self.config.getint('TrelloCard', 'height') + 5
        lim = h * self.tail.layout().count()
        if lim < e.pos().y():
            # Dragging below the last card: append to this list's tail.
            self.tail.addCard(e.source())
            e.accept()
            return
        # Otherwise delegate reordering to the currently hovered card.
        print "dragMoveEvent TList"
        TrelloCard.dragMoveEvent(self.board.currentCard, e)

    def dropEvent(self, e):
        # Persist the move to Trello asynchronously so the UI stays responsive.
        self.thread = UpdateThread(e.source(), "MOVE")
        self.thread.start()

    def mousePressEvent(self, event):
        # Right button: open card details; left button: treat as a move.
        if (self.board.currentCard is not None):
            if (event.buttons() == QtCore.Qt.RightButton):
                self.board.cardDetails = TrelloCardDescription(self.board, self.board.currentCard.id)
                self.board.cardDetails.exec_()
            elif (event.buttons() == QtCore.Qt.LeftButton):
                TrelloCard.mouseMoveEvent(self.board.currentCard, event)
class TrelloCard(QtGui.QLabel, ScrollEventMixin):
    # A single draggable card: a fixed-size label showing the card name,
    # with member-initials badges, hover selection and drag & drop
    # reordering between lists.

    def __init__(self, config, tlist, cardId, name):
        QtGui.QLabel.__init__(self, tlist)
        self.config = config
        self.id = cardId      # Trello card id
        self.name = name
        self.tlist = tlist    # owning TrelloList (reassigned on move)
        self.board = tlist.board
        # Cached style strings for the different visual states.
        self.trelloCardDeselectStyle = self.config.get('TrelloCard', 'deselect_style')
        self.trelloCardSelectStyle = self.config.get('TrelloCard', 'select_style')
        self.trelloCardDragStyle = self.config.get('TrelloCard', 'drag_style')
        self.trelloCardShadowStyle = self.config.get('TrelloCard', 'shadow_style')
        self.trelloCardMemberStyle = config.get('TrelloCard', 'member_style')
        self.trelloCardMemberHeight = config.getint('TrelloCard', 'member_height')
        self.trelloCardMemberBorder = config.getint('TrelloCard', 'member_border')
        self.trelloCardWidth = self.config.getint('TrelloCard', 'width')
        self.trelloCardHeight = self.config.getint('TrelloCard', 'height')
        self.setMouseTracking(True)
        self.setText(self.name)
        self.addMembers(self.id)
        self.setAcceptDrops(True)
        self.isShadow = False
        self.style()

    def style(self):
        # Shadows QWidget.style(); applies the default (deselected) look.
        self.setAlignment(QtCore.Qt.AlignCenter)
        self.deselect()
        self.setFixedHeight(self.trelloCardHeight)
        self.setFixedWidth(self.trelloCardWidth)

    def select(self):
        # Hover highlight.
        self.isShadow = False
        self.setStyleSheet(self.trelloCardSelectStyle)

    def deselect(self):
        self.isShadow = False
        self.setStyleSheet(self.trelloCardDeselectStyle)

    def drag(self): # TODO cf. shadow()
        self.setStyleSheet(self.trelloCardDragStyle)

    def shadow(self):
        # Placeholder look while the real card is being dragged (unused path).
        self.isShadow = True
        self.setStyleSheet(self.trelloCardShadowStyle)

    def getCentroid(self):
        # Center point of the card in parent coordinates.
        x,y,w,h = self.x(), self.y(), self.width(), self.height()
        return (x + (w/2), y + (h/2))

    def getDistTo(self, x, y):
        # Euclidean distance from this card's centroid to (x, y).
        thisx, thisy = self.getCentroid()
        dist = math.sqrt( (math.pow(thisx - x, 2) + math.pow(thisy - y, 2)))
        return dist

    def addMembers(self, cardId):
        # One small badge per assigned member, laid out left to right
        # along the bottom edge. Each member requires an extra API call.
        members = Card(self.tlist.board.client, cardId).getCardInformation()['idMembers']
        for i, member in enumerate(members):
            initials = Member(self.tlist.board.client, member).getMemberInformation()['initials']
            self.addMemberLabel(
                self,
                initials,
                self.trelloCardMemberBorder + 25 * i,
                self.trelloCardHeight - self.trelloCardMemberHeight - self.trelloCardMemberBorder
            )

    def addMemberLabel(self, parent, text, x, y):
        label = QtGui.QLabel(text, parent)
        label.setFixedHeight(self.trelloCardMemberHeight)
        label.setStyleSheet(self.trelloCardMemberStyle)
        label.move(x, y)

    def setTrellolist(self, tlist):
        # Called when the card is re-parented into another list.
        self.tlist = tlist

    def mouseMoveEvent(self, event):
        ScrollEventMixin.mouseMoveEvent(self, event)
        # select by hover over card
        if (self.tlist.board.currentCard is not self):
            if (self.tlist.board.currentCard is not None):
                self.tlist.board.currentCard.deselect()
            self.tlist.board.currentCard = self
            self.select()
        # start drag on 'mouse' press
        if not (event.buttons() == QtCore.Qt.NoButton):
            mimeData = QtCore.QMimeData()
            # pixmap = QtGui.QPixmap.grabWidget(self) # TODO decide between shadow+dragImg vs. no-mouse
            # self.tlist.board.shadowCard = self
            # self.shadow()
            self.drag()
            drag = QtGui.QDrag(self)
            # Invisible drag cursor: the Leap pointer is the only cursor.
            dragCursor = QtGui.QPixmap(os.getcwd() + self.config.get('resources', 'null_cursor'))
            drag.setDragCursor(dragCursor, QtCore.Qt.MoveAction)
            drag.setMimeData(mimeData)
            drag.setHotSpot(event.pos())
            drag.exec_(QtCore.Qt.MoveAction)  # blocks until the drag ends

    def dragEnterEvent(self, e):
        e.accept() # needed for DragMoveEvent

    def dragMoveEvent(self, e):
        # Reorder the dragged card (e.source()) around this card within the
        # list: rebuild the layout with the dragged card inserted before or
        # after self depending on which half of self the pointer is over.
        # # TODO: scroll while dragging; a good start below
        # glob = QtCore.QPoint(self.board.x() + e.pos().x(), self.board.y() + e.pos().y())
        # ev = QtGui.QMouseEvent(QtCore.QEvent.MouseMove, e.pos(), glob,
        #                        QtCore.Qt.NoButton, QtCore.Qt.NoButton, QtCore.Qt.NoModifier)
        # print ev.pos(), ev.globalPos()
        # ScrollEventMixin.mouseMoveEvent(self, ev)
        if (self == e.source()): return
        cardUpperHalf = e.pos().y() <= (self.height() / 2)
        temp = deque()
        cardlist = self.tlist.tail
        # Walk the layout from the bottom, popping widgets into temp until
        # self is reached, then splice the dragged card in at that spot.
        for i in reversed(range(cardlist.count())):
            if (cardlist.getCardAt(i) == self):
                if cardUpperHalf: temp.append(cardlist.takeCardAt(i))
                temp.append(e.source())
                if not cardUpperHalf: temp.append(cardlist.takeCardAt(i))
                break
            elif (cardlist.getCardAt(i) == e.source()):
                # Dragged card already sits in this list: detach it first.
                cardlist.removeCard(e.source())
            else:
                temp.append(cardlist.takeCardAt(i))
        # Re-append the popped widgets in their original (top-down) order.
        for i in range(len(temp)):
            w = temp.pop()
            cardlist.addCard(w)

    def dropEvent(self, e):
        # Persist the final position to Trello asynchronously.
        e.source().deselect()
        self.thread = UpdateThread(e.source(), "MOVE")
        self.thread.start()

    def __str__(self):
        return "Card %s %s %s" % (self.id, self.name, self.geometry())

    def __repr__(self):
        return self.__str__()
class TrelloListHeader(QtGui.QLabel, ScrollEventMixin):
    # Title label at the top of a TrelloList column; sized relative to the
    # configured card dimensions.

    def __init__(self, config, tlist, text):
        QtGui.QLabel.__init__(self)
        self.config = config
        self.board = tlist.board  # needed by ScrollEventMixin
        self.setText(text)
        self.trelloListHeaderStyle = self.config.get('TrelloListHeader', 'style')
        self.trelloCardWidth = self.config.getint('TrelloCard', 'width')
        self.trelloCardHeight = self.config.getint('TrelloCard', 'height')
        self.style()
        self.setMouseTracking(True)

    def style(self):
        # Shadows QWidget.style(); header is a quarter of a card tall
        # (py2 integer division).
        self.setFixedHeight(self.trelloCardHeight / 4)
        self.setFixedWidth(self.trelloCardWidth)
        self.setStyleSheet(self.trelloListHeaderStyle)

    def __str__(self):
        return "TrelloListHeader|'%s'" % (self.text())

    def __repr__(self):
        return self.__str__()
class TrelloListCards(QtGui.QWidget, ScrollEventMixin):
    # Vertical stack of TrelloCard widgets inside a TrelloList; thin
    # wrapper around a QFormLayout with list-like accessors.

    def __init__( self, config, tlist, cards):
        QtGui.QWidget.__init__(self, tlist)
        self.config = config
        self.tlist = tlist
        self.board = tlist.board  # needed by ScrollEventMixin
        self.trelloListCardsStyle = self.config.get('TrelloListCards', 'style')
        self.setLayout(QtGui.QFormLayout())
        for index,card in enumerate(cards):
            tc = TrelloCard(config, tlist, card.id, card.name)
            self.addCard(tc)
        self.setMouseTracking(True)
        self.style()

    def style(self):
        # Shadows QWidget.style(); removes spacing between stacked cards.
        self.setStyleSheet(self.trelloListCardsStyle)
        self.layout().setHorizontalSpacing(0)
        self.layout().setContentsMargins(0,0,0,0)

    def count(self):
        return self.layout().count()

    def getCardAt(self, index):
        # Card widget at layout position index (without removing it).
        return self.layout().itemAt(index).widget()

    def takeCardAt(self, index):
        # Remove and return the card widget at layout position index.
        return self.layout().takeAt(index).widget()

    def removeCard(self, card):
        # Detach a card from this stack.
        # NOTE(review): the loop compares a layout *item* to the card
        # *widget*, so it never matches and always returns None; the
        # setParent(None) above is what actually detaches the widget.
        card.setTrellolist(None)
        card.setParent(None)
        for i in range(self.layout().count()):
            if self.layout().itemAt(i) == card:
                return self.layout().takeAt(i)
        return None

    def addCard(self, card):
        # Append a card and hand it this stack's list as its new owner.
        card.setTrellolist(self.tlist)
        self.layout().addWidget(card)

    def __str__(self):
        return "TrelloListCards| %d card(s)" % (self.layout().count())

    def __repr__(self):
        return self.__str__()
class UpdateThread(QtCore.QThread):
    # Background worker that pushes a card move to the Trello API so the
    # (blocking) HTTP calls don't freeze the UI.

    def __init__(self, card, op):
        QtCore.QThread.__init__(self)
        # Dispatch table of supported operations; raises KeyError for
        # unknown op names.
        self.OPS = { 'MOVE' : self.move }
        self.card = card
        self.client = self.card.tlist.board.client
        self.op = self.OPS[op]

    def move(self):
        # Re-sync the order of the moved card's list with Trello: every
        # card from the moved one downwards is re-appended at 'bottom' of
        # the target list, reproducing the on-screen order server-side.
        tcard = Card(self.client, self.card.id)  # NOTE(review): unused
        queryParams = { 'idList' : self.card.tlist.id, 'pos' : 'bottom' }
        temp = deque()
        cardlist = self.card.tlist.tail
        for i in reversed(range(cardlist.count())):
            ca = cardlist.getCardAt(i)
            if (ca == self.card):
                Card(self.client, ca.id).updateCard(queryParams)
                break
            else:
                temp.append(ca)
        # Push the cards below the moved one, in top-down order.
        for i in range(len(temp)):
            ca = temp.pop()
            Card(self.client, ca.id).updateCard(queryParams)

    def run(self):
        self.op()
def main():
    """Wire everything together: read config, connect to Trello, build the
    board window, and attach the Leap Motion listener until the Qt event
    loop exits."""
    parser = argparse.ArgumentParser(description="Leap Motion Trello")
    parser.add_argument("-c", "--configFile", type=str, default="default.cfg",
                        help="Self.Configuration file is currently the only way to customize LeapMotionTrello.")
    args = parser.parse_args()
    config = ConfigParser.ConfigParser()
    config.read(args.configFile)
    # Trello API credentials come from the config file.
    apiKey = config.get('main', 'api_key')
    userAuthToken = config.get('main', 'user_token')
    client = Client(apiKey, userAuthToken)
    app = QtGui.QApplication(sys.argv)
    boardId = config.get('main', 'board_id')
    board = TrelloBoard(client, app, boardId, config)
    # Let the board rescale the Leap pointer whenever its size changes.
    listener = LeapListener()
    board.pointingMultiplier[int].connect(listener.setPointingMultiplier)
    controller = Leap.Controller()
    controller.add_listener(listener)
    app.exec_() # blocking
    controller.remove_listener(listener)
    print "Finished"

if __name__ == "__main__":
    main()
| 18,562 | 5,917 |
from keckdrpframework.primitives.base_primitive import BasePrimitive
from kcwidrp.primitives.kcwi_file_primitives import kcwi_fits_reader, \
kcwi_fits_writer, get_master_name, strip_fname
import os
class SubtractSky(BasePrimitive):
    """Pipeline primitive that subtracts a master sky frame from the
    current science frame, scaling by the exposure-time ratio.

    The master sky is located either via an operator-supplied
    ``kcwi.sky`` override file or, failing that, via the processing
    table (proctab)."""

    def __init__(self, action, context):
        BasePrimitive.__init__(self, action, context)
        self.logger = context.pipeline_logger

    def _pre_condition(self):
        """
        Checks if a master sky exists to subtract
        :return:
        """
        self.logger.info("Checking precondition for SubtractSky")
        skyfile = None
        skymask = None
        # check if kcwi.sky exists
        # Each row of kcwi.sky: <object frame> <sky frame> [<sky mask>]
        if os.path.exists('kcwi.sky'):
            # NOTE(review): file handle is not closed on a readlines()
            # exception; a 'with open(...)' would be safer.
            f = open('kcwi.sky')
            skyproc = f.readlines()
            f.close()
            # is our file in the list?
            # NOTE(review): substring match ('in'), so e.g. frame 'kb1'
            # would also match a row for 'kb10' — confirm intended.
            ofn = self.action.args.name
            for row in skyproc:
                if ofn in row.split()[0]:
                    skyfile = row.split()[1]
                    if len(row.split()) > 2:
                        skymask = row.split()[2]
            # Discard override entries that point at missing files.
            if skyfile:
                if not os.path.exists(skyfile):
                    skyfile = None
            if skymask:
                if not os.path.exists(skymask):
                    skymask = None
        # Stash results for _perform (None means: fall back to proctab).
        self.action.args.skyfile = skyfile
        self.action.args.skymask = skymask
        if skyfile:
            self.logger.info("pre condition got 1 master sky, expected 1")
            return True
        else:
            # No override: require at least one SKY entry in the proctab.
            target_type = 'SKY'
            tab = self.context.proctab.search_proctab(frame=self.action.args.ccddata,
                                                      target_type=target_type,
                                                      nearest=True)
            self.logger.info("pre condition got %d master sky, expected 1"
                             % len(tab))
            if len(tab) <= 0:
                return False
            else:
                return True

    def _perform(self):
        """Subtract the (exposure-scaled) master sky and write the 'intk'
        product; sets SKYCOR=False in the header if no master sky image
        is found on disk."""
        self.logger.info("Subtracting sky background")
        # Header keyword to update
        key = 'SKYCOR'
        keycom = 'sky corrected?'
        target_type = 'SKY'
        skyfile = self.action.args.skyfile
        skymask = self.action.args.skymask
        if not self.action.args.skyfile:
            # No kcwi.sky override: take the nearest SKY frame from proctab.
            tab = self.context.proctab.search_proctab(frame=self.action.args.ccddata,
                                                      target_type=target_type,
                                                      nearest=True)
            self.logger.info("%d master sky frames found" % len(tab))
            if len(tab) > 0:
                skyfile = tab['filename'][0]
        # Master sky product name, e.g. <frame>_sky.fits in the redux dir.
        msname = strip_fname(skyfile) + '_' + target_type.lower() + ".fits"
        if os.path.exists(os.path.join(self.config.instrument.cwd,
                                       'redux', msname)):
            self.logger.info("Reading image: %s" % msname)
            msky = kcwi_fits_reader(
                os.path.join(self.config.instrument.cwd, 'redux',
                             msname))[0]
            # scale the sky?
            # Scale by the object/sky exposure-time ratio; fall back to 1
            # when either XPOSURE value is unusable.
            obtime = self.action.args.ccddata.header['XPOSURE']
            sktime = msky.header['XPOSURE']
            if obtime <= 0. or sktime <= 0.:
                self.logger.warning("Bad exposure times (obj, sky): %.1f, %1f"
                                    % (obtime, sktime))
                skscl = 1.
            else:
                skscl = obtime / sktime
            self.logger.info("Sky scale factor = %.3f" % skscl)
            # do the subtraction
            self.action.args.ccddata.data -= msky.data * skscl
            # update header keywords
            self.action.args.ccddata.header[key] = (True, keycom)
            self.action.args.ccddata.header['SKYMAST'] = (msname,
                                                          "Master sky filename")
            self.action.args.ccddata.header['SKYSCL'] = (skscl,
                                                         'sky scale factor')
            if skymask:
                self.action.args.ccddata.header['SKYMSKF'] = (skymask,
                                                              'sky mask file')
        else:
            # update header keywords
            # Master sky product missing on disk: record that no sky
            # correction was applied, but still write the 'intk' product.
            self.action.args.ccddata.header[key] = (False, keycom)

        log_string = SubtractSky.__module__
        self.action.args.ccddata.header['HISTORY'] = log_string

        # write out int image
        kcwi_fits_writer(self.action.args.ccddata,
                         table=self.action.args.table,
                         output_file=self.action.args.name,
                         output_dir=self.config.instrument.output_directory,
                         suffix="intk")
        self.context.proctab.update_proctab(frame=self.action.args.ccddata,
                                            suffix="intk",
                                            filename=self.action.args.name)
        self.context.proctab.write_proctab()
        self.logger.info(log_string)
        return self.action.args
    # END: class SubtractSky()
| 5,115 | 1,458 |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
import typing
if typing.TYPE_CHECKING:
from .serializer import Serializer
from .api_client import ApiClient
class ApiConfiguration(object):
    """Container for the configuration options a service client needs.

    All options are optional and stored as plain attributes.

    :param serializer: serializer implementation for encoding/decoding
        JSON from/to Object models.
    :type serializer: (optional) ask_sdk_model_runtime.serializer.Serializer
    :param api_client: API Client implementation
    :type api_client: (optional) ask_sdk_model_runtime.api_client.ApiClient
    :param authorization_value: Authorization value to be used on any
        calls of the service client instance
    :type authorization_value: (optional) str
    :param api_endpoint: Endpoint to hit by the service client instance
    :type api_endpoint: (optional) str
    """

    def __init__(self, serializer=None, api_client=None,
                 authorization_value=None, api_endpoint=None):
        # type: (Serializer, ApiClient, str, str) -> None
        """Store the provided configuration options as attributes.

        :param serializer: serializer implementation for encoding/decoding
            JSON from/to Object models.
        :type serializer: (optional) ask_sdk_model_runtime.serializer.Serializer
        :param api_client: API Client implementation
        :type api_client: (optional) ask_sdk_model_runtime.api_client.ApiClient
        :param authorization_value: Authorization value to be used on any
            calls of the service client instance
        :type authorization_value: (optional) str
        :param api_endpoint: Endpoint to hit by the service client instance
        :type api_endpoint: (optional) str
        """
        self.serializer = serializer
        self.api_client = api_client
        self.authorization_value = authorization_value
        self.api_endpoint = api_endpoint
| 2,517 | 669 |
import pytest
import random
import time
import itertools
from multiprocessing.pool import ThreadPool
from datetime import timedelta
from emit.globals import Proxy, GuardedProxy, Config, ConfigDescriptor, LoggerProxy, log, conf
from ..helpers import TestCase
@pytest.mark.conf
@pytest.mark.log
@pytest.mark.proxy
class TestProxy(TestCase):
    """Unit tests for emit.globals.Proxy: a transparent forwarding object
    whose target is supplied either directly, via _set_resolver_obj, or
    lazily via a _set_resolver callable."""

    def test_proxy_init_no_arg(self):
        # A bare Proxy resolves to None until a target is attached.
        p = Proxy()
        assert p._resolve() is None

    def test_proxy_init_with_arg(self):
        p_obj = dict()
        p = Proxy(p_obj)
        assert p._resolve() == p_obj

    def test_proxy_dir(self):
        p_obj = dict()
        p = Proxy(p_obj)
        assert p._resolve() == p_obj
        # Dir p should contain at least the objects of dir
        assert set(dir(p)) >= set(dir(p_obj))

    def test_proxy_repr(self):
        # repr() forwards to (or embeds) the target's repr.
        p_obj = dict(foo='bar')
        p = Proxy(p_obj)
        assert p._resolve() == p_obj
        assert "{'foo': 'bar'}" in repr(p)

    def test_proxy_str(self):
        p_obj = dict(foo='bar')
        p = Proxy(p_obj)
        assert p._resolve() == p_obj
        assert "{'foo': 'bar'}" in str(p)

    def test_proxy_contains(self):
        # __contains__ forwards to the target.
        p_obj = dict(key='foo')
        p = Proxy(p_obj)
        assert p._resolve() == p_obj
        assert 'key' in p

    def test_proxy_len(self):
        p_obj = dict(key='foo')
        p = Proxy(p_obj)
        assert p._resolve() == p_obj
        assert len(p) == 1

    def test_proxy_iter(self):
        # Iteration forwards to the target (dict iteration yields keys).
        p_iters = 0
        p_obj = dict(key='foo')
        p = Proxy(p_obj)
        for x in p:
            p_iters += 1
            assert x == 'key'
        assert len(p) == 1
        assert p_iters == 1

    def test_proxy_items(self):
        # Item get/set/delete all pass through to the underlying dict.
        p_obj = dict()
        p = Proxy(p_obj)
        assert p._resolve() == p_obj
        assert not ('key' in p)
        p['key'] = 'foo'
        assert 'key' in p
        assert p._resolve()['key'] == 'foo'
        assert p['key'] == 'foo'
        del p['key']
        assert not ('key' in p)

    def test_proxy_attr(self):
        # Attribute get/set/delete pass through to the resolver target
        # (here: this test instance itself).
        p = Proxy()
        p._set_resolver_obj(self)
        assert p._resolve() == self
        assert hasattr(p, 'foo') == False
        p.foo = 'foo'
        assert hasattr(p, 'foo')
        assert p.foo == 'foo'
        del p.foo
        assert hasattr(p, 'foo') == False

    def test_proxy_call(self):
        # Calling the proxy calls the target; state lives on the target.
        class _test_proxy(object):
            calls = 0
            def __call__(self, *args, **kwargs):
                _test_proxy.calls += 1
                return _test_proxy.calls
        p = Proxy(_test_proxy())
        p_arg_obj = object()
        assert p(p_arg_obj) == 1
        assert p._resolve()() == 2

    def test_proxy_callable(self):
        # A resolver *callable* returns the target lazily on each resolve.
        p_stack = []
        p = Proxy()
        assert p._resolve() is None
        def p_func(arg):
            p_stack.append(arg)
            return arg
        def p_resolve():
            return p_func
        p._set_resolver(p_resolve)
        assert p._resolve() == p_func
        p_arg_obj = object()
        assert p(p_arg_obj) == p_arg_obj
        assert p_stack.pop() == p_arg_obj

    def test_proxy_callable_obj(self):
        # _set_resolver_obj attaches the target object directly.
        p_stack = []
        p = Proxy()
        assert p._resolve() is None
        def p_func(arg):
            p_stack.append(arg)
            return arg
        p._set_resolver_obj(p_func)
        assert p._resolve() == p_func
        p_arg_obj = object()
        assert p(p_arg_obj) == p_arg_obj
        assert p_stack.pop() == p_arg_obj

    def test_proxy_setattr(self):
        # Setting an attribute lands on the target, observable through the
        # explicit __getattr__ path.
        class TT(object):
            foo = 'foo'
        tt = Proxy(TT())
        tt.foo = 'bar'
        assert tt.__getattr__('foo') == 'bar'

    def test_proxy_slots_obj_access(self):
        # A slot that shadows a class attribute on a Proxy subclass stays
        # readable but must reject writes.
        # NOTE(review): relies on Proxy-specific class machinery — a plain
        # object() subclass would raise ValueError at class creation for a
        # name that is both a slot and a class variable.
        class T(Proxy):
            __slots__ = ('__resolver', 'test_proxy_slots_obj_access')
            test_proxy_slots_obj_access = '...'
        t = T(object())
        assert t.test_proxy_slots_obj_access == '...'
        assert t.__getattr__('test_proxy_slots_obj_access') == '...'
        with pytest.raises(AttributeError):
            t.__setattr__('test_proxy_slots_obj_access', 'bbb')
        assert t.test_proxy_slots_obj_access == '...'
        assert t.__getattr__('test_proxy_slots_obj_access') == '...'
class TestGlobals(TestCase):
    """Smoke tests for the module-level 'conf' and 'log' proxy singletons
    exported by emit.globals."""

    @pytest.mark.conf
    @pytest.mark.config
    def test_global_conf(self):
        # 'conf' is a Proxy wrapping a Config instance.
        assert isinstance(conf, Proxy)
        assert isinstance(conf._resolve(), Config)

    @pytest.mark.log
    def test_global_log(self):
        assert isinstance(log, LoggerProxy)

    @pytest.mark.log
    def test_global_log_call(self, logs):
        # Calling log(...) directly emits a record ('logs' is a fixture
        # capturing emitted log records).
        msg = 'TestGlobals.test_global_log_call - test'
        assert callable(log)
        log(msg)
        last_record = logs.pop()
        assert msg == last_record.getMessage()

    @pytest.mark.log
    def test_global_logger_call(self, logs):
        # The wrapped logger object is itself callable with the same effect.
        msg = 'TestGlobals.test_global_logger_call - test'
        assert callable(log.logger)
        log.logger(msg)
        last_record = logs.pop()
        assert msg == last_record.getMessage()
@pytest.mark.conf
@pytest.mark.config
@pytest.mark.config_value
class TestConfigDescriptor(TestCase):
    def test_init(self):
        # Creating a ConfigDescriptor with no default must NOT register an
        # attribute on the global conf object: lookups raise AttributeError
        # both before and after construction.
        with pytest.raises(AttributeError) as excinfo:
            assert conf.TestConfigDescriptor_test_init is None
        assert '\'Config\' object has no attribute \'TestConfigDescriptor_test_init\'' \
            == str(excinfo.value)
        ConfigDescriptor('TestConfigDescriptor_test_init')
        with pytest.raises(AttributeError) as excinfo:
            assert conf.TestConfigDescriptor_test_init is None
        assert '\'Config\' object has no attribute \'TestConfigDescriptor_test_init\'' \
            == str(excinfo.value)
    def test_init_default(self):
        # With an explicit default, construction DOES register the value on
        # the global conf object.
        # NOTE(review): this reuses the attribute name from test_init
        # ('..._test_init', not '..._test_init_default') — looks like a
        # copy-paste slip that makes the test order-sensitive; confirm.
        with pytest.raises(AttributeError) as excinfo:
            assert conf.TestConfigDescriptor_test_init is None
        assert '\'Config\' object has no attribute \'TestConfigDescriptor_test_init\'' \
            == str(excinfo.value)
        ConfigDescriptor('TestConfigDescriptor_test_init', False)
        assert conf.TestConfigDescriptor_test_init is False
    def test_init_as_prop(self):
        # Used as a class attribute, the descriptor reads from the global
        # conf; writes through an *instance* shadow the descriptor for that
        # instance only, leaving class and conf values untouched.
        class T(object):
            TestConfigDescriptor_test_init_as_descriptor = \
                ConfigDescriptor('TestConfigDescriptor_test_init_as_descriptor')
        t_obj = T()
        # Unset: descriptor access yields None, direct conf access raises.
        assert T.TestConfigDescriptor_test_init_as_descriptor is None
        assert t_obj.TestConfigDescriptor_test_init_as_descriptor is None
        with pytest.raises(AttributeError) as excinfo:
            assert conf.TestConfigDescriptor_test_init_as_descriptor is None
        assert '\'Config\' object has no attribute \'TestConfigDescriptor_test_init_as_descriptor\'' \
            == str(excinfo.value)
        # Setting on conf is visible through class and instance.
        conf.TestConfigDescriptor_test_init_as_descriptor = 'set_G'
        assert conf.TestConfigDescriptor_test_init_as_descriptor == 'set_G'
        assert T.TestConfigDescriptor_test_init_as_descriptor == 'set_G'
        assert t_obj.TestConfigDescriptor_test_init_as_descriptor == 'set_G'
        # Setting on the instance shadows locally only.
        t_obj.TestConfigDescriptor_test_init_as_descriptor = 'set_T_OBJ'
        assert conf.TestConfigDescriptor_test_init_as_descriptor == 'set_G'
        assert T.TestConfigDescriptor_test_init_as_descriptor == 'set_G'
        assert t_obj.TestConfigDescriptor_test_init_as_descriptor == 'set_T_OBJ'
    def test_init_as_descriptor_pass_through(self):
        # With a default, the descriptor seeds conf at construction and then
        # passes reads/writes through; deleting an instance shadow falls
        # back to the shared conf value.
        class A(object):
            TestConfigDescriptor_test_init_as_descriptor_pass_through = \
                ConfigDescriptor(
                    'TestConfigDescriptor_test_init_as_descriptor_pass_through',
                    'set_A')
        a_obj = A()
        # Default visible everywhere, including directly on conf.
        assert A.TestConfigDescriptor_test_init_as_descriptor_pass_through == 'set_A'
        assert a_obj.TestConfigDescriptor_test_init_as_descriptor_pass_through == 'set_A'
        assert conf.TestConfigDescriptor_test_init_as_descriptor_pass_through == 'set_A'
        # Writing on conf updates class and instance views.
        conf.TestConfigDescriptor_test_init_as_descriptor_pass_through = \
            'set_A_CONF'
        assert A.TestConfigDescriptor_test_init_as_descriptor_pass_through == \
            'set_A_CONF'
        assert a_obj.TestConfigDescriptor_test_init_as_descriptor_pass_through == \
            'set_A_CONF'
        assert conf.TestConfigDescriptor_test_init_as_descriptor_pass_through == \
            'set_A_CONF'
        # Instance write shadows only the instance.
        a_obj.TestConfigDescriptor_test_init_as_descriptor_pass_through = 'set_A_obj'
        assert A.TestConfigDescriptor_test_init_as_descriptor_pass_through == \
            'set_A_CONF'
        assert a_obj.TestConfigDescriptor_test_init_as_descriptor_pass_through == \
            'set_A_obj'
        assert conf.TestConfigDescriptor_test_init_as_descriptor_pass_through == \
            'set_A_CONF'
        # Deleting the shadow restores pass-through to the conf value.
        del a_obj.TestConfigDescriptor_test_init_as_descriptor_pass_through
        assert A.TestConfigDescriptor_test_init_as_descriptor_pass_through == \
            'set_A_CONF'
        assert a_obj.TestConfigDescriptor_test_init_as_descriptor_pass_through == \
            'set_A_CONF'
        assert conf.TestConfigDescriptor_test_init_as_descriptor_pass_through == \
            'set_A_CONF'
def test_init_as_descriptor_default(self):
    """Contrast a ConfigDescriptor attribute (class A) with a plain class
    attribute (class B): both behave alike for instance shadowing, but
    only the descriptor also exposes its default through `conf`, and a
    later class-level rebind never leaks into conf."""
    class A(object):
        TestConfigDescriptor_test_init_as_descriptor_default_a = \
            ConfigDescriptor(
                'TestConfigDescriptor_test_init_as_descriptor_default_a',
                'set_A')
        def foo(self):
            self.TestConfigDescriptor_test_init_as_descriptor_default_a = 'set_A_foo'
    class B(object):
        TestConfigDescriptor_test_init_as_descriptor_default_b = 'set_B'
        def foo(self):
            self.TestConfigDescriptor_test_init_as_descriptor_default_b = 'set_B_foo'
    a_obj = A()
    b_obj = B()
    # Defaults are visible on class, instance and (for A) on conf.
    assert A.TestConfigDescriptor_test_init_as_descriptor_default_a == 'set_A'
    assert B.TestConfigDescriptor_test_init_as_descriptor_default_b == 'set_B'
    assert a_obj.TestConfigDescriptor_test_init_as_descriptor_default_a == 'set_A'
    assert b_obj.TestConfigDescriptor_test_init_as_descriptor_default_b == 'set_B'
    assert conf.TestConfigDescriptor_test_init_as_descriptor_default_a == 'set_A'
    # Direct instance assignment shadows per-instance; conf is untouched.
    a_obj.TestConfigDescriptor_test_init_as_descriptor_default_a = 'set_A_a_obj'
    b_obj.TestConfigDescriptor_test_init_as_descriptor_default_b = 'set_B_a_obj'
    assert A.TestConfigDescriptor_test_init_as_descriptor_default_a == 'set_A'
    assert B.TestConfigDescriptor_test_init_as_descriptor_default_b == 'set_B'
    assert a_obj.TestConfigDescriptor_test_init_as_descriptor_default_a == 'set_A_a_obj'
    assert b_obj.TestConfigDescriptor_test_init_as_descriptor_default_b == 'set_B_a_obj'
    assert conf.TestConfigDescriptor_test_init_as_descriptor_default_a == 'set_A'
    # Assignment from inside a method behaves like direct assignment.
    a_obj.foo()
    b_obj.foo()
    assert A.TestConfigDescriptor_test_init_as_descriptor_default_a == 'set_A'
    assert B.TestConfigDescriptor_test_init_as_descriptor_default_b == 'set_B'
    assert a_obj.TestConfigDescriptor_test_init_as_descriptor_default_a == 'set_A_foo'
    assert b_obj.TestConfigDescriptor_test_init_as_descriptor_default_b == 'set_B_foo'
    assert conf.TestConfigDescriptor_test_init_as_descriptor_default_a == 'set_A'
    # Rebinding on the class affects future class lookups only; existing
    # instance values and conf keep their values.
    A.TestConfigDescriptor_test_init_as_descriptor_default_a = 'set_A_C'
    B.TestConfigDescriptor_test_init_as_descriptor_default_b = 'set_B_C'
    assert A.TestConfigDescriptor_test_init_as_descriptor_default_a == 'set_A_C'
    assert B.TestConfigDescriptor_test_init_as_descriptor_default_b == 'set_B_C'
    assert a_obj.TestConfigDescriptor_test_init_as_descriptor_default_a == 'set_A_foo'
    assert b_obj.TestConfigDescriptor_test_init_as_descriptor_default_b == 'set_B_foo'
    assert conf.TestConfigDescriptor_test_init_as_descriptor_default_a == 'set_A'
@pytest.mark.proxy
@pytest.mark.guarded_proxy
@pytest.mark.config
class TestGuardedProxy(TestCase):
    """GuardedProxy tests: resolution only works inside the proxy's
    context manager, and the context serializes concurrent access."""

    def test_init(self):
        # A proxy built with no target resolves to None in-context.
        p = GuardedProxy()
        with p:
            assert p._resolve() is None

    def test_init_with_arg(self):
        # A proxy wrapping an object resolves to that object in-context.
        p_obj = dict()
        p = GuardedProxy(p_obj)
        with p:
            assert p._resolve() == p_obj

    def test_out_of_context(self):
        # Resolving outside the `with` block must raise RuntimeError.
        p_obj = dict()
        p = GuardedProxy(p_obj)
        with pytest.raises(RuntimeError):
            assert p._resolve() == p_obj

    @pytest.mark.slow
    def test_thread_safe(self):
        """Hammer a CallableStack from many threads; the guarded variant
        must keep the enter/exit trail in strict nested order, while the
        unguarded variant is expected to exhibit the race."""
        def ctx_worker(o):
            # Guarded path: acquire the proxy context before each burst.
            for i in range(iterations):
                with o as ctx:
                    for x in range(calls):
                        ctx(zip(enters, exits))
        def worker(o):
            # Unguarded path: call the raw stack with no serialization.
            for i in range(iterations):
                for x in range(calls):
                    o(zip(enters, exits))
        # For our CallableStack to append on enter/exit to assert thread safety
        def work(f, ctx):
            enter, exit = ctx.pop(0)
            try:
                items.append(enter)
                sleep_random()
                return f(ctx)
            finally:
                items.append(exit)
        def sleep_random():
            # Contention is created through random sleeps, shouldn't take anymore
            # time than this to trigger a race condition @ 100%
            time.sleep(timedelta(0, .0001).total_seconds() * random.random())
        def test_ctx(ctx_func, ctx):
            pool = ThreadPool(processes=concurrency)
            pool.imap_unordered(ctx_func, list(ctx for i in range(concurrency)))
            pool.close()
            pool.join()
            # Every enter and exit of every nested call must be recorded...
            expect = 2 * depth
            expect *= calls * iterations * concurrency
            assert len(items) == expect
            # ...and in strict cyclic order if access was serialized.
            expect_iter = itertools.cycle(range(depth * 2))
            for item in items:
                assert item == next(expect_iter), 'race condition'
        # how much / long to run
        concurrency = 12
        iterations = 40
        calls = 3
        depth = 10
        # each work() func leaves a call trail counter at enter and exit
        enters = list(range(depth))
        exits = list(reversed(range(depth, depth * 2)))
        cs = CallableStack(*list(work for i in range(depth)))
        cs_guarded = GuardedProxy(cs)
        items = []
        test_ctx(ctx_worker, cs_guarded)
        items = []
        # The raw stack without the guard should trip the ordering assert.
        with pytest.raises(AssertionError) as excinfo:
            assert test_ctx(worker, cs)
        assert str(excinfo.value).startswith('race condition')
class CallableStack(list):
    """
    Unit testing utility provides a simple pattern for reentrant ordered call
    recursion through a stack of `funcs`. This is useful for setting up groups
    of context that may expire after a period of time and need support from
    parent calls to rebuild. I.E.:
    ctx = Context()
    cs = CallableStack(initialize, prepare, process, cleanup)
    cs(ctx):
    initialize(cs, ctx):
    additional_ctx = ...
    cs(ctx, additional_ctx) ->
    additional_ctx.transform()
    prepare(cs, ctx, additional_ctx):
    cs() ->
    process(cs):
    cs() ->
    cleanup()
    ...
    # prepare exits
    # initialize exits
    """

    def __init__(self, *callables):
        # Seed the underlying list with the callables, top of stack first.
        super(CallableStack, self).__init__(callables)

    def __call__(self, *args):
        # Empty stack: nothing left to invoke.
        if not self:
            return None
        # Invoke the head, handing it the remainder of the stack so it can
        # recurse deeper (and pass along any extra positional args).
        head = self[0]
        rest = CallableStack(*self[1:])
        return head(rest, *args)
| 15,581 | 4,860 |
# Read two whitespace-separated numbers from one input line and print the
# four basic arithmetic results. (Prompt/output text is in Portuguese.)
numeros = input("Digite dois números: ").split(" ")
num1 = float(numeros[0])
num2 = float(numeros[1])
# print(f"{num1} + {num2} = {num1 + num2}")
# Addition uses two-decimal %-formatting; the remaining lines print raw floats.
print("%.2f + %.2f = %.2f" %(num1, num2, (num1 + num2)))
# print(num1, "+", num2, "=", num1 + num2)
print(num1, "-", num2, "=", num1 - num2)
print(num1, "*", num2, "=", num1 * num2)
# NOTE(review): raises ZeroDivisionError when num2 == 0 — confirm acceptable.
print(num1, "/", num2, "=", num1 / num2)
| 374 | 179 |
#!/usr/bin/env python3
## Copyright (c) 2011 Steven D'Aprano.
## See the file __init__.py for the licence terms for this software.
"""
Run the stats package as if it were an executable module.
Usage:
$ python3 -m stats [options]
Options:
-h --help Print this help text.
-V --version Print the version number.
-v --verbose Run tests verbosely.
-q --quiet Don't print anything on success.
With no options, perform a self-test of the stats package by running all
doctests in the package. By default, failed tests will be printed. If all
tests pass, a count of how many tests were performed is printed.
To print details of all tests regardless of whether they succeed or fail,
pass the verbose flag after the package name:
$ python3 -m stats -v
To suppress output if all tests pass, pass the quiet flag:
$ python3 -m stats -q
"""
import sys
def process_options():
    """Parse command-line flags from sys.argv.

    Returns a ``(verbose, quiet)`` pair of booleans. Exits the process
    immediately for -h/--help (status 0), -V/--version (status 0), or
    when both verbose and quiet are requested (status 1).
    """
    args = sys.argv[1:]
    if '-h' in args or '--help' in args:
        print(__doc__)
        sys.exit(0)
    want_verbose = '-v' in args or '--verbose' in args
    want_quiet = '-q' in args or '--quiet' in args
    if want_verbose and want_quiet:
        # The two flags contradict each other; refuse to guess.
        print('cannot be both quiet and verbose', file=sys.stderr)
        sys.exit(1)
    if '-V' in args or '--version' in args:
        import stats
        print(stats.__version__)
        sys.exit(0)
    return want_verbose, want_quiet
def self_test(verbose, quiet):
    """Run every doctest in the stats package.

    Returns the number of failed tests (0 on full success). Prints a
    summary on success unless *quiet* is set.
    """
    assert not (verbose and quiet)
    import doctest
    import stats, stats.co, stats.multivar, stats.order, \
        stats.univar, stats.utils, stats.vectorize
    modules = (stats, stats.co, stats.multivar, stats.order,
               stats.univar, stats.utils, stats.vectorize,
               )
    failed = tried = 0
    for module in modules:
        # testmod returns a TestResults(failed, attempted) pair.
        result = doctest.testmod(module, verbose=verbose)
        failed += result.failed
        tried += result.attempted
    if not failed and not quiet:
        print("Successfully run %d doctests from %d files."
              % (tried, len(modules)))
    return failed
# Only self-test when run as a module (python3 -m stats); __package__ is
# None when the file is executed directly as a script.
if __name__ == '__main__' and __package__ is not None:
    verbose, quiet = process_options()
    # Exit status is the number of failed doctests (0 == success).
    sys.exit(self_test(verbose, quiet))
| 2,149 | 675 |
import unittest
import pymongo
import datetime
from bson import ObjectId
from iu_mongo import Document, connect
from iu_mongo.fields import *
from iu_mongo.errors import ValidationError
import iu_mongo
class Person(Document):
    """Sample document used by FieldTests to exercise field defaults."""
    meta = {
        'db_name': 'test'
    }
    name = StringField()  # no default: absent unless explicitly set
    age = IntField(default=30, required=False)
    userid = StringField(default=lambda: 'test', required=True)  # callable default
    created = DateTimeField(default=datetime.datetime.utcnow)  # evaluated per document
    day = DateField(default=datetime.date.today)
class FieldTests(unittest.TestCase):
    """Field behaviour tests for iu_mongo documents (defaults, int and
    string fields) against a live 'test' database."""

    def setUp(self):
        connect(db_names=['test'])

    def tearDown(self):
        # Drop everything the tests inserted into the Person collection.
        Person.remove({})

    def test_default_not_set(self):
        """Defaults are applied for every field the caller did not set."""
        person = Person(name="Ross")
        data_to_be_saved = sorted(person.to_mongo().keys())
        self.assertEqual(data_to_be_saved,
                         ['age', 'created', 'day', 'name', 'userid']
                         )
        self.assertTrue(person.validate() is None)
        # Pin the concrete default values (the previous assertions compared
        # each attribute with itself, which could never fail).
        self.assertEqual(person.name, 'Ross')
        self.assertEqual(person.age, 30)
        self.assertEqual(person.userid, 'test')
        self.assertIsInstance(person.created, datetime.datetime)
        self.assertIsInstance(person.day, datetime.date)
        # Repeated reads must return a stable value even for callable
        # defaults such as datetime.datetime.utcnow.
        self.assertEqual(person.created, person.created)
        self.assertEqual(person.day, person.day)
        # The raw _data mirror matches attribute access.
        self.assertEqual(person._data['name'], person.name)
        self.assertEqual(person._data['age'], person.age)
        self.assertEqual(person._data['userid'], person.userid)
        self.assertEqual(person._data['created'], person.created)
        self.assertEqual(person._data['day'], person.day)
        # Reading the attributes must not change what gets serialized.
        data_to_be_saved = sorted(person.to_mongo().keys())
        self.assertEqual(
            data_to_be_saved, ['age', 'created', 'day', 'name', 'userid'])

    def test_default_set_none(self):
        """Explicit None drops a defaultless field ('name'); the fields
        with defaults still appear in the serialized document."""
        person = Person(name=None, age=None, userid=None,
                        created=None, day=None)
        data_to_be_saved = sorted(person.to_mongo().keys())
        self.assertEqual(data_to_be_saved, ['age', 'created', 'day', 'userid'])

    def test_int_field(self):
        """IntField range validation, None handling and BSON int sizing."""
        # max integer value mongodb can handle, i.e. 64-bit signed integer
        max_int_val = (1 << 63)-1

        class Doc(Document):
            meta = {
                'db_name': 'test'
            }
            test_int = IntField(min_value=-123, max_value=max_int_val)
        Doc.remove({})
        doc1 = Doc(test_int=max_int_val)
        doc2 = Doc(test_int=None)
        doc3 = Doc(test_int=max_int_val+1)  # above max_value
        doc4 = Doc(test_int=-200)           # below min_value
        doc1.save()
        doc2.save()
        self.assertEqual(Doc.count({'test_int': None}), 1)
        self.assertEqual(Doc.count({'test_int': {'$ne': None}}), 1)
        doc1 = Doc.find_one({'test_int': None})
        doc2 = Doc.find_one({'test_int': {'$ne': None}})
        self.assertEqual(doc1.test_int, None)
        self.assertEqual(doc2.test_int, max_int_val)
        # Out-of-range values must fail validation on save.
        self.assertRaises(iu_mongo.errors.ValidationError, doc3.save)
        self.assertRaises(iu_mongo.errors.ValidationError, doc4.save)
        # A numeric string is coerced to an int before being stored.
        doc5 = Doc(test_int='-123')
        doc5.save()
        self.assertEqual(Doc.count({'test_int': '-123'}), 0)
        doc5 = Doc.find_one({'test_int': -123})
        self.assertEqual(doc5.test_int, -123)
        # 32-bit signed type
        self.assertEqual(Doc.count({'test_int': {'$type': 'int'}}), 1)
        # 64-bit signed type
        self.assertEqual(Doc.count({'test_int': {'$type': 'long'}}), 1)
        Doc.remove({})

    def test_string_field(self):
        """StringField handling of None, empty and non-ASCII values."""
        class Doc(Document):
            meta = {
                'db_name': 'test'
            }
            test_str = StringField()
        Doc.remove({})
        doc1 = Doc(test_str=None)
        doc2 = Doc(test_str='')
        doc3 = Doc(test_str='abcdefghij')
        doc4 = Doc(test_str='我')  # non-ASCII round-trips intact
        doc1.save()
        doc2.save()
        doc3.save()
        doc4.save()
        self.assertEqual(Doc.count({'test_str': None}), 1)
        self.assertEqual(Doc.count({'test_str': {'$ne': None}}), 3)
        self.assertEqual(Doc.count({'test_str': ''}), 1)
        doc4.reload()
        doc3.reload()
        self.assertIsInstance(doc3.test_str, str)
        self.assertIsInstance(doc4.test_str, str)
        self.assertEqual(Doc.count({'test_str': {'$type': 'string'}}), 3)
        Doc.remove({})
| 4,277 | 1,441 |
# -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chrome OS Image file signing."""
from __future__ import print_function
import collections
from chromite.lib import commandline
from chromite.lib import cros_logging as logging
from chromite.signing.image_signing import imagefile
class Error(Exception):
    """Base exception for all exceptions in this module.

    Callers can catch this one type to handle any error raised here.
    """
# For a given image_type argument, what are the arguments that we
# need to pass to imagefile.SignImage()? Several of the image types
# take very similar parameters. See the dictionary immediately following.
SignArgs = collections.namedtuple(
    'SignArgs', ('image_type', 'kernel_id', 'keyA_prefix'))

# Dictionary to convert the user's given image_type to the correct arguments for
# image_type, kernel_id, and keyA_prefix to imagefile.SignImage.
# Note several aliases map to the same signing configuration
# (ssd/base, factory/install).
SignImageArgs = {
    'ssd': SignArgs('SSD', 2, ''),
    'base': SignArgs('SSD', 2, ''),
    'usb': SignArgs('USB', 2, ''),
    'recovery': SignArgs('recovery', 4, 'recovery_'),
    'factory': SignArgs('factory_install', 2, 'installer_'),
    'install': SignArgs('factory_install', 2, 'installer_'),
}

# All of the valid |image_type| values from the user. Many of these are not
# implemented yet. Since they will be, they get their own exit code.
ValidImageTypes = (
    'ssd', 'base', 'usb', 'recovery', 'factory', 'install', 'firmware',
    'kernel', 'recovery_kernel', 'update_payload',
    'accessory_usbpd', 'accessory_rwsig',
)
def main(argv):
    """Sign a Chrome OS image file.

    Returns 0 on success, or 2 when the requested image type has no
    signing implementation yet.
    """
    parser = commandline.ArgumentParser(description=__doc__)
    parser.add_argument('-t', '--type', required=True, dest='image_type',
                        choices=list(ValidImageTypes), help='Type of image')
    parser.add_argument('-i', '--input', required=True, dest='input_image',
                        type='path', help='Path to input image file')
    parser.add_argument('-k', '--keyset-dir', required=True, dest='key_dir',
                        help='Path to keyset directory')
    parser.add_argument('-o', '--output', required=True, dest='output_image',
                        help='Path to output image file')
    options = parser.parse_args(argv)
    options.Freeze()
    # See what kind of image this is.
    call_args = SignImageArgs.get(options.image_type)
    if call_args is None:
        # TODO(lamontjones): implement signing for the other supported image
        # types: firmware, kernel, recovery_kernel, update_payload,
        # accessory_usbpd, accessory_rwsig.
        logging.error('Unimplemented --type %s', options.image_type)
        return 2
    imagefile.SignImage(
        call_args.image_type, options.input_image, options.output_image,
        call_args.kernel_id, options.key_dir, call_args.keyA_prefix)
    return 0
| 2,821 | 879 |
import requests
import json
from decimal import Decimal
from bitsv.network import currency_to_satoshi
from bitsv.network.meta import Unspent
# left here as a reminder to normalize get_transaction()
from bitsv.network.transaction import Transaction, TxInput, TxOutput
from bitsv.constants import BSV
DEFAULT_TIMEOUT = 30
BSV_TO_SAT_MULTIPLIER = BSV
class BCHSVExplorerAPI:
    """
    Simple bitcoin SV REST API --> uses base58 address format (addresses start with "1")
    - get_address_info
    - get_balance
    - get_transactions
    - get_transaction
    - get_unspent
    - broadcast_tx
    """
    MAIN_ENDPOINT = 'https://bchsvexplorer.com/'
    MAIN_ADDRESS_API = MAIN_ENDPOINT + 'api/addr/{}'
    MAIN_BALANCE_API = MAIN_ADDRESS_API + '/balance'
    MAIN_UNSPENT_API = MAIN_ADDRESS_API + '/utxo'
    MAIN_TX_PUSH_API = MAIN_ENDPOINT + 'api/tx/send/'
    MAIN_TX_API = MAIN_ENDPOINT + 'api/tx/{}'
    MAIN_TX_AMOUNT_API = MAIN_TX_API
    TX_PUSH_PARAM = 'create_rawtx'
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
    }

    @classmethod
    def get_address_info(cls, address):
        """Return the raw address-info JSON for a base58 address."""
        r = requests.get(cls.MAIN_ADDRESS_API.format(address), timeout=DEFAULT_TIMEOUT)
        r.raise_for_status()  # pragma: no cover
        return r.json()

    @classmethod
    def get_balance(cls, address):
        """Return the confirmed balance reported for the address."""
        r = requests.get(cls.MAIN_BALANCE_API.format(address), timeout=DEFAULT_TIMEOUT)
        r.raise_for_status()  # pragma: no cover
        return r.json()

    @classmethod
    def get_transactions(cls, address):
        """Return the list of transaction ids touching the address."""
        r = requests.get(cls.MAIN_ADDRESS_API.format(address), timeout=DEFAULT_TIMEOUT)
        r.raise_for_status()  # pragma: no cover
        return r.json()['transactions']

    @classmethod
    def get_transaction(cls, txid):
        """Return a normalized Transaction object for *txid*.

        Amounts from the API are denominated in BSV and converted to
        satoshis via BSV_TO_SAT_MULTIPLIER.
        """
        r = requests.get(cls.MAIN_TX_API.format(txid), timeout=DEFAULT_TIMEOUT)
        r.raise_for_status()  # pragma: no cover
        # parse_float=Decimal keeps monetary values exact.
        response = r.json(parse_float=Decimal)
        tx = Transaction(response['txid'],
                         (Decimal(response['valueIn']) * BSV_TO_SAT_MULTIPLIER).normalize(),
                         (Decimal(response['valueOut']) * BSV_TO_SAT_MULTIPLIER).normalize())
        for txin in response['vin']:
            # Inputs already carry a satoshi amount ('valueSat').
            part = TxInput(txin['addr'], txin['valueSat'])
            tx.add_input(part)
        for txout in response['vout']:
            addr = None
            # Some outputs (e.g. OP_RETURN) carry no address.
            if 'addresses' in txout['scriptPubKey'] and \
                    txout['scriptPubKey']['addresses'] is not None:
                addr = txout['scriptPubKey']['addresses'][0]
            part = TxOutput(addr,
                            (Decimal(txout['value']) * BSV_TO_SAT_MULTIPLIER).normalize(),
                            txout['scriptPubKey']['asm'])
            tx.add_output(part)
        return tx

    @classmethod
    def raw_get_transaction(cls, txid):
        """un-altered return value from API - useful for debugging"""
        r = requests.get(cls.MAIN_TX_API.format(txid), timeout=DEFAULT_TIMEOUT)
        r.raise_for_status()  # pragma: no cover
        return r.json()

    @classmethod
    def get_unspents(cls, address):
        """Return the address's UTXOs as Unspent objects (amounts in satoshis)."""
        r = requests.get(cls.MAIN_UNSPENT_API.format(address), timeout=DEFAULT_TIMEOUT)
        r.raise_for_status()  # pragma: no cover
        return [
            Unspent(currency_to_satoshi(tx['amount'], 'bsv'),
                    tx['confirmations'],
                    tx['scriptPubKey'],
                    tx['txid'],
                    tx['vout'])
            for tx in r.json()
        ]

    @classmethod
    def send_transaction(cls, rawtx):  # pragma: no cover
        """Broadcast a raw (hex) transaction and return the new txid."""
        # Build the URL from MAIN_ENDPOINT instead of a duplicated literal,
        # and bound the request like every other call in this class
        # (previously this POST had no timeout and could hang forever).
        r = requests.post(
            cls.MAIN_ENDPOINT + 'api/tx/send',
            data=json.dumps({'rawtx': rawtx}),
            headers=cls.headers,
            timeout=DEFAULT_TIMEOUT,
        )
        r.raise_for_status()
        return r.json()['txid']
| 3,874 | 1,275 |
from rest_framework.response import Response
def create_response(data=None, error=None, status=None):
    """Wrap *data*/*error* in the standard response envelope.

    Args:
        data: payload to return to the client (may be None).
        error: error detail to return to the client (may be None).
        status: HTTP status code; defaults to 200 when omitted.
            (Previously the None default crashed with a TypeError in the
            ``200 <= status < 400`` comparison below.)

    Returns:
        rest_framework Response with keys "data", "error" and "success".
    """
    if status is None:
        status = 200
    # 2xx/3xx codes count as success; 1xx/4xx/5xx do not.
    success = 200 <= status < 400
    response = {
        "data": data,
        "error": error,
        "success": success
    }
    return Response(data=response, status=status)
| 336 | 101 |
import pandas as pd
import requests
from pandas_datareader.base import _BaseReader
class EcondbReader(_BaseReader):
    """Get data for the given name from Econdb."""

    _URL = "https://www.econdb.com/api/series/"
    _format = None
    # "labels" or "codes": which half of a "code:label" metadata pair is
    # used for the resulting column MultiIndex.
    _show = "labels"

    @property
    def url(self):
        """API URL"""
        if not isinstance(self.symbols, str):
            raise ValueError("data name must be string")
        # `symbols` is appended verbatim as the query string — presumably a
        # pre-built query such as "ticker=RGDPUS"; confirm against callers.
        return "{0}?{1}&format=json&page_size=500&expand=both".format(
            self._URL, self.symbols
        )

    def read(self):
        """ read one data from specified URL """
        results = requests.get(self.url).json()["results"]
        # Start from an empty dates-indexed frame and outer-join each series.
        df = pd.DataFrame({"dates": []}).set_index("dates")
        if self._show == "labels":

            def show_func(x):
                # "code:label" -> label
                return x.split(":")[1]

        elif self._show == "codes":

            def show_func(x):
                # "code:label" -> code
                return x.split(":")[0]

        for entry in results:
            series = pd.DataFrame(entry["data"])[["dates", "values"]].set_index("dates")
            head = entry["additional_metadata"]
            if head != "":  # this additional metadata is not blank
                # NOTE(review): `head` is treated as a dict below
                # (values()/keys()), so the != "" guard presumably filters an
                # empty-string placeholder returned by the API — confirm.
                series.columns = pd.MultiIndex.from_tuples(
                    [[show_func(x) for x in head.values()]],
                    names=[show_func(x) for x in head.keys()],
                )
            if not df.empty:
                df = df.join(series, how="outer")
            else:
                df = series
        if df.shape[0] > 0:
            df.index = pd.to_datetime(df.index, errors="ignore")
            df.index.name = "TIME_PERIOD"
            # Restrict to the reader's requested [start, end] window.
            df = df.truncate(self.start, self.end)
        return df
| 1,699 | 510 |
# coding:utf-8
import numpy as np
import torch
import math
class IOUMetric(object):
    """
    Accumulates a confusion matrix over batches (fast_hist method) and
    derives pixel accuracy, per-class accuracy, IoU, mean IoU and
    frequency-weighted accuracy from it.
    """

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.hist = np.zeros((num_classes, num_classes))

    def _fast_hist(self, label_pred, label_true):
        # Count only pixels whose ground-truth label is a valid class id.
        valid = (label_true >= 0) & (label_true < self.num_classes)
        flat_index = self.num_classes * label_true[valid].astype(int) + label_pred[valid]
        counts = np.bincount(flat_index, minlength=self.num_classes ** 2)
        return counts.reshape(self.num_classes, self.num_classes)

    def add_batch(self, predictions, gts):
        # Fold each (prediction, ground-truth) pair into the running matrix.
        for pred, true in zip(predictions, gts):
            self.hist += self._fast_hist(pred.flatten(), true.flatten())

    def evaluate(self):
        hist = self.hist
        diag = np.diag(hist)
        # Overall pixel accuracy.
        acc = diag.sum() / hist.sum()
        # Mean of per-class accuracies (NaN-safe for absent classes).
        acc_cls = np.nanmean(diag / hist.sum(axis=1))
        # Per-class intersection-over-union.
        iu = diag / (hist.sum(axis=1) + hist.sum(axis=0) - diag)
        mean_iu = np.nanmean(iu)
        # Frequency-weighted accuracy over classes that actually occur.
        freq = hist.sum(axis=1) / hist.sum()
        fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
        return acc, acc_cls, iu, mean_iu, fwavacc
def soft_thresholding(x, lm):
    """Elementwise soft-thresholding: shrink |x| by lm, clip at zero,
    and restore the sign (the proximal operator of the L1 norm)."""
    zeros = torch.zeros(size=x.size(), device=x.device)
    shrunk = torch.maximum(torch.abs(x) - lm, zeros)
    return shrunk * torch.sign(x)
@torch.no_grad()
def fast_ista(b, A, lmbda, max_iter):
"""
This is the fast Iterative Shrinkage-Thresholding Algorithm to solve the following objective:
min: {L2_norm(Ax - b) + L1_norm(x)}
:param b: input data with shape: [n_samples, n_features]
:param A: a pre-learned Dictionary, with shape: [n_coeffs, n_features]
:param lmbda: sparsity term to control the importance of the L1 term
:param max_iter:
:return: sparse codes with shape: [n_samples, n_coeffs]
"""
n_coeffs, n_feats = A.size()
n_samples = b.size()[0]
x = torch.zeros(size=(n_samples, n_coeffs), device=b.device)
t = 1.
z = torch.zeros(size=(n_samples, n_coeffs), device=b.device)
L = torch.linalg.norm(A, ord=2) ** 2 # Lipschitz constant, 2-norm (largest sing. value)
for k in range(max_iter):
x_old = x.clone()
z = z + torch.matmul(b - torch.matmul(z, A), A.T) / L
x = soft_thresholding(z, lmbda / L)
t0 = t
t = (1. + math.sqrt(1. + 4. * t ** 2)) / 2.
z = x + ((t0 - 1.) / t) * (x - x_old)
return x
| 2,500 | 991 |
# flake8: noqa: F401
from .core import db
from .core import db_cli
from .core import execute_query
from .models.incidents import Incidents, IncidentSchema
from .models.users import Users
| 188 | 59 |
"""Views to receive inbound notifications from Mattermost, parse them, and enqueue worker actions."""
import json
import logging
import shlex
from django.conf import settings
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from nautobot_chatops.workers import get_commands_registry, commands_help, parse_command_string
from nautobot_chatops.dispatchers.mattermost import MattermostDispatcher, Driver
from nautobot_chatops.utils import check_and_enqueue_command
from nautobot_chatops.metrics import signature_error_cntr
from nautobot_chatops.models import CommandToken
from nautobot_chatops.choices import CommandTokenPlatformChoices
# pylint: disable=logging-fstring-interpolation
logger = logging.getLogger(__name__)
def verify_signature(request):
    """Verify that a given request was legitimately signed by Mattermost.
    https://developers.mattermost.com/integrate/slash-commands/
    Returns:
        tuple: (valid, reason)
    """
    # Slash commands deliver the token in the Authorization header;
    # interactive messages/dialogs deliver it in the JSON body instead.
    if request.headers.get("Authorization"):
        expected_signature = request.headers.get("Authorization")
    else:
        # For some reason Integration Messages from Mattermost do not show up in POST.items()
        # in these cases, we have to load the request.body
        try:
            data = json.loads(request.body)
        except ValueError as err:
            logger.info("No request body to decode, setting data to empty dict. Error: %s", err)
            data = {}
        if request.POST.items():
            data.update(request.POST)
        # For Interactive Messages, the token will be passed in the context.
        if data.get("context"):
            action = data.get("context")
            expected_signature = action.get("token")
        # For Interactive Dialogs, the token will be passed in the state.
        elif data.get("state"):
            expected_signature = data.get("state")
        else:
            signature_error_cntr.labels("mattermost", "missing_signature").inc()
            return False, "Missing Command Token in Body or Header"
    if not expected_signature:
        signature_error_cntr.labels("mattermost", "missing_signature").inc()
        return False, "Missing Command Token"
    # Strip an optional "Token " prefix. The previous split("Token ")[1]
    # raised an unhandled IndexError (HTTP 500) whenever the prefix was
    # absent, which is the normal case for body-delivered tokens.
    token_value = expected_signature.split("Token ")[-1]
    command_tokens = CommandToken.objects.filter(platform=CommandTokenPlatformChoices.MATTERMOST)
    if not command_tokens.filter(token=token_value):
        signature_error_cntr.labels("mattermost", "incorrect_signature").inc()
        return False, "Incorrect signature"
    return True, "Signature is valid"
@method_decorator(csrf_exempt, name="dispatch")
class MattermostSlashCommandView(View):
"""Handle notifications from a Mattermost /command."""
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
"""Handle an inbound HTTP POST request representing a user-issued /command."""
valid, reason = verify_signature(request)
if not valid:
return HttpResponse(status=401, reason=reason)
command = request.POST.get("command")
if not command:
return HttpResponse("No command specified")
command = command.replace("/", "")
params = request.POST.get("text", "")
context = {
"request_scheme": request.scheme,
"request_host": request.get_host(),
"org_id": request.POST.get("team_id"),
"org_name": request.POST.get("team_domain"),
"channel_id": request.POST.get("channel_id"),
"channel_name": request.POST.get("channel_name"),
"user_id": request.POST.get("user_id"),
"user_name": request.POST.get("user_name"),
"response_url": request.POST.get("response_url"),
"trigger_id": request.POST.get("trigger_id"),
"integration_url": request.build_absolute_uri("/api/plugins/chatops/mattermost/interaction/"),
"token": request.headers.get("Authorization"),
}
try:
command, subcommand, params = parse_command_string(f"{command} {params}")
except ValueError as err:
logger.error("%s", err)
return HttpResponse(status=400, reason=f"'Error: {err}' encountered on '{command} {params}")
registry = get_commands_registry()
if command not in registry:
MattermostDispatcher(context).send_markdown(commands_help(prefix="/"))
return HttpResponse()
MattermostDispatcher(context).send_busy_indicator()
return check_and_enqueue_command(registry, command, subcommand, params, context, MattermostDispatcher)
@method_decorator(csrf_exempt, name="dispatch")
class MattermostInteractionView(View):
"""Handle notifications resulting from a Mattermost interactive block."""
http_method_names = ["post"]
# pylint: disable=too-many-locals,too-many-return-statements,too-many-branches,too-many-statements
def post(self, request, *args, **kwargs):
"""Handle an inbound HTTP POST request representing a user interaction with a UI element."""
valid, reason = verify_signature(request)
if not valid:
return HttpResponse(status=401, reason=reason)
# For some reason Integration Messages from Mattermost do not show up in POST.items()
# in these cases, we have to load the request.body
try:
data = json.loads(request.body)
except ValueError as err:
logger.info("No request body to decode, setting data to empty dict. Error: %s", err)
data = {}
if request.POST.dict():
data.update(request.POST)
context = {
"org_id": data.get("team_id"),
"org_name": data.get("team_domain"),
"channel_id": data.get("channel_id"),
"channel_name": data.get("channel_name"),
"user_id": data.get("user_id"),
"user_name": data.get("user_name"),
"response_url": data.get("response_url"),
"trigger_id": data.get("trigger_id"),
"post_id": data.get("post_id"),
"request_scheme": request.get_host(),
"request_host": request.get_host(),
"integration_url": request.build_absolute_uri("/api/plugins/chatops/mattermost/interaction/"),
}
# Check for channel_name if channel_id is present
mm_url = settings.PLUGINS_CONFIG["nautobot_chatops"]["mattermost_url"]
token = settings.PLUGINS_CONFIG["nautobot_chatops"]["mattermost_api_token"]
if context["channel_name"] is None and context["channel_id"] is not None:
# Build a Mattermost Client Object
mm_client = Driver(
{
"url": mm_url,
"token": token,
}
)
# Get the channel information from Mattermost API
channel_info = mm_client.get(f'/channels/{context["channel_id"]}')
# Assign the Channel name out of the conversations info end point
context["channel_name"] = channel_info["name"]
if context["user_name"] is None and context["user_id"] is not None:
# Build a Mattermost Client Object
mm_client = Driver(
{
"url": mm_url,
"token": token,
}
)
# Get the channel information from Mattermost API
user_info = mm_client.get(f'/users/{context["user_id"]}')
# Assign the Channel name out of the conversations info end point
context["user_name"] = user_info["username"]
# Block action triggered by a non-modal interactive component
if data.get("context"):
action = data.get("context")
action_id = action.get("action_id", "")
context["token"] = action.get("token", "")
if action["type"] == "static_select":
value = action.get("selected_option", "")
elif action["type"] == "button":
value = action.get("value")
else:
logger.error(f"Unhandled action type {action['type']} in Mattermost Dispatcher")
return HttpResponse(status=500)
selected_value = f"'{value}'"
elif data.get("submission"):
# View submission triggered from a modal dialog
logger.info("Submission triggered from a modal dialog")
values = data.get("submission")
context["token"] = data.get("state")
callback_id = data.get("callback_id")
logger.debug(json.dumps(data, indent=2))
# Handling for multiple fields. This will be used when the multi_input_dialog() method of the Mattermost
# Dispatcher class is utilized.
if len(values) > 1:
selected_value = ""
# sometimes in the case of back-to-back dialogs there will be
# parameters included in the callback_id. Below parses those
# out and adds them to selected_value.
try:
cmds = shlex.split(callback_id)
except ValueError as err:
logger.error("Mattermost: %s", err)
return HttpResponse(status=400, reason=f"Error: {err} encountered when processing {callback_id}")
for i, cmd in enumerate(cmds):
if i == 2:
selected_value += f"'{cmd}'"
elif i > 2:
selected_value += f" '{cmd}'"
action_id = f"{cmds[0]} {cmds[1]}"
sorted_params = sorted(values.keys())
for blk_id in sorted_params:
selected_value += f" '{values[blk_id]}'"
# Original un-modified single-field handling below
else:
action_id = sorted(values.keys())[0]
selected_value = values[action_id]
else:
return HttpResponse(status=500, reason="I didn't understand that notification.")
if settings.PLUGINS_CONFIG["nautobot_chatops"].get("delete_input_on_submission"):
# Delete the interactive element since it's served its purpose
# Does not work for Ephemeral Posts.
if context["post_id"] is not None:
MattermostDispatcher(context).delete_message(context["post_id"])
if action_id == "action" and selected_value == "cancel":
# Nothing more to do
return HttpResponse()
logger.info(f"action_id: {action_id}, selected_value: {selected_value}")
try:
command, subcommand, params = parse_command_string(f"{action_id} {selected_value}")
except ValueError as err:
logger.error("%s", err)
return HttpResponse(
status=400, reason=f"Error: {err} encountered on command '{action_id} {selected_value}'"
)
logger.info(f"command: {command}, subcommand: {subcommand}, params: {params}")
registry = get_commands_registry()
if command not in registry:
MattermostDispatcher(context).send_markdown(commands_help())
return HttpResponse()
MattermostDispatcher(context).send_busy_indicator()
return check_and_enqueue_command(registry, command, subcommand, params, context, MattermostDispatcher)
| 11,524 | 3,114 |
from twisted.spread import pb
from twisted.internet import reactor
from tserver import User, CopyUser
import sys, signal
player_count = 0
loop = 0
client = None
game_creator = False
class Client(pb.Referenceable):
def __init__(self, name):
self.name = name
self.players = []
def connected(self, remote_ref):
"""Connected, send information and a remote reference"""
self.server = remote_ref
self.server.callRemote("join", self, self.name)
# TODO: game creator starts game here
if game_creator:
# generate map
self.server.callRemote("start_game", self, map)
main_loop()
def remote_send_players(self, data):
self.players = data
def remote_send_chat_msg(self, msg):
print msg
def remote_server_disconnect(self, msg):
"""Server forced disconnect"""
print "Disconnected:", msg
# Go back to menu
reactor.callLater(0, reactor.stop)
def handle_connect_error(self, error):
print error.value # TODO dialog for this
# callLater workaround for regression in twisted 8.0.1
reactor.callLater(0, reactor.stop())
class RemoteUser(User, pb.RemoteCopy):
    """Client-side counterpart of the server's CopyUser objects."""
    pass

# Tell Perspective Broker to rebuild incoming CopyUser copies as RemoteUser.
pb.setUnjellyableForClass(CopyUser, RemoteUser)
def main_loop():
    """Poll for player-list changes and send a one-off greeting.

    Non-blocking: reschedules itself through the reactor instead of looping.
    """
    global player_count, loop
    cur_players = len(client.players)
    # Only report when the player list has grown since the last tick.
    if cur_players > player_count:
        print "Got new player data:", cur_players, "players"
        print "Names ",
        for i in client.players:
            print i.name,
        print ''
        sys.stdout.flush()
        player_count = cur_players
    #msg = input("Chat: ")
    if loop == 0:
        # Send the greeting exactly once, on the first iteration.
        msg = 'Hello!'
        client.server.callRemote("get_chat_msg", client, msg)
    loop += 1
    reactor.callLater(0, main_loop)
def handle_signal(signal, frame):
    """SIGINT handler: tell the server we are leaving, then stop the reactor."""
    def disconnect(ref=None):
        reactor.callLater(0, reactor.stop)
    # We haven't made a connection yet
    if 'server' not in client.__dict__:
        disconnect()
        # Bug fix: without this return the code below dereferenced the
        # missing client.server attribute and raised AttributeError.
        return
    try:
        d = client.server.callRemote("leave", client)
        d.addCallback(disconnect)
    except pb.DeadReferenceError:
        # The server connection is already gone; just stop locally.
        disconnect()
def connect(name, host, port):
    """Create the global Client, connect to the PB server at host:port and run
    the reactor.  Blocks until the reactor stops."""
    global client
    client = Client(name)
    factory = pb.PBClientFactory()
    reactor.connectTCP(host, port, factory)
    # Once the root object arrives, Client.connected() joins the game;
    # connection failures are routed to handle_connect_error().
    d = factory.getRootObject()
    d.addCallback(client.connected)
    d.addErrback(client.handle_connect_error)
    reactor.run()
# Install the Ctrl-C handler before the reactor takes over the process.
signal.signal(signal.SIGINT, handle_signal)

if __name__ == '__main__':
    connect('markus', 'localhost', 10101) # TODO remove
| 2,592 | 821 |
"""
MIT License
Copyright (c) 2021 NextChai
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import annotations
from .errors import IncorrectType
__all__ = (
'Post',
)
class Post:
    """A custom class to organise different posting types.

    A Post is either automatic (``post_type='auto'``) or interval based
    (``post_type='intervals'``); ``interval`` is the delay in seconds
    between interval posts.

    Raises:
        IncorrectType: if ``post_type`` is neither 'auto' nor 'intervals'.
    """

    __slots__ = ('_is_auto', '_intervals', 'interval')

    def __init__(self, post_type: str, interval: int = 1800):
        if post_type not in {'auto', 'intervals'}:
            raise IncorrectType(f'type {post_type} can not be used with class Post.')

        self._is_auto = post_type == 'auto'
        self._intervals = post_type == 'intervals'
        # BUG FIX: always assign `interval`.  Previously it was only set for
        # 'intervals' posts, so accessing it on an 'auto' post raised
        # AttributeError (this class uses __slots__, so the attribute simply
        # did not exist).
        self.interval = interval

    @property
    def is_auto(self) -> bool:
        """:class:`bool`: Denotes if the Post is automatic."""
        return self._is_auto

    @is_auto.setter
    def is_auto(self, new: bool) -> None:
        self._is_auto = new

    @property
    def uses_intervals(self) -> bool:
        """:class:`bool`: Denotes if the Post uses intervals."""
        return not self.is_auto

    @classmethod
    def auto(cls):
        """Build an automatic Post."""
        return cls('auto')

    @classmethod
    def intervals(cls, seconds: int = 0, minutes: int = 0, hours: int = 0):
        """Build an interval Post from a seconds/minutes/hours breakdown."""
        interval = seconds + minutes*60 + hours*3600
        return cls("intervals", interval=interval)
| 2,401 | 753 |
# Colouring schemes selectable by the consumer; the leading " " entry stands
# for "no explicit scheme".  NOTE(review): these look like NGL viewer color
# scheme identifiers -- confirm against the consuming widget.
COLOR_SCHEMES = [
    " ", "picking", "random", "uniform", "atomindex", "residueindex",
    "chainindex", "modelindex", "sstruc", "element", "resname", "bfactor",
    "hydrophobicity", "value", "volume", "occupancy"
]
| 218 | 83 |
import os
import webbrowser
import numpy as np
import csv
import traceback
import arabic_reshaper
from tkinter import *
from tkinter import messagebox
from tkinter.filedialog import askopenfilename
from PIL import ImageTk, Image
from run_model import create_and_run_model
def make_menu(w):
    """Create the shared right-click context menu and store it in the module
    global `the_menu`.  Entry commands are wired per-widget in show_menu()."""
    global the_menu
    the_menu = Menu(w, tearoff=0)
    # Same three entries, same order, as the per-widget wiring expects.
    for entry_label in ("Cut", "Copy", "Paste"):
        the_menu.add_command(label=entry_label)
def show_menu(e):
    """Right-click handler: point the shared context menu at the clicked
    widget and pop it up at the cursor position."""
    w = e.widget
    # Rebind each entry so the virtual events target the widget under the cursor.
    the_menu.entryconfigure("Cut", command=lambda: w.event_generate("<<Cut>>"))
    the_menu.entryconfigure("Copy", command=lambda: w.event_generate("<<Copy>>"))
    the_menu.entryconfigure("Paste", command=lambda: w.event_generate("<<Paste>>"))
    the_menu.tk.call("tk_popup", the_menu, e.x_root, e.y_root)
def main_window():
    """Build and run the main Tk window for the Urdu handwriting recogniser."""
    window = Tk()
    make_menu(window)
    window.title("Urdu Handwriting Recognition System")
    window.geometry('1000x1000')
    title = Label(window, text="Urdu Handwriting Recognition System", font=("Arial Bold", 30))
    title.grid(column=1, row=0, columnspan=10)
    window.grid_rowconfigure(0, minsize=100)
    window.grid_rowconfigure(1, minsize=70)
    # The outermost columns absorb extra width so the content stays centred.
    window.grid_columnconfigure(0, weight=1)
    window.grid_columnconfigure(11, weight=1)
    col_path = 3
    row_path = 2
    display_path = Label(window, text="Enter Image Path: ")
    display_path.grid(column=col_path, row=row_path)
    window.grid_rowconfigure(row_path+1, minsize=50)
    window.grid_rowconfigure(row_path+2, minsize=100)
    display_image = Label(window, image='')
    display_image.grid(column=col_path-2, row=row_path+2, columnspan=10)
    display_raw_output = Label(window, text='', font=("Arial Bold", 15))
    display_raw_output.grid(column= col_path-2, row=row_path+3, columnspan=10)
    window.grid_rowconfigure(row_path+3, minsize=60)
    # An Entry (not a Label) so the recognised text can be selected and copied.
    display_output = Entry(window, width=40, justify='right')
    display_output.bind_class("Entry", "<Button-3><ButtonRelease-3>", show_menu)
    display_output.grid(column= col_path-2, row=row_path+4, columnspan=10)
    get_image_path = Entry(window,width=40)
    get_image_path.bind_class("Entry", "<Button-3><ButtonRelease-3>", show_menu)
    get_image_path.grid(column=col_path+1, row=row_path)
    get_image_path.focus()
    def select():
        """Browse for an image, preview it and clear previous results."""
        image_path = askopenfilename()
        get_image_path.delete(0, END)
        get_image_path.insert(0, image_path)
        img = ImageTk.PhotoImage(Image.open(image_path))
        display_image.configure(image = img)
        # Keep a reference on the widget so Tk doesn't garbage-collect the photo.
        display_image.image = img
        display_raw_output.configure(text = '')
        display_output.delete(0, END)
    def clicked():
        """Validate the path, run the model and display/export the output."""
        image_path = get_image_path.get()
        # BUG FIX: was `image_path is ''` -- identity comparison with a string
        # literal is implementation-dependent; compare by value instead.
        if image_path == '':
            messagebox.showinfo("Error", "Select an image")
        elif not os.path.isfile(image_path):
            messagebox.showinfo("Error", "File does not exist")
        else:
            img = ImageTk.PhotoImage(Image.open(image_path))
            display_image.configure(image = img)
            display_image.image = img
            output = create_and_run_model('CONV_BLSTM_CTC', None, image_path)
            raw_output, join_char = get_urdu_output(output)
            # Export the joined text and open it in the default viewer, which
            # renders the Urdu script better than the Tk widgets do.
            with open("output.txt", "w") as text_file:
                text_file.write("%s" % join_char)
            webbrowser.open("output.txt")
            display_raw_output.configure(text = raw_output)
            display_output.delete(0, END)
            display_output.insert(0, join_char)
    browse = Button(window, text="Browse", command=select)
    browse.grid(column=col_path+2, row=row_path)
    recognize = Button(window, text="Recognize", command=clicked)
    recognize.grid(column=col_path+3, row=row_path)
    window.mainloop()
def get_urdu_output(output):
    """Translate model label indices into Urdu glyph names and a joined string.

    Args:
        output: sequence of integer class labels produced by the model.
    Returns:
        (urdu_output, join_char): per-label glyph-name strings (in reverse
        order relative to `output`) and a display string of first characters
        with a space inserted after word-final/isolated forms.
    """
    # Lookup-table CSV maps glyph name -> label index; invert it here.
    # NOTE(review): assumes row[0] is the glyph name and row[1] the integer
    # label -- confirm against the file format.
    lt_file = 'data/segmented_cc/labels/lt_char.csv'
    lt = {}
    with open(lt_file, 'r', encoding='utf8') as file:
        text = csv.reader(file)
        for row in text:
            lt[int(row[1])] = row[0]
    # Labels are walked in reverse here...
    urdu_output = [lt[output[i]] for i in range(len(output)-1, -1, -1)]
    join_char = ''
    # ...and reversed again below, so characters come out in original order.
    for i in range(len(urdu_output)-1, -1, -1):
        #for i in range(0, len(urdu_output)):
        join_char += urdu_output[i][0]
        # Glyph names ending in 'final'/'isolated' terminate a word.
        if urdu_output[i][2:] == 'final' or urdu_output[i][2:] == 'isolated':
            join_char += ' '
    #join_char = arabic_reshaper.reshape(join_char)
    return urdu_output, join_char
# Launch the GUI only when run as a script, not on import.
if __name__ == "__main__":
    main_window()
| 5,007 | 1,741 |
import logging
import os
from crispy.lib.myparser import CrispyArgumentParser
from rpyc.utils.classic import download
from crispy.lib.module import *
from crispy.lib.fprint import *
logger = logging.getLogger(__name__)
__class_name__ = "DownloadModule"
class DownloadModule(CrispyModule):
    """ Download file from remote machine. """
    compatible_systems = ['all']

    def check_args(self, args):
        """Parse <remote_path> <local_path> out of the raw argument list."""
        self.parser = CrispyArgumentParser(prog="download", description=self.__doc__)
        self.parser.add_argument("remote_file", metavar="<remote_path>", type=str)
        self.parser.add_argument("local_file", metavar="<local_path>", type=str)
        return self.parser.parse_args(args)

    def run(self, args):
        """Fetch args.remote_file from the rpyc client into args.local_file."""
        logger.debug("run(args) was called.")
        # Refuse to clobber an existing local file.
        if os.path.isfile(args.local_file):
            warning("\"{}\" already exists locally.".format(args.local_file))
        else:
            logger.info("Attempting to download file...")
            info("Attempting to download file...")
            try:
                download(self.client.conn, args.remote_file, args.local_file)
                logger.info("File transfer complete.")
                success("File transfer complete.")
            except KeyboardInterrupt:
                # User aborted mid-transfer; nothing else to clean up here.
                logger.info("Caught Ctrl-C")
            except ValueError as e:
                # NOTE(review): `e` is discarded, so the user never sees why
                # the transfer failed -- consider including str(e).
                error("Cannot download file")
| 1,394 | 393 |
"""
Copyright (c) 2014-2015-2015, The University of Texas at Austin.
All rights reserved.
This file is part of BLASpy and is available under the 3-Clause
BSD License, which can be found in the LICENSE file at the top-level
directory or at http://opensource.org/licenses/BSD-3-Clause
"""
from ..helpers import (get_vector_dimensions, get_square_matrix_dimension, get_cblas_info,
check_equal_sizes, convert_uplo, convert_trans, convert_diag, ROW_MAJOR)
from ctypes import c_int, POINTER
def trmv(A, x, uplo='u', trans_a='n', diag='n', lda=None, inc_x=1):
    """
    Perform a triangular matrix-vector multiplication operation.

    x := A * x

    where A is a triangular matrix and x is a general column vector.

    The 'uplo' argument indicates whether the lower or upper triangle of A is to be referenced and
    updated by the operation. The 'trans_a' argument allows the computation to proceed as if A is
    transposed. The 'diag' argument indicates whether the diagonal of A is unit or non-unit.

    Vector x can be passed in as either row or column vector. If necessary, an implicit
    transposition occurs.

    Args:
        x:        2D NumPy matrix or ndarray representing vector x
        A:        2D NumPy matrix or ndarray representing matrix A

        --optional arguments--

        uplo:     'u'  if the upper triangle of A is to be used
                  'l'  if the lower triangle A is to be used
                  < default is 'u' >
        trans_a:  'n'  if the operation is to proceed normally
                  't'  if the operation is to proceed as if A is transposed
                  < default is 'n' >
        diag:     'n'  if the diagonal of A is non-unit
                  'u'  if the diagonal of A is unit
                  < default is 'n' >
        lda:      leading dimension of A (must be >= # of cols in A)
                  < default is the number of columns in A >
        inc_x:    stride of x (increment for the elements of x)
                  < default is 1 >

    Returns:
        Vector x (which is also overwritten)

    Raises:
        ValueError: if any of the following conditions occur:
                    - A or x is not a 2D NumPy ndarray or NumPy matrix
                    - A and x do not have the same dtype or that dtype is not supported
                    - A is not a square matrix
                    - x is not a vector
                    - the effective length of x does not equal the dimension of A
                    - uplo is not equal to one of the following: 'u', 'U', 'l', 'L'
                    - trans_a is not equal to one of the following: 'n', 'N', 't', 'T'
                    - diag is not equal to one of the following: 'n', 'N', 'u', 'U'
    """
    # get the dimensions of the parameters
    m_x, n_x, x_length = get_vector_dimensions('x', x, inc_x)
    dim_A = get_square_matrix_dimension('A', A)
    # assign a default value to lda if necessary (assumes row-major order)
    if lda is None:
        lda = dim_A
    # ensure the parameters are appropriate for the operation
    check_equal_sizes('A', dim_A, 'x', x_length)
    # convert to appropriate CBLAS values
    cblas_uplo = convert_uplo(uplo)
    cblas_trans_a = convert_trans(trans_a)
    cblas_diag = convert_diag(diag)
    # determine which CBLAS subroutine to call and which ctypes data type to use
    cblas_func, data_type = get_cblas_info('trmv', (A.dtype, x.dtype))
    # create a ctypes POINTER for each vector and matrix
    ctype_x = POINTER(data_type * n_x * m_x)
    ctype_A = POINTER(data_type * dim_A * dim_A)
    # call CBLAS using ctypes; the C routine writes the result in place into x
    cblas_func.argtypes = [c_int, c_int, c_int, c_int, c_int, ctype_A, c_int, ctype_x, c_int]
    cblas_func.restype = None
    cblas_func(ROW_MAJOR, cblas_uplo, cblas_trans_a, cblas_diag, dim_A,
               A.ctypes.data_as(ctype_A), lda, x.ctypes.data_as(ctype_x), inc_x)
    return x # x is also overwritten
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from utils.torch_utils import *
class Content_Encoder(nn.Module):
    """Encodes an RGB image into a content feature map.

    Pipeline: 7x7 stem conv, two stride-2 down-sampling convs (doubling the
    channel count each time), then `repeat_num` residual blocks.  The final
    channel count is exposed as `self.curr_dim`.
    """
    def __init__(self, conv_dim=64, repeat_num=4, norm='in', activation='relu'):
        super(Content_Encoder, self).__init__()
        stages = [ConvBlock(3, conv_dim, 7, 1, 3, norm=norm, activation=activation)]
        dim = conv_dim
        # Two stride-2 stages: halve the spatial size, double the channels.
        for _ in range(2):
            stages.append(ConvBlock(dim, dim * 2, 4, 2, 1, norm=norm, activation=activation))
            dim *= 2
        # Residual bottleneck at constant width.
        stages.extend(ResidualBlock(dim=dim, norm=norm, activation=activation)
                      for _ in range(repeat_num))
        self.main = nn.Sequential(*stages)
        self.curr_dim = dim

    def forward(self, x):
        return self.main(x)
class Style_Encoder(nn.Module):
    """Encodes an RGB image into a (B, C, 1, 1) style code.

    Pipeline: unnormalised 7x7 stem, two stride-2 convs doubling the width,
    two more stride-2 convs at constant width, then global average pooling.
    The final channel count is exposed as `self.curr_dim`.
    """
    def __init__(self, conv_dim=64, n_group=32, norm='ln', activation='relu'):
        super(Style_Encoder, self).__init__()
        dim = conv_dim
        stages = [ConvBlock(3, dim, 7, 1, 3, norm='none', n_group=n_group, activation=activation)]
        # Two stride-2 stages that double the channel count.
        for _ in range(2):
            stages.append(ConvBlock(dim, dim * 2, 4, 2, 1, norm=norm, n_group=n_group, activation=activation))
            dim *= 2
        # Two further stride-2 stages at constant width.
        for _ in range(2):
            stages.append(ConvBlock(dim, dim, 4, 2, 1, norm=norm, n_group=n_group, activation=activation))
        # Collapse the remaining spatial grid into a single style vector.
        stages.append(nn.AdaptiveAvgPool2d(1))
        self.main = nn.Sequential(*stages)
        self.curr_dim = dim

    def forward(self, x):
        return self.main(x)
class MLP(nn.Module):
    """Simple fully-connected stack: input -> hidden x (num_block+1) -> output.

    The input is flattened to (batch, -1) before the first layer; the final
    layer has no normalisation and no activation.
    """
    def __init__(self, input_dim, output_dim, dim, num_block=1, norm='none', n_group=32, activation='relu'):
        super(MLP, self).__init__()
        stack = [LinearBlock(input_dim, dim, norm=norm, n_group=n_group, activation=activation)]
        stack += [LinearBlock(dim, dim, norm=norm, n_group=n_group, activation=activation)
                  for _ in range(num_block)]
        # Output head: raw scores, no normalisation or activation.
        stack.append(LinearBlock(dim, output_dim, norm='none', activation='none'))
        self.main = nn.Sequential(*stack)

    def forward(self, x):
        flattened = x.view(x.size(0), -1)
        return self.main(flattened)
class WCT(nn.Module):
    """Group-wise whitening-and-coloring transform (GDWCT).

    Whitens the content feature c_A per channel group, re-colors it with the
    per-group covariance of the style feature s_B, and blends the result with
    the original content using the learnable scalar ``alpha``.
    NOTE(review): input_dim, mlp_dim and bias_dim are accepted but unused
    here, and ``mask`` is stored but never read in this class -- confirm
    against callers before removing.
    """
    def __init__(self, n_group, device, input_dim, mlp_dim, bias_dim, mask, w_alpha=0.4):
        super(WCT, self).__init__()
        self.G = n_group  # number of channel groups
        self.device = device
        # Blend weight, initialised to 1 - w_alpha; learned during training.
        self.alpha = nn.Parameter(torch.ones(1)-w_alpha)
        self.mask = mask
        # Differentiable matrix square root / inverse square root
        # (custom autograd Functions star-imported from utils.torch_utils).
        self.sqrt_root = MPA_Lya.apply
        self.sqrt_root_inv = MPA_Lya_Inv.apply
    def forward(self, c_A, s_B):
        return self.wct(c_A, s_B)
    def wct(self, c_A, s_B):
        """Apply whitening to c_A and coloring from s_B; both are (B,C,H,W)."""
        B,C,H,W = c_A.size()
        n_mem = C // self.G  # channels per group (e.g. 32 when C=256, G=8)
        eps = 1e-5  # ridge term keeping each covariance invertible
        # --- Whitening: remove per-group mean and correlations from content ---
        c_A_ = c_A.view(self.G*B, n_mem, -1) # B,C,H,W => GB,C//G,HW
        c_A_mean = torch.mean(c_A_, dim=2, keepdim=True)
        c_A_ = c_A_ - c_A_mean # GB,C//G,HW
        # Sample covariance per group (unbiased: divide by HW - 1).
        cov_c = torch.bmm(c_A_, c_A_.transpose(1,2)).div(H * W - 1) + eps*torch.eye(n_mem).unsqueeze(0).to(self.device) # GB,C//G,C//G
        cov_c_inv_sqrt=self.sqrt_root_inv(cov_c)
        whitened = cov_c_inv_sqrt.bmm(c_A_)
        # --- Coloring: impose the style feature's per-group covariance ---
        s_B_ = s_B.view(self.G*B, n_mem, -1) # B,C,H,W => GB,C//G,HW
        s_B_mean = torch.mean(s_B_, dim=2, keepdim=True)
        s_B_ = s_B_ - s_B_mean # GB,C//G,HW
        cov_b = torch.bmm(s_B_, s_B_.transpose(1,2)).div(H * W - 1) + eps * torch.eye(n_mem).unsqueeze(0).to(self.device) # GB,C//G,C//G
        cov_b_sqrt=self.sqrt_root(cov_b)
        colored_B = cov_b_sqrt.bmm(whitened).view(B, C, H, W)
        # Blend: add the style mean back, then mix with the original content.
        return self.alpha * (colored_B + s_B_mean.view(B,C,1,1)) + (1 - self.alpha) * c_A
class Decoder(nn.Module):
    """Decodes a content code into an image, injecting style via repeated GDWCT hops."""
    def __init__(self, input_dim, mask, n_group, bias_dim, mlp_dim, repeat_num=4,
                 norm='ln', device=None):
        super(Decoder, self).__init__()
        curr_dim = input_dim
        # Bottleneck layers (unnormalised residual blocks).
        self.resblocks = nn.ModuleList([ResidualBlock(dim=curr_dim, norm='none', n_group=n_group) for i in range(repeat_num)])
        # One WCT module before the first resblock plus one after every resblock.
        self.gdwct_modules = nn.ModuleList([WCT(n_group, device, input_dim, mlp_dim, bias_dim, mask) for i in range(repeat_num+1)])
        # Up-sampling layers: two nearest-neighbour x2 stages, halving channels.
        layers = []
        for i in range(2):
            layers += [Upsample(scale_factor=2, mode='nearest')]
            layers += [ConvBlock(curr_dim, curr_dim//2, 5, 1, 2, norm=norm, n_group=n_group)]
            curr_dim = curr_dim // 2
        # Final projection to RGB with tanh output in [-1, 1].
        layers += [ConvBlock(curr_dim, 3, 7, 1, 3, norm='none', activation='tanh')]
        self.main = nn.Sequential(*layers)
    def forward(self, c_A, s_B):
        """Apply style s_B to content c_A via multi-hop GDWCT, then upsample."""
        # Multi-hops: WCT -> resblock -> WCT -> resblock -> ... -> WCT.
        for i, resblock in enumerate(self.resblocks):
            if i == 0:
                c_A = self.gdwct_modules[i](c_A, s_B)
            c_A = resblock(c_A)
            c_A = self.gdwct_modules[i+1](c_A, s_B)
        return self.main(c_A)
class Generator(nn.Module):
    """Generator network."""
    def __init__(self, conv_dim=64, repeat_num=8, mask=None, n_group=16,
                 mlp_dim=256, bias_dim=512, content_dim=256, device=None):
        super(Generator, self).__init__()
        # Half of repeat_num residual blocks go to the encoder, half to the decoder.
        self.c_encoder = Content_Encoder(conv_dim, repeat_num//2, norm='in',activation='relu')
        self.s_encoder = Style_Encoder(conv_dim, n_group, norm= 'gn', activation='relu')
        self.decoder = Decoder(content_dim, mask, n_group, bias_dim, mlp_dim, repeat_num//2, norm='ln', device=device)
    def forward(self, c_A, s_B_):
        # NOTE(review): forward expects already-encoded inputs -- the training
        # loop presumably calls c_encoder/s_encoder itself; confirm callers.
        return self.decoder(c_A, s_B_)
class ResidualBlock(nn.Module):
    """Two-conv residual block: out = x + conv(act(conv(x))).

    The second convolution has no activation so the skip addition happens on
    a linear output.
    """
    def __init__(self, dim, norm='in', n_group=32, activation='relu', use_affine=True):
        super(ResidualBlock, self).__init__()
        self.main = nn.Sequential(
            ConvBlock(dim, dim, 3, 1, 1, norm=norm, n_group=n_group,
                      activation=activation, use_affine=use_affine),
            ConvBlock(dim, dim, 3, 1, 1, norm=norm, n_group=n_group,
                      activation='none', use_affine=use_affine),
        )

    def forward(self, x):
        return x + self.main(x)
class ConvBlock(nn.Module):
    """pad -> Conv2d -> optional normalisation -> optional activation.

    norm: 'in' | 'ln' | 'bn' | 'gn' | 'none'.  activation: 'relu' | 'lrelu' |
    'prelu' | 'tanh' | 'sigmoid' | 'none'.  pad_type: 'mirror' | 'zero'.
    NOTE(review): an unrecognised option string leaves the corresponding
    attribute unassigned (there is no else/raise branch), so forward() would
    fail with AttributeError.
    """
    def __init__(self, input_dim, output_dim, k, s, p, dilation=False, norm='in', n_group=32,
                 activation='relu', pad_type='mirror', use_affine=True, use_bias=True):
        super(ConvBlock, self).__init__()
        # Init Normalization
        if norm == 'in':
            self.norm = nn.InstanceNorm2d(output_dim, affine=use_affine, track_running_stats=True)
        elif norm == 'ln':
            # Layer norm implemented as a single-group GroupNorm.
            self.norm = nn.GroupNorm(1, output_dim)
        elif norm == 'bn':
            self.norm = nn.BatchNorm2d(output_dim)
        elif norm == 'gn':
            self.norm = nn.GroupNorm(n_group, output_dim)
        elif norm == 'none':
            self.norm = None
        # Init Activation
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.01, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU(num_parameters=1, init=0.25)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'sigmoid':
            self.activation = nn.Sigmoid()
        elif activation == 'none':
            self.activation = None
        # Init pad-type (padding is applied explicitly before the conv)
        if pad_type == 'mirror':
            self.pad = nn.ReflectionPad2d(p)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(p)
        # initialize convolution
        # NOTE(review): when dilation=True, `p` is reused as the dilation rate
        # while self.pad still pads by `p` -- confirm this matches the intent.
        if dilation:
            self.conv = nn.Conv2d(input_dim, output_dim, k, s, dilation=p, bias=use_bias)
        else:
            self.conv = nn.Conv2d(input_dim, output_dim, k, s, bias=use_bias)
    def forward(self, x):
        # pad -> conv -> (norm) -> (activation)
        x = self.conv(self.pad(x))
        if self.norm:
            x = self.norm(x)
        if self.activation:
            x = self.activation(x)
        return x
class LinearBlock(nn.Module):
    """Linear -> optional normalisation -> optional activation.

    norm: 'ln' | 'gn' | 'none'.  activation: 'relu' | 'lrelu' | 'prelu' |
    'tanh' | 'sigmoid' | 'none'.  NOTE(review): as in ConvBlock, an
    unrecognised option string leaves the attribute unassigned.
    """
    def __init__(self, input_dim, output_dim, norm='ln', n_group=32, activation='relu', use_affine=True):
        super(LinearBlock, self).__init__()
        use_bias = True
        # initialize fully connected layer
        self.fc = nn.Linear(input_dim, output_dim, bias=use_bias)
        # Init Normalization
        if norm == 'ln':
            # Layer norm implemented as a single-group GroupNorm.
            self.norm = nn.GroupNorm(1, output_dim)
        elif norm == 'gn':
            self.norm = nn.GroupNorm(n_group, output_dim)
        elif norm == 'none':
            self.norm = None
        # Init Activation
        if activation == 'relu':
            self.activation = nn.ReLU(inplace=True)
        elif activation == 'lrelu':
            self.activation = nn.LeakyReLU(0.01, inplace=True)
        elif activation == 'prelu':
            self.activation = nn.PReLU(num_parameters=1, init=0.25)
        elif activation == 'tanh':
            self.activation = nn.Tanh()
        elif activation == 'sigmoid':
            self.activation = nn.Sigmoid()
        elif activation == 'none':
            self.activation = None
    def forward(self, x):
        out = self.fc(x)
        if self.norm:
            out = self.norm(out)
        if self.activation:
            out = self.activation(out)
        return out
class Upsample(nn.Module):
    """Thin nn.Module wrapper around F.interpolate.

    Exactly one of `size` or `scale_factor` should be given; `mode` and
    `align_corners` are forwarded unchanged to F.interpolate.
    """
    def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=None):
        super(Upsample, self).__init__()
        self.size = size
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, input):
        return F.interpolate(input, self.size, self.scale_factor, self.mode, self.align_corners)

    def extra_repr(self):
        # Report whichever sizing argument is in effect, then the mode.
        detail = ('size=' + str(self.size)
                  if self.scale_factor is None
                  else 'scale_factor=' + str(self.scale_factor))
        return detail + ', mode=' + self.mode
class Discriminator(nn.Module):
    # Multi-scale discriminator architecture: the same PatchGAN-style CNN is
    # applied to the input and to successively 2x-downsampled copies.
    # NOTE(review): `Variable` below is not imported in this file's visible
    # imports -- presumably supplied by the star import from utils.torch_utils;
    # confirm (it is a no-op wrapper in modern PyTorch).
    def __init__(self, input_dim, params):
        super(Discriminator, self).__init__()
        # Hyper-parameters come from a config dict.
        self.n_layer = params['N_LAYER']        # conv layers per scale
        self.gan_type = params['GAN_TYPE']      # 'lsgan' or 'nsgan'
        self.dim = params['FIRST_DIM']          # channels after first conv
        self.norm = params['NORM']
        self.activ = params['ACTIVATION']
        self.num_scales = params['NUM_SCALES']  # number of image pyramid levels
        self.pad_type = params['PAD_TYPE']
        self.input_dim = input_dim
        self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
        self.cnns = nn.ModuleList()
        for _ in range(self.num_scales):
            self.cnns.append(self._make_net())
    def _make_net(self):
        """Build one per-scale CNN ending in a 1-channel patch score map."""
        dim = self.dim
        cnn_x = []
        cnn_x += [ConvBlock(self.input_dim, dim, 4, 2, 1, norm='none', activation=self.activ, pad_type=self.pad_type)]
        for i in range(self.n_layer - 1):
            cnn_x += [ConvBlock(dim, dim * 2, 4, 2, 1, norm=self.norm, activation=self.activ, pad_type=self.pad_type)]
            dim *= 2
        cnn_x += [nn.Conv2d(dim, 1, 1, 1, 0)]
        cnn_x = nn.Sequential(*cnn_x)
        return cnn_x
    def forward(self, x):
        # One score map per scale; x is downsampled between scales.
        outputs = []
        for model in self.cnns:
            outputs.append(model(x))
            x = self.downsample(x)
        return outputs
    def calc_dis_loss(self, input_fake, input_real):
        # calculate the loss to train D (fake -> 0, real -> 1 at every scale)
        outs0 = self.forward(input_fake)
        outs1 = self.forward(input_real)
        loss = 0
        for it, (out0, out1) in enumerate(zip(outs0, outs1)):
            if self.gan_type == 'lsgan':
                loss += torch.mean((out0 - 0)**2) + torch.mean((out1 - 1)**2)
            elif self.gan_type == 'nsgan':
                all0 = Variable(torch.zeros_like(out0.data).cuda(), requires_grad=False)
                all1 = Variable(torch.ones_like(out1.data).cuda(), requires_grad=False)
                # NOTE(review): F.sigmoid is deprecated in favour of torch.sigmoid.
                loss += torch.mean(F.binary_cross_entropy(F.sigmoid(out0), all0) +
                                   F.binary_cross_entropy(F.sigmoid(out1), all1))
            else:
                assert 0, "Unsupported GAN type: {}".format(self.gan_type)
        return loss
    def calc_gen_loss(self, input_fake):
        # calculate the loss to train G (fake should score as real -> 1)
        outs0 = self.forward(input_fake)
        loss = 0
        for it, (out0) in enumerate(outs0):
            if self.gan_type == 'lsgan':
                loss += torch.mean((out0 - 1)**2) # LSGAN
            elif self.gan_type == 'nsgan':
                all1 = Variable(torch.ones_like(out0.data).cuda(), requires_grad=False)
                loss += torch.mean(F.binary_cross_entropy(F.sigmoid(out0), all1))
            else:
                assert 0, "Unsupported GAN type: {}".format(self.gan_type)
        return loss
def test_late_crimes_2():
    # Spot check: row 7 of `late_crimes` should record an HOUR of 20 (8 pm).
    # NOTE(review): `late_crimes` is presumably a pandas DataFrame defined
    # elsewhere in the notebook/test module -- confirm the fixture exists.
    assert late_crimes.loc[7, 'HOUR'] == 20
import json
import pythonosc
import argparse
import math
import datetime
from pythonosc import dispatcher, osc_server, udp_client, osc_message_builder
import requests
from collections import OrderedDict
from statistics import mean
## Broadcast/bind addresses and ports for the OSC mesh; edit here to retarget.
## testing if Git works with ST
ip_osc = '192.168.1.255'  # LAN broadcast address for outgoing OSC messages
##ip_osc = '192.168.0.255'
ip_osc_server='0.0.0.0'  # bind on all interfaces for the local OSC server
ip_osc_editor='196.168.1.255'  # NOTE(review): likely a typo for 192.168.1.255 -- confirm
## ip_osc = '10.253.0.255'
port_server = 7007
port_client = 7007
port_client_editor = 7007
api_url = "http://frankenstein.hunterowens.net/"  # AI backend base URL
## Shared machine state; ordered so listeners receive fields in a fixed order.
current_state = OrderedDict()
current_state["/state"] = "calm"
current_state["/action"] = "start"
current_state["/sentiment"] = 0.0
current_state["/energy"] = 0.0
current_state["/focus"] = 0.0
def change_state(current_state, new_state):
    """Set the '/state' entry of *current_state* to *new_state*.

    Mutates the mapping in place and returns the very same object so the
    call can be chained.
    """
    current_state['/state'] = new_state
    print("New State Set to {0}".format(current_state))
    return current_state
def send_surface_state_to_ai(sentiment, energy, focus):
    """POST the aggregated surface readings to the AI backend.

    sentiment, energy and focus are floats averaged over the collected
    surface messages.  Returns the `requests` Response object.
    """
    # BUG FIX: the original print referenced the undefined module globals
    # current_focus/current_energy/current_sentiment (they are locals of
    # surface_handler), raising NameError -- use the parameters instead.
    print("AI State is: {0} focus, {1} energy, and {2} sentiment".format(focus, energy, sentiment))
    data = {
        'focus': focus,
        'sentiment': sentiment,
        'energy': energy
    }
    r = requests.post(api_url + 'interact-surface', data = data)
    return r
def send_answer_to_ai(answer):
    """POST the visitor's answer string to the AI backend as JSON.

    Returns the `requests` Response object.
    """
    print("Answer sending ", answer)
    headers = {
        "content-type": "application/json"
    }
    r = requests.post(api_url + 'interact',
                      json={'string': answer},
                      headers=headers)
    return r
def get_api_interact_data():
    """Fetch the AI state and fold it into the shared ``current_state`` dict.

    Falls back to a pickled default response when the backend is unhealthy.
    Returns the decoded response data (a dict).
    """
    print("Getting Data from AI")
    r = requests.get(api_url + 'interact')
    if r.status_code == 200:
        data = r.json()
    else:
        # BUG FIX: `pickle` was never imported at module level, so this
        # fallback raised NameError; import locally and close the file
        # instead of leaking the handle.
        import pickle
        with open('./default-api-response.p', 'rb') as fallback:
            data = pickle.load(fallback)
        print("Using Default Data: {}".format(data))
    # Mutate the shared state in place so every broadcaster sees the update.
    current_state['/state'] = data['state']
    current_state['/sentiment'] = data['sentiment']
    current_state['/focus'] = data['focus']
    current_state['/energy'] = data['energy']
    print('state updated')
    return data
def setup():
    """
    sets AI in waiting state
    """
    r = requests.get(api_url + "reset")
    print("AI Init State Waiting")
    # get_api_interact_data() mutates the module-level current_state dict;
    # the assignment below only shadows the global name with a local.
    current_state = get_api_interact_data()
    ##pull text from AI
    return None
def osc_dispatch(addr, msg, ip=ip_osc, port=port_client):
    """
    Dispatches a message in state change over OSC to all listeners
    """
    # Fresh UDP client per call; the third positional argument presumably
    # enables broadcast mode -- confirm against the pythonosc version in use.
    client = udp_client.UDPClient(ip, port,1)
    ## SimpleOSCClientRedux(client)
    ## client._sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    print("Sent {0} with {1} to {2} at {3}".format(addr, msg, ip, port))
    builder = osc_message_builder.OscMessageBuilder(address=addr)
    builder.add_arg(msg)
    client.send(builder.build())
    ## print(client(addr, msg))
    return None
def broadcast_state(state=current_state, ip=ip_osc, port=port_client):
    """Broadcast the machine state to all OSC listeners.

    Currently a stub: the actual OSC send is disabled and only a trace line
    is printed.  The signature is preserved so existing callers keep working.
    """
    print("Called Broadcast State Function")
    return None
def broadcast_text(AItext):
    """
    send a fixed piece of text from the AI
    add delay into this OSC as second args
    get text somehow
    """
    # Push the text to the editor port, then re-broadcast the machine state.
    osc_dispatch('/textnoquest', AItext, port=port_client_editor)
    print("Updating State")
    broadcast_state()
    return None
def send_questions_to_line_editor():
    """
    Sends data for display to Line Editor
    """
    # Pull the latest question set from the AI backend.
    data = get_api_interact_data()['questions']
    print("Called send question to the line editor")
    #client = udp_client.UDPClient(ip_osc_editor, port_client_editor,1)
    #builder = osc_message_builder.OscMessageBuilder(address='/textques')
    #for k,v in data.items():
    #    builder.add_arg(v)
    #builder.add_arg(.75)
    #print('builder ', builder.address)
    #client.send(builder.build())
    #osc_dispatch('/textquest', .75, ip=ip_osc_server, port=port_client_editor)
    # print("sent {0} to {1}:{2}".format(builder.args, ip_osc_editor, port_client_editor))
    # NOTE(review): this sends to the broadcast address (ip_osc), not
    # ip_osc_editor as the dead code above suggests -- confirm intent.
    ip=ip_osc
    port=port_client_editor
    client = udp_client.UDPClient(ip, port,1)
    print("Send Data to Line Editor {}:{}", ip, port)
    builder = osc_message_builder.OscMessageBuilder(address='/textques')
    # One OSC argument per question value (dict iteration order).
    for k,v in data.items():
        print(k,v)
        builder.add_arg(v)
    client.send(builder.build())
    print("sent {0} to {1}:{2}".format(builder.args, ip, port))
    broadcast_state()
    return None
# Raw per-message readings accumulated from the surfaces; averaged and
# forwarded to the AI when the surfaces close.
surface_data = []

def surface_handler(unused_addr, args):
    """
    Handles the surface messages, alts sentiment
    Surface argument to be OSC String Formatted as followed
    "sentiment: value; focus: value; energy: value"
    """
    print("Got Surface Message")
    try:
        vals = json.loads(args)
        ## surfaces need to be directed to pi, look in js/machineConfiguration.json
    except ValueError:
        print("Unable to decode JSON from Surface")
        # NOTE(review): exit() kills the whole OSC server on a single bad
        # message -- probably meant to just drop the message; confirm.
        exit()
    # NOTE(review): the names below are locals, not globals -- the unpacked
    # values are discarded; only the dict appended to surface_data survives.
    current_sentiment = vals['sentiment']
    current_focus = vals['focus']
    current_energy = vals['energy']
    current_unit = vals['unit']
    print("From Surface Unit {0}".format(current_unit))
    current_words = vals['words']
    current_parts = vals['parts']
    surface_data.append(vals)
    return None
def reset_handler(unused_addr, args):
    """Handle the reset message from the Editor: re-initialise the AI,
    drop collected surface data and walk the state back to 'expectant'."""
    ## TODO: Implement
    print("reset handler")
    setup()
    # BUG FIX: `surface_data = []` created a local that shadowed the module
    # list, so collected readings were never actually discarded.  Clear the
    # shared list in place instead.
    del surface_data[:]
    current_state.update({'/action': 'start'})
    broadcast_state()
    current_state.update({'/action': 'expectant'})
    return None
def answer_handler(unused_addr, args):
    """
    Starts answering
    """
    print("send answer to ai")
    # Forward the visitor's answer, switch to 'thinking', refresh the editor.
    send_answer_to_ai(args)
    current_state.update({'/action': 'thinking'})
    broadcast_state()
    ## Call line editor
    send_questions_to_line_editor()
    return None
def refresh_handler(unused_addr, args):
    """Re-send the current question set to the line editor."""
    print("Refreshing text")
    send_questions_to_line_editor()
    return None

def talking_handler(unused_addr, args):
    """Switch the action state to 'talking' and push questions to the editor."""
    print("talking handler")
    current_state['/action'] = 'talking'
    broadcast_state()
    send_questions_to_line_editor()
    return None

def question_handler(unused_addr, args):
    """Switch the action state to 'question'."""
    print('question handler')
    current_state['/action'] = 'question'
    broadcast_state()
    return None

def thinking_handler(unsused_addr, args):
    """Switch the action state to 'thinking'."""
    print('thinking handler')
    current_state['/action'] = 'thinking'
    broadcast_state()
    return None

def silent_handler(unused_addr, args):
    """Return to the 'expectant' action state once TTS playback ends."""
    print("silence handles")
    current_state['/action'] = 'expectant'
    broadcast_state()
    return None
def surfacestart_handler(unused_addr, args):
    """Broadcast the start cue to every surface."""
    print("Blasting Start to the Surfaces")
    osc_dispatch('/start-surface', 1)

def surfacereset_handler(unused_addr, args):
    """Broadcast the reset cue to every surface."""
    print("Blasting Reset to the Surface")
    osc_dispatch('/reset-surface', 1)
def surfaceclose_handler(unused_addr, args):
    """Broadcast the close cue, then send averaged surface readings to the AI."""
    print("Blasting Close to the Surface")
    osc_dispatch('/close-surface', 1)
    # BUG FIX: statistics.mean raises StatisticsError on an empty sequence,
    # which would crash the handler when no surface messages ever arrived.
    if not surface_data:
        print("No surface data collected; skipping AI update")
        return
    sentiment = mean([d['sentiment'] for d in surface_data])
    energy = mean([d['energy'] for d in surface_data])
    focus = mean([d['focus'] for d in surface_data])
    send_surface_state_to_ai(sentiment, energy, focus)
def end_handler(unused_addr, args):
    """End the show: flip the action state to 'end' and broadcast it."""
    print("end of show")
    current_state.update({'/action': 'end'})
    broadcast_state()
    # BUG FIX: removed the stray debug `print("some stupid stuff")` that
    # followed the return statement and could never execute.
    return
def osc_server(ip=ip_osc_server, port=port_server):
    """Register every OSC address with its handler and serve forever.

    Blocking call: runs a ThreadingOSCUDPServer bound to (ip, port).
    """
    routes = {
        "/surface-sentiments": surface_handler,
        "/reset": reset_handler,
        "/silent": silent_handler,
        "/answer": answer_handler,
        "/refresh": refresh_handler,
        "/talking": talking_handler,
        "/end": end_handler,
        "/question": question_handler,
        "/thinking": thinking_handler,
        "/startsurface": surfacestart_handler,
        "/closesurface": surfaceclose_handler,
        "/resetsurface": surfacereset_handler,
    }
    disp = dispatcher.Dispatcher()
    for address, handler in routes.items():
        disp.map(address, handler)
    ## TODO: talk state -> trigger from the AI to get new words/questions
    ## from the server and then broadcast.
    server = pythonosc.osc_server.ThreadingOSCUDPServer((ip, port), disp)
    print("Serving on {}".format(server.server_address))
    server.serve_forever()
if __name__ == '__main__':
    # CLI harness: either run the OSC server, or fire a single test message
    # at a running server.  Modes are mutually exclusive; first match wins.
    parser = argparse.ArgumentParser()
    # NOTE(review): --ip/--port are parsed but never forwarded to
    # osc_server() below, which uses its own defaults — confirm intent.
    parser.add_argument("--ip", default=ip_osc,
                        help="The ip of the OSC server")
    parser.add_argument("--port", type=int, default=port_server,
                        help="The port the OSC server is listening on")
    parser.add_argument('--server', action='store_true', default=False,
                        help="Run in server mode")
    parser.add_argument('--text', action='store_true', default=False,
                        help="broadcast the text questions")
    parser.add_argument('--silent', action='store_true', default=False, help="end talking cue")
    parser.add_argument('--talking', action='store_true', default=False, help="get talking cue")
    parser.add_argument('--answer', action='store_true', default=False, help="get answer")
    parser.add_argument('--reset', action='store_true', default=False, help="start over")
    parser.add_argument('--refresh', action='store_true', default=False, help="refresh questions")
    parser.add_argument('--end', action='store_true', default=False, help="end experience")
    parser.add_argument('--question', action='store_true', default=False, help='test question handler')
    parser.add_argument('--thinking', action='store_true', default=False, help='test thinking handler')
    parser.add_argument('--surface', action='store_true', default=False, help="send dummy surface data")
    # BUG FIX: dest was "new-state", an invalid attribute name that forced
    # the awkward vars(args)['new-state'] lookups; also fixed the "teh" typo.
    parser.add_argument("--set-state", dest="new_state", default='guarded',
                        help="set the new state", metavar="STATE")
    parser.add_argument('--startsurface', action='store_true', default=False, help="test surface start")
    parser.add_argument('--resetsurface', action='store_true', default=False, help="test surface reset")
    parser.add_argument('--closesurface', action='store_true', default=False, help="test surface stop")
    args = parser.parse_args()
    print("Got argument: {}".format(args))
    if args.server:
        print("Sending Server")
        osc_server()
    elif args.text:
        print("Sending Text")
        broadcast_questions()
    elif args.silent:
        print("Sending OSC Test Message")
        osc_dispatch('/silent', 1)
    elif args.talking:
        print("Sending Talking")
        osc_dispatch('/talking', "answer")
    elif args.answer:
        print("Sending Answer")  ## verified with web app
        osc_dispatch('/answer', "answer")
    elif args.reset:
        print("Resetting")  ## verified with web app (typo "Reseting" fixed)
        osc_dispatch('/reset', 1)
    elif args.refresh:
        print("Refreshing questions")
        osc_dispatch('/refresh', 1)
    elif args.end:
        print("End experience")
        osc_dispatch('/end', 1)
    elif args.question:
        print("Sending a question")
        osc_dispatch('/question', 1)
    elif args.thinking:
        print("Setting thinking")
        osc_dispatch('/thinking', 1)
    elif args.startsurface:
        print("Telling surfaces to turn on")
        osc_dispatch('/startsurface', 1)
    elif args.closesurface:
        print("Telling surfaces to close")
        osc_dispatch('/closesurface', 1)
    elif args.resetsurface:
        print("Telling surfaces to start over")
        osc_dispatch('/resetsurface', 1)
    elif args.surface:
        print("Sending Surface Message")
        osc_dispatch('/surface-sentiments', '{"sentiment": 0.15, "focus": 0.65, "energy": -0.3, "unit": "test", "words": ["inspired", "anxious", "understanding"], "parts": ["hand", "eye", "head"]}')
    elif args.new_state:
        # Fallback: --set-state has a default, so this branch always runs
        # when no other mode was selected.
        print('changing state')
        change_state(current_state, args.new_state)
import argparse
import importlib

# CLI entry point: convert a tracking dataset from MOT format to COCO format
# using a converter class chosen by name at runtime.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_path', help='Path that contains data in MOT format', required=True)
    parser.add_argument('--output_path', help='Path that will contains the output', required=True)
    parser.add_argument('--converter_name', help='Name of the converter to use', required=True)
    args = parser.parse_args()
    # Resolve the converter class by name from the mot_to_coco module, then
    # instantiate it and run the conversion.
    module = importlib.import_module("lit_tracking.converter.mot_to_coco")
    mot2coco = getattr(module, args.converter_name)(input_path=args.input_path, output_path=args.output_path)
    mot2coco.convert()
# Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestNeg(TestCase):
    """Accuracy tests for torch.neg on NPU against a CPU reference."""

    def cpu_op_exec(self, input1):
        """Reference: out-of-place torch.neg on CPU, as a numpy array."""
        output = torch.neg(input1)
        output = output.numpy()
        return output

    def npu_op_exec(self, input1):
        """Out-of-place torch.neg on NPU, copied back to CPU as numpy."""
        output = torch.neg(input1)
        output = output.to("cpu")
        output = output.numpy()
        return output

    def npu_op_exec_out(self, input1, input2):
        """torch.neg with an explicit `out=` tensor on NPU."""
        torch.neg(input1, out=input2)
        output = input2.to("cpu")
        output = output.numpy()
        return output

    def cpu_inp_op_exec(self, input1):
        """In-place torch.neg_ on CPU."""
        torch.neg_(input1)
        output = input1.numpy()
        return output

    def npu_inp_op_exec(self, input1):
        """In-place torch.neg_ on NPU."""
        torch.neg_(input1)
        output = input1.to("cpu")
        output = output.numpy()
        return output

    def neg_result(self, shape_format):
        """Compare CPU vs NPU neg (out-of-place and in-place) for every
        (dtype, format, shape) spec in *shape_format*.

        float16 inputs are upcast to float32 for the CPU reference, and the
        reference result is cast back to the NPU dtype before comparison.
        """
        for item in shape_format:
            cpu_input1, npu_input1 = create_common_tensor(item[0], -100, 100)
            if cpu_input1.dtype == torch.float16:
                cpu_input1 = cpu_input1.to(torch.float32)
            cpu_output = self.cpu_op_exec(cpu_input1)
            npu_output = self.npu_op_exec(npu_input1)
            cpu_output = cpu_output.astype(npu_output.dtype)
            self.assertRtolEqual(cpu_output, npu_output)

            cpu_input_inp, npu_input_inp = create_common_tensor(item[0], -100, 100)
            if cpu_input_inp.dtype == torch.float16:
                cpu_input_inp = cpu_input_inp.to(torch.float32)
            cpu_output_inp = self.cpu_inp_op_exec(cpu_input_inp)
            npu_output_inp = self.npu_inp_op_exec(npu_input_inp)
            cpu_output_inp = cpu_output_inp.astype(npu_output_inp.dtype)
            self.assertRtolEqual(cpu_output_inp, npu_output_inp)

    def neg_out_result(self, shape_format):
        """Check the `out=` variant, including an `out` tensor whose shape
        (item[1]) differs from the input's (item[0])."""
        for item in shape_format:
            cpu_input1, npu_input1 = create_common_tensor(item[0], -100, 100)
            cpu_input2, npu_input2 = create_common_tensor(item[0], -100, 100)
            cpu_input3, npu_input3 = create_common_tensor(item[1], -100, 100)
            if cpu_input1.dtype == torch.float16:
                cpu_input1 = cpu_input1.to(torch.float32)
            cpu_output = self.cpu_op_exec(cpu_input1)
            npu_output_out1 = self.npu_op_exec_out(npu_input1, npu_input2)
            npu_output_out2 = self.npu_op_exec_out(npu_input1, npu_input3)
            cpu_output = cpu_output.astype(npu_output_out1.dtype)
            self.assertRtolEqual(cpu_output, npu_output_out1)
            self.assertRtolEqual(cpu_output, npu_output_out2)

    def test_neg_out_result(self, device):
        # Mixed fp16/fp32 shapes; the second spec is the mismatched `out`.
        shape_format = [
            [[np.float16, 0, [128, 116, 14, 14]], [np.float16, 0, [256, 116, 1, 1]]],
            [[np.float16, 0, [128, 58, 28, 28]], [np.float16, 0, [58, 58, 1, 1]]],
            [[np.float16, 0, [128, 3, 224, 224]], [np.float16, 0, [3, 3, 3, 3]]],
            [[np.float16, 0, [128, 116, 14, 14]], [np.float16, 0, [116, 116, 1, 1]]],
            [[np.float32, 0, [256, 128, 7, 7]], [np.float32, 0, [128, 128, 3, 3]]],
            [[np.float32, 0, [256, 3, 224, 224]], [np.float32, 0, [3, 3, 7, 7]]],
            [[np.float32, 0, [2, 3, 3, 3]], [np.float32, 0, [3, 1, 3, 3]]],
            [[np.float32, 0, [128, 232, 7, 7]], [np.float32, 0, [232, 232, 1, 1]]],
        ]
        self.neg_out_result(shape_format)

    # The remaining tests sweep tensor formats (0/3/29) per rank and dtype.
    def test_neg_shape_format_fp16_1d(self, device):
        format_list = [0, 3]
        shape_format = [[[np.float16, i, [96]]] for i in format_list]
        self.neg_result(shape_format)

    def test_neg_shape_format_fp32_1d(self, device):
        format_list = [0, 3]
        shape_format = [[[np.float32, i, [96]]] for i in format_list]
        self.neg_result(shape_format)

    def test_neg_shape_format_fp16_2d(self, device):
        format_list = [0, 3, 29]
        shape_format = [[[np.float16, i, [448, 1]]] for i in format_list]
        self.neg_result(shape_format)

    def test_neg_shape_format_fp32_2d(self, device):
        format_list = [0, 3, 29]
        shape_format = [[[np.float32, i, [448, 1]]] for i in format_list]
        self.neg_result(shape_format)

    def test_neg_shape_format_fp16_3d(self, device):
        format_list = [0, 3, 29]
        shape_format = [[[np.float16, i, [64, 24, 38]]] for i in format_list]
        self.neg_result(shape_format)

    def test_neg_shape_format_fp32_3d(self, device):
        format_list = [0, 3, 29]
        shape_format = [[[np.float32, i, [64, 24, 38]]] for i in format_list]
        self.neg_result(shape_format)

    def test_neg_shape_format_fp16_4d(self, device):
        format_list = [0, 3, 29]
        shape_format = [[[np.float16, i, [32, 3, 3, 3]]] for i in format_list]
        self.neg_result(shape_format)

    def test_neg_shape_format_fp32_4d(self, device):
        format_list = [0, 3, 29]
        shape_format = [[[np.float32, i, [32, 3, 3, 3]]] for i in format_list]
        self.neg_result(shape_format)
# Register the suite for every device type except CPU (NPU-only tests).
instantiate_device_type_tests(TestNeg, globals(), except_for="cpu")

if __name__ == "__main__":
    run_tests()
| 5,825 | 2,347 |
import requests
from bs4 import BeautifulSoup

# Fetch a Vanity Fair article and print the text of its paragraphs.
url = 'https://www.vanityfair.com/style/society/2014/06/monica-lewinsky-humiliation-culture'
vf = requests.get(url)
# The original evaluated `vf.status_code` as a bare, no-op expression and
# then dumped the whole prettified page; fail loudly on a bad response
# instead of silently parsing an error page.
vf.raise_for_status()
s = BeautifulSoup(vf.text, 'lxml')
# NOTE(review): find() returns None if the markup changes, which would raise
# AttributeError on find_all — acceptable for a one-off script.
text_article = s.find('div', attrs={'class': 'content-background'}).find_all('p')
# Collect the paragraph texts, then print them.  (The original abused a list
# comprehension for its print side effects, leaving `text` a list of Nones.)
text = [p.get_text() for p in text_article]
for paragraph in text:
    print(paragraph)
import os
from envpy.variables import Var as Variables
from envpy.variables import __printenv__
def get_variables(readFile:bool=True, requiredFile:bool=False, readOS:bool=True, filepath:str=f'{os.getcwd()}', filename:str='.env', prefix:str=''):
    """Collect environment variables from a dotenv file and/or the OS via
    envpy's Variables helper.

    NOTE(review): `filepath` defaults to os.getcwd() evaluated once at
    import time, not per call — confirm that is intended.
    """
    return Variables(readFile=readFile, requiredFile=requiredFile, readOS=readOS, filepath=filepath, filename=filename, prefix=prefix).get_all_variables()
def printenv(variables, preat:bool=True):
    """Print *variables* via envpy's __printenv__.

    `preat` (sic — presumably "pretty") is part of the public signature and
    kept for backward compatibility.
    """
    __printenv__(variables, preat)
from models.base_model import BaseModel
from keras.models import Sequential
from keras.layers import Input, Dense, Conv1D, MaxPooling1D, Dropout, Flatten, BatchNormalization
from keras.optimizers import Adam
import tensorflow as tf
class DeepFactorizedModel(BaseModel):
    """Deep factorized 1D CNN: three stacks of Conv1D/BatchNorm/Dropout
    blocks separated by max-pooling, followed by a small dense regression
    head."""

    def __init__(self, config):
        super(DeepFactorizedModel, self).__init__(config)
        self.build_model()

    def build_model(self):
        """Assemble and compile the sequential network."""
        model = Sequential()
        # (filters, kernel_size) per conv block; a bare int closes a
        # sublayer with a MaxPooling1D of that pool size.
        plan = [
            (48, 3), (64, 3), (100, 3), (150, 7), (300, 7), 3,  # sublayer 1
            (200, 7), (200, 3), (200, 3), 4,                    # sublayer 2
            (200, 7), 4,                                        # sublayer 3
        ]
        first_conv = True
        for step in plan:
            if isinstance(step, int):
                model.add(MaxPooling1D(step))
                continue
            filters, kernel = step
            conv_kwargs = {'padding': 'same', 'activation': 'relu'}
            if first_conv:
                # One-hot DNA input: (sequence_length, 4).
                conv_kwargs['input_shape'] = (self.config.input_sequence_length, 4)
                first_conv = False
            model.add(Conv1D(filters, kernel, **conv_kwargs))
            model.add(BatchNormalization())
            model.add(Dropout(0.1))
        # Dense regression head.
        model.add(Flatten())
        model.add(Dense(100, activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.1))
        model.add(Dense(self.config.number_of_outputs, activation='linear'))
        model.compile(
            loss="mean_squared_error",
            optimizer=self.config.optimizer,
            # custom metrics in trainer
        )
        self.model = model
| 2,518 | 868 |
# Module-wide switch for debug output.
debug_print = False
# Set after the first call so the mode banner is printed only once.
has_run_once = False


def d_print(*args, **kwargs):
    """Print *args* only when debug mode is enabled.

    On the very first call, announce whether debug printing is enabled or
    disabled.  Arguments are forwarded verbatim to print().
    """
    global has_run_once
    if not has_run_once:
        mode = "enabled" if debug_print else "disabled"
        print(f"Debug Print Mode is {mode}")
        has_run_once = True
    if debug_print:
        print(*args, **kwargs)
# Copyright 2020 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The purpose of this script is to validate the mapping weight attribute addition that was performed by
# the script incorporate_mapping_weight_into_accessioning.py
import click
import logging
import psycopg2
from collections import defaultdict
from ebi_eva_common_pyutils.command_utils import run_command_with_output
from ebi_eva_common_pyutils.config_utils import get_pg_metadata_uri_for_eva_profile
from ebi_eva_common_pyutils.metadata_utils import get_species_info, get_db_conn_for_species
from ebi_eva_common_pyutils.pg_utils import get_all_results_for_query, get_result_cursor
logger = logging.getLogger(__name__)
def get_assemblies_with_multimap_snps_for_species(metadata_connection_handle):
    """Map species -> {assembly -> GCA accession} for every assembly that has
    multimap SNPs, from the EVA2015 snpmapinfo lookup table."""
    query = "select distinct database_name, assembly, assembly_accession " \
            "from dbsnp_ensembl_species.EVA2015_snpmapinfo_asm_lookup " \
            "where assembly_accession is not null"
    lookup = defaultdict(dict)
    for species_name, assembly, gca_accession in get_all_results_for_query(metadata_connection_handle, query):
        lookup[species_name][assembly] = gca_accession
    return lookup
def export_all_multimap_snps_from_dbsnp_dumps(private_config_xml_file):
    """Dump every multimap SNP id (with its GCA assembly accession) from the
    per-species dbSNP databases into a sorted, de-duplicated text file.

    Appends "snp_id,GCA_accession" lines to
    all_multimap_snp_ids_from_dbsnp_dumps.txt, then sorts the file in place
    with `sort -u`.
    """
    result_file = "all_multimap_snp_ids_from_dbsnp_dumps.txt"
    with psycopg2.connect(get_pg_metadata_uri_for_eva_profile("development", private_config_xml_file), user="evadev") \
            as metadata_connection_handle:
        assembly_GCA_accession_map = get_assemblies_with_multimap_snps_for_species(metadata_connection_handle)
        for species_info in get_species_info(metadata_connection_handle):
            species_name = species_info["database_name"]
            logger.info("Processing species {0}...".format(species_name))
            # Only species that actually have multimap SNPs are exported.
            if species_name in assembly_GCA_accession_map:
                with get_db_conn_for_species(species_info) as species_connection_handle:
                    # Restrict to the assemblies that have a GCA accession.
                    export_query = "select snp_id, assembly from dbsnp_{0}.multimap_snps " \
                                   "where assembly in ({1})"\
                        .format(species_name, ",".join(["'{0}'".format(assembly) for assembly in
                                                        assembly_GCA_accession_map[species_name].keys()]))
                    logger.info("Running export query: " + export_query)
                    with open(result_file, 'a') as result_file_handle:
                        for snp_id, assembly in get_result_cursor(species_connection_handle, export_query):
                            result_file_handle.write("{0},{1}\n"
                                                     .format(snp_id,
                                                             assembly_GCA_accession_map[species_name][assembly]))
    run_command_with_output("Sorting multimap SNP IDs from dbSNP source dumps...",
                            "sort -u {0} -o {0}".format(result_file))
@click.option("--private-config-xml-file", help="ex: /path/to/eva-maven-settings.xml", required=True)
@click.command()
def main(private_config_xml_file):
    """CLI entry point: export and sort all multimap SNP ids from the dbSNP
    source dumps."""
    export_all_multimap_snps_from_dbsnp_dumps(private_config_xml_file)


if __name__ == "__main__":
    main()
def partido_rec_mem_camino(C, S, N, M):
    """Memoized 0/1 decision DP with path reconstruction.

    For each of the len(S) cities, either "play" it (d=1: pay C[i], score
    S[i]) or skip it (d=0: score N[i]), subject to total budget M.  Returns
    (best_score, decisions) where decisions[i] is 0 or 1 per city.
    """
    memo = {}

    def best(n, m):
        # Best score using the first n cities with budget m.
        if n == 0:
            return 0
        if (n, m) not in memo:
            cost, played, skipped = C[n - 1], S[n - 1], N[n - 1]
            if m >= cost:
                # Try both decisions; memoize (score, decision) so the
                # winning choice can be replayed afterwards.
                memo[n, m] = max(
                    (best(n - 1, m - d * cost) + d * played + (1 - d) * skipped, d)
                    for d in range(2)
                )
            else:
                memo[n, m] = (best(n - 1, m) + skipped, 0)
        return memo[n, m][0]

    total = len(S)
    score = best(total, M)
    # Walk the memo table backwards to recover the decision sequence.
    decisions = []
    n, m = total, M
    while n > 0:
        d = memo[n, m][1]
        decisions.append(d)
        m -= d * C[n - 1]
        n -= 1
    decisions.reverse()
    return score, decisions
if __name__ == '__main__':
    # Sample instance: costs C, score if played S, score if skipped N,
    # budget M.  (K is defined but unused.)
    C = [5, 15, 8, 10]
    S = [5, 4, 1, 6]
    N = [3, 5, 6, 4]
    M = 15
    K = 5
    print(partido_rec_mem_camino(C, S, N, M))
#!/usr/bin/env python3
# From https://people.sc.fsu.edu/~jburkardt/py_src/hilbert_curve/hilbert_curve.py
#
def d2xy(m, d):
    """Convert a 1D Hilbert coordinate to a 2D Cartesian coordinate.

    The grid has N = 2^m cells per side (0 < m).  `d` is the Hilbert
    coordinate of a cell, 0 <= d < N*N.  Returns (x, y) with
    0 <= x, y < N.

    Licensing: distributed under the GNU LGPL license (John Burkardt,
    modified 03 January 2016).
    """
    side = 2 ** m
    x = y = 0
    remaining = d
    step = 1
    while step < side:
        # Extract the quadrant bits for this level of the curve.
        rx = (remaining // 2) % 2
        if rx == 0:
            ry = remaining % 2
        else:
            ry = (remaining ^ rx) % 2
        # Undo the level's rotation/reflection, then offset into the quadrant.
        x, y = rot(step, x, y, rx, ry)
        x += step * rx
        y += step * ry
        remaining //= 4
        step *= 2
    return x, y
def d2xy_test():
    """Exercise d2xy by printing (x, y) for every d on an 8x8 grid.

    Licensing: GNU LGPL (John Burkardt, modified 03 January 2016).
    """
    import platform
    print('')
    print('D2XY_TEST:')
    print(' Python version: %s' % (platform.python_version()))
    print(' D2XY converts a Hilbert linear D coordinate to an (X,Y) 2D coordinate.')
    m = 3
    n = 2 ** m
    print('')
    print(' D X Y')
    print('')
    for d in range(0, n * n):
        x, y = d2xy(m, d)
        print(' %3d %3d %3d' % (d, x, y))
    # Terminate.
    print('')
    print('D2XY_TEST:')
    print(' Normal end of execution.')
    return
def rot(n, x, y, rx, ry):
    """Rotate and flip a quadrant appropriately for the Hilbert recursion.

    `n` is the side length of the square (a power of 2); (x, y) a point in
    it; rx/ry the quadrant bits.  Returns the transformed (x, y).

    Licensing: distributed under the GNU LGPL license (modified
    03 January 2016).
    """
    if ry == 0:
        if rx == 1:
            # Reflect through the centre of the square.
            x = n - 1 - x
            y = n - 1 - y
        # Flip across the main diagonal.
        x, y = y, x
    return x, y
def rot_test():
    """Print rot's output for every (x, y) on an 8x8 grid, with rx=0 and
    rx=1 (ry fixed at 0).

    Licensing: GNU LGPL (John Burkardt, modified 03 January 2016).
    """
    import platform
    print('')
    print('ROT_TEST:')
    print(' Python version: %s' % (platform.python_version()))
    print(' ROT rotates and flips a quadrant appropriately.')
    print('')
    print(' X Y X0 Y0 X1 Y1')
    print('')
    m = 3
    n = 2 ** m
    ry = 0
    for y in range(0, n):
        for x in range(0, n):
            rx = 0
            x0 = x
            y0 = y
            x0, y0 = rot(n, x0, y0, rx, ry)
            rx = 1
            x1 = x
            y1 = y
            x1, y1 = rot(n, x1, y1, rx, ry)
            print(' %2d %2d %2d %2d %2d %2d' % (x, y, x0, y0, x1, y1))
    # Terminate.
    print('')
    print('ROT_TEST:')
    print(' Normal end of execution.')
    return
def timestamp():
    """Print the current date and time as a human-readable timestamp.

    Licensing: distributed under the GNU LGPL license (John Burkardt,
    modified 06 April 2013).
    """
    import time
    now = time.time()
    print(time.ctime(now))
    return None
def timestamp_test():
    """Smoke-test timestamp() by printing one timestamp.

    Licensing: GNU LGPL (John Burkardt, modified 03 December 2014).
    """
    import platform
    print('')
    print('TIMESTAMP_TEST:')
    print(' Python version: %s' % (platform.python_version()))
    print(' TIMESTAMP prints a timestamp of the current date and time.')
    print('')
    timestamp()
    # Terminate.
    print('')
    print('TIMESTAMP_TEST:')
    print(' Normal end of execution.')
    return
def xy2d(m, x, y):
    """Convert a 2D Cartesian coordinate to a 1D Hilbert coordinate.

    The square is divided into an NxN array of cells with N = 2^m; cell
    (0,0) is the lower left, (N-1,N-1) the upper right.  Given
    0 <= x, y < N, returns d with 0 <= d < N*N.

    Licensing: distributed under the GNU LGPL license (modified
    03 January 2016).
    """
    xc, yc = x, y
    d = 0
    s = (2 ** m) // 2
    while s > 0:
        # Which quadrant of the current level contains the point?
        rx = 1 if (abs(xc) & s) > 0 else 0
        ry = 1 if (abs(yc) & s) > 0 else 0
        d += s * s * ((3 * rx) ^ ry)
        # Fold the point into the sub-quadrant for the next level.
        xc, yc = rot(s, xc, yc, rx, ry)
        s //= 2
    return d
def xy2d_test():
    """Print the Hilbert coordinate d for every (x, y) of an 8x8 grid.

    Licensing: GNU LGPL (John Burkardt, modified 03 January 2016).
    """
    import platform
    print('')
    print('XY2D_TEST:')
    print(' Python version: %s' % (platform.python_version()))
    print(' XY2D converts an (X,Y) 2D coordinate to a Hilbert linear D coordinate.')
    m = 3
    n = 2 ** m
    print('')
    print(' ', end='')
    for x in range(0, n):
        print('%3d' % (x), end='')
    print('')
    print('')
    # Rows are printed top-down so the origin appears at the lower left.
    for y in range(n - 1, -1, -1):
        print(' %3d: ' % (y), end='')
        for x in range(0, n):
            d = xy2d(m, x, y)
            print('%3d' % (d), end='')
        print('')
    # Terminate.
    print('')
    print('XY2D_TEST:')
    print(' Normal end of execution.')
    return
def hilbert_curve_test():
    """Run all HILBERT_CURVE library tests (d2xy, rot, xy2d).

    Licensing: GNU LGPL (John Burkardt, modified 03 January 2016).
    """
    import platform
    print('')
    print('HILBERT_CURVE_TEST')
    print(' Python version: %s' % (platform.python_version()))
    print(' Test the HILBERT_CURVE library.')
    d2xy_test()
    rot_test()
    xy2d_test()
    # Terminate.
    print('')
    print('HILBERT_CURVE_TEST:')
    print(' Normal end of execution.')
    return
if (__name__ == '__main__'):
    # Run the full test suite, bracketed by start/end timestamps.
    timestamp()
    hilbert_curve_test()
    timestamp()
| 7,357 | 2,967 |
from rest_framework import permissions
class IsReporter(permissions.BasePermission):
    """
    Permission allowing reporters to access their own reports.
    """

    def has_object_permission(self, request, view, obj):
        # Grant access only when the report's reporter matches the
        # requesting user.
        # NOTE(review): assumes the user model exposes an `IITGUser` related
        # object — confirm against the auth configuration.
        return obj.reporter == request.user.IITGUser
#!/usr/bin/env python
import sys
import getopt
import requests
import urllib.parse
import json
class YoutubeSearch:
    """Scrape YouTube search results by parsing the `ytInitialData` JSON
    blob embedded in the results page.

    `search_terms` is the query string; `max_results` optionally caps the
    number of returned videos.  Results are available as `self.videos`
    (list of dicts) or via to_dict()/to_json().
    """

    def __init__(self, search_terms: str, max_results=None):
        self.search_terms = search_terms
        self.max_results = max_results
        self.videos = self.search()

    def search(self):
        """Fetch the results page (retrying until the data blob is present)
        and return the parsed video list, truncated to max_results."""
        encoded_search = urllib.parse.quote(self.search_terms)
        BASE_URL = "https://youtube.com"
        url = f"{BASE_URL}/results?search_query={encoded_search}"
        response = requests.get(url).text
        # YouTube occasionally serves a page without the data blob; retry.
        while "ytInitialData" not in response:
            response = requests.get(url).text
        results = self.parse_html(response)
        if self.max_results is not None and len(results) > self.max_results:
            return results[: self.max_results]
        return results

    def parse_html(self, response):
        """Extract video metadata dicts from a results-page HTML string.

        The page embeds `var ytInitialData = {...};`; the `+ 3` skips the
        " = " between the name and the JSON object.
        """
        results = []
        start = (
            response.index("ytInitialData")
            + len("ytInitialData")
            + 3
        )
        end = response.index("};", start) + 1
        json_str = response[start:end]
        data = json.loads(json_str)
        videos = data["contents"]["twoColumnSearchResultsRenderer"]["primaryContents"][
            "sectionListRenderer"
        ]["contents"][0]["itemSectionRenderer"]["contents"]
        for video in videos:
            if "videoRenderer" not in video.keys():
                continue
            video_data = video.get("videoRenderer", {})
            res = {}
            res["id"] = video_data.get("videoId", None)
            res["thumbnails"] = [thumb.get("url", None) for thumb in video_data.get("thumbnail", {}).get("thumbnails", [{}])]
            # BUG FIX: the fallback for missing title/channel was [[{}]], so
            # indexing [0] yielded a *list* and .get() raised AttributeError
            # whenever the field was absent.  The correct fallback is [{}].
            res["title"] = video_data.get("title", {}).get("runs", [{}])[0].get("text", None)
            res["channel"] = video_data.get("longBylineText", {}).get("runs", [{}])[0].get("text", None)
            res["duration"] = video_data.get("lengthText", {}).get("simpleText", 0)
            res["views"] = video_data.get("viewCountText", {}).get("simpleText", 0)
            res["url_suffix"] = video_data.get("navigationEndpoint", {}).get("commandMetadata", {}).get("webCommandMetadata", {}).get("url", None)
            results.append(res)
        return results

    def to_dict(self):
        """Return the parsed videos as a plain list of dicts."""
        return self.videos

    def to_json(self):
        """Return the parsed videos as a pretty-printed JSON string."""
        return json.dumps({"videos": self.videos}, indent=4)
# --- Command-line interface ---------------------------------------------
argumentList = sys.argv[1:]

# Short options: -h (help), -t <title>
options = "ht:"

# Long options
long_options = ["help", "title"]


def help():
    """Print CLI usage.  NOTE: shadows the builtin help(); kept as-is for
    backward compatibility."""
    print("\nYoutube Search \n")
    print("Usage:")
    print(" -t or --title search with title")
    print(" -h or --help show this useful help message ...")
    print("")
    print("Example:")
    print(' -t "interesting title"')
    print("")


try:
    # Parsing argument
    arguments, values = getopt.getopt(argumentList, options, long_options)
    # checking each argument
    for currentArgument, currentValue in arguments:
        if currentArgument in ("-h", "--help"):
            help()
        elif currentArgument in ("-t", "--title"):
            # NOTE(review): uses sys.argv[2] instead of currentValue, so this
            # only works when -t is the first option — confirm intent.
            results = YoutubeSearch(sys.argv[2], max_results=15).to_json()
            print(results)
        else:
            help()
except getopt.error as err:
    # output error, and return with an error code
    print(str(err))
# -*- coding: utf-8 -*-
'''
script for initialization.
'''
import os
import requests
from .script_init_tabels import run_init_tables
from mapmeta.model.mapmeta_model import MMapMeta
from lxml import etree
def do_for_maplet(mapserver_ip):
    '''
    Code taken from `maplet_arch//030_gen_mapproxy.py`; originally used to
    locate mapfiles and generate YAML.

    Walks the 00_China_png workspace, builds a WMS GetCapabilities URL for
    each .png layer, fetches that layer's metadata and upserts it into
    MMapMeta.
    '''
    rst_ws = '/opt/mapws/maplet/00_China_png'
    for wroot, wdirs, wfiles in os.walk(rst_ws):
        for png in wfiles:
            (lyr_name, lyr_ext) = os.path.splitext(png)
            # Only PNG layer files are of interest.
            if png.endswith('.png'):
                pass
            else:
                continue
            maplet_uid = lyr_name
            # Example:
            # http://<ip>/cgi-bin/mapserv?map=...&layer=...&SERVICE=WMS&version=1.3.0&REQUEST=GetCapabilities
            mapurl = 'http://{mapserver_ip}/cgi-bin/mapserv?map=/opt/mapws/maplet/maplet_00.map' \
                     '&layer={layer}&SERVICE=WMS&version=1.3.0' \
                     '&REQUEST=GetCapabilities'.format(
                         mapserver_ip=mapserver_ip,
                         layer='maplet_' + maplet_uid,
                     )
            print(mapurl)
            lyr_meta = get_meta(mapurl, maplet_uid)
            mapinfo = {
                'uid': maplet_uid,
                'url': mapurl,
                'meta': lyr_meta
            }
            MMapMeta.add_or_update(mapinfo)
def get_meta(url, sig):
    '''
    Fetch a WMS GetCapabilities document from `url` and return the XML of
    the <Layer> whose <Name>'s last underscore-separated token equals `sig`.

    Returns the pretty-printed XML string of the last matching layer, or
    '' if none matched.

    Cleanup: the original evaluated `uu.encoding` and `root.tag` as bare
    no-op expressions and reused the name `uu` for both the response and
    the layer list; behaviour is unchanged.
    '''
    response = requests.get(url)
    response.encoding = 'utf-8'
    # etree.XML wants bytes when the document declares an encoding.
    xml_bytes = response.text.encode('utf-8')
    root = etree.XML(xml_bytes)
    namespace = "{http://www.opengis.net/wms}"
    matched = ''
    for layer in root.findall('.//{0}Layer'.format(namespace)):
        name_el = layer.find('.//{0}Name'.format(namespace))
        sig_arr = name_el.text.split('_')
        if sig_arr[-1] == sig:
            # No break: the last matching layer wins, as before.
            matched = etree.tostring(layer, pretty_print=True).decode()
    return matched
def do_for_vector(mapserver_ip):
    '''
    Code taken from `maplet_arch//030_gen_mapproxy.py`; originally used to
    locate mapfiles and generate YAML.

    Walks the vect3857 workspace, builds a WMS GetCapabilities URL per
    vector layer file (names like "lyr_*_vNNN"), fetches that layer's
    metadata and upserts it into MMapMeta.
    '''
    rst_ws = '/opt/mapws/maplet/vect3857'
    for wroot, wdirs, wfiles in os.walk(rst_ws):
        for png in wfiles:
            (lyr_name, lyr_ext) = os.path.splitext(png)
            lyr_name_arr = lyr_name.split('_')
            # Accept only names like "lyr_..._vNNN" (4-char trailing token
            # starting with 'v').
            if png.startswith('lyr_') and len(lyr_name_arr[-1]) == 4 and lyr_name_arr[-1][0] == 'v':
                pass
            else:
                continue
            maplet_uid = lyr_name_arr[-1]
            # Example:
            # http://<ip>/cgi-bin/mapserv?map=.../mapfile.map&layer=...&SERVICE=WMS&version=1.3.0&REQUEST=GetCapabilities
            mapurl = 'http://{mapserver_ip}/cgi-bin/mapserv?map={mapfile}' \
                     '&layer={layer}&SERVICE=WMS&version=1.3.0' \
                     '&REQUEST=GetCapabilities'.format(
                         mapserver_ip=mapserver_ip,
                         mapfile=os.path.join(wroot, 'mapfile.map'),
                         layer='maplet_' + maplet_uid,
                     )
            print(mapurl)
            lyr_meta = get_meta(mapurl, maplet_uid)
            mapinfo = {
                'uid': maplet_uid,
                'url': mapurl,
                'meta': lyr_meta
            }
            MMapMeta.add_or_update(mapinfo)
def run_init(*args):
    '''
    Running init: create the tables, then populate map metadata for both
    the vector layers and the maplet PNG layers.  `*args` is ignored.
    '''
    run_init_tables()
    do_for_vector('121.42.29.253')
    do_for_maplet('121.42.29.253')
| 3,716 | 1,423 |
from typing import Dict, Any
import pandas as pd
from tqdm.contrib import tmap
from sage.all import RR, ZZ
import dissect.utils.database_handler as database
from dissect.definitions import STD_CURVE_DICT, ALL_CURVE_COUNT
class Modifier:
    """A collection of small callables for easier modification of
    visualised values.  Each static method returns a one-argument
    function."""

    def __init__(self):
        pass

    @staticmethod
    def identity():
        """Return the value unchanged."""
        return lambda value: value

    @staticmethod
    def ratio(ratio_precision=3):
        """Approximate a ratio to `ratio_precision` digits (Sage RR)."""
        return lambda value: RR(value).numerical_approx(digits=ratio_precision)

    @staticmethod
    def bits():
        """Bit length of an integer (Sage ZZ)."""
        return lambda value: ZZ(value).nbits()

    @staticmethod
    def factorization_bits(factor_index=-1):
        """Bit length of one factor (by default the largest/last one)."""
        return lambda value: ZZ(value[factor_index]).nbits()

    @staticmethod
    def length():
        """Length of a sequence."""
        return lambda value: len(value)
def load_trait(
    trait: str, params: Dict[str, Any] = None, curve: str = None, db = None
) -> pd.DataFrame:
    """Load all database results for *trait* into a DataFrame.

    `params` and `curve` are currently unused.  When `db` is None a new
    connection is opened.
    """
    if not db:
        db = database.connect()
    trait_results = database.get_trait_results(db, trait)
    return pd.DataFrame(trait_results).convert_dtypes()
def load_curves(filters: Any = {}, db=None) -> pd.DataFrame:
    """Load curves matching *filters* into a DataFrame with columns
    curve/simulated/bitlength/field/cofactor.

    NOTE: the mutable default `filters={}` is shared across calls; it is
    only passed through here, but callers should not mutate it.
    """
    if not db:
        db = database.connect()

    def project(record: Dict[str, Any]):
        # Keep only the fields the visualisations need.
        projection = {}
        projection["curve"] = record["name"]
        projection["simulated"] = record["simulated"]
        projection["bitlength"] = int(record["field"]["bits"])
        projection["field"] = record["field"]["type"]
        # Cofactor may be stored as a hex string or as an int.
        projection["cofactor"] = (
            int(record["cofactor"], base=16)
            if isinstance(record["cofactor"], str)
            else int(record["cofactor"])
        )
        return projection

    curve_records = database.get_curves(db, filters, raw=True)
    df = pd.DataFrame(
        tmap(project, curve_records, desc="Loading curves", total=ALL_CURVE_COUNT)
    ).convert_dtypes()
    return df
def get_trait_df(curves, trait_name, db=None):
    """Return trait results joined with curve metadata.

    Right-joins *curves* onto the trait results on the 'curve' column, so
    every trait row is kept even without matching curve metadata.
    """
    # load all results for the given trait
    df_trait = load_trait(trait_name, db=db)
    # join curve metadata to trait results
    df_trait = curves.merge(df_trait, "right", "curve")
    return df_trait
def filter_choices(choices, ignored):
    """Return a copy of *choices* without the keys listed in *ignored*.

    Idiom: replaced the manual build-loop with a dict comprehension; the
    input mapping is not modified.
    """
    return {key: value for key, value in choices.items() if key not in ignored}
def get_params(choices):
    """Strip the non-parameter keys (curve filters and UI controls) from
    *choices*, leaving only trait parameters."""
    return filter_choices(
        choices, ["source", "bitlength","field", "cofactor", "Feature:", "Modifier:"]
    )
def filter_df(df, choices):
    """Filter curve rows by the UI *choices*: source categories, field
    type, and any remaining numeric parameters (e.g. bitlength, cofactor)."""
    # TODO this way of checking is expensive - add curve categories to DB
    allowed_curves = []
    for source in choices["source"]:
        allowed_curves += STD_CURVE_DICT.get(source, [])
    # 'sim' / 'std' toggle simulated and standard curves respectively.
    if "sim" not in choices["source"]:
        df = df[df.simulated == False]
    if "std" not in choices["source"]:
        df = df[df.curve.isin(allowed_curves) | (df.simulated == True)]
    df = df[df.field.isin(choices["field"])]
    # Remaining choices are numeric column filters.
    filtered = filter_choices(choices, ["source", "field","Feature:", "Modifier:"])
    for key, value in filtered.items():
        options = list(map(int, value))
        df = df[df[key].isin(options)]
    return df
def get_all(df, choices):
    """Expand *choices* into one or more filtered views of *df*.

    Returns a list of tuples
    ``(filtered_df, choices_used, feature, modifier_instance, modifier_name)``.
    If trait parameters remain, one free parameter is fixed to each of its
    selected values, producing one tuple per value.

    NOTE(review): this mutates the caller's *choices* dict (``choices.pop``)
    and only expands a single parameter -- confirm callers do not reuse the
    dict afterwards.
    """
    # Instantiate the selected Modifier by name.
    modifier = getattr(Modifier, choices["Modifier:"])()
    feature = choices["Feature:"]
    params = get_params(choices)
    # No free parameters: a single filtered result.
    if len(params)==0:
        return [(filter_df(df, choices),params,feature,modifier,choices["Modifier:"])]
    # Fix one (arbitrary, last-inserted) parameter and emit one result per value.
    param, values = params.popitem()
    choices.pop(param)
    results = []
    for v in values:
        param_choice = choices.copy()
        param_choice[param] = [v]
        results.append((filter_df(df, param_choice), param_choice, feature, modifier, choices["Modifier:"]))
    return results
| 3,713 | 1,180 |
"""Bosch quirks."""
BOSCH = "Bosch"
| 37 | 21 |
# -*- coding: utf-8 -*-
# This file is part of dj-cookieauth released under the Apache 2 license.
# See the NOTICE for more information.
import base64
import hmac
import hashlib
import time
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.contrib.auth.models import User, AnonymousUser
class CookieAuthMiddleware(object):
    """Authenticate requests from a signed "AuthSession" cookie.

    Cookie payload (before base64): ``<username>:<hex timestamp>:<hmac>``,
    where the HMAC-SHA256 hexdigest is keyed with SECRET_KEY concatenated
    with the user's password salt.  The cookie is refreshed on every
    response for authenticated users and deleted for anonymous ones.
    """

    def __init__(self):
        # Cookie name is configurable via settings.COOKIE_AUTH_NAME.
        self.cookie_name = getattr(settings, 'COOKIE_AUTH_NAME',
                'AuthSession')

    def process_request(self, request):
        """Set request.user from a valid cookie; AnonymousUser otherwise."""
        try:
            cookie = request.COOKIES[self.cookie_name]
        except KeyError:
            return

        try:
            # BUG FIX: base64.decode() operates on file objects; b64decode()
            # is the correct API for a cookie string.
            auth_session = base64.b64decode(cookie).decode('utf-8')
            user, timestr, cur_hash = auth_session.split(":")
        except (ValueError, TypeError):
            # Narrowed from a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit.
            raise ValueError("Malformed AuthSession cookie. Please clear your cookies.")

        # BUG FIX: django settings raise AttributeError, never KeyError, so
        # the old try/except KeyError could not fire.
        secret = getattr(settings, 'SECRET_KEY', None)
        if not secret:
            raise ImproperlyConfigured("secret key isn't set")

        try:
            user_obj = User.objects.get(username=user)
        except User.DoesNotExist:
            return

        now = time.time()
        salt = self.get_user_salt(user_obj)
        full_secret = "%s%s" % (secret, salt)
        # hexdigest keeps the payload ASCII-safe (a raw digest may itself
        # contain ':' and corrupt the split above); hmac keys must be bytes.
        expected_hash = hmac.new(
            full_secret.encode('utf-8'),
            msg=("%s:%s" % (user, timestr)).encode('utf-8'),
            digestmod=hashlib.sha256).hexdigest()

        timeout = getattr(settings, 'COOKIE_AUTH_TIMEOUT', 600)
        try:
            timestamp = int(timestr, 16)
        except ValueError:
            return

        if now < timestamp + timeout:
            # Constant-time comparison to avoid leaking the expected HMAC
            # through timing differences.
            if hmac.compare_digest(expected_hash, cur_hash):
                request.user = user_obj
                request.user.timeleft = timestamp + timeout - now
                return
        # BUG FIX: AnonymousUser is a class; assign an *instance*.
        request.user = AnonymousUser()

    def process_response(self, request, response):
        """Refresh the auth cookie, or delete it for anonymous users."""
        if not request.user.is_authenticated():
            if self.cookie_name in request.COOKIES:
                response.delete_cookie(
                    self.cookie_name,
                    path=settings.SESSION_COOKIE_PATH,
                    domain=settings.SESSION_COOKIE_DOMAIN
                )
            return response

        # BUG FIX: get_user_salt lives on this middleware, not the request.
        salt = self.get_user_salt(request.user)
        secret = getattr(settings, 'SECRET_KEY', None)
        if not secret:
            raise ImproperlyConfigured("secret key isn't set")

        # BUG FIX: store the timestamp in hex so process_request's
        # int(timestr, 16) can parse it; the old code stored a float repr,
        # which int(..., 16) always rejected (auth could never validate).
        timestr = '%x' % int(time.time())
        full_secret = "%s%s" % (secret, salt)
        new_hash = hmac.new(
            full_secret.encode('utf-8'),
            msg=("%s:%s" % (request.user, timestr)).encode('utf-8'),
            digestmod=hashlib.sha256).hexdigest()
        key = "%s:%s:%s" % (request.user, timestr, new_hash)
        response.set_cookie(
            self.cookie_name,
            # BUG FIX: base64.encode() operates on file objects.
            base64.b64encode(key.encode('utf-8')).decode('ascii'),
            max_age=None,
            expires=None,
            domain=settings.SESSION_COOKIE_DOMAIN,
            path=settings.SESSION_COOKIE_PATH,
            secure=True,
            httponly=True
        )
        return response

    def get_user_salt(self, user):
        """Return the salt component of the user's stored password hash."""
        if '$' not in user.password:
            return ''
        # Format is "<algo>$<salt>$<hash>"; newer hashers add more
        # '$'-separated fields, so avoid the exact 3-way unpack that used
        # to raise ValueError for them.
        return user.password.split('$')[1]
| 3,251 | 934 |
import sys
import fileinput
from collections import defaultdict
from itertools import chain
def solve(algo, grid, times):
    """Run *times* enhancement steps and count lit ('#') pixels.

    The infinite background alternates between '.' and algo[0] on
    successive steps (relevant when algo[0] == '#').
    """
    backgrounds = ('.', algo[0])
    image = grid
    for step in range(times):
        image = enhance(algo, backgrounds[step % 2], image)
    return sum(row.count('#') for row in image)
def enhance(algo, default, grid):
    """Apply one enhancement step, growing the image by one pixel per side.

    The infinite surroundings are modelled by a defaultdict that yields the
    *default* background pixel for any coordinate not in the input grid.
    """
    height = len(grid)
    width = len(grid[0])
    pixels = defaultdict(lambda: default)
    for row in range(height):
        for col in range(width):
            pixels[(col, row)] = grid[row][col]
    margin = 1
    output = []
    for row in range(-margin, height + margin):
        line = ''.join(
            algo[index(pixels, col, row)] for col in range(-margin, width + margin)
        )
        output.append(line)
    return output
def index(infinite, x, y):
    """Read the 3x3 neighbourhood around (x, y) as a 9-bit binary number.

    Bits are ordered row by row, top-left to bottom-right; '#' is 1.
    """
    bits = ''
    for dy in (-1, 0, 1):
        for dx in (-1, 0, 1):
            bits += '1' if infinite[(x + dx, y + dy)] == '#' else '0'
    return int(bits, 2)
# Entry point: input line 0 is the enhancement algorithm, lines 2+ the image
# (line 1 is the blank separator).  Prints the lit-pixel count after 2 and
# 50 enhancement steps (AoC 2021 day 20 parts 1 and 2).
if __name__ == '__main__' and not sys.flags.interactive:
    lines = [line.strip() for line in fileinput.input()]
    print(solve(lines[0], lines[2:], 2))
    print(solve(lines[0], lines[2:], 50))
| 1,225 | 466 |
# Definition for a Node.
class Node:
    def __init__(self, val=None, children=None):
        # Value stored at this node.
        self.val = val
        # List of child nodes; None when not provided (callers must handle).
        self.children = children
"""
11:23 - 11:36 7/30
high level: BFS, level order traversal
mid level: queue store cur level res, for the popped node, for its all children, add them to queue
test:
size = 2
cur_res = [5, 6]
q = []
cur = 6
res = [[1], [3, 2, 4], [5, 6]]
time: O(# of nodes) -> O(n)
space:O(max(width of nodes)) ->O(n)
"""
from collections import deque
class Solution:
    # BUG FIX: the return annotation is quoted -- `typing.List` is never
    # imported in this file, so an unquoted annotation raised NameError at
    # class-definition time.
    def levelOrder(self, root: 'Node') -> 'List[List[int]]':
        """BFS level-order traversal of an n-ary tree.

        Returns one list of node values per depth level.
        Time O(n); space O(max level width).
        """
        res = []
        if not root:
            return res
        queue = deque([root])
        while queue:
            level = []
            # Drain exactly the nodes that form the current level.
            for _ in range(len(queue)):
                node = queue.popleft()
                level.append(node.val)
                # BUG FIX: Node's constructor defaults children to None,
                # which is not iterable -- guard with `or []`.
                for child in node.children or []:
                    queue.append(child)
            res.append(level)
        return res
#!/usr/bin/env python
import io

from setuptools import setup, find_packages

# Read the long description eagerly so the file handle is closed promptly.
# BUG FIX: the previous inline open(...).read().decode('utf8') leaked the
# file handle and breaks on Python 3, where str has no .decode(); io.open
# with an explicit encoding works identically on 2.7 and 3.x.
with io.open('README.rst', encoding='utf-8') as readme:
    long_description = readme.read()

setup(
    name='django-payfast',
    version='0.3.dev',
    author='Mikhail Korobov',
    author_email='kmike84@gmail.com',
    maintainer='Piet Delport',
    maintainer_email='pjdelport@gmail.com',
    packages=find_packages(exclude=['payfast_tests']),
    url='https://bitbucket.org/pjdelport/django-payfast',
    license='MIT license',
    description='A pluggable Django application for integrating payfast.co.za payment system.',
    long_description=long_description,
    classifiers=(
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ),
)
| 926 | 286 |
from utils import *
from mpmath import ellipe, ellipk, ellippi
from scipy.integrate import quad
import numpy as np
# Rational coefficients used by the Taylor tail of the Carlson RJ integral
# in rj() below -- presumably from Carlson (1979); confirm against the paper.
C1 = 3.0 / 14.0
C2 = 1.0 / 3.0
C3 = 3.0 / 22.0
C4 = 3.0 / 26.0
def J(N, k2, kappa, gradient=False):
    """Evaluate the J_N integral by Gaussian quadrature.

    *kappa* holds consecutive pairs of integration limits; each pair
    contributes the integral of sin(x)^(2N) * max(0, 1 - sin(x)^2/k2)^(3/2)
    over [kappa[2i]/2, kappa[2i+1]/2].

    With gradient=True also returns (dJ/dk2, dJ/dkappa); the kappa
    derivative path requires *kappa* to be a numpy array.
    """

    def integrand(x):
        return np.sin(x) ** (2 * N) * (np.maximum(0, 1 - np.sin(x) ** 2 / k2)) ** 1.5

    total = 0.0
    for lo, hi in zip(kappa[::2], kappa[1::2]):
        total += quad(integrand, 0.5 * lo, 0.5 * hi, epsabs=1e-12, epsrel=1e-12,)[0]

    if not gradient:
        return total

    # d/dkappa is the (signed) integrand evaluated at the endpoints.
    dJdkappa = (
        0.5
        * (
            np.sin(0.5 * kappa) ** (2 * N)
            * (np.maximum(0, 1 - np.sin(0.5 * kappa) ** 2 / k2)) ** 1.5
        )
        * np.repeat([-1, 1], len(kappa) // 2).reshape(1, -1)
    )

    # d/dk2 has no closed form here; integrate it as well.
    def d_integrand(x):
        return (
            (1.5 / k2 ** 2)
            * np.sin(x) ** (2 * N + 2)
            * (np.maximum(0, 1 - np.sin(x) ** 2 / k2)) ** 0.5
        )

    dJdk2 = 0.0
    for lo, hi in zip(kappa[::2], kappa[1::2]):
        dJdk2 += quad(d_integrand, 0.5 * lo, 0.5 * hi, epsabs=1e-12, epsrel=1e-12,)[0]
    return total, (dJdk2, dJdkappa)
def pal(bo, ro, kappa, gradient=False):
    """Numerically evaluate the Pal integral for impact parameter *bo* and
    occultor radius *ro* over a single integration window.

    Only one window (len(kappa) == 2) is supported.  With gradient=True
    also returns (d/dbo, d/dro, d/dkappa); that path evaluates
    ``func(kappa - np.pi)``, which requires *kappa* to be a numpy array --
    confirm callers pass arrays when requesting gradients.
    """
    # TODO
    if len(kappa) != 2:
        raise NotImplementedError("TODO!")

    def func(phi):
        c = np.cos(phi)
        # z is clamped to (1e-12, 1 - 1e-12) to avoid the removable
        # singularity of (1 - z^1.5) / (1 - z) at z = 1 and z = 0.
        z = np.minimum(
            1 - 1e-12, np.maximum(1e-12, 1 - ro ** 2 - bo ** 2 - 2 * bo * ro * c)
        )
        return (1.0 - z ** 1.5) / (1.0 - z) * (ro + bo * c) * ro / 3.0

    res, _ = quad(func, kappa[0] - np.pi, kappa[1] - np.pi, epsabs=1e-12, epsrel=1e-12,)
    if gradient:
        # Deriv w/ respect to kappa is analytic
        dpaldkappa = func(kappa - np.pi) * np.repeat([-1, 1], len(kappa) // 2).reshape(
            1, -1
        )
        # Derivs w/ respect to b and r are tricky, need to integrate
        def func_bo(phi):
            c = np.cos(phi)
            z = np.maximum(1e-12, 1 - ro ** 2 - bo ** 2 - 2 * bo * ro * c)
            P = (1.0 - z ** 1.5) / (1.0 - z) * (ro + bo * c) * ro / 3.0
            q = 3.0 * z ** 0.5 / (1.0 - z ** 1.5) - 2.0 / (1.0 - z)
            # NOTE(review): `1.0 / (bo + ro / c)` is asymmetric with the
            # analogous `1.0 / (ro + bo * c)` term in func_ro below --
            # confirm this is not a typo for `bo + ro * c`.
            return P * ((bo + ro * c) * q + 1.0 / (bo + ro / c))
        dpaldbo, _ = quad(
            func_bo, kappa[0] - np.pi, kappa[1] - np.pi, epsabs=1e-12, epsrel=1e-12,
        )
        def func_ro(phi):
            c = np.cos(phi)
            z = np.maximum(1e-12, 1 - ro ** 2 - bo ** 2 - 2 * bo * ro * c)
            P = (1.0 - z ** 1.5) / (1.0 - z) * (ro + bo * c) * ro / 3.0
            q = 3.0 * z ** 0.5 / (1.0 - z ** 1.5) - 2.0 / (1.0 - z)
            return P * ((ro + bo * c) * q + 1.0 / ro + 1.0 / (ro + bo * c))
        dpaldro, _ = quad(
            func_ro, kappa[0] - np.pi, kappa[1] - np.pi, epsabs=1e-12, epsrel=1e-12,
        )
        return res, (dpaldbo, dpaldro, dpaldkappa)
    else:
        return res
def hyp2f1(a, b, c, z, gradient=False):
    """Gauss hypergeometric function 2F1(a, b; c; z) by direct summation.

    Raises ValueError if the series has not converged to STARRY_2F1_TOL
    within STARRY_2F1_MAXITER terms.  With gradient=True also returns
    d(2F1)/dz = (a b / c) * 2F1(a+1, b+1; c+1; z).
    """
    # Keep the original parameters: the loop below mutates a, b, c to build
    # successive ratios of Pochhammer symbols.
    a0, b0, c0 = a, b, c
    term = a * b * z / c
    value = 1.0 + term
    n = 1
    while (np.abs(term) > STARRY_2F1_TOL) and (n < STARRY_2F1_MAXITER):
        a += 1
        b += 1
        c += 1
        n += 1
        term *= a * b * z / c / n
        value += term
    if n == STARRY_2F1_MAXITER:
        raise ValueError("Series for 2F1 did not converge.")
    if gradient:
        # BUG FIX: the derivative must use the *original* a, b, c; the loop
        # above has incremented them n-1 times, so the old code computed the
        # derivative of a different function.
        dFdz = a0 * b0 / c0 * hyp2f1(a0 + 1, b0 + 1, c0 + 1, z)
        return value, dFdz
    else:
        return value
def el2(x, kc, a, b):
    """
    Vectorized implementation of the `el2` function from
    Bulirsch (1965). In this case, `x` is a *vector* of integration
    limits. The halting condition does not depend on the value of `x`,
    so it's much faster to evaluate all values of `x` at once!
    """
    if kc == 0:
        raise ValueError("Elliptic integral diverged because k = 1.")
    # Initial values of the Bartky/Landen-style iteration; see Bulirsch
    # (1965) for the variable naming, which is preserved here.
    c = x * x
    d = 1 + c
    p = np.sqrt((1 + kc * kc * c) / d)
    d = x / d
    c = d / (2 * p)
    z = a - b
    i = a
    a = (b + a) / 2
    y = np.abs(1 / x)
    f = 0
    # l counts branch crossings so the arctan below lands on the right sheet.
    l = np.zeros_like(x)
    m = 1
    kc = np.abs(kc)
    for n in range(STARRY_EL2_MAX_ITER):
        # One descending-Landen step; all vector quantities update in lockstep.
        b = i * kc + b
        e = m * kc
        g = e / p
        d = f * g + d
        f = c
        i = a
        p = g + p
        c = (d / p + c) / 2
        g = m
        m = kc + m
        a = (b / m + a) / 2
        y = -e / y + y
        y[y == 0] = np.sqrt(e) * c[y == 0] * b
        if np.abs(g - kc) > STARRY_EL2_CA * g:
            # Not yet converged: double the modulus and the branch counters.
            kc = np.sqrt(e) * 2
            l = l * 2
            l[y < 0] = 1 + l[y < 0]
        else:
            break
    if n == STARRY_EL2_MAX_ITER - 1:
        raise ValueError(
            "Elliptic integral EL2 failed to converge after {} iterations.".format(
                STARRY_EL2_MAX_ITER
            )
        )
    # Final branch correction and back-transformation to the integral value.
    l[y < 0] = 1 + l[y < 0]
    e = (np.arctan(m / y) + np.pi * l) * a / m
    e[x < 0] = -e[x < 0]
    return e + c * z
def EllipF(tanphi, k2, gradient=False):
    """Incomplete elliptic integral of the first kind, parameterized by
    tan(phi).  With gradient=True also returns (dF/dtanphi, dF/dk2)."""
    kc2 = 1 - k2
    F = el2(tanphi, np.sqrt(kc2), 1, 1)
    if not gradient:
        return F
    E = EllipE(tanphi, k2)
    # cos^2(phi) and sin^2(phi) expressed through tan(phi).
    cos2 = (1 + tanphi ** 2) ** -1
    sin2 = cos2 * tanphi ** 2
    dFdtanphi = cos2 * (1 - k2 * sin2) ** -0.5
    dFdk2 = 0.5 * (E / (k2 * kc2) - F / k2 - tanphi * dFdtanphi / kc2)
    return F, (dFdtanphi, dFdk2)
def EllipE(tanphi, k2, gradient=False):
    """Incomplete elliptic integral of the second kind, parameterized by
    tan(phi).  With gradient=True also returns (dE/dtanphi, dE/dk2)."""
    kc2 = 1 - k2
    E = el2(tanphi, np.sqrt(kc2), 1, kc2)
    if not gradient:
        return E
    F = EllipF(tanphi, k2)
    # cos^2(phi) and sin^2(phi) expressed through tan(phi).
    cos2 = (1 + tanphi ** 2) ** -1
    sin2 = cos2 * tanphi ** 2
    dEdtanphi = cos2 * (1 - k2 * sin2) ** 0.5
    dEdk2 = 0.5 * (E - F) / k2
    return E, (dEdtanphi, dEdk2)
def rj(x, y, z, p):
    """
    Carlson elliptic integral RJ.
    Bille Carlson,
    Computing Elliptic Integrals by Duplication,
    Numerische Mathematik,
    Volume 33, 1979, pages 1-16.
    Bille Carlson, Elaine Notis,
    Algorithm 577, Algorithms for Incomplete Elliptic Integrals,
    ACM Transactions on Mathematical Software,
    Volume 7, Number 3, pages 398-403, September 1981
    https://people.sc.fsu.edu/~jburkardt/f77_src/toms577/toms577.f
    """
    # Limit checks
    # Arguments are clamped into [STARRY_CRJ_LO_LIM, STARRY_CRJ_HI_LIM];
    # out-of-range values are silently saturated rather than rejected.
    if x < STARRY_CRJ_LO_LIM:
        x = STARRY_CRJ_LO_LIM
    elif x > STARRY_CRJ_HI_LIM:
        x = STARRY_CRJ_HI_LIM
    if y < STARRY_CRJ_LO_LIM:
        y = STARRY_CRJ_LO_LIM
    elif y > STARRY_CRJ_HI_LIM:
        y = STARRY_CRJ_HI_LIM
    if z < STARRY_CRJ_LO_LIM:
        z = STARRY_CRJ_LO_LIM
    elif z > STARRY_CRJ_HI_LIM:
        z = STARRY_CRJ_HI_LIM
    if p < STARRY_CRJ_LO_LIM:
        p = STARRY_CRJ_LO_LIM
    elif p > STARRY_CRJ_HI_LIM:
        p = STARRY_CRJ_HI_LIM
    xn = x
    yn = y
    zn = z
    pn = p
    sigma = 0.0
    power4 = 1.0
    for k in range(STARRY_CRJ_MAX_ITER):
        # Duplication step: shrink the arguments toward their mean until the
        # Taylor expansion below is accurate enough.
        mu = 0.2 * (xn + yn + zn + pn + pn)
        invmu = 1.0 / mu
        xndev = (mu - xn) * invmu
        yndev = (mu - yn) * invmu
        zndev = (mu - zn) * invmu
        pndev = (mu - pn) * invmu
        eps = np.max([np.abs(xndev), np.abs(yndev), np.abs(zndev), np.abs(pndev)])
        if eps < STARRY_CRJ_TOL:
            # Converged: evaluate the degree-3 Taylor tail (coefficients
            # C1..C4 defined at module level) and return.
            ea = xndev * (yndev + zndev) + yndev * zndev
            eb = xndev * yndev * zndev
            ec = pndev * pndev
            e2 = ea - 3.0 * ec
            e3 = eb + 2.0 * pndev * (ea - ec)
            s1 = 1.0 + e2 * (-C1 + 0.75 * C3 * e2 - 1.5 * C4 * e3)
            s2 = eb * (0.5 * C2 + pndev * (-C3 - C3 + pndev * C4))
            s3 = pndev * ea * (C2 - pndev * C3) - C2 * pndev * ec
            value = 3.0 * sigma + power4 * (s1 + s2 + s3) / (mu * np.sqrt(mu))
            return value
        xnroot = np.sqrt(xn)
        ynroot = np.sqrt(yn)
        znroot = np.sqrt(zn)
        lam = xnroot * (ynroot + znroot) + ynroot * znroot
        alpha = pn * (xnroot + ynroot + znroot) + xnroot * ynroot * znroot
        alpha = alpha * alpha
        beta = pn * (pn + lam) * (pn + lam)
        # Accumulate the RC-type correction term for this duplication step;
        # the branch picks the circular vs. hyperbolic form of RC.
        if alpha < beta:
            sigma += power4 * np.arccos(np.sqrt(alpha / beta)) / np.sqrt(beta - alpha)
        elif alpha > beta:
            sigma += power4 * np.arccosh(np.sqrt(alpha / beta)) / np.sqrt(alpha - beta)
        else:
            sigma = sigma + power4 / np.sqrt(beta)
        power4 *= 0.25
        xn = 0.25 * (xn + lam)
        yn = 0.25 * (yn + lam)
        zn = 0.25 * (zn + lam)
        pn = 0.25 * (pn + lam)
    if k == STARRY_CRJ_MAX_ITER - 1:
        raise ValueError(
            "Elliptic integral RJ failed to converge after {} iterations.".format(
                STARRY_CRJ_MAX_ITER
            )
        )
def EllipJ(kappa, k2, p):
    """Evaluate the third-kind-like J integral via Carlson RJ, one value per
    element of *kappa* (rj is scalar-only, hence the loop)."""
    phi = (kappa - np.pi) % (2 * np.pi)
    cos_half = np.cos(phi / 2)
    sin_half = np.sin(phi / 2)
    w = 1 - cos_half ** 2 / k2
    J = np.zeros_like(phi)
    for i in range(len(w)):
        J[i] = (
            (np.cos(phi[i]) + 1)
            * cos_half[i]
            * rj(w[i], sin_half[i] * sin_half[i], 1.0, p[i])
        )
    return J
def ellip(bo, ro, kappa):
    """Compute the definite elliptic integrals (F, E, PI') over the kappa
    integration windows for impact parameter *bo* and radius *ro*.

    Returns the pairwise differences (via pairdiff) of the incomplete
    integrals evaluated at the window endpoints.
    """
    # Helper variables
    k2 = (1 - ro ** 2 - bo ** 2 + 2 * bo * ro) / (4 * bo * ro)
    # Nudge k2 away from exactly 1, where both branches below degenerate.
    if np.abs(1 - k2) < STARRY_K2_ONE_TOL:
        if k2 == 1.0:
            k2 = 1 + STARRY_K2_ONE_TOL
        elif k2 < 1.0:
            k2 = 1.0 - STARRY_K2_ONE_TOL
        else:
            k2 = 1.0 + STARRY_K2_ONE_TOL
    k = np.sqrt(k2)
    k2inv = 1 / k2
    kinv = np.sqrt(k2inv)
    kc2 = 1 - k2
    # Complete elliptic integrals (we'll need them to compute offsets below)
    if k2 < 1:
        # Rescaled complete integrals for the analytic continuation branch.
        K0 = float(ellipk(k2))
        E0 = float(ellipe(k2))
        E0 = np.sqrt(k2inv) * (E0 - (1 - k2) * K0)
        K0 *= np.sqrt(k2)
        RJ0 = 0.0
    else:
        K0 = float(ellipk(k2inv))
        E0 = float(ellipe(k2inv))
        if (bo != 0) and (bo != ro):
            # Complete third-kind integral expressed through RJ.
            p0 = (ro * ro + bo * bo + 2 * ro * bo) / (ro * ro + bo * bo - 2 * ro * bo)
            PI0 = float(ellippi(1 - p0, k2inv))
            RJ0 = -12.0 / (1 - p0) * (PI0 - K0)
        else:
            RJ0 = 0.0
    if k2 < 1:
        # Analytic continuation from (17.4.15-16) in Abramowitz & Stegun
        # A better format is here: https://dlmf.nist.gov/19.7#ii
        # Helper variables
        arg = kinv * np.sin(kappa / 2)
        tanphi = arg / np.sqrt(1 - arg ** 2)
        # Clamp where |arg| >= 1 would make tan(phi) blow up.
        tanphi[arg >= 1] = STARRY_HUGE_TAN
        tanphi[arg <= -1] = -STARRY_HUGE_TAN
        # Compute the elliptic integrals
        F = EllipF(tanphi, k2) * k
        E = kinv * (EllipE(tanphi, k2) - kc2 * kinv * F)
        # Add offsets to account for the limited domain of `el2`
        for i in range(len(kappa)):
            if kappa[i] > 3 * np.pi:
                F[i] += 4 * K0
                E[i] += 4 * E0
            elif kappa[i] > np.pi:
                F[i] = 2 * K0 - F[i]
                E[i] = 2 * E0 - E[i]
    else:
        # Helper variables
        tanphi = np.tan(kappa / 2)
        # Compute the elliptic integrals
        F = EllipF(tanphi, k2inv)  # el2(tanphi, kcinv, 1, 1)
        E = EllipE(tanphi, k2inv)  # el2(tanphi, kcinv, 1, kc2inv)
        # Add offsets to account for the limited domain of `el2`
        for i in range(len(kappa)):
            if kappa[i] > 3 * np.pi:
                F[i] += 4 * K0
                E[i] += 4 * E0
            elif kappa[i] > np.pi:
                F[i] += 2 * K0
                E[i] += 2 * E0
    # Must compute RJ separately
    if np.abs(bo - ro) > STARRY_PAL_BO_EQUALS_RO_TOL:
        p = (ro * ro + bo * bo - 2 * ro * bo * np.cos(kappa)) / (
            ro * ro + bo * bo - 2 * ro * bo
        )
        RJ = EllipJ(kappa, k2, p)
        # Add offsets to account for the limited domain of `rj`
        if RJ0 != 0.0:
            for i in range(len(kappa)):
                if kappa[i] > 3 * np.pi:
                    RJ[i] += 2 * RJ0
                elif kappa[i] > np.pi:
                    RJ[i] += RJ0
    else:
        # bo == ro: the third-kind term vanishes identically.
        RJ = np.zeros_like(kappa)
    # Compute the *definite* elliptic integrals
    F = pairdiff(F)
    E = pairdiff(E)
    PIprime = pairdiff(RJ)
    return F, E, PIprime
| 12,036 | 5,276 |
from django.core.exceptions import ValidationError
from django.utils.deconstruct import deconstructible
from django.utils.translation import gettext_lazy as _
@deconstructible
class CapitalistAccountValidator:
    """Validate a Capitalist account number: one currency-type letter
    followed by at least one digit (e.g. "R1234567")."""

    message = _('Invalid Capitalist account number.')
    code = 'invalid_capitalist_account'
    account_types = (
        'R', # rub
        'U', # usd
        'E', # eur
        'T', # usd tether
        'B', # btc
    )

    def __init__(self, account_types=None):
        # BUG FIX: only override the class-level default when a value is
        # actually supplied.  The old code unconditionally assigned the
        # argument, so the default None replaced the tuple above and
        # __call__ then raised TypeError on `value[0] not in None`.
        if account_types is not None:
            self.account_types = account_types

    def __call__(self, value):
        """Raise ValidationError unless *value* is a valid account number."""
        if len(value) < 2 or value[0] not in self.account_types:
            raise ValidationError(self.message, code=self.code)
        try:
            int(value[1:])
        except ValueError:
            raise ValidationError(self.message, code=self.code)
| 821 | 250 |
from bolt_srl.model import BoltSRLModel
from bolt_srl.predictor import BoltSRLPredictor
from bolt_srl.reader import BoltSRLReader
| 130 | 49 |
from __future__ import print_function
import sys
import importlib
from django.conf import settings
import graphene
from .base import sharedql
# Import each installed app's "<app>.schema" module (if any) so the Query
# mixins it registers via `sharedql` exist before the combined schema is
# built.  Apps without a schema module are skipped.
for app_name in settings.INSTALLED_APPS:
    module_name = app_name + ".schema"
    try:
        # BUG FIX: the old code appended ".schema" twice, importing
        # "<app>.schema.schema" -- which never exists, so every app schema
        # silently failed to load.
        importlib.import_module(module_name)
    except ImportError:
        pass
        # print("Failed to load {module}".format(module=module_name),file=sys.stderr)

# Merge every registered query class into a single root Query type.
bases = tuple(sharedql.query_classes + [graphene.ObjectType, object])
SharedQuery = type('Query', bases, {})
schema = graphene.Schema(query=SharedQuery)
class Solution(object):
    def numIslands(self, grid):
        """Count 4-directionally connected groups of 1-cells in *grid*.

        The grid is modified in place: visited cells are marked with 2.
        """
        if not grid:
            return 0
        islands = 0
        for row in range(len(grid)):
            for col in range(len(grid[0])):
                if grid[row][col] == 1:
                    self.dfs(grid, row, col)
                    islands += 1
        return islands

    def dfs(self, grid, i, j):
        """Flood-fill the island containing (i, j), marking its cells as 2."""
        inside = 0 <= i < len(grid) and 0 <= j < len(grid[0])
        if not inside or grid[i][j] != 1:
            return
        grid[i][j] = 2  # visited marker
        for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            self.dfs(grid, i + di, j + dj)
# Quick manual check (Python 2 print statement); expected output: 3.
test = Solution()
print test.numIslands(
    [[1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [0, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1]])
| 785 | 333 |
"""
import pyaudio
p = pyaudio.PyAudio()
for i in range(p.get_device_count())
print(p.get_device_info_by_index(i))
"""
import pyaudio
p = pyaudio.PyAudio()
for i in range(p.get_device_count()):
info = p.get_device_info_by_index(i)
print(info['index'], info['name'])
| 279 | 114 |
A_0203_11 = {0: {'A': -0.12570904708437197, 'C': -0.20260572185781892, 'E': -4.0, 'D': -4.0, 'G': -4.0, 'F': -4.0, 'I': 0.7830374415420228, 'H': -0.6847918068824875, 'K': -0.6847918068824875, 'M': 0.981018297402464, 'L': 1.1760718746127061, 'N': -0.09649609424570203, 'Q': 0.39410708558422325, 'P': -4.0, 'S': -0.20260572185781892, 'R': -0.4643452234664465, 'T': -0.20260572185781892, 'W': -4.0, 'V': 0.981018297402464, 'Y': -4.0}, 1: {'A': -0.3403513392977473, 'C': -0.5706629683616748, 'E': -0.8255548505683803, 'D': -0.8255548505683803, 'G': -4.0, 'F': -4.0, 'I': 0.9531395462189001, 'H': -4.0, 'K': -4.0, 'M': 0.6129250324406762, 'L': 1.228331821009925, 'N': -4.0, 'Q': -4.0, 'P': -4.0, 'S': -0.5706629683616748, 'R': -4.0, 'T': -0.562748417416651, 'W': -4.0, 'V': -0.33761786332302834, 'Y': -4.0}, 2: {'A': -4.0, 'C': -4.0, 'E': 0.23670505214784382, 'D': 0.23670505214784382, 'G': -4.0, 'F': -4.0, 'I': 0.42243027379318165, 'H': -1.1231934889064845, 'K': -1.1231934889064845, 'M': 0.42243027379318165, 'L': 0.49079465357298746, 'N': -0.6340925619710869, 'Q': -0.9864687962058627, 'P': 0.8737459206814067, 'S': -4.0, 'R': -1.2985424934801089, 'T': -4.0, 'W': -4.0, 'V': 0.42243027379318165, 'Y': -4.0}, 3: {'A': -4.0, 'C': -0.10268688515087193, 'E': 0.798625443205945, 'D': 0.04685544415564716, 'G': -0.8713665210100304, 'F': -0.11724471547541107, 'I': 0.5023433941416019, 'H': 0.1964127848099425, 'K': 0.1964127848099425, 'M': 0.5023433941416019, 'L': 0.8648526865940196, 'N': -1.246319293273537, 'Q': -1.246319293273537, 'P': -4.0, 'S': -0.225063321826744, 'R': 0.1964127848099425, 'T': -0.10268688515087193, 'W': -0.11724471547541107, 'V': 0.5023433941416019, 'Y': -0.11724471547541107}, 4: {'A': -0.15538479149430195, 'C': -0.5729222482329892, 'E': 0.5608828212675953, 'D': 0.4377951470419558, 'G': 0.9044127857344268, 'F': -4.0, 'I': -0.37532329845920753, 'H': 0.7629678170164823, 'K': 0.7629678170164823, 'M': -0.30615329374291206, 'L': -0.30615329374291206, 'N': 0.1902506596653246, 'Q': 
-0.02201461690735955, 'P': -4.0, 'S': -0.7926623896862102, 'R': 0.7629678170164823, 'T': -0.5729222482329892, 'W': -4.0, 'V': -0.30615329374291206, 'Y': -4.0}, 5: {'A': -4.0, 'C': 0.24400425857325767, 'E': -0.09482336309954434, 'D': 0.11453889603948189, 'G': -0.05773521834250697, 'F': -4.0, 'I': 0.9402027034114638, 'H': -0.17809486610864272, 'K': -0.17809486610864272, 'M': -0.13069486723141577, 'L': -1.0852448817585207, 'N': -4.0, 'Q': -4.0, 'P': -4.0, 'S': 0.24400425857325767, 'R': -0.17313599795719686, 'T': 0.24400425857325767, 'W': -4.0, 'V': -0.13069486723141577, 'Y': -4.0}, 6: {'A': -4.0, 'C': -0.23263505475693882, 'E': -0.139933797054501, 'D': -1.0294883184337862, 'G': 0.2745172870714848, 'F': -4.0, 'I': 0.002696009692144382, 'H': -4.0, 'K': -4.0, 'M': 0.002696009692144382, 'L': 0.20098566078545885, 'N': -4.0, 'Q': -4.0, 'P': -0.013076289259420854, 'S': -0.23263505475693882, 'R': -4.0, 'T': -0.5182826632227399, 'W': -4.0, 'V': 0.1886119940743905, 'Y': -4.0}, 7: {'A': -0.7959587403534084, 'C': -0.4056706424113438, 'E': 0.04508448172902091, 'D': 0.04508448172902091, 'G': 0.3178958696288293, 'F': -0.3919711008500978, 'I': 0.693568217814708, 'H': -0.22333454873323907, 'K': -0.22333454873323907, 'M': 0.693568217814708, 'L': 0.7163076440262162, 'N': -0.18296238410395974, 'Q': -0.18296238410395974, 'P': -4.0, 'S': -0.4056706424113438, 'R': -0.22333454873323907, 'T': 0.2038261916632583, 'W': -0.3919711008500978, 'V': 0.693568217814708, 'Y': -0.3919711008500978}, 8: {'A': -4.0, 'C': 0.26023555367482776, 'E': -0.3538520240116577, 'D': -0.23418903787985348, 'G': -4.0, 'F': -1.1226887683330944, 'I': 0.582578651051582, 'H': -0.1888189196560037, 'K': -0.1888189196560037, 'M': 0.582578651051582, 'L': 1.2978761471202371, 'N': 0.6545307662702273, 'Q': 0.3292391040113501, 'P': -4.0, 'S': 0.26023555367482776, 'R': -0.1888189196560037, 'T': 0.26023555367482776, 'W': -1.1226887683330944, 'V': 0.582578651051582, 'Y': -1.1226887683330944}, 9: {'A': 0.20062112019547046, 'C': 
-0.2854309411856969, 'E': -4.0, 'D': -4.0, 'G': -0.6046621948352506, 'F': -4.0, 'I': 0.7395851595080252, 'H': -0.5200214452987034, 'K': -0.5200214452987034, 'M': 0.3813834296984175, 'L': 0.24856066321801923, 'N': -0.7874703169589286, 'Q': -0.7874703169589286, 'P': -4.0, 'S': -0.2854309411856969, 'R': -0.5200214452987034, 'T': -0.404652124993784, 'W': -4.0, 'V': 0.18080996663942733, 'Y': -4.0}, 10: {'A': 0.3821540129505695, 'C': -4.0, 'E': -4.0, 'D': -4.0, 'G': -4.0, 'F': -1.3249184992500316, 'I': 0.7372045737079358, 'H': -1.2530000347896086, 'K': -1.2530000347896086, 'M': 1.1325335673883987, 'L': 0.015146957676202309, 'N': -4.0, 'Q': -4.0, 'P': -4.0, 'S': -4.0, 'R': -1.2530000347896086, 'T': -4.0, 'W': -1.3249184992500316, 'V': 1.5210080375139177, 'Y': -1.3267309428143914}, -1: {'slope': 0.11563050533541314, 'intercept': -0.058700970796826395}} | 4,847 | 4,215 |
import werkzeug

# Optional dict-flavoured dependencies: fall back to None when absent so the
# isinstance checks in QuerySpec can skip them cheaply.
# BUG FIX: narrowed from a bare `except:`, which also swallowed unrelated
# failures inside the modules (SyntaxError, KeyboardInterrupt, ...).
try:
    import dotmap
except ImportError:
    dotmap = None
try:
    import addict
except ImportError:
    addict = None
class QuerySpec(object):
    """Normalised view over a query supplied as a werkzeug MultiDict, a list
    of (name, value) pairs, a list of {'name': ..., 'value': ...} dicts, or
    a plain/dot-accessible mapping."""

    def __init__(self, query):
        # TODO: list all attributes of a query spec up front so others know what to expect
        md = werkzeug.MultiDict()
        for q in query:
            # isinstance instead of `type(q) ==` so subclasses are accepted.
            if isinstance(q, dict):
                md.add(q['name'], q['value'].strip())
            elif isinstance(q, (list, tuple)):
                md.add(q[0], q[1].strip())
            else:
                # Mapping-style input: q is a key into the original query.
                md.add(q, query[q])
        self.ismultidict = False
        self.isdotmap = False
        if isinstance(query, werkzeug.MultiDict):
            self.ismultidict = True
        elif addict and isinstance(query, addict.Dict):
            self.isdotmap = True
        elif dotmap and isinstance(query, dotmap.DotMap):
            self.isdotmap = True
        elif isinstance(query, list):
            self.ismultidict = True
        else:
            raise Exception("Unknown entry for query spec")
        self.md = md
        # we will need to put together an exported interface
        self.fields = self.get_fields()
        self.groupby = self.get_groupby()

    def __makedict__(self):
        """Export as a plain dict; '[]'-suffixed keys keep their list values
        when the source supported multiple values per key."""
        ret = {}
        for f in self.md:
            if f.endswith("[]") and self.ismultidict:
                ret[f] = self.md.getlist(f)
            else:
                ret[f] = self.md.get(f)
        return ret

    def __json__(self):
        return self.__makedict__()

    def setlist(self, k, v):
        self.md.setlist(k, v)

    def set(self, k, v):
        """Replace any existing values for *k* with the single value *v*."""
        if k in self.md:
            self.md.pop(k)
        self.md.add(k, v)

    def add(self, k, v):
        self.md.add(k, v)

    def getlist(self, k, d=None):
        """Return all values for *k* (always a list; empty when absent).

        BUG FIX: the default for *d* was a shared mutable list ([]).  The
        parameter is unused but kept for interface compatibility, now with
        a safe immutable default.
        """
        if self.ismultidict:
            return self.md.getlist(k)
        return self.md.get(k) or []

    def get(self, k, d=None):
        return self.md.get(k, d)

    def get_metric(self):
        """Return the metric name ('metric', falling back to 'agg') with any
        leading '$' stripped."""
        op = self.md.get('metric')
        if not op:
            op = self.md.get('agg', '')
        return op.lstrip("$")

    def get_groupby(self):
        """Group-by columns from 'groupby[]', falling back to 'group_by'."""
        return self.getlist('groupby[]') or self.getlist('group_by')

    def get_fields(self):
        """Selected fields from 'fields[]', falling back to 'fieldset'."""
        return self.getlist('fields[]') or self.getlist('fieldset')

    def get_custom_fields(self):
        """Custom fields from 'custom_fields[]' or 'custom_fields'."""
        return self.getlist('custom_fields[]') or self.getlist('custom_fields')
| 2,644 | 852 |
# Generated by Django 2.2.1 on 2019-05-26 00:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add denormalised counters to CourseOrg.

    verbose_name translations: '课程人数' = course count, '学习人数' =
    learner count.
    """

    dependencies = [
        ('organization', '0002_courseorg_category'),
    ]
    operations = [
        migrations.AddField(
            model_name='courseorg',
            name='course_num',
            field=models.IntegerField(default=0, verbose_name='课程人数'),
        ),
        migrations.AddField(
            model_name='courseorg',
            name='student_num',
            field=models.IntegerField(default=0, verbose_name='学习人数'),
        ),
    ]
| 595 | 194 |
import sys
MASTER_PASSWORD = "opensesame"
password = input("Please enter the super secret password: ")
attempt_count = 1
while password != MASTER_PASSWORD:
if attempt_count > 3:
sys.exit("Too many invalid password attempts")
password = input("Invalid password, try again: ")
attempt_count += 1
print("Welcome to secret town")
| 349 | 105 |
import json
import os
from typing import Dict, Tuple, List
import numpy as np
import tensorflow as tf
from keras import backend as K
from textgenrnn.model import textgenrnn_model
def rnn_generate(config_path: str, vocab_path: str, weights_path: str, min_words=50, temperature=0.5,
                 start_char='\n', reset=False) -> Tuple[str, int]:
    """Generate text from a saved textgenrnn model.

    Samples one character at a time (temperature-scaled multinomial, or
    argmax when *temperature* is falsy) until *min_words* words have been
    produced and a newline is emitted, or the character budget runs out.
    Returns ``(generated_text, word_count)``.
    """
    # Load configs
    with open(config_path, 'r', encoding='utf8', errors='ignore') as json_file:
        config = json.load(json_file)
    with open(vocab_path, 'r', encoding='utf8', errors='ignore') as json_file:
        vocab = json.load(json_file)
    # Prepare vars
    num_classes = len(vocab) + 1
    indices_char = {v: k for k, v in vocab.items()}
    # Build model
    model = textgenrnn_model(num_classes, cfg=config, weights_path=weights_path)
    # Start with random letter
    if not start_char:
        ret_str = np.random.choice(list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'))
    else:
        ret_str = start_char
    # Multi-input models expose (char, context) heads; keep only the char head.
    if len(model.inputs) > 1:
        model = tf.keras.models.Model(inputs=model.inputs[0], outputs=model.outputs[1])
    num_words = 0
    num_char = 0
    # Add 50 buffer, ~5 is the average length of an English word, add a bit more
    min_chars = (min_words + 50) * 6
    while num_char < min_chars:
        # Re-encode the full running text each step; unknown chars map to 0.
        encoded = np.array([vocab.get(x, 0) for x in ret_str])
        encoded_text = tf.keras.preprocessing.sequence.pad_sequences([encoded], maxlen=config['max_length'])
        preds = np.asarray(model.predict(encoded_text, batch_size=1)[0]).astype('float64')
        if temperature is None or temperature == 0.0:
            index = np.argmax(preds)
        else:
            # Temperature sampling in log space, then renormalise.
            preds = np.log(preds + tf.keras.backend.epsilon()) / temperature
            exp_preds = np.exp(preds)
            preds = exp_preds / np.sum(exp_preds)
            probas = np.random.multinomial(1, preds, 1)
            index = np.argmax(probas)
        # prevent function from being able to choose 0 (placeholder)
        # choose 2nd best index from preds
        if index == 0:
            index = np.argsort(preds)[-2]
        next_char = indices_char[index]
        ret_str += next_char
        num_char += 1
        if next_char == ' ':
            num_words += 1
        # Only stop after new line
        if (num_words >= min_words) and (next_char == '\n'):
            break
    if reset:
        # NOTE(review): tf.reset_default_graph() is a TF1 API; confirm the
        # pinned TensorFlow version still exposes it.
        K.clear_session()
        tf.reset_default_graph()
    return ret_str, num_words
def rnn_guess(models_dir: str, check_models: List[str], in_str: str, reset=False) -> Dict[str, float]:
    """For each named model, score how likely it finds the last character of
    *in_str* given the preceding characters.

    Returns {model_name: probability * 100}.  Models are resolved through
    get_paths(models_dir=..., model_name=...).
    """
    ret_dict: Dict[str, float] = {}
    for name in check_models:
        paths = get_paths(models_dir=models_dir, model_name=name)
        # Load configs
        with open(paths['config_path'], 'r', encoding='utf8', errors='ignore') as json_file:
            config = json.load(json_file)
        with open(paths['vocab_path'], 'r', encoding='utf8', errors='ignore') as json_file:
            vocab = json.load(json_file)
        # Prepare vars
        num_classes = len(vocab) + 1
        # Build model
        model = textgenrnn_model(num_classes, cfg=config, weights_path=paths['weights_path'])
        # Config vars
        # Multi-input models expose (char, context) heads; keep only the char head.
        if len(model.inputs) > 1:
            model = tf.keras.models.Model(inputs=model.inputs[0], outputs=model.outputs[1])
        # Predict the distribution after all but the last character, then
        # read off the probability assigned to the actual last character.
        encoded = np.array([vocab.get(x, 0) for x in in_str[:-1]])
        encoded_text = tf.keras.preprocessing.sequence.pad_sequences([encoded], maxlen=config['max_length'])
        preds = np.asarray(model.predict(encoded_text, batch_size=1)[0]).astype('float64')
        pred_next = preds[vocab.get(in_str[-1], 0)]
        ret_dict[name] = pred_next * 100
    if reset:
        # NOTE(review): tf.reset_default_graph() is a TF1 API; confirm the
        # pinned TensorFlow version still exposes it.
        K.clear_session()
        tf.reset_default_graph()
    return ret_dict
def get_auto_name(name: str, rnn_layers: int, rnn_size: int, rnn_bidirectional=False, rnn_type='lstm') -> str:
    """Build the canonical model name "<name>_<layers>l<size>[bi][_gru]"."""
    bi_tag = 'bi' if rnn_bidirectional else ''
    type_tag = '_gru' if rnn_type == 'gru' else ''
    return f"{name}_{rnn_layers}l{rnn_size}{bi_tag}{type_tag}"
def get_paths(model_name='', model_dir='', models_dir='', **kwargs) -> Dict[str, str]:
    """Return model base directory and paths to config, vocab and weights

    If model_dir is specified, returns the respective files in it or raises RuntimeError.
    Otherwise, model_name, models_dir and parameters for get_auto_name are required.
    """
    ret = {
        "model_dir": "",
        "config_path": "",
        "vocab_path": "",
        "weights_path": "",
    }
    if model_dir:
        # Discover the three files inside an explicitly given directory.
        if not os.path.exists(model_dir):
            raise RuntimeError(f'{model_dir} does not exist')
        ret['model_dir'] = model_dir
        for entry in os.listdir(model_dir):
            if entry.endswith('_config.json'):
                ret['config_path'] = os.path.join(model_dir, entry)
            elif entry.endswith('_vocab.json'):
                ret['vocab_path'] = os.path.join(model_dir, entry)
            elif entry.endswith('_weights.hdf5') and '_epoch_' not in entry:
                # Per-epoch checkpoints are skipped; only the final weights count.
                ret['weights_path'] = os.path.join(model_dir, entry)
        for k, found in ret.items():
            if not found:
                raise RuntimeError(f'Cannot find {k}')
    else:
        # Derive the paths from the model name and the models root.
        if not models_dir:
            raise TypeError('models_dir parameter is required')
        if not model_name:
            model_name = get_auto_name(**kwargs)
        base = os.path.join(models_dir, model_name)
        ret['model_dir'] = base
        ret['config_path'] = os.path.join(base, f'{model_name}_config.json')
        ret['vocab_path'] = os.path.join(base, f'{model_name}_vocab.json')
        ret['weights_path'] = os.path.join(base, f'{model_name}_weights.hdf5')
    return ret
| 5,984 | 2,003 |
from datetime import datetime
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, no_translations
from note.models import NoteIndexPage
from note.models import NotePage
class Command(BaseCommand):
    help = 'Create note page'

    def add_arguments(self, parser):
        """Register the required CLI options for page creation."""
        parser.add_argument(
            '--index-id', action='store', required=True,
            help='set index page id')
        parser.add_argument(
            '--title', action='store', required=True,
            help='set title')
        parser.add_argument(
            '--intro', action='store', required=True,
            help='set intro')
        parser.add_argument(
            '--owner', action='store', required=True,
            help='set owner')

    @no_translations
    def handle(self, *args, **options):
        """Create a NotePage under the given index page.

        Raises DoesNotExist if the index page or owner cannot be found.
        """
        index = NoteIndexPage.objects.get(id=options['index_id'])
        User = get_user_model()
        owner = User.objects.get(username=options['owner'])
        note = NotePage(
            title=options['title'],
            intro=options['intro'],
            date=datetime.now(),
            owner=owner)
        # add_child attaches the page into the tree -- presumably a Wagtail
        # page model; confirm against note.models.
        index.add_child(instance=note)
        self.stdout.write(self.style.SUCCESS(f'created: {repr(note)}'))
| 1,268 | 351 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-16 12:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop Organization.notes; add instagram/twitter handles and widen the
    facebook field."""

    dependencies = [
        ('organizations', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='organization',
            name='notes',
        ),
        migrations.AddField(
            model_name='organization',
            name='instagram',
            field=models.CharField(blank=True, max_length=128),
        ),
        migrations.AddField(
            model_name='organization',
            name='twitter',
            field=models.CharField(blank=True, max_length=64),
        ),
        migrations.AlterField(
            model_name='organization',
            name='facebook',
            field=models.CharField(blank=True, max_length=128),
        ),
    ]
| 918 | 277 |
from flask_unchained import BundleConfig
class Config(BundleConfig):
    """
    Default configuration options for the controller bundle.
    """

    FLASH_MESSAGES = True
    """
    Whether or not to enable flash messages.
    NOTE: This only works for messages flashed using the
    :meth:`flask_unchained.Controller.flash` method;
    using the :func:`flask.flash` function directly will not respect this setting.
    """

    TEMPLATE_FILE_EXTENSION = '.html'
    """
    The default file extension to use for templates.
    """

    # NOTE(review): CSRF protection is OFF by default; deployments handling
    # form posts should enable this explicitly.
    WTF_CSRF_ENABLED = False
    """
    Whether or not to enable CSRF protection.
    """

    CSRF_TOKEN_COOKIE_NAME = 'csrf_token'
    """
    The cookie name to set on responses for the CSRF token. Defaults to "csrf_token".
    """
| 780 | 240 |
from .norms.api import *
from .losses.api import *
from .sampler import selective_sampler_MH
| 94 | 34 |
from rest_framework import serializers
from .models import Category, Brand, Supply, Unit, SupplyItem, Packaging, Product
class CategorySerializer(serializers.HyperlinkedModelSerializer):
    """Serialize a Category as its id and display name."""

    class Meta:
        model = Category
        fields = ['id', 'name']
class BrandSerializer(serializers.HyperlinkedModelSerializer):
    """Serialize a Brand as its id and display name."""

    class Meta:
        model = Brand
        fields = ['id', 'name']
class UnitSerializer(serializers.HyperlinkedModelSerializer):
    """Serialize a Unit as its id and display name."""

    class Meta:
        model = Unit
        fields = ['id', 'name']
class PackagingSerializer(serializers.HyperlinkedModelSerializer):
    """Serialize a Packaging option as its id and display name."""

    class Meta:
        model = Packaging
        fields = ['id', 'name']
class SupplySerializer(serializers.HyperlinkedModelSerializer):
    """Serialize a Supply with its nested category and stock statistics."""

    category = CategorySerializer()
    # Presumably annotated onto the queryset by the view -- not model columns.
    num_items = serializers.IntegerField(read_only=True)
    order_value = serializers.IntegerField(read_only=True)

    class Meta:
        model = Supply
        fields = ['id', 'name', 'category', 'min_count', 'num_items',
                  'order_value']
class ProductSerializer(serializers.HyperlinkedModelSerializer):
    """Serialize a Product with all nested lookup objects inlined."""

    supply = SupplySerializer()
    brand = BrandSerializer()
    unit = UnitSerializer()
    packaging = PackagingSerializer()
    # Presumably annotated onto the queryset by the view -- not model columns.
    num_items = serializers.IntegerField(read_only=True)
    order_value = serializers.IntegerField(read_only=True)

    class Meta:
        model = Product
        fields = ['id', 'supply', 'name', 'brand', 'ean', 'unit', 'amount',
                  'bio_label', 'packaging', 'min_count', 'num_items',
                  'order_value']
class SupplyItemSerializer(serializers.HyperlinkedModelSerializer):
    """Serialize a stocked SupplyItem together with its full product record."""

    product = ProductSerializer()

    class Meta:
        model = SupplyItem
        fields = ['id', 'product', 'purchase_date', 'best_before_date']
| 1,731 | 490 |
from flask import url_for, request, current_app
from app.auth.models import User, PreAllowedUser
from app.extensions import db
def test_register_and_login(client, database):
    """End-to-end flow: register, log in unconfirmed, then confirm by token.

    The asserted flash messages are the app's Portuguese UI strings.
    """
    # Register an (as yet) unconfirmed user.
    response = client.post(url_for('auth.register'), data={
        'email': 'a@a.com',
        'password': 'a',
        'password2': 'a',
    }, follow_redirects=True)
    assert response.status_code == 200
    response_data = response.get_data(as_text=True)
    # "confirmation message" flash shown after registering.
    assert 'mensagem de confirmação' in response_data
    # Log in and see the unconfirmed-account page.
    response = client.post(url_for('auth.login'), data={
        'email': 'a@a.com',
        'password': 'a',
    }, follow_redirects=True)
    assert response.status_code == 200
    response_data = response.get_data(as_text=True)
    # "You have not yet confirmed your account" notice.
    assert 'Você ainda não confirmou sua conta' in response_data
    # Confirm the user account by token.
    user = User.query.filter_by(email='a@a.com').first()
    token = user.generate_confirmation_token()
    response = client.get(url_for('auth.confirm', token=token),
                          follow_redirects=True)
    response_data = response.get_data(as_text=True)
    # "Account verified" flash.
    assert 'Conta verificada' in response_data
    # TODO: fix this code to consider an authorized and non-authorized user
    # ("with client:" is needed to access the request context)
    # with client:
    #     # Log in and redirect user to MAIN_ENDPOINT
    #     response = client.post(url_for('auth.login'), data={
    #         'email': 'a@a.com',
    #         'password': 'a',
    #     }, follow_redirects=True)
    #     assert response.status_code == 200
    #     assert request.path == url_for(current_app.config.get('MAIN_ENDPOINT'))
    #     print(user.role)
    #     # Log out and redirect user to login screen
    #     response = client.get(url_for('auth.logout'),
    #                           follow_redirects=True)
    #     data = response.get_data(as_text=True)
    #     assert response.status_code == 200
    #     assert 'Log Out realizado' in data
    #     assert request.path == '/auth/login'
| 2,077 | 650 |
from django.forms import ModelForm, ModelChoiceField
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from dal import autocomplete
from .models import (
DiscountCategory, DiscountCombo, DiscountComboComponent,
PointGroup, PricingTierGroup, RegistrationDiscount,
CustomerGroupDiscount, CustomerDiscount
)
from danceschool.core.models import (
Registration, Registration, PricingTier, Customer
)
class DiscountCategoryAdmin(admin.ModelAdmin):
    """Changelist configuration for discount categories."""

    list_display = ['name', 'order', 'cannotCombine']
    list_editable = ['order']
    list_filter = ['cannotCombine']
    search_fields = ['name']
class DiscountComboComponentInline(admin.StackedInline):
    # Inline editor for the components that make up a DiscountCombo.
    model = DiscountComboComponent
    extra = 1
    # Inner tuples group those fields onto the same admin form row.
    fields = (('pointGroup', 'quantity', ), 'allWithinPointGroup', ('level', 'weekday'), )
class CustomerDiscountInlineForm(ModelForm):
    # Inline form for CustomerDiscount that replaces the default customer
    # dropdown with an AJAX autocomplete, so every customer is not loaded
    # into the page at once.
    customer = ModelChoiceField(
        queryset=Customer.objects.all(),
        widget=autocomplete.ModelSelect2(
            url='autocompleteCustomer',
            attrs={
                # This will set the input placeholder attribute:
                'data-placeholder': _('Enter a customer name'),
                # This will set the yourlabs.Autocomplete.minimumCharacters
                # options, the naming conversion is handled by jQuery
                'data-minimum-input-length': 2,
                'data-max-results': 4,
                'class': 'modern-style',
            }
        )
    )

    class Meta:
        model = CustomerDiscount
        exclude = []

    class Media:
        # jQuery must load before the Select2 widget can initialize.
        js = (
            'admin/js/vendor/jquery/jquery.min.js',
            'admin/js/jquery.init.js',
        )
class CustomerGroupDiscountInline(admin.StackedInline):
    # Collapsible inline restricting a combo to customer groups.
    model = CustomerGroupDiscount
    extra = 1
    classes = ['collapse', ]
class CustomerDiscountInline(admin.StackedInline):
    # Collapsible inline restricting a combo to individual customers,
    # using the autocomplete-enabled form above.
    model = CustomerDiscount
    form = CustomerDiscountInlineForm
    extra = 1
    classes = ['collapse', ]
class DiscountComboAdminForm(ModelForm):
    # Admin form whose JS shows/collapses the type-specific fieldsets
    # depending on the selected discountType.
    class Meta:
        model = DiscountCombo
        exclude = []

    class Media:
        js = (
            'admin/js/vendor/jquery/jquery.min.js',
            'js/discountcombo_collapsetypes.js',
        )
class DiscountComboAdmin(admin.ModelAdmin):
    """Admin for DiscountCombo with type-specific fieldsets and bulk
    enable/disable actions."""

    inlines = [
        DiscountComboComponentInline, CustomerGroupDiscountInline,
        CustomerDiscountInline
    ]
    form = DiscountComboAdminForm
    list_display = (
        'name', 'category', 'discountType', 'active', 'expirationDate',
        'restrictions'
    )
    list_filter = (
        'category', 'discountType', 'active', 'newCustomersOnly', 'expirationDate'
    )
    ordering = ('name', )
    actions = ['enableDiscount', 'disableDiscount']
    fieldsets = (
        (None, {
            'fields': (
                'name', 'category',
                ('active', 'expirationDate'),
                'newCustomersOnly', 'studentsOnly', 'daysInAdvanceRequired',
                'firstXRegistered', 'customerMatchRequired', 'discountType',
            )
        }),
        # The 'type_*' classes are toggled by discountcombo_collapsetypes.js
        # so only the fieldset matching the chosen discountType is shown.
        (_('Flat-Price Discount (in default currency)'), {
            'classes': ('type_flatPrice', ),
            'fields': ('onlinePrice', 'doorPrice'),
        }),
        (_('Dollar Discount (in default currency)'), {
            'classes': ('type_dollarDiscount', ),
            'fields': ('dollarDiscount', ),
        }),
        (_('Percentage Discount'), {
            'classes': ('type_percentageDiscount', ),
            'fields': ('percentDiscount', 'percentUniversallyApplied'),
        }),
    )

    def restrictions(self, obj):
        """List-display column summarizing this combo's eligibility rules."""
        text = []
        if obj.studentsOnly:
            text.append(_('Students only'))
        if obj.newCustomersOnly:
            text.append(_('First-time customer'))
        if obj.daysInAdvanceRequired:
            # Fix: translate first, then interpolate. Interpolating inside
            # _() builds a msgid with the number baked in, which can never
            # match an entry in the translation catalog.
            text.append(_('%s day advance registration') % obj.daysInAdvanceRequired)
        if obj.firstXRegistered:
            text.append(_('First %s to register') % obj.firstXRegistered)
        if obj.customerMatchRequired:
            text.append(_('Primary customer registrations only'))
        return ', '.join([str(x) for x in text])
    restrictions.short_description = _('Restrictions')

    def _set_active_state(self, request, queryset, active, verb):
        """Shared implementation for the enable/disable admin actions."""
        rows_updated = queryset.update(active=active)
        if rows_updated == 1:
            message_bit = "1 discount was"
        else:
            message_bit = "%s discounts were" % rows_updated
        self.message_user(request, "%s successfully %s." % (message_bit, verb))

    def disableDiscount(self, request, queryset):
        """Admin action: mark the selected discounts inactive."""
        self._set_active_state(request, queryset, False, "disabled")
    disableDiscount.short_description = _('Disable selected Discounts')

    def enableDiscount(self, request, queryset):
        """Admin action: mark the selected discounts active."""
        self._set_active_state(request, queryset, True, "enabled")
    enableDiscount.short_description = _('Enable selected Discounts')
class RegistrationDiscountInline(admin.TabularInline):
    # Read-only inline listing the discounts applied to a registration.
    model = RegistrationDiscount
    readonly_fields = ('discount', 'discountAmount')
    exclude = ('applied',)
    extra = 0

    # Prevents adding new discounts without going through
    # the standard registration process
    def has_add_permission(self, request, obj=None):
        return False

    def has_delete_permission(self, request, obj=None):
        return False
class PricingTierGroupInline(admin.TabularInline):
    # Inline linking pricing tiers to point groups; also injected into the
    # PricingTier admin at module load below.
    model = PricingTierGroup
    extra = 0
    verbose_name = _('pricing tier discount group')
    verbose_name_plural = _('pricing tier discount groups')
class PointGroupAdmin(admin.ModelAdmin):
    # Point groups are edited together with their pricing-tier links.
    inlines = (PricingTierGroupInline, )
    list_display = ('name', )
    ordering = ('name', )
# This adds the inlines to Registration and PricingTier without subclassing
# their already-registered ModelAdmins.
# NOTE(review): admin.site._registry is a private Django API, and mutating
# the registered admin instance's `inlines` assumes that attribute is
# instance-owned rather than shared class state -- confirm on upgrade.
admin.site._registry[Registration].inlines.insert(0, RegistrationDiscountInline)
admin.site._registry[PricingTier].inlines.insert(0, PricingTierGroupInline)
admin.site.register(DiscountCategory, DiscountCategoryAdmin)
admin.site.register(DiscountCombo, DiscountComboAdmin)
admin.site.register(PointGroup, PointGroupAdmin)
| 6,241 | 1,774 |
#!/usr/bin/env python
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to start up a demo Cassandra cluster on Google Compute Engine."""
import os
import time
import sys
# Read in global config variables (NODES_PER_ZONE, API_VERSION, IMAGE,
# NULL, BE, subprocess, get_cluster, etc.) directly into this module's
# namespace. NOTE(review): execfile exists only on Python 2.
mydir = os.path.dirname(os.path.realpath(__file__))
common = mydir + os.path.sep + "common.py"
execfile(common, globals())
# Find a US region with at least two UP zones.
def find_zones():
    """Find a US region with at least two UP zones.

    Returns the zone-name list of the first qualifying region; raises BE
    (from common.py) when no US region has 2+ UP zones.
    """
    # Trailing comma after print(...) is the Python 2 "no newline" idiom.
    print("=> Finding suitable region, selecting zones:"),
    regions = subprocess.check_output(["gcutil", "--service_version",
        API_VERSION, "--format=names", "listregions", "--filter",
        "name eq 'us.*'"], stderr=NULL).split('\n')[0:-1]
    for region in regions:
        zones = subprocess.check_output(["gcutil", "--service_version",
            API_VERSION, "--format=names", "listzones", "--filter",
            "status eq UP", "--filter", "name eq '%s.*'" % region],
            stderr=NULL).split('\n')[0:-1]
        if len(zones) > 1:
            print(zones)
            return zones
    raise BE("Error: No suitable US regions found with 2+ zones")
# Create all nodes synchronously
def create_nodes(zones):
    """Create all nodes synchronously.

    Launches NODES_PER_ZONE instances per zone via gcutil and waits for
    each to reach RUNNING; raises BE on the first failed instance.
    """
    print("=> Creating %d '%s' '%s' nodes" % (NODES_PER_ZONE*len(zones),
        IMAGE, MACHINE_TYPE))
    for zone in zones:
        for i in range(NODES_PER_ZONE):
            # Node name encodes the zone's trailing letter plus an index.
            nodename = "%s-%s-%d" % (NODE_PREFIX, zone[-1:], i)
            r = subprocess.call(["gcutil",
                "--service_version=%s" % API_VERSION,
                "addinstance", nodename, "--zone=%s" % zone,
                "--machine_type=%s" % MACHINE_TYPE, "--image=%s" % IMAGE,
                "--service_account_scopes=%s" % SCOPES,
                "--wait_until_running"], stdout=NULL, stderr=NULL)
            if r != 0:
                raise BE("Error: could not create node %s" % nodename)
            print("--> Node %s created" % nodename)
# Customize node_config_tmpl script
def customize_config_script(cluster):
    """Customize the node_config_tmpl script.

    Fills in account/bucket/JRE placeholders plus the computed SEED IP
    list and PropertyFileSnitch text, then renders the runnable script.
    Returns (seed_data, script_path).
    """
    variable_substitutes = {
        '@GCE_USERNAME@': GCE_USERNAME,
        '@GCS_BUCKET@': GCS_BUCKET,
        '@JRE7_INSTALL@': JRE7_INSTALL,
        '@JRE7_VERSION@': JRE7_VERSION
    }
    seed_data, seed_ips = _identify_seeds(cluster)
    variable_substitutes['@SEED_IPS@'] = ",".join(seed_ips)
    variable_substitutes['@SNITCH_TEXT@'] = _generate_snitch_text(cluster)
    script_path = _update_node_script(variable_substitutes)
    return seed_data, script_path
# Configure each cluster node
def configure_nodes(cluster, script_path):
    """Configure each cluster node.

    Pushes the rendered config script to every node and runs it via SSH;
    raises BE if executing the script fails on any node.
    """
    print("=> Uploading and running configure script on nodes:"),
    for zone in cluster.keys():
        for node in cluster[zone]:
            # The push result is ignored; only the remote run is checked.
            _ = subprocess.call(["gcutil",
                "--service_version=%s" % API_VERSION, "push",
                "--zone=%s" % zone, node['name'], script_path,
                "/tmp/c.sh"], stdout=NULL, stderr=NULL)
            done = subprocess.call(["gcutil",
                "--service_version=%s" % API_VERSION, "ssh",
                "--zone=%s" % zone, node['name'],
                "sudo chmod +x /tmp/c.sh && sudo /tmp/c.sh"],
                stdout=NULL, stderr=NULL)
            if done != 0:
                err = "Error: problem uploading/running config script "
                err += "on %s" % node['name']
                raise BE(err)
            # Progress dot per configured node.
            print("."),
            sys.stdout.flush()
    print("done.")
# Perform variable substitutions on the node_config_tmpl script
def _update_node_script(variable_substitutes):
    """Render node_config_tmpl into an executable .sh script.

    Args:
        variable_substitutes: mapping of '@PLACEHOLDER@' -> replacement text.
    Returns:
        Path of the generated script (template path + ".sh").
    """
    template = "%s%s%s" % (os.path.dirname(os.path.realpath(__file__)),
                           os.path.sep, "node_config_tmpl")
    script_path = template + ".sh"
    # Fix: use context managers so both files are closed even if a read or
    # write raises (the originals leaked handles on error). items() also
    # replaces the Python-2-only iteritems().
    with open(template, "r") as template_fh:
        with open(script_path, "w") as script_fh:
            for line in template_fh:
                for k, v in variable_substitutes.items():
                    if line.find(k) > -1:
                        line = line.replace(k, v)
                script_fh.write(line)
    return script_path
# Select the SEED nodes (the first node of every zone).
def _identify_seeds(cluster):
    """Pick the first node in each zone to serve as a Cassandra SEED.

    Returns (seed_data, seed_ips): the chosen node dicts and their IPs.
    """
    seed_data = [zone_nodes[0] for zone_nodes in cluster.values()]
    seed_ips = [seed_node['ip'] for seed_node in seed_data]
    return seed_data, seed_ips
# Build the contents of the Cassandra PropertyFileSnitch topology file.
def _generate_snitch_text(cluster):
    """Build the PropertyFileSnitch text mapping node IPs to zone racks.

    Each GCE zone becomes a ZONE<n> datacenter; unknown hosts default to
    ZONE1:RAC1.
    """
    lines = [
        "# Auto-generated topology snitch during cluster turn-up", "#",
        "# Cassandra node private IP=Datacenter:Rack", "#", ""
    ]
    zone_number = 1
    for zone in cluster.keys():
        lines.append('# Zone "%s" => ZONE%d' % (zone, zone_number))
        for node in cluster[zone]:
            lines.append("%s=ZONE%d:RAC1" % (node['ip'], zone_number))
        zone_number += 1
        lines.append("")
    lines.append("# default for unknown hosts")
    lines.append("default=ZONE1:RAC1")
    lines.append("")
    return "\n".join(lines)
# Cleanly start up Cassandra on specified node
def node_start_cassandra(zone, nodename):
    """Cleanly start up Cassandra on specified node.

    Retries up to 5 times: stop the service, remove stale pid and data
    files, start it, then check for the pid file. Raises BE if Cassandra
    never comes up.
    """
    status = "notok"
    tries = 0
    print("--> Attempting to start cassandra on node %s" % nodename),
    while status != "ok" and tries < 5:
        _ = subprocess.call(["gcutil", "--service_version=%s" % API_VERSION,
            "ssh", "--zone=%s" % zone, nodename,
            "sudo service cassandra stop"], stdout=NULL, stderr=NULL)
        _ = subprocess.call(["gcutil", "--service_version=%s" % API_VERSION,
            "ssh", "--zone=%s" % zone, nodename,
            "sudo rm -f /var/run/cassandra/cassandra.pid"],
            stdout=NULL, stderr=NULL)
        # Wipe any stale data so the node bootstraps fresh each attempt.
        _ = subprocess.call(["gcutil", "--service_version=%s" % API_VERSION,
            "ssh", "--zone=%s" % zone, nodename,
            "sudo rm -rf /var/lib/cassandra/*"], stdout=NULL, stderr=NULL)
        _ = subprocess.call(["gcutil", "--service_version=%s" % API_VERSION,
            "ssh", "--zone=%s" % zone, nodename,
            "sudo service cassandra start"], stdout=NULL, stderr=NULL)
        # Existence of the pid file is used as the "started" signal.
        r = subprocess.call(["gcutil", "--service_version=%s" % API_VERSION,
            "ssh", "--zone=%s" % zone, nodename,
            "sudo ls /var/run/cassandra/cassandra.pid"],
            stdout=NULL, stderr=NULL)
        if r == 0:
            status = "ok"
            print("UP")
            break
        tries += 1
        print("."),
    if status == "notok":
        print("FAILED")
        raise BE("Error: cassandra failing to start on node %s" % nodename)
# Bring up cassandra on cluster nodes, SEEDs first
def start_cluster(seed_data, cluster):
    """Bring up cassandra on cluster nodes, SEEDs first.

    SEED nodes must be running before the remaining nodes can join the ring.
    """
    # Start SEED nodes first.
    print("=> Starting cassandra cluster SEED nodes")
    started_nodes = []
    for node in seed_data:
        node_start_cassandra(node['zone'], node['name'])
        started_nodes.append(node['name'])
    # Start remaining non-seed nodes.
    print("=> Starting cassandra cluster non-SEED nodes")
    for z in cluster.keys():
        for node in cluster[z]:
            if node['name'] not in started_nodes:
                node_start_cassandra(z, node['name'])
# Display cluster status by running 'nodetool status' on a node
def verify_cluster(cluster):
    """Display cluster status by running 'nodetool status' on a node."""
    keys = cluster.keys()
    # NOTE(review): indexing dict.keys() works only on Python 2 (this file
    # already requires Python 2 via execfile) -- would need list() on Py3.
    zone = keys[0]
    nodename = cluster[zone][0]['name']
    status = subprocess.check_output(["gcutil",
        "--service_version=%s" % API_VERSION, "ssh",
        "--zone=%s" % zone, nodename, "nodetool status"], stderr=NULL)
    print("=> Output from node %s and 'nodetool status'" % nodename)
    print(status)
def main():
    """Turn up the demo Cassandra cluster end to end."""
    # Find a suitable US region with more than a single UP zone.
    zones = find_zones()
    # Make sure we don't exceed MAX_NODES.
    if NODES_PER_ZONE * len(zones) > MAX_NODES:
        error_string = "Error: MAX_NODES exceeded. Adjust tools/common.py "
        error_string += "NODES_PER_ZONE or MAX_NODES."
        raise BE(error_string)
    # Create the nodes, upload/install JRE, customize/execute config script
    create_nodes(zones)
    cluster = get_cluster()
    seed_data, script_path = customize_config_script(cluster)
    configure_nodes(cluster, script_path)
    # Bring up the cluster and give it a minute for nodes to join.
    start_cluster(seed_data, cluster)
    print("=> Cassandra cluster is up and running on all nodes")
    print("=> Sleeping 30 seconds to give nodes time to join cluster")
    time.sleep(30)
    # Run nodetool status on a node and display output.
    verify_cluster(cluster)
if __name__ == '__main__':
    main()
    # Explicit success exit code (main raises BE on any failure).
    sys.exit(0)
| 9,666 | 3,035 |
from dataclasses import dataclass
# https://community.finicity.com/s/article/207505363-Multi-Factor-Authentication-MFA
@dataclass
class AnsweredMfaQuestion(object):
    """An MFA challenge question together with the partner-supplied answer.

    Mirrors the Finicity MFA wire format; unrecognized payload keys are
    preserved for forward compatibility but never serialized back out.
    """

    text: str
    answer: str  # added by the partner for calls to the "MFA Answers" services
    _unused_fields: dict  # unrecognized payload keys; should normally be empty

    def to_dict(self) -> dict:
        """Serialize to the API payload shape (extra fields are dropped)."""
        return {'text': self.text, 'answer': self.answer}

    @staticmethod
    def from_dict(data: dict):
        """Build an instance from an API response dict without mutating it.

        Raises KeyError when 'text' or 'answer' is missing.
        """
        remaining = dict(data)
        question_text = remaining.pop('text')
        question_answer = remaining.pop('answer')
        return AnsweredMfaQuestion(
            text=question_text,
            answer=question_answer,
            _unused_fields=remaining,
        )
| 770 | 231 |
from __future__ import absolute_import
import base64
import os
import pathlib
import unittest
import conjur
from OpenSSL import crypto, SSL
# Locations of the TLS material used by the tests; the CA cert file is
# (re)written in setUp from the CONJUR_SSL_CERTIFICATE environment variable.
CERT_DIR = pathlib.Path('config/https')
SSL_CERT_FILE = 'ca.crt'
CONJUR_CERT_FILE = 'conjur.crt'
CONJUR_KEY_FILE = 'conjur.key'
def generateKey(type, bits):
    """Generates a key using OpenSSL.

    :param type: pyOpenSSL key-type constant (e.g. crypto.TYPE_RSA).
        NOTE(review): the parameter name shadows the ``type`` builtin.
    :param bits: key size in bits.
    """
    key = crypto.PKey()
    key.generate_key(type, bits)
    return key
def generateCSR(host_id, key):
    """Generate a PEM-encoded Certificate Signing Request.

    The CSR carries a SPIFFE URI subjectAltName built from the
    MY_POD_NAME and TEST_APP_NAMESPACE environment variables
    (raises KeyError if either is unset).

    :param host_id: used as the subject CN.
    :param key: key whose public half goes in the CSR and which signs it.
    """
    pod_name = os.environ['MY_POD_NAME']
    namespace = os.environ['TEST_APP_NAMESPACE']
    SANURI = f'spiffe://cluster.local/namespace/{namespace}/podname/{pod_name}'
    req = crypto.X509Req()
    req.get_subject().CN = host_id
    req.set_pubkey(key)
    formatted_SAN = f'URI:{SANURI}'
    req.add_extensions([
        crypto.X509Extension(
            'subjectAltName'.encode('ascii'), False, formatted_SAN.encode('ascii')
        )
    ])
    # NOTE(review): SHA-1 is a weak signing digest; confirm the authenticator
    # accepts sha256 before upgrading.
    req.sign(key, "sha1")
    return crypto.dump_certificate_request(crypto.FILETYPE_PEM, req)
class TestClientCertInject(unittest.TestCase):
    # Integration tests for the Conjur k8s client-certificate injection API.
    # Requires a reachable conjur-oss service plus CONJUR_AUTHN_TOKEN_FILE
    # and CONJUR_SSL_CERTIFICATE in the environment.

    def setUp(self):
        # Build an authenticated API client from the mounted authn token.
        with open(os.environ['CONJUR_AUTHN_TOKEN_FILE'], 'r') as content:
            encoded_token = base64.b64encode(content.read().replace('\r', '').encode()).decode('utf-8')
        config = conjur.Configuration(
            host='https://conjur-oss:9443'
        )
        # Write the CA cert out so the client can verify the TLS connection.
        with open(CERT_DIR.joinpath(SSL_CERT_FILE), 'w') as content:
            content.write(os.environ['CONJUR_SSL_CERTIFICATE'])
        config.ssl_ca_cert = CERT_DIR.joinpath(SSL_CERT_FILE)
        config.username = 'admin'
        config.api_key = {'Authorization': 'Token token="{}"'.format(encoded_token)}
        self.client = conjur.ApiClient(config)
        self.api = conjur.api.AuthenticationApi(self.client)
        # Fresh key + CSR used by every test case.
        key = generateKey(crypto.TYPE_RSA, 2048)
        self.csr = generateCSR('app-test/*/*', key)

    def tearDown(self):
        self.client.close()

    def test_inject_202(self):
        """Test 202 status response when successfully requesting a client certificate injection

        202 - successful request and injection
        """
        # optional prefix
        # prefix = 'host/conjur/authn-k8s/my-authenticator-id/apps'
        response, status, _ = self.api.k8s_inject_client_cert_with_http_info(
            'my-authenticator-id',
            body=self.csr
        )
        self.assertEqual(status, 202)
        self.assertEqual(None, response)

    def test_inject_400(self):
        """Test 400 status response when successfully requesting a cert injection

        400 - Bad Request caught by NGINX
        """
        with self.assertRaises(conjur.ApiException) as context:
            self.api.k8s_inject_client_cert(
                '\00',
                body=self.csr
            )
        self.assertEqual(context.exception.status, 400)

    def test_inject_401(self):
        """Test 401 status response when requesting a cert injection

        401 - unauthorized request. This happens from invalid Conjur auth token,
        incorrect service ID, malformed CSR and others
        """
        with self.assertRaises(conjur.ApiException) as context:
            self.api.k8s_inject_client_cert(
                'wrong-service-id',
                body=self.csr
            )
        self.assertEqual(context.exception.status, 401)

    def test_inject_404(self):
        """Test 404 status response when requesting a cert injection

        404 - Resource not found, malformed service ID
        """
        with self.assertRaises(conjur.ApiException) as context:
            self.api.k8s_inject_client_cert(
                '00.00',
                body=self.csr
            )
        self.assertEqual(context.exception.status, 404)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 3,829 | 1,256 |
import os
import pandas as pd
def load_banana():
    """Load the banana benchmark dataset from its CSV file.

    The path is relative to the current working directory. The first CSV
    row is treated as a header (pandas default).

    Returns:
        (X, y): X is an (n, 2) array with the two input features and y is
        the binary label array (1 where the raw label equals 1, else 0).
    """
    file_path = 'data/xgb_dataset/banana/banana.csv'
    # Raw labels are +1/-1; the converter maps them to 1/0 while reading.
    # (Removed a leftover debugging print of os.curdir.)
    data = pd.read_csv(file_path, delimiter=',',
                       converters={2: lambda x: int(int(x) == 1)}).values
    return data[:, :2], data[:, 2]
import asyncio
from asyncio.events import AbstractEventLoop
import json
import logging
import zlib
from aiohttp import ClientSession, ClientWebSocketResponse
from ..cert import Cert
from ..hardcoded import API_URL
from ..net_client import BaseClient
class WebsocketClient(BaseClient):
    """
    implements BaseClient with websocket protocol

    Fetches the gateway URL from the khl API, keeps a heartbeat running,
    and pushes received event frames onto ``event_queue``.
    """
    __slots__ = 'cert', 'compress', 'event_queue', 'NEWEST_SN', 'RAW_GATEWAY'
    logger = logging.getLogger('khl.WebsocketClient')
    # NOTE(review): captured at import time; calling asyncio.get_event_loop()
    # outside a running loop is deprecated on newer Python -- confirm.
    __loop = asyncio.get_event_loop()

    def __init__(self, cert: Cert, compress: bool = True):
        super().__init__()
        self.cert = cert
        self.compress = compress
        self.event_queue = asyncio.Queue()
        self.NEWEST_SN = 0      # serial number of the newest event frame seen
        self.RAW_GATEWAY = ''   # websocket gateway URL fetched from the API

    async def heartbeater(self, ws_conn: ClientWebSocketResponse):
        """Send a heartbeat frame every 26 seconds, echoing the newest sn."""
        while True:
            await asyncio.sleep(26)
            await ws_conn.send_json({'s': 2, 'sn': self.NEWEST_SN})

    def setup_event_loop(self, loop: AbstractEventLoop):
        """Bind the client to *loop* and rebuild the event queue on it."""
        # NOTE(review): assigning the name-mangled _WebsocketClient__loop on
        # the instance only works if BaseClient leaves a __dict__ available
        # (it is not listed in __slots__) -- confirm.
        self.__loop = loop
        # NOTE(review): the Queue(loop=...) parameter was removed in
        # Python 3.10 -- confirm the supported interpreter versions.
        self.event_queue = asyncio.Queue(loop=loop)
        return

    def __raw_2_req(self, data: bytes) -> dict:
        """
        convert raw data to human-readable request data

        decompress and decrypt data(if configured with compress or encrypt)
        :param data: raw data
        :return human-readable request data
        """
        data = self.compress and zlib.decompress(data) or data
        data = json.loads(str(data, encoding='utf-8'))
        return data

    async def _main(self):
        """Fetch the gateway, connect, start the heartbeat, queue events."""
        async with ClientSession() as cs:
            headers = {
                'Authorization': f"Bot {self.cert.token}",
                'Content-type': 'application/json'
            }
            params = {'compress': self.compress and 1 or 0}
            async with cs.get(f"{API_URL}/gateway/index",
                              headers=headers,
                              params=params) as res:
                res_json = await res.json()
                # code != 0 signals an API-level error; give up early.
                if res_json['code'] != 0:
                    self.logger.error(f'error getting gateway: {res_json}')
                    return
                self.RAW_GATEWAY = res_json['data']['url']
            async with cs.ws_connect(self.RAW_GATEWAY) as ws_conn:
                asyncio.ensure_future(self.heartbeater(ws_conn),
                                      loop=self.__loop)
                async for msg in ws_conn:
                    try:
                        req_json = self.__raw_2_req(msg.data)
                    except Exception as e:
                        logging.error(e)
                        return
                    # s == 0 marks an event frame: record its sn and enqueue.
                    if req_json['s'] == 0:
                        self.NEWEST_SN = req_json['sn']
                        event = req_json['d']
                        await self.event_queue.put(event)

    async def run(self):
        await self._main()
| 2,909 | 834 |