id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3386723 | <gh_stars>0
#!/usr/bin/env python
import rospy
import message_filters
import matplotlib.pyplot as pl
import numpy as np
from rospy_tutorials.msg import Floats
from rospy.numpy_msg import numpy_msg
from geometry_msgs.msg import Twist
from math import exp
class Controller:
    ''' The controller uses the (r)elative interagent distances (r_values) to determine the desired velocity of the Nexus '''
    def __init__(self):
        ''' Initiate self and subscribe to /r_values and /zeta_value topic '''
        # controller variables
        # running doubles as a state flag: < 10 means active; shutdown() raises it to 10000
        self.running = np.float32(1)
        # self.d = np.float32(0.8*1.025)
        # desired relative positions (formation shape) of agents 2, 3 and 4 w.r.t. agent 1
        self.p21_star = np.array([[0],[0.8]])
        self.p31_star = np.array([[0.8],[0.8]])
        self.p41_star = np.array([[0.8],[0]])
        self.d = np.float32(0.8)  # desired inter-agent distance
        self.r_safe = np.float32(0.5)  # obstacle safety distance -- assumed metres, TODO confirm
        self.dd = np.float32(np.sqrt(np.square(self.d)+np.square(self.d)))  # diagonal of the square formation
        self.c = np.float32(0.5)  # distance-error gain
        self.cf = np.float(1.0) # new gain for formation control Nelson
        self.calpha = np.float(1.0)  # obstacle-avoidance gain
        self.czeta = np.float(1)  # consensus gain for the zeta dynamics
        self.U_old = np.array([0, 0])  # previous control input (anti-shaking filter)
        self.U_oldd = np.array([0, 0])  # control input two iterations back
        self.k = 0 # current iteration of the Euler method
        self.h = 0.1 # stepsize
        self.zeta1_old = np.zeros((2,1))  # previous zeta state used by the Euler step
        self.zeta_values = np.empty(shape=(0,0))  # latest zeta values received from agents 2-4
        self.zeta1 = np.zeros((2,1))  # this agent's obstacle-avoidance state
        # motion parameters
        self.x_dot = np.float32(0)
        self.y_dot = np.float32(0)
        self.r_dot = np.float32(0)
        # mu/mut encode the desired formation motion per edge (all zero here since *_dot are 0)
        self.mu_x = self.x_dot*np.array([0, -1, 0, -1, 0])
        self.mut_x = self.x_dot*np.array([0, 1, 0, 1, 0])
        self.mu_y = self.y_dot*np.array([-1, 0, 0, 0, 1])
        self.mut_y = self.y_dot*np.array([1, 0, 0, 0, -1])
        self.mu_r = self.r_dot*np.array([-1, -1, 0, 1, -1])
        self.mut_r = self.r_dot*np.array([1, 1, 0, -1, 1])
        self.mu = self.mu_x+self.mu_y+self.mu_r
        self.mut = self.mut_x+self.mut_y+self.mut_r
        # prepare Log arrays
        self.E1_log = np.array([])
        self.E4_log = np.array([])
        self.Un = np.float32([])
        self.U_log = np.array([])
        self.time = np.float64([])
        self.time_log = np.array([])
        self.now = np.float64([rospy.get_time()])
        self.begin = np.float64([rospy.get_time()])
        self.l = 0  # 0 until the first controller iteration fixes the time origin
        # prepare shutdown
        rospy.on_shutdown(self.shutdown)
        # prepare publisher
        self.pub_vel = rospy.Publisher('/n_1/cmd_vel', Twist, queue_size=1)
        self.velocity = Twist()
        self.pub_zeta = rospy.Publisher('/n_1/zeta_values', numpy_msg(Floats), queue_size=1)
        # subscribe to r_values topic
        rospy.Subscriber('/n_1/r_values', numpy_msg(Floats), self.controller)
        rospy.Subscriber('/n_1/obstacles', numpy_msg(Floats), self.obstacleAvoidance)
        # subscribe to zeta_values topic of each controller
        zeta2_sub = message_filters.Subscriber('/n_2/zeta_values', numpy_msg(Floats))
        zeta3_sub = message_filters.Subscriber('/n_3/zeta_values', numpy_msg(Floats))
        zeta4_sub = message_filters.Subscriber('/n_4/zeta_values', numpy_msg(Floats))
        # obs_values_sub = message_filters.Subscriber('/n_1/obstacles', numpy_msg(Floats))
        ts1 = message_filters.TimeSynchronizer([zeta2_sub, zeta3_sub, zeta4_sub], 10)
        ts1.registerCallback(self.orderZeta)
    def controller(self, data):
        ''' Calculate U based on r_values and save error velocity in log arrays '''
        if self.running < 10:
            # input for controller
            r_values= data.data
            # Nelson's formation shape control
            # relative position vectors; the indices into r_values follow the
            # publisher's layout -- TODO confirm against the /n_1/r_values producer
            p41 = np.array([[r_values[1]], [r_values[2]]])
            p31 = np.array([[r_values[4]], [r_values[5]]])
            p21 = np.array([[r_values[7]], [r_values[8]]])
            # Error
            Ed = np.array([[r_values[0]-self.d], \
                           [r_values[6]-self.d]])
            #Nelsons motion to goal control
            #pcen = ((4**(-1)-cI*gamma))
            #Nelsons obstacle avoidance
            # zeta1 = self.obstacleAvoidance()
            # Formation motion control
            Ab = np.array([[self.mu[0], 0, self.mu[3], 0], \
                           [0, self.mu[0], 0, self.mu[3]]])
            z = np.array([r_values[7], r_values[8], r_values[1], r_values[2]])
            #z = [ edge 1 , edge 4]
            print 'self.zeta1', self.zeta1
            # use the obstacle-avoidance state as an input offset only when it is non-zero
            if self.zeta1[0] != 0 and self.zeta1[1] != 0:
                print 'zeta1', self.zeta1
                U0 = self.zeta1
            else:
                print 'zeta1 set to zero'
                U0 = np.zeros((2,1))
            # Control law
            Uf = self.cf * (p21-self.p21_star+p31-self.p31_star+p41-self.p41_star)
            # U0 = zeta1
            # Ug = self.cp * (pcen
            U = Uf - U0
            # U = self.c*BbDz.dot(Dzt).dot(Ed) + + (Ab.dot(z)).reshape((2, 1))
            # print "U = ", -U
            # Saturation
            v_max = 0.3
            v_min = 0.02
            for i in range(len(U)):
                if U[i] > v_max:
                    U[i] = v_max
                elif U[i] < -v_max:
                    U[i] = -v_max
                elif -v_min < U[i]+self.U_old[i]+self.U_oldd[i] < v_min : # preventing shaking
                    U[i] = 0
            # Set old U values in order to prevent shaking
            self.U_oldd = self.U_old
            self.U_old = U
            # Append error and velocity in Log arrays
            self.E1_log = np.append(self.E1_log, Ed[1])
            self.E4_log = np.append(self.E4_log, Ed[0])
            self.Un = np.float32([np.sqrt(np.square(U[0])+np.square(U[1]))])
            self.U_log = np.append(self.U_log, self.Un)
            # Save current time in time log array
            if self.l < 1:
                # first iteration: fix the time origin for the logs
                self.begin = np.float64([rospy.get_time()])
                self.l = 10
            self.now = np.float64([rospy.get_time()])
            self.time = np.float64([self.now-self.begin])
            self.time_log = np.append(self.time_log, self.time)
            # publish
            self.publish_control_inputs(U[0], U[1])
            self.publish_zeta(self.zeta1)
        elif 10 < self.running < 1000:
            # NOTE(review): running is only ever 1 or 10000 in this file, so this
            # branch looks unreachable -- confirm intended values
            self.shutdown()
    def publish_control_inputs(self,x,y):
        ''' Publish the control inputs to command velocities
        NOTE: somehow the y direction has been reversed from the indigo-version from Johan and
        the regular kinetic version. Hence the minus sign.
        '''
        self.velocity.linear.x = x
        self.velocity.linear.y = y
        self.pub_vel.publish(self.velocity)
    # def findEta(self,zeta):
    #     zetadot = np.array([[0],[0]])
    #     norm_po11 = np.linalg.norm(po11)
    #     alpha1_1 = (po11/(norm_po11))*((norm_po11)**(-1))
    #     zetadot[0] = self.czeta*((zeta2-zeta1)+(zeta3-zeta1)+(zeta4-zeta1))+self.calpha*alpha1_1
    #     return zetadot1
    def publish_zeta(self, zeta1):
        '''communicates the state variable zeta to the neighbouring agents'''
        self.zeta1 = zeta1
        print 'zeta1', zeta1
        self.pub_zeta.publish(self.zeta1)
    def obstacleAvoidance(self, data):
        # Callback for /n_1/obstacles: integrates the zeta dynamics with a
        # forward-Euler step while an obstacle is within 0.8, otherwise resets
        # zeta to zero. Returns the updated zeta state.
        obstacles = data.data
        print 'obstacles', obstacles
        if self.zeta_values.any():
            print 'self.z_values!!!!', self.zeta_values
            zeta2 = self.zeta_values[0]
            zeta3 = self.zeta_values[1]
            zeta4 = self.zeta_values[2]
        else:
            # no neighbour zeta data received yet
            zeta2 = zeta3 = zeta4 = np.zeros((2,1))
        if obstacles[0] <= 0.8:
            # obstacles[0] appears to be the distance, obstacles[1:3] the
            # relative vector to the obstacle -- TODO confirm message layout
            p01_obs01 = np.array([[obstacles[1]], [obstacles[2]]])
            norm_p01_obs01 = obstacles[0]
            print 'norm p01 obs01', norm_p01_obs01
            print 'zeta234', zeta2, zeta3, zeta4
            alpha1_1 = np.divide(p01_obs01, np.square(norm_p01_obs01)) - self.r_safe
            print 'alpha1_1', alpha1_1
            print 'zeta', zeta2
            # consensus on zeta with the neighbours plus the repulsive obstacle term
            zeta1dot = self.czeta*((zeta2-self.zeta1_old)+(zeta3-self.zeta1_old)+(zeta4-self.zeta1_old))+self.calpha*alpha1_1
            print 'zzeta1dot', zeta1dot
            print 'zzetaold', self.zeta1_old
            # forward-Euler integration with stepsize h
            zeta1 = self.zeta1_old + self.h*zeta1dot
            self.zeta1_old = zeta1
            self.zeta1 = np.array(zeta1, dtype=np.float32)
            return self.zeta1
        else:
            alpha1_1 = 0.0
            self.zeta1 = np.zeros((2,1), dtype=np.float32)
            return self.zeta1
    def orderZeta(self, data):
        # Time-synchronized callback storing the neighbours' zeta values
        if data:
            self.zeta_values = data.data
            print 'zeta values', self.zeta_values
    def shutdown(self):
        ''' Stop the robot when shutting down the controller_1 node '''
        rospy.loginfo("Stopping Nexus_1...")
        self.running = np.float32(10000)  # disables the controller callback
        self.velocity = Twist()  # zero velocity command
        self.pub_vel.publish(self.velocity)
        # np.save('/home/s2604833/Documents/Master Thesis/experiments/experiment_x/E1_log_nx1', self.E1_log)
        # np.save('/home/s2604833/Documents/Master Thesis/experiments/experiment_x/E4_log_nx1', self.E4_log)
        # np.save('/home/s2604833/Documents/Master Thesis/experiments/experiment_x/U_log_nx1', self.U_log)
        # np.save('/home/s2604833/Documents/Master Thesis/experiments/experiment_x/time_log_nx1', self.time_log)
        rospy.sleep(1)
        # plot the logged distance errors and input velocities
        pl.close("all")
        pl.figure(0)
        pl.title("Inter-agent distance error measured by Nexus 1")
        pl.plot(self.time_log, self.E1_log, label="e1_nx1", color='b')
        pl.plot(self.time_log, self.E4_log, label="e4_nx1", color='y')
        pl.xlabel("Time [s]")
        pl.ylabel("Error [m]")
        pl.grid()
        pl.legend()
        pl.figure(1)
        pl.title("Input velocity Nexus 1 ")
        pl.plot(self.time_log, self.U_log, label="pdot_nx1", color='b')
        pl.xlabel("Time [s]")
        pl.ylabel("Velocity [m/s]")
        pl.grid()
        pl.legend()
        pl.pause(0)
# time synchronizer
if __name__ == '__main__':
    try:
        rospy.init_node('controller_1', anonymous=False)
        Controller()
        rospy.spin()
    except rospy.ROSInterruptException:
        # BUGFIX: the original bare `except:` silently swallowed every error,
        # including programming bugs; only the expected ROS shutdown interrupt
        # is handled now, everything else propagates with a traceback.
        rospy.loginfo("Controller node_1 terminated.")
| StarcoderdataPython |
1942018 | <gh_stars>10-100
#!/usr/bin/env python
# coding: utf-8
import random
from argparse import ArgumentParser
from time import time
from uuid import uuid1 as uuid
from ams import Waypoint, Arrow, Route, Schedule, Target
from ams.nodes import User, SimTaxiUser
# Command-line options for connecting to the AMS broker and locating the map files.
parser = ArgumentParser()
parser.add_argument("-H", "--host", type=str, default="localhost", help="host")
parser.add_argument("-P", "--port", type=int, default=1883, help="port")
parser.add_argument("-ID", "--id", type=str, default=None, help="node id")
parser.add_argument("-N", "--name", type=str, default="tu1", help="name")
parser.add_argument("-W", "--path_waypoint_json", type=str,
                    default="../../res/waypoint.json", help="waypoint.json path")
parser.add_argument("-A", "--path_arrow_json", type=str,
                    default="../../res/arrow.json", help="arrow.json path")
parser.add_argument("-WID", "--start_waypoint_id", type=str,
                    default=None, help="start waypoint id")
args = parser.parse_args()
if __name__ == '__main__':
    # Load the map: waypoints plus the arrows (directed edges) connecting them.
    waypoint = Waypoint()
    waypoint.load(args.path_waypoint_json)
    arrow = Arrow(waypoint)
    arrow.load(args.path_arrow_json)
    # Waypoints at which a simulated taxi user may start or end a trip.
    stop_waypoint_ids = [
        "8910", "8911", "8912", "8913", "8914", "8915", "8916", "8917", "8918", "8919", "8920", "8921", "8922", "8923",
        "8924", "8925", "8926",
        "9362", "9363", "9364", "9365", "9366", "9367", "9368", "9369", "9370", "9371", "9372", "9373", "9374", "9375",
        "9376", "9377",
        "8883", "8884", "8885", "8886", "8887", "8888", "8889", "8890", "8891", "8892", "8893", "8894", "8895", "8896",
        "8897",
        "9392", "9393", "9394", "9395", "9396", "9397", "9398", "9399", "9400", "9401", "9402", "9403", "9404",
        "10350", "10351", "10352", "10353", "10354", "10355", "10356", "10357", "10358", "10359", "10360", "10361",
        "10362", "10363", "10364", "10365", "10366", "10367", "10368", "10369", "10370", "10371", "10372", "10373",
        "10374",
        "9697", "9698", "9699", "9700", "9701", "9702", "9703", "9704", "9705", "9706", "9707", "9708",
        "8936", "8937", "8938", "8939", "8940", "8941", "8942", "8943", "8944", "8945", "8946", "8947", "8948", "8949",
        "8950", "8951", "8952", "8953", "8954", "8955", "8956", "8957", "8958", "8959", "8960", "8961", "8962", "8963",
        "8964", "8965", "8966", "8967", "8968",
    ]
    # Pick a random start unless one was given on the command line.
    start_waypoint_id = args.start_waypoint_id
    if start_waypoint_id is None:
        start_waypoint_id = random.choice(stop_waypoint_ids)
    start_arrow_code = arrow.get_arrow_codes_from_waypoint_id(start_waypoint_id)[0]
    start_time = time() - 5  # schedule the trip as having started 5 s ago
    # Pick a goal different from the start.
    stop_waypoint_ids.remove(start_waypoint_id)
    goal_waypoint_id = random.choice(stop_waypoint_ids)
    goal_arrow_code = arrow.get_arrow_codes_from_waypoint_id(goal_waypoint_id)[0]
    taxi_user = SimTaxiUser(
        _id=args.id if args.id is not None else str(uuid()),
        name=args.name,
        dt=3.0
    )
    # One TRIP schedule routing the user from start to goal.
    trip_schedule = Schedule.new_schedule(
        [Target.new_node_target(taxi_user)],
        User.CONST.EVENT.TRIP, start_time, start_time+9999,
        Route.new_route(start_waypoint_id, goal_waypoint_id, [start_arrow_code, goal_arrow_code])
    )
    taxi_user.set_trip_schedules([trip_schedule])
    taxi_user.start(host=args.host, port=args.port)
| StarcoderdataPython |
3518675 | <reponame>jlebunetel/agile
from django.conf import settings
from django.contrib.sites.models import Site
from django.db import models
from django.utils.translation import ugettext, ugettext_lazy as _
class SiteCustomization(models.Model):
    """Per-site settings (signup switch, tagline, description) attached 1:1
    to ``django.contrib.sites.Site``; also proxies project-level settings
    (GitHub/license metadata) as read-only properties for templates."""
    # history = HistoricalRecords() # done in translation.py
    site = models.OneToOneField(
        Site,
        on_delete=models.CASCADE, # if Site is deleted, SiteCustomization will also be deleted!
        primary_key=True,
        verbose_name=_("site"),
    )
    is_open_for_signup = models.BooleanField(
        default=True, verbose_name=_("is open for signup")
    )
    tagline = models.CharField( # [i18n]
        blank=True,
        max_length=settings.CORE_SITECUSTOMIZATION_TAGLINE_LENGHT,
        verbose_name=_("tagline"),
        help_text=_("A few words to describe this very website."),
        default="A few words to describe this very website.",
    )
    description = models.TextField( # [i18n]
        blank=True,
        max_length=settings.CORE_SITECUSTOMIZATION_DESCRIPTION_LENGHT,
        verbose_name=_("description"),
        help_text=_("A short text to describe this very website."),
        default=_("A short text to describe this very website."),
    )
    class Meta:
        verbose_name = _("site customization")
        verbose_name_plural = _("site customizations")
        ordering = ["site"]
    def __str__(self):
        # Fall back to a translated "unknown" when the related Site has no name.
        return self.site.name if self.site.name else str(_("unknown"))
    def save(self, *args, **kwargs):
        super(SiteCustomization, self).save(*args, **kwargs)
        # Clear cached content so changes are visible immediately
        Site.objects.clear_cache()
    @property
    def github_repo_name(self):
        return settings.GITHUB_REPO_NAME
    @property
    def github_repo_url(self):
        return settings.GITHUB_REPO_URL
    @property
    def github_team_name(self):
        return settings.GITHUB_TEAM_NAME
    @property
    def github_team_url(self):
        return settings.GITHUB_TEAM_URL
    @property
    def github_contributors_url(self):
        return settings.GITHUB_CONTRIB_URL
    @property
    def license_name(self):
        return settings.LICENSE_NAME
    @property
    def license_url(self):
        return settings.LICENSE_URL
    @property
    def fontawesome_site_icon(self):
        return settings.FONTAWESOME_SITE_ICON
| StarcoderdataPython |
11358554 | <gh_stars>1-10
from setuptools import setup, convert_path

# Read the package version from version_here.py without importing the package
# (importing would pull in Kivy and friends at build time).
main_ns = {}
with open(convert_path("pythonhere/version_here.py")) as ver_file:
    exec(ver_file.read(), main_ns)
version = main_ns["__version__"]  # NOTE(review): unused below; setup() reads main_ns directly
# Long description for PyPI comes straight from the README.
with open(convert_path("README.rst")) as readme_file:
    long_description = readme_file.read()
setup(
    name="pythonhere",
    version=main_ns["__version__"],
    packages=[
        "pythonhere",
        "pythonhere.magic_here",
        "pythonhere.ui_here",
    ],
    description="Here is the Kivy based app to run code from the Jupyter magic %there",
    long_description=long_description,
    long_description_content_type="text/x-rst",
    author="b3b",
    author_email="<EMAIL>",
    install_requires=[
        "kivy>=2.0.0",
        "herethere>=0.1.0,<0.2.0",
        "ifaddr",
        "ipython",
        "ipywidgets",
        "nest_asyncio",
        "Pillow",
    ],
    extras_require={
        "dev": [
            "black",
            "codecov",
            "docutils",
            "flake8",
            "jupytext",
            "pylint",
            "pytest",
            "pytest-asyncio",
            "pytest-cov",
            "pytest-mock",
        ],
        "docker": [
            "jupytext==1.7.1"
        ]
    },
    url="https://github.com/b3b/ipython-pythonhere",
    project_urls={
        'Changelog': 'https://github.com/b3b/pythonhere/blob/master/CHANGELOG.rst',
    },
    # https://pypi.org/classifiers/
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
    keywords="android ipython jupyter magic kivy",
    license="MIT",
)
| StarcoderdataPython |
1953690 | <gh_stars>1-10
import pytest
from jina.parser import set_gateway_parser, set_pea_parser
from jina.peapods.pod import GatewayPod
if False:
from jina.peapods.remote import PeaSpawnHelper
@pytest.mark.skip
def test_remote_not_allowed():
    """Spawn a pea against a local gateway (skipped; presumably the spawn is
    expected to be rejected when the gateway does not allow it -- TODO confirm)."""
    f_args = set_gateway_parser().parse_args([])
    p_args = set_pea_parser().parse_args(['--host', 'localhost', '--port-expose', str(f_args.port_expose)])
    with GatewayPod(f_args):
        PeaSpawnHelper(p_args).start()
@pytest.mark.skip
@pytest.mark.parametrize('args', [['--allow-spawn'], []])
def test_cont_gateway(args):
    """Start and immediately stop a GatewayPod with and without --allow-spawn."""
    parsed_args = set_gateway_parser().parse_args(args)
    with GatewayPod(parsed_args):
        pass
| StarcoderdataPython |
import csv
import sqlite3

# One-off import: load country rows from a CSV file into the `countries`
# table of the world-gdp SQLite database.
DB_PATH = '/home/iwk/src/worlddata-python-example/data/import/world-gdp.db'
CSV_PATH = '/home/iwk/src/worlddata-python-example/data/import/data.countries.csv'

conn = sqlite3.connect(DB_PATH)
try:
    with open(CSV_PATH, 'r', encoding="utf-8") as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            data = (row["CountryCode"], row["CountryName"], row["CountryCode2"], row["UNCode"])
            print(data)
            # Parameterized insert (keeps quoting/escaping correct).
            conn.execute(
                "INSERT INTO countries (CountryCode, CountryName, CountryCode2, UNCode) VALUES (?,?, ?, ?)",
                data,
            )
    # Single commit after all rows, as before.
    conn.commit()
finally:
    # BUGFIX: the connection was never closed when the import failed partway;
    # also removed the unused cursor that was created and never used.
    conn.close()
| StarcoderdataPython |
4980056 | import treesimi as ts
from treesimi.convert import adjac_to_nested_recur
def test1():
    """Adjacency -> nested-set conversion, subtree extraction and attribute merge."""
    adjac = [(1, 2), (2, 0), (3, 2), (4, 3)]
    nested = ts.adjac_to_nested(adjac)
    # rows are [node_id, lft, rgt, depth]
    assert nested == [[2, 1, 8, 0], [1, 2, 3, 1], [3, 4, 7, 1], [4, 5, 6, 2]]
    subtree = ts.get_subtree(nested, 3)
    assert subtree == [[3, 4, 7, 1], [4, 5, 6, 2]]
    attrs = [(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D')]
    nested2 = ts.set_attr(nested, attrs)
    assert nested2 == [
        [2, 1, 8, 0, 'B'], [1, 2, 3, 1, 'A'],
        [3, 4, 7, 1, 'C'], [4, 5, 6, 2, 'D']]
    subtree = ts.get_subtree(nested2, 3)
    assert subtree == [[3, 4, 7, 1, 'C'], [4, 5, 6, 2, 'D']]
def test2():
    """Recursive converter produces the expected nested-set rows (order-free check)."""
    adjac = [(1, 0), (2, 1), (3, 1), (4, 2)]
    _, nested = adjac_to_nested_recur(
        adjac, nested=[], parent_id=1, lft=1, depth=0)
    assert [4, 3, 4, 2] in nested
    assert [2, 2, 5, 1] in nested
    assert [3, 6, 7, 1] in nested
    assert [1, 1, 8, 0] in nested
    assert len(nested) == 4
def test3():
    """Public adjac_to_nested matches the recursive converter's rows."""
    adjac = [(1, 0), (2, 1), (3, 1), (4, 2)]
    nested = ts.adjac_to_nested(adjac)
    assert [1, 1, 8, 0] in nested
    assert [2, 2, 5, 1] in nested
    assert [4, 3, 4, 2] in nested
    assert [3, 6, 7, 1] in nested
    assert len(nested) == 4
def test4():
    """set_attr appends the attribute as a fifth column, matched by node id."""
    nested = [[i, None, None, None] for i in range(1, 5)]
    attrs = [(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D')]
    nested2 = ts.set_attr(nested, attrs)
    assert [1, None, None, None, 'A'] in nested2
    assert [2, None, None, None, 'B'] in nested2
    assert [3, None, None, None, 'C'] in nested2
    assert [4, None, None, None, 'D'] in nested2
    assert len(nested2) == 4
def test5():
    """One-step conversion of (id, parent, attr) triples to attributed nested sets."""
    adjac = [(1, 2, 'A'), (2, 0, 'B'), (3, 2, 'C'), (4, 3, 'D')]
    nested = ts.adjac_to_nested_with_attr(adjac)
    assert [2, 1, 8, 0, 'B'] in nested
    assert [1, 2, 3, 1, 'A'] in nested
    assert [3, 4, 7, 1, 'C'] in nested
    assert [4, 5, 6, 2, 'D'] in nested
    assert len(nested) == 4
def test6():
    """Non-integer (tuple) node ids without a parent (e.g. MWE rows) are ignored."""
    adjac = [(1, 2, 'A'), (2, 0, 'B'),
             ((2, "-", 3), None, "mwe"),
             (3, 2, 'C'), (4, 3, 'D')]
    nested = ts.adjac_to_nested_with_attr(adjac)
    assert [2, 1, 8, 0, 'B'] in nested
    assert [1, 2, 3, 1, 'A'] in nested
    assert [3, 4, 7, 1, 'C'] in nested
    assert [4, 5, 6, 2, 'D'] in nested
    assert len(nested) == 4
| StarcoderdataPython |
9626781 | <reponame>Octoberr/swm0920<gh_stars>1-10
"""
爬取facebook的内容
"""
import scrapy
import scrapy_splash
from scrapy import http
from scrapy.selector import Selector
from scrapy_splash import SplashRequest
import json
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36",
"cookie":"sb=oHADW-ZcKeEjonBqTB8aej77; datr=oHADW5gHZ9Z03CBY-QhlJXq5; locale=zh_CN; c_user=100026301698530; xs=11%3AMlHEwT9fLhrHrQ%3A2%3A1526952876%3A-1%3A-1; pl=n; ; fr=0ZI8S8W5dSa7utuLH.AWXzJTM5QlSvb195FCkjALtPzzA.Ba77es.qj.AAA.0.0.BbBQ4D.AWW4O79X; act=1527058935570%2F86; presence=EDvF3EtimeF1527058937EuserFA21B26301698530A2EstateFDt3F_5bDiFA2user_3a1B26063503367A2ErF1EoF4EfF10CAcDiFA2user_3a1B26B242731A2ErF1EoF6EfF9C_5dEutc3F1527058431371G527058937646CEchFDp_5f1B26301698530F137CC; wd=594x769"
}
class FACEBOOK(scrapy.Spider):
    """Scrapy spider fetching one Facebook page through Splash (JS rendering).

    NOTE(review): the hard-coded cookie in `headers` above is a stale session;
    requests will presumably render a logged-out page -- confirm before use.
    """
    name = 'facebook'
    url = 'https://www.facebook.com/CWTheFlash/'
    # anotest = 'http://www.dytt8.net/'
    def start_requests(self):
        # Render via Splash and wait 1 s so client-side content can load.
        yield SplashRequest(url=self.url, headers=headers, callback=self.parse_page,
                            args={
                                'wait': 1
                            }
                            )
        # yield http.Request(url=self.anotest, callback=self.parse_page)
    def parse_page(self, response):
        # Extract a post paragraph; the id-based XPath is tied to one specific
        # rendering of the page and is likely brittle -- TODO confirm selector.
        print("resbody:", response.body)
        html = Selector(response)
        data = html.xpath('//*[@id="u_hl_7"]/div[3]/div[1]/div[2]/div[2]/div/p').extract()
        print('data:', data)
        # div = html.xpath('//*[@id="u_0_l"]')
        # div1 = div.xpath('//*[@id="globalContainer"]')
        # div2 = div1.xpath('//*[@id="u_0_1l"]/div/div[3]/div[3]').extract()
        # print('div1:', div2)
        # print('length', len(div2))
        # data = response.body.decode("utf-8")
        # print("data:", data)
        # html =data['items_html']
        # print(html)
        # respon = response.body.decode("utf-8")
        # text = response.selector.xpath('//*[@id="u_0_1l"]/div').extract()
        # print("data:", text)
| StarcoderdataPython |
# Annual deposit rates per bank, in per cent.
per_cent = {'ТКБ': 5.6, 'СКБ': 5.9, 'ВТБ': 4.28, 'СБЕР': 4.0}

# BUGFIX: `m` (the deposit amount entered by the user) was used below but
# never defined, so the script crashed with a NameError; read it here.
m = float(input("Введите сумму вклада: "))

for k, v in per_cent.items():
    per_cent[k] = round(v * m)  # умножаем каждое значение из словаря на вводимое число
print("Сумма, которую вы можете заработать: ", list(per_cent.values()))

# Idiom: max() with a key function replaces the manual max-search loop.
my_max_key = max(per_cent, key=per_cent.get)
my_max_val = per_cent[my_max_key]
print("Самый выгодный депозит в банке: ", my_max_key)
print("Сумма самого выгодного депозита: ", my_max_val)
| StarcoderdataPython |
6569123 | # https://gist.github.com/jongwony/7c9af218a8b93555124194b660add97d
"""
javascript tagged template literals
https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Template_literals
"""
import re
def comment(string):
    """Collapse escaped braces: '{{' -> '{' and '}}' -> '}'."""
    unescaped = string.replace('{{', '{')
    return unescaped.replace('}}', '}')
def ttl(func, literal):
    # Python analogue of a JS tagged template literal: split `literal` on
    # single-brace {expr} placeholders, eval() each expression, and call
    # func(string_pieces, *values). Doubled braces {{ }} are literal braces.
    # WARNING: eval() executes arbitrary code from `literal` (in this
    # module's globals) -- never call this on untrusted input.
    regex = re.compile(r'(?<!{){(?!{)(.*?)(?<!\})}(?!\})')
    m = regex.search(literal)
    l = []  # evaluated placeholder values
    s = []  # literal string pieces (brace escapes collapsed via comment())
    if m and m.start() > 0:
        # text before the first placeholder
        s.append(comment(literal[0:m.start()]))
    while m:
        l.append(eval(m.group(1)))
        start, end = m.span()
        m = regex.search(literal, pos=start + 1)
        # text between this placeholder and the next (or the tail)
        tail_str = literal[end:m.start() if m else None]
        if tail_str:
            s.append(comment(tail_str))
    return func(s, *l)
# Example
person = 'Mike'
age = 28
def my_tag(s, person_exp, age_exp):
    """Example tag function: join the literal pieces around the person's name
    and a label derived from the age expression."""
    prefix, middle = s  # e.g. 'That ', ' is a '
    if age_exp > 99:
        label = 'centenarian'
    else:
        label = 'youngster'
    return '{}{}{}{}'.format(prefix, person_exp, middle, label)
output1 = ttl(my_tag, 'That {person} is a {age}')
output2 = ttl(my_tag, 'That {{person}} {person} is a ${age}')
print(output1)
print(output2)
a = [1,2,3,4]
def bb(x, y, z):
    """Print the three values space-separated (demo target for * unpacking)."""
    values = (x, y, z)
    print(' '.join(str(v) for v in values))
bb(*a) | StarcoderdataPython |
3575173 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import struct
import time
from importlib import import_module
import redis
from django.conf import settings
from django.contrib.sessions.backends.base import CreateError, SessionBase
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
logger = logging.getLogger('redisession')
conf = {
'SERVER': {},
'USE_HASH': True,
'KEY_GENERATOR': lambda x: x.decode('hex'),
'HASH_KEY_GENERATOR': lambda x: x[:4].decode('hex'),
'HASH_KEYS_CHECK_FOR_EXPIRY':
lambda r: (reduce(
lambda p, y: p.randomkey(), xrange(100), r.pipeline()).execute()),
'COMPRESS_LIB': 'snappy',
'COMPRESS_MIN_LENGTH': 400,
'LOG_KEY_ERROR': False
}
conf.update(getattr(settings, 'REDIS_SESSION_CONFIG', {}))
if isinstance(conf['SERVER'], dict):
class GetRedis(object):
def __call__(self, conf):
if not hasattr(self, '_redis'):
self._redis = redis.Redis(**conf)
return self._redis
get_redis = GetRedis()
else:
from redisession.helper import get_redis
if conf['COMPRESS_LIB']:
compress_lib = import_module(conf['COMPRESS_LIB'])
FLAG_COMPRESSED = 1
class SessionStore(SessionBase):
    """Redis-backed Django session store (Python 2 era code).

    Depending on conf['USE_HASH'] the sessions are stored either as fields of
    redis hashes (bucketed by a short hash key, expiry checked client-side)
    or as plain redis string keys with a native TTL.
    """
    def __init__(self, session_key=None):
        self._redis = get_redis(conf['SERVER'])
        super(SessionStore, self).__init__(session_key)
        if not hasattr(self, 'serializer'):
            # older Django versions have no serializer attribute; default to pickle
            self.serializer = lambda: pickle
    def encode(self, session_dict):
        # Serialize and optionally compress; the first byte stores flag bits.
        data = self.serializer().dumps(session_dict)
        flag = 0
        if conf['COMPRESS_LIB'] and len(data) >= conf['COMPRESS_MIN_LENGTH']:
            compressed = compress_lib.compress(data)
            if len(compressed) < len(data):
                # keep the compressed form only when it actually saves space
                flag |= FLAG_COMPRESSED
                data = compressed
        return chr(flag) + data
    def decode(self, session_data):
        # First byte holds the flag bits written by encode().
        flag, data = ord(session_data[:1]), session_data[1:]
        if flag & FLAG_COMPRESSED:
            if conf['COMPRESS_LIB']:
                return self.serializer().loads(compress_lib.decompress(data))
            raise ValueError('redisession: found compressed data without '
                             'COMPRESS_LIB specified.')
        return self.serializer().loads(data)
    def create(self):
        # Retry until an unused session key is found (save(must_create=True)
        # raises CreateError on collisions).
        for i in xrange(10000):
            self._session_key = self._get_new_session_key()
            try:
                self.save(must_create=True)
            except CreateError:
                continue
            self.modified = True
            return
        raise RuntimeError('Unable to create a new session key.')
    if conf['USE_HASH']:
        def _make_key(self, session_key):
            # Returns (redis hash name, field name) derived from the session
            # key, or None (implicitly) when the key is malformed.
            try:
                return (
                    conf['HASH_KEY_GENERATOR'](session_key),
                    conf['KEY_GENERATOR'](session_key)
                )
            except Exception:
                if conf['LOG_KEY_ERROR']:
                    logger.warning(
                        'misconfigured key-generator or bad key "{}"'.format(
                            session_key
                        )
                    )
        def save(self, must_create=False):
            # hsetnx enforces "create only"; the stored value is a 4-byte
            # big-endian expiry timestamp followed by the encoded payload.
            if must_create:
                func = self._redis.hsetnx
            else:
                func = self._redis.hset
            session_data = self.encode(self._get_session(no_load=must_create))
            expire_date = struct.pack(
                '>I', int(time.time() + self.get_expiry_age()))
            key = self._make_key(self._get_or_create_session_key())
            if key is None:
                raise CreateError
            result = func(*key, value=expire_date + session_data)
            if must_create and not result:
                raise CreateError
        def load(self):
            # Expiry is checked client-side against the packed timestamp;
            # expired or missing sessions fall through to create().
            key = self._make_key(self._get_or_create_session_key())
            if key is not None:
                session_data = self._redis.hget(*key)
                if session_data is not None:
                    expire_date = struct.unpack('>I', session_data[:4])[0]
                    if expire_date > time.time():
                        return self.decode(session_data[4:])
            self.create()
            return {}
        def exists(self, session_key):
            key = self._make_key(session_key)
            if key is not None:
                return self._redis.hexists(*key)
            return False
        def delete(self, session_key=None):
            if session_key is None:
                if self.session_key is None:
                    return
                session_key = self.session_key
            key = self._make_key(session_key)
            if key is not None:
                self._redis.hdel(*key)
    else:  # not conf['USE_HASH']
        def _make_key(self, session_key):
            # Plain-key variant: a single redis key, or None if malformed.
            try:
                return conf['KEY_GENERATOR'](session_key)
            except Exception:
                if conf['LOG_KEY_ERROR']:
                    logger.warning(
                        'misconfigured key-generator or bad key "{}"'.format(
                            session_key
                        )
                    )
        def save(self, must_create=False):
            # set/setnx plus expire issued in one pipeline round-trip.
            pipe = self._redis.pipeline()
            if must_create:
                pipe = pipe.setnx
            else:
                pipe = pipe.set
            session_data = self.encode(self._get_session(no_load=must_create))
            key = self._make_key(self._get_or_create_session_key())
            if key is None:
                raise CreateError
            result = pipe(key, session_data).expire(
                key, self.get_expiry_age()
            ).execute()
            # for Python 2.4 (Django 1.3)
            if must_create and not (result[0] and result[1]):
                raise CreateError
        def load(self):
            key = self._make_key(self._get_or_create_session_key())
            if key is not None:
                session_data = self._redis.get(key)
                if session_data is not None:
                    return self.decode(session_data)
            self.create()
            return {}
        def exists(self, session_key):
            key = self._make_key(session_key)
            if key is not None:
                return key in self._redis
            return False
        def delete(self, session_key=None):
            if session_key is None:
                if self.session_key is None:
                    return
                session_key = self.session_key
            key = self._make_key(session_key)
            if key is not None:
                self._redis.delete(key)
| StarcoderdataPython |
8021386 | import pandas as pd
import numpy as np
if __name__ == "__main__":
    # For every averaged-correlation CSV variant, rebuild a two-level
    # (organ_2, organ_1) index from the "<organ_2>_<organ_1>" row labels and
    # re-export the table as an Excel file with the index levels swapped.
    for subset_method in ["All", "Union", "intersection"]:
        for correlation_type in ["Pearson", "Spearman"]:
            for mean_or_std in ["", "_sd"]:
                correlations = pd.read_csv(
                    f"../../data/page6_LinearXWASCorrelations/average_correlations/Correlations{mean_or_std}_comparisons_{subset_method}_{correlation_type}.csv",
                    index_col=[0],
                )
                # Split each label on "_" into the two organ components.
                correlations.index = pd.MultiIndex.from_tuples(
                    correlations.index.str.split("_").tolist(), names=("organ_2", "organ_1")
                )
                correlations.swaplevel().to_excel(
                    f"../../data/page6_LinearXWASCorrelations/average_correlations/Correlations{mean_or_std}_comparisons_{subset_method}_{correlation_type}.xlsx"
                )
| StarcoderdataPython |
4935210 | # Enter your code here. Read input from STDIN. Print output to STDOUT
if __name__ == "__main__":
    # HackerRank "Designer Door Mat": n rows by m columns, growing then
    # shrinking '.|.' diamonds around a centered WELCOME line.
    n, m = (int(token) for token in input().split())
    top_half = [(".|." * width).center(m, "-") for width in range(1, n, 2)]
    for line in top_half:
        print(line)
    print("WELCOME".center(m, "-"))
    for line in reversed(top_half):
        print(line)
| StarcoderdataPython |
9750508 | # -*- coding: utf-8 -*-
"""Console script for assignment3."""
import sys
sys.path.append('.')
import click
from assignment3 import utils
from assignment3 import LEDTester
click.disable_unicode_literals_warning = True
@click.command()
@click.option("--input", default=None, help="input URI (file or URL)")
def main(input=None):
    """Parse the LED instruction file and print how many LEDs end up occupied."""
    # NOTE: the option parameter is named `input`, shadowing the builtin --
    # kept as-is because it defines the CLI flag name.
    N, instructions = utils.parseFile(input)
    ledTester = LEDTester.LEDTester(N)
    for i in instructions:
        ledTester.apply(i)
    print('#occupied: ', ledTester.count())
if __name__ == "__main__":
    import sys
    # Exit with whatever click's command returns.
    sys.exit(main())
| StarcoderdataPython |
366913 | <reponame>BertVanAcker/steam-jack<gh_stars>0
#imports
import time
from steam_jack.DeviceLibrary import Emlid_navio
from steam_jack.Communicator.Communicator_Constants import *
#instantiate the device
#device = Emlid_navio.Emlid_navio(UDP_IP='192.168.0.150',UDP_PORT=6789,DEBUG=False) #emulator Windows
device = Emlid_navio.Emlid_navio(UDP_IP='127.0.0.1',UDP_PORT=6789,UDP_IP_RESPONSE='127.0.0.1',DEBUG=False) #emulator mac
#change the color of the build-in LED
device.buildinLED(NAVIO_LED_Green)
#blinking the build-in LED
device.blinkLED(delay=1) #in seconds
time.sleep(10)  # let the LED blink for ten seconds before stopping
device.stopBlinkLED()
device.deactivate()  # release the device/connection when done
269803 | <filename>step_motor.py<gh_stars>1-10
#!/usr/bin/env python
#
# Hardware 28BYJ-48 Stepper
# Gear Reduction Ratio: 1/64
# Step Torque Angle: 5.625 degrees /64
# 360/5.625 = 64
import sys
import os
import time
import LMK.GPIO as GPIO
# The stepper motor can be driven in different ways
# See http://en.wikipedia.org/wiki/Stepper_motor
# Highest torque speed
out1=[]
out2=[]
out3=[]
out4=[]
# Full step drive (Maximum Torque)
Aout1=[1,1,0,0]
Aout2=[0,1,1,0]
Aout3=[0,0,1,1]
Aout4=[1,0,0,1]
# Wave drive (increase angular resolution)
Bout1=[1,0,0,0]
Bout2=[0,1,0,0]
Bout3=[0,0,1,0]
Bout4=[0,0,0,1]
# Half step drive ( Maximum angle minimum torque)
Cout1=[1,1,0,0,0,0,0,1]
Cout2=[0,1,1,1,0,0,0,0]
Cout3=[0,0,0,1,1,1,0,0]
Cout4=[0,0,0,0,0,1,1,1]
class Motor:
CLOCKWISE = 0
ANTICLOCKWISE = 1
GEARING = 64
STEPS = 8
REVOLUTION = GEARING * STEPS
NORMAL = 0.0025
SLOW = NORMAL * 2
running = False
position = 0
halt = False
def __init__(self, pin1, pin2, pin3, pin4):
self.pin1 = pin1
self.pin2 = pin2
self.pin3 = pin3
self.pin4 = pin4
self.speed = self.NORMAL
self.moving = False
self.setFullStepDrive()
return
# Initialise GPIO pins for this motor
def init(self):
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self.pin1,GPIO.OUT)
GPIO.setup(self.pin2,GPIO.OUT)
GPIO.setup(self.pin3,GPIO.OUT)
GPIO.setup(self.pin4,GPIO.OUT)
self.zeroPosition()
return
# Reset (stop) motor
def reset(self):
GPIO.output(self.pin1,GPIO.LOW)
GPIO.output(self.pin2,GPIO.LOW)
GPIO.output(self.pin3,GPIO.LOW)
GPIO.output(self.pin4,GPIO.LOW)
self.moving = False
return
# Turn the motor
def turn(self,steps,direction):
global CLOCKWISE
self.stop()
self.moving = True
self.steps = steps
mrange = len(out1)
while self.steps > 0:
if direction == self.CLOCKWISE:
for pin in range(mrange):
GPIO.output(self.pin1,out1[pin])
GPIO.output(self.pin2,out2[pin])
GPIO.output(self.pin3,out3[pin])
GPIO.output(self.pin4,out4[pin])
time.sleep(self.speed)
self.incrementPosition()
else:
for pin in reversed(range(mrange)):
GPIO.output(self.pin1,out1[pin])
GPIO.output(self.pin2,out2[pin])
GPIO.output(self.pin3,out3[pin])
GPIO.output(self.pin4,out4[pin])
time.sleep(self.speed)
self.decrementPosition()
self.steps -= 1
if self.halt:
break
self.stop()
return
def interrupt(self):
self.halt = True
return
# Increment current position
def incrementPosition(self):
self.position += 1
if self.position >= self.REVOLUTION:
self.position -= self.REVOLUTION
return self.position
# Increment current position
def decrementPosition(self):
self.position -= 1
if self.position < 0:
self.position += self.REVOLUTION
return self.position
# Increment current position
def zeroPosition(self):
self.position = 0
return self.position
# Is the motor running (Future use)
def isRunning(self):
return self.running
# Goto a specific position
def goto(self, position):
newpos = position
while newpos > self.REVOLUTION:
newpos -= self.REVOLUTION
delta = newpos - self.position
# Figure which direction to turn
if delta > self.REVOLUTION/2:
delta = self.REVOLUTION/2 - delta
elif delta < (0-self.REVOLUTION/2):
delta = self.REVOLUTION + delta
# Turn the most the efficient direction
if delta > 0:
self.turn(delta,self.CLOCKWISE)
elif delta < 0:
delta = 0 - delta
self.turn(delta,self.ANTICLOCKWISE)
self.position = newpos
if self.position == self.REVOLUTION:
self.position = 0
return self.position
# Stop the motor (calls reset)
def stop(self):
self.reset()
return
# Lock the motor (also keeps motor warm)
def lock(self):
    """Energise opposing coils to hold the shaft in place."""
    levels = (
        (self.pin1, GPIO.HIGH),
        (self.pin2, GPIO.LOW),
        (self.pin3, GPIO.LOW),
        (self.pin4, GPIO.HIGH),
    )
    for pin, level in levels:
        GPIO.output(pin, level)
    self.moving = False
    return
# Set Full Step Drive
def setFullStepDrive(self):
    """Select the full-step drive sequence at normal speed."""
    global out1, out2, out3, out4
    out1, out2, out3, out4 = Aout1, Aout2, Aout3, Aout4
    self.speed = self.NORMAL
    return
# Set Half Step Drive
def setHalfStepDrive(self):
    """Select the half-step drive sequence at normal speed."""
    global out1, out2, out3, out4
    out1, out2, out3, out4 = Bout1, Bout2, Bout3, Bout4
    self.speed = self.NORMAL
    return
# Set Wave Drive
def setWaveDrive(self):
    """Select the wave drive sequence (single coil), at a faster step rate."""
    global out1, out2, out3, out4
    out1, out2, out3, out4 = Cout1, Cout2, Cout3, Cout4
    self.speed = self.NORMAL / 3
    return
# Set speed of motor (delay, in seconds, between drive steps)
def setSpeed(self, speed):
    """Store the per-step delay used by turn()."""
    self.speed = speed
    return
# Get motor position
def getPosition(self):
    """Return the current step position counter."""
    return self.position
# End of Motor class
| StarcoderdataPython |
3417468 | <gh_stars>0
import os
from collections import defaultdict
from copy import deepcopy
from functools import partial
from pathlib import Path
from typing import Type
import numpy as np
from qtpy.QtCore import QByteArray, Qt, Signal, Slot
from qtpy.QtGui import QCloseEvent, QGuiApplication, QIcon, QKeySequence, QTextOption
from qtpy.QtWidgets import (
QAbstractSpinBox,
QCheckBox,
QDoubleSpinBox,
QFileDialog,
QFormLayout,
QGridLayout,
QHBoxLayout,
QLabel,
QMessageBox,
QProgressBar,
QPushButton,
QSizePolicy,
QSpinBox,
QTabWidget,
QTextEdit,
QVBoxLayout,
QWidget,
)
import PartSegData
from PartSegCore import UNIT_SCALE, Units, state_store
from PartSegCore.io_utils import HistoryElement, HistoryProblem, WrongFileTypeException
from PartSegCore.mask import io_functions
from PartSegCore.mask.algorithm_description import mask_algorithm_dict
from PartSegCore.mask.history_utils import create_history_element_from_segmentation_tuple
from PartSegCore.mask.io_functions import (
LoadSegmentation,
LoadSegmentationParameters,
SaveSegmentation,
SegmentationTuple,
)
from PartSegCore.mask_create import calculate_mask_from_project
from PartSegCore.segmentation.algorithm_base import SegmentationResult
from PartSegImage import Image, TiffImageReader
from ..common_gui.advanced_tabs import AdvancedWindow
from ..common_gui.algorithms_description import AlgorithmChoose, AlgorithmSettingsWidget, EnumComboBox
from ..common_gui.channel_control import ChannelProperty
from ..common_gui.custom_load_dialog import CustomLoadDialog
from ..common_gui.custom_save_dialog import SaveDialog
from ..common_gui.flow_layout import FlowLayout
from ..common_gui.main_window import BaseMainMenu, BaseMainWindow
from ..common_gui.mask_widget import MaskDialogBase
from ..common_gui.multiple_file_widget import MultipleFileWidget
from ..common_gui.napari_image_view import LabelEnum
from ..common_gui.select_multiple_files import AddFiles
from ..common_gui.stack_image_view import ColorBar
from ..common_gui.universal_gui_part import right_label
from ..common_gui.waiting_dialog import ExecuteFunctionDialog
from ..segmentation_mask.segmentation_info_dialog import SegmentationInfoDialog
from .batch_proceed import BatchProceed, BatchTask
from .image_view import StackImageView
from .simple_measurements import SimpleMeasurements
from .stack_settings import StackSettings, get_mask
# Directory where the mask-segmentation GUI persists its settings/state
CONFIG_FOLDER = os.path.join(state_store.save_folder, "mask")
class MaskDialog(MaskDialogBase):
    """Mask-manager dialog: create the next mask from the current
    segmentation, or step back to the previous one from history."""

    def __init__(self, settings: StackSettings):
        super().__init__(settings)
        self.settings = settings

    def next_mask(self):
        """Build a mask from the current project state, push the current
        state onto history, and apply the new mask."""
        project_info: SegmentationTuple = self.settings.get_project_info()
        mask_property = self.mask_widget.get_mask_property()
        self.settings.set("mask_manager.mask_property", mask_property)
        mask = calculate_mask_from_project(mask_description=mask_property, project=project_info)
        self.settings.add_history_element(create_history_element_from_segmentation_tuple(project_info, mask_property,))
        self.settings.mask = mask
        # new mask invalidates the previous component selection
        self.settings.chosen_components_widget.un_check_all()
        self.close()

    def prev_mask(self):
        """Pop the newest history element and restore its segmentation,
        parameters and (optional) mask."""
        history: HistoryElement = self.settings.history_pop()
        # ``arrays`` is a seekable buffer holding an npz archive; rewind
        # before and after reading so it stays reusable
        history.arrays.seek(0)
        seg = np.load(history.arrays)
        history.arrays.seek(0)
        self.settings.segmentation = seg["segmentation"]
        self.settings.set_segmentation(
            seg["segmentation"],
            False,
            history.segmentation_parameters["selected"],
            history.segmentation_parameters["parameters"],
        )
        if "mask" in seg:
            self.settings.mask = seg["mask"]
        else:
            self.settings.mask = None
        self.close()
class MainMenu(BaseMainMenu):
    """Top button bar of the mask-segmentation window: load/save actions,
    advanced settings, mask manager and simple measurements."""

    image_loaded = Signal()

    def __init__(self, settings: StackSettings, main_window):
        """
        :type settings: StackSettings
        :param settings:
        """
        super().__init__(settings, main_window)
        self.settings = settings
        self.segmentation_cache = None
        self.read_thread = None
        self.advanced_window = None
        self.measurements_window = None
        self.load_image_btn = QPushButton("Load image")
        self.load_image_btn.clicked.connect(self.load_image)
        self.load_segmentation_btn = QPushButton("Load segmentation")
        self.load_segmentation_btn.clicked.connect(self.load_segmentation)
        self.save_segmentation_btn = QPushButton("Save segmentation")
        self.save_segmentation_btn.clicked.connect(self.save_segmentation)
        self.save_catted_parts = QPushButton("Save components")
        self.save_catted_parts.clicked.connect(self.save_result)
        self.advanced_window_btn = QPushButton("Advanced settings")
        self.advanced_window_btn.clicked.connect(self.show_advanced_window)
        self.mask_manager_btn = QPushButton("Mask manager")
        self.mask_manager_btn.clicked.connect(self.mask_manager)
        self.measurements_btn = QPushButton("Simple measurements")
        self.measurements_btn.clicked.connect(self.simple_measurement)
        self.segmentation_dialog = SegmentationInfoDialog(
            self.main_window.settings,
            self.main_window.options_panel.algorithm_options.algorithm_choose_widget.change_algorithm,
        )
        self.setContentsMargins(0, 0, 0, 0)
        layout = QHBoxLayout()
        layout.setContentsMargins(0, 0, 0, 0)
        # layout.setSpacing(0)
        layout.addWidget(self.load_image_btn)
        layout.addWidget(self.load_segmentation_btn)
        layout.addWidget(self.save_catted_parts)
        layout.addWidget(self.save_segmentation_btn)
        layout.addWidget(self.advanced_window_btn)
        layout.addWidget(self.mask_manager_btn)
        layout.addWidget(self.measurements_btn)
        self.setLayout(layout)

    def simple_measurement(self):
        """Show (lazily creating) the simple-measurements window."""
        if self.measurements_window is None:
            self.measurements_window = SimpleMeasurements(self.settings)
        self.measurements_window.show()

    def mask_manager(self):
        """Open the mask-manager dialog; requires a segmentation with at
        least one selected component."""
        if self.settings.segmentation is None:
            QMessageBox.information(self, "No segmentation", "Cannot create mask without segmentation")
            return
        if not self.settings.chosen_components():
            QMessageBox.information(self, "No selected components", "Mask is created only from selected components")
            return
        dial = MaskDialog(self.settings)
        dial.exec_()

    def show_advanced_window(self):
        """Show (lazily creating) the advanced settings window."""
        if self.advanced_window is None:
            self.advanced_window = AdvancedWindow(self.settings, ["channelcontrol"])
            # FIXME temporary workaround
            self.advanced_window.reload_list = []
        self.advanced_window.show()

    def load_image(self):
        """Ask the user for an image file and load it in a progress dialog."""
        # TODO move segmentation with image load to load_segmentaion
        dial = CustomLoadDialog(io_functions.load_dict)
        dial.setDirectory(self.settings.get("io.load_image_directory", str(Path.home())))
        dial.selectNameFilter(self.settings.get("io.load_data_filter", next(iter(io_functions.load_dict.keys()))))
        dial.setHistory(dial.history() + self.settings.get_path_history())
        if not dial.exec_():
            return
        load_property = dial.get_result()
        self.settings.set("io.load_image_directory", os.path.dirname(load_property.load_location[0]))
        self.settings.set("io.load_data_filter", load_property.selected_filter)
        self.settings.add_path_history(os.path.dirname(load_property.load_location[0]))

        def exception_hook(exception):
            # translate known failure modes into user-readable warnings
            if isinstance(exception, ValueError) and exception.args[0] == "not a TIFF file":
                QMessageBox.warning(self, "Open error", "Image is not proper tiff/lsm image")
            elif isinstance(exception, MemoryError):
                QMessageBox.warning(self, "Open error", "Not enough memory to read this image")
            elif isinstance(exception, IOError):
                QMessageBox.warning(self, "Open error", f"Some problem with reading from disc: {exception}")
            elif isinstance(exception, WrongFileTypeException):
                QMessageBox.warning(
                    self,
                    "Open error",
                    "No needed files inside archive. Most probably you choose file from segmentation analysis",
                )
            else:
                raise exception

        execute_dialog = ExecuteFunctionDialog(
            load_property.load_class.load,
            [load_property.load_location],
            {"metadata": {"default_spacing": self.settings.image.spacing}},
            text="Load data",
            exception_hook=exception_hook,
        )
        if execute_dialog.exec():
            result = execute_dialog.get_result()
            if result is None:
                return
            self.set_data(result)

    def set_image(self, image: Image) -> bool:
        """Install *image* in settings; time-series data is either rejected
        or (with user consent) reinterpreted as a z-stack.

        :return: True when the image was accepted
        """
        if image is None:
            return False
        if image.is_time:
            if image.is_stack:
                QMessageBox.warning(self, "Not supported", "Data that are time data are currently not supported")
                return False
            else:
                res = QMessageBox.question(
                    self,
                    "Not supported",
                    "Time data are currently not supported. Maybe You would like to treat time as z-stack",
                    QMessageBox.Yes | QMessageBox.No,
                    QMessageBox.No,
                )
                if res == QMessageBox.Yes:
                    image = image.swap_time_and_stack()
                else:
                    return False
        self.settings.image = image
        return True

    def load_segmentation(self):
        """Load a segmentation (or only its parameters) from file and apply
        it to the current project."""
        dial = CustomLoadDialog(
            {
                LoadSegmentation.get_name(): LoadSegmentation,
                LoadSegmentationParameters.get_name(): LoadSegmentationParameters,
            }
        )
        dial.setDirectory(self.settings.get("io.open_segmentation_directory", str(Path.home())))
        dial.setHistory(dial.history() + self.settings.get_path_history())
        if not dial.exec_():
            return
        load_property = dial.get_result()
        self.settings.set("io.open_segmentation_directory", os.path.dirname(load_property.load_location[0]))
        self.settings.add_path_history(os.path.dirname(load_property.load_location[0]))

        def exception_hook(exception):
            mess = QMessageBox(self)
            if isinstance(exception, ValueError) and exception.args[0] == "Segmentation do not fit to image":
                mess.warning(self, "Open error", "Segmentation do not fit to image")
            elif isinstance(exception, MemoryError):
                mess.warning(self, "Open error", "Not enough memory to read this image")
            elif isinstance(exception, IOError):
                mess.warning(self, "Open error", "Some problem with reading from disc")
            elif isinstance(exception, WrongFileTypeException):
                mess.warning(
                    self,
                    "Open error",
                    "No needed files inside archive. Most probably you choose file from segmentation analysis",
                )
            else:
                raise exception

        dial = ExecuteFunctionDialog(
            load_property.load_class.load,
            [load_property.load_location],
            text="Load segmentation",
            exception_hook=exception_hook,
        )
        if dial.exec():
            result = dial.get_result()
            if result is None:
                QMessageBox.critical(self, "Data Load fail", "Fail of loading data")
                return
            if result.segmentation is not None:
                try:
                    self.settings.set_project_info(dial.get_result())
                    return
                except ValueError as e:
                    if e.args != ("Segmentation do not fit to image",):
                        raise
                    # fall through to showing the parameters-only dialog
                    # (typo fix: "would lie" -> "would like")
                    self.segmentation_dialog.set_additional_text(
                        "Segmentation do not fit to image, maybe you would like to load parameters only."
                    )
                except HistoryProblem:
                    # typo fix: "incomatybile" -> "incompatible"
                    QMessageBox().warning(
                        self,
                        "Load Problem",
                        "You set to save selected components when loading "
                        "another segmentation but history is incompatible",
                    )
            else:
                self.segmentation_dialog.set_additional_text("")
            self.segmentation_dialog.set_parameters_dict(result.segmentation_parameters)
            self.segmentation_dialog.show()

    def save_segmentation(self):
        """Save the current segmentation to a user-chosen file."""
        if self.settings.segmentation is None:
            QMessageBox.warning(self, "No segmentation", "No segmentation to save")
            return
        dial = SaveDialog(io_functions.save_segmentation_dict, False, history=self.settings.get_path_history())
        dial.setDirectory(self.settings.get("io.save_segmentation_directory", str(Path.home())))
        dial.selectFile(os.path.splitext(os.path.basename(self.settings.image_path))[0] + ".seg")
        if not dial.exec_():
            return
        save_location, _selected_filter, save_class, values = dial.get_result()
        self.settings.set("io.save_segmentation_directory", os.path.dirname(str(save_location)))
        self.settings.add_path_history(os.path.dirname(str(save_location)))
        # self.settings.save_directory = os.path.dirname(str(file_path))

        def exception_hook(exception):
            QMessageBox.critical(self, "Save error", f"Error on disc operation. Text: {exception}", QMessageBox.Ok)
            raise exception

        dial = ExecuteFunctionDialog(
            save_class.save,
            [save_location, self.settings.get_project_info(), values],
            text="Save segmentation",
            exception_hook=exception_hook,
        )
        dial.exec()

    def save_result(self):
        """Save selected components as separate files; warns about existing
        files and re-prompts on refusal to overwrite."""
        if self.settings.image_path is not None and QMessageBox.Yes == QMessageBox.question(
            self, "Copy", "Copy name to clipboard?", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes
        ):
            clipboard = QGuiApplication.clipboard()
            clipboard.setText(os.path.splitext(os.path.basename(self.settings.image_path))[0])
        if self.settings.segmentation is None or len(self.settings.sizes) == 1:
            QMessageBox.warning(self, "No components", "No components to save")
            return
        dial = SaveDialog(
            io_functions.save_components_dict,
            False,
            history=self.settings.get_path_history(),
            file_mode=QFileDialog.Directory,
        )
        dial.setDirectory(self.settings.get("io.save_components_directory", str(Path.home())))
        dial.selectFile(os.path.splitext(os.path.basename(self.settings.image_path))[0])
        if not dial.exec_():
            return
        res = dial.get_result()
        potential_names = self.settings.get_file_names_for_save_result(res.save_destination)
        conflict = []
        for el in potential_names:
            if os.path.exists(el):
                conflict.append(el)
        if len(conflict) > 0:
            # TODO modify because of long lists
            conflict_str = "\n".join(conflict)
            if QMessageBox.No == QMessageBox.warning(
                self,
                "Overwrite",
                f"Overwrite files:\n {conflict_str}",
                QMessageBox.Yes | QMessageBox.No,
                QMessageBox.No,
            ):
                self.save_result()
                return
        self.settings.set("io.save_components_directory", os.path.dirname(str(res.save_destination)))
        self.settings.add_path_history(os.path.dirname(str(res.save_destination)))

        def exception_hook(exception):
            QMessageBox.critical(self, "Save error", f"Error on disc operation. Text: {exception}", QMessageBox.Ok)

        dial = ExecuteFunctionDialog(
            res.save_class.save,
            [res.save_destination, self.settings.get_project_info(), res.parameters],
            text="Save components",
            exception_hook=exception_hook,
        )
        dial.exec()
class ComponentCheckBox(QCheckBox):
    """Checkbox for one segmentation component that also reports mouse
    hover enter/leave with the component number."""

    mouse_enter = Signal(int)  # emitted with component number on hover start
    mouse_leave = Signal(int)  # emitted with component number on hover end

    def __init__(self, number: int, parent=None):
        super().__init__(str(number), parent)
        self.number = number  # component number, also used as the label

    def enterEvent(self, _event):
        self.mouse_enter.emit(self.number)

    def leaveEvent(self, _event):
        self.mouse_leave.emit(self.number)
class ChosenComponents(QWidget):
    """Panel with one checkbox per segmentation component plus select/unselect
    all buttons; re-emits check changes and hover events.

    :type check_box: dict[int, QCheckBox]
    """

    check_change_signal = Signal()
    mouse_enter = Signal(int)
    mouse_leave = Signal(int)

    def __init__(self):
        super(ChosenComponents, self).__init__()
        # self.setLayout(FlowLayout())
        self.check_box = dict()
        self.check_all_btn = QPushButton("Select all")
        self.check_all_btn.clicked.connect(self.check_all)
        self.un_check_all_btn = QPushButton("Unselect all")
        self.un_check_all_btn.clicked.connect(self.un_check_all)
        main_layout = QVBoxLayout()
        btn_layout = QHBoxLayout()
        btn_layout.addWidget(self.check_all_btn)
        btn_layout.addWidget(self.un_check_all_btn)
        self.check_layout = FlowLayout()
        main_layout.addLayout(btn_layout)
        main_layout.addLayout(self.check_layout)
        self.setLayout(main_layout)

    def other_component_choose(self, num):
        """Toggle checkbox *num* (e.g. triggered by a click on the image)."""
        check = self.check_box[num]
        check.setChecked(not check.isChecked())

    def check_all(self):
        for el in self.check_box.values():
            el.setChecked(True)

    def un_check_all(self):
        for el in self.check_box.values():
            el.setChecked(False)

    def remove_components(self):
        """Delete all checkboxes and disconnect their signals."""
        self.check_layout.clear()
        for el in self.check_box.values():
            el.deleteLater()
            el.stateChanged.disconnect()
            el.mouse_leave.disconnect()
            el.mouse_enter.disconnect()
        self.check_box.clear()

    def new_choose(self, num, chosen_components):
        """Rebuild checkboxes for components 1..num, checking *chosen_components*."""
        self.set_chose(range(1, num + 1), chosen_components)

    def set_chose(self, components_index, chosen_components):
        """Rebuild the checkbox set for *components_index*, checking those in
        *chosen_components*."""
        chosen_components = set(chosen_components)
        self.blockSignals(True)
        self.remove_components()
        for el in components_index:
            check = ComponentCheckBox(el)
            if el in chosen_components:
                check.setChecked(True)
            check.stateChanged.connect(self.check_change)
            check.mouse_enter.connect(self.mouse_enter.emit)
            check.mouse_leave.connect(self.mouse_leave.emit)
            self.check_box[el] = check
            self.check_layout.addWidget(check)
        self.blockSignals(False)
        self.update()
        self.check_change_signal.emit()

    def check_change(self):
        self.check_change_signal.emit()

    def change_state(self, num, val):
        self.check_box[num].setChecked(val)

    def get_state(self, num: int) -> bool:
        """Return whether component *num* is checked; False for unknown numbers.

        Fix for report ID af9b57f074264169b4353aa1e61d8bc2: ``check_box`` is a
        dict keyed by component number (starting at 1), so the old length
        comparison (``num >= len(self.check_box)``) wrongly rejected the
        highest component and could raise KeyError for gaps in numbering.
        """
        if num not in self.check_box:
            return False
        return self.check_box[num].isChecked()

    def get_chosen(self):
        """Return the list of checked component numbers."""
        return [num for num, check in self.check_box.items() if check.isChecked()]

    def get_mask(self):
        """Return a 0/1 uint8 lookup array indexed by component number
        (index 0 is the background and always 0)."""
        res = [0]
        for _, check in sorted(self.check_box.items()):
            res.append(check.isChecked())
        return np.array(res, dtype=np.uint8)
class AlgorithmOptions(QWidget):
    """Segmentation tab: algorithm selection/parameters, execution (single,
    background, batch), display options and component selection."""

    def __init__(self, settings: StackSettings, image_view: StackImageView, component_checker):
        control_view = image_view.get_control_view()
        super().__init__()
        self.settings = settings
        self.show_result = EnumComboBox(LabelEnum)  # QCheckBox("Show result")
        self.show_result.set_value(control_view.show_label)
        self.opacity = QDoubleSpinBox()
        self.opacity.setRange(0, 1)
        self.opacity.setSingleStep(0.1)
        self.opacity.setValue(control_view.opacity)
        self.only_borders = QCheckBox("Only borders")
        self.only_borders.setChecked(control_view.only_borders)
        self.borders_thick = QSpinBox()
        self.borders_thick.setRange(1, 11)
        self.borders_thick.setSingleStep(2)
        self.borders_thick.setValue(control_view.borders_thick)
        # noinspection PyUnresolvedReferences
        self.borders_thick.valueChanged.connect(self.border_value_check)
        self.execute_in_background_btn = QPushButton("Execute in background")
        self.execute_in_background_btn.setToolTip("Run calculation in background. Put result in multiple files panel")
        self.execute_btn = QPushButton("Execute")
        self.execute_btn.setStyleSheet("QPushButton{font-weight: bold;}")
        self.execute_all_btn = QPushButton("Execute all")
        self.execute_all_btn.setToolTip(
            "Execute in batch mode segmentation with current parameter. " "File list need to be specified in image tab."
        )
        self.execute_all_btn.setDisabled(True)
        self.save_parameters_btn = QPushButton("Save parameters")
        self.block_execute_all_btn = False
        self.algorithm_choose_widget = AlgorithmChoose(settings, mask_algorithm_dict)
        self.algorithm_choose_widget.result.connect(self.execution_done)
        self.algorithm_choose_widget.finished.connect(self.execution_finished)
        self.algorithm_choose_widget.progress_signal.connect(self.progress_info)
        # self.stack_layout = QStackedLayout()
        self.keep_chosen_components_chk = QCheckBox("Save selected components")
        self.keep_chosen_components_chk.setToolTip(
            "Save chosen components when loading segmentation form file\n" "or from multiple file widget."
        )
        self.keep_chosen_components_chk.stateChanged.connect(self.set_keep_chosen_components)
        self.keep_chosen_components_chk.setChecked(settings.keep_chosen_components)
        self.show_parameters = QPushButton("Show parameters")
        self.show_parameters.setToolTip("Show parameters of segmentation for each components")
        self.show_parameters_widget = SegmentationInfoDialog(
            self.settings, self.algorithm_choose_widget.change_algorithm
        )
        self.show_parameters.clicked.connect(self.show_parameters_widget.show)
        self.choose_components = ChosenComponents()
        self.choose_components.check_change_signal.connect(control_view.components_change)
        self.choose_components.mouse_leave.connect(image_view.component_unmark)
        self.choose_components.mouse_enter.connect(image_view.component_mark)
        # WARNING works only with one channels algorithms
        # SynchronizeValues.add_synchronization("channels_chose", widgets_list)
        self.chosen_list = []
        self.progress_bar2 = QProgressBar()  # batch (per-file) progress
        self.progress_bar2.setHidden(True)
        self.progress_bar = QProgressBar()  # per-segmentation progress
        self.progress_bar.setHidden(True)
        self.progress_info_lab = QLabel()
        self.progress_info_lab.setHidden(True)
        self.file_list = []
        self.batch_process = BatchProceed()
        self.batch_process.progress_signal.connect(self.progress_info)
        self.batch_process.error_signal.connect(self.execution_all_error)
        self.batch_process.execution_done.connect(self.execution_all_done)
        self.batch_process.range_signal.connect(self.progress_bar.setRange)
        self.is_batch_process = False
        self.setContentsMargins(0, 0, 0, 0)
        main_layout = QVBoxLayout()
        # main_layout.setSpacing(0)
        opt_layout = QHBoxLayout()
        opt_layout.setContentsMargins(0, 0, 0, 0)
        opt_layout.addWidget(self.show_result)
        opt_layout.addWidget(right_label("Opacity:"))
        opt_layout.addWidget(self.opacity)
        main_layout.addLayout(opt_layout)
        opt_layout2 = QHBoxLayout()
        opt_layout2.setContentsMargins(0, 0, 0, 0)
        opt_layout2.addWidget(self.only_borders)
        opt_layout2.addWidget(right_label("Border thick:"))
        opt_layout2.addWidget(self.borders_thick)
        main_layout.addLayout(opt_layout2)
        btn_layout = QGridLayout()
        btn_layout.setContentsMargins(0, 0, 0, 0)
        btn_layout.addWidget(self.execute_btn, 0, 0)
        btn_layout.addWidget(self.execute_in_background_btn, 0, 1)
        btn_layout.addWidget(self.execute_all_btn, 1, 0)
        btn_layout.addWidget(self.save_parameters_btn, 1, 1)
        main_layout.addLayout(btn_layout)
        main_layout.addWidget(self.progress_bar2)
        main_layout.addWidget(self.progress_bar)
        main_layout.addWidget(self.progress_info_lab)
        main_layout.addWidget(self.algorithm_choose_widget, 1)
        # main_layout.addWidget(self.algorithm_choose)
        # main_layout.addLayout(self.stack_layout, 1)
        main_layout.addWidget(self.choose_components)
        down_layout = QHBoxLayout()
        down_layout.addWidget(self.keep_chosen_components_chk)
        down_layout.addWidget(self.show_parameters)
        main_layout.addLayout(down_layout)
        main_layout.addStretch()
        main_layout.setContentsMargins(0, 0, 0, 0)
        # main_layout.setSpacing(0)
        self.setLayout(main_layout)
        # noinspection PyUnresolvedReferences
        self.execute_in_background_btn.clicked.connect(self.execute_in_background)
        self.execute_btn.clicked.connect(self.execute_action)
        self.execute_all_btn.clicked.connect(self.execute_all_action)
        self.save_parameters_btn.clicked.connect(self.save_parameters)
        # noinspection PyUnresolvedReferences
        self.opacity.valueChanged.connect(control_view.set_opacity)
        # noinspection PyUnresolvedReferences
        self.show_result.current_choose.connect(control_view.set_show_label)
        self.only_borders.stateChanged.connect(control_view.set_borders)
        # noinspection PyUnresolvedReferences
        self.borders_thick.valueChanged.connect(control_view.set_borders_thick)
        component_checker.component_clicked.connect(self.choose_components.other_component_choose)
        settings.chosen_components_widget = self.choose_components
        settings.components_change_list.connect(self.choose_components.new_choose)
        settings.image_changed.connect(self.choose_components.remove_components)

    @Slot(int)
    def set_keep_chosen_components(self, val):
        """Persist the 'keep chosen components on load' flag in settings."""
        self.settings.set_keep_chosen_components(val)

    def save_parameters(self):
        """Save the current algorithm parameters to a user-chosen file."""
        dial = SaveDialog(io_functions.save_parameters_dict, False, history=self.settings.get_path_history())
        if not dial.exec_():
            return
        res = dial.get_result()
        self.settings.add_path_history(os.path.dirname(str(res.save_destination)))
        res.save_class.save(res.save_destination, self.algorithm_choose_widget.current_parameters())

    def border_value_check(self, value):
        # border thickness must be odd; bump even values up by one
        if value % 2 == 0:
            self.borders_thick.setValue(value + 1)

    def file_list_change(self, val):
        """React to a changed batch file list; enables 'Execute all'."""
        self.file_list = val
        if len(self.file_list) > 0 and not self.block_execute_all_btn:
            self.execute_all_btn.setEnabled(True)
        else:
            self.execute_all_btn.setDisabled(True)

    def get_chosen_components(self):
        """Return the sorted list of selected component numbers."""
        return sorted(self.choose_components.get_chosen())

    @property
    def segmentation(self):
        return self.settings.segmentation

    @segmentation.setter
    def segmentation(self, val):
        self.settings.segmentation = val

    def _image_changed(self):
        # a new image invalidates segmentation and component selection
        self.settings.segmentation = None
        self.choose_components.set_chose([], [])

    def _execute_in_background_init(self):
        """Show progress widgets and start the batch thread if not running."""
        if self.batch_process.isRunning():
            return
        self.progress_bar2.setVisible(True)
        self.progress_bar2.setRange(0, self.batch_process.queue.qsize())
        self.progress_bar2.setValue(self.batch_process.index)
        self.progress_bar.setVisible(True)
        self.progress_bar.setValue(0)
        self.execute_btn.setDisabled(True)
        self.batch_process.start()

    def execute_in_background(self):
        """Queue the current project for background segmentation."""
        # TODO check if components are properly passed
        widget = self.algorithm_choose_widget.current_widget()
        segmentation_profile = widget.get_segmentation_profile()
        task = BatchTask(self.settings.get_project_info(), segmentation_profile, None)
        self.batch_process.add_task(task)
        self.progress_bar2.setRange(0, self.progress_bar2.maximum() + 1)
        self._execute_in_background_init()

    def execute_all_action(self):
        """Run the current algorithm on every file in the batch list, saving
        results to a user-chosen directory."""
        dial = SaveDialog(
            {SaveSegmentation.get_name(): SaveSegmentation},
            history=self.settings.get_path_history(),
            system_widget=False,
        )
        dial.setFileMode(QFileDialog.Directory)
        dial.setDirectory(self.settings.get("io.save_batch", self.settings.get("io.save_segmentation_directory", "")))
        if not dial.exec_():
            return
        folder_path = str(dial.selectedFiles()[0])
        self.settings.set("io.save_batch", folder_path)
        widget = self.algorithm_choose_widget.current_widget()
        save_parameters = dial.values
        segmentation_profile = widget.get_segmentation_profile()
        for file_path in self.file_list:
            task = BatchTask(file_path, segmentation_profile, (folder_path, save_parameters))
            self.batch_process.add_task(task)
        self.progress_bar2.setRange(0, self.progress_bar2.maximum() + len(self.file_list))
        self._execute_in_background_init()

    def execution_all_error(self, text):
        QMessageBox.warning(self, "Proceed error", text)

    def execution_all_done(self):
        """Restart the batch thread if work remains; otherwise restore UI."""
        if not self.batch_process.queue.empty():
            self._execute_in_background_init()
            return
        self.execute_btn.setEnabled(True)
        self.block_execute_all_btn = False
        if len(self.file_list) > 0:
            self.execute_all_btn.setEnabled(True)
        self.progress_bar.setHidden(True)
        self.progress_bar2.setHidden(True)
        self.progress_info_lab.setHidden(True)

    def execute_action(self):
        """Run the current algorithm on the current image (foreground)."""
        self.execute_btn.setDisabled(True)
        self.execute_all_btn.setDisabled(True)
        self.block_execute_all_btn = True
        self.is_batch_process = False
        self.progress_bar.setRange(0, 0)
        self.choose_components.setDisabled(True)
        chosen = sorted(self.choose_components.get_chosen())
        blank = get_mask(self.settings.segmentation, self.settings.mask, chosen)
        if blank is not None:
            # Problem with handling time data in algorithms
            # TODO Fix This
            blank = blank[0]
        self.progress_bar.setHidden(False)
        widget: AlgorithmSettingsWidget = self.algorithm_choose_widget.current_widget()
        widget.set_mask(blank)
        self.progress_bar.setRange(0, widget.algorithm.get_steps_num())
        widget.execute()
        self.chosen_list = chosen

    def progress_info(self, text, num, file_name="", file_num=0):
        """Update the progress label and both progress bars."""
        self.progress_info_lab.setVisible(True)
        if file_name != "":
            self.progress_info_lab.setText(file_name + "\n" + text)
        else:
            self.progress_info_lab.setText(text)
        self.progress_bar.setValue(num)
        self.progress_bar2.setValue(file_num)

    def execution_finished(self):
        """Restore the UI after a foreground segmentation run."""
        self.execute_btn.setEnabled(True)
        self.block_execute_all_btn = False
        if len(self.file_list) > 0:
            self.execute_all_btn.setEnabled(True)
        self.progress_bar.setHidden(True)
        self.progress_info_lab.setHidden(True)
        self.choose_components.setDisabled(False)

    def execution_done(self, segmentation: SegmentationResult):
        """Install a finished segmentation result into settings."""
        if np.max(segmentation.segmentation) == 0:
            QMessageBox.information(
                self, "No result", "Segmentation contains no component, check parameters, " "especially chosen channel."
            )
        if segmentation.info_text != "":
            QMessageBox.information(self, "Algorithm info", segmentation.info_text)
        # every component shares the same parameters for this run
        parameters_dict = defaultdict(lambda: deepcopy(segmentation.parameters))
        self.settings.additional_layers = segmentation.additional_layers
        self.settings.set_segmentation(segmentation.segmentation, True, [], parameters_dict)

    def showEvent(self, _):
        # refresh the algorithm widget with the current image on show
        widget = self.algorithm_choose_widget.current_widget()
        widget.image_changed(self.settings.image)
class ImageInformation(QWidget):
    """Image tab: shows the image path, editable voxel spacing (with unit
    selection), batch file list and the multiple-files panel toggle."""

    def __init__(self, settings: StackSettings, parent=None):
        """:type settings: ImageSettings"""
        super(ImageInformation, self).__init__(parent)
        self._settings = settings
        self.path = QTextEdit("<b>Path:</b> example image")
        self.path.setWordWrapMode(QTextOption.WrapAnywhere)
        self.path.setReadOnly(True)
        self.setMinimumHeight(20)
        self.spacing = [QDoubleSpinBox() for _ in range(3)]  # x, y, z
        self.multiple_files = QCheckBox("Show multiple files panel")
        self.multiple_files.setChecked(settings.get("multiple_files_widget", True))
        self.multiple_files.stateChanged.connect(self.set_multiple_files)
        units_value = self._settings.get("units_value", Units.nm)
        for el in self.spacing:
            el.setAlignment(Qt.AlignRight)
            el.setButtonSymbols(QAbstractSpinBox.NoButtons)
            el.setRange(0, 100000)
            # noinspection PyUnresolvedReferences
            el.valueChanged.connect(self.image_spacing_change)
        self.units = EnumComboBox(Units)
        self.units.set_value(units_value)
        # noinspection PyUnresolvedReferences
        self.units.currentIndexChanged.connect(self.update_spacing)
        self.add_files = AddFiles(settings, btn_layout=FlowLayout)
        spacing_layout = QFormLayout()
        spacing_layout.addRow("x:", self.spacing[0])
        spacing_layout.addRow("y:", self.spacing[1])
        spacing_layout.addRow("z:", self.spacing[2])
        spacing_layout.addRow("Units:", self.units)
        layout = QVBoxLayout()
        layout.addWidget(self.path)
        layout.addWidget(QLabel("Image spacing:"))
        layout.addLayout(spacing_layout)
        layout.addWidget(self.add_files)
        layout.addStretch(1)
        layout.addWidget(self.multiple_files)
        self.setLayout(layout)
        self._settings.image_changed[str].connect(self.set_image_path)

    @Slot(int)
    def set_multiple_files(self, val):
        self._settings.set("multiple_files_widget", val)

    def update_spacing(self, index=None):
        """Refresh the spin boxes from settings, scaled to the chosen unit."""
        units_value = self.units.get_value()
        if index is not None:
            # called from the unit combo box - persist the new unit
            self._settings.set("units_value", units_value)
        # settings store spacing as (z, y, x); the widgets are (x, y, z)
        for el, val in zip(self.spacing, self._settings.image_spacing[::-1]):
            el.blockSignals(True)
            el.setValue(val * UNIT_SCALE[units_value.value])
            el.blockSignals(False)
        if self._settings.is_image_2d():
            # self.spacing[2].setValue(0)
            self.spacing[2].setDisabled(True)
        else:
            self.spacing[2].setDisabled(False)

    def set_image_path(self, value):
        self.path.setText("<b>Path:</b> {}".format(value))
        self.update_spacing()

    def image_spacing_change(self):
        """Write edited spacing values back to settings in base units.

        Fix: scale by the selected unit's enum value (as update_spacing and
        showEvent do) instead of the combo-box index, which is not guaranteed
        to match UNIT_SCALE indexing.
        """
        units_value = self.units.get_value()
        self._settings.image_spacing = [
            el.value() / UNIT_SCALE[units_value.value] for el in self.spacing[::-1]
        ]

    def showEvent(self, _a0):
        units_value = self._settings.get("units_value", Units.nm)
        for el, val in zip(self.spacing, self._settings.image_spacing[::-1]):
            el.setValue(val * UNIT_SCALE[units_value.value])
        if self._settings.is_image_2d():
            self.spacing[2].setValue(0)
            self.spacing[2].setDisabled(True)
        else:
            self.spacing[2].setDisabled(False)
class Options(QTabWidget):
    """Right-hand panel combining the Image (info/spacing/files) and
    Segmentation (algorithm) tabs."""

    def __init__(self, settings, image_view, component_checker, parent=None):
        super(Options, self).__init__(parent)
        self._settings = settings
        self.algorithm_options = AlgorithmOptions(settings, image_view, component_checker)
        self.image_properties = ImageInformation(settings, parent)
        self.image_properties.add_files.file_list_changed.connect(self.algorithm_options.file_list_change)
        # a finished batch result should make the multiple-files panel visible
        self.algorithm_options.batch_process.multiple_result.connect(
            partial(self.image_properties.multiple_files.setChecked, True)
        )
        self.addTab(self.image_properties, "Image")
        self.addTab(self.algorithm_options, "Segmentation")
        self.setMinimumWidth(370)
        self.setCurrentIndex(1)  # open on the Segmentation tab

    def get_chosen_components(self):
        """Return the sorted list of selected component numbers."""
        return self.algorithm_options.get_chosen_components()
class MainWindow(BaseMainWindow):
    """Main window of the PartSeg mask-segmentation GUI.

    Wires together the image view, channel controls, options panel, menus and
    the multiple-file widget, and persists window geometry between sessions.
    """

    @classmethod
    def get_setting_class(cls) -> Type[StackSettings]:
        """Settings implementation used by this window."""
        return StackSettings

    # Image loaded at startup when no initial image is supplied.
    initial_image_path = PartSegData.segmentation_mask_default_image

    def __init__(
        self, config_folder=CONFIG_FOLDER, title="PartSeg", settings=None, signal_fun=None, initial_image=None
    ):
        """Build the whole window.

        :param initial_image: image to show at startup; ``None`` loads the
            bundled default image, ``False`` skips loading entirely (used by
            tests), anything else is assigned directly as the current image.
        """
        super().__init__(config_folder, title, settings, signal_fun)
        self.channel_info = "channelcontrol"
        self.channel_control = ChannelProperty(self.settings, start_name="channelcontrol")
        self.image_view = StackImageView(self.settings, self.channel_control, name="channelcontrol")
        self.image_view.setMinimumWidth(450)
        self.info_text = QLabel()
        self.info_text.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Preferred)
        self.image_view.text_info_change.connect(self.info_text.setText)
        self.options_panel = Options(self.settings, self.image_view, self.image_view)
        self.main_menu = MainMenu(self.settings, self)
        self.main_menu.image_loaded.connect(self.image_read)
        self.settings.image_changed.connect(self.image_read)
        self.color_bar = ColorBar(self.settings, self.image_view)
        self.multiple_file = MultipleFileWidget(self.settings, io_functions.load_dict)
        # Hidden unless the user previously enabled multiple-file mode.
        self.multiple_file.setVisible(self.options_panel.image_properties.multiple_files.isChecked())
        self.options_panel.algorithm_options.batch_process.multiple_result.connect(
            partial(self.multiple_file.save_state_action, custom_name=False)
        )
        self.options_panel.image_properties.multiple_files.stateChanged.connect(self.multiple_file.setVisible)
        icon = QIcon(os.path.join(PartSegData.icons_dir, "icon_stack.png"))
        self.setWindowIcon(icon)

        # Menu bar.
        menu_bar = self.menuBar()
        file_menu = menu_bar.addMenu("File")
        file_menu.addAction("&Open").triggered.connect(self.main_menu.load_image)
        file_menu.addAction("&Save segmentation").triggered.connect(self.main_menu.save_segmentation)
        file_menu.addAction("&Save components").triggered.connect(self.main_menu.save_result)
        view_menu = menu_bar.addMenu("View")
        view_menu.addAction("Settings and Measurement").triggered.connect(self.main_menu.show_advanced_window)
        view_menu.addAction("Additional output").triggered.connect(self.additional_layers_show)
        view_menu.addAction("Additional output with data").triggered.connect(lambda: self.additional_layers_show(True))
        view_menu.addAction("Napari viewer").triggered.connect(self.napari_viewer_show)
        action = view_menu.addAction("Screenshot")
        # screenshot(...) is called immediately -- presumably it returns a
        # slot bound to the given widget; confirm in BaseMainWindow.
        action.triggered.connect(self.screenshot(self.image_view))
        action.setShortcut(QKeySequence.Print)
        image_menu = menu_bar.addMenu("Image operations")
        image_menu.addAction("Image adjustment").triggered.connect(self.image_adjust_exec)
        help_menu = menu_bar.addMenu("Help")
        help_menu.addAction("State directory").triggered.connect(self.show_settings_directory)
        help_menu.addAction("About").triggered.connect(self.show_about_dialog)

        # Layout: multiple-file list | color bar | image view / info text |
        # options panel / channel controls.
        layout = QVBoxLayout()
        layout.addWidget(self.main_menu)
        sub_layout = QHBoxLayout()
        sub2_layout = QVBoxLayout()
        sub3_layout = QVBoxLayout()
        sub_layout.addWidget(self.multiple_file)
        sub_layout.addWidget(self.color_bar, 0)
        sub3_layout.addWidget(self.image_view, 1)
        sub3_layout.addWidget(self.info_text, 0)
        sub2_layout.addWidget(self.options_panel, 1)
        sub2_layout.addWidget(self.channel_control, 0)
        sub_layout.addLayout(sub3_layout, 1)
        sub_layout.addLayout(sub2_layout, 0)
        layout.addLayout(sub_layout)
        self.widget = QWidget()
        self.widget.setLayout(layout)
        self.setCentralWidget(self.widget)

        # Initial image handling (see docstring for the three cases).
        if initial_image is None:
            reader = TiffImageReader()
            im = reader.read(self.initial_image_path)
            im.file_path = ""
            self.settings.image = im
        elif initial_image is False:
            # FIXME This is for test opening
            pass
        else:
            self.settings.image = initial_image

        # Restore window geometry saved by closeEvent; missing profile key on
        # first run is expected.
        try:
            geometry = self.settings.get_from_profile("main_window_geometry")
            self.restoreGeometry(QByteArray.fromHex(bytes(geometry, "ascii")))
        except KeyError:
            pass

    def image_read(self):
        """React to a newly loaded image: reset zoom and update the title bar."""
        self.image_view.reset_image_size()
        self.setWindowTitle(f"{self.title_base}: {os.path.basename(self.settings.image_path)}")

    def closeEvent(self, event: QCloseEvent):
        """Persist geometry and settings, then tear down child windows."""
        # print(self.settings.dump_view_profiles())
        # print(self.settings.segmentation_dict["default"].my_dict)
        self.settings.set_in_profile("main_window_geometry", self.saveGeometry().toHex().data().decode("ascii"))
        # Collect current algorithm parameter values -- presumably so they are
        # included in the settings dump below; confirm.
        self.options_panel.algorithm_options.algorithm_choose_widget.recursive_get_values()
        self.main_menu.segmentation_dialog.close()
        self.options_panel.algorithm_options.show_parameters_widget.close()
        if self.main_menu.advanced_window is not None:
            self.main_menu.advanced_window.close()
            del self.main_menu.advanced_window
        if self.main_menu.measurements_window is not None:
            self.main_menu.measurements_window.close()
            del self.main_menu.measurements_window
        del self.main_menu.segmentation_dialog
        del self.options_panel.algorithm_options.show_parameters_widget
        self.settings.dump()
        super().closeEvent(event)

    def read_drop(self, paths):
        """Open files dropped onto the window using this app's loaders."""
        self._read_drop(paths, io_functions)

    @staticmethod
    def get_project_info(file_path, image):
        """Wrap *file_path*/*image* in this application's project tuple."""
        return SegmentationTuple(file_path=file_path, image=image)

    def set_data(self, data):
        """Forward project data to the main menu."""
        self.main_menu.set_data(data)

    def change_theme(self):
        """Apply the configured theme to the image view, then the base window."""
        self.image_view.set_theme(self.settings.theme_name)
        super().change_theme()
| StarcoderdataPython |
6699195 | <filename>server.py
import tornado.ioloop
import tornado.web
#import tornado.database
import sqlite3
import json
from backend.sql import process_fn
from tornado.escape import json_encode
def _execute(query, params, db_path='data/db'):
    """Run *query* with *params* against the SQLite database and return all rows.

    Bug fix: the old version re-raised on error before reaching
    ``connection.close()``, leaking the connection; the close now happens in a
    ``finally`` block on every path.

    :param query: SQL statement with ``?`` placeholders.
    :param params: sequence of parameters bound to the placeholders.
    :param db_path: SQLite database path (new, defaulted parameter so the
        original call sites are unchanged; tests can pass ``":memory:"``).
    :return: list of result rows from ``fetchall()``.
    """
    connection = sqlite3.connect(db_path)
    try:
        cursorobj = connection.cursor()
        cursorobj.execute(query, params)
        result = cursorobj.fetchall()
        connection.commit()
    finally:
        connection.close()
    return result
# Build the request-processing callable once at import time, wiring the SQL
# executor into the backend query builder.
dataprocess = process_fn(_execute)
class AJAX(tornado.web.RequestHandler):
    """Ad-hoc JSON query endpoint for the local demo database.

    SECURITY: ``table``/``limit``/``spec`` come straight from the query string
    and are forwarded to the SQL layer -- as the original comment says, never
    deploy this anywhere near production.
    """

    def get(self):
        # NOTE:
        # DO NOT USE THIS CODE IN PRODUCTION OR ANYWHERE CLOSE TO PRODUCTION
        TABLE = self.get_argument("table", None)
        LIMIT = self.get_argument("limit", "1000")
        spec = self.get_argument("spec", None)
        # Bug fix: ``json.loads(None)`` raises TypeError, so a request without
        # a ``spec`` argument crashed the handler; treat it as "no filter".
        spec = json.loads(spec) if spec is not None else None
        retobj = dataprocess(TABLE, LIMIT, spec)
        self.write(json_encode(retobj))
# Route table: /db serves the JSON query endpoint, everything else is served
# statically from the working directory. debug=True enables autoreload and
# verbose tracebacks -- development only.
application = tornado.web.Application([
    (r"/db", AJAX),
    (r"/(.*)", tornado.web.StaticFileHandler, {'path': '.'}),
], debug=True)
if __name__ == "__main__":
    # Serve on port 8888 until the process is interrupted.
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()
| StarcoderdataPython |
6482315 | <filename>src/models/train_model.py
from src.models.optimize import *
from src.helpers.train_helpers import train_cnn_cv, train_lstm_cv
def optimize_models(config):
    """Run hyper-parameter optimization for each requested model family.

    :param config: mapping with a ``"models"`` key listing the family names
        to optimize (``"LSTM"`` and/or ``"CNN"``).

    Bug fix: the loop previously iterated ``range(len(models))`` and compared
    the integer index against the strings ``"LSTM"``/``"CNN"``, so neither
    branch could ever execute; it now iterates the model names themselves.
    """
    # Load the split training data (used during optimization of weights and
    # hyperparameters) and unseen testing data.
    X_train_loaded = pd.read_pickle('kepler//testing_data//lstm_binneddata_XTRAIN.pkl')
    y_train_loaded = pd.read_pickle('kepler//testing_data//lstm_binneddata_YTRAIN.pkl')
    X_test_loaded = pd.read_pickle('kepler//testing_data//lstm_binneddata_XTEST.pkl')
    y_test_loaded = pd.read_pickle('kepler//testing_data//lstm_binneddata_YTEST.pkl')

    # ``.values``/builtin ``float`` replace ``as_matrix()``/``np.float``,
    # which were removed in pandas 1.0 / numpy 1.24; behavior is identical.
    X_train_loaded = X_train_loaded.values.astype(float)
    X_test_loaded = X_test_loaded.values.astype(float)

    # Reshape to (samples, timesteps, 1 feature) as the networks expect.
    X_train_loaded = X_train_loaded.reshape(X_train_loaded.shape[0], X_train_loaded.shape[1], 1)
    X_test_loaded = X_test_loaded.reshape(X_test_loaded.shape[0], X_test_loaded.shape[1], 1)

    models = config['models']
    for model in models:
        if model == "LSTM":
            best_model_config = run_trials('lstm', evals=5)
            print('Final Evaluation of Best LSTM Model Configuration')
            print('\n')
            best_lstm = LSTM_Model(output_dim=1, sequence_length=X_train_loaded.shape[1])
            best_lstm.LoadLSTMConfiguration(best_model_config)
            best_lstm.Build()
            best_lstm.Compile(loss='binary_crossentropy',
                              optimizer=SGD(lr=0.001 * 1.64341939565237, momentum=0.25, decay=0.0001,
                                            nesterov=True), metrics=['accuracy'])
            train_lstm_cv(best_lstm, X_train_loaded, y_train_loaded, X_test_loaded, y_test_loaded,
                          nb_cv=5, batch_size=16, nb_epochs=43,
                          save_name='LSTM')
        elif model == "CNN":
            best_model_config = run_trials('cnn', evals=10)
            print('Final Evaluation of Best CNN Model Configuration')
            print('\n')
            best_cnn = CNN_Model(output_dim=1, sequence_length=X_train_loaded.shape[1])
            best_cnn.LoadCNNConfiguration(best_model_config)
            best_cnn.Build()
            best_cnn.Compile(loss='binary_crossentropy',
                             optimizer=SGD(lr=0.001 * 1.64341939565237, momentum=0.25, decay=0.0001,
                                           nesterov=True), metrics=['accuracy'])
            train_cnn_cv(best_cnn, X_train_loaded, y_train_loaded, X_test_loaded, y_test_loaded,
                         nb_cv=10, batch_size=16, nb_epochs=43,
                         save_name='CNN')
def train_models(config):
    """Placeholder training entry point; currently only emits a blank line."""
    print()
def main():
    """Entry point: optimize the configured model families.

    Bug fix: ``optimize_models`` reads ``config['models']``, so it must be
    handed a mapping. The original passed the bare list ``["CNN"]``, which
    raises ``TypeError`` on the string index.
    """
    optimize_models({"models": ["CNN"]})
if __name__ == '__main__':
    # Script entry point.
    main()
| StarcoderdataPython |
251818 | __author__ = "<NAME> (<EMAIL>)"
__license__ = "MIT"
__date__ = "2016-08-08"
from snakemake.exceptions import MissingInputException
import os
def getAllFASTQ(wildcards):
    """Collect every FASTQ path for the assay/run selected by *wildcards*.

    Iterates all samples of the chosen run in the Snakemake ``config`` and
    prefixes each file name with the run's fastq directory.
    """
    run_samples = config["samples"][wildcards["assayID"]][wildcards["runID"]]
    return [
        "RNA-Seq/NB501086_0114_B_Azad_JCSMR_hRNAseq/fastq/" + fastq_name
        for sample in run_samples
        for fastq_name in run_samples[sample]
    ]
# Pseudo-target: requesting the reports directory as input drives the
# fastqc rule below when this rule is asked for.
rule dummy:
    input:
        "RNA-Seq/NB501086_0114_B_Azad_JCSMR_hRNAseq/processed_data/reports/"
# Run FastQC over every FASTQ file of the run (see getAllFASTQ) and write the
# un-extracted reports into the output directory.
rule fastqc:
    version:
        0.3
    threads:
        16
    input:
        getAllFASTQ
    output:
        "{assayID}/{runID}/{processed_dir}/{reports_dir}/"
    shell:
        "fastqc {input} --threads {threads} --noextract --outdir {output}"
127128 | <filename>aoc_2018/aoc_day05.py
def reduce_polymer(orig, to_remove=None, max_len=-1):
    """Fully react *orig* (AoC 2018 day 5) and return the surviving units.

    Adjacent units of the same letter but opposite case annihilate each
    other, and removals cascade as new pairs meet.

    :param orig: polymer string of upper/lower-case letters.
    :param to_remove: optional lower-case letter whose units (both cases)
        are skipped entirely before reacting (Part 2).
    :param max_len: if positive, abort and return ``None`` as soon as the
        partially reduced polymer reaches this length -- lets Part 2 prune
        candidates that cannot beat the current best.
    :return: list of surviving unit characters, or ``None`` when aborted.
    """
    polymer = []
    # Idiom fix: iterate the characters directly instead of range(len(...)).
    for unit in orig:
        # We save a lot of processing time for Part 2 if we cut off the
        # string building once the array is too long.
        if 0 < max_len <= len(polymer):
            return None
        # The test character for Part 2 is 'removed' by just not adding it.
        if to_remove and unit.lower() == to_remove:
            continue
        polymer.append(unit)
        # Collapse reacting pairs from the tail until the end is stable:
        # same letter, opposite case.
        while (len(polymer) > 1
               and polymer[-1] != polymer[-2]
               and polymer[-1].lower() == polymer[-2].lower()):
            del polymer[-2:]
    return polymer
# Part 1: fully react the puzzle input and report its final length.
# NOTE(review): ``input`` shadows the builtin input() and the file handle is
# never closed explicitly.
input = open('input/input05.txt').read()
print('Solution 5.1:', len(reduce_polymer(input)))

# Part 2: try removing each distinct unit type; the current best length is
# passed as max_len so hopeless candidates abort early (returning None).
distinct_char = set(input.lower())
min_len = len(input)
char_to_remove = ''
for c in distinct_char:
    poly = reduce_polymer(input, c, min_len)
    if poly is not None and len(poly) < min_len:
        min_len = len(poly)
        char_to_remove = c
print('Most troublesome character is', char_to_remove)
print('Solution 5.2:', min_len)
| StarcoderdataPython |
3308964 | import copy
import logging
from dataclasses import dataclass
from typing import Type, TypeVar
from dataclasses_json import dataclass_json
from thenewboston_node.core.logging import validates
from thenewboston_node.core.utils.cryptography import derive_verify_key
from thenewboston_node.core.utils.dataclass import fake_super_methods
from ..mixins.signable import SignableMixin
from ..signed_request_message import SignedRequestMessage
# Type variable for the alternate-constructor classmethod's return type.
T = TypeVar('T', bound='SignedRequest')

logger = logging.getLogger(__name__)
@fake_super_methods
@dataclass_json
@dataclass
class SignedRequest(SignableMixin):
    """A request message carrying a payload signed by its sender."""

    message: SignedRequestMessage
    """Request payload"""

    @classmethod
    def from_signed_request_message(cls: Type[T], message: SignedRequestMessage, signing_key: str) -> T:
        """Build a request around a deep copy of *message*, derive the signer's
        verify key from *signing_key* and sign the request with it."""
        request = cls(signer=derive_verify_key(signing_key), message=copy.deepcopy(message))
        request.sign(signing_key)
        return request

    def override_to_dict(self):  # this one turns into to_dict()
        """Serialize, replacing the default message dict with the message's
        own normalized serialization."""
        dict_ = self.super_to_dict()
        # TODO(dmu) LOW: Implement a better way of removing optional fields or allow them in normalized message
        dict_['message'] = self.message.to_dict()
        return dict_

    @validates('signed request')
    def validate(self):
        """Validate the payload first, then the request signature."""
        self.validate_message()
        with validates('block signature'):
            self.validate_signature()

    @validates('signed request message')
    def validate_message(self):
        """Delegate payload validation to the embedded message."""
        self.message.validate()
| StarcoderdataPython |
8112202 | <gh_stars>0
# -*- coding: utf-8 -*-
from tesstlog import Log
import torch
import math
import numpy
# Module-level logger (second argument presumably toggles file logging --
# confirm against tesstlog.Log).
logger = Log.init_log(__name__, False)
from matplotlib import pyplot as plt
class adaboost:
    """AdaBoost with decision stumps ("Machine Learning in Action" ch. 7, on torch).

    Fixes over the previous revision:
    * ``strump_classify`` with ``'gt'`` was a no-op: it wrote ``1`` into an
      array that was already all ones. It now labels values above the
      threshold as ``-1`` so the two inequality directions are complementary.
    * The AdaBoost weight update was missing the ``alpha`` factor and
      broadcast the 1-D label vector against an ``(m, 1)`` column, silently
      blowing ``D`` up to ``(m, m)``. It now implements
      ``D <- D * exp(-alpha * y * h(x)) / Z`` with proper ``(m, 1)`` shapes.
    * ``build_strump`` no longer hard-codes five samples (``view(1, 5)``).
    """

    def __init__(self):
        # Only configures matplotlib's default figure size for the demo plots.
        figsize = (3.5, 2.5)
        plt.rcParams['figure.figsize'] = figsize

    def _wrap_to_tensor(self, obj, deepcopy=True):
        """Return *obj* as a tensor.

        ``deepcopy`` is kept for backward compatibility but has never been
        honoured: tensors are returned as-is, other inputs are converted.
        """
        if torch.is_tensor(obj):
            return obj
        return torch.tensor(obj)

    def load_simp_data(self):
        """Return the book's toy 5-point data set and scatter-plot it."""
        dataMatrix = torch.tensor([[1.0, 2.1], [2.0, 1.1], [1.3, 1.0], [1.0, 1.0], [2.0, 1.0]], dtype=torch.float)
        classLabels = torch.tensor([1.0, 1.0, -1.0, -1.0, 1.0], dtype=torch.float)
        plt.scatter(dataMatrix[:, 0].numpy(), dataMatrix[:, 1].numpy(), 3)
        plt.show()
        return dataMatrix, classLabels

    def strump_classify(self, dataMat, dim=0, thresval=100, threshIneq='lt'):
        """Classify every sample with a single decision stump.

        ``'lt'`` labels samples whose feature ``dim`` is <= *thresval* as -1;
        ``'gt'`` labels samples whose feature ``dim`` is > *thresval* as -1
        (bug fix: this branch previously assigned +1, which changed nothing).

        :return: ``(m, 1)`` tensor of +/-1 predictions.
        """
        retArray = torch.ones(dataMat.shape[0], 1)
        if threshIneq == 'lt':
            retArray[dataMat[:, dim] <= thresval] = -1.0
        else:
            retArray[dataMat[:, dim] > thresval] = -1.0
        return retArray

    def build_strump(self, dataArray, classLabels, D):
        """Find the decision stump with the lowest ``D``-weighted error.

        Scans every feature, 12 threshold steps across its range, and both
        inequality directions.

        :param dataArray: ``(m, n)`` feature matrix (tensor or nested list).
        :param classLabels: length-``m`` vector of +/-1 labels.
        :param D: ``(m, 1)`` sample-weight column summing to 1.
        :return: ``(best stump dict, weighted error, (m, 1) predictions)``.
        """
        dataMat = self._wrap_to_tensor(dataArray)
        labels = self._wrap_to_tensor(classLabels).view(-1)
        m, n = dataMat.shape
        numSteps = 10.0
        bestTrump = {}
        bestClassEst = torch.zeros(m, 1)
        minError = float('inf')
        for i in range(n):
            rangeMin = dataMat[:, i].min()
            rangeMax = dataMat[:, i].max()
            stepSize = (rangeMax - rangeMin) / numSteps
            for j in range(-1, int(numSteps) + 1):
                for inequal in ['lt', 'gt']:
                    threshVal = rangeMin + float(j) * stepSize
                    predictedVals = self.strump_classify(dataMat, i, threshVal, inequal)
                    errArr = torch.ones(m, 1)
                    # view(-1) instead of the old hard-coded view(1, 5) so the
                    # search works for any sample count.
                    errArr[predictedVals.view(-1) == labels] = 0
                    weightError = torch.mm(D.t(), errArr).sum()
                    if weightError < minError:
                        minError = weightError
                        bestClassEst = predictedVals.clone()
                        bestTrump['dim'] = i
                        bestTrump['thresh'] = threshVal
                        bestTrump['ineq'] = inequal
        return bestTrump, minError, bestClassEst

    def adaBoostTrainDS(self, dataArr, classLabels, numIt=40):
        """Train up to *numIt* boosted stumps and return their descriptions.

        Stops early once the aggregate classifier reaches zero training error.
        """
        dataMat = self._wrap_to_tensor(dataArr)
        labels = self._wrap_to_tensor(classLabels).view(-1, 1)  # (m, 1) column
        m = dataMat.shape[0]
        D = torch.ones(m, 1) / m
        aggClassEst = torch.zeros(m, 1)
        weakClassArr = []
        for i in range(numIt):
            bestStrump, error, classEst = self.build_strump(dataMat, labels.view(-1), D)
            print("D:{}".format(D.T))
            alpha = float(0.5 * math.log((1 - error) / max(error, 1e-16)))
            bestStrump['alpha'] = alpha
            weakClassArr.append(bestStrump)
            print("classEst:{}".format(classEst.T))
            # Standard AdaBoost reweighting: up-weight misclassified samples.
            expon = -1 * alpha * labels * classEst  # (m, 1), keeps D a column
            D = D * expon.exp()
            D = D / D.sum()
            aggClassEst += alpha * classEst
            print("aggClassEst:{}".format(aggClassEst.T))
            aggError = torch.mul(aggClassEst.sign() != labels, torch.ones(m, 1))
            errorRate = aggError.sum() / m
            print("errorRate is :{}".format(errorRate))
            if errorRate == 0.0:
                break
        return weakClassArr
| StarcoderdataPython |
1816329 | # -*- coding: utf-8 -*-
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
import requests
from hashlib import sha1
import random
import string
import time
import os
@click.command()
@click.argument("input_filepath", type=click.Path(exists=True))
@click.argument("output_filepath", type=click.Path())
def main(input_filepath, output_filepath):
    """Runs data processing scripts to turn raw data from (../raw) into
    cleaned data ready to be analyzed (saved in ../processed).
    """
    # NOTE(review): input_filepath/output_filepath are currently unused --
    # this stub fetches from the Booli API instead of reading ../raw, and
    # prints rather than saving to ../processed.
    logger = logging.getLogger(__name__)
    logger.info("making final data set from raw data")

    # BOOLI_API_KEY / BOOLI_CALLER_ID are module globals assigned in the
    # __main__ guard below; calling main() without them raises NameError.
    booli = Booli(api_key=BOOLI_API_KEY, caller_id=BOOLI_CALLER_ID)
    data = booli.get_booli_data(query="Uddevalla", limit=100)
    print(data)
class Booli:
    """
    Thin client for the Booli "sold" listings API.

    Every request is authenticated with Booli's scheme: a SHA-1 digest over
    the caller id, a unix timestamp, the API key and a 16-character nonce.
    User must specify a Booli API Key (contact Booli to get one) and a caller
    id identifying the application.
    """

    def __init__(self, api_key, caller_id):
        self.caller_id = caller_id
        self.api_key = api_key

    def get_booli_data(
        self, query, limit=1000, offset=None, min_sold_date=None, max_sold_date=None
    ):
        """Fetch sold listings matching *query* and return the decoded JSON body."""
        timestamp = int(time.time())
        alphabet = string.ascii_uppercase + string.digits
        # NOTE(review): ``random`` is not cryptographically strong; fine for a
        # request nonce, but ``secrets`` would be the safer default.
        nonce = "".join(
            random.choice(alphabet) for _ in range(16)
        )
        digest = sha1(
            (self.caller_id + str(timestamp) + self.api_key + nonce).encode("utf-8")
        ).hexdigest()
        parameters = {
            "callerId": self.caller_id,
            "time": timestamp,
            "unique": nonce,
            "hash": digest,
            "q": query,
            "limit": limit,
            "offset": offset,
            "minSoldDate": min_sold_date,
            "maxSoldDate": max_sold_date,
        }
        # requests drops parameters whose value is None, so the optional
        # arguments are simply omitted from the query string.
        response = requests.get("https://api.booli.se/sold", params=parameters)
        return response.json()
if __name__ == "__main__":
    log_fmt = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
    logging.basicConfig(level=logging.INFO, format=log_fmt)

    # not used in this stub but often useful for finding various files
    project_dir = Path(__file__).resolve().parents[2]

    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())

    # Credentials/config read from the environment; a KeyError here means the
    # .env file is incomplete.
    BOOLI_API_KEY = os.environ["BOOLI_API_KEY"]
    BOOLI_QUERY = os.environ["BOOLI_QUERY"]
    BOOLI_CALLER_ID = os.environ["BOOLI_CALLER_ID"]

    main()
| StarcoderdataPython |
3499642 | """Setup:
- Add the following files into sandbox directory under project root directory:
- sa.json with GCP credential
- target-config.json:
{
"project_id": "{your-project-id}",
"dataset_id": "{your_dataset_id}"
}
"""
from tests import unittestcore
from google.cloud.bigquery import SchemaField, Client
import json
import os
from decimal import Decimal
import pandas as pd
class TestComplexStreamLoadJob(unittestcore.BaseUnitTest):
    """Integration tests for the BigQuery target's load-job process handler.

    Each test feeds a recorded Singer tap stream (tests/rsc/data) into the
    target with a sandbox config and asserts on the exit code (0 = success,
    2 = serious problem); some also verify the resulting BigQuery schema and
    data. Requires live GCP credentials (see the module docstring).
    """

    def test_klaviyo_stream(self):
        # Happy path: a Klaviyo stream should load with exit code 0.
        from target_bigquery import main

        self.set_cli_args(
            stdin=os.path.join(os.path.join(
                os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tests'), 'rsc'),
                'data'), 'klaviyo_stream.json'),
            config=os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'sandbox'),
                                'target-config.json'),
            processhandler="load-job"
        )

        ret = main()
        state = self.get_state()[-1]
        print(state)

        self.assertEqual(ret, 0, msg="Exit code is not 0!")

    # NOTE(review): "fiels" in the name is a typo for "fields"; kept because
    # renaming would change the discovered test id.
    def test_klaviyo_stream_load_job_should_fail_due_to_dupes_in_fiels_names(self):
        from target_bigquery import main

        self.set_cli_args(
            stdin=os.path.join(os.path.join(
                os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tests'), 'rsc'),
                'data'), 'klaviyo_stream_contains_dupe_fields.json'),
            config=os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'sandbox'),
                                'target-config.json'),
            processhandler="load-job"
        )

        ret = main()

        self.assertEqual(ret, 2, msg="Exit code is not 2!")  # expected exit code is 2 - serious problem

    def test_recharge_stream(self):
        # Happy path for a Recharge stream.
        from target_bigquery import main

        self.set_cli_args(
            stdin=os.path.join(os.path.join(
                os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tests'), 'rsc'),
                'data'), 'recharge_stream.json'),
            config=os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'sandbox'),
                                'target-config.json'),
            processhandler="load-job"
        )

        ret = main()
        state = self.get_state()[-1]
        print(state)

        self.assertEqual(ret, 0, msg="Exit code is not 0!")

    def test_bing_ads_stream(self):
        """
        data vs schema match here
        """
        from target_bigquery import main

        self.set_cli_args(
            stdin=os.path.join(os.path.join(
                os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tests'), 'rsc'),
                'data'), 'bing_ads_stream.json'),
            config=os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'sandbox'),
                                'target-config.json'),
            processhandler="load-job"
        )

        ret = main()
        state = self.get_state()[-1]
        print(state)

        self.assertEqual(ret, 0, msg="Exit code is not 0!")

    def test_bing_ads_stream_data_vs_schema_dont_match(self):
        """
        This test succeeds

        It tests if we overcame the following error in Tap Bing Ads

        JSON schema library validator flags a mismatch in data type between data and schema.

        CRITICAL 123456 is not of type 'null', 'string'

        Failed validating 'type' in schema['properties']['BillToCustomerId']:
            {'type': ['null', 'string']}

        On instance['BillToCustomerId']:
            123456
        """
        from target_bigquery import main

        self.set_cli_args(
            stdin=os.path.join(os.path.join(
                os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tests'), 'rsc'),
                'data'), 'bing_ads_stream_schema_vs_data_have_diff_data_types.json'),
            config=os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'sandbox'),
                                'target-config.json'),
            processhandler="load-job"
        )

        ret = main()
        state = self.get_state()[-1]
        print(state)

        self.assertEqual(ret, 0, msg="Exit code is not 0!")

    def test_complex_stream(self):
        # Complex nested Facebook stream, default config.
        from target_bigquery import main

        self.set_cli_args(
            stdin=os.path.join(os.path.join(
                os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tests'), 'rsc'),
                'data'), 'facebook_stream.json'),
            config=os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'sandbox'),
                                'target-config.json'),
            processhandler="load-job"
        )

        ret = main()
        state = self.get_state()[-1]
        print(state)

        self.assertEqual(ret, 0, msg="Exit code is not 0!")
        # self.assertDictEqual(state, {"bookmarks": {"simple_stream": {"timestamp": "2020-01-11T00:00:00.000000Z"}}})
        #
        # table = self.client.get_table("{}.simple_stream_dev".format(self.dataset_id))
        # self.assertEqual(3, table.num_rows, msg="Number of rows mismatch")
        # self.assertIsNone(table.clustering_fields)
        # self.assertIsNone(table.partitioning_type)

    def test_complex_stream_decimal_schema_valid(self):
        # End-to-end check that a multipleOf of 1e-03 produces a NUMERIC(32, 3)
        # column and that the loaded values round-trip exactly as Decimals.
        # DATA AND CONFIG
        input_file = os.path.join(os.path.join(
            os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tests'), 'rsc'),
            'data'), 'facebook_stream_decimal_test_schema_valid.json')

        config_file = os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'sandbox'),
                                   'target-config.json')

        # VERIFY INPUTS
        data = []
        with open(input_file) as f:
            for line in f:
                data.append(json.loads(line))

        # verify input data and schema
        assert data[0]['schema']['properties']["budget_remaining"]["multipleOf"] == 1e-03
        assert data[1]['record']["budget_remaining"] == 5000000.1
        assert data[2]['record']["budget_remaining"] == 5000000.12
        assert data[3]['record']["budget_remaining"] == 57573500.123
        assert data[4]['record']["budget_remaining"] == 2450980.0
        assert data[5]['record']["budget_remaining"] == 2450980.0

        # RUN SYNC
        from target_bigquery import main

        self.set_cli_args(
            stdin=input_file,
            config=config_file,
            processhandler="load-job"
        )

        ret = main()
        state = self.get_state()[-1]
        print(state)

        self.assertEqual(ret, 0, msg="Exit code is not 0!")

        # VERIFY OUTPUTS
        config = json.load(open(config_file))
        project_id = config["project_id"]
        dataset_id = config["dataset_id"]

        bq_client = Client(project=project_id)

        bq_schemas_dict = {}

        # Make an API request.
        tables_http_iterator = bq_client.list_tables(project_id + '.' + dataset_id)

        for table_list_item in tables_http_iterator:
            table = bq_client.get_table(
                f"{table_list_item.project}.{table_list_item.dataset_id}.{table_list_item.table_id}")
            bq_schemas_dict.update({table.table_id: table.schema})

        # verify schema; the table may have been created with a "_dev" suffix.
        # NOTE(review): bare except silently swallows any lookup error.
        stream = "adsets"
        try:
            test_field = bq_schemas_dict[stream][7]
        except:
            stream = stream + "_dev"
            test_field = bq_schemas_dict[stream][7]

        assert test_field.name == "budget_remaining"
        assert test_field.field_type in ["NUMERIC", "DECIMAL"]  # NUMERIC is the same as DECIMAL
        assert test_field.precision == 32
        assert test_field.scale == 3

        # verify data
        query_string = f"SELECT budget_remaining FROM `{project_id}.{dataset_id}.{stream}`"

        dataframe = (
            bq_client.query(query_string)
            .result()
            .to_dataframe()
        )

        actual = dataframe["budget_remaining"]

        expected = pd.Series([Decimal('2450980'), Decimal('2450980'), Decimal('5000000.1'),
                              Decimal('5000000.12'), Decimal('57573500.123')])

        assert actual.equals(expected)

    def test_complex_stream_decimal_schema_invalid(self):
        # A multipleOf too coarse for the data must make the load fail.
        file = os.path.join(os.path.join(
            os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tests'), 'rsc'),
            'data'), 'facebook_stream_decimal_test_schema_invalid.json')

        data = []
        with open(file) as f:
            for line in f:
                data.append(json.loads(line))

        assert data[0]['schema']['properties']["budget_remaining"]["multipleOf"] == 1e-02
        assert data[1]['record']["budget_remaining"] == 5000000.1
        assert data[2]['record']["budget_remaining"] == 5000000.12
        assert data[3]['record']["budget_remaining"] == 57573500.123

        from target_bigquery import main

        self.set_cli_args(
            stdin=file,
            config=os.path.join(
                os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'sandbox'),
                'target-config.json'),
            processhandler="load-job"
        )

        ret = main()

        self.assertEqual(ret, 2, msg="Exit code is not 2!")  # load must fail
        # weird error: Failed validating 'type' in schema['properties']['daily_budget']:
        # I changed nothing about daily_budget, I made budget_remaining invalid

    def test_complex_stream_with_tables_config(self):
        # Same Facebook stream, but with a per-stream tables config supplying
        # clustering/partitioning hints.
        from target_bigquery import main

        self.set_cli_args(
            stdin=os.path.join(os.path.join(
                os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tests'), 'rsc'),
                'data'), 'facebook_stream.json'),
            config=os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'sandbox'),
                                'target-config.json'),
            tables=os.path.join(os.path.join(
                os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tests'), 'rsc'),
                'config'), 'facebook_stream_tables_config.json'),
            processhandler="load-job"
        )

        ret = main()
        state = self.get_state()[-1]
        print(state)

        self.assertEqual(ret, 0, msg="Exit code is not 0!")
        # self.assertDictEqual(state, {"bookmarks": {"simple_stream": {"timestamp": "2020-01-11T00:00:00.000000Z"}}})
        #
        # table = self.client.get_table("{}.simple_stream_dev".format(self.dataset_id))
        # self.assertEqual(3, table.num_rows, msg="Number of rows mismatch")
        # self.assertIsNotNone(table.clustering_fields)
        # self.assertIsNotNone(table.partitioning_type)

    def test_complex_stream_with_tables_config_force_field(self):
        """
        the purpose of this test is to make sure that if you supply date_start field

        in Facebook ads_insights_age_and_gender streams

        as a required string,

        build_schema function will force this field to NULLABLE DATE, according to target tables config file
        """
        target_config_file = json.load(open(os.path.join(os.path.join(
            os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tests'), 'rsc'),
            'config'), 'facebook_stream_tables_config.json')))

        assert target_config_file['streams']['ads_insights_age_and_gender']['force_fields']['date_start'][
            'type'] == 'DATE'
        assert target_config_file['streams']['ads_insights_age_and_gender']['force_fields']['date_start'][
            'mode'] == 'NULLABLE'

        from target_bigquery import main

        self.set_cli_args(
            stdin=os.path.join(os.path.join(
                os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tests'), 'rsc'),
                'data'), 'facebook_stream_date_start_is_required_string.json'),
            config=os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'sandbox'),
                                'target-config.json'),
            tables=os.path.join(os.path.join(
                os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tests'), 'rsc'),
                'config'), 'facebook_stream_tables_config.json'),
            processhandler="load-job"
        )

        ret = main()
        state = self.get_state()[-1]
        print(state)

        self.assertEqual(ret, 0, msg="Exit code is not 0!")
        # self.assertDictEqual(state, {"bookmarks": {"simple_stream": {"timestamp": "2020-01-11T00:00:00.000000Z"}}})

        table = self.client.get_table("{}.ads_insights_age_and_gender".format(self.dataset_id))
        self.assertEqual(15, table.num_rows, msg="Number of rows mismatch")
        self.assertIsNotNone(table.clustering_fields)
        self.assertIsNotNone(table.partitioning_type)

        # date_start must have been forced to NULLABLE DATE (index 42 in the
        # generated schema).
        actual = table.schema[42]

        expected = SchemaField(name='date_start',
                               field_type='DATE',
                               mode='NULLABLE',
                               description=None,
                               fields=(),
                               policy_tags=None)

        assert actual == expected

    def test_misformed_complex_stream(self):
        """
        Note that the target config's "validate_records" flag should be set to False

        sandbox/malformed_target_config.json:

        {
          "project_id": "{your-project-id}",
          "dataset_id": "{your_dataset_id}",
          "validate_records": false
        }
        """
        from target_bigquery import main

        self.set_cli_args(
            stdin=os.path.join(os.path.join(
                os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'tests'), 'rsc'),
                'data'), 'facebook_stream.json'),
            config=os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'sandbox'),
                                'malformed_target_config.json'),
            processhandler="load-job",
        )

        ret = main()
        state = self.get_state()[-1]
        print(state)

        self.assertEqual(ret, 0, msg="Exit code is not 0!")
| StarcoderdataPython |
269249 | """Audit log
Revision ID: cf0c99c08578
Revises:
Create Date: 2017-12-12 21:12:56.282095
"""
from datetime import datetime
from alembic import op
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy_continuum import version_class
from sqlalchemy_continuum import versioning_manager
from sqlalchemy_continuum.operation import Operation
from tracker import db
from tracker.model import CVE
from tracker.model import Advisory
from tracker.model import CVEGroup
from tracker.model import CVEGroupEntry
from tracker.model import CVEGroupPackage
# revision identifiers, used by Alembic.
revision = 'cf0c99c08578'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Add created/changed audit columns and backfill the version history.

    Bug fix: the advisory backfill loop previously read ``group.created`` --
    the loop variable left over from the CVEGroup loops above -- stamping
    every advisory with an unrelated group's creation time. It now mirrors
    the CVE/CVEGroup pattern and uses the advisory's own creation timestamp.
    """
    # ensure new transaction/version tables exist
    db.create_all()

    # update CVE table
    # NOTE(review): unlike cve_group/advisory below, these columns are never
    # tightened to nullable=False after the backfill -- confirm intentional.
    op.add_column('cve',
                  Column('created',
                         DateTime,
                         default=datetime.utcnow,
                         nullable=True,
                         index=True))
    op.add_column('cve',
                  Column('changed',
                         DateTime,
                         default=datetime.utcnow,
                         nullable=True,
                         index=True))
    for cve in CVE.query.all():
        cve.created = datetime.utcnow()
        cve.changed = cve.created
    db.session.commit()
    db.session.flush()

    # update AVG table
    op.add_column('cve_group',
                  Column('changed',
                         DateTime,
                         default=datetime.utcnow,
                         nullable=True,
                         index=True))
    for group in CVEGroup.query.all():
        group.changed = group.created
    db.session.commit()
    db.session.flush()

    # Backfill version rows for packages and CVE entries of every group
    # version, copying the group's transaction window.
    VersionClassGroup = version_class(CVEGroup)
    uow = versioning_manager.unit_of_work(db.session)
    uow.create_transaction(db.session)
    for group in VersionClassGroup.query.all():
        for package in CVEGroupPackage.query.filter(
                CVEGroupPackage.group_id == group.id).all():
            package_version = uow.get_or_create_version_object(package)
            package_version.group_id = group.id
            package_version.pkgname = package.pkgname
            package_version.transaction_id = group.transaction_id
            package_version.end_transaction_id = group.end_transaction_id
            package_version.operation_type = Operation.INSERT
            package_version.group_id_mod = 1
            package_version.pkgname_mod = 1
            uow.process_operation(Operation(package, Operation.INSERT))

        for cve in CVEGroupEntry.query.filter(
                CVEGroupEntry.group_id == group.id).all():
            cve_version = uow.get_or_create_version_object(cve)
            cve_version.group_id = group.id
            cve_version.cve_id = cve.cve_id
            cve_version.transaction_id = group.transaction_id
            cve_version.end_transaction_id = group.end_transaction_id
            cve_version.operation_type = Operation.INSERT
            cve_version.group_id_mod = 1
            cve_version.cve_id_mod = 1
            uow.process_operation(Operation(cve, Operation.INSERT))
    uow.make_versions(db.session)
    db.session.commit()
    db.session.flush()

    with op.batch_alter_table('cve_group', schema=None) as batch_op:
        batch_op.alter_column('changed', nullable=False)

    # update advisory table
    op.add_column('advisory',
                  Column('changed',
                         DateTime,
                         default=datetime.utcnow,
                         nullable=True,
                         index=True))
    for advisory in Advisory.query.all():
        # Bug fix (was: advisory.changed = group.created).
        advisory.changed = advisory.created
    db.session.commit()
    db.session.flush()

    with op.batch_alter_table('advisory', schema=None) as batch_op:
        batch_op.alter_column('changed', nullable=False)

    # set all fields to modified for initial insert
    VersionClassCVE = version_class(CVE)
    VersionClassCVE.query.update({
        VersionClassCVE.operation_type: Operation.INSERT,
        VersionClassCVE.issue_type_mod: 1,
        VersionClassCVE.description_mod: 1,
        VersionClassCVE.severity_mod: 1,
        VersionClassCVE.remote_mod: 1,
        VersionClassCVE.reference_mod: 1,
        VersionClassCVE.notes_mod: 1
    })
    VersionClassGroup = version_class(CVEGroup)
    VersionClassGroup.query.update({
        VersionClassGroup.operation_type: Operation.INSERT,
        VersionClassGroup.status_mod: 1,
        VersionClassGroup.severity_mod: 1,
        VersionClassGroup.affected_mod: 1,
        VersionClassGroup.fixed_mod: 1,
        VersionClassGroup.bug_ticket_mod: 1,
        VersionClassGroup.reference_mod: 1,
        VersionClassGroup.notes_mod: 1,
        VersionClassGroup.created_mod: 1,
        VersionClassGroup.changed_mod: 1,
        VersionClassGroup.advisory_qualified_mod: 1
    })
    VersionClassAdvisory = version_class(Advisory)
    VersionClassAdvisory.query.update({
        VersionClassAdvisory.operation_type: Operation.INSERT,
        VersionClassAdvisory.group_package_id_mod: 1,
        VersionClassAdvisory.advisory_type_mod: 1,
        VersionClassAdvisory.publication_mod: 1,
        VersionClassAdvisory.workaround_mod: 1,
        VersionClassAdvisory.impact_mod: 1,
        VersionClassAdvisory.content_mod: 1,
        VersionClassAdvisory.created_mod: 1,
        VersionClassAdvisory.changed_mod: 1,
        VersionClassAdvisory.reference_mod: 1
    })
    db.session.commit()
def downgrade():
    """Drop the added timestamp columns/indexes and all version-history tables."""
    with op.batch_alter_table('cve', schema=None) as batch_op:
        batch_op.drop_index('ix_cve_created')
        batch_op.drop_index('ix_cve_changed')
        batch_op.drop_column('created')
        batch_op.drop_column('changed')

    with op.batch_alter_table('cve_group', schema=None) as batch_op:
        batch_op.drop_index('ix_cve_group_changed')
        batch_op.drop_column('changed')

    with op.batch_alter_table('advisory', schema=None) as batch_op:
        batch_op.drop_index('ix_advisory_changed')
        batch_op.drop_column('changed')

    def drop(model):
        # Remove one continuum-generated history table from the database.
        model.__table__.drop(db.engine)

    # Drop the version table of every versioned model, then the transaction table.
    for versioned in (CVE, CVEGroup, CVEGroupEntry, CVEGroupPackage, Advisory):
        drop(version_class(versioned))
    drop(versioning_manager.transaction_cls)
    db.session.commit()
| StarcoderdataPython |
11207804 | #!/usr/bin/env python
from typing import Any, Dict, Optional
from hummingbot.connector.exchange.alpaca.alpaca_order_book_message import AlpacaOrderBookMessage
from hummingbot.core.data_type.order_book import OrderBook
from hummingbot.core.data_type.order_book_message import OrderBookMessageType
class AlpacaOrderBook(OrderBook):
    """Converts raw Alpaca websocket payloads into standard OrderBookMessage objects."""

    @classmethod
    def snapshot_message_from_exchange(cls,
                                       msg: Dict[str, Any],
                                       timestamp: float,
                                       metadata: Optional[Dict] = None):
        """
        Convert json snapshot data into standard OrderBookMessage format
        :param msg: json snapshot data from live web socket stream
        :param timestamp: timestamp attached to incoming data
        :param metadata: optional extra fields merged into the message content
        :return: AlpacaOrderBookMessage
        """
        # BUG FIX: annotation was Dict[str, any] -- `any` is the builtin
        # function, not a type; the typing.Any type is intended.
        if metadata:
            msg.update(metadata)
        return AlpacaOrderBookMessage(
            message_type=OrderBookMessageType.SNAPSHOT,
            content=msg,
            timestamp=timestamp
        )

    @classmethod
    def trade_message_from_exchange(cls,
                                    msg: Dict[str, Any],
                                    timestamp: Optional[float] = None,
                                    metadata: Optional[Dict] = None):
        """
        Convert a trade event into standard OrderBookMessage format
        :param msg: trade payload from the exchange stream
        :param timestamp: timestamp attached to incoming data
        :param metadata: optional extra fields merged into the message content
        :return: AlpacaOrderBookMessage
        """
        if metadata:
            msg.update(metadata)
        # Normalize exchange-specific keys onto the common order book schema.
        msg.update({
            "exchange_order_id": str(msg.get("s_t")),
            "trade_type": msg.get("side"),
            "price": msg.get("price"),
            "amount": msg.get("size"),
        })
        return AlpacaOrderBookMessage(
            message_type=OrderBookMessageType.TRADE,
            content=msg,
            timestamp=timestamp
        )
| StarcoderdataPython |
3381820 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
@enum.unique
class Facility(enum.IntEnum):
    """Syslog facility codes (RFC 3164 / RFC 5424, section 6.2.1)."""

    kernel = 0
    user = 1
    mail = 2
    daemon = 3
    auth = 4
    syslog = 5
    lpr = 6
    news = 7
    uucp = 8
    clock = 9
    authpriv = 10
    ftp = 11
    ntp = 12
    audit = 13
    alert = 14
    cron = 15
    # Facilities 16-23 are reserved for local/site-specific use.
    local0 = 16
    local1 = 17
    local2 = 18
    local3 = 19
    local4 = 20
    local5 = 21
    local6 = 22
    local7 = 23
@enum.unique
class Severity(enum.IntEnum):
    """Syslog severity levels (RFC 5424, section 6.2.1); lower is more severe."""

    emergency = 0
    alert = 1
    critical = 2
    error = 3
    warning = 4
    notice = 5
    informational = 6
    debug = 7
| StarcoderdataPython |
42346 | # Advent of Code 2019, Day 6
# (c) blu3r4y
import networkx as nx
from aocd.models import Puzzle
from funcy import print_calls
@print_calls
def part1(graph):
    """Sum, over every node, its orbit distance from the universal root COM."""
    return sum(
        nx.shortest_path_length(graph, "COM", node) for node in graph.nodes
    )


@print_calls
def part2(graph):
    """Minimum orbital transfers between YOU and SAN (endpoints excluded)."""
    return nx.shortest_path_length(graph.to_undirected(), "YOU", "SAN") - 2


def load(data):
    """Build a directed orbit graph from lines of the form 'A)B'."""
    return nx.DiGraph([line.split(")") for line in data.split()])


if __name__ == "__main__":
    puzzle = Puzzle(year=2019, day=6)

    ans1 = part1(load(puzzle.input_data))
    # puzzle.answer_a = ans1

    ans2 = part2(load(puzzle.input_data))
    # puzzle.answer_b = ans2
| StarcoderdataPython |
3535823 | import warnings
# Dash configuration
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from datetime import date
from server import app
from initialize_blockchain import *
# Module-level setup: blockchain handle, spreadsheet paths, and dropdown options.
blockchain = initialize_blockchain()
from load_blockchain import *
import pickle
import os
import pandas as pd
# Paths to the product database and the intermediate search-result files.
fpath_insurdb = os.path.join('.', 'insurdb1.xlsx')
fpath_search_results = './search_results.xlsx'
fpath_jobtype = './job_type.xlsx'
# Occupation options are loaded from the spreadsheet at import time.
jobs = pd.read_excel('job_type.xlsx')['Job']
options_jobs = [{'label':job, 'value':job} for job in jobs]
options_policy_period = [{'label': _, 'value': _} for _ in ['10 years', '20 years', '30 years', '35 years', '40', 'To 60 years old', 'To 65 years old', 'To 70 years old', 'To 75 years old', 'To 80 years old']]
options_payment_term = [{'label': _, 'value': _} for _ in ['pay off in one time', '5 years', '10 years', '15 years', '20 years', '30 years', '40 years', 'To 55 years old', 'To 60 years old', 'To 70 years old', 'To 75 years old']]
# NOTE(review): this rebinding discards the Series loaded above and `jobs`
# is not read again in this module -- confirm before removing.
jobs = {}
# Insurance product search page: collects applicant details and search filters.
layout = html.Div(children=[
# Target of the callback below: setting `pathname` redirects the browser.
dcc.Location(id='url_search', refresh=True),
html.Div(
className="container",
children=[
html.Div(
html.Div(
className="row",
children=[
html.Div(
className="ten columns",
children=[
dcc.Markdown('''
### Tell us something about yourself
'''),
# Applicant demographics used by the search callback.
dcc.Dropdown(id="gender", options=[{'label':'M', 'value':'M'}, {'label':'F', 'value':'F'}], placeholder="gender"),
html.Hr(),
dcc.Dropdown(id="age", options=[{'label':str(i), 'value':i} for i in range(0, 100)], placeholder="Age"),
html.Hr(),
dcc.Dropdown(id="job", options=options_jobs, placeholder="Occupation"),
html.Hr(),
dcc.Dropdown(id="smoke", options=[{'label':'Yes', 'value':'Yes'}, {'label':'No', 'value':'No'}], placeholder="Do you smoke?"),
html.Hr(),
dcc.Dropdown(
id='insurance_options',
options=[{"label":"Critical Illness Insurance", "value":"Critical Illness Insurance"}],
placeholder="Select Type of Insurance"
),
html.Hr(),
dcc.Markdown('''
Maximum Coverage
'''),
# Coverage range slider; values are in units of $10,000 (150 -> $1,500,000).
dcc.RangeSlider(
id='maximum_coverage',
min=150,
max=350,
value=[200, 250],
tooltip={"placement": "bottom", "always_visible": False},
marks={
150: {'label': '$1,500,000', 'style': {'color': '#77b0b1'}},
250: {'label': '$2,500,000'},
350: {'label': '$3,500,000', 'style': {'color': '#f50'}}
}
),
html.Hr(),
dcc.Dropdown(id="policy_period", options=options_policy_period, placeholder="Select Policy Period"),
html.Hr(),
dcc.Dropdown(id="payment_term", options=options_payment_term, placeholder="Select Payment Term"),
html.Hr(),
# Navigation buttons consumed by the update_output callback below.
html.Div([
html.Button(id='search', children='Search', n_clicks=0),
html.Button(id='homepage3', children='Homepage', n_clicks=0)
]
)
]
)
]
)
)
]
)
])
@app.callback(Output('url_search', 'pathname'),
              Input('search', 'n_clicks'),
              Input("age", 'value'),
              Input("smoke", 'value'),
              Input("maximum_coverage", 'value'),
              Input("payment_term", 'value'),
              Input("policy_period", 'value'),
              Input('homepage3', 'n_clicks')
              )
def update_output(submit_n_clicks, age, smoke, maximum_coverage, payment_term, policy_period, n_clciks_homepage3):
    """Route after a click: filter the product database and redirect.

    Returns '/results' after a Search click (writing matching rows to
    `fpath_search_results` first) or '/' after a Homepage click.
    """
    # Jump to the search-results page.
    if submit_n_clicks and submit_n_clicks > 0:
        df = pd.read_excel(fpath_insurdb)
        # 'age_group' is stored as "min~max"; split into numeric bounds.
        df['min_age'], df['max_age'] = df['age_group'].apply(lambda x: int(x.split('~')[0])), df['age_group'].apply(lambda x: int(x.split('~')[1]))
        df = df.loc[
            (df['min_age'] <= age) &
            (df['max_age'] >= age) &
            (maximum_coverage[0] <= df['maximum_coverage']) &
            (maximum_coverage[1] >= df['maximum_coverage']) &
            (df['smoking_habit'] == smoke) &
            (df['payment_term'] == payment_term) &
            (df['policy_period'] == policy_period) &
            (df['intelligent_underwriting'] == 'Medical History') &
            (df['occupations'] == '1-4'),
            :
        ]
        with open('test.txt', 'w') as f:
            f.write(''.join([str(age), str(maximum_coverage[0]), str(maximum_coverage[1]), smoke, payment_term, policy_period]))
        if len(df) > 0:
            df.to_excel(fpath_search_results, index=False)
        else:
            # BUG FIX: was `df.DataFrame()`, which raises AttributeError --
            # an empty frame comes from the pandas module, not a DataFrame.
            pd.DataFrame().to_excel(fpath_search_results, index=False)
        return '/results'
    # Jump back to the home page.
    if n_clciks_homepage3 and n_clciks_homepage3 > 0:
        return '/'
| StarcoderdataPython |
12849267 | """ask URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from qa.urls import *
from qa.views import test ###only for now
# NOTE(review): `url` and `include` are not imported in this module explicitly;
# they are presumably pulled in by `from qa.urls import *` above -- confirm,
# otherwise this module raises NameError at import time.
urlpatterns = [
url(r'^admin/', admin.site.urls, name='admin'),
url(r'^login/', test, name='login'),
url(r'^signup/', test, name='signup'),
url(r'^question/', include('qa.urls')), ###good parctice as tree-structure
url(r'^ask/', test, name='ask'),
url(r'^popular/', test, name='popular'),
url(r'^new/', test, name='new'),
url(r'^$', test),
]
358405 | __doc__ = '''NURBS - Non Uniform Rational B-Splines.
This python module is a port of Mark Spink's SCILAB/MATLAB toolbox to python with help of numpy.
Information about Mark Spink's tolbox and more background NURBS information can be found at:
http://www.aria.uklinux.net/nurbs
Dependency
============
Python 2.0 or newer
Numerical Python
Dislin -> optional but recomended
PyOpenGL -> optional
Installation
============
See python documentation for Distutils.
License
=======
All the software within this tool is covered by the GPL Version 2 license
which is stated in the file 'LICENSE'.
<NAME>
<EMAIL>'''
# NOTE(review): '_Bas' is exported here but never imported below -- confirm it
# is provided elsewhere, otherwise `from <package> import *` fails with
# AttributeError when resolving __all__.
__all__=['_Bas','Util','Crv','Srf']
import Util, Crv, Srf
| StarcoderdataPython |
41106 | <reponame>psusmars/rex<filename>rex/rechunk_h5/__init__.py
# -*- coding: utf-8 -*-
"""
.h5 rechunking tool
"""
from .chunk_size import ArrayChunkSize, TimeseriesChunkSize
from .combine_h5 import CombineH5
from .rechunk_h5 import RechunkH5, get_dataset_attributes
| StarcoderdataPython |
3379913 | <filename>Condicionais/buzz.py
# Read an integer and print "Buzz" for multiples of 5, otherwise echo it
# (single-divisor FizzBuzz variant).
num = int(input("Digite um numero: "))
if num % 5 == 0:
    print("Buzz")
else:
    print(num)
236734 | <filename>heaps/constructHeap.py
class MinHeap:
    """Array-backed binary min-heap; the smallest value is always at index 0."""

    def __init__(self, array):
        # Do not edit the line below.
        self.heap = self.buildHeap(array)

    def buildHeap(self, array):
        """Heapify `array` in place in O(n) and return it."""
        last_parent = (len(array) - 2) // 2
        for idx in reversed(range(last_parent + 1)):
            self._sift_down_from(idx, len(array) - 1, array)
        return array

    def _sift_down_from(self, idx, end, heap):
        # Push heap[idx] down until neither child is smaller than it.
        child = 2 * idx + 1
        while child <= end:
            right = child + 1
            if right <= end and heap[right] < heap[child]:
                child = right
            if heap[child] < heap[idx]:
                heap[child], heap[idx] = heap[idx], heap[child]
                idx = child
                child = 2 * idx + 1
            else:
                return

    def siftDown(self):
        """Restore the heap property starting from the root (O(log n))."""
        self._sift_down_from(0, len(self.heap) - 1, self.heap)

    def siftUp(self):
        """Restore the heap property starting from the last element (O(log n))."""
        idx = len(self.heap) - 1
        parent = (idx - 1) // 2
        while idx > 0 and self.heap[idx] < self.heap[parent]:
            self.heap[idx], self.heap[parent] = self.heap[parent], self.heap[idx]
            idx = parent
            parent = (idx - 1) // 2

    def peek(self):
        """Return the minimum value without removing it."""
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum value."""
        # Swap root with the last element so the pop is O(1), then re-heapify.
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        smallest = self.heap.pop()
        self.siftDown()
        return smallest

    def insert(self, value):
        """Add `value` to the heap."""
        self.heap.append(value)
        self.siftUp()
3221906 | import time
from openpyxl import load_workbook, Workbook
def get_name_phone(file_path, sheet_name, name_prefix, phone_refix):
    """Read a {name: phone} mapping from two columns of a worksheet.

    `name_prefix`/`phone_refix` are column letters (e.g. 'B', 'E'); rows start
    at 2 because row 1 is assumed to be a header.
    """
    result = {}
    book = load_workbook(file_path, read_only=True)
    sheet = book[sheet_name]
    # BUG FIX: the original `while i < max_row` loop stopped one row short --
    # openpyxl's max_row is the index of the *last* populated row, inclusive.
    for row in range(2, sheet.max_row + 1):
        name = sheet["%s%d" % (name_prefix, row)].value
        phone = sheet["%s%d" % (phone_refix, row)].value
        result[name] = phone
    book.close()
    return result
def mix_save(target_file_name, src_file, src_sheet_name, name_phone):
    """Copy `src_sheet_name` into a new workbook, appending a phone column.

    The phone number is looked up in `name_phone` by the value of the second
    column (B) of each data row; names without a match get '-1'.
    """
    target_book = Workbook(write_only=True)
    src_book = load_workbook(src_file, read_only=True)
    src_sheet = src_book[src_sheet_name]
    src_all_rows = list(src_sheet.rows)
    dest_sheet = target_book.create_sheet("done", 1)
    # BUG FIX: write-only worksheets only support append(); the original wrote
    # via cell() with 0-based indices (openpyxl cells are 1-based) and never
    # advanced its destination row counter.
    header = [cell.value for cell in src_all_rows[0]]
    header.append('电话号')
    dest_sheet.append(header)
    for row in src_all_rows[1:]:
        values = [cell.value for cell in row]
        # Column B (index 1) holds the name used for the phone lookup.
        name = values[1] if len(values) > 1 else None
        values.append(name_phone.get(name, '-1'))
        dest_sheet.append(values)
    target_book.save(target_file_name)
if __name__ == "__main__":
    # Time the two phases separately: reading the member list, then merging.
    st = time.time()
    name_ph = get_name_phone('截至4月17日8点会员数据.xlsx', 'Sheet1', 'B', 'E')
    et1 = time.time()
    print("read1 cost : %.3f" % (et1 - st))
    mix_save("done.xlsx", '回款日志汇总.xlsx', 'Sheet1', name_ph)
    et2 = time.time()
    print("read2 cost : %.3f" % (et2 - et1))
| StarcoderdataPython |
5133747 | import os
from pynsett.discourse import Discourse
from pynsett.extractor import Extractor
from pynsett.knowledge import Knowledge
_path = os.path.dirname(__file__)

text = "<NAME> is blond. He is a carpenter. There is no reason to panic. <NAME> is ginger. She is a carpenter. "

knowledge = Knowledge()
# BUG FIX (resource leak): close the rules file deterministically instead of
# leaving the open handle to the garbage collector.
with open(os.path.join(_path, '../rules/test.rules')) as rules_file:
    knowledge.add_rules(rules_file.read())

discourse = Discourse(text)
discourse._discourse.plot()

# Run the rule-based extractor and print every extracted triplet.
extractor = Extractor(discourse, knowledge)
triplets = extractor.extract()
for triplet in triplets:
    print(triplet)
| StarcoderdataPython |
4809129 | from django.utils import timezone
from django.db import models
from django.contrib.auth.models import User as u
from django.db.models.signals import post_save
from django.dispatch import receiver
class Store(models.Model):
    """A store/restaurant with location, contact and category metadata."""
    id = models.AutoField(primary_key=True)
    store_name = models.CharField(max_length=50)
    branch = models.CharField(max_length=100, null=True)
    area = models.CharField(max_length=50, null=True)
    tel = models.CharField(max_length=20, null=True)
    address = models.CharField(max_length=200, null=True)
    # NOTE(review): max_length has no effect on FloatField -- confirm intent.
    latitude = models.FloatField(max_length=10, null=True)
    longitude = models.FloatField(max_length=10, null=True)
    # '|'-separated category names; see category_list below.
    category = models.CharField(max_length=200, null=True)
    reviewCnt = models.IntegerField(default=0)
    image = models.CharField(max_length=200, null=True)

    @property
    def category_list(self):
        """Split the '|'-separated category string into a list (empty if unset)."""
        return self.category.split("|") if self.category else []
class Menu(models.Model):
    """A single menu item belonging to a store."""
    id = models.IntegerField(primary_key=True)
    store = models.ForeignKey(
        Store, on_delete=models.CASCADE)
    menu_name = models.CharField(max_length=190, null=True)
    price = models.FloatField(null=True)
class Bhour(models.Model):
    """Business-hours record for a store, one flag column per weekday."""
    id = models.IntegerField(primary_key=True)
    store = models.ForeignKey(
        Store, on_delete=models.CASCADE)
    # Encodes the kind of schedule row (e.g. regular vs. break hours).
    bhour_type = models.PositiveSmallIntegerField(null=True)
    week_type = models.PositiveSmallIntegerField(null=True)
    # Per-weekday applicability flags (0/1).
    mon = models.PositiveSmallIntegerField(null=True)
    tue = models.PositiveSmallIntegerField(null=True)
    wed = models.PositiveSmallIntegerField(null=True)
    thu = models.PositiveSmallIntegerField(null=True)
    fri = models.PositiveSmallIntegerField(null=True)
    sat = models.PositiveSmallIntegerField(null=True)
    sun = models.PositiveSmallIntegerField(null=True)
    # Opening/closing times stored as strings; format not enforced here.
    start_time = models.CharField(max_length=20, null=True)
    end_time = models.CharField(max_length=20, null=True)
    etc = models.CharField(max_length=180, null=True)
class User(models.Model):
    """Stand-alone demographic record (distinct from django.contrib.auth's User)."""
    id = models.IntegerField(primary_key=True)
    gender = models.CharField(max_length=20, null=True)
    age = models.IntegerField(null=True)
class Account(models.Model):
    """Profile data attached one-to-one to Django's auth user (imported as `u`)."""
    user = models.OneToOneField(u, on_delete=models.CASCADE)
    gender = models.CharField(max_length=20, null=True)
    age = models.IntegerField(null=True)

    def __str__(self):
        return str(self.user) + ", " + self.gender + ", " + str(self.age)
@receiver(post_save, sender=User)
def create_user_Account(sender, instance, created, **kwargs):
    """Create an Account when a User row is first saved.

    NOTE(review): `Account.user` is a OneToOneField to django.contrib.auth's
    User (imported as `u`), yet these receivers listen on the *local* User
    model defined above -- confirm which model is the intended sender.
    """
    if created:
        Account.objects.create(user=instance)


@receiver(post_save, sender=User)
def save_user_Account(sender, instance, **kwargs):
    """Persist the related Account whenever the user is saved."""
    instance.account.save()
class Review(models.Model):
    """A user's rating and comment for a store."""
    id = models.AutoField(primary_key=True)
    store = models.ForeignKey(
        Store, on_delete=models.CASCADE)
    user = models.ForeignKey(u, on_delete=models.CASCADE)
    # Integer rating; range is not enforced at the model level.
    score = models.IntegerField(null=True)
    content = models.TextField(null=True)
    reg_time = models.DateTimeField(auto_now_add=True)
5076284 | <gh_stars>10-100
import sys
import time
from llspi import c_llspi
from ad9653 import c_ad9653
# This class "just" constructs data lists to be sent to llspi.v
# that will perform the desired AD9653 SPI transaction
class c_llspi_ad9653(c_llspi, c_ad9653):
    """Builds llspi command word lists for AD9653 SPI register access."""

    def __init__(self, chip):
        self.chip = chip  # this is the llspi chipsel == ctl_bits[2:0]

    def write(self, addr, data):
        """Word list that writes `data` (a string of binary digits) to `addr`."""
        dlist = (self.ctl_bits(write=1, chipsel=self.chip, read_en=0))
        dlist += (self.data_bytes(
            self.instruction_word(
                read=0, w0w1=0, addr=addr), Nbyte=2))
        # SECURITY/IDIOM FIX: was eval('0b' + data); int(data, 2) parses the
        # same binary string without executing arbitrary text.
        dlist += (self.data_bytes(self.data_words([int(data, 2)]), 1))
        dlist += (self.ctl_bits(write=1, chipsel=0))
        return dlist

    def read(self, addr):
        """Word list that reads one byte back from register `addr`."""
        dlist = (self.ctl_bits(write=1, chipsel=self.chip))
        dlist += (self.data_bytes(
            self.instruction_word(
                read=1, w0w1=0, addr=addr), Nbyte=2))
        dlist += (self.ctl_bits(
            write=1, chipsel=self.chip, read_en=1, adc_sdio_dir=1))
        # Dummy pattern clocks the response byte out of the device.
        dlist += (self.data_bytes(self.data_words([0b01010101]), 1))
        dlist += (self.ctl_bits(write=1, chipsel=0))
        return dlist
# This class uses LEEP to interact with llspi.v
# Independent of which peripheral chip is attached to llspi.
# All of these methods are unmodified cut-and-paste from zest_setup.py
class leep_llspi():
    """Drives llspi.v through a LEEP register interface."""

    def __init__(self, leep):
        self.leep = leep

    def spi_write(self, obj, addr, value):
        """Send the SPI write transaction built by `obj` for (addr, value)."""
        self.verbose_send(obj.write(addr, value))

    def wait_for_tx_fifo_empty(self):
        """Poll llspi_status until the TX FIFO drains; return the result count."""
        retries = 0
        while 1:
            rrvalue = self.leep.reg_read([('llspi_status')])[0]
            # Bit 4 flags an empty TX FIFO; low nibble (+1) counts results.
            empty = (rrvalue >> 4) & 1
            please_read = (rrvalue + 1) & 0xf
            if empty:
                break
            time.sleep(0.002)
            retries += 1
            # print(rrvalue, type(rrvalue), hex(rrvalue), please_read)
        if retries > 0:
            print("%d retries" % retries)
        return please_read

    def verbose_send(self, dlist):
        """Write every word in `dlist` to llspi_we, then wait for the FIFO."""
        # IDIOM FIX: was a list comprehension used only for its append side
        # effect; build the write list directly instead.
        write_list = [('llspi_we', x) for x in dlist]
        self.leep.reg_write(write_list)
        time.sleep(0.002)
        return self.wait_for_tx_fifo_empty()

    # Each element of obj_list needs to have a read(addr) method to construct
    # the llspi command list.
    def spi_readn(self, obj_list, addr):
        """Read register `addr` from each device; returns None when no data."""
        please_read = self.verbose_send(sum([adc.read(addr) for adc in obj_list], []))
        lol = len(obj_list)
        if please_read != lol:
            print("spi_readn mismatch please_read %d len(obj_list) %d" % (please_read, lol))
        if please_read:
            result1 = self.leep.reg_read([('llspi_result')]*please_read)
            return [(None, None, x) for x in result1]
if __name__ == "__main__":
    import getopt
    import leep

    # NOTE(review): '-p' is accepted but never handled below.
    opts, args = getopt.getopt(sys.argv[1:], 'ha:p:', ['help', 'addr='])
    ip_addr = '192.168.195.84'
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            sys.exit()
        elif opt in ('-a', '--address'):
            ip_addr = arg

    # IDIOM FIX: collapsed the dead `leep_addr = None; if leep_addr is None:`
    # branch -- the condition was always true.
    leep_addr = "leep://" + str(ip_addr)
    leep = leep.open(leep_addr, timeout=2.0, instance=[])
    leepll = leep_llspi(leep)  # temporary(?) stand-in for c_zest
    U2_adc_spi = c_llspi_ad9653(2)

    # Write test
    # If this were a real chip, this would set its test mode
    leepll.spi_write(U2_adc_spi, 0xd, '00001100')

    # Read test
    # If this were a real chip, this would read its Chip ID and Chip Grade
    for addr in [0x01, 0x02]:
        foo = leepll.spi_readn([U2_adc_spi], addr)
        print("Addr 0x%2.2x: 0x%2.2x" % (addr, foo[0][2]))
    print("Done")
| StarcoderdataPython |
11379851 | # -*- coding: utf-8 -*-
# Copyright (c) Ezcad Development Team. All Rights Reserved.
"""
This module creates points.
"""
import numpy as np
from ..new.new import from_data_array
from zoeppritz.modeling import modeling
def zoep_modeling(model, inc_angles, equation, reflection, complexity,
                  object_name):
    """Run Zoeppritz forward modeling and wrap the result as a data object.

    The modeled array has columns (angle, amplitude, phase). When
    `complexity` is 'phase' the last two columns are swapped so phase comes
    second; 'amplitude' keeps the original order. Any other value raises
    ValueError.
    """
    arr = modeling(model, inc_angles, equation, reflection)
    if complexity == 'amplitude':
        pass
    elif complexity == 'phase':
        # Reorder columns to (angle, phase, amplitude).
        angles, amplitude, phase = arr[:, 0], arr[:, 1], arr[:, 2]
        arr = np.vstack((angles, phase, amplitude)).T
    else:
        raise ValueError("Unknown value")
    props = ('X', 'Y', 'Z')
    return from_data_array(arr, props=props, object_name=object_name)
| StarcoderdataPython |
# Placeholder credentials -- replace before use; never commit real secrets.
USERNAME = 'awesome_username' # 'USERNAME'
PASSWORD = '<PASSWORD>' # 'PASSWORD'
| StarcoderdataPython |
309823 | <reponame>marianfx/python-labs
"""Client that connects to the simplehttpserver (directory listing)."""
import re
import urllib
from urllib import request
URL = "http://127.0.0.1:6996"
# Matches <li><a href="X">X</a></li> listing entries; the \1 backreference
# requires the link text to equal the href exactly (SimpleHTTPServer format).
REGEX = re.compile(rb"<li><a href=\"([^\"]+)\">(\1)</a></li>")
# Captures the stem of a bare "*.txt" filename.
TXTREGEX = re.compile(r"^([^\.]+)\.txt$")
def access_url(url: str):
    """Fetch a directory listing from `url` and print the stems of *.txt entries.

    Prints the raw response, then either the txt file names (without the
    extension) or a "No files to display." notice. Errors are reported, not
    raised.
    """
    try:
        response = urllib.request.urlopen(url).read()
        print("Response from server\n--------------------:\n {0}\n--------------------\n".format(
            response
        ))
        reg_obs = REGEX.findall(response)
        # IDIOM FIX: truthiness test instead of len(...) == 0.
        if not reg_obs:
            print("No files to display.")
            return None
        txts = []
        for entry in reg_obs:
            pfile = entry[0].decode("UTF-8")
            match = TXTREGEX.match(pfile)
            if match:
                txts.append(match.group(1))
        if not txts:
            print("No files to display.")
        for txt in txts:
            print(txt)
    except Exception as exc:
        # Deliberately broad: any network/parse failure is reported, not raised.
        print("Error: {0}.\n".format(str(exc)))


if __name__ == "__main__":
    access_url(URL)
| StarcoderdataPython |
6659727 |
# Stub of a MicroPython stm-style module: register access list plus timer
# peripheral and register constants.
mem32 = []

# Timer peripheral indices.
TIM1 = 1
TIM2 = 2
TIM3 = 3
TIM4 = 4
TIM5 = 5
TIM6 = 6
TIM7 = 7
TIM8 = 8
TIM15 = 15
TIM16 = 16
TIM17 = 17

# Timer register offsets.
# NOTE(review): TIM_SMCR, TIM_CCER and TIM_CCMR1 all alias offset 0 -- these
# look like placeholders; confirm against the real register map before use.
TIM_SMCR = 0
TIM_CCER = 0
TIM_CCMR1 = 0
TIM_CCR1 = 1
TIM_CCR2 = 2
TIM_CCR3 = 3
TIM_CCR4 = 4
TIM_CCR5 = 5
TIM_CCR6 = 6
247282 | <reponame>shivahari/QuarksB
"""
Skype conf
"""
from datetime import datetime
SKYPE_SENDER_ENDPOINT = "https://skype-sender.qxf2.com/send-message"
MESSAGE = 'Test message sent on ' + datetime.now().strftime('%d-%m-%Y %H:%M:%S')
| StarcoderdataPython |
3340656 | from adventure import *
from shop import *
from util import *
#######################################################
# Here is a simple starter level to start learning with.
# This creates three rooms in a cave and populates with
# some items and gets it ready for an adventure.
# Read the descriptions and you can guess how it might
# work. But copy this and use it to start your own!
# Good luck!!!
#######################################################
#You can make things to discover like this. This makes an Item. The first argument is the name. Keep it short. The second is a description. The third is the location where it's found. Optionally, you can specify how valuable it is. This makes it worth 10 gold pieces.
treasure = Item('bag of gems', 'countless gems and jewels', 'scattered around the room.', GP(10))
#Sometimes it's fun to make items that don't have much use, but make us laugh!
fries = Item('bag of fries', 'old, old, moldy, disgusting but somewhat tempting french fries', 'on the wet, soggy ground behind some rocks.')
#You can make monsters like this. You can also use the master list of pre-created monsters with the function Mon which takes a monster name as an argument.
#This Dragon has two attacks. The first part of the attack is what it looks like when it's used. The second is the max damage.
dragon = Monster('Dragon', "On top of the coins sits the largest red dragon you've ever seen!", 60, [ MonsterAttack('clawed', 7), MonsterAttack('spewed flames', 9) ], 300 )
#sometimes it's fun to connect rooms with a door and then hide the key. A key is a special item because of it's name.
#Keep it 'key' until you know how to match it with doors specifically.
key = Item('key', 'a large silver dragon key', 'hidden in the pot under lots of spider webs.')
door = Door("a strong door with a silver dragon crest around the key hole.")
#Here's how to make some rooms. These can be indoor or outdoor spaces. Use your imagination to create a cool environment.
#The first argument is the name.
#The second, optional argument, is the description you see only when you first enter a room.
#The third argument is what you see every time after the first entry.
#The fourth argument finishes the sentence "[Direction] you see" in order to give the party some indication of what can be seen from the current space.
#The fifth optional argument is the list of items you will find if you search.
#The sixth argument is the list of monsters in the room.
cave = Room( "Cave", None, "This large cave has the smell of wet dirt and old grease." ,
"a glimmer of light shines through the distant cave entrance.", [fries] )
small_tunnel = Room("Small Tunnel", None, "Down the small tunnel you see a small golden pot. The tunnel stops here.", "a small tunnel.", [key])
dragon_room = Room("Dragon's Lair", None, "There is a huge pile of coins in the center of a large cave.", "a pile of coins.", [treasure], [dragon])
#If you want to make a fun ascii banner, like the Ragged Keep, check out
#http://patorjk.com/software/taag/#p=display&f=Graffiti&t=Type%20Something%20
#then pass it as the optional second argument to Level
#You need to make one level
level1 = Level ("The Cave of Fear")
#connect your rooms together like this. The door is optional.
#The locked door sits between the cave and the dragon's lair; the key is in the tunnel.
cave.connect("East", dragon_room, door)
cave.connect("West", small_tunnel)
#and start the level like this, passing the level an the first room to begin.
run_adventure(level1, cave)
| StarcoderdataPython |
3470157 | import upwork
from upwork.routers import workdays
from unittest.mock import patch
@patch.object(upwork.Client, "get")
def test_get_by_company(mocked_method):
    # get_by_company must GET the company workdays endpoint with the query dict.
    workdays.Api(upwork.Client).get_by_company("company", "from", "till", {})
    mocked_method.assert_called_with(
        "/team/v3/workdays/companies/company/from,till", {}
    )


@patch.object(upwork.Client, "get")
def test_get_by_contract(mocked_method):
    # get_by_contract must GET the contract workdays endpoint with the query dict.
    workdays.Api(upwork.Client).get_by_contract("company", "from", "till", {})
    mocked_method.assert_called_with(
        "/team/v3/workdays/contracts/company/from,till", {}
    )
| StarcoderdataPython |
3493879 | <filename>turbustat/statistics/density_pdf/density_pdf.py
'''
The density PDF as described by Kowal et al. (2007)
'''
import numpy as np
from scipy.stats import nanmean
def pdf(img, num_bins=1000, verbose=True):
    '''
    Creates the PDF given an image (of any dimension).

    INPUTS
    ------
    img - array
        n-dim array
    num_bins - int, optional
        number of histogram bins
    verbose - bool, optional
        plot the PDF on log-log axes

    OUTPUTS
    -------
    bin_centres - array
        centres of the histogram bins
    hist - array
        PDF values, additionally normalized by the number of non-NaN pixels
    '''
    # FIX: scipy.stats.nanmean was deprecated and removed from SciPy;
    # np.nanmean is the drop-in replacement.
    img_av = np.nanmean(img, axis=None)  # normalize by average
    hist, edges = np.histogram(img / img_av, bins=num_bins, density=True)
    hist /= np.sum(~np.isnan(img))
    bin_centres = (edges[:-1] + edges[1:]) / 2

    if verbose:
        import matplotlib.pyplot as p
        p.grid(True)
        p.loglog(bin_centres, hist, 'bD-')
        p.xlabel(r"$\Sigma/\overline{\Sigma}$")
        p.ylabel("PDF")
        p.show()

    return bin_centres, hist
| StarcoderdataPython |
24197 | <reponame>KAGRA-TW-ML/gw-iaas
import abc
import time
from dataclasses import dataclass
from functools import partial
from typing import TYPE_CHECKING
import kubernetes
from kubernetes.utils.create_from_yaml import FailToCreateError
from urllib3.exceptions import MaxRetryError
from hermes.cloudbreak.utils import snakeify, wait_for
if TYPE_CHECKING:
from hermes.cloudbreak.kubernetes import K8sApiClient
@dataclass
class Resource(abc.ABC):
    """Base wrapper around a namespaced Kubernetes resource.

    Subclasses supply the API `client` property and readiness logic; this
    base class handles credential refresh on 401s, a grace period for
    temporarily unreachable masters, and deletion.
    """

    _client: "K8sApiClient"
    name: str
    namespace: str = "default"

    # How long a MaxRetryError condition may persist before giving up.
    MAX_RETRY_GRACE_SECONDS = 300
    # How long a resource may report no status at all before giving up.
    STATUS_AVAILABLE_GRACE_SECONDS = 10

    @classmethod
    def create(cls, client, config):
        """Create the resource described by the dict `config` and wrap it."""
        if config["kind"] == "Deployment":
            cls = Deployment
        elif config["kind"] == "Service":
            cls = Service
        elif config["kind"] == "DaemonSet":
            cls = DaemonSet
        else:
            raise ValueError(
                "Resource kind {} not supported yet".format(config["kind"])
            )

        metadata = config["metadata"]
        obj = cls(client, metadata["name"], metadata["namespace"])

        create_fn = partial(
            kubernetes.utils.create_from_dict,
            k8s_client=client._client,
            data=config,
        )
        response = obj._make_a_request(create_fn)
        if response is None:
            # NOTE(review): MaxRetryError's constructor requires (pool, url)
            # arguments, so this bare raise itself raises TypeError -- either
            # way the caller sees an exception; confirm the intended type.
            raise MaxRetryError
        return obj

    def __post_init__(self):
        self._creation_time = time.time()
        self._unavailable = False
        self._unavailable_time = None

    @property
    @abc.abstractmethod
    def client(self):
        """Kubernetes API client object appropriate for this resource type."""

    def _make_a_request(self, request_fn, do_raise=False):
        """Run `request_fn`, refreshing credentials once on a 401.

        Returns None when the cluster master is temporarily unreachable
        (MaxRetryError) and the grace period has not yet expired.
        """
        try:
            # try to make the request
            return request_fn()
        except (
            kubernetes.client.exceptions.ApiException,
            FailToCreateError,
        ) as e:
            try:
                # create_from_yaml wraps API exceptions, so grab the
                # underlying exception here first
                status = e.api_exceptions[0].status
            except AttributeError:
                status = e.status

            if status != 401:
                raise

            if not do_raise:
                # Refresh credentials and retry once with do_raise=True so a
                # second 401 means the credentials truly lack access.
                self._client.cluster.refresh_credentials()
                self._client._client.configuration.api_key[
                    "authorization"
                ] = self._client.cluster.token
                return self._make_a_request(request_fn, do_raise=True)
            else:
                raise RuntimeError("Unauthorized request to cluster")
        except MaxRetryError:
            # Sometimes raised while the master nodes are busy; return None
            # and allow things a few minutes to get back to normal.
            if not self._unavailable:
                self._unavailable = True
                self._unavailable_time = time.time()
            elif (
                time.time() - self._unavailable_time
            ) >= self.MAX_RETRY_GRACE_SECONDS:
                # BUG FIX: the comparison was `<`, which raised on the second
                # failure and never actually honored the grace period.
                raise RuntimeError(
                    "Deployment {} has been unavailable for {} seconds".format(
                        self.name, self.MAX_RETRY_GRACE_SECONDS
                    )
                )
            return None
        except Exception as e:
            # Surface unexpected failures loudly before re-raising.
            print(type(e), e)
            raise

    def get(self):
        """Return the resource's status object, or None if cluster unavailable."""
        resource_type = snakeify(self.__class__.__name__)
        get_fn = partial(
            getattr(self.client, f"read_namespaced_{resource_type}_status"),
            name=self.name,
            namespace=self.namespace,
        )
        try:
            response = self._make_a_request(get_fn)
            self._unavailable = False
            return response
        except kubernetes.client.ApiException as e:
            if e.status == 404:
                raise RuntimeError(f"{self.message} no longer exists")
            raise

    def delete(self):
        """Submit a delete request for this resource."""
        resource_type = snakeify(self.__class__.__name__)
        # BUG FIX: the kubernetes client exposes delete_namespaced_<type>;
        # delete_namespaced_<type>_status does not exist and raised
        # AttributeError.
        delete_fn = partial(
            getattr(self.client, f"delete_namespaced_{resource_type}"),
            name=self.name,
            namespace=self.namespace,
        )
        return self._make_a_request(delete_fn)

    @abc.abstractmethod
    def is_ready(self):
        """Whether the resource has reached its ready state."""

    def wait_for_ready(self):
        """Block (with progress) until `is_ready` returns True."""
        wait_for(
            self.is_ready,
            f"Waiting for {self.message} to become ready",
        )

    def submit_delete(self):
        """Request deletion; True once accepted or the resource is already gone."""
        try:
            response = self.delete()
            return response is not None
        except kubernetes.client.ApiException as e:
            if e.status == 404:
                return True
            raise

    def is_deleted(self):
        """True once the resource no longer exists on the cluster."""
        try:
            self.get()
        except RuntimeError as e:
            if str(e).endswith("no longer exists"):
                return True
            raise
        else:
            return False

    def remove(self):
        """Delete the resource and block until it is gone."""
        if not self.submit_delete():
            wait_for(
                self.submit_delete,
                f"Waiting for {self.message} to become available to delete",
            )

        if not self.is_deleted():
            # give us a chance to not have to display the progress bar
            wait_for(self.is_deleted, f"Waiting for {self.message} to delete")
        else:
            # TODO: logging?
            print(f"Deleted {self.message}")
        # TODO: remove this from self._client resources?

    @property
    def message(self):
        """Human-readable identifier, e.g. 'deployment my-app'."""
        resource_type = snakeify(self.__class__.__name__).replace("_", " ")
        return " ".join([resource_type, self.name])
class Deployment(Resource):
    """A kubernetes Deployment; ready once it reports an 'Available' condition."""

    @property
    def client(self):
        return kubernetes.client.AppsV1Api(self._client._client)

    # TODO: custom wait that clocks the number of available instances
    def is_ready(self):
        """True once Available; raises if status never appears or progress stops."""
        response = self.get()
        if response is None:
            return False

        conditions = response.status.conditions
        if conditions is None:
            return False

        # SECURITY/BUG FIX: condition statuses are the strings
        # "True"/"False"/"Unknown"; the original eval(i.status) executed
        # arbitrary text and raised NameError on "Unknown".
        statuses = {c.type: c.status == "True" for c in conditions}
        if len(statuses) == 0 and (
            (time.time() - self._creation_time)
            > self.STATUS_AVAILABLE_GRACE_SECONDS
        ):
            raise RuntimeError(
                "Deployment {} has gone {} seconds with no "
                "available status information".format(
                    self.name, self.STATUS_AVAILABLE_GRACE_SECONDS
                )
            )

        try:
            if statuses["Available"]:
                return True
        except KeyError:
            try:
                if not statuses["Progressing"]:
                    raise RuntimeError(f"{self.message} stopped progressing")
            except KeyError:
                return False

    def scale(self, replicas: int):
        """Set the Deployment's replica count; returns the response or False."""
        response = self.get()
        if response is None:
            return False
        response.spec.replicas = replicas

        scale_fn = partial(
            self.client.patch_namespaced_deployment_scale,
            name=self.name,
            namespace=self.namespace,
            body=response,
        )
        return self._make_a_request(scale_fn)
@dataclass
class Service(Resource):
    """Really represents specifically a LoadBalancer"""

    def __post_init__(self):
        # BUG FIX: the base __post_init__ was not called, so _unavailable /
        # _unavailable_time / _creation_time were never initialized and the
        # MaxRetryError path in _make_a_request raised AttributeError.
        super().__post_init__()
        self._ip = None

    @property
    def client(self):
        return kubernetes.client.CoreV1Api(self._client._client)

    @property
    def ip(self):
        """Public IP of the load balancer, or None until one is assigned."""
        if self._ip is None:
            response = self.get()
            if response is None:
                return None
            try:
                self._ip = response.status.load_balancer.ingress[0].ip
            except TypeError:
                # ingress is None until the LB has been provisioned
                return None
        return self._ip

    def is_ready(self):
        # server is considered ready once it has a public IP address
        return self.ip is not None
class DaemonSet(Resource):
    """A kubernetes DaemonSet; ready when every desired pod reports ready."""

    @property
    def client(self):
        return kubernetes.client.AppsV1Api(self._client._client)

    def is_ready(self):
        """True once the number of ready pods matches the scheduled count."""
        response = self.get()
        if response is None:
            return False
        status = response.status
        return status.desired_number_scheduled == status.number_ready
| StarcoderdataPython |
175197 | """Constants and helper functions used in this module"""
from wordler.__about__ import __title__
from enum import Enum
import logging
from typing import List, Union
from pkg_resources import resource_filename
ALPHABET = "abcdefghijklmnopqrstuvwxyz".upper()
def get_full_dicionary(word_length: int = 5) -> List[str]:
    """Retrieve all words of the given length from the bundled word list.

    :param word_length: keep only words with exactly this many letters
    :returns: upper-cased, purely alphabetic words of that length

    (The misspelled function name is kept for backward compatibility.)
    """
    filename = resource_filename(__title__, "assets/words.txt")
    # Explicit encoding so the result does not depend on the locale default.
    with open(filename, encoding="utf-8") as file:
        words = [line.rstrip().upper() for line in file]
    return [word for word in words if len(word) == word_length and word.isalpha()]
def unique_letters(word: str) -> int:
    """Return the number of distinct ASCII letters (A-Z) present in *word*.

    Comparison is case-insensitive; non-letter characters are ignored.
    """
    from string import ascii_uppercase  # byte-identical to the module's ALPHABET
    # Set intersection is a single pass over the word instead of one
    # membership scan per alphabet letter.
    return len(set(word.upper()) & set(ascii_uppercase))
class LetterResponse(Enum):
    """The possible per-letter feedback colors for a guess."""

    GREEN = "green"
    ORANGE = "orange"
    GRAY = "gray"

    def __repr__(self):
        # Show just the member name (e.g. GREEN) instead of the default
        # <LetterResponse.GREEN: 'green'> form.
        return self.name
def set_up_logging(log_level: Union[str, int] = logging.DEBUG) -> None:
    """Configure root logging with a stderr handler.

    :param log_level: Level at which the stderr handler emits records
    """
    root = logging.getLogger()
    # Root stays at DEBUG so every record reaches the handlers;
    # filtering happens per handler.
    root.setLevel("DEBUG")
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter(fmt="%(levelname)s - %(message)s"))
    handler.setLevel(log_level)
    root.addHandler(handler)
| StarcoderdataPython |
1601831 | #!/usr/bin/env python
import swiftclient
import os, base64, json
from create_users import CreateUser
from config import *
from secret_manager import sec_manager
from Crypto.Cipher import AES
from Crypto import Random
from Crypto.PublicKey import RSA
from ecdsa import SigningKey, NIST256p
# Size AESKey: 32 bytes = 256 bits, 16 = 128 bits
BLOCK_SIZE = 16
# NOTE(review): this module is Python 2 (print statements, `except Exception, err`).
class EscudoUserProperties:
    """Creates an OpenStack user together with its client-side key material.

    Generates an RSA keypair, an AES master key and an ECDSA signing/verifying
    keypair, registers the public halves via Barbican/Keystone, and stores the
    private halves in local files under obj_world/.
    """

    def __init__(self, name, password, barbican,keystone):
        # name is expected as "tenant:username" (split apart in create()).
        self.name = name
        self.password = password
        # Barbican client: stores public/verification keys as secrets.
        self.barbican = barbican
        # Keystone client: used to resolve the created user's id.
        self.keystone = keystone

    def create_user(self, force=False):
        """
        Create user, sending the public key to the daemon and creating private and master keys
        """
        #Generate keys
        # 1024-bit RSA keypair in PEM form.
        # NOTE(review): 1024-bit RSA is weak by modern standards.
        pvtK, pubK = self.gen_keypair(1024)
        # Random AES master key (BLOCK_SIZE = 16 bytes = 128 bits).
        master_key = os.urandom(BLOCK_SIZE)
        #Generate signing keys
        sk = SigningKey.generate(curve=NIST256p)
        vk = sk.get_verifying_key()
        sk = sk.to_pem()
        vk = vk.to_pem()
        #Create
        # Register the user plus its public/verification keys; abort on failure.
        if self.create(self.name,self.password,pubK,vk)!= 'OK':
            print "Error in create demo users"
            return
        #Save keys in local files
        # create() must run first: it sets self.usrID, which save_keys()
        # uses to build the key filenames.
        self.save_keys(pvtK,pubK,master_key,sk,vk,force)

    def save_keys(self,pvtK,pubK,master_key,sk,vk,force):
        '''
        Save keys in local files
        Args:
            pvtK: User's private key
            pubK: User's public key
            master_key: User's AES master key
            sk: User's signing key
            vk: User's verification key
            force: currently unused
        '''
        # All key files are named after the Keystone user id (self.usrID,
        # set by create()).
        pvk_filename = "obj_world/pvt_%s.key" % (self.usrID)
        puk_filename = "obj_world/pub_%s.key" % (self.usrID)
        mk_filename = "obj_world/mk_%s.key" % (self.usrID)
        vk_filename = "obj_world/vk_%s.key" % (self.usrID)
        sk_filename = "obj_world/sk_%s.key" % (self.usrID)
        # NOTE(review): the master key is stored base64-encoded but NOT
        # encrypted on disk.
        with open(mk_filename, 'w') as mk_file:
            mk_file.write(base64.b64encode(master_key))
        print("Generated and Stored AES MasterKey.")
        with open(vk_filename, 'w') as vk_file:
            vk_file.write(vk)
        print("Generated and Stored Secure verifying key.")
        with open(sk_filename, 'w') as sk_file:
            sk_file.write(sk)
        print("Generated and Stored Secure signing key.")
        # Store RSA keys
        with open(pvk_filename, "w") as pvk_file:
            # PKCS#7-style padding up to the AES block size.
            pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * chr(BLOCK_SIZE - len(s) % BLOCK_SIZE)
            pvtK = pad(pvtK)
            iv = Random.new().read(AES.block_size)
            # The private key is encrypted with the master key (AES-CBC);
            # the IV is prepended to the ciphertext before base64-encoding.
            cipher = AES.new(master_key, AES.MODE_CBC, iv)
            pvk_file.write(base64.b64encode(iv + cipher.encrypt(pvtK)))
        print("Generated and Stored RSA private key.")
        with open(puk_filename, "w") as puk_file:
            puk_file.write(pubK)
        print("Generated and Stored RSA public key.")
        return

    def gen_keypair(self, bits):
        """
        Generate an RSA keypair with an exponent of 65537 in PEM format
        param: bits The key length in bits
        """
        new_key = RSA.generate(bits, e=65537)
        public_key = new_key.publickey().exportKey("PEM")
        private_key = new_key.exportKey("PEM")
        return private_key, public_key

    def create(self,user, encpass, client_pubKey,client_verificationkey):
        """
        Create a new user
        Args:
            user: tenant + username
            encpass: password ciphered
            client_pubKey: the user's public key
            client_verificationKey: the user's verification key
        """
        tenant = user.split(':')[0]
        username = user.split(':')[1]
        try:
            # Store the public key in Barbican; keep only the secret UUID
            # (the part of secret_ref after 'secrets/').
            secret = self.barbican.secrets.create(name="public_key",payload=str(client_pubKey))
            secret.store()
            pub_ref = secret.secret_ref[secret.secret_ref.find('secrets/')+8:]
            secret1 = self.barbican.secrets.create(name="verification_key",payload=str(client_verificationkey))
            secret1.store()
            ver_ref = secret1.secret_ref[secret1.secret_ref.find('secrets/')+8:]
            dict_pub_key = {}
            dict_pub_key['Public_Key'] = pub_ref
            dict_pub_key['Verification_Key'] = ver_ref
            # Kick off user creation (CreateUser.start() suggests a Thread).
            # NOTE(review): the Keystone lookup below may race with that
            # thread — confirm CreateUser's semantics.
            CreateUser(username,encpass,tenant,json.dumps(dict_pub_key),'Member',AUTH_URL).start()
            self.usrID = filter(lambda x: x.name == username, self.keystone.users.list())[0].id
            print "Created user ", username
        except Exception,err:
            # Any failure yields a silent non-'OK' return (err is unused).
            return
        return "OK"
| StarcoderdataPython |
1723982 | import sys
# Snapshot of the import-time sys.path so it can be restored after guarded imports.
old_path = sys.path[:]
# Keep only entries under sys.prefix (the interpreter's own install tree);
# _safe_import() searches just these, avoiding the caller's local modules.
safe_path = list(filter(lambda x: x.startswith(sys.prefix), sys.path))
def _safe_import(modname):
    """Import *modname* with sys.path restricted to interpreter-owned entries.

    Returns the imported module, or None if it cannot be imported.
    """
    sys.path = safe_path
    try:
        return __import__(modname)
    except ImportError:
        return None
    finally:
        # Bugfix: restore the caller's sys.path on every exit path — the old
        # code left sys.path replaced if __import__ raised anything other
        # than ImportError.
        sys.path = old_path
def doc_from_str(objstr):
ns = objstr.split('.')
try:
obj = __builtins__[ns[0]]
except KeyError:
obj = _safe_import(ns[0])
if obj is None:
return None
for scope in ns[1:]:
try:
obj = getattr(obj, scope)
except AttributeError:
return None
return getattr(obj, '__doc__')
| StarcoderdataPython |
6402017 | import codecs
import math
import os
import pickle
import sys
import traceback
import gzip
import pprint
import itertools
import struct
from ctypes import *
from OpenGL.GL import *
import numpy as np
from numpy import array, float32, uint8
def is_gz_compressed_file(filename):
    """Return True if *filename* starts with the gzip magic header (1f 8b 08)."""
    with open(filename, 'rb') as stream:
        header = stream.read(3)
    return header == b'\x1f\x8b\x08'
def export_texture(filepath, export_filepath):
    """Convert a serialized texture dump into a packed binary texture file.

    Reads a texture description from *filepath* — either a gzip-compressed
    pickle or a plain-text Python literal — and writes a binary header
    followed by the raw pixel bytes to *export_filepath*, translating the
    source's OpenGL enums into Vulkan enum values. Unsupported enum values
    raise, but all errors are caught and printed, never propagated.
    """
    try:
        if os.path.exists(filepath):
            # Load data (deserialize)
            if is_gz_compressed_file(filepath):
                # SECURITY NOTE(review): pickle.load can execute arbitrary
                # code from the file — only feed this trusted input.
                with gzip.open(filepath, 'rb') as f:
                    loaded_data = pickle.load(f)
            else:
                # human readable data
                # SECURITY NOTE(review): eval() runs the file content as
                # Python — only feed this trusted input.
                with open(filepath, 'r') as f:
                    loaded_data = eval(f.read())
            # convert numpy array to regular array
            # if 'Texture2DArray' == loaded_data['texture_type'] or 'Texture3D' == loaded_data['texture_type']:
            #     loaded_data['data'] = list(itertools.chain(*[list(texture_data) for texture_data in loaded_data['data']]))
            # else:
            #     loaded_data['data'] = list(loaded_data['data'])
            # with open(export_filepath, 'w') as f:
            #     f.write(str(loaded_data))
            '''
            file struct {
                texture_type: i32,
                width: i32,
                height: i32,
                depth: i32,
                format: i32,
                enable_mipmap: i32,
                min_filter: i32,
                mag_filter: i32,
                wrap: i32,
                data_bytes: i32,
                data: byte array
            }
            '''
            with open(export_filepath, 'wb') as f:
                # texture_type — GL texture class name mapped to a
                # VkImageViewType value.
                texture_type = loaded_data['texture_type']
                if 'Texture2D' == texture_type:
                    f.write(struct.pack('i', 1)) # VK_IMAGE_VIEW_TYPE_2D
                elif 'Texture3D' == texture_type:
                    f.write(struct.pack('i', 2)) # VK_IMAGE_VIEW_TYPE_3D
                elif 'TextureCube' == texture_type:
                    f.write(struct.pack('i', 3)) # VK_IMAGE_VIEW_TYPE_CUBE
                elif 'Texture2DArray' == texture_type:
                    f.write(struct.pack('i', 5)) # VK_IMAGE_VIEW_TYPE_2D_ARRAY
                else:
                    raise BaseException("Not implemented.")
                # width
                f.write(struct.pack('i', loaded_data['width']))
                # height
                f.write(struct.pack('i', loaded_data['height']))
                # depth
                f.write(struct.pack('i', loaded_data['depth']))
                # internal_format — GL internal format mapped to a VkFormat.
                internal_format = loaded_data['internal_format']
                if GL_R16F == internal_format:
                    # 16f -> 32f
                    f.write(struct.pack('i', 100)) # VK_FORMAT_R32_SFLOAT = 100,
                elif GL_RGBA32F == internal_format:
                    f.write(struct.pack('i', 109)) # VK_FORMAT_R32G32B32A32_SFLOAT = 109,
                else:
                    print(internal_format)
                    raise BaseException("Not implemented.")
                # enable_mipmap & min_filter
                # Mipmapping is implied by the GL minification filter choice.
                mipmap_filters = (GL_LINEAR_MIPMAP_LINEAR, GL_LINEAR_MIPMAP_NEAREST, GL_NEAREST_MIPMAP_LINEAR, GL_NEAREST_MIPMAP_NEAREST)
                min_filter = loaded_data['min_filter']
                # enable_mipmap
                f.write(struct.pack('i', 1 if min_filter in mipmap_filters else 0))
                # min_filter
                # NOTE(review): a mipmap min filter falls through to
                # "Not implemented" here — only GL_LINEAR is supported.
                if GL_LINEAR == min_filter:
                    f.write(struct.pack('i', 1)) # VK_FILTER_LINEAR = 1,
                else:
                    raise BaseException("Not implemented.")
                # mag_filter
                mag_filter = loaded_data['mag_filter']
                if GL_LINEAR == mag_filter:
                    f.write(struct.pack('i', 1)) # VK_FILTER_LINEAR = 1,
                else:
                    raise BaseException("Not implemented.")
                # wrap — GL wrap mode mapped to a VkSamplerAddressMode.
                wrap = loaded_data['wrap']
                if GL_REPEAT == wrap:
                    f.write(struct.pack('i', 0)) # VK_SAMPLER_ADDRESS_MODE_REPEAT = 0,
                elif GL_CLAMP == wrap or GL_CLAMP_TO_EDGE == wrap:
                    f.write(struct.pack('i', 2)) # VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE = 2,
                else:
                    print("wrap:", wrap)
                    raise BaseException("Not implemented.")
                # data — byte length (i32) followed by the raw pixel bytes.
                data = loaded_data['data'].tobytes()
                f.write(struct.pack('i', len(data)))
                f.write(data)
    except:
        # NOTE(review): bare except intentionally swallows everything
        # (including the BaseExceptions above) and just logs the traceback.
        print(traceback.format_exc())
if __name__ == '__main__':
    # Usage: python <script> <source_texture_path> <export_path>
    # (needs at least two arguments after the script name)
    if 2 < len(sys.argv):
        export_texture(sys.argv[1], sys.argv[2])
| StarcoderdataPython |
288462 | # -*- coding: utf-8 -*-
#
# Copyright 2016 dpa-infocom GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aiohttp
import logging
import json
from urllib.parse import quote_plus
from livebridge.base import BaseTarget, TargetResponse, InvalidTargetResource
from livebridge_liveblog.common import LiveblogClient
logger = logging.getLogger(__name__)
class LiveblogTarget(LiveblogClient, BaseTarget):
    """Livebridge target that creates, updates and deletes posts on a
    Liveblog instance via its REST API."""

    type = "liveblog"

    def get_id_at_target(self, post):
        """Extracts id from the given **post** of the target resource.

        :param post: post being processed
        :type post: livebridge.posts.base.BasePost
        :returns: string"""
        id_at_target = None
        if post.target_doc:
            id_at_target = post.target_doc.get("_id")
        else:
            logger.warning("No id at target found.")
        return id_at_target

    def get_etag_at_target(self, post):
        """Extracts etag from the given **post** of the target resource.

        :param post: post being processed
        :type post: livebridge.posts.base.BasePost
        :returns: string"""
        etag_at_target = None
        if post.target_doc:
            etag_at_target = post.target_doc.get("_etag")
        else:
            # Bugfix: message previously said "No id at target found."
            # (copy-paste from get_id_at_target).
            logger.warning("No etag at target found.")
        return etag_at_target

    def _get_post_status(self):
        """Map the configured save flags to a Liveblog post_status value."""
        if self.save_as_draft:
            return "draft"
        if self.save_as_contribution:
            return "submitted"
        return "open"

    def _build_post_data(self, post, items):
        """Assemble the JSON payload for creating/updating a post that
        references the already-saved *items*."""
        data = {
            "post_status": self._get_post_status(),
            "sticky": True if post.is_sticky else False,
            "lb_highlight": True if post.is_highlighted else False,
            "blog": self.target_id,
            "groups": [{
                "id": "root",
                "refs": [{
                    "idRef": "main"
                }],
                "role": "grpRole:NEP"
            }, {
                "id": "main",
                "refs": [{"residRef": item["guid"]} for item in items],
                "role": "grpRole:Main"
            }]
        }
        return data

    def _build_image_item(self, item, resource):
        """Build a Liveblog image item from a source *item* and the uploaded
        archive *resource* (which carries the rendition URLs)."""
        caption = item["meta"].get("caption", "")
        credit = item["meta"].get("credit", "")
        # byline
        byline = caption
        byline += " Credit: {}".format(credit) if credit else ""
        # text value for image item
        text = '<figure> <img src="{}" alt="{}" srcset="{} {}w, {} {}w, {} {}w, {} {}w" />'
        text += '<figcaption>{}</figcaption></figure>'
        media = resource.get("renditions", {})
        text = text.format(
            media["thumbnail"]["href"], quote_plus(caption),
            media["baseImage"]["href"], media["baseImage"]["width"],
            media["viewImage"]["href"], media["viewImage"]["width"],
            media["thumbnail"]["href"], media["thumbnail"]["width"],
            media["original"]["href"], media["original"]["width"],
            byline)
        # build item
        new_item = {
            "item_type": "image",
            "text": text,
            "meta": {
                "caption": caption,
                "credit": credit,
                "media": {
                    "_id": resource.get("_id"),
                    "renditions": media,
                }
            }
        }
        return new_item

    async def _save_item(self, data):
        """Save one content item in the target blog and return the created
        item resource."""
        if data["item_type"] == "image":
            # special handling for image items: upload the binary first,
            # then reference its renditions from the item.
            img_data = await self._save_image(data)
            data = self._build_image_item(data, img_data)
        # save item in target blog
        data["blog"] = self.target_id
        url = "{}/{}".format(self.endpoint, "items")
        item = await self._post(url, json.dumps(data), status=201)
        return item

    async def _save_image(self, img_item):
        """Upload a local image file to the Liveblog archive endpoint.

        :param img_item: item dict with a "tmp_path" pointing at the file
        :returns: dict of the created archive resource, or None on failure"""
        new_img = None
        session = None
        try:
            # upload photo to liveblog instance
            url = "{}/{}".format(self.endpoint, "archive")
            # Bugfix: the file handle used to be opened without ever being
            # closed; keep it open only for the duration of the upload.
            with open(img_item["tmp_path"], 'rb') as media_file:
                # build form data
                data = aiohttp.FormData()
                data.add_field('media',
                               media_file,
                               content_type='image/jpg')
                # send data
                connector = aiohttp.TCPConnector(ssl=False)
                headers = self._get_auth_header()
                session = aiohttp.ClientSession(connector=connector, headers=headers, conn_timeout=10)
                async with session.post(url, data=data) as r:
                    if r.status == 201:
                        new_img = await r.json()
                    else:
                        raise Exception("Image {} could not be saved!".format(img_item))
        except Exception as e:
            logger.error("Posting image failed for [{}] - {}".format(self, img_item))
            logger.exception(e)
        finally:
            # Bugfix: the session used to leak when the upload raised; it
            # is now closed on every path.
            if session is not None:
                await session.close()
        return new_img

    async def post_item(self, post):
        """Build your request to create a post."""
        await self._login()
        # save item parts
        items = []
        for item in post.content:
            items.append(await self._save_item(item))
        # save new post
        data = self._build_post_data(post, items)
        url = "{}/{}".format(self.endpoint, "posts")
        return TargetResponse(await self._post(url, json.dumps(data), status=201))

    async def update_item(self, post):
        """Build your request to update a post."""
        await self._login()
        # save item parts
        items = []
        for item in post.content:
            items.append(await self._save_item(item))
        data = self._build_post_data(post, items)
        # get id of post at target
        id_at_target = self.get_id_at_target(post)
        if not id_at_target:
            raise InvalidTargetResource("No id for resource at target found!")
        # patch existing post
        url = "{}/{}/{}".format(self.endpoint, "posts", id_at_target)
        return TargetResponse(await self._patch(url, json.dumps(data), etag=self.get_etag_at_target(post)))

    async def delete_item(self, post):
        """Build your request to delete a post."""
        await self._login()
        # get id of post at target
        id_at_target = self.get_id_at_target(post)
        if not id_at_target:
            raise InvalidTargetResource("No id for resource at target found!")
        # delete post (Liveblog soft-deletes via a patch, not HTTP DELETE)
        url = "{}/{}/{}".format(self.endpoint, "posts", id_at_target)
        data = {"deleted": True, "post_status": "open"}
        return TargetResponse(await self._patch(url, json.dumps(data), etag=self.get_etag_at_target(post)))

    async def handle_extras(self, post):
        """No extra post-processing needed for this target."""
        return None
| StarcoderdataPython |
1629420 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_secprof_wanopt
version_added: "2.8"
notes:
- Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
author:
- <NAME> (@lweighall)
- <NAME> (@Ghilli3)
- <NAME> (@p4r4n0y1ng)
short_description: WAN optimization
description:
- Manage WanOpt security profiles in FortiManager via API
options:
adom:
description:
- The ADOM the configuration should belong to.
required: false
default: root
mode:
description:
- Sets one of three modes for managing the object.
- Allows use of soft-adds instead of overwriting existing values
choices: ['add', 'set', 'delete', 'update']
required: false
default: add
transparent:
description:
- Enable/disable transparent mode.
required: false
choices:
- disable
- enable
name:
description:
- Profile name.
required: false
comments:
description:
- Comment.
required: false
auth_group:
description:
- Optionally add an authentication group to restrict access to the WAN Optimization tunnel to
peers in the authentication group.
required: false
cifs:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
cifs_byte_caching:
description:
- Enable/disable byte-caching for HTTP. Byte caching reduces the amount of traffic by caching
file data sent across the WAN and in future serving if from the cache.
required: false
choices:
- disable
- enable
cifs_log_traffic:
description:
- Enable/disable logging.
required: false
choices:
- disable
- enable
cifs_port:
description:
- Single port number or port number range for CIFS. Only packets with a destination port number
that matches this port number or range are accepted by this profile.
required: false
cifs_prefer_chunking:
description:
- Select dynamic or fixed-size data chunking for HTTP WAN Optimization.
required: false
choices:
- dynamic
- fix
cifs_secure_tunnel:
description:
- Enable/disable securing the WAN Opt tunnel using SSL. Secure and non-secure tunnels use the
same TCP port (7810).
required: false
choices:
- disable
- enable
cifs_status:
description:
- Enable/disable HTTP WAN Optimization.
required: false
choices:
- disable
- enable
cifs_tunnel_sharing:
description:
- Tunnel sharing mode for aggressive/non-aggressive and/or interactive/non-interactive protocols.
required: false
choices:
- private
- shared
- express-shared
ftp:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
ftp_byte_caching:
description:
- Enable/disable byte-caching for HTTP. Byte caching reduces the amount of traffic by caching
file data sent across the WAN and in future serving if from the cache.
required: false
choices:
- disable
- enable
ftp_log_traffic:
description:
- Enable/disable logging.
required: false
choices:
- disable
- enable
ftp_port:
description:
- Single port number or port number range for FTP. Only packets with a destination port number
that matches this port number or range are accepted by this profile.
required: false
ftp_prefer_chunking:
description:
- Select dynamic or fixed-size data chunking for HTTP WAN Optimization.
required: false
choices:
- dynamic
- fix
ftp_secure_tunnel:
description:
- Enable/disable securing the WAN Opt tunnel using SSL. Secure and non-secure tunnels use the
same TCP port (7810).
required: false
choices:
- disable
- enable
ftp_status:
description:
- Enable/disable HTTP WAN Optimization.
required: false
choices:
- disable
- enable
ftp_tunnel_sharing:
description:
- Tunnel sharing mode for aggressive/non-aggressive and/or interactive/non-interactive protocols.
required: false
choices:
- private
- shared
- express-shared
http:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
http_byte_caching:
description:
- Enable/disable byte-caching for HTTP. Byte caching reduces the amount of traffic by caching
file data sent across the WAN and in future serving if from the cache.
required: false
choices:
- disable
- enable
http_log_traffic:
description:
- Enable/disable logging.
required: false
choices:
- disable
- enable
http_port:
description:
- Single port number or port number range for HTTP. Only packets with a destination port number
that matches this port number or range are accepted by this profile.
required: false
http_prefer_chunking:
description:
- Select dynamic or fixed-size data chunking for HTTP WAN Optimization.
required: false
choices:
- dynamic
- fix
http_secure_tunnel:
description:
- Enable/disable securing the WAN Opt tunnel using SSL. Secure and non-secure tunnels use the
same TCP port (7810).
required: false
choices:
- disable
- enable
http_ssl:
description:
- Enable/disable SSL/TLS offloading (hardware acceleration) for HTTPS traffic in this tunnel.
required: false
choices:
- disable
- enable
http_ssl_port:
description:
- Port on which to expect HTTPS traffic for SSL/TLS offloading.
required: false
http_status:
description:
- Enable/disable HTTP WAN Optimization.
required: false
choices:
- disable
- enable
http_tunnel_non_http:
description:
- Configure how to process non-HTTP traffic when a profile configured for HTTP traffic accepts
a non-HTTP session. Can occur if an application sends non-HTTP traffic using an HTTP destination port.
required: false
choices:
- disable
- enable
http_tunnel_sharing:
description:
- Tunnel sharing mode for aggressive/non-aggressive and/or interactive/non-interactive protocols.
required: false
choices:
- private
- shared
- express-shared
http_unknown_http_version:
description:
- How to handle HTTP sessions that do not comply with HTTP 0.9, 1.0, or 1.1.
required: false
choices:
- best-effort
- reject
- tunnel
mapi:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
mapi_byte_caching:
description:
- Enable/disable byte-caching for HTTP. Byte caching reduces the amount of traffic by caching
file data sent across the WAN and in future serving if from the cache.
required: false
choices:
- disable
- enable
mapi_log_traffic:
description:
- Enable/disable logging.
required: false
choices:
- disable
- enable
mapi_port:
description:
- Single port number or port number range for MAPI. Only packets with a destination port number
that matches this port number or range are accepted by this profile.
required: false
mapi_secure_tunnel:
description:
- Enable/disable securing the WAN Opt tunnel using SSL. Secure and non-secure tunnels use the
same TCP port (7810).
required: false
choices:
- disable
- enable
mapi_status:
description:
- Enable/disable HTTP WAN Optimization.
required: false
choices:
- disable
- enable
mapi_tunnel_sharing:
description:
- Tunnel sharing mode for aggressive/non-aggressive and/or interactive/non-interactive protocols.
required: false
choices:
- private
- shared
- express-shared
tcp:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
tcp_byte_caching:
description:
- Enable/disable byte-caching for HTTP. Byte caching reduces the amount of traffic by caching
file data sent across the WAN and in future serving if from the cache.
required: false
choices:
- disable
- enable
tcp_byte_caching_opt:
description:
- Select whether TCP byte-caching uses system memory only or both memory and disk space.
required: false
choices:
- mem-only
- mem-disk
tcp_log_traffic:
description:
- Enable/disable logging.
required: false
choices:
- disable
- enable
tcp_port:
description:
- Single port number or port number range for TCP. Only packets with a destination port number
that matches this port number or range are accepted by this profile.
required: false
tcp_secure_tunnel:
description:
- Enable/disable securing the WAN Opt tunnel using SSL. Secure and non-secure tunnels use the
same TCP port (7810).
required: false
choices:
- disable
- enable
tcp_ssl:
description:
- Enable/disable SSL/TLS offloading.
required: false
choices:
- disable
- enable
tcp_ssl_port:
description:
- Port on which to expect HTTPS traffic for SSL/TLS offloading.
required: false
tcp_status:
description:
- Enable/disable HTTP WAN Optimization.
required: false
choices:
- disable
- enable
tcp_tunnel_sharing:
description:
- Tunnel sharing mode for aggressive/non-aggressive and/or interactive/non-interactive protocols.
required: false
choices:
- private
- shared
- express-shared
'''
EXAMPLES = '''
- name: DELETE Profile
fmgr_secprof_wanopt:
name: "Ansible_WanOpt_Profile"
mode: "delete"
- name: Create FMGR_WANOPT_PROFILE
fmgr_secprof_wanopt:
mode: "set"
adom: "root"
transparent: "enable"
name: "Ansible_WanOpt_Profile"
comments: "Created by Ansible"
cifs: {byte-caching: "enable",
log-traffic: "enable",
port: 80,
prefer-chunking: "dynamic",
status: "enable",
tunnel-sharing: "private"}
ftp: {byte-caching: "enable",
log-traffic: "enable",
port: 80,
prefer-chunking: "dynamic",
secure-tunnel: "disable",
status: "enable",
tunnel-sharing: "private"}
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
from ansible.module_utils.network.fortimanager.common import FMGBaseException
from ansible.module_utils.network.fortimanager.common import FMGRCommon
from ansible.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
from ansible.module_utils.network.fortimanager.common import prepare_dict
from ansible.module_utils.network.fortimanager.common import scrub_dict
###############
# START METHODS
###############
def fmgr_wanopt_profile_modify(fmgr, paramgram):
    """
    :param fmgr: The fmgr object instance from fortimanager.py
    :type fmgr: class object
    :param paramgram: The formatted dictionary of options to process
    :type paramgram: dict
    :return: The response from the FortiManager
    :rtype: dict
    """
    adom = paramgram["adom"]
    mode = paramgram["mode"]
    url = ""
    datagram = {}

    if mode == "delete":
        # Deletion targets one named profile and sends no payload.
        url = '/pm/config/adom/{adom}/obj/wanopt/profile/{name}'.format(adom=adom, name=paramgram["name"])
    elif mode in ("set", "add", "update"):
        # Create/update posts the cleaned paramgram to the collection URL.
        url = '/pm/config/adom/{adom}/obj/wanopt/profile'.format(adom=adom)
        datagram = scrub_dict(prepare_dict(paramgram))

    return fmgr.process_request(url, datagram, mode)
#############
# END METHODS
#############
def main():
argument_spec = dict(
adom=dict(type="str", default="root"),
mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"),
transparent=dict(required=False, type="str", choices=["disable", "enable"]),
name=dict(required=False, type="str"),
comments=dict(required=False, type="str"),
auth_group=dict(required=False, type="str"),
cifs=dict(required=False, type="dict"),
cifs_byte_caching=dict(required=False, type="str", choices=["disable", "enable"]),
cifs_log_traffic=dict(required=False, type="str", choices=["disable", "enable"]),
cifs_port=dict(required=False, type="str"),
cifs_prefer_chunking=dict(required=False, type="str", choices=["dynamic", "fix"]),
cifs_secure_tunnel=dict(required=False, type="str", choices=["disable", "enable"]),
cifs_status=dict(required=False, type="str", choices=["disable", "enable"]),
cifs_tunnel_sharing=dict(required=False, type="str", choices=["private", "shared", "express-shared"]),
ftp=dict(required=False, type="dict"),
ftp_byte_caching=dict(required=False, type="str", choices=["disable", "enable"]),
ftp_log_traffic=dict(required=False, type="str", choices=["disable", "enable"]),
ftp_port=dict(required=False, type="str"),
ftp_prefer_chunking=dict(required=False, type="str", choices=["dynamic", "fix"]),
ftp_secure_tunnel=dict(required=False, type="str", choices=["disable", "enable"]),
ftp_status=dict(required=False, type="str", choices=["disable", "enable"]),
ftp_tunnel_sharing=dict(required=False, type="str", choices=["private", "shared", "express-shared"]),
http=dict(required=False, type="dict"),
http_byte_caching=dict(required=False, type="str", choices=["disable", "enable"]),
http_log_traffic=dict(required=False, type="str", choices=["disable", "enable"]),
http_port=dict(required=False, type="str"),
http_prefer_chunking=dict(required=False, type="str", choices=["dynamic", "fix"]),
http_secure_tunnel=dict(required=False, type="str", choices=["disable", "enable"]),
http_ssl=dict(required=False, type="str", choices=["disable", "enable"]),
http_ssl_port=dict(required=False, type="str"),
http_status=dict(required=False, type="str", choices=["disable", "enable"]),
http_tunnel_non_http=dict(required=False, type="str", choices=["disable", "enable"]),
http_tunnel_sharing=dict(required=False, type="str", choices=["private", "shared", "express-shared"]),
http_unknown_http_version=dict(required=False, type="str", choices=["best-effort", "reject", "tunnel"]),
mapi=dict(required=False, type="dict"),
mapi_byte_caching=dict(required=False, type="str", choices=["disable", "enable"]),
mapi_log_traffic=dict(required=False, type="str", choices=["disable", "enable"]),
mapi_port=dict(required=False, type="str"),
mapi_secure_tunnel=dict(required=False, type="str", choices=["disable", "enable"]),
mapi_status=dict(required=False, type="str", choices=["disable", "enable"]),
mapi_tunnel_sharing=dict(required=False, type="str", choices=["private", "shared", "express-shared"]),
tcp=dict(required=False, type="dict"),
tcp_byte_caching=dict(required=False, type="str", choices=["disable", "enable"]),
tcp_byte_caching_opt=dict(required=False, type="str", choices=["mem-only", "mem-disk"]),
tcp_log_traffic=dict(required=False, type="str", choices=["disable", "enable"]),
tcp_port=dict(required=False, type="str"),
tcp_secure_tunnel=dict(required=False, type="str", choices=["disable", "enable"]),
tcp_ssl=dict(required=False, type="str", choices=["disable", "enable"]),
tcp_ssl_port=dict(required=False, type="str"),
tcp_status=dict(required=False, type="str", choices=["disable", "enable"]),
tcp_tunnel_sharing=dict(required=False, type="str", choices=["private", "shared", "express-shared"]),
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, )
# MODULE PARAMGRAM
paramgram = {
"mode": module.params["mode"],
"adom": module.params["adom"],
"transparent": module.params["transparent"],
"name": module.params["name"],
"comments": module.params["comments"],
"auth-group": module.params["auth_group"],
"cifs": {
"byte-caching": module.params["cifs_byte_caching"],
"log-traffic": module.params["cifs_log_traffic"],
"port": module.params["cifs_port"],
"prefer-chunking": module.params["cifs_prefer_chunking"],
"secure-tunnel": module.params["cifs_secure_tunnel"],
"status": module.params["cifs_status"],
"tunnel-sharing": module.params["cifs_tunnel_sharing"],
},
"ftp": {
"byte-caching": module.params["ftp_byte_caching"],
"log-traffic": module.params["ftp_log_traffic"],
"port": module.params["ftp_port"],
"prefer-chunking": module.params["ftp_prefer_chunking"],
"secure-tunnel": module.params["ftp_secure_tunnel"],
"status": module.params["ftp_status"],
"tunnel-sharing": module.params["ftp_tunnel_sharing"],
},
"http": {
"byte-caching": module.params["http_byte_caching"],
"log-traffic": module.params["http_log_traffic"],
"port": module.params["http_port"],
"prefer-chunking": module.params["http_prefer_chunking"],
"secure-tunnel": module.params["http_secure_tunnel"],
"ssl": module.params["http_ssl"],
"ssl-port": module.params["http_ssl_port"],
"status": module.params["http_status"],
"tunnel-non-http": module.params["http_tunnel_non_http"],
"tunnel-sharing": module.params["http_tunnel_sharing"],
"unknown-http-version": module.params["http_unknown_http_version"],
},
"mapi": {
"byte-caching": module.params["mapi_byte_caching"],
"log-traffic": module.params["mapi_log_traffic"],
"port": module.params["mapi_port"],
"secure-tunnel": module.params["mapi_secure_tunnel"],
"status": module.params["mapi_status"],
"tunnel-sharing": module.params["mapi_tunnel_sharing"],
},
"tcp": {
"byte-caching": module.params["tcp_byte_caching"],
"byte-caching-opt": module.params["tcp_byte_caching_opt"],
"log-traffic": module.params["tcp_log_traffic"],
"port": module.params["tcp_port"],
"secure-tunnel": module.params["tcp_secure_tunnel"],
"ssl": module.params["tcp_ssl"],
"ssl-port": module.params["tcp_ssl_port"],
"status": module.params["tcp_status"],
"tunnel-sharing": module.params["tcp_tunnel_sharing"],
}
}
module.paramgram = paramgram
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
fmgr = FortiManagerHandler(connection, module)
fmgr.tools = FMGRCommon()
else:
module.fail_json(**FAIL_SOCKET_MSG)
list_overrides = ['cifs', 'ftp', 'http', 'mapi', 'tcp']
paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides,
paramgram=paramgram, module=module)
results = DEFAULT_RESULT_OBJ
try:
results = fmgr_wanopt_profile_modify(fmgr, paramgram)
fmgr.govern_response(module=module, results=results,
ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
except Exception as err:
raise FMGBaseException(err)
return module.exit_json(**results[1])
# Standard script entry point: run the Ansible module's main() when executed directly.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
202463 | <reponame>KyungMinJin/Pointnet<filename>setup.py
from setuptools import setup
# Minimal packaging configuration for the `pointnet` library.
setup(
    name='pointnet',
    version='0.0.1',
    packages=['pointnet'],
    package_dir={'pointnet': 'pointnet'},
    install_requires=['torch', 'tqdm', 'plyfile'],
)
| StarcoderdataPython |
3540915 | import spacy
class Lemmatizer:
    """Thin wrapper around a spaCy pipeline for inspecting token lemmas."""

    def __init__(self, model='en_core_web_sm'):
        # Load the spaCy language model once per instance.
        self.nlp = spacy.load(model)

    def _print_lemmas(self, sentence: str):
        """Tokenize *sentence* and print text, POS tag, lemma hash and lemma."""
        doc = self.nlp(sentence)
        for tok in doc:
            print(f"{tok.text:{12}} {tok.pos_:{6}} {tok.lemma:<{20}} {tok.lemma_}")
if __name__ == "__main__":
    # Demo: lemmatize a sample sentence when run as a script.
    sample = 'I am a student studying various Natural Language Processing algorothms to obtain a deeper understanding.'
    lemmatizer = Lemmatizer()
    lemmatizer._print_lemmas(sample)
| StarcoderdataPython |
3512813 | <gh_stars>10-100
from __future__ import print_function
from pybilt.bilayer_analyzer.prefab_analysis_protocols import com_lateral_rdf
def test_prefab_protocol_com_lateral_rdf():
    """Smoke-test the com_lateral_rdf prefab protocol on the sample bilayer."""
    selection = "resname POPC DOPE TLCL2"
    print("Run...")
    com_lateral_rdf(
        structure_file='../pybilt/sample_bilayer/sample_bilayer.psf',
        trajectory_file='../pybilt/sample_bilayer/sample_bilayer_10frames.dcd',
        bilayer_selection_string=selection,
        resnames=['POPC', 'DOPE', 'TLCL2'],
        frame_interval=1,
    )
# Allow running this test file directly (outside a test runner).
if __name__ == '__main__':
    test_prefab_protocol_com_lateral_rdf()
| StarcoderdataPython |
3463570 | <filename>db/migrations/0035_languagelevel_description.py
# Generated by Django 3.1.5 on 2021-02-15 13:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional free-text ``description`` field to LanguageLevel."""

    # Must be applied after the previous schema state of the ``db`` app.
    dependencies = [
        ('db', '0034_auto_20210215_1341'),
    ]

    operations = [
        # Nullable and blank so existing rows need no default value.
        migrations.AddField(
            model_name='languagelevel',
            name='description',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
| StarcoderdataPython |
3470765 | from django.conf import settings
from django.test import TestCase, override_settings
from institution.exceptions import (InvalidInstitutionalEmailAddress, InvalidInstitutionalIndentityProvider)
from institution.models import Institution
class InstitutionTests(TestCase):
    """Tests for Institution predicates, validators and support-email lookup."""

    def _check_institution_system(self, institution):
        """Assert is_sunbird / is_hawk agree with the institution's base domain."""
        inst_name = institution.base_domain.split('.')[0]
        iss = institution.is_sunbird
        ics = institution.is_hawk
        # swan/aber institutions run on Sunbird; cardiff/bangor on Hawk.
        if inst_name in ['swan', 'aber']:
            self.assertTrue(iss)
            self.assertFalse(ics)
        elif inst_name in ['cardiff', 'bangor']:
            self.assertTrue(ics)
            self.assertFalse(iss)
        else:
            raise ValueError(f'Institution {inst_name} not recognised')

    def test_institutional_predicates(self):
        """
        Ensure institutional methods (e.g. is_swan, is_swan_system etc) return correct values
        """
        institutions = Institution.objects.filter(name__in=['swan', 'aber', 'cardiff', 'bangor'])
        # Pairwise check: is_<name> must be True exactly when the predicate's
        # institution is the same database record.
        for institution in institutions:
            for institution_predicate in institutions:
                cond = institution.id == institution_predicate.id
                inst_name = institution_predicate.base_domain.split('.')[0]
                property_name = f'is_{inst_name}'
                property_val = getattr(institution, property_name)
                self.assertEqual(cond, property_val)
            self._check_institution_system(institution)

    def test_invalid_institutional_system(self):
        """An institution with an unrecognised base domain fails the system check."""
        with self.assertRaises(ValueError) as e:
            institution = Institution.objects.create(
                name='Example University',
                base_domain='example.ac.uk',
                identity_provider='https://idp.example.ac.uk/shibboleth',
                logo_path='/static/img/example-logo.png',
            )
            self._check_institution_system(institution)
        self.assertEqual(str(e.exception), 'Institution example not recognised')

    def test_valid_institutional_email_address(self):
        """A supported email domain validates successfully."""
        self.assertTrue(Institution.is_valid_email_address('<EMAIL>'))

    def test_invalid_institutional_email_address(self):
        """An unsupported email domain raises InvalidInstitutionalEmailAddress."""
        with self.assertRaises(InvalidInstitutionalEmailAddress) as e:
            Institution.is_valid_email_address('<EMAIL>')
        self.assertEqual(str(e.exception), 'Email address domain is not supported.')

    def test_valid_institutional_identity_provider(self):
        """A supported Shibboleth identity provider validates successfully."""
        self.assertTrue(Institution.is_valid_identity_provider('https://idp.bangor.ac.uk/shibboleth'))

    def test_invalid_institutional_identity_provider(self):
        """An unsupported identity provider raises InvalidInstitutionalIndentityProvider."""
        with self.assertRaises(InvalidInstitutionalIndentityProvider) as e:
            Institution.is_valid_identity_provider('https://idp.invalid-identity-provider.ac.uk/shibboleth')
        self.assertEqual(str(e.exception), 'Identity provider is not supported.')

    def test_id_str_produced(self):
        """id_str() slugifies the institution name (lowercase, hyphenated)."""
        institution = Institution.objects.create(
            name='example University',
            base_domain='example.ac.uk',
            identity_provider='https://example.ac.uk/shibboleth',
        )
        self.assertEqual(institution.id_str(), "example-university")

    @override_settings(DEFAULT_SUPPORT_EMAIL='<EMAIL>')
    def test_parse_support_email_from_user_email(self):
        """
        Ensure the correct support email address is returned.
        """
        institution = Institution.objects.create(
            name='Example University',
            base_domain='example.ac.uk',
            support_email='<EMAIL>',
        )
        # NOTE(review): the original addresses were scrubbed to <EMAIL>, so the
        # two dict keys collapse into one entry here — confirm against the
        # unscrubbed source before relying on both cases running.
        test_cases = {
            "<EMAIL>": institution.support_email,
            "<EMAIL>": settings.DEFAULT_SUPPORT_EMAIL
        }
        for user_email, support_email in test_cases.items():
            result = Institution.parse_support_email_from_user_email(user_email)
            self.assertEqual(result, support_email)
| StarcoderdataPython |
1706595 | import pyqtgraph
from pyqtgraph.Qt import QtGui
import numpy as np
from osu_analysis import StdScoreData
from app.data_recording.data import RecData
class DevGraphAngle(QtGui.QWidget):
    """Plots a per-play hit statistic (deviation or mean offset) versus note angle.

    Hits are binned by BPM-like rate and angle; each occupied BPM bin becomes
    one coloured point series on the graph.
    """

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)

        # Which offset column to analyse: cursor x, cursor y, or tap timing.
        self.DEV_DATA_X = 0
        self.DEV_DATA_Y = 1
        self.DEV_DATA_T = 2

        # Which per-bin statistic to compute: mean offset or standard deviation.
        self.DEV_TYPE_AVG = 0
        self.DEV_TYPE_DEV = 1

        # Minimum number of hits a (rate, angle) bin needs to be counted.
        self.NEEDED_NUM_DATA_POINTS = 30

        # Current selection: standard deviation of x-offsets.
        self.__dev_data_select = self.DEV_DATA_X
        self.__dev_type_select = self.DEV_TYPE_DEV
        # When True, points sharing the same angle are averaged before plotting.
        self.__avg_data_points = True

        # Main graph
        self.__graph = pyqtgraph.PlotWidget(title='Aim dev-x (angle)')
        self.__graph.getPlotItem().getAxis('left').enableAutoSIPrefix(False)
        self.__graph.getPlotItem().getAxis('bottom').enableAutoSIPrefix(False)
        self.__graph.enableAutoRange(axis='x', enable=False)
        self.__graph.enableAutoRange(axis='y', enable=False)
        self.__graph.setLimits(xMin=-10, xMax=190, yMin=-10, yMax=200)
        self.__graph.setRange(xRange=[-10, 190], yRange=[-10, 20])
        self.__graph.setLabel('left', 'deviation (averaged)', units='σ', unitPrefix='')
        self.__graph.setLabel('bottom', 'angle', units='deg', unitPrefix='')
        self.__graph.addLegend()

        # Deviation marker indicating expected deviation according to set CS
        self.__dev_marker_95 = pyqtgraph.InfiniteLine(angle=0, movable=False, pen=pyqtgraph.mkPen(color=(255, 100, 0, 100), style=pyqtgraph.QtCore.Qt.DashLine))
        self.__graph.addItem(self.__dev_marker_95, ignoreBounds=True)

        # Used to set text in legend item
        self.__label_style = pyqtgraph.PlotDataItem(pen=(0,0,0))
        self.__graph.getPlotItem().legend.addItem(self.__label_style, '')
        self.__text = self.__graph.getPlotItem().legend.getLabel(self.__label_style)

        # Put it all together
        self.__layout = QtGui.QHBoxLayout(self)
        self.__layout.setContentsMargins(0, 0, 0, 0)
        self.__layout.setSpacing(2)
        self.__layout.addWidget(self.__graph)

    def __get_deviation_data(self, play_data):
        '''
        x-axis: angles
        y-axis: deviation or mean
        color: bpm

        Meant to be used on single play and not multiple plays
        '''
        # Filters to get just hitcircles with valid hits
        data_filter = np.ones(play_data.shape[0], dtype=bool)

        # Filter out sliders
        # A press immediately followed by a hold/release belongs to a slider;
        # keep only presses whose next action is neither.
        data_filter[:-1] = \
            (play_data[:-1, RecData.ACT_TYPE] == StdScoreData.ACTION_PRESS) & ~(
                (play_data[1:, RecData.ACT_TYPE] == StdScoreData.ACTION_HOLD) | \
                (play_data[1:, RecData.ACT_TYPE] == StdScoreData.ACTION_RELEASE)
            )

        # Select hit presses
        data_filter &= (play_data[:, RecData.HIT_TYPE] == StdScoreData.TYPE_HITP)

        # Apply filter
        play_data = play_data[data_filter]

        # Gather relevant data
        # NOTE(review): 15000/dt looks like a ms-interval -> BPM-style rate
        # conversion — confirm the intended units of RecData.DT.
        data_c = 15000/play_data[:, RecData.DT]
        data_x = play_data[:, RecData.ANGLE]
        # Pick the offset column matching the current data selection.
        if self.__dev_data_select == self.DEV_DATA_X:
            data_y = play_data[:, RecData.X_OFFSETS]
        elif self.__dev_data_select == self.DEV_DATA_Y:
            data_y = play_data[:, RecData.Y_OFFSETS]
        elif self.__dev_data_select == self.DEV_DATA_T:
            data_y = play_data[:, RecData.T_OFFSETS]

        #              MIN  MAX  MIN DELTA
        chunks_c = [ 0, 400, 20 ]  # BPM, 20 bins max
        chunks_x = [ 0, 180, 3 ]   # Angle, 60 bins max

        # Filter out data outside the range
        range_filter = \
            (chunks_c[0] <= data_c) & (data_c <= chunks_c[1]) & \
            (chunks_x[0] <= data_x) & (data_x <= chunks_x[1])

        data_c = data_c[range_filter]
        data_x = data_x[range_filter]
        data_y = data_y[range_filter]

        # Reduce data to bins
        num_bins_c = (chunks_c[1] - chunks_c[0])//chunks_c[2]
        num_bins_x = (chunks_x[1] - chunks_x[0])//chunks_x[2]

        # Bin edges, then the bin index each data point falls into.
        dev_data_c = np.linspace(chunks_c[0], chunks_c[1], num_bins_c)
        dev_data_x = np.linspace(chunks_x[0], chunks_x[1], num_bins_x)

        idx_data_c = np.digitize(data_c, dev_data_c) - 1
        idx_data_x = np.digitize(data_x, dev_data_x) - 1

        c_unique_idxs = np.unique(idx_data_c)
        x_unique_idxs = np.unique(idx_data_x)

        # One output row per occupied (rate, angle) bin: [ statistic, angle, bpm ].
        # NOTE(review): bins skipped below (too few points) keep their
        # zero-initialized row and are later plotted at (0, 0) — confirm
        # whether those rows should be dropped instead.
        dev_data = np.zeros((c_unique_idxs.shape[0]*x_unique_idxs.shape[0], 3), dtype=float)

        for c_idx in range(c_unique_idxs.shape[0]):
            for x_idx in range(x_unique_idxs.shape[0]):
                data_select = (idx_data_c == c_unique_idxs[c_idx]) & (idx_data_x == x_unique_idxs[x_idx])
                # Require enough samples for a meaningful statistic.
                if np.sum(data_select) < self.NEEDED_NUM_DATA_POINTS:
                    continue

                if self.__dev_type_select == self.DEV_TYPE_AVG:
                    dev_data_y = np.mean(data_y[data_select])
                elif self.__dev_type_select == self.DEV_TYPE_DEV:
                    dev_data_y = np.std(data_y[data_select])
                else:
                    # NOTE(review): returning None here makes plot_data fail on
                    # dev_data.shape — consider raising instead.
                    print('Unknown deviation type')
                    return

                idx_dev_data = c_idx*x_unique_idxs.shape[0] + x_idx
                dev_data[idx_dev_data, 0] = dev_data_y
                dev_data[idx_dev_data, 1] = dev_data_x[x_unique_idxs[x_idx]]
                dev_data[idx_dev_data, 2] = dev_data_c[c_unique_idxs[c_idx]]

        return dev_data

    def plot_data(self, play_data):
        """Recompute the binned statistic from *play_data* and redraw the graph."""
        dev_data = self.__get_deviation_data(play_data)

        # Clear plots for redraw
        self.__graph.clearPlots()

        if dev_data.shape[0] == 0:
            return

        bpm_data = dev_data[:, 2]
        unique_bpms = np.unique(bpm_data)

        # Colour ramp over the observed BPM range: blue -> green -> red.
        bpm_lut = pyqtgraph.ColorMap(
            np.linspace(min(unique_bpms), max(unique_bpms), 3),
            np.array(
                [
                    [ 0, 100, 255, 200],
                    [100, 255, 100, 200],
                    [255, 100, 100, 200],
                ]
            )
        )

        # Main plot - deviation vs osu!px
        # Adds a plot for every unique BPM recorded
        for bpm in unique_bpms:
            data_select = (bpm_data == bpm)
            if not any(data_select):
                # Selected region has no data. Nothing else to do
                continue

            data_y = dev_data[data_select, 0]
            data_x = dev_data[data_select, 1]

            if self.__avg_data_points:
                # Average overlapping data points (those that fall on same angle)
                data_y = np.asarray([ np.sort(data_y[data_x == x]).mean() for x in np.unique(data_x) ])

                unique_data_x = np.unique(data_x)

                # Get sort mapping to make points on line graph connect in proper order
                idx_sort = np.argsort(unique_data_x)
                data_x = unique_data_x[idx_sort]
                data_y = data_y[idx_sort]

            # Plot color
            color = bpm_lut.map(bpm, 'qcolor')

            self.__graph.plot(x=data_x, y=data_y, symbol='o', symbolPen=None, symbolSize=5, pen=None, symbolBrush=color, name=f'{bpm:.2f} bpm')

        # Commented-out linear-regression compensation experiment, kept for reference.
        '''
        m, b = MathUtils.linear_regresion(angles, stdevs)
        if type(m) == type(None) or type(b) == type(None):
            self.__graph.plot(x=angles, y=stdevs, symbol='o', symbolPen=None, symbolSize=5, pen=None, symbolBrush=color, name=f'{bpm} bpm')
            continue

        if self.model_compensation:
            y_model = m*angles + b
            self.__graph.plot(x=angles, y=stdevs - y_model, symbol='o', symbolPen=None, symbolSize=5, pen=None, symbolBrush=color, name=f'{bpm} bpm σ = {np.std(stdevs - y_model):.2f} m={m:.5f} b={b:.2f}')
        else:
            self.__graph.plot(x=angles, y=stdevs, symbol='o', symbolPen=None, symbolSize=5, pen=None, symbolBrush=color, name=f'{bpm:.0f} bpm')
        '''

    def set_dev(self, dev):
        """Position the expected-deviation marker line on the y axis.

        NOTE(review): the marker is placed at dev/4 — presumably converting a
        95% (~±2σ) window into one σ; confirm the intended scaling.
        """
        self.__dev_marker_95.setPos(dev/4)
| StarcoderdataPython |
1638551 | import json
import io
from typing import Iterable, Dict, List, Optional, Tuple
from allennlp.data import DatasetReader, Instance, Token, TokenIndexer, Field
from allennlp.data.fields import MetadataField, TextField, LabelField, ListField, SpanField, \
SequenceLabelField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from overrides import overrides
from eventx import NEGATIVE_TRIGGER_LABEL, NEGATIVE_ARGUMENT_LABEL
@DatasetReader.register('smartdata-eventx-reader')
class SmartdataEventxReader(DatasetReader):
    """AllenNLP dataset reader for Smartdata event-extraction (eventx) JSON lines.

    Each input line holds one example with tokens, NER tags, entities
    (including trigger entities) and gold events. Examples are converted into
    instances carrying per-trigger event labels and a trigger-by-entity matrix
    of argument role labels.
    """

    def __init__(self,
                 lazy: bool = False,
                 token_indexers: Dict[str, TokenIndexer] = None) -> None:
        super().__init__(lazy=lazy)
        # Default to plain single-id token indexing when none is configured.
        self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}

    @overrides
    def _read(self, file_path: str) -> Iterable[Instance]:
        """Yield one Instance per JSON line in *file_path*."""
        with io.open(file_path, 'r', encoding='utf-8') as f:
            for line in f.readlines():
                example = json.loads(line)
                entities = example['entities']
                # Trigger candidates are the entities explicitly typed "trigger".
                triggers = [e for e in entities if e['entity_type'].lower() == 'trigger']
                entity_spans = [(e['start'], e['end']) for e in entities]
                trigger_spans = [(t['start'], t['end']) for t in triggers]
                events = example['events']
                entity_ids = [e['id'] for e in entities]
                trigger_ids = [t['id'] for t in triggers]

                # If no triggers are found the model can not learn anything from this instance, so skip it
                if len(triggers) == 0:
                    continue

                # Extract event trigger labels
                trigger_labels = []
                id_to_label_pairs = [(event['trigger']['id'], event['event_type'])
                                     for event in events]
                trigger_id_to_label = dict(id_to_label_pairs)
                # Triggers that belong to no gold event get the negative label.
                for trigger in triggers:
                    trigger_id = trigger['id']
                    if trigger_id in trigger_id_to_label:
                        trigger_label = trigger_id_to_label[trigger_id]
                    else:
                        trigger_label = NEGATIVE_TRIGGER_LABEL
                    trigger_labels.append(trigger_label)

                # Extract argument role labels
                # Initialize the argument roles to be the negative class by default
                arg_role_labels = [[NEGATIVE_ARGUMENT_LABEL for _ in range(len(entity_spans))]
                                   for _ in range(len(trigger_spans))]
                for event in events:
                    trigger_idx = trigger_ids.index(event['trigger']['id'])
                    for argument in event['arguments']:
                        entity_idx = entity_ids.index(argument['id'])
                        # Set positive event argument roles overwriting the default
                        arg_role_labels[trigger_idx][entity_idx] = argument['role']

                yield self.text_to_instance(tokens=example['tokens'],
                                            ner_tags=example['ner_tags'],
                                            entity_spans=entity_spans,
                                            trigger_spans=trigger_spans,
                                            trigger_labels=trigger_labels,
                                            arg_role_labels=arg_role_labels)

    @overrides
    def text_to_instance(self,
                         tokens: List[str],
                         ner_tags: List[str],
                         entity_spans: List[Tuple[int, int]],
                         trigger_spans: List[Tuple[int, int]],
                         trigger_labels: Optional[List[str]] = None,
                         arg_role_labels: Optional[List[List[str]]] = None) -> Instance:
        """Build an Instance from pre-extracted spans and (optional) labels.

        Input spans appear to be end-exclusive (note the ``span[1] - 1``
        conversion to SpanField's inclusive-end convention). Label fields are
        attached only when provided, so the same method serves both training
        and prediction.
        """
        assert len(trigger_spans) > 0, 'Examples without triggers are not supported'
        text_field = TextField([Token(t) for t in tokens], token_indexers=self._token_indexers)
        entity_spans_field = ListField([
            SpanField(span_start=span[0], span_end=span[1] - 1, sequence_field=text_field)
            for span in entity_spans
        ])
        entity_tags_field = SequenceLabelField(labels=ner_tags,
                                               sequence_field=text_field,
                                               label_namespace='entity_tags')
        trigger_spans_field = ListField([
            SpanField(span_start=span[0], span_end=span[1] - 1, sequence_field=text_field)
            for span in trigger_spans
        ])
        fields: Dict[str, Field] = {
            'metadata': MetadataField({"words": tokens}),
            'tokens': text_field,
            'entity_tags': entity_tags_field,
            'entity_spans': entity_spans_field,
            'trigger_spans': trigger_spans_field,
        }
        # Optionally add trigger labels
        if trigger_labels is not None:
            trigger_labels_field = ListField([
                LabelField(label=trigger_label, label_namespace='trigger_labels')
                for trigger_label in trigger_labels
            ])
            fields['trigger_labels'] = trigger_labels_field
        # Optionally add argument role labels
        if arg_role_labels is not None:
            arg_role_labels_field = ListField([
                ListField([LabelField(label=label, label_namespace='arg_role_labels')
                           for label in trigger_role_labels])
                for trigger_role_labels in arg_role_labels
            ])
            fields['arg_roles'] = arg_role_labels_field
        return Instance(fields)
| StarcoderdataPython |
3233241 | <gh_stars>1-10
# import pandas as pd
import numpy as np
import os
import sys
# from sklearn import preprocessing
# import seaborn as sns
# Build normalized train/test CSV datasets from raw WiFi RSSI measurements.
#
# For each configured sample size the script:
#   1. loads one raw measurement file per class and drops inf/NaN values,
#   2. truncates every class to the shortest class length,
#   3. slices the 1-D signals into fixed-size (optionally overlapping) samples,
#   4. splits into train/test, replaces outliers with the train mean and
#      z-normalizes with *train* statistics only (no test-set leakage),
#   5. writes "<class>,<v1>,<v2>,..." rows to <full_dir>_TRAIN / _TEST.
print("current working directory: ", os.getcwd())

# sample_size: how many consecutive raw values form one sample (row).
# for sample_size in [2**x for x in range(12, 0, -1)]:
for sample_size in [512]:
    print("sample size: ", str(sample_size))
    train_rate = 0.5  # rate of training data, test data rate is 1 - train_rate
    outlier_std_count = 10  # values beyond this many stds from the mean are outliers
    class_counter = 2  # number of classes
    class_number = None  # fixed label override; None -> label = dataset index
    prefix = 'CaseF'  # kept for path templates (currently unused)
    suffix = "WiFi_"  # kept for path templates (currently unused)
    los_type = 'LOS'  # LOS or NLOS

    datasets = []
    min_len = sys.maxsize  # minimum dataset length across classes
    csv_paths = [
        "data_journal/raw_6F_" + los_type + "/" + los_type + "_6F_1WIFI.txt",
        "data_journal/raw_15F_" + los_type + "/" + los_type + "_15F_1WIFI.txt"
    ]
    # FIX: `counter` was previously used but never defined (NameError);
    # enumerate() now supplies the dataset index.
    for counter, csv_path in enumerate(csv_paths):
        print('csv path: ', csv_path)
        dataset = np.genfromtxt(csv_path, delimiter="\n")
        # Drop +/- infinity entries, then NaNs produced by unparsable lines.
        for expression in ['-inf', '-Inf', 'inf', 'Inf']:
            dataset = np.delete(dataset, np.where(dataset == float(expression)))
        dataset = dataset[~np.isnan(dataset)]
        print("dataset" + str(counter))
        print("max: ", dataset.max())
        print("min: ", dataset.min())
        print("mean: ", dataset.mean())
        print("len: ", len(dataset))
        print("head: ", dataset[:10])
        if len(dataset) < min_len:
            min_len = len(dataset)
        datasets.append(dataset)
    print("min_len of the dataset for a class: ", min_len)

    # make sure we have the same number of samples from each class
    for i in range(len(datasets)):
        datasets[i] = datasets[i][:min_len]
    del min_len

    def get_samples(array):
        """Slice a 1-D signal into shuffled rows of `sample_size` values.

        With `with_step` enabled, windows overlap (step = sample_size // 4)
        to generate more samples; otherwise the array is cut into disjoint
        rows of sample_size values.
        """
        with_step = True
        if with_step:
            # make more data by overlapping the signals
            step = max(sample_size // 4, 1)
            samples = []
            # i - a start index for a sample
            for i in range(0, len(array) - sample_size, step):
                samples.append(array[i:i + sample_size])
            frame = np.array(samples)
        else:
            # cut off the data to a multiple of the sample size, then reshape
            # so that each row holds sample_size consecutive values
            len_final = len(array)
            len_reminder = len_final % (sample_size * 2)
            len_final -= len_reminder
            array = array[:len_final]
            frame = array.reshape(-1, sample_size)
        # shuffle the data
        np.random.shuffle(frame)
        return frame

    # get 2 dimensional datasets
    for i in range(len(datasets)):
        datasets[i] = get_samples(datasets[i])

    # divide into train/test datasets
    stop_train_index = int(len(datasets[0]) * train_rate)
    train_arrays = []
    test_arrays = []
    for dataset in datasets:
        train_arrays.append(dataset[:stop_train_index])
        test_arrays.append(dataset[stop_train_index:])

    # find the mean and std of the train data (the test set must be
    # normalized with train statistics to avoid information leakage)
    train_raw = np.concatenate(train_arrays, axis=0)
    mean = train_raw.mean()
    print("train mean value: ", mean)
    std = train_raw.std()
    print("train std: ", std)

    def get_final_data(data, class_number, mean, std, type=''):
        """Replace outliers with `mean`, z-normalize, prepend the label column."""
        count_outliers = np.sum(np.abs(data - mean) > outlier_std_count * std)
        print(f"count_outliers {type} (for class: {class_number}): ",
              count_outliers)
        data[np.abs(data - mean) > outlier_std_count * std] = mean
        # normalize the data
        data = (data - mean) / std
        # create and add column with the class number
        class_column = np.full((len(data), 1), class_number)
        data = np.concatenate((class_column, data), axis=1)
        return data

    # FIX: the label is now chosen per dataset. The old code assigned
    # `class_number = i` only while it was None, freezing the label at 0
    # after the first iteration and tagging every class identically.
    train_datasets = []
    for i, array in enumerate(train_arrays):
        label = i if class_number is None else class_number
        train_datasets.append(
            get_final_data(array, class_number=label, mean=mean,
                           std=std, type='train'))
    data_train = np.concatenate(train_datasets, axis=0)
    del train_datasets

    test_datasets = []
    for i, array in enumerate(test_arrays):
        label = i if class_number is None else class_number
        test_datasets.append(
            get_final_data(array, class_number=label, mean=mean,
                           std=std, type='test'))
    data_test = np.concatenate(test_datasets, axis=0)
    del test_datasets

    # Output locations.
    # FIX: removed the unused `dataset_name = ... str(distance) ...` line,
    # which referenced an undefined `distance` (NameError), and the unused
    # `sample_size = str(sample_size)` rebinding, which would break any run
    # with more than one sample size in the loop list.
    dir_name = str(class_counter) + '_classes_WiFi'
    full_dir = 'All/one_two_wifi_2/2_classes_WiFi'
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)

    def write_data(data_set, file_name):
        """Write rows as "<class>,<v1>,<v2>,..." lines to `file_name`."""
        with open(file_name, "w") as f:
            for row in data_set:
                # first column is the class number (starting from 0)
                f.write(str(int(row[0])))
                # then the normalized values starting from position 1
                for value in row[1:]:
                    f.write("," + str(value))
                f.write("\n")

    write_data(data_train, full_dir + "_TRAIN")
    write_data(data_test, full_dir + "_TEST")

    print("normalized train mean (should be close to 0): ", data_train.mean())
    print("normalized train std: ", data_train.std())
    print("normalized test mean (should be close to 0): ", data_test.mean())
    print("normalized test std: ", data_test.std())
| StarcoderdataPython |
11360660 | <reponame>GeographicaGS/GdalReclassify<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# Author: <NAME>, 2014.
"""
Library to reclassify raster data using GDAL.
Based on the command line utility gdal_reclasify.py
developed by <NAME>
https://github.com/chiatt/gdal_reclassify
"""
import sys
from osgeo import gdal
from gdalconst import *
import numpy as np
import operator
# Register all known GDAL drivers once at import time.
gdal.AllRegister()
class GdalReclassify(object):
    """Reclassify a raster block-by-block using GDAL and numpy.select.

    Input conditions (e.g. "<5", ">=10") are evaluated in order; cells
    matching a condition receive the corresponding output class value,
    everything else falls back to the default value.
    """

    def __init__(self, infile, outfile):
        # Source raster path and destination raster path.
        self.infile = infile
        self.outfile = outfile

    def __getIntType(self, array_of_numbers):
        """Return the narrowest numpy integer dtype able to hold all values."""
        low, high = min(array_of_numbers), max(array_of_numbers)
        # Candidate (min, max, dtype) ranges ordered narrowest first.
        # NOTE(review): the (-128, 127) range maps to np.int16 rather than
        # np.int8 — presumably because GDAL has no signed 8-bit raster type
        # (see the int8 -> GDT_Int16 mapping in __parseOutClasses); confirm.
        int_types = [
            (0, 255, np.uint8),
            (-128, 127, np.int16),
            (0, 65535, np.uint16),
            (-32768, 32767, np.int16),
            (0, 4294967295, np.uint32),
            (-2147483648, 2147483647, np.int32),
            (0, 18446744073709551615, np.uint64),
            (-9223372036854775808, 9223372036854775807, np.int64)
        ]
        for i in int_types:
            if low >= i[0] and high <= i[1]:
                int_np_type = i[2]
                break
        # NOTE(review): values outside every range leave int_np_type unbound
        # and raise NameError here.
        return int_np_type

    def __parseOutClasses(self, number_string, default=None):
        """Parse output class strings into (numpy dtype, GDAL dtype, values)."""
        # numpy dtype -> matching GDAL raster data type.
        data_types = {
            np.dtype(np.uint8): GDT_Byte,
            np.dtype(np.int8): GDT_Int16,
            np.dtype(np.uint16): GDT_UInt16,
            np.dtype(np.int16): GDT_Int16,
            np.dtype(np.uint32): GDT_UInt32,
            np.dtype(np.int32): GDT_Int32,
            np.dtype(np.float32): GDT_Float32,
            np.dtype(np.int64): GDT_Int32,
            np.dtype(np.float64): GDT_Float64
        }
        out_classes = [i.strip() for i in number_string]
        # Default to integers; switch to float if any value contains a
        # decimal point, or follow the type of the explicit default value.
        pytype = int
        np_dtype = np.int
        for i in out_classes:
            if '.' in i:
                pytype = float
        if default:
            pytype = type(default)
        out_classes_parsed = [pytype(g) for g in out_classes]
        if pytype == float:
            np_dtype = np.float_
        else:
            np_dtype = self.__getIntType(out_classes_parsed)
        gdal_dtype = data_types[np.dtype(np_dtype)]
        return np_dtype, gdal_dtype, out_classes_parsed

    def __parseDefault(self, default_in):
        """Parse the default value string: float if it has a '.', else int."""
        if '.' in default_in:
            default_out = float(default_in)
        else:
            default_out = int(default_in)
        return default_out

    def __parseInClasses(self, conds, pytype):
        """Split condition strings like '<=5' into (operator, number) tuples."""
        parsed_conds = []
        for i in conds:
            oplist = ["!", "=", ">", "<"]
            op = ''
            num = ''
            # Separate the comparison characters from the numeric part.
            for j in i:
                if j in oplist:
                    op += j
                else:
                    num += j
            parsed_conds.append((op, pytype(num)))
        return parsed_conds

    def __reclassArray(self, np_array, in_classes, out_classes, np_dtype, default):
        """Map one raster block through the condition list with numpy.select."""
        # Compare as float unless the target dtype is an integer type.
        if np_dtype not in (np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.uint64):
            in_array = np_array.astype(float)
        else:
            in_array = np_array
        op_dict = {"<": operator.lt, "<=": operator.le, "==": operator.eq,
                   "!=": operator.ne, ">=": operator.ge, ">": operator.gt}
        try:
            #rr = np.piecewise(in_array, [op_dict[i[0]](in_array,i[1]) for i in in_classes], out_classes)
            # First matching condition wins; unmatched cells get `default`.
            select_result = np.select([op_dict[i[0]](in_array,i[1]) for i in in_classes], out_classes, default)
            select_result_type_set = select_result.astype(np_dtype)
        finally:
            # Drop the (possibly copied) float array promptly.
            in_array = None
        return select_result_type_set

    def processDataset(self, classes, reclasses, default, nodata, output_format, compress_type):
        """
        Much of the code in this function relating to reading and writing gdal
        datasets - especially reading block by block was acquired from
        <NAME>'s Utah State Python Programming GIS slides:
        http://www.gis.usu.edu/~chrisg/
        """
        if default:
            default = self.__parseDefault(default)
        np_dtype, gdal_dtype, out_classes = self.__parseOutClasses(reclasses, default)
        src_ds = gdal.Open(self.infile)
        if src_ds is None:
            print 'Could not open image'
            sys.exit(1)
        rows, cols = src_ds.RasterYSize, src_ds.RasterXSize
        transform = src_ds.GetGeoTransform()
        block_size = src_ds.GetRasterBand(1).GetBlockSize()
        proj = src_ds.GetProjection()
        # Create the destination dataset with the parsed GDAL data type.
        driver = gdal.GetDriverByName(output_format)
        dst_ds = driver.Create(self.outfile, cols, rows, 1, gdal_dtype, options = compress_type)
        #dst_ds = driver.Create(self.outfile, cols, rows, 1, 6, options = compress_type)
        out_band = dst_ds.GetRasterBand(1)
        x_block_size = block_size[0]
        y_block_size = block_size[1]
        # Probe one pixel to learn the source dtype: integer sources get int
        # condition values, everything else float.
        sample = src_ds.ReadAsArray(0, 0, 1, 1)
        pytype = float
        if sample.dtype in (np.uint8, np.int8, np.uint16, np.int16, np.uint32, np.int32, np.uint64):
            pytype = int
        in_classes = self.__parseInClasses(classes, pytype)
        # Walk the raster block by block (edge blocks may be smaller).
        for i in range(0, rows, y_block_size):
            if i + y_block_size < rows:
                num_rows = y_block_size
            else:
                num_rows = rows - i
            for j in range(0, cols, x_block_size):
                if j + x_block_size < cols:
                    num_cols = x_block_size
                else:
                    num_cols = cols - j
                block = src_ds.ReadAsArray(j, i, num_cols, num_rows)
                reclassed_block = self.__reclassArray(block, in_classes, out_classes, np_dtype, default)
                out_band.WriteArray(reclassed_block, j, i)
        out_band.FlushCache()
        dst_ds.SetGeoTransform(transform)
        if nodata in ["True", "true", "t", "T", "yes", "Yes", "Y", "y"]:
            out_band.SetNoDataValue(default)
            print 'setting', default, 'as no data value'
        out_band.GetStatistics(0, 1)
        dst_ds.SetProjection(proj)
        src_ds = None
| StarcoderdataPython |
5150221 | <reponame>youngage/pynetlinux<filename>pynetlinux/vconfig.py<gh_stars>1-10
"""
Interfaces for Linux tagged VLAN functionality.
"""
__author__ = '<EMAIL> (<NAME>)'
import fcntl
import struct
from . import ifconfig
"""
This file makes the following assumptions about data structures:
// From linux/if_vlan.h
enum vlan_ioctl_cmds {
ADD_VLAN_CMD,
DEL_VLAN_CMD,
SET_VLAN_INGRESS_PRIORITY_CMD,
SET_VLAN_EGRESS_PRIORITY_CMD,
GET_VLAN_INGRESS_PRIORITY_CMD,
GET_VLAN_EGRESS_PRIORITY_CMD,
SET_VLAN_NAME_TYPE_CMD,
SET_VLAN_FLAG_CMD,
GET_VLAN_REALDEV_NAME_CMD,
GET_VLAN_VID_CMD
};
enum vlan_flags {
VLAN_FLAG_REORDER_HDR = 0x1,
VLAN_FLAG_GVRP = 0x2,
};
enum vlan_name_types {
VLAN_NAME_TYPE_PLUS_VID, /* Name will look like: vlan0005 */
VLAN_NAME_TYPE_RAW_PLUS_VID, /* name will look like: eth1.0005 */
VLAN_NAME_TYPE_PLUS_VID_NO_PAD, /* Name will look like: vlan5 */
VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD, /* Name will look like: eth0.5 */
VLAN_NAME_TYPE_HIGHEST
};
struct vlan_ioctl_args {
int cmd; /* Should be one of the vlan_ioctl_cmds enum above. */
char device1[24];
union {
char device2[24];
int VID;
unsigned int skb_priority;
unsigned int name_type;
unsigned int bind_type;
unsigned int flag; /* Matches vlan_dev_info flags */
} u;
short vlan_qos;
};
"""
# From linux/sockios.h
SIOCGIFVLAN = 0x8982  # ioctl: read VLAN settings
SIOCSIFVLAN = 0x8983  # ioctl: write VLAN settings
# From linux/if_vlan.h
# Values for the `cmd` field of struct vlan_ioctl_args (vlan_ioctl_cmds enum,
# reproduced in the module comment above).
ADD_VLAN_CMD = 0
DEL_VLAN_CMD = 1
SET_VLAN_INGRESS_PRIORITY_CMD = 2
SET_VLAN_EGRESS_PRIORITY_CMD = 3
GET_VLAN_INGRESS_PRIORITY_CMD = 4
GET_VLAN_EGRESS_PRIORITY_CMD = 5
SET_VLAN_NAME_TYPE_CMD = 6
SET_VLAN_FLAG_CMD = 7
GET_VLAN_REALDEV_NAME_CMD = 8
GET_VLAN_VID_CMD = 9
class VlanInterface(ifconfig.Interface):
    '''Class representing a Linux vlan.'''

    def __init__(self, ifname, vid):
        # VLAN netdevs are named "<parent>.<vid>", e.g. "eth0.100".
        vlanname = ifname + '.' + str(vid)
        ifconfig.Interface.__init__(self, vlanname)

    def get_vid(self):
        '''Return the integer Vlan ID.'''
        return get_vid(self.name)

    def get_realdev_name(self):
        '''Get the underlying netdev for a VLAN interface.'''
        return get_realdev_name(self.name)

    def del_vlan(self):
        '''Delete the VLAN from this interface. The VlanInterface object
        will become unuseable after this, the kernel device no longer exists.'''
        # Pack struct vlan_ioctl_args: int cmd + 24-byte device name; the
        # remaining 26 bytes (union + vlan_qos) are left zeroed.
        vlanioc = struct.pack('i24s26x', DEL_VLAN_CMD, self.name)
        result = struct.unpack('i24s24sh', fcntl.ioctl(ifconfig.sockfd,
                                                       SIOCSIFVLAN, vlanioc))
def add_vlan(ifname, vid):
    '''Create a tagged VLAN with id *vid* on the interface named *ifname*.

    Returns True on success, False if the kernel rejected the ioctl.
    '''
    request = struct.pack('i24si22x', ADD_VLAN_CMD, ifname, vid)
    try:
        fcntl.ioctl(ifconfig.sockfd, SIOCSIFVLAN, request)
        return True
    except IOError:
        return False
def get_realdev_name(ifname):
    '''Return the name of the physical netdev underlying VLAN *ifname*.'''
    request = struct.pack('i24s26x', GET_VLAN_REALDEV_NAME_CMD, ifname)
    reply = fcntl.ioctl(ifconfig.sockfd, SIOCGIFVLAN, request)
    cmd, device1, device2 = struct.unpack('i24s24s2x', reply)
    return device2.rstrip('\0')
def get_vid(ifname):
    '''Get the interface's VLAN id.'''
    # cmd + device1[24] + padding covering the union and tail of
    # struct vlan_ioctl_args; the kernel writes the VID into the union.
    request = struct.pack('i24s26x', GET_VLAN_VID_CMD, ifname)
    reply = fcntl.ioctl(ifconfig.sockfd, SIOCGIFVLAN, request)
    fields = struct.unpack('i24si22x', reply)
    return int(fields[2])
def shutdown():
    ''' Shut down the library '''
    # Delegates cleanup to the underlying ifconfig module (which owns the
    # shared socket used for the ioctls above).
    ifconfig.shutdown()
| StarcoderdataPython |
# -*- coding: utf-8 -*-
"""Download documents from the Plymouth County Registry of Deeds (ROD)
The system used by the ROD to uniquely identify documents is Book and Page,
which stems from a historic practice of physically appending pages to an
archival book each time a new document was added to the record.
By providing a valid book and page number, this script will return a PDF
containing the relevant ROD document.
"""
import argparse
import os
import pathlib
import sys
import tempfile
from glob import glob
from time import sleep
from typing import Dict, Tuple
import requests
from fpdf import FPDF
from mypy_extensions import TypedDict
from requests.cookies import RequestsCookieJar
from requests.models import Response
from requests.sessions import Session
from splinter import Browser
from splinter.driver.webdriver.chrome import WebDriver
from gooey import Gooey, GooeyParser
TITLE: str = 'Plymouth County Registry of Deeds Downloader'
# Default pause between browser interactions, in seconds.
DEFAULT_SLEEP_TIME: float = 0.5
# Number of times main() re-attempts a failed download after the first try.
MAX_RETRIES: int = 2
# Shape of the dict used to format per-page image file names in download_pdf.
BookParamsDict = TypedDict('BookParamsDict', {
    'dir': str,
    'bk': int,
    'pg': int,
    'curr': int,
    'tot': int,
})
def _resource_path(relative_path: str) -> str:
    """Resolve *relative_path* against the application's base directory.

    When the app has been frozen by PyInstaller the bundle root is exposed
    as ``sys._MEIPASS``; otherwise resources live next to this file.

    Args:
        relative_path (str): Path relative to the current file

    Returns:
        str: absolute path to the resource
    """
    try:
        base_path = sys._MEIPASS
    except AttributeError:
        base_path = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(base_path, relative_path)
def _go_to_search_page(browser: WebDriver, book: int) -> None:
    """Open the search form that covers the requested book.

    The books available on the ROD are split across two search pages:
    books below 2393 live behind one link, newer books behind another.

    Args:
        browser (WebDriver): Selenium browser
        book (int): The requested book number

    Returns:
        None
    """
    menu_id: str = 'Navigator1_SearchCriteria1_menuLabel'
    search_button_id: str = (
        'Navigator1_SearchCriteria1_LinkButton02'
        if book < 2393
        else 'Navigator1_SearchCriteria1_LinkButton01')
    # Give the menu a moment to render before interacting with it.
    sleep(DEFAULT_SLEEP_TIME)
    browser.find_by_id(menu_id).first.mouse_over()
    browser.find_by_id(search_button_id).first.click()
def _fill_in_search_terms(browser: WebDriver, book: int, page: int) -> None:
    """Enter the book and page numbers into the search form and submit it.

    Args:
        browser (WebDriver): Selenium browser
        book (int): The requested book number
        page (int): The requested page number

    Returns:
        None
    """
    # Field order matters for nothing here, but Book is filled first to
    # match the form's visual layout.
    form_values = {
        'SearchFormEx1$ACSTextBox_Book': book,
        'SearchFormEx1$ACSTextBox_PageNumber': page,
    }
    for field_name, value in form_values.items():
        browser.fill(field_name, value)
    browser.find_by_id('SearchFormEx1_btnSearch').first.click()
def _select_document(browser: WebDriver) -> None:
    """Open the first search hit and switch to its image viewer.

    A given book/page pair identifies exactly one record, so the first
    result is always the one the user asked for.

    Args:
        browser (WebDriver): Selenium browser

    Returns:
        None
    """
    first_result_id = 'DocList1_GridView_Document_ctl02_ButtonRow_Book_0'
    browser.find_by_id(first_result_id).first.click()
    sleep(DEFAULT_SLEEP_TIME)
    browser.find_by_id('TabController1_ImageViewertabitem').first.click()
    # Wait for the viewer page to load.
    sleep(2)
    # Viewing the image opens a new tab/window; point Selenium at it.
    browser.windows.current = browser.windows.current.next
def _create_session(browser: WebDriver) -> Session:
    """Build a requests Session carrying the Selenium browser's cookies.

    Selenium isn't great at downloading files, but requests is. The ROD
    site requires the cookies the browser already holds, so they are
    copied into a fresh requests session.

    Args:
        browser (WebDriver): Selenium browser

    Returns:
        Session
    """
    session = requests.Session()
    requests.utils.cookiejar_from_dict(
        browser.cookies.all(), cookiejar=session.cookies)
    return session
def _get_number_of_pages(browser: WebDriver) -> int:
    """Read the document's total page count from the image viewer.

    A book/page tells us where a document begins, not where it ends. The
    viewer shows a label in the form "Page x of y"; the trailing "y" is
    the number of pages to read.

    Args:
        browser (WebDriver): Selenium browser

    Returns:
        int: number of pages to read
    """
    label = browser.find_by_id('ImageViewer1_lblPageNum').first.value
    # Last space-separated token of "Page x of y" is the total.
    return int(label.split(' ')[-1])
def _get_page_url(browser: WebDriver) -> str:
    """Return the URL of a high-resolution JPEG for the current page.

    By default the viewer serves an image with very small dimensions;
    bumping the ZOOM query parameter yields a higher resolution version.

    Args:
        browser (WebDriver): Selenium browser

    Returns:
        str: URL of the image to download
    """
    # The <img> first shows a spinner GIF; give the real scan time to load.
    sleep(2)
    raw_url: str = browser.find_by_id('ImageViewer1_docImage')[0]['src']
    return raw_url.replace('ZOOM=1', 'ZOOM=6')
def _download_image(session: Session, image_url: str, directory: str,
                    img_name: str, img_range: Tuple[int, int]) -> None:
    """Download one page image into the designated directory.

    Args:
        session (Session): requests session carrying the browser's cookies
        image_url (str): URL of the image to download
        directory (str): Directory to save the image to
        img_name (str): Name of the image (without extension)
        img_range (tuple): (current_page, total_pages) for progress output

    Returns:
        None
    """
    file_path = os.path.join(directory, '{}.jpg'.format(img_name))
    with open(file_path, 'wb') as out_file:
        out_file.write(session.get(image_url).content)
    current, total = img_range
    print('Downloaded page {} of {}'.format(current, total))
def _go_to_next_page(browser: WebDriver) -> None:
    """Advance the image viewer to the document's next page.

    Args:
        browser (WebDriver): Selenium browser

    Returns:
        None
    """
    browser.find_by_id('ImageViewer1_BtnNext').first.click()
    # Allow the freshly requested page time to load.
    sleep(DEFAULT_SLEEP_TIME)
def _create_pdf(image_dir: str, output_dir: str, pdf_name: str) -> None:
    """Combine all downloaded page images into a single letter-size PDF.

    Args:
        image_dir (str): Directory containing the per-page ``*.jpg`` files
        output_dir (str): Directory where the output PDF should be saved
        pdf_name (str): File name to give the PDF

    Returns:
        None
    """
    # Sorting keeps the pages in order: file names embed zero-padded page
    # numbers, so lexicographic order equals page order.
    images: list = sorted(glob(os.path.join(image_dir, '*.jpg')))
    # BUG FIX: build the output path with os.path.join instead of the
    # hand-rolled '{}/{}'.format(...), which is wrong on Windows.
    output_path: str = os.path.join(output_dir, pdf_name)
    pdf: FPDF = FPDF(unit='in', format='letter')
    for image in images:
        pdf.add_page()
        pdf.image(image, 0, 0, 8.5, 11)
        # The source image is no longer needed once embedded in the PDF.
        os.remove(image)
    pdf.output(output_path, "F")
@Gooey(program_name=TITLE, image_dir=_resource_path('images'))
def get_book_and_page() -> argparse.Namespace:
    """Get user input to find a registry of deeds document. Also creates GUI.

    Args:
        None

    Returns:
        argparse.Namespace: with ``Directory``, ``Book`` and ``Page`` attributes
    """
    parser = GooeyParser(
        description=(
            'Enter a book and page number, and a PDF of the document will '
            'be saved to a folder of your choice'))
    download_dir = parser.add_argument_group("Download location")
    download_dir.add_argument(
        'Directory',
        help="Select where to save the downloaded files",
        widget='DirChooser',
        type=str,
        default=os.path.join(pathlib.Path.home(), 'Downloads'))
    book_page = parser.add_argument_group("Deed Information")
    book_page.add_argument(
        'Book',
        type=int,
        # Gooey runs the validator in the GUI before argparse's type=int.
        help='Book number',
        gooey_options={
            'validator': {
                'test': 'int(user_input)',
                'message': 'Must be an integer'
            }
        })
    book_page.add_argument(
        'Page',
        type=int,
        help='Page number',
        gooey_options={
            'validator': {
                'test': 'int(user_input)',
                'message': 'Must be an integer'
            }
        })
    args = parser.parse_args()
    return args
def download_pdf(book: int, page: int, output_dir: str) -> None:
    """Given a book and page number, download a PDF document from the ROD.

    Args:
        book (int): Book number
        page (int): Page number
        output_dir (str): Directory where the PDF should be saved

    Returns:
        None
    """
    print('Trying to obtain Book {} Page {}'.format(book, page))
    base_url: str = 'http://titleview.org/plymouthdeeds/'
    with tempfile.TemporaryDirectory() as page_download_dir:
        driver: str = _resource_path("chromedriver")
        browser: WebDriver = Browser(
            'chrome', executable_path=driver, headless=True)
        # BUG FIX: always close the headless Chrome. main() retries on any
        # exception, so without the try/finally every failed attempt leaked
        # a browser (and chromedriver) process.
        try:
            browser.visit(base_url)
            _go_to_search_page(browser, book)
            _fill_in_search_terms(browser, book, page)
            _select_document(browser)
            session: Session = _create_session(browser)
            current_page: int = 1
            total_pages: int = _get_number_of_pages(browser)
            while current_page <= total_pages:
                page_url: str = _get_page_url(browser)
                params: BookParamsDict = {
                    'dir': page_download_dir,
                    'bk': book,
                    'pg': page,
                    'curr': current_page,
                    'tot': total_pages,
                }
                img_range: Tuple[int, int] = (current_page, total_pages)
                # NOTE(review): page_name embeds the download dir, which
                # _download_image joins onto the same dir again; this works
                # because os.path.join keeps the absolute second argument.
                page_name: str = ('{dir}/bk_{bk:06d}_pg_{pg:06d}'
                                  '_{curr:02d}_of_{tot:02d}')
                page_name = page_name.format(**params)
                _download_image(session, page_url, page_download_dir,
                                page_name, img_range)
                if current_page != total_pages:
                    _go_to_next_page(browser)
                current_page += 1
        finally:
            browser.quit()
        pdf_name: str = 'plymouth_cty_reg_deeds_book{:06d}_page{:06d}.pdf'
        pdf_name = pdf_name.format(book, page)
        _create_pdf(page_download_dir, output_dir, pdf_name)
def main() -> None:
    """Entry point: collect user input, then download with bounded retries."""
    args: argparse.Namespace = get_book_and_page()
    download_dir: str = args.Directory
    # One initial attempt plus MAX_RETRIES re-attempts.
    for _attempt in range(MAX_RETRIES + 1):
        try:
            download_pdf(args.Book, args.Page, download_dir)
        except Exception as ex:
            print('Error! Retrying')
            print('Exception:\n{}\n'.format(ex))
        else:
            print('Success!')
            return
    raise ValueError('Unable to obtain document. Confirm the book and page '
                     'numbers entered are valid')
if __name__ == '__main__':
main()
| StarcoderdataPython |
350133 | <reponame>cbabalis/csa-streetmap
from casymda.blocks import Entity
class Truck(Entity):
    """ drives tours """
    # Travel speed, converted from km/h to metres per second.
    speed = 30 / 3.6  # km/h -> m/s
    # Image used to draw this entity in the visualisation.
    geo_icon = "main/visu/img/truck.png"
| StarcoderdataPython |
from particles import *
class Simulation():
    """Drive a small molecular-dynamics run of a water lattice.

    Generates an initial coordinate file, owns the output files, and steps
    a ``particles`` system forward with a velocity-Verlet-style loop.
    """
    def __init__(self, steps, input_file, box_length):
        # Number of integration steps to run.
        self.steps = steps
        self.gen_input(input_file,box_length)
        # NOTE(review): the positional arguments to particles() are not
        # documented here (0.0001 presumably a timestep, 300 a temperature)
        # -- confirm against the particles module.
        self.particles = particles(input_file, 0.0001, 300, 10)
        self.dump = open('dump_file','w')
        self.energies_file = open('energy','w')
        self.trajectory_file = open('trajectory.xyz','w')
        self.energies_file.write('Timestep' + ' ' + 'Total_Energy' + ' ' + 'Temperature' '\n')
        self.dump_frequency = 10 # outputs every n steps
    def gen_input(self, filename, box_length):
        """Write a cubic lattice of water molecules to *filename*.

        One template molecule (O, H, H) is translated on a grid with
        ``min_distance`` spacing along each axis; each line written is
        "atom_id mass molecule_id x y z".
        """
        # Grid spacing between molecule copies, in the template's units.
        min_distance = 3
        coordinates = open(filename,'w')
        atom_l = int(box_length/min_distance)
        # Template geometry: oxygen first, then the two hydrogens.
        water = ([1.2361419,1.0137761,-0.0612424],[0.5104418,0.8944555,0.5514190],[1.9926927,1.1973129,0.4956931])
        masses = ([16,1,1])
        x_vec = np.transpose([1,0,0])
        y_vec = np.transpose([0,1,0])
        z_vec = np.transpose([0,0,1])
        atom_count = 0
        molecule_count = 1
        for x in range(atom_l):
            for y in range(atom_l):
                for z in range(atom_l):
                    opt_water = water
                    # numpy broadcasting shifts all three template atoms
                    # to this grid cell at once.
                    opt_water = opt_water + x*min_distance*x_vec + y*min_distance*y_vec + z*min_distance*z_vec
                    for i in range(len(water)):
                        coordinates.write('%s %s %s %s %s %s \n' % (atom_count, masses[i], molecule_count, opt_water[i][0],opt_water[i][1],opt_water[i][2]))
                        atom_count += 1
                        # A new molecule id every len(water) atoms written.
                        if atom_count % len(water) == 0:
                            molecule_count += 1
        coordinates.close()
    def close_files(self):
        """Close every output file opened in __init__."""
        self.dump.close()
        self.energies_file.close()
        self.trajectory_file.close()
    def run(self):
        """Run the whole simulation, then close the output files."""
        self.particles.centre_velocity()
        self.particles.calc_intforces()
        self.particles.calc_extforces()
        for t in range(self.steps):
            self.update(t)
            #print("step " + str(t))
        self.close_files()
    def update(self, timestep):
        """Advance the system by one velocity-Verlet step.

        Writes energies/trajectory output every ``dump_frequency`` steps.
        """
        if timestep % self.dump_frequency == 0:
            self.total_energy, self.temperature = self.particles.calculate_hamiltonian()
            self.particles.write_output(timestep,self.dump,self.total_energy,self.temperature)
            self.particles.write_energies(timestep,self.energies_file)
            self.particles.write_trajectory(timestep,self.trajectory_file)
        self.particles.update_velocities() # intermediate velocity, t+1/2dt vel-verlet
        self.particles.update_positions()
        self.particles.PBC()
        self.particles.calc_intforces()
        self.particles.calc_extforces()
        self.particles.update_acceleration()
        self.particles.update_velocities() # computed velocity, with update acceleration at t + dt
        # NOTE(review): this final call's return value is discarded --
        # confirm whether it has side effects or is redundant work.
        self.particles.calculate_hamiltonian()
| StarcoderdataPython |
# tests/test_runner.py (aws-sfn-builder)
import pytest
from aws_sfn_builder import Machine, ResourceManager, Runner, State
@pytest.mark.parametrize("input_path,expected_resource_input", [
    [None, {"guid": "123-456"}],
    ["$", {"guid": "123-456"}],
    ["$.guid", "123-456"],
])
def test_format_resource_input_returns_filtered_input(input_path, expected_resource_input):
    """An InputPath of None or "$" passes state input through unchanged;
    a JSONPath like "$.guid" selects just that value."""
    state = State.parse({
        "InputPath": input_path
    })
    resource_input = state.format_state_input({"guid": "123-456"})
    assert expected_resource_input == resource_input
@pytest.mark.parametrize("result_path,expected_result", [
    [None, "ok"],
    ["$", "ok"],
    ["$.status", {"guid": "123-456", "status": "ok"}]
])
def test_format_result_returns_applied_result(result_path, expected_result):
    """A ResultPath of None or "$" replaces the input with the raw result;
    "$.status" grafts the result onto the input under that key."""
    state = State.parse({
        "ResultPath": result_path,
    })
    result = state.format_result_selector("ok")
    result = state.format_result({"guid": "123-456"}, result)
    assert expected_result == result
@pytest.mark.parametrize("result_path,expected_result", [
    [{
        "ResultSelector": {
            "ClusterId.$": "$.output.ClusterId",
            "ResourceType.$": "$.resourceType",
            "StaticValue": "foo"
        },
        "ResultPath": "$.EMROutput"
    }, {
        "OtherDataFromInput": {},
        "EMROutput": {
            "ResourceType": "elasticmapreduce",
            "ClusterId": "AKIAIOSFODNN7EXAMPLE",
            "StaticValue": "foo"
        }
    }],
    [
        {
            "ResultSelector": {
                "modifiedPayload": {
                    "body.$": "$.output.SdkHttpMetadata.HttpHeaders.Date",
                    "statusCode.$": "$.resourceType",
                    "requestId.$": "$.output.SdkResponseMetadata.RequestId"
                }
            },
            "ResultPath": "$.mipres_result"
        },
        {
            'OtherDataFromInput': {},
            'mipres_result':
                {
                    'modifiedPayload':
                        {
                            'body': 'Mon, 25 Nov 2019 19:41:29 GMT',
                            'statusCode': 'elasticmapreduce',
                            'requestId': '1234-5678-9012'
                        }
                }
        }
    ]
])
def test_format_result_selector_returns_applied_result(result_path, expected_result):
    """ResultSelector reshapes the raw result before ResultPath grafts it
    onto the state's input document."""
    state = State.parse(result_path)
    # NOTE(review): the name "input" shadows the builtin here.
    input = {
        "resourceType": "elasticmapreduce",
        "resource": "createCluster.sync",
        "output": {
            "SdkHttpMetadata": {
                "HttpHeaders": {
                    "Content-Length": "1112",
                    "Content-Type": "application/x-amz-JSON-1.1",
                    "Date": "Mon, 25 Nov 2019 19:41:29 GMT",
                    "x-amzn-RequestId": "1234-5678-9012"
                },
                "HttpStatusCode": 200
            },
            "SdkResponseMetadata": {
                "RequestId": "1234-5678-9012"
            },
            "ClusterId": "AKIAIOSFODNN7EXAMPLE"
        }
    }
    result = state.format_result_selector(input)
    result_final = state.format_result({"OtherDataFromInput": {}}, result)
    assert expected_result == result_final
@pytest.mark.parametrize("result_path,expected_result", [
    [{
        "ResultSelector": {
            "modifiedPayload": {
                "body.$": "$.Payload.body",
                "statusCode.$": "$.Payload.statusCode",
                "requestId.$": "$.SdkResponseMetadata.RequestId"
            }
        },
        "ResultPath": "$.TaskResult",
        "OutputPath": "$.TaskResult.modifiedPayload"
    }, {
        "body": "hello, world!",
        "statusCode": "200",
        "requestId": "88fba57b-adbe-467f-abf4-daca36fc9028"
    }]
])
def test_format_result_all_applied_result(result_path, expected_result):
    """get_output applies ResultSelector, ResultPath and OutputPath in one go."""
    state = State.parse(result_path)
    output = {
        "ExecutedVersion": "$LATEST",
        "Payload": {
            "statusCode": "200",
            "body": "hello, world!"
        },
        "SdkHttpMetadata": {
            "HttpHeaders": {
                "Connection": "keep-alive",
                "Content-Length": "43",
                "Content-Type": "application/json",
                "Date": "Thu, 16 Apr 2020 17:58:15 GMT",
                "X-Amz-Executed-Version": "$LATEST",
                "x-amzn-Remapped-Content-Length": "0",
                "x-amzn-RequestId": "88fba57b-adbe-467f-abf4-daca36fc9028",
                "X-Amzn-Trace-Id": "root=1-5e989cb6-90039fd8971196666b022b62;sampled=0"
            },
            "HttpStatusCode": 200
        },
        "SdkResponseMetadata": {
            "RequestId": "88fba57b-adbe-467f-abf4-daca36fc9028"
        },
        "StatusCode": 200
    }
    # Only the formatted result is asserted; next_state is not under test.
    next_state, result = state.get_output({"OtherDataFromInput": {}}, output)
    assert expected_result == result
@pytest.mark.parametrize("result_path,expected_result", [
    [{
        "InputPath": "$.library",
        "Parameters": {
            "staticValue": "Just a string",
            "catalog": {
                "myFavoriteMovie.$": "$.movies[0]"
            }
        }
    }, {
        "staticValue": "Just a string",
        "catalog": {
            "myFavoriteMovie": {
                "genre": "crime",
                "director": "<NAME>",
                "title": "Reservoir Dogs",
                "year": 1992
            }
        }
    }]
])
def test_format_input_all_applied_result(result_path, expected_result):
    """get_input applies InputPath first, then the Parameters template."""
    state = State.parse(result_path)
    # NOTE(review): the name "input" shadows the builtin here.
    input = {
        "version": 4,
        "library": {
            "movies": [
                {
                    "genre": "crime",
                    "director": "<NAME>",
                    "title": "Reservoir Dogs",
                    "year": 1992
                },
                {
                    "genre": "action",
                    "director": "<NAME>",
                    "title": "Mission: Impossible",
                    "year": 1996,
                    "staring": [
                        "<NAME>"
                    ]
                }
            ],
            "metadata": {
                "lastUpdated": "2020-05-27T08:00:00.000Z"
            }
        }
    }
    result = state.get_input(input)
    assert expected_result == result
def test_executes_mipres_input(example):
    """Parameters with a "[*]" projection are applied element-wise to a
    payload loaded from the "mipres.out" example fixture."""
    # test[0] is the state definition, test[1] the expected projection.
    test = [
        {
            "InputPath": "$",
            "Parameters": {
                "staticValue": "Just a string",
                "page[*]": {
                    "staticValue": "Just a string",
                    "error_count.$": "$.page[*].error_count",
                    "page.$": "$.page[*].page",
                    "TAG.$": "$.page[*].status.mipres_result.MIPRES_TAG"
                }
            }
        }, {
            'staticValue': 'Just a string',
            'page': [
                {'error_count': 1, 'page': '2'},
                {'error_count': 1, 'page': '3'},
                {'error_count': 0, 'page': '1', 'TAG': 'MiPres'},
                {'error_count': 1, 'page': '4'}]
        }
    ]
    result_path = test[0]
    expected_result = test[1]
    mipres_out = example("mipres.out")
    state = State.parse(result_path)
    result = state.get_input(mipres_out)
    assert expected_result == result
@pytest.mark.parametrize("output_path,expected_state_output", [
    [None, {"guid": "123-456"}],
    ["$", {"guid": "123-456"}],
    ["$.guid", "123-456"],
])
def test_format_state_output_returns_filtered_output(output_path, expected_state_output):
    """OutputPath filters the state's output the same way InputPath filters input."""
    state = State.parse({
        "OutputPath": output_path
    })
    state_output = state.format_state_output({"guid": "123-456"})
    assert expected_state_output == state_output
def test_executes_hello_world_state(example):
    """A Task state resolves its Resource ARN through the ResourceManager
    and returns the provider's result."""
    hello_world_state = Machine.parse(example("hello_world")).start_at_state
    assert isinstance(hello_world_state, State)
    resources = ResourceManager(providers={
        "arn:aws:lambda:us-east-1:123456789012:function:HelloWorld": lambda x: "Hello, world!"
    })
    next_state, output = hello_world_state.execute({}, resource_resolver=resources)
    assert output == "Hello, world!"
def test_runs_hello_world_machine(example):
    """Runner.run executes the machine from its StartAt state and returns
    the final (state, output) pair."""
    sm = Machine.parse(example("hello_world"))
    runner = Runner(resources=ResourceManager(providers={
        "arn:aws:lambda:us-east-1:123456789012:function:HelloWorld": lambda x: "Hello, world!"
    }))
    assert runner.run(sm) == (sm.start_at_state, "Hello, world!")
def test_input_passed_to_next_task():
    """Each task's ResultPath grafts its output onto the shared state
    document, which downstream tasks then see in full."""
    sm = Machine.parse([
        {
            "InputPath": "$.first_input",
            "ResultPath": "$.first_output",
            "Resource": "MultiplierByTwo",
        },
        {
            "InputPath": "$.first_output",
            "ResultPath": "$.second_output",
            "Resource": "MultiplierByThree",
        },
        {
            "Resource": "Validator",
        },
    ])
    runner = Runner()
    runner.resource_provider("MultiplierByTwo")(lambda x: x * 2)
    runner.resource_provider("MultiplierByThree")(lambda x: x * 3)
    @runner.resource_provider("Validator")
    def validate_input(input):
        assert input == {
            "first_input": 1111,
            "first_output": 2222,
            "second_output": 6666,
        }
        # NB! the validator must return the input so it becomes the
        # machine's final output.
        return input
    final_state, output = runner.run(sm, input={"first_input": 1111})
    assert output == {
        "first_input": 1111,
        "first_output": 2222,
        "second_output": 6666,
    }
@pytest.mark.parametrize("input,expected_output", [
    [{}, {}],
    [{"x": 1}, {"x": 1}],
])
def test_executes_wait_state(input, expected_output):
    """A Wait state passes its input through unchanged and names its Next state."""
    wait = State.parse({
        "Type": "Wait",
        "Seconds": 10,
        "Next": "NextState",
    })
    next_state, output = wait.execute(input=input)
    assert next_state == "NextState"
    assert expected_output == output
def test_executes_fail_state():
    """A Fail state terminates the machine: execute() returns no next state."""
    fail = State.parse({
        "Type": "Fail",
        "Error": "ErrorA",
        "Cause": "Kaiju attack",
    })
    # TODO No idea what should be the next state or output of fail state.
    # TODO Should it just raise an exception?
    # BUG FIX: the original passed ``input=input`` -- unlike the
    # parametrized tests above, this test has no ``input`` parameter, so
    # it silently passed the *builtin* input function as the state input.
    next_state, output = fail.execute(input={})
    assert next_state is None
1642938 | <filename>service/machines/migrations/0007_machine_rate.py
# Generated by Django 3.2.10 on 2022-01-30 00:25
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an integer ``rate`` field (validated to 0..5) to the machine model."""
    dependencies = [
        ('machines', '0006_auto_20220125_0217'),
    ]
    operations = [
        migrations.AddField(
            model_name='machine',
            name='rate',
            # default=3 backfills existing rows; preserve_default=False
            # drops the default from the field definition afterwards.
            field=models.SmallIntegerField(default=3, validators=[django.core.validators.MaxValueValidator(5), django.core.validators.MinValueValidator(0)]),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
import unittest
from utils import helper
from datetime import date
class NewEventCommandTest(unittest.TestCase):
    """Tests for helper.get_upcoming_wednesday_date."""
    def setUp(self):
        # Pairs of (today, expected upcoming Wednesday). Includes the case
        # where "today" already is a Wednesday (2021-03-17 maps to itself).
        self.upcomingTestDates = [
            (date(2021, 2, 25), date(2021, 3, 3)),
            (date(2021, 3, 12), date(2021, 3, 17)),
            (date(2021, 3, 17), date(2021, 3, 17)),
            (date(2021, 3, 18), date(2021, 3, 24)),
        ]
    def test_getUpcomingWednesdayDate(self):
        """The computed date is a Wednesday and matches the expected date."""
        for today, expected in self.upcomingTestDates:
            result = helper.get_upcoming_wednesday_date(today)
            self.assertEqual(
                result.weekday(),
                2,
                "Wrong day of week. Should be 2 as 0 is monday and 6 is sunday",
            )
            self.assertEqual(
                result,
                expected,
                f"Wrong calculated date! Expected: {expected}. Actual: {result}",
            )
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
# tests/test_utils.py
import unittest
from src.wea.utils import roundup, checkdims
class TestUtils(unittest.TestCase):
    """Unit tests for the roundup and checkdims helpers from src.wea.utils."""

    # Cleanup: the original __init__ override only stored an unused
    # self._server attribute, and setUp/tearDown were no-op overrides;
    # all three were dead boilerplate and have been removed.

    def test_checkdims(self):
        """checkdims returns the product of the entries of a dimension tuple."""
        result = checkdims((5, 2))
        self.assertEqual(result, 10)
        result = checkdims((5, 2, 3))
        self.assertEqual(result, 30)

    def test_roundup(self):
        """Pin roundup's observed behaviour for several (value, step) pairs."""
        # Renamed the local from "round" to avoid shadowing the builtin.
        result = roundup(10, 20)
        self.assertEqual(result, 40)
        result = roundup(11, 20)
        self.assertEqual(result, 40)
        result = roundup(21, 20)
        self.assertEqual(result, 40)
        result = roundup(30, 20)
        self.assertEqual(result, 60)
        result = roundup(50, 20)
        self.assertEqual(result, 80)
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
9755105 | <reponame>zhs007/slotsgamealgo_fwbro
{
"targets": [
{
"target_name": "sga_fwbro",
"sources": [ "src/main.cpp", "src/sga_fwbro.cpp", "src/fwbro.cpp", "src/slotslogic.cpp", "src/proportion.cpp" ],
"include_dirs" : [
"<!(node -e \"require('nan')\")"
]
}
],
} | StarcoderdataPython |
11357105 | <reponame>berryman121/faxplus-python<gh_stars>1-10
# coding: utf-8
"""
FAX.PLUS REST API
This is the fax.plus API v1 developed for third party developers and organizations. In order to have a better coding experience with this API, let's quickly go through some points:<br /><br /> - This API assumes **/accounts** as an entry point with the base url of **https://restapi.fax.plus/v1**. <br /><br /> - This API treats all date and times sent to it in requests as **UTC**. Also, all dates and times returned in responses are in **UTC**<br /><br /> - Once you have an access_token, you can easily send a request to the resource server with the base url of **https://restapi.fax.plus/v1** to access your permitted resources. As an example to get the user's profile info you would send a request to **https://restapi.fax.plus/v1/accounts/self** when **Authorization** header is set to \"Bearer YOUR_ACCESS_TOKEN\" and custom header of **x-fax-clientid** is set to YOUR_CLIENT_ID # noqa: E501
OpenAPI spec version: 1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from faxplus.models.account_settings_send_fax_retry import AccountSettingsSendFaxRetry # noqa: F401,E501
from faxplus.models.outbox_file_changes import OutboxFileChanges # noqa: F401,E501
from faxplus.models.outbox_initiated_from import OutboxInitiatedFrom # noqa: F401,E501
from faxplus.models.outbox_status_changes import OutboxStatusChanges # noqa: F401,E501
from faxplus.models.payload_outbox_comment import PayloadOutboxComment # noqa: F401,E501
class Outbox(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'files': 'list[str]',
'src': 'str',
'retry': 'AccountSettingsSendFaxRetry',
'should_enhance': 'bool',
'uid': 'str',
'designated_src': 'str',
'ip': 'str',
'page_count': 'int',
'comment': 'PayloadOutboxComment',
'id': 'str',
'file_changes': 'list[OutboxFileChanges]',
'to': 'list[str]',
'status': 'str',
'status_changes': 'list[OutboxStatusChanges]',
'contact_name': 'str',
'send_time': 'str',
'initiated_from': 'OutboxInitiatedFrom',
'submit_time': 'str',
'last_updated_status_time': 'str',
'options': 'object',
'extra_info': 'object'
}
attribute_map = {
'files': 'files',
'src': 'src',
'retry': 'retry',
'should_enhance': 'should_enhance',
'uid': 'uid',
'designated_src': 'designated_src',
'ip': 'ip',
'page_count': 'page_count',
'comment': 'comment',
'id': 'id',
'file_changes': 'file_changes',
'to': 'to',
'status': 'status',
'status_changes': 'status_changes',
'contact_name': 'contact_name',
'send_time': 'send_time',
'initiated_from': 'initiated_from',
'submit_time': 'submit_time',
'last_updated_status_time': 'last_updated_status_time',
'options': 'options',
'extra_info': 'extra_info'
}
def __init__(self, files=None, src=None, retry=None, should_enhance=None, uid=None, designated_src=None, ip=None, page_count=None, comment=None, id=None, file_changes=None, to=None, status=None, status_changes=None, contact_name=None, send_time=None, initiated_from=None, submit_time=None, last_updated_status_time=None, options=None, extra_info=None): # noqa: E501
"""Outbox - a model defined in Swagger""" # noqa: E501
self._files = None
self._src = None
self._retry = None
self._should_enhance = None
self._uid = None
self._designated_src = None
self._ip = None
self._page_count = None
self._comment = None
self._id = None
self._file_changes = None
self._to = None
self._status = None
self._status_changes = None
self._contact_name = None
self._send_time = None
self._initiated_from = None
self._submit_time = None
self._last_updated_status_time = None
self._options = None
self._extra_info = None
self.discriminator = None
if files is not None:
self.files = files
if src is not None:
self.src = src
if retry is not None:
self.retry = retry
if should_enhance is not None:
self.should_enhance = should_enhance
if uid is not None:
self.uid = uid
if designated_src is not None:
self.designated_src = designated_src
if ip is not None:
self.ip = ip
if page_count is not None:
self.page_count = page_count
if comment is not None:
self.comment = comment
if id is not None:
self.id = id
if file_changes is not None:
self.file_changes = file_changes
if to is not None:
self.to = to
if status is not None:
self.status = status
if status_changes is not None:
self.status_changes = status_changes
if contact_name is not None:
self.contact_name = contact_name
if send_time is not None:
self.send_time = send_time
if initiated_from is not None:
self.initiated_from = initiated_from
if submit_time is not None:
self.submit_time = submit_time
if last_updated_status_time is not None:
self.last_updated_status_time = last_updated_status_time
if options is not None:
self.options = options
if extra_info is not None:
self.extra_info = extra_info
@property
def files(self):
"""Gets the files of this Outbox. # noqa: E501
:return: The files of this Outbox. # noqa: E501
:rtype: list[str]
"""
return self._files
@files.setter
def files(self, files):
"""Sets the files of this Outbox.
:param files: The files of this Outbox. # noqa: E501
:type: list[str]
"""
self._files = files
@property
def src(self):
"""Gets the src of this Outbox. # noqa: E501
:return: The src of this Outbox. # noqa: E501
:rtype: str
"""
return self._src
@src.setter
def src(self, src):
"""Sets the src of this Outbox.
:param src: The src of this Outbox. # noqa: E501
:type: str
"""
self._src = src
@property
def retry(self):
"""Gets the retry of this Outbox. # noqa: E501
:return: The retry of this Outbox. # noqa: E501
:rtype: AccountSettingsSendFaxRetry
"""
return self._retry
@retry.setter
def retry(self, retry):
"""Sets the retry of this Outbox.
:param retry: The retry of this Outbox. # noqa: E501
:type: AccountSettingsSendFaxRetry
"""
self._retry = retry
@property
def should_enhance(self):
"""Gets the should_enhance of this Outbox. # noqa: E501
:return: The should_enhance of this Outbox. # noqa: E501
:rtype: bool
"""
return self._should_enhance
@should_enhance.setter
def should_enhance(self, should_enhance):
"""Sets the should_enhance of this Outbox.
:param should_enhance: The should_enhance of this Outbox. # noqa: E501
:type: bool
"""
self._should_enhance = should_enhance
@property
def uid(self):
"""Gets the uid of this Outbox. # noqa: E501
:return: The uid of this Outbox. # noqa: E501
:rtype: str
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this Outbox.
:param uid: The uid of this Outbox. # noqa: E501
:type: str
"""
self._uid = uid
@property
def designated_src(self):
"""Gets the designated_src of this Outbox. # noqa: E501
:return: The designated_src of this Outbox. # noqa: E501
:rtype: str
"""
return self._designated_src
@designated_src.setter
def designated_src(self, designated_src):
"""Sets the designated_src of this Outbox.
:param designated_src: The designated_src of this Outbox. # noqa: E501
:type: str
"""
self._designated_src = designated_src
# --- Generated accessor pairs (Swagger/OpenAPI codegen style). ---
# Each @property simply proxies the matching private attribute; each setter
# assigns it without validation. The ":rtype:/:type:" tags in the docstrings
# record the declared swagger type for the attribute.

@property
def ip(self):
    """Gets the ip of this Outbox.  # noqa: E501

    :return: The ip of this Outbox.  # noqa: E501
    :rtype: str
    """
    return self._ip

@ip.setter
def ip(self, ip):
    """Sets the ip of this Outbox.

    :param ip: The ip of this Outbox.  # noqa: E501
    :type: str
    """
    self._ip = ip

@property
def page_count(self):
    """Gets the page_count of this Outbox.  # noqa: E501

    :return: The page_count of this Outbox.  # noqa: E501
    :rtype: int
    """
    return self._page_count

@page_count.setter
def page_count(self, page_count):
    """Sets the page_count of this Outbox.

    :param page_count: The page_count of this Outbox.  # noqa: E501
    :type: int
    """
    self._page_count = page_count

@property
def comment(self):
    """Gets the comment of this Outbox.  # noqa: E501

    :return: The comment of this Outbox.  # noqa: E501
    :rtype: PayloadOutboxComment
    """
    return self._comment

@comment.setter
def comment(self, comment):
    """Sets the comment of this Outbox.

    :param comment: The comment of this Outbox.  # noqa: E501
    :type: PayloadOutboxComment
    """
    self._comment = comment

@property
def id(self):
    """Gets the id of this Outbox.  # noqa: E501

    :return: The id of this Outbox.  # noqa: E501
    :rtype: str
    """
    return self._id

@id.setter
def id(self, id):
    """Sets the id of this Outbox.

    :param id: The id of this Outbox.  # noqa: E501
    :type: str
    """
    self._id = id

@property
def file_changes(self):
    """Gets the file_changes of this Outbox.  # noqa: E501

    :return: The file_changes of this Outbox.  # noqa: E501
    :rtype: list[OutboxFileChanges]
    """
    return self._file_changes

@file_changes.setter
def file_changes(self, file_changes):
    """Sets the file_changes of this Outbox.

    :param file_changes: The file_changes of this Outbox.  # noqa: E501
    :type: list[OutboxFileChanges]
    """
    self._file_changes = file_changes

@property
def to(self):
    """Gets the to of this Outbox.  # noqa: E501

    :return: The to of this Outbox.  # noqa: E501
    :rtype: list[str]
    """
    return self._to

@to.setter
def to(self, to):
    """Sets the to of this Outbox.

    :param to: The to of this Outbox.  # noqa: E501
    :type: list[str]
    """
    self._to = to

@property
def status(self):
    """Gets the status of this Outbox.  # noqa: E501

    :return: The status of this Outbox.  # noqa: E501
    :rtype: str
    """
    return self._status

@status.setter
def status(self, status):
    """Sets the status of this Outbox.

    :param status: The status of this Outbox.  # noqa: E501
    :type: str
    """
    self._status = status

@property
def status_changes(self):
    """Gets the status_changes of this Outbox.  # noqa: E501

    :return: The status_changes of this Outbox.  # noqa: E501
    :rtype: list[OutboxStatusChanges]
    """
    return self._status_changes

@status_changes.setter
def status_changes(self, status_changes):
    """Sets the status_changes of this Outbox.

    :param status_changes: The status_changes of this Outbox.  # noqa: E501
    :type: list[OutboxStatusChanges]
    """
    self._status_changes = status_changes

@property
def contact_name(self):
    """Gets the contact_name of this Outbox.  # noqa: E501

    :return: The contact_name of this Outbox.  # noqa: E501
    :rtype: str
    """
    return self._contact_name

@contact_name.setter
def contact_name(self, contact_name):
    """Sets the contact_name of this Outbox.

    :param contact_name: The contact_name of this Outbox.  # noqa: E501
    :type: str
    """
    self._contact_name = contact_name

@property
def send_time(self):
    """Gets the send_time of this Outbox.  # noqa: E501

    :return: The send_time of this Outbox.  # noqa: E501
    :rtype: str
    """
    return self._send_time

@send_time.setter
def send_time(self, send_time):
    """Sets the send_time of this Outbox.

    :param send_time: The send_time of this Outbox.  # noqa: E501
    :type: str
    """
    self._send_time = send_time

@property
def initiated_from(self):
    """Gets the initiated_from of this Outbox.  # noqa: E501

    :return: The initiated_from of this Outbox.  # noqa: E501
    :rtype: OutboxInitiatedFrom
    """
    return self._initiated_from

@initiated_from.setter
def initiated_from(self, initiated_from):
    """Sets the initiated_from of this Outbox.

    :param initiated_from: The initiated_from of this Outbox.  # noqa: E501
    :type: OutboxInitiatedFrom
    """
    self._initiated_from = initiated_from

@property
def submit_time(self):
    """Gets the submit_time of this Outbox.  # noqa: E501

    :return: The submit_time of this Outbox.  # noqa: E501
    :rtype: str
    """
    return self._submit_time

@submit_time.setter
def submit_time(self, submit_time):
    """Sets the submit_time of this Outbox.

    :param submit_time: The submit_time of this Outbox.  # noqa: E501
    :type: str
    """
    self._submit_time = submit_time

@property
def last_updated_status_time(self):
    """Gets the last_updated_status_time of this Outbox.  # noqa: E501

    :return: The last_updated_status_time of this Outbox.  # noqa: E501
    :rtype: str
    """
    return self._last_updated_status_time

@last_updated_status_time.setter
def last_updated_status_time(self, last_updated_status_time):
    """Sets the last_updated_status_time of this Outbox.

    :param last_updated_status_time: The last_updated_status_time of this Outbox.  # noqa: E501
    :type: str
    """
    self._last_updated_status_time = last_updated_status_time

@property
def options(self):
    """Gets the options of this Outbox.  # noqa: E501

    :return: The options of this Outbox.  # noqa: E501
    :rtype: object
    """
    return self._options

@options.setter
def options(self, options):
    """Sets the options of this Outbox.

    :param options: The options of this Outbox.  # noqa: E501
    :type: object
    """
    self._options = options

@property
def extra_info(self):
    """Gets the extra_info of this Outbox.  # noqa: E501

    :return: The extra_info of this Outbox.  # noqa: E501
    :rtype: object
    """
    return self._extra_info

@extra_info.setter
def extra_info(self, extra_info):
    """Sets the extra_info of this Outbox.

    :param extra_info: The extra_info of this Outbox.  # noqa: E501
    :type: object
    """
    self._extra_info = extra_info
def to_dict(self):
    """Returns the model properties as a dict"""

    def _serialize(value):
        # Recursively convert nested swagger models; pass primitives through.
        # Order of checks mirrors the generated code: list, model, dict, scalar.
        if isinstance(value, list):
            return [item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return {key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()}
        return value

    result = {}
    # swagger_types drives which attributes belong to the model.
    for attr, _ in six.iteritems(self.swagger_types):
        result[attr] = _serialize(getattr(self, attr))
    return result
def to_str(self):
    """Returns the string representation of the model"""
    # pprint gives a stable, readable rendering of the dict form.
    rendered = pprint.pformat(self.to_dict())
    return rendered

def __repr__(self):
    """For `print` and `pprint`"""
    return self.to_str()

def __eq__(self, other):
    """Returns true if both objects are equal"""
    # Non-Outbox operands compare unequal; otherwise compare all attributes.
    return isinstance(other, Outbox) and self.__dict__ == other.__dict__

def __ne__(self, other):
    """Returns true if both objects are not equal"""
    return not self.__eq__(other)
| StarcoderdataPython |
6541051 | # Generated by Django 3.2.9 on 2021-12-01 19:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the sample_app shop models.

    Creates Customer, Order, Product and OrderItem. Operation order matters:
    Order's ForeignKey requires the Customer model to exist first, and
    OrderItem references both Order and Product.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Customer',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, null=True)),
                ('email', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_ordered', models.DateTimeField(auto_now_add=True)),
                ('complete', models.BooleanField(default=False)),
                # Deleting a customer cascades to their orders.
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sample_app.customer')),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('price', models.DecimalField(decimal_places=2, max_digits=7)),
            ],
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.IntegerField(blank=True, default=0, null=True)),
                # SET_NULL keeps order items around if the order/product is removed.
                ('order', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='sample_app.order')),
                ('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='sample_app.product')),
            ],
        ),
    ]
| StarcoderdataPython |
6619216 | <filename>BioSIMI-Python/IFFL_model_reduce.py
from modules.System import *
from modules.Subsystem import *
# Build the container system and load the IFFL subsystem from SBML
# (second argument '1' — presumably an SBML level/compartment id; confirm
# against the Subsystem API).
cell = System('cell')
IFFL = cell.createSubsystem('models/IFFL.xml','1')
# setFastReactions(1) — presumably marks reaction group 1 as fast so it can be
# eliminated during model reduction; TODO confirm.
IFFL.setFastReactions(1)
writeSBML(IFFL.getSubsystemDoc(),'models/IFFLfast.xml')
# Long, coarse time grid used by modelReduce to equilibrate fast dynamics.
timepointsFast = np.linspace(0,10000,10)
IFFLreduced = IFFL.modelReduce(timepointsFast)
writeSBML(IFFLreduced.getSubsystemDoc(),'models/IFFLreduced.xml')
# Simulate full vs. reduced model and plot the input/output species of each.
timepoints = np.linspace(0,10,1000)
plotSbmlWithBioscrape(['models/IFFLfast.xml','models/IFFLreduced.xml'],0,timepoints,[['inp_IFFL','out_IFFL'],['inp_IFFL','out_IFFL']])
| StarcoderdataPython |
5164708 | <filename>winmutex/__init__.py
from .winmutex import *
| StarcoderdataPython |
5196900 | <reponame>maldins46/CovidTracker<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Uses the library to generate all available charts in the ./assets/ directory.
@author: riccardomaldini
"""
def test_italy_charts():
    """Smoke-test generation of the national (Italy) chart set."""
    from charts import italy
    italy.parameters()
    italy.weekly_incidence()
    italy.rt_per_regions()
    italy.ti_occupation()


def test_marche_charts():
    """Smoke-test generation of the Marche region chart set."""
    from charts import marche
    marche.parameters()
    marche.weekly_incidence()
    marche.cases_per_provinces_abs()


def test_vaccines_charts():
    """Smoke-test generation of the vaccination chart set."""
    from charts import vaccines
    vaccines.immunes_percentage()
    vaccines.regional_doses()
    vaccines.adm_doses_marche()
    vaccines.adm_doses_italy()


def test_italy_geocharts():
    """Smoke-test generation of the national geographic (map) charts."""
    from geocharts import italy
    italy.ti_occupation()
    italy.weekly_increment_ti()
    italy.weekly_incidence_regions()
    italy.weekly_incidence_provinces()
    italy.weekly_increment_regions()
    italy.weekly_increment_provinces()


def test_vaccines_geocharts():
    """Smoke-test generation of the vaccination geographic (map) charts."""
    from geocharts import vaccines
    vaccines.adm_doses()
    vaccines.immunes_percentage()
    vaccines.coverage_percentage()


def test_marche_geocharts():
    """Smoke-test generation of the Marche geographic (map) charts."""
    from geocharts import marche
    marche.new_positives()
    marche.weekly_incidence()
    marche.weekly_increment()


def test_summaries():
    """Smoke-test computation of the textual summaries."""
    from summaries import italy as sum_italy, vaccines as sum_vaccines, marche as sum_marche
    sum_italy.compute_summary()
    sum_vaccines.compute_summary()
    sum_marche.compute_summary()
3258161 | import logging
from deploy.utils.constants import DEPLOYABLE_COMPONENTS
from utils.shell_utils import run
# Configure root logging once at import time; the module-level `logger`
# (the root logger) is reused by all helpers below.
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def _get_resource_info(
    resource_type="pod",
    labels=None,
    json_path=".items[0].metadata.name",
    errors_to_ignore=("array index out of bounds: index 0",),
    verbose=False,
):
    """Runs 'kubectl get <resource_type>' command to retrieve info about this resource.

    Args:
        resource_type (string): "pod", "service", etc.
        labels (dict): label selector (eg. {'name': 'phenotips'}); None/empty
            means no selector.
        json_path (string): a json path query string (eg. ".items[0].metadata.name")
        errors_to_ignore (list): error substrings to tolerate from kubectl
        verbose (bool): whether to log the kubectl output
    Returns:
        (string) resource value (eg. "postgres-410765475-1vtkn"), or None if
        kubectl produced no output.
    """
    # Fix: `labels` previously used a mutable default dict ({}); default to
    # None and treat any falsy value as "no label selector" (backward
    # compatible with callers passing a dict or omitting the argument).
    l_arg = ""
    if labels:
        l_arg = "-l" + ",".join(
            "%s=%s" % (key, value) for key, value in labels.items())
    # Explicit %-args instead of the fragile "% locals()" interpolation.
    output = run(
        "kubectl get %s %s -o jsonpath={%s}" % (resource_type, l_arg, json_path),
        errors_to_ignore=errors_to_ignore,
        print_command=False,
        verbose=verbose,
    )
    return output.strip('\n') if output is not None else None
# Sentinel values accepted by get_pod_status()'s `status_type` argument.
POD_READY_STATUS = "is_ready"
POD_RUNNING_STATUS = "is_running"
def get_pod_status(pod_name, deployment_target=None, print_status=True, status_type=POD_RUNNING_STATUS, pod_number=0):
    """Return a pod's readiness flag or run phase, as reported by kubectl."""
    selector = {"name": pod_name}
    if deployment_target:
        selector["deployment"] = deployment_target
    # Dispatch table: each supported status_type maps to the jsonpath suffix
    # that extracts the corresponding field from the pod object.
    json_path_suffixes = {
        POD_READY_STATUS: ".status.containerStatuses[0].ready",
        POD_RUNNING_STATUS: ".status.phase",
    }
    if status_type not in json_path_suffixes:
        raise ValueError("Unexpected status_type arg: %s" % str(status_type))
    json_path = ".items[%s]%s" % (pod_number, json_path_suffixes[status_type])
    result = _get_resource_info(
        labels=selector,
        resource_type="pod",
        json_path=json_path,
        errors_to_ignore=["array index out of bounds: index 0"],
        verbose=False,
    )
    if print_status:
        logger.info("%s %s status = %s", pod_name, status_type, result)
    return result
def get_pod_name(pod_name, deployment_target=None, pod_number=0):
    """Return the full generated name of the pod_number-th pod labeled `name=pod_name`."""
    selector = {"name": pod_name}
    if deployment_target:
        selector["deployment"] = deployment_target
    return _get_resource_info(
        labels=selector,
        resource_type="pod",
        json_path=".items[%s].metadata.name" % pod_number,
        errors_to_ignore=["array index out of bounds: index 0"],
        verbose=False,
    )
def get_service_name(service_name, deployment_target=None):
    """Return the name of the first pod labeled `name=service_name`.

    NOTE(review): despite the function name, this queries
    resource_type="pod", not "service" — it is byte-for-byte a copy of
    get_pod_name without pod_number. Confirm whether "service" was intended.
    """
    labels = {"name": service_name}
    if deployment_target:
        labels["deployment"] = deployment_target
    return _get_resource_info(
        labels=labels,
        resource_type="pod",
        json_path=".items[0].metadata.name",
        errors_to_ignore=["array index out of bounds: index 0"],
        verbose=False,
    )
def get_node_name():
    """Return the name of the first node in the cluster (verbose kubectl output)."""
    return _get_resource_info(
        resource_type="nodes",
        json_path=".items[0].metadata.name",
        errors_to_ignore=["array index out of bounds: index 0"],
        verbose=True,
    )
def run_in_pod(pod_name, command, deployment_target=None, errors_to_ignore=None, verbose=False, is_interactive=False):
    """Runs a kubernetes command to execute an arbitrary linux command string on the given pod.

    Args:
        pod_name (string): keyword to use for looking up a kubernetes pod (eg. 'phenotips' or 'nginx')
        command (string): the command to execute.
        is_interactive (bool): whether the command expects input from the user
    """
    if pod_name not in DEPLOYABLE_COMPONENTS:
        # Caller already passed a fully-qualified pod name; use it as-is.
        full_pod_name = pod_name
    else:
        # Resolve the component keyword to the actual generated pod name.
        full_pod_name = get_pod_name(pod_name, deployment_target=deployment_target)
        if not full_pod_name:
            raise ValueError(
                "No '%s' pods found. Is the kubectl environment configured in this terminal? and has this type of pod been deployed?" % pod_name)
    it_arg = "-it" if is_interactive else ""
    run("kubectl exec %s %s -- %s" % (it_arg, full_pod_name, command),
        errors_to_ignore=errors_to_ignore, verbose=verbose,
        is_interactive=is_interactive)
| StarcoderdataPython |
11366093 | from typing import NamedTuple
import pytest
from hydra.core.config_store import ConfigStore
from hydra.core.utils import JobReturn
from hydra.experimental.callback import Callback
from omegaconf import DictConfig
from hydra_zen import builds, instantiate
from hydra_zen.experimental import hydra_multirun, hydra_run
class Tracker(NamedTuple):
    """Immutable snapshot of which CustomCallback hooks have fired."""
    # All flags default to False so `not any(tracker())` holds before a run.
    job_start: bool = False
    job_end: bool = False
    run_start: bool = False
    run_end: bool = False
    multirun_start: bool = False
    multirun_end: bool = False
class CustomCallback(Callback):
    """Hydra callback that records which hooks ran via class-level flags.

    Flags live on the class (not the instance) so the task function and the
    test can observe them without holding a reference to the callback object
    Hydra instantiates.
    """

    JOB_START_CALLED = False
    JOB_END_CALLED = False
    RUN_START_CALLED = False
    RUN_END_CALLED = False
    MULTIRUN_START_CALLED = False
    MULTIRUN_END_CALLED = False

    def __init__(self, callback_name):
        self.name = callback_name

    def on_job_start(self, config: DictConfig, **kwargs) -> None:
        CustomCallback.JOB_START_CALLED = True

    def on_job_end(self, config: DictConfig, job_return: JobReturn, **kwargs) -> None:
        CustomCallback.JOB_END_CALLED = True

    def on_run_start(self, config: DictConfig, **kwargs) -> None:
        CustomCallback.RUN_START_CALLED = True

    def on_run_end(self, config: DictConfig, **kwargs) -> None:
        CustomCallback.RUN_END_CALLED = True

    def on_multirun_start(self, config: DictConfig, **kwargs) -> None:
        CustomCallback.MULTIRUN_START_CALLED = True

    def on_multirun_end(self, config: DictConfig, **kwargs) -> None:
        CustomCallback.MULTIRUN_END_CALLED = True
# Register the callback config so tests can enable it with the override
# "hydra/callbacks=test_callback".
cs = ConfigStore.instance()
cs.store(
    group="hydra/callbacks",
    name="test_callback",
    node=dict(test_callback=builds(CustomCallback, callback_name="test")),
)
def tracker(x=CustomCallback):
    """Snapshot which CustomCallback hooks have fired so far.

    Runs inside the Hydra task, i.e. after the job/run start hooks but
    before the corresponding end hooks.
    """
    field_to_flag = (
        ("job_start", "JOB_START_CALLED"),
        ("job_end", "JOB_END_CALLED"),
        ("run_start", "RUN_START_CALLED"),
        ("run_end", "RUN_END_CALLED"),
        ("multirun_start", "MULTIRUN_START_CALLED"),
        ("multirun_end", "MULTIRUN_END_CALLED"),
    )
    return Tracker(**{field: getattr(x, flag) for field, flag in field_to_flag})
@pytest.mark.usefixtures("cleandir")
@pytest.mark.parametrize("fn", [hydra_run, hydra_multirun])
def test_hydra_run_with_callback(fn):
# Tests that callback methods are called during appropriate
# stages
try:
is_multirun = fn is hydra_multirun
cfg = builds(tracker)
assert not any(tracker()) # ensures all flags are false
job = fn(
cfg, task_function=instantiate, overrides=["hydra/callbacks=test_callback"]
)
if is_multirun:
job = job[0][0]
tracked_mid_run: Tracker = job.return_value
assert tracked_mid_run.job_start is True
assert tracked_mid_run.run_start is not is_multirun
assert tracked_mid_run.multirun_start is is_multirun
assert tracked_mid_run.job_end is False
assert tracked_mid_run.run_end is False
assert tracked_mid_run.multirun_end is False
assert CustomCallback.JOB_END_CALLED is True
assert CustomCallback.RUN_END_CALLED is not is_multirun
assert CustomCallback.MULTIRUN_END_CALLED is is_multirun
finally:
CustomCallback.JOB_START_CALLED = False
CustomCallback.JOB_END_CALLED = False
CustomCallback.RUN_START_CALLED = False
CustomCallback.RUN_END_CALLED = False
CustomCallback.MULTIRUN_START_CALLED = False
CustomCallback.MULTIRUN_END_CALLED = False
| StarcoderdataPython |
11307210 | <filename>Algorithms/Reducing_Dishes/main.py
### Reducing Dishes - Solution
class Solution:
def maxSatisfaction(self, satisfaction: List[int]) -> int:
satisfaction.sort()
max_sum, acc, i = 0, 0, len(satisfaction)-1
while (i >= 0) and (satisfaction[i]+acc > 0):
acc += satisfaction[i]
max_sum += acc
i -= 1
return max_sum | StarcoderdataPython |
1679472 | from src.models import DBSession, Base, Colleague, ColleagueLocus, Dbentity, Locusdbentity, Filedbentity, FileKeyword, LocusAlias, Dnasequenceannotation, So, Locussummary, Phenotypeannotation, PhenotypeannotationCond, Phenotype, Goannotation, Go, Goslimannotation, Goslim, Apo, Straindbentity, Strainsummary, Reservedname, GoAlias, Goannotation, Referencedbentity, Referencedocument, Referenceauthor, ReferenceAlias, Chebi, Disease, Diseaseannotation, DiseaseAlias, Complexdbentity, ComplexAlias, ComplexReference, Complexbindingannotation
from sqlalchemy import create_engine, and_
from elasticsearch import Elasticsearch
from mapping import mapping
import os
import requests
from threading import Thread
import json
import collections
from index_es_helpers import IndexESHelper
import concurrent.futures
import uuid
import logging
# Bind the SQLAlchemy session and declarative metadata to the NEX2 database.
engine = create_engine(os.environ["NEX2_URI"], pool_recycle=3600)
DBSession.configure(bind=engine)
Base.metadata.bind = engine

# Elasticsearch target index/doc-type; WRITE_ES_URI must permit writes.
INDEX_NAME = os.environ.get("ES_INDEX_NAME", "searchable_items_aws")
DOC_TYPE = "searchable_item"
ES_URI = os.environ["WRITE_ES_URI"]
es = Elasticsearch(ES_URI, retry_on_timeout=True)
def delete_mapping():
    """Drop the search index (and its mapping) from Elasticsearch."""
    print("Deleting mapping...")
    response = requests.delete(ES_URI + INDEX_NAME + "/")
    if response.status_code == 200:
        print("SUCCESS")
    else:
        print(("ERROR: " + str(response.json())))
def put_mapping():
    """Create the search index using the mapping definition from `mapping`."""
    print("Putting mapping... ")
    response = requests.put(ES_URI + INDEX_NAME + "/", json=mapping)
    if response.status_code == 200:
        print("SUCCESS")
    else:
        print(("ERROR: " + str(response.json())))
def index_toolbar_links():
    """Index the static site-toolbar links as 'resource' documents.

    Each entry is (display name, href, quick-search keys); the href doubles
    as the Elasticsearch document id so re-runs overwrite rather than
    duplicate.
    """
    links = [
        ("Gene List", "https://yeastmine.yeastgenome.org/yeastmine/bag.do", []),
        ("Yeastmine", "https://yeastmine.yeastgenome.org", "yeastmine"),
        ("Submit Data", "/submitData", []),
        ("SPELL", "https://spell.yeastgenome.org", "spell"),
        ("BLAST", "/blast-sgd", "blast"),
        ("Fungal BLAST", "/blast-fungal", "blast"),
        ("Pattern Matching", "/nph-patmatch", []),
        ("Design Primers", "/primer3", []),
        ("Restriction Mapper", "/restrictionMapper", []),
        ("Genome Browser", "https://browse.yeastgenome.org", []),
        ("Gene/Sequence Resources", "/seqTools", []),
        ("Download Genome", "https://downloads.yeastgenome.org/sequence/S288C_reference/genome_releases/", "download"),
        ("Genome Snapshot", "/genomesnapshot", []),
        ("Chromosome History", "https://wiki.yeastgenome.org/index.php/Summary_of_Chromosome_Sequence_and_Annotation_Updates", []),
        ("Systematic Sequencing Table", "/cache/chromosomes.shtml", []),
        ("Original Sequence Papers", "http://wiki.yeastgenome.org/index.php/Original_Sequence_Papers", []),
        ("Variant Viewer", "/variant-viewer", []),
        ("GO Term Finder", "/goTermFinder", "go"),
        ("GO Slim Mapper", "/goSlimMapper", "go"),
        ("GO Slim Mapping File", "https://downloads.yeastgenome.org/curation/literature/go_slim_mapping.tab", "go"),
        ("Expression", "https://spell.yeastgenome.org/#", []),
        ("Biochemical Pathways", "http://pathway.yeastgenome.org/", []),
        ("Browse All Phenotypes", "/ontology/phenotype/ypo", []),
        ("Interactions", "/interaction-search", []),
        ("YeastGFP", "https://yeastgfp.yeastgenome.org/", "yeastgfp"),
        ("Full-text Search", "http://textpresso.yeastgenome.org/", "texxtpresso"),
        ("New Yeast Papers", "/reference/recent", []),
        ("Genome-wide Analysis Papers", "https://yeastmine.yeastgenome.org/yeastmine/loadTemplate.do?name=GenomeWide_Papers&scope=all&method=results&format=tab", []),
        ("Find a Colleague", "/search?q=&category=colleague", []),
        ("Add or Update Info", "/colleague_update", []),
        ("Career Resources", "http://wiki.yeastgenome.org/index.php/Career_Resources", []),
        ("Future", "http://wiki.yeastgenome.org/index.php/Meetings#Upcoming_Conferences_.26_Courses", []),
        ("Yeast Genetics", "http://wiki.yeastgenome.org/index.php/Meetings#Past_Yeast_Meetings", []),
        ("Submit a Gene Registration", "/reserved_name/new", []),
        ("Nomenclature Conventions", "https://sites.google.com/view/yeastgenome-help/community-help/nomenclature-conventions", []),
        ("Strains and Constructs", "http://wiki.yeastgenome.org/index.php/Strains", []),
        ("Reagents", "http://wiki.yeastgenome.org/index.php/Reagents", []),
        ("Protocols and Methods", "http://wiki.yeastgenome.org/index.php/Methods", []),
        ("Physical & Genetic Maps", "http://wiki.yeastgenome.org/index.php/Combined_Physical_and_Genetic_Maps_of_S._cerevisiae", []),
        ("Genetic Maps", "http://wiki.yeastgenome.org/index.php/Yeast_Mortimer_Maps_-_Edition_12", []),
        ("Sequence", "http://wiki.yeastgenome.org/index.php/Historical_Systematic_Sequence_Information", []),
        ("Wiki", "http://wiki.yeastgenome.org/index.php/Main_Page", "wiki"),
        ("Resources", "http://wiki.yeastgenome.org/index.php/External_Links", [])
    ]
    print(("Indexing " + str(len(links)) + " toolbar links"))
    for l in links:
        obj = {
            "name": l[0],
            "href": l[1],
            "description": None,
            "category": "resource",
            "keys": l[2]
        }
        es.index(index=INDEX_NAME, doc_type=DOC_TYPE, body=obj, id=l[1])
def index_colleagues():
    """Index all colleague records merged with their associated locus names."""
    colleagues = DBSession.query(Colleague).all()
    _locus_ids = IndexESHelper.get_colleague_locus()
    _locus_names = IndexESHelper.get_colleague_locusdbentity()
    _combined_list = IndexESHelper.combine_locusdbentity_colleague(
        colleagues, _locus_names, _locus_ids)
    print(("Indexing " + str(len(colleagues)) + " colleagues"))
    bulk_data = []
    for item_k, item_v in list(_combined_list.items()):
        # Bulk API: action line followed by the document line.
        bulk_data.append({
            "index": {
                "_index": INDEX_NAME,
                "_type": DOC_TYPE,
                "_id": str(uuid.uuid4())
            }
        })
        bulk_data.append(item_v)
        # Flush every 500 documents (1000 bulk lines).
        if len(bulk_data) == 1000:
            es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
            bulk_data = []
    if len(bulk_data) > 0:
        es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_genes():
    """Index locus (gene) documents for S288C plus explicitly flagged non-S288C loci.

    Gathers per-gene feature types, aliases, TC/EC numbers, secondary SGDIDs,
    phenotypes and GO annotations, then bulk-writes documents in batches.
    """
    # Indexing just the S228C genes
    # dbentity: 1364643 (id) -> straindbentity -> 274901 (taxonomy_id)
    # list of dbentities comes from table DNASequenceAnnotation with taxonomy_id 274901
    # feature_type comes from DNASequenceAnnotation as well
    gene_ids_so = DBSession.query(
        Dnasequenceannotation.dbentity_id, Dnasequenceannotation.so_id).filter(
            Dnasequenceannotation.taxonomy_id == 274901).all()
    dbentity_ids_to_so = {}
    dbentity_ids = set([])
    so_ids = set([])
    for gis in gene_ids_so:
        dbentity_ids.add(gis[0])
        so_ids.add(gis[1])
        dbentity_ids_to_so[gis[0]] = gis[1]
    # add some non S288C genes
    not_s288c = DBSession.query(Locusdbentity.dbentity_id).filter(
        Locusdbentity.not_in_s288c == True).all()
    for id in not_s288c:
        dbentity_ids.add(id[0])
        # assume non S288C features to be ORFs
        dbentity_ids_to_so[id[0]] = 263757
    all_genes = DBSession.query(Locusdbentity).filter(
        Locusdbentity.dbentity_id.in_(list(dbentity_ids))).all()
    # make list of merged/deleted genes so they don"t redirect when they show up as an alias
    merged_deleted_r = DBSession.query(Locusdbentity.format_name).filter(
        Locusdbentity.dbentity_status.in_(["Merged", "Deleted"])).all()
    merged_deleted = [d[0] for d in merged_deleted_r]
    feature_types_db = DBSession.query(
        So.so_id, So.display_name).filter(So.so_id.in_(list(so_ids))).all()
    feature_types = {}
    for ft in feature_types_db:
        feature_types[ft[0]] = ft[1]
    # Alias lookups keyed by locus_id (each value is a list of display names).
    tc_numbers_db = DBSession.query(LocusAlias).filter_by(
        alias_type="TC number").all()
    tc_numbers = {}
    for tc in tc_numbers_db:
        if tc.locus_id in tc_numbers:
            tc_numbers[tc.locus_id].append(tc.display_name)
        else:
            tc_numbers[tc.locus_id] = [tc.display_name]
    ec_numbers_db = DBSession.query(LocusAlias).filter_by(
        alias_type="EC number").all()
    ec_numbers = {}
    for ec in ec_numbers_db:
        if ec.locus_id in ec_numbers:
            ec_numbers[ec.locus_id].append(ec.display_name)
        else:
            ec_numbers[ec.locus_id] = [ec.display_name]
    secondary_db = DBSession.query(LocusAlias).filter_by(
        alias_type="SGDID Secondary").all()
    secondary_sgdids = {}
    for sid in secondary_db:
        if sid.locus_id in secondary_sgdids:
            secondary_sgdids[sid.locus_id].append(sid.display_name)
        else:
            secondary_sgdids[sid.locus_id] = [sid.display_name]
    bulk_data = []
    print(("Indexing " + str(len(all_genes)) + " genes"))
    ##### test newer methods ##########
    # Pre-fetched per-locus lookup dicts; avoids per-gene queries below.
    _summary = IndexESHelper.get_locus_dbentity_summary()
    _protein = IndexESHelper.get_locus_dbentity_alias(["NCBI protein name"])
    _phenos = IndexESHelper.get_locus_phenotypeannotation()
    _goids = IndexESHelper.get_locus_go_annotation()
    _aliases_raw = IndexESHelper.get_locus_dbentity_alias(
        ["Uniform", "Non-uniform", "Retired name", "UniProtKB ID"])
    ###################################
    # NOTE(review): `not_mapped_genes` is fetched but never used below, and
    # `is_quick_flag` is always True — confirm whether both are leftovers.
    not_mapped_genes = IndexESHelper.get_not_mapped_genes()
    is_quick_flag = True
    for gene in all_genes:
        # Display name: "GENE / SYSTEMATIC" when both exist and differ.
        if gene.gene_name:
            _name = gene.gene_name
            if gene.systematic_name and gene.gene_name != gene.systematic_name:
                _name += " / " + gene.systematic_name
        else:
            _name = gene.systematic_name
        #summary = DBSession.query(Locussummary.text).filter_by(locus_id=gene.dbentity_id).all()
        summary = []
        if (_summary is not None):
            summary = _summary.get(gene.dbentity_id)
        #protein = DBSession.query(LocusAlias.display_name).filter_by(locus_id=gene.dbentity_id, alias_type="NCBI protein name").one_or_none()
        protein = _protein.get(gene.dbentity_id)
        if protein is not None:
            protein = protein[0].display_name
        # TEMP don"t index due to schema schange
        # sequence_history = DBSession.query(Locusnoteannotation.note).filter_by(dbentity_id=gene.dbentity_id, note_type="Sequence").all()
        # gene_history = DBSession.query(Locusnoteannotation.note).filter_by(dbentity_id=gene.dbentity_id, note_type="Locus").all()
        #phenotype_ids = DBSession.query(Phenotypeannotation.phenotype_id).filter_by(dbentity_id=gene.dbentity_id).all()
        phenotype_ids = []
        if _phenos is not None:
            temp = _phenos.get(gene.dbentity_id)
            if temp is not None:
                phenotype_ids = [x.phenotype_id for x in temp]
        if len(phenotype_ids) > 0:
            phenotypes = DBSession.query(Phenotype.display_name).filter(
                Phenotype.phenotype_id.in_(phenotype_ids)).all()
        else:
            phenotypes = []
        #go_ids = DBSession.query(Goannotation.go_id).filter(and_(Goannotation.go_qualifier != "NOT", Goannotation.dbentity_id == gene.dbentity_id)).all()
        go_ids = _goids.get(gene.dbentity_id)
        if go_ids is not None:
            go_ids = [x.go_id for x in go_ids]
        else:
            go_ids = []
        go_annotations = {
            "cellular component": set([]),
            "molecular function": set([]),
            "biological process": set([])
        }
        # Direct GO annotations are tagged "(direct)"; slim terms are added bare.
        if len(go_ids) > 0:
            #go_ids = [g[0] for g in go_ids]
            go = DBSession.query(
                Go.display_name,
                Go.go_namespace).filter(Go.go_id.in_(go_ids)).all()
            for g in go:
                go_annotations[g[1]].add(g[0] + " (direct)")
        go_slim_ids = DBSession.query(Goslimannotation.goslim_id).filter(
            Goslimannotation.dbentity_id == gene.dbentity_id).all()
        if len(go_slim_ids) > 0:
            go_slim_ids = [g[0] for g in go_slim_ids]
            go_slim = DBSession.query(Goslim.go_id, Goslim.display_name).filter(
                Goslim.goslim_id.in_(go_slim_ids)).all()
            go_ids = [g[0] for g in go_slim]
            go = DBSession.query(
                Go.go_id, Go.go_namespace).filter(Go.go_id.in_(go_ids)).all()
            for g in go:
                for gs in go_slim:
                    if (gs[0] == g[0]):
                        go_annotations[g[1]].add(gs[1])
        # add "quick direct" keys such as aliases, SGD, UniProt ID and format aliases
        #aliases_raw = DBSession.query(LocusAlias.display_name, LocusAlias.alias_type).filter(and_(LocusAlias.locus_id==gene.dbentity_id, LocusAlias.alias_type.in_())).all()
        aliases_raw = _aliases_raw.get(gene.dbentity_id)
        alias_quick_direct_keys = []
        aliases = []
        if aliases_raw is not None:
            for alias_item in aliases_raw:
                name = alias_item.display_name
                if name not in merged_deleted:
                    alias_quick_direct_keys.append(name)
                    if alias_item.alias_type != "UniProtKB ID":
                        aliases.append(name)
        '''for d in aliases_raw:
            name = d[0]
            if name not in merged_deleted:
                alias_quick_direct_keys.append(name)
                if d[1] != "UniProtKB ID":
                    aliases.append(name)'''
        # make everything in keys lowercase to ignore case
        keys = []
        _keys = [gene.gene_name, gene.systematic_name, gene.sgdid
                 ] + alias_quick_direct_keys
        # Add SGD:<gene SGDID> to list of keywords for quick search
        _keys.append("SGD:{}".format(gene.sgdid))
        # If this gene has a reservedname associated with it, add that reservedname to
        # the list of keywords used for the quick search of this gene
        reservedname = DBSession.query(Reservedname).filter_by(
            locus_id=gene.dbentity_id).one_or_none()
        if reservedname:
            _keys.append(reservedname.display_name)
        for k in _keys:
            if k:
                keys.append(k.lower())
        obj = {
            "name": _name,
            "href": gene.obj_url,
            "description": gene.description,
            "category": "locus",
            "feature_type": feature_types[dbentity_ids_to_so[gene.dbentity_id]],
            "name_description": gene.name_description,
            "summary": summary,
            "phenotypes": [p[0] for p in phenotypes],
            "aliases": aliases,
            # Strip the namespace's own name from each GO bucket.
            "cellular_component": list(go_annotations["cellular component"] - set([
                "cellular component", "cellular component (direct)",
                "cellular_component", "cellular_component (direct)"
            ])),
            "biological_process": list(go_annotations["biological process"] - set([
                "biological process (direct)", "biological process",
                "biological_process (direct)", "biological_process"
            ])),
            "molecular_function": list(go_annotations["molecular function"] - set([
                "molecular function (direct)", "molecular function",
                "molecular_function (direct)", "molecular_function"
            ])),
            "ec_number": ec_numbers.get(gene.dbentity_id),
            "protein": protein,
            "tc_number": tc_numbers.get(gene.dbentity_id),
            "secondary_sgdid": secondary_sgdids.get(gene.dbentity_id),
            "status": gene.dbentity_status,
            # TEMP don"t index due to schema change
            # "sequence_history": [s[0] for s in sequence_history],
            # "gene_history": [g[0] for g in gene_history],
            "bioentity_id": gene.dbentity_id,
            "keys": list(keys),
            "is_quick_flag": str(is_quick_flag)
        }
        bulk_data.append({
            "index": {
                "_index": INDEX_NAME,
                "_type": DOC_TYPE,
                "_id": str(uuid.uuid4())
            }
        })
        bulk_data.append(obj)
        # Flush every 500 documents (1000 bulk lines).
        if len(bulk_data) == 1000:
            es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
            bulk_data = []
    if len(bulk_data) > 0:
        es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_phenotypes():
    """Index all phenotype documents produced from phenotype annotations."""
    phenotypes = DBSession.query(Phenotype).all()
    documents = IndexESHelper.get_pheno_annotations(phenotypes)
    print(("Indexing " + str(len(documents)) + " phenotypes"))
    batch = []
    for document in documents:
        # Bulk API pairs: action line, then the document itself.
        batch.append({
            "index": {
                "_index": INDEX_NAME,
                "_type": DOC_TYPE,
                "_id": str(uuid.uuid4())
            }
        })
        batch.append(document)
        # Flush every 25 documents (50 bulk lines).
        if len(batch) == 50:
            es.bulk(index=INDEX_NAME, body=batch, refresh=True)
            batch = []
    if len(batch) > 0:
        es.bulk(index=INDEX_NAME, body=batch, refresh=True)
def index_observables():
    """Index all APO terms from the 'observable' namespace."""
    observables = DBSession.query(Apo).filter_by(
        apo_namespace="observable").all()
    print(("Indexing " + str(len(observables)) + " observables"))
    bulk_data = []
    for observable in observables:
        obj = {
            "name": observable.display_name,
            "href": observable.obj_url,
            "description": observable.description,
            "category": "observable",
            "keys": []
        }
        bulk_data.append({
            "index": {
                "_index": INDEX_NAME,
                "_type": DOC_TYPE,
                "_id": str(uuid.uuid4())
            }
        })
        bulk_data.append(obj)
        # Flush every 150 documents (300 bulk lines).
        if len(bulk_data) == 300:
            es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
            bulk_data = []
    if len(bulk_data) > 0:
        es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_strains():
    """Index every strain dbentity, keyed by display/format/GenBank names.

    NOTE(review): `description` is built from the strain summary paragraph but
    never used — the indexed document uses strain.headline instead. Confirm
    whether the summary text was meant to be indexed; otherwise the
    Strainsummary query is dead work.
    """
    strains = DBSession.query(Straindbentity).all()
    print(("Indexing " + str(len(strains)) + " strains"))
    for strain in strains:
        key_values = [
            strain.display_name, strain.format_name, strain.genbank_id
        ]
        keys = set([])
        for k in key_values:
            if k is not None:
                keys.add(k.lower())
        paragraph = DBSession.query(Strainsummary.text).filter_by(
            strain_id=strain.dbentity_id).one_or_none()
        description = None
        if paragraph:
            description = paragraph[0]
        obj = {
            "name": strain.display_name,
            "href": strain.obj_url,
            "description": strain.headline,
            "category": "strain",
            "keys": list(keys)
        }
        es.index(
            index=INDEX_NAME, doc_type=DOC_TYPE, body=obj, id=str(uuid.uuid4()))
def index_reserved_names():
    """Index reserved gene names; names tied to a locus link to the locus page.

    NOTE(review): this function is defined twice in this file with identical
    behavior — this first definition is shadowed by the duplicate below and
    never runs. One of the two should be removed.
    """
    # only index reservednames that do not have a locus associated with them
    reserved_names = DBSession.query(Reservedname).all()
    print(("Indexing " + str(len(reserved_names)) + " reserved names"))
    for reserved_name in reserved_names:
        name = reserved_name.display_name
        href = reserved_name.obj_url
        keys = [reserved_name.display_name.lower()]
        # change name if has an orf
        if reserved_name.locus_id:
            locus = DBSession.query(Locusdbentity).filter(
                Locusdbentity.dbentity_id ==
                reserved_name.locus_id).one_or_none()
            name = name + " / " + locus.systematic_name
            href = locus.obj_url
            keys = []
        obj = {
            "name": name,
            "href": href,
            "description": reserved_name.name_description,
            "category": "reserved_name",
            "keys": keys
        }
        es.index(
            index=INDEX_NAME, doc_type=DOC_TYPE, body=obj, id=str(uuid.uuid4()))
def index_reserved_names():
    """Index reserved gene names; names tied to a locus get the ORF appended."""
    reserved_names = DBSession.query(Reservedname).all()
    print(("Indexing " + str(len(reserved_names)) + " reserved names"))
    for reserved_name in reserved_names:
        name = reserved_name.display_name
        href = reserved_name.obj_url
        keys = [reserved_name.display_name.lower()]
        if reserved_name.locus_id:
            # Reserved name has an ORF: show both names and link to the locus.
            locus = DBSession.query(Locusdbentity).filter(
                Locusdbentity.dbentity_id == reserved_name.locus_id).one_or_none()
            name = name + " / " + locus.systematic_name
            href = locus.obj_url
            keys = []
        document = {
            "name": name,
            "href": href,
            "description": reserved_name.name_description,
            "category": "reserved_name",
            "keys": keys
        }
        es.index(
            index=INDEX_NAME, doc_type=DOC_TYPE, body=document, id=str(uuid.uuid4()))
def load_go_id_blacklist(list_filename):
    """Return the set of GO ids listed one-per-line in *list_filename*.

    Fixes two defects in the original: the file handle was never closed, and
    ``l[:-1]`` unconditionally chopped the last character, corrupting the
    final id when the file has no trailing newline. Only the newline is
    stripped now.
    """
    go_id_blacklist = set()
    with open(list_filename, "r") as list_file:
        for line in list_file:
            go_id_blacklist.add(line.rstrip("\n"))
    return go_id_blacklist
def index_go_terms():
    """Index GO terms into Elasticsearch in bulk batches of 800.

    Terms whose GO id appears in scripts/search/go_id_blacklist.lst are
    skipped. Annotated loci and their references are collected from
    annotations whose qualifier is not "NOT".
    """
    go_id_blacklist = load_go_id_blacklist("scripts/search/go_id_blacklist.lst")
    gos = DBSession.query(Go).all()
    print(("Indexing " + str(len(gos) - len(go_id_blacklist)) + " GO terms"))
    bulk_data = []
    for go in gos:
        if go.goid in go_id_blacklist:
            continue
        synonyms = DBSession.query(GoAlias.display_name).filter_by(
            go_id=go.go_id).all()
        references = set([])
        gene_ontology_loci = set([])
        annotations = DBSession.query(Goannotation).filter_by(
            go_id=go.go_id).all()
        for annotation in annotations:
            if annotation.go_qualifier != "NOT":
                gene_ontology_loci.add(annotation.dbentity.display_name)
                references.add(annotation.reference.display_name)
        # Searchable key variants: full id plus padded/unpadded numeric forms.
        numerical_id = go.goid.split(":")[1]
        key_values = [
            go.goid, "GO:" + str(int(numerical_id)), numerical_id,
            str(int(numerical_id))
        ]
        keys = set([])
        for k in key_values:
            if k is not None:
                keys.add(k.lower())
        obj = {
            "name": go.display_name,
            "href": go.obj_url,
            "description": go.description,
            "synonyms": [s[0] for s in synonyms],
            "go_id": go.goid,
            "gene_ontology_loci": sorted(list(gene_ontology_loci)),
            "number_annotations": len(annotations),
            "references": list(references),
            "category": go.go_namespace.replace(" ", "_"),
            "keys": list(keys)
        }
        # Bulk API format: action/metadata line followed by the document body.
        bulk_data.append({
            "index": {
                "_index": INDEX_NAME,
                "_type": DOC_TYPE,
                "_id": str(uuid.uuid4())
            }
        })
        bulk_data.append(obj)
        if len(bulk_data) == 800:
            es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
            bulk_data=[]
    # Flush the final partial batch.
    if len(bulk_data) > 0:
        es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_disease_terms():
    """Index Disease Ontology (DO) terms into Elasticsearch in batches of 800.

    Bug fix: in the original, the ``bulk_data.append`` calls sat OUTSIDE the
    ``doid != 'derives_from'`` guard, so a 'derives_from' row either raised
    NameError (first iteration) or re-indexed the previous iteration's stale
    document. Such pseudo-entries are now skipped up front.
    """
    dos = DBSession.query(Disease).all()
    print(("Indexing " + str(len(dos)) + " DO terms"))
    bulk_data = []
    for do in dos:
        if do.doid == 'derives_from':
            # Relationship pseudo-entry, not a real DO term — skip entirely.
            continue
        synonyms = DBSession.query(DiseaseAlias.display_name).filter_by(disease_id=do.disease_id).all()
        references = set([])
        disease_loci = set([])
        annotations = DBSession.query(Diseaseannotation).filter_by(disease_id=do.disease_id).all()
        for annotation in annotations:
            if annotation.disease_qualifier != "NOT":
                disease_loci.add(annotation.dbentity.display_name)
                references.add(annotation.reference.display_name)
        # Searchable key variants: full id plus padded/unpadded numeric forms.
        numerical_id = do.doid.split(":")[1]
        key_values = [
            do.doid, "DO:" + str(int(numerical_id)), numerical_id,
            str(int(numerical_id))
        ]
        keys = set([])
        for k in key_values:
            if k is not None:
                keys.add(k.lower())
        obj = {
            "name": do.display_name,
            "category": "disease",
            "href": do.obj_url,
            "description": do.description,
            "synonyms": [s[0] for s in synonyms],
            "doid": do.doid,
            "disease_loci": sorted(list(disease_loci)),
            "number_annotations": len(annotations),
            "references": list(references),
            "keys": list(keys)
        }
        # Bulk API format: action/metadata line followed by the document body.
        bulk_data.append({
            "index": {
                "_index": INDEX_NAME,
                "_type": DOC_TYPE,
                "_id": str(uuid.uuid4())
            }
        })
        bulk_data.append(obj)
        if len(bulk_data) == 800:
            es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
            bulk_data = []
    # Flush the final partial batch.
    if len(bulk_data) > 0:
        es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_references():
    """Index all reference dbentities (papers) in bulk batches of 1000.

    Abstracts, authors, aliases and locus links are pre-fetched in bulk via
    IndexESHelper and looked up per reference by dbentity_id.
    """
    _ref_loci = IndexESHelper.get_dbentity_locus_note()
    _references = DBSession.query(Referencedbentity).all()
    _abstracts = IndexESHelper.get_ref_abstracts()
    _authors = IndexESHelper.get_ref_authors()
    _aliases = IndexESHelper.get_ref_aliases()
    bulk_data = []
    print(("Indexing " + str(len(_references)) + " references"))
    for reference in _references:
        # Display names of loci this reference is linked to (deduplicated).
        reference_loci = []
        if len(_ref_loci) > 0:
            temp_loci = _ref_loci.get(reference.dbentity_id)
            if temp_loci is not None:
                reference_loci = list(set([x.display_name for x in IndexESHelper.flattern_list(temp_loci)]))
        abstract = _abstracts.get(reference.dbentity_id)
        if abstract is not None:
            abstract = abstract[0]
        # Only the first secondary SGDID (if any) is indexed.
        sec_sgdids = _aliases.get(reference.dbentity_id)
        sec_sgdid = None
        authors = _authors.get(reference.dbentity_id)
        if sec_sgdids is not None:
            sec_sgdid = sec_sgdids[0]
        if authors is None:
            authors = []
        journal = reference.journal
        if journal:
            journal = journal.display_name
        # Key variants so users can search "pmid 123", "pmid:123", etc.
        key_values = [
            reference.pmcid, reference.pmid, "pmid: " + str(reference.pmid),
            "pmid:" + str(reference.pmid), "pmid " + str(reference.pmid),
            reference.sgdid
        ]
        keys = set([])
        for k in key_values:
            if k is not None:
                keys.add(str(k).lower())
        obj = {
            "name": reference.citation,
            "href": reference.obj_url,
            "description": abstract,
            "author": authors,
            "journal": journal,
            "year": str(reference.year),
            "reference_loci": reference_loci,
            "secondary_sgdid": sec_sgdid,
            "category": "reference",
            "keys": list(keys)
        }
        # Bulk API format: action/metadata line followed by the document body.
        bulk_data.append({
            "index": {
                "_index": INDEX_NAME,
                "_type": DOC_TYPE,
                "_id": str(uuid.uuid4())
            }
        })
        bulk_data.append(obj)
        if len(bulk_data) == 1000:
            es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
            bulk_data = []
    # Flush the final partial batch.
    if len(bulk_data) > 0:
        es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_chemicals():
    """Index ChEBI chemical entries into Elasticsearch in batches of 300."""
    all_chebi_data = DBSession.query(Chebi).all()
    _result = IndexESHelper.get_chebi_annotations(all_chebi_data)
    bulk_data = []
    print(("Indexing " + str(len(all_chebi_data)) + " chemicals"))
    for item_key, item_v in list(_result.items()):
        if item_v is None:
            continue
        document = {
            "name": item_v.display_name,
            "href": item_v.obj_url,
            "description": item_v.description,
            "category": "chemical",
            "keys": [],
            "chebiid": item_v.chebiid
        }
        # Bulk API: action line (stable per-chemical id) then the document.
        bulk_data.append({
            "index": {
                "_index": INDEX_NAME,
                "_type": DOC_TYPE,
                "_id": "chemical_" + str(item_key)
            }
        })
        bulk_data.append(document)
        if len(bulk_data) == 300:
            es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
            bulk_data = []
    if len(bulk_data) > 0:
        es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def cleanup():
    """Drop and recreate the ES mapping, wiping all previously indexed documents."""
    delete_mapping()
    put_mapping()
def setup():
    """Create the search index mapping only if the index does not exist yet."""
    existing_indices = list(es.indices.get_aliases().keys())
    if INDEX_NAME not in existing_indices:
        put_mapping()
def index_not_mapped_genes():
    """Index genetic loci that are not physically mapped, from a bundled JSON.

    Documents link to the downloadable genetic_loci.tab file rather than a
    locus page. Cleanup: removed the unused ``temp_aliases`` local from the
    loop body.
    """
    url = "https://downloads.yeastgenome.org/curation/literature/genetic_loci.tab"
    bulk_data = []
    with open("./scripts/search/not_mapped.json",
              "r") as json_data:
        _data = json.load(json_data)
        print(("indexing " + str(len(_data)) + " not physically mapped genes"))
        for item in _data:
            if len(item["FEATURE_NAME"]) > 0:
                obj = {
                    "name": item["FEATURE_NAME"],
                    "href": url,
                    "category": "locus",
                    "feature_type": ["Unmapped Genetic Loci"],
                    "aliases": item["ALIASES"].split("|"),
                    "description": item["DESCRIPTION"],
                    "is_quick_flag": "False"
                }
                # Bulk API: action/metadata line followed by the document.
                bulk_data.append({
                    "index": {
                        "_index": INDEX_NAME,
                        "_type": DOC_TYPE,
                        "_id": str(uuid.uuid4())
                    }
                })
                bulk_data.append(obj)
                if len(bulk_data) == 300:
                    es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
                    bulk_data = []
        # Flush the final partial batch.
        if len(bulk_data) > 0:
            es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_downloads():
    """Index public download files (one ES document each) in batches of 50.

    Fixes: ``logging.error(e.message)`` raised AttributeError on Python 3
    (exceptions have no ``.message`` attribute) — now logs ``str(e)``; the
    document literal also contained a stray ``""`` fragment implicitly
    concatenated onto the "name" key, which is removed. The redundant
    status if/else is collapsed (it assigned the status string unchanged).
    """
    bulk_data = []
    dbentity_file_obj = IndexESHelper.get_file_dbentity_keyword()
    files = DBSession.query(Filedbentity).filter(Filedbentity.is_public == True,
                                                 Filedbentity.s3_url != None).all()
    print(("indexing " + str(len(files)) + " download files"))
    for x in files:
        try:
            keyword = []
            status = ""
            temp = dbentity_file_obj.get(x.dbentity_id)
            if temp:
                keyword = temp
            if x.dbentity_status in ("Active", "Archived"):
                status = x.dbentity_status
            obj = {
                "name": x.display_name,
                "href": x.s3_url if x else None,
                "category": "download",
                "description": x.description,
                "keyword": keyword,
                "format": str(x.format.display_name),
                "status": str(status),
                "file_size": str(IndexESHelper.convertBytes(x.file_size))
                if x.file_size is not None else x.file_size,
                "year": str(x.year),
                "readme_url": x.readme_file.s3_url if x.readme_file else None,
                "topic": x.topic.display_name,
                "data": x.data.display_name,
                "path_id": x.get_path_id()
            }
            # Bulk API: action/metadata line followed by the document body.
            bulk_data.append({
                "index": {
                    "_index": INDEX_NAME,
                    "_type": DOC_TYPE,
                    "_id": str(uuid.uuid4())
                }
            })
            bulk_data.append(obj)
            if len(bulk_data) == 50:
                es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
                bulk_data = []
        except Exception as e:
            # Best-effort indexing: log and continue with the next file.
            logging.error(str(e))
    if len(bulk_data) > 0:
        es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_complex_names():
    """Index macromolecular complex dbentities in bulk batches of 800."""
    complexes = DBSession.query(Complexdbentity).all()
    print(("Indexing " + str(len(complexes)) + " complex names"))
    bulk_data = []
    for c in complexes:
        synonyms = DBSession.query(ComplexAlias.display_name).filter_by(complex_id=c.dbentity_id).all()
        references = set([])
        refs = DBSession.query(ComplexReference).filter_by(complex_id=c.dbentity_id).all()
        for ref in refs:
            references.add(ref.reference.display_name)
        # Display names of loci whose gene products participate in the complex.
        complex_loci = set([])
        annotations = DBSession.query(Complexbindingannotation).filter_by(complex_id=c.dbentity_id).all()
        for a in annotations:
            interactor = a.interactor
            if interactor.locus_id is not None:
                complex_loci.add(interactor.locus.display_name)
        # Exact-match search keys: IntAct id, accession and SGDID.
        key_values = [
            c.intact_id, c.complex_accession, c.sgdid
        ]
        keys = set([])
        for k in key_values:
            if k is not None:
                keys.add(k.lower())
        obj = {
            "name": c.display_name,
            "href": "/complex/" + c.complex_accession,
            "description": c.description + "; " + c.properties,
            "category": "complex",
            "synonyms": [s[0] for s in synonyms],
            "systematic_name": c.systematic_name,
            "intact_id": c.intact_id,
            "complex_accession": c.complex_accession,
            "complex_loci": sorted(list(complex_loci)),
            "references": list(references),
            "keys": list(keys)
        }
        # Bulk API format: action/metadata line followed by the document body.
        bulk_data.append({
            "index": {
                "_index": INDEX_NAME,
                "_type": DOC_TYPE,
                "_id": str(uuid.uuid4())
            }
        })
        bulk_data.append(obj)
        if len(bulk_data) == 800:
            es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
            bulk_data=[]
    # Flush the final partial batch.
    if len(bulk_data) > 0:
        es.bulk(index=INDEX_NAME, body=bulk_data, refresh=True)
def index_part_1():
    """First half of the index build; runs on its own thread (see __main__)."""
    index_phenotypes()
    index_downloads()
    index_not_mapped_genes()
    index_genes()
    index_strains()
    index_colleagues()
    index_chemicals()
def index_part_2():
    """Second half of the index build; runs on its own thread (see __main__)."""
    index_reserved_names()
    index_toolbar_links()
    index_observables()
    index_go_terms()
    index_disease_terms()
    index_complex_names()
    index_references()
if __name__ == "__main__":
    '''
    To run multi-processing add this:
    with concurrent.futures.ProcessPoolExecutor(max_workers=8) as executor:
        index_references()
    '''
    # Rebuild the index from scratch, then run the two indexing halves on
    # separate threads (they write independent document categories).
    cleanup()
    setup()
    t1 = Thread(target=index_part_1)
    t2 = Thread(target=index_part_2)
    t1.start()
    t2.start()
| StarcoderdataPython |
1702794 | # Copyright (c) 2016 Jiocloud.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from dss_op import *
from dss_auth import *
from jcsclient import utils
import os
import sys
import time
import hmac
import json
import base64
import requests
import exceptions
from email.utils import formatdate
import xml.sax
class BucketOp(DSSOp):
    """Base class for bucket-level DSS operations.

    Subclasses set ``http_method``/``bucket_name``; request transport
    (``make_request``) is inherited from DSSOp.
    """
    def __init__(self):
        DSSOp.__init__(self)
    def parse_args(self, options):
        # Bucket operations address the bucket itself.
        self.dss_op_path = '/' + self.bucket_name
    def validate_args(self):
        # No bucket-level argument validation by default.
        pass
    def execute(self):
        return self.make_request()
    def process_result(self, result, response_json=None):
        """Convert a requests-style response into a plain result dict."""
        if result is None:
            return {"status_code": "500", "error_message": "Connection not established"}
        response_json = {"headers": result.headers,
                         "status_code": result.status_code,
                         "status_message": result.reason}
        if result.status_code != 200 and result.status_code != 204:
            response_json["error_message"] = result.text
        else:
            response_json["content"] = result.content
        return response_json
class ListBucketsOp(BucketOp):
    """GET / — list all buckets owned by the caller."""
    def __init__(self):
        BucketOp.__init__(self)
        self.dss_op_path = '/'
        self.http_method = 'GET'
    def parse_args(self, options):
        # Listing buckets takes no arguments.
        pass
class CreateBucketOp(BucketOp):
    """PUT /<bucket> — create the named bucket."""
    def __init__(self, name):
        BucketOp.__init__(self)
        self.http_method = 'PUT'
        self.bucket_name = name
class DeleteBucketOp(BucketOp):
    """DELETE /<bucket> — delete the named bucket."""
    def __init__(self, name):
        BucketOp.__init__(self)
        self.bucket_name = name
        self.http_method = 'DELETE'
class HeadBucketOp(BucketOp):
    """HEAD /<bucket> — check bucket existence/accessibility.

    Consistency fix: initialize via ``BucketOp.__init__`` like the sibling
    operations instead of calling ``DSSOp.__init__`` directly (behaviorally
    identical today, but keeps the hierarchy uniform).
    """
    def __init__(self, name):
        BucketOp.__init__(self)
        self.http_method = 'HEAD'
        self.bucket_name = name
class ListObjectsOp(BucketOp):
    """GET /<bucket> — list objects with optional prefix/marker/max-keys/delimiter.

    Fixes: initialize via ``BucketOp.__init__`` (consistent with sibling
    ops), drop the unused ``params`` local, and replace four copy-pasted
    query-string branches with a single loop producing identical output.
    """
    def __init__(self, name):
        BucketOp.__init__(self)
        self.http_method = 'GET'
        self.bucket_name = name
    def parse_args(self, args_dict):
        """Build ``self.dss_query_str`` from the optional listing arguments.

        Mirrors the original behavior exactly: '' when *args_dict* is None,
        None when no argument is set, otherwise '&'-joined 'key=value' pairs
        in the fixed order prefix, marker, max-keys, delimiter.
        """
        self.dss_query_str = ''
        self.dss_op_path = '/' + self.bucket_name
        if (args_dict is None):
            return
        pairs = []
        for param in ('prefix', 'marker', 'max-keys', 'delimiter'):
            if args_dict[param] is not None:
                pairs.append(param + '=' + args_dict[param])
        self.dss_query_str = '&'.join(pairs) if pairs else None
class ListMPUploadsOp(BucketOp):
    """GET /<bucket>?uploads — list in-progress multipart uploads."""
    def __init__(self, buckname):
        BucketOp.__init__(self)
        self.http_method = 'GET'
        # The 'uploads' subresource must appear in both the signed string
        # and the actual request query.
        self.dss_query_str_for_signature = 'uploads'
        self.dss_query_str = 'uploads'
        self.bucket_name = buckname
| StarcoderdataPython |
# Python - 3.6.0
# Codewars kata check: make_negative(42) must return -42.
# (Test.assert_equals and make_negative come from the Codewars runner.)
Test.assert_equals(make_negative(42), -42)
| StarcoderdataPython |
11203717 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
import numpy as np
import os
# import dependencies
import time
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
#Pytorch requirements
import unicodedata
import string
import re
import random
import argparse
import math
from subprocess import run, PIPE
import torch
import torch.nn as nn
from torch.nn import init
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
# Pick CUDA tensor types when a GPU is available, CPU types otherwise,
# and seed the matching RNG so dataset generation is reproducible.
if torch.cuda.is_available():
    dtype = torch.cuda.FloatTensor
    dtype_l = torch.cuda.LongTensor
    torch.cuda.manual_seed(0)
else:
    dtype = torch.FloatTensor
    dtype_l = torch.LongTensor
    torch.manual_seed(0)
class Generator():
    """Synthetic clustering dataset generator for the K-means experiments.

    Each example is a dict with 'points' (an (N, dim) float array) and
    'target' (length-N cluster labels). Train/test splits are cached to
    ``path_dataset`` as numpy pickles and reloaded on subsequent runs.
    """

    def __init__(self, path_dataset, num_examples_train, num_examples_test, N, clusters, dim):
        self.path_dataset = path_dataset
        self.num_examples_train = num_examples_train
        self.num_examples_test = num_examples_test
        # Each element is a dict with keys 'points' and 'target' (numpy arrays).
        self.data_train = []
        self.data_test = []
        self.N = N                # number of points per sample
        self.clusters = clusters  # cluster centers per sample; each emits N // clusters points
        self.dim = dim            # dimensionality of each point

    def gaussian_example(self, N, clusters):
        """Draw N points from `clusters` isotropic Gaussians with uniform centers.

        Returns (points, target): points is (N, dim), target holds the cluster
        index of every point; both are shuffled by a common permutation.
        NOTE: assumes N is divisible by `clusters` (the final reshape needs it).
        """
        centers = np.random.uniform(0, 1, [clusters, self.dim])
        per_cl = N // clusters
        Pts = []
        cov = 0.001 * np.eye(self.dim, self.dim)
        target = np.zeros([N])
        for c in range(clusters):
            points = np.random.multivariate_normal(centers[c], cov, per_cl)
            target[c * per_cl: (c + 1) * per_cl] = c
            Pts.append(points)
        points = np.reshape(Pts, [-1, self.dim])
        rand_perm = np.random.permutation(N)
        points = points[rand_perm]
        target = target[rand_perm]
        return points, target

    def uniform_example(self, N):
        """Draw N uniform points with an all-zero target (the no-cluster case).

        Bug fix: the original returned a hard-coded 2-D array of shape (2, N),
        transposed relative to gaussian_example's (N, dim) layout, which made
        sample_batch fail; it now returns an (N, self.dim) array.
        """
        points = np.random.uniform(0, 1, (N, self.dim))
        target = np.zeros(N)
        return points, target

    def compute_example(self):
        """Build one example dict: Gaussian mixture if clusters > 0, else uniform."""
        example = {}
        if self.clusters > 0:
            points, target = self.gaussian_example(self.N, self.clusters)
        else:
            points, target = self.uniform_example(self.N)
        example['points'] = points
        example['target'] = target
        return example

    def create_dataset_train(self):
        """Generate the training split in memory."""
        for i in range(self.num_examples_train):
            example = self.compute_example()
            self.data_train.append(example)
            if i % 100 == 0:
                print('Train example {} of length {} computed.'
                      .format(i, self.N))

    def create_dataset_test(self):
        """Generate the test split in memory."""
        for i in range(self.num_examples_test):
            example = self.compute_example()
            self.data_test.append(example)
            if i % 100 == 0:
                print('Test example {} of length {} computed.'
                      .format(i, self.N))

    def load_dataset(self):
        """Load the cached train/test splits, creating and saving them if absent."""
        # load train dataset
        filename = 'KMEANS_GM{}_clusters{}_dim{}_train.np'.format(self.N, self.clusters, self.dim)
        path = os.path.join(self.path_dataset, filename)
        if os.path.exists(path):
            print('Reading training dataset at {}'.format(path))
            self.data_train = np.load(open(path, 'rb'), allow_pickle=True)
        else:
            print('Creating training dataset.')
            self.create_dataset_train()
            print('Saving training datatset at {}'.format(path))
            np.save(open(path, 'wb'), self.data_train)
        # load test dataset
        filename = 'KMEANS_GM{}_clusters{}_dim{}_test.np'.format(self.N, self.clusters, self.dim)
        path = os.path.join(self.path_dataset, filename)
        if os.path.exists(path):
            print('Reading testing dataset at {}'.format(path))
            self.data_test = np.load(open(path, 'rb'), allow_pickle=True)
        else:
            print('Creating testing dataset.')
            self.create_dataset_test()
            print('Saving testing datatset at {}'.format(path))
            np.save(open(path, 'wb'), self.data_test)

    def sample_batch(self, num_samples, is_training=True, it=0, cuda=True, volatile=False):
        """Assemble a (points, target) minibatch wrapped in torch Variables.

        Training samples are drawn uniformly at random; test samples are
        taken sequentially starting at index it * num_samples.
        """
        points = torch.zeros(num_samples, self.N, self.dim)
        target = torch.zeros(num_samples, self.N)
        if is_training:
            dataset = self.data_train
        else:
            dataset = self.data_test
        for b in range(num_samples):
            if is_training:
                # random element in the dataset
                ind = np.random.randint(0, len(dataset))
            else:
                ind = it * num_samples + b
            points[b] = torch.from_numpy(dataset[ind]['points'])
            target[b] = torch.from_numpy(dataset[ind]['target'])
        # wrap as variables (volatile kept for compatibility with old torch)
        points = Variable(points, volatile=volatile)
        target = Variable(target, volatile=volatile)
        if cuda:
            return points.cuda(), target.cuda()
        else:
            return points, target
if __name__ == '__main__':
    # Test Generator module
    # Smoke test: build a tiny dataset (50 points, 5 clusters, 2-D),
    # load/create the cached splits, and print one sampled batch.
    gen = Generator('/data/folque/dataset/', 20000, 1000, 50, 5, 2)
    gen.load_dataset()
    print(gen.sample_batch(1))
| StarcoderdataPython |
204793 | <reponame>pablintino/Altium-DBlib-source<filename>sources/app/routes.py
#
# MIT License
#
# Copyright (c) 2020 <NAME>, @pablintino
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from app import api
from rest_layer.component_list_resource import ComponentListResource
from rest_layer.component_resource import ComponentResource
from rest_layer.footprint_component_reference_resource import FootprintComponentReferenceResource
from rest_layer.footprint_data_resource import FootprintDataResource
from rest_layer.footprint_element_component_reference_resource import FootprintElementComponentReferenceResource
from rest_layer.footprint_list_resource import FootprintListResource
from rest_layer.footprint_resource import FootprintResource
from rest_layer.inventory.inventory_category_item_list_resource import InventoryCategoryItemListResource
from rest_layer.inventory.inventory_category_list_resource import InventoryCategoryListResource
from rest_layer.inventory.inventory_category_parent_resource import InventoryCategoryParentResource
from rest_layer.inventory.inventory_category_resource import InventoryCategoryResource
from rest_layer.inventory.inventory_item_category_resource import InventoryItemCategoryResource
from rest_layer.inventory.inventory_item_list_resource import InventoryItemListResource
from rest_layer.inventory.inventory_item_location_resource import InventoryItemLocationResource
from rest_layer.inventory.inventory_item_property_element_resource import InventoryItemPropertyElementResource
from rest_layer.inventory.inventory_item_property_list_resource import InventoryItemPropertyListResource
from rest_layer.inventory.inventory_item_resource import InventoryItemResource
from rest_layer.inventory.inventory_item_stock_location_resource import InventoryItemStockLocationResource
from rest_layer.inventory.inventory_location_list_resource import InventoryLocationListResource
from rest_layer.inventory.inventory_location_resource import InventoryLocationResource
from rest_layer.inventory.inventory_stocks_mass_update_resource import InventoryStocksMassUpdateResource
from rest_layer.metadata_api import MetadataResource
from rest_layer.symbol_component_reference_resource import SymbolComponentReferenceResource
from rest_layer.symbol_data_resource import SymbolDataResource
from rest_layer.symbol_list_resource import SymbolListResource
from rest_layer.symbol_resource import SymbolResource
# Flask-RESTful route table: bind each resource class to its URL pattern.
api.add_resource(MetadataResource, '/metadata')
# Component endpoints (plus their symbol/footprint sub-resources).
api.add_resource(ComponentListResource, '/components')
api.add_resource(ComponentResource, '/components/<int:id>')
api.add_resource(SymbolComponentReferenceResource, '/components/<int:id>/symbol')
api.add_resource(FootprintComponentReferenceResource, '/components/<int:id>/footprints')
api.add_resource(FootprintElementComponentReferenceResource, '/components/<int:id>/footprints/<int:id_f>')
# Symbol endpoints.
api.add_resource(SymbolListResource, '/symbols')
api.add_resource(SymbolResource, '/symbols/<int:id>')
api.add_resource(SymbolDataResource, '/symbols/<int:id>/data')
# Footprint endpoints.
api.add_resource(FootprintListResource, '/footprints')
api.add_resource(FootprintResource, '/footprints/<int:id>')
api.add_resource(FootprintDataResource, '/footprints/<int:id>/data')
# Items endpoints
api.add_resource(InventoryItemListResource, '/inventory/items')
api.add_resource(InventoryItemResource, '/inventory/items/<int:id>')
api.add_resource(InventoryItemLocationResource, '/inventory/items/<int:id>/locations')
api.add_resource(InventoryItemPropertyListResource, '/inventory/items/<int:id>/properties')
api.add_resource(InventoryItemPropertyElementResource, '/inventory/items/<int:id>/properties/<int:prop_id>')
api.add_resource(InventoryItemStockLocationResource, '/inventory/items/<int:id>/locations/<int:id_loc>/stock')
api.add_resource(InventoryItemCategoryResource, '/inventory/items/<int:id>/category')
# Locations endpoints
api.add_resource(InventoryLocationListResource, '/inventory/locations')
api.add_resource(InventoryLocationResource, '/inventory/locations/<int:id>')
# Stock management endpoints
api.add_resource(InventoryStocksMassUpdateResource, '/inventory/stocks/updates')
# Categories endpoints
api.add_resource(InventoryCategoryListResource, '/inventory/categories')
api.add_resource(InventoryCategoryResource, '/inventory/categories/<int:id>')
api.add_resource(InventoryCategoryItemListResource, '/inventory/categories/<int:id>/items')
api.add_resource(InventoryCategoryParentResource, '/inventory/categories/<int:id>/parent')
| StarcoderdataPython |
291781 | <gh_stars>0
from enum import Enum
from rest_framework.pagination import PageNumberPagination
def is_authenticated(request):
    """Return a truthy value when the request carries a logged-in user."""
    user = request.user
    return user and user.is_authenticated
class DynamicPagination(PageNumberPagination):
    """DRF page-number pagination with a client-controlled page size.

    Clients select pages via ``?page=<n>`` and may override the page size
    with ``?page_size=<m>``, capped at ``max_page_size``.
    """
    page_size = 25  # default items per page
    page_query_param = 'page'  # query parameter selecting the page
    page_size_query_param = 'page_size'  # query parameter overriding page size
    max_page_size = 1000  # hard cap on client-requested page size
class ChoiceBase(Enum):
    """Base Enum exposing Django-style model choices via :meth:`to_list`."""
    @classmethod
    def to_list(cls):
        """Return ``(value, name)`` pairs usable as a model field's choices."""
        return [(member.value, member.name) for member in cls]
class AuditVerbEnum(ChoiceBase):
    """Audit-log verbs, stored as single-character codes."""
    CREATED = 'C'
    UPDATED = 'U'
    DELETED = 'D'
def backoff(attempts):
    """Return an exponential backoff delay, in seconds, for *attempts*.

    The delay doubles with every attempt: 1, 2, 4, 8, 16, 32, ... seconds.
    """
    delay = 2 ** attempts
    return delay
| StarcoderdataPython |
11245080 | import functools
import numpy as np
import pandas as pd
import tensorflow as tf
# from tensorflow.keras import utils
#
# TRAIN_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/train.csv"
# TEST_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/eval.csv"
# train_file_path = utils.get_file("train.csv", TRAIN_DATA_URL)
# test_file_path = utils.get_file("eval.csv", TEST_DATA_URL)
# print("downloaded path : ", train_file_path, test_file_path)
# Local copies of the Titanic train/eval CSVs (the commented-out code above
# shows the original TF-hosted download URLs).
data_path = "../../../data"
train_file_path = "%s/titanic/train.csv" % data_path
test_file_path = "%s/titanic/eval.csv" % data_path
LABEL_COLUMN = 'survived'  # target column name in the CSVs
LABELS = [0, 1]  # possible label values
def get_dataset(file_path, **kwargs):
    """Build a batched tf.data CSV dataset yielding (features, label) pairs.

    Extra keyword arguments are forwarded to ``make_csv_dataset`` (e.g.
    column_names, select_columns, column_defaults).
    """
    dataset = tf.data.experimental.make_csv_dataset(
        file_path,
        batch_size=5, # Artificially small to make examples easier to show.
        label_name=LABEL_COLUMN,
        na_value="?",
        num_epochs=1,
        ignore_errors=True,
        **kwargs)
    return dataset
# Batched datasets over the raw CSVs; labels come from the 'survived' column.
raw_train_data = get_dataset(train_file_path)
raw_test_data = get_dataset(test_file_path)
def show_batch(dataset):
    """Print one batch's feature tensors, one 'name: values' line per column."""
    for batch, label in dataset.take(1):
        for key, value in batch.items():
            print(f"{key:20s}: {value.numpy()}")
show_batch(raw_train_data)
# Demonstrate overriding the header row: supply the column names explicitly.
CSV_COLUMNS = ['survived', 'sex', 'age', 'n_siblings_spouses', 'parch', 'fare', 'class', 'deck', 'embark_town', 'alone']
temp_dataset = get_dataset(train_file_path, column_names=CSV_COLUMNS)
show_batch(temp_dataset)
# Demonstrate selecting a numeric subset with per-column default values.
SELECT_COLUMNS = ['survived', 'age', 'n_siblings_spouses', 'parch', 'fare']
DEFAULTS = [0, 0.0, 0.0, 0.0, 0.0]
temp_dataset = get_dataset(train_file_path,
                           select_columns=SELECT_COLUMNS,
                           column_defaults=DEFAULTS)
show_batch(temp_dataset)
example_batch, labels_batch = next(iter(temp_dataset))
def pack(features, label):
    """Stack all feature columns into a single tensor; pass the label through."""
    values = list(features.values())
    return tf.stack(values, axis=-1), label
packed_dataset = temp_dataset.map(pack)
# Peek at one packed batch: a dense feature matrix plus the label vector.
for features, labels in packed_dataset.take(1):
    print(features.numpy())
    print()
    print(labels.numpy())
class PackNumericFeatures(object):
    """Callable dataset transform packing the named numeric columns into one
    float32 'numeric' feature tensor (columns are removed from the dict)."""
    def __init__(self, names):
        self.names = names
    def __call__(self, features, labels):
        popped = [features.pop(name) for name in self.names]
        as_float = [tf.cast(column, tf.float32) for column in popped]
        features['numeric'] = tf.stack(as_float, axis=-1)
        return features, labels
NUMERIC_FEATURES = ['age', 'n_siblings_spouses', 'parch', 'fare']
# Pack the numeric columns of both splits into a single 'numeric' tensor.
packed_train_data = raw_train_data.map(
    PackNumericFeatures(NUMERIC_FEATURES))
packed_test_data = raw_test_data.map(
    PackNumericFeatures(NUMERIC_FEATURES))
show_batch(packed_train_data)
example_batch, labels_batch = next(iter(packed_train_data))
# Normalization statistics are computed with pandas over the raw training CSV.
desc = pd.read_csv(train_file_path)[NUMERIC_FEATURES].describe()
MEAN = np.array(desc.T['mean'])
STD = np.array(desc.T['std'])
def normalize_numeric_data(data, mean, std):
    """Standardize *data*: subtract *mean*, then divide by *std* (z-score)."""
    centered = data - mean
    return centered / std
# See what you just created.
normalizer = functools.partial(normalize_numeric_data, mean=MEAN, std=STD)
numeric_column = tf.feature_column.numeric_column('numeric', normalizer_fn=normalizer, shape=[len(NUMERIC_FEATURES)])
numeric_columns = [numeric_column]
numeric_layer = tf.keras.layers.DenseFeatures(numeric_columns)
numeric_layer(example_batch).numpy()
CATEGORIES = {
'sex': ['male', 'female'],
'class': ['First', 'Second', 'Third'],
'deck': ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'],
'embark_town': ['Cherbourg', 'Southhampton', 'Queenstown'],
'alone': ['y', 'n']
}
categorical_columns = []
for feature, vocab in CATEGORIES.items():
cat_col = tf.feature_column.categorical_column_with_vocabulary_list(
key=feature, vocabulary_list=vocab)
categorical_columns.append(tf.feature_column.indicator_column(cat_col))
categorical_layer = tf.keras.layers.DenseFeatures(categorical_columns)
print(categorical_layer(example_batch).numpy()[0])
preprocessing_layer = tf.keras.layers.DenseFeatures(categorical_columns + numeric_columns)
print(preprocessing_layer(example_batch).numpy()[0])
model = tf.keras.Sequential([
preprocessing_layer,
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(
loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
train_data = packed_train_data.shuffle(500)
test_data = packed_test_data
model.fit(train_data, epochs=20)
test_loss, test_accuracy = model.evaluate(test_data)
print('\n\nTest Loss {}, Test Accuracy {}'.format(test_loss, test_accuracy))
predictions = model.predict(test_data)
# Show some results
for prediction, survived in zip(predictions[:10], list(test_data)[0][1][:10]):
print("Predicted survival: {:.2%}".format(prediction[0]),
" | Actual outcome: ",
("SURVIVED" if bool(survived) else "DIED"))
| StarcoderdataPython |
3361722 | from multiprocessing import Queue, Pool
from queue import PriorityQueue
import config
from core.detect import *
def video():
    """Run the parallel read → detect → write video pipeline.

    A worker pool consumes (frame_number, frame) pairs from input_q and emits
    processed frames on output_q; because workers finish out of order, a
    priority queue (output_pq) re-sequences frames by number before they are
    written/displayed. Frame counters drive the progress display and the
    termination condition.
    """
    input_q = Queue(maxsize=config.m_queue_size)
    output_q = Queue(maxsize=config.m_queue_size)
    output_pq = PriorityQueue(maxsize=3 * config.m_queue_size)
    # `worker` comes from core.detect (star import); each process reads
    # input_q and writes results to output_q.
    pool = Pool(config.m_pool_size, worker, (input_q, output_q))
    vs = cv2.VideoCapture(config.input_video_path)
    out = cv2.VideoWriter(config.output_video_path, cv2.VideoWriter_fourcc(*'XVID'), vs.get(cv2.CAP_PROP_FPS),
                          (int(vs.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vs.get(cv2.CAP_PROP_FRAME_HEIGHT))))
    countReadFrame = 0
    countWriteFrame = 1  # next frame number expected by the writer
    nFrame = int(vs.get(cv2.CAP_PROP_FRAME_COUNT))
    firstReadFrame = True
    firstTreatedFrame = True
    firstUsedFrame = True
    while True:
        # Stage 1: feed the input queue while it has room.
        # NOTE(review): if input_q is full on the very first iteration, `ret`
        # is referenced below before assignment — confirm queue sizing.
        if not input_q.full():
            ret, frame = vs.read()
            if ret:
                input_q.put((int(vs.get(cv2.CAP_PROP_POS_FRAMES)), frame))
                countReadFrame = countReadFrame + 1
                if firstReadFrame:
                    print(" --> Reading first frames from input file. Feeding input queue.\n")
                    firstReadFrame = False
        # Stage 2: move processed frames into the re-ordering priority queue.
        if not output_q.empty():
            output_pq.put(output_q.get())
            if firstTreatedFrame:
                print(" --> Recovering the first treated frame.\n")
                firstTreatedFrame = False
        # Stage 3: write/display the next in-order frame, or push it back
        # if an earlier frame has not arrived yet.
        if not output_pq.empty():
            prior, output_frame = output_pq.get()
            if prior > countWriteFrame:
                output_pq.put((prior, output_frame))
            else:
                countWriteFrame = countWriteFrame + 1
                output_rgb = cv2.cvtColor(output_frame, cv2.COLOR_RGB2BGR)
                out.write(output_rgb)
                cv2.imshow('frame', output_rgb)
                if firstUsedFrame:
                    print(" --> Start using recovered frame (displaying and/or writing).\n")
                    firstUsedFrame = False
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        print("Read frames: %-3i %% -- Write frame: %-3i %%" % (
            int(countReadFrame / nFrame * 100), int(countWriteFrame / nFrame * 100)), end='\r')
        # Done when the reader hit EOF and every queue has drained.
        # NOTE(review): bitwise & on bools works, but `and` would short-circuit.
        if (not ret) & input_q.empty() & output_q.empty() & output_pq.empty():
            break
    print(
        "\nFile have been successfully read and treated:\n --> {}/{} read frames \n --> {}/{} write frames \n".format(
            countReadFrame, nFrame, countWriteFrame - 1, nFrame))
    # Release the pool, the capture/writer handles and the display window.
    pool.terminate()
    vs.release()
    out.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Script entry point: run the full read → detect → write pipeline.
    video()
| StarcoderdataPython |
15290 | from collections import defaultdict, namedtuple
import torch
# When using the sliding window trick for long sequences,
# we take the representation of each token with maximal context.
# Take average of the BERT embeddings of these BPE sub-tokens
# as the embedding for the word.
# Take *weighted* average of the word embeddings through all layers.
def extract_bert_ques_hidden_states(all_encoder_layers, max_doc_len, features, weighted_avg=False):
    """Pool BERT sub-token hidden states into per-word question embeddings.

    When the sliding-window trick is used for long sequences, each token's
    representation is taken only from the chunk that gives it maximal
    context; the embeddings of a word's BPE sub-tokens are then averaged to
    form the word embedding.

    Args:
        all_encoder_layers: Tensor of shape
            (num_layers, batch, turn, num_chunk, max_token_len, bert_dim).
        max_doc_len: Number of word slots per turn in the output.
        features: Nested lists [example][turn][chunk]; each chunk feature
            carries ``token_is_max_context`` and ``token_to_orig_map`` dicts.
        weighted_avg: If True, keep the layer axis so the caller can apply a
            weighted average over layers; otherwise layers are mean-pooled.

    Returns:
        Tensor of shape (batch, turn, max_doc_len, bert_dim) — or with a
        leading num_layers axis when ``weighted_avg`` is True.  Words with no
        max-context sub-token remain zero.
    """
    num_layers, batch_size, turn_size, num_chunk, max_token_len, bert_dim = all_encoder_layers.shape
    # Allocate directly on the source tensor's device instead of filling on
    # the CPU and copying over (formerly torch.Tensor(...).fill_(0).to(dev)).
    out_features = torch.zeros(num_layers, batch_size, turn_size, max_doc_len, bert_dim,
                               device=all_encoder_layers.device)
    # Map BERT sub-tokens to doc words.  Sums and counts are accumulated per
    # turn and turned into averages as soon as the turn's chunks are done,
    # which removes the original second bookkeeping pass over token_count.
    for i, ex_feature in enumerate(features):  # Example
        for t, para_feature in enumerate(ex_feature):  # Turn
            para_token_count = defaultdict(int)
            for j, chunk_feature in enumerate(para_feature):  # Chunk
                for k, is_max in chunk_feature.token_is_max_context.items():  # Token
                    if not is_max:
                        continue
                    doc_word_idx = chunk_feature.token_to_orig_map[k]
                    out_features[:, i, t, doc_word_idx] += all_encoder_layers[:, i, t, j, k]
                    para_token_count[doc_word_idx] += 1
            for doc_word_idx, count in para_token_count.items():
                out_features[:, i, t, doc_word_idx] /= count
    # Average through all layers unless the caller wants per-layer weighting.
    if not weighted_avg:
        out_features = torch.mean(out_features, 0)
    return out_features
def extract_bert_ctx_hidden_states(all_encoder_layers, max_doc_len, features, weighted_avg=False):
    """Average BERT BPE sub-token states into per-word context embeddings.

    Only sub-token occurrences flagged as having maximal context (under the
    sliding-window chunking) contribute; their states are summed per
    original document word and divided by the contribution count.  Layers
    are mean-pooled unless ``weighted_avg`` is True, in which case the
    per-layer axis is kept for the caller to weight.
    """
    n_layers, n_batch, _n_chunk, _n_tok, dim = all_encoder_layers.shape
    pooled = torch.Tensor(n_layers, n_batch, max_doc_len, dim).fill_(0)
    if all_encoder_layers.is_cuda:
        pooled = pooled.to(all_encoder_layers.get_device())
    counts = []
    for ex_idx, chunks in enumerate(features):  # one example
        per_word = defaultdict(int)
        for chunk_idx, chunk in enumerate(chunks):  # one sliding-window chunk
            max_ctx = chunk.token_is_max_context
            for tok_idx in max_ctx:  # one BPE sub-token
                if not max_ctx[tok_idx]:
                    continue
                word_idx = chunk.token_to_orig_map[tok_idx]
                pooled[:, ex_idx, word_idx] += all_encoder_layers[:, ex_idx, chunk_idx, tok_idx]
                per_word[word_idx] += 1
        counts.append(per_word)
    # Second pass: turn the accumulated sums into averages.
    for ex_idx, per_word in enumerate(counts):
        for word_idx, n in per_word.items():
            pooled[:, ex_idx, word_idx] /= n
    if weighted_avg:
        return pooled
    return torch.mean(pooled, 0)
def convert_text_to_bert_features(text, bert_tokenizer, max_seq_length, doc_stride):
    """Split *text* into overlapping BERT chunks and build their features.

    BERT's input convention:
      (a) sequence pairs:
          tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
          type_ids: 0     0  0    0    0     0        0 0    1  1  1  1   1 1
      (b) single sequences (the case used here):
          tokens:   [CLS] the dog is hairy . [SEP]
          type_ids: 0     0   0   0  0     0 0
    """
    # Expand every word into its WordPiece sub-tokens, remembering which
    # original word each sub-token came from.
    sub_to_word = []
    all_sub_tokens = []
    for word_idx, word in enumerate(text):
        for piece in bert_tokenizer.wordpiece_tokenizer.tokenize(word.lower()):
            sub_to_word.append(word_idx)
            all_sub_tokens.append(piece)
    # Two slots are reserved for the [CLS] and [SEP] markers.
    max_tokens_for_doc = max_seq_length - 2
    # Documents longer than the maximum sequence length are covered with a
    # sliding window of spans advancing by `doc_stride`.
    _DocSpan = namedtuple(  # pylint: disable=invalid-name
        "DocSpan", ["start", "length"])
    doc_spans = []
    offset = 0
    total = len(all_sub_tokens)
    while offset < total:
        span_len = min(max_tokens_for_doc, total - offset)
        doc_spans.append(_DocSpan(start=offset, length=span_len))
        if offset + span_len == total:
            break
        offset += min(span_len, doc_stride)
    out_features = []
    for span_idx, span in enumerate(doc_spans):
        tokens = ["[CLS]"]
        token_to_orig_map = {}
        token_is_max_context = {}
        for pos in range(span.length):
            sub_idx = span.start + pos
            token_to_orig_map[len(tokens)] = sub_to_word[sub_idx]
            token_is_max_context[len(tokens)] = _check_is_max_context(
                doc_spans, span_idx, sub_idx)
            tokens.append(all_sub_tokens[sub_idx])
        tokens.append("[SEP]")
        # Single-sequence input: every position is segment 0.
        segment_ids = [0] * len(tokens)
        input_ids = bert_tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens; no padding is appended here.
        input_mask = [1] * len(input_ids)
        out_features.append(BertInputFeatures(
            doc_span_index=span_idx,
            tokens=tokens,
            token_to_orig_map=token_to_orig_map,
            token_is_max_context=token_is_max_context,
            input_ids=input_ids,
            input_mask=input_mask,
            segment_ids=segment_ids))
    return out_features
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
class BertInputFeatures(object):
    """A single set of BERT features of data (one sliding-window chunk)."""

    def __init__(self,
                 doc_span_index,
                 tokens,
                 token_to_orig_map,
                 token_is_max_context,
                 input_ids,
                 input_mask,
                 segment_ids):
        # Index of this chunk among the document's doc spans.
        self.doc_span_index = doc_span_index
        # WordPiece tokens including the [CLS]/[SEP] markers.
        self.tokens = tokens
        # token position -> index of the originating document word.
        self.token_to_orig_map = token_to_orig_map
        # token position -> True if this chunk gives the token max context.
        self.token_is_max_context = token_is_max_context
        # Vocabulary ids corresponding to ``tokens``.
        self.input_ids = input_ids
        # Attention mask: 1 for real tokens, 0 for padding.
        self.input_mask = input_mask
        # Segment (token type) ids; all zero for single sequences.
        self.segment_ids = segment_ids

    def __repr__(self):
        # Compact debug representation: sizes only, not full token lists.
        return ("BertInputFeatures(doc_span_index=%d, num_tokens=%d)"
                % (self.doc_span_index, len(self.tokens)))
| StarcoderdataPython |
from .version_requirements import is_installed
# Feature flag: True when a matplotlib recent enough for the plotting
# helpers (>= 3.0.3) is importable in the current environment.
has_mpl = is_installed("matplotlib", ">=3.0.3")
| StarcoderdataPython |
8115781 | import uuid
from django.contrib.auth.models import AbstractUser
from django.core.exceptions import ValidationError
from django.db import models
class Naan(models.Model):
    # Registry record for a minting authority; presumably a Name Assigning
    # Authority Number (NAAN) from the ARK identifier scheme — see the Ark
    # model's "ark:/" formatting below.
    naan = models.PositiveBigIntegerField(primary_key=True)  # the NAAN itself is the PK
    name = models.CharField(max_length=200)
    description = models.TextField()
    url = models.URLField()
    def __str__(self):
        # Human-readable label, e.g. for the Django admin.
        return f"{self.name} - {self.naan}"
class User(AbstractUser):
    # Standard Django user extended with an optional link to the NAAN
    # organization the user belongs to; PROTECT blocks deleting a Naan
    # that still has users attached.
    naan = models.ForeignKey(Naan, on_delete=models.PROTECT, null=True)
    def __str__(self):
        return self.username
class Key(models.Model):
    # API key credential scoped to a single NAAN; deleting the Naan
    # cascades to its keys.
    key = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    naan = models.ForeignKey(Naan, on_delete=models.CASCADE)
    active = models.BooleanField()
    def __str__(self):
        # Show only an 8-hex-digit prefix so the full key does not leak
        # into logs or the admin UI.
        return f"Key-{self.naan.naan}-{self.key.hex[:8]}..."
class Shoulder(models.Model):
    # A named sub-namespace ("shoulder") under a NAAN used to partition
    # assigned identifiers.
    # NOTE(review): on_delete=DO_NOTHING can leave dangling FK rows at the
    # database level if a Naan is removed — confirm this is intentional.
    shoulder = models.CharField(max_length=50)
    naan = models.ForeignKey(Naan, on_delete=models.DO_NOTHING)
    name = models.CharField(max_length=200)
    description = models.TextField()
    def __str__(self):
        # Rendered as the identifier prefix: "<naan><shoulder>".
        return f"{self.naan.naan}{self.shoulder}"
class Ark(models.Model):
    # A minted ARK identifier.  The identifier components are immutable
    # (editable=False); only the resolution url, metadata and commitment
    # text are meant to change after minting.
    ark = models.CharField(primary_key=True, max_length=200, editable=False)
    naan = models.ForeignKey(Naan, on_delete=models.DO_NOTHING, editable=False)
    shoulder = models.CharField(max_length=50, editable=False)
    assigned_name = models.CharField(max_length=100, editable=False)
    url = models.URLField()
    metadata = models.TextField()
    commitment = models.TextField()
    def clean(self):
        # Integrity check: the stored primary key must equal the
        # concatenation of its component parts.
        expected_ark = f"{self.naan.naan}{self.shoulder}{self.assigned_name}"
        if self.ark != expected_ark:
            raise ValidationError(f"expected {expected_ark} got {self.ark}")
    def __str__(self):
        return f"ark:/{self.ark}"
| StarcoderdataPython |
3207620 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['GlobalNetworkEndpointArgs', 'GlobalNetworkEndpoint']
@pulumi.input_type
class GlobalNetworkEndpointArgs:
    # NOTE: tfgen-generated input type (see file header); regenerate rather
    # than hand-edit.  Field access goes through pulumi.get/pulumi.set so
    # values may be pulumi Outputs, not plain Python values.
    def __init__(__self__, *,
                 global_network_endpoint_group: pulumi.Input[str],
                 port: pulumi.Input[int],
                 fqdn: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a GlobalNetworkEndpoint resource.
        :param pulumi.Input[str] global_network_endpoint_group: The global network endpoint group this endpoint is part of.
        :param pulumi.Input[int] port: Port number of the external endpoint.
        :param pulumi.Input[str] fqdn: Fully qualified domain name of network endpoint.
               This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        :param pulumi.Input[str] ip_address: IPv4 address external endpoint.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        """
        pulumi.set(__self__, "global_network_endpoint_group", global_network_endpoint_group)
        pulumi.set(__self__, "port", port)
        if fqdn is not None:
            pulumi.set(__self__, "fqdn", fqdn)
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
        if project is not None:
            pulumi.set(__self__, "project", project)
    @property
    @pulumi.getter(name="globalNetworkEndpointGroup")
    def global_network_endpoint_group(self) -> pulumi.Input[str]:
        """
        The global network endpoint group this endpoint is part of.
        """
        return pulumi.get(self, "global_network_endpoint_group")
    @global_network_endpoint_group.setter
    def global_network_endpoint_group(self, value: pulumi.Input[str]):
        pulumi.set(self, "global_network_endpoint_group", value)
    @property
    @pulumi.getter
    def port(self) -> pulumi.Input[int]:
        """
        Port number of the external endpoint.
        """
        return pulumi.get(self, "port")
    @port.setter
    def port(self, value: pulumi.Input[int]):
        pulumi.set(self, "port", value)
    @property
    @pulumi.getter
    def fqdn(self) -> Optional[pulumi.Input[str]]:
        """
        Fully qualified domain name of network endpoint.
        This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        """
        return pulumi.get(self, "fqdn")
    @fqdn.setter
    def fqdn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fqdn", value)
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        IPv4 address external endpoint.
        """
        return pulumi.get(self, "ip_address")
    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)
    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
@pulumi.input_type
class _GlobalNetworkEndpointState:
    # NOTE: tfgen-generated state type; every field is optional because this
    # shape is used for lookup/import (GlobalNetworkEndpoint.get), not for
    # resource creation.  Regenerate rather than hand-edit.
    def __init__(__self__, *,
                 fqdn: Optional[pulumi.Input[str]] = None,
                 global_network_endpoint_group: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 project: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering GlobalNetworkEndpoint resources.
        :param pulumi.Input[str] fqdn: Fully qualified domain name of network endpoint.
               This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        :param pulumi.Input[str] global_network_endpoint_group: The global network endpoint group this endpoint is part of.
        :param pulumi.Input[str] ip_address: IPv4 address external endpoint.
        :param pulumi.Input[int] port: Port number of the external endpoint.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        """
        if fqdn is not None:
            pulumi.set(__self__, "fqdn", fqdn)
        if global_network_endpoint_group is not None:
            pulumi.set(__self__, "global_network_endpoint_group", global_network_endpoint_group)
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if project is not None:
            pulumi.set(__self__, "project", project)
    @property
    @pulumi.getter
    def fqdn(self) -> Optional[pulumi.Input[str]]:
        """
        Fully qualified domain name of network endpoint.
        This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        """
        return pulumi.get(self, "fqdn")
    @fqdn.setter
    def fqdn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fqdn", value)
    @property
    @pulumi.getter(name="globalNetworkEndpointGroup")
    def global_network_endpoint_group(self) -> Optional[pulumi.Input[str]]:
        """
        The global network endpoint group this endpoint is part of.
        """
        return pulumi.get(self, "global_network_endpoint_group")
    @global_network_endpoint_group.setter
    def global_network_endpoint_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "global_network_endpoint_group", value)
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        IPv4 address external endpoint.
        """
        return pulumi.get(self, "ip_address")
    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)
    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        Port number of the external endpoint.
        """
        return pulumi.get(self, "port")
    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)
    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
class GlobalNetworkEndpoint(pulumi.CustomResource):
    # NOTE: tfgen-generated resource class; change the provider schema and
    # regenerate rather than editing this file by hand.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 fqdn: Optional[pulumi.Input[str]] = None,
                 global_network_endpoint_group: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        A Global Network endpoint represents a IP address and port combination that exists outside of GCP.
        **NOTE**: Global network endpoints cannot be created outside of a
        global network endpoint group.
        To get more information about GlobalNetworkEndpoint, see:
        * [API documentation](https://cloud.google.com/compute/docs/reference/rest/beta/networkEndpointGroups)
        * How-to Guides
            * [Official Documentation](https://cloud.google.com/load-balancing/docs/negs/)
        ## Example Usage
        ### Global Network Endpoint
        ```python
        import pulumi
        import pulumi_gcp as gcp
        neg = gcp.compute.GlobalNetworkEndpointGroup("neg",
            default_port=90,
            network_endpoint_type="INTERNET_FQDN_PORT")
        default_endpoint = gcp.compute.GlobalNetworkEndpoint("default-endpoint",
            global_network_endpoint_group=neg.name,
            fqdn="www.example.com",
            port=90)
        ```
        ## Import
        GlobalNetworkEndpoint can be imported using any of these accepted formats
        ```sh
         $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        ```sh
         $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default {{project}}/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        ```sh
         $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default {{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] fqdn: Fully qualified domain name of network endpoint.
               This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        :param pulumi.Input[str] global_network_endpoint_group: The global network endpoint group this endpoint is part of.
        :param pulumi.Input[str] ip_address: IPv4 address external endpoint.
        :param pulumi.Input[int] port: Port number of the external endpoint.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: GlobalNetworkEndpointArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        A Global Network endpoint represents a IP address and port combination that exists outside of GCP.
        **NOTE**: Global network endpoints cannot be created outside of a
        global network endpoint group.
        To get more information about GlobalNetworkEndpoint, see:
        * [API documentation](https://cloud.google.com/compute/docs/reference/rest/beta/networkEndpointGroups)
        * How-to Guides
            * [Official Documentation](https://cloud.google.com/load-balancing/docs/negs/)
        ## Example Usage
        ### Global Network Endpoint
        ```python
        import pulumi
        import pulumi_gcp as gcp
        neg = gcp.compute.GlobalNetworkEndpointGroup("neg",
            default_port=90,
            network_endpoint_type="INTERNET_FQDN_PORT")
        default_endpoint = gcp.compute.GlobalNetworkEndpoint("default-endpoint",
            global_network_endpoint_group=neg.name,
            fqdn="www.example.com",
            port=90)
        ```
        ## Import
        GlobalNetworkEndpoint can be imported using any of these accepted formats
        ```sh
         $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        ```sh
         $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default {{project}}/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        ```sh
         $ pulumi import gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint default {{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}
        ```
        :param str resource_name: The name of the resource.
        :param GlobalNetworkEndpointArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two @overload signatures above based on
        # whether an Args object or keyword properties were supplied.
        resource_args, opts = _utilities.get_resource_args_opts(GlobalNetworkEndpointArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 fqdn: Optional[pulumi.Input[str]] = None,
                 global_network_endpoint_group: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = GlobalNetworkEndpointArgs.__new__(GlobalNetworkEndpointArgs)
            __props__.__dict__["fqdn"] = fqdn
            if global_network_endpoint_group is None and not opts.urn:
                raise TypeError("Missing required property 'global_network_endpoint_group'")
            __props__.__dict__["global_network_endpoint_group"] = global_network_endpoint_group
            __props__.__dict__["ip_address"] = ip_address
            if port is None and not opts.urn:
                raise TypeError("Missing required property 'port'")
            __props__.__dict__["port"] = port
            __props__.__dict__["project"] = project
        super(GlobalNetworkEndpoint, __self__).__init__(
            'gcp:compute/globalNetworkEndpoint:GlobalNetworkEndpoint',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            fqdn: Optional[pulumi.Input[str]] = None,
            global_network_endpoint_group: Optional[pulumi.Input[str]] = None,
            ip_address: Optional[pulumi.Input[str]] = None,
            port: Optional[pulumi.Input[int]] = None,
            project: Optional[pulumi.Input[str]] = None) -> 'GlobalNetworkEndpoint':
        """
        Get an existing GlobalNetworkEndpoint resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] fqdn: Fully qualified domain name of network endpoint.
               This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        :param pulumi.Input[str] global_network_endpoint_group: The global network endpoint group this endpoint is part of.
        :param pulumi.Input[str] ip_address: IPv4 address external endpoint.
        :param pulumi.Input[int] port: Port number of the external endpoint.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Looked-up resources are hydrated through the all-optional state type.
        __props__ = _GlobalNetworkEndpointState.__new__(_GlobalNetworkEndpointState)
        __props__.__dict__["fqdn"] = fqdn
        __props__.__dict__["global_network_endpoint_group"] = global_network_endpoint_group
        __props__.__dict__["ip_address"] = ip_address
        __props__.__dict__["port"] = port
        __props__.__dict__["project"] = project
        return GlobalNetworkEndpoint(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def fqdn(self) -> pulumi.Output[Optional[str]]:
        """
        Fully qualified domain name of network endpoint.
        This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT.
        """
        return pulumi.get(self, "fqdn")
    @property
    @pulumi.getter(name="globalNetworkEndpointGroup")
    def global_network_endpoint_group(self) -> pulumi.Output[str]:
        """
        The global network endpoint group this endpoint is part of.
        """
        return pulumi.get(self, "global_network_endpoint_group")
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> pulumi.Output[Optional[str]]:
        """
        IPv4 address external endpoint.
        """
        return pulumi.get(self, "ip_address")
    @property
    @pulumi.getter
    def port(self) -> pulumi.Output[int]:
        """
        Port number of the external endpoint.
        """
        return pulumi.get(self, "port")
    @property
    @pulumi.getter
    def project(self) -> pulumi.Output[str]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")
| StarcoderdataPython |
6590595 | """ user administration
"""
from udinosaur.player import Player
def login(name="无名"):
    """Create and return a Player for the given display name."""
    return Player(name)
| StarcoderdataPython |
#!/usr/bin/env python3
# Payload Encoder With Different Encoders
# Author <NAME>
"""Interactive CLI that encodes a user-supplied payload with one of
several encoders: Base64, hex, URL-encoding, HTML entities, or hex
entities with semicolons (&#x..;)."""
import base64
import re
import sys
import string
import binascii
import urllib.parse
from colorama import Fore, Back, Style

print("Payload Encoders")
print("")
payload = input("Enter a Payload: ")
print("")
print(Fore.CYAN + " Custom Encode")
print("")
print(" 1. B64")
print(" 2. Hex")
print(" 3. URL_encode")
print(" 4. HTML Entities")
print(" 5. Hex With Semi Coloumns")
print("")
choose = int(input("Choose your Encode "))
# Reject anything outside the menu.  The original only checked `> 5`,
# silently letting 0 and negative numbers fall through to no output.
if choose < 1 or choose > 5:
    print("Wrong Choice!")
    sys.exit()
if choose == 1:
    # Base64 of the UTF-8 bytes; decode back to str so the output is the
    # encoded text itself rather than the b'...' bytes repr.
    print("")
    encoded = base64.standard_b64encode(payload.encode("utf-8")).decode("ascii")
    print(' ################## B64 String #######################')
    print('')
    print(Fore.YELLOW + encoded)
elif choose == 2:
    # Hex encoding of the UTF-8 bytes (banner previously mislabeled "URL").
    print("")
    encoded = binascii.b2a_hex(payload.encode("utf-8")).decode("ascii")
    print(' ################## Hex String #######################')
    print('')
    print(Fore.YELLOW + encoded)
elif choose == 3:
    # Percent (URL) encoding, with an optional second encoding pass.
    print("")
    encoded = urllib.parse.quote(payload.encode("utf8"))
    print(' ################## URL String #######################')
    print('')
    print(Fore.YELLOW + encoded)
    doublee = input("Double Encoding? y - n ")
    if "y" in doublee:
        print(Fore.BLUE + urllib.parse.quote_plus(encoded))
elif choose == 5:
    # Every character as an HTML hexadecimal entity, e.g. 'a' -> &#x61;.
    print("")
    entities = ''
    for ch in payload:
        entities += "&#x" + hex(ord(ch))[2:] + ";"
    print(entities)
    print(' ################## Hex With Semi #######################')
    print('')
    print(Fore.YELLOW + entities)
elif choose == 4:
    # HTML-entity encoding of XSS-relevant metacharacters.  The original
    # replacement targets and results had collapsed into no-ops (e.g.
    # replace("(", "(")) — the intended entities are restored here.
    print("")
    print(" 1. ()")
    print(" 2. all")
    print("")
    go = int(input(" Choose your Encode "))
    if go == 1:
        # Parentheses only.
        encoded = payload.replace("(", "&#40;").replace(")", "&#41;")
        print(Fore.YELLOW + encoded)
    elif go == 2:
        # Angle brackets, parentheses and both quote characters.
        encoded = (payload.replace("<", "&lt;").replace(">", "&gt;")
                   .replace("(", "&#40;").replace(")", "&#41;")
                   .replace('"', "&quot;").replace("'", "&#39;"))
        print(Fore.YELLOW + encoded)
    else:
        print(" Try Again")
8048841 | <reponame>hieast/sentry
from __future__ import absolute_import
from mock import Mock
import responses
from django.http import HttpRequest
from sentry.identity.vsts.provider import VSTSOAuth2CallbackView, AccountConfigView, AccountForm
from sentry.testutils import TestCase
from six.moves.urllib.parse import parse_qs
class TestVSTSOAuthCallbackView(TestCase):
    # Exercises the OAuth code-for-token exchange against a mocked VSTS
    # token endpoint (responses intercepts the outgoing HTTP call).
    @responses.activate
    def test_exchange_token(self):
        def redirect_url():
            return 'https://app.vssps.visualstudio.com/oauth2/authorize'
        view = VSTSOAuth2CallbackView(
            access_token_url='https://app.vssps.visualstudio.com/oauth2/token',
            client_id='vsts-client-id',
            client_secret='vsts-client-secret',
        )
        request = Mock()
        pipeline = Mock()
        pipeline.redirect_url = redirect_url
        responses.add(
            responses.POST, 'https://app.vssps.visualstudio.com/oauth2/token',
            json={
                'access_token': '<KEY>',
                'token_type': 'jwt-bearer',
                'expires_in': '3599',
                'refresh_token': '<PASSWORD>',
            },
        )
        result = view.exchange_token(request, pipeline, 'oauth-code')
        # Inspect the outgoing token request body: VSTS uses the JWT bearer
        # grant with the client secret passed as a client assertion.
        mock_request = responses.calls[0].request
        req_params = parse_qs(mock_request.body)
        assert req_params['grant_type'] == ['urn:ietf:params:oauth:grant-type:jwt-bearer']
        assert req_params['assertion'] == ['oauth-code']
        assert req_params['redirect_uri'] == ['https://app.vssps.visualstudio.com/oauth2/authorize']
        assert req_params['client_assertion_type'] == [
            'urn:ietf:params:oauth:client-assertion-type:jwt-bearer']
        assert req_params['client_assertion'] == ['vsts-client-secret']
        # NOTE(review): the expected values below do not match the mocked
        # response above — '<KEY>'/'<PASSWORD>' look like credential-scrub
        # placeholders introduced into this copy of the file; confirm the
        # original fixture values before relying on these assertions.
        assert result['access_token'] == 'xxxxxxxxx'
        assert result['token_type'] == 'jwt-bearer'
        assert result['expires_in'] == '3599'
        assert result['refresh_token'] == '<PASSWORD>'
class TestAccountConfigView(TestCase):
    # Tests the VSTS account-selection pipeline step: fetching the list of
    # accounts and binding the chosen account/instance into pipeline state.
    def setUp(self):
        # Two fake VSTS accounts as returned by the accounts API.
        self.accounts = [
            {
                'AccountId': '1234567-89',
                'NamespaceId': '00000000-0000-0000-0000-000000000000',
                'AccountName': 'sentry',
                'OrganizationName': None,
                'AccountType': 0,
                'AccountOwner': '00000000-0000-0000-0000-000000000000',
                'CreatedBy': '00000000-0000-0000-0000-000000000000',
                'CreatedDate': '0001-01-01T00:00:00',
                'AccountStatus': 0,
                'StatusReason': None,
                'LastUpdatedBy': '00000000-0000-0000-0000-000000000000',
                'Properties': {},
            },
            {
                'AccountId': '1234567-8910',
                'NamespaceId': '00000000-0000-0000-0000-000000000000',
                'AccountName': 'sentry2',
                'OrganizationName': None,
                'AccountType': 0,
                'AccountOwner': '00000000-0000-0000-0000-000000000000',
                'CreatedBy': '00000000-0000-0000-0000-000000000000',
                'CreatedDate': '0001-01-01T00:00:00',
                'AccountStatus': 0,
                'StatusReason': None,
                'LastUpdatedBy': '00000000-0000-0000-0000-000000000000',
                'Properties': {},
            },
        ]
        # Registered here so every @responses.activate test in this class
        # sees the mocked accounts endpoint.
        responses.add(
            responses.GET,
            'https://app.vssps.visualstudio.com/_apis/accounts',
            json=self.accounts,
            status=200,
        )
    @responses.activate
    def test_dispatch(self):
        view = AccountConfigView()
        request = HttpRequest()
        # User picked the second account in the form POST.
        request.POST = {'account': '1234567-8910'}
        pipeline = Mock()
        pipeline.state = {'accounts': self.accounts}
        pipeline.fetch_state = lambda key: pipeline.state[key]
        pipeline.bind_state = lambda name, value: pipeline.state.update({name: value})
        view.dispatch(request, pipeline)
        # The instance hostname is derived from the selected account name.
        assert pipeline.fetch_state(key='instance') == 'sentry2.visualstudio.com'
        assert pipeline.fetch_state(key='account') == self.accounts[1]
        assert pipeline.next_step.call_count == 1
    @responses.activate
    def test_get_accounts(self):
        view = AccountConfigView()
        accounts = view.get_accounts('access-token')
        assert accounts[0]['AccountName'] == 'sentry'
        assert accounts[1]['AccountName'] == 'sentry2'
    def test_account_form(self):
        # Form choices are (AccountId, AccountName) pairs in source order.
        account_form = AccountForm(self.accounts)
        assert account_form.fields['account'].choices == [
            ('1234567-89', 'sentry'), ('1234567-8910', 'sentry2')]
| StarcoderdataPython |
9724077 | <reponame>alexgallego1997/GamestonkTerminal
""" Seeking Alpha View """
__docformat__ = "numpy"
import argparse
from typing import List
import pandas as pd
from gamestonk_terminal.helper_funcs import (
check_positive,
parse_known_args_and_warn,
)
from gamestonk_terminal.discovery import seeking_alpha_model
def earnings_release_dates_view(other_args: List[str]):
    """Prints a data frame with earnings release dates

    Parameters
    ----------
    other_args : List[str]
        argparse other args - ["-p", "20", "-n", "5"]
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        prog="up_earnings",
        description="""Upcoming earnings release dates. [Source: Seeking Alpha]""",
    )
    parser.add_argument(
        "-p",
        "--pages",
        action="store",
        dest="n_pages",
        type=check_positive,
        default=10,
        help="Number of pages to read upcoming earnings from in Seeking Alpha website.",
    )
    parser.add_argument(
        "-n",
        "--num",
        action="store",
        dest="n_num",
        type=check_positive,
        default=3,
        help="Number of upcoming earnings release dates to print",
    )
    ns_parser = parse_known_args_and_warn(parser, other_args)
    if not ns_parser:
        return
    df_earnings = seeking_alpha_model.get_next_earnings(ns_parser.n_pages)
    # Show ticker/name columns at full width.
    pd.set_option("display.max_colwidth", None)
    # Only the first n_num distinct release dates are displayed.
    upcoming_dates = df_earnings.index.unique()[: ns_parser.n_num]
    for earning_date in upcoming_dates:
        print(f"Earning Release on {earning_date.date()}")
        print("----------------------------------------------")
        same_day = df_earnings[earning_date == df_earnings.index]
        print(same_day[["Ticker", "Name"]].to_string(index=False, header=False))
        print("")
| StarcoderdataPython |
81436 | # Copyright (c) 2018 Sony Pictures Imageworks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A frame list based on AbstractTreeWidget
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from builtins import map
import time
from PySide2 import QtCore
from PySide2 import QtGui
from PySide2 import QtWidgets
import opencue
import cuegui.AbstractTreeWidget
import cuegui.AbstractWidgetItem
import cuegui.Constants
import cuegui.Logger
import cuegui.MenuActions
import cuegui.Utils
logger = cuegui.Logger.getLogger(__file__)
class ProcMonitorTree(cuegui.AbstractTreeWidget.AbstractTreeWidget):
    """Tree widget listing running procs fetched from the cuebot.

    Refreshing is driven by a tick timer; no query is issued until the
    user sets at least one search filter (see _getUpdate).
    """

    def __init__(self, parent):
        # Columns are registered before the base-class __init__ below;
        # presumably AbstractTreeWidget consumes them during construction
        # (ordering kept as-is -- confirm before reordering).
        self.startColumnsForType(cuegui.Constants.TYPE_PROC)
        self.addColumn("Name", 150, id=1,
                       data=lambda proc: proc.data.name,
                       tip="Name of the running proc.")
        self.addColumn("Cores", 50, id=2,
                       data=lambda proc: ("%.2f" % proc.data.reserved_cores),
                       tip="The number of cores reserved.")
        self.addColumn("Mem Reserved", 100, id=3,
                       data=lambda proc: cuegui.Utils.memoryToString(proc.data.reserved_memory),
                       tip="The amount of memory reserved.")
        self.addColumn("Mem Used", 100, id=4,
                       data=lambda proc: cuegui.Utils.memoryToString(proc.data.used_memory),
                       tip="The amount of memory used.")
        # NOTE(review): column label and tip say "used" but the data lambda
        # reads reserved_gpu -- confirm whether a used-gpu field was intended.
        self.addColumn("GPU Used", 100, id=5,
                       data=lambda proc: cuegui.Utils.memoryToString(proc.data.reserved_gpu),
                       tip="The amount of gpu memory used.")
        self.addColumn("Age", 60, id=6,
                       data=lambda proc: cuegui.Utils.secondsToHHHMM(time.time() - proc.data.dispatch_time),
                       tip="The age of the running frame.")
        self.addColumn("Unbooked", 80, id=7,
                       data=lambda proc: proc.data.unbooked,
                       tip="If the proc has been unbooked.\n If it is unbooked then"
                           "when the frame finishes the job will stop using this proc")
        self.addColumn("Name", 300, id=8,
                       data=lambda proc: proc.data.frame_name,
                       tip="The name of the proc, includes frame number and layer name.")
        self.addColumn("Job", 50, id=9,
                       data=lambda proc: proc.data.job_name,
                       tip="The job that this proc is running on.")

        # Empty search object: with no criteria set, _getUpdate returns [].
        self.procSearch = opencue.search.ProcSearch()

        cuegui.AbstractTreeWidget.AbstractTreeWidget.__init__(self, parent)

        # Used to build right click context menus
        self.__menuActions = cuegui.MenuActions.MenuActions(
            self, self.updateSoon, self.selectedObjects)
        self.itemClicked.connect(self.__itemSingleClickedCopy)

        # Don't use the standard space bar to refresh
        QtGui.qApp.request_update.connect(self.updateRequest)

        self.startTicksUpdate(40)
        # Don't start refreshing until the user sets a filter or hits refresh
        self.ticksWithoutUpdate = -1
        self.enableRefresh = False

    def tick(self):
        """Timer callback: refreshes the list once updateInterval ticks have
        elapsed and the window is not minimized; otherwise just advances the
        tick counter (only while auto-refresh is enabled)."""
        if self.ticksWithoutUpdate >= self.updateInterval and \
           not self.window().isMinimized():
            self.ticksWithoutUpdate = 0
            self._update()
            return

        if (self.enableRefresh and
                self.updateInterval + 1 >= self.ticksWithoutUpdate >= 0):
            self.ticksWithoutUpdate += 1

    def facilityChanged(self):
        """Called when the facility is changed and removes then updates the proc
        list"""
        self.removeAllItems()
        self._update()

    def __itemSingleClickedCopy(self, item, col):
        """Called when an item is clicked on. Copies selected object names to
        the middle click selection clip board.
        @type  item: QTreeWidgetItem
        @param item: The item clicked on
        @type  col: int
        @param col: The column clicked on"""
        selected = [proc.data.name for proc in self.selectedObjects() if cuegui.Utils.isProc(proc)]
        if selected:
            QtWidgets.QApplication.clipboard().setText(",".join(selected))

    def clearFilters(self):
        """Resets the search criteria, sort order and removes all rows."""
        self.clearSelection()
        self.procSearch = opencue.search.ProcSearch()
        self.sortByColumn(0, QtCore.Qt.AscendingOrder)
        self.removeAllItems()

    def updateRequest(self):
        """Updates the items in the TreeWidget if sufficient time has passed
        since last updated"""
        # 999 guarantees the next tick() exceeds updateInterval and refreshes.
        self.ticksWithoutUpdate = 999

    def _getUpdate(self):
        """Returns the proper data from the cuebot.

        Returns an empty list unless at least one search criterion is set,
        to avoid querying for every proc in the facility."""
        try:
            # Refuse to update if no search criteria is defined
            # NOTE(review): 'durationRange' is camelCase while the other keys
            # are snake_case -- confirm against ProcSearch.options keys.
            if not self.procSearch.options.get('max_results') and \
               not self.procSearch.options.get('host') and \
               not self.procSearch.options.get('job') and \
               not self.procSearch.options.get('layer') and \
               not self.procSearch.options.get('show') and \
               not self.procSearch.options.get('alloc') and \
               not self.procSearch.options.get('memory_range') and \
               not self.procSearch.options.get('durationRange'):
                return []
            return opencue.api.getProcs(**self.procSearch.options)
        except Exception as e:
            # Log and degrade gracefully: an API failure leaves the list empty.
            list(map(logger.warning, cuegui.Utils.exceptionOutput(e)))
            return []

    def _createItem(self, object, parent=None):
        """Creates and returns the proper item
        @type  object: Proc
        @param object: The object for this item
        @type  parent: QTreeWidgetItem
        @param parent: Optional parent for this item
        @rtype:  QTreeWidgetItem
        @return: The created item"""
        if not parent:
            parent = self
        return ProcWidgetItem(object, parent)

    def contextMenuEvent(self, e):
        """When right clicking on an item, this raises a context menu"""
        menu = QtWidgets.QMenu()
        self.__menuActions.procs().addAction(menu, "view")
        self.__menuActions.procs().addAction(menu, "unbook")
        self.__menuActions.procs().addAction(menu, "kill")
        self.__menuActions.procs().addAction(menu, "unbookKill")
        menu.exec_(e.globalPos())
class ProcWidgetItem(cuegui.AbstractWidgetItem.AbstractWidgetItem):
    """Tree widget item representing one proc row in ProcMonitorTree."""

    def __init__(self, object, parent):
        # Tags the item as TYPE_PROC so the shared column definitions apply.
        cuegui.AbstractWidgetItem.AbstractWidgetItem.__init__(
            self, cuegui.Constants.TYPE_PROC, object, parent)
| StarcoderdataPython |
5190188 | # coding=utf-8
# Created by OhBonsai at 2018/3/13
def add_fixture(db_session, fixture):
    """Stage a single fixture object on the session and commit it."""
    db_session.add(fixture)
    db_session.commit()
def add_fixtures(db_session, *fixtures):
    """Stage any number of fixture objects in one batch and commit them."""
    batch = fixtures
    db_session.add_all(batch)
    db_session.commit()
| StarcoderdataPython |
3585535 | #!/usr/bin/env python3
#
# Copyright (C) 2020 Wind River Systems, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os.path
import sys
# Strip a trailing ".real" (5 chars) from argv[0] -- presumably this script
# is invoked through a wrapper under a "<name>.real" filename, and downstream
# code should see the public command name. TODO confirm against the installer.
if sys.argv[0].endswith('.real'):
    sys.argv[0] = sys.argv[0][:-5]
import glob
from genimage.constant import DEFAULT_MACHINE
def add_path():
    """Prepend the SDK's tool directories to PATH and ensure the OECORE
    sysroot environment variables are set (existing values are kept)."""
    base = os.path.abspath(os.path.dirname(__file__) + '/../../../../..')
    # Each directory is prepended in turn, so later entries in this tuple
    # end up earlier in the resulting PATH.
    for subdir in ("usr/bin/crossscripts", "usr/bin", "usr/sbin", "bin", "sbin"):
        os.environ['PATH'] = os.path.join(base, subdir) + ":" + os.environ['PATH']
    os.environ.setdefault('OECORE_NATIVE_SYSROOT', base)
    if 'OECORE_TARGET_SYSROOT' not in os.environ:
        parent = os.path.abspath(base + "/..")
        os.environ['OECORE_TARGET_SYSROOT'] = os.path.join(parent, DEFAULT_MACHINE)
# Run before the genimage.* imports below -- presumably they rely on the
# PATH/sysroot environment this sets up at import time; confirm before moving.
add_path()
from genimage.genimage import set_subparser
from genimage.genyaml import set_subparser_genyaml
from genimage.exampleyamls import set_subparser_exampleyamls
from genimage.geninitramfs import set_subparser_geninitramfs
from genimage.gencontainer import set_subparser_gencontainer
from genimage.genimage import main
from genimage.genyaml import main_genyaml
from genimage.exampleyamls import main_exampleyamls
from genimage.geninitramfs import main_geninitramfs
from genimage.gencontainer import main_gencontainer
# Public API of this wrapper module: one argparse sub-parser installer and
# one entry point per generator tool (image, yaml, initramfs, container).
__all__ = [
    "set_subparser",
    "set_subparser_exampleyamls",
    "set_subparser_genyaml",
    "set_subparser_geninitramfs",
    "set_subparser_gencontainer",
    "main",
    "main_exampleyamls",
    "main_genyaml",
    "main_geninitramfs",
    "main_gencontainer",
]
| StarcoderdataPython |
9795696 | <filename>ngrams/src/ngrams.py
# !/usr/bin/python
# -*- coding:utf-8 -*-
# @author: <NAME>
# @date: 2017-11-23 Thursday
# @email: <EMAIL>
import nltk
from nltk import word_tokenize
from nltk.util import ngrams
from collections import Counter
import codecs
import json
import re
def ngrams_nltk(text, n):
    """Return a dict mapping each n-gram tuple of *text* to its count,
    using nltk.util.ngrams for the windowing."""
    # nltk.word_tokenize was noted as too slow here; a plain
    # word-character regex over the lowercased text is used instead.
    tokens = re.findall(r'\w+', text.lower())
    return dict(Counter(ngrams(tokens, n)))
def ngrams_pure(text, n):
    """Return a dict mapping each n-gram tuple of *text* to its count,
    using a hand-rolled sliding window (no nltk dependency)."""
    tokens = re.findall(r'\w+', text.lower())
    # Slide a window of width n over the token list; if there are fewer
    # than n tokens the range is empty and the result is {}.
    windows = [tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1)]
    return dict(Counter(windows))
def main():
    """Count bigrams and trigrams over a corpus file and print sample stats.

    Python 2 only: the body uses print statements.
    """
    # Inline sample text -- immediately overwritten by the corpus read below.
    text = "I need to write a program in NLTK that breaks a corpus (a large collection of txt files) into unigrams, bigrams, trigrams, fourgrams and fivegrams. I need to write a program in NLTK that breaks a corpus"
    corpus_path = '../../spell-checker/data/big.txt'
    bigrams_path = '../../n-grams/data/count/bigrams.txt'
    trigrams_path = '../../n-grams/data/count/trigrams.txt'
    with open(corpus_path, 'r') as corpus_file:
        text = corpus_file.read()
    bigrams = ngrams_pure(text, 2)
    trigrams = ngrams_pure(text, 3)
    # Spot-check: print the count of one specific bigram.
    pre1 = 'little'
    pre2 = 'note'
    biwords = (pre1, pre2)
    print bigrams[biwords]
    # Total occurrences vs. number of distinct n-grams.
    print sum(bigrams.values())
    print sum(trigrams.values())
    print len(bigrams)
    print len(trigrams)
    # Disabled: dump the raw count dicts to the paths above as JSON-ish text.
    # with open(bigrams_path, 'w') as bigrams_file:
    #     bigrams = json.dumps(str(bigrams), ensure_ascii=False)
    #     bigrams_file.write(bigrams)
    # with open(trigrams_path, 'w') as trigrams_file:
    #     trigrams = json.dumps(str(trigrams), ensure_ascii=False)
    #     trigrams_file.write(trigrams)
# Run the demo only when executed directly as a script.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.