id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
116024 | """
This subpackage is for providing the data to the controllers
"""
| StarcoderdataPython |
3222876 | <filename>firmwire/vendor/__init__.py
## Copyright (c) 2022, Team FirmWire
## SPDX-License-Identifier: BSD-3-Clause
# Import vendor plugins
import firmwire.vendor.shannon
import firmwire.vendor.mtk
| StarcoderdataPython |
41891 | <filename>src/pyrin/cli/core/template/__init__.py
# -*- coding: utf-8 -*-
"""
cli core template package.
"""
from pyrin.packaging.base import Package
class CLICoreTemplatePackage(Package):
    """
    cli core template package class.

    Registers this package with pyrin's packaging system; NAME is the dotted
    module path used as the package identifier.
    """
    # NOTE(review): Package comes from pyrin.packaging.base; all registration
    # behavior beyond the NAME attribute is defined there.
    NAME = __name__
| StarcoderdataPython |
103107 | <filename>corelib/units/volume.py
# coding: utf-8
r"""Volume conversions"""
from corelib.units.base import create_code
# Conversion factors from each supported unit name to cubic meters (m3).
# Several alias/plural spellings map to the same factor so that e.g. "l",
# "litre" and "liters" all work.  Pint/quart/gallon are the imperial units.
volumes = {"m3": 1., "cubic_meter": 1., "cubic_meters": 1.,
           "l": 0.001, "litre": 0.001, "liter": 0.001, "litres": 0.001, "liters": 0.001,
           "cm3": 1e-6, "centimeter_cube": 1e-6, "centimeters_cube": 1e-6,
           "ml": 1e-6, "millilitre": 1e-6, "millilitres": 1e-6, "milliliter": 1e-6, "milliliters": 1e-6,
           "mm3": 1e-9, "millimeter_cube": 1e-9, "millimeters_cube": 1e-9,
           "pt": 568.26125 * 1e-6, "pint": 568.26125 * 1e-6, "pints": 568.26125 * 1e-6,
           "qt": 1136.5225 * 1e-6, "quart": 1136.5225 * 1e-6, "quarts": 1136.5225 * 1e-6,
           "gal": 4546.09 * 1e-6, "gallon": 4546.09 * 1e-6, "gallons": 4546.09 * 1e-6}

# Generate one module-level conversion function per unit name via exec.
# Iterating the dict directly is the idiomatic form of `.keys()`.
for k in volumes:
    exec(create_code("volumes", k), globals())
def convert(value, to_unit, from_unit):
    r"""Convert *value* from *from_unit* to *to_unit*.

    Convenience wrapper for cases where both units are given in string
    form; dispatches to the module-level conversion functions generated by
    ``create_code`` above.

    Parameters
    ----------
    value : float or int
    to_unit : str
        The desired unit
    from_unit : str
        The input unit
    """
    converter = globals()[to_unit]
    return converter(**{from_unit: value})
| StarcoderdataPython |
3225537 | <gh_stars>0
"""
A supporting module that provides a routine to integrate the differential hmf in a robust manner.
"""
from scipy.interpolate import InterpolatedUnivariateSpline as _spline
import numpy as np
import scipy.integrate as intg
class NaNException(Exception):
    """Raised when too few finite (non-NaN) values remain in the dn/dm array."""
def hmf_integral_gtm(M, dndm, mass_density=False):
    """
    Cumulatively integrate dn/dm.

    Parameters
    ----------
    M : array_like
        Array of masses.
    dndm : array_like
        Array of dn/dm (corresponding to M)
    mass_density : bool, `False`
        Whether to calculate mass density (or number density).

    Returns
    -------
    ngtm : array_like
        Cumulative integral of dndm.

    Raises
    ------
    NaNException
        If fewer than 4 non-NaN entries remain in dndm.

    Examples
    --------
    Using a simple power-law mass function:

    >>> import numpy as np
    >>> m = np.logspace(10,18,500)
    >>> dndm = m**-2
    >>> ngtm = hmf_integral_gtm(m,dndm)
    >>> np.allclose(ngtm,1/m) #1/m is the analytic integral to infinity.
    True

    The function always integrates to m=1e18, and extrapolates with a spline
    if data not provided:

    >>> m = np.logspace(10,12,500)
    >>> dndm = m**-2
    >>> ngtm = hmf_integral_gtm(m,dndm)
    >>> np.allclose(ngtm,1/m) #1/m is the analytic integral to infinity.
    True
    """
    # SciPy renamed simps/cumtrapz to simpson/cumulative_trapezoid and removed
    # the old names in SciPy 1.14; resolve whichever pair is available so the
    # routine works on both old and new SciPy.  (The old `even='first'`
    # keyword was also removed; the default even-interval handling is used.)
    simpson = getattr(intg, "simpson", None) or intg.simps
    cumulative_trapezoid = getattr(intg, "cumulative_trapezoid", None) or intg.cumtrapz

    # Eliminate NaN's
    keep = np.logical_not(np.isnan(dndm))
    m = M[keep]
    dndm = dndm[keep]
    dndlnm = m * dndm

    if len(m) < 4:
        raise NaNException("There are too few real numbers in dndm: len(dndm) = %s, #NaN's = %s" % (len(M), len(M) - len(dndm)))

    # Grid spacing in ln(m); the input grid is assumed logarithmically uniform.
    dlnm = np.log(m[1]) - np.log(m[0])

    # Calculate the mass function (and its integral) from the highest M up to
    # 10**18, extrapolating with a linear (k=1) spline in log-log space.
    if m[-1] < m[0] * 10 ** 18 / m[3]:
        m_upper = np.arange(np.log(m[-1]), np.log(10 ** 18), dlnm)
        # Guard the case where m[-1] is already (numerically) at 1e18: the
        # arange above is then empty and there is nothing to extrapolate.
        if m_upper.size >= 2:
            mf_func = _spline(np.log(m), np.log(dndlnm), k=1)
            mf = mf_func(m_upper)

            if not mass_density:
                int_upper = simpson(np.exp(mf), dx=dlnm)
            else:
                int_upper = simpson(np.exp(m_upper + mf), dx=dlnm)
        else:
            int_upper = 0
    else:
        int_upper = 0

    # Calculate the cumulative integral (backwards) of [m*]dndlnm, appending
    # a final zero so the result has the same length as m.
    if not mass_density:
        ngtm = np.concatenate((cumulative_trapezoid(dndlnm[::-1], dx=dlnm)[::-1], np.zeros(1)))
    else:
        ngtm = np.concatenate((cumulative_trapezoid(m[::-1] * dndlnm[::-1], dx=dlnm)[::-1], np.zeros(1)))

    return (ngtm + int_upper)
| StarcoderdataPython |
1760057 | <reponame>fredmorcos/attic
class Emulator:
    """
    This class is used to parse simulation output to find a certain posedge
    clock cycle. In other words, a clock cycle emulator.

    Simulation data is one line per event, colon-separated:
    ``time:clk:value1,value2,...`` or ``mw:addr,value`` for memory writes.
    """

    def __init__(self, machine, sim_data):
        """
        Machine is the simulation's machine.
        SimData cannot be empty.
        """
        # Bind the machine/logger first: the empty-data check below needs the
        # logger (the original assigned self.logger *after* using it, which
        # raised AttributeError instead of reporting 'empty sim data').
        self.machine = machine
        self.logger = machine.settings['logger']
        if sim_data.strip() == '':
            self.logger.die('empty sim data')
        self.data = sim_data.splitlines()

    def parse_line(self, row=0):
        """
        Returns splitted line at row.
        """
        return self.data[row].strip().split(':')

    def check_cycle_line(self, line):
        """
        Some sanity checks on split line (that time and clk are integers).
        Returns (time, clk) on success.
        """
        # Narrowed from bare except: only conversion/field errors are expected
        # here; anything else should propagate as a real bug.
        try:
            time = int(line[0])
        except (ValueError, IndexError):
            self.logger.die('first col (time) in sim output not int')
        try:
            clk = int(line[1], 2)
        except (ValueError, IndexError):
            self.logger.die('second col (clk) in sim output not bin')
        self.logger.info('emulator pass, time=%s, clk=%s' % (time, clk))
        return time, clk

    def advance(self, step=0):
        """
        Moves through the simulation data, parsing every line. Works
        according to the emulator state machine:
        START: Get 0 -> S0
        S0: Get 0 -> S0
        S0: Get 1 -> S1
        S1: Get 1 -> S1
        S1: Get 0 -> S0, step++, machine.load_values()
        END when counter == wanted_steps or premature_finish
        Returns True for success
        Returns False for premature finish

        NOTE(review): the step == 0 path returns None (falsy), not True,
        which contradicts the docstring; preserved for compatibility.
        """
        if len(self.data) == 0:
            self.logger.die('empty sim data')
        if step < 0:
            self.logger.die('bad step number %s' % step)
        if step == 0:
            # Apply the very first line directly, no state machine needed.
            tmp_line = self.parse_line()
            if tmp_line[0] == 'mw':
                self.logger.die('step 0 is a memwrite operation')
            self.check_cycle_line(tmp_line)
            self.machine.load_mem()
            self.machine.load_values(tmp_line[2].split(','))
            self.machine.unmark_all()
        else:
            i = 0
            row = 0
            state = 0
            self.machine.load_mem()
            while i < step:
                if row == len(self.data) - 1:
                    return False
                self.machine.unmark_all()
                tmp_line = self.parse_line(row)
                if tmp_line[0] == 'mw':
                    self.machine.mem_write(tmp_line[1].split(','))
                else:
                    time, clk = self.check_cycle_line(tmp_line)
                    # Rising edge: S0 -> S1 (redundant inner `if clk == 1`
                    # removed; the outer condition already guarantees it).
                    if state == 0 and clk == 1:
                        state = 1
                    # Falling edge: S1 -> S0, one full posedge cycle done.
                    elif state == 1 and clk == 0:
                        state = 0
                        i += 1
                        # Apply the values from the line *before* the falling
                        # edge, i.e. the last line of the completed cycle.
                        prev_line = self.parse_line(row - 1)
                        self.logger.info('applying line at time=%s, clk=%s' %
                                         (prev_line[0], prev_line[1]))
                        self.machine.load_values(prev_line[2].split(','))
                row += 1
            return True
| StarcoderdataPython |
150066 | <reponame>thiagofigcosta/Pytho-
#!/bin/python3
# NOTE(review): This file is written in "Pytho{}", a Python dialect that
# replaces indentation-based blocks with curly braces. It is NOT valid
# CPython and must be preprocessed/transpiled before execution; only the
# region between the 'Start/End regular Python' markers is plain Python.
import math
import sys
import basic_external_file as ext
import basic_external_regularpy_file as py
print('This is Pytho{\}')
print('')
print('Running over Python {}.{}.{}'.format(sys.version_info[0],sys.version_info[1],sys.version_info[2]))
print('Several tabs here')
print('Is 13 greater than 14?')
if 13 > 14{ # comments after curly brackets
print('Unfortunately not')
} else { # fixed else if whitespace
print('NEVER')
}
print('')
print('10 reasons why you should use Pytho{\}')
for i in range(10){
print ("{} Because I rate : and tab".format(i)) # no ident here
}
# Dictionary literals still use braces as in regular Python.
empty_dictionary = {}
single_line_dictionary={"do we support dictionaries?":"Yes, we do support dictionaries"}
multi_line_dictionary={
"Pytho{\}":"Rocks"
}
print()
print(single_line_dictionary['do we support dictionaries?'])
for key,value in multi_line_dictionary.items(){
print(key,value)
}
ext.print_ext_file()
dict_arr=[]
dict_arr.append({'dictionaries':'appended inline to array are now supported'})
dict_arr.append({'help us to improve':'send issues'})
dict_arr.append({'Workaround Oriented Programming':'for the win'})
print()
for dictionary in dict_arr{
for k,v in dictionary.items(){
print('{} {}'.format(k,v))
}
}
py.print_lines()
# Pytho{\}: Start regular Python
for i in range(2):
    print()
text='It is also possible to mix the syntaxes when the regular python syntax is in between \'# Pytho{\}: Start regular Python\' and \'# Pytho{\}: End regular Python\' lines'
out=''
for c in list(text):
    out+=c
print (out)
# Pytho{\}: End regular Python
def printSomething(arg=''){
print('Print inside function - '+arg)
}
print()
if 'in line ifs works' == 'in line ifs works' { print ('Inline ifs working')}
if 1==1 {
if ('in line ifs works' == 'in line ifs works') { printSomething ('Inline if()s working')}
} | StarcoderdataPython |
3244287 | <gh_stars>0
import streamlit as st
import leafmap.kepler as leafmap
import geopandas as gpd
def app():
    """Streamlit page: interactive Kepler map of Finnish zoning plan features."""
    st.title("Kaavoituskohteet")
    st.markdown(
        """
    Väritä ja visualisoi asemakaava-aineistoa kartan vasemmasta yläkulmasta avautuvan työkalupakin avulla.
    """
    )

    # Kepler map centred on Espoo.
    map_view = leafmap.Map(center=[60.174, 24.802], zoom=14.5, height=600, widescreen=False)

    # Fetch plan features from the pygeoapi test service and keep only the
    # columns that should be visualized.
    features = gpd.read_file("http://pygeoapi-testing.gispocoding.fi/collections/koonti_koko_suomi_kaavakohteet/items?f=json&limit=1000")
    display_columns = ["kaavoitusteema", "kaavamaarayslaji", "tekstiarvo", "numeerinen_arvo", "mittayksikko", "geometry"]
    selected = features[display_columns]

    map_view.add_gdf(selected, layer_name="Espoo")
    map_view.to_streamlit(height=700)
| StarcoderdataPython |
95534 | <reponame>Wikia/ask-fandom<gh_stars>1-10
"""
SemanticMediaWiki based intents
"""
from .base import SemanticFandomIntent
from .tv_series import EpisodeFactIntent, PersonFactIntent
from. wowwiki import WoWGroupsMemberIntent
| StarcoderdataPython |
196119 | # web-app for API image manipulation
from flask import Flask, request, render_template, send_from_directory
import os
from PIL import Image
import tensorflow as tf
import cv2
import numpy as np
from model import generator_model
# Flask application instance and the absolute directory of this file
# (used to build filesystem paths for the static/images folder).
app = Flask(__name__)
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
# default access page
@app.route("/")
def main():
    """Serve the landing page."""
    return render_template('index.html')
# upload selected image and forward to processing page
@app.route("/upload", methods=["POST"])
def upload():
    """Save the posted image into static/images and show the processing page.

    Returns the processing page on success, or the error page with HTTP 400
    when the uploaded file does not have a supported image extension.
    """
    target = os.path.join(APP_ROOT, 'static/images/')

    # create image directory if not found
    if not os.path.isdir(target):
        os.mkdir(target)

    # retrieve file from html file-picker
    upload = request.files.getlist("file")[0]
    print("File name: {}".format(upload.filename))
    filename = upload.filename

    # file support verification — compare case-insensitively so that e.g.
    # ".JPG"/".PNG" uploads are accepted too (the original rejected them)
    ext = os.path.splitext(filename)[1].lower()
    if ext in (".jpg", ".png", ".bmp"):
        print("File accepted")
    else:
        return render_template("error.html", message="The selected file is not supported"), 400

    # save file
    destination = "/".join([target, filename])
    print("File saved to:", destination)
    upload.save(destination)

    # forward to processing page
    return render_template("processing.html", image_name=filename)
# colorize the posted grayscale image with the GAN generator model
# (the previous comment here was copy-pasted from a flip endpoint)
@app.route("/colorize", methods=["POST"])
def colorize():
    """Colorize the grayscale channel of the posted image and return the result.

    The image named in the form field 'image' is resized to 224x224, its
    first channel is normalized and fed to the generator model, and the
    predicted LAB output is converted to RGB and saved as temp.png.
    """
    filename = request.form['image']

    # open and process image
    target = os.path.join(APP_ROOT, 'static/images')
    destination = "/".join([target, filename])

    img = Image.open(destination)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    img = img.resize((224, 224), Image.LANCZOS)
    img_array = np.array(img)
    if len(np.shape(img_array)) == 2:
        img_array = np.reshape(img_array, (224, 224, 1))
    # use only the first channel as the grayscale input, batch of one
    gray = img_array[:, :, 0]
    gray = np.reshape(gray, (1, 224, 224, 1))

    # Initialize the model and load the weights
    model = generator_model()
    model.load_weights("./static/model0.h5")

    # Normalize the gray image to roughly [-1, 1] and make prediction
    maximum_img = np.max(gray)
    max_divided = maximum_img / 2
    gray = (gray - max_divided) / max_divided
    predicted_image_lab = model.predict(gray)
    predicted_image_lab = np.reshape(predicted_image_lab, (224, 224, 3))

    # Map the prediction back to 8-bit and convert LAB -> RGB
    img = 127.5 * predicted_image_lab + 127.5
    img = img.astype('uint8')
    img = cv2.cvtColor(img, cv2.COLOR_LAB2RGB)
    img = Image.fromarray(img)

    # save and return image (overwrite any previous temp.png)
    destination = "/".join([target, 'temp.png'])
    if os.path.isfile(destination):
        os.remove(destination)
    img.save(destination)
    return send_image('temp.png')
# blend filename with stock photo and alpha parameter
@app.route("/blend", methods=["POST"])
def blend():
    """Blend the named image with the stock photo blend.jpg.

    The form supplies 'image' (filename) and 'alpha' (0-100, percentage
    weight of the stock photo).  The result is saved as temp.png.
    """
    # retrieve parameters from html form
    alpha = request.form['alpha']
    filename1 = request.form['image']

    # open images
    target = os.path.join(APP_ROOT, 'static/images')
    filename2 = 'blend.jpg'
    destination1 = "/".join([target, filename1])
    destination2 = "/".join([target, filename2])

    img1 = Image.open(destination1)
    img2 = Image.open(destination2)

    # resize images to max dimensions so they can be blended pixel-wise
    # (Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter)
    width = max(img1.size[0], img2.size[0])
    height = max(img1.size[1], img2.size[1])
    img1 = img1.resize((width, height), Image.LANCZOS)
    img2 = img2.resize((width, height), Image.LANCZOS)

    # if image in gray scale, convert stock image to monochrome
    if len(img1.mode) < 3:
        img2 = img2.convert('L')

    # blend: alpha is a percentage, Image.blend expects a 0-1 weight
    img = Image.blend(img1, img2, float(alpha) / 100)

    # save and return image (overwrite any previous temp.png)
    destination = "/".join([target, 'temp.png'])
    if os.path.isfile(destination):
        os.remove(destination)
    img.save(destination)
    return send_image('temp.png')
# retrieve file from 'static/images' directory
@app.route('/static/images/<filename>')
def send_image(filename):
    """Serve a previously saved image from the static/images directory."""
    return send_from_directory("static/images", filename)

if __name__ == "__main__":
    # Run the Flask development server.
    app.run()
| StarcoderdataPython |
23054 | from collections import deque
import numpy as np
import os
from abc import ABCMeta, abstractmethod
import random
random.seed(42)  # fixed seed for reproducible simulation runs

from common import config, VehicleState
from helper import Helper

# Summary template filled in by GameLoop.measure().
INFO = """Average merging time: {} s
Traffic flow: {} vehicle/s
Average speed: {} km/h
Average fuel consumption: {} ml/vehicle"""
class Vehicle(object):
    """Plain record describing a single simulated vehicle.

    Instances are created through :class:`VehicleBuilder`; the constructor
    simply copies every configured attribute off the builder.
    """
    def __init__(self, builder):
        super(Vehicle, self).__init__()
        # Copy each builder field onto this instance, in declaration order.
        for field in ('ID', 'size', 'speed', 'acceleration', 'max_speed',
                      'min_speed', 'max_acc', 'lane', 'position'):
            setattr(self, field, getattr(builder, field))
class VehicleBuilder(object):
    """Fluent builder for :class:`Vehicle` instances.

    The physical limits (max_acc, max_speed, min_speed) and the size default
    to the values in `config`; speed, position, lane and acceleration must be
    provided through the chainable setters before calling build().
    """
    def __init__(self, ID):
        super(VehicleBuilder, self).__init__()
        self.ID = ID
        self.max_acc = config.max_acc
        self.max_speed = config.max_speed
        self.min_speed = config.min_speed
        self.size = config.size

    def setSpeed(self, speed):
        """Set the initial speed; returns self for chaining."""
        self.speed = speed
        return self

    def setPosition(self, position):
        """Set the initial position; returns self for chaining."""
        self.position = position
        return self

    def setLane(self, lane):
        """Set the lane index (0 = main road, 1 = ramp in the scenarios below)."""
        self.lane = lane
        return self

    def setAcceleration(self, acceleration):
        """Set the initial acceleration; returns self for chaining."""
        self.acceleration = acceleration
        return self

    def build(self):
        """Create the Vehicle from the configured values."""
        return Vehicle(self)
class OnBoardVehicle(object):
    """Simulation wrapper around a Vehicle.

    Tracks the entry time, the assigned merge time, and per-time-step
    histories of position, speed, acceleration and fuel consumption.
    """
    def __init__(self, vehicle, t0, min_pass_time=config.min_pass_time):
        self.vehicle = vehicle
        self.t0 = t0                 # entry time into the control zone
        self.tm = float('Inf')       # merge time; assigned later by the game loop
        self.position_history = [vehicle.position]
        self.speed_history = [vehicle.speed]
        self.acc_history = [vehicle.acceleration]
        self.fuel_history = [0]      # cumulative fuel consumption per step
        self.time_steps = [t0]
        # Trajectory parameter vector; computed by the game loop from
        # Helper.getTimeMatrix/getConfigVec (see GameLoop.nextStep).
        self.ParaV = None
        self.min_pass_time = min_pass_time
        self.state = VehicleState.PENDING

    @property
    def v0(self):
        """Speed at entry (first recorded speed)."""
        return self.speed_history[0]

    @property
    def p0(self):
        """Position at entry (first recorded position)."""
        return self.position_history[0]

    @property
    def merge_time(self):
        """Total time spent in the simulation (last step minus entry)."""
        return self.time_steps[-1] - self.time_steps[0]

    @property
    def tf(self):
        """Time of the last recorded step."""
        return self.time_steps[-1]

    @property
    def average_speed(self):
        """Arithmetic mean of the recorded speeds."""
        return np.array(self.speed_history).mean()

    @property
    def fuel_consumption(self):
        """Total (final cumulative) fuel consumption."""
        return self.fuel_history[-1]
class VehicleGeneratorBase(metaclass=ABCMeta):
    """Base class for vehicle arrival schedules.

    Subclasses implement :meth:`buildSchedule` to fill ``self.schedule``
    (a deque of OnBoardVehicle entries) at construction time; the game loop
    then drains it through :meth:`getAtTime`.
    """
    def __init__(self):
        self.schedule = deque()
        self.buildSchedule()

    @abstractmethod
    def buildSchedule(self):
        """Populate ``self.schedule`` with OnBoardVehicle entries."""
        pass

    def getAtTime(self, ctime):
        """Pop and return every scheduled vehicle whose t0 is <= ctime."""
        released = []
        while self.hasVehicle() and self.schedule[0].t0 <= ctime:
            released.append(self.schedule.popleft())
        return released

    def hasVehicle(self):
        """Return True while any vehicle is still waiting to be released."""
        return bool(self.schedule)

    def FIFOIDAssigner(self):
        """Sort the schedule by arrival time and assign IDs in that order."""
        self.schedule = deque(sorted(self.schedule, key=lambda entry: entry.t0))
        for new_id, entry in enumerate(self.schedule):
            entry.vehicle.ID = new_id

    def SpeedIDAssigner(self):
        """Assign IDs by estimated earliest merge completion (t0 + minimum
        pass time), then restore arrival-time order for release."""
        for entry in self.schedule:
            entry.min_pass_time = Helper.getTmOptimal2(entry.v0, config.case_speed['speed_merge'],
                                                       -config.control_len, 0)
        self.schedule = deque(sorted(self.schedule,
                                     key=lambda entry: entry.t0 + entry.min_pass_time))
        for new_id, entry in enumerate(self.schedule):
            entry.vehicle.ID = new_id
        self.schedule = deque(sorted(self.schedule, key=lambda entry: entry.t0))
class Case1VehicleGenerator(VehicleGeneratorBase):
    """Fixed scenario: config.case1['total_cars'] identical-speed vehicles,
    alternating lanes, all starting at -config.control_len.

    The first two vehicles enter at t=10s, the rest at t=15s.  IDs are
    assigned directly in construction order.
    """
    def __init__(self):
        super(Case1VehicleGenerator, self).__init__()

    def buildSchedule(self):
        for i in range(config.case1['total_cars']):
            v = VehicleBuilder(i)\
                .setSpeed(config.case1['speed'])\
                .setPosition(-config.control_len)\
                .setAcceleration(0)\
                .setLane(i % 2).build()
            # First two vehicles arrive earlier than the rest.
            t = 10.0 if (i < 2) else 15.0
            # self.schedule.append(OnBoardVehicle(v, t, config.control_len / config.case1['speed']))
            # Only the lead vehicle gets a non-zero minimum-pass-time estimate.
            tm = Helper.getTmOptimal2(config.case1['speed'], config.case_speed['speed_merge'],
                                      -config.control_len, 0) if i == 0 else 0
            self.schedule.append(OnBoardVehicle(v, t, tm))
        # self.schedule.sort(key=lambda x: x.vehicle.ID)
class Case2VehicleGenerator(VehicleGeneratorBase):
    """Random scenario: total_cars vehicles, half per lane, with uniformly
    random arrival times in [10, 50) seconds; IDs are reassigned FIFO.
    """
    def __init__(self):
        super(Case2VehicleGenerator, self).__init__()

    def buildSchedule(self):
        tnum = config.case2['total_cars']
        # Randomly generate tnum//2 cars on each lane, with time span 10~50
        # This does not ensure feasibility at initial state
        t0_lane0 = np.random.rand(tnum//2) * 40.0 + 10.0
        t0_lane1 = np.random.rand(tnum//2) * 40.0 + 10.0
        for i in range(tnum):
            v = VehicleBuilder(-1)\
                .setSpeed(config.case2['speed'])\
                .setPosition(-config.control_len)\
                .setAcceleration(0)\
                .setLane(i % 2).build()
            # Even indices go to lane 0, odd indices to lane 1.
            t = t0_lane0[i//2] if (i % 2 == 0) else t0_lane1[i//2]
            self.schedule.append(OnBoardVehicle(v, t, config.control_len / config.case2['speed']))
        # IDs are assigned in arrival-time order.
        self.FIFOIDAssigner()
class MainHigherSpeedVG(VehicleGeneratorBase):
    """Scenario where the main road (lane 0) is faster than the ramp.

    Lane 0 vehicles arrive every 2.0s from t=10 at 25 m/s; lane 1 vehicles
    arrive every 3.1s from t=9 at 15 m/s.  IDs are assigned FIFO.
    """
    def __init__(self):
        super(MainHigherSpeedVG, self).__init__()

    def buildSchedule(self):
        # lane0 (main road)
        t0_lane0 = np.arange(10, 30.1, 2.0)
        t0_lane1 = np.arange(9, 30.1, 3.1)
        v0_lane0 = 25.0
        v0_lane1 = 15.0
        for ti0 in t0_lane0:
            v = VehicleBuilder(-1)\
                .setSpeed(v0_lane0)\
                .setPosition(-config.control_len)\
                .setAcceleration(0)\
                .setLane(0).build()
            self.schedule.append(OnBoardVehicle(v, ti0, Helper.getTc(v)))
        for ti0 in t0_lane1:
            v = VehicleBuilder(-1)\
                .setSpeed(v0_lane1)\
                .setPosition(-config.control_len)\
                .setAcceleration(0)\
                .setLane(1).build()
            self.schedule.append(OnBoardVehicle(v, ti0, Helper.getTc(v)))
        # Arrival-order IDs; the speed-based assigner is kept as an alternative.
        self.FIFOIDAssigner()
        # self.SpeedIDAssigner()
class PoissonVehicleGenerator(VehicleGeneratorBase):
    """Generates vehicles with exponential (Poisson-process) inter-arrival
    gaps on both lanes, plus small Gaussian noise on the initial speeds.
    """
    def __init__(self, tnum_lane0, tnum_lane1):
        self.tnum_lane0 = tnum_lane0
        self.tnum_lane1 = tnum_lane1
        super(PoissonVehicleGenerator, self).__init__()

    def buildSchedule(self):
        speed_lane0 = config.case_speed['speed_init_lane0']
        speed_lane1 = config.case_speed['speed_init_lane1']
        t0_lane0 = [0]
        t0_lane1 = [0]
        # Minimum headway per lane — presumably config.delta is a spacing
        # converted to a time gap at the initial speed (TODO confirm units).
        tsp_lane0 = (config.delta) / speed_lane0
        tsp_lane1 = (config.delta) / speed_lane1
        for i in range(self.tnum_lane0):
            t0_lane0.append(t0_lane0[-1] + tsp_lane0 + np.random.exponential(5.0))
        for i in range(self.tnum_lane1):
            t0_lane1.append(t0_lane1[-1] + tsp_lane1 + np.random.exponential(5.0))
        # Drop the artificial t=0 seed entries.
        t0_lane0 = np.array(t0_lane0[1:])
        t0_lane1 = np.array(t0_lane1[1:])
        for i in range(self.tnum_lane0):
            speed_v = speed_lane0 + np.random.randn()
            v = VehicleBuilder(-1)\
                .setSpeed(speed_v)\
                .setPosition(-config.control_len)\
                .setAcceleration(0)\
                .setLane(0).build()
            t = t0_lane0[i]
            self.schedule.append(OnBoardVehicle(v, t, Helper.getTc(v)))
        for i in range(self.tnum_lane1):
            speed_v = speed_lane1 + np.random.randn()
            v = VehicleBuilder(-1)\
                .setSpeed(speed_v)\
                .setPosition(-config.control_len)\
                .setAcceleration(0)\
                .setLane(1).build()
            t = t0_lane1[i]
            self.schedule.append(OnBoardVehicle(v, t, Helper.getTc(v)))
        self.FIFOIDAssigner()
        # self.SpeedIDAssigner()
class APPVehicleGenerator(VehicleGeneratorBase):
    """Generator with a configurable ID-assignment strategy and a fixed
    minimum pass time shared by every vehicle.
    """
    def __init__(self, tnum, id_assigner, min_pass_time):
        self.tnum = tnum
        self.ida = id_assigner      # 'FIFO' or 'main'
        self.mpt = min_pass_time
        super(APPVehicleGenerator, self).__init__()

    def buildSchedule(self):
        tnum = self.tnum
        # Uniform random arrivals in [10, 50) seconds, half per lane.
        t0_lane0 = np.random.rand(tnum//2) * 40.0 + 10.0
        t0_lane1 = np.random.rand(tnum//2) * 40.0 + 10.0
        for i in range(tnum):
            v = VehicleBuilder(-1)\
                .setSpeed(config.case2['speed'])\
                .setPosition(-config.control_len)\
                .setAcceleration(0)\
                .setLane(i % 2).build()
            t = t0_lane0[i//2] if (i % 2 == 0) else t0_lane1[i//2]
            self.schedule.append(OnBoardVehicle(v, t, self.mpt))
        if self.ida == 'FIFO':
            self.FIFOIDAssigner()
        elif self.ida == 'main':
            # NOTE(review): MainRoadFirstIDAssigner is not defined on this
            # class or on VehicleGeneratorBase; selecting ida='main' raises
            # AttributeError at runtime.
            self.MainRoadFirstIDAssigner()
class GameLoop(object):
    """Drives the merging simulation.

    Pulls vehicles from a generator (vscd), assigns each a merge time chained
    off its predecessor, advances all on-board vehicles each time step, and
    collects finished vehicles for measurement and plotting.
    """
    def __init__(self, vscd):
        super(GameLoop, self).__init__()
        self.ctime = 0                    # current simulation time (s)
        self.vscd = vscd                  # vehicle schedule / generator
        self.on_board_vehicles = deque()  # vehicles inside the control zone
        # (sic: 'vehicels' spelling kept — subclasses depend on the name)
        self.finished_vehicels = deque()  # vehicles past the merging zone

    def isOver(self):
        """True when the schedule is drained and the road is empty, or when
        the simulation exceeded config.max_sim_time."""
        if self.ctime >= config.max_sim_time:
            print("Simulation time out.")
            return True
        return (not self.vscd.hasVehicle()) and self.isEmpty()

    def isEmpty(self):
        """True when no vehicle is currently on board."""
        return len(self.on_board_vehicles) == 0

    def nextStep(self):
        """Advance the simulation by one time step of config.time_meta."""
        self.ctime += config.time_meta
        t = self.ctime
        ove_t = self.vscd.getAtTime(t)
        for v in ove_t:
            # Merge time: the first vehicle gets its unconstrained optimum;
            # every other vehicle is chained off its predecessor (the last
            # on-board vehicle, or the last finished one if the road cleared).
            if v.vehicle.ID == 0:
                # v.tm = v.t0 + max(config.min_pass_time, v.min_pass_time)
                v.tm = v.t0 + Helper.getTmOptimal2(v.v0, config.speed_merge, v.p0, 0)
            elif len(self.on_board_vehicles) > 0:
                v.tm = Helper.getTm(v, self.on_board_vehicles[-1])
            else:
                v.tm = Helper.getTm(v, self.finished_vehicels[-1])
            self.on_board_vehicles.append(v)
            # Solve the trajectory parameter vector from boundary conditions.
            TimeM = Helper.getTimeMatrix(t, v.tm)
            ConfV = Helper.getConfigVec(v)
            v.ParaV = np.dot(np.linalg.inv(TimeM), ConfV)
            v.state = VehicleState.ON_RAMP
        for v in self.on_board_vehicles:
            Helper.updateAVP(v, t)
            if v.vehicle.position >= 0:
                v.state = VehicleState.ON_MERGING
        # Retire vehicles that have cleared the merging zone (FIFO order).
        while not self.isEmpty() and self.on_board_vehicles[0].vehicle.position >= config.merging_len:
            self.on_board_vehicles[0].state = VehicleState.FINISHED
            self.finished_vehicels.append(self.on_board_vehicles.popleft())

    def play(self):
        """Run the loop to completion, then print the summary metrics."""
        while not self.isOver():
            self.nextStep()
        self.measure()

    def measure(self):
        """Compute and print merging time, flow, speed and fuel metrics."""
        # Measure average merging time
        AMT = np.array([v.merge_time for v in self.finished_vehicels]).mean()
        # Measure traffic flow
        TF = len(self.finished_vehicels) / (self.finished_vehicels[-1].tf - self.finished_vehicels[0].t0)
        # Average speed as the harmonic mean of per-vehicle averages, in km/h.
        AS = 1 / np.array([1 / v.average_speed for v in self.finished_vehicels]).mean() * 3.6
        # Measure average fuel consumption
        AFC = np.array([v.fuel_consumption for v in self.finished_vehicels]).mean()
        print(INFO.format(AMT, TF, AS, AFC))

    def draw_result_pyplot(self, file_path=None):
        """Plot position/speed/acceleration/fuel histories with matplotlib.

        When file_path is given it must be an existing directory; each
        subplot is additionally saved there as a PDF.  Lane 0 vehicles are
        drawn dashed red, lane 1 solid blue.
        """
        import matplotlib.pyplot as plt
        plt.figure(1)
        plt.subplot(221)
        plt.xlabel('time')
        plt.ylabel('position')
        plt.title('positions')
        for tv in self.finished_vehicels:
            linecolor = 'r--' if tv.vehicle.lane == 0 else 'b'
            plt.plot(tv.time_steps, tv.position_history, linecolor,
                     label="Car {}".format(tv.vehicle.ID))
        plt.grid(True)
        if file_path:
            plt.savefig(os.path.join(file_path, "position.pdf"), format="pdf")
        plt.subplot(222)
        plt.xlabel('time')
        plt.ylabel('speed')
        plt.title('speeds')
        for tv in self.finished_vehicels:
            linecolor = 'r--' if tv.vehicle.lane == 0 else 'b'
            plt.plot(tv.time_steps, tv.speed_history, linecolor,
                     label="Car {}".format(tv.vehicle.ID))
        plt.grid(True)
        if file_path:
            plt.savefig(os.path.join(file_path, "speed.pdf"), format="pdf")
        plt.subplot(223)
        plt.xlabel('time')
        plt.ylabel('acceleration')
        plt.title('accelerations')
        for tv in self.finished_vehicels:
            linecolor = 'r--' if tv.vehicle.lane == 0 else 'b'
            plt.plot(tv.time_steps, tv.acc_history, linecolor,
                     label="Car {}".format(tv.vehicle.ID))
        plt.grid(True)
        if file_path:
            plt.savefig(os.path.join(file_path, "acc.pdf"), format="pdf")
        plt.subplot(224)
        plt.xlabel('time')
        plt.ylabel('fuel')
        plt.title('fuels')
        for tv in self.finished_vehicels:
            linecolor = 'r--' if tv.vehicle.lane == 0 else 'b'
            plt.plot(tv.time_steps, tv.fuel_history, linecolor,
                     label="Car {}".format(tv.vehicle.ID))
        plt.grid(True)
        if file_path:
            plt.savefig(os.path.join(file_path, "fuel.pdf"), format="pdf")
        if not file_path:
            plt.subplots_adjust(top=0.92, bottom=-0.35, left=0.10, right=1.35, hspace=0.35,
                                wspace=0.35)
            plt.show()

    def draw_result(self, file_path):
        """Write an interactive bokeh grid of the four plots to file_path."""
        from bokeh.layouts import gridplot
        from bokeh.plotting import figure, output_file, show
        output_file(file_path)
        TOOLS = "pan,wheel_zoom,box_zoom,reset,save,box_select,lasso_select"
        s1 = figure(tools=TOOLS, title="positions", x_axis_label="time", y_axis_label="position")
        for tv in self.finished_vehicels:
            s1.line(tv.time_steps, tv.position_history,
                    line_color="red" if tv.vehicle.lane == 0 else "blue",
                    legend="Car {}".format(tv.vehicle.ID), line_width=2)
        s1.xgrid.minor_grid_line_color = 'navy'
        s1.xgrid.minor_grid_line_alpha = 0.1
        s1.ygrid.minor_grid_line_color = 'navy'
        s1.ygrid.minor_grid_line_alpha = 0.1
        s2 = figure(tools=TOOLS, title="speed", x_axis_label="time", y_axis_label="speed",
                    x_range=s1.x_range)
        for tv in self.finished_vehicels:
            s2.line(tv.time_steps, tv.speed_history,
                    line_color="red" if tv.vehicle.lane == 0 else "blue",
                    legend="Car {}".format(tv.vehicle.ID), line_width=2)
        s2.xgrid.minor_grid_line_color = 'navy'
        s2.xgrid.minor_grid_line_alpha = 0.1
        s2.ygrid.minor_grid_line_color = 'navy'
        s2.ygrid.minor_grid_line_alpha = 0.1
        s3 = figure(tools=TOOLS, title="acceleration", x_axis_label="time", y_axis_label="acceleration",
                    x_range=s1.x_range)
        for tv in self.finished_vehicels:
            s3.line(tv.time_steps, tv.acc_history,
                    line_color="red" if tv.vehicle.lane == 0 else "blue",
                    legend="Car {}".format(tv.vehicle.ID), line_width=2)
        s3.xgrid.minor_grid_line_color = 'navy'
        s3.xgrid.minor_grid_line_alpha = 0.1
        s3.ygrid.minor_grid_line_color = 'navy'
        s3.ygrid.minor_grid_line_alpha = 0.1
        s4 = figure(tools=TOOLS, title="fuel consumption", x_axis_label="time", y_axis_label="fuel",
                    x_range=s1.x_range)
        for tv in self.finished_vehicels:
            s4.line(tv.time_steps, tv.fuel_history,
                    line_color="red" if tv.vehicle.lane == 0 else "blue",
                    legend="Car {}".format(tv.vehicle.ID), line_width=2)
        s4.xgrid.minor_grid_line_color = 'navy'
        s4.xgrid.minor_grid_line_alpha = 0.1
        s4.ygrid.minor_grid_line_color = 'navy'
        s4.ygrid.minor_grid_line_alpha = 0.1
        p = gridplot([[s1, s2, s3, s4]])
        # show() may fail in headless environments; the HTML file has already
        # been written via output_file, so failure is non-fatal.  (A second,
        # unconditional show(p) after this block was removed: it re-raised
        # the very error the try/except had just suppressed.)
        try:
            show(p)
        except Exception:
            pass
class SpeedGameLoop(GameLoop):
    """GameLoop variant that keeps on_board_vehicles as a plain list and
    re-inserts newly arrived vehicles in vehicle-ID order, recomputing the
    merge times of any vehicles displaced behind the newcomer.
    """
    def __init__(self, vscd):
        super(SpeedGameLoop, self).__init__(vscd)
        # Override the base deque: insertion at arbitrary positions is needed.
        self.on_board_vehicles = []

    def nextStep(self):
        """Advance by one time step, keeping the merge order sorted by ID."""
        self.ctime += config.time_meta
        t = self.ctime
        ove_t = self.vscd.getAtTime(t)
        for v in ove_t:
            # Pop every on-board vehicle with a larger ID: they must now
            # merge after the newcomer v.
            tmp_v_stack = []
            while len(self.on_board_vehicles) > 0 and self.on_board_vehicles[-1].vehicle.ID > v.vehicle.ID:
                tmpv = self.on_board_vehicles.pop()
                # tmpv.t0 = t
                # tmpv.min_pass_time = max(tmpv.min_pass_time, Helper.getTmOptimal2(tmpv.vehicle.speed,
                #     config.case_speed['speed_merge'], tmpv.vehicle.position, 0))
                tmp_v_stack.append(tmpv)
            # Get t_m
            if len(self.on_board_vehicles) == 0 and len(self.finished_vehicels) == 0:
                v.tm = v.t0 + max(config.min_pass_time, v.min_pass_time)
            elif len(self.on_board_vehicles) > 0:
                v.tm = Helper.getTm(v, self.on_board_vehicles[-1])
            else:
                v.tm = Helper.getTm(v, self.finished_vehicels[-1])
            tmp_v_stack.append(v)
            # Re-append v and the displaced vehicles in ascending ID order,
            # chaining each merge time off its new predecessor.
            prevve = None
            for i in reversed(range(len(tmp_v_stack))):
                ve = tmp_v_stack[i]
                if prevve is not None:
                    ve.tm = Helper.getTm(ve, prevve)
                self.on_board_vehicles.append(ve)
                TimeM = Helper.getTimeMatrix(t, ve.tm)
                ConfV = Helper.getConfigVec(ve)
                # from IPython import embed; embed()
                ve.ParaV = np.dot(np.linalg.inv(TimeM), ConfV)
                ve.state = VehicleState.ON_RAMP
                prevve = ve
            # print("ID {}".format(prevve.vehicle.ID))
        for v in self.on_board_vehicles:
            Helper.updateAVP(v, t)
            if v.vehicle.position >= 0:
                v.state = VehicleState.ON_MERGING
        while not self.isEmpty() and self.on_board_vehicles[0].vehicle.position >= config.merging_len:
            self.on_board_vehicles[0].state = VehicleState.FINISHED
            self.finished_vehicels.append((self.on_board_vehicles.pop(0)))
def main():
    """Entry point: run the higher-main-road-speed scenario and plot results.

    Alternative scenarios (case1, Poisson arrivals, APP generator) are kept
    below as commented-out variants.
    """
    # vehicle_generator = Case1VehicleGenerator()
    # game = GameLoop(vehicle_generator)
    # game.play()
    # game.draw_result_pyplot("case1")
    vehicle_generator = MainHigherSpeedVG()
    game = GameLoop(vehicle_generator)
    # game = SpeedGameLoop(vehicle_generator)
    game.play()
    # NOTE(review): draw_result_pyplot treats its argument as a directory for
    # the saved PDFs; "case2" must exist or savefig will fail.
    game.draw_result_pyplot("case2")
    # vehicle_generator = APPVehicleGenerator(12, 'FIFO', 16.9)
    # vehicle_generator = PoissonVehicleGenerator(config.case_speed['tnum_lane0'],
    #     config.case_speed['tnum_lane1'])
    # ggame = GameLoop(vehicle_generator)
    # ggame.play()
    # # ggame.draw_result("result.html")
    # ggame.draw_result_pyplot(".")
if __name__ == '__main__':
main() | StarcoderdataPython |
1691336 | from argparse import ArgumentParser
from transformers import RobertaTokenizerFast
from common import TOKENIZER_PATH
from common.config import config
def main():
    """Tokenize a sample text with the project tokenizer and print the result."""
    parser = ArgumentParser(description="Try custom trained tokenizer.")
    parser.add_argument("text", nargs="?", default="This is an example.", help="Text to tokenize.")
    args = parser.parse_args()

    # Prefer an explicitly configured pretrained tokenizer, falling back to
    # the locally trained one.  The two original branches only differed in
    # the model path, so they are collapsed here.
    tokenizer_path = config.from_pretrained or TOKENIZER_PATH
    tokenizer = RobertaTokenizerFast.from_pretrained(tokenizer_path, max_length=config.max_length)

    tokenized = tokenizer(
        args.text,
        return_offsets_mapping=True,
        return_special_tokens_mask=True,
    )
    print("Tokenized:")
    print(tokenized)
    print(tokenized.tokens())

if __name__ == '__main__':
    main()
| StarcoderdataPython |
182206 | <gh_stars>0
import random, requests, pendulum, hashlib, string, os, fnmatch
class File(object):
    """Generates plausible fake file metadata: names, paths, sizes,
    timestamps, signatures and hash digests, for use in synthetic data."""

    __DATA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data', 'filenames'))

    def __init__(self):
        self.__filename = None
        self.__full_path = None
        self._filenames = self.__check_file_directory()
        self._name = ''
        # Random payload whose digests back the md5/sha1/sha256 properties.
        self.random_value = ''.join(random.choice(string.ascii_uppercase) for _ in range(256))

    @property
    def filename(self):
        """A random base file name drawn from the bundled name lists."""
        if self.__filename:
            return self.__filename
        extension = random.choice(['exe', 'sys', 'bin'])
        for path in self._filenames:
            if extension in path:
                with open(path, 'r') as handle:
                    entries = handle.read().splitlines()
                # Entries are Windows paths; keep the part after the last '\'.
                return random.choice(entries).rsplit('\\', 1)[1]
        return self.__filename

    @property
    def full_path(self):
        """A random directory path drawn from the bundled name lists."""
        if self.__full_path:
            return self.__full_path
        extension = random.choice(['exe', 'sys', 'bin'])
        for path in self._filenames:
            if extension in path:
                with open(path, 'r') as handle:
                    entries = handle.read().splitlines()
                # Entries are Windows paths; keep the part before the last '\'.
                return random.choice(entries).rsplit('\\', 1)[0]
        return self.__full_path

    @property
    def signed(self):
        """'True' or 'False' (as strings), chosen at random."""
        return random.choice(['True', 'False'])

    @property
    def signature(self):
        """Fixed signer name."""
        return 'Microsoft Windows'

    @property
    def signature_status(self):
        """One of the three signature verification states, at random."""
        return random.choice(['Verified', 'Unknown', 'Counterfit'])

    @property
    def size(self):
        """A single-element list holding a human-readable random file size."""
        precision = 2
        value = random.randint(1, 3221225472)
        suffixes = ['B', 'KB', 'MB', 'GB', 'TB']
        index = 0
        while value > 1024 and index < 4:
            index += 1
            value = value / 1024.0
        return ["%.*f%s" % (precision, value, suffixes[index])]

    @property
    def timestamp(self):
        """A random datetime string up to ~9 years in the past."""
        return pendulum.now().subtract(
            years=random.randint(0, 8),
            days=random.randint(1, 365),
            hours=random.randint(1, 24),
            minutes=random.randint(1, 60),
            seconds=random.randint(1, 60)
        ).to_datetime_string()

    @property
    def md5(self):
        """MD5 hex digest of random_value."""
        return hashlib.md5(str(self.random_value).encode('utf-8')).hexdigest()

    @property
    def sha1(self):
        """SHA-1 hex digest of random_value."""
        return hashlib.sha1(str(self.random_value).encode('utf-8')).hexdigest()

    @property
    def sha256(self):
        """SHA-256 hex digest of random_value."""
        return hashlib.sha256(str(self.random_value).encode('utf-8')).hexdigest()

    @property
    def hashes(self):
        """All three digests keyed by algorithm name."""
        return {
            'md5': self.md5,
            'sha1': self.sha1,
            'sha256': self.sha256
        }

    def __check_file_directory(self):
        """Collect absolute paths of every *.txt data file under __DATA_PATH."""
        found = []
        base = os.path.abspath(self.__DATA_PATH)
        for root, dirnames, filenames in os.walk(base):
            for name in fnmatch.filter(filenames, '*.txt'):
                found.append(os.path.abspath(os.path.join(root, name)))
        return found
1729848 | <gh_stars>0
import nltk
import numpy as np
import os
import pickle
import torch
import torch.utils.data as data
from multi_vocab import Vocabulary
class PrecompMultiDataset(data.Dataset):
    """Bilingual dataset of precomputed image features plus captions.

    Captions from the two language files are interleaved: even indices hold
    captions from langs[0], odd indices from langs[1] (see __getitem__).
    """
    def __init__(self, data_path, data_split, langs, vocab,
                 load_img=True, img_dim=2048, cn_seg = True):
        """Load both languages' caption files and the split's image features.

        :param cn_seg: for Chinese, pick the pre-segmented ("seg_") caption
            file when True, otherwise the "sim_" file.
        """
        self.vocab = vocab
        self.langs = langs
        lang1, lang2 = langs
        # captions
        self.captions = list()
        lang_data_paths = []
        for lang in langs:
            lang_folder_path = os.path.join(data_path, lang)
            if lang == "cn":
                if cn_seg:
                    lang_data_paths.append(os.path.join(lang_folder_path, f'seg_{data_split}_caps.txt'))
                else:
                    lang_data_paths.append(os.path.join(lang_folder_path, f'sim_{data_split}_caps.txt'))
            else:
                lang_data_paths.append(os.path.join(lang_folder_path, f'{data_split}_caps.txt'))
        # Interleave lang1/lang2: each line pair contributes two entries.
        with open(lang_data_paths[0], 'r') as f1, \
                open(lang_data_paths[1], 'r') as f2:
            for line1, line2 in zip(f1, f2):
                if lang1 == "en":
                    self.captions.append(line1.strip().lower().split())
                elif lang1 == "fr":
                    self.captions.append(self.split_fr_line(line1))
                elif lang1 == "cn":
                    self.captions.append(self.split_cn_line(line1, cn_seg))
                else:
                    print(f"Not prepared for this language : {lang1}")
                    break
                if lang2 == "en":
                    self.captions.append(line2.strip().lower().split())
                elif lang2 == "fr":
                    self.captions.append(self.split_fr_line(line2))
                elif lang2 == "cn":
                    self.captions.append(self.split_cn_line(line2, cn_seg))
                else:
                    print(f"Not prepared for this language : {lang2}")
                    break
        # NOTE(review): the `with` block already closed both files; these
        # explicit close() calls are redundant (but harmless).
        f1.close()
        f2.close()
        self.length = len(self.captions)
        # image features
        if load_img:
            self.images = np.load(os.path.join(data_path, f'{data_split}_ims.npy'))
        else:
            self.images = np.zeros((self.length // 10, img_dim))
        # each image can have 1 caption or 10 captions
        if self.images.shape[0] != self.length:
            self.im_div = 10
            assert self.images.shape[0] * 10 == self.length
        else:
            self.im_div = 1
    def __getitem__(self, index):
        """Return (image_feature, caption_tensor, index, img_id, language)."""
        # image
        img_id = index // self.im_div
        image = torch.tensor(self.images[img_id])
        # Wrap the caption in <start>/<end> markers and map tokens to ids.
        caption = [self.vocab(token)
                   for token in ['<start>'] + self.captions[index] + ['<end>']]
        caption = torch.tensor(caption)
        lang1, lang2 = self.langs
        # Even indices came from lang1, odd ones from lang2 (see __init__).
        this_lang = lang1 if not index % 2 else lang2
        #print(f"test : {self.captions[index]}, lang: {this_lang}")
        return image, caption, index, img_id, this_lang
    def __len__(self):
        return self.length
    def split_fr_line(self, line):
        """Tokenize a French caption, keeping apostrophes on the left token.

        NOTE(review): "\\." is a TWO-character string (backslash + dot), so
        `line[-1] == "\\."` is never true and the `not in ["\\.", " "]` test
        holds for every character except a space.  For the usual
        newline-terminated input the net effect is: drop the trailing "\\n"
        and append " ." -- confirm this normalization is what was intended.
        """
        if line[-1] == "\." and line[-2] != " " or line[-1] not in ["\.", " "]:
            line = line[:-1] + " ."
        elif line[-1] == " ":
            line += "."
        # Keep apostrophes attached to the preceding token ("l'", "d'", ...).
        splited_by_prime = line.split('\'')
        for idx in range(len(splited_by_prime) - 1):
            splited_by_prime[idx] += '\''
        fully_splited = []
        for subline in splited_by_prime:
            fully_splited += subline.strip().lower().split()
        return fully_splited
    def split_cn_line(self, line, seg = True):
        """Tokenize a Chinese caption: per word if seg, else per character.

        NOTE(review): same two-character "\\." comparison caveat as in
        split_fr_line above.
        """
        if line[-1] == "\." and line[-2] != " " or line[-1] not in ["\.", " "]:
            line = line[:-1] + " ."
        elif line[-1] == " ":
            line += "."
        if seg:
            return line.strip().split()
        else:
            return list(line.replace(" ", ""))
def collate_fn(data):
    """Build mini-batch tensors from a list of (image, caption, ...) tuples."""
    # Longest caption first, so the first entry defines the padded width.
    data.sort(key=lambda item: len(item[1]), reverse=True)
    images, captions, ids, img_ids, cap_langs = zip(*data)
    images = torch.stack(images, 0)
    lengths = [len(cap) for cap in captions]
    # Zero-padded id matrix: one row per caption, width = longest caption.
    targets = torch.zeros(len(captions), lengths[0]).long()
    for row, cap in enumerate(captions):
        targets[row, :len(cap)] = cap
    return images, targets, lengths, ids, cap_langs
def get_precomp_loader(data_path, data_split, langs, vocab, batch_size=128,
                       shuffle=True, num_workers=2, load_img=True,
                       img_dim=2048, cn_seg=True):
    """Build a DataLoader over a PrecompMultiDataset for the given split.

    Fix: ``num_workers`` was accepted but never forwarded to the DataLoader;
    it is now passed through so data loading can actually be parallelised.
    """
    dset = PrecompMultiDataset(data_path, data_split, langs, vocab, load_img,
                               img_dim, cn_seg=cn_seg)
    return torch.utils.data.DataLoader(
        dataset=dset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=True,
        collate_fn=collate_fn,
    )
def get_train_loaders(data_path, langs, vocab, batch_size, workers, cn_seg=True):
    """Return (train_loader, val_loader) for the given language pair.

    Fix: ``cn_seg`` was previously passed positionally, landing in the
    ``load_img`` parameter slot of get_precomp_loader (so image loading was
    silently controlled by cn_seg and the real cn_seg stayed at its default).
    It is now passed by keyword.
    """
    train_loader = get_precomp_loader(
        data_path, 'train', langs, vocab, batch_size, True, workers, cn_seg=cn_seg
    )
    val_loader = get_precomp_loader(
        data_path, 'dev', langs, vocab, batch_size, False, workers, cn_seg=cn_seg
    )
    return train_loader, val_loader
def get_eval_loader(data_path, split_name, langs, vocab, batch_size, workers,
                    load_img=False, img_dim=2048, cn_seg=True):
    """Return a non-shuffled DataLoader over an evaluation split."""
    return get_precomp_loader(
        data_path, split_name, langs, vocab, batch_size, False, workers,
        load_img=load_img, img_dim=img_dim, cn_seg=cn_seg
    )
| StarcoderdataPython |
156106 | import requests, json, re
from requests import get
def main():
    """Scrape consecutive xkcd comics until a request fails."""
    comic_number = 1
    while scrape(comic_number):
        comic_number += 1
def scrape(n):
    """Fetch xkcd comic *n*'s metadata and append its transcript to content.txt.

    Returns True on HTTP 200 (caller keeps scraping), False otherwise.

    Fixes: the output file handle was never closed (now a ``with`` block) and
    the regex patterns are now raw strings.
    """
    # Comic #404 intentionally does not exist, so step past it.
    # NOTE(review): this only shifts the local index; the caller's counter is
    # unaffected, so every comic after #403 is fetched under an off-by-one id.
    if n > 403:
        n = n + 1
    url = 'https://xkcd.com/%d/info.0.json' % n
    r = requests.get(url)
    if r.status_code != 200:
        print("Error!")
        print(n)
        print(r.status_code)
        return False
    payload = r.json()
    content = payload["transcript"]
    content = re.sub(r"\[\[\s*(.*?)\s*\]\]", "", content)  # drop [[scene]] notes
    content = re.sub(r"\{\{\s*(.*?)\s*\}\}", "", content)  # drop {{title-text}} notes
    content = re.sub(r"\"", "'", content)                  # normalise quotes
    with open("content.txt", "a") as file:
        file.write(payload["title"])
        file.write("\n")
        file.write(content)
        file.write(payload["alt"])
        file.write("\n")
        file.write("\n")
    filename = "images/%d.png" % n
    #download(payload["img"], filename)
    print(n)
    return True
def download(url, filename):
    """Save the resource at *url* to *filename* as binary data."""
    response = get(url)
    with open(filename, "wb") as file:
        file.write(response.content)
if __name__ == "__main__":
main()
| StarcoderdataPython |
11682 | <reponame>holoyan/python-data-validation
from setuptools import setup, find_packages
# read the contents of your README file
from os import path
# Use the README as the long description shown on PyPI.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='pyva',
    packages=find_packages(),
    version='0.4.1',
    license='MIT',
    description='Simple and flexible python data validation library',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Artak',
    author_email='<EMAIL>',
    url='https://github.com/holoyan/python-data-validation',
    keywords=['data', 'validation', 'validator', 'data validator'],
    install_requires=[  # runtime dependencies
        'python-dateutil',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
)
| StarcoderdataPython |
9925 | # coding=utf-8
from nlpir.native.nlpir_base import NLPIRBase
from ctypes import c_bool, c_char_p, c_int, POINTER, Structure, c_float
class StDoc(Structure):
    """ctypes mirror of the native NLPIR document struct (UTF-8 C strings).

    Fix: ctypes requires the member list to be named ``_fields_``; the
    original ``__fields__`` spelling was silently ignored, leaving the
    structure without any members.
    """
    _fields_ = [
        ("sTitle", c_char_p),
        ("sContent", c_char_p),
        ("sAuthor", c_char_p),
        ("sBoard", c_char_p),
        ("sDatatype", c_char_p)
    ]
class Classifier(NLPIRBase):
    """Python wrapper around the native LJClassifier (NLPIR) document classifier."""
    @property
    def dll_name(self):
        # Name of the native library this wrapper binds to.
        return "LJClassifier"
    @NLPIRBase.byte_str_transform
    def init_lib(self, data_path: str, encode: int, license_code: str) -> int:
        """
        Call **classifier_init** with the bundled "rulelist.xml" rule file.

        :param data_path: path of the NLPIR data directory
        :param encode: text-encoding constant understood by the native library
        :param license_code: NLPIR license code
        :return: 1 success 0 fail
        """
        return self.get_func("classifier_init", [c_char_p, c_char_p, c_int, c_char_p], c_bool)(
            "rulelist.xml", data_path, encode, license_code)
    @NLPIRBase.byte_str_transform
    def exit_lib(self) -> bool:
        """
        Call **classifier_exit**

        :return: exit success or not
        """
        return self.get_func("classifier_exit", None, None)()
    @NLPIRBase.byte_str_transform
    def get_last_error_msg(self) -> str:
        """Return the native library's last error message."""
        return self.get_func("classifier_GetLastErrorMsg", None, c_char_p)()
    @NLPIRBase.byte_str_transform
    def exec_1(self, data: StDoc, out_type: int = 0):
        """
        Call **classifier_exec1**

        Classify a document passed as an :class:`StDoc` structure.

        :param data: the document structure
        :param out_type: include confidence scores in the output: 0 = no, 1 = yes
        :return: topic categories separated by tabs, ordered by confidence
            (highest first); e.g. "要闻 敏感 诉讼" or "要闻 1.00 敏感 0.95 诉讼 0.82"
        """
        return self.get_func("classifier_exec1", [POINTER(StDoc), c_int], c_char_p)(data, out_type)
    @NLPIRBase.byte_str_transform
    def exec(self, title: str, content: str, out_type: int):
        """
        Call **classifier_exec**

        Classify an article given as title plus body text.

        :param title: article title
        :param content: article body
        :param out_type: include confidence scores, same as :func:`exec_1`
        :return: same format as :func:`exec_1`
        """
        return self.get_func("classifier_exec", [c_char_p, c_char_p, c_int], c_char_p)(title, content, out_type)
    @NLPIRBase.byte_str_transform
    def exec_file(self, filename: str, out_type: int) -> str:
        """
        Call **classifier_execFile**

        :param filename: path of the file to classify
        :param out_type: include confidence scores in the output: 0 = no, 1 = yes
        :return: topic categories separated by tabs, ordered by confidence
            (highest first); e.g. "要闻 敏感 诉讼" or "要闻 1.00 敏感 0.95 诉讼 0.82"
        """
        return self.get_func("classifier_execFile", [c_char_p, c_int], c_char_p)(filename, out_type)
    @NLPIRBase.byte_str_transform
    def detail(self, class_name: str):
        """
        Call **classifier_detail**

        For the most recently classified document, return the rule-matching
        detail for the given result class.

        :param class_name: name of a class from the classification result
        :return: detail text, for example:
        ::

            RULE3:
            SUBRULE1: 内幕 1
            SUBRULE2: 股市 1 基金 3 股票 8
            SUBRULE3: 书摘 2
        """
        return self.get_func("classifier_detail", [c_char_p], c_char_p)(class_name)
    @NLPIRBase.byte_str_transform
    def set_sim_thresh(self, sim: float):
        """
        Call **classifier_setsimthresh**

        Set the classification similarity threshold.

        :param sim: the threshold value
        :return:
        """
        return self.get_func("classifier_setsimthresh", [c_float])(sim)
| StarcoderdataPython |
1620791 | # Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================== CONTINUOUS NOTEBOOK TESTS ============================================
#
# These tests are run for all of our notebooks against the current branch. It is assumed that
# notebooks will not install cirq in case cirq is on the path. The simple `import cirq` path is the
# main focus and it is executed in a shared virtual environment for the notebooks. Thus, these
# tests ensure that notebooks are still working with the latest version of cirq.
import functools
import glob
import os
import subprocess
from typing import Set
import pytest
from dev_tools import shell_tools
SKIP_NOTEBOOKS = [
# skipping vendor notebooks as we don't have auth sorted out
"**/aqt/*.ipynb",
"**/ionq/*.ipynb",
"**/google/*.ipynb",
"**/pasqal/*.ipynb",
# skipping fidelity estimation due to
# https://github.com/quantumlib/Cirq/issues/3502
"examples/*fidelity*",
# chemistry.ipynb requires openfermion, that installs cirq 0.9.1, which interferes
# with testing cirq itself...
'docs/tutorials/educators/chemistry.ipynb',
]
def _list_all_notebooks() -> Set[str]:
    """Return every *.ipynb path tracked by git in this repository."""
    listing = subprocess.check_output(['git', 'ls-files', '*.ipynb'])
    return set(listing.decode('utf-8').splitlines())
def _tested_notebooks():
    """Return sorted absolute paths of all notebooks that should be executed.

    Notebooks matching any SKIP_NOTEBOOKS glob are excluded.

    Fixes: completes the truncated docstring and replaces the
    ``functools.reduce`` union (which raises on an empty sequence) with a
    plain accumulating loop.
    """
    all_notebooks = _list_all_notebooks()
    skipped_notebooks = set()
    for pattern in SKIP_NOTEBOOKS:
        skipped_notebooks.update(glob.glob(pattern, recursive=True))
    # sorted is important otherwise pytest-xdist will complain that
    # the workers have different parametrization:
    # https://github.com/pytest-dev/pytest-xdist/issues/432
    return sorted(os.path.abspath(n) for n in all_notebooks.difference(skipped_notebooks))
@pytest.mark.slow
@pytest.mark.parametrize("notebook_path", _tested_notebooks())
def test_notebooks_against_released_cirq(notebook_path):
    """Execute one notebook via papermill; on failure point at the output copy."""
    notebook_file = os.path.basename(notebook_path)
    notebook_rel_dir = os.path.dirname(os.path.relpath(notebook_path, "."))
    # [:-6] strips the ".ipynb" extension for the output file name.
    out_path = f"out/{notebook_rel_dir}/{notebook_file[:-6]}.out.ipynb"
    cmd = f"""mkdir -p out/{notebook_rel_dir}
papermill {notebook_path} {out_path}"""
    # Capture both streams; do not raise, so we can emit a friendlier message.
    _, stderr, status = shell_tools.run_shell(
        cmd=cmd,
        log_run_to_stderr=False,
        raise_on_fail=False,
        out=shell_tools.TeeCapture(),
        err=shell_tools.TeeCapture(),
    )
    if status != 0:
        print(stderr)
        pytest.fail(
            f"Notebook failure: {notebook_file}, please see {out_path} for the output "
            f"notebook (in Github Actions, you can download it from the workflow artifact"
            f" 'notebook-outputs')"
        )
| StarcoderdataPython |
3292395 | <gh_stars>0
import logging
from typing import Union
from dff.core import Context, Actor
logger = logging.getLogger(__name__)
def multi_response(
    replies: list[str],
    confidences: Union[list, float] = 0.0,
    human_attr: Union[list, dict, None] = None,
    bot_attr: Union[list, dict, None] = None,
    hype_attr: Union[list, dict, None] = None,
):
    """Build a response handler returning several hypotheses at once.

    Each of confidences/human_attr/bot_attr/hype_attr may be a list aligned
    with *replies*, or a scalar that is broadcast to every reply.  The
    returned handler takes the usual (Context, Actor, ...) arguments and
    yields a list of (reply, confidence, human_attr, bot_attr, hype_attr)
    tuples.

    Fix: the attr parameters previously used mutable default arguments
    (``{}``); they now default to None and are normalised inside the call.
    """
    human_attr = {} if human_attr is None else human_attr
    bot_attr = {} if bot_attr is None else bot_attr
    hype_attr = {} if hype_attr is None else hype_attr
    assert replies, "Got empty replies"
    assert not isinstance(confidences, list) or len(confidences) == len(replies)
    assert not isinstance(human_attr, list) or len(human_attr) == len(replies)
    assert not isinstance(bot_attr, list) or len(bot_attr) == len(replies)
    assert not isinstance(hype_attr, list) or len(hype_attr) == len(replies)
    # Broadcast scalars so every reply has its own entry.
    confidences = confidences if isinstance(confidences, list) else [confidences] * len(replies)
    human_attr = human_attr if isinstance(human_attr, list) else [human_attr] * len(replies)
    bot_attr = bot_attr if isinstance(bot_attr, list) else [bot_attr] * len(replies)
    hype_attr = hype_attr if isinstance(hype_attr, list) else [hype_attr] * len(replies)

    def multi_response_handler(ctx, actor, *args, **kwargs) -> list:
        # Signature mirrors DFF's (Context, Actor, ...) handler convention.
        return list(zip(replies, confidences, human_attr, bot_attr, hype_attr))

    return multi_response_handler
| StarcoderdataPython |
41311 | from base import CQPartsTest
from base import testlabel
# units under test
from cqparts_fasteners.fasteners.nutbolt import NutAndBoltFastener
# ---------- Test Assembly ----------
import cadquery
import cqparts
from partslib.basic import Box
from cqparts import constraint
from cqparts.utils import CoordSystem
class FastenedAssembly(cqparts.Assembly):
    """Test fixture: two stacked boxes joined by a NutAndBoltFastener."""
    def make_components(self):
        # 20x20x12 base below an 18x18x18 top; the fastener spans both parts.
        base = Box(length=20, width=20, height=12)
        top = Box(length=18, width=18, height=18)
        return {
            'base': base,
            'top': top,
            'fastener': NutAndBoltFastener(parts=[base, top]),
        }
    def make_constraints(self):
        base = self.components['base']
        top = self.components['top']
        fastener = self.components['fastener']
        return [
            constraint.Fixed(base.mate_bottom),
            constraint.Coincident(top.mate_bottom, base.mate_top),
            # Fastener offset (1, 2) from the top face's origin.
            constraint.Coincident(fastener.mate_origin, top.mate_top + CoordSystem((1, 2, 0))),
        ]
# ---------- Unit Tests ----------
class ScrewFastenerTest(CQPartsTest):
    """Check the nut-and-bolt fastener placement in FastenedAssembly.

    Fix: ``assertEquals`` is a deprecated alias (removed in Python 3.12);
    replaced with ``assertEqual``.
    """
    def test_fastener(self):
        obj = FastenedAssembly()
        bolt = obj.find('fastener.bolt')
        nut = obj.find('fastener.nut')
        # Bolt head sits at the fastener offset on top of the stack (z = 12 + 18).
        self.assertEqual(bolt.world_coords.origin, cadquery.Vector((1, 2, 30)))
        # Bolt must be long enough to pass through both parts.
        self.assertGreater(
            bolt.bounding_box.zlen,
            obj.find('top').height + obj.find('base').height
        )
        # Nut ends up at the underside of the base.
        self.assertEqual(nut.world_coords.origin, cadquery.Vector((1, 2, 0)))
| StarcoderdataPython |
70393 | from django.core.management.base import BaseCommand, CommandError
from shortener.models import LitresinURL
class Command(BaseCommand):
    """Management command: regenerate shortcodes for LitresinURL rows."""
    # NOTE(review): "Refrehes" typo below is user-visible runtime text;
    # left unchanged here as it is not a comment.
    help = 'Refrehes all LitresinURL shortcodes'
    def add_arguments(self, parser):
        # Optional cap on how many rows to refresh; None when omitted.
        parser.add_argument('--items', type=int)
    def handle(self, *args, **options):
        # Delegate to the manager and surface its return value as output.
        return LitresinURL.objects.refresh_shortcodes(items=options['items'])
1672682 | from flask import Flask, redirect, url_for, render_template, request, session, flash
from datetime import timedelta
from json_interpreter import *
from api_caller import *
app = Flask(__name__)
app.secret_key = "supersecretkeyrighthere"
app.permanent_session_lifetime = timedelta(hours=1) # enable if session is permanent below, this just sets time
# Just making it easy to keep track of state values
imperial = "imperial"
metric = "metric"
view_today = 0
view_5_day = 1
view_5_day_graph = 2
# Template must be in a "Template folder in the same dir as py file
@app.route("/")
def home():
    """Render the landing page."""
    return render_template("home.html")
@app.route("/search", methods=["POST", "GET"])
def search_page():
    """Location form: GET shows it (pre-filled from session), POST stores it.

    On the very first POST (no location stored yet) the display unit and
    forecast view are also initialised to their defaults.
    """
    if request.method == "POST":
        session.permanent = True
        # First location submission: seed default unit and view.
        if "zip_code" not in session and "country_name" not in session:
            session["unit"] = imperial
            session["view"] = view_5_day
        print(request.form)  # NOTE(review): debug print left in place
        zip_code = request.form["myZip"]
        session["zip_code"] = zip_code
        country_name = request.form["myCountry"]
        session["country_name"] = country_name
        return redirect(url_for("weather_home"))
    else:
        # Pre-fill the form with any previously stored location.
        country_name = ""
        if "country_name" in session:
            country_name = session["country_name"]
        zip_code = ""
        if "zip_code" in session:
            zip_code = session["zip_code"]
        return render_template("search.html", zip_code=zip_code, country_name=country_name)
@app.route("/weather", methods=["POST", "GET"])
def weather_home():
    """Render the weather page for the stored location, unit and view.

    Each view (today / 5-day / graph) caches its OpenWeather results in the
    session and only re-fetches when can_i_refresh() reports the cache as
    older than five minutes.  Redirects to the search page until a location
    has been provided.

    Fix: the 5-day branch previously tested the session key
    "last_update_today" (written by the *today* view) while indexing
    "last_update_5_day", which could raise a KeyError; it now tests the key
    it reads.  A leftover debug print was also removed.
    """
    # If the user hasn't provided location data, redirect to the search page.
    if "country_name" not in session or "zip_code" not in session:
        flash("Enter your Zip and Country so we can find out what it's looking like out there", "info")
        return redirect(url_for("search_page"))
    else:
        temp_data = {}
        country_name = session["country_name"]
        zip_code = session["zip_code"]
        unit = session["unit"]
        view = session["view"]
        interpreter = json_interpreter()
        caller = api_caller()
        if view == view_5_day:
            if "last_update_5_day" not in session or "forecast_5_day" not in session or can_i_refresh(session.get("last_update_5_day")):
                interval_forecasts = interpreter.lazy_pass_in(caller.get_5_day_forecast(country_name, zip_code, unit))
                if interval_forecasts is not None:
                    # Keep only each day's midday entry for the 5-day view.
                    for key in interval_forecasts.keys():
                        if interval_forecasts[key]["timestamp_adjusted"].split("@ ")[1] == "12:00:00":
                            temp_data[interval_forecasts[key]["timestamp"]] = interval_forecasts[key]
                    session["last_update_5_day"] = datetime.datetime.now()
                    session["forecast_5_day"] = temp_data
                else:
                    flash("Looks like there was some trouble connecting to OpenWeather to fetch forecasts. Make sure your "
                          + "API key is up to date, and servers are reachable.")
            else:
                temp_data = session["forecast_5_day"]
        elif view == view_today:
            if "forecast_today" not in session or "last_update_today" not in session or can_i_refresh(session["last_update_today"]):
                temp_data = interpreter.lazy_pass_in(caller.get_weather_today(country_name, zip_code, unit))
                if temp_data is None:
                    flash("Looks like there was some trouble connecting to OpenWeather to fetch forecasts. Make sure your "
                          + "API key is up to date, and servers are reachable.")
                else:
                    session["last_update_today"] = datetime.datetime.now()
                    session["forecast_today"] = temp_data
            else:
                temp_data = session["forecast_today"]
        else:
            if "graph_points" not in session or "last_update_graph" not in session or can_i_refresh(session["last_update_graph"]):
                api_return = caller.get_5_day_forecast(country_name, zip_code, unit)
                if api_return is None:
                    flash("Looks like there was some trouble connecting to OpenWeather to fetch forecasts. Make sure "
                          + "your API key is up to date, and servers are reachable.")
                else:
                    api_return["graph"] = True
                    temp_data = interpreter.lazy_pass_in(api_return)
                    session["last_update_graph"] = datetime.datetime.now()
                    session["graph_points"] = temp_data
            else:
                temp_data = session["graph_points"]
        # Allow switch between "Today" and "5 Day Forecast"
        return render_template("weather.html", unit=unit, temp_data=temp_data, view=view)
@app.route("/toggle_unit")
def toggle_unit():
    """Flip the stored measurement system between imperial and metric."""
    if "unit" in session:
        session["unit"] = metric if session["unit"] == imperial else imperial
    return redirect(url_for("weather_home"))
@app.route("/toggle_view", methods=["POST"])
def toggle_view():
    """Store the requested forecast view; unknown ids fall back to the graph."""
    if request.method == "POST" and "new_view_id" in request.form:
        requested = int(request.form["new_view_id"])
        if requested in (view_today, view_5_day):
            session["view"] = requested
        else:
            session["view"] = view_5_day_graph
    return redirect(url_for("weather_home"))
@app.route("/contact")
def contact_page():
    """Redirect to the author's GitHub profile."""
    return redirect("https://github.com/jfarnsworth95")
@app.route("/clear_session")
def clear_session():
    """Drop every key this app stores in the session, then return home."""
    for key in (
        "country_name", "zip_code", "unit", "view", "can_update",
        "last_update_5_day", "last_update_today", "last_update_graph",
        "forecast_today", "forecast_5_day", "graph_points",
    ):
        session.pop(key, None)
    flash("Your session has been successfully purged... It had a family, you monster.")
    return redirect(url_for("home"))
# Just catches any unknown paths to provide a cleaner experience
@app.route("/<unknown>")
def unknown(unknown):
    """Catch-all route: send any unrecognised path back to the home page."""
    return redirect(url_for("home"))
def can_i_refresh(last_updated):
    """Return True when the cached data is absent or older than five minutes."""
    if last_updated is None:
        return True
    age = datetime.datetime.now() - last_updated
    return age.total_seconds() / 60 > 5
if __name__ == "__main__":
app.run(debug=True) # debug=True will allow code to update once saved without requiring a restart
| StarcoderdataPython |
3291903 | <gh_stars>1-10
# -*- coding: utf-8 -*-
__version__ = "1.0"
__date__ = "09.05.2016"
__author__ = "<NAME>"
from itertools import tee
# conda install mingw libpython
# conda install gensim
from gensim import corpora, models
from gensim.models import Phrases
from sklearn.externals import joblib
import pandas as pd
from pprint import pprint
from time import time
import logging
from time import gmtime, strftime
from sklearn.feature_extraction.text import TfidfTransformer
# from sklearn.grid_search import GridSearchCV
from nltk.tokenize import word_tokenize
import pickle
import cPickle
import datetime
from nltk.stem.porter import *
# from stemming.porter2 import stem
from sklearn.linear_model import SGDClassifier
from sklearn import feature_selection
# pip install treetaggerwrapper
import treetaggerwrapper
from random import random
# from multiprocessing import Process
import string, codecs
from time import sleep
import nltk
import numpy
import scipy.sparse
from pylab import *
from sklearn.pipeline import Pipeline
from sklearn import metrics
from sklearn.cross_validation import train_test_split
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
# from multiprocessing import freeze_support
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
# from sklearn.feature_extraction.text import VectorizerMixin
from sklearn.feature_extraction.text import TfidfVectorizer
# from sklearn.feature_extraction.text import CountVectorizer
# from sklearn.ensemble import ExtraTreesClassifier
from sklearn.pipeline import FeatureUnion, Pipeline
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.tree import DecisionTreeClassifier
# from sklearn.naive_bayes import MultinomialNB
from textstat.textstat import textstat
# from AuthorProfiling.PAN_2016.working_files.backup_files.getMacroFScore_cross import getGenderFScore
from PreprocessingClass import PreprocessingClass
from sklearn.preprocessing import StandardScaler
# from sklearn.preprocessing import Normalizer
from FeatureClass import FeatureClass
from sklearn.base import BaseEstimator, TransformerMixin
from MainRunner import FinalClassicationClass
import xml.etree.ElementTree as ET
import os
from optparse import OptionParser
import warnings
warnings.filterwarnings("ignore")
def detect_language(path):
    """Read the 'lang' attribute from the first author XML file in *path*.

    Entries named 'truth.txt' or '.DS_Store' are ignored; only the first
    remaining file is parsed.
    """
    lang = ''
    for entry in os.listdir(path):
        # Skip the ground-truth listing and macOS metadata.
        if entry in ('truth.txt', '.DS_Store'):
            continue
        root = ET.parse(os.path.join(path, entry)).getroot()
        lang = root.get('lang')
        break
    return lang.strip()
def get_parser():
    """Build the OptionParser used by the training entry point."""
    parser = OptionParser()
    parser.add_option("-c", "--input", dest="input", help="path/to/training/corpus")
    parser.add_option("-o", "--output", dest="output", help="path/to/output/directory")
    return parser
def main(argv):
    """Train gender and age classifiers for the language found in the corpus.

    Parses -c/--input and -o/--output, detects the corpus language (en/es/nl)
    from its XML files, then runs the matching preprocessing + pipeline
    branch.  NOTE(review): the three language branches are near-duplicates
    and differ only in tagger/pipeline choice and the presence of age labels.
    """
    parser = get_parser()
    # TreeTagger instances per supported language (Windows install path).
    tagger_en = treetaggerwrapper.TreeTagger(TAGLANG='en', TAGDIR="C:\\TreeTagger")
    tagger_es = treetaggerwrapper.TreeTagger(TAGLANG='es', TAGDIR="C:\\TreeTagger")
    tagger_nl = treetaggerwrapper.TreeTagger(TAGLANG='nl', TAGDIR="C:\\TreeTagger")
    final_gender_classifier = LinearSVC(tol=1e-4, C = 0.10000000000000001, penalty='l2', class_weight={1:1.0, 2:0.9})
    '''
    # other classifiers for gender classification
    LinearSVC1 = SGDClassifier(n_iter=100, loss="hinge", penalty="l2")
    LinearSVC1 = RandomForestClassifier(n_estimators=300, criterion = 'entropy', max_features= 2000)
    LinearSVC1 = SVC(C = 0.10000000000000001, gamma = 0.0005, kernel='linear', probability=False, tol=1e-4, shrinking=True, cache_size=2000, class_weight={1:1.0, 2:0.9})
    '''
    final_age_classifier = OneVsRestClassifier(LogisticRegression(dual=False,multi_class='multinomial', solver='lbfgs'))
    preprocessor = PreprocessingClass()
    (options, args) = parser.parse_args(argv)
    if not (options.input and options.output):
        parser.error("Required arguments not provided")
    else:
        lang = detect_language(options.input)
        if lang.lower() not in ['en', 'es', 'nl']:
            print >> sys.stderr, 'Language other than en, es, nl'
            sys.exit(1)
        else:
            print "Current Language: ", lang
        # Translate a cygwin-style output path to a native Windows path.
        try:
            new_output_path = re.sub("^/cygdrive/c/", "C:/", options.output)
        except Exception as e:
            print e
            new_output_path = options.output
        if lang.lower() == "en":
            main_classifier = FinalClassicationClass(lang.lower(), options.input, new_output_path, None, 1)
            dataset_input = preprocessor.read_all_files(options.input, "Training Set", lang.lower())
            X, y_gender, y_age, y_author = preprocessor.split_lists(dataset_input, lang.lower())
            # pos tagging and lemmatization
            X_list_pos_tags, X_list_lemma = preprocessor.stem_and_pos(X, tagger_en)
            # create a dictionary of text_samples, lemmatized_text_samples, pos_tagged_samples and author_ids
            pipelined_dictionary = preprocessor.create_pipeline_dict(X, X_list_lemma, X_list_pos_tags, y_author)
            main_classifier.dataset_statistics(X, y_gender, y_author, lang.lower(), y_age)
            pipeline_gender = main_classifier.make_pipeline_en(final_gender_classifier, "gender_tr", lang.lower())
            pipeline_age = main_classifier.make_pipeline_en(final_age_classifier, "age_tr", lang.lower())
            main_classifier.train_model(pipelined_dictionary, y_gender, y_author, pipeline_gender, pipeline_age, new_output_path, lang.lower(), y_age)
            # print options.input
            # print new_output_path
        elif lang.lower() == "nl":
            # Dutch corpus has no age labels.
            main_classifier = FinalClassicationClass(lang.lower(), options.input, new_output_path, None, 1)
            dataset_input = preprocessor.read_all_files(options.input, "Training Set", lang.lower())
            X, y_gender, y_author = preprocessor.split_lists(dataset_input, lang.lower())
            # pos tagging and lemmatization
            X_list_pos_tags, X_list_lemma = preprocessor.stem_and_pos(X, tagger_nl)
            # create a dictionary of text_samples, lemmatized_text_samples, pos_tagged_samples and author_ids
            pipelined_dictionary = preprocessor.create_pipeline_dict(X, X_list_lemma, X_list_pos_tags, y_author)
            main_classifier.dataset_statistics(X, y_gender, y_author, lang.lower())
            pipeline_gender = main_classifier.make_pipeline_nl(final_gender_classifier, "gender_tr", lang.lower())
            pipeline_age = main_classifier.make_pipeline_nl(final_age_classifier, "age_tr", lang.lower())
            main_classifier.train_model(pipelined_dictionary, y_gender, y_author, pipeline_gender, pipeline_age, new_output_path, lang.lower())
            # print options.input
            # print new_output_path
        elif lang.lower() == "es":
            main_classifier = FinalClassicationClass(lang.lower(), options.input, new_output_path, None, 1)
            dataset_input = preprocessor.read_all_files(options.input, "Training Set", lang.lower())
            X, y_gender, y_age, y_author = preprocessor.split_lists(dataset_input, lang.lower())
            # pos tagging and lemmatization
            X_list_pos_tags, X_list_lemma = preprocessor.stem_and_pos(X, tagger_es)
            # create a dictionary of text_samples, lemmatized_text_samples, pos_tagged_samples and author_ids
            pipelined_dictionary = preprocessor.create_pipeline_dict(X, X_list_lemma, X_list_pos_tags, y_author)
            main_classifier.dataset_statistics(X, y_gender, y_author, lang.lower(), y_age)
            pipeline_gender = main_classifier.make_pipeline_es(final_gender_classifier, "gender_tr", lang.lower())
            pipeline_age = main_classifier.make_pipeline_es(final_age_classifier, "age_tr", lang.lower())
            main_classifier.train_model(pipelined_dictionary, y_gender, y_author, pipeline_gender, pipeline_age, new_output_path, lang.lower(), y_age)
            # print options.input
            # print new_output_path
sys.exit(1) | StarcoderdataPython |
69335 | # -*- coding: utf-8 -*-
from .help import Help
from .welcome import Welcome
from .faq import FAQ
from .events import Events
from .scheduler import Scheduler
from .polls import Polls
from .search import Search
from .newMembers import NewMembers
from .info import Info
from .mentorships import Mentorship
# from .mentionNewMembers import MentionNewMembers
| StarcoderdataPython |
3299109 | # Copyright (C) 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def _path_ignoring_repository(f):
    """Return f's path relative to its owning repository's root."""
    workspace_root = f.owner.workspace_root
    if not workspace_root:
        return f.short_path
    # Strip everything up to and including "<workspace_root>/".
    start = f.path.find(workspace_root) + len(workspace_root) + 1
    return f.path[start:]
def _gen_cc_impl(ctx):
    """Run protoc with the IPC plugin, emitting .ipc.h/.ipc.cc per input proto."""
    protos = [f for dep in ctx.attr.deps for f in dep.proto.direct_sources]
    includes = [f for dep in ctx.attr.deps for f in dep.proto.transitive_imports.to_list()]
    proto_root = ""
    if ctx.label.workspace_root:
        proto_root = "/" + ctx.label.workspace_root
    # Declare one .ipc.h and one .ipc.cc next to every input .proto.
    out_files = []
    out_files += [ctx.actions.declare_file(
        proto.basename[:-len(".proto")] + ".ipc.h",
        sibling = proto) for proto in protos]
    out_files += [ctx.actions.declare_file(
        proto.basename[:-len(".proto")] + ".ipc.cc",
        sibling = proto) for proto in protos]
    dir_out = str(ctx.genfiles_dir.path + proto_root)
    arguments = [
        "--plugin=protoc-gen-plugin=" + ctx.executable._plugin.path,
        "--plugin_out=wrapper_namespace=ipc:" + dir_out
    ]
    # Make transitive imports resolvable: external repos contribute their
    # repository root as a proto_path, in-repo imports use ".".
    for include in includes:
        directory = include.path
        if directory.startswith("external"):
            external_sep = directory.find("/")
            repository_sep = directory.find("/", external_sep + 1)
            arguments += ["--proto_path=" + directory[:repository_sep]]
        else:
            arguments += ["--proto_path=."]
    arguments += [proto.path for proto in protos]
    ctx.actions.run(
        inputs = protos + includes,
        outputs = out_files,
        tools = [ctx.executable._protoc, ctx.executable._plugin],
        executable = ctx.executable._protoc,
        arguments = arguments,
        use_default_shell_env = True,
    )
    # Expose all outputs, plus separate "cc" and "h" output groups.
    return [
        DefaultInfo(files = depset(out_files)),
        OutputGroupInfo(
            cc = depset([f for f in out_files if f.path.endswith(".cc")]),
            h = depset([f for f in out_files if f.path.endswith(".h")]),
        ),
    ]
# Rule wrapping _gen_cc_impl: generates IPC wrapper sources from proto deps.
_gen_cc = rule(
    attrs = {
        "deps": attr.label_list(
            mandatory = True,
            allow_empty = False,
            providers = ["proto"],
        ),
        # The protoc compiler binary (built for the host).
        "_protoc": attr.label(
            default = Label("@com_google_protobuf//:protoc"),
            executable = True,
            cfg = "host",
        ),
        # Perfetto's protoc plugin that emits the .ipc.* wrappers.
        "_plugin": attr.label(
            default = Label("@perfetto//:ipc_plugin"),
            executable = True,
            cfg = "host",
        ),
    },
    output_to_genfiles = True,
    implementation = _gen_cc_impl,
)
def cc_ipc_library(name, deps, cdeps, **kwargs):
    """Macro: generate IPC wrappers from protos and build them as a cc_library.

    deps are proto_library targets; cdeps are the C++ deps of the resulting
    library.  Extra kwargs are forwarded to cc_library.
    """
    # Generate the .ipc.h/.ipc.cc sources.
    _gen_cc(
        name = name + "_src",
        deps = deps,
    )
    # Filegroup exposing only the generated headers (for hdrs below).
    native.filegroup(
        name = name + "_h",
        srcs = [":" + name + "_src"],
        output_group = "h",
    )
    native.cc_library(
        name = name,
        srcs = [":" + name + "_src"],
        hdrs = [":" + name + "_h"],
        deps = cdeps,
        **kwargs
    )
1630818 | <filename>server/client.py
from connections import *
from messages import *
from thread import *
from store import *
import time
class Client(object):
    def __init__(self, ip=PUBLIC):
        """Create a client with two fresh TCP sockets; call connect() to join."""
        # super(Client, self).__init__t__()
        self.ip = ip
        self.id = None  # assigned by the server on a successful Join
        self._socket_sync = socket.socket(socket.AF_INET, socket.SOCK_STREAM)   # request/response channel
        self._socket_async = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # server-push (tether) channel
        self._sync = None   # Connection wrapper for the sync socket (set by _join)
        self._async = None  # Connection wrapper for the async socket (set by _tether)
        self._messages = []
        self._is_host = False
        self._tether_callback = lambda *_: None  # no-op until registered
        self._init_info = {}
def register_tether_callback(self, callback):
self._tether_callback = callback
@property
def is_host(self):
return self._is_host
def _join(self, ip, port, timeout):
print 'joining to ip'
conn = self._connect(self._socket_sync, ip, port, timeout)
conn.send(Join(self._init_info))
res = conn.recv().pop(0)
if res.data['success']:
print 'successfully joined'
self._sync = conn
self.id = res.data['id']
self._is_host = res.data['host']
else:
print 'unable to join. closing connection'
conn.close()
def _tether(self, ip, port, timeout):
print 'tethering to ip'
conn = self._connect(self._socket_async, ip, port, timeout)
conn.send(Tether({'id': self.id}))
res = conn.recv().pop(0)
if res.data['success']:
print 'successfully tethered'
start_new_thread(self._listen_tether, (conn, ))
self._async = conn
else:
conn.close()
def _listen_tether(self, connection):
while not connection.closed:
for message in connection.recv():
if message.type == 'action':
# print 'receiving action', message
self.recv_action(message)
def _connect(self, sock, ip, port, timeout):
print 'connecting socket to %r' % ((ip, port), )
sock.settimeout(timeout)
sock.connect((ip, port))
sock.settimeout(None)
conn = Connection(sock, (ip, port))
print 'waiting for response from server'
res = conn.recv().pop(0)
if res.data['success']:
return conn
return None
def connect(self, ip, port=PORT, timeout=TIMEOUT):
self._join(ip, port, timeout)
self._tether(ip, port, timeout)
print 'successfully connected'
def disconnect(self):
if self._sync:
self._sync.close()
if self._async:
self._async.close()
def recv_action(self, action):
if 'success' in action:
return
else:
self._tether_callback(action.data)
def post(self, info_name, info, callback):
post = Post(info_name, info)
self._sync.send(post)
message = self._sync.recv()[0]
return callback(message)
def get(self, info_name, identifier, callback):
get = Get(info_name, identifier)
self._sync.send(get)
message = self._sync.recv()[0]
return callback(message)
def delete(self, info_name, identifier, callback):
delete = Delete(info_name, identifier)
self._sync.send(delete)
message = self._sync.recv()[0]
return callback(message)
def send(self, msg_type, data):
'''
Non blocking. Sends without listening.
Good if you don't care about how your message is handled.
'''
message = Message(msg_type, data)
self._async.send(message)
if __name__ == '__main__':
    # Smoke test: connect to the default server and push one action message.
    client = Client()
    client.connect(IP, PORT)
    time.sleep(1)  # give the tether thread time to come up
    # Bug fix: Client has no `send_action` method; the API is
    # send(msg_type, data), which wraps the payload in a Message and pushes
    # it over the async channel.
    client.send('action', {'type': 'key_press'})
    time.sleep(2)  # keep the process alive long enough for delivery
3380512 | <gh_stars>10-100
#_*_coding:utf-8_*_
__author__ = 'jidong'
from django.conf.urls import patterns, include, url
# URL routes for the iuser app. Uses the legacy Django `patterns()` helper:
# each view below is given as a string resolved against 'iuser.views'.
urlpatterns = patterns('iuser.views',
    # User CRUD
    url(r'^user/$', 'user', name='user'),
    url(r'^user/list/$', 'user_list', name='user_list'),
    url(r'^user/add/$', 'user_add', name='user_add'),
    url(r'^user/edit/$', 'user_edit', name='user_edit'),
    url(r'^user/delete/$', 'user_delete', name='user_delete'),
    # Group CRUD
    url(r'^group/$', 'group', name='group'),
    url(r'^group/list/$', 'group_list', name='group_list'),
    url(r'^group/add/$', 'group_add', name='group_add'),
    url(r'^group/edit/$', 'group_edit', name='group_edit'),
    url(r'^group/delete/$', 'group_delete', name='group_delete'),
    # Department CRUD
    url(r'^departments/$', 'departments', name='departments'),
    url(r'^departments/list/$', 'departments_list', name='departments_list'),
    url(r'^departments/add/$', 'departments_add', name='departments_add'),
    url(r'^departments/edit/$', 'departments_edit', name='departments_edit'),
    url(r'^departments/delete/$', 'departments_delete', name='departments_delete'),
)
1735552 | # Copyright (c) 2017 FlashX, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import importlib
import json
import os
import time
from typing import Callable, Optional, List
import sys
import shutil
from rq import get_current_job
from gtmcore.activity.monitors.devenv import DevEnvMonitorManager
from gtmcore.labbook import LabBook
from gtmcore.inventory.inventory import InventoryManager, InventoryException
from gtmcore.inventory.branching import MergeConflict
from gtmcore.inventory import Repository
from gtmcore.logging import LMLogger
from gtmcore.workflows import ZipExporter, LabbookWorkflow, DatasetWorkflow, MergeOverride
from gtmcore.container.core import (build_docker_image as build_image,
start_labbook_container as start_container,
stop_labbook_container as stop_container)
from gtmcore.dataset.manifest import Manifest
from gtmcore.dataset.io.manager import IOManager
# PLEASE NOTE -- No global variables!
#
# None of the following methods can use global variables.
# ANY use of globals will cause the following methods to fail.
def publish_repository(repository: Repository, username: str, access_token: str,
                       remote: Optional[str] = None, public: bool = False, id_token: str = None) -> None:
    """Background job: publish a LabBook or Dataset to a remote git server.

    Args:
        repository: LabBook or Dataset instance to publish
        username: username of the logged-in user
        access_token: bearer token for the remote
        remote: git remote name (defaults to "origin")
        public: if True, create the remote repository with public visibility
        id_token: identity token for the remote backend

    Raises:
        Exception: with a user-facing message if any step of the publish fails
    """
    p = os.getpid()
    logger = LMLogger.get_logger()
    logger.info(f"(Job {p}) Starting publish_repository({str(repository)})")

    def update_meta(msg):
        # Append progress feedback to the RQ job metadata so the UI can poll it.
        job = get_current_job()
        if not job:
            return
        if 'feedback' not in job.meta:
            job.meta['feedback'] = msg
        else:
            job.meta['feedback'] = job.meta['feedback'] + f'\n{msg}'
        job.save_meta()

    try:
        with repository.lock():
            # Select the workflow implementation matching the repository type.
            if isinstance(repository, LabBook):
                wf = LabbookWorkflow(repository)
            else:
                wf = DatasetWorkflow(repository)  # type: ignore
            wf.publish(username=username, access_token=access_token, remote=remote or "origin",
                       public=public, feedback_callback=update_meta, id_token=id_token)
    except Exception as e:
        logger.exception(f"(Job {p}) Error on publish_repository: {e}")
        # Re-raise with a user-facing message; the traceback is logged above.
        raise Exception("Could not publish - try to log out and log in again.")
def sync_repository(repository: Repository, username: str, override: MergeOverride,
                    remote: str = "origin", access_token: str = None,
                    pull_only: bool = False, id_token: str = None) -> int:
    """Background job: sync a LabBook or Dataset with its remote.

    Args:
        repository: LabBook or Dataset instance to sync
        username: username of the logged-in user
        override: merge-conflict resolution strategy
        remote: git remote name
        access_token: bearer token for the remote
        pull_only: if True, only pull changes (do not push)
        id_token: identity token for the remote backend

    Returns:
        The count returned by the workflow's sync (number of upstream commits pulled).

    Raises:
        MergeConflict: if the sync hits an unresolvable merge conflict
        Exception: with a user-facing message for any other failure
    """
    p = os.getpid()
    logger = LMLogger.get_logger()
    logger.info(f"(Job {p}) Starting sync_repository({str(repository)})")

    def update_meta(msg):
        # Append non-empty, stripped progress feedback to the RQ job metadata.
        job = get_current_job()
        if not job:
            return
        if msg is None or (not msg.strip()):
            return
        if 'feedback' not in job.meta:
            job.meta['feedback'] = msg.strip()
        else:
            job.meta['feedback'] = job.meta['feedback'] + f'\n{msg.strip()}'
        job.save_meta()

    try:
        with repository.lock():
            if isinstance(repository, LabBook):
                wf = LabbookWorkflow(repository)
            else:
                wf = DatasetWorkflow(repository)  # type: ignore
            cnt = wf.sync(username=username, remote=remote, override=override,
                          feedback_callback=update_meta, access_token=access_token,
                          id_token=id_token, pull_only=pull_only)
        logger.info(f"(Job {p} Completed sync_repository with cnt={cnt}")
        return cnt
    except MergeConflict as me:
        # Propagate merge conflicts unchanged so the caller can offer resolution.
        logger.exception(f"(Job {p}) Merge conflict: {me}")
        raise
    except Exception as e:
        logger.exception(f"(Job {p}) Error on sync_repository: {e}")
        raise Exception("Could not sync - try to log out and log in again.")
def import_labbook_from_remote(remote_url: str, username: str, config_file: str = None) -> str:
    """Return the root directory of the newly imported Project"""
    p = os.getpid()
    logger = LMLogger.get_logger()
    logger.info(f"(Job {p}) Starting import_labbook_from_remote({remote_url}, {username})")

    def update_meta(msg):
        # Append progress feedback to the RQ job metadata so the UI can poll it.
        job = get_current_job()
        if not job:
            return
        if 'feedback' not in job.meta:
            job.meta['feedback'] = msg
        else:
            job.meta['feedback'] = job.meta['feedback'] + f'\n{msg}'
        job.save_meta()

    try:
        # Derive a short "owner/name" label from the URL for user feedback.
        toks = remote_url.split("/")
        if len(toks) > 1:
            proj_path = f'{toks[-2]}/{toks[-1].replace(".git", "")}'
        else:
            proj_path = remote_url
        update_meta(f"Importing Project from {proj_path!r}...")
        wf = LabbookWorkflow.import_from_remote(remote_url, username, config_file)
        update_meta(f"Imported Project {wf.labbook.name}!")
        return wf.labbook.root_dir
    except Exception as e:
        update_meta(f"Could not import Project from {remote_url}.")
        logger.exception(f"(Job {p}) Error on import_labbook_from_remote: {e}")
        raise
def export_labbook_as_zip(labbook_path: str, lb_export_directory: str) -> str:
    """Return path to archive file of exported labbook.

    Args:
        labbook_path: root directory of the LabBook to export
        lb_export_directory: directory in which to write the zip archive
    """
    p = os.getpid()
    logger = LMLogger.get_logger()
    logger.info(f"(Job {p}) Starting export_labbook_as_zip({labbook_path})")
    try:
        lb = InventoryManager().load_labbook_from_directory(labbook_path)
        # Hold the repository lock while zipping to avoid a mid-export mutation.
        with lb.lock():
            path = ZipExporter.export_labbook(lb.root_dir, lb_export_directory)
        return path
    except Exception as e:
        logger.exception(f"(Job {p}) Error on export_labbook_as_zip: {e}")
        raise
def export_dataset_as_zip(dataset_path: str, ds_export_directory: str) -> str:
    """Return path to archive file of exported dataset.

    Args:
        dataset_path: root directory of the Dataset to export
        ds_export_directory: directory in which to write the zip archive
    """
    p = os.getpid()
    logger = LMLogger.get_logger()
    logger.info(f"(Job {p}) Starting export_dataset_as_zip({dataset_path})")
    try:
        ds = InventoryManager().load_dataset_from_directory(dataset_path)
        # Hold the repository lock while zipping to avoid a mid-export mutation.
        with ds.lock():
            path = ZipExporter.export_dataset(ds.root_dir, ds_export_directory)
        return path
    except Exception as e:
        logger.exception(f"(Job {p}) Error on export_dataset_as_zip: {e}")
        raise
# NOTE(review): "labboook" is a typo, but the name is the job's public entry
# point (referenced by the dispatcher), so it is kept for compatibility.
def import_labboook_from_zip(archive_path: str, username: str, owner: str,
                             config_file: Optional[str] = None) -> str:
    """Method to import a labbook from a zip file

    Args:
        archive_path(str): Path to the uploaded zip
        username(str): Username
        owner(str): Owner username
        config_file(str): Optional path to a labmanager config file

    Returns:
        str: directory path of imported labbook
    """

    def update_meta(msg):
        # Overwrite (not append) job feedback with the latest progress message.
        job = get_current_job()
        if not job:
            return
        job.meta['feedback'] = msg
        job.save_meta()

    p = os.getpid()
    logger = LMLogger.get_logger()
    logger.info(f"(Job {p}) Starting import_labbook_from_zip(archive_path={archive_path},"
                f"username={username}, owner={owner}, config_file={config_file})")
    try:
        lb = ZipExporter.import_labbook(archive_path, username, owner,
                                        config_file=config_file,
                                        update_meta=update_meta)
        return lb.root_dir
    except Exception as e:
        logger.exception(f"(Job {p}) Error on import_labbook_from_zip({archive_path}): {e}")
        raise
    finally:
        # The uploaded archive is temporary; always remove it, pass or fail.
        if os.path.exists(archive_path):
            os.remove(archive_path)
def import_dataset_from_zip(archive_path: str, username: str, owner: str,
                            config_file: Optional[str] = None) -> str:
    """Method to import a dataset from a zip file

    Args:
        archive_path(str): Path to the uploaded zip
        username(str): Username
        owner(str): Owner username
        config_file(str): Optional path to a labmanager config file

    Returns:
        str: directory path of imported dataset
    """

    def update_meta(msg):
        # Overwrite (not append) job feedback with the latest progress message.
        job = get_current_job()
        if not job:
            return
        job.meta['feedback'] = msg
        job.save_meta()

    p = os.getpid()
    logger = LMLogger.get_logger()
    logger.info(f"(Job {p}) Starting import_dataset_from_zip(archive_path={archive_path},"
                f"username={username}, owner={owner}, config_file={config_file})")
    try:
        lb = ZipExporter.import_dataset(archive_path, username, owner,
                                        config_file=config_file,
                                        update_meta=update_meta)
        return lb.root_dir
    except Exception as e:
        logger.exception(f"(Job {p}) Error on import_dataset_from_zip({archive_path}): {e}")
        raise
    finally:
        # The uploaded archive is temporary; always remove it, pass or fail.
        if os.path.exists(archive_path):
            os.remove(archive_path)
def build_labbook_image(path: str, username: str,
                        tag: Optional[str] = None, nocache: bool = False) -> str:
    """Return a docker image ID of given LabBook.

    Args:
        path: Pass-through arg to labbook root.
        username: Username of active user.
        tag: Pass-through arg to tag of docker image.
        nocache(bool): Pass-through arg to docker build.

    Returns:
        Docker image ID
    """
    logger = LMLogger.get_logger()
    logger.info(f"Starting build_labbook_image({path}, {username}, {tag}, {nocache}) in pid {os.getpid()}")

    try:
        job = get_current_job()
        if job:
            # Record the worker pid so the UI/manager can signal the build.
            job.meta['pid'] = os.getpid()
            job.save_meta()

        # NOTE(review): closes over `job`; if run outside an RQ worker
        # (job is None) a callback invocation would fail — confirm callers.
        def save_metadata_callback(line: str) -> None:
            try:
                if not line:
                    return
                # Stream docker build output into the job feedback, line by line.
                job.meta['feedback'] = (job.meta.get('feedback') or '') + line + '\n'
                job.save_meta()
            except Exception as e:
                logger.error(e)

        image_id = build_image(path, override_image_tag=tag, nocache=nocache, username=username,
                               feedback_callback=save_metadata_callback)
        logger.info(f"Completed build_labbook_image in pid {os.getpid()}: {image_id}")
        return image_id
    except Exception as e:
        logger.error(f"Error on build_labbook_image in pid {os.getpid()}: {e}")
        raise
def start_labbook_container(root: str, config_path: str, username: str,
                            override_image_id: Optional[str] = None) -> str:
    """Return the ID of the LabBook Docker container ID.

    Args:
        root: Root directory of labbook
        config_path: Path to config file (labbook.client_config.config_file)
        username: Username of active user
        override_image_id: Force using this name of docker image (do not infer)

    Returns:
        Docker container ID
    """
    logger = LMLogger.get_logger()
    logger.info(f"Starting start_labbook_container(root={root}, config_path={config_path}, username={username}, "
                f"override_image_id={override_image_id}) in pid {os.getpid()}")

    try:
        c_id = start_container(labbook_root=root, config_path=config_path,
                               override_image_id=override_image_id, username=username)
        logger.info(f"Completed start_labbook_container in pid {os.getpid()}: {c_id}")
        return c_id
    except Exception as e:
        logger.error("Error on launch_docker_container in pid {}: {}".format(os.getpid(), e))
        raise
def stop_labbook_container(container_id: str) -> int:
    """Stop the given LabBook Docker container.

    TODO - Take labbook as argument rather than image tag.

    Args:
        container_id(str): Container to stop

    Returns:
        0 to indicate no failure
    """
    logger = LMLogger.get_logger()
    logger.info(f"Starting stop_labbook_container({container_id}) in pid {os.getpid()}")

    try:
        stop_container(container_id)
        return 0
    except Exception as e:
        logger.error("Error on stop_labbook_container in pid {}: {}".format(os.getpid(), e))
        raise
def run_dev_env_monitor(dev_env_name, key) -> int:
    """Run method to check if new Activity Monitors for a given dev env need to be started/stopped

    Args:
        dev_env_name(str): Name of the dev env to monitor
        key(str): The unique string used as the key in redis to track this DevEnvMonitor instance

    Returns:
        0 to indicate no failure
    """
    logger = LMLogger.get_logger()
    logger.debug("Checking Dev Env `{}` for activity monitors in PID {}".format(dev_env_name, os.getpid()))

    try:
        demm = DevEnvMonitorManager()
        dev_env = demm.get_monitor_instance(dev_env_name)
        if not dev_env:
            # No monitor registered under this name: fail loudly instead of
            # silently skipping the check.
            raise ValueError('dev_env is None')
        dev_env.run(key)
        return 0
    except Exception as e:
        logger.error("Error on run_dev_env_monitor in pid {}: {}".format(os.getpid(), e))
        raise e
def start_and_run_activity_monitor(module_name, class_name, user, owner, labbook_name, monitor_key, author_name,
                                   author_email, session_metadata):
    """Run method to run the activity monitor. It is a long running job.

    Args:
        module_name(str): python module containing the monitor class
        class_name(str): monitor class name inside that module
        user(str): username of the active user
        owner(str): owner of the labbook being monitored
        labbook_name(str): name of the labbook being monitored
        monitor_key(str): redis key tracking this monitor instance
        author_name(str): commit author name for recorded activity
        author_email(str): commit author email for recorded activity
        session_metadata: session info passed through to monitor.start()

    Returns:
        0 to indicate no failure
    """
    logger = LMLogger.get_logger()
    logger.info("Starting Activity Monitor `{}` in PID {}".format(class_name, os.getpid()))

    try:
        # Import the monitor class
        m = importlib.import_module(module_name)

        # get the class
        monitor_cls = getattr(m, class_name)

        # Instantiate monitor class
        monitor = monitor_cls(user, owner, labbook_name, monitor_key,
                              author_name=author_name, author_email=author_email)

        # Start the monitor (blocks for the life of the monitoring session)
        monitor.start(session_metadata)
        return 0
    except Exception as e:
        logger.error("Error on start_and_run_activity_monitor in pid {}: {}".format(os.getpid(), e))
        raise e
def index_labbook_filesystem():
    """Stub for future filesystem-indexing support.

    Raises:
        NotImplementedError: always; the feature is not implemented yet.
    """
    # Bug fix: `raise NotImplemented` raises a TypeError in Python 3 because
    # NotImplemented is a sentinel constant, not an exception class.
    raise NotImplementedError
def test_exit_success():
    """Used only for testing -- trivially succeed by returning exit code 0."""
    exit_code = 0
    return exit_code
def test_exit_fail():
"""Used only for testing -- always throws an exception"""
raise Exception("Intentional Exception from job `test_exit_fail`")
def test_sleep(n):
    """Used only for testing -- example method with argument.

    Sleeps for ``n`` seconds inside an RQ worker, stamping sample metadata
    and the worker pid on the current job. Returns 0 on success.
    """
    logger = LMLogger.get_logger()
    logger.info("Starting test_sleep({}) in pid {}".format(n, os.getpid()))

    try:
        job = get_current_job()
        job.meta['sample'] = 'test_sleep metadata'
        job.meta['pid'] = int(os.getpid())
        job.save_meta()

        time.sleep(n)
        logger.info("Completed test_sleep in pid {}".format(os.getpid()))
        return 0
    except Exception as e:
        logger.error("Error on test_sleep in pid {}: {}".format(os.getpid(), e))
        raise
def test_incr(path):
    """Used only for testing -- create or increment a JSON counter file.

    If ``path`` does not exist, writes ``{"amt": 1}``; otherwise reads the
    file and increments its ``amt`` field by one.
    """
    logger = LMLogger.get_logger()
    logger.info("Starting test_incr({}) in pid {}".format(path, os.getpid()))
    try:
        amt = 1
        if not os.path.exists(path):
            logger.info("Creating {}".format(path))
            with open(path, 'w') as fp:
                json.dump({'amt': amt}, fp)
        else:
            logger.info("Loading {}".format(path))
            with open(path, 'r') as fp:
                amt_dict = json.load(fp)
            # Bug fix: the original logged the literal string "Amt = {}"
            # because the format argument was missing.
            logger.info("Amt = {}".format(amt_dict['amt']))
            with open(path, 'w') as fp:
                amt_dict['amt'] = amt_dict['amt'] + 1
                json.dump(amt_dict, fp)
            logger.info("Set amt = {} in {}".format(amt_dict['amt'], path))
    except Exception as e:
        logger.error("Error on test_incr in pid {}: {}".format(os.getpid(), e))
        raise
def download_dataset_files(logged_in_username: str, access_token: str, id_token: str,
                           dataset_owner: str, dataset_name: str,
                           labbook_owner: Optional[str] = None, labbook_name: Optional[str] = None,
                           all_keys: Optional[bool] = False, keys: Optional[List[str]] = None):
    """Background job to download file objects for a dataset.

    Args:
        logged_in_username: username for the currently logged in user
        access_token: bearer token
        id_token: identity token
        dataset_owner: Owner of the dataset containing the files to download
        dataset_name: Name of the dataset containing the files to download
        labbook_owner: Owner of the labbook if this dataset is linked
        labbook_name: Name of the labbook if this dataset is linked
        all_keys: Boolean indicating if all remaining files should be downloaded
        keys: List of file keys to download (used when all_keys is False)

    Raises:
        ValueError: if neither ``keys`` nor ``all_keys=True`` was supplied
    """

    def update_meta(msg):
        # Append progress feedback to the RQ job metadata so the UI can poll it.
        job = get_current_job()
        if not job:
            return
        if 'feedback' not in job.meta:
            job.meta['feedback'] = msg
        else:
            job.meta['feedback'] = job.meta['feedback'] + f'\n{msg}'
        job.save_meta()

    logger = LMLogger.get_logger()

    try:
        p = os.getpid()
        logger.info(f"(Job {p}) Starting download_dataset_files(logged_in_username={logged_in_username},"
                    f"dataset_owner={dataset_owner}, dataset_name={dataset_name}, labbook_owner={labbook_owner},"
                    f" labbook_name={labbook_name}, all_keys={all_keys}, keys={keys}")

        im = InventoryManager()

        if labbook_owner is not None and labbook_name is not None:
            # This is a linked dataset, load repo from the Project
            lb = im.load_labbook(logged_in_username, labbook_owner, labbook_name)
            dataset_dir = os.path.join(lb.root_dir, '.gigantum', 'datasets', dataset_owner, dataset_name)
            ds = im.load_dataset_from_directory(dataset_dir)
        else:
            # this is a normal dataset. Load repo from working dir
            ds = im.load_dataset(logged_in_username, dataset_owner, dataset_name)

        ds.namespace = dataset_owner
        ds.backend.set_default_configuration(logged_in_username, access_token, id_token)
        m = Manifest(ds, logged_in_username)
        iom = IOManager(ds, m)

        if all_keys:
            result = iom.pull_all(status_update_fn=update_meta)
        elif keys:
            result = iom.pull_objects(keys=keys, status_update_fn=update_meta)
        else:
            raise ValueError("Must provide a list of keys or set all_keys=True")

        # Save the Relay node IDs to the job metadata so the UI can re-fetch as needed
        job = get_current_job()
        if job:
            job.meta['success_keys'] = [x.dataset_path for x in result.success]
            job.meta['failure_keys'] = [x.dataset_path for x in result.failure]
            job.save_meta()

        if len(result.failure) > 0:
            # If any downloads failed, exit non-zero so the UI knows there was an error
            sys.exit(-1)
    except Exception as err:
        logger.exception(err)
        raise
def clean_dataset_file_cache(logged_in_username: str, dataset_owner: str, dataset_name: str,
                             cache_location: str, config_file: str = None) -> None:
    """Background job to remove a dataset's local file cache once it is unused.

    The cache is removed only when the dataset no longer exists in the user's
    inventory AND no Project still references it as a submodule.

    Args:
        logged_in_username: username for the currently logged in user
        dataset_owner: Owner of the dataset whose cache may be removed
        dataset_name: Name of the dataset whose cache may be removed
        cache_location: Absolute path to the file cache (inside the container) for this dataset
        config_file: Optional path to a labmanager config file

    Returns:
        None
    """
    logger = LMLogger.get_logger()

    p = os.getpid()
    try:
        logger.info(f"(Job {p}) Starting clean_dataset_file_cache(logged_in_username={logged_in_username},"
                    f"dataset_owner={dataset_owner}, dataset_name={dataset_name}")

        im = InventoryManager(config_file=config_file)

        # Check for dataset
        try:
            im.load_dataset(logged_in_username, dataset_owner, dataset_name)
            logger.info(f"{logged_in_username}/{dataset_owner}/{dataset_name} still exists. Skipping file cache clean.")
            return
        except InventoryException:
            # Dataset not found, move along
            pass

        # Check for submodule references
        for lb in im.list_labbooks(logged_in_username):
            submodules = lb.git.list_submodules()
            for submodule in submodules:
                # Submodule names encode "<owner>&<dataset name>".
                submodule_dataset_owner, submodule_dataset_name = submodule['name'].split("&")
                if submodule_dataset_owner == dataset_owner and submodule_dataset_name == dataset_name:
                    logger.info(f"{logged_in_username}/{dataset_owner}/{dataset_name} still referenced by {str(lb)}."
                                f" Skipping file cache clean.")
                    return

        # If you get here the dataset no longer exists and is not used by any projects, clear files
        shutil.rmtree(cache_location)
    except Exception as err:
        logger.error(f"(Job {p}) Error in clean_dataset_file_cache job")
        logger.exception(err)
        raise
| StarcoderdataPython |
4810240 | <reponame>jhoblitt/ltd-keeper
"""Lightweight library of Fastly API interactions needed by LTD Keeper.
See https://docs.fastly.com/api/ for more information about the Fastly API.
"""
import logging
import requests
from .exceptions import FastlyError
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class FastlyService(object):
    """API client for a Fastly service.

    Parameters
    ----------
    service_id : str
        The Fastly service ID.
    api_key : str
        The Fastly API key. We only support key-based authentication.
    """

    def __init__(self, service_id, api_key):
        super(FastlyService, self).__init__()
        self.service_id = service_id
        self.api_key = api_key
        self._api_root = 'https://api.fastly.com'

    def _url(self, path):
        # Join an API path onto the fixed Fastly API root.
        return self._api_root + path

    def purge_key(self, surrogate_key):
        """Instant purge URLs with a given `surrogate_key`.

        See
        https://docs.fastly.com/api/purge#purge_077dfb4aa07f49792b13c87647415537
        for more information.

        Raises
        ------
        FastlyError
            If the purge request does not return HTTP 200; carries the
            decoded JSON error payload from Fastly.
        """
        path = '/service/{service}/purge/{surrogate_key}'.format(
            service=self.service_id, surrogate_key=surrogate_key)
        log.info('Fastly purge {0}'.format(path))
        r = requests.post(self._url(path),
                          headers={'Fastly-Key': self.api_key,
                                   'Accept': 'application/json'})
        if r.status_code != 200:
            # Bug fix: `r.json` is a method; the original passed the bound
            # method object to FastlyError instead of the decoded payload.
            raise FastlyError(r.json())
| StarcoderdataPython |
149213 | #
# Copyright (c) 2013 - 2017, 2019 Software AG, Darmstadt, Germany and/or its licensors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from xpybuild.propertysupport import *
from xpybuild.buildcommon import *
from xpybuild.pathsets import *
from xpybuild.targets.native import *
from xpybuild.targets.copy import Copy
from xpybuild.utils.compilers import GCC, VisualStudio
# Pull in the shared native toolchain configuration (GCC/VisualStudio etc.).
include(os.environ['PYSYS_TEST_ROOT_DIR']+'/build_utilities/native_config.xpybuild.py')

# Headers matching this regex are ignored by the include up-to-date check.
setGlobalOption('native.include.upToDateCheckIgnoreRegex', '(c:/program files.*|.*/tESt4.h)' if IS_WINDOWS else '.*/test4.h')
setGlobalOption('native.include.upToDateCheckIgnoreSystemHeaders', True) # only works on linux/gcc currently

# Stage "generated" include files and a generated source file under OUTPUT_DIR
# so the Cpp targets below can depend on target-produced inputs.
Copy('${OUTPUT_DIR}/my-generated-include-files/', FindPaths('./include-src/'))
Copy('${OUTPUT_DIR}/my-generated-include-files2/generatedpath/test3.h', FindPaths('./include-src/generatedpath/'))
Copy('${OUTPUT_DIR}/test-generated.cpp', './test.cpp')

# Compile permutations of {static,generated} source x {static,generated} includes.
Cpp(objectname('${OUTPUT_DIR}/no-target-deps'), './test.cpp',
    includes=[
        "./include/",
        './include-src/',
    ]
)

Cpp(objectname('${OUTPUT_DIR}/target-cpp-and-include-dir'), '${OUTPUT_DIR}/test-generated.cpp',
    includes=[
        "./include/",
        '${OUTPUT_DIR}/my-generated-include-files/', # a target
    ]
)

Cpp(objectname('${OUTPUT_DIR}/target-cpp'), '${OUTPUT_DIR}/test-generated.cpp',
    includes=[
        "./include/",
        './include-src/',
    ]
)

Cpp(objectname('${OUTPUT_DIR}/target-include-dir'), './test.cpp',
    includes=[
        "./include/",
        '${OUTPUT_DIR}/my-generated-include-files/', # a target
    ]
)

# generated include files in non-target directories are no longer supported
Cpp(objectname('${OUTPUT_DIR}/target-include-file'), './test.cpp',
    includes=[
        "./include/",
        TargetsWithinDir('${OUTPUT_DIR}/my-generated-include-files2/'), # NOT a target, but contains one
    ]
)
| StarcoderdataPython |
1694266 | import random
from timezones.forms import TIMEZONE_CHOICES
from django.contrib.auth.models import User
names = """<NAME> <NAME>
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>annah
<NAME>
<NAME>
<NAME> <NAME>
<NAME>
"""
surnames = """<NAME> <NAME>
<NAME> <NAME>
<NAME> <NAME>
<NAME> <NAME> <NAME>
<NAME>
<NAME> <NAME>
<NAME>
<NAME> <NAME>
Myers Long Foster Sanders Ro<NAME> <NAME>
<NAME>
"""
names = names.split()
random.shuffle(names)
surnames = surnames.split()
random.shuffle(surnames)
def generate():
for name, surname in zip(names, surnames):
username = '%s.%s' % (name.lower(), surname.lower())
u = User.objects.create(
username=username,
first_name=name,
last_name=surname,
is_active=True,
is_superuser=False,
is_staff=False,
email='<EMAIL>' % (username,),
password='<PASSWORD>' #password=<PASSWORD>
)
print "Created User %s" % unicode(u)
if __name__ == "__main__":
generate() | StarcoderdataPython |
1620839 | # -- coding: utf-8 --
# Copyright 2018 <NAME> <<EMAIL>>
"""
Module to handle block type used in iontof file formats ITA,ITM,ITS, etc...
"""
import sys
import binascii
import struct
import os
class MissingBlock(Exception):
    """Raised when a child block with a given name/index is absent from its parent."""

    def __init__(self, parent, name, index):
        # Full slash-separated path of the block that could not be found.
        self.block_name = parent.parent + '/' + name
        self.index = index

    def __str__(self):
        return 'Missing block "{0}" with index {1}'.format(self.block_name, self.index)
class Block:
"""
Class to handle a iontof-Block
One iontof file ITA,ITM,ITS contains a lot of Blocks forming a hierarchical structure.
Each Block can have children (sub-Blocks) and values (data).
Note: This class was created by reverse engineering on the fileformat of iontof and is most probably not 100% accurate.
Nevertheless is works in very good agreement with the developer's data.
"""
def __init__(self, fp, parent=''):
"""
Init the class
fp: file pointer (the one created by open(...) of an ITA,ITM,ITS, etc... file pointing at the beginning of a block
Each block start with one byte of type followed by 4 bytes that should always be \x19\x00\x00\x00 (all those 5 bytes are saved in self.Type)
Note: the value \x19\x00\x00\x00 is the unit32 for 25 which is the pre-header length of the block.
Then follows 5 uint32: length, z, u ,x ,y
length: The length of the block's name
z: Block ID. Start at 0 and is increased monotonically for each blocks of the same name with the same parent. We usually find the ID from the children's list (see below) and this information is never used as it's redundant.
u: The number of children / sub-blocks. Might be = 0 even if the block has children. Check the value L (defined below) if so
x: The length of the block's value
y: Redundant. Seems to be always = x
Then follow length-bytes representing the name of the block
Then follow x-bytes forming the value of the block
Blocks of types \x01\x19\x00\x00\x00 and \x03\x19\x00\x00\x00 are blocks that contains sub-blocks. There is no big difference between the two. I guess that types \x01 is the first one and type \x03 are the continuation blocks
Those block have a value which starts with 41-bytes.
2 uint32 -> (length, nums).
length: We actually don't need it. It's a redundant information. That is the length of the sub-headers. (It stop just before the sub-blocks names)
nums: The variable u (see above) contains the number of children. If u ==0, then nums will tell the correct number of children
5 bytes: type (usually 00 00 00 00 00 or 03 19 00 00 00)
5 uint32 -> a,b,L,d,e
a,b,d,e are unknown
L seems to give information on the number of children
1 uint64 -> NextBlock
Big blocks can be chunked in several ones. NextBlock tells the position in the file of the next chunk. If = 0, then it's the last chunk
Then 33 bytes for each sub-block follows:
1 byte: spacing (usually = 0 or 1)
3 uint32 -> index, slen, id
index: The position of the sub-block name in the header
slen: The length of the sub-block name (which is store later). So basically the sub-block name is: Block.value[index:index+slen]
id: start at 0 and increase monotonically for sub-blocks having the same name
4 unknown padding bytes
2 uint64 -> blen, bidx
blen: Block length
bidx: Position of the Block in the file
All the names of all sub-blocks follows (concatenated). You need to use their name length (see slen above) in order to chunk them properly
You should then go to the position NextBlock in the file and read the block their. It's the continuation of the current one.
Blocks of types \x00\x19\x00\x00\s00 have no children and only a value, usually formed by a string (UTF-16), an int or a float.
The type of the value was not discovered to be written in the Block. The user should deduce it depending on the block's name and location.
Blocks of type \x80\x19\x00\x00\x00 have it's values compressed with zlib (notice the \x78\x5e at the beginning which is typical for zlib encoded data)
Once decompressed it usually store an array in binary.
"""
self.f = fp
self.parent = parent
self.offset = self.f.tell()
self.Type = self.f.read(5)
if self.Type[1:] != b'\x19\x00\x00\x00':
raise ValueError('Wrong block type ({Type}) found @{pos}'\
.format(pos=self.offset, Type=binascii.hexlify(self.Type[1:])))
if len(self.Type) < 5:
raise ValueError('EOF reached. Block cannot be read')
self.head = dict(zip(['name_length', 'ID', 'N', 'length1', 'length2'], \
struct.unpack('<5I', self.f.read(20))))
self.name = self.f.read(self.head['name_length']).decode('ascii')
self.value = self.f.read(self.head['length1'])
self.List = None
self.iterP = 0
def BreadthFirstSearch(self, callback=None, filter=lambda x: True, func=lambda x: x):
res = []
if filter(self):
res += [func(self)]
if callback is not None:
callback(self)
if self.Type[0] in [1,3]:
for x in self:
res += x.BreadthFirstSearch(callback=callback, filter=filter, func=func)
return res
def getName(self):
"""
Return the name of the Block
"""
return self.name
def getList(self):
"""
Return the list of the sub-blocks (children) of the current Block.
"""
if not self.Type[0:1] in [b'\x01', b'\x03']:
return []
if self.List is None:
return self.createList()
return self.List
def gotoFollowingBlock(self):
    """Jump to the block stored immediately after this one in the file.

    Returns the following Block, or None when this block ends at (or past)
    the end of the file.
    """
    # header (25 bytes) + name + value = total on-disk size of this block
    offset = self.offset+25+self.head['name_length']+self.head['length1']
    if offset < os.fstat(self.f.fileno()).st_size:
        self.f.seek(offset)
        return Block(self.f, parent=None)
    return None
def gotoNextBlock(self):
    """Follow this block's NextBlock pointer to its continuation chunk.

    Large blocks are chained; the sub-header stores the absolute file
    offset of the next chunk. Seeks there and returns it as a new Block.
    """
    offset = self.offset
    self.f.seek(offset)
    # Re-read this block's pre-header and name to position at the sub-header.
    head = dict(zip(['name_length', 'ID', 'N', 'length1', 'length2'], struct.unpack('<5x5I', self.f.read(25))))
    name = self.f.read(head['name_length'])
    # length/nums are unused here; NextBlock is the absolute offset of the
    # continuation chunk (0 would mean "this is the last chunk").
    length, nums, NextBlock = struct.unpack('<II25xQ', self.f.read(41))
    self.f.seek(NextBlock)
    return Block(self.f, parent=self.parent)
    def createList(self, limit=None, debug=False):
        """
        Generate a list (self.List) containing all the children (sub-blocks) of the current Block.

        The container's payload starts with a 41-byte sub-header whose last
        field (NextBlock) chains continuation containers; the loop follows
        that chain until NextBlock is 0.  Each child entry is a fixed 33-byte
        record read at data+42+33*i, and the child's name is fetched
        indirectly via its 'index' field.
        NOTE(review): the *limit* and *debug* parameters are currently
        unused; kept for interface compatibility.
        """
        length, nums, NextBlock = struct.unpack('<II25xQ', self.value[:41])
        self.nums = nums
        offset = self.offset
        self.List = []
        while True:
            self.f.seek(offset)
            head = dict(zip(['name_length', 'ID', 'N', 'length1', 'length2'], \
                struct.unpack('<5x5I', self.f.read(25))))
            name = self.f.read(head['name_length'])
            data = self.f.tell()
            length, nums, NextBlock = \
                struct.unpack('<II25xQ', self.f.read(41))
            N = head['N']
            ## The following is commented as believed to be erroneous
            #if N == 0:
            #    N = nums
            for i in range(N):
                self.f.seek(data+42+33*i)
                # index: offset of the child's name inside this container,
                # slen: name length, blen/bidx: child payload length/offset.
                S = dict(\
                    zip(['index', 'slen', 'id', 'blen', 'bidx'],\
                    struct.unpack('<III4xQQ', self.f.read(32))))
                self.f.seek(data+S['index'])
                S['name'] = self.f.read(S['slen']).decode('ascii')
                self.List.append(S)
            if NextBlock == 0:
                break
            offset = NextBlock
        return self.List
def getString(self):
"""
Decode the value of the Block to UTF-16 (standard for all strings in this fileformat)
"""
return self.value.decode('utf16')
    def dictList(self):
        """
        Return a dictionary of the value decoded with various formats (raw, long, float, utf16).
        As the type of the data is not known, this function is very helpful for debugging purpose.

        Only leaf children (type byte 0x00) are decoded; each entry maps the
        child's name to every plausible interpretation of its raw bytes.
        """
        d = {}
        for i, l in enumerate(self.getList()):
            self.f.seek(l['bidx'])
            # Parent path: '/' for the root block, otherwise parent/name.
            child = Block(self.f, parent=[self.parent+'/'+self.name,'/'][self.parent==''])
            if child.Type[0:1] == b'\x00':
                value = binascii.hexlify(child.value)
                d[child.name] = {'raw':value}
                if len(child.value) == 4:
                    d[child.name]['long'] = child.getLong()
                elif len(child.value) == 8:
                    # 8 bytes could be either a double or a 64-bit int.
                    d[child.name]['float'] = child.getDouble()
                    d[child.name]['long'] = child.getLongLong()
                if len(child.value)%2 == 0:
                    d[child.name]['utf16'] = child.value.decode('utf16', "ignore")
            del child
        return d
    def showList(self):
        """
        Show a list of all the children (sub-blocks) of the current Block.
        It will also display the value/data of all the children (if any).

        Leaf values are printed in every plausible interpretation (hex,
        integer of the matching width, UTF-16) since the on-disk type is not
        recorded.  Children that fail to parse are skipped silently.
        """
        # getList() also populates self.List, which the loop below iterates.
        print('List of', len(self.getList()))
        for i, l in enumerate(self.List):
            self.f.seek(l['bidx'])
            other = ''
            try:
                child = Block(self.f, parent=[self.parent+'/'+self.name,'/'][self.parent==''])
                if child.Type[0:1] == b'\x00':
                    # Pick the integer interpretation matching the payload width.
                    if len(child.value) == 4:
                        vL = child.getLong()
                        Dtype = 'long'
                    elif len(child.value) == 8:
                        vL = child.getDouble()
                        Dtype = 'double'
                        other += ' = '+str(child.getLongLong())+" (long64)"
                    elif len(child.value) == 2:
                        vL = child.getShort()
                        Dtype = 'short'
                    elif len(child.value) == 1:
                        vL = child.getByte()
                        Dtype = 'byte'
                    else:
                        vL = '???'
                        Dtype = '???'
                    value = binascii.hexlify(child.value)
                    if len(value) > 16:
                        value = value[:16]+b'...'
                    if len(child.value)%2 == 0:
                        vS = child.value.decode('utf16', "ignore")
                        if len(vS) > 20:
                            vS = vS[:20]+'...'
                        print(u"{name} ({id}) <{blen}> @{bidx}, value = {value} (hex) = \"{vS}\" (UTF-16)= {vL} ({Dtype}){other}"\
                            .format(value=value, vL=vL, Dtype=Dtype, other=other, vS=vS, **l))
                    else:
                        print(u"{name} ({id}) <{blen}> @{bidx}, value = {value} (hex) = {vL} ({Dtype}){other}"\
                            .format(value=value, vL=vL, Dtype=Dtype, other=other, **l))
                else:
                    print("{name} ({id}) [{T}] <{blen}> @{bidx}".format(T=child.Type[0], **l))
                del child
            except ValueError:
                # Unparseable child (e.g. truncated header) -- skip it.
                pass
    def __iter__(self):
        """
        Return an iterator over all the children of the current block.
        Note: iteration state lives on the Block itself (self.pointer), so
        nested iteration over the same instance is not supported.
        """
        self.pointer = 0
        return self
    def __next__(self):
        """Return the next child Block, advancing the iteration pointer set
        by __iter__; raises StopIteration when all children were visited."""
        L = self.getList()
        if self.pointer >= len(L):
            raise StopIteration
        it = L[self.pointer]
        self.pointer += 1
        return self.gotoItem(it['name'], it['id'])
    def gotoItem(self, name, idx=0, lazy=False):
        """
        Return a new Block instance of a child of the current Block.
        name: name of the children's block
        idx:  child id (or ordinal position when lazy=True) -- see getIndex
        lazy: forwarded to getIndex for positional rather than id matching
        """
        Idx = self.getIndex(name, idx, lazy=lazy)
        self.f.seek(Idx)
        # Parent path: '/' for the root block, otherwise parent/name.
        return Block(self.f, parent=[self.parent+'/'+self.name,'/'][self.parent==''])
    def getIndex(self, name, idx=0, lazy=False):
        """
        Get the file offset of the child having a matching name.
        This function is more intended for internal usage;
        you are encouraged to use the function goto() instead.

        If more than one child has the same name, the second one can be
        retrieved by idx=1, the third with idx=2, etc.
        Sometimes the ids do not start at 0 but at arbitrary high values;
        use lazy=True with idx=0 to fetch the first one in storage order
        regardless of its id.

        Raises MissingBlock when no child matches.
        """
        if type(name) is bytes:
            name = name.decode('ascii')
        # i counts only the children whose name matches (for lazy mode).
        i=0
        for l in self.getList():
            if l['name'] == name:
                if (lazy and i==idx) or (not lazy and l['id'] == idx):
                    return l['bidx']
                i+=1
        raise MissingBlock(self,name,idx)
    def goto(self, path, lazy=False):
        """
        Return a sub Block having a specific path.

        path: similar to a file path.  The block X contained in B, which is
            itself contained in A, is retrieved with the path "A/B/X".
            If block B has several children named X, "A/B/X[n]" returns the
            n-th one (0 is the first child).
        lazy: as ids sometimes start at arbitrary values, lazy=True selects
            children by storage order instead of id (see getIndex).
        """
        if path == '':
            return self
        s = self
        for p in path.split('/'):
            idx = 0
            # "name[n]" syntax selects the n-th child with that name.
            if '[' in p and p[-1] == ']':
                i = p.index('[')
                idx = int(p[i+1:-1])
                p = p[:i]
            s = s.gotoItem(p, idx, lazy=lazy)
        return s
def getLongLong(self):
"""
Decode the value as an 64-Integer
"""
return struct.unpack('<q', self.value)[0]
def getDouble(self):
"""
Decode the value as a 64-float (Double)
"""
return struct.unpack('<d', self.value)[0]
def getShort(self):
"""
Decode the value as an 16-Integer (Short)
"""
return struct.unpack('<h', self.value)[0]
def getByte(self):
"""
Decode the value as a 1-Byte
"""
return struct.unpack('<B', self.value)[0]
def getULong(self):
"""
Decode the value as an unsigned 32-Integer (Long)
"""
return struct.unpack('<I', self.value)[0]
def getLong(self):
"""
Decode the value as an 32-Integer (Long)
"""
return struct.unpack('<i', self.value)[0]
    def unpickle(self):
        """
        Deserialize the block value as a Python pickle.

        SECURITY: pickle.loads executes arbitrary code embedded in the
        payload -- only call this on files from a trusted source.
        """
        import pickle
        return pickle.loads(self.value)
    def getKeyValue(self, offset=16):
        """
        Return a dictionary of key/value pairs of the data.
        Note that the function has no idea whether the data are really
        stored in this layout; it blindly applies the pattern
        [u32 key-length][utf16 key][2 pad][i64][f64][u32 str-length][utf16 str]
        starting at *offset*.
        """
        L = struct.unpack("<I", self.value[offset:offset+4])[0]
        Key = self.value[offset+4:offset+4+L].decode('utf16', 'ignore')
        # '<2xqd' = 2 padding bytes + int64 + float64 (18 bytes total).
        int_value, float_value = struct.unpack("<2xqd", self.value[offset+4+L:offset+22+L])
        L2 = struct.unpack("<I", self.value[offset+22+L:offset+26+L])[0]
        SVal = self.value[offset+26+L:offset+26+L+L2].decode('utf16', 'ignore')
        return {'key':Key, 'float':float_value, 'int':int_value,'string':SVal}
def show(self, maxlevel=3, level=0, All=False, out=sys.stdout, digraph=False, parent=None, ex=None):
"""
Display the children of the current Block (recursively if maxlevel > 1)
Very useful for debugging purpose and looking for the path of valuable data.
out: file instance to write the results (default terminal)
digraph: if True return a digraph (http://www.graphviz.org/) representation of the children
level: internal variable used to call the function recursively.
ex: execute function
"""
if not ex is None:
ex(self)
if parent == None:
parent = self.name
if digraph and level == 0:
out.write('digraph {{\n graph [nodesep=.1 rankdir=LR size="10,120"]\n'.format(root=parent))
for l in self.getList():
if l['id'] == 0 or All:
if digraph:
out.write('"{parent}-{name}" [label="{name}"]\n"{parent}" -> "{parent}-{name}"\n'\
.format(parent=parent, name=l['name'].decode('utf8')))
else:
if ex is None:
out.write("{tab}{name} ({id}) @{bidx}\n".format(tab="\t"*level, **l))
if level < maxlevel:
try:
self.gotoItem(l['name'], l['id'])\
.show(maxlevel, level+1, All=All, out=out, digraph=digraph\
, parent=parent+'-'+l['name'], ex=ex)
except:
pass
if digraph and level == 0:
out.write('}')
def getIndexes(self, key, debug=False):
if type(key) is str:
key = key.encode('utf8')
r = []
for x in self.getList():
if debug:
print(x['name'],key)
if x['name'] == key:
r.append(x['id'])
return r
    def modify_block_and_export(self, path, new_data, output, debug=False, prog=False, lazy=False):
        """
        Copy the whole file to *output*, replacing the payload of the block
        found at *path* with *new_data* and patching every container whose
        child table or NextBlock pointer references data located after the
        modified block (offsets shift by len(new_data) - len(old_data)).

        path:   block path as understood by goto()
        new_data: replacement payload (bytes)
        output: destination filename; must not already exist
        debug:  print extra information
        prog:   display a tqdm progress bar
        lazy:   forwarded to goto()

        NOTE(review): the existence check uses `assert`, which is stripped
        under `python -O`; an explicit exception would be safer.
        """
        assert not os.path.exists(output) # Avoid to erase an existing file. Erase it outside the library if needed.
        out = open(output,'wb')
        out.write(b'ITStrF01')
        block = self.goto(path, lazy=lazy)
        block_offset = block.offset
        length_diff = len(new_data)-len(block.value)
        self.f.seek(8)
        FILE_SIZE = os.fstat(self.f.fileno()).st_size
        if prog:
            # Prefer the notebook progress bar, fall back to the console one.
            try:
                from tqdm import tqdm_notebook as tqdm
            except:
                from tqdm import tqdm as tqdm
            T = tqdm(total=FILE_SIZE)
        debug_msg = ["FileSize: "+str(FILE_SIZE)]
        if debug:
            print("File Size",FILE_SIZE)
        curr = 8
        if prog:
            T.update(8)
        while self.f.tell() < FILE_SIZE:
            # Keep only the 30 most recent messages for the error report.
            debug_msg = debug_msg[-30:]
            if prog:
                ncurr = self.f.tell()
                T.update(ncurr-curr)
                curr = ncurr
            debug_msg.append("Current position: @"+str(self.f.tell()))
            try:
                current = Block(self.f) # Here we don't care about the parent argument. It is used only for debug purpose anyway.
            except Exception as ex:
                print("Error found! Debug info")
                for x in debug_msg:
                    print("\t"+x)
                raise ex
            self.f.seek(current.offset)
            curr_block_length = current.head['length1'] + current.head['name_length'] + 25
            debug_msg.append('Block Name: "{}" / length: {}'.format(current.name, curr_block_length))
            if current.offset == block_offset: # Found the block to change
                debug_msg.append("Block to change FOUND!")
                out.write(self.f.read(5)) # Write block type
                out.write(struct.pack("<5I",block.head['name_length'],block.head['ID'],block.head['N'], length_diff+block.head['length1'], length_diff+block.head['length2']))
                self.f.read(20) # Skip header
                out.write(self.f.read(block.head['name_length'])) # copy block name
                self.f.read(block.head['length1']) # skip data in source
                out.write(new_data) # write new_data
            elif current.Type[0] in [1,3]: # found a container, check for references to block after the modified block
                debug_msg.append("Block container found. Checking children...")
                out.write(self.f.read(25)) # copy header
                out.write(self.f.read(current.head['name_length'])) # copy block name
                SubHeader = list(struct.unpack('<2I5s5IQ', self.f.read(41) )) # read sub-block header
                if SubHeader[8] > block_offset: # Is the nextbloxk after the modified block? Yes => Adjust the offset position
                    SubHeader[8] += length_diff
                out.write(struct.pack('<2I5s5IQ', *SubHeader )) # write sub-block header
                N = current.head['N']
                #if N == 0:
                #    N = SubHeader[1]
                for i in range(N):
                    X, index, slen, id,Y, blen, bidx = struct.unpack('<B4I2Q', self.f.read(33))
                    if bidx == block_offset: # If the children block is the modified block, adjust length
                        blen = len(new_data)
                    elif bidx > block_offset: # If the children is after the modifien block, adjust its offset
                        bidx += length_diff
                    out.write(struct.pack('<B4I2Q',X, index, slen, id, Y,blen, bidx)) # write child info
                # Write the extra bytes used by iontof which seems to be useless as well as the childrens' name
                delta = curr_block_length - (self.f.tell() - current.offset) # number of bytes remaining till the end of the block
                out.write(self.f.read(delta))
            else:
                debug_msg.append("Data Block found. Copy data without check...")
                out.write(self.f.read(curr_block_length))
        if prog:
            T.update(FILE_SIZE-curr)
            T.close()
        out.close()
| StarcoderdataPython |
152257 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import tempfile
from odoo import api, fields, models, tools, _
from odoo.exceptions import UserError
class BaseUpdateTranslations(models.TransientModel):
    """Wizard that re-exports and re-imports translations for a language,
    creating empty translation entries for untranslated terms."""
    _name = 'base.update.translations'
    _description = 'Update Translations'
    @api.model
    def _get_languages(self):
        """Return (code, name) selection pairs for all active translatable languages."""
        langs = self.env['res.lang'].search([('active', '=', True), ('translatable', '=', True)])
        return [(lang.code, lang.name) for lang in langs]
    lang = fields.Selection(_get_languages, 'Language', required=True)
    @api.model
    def _get_lang_name(self, lang_code):
        """Return the display name for *lang_code*, raising UserError if unknown."""
        lang = self.env['res.lang'].search([('code', '=', lang_code)], limit=1)
        if not lang:
            raise UserError(_('No language with code "%s" exists') % lang_code)
        return lang.name
    @api.multi
    def act_update(self):
        """Export the selected language to a temporary PO file and reload it,
        creating empty entries for untranslated terms, then close the wizard."""
        this = self[0]
        lang_name = self._get_lang_name(this.lang)
        with tempfile.NamedTemporaryFile() as buf:
            tools.trans_export(this.lang, ['all'], buf, 'po', self._cr)
            context = {'create_empty_translation': True}
            # trans_load_data rewinds the file object itself before reading.
            tools.trans_load_data(self._cr, buf, 'po', this.lang, lang_name=lang_name, context=context)
        return {'type': 'ir.actions.act_window_close'}
| StarcoderdataPython |
1747315 | <gh_stars>1-10
# encoding: utf-8
from six.moves.urllib.parse import quote
import webhelpers
import ckan.lib.search as search
from ckan.tests.legacy import setup_test_search_index
from ckan.tests.legacy.functional.api.base import *
from ckan.tests.legacy import TestController as ControllerTestCase
class PackageSearchApiTestCase(ApiTestCase, ControllerTestCase):
    """Functional tests for the /action/package_search API endpoint
    against a small fixture dataset (testpkg + CreateTestData packages)."""
    @classmethod
    def setup_class(self):
        # NOTE(review): conventionally the classmethod parameter is 'cls'
        # (as in teardown_class below); kept as-is to avoid churn.
        setup_test_search_index()
        CreateTestData.create()
        self.package_fixture_data = {
            'name' : u'testpkg',
            'title': 'Some Title',
            'url': u'http://blahblahblah.mydomain',
            'resources': [{u'url':u'http://blahblahblah.mydomain',
                           u'format':u'', u'description':''}],
            'tags': ['russion', 'novel'],
            'license_id': u'gpl-3.0',
            'extras': {'national_statistic':'yes',
                       'geographic_coverage':'England, Wales'},
        }
        CreateTestData.create_arbitrary(self.package_fixture_data)
        self.base_url = self.offset('/action/package_search')
    @classmethod
    def teardown_class(cls):
        model.repo.rebuild_db()
        search.clear_all()
    def assert_results(self, res_dict, expected_package_names):
        # Checks only the first result's name against the expectation.
        result = res_dict['result']['results'][0]
        assert_equal(result['name'], expected_package_names)
    def test_01_uri_q(self):
        offset = self.base_url + '?q=%s' % self.package_fixture_data['name']
        res = self.app.get(offset, status=200)
        res_dict = self.data_from_res(res)
        self.assert_results(res_dict, 'testpkg')
        assert res_dict['result']['count'] == 1, res_dict['result']['count']
    def test_02_post_q(self):
        offset = self.base_url
        query = {'q':'testpkg'}
        res = self.app.post(offset, params=query, status=200)
        res_dict = self.data_from_res(res)
        self.assert_results(res_dict, 'testpkg')
        assert res_dict['result']['count'] == 1, res_dict['result']['count']
    def test_04_post_json(self):
        query = {'q': self.package_fixture_data['name']}
        json_query = self.dumps(query)
        offset = self.base_url
        res = self.app.post(offset, params=json_query, status=200)
        res_dict = self.data_from_res(res)
        self.assert_results(res_dict, 'testpkg')
        assert res_dict['result']['count'] == 1, res_dict['result']['count']
    def test_06_uri_q_tags(self):
        query = webhelpers.util.html_escape('annakarenina tags:russian tags:tolstoy')
        offset = self.base_url + '?q=%s' % query
        res = self.app.get(offset, status=200)
        res_dict = self.data_from_res(res)
        self.assert_results(res_dict, 'annakarenina')
        # NOTE(review): the failure message reads res_dict['count'], which
        # does not exist at the top level of the v3 response shape and would
        # raise KeyError if the assertion fired; likely meant
        # res_dict['result']['count'] -- confirm before changing.
        assert res_dict['result']['count'] == 1, res_dict['count']
    def test_09_just_tags(self):
        offset = self.base_url + '?q=tags:russian'
        res = self.app.get(offset, status=200)
        res_dict = self.data_from_res(res)
        assert res_dict['result']['count'] == 2, res_dict
    def test_10_multiple_tags(self):
        offset = self.base_url + '?q=tags:tolstoy tags:russian'
        res = self.app.get(offset, status=200)
        res_dict = self.data_from_res(res)
        assert res_dict['result']['count'] == 1, res_dict
    def test_12_all_packages_q(self):
        offset = self.base_url + '?q=""'
        res = self.app.get(offset, status=200)
        res_dict = self.data_from_res(res)
        assert_equal(res_dict['result']['count'], 3)
    def test_12_all_packages_no_q(self):
        offset = self.base_url
        res = self.app.get(offset, status=200)
        res_dict = self.data_from_res(res)
        assert_equal(res_dict['result']['count'], 3)
    def test_12_filter_by_openness(self):
        offset = self.base_url + '?filter_by_openness=1'
        res = self.app.get(offset, status=400) # feature dropped in #1360
        assert "'filter_by_openness'" in res.body, res.body
    def test_12_filter_by_downloadable(self):
        offset = self.base_url + '?filter_by_downloadable=1'
        res = self.app.get(offset, status=400) # feature dropped in #1360
        assert "'filter_by_downloadable'" in res.body, res.body
class LegacyOptionsTestCase(ApiTestCase, ControllerTestCase):
    '''Here are tests with URIs in the syntax they were in
    for API v1 and v2.'''
    @classmethod
    def setup_class(self):
        # NOTE(review): conventionally 'cls' for classmethods; kept as-is.
        setup_test_search_index()
        CreateTestData.create()
        self.package_fixture_data = {
            'name' : u'testpkg',
            'title': 'Some Title',
            'url': u'http://blahblahblah.mydomain',
            'resources': [{u'url':u'http://blahblahblah.mydomain',
                           u'format':u'', u'description':''}],
            'tags': ['russion', 'novel'],
            'license_id': u'gpl-3.0',
            'extras': {'national_statistic':'yes',
                       'geographic_coverage':'England, Wales'},
        }
        CreateTestData.create_arbitrary(self.package_fixture_data)
        self.base_url = self.offset('/search/dataset')
    @classmethod
    def teardown_class(cls):
        model.repo.rebuild_db()
        search.clear_all()
    def test_08_all_fields_syntax_error(self):
        offset = self.base_url + '?all_fields=should_be_boolean' # invalid all_fields value
        res = self.app.get(offset, status=400)
        assert('boolean' in res.body)
        assert('all_fields' in res.body)
        self.assert_json_response(res, 'boolean')
    def test_09_just_tags(self):
        offset = self.base_url + '?tags=tolstoy'
        res = self.app.get(offset, status=200)
        res_dict = self.data_from_res(res)
        # Legacy search responses expose 'count' at the top level.
        assert res_dict['count'] == 1, res_dict
    def test_10_single_tag_with_plus(self):
        tagname = "Flexible+" + quote(u'\u30a1'.encode('utf8'))
        offset = self.base_url + "?tags=%s&all_fields=1"%tagname
        res = self.app.get(offset, status=200)
        res_dict = self.data_from_res(res)
        assert res_dict['count'] == 2, res_dict
    def test_10_multi_tags_with_ampersand_including_a_multiword_tagame(self):
        tagname = "Flexible+" + quote(u'\u30a1'.encode('utf8'))
        offset = self.base_url + '?tags=tolstoy&tags=%s&all_fields=1' % tagname
        res = self.app.get(offset, status=200)
        res_dict = self.data_from_res(res)
        assert res_dict['count'] == 1, res_dict
    def test_10_multiple_tags_with_ampersand(self):
        offset = self.base_url + '?tags=tolstoy&tags=russian&all_fields=1'
        res = self.app.get(offset, status=200)
        res_dict = self.data_from_res(res)
        assert res_dict['count'] == 1, res_dict
    def test_10_many_tags_with_ampersand(self):
        offset = self.base_url + '?tags=tolstoy&tags=russian&tags=tolstoy'
        res = self.app.get(offset, status=200)
        res_dict = self.data_from_res(res)
        assert res_dict['count'] == 1, res_dict
    def test_13_just_groups(self):
        offset = self.base_url + '?groups=roger'
        res = self.app.get(offset, status=200)
        res_dict = self.data_from_res(res)
        # NOTE(review): this and test_14 read res_dict['result']['count'],
        # unlike the other legacy tests which read res_dict['count'] --
        # confirm which response shape the legacy endpoint actually returns.
        assert res_dict['result']['count'] == 1, res_dict
    def test_14_empty_parameter_ignored(self):
        offset = self.base_url + '?groups=roger&title='
        res = self.app.get(offset, status=200)
        res_dict = self.data_from_res(res)
        assert res_dict['result']['count'] == 1, res_dict
class TestPackageSearchApi3(Api3TestCase, PackageSearchApiTestCase):
    '''Here are tests with URIs in specifically SOLR syntax.'''
    def test_09_just_tags(self):
        offset = self.base_url + '?q=tags:russian&fl=*'
        res = self.app.get(offset, status=200)
        res_dict = self.data_from_res(res)
        assert res_dict['result']['count'] == 2, res_dict
    def test_11_pagination_limit(self):
        offset = self.base_url + '?fl=*&q=tags:russian&rows=1&sort=name asc'
        res = self.app.get(offset, status=200)
        res_dict = self.data_from_res(res)
        # count reflects total matches; results are capped by rows=1.
        assert res_dict['result']['count'] == 2, res_dict
        assert len(res_dict['result']['results']) == 1, res_dict
        self.assert_results(res_dict, 'annakarenina')
    def test_11_pagination_offset_limit(self):
        offset = self.base_url + '?fl=*&q=tags:russian&start=1&rows=1&sort=name asc'
        res = self.app.get(offset, status=200)
        res_dict = self.data_from_res(res)
        assert res_dict['result']['count'] == 2, res_dict
        assert len(res_dict['result']['results']) == 1, res_dict
        self.assert_results(res_dict, 'warandpeace')
    def test_11_pagination_validation_error(self):
        offset = self.base_url + '?fl=*&q=tags:russian&start=should_be_integer&rows=1&sort=name asc' # invalid offset value
        res = self.app.get(offset, status=409)
        assert('Validation Error' in res.body)
    def test_12_v1_or_v2_syntax(self):
        # v1/v2-style parameters must be rejected by the v3 endpoint.
        offset = self.base_url + '?all_fields=1'
        res = self.app.get(offset, status=400)
        assert("Invalid search parameters: ['all_fields']" in res.body), res.body
    def test_13_just_groups(self):
        offset = self.base_url + '?q=groups:roger'
        res = self.app.get(offset, status=200)
        res_dict = self.data_from_res(res)
        assert res_dict['result']['count'] == 1, res_dict
| StarcoderdataPython |
3380909 | <filename>tests/integration/test_users_and_roles.py<gh_stars>0
import src.superannotate as sa
from tests.integration.base import BaseTestCase
class TestUserRoles(BaseTestCase):
    PROJECT_NAME = "test users and roles"
    PROJECT_DESCRIPTION = "Desc"
    PROJECT_TYPE = "Vector"

    def test_users_roles(self):
        """Share the project with a contributor, verify membership,
        then unshare and verify the contributor is gone."""
        user = sa.search_team_contributors()[0]
        sa.share_project(self.PROJECT_NAME, user, "QA")
        contributors = sa.get_project_metadata(
            self.PROJECT_NAME, include_contributors=True
        )["contributors"]
        # Membership expressed with any() instead of a manual found-flag loop.
        present = any(c["user_id"] == user["id"] for c in contributors)
        self.assertTrue(present and user)
        sa.unshare_project(self.PROJECT_NAME, user)
        contributors = sa.get_project_metadata(
            self.PROJECT_NAME, include_contributors=True
        )["contributors"]
        present = any(c["user_id"] == user["id"] for c in contributors)
        self.assertFalse(present and user)
| StarcoderdataPython |
107712 | <reponame>done1892/Square-Images-Colorization
import numpy as np
import os
import shutil
import re
from requests import get
from bs4 import BeautifulSoup
from io import BytesIO
from PIL import Image
import cv2 as cv
def scrape_google_image(url, name_folder):
    """This function scrapes images from an URL coming from google
    images and save them into a specified folder.
       Args:
            url: Google images url.
            name_folder: name of the new folder.
       Return:
            images folder in the current file system
    """
    # Start from an empty output folder: remove any previous run's results.
    # rmtree(ignore_errors=True) is a no-op when the folder does not exist,
    # so the old exists/else branching was redundant.
    shutil.rmtree(name_folder, ignore_errors=True)
    os.makedirs(name_folder)
    response = get(url)
    html_soup = BeautifulSoup(response.text, 'html.parser')
    for num in range(len(html_soup.find_all('a'))):
        stringa = html_soup.find_all('a')[num].find_all('img')
        # Thumbnails embed their payload/URL in the img src attribute.
        search = re.search('src="(.+?)" style', str(stringa))
        if search is not None:
            link = search[1]
            response = get(link)
            img = np.asarray(Image.open(BytesIO(response.content)))
            img = cv.resize(img, (450, 300))
            # PIL loads RGB; swap channels so cv.imwrite (BGR) stores colors correctly.
            img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
            cv.imwrite(name_folder + '/img_' + str(num) + '.jpg', img)
if __name__ == '__main__':
    # CLI entry point: reads the Google Images URL from url.yaml and
    # scrapes into the folder given by --dest_folder.
    import argparse
    import yaml
    parser = argparse.ArgumentParser()
    parser.add_argument('--dest_folder', dest='dest_folder', type=str, required=True, help='folder name of scraped images.')
    args = parser.parse_args()
    with open('url.yaml') as file:
        google_url = yaml.load(file, Loader=yaml.FullLoader)
    scrape_google_image(url=google_url['url'], name_folder=args.dest_folder)
1691949 | # doc-export: VideoViewer
"""
This example demonstrates how static files can be served by making use
of a static file server.
If you intend to create a web application, note that using a static
server is a potential security risk. Use only when needed. Other options
that scale better for large websites are e.g. Nginx, Apache, or 3d party
services like Azure Storage or Amazon S3.
When exported, any links to local files wont work, but the remote links will.
"""
import os
import sys
sys.path.append(r'C:\Users\{}\Documents\GitHub\flexx'.format(os.environ['USERNAME']))
sys.path.append(r'C:\Users\{}\Documents\GitHub\flexx\flexxamples'.format(os.environ['USERNAME']))
from flexx import flx
from tornado.web import StaticFileHandler
import glob
from howtos import editor_ace, jquery
import pandas as pd
# The directory to load video's from
dirname = r'E:\rssi_mcc'
# Make use of Tornado's static file handler
# (serves files under dirname at the /videos/ URL prefix).
tornado_app = flx.create_server().app
tornado_app.add_handlers(r".*", [
    (r"/videos/(.*)", StaticFileHandler, {"path": dirname}),
])
# One result spreadsheet per experiment; each becomes a UI tab.
result_list = ['result_v1_0_final', 'result_v1_1_final', 'result_v1_6_final', 'result_v1_3_final']
df = {}         # sheet name -> pandas DataFrame of results
videos = {}     # sheet name -> {display label -> media file path}
locations = {}  # sheet name -> ['ALL', <sorted unique locations>]
for sheet_name in result_list:
    df[sheet_name] = pd.read_excel(r'C:\Users\{}\Documents\GitHub\AI-Country\{}.xlsx'.format(os.environ['USERNAME'], sheet_name))
    # Collect videos that look like they can be read in html5
    videos[sheet_name] = {}
    for index, row in df[sheet_name].iterrows():
        # Display label is "<basename>\t<Correctness>" mapping to the full path.
        videos[sheet_name][row['figure'].split('\\')[-1] + '\t' + str(row['Correctness'])] = row['figure']
    locations[sheet_name] = ['ALL'] + sorted(list(set(df[sheet_name]['Location'])))
class VideoViewer(flx.PyComponent):
    """ A simple videoviewer that displays a list of videos found on the
    server's computer, plus a few online videos. Note that not all videos
    may be playable in HTML5.

    One tab is built per result sheet; each tab holds Location/Correctness
    filters, a media selector, a main image widget and ten neighbour
    widgets (nx[1..10]).
    """
    def init(self):
        # Per-sheet widget registries, keyed by sheet name.
        self.location_list = []
        self.location = {}
        self.correctness_list = []
        self.correctness = {}
        self.selector_list = []
        self.selector = {}
        self.console = {}
        self.player = {}
        # nx[1..10] hold the ten nearest-neighbour image widgets; index 0 unused.
        self.nx = []
        for i in range(1, 12):
            self.nx.append({})
        with flx.TabLayout() as self.tabbar:
            for sheet_name in result_list:
                with flx.HBox(title=sheet_name):
                    with flx.VBox(flex=0):
                        self.location_list.append(flx.ComboBox(options=locations[sheet_name], selected_index=0, style='width: 100%'))
                        self.location[sheet_name] = self.location_list[-1]
                        self.correctness_list.append(flx.ComboBox(options=['ALL', 'False', 'True'], selected_index=0, style='width: 100%'))
                        self.correctness[sheet_name] = self.correctness_list[-1]
                        self.location[sheet_name].set_editable(True)
                        self.selector_list.append(flx.ComboBox(options=[row['figure'].split('\\')[-1] + '\t' + str(row['Correctness']) for index, row in df[sheet_name].iterrows()], selected_index=0, style='width: 100%'))
                        self.selector[sheet_name] = self.selector_list[-1]
                        self.selector[sheet_name].set_editable(True)
                        self.console[sheet_name] = flx.TextAreaEdit()
                        self.console[sheet_name].set_disabled(True)
                        flx.Widget(flex=1)
                    with flx.VBox(flex=1):
                        self.player[sheet_name] = flx.ImageWidget(flex=1)
                        with flx.HBox(flex=1):
                            self.nx[1][sheet_name] = flx.ImageWidget(flex=1)
                            self.nx[2][sheet_name] = flx.ImageWidget(flex=1)
                    with flx.VBox(flex=1):
                        with flx.HBox(flex=1):
                            self.nx[3][sheet_name] = flx.ImageWidget(flex=1)
                            self.nx[4][sheet_name] = flx.ImageWidget(flex=1)
                        with flx.HBox(flex=1):
                            self.nx[5][sheet_name] = flx.ImageWidget(flex=1)
                            self.nx[6][sheet_name] = flx.ImageWidget(flex=1)
                        with flx.HBox(flex=1):
                            self.nx[7][sheet_name] = flx.ImageWidget(flex=1)
                            self.nx[8][sheet_name] = flx.ImageWidget(flex=1)
                        with flx.HBox(flex=1):
                            self.nx[9][sheet_name] = flx.ImageWidget(flex=1)
                            self.nx[10][sheet_name] = flx.ImageWidget(flex=1)
    @flx.reaction('!tabbar.user_current')
    def tab_change(self, *events):
        # Log-only reaction; tab state is read lazily by the other handlers.
        flx.logger.info(str(events[0]['new_value'].title))
        flx.logger.info(self.tabbar.current.title)
    @flx.reaction('!location_list*.user_selected')
    def location_on_select(self, *events):
        # NOTE(review): this handler and correctness_on_select duplicate the
        # same filter logic; a shared helper would remove the duplication.
        flx.logger.info(events[0]['key'])
        sheet_name = self.tabbar.current.title
        correctness = self.correctness[sheet_name].selected_key
        location = events[0]['key']
        target_df = df[sheet_name]
        if correctness != 'ALL':
            target_df = target_df[target_df.Correctness == (correctness == 'True')]
        if location != 'ALL':
            target_df = target_df[target_df.Location == location]
        self.selector[sheet_name].set_options([row['figure'].split('\\')[-1] + '\t' + str(row['Correctness']) for index, row in target_df.iterrows()])
    @flx.reaction('!correctness_list*.user_selected')
    def correctness_on_select(self, *events):
        flx.logger.info(events[0]['key'])
        sheet_name = self.tabbar.current.title
        correctness = events[0]['key']
        location = self.location[sheet_name].selected_key
        target_df = df[sheet_name]
        if correctness != 'ALL':
            target_df = target_df[target_df.Correctness == (correctness == 'True')]
        if location != 'ALL':
            target_df = target_df[target_df.Location == location]
        self.selector[sheet_name].set_options([row['figure'].split('\\')[-1] + '\t' + str(row['Correctness']) for index, row in target_df.iterrows()])
    @flx.reaction('!selector_list*.user_selected')
    def on_select(self, *events):
        # Load the selected video plus its ten neighbours and show the
        # neighbour scores in the (otherwise read-only) console widget.
        flx.logger.info(events[0]['key'])
        sheet_name = self.tabbar.current.title
        fname = events[0]['key']
        flx.logger.info(videos[sheet_name][fname])
        self.player[sheet_name].set_source('/videos/' + videos[sheet_name][fname])
        match_row = df[sheet_name][df[sheet_name].figure == videos[sheet_name][fname]]
        flx.logger.info(match_row['n1'].values[0])
        for i in range(1, 11):
            self.nx[i][sheet_name].set_source('/videos/' + match_row['n{}'.format(i)].values[0])
        self.console[sheet_name].set_disabled(False)
        text = ''
        for i in range(1, 11):
            text += 's{}:\t'.format(i)+match_row['n{}'.format(i)].values[0].split('.')[-3]+'\t'+str(match_row['s{}'.format(i)].values[0])+'\t'+str(match_row['b{}'.format(i)].values[0])+'\n'
        self.console[sheet_name].set_text(text)
        self.console[sheet_name].set_disabled(True)
if __name__ == '__main__':
    # Launch the viewer in a browser and enter the flexx event loop.
    a = flx.App(VideoViewer)
    m = a.launch('browser')
    flx.run()
| StarcoderdataPython |
35731 | #!/usr/bin/env python
"""
Generates a list of OS X system events into a plist for crankd.
This is designed to create a large (but probably not comprehensive) sample
of the events generated by Mac OS X that crankd can tap into. The generated
file will call the 'tunnel.sh' as the command for each event; said fail can
be easily edited to redirect the output to wherever you would like it to go.
"""
OUTPUT_FILE = "crankd-config.plist"
from SystemConfiguration import SCDynamicStoreCopyKeyList, SCDynamicStoreCreate
# Each event has a general event type, and a specific event
# The category is the key, and the value is a list of specific events
event_dict = {}
def AddEvent(event_category, specific_event):
    """Adds an event to the event dictionary.

    setdefault collapses the previous check-then-create pattern into a
    single lookup while preserving the same resulting structure
    (category -> list of specific events, insertion order kept).
    """
    event_dict.setdefault(event_category, []).append(specific_event)
def AddCategoryOfEvents(event_category, events):
    """Register every event in *events* under the same category."""
    for event in events:
        AddEvent(event_category, event)
def AddKnownEvents():
    """Here we add all the events that we know of to the dictionary:
    every current SCDynamicStore key plus a fixed set of NSWorkspace
    notification names."""
    # Add a bunch of dynamic events
    store = SCDynamicStoreCreate(None, "generate_event_plist", None, None)
    AddCategoryOfEvents(u"SystemConfiguration",
                        SCDynamicStoreCopyKeyList(store, ".*"))
    # Add some standard NSWorkspace events
    # (whitespace-separated names; .split() ignores the indentation).
    AddCategoryOfEvents(u"NSWorkspace",
                        u'''
                        NSWorkspaceDidLaunchApplicationNotification
                        NSWorkspaceDidMountNotification
                        NSWorkspaceDidPerformFileOperationNotification
                        NSWorkspaceDidTerminateApplicationNotification
                        NSWorkspaceDidUnmountNotification
                        NSWorkspaceDidWakeNotification
                        NSWorkspaceSessionDidBecomeActiveNotification
                        NSWorkspaceSessionDidResignActiveNotification
                        NSWorkspaceWillLaunchApplicationNotification
                        NSWorkspaceWillPowerOffNotification
                        NSWorkspaceWillSleepNotification
                        NSWorkspaceWillUnmountNotification
                        '''.split())
def PrintEvents():
    """Prints all the events, for debugging purposes.
    (Python 2 print statements -- this script targets the system Python
    shipped with the Mac OS X versions crankd supported.)"""
    for category in sorted(event_dict):
        print category
        for event in sorted(event_dict[category]):
            print "\t" + event
def OutputEvents():
    """Outputs all the events to a file (OUTPUT_FILE) as an Apple plist,
    mapping each event to a 'tunnel.sh <category> <event>' command.
    Python 2 'print >>file' syntax throughout."""
    # print the header for the file
    plist = open(OUTPUT_FILE, 'w')
    print >>plist, '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>'''
    for category in sorted(event_dict):
        # print out the category
        print >>plist, "    <key>%s</key>\n    <dict>" % category
        for event in sorted(event_dict[category]):
            print >>plist, """
        <key>%s</key>
        <dict>
            <key>command</key>
            <string>%s '%s' '%s'</string>
        </dict>""" % ( event, 'tunnel.sh', category, event )
        # end the category
        print >>plist, "    </dict>"
    # end the plist file
    print >>plist, '</dict>'
    print >>plist, '</plist>'
    plist.close()
def main():
    """Runs the program"""
    AddKnownEvents()
    #PrintEvents()
    OutputEvents()
# Executed at module import time -- there is no __main__ guard in this
# script; consider wrapping in `if __name__ == '__main__':` if it is ever
# imported rather than run directly.
main()
| StarcoderdataPython |
1638434 | <gh_stars>0
__author__ = 'umran' | StarcoderdataPython |
1661205 | <reponame>BatsResearch/taglets
import torch
import torch.nn as nn
from torch.nn import init
from allennlp.nn.util import masked_max, masked_mean, masked_softmax
from taglets.modules.zsl_kg_lite.utils.core import pad_tensor, base_modified_neighbours
class AttnAggregator(nn.Module):
    """GAT-style attention aggregator over graph neighbourhoods.

    Projects neighbour features with a shared linear map, scores each
    (destination, neighbour) pair with separate single-output attention
    layers, softmaxes over the (masked, padded) neighbourhood, and returns
    the attention-weighted sum per node.
    """

    def __init__(self, features, input_dim, output_dim, device,
                 num_sample=50, sample_nodes=False, dropout=False,
                 self_loop=False):
        """
        GAT: Attention Aggregator

        :param features: callable mapping a node-id tensor to embeddings
        :param num_sample: max neighbours kept per node when sampling
        :param sample_nodes: keep only the top-`num_sample` neighbours by
            the third tuple field (hitting probability)
        :param self_loop: include the node itself in its neighbourhood
        """
        super(AttnAggregator, self).__init__()
        self.features = features
        self.device = device
        self.num_sample = num_sample
        self.sample_nodes = sample_nodes
        self.input_dim = input_dim
        if dropout:
            self.dropout = nn.Dropout(0.5)
        else:
            self.dropout = None
        # Shared projection for both attention inputs and the aggregation.
        self.proj = nn.Linear(input_dim, output_dim, bias=False)
        init.xavier_uniform_(self.proj.weight)
        # Additive attention split into source and destination terms.
        self.attn_src = nn.Linear(output_dim, 1, bias=False)
        self.attn_dst = nn.Linear(output_dim, 1, bias=False)
        self.leaky_relu = nn.LeakyReLU(0.2)
        self.self_loop = self_loop

    def forward(self, nodes, to_neighs):
        """Function computes the aggregated vector.

        Args:
            nodes (list): list of nodes
            to_neighs (list): list of list with node ids and relations;
                each entry is assumed to be (node, rel, hitting_prob)
                tuples -- TODO confirm against caller.

        Returns:
            torch.Tensor: tensors with aggregated vectors
        """
        _set = set
        if self.sample_nodes:
            # sample neighs based on the hitting prob
            _neighs = [sorted(to_neigh, key=lambda x: x[2], reverse=True)[:self.num_sample]
                       if len(to_neigh) >= self.num_sample else to_neigh for to_neigh in to_neighs]
            # change ds
            samp_neighs = []
            for i, adj_list in enumerate(_neighs):
                samp_neighs.append(set([node for node, rel, hp in adj_list]))
                if self.self_loop:
                    samp_neighs[i].add(int(nodes[i]))
        else:
            # no sampling; to_neighs is then assumed to already be sets of
            # node ids -- TODO confirm.
            samp_neighs = to_neighs
        unique_nodes_list = list(set.union(*samp_neighs))
        # get the unique nodes
        unique_nodes = list(set(unique_nodes_list))
        node_to_emb_idx = {n:i for i,n in enumerate(unique_nodes)}
        unique_nodes_tensor = torch.tensor(unique_nodes, device=self.device)
        embed_matrix = self.features(unique_nodes_tensor)

        if self.dropout is not None:
            embed_matrix = self.dropout(embed_matrix)

        # get new features
        embed_matrix_prime = self.proj(embed_matrix)

        # NOTE(review): pre-allocated with input_dim but unconditionally
        # overwritten by the attention sum below -- appears unused; confirm.
        to_feats = torch.empty(len(samp_neighs), self.input_dim, device=self.device)

        # Remap neighbour node ids to row indices in embed_matrix.
        modified_adj_nodes = base_modified_neighbours(samp_neighs, node_to_emb_idx)
        #
        padded_tensor, mask = pad_tensor(modified_adj_nodes, mask=True)
        # sending padded tensor
        padded_tensor = padded_tensor.to(self.device)
        mask = mask.to(self.device)

        # Repeat each destination node across its (padded) neighbourhood.
        dst_nodes = []
        max_length = mask.size(1)
        for _node in nodes:
            dst_nodes.append([node_to_emb_idx[_node]] * max_length)
        dst_tensor = torch.tensor(dst_nodes).to(self.device)

        # embed matrix
        neigh_feats = embed_matrix_prime[padded_tensor]
        dst_feats = embed_matrix_prime[dst_tensor]

        # attention: additive scores, masked softmax over neighbours.
        dst_attn = self.leaky_relu(self.attn_dst(dst_feats))
        neigh_attn = self.leaky_relu(self.attn_src(neigh_feats))
        edge_attn = dst_attn + neigh_attn
        attn = masked_softmax(edge_attn, mask.unsqueeze(-1), dim=1)

        # multiply attention
        to_feats = torch.sum(attn * neigh_feats, dim=1)

        return to_feats
| StarcoderdataPython |
46190 | <reponame>lipovsek/avalanche<filename>avalanche/models/generator.py
################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 03-03-2022 #
# Author: <NAME> #
# Website: https://github.com/travela #
################################################################################
"""
File to place any kind of generative models
and their respective helper functions.
"""
from abc import abstractmethod
from matplotlib import transforms
import torch
import torch.nn as nn
from torchvision import transforms
from avalanche.models.utils import MLP, Flatten
from avalanche.models.base_model import BaseModel
class Generator(BaseModel):
    """
    A base abstract class for generators
    """

    @abstractmethod
    def generate(self, batch_size=None, condition=None):
        """
        Lets the generator sample random samples.
        Output is either a single sample or, if provided,
        a batch of samples of size "batch_size"

        :param batch_size: Number of samples to generate
        :param condition: Possible condition for a conditional generator
            (e.g. a class label)
        """
###########################
# VARIATIONAL AUTOENCODER #
###########################
class VAEMLPEncoder(nn.Module):
    """
    Encoder part of the VAE, computer the latent represenations of the input.

    :param shape: Shape of the input to the network: (channels, height, width)
    :param latent_dim: Dimension of last hidden layer
    """

    def __init__(self, shape, latent_dim=128):
        super(VAEMLPEncoder, self).__init__()
        flattened_size = torch.Size(shape).numel()
        # Flatten -> 400-unit hidden layer -> MLP down to latent_dim.
        self.encode = nn.Sequential(
            Flatten(),
            nn.Linear(in_features=flattened_size, out_features=400),
            nn.BatchNorm1d(400),
            nn.LeakyReLU(),
            MLP([400, latent_dim]),
        )

    def forward(self, x, y=None):
        # y is accepted for interface symmetry with conditional variants
        # but ignored here.
        x = self.encode(x)
        return x
class VAEMLPDecoder(nn.Module):
    """
    Decoder part of the VAE. Reverses Encoder.

    :param shape: Shape of output: (channels, height, width).
    :param nhid: Dimension of input.
    """

    def __init__(self, shape, nhid=16):
        super(VAEMLPDecoder, self).__init__()
        flattened_size = torch.Size(shape).numel()
        self.shape = shape
        # MLP back up to the flattened image size, squashed to [0, 1].
        self.decode = nn.Sequential(
            MLP([nhid, 64, 128, 256, flattened_size], last_activation=False),
            nn.Sigmoid(),
        )
        # NOTE(review): named invTrans but applies a forward Normalize with
        # MNIST statistics; presumably intended to undo the dataset's
        # normalization -- confirm against the data pipeline.
        self.invTrans = transforms.Compose(
            [transforms.Normalize((0.1307,), (0.3081,))]
        )

    def forward(self, z, y=None):
        # With a condition y, it is concatenated to z before decoding
        # (conditional-VAE style); nhid must then account for y's width.
        if y is None:
            return self.invTrans(self.decode(z).view(-1, *self.shape))
        else:
            return self.invTrans(
                self.decode(torch.cat((z, y), dim=1)).view(-1, *self.shape)
            )
class MlpVAE(Generator, nn.Module):
    """
    Variational autoencoder module:
    fully-connected and suited for any input shape and type.

    The encoder only computes the latent represenations
    and we have then two possible output heads:
    One for the usual output distribution and one for classification.
    The latter is an extension the conventional VAE and incorporates
    a classifier into the network.
    More details can be found in: https://arxiv.org/abs/1809.10635
    """

    def __init__(self, shape, nhid=16, n_classes=10, device="cpu"):
        """
        :param shape: Shape of each input sample
        :param nhid: Dimension of latent space of Encoder.
        :param n_classes: Number of classes -
            defines classification head's dimension
        :param device: torch device on which sampling tensors are created
        """
        super(MlpVAE, self).__init__()
        self.dim = nhid
        self.device = device
        # Encoder produces a 128-d representation; separate heads map it
        # to the Gaussian mean/log-variance and to class logits.
        self.encoder = VAEMLPEncoder(shape, latent_dim=128)
        self.calc_mean = MLP([128, nhid], last_activation=False)
        self.calc_logvar = MLP([128, nhid], last_activation=False)
        # NOTE(review): classification head is built but not used in
        # forward() below -- presumably consumed by an external strategy.
        self.classification = MLP([128, n_classes], last_activation=False)
        self.decoder = VAEMLPDecoder(shape, nhid)

    def get_features(self, x):
        """
        Get features for encoder part given input x
        """
        return self.encoder(x)

    def generate(self, batch_size=None):
        """
        Generate random samples.
        Output is either a single sample if batch_size=None,
        else it is a batch of samples of size "batch_size".
        """
        # Sample z ~ N(0, I) in the latent space and decode it.
        z = (
            torch.randn((batch_size, self.dim)).to(self.device)
            if batch_size
            else torch.randn((1, self.dim)).to(self.device)
        )
        res = self.decoder(z)
        if not batch_size:
            res = res.squeeze(0)
        return res

    def sampling(self, mean, logvar):
        """
        VAE 'reparametrization trick'
        """
        # z = mean + eps * sigma keeps the sampling differentiable w.r.t.
        # mean and logvar.
        eps = torch.randn(mean.shape).to(self.device)
        sigma = 0.5 * torch.exp(logvar)
        return mean + eps * sigma

    def forward(self, x):
        """
        Forward.

        Returns (reconstruction, mean, logvar) as expected by VAE_loss.
        """
        represntations = self.encoder(x)
        mean, logvar = self.calc_mean(represntations), self.calc_logvar(
            represntations
        )
        z = self.sampling(mean, logvar)
        return self.decoder(z), mean, logvar
# Loss functions
BCE_loss = nn.BCELoss(reduction="sum")  # available for Bernoulli decoders
MSE_loss = nn.MSELoss(reduction="sum")  # used by VAE_loss below
CE_loss = nn.CrossEntropyLoss()         # for the optional classification head


def VAE_loss(X, forward_output):
    """
    Loss function of a VAE using mean squared error for reconstruction loss.
    This is the criterion for VAE training loop.

    :param X: Original input batch.
    :param forward_output: Return value of a VAE.forward() call.
                Triplet consisting of (X_hat, mean. logvar), ie.
                (Reconstructed input after subsequent Encoder and Decoder,
                mean of the VAE output distribution,
                logvar of the VAE output distribution)
    """
    X_hat, mean, logvar = forward_output
    reconstruction_loss = MSE_loss(X_hat, X)
    # Closed-form KL divergence between N(mean, exp(logvar)) and N(0, I).
    KL_divergence = 0.5 * torch.sum(-1 - logvar + torch.exp(logvar) + mean ** 2)
    return reconstruction_loss + KL_divergence


__all__ = ["MlpVAE", "VAE_loss"]
| StarcoderdataPython |
44057 | <gh_stars>1000+
from .version import __version__
from dtreeviz.classifiers import clfviz
| StarcoderdataPython |
1718301 | <filename>database/app.py
from flask import Flask, request, jsonify, abort, send_file
from pymongo import MongoClient
from bson.objectid import ObjectId
import os
from dotenv import load_dotenv
import json
from operator import itemgetter
import time
import shutil
load_dotenv()
app = Flask(__name__)
MONGO_URI = 'mongodb://' + os.environ['MONGODB_USERNAME'] + ':' + os.environ['MONGODB_PASSWORD'] + '@' + os.environ['MONGODB_HOSTNAME'] + ':27017'
mongo_client = MongoClient(MONGO_URI, connect=False)
mongo = mongo_client[os.environ['MONGODB_DATABASE']]
FILES_DIR = "files"
ids = {
"datasets": "datasetId",
"networks": "datasetId",
"others": "datasetId",
"results": "requestId"
}
tables = str(list(ids.keys()))[1:-1]
@app.before_first_request
def create_files_dir():
    """Ensure the on-disk file storage directory exists before serving."""
    # exist_ok avoids the check-then-create race of a separate exists() test.
    os.makedirs(FILES_DIR, exist_ok=True)
@app.route('/<userId>/<any({}):table>/<id>'.format(tables), methods=["GET"])
def get_item(userId, table, id):
    """Fetch one document owned by userId; long-polls results until done."""
    try:
        out = mongo[table].find_one({"_id": ObjectId(id), "userId": userId})
        if table == "networks":
            # A network document is a pointer to its originating result;
            # redirect the lookup to the results table.
            table = "results"
            id = out["requestId"]
            out = mongo[table].find_one({"_id": ObjectId(id), "userId": userId})
    except Exception as e:
        print(e)
        abort(404)
    if not out:
        abort(404)
    # Poll for up to ~25 s while a result is still being computed.
    t_end = time.time() + 25
    while time.time() < t_end and table == "results" and (not out.get("done") or out.get("pending")):
        out = mongo[table].find_one({"_id": ObjectId(id), "userId": userId})
        time.sleep(1)
    if time.time() >= t_end:
        # Still pending after the deadline -> gateway timeout.
        abort(504)
    # Expose Mongo's _id under the table-specific id key, as a string.
    out[ids[table]] = str(out.pop("_id"))
    return out
@app.route('/<userId>/<any({}):table>'.format(tables), methods=["POST"])
def post_item(userId, table):
    """Create a document from a JSON body or a multipart 'metadata' field,
    optionally storing an attached file under the new document's id."""
    data = {}
    if request.get_json():
        data = request.json
    elif request.form.get("metadata"):
        data = json.loads(request.form.get("metadata"))
    id = mongo[table].insert_one({
        "userId": userId,
        **data
    }).inserted_id
    id = str(id)
    # A file may arrive either as an upload or as an inline form field.
    if request.files.get("file") or request.form.get("file"):
        path = os.path.join(FILES_DIR, userId)
        if not os.path.exists(path):
            os.makedirs(path)
        path = os.path.join(path, id)
        if request.files.get("file"):
            request.files.get("file").save(path)
        else:
            with open(path, "w") as f:
                print(request.form.get("file"), file=f)
    return {
        ids[table]: id
    }
@app.route('/<userId>/<any({}):table>/<id>'.format(tables), methods=["PUT"])
def update_item(userId, table, id):
    """Apply a partial ($set) update to a document owned by userId.

    Returns the table-specific id on success; 404 on an invalid id.
    """
    try:
        # The update result is not inspected: an unknown id is simply a
        # no-op, matching the original behaviour.
        mongo[table].update_one(
            {"_id": ObjectId(id), "userId": userId},
            {"$set": request.json},
        )
    except Exception:
        # Fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only driver/ObjectId errors should map to 404.
        abort(404)
    return {
        ids[table]: id
    }
@app.route('/<userId>', methods=["DELETE"])
def delete_user(userId):
    """Remove every document and every stored file belonging to userId."""
    for table in ids.keys():
        mongo[table].delete_many({"userId": userId})
    # Also wipe the user's on-disk file directory, if any.
    path = os.path.join(FILES_DIR, userId)
    if os.path.exists(path):
        shutil.rmtree(path)
    return {}
@app.route('/<userId>/<any({}):table>/<id>'.format(tables), methods=["DELETE"])
def delete_item(userId, table, id):
    """Delete one document owned by userId, plus its linked documents and
    any files it references on disk.

    Deleting a network cascades to its result; deleting a result cascades
    to the network derived from it.
    """
    try:
        out = mongo[table].find_one_and_delete({"_id": ObjectId(id), "userId": userId})
        if table == "networks":
            # A network points at its result; delete that too and report it.
            out = mongo.results.find_one_and_delete({"_id": ObjectId(out["requestId"]), "userId": userId})
        if table == "results":
            # Fix: dropped the leftover debug prints ("here", o) that leaked
            # internal ids to stdout on every delete.
            mongo.networks.find_one_and_delete({"_id": ObjectId(out["files"]["dbn.ser"]["datasetId"])})
    except Exception:
        # Fix: narrowed the original bare `except:`.
        abort(404)
    if not out:
        abort(404)
    # Remove every file the deleted document referenced, then its own file.
    if out.get("files"):
        for key, file in out.get("files").items():
            os.remove(os.path.join(FILES_DIR, userId, file["datasetId"]))
    path = os.path.join(FILES_DIR, userId, id)
    if os.path.exists(path):
        os.remove(path)
    return {}, 200
@app.route('/<userId>/<any({}):table>'.format(tables), methods=["GET"])
def get_table(userId, table):
    """List a user's documents in `table`, projected to the name/id fields."""
    items = list(mongo[table].find({
        "userId": userId
    }, {
        "datasetName": 1,
        "requestName": 1,
        "requestId": 1
    }))
    for item in items:
        # Rename Mongo's _id to the table-specific id key, as a string.
        item[ids[table]] = str(item.pop("_id"))
    return jsonify(items)
@app.route('/<userId>/<id>', methods=["GET"])
def get_file(userId, id):
    """Stream a stored file belonging to userId; 404 if it does not exist."""
    try:
        return send_file(os.path.join(FILES_DIR, userId, id))
    except Exception:
        # Fix: narrowed the original bare `except:` (which also caught
        # SystemExit/KeyboardInterrupt) to Exception.
        abort(404)
@app.route('/<userId>/<id>', methods=["POST"])
def post_file(userId, id):
    """Store an uploaded file under the user's directory as `id`.

    A request without a file part is silently accepted (200, empty body).
    """
    if request.files.get("file"):
        file = request.files.get("file")
        path = os.path.join(FILES_DIR, userId)
        if not os.path.exists(path):
            os.makedirs(path)
        path = os.path.join(path, id)
        file.save(path)
    return {}, 200
@app.route('/examples', methods=["GET"])
def get_examples():
try:
return send_file('examples/files.json')
except:
abort(404)
@app.route('/examples/<name>', methods=['GET'])
def get_example(name):
    """Serve one example file, but only if it is listed in examples/files.json.

    The allow-list check prevents serving arbitrary paths under examples/.
    """
    with open('examples/files.json') as json_file:
        # Fix: replaced the single-letter name `l` (easily confused with 1)
        # and the map/itemgetter chain with a plain comprehension.
        allowed = [entry["name"] for entry in json.load(json_file)]
    if name not in allowed:
        abort(404)
    try:
        return send_file('examples/' + name)
    except Exception:
        # Fix: narrowed the original bare `except:` to Exception.
        abort(404)
@app.route('/methods', methods=['GET'])
def get_methods():
    """List available methods (name + entry file) from the methods/ dir.

    Each file in methods/ is a JSON descriptor; only the 'method' and
    'mainFile' keys are exposed here.
    """
    methods = []
    for filename in os.listdir('methods'):
        with open(os.path.join('methods', filename)) as json_file:
            aux = json.load(json_file)
            methods.append({
                'method': aux['method'],
                'mainFile': aux['mainFile']
            })
    return jsonify(methods)
@app.route('/methods/<method>', methods=['GET'])
def get_method(method):
    """Serve the full JSON descriptor for one method; 404 if unknown."""
    try:
        return send_file('methods/' + method + '.json')
    except Exception:
        # Fix: narrowed the original bare `except:` to Exception.
        abort(404)
@app.errorhandler(404)
def page_not_found(e):
    """Return 404 errors as JSON instead of Flask's default HTML page."""
    return jsonify(error=404, text=str(e)), 404
if __name__ == '__main__':
app.run(host="0.0.0.0", debug=True, port=5000) | StarcoderdataPython |
class DebevecMerge:
    """Merge a bracketed exposure stack into an HDR image (Debevec method).

    NOTE(review): this chunk references np, cv2 and sys and calls
    self.readImg(), none of which are visible here -- presumably imported /
    defined elsewhere in the file; confirm.
    """

    def __init__(self, args):
        # args.imgs: image filenames; args.exposure_times: one exposure
        # time per image (order must match).
        self.img_fn = args.imgs
        self.exposure_times = np.array(args.exposure_times, dtype=np.float32)
        if len(self.img_fn) != len(self.exposure_times):
            # Mismatched lists: only warns on stderr; processing continues.
            sys.stderr.write('List Size Error!')
        self.img_list = self.readImg()

    def DebevecHDR(self):
        # OpenCV's Debevec radiance-map merger. The times array is passed
        # as a copy -- presumably to guard against in-place modification by
        # the OpenCV call; TODO confirm.
        merge_debvec = cv2.createMergeDebevec()
        self.hdr_debvec = merge_debvec.process(self.img_list, times=self.exposure_times.copy())
        return self.hdr_debvec
| StarcoderdataPython |
43682 | <reponame>baseclue/codev
import logging
# Mapping from CLI loglevel names to stdlib logging levels.
LOGLEVELS = {
    'info': logging.INFO,
    'debug': logging.DEBUG,
}

# Currently selected loglevel name (module-level mutable state).
actual_loglevel = 'info'
class LoglevelFilter(logging.Filter):
    """A logging filter that passes only records of one exact level.

    Unlike a handler's level threshold (which passes that level and above),
    this admits records whose levelno equals `loglevel` and rejects all
    others.
    """

    def __init__(self, loglevel):
        # loglevel: a stdlib logging level constant (e.g. logging.INFO).
        self.loglevel = loglevel
        super().__init__()

    def filter(self, record):
        """Return True iff `record` is exactly at the configured level.

        Fix: the original only returned True on a match and fell off the
        end (implicit None) otherwise; logging happens to treat falsy as
        "reject", but an explicit bool matches the documented Filter
        contract and is unambiguous to callers.
        """
        return record.levelno == self.loglevel
# Ready-made single-level filters for routing output per level.
error_filter = LoglevelFilter(logging.ERROR)
info_filter = LoglevelFilter(logging.INFO)
debug_filter = LoglevelFilter(logging.DEBUG)
| StarcoderdataPython |
4802595 | # Copyright Contributors to the Rez project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Qt import QtCore, QtWidgets, QtGui
from rezgui.util import create_pane
from rez.utils.graph_utils import save_graph, prune_graph
import tempfile
import threading
import os
import os.path
class Writer(QtCore.QObject):
    """Worker that renders a dot graph string to an image file.

    Designed to run on a background thread; completion is reported via the
    graph_written signal with (filepath, error_message), where
    error_message is '' on success.
    """

    graph_written = QtCore.Signal(str, str)

    def __init__(self, graph_str, filepath, prune_to=None):
        super(Writer, self).__init__()
        self.graph_str = graph_str
        self.filepath = filepath
        # Optional package name to prune the graph down to before writing.
        self.prune_to = prune_to
        # NOTE(review): never reassigned within this class, so cancel()
        # appears to be a no-op unless something external sets it; confirm.
        self.process = None

    def cancel(self):
        # Terminate the external rendering process, if one was recorded.
        if self.process:
            self.process.terminate()

    def write_graph(self):
        """Render the (optionally pruned) graph and emit graph_written."""
        if self.prune_to:
            graph_str = prune_graph(self.graph_str, self.prune_to)
        else:
            graph_str = self.graph_str

        error_msg = ''
        try:
            save_graph(graph_str, self.filepath)
        except Exception as e:
            error_msg = str(e)
        self.graph_written.emit(self.filepath, error_msg)
class WriteGraphDialog(QtWidgets.QDialog):
    """Modal progress dialog that renders a graph on a background thread.

    Shows an indeterminate progress bar while a Writer renders the graph,
    supports cancellation, and reports success via write_graph()'s return.
    """

    def __init__(self, graph_str, filepath, parent=None, prune_to=None):
        super(WriteGraphDialog, self).__init__(parent)
        self.setWindowTitle("Rendering graph...")
        self.writer = Writer(graph_str, filepath, prune_to)
        self.thread = None          # background rendering thread
        self._finished = False      # set once the writer has reported back
        self.success = False        # True only if the render completed cleanly
        self.busy_cursor = QtGui.QCursor(QtCore.Qt.WaitCursor)

        # Range (0, 0) makes the bar indeterminate ("busy" animation).
        self.bar = QtWidgets.QProgressBar()
        self.bar.setRange(0, 0)
        self.cancel_btn = QtWidgets.QPushButton("Cancel")
        pane = create_pane([None, self.cancel_btn], True)
        create_pane([self.bar, pane], False, parent_widget=self)

        self.writer.graph_written.connect(self._graph_written)
        self.cancel_btn.clicked.connect(self._cancel)

    def sizeHint(self):
        return QtCore.QSize(300, 100)

    def write_graph(self):
        """Run the render; block (modally) until done. Returns success."""
        QtWidgets.QApplication.setOverrideCursor(self.busy_cursor)
        self.thread = threading.Thread(target=self.writer.write_graph)
        self.thread.daemon = True
        self.thread.start()
        self.exec_()
        self.thread.join()
        return self.success

    def reject(self):
        # Escape / window close: treat as cancel while still running.
        if self._finished:
            super(WriteGraphDialog, self).reject()
        else:
            self._cancel()

    def closeEvent(self, event):
        # Block closing until the writer has finished or been cancelled.
        if self._finished:
            event.accept()
        else:
            self._cancel()
            event.ignore()

    def _cancel(self):
        # Freeze the bar at full, disable the button, and ask the writer
        # to stop; _graph_written still fires to finish the dialog.
        self.bar.setMaximum(10)
        self.bar.setValue(10)
        self.cancel_btn.setText("Cancelling...")
        self.cancel_btn.setEnabled(False)
        self.writer.cancel()

    def _graph_written(self, filepath, error_message):
        """Slot for Writer.graph_written; closes the dialog on success."""
        self._finished = True
        self.bar.setMaximum(10)
        self.bar.setValue(10)
        QtWidgets.QApplication.restoreOverrideCursor()
        self.setWindowTitle("Rendered graph")

        if error_message:
            QtWidgets.QMessageBox.critical(self, "Failed rendering resolve graph",
                                           error_message)
        elif filepath:
            self.success = True
            self.close()
# Cache of (graph_str, prune_to) -> rendered tempfile path, so repeat views
# of the same graph skip the expensive render step.
graph_file_lookup = {}


def view_graph(graph_str, parent=None, prune_to=None):
    """View a graph."""
    from rezgui.dialogs.ImageViewerDialog import ImageViewerDialog
    from rez.config import config

    # check for already written tempfile
    h = hash((graph_str, prune_to))
    filepath = graph_file_lookup.get(h)
    if filepath and not os.path.exists(filepath):
        # Cached file was deleted from disk; force a re-render.
        filepath = None

    # write graph to tempfile
    if filepath is None:
        suffix = ".%s" % config.dot_image_format
        fd, filepath = tempfile.mkstemp(suffix=suffix, prefix="rez-graph-")
        os.close(fd)

        dlg = WriteGraphDialog(graph_str, filepath, parent, prune_to=prune_to)
        if not dlg.write_graph():
            # Render failed or was cancelled; nothing to show.
            return

    # display graph
    graph_file_lookup[h] = filepath
    dlg = ImageViewerDialog(filepath, parent)
    dlg.exec_()
| StarcoderdataPython |
3218120 | <filename>pypulse/__init__.py
__all__ = ["archive", "Archive", "singlepulse", "SinglePulse",
"dynamicspectrum", "DynamicSpectrum", "par", "Par",
"Parameter", "tim", "Tim", "TOA", "utils"]
__version__ = 0.1
from pypulse.archive import Archive
from pypulse.singlepulse import SinglePulse
from pypulse.dynamicspectrum import DynamicSpectrum
from pypulse.par import Par
from pypulse.tim import Tim, TOA
from pypulse.dmx import DMX, DM
| StarcoderdataPython |
125546 | <filename>scripts/multi_result_summary.py
# Requires nuspacesim and tabulate
#
# pip install nuspacesim tabulate
#
# Usage:
#
# python multi_result_summary directory/with/simulation/files/
import os
import sys
from tabulate import tabulate
import nuspacesim as nss
if __name__ == "__main__":
    # Scan the directory given on the command line for nuspacesim FITS
    # result files and tabulate summary quantities from each file's header.
    results = list()
    path = os.path.abspath(sys.argv[1])
    for filename in os.listdir(path):
        _, extension = os.path.splitext(filename)
        if extension != ".fits":
            continue
        filepath = os.path.join(path, filename)
        r = nss.ResultsTable.read(filepath)
        # Header keywords: spectrum energy parameter, Monte Carlo integral,
        # geometry factor, and number of events passing cuts.
        energy = r.meta["SPECPARA"]
        mci = r.meta["OMCINT"]
        gf = r.meta["OMCINTGO"]
        npe = r.meta["ONEVPASS"]
        t = tuple([energy, mci, gf, npe])
        results.append(t)
    # Tuples sort by energy first, giving an energy-ordered table.
    results.sort()
    print(
        tabulate(
            results,
            headers=[
                "log_e_nu",
                "Monte Carlo Integral",
                "Geometry Factor",
                "Passing Events",
            ],
        )
    )
| StarcoderdataPython |
1732002 | from nnet.optim.optimizer import Optimizer
import numpy as np
import nnet.cuda
class SGD(Optimizer):
    """Stochastic gradient descent with optional classical momentum."""

    def __init__(self, lr=0.01, momentum=0.0):
        super(SGD, self).__init__()
        self.lr = lr                # learning rate
        self.momentum = momentum    # momentum coefficient (0.0 = plain SGD)
        self.vs = {}                # per-parameter velocity buffers, keyed by id(param)

    def update_one(self, param):
        """Update a single parameter in place from its stored gradient."""
        # Pick the array module (numpy or GPU equivalent) matching the data.
        xp = nnet.cuda.get_array_module(param.data)

        v_key = id(param)
        if v_key not in self.vs:
            self.vs[v_key] = xp.zeros_like(param.data)

        # v = momentum * v - lr * grad; param += v (all in place, so the
        # stored velocity buffer is updated as a side effect).
        v = self.vs[v_key]
        v *= self.momentum
        v -= self.lr * param.grad.data
        param.data += v
| StarcoderdataPython |
89351 | from octosql_py import octosql_py
from octosql_py.core.storage.json import OctoSQLSourceJSON
from octosql_py.core.storage.static import OctoSQLSourceStatic
import octosql_py_native
# Smoke test: connect a static in-memory source and a JSON file source,
# run a trivial SELECT against the static one, and print the rows.
octo = octosql_py.OctoSQL()
conn = octo.connect([
    OctoSQLSourceStatic("lol", [
        { "a": 99 }
    ]),
    OctoSQLSourceJSON("lol2", "./tests/samples/bikes.json"),
])
query = conn.createQuery("SELECT * FROM lol lol")
a = query.run()
print(a.values)
| StarcoderdataPython |
def test_polkadot_service_file(host):
    # Testinfra check: on the 'public' host, the polkadot systemd unit file
    # must exist, be root-owned, mode 0600, and auto-restart the service.
    if host.ansible.get_variables()['inventory_hostname'] == 'public':
        svc = host.file('/etc/systemd/system/polkadot.service')
        assert svc.exists
        assert svc.user == 'root'
        assert svc.group == 'root'
        assert svc.mode == 0o600
        assert svc.contains('Restart=always')
def test_polkadot_running_and_enabled(host):
    # Testinfra check: the polkadot service must be running on 'public'.
    # NOTE(review): the is_enabled assertion is commented out although the
    # test name still says "and_enabled" -- confirm whether the check was
    # disabled deliberately.
    if host.ansible.get_variables()['inventory_hostname'] == 'public':
        polkadot = host.service("polkadot.service")
        assert polkadot.is_running
        # assert polkadot.is_enabled
| StarcoderdataPython |
166702 | import os
from matplotlib.pyplot import figure
import matplotlib.pyplot as plt
from textwrap import wrap
from src.output_option.output_option import OutputOptionInterface
class GraphOutputOption(OutputOptionInterface):
    """Renders simulation results as two stacked bar charts in a .png file."""

    def __init__(self, **kwargs):
        """
        Args:
            dir_path: Path to dir.
        """
        self._dir_path: str = kwargs['dir_path']
        self._file_name: str = 'results'  # output written as <dir>/results.png

    def save(self, simulation_results: list):
        """ Saves simulation results as graph into .png file.
        Throws ValueError if invalid path or file name.

        Args:
            simulation_results: A list of simulation results.

        Returns: void
        """
        if self._dir_path is None or len(self._dir_path) < 1 or not os.path.isdir(self._dir_path):
            raise ValueError('Invalid dir path')
        if self._file_name is None or len(self._file_name) < 1:
            raise ValueError('Invalid file name')
        # Wrap long algorithm titles (10 chars/line) so labels stay readable.
        names = [x.result.algorithm_title for x in simulation_results]
        labels = ['\n'.join(wrap(x, 10)) for x in names]
        fitness_scores = [x.result.best_fitness for x in simulation_results]
        exe_time = [x.execution_time for x in simulation_results]
        figure(num=None, figsize=(11, 11), dpi=80, facecolor='w', edgecolor='k')
        # Top subplot: best fitness per algorithm.
        plt.subplot(2, 1, 1)
        bars = plt.bar(labels, fitness_scores, color='lightblue', width=0.3)
        plt.ylabel('Value')
        plt.title('Fitness score')
        plt.suptitle('Fitness and execution time comparison', fontsize=16)
        for bar in bars:
            # Annotate each bar with its exact value.
            height = bar.get_height()
            plt.text(bar.get_x() + bar.get_width() / 2.0, height, height, ha='center', va='bottom')
        # Bottom subplot: execution time per algorithm (two decimals).
        plt.subplot(2, 1, 2)
        bars = plt.bar(labels, exe_time, color='pink', width=0.3)
        plt.ylabel('Seconds')
        plt.title('Execution time')
        for bar in bars:
            height = bar.get_height()
            plt.text(bar.get_x() + bar.get_width() / 2.0, height, "{0:.2f}".format(height), ha='center', va='bottom')
        plt.savefig(os.path.join(self._dir_path, self._file_name + '.png'))
| StarcoderdataPython |
4825509 | from crop import Crop
class Wheat(Crop):
    # A wheat crop
    def __init__(self) -> None:
        # Crop is initialised with three positional values, all 1 for wheat;
        # their meaning is not visible here -- TODO confirm against Crop.
        super().__init__(1, 1, 1)
        self._type = "Wheat"
| StarcoderdataPython |
1659955 | <gh_stars>0
#!/usr/bin/env python3
#
# This script is meant for testing functionalities
#
import requests
from pprint import pprint
import json
import argparse
import RPi.GPIO as GPIO
import time
import os
# CONSTANTS
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
PIR_InPin = 38
PIN_TRIGGER = 16
PIN_ECHO = 18
def setup():
    """Configure GPIO pins for the PIR sensor and the ultrasonic ranger."""
    # PIR setup
    print("PIR setup")
    GPIO.setmode(GPIO.BOARD)       # use PHYSICAL GPIO Numbering
    GPIO.setup(PIR_InPin, GPIO.IN) # set the PIR to IN mode
    print ('using pin%d'%PIR_InPin)

    print("SONAR setup")
    GPIO.setup(PIN_TRIGGER, GPIO.OUT)
    GPIO.setup(PIN_ECHO, GPIO.IN)
    GPIO.output(PIN_TRIGGER, GPIO.LOW)
    # Give the ultrasonic sensor time to stabilise before first use.
    print("Waiting for sensor to settle")
    time.sleep(2)
def pulse_sonar_trigger():
    # A 10-microsecond HIGH pulse on the trigger pin starts one ultrasonic
    # ping (standard HC-SR04-style protocol -- TODO confirm sensor model).
    GPIO.output(PIN_TRIGGER, GPIO.HIGH)
    time.sleep(0.00001)
    GPIO.output(PIN_TRIGGER, GPIO.LOW)
def on_PIR_detect():
    """Fire one ultrasonic ping and return the measured distance in cm.

    Returns 300 (beyond the useful detection range) if timing fails or a
    GPIO error occurs, so callers treat the reading as "no user in range".
    """
    try:
        # Inline the trigger pulse (instead of calling pulse_sonar_trigger)
        # to keep function-call latency out of the timing-critical section.
        GPIO.output(PIN_TRIGGER, GPIO.HIGH)
        time.sleep(0.00001)
        GPIO.output(PIN_TRIGGER, GPIO.LOW)
        # Fix: initialise both timestamps up front. If the echo line is
        # already high (or drops immediately) one of the loops below never
        # runs its body, and the original code then raised NameError on an
        # unbound variable -- silently masked by the except clause.
        pulse_start_time = pulse_end_time = time.time()
        while GPIO.input(PIN_ECHO) == 0:
            pulse_start_time = time.time()
        while GPIO.input(PIN_ECHO) == 1:
            pulse_end_time = time.time()
        pulse_duration = pulse_end_time - pulse_start_time
        # Half the round trip at ~343 m/s, expressed in cm, is t * 17150.
        distance = round(pulse_duration * 17150, 2)
        print("Distance:", distance, "cm")
        return distance
    except Exception:
        # Fix: narrowed the bare `except:`; hardware timing glitches still
        # fall through to an out-of-range reading.
        print("[on_PIR_detect] An exception occurred")
        return 300  # beyond the detection range
def loop(config):
    """Main sensor loop: watch the PIR, range with the sonar, notify MM.

    config: dict parsed from the CLI; 'max_ultrasonic_range' (int, cm)
    bounds how close a person must be to count as "detected" (default 100).
    """
    state = 0              # last observed PIR state (0/1), to detect edges
    user_detected = False  # True once sonar confirmed someone in range
    max_distance = 100 if "max_ultrasonic_range" not in config or type(config["max_ultrasonic_range"]) is not int else config["max_ultrasonic_range"]
    while True:
        i = GPIO.input(PIR_InPin)
        if i:
            if state != i:
                state = i
                print('ON')
                # notify magicmirror
                x = requests.get("http://localhost:8080/api/module/MMM-SensorControl/pir_trigger")
                print("pir_detected")
            # actively uses ultrasonic to detect user distance while PIR is triggered
            if not user_detected:
                # when PIR triggers and user is not detected, launch the sonar pulses to detect distance
                distance = on_PIR_detect()
                data = {"distance(cm)":distance}
                if distance < max_distance: # check ultrasonic detection distance
                    x = requests.get("http://localhost:8080/api/module/MMM-SensorControl/user_detected", \
                        data=json.dumps(data), headers={'Content-Type': 'application/json'})
                    user_detected = True
                    print("user_detected")
            else:
                # user is already detected, PIR still active. Send signal to MM to reset the idle timer
                time.sleep(2) # wait a bit, we dont want to send to many signals
                x = requests.get("http://localhost:8080/api/module/MMM-SensorControl/user_detected")
        else:
            if state != i:
                # Falling edge: PIR no longer sees motion.
                state = i
                user_detected= False
                print('OFF')
                x = requests.get("http://localhost:8080/api/module/MMM-SensorControl/user_absent")
        time.sleep(1) # sleep to minimize CPU usage
def destroy():
    GPIO.cleanup() # Release all GPIO
if __name__ == '__main__': # Program entrance
    print ('Program is starting ... \n')
    # The single CLI argument is a (single-quoted) JSON config string.
    parser = argparse.ArgumentParser()
    parser.add_argument("config")
    args = parser.parse_args()
    print(args.config)
    # remove the outer quotes
    config = json.loads(args.config.strip("'"))
    data = {}
    for cf in config:
        data[cf]=config[cf]
    # Forward the full config to MagicMirror once at startup.
    x = requests.post("http://localhost:8080/api/module/MMM-SensorControl/pir_trigger", data=json.dumps(data), headers={'Content-Type': 'application/json'})
    setup()
    try:
        loop(config)
    except KeyboardInterrupt: # Press ctrl-c to end the program.
        destroy()
| StarcoderdataPython |
42893 | ## the noise masks of funcSize are not binarized, this script is to binarize them
import os, json
import nibabel as nib
import numpy as np
from scipy import ndimage
# initalize data
# initialize data: paths and subjects for the noise-mask binarization pass
work_dir = '/mindhive/saxelab3/anzellotti/forrest/output_denoise/'
all_subjects = ['sub-01', 'sub-02', 'sub-03', 'sub-04', 'sub-05', 'sub-09', 'sub-10', 'sub-14', 'sub-15', 'sub-16', 'sub-17', 'sub-18', 'sub-19', 'sub-20']
out_dir = '/mindhive/saxelab3/anzellotti/forrest/derivatives/fmriprep/'
### work_dir = '/Users/chloe/Documents/output_denoise/'
### all_subjects = ['sub-02']
### out_dir = '/Users/chloe/Documents/'
mask = '_CSF_WM_mask_union_bin_shrinked_funcSize.nii.gz'
mask_thr = 0.5  # voxels at or above this value become 1 in the binary mask
# iterate through all subjects
for sub in all_subjects:
    # Paths for this subject's union CSF/WM mask and its output ROI dir.
    sub_dir = work_dir + sub + '_denoise/'
    mask_dir = sub_dir + sub + mask
    sub_out_dir = out_dir + sub + '_complete/' + sub + '_ROIs/'
    # load data
    mask_union = nib.load(mask_dir)
    mask_union_affine = mask_union.affine
    mask_union_header = mask_union.header
    mask_union = mask_union.get_data()
    # Binarize in one vectorized pass: voxels >= mask_thr become 1.0,
    # everything else 0.0. Fix: replaces the original triple Python loop
    # over every voxel (O(x*y*z) interpreter iterations) with a single
    # NumPy comparison; float64 matches np.zeros' default dtype.
    new_mask_union = (mask_union >= mask_thr).astype(np.float64)
    # save the binarized mask into the subject's ROI directory
    mask_union_img = nib.Nifti1Image(new_mask_union, mask_union_affine, mask_union_header)
    nib.save(mask_union_img, sub_out_dir + sub + '_CSF_WM_mask_union_bin_shrinked_funcSize.nii.gz')
| StarcoderdataPython |
4840776 | <reponame>prekolna/AlgorithmsGreatestHits
import unittest
from .unionfind import UnionFind
class UnionFindTests(unittest.TestCase):
    """Unit tests for the UnionFind disjoint-set implementation."""

    def setUp(self):
        self.nodes = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
        self.U = UnionFind(self.nodes)

    def test_find_pre_union(self):
        # Before any union, every node is its own leader.
        self.assertEqual(self.U.find('a'), 'a')

    def test_single_union(self):
        # After union(a, b), 'b' leads the merged set.
        self.U.union('a', 'b')
        self.assertEqual(self.U.find('a'), 'b')

    def test_multi_union(self):
        self.U.union('a', 'b')
        self.U.union('c', 'd')
        self.U.union('a', 'd')
        self.U.union('e', 'f')
        self.U.union('g', 'h')
        self.U.union('f', 'h')
        self.U.union('c', 'g')
        #should be fully connected
        leader = None
        for node in self.nodes:
            if leader:
                self.assertEqual(leader, self.U.find(node))
            else:
                leader = self.U.find(node)

    def test_path_compression(self):
        # NOTE(review): this test pokes at UnionFind internals
        # (_node_array/_parent indices) and will break if the internal
        # layout changes, even when public behaviour is intact.
        self.U.union('a', 'b')
        self.U.union('c', 'd')
        self.U.union('a', 'd')
        self.U.union('e', 'f')
        self.U.union('g', 'h')
        self.U.union('f', 'h')
        self.U.union('c', 'g')
        #Currently, 'a' is a leaf
        self.assertEqual(self.U._node_array[0]._parent, 1)
        self.U.find('a')
        #Now, 'a' should be directly attached to its leader, as should b.
        self.assertEqual(self.U._node_array[0]._parent, 7)
        self.assertEqual(self.U._node_array[1]._parent, 7)

    def test_add(self):
        # Building the structure incrementally via add() behaves the same
        # as constructing it with the full node list.
        uf = UnionFind([])
        for n in self.nodes:
            uf.add(n)
        uf.union('a', 'b')
        uf.union('c', 'd')
        uf.union('a', 'd')
        uf.union('e', 'f')
        uf.union('g', 'h')
        uf.union('f', 'h')
        uf.union('c', 'g')
        #should be fully connected
        leader = None
        for node in self.nodes:
            if leader:
                self.assertEqual(leader, uf.find(node))
            else:
                leader = uf.find(node)
| StarcoderdataPython |
1658530 | <filename>pantsuBooru/backend/test.py
from discord.ext import commands
commands
| StarcoderdataPython |
27540 | <filename>kubelet/datadog_checks/kubelet/summary.py
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from __future__ import division
from fnmatch import fnmatch
from datadog_checks.base.utils.tagging import tagger
from .common import replace_container_rt_prefix, tags_for_docker, tags_for_pod
class SummaryScraperMixin(object):
"""
This class scrapes metrics from Kubelet "/stats/summary" endpoint
"""
    def process_stats_summary(self, pod_list_utils, stats, instance_tags, main_stats_source):
        """Report node, pod and container metrics from a /stats/summary payload.

        When `main_stats_source` is True this endpoint is the primary source
        and everything retrievable is emitted; when False, only metrics that
        cannot be obtained elsewhere (e.g. ephemeral storage) are reported.
        """
        # Reports system container metrics (node-wide)
        self._report_system_container_metrics(stats, instance_tags)
        # Reports POD & Container metrics. If `main_stats_source` is set, retrieve everything it can
        # Otherwise retrieves only what we cannot get elsewhere
        self._report_metrics(pod_list_utils, stats, instance_tags, main_stats_source)
    def _report_metrics(self, pod_list_utils, stats, instance_tags, main_stats_source):
        """Iterate pods in the summary, skipping malformed or excluded ones,
        and delegate to the pod-level and container-level reporters."""
        for pod in stats.get('pods', []):
            pod_namespace = pod.get('podRef', {}).get('namespace')
            pod_name = pod.get('podRef', {}).get('name')
            pod_uid = pod.get('podRef', {}).get('uid')
            if pod_namespace is None or pod_name is None or pod_uid is None:
                # Incomplete podRef: cannot tag reliably, so skip the pod.
                self.log.warning("Got incomplete results from '/stats/summary', missing data for POD: %s", pod)
                continue

            if pod_list_utils.is_namespace_excluded(pod_namespace):
                continue

            self._report_pod_stats(
                pod_namespace, pod_name, pod_uid, pod, pod_list_utils, instance_tags, main_stats_source
            )
            self._report_container_stats(
                pod_namespace, pod_name, pod.get('containers', []), pod_list_utils, instance_tags, main_stats_source
            )
    def _report_pod_stats(
        self, pod_namespace, pod_name, pod_uid, pod, pod_list_utils, instance_tags, main_stats_source
    ):
        """Emit pod-level metrics: ephemeral storage always, network rates
        only when this endpoint is the main stats source."""
        # avoid calling the tagger for pods that aren't running, as these are
        # never stored
        pod_phase = pod_list_utils.pods.get(pod_uid, {}).get('status', {}).get('phase', None)
        if pod_phase != 'Running':
            return

        pod_tags = tags_for_pod(pod_uid, tagger.ORCHESTRATOR)
        if not pod_tags:
            self.log.debug("Tags not found for pod: %s/%s - no metrics will be sent", pod_namespace, pod_name)
            return
        pod_tags += instance_tags

        used_bytes = pod.get('ephemeral-storage', {}).get('usedBytes')
        if used_bytes:
            self.gauge(self.NAMESPACE + '.ephemeral_storage.usage', used_bytes, pod_tags)

        # Metrics below should already be gathered by another mean (cadvisor endpoints)
        if not main_stats_source:
            return

        # Processing summary based network level metrics
        net_pod_metrics = {'rxBytes': 'kubernetes.network.rx_bytes', 'txBytes': 'kubernetes.network.tx_bytes'}
        for k, v in net_pod_metrics.items():
            # ensure we can filter out metrics per the configuration:
            # both the pod-level allow-list and the rate allow-list must match.
            pod_level_match = any([fnmatch(v, p) for p in self.pod_level_metrics])
            enabled_rate = any([fnmatch(v, p) for p in self.enabled_rates])
            if pod_level_match and enabled_rate:
                net_bytes = pod.get('network', {}).get(k)
                if net_bytes:
                    self.rate(v, net_bytes, pod_tags)
def _report_container_stats(
    self, pod_namespace, pod_name, containers, pod_list_utils, instance_tags, main_stats_source
):
    """Report per-container CPU, working-set memory and rootfs metrics.

    Skipped entirely when this check is not the main stats source, as the
    same metrics are then gathered from the cadvisor endpoints.
    """
    # Metrics below should already be gathered by another mean (cadvisor endpoints)
    if not main_stats_source:
        return
    for container in containers:
        container_name = container.get('name')
        if container_name is None:
            self.log.warning(
                "Kubelet reported stats without container name for pod: %s/%s", pod_namespace, pod_name
            )
            continue
        # No mistake, we need to give a tuple as parameter
        container_id = pod_list_utils.get_cid_by_name_tuple((pod_namespace, pod_name, container_name))
        if container_id is None:
            self.log.debug(
                "Container id not found from /pods for container: %s/%s/%s - no metrics will be sent",
                pod_namespace,
                pod_name,
                container_name,
            )
            continue
        # TODO: In `containers` we also have terminated init-containers, probably to be excluded?
        if pod_list_utils.is_excluded(container_id):
            continue
        # Finally, we can get tags for this container
        container_tags = tags_for_docker(replace_container_rt_prefix(container_id), tagger.HIGH, True)
        if not container_tags:
            self.log.debug(
                "Tags not found for container: %s/%s/%s:%s - no metrics will be sent",
                pod_namespace,
                pod_name,
                container_name,
                container_id,
            )
            # BUG FIX: without this `continue` the code fell through and
            # reported the metrics anyway (with instance tags only),
            # contradicting the log message above.
            continue
        container_tags += instance_tags
        cpu_total = container.get('cpu', {}).get('usageCoreNanoSeconds')
        if cpu_total:
            # Cumulative CPU time, hence submitted as a rate.
            self.rate(self.NAMESPACE + '.cpu.usage.total', cpu_total, container_tags)
        working_set = container.get('memory', {}).get('workingSetBytes')
        if working_set:
            self.gauge(self.NAMESPACE + '.memory.working_set', working_set, container_tags)
        # TODO: Review meaning of these metrics as capacity != available + used
        # availableBytes = container.get('rootfs', {}).get('availableBytes')
        capacity_bytes = container.get('rootfs', {}).get('capacityBytes')
        used_bytes = container.get('rootfs', {}).get('usedBytes')
        if used_bytes is not None:
            self.gauge(self.NAMESPACE + '.filesystem.usage', used_bytes, container_tags)
        if used_bytes is not None and capacity_bytes is not None:
            self.gauge(self.NAMESPACE + '.filesystem.usage_pct', float(used_bytes) / capacity_bytes, container_tags)
def _report_system_container_metrics(self, stats, instance_tags):
sys_containers = stats.get('node', {}).get('systemContainers', [])
for ctr in sys_containers:
if ctr.get('name') == 'runtime':
mem_rss = ctr.get('memory', {}).get('rssBytes')
if mem_rss:
self.gauge(self.NAMESPACE + '.runtime.memory.rss', mem_rss, instance_tags)
cpu_usage = ctr.get('cpu', {}).get('usageNanoCores')
if cpu_usage:
self.gauge(self.NAMESPACE + '.runtime.cpu.usage', cpu_usage, instance_tags)
if ctr.get('name') == 'kubelet':
mem_rss = ctr.get('memory', {}).get('rssBytes')
if mem_rss:
self.gauge(self.NAMESPACE + '.kubelet.memory.rss', mem_rss, instance_tags)
cpu_usage = ctr.get('cpu', {}).get('usageNanoCores')
if cpu_usage:
self.gauge(self.NAMESPACE + '.kubelet.cpu.usage', cpu_usage, instance_tags)
| StarcoderdataPython |
3214654 | <reponame>skrat/martinez<gh_stars>1-10
from hypothesis import strategies
from tests.integration_tests.factories import (
make_cyclic_bound_with_ported_sweep_events,
to_bound_with_ported_points_pair,
to_bound_with_ported_sweep_events)
from tests.integration_tests.utils import (
bound_with_ported_edges_types_pairs,
bound_with_ported_polygons_types_pairs)
from tests.strategies import (booleans,
single_precision_floats as floats,
unsigned_integers)
from tests.utils import (MAX_NESTING_DEPTH,
to_pairs)
# Re-export shared strategies so test modules can import everything they
# need from this single strategies module.
booleans = booleans
polygons_types_pairs = bound_with_ported_polygons_types_pairs
edges_types_pairs = bound_with_ported_edges_types_pairs
unsigned_integers = unsigned_integers
# Pairs of (bound, ported) points built from the same float coordinates.
points_pairs = strategies.builds(to_bound_with_ported_points_pair,
                                 floats, floats)
nones_pairs = to_pairs(strategies.none())
# Sweep events with no nested event: the recursion base case.
leaf_sweep_events_pairs = to_bound_with_ported_sweep_events(nones_pairs)
# Recursively nested, acyclic sweep events bounded by MAX_NESTING_DEPTH.
acyclic_sweep_events_pairs = strategies.recursive(
        leaf_sweep_events_pairs, to_bound_with_ported_sweep_events,
        max_leaves=MAX_NESTING_DEPTH)
# Sweep events that may contain reference cycles.
sweep_events_pairs = make_cyclic_bound_with_ported_sweep_events(
        acyclic_sweep_events_pairs)
nested_sweep_events_pairs = to_bound_with_ported_sweep_events(
        sweep_events_pairs)
maybe_sweep_events_pairs = nones_pairs | sweep_events_pairs
| StarcoderdataPython |
155859 | """ A playful implementation of the famous "German Tank Problem" in statistics.
First, the random number generator populates a list of "tanks", represented
by sequential serial numbers. The numbers are added to the list in random
order until they run out.
We then choose the sample size, representing the number of tanks we have
captured in battle and whose serial number we have been able to observe.
The program then retrieves an amount of random serial numbers equal to our
specified sample size, and attempts to estimate how many tanks there are
in total.
The formula can make fairly accurate estimates with relatively small sample
sizes, providing the serial numbers sampled are reasonably random.
"""
from random import sample
def generate_serials(total, samplesize):
    """Return ``samplesize`` distinct serial numbers drawn at random from
    the consecutive range 1..``total`` (inclusive).
    """
    population = list(range(1, total + 1))
    return sample(population, samplesize)
def estimate_tanks(sample):
    """Estimate the total tank count from a sample of serial numbers.

    Uses the classic frequentist German Tank Problem estimator
    ``m + m/k - 1``, where ``m`` is the largest serial observed and
    ``k`` the sample size.

    :param sample: non-empty sequence of observed serial numbers
        (the name shadows ``random.sample`` at module scope, but is kept
        for keyword-argument compatibility with existing callers)
    :return: rounded estimate of the total number of tanks
    """
    # Hoist max() so it is computed once instead of twice.
    highest = max(sample)
    return round(highest + highest / len(sample) - 1)
def experiment(realtanks, samplesize):
    """Run one simulated round of the German Tank Problem.

    Builds a virtual army of ``realtanks`` tanks, samples ``samplesize``
    serial numbers, estimates the army size from that sample and prints
    a summary including the relative estimation error.
    """
    observed = generate_serials(realtanks, samplesize)
    guess = estimate_tanks(observed)
    print("GERMAN TANK PROBLEM\n")
    print("Actual number of tanks: {}".format(realtanks))
    print("Sample size: {}".format(samplesize))
    print("Serial numbers sampled:")
    print(observed)
    print("-----")
    print("Estimated number of tanks: {}".format(guess))
    relative_error = abs(realtanks - guess) / realtanks
    print("Error: {}%".format(round(relative_error * 100, 2)))
experiment(1500, 20) | StarcoderdataPython |
186296 | import re
import sys
import json
from ensmallen import Graph
from .utils import build_path
from .parsers import DocParser
def doc_analysis(args):
    """Parse the docstring of every analysed function and dump the result.

    Reads results/analysis.json, runs a fresh DocParser over each
    function's joined doc lines, prints the collected mapping and writes
    it to results/doc_analysis.json.
    """
    with open(build_path("results/analysis.json"), "r") as source:
        analysis = json.load(source)
    parsed = {}
    for module_info in analysis.values():
        for function in module_info["functions"]:
            parser = DocParser()
            doc_text = "\n".join(function["doc"])
            parsed[function.get("name", "")] = parser.start(function, doc_text)
    print(json.dumps(parsed, indent=4))
    with open(build_path("results/doc_analysis.json"), "w") as sink:
        json.dump(parsed, sink, indent=4)
1700069 | <gh_stars>0
from pylatex import Document, Section, StandAloneGraphic, NewPage
from pylatex.utils import bold
import sys
import json
def fill_document():
    """Render every downloaded APOD entry into a one-page-per-picture PDF.

    Reads ./temp/explanations/explanations.json, whose ``elements`` list
    holds objects with ``date`` (a YYMMDD string), ``explanation`` and
    ``picture`` (image path) keys, and writes
    astronomy-picture-of-the-day.pdf in the working directory.
    """
    # Month names indexed by month number 1-12; replaces the previous
    # twelve-branch if/elif chain.
    month_names = ('January', 'February', 'March', 'April', 'May', 'June',
                   'July', 'August', 'September', 'October', 'November',
                   'December')
    with open('./temp/explanations/explanations.json') as f:
        data = json.load(f)
    doc = Document()
    for element in data['elements']:
        date = element['date']
        explanation = element['explanation']
        picture_path = element['picture']
        try:
            month_number = int(date[2:4])
        except ValueError:
            month_number = 0
        if 1 <= month_number <= 12:
            month = month_names[month_number - 1]
        else:
            month = 'Error: Invalid month'
        # Drop a leading zero from the day for display.
        if int(date[4:]) < 10:
            day = date[-1]
        else:
            day = date[4:]
        # APOD launched in 1995, so two-digit years >= 95 are 19xx.
        if int(date[0:2]) < 95:
            year = '20' + date[0:2]
        else:
            year = '19' + date[0:2]
        with doc.create(
                Section(month + ' ' + day + ', ' + year, numbering=False)):
            doc.append(
                StandAloneGraphic(
                    image_options=r'width=\textwidth', filename=picture_path))
            doc.append(bold('\n\nExplanation:\n'))
            doc.append(explanation)
            doc.append(NewPage())
    doc.generate_pdf('astronomy-picture-of-the-day', clean_tex=True)
def main():
    """Generate the APOD PDF, reporting progress on stdout."""
    print('Generating pdf...')
    fill_document()
    print('...done')
if __name__ == '__main__':
    # Exit quietly with status 1 on Ctrl-C instead of printing a traceback.
    try:
        main()
    except KeyboardInterrupt:
        sys.exit(1)
| StarcoderdataPython |
3378732 | import os
import glob
import xml.etree.ElementTree as ET
import numpy
import soundfile
def main():
    """Convert every impulse response referenced by */**/*.xps presets to WAV.

    For each preset, locate its mono IR description, normalise the raw
    float32 samples and write <IR name>.wav under ../<IRtitle>/.
    """
    for preset_path in glob.glob("*/**/*.xps"):
        meta = recurse_tree(ET.parse(preset_path).getroot())
        if not meta:
            continue
        ir_name = meta["IRFileNameFull"]
        raw_path = os.path.join(os.path.dirname(preset_path), ir_name)
        out_dir = os.path.join("..", meta["IRtitle"])
        if not os.path.isdir(out_dir):
            os.makedirs(out_dir)
        if not os.path.isfile(raw_path):
            continue
        samples = numpy.fromfile(raw_path, numpy.float32)
        # Normalise by the stored "Norm" value, falling back to the peak.
        samples /= float(meta.get("Norm", numpy.max(samples)))
        out_name = os.path.splitext(ir_name)[0] + ".wav"
        soundfile.write(os.path.join(out_dir, out_name), samples,
                        int(meta["SampleRate"]), endian="little")
        print("Wrote %s to .wav." % ir_name)
def recurse_tree(root):
    """Depth-first search for a mono "PluginSpecific" parameter block.

    :param root: an ``xml.etree.ElementTree.Element`` to search
    :return: ``{param-name: text}`` for the first PluginSpecific element
        whose parameters declare exactly one input and one output channel,
        or ``None`` if no such element exists.
    """
    if root.tag == "PluginSpecific":
        # BUG FIX: guard against a childless PluginSpecific element, which
        # previously raised IndexError on root[0].
        if len(root) > 0:
            params = {child.attrib["Name"]: child.text for child in root[0]}
            if params.get("NumInChannels", "") == "1" and params.get("NumOutChannels", "") == "1":
                return params
    for child in root:
        found = recurse_tree(child)
        if found:
            return found
# Run the conversion when this file is executed as a script.
if __name__ == "__main__":
    main()
3295414 | <reponame>tefra/xsdata-w3c-tests
from dataclasses import dataclass, field
from typing import Dict, Optional
@dataclass
class AnyAttr:
    """Schema type ``anyAttr``: one optional ``id1`` attribute plus an
    open-ended set of wildcard attributes (xs:anyAttribute).

    xsdata-generated binding; the ``metadata`` dicts drive XML
    (de)serialisation and mirror the source schema.
    """

    class Meta:
        # XML local name of the type in the source schema.
        name = "anyAttr"

    id1: Optional[str] = field(
        default=None,
        metadata={
            "type": "Attribute",
        }
    )
    any_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##any",
        }
    )
@dataclass
class Root:
    """Document root ``root``: requires one unqualified child element
    ``a`` and also accepts wildcard attributes.

    xsdata-generated binding; the ``metadata`` dicts drive XML
    (de)serialisation and mirror the source schema.
    """

    class Meta:
        # XML local name of the root element.
        name = "root"

    a: Optional[AnyAttr] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "",
            "required": True,
        }
    )
    any_attributes: Dict[str, str] = field(
        default_factory=dict,
        metadata={
            "type": "Attributes",
            "namespace": "##any",
        }
    )
| StarcoderdataPython |
78946 | from collections import Counter
from analysis.computation import utils
def frequency(ambitus_list):
    """Tabulate how many pieces share each ambitus value.

    :param ambitus_list: one ambitus value per piece
    :return: header row ``['Ambitus', 'Pieces']`` followed by
        ``[value, count]`` rows sorted by ambitus value
    """
    counts = Counter(ambitus_list)
    table = [['Ambitus', 'Pieces']]
    table.extend([value, amount] for value, amount in sorted(counts.items()))
    return table
def frequency_pie(ambitus_list):
    """Build pie-chart rows for the ambitus distribution, with header."""
    chart_rows = utils.aux_pie_chart(Counter(ambitus_list))
    return [['Ambitus', 'Amount']] + chart_rows
def analysis(compositions):
    """Collect every ambitus chart and statistic needed by the report views.

    :return: dict of named chart/stat structures, or an empty dict when the
        compositions carry no ambitus data.
    """
    ambitus_list = utils.get_music_data_attrib(compositions, 'ambitus', 'append')
    if not ambitus_list:
        return {}
    basic_stats = utils.aux_basic_stats(ambitus_list, 'Pieces number', False)
    dist_value = utils.distribution(ambitus_list, basic_stats, False)
    return {
        'basic_stats': basic_stats,
        'frequency': frequency(ambitus_list),
        'histogram': utils.histogram(ambitus_list, 10, ['Ambitus', 'Pieces'], False, True),
        'distribution_value': dist_value,
        'distribution_amount': utils.distribution(ambitus_list, basic_stats, True),
        'frequency_pie': frequency_pie(ambitus_list),
        'boxplot': utils.boxplot(basic_stats),
    }
| StarcoderdataPython |
155395 |
import argparse
import os
import sys
class Opts(object):
    """Static configuration for 3D detection (ddd) training/evaluation.

    All options are plain attributes set in __init__; update_with_dataset()
    derives the output-head layout and resolutions from a dataset instance.
    """
    def __init__(self):
        """Populate every option with its default value."""
        #self.parser = argparse.ArgumentParser()
        #task
        self.task = 'ddd' #'ddd, lane'
        self.task = self.task.split(',')
        self.dataset = 'kitti' #'coco'
        self.test_dataset = 'kitti' #'coco'
        self.debug_mode = 0
        self.pretrained = False
        self.lateral_dist = False
        self.rel_dep_reg = False
        self.split = 'train' #'val'
        #system
        self.gpu_list = '' #'0'
        self.gpus = self.gpu_list.split(',') if self.gpu_list != '' else []
        self.dataloader_threads = 4
        self.use_cuda_dataloader = False
        self.seed = 36
        self.dist_train = False
        self.device = ''
        self.save_point = [1,20,40,60]
        self.root_dir = os.path.join(os.path.dirname(__file__), '..')
        self.save_dir = os.path.join(self.root_dir, 'checkpoints')
        self.load_model_dir = os.path.join(self.save_dir, 'model_last.pth')
        #network
        self.arch = 'generic' #res_101, dla_34, mobilenet
        self.backbone = 'mobilenet'
        self.neck = 'dlaup'
        #['Car', 'Van', 'Truck', 'Pedestrian', 'Cyclist']
        self.num_classes = 5 #12
        self.heads = {}
        self.num_head_conv = 1 #number of head conv layers
        self.head_conv = 24 #resnets:64,dla:256,mobilenet:24 channels num in each head conv layer
        self.head_kernel = 3
        self.cls_reg_angle = True
        self.angle_cls_bin = 12 #bin number of angle
        self.upsample_node = 'ConvTranspose'
        #depth estimation
        self.obj_dep_fln = False #focal length normalization of object depth
        self.obj_dep_scln = False #scale normalization of object depth
        self.obj_dep_rotn = False #pitch normalization of object depth
        self.obj_depth_encode_type = 'ori' #depth encode type
        self.obj_min_depth = 0.1
        self.obj_max_depth = 250.0
        self.dense_encode = False #???
        self.fpn = False
        self.out_strides = '4' #strides of output feature maps of fpn
        self.K = 100 #max number of output objects
        self.out_thresh = 0.1 #output minimal confidence threshold
        #data
        # input_h/input_w of -1 mean "take the dataset default" (resolved
        # in update_with_dataset); the derived output sizes below are
        # therefore placeholders until then.
        self.input_h = -1
        self.input_w = -1
        self.down_ratio = 4
        self.output_w = self.input_w // self.down_ratio
        self.output_h = self.input_h // self.down_ratio
        self.kitti_split = '3dop' #'3dop | subcnn'
        self.use_coord_input = False
        self.data_channel = ['right-forward'] #which camera data source
        self.data_dir = '/home/zuyuan/Data'
        self.num_workers = 0 #dataloader threads.
        #train
        self.optimizer = 'Adam'
        self.lr = 0.0001
        self.lr_step = [60] #drop learning rate by 10
        self.save_step = 90 #when to save the model to disk
        self.epochs = 60
        self.batch_size = 16
        self.resume = False
        #loss
        self.obj_depth_loss_type = 'L1' #'ord_reg'
        self.ord_num = 40 #'bin number of dense depth'
        self.dep_uncertain_type = 'gaussian'
        # Per-head loss weights. NOTE(review): 'latd' is commented out but
        # a 'latd' head is added when lateral_dist is True, which would
        # raise KeyError in update_with_dataset — confirm intended config.
        self.weights_dict = {'hm':1.0, 'wh':0.1, 'reg':1.0, 'dep':1.0,
                             'rot':1.0, 'depth_uncertain':0.1, #'latd':1.0,
                             'dim':1.0, 'amodel_offset':1.0
                             }
        self.cls_reweight = False
        self.use_uncertainty_wt = False #Automatic Weighted Loss
        self.use_grad_norm = False #Gradient Normalize Loss
        self.use_modified_grad = False #Modified gradient normalize
        self.use_dynamic_wt_avg = False #Dynamic weights average loss
        self.crit_loss_type = 'Focal' #GHM-C:gradient harmonizing mechanism
        self.lateral_dst_reg_type = 'sqrt' #sqrt, sigmoid
        self.amodel_offset_reg_type = 'ori' #ori,sqrt,sigmoid
        # NOTE(review): near-duplicate of dep_uncertain_type above — confirm
        # which one the rest of the code base actually reads.
        self.dep_uncertainty_type = 'gaussian'

    def update_with_dataset(self, opt, dataset):
        """Resolve resolutions and build the output-head layout for *opt*.

        :param opt: an Opts instance to mutate (usually this same object)
        :param dataset: dataset object exposing ``default_resolution``
        :return: the mutated *opt*
        """
        input_h, input_w = dataset.default_resolution
        opt.input_h = opt.input_h if opt.input_h > 0 else input_h
        opt.input_w = opt.input_w if opt.input_w > 0 else input_w
        opt.output_h = opt.input_h // opt.down_ratio
        opt.output_w = opt.input_w // opt.down_ratio
        opt.heads = {'hm': opt.num_classes, 'reg': 2, 'wh':2}
        if 'ddd' in opt.task:
            # NOTE(review): rot_dim is only bound when cls_reg_angle is
            # True; the update() below would raise NameError otherwise.
            if opt.cls_reg_angle:
                rot_dim = opt.angle_cls_bin * 2 #24
            if opt.obj_depth_loss_type == 'ord_reg':
                dep_dim = opt.ord_num * 2 + 1
            else:
                dep_dim = 1
            opt.heads.update({'dep':dep_dim, 'rot':rot_dim, 'dim':3, 'amodel_offset':2})
            if (opt.dep_uncertain_type == 'gaussian'):
                opt.heads.update({'depth_uncertain':1})
            if opt.lateral_dist:
                opt.heads.update({'latd': 1})
        # update heads
        opt.weights = {head: self.weights_dict[head] for head in opt.heads}
        # One conv-channel list per head; 'reg' always gets a single layer.
        opt.head_conv = {head: [opt.head_conv for i in range(opt.num_head_conv if
                        head!='reg' else 1)] for head in opt.heads}
        opt.out_strides = [int(i) for i in opt.out_strides.split(',')]
        if not opt.fpn:
            opt.out_strides = [4]
        return opt
| StarcoderdataPython |
1634266 | import re
# Fuzzy matching is more commonly handled using Levenshtein distance,
# e.g. https://github.com/seatgeek/fuzzywuzzy
def fuzzify(query):
regex = r'(?:.*?' + re.sub(r'(.)', r'(\1).*?', query) + ')'
return regex
def fuzzyScore(pattern, reference):
result = re.match(fuzzify(pattern), reference, flags=re.IGNORECASE)
reflen = len(reference)
lastindex = -1
score = 0
if not result:
return None
matchCount = result.lastindex and result.lastindex or 0
for i in range(1, matchCount + 1):
startindex = result.start(i)
if startindex == None or startindex == -1:
score = max(0, score - len(reference) / 2)
else:
score += 1.0*(reflen - startindex)
penalty = (startindex - lastindex - 1)
assert(penalty >= 0)
score -= 2.0*penalty*penalty
lastindex = startindex
return score
if __name__ == '__main__':
searchstrings = ('Germany' , 'germany', 'Ge', 'ermy', 'Blablagerm', 'Gbermany', 'Germn')
name = 'Germany'
for search in searchstrings:
print(search, fuzzyScore(search, name))
| StarcoderdataPython |
4828603 | import logging
from nni.assessor import Assessor, AssessResult
_logger = logging.getLogger('NaiveAssessor')
_logger.info('start')
# Result log consumed by the NNI integration-test harness; intentionally
# kept open for the process lifetime (closed in _on_exit/_on_error).
_result = open('/tmp/nni_assessor_result.txt', 'w')
class NaiveAssessor(Assessor):
    """Toy assessor used by NNI integration tests.

    Kills a trial as soon as the running sum of its reported values hits
    ``sum % 11 == 1`` and records ``<trial id> <step>`` lines in
    /tmp/nni_assessor_result.txt for the test harness to verify.
    """
    def __init__(self, optimize_mode):
        # optimize_mode is accepted for API compatibility but unused here.
        self._killed = set()
        _logger.info('init')
    def assess_trial(self, trial_job_id, trial_history):
        """Return Bad once a trial's prefix sum trips the kill rule."""
        _logger.info('assess trial %s %s' % (trial_job_id, trial_history))
        # NOTE(review): the first history entry doubles as the trial id —
        # an assumed contract with the paired test tuner; confirm there.
        id_ = trial_history[0]
        if id_ in self._killed:
            return AssessResult.Bad
        s = 0
        for i, val in enumerate(trial_history):
            s += val
            if s % 11 == 1:
                self._killed.add(id_)
                # Record at which step (1-based) the trial was killed.
                _result.write('%d %d\n' % (id_, i + 1))
                _result.flush()
                return AssessResult.Bad
        return AssessResult.Good
    def _on_exit(self):
        # Sentinel line the harness checks for a clean shutdown.
        _result.write('DONE\n')
        _result.close()
    def _on_error(self):
        _result.write('ERROR\n')
        _result.close()
| StarcoderdataPython |
1690030 | <gh_stars>1-10
# ===============================================================================
# Copyright 2017 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# standard library imports ======================================================
import os
import gdal
import osr
import re
import ntpath
from subprocess import call
def merge_rasters(in_folder, out_location, out_proj):
tifs = [os.path.join(in_folder, x) for x in os.listdir(in_folder) if x.endswith('.tif')]
print 'tif files: \n {}'.format(tifs)
tif_string = ' '.join(tifs)
print 'tif string to save: {}'.format(tif_string)
t_srs = osr.SpatialReference()
t_srs.ImportFromEPSG(out_proj)
t_proj = t_srs.ExportToWkt()
print 'target srs strirng: {}'.format(t_proj)
for tif in tifs:
dataset = gdal.Open(tif)
band = dataset.GetRasterBand(1)
band_ct = dataset.RasterCount
geo_t = dataset.GetGeoTransform()
proj_info = dataset.GetProjection()
src_srs = osr.SpatialReference()
src_srs.ImportFromWkt(proj_info)
src_proj = src_srs.ExportToWkt()
print 'source srs: {}'.format(src_proj)
driver = gdal.GetDriverByName('GTiff')
out_name = tif.replace('.tif', '_32100.tif')
print 'out name: {}'.format(out_name)
dest = driver.Create(out_name, dataset.RasterXSize, dataset.RasterYSize,
band_ct, band.DataType)
dest = driver.ReprojectImage(dataset, dest, src_proj, t_srs, gdal.GRA_Cubic)
dest.SetGeoTransform(geo_t)
dest.SetProjection(t_proj)
out_band = dest.GetRasterBand(1)
dest = None
# driver = gdal.GetDriverByName('GTiff')
# out_data_set = driver.Create(filename, self._geo['cols'], self._geo['rows'],
# self._geo['bands'], self._geo['data_type'])
# out_data_set.SetGeoTransform(self._geo['geotransform'])
# out_data_set.SetProjection(self._geo['projection'])
# output_band = out_data_set.GetRasterBand(1)
# output_band.WriteArray(array_to_save, 0, 0)
#
# raster_geo_dict = {'cols': dataset.RasterXSize, 'rows': dataset.RasterYSize, 'bands': dataset.RasterCount,
# 'data_type': band.DataType, 'projection': dataset.GetProjection(),
# 'geotransform': dataset.GetGeoTransform(), 'resolution': dataset.GetGeoTransform()[1]}
def wkt2epsg(wkt, epsg='/data01/anaconda2/share/proj/epsg/', forceProj4=False):
    ''' Transform a WKT string to an EPSG code.

    Arguments
    ---------

    wkt: WKT definition
    epsg: path of the proj.4 epsg definition file used for the brute-force
        fallback (defaults to '/data01/anaconda2/share/proj/epsg/').
        NOTE(review): the default ends with a slash, i.e. it names a
        directory; open() below would fail on it -- confirm the path.
    forceProj4: whether to perform brute force proj4 epsg file check (last resort)

    Returns: 'EPSG:<code>' string, a WKT string for local definitions,
    or None when no EPSG code can be determined.

    '''
    code = None
    p_in = osr.SpatialReference()
    s = p_in.ImportFromWkt(wkt)
    if s == 5:  # invalid WKT
        return None
    if p_in.IsLocal() == 1:  # this is a local definition
        return p_in.ExportToWkt()
    if p_in.IsGeographic() == 1:  # this is a geographic srs
        cstype = 'GEOGCS'
    else:  # this is a projected srs
        cstype = 'PROJCS'
    an = p_in.GetAuthorityName(cstype)
    ac = p_in.GetAuthorityCode(cstype)
    if an is not None and ac is not None:  # return the EPSG code
        return '%s:%s' % \
            (p_in.GetAuthorityName(cstype), p_in.GetAuthorityCode(cstype))
    else:  # try brute force approach by grokking proj epsg definition file
        p_out = p_in.ExportToProj4()
        if p_out:
            if forceProj4 is True:
                return p_out
            f = open(epsg)
            # Scan the epsg file for a line containing the proj4 string;
            # its '<NNNN>' marker carries the EPSG code.
            for line in f:
                if line.find(p_out) != -1:
                    m = re.search('<(\\d+)>', line)
                    if m:
                        code = m.group(1)
                        break
            if code:  # match
                return 'EPSG:%s' % code
            else:  # no match
                return None
        else:
            return None
def gdal_build_vrt(in_folder, out_folder):
tifs = [os.path.join(in_folder, x) for x in os.listdir(in_folder) if x.endswith('.tif')]
print tifs
for tif in tifs:
out_name = ntpath.basename(tif.replace('.tif', '.vrt'))
print 'out name: {}'.format(out_name)
out_file = os.path.join(out_folder, out_name)
print 'out file: {}'.format(out_file)
slope = 'gdalbuildvrt {} {}'.format(out_file, tif)
call(slope, shell=True)
return None
def gdal_dem(in_folder, out_folder, terrain_type='slope'):
function = ['slope', 'aspect', 'hillshade']
tifs = [os.path.join(in_folder, x) for x in os.listdir(in_folder) if x.endswith('dem.tif')]
print tifs
for tif in tifs:
out_name = ntpath.basename(tif.replace('dem.tif', '{}.tif'.format(terrain_type)))
print 'out name: {}'.format(out_name)
out_file = os.path.join(out_folder, out_name)
print 'out file: {}'.format(out_file)
dem_function = 'gdaldem {} {} {}'.format(function.index(terrain_type), tif, out_file)
call(dem_function, shell=True)
return None
if __name__ == '__main__':
    # Smoke entry point: only reports the current user's home directory.
    home = os.path.expanduser('~')
    print 'home: {}'.format(home)

# ============= EOF ============================================================
| StarcoderdataPython |
1632306 | <gh_stars>10-100
import time
import os, tarfile, io
import numpy as np
from .utils import download_dataset
# Remote archive to fetch, mapped to its local file name.
_urls = {
    "http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz": "stl10_binary.tar.gz",
}

# The ten STL-10 class names, in label order (labels are 0-indexed after
# the -1 shift applied in load()).
classes = [
    "airplane",
    "bird",
    "car",
    "cat",
    "deer",
    "dog",
    "horse",
    "monkey",
    "ship",
    "truck",
]
def load(path=None):
    """Image classification with extra unlabeled images.

    The `STL-10 <https://cs.stanford.edu/~acoates/stl10/>`_ dataset is an
    image recognition dataset inspired by CIFAR-10, with fewer labeled
    training examples per class, a large set of unlabeled examples for
    unsupervised/self-taught learning, and higher-resolution (96x96)
    images.

    Parameters
    ----------
    path: str (optional)
        the path to look for the data and where it will be downloaded if
        not present; defaults to the DATASET_PATH environment variable.
        NOTE(review): paths below are built by string concatenation, so
        `path` is assumed to end with a separator -- confirm callers.

    Returns
    -------
    dict
        'train_set/images', 'train_set/labels', 'test_set/images',
        'test_set/labels' and 'unlabelled' arrays.
    """
    if path is None:
        path = os.environ["DATASET_PATH"]
    # BUG FIX: the dataset name passed to download_dataset was "irmas"
    # (copy-pasted from another loader); everything below reads from the
    # "stl10" subdirectory, so the download must target it as well.
    download_dataset(path, "stl10", _urls)

    print("Loading stl10")

    t = time.time()

    # Loading Dataset
    file_ = tarfile.open(path + "stl10/stl10_binary.tar.gz", "r:gz")

    # Labels are stored 1-based on disk; shift to 0-based.
    # loading test label
    read_file = file_.extractfile("stl10_binary/test_y.bin").read()
    test_y = np.frombuffer(io.BytesIO(read_file).read(), dtype=np.uint8) - 1

    # loading train label
    read_file = file_.extractfile("stl10_binary/train_y.bin").read()
    train_y = np.frombuffer(io.BytesIO(read_file).read(), dtype=np.uint8) - 1

    # Images are stored as flat uint8 buffers; the reshape/transpose below
    # matches the column-major pixel layout used by the STL-10 binaries.
    # load test images
    read_file = file_.extractfile("stl10_binary/test_X.bin").read()
    test_X = (
        np.frombuffer(io.BytesIO(read_file).read(), dtype=np.uint8)
        .reshape((-1, 3, 96, 96))
        .transpose([0, 1, 3, 2])
    )

    # load train images
    read_file = file_.extractfile("stl10_binary/train_X.bin").read()
    train_X = (
        np.frombuffer(io.BytesIO(read_file).read(), dtype=np.uint8)
        .reshape((-1, 3, 96, 96))
        .transpose([0, 1, 3, 2])
    )

    # load unlabelled images
    read_file = file_.extractfile("stl10_binary/unlabeled_X.bin").read()
    unlabeled_X = (
        np.frombuffer(io.BytesIO(read_file).read(), dtype=np.uint8)
        .reshape((-1, 3, 96, 96))
        .transpose([0, 1, 3, 2])
    )

    print("Dataset stl10 loaded in", "{0:.2f}".format(time.time() - t), "s.")

    data = {
        "train_set/images": train_X,
        "train_set/labels": train_y,
        "test_set/images": test_X,
        "test_set/labels": test_y,
        "unlabelled": unlabeled_X,
    }
    return data
| StarcoderdataPython |
1658114 | <gh_stars>1-10
from .fp import UpperFingerprint
| StarcoderdataPython |
43277 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# rtk.hardware.component.resistor.fixed.Wirewound.py is part of the RTK
# Project
#
# All rights reserved.
# Copyright 2007 - 2017 <NAME> andrew.rowland <AT> reliaqual <DOT> com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
##########################################################
Hardware.Component.Resistor.Fixed Package Wirewound Module
##########################################################
"""
import gettext
import locale
try:
import Configuration
import Utilities
from hardware.component.resistor.Resistor import Model as Resistor
except ImportError: # pragma: no cover
import rtk.Configuration as Configuration
import rtk.Utilities as Utilities
from rtk.hardware.component.resistor.Resistor import Model as Resistor
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2007 - 2015 Andrew "weibullguy" Rowland'
# Add localization support.
try:
locale.setlocale(locale.LC_ALL, Configuration.LOCALE)
except locale.Error: # pragma: no cover
locale.setlocale(locale.LC_ALL, '')
_ = gettext.gettext
class Wirewound(Resistor):
    """
    The Wirewound resistor data model contains the attributes and methods of
    a Wirewound resistor.  The attributes of a Wirewound resistor are:

    :cvar list _lst_piE: list of MIL-HDBK-217FN2 operating environment factor
                         values.
    :cvar list _lst_piQ_count: list of quality factor values for the
                               MIL-HDBK-217FN2 parts count method.
    :cvar list _lst_piQ_stress: list of quality factor values for the
                                MIL-HDBK-217FN2 parts stress method.
    :cvar list _lst_lambdab_count: list of base hazard rate values for the
                                   MIL-HDBK-217FN2 parts count method.
    :cvar int subcategory: default value: 29

    Covers specifications MIL-R-93, MIL-R-39005.

    Hazard Rate Models:
        # MIL-HDBK-217F, section 9.5
    """

    # MIL-HDK-217F hazard rate calculation variables.
    # ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
    _lst_piE = [1.0, 2.0, 11.0, 5.0, 18.0, 15.0, 18.0, 28.0, 35.0, 27.0, 0.8,
                14.0, 38.0, 610.0]
    _lst_piQ_count = [0.03, 0.1, 0.3, 1.0, 3.0, 10.0]
    _lst_piQ_stress = [0.03, 0.1, 0.3, 1.0, 5.0, 15.0]
    _lst_lambdab_count = [0.0085, 0.018, 0.10, 0.045, 0.16, 0.15, 0.17, 0.30,
                          0.38, 0.26, 0.0068, 0.13, 0.37, 5.4]
    # ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----

    subcategory = 29                        # Subcategory ID in rtkcom DB.

    def __init__(self):
        """
        Method to initialize a Wirewound resistor data model instance.
        """

        super(Wirewound, self).__init__()

    def calculate_part(self):
        """
        Method to calculate the hazard rate for the Wirewound resistor data
        model.

        :return: False if successful or True if an error is encountered.
        :rtype: bool
        """

        from math import exp

        self.hazard_rate_model = {}

        if self.hazard_rate_type == 2:
            self.hazard_rate_model['equation'] = 'lambdab * piR * piQ * piE'

            # Base hazard rate per MIL-HDBK-217F section 9.5, driven by the
            # active temperature and the power stress ratio.
            _stress = self.operating_power / self.rated_power
            try:
                self.base_hr = 0.0031 * \
                               exp(((self.temperature_active + 273.0) / 398.0)**10.0) * \
                               exp((_stress * ((self.temperature_active + 273.0) /
                                               273.0))**1.5)
                self.hazard_rate_model['lambdab'] = self.base_hr
            except OverflowError:
                # TODO: Handle overflow error.
                return True

            # Resistance factor (piR), stepped by resistance range (ohms).
            if self.resistance <= 10000.0:
                self.piR = 1.0
            elif self.resistance > 10000.0 and self.resistance <= 1.0E5:
                self.piR = 1.7
            elif self.resistance > 1.0E5 and self.resistance <= 1.0E6:
                self.piR = 3.0
            elif self.resistance > 1.0E6:
                self.piR = 5.0
            self.hazard_rate_model['piR'] = self.piR

        return Resistor.calculate_part(self)
class WirewoundPower(Resistor):
"""
The Wirewound Power resistor data model contains the attributes and
methods of a Wirewound Power resistor. The attributes of a Wirewound
Power resistor are:
:cvar list _lst_piR: list of MIL-HDBK-217FN2 resistance factor values.
:cvar list _lst_piE: list of MIL-HDBK-217FN2 operating environment factor
values.
:cvar list _lst_piQ_count: list of quality factor values for the
MIL-HDBK-217FN2 parts count method.
:cvar list _lst_piQ_stress: list of quality factor values for the
MIL-HDBK-217FN2 parts stress method.
:cvar list _lst_lambdab_count: list of base hazard rate values for the
MIL-HDBK-217FN2 parts count method.
:cvar int subcategory: default value: 30
:ivar int specification: index of the specification applicable to the
resistor.
:ivar int style: index of the resistor style.
Covers specifications MIL-R-26 and MIL-R-39007.
Hazard Rate Models:
# MIL-HDBK-217F, section 9.6
"""
# MIL-HDK-217F hazard rate calculation variables.
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
_lst_piR = [[[1.0, 1.0, 1.2, 1.2, 1.6, 1.6, 1.6, 0.0],
[1.0, 1.0, 1.0, 1.2, 1.6, 1.6, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0, 1.2, 1.2, 1.2, 1.6],
[1.0, 1.2, 1.6, 1.6, 0.0, 0.0, 0.0, 0.0],
[1.0, 1.6, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 1.6, 1.6, 0.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.1, 1.2, 1.2, 1.6, 0.0, 0.0],
[1.0, 1.0, 1.4, 0.0, 0.0, 0.0, 0.0, 0.0]],
[[1.0, 1.0, 1.0, 1.0, 1.2, 1.6],
[1.0, 1.0, 1.0, 1.2, 1.6, 0.0],
[1.0, 1.0, 1.2, 1.6, 0.0, 0.0],
[1.0, 1.0, 1.0, 2.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 2.0, 0.0, 0.0],
[1.0, 1.0, 1.2, 2.0, 0.0, 0.0],
[1.0, 1.2, 1.4, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.6, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.2, 2.0, 0.0, 0.0],
[1.0, 1.0, 1.2, 1.6, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.4, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.2, 0.0, 0.0],
[1.0, 1.0, 1.4, 0.0, 0.0, 0.0],
[1.0, 1.2, 1.6, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.4, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.4, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.4, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.4, 0.0, 0.0],
[1.0, 1.0, 1.2, 1.5, 0.0, 0.0],
[1.0, 1.0, 1.2, 1.6, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.4, 1.6, 0.0],
[1.0, 1.0, 1.0, 1.4, 1.6, 2.0],
[1.0, 1.0, 1.0, 1.4, 1.6, 2.0],
[1.0, 1.0, 1.4, 2.4, 0.0, 0.0],
[1.0, 1.0, 1.2, 2.6, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 0.0, 0.0, 0.0, 0.0],
[1.0, 1.2, 1.4, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.2, 1.6, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.6, 0.0, 0.0],
[1.0, 1.0, 1.4, 0.0, 0.0, 0.0],
[1.0, 1.2, 1.5, 0.0, 0.0, 0.0],
[1.0, 1.2, 0.0, 0.0, 0.0, 0.0]]]
_lst_piE = [1.0, 2.0, 10.0, 5.0, 16.0, 4.0, 8.0, 9.0, 18.0, 23.0, 0.3,
13.0, 34.0, 610.0]
_lst_piQ_count = [0.03, 0.1, 0.3, 1.0, 3.0, 10.0]
_lst_piQ_stress = [0.03, 0.1, 0.3, 1.0, 5.0, 15.0]
_lambdab_count = [[0.014, 0.031, 0.16, 0.077, 0.26, 0.073, 0.15, 0.19,
0.39, 0.42, 0.0042, 0.21, 0.62, 9.4],
[0.013, 0.028, 0.15, 0.070, 0.24, 0.065, 0.13, 0.18,
0.35, 0.38, 0.0038, 0.19, 0.56, 8.6]]
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
subcategory = 30 # Subcategory ID in rtkcom DB.
    def __init__(self):
        """
        Method to initialize a Wirewound Power resistor data model instance.
        """
        super(WirewoundPower, self).__init__()
        # Parts count base hazard rate row; selected from _lambdab_count by
        # specification in calculate_part.
        self._lst_lambdab_count = []
        self.specification = 0      # 1-based index: 1 = MIL-R-39007, 2 = MIL-R-26
        self.style = 0              # 1-based resistor style index into _lst_piR
def set_attributes(self, values):
"""
Method to set the Wirewound Power resistor data model attributes.
:param tuple values: tuple of values to assign to the instance
attributes.
:return: (_code, _msg); the error code and error message.
:rtype: tuple
"""
_code = 0
_msg = ''
(_code, _msg) = Resistor.set_attributes(self, values)
try:
self.specification = int(values[117])
self.style = int(values[118])
except IndexError as _err:
_code = Utilities.error_handler(_err.args)
_msg = "ERROR: Insufficient input values."
except(TypeError, ValueError) as _err:
_code = Utilities.error_handler(_err.args)
_msg = "ERROR: Converting one or more inputs to correct data type."
return(_code, _msg)
def get_attributes(self):
"""
Method to retrieve the current values of the Wirewound Power resistor
data model attributes.
:return: (specification, style)
:rtype: tuple
"""
_values = Resistor.get_attributes(self)
_values = _values + (self.specification, self.style)
return _values
    def calculate_part(self):
        """
        Method to calculate the hazard rate for the Wirewound Power resistor
        data model.

        Parts count (hazard_rate_type == 1) selects the base hazard rate row
        for the specification; parts stress (hazard_rate_type == 2) builds
        the lambdab * piR * piQ * piE model.

        :return: False if successful or True if an error is encountered.
        :rtype: bool
        """
        # WARNING: Refactor calculate_part; current McCabe Complexity metric = 19.
        from math import exp
        self.hazard_rate_model = {}
        if self.hazard_rate_type == 1:
            # Parts count: pick the base hazard rate row for the selected
            # specification (1 = MIL-R-39007, 2 = MIL-R-26).
            self._lst_lambdab_count = self._lambdab_count[self.specification - 1]
        elif self.hazard_rate_type == 2:
            self.hazard_rate_model['equation'] = 'lambdab * piR * piQ * piE'
            # Base hazard rate: exponential in active temperature and in the
            # power stress ratio S = operating / rated power.
            _stress = self.operating_power / self.rated_power
            try:
                self.base_hr = \
                    0.00148 * \
                    exp(((self.temperature_active + 273.0) / 298.0)**2.0) * \
                    exp((_stress / 0.5) * ((self.temperature_active + 273.0) /
                                           273.0))
                self.hazard_rate_model['lambdab'] = self.base_hr
            except OverflowError:
                # TODO: Handle overflow error.
                return True
            # Resistance factor: select the piR range bin from the
            # resistance value; bin boundaries depend on the specification.
            if self.specification == 1:                 # MIL-R-39007
                if self.resistance <= 500.0:
                    self.piR = self._lst_piR[self.specification - 1][self.style - 1][0]
                elif self.resistance > 500.0 and self.resistance <= 1000.0:
                    self.piR = self._lst_piR[self.specification - 1][self.style - 1][1]
                elif self.resistance > 1000.0 and self.resistance <= 5000.0:
                    self.piR = self._lst_piR[self.specification - 1][self.style - 1][2]
                elif self.resistance > 5000.0 and self.resistance <= 7500.0:
                    self.piR = self._lst_piR[self.specification - 1][self.style - 1][3]
                elif self.resistance > 7500.0 and self.resistance <= 10000.0:
                    self.piR = self._lst_piR[self.specification - 1][self.style - 1][4]
                elif self.resistance > 10000.0 and self.resistance <= 15000.0:
                    self.piR = self._lst_piR[self.specification - 1][self.style - 1][5]
                elif self.resistance > 15000.0 and self.resistance <= 20000.0:
                    self.piR = self._lst_piR[self.specification - 1][self.style - 1][6]
                elif self.resistance > 20000.0:
                    self.piR = self._lst_piR[self.specification - 1][self.style - 1][7]
            elif self.specification == 2:               # MIL-R-26
                if self.resistance <= 100.0:
                    self.piR = self._lst_piR[self.specification - 1][self.style - 1][0]
                elif self.resistance > 100.0 and self.resistance <= 1000.0:
                    self.piR = self._lst_piR[self.specification - 1][self.style - 1][1]
                elif self.resistance > 1000.0 and self.resistance <= 10000.0:
                    self.piR = self._lst_piR[self.specification - 1][self.style - 1][2]
                elif self.resistance > 10000.0 and self.resistance <= 100000.0:
                    self.piR = self._lst_piR[self.specification - 1][self.style - 1][3]
                elif(self.resistance > 100000.0 and
                     self.resistance <= 150000.0):
                    self.piR = self._lst_piR[self.specification - 1][self.style - 1][4]
                elif(self.resistance > 150000.0 and
                     self.resistance <= 200000.0):
                    self.piR = self._lst_piR[self.specification - 1][self.style - 1][5]
            # NOTE(review): for MIL-R-26 a resistance above 200 kOhm leaves
            # self.piR at its previous value -- confirm whether that range
            # should be rejected or assigned a factor.
            self.hazard_rate_model['piR'] = self.piR
        return Resistor.calculate_part(self)
class WirewoundChassisMount(Resistor):
    """
    The Wirewound Chassis Mount Power resistor data model contains the
    attributes and methods of a Wirewound Chassis Mount Power resistor.  The
    attributes of a Wirewound Chassis Mount Power resistor are:

    :cvar list _lst_piR: list of MIL-HDBK-217FN2 resistance factor values.
    :cvar list _lst_piE: list of MIL-HDBK-217FN2 operating environment factor
                         values.
    :cvar list _lst_piQ_count: list of quality factor values for the
                               MIL-HDBK-217FN2 parts count method.
    :cvar list _lst_piQ_stress: list of quality factor values for the
                                MIL-HDBK-217FN2 parts stress method.
    :cvar list _lst_lambdab_count: list of base hazard rate values for the
                                   MIL-HDBK-217FN2 parts count method.
    :cvar int subcategory: default value: 31

    :ivar int characteristic: 1-based index selecting the piR table.
    :ivar int style: 1-based index selecting the piR row.

    Covers specifications MIL-R-18546 and MIL-R-39009.

    Hazard Rate Models:
        # MIL-HDBK-217F, section 9.7
    """

    # MIL-HDK-217F hazard rate calculation variables.

    # ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
    # piR lookup indexed as _lst_piR[characteristic - 1][style - 1][bin],
    # where the bin is chosen from the resistance range in calculate_part.
    # A value of 0.0 marks an undefined range bin.
    _lst_piR = [[[1.0, 1.2, 1.2, 1.6, 0.0, 0.0],
                 [1.0, 1.0, 1.2, 1.6, 0.0, 0.0],
                 [1.0, 1.0, 1.2, 1.2, 1.6, 0.0],
                 [1.0, 1.0, 1.0, 1.1, 1.2, 1.6],
                 [1.0, 1.0, 1.0, 1.0, 1.2, 1.6],
                 [1.0, 1.0, 1.0, 1.0, 1.2, 1.6]],
                [[1.0, 1.2, 1.6, 0.0, 0.0, 0.0],
                 [1.0, 1.2, 1.6, 0.0, 0.0, 0.0],
                 [1.0, 1.0, 1.2, 1.6, 0.0, 0.0],
                 [1.0, 1.0, 1.1, 1.2, 1.4, 0.0],
                 [1.0, 1.0, 1.0, 1.2, 1.6, 0.0],
                 [1.0, 1.0, 1.0, 1.1, 1.4, 0.0]]]
    # Operating environment factor (piE), one entry per environment index.
    _lst_piE = [1.0, 2.0, 10.0, 5.0, 16.0, 4.0, 8.0, 9.0, 18.0, 23.0, 0.5,
                13.0, 34.0, 610.0]
    # Quality factors for the parts count and parts stress methods.
    _lst_piQ_count = [0.03, 0.1, 0.3, 1.0, 3.0, 10.0]
    _lst_piQ_stress = [0.03, 0.1, 0.3, 1.0, 5.0, 15.0]
    # Parts count base hazard rates, one entry per environment index.
    _lst_lambdab_count = [0.008, 0.18, 0.096, 0.045, 0.15, 0.044, 0.088, 0.12,
                          0.24, 0.25, 0.004, 0.13, 0.37, 5.5]
    # ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----

    subcategory = 31                        # Subcategory ID in rtkcom DB.

    def __init__(self):
        """
        Method to initialize a Wirewound Chassis Mount Power resistor data
        model instance.
        """
        super(WirewoundChassisMount, self).__init__()
        self.characteristic = 0     # 1-based index into _lst_piR
        self.style = 0              # 1-based index into _lst_piR[characteristic]

    def set_attributes(self, values):
        """
        Method to set the Wirewound Chassis Mount Power resistor data model
        attributes.

        :param tuple values: tuple of values to assign to the instance
                             attributes.  Indices 117 and 118 hold the
                             characteristic and style, respectively.
        :return: (_code, _msg); the error code and error message.
        :rtype: tuple
        """
        # Capture the parent's return so errors raised while setting the
        # common Resistor attributes are not silently discarded (previously
        # the return value was thrown away and (0, '') was always reported).
        (_code, _msg) = Resistor.set_attributes(self, values)
        try:
            self.characteristic = int(values[117])
            self.style = int(values[118])
        except IndexError as _err:
            _code = Utilities.error_handler(_err.args)
            _msg = "ERROR: Insufficient input values."
        except(TypeError, ValueError) as _err:
            _code = Utilities.error_handler(_err.args)
            _msg = "ERROR: Converting one or more inputs to correct data type."
        return(_code, _msg)

    def get_attributes(self):
        """
        Method to retrieve the current values of the Wirewound Chassis Mount
        Power resistor data model attributes.

        :return: the base Resistor attributes followed by
                 (characteristic, style).
        :rtype: tuple
        """
        _values = Resistor.get_attributes(self)
        _values = _values + (self.characteristic, self.style)
        return _values

    def calculate_part(self):
        """
        Method to calculate the hazard rate for the Wirewound Chassis Mount
        Power resistor data model.

        :return: False if successful or True if an error is encountered.
        :rtype: bool
        """
        from math import exp
        self.hazard_rate_model = {}
        if self.hazard_rate_type == 2:
            self.hazard_rate_model['equation'] = 'lambdab * piR * piQ * piE'
            # Base hazard rate: exponential in active temperature and in the
            # power stress ratio S = operating / rated power.
            # NOTE(review): the sibling WirewoundPower model divides the
            # stress ratio by a positive constant; confirm the negative
            # -.466 divisor here against MIL-HDBK-217F section 9.7 before
            # changing it.
            _stress = self.operating_power / self.rated_power
            try:
                self.base_hr = 0.00015 * \
                               exp(2.64 * ((self.temperature_active + 273.0) /
                                           273.0)) * \
                               exp((_stress / -.466) * ((self.temperature_active +
                                                         273.0) / 273.0))
                self.hazard_rate_model['lambdab'] = self.base_hr
            except OverflowError:
                # Guard against math range errors for extreme inputs,
                # consistent with the WirewoundPower model.
                return True
            # Resistance factor: select the piR range bin.
            if self.resistance <= 500.0:
                self.piR = self._lst_piR[self.characteristic - 1][self.style - 1][0]
            elif 500.0 < self.resistance <= 1000.0:
                self.piR = self._lst_piR[self.characteristic - 1][self.style - 1][1]
            elif 1000.0 < self.resistance <= 5000.0:
                self.piR = self._lst_piR[self.characteristic - 1][self.style - 1][2]
            elif 5000.0 < self.resistance <= 10000.0:
                self.piR = self._lst_piR[self.characteristic - 1][self.style - 1][3]
            elif 10000.0 < self.resistance <= 20000.0:
                self.piR = self._lst_piR[self.characteristic - 1][self.style - 1][4]
            else:                   # resistance > 20000.0
                self.piR = self._lst_piR[self.characteristic - 1][self.style - 1][5]
            self.hazard_rate_model['piR'] = self.piR
        return Resistor.calculate_part(self)
| StarcoderdataPython |
189537 | <filename>setup.py
from setuptools import (
setup,
find_packages,
)
# Trove classifiers describing project maturity and supported interpreters.
_CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3.8',
    'Programming Language :: Python :: Implementation :: CPython',
    'Programming Language :: Python :: Implementation :: PyPy',
    'Topic :: Software Development :: Libraries :: Python Modules',
]

setup(
    name='lookuper',
    use_scm_version=True,               # version derived from SCM tags
    description='Lookup nested data structures',
    long_description=open('README.rst').read(),
    url='https://github.com/cr3/lookuper',
    author='<NAME>',
    author_email='<EMAIL>',
    setup_requires=['setuptools_scm'],
    packages=find_packages(where='src'),
    package_dir={'': 'src'},            # src/ layout
    license='MIT',
    keywords='lookup nested',
    classifiers=_CLASSIFIERS,
)
| StarcoderdataPython |
3291755 | #! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
import re, os
from xml.dom.minidom import parse, parseString
from DdlCommonInterface import DdlCommonInterface, g_dbTypes
from OracleInterface import DdlOracle
from PostgreSQLInterface import DdlPostgres
from MySqlInterface import DdlMySql
from FirebirdInterface import DdlFirebird
__author__ = "<NAME> (scott_kirkwood at berlios.com)"
__keywords__ = ['XML', 'DML', 'SQL', 'Databases', 'Agile DB', 'ALTER', 'CREATE TABLE', 'GPL']
__licence__ = "GNU Public License (GPL)"
__url__ = 'http://xml2dml.berlios.de'
__version__ = "$Revision$"
def attribsToDict(node):
    """Return the attributes of *node* as a plain {name: value} dict."""
    attrs = node.attributes
    # NamedNodeMap has no iteration protocol; index through item().
    return dict((attrs.item(ix).name, attrs.item(ix).value)
                for ix in range(attrs.length))
def createDdlInterface(strDbms):
    """ Return the DDL interface object matching *strDbms*.

    Here we use the letter/envelope paradigm to create the class of the
    right type, dispatching on the dbms name prefix (e.g. 'postgres8').

    :param strDbms: dbms name; expected to start with one of 'postgres',
        'mysql', 'firebird' or 'oracle'.
    """
    if strDbms.lower() not in g_dbTypes:
        # Warn but continue: a versioned name such as 'postgres8' may not be
        # listed verbatim yet still match a prefix below.  (Parenthesized
        # print is valid in both Python 2 and 3; the old print statement was
        # Python 2 only.)
        print("Unknown dbms %s" % strDbms)

    if strDbms.startswith('postgres'):
        return DdlPostgres(strDbms)
    elif strDbms.startswith('mysql'):
        return DdlMySql()
    elif strDbms.startswith('firebird'):
        return DdlFirebird()
    elif strDbms.startswith('oracle'):
        return DdlOracle(strDbms)
    else:
        # No prefix matched: programming error upstream.
        assert(False)
if __name__ == "__main__":
    import os, sys
    # The self-test suite lives in ../tests and expects to run from there.
    sys.path += ['../tests']
    from diffXml2DdlTest import doTests
    os.chdir('../tests')
    doTests()
| StarcoderdataPython |
1615492 | <gh_stars>0
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D
from keras.layers import Flatten, Dense, Reshape, Dropout, Activation
from keras.layers import SpatialDropout2D
from keras.regularizers import l1
from keras.models import Model
def create_model():
    """Build a small CNN classifier for 28x28x1 inputs (two conv/pool
    stages followed by a dense head with a 10-way softmax output)."""
    inputs = Input(shape=(28, 28, 1))

    # First convolutional stage.
    net = Conv2D(10, (5, 5))(inputs)
    net = MaxPooling2D((2, 2))(net)
    net = Activation('relu')(net)

    # Second convolutional stage with channel-wise dropout.
    net = Conv2D(20, (5, 5))(net)
    net = SpatialDropout2D(0.5)(net)
    net = MaxPooling2D((2, 2))(net)
    net = Activation('relu')(net)

    # Dense classification head.
    net = Flatten()(net)
    net = Dense(50)(net)
    net = Activation('relu')(net)
    net = Dropout(0.5)(net)
    net = Dense(10)(net)
    outputs = Activation('softmax')(net)

    return Model(inputs, outputs)
| StarcoderdataPython |
58129 | <gh_stars>0
# standard libraries
import argparse
from collections import defaultdict, OrderedDict
import copy
import glob
import os
import csv
from pathlib import Path
from typing import Tuple
# third-party libraries
import editdistance
import torch
import tqdm
# project libraries
from evaluate.eval import run_eval
import speech.loader
from speech.models.ctc_decoder import decode as ctc_decode
from speech.models.ctc_model_train import CTC_train as CTC_train
from speech.utils.data_helpers import lexicon_to_dict, path_to_id, text_to_phonemes
from speech.utils.io import get_names, load_config, load_state_dict, read_data_json, read_pickle
from speech.utils.visual import print_nonsym_table
def eval1(config:dict)->None:
    """
    This function produces a formatted output used to compare the predictions of a set of models
    against the phonemes in the target and guess for each utterance. This output is used to
    determine if the models are correctly labelling mispronunciations.

    Config contains:
        models: contains dict of models with name, path, tag, and model_name for the
            speech.utils.io.get_names function
        dataset_path (str): path to evaluation dataset (.tsv or .json)
        output_path (str): path where the formatted txt file will be saved
        lexicon_path (str): path to lexicon used to convert words in target and guess to phonemes
        n_top_beams (int): number of beams output from the ctc_decoder
    Return:
        None
    """
    # unpack the config
    model_params = config['models']
    dataset_path = Path(config['dataset_path'])
    output_path = config['output_path']
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # load the models and preproc objects
    print(f"model_params contains: {model_params}")
    model_preproc = {
        model_name: _load_model(params, device) for model_name, params in model_params.items()
    }
    # build the per-utterance output skeleton from the dataset metadata
    if dataset_path.suffix == ".tsv":
        output_dict = output_dict_from_tsv(dataset_path, config['lexicon_path'])
    elif dataset_path.suffix == ".json":
        output_dict = output_dict_from_json(dataset_path)
    else:
        raise ValueError(f"dataset extension must be '.tsv' or '.json'. Recieved: {dataset_path.suffix}")
    # directory where audio paths are stored
    # assumes audio files live in <dataset_dir>/audio/<rec_id>.wav -- TODO confirm
    audio_dir = dataset_path.parent.joinpath("audio")
    # loop through each output file and perform inference on each model
    for rec_id in tqdm.tqdm(output_dict.keys()):
        audio_path = audio_dir.joinpath(rec_id).with_suffix(".wav")
        dummy_target = []   # dummy target list fed into the preprocessor, not used
        output_dict[rec_id]['infer'] = {}   # initialize a dict for the inference outputs
        for model_name, (model, preproc) in model_preproc.items():
            with torch.no_grad():   # no gradients calculated to speed up inference
                inputs, dummy_target = preproc.preprocess(str(audio_path), dummy_target)
                inputs = torch.FloatTensor(inputs)
                inputs = torch.unsqueeze(inputs, axis=0).to(device)  # add the batch dim and push to `device`
                probs, _ = model(inputs, softmax=True)  # don't need rnn_args output in `_`
                probs = probs.data.cpu().numpy().squeeze()  # convert to numpy and remove batch-dim
                top_beams = ctc_decode(probs,
                                       beam_size=3,
                                       blank=model.blank,
                                       n_top_beams=config['n_top_beams']
                )
                # map each beam's predictions to phoneme strings, keeping the
                # beam confidence alongside
                top_beams = [(preproc.decode(preds), probs) for preds, probs in top_beams]
                output_dict[rec_id]['infer'].update({model_name: top_beams})
    write_output_dict(output_dict, output_path)
def write_output_dict(output_dict, output_path)->None:
    """Write the per-utterance predictions and PER values to a txt file.

    :param output_dict: dict keyed by record id with 'header',
        'reference_phones' and 'infer' entries (see output_dict_from_tsv).
    :param output_path: path of the formatted text file to write.
    """
    # sort the dictionary for ease of matching audio files with the output
    ordered_records = OrderedDict(sorted(output_dict.items()))
    # running per-model totals used for the dataset-level PER summary
    per_totals = defaultdict(lambda: {"total_diff": 0, "total_phones": 0})
    with open(output_path, 'w') as out_file:
        for rec_id, record in ordered_records.items():
            out_file.write(f"rec_id:\t\t\t{rec_id}\n")
            # header fields (target/guess text and phones, or labels)
            for name, values in record['header'].items():
                out_file.write(f"{name}:\t\t\t{values}\n")
            # predictions from each model; one line per search beam
            for model_name, beams in record['infer'].items():
                for beam_ix, (preds, confid) in enumerate(beams):
                    phone_diff = editdistance.eval(record['reference_phones'], preds)
                    n_ref_phones = len(record['reference_phones'])
                    per_totals[model_name]['total_diff'] += phone_diff
                    per_totals[model_name]['total_phones'] += n_ref_phones
                    per = phone_diff / n_ref_phones
                    if beam_ix == 0:
                        # first (top) beam carries the model name
                        out_file.write(f"{model_name}:\t({round(per, 2)})\t{' '.join(preds)}\n")
                    else:
                        out_file.write(f"\t \t({round(per, 2)})\t{' '.join(preds)}\n")
            out_file.write("\n\n")
        # dataset-level PER summary
        out_file.write("Dataset PER Values\n")
        out_file.write("------------------\n")
        for model_name, totals in per_totals.items():
            per = round(totals['total_diff'] / totals['total_phones'], 3)
            out_file.write(f"{model_name}\t{per}\n")
        out_file.write("------------------\n")
def output_dict_from_tsv(tsv_dataset_path:str, lexicon_path:str)->dict:
    """Build the formatted output dict from a tsv dataset path.

    The tsv header is: "id", "target", "guess", "lessonId", "lineId",
    "uid", "redWords_score", "date"; only the first three columns are used.
    """
    lexicon = lexicon_to_dict(lexicon_path)
    formatted = {}
    with open(tsv_dataset_path, 'r') as tsv_file:
        reader = csv.reader(tsv_file, delimiter='\t')
        next(reader)                    # discard the header row
        for row in reader:
            record_id, target, guess = row[0], row[1], row[2]
            # text_to_phonemes returns a list of string-phonemes
            target_phones = text_to_phonemes(target, lexicon, unk_token="<UNK>")
            guess_phones = text_to_phonemes(guess, lexicon, unk_token="<UNK>")
            formatted[record_id] = {
                "header": {
                    "target": target,
                    "guess": guess,
                    "tar_pn": " ".join(target_phones),
                    "ges_pn": " ".join(guess_phones)
                },
                "reference_phones": target_phones   # used for PER calculation
            }
    return formatted
def output_dict_from_json(json_dataset_path:str)->dict:
    """Build the formatted output dict from a json dataset path."""
    examples = read_data_json(json_dataset_path)
    # one entry per example, keyed by the audio file's record id
    return {
        path_to_id(example['audio']): {
            "header": {
                "labels": " ".join(example['text'])
            },
            "reference_phones": example['text']     # used for PER calculation
        }
        for example in examples
    }
def _load_model(model_params:str, device)->Tuple[torch.nn.Module, speech.loader.Preprocessor]:
    """Load a model and its preprocessing object, ready for evaluation.

    :param model_params: dict with 'path', 'tag' and 'filename' entries for
        speech.utils.io.get_names.
    :param device: torch processing device.
    :return: (model, preproc) with both switched to eval mode.
    """
    model_path, preproc_path, config_path = get_names(
        model_params['path'],
        tag=model_params['tag'],
        get_config=True,
        model_name=model_params['filename']
    )
    # load the pickled preprocessor and bring it up to the current class
    # definition before use
    preproc = read_pickle(preproc_path)
    preproc.update()
    # model config; blank_idx lives in the preproc section of the config
    config = load_config(config_path)
    model_config = config['model']
    model_config.update({'blank_idx': config['preproc']['blank_idx']})
    # build the network and restore its weights
    model = CTC_train(
        preproc.input_dim,
        preproc.vocab_size,
        model_config
    )
    model.load_state_dict(load_state_dict(model_path, device=device))
    model.to(device)
    # switch both objects to evaluation mode
    model.set_eval()
    preproc.set_eval()
    return model, preproc
def eval2(config:dict)->None:
    """This function prints a table with a set of models as rows and a set of datasets
    as columns where the values in the table are the PER's for each row-column pair.
    The table is printed to stdout (nothing is written to disk).

    Config contains:
        models (dict): dict with model names as keys and values of path, tag, and model_name
            for the `get_names` function
        datasets (dict): dict with dataset names as keys and dataset paths as values
    Return:
        None
    """
    # unpack the config.  Note: the previously-read 'output_path' and the
    # torch device were never used in this function and have been removed.
    model_params = config['models']
    datasets = config['datasets']
    print(f"model_params contains: {model_params}")
    # dict to contain all of the per values for each model
    per_dict = dict()
    # loop through the models and datasets to calculate a per for each combo
    for model_name, params in model_params.items():
        per_dict[model_name] = dict()   # initialize the new key in the per_dict
        print(f"calculating per for model: {model_name}")
        for data_name, data_path in datasets.items():
            print(f"calculating per for dataset: {data_name}")
            per = run_eval(
                model_path=params['path'],
                dataset_json = data_path,
                batch_size = 1,
                tag = params['tag'],
                model_name = params['filename']
            )
            print(f"PER value is: {per}")
            per_dict[model_name][data_name] = per
    print("full per_dict values: ")
    print(per_dict)
    print_nonsym_table(per_dict, title="PER values", row_name="Data\\Model")
def format_w2v_hypos(config)->None:
    """This function takes in a directory with predictions (hypos) from a wav2vec2.0 model
    and writes a file in the format of the mispronunciation evaluation (eval1).

    Config contains:
        metadata_tsv_path: path to speak metadata tsv path
        w2v_tsv_path: path to wav2vec tsv file
        models: dict mapping model names to their hypothesis file paths
        output_path: path of the formatted text file to write
        lexicon_path: lexicon used to phonemize the metadata target/guess
    """
    metadata_tsv_path = config['metadata_tsv_path']
    w2v_tsv_path = config['w2v_tsv_path']
    models = config['models']
    output_path = config['output_path']
    output_dict = output_dict_from_tsv(metadata_tsv_path, config['lexicon_path'])
    model_hypos = list()
    hypo_len = None
    for model_name, hypo_path in models.items():
        # reference file sits next to the hypothesis file with 'ref.' naming
        tgt_path = hypo_path.replace("hypo.", "ref.")
        with open(hypo_path, 'r') as hypo_f, open(tgt_path, 'r') as tgt_f:
            # splits and removes '(None 36)' at the end of the line
            # NOTE(review): [:-1] drops only the final whitespace-separated
            # token -- confirm the trailing id is a single token.
            hypos = [line.strip().split()[:-1] for line in hypo_f]
            tgts = [line.strip().split()[:-1] for line in tgt_f]
            # check that all hypos are the same length
            if hypo_len is None:
                hypo_len = len(hypos)
            assert hypo_len == len(hypos) == len(tgts), "hypos or targets are not the same length"
            # need to make mapping from targets to hypos as the ordering of hypos doesn't
            # match the file ordering of the `w2v_tsv` file
            tgts_to_hypos = {tuple(tgt): hypo for tgt, hypo in zip(tgts, hypos)}
            model_hypos.append((model_name, tgts_to_hypos ))
    filtered_output_dict = {}
    # the companion .phn file holds the phoneme targets, line-aligned with
    # the audio paths in the .tsv file
    w2v_phn_path = w2v_tsv_path.replace(".tsv", ".phn")
    with open(w2v_tsv_path, 'r') as tsv_f, open(w2v_phn_path, 'r') as phn_f:
        root = next(tsv_f).strip()      # first tsv line is the audio root dir
        tsv_f = [line.strip().split()[0] for line in tsv_f]
        phn_f = [line.strip().split() for line in phn_f]
        assert len(tsv_f) == len(phn_f) == hypo_len, "tsv file and hypos are not same length"
        for i, (sub_path, phones) in enumerate(zip(tsv_f, phn_f)):
            full_path = os.path.join(root, sub_path)
            rec_id = path_to_id(full_path)
            # keep only the records present in the wav2vec tsv file
            filtered_output_dict[rec_id] = output_dict[rec_id]
            filtered_output_dict[rec_id]['infer'] = {}
            dummy_conf = -1.0 # placeholder confidence value
            for model_name, tgts_to_hypos in model_hypos:
                # look up each model's hypothesis by its phoneme target
                matching_hypo = tgts_to_hypos[tuple(phones)]
                filtered_output_dict[rec_id]['infer'][model_name] = [(matching_hypo, dummy_conf)]
    write_output_dict(filtered_output_dict, output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Eval a speech model."
    )
    parser.add_argument(
        "--config", help="Path to config file containing the necessary inputs"
    )
    args = parser.parse_args()
    config = load_config(args.config)
    # dispatch on the eval_type declared in the config file
    if config['eval_type'] == "eval1":
        eval1(config)
    elif config['eval_type'] == 'eval2':
        eval2(config)
    elif config['eval_type'] == 'format_w2v_hypos':
        format_w2v_hypos(config)
    else:
        raise ValueError(f'eval types must be either "eval1" or "eval2", not {config["eval_type"]}')
| StarcoderdataPython |
3396211 | import matplotlib.pyplot as plt
import numpy as np
import argparse
def plot_iq(iq, np_data_type, frequency, sample_rate):
    """Plots the power spectrum of the given I/Q data.

    Args:
        iq: String containing alternating I/Q pairs (e.g. IQIQIQIQIQ..etc)
        np_data_type: numpy dtype to interpret the data as. Rather than
            dealing with a complex type, just treat each I and Q as same
            type (e.g 32 bit complex float is just 32 bit float)
        frequency: center frequency in Hz, used to center the x axis.
        sample_rate: sample rate in Hz, used to span the x axis.
    """
    #Convert to Numpy array
    # NOTE(review): np.fromstring is deprecated for binary input; moving to
    # np.frombuffer would require reading the file in binary mode upstream.
    iq_array = np.fromstring(iq, dtype = np_data_type)
    #Get Power (Power = I^2 + Q^2); even indices are I, odd indices are Q
    all_i = iq_array[::2] ** 2
    all_q = iq_array[1::2] ** 2
    pwr_array = np.add(all_i, all_q)
    #Take FFT of the power sequence, truncated/zero-padded to fft_bins
    fft_bins = 1024
    fft_array = np.fft.fft(pwr_array, fft_bins)
    #Shift FFT so the zero-frequency bin sits at the center of the plot
    fft_array = np.fft.fftshift(fft_array)
    db_array = 20*np.log10(abs(fft_array))
    #Fill the x axis with correct frequency values
    x_vals = np.linspace(frequency - sample_rate/2.0, frequency + sample_rate /2.0, num=fft_bins)
    #Plot dB values
    plt.plot(x_vals, db_array)
    #Label axes
    plt.xlabel("Frequency (Hz)")
    plt.ylabel("dB")
    #plt.ylim(min(db_array), max(db_array))
    plt.show()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("frequency", help="Center frequency of the data"
                        ,type=int)
    parser.add_argument("sample_rate", help="Sample rate of the data"
                        ,type=int)
    parser.add_argument("file_name", help="File that contains the data")
    args = parser.parse_args()
    # Removed a stray second `argparse.ArgumentParser()` that was created
    # after parse_args() and never used.
    with open(args.file_name, 'r') as data_file:
        iq_data = data_file.read()
    plot_iq(iq_data, np.float32, args.frequency, args.sample_rate)
1617049 | <reponame>sklam/sdc
# -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import pandas as pd
from sdc.tests.test_utils import *
from sdc.tests.tests_perf.test_perf_base import *
from sdc.tests.tests_perf.test_perf_utils import *
# ----------------------------------------------------------------------------
# Timed use cases for pandas.Series methods.  Each helper calls one Series
# method on ``input_data`` and returns (elapsed_seconds, result).  These
# functions are compiled with sdc.jit by TestSeriesMethods, so the bodies are
# intentionally kept to the minimal timed-call pattern.
# ----------------------------------------------------------------------------
def usecase_series_min(input_data):
    start_time = time.time()
    res = input_data.min()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_max(input_data):
    start_time = time.time()
    res = input_data.max()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_abs(input_data):
    start_time = time.time()
    res = input_data.abs()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_value_counts(input_data):
    start_time = time.time()
    res = input_data.value_counts()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_nsmallest(input_data):
    start_time = time.time()
    res = input_data.nsmallest()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_nlargest(input_data):
    start_time = time.time()
    res = input_data.nlargest()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_var(input_data):
    start_time = time.time()
    res = input_data.var()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_shift(input_data):
    start_time = time.time()
    res = input_data.shift()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_copy(input_data):
    start_time = time.time()
    res = input_data.copy()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_sum(input_data):
    start_time = time.time()
    res = input_data.sum()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_idxmax(input_data):
    start_time = time.time()
    res = input_data.idxmax()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_idxmin(input_data):
    start_time = time.time()
    res = input_data.idxmin()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_prod(input_data):
    start_time = time.time()
    res = input_data.prod()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_quantile(input_data):
    start_time = time.time()
    res = input_data.quantile()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_mean(input_data):
    start_time = time.time()
    res = input_data.mean()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_unique(input_data):
    start_time = time.time()
    res = input_data.unique()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_cumsum(input_data):
    start_time = time.time()
    res = input_data.cumsum()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_nunique(input_data):
    start_time = time.time()
    res = input_data.nunique()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_count(input_data):
    start_time = time.time()
    res = input_data.count()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_median(input_data):
    start_time = time.time()
    res = input_data.median()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_argsort(input_data):
    start_time = time.time()
    res = input_data.argsort()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_sort_values(input_data):
    start_time = time.time()
    res = input_data.sort_values()
    finish_time = time.time()
    return finish_time - start_time, res
def usecase_series_dropna(input_data):
    start_time = time.time()
    res = input_data.dropna()
    finish_time = time.time()
    return finish_time - start_time, res
# python -m sdc.runtests sdc.tests.tests_perf.test_perf_series.TestSeriesMethods
class TestSeriesMethods(TestBase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Per-use-case input sizes: the generated Series length(s) for each
        # benchmark.  Cheap O(n) reductions use very large inputs; expensive
        # operations (sorting, unique, value_counts) use smaller ones.
        cls.total_data_length = {
            'series_min': [10 ** 9],
            'series_max': [10 ** 9],
            'series_abs': [3 * 10 ** 8],
            'series_value_counts': [3 * 10 ** 5],
            'series_nsmallest': [10 ** 9],
            'series_nlargest': [10 ** 9],
            'series_var': [5 * 10 ** 8],
            'series_shift': [5 * 10 ** 8],
            'series_copy': [10 ** 8],
            'series_sum': [10 ** 9],
            'series_idxmax': [10 ** 9],
            'series_idxmin': [10 ** 9],
            'series_prod': [5 * 10 ** 8],
            'series_quantile': [10 ** 8],
            'series_mean': [10 ** 8],
            'series_unique': [10 ** 5],
            'series_cumsum': [2 * 10 ** 8],
            'series_nunique': [10 ** 5],
            'series_count': [2 * 10 ** 9],
            'series_median': [10 ** 8],
            'series_argsort': [10 ** 5],
            'series_sort_values': [10 ** 5],
            'series_dropna': [2 * 10 ** 8]
        }
    def _test_series(self, pyfunc, name, input_data=None):
        """Benchmark *pyfunc* both under sdc.jit and as plain Python.

        :param pyfunc: timed use case function taking a pandas.Series.
        :param name: key into ``total_data_length`` and the results table.
        :param input_data: optional list of template arrays; defaults to
                           ``test_global_input_data_float64``.
        """
        input_data = input_data or test_global_input_data_float64
        full_input_data_length = sum(len(i) for i in input_data)
        hpat_func = sdc.jit(pyfunc)
        for data_length in self.total_data_length[name]:
            data = perf_data_gen_fixed_len(input_data, full_input_data_length, data_length)
            test_data = pd.Series(data)
            # compilation time is measured separately from execution time
            compile_results = calc_compilation(pyfunc, test_data, iter_number=self.iter_number)
            # Warming up: first call triggers compilation and must not be timed
            hpat_func(test_data)
            exec_times, boxing_times = get_times(hpat_func, test_data, iter_number=self.iter_number)
            self.test_results.add(name, 'JIT', test_data.size, exec_times, boxing_results=boxing_times,
                                  compile_results=compile_results)
            # plain-Python reference timings for comparison
            exec_times, _ = get_times(pyfunc, test_data, iter_number=self.iter_number)
            self.test_results.add(name, 'Reference', test_data.size, test_results=exec_times)
    # Benchmarks for Series reductions and element-wise operations.
    def test_series_float_min(self):
        self._test_series(usecase_series_min, 'series_min')
    def test_series_float_max(self):
        self._test_series(usecase_series_max, 'series_max')
    def test_series_float_abs(self):
        self._test_series(usecase_series_abs, 'series_abs')
    def test_series_float_value_counts(self):
        self._test_series(usecase_series_value_counts, 'series_value_counts')
    def test_series_float_nsmallest(self):
        self._test_series(usecase_series_nsmallest, 'series_nsmallest')
    def test_series_float_nlargest(self):
        self._test_series(usecase_series_nlargest, 'series_nlargest')
    def test_series_float_var(self):
        self._test_series(usecase_series_var, 'series_var')
    def test_series_float_shift(self):
        self._test_series(usecase_series_shift, 'series_shift')
def test_series_float_copy(self):
self._test_series(usecase_series_shift, 'series_copy')
def test_series_float_sum(self):
self._test_series(usecase_series_sum, 'series_sum')
def test_series_float_idxmax(self):
self._test_series(usecase_series_idxmax, 'series_idxmax')
def test_series_float_idxmin(self):
self._test_series(usecase_series_idxmin, 'series_idxmin')
def test_series_float_prod(self):
self._test_series(usecase_series_prod, 'series_prod')
def test_series_float_quantile(self):
self._test_series(usecase_series_quantile, 'series_quantile')
def test_series_float_mean(self):
self._test_series(usecase_series_quantile, 'series_mean')
def test_series_float_unique(self):
self._test_series(usecase_series_unique, 'series_unique')
def test_series_float_cumsum(self):
self._test_series(usecase_series_cumsum, 'series_cumsum')
def test_series_float_nunique(self):
self._test_series(usecase_series_nunique, 'series_nunique')
def test_series_float_count(self):
self._test_series(usecase_series_count, 'series_count')
def test_series_float_median(self):
self._test_series(usecase_series_median, 'series_median')
def test_series_float_argsort(self):
self._test_series(usecase_series_argsort, 'series_argsort')
def test_series_float_sort_values(self):
self._test_series(usecase_series_sort_values, 'series_sort_values')
def test_series_float_dropna(self):
self._test_series(usecase_series_dropna, 'series_dropna')
| StarcoderdataPython |
4841430 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import re
from odoo import fields, models, api, _
from odoo.exceptions import ValidationError
class ResPartnerBank(models.Model):
    """Extend res.partner.bank with a US ABA routing number field."""
    _inherit = 'res.partner.bank'
    # Free-form char field; validity is enforced by the constraint below.
    aba_routing = fields.Char(string="ABA/Routing", help="American Bankers Association Routing Number")
    @api.constrains('aba_routing')
    def _check_aba_routing(self):
        """Reject any non-empty value that is not 1-9 decimal digits.

        NOTE(review): ``re.match`` with a trailing ``$`` also accepts a
        value ending in a single newline; ``re.fullmatch`` would be
        stricter — confirm whether that edge case matters here.
        """
        for bank in self:
            if bank.aba_routing and not re.match(r'^\d{1,9}$', bank.aba_routing):
                raise ValidationError(_('ABA/Routing should only contains numbers (maximum 9 digits).'))
| StarcoderdataPython |
3349017 | from flask import Flask, request, jsonify, url_for
import db
import traceback
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
@app.errorhandler(Exception)
def exception_handler(error):
    """Return any unhandled exception as a JSON 500 with its traceback lines."""
    trace_lines = traceback.format_exc().split('\n')
    body = {"message": "Internal server error", "trace": trace_lines}
    return jsonify(body), 500
def message(message, code):
    """Build a (JSON body, status code) response tuple carrying one message."""
    payload = {"message": message}
    return jsonify(payload), code
@app.route("/",methods=["GET","POST"])
def root():
"HTML client"
if request.method=="GET":
return app.send_static_file('index.html')
elif request.method=="POST":
j = request.get_json(force=True)
q = request.args
return jsonify({"json":j,"query":q})
@app.route("/swagger",methods=["GET"])
def swagger():
"Swagger client"
return app.send_static_file('swagger.html')
@app.route("/api",methods=["GET"])
def api_list():
"List API endpoints"
apilist = []
for rule in app.url_map.iter_rules():
url = str(rule)
apilist.append({"url":url,"methods":list(rule.methods),"desc":app.view_functions[rule.endpoint].__doc__})
return jsonify({"api":apilist})
@app.route("/api/conn",methods=["GET","POST"])
def conn():
"Get list of open connections, open new connection"
if request.method=="GET":
l = [{"token":token,"desc":db.conndict[token].desc} for token in db.conndict.keys()]
return jsonify(l)
elif request.method=="POST":
json = request.get_json(force=True)
connstr = json.get("conn",None);
if connstr is None:
connstr = 'scott/oracle@orcl'
desc = json.get("desc","")
token = db.open_connection(connstr,desc)
return jsonify({"token":token,"desc":desc}),201
@app.route("/api/conn/<token>",methods=["GET","POST","DELETE"])
def conn_id(token):
"Execute code within connection specified by token, close connection"
if request.method=="GET":
if token in db.conndict.keys():
c = db.conndict[token]
return jsonify({"desc":c.desc,"token":token})
else:
return message("token %s not found"%token,404)
elif request.method=="DELETE":
t = db.close_connection(token)
if t is None:
return message("token %s not found"%token,404)
return message("token %s deleted"%token,200)
elif request.method=="POST":
conn = db.get_connection(token)
if conn is None:
return message("token %s not found"%token,404)
cur = conn.cursor()
json = request.get_json(force=True)
sql = json.get("sql",None);
if sql is None:
return message("sql key not in json data",400)
invars = json.get("invars",{})
outvars = json.get("outvars",{})
fetchmax = json.get("fetchmax",500)
try:
if fetchmax is None:
fetchmax = 500
else:
fetchmax = int(fetchmax)
except ValueError:
return message("invalid fetchmax key format",400)
if fetchmax<1:
return message("number of rows to fetch should be greater than 0",400)
desc,data = db.execute_sql(cur,sql,fetchmax,invars,outvars)
cur.close()
return jsonify({"desc":desc,"data":data,"sql":sql,"fetchmax":fetchmax,"invars":invars,"outvars":outvars})
#db.open_connection('scott/oracle@orcl','First connection')
#db.open_connection('scott/oracle@orcl','Second connection')
if __name__=="__main__":
    # SECURITY(review): binding to 0.0.0.0 with debug=True exposes the
    # Werkzeug debugger to the network — development only.
    app.run(host='0.0.0.0',port=8000, debug=True)
| StarcoderdataPython |
3368880 | from mycroft.skills.core import MycroftSkill, intent_handler, intent_file_handler
from mycroft.messagebus.message import Message
class PetFish(MycroftSkill):
    """Mycroft skill showing a virtual pet fish on the GUI.

    The QML page listens to the ``pet_action`` GUI variable and plays the
    corresponding animation.
    """
    def __init__(self):
        super(PetFish, self).__init__(name="PetFish")
    def initialize(self):
        # The QML page emits this event when the user closes the screen.
        self.gui.register_handler("pet.fish.close.screen", self.handle_close_screen)
    @intent_file_handler("show_petfish.intent")
    def show_pet_fish_ui(self, message):
        # To try this: "show virtual pet fish"
        self.gui["pet_action"] = ""
        self.gui.show_page("Petfish.qml", override_idle=True)
    def handle_close_screen(self, message):
        """Tear down the fish page and release the GUI."""
        self.gui.remove_page("Petfish.qml")
        self.gui.release()
    @intent_file_handler("example_action.intent")
    def run_example_action(self, message):
        # To try this: "run example swim action"
        # List of actions (strings in lowercase):
        # ["swim", "playdead", "eatfood", "eatfish", "introduce", "dance", "sleep", "awake"]
        # Usage: self.gui["pet_action"] = "swim"
        self.gui["pet_action"] = "swim"
def create_skill():
    """Mycroft entry point: construct and return the PetFish skill."""
    return PetFish()
| StarcoderdataPython |
3291250 | #!/usr/bin/env python
import sys
def main(argv=None):
    """Convert a two-column (id, sequence) table into a FASTA file.

    argv: CLI argument list; defaults to sys.argv. argv[1] is the input
    table (whitespace-separated: identifier, sequence per line) and
    argv[2] is the FASTA file to write.
    """
    args = sys.argv if argv is None else argv
    # 'with' closes both files even on error; the original version leaked
    # the input handle and only closed the output explicitly.
    with open(args[1]) as in_file, open(args[2], 'w') as out_file:
        for line in in_file:
            vals = line.split()
            out_file.write(">" + vals[0] + "\n")
            out_file.write(vals[1] + "\n")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3201590 | """This module provides objects for managing the network balancer."""
from contextlib import contextmanager
from typing import Dict, List
from docker.models.containers import Container
from loguru import logger
from pydantic import BaseModel as Base
from .circuit import OnionCircuit
from .client import ContainerBase, ContainerOptions
from .mount import MountFile, MountPoint
HAPROXY_IMAGE = "haproxy:2.2.3"
class HAProxyOptions(Base):
    """Handles options for HAProxy docker instance.
    Attributes:
        max_connections (int): Maximum per-process number of concurrent connections.
        timeout_client (int): Maximum inactivity time on the client side.
        timeout_connect (int): Maximum time to wait for a connection attempt to a server
            to succeed.
        timeout_queue (int): Maximum time to wait in the queue for a connection slot
            to be free.
        timeout_server (int): Maximum inactivity time on the server side.
        listen_host_port (int): Frontend port to the proxy.
        backend_name (str): Name of Backend section.
        dashboard_bind_port (int): Port to open to reach the HAProxy dashboard.
        dashboard_refresh_rate (int): Refresh rate of the HAProxy dashboard page.
        onions (List[Container]): Each onion container that is connected to the whaornet.
    """
    max_connections: int = 4096
    timeout_client: int = 3600
    timeout_connect: int = 1
    timeout_queue: int = 5
    timeout_server: int = 3600
    listen_host_port: int = 8001
    backend_name: str = "onions"
    dashboard_bind_port: int = 9999
    dashboard_refresh_rate: int = 2
    # Required field — supplied by OnionBalancer at construction time.
    onions: List[Container]
    class Config:
        """Pydantic Configuration."""
        # Needed because docker's Container is not a pydantic model.
        arbitrary_types_allowed = True
    @property
    def ports(self) -> List[int]:
        """Ports which will be used to expose on the local network."""
        return [self.listen_host_port, self.dashboard_bind_port]
class Balancer(ContainerBase):
    """HAProxy Load Balancer.
    Attributes:
        haproxy_options (HAProxyOptions): HAProxy options object.
        container_options (ContainerOptions): Container options for the HA proxy instance.
    """
    haproxy_options: HAProxyOptions
    container_options: ContainerOptions = ContainerOptions(image=HAPROXY_IMAGE)
    class Config:
        """Pydantic Configuration."""
        arbitrary_types_allowed = True
    @property
    def address(self) -> str:
        """Return socks5 address to poxy requests through."""
        return f"socks5://localhost:{self.haproxy_options.listen_host_port}"
    @property
    def dashboard_address(self) -> str:
        """Return full dashboard address."""
        return f"http://localhost:{self.haproxy_options.dashboard_bind_port}"
    @property
    def proxies(self) -> Dict[str, str]:
        """Return proxies to mount onto a requests session."""
        # Both schemes go through the same socks5 frontend.
        return {
            "http": self.address,
            "https": self.address,
        }
    def add_mount_point(self, mount: MountFile) -> None:
        """Mount a volume into the HAProxy container.
        Args:
            mount (MountFile): File to mount between the container and local file system.
        """
        self.container_options.mounts.append(mount.mount)
    def display_settings(self) -> None:
        """Log config settings to stdout."""
        logger.debug(
            "\n==================="
            "\nOnion Load Balancer"
            "\n==================="
            "\n" + self.json(indent=4)
        )
        self.show_follow_logs_command()
@contextmanager
# pylint: disable=invalid-name
def OnionBalancer(onions: List[OnionCircuit], show_log: bool = False) -> Balancer:
    """Context manager which yields a started instance of an HAProxy docker container.
    Args:
        onions (List[OnionCircuit]): List of tor containers to load balance requests across.
        show_log (bool): If True shows the HAProxies logs on start and stop.
    Yields:
        Balancer: A started instance of a HAProxy docker container.
    """
    haproxy_options = HAProxyOptions(onions=onions)
    with MountPoint(
        template_name="haproxy.cfg",
        target_path="/usr/local/etc/haproxy/haproxy.cfg",
        template_variables=haproxy_options.dict(),
    ) as mount_point:
        # Construct the balancer *before* entering the try block: in the
        # original code a failure inside Balancer(...) left ``balancer``
        # unbound, so the ``finally`` clause raised UnboundLocalError and
        # masked the real exception.
        balancer = Balancer(haproxy_options=haproxy_options)
        try:
            balancer.add_mount_point(mount_point)
            for port in haproxy_options.ports:
                balancer.expose_port(port)
            balancer.start(show_log=show_log)
            balancer.display_settings()
            yield balancer
        finally:
            # Always stop the container, even if startup or the body failed.
            balancer.stop(show_log=show_log)
| StarcoderdataPython |
145712 | <filename>04-Python/lexer/lexer_test.py
import sys
sys.path.append('..')
import unittest
from lexer import new
from tokens import TokenType
class TestNextToken(unittest.TestCase):
    """Lexer test: tokenize a Monkey-language snippet and compare every
    emitted token (type and literal) against an expected sequence."""
    def test_next_token(self):
        # Source snippet exercising identifiers, keywords, operators,
        # strings, arrays and hashes (backslashes join the literal).
        input = 'let five = 5; \
                let ten = 10; \
                let add = fn(x, y) { \
                x + y; \
                }; \
                let result = add(five, ten);\
                !-/*5;\
                5 < 10 > 5;\
                if (5 < 10) { \
                return true;\
                } else {\
                return false;\
                }\
                10 == 10;\
                10 != 9; \
                "foobar" \
                "foo bar" \
                [1, 2]; \
                {"foo": "bar"} \
                '
        class ExpectedToken:
            # Simple record: the token type and literal we expect next.
            def __init__(self, type_name: TokenType, literal: str) -> None:
                self.exp_token_type = type_name
                self.exp_literal = literal
        tests = [
            ExpectedToken(TokenType.LET, 'let'),
            ExpectedToken(TokenType.IDENT, 'five'),
            ExpectedToken(TokenType.ASSIGN, '='),
            ExpectedToken(TokenType.INT, '5'),
            ExpectedToken(TokenType.SEMICOLON, ';'),
            ExpectedToken(TokenType.LET, 'let'),
            ExpectedToken(TokenType.IDENT, 'ten'),
            ExpectedToken(TokenType.ASSIGN, '='),
            ExpectedToken(TokenType.INT, '10'),
            ExpectedToken(TokenType.SEMICOLON, ';'),
            ExpectedToken(TokenType.LET, 'let'),
            ExpectedToken(TokenType.IDENT, 'add'),
            ExpectedToken(TokenType.ASSIGN, '='),
            ExpectedToken(TokenType.FUNCTION, 'fn'),
            ExpectedToken(TokenType.LPAREN, '('),
            ExpectedToken(TokenType.IDENT, 'x'),
            ExpectedToken(TokenType.COMMA, ','),
            ExpectedToken(TokenType.IDENT, 'y'),
            ExpectedToken(TokenType.RPAREN, ')'),
            ExpectedToken(TokenType.LBRACE, '{'),
            ExpectedToken(TokenType.IDENT, 'x'),
            ExpectedToken(TokenType.PLUS, '+'),
            ExpectedToken(TokenType.IDENT, 'y'),
            ExpectedToken(TokenType.SEMICOLON, ';'),
            ExpectedToken(TokenType.RBRACE, '}'),
            ExpectedToken(TokenType.SEMICOLON, ';'),
            ExpectedToken(TokenType.LET, 'let'),
            ExpectedToken(TokenType.IDENT, 'result'),
            ExpectedToken(TokenType.ASSIGN, '='),
            ExpectedToken(TokenType.IDENT, 'add'),
            ExpectedToken(TokenType.LPAREN, '('),
            ExpectedToken(TokenType.IDENT, 'five'),
            ExpectedToken(TokenType.COMMA, ','),
            ExpectedToken(TokenType.IDENT, 'ten'),
            ExpectedToken(TokenType.RPAREN, ')'),
            ExpectedToken(TokenType.SEMICOLON, ';'),
            ExpectedToken(TokenType.BANG, '!'),
            ExpectedToken(TokenType.MINUS, '-'),
            ExpectedToken(TokenType.SLASH, '/'),
            ExpectedToken(TokenType.ASTERISK, '*'),
            ExpectedToken(TokenType.INT, '5'),
            ExpectedToken(TokenType.SEMICOLON, ';'),
            ExpectedToken(TokenType.INT, '5'),
            ExpectedToken(TokenType.LT, '<'),
            ExpectedToken(TokenType.INT, '10'),
            ExpectedToken(TokenType.GT, '>'),
            ExpectedToken(TokenType.INT, '5'),
            ExpectedToken(TokenType.SEMICOLON, ';'),
            ExpectedToken(TokenType.IF, 'if'),
            ExpectedToken(TokenType.LPAREN, '('),
            ExpectedToken(TokenType.INT, '5'),
            ExpectedToken(TokenType.LT, '<'),
            ExpectedToken(TokenType.INT, '10'),
            ExpectedToken(TokenType.RPAREN, ')'),
            ExpectedToken(TokenType.LBRACE, '{'),
            ExpectedToken(TokenType.RETURN, 'return'),
            ExpectedToken(TokenType.TRUE, 'true'),
            ExpectedToken(TokenType.SEMICOLON, ';'),
            ExpectedToken(TokenType.RBRACE, '}'),
            ExpectedToken(TokenType.ELSE, 'else'),
            ExpectedToken(TokenType.LBRACE, '{'),
            ExpectedToken(TokenType.RETURN, 'return'),
            ExpectedToken(TokenType.FALSE, 'false'),
            ExpectedToken(TokenType.SEMICOLON, ';'),
            ExpectedToken(TokenType.RBRACE, '}'),
            ExpectedToken(TokenType.INT, '10'),
            ExpectedToken(TokenType.EQ, '=='),
            ExpectedToken(TokenType.INT, '10'),
            ExpectedToken(TokenType.SEMICOLON, ';'),
            ExpectedToken(TokenType.INT, '10'),
            ExpectedToken(TokenType.NOT_EQ, '!='),
            ExpectedToken(TokenType.INT, '9'),
            ExpectedToken(TokenType.SEMICOLON, ';'),
            ExpectedToken(TokenType.STRING, 'foobar'),
            ExpectedToken(TokenType.STRING, 'foo bar'),
            ExpectedToken(TokenType.LBRACKET, '['),
            ExpectedToken(TokenType.INT, '1'),
            ExpectedToken(TokenType.COMMA, ','),
            ExpectedToken(TokenType.INT, '2'),
            ExpectedToken(TokenType.RBRACKET, ']'),
            ExpectedToken(TokenType.SEMICOLON, ';'),
            ExpectedToken(TokenType.LBRACE, '{'),
            ExpectedToken(TokenType.STRING, 'foo'),
            ExpectedToken(TokenType.COLON, ':'),
            ExpectedToken(TokenType.STRING, 'bar'),
            ExpectedToken(TokenType.RBRACE, '}'),
            ExpectedToken(TokenType.EOF, ''),
        ]
        # Drive the lexer and compare each produced token to expectations.
        l = new(input)
        for i, tt in enumerate(tests):
            tok = l.next_token()
            self.assertEqual(tok.token_type, tt.exp_token_type,
                             'tests[{}] - token_type wrong, expected {}, got {}'.format(i, tt.exp_token_type,
                                                                                        tok.token_type))
            self.assertEqual(tok.literal, tt.exp_literal,
                             'tests[{}] - literal wrong, expected {}, got {}'.format(i, tt.exp_literal, tok.literal))
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
80993 | from django.db import models
from django.contrib.auth.models import User
from django.contrib import admin
class Profile(models.Model):
    """Extra per-user data keyed one-to-one to Django's auth User."""
    # NOTE(review): OneToOneField without on_delete implies Django < 2.0.
    user = models.OneToOneField(User)
    created_at = models.DateTimeField(auto_now_add=True)
    modified_at = models.DateTimeField(auto_now=True)
    handle = models.CharField(max_length=30)
    # Lightning node identity public key; uniqueness identifies the node.
    identity_pubkey = models.CharField(max_length=80, unique=True)
    # lightning_address = BitcoinAddressField()
# lightning_address = BitcoinAddressField()
class Payment(models.Model):
    """A Lightning payment from one user to another."""
    PAYMENT_STATUS_CHOICES = (
        ('pending_invoice', 'Pending Invoice'),
        ('pending_payment', 'Pending Payment'),
        ('complete', 'Complete'),
        ('error', 'Error'),
    )
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0.
    sender = models.ForeignKey(User, related_name='senders')
    recipient = models.ForeignKey(User, related_name='recipients')
    # Amount in integer units (unit not shown here — presumably satoshis;
    # confirm against callers).
    amount = models.IntegerField()
    # NOTE(review): default 'accepted' is not one of PAYMENT_STATUS_CHOICES —
    # confirm the intended default (likely 'pending_invoice').
    status = models.CharField(max_length=50, default='accepted', choices=PAYMENT_STATUS_CHOICES)
    # Lightning payment hash and encoded payment request (BOLT11 invoice).
    r_hash = models.CharField(max_length=64)
    payment_req = models.CharField(max_length=1000)
# Expose both models in the Django admin with the default ModelAdmin.
admin.site.register(Profile)
admin.site.register(Payment)
| StarcoderdataPython |
18140 | <gh_stars>10-100
"""
Compute Dice between test ground truth and predictions from groupwise registration.
"""
import os
import nibabel as nib
import glob
import numpy as np
from core import utils_2d
from core.metrics_2d import OverlapMetrics
def one_hot_label(label, label_intensity):
    """Convert an intensity-coded label map into a float32 one-hot volume.

    label: array of (possibly non-integer) intensity values; rounded first.
    label_intensity: sequence of per-class intensities, index 0 = background.
    Returns an array of shape ``label.shape + (len(label_intensity),)``.
    """
    rounded = np.around(label)
    n_class = len(label_intensity)
    one_hot = np.zeros(rounded.shape + (n_class,), dtype=np.float32)
    for class_idx in range(1, n_class):
        one_hot[..., class_idx] = (rounded == label_intensity[class_idx])
    # Background channel is everything not claimed by any foreground class.
    one_hot[..., 0] = np.logical_not(one_hot[..., 1:].sum(axis=-1))
    return one_hot
def load_nifty(name):
    """Load a NIfTI file and return its voxel data as a float32 array."""
    volume = nib.load(name)
    return np.asarray(volume.get_fdata(), np.float32)
if __name__ == '__main__':
    # Glob patterns for ground-truth labels and groupwise-registration
    # predictions (relative to this script's location in the repo).
    gt_path = '../../../../../../dataset/C0T2LGE/label_center_data/test/*label.nii.gz'
    pred_path = '../../../../../../results/MSCMR/test_predictions_1.5mm_group3_fusion15/*label.nii.gz'
    pred_names = utils_2d.strsort(glob.glob(pred_path))
    # Keep only the 'DE' (LGE) ground-truth volumes, sorted to align with
    # the predictions; the dict below pairs them positionally.
    gt_names = utils_2d.strsort([name for name in glob.glob(gt_path) if os.path.basename(name).split('_')[1] == 'DE'])
    pred_gt_names = dict(zip(pred_names, gt_names))
    print(pred_gt_names)
    average_dice = []
    myo_dice = []
    LV_dice = []
    RV_dice = []
    for name in pred_names:
        pred_label = load_nifty(name)
        # Intensities 200/500/600 code myocardium/LV/RV (see prints below).
        one_hot_pred = one_hot_label(pred_label, (0, 200, 500, 600))
        gt_label = load_nifty(pred_gt_names[name])
        # Drop ground-truth slices missing any of the three structures.
        gt_label = np.concatenate([gt for gt in np.dsplit(gt_label, gt_label.shape[-1])
                                   if np.all([np.sum(gt==i) > 0 for i in [200, 500, 600]])], axis=-1)
        one_hot_gt = one_hot_label(gt_label, (0, 200, 500, 600))
        Dice = OverlapMetrics(n_class=4, mode='np')
        dice = Dice.averaged_foreground_dice(one_hot_gt, one_hot_pred)
        m_dice = Dice.class_specific_dice(one_hot_gt, one_hot_pred, i=1)
        l_dice = Dice.class_specific_dice(one_hot_gt, one_hot_pred, i=2)
        r_dice = Dice.class_specific_dice(one_hot_gt, one_hot_pred, i=3)
        average_dice.append(dice)
        myo_dice.append(m_dice)
        LV_dice.append(l_dice)
        RV_dice.append(r_dice)
        print("Average foreground Dice for %s: %.4f" % (os.path.basename(name), dice))
        print("Myocardium Dice for %s: %.4f" % (os.path.basename(name), m_dice))
        print("LV Dice for %s: %.4f" % (os.path.basename(name), l_dice))
        print("RV Dice for %s: %.4f" % (os.path.basename(name), r_dice))
    print("Average prediction Dice: %.4f" % np.mean(average_dice))
    print("Average myocardium Dice: %.4f" % np.mean(myo_dice))
    print("Average LV Dice: %.4f" % np.mean(LV_dice))
    print("Average RV Dice: %.4f" % np.mean(RV_dice))
| StarcoderdataPython |
9483 | <reponame>chenmich/google-ml-crash-course-exercises<filename>quick_pandas.py<gh_stars>0
# Scratch script following the Google ML crash-course "quick intro to
# pandas" exercise; mostly exploratory, with intermediate prints left
# commented out.
import pandas as pd
print(pd.__version__)
city_names = pd.Series(['San Francisco', 'San Jose', 'Sacramento'])
population = pd.Series([852469, 1015785, 485199])
#city_population_table = pd.DataFrame(({'City name': city_names, 'Population': population}))
# NOTE: fetches the dataset over the network on every run.
california_houseing_dataframe = pd.read_csv("https://storage.googleapis.com/mledu-datasets/california_housing_train.csv", sep=",")
california_houseing_dataframe.describe()
california_houseing_dataframe.head()
#some error
#california_houseing_dataframe.hist('housing_median_age')
cities = pd.DataFrame({'City name': city_names, 'Population': population})
#print(type(cities['City name']))
#print(cities['City name'])
#print(type(cities['City name'][1]))
#print(cities['City name'][1])
#print(type(cities[0:2]))
#print(cities[0:2])
#print(population / 1000)
import numpy as np
np.log(population)
#print(population.apply(lambda val: val > 10000))
cities['Area square miles'] = pd.Series([46.87, 176.53, 97.92])
#print(cities)
cities['Population density'] = cities['Population'] / cities['Area square miles']
#print(cities)
print(city_names.index)
# reindex with a permutation reorders rows by the original index labels.
print(cities.reindex([2, 0, 1]))
print(cities)
113595 | <reponame>GnomGad/KworkBrowser<gh_stars>0
import sys
from src.core import execute_from_command_line
def main():
    """CLI entry point: forward the command-line args (minus argv[0])."""
    execute_from_command_line(sys.argv[1:])
    #execute_from_command_line(["get","-p","0"])
if __name__ == "__main__":
    main()
3348288 | <gh_stars>0
from django.shortcuts import render, redirect
from django.http import JsonResponse, HttpResponse
from django.contrib.auth import authenticate, login, logout
from decimal import Decimal
from django.conf import settings
import json
import datetime
from django.contrib import messages
from django.core.mail import send_mail
from django.contrib.auth.forms import UserCreationForm
from taggit.models import Tag
from .models import *
from .forms import *
from .models import *
from .filters import *
# Create your views here.
def landing(request):
    """Render the public landing page."""
    context = {}
    return render(request, 'app/landing.html', context)
def track(request):
    """Render the tracking page."""
    context = {}
    return render(request, 'app/track.html', context)
def task_admin(request):
    """Admin view listing every task, with filtering."""
    if not request.user.is_authenticated:
        return redirect("account:login")
    view = 'admin'
    tasks = Task.objects.all()
    name = "All Tasks"
    taskFilter = TaskFilter(request.GET, queryset=tasks)
    # total_tasks is the *unfiltered* count; 'tasks' below is the filtered set.
    total_tasks = tasks.count()
    tasks = taskFilter.qs
    context = {'tasks': tasks, 'name': name, 'total_tasks': total_tasks, 'filter':taskFilter, 'view':view}
    return render(request, 'app/task_admin.html',context)
def tasks(request):
    """List every task for regular users, with filtering."""
    if not request.user.is_authenticated:
        return redirect("account:login")
    view = 'tasks'
    tasks = Task.objects.all()
    name = "All Tasks"
    taskFilter = TaskFilter(request.GET, queryset=tasks)
    # total_tasks is the *unfiltered* count; 'tasks' below is the filtered set.
    total_tasks = tasks.count()
    tasks = taskFilter.qs
    context = {'tasks': tasks, 'name': name, 'total_tasks': total_tasks, 'filter':taskFilter, 'view':view}
    return render(request, 'app/tasks.html',context)
def my_tasks(request):
    """List tasks assigned to the logged-in user, with filtering."""
    if not request.user.is_authenticated:
        return redirect("account:login")
    view = 'assigned'
    name = "My Tasks"
    tasks = Task.objects.filter(assignee=request.user.owner)
    taskFilter = TaskFilter(request.GET, queryset=tasks)
    # total_tasks is the *unfiltered* count; 'tasks' below is the filtered set.
    total_tasks = tasks.count()
    tasks = taskFilter.qs
    context = {'tasks': tasks, 'name': name, 'total_tasks': total_tasks, 'filter':taskFilter, 'view':view}
    return render(request, 'app/tasks.html', context)
def created_tasks(request):
    """List tasks created by the logged-in user, with filtering."""
    if not request.user.is_authenticated:
        return redirect("account:login")
    view = 'created'
    # NOTE(review): page title "My Tasks" matches my_tasks() — possibly a
    # copy-paste; confirm whether "Created Tasks" was intended.
    name = "My Tasks"
    tasks = Task.objects.filter(creator=request.user.email)
    taskFilter = TaskFilter(request.GET, queryset=tasks)
    # total_tasks is the *unfiltered* count; 'tasks' below is the filtered set.
    total_tasks = tasks.count()
    tasks = taskFilter.qs
    context = {'tasks': tasks, 'name': name, 'total_tasks': total_tasks, 'filter':taskFilter, 'view':view}
    return render(request, 'app/tasks.html', context)
def add_task(request):
    """Show (GET) and process (POST) the task-creation form.

    NOTE(review): unlike the list views, this view has no authentication
    check — confirm whether anonymous task creation is intended.
    """
    view = 'create'
    action = 'create'
    name = "Add Task"
    # Pre-fill creator/owner from the logged-in user.
    form = TaskForm( initial={'creator': request.user.email, 'owner':request.user.owner})
    if request.method == 'POST':
        form = TaskForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            return redirect('app:tasks')
    context = {'action':action, 'form':form, 'name':name , 'view':view }
    return render(request, 'app/add_task.html', context)
def update_task(request, pk):
    """Show (GET) and process (POST) the edit form for task *pk*."""
    action = 'update'
    task = Task.objects.get(id=pk)
    form = TaskForm(instance=task, initial={'creator': task.creator, 'name': task.name,'description': task.description,'priority': task.priority,'notes': task.notes ,'status': task.status } )
    if request.method == 'POST':
        form = TaskForm(request.POST, instance=task)
        if form.is_valid():
            # Without this next line the tags won't be saved.
            form.save()
            return redirect('app:tasks')
    context = {'action':action, 'form':form,'task': task, }
    return render(request, 'app/update_task.html', context)
def view_task(request, pk):
    """Render a read-only detail page for task *pk*."""
    task = Task.objects.get(id=pk)
    initial_values = {
        'due_date': task.due_date,
        'name': task.name,
        'description': task.description,
        'priority': task.priority,
        'notes': task.notes,
        'status': task.status,
    }
    form = TaskForm(instance=task, initial=initial_values)
    return render(request, 'app/view_task.html', {'form': form, 'task': task})
def delete_task(request, pk):
    """Confirm (GET) and perform (POST) deletion of task *pk*."""
    task = Task.objects.get(id=pk)
    if request.method != 'POST':
        # GET: show the confirmation page.
        return render(request, 'app/delete_item.html', {'item': task})
    task.delete()
    return redirect('app:tasks')
| StarcoderdataPython |
49654 | <reponame>snowxmas/alipay-sdk-python-all<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.TiansuoIsvBindVO import TiansuoIsvBindVO
class AntMerchantExpandIndirectTiansuoBindModel(object):
    """Auto-generated Alipay API request model.

    Wraps a single list property and converts between wire dicts and
    typed TiansuoIsvBindVO objects.
    """
    def __init__(self):
        self._tiansuo_isv_bind_list = None
    @property
    def tiansuo_isv_bind_list(self):
        return self._tiansuo_isv_bind_list
    @tiansuo_isv_bind_list.setter
    def tiansuo_isv_bind_list(self, value):
        # Accepts a list of either TiansuoIsvBindVO instances or plain
        # dicts; dicts are coerced via from_alipay_dict. Non-list values
        # are silently ignored (generated-SDK convention).
        if isinstance(value, list):
            self._tiansuo_isv_bind_list = list()
            for i in value:
                if isinstance(i, TiansuoIsvBindVO):
                    self._tiansuo_isv_bind_list.append(i)
                else:
                    self._tiansuo_isv_bind_list.append(TiansuoIsvBindVO.from_alipay_dict(i))
    def to_alipay_dict(self):
        """Serialize to a plain dict for the Alipay gateway.

        NOTE(review): this converts list elements *in place* on self, so
        after calling, the property holds dicts instead of VO objects.
        """
        params = dict()
        if self.tiansuo_isv_bind_list:
            if isinstance(self.tiansuo_isv_bind_list, list):
                for i in range(0, len(self.tiansuo_isv_bind_list)):
                    element = self.tiansuo_isv_bind_list[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.tiansuo_isv_bind_list[i] = element.to_alipay_dict()
            if hasattr(self.tiansuo_isv_bind_list, 'to_alipay_dict'):
                params['tiansuo_isv_bind_list'] = self.tiansuo_isv_bind_list.to_alipay_dict()
            else:
                params['tiansuo_isv_bind_list'] = self.tiansuo_isv_bind_list
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a wire dict; returns None for empty input."""
        if not d:
            return None
        o = AntMerchantExpandIndirectTiansuoBindModel()
        if 'tiansuo_isv_bind_list' in d:
            o.tiansuo_isv_bind_list = d['tiansuo_isv_bind_list']
        return o
| StarcoderdataPython |
77486 | import tensorflow as tf
import os
import zipfile
from os import path, getcwd, chdir
import os
# Dataset layout: happy-or-sad/{happy,sad}/ (folder names from the zip).
# NOTE: paths are hardcoded to a local machine.
train_happy_dir = os.path.join('/Users/seanjudelyons/Downloads/happy-or-sad/happy/')
train_sad_dir = os.path.join('/Users/seanjudelyons/Downloads/happy-or-sad/sad/')
train_happy_names = os.listdir(train_happy_dir)
print(train_happy_names[:10])
train_sad_names = os.listdir(train_sad_dir)
print(train_sad_names[:10])
print('total training happy images:', len(os.listdir(train_happy_dir)))
# Bug fix: this line previously counted train_happy_dir a second time,
# so the "sad" count was actually the happy count.
print('total training sad images:', len(os.listdir(train_sad_dir)))
# GRADED FUNCTION: train_happy_sad_model
class myCallback(tf.keras.callbacks.Callback):
    """Keras callback that stops training once accuracy exceeds 99.9%."""
    def on_epoch_end(self, epoch, logs=None):
        """Check the epoch's 'acc' metric and halt training past the threshold.

        Uses ``logs=None`` instead of a shared mutable default, and
        ``.get('acc', 0)`` so a missing metric no longer raises
        TypeError (None > float) as the original did.
        """
        logs = logs or {}
        if logs.get('acc', 0) > 0.999:
            print("\nReached 99.9% accuracy so cancelling training!")
            self.model.stop_training = True
def train_happy_sad_model():
    """Build, train and sanity-check a binary happy/sad CNN.

    Trains on the local happy-or-sad dataset until 99.9% accuracy (via
    myCallback), then classifies one hardcoded test image and returns the
    final training accuracy.
    """
    callbacks = myCallback()
    # Three conv/pool stages on 150x150 RGB input, then a dense head with
    # a single sigmoid output (0 = happy class, 1 = sad class per the
    # flow_from_directory alphabetical ordering — see prediction below).
    model = tf.keras.models.Sequential([
        tf.keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=(150, 150, 3)),
        tf.keras.layers.MaxPooling2D(2, 2),
        # The second convolution
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        # The third convolution
        tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(2, 2),
        # Flatten the results to feed into a DNN
        tf.keras.layers.Flatten(),
        # 512 neuron hidden layer
        tf.keras.layers.Dense(512, activation='relu'),
        # Only 1 output neuron. It will contain a value from 0-1 where 0 for 1 class ('horses') and 1 for the other ('humans')
        tf.keras.layers.Dense(1, activation='sigmoid')])
    model.summary()
    from tensorflow.keras.optimizers import RMSprop
    # NOTE(review): the 'lr' keyword is deprecated in newer Keras in favor
    # of 'learning_rate'; likewise the 'acc' metric key used by myCallback
    # is 'accuracy' in newer versions — confirm the TF version pinned here.
    model.compile(loss='binary_crossentropy', optimizer=RMSprop(lr=0.001), metrics=['acc'])
    from tensorflow.keras.preprocessing.image import ImageDataGenerator
    # Rescale pixel values to [0, 1]; labels come from the subfolder names.
    train_datagen = ImageDataGenerator(rescale=1 / 255)
    train_generator = train_datagen.flow_from_directory('/Users/seanjudelyons/Downloads/happy-or-sad/', target_size=(150, 150), batch_size=20,
                                                        class_mode='binary')
    history = model.fit(train_generator, steps_per_epoch=4, epochs=20, verbose=1, callbacks=[callbacks])
    from keras.preprocessing import image
    import numpy as np
    # Classify a single hardcoded test image as a smoke test.
    path = os.path.join('/Users/seanjudelyons/Downloads/happyorsadtest.png')
    img = image.load_img(path, target_size=(150, 150))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    images = np.vstack([x])
    classes = model.predict(images, batch_size=10)
    print(classes[0])
    if classes[0] > 0.5:
        print("The picture is Sad")
    else:
        print("The picture is Happy")
    return history.history['acc'][-1]
train_happy_sad_model()
| StarcoderdataPython |
4805562 | <reponame>marcosfpr/match_up_lib
import unittest
from matchup.models.algorithms import ExtendedBoolean
from matchup.structure.solution import Result
from matchup.structure.weighting.tf import TermFrequency
from matchup.structure.weighting.idf import InverseFrequency
from . import set_up_pdf_test, set_up_txt_test
class ExtendedBooleanTest(unittest.TestCase):
    """IR tests: ExtendedBoolean (p=3.0) search with TF/IDF weighting must
    rank known documents with known scores, for both txt and pdf corpora."""
    def test_txt_search_known_response(self):
        """Query the txt corpus and check a sample of expected results."""
        self._vocabulary, self._query = set_up_txt_test()
        self._query.ask(answer="artilheiro brasil 1994 gols")
        response = self._query.search(model=ExtendedBoolean(3.0), idf=InverseFrequency(), tf=TermFrequency())
        # A sample of (document, score) pairs known from a reference run.
        some_expected_results = [Result("./tests/static/files/d1.txt", 0.564),
                                 Result("./tests/static/files/d3.txt", 0.180),
                                 Result("./tests/static/files/d15.txt", 0.308),
                                 Result("./tests/static/files/d11.txt", 0.179)]
        for expected in some_expected_results:
            self.assertTrue(expected in response)
    def test_pdf_search_known_response(self):
        """Same query against the pdf corpus; same expected scores."""
        self._vocabulary, self._query = set_up_pdf_test()
        self._query.ask(answer="artilheiro brasil 1994 gols")
        response = self._query.search(model=ExtendedBoolean(3.0), idf=InverseFrequency(), tf=TermFrequency())
        some_expected_results = [Result("./tests/static/pdf-files/d1.pdf", 0.564),
                                 Result("./tests/static/pdf-files/d3.pdf", 0.180),
                                 Result("./tests/static/pdf-files/d15.pdf", 0.308),
                                 Result("./tests/static/pdf-files/d11.pdf", 0.179)]
        for expected in some_expected_results:
            self.assertTrue(expected in response)
| StarcoderdataPython |
91787 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `pcuf` package."""
import hashlib
import warnings
import pytest
import pcuf
from tests.utils import TEST_DIR
@pytest.fixture
def file(request):
    """Create tests/foo.txt with known content; yields its path, content
    and sha1; removes the file again after the test."""
    test_file = TEST_DIR / "foo.txt"
    content = "hello"
    with open(str(test_file), "w+") as f:
        f.write(content)
    file_hash = hashlib.sha1(test_file.read_bytes()).hexdigest()
    def teardown():
        # Remove the temp file after the test (addfinalizer-style cleanup).
        if test_file.is_file():
            test_file.unlink()
    request.addfinalizer(teardown)
    return dict(file=test_file, content=content, hash=file_hash)
def test_file_hash(file):
    """Test pcuf.file.sha1"""
    expected = file["hash"]
    actual = pcuf.file.sha1(file["file"])
    assert expected == actual
def test_file_rename(file):
    """Test pcuf.file.rename"""
    renamed_file = TEST_DIR / "foobar.txt"
    # Precondition: source exists, target does not.
    assert file["file"].is_file()
    assert not renamed_file.is_file()
    pcuf.file.rename(file["file"], "foobar.txt")
    assert renamed_file.is_file()
    # Manual cleanup — the fixture only removes the *original* name.
    renamed_file.unlink()
    assert not renamed_file.is_file()
def test_deprecation_warning():
    """Tests pcuf.warnings.deprecate """
    with warnings.catch_warnings(record=True) as warns:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        # Trigger deprecation warning.
        pcuf.warnings.deprecate(
            "Deprecation Test.",
            version=pcuf.__version__,
            link_uid="fhQbw",
            link_file="gist-test",
        )
        # Exactly one DeprecationWarning, carrying both the message and
        # the package version.
        assert len(warns) == 1
        assert issubclass(warns[-1].category, DeprecationWarning)
        assert "Deprecation Test." in str(warns[-1].message)
        assert pcuf.__version__ in str(warns[-1].message)
assert pcuf.__version__ in str(warns[-1].message)
def test_retry_function():
    """Tests pcuf.functions.retry"""
    def completes_on_third_call():
        # Function attribute tracks how many times we've been called.
        if completes_on_third_call.state < 2:
            completes_on_third_call.state += 1
            raise TypeError
        return 1
    completes_on_third_call.state = 0
    # retry should swallow the first two TypeErrors and return the value.
    value = pcuf.functions.retry(completes_on_third_call)
    assert completes_on_third_call.state == 2
    assert value == 1
    def always_fail():
        always_fail.state += 1
        raise TypeError
    always_fail.state = 0
    # After exhausting the retry budget the exception propagates, and the
    # function must have been attempted exactly 'retries' times.
    with pytest.raises(TypeError):
        pcuf.functions.retry(always_fail, retries=6)
    assert always_fail.state == 6
| StarcoderdataPython |
def my_init(shape, dtype=None):
    """Fixed-kernel initializer: a 3x3 edge-detection filter.

    The requested *shape* is ignored -- the layer below is configured so the
    required kernel shape is exactly (3, 3, 1, 1) -- but *dtype* is now
    honoured (previously it was silently dropped, so Keras could not request
    e.g. float32 weights).
    """
    kernel = np.array([
        [0.0, 0.2, 0.0],
        [0.0, -0.2, 0.0],
        [0.0, 0.0, 0.0],
    ], dtype=dtype)  # dtype=None keeps NumPy's default (float64), as before
    # Add two trailing axes to match the required shape (3, 3, 1, 1)
    # == (rows, cols, input channels, output channels).
    return np.expand_dims(np.expand_dims(kernel, -1), -1)
# Build a one-layer "network" whose single 3x3 kernel is the fixed filter
# produced by my_init; 'same' padding preserves the image size and
# input_shape (None, None, 1) accepts any HxW single-channel image.
conv_edge = Sequential([
    Conv2D(kernel_size=(3,3), filters=1,
           padding="same", kernel_initializer=my_init,
           input_shape=(None, None, 1))
])

# Add a leading batch axis before prediction, then show input vs. output
# side by side (squeeze drops the singleton channel axis for display).
img_in = np.expand_dims(grey_sample_image, 0)
img_out = conv_edge.predict(img_in)

fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(10, 5))
ax0.imshow(np.squeeze(img_in[0]).astype(np.uint8),
           cmap=plt.cm.gray);
ax1.imshow(np.squeeze(img_out[0]).astype(np.uint8),
           cmap=plt.cm.gray);

# We only showcase a vertical edge detection here.
# Many other kernels work, for example differences
# of centered gaussians (sometimes called mexican-hat
# connectivity)
#
# You may try with this filter as well
# np.array([
#     [ 0.1, 0.2, 0.1],
#     [ 0.0, 0.0, 0.0],
#     [-0.1, -0.2, -0.1],
# ])
| StarcoderdataPython |
96225 | # Consume:
# Twitter API credentials -- placeholders only.
# NOTE(review): never commit real keys/secrets to version control; prefer
# loading them from environment variables or an ignored config file.
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
# Access:
ACCESS_TOKEN = ''
ACCESS_SECRET = '' | StarcoderdataPython |
4834180 | <gh_stars>100-1000
"""
antecedent_consequent.py : Contains Antecedent and Consequent classes.
"""
import networkx as nx
import numpy as np
from .fuzzyvariable import FuzzyVariable
from .state import StatefulProperty
def accumulation_max(*args):
    """Element-wise maximum of the input values/arrays (via ``np.fmax``).

    This is the default OR aggregation method for a fuzzy Rule.
    """
    return np.fmax(*args)
def accumulation_mult(*args):
    """Element-wise product of the input values/arrays (via ``np.multiply``).

    This may be used as an alternate AND aggregation method for a fuzzy Rule.
    """
    return np.multiply(*args)
class Antecedent(FuzzyVariable):
    """
    Antecedent (input/sensor) variable for a fuzzy control system.

    Parameters
    ----------
    universe : array-like
        Universe variable. Must be 1-dimensional and convertible to a NumPy
        array.
    label : string
        Name of the universe variable.
    """

    # Stateful per-simulation property -- presumably the crisp input value;
    # see StatefulProperty for the storage semantics.
    input = StatefulProperty(None)

    def __init__(self, universe, label):
        """Build an input variable over *universe* named *label*."""
        super(Antecedent, self).__init__(universe, label)
        self.__name__ = 'Antecedent'

    @property
    def graph(self):
        """
        NetworkX graph which connects this Antecedent with its Term(s).
        """
        g = nx.DiGraph()
        # Edges point from the variable to each of its terms.
        g.add_edges_from((self, term) for term in self.terms.values())
        return g
class Consequent(FuzzyVariable):
    """
    Consequent (output/control) variable for a fuzzy control system.

    Parameters
    ----------
    universe : array-like
        Universe variable. Must be 1-dimensional and convertible to a NumPy
        array.
    label : string
        Name of the universe variable.
    defuzzify_method : string
        name of method used for defuzzification, defaults to 'centroid'

    Notes
    -----
    The ``label`` string chosen must be unique among Antecedents and
    Consequents in the ``ControlSystem``.
    """

    # Stateful per-simulation property -- presumably the computed output
    # value; see StatefulProperty for the storage semantics.
    output = StatefulProperty(None)

    def __init__(self, universe, label, defuzzify_method='centroid'):
        """Build an output variable over *universe* named *label*."""
        super(Consequent, self).__init__(universe, label, defuzzify_method)
        self.__name__ = 'Consequent'
        # Default accumulation method is to take the max of any cut.
        self.accumulation_method = accumulation_max

    @property
    def graph(self):
        """
        NetworkX graph which connects this Consequent with its Term(s).
        """
        g = nx.DiGraph()
        # Edges point from each term to the variable (reverse of Antecedent).
        g.add_edges_from((term, self) for term in self.terms.values())
        return g
| StarcoderdataPython |
1781903 | <gh_stars>0
"""
Includes functions for reading and writing graphs, in a very simple readable format.
"""
# Version: 30-01-2015, <NAME>
# Version: 29-01-2017, <NAME>
# updated 30-01-2015: writeDOT also writes color information for edges.
# updated 2-2-2015: writeDOT can also write directed graphs.
# updated 5-2-2015: no black fill color used, when more than numcolors**2 vertices.
# updated 29-1-2017: pep8 reformat, general improvements
import sys
from typing import IO, Tuple, List, Union
from graph.graph import Graph, Edge
DEFAULT_COLOR_SCHEME = "paired12"
NUM_COLORS = 12
def read_line(f: IO[str]) -> str:
    """
    Return the next line of *f* that is not a '#' comment line.

    :param f: The file
    :return: the line (empty string at end of file)
    """
    line = f.readline()
    # An empty string never starts with '#', so EOF terminates the loop.
    while line.startswith('#'):
        line = f.readline()
    return line
def read_graph(graphclass, f: IO[str]) -> Tuple[Graph, List[str], bool]:
    """
    Read a single graph from a file.

    Expected format: optional option lines, an integer vertex count, then
    'tail,head' or 'tail,head:weight' edge lines; a following line starting
    with '-' signals that another graph follows in the file.

    :param graphclass: The class of the graph
    :param f: The file
    :return: (graph, option lines read, whether another graph follows)
    """
    options = []
    # Skip leading non-integer lines, collecting them as options, until a
    # line parses as the vertex count n.
    while True:
        try:
            line = read_line(f)
            n = int(line)
            graph = graphclass(directed=False, n=n)
            break
        except ValueError:
            # Not the vertex count: remember the line, newline stripped.
            if len(line) > 0 and line[-1] == '\n':
                options.append(line[:-1])
            else:
                options.append(line)
    line = read_line(f)
    edges = []
    # Consume edge lines until one fails to parse (EOF or a '-' marker);
    # the failing line stays in `line` for the continuation check below.
    try:
        while True:
            comma = line.find(',')
            if ':' in line:
                # 'tail,head:weight' -- weighted edge.
                colon = line.find(':')
                edges.append((int(line[:comma]), int(line[comma + 1:colon]), int(line[colon + 1:])))
            else:
                # 'tail,head' -- unweighted edge (weight stored as None).
                edges.append((int(line[:comma]), int(line[comma + 1:]), None))
            line = read_line(f)
    except Exception:
        pass
    # Edge endpoints are indices into the graph's vertex iteration order.
    indexed_nodes = list(graph.vertices)
    for edge in edges:
        graph += Edge(indexed_nodes[edge[0]], indexed_nodes[edge[1]], edge[2])
    if line != '' and line[0] == '-':
        return graph, options, True
    else:
        return graph, options, False
def read_graph_list(graph_class, f: IO[str]) -> Tuple[List[Graph], List[str]]:
    """
    Read a list of graphs from a file.

    :param graph_class: The graph class
    :param f: The file
    :return: (list of graphs, accumulated option lines)
    """
    graphs = []
    options = []
    more = True
    # read_graph reports via its third result whether another graph follows.
    while more:
        graph, extra_options, more = read_graph(graph_class, f)
        options.extend(extra_options)
        graphs.append(graph)
    return graphs, options
def load_graph(f: IO[str], graph_class=Graph, read_list: bool = False) -> Union[Tuple[List[Graph], List[str]], Graph]:
    """
    Load a graph from a file.

    :param f: The file
    :param graph_class: The class of the graph. You may subclass the default
        graph class and add your own here.
    :param read_list: Whether to read a list of graphs or just a single graph.
    :return: A (graphs, options) tuple when read_list is True, else the graph.
    """
    if not read_list:
        graph, _options, _more = read_graph(graph_class, f)
        return graph  # options are discarded in single-graph mode
    return read_graph_list(graph_class, f)
def input_graph(graph_class=Graph, read_list: bool = False) -> Union[Tuple[List[Graph], List[str]], Graph]:
    """
    Load a graph from sys.stdin.

    :param graph_class: The class of the graph. You may subclass the default
        graph class and add your own here.
    :param read_list: Whether to read a list of graphs or just a single graph.
    :return: The graph, or a list of graphs.
    """
    # Thin convenience wrapper around load_graph bound to standard input.
    return load_graph(sys.stdin, graph_class=graph_class, read_list=read_list)
def write_line(f: IO[str], line: str):
    """
    Write *line* to *f*, terminated by a newline.

    :param f: The file
    :param line: The line
    """
    print(line, file=f)
def write_graph_list(graph_list: List[Graph], f: IO[str], options=()):
    """
    Write a list of graphs to a file, in the format read_graph expects.

    Fixes over the previous version:
      * the ``options`` default is no longer a shared mutable list;
      * an edge whose weight is 0 is written as weighted ('t,h:0') instead
        of being silently demoted to unweighted -- only ``None`` means
        "no weight", matching what read_graph produces, so a graph now
        round-trips unchanged.

    :param graph_list: The list of graphs
    :param f: the file
    :param options: the (optional) options to write to the file.
    """
    # We may only write options that cannot be parsed as an integer, since
    # the reader treats the first integer line as the vertex count.
    for option in options:
        try:
            int(option)
        except ValueError:
            write_line(f, str(option))
    for i, g in enumerate(graph_list):
        write_line(f, '# Number of vertices:')
        write_line(f, str(len(g)))
        # Give the vertices (temporary) labels from 0 to n-1:
        label = {vertex: index for index, vertex in enumerate(g)}
        write_line(f, '# Edge list:')
        for e in g.edges:
            if e.weight is not None:
                write_line(f, str(label[e.tail]) + ',' + str(label[e.head]) + ':' + str(e.weight))
            else:
                write_line(f, str(label[e.tail]) + ',' + str(label[e.head]))
        if i + 1 < len(graph_list):
            write_line(f, '--- Next graph:')
def save_graph(graph_list: Union[Graph, List[Graph]], f: IO[str], options=()):
    """
    Write a graph, or a list of graphs, to a file.

    The ``options`` default was previously a shared mutable list; an empty
    tuple is equivalent and safe.

    :param graph_list: The graph, or a list of graphs.
    :param f: The file
    :param options: the (optional) options to write to the file.
    """
    # isinstance (rather than `type(...) is list`) also accepts list
    # subclasses, which the old exact-type check wrongly treated as a
    # single graph.
    if isinstance(graph_list, list):
        write_graph_list(graph_list, f, options)
    else:
        write_graph_list([graph_list], f, options)
def print_graph(graph_list: Union[Graph, List[Graph]], options=()):
    """
    Print a graph, or a list of graphs, to sys.stdout.

    The ``options`` default was previously a shared mutable list; an empty
    tuple is equivalent and safe.

    :param graph_list: The graph, or list of graphs.
    :param options: The (optional) options to print.
    """
    # isinstance (rather than `type(...) is list`) also accepts list
    # subclasses, which the old exact-type check wrongly treated as a
    # single graph.
    if isinstance(graph_list, list):
        write_graph_list(graph_list, sys.stdout, options)
    else:
        write_graph_list([graph_list], sys.stdout, options)
def write_dot(graph: Graph, f: IO[str], directed=False):
    """
    Writes a given graph to a file in .dot format.

    :param graph: The graph. If its vertices contain attributes `label`,
        `colortext` or `colornum`, these are also included in the file. If
        its edges contain an attribute `weight`, these are also included in
        the file.
    :param f: The file.
    :param directed: Whether the graph should be drawn as a directed graph.
    """
    if directed:
        f.write('digraph G {\n')
    else:
        f.write('graph G {\n')
    # Assign consecutive integer names to the vertices for the .dot output.
    name = {}
    next_name = 0
    for v in graph:
        name[v] = next_name
        next_name += 1
        options = 'penwidth=3,'
        if hasattr(v, 'label'):
            options += 'label="' + str(v.label) + '",'
        if hasattr(v, 'colortext'):
            # An explicit color string takes precedence over a color number.
            options += 'color="' + v.colortext + '",'
        elif hasattr(v, 'colornum'):
            # Map the color number into the scheme's palette (1-based).
            options += 'color=' + str(v.colornum % NUM_COLORS + 1) + ', colorscheme=' + DEFAULT_COLOR_SCHEME + ','
            if v.colornum >= NUM_COLORS:
                # Palette exhausted: encode the overflow in the fill color.
                options += 'style=filled,fillcolor=' + str((v.colornum // NUM_COLORS) % NUM_COLORS + 1) + ','
        if len(options) > 0:
            # options[:-1] drops the trailing comma before closing ']'.
            f.write(' ' + str(name[v]) + ' [' + options[:-1] + ']\n')
        else:
            f.write(' ' + str(name[v]) + '\n')
    f.write('\n')
    for e in graph.edges:
        options = 'penwidth=2,'
        if hasattr(e, 'weight'):
            options += 'label="' + str(e.weight) + '",'
        if hasattr(e, 'colortext'):
            options += 'color="' + e.colortext + '",'
        elif hasattr(e, 'colornum'):
            options += 'color=' + str(e.colornum % NUM_COLORS + 1) + ', colorscheme=' + DEFAULT_COLOR_SCHEME + ','
        if len(options) > 0:
            options = ' [' + options[:-1] + ']'
        if directed:
            f.write(' ' + str(name[e.tail]) + ' -> ' + str(name[e.head]) + options + '\n')
        else:
            f.write(' ' + str(name[e.tail]) + '--' + str(name[e.head]) + options + '\n')
    f.write('}')
if __name__ == "__main__":
    # Smoke test: load the example graph, print it, delete an arbitrary
    # vertex and print the result again.
    from mygraphs import MyGraph

    with open('examplegraph.gr') as f:
        G = load_graph(f, MyGraph)
    print(G)
    G.del_vert(next(iter(G.vertices)))
    print(G)
| StarcoderdataPython |
3387005 | <filename>tests/providers/amazon/aws/sensors/test_eks.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from unittest import mock
import pytest
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.eks import (
ClusterStates,
EksHook,
FargateProfileStates,
NodegroupStates,
)
from airflow.providers.amazon.aws.sensors.eks import (
CLUSTER_TERMINAL_STATES,
FARGATE_TERMINAL_STATES,
NODEGROUP_TERMINAL_STATES,
UNEXPECTED_TERMINAL_STATE_MSG,
EksClusterStateSensor,
EksFargateProfileStateSensor,
EksNodegroupStateSensor,
)
# Fixed identifiers shared by all sensor tests below.
CLUSTER_NAME = 'test_cluster'
FARGATE_PROFILE_NAME = 'test_profile'
NODEGROUP_NAME = 'test_nodegroup'
TASK_ID = 'test_eks_sensor'

# "Pending" states are every state that is not terminal; a sensor should
# keep poking (poke returns False) while the resource is in one of these.
CLUSTER_PENDING_STATES = frozenset({state.value for state in ClusterStates} - CLUSTER_TERMINAL_STATES)
FARGATE_PENDING_STATES = frozenset({state.value for state in FargateProfileStates} - FARGATE_TERMINAL_STATES)
NODEGROUP_PENDING_STATES = frozenset({state.value for state in NodegroupStates} - NODEGROUP_TERMINAL_STATES)
class TestEksClusterStateSensor:
    """Tests for EksClusterStateSensor.poke with EksHook calls mocked out."""

    @pytest.fixture(scope="function")
    def setUp(self):
        # Fresh sensor per test, targeting the ACTIVE cluster state.
        self.target_state = ClusterStates.ACTIVE
        self.sensor = EksClusterStateSensor(
            task_id=TASK_ID,
            cluster_name=CLUSTER_NAME,
            target_state=self.target_state,
        )

    @mock.patch.object(EksHook, 'get_cluster_state', return_value=ClusterStates.ACTIVE)
    def test_poke_reached_target_state(self, mock_get_cluster_state, setUp):
        # Target state reached -> poke succeeds, hook queried exactly once.
        assert self.sensor.poke({})
        mock_get_cluster_state.assert_called_once_with(clusterName=CLUSTER_NAME)

    @mock.patch('airflow.providers.amazon.aws.hooks.eks.EksHook.get_cluster_state')
    @pytest.mark.parametrize('pending_state', CLUSTER_PENDING_STATES)
    def test_poke_reached_pending_state(self, mock_get_cluster_state, setUp, pending_state):
        # Any non-terminal state -> poke returns False (sensor keeps waiting).
        mock_get_cluster_state.return_value = pending_state
        assert not self.sensor.poke({})
        mock_get_cluster_state.assert_called_once_with(clusterName=CLUSTER_NAME)

    @mock.patch('airflow.providers.amazon.aws.hooks.eks.EksHook.get_cluster_state')
    @pytest.mark.parametrize('unexpected_terminal_state', CLUSTER_TERMINAL_STATES - {ClusterStates.ACTIVE})
    def test_poke_reached_unexpected_terminal_state(
        self, mock_get_cluster_state, setUp, unexpected_terminal_state
    ):
        # A terminal state other than the target can never progress to the
        # target, so poke must raise with the canned message.
        expected_message = UNEXPECTED_TERMINAL_STATE_MSG.format(
            current_state=unexpected_terminal_state, target_state=self.target_state
        )
        mock_get_cluster_state.return_value = unexpected_terminal_state
        with pytest.raises(AirflowException) as raised_exception:
            self.sensor.poke({})
        assert str(raised_exception.value) == expected_message
        mock_get_cluster_state.assert_called_once_with(clusterName=CLUSTER_NAME)
class TestEksFargateProfileStateSensor:
    """Tests for EksFargateProfileStateSensor.poke with EksHook mocked out."""

    @pytest.fixture(scope="function")
    def setUp(self):
        # Fresh sensor per test, targeting the ACTIVE Fargate profile state.
        self.target_state = FargateProfileStates.ACTIVE
        self.sensor = EksFargateProfileStateSensor(
            task_id=TASK_ID,
            cluster_name=CLUSTER_NAME,
            fargate_profile_name=FARGATE_PROFILE_NAME,
            target_state=self.target_state,
        )

    @mock.patch.object(EksHook, 'get_fargate_profile_state', return_value=FargateProfileStates.ACTIVE)
    def test_poke_reached_target_state(self, mock_get_fargate_profile_state, setUp):
        # Target state reached -> poke succeeds, hook queried exactly once.
        assert self.sensor.poke({})
        mock_get_fargate_profile_state.assert_called_once_with(
            clusterName=CLUSTER_NAME, fargateProfileName=FARGATE_PROFILE_NAME
        )

    @mock.patch('airflow.providers.amazon.aws.hooks.eks.EksHook.get_fargate_profile_state')
    @pytest.mark.parametrize('pending_state', FARGATE_PENDING_STATES)
    def test_poke_reached_pending_state(self, mock_get_fargate_profile_state, setUp, pending_state):
        # Any non-terminal state -> poke returns False (sensor keeps waiting).
        mock_get_fargate_profile_state.return_value = pending_state
        assert not self.sensor.poke({})
        mock_get_fargate_profile_state.assert_called_once_with(
            clusterName=CLUSTER_NAME, fargateProfileName=FARGATE_PROFILE_NAME
        )

    @mock.patch('airflow.providers.amazon.aws.hooks.eks.EksHook.get_fargate_profile_state')
    @pytest.mark.parametrize(
        'unexpected_terminal_state', FARGATE_TERMINAL_STATES - {FargateProfileStates.ACTIVE}
    )
    def test_poke_reached_unexpected_terminal_state(
        self, mock_get_fargate_profile_state, setUp, unexpected_terminal_state
    ):
        # A terminal state other than the target can never progress to the
        # target, so poke must raise with the canned message.
        expected_message = UNEXPECTED_TERMINAL_STATE_MSG.format(
            current_state=unexpected_terminal_state, target_state=self.target_state
        )
        mock_get_fargate_profile_state.return_value = unexpected_terminal_state
        with pytest.raises(AirflowException) as raised_exception:
            self.sensor.poke({})
        assert str(raised_exception.value) == expected_message
        mock_get_fargate_profile_state.assert_called_once_with(
            clusterName=CLUSTER_NAME, fargateProfileName=FARGATE_PROFILE_NAME
        )
class TestEksNodegroupStateSensor:
    """Tests for EksNodegroupStateSensor.poke with EksHook calls mocked out."""

    @pytest.fixture(scope="function")
    def setUp(self):
        # Fresh sensor per test, targeting the ACTIVE nodegroup state.
        self.target_state = NodegroupStates.ACTIVE
        self.sensor = EksNodegroupStateSensor(
            task_id=TASK_ID,
            cluster_name=CLUSTER_NAME,
            nodegroup_name=NODEGROUP_NAME,
            target_state=self.target_state,
        )

    @mock.patch.object(EksHook, 'get_nodegroup_state', return_value=NodegroupStates.ACTIVE)
    def test_poke_reached_target_state(self, mock_get_nodegroup_state, setUp):
        # Target state reached -> poke succeeds, hook queried exactly once.
        assert self.sensor.poke({})
        mock_get_nodegroup_state.assert_called_once_with(
            clusterName=CLUSTER_NAME, nodegroupName=NODEGROUP_NAME
        )

    @mock.patch('airflow.providers.amazon.aws.hooks.eks.EksHook.get_nodegroup_state')
    @pytest.mark.parametrize('pending_state', NODEGROUP_PENDING_STATES)
    def test_poke_reached_pending_state(self, mock_get_nodegroup_state, setUp, pending_state):
        # Any non-terminal state -> poke returns False (sensor keeps waiting).
        mock_get_nodegroup_state.return_value = pending_state
        assert not self.sensor.poke({})
        mock_get_nodegroup_state.assert_called_once_with(
            clusterName=CLUSTER_NAME, nodegroupName=NODEGROUP_NAME
        )

    @mock.patch('airflow.providers.amazon.aws.hooks.eks.EksHook.get_nodegroup_state')
    @pytest.mark.parametrize(
        'unexpected_terminal_state', NODEGROUP_TERMINAL_STATES - {NodegroupStates.ACTIVE}
    )
    def test_poke_reached_unexpected_terminal_state(
        self, mock_get_nodegroup_state, setUp, unexpected_terminal_state
    ):
        # A terminal state other than the target can never progress to the
        # target, so poke must raise with the canned message.
        expected_message = UNEXPECTED_TERMINAL_STATE_MSG.format(
            current_state=unexpected_terminal_state, target_state=self.target_state
        )
        mock_get_nodegroup_state.return_value = unexpected_terminal_state
        with pytest.raises(AirflowException) as raised_exception:
            self.sensor.poke({})
        assert str(raised_exception.value) == expected_message
        mock_get_nodegroup_state.assert_called_once_with(
            clusterName=CLUSTER_NAME, nodegroupName=NODEGROUP_NAME
        )
| StarcoderdataPython |
95048 | <reponame>LukasK13/ESBO-ETC<gh_stars>0
from .AOpticalComponent import AOpticalComponent
from ..IRadiant import IRadiant
from ..SpectralQty import SpectralQty
from ..Entry import Entry
import astropy.units as u
from typing import Union
class StrayLight(AOpticalComponent):
    """
    A class to model additional stray light sources e.g. zodiacal light
    """

    def __init__(self, parent: IRadiant, emission: str):
        """
        Initialize a new stray light source

        Parameters
        ----------
        parent : IRadiant
            The parent element from which the electromagnetic radiation is received.
            This element is usually of type Target or StrayLight.
        emission : str
            Path to the file containing the spectral radiance of the stray light source.
            The format of the file will be guessed by `astropy.io.ascii.read()`.
        """
        # Parse the emission spectrum from disk, then hand it to the base
        # optical component together with unit (1.0) transmittance.
        emission_spectrum = SpectralQty.fromFile(
            emission, wl_unit_default=u.nm,
            qty_unit_default=u.W / (u.m ** 2 * u.nm * u.sr))
        super().__init__(parent, 1.0, emission_spectrum)

    @staticmethod
    def check_config(conf: Entry) -> Union[None, str]:
        """
        Check the configuration for this class

        Parameters
        ----------
        conf : Entry
            The configuration entry to be checked.

        Returns
        -------
        mes : Union[None, str]
            The error message of the check. This will be None if the check was successful.
        """
        # check_file already yields None on success, so pass it straight through.
        return conf.check_file("emission")
| StarcoderdataPython |
45519 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import threading
import traceback
import sys
import os
import shutil
import zipfile
from subprocess import Popen, PIPE
from re import search
from datetime import datetime
from call_helper import CallHelper
from os.path import basename
class OtherException(Exception):
    """Error type for expected, user-facing failures in this script."""

    def __init__(self, value):
        # Keep the message; __str__ returns it verbatim.
        self.value = value

    def __str__(self):
        return self.value
#use valid login and password
def hgcmd(cmd, *args):
    """Build an 'hg <cmd> ...' argument vector with the auth options preset.

    The credentials here are placeholders -- see the comment above: a valid
    login and password must be substituted before use.
    """
    base = [
        'hg',
        cmd,
        '--config', 'auth.spread.username=user',
        '--config', 'auth.spread.password=password',
        '--config', 'auth.spread.schemes=http https',
        '--config', 'auth.spread.prefix=*',
        '--noninteractive',
    ]
    return base + list(args)
def GetVersionFromHg(branchName):
    """Return the most recent non-'tip' tag among the last 5 commits on
    *branchName*, or '' if none of them carries such a tag."""
    (out, err) = CallHelper.call_helper(
        hgcmd('log', '--template', 'tag: {tags}&&&&\n', '-l', '5', '-b', branchName))
    # Each log entry is 'tag: <tags>' terminated by the '&&&&' sentinel.
    for entry in out.split("&&&&"):
        parts = entry.split(None, 1)
        if len(parts) > 1 and parts[0] == 'tag:' and parts[1] != 'tip':
            return parts[1]
    return ""
def ZipPdb(path, zip_handle):
    """Add every *.pdb file found under *path* (recursively) to *zip_handle*.

    NOTE(review): entries are flattened to their basenames, so two .pdb
    files with the same name in different directories would collide --
    confirm that builds never produce duplicates.
    """
    for dirpath, _dirnames, filenames in os.walk(path):
        for filename in filenames:
            if filename.endswith(".pdb"):
                full_path = os.path.join(dirpath, filename)
                zip_handle.write(full_path, basename(full_path))
def main():
    """Zip all .pdb files from a build directory and copy the archive to the
    release-symbol share, named after the branch's latest hg tag.

    argv: 1 = source dir, 2 = project name, 3 = prefix for the destination
    dir, 4 = branch name.  (Python 2 script -- note the print statements.)
    """
    try:
        if len(sys.argv) < 5:
            raise OtherException('Enter source dir, Project name, prefix for dest. dir and branch name')
        version = GetVersionFromHg(sys.argv[4])
        prefix_dst = sys.argv[3]
        project_name = sys.argv[2]
        src_dir = sys.argv[1]
        zipfile_name = version + '.zip'
        zip_file_path = src_dir + '\\' + zipfile_name
        # NOTE(review): the archive is created and filled even when no
        # version tag was found, and the handle is never closed explicitly.
        zipf = zipfile.ZipFile(zip_file_path, 'w', zipfile.ZIP_DEFLATED)
        ZipPdb(src_dir, zipf)
        if len(version) > 0:
            print 'Copy symbol ' + version
            # Destination: <share>/<project>/<major.minor>/<prefix>/
            #use symbols server path
            dst_dir = '\\\\server\\ReleaseSymbol\\' + project_name + '\\' + version[:version.rfind('.')] + '\\' + prefix_dst + '\\'
            try:
                os.makedirs(dst_dir)
            except OSError as e:
                # Destination directory may already exist; ignore.
                pass
            shutil.copyfile(zip_file_path, dst_dir + zipfile_name)
        else:
            print "No Change"
    except OtherException as e:
        # Expected, user-facing failures: print the message only.
        print "\r\n ERROR:"
        print e.__str__()
    except:
        # Last-resort handler: dump the traceback instead of crashing.
        print "\r\nSYS ERROR:"
        traceback.print_exc(file=sys.stdout)
if __name__ == "__main__":
main() | StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.