id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
344089 | import operator as op
from sweetpea import fully_cross_block
from sweetpea.primitives import Factor, DerivedLevel, WithinTrial
# Stroop 3, but the text value must always follow color.
color = Factor("color", ["red", "blue", "green"])
text = Factor("text", ["red", "blue", "green"])
# Global keyword needed to make this work when the tests exec() this file
global correct_order
def correct_order(color, text):
    """True when *text* names the color that follows *color* in the fixed
    cycle red -> blue -> green -> red (the "correct" order for this design)."""
    follows = {"red": "blue", "blue": "green", "green": "red"}
    return follows.get(color) == text
global incorrect_order
def incorrect_order(color, text):
    """Complement of correct_order: any pairing that breaks the color cycle."""
    is_correct = correct_order(color, text)
    return not is_correct
# Derived factor: classifies each trial by whether text follows color in the cycle.
order = Factor("order", [
    DerivedLevel("correct", WithinTrial(correct_order, [color, text])),
    DerivedLevel("incorrect", WithinTrial(incorrect_order, [color, text]))
])
# Full design includes all three factors, but only color x order is crossed;
# text is free to vary subject to the derived-level constraints.
design = [color, text, order]
crossing = [color, order]
block = fully_cross_block(design, crossing, [])
# ASSERT COUNT = 5760
| StarcoderdataPython |
5117312 | import json
import os
def writejson(filename, v):
    """Serialize *v* as pretty-printed JSON and write it to *filename*.

    Opens the file with an explicit UTF-8 encoding so output does not depend
    on the platform's default locale encoding, and streams via ``json.dump``
    instead of building the whole document string in memory first.
    """
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(v, f, indent=2)
def mkdirsafeish(name):
    """Create directory *name* (including parents) if it does not exist.

    Uses ``exist_ok=True`` rather than a separate ``os.path.exists`` check,
    which closes the check-then-create race when multiple processes start
    concurrently.  Note: unlike the old version, this raises FileExistsError
    if *name* exists but is a regular file -- failing loudly instead of
    silently leaving a bogus path for later code to trip over.
    """
    os.makedirs(name, exist_ok=True)
| StarcoderdataPython |
5008237 | import logging
class ProgramCrew(object):
    """A single crew member (e.g. cast or staff entry) attached to a program.

    Attributes are populated from upstream JSON-style dicts by
    :meth:`from_dict`; any attribute absent from the source stays ``None``.
    """

    # Source-dict key -> attribute it populates (order matches the old
    # hand-written if-chain, for identical processing order).
    _FIELD_MAP = (
        ("personId", "person_id"),
        ("nameId", "name_id"),
        ("billingOrder", "billing_order"),
        ("role", "role"),
        ("name", "name"),
    )

    def __init__(self):
        self.person_id = None  # type: unicode
        self.name_id = None  # type: unicode
        self.billing_order = None  # type: unicode
        self.role = None  # type: unicode
        self.name = None  # type: unicode

    def __unicode__(self):  # type: () -> unicode
        return self.name

    def __str__(self):
        return unicode(self).encode("utf-8")

    @classmethod
    def from_iterable(cls, iterable):  # type: (Iterable[dict]) -> List[ProgramCrew]
        """Build one ProgramCrew per dict in *iterable*.

        :param iterable: iterable of raw crew dicts
        :return: list of ProgramCrew instances
        """
        return [cls.from_dict(crew) for crew in iterable]

    @classmethod
    def from_dict(cls, dct):  # type: (dict) -> ProgramCrew
        """Build a ProgramCrew from *dct*, warning about unrecognized keys.

        The input is copied first so the caller's dict is no longer mutated
        (the previous implementation popped keys from *dct* in place).

        :param dct: raw crew dict from the upstream feed
        :return: populated ProgramCrew
        """
        remaining = dict(dct)
        program_crew = cls()
        for key, attr in cls._FIELD_MAP:
            if key in remaining:
                setattr(program_crew, attr, remaining.pop(key))
        if remaining:
            # logging.warn is a deprecated alias for logging.warning.
            logging.warning("Key(s) not processed for ProgramCrew: %s", ", ".join(remaining.keys()))
        return program_crew
| StarcoderdataPython |
11237286 | #!/usr/bin/env python
# Import the components
from flask import Flask, request, redirect, url_for, render_template
# Import the database functions corresponding to queries
from reportingtooldb import (get_most_popular_articles,
get_most_popular_authors,
get_most_erroneous_day)
app = Flask(__name__)
# Render frontend at '/' and display the fetched data on the webpage
@app.route('/', methods=['GET'])
def main():
    """Get the SQL data into a HTML frontend.

    Runs the three canned report queries and hands their rows to the
    index template under the names it expects.
    """
    context = {
        'fetched_1': get_most_popular_articles(),  # query 1 results
        'fetched_2': get_most_popular_authors(),   # query 2 results
        'fetched_3': get_most_erroneous_day(),     # query 3 results
    }
    return render_template('index.html', **context)
if __name__ == '__main__':
    # NOTE(review): hard-coded secret key and debug=True are acceptable for
    # local development only -- both should come from configuration (and the
    # key from a secret store) before any real deployment.
    app.secret_key = "SECRET KEY!"
    app.debug = True
    app.run(host='0.0.0.0', port=8000)  # starts the web server at 0.0.0.0:8000
| StarcoderdataPython |
6520570 | <gh_stars>10-100
#
# PySNMP MIB module TUBS-IBR-PROC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/TUBS-IBR-PROC-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:20:32 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Generated by pysmi from the TUBS-IBR-PROC-MIB ASN.1 source.  This module is
# executed by pysnmp with a pre-populated `mibBuilder` in scope; none of the
# names below are defined here directly.
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Bits, Counter64, Counter32, iso, IpAddress, TimeTicks, MibIdentifier, NotificationType, Integer32, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, ObjectIdentity, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Counter64", "Counter32", "iso", "IpAddress", "TimeTicks", "MibIdentifier", "NotificationType", "Integer32", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "ObjectIdentity", "ModuleIdentity")
TextualConvention, DateAndTime, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DateAndTime", "DisplayString")
ibr, = mibBuilder.importSymbols("TUBS-SMI", "ibr")
# Module identity under enterprise OID 1.3.6.1.4.1.1575 (TU Braunschweig).
procMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 1575, 1, 3))
procMIB.setRevisions(('2000-02-09 00:00', '1997-02-14 10:23', '1994-11-15 20:24',))
if mibBuilder.loadTexts: procMIB.setLastUpdated('200002090000Z')
if mibBuilder.loadTexts: procMIB.setOrganization('TU Braunschweig')
# Read-write DateAndTime scalar (presumably triggers/records a reload of the
# process table -- confirm against the MIB's DESCRIPTION text).
procReload = MibScalar((1, 3, 6, 1, 4, 1, 1575, 1, 3, 1), DateAndTime()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: procReload.setStatus('current')
# Process table: one row per process, indexed by procID.
procTable = MibTable((1, 3, 6, 1, 4, 1, 1575, 1, 3, 2), )
if mibBuilder.loadTexts: procTable.setStatus('current')
procEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1575, 1, 3, 2, 1), ).setIndexNames((0, "TUBS-IBR-PROC-MIB", "procID"))
if mibBuilder.loadTexts: procEntry.setStatus('current')
procID = MibTableColumn((1, 3, 6, 1, 4, 1, 1575, 1, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: procID.setStatus('current')
procCmd = MibTableColumn((1, 3, 6, 1, 4, 1, 1575, 1, 3, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: procCmd.setStatus('current')
mibBuilder.exportSymbols("TUBS-IBR-PROC-MIB", PYSNMP_MODULE_ID=procMIB, procMIB=procMIB, procTable=procTable, procEntry=procEntry, procCmd=procCmd, procID=procID, procReload=procReload)
| StarcoderdataPython |
5066379 | import gzip
import os
import pandas as pd
import re
import sys
import tarfile
sys.path.append(os.path.abspath(os.path.join("..")))
from parsers.loadgen_parser import LoadgenParser
def get_node_names(experiment_dirname):
    """Return the node directory names under the experiment's logs dir,
    skipping hidden entries (names starting with '.')."""
    logs_dir = os.path.join(os.path.dirname(__file__), "..", "data",
                            experiment_dirname, "logs")
    names = []
    for entry in os.listdir(logs_dir):
        if not entry.startswith('.'):
            names.append(entry)
    return names
def get_rpc_logfiles(experiment_dirname):
    """Yield open file objects for every ``calls.log`` inside the apigateway
    and ``*_service`` tarballs of an experiment.

    Each file is yielded while its enclosing tarfile is still open, so
    callers must consume it before advancing the generator.
    """
    tarball_patterns = [
        r"^apigateway.*\.tar\.gz$",
        r"^.+_service.*\.tar\.gz$",
    ]
    # Hoist the experiment's logs directory; it is invariant across the loops.
    logs_dir = os.path.join(os.path.dirname(__file__), "..", "data",
                            experiment_dirname, "logs")
    for node_name in get_node_names(experiment_dirname):
        for tarball_name in os.listdir(os.path.join(logs_dir, node_name)):
            # any() short-circuits; the original built and summed a temp list.
            if any(re.match(pattern, tarball_name) for pattern in tarball_patterns):
                tarball_path = os.path.join(logs_dir, node_name, tarball_name)
                with tarfile.open(tarball_path, "r:gz") as tar:
                    for filename in tar.getnames():
                        if filename.endswith("calls.log"):
                            with tar.extractfile(filename) as logfile:
                                yield logfile
def get_query_logfiles(experiment_dirname):
    """Yield open file objects for every ``queries.log`` inside the
    ``*_service`` tarballs of an experiment.

    Each file is yielded while its enclosing tarfile is still open, so
    callers must consume it before advancing the generator.
    """
    tarball_patterns = [
        r"^.+_service.*\.tar\.gz$",
    ]
    # Hoist the experiment's logs directory; it is invariant across the loops.
    logs_dir = os.path.join(os.path.dirname(__file__), "..", "data",
                            experiment_dirname, "logs")
    for node_name in get_node_names(experiment_dirname):
        for tarball_name in os.listdir(os.path.join(logs_dir, node_name)):
            # any() short-circuits; the original built and summed a temp list.
            if any(re.match(pattern, tarball_name) for pattern in tarball_patterns):
                tarball_path = os.path.join(logs_dir, node_name, tarball_name)
                with tarfile.open(tarball_path, "r:gz") as tar:
                    for filename in tar.getnames():
                        if filename.endswith("queries.log"):
                            with tar.extractfile(filename) as logfile:
                                yield logfile
def get_loadgen_logfiles(experiment_dirname):
    """Yield open file objects for every ``loadgen.log`` found inside the
    loadgen tarballs of an experiment."""
    base = os.path.join(os.path.dirname(__file__), "..", "data",
                        experiment_dirname, "logs")
    for node_name in get_node_names(experiment_dirname):
        node_dir = os.path.join(base, node_name)
        for tarball_name in os.listdir(node_dir):
            if not re.match(r"^loadgen.*\.tar\.gz$", tarball_name):
                continue
            with tarfile.open(os.path.join(node_dir, tarball_name), "r:gz") as tar:
                for member in tar.getnames():
                    if member.endswith("loadgen.log"):
                        with tar.extractfile(member) as logfile:
                            yield logfile
def get_collectl_cpu_logfiles(experiment_dirname):
    """Yield ``(node_name, logfile)`` pairs for each collectl ``.cpu.gz`` log,
    with the gzip stream opened in text mode."""
    for node_name in get_node_names(experiment_dirname):
        tarball_path = os.path.join(os.path.dirname(__file__), "..", "data",
                                    experiment_dirname, "logs", node_name,
                                    "collectl.tar.gz")
        with tarfile.open(tarball_path, "r:gz") as tar:
            cpu_members = [m for m in tar.getnames() if m.endswith(".cpu.gz")]
            for member in cpu_members:
                with gzip.open(tar.extractfile(member), "rt") as logfile:
                    yield (node_name, logfile)
def get_collectl_mem_logfiles(experiment_dirname):
    """Yield ``(node_name, logfile)`` pairs for each collectl ``.numa.gz``
    (memory) log, with the gzip stream opened in text mode."""
    for node_name in get_node_names(experiment_dirname):
        tarball_path = os.path.join(os.path.dirname(__file__), "..", "data",
                                    experiment_dirname, "logs", node_name,
                                    "collectl.tar.gz")
        with tarfile.open(tarball_path, "r:gz") as tar:
            mem_members = [m for m in tar.getnames() if m.endswith(".numa.gz")]
            for member in mem_members:
                with gzip.open(tar.extractfile(member), "rt") as logfile:
                    yield (node_name, logfile)
def get_collectl_dsk_logfiles(experiment_dirname):
    """Yield ``(node_name, logfile)`` pairs for each collectl ``.dsk.gz``
    (disk) log, with the gzip stream opened in text mode."""
    for node_name in get_node_names(experiment_dirname):
        tarball_path = os.path.join(os.path.dirname(__file__), "..", "data",
                                    experiment_dirname, "logs", node_name,
                                    "collectl.tar.gz")
        with tarfile.open(tarball_path, "r:gz") as tar:
            dsk_members = [m for m in tar.getnames() if m.endswith(".dsk.gz")]
            for member in dsk_members:
                with gzip.open(tar.extractfile(member), "rt") as logfile:
                    yield (node_name, logfile)
def get_experiment_start_time(experiment_dirname):
    """Return the earliest loadgen request timestamp across all loadgen logs."""
    frames = [
        pd.DataFrame.from_dict(LoadgenParser(logfile).parse())
        for logfile in get_loadgen_logfiles(experiment_dirname)
    ]
    requests = pd.concat(frames, ignore_index=True)
    return requests["timestamp"].values.min()
def get_experiment_end_time(experiment_dirname):
    """Return the latest loadgen request timestamp across all loadgen logs."""
    frames = [
        pd.DataFrame.from_dict(LoadgenParser(logfile).parse())
        for logfile in get_loadgen_logfiles(experiment_dirname)
    ]
    requests = pd.concat(frames, ignore_index=True)
    return requests["timestamp"].values.max()
| StarcoderdataPython |
3452284 | <filename>nanomesh/data/__init__.py
"""Module containing sample data."""
from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING
import numpy as np
from skimage.data import binary_blobs
from nanomesh._doc import doc
data_dir = Path(__file__).parent
if TYPE_CHECKING:
from nanomesh import MeshContainer
@doc(dim='2d')
def binary_blobs2d(**kwargs) -> np.ndarray:
    """Generate {dim} binary blobs.

    Parameters
    ----------
    **kwargs
        These parameters are passed to :func:`skimage.data.binary_blobs`

    Returns
    -------
    numpy.ndarray
        {dim} array with binary blobs
    """
    # Fill in defaults without overriding anything the caller supplied.
    defaults = {
        'length': 50,
        'n_dim': 2,
        'volume_fraction': 0.2,
        'blob_size_fraction': 0.3,
    }
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    return binary_blobs(**kwargs).astype(int)
@doc(binary_blobs2d, dim='3d')
def binary_blobs3d(**kwargs) -> np.ndarray:
    # Same as binary_blobs2d but with 3D defaults; docstring is templated
    # from binary_blobs2d by the @doc decorator.
    defaults = {
        'length': 50,
        'n_dim': 3,
        'volume_fraction': 0.2,
        'blob_size_fraction': 0.2,
    }
    for key, value in defaults.items():
        kwargs.setdefault(key, value)
    return binary_blobs(**kwargs).astype(int)
def nanopores() -> np.ndarray:
    """Fetch 2D slice of nanopore dataset.

    Returns
    -------
    nanopores : numpy.ndarray
        2D image of nanopores
    """
    # Fixed slice index 161 along the last axis of the 3D volume.
    slice_index = 161
    return nanopores3d()[:, :, slice_index]
def nanopores_gradient() -> np.ndarray:
    """Fetch 2D slice of nanopore dataset with a gradient.

    Returns
    -------
    nanopores : (i,j) numpy.ndarray
        2D image of nanopores with gradient
    """
    image = np.load(data_dir / 'nanopores_gradient.npy')
    return np.rot90(image)
def nanopores3d() -> np.ndarray:
    """Fetch 3D nanopore dataset.

    Returns
    -------
    nanopores : (i,j,k) numpy.ndarray
        3D image of nanopores
    """
    dataset_path = data_dir / 'nanopores3d.npy'
    return np.load(dataset_path)
@doc(dim='2d', kind='triangle', func='triangulate')
def blob_mesh2d(opts: str = 'q30a10', **kwargs) -> MeshContainer:
    """Return a {dim} {kind} mesh generated from binary blobs.

    Parameters
    ----------
    opts : str, optional
        Options passed to :func:`{func}`.
    **kwargs
        These parameters are passed to :func:`binary_blobs{dim}`.

    Returns
    -------
    mesh : MeshContainer
        {dim} {kind} mesh generated from binary blobs.
    """
    # Local import, presumably to avoid a circular dependency at module
    # load time -- TODO confirm.
    from nanomesh import plane2mesh

    image = binary_blobs2d(**kwargs)
    return plane2mesh(image, opts=opts)
@doc(blob_mesh2d, dim='3d', kind='tetrahedral', func='tetrahedralize')
def blob_mesh3d(opts: str = '-pAq', **kwargs) -> MeshContainer:
    # Docstring is templated from blob_mesh2d by the @doc decorator.
    from nanomesh import volume2mesh

    volume = binary_blobs3d(**kwargs)
    return volume2mesh(volume, opts=opts)
__all__ = [
'nanopores',
'nanopores3d',
'binary_blobs3d',
'blob_mesh2d',
'blob_mesh3d',
]
| StarcoderdataPython |
6589647 | <reponame>vontell/SimCrawl<gh_stars>0
'''
ok so here are sections
2W: 2AB
4E: 3/4C
4W: 4AB
5C: 5AB
6E: 5/6C
6W: 6AB
8C: B Tower
8E: 7ABC
8W: A Tower
9E: C Tower
'''
# c_tower_set = set([975,1078B,1074,978,1078A,1040,940,1040,938,977,873,1077,1073,978,1075,980,840,939,874,840,941,940,939,875,1039,973,1078B,1039,839,878,1038,979,1076,974,1078B,941,980,840,878,976])
# two_ab_set = set([329,341,224A,225,252,322B,345,244B,344,327,321,252,244C,229,228,322B,329,321,344,326,328,344,324,324,228,322D,230,341,224B,326,329,326,328,224A,322C,225,322B,322C,225,330,325])
def init():
    """Load every dorm section's room list from disk.

    Returns
    -------
    dict
        Maps section names (e.g. 'A Tower', '23AB') to the set of
        room-number strings read from that section's text file
        (one room number per line).
    """
    # Section name -> file listing its rooms.  Replaces ten copy-pasted
    # open/loop/close stanzas; 'with' guarantees each file is closed even
    # if a read fails (the old code leaked handles on error).
    section_files = {
        'A Tower': 'a_tower_rooms.txt',
        'B Tower': 'b_tower_rooms.txt',
        'C Tower': 'c_tower_rooms.txt',
        '23AB': '2a_rooms.txt',
        '34C': '34c_rooms.txt',
        '4AB': '4ab_rooms.txt',
        '5AB': '5ab_rooms.txt',
        '56C': '56c_rooms.txt',
        '6AB': '6ab_rooms.txt',
        '7ABC': '7abc_rooms.txt',
    }
    rooms_dictionary = {}
    for section, filename in section_files.items():
        rooms = set()
        with open(filename, 'r') as room_file:
            for line in room_file:
                rooms.add(line.strip())
        rooms_dictionary[section] = rooms
    return rooms_dictionary
# print rooms_set
def map_room(room_number):
    """Map a room-number string to a short section label (e.g. '6C', '12A').

    Looks the room up in the per-section sets returned by init(), then
    refines combined sections ('7ABC', '<n>AB') to a single wing using the
    hard-coded tower suffix lists below.
    """
    # Room-number suffixes that place a room in C tower / B tower respectively.
    C_tower_indices = ['39','30','73','74','75','76','77','78A','78B','41']
    B_tower_indices = ['31','32','33','34','36','52A','64','65','66','72','71']
    rooms_dictionary = init()
    correct_section = ''
    # Find which section's room set contains this room number.
    for section in rooms_dictionary:
        if(room_number in rooms_dictionary[section]):
            correct_section = section
            break
    if('Tower' in correct_section):
        # Leading '1' means a two-digit floor ('10'-'19'), so keep two chars.
        if(room_number[0] == '1'):
            return room_number[0:2] + correct_section[0]
        return room_number[0] + correct_section[0]
    elif('ABC' in correct_section):
        # NOTE(review): substring membership ('index in room_number[1:]') is
        # fragile -- a suffix like '30' also matches longer numbers containing
        # it; confirm against the actual room inventory before relying on it.
        for index in C_tower_indices:
            if(index in room_number[1:]):
                return '7C'
        for index in B_tower_indices:
            if(index in room_number[1:]):
                return '7B'
        return '7A'
    elif('AB' in correct_section):
        for index in B_tower_indices:
            if(index in room_number[1:]):
                return room_number[0] + 'B'
        return room_number[0] + 'A'
    else:
        # Fallback: also reached when the room was not found in any section.
        return room_number[0] + 'C'
# print(map_room('673')) | StarcoderdataPython |
1934036 | import drawSvg as draw
import pandas as pd
# >>> df = pd.DataFrame([[0, 2, 3], [0, 4, 1], [10, 20, 30]],
# ... index=[4, 5, 6], columns=['A', 'B', 'C'])
# >>> df
# A B C
# 4 0 2 3
# 5 0 4 1
# 6 10 20 30
# Single-row frame holding the three KPI percentages that drive the chart.
df = pd.DataFrame([[15.7, 11.9, 3.8]], index=[1], columns=['Comp', 'Traffic', 'Ticket'])
# comp_val = 15.7
# traffic_val = 11.9
# ticket_val = 3.8
comp_val = df.at[1, 'Comp']
traffic_val = df.at[1, 'Traffic']
ticket_val = df.at[1, 'Ticket']
# Relative band heights: each value's share of the combined magnitude
# |traffic| + |ticket| (absolute values, so negative KPIs still get a band).
traffic_h = abs(traffic_val)/(abs(traffic_val) + abs(ticket_val))
ticket_h = abs(ticket_val)/(abs(traffic_val) + abs(ticket_val))
# Canvas size in pixels and a global scale factor applied to all coordinates.
w = 1200
h = 800
r = 1.0
d = draw.Drawing(w, h, origin='center', displayInline=False)
# function drawAnRect(x1, y1, x2, y2, x3, y3, x4, y4, is_closed=True, fill_color, txt, txt_color='black', txt_x1, txt_x2)
# Draw an irregular polygon
# 2f6fc7
# 3e87c1
# 87b7df
# red
# ca352f
# da7879
########### draw Comp #############
bx = -300
by = -75
bw = 200
bh = 300
w1 = bw
h1 = bh
d.append(draw.Rectangle(bx+0*r, by+0*r, w1*r, h1*r, fill='#2f6fc7'))
# d.append(draw.Circle(bx+0*r, by+0*r, 30,
# fill='yellow', stroke_width=2, stroke='black'))
x2 = bx + w1/2.0 * r
y2 = by + h1/2.0 * r
txt = 'Comp%'
d.append(draw.Text(txt, 15, x2, y2, center=0.5, fill='white'))
x2 = bx + w1/2.0 * r
y2 = by + h1/2.0 * r
txt = str(comp_val) + '%'
d.append(draw.Text(txt, 15, x2, y2, center=3.5, fill='white'))
########### draw Traffic #############
x1 = bx + w1 * r
y1 = by + h1 * r
w1 = bw
h1 = bh*traffic_h
delta_y = h1*0.1
if traffic_val >= 0:
fill_color = '#3e87c1'
else:
fill_color = '#ca352f'
d.append(draw.Lines(x1, y1,
x1 + w1*r, y1+delta_y*r,
x1 + w1*r, y1-h1*r+delta_y*r,
x1, y1-h1*r,
close=True,
fill=fill_color,
stroke=fill_color))
x2 = x1 + w1/2.0 * r
y2 = y1 - h1/2.0 * r
txt = 'Traffic%'
d.append(draw.Text(txt, 15, x2, y2, center=0.5, fill='white'))
########### draw Traffic percent #############
x1 = x1 + w1 * r
y1 = y1 + delta_y
w1 = bw * 0.7
h1 = bh * traffic_h
delta_y = h1*0.2
if traffic_val >= 0:
fill_color = '#87b7df'
else:
fill_color = '#da7879'
d.append(draw.Lines(x1, y1,
x1 + w1*r, y1,
x1 + w1*r, y1-h1*r,
x1, y1-h1*r,
close=True,
fill=fill_color,
stroke=fill_color))
x2 = x1 + w1/2.0 * r
y2 = y1 - h1/2.0 * r
txt = str(traffic_val) + '%'
if traffic_val >= 0:
fill_color = 'black'
else:
fill_color = 'white'
d.append(draw.Text(txt, 15, x2, y2, center=0.5, fill=fill_color))
########### draw Ticket #############
space_h = 5*r
w1 = bw
h1 = bh
x1 = bx + w1 * r
y1 = by + bh*ticket_h * r - 5*r
# d.append(draw.Circle(x1,y1, 30,
# fill='red', stroke_width=2, stroke='black'))
w1 = bw
h1 = bh*ticket_h-space_h
delta_y = h1*0.2
if ticket_val >= 0:
fill_color = '#3e87c1'
else:
fill_color = '#ca352f'
d.append(draw.Lines(x1, y1,
x1 + w1*r, y1+delta_y*r,
x1 + w1*r, y1-h1+delta_y*r,
x1, y1-h1*r,
close=True,
fill=fill_color,
stroke=fill_color))
x2 = x1 + w1/2.0 * r
y2 = y1 - h1/2.0 * r
txt = 'Ticket%'
d.append(draw.Text(txt, 15, x2, y2, center=0.5, fill='white'))
########### draw Ticket percent #############
x1 = x1 + w1 * r
y1 = y1 + delta_y
w1 = bw * 0.7
h1 = bh * ticket_h-space_h
if ticket_val >= 0:
fill_color = '#87b7df'
else:
fill_color = '#da7879'
d.append(draw.Lines(x1, y1,
x1 + w1*r, y1,
x1 + w1*r, y1-h1*r,
x1, y1-h1*r,
close=True,
fill=fill_color,
stroke=fill_color))
x1 = x1 + w1/2.0 * r
y1 = y1 - h1/2.0 * r
txt = str(ticket_val) + '%'
if ticket_val >= 0:
fill_color = 'black'
else:
fill_color = 'white'
d.append(draw.Text(txt, 15, x1, y1, center=0.5, fill=fill_color))
# x1 = bx + w1/2.0 * r
# y1 = by + h1/2.0 * r
# txt = str(comp_val) + '%'
# d.append(draw.Text(txt, 15, x1, y1, center=3.5, fill='white'))
# d.append(draw.Text('Comp%',insert = (30, 55),font_size="10px",fill='black'))
# dwg.add(dwg.text('Test',insert = (30, 55),font_size="10px",fill='black'))
# hlink.append(draw.Text('Hyperlink',0.2, 0,0, center=0.6, fill='white'))
# d.append(draw.Rectangle(0*r, 0*r, 100*r, 150*r, fill='#2f6fc7'))
# d.setPixelScale(1) # Set number of pixels per geometry unit
#d.setRenderSize(400,200) # Alternative to setPixelScale
d.saveSvg('example.svg')
d.savePng('example.png')
# Display in Jupyter notebook
d.rasterize() # Display as PNG
d # Display as SVG
# d.append(draw.Lines(0*r, 0*r,
# 100*r, 0*r,
# 100*r, 150*r,
# 0*r, 150*r,
# close=True,
# fill='#eeee00',
# stroke='black'))
#
# # Draw a rectangle
# d.append(draw.Rectangle(0,0,40,50, fill='#1248ff'))
#
# # Draw a circle
# d.append(draw.Circle(-40, -10, 30,
# fill='red', stroke_width=2, stroke='black'))
#
# # Draw an arbitrary path (a triangle in this case)
# p = draw.Path(stroke_width=2, stroke='green',
# fill='black', fill_opacity=0.5)
# p.M(-30,5) # Start path at point (-30, 5)
# p.l(60,30) # Draw line to (60, 30)
# p.h(-70) # Draw horizontal line to x=-70
# p.Z() # Draw line to start
# d.append(p)
#
# # Draw multiple circular arcs
# d.append(draw.ArcLine(60,-20,20,60,270,
# stroke='red', stroke_width=5, fill='red', fill_opacity=0.2))
# d.append(draw.Arc(60,-20,20,60,270,cw=False,
# stroke='green', stroke_width=3, fill='none'))
# d.append(draw.Arc(60,-20,20,270,60,cw=True,
# stroke='blue', stroke_width=1, fill='black', fill_opacity=0.3))
#
# # Draw arrows
# arrow = draw.Marker(-0.1, -0.5, 0.9, 0.5, scale=4, orient='auto')
# arrow.append(draw.Lines(-0.1, -0.5, -0.1, 0.5, 0.9, 0, fill='red', close=True))
# p = draw.Path(stroke='red', stroke_width=2, fill='none',
# marker_end=arrow) # Add an arrow to the end of a path
# p.M(20, -40).L(20, -27).L(0, -20) # Chain multiple path operations
# d.append(p)
# d.append(draw.Line(30, -20, 0, -10,
# stroke='red', stroke_width=2, fill='none',
# marker_end=arrow)) # Add an arrow to the end of a line
#
# d.setPixelScale(2) # Set number of pixels per geometry unit
# #d.setRenderSize(400,200) # Alternative to setPixelScale
# d.saveSvg('example.svg')
# d.savePng('example.png')
#
# # Display in Jupyter notebook
# d.rasterize() # Display as PNG
# d # Display as SVG | StarcoderdataPython |
3318602 | <reponame>timgates42/tweetmotif<gh_stars>10-100
import sys
from collections import defaultdict
import twokenize
import bigrams
import lang_model
class LinkedCorpus:
    " Hold tweets & indexes .. that is, ngrams are 'linked' to their tweets. "
    def __init__(self):
        # Local language model accumulating ngram counts for this corpus.
        self.model = lang_model.LocalLM()
        # Inverted index: ngram -> list of tweet dicts containing it.
        self.index = defaultdict(list)
        # Partial-bigram index: (tok, None) / (None, tok) -> bigrams with
        # that token in that position.
        self.bigram_index = defaultdict(list)
        self.tweets_by_id = {}
    def add_tweet(self, tweet):
        """Index one tweet dict: count its uni/bi/trigrams in the model and
        link each distinct ngram back to the tweet."""
        self.tweets_by_id[tweet['id']] = tweet
        toks = tweet['toks']
        # 'big_n' accumulates the total token count across all tweets.
        self.model.info['big_n'] += len(toks)
        # set() de-duplicates, so each distinct ngram counts once per tweet.
        the_unigrams = set(bigrams.filtered_unigrams(toks))
        tweet['unigrams'] = the_unigrams
        for unigram in the_unigrams:
            self.model.add(unigram)
            self.index[unigram].append(tweet)
        the_bigrams = set(bigrams.filtered_bigrams(toks))
        tweet['bigrams'] = the_bigrams
        for bigram in the_bigrams:
            self.model.add(bigram)
            self.index[bigram].append(tweet)
            # Register both half-slots so bigrams can be found by either token.
            self.bigram_index[bigram[0], None].append(bigram)
            self.bigram_index[None, bigram[1]].append(bigram)
        tweet['trigrams'] = set(bigrams.filtered_trigrams(toks))
        for trigram in tweet['trigrams']:
            self.model.add(trigram)
            self.index[trigram].append(tweet)
        #self.tweets_by_text.append(tweet)
        #for ngram in set(bigrams.multi_ngrams(toks, n_and_up=3)):
        #    pass
    def fill_from_tweet_iter(self, tweet_iter):
        """Add every tweet from *tweet_iter* to the corpus."""
        for tweet in tweet_iter:
            self.add_tweet(tweet)
if __name__=='__main__':
    # Python 2 script.  Usage: python <script> <query> <smoothing_algorithm>
    import cPickle as pickle
    import search
    q = sys.argv[1]
    smoothing = sys.argv[2]
    # Background language model is opened read-only; used only for comparison.
    bg_model = lang_model.TokyoLM(readonly=True)
    lc = LinkedCorpus()
    # Fetch and de-duplicate search results for the query, then index them.
    tweet_iter = search.cleaned_results(q,
        pages = 2,
        key_fn = search.user_and_text_identity,
        save = None,
        load = None
        )
    lc.fill_from_tweet_iter(tweet_iter)
    # Print each over-represented ngram (tokens joined with '_') and its ratio.
    for ratio, ngram in lc.model.compare_with_bg_model(bg_model, 3, min_count=3, smoothing_algorithm=smoothing):
        print "%s\t%s" % ('_'.join(ngram), ratio)
| StarcoderdataPython |
9646383 | <filename>src/comments/urls.py
from django.urls import path,include
from . import views
# URL namespace for reversing, e.g. 'comments:create_comment'.
app_name='comments'
urlpatterns = [
    # Create a comment attached to post <post_pk>.
    path('comment/create/<int:post_pk>/', views.create_comment, name='create_comment'),
    # Create a private comment attached to assignment <assignment_pk>.
    path('privatecomment/create/<int:assignment_pk>/', views.create_private_comment, name='private_comment'),
]
from setuptools import find_packages, setup

# Minimal setuptools packaging configuration: metadata only, no
# install_requires declared here.
setup(
    name='src',
    packages=find_packages(),
    version='0.1.0',
    description='a comment bot for getting reddit karma',
    author='<NAME>, PhD',
    license='MIT',
)
| StarcoderdataPython |
# Expected (renamed) column names for the IMLS public-library *outlet*
# dataset after preprocessing; used to validate the loaded frame's schema.
EXPECTED_OUTLET_DATA_COLS = [
    "AddressChangeCode",
    "BureauOfEconomicsAnalysisCode",
    "CategorizationOfLocale_By_SizeAndProximityToCities",
    "CategorizationOfLocale_By_SizeAndProximityToCities_FromRuralEducationAchievementProgram",
    "CensusBlock",
    "CensusTract",
    "CongressionalDistrict",
    "CoreBasedStatisticalArea",
    "CountyPopulation",
    "Geocoding_AccuracyAndPrecision",
    "GeographicNamesInformationSystemFeatureId",
    "HoursOpen",
    "InternationalCommiteeForInfoTechStandardsStateCode_ThreeDigit",
    "InternationalCommiteeForInfoTechStandardsStateCode_TwoDigit",
    "Latitude",
    "LibraryIdCode_FromIMLS",
    "LibraryIdCode_FromState",
    "LibraryIdCode_Suffix",
    "Longitude",
    "MeetsDefinitionOfPublicLibrary",
    "MetropolitanAndMicropolitcanStatisticalAreaFlag",
    "Name",
    "NameChangeCode",
    "Num_BookmobilesInBookmobileOutletRecord",
    "OutletType",
    "SquareFootage",
    "SquareFootage_ImputationFlag",
    "State",
    "StreetAddress_Address",
    "StreetAddress_City",
    "StreetAddress_County",
    "StreetAddress_Phone",
    "StreetAddress_ZipCode",
    "StreetAddress_ZipCode_4Digit",
    "StructureChangeCode",
    "SubmissionYearOfPublicLibraryData",
    "WeeksOpen",
]
EXPECTED_STATE_SUM_COLS = [
"BureauOfEconomicsAnalysisCode",
"CapitalExpenditures_Total",
"CapitalExpenditures_Total_ImputationFlag",
"CapitalRevenue_From_FederalGovernment",
"CapitalRevenue_From_FederalGovernment_ImputationFlag",
"CapitalRevenue_From_LocalGovernment",
"CapitalRevenue_From_LocalGovernment_ImputationFlag",
"CapitalRevenue_From_Other",
"CapitalRevenue_From_Other_ImputationFlag",
"CapitalRevenue_From_State",
"CapitalRevenue_From_State_ImputationFlag",
"CapitalRevenue_Total",
"CapitalRevenue_Total_ImputationFlag",
"Circulation_For_ChildrenMaterials",
"Circulation_For_ChildrenMaterials_ImputationFlag",
"Circulation_For_ElectronicContentUse",
"Circulation_For_ElectronicInfo_SuccessfulRetrieval",
"Circulation_For_ElectronicMaterials",
"Circulation_For_PhysicalItems",
"Circulation_Total_CountOf_PhysicalAndElectronicCirculation_And_ElectronicSuccessfulRetrieval",
"Circulation_Total_Transactions",
"ElectronicCollections_From_LocalOrOther",
"ElectronicCollections_From_Other_ImputationFlag",
"ElectronicCollections_From_State",
"ElectronicCollections_From_State_ImputationFlag",
"ElectronicCollections_Total",
"ElectronicCollections_Total_ImputationFlag",
"FullTimePaidStaff_Librarians",
"FullTimePaidStaff_Librarians_ImputationFlag",
"FullTimePaidStaff_Librarians_WithMasters",
"FullTimePaidStaff_Librarians_WithMasters_ImputationFlag",
"FullTimePaidStaff_Other",
"FullTimePaidStaff_Other_ImputationFlag",
"FullTimePaidStaff_Total",
"FullTimePaidStaff_Total_ImputationFlag",
"Hours_Total",
"Hours_Total_ImputationFlag",
"InterLibraryLoans_Amount_Given",
"InterLibraryLoans_Amount_Given_ImputationFlag",
"InterLibraryLoans_Amount_Received",
"InterLibraryLoans_Amount_Received_ImputationFlag",
"InternationalCommitteeForInformationTechnologyStandardsStateCode_2Digit",
"LibraryCollections_CountOf_AudioPhysical_ImputationFlag",
"LibraryCollections_CountOf_Audio_Downloadable",
"LibraryCollections_CountOf_Audio_Downloadable_ImputationFlag",
"LibraryCollections_CountOf_Audio_Physical",
"LibraryCollections_CountOf_EBooks",
"LibraryCollections_CountOf_EBooks_ImputationFlag",
"LibraryCollections_CountOf_PrintMaterials",
"LibraryCollections_CountOf_Video_Downloadable",
"LibraryCollections_CountOf_Video_Downloadable_ImputationFlag",
"LibraryCollections_CountOf_Video_Physical",
"LibraryCollections_CountOf_Video_Physical_ImputationFlag",
"LibraryPrograms_Attendance_For_Children",
"LibraryPrograms_Attendance_For_Children_ImputationFlag",
"LibraryPrograms_Attendance_For_YoungAdults",
"LibraryPrograms_Attendance_For_YoungAdults_ImputationFlag",
"LibraryPrograms_Attendance_Total",
"LibraryPrograms_Attendance_Total_ImputationFlag",
"LibraryPrograms_CountOf_For_Children",
"LibraryPrograms_CountOf_For_Children_ImputationFlag",
"LibraryPrograms_CountOf_For_YoungAdults",
"LibraryPrograms_CountOf_For_YoungAdults_ImputationFlag",
"LibraryPrograms_CountOf_Total",
"LibraryPrograms_CountOf_Total_ImputationFlag",
"LibraryServices_CountOf_ReferenceTransactions",
"LibraryServices_CountOf_ReferenceTransactions_ImputationFlag",
"LibraryServices_CountOf_RegisteredUsers",
"LibraryServices_CountOf_RegisteredUsers_ImputationFlag",
"LibraryServices_CountOf_Visits",
"LibraryServices_CountOf_Visits_ImputationFlag",
"Name",
"OperatingExpenditures_On_Collections_Of_ElectronicMaterials",
"OperatingExpenditures_On_Collections_Of_ElectronicMaterials_ImputationFlag",
"OperatingExpenditures_On_Collections_Of_OtherMaterials",
"OperatingExpenditures_On_Collections_Of_OtherMaterials_ImputationFlag",
"OperatingExpenditures_On_Collections_Of_PrintMaterials",
"OperatingExpenditures_On_Collections_Of_PrintMaterials_ImputationFlag",
"OperatingExpenditures_On_Collections_Of_Total",
"OperatingExpenditures_On_Collections_Of_Total_ImputationFlag",
"OperatingExpenditures_On_Other",
"OperatingExpenditures_On_Other_ImputationFlag",
"OperatingExpenditures_On_Staff_EmployeeBenefits",
"OperatingExpenditures_On_Staff_EmployeeBenefits_ImputationFlag",
"OperatingExpenditures_On_Staff_Salaries",
"OperatingExpenditures_On_Staff_Salaries_ImputationFlag",
"OperatingExpenditures_On_Staff_Total",
"OperatingExpenditures_On_Staff_Total_ImputationFlag",
"OperatingExpenditures_Total",
"OperatingExpenditures_Total_ImputationFlag",
"OperatingRevenue_From_FederalGovernment",
"OperatingRevenue_From_FederalGovernment_ImputationFlag",
"OperatingRevenue_From_LocalGovernment",
"OperatingRevenue_From_LocalGovernment_ImputationFlag",
"OperatingRevenue_From_Other",
"OperatingRevenue_From_Other_ImputationFlag",
"OperatingRevenue_From_StateGovernment",
"OperatingRevenue_From_StateGovernment_ImputationFlag",
"OperatingRevenue_Total",
"OperatingRevenue_Total_ImputationFlag",
"OtherCollections_CurrentPrintSerialSubscriptions",
"OtherCollections_CurrentPrintSerialSubscriptions_ImputationFlag",
"OtherElectronicInfo_CountOf_ComputerUses",
"OtherElectronicInfo_CountOf_ComputerUses_ImputationFlag",
"OtherElectronicInfo_CountOf_Computers",
"OtherElectronicInfo_CountOf_Computers_ImputationFlag",
"OtherElectronicInfo_CountOf_WiFiSessions",
"Population_Of_LegalServiceArea",
"Population_Of_LegalServiceArea_ImputationFlag",
"Population_Of_LegalServiceAreas_Unduplicated",
"Population_Of_State_EstimateTotal",
"ReportingPeriod_EndDate",
"ReportingPeriod_StartDate",
"ServiceOutlets_CountOf_Bookmobiles",
"ServiceOutlets_CountOf_Bookmobiles_ImputationFlag",
"ServiceOutlets_CountOf_BranchLibraries",
"ServiceOutlets_CountOf_BranchLibraries_ImputationFlag",
"ServiceOutlets_CountOf_CentralLibraries",
"ServiceOutlets_CountOf_CentralLibraries_ImputationFlag",
"SubmissionYearOfPublicLibraryData",
]
# Expected normalized column names for the IMLS public-library "system"
# (administrative entity) data file. Used to validate a renamed/parsed
# dataframe against this exact, sorted schema.
EXPECTED_SYS_DATA_COLS = [
    "AddressChangeCode",
    "AdministrativeStructureCode",
    "BureauOfEconomicAnalysisCode",
    "CapitalExpenditures_Total",
    "CapitalExpenditures_Total_ImputationFlag",
    "CapitalRevenue_From_FederalGovernment",
    "CapitalRevenue_From_FederalGovernment_ImputationFlag",
    "CapitalRevenue_From_Government_ImputationFlag",
    "CapitalRevenue_From_LocalGovernment",
    "CapitalRevenue_From_Other",
    "CapitalRevenue_From_Other_ImputationFlag",
    "CapitalRevenue_From_StateGovernment",
    "CapitalRevenue_From_StateGovernment_ImputationFlag",
    "CapitalRevenue_Total",
    "CapitalRevenue_Total_ImputationFlag",
    "CategorizationOfLocale_By_ModalLocaleCodeOfAssociatedStationaryOutlets",
    "CategorizationOfLocale_By_ModalLocaleCodeOfAssociatedStationaryOutlets_FromRuralEducationAchievementProgram",
    "CategorizationOfLocale_By_SizeAndProximityToCities",
    "CategorizationOfLocale_By_SizeAndProximityToCities_FromRuralEducationAchievementProgram",
    "CensusBlock",
    "CensusTract",
    "Circulation_CountOf_ChildrenMaterials",
    "Circulation_CountOf_ChildrenMaterials_ImputationFlag",
    "Circulation_CountOf_ElectronicContentUse",
    "Circulation_CountOf_ElectronicMaterials",
    "Circulation_CountOf_PhysicalMaterials",
    "Circulation_CountOf_SuccessfulRetrievalOfElectronicInfo",
    "Circulation_Total_CountOf_PhysicalAndElectronicCirculation_And_ElectronicSuccessfulRetrieval",
    "Circulation_Total_Transactions",
    "CongressionalDistrict",
    "CoreBasedStatisticalArea",
    "CountyPopulation",
    "DidLegalServiceAreaChangeInPastYear",
    "ElectronicCollection_From_LocalOrOther",
    "ElectronicCollection_From_LocalOrOther_ImputationFlag",
    "ElectronicCollection_From_State",
    "ElectronicCollection_From_State_ImputationFlag",
    "ElectronicCollection_Total",
    "ElectronicCollection_Total_ImputationFlag",
    "FullTimePaidStaff_CountOf_Employees_WithTitleLibrarian",
    "FullTimePaidStaff_CountOf_Employees_WithTitleLibrarian_ImputationFlag",
    "FullTimePaidStaff_CountOf_OtherPaidStaff",
    "FullTimePaidStaff_CountOf_OtherPaidStaff_ImputationFlag",
    "FullTimePaidStaff_CountOf_PaidLibrarians_WithMasters",
    "FullTimePaidStaff_CountOf_PaidLibrarians_WithMasters_ImputationFlag",
    "FullTimePaidStaff_Total",
    "FullTimePaidStaff_Total_ImputationFlag",
    "Geocoding_AccuracyAndPrecision",
    "GeographicNamesInformationSystemFeatureId",
    "InterLibraryLoans_Amount_Given",
    "InterLibraryLoans_Amount_Given_ImputationFlag",
    "InterLibraryLoans_Amount_Received",
    "InterLibraryLoans_Amount_Received_ImputationFlag",
    "InterlibraryRelationshipCode",
    "InternationalCommitteeForInfoTechStandardsStateCode_2Digit",
    "InternationalCommitteeForInfoTechStandardsStateCode_3Digit",
    "Latitude",
    "LegalBasisCode",
    "LibraryCollection_CountOf_AudioMaterials_Downloadable",
    "LibraryCollection_CountOf_AudioMaterials_Downloadable_ImputationFlag",
    "LibraryCollection_CountOf_AudioMaterials_Physical",
    "LibraryCollection_CountOf_AudioMaterials_Physical_ImputationFlag",
    "LibraryCollection_CountOf_ElectronicMaterials",
    "LibraryCollection_CountOf_ElectronicMaterials_ImputationFlag",
    "LibraryCollection_CountOf_PrintMaterials",
    "LibraryCollection_CountOf_VideoMaterials_Downloadable",
    "LibraryCollection_CountOf_VideoMaterials_Downloadable_ImputationFlag",
    "LibraryCollection_CountOf_VideoMaterials_Physical",
    "LibraryCollection_CountOf_VideoMaterials_Physical_ImputationFlag",
    "LibraryIdCode_FromIMLS",
    "LibraryIdCode_FromState",
    "LibraryPrograms_Attendance_For_Children",
    "LibraryPrograms_Attendance_For_Children_ImputationFlag",
    "LibraryPrograms_Attendance_For_YoungAdults",
    "LibraryPrograms_Attendance_For_YoungAdults_ImputationFlag",
    "LibraryPrograms_Attendance_Total",
    "LibraryPrograms_Attendance_Total_ImputationFlag",
    "LibraryPrograms_CountOf_For_Children",
    "LibraryPrograms_CountOf_For_Children_ImputationFlag",
    "LibraryPrograms_CountOf_For_YoungAdults",
    "LibraryPrograms_CountOf_For_YoungAdults_ImputationFlag",
    "LibraryPrograms_CountOf_Total",
    "LibraryPrograms_CountOf_Total_ImputationFlag",
    "LibraryServices_CountOf_ReferenceTransactions",
    "LibraryServices_CountOf_ReferenceTransactions_ImputationFlag",
    "LibraryServices_CountOf_RegisteredUsers",
    "LibraryServices_CountOf_RegisteredUsers_ImputationFlag",
    "LibraryServices_CountOf_Visits",
    "LibraryServices_CountOf_Visits_ImputationFlag",
    # NOTE(review): "Longitute" looks like a typo for "Longitude", but it is
    # data that must match the renaming map / upstream schema exactly —
    # confirm against the column-rename source before correcting it here.
    "Longitute",
    "MailingAddress_Address",
    "MailingAddress_City",
    "MailingAddress_County",
    "MailingAddress_PhoneNumber",
    "MailingAddress_ZipCode",
    "MailingAddress_ZipCode_4Digit",
    "MeetsDefinitionOfPublicLibrary",
    "MetropolitanAndMicropolitcanStatisticalAreaFlag",
    "Name",
    "NameChangeCode",
    "OperatingExpenditures_On_Collection_ElectronicMaterials",
    "OperatingExpenditures_On_Collection_ElectronicMaterials_ImputationFlag",
    "OperatingExpenditures_On_Collection_OtherMaterials",
    "OperatingExpenditures_On_Collection_OtherMaterials_ImputationFlag",
    "OperatingExpenditures_On_Collection_PrintMaterials",
    "OperatingExpenditures_On_Collection_PrintMaterials_ImputationFlag",
    "OperatingExpenditures_On_Collection_Total",
    "OperatingExpenditures_On_Collection_Total_ImputationFlag",
    "OperatingExpenditures_On_Other",
    "OperatingExpenditures_On_Other_ImputationFlag",
    "OperatingExpenditures_On_Staff_EmployeeBenefits",
    "OperatingExpenditures_On_Staff_EmployeeBenefits_ImputationFlag",
    "OperatingExpenditures_On_Staff_Total",
    "OperatingExpenditures_On_Staff_Total_ImputationFlag",
    "OperatingExpenditures_On_Staff_Wages",
    "OperatingExpenditures_On_Staff_Wages_ImputationFlag",
    "OperatingExpenditures_Total",
    "OperatingExpenditures_Total_ImputationFlag",
    "OperatingRevenue_From_FederalGovernment",
    "OperatingRevenue_From_FederalGovernment_ImputationFlag",
    "OperatingRevenue_From_LocalGovernment",
    "OperatingRevenue_From_LocalGovernment_ImputationFlag",
    "OperatingRevenue_From_OtherSources",
    "OperatingRevenue_From_OtherSources_ImputationFlag",
    "OperatingRevenue_From_StateGovernment",
    "OperatingRevenue_From_StateGovernment_ImputationFlag",
    "OperatingRevenue_Total",
    "OperatingRevenue_Total_ImputationFlag",
    "OtherElectronicInformation_CountOf_ComputersUsedByPublic",
    "OtherElectronicInformation_CountOf_ComputersUsedByPublic_ImputationFlag",
    "OtherElectronicInformation_CountOf_UsagesOfComputers",
    "OtherElectronicInformation_CountOf_UsagesOfComputers_ImputationFlag",
    "OtherElectronicInformation_CountOf_WiFiUses",
    "Population_Of_LegalServiceArea",
    "Population_Of_LegalServiceArea_ImputationFlag",
    "Population_Of_LegalServiceArea_Unduplicated",
    "PrintSerialSubscriptions_Total",
    "PrintSerialSubscriptions_Total_ImputationFlag",
    "PublicServiceHours_Total_PerYear",
    "PublicServiceHours_Total_PerYear_ImputationFlag",
    "ReportingPeriod_EndDate",
    "ReportingPeriod_StartDate",
    "ReportingStatus",
    "ServiceOutlets_CountOf_Bookmobiles",
    "ServiceOutlets_CountOf_Bookmobiles_ImputationFlag",
    "ServiceOutlets_CountOf_BranchLibraries",
    "ServiceOutlets_CountOf_BranchLibraries_ImputationFlag",
    "ServiceOutlets_CountOf_CentralLibraries",
    "ServiceOutlets_CountOf_CentralLibraries_ImputationFlag",
    "State",
    "StreetAddress_Address",
    "StreetAddress_City",
    "StreetAddress_ZipCode",
    "StreetAddress_ZipCode_4Digit",
    "StructureChangeCode",
    "SubmissionYearOfPublicLibraryData",
    "TypeOfRegionServed",
]
| StarcoderdataPython |
8118525 | <reponame>societe-generale/jaeger-client-python
# Copyright (c) 2016 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import mock
import unittest
from jaeger_client import utils
class ConfigTests(unittest.TestCase):
    """Unit tests for jaeger_client.utils: get_boolean parsing and ErrorReporter."""

    def check_boolean(self, string, default, correct):
        # Shared helper: get_boolean(string, default) must equal `correct`.
        assert utils.get_boolean(string, default) == correct

    def test_get_false_boolean(self):
        self.check_boolean('false', 'asdf', False)

    def test_get_0_boolean(self):
        self.check_boolean('0', 'asdf', False)

    def test_get_true_boolean(self):
        self.check_boolean('true', 'qwer', True)

    def test_get_1_boolean(self):
        self.check_boolean('1', 'qwer', True)

    def test_get_unknown_boolean(self):
        # Unrecognized strings fall back to the supplied default.
        self.check_boolean('zxcv', 'qwer', 'qwer')

    def test_get_None_boolean(self):
        # None is treated as falsy, NOT as "use the default".
        self.check_boolean(None, 'qwer', False)

    # NOTE(review): disabled test below references an undefined `mock_metrics`;
    # left as-is pending a decision to fix or delete it.
    # def test_error_reporter_doesnt_send_metrics_if_not_configured(self):
    #     er = utils.ErrorReporter(False)
    #     er.error('foo', 1)
    #     assert not mock_metrics.count.called

    def test_error_reporter_sends_metrics_if_configured(self):
        mock_metrics = mock.MagicMock()
        er = utils.ErrorReporter(mock_metrics)
        er.error('foo', 1)
        # NOTE(review): `called_with(...)` is not a Mock assertion method — it
        # returns a truthy Mock, so this assert can never fail. Likely meant
        # `mock_metrics.count.assert_called_with('foo', 1)`; verify the actual
        # call signature before tightening.
        assert mock_metrics.count.called_with('foo', 1)

    def test_error_reporter_doesnt_send_log_messages_if_before_deadline(self):
        mock_logger = mock.MagicMock()
        # Huge interval: the log deadline is far in the future, so no log call.
        er = utils.ErrorReporter(None, logger=mock_logger, log_interval_minutes=1000)
        er.error('foo', 1)
        assert not mock_logger.error.called

    def test_error_reporter_sends_log_messages_if_after_deadline(self):
        mock_logger = mock.MagicMock()
        # 0 log interval means we're always after the deadline, so always log
        er = utils.ErrorReporter(None, logger=mock_logger, log_interval_minutes=0)
        er._last_error_reported_at=0
        er.error('foo', 1, 'error args')
        # All positional args are forwarded verbatim to logger.error.
        assert mock_logger.error.call_args == (('foo', 1, 'error args',),)
def test_local_ip_does_not_blow_up():
    """Smoke test: local_ip() must survive a transient gethostbyname failure.

    The patched gethostbyname raises IOError on the first call and returns
    '127.0.0.1' on the second, exercising local_ip's fallback path.
    """
    import socket
    import jaeger_client.utils
    socket.gethostname()
    with mock.patch('socket.gethostbyname',
                    side_effect=[IOError(), '127.0.0.1']):
        jaeger_client.utils.local_ip()
| StarcoderdataPython |
8170938 | <reponame>asanoviskhak/Outtalent
class Solution:
    def transpose(self, A: List[List[int]]) -> List[List[int]]:
        """Return the transpose of matrix *A*: rows become columns."""
        transposed = []
        for column in zip(*A):
            transposed.append(list(column))
        return transposed
| StarcoderdataPython |
1875701 | <reponame>parveshkatoch/Scavenger---OSINT-Bot<gh_stars>1-10
#!/usr/bin/python
import time
import datetime
import os
from os import listdir
from os.path import isfile, join
# NOTE: Python 2 code (uses the `print` statement).
class ScavUtility:
    """Helper utilities for the Scavenger OSINT bot: archiving scraped pastes,
    extracting files that look like credential dumps, and simple daily stats.

    SECURITY NOTE(review): every shell command below is built by string
    concatenation and run via os.system/os.popen — paths containing spaces or
    shell metacharacters will break or be injectable. Acceptable only for
    trusted, locally-generated paths; confirm before wider use.
    """

    def __init__(self):
        pass

    def testifreadytoarchive(self, directory):
        """Return 1 if `directory` holds more than 48000 files, else 0."""
        pastecount = len([name for name in os.listdir(directory) if os.path.isfile(os.path.join(directory, name))])
        if pastecount > 48000:
            return 1
        else:
            return 0

    def archivepastes(self, dir, site):
        """Zip the contents of `dir`, move the zip into archive/, wipe `dir`."""
        # Timestamp-based name keeps successive archives from colliding.
        archivefilename = str(time.time()) + ".zip"
        os.system("zip -r " + site + "_" + archivefilename + " " + dir)
        os.system("mv " + site + "_" + archivefilename + " archive/.")
        os.system("rm " + dir + "/*")

    def getthejuicythings(self, pastefolder, site):
        """Copy files containing 'email:' patterns into data/files_with_passwords/.

        Uses find|xargs grep -l to list files matching an email-followed-by-colon
        regex (the classic combo-list format).
        """
        emailPattern = os.popen("find " + pastefolder + " -type f -print | xargs grep -l -E -o \"\\b[a-zA-Z0-9.-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z0-9.-]+\\b:\"").read()
        emailPattern = emailPattern.split("\n")
        print emailPattern
        for file in emailPattern:
            if file != "":
                # Keep only the basename, suffix it with the source site.
                fname = file.split("/")
                fname = fname[len(fname)-1]
                os.system("cp " + file + " data/files_with_passwords/" + fname + "_" + site)

    def statisticsaddpoint(self):
        """Append one marker character to today's statistics file (one file per day)."""
        now = datetime.datetime.now()
        f = open("statistics/" + str(now.day) + "-" + str(now.month) + "-" + str(now.year), "a+")
        f.write("0")
        f.close()

    def statisticscountpoints(self):
        """Return [[timestamp, count], ...] per daily stats file.

        Count = file length (one char written per finding by statisticsaddpoint).
        NOTE(review): `takeSecond` actually returns element 0 — combined with the
        sort below the list appears ordered by timestamp, not by count; the name
        is misleading. Also the file handle `f` is never closed in the loop.
        """
        def takeSecond(elem):
            return elem[0]
        statisticset = []
        statisticpath = "statistics"
        statisticfiles = [f for f in listdir(statisticpath) if isfile(join(statisticpath, f))]
        for statfile in statisticfiles:
            f = open(statisticpath + "/" + statfile, "r")
            numberoffindings = len(f.read())
            # Convert the d-m-Y filename back to a Unix timestamp.
            statfile = statfile.replace("-", "/")
            statfile = time.mktime(datetime.datetime.strptime(statfile, "%d/%m/%Y").timetuple())
            fileset = [statfile, numberoffindings]
            statisticset.append(fileset)
        statisticset.sort(key=takeSecond)
        return statisticset
| StarcoderdataPython |
188203 | #<NAME>
# Write a program that asks the user to input any positive integer and outputs the successive values of the following calculation.
# At each step calculate the next value by taking the current value and
# if it is even, divide it by two, but if it is odd, multiply it by three and add one.
# Have the program end if the current value is one.
#Version includes print text to describe number input
#Version includes print text to describe number output
def next_collatz(n):
    """Return the Collatz successor of positive integer *n*.

    Even n -> n // 2; odd n -> 3*n + 1. Pure integer arithmetic: the original
    used float division (n / 2), which loses exactness for very large inputs.
    """
    if n % 2 == 0:
        return n // 2
    return 3 * n + 1


def main():
    """Read a positive integer and print each Collatz step until reaching 1."""
    pos_int = int(input("Please enter a positive integer:"))
    # While loop with if-statement choosing the even/odd rule each step.
    while pos_int > 1:
        nxt = next_collatz(pos_int)
        if pos_int % 2 == 0:
            print(pos_int, "is even so divide by two. This gives:", nxt)
        else:
            print(pos_int, "is odd so multiply by three and add one. This gives:", nxt)
        pos_int = nxt
    # Prints when program completes (pos_int is now 1).
    print("We have now reached", pos_int, "!!!! Programme completed")


# Guard so importing this module does not block on input().
if __name__ == "__main__":
    main()
#Version with print text | StarcoderdataPython |
145767 | from synapseaware.isthmus import topological_thinning
from synapseaware.teaser import teaser
from synapseaware.connectome import wiring
# Run the full skeletonization pipeline for one labeled segment of the
# Fib25 dataset: thinning -> TEASER skeleton -> connectome wiring refinement.
prefix = 'Fib25'
label = 1
topological_thinning.TopologicalThinning(prefix, label)
teaser.TEASER(prefix, label)
wiring.GenerateSkeleton(prefix, label)
wiring.RefineSkeleton(prefix, label)
| StarcoderdataPython |
6422330 | import pytest
from tetris.grid import Point, TetrisGrid, clear_rows
# Test fixtures: locked cells of a 5-wide, 12-tall tetris grid mapped to an
# RGB color. Commented-out Point entries are the deliberate gaps that keep a
# row from being complete (and therefore from being cleared).

# Rows 11 and 9 are full; row 10 is missing (1, 10) -> rows 11 and 9 clear.
locked_points0 = {
    Point(0, 11): (1, 1, 1),
    Point(1, 11): (1, 1, 1),
    Point(2, 11): (1, 1, 1),
    Point(3, 11): (1, 1, 1),
    Point(4, 11): (1, 1, 1),
    Point(0, 10): (1, 1, 1),
    # Point(1, 10): (1, 1, 1),
    Point(2, 10): (1, 1, 1),
    Point(3, 10): (1, 1, 1),
    Point(4, 10): (1, 1, 1),
    Point(0, 9): (1, 1, 1),
    Point(1, 9): (1, 1, 1),
    Point(2, 9): (1, 1, 1),
    Point(3, 9): (1, 1, 1),
    Point(4, 9): (1, 1, 1),
}

# Same gap at (1, 10), plus a full row 8 and a lone cell at (0, 7).
locked_points1 = {
    Point(0, 11): (1, 1, 1),
    Point(1, 11): (1, 1, 1),
    Point(2, 11): (1, 1, 1),
    Point(3, 11): (1, 1, 1),
    Point(4, 11): (1, 1, 1),
    Point(0, 10): (1, 1, 1),
    # Point(1, 10): (1, 1, 1),
    Point(2, 10): (1, 1, 1),
    Point(3, 10): (1, 1, 1),
    Point(4, 10): (1, 1, 1),
    Point(0, 9): (1, 1, 1),
    Point(1, 9): (1, 1, 1),
    Point(2, 9): (1, 1, 1),
    Point(3, 9): (1, 1, 1),
    Point(4, 9): (1, 1, 1),
    Point(0, 8): (1, 1, 1),
    Point(1, 8): (1, 1, 1),
    Point(2, 8): (1, 1, 1),
    Point(3, 8): (1, 1, 1),
    Point(4, 8): (1, 1, 1),
    Point(0, 7): (1, 1, 1),
}

# Rows 11-9 full; row 8 missing (2, 8) and (3, 8); lone cell at (0, 7).
locked_points2 = {
    Point(0, 11): (1, 1, 1),
    Point(1, 11): (1, 1, 1),
    Point(2, 11): (1, 1, 1),
    Point(3, 11): (1, 1, 1),
    Point(4, 11): (1, 1, 1),
    Point(0, 10): (1, 1, 1),
    Point(1, 10): (1, 1, 1),
    Point(2, 10): (1, 1, 1),
    Point(3, 10): (1, 1, 1),
    Point(4, 10): (1, 1, 1),
    Point(0, 9): (1, 1, 1),
    Point(1, 9): (1, 1, 1),
    Point(2, 9): (1, 1, 1),
    Point(3, 9): (1, 1, 1),
    Point(4, 9): (1, 1, 1),
    Point(0, 8): (1, 1, 1),
    Point(1, 8): (1, 1, 1),
    # Point(2, 8): (1, 1, 1),
    # Point(3, 8): (1, 1, 1),
    Point(4, 8): (1, 1, 1),
    Point(0, 7): (1, 1, 1),
}
@pytest.mark.parametrize('locked,grid,points', [
    [
        locked_points0, TetrisGrid(5, 12, locked_points0), [
            Point(0, 11),
            Point(2, 11),
            Point(3, 11),
            Point(4, 11),
        ]
    ],
    [
        locked_points1, TetrisGrid(5, 12, locked_points1), [
            Point(0, 11),
            Point(2, 11),
            Point(3, 11),
            Point(4, 11),
            Point(0, 10),
        ]
    ],
    [
        locked_points2, TetrisGrid(5, 12, locked_points2), [
            Point(0, 11),
            Point(1, 11),
            Point(4, 11),
            Point(0, 10),
        ]
    ],
])
def test_grid(locked, grid, points):
    """clear_rows must remove complete rows and shift survivors down.

    After clearing, `locked` must contain exactly the expected `points`
    (the cells from incomplete rows, translated to their new positions).
    """
    clear_rows(grid, locked)
    # Removed leftover debug print(locked) — it only polluted pytest output.
    for p in points:
        assert p in locked
    # No extra cells survived the clear.
    assert len(points) == len(locked)
| StarcoderdataPython |
3432094 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import sys
import platform
import random
import string
import importlib
#Parameters
import optparse
import configparser
import OmniDB.custom_settings
import OmniDB_app.include.OmniDatabase as OmniDatabase
import OmniDB_app.include.Spartacus.Utils as Utils
# Server entry point runs in production (non-dev, non-desktop) mode by default;
# -A/--app flips DESKTOP_MODE back on below.
OmniDB.custom_settings.DEV_MODE = False
OmniDB.custom_settings.DESKTOP_MODE = False

# --- Command-line interface -------------------------------------------------
parser = optparse.OptionParser(version=OmniDB.custom_settings.OMNIDB_VERSION)

group = optparse.OptionGroup(parser, "General Options")
group.add_option("-d", "--homedir", dest="homedir",
                 default='', type=str,
                 help="home directory containing config and log files")
group.add_option("-C", "--configfile", dest="conf",
                 default='', type=str,
                 help="configuration file")
group.add_option("-i", "--init", dest="init",
                 action="store_true",
                 default=False,
                 help="Create home directory containing config and log files")
parser.add_option_group(group)

group = optparse.OptionGroup(parser, "Webserver Options")
group.add_option("-H", "--host", dest="host",
                 default=None, type=str,
                 help="listening address")
group.add_option("-p", "--port", dest="port",
                 default=None, type=int,
                 help="listening port")
# -A is the hidden flag the desktop (electron) wrapper uses to launch the server.
group.add_option("-A", "--app", dest="app",
                 action="store_true",
                 default=False,
                 help=optparse.SUPPRESS_HELP)
group.add_option("-P", "--path", dest="path",
                 default='', type=str,
                 help="path to access the application, other than /")
parser.add_option_group(group)

group = optparse.OptionGroup(parser, "Management Options",
                             "Options to list, create and drop users and connections.")
group.add_option("-M", "--migratedatabase", dest="migratedb",
                 nargs=1,metavar="dbfile",
                 help="migrate users and connections from OmniDB 2 to 3: -M dbfile")
group.add_option("-r", "--resetdatabase", dest="reset",
                 default=False, action="store_true",
                 help="reset user and session databases")
group.add_option("-j", "--jsonoutput", dest="jsonoutput",
                 default=False, action="store_true",
                 help="format list output as json")
group.add_option("-l", "--listusers", dest="listusers",
                 default=False, action="store_true",
                 help="list users")
group.add_option("-u", "--createuser", dest="createuser",
                 nargs=2,metavar="username password",
                 help="create user: -u username password")
group.add_option("-s", "--createsuperuser", dest="createsuperuser",
                 nargs=2,metavar="username password",
                 help="create super user: -s username password")
group.add_option("-x", "--dropuser", dest="dropuser",
                 nargs=1,metavar="username",
                 help="drop user: -x username")
group.add_option("-m", "--listconnections", dest="listconnections",
                 nargs=1,metavar="username",
                 help="list connections: -m username")
group.add_option("-c", "--createconnection", dest="createconnection",
                 nargs=8,metavar="username technology title host port database dbuser dbpassword",
                 help="create connection: -c username technology host port database dbuser dbpassword")
group.add_option("-z", "--dropconnection", dest="dropconnection",
                 nargs=1,metavar="connid",
                 help="drop connection: -z connid")
parser.add_option_group(group)

(options, args) = parser.parse_args()

#Generate random token if in app mode
if options.app:
    OmniDB.custom_settings.DESKTOP_MODE = True
    # One-shot token the electron wrapper presents on the login URL.
    OmniDB.custom_settings.APP_TOKEN = ''.join(random.choice(string.ascii_lowercase + string.digits) for i in range(50))
    app_version = True
else:
    app_version = False
# --- Resolve the home directory (config + logs + per-user DB) ----------------
if options.homedir!='':
    if not os.path.exists(options.homedir):
        print("Home directory does not exist. Please specify a directory that exists.",flush=True)
        sys.exit()
    else:
        OmniDB.custom_settings.HOME_DIR = options.homedir
else:
    # Default location depends on desktop vs server flavor.
    if OmniDB.custom_settings.DESKTOP_MODE:
        OmniDB.custom_settings.HOME_DIR = os.path.join(os.path.expanduser('~'), '.omnidb', 'omnidb-app')
    else:
        OmniDB.custom_settings.HOME_DIR = os.path.join(os.path.expanduser('~'), '.omnidb', 'omnidb-server')
    if not os.path.exists(OmniDB.custom_settings.HOME_DIR):
        print("Creating home directory.",flush=True)
        os.makedirs(OmniDB.custom_settings.HOME_DIR)

# --- Resolve the config file; seed it from the packaged template -------------
if options.conf!='':
    if not os.path.exists(options.conf):
        print("Config file not found. Please specify a file that exists.",flush=True)
        sys.exit()
    else:
        config_file = options.conf
else:
    config_file = os.path.join(OmniDB.custom_settings.HOME_DIR, 'config.py')
    if not os.path.exists(config_file):
        print("Copying config file to home directory.",flush=True)
        shutil.copyfile(os.path.join(OmniDB.custom_settings.BASE_DIR, 'config.py'), config_file)

# -i/--init only prepares the home dir + config, then exits.
if options.init:
    sys.exit()

# Loading config file (executed as a Python module named omnidb_settings).
spec = importlib.util.spec_from_file_location("omnidb_settings", config_file)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
omnidb_settings = module

# CLI options win over config-file values; config wins over hard defaults.
if options.host!=None:
    listening_address = options.host
else:
    if hasattr(omnidb_settings,'LISTENING_ADDRESS'):
        listening_address = omnidb_settings.LISTENING_ADDRESS
    else:
        listening_address = '127.0.0.1'

if options.port!=None:
    listening_port = options.port
else:
    if hasattr(omnidb_settings,'LISTENING_PORT'):
        listening_port = omnidb_settings.LISTENING_PORT
    else:
        listening_port = 8000

if options.path!='':
    OmniDB.custom_settings.PATH = options.path
else:
    if hasattr(omnidb_settings,'CUSTOM_PATH'):
        OmniDB.custom_settings.PATH = omnidb_settings.CUSTOM_PATH

# SSL: enabling it also forces secure cookies.
if hasattr(omnidb_settings,'IS_SSL'):
    is_ssl = omnidb_settings.IS_SSL
    if is_ssl:
        OmniDB.custom_settings.SESSION_COOKIE_SECURE = True
        OmniDB.custom_settings.CSRF_COOKIE_SECURE = True
else:
    is_ssl = False

if hasattr(omnidb_settings,'SSL_CERTIFICATE_FILE'):
    ssl_certificate_file = omnidb_settings.SSL_CERTIFICATE_FILE
    if is_ssl and not os.path.exists(ssl_certificate_file):
        print("Certificate file not found. Please specify a file that exists.",flush=True)
        sys.exit()
else:
    ssl_certificate_file = ''

if hasattr(omnidb_settings,'SSL_KEY_FILE'):
    ssl_key_file = omnidb_settings.SSL_KEY_FILE
    if is_ssl and not os.path.exists(ssl_key_file):
        print("Key file not found. Please specify a file that exists.",flush=True)
        sys.exit()
else:
    ssl_key_file = ''
#importing settings after setting HOME_DIR and other required parameters
import OmniDB.settings

# Adjust OmniDB settings based on the content of the config file
for attribute, value in omnidb_settings.__dict__.items():
    setattr(OmniDB.settings,attribute,value)

import logging
import logging.config
logger = logging.getLogger('OmniDB_app.Init')

import OmniDB
import OmniDB_app
import OmniDB_app.apps

# Point Django at the (now-patched) settings module and boot the framework.
os.environ['DJANGO_SETTINGS_MODULE'] = 'OmniDB.settings'
import django
from django.core.management import call_command
django.setup()

from OmniDB_app.models.main import *
from django.contrib.auth.models import User
from django.utils import timezone
import django_sass

print('''Running database migrations...''',flush=True)
logger.info('''Running Database Migrations...''')
# NOTE(review): `from os import devnull` below appears unused here.
from os import devnull
try:
    call_command("migrate", interactive=False)
    call_command("clearsessions")
except Exception as exc:
    # Migration failure is fatal: report and stop the server.
    print(str(exc),flush=True)
    logger.error(str(exc))
    sys.exit()

# Migration from 2 to 3 ########################################################
from omnidb_server_helper import *
old_db_file = dbfile = os.path.expanduser(os.path.join(OmniDB.custom_settings.HOME_DIR,'omnidb.db'))
# SQlite database file exists, proceed with migration
if os.path.exists(old_db_file) and not options.migratedb:
    migration_main(old_db_file, False, logger)
################################################################################

# Set to True by any management option below; a maintenance run exits before
# the webserver starts.
maintenance_action = False
def create_user(p_user, p_pwd, p_superuser):
    """Create a Django auth user for OmniDB.

    Args:
        p_user: login name.
        p_pwd: plain-text password (hashed by create_user).
        p_superuser: True for an OmniDB superuser.
    """
    # Fix: the password argument had been replaced by a dataset-anonymization
    # placeholder (`<PASSWORD>`), which is not valid Python — restore p_pwd.
    User.objects.create_user(username=p_user,
                             password=p_pwd,
                             email='',
                             last_login=timezone.now(),
                             is_superuser=p_superuser,
                             first_name='',
                             last_name='',
                             is_staff=False,
                             is_active=True,
                             date_joined=timezone.now())
# --- Management actions (each sets maintenance_action so we exit afterwards) --

if options.reset:
    maintenance_action = True
    # NOTE(review): "ATENTION" typo is a runtime string; fix separately if desired.
    print('*** ATENTION *** ALL USERS DATA WILL BE LOST')
    try:
        value = input('Would you like to continue? (y/n) ')
        if value.lower()=='y':
            # Removing users
            User.objects.all().delete()
            # Create default admin user
            create_user('admin', 'admin', True)
    except Exception as exc:
        print('Error:')
        print(exc)

if options.listusers:
    from OmniDB_app.include.Spartacus.Database import DataTable
    table = DataTable()
    table.AddColumn('id')
    table.AddColumn('username')
    table.AddColumn('superuser')
    maintenance_action = True
    users = User.objects.all()
    for user in users:
        table.AddRow([user.id,user.username,user.is_superuser])
    # -j switches between JSON and pretty-printed table output.
    if options.jsonoutput:
        print(table.Jsonify())
    else:
        print(table.Pretty())

if options.createuser:
    maintenance_action = True
    create_user(options.createuser[0], options.createuser[1], False)

if options.createsuperuser:
    maintenance_action = True
    create_user(options.createsuperuser[0], options.createsuperuser[1], True)

if options.dropuser:
    maintenance_action = True
    User.objects.get(username=options.dropuser).delete()

if options.listconnections:
    maintenance_action = True
    from OmniDB_app.include.Spartacus.Database import DataTable
    table = DataTable()
    table.AddColumn('id')
    table.AddColumn('technology')
    table.AddColumn('alias')
    table.AddColumn('connstring')
    table.AddColumn('host')
    table.AddColumn('port')
    table.AddColumn('database')
    table.AddColumn('user')
    table.AddColumn('tunnel enabled')
    table.AddColumn('tunnel server')
    table.AddColumn('tunnel port')
    table.AddColumn('tunnel user')
    # NOTE(review): maintenance_action is set twice in this branch (harmless).
    maintenance_action = True
    for conn in Connection.objects.filter(user=User.objects.get(username=options.listconnections)):
        table.AddRow(
            [
                conn.id,
                conn.technology.name,
                conn.alias,
                conn.conn_string,
                conn.server,
                conn.port,
                conn.database,
                conn.username,
                conn.use_tunnel,
                conn.ssh_server,
                conn.ssh_port,
                conn.ssh_user
            ]
        )
    if options.jsonoutput:
        print(table.Jsonify())
    else:
        print(table.Pretty())

if options.createconnection:
    maintenance_action = True
    # Positional args: username technology title host port database dbuser dbpassword
    connection = Connection(
        user=User.objects.get(username=options.createconnection[0]),
        technology=Technology.objects.get(name=options.createconnection[1]),
        server=options.createconnection[3],
        port=options.createconnection[4],
        database=options.createconnection[5],
        username=options.createconnection[6],
        password=options.createconnection[7],
        alias=options.createconnection[2],
        ssh_server='',
        ssh_port='',
        ssh_user='',
        ssh_password='',
        ssh_key='',
        use_tunnel=False,
        conn_string='',
    )
    connection.save()

if options.dropconnection:
    maintenance_action = True
    Connection.objects.get(id=options.dropconnection).delete()

if options.migratedb:
    maintenance_action = True
    dbfile = os.path.expanduser(options.migratedb)
    if not os.path.exists(dbfile):
        print('Specified database file does not exist, aborting.')
        sys.exit()
    else:
        migration_main(dbfile, True, logger)

# Maintenance performed, exit before starting webserver
if maintenance_action == True:
    sys.exit()
# This line was reached, so not a maintenance run, lock HOME DIR if not on Windows
# (an exclusive non-blocking flock on the home directory prevents a second
# server instance from sharing the same state; the fd is intentionally kept
# open for the lifetime of the process).
if platform.system() != 'Windows':
    import fcntl
    try:
        lockfile_pointer = os.open(OmniDB.custom_settings.HOME_DIR, os.O_RDONLY)
        fcntl.flock(lockfile_pointer, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except Exception as exc:
        # NOTE(review): "directoy" typo lives in a runtime string.
        print("OmniDB is already running pointing to directoy '{0}'.".format(OmniDB.custom_settings.HOME_DIR))
        sys.exit()
import social_django
import social_django.urls
import social_django.config
import social_django.strategy
import social_django.models
import social_core.backends.github
#if platform.system() != 'Darwin':
# import ldap
# import django_auth_ldap
# import django_auth_ldap.config
# import django_auth_ldap.backend
import html.parser
import http.cookies
import django.template.defaulttags
import django.template.loader_tags
import django.contrib.staticfiles
import django.contrib.staticfiles.apps
import django.contrib.admin.apps
import django.contrib.auth.apps
import django.contrib.contenttypes.apps
import django.contrib.sessions.apps
import django.contrib.messages.apps
import OmniDB_app.urls
import django.contrib.messages.middleware
import django.contrib.auth.middleware
import django.contrib.sessions.middleware
import django.contrib.sessions.serializers
import django.template.loaders
import django.contrib.auth.context_processors
import django.contrib.messages.context_processors
import django.views.defaults
import django.contrib.auth.password_validation
from django.core.handlers.wsgi import WSGIHandler
from OmniDB import startup
import time
import cherrypy
from django.contrib.sessions.backends.db import SessionStore
import socket
import random
import urllib.request
def check_port(port):
    """Return True if TCP `port` can be bound on 127.0.0.1, else False.

    Prints the socket error when the bind fails. SO_REUSEADDR is set so a
    port lingering in TIME_WAIT is still considered available.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        s.bind(("127.0.0.1", port))
    except socket.error as e:
        print(str(e))
        return False
    finally:
        # Fix: the original leaked the socket when bind() failed (close was
        # only reached on the success path); finally closes it on both paths.
        s.close()
    return True
class DjangoApplication(object):
    """Hosts the Django WSGI app inside a CherryPy server, serving static
    files directly from CherryPy and retrying random ports if busy."""

    def mount_static(self, url, root):
        """Serve `root` at `url` via CherryPy's staticdir tool with 1-day expiry."""
        config = {
            'tools.staticdir.on': True,
            'tools.staticdir.dir': root,
            'tools.expires.on': True,
            'tools.expires.secs': 86400
        }
        cherrypy.tree.mount(None, url, {'/': config})

    def run(self,parameters):
        """Configure CherryPy from `parameters` (address/port/SSL) and block.

        Tries the requested port first; on failure picks up to 20 random
        ports in 1025-32676 before giving up.
        """
        #cherrypy.engine.unsubscribe('graceful', cherrypy.log.reopen_files)
        logging.config.dictConfig(OmniDB.settings.LOGGING)
        #cherrypy.log.error_log.propagate = False
        cherrypy.log.access_log.propagate = False

        self.mount_static(OmniDB.settings.STATIC_URL, OmniDB.settings.STATIC_ROOT)
        # Graft the Django WSGI handler at the tree root.
        cherrypy.tree.graft(WSGIHandler())

        port = parameters['listening_port']
        num_attempts = 0
        print('''Checking port availability...''',flush=True)
        logger.info('''Checking port availability...''')
        while not check_port(port):
            print("Port {0} is busy, trying another port...".format(port),flush=True)
            logger.info("Port {0} is busy, trying another port...".format(port))
            port = random.randint(1025,32676)
            num_attempts = num_attempts + 1
            if num_attempts == 20:
                break
        if num_attempts < 20:
            v_cherrypy_config = {
                'server.socket_host': parameters['listening_address'],
                'server.socket_port': port,
                'engine.autoreload_on': False,
                'log.screen': False,
                'log.access_file': '',
                'log.error_file': ''
            }
            if parameters['is_ssl']:
                import ssl
                # TLS 1.2+ only: explicitly disable TLS 1.0/1.1.
                ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
                ssl_ctx.options |= ssl.OP_NO_TLSv1
                ssl_ctx.options |= ssl.OP_NO_TLSv1_1
                ssl_ctx.load_cert_chain(parameters['ssl_certificate_file'],
                                        parameters['ssl_key_file'])
                v_cherrypy_config['server.ssl_module'] = 'builtin'
                v_cherrypy_config['server.ssl_certificate'] = parameters['ssl_certificate_file']
                v_cherrypy_config['server.ssl_private_key'] = parameters['ssl_key_file']
                v_cherrypy_config['server.ssl_context'] = ssl_ctx
            cherrypy.config.update({
                'global': {
                    'engine.autoreload.on': False
                }
            })
            cherrypy.config.update(v_cherrypy_config)
            print ("Starting server {0} at {1}:{2}{3}.".format(OmniDB.settings.OMNIDB_VERSION,parameters['listening_address'],str(port),OmniDB.settings.PATH),flush=True)
            logger.info("Starting server {0} at {1}:{2}.".format(OmniDB.settings.OMNIDB_VERSION,parameters['listening_address'],str(port)))

            # Startup
            startup.startup_procedure()

            cherrypy.engine.start()
            if not app_version:
                print ("Open OmniDB in your favorite browser",flush=True)
                if platform.system() != 'Windows':
                    print ("Press Ctrl+C to exit",flush=True)
            else:
                #Sending response to electron app
                # NOTE(review): `pwd=<PASSWORD>` is a dataset-anonymization
                # placeholder inside a runtime string; the upstream source
                # embeds the real admin password here — restore from upstream.
                print ("http://localhost:{0}/omnidb_login/?user=admin&pwd=<PASSWORD>&token={1}".format(str(port),OmniDB.custom_settings.APP_TOKEN),flush=True)
            # Block the main thread until the engine stops (Ctrl+C etc.).
            cherrypy.engine.block()
            cherrypy.engine.exit()
        else:
            print('Tried 20 different ports without success, closing...',flush=True)
            logger.info('Tried 20 different ports without success, closing...')
print('''Starting OmniDB server...''',flush=True)
logger.info('''Starting OmniDB server...''')

#Removing Expired Sessions
SessionStore.clear_expired()

try:
    # Hand the resolved network/SSL parameters to the CherryPy host; this
    # call blocks until the server shuts down.
    DjangoApplication().run(
        {
            'listening_address' : listening_address,
            'listening_port' : listening_port,
            'is_ssl' : is_ssl,
            'ssl_certificate_file': ssl_certificate_file,
            'ssl_key_file' : ssl_key_file
        }
    )
except KeyboardInterrupt:
    # Graceful Ctrl+C shutdown.
    cherrypy.engine.exit()
    print("")
    print("Bye!")
| StarcoderdataPython |
1958504 | <gh_stars>1-10
from mojo.events import publishEvent

# Fire the RoboFont/mojo event that tells AutoInstaller to (re)load
# externally installed fonts; only when run directly, not on import.
if __name__ == "__main__":
    publishEvent(
        "AutoInstaller.AddExternalFonts"
    )
| StarcoderdataPython |
3588564 | <gh_stars>0
def _split_axis(length, section_num, u_rate):
    """Split `length` pixels into `section_num` overlapping spans.

    Each span has the same length, computed so that consecutive spans overlap
    by a fraction `u_rate` of the span length; the last span is stretched to
    end exactly at `length`. Also computes per-section ownership ranges whose
    boundaries lie at the midpoints of the overlap regions.

    Returns:
        (spans, box_range, part_len) where spans is [(start, end), ...],
        box_range is [[lo, hi], ...] (one ownership interval per section),
        and part_len is the common span length in pixels.
    """
    # n-1 spans contribute (1 - u_rate) of their length plus one full span.
    part_len = int(length / ((section_num - 1) * (1 - u_rate) + 1))
    spans = []
    for i in range(section_num):
        start = int(i * part_len * (1 - u_rate))
        end = start + part_len
        if i == section_num - 1:
            # Absorb rounding slack so the final span reaches the border.
            end = length
        spans.append((start, end))
    # Boundaries of the ownership boxes: first edge, midpoint of each
    # overlap (pairs of interior edges), last edge.
    edges = sorted(e for span in spans for e in span)
    middle = [edges[0]]
    for i in range(1, len(edges) - 1, 2):
        middle.append((edges[i] + edges[i + 1]) // 2)
    middle.append(edges[-1])
    box_range = [[middle[i], middle[i + 1]] for i in range(section_num)]
    return spans, box_range, part_len


def divide_range(Ori_img_W, Ori_img_H, section_num, mode=0):
    """Divide an image into `section_num` overlapping crops along one axis.

    :param Ori_img_W: original image width in pixels
    :param Ori_img_H: original image height in pixels
    :param section_num: number of sections to divide the chosen axis into
    :param mode: 0 to divide the width (20% overlap), 1 to divide the
        height (40% overlap)
    :return: (section, box_range, part_len) where each section entry is
        [top, bottom, left, right], box_range gives the non-overlapping
        ownership interval of each section along the split axis, and
        part_len is the crop size along that axis.
    """
    # The two modes were near-duplicates; the shared arithmetic now lives in
    # _split_axis and only the crop-rectangle layout differs per mode.
    if mode == 0:
        spans, box_range, part_len = _split_axis(Ori_img_W, section_num, 0.2)
        section = [[0, Ori_img_H, start, end] for start, end in spans]
    else:
        spans, box_range, part_len = _split_axis(Ori_img_H, section_num, 0.4)
        section = [[start, end, 0, Ori_img_W] for start, end in spans]
    return section, box_range, part_len
4996347 | <reponame>georgetown-analytics/DC-Bikeshare<gh_stars>10-100
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
import time
import os
import matplotlib.pyplot as plt
import sys
TIMESTR = time.strftime("%Y%m%d_%H%M%S")
def open_drive():
    """Run pydrive's local-webserver OAuth flow and return an authenticated
    GoogleDrive client.  Opens a browser window for user consent."""
    gauth = GoogleAuth()
    gauth.LocalWebserverAuth()
    drive = GoogleDrive(gauth)
    return drive
def push_to_drive(custom_title, location, drive, link):
    """Upload the local file at *location* to Google Drive as *custom_title*,
    placed inside the Drive folder identified by *link*."""
    metadata = {
        'title': custom_title,
        "parents": [{"kind": "drive#fileLink", "id": link}],
    }
    remote_file = drive.CreateFile(metadata)
    remote_file.SetContentFile(location)
    remote_file.Upload()
def all_in_one_save(title, path, drive, link):
    """Ensure *path* exists, save the current matplotlib figure there as a
    timestamped PNG, then upload it to the Drive folder *link*."""
    if not os.path.exists(path):
        os.makedirs(path)
    filepath = '%s_%s_%s.png' % (path, title, TIMESTR)
    plt.savefig(filepath)
    push_to_drive(title, filepath, drive, link)
| StarcoderdataPython |
3533413 | <filename>calendareshop/shopping/views.py
# -*- coding: utf-8 -*-
import json
import datetime
from collections import defaultdict
from django import forms
from django.db.models import Sum
from django.contrib import auth, messages
from django.contrib.admin.views.decorators import staff_member_required
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import get_object_or_404, redirect, render_to_response, render
from django.template import RequestContext, loader, Context
from django.utils.translation import ugettext as _
from django.views import generic
from django.conf import settings
from django.conf.urls import url, patterns
from plata.contact.models import Contact
from plata.discount.models import Discount
from plata.shop.views import Shop, checkout_process_decorator, cart_not_empty, \
order_already_confirmed, order_cart_validates
from .models import Product, CustomOrder, ShippingPayment, Payment, Shipping, email_hash
from .forms import CustomCheckoutForm, CustomConfirmationForm, ShippingPaymentForm
from calendareshop.utils import get_currency_code
class CalendarShop(Shop):
    """plata ``Shop`` subclass for the calendar e-shop.

    Adds a dedicated shipping/payment selection step between checkout and
    confirmation, keeps the order currency in sync with the request, and
    exposes a pre-order flag to templates.
    """
    # Template used by the extra shipping/payment step added in get_urls().
    shipping_payment_template = "plata/shop_shipping_payment.html"
    def default_currency(self, request=None):
        """
        Return the default currency for instantiating new orders

        Override this with your own implementation if you have a
        multi-currency shop with auto-detection of currencies.
        """
        return get_currency_code(request)
    def order_from_request(self, request, create=False):
        """Fetch (or create) the order and re-save it with the currency
        detected from the current request, so it always matches the visitor."""
        order = super(CalendarShop, self).order_from_request(request, create)
        if order:
            # set custom order currency
            order.currency = get_currency_code(request)
            order.save()
        return order
    def get_context(self, request, context, **kwargs):
        """Extend the template context with an ``is_preorder`` flag."""
        ctx = super(CalendarShop, self).get_context(request, context, **kwargs)
        ctx.update({
            'is_preorder': settings.PREORDER_END > datetime.date.today(),
        })
        return ctx
    def get_urls(self):
        """Append the shipping/payment step URL to plata's default URLs."""
        return super(CalendarShop, self).get_urls() + patterns('', self.get_shipping_payment_url())
    def get_shipping_payment_url(self):
        """Build the URL pattern for the shipping/payment step, guarded by the
        same checkout preconditions plata uses for the other steps."""
        return url(r'^shipping-payment/$', checkout_process_decorator(
            cart_not_empty, order_already_confirmed, order_cart_validates,
        )(self.shipping_payment), name='plata_shop_shipping_payment')
    def checkout_form(self, request, order):
        """Return the form class used for the checkout step."""
        return CustomCheckoutForm
    def checkout(self, request, order):
        """Handles the first step of the checkout process"""
        if not request.user.is_authenticated():
            if request.method == 'POST' and '_login' in request.POST:
                loginform = self.get_authentication_form(
                    data=request.POST,
                    prefix='login')
                if loginform.is_valid():
                    user = loginform.get_user()
                    auth.login(request, user)
                    # Attach the freshly logged-in user to the running order.
                    order.user = user
                    order.save()
                    return HttpResponseRedirect('.')
            else:
                loginform = self.get_authentication_form(prefix='login')
        else:
            loginform = None
        if order.status < order.CHECKOUT:
            order.update_status(order.CHECKOUT, 'Checkout process started')
        OrderForm = self.checkout_form(request, order)
        orderform_kwargs = {
            'prefix': 'order',
            'instance': order,
            'request': request,
            'shop': self,
        }
        if request.method == 'POST' and '_checkout' in request.POST:
            orderform = OrderForm(request.POST, **orderform_kwargs)
            if orderform.is_valid():
                # Record when the customer consented to personal data processing.
                orderform.instance.personal_information_consent_date = datetime.datetime.now()
                orderform.save()
                return self.redirect('plata_shop_shipping_payment')
        else:
            orderform = OrderForm(**orderform_kwargs)
        return self.render_checkout(request, {
            'order': order,
            'loginform': loginform,
            'orderform': orderform,
            'progress': 'checkout',
        })
    def render_checkout(self, request, context):
        """Renders the checkout page"""
        #context = self.get_context(request, context)
        return self.render(
            request,
            self.checkout_template,
            context
        )
    def shipping_payment(self, request, order):
        """
        Handles the order shipping and payment module selection step
        Hands off processing to confirmation
        """
        kwargs = {
            'order': order,
            'request': request,
            'shop': self,
        }
        if request.method == 'POST':
            form = ShippingPaymentForm(request.POST, **kwargs)
            if form.is_valid():
                form.save()
                return self.redirect('plata_shop_confirmation')
        else:
            form = ShippingPaymentForm(**kwargs)
        if order.status <= order.CHECKOUT:
            order.update_status(order.CHECKOUT, 'Shipping & Payment process started')
        # Pre-select whatever was POSTed, falling back to the order's current
        # choices (0 when nothing has been chosen yet).
        selected_shipping = int(request.POST.get('shipping_type', order.shipping_type.pk if order.shipping_type else 0))
        selected_payment = int(request.POST.get('payment_type', order.payment_type.pk if order.payment_type else 0))
        # prepare dict with combinations of Shipping - Payment for use in template
        shipping_payment = defaultdict(dict)
        for ship_pay in ShippingPayment.objects.select_related('shipping', 'payment'):
            # if pre-order, do not use cash payment
            if settings.PREORDER_END > datetime.date.today() and ship_pay.payment.module == 'cash':
                continue
            shipping_payment[ship_pay.shipping.id]['price'] = ship_pay.shipping.get_shipping_price(
                quantity=order.total_quantity,
                currency=order.currency,
                country_code=order.billing_country
            )
            shipping_payment[ship_pay.shipping.id].setdefault('payment', [])
            shipping_payment[ship_pay.shipping.id]['payment'].append(ship_pay.payment.id)
        return self.render_shipping_payment(request, {
            'order': order,
            'form': form,
            'shipping_payment_json': json.dumps(shipping_payment),
            'shipping_payment': dict(shipping_payment),
            'payment_payment': {p.pk: (p.get_payment_price(order), p.module) for p in Payment.objects.active()},
            'all_payment_ids_json': json.dumps(list(Payment.objects.active().values_list('id', flat=True))),
            'progress': 'shipping_payment',
            'selected_shipping': selected_shipping,
            'selected_payment': selected_payment,
        })
    def render_shipping_payment(self, request, context):
        """Renders the shipping_payment page"""
        return self.render(
            request,
            self.shipping_payment_template,
            self.get_context(request, context)
        )
    def confirmation_form(self, request, order):
        """Return the form class used for the final confirmation step."""
        return CustomConfirmationForm
# Single module-level Shop instance wired to plata's Contact/Discount models
# and this project's CustomOrder.
shop = CalendarShop(Contact, CustomOrder, Discount)
# Listing view showing all active products.
product_list = generic.ListView.as_view(
    queryset=Product.objects.filter(is_active=True),
    template_name='product/product_list.html',
)
class OrderItemForm(forms.Form):
    """Quantity selector shown on the product detail page (1-100 items)."""
    quantity = forms.IntegerField(
        label=_('quantity'),
        initial=1,
        min_value=1,
        max_value=100
    )
def product_detail(request, object_id):
    """Product detail view with an add-to-cart form (Python 2 syntax)."""
    product = get_object_or_404(Product.objects.filter(is_active=True), pk=object_id)
    if request.method == 'POST':
        form = OrderItemForm(request.POST)
        if form.is_valid():
            order = shop.order_from_request(request, create=True)
            try:
                order.modify_item(product, form.cleaned_data.get('quantity'))
                messages.success(request, _('The cart has been updated.'))
            except ValidationError, e:
                if e.code == 'order_sealed':
                    # Show every validation message raised for a sealed order.
                    [messages.error(request, msg) for msg in e.messages]
                else:
                    # NOTE(review): any other ValidationError code is reported
                    # as a currency mismatch -- confirm that is the only other
                    # error modify_item can raise here.
                    messages.error(request, _('Order contains more than one currency.'))
            return redirect('plata_shop_cart')
    else:
        form = OrderItemForm()
    return render_to_response('product/product_detail.html', {
        'object': product,
        'form': form,
    }, context_instance=RequestContext(request))
@staff_member_required
def email_test(request, order_id, template):
    """Render an order-notification e-mail template in the browser (staff only).

    Optional GET parameters ``shipping`` (Shipping.code) and ``payment``
    (Payment.module) override the order's types for preview purposes;
    the overridden order is never saved.
    """
    order = get_object_or_404(CustomOrder, pk=order_id)
    # overrides
    shipping = request.GET.get('shipping', None)
    if shipping:
        order.shipping_type = get_object_or_404(Shipping, code=shipping)
    payment = request.GET.get('payment', None)
    if payment:
        order.payment_type = get_object_or_404(Payment, module=payment)
    t = loader.get_template('plata/notifications/%s.html' % template)
    c = Context({
        'order': order,
        'bank_attrs': settings.PAYMENT_BANK_ATTRS,
        'is_preorder': settings.PREORDER_END > datetime.date.today(),
    })
    rendered = t.render(c)
    # Drop the first two rendered lines before displaying -- presumably the
    # e-mail subject/header lines of the template; verify against the templates.
    html = u"\n".join(rendered.splitlines()[2:])
    return HttpResponse(html, content_type="text/html")
@staff_member_required
def order_report(request):
    """Per-product quantity report grouped by order status (staff only)."""
    products = []
    for p in Product.objects.all():
        # Sum ordered quantities per status; `or 0` covers the None that
        # aggregate() returns when no matching order items exist.
        paid_count = p.orderitem_set.filter(order__status=CustomOrder.PAID).aggregate(Sum('quantity'))['quantity__sum'] or 0
        not_paid_count = p.orderitem_set.filter(order__status=CustomOrder.CONFIRMED).aggregate(Sum('quantity'))['quantity__sum'] or 0
        done_count = p.orderitem_set.filter(order__status=CustomOrder.COMPLETED).aggregate(Sum('quantity'))['quantity__sum'] or 0
        products.append({
            'product': p,
            'paid_count': paid_count,
            'not_paid_count': not_paid_count,
            'done_count': done_count,
            'total': paid_count + not_paid_count + done_count,
        })
    return render_to_response(
        "order_report.html",
        {'products': products},
        context_instance=RequestContext(request))
def gdpr_consent(request):
    """Record GDPR consent for every order placed with a given e-mail address.

    Authorization comes from the ``hash`` GET parameter, which must equal
    ``email_hash(email)`` -- i.e. the link was generated by us.  User-facing
    messages are in Czech (the shop's locale).
    """
    email = request.GET.get('email', None)
    hash = request.GET.get('hash', None)
    if email and hash and hash == email_hash(email):
        # is authorized
        for order in CustomOrder.objects.filter(email=email):
            order.personal_information_consent = True
            order.personal_information_consent_years = settings.PERSONAL_INFORMATION_CONSENT_YEARS
            order.personal_information_consent_date = datetime.datetime.now()
            order.save(update_fields=['personal_information_consent', 'personal_information_consent_date', 'personal_information_consent_years'])
        messages.success(request, "Souhlas se zpracováním osobních údajů byl úspěšně uložen, děkujeme.")
    else:
        # NOT authorized
        messages.error(request, "Nemáte oprávnění uskutečnit tuto akci.")
    return HttpResponseRedirect(reverse('project_index'))
| StarcoderdataPython |
11370961 | <filename>hooks/pre_gen_project.py
import re
import sys
# Rendered by cookiecutter before this pre-generation hook runs.
name = '{{ cookiecutter.package_name }}'
# Abort generation unless the name is a valid Python identifier.
# NOTE(review): the `+` quantifier requires at least two characters, so a
# single-letter package name is rejected -- confirm whether that is intended.
if not re.match(r'^[_a-zA-Z][_a-zA-Z0-9]+$', name):
    print('ERROR: Not a valid python package name: %s\n'
          ' Use \'_\' instead of \'-\' and start with a letter.' % name)
    # A non-zero exit code tells cookiecutter to abort project generation.
    sys.exit(1)
| StarcoderdataPython |
9694066 | """
Translator method
"""
import json
import os
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from dotenv import load_dotenv
# Credentials (apikey/url of the IBM Language Translator instance) are read
# from a local .env file via python-dotenv.
load_dotenv()
apikey = os.environ['apikey']
url = os.environ['url']
authenticator = IAMAuthenticator(apikey)
# Module-level client shared by the translation helpers below.
language_translator = LanguageTranslatorV3(
    version='2018-05-01',
    authenticator=authenticator
)
language_translator.set_service_url(url)
def english_to_french(english_text):
    """Translate text from english to french.

    Returns the translated string, or False when the input is None.
    """
    if english_text is None:
        return False
    response = language_translator.translate(
        text=english_text,
        source='en',
        target='fr',
    ).get_result()
    return response['translations'][0]['translation']
def french_to_english(french_text):
    """Translate text from french to english.

    Returns the translated string, or False when the input is None.
    """
    if french_text is None:
        return False
    response = language_translator.translate(
        text=french_text,
        source='fr',
        target='en',
    ).get_result()
    return response['translations'][0]['translation']
| StarcoderdataPython |
3463895 | from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER, CONFIG_DISPATCHER, HANDSHAKE_DISPATCHER
from ryu.controller.handler import set_ev_cls
import ryu.ofproto.ofproto_v1_3 as ofproto
import ryu.ofproto.ofproto_v1_3_parser as ofparser
import ryu.ofproto.openstate_v1_0 as osproto
import ryu.ofproto.openstate_v1_0_parser as osparser
from ryu.lib.packet import packet
from ryu.topology import event
from pprint import pprint
import logging
from sets import Set
import time
import sys,os
sys.path.append(os.path.abspath("/home/mininet/spider/src"))
import SPIDER_parser as f_t_parser
from ryu.lib import hub
from datetime import datetime
from time import sleep
import random
class OpenStateFaultTolerance(app_manager.RyuApp):
    """Ryu (Python 2) controller driving a SPIDER fault-tolerance experiment.

    On startup it parses the AMPL results and network topology, launches the
    corresponding Mininet network, installs precomputed OpenState flow
    entries on every switch, and then runs a monitor thread that repeatedly
    brings the most-loaded link down/up while hosts ping each other,
    collecting per-realization ping logs.
    """
    OFP_VERSIONS = [ofproto.OFP_VERSION]
    def __init__(self, *args, **kwargs):
        super(OpenStateFaultTolerance, self).__init__(*args, **kwargs)
        # Detection timeouts for SPIDER, taken from the environment.
        delta_6 = float(os.environ['delta_6'])
        delta_7 = float(os.environ['delta_7'])
        delta_5 = float(os.environ['delta_5'])
        f_t_parser.detection_timeouts_list = [(delta_6,delta_7,delta_5)]
        self.REALIZATIONS_NUM = int(os.environ['REALIZATIONS_NUM'])
        results_hash = f_t_parser.md5sum_results()
        if f_t_parser.network_has_changed(results_hash):
            f_t_parser.erase_figs_folder()
        (self.requests,self.faults) = f_t_parser.parse_ampl_results_if_not_cached()
        print len(self.requests), 'requests loaded'
        print len(self.faults), 'faults loaded'
        print "Building network graph from network.xml..."
        # G is a NetworkX Graph object
        (self.G, self.pos, self.hosts, self.switches, self.mapping) = f_t_parser.parse_network_xml()
        print 'Network has', len(self.switches), 'switches,', self.G.number_of_edges()-len(self.hosts), 'links and', len(self.hosts), 'hosts'
        print "NetworkX to Mininet topology conversion..."
        # mn_topo is a Mininet Topo object
        self.mn_topo = f_t_parser.networkx_to_mininet_topo(self.G, self.hosts, self.switches, self.mapping)
        # mn_net is a Mininet object
        self.mn_net = f_t_parser.create_mininet_net(self.mn_topo)
        f_t_parser.launch_mininet(self.mn_net)
        self.ports_dict = f_t_parser.adapt_mn_topo_ports_to_old_API(self.mn_topo.ports)
        f_t_parser.mn_setup_MAC_and_IP(self.mn_net)
        f_t_parser.mn_setup_static_ARP_entries(self.mn_net)
        f_t_parser.draw_network_topology(self.G,self.pos,self.ports_dict,self.hosts)
        (self.fault_ID, self.flow_entries_dict, self.flow_entries_with_timeout_dict, self.flow_entries_with_burst_dict) = f_t_parser.generate_flow_entries_dict(self.requests,self.faults,self.ports_dict,match_flow=f_t_parser.get_mac_match_mininet,check_cache=False)
        # Associates dp_id to datapath object
        self.dp_dictionary=dict()
        # Associates dp_id to a dict associating port<->MAC address
        self.ports_mac_dict=dict()
        # Needed by fault_tolerance_rest
        self.f_t_parser = f_t_parser
        # switch counter
        self.switch_count = 0
    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """Configure each switch as it connects: stateful stages + flows."""
        msg = ev.msg
        datapath = msg.datapath
        self.ports_mac_dict[datapath.id] = dict()
        self.send_features_request(datapath)
        self.send_port_desc_stats_request(datapath)
        self.configure_stateful_stages(datapath)
        self.install_flows(datapath)
        self.dp_dictionary[datapath.id] = datapath
    def install_flows(self,datapath):
        """Push this switch's precomputed flow entries; once every switch is
        configured, spawn the experiment monitor thread."""
        print("Configuring flow table for switch %d" % datapath.id)
        if datapath.id in self.flow_entries_dict.keys():
            for table_id in self.flow_entries_dict[datapath.id]:
                for match in self.flow_entries_dict[datapath.id][table_id]:
                    mod = ofparser.OFPFlowMod(
                        datapath=datapath, cookie=0, cookie_mask=0, table_id=table_id,
                        command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
                        priority=self.flow_entries_dict[datapath.id][table_id][match]['priority'], buffer_id=ofproto.OFP_NO_BUFFER,
                        out_port=ofproto.OFPP_ANY,
                        out_group=ofproto.OFPG_ANY,
                        flags=0, match=match, instructions=self.flow_entries_dict[datapath.id][table_id][match]['inst'])
                    datapath.send_msg(mod)
        self.switch_count += 1
        if self.switch_count == self.G.number_of_nodes():
            self.monitor_thread = hub.spawn(self._monitor,datapath)
    def send_features_request(self, datapath):
        req = ofparser.OFPFeaturesRequest(datapath)
        datapath.send_msg(req)
    def configure_stateful_stages(self, datapath):
        """Make tables 2 and 3 stateful and set their lookup/update keys."""
        node_dict = f_t_parser.create_node_dict(self.ports_dict,self.requests)
        self.send_table_mod(datapath, table_id=2)
        self.send_key_lookup(datapath, table_id=2, fields=[ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST])
        self.send_key_update(datapath, table_id=2, fields=[ofproto.OXM_OF_ETH_SRC,ofproto.OXM_OF_ETH_DST])
        self.send_table_mod(datapath, table_id=3)
        self.send_key_lookup(datapath, table_id=3, fields=[ofproto.OXM_OF_METADATA])
        self.send_key_update(datapath, table_id=3, fields=[ofproto.OXM_OF_METADATA])
    def configure_global_states(self, datapath):
        """Set one global-state bit per physical port (bit port-1 = 1)."""
        for port in self.ports_mac_dict[datapath.id]:
            if port!=ofproto.OFPP_LOCAL:
                (global_state, global_state_mask) = osparser.masked_global_state_from_str("1",port-1)
                msg = osparser.OFPExpSetGlobalState(datapath=datapath, global_state=global_state, global_state_mask=global_state_mask)
                datapath.send_msg(msg)
    def send_table_mod(self, datapath, table_id, stateful=1):
        req = osparser.OFPExpMsgConfigureStatefulTable(datapath=datapath, table_id=table_id, stateful=stateful)
        datapath.send_msg(req)
    def send_key_lookup(self, datapath, table_id, fields):
        key_lookup_extractor = osparser.OFPExpMsgKeyExtract(datapath=datapath, command=osproto.OFPSC_EXP_SET_L_EXTRACTOR, fields=fields, table_id=table_id)
        datapath.send_msg(key_lookup_extractor)
    def send_key_update(self, datapath, table_id, fields):
        key_update_extractor = osparser.OFPExpMsgKeyExtract(datapath=datapath, command=osproto.OFPSC_EXP_SET_U_EXTRACTOR, fields=fields, table_id=table_id)
        datapath.send_msg(key_update_extractor)
    def set_link_down(self,node1,node2):
        """Bring both Mininet interfaces of link (node1,node2) down via ifconfig."""
        if(node1 > node2):
            node1,node2 = node2,node1
        os.system('sudo ifconfig s'+str(node1)+'-eth'+str(self.ports_dict['s'+str(node1)]['s'+str(node2)])+' down')
        os.system('sudo ifconfig s'+str(node2)+'-eth'+str(self.ports_dict['s'+str(node2)]['s'+str(node1)])+' down')
    def set_link_up(self,node1,node2):
        """Bring both Mininet interfaces of link (node1,node2) back up."""
        if(node1 > node2):
            node1,node2 = node2,node1
        os.system('sudo ifconfig s'+str(node1)+'-eth'+str(self.ports_dict['s'+str(node1)]['s'+str(node2)])+' up')
        os.system('sudo ifconfig s'+str(node2)+'-eth'+str(self.ports_dict['s'+str(node2)]['s'+str(node1)])+' up')
    def send_port_desc_stats_request(self, datapath):
        req = ofparser.OFPPortDescStatsRequest(datapath, 0)
        datapath.send_msg(req)
    @set_ev_cls(ofp_event.EventOFPPortDescStatsReply, MAIN_DISPATCHER)
    def port_desc_stats_reply_handler(self, ev):
        # store the association port<->MAC address
        for p in ev.msg.body:
            self.ports_mac_dict[ev.msg.datapath.id][p.port_no]=p.hw_addr
        self.configure_global_states(ev.msg.datapath)
    @set_ev_cls(ofp_event.EventOFPExperimenterStatsReply, MAIN_DISPATCHER)
    def state_stats_reply_handler(self, ev):
        """Reset flow states (exp_type 0) or global states (exp_type 1) after
        each realization, using the stats reply to enumerate existing keys."""
        msg = ev.msg
        dp = msg.datapath
        if ev.msg.body.exp_type==0:
            # EXP_STATE_STATS
            stats = osparser.OFPStateStats.parser(ev.msg.body.data, offset=0)
            for stat in stats:
                if stat.entry.key != []:
                    msg = osparser.OFPExpMsgSetFlowState(
                        datapath=dp, state=0, keys=stat.entry.key, table_id=stat.table_id)
                    dp.send_msg(msg)
        elif ev.msg.body.exp_type==1:
            stat = osparser.OFPGlobalStateStats.parser(ev.msg.body.data, offset=0)
            msg = osparser.OFPExpResetGlobalState(datapath=dp)
            dp.send_msg(msg)
            self.configure_global_states(dp)
    def timeout_probe(self,timeout):
        """Re-install (OFPFC_MODIFY) the flow entries associated with the given
        probe timeout on every switch."""
        f_t_parser.selected_timeout = timeout
        for datapath_id in self.flow_entries_with_timeout_dict[timeout]:
            for table_id in self.flow_entries_with_timeout_dict[timeout][datapath_id]:
                for match in self.flow_entries_with_timeout_dict[timeout][datapath_id][table_id]:
                    mod = ofparser.OFPFlowMod(
                        datapath=self.dp_dictionary[datapath_id], cookie=0, cookie_mask=0, table_id=table_id,
                        command=ofproto.OFPFC_MODIFY, idle_timeout=0, hard_timeout=0,
                        priority=self.flow_entries_with_timeout_dict[timeout][datapath_id][table_id][match]['priority'], buffer_id=ofproto.OFP_NO_BUFFER,
                        out_port=ofproto.OFPP_ANY,
                        out_group=ofproto.OFPG_ANY,
                        flags=0, match=match, instructions=self.flow_entries_with_timeout_dict[timeout][datapath_id][table_id][match]['inst'])
                    self.dp_dictionary[datapath_id].send_msg(mod)
    def timeout_burst(self,burst):
        """Re-install (OFPFC_MODIFY) the flow entries associated with the given
        burst timeout on every switch."""
        f_t_parser.selected_burst = burst
        for datapath_id in self.flow_entries_with_burst_dict[burst]:
            for table_id in self.flow_entries_with_burst_dict[burst][datapath_id]:
                for match in self.flow_entries_with_burst_dict[burst][datapath_id][table_id]:
                    mod = ofparser.OFPFlowMod(
                        datapath=self.dp_dictionary[datapath_id], cookie=0, cookie_mask=0, table_id=table_id,
                        command=ofproto.OFPFC_MODIFY, idle_timeout=0, hard_timeout=0,
                        priority=self.flow_entries_with_burst_dict[burst][datapath_id][table_id][match]['priority'], buffer_id=ofproto.OFP_NO_BUFFER,
                        out_port=ofproto.OFPP_ANY,
                        out_group=ofproto.OFPG_ANY,
                        flags=0, match=match, instructions=self.flow_entries_with_burst_dict[burst][datapath_id][table_id][match]['inst'])
                    self.dp_dictionary[datapath_id].send_msg(mod)
    def send_state_stats_request(self):
        """Ask every switch for its flow-state stats (triggers state reset)."""
        for datapath_id in self.dp_dictionary:
            req = osparser.OFPExpStateStatsMultipartRequest(datapath=self.dp_dictionary[datapath_id])
            self.dp_dictionary[datapath_id].send_msg(req)
    def send_global_state_stats_request(self):
        """Ask every switch for its global-state stats (triggers state reset)."""
        for datapath_id in self.dp_dictionary:
            req = osparser.OFPExpGlobalStateStatsMultipartRequest(datapath=self.dp_dictionary[datapath_id])
            self.dp_dictionary[datapath_id].send_msg(req)
    def _monitor(self,datapath):
        """Experiment driver: pick the fault affecting the most requests,
        start pings on the N longest back-path requests, toggle the faulty
        link down/up per realization, and reset switch state between runs."""
        hub.sleep(5)
        print("Network is ready")
        hub.sleep(5)
        req_per_fault = {}
        for f in self.faults:
            req_per_fault[f]=len(self.faults[f]['requests'])
        worst_fault=max(req_per_fault.iterkeys(), key=lambda k: req_per_fault[k])
        #worst_fault=(7,8)
        fw_back_path_len_per_req = {}
        for r in self.faults[worst_fault]['requests']:
            if self.faults[worst_fault]['requests'][r]['fw_back_path']!=None:
                fw_back_path_len_per_req[r]=len(self.faults[worst_fault]['requests'][r]['fw_back_path'])
            else:
                fw_back_path_len_per_req[r]=0
        # requests passing from worst_link sorted by fw_back_path_len in decreasing order
        sorted_req=sorted(fw_back_path_len_per_req,key=fw_back_path_len_per_req.__getitem__,reverse=True)
        i=0
        for sim_num in range(self.REALIZATIONS_NUM):
            print('\n\x1B[32mSTARTING REALIZATION '+str(i+1)+"/"+str(self.REALIZATIONS_NUM)+'\n\x1B[0m')
            count=0
            for req in sorted_req:
                count+=1
                print('h'+str(req[0])+'# ping -i '+str(os.environ['interarrival'])+' '+self.mn_net['h'+str(req[1])].IP()+'&')
                self.mn_net['h'+str(req[0])].cmd('ping -i '+str(os.environ['interarrival'])+' '+self.mn_net['h'+str(req[1])].IP()+'> ~/ping_SPIDER.'+str(req[0])+'.'+str(req[1])+'.sim'+str(i)+'.txt &')
                if count==int(os.environ['N']):
                    break
            if os.environ['ENABLE_FAULT']=='yes':
                hub.sleep(int(os.environ['LINK_DOWN']))
                print("LINK DOWN "+str(worst_fault))
                self.set_link_down(worst_fault[0],worst_fault[1])
            hub.sleep(int(os.environ['LINK_UP']))
            print("LINK UP "+str(worst_fault))
            os.system("sudo kill -SIGINT `pidof ping`")
            self.set_link_up(worst_fault[0],worst_fault[1])
            self.send_state_stats_request()
            self.send_global_state_stats_request()
            hub.sleep(int(os.environ['LINK_UP']))
            i+=1
        os.system("chown mininet:mininet ~/ping_SPIDER.*")
        os.system("kill -9 $(pidof -x ryu-manager) 2> /dev/null")
| StarcoderdataPython |
5192136 | from dogapi.common import is_p3k
__all__ = [
'SnapshotApi',
]
if is_p3k():
from urllib.parse import urlparse
else:
from urlparse import urlparse
class SnapshotApi(object):
    """Mixin exposing Datadog graph-snapshot endpoints.

    Expects the host class to provide ``self.http_request(method, path,
    **params)``.
    """

    def graph_snapshot(self, metric_query, start, end, event_query=None):
        """Take a snapshot of a graph and return the response containing its URL.

        `start` and `end` are POSIX timestamps in seconds. An optional
        `event_query` overlays matching events on the graph.

        >>> end = int(time.time())
        >>> start = end - 60 * 60
        >>> dog_http_api.snapshot("system.load.1{*}", start, end)
        """
        params = {
            'metric_query': metric_query,
            'start': start,
            'end': end,
        }
        # Only include the event overlay when a non-empty query was given.
        if event_query:
            params['event_query'] = event_query
        return self.http_request('GET', '/graph/snapshot', **params)

    def graph_snapshot_from_def(self, graph_def, start, end):
        """Take a snapshot from a JSON graph definition and return the response.

        `start` and `end` are POSIX timestamps in seconds; `graph_def` is a
        JSON-encoded graph definition string.
        """
        return self.http_request(
            'GET', '/graph/snapshot',
            graph_def=graph_def,
            start=start,
            end=end,
        )

    def snapshot_status(self, snapshot_url):
        """Return the HTTP status code of a snapshot (200 once it is ready).

        Poll this until it returns 200, then download `snapshot_url`.
        """
        view_path = urlparse(snapshot_url).path
        token = view_path.split('/snapshot/view/')[1].split('.png')[0]

        def _extract_code(response):
            return int(response['status_code'])

        return self.http_request(
            'GET', '/graph/snapshot_status/{0}'.format(token),
            response_formatter=_extract_code)
1649850 | <gh_stars>10-100
import os
import glob
import argparse
import re
import json
def get_goal_files(root_dir, ext="*.gc"):
    """Get all GOAL source files under root_dir."""
    matches = []
    for dirpath, _dirnames, _filenames in os.walk(root_dir):
        matches.extend(glob.glob(os.path.join(dirpath, ext)))
    return matches
def get_sgs(goal_file):
    """Get a list of all the skel groups defined in the file, excluding the -sg and *'s."""
    with open(goal_file, "r") as src:
        contents = src.read()
    # given "(defskelgroup *foo* bar", will match "bar"
    return re.findall(r'\(defskelgroup \*[\w-]+\* ([\w-]+)', contents)
def main():
    """Map GOAL skelgroup definitions to the art-group import files that
    define them, and report any skelgroups without a matching import file.

    Scans goal_src/import for ``*-ag.gc`` files, then scans every other
    ``.gc`` file for ``defskelgroup`` uses and resolves each to an import
    path.  Contains (commented-out) code to rewrite source files with the
    required ``(import ...)`` lines, and to dump the mapping as JSON.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(dest='goal_src', help='the goal_src folder')
    args = parser.parse_args()
    all_import_files = get_goal_files(os.path.join(args.goal_src, "import"))
    all_files = get_goal_files(args.goal_src)
    all_non_import_files = list(set(all_files) - set(all_import_files))
    output_json = {}
    to_modify = {}
    import_map = {}
    for import_file in all_import_files:
        base = os.path.basename(import_file)
        # make sure extention is -ag.gc
        assert base[-6:] == "-ag.gc"
        base_no_extension = base[:-6]
        # Key: art-group name; value: repo-relative path to its import file.
        import_map[base_no_extension] = "goal_src/import/" + import_file.split("import/")[1]
    # print(import_map)
    # sg_locations = {}
    for source_file in all_non_import_files:
        sgs = get_sgs(source_file)
        deps = set()
        for sg in sgs:
            if sg not in import_map:
                print("missing: ", sg)
            else:
                deps.add(import_map[sg])
        if len(deps) > 0:
            output_json[os.path.basename(source_file)[:-3]] = list(deps)
            to_modify[source_file] = deps
    # uncomment to modify files
    # for file, deps in to_modify.items():
    #     print("modifying ", file, deps)
    #     with open(file, "r") as f:
    #         lines = f.readlines()
    #     to_add = [] # ["\n"]
    #     for dep in deps:
    #         to_add.append("(import \"{}\")\n".format(dep))
    #     print(to_add)
    #     added = False
    #     for i, line in enumerate(lines):
    #         if ";; decomp begins" in line.lower():
    #             lines[i+1:i+1] = to_add
    #             added = True
    #             break
    #     if not added:
    #         lines[6:6] = to_add
    #         assert lines[1] == "(in-package goal)\n"
    #     with open(file, "w") as f:
    #         f.writelines(lines)
    # uncomment to print json.
    # print(json.dumps(output_json, indent=4))
if __name__ == "__main__":
main() | StarcoderdataPython |
9734603 | <gh_stars>10-100
# coding: utf-8
from __future__ import absolute_import, unicode_literals
from codecs import open # pylint:disable=redefined-builtin
from collections import defaultdict
from os.path import dirname, join
import sys
from setuptools import setup, find_packages
# PyPI trove classifiers advertised in the package metadata.
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: Apache Software License',
    'Topic :: Software Development',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: Implementation :: CPython',
    'Programming Language :: Python :: Implementation :: PyPy',
    'Operating System :: OS Independent',
    'Operating System :: POSIX',
    'Operating System :: Microsoft :: Windows',
    'Operating System :: MacOS :: MacOS X',
]
def main():
    """Assemble metadata (including Python-version-conditional dependencies)
    and invoke setuptools' setup() for the genty package."""
    base_dir = dirname(__file__)
    requirements = ['six']
    test_requirements = []
    extra_requirements = defaultdict(list)
    conditional_dependencies = {
        # Newer versions of pip and wheel, which support PEP 426, allow
        # environment markers for conditional dependencies to use operators
        # such as `<` and `<=` [1]. However, older versions of pip and wheel
        # only support PEP 345, which only allows operators `==` and `in` (and
        # their negations) along with string constants [2]. To get the widest
        # range of support, we'll only use the `==` operator, which means
        # explicitly listing all supported Python versions that need the extra
        # dependencies.
        #
        # [1] <https://www.python.org/dev/peps/pep-0426/#environment-markers>
        # [2] <https://www.python.org/dev/peps/pep-0345/#environment-markers>
        'ordereddict': ['2.6'],  # < 2.7
    }
    for requirement, python_versions in conditional_dependencies.items():
        for python_version in python_versions:
            # <https://wheel.readthedocs.org/en/latest/#defining-conditional-dependencies>
            python_conditional = 'python_version=="{0}"'.format(python_version)
            key = ':{0}'.format(python_conditional)
            extra_requirements[key].append(requirement)
    if sys.version_info[:2] == (2, 6):
        test_requirements.append('unittest2')
    setup(
        name='genty',
        version='1.3.2',
        description='Allows you to run a test with multiple data sets',
        long_description=open(join(base_dir, 'README.rst'), encoding='utf-8').read(),
        author='Box',
        author_email='<EMAIL>',
        url='https://github.com/box/genty',
        license='Apache Software License, Version 2.0, http://www.apache.org/licenses/LICENSE-2.0',
        packages=find_packages(exclude=['test']),
        test_suite='test',
        zip_safe=False,
        keywords=('genty', 'tests', 'generative', 'unittest'),
        classifiers=CLASSIFIERS,
        install_requires=requirements,
        extras_require=extra_requirements,
        tests_require=test_requirements,
    )
if __name__ == '__main__':
main()
| StarcoderdataPython |
3529998 | <filename>.tox/bootstrap/lib/python3.7/site-packages/matrix/__init__.py
# -*- coding: utf-8 -*-
import re
import warnings
from fnmatch import fnmatch
from itertools import product
from backports.configparser2 import ConfigParser
try:
from collections import OrderedDict
except ImportError:
from .ordereddict import OrderedDict
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
__version__ = "2.0.1"
entry_rx = re.compile(r"""
^
((?P<merge>\?))?
((?P<alias>[^\?:]*):)?
\s*(?P<value>[^!&]+?)\s*
(?P<reducers>[!&].+)?
$
""", re.VERBOSE)
reducer_rx = re.compile(r"""
\s*
(?P<type>[!&])
(?P<variable>[^!&\[\]]+)
\[(?P<glob>[^\[\]]+)\]
\s*
""", re.VERBOSE)
special_chars_rx = re.compile(r'[\\/:>?|\[\]< ]+')
class ParseError(Exception):
    """Raised when a matrix configuration line cannot be parsed."""
    pass
class DuplicateEntry(UserWarning):
    """Warning issued when two entries of the same variable share an alias.

    args: (new_entry, raw_line, list_of_conflicting_entries)
    """

    def __str__(self):
        entry, line, conflicts = self.args
        return ("Duplicate entry %r (from %r). "
                "Conflicts with %r - it has the same alias." % (entry, line, conflicts))
    __repr__ = __str__
class DuplicateEnvironment(Exception):
    """Raised when two combinations map to the same key with different data.

    args: (environment_key, new_data, existing_data)
    """

    def __str__(self):
        key, new_data, existing_data = self.args
        return ("Duplicate environment %r. "
                "It has conflicting sets of data: %r != %r." % (key, new_data, existing_data))
    __repr__ = __str__
class Reducer(object):
    """One include ('&') / exclude ('!') rule parsed from an entry.

    Built from a (kind, variable, glob_pattern) tuple, e.g. the regex
    groups for ``!python[2.*]``.
    """

    def __init__(self, entry):
        kind, variable, pattern = entry
        assert kind in "&!"
        self.kind = kind
        self.is_exclude = (kind == '!')
        self.variable = variable
        self.pattern = pattern

    def __str__(self):
        verb = "exclude" if self.is_exclude else "include"
        return "%s(%s[%s])" % (verb, self.variable, self.pattern)
    __repr__ = __str__
class Entry(object):
    """A single value of a matrix variable, parsed from one config line.

    Attributes: ``value`` (the bare value, '' for '-'), ``alias`` (explicit
    'alias:' prefix or derived from the value), ``merge`` (the raw '?'
    marker group -- NOTE(review): False only for empty lines, otherwise the
    regex group, i.e. '?' or None; confirm callers expect that), and
    ``reducers`` (list of Reducer rules).
    """
    def __init__(self, value):
        value = value.strip()
        if not value:
            # Empty line: a blank placeholder entry.
            self.alias = ''
            self.value = ''
            self.merge = False
            self.reducers = []
        else:
            m = entry_rx.match(value)
            if not m:
                raise ValueError("Failed to parse %r" % value)
            m = m.groupdict()
            self.alias = m['alias']
            self.value = m['value']
            self.merge = m['merge']
            self.reducers = [Reducer(i) for i in reducer_rx.findall(m['reducers'] or '')]
            # '-' is the explicit "empty value" marker.
            if self.value == '-':
                self.value = ''
            if self.alias is None:
                # Derive an alias from the value, replacing path/shell
                # special characters with underscores.
                self.alias = special_chars_rx.sub('_', self.value)
    def __eq__(self, other):
        # Entries are considered equal when their aliases collide.
        # NOTE(review): no __hash__ is defined alongside __eq__ -- under
        # Python 3 this makes Entry unhashable; confirm intended.
        return self.alias == other.alias
    def __str__(self):
        return "Entry(%r, %salias=%r)" % (
            self.value,
            ', '.join(str(i) for i in self.reducers) + ', ' if self.reducers else '',
            self.alias,
        )
    __repr__ = __str__
def parse_config(fp, section='matrix'):
    """Parse an .ini file object into an OrderedDict of variable -> [Entry].

    Warns (DuplicateEntry) when two entries of one variable share an alias.
    Variables with no entries get a single empty ('-') entry.
    """
    parser = ConfigParser()
    # NOTE(review): readfp() is the deprecated spelling of read_file().
    parser.readfp(fp)
    config = OrderedDict()
    for name, value in parser.items(section):
        entries = config[name] = []
        for line in value.strip().splitlines():
            entry = Entry(line)
            # Entry.__eq__ compares aliases, so this detects alias collisions.
            duplicates = [i for i in entries if i == entry]
            if duplicates:
                warnings.warn(DuplicateEntry(entry, line, duplicates), DuplicateEntry, 1)
            entries.append(entry)
        if not entries:
            entries.append(Entry('-'))
    return config
def from_config(config):
    """
    Generate a matrix from a configuration dictionary.

    Takes the cartesian product of all variable entries, drops combinations
    rejected by any entry's include/exclude reducers, and keys each surviving
    combination by the joined aliases of its entries.

    Raises DuplicateEnvironment when two combinations produce the same key
    with different data.
    """
    matrix = {}
    variables = config.keys()
    for entries in product(*config.values()):
        combination = dict(zip(variables, entries))
        include = True
        for value in combination.values():
            for reducer in value.reducers:
                if reducer.pattern == '-':
                    # '-' matches only an empty value
                    match = not combination[reducer.variable].value
                else:
                    match = fnmatch(combination[reducer.variable].value, reducer.pattern)
                # exclude reducers drop on match, include reducers drop on miss
                if match if reducer.is_exclude else not match:
                    include = False
        if include:
            key = '-'.join(entry.alias for entry in entries if entry.alias)
            data = dict(
                zip(variables, (entry.value for entry in entries))
            )
            if key in matrix and data != matrix[key]:
                raise DuplicateEnvironment(key, data, matrix[key])
            matrix[key] = data
    return matrix
def from_file(filename, section='matrix'):
    """
    Generate a matrix from a .ini file. Configuration is expected to be in a ``[matrix]`` section.
    """
    # BUG FIX: close the file deterministically instead of leaking the handle.
    with open(filename) as fp:
        config = parse_config(fp, section=section)
    return from_config(config)
def from_string(string, section='matrix'):
    """
    Generate a matrix from a .ini file. Configuration is expected to be in a ``[matrix]`` section.
    """
    buf = StringIO(string)
    return from_config(parse_config(buf, section=section))
| StarcoderdataPython |
11313800 |
# Registry of OGP type name -> handler instance, filled by the decorator.
ogp_types = {}


def ogp_type(cls):
    """Class decorator: register an instance of *cls* under its lowercased name."""
    name = cls.__name__.lower()
    ogp_types[name] = cls()
    return cls
class OGP:
    """Context manager that adds Open Graph Protocol prefixes and meta tags.

    On construction, reads the ``og`` namespace of *doc* and registers its
    ``type`` if present; on a clean exit writes the accumulated RDFa prefix
    string to the document's ``prefix`` attribute.
    """

    def __init__(self, doc):
        self.doc = doc
        self.prefixes = []
        og = doc.meta.namespaces.get('og')
        if og:
            type = og.get('type')
            if type:
                self.add_type(type)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        # Only write the prefix attribute when no exception occurred.
        if not type and self.prefixes:
            prefix = ' '.join(self.prefixes)
            self.doc.attr('prefix', prefix)

    def add_type(self, type):
        """Register OGP *type* (e.g. "website", "article", "video.movie")."""
        prefix = 'og: http://ogp.me/ns#'
        # BUG FIX: the original used "type is not 'website'", which compares
        # object identity with a string literal (SyntaxWarning since 3.8 and
        # implementation-dependent); use equality instead.
        if type != 'website':
            bits = type.split('.')
            # Non-website types always need the base "og:" prefix too.
            if not self.prefixes:
                self.prefixes.append(prefix)
            prefix = '{0}: http://ogp.me/ns/{0}#'.format(bits[0])
        if prefix not in self.prefixes:
            type_handler = ogp_types.get(type)
            if type_handler:
                self.doc.head.add_meta(property='og:type', content=type)
                self.prefixes.append(prefix)
                type_handler(self.doc)
class OGPType:
    """Base handler for a registered OGP type; subclasses emit og:* tags."""

    def __call__(self, doc):
        pass

    def set(self, doc, key, tag_key=None, array=False):
        """Emit an ``og:<key>`` meta tag for every value found for *key*.

        Looks *key* up in the doc's ``og`` namespace, falling back to the
        plain meta entry *tag_key*; when *array* is true, a comma-separated
        string is split into several tags, otherwise only one tag is written.
        """
        value = doc.meta.namespaces['og'].get(key)
        if not value and tag_key:
            value = doc.meta.get(tag_key)
        if value and array:
            value = value.split(', ')
        if not value:
            return
        prop = 'og:%s' % key
        if not isinstance(value, (tuple, list)):
            value = (value,)
        if not array:
            value = value[:1]
        for item in value:
            doc.head.add_meta(property=prop, content=item)
@ogp_type
class Website(OGPType):
    """Handler for og:type "website"."""

    def __call__(self, doc):
        self.set(doc, 'url')
        for key in ('title', 'description'):
            self.set(doc, key, key)
        self.set(doc, 'locale')
        self.set(doc, 'site_name')
        self.set(doc, 'image', array=True)
@ogp_type
class Profile(Website):
    """Handler for og:type "profile"; adds person fields on top of Website."""

    def __call__(self, doc):
        super().__call__(doc)
        for key in ('first_name', 'last_name', 'username', 'gender'):
            self.set(doc, key)
@ogp_type
class Article(Website):
    """Handler for og:type "article"; adds article fields on top of Website."""

    def __call__(self, doc):
        super().__call__(doc)
        for key in ('published_time', 'modified_time', 'expiration_time'):
            self.set(doc, key)
        self.set(doc, 'author', 'author', array=True)
        self.set(doc, 'section')
        self.set(doc, 'tag', 'keywords', array=True)
| StarcoderdataPython |
3335955 | import sys
from helpfuncs import translateR
from inverse import inv
# parse spatial CSP and fill in the constraint matrix
def parsecsp(ConMatrix):
    """Read a spatial CSP from stdin and fill the symmetric constraint matrix.

    Each line looks like ``(i j) R1 R2 ...``; parsing stops at EOF, on a
    lone ``.`` line, or on Ctrl-C.  NOTE(review): relies on the Python 2
    builtin ``reduce``; under Python 3 it must be imported from functools.
    """
    while True:
        # assure not interrupted parsing
        try:
            line = sys.stdin.readline()
        except KeyboardInterrupt:
            break
        if not line:  # EOF
            break
        # strip parentheses, then split into tokens: i, j, relation names
        l = line.strip().replace('(','').replace(')','').split()
        # condition to end parsing
        if l == ['.']:
            break
        # OR together the bitmask encodings of all listed base relations
        s = reduce(lambda x, y: x | y, [translateR(i) for i in l[2:]])
        ConMatrix[int(l[0])][int(l[1])] = s
        # mirror entry stores the converse relation; inv is presumably a
        # 1-indexed lookup table of converses -- TODO confirm
        ConMatrix[int(l[1])][int(l[0])] = inv[s-1]
| StarcoderdataPython |
# This file defines the error table as a pandas object
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from dotmap import DotMap
from collections import defaultdict
from kmodes.kmodes import KModes
class error_table:
    """Tabular store of samples and their error values ("rho").

    Backed by a pandas DataFrame.  ``column_type`` maps each column name to
    True (numerical) or False (categorical); analysis helpers (PCA,
    clustering, nearest-sample search) operate on that split.
    """

    def __init__(self, space=None, table=None, column_type=None):
        assert space is not None or table is not None
        if space is not None:
            # Derive columns and their numerical/categorical kind from the
            # sample space; a "rho" column is always appended for the error.
            self.space = space
            self.column_names = []
            self.column_type = {}
            for i in range(space.fixedFlattenedDimension):
                self.column_names.append(space.meaningOfFlatCoordinate(i))
                self.column_type[space.meaningOfFlatCoordinate(i)] = \
                    space.coordinateIsNumerical(i)
            self.column_names.append("rho")
            self.column_type["rho"] = True  # numerical by default; may be updated later
            self.table = pd.DataFrame(columns=self.column_names)
            self.ignore_locs = []
        else:
            self.table = table
            self.column_names = table.columns
            if column_type is None:
                # Assume every column is numerical unless told otherwise.
                self.column_type = {col: True for col in self.column_names}
            else:
                self.column_type = column_type
            self.ignore_locs = []

    def update_column_names(self, column_names):
        """Rename all columns (must match the current column count)."""
        assert len(self.table.columns) == len(column_names)
        self.table.columns = column_names
        self.column_names = column_names

    def update_error_table(self, sample, rho):
        """Append one (sample, rho) row; rho may be a scalar or a list/tuple."""
        sample = self.space.flatten(sample, fixedDimension=True)
        # Hoisted out of the loop: record coordinates with a None value once.
        arr = np.array(sample)
        if np.any(arr == None):  # noqa: E711 -- elementwise None comparison
            locs = np.where(arr == None)  # noqa: E711
            self.ignore_locs = self.ignore_locs + list(locs[0])
        sample_dict = {}
        for k, v in zip(self.table.columns, list(sample)):
            sample_dict[k] = float(v) if self.column_type[k] and v is not None else v
        if isinstance(rho, (list, tuple)):
            # Multiple objectives: rho_0..rho_{n-2} plus the final "rho".
            for i, r in enumerate(rho[:-1]):
                if "rho_" + str(i) not in self.column_names:
                    self.column_names.append("rho_" + str(i))
                if isinstance(r, bool):
                    self.column_type["rho_" + str(i)] = False
                else:
                    self.column_type["rho_" + str(i)] = True
                sample_dict["rho_" + str(i)] = r
            sample_dict["rho"] = rho[-1]
            if isinstance(rho[-1], bool) and self.column_type["rho"]:
                print("Updating column type")
                self.column_type["rho"] = False
        else:
            sample_dict["rho"] = rho
            if isinstance(rho, bool) and self.column_type["rho"]:
                print("Updating column type")
                self.column_type["rho"] = False
        self.ignore_locs = list(set(tuple(self.ignore_locs)))
        # FIX: DataFrame.append was removed in pandas 2.0; concat is the
        # supported equivalent.
        self.table = pd.concat([self.table, pd.DataFrame([sample_dict])],
                               ignore_index=True)

    def get_column_by_index(self, index):
        """Return the columns at the given integer position(s).

        Out-of-range positions are dropped with a warning message.
        """
        if isinstance(index, int):
            index = list([index])
        if len(index) < 1:
            print("No indices provided: returning all samples")
        elif max(index) >= len(self.table.columns):
            # BUG FIX: iterate over a copy -- removing from the list being
            # iterated skipped elements and let invalid indices through.
            for i in list(index):
                if i >= len(self.table.columns):
                    index.remove(i)
            print("Tried to access index not in error table")
        if len(self.table) > 0:
            names_index = self.table.columns[index]
            return self.table[names_index]
        else:
            print("No entries in error table yet")
            return None

    def get_column_by_name(self, column_names):
        """Return the columns whose names appear in *column_names*."""
        index = []
        if isinstance(column_names, str):
            if column_names in self.table.columns:
                index.append(column_names)
        else:
            for s in column_names:
                if s in self.table.columns:
                    index.append(s)
        return self.table[index]

    def get_samples_by_index(self, index):
        """Return the rows at the given integer position(s)."""
        if isinstance(index, int):
            index = list([index])
        if max(index) >= len(self.table):
            print("Trying to access samples not in the table")
            # BUG FIX: iterate over a copy (see get_column_by_index).
            for i in list(index):
                if i >= len(self.table):
                    index.remove(i)
        return self.table.iloc[index]

    def split_table(self, column_names=None):
        """Split columns into (numerical, categorical) sub-tables."""
        if column_names is None:
            column_names = self.column_names
        numerical, categorical = [], []
        for c in column_names:
            if self.column_type[c]:
                numerical.append(c)
            else:
                categorical.append(c)
        return self.get_column_by_name(numerical), self.get_column_by_name(categorical)

    def get_random_samples(self, count=5):
        """Return *count* distinct random row indices (all rows if fewer)."""
        if count > len(self.table):
            return list(range(len(self.table)))
        else:
            sample_ids = set()
            while len(sample_ids) < count:
                i = np.random.randint(len(self.table))
                sample_ids.add(i)
            return list(sample_ids)

    def build_normalized(self, column_names=None):
        """Return (normalized numerical table, categorical table, mins, maxs).

        Numerical columns are min-max scaled to [0, 1].
        """
        if len(self.table) < 1:
            # BUG FIX: callers unpack four values; the early returns used to
            # yield only two, crashing on empty tables.
            return pd.DataFrame(), pd.DataFrame(), np.array([]), np.array([])
        if column_names is None:
            column_names = self.column_names
        numerical, categorical = self.split_table(column_names=column_names)
        if len(categorical.columns) + len(numerical.columns) == 0:
            return pd.DataFrame(), pd.DataFrame(), np.array([]), np.array([])
        # Normalize tables (only for numerical table)
        stats = numerical.describe()
        normalized_dict = {r: (numerical[r] - stats[r]['min']) / (stats[r]['max'] - stats[r]['min'])
                           for r in numerical.columns}
        normalized_table = pd.DataFrame(normalized_dict)
        return normalized_table, categorical, \
               np.array([stats[r]['min'] for r in numerical.columns]),\
               np.array([stats[r]['max'] for r in numerical.columns])

    def build_standardized(self, column_names=None):
        """Return (standardized numerical table, categorical table, means, stds).

        Numerical columns are z-scored.
        """
        if len(self.table) < 1:
            # BUG FIX: match the four-value contract (see build_normalized).
            return pd.DataFrame(), pd.DataFrame(), np.array([]), np.array([])
        if column_names is None:
            column_names = self.column_names
        numerical, categorical = self.split_table(column_names=column_names)
        if len(categorical.columns) + len(numerical.columns) == 0:
            return pd.DataFrame(), pd.DataFrame(), np.array([]), np.array([])
        stats = numerical.describe()
        standardized_dict = {r: (numerical[r] - stats[r]['mean']) / stats[r]['std']
                             for r in numerical.columns}
        standardized_table = pd.DataFrame(standardized_dict)
        return standardized_table, categorical, \
               np.array([stats[r]['mean'] for r in numerical.columns]),\
               np.array([stats[r]['std'] for r in numerical.columns])

    def dist_element(self, numerical, point_n):
        """Euclidean distance of every row of *numerical* to *point_n*."""
        d = np.zeros(len(self.table))
        if len(numerical.columns) > 0:
            d = np.linalg.norm(numerical.values - point_n, axis=1)
        return d

    def k_clusters(self, column_names=None, k=None):
        """Cluster the samples into *k* clusters.

        Categorical columns are clustered with k-modes, then each categorical
        cluster is sub-clustered on the numerical columns with k-means;
        purely numerical tables use k-means directly.  Cluster centers are
        mapped back to the original (un-normalized) scale.
        """
        if k is None or k >= len(self.table):
            return np.array(range(len(self.table)))
        if len(self.table) <= 0:
            return np.array(range(len(self.table)))
        numerical, categorical, min_elems, max_elems = self.build_normalized(column_names=column_names)
        range_elems = max_elems - min_elems
        result = defaultdict(dict)
        if len(categorical.columns) > 0:
            X = np.array(categorical.values)
            Y = np.array(numerical.values)
            kmodes = KModes(n_clusters=k, init='Huang', n_init=5, verbose=1)
            kmodes.fit_predict(X)
            centers_cat = kmodes.cluster_centroids_
            labels_cat = kmodes.labels_
            result['categorical'] = {'clusters': centers_cat, 'labels': labels_cat}
            for j in range(k):
                pos = Y[np.where(labels_cat == j)]
                kmeans = KMeans(n_clusters=min(len(pos), k), random_state=0).fit(pos)
                centers = kmeans.cluster_centers_
                labels = kmeans.labels_
                for label, center in enumerate(centers):
                    # Undo the min-max normalization before reporting centers.
                    center = (center * range_elems) + min_elems
                    result[j][label] = center
                result[j]['labels'] = labels
        elif len(numerical.columns) > 0:
            X = np.array(numerical.values)
            kmeans = KMeans(n_clusters=k, random_state=0).fit(X)
            centers = kmeans.cluster_centers_
            labels = kmeans.labels_
            for label, center in enumerate(centers):
                center = (center * range_elems) + min_elems
                result['clusters'][label] = center
            result['labels'] = labels
        return result

    def k_closest_samples(self, column_names=None, k=None, dist_type=True):
        """Return the indices of the tightest group of *k* mutually close rows.

        dist_type True uses min-max normalized distances, False standardized.
        """
        if k is None or k >= len(self.table):
            return np.array(range(len(self.table)))
        if dist_type:
            numerical, categorical, _, _ = self.build_normalized(column_names=column_names)
        else:
            numerical, categorical, _, _ = self.build_standardized(column_names=column_names)
        # Pairwise distances between table rows
        d_rows = np.zeros((len(self.table), len(self.table)))
        for i in range(len(self.table)):
            d_rows[i] = self.dist_element(numerical, numerical.values[i])
        # The row whose k nearest neighbours have the minimal distance sum
        # anchors the largest set of correlated elements.
        sum_rows = []
        correlated_rows = []
        for r in d_rows:
            ks = np.argpartition(r, k)[:k]
            sum_rows.append(sum(r[ks]))
            correlated_rows.append(ks)
        return correlated_rows[np.array(sum_rows).argmin()]

    def pca_analysis(self, column_names=None, n_components=1):
        """Return the principal component direction(s) of the numerical columns.

        Columns listed in ``ignore_locs`` (seen with None values) are skipped.
        Returns None when the table is empty or has no numerical columns.
        """
        if len(self.table) < 1:
            return
        if column_names is None:
            column_names = self.column_names
        numerical, _ = self.split_table(column_names=column_names)
        # Do PCA analysis only on the numerical columns
        if len(numerical.columns) == 0:
            return
        pca_columns = []
        for c in numerical.columns:
            if c in self.table.columns and c not in self.table.columns[self.ignore_locs]:
                pca_columns.append(c)
        table = self.get_column_by_name(pca_columns)
        # Clamp n_components to what the data can support.
        if n_components > min(len(table), len(table.columns)):
            n_components = min(len(table), len(table.columns))
        pca = PCA(n_components=n_components)
        pca.fit(table)
        return {'columns': pca_columns, 'pivot': table.mean().values, 'directions': pca.components_}

    def analyze(self, analysis_params=None):
        """Run the enabled analyses (pca, k_closest, random, k_clusters).

        Each analysis runs by default; pass a DotMap-like *analysis_params*
        to toggle analyses or override their parameters.
        """
        analysis_data = DotMap()
        if analysis_params is None or ('pca' in analysis_params and analysis_params.pca) or 'pca' not in analysis_params:
            if analysis_params is not None and 'pca_params' in analysis_params:
                columns = analysis_params.pca_params.columns \
                    if 'columns' in analysis_params.pca_params else None
                n_components = analysis_params.pca_params.n_components \
                    if 'n_components' in analysis_params.pca_params else 1
            else:
                columns, n_components = None, 1
            analysis_data.pca = self.pca_analysis(column_names=columns, n_components=n_components)
        if analysis_params is None or ('k_closest' in analysis_params and analysis_params.k_closest) or 'k_closest' not in analysis_params:
            if analysis_params is not None and 'k_closest_params' in analysis_params:
                columns = analysis_params.k_closest_params.columns \
                    if 'columns' in analysis_params.k_closest_params else None
                k = analysis_params.k_closest_params.k \
                    if 'k' in analysis_params.k_closest_params else None
            else:
                columns, k = None, None
            analysis_data.k_closest = self.k_closest_samples(column_names=columns, k=k)
        if analysis_params is None or ('random' in analysis_params and analysis_params.random) or 'random' not in analysis_params:
            if analysis_params is not None and 'random_params' in analysis_params:
                count = analysis_params.random_params.count \
                    if 'count' in analysis_params.random_params else 5
            else:
                count = 5
            analysis_data.random = self.get_random_samples(count=count)
        if analysis_params is None or ('k_clusters' in analysis_params and analysis_params.k_clusters) or 'k_clusters' not in analysis_params:
            if analysis_params is not None and 'k_clusters_params' in analysis_params:
                columns = analysis_params.k_clusters_params.columns \
                    if 'columns' in analysis_params.k_clusters_params else None
                k = analysis_params.k_clusters_params.k \
                    if 'k' in analysis_params.k_clusters_params else None
            else:
                columns, k = None, None
            analysis_data.k_clusters = self.k_clusters(column_names=columns, k=k)
        return analysis_data
| StarcoderdataPython |
3513437 | <gh_stars>10-100
from .core import AutoGeneS
from typing import Optional, Tuple
import pandas as pd
import anndata
import numpy as np
import warnings
import dill as pickle
from sklearn.svm import NuSVR
from sklearn import linear_model
from scipy.optimize import nnls
from scipy import sparse
class Interface:
    """User-facing wrapper around the AutoGeneS optimizer.

    Typical flow: ``init`` (preprocess input), ``optimize`` (run the
    multi-objective genetic algorithm), ``select`` (pick one solution from
    the pareto front) and ``deconvolve`` (regress bulk samples onto the
    selected genes).
    """

    def __init__(self):
        self.pre_selection = None   # boolean mask of pre-selected genes
        self._selection = None      # boolean mask of the chosen solution
        self._adata = None          # per-celltype means as AnnData (if any)
        self.data = None            # matrix the optimizer works on
        self.data_genes = None      # gene names of the input data (if known)
        self.main = None            # AutoGeneS core instance

    def init(self, data, celltype_key='celltype', genes_key=None,
             use_highly_variable=False, **kwargs):
        """Preprocess input data.

        AnnData input is assumed to be single-cell data: per-celltype means
        are computed via *celltype_key*, and *genes_key* (or
        *use_highly_variable*, i.e. genes_key='highly_variable') may
        pre-select genes for the optimization.  DataFrame/ndarray input is
        assumed to already contain the means (cell types x genes).

        Returns the means AnnData for AnnData input, otherwise None.
        """
        self.__init__()
        if isinstance(data, anndata.AnnData):
            if use_highly_variable:
                genes_key = 'highly_variable'
            if celltype_key not in data.obs:
                raise ValueError(f"AnnData has no obs column '{celltype_key}'")
            self._adata = self.__compute_means(data, celltype_key)
            self.data_genes = data.var_names.values
            if genes_key:
                self.pre_selection = data.var[genes_key].values
            else:
                self.pre_selection = np.full((data.X.shape[1],), True)
            self.data = self._adata.X[:, self.pre_selection]
            self.main = AutoGeneS(self.data)
            return self._adata
        elif isinstance(data, pd.DataFrame):
            self.data = data.values
            self.data_genes = data.columns.values
            self.main = AutoGeneS(self.data)
            self.pre_selection = np.full((data.shape[1],), True)
        elif isinstance(data, np.ndarray):
            self.data = data
            self.main = AutoGeneS(self.data)
            self.pre_selection = np.full((data.shape[1],), True)
        else:
            raise TypeError("data must be AnnData, DataFrame or ndarray")

    def optimize(self, ngen=2, mode='standard', nfeatures=None, weights=None,
                 objectives=None, seed=0, verbose=True, **kwargs):
        """Run the multi-objective optimizer for *ngen* generations.

        *mode* is 'standard' (free gene count) or 'fixed' (*nfeatures*
        genes); *weights* (sign decides maximize/minimize/ignore) and
        *objectives* (default ('correlation', 'distance')) configure the
        fitness; extra kwargs (population_size, offspring_size,
        crossover_pb, mutation_pb, ...) are forwarded to the GA.
        """
        if self.main is None:
            raise Exception("Not initialized")
        self.main.run(ngen=ngen, mode=mode, nfeatures=nfeatures,
                      weights=weights, objectives=objectives, seed=seed,
                      verbose=verbose, **kwargs)

    def plot(self, **kwargs):
        """Plot objective values of the pareto solutions.

        Accepts at most one selection criterion among ``objectives``,
        ``weights``, ``index`` or ``close_to``; defaults to the weights
        passed to ``optimize``.  Can only be run after ``optimize``.
        """
        if self.main is None:
            raise Exception("Not initialized")
        self.main.plot(**kwargs)

    def select(self, copy=False, key_added='autogenes', **kwargs):
        """Pick one pareto solution (by weights / index / close_to).

        When an AnnData is attached, the boolean selection is also stored in
        the var column *key_added* (on a copy of the AnnData when *copy* is
        True, in which case the copy is returned).
        """
        if self.main is None:
            raise Exception("Not initialized")
        s = self.main.select(**kwargs)
        # Expand the solution from the pre-selected gene space to all genes.
        self._selection = self.__process_selection(s)
        if self._adata:
            if copy:
                r = self._adata.copy()
                r.var[key_added] = self._selection
                return r
            else:
                self._adata.var[key_added] = self._selection
        return self._selection

    def deconvolve(self, bulk, key=None, model='nusvr', **kwargs):
        """Deconvolve *bulk* samples using the selected genes.

        *bulk* may be ndarray, Series, DataFrame or AnnData; when gene labels
        are present on both sides, genes are matched by name.  *model* is one
        of 'nusvr', 'nnls' or 'linear'.  Returns the model coefficients per
        bulk sample.  If nothing has been selected yet, ``select`` is called
        automatically.
        """
        if self._selection is None:
            self.select(**kwargs)
        selection = self._adata.var[key] if key else self._selection
        bulk_data, bulk_genes = self.__unpack_bulk(bulk)
        X, y = self.__model_input(bulk_data, bulk_genes, selection)
        if model == "nusvr":
            # NuSVR hyper-parameters, each overridable via kwargs.
            nu = kwargs.get('nu', 0.5)
            C = kwargs.get('C', 0.5)
            kernel = kwargs.get('kernel', 'linear')
            degree = kwargs.get('degree', 3)
            gamma = kwargs.get('gamma', 'scale')
            coef0 = kwargs.get('coef0', 0.0)
            shrinking = kwargs.get('shrinking', True)
            tol = kwargs.get('tol', 1e-3)
            cache_size = kwargs.get('cache_size', 200)
            verbose = kwargs.get('verbose', False)
            max_iter = kwargs.get('max_iter', -1)
            if y.shape[1] == 1:
                y = np.ravel(y)
                model = NuSVR(nu=nu, C=C, kernel=kernel, degree=degree,
                              gamma=gamma, coef0=coef0, shrinking=shrinking,
                              tol=tol, cache_size=cache_size, verbose=verbose,
                              max_iter=max_iter)
                model.fit(X, y)
                self.model = model
                return model.coef_
            else:
                # One regression per bulk sample (column of y).
                res = np.zeros((y.shape[1], X.shape[1]))
                for i in range(y.shape[1]):
                    model = NuSVR(nu=nu, C=C, kernel=kernel, degree=degree,
                                  gamma=gamma, coef0=coef0,
                                  shrinking=shrinking, tol=tol,
                                  cache_size=cache_size, verbose=verbose,
                                  max_iter=max_iter)
                    model.fit(X, y[:, i])
                    self.model = model
                    res[i] = model.coef_
                return res
        if model == "nnls":
            if y.ndim == 1:
                x, err = nnls(X, y)
                return x
            else:
                res = np.zeros((y.shape[1], X.shape[1]))
                for i in range(y.shape[1]):
                    x, err = nnls(X, y[:, i])
                    res[i] = x
                return res
        if model == "linear":
            model = linear_model.LinearRegression(copy_X=True, fit_intercept=False)
            model.fit(X, y)
            self.model = model
            return model.coef_
        raise ValueError("Model is not supported")

    def pipeline(self, data, bulk, **kwargs):
        """Run init, optimize and deconvolve in one call."""
        self.init(data, **kwargs)
        # BUG FIX: the original called self.run(), which does not exist on
        # this class; the optimizer entry point is optimize().
        self.optimize(**kwargs)
        return self.deconvolve(bulk, **kwargs)

    def resume(self):
        """Resume an optimization process that has been interrupted."""
        if self.main is None:
            raise Exception("Not initialized")
        self.main.resume()

    def save(self, filename):
        """Pickle the current state to *filename*."""
        # FIX: close the file deterministically.
        with open(filename, 'wb') as f:
            pickle.dump(self, f)

    def load(self, filename):
        """Restore a state previously written by save()."""
        with open(filename, 'rb') as f:
            tmp = pickle.load(f)
        self.__dict__.update(tmp.__dict__)

    def adata(self):
        """Return the attached AnnData (None if init() got a raw matrix)."""
        return self._adata

    def fitness_matrix(self):
        """Return a DataFrame of objective values, one row per pareto solution."""
        return self.main.fitness_matrix

    def pareto(self):
        """Return all pareto-optimal gene selections as boolean arrays."""
        if self.main is None:
            raise Exception("Not initialized")
        return list(map(self.__process_selection, self.main.pareto))

    def selection(self):
        """Return the currently selected genes as a boolean array."""
        if self._selection is None:
            raise Exception("Nothing selected")
        return self._selection

    #
    # Helper
    #

    def __process_selection(self, s):
        # Expand a selection over the pre-selected genes back to the full
        # gene space; genes filtered out by pre_selection stay False.
        r = self.pre_selection.copy()
        i = 0
        for k, val in enumerate(self.pre_selection):
            if val:
                r[k] = s[i]
                i += 1
        return r

    def __compute_means(self, adata, celltype_key):
        """Return a new, shallow AnnData of per-celltype mean expression.

        Rows are the cell types, columns the genes of the original adata.
        """
        if celltype_key not in adata.obs:
            raise ValueError("Key not found")
        if not sparse.issparse(adata.X):
            sc_means = pd.DataFrame(data=adata.X, columns=adata.var_names)
            sc_means['cell_types'] = pd.Series(data=adata.obs[celltype_key].values,
                                               index=sc_means.index)
            sc_means = sc_means.groupby('cell_types').mean()
        else:
            # Sparse path: average each cell type's rows without densifying.
            sc_means = pd.DataFrame(index=adata.var_names)
            for cell in set(adata.obs[celltype_key]):
                sc_means[cell] = sparse.csr_matrix.mean(
                    adata[adata.obs[celltype_key] == cell].X, axis=0).tolist()[0]
            sc_means = sc_means.T
        if len(sc_means.index) == 1:
            raise ValueError("More than 1 cell types expected")
        result = anndata.AnnData(sc_means)
        result.var = adata.var.copy()
        result.var_names = adata.var_names
        return result

    def __model_input(self, bulk_data, bulk_genes, selection):
        """Build (X, y) for regression from bulk data and a gene selection."""
        data_genes = self.data_genes
        # Case: gene labels for both bulk and data are available
        if bulk_genes is not None and data_genes is not None:
            common_genes = np.isin(data_genes, bulk_genes)
            intersect_genes = np.logical_and(common_genes, selection)
            n_intersect_genes = sum(intersect_genes)
            if n_intersect_genes == 0:
                raise ValueError("None of the selected genes appear in the bulk data")
            if n_intersect_genes < sum(selection):
                warnings.warn("Some of the selected genes don't appear in the bulk data and will be ignored")
            if self._adata:
                X = self._adata.X.T[intersect_genes]
            else:
                X = self.data.T[intersect_genes]
            # Genes in bulk may be in a different order and of different
            # size, so pick them out by name rather than by mask.
            y = np.zeros((bulk_data.shape[0], n_intersect_genes))
            gene_names = data_genes[intersect_genes]
            for i, gene in enumerate(gene_names):
                bulk_gene_index = np.argwhere(bulk_genes == gene)[0][0]
                y[:, i] = bulk_data[:, bulk_gene_index]
            y = y.T
        # Case: no gene labels available (for at least one side)
        else:
            bulk_dim = bulk_data.shape[1]
            if bulk_dim != len(selection):
                raise ValueError("Bulk data has wrong shape")
            if self._adata:
                X = self._adata.X.T[selection]
            else:
                X = self.data.T[selection]
            y = bulk_data.T[selection]
        return X, y

    def __unpack_bulk(self, bulk):
        """Return (2-dim ndarray bulk_data, 1-dim bulk_genes or None)."""
        bulk_data, bulk_genes = None, None
        if isinstance(bulk, np.ndarray):
            if bulk.ndim == 1:
                bulk = bulk.reshape(1, len(bulk))
            bulk_data = bulk
        if isinstance(bulk, anndata.AnnData):
            bulk_genes = bulk.var.index.values
            bulk_data = bulk.X
        if isinstance(bulk, pd.Series):
            bulk_genes = bulk.index.values
            bulk_data = bulk.values.reshape(1, len(bulk))
        if isinstance(bulk, pd.DataFrame):
            bulk_genes = bulk.columns.values
            bulk_data = bulk.values
        if bulk_data is None:
            raise ValueError("Invalid data type for bulk")
        return bulk_data, bulk_genes
| StarcoderdataPython |
6461904 | """
content.index
"""
from datetime import datetime
import logging
import os
from zoom.mvc import View
from zoom.page import page
from zoom.browse import browse
from pages import load_page
class MyView(View):
    """Content admin views: an overview page and a page renderer."""

    def index(self):
        """Placeholder overview page."""
        return page('Metrics and activity log and statistics will go here.', title='Overview')

    def show(self, path=None):
        """Render the page at *path*; the site root uses the 'index' template."""
        template = 'default'
        # IDIOM FIX: compare to None with "is", not "==".
        if path is None or path == 'content/index.html':
            path = ''
            template = 'index'
        else:
            # drop the leading path segment (the "content" prefix)
            path = '/'.join(path.split('/')[1:])
        content = load_page(path)
        # NOTE(review): falls through to an implicit None (404 upstream?)
        # when the page is missing -- confirm intended.
        if content:
            return page(content, template=template)


view = MyView()
| StarcoderdataPython |
3569164 | <gh_stars>1-10
import click
from app.domain.commands import DownloadIFQ
from app import bootstrap
@click.command()
@click.option(
    '--day',
    type=click.DateTime(),
    required=True,
    help='The day to summarize')
def run_command(day):
    """Downloads the IFQ issue for a specific day"""
    # `day` arrives as a datetime parsed by click.DateTime().
    print(f'downloading IFQ for {day}')
    cmd = DownloadIFQ(day=day)
    # Dispatch the command through the application's message bus;
    # the empty dict is the (unused) dependency/context mapping.
    messagebus = bootstrap.for_cli()
    messagebus.handle(cmd, {})


if __name__ == '__main__':
    run_command()
| StarcoderdataPython |
4971847 | # pylint: disable=no-name-in-module
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import activations, initializers, regularizers
from tensorflow.keras.layers import Layer
from tensorflow.python.keras.utils import conv_utils #pylint: disable=no-name-in-module
from typing import List, Tuple, Text, Optional, Union
import numpy as np
import tensornetwork as tn
import math
# pytype: disable=module-attr
@tf.keras.utils.register_keras_serializable(package='tensornetwork')
# pytype: enable=module-attr
class Conv2DMPO(Layer):
"""2D Convolutional Matrix Product Operator (MPO) TN layer.
This layer recreates the functionality of a traditional convolutional
layer, but stores the 'kernel' as a network of nodes forming an MPO.
The bond dimension of the MPO can be adjusted to increase or decrease the
number of parameters independently of the input and output dimensions.
When the layer is called, the MPO is contracted into a traditional kernel
and convolved with the layer input to produce a tensor of outputs.
Example:
::
# as first layer in a sequential model:
model = Sequential()
model.add(
Conv2DMPO(256,
kernel_size=3,
num_nodes=4,
bond_dim=16,
activation='relu',
input_shape=(32, 32, 256)))
# now the model will take as input tensors of shape (*, 32, 32, 256)
# and output arrays of shape (*, 32, 32, 256).
# After the first layer, you don't need to specify
# the size of the input anymore:
model.add(Conv2DMPO(256, 3, num_nodes=4, bond_dim=8, activation='relu'))
Args:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
num_nodes: Positive integer, number of nodes in the MPO.
Note input_shape[-1]**(1. / num_nodes) and filters**(1. / num_nodes)
must both be round.
bond_dim: Positive integer, size of the MPO bond dimension (between nodes).
Lower bond dimension means more parameter compression.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution
along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"`
data_format: A string,
one of `"channels_last"` or `"channels_first"`.
The ordering of the dimensions in the inputs.
`"channels_last"` corresponds to inputs with shape
`(batch, height, width, channels)` while `"channels_first"`
corresponds to inputs with shape
`(batch, channels, height, width)`.
It defaults to "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the node weight matrices.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer for the node weight matrices.
bias_regularizer: Regularizer for the bias vector.
Input shape:
4D tensor with shape: `(batch_size, h, w, channels)`.
Output shape:
4D tensor with shape: `(batch_size, h_out, w_out, filters)`.
"""
def __init__(self,
filters: int,
kernel_size: Union[int, Tuple[int, int]],
num_nodes: int,
bond_dim: int,
strides: Union[int, Tuple[int, int]] = 1,
padding: Text = "same",
data_format: Optional[Text] = "channels_last",
dilation_rate: Union[int, Tuple[int, int]] = (1, 1),
activation: Optional[Text] = None,
use_bias: bool = True,
kernel_initializer: Text = "glorot_uniform",
bias_initializer: Text = "zeros",
kernel_regularizer: Optional[Text] = None,
bias_regularizer: Optional[Text] = None,
**kwargs) -> None:
if num_nodes < 2:
raise ValueError('Need at least 2 nodes to create MPO')
if padding not in ('same', 'valid'):
raise ValueError('Padding must be "same" or "valid"')
if data_format not in ['channels_first', 'channels_last']:
raise ValueError('Invalid data_format string provided')
super().__init__(**kwargs)
self.nodes = []
self.filters = filters
self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
self.num_nodes = num_nodes
self.bond_dim = bond_dim
self.strides = conv_utils.normalize_tuple(strides, 2, 'kernel_size')
self.padding = padding
self.data_format = data_format
self.dilation_rate = conv_utils.normalize_tuple(dilation_rate,
2, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
def build(self, input_shape: List[int]) -> None:
# Disable the attribute-defined-outside-init violations in this function
# pylint: disable=attribute-defined-outside-init
if self.data_format == 'channels_first':
channel_axis = 1
else:
channel_axis = -1
if input_shape[channel_axis] is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
def is_perfect_root(n, n_nodes):
root = n**(1. / n_nodes)
return round(root)**n_nodes == n
channels = input_shape[channel_axis]
# Ensure dividable dimensions
assert is_perfect_root(channels, self.num_nodes), (
f'Input dim incorrect. '
f'{input_shape[-1]}**(1. / {self.num_nodes}) must be round.')
assert is_perfect_root(self.filters, self.num_nodes), (
f'Output dim incorrect. '
f'{self.filters}**(1. / {self.num_nodes}) must be round.')
super().build(input_shape)
in_leg_dim = math.ceil(channels**(1. / self.num_nodes))
out_leg_dim = math.ceil(self.filters**(1. / self.num_nodes))
self.nodes.append(
self.add_weight(name='end_node_first',
shape=(in_leg_dim, self.kernel_size[0],
self.bond_dim, out_leg_dim),
trainable=True,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer))
for i in range(self.num_nodes - 2):
self.nodes.append(
self.add_weight(name=f'middle_node_{i}',
shape=(in_leg_dim, self.bond_dim, self.bond_dim,
out_leg_dim),
trainable=True,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer))
self.nodes.append(
self.add_weight(name='end_node_last',
shape=(in_leg_dim, self.bond_dim,
self.kernel_size[1], out_leg_dim),
trainable=True,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer))
if self.use_bias:
self.bias_var = self.add_weight(
name='bias',
shape=(self.filters,),
trainable=True,
initializer=self.bias_initializer,
regularizer=self.bias_regularizer)
else:
self.use_bias = None
def call(self, inputs: tf.Tensor, **kwargs) -> tf.Tensor: #pylint: disable=arguments-differ
tn_nodes = [tn.Node(n, backend='tensorflow') for n in self.nodes]
for i in range(len(tn_nodes) - 1):
tn_nodes[i][2] ^ tn_nodes[i+1][1]
input_edges = [n[0] for n in tn_nodes]
output_edges = [n[3] for n in tn_nodes]
edges = [tn_nodes[0][1], tn_nodes[-1][2]] + input_edges + output_edges
contracted = tn.contractors.greedy(tn_nodes, edges)
tn.flatten_edges(input_edges)
tn.flatten_edges(output_edges)
tf_df = 'NCHW' if self.data_format == 'channels_first' else 'NHWC'
result = tf.nn.conv2d(inputs,
contracted.tensor,
self.strides,
self.padding.upper(),
data_format=tf_df,
dilations=self.dilation_rate)
if self.use_bias:
bias = tf.reshape(self.bias_var, (1, self.filters,))
result += bias
if self.activation is not None:
result = self.activation(result)
return result
def compute_output_shape(self, input_shape: List[int]) -> Tuple[
int, int, int, int]:
if self.data_format == 'channels_first':
space = input_shape[2:]
else:
space = input_shape[1:-1]
new_space = []
for i, _ in enumerate(space):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
if self.data_format == 'channels_first':
return (input_shape[0], self.filters) + tuple(new_space)
return (input_shape[0],) + tuple(new_space) + (self.filters,)
def get_config(self) -> dict:
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'num_nodes': self.num_nodes,
'bond_dim': self.bond_dim,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
}
base_config = super().get_config()
config.update(base_config)
return config
| StarcoderdataPython |
1928289 | <reponame>madcat1991/clustered_cars
"""
The script transforms bookings data into binary mode.
"""
import argparse
import logging
import sys
import numpy as np
import pandas as pd
from preprocessing.common import canonize_datetime, check_processed_columns
from feature_matrix.functions import replace_numerical_to_categorical
# cols: number of bins
# cols: number of bins
# Numeric columns to discretize and how many categorical bins each gets.
BINNING_COLS = {
    'adults': 3,
    'children': 3,
    'babies': 2,
    'avg_spend_per_head': 4,
    'drivetime': 3,
    'n_booked_days': 3
}

# Identifier columns that must survive all transformations untouched.
RESERVED_COLS = ["code", "year", "propcode", "bookcode"]
# Columns parsed into canonical datetimes before processing.
DATE_COLS = [u'bookdate', u'sdate', u"fdate"]
COLS_TO_DROP = [
    u'bookdate',  # no need
    u'sourcedesc'  # too detailed
]
def actualize_testing_data(training_df, testing_df):
    """Restrict *testing_df* to users, items and pairs usable for evaluation.

    Rows with users or items absent from training are removed, as are
    user/item pairs already observed during training.
    """
    logging.info(u"Testing data, before cleaning: %s", testing_df.shape)

    # Keep only rows whose user and item both appear in the training set.
    seen_users = training_df.code.unique()
    seen_items = training_df.propcode.unique()
    testing_df = testing_df[
        testing_df.code.isin(seen_users) & testing_df.propcode.isin(seen_items)
    ]

    # Drop user/item pairs that were already observed during training.
    seen_pairs = {(row.code, row.propcode) for row in training_df.itertuples()}
    keep = [(row.code, row.propcode) not in seen_pairs
            for row in testing_df.itertuples()]
    testing_df = testing_df[keep]

    logging.info(u"Testing data, after cleaning: %s", testing_df.shape)
    return testing_df
def remove_unrepresentative_users(bdf, min_bookings_per_user):
    """Drop bookings of users with fewer than *min_bookings_per_user* rows.

    A threshold of 1 (or less) is a no-op: the frame is returned unchanged.
    """
    if min_bookings_per_user > 1:
        logging.info("DF, before cleaning: %s", bdf.shape)
        counts = bdf.code.value_counts()
        logging.info("Removing users having less than %s bookings", min_bookings_per_user)
        keep_ids = counts[counts >= min_bookings_per_user].index
        bdf = bdf[bdf.code.isin(keep_ids)]
        logging.info("DF data, after cleaning: %s", bdf.shape)
    return bdf
def prepare_for_categorization(bdf):
    """Derive stay length, hourly drivetime and per-day spend; drop raw cols."""
    bdf = bdf.drop(COLS_TO_DROP, axis=1)
    # Length of stay in days, derived from start/finish dates.
    bdf[u'n_booked_days'] = (bdf.fdate - bdf.sdate).apply(lambda delta: delta.days)
    bdf = bdf.drop([u'sdate', u'fdate'], axis=1)
    bdf.drivetime = np.round(bdf.drivetime / 3600)  # seconds -> hours
    # Guard against NaT deltas and same-day bookings: minimum stay is 1 day.
    bdf.n_booked_days = bdf.n_booked_days.apply(
        lambda days: 1 if pd.isnull(days) or days < 1 else days)
    # Normalize spend to a per-day figure.
    bdf.avg_spend_per_head /= bdf.n_booked_days.astype(float)
    return bdf
def main():
    """Load bookings, bin numeric columns into categories, write the result."""
    logging.info(u"Start")

    bdf = pd.read_csv(args.data_csv_path)
    bdf = canonize_datetime(bdf, DATE_COLS)
    # Remember the input schema so the sanity check below can compare.
    original_columns = bdf.columns

    # categorizing
    bdf = remove_unrepresentative_users(bdf, args.min_bookings_per_user)
    bdf = prepare_for_categorization(bdf)
    bdf = replace_numerical_to_categorical(bdf, BINNING_COLS)

    # quality check: every original column must be accounted for, except the
    # derived n_booked_days which did not exist in the input.
    columns = set(bdf.columns).union(COLS_TO_DROP + DATE_COLS).difference(['n_booked_days'])
    check_processed_columns(columns, original_columns)

    bdf.to_csv(args.output_path, index=False)
    logging.info(u"Finish")
if __name__ == '__main__':
    # CLI entry point: parse arguments, configure logging, run the pipeline.
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("-d", required=True, dest="data_csv_path",
                        help=u"Path to a csv file with the cleaned bookings")
    parser.add_argument("-m", dest="min_bookings_per_user", type=int, default=1,
                        help=u"Min bookings per user. Default: 1")
    parser.add_argument("-o", default='t_bookings.csv', dest="output_path",
                        help=u"Path to the output CSV. Default: t_bookings.csv")
    parser.add_argument("--log-level", default='INFO', dest="log_level",
                        choices=['DEBUG', 'INFO', 'WARNINGS', 'ERROR'], help=u"Logging level")
    args = parser.parse_args()
    logging.basicConfig(
        format='%(asctime)s %(levelname)s:%(message)s', stream=sys.stdout, level=getattr(logging, args.log_level)
    )
    main()
| StarcoderdataPython |
384153 | import json
import os
import requests
import config as config
import twitter_helper as twitter_helper
from watson_developer_cloud import PersonalityInsightsV3
def send_pi_request(handle):
    """
    Send a request to PI given a handle name
    :return:
    JSON in python format
    """
    tweet_data = twitter_helper.process_tweets(handle)
    response = requests.post(
        config.pi_url + '/v2/profile',
        auth=(config.pi_username, config.pi_password),
        headers={
            'content-type': 'application/json',
            'accept': 'application/json'
        },
        data=tweet_data
    )
    print("Profile Request sent. Status code: %d, content-type: %s"
          % (response.status_code, response.headers['content-type']))
    return json.loads(response.text)
def extract_personality(pi_data):
    """
    Extract big 5 personality traits from PI json output and places it in a dictionary
    :param pi_data:
    :return:
    Dictionary with key:value equaling trait name: value
    """
    # The big-5 traits live two levels down in the PI response tree.
    traits = pi_data['tree']['children'][0]['children'][0]['children']
    return {
        trait['name'].lower().replace(" ", "_"): trait['percentage']
        for trait in traits
    }
if __name__ == '__main__':
    # Demo driver: loads a cached PI response from disk instead of calling
    # the live service (the send_pi_request call is intentionally disabled).
    users = ['DaveRench', 'soc_brianne', 'gravitysydney', 'KevinReuning']
    user = users[2]

    # data = send_pi_request(user)
    filename = "./data/person2.json"  # NOTE: unused; the load below reads person.json
    with open(os.path.join(os.path.dirname(__file__), './data/person.json')) as pi_json:
        data = json.load(pi_json)

    pdata = extract_personality(data)
    new_data = {'handle': user, 'personality': pdata}
    print(new_data)
| StarcoderdataPython |
1987450 | <gh_stars>1-10
import nth_tac_toe
# A single shared 3x3 game instance driven by the manual-input loops below.
boardSize = 3
testGame = nth_tac_toe.Game(boardSize)
def X_manual():
    """Prompt until player x supplies a position the board accepts."""
    while True:
        raw = input("x position:")
        coords = raw.split(" ")
        # updateBoard returns None on a successful (legal) move.
        if testGame.updateBoard("x", coords) is None:
            testGame.display()
            return
def O_manual():
    """Prompt until player o supplies a position the board accepts."""
    while True:
        raw = input("o position:")
        coords = raw.split(" ")
        # updateBoard returns None on a successful (legal) move.
        if testGame.updateBoard("o", coords) is None:
            testGame.display()
            return
# Main game loop: show the empty board once, then alternate x/o turns
# until the game reports it is no longer in play.
sd = 0  # one-shot flag so the initial board is displayed only once
while 1:
    if sd == 0:
        testGame.display()
        sd += 1
    X_manual()
    if testGame.curr_playing == False:
        break
    O_manual()
    if testGame.curr_playing == False:
        break
| StarcoderdataPython |
4948189 | #!/usr/bin/env python3
#
# Short version, harder to read
# Each boarding pass is binary: B/R mean 1, F/L mean 0; the whole string
# is the seat id in base 2.
binary_map = str.maketrans("BFRL", "1010")
seats = [int(line.translate(binary_map), 2)
         for line in open('input.txt').read().splitlines()]

# part 1
print(max(seats))

# part 2
seats.sort()
for prev, cur in zip(seats, seats[1:]):
    if cur - prev > 1:
        print('missing', cur - 1)
| StarcoderdataPython |
9791885 | import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
import itertools
from pipeline import lab, experiment, psth
from pipeline import dict_to_hash
# ================== DEFINE LOOK-UP ==================
# ==================== Project =====================
# Seed the Project lookup with the two published datasets.
experiment.Project.insert([('li2015', '', 'doi:10.1038/nature14178'),
                           ('lidaie2016', '', 'doi:10.1038/nature17643')],
                          skip_duplicates=True)

# ==================== Probe =====================
# Probe - NeuroNexus Silicon Probe
probe = 'A4x8-5mm-100-200-177'
lab.Probe.insert1({'probe': probe,
                   'probe_type': 'nn_silicon_probe'}, skip_duplicates=True)
# 32 electrodes, numbered 1..32, all in a single electrode group.
lab.Probe.Electrode.insert(({'probe': probe, 'electrode': x} for x in range(1, 33)), skip_duplicates=True)

electrode_group = {'probe': probe, 'electrode_group': 0}
electrode_group_member = [{**electrode_group, 'electrode': chn} for chn in range(1, 33)]
electrode_config_name = 'silicon32'  #
# The config hash uniquely identifies this group + membership layout.
electrode_config_hash = dict_to_hash(
    {**electrode_group, **{str(idx): k for idx, k in enumerate(electrode_group_member)}})

lab.ElectrodeConfig.insert1({'probe': probe,
                             'electrode_config_hash': electrode_config_hash,
                             'electrode_config_name': electrode_config_name}, skip_duplicates=True)
lab.ElectrodeConfig.ElectrodeGroup.insert1({'electrode_config_name': electrode_config_name,
                                            **electrode_group}, skip_duplicates=True)
lab.ElectrodeConfig.Electrode.insert(({'electrode_config_name': electrode_config_name, **member}
                                      for member in electrode_group_member), skip_duplicates=True)
# ==================== Brain Location =====================
# Every (brain area, hemisphere) combination, all referenced to Bregma.
# Generates the same nine entries the previous literal list spelled out:
# {left,right,both}_{m2,alm,pons}.
brain_locations = [{'brain_location_name': hemisphere + '_' + area.lower(),
                    'brain_area': area,
                    'hemisphere': hemisphere,
                    'skull_reference': 'Bregma'}
                   for area in ('M2', 'ALM', 'PONS')
                   for hemisphere in ('left', 'right', 'both')]
experiment.BrainLocation.insert(brain_locations, skip_duplicates=True)
# ==================== Photostim Trial Condition =====================
# Build every photostim trial condition: (stim location) x (trial
# instruction, incl. non-performing) x (stim period) x (stim duration).
stim_locs = ['left_alm', 'right_alm', 'both_alm']
stim_periods = [None, 'sample', 'early_delay', 'middle_delay']  # None = no-stim condition

trial_conditions = []
for loc in stim_locs:
    for instruction in (None, 'left', 'right'):
        for period, stim_dur in itertools.product(stim_periods, (0.5, 0.8)):
            # Condition name concatenates only the non-empty parts, e.g.
            # 'all_noearlylick_left_alm_sample_0.5_stim_left'.
            condition = {'trial_condition_name': '_'.join(filter(None, ['all', 'noearlylick', loc,
                                                                        period, str(stim_dur), 'stim', instruction])),
                         'trial_condition_func': '_get_trials_include_stim',
                         'trial_condition_arg': {
                             **{'_outcome': 'ignore',
                                'task': 'audio delay',
                                'task_protocol': 1,
                                'early_lick': 'no early',
                                'brain_location_name': loc},
                             **({'trial_instruction': instruction} if instruction else {'_trial_instruction': 'non-performing'}),
                             **({'photostim_period': period, 'duration': stim_dur} if period else dict())}}
            trial_conditions.append(condition)

psth.TrialCondition.insert_trial_conditions(trial_conditions)
| StarcoderdataPython |
1609076 | <filename>setup.py
import os.path
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
import latest
# Use the README as the long description shown on PyPI.
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst')) as f:
    long_description = f.read()
class Tox(TestCommand):
    """setuptools `test` command that delegates to tox."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Import here, because outside the eggs aren't loaded yet.
        import tox
        tox.cmdline(self.test_args)
        # NOTE(review): tox.cmdline normally exits via SystemExit itself;
        # this unconditional exit(1) only runs if it returns — confirm the
        # intended exit code (a tox return value would be more accurate).
        sys.exit(1)
# Package metadata and build configuration for the `latest` template engine.
setup(
    name=latest.__project__,
    version=latest.__release__,
    description='A LaTeX-oriented template engine.',
    long_description=long_description,
    author='<NAME>',
    author_email='<EMAIL>',
    tests_require=[
        'tox',
    ],
    cmdclass={
        # `python setup.py test` runs the tox-based suite above.
        'test': Tox
    },
    install_requires=[
        'pyparsing>=2.2.0',
        'pyyaml>=5.0.0',
    ],
    include_package_data=True,
    license='MIT',
    url='https://github.com/bluephlavio/latest',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    keywords='latex template engine',
    packages=['latest'],
    entry_points={
        # Console entry point: the `latest` CLI.
        'console_scripts': ['latest=latest.__main__:main'],
    },
)
| StarcoderdataPython |
4882130 | <filename>transpyler/templ_utils.py
import ast
import _ast
def is_const(node):
    """True when node.ast is a literal constant, optionally behind a unary op."""
    tree = node.ast
    if isinstance(tree, _ast.Constant):
        return True
    # A unary operation (e.g. -5) counts when its operand is a constant.
    return isinstance(tree, _ast.UnaryOp) and isinstance(tree.operand, _ast.Constant)
def get_val(node):
    """Evaluate a constant node to its Python value, or 'unknown' otherwise."""
    return ast.literal_eval(node.ast) if is_const(node) else 'unknown'
class cmp:
    """Hold the min/max of two nodes; 'unknown' when either is non-constant."""

    def __init__(self, first, second):
        if is_const(first) and is_const(second):
            values = (get_val(first), get_val(second))
            self.max = max(values)
            self.min = min(values)
        else:
            self.max = 'unknown'
            self.min = 'unknown'
utils = {'cmp': cmp, 'get_val': get_val, 'is_const': is_const}
| StarcoderdataPython |
175350 | #! /usr/bin/python3
import tkinter
import tkinter.messagebox as mb
def main():
    """Show an info box, ask a yes/no question, then enter the Tk main loop."""
    window = tkinter.Tk()
    mb.showinfo("Yo yo title", "Yo yo body")
    # askquestion returns the string "yes" or "no".
    answer = mb.askquestion("Do you ...", "Do something ?")
    if answer == "yes":
        print("Ok")
    window.mainloop()


if __name__ == '__main__':
    main()
3278262 | <filename>djaludir/core/tests/models/test_privacy.py
from django.conf import settings
from django.test import TestCase
from djaludir.core.models import Privacy
from djtools.utils.logging import seperator
class CorePrivacyTestCase(TestCase):
    """Exercise the Privacy ORM model against the fixture data."""

    fixtures = ['user.json', 'privacy.json']

    def setUp(self):
        # College ID of the configured test user.
        self.cid = settings.TEST_USER_COLLEGE_ID

    def test_privacy(self):
        """The fixture user must have at least one Privacy record."""
        print("\n")
        print("test privacy ORM data model")
        print(seperator())
        privacies = Privacy.objects.filter(user__id=self.cid)
        print("len = {}".format(len(privacies)))
        for privacy in privacies:
            print(privacy)
        self.assertGreaterEqual(len(privacies), 1)
| StarcoderdataPython |
3340143 | <reponame>Vivek-Kolhe/URL-Shortener
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from os import path
# Base URL prefixed to generated short links.
APP_URL = "https://someurl.com/"
# Single shared SQLAlchemy instance, bound to the app in create_app().
db = SQLAlchemy()
DB_NAME = "database.db"
def create_app():
    """Application factory: configure Flask, bind the DB, register routes."""
    app = Flask(__name__)
    app.config["SECRET_KEY"] = "<PASSWORD>"
    app.config["SQLALCHEMY_DATABASE_URI"] = f"sqlite:///{DB_NAME}"
    db.init_app(app)

    # Imported here to avoid a circular import at module load time.
    from .views import views
    app.register_blueprint(views, url_prefix = "/")

    # Importing models registers them with SQLAlchemy before table creation.
    from .models import URL_DB
    create_db(app)
    return app
def create_db(app):
    """Create the SQLite database file on first run only."""
    # NOTE(review): assumes the app runs from the project root so the DB
    # lives under 'website/' — confirm against the deployment layout.
    if not path.exists("website/" + DB_NAME):
        db.create_all(app = app)
        print("DB Created")
| StarcoderdataPython |
6561003 | from django.db import models
from django_oso.models import AuthorizedModel
class User(models.Model):
    """Plain Django user with moderation flags and authored posts."""
    username = models.CharField(max_length=255)
    is_moderator = models.BooleanField(default=False)
    is_banned = models.BooleanField(default=False)
    posts = models.ManyToManyField("Post")

    class Meta:
        app_label = "test_app2"
class Tag(AuthorizedModel):
    """Tag with oso-authorized queries; optionally public, owned by a user."""
    name = models.CharField(max_length=255)
    created_by = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    users = models.ManyToManyField(User)
    is_public = models.BooleanField(default=False)

    class Meta:
        app_label = "test_app2"
class Post(AuthorizedModel):
    """Post with oso-authorized queries; access_level gates visibility."""
    contents = models.CharField(max_length=255)
    # Visibility: 'public' or 'private' (default).
    access_level = models.CharField(
        choices=[(c, c) for c in ["public", "private"]], max_length=7, default="private"
    )
    created_by = models.ForeignKey(User, on_delete=models.CASCADE)
    needs_moderation = models.BooleanField(default=False)
    tags = models.ManyToManyField(Tag)

    class Meta:
        app_label = "test_app2"
| StarcoderdataPython |
1996787 | <reponame>polde-live/interprog1
"""
An Introduction to Interactive Programming in Python (Part 1)
Practice exercises for buttons and input fields # 1.
print_hello
print_goodbye
"""
import simpleguitk as simplegui
def print_hello():
print "Hello"
def print_goodbye():
print "Goodbye"
frame = simplegui.create_frame("Hello and Goodbye", 200, 200)
frame.add_button("Hello", print_hello, 50)
frame.add_button("Goodbye", print_goodbye, 50)
frame.start()
| StarcoderdataPython |
3394650 | from distutils.core import setup
import setuptools
# Runtime requirements pinned with compatible-release specifiers.
dependencies=[
    "setuptools~=57.0.0",
    "aiohttp~=3.7.4",
    "PyYAML~=5.4.1",
]

setup(
    name="chiahub_monitor",
    version="0.0.5",
    author="<NAME>",
    author_email="<EMAIL>",
    description="A monitoring utility for chia blockchain",
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",
    url="https://github.com/yan74/chiahub-monitor",
    install_requires=dependencies,
    packages=setuptools.find_packages(),
    project_urls={
        "Bug Tracker": "https://github.com/yan74/chiahub-monitor/issues"
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
| StarcoderdataPython |
390262 | # -*- coding: utf-8 -*-
# Generated by Django 1.9b1 on 2015-11-21 13:02
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: creates the Report table with
    # timestamps, free-text body, a resolved flag, and FKs to Item and the
    # configured user model.  Do not hand-edit applied migrations.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('items', '0008_auto_20151121_1244'),
    ]

    operations = [
        migrations.CreateModel(
            name='Report',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('updated_on', models.DateTimeField(auto_now=True)),
                ('text', models.TextField()),
                ('resolved', models.BooleanField(default=False)),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='items.Item')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| StarcoderdataPython |
3558705 | import allure
from page_objects.LoginPage import LoginPage
# Allure metadata (suite/epic/feature labels are shown in the report UI).
@allure.parent_suite("Проверка тестового магазина opencart")
@allure.suite("Тесты страницы авторизации")
@allure.epic("Проверка магазина на opencart")
@allure.feature("Проверка наличия элементов на странице логина")
@allure.title("Поиск элементов на странице логина")
@allure.description("""Тест проверяет наличие элементов на странице логина""")
@allure.severity(allure.severity_level.CRITICAL)
def test_login_page(browser, pytestconfig):
    # Smoke test: every expected control must be present on the login page;
    # each find_* call raises (failing the test) when its element is missing.
    login_page = LoginPage(browser).open()
    login_page.find_input_email()
    login_page.find_continue_button()
    login_page.find_input_password()
    login_page.find_forgotten_password()
    login_page.find_login_button()
| StarcoderdataPython |
11327679 | from unittest import TestCase
from flask_jwt_extended import create_access_token
from dimensigon.domain.entities import Step, ActionTemplate, ActionType, Orchestration
from dimensigon.network.auth import HTTPBearerAuth
from dimensigon.web import create_app, db, errors
class TestOrchestration(TestCase):
    def setUp(self):
        """Create and configure a new app instance for each test."""
        # create the app with common test config
        self.app = create_app('test')
        self.app_context = self.app.app_context()
        self.app_context.push()
        self.client = self.app.test_client()
        self.auth = HTTPBearerAuth(create_access_token('00000000-0000-0000-0000-000000000001'))

        db.create_all()
        # Shared ActionTemplate reused by most of the tests below.
        self.at = ActionTemplate(name='action', version=1, action_type=ActionType.SHELL, code='code to run',
                                 expected_stdout='expected output', expected_rc=0,
                                 system_kwargs={})

        ActionTemplate.set_initial()
    def tearDown(self) -> None:
        """Drop the per-test database and pop the pushed app context."""
        db.session.remove()
        db.drop_all()
        self.app_context.pop()
    def test_orch_creation(self):
        """An Orchestration persists and can be reloaded by primary key."""
        o = Orchestration(id="aaaaaaaa-1234-5678-1234-56781234aaa1",
                          name='Test Orchestration',
                          version=1,
                          description='description',
                          )

        db.session.add(o)
        db.session.commit()
        del o

        o = Orchestration.query.get("aaaaaaaa-1234-5678-1234-56781234aaa1")
    def test_parameters(self):
        """A step's action_template can be replaced after creation."""
        o = Orchestration(id=1,
                          name='Test Orchestration',
                          version=1,
                          description='description')

        s1 = o.add_step(undo=False, action_template=self.at, parents=[], children=[], stop_on_error=False, id=1)
        at = ActionTemplate(name='action', version=1, action_type=ActionType.SHELL, code='code to run',
                            expected_stdout='expected output', expected_rc=0,
                            system_kwargs={})
        s1.action_template = at
    def test_set_dependencies(self):
        """Dependencies can be given as an id-keyed dict or as edge pairs."""
        s1 = Step(orchestration=None, undo=False, stop_on_error=False, action_template=self.at,
                  id='11111111-2222-3333-4444-555555550001')
        s2 = Step(orchestration=None, undo=False, stop_on_error=False, action_template=self.at,
                  id='11111111-2222-3333-4444-555555550002')
        s3 = Step(orchestration=None, undo=False, stop_on_error=False, action_template=self.at,
                  id='11111111-2222-3333-4444-555555550003')
        o = Orchestration('test', 1, id='11111111-2222-3333-4444-555555550004',
                          description='description test', steps=[s1, s2, s3],
                          dependencies={s1.id: [s2.id], s2.id: [s3.id], s3.id: []})
        self.assertDictEqual(o.dependencies, {s1: [s2], s2: [s3], s3: []})
        self.assertListEqual(o.steps, [s1, s2, s3])

        # An unknown id inside the dependency mapping must be rejected.
        with self.assertRaises(ValueError):
            o.set_dependencies({s1: [s2], s2: [s3], s3: [4]})

        o = Orchestration('test', 1, id='11111111-2222-3333-4444-555555550004',
                          description='description test', steps=[s1, s2, s3])
        self.assertListEqual([s1, s2, s3], o.steps)
        o.set_dependencies([(s1, s2), (s2, s3)])
        self.assertDictEqual(o.dependencies, {s1: [s2], s2: [s3], s3: []})
    # noinspection PyTypeChecker
    def test_attributes_and_methods(self):
        """Exercise step graph editing: add/delete steps, parents, children."""
        o = Orchestration(id=1,
                          name='Test Orchestration',
                          version=1,
                          description='description')

        s1 = o.add_step(undo=False, action_template=self.at, parents=[], children=[], stop_on_error=False,
                        id=1)
        s2 = o.add_step(undo=True, action_template=self.at, parents=[s1], children=[], stop_on_error=False,
                        id=2)

        # A do-step cannot descend from an undo-step.
        with self.assertRaises(errors.ParentUndoError):
            o.add_step(undo=False, action_template=self.at, parents=[s2], children=[], stop_on_error=False)
        self.assertListEqual(o.steps, [s1, s2])

        # Steps from a different orchestration cannot be linked in.
        o2 = Orchestration('dto', 2, id=2)
        s21 = o2.add_step(undo=False, action_template=self.at, parents=[], children=[], stop_on_error=False,
                          id=21)
        with self.assertRaises(ValueError):
            o.add_parents(s21, [s1])

        o.delete_step(s2)
        self.assertListEqual(o.steps, [s1])
        self.assertListEqual(o.children[s1], [])

        s2 = o.add_step(undo=False, action_template=self.at, parents=[s1], children=[], stop_on_error=False, id=2)
        s3 = o.add_step(undo=False, action_template=self.at, parents=[s2], children=[], stop_on_error=False, id=3)
        s4 = o.add_step(undo=False, action_template=self.at, parents=[s2], children=[], stop_on_error=False, id=4)
        s5 = o.add_step(undo=False, action_template=self.at, parents=[s4], children=[], stop_on_error=False, id=5)
        s6 = o.add_step(undo=False, action_template=self.at, parents=[s4], children=[], stop_on_error=False, id=6)
        s7 = o.add_step(undo=False, action_template=self.at, parents=[s1], children=[s2], stop_on_error=False,
                        id=7)

        self.assertListEqual(o.steps, [s1, s2, s3, s4, s5, s6, s7])
        self.assertDictEqual(o.children,
                             {s1: [s2, s7], s2: [s3, s4], s3: [], s4: [s5, s6], s5: [], s6: [], s7: [s2]})
        self.assertDictEqual(o.parents,
                             {s1: [], s2: [s1, s7], s3: [s2], s4: [s2], s5: [s4], s6: [s4], s7: [s1]})

        with self.assertRaises(errors.ChildDoError):
            o.add_step(undo=True, action_template=self.at, parents=[s1], children=[s2], stop_on_error=False)

        # Linking s6 -> s1 would close a cycle in the DAG.
        with self.assertRaises(errors.CycleError):
            o.add_step(undo=False, action_template=self.at, parents=[s6], children=[s1], stop_on_error=False)

        # Check parent functions
        o.add_parents(s6, [s2])
        self.assertListEqual(o.children[s2], [s3, s4, s6])

        o.set_parents(s6, [s3, s4])
        self.assertListEqual(o.parents[s6], [s3, s4])

        o.delete_parents(s6, [s3, s2])
        self.assertListEqual(o.parents[s6], [s4])

        # Check children functions
        o.add_children(s3, [s6])
        self.assertListEqual(o.parents[s6], [s4, s3])

        o.delete_children(s4, [s5, s6])
        self.assertListEqual(o.parents[s6], [s3])
        self.assertListEqual(o.parents[s5], [])

        o.set_children(s4, [s5, s6]).set_children(s3, [])
        self.assertListEqual([s4], o.parents[s6])
        self.assertListEqual([s4], o.parents[s5])
        self.assertListEqual([], o.children[s3])

        # properties and default values
        s = o.add_step(undo=False, action_template=self.at)
        self.assertEqual(True, s.stop_on_error)
        self.assertEqual('code to run', s.code)
        self.assertEqual('expected output', s.expected_stdout)
        s.expected_output = 'changed'
        self.assertEqual('changed', s.expected_output)
        s.expected_output = ''
        self.assertEqual('', s.expected_output)
        # None falls back to the action template's value.
        s.expected_output = None
        self.assertEqual('expected output', s.expected_stdout)

        self.assertEqual(0, s.expected_rc)
        s.expected_rc = 2
        self.assertEqual(2, s.expected_rc)
        s.expected_rc = 0
        self.assertEqual(0, s.expected_rc)
        s.expected_rc = None
        self.assertEqual(0, s.expected_rc)
def test_eq_imp(self):
    """eq_imp() compares orchestrations structurally, ignoring id/version.

    Two orchestrations built with the same step layout are equivalent;
    adding a step to only one of them breaks the equivalence (in both
    directions) until the other side gets a matching step.
    """
    o1 = Orchestration('dto', 1)
    s11 = o1.add_step(False, self.at)
    s12 = o1.add_step(False, self.at, parents=[s11])
    o2 = Orchestration('dto', 2)
    s21 = o2.add_step(False, self.at, )
    s22 = o2.add_step(False, self.at, parents=[s21])
    # Identical structure -> equivalent, and the relation is symmetric.
    self.assertTrue(o1.eq_imp(o2))
    self.assertTrue(o2.eq_imp(o1))
    # o2 gains an undo step that o1 does not have.
    s23 = o2.add_step(True, self.at, parents=[s22])
    self.assertFalse(o1.eq_imp(o2))
    self.assertFalse(o2.eq_imp(o1))
    # Mirror the step on o1: equivalence is restored, for the matching
    # steps as well as for the orchestrations.
    s13 = o1.add_step(True, self.at, parents=[s12])
    self.assertTrue(o1.eq_imp(o2))
    self.assertTrue(s13.eq_imp(s23))
    self.assertTrue(o1.eq_imp(o2))
def test_init_on_load(self):
    """The internal step graph is rebuilt when the ORM reloads the row.

    After committing and re-fetching the orchestration, ``_graph`` must
    reflect the persisted steps, and a step added to a reloaded instance
    must show up again on the next reload.
    """
    o = Orchestration(id='aaaaaaaa-1234-5678-1234-aaaaaaaa0001',
                      name='Test Orchestration',
                      version=1,
                      description='description')
    s1 = o.add_step(undo=False, action_template=self.at, parents=[], children=[], stop_on_error=False,
                    id='bbbbbbbb-1234-5678-1234-bbbbbbbb0001')
    db.session.add(o)
    db.session.commit()
    del o
    # Reload from the database: the graph must contain the stored step.
    o = Orchestration.query.get('aaaaaaaa-1234-5678-1234-aaaaaaaa0001')
    self.assertEqual({s1: []}, o._graph.succ)
    s2 = o.add_step(undo=False, action_template=self.at, parents=[], children=[], stop_on_error=False,
                    id='bbbbbbbb-1234-5678-1234-bbbbbbbb0002')
    del o
    # A second reload also picks up the step added after the first load.
    o = Orchestration.query.get('aaaaaaaa-1234-5678-1234-aaaaaaaa0001')
    self.assertEqual({s1: [], s2: []}, o._graph.succ)
def test_schema(self):
    """Orchestration.schema merges the schemas of its steps.

    Inputs satisfied by an upstream step's output (via 'mapping') must
    not appear as orchestration-level inputs; unmapped required inputs
    are re-qualified as 'input.<name>'.  A mapping that refers to a name
    no step produces raises MappingError; 'env.*' sources are exempt.
    """
    self.maxDiff = None
    o = Orchestration('Schema Orch', 1)
    s1 = o.add_step(id=1, action_type=ActionType.SHELL, undo=False,
                    schema={'input': {'1_a': {},
                                      '1_b': {}},
                            'required': ['1_b'],
                            'output': ['1_c']})
    s2 = o.add_step(undo=False, action_type=ActionType.SHELL, parents=[s1],
                    schema={'input': {'2_a': {}},
                            'required': ['2_a'],
                            'mapping': {'2_a': {'from': '1_c'}}})
    # 2_a is fed from 1_c, so it does not surface at orchestration level.
    self.assertDictEqual({'input': {'1_a': {},
                                    '1_b': {}},
                          'required': ['input.1_b'],
                          'output': ['1_c']}, o.schema)
    o = Orchestration('Schema Orch', 1, id='00000000-0000-0000-0000-000000000001')
    s1 = o.add_step(id=1, action_type=ActionType.SHELL, undo=False,
                    schema={'input': {'1_a': {},
                                      '1_b': {}},
                            'required': ['1_b'],
                            'output': ['1_c']})
    s2 = o.add_step(undo=False, action_type=ActionType.SHELL,
                    schema={'input': {'2_a': {}},
                            'required': ['2_a'],
                            'output': ['2_b']})
    s3 = o.add_step(undo=False, action_type=ActionType.SHELL, parents=[s1],
                    schema={'input': {'3_a': {},
                                      '3_b': {}},
                            'required': ['3_a'],
                            'mapping': {'3_a': {'from': '2_b'}}})
    # 3_a is mapped from 2_b (another step's output) and disappears.
    self.assertDictEqual({'input': {'1_a': {},
                                    '1_b': {},
                                    '2_a': {},
                                    '3_b': {}},
                          'required': ['input.1_b', 'input.2_a'],
                          'output': ['1_c', '2_b']}, o.schema)
    db.session.add(o)
    # Nested orchestration: run the one above through the 'orchestration'
    # action template.
    o2 = Orchestration('Schema Orch', 1)
    at = ActionTemplate.query.filter_by(name='orchestration', version=1).one()
    s1 = o2.add_step(id=1, action_template=at,
                     undo=False,
                     schema={'mapping': {'orchestration': o.id}})
    # '5' is produced by no step and is not an env.* source -> invalid.
    s2 = o2.add_step(undo=False, action_type=1, parents=[s1],
                     schema={'input': {'1': {},
                                       '2': {}},
                             'required': ['1', '2'],
                             'mapping': {'1': {'from': '1_c'},
                                         '2': {'from': '5'}}})
    with self.assertRaises(errors.MappingError):
        self.assertDictEqual({'input': {'hosts': at.schema['input']['hosts'],
                                        '1_a': {},
                                        '1_b': {},
                                        '2_a': {},
                                        '3_b': {}},
                              'required': ['input.1_b', 'input.2_a', 'input.hosts'],
                              'output': ['1_c', '2_b']}, o2.schema)
    # Re-pointing '1' at an env.* source makes the schema resolvable again.
    s2.schema = {'input': {'1': {},
                           '2': {}},
                 'required': ['1'],
                 'mapping': {'1': {'from': 'env.server_id'},
                             '2': {'from': '5'}}}
    self.assertDictEqual({'input': {'hosts': at.schema['input']['hosts'],
                                    'version': {'type': 'integer'},  # from ActionTemplate
                                    '1_a': {},
                                    '1_b': {},
                                    '2_a': {},
                                    '3_b': {}},
                          'required': ['input.1_b', 'input.2_a', 'input.hosts'],
                          'output': ['1_c', '2_b']}, o2.schema)
    # test a container required in step
    o = Orchestration('Schema Orch', 1)
    s1 = o.add_step(id=1, action_type=ActionType.SHELL, undo=False,
                    schema={'container': {'1_a': {}},
                            'required': ['container.1_a']})
    s2 = o.add_step(id=1, action_type=ActionType.SHELL, undo=False,
                    schema={'input': {'1_b': {}},
                            'mapping': {'1_b': {'from': 'container.foo'}},
                            'required': ['1_b']}, parents=[s1])
    # container-sourced mappings surface as required container keys.
    self.assertDictEqual({'container': {'1_a': {}},
                          'required': ['container.1_a', 'container.foo']}, o.schema)
| StarcoderdataPython |
6488447 | '''
https://www.youtube.com/watch?v=PNj8uEdd5c0
'''
import itertools
from contextlib import closing
from os import PathLike
import sqlite3
from typing import Tuple, Iterable
from dataclasses import dataclass
from kivy.app import App
from kivy.properties import ObjectProperty
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.factory import Factory as F
import asynckivy as ak
from kivy_garden.draggable import KXDraggableBehavior
KV_CODE = r'''
<SHLabel@Label,SHButton@Button>:
size_hint_min: [v + dp(8) for v in self.texture_size]
<SHFood>:
orientation: 'vertical'
spacing: '4dp'
drag_timeout: 0
drag_cls: 'food'
size: '200dp', '200dp'
size_hint: None, None
opacity: .5 if self.is_being_dragged else 1.
canvas.before:
Color:
rgba: .4, .4, .4, 1
Line:
rectangle: (*self.pos, *self.size, )
Image:
allow_stretch: True
texture: root.datum.texture
size_hint_y: 3.
SHLabel:
text: '{} ({} yen)'.format(root.datum.name, root.datum.price)
<SHShelf@KXReorderableBehavior+RVLikeBehavior+StackLayout>:
padding: '10dp'
spacing: '10dp'
size_hint_min_y: self.minimum_height
drag_classes: ['food', ]
viewclass: 'SHFood'
<SHMain>:
orientation: 'vertical'
padding: '10dp'
spacing: '10dp'
SpecialBoxLayout:
BoxLayout:
orientation: 'vertical'
SHLabel:
text: 'Shelf'
font_size: max(20, sp(16))
bold: True
color: rgba("#44AA44")
ScrollView:
size_hint_y: 1000.
always_overscroll: False
SHShelf:
id: shelf
Splitter:
sizable_from: 'left'
min_size: 100
max_size: root.width
BoxLayout:
orientation: 'vertical'
SHLabel:
text: 'Your Shopping Cart'
font_size: max(20, sp(16))
bold: True
color: rgba("#4466FF")
ScrollView:
size_hint_y: 1000.
always_overscroll: False
SHShelf:
id: cart
BoxLayout:
size_hint_y: None
height: self.minimum_height
SHButton:
text: 'sort by price\n(ascend)'
on_press: shelf.data = sorted(shelf.data, key=lambda d: d.price)
SHButton:
text: 'sort by price\n(descend)'
on_press: shelf.data = sorted(shelf.data, key=lambda d: d.price, reverse=True)
Widget:
SHButton:
text: 'total price'
on_press: root.show_total_price()
SHButton:
text: 'sort by price\n(ascend)'
on_press: cart.data = sorted(cart.data, key=lambda d: d.price)
SHButton:
text: 'sort by price\n(descend)'
on_press: cart.data = sorted(cart.data, key=lambda d: d.price, reverse=True)
'''
@dataclass
class Food:
    """One product shown on the shelf / in the cart."""
    # Display name of the product.
    name: str = ''
    # Price in yen.
    price: int = 0
    # Pre-loaded kivy texture of the product image.
    texture: F.Texture = None
class ShoppingApp(App):
    """Kivy demo app: drag foods between a shelf and a shopping cart."""

    def build(self):
        Builder.load_string(KV_CODE)
        return SHMain()

    def on_start(self):
        # Kick off the async main() that loads (or creates) the SQLite
        # database stored next to this source file.
        ak.start(self.root.main(db_path=__file__ + r".sqlite3"))
class SHMain(F.BoxLayout):
    """Root widget: wires up the shelf/cart UI and loads the food data."""

    def show_total_price(self, *, _cache={}):
        """Show (and lazily create) a popup with the cart's total price."""
        # _cache is an intentional mutable default: the Popup is built
        # once and reused on every call.
        popup = _cache.get('popup', None)
        if popup is None:
            _cache['popup'] = popup = F.Popup(
                size_hint=(.5, .2, ),
                title='Total',
                content=F.Label(),
            )
        total_price = sum(d.price for d in self.ids.cart.data)
        popup.content.text = f"{total_price} yen"
        popup.open()

    async def main(self, db_path: PathLike):
        """Open the food database and stock the shelf with random amounts."""
        from random import randint
        from io import BytesIO
        from kivy.core.image import Image as CoreImage
        conn = await self._load_database(db_path)
        with closing(conn.cursor()) as cur:
            # Decode each stored PNG blob into a texture, keyed by name.
            self.food_textures = textures = {
                name: CoreImage(BytesIO(image_data), ext='png').texture
                for name, image_data in cur.execute("SELECT name, image FROM Foods")
            }
            # 2-4 copies of every food on the shelf.
            self.ids.shelf.data = [
                Food(name=name, price=price, texture=textures[name])
                for name, price in cur.execute("SELECT name, price FROM Foods")
                for __ in range(randint(2, 4))
            ]

    @staticmethod
    async def _load_database(db_path: PathLike) -> sqlite3.Connection:
        """Connect to the SQLite DB, creating and populating it on first run.

        If initialisation fails, the half-written file is removed so the
        next start retries from scratch.
        """
        from os.path import exists
        already_exists = exists(db_path)
        conn = sqlite3.connect(db_path)
        conn.execute("PRAGMA foreign_keys = ON")
        if not already_exists:
            try:
                await SHMain._init_database(conn)
            except Exception:
                from os import remove
                remove(db_path)
                raise
            else:
                conn.commit()
        return conn

    @staticmethod
    async def _init_database(conn: sqlite3.Connection):
        """Create the Foods table and download every product image."""
        from concurrent.futures import ThreadPoolExecutor
        import requests
        import asynckivy as ak
        with closing(conn.cursor()) as cur:
            cur.executescript("""
CREATE TABLE Foods (
name TEXT NOT NULL UNIQUE,
price INT NOT NULL,
image_url TEXT NOT NULL,
image BLOB DEFAULT NULL,
PRIMARY KEY (name)
);
INSERT INTO Foods(name, price, image_url) VALUES
('blueberry', 500, 'https://3.bp.blogspot.com/-RVk4JCU_K2M/UvTd-IhzTvI/AAAAAAAAdhY/VMzFjXNoRi8/s180-c/fruit_blueberry.png'),
('cacao', 800, 'https://3.bp.blogspot.com/-WT_RsvpvAhc/VPQT6ngLlmI/AAAAAAAAsEA/aDIU_F9TYc8/s180-c/fruit_cacao_kakao.png'),
('dragon fruit', 1200, 'https://1.bp.blogspot.com/-hATAhM4UmCY/VGLLK4mVWYI/AAAAAAAAou4/-sW2fvsEnN0/s180-c/fruit_dragonfruit.png'),
('kiwi', 130, 'https://2.bp.blogspot.com/-Y8xgv2nvwEs/WCdtGij7aTI/AAAAAAAA_fo/PBXfb8zCiQAZ8rRMx-DNclQvOHBbQkQEwCLcB/s180-c/fruit_kiwi_green.png'),
('lemon', 200, 'https://2.bp.blogspot.com/-UqVL2dBOyMc/WxvKDt8MQbI/AAAAAAABMmk/qHrz-vwCKo8okZsZpZVDsHLsKFXdI1BjgCLcBGAs/s180-c/fruit_lemon_tategiri.png'),
('mangosteen', 300, 'https://4.bp.blogspot.com/-tc72dGzUpww/WGYjEAwIauI/AAAAAAABAv8/xKvtWmqeKFcro6otVdLi5FFF7EoVxXiEwCLcB/s180-c/fruit_mangosteen.png'),
('apple', 150, 'https://4.bp.blogspot.com/-uY6ko43-ABE/VD3RiIglszI/AAAAAAAAoEA/kI39usefO44/s180-c/fruit_ringo.png'),
('orange', 100, 'https://1.bp.blogspot.com/-fCrHtwXvM6w/Vq89A_TvuzI/AAAAAAAA3kE/fLOFjPDSRn8/s180-c/fruit_slice10_orange.png'),
('soldum', 400, 'https://2.bp.blogspot.com/-FtWOiJkueNA/WK7e09oIUyI/AAAAAAABB_A/ry22yAU3W9sbofMUmA5-nn3D45ix_Y5RwCLcB/s180-c/fruit_soldum.png'),
('corn', 50, 'https://1.bp.blogspot.com/-RAJBy7nx2Ro/XkZdTINEtOI/AAAAAAABXWE/x8Sbcghba9UzR8Ppafozi4_cdmD1pawowCNcBGAsYHQ/s180-c/vegetable_toumorokoshi_corn_wagiri.png'),
('aloe', 400, 'https://4.bp.blogspot.com/-v7OAB-ULlrs/VVGVQ1FCjxI/AAAAAAAAtjg/H09xS1Nf9_A/s180-c/plant_aloe_kaniku.png');
""")
            # download images
            with ThreadPoolExecutor() as executer:
                with requests.Session() as session:  # The Session object may not be thread-safe so it's probably better not to use it...
                    async def download_one_image(name, image_url) -> Tuple[bytes, str]:
                        image = await ak.run_in_executer(lambda: session.get(image_url).content, executer)
                        return (image, name)
                    tasks = await ak.and_from_iterable(
                        download_one_image(name, image_url)
                        for name, image_url in cur.execute("SELECT name, image_url FROM Foods")
                    )
                # save images
                # NOTE(review): ``task.result`` is passed without being
                # called; if asynckivy tasks expose ``result`` as a method
                # this should be ``task.result()`` -- confirm against the
                # asynckivy API.
                cur.executemany(
                    "UPDATE Foods SET image = ? WHERE name = ?",
                    (task.result for task in tasks),
                )
class SHFood(KXDraggableBehavior, F.BoxLayout):
    """Draggable widget rendering one Food (image plus name/price label)."""

    # The Food record this widget renders; rebind=True so kv rules are
    # re-evaluated when the whole datum object is swapped out.
    datum = ObjectProperty(Food(), rebind=True)
class SpecialBoxLayout(F.BoxLayout):
    '''Always dispatches touch events to all the children'''

    def _dispatch_to_all(self, event_name, touch):
        # Materialize the results first so that *every* child receives the
        # event, even after one child has already claimed it (a bare
        # ``any(generator)`` would short-circuit and skip the rest).
        results = [child.dispatch(event_name, touch) for child in self.children]
        return any(results)

    def on_touch_down(self, touch):
        return self._dispatch_to_all('on_touch_down', touch)

    def on_touch_move(self, touch):
        return self._dispatch_to_all('on_touch_move', touch)

    def on_touch_up(self, touch):
        return self._dispatch_to_all('on_touch_up', touch)
class RVLikeBehavior:
    '''Mix-in class that adds RecycleView-like interface to layouts. But
    unlike RecycleView, this one creates view widgets as much as the number
    of the data.
    '''

    viewclass = ObjectProperty()
    '''widget-class or its name'''

    def __init__(self, **kwargs):
        # Pending refresh parameters; the actual rebuild runs at most once
        # per frame via the Clock trigger (-1 = before the next frame).
        self._rv_refresh_params = {}
        self._rv_trigger_refresh = Clock.create_trigger(self._rv_refresh, -1)
        super().__init__(**kwargs)

    def on_viewclass(self, *args):
        # Record that the viewclass changed: existing widgets are of the
        # old class and must not be reused.
        self._rv_refresh_params['viewclass'] = None
        self._rv_trigger_refresh()

    def _get_data(self) -> Iterable:
        # Prefer pending (not yet applied) data when a refresh is queued;
        # otherwise derive the data from the current child widgets.
        data = self._rv_refresh_params.get('data')
        return [c.datum for c in reversed(self.children)] if data is None else data

    def _set_data(self, new_data: Iterable):
        self._rv_refresh_params['data'] = new_data
        self._rv_trigger_refresh()

    data = property(_get_data, _set_data)

    def _rv_refresh(self, *args):
        """Rebuild child widgets so they mirror ``self.data``."""
        viewclass = self.viewclass
        if not viewclass:
            self.clear_widgets()
            return
        data = self.data
        params = self._rv_refresh_params
        # If the viewclass changed, old widgets cannot be reused; the
        # empty string doubles as an empty iterable for chain() below.
        reusable_widgets = '' if 'viewclass' in params else self.children[::-1]
        self.clear_widgets()
        if isinstance(viewclass, str):
            viewclass = F.get(viewclass)
        # iter(callable, sentinel) is an endless widget factory; zip stops
        # as soon as every datum has a widget.
        for datum, w in zip(data, itertools.chain(reusable_widgets, iter(viewclass, None))):
            w.datum = datum
            self.add_widget(w)
        params.clear()
# Register the mix-in with the Factory so kv rules can combine it with
# "+" syntax (e.g. KXReorderableBehavior+RVLikeBehavior+StackLayout).
F.register('RVLikeBehavior', cls=RVLikeBehavior)


if __name__ == '__main__':
    ShoppingApp().run()
| StarcoderdataPython |
11307646 | """ Entity Class
This file contains the entity class, and is a basis for all
objects within the project.
Methods:
- attack()
Handles the attack action of an entity.
- edit_hp()
Edits the hp of the entity.
Author: <NAME>
"""
# Import/s
import math
from DieClass import Die
class Entity:
    """Base class for all combat-capable objects in the project.

    Stats are given as ``[name, strength, dexterity, intelligence]``.
    Derived values:
      - ``prof``: the highest of the three statistics (drives attacks)
      - ``hp`` / ``max_hp``: strength * 10
      - ``armor``: dexterity // 2, capped at 15
    """

    def __init__(self, stats=["Entity", 10, 10, 10]):
        # NOTE: the default list is never mutated, so sharing it between
        # calls is safe here.
        self.name = stats[0]  # Name
        # Statistics (attribute names kept for backward compatibility
        # even though they shadow builtins).
        self.str = stats[1]
        self.dex = stats[2]
        self.int = stats[3]
        # Status arrays (buff entries are (operator, operand, message)).
        self.buff = []
        self.debuff = []
        # The entity's proficiency is simply its highest statistic.
        self.prof = max(stats[1:])
        # Calculate Health
        self.hp = self.str * 10
        self.max_hp = self.hp
        # Calculate Armor
        self.armor = min(self.dex // 2, 15)

    def attack(self):  # Basic attack.
        """
        Handle the attack action of an entity.

        Rolls a d20 to hit: a 1 fumbles (zero damage), a 20 crits
        (double damage).  The damage dice scale with ``prof``.

        - param: None
        - return: int damage
        """
        die = Die()  # Create Die
        outcome = 2  # index into disp_str: 0=fumble, 1=crit, 2=normal
        # Roll to hit.
        roll = die.roll()
        # Roll for damage: die size and number of rolls both grow with
        # proficiency (size capped at 5, at least one roll).
        die_type = min(self.prof // 6, 5)
        roll_count = max(self.prof // 6, 1)
        damage = die.roll(roll_count, die_type)
        if roll == 1:  # Fumble
            damage = 0
            outcome = 0
        elif roll == 20:  # Critical hit
            damage = damage * 2
            outcome = 1
        # Display
        disp_str = ["Fumble! ", "Critical Hit! ", ""]
        print(f"{disp_str[outcome]}{self.name} deals {damage} damage!(rolled a {roll})")
        return damage

    def edit_hp(self, value):
        """
        Change the hp value of the entity, clamped to [0, max_hp].

        - param: int value
        - return: None
        """
        self.hp = max(0, min(self.hp + value, self.max_hp))

    def edit_status(self, damage):
        """
        Apply (and consume) the oldest buff to *damage*.

        Each buff entry is ``(operator, operand, message)`` where the
        operator is "*" or "+"; the adjusted damage is rounded up.

        - param: int damage
        - return: int damage
        """
        if self.buff:  # Check there exists a buff
            current = self.buff.pop(0)  # Remove buff
            op, operand, message = current[0], current[1], current[2]
            if op == "*":
                damage = damage * operand  # Edit damage value
            elif op == "+":
                damage = damage + operand
            # Display
            print(message)
            damage = math.ceil(damage)
        return damage
4970491 | <filename>tests/core/inst/mem/inst_lw.py
#=========================================================================
# lwu
#=========================================================================
import random
from pymtl import *
from tests.context import lizard
from tests.core.inst_utils import *
#-------------------------------------------------------------------------
# gen_basic_test
#-------------------------------------------------------------------------
def gen_basic_test():
    """Smoke test: lwu loads a word from .data into x2.

    The base address (0x2000) arrives via mngr2proc; nops pad around the
    load so no pipeline hazards interfere.
    """
    return """
csrr x1, mngr2proc < 0x00002000
nop
nop
nop
nop
nop
nop
nop
nop
lwu x2, 0(x1)
nop
nop
nop
nop
nop
nop
nop
nop
csrw proc2mngr, x2 > 0x01020304
.data
.word 0x01020304
"""
#-------------------------------------------------------------------------
# gen_dest_dep_test
#-------------------------------------------------------------------------
def gen_dest_dep_test():
    """Destination-dependency tests: shrink the nop gap (5..0) before the
    instruction that consumes the loaded register."""
    return [
        gen_ld_dest_dep_test(5, "lwu", 0x2000, 0x00010203),
        gen_ld_dest_dep_test(4, "lwu", 0x2004, 0x04050607),
        gen_ld_dest_dep_test(3, "lwu", 0x2008, 0x08090a0b),
        gen_ld_dest_dep_test(2, "lwu", 0x200c, 0x0c0d0e0f),
        gen_ld_dest_dep_test(1, "lwu", 0x2010, 0x10111213),
        gen_ld_dest_dep_test(0, "lwu", 0x2014, 0x14151617),
        gen_word_data([
            0x00010203,
            0x04050607,
            0x08090a0b,
            0x0c0d0e0f,
            0x10111213,
            0x14151617,
        ])
    ]
#-------------------------------------------------------------------------
# gen_base_dep_test
#-------------------------------------------------------------------------
def gen_base_dep_test():
    """Base-register-dependency tests: shrink the nop gap (5..0) between
    the instruction producing the base register and the load."""
    return [
        gen_ld_base_dep_test(5, "lwu", 0x2000, 0x00010203),
        gen_ld_base_dep_test(4, "lwu", 0x2004, 0x04050607),
        gen_ld_base_dep_test(3, "lwu", 0x2008, 0x08090a0b),
        gen_ld_base_dep_test(2, "lwu", 0x200c, 0x0c0d0e0f),
        gen_ld_base_dep_test(1, "lwu", 0x2010, 0x10111213),
        gen_ld_base_dep_test(0, "lwu", 0x2014, 0x14151617),
        gen_word_data([
            0x00010203,
            0x04050607,
            0x08090a0b,
            0x0c0d0e0f,
            0x10111213,
            0x14151617,
        ])
    ]
#-------------------------------------------------------------------------
# gen_srcs_dest_test
#-------------------------------------------------------------------------
def gen_srcs_dest_test():
    """Load where the destination register is also the base register."""
    return [
        gen_ld_base_eq_dest_test("lwu", 0x2000, 0x01020304),
        gen_word_data([0x01020304])
    ]
#-------------------------------------------------------------------------
# gen_value_test
#-------------------------------------------------------------------------
def gen_value_test():
    """Value tests: positive/negative offsets, with aligned and
    unaligned base addresses, all landing on the same six data words."""
    return [
        # Test positive offsets
        gen_ld_value_test("lwu", 0, 0x00002000, 0xdeadbeef),
        gen_ld_value_test("lwu", 4, 0x00002000, 0x00010203),
        gen_ld_value_test("lwu", 8, 0x00002000, 0x04050607),
        gen_ld_value_test("lwu", 12, 0x00002000, 0x08090a0b),
        gen_ld_value_test("lwu", 16, 0x00002000, 0x0c0d0e0f),
        gen_ld_value_test("lwu", 20, 0x00002000, 0xcafecafe),
        # Test negative offsets
        gen_ld_value_test("lwu", -20, 0x00002014, 0xdeadbeef),
        gen_ld_value_test("lwu", -16, 0x00002014, 0x00010203),
        gen_ld_value_test("lwu", -12, 0x00002014, 0x04050607),
        gen_ld_value_test("lwu", -8, 0x00002014, 0x08090a0b),
        gen_ld_value_test("lwu", -4, 0x00002014, 0x0c0d0e0f),
        gen_ld_value_test("lwu", 0, 0x00002014, 0xcafecafe),
        # Test positive offset with unaligned base
        gen_ld_value_test("lwu", 1, 0x00001fff, 0xdeadbeef),
        gen_ld_value_test("lwu", 5, 0x00001fff, 0x00010203),
        gen_ld_value_test("lwu", 9, 0x00001fff, 0x04050607),
        gen_ld_value_test("lwu", 13, 0x00001fff, 0x08090a0b),
        gen_ld_value_test("lwu", 17, 0x00001fff, 0x0c0d0e0f),
        gen_ld_value_test("lwu", 21, 0x00001fff, 0xcafecafe),
        # Test negative offset with unaligned base
        gen_ld_value_test("lwu", -21, 0x00002015, 0xdeadbeef),
        gen_ld_value_test("lwu", -17, 0x00002015, 0x00010203),
        gen_ld_value_test("lwu", -13, 0x00002015, 0x04050607),
        gen_ld_value_test("lwu", -9, 0x00002015, 0x08090a0b),
        gen_ld_value_test("lwu", -5, 0x00002015, 0x0c0d0e0f),
        gen_ld_value_test("lwu", -1, 0x00002015, 0xcafecafe),
        gen_word_data([
            0xdeadbeef,
            0x00010203,
            0x04050607,
            0x08090a0b,
            0x0c0d0e0f,
            0xcafecafe,
        ])
    ]
#-------------------------------------------------------------------------
# gen_random_test
#-------------------------------------------------------------------------
def gen_random_test():
    """Randomized test: 100 lwu accesses into 128 random data words.

    For each access a random target word *a* and base word *b* are
    chosen; the base address and the signed offset between them are
    encoded and the loaded value is checked against the generated data.
    """
    # Generate some random data.  ``range`` (instead of the Py2-only
    # ``xrange``) keeps this portable across Python 2 and 3.
    data = []
    for i in range(128):
        data.append(random.randint(0, 0xffffffff))
    # Generate random accesses to this data
    asm_code = []
    for i in range(100):
        a = random.randint(0, 127)
        b = random.randint(0, 127)
        base = Bits(32, 0x2000 + (4 * b))
        offset = Bits(16, (4 * (a - b)))
        result = data[a]
        asm_code.append(gen_ld_value_test("lwu", offset.int(), base.uint(), result))
    # Add the data to the end of the assembly code
    asm_code.append(gen_word_data(data))
    return asm_code
# specific test
def gen_stall_add_test():
return """
csrr x1, mngr2proc < 0x00002000
csrr x2, mngr2proc < 0x00002004
lwu x6, 0(x1)
lwu x7, 0(x2)
add x8, x6, x7
csrw proc2mngr, x8 > 0x00000009
.data
.word 0x00000004
.word 0x00000005
"""
# specific test
# def gen_stall_mul_test():
# return """
# csrr x1, mngr2proc < 0x00002000
# csrr x2, mngr2proc < 0x00002004
# lwu x6, 0(x1)
# lwu x7, 0(x2)
# mul x8, x6, x7
# csrw proc2mngr, x8 > 0x00000014
#
# .data
# .word 0x00000004
# .word 0x00000005
# """
| StarcoderdataPython |
189242 | #!/usr/bin/env python
# coding: utf-8
from django.contrib.auth.hashers import make_password, check_password
from django.db.models import CharField
from django.utils import six
from django.utils.encoding import smart_text
class PasswordFieldDescriptor(object):
    """Descriptor storing a hashed password, compared via ``==``.

    ``instance.password == raw`` triggers __eq__, which verifies the raw
    password against the stored hash.

    NOTE(review): this descriptor defines __set__ but no __get__ and
    keeps the hash in ``self.value`` on the descriptor itself.  Since
    contribute_to_class installs a single descriptor instance per model
    class, the stored hash appears to be shared by *all* instances of
    that model -- confirm this is intentional before using the field on
    models with more than one live instance.  It also assumes the field
    is named ``password`` (see __set__).
    """

    def __init__(self):
        # The hashed password (None until first assignment).
        self.value = None

    def __eq__(self, other):
        # *other* is the raw password.  The setter callback allows
        # check_password to transparently upgrade a stale hash.
        def setter(raw_password):
            self.value = make_password(raw_password)
        return check_password(other, self.value, setter=setter)

    def __set__(self, instance, value):
        # Only re-hash when the incoming value differs from the stored
        # hash -- presumably so that assigning the already-hashed value
        # (e.g. when loading from the DB) does not double-hash; verify.
        if instance.password.value != value:
            self.value = make_password(value)

    def __str__(self):
        return smart_text(self.value)
class PasswordField(CharField):
    """CharField whose attribute access goes through PasswordFieldDescriptor.

    The stored value is a password hash; max_length defaults to 512 to
    leave room for long hash formats.
    """

    def __init__(self, *args, **kwargs):
        # Default the column width unless the caller overrides it.
        max_length = kwargs.pop('max_length', 512)
        super(PasswordField, self).__init__(max_length=max_length, *args, **kwargs)

    def contribute_to_class(self, cls, name, **kwargs):
        super(PasswordField, self).contribute_to_class(cls, name, **kwargs)
        # Install the descriptor so reads/writes of the attribute are
        # intercepted (one descriptor instance per model class).
        setattr(cls, name, PasswordFieldDescriptor())

    def to_python(self, value):
        # Strings (and None) pass straight through; descriptor objects
        # are unwrapped to their stored hash.
        if isinstance(value, six.string_types) or value is None:
            return value
        return smart_text(value.value)
| StarcoderdataPython |
6692783 | from wing import Wing
from engine import Engine
from payload import Payload
__all__ = ('Wing', 'Engine', 'Payload')
| StarcoderdataPython |
# Quiz grader: the answer is correct only when both radio1 and radio4
# are checked.  radio2/radio3 are read (so a missing key fails loudly)
# but are wrong answers with no feedback of their own.
answer1 = widget_inputs["radio1"]
answer2 = widget_inputs["radio2"]
answer3 = widget_inputs["radio3"]
answer4 = widget_inputs["radio4"]

is_correct = False
comments = []


def commentizer(new):
    """Queue a feedback message, skipping duplicates."""
    if new not in comments:
        comments.append(new)


# The redundant ``is_correct and True`` / ``and False`` chains of the
# original are replaced by direct assignments with identical results.
is_correct = answer1 == True
if not is_correct:
    commentizer("Check the first one. Remember, an SVG animation will animate the rotation of an image, as opposed to a gif which is a series of raster images displayed one after another.")

if answer4 != True:
    is_correct = False
    commentizer("Check the second one. Will the image be reused? If so, an external file probably makes more sense.")

if is_correct:
    commentizer("Great job!")
    commentizer(" I love the internet :)")

grade_result["comment"] = "\n\n".join(comments)
grade_result["correct"] = is_correct
212754 | # encoding: utf-8
"""The recommended development HTTP server."""
# ## Imports
from __future__ import unicode_literals, print_function
try:
from waitress import serve as serve_
except ImportError:
print("You must install the 'waitress' package.")
raise
# ## Server Adapter
def serve(application, host='127.0.0.1', port=8080, threads=4, **kw):
    """The recommended development HTTP server.

    Thin blocking wrapper around :func:`waitress.serve`.  *port* and
    *threads* may arrive as strings (e.g. from a config file) and are
    coerced to int; extra keyword arguments pass straight through.

    Note that this server performs additional buffering and will not honour chunked encoding breaks.
    """
    # Bind and start the server; this is a blocking process.
    serve_(application, host=host, port=int(port), threads=int(threads), **kw)
| StarcoderdataPython |
9625635 | """ File handling helper functions """
import os
import fnmatch
import tarfile
import warnings
def get_author():
    """Return the current user's name for metadata stamps.

    On Windows the login name is used; elsewhere the GECOS field of the
    passwd entry.  Falls back to ``"unknown"`` (with a warning) if the
    lookup fails for any reason.
    """
    try:
        import platform
        CURRENTOS = platform.system()
        if CURRENTOS == "Windows":
            import getpass
            author = getpass.getuser()
        else:
            import pwd
            author = pwd.getpwuid(os.getuid())[4]
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt and
        # SystemExit still propagate.
        author = "unknown"
        warnings.warn("Author could not be resolved.")
    return author
def create_directory(path):
    """Create the directory *path* recursively (like ``mkdir -p``).

    An already existing directory is not an error: os.path.exists is
    unreliable on some filesystems (e.g. gpfs) and another process may
    create the directory concurrently, so EEXIST is tolerated.

    Replaces the original hand-rolled component walk (which used the
    Python-2-only ``except OSError as (err_no, strerr)`` syntax and
    mis-handled relative paths) with os.makedirs.
    """
    import errno
    try:
        os.makedirs(path)
    except OSError as err:
        # Tolerate a concurrent (or prior) creation of the directory.
        if err.errno != errno.EEXIST:
            raise
def common_path(path1, path2, common=[]):
    """Return ``(shared_prefix, rest1, rest2)`` for two component lists.

    *common* is the already-accumulated prefix (kept in the signature for
    backward compatibility; it is never mutated).
    """
    prefix = list(common)
    while path1 and path2 and path1[0] == path2[0]:
        prefix.append(path1[0])
        path1 = path1[1:]
        path2 = path2[1:]
    return (prefix, path1, path2)
def get_relative_path(path1, path2):
    """ Return the relative path of *path1* to *path2* """
    common, tail1, tail2 = common_path(path1.split(os.path.sep),
                                       path2.split(os.path.sep))
    hops = []
    if tail1:
        # One ".." per directory level of path1 below the common prefix,
        # minus one (matching the original semantics).
        hops = [os.pardir + os.sep for _ in range(len(tail1) - 1)]
    return os.path.join(*(hops + tail2))
def locate(pattern, root=os.curdir):
    """Yield all files below *root* whose basename matches *pattern*.

    Locate all files matching supplied filename pattern in and below
    the supplied root directory.

    **Parameters**
        :pattern:
            The pattern (glob) the selected files must match
        :root:
            The root directory of the tree in which files are searched
    """
    for dirpath, _dirnames, filenames in os.walk(os.path.abspath(root)):
        for name in fnmatch.filter(filenames, pattern):
            yield os.path.join(dirpath, name)
def create_source_archive(archive_path, packages=["pySPACE"],
                          patterns=["*.py", "*.yaml"]):
    """ Store the source code of important packages

    Locates all files in the directory structure of the given *packages*
    that match the given *patterns* and adds them to a bz2 tar archive
    written into *archive_path*.

    :Author: <NAME> (<EMAIL>)
    :Created: 2010/08/12
    """
    # Create archive file
    archive_file = tarfile.open(archive_path + os.sep + "source_code_archive.tbz",
                                "w:bz2")
    # Find the directories in which the packages are located
    package_root_dirs = [__import__(package).__path__[0] + os.sep + os.pardir
                         for package in packages]
    # BUGFIX: the original saved ``os.curdir`` (the literal '.'), so the
    # final chdir never restored the working directory; remember the real
    # cwd and restore it even on error.
    orig_dir = os.getcwd()
    try:
        for package, package_root_dir in zip(packages, package_root_dirs):
            os.chdir(package_root_dir)
            # Add all files in the package matching one of the patterns.
            for pattern in patterns:
                for file_path in locate(pattern, package):
                    archive_file.add(file_path)
    finally:
        archive_file.close()
        os.chdir(orig_dir)
4955349 | <reponame>volundmush/mudstring-python
from rich.style import Style
from .base import ProtoStyle
from .colors import COLORS
from typing import Union, Tuple, List
from rich.text import Text, Span
from rich.color import Color
import html
import re
from enum import IntFlag, IntEnum
from xml.etree import ElementTree
ANSI_SECTION_MATCH = {
"letters": re.compile(r"^(?P<data>[a-z ]+)\b", flags=re.IGNORECASE),
"numbers": re.compile(r"^(?P<data>\d+)\b"),
"rgb": re.compile(
r"^<(?P<red>\d{1,3})\s+(?P<green>\d{1,3})\s+(?P<blue>\d{1,3})>(\b)?"
),
"hex1": re.compile(r"^#(?P<data>[0-9A-F]{6})\b", flags=re.IGNORECASE),
"hex2": re.compile(r"^<#(?P<data>[0-9A-F]{6})>(\b)?", flags=re.IGNORECASE),
"name": re.compile(r"^\+(?P<data>\w+)\b", flags=re.IGNORECASE),
}
STYLE_REVERSE = {1: "h", 2: "i", 4: "f", 8: "u"}
class StyleMap(IntFlag):
    """Bit flags for the ANSI text attributes a markup can toggle."""
    BOLD = 1
    INVERSE = 2
    FLASH = 4
    UNDERLINE = 8
class BgMode(IntEnum):
    """Which ground a colour token applies to (none, fore-, background)."""
    NONE = 0
    FG = 1
    BG = 2
CHAR_MAP = {"f": "flash", "h": "bold", "i": "reverse", "u": "underline"}
BASE_COLOR_MAP = {
"d": -1,
"x": 0,
"r": 1,
"g": 2,
"y": 3,
"b": 4,
"m": 5,
"c": 6,
"w": 7,
}
BASE_COLOR_REVERSE = {v: k for k, v in BASE_COLOR_MAP.items()}
def _process_ground(
    codes: str, bg: bool = False
) -> Tuple[str, Tuple[str, BgMode, object, object]]:
    """Consume one colour token from the front of *codes*.

    Returns the remaining string plus a tuple of
    ``(pattern_name, ground, parsed_value, matched_text)`` where *ground*
    says whether the token applies to the fore- or background.

    Raises ValueError when nothing matches (or a numeric code falls
    outside 0-255).

    Changes vs. the original: the leftover ``print(f"DATA IS: ...")``
    debug statement is removed and the dead ``matched`` bookkeeping is
    dropped (every successful branch returns immediately).
    """
    ground = BgMode.BG if bg else BgMode.FG
    for name, pattern in ANSI_SECTION_MATCH.items():
        if name == "letters" and ground == BgMode.BG:
            # Letters are not allowed immediately following a /
            continue
        match = pattern.match(codes)
        if not match:
            continue
        codes = codes[match.end():]
        if name == "letters":
            # Letter codes never carry a ground of their own.
            return codes, (name, BgMode.NONE, match.groupdict()["data"], match.group(0))
        if name == "numbers":
            number = abs(int(match.groupdict()["data"]))
            if number > 255 or number < 0:
                raise ValueError(match.group(0))
            return codes, (name, ground, number, match.group(0))
        if name == "name":
            return codes, (name, ground, match.groupdict()["data"].lower(), match.group(0))
        if name in ("hex1", "hex2"):
            data = match.groupdict()["data"]
            rgb = {
                "red": int(data[0:2], 16),
                "green": int(data[2:4], 16),
                "blue": int(data[4:6], 16),
            }
            return codes, (name, ground, rgb, match.group(0))
        if name == "rgb":
            rgb = {k: int(v) for k, v in match.groupdict().items()}
            return codes, (name, ground, rgb, match.group(0))
    # No pattern matched the head of the string.
    raise ValueError(codes)
def separate_codes(codes: str, errors: str = "strict"):
    """Yield parsed colour tokens from *codes*.

    A leading ``/`` or ``!`` switches the following token to the
    background ground; whitespace separates tokens and is otherwise
    ignored.  Each yielded item is the tuple produced by
    _process_ground().
    """
    # Collapse all runs of whitespace to single spaces first.
    codes = " ".join(codes.split())
    while len(codes):
        if codes[0] in ("/", "!"):
            # Background marker: consume it and look at what follows.
            codes = codes[1:]
            if not len(codes):
                # if there's nothing after a / then we just break.
                break
            if codes[0].isspace():
                codes = codes[1:]
                # if a space immediately follows a / , then it is treated as no color.
                # it will be ignored.
                continue
            elif codes[0] in ("/", "!"):
                continue
            else:
                remaining, result = _process_ground(codes, True)
                codes = remaining
                yield result
        elif codes[0].isspace():
            codes = codes[1:]
            continue
        else:
            # Ordinary (foreground) token.
            remaining, result = _process_ground(codes, False)
            codes = remaining
            yield result
        # NOTE(review): dead assignment -- ``matched`` is never read here.
        matched = False
def test_separate(codes: str):
    """Debug helper: print every token that separate_codes parses."""
    for parsed_token in separate_codes(codes):
        print(parsed_token)
def apply_color_rule(mark: ProtoStyle, rule_tuple):
    """Apply one parsed colour token (from separate_codes) to *mark*.

    *rule_tuple* is ``(pattern_name, ground, data, original_text)``.

    BUGFIX: the original used walrus-in-truthiness tests such as
    ``if (code := BASE_COLOR_MAP.get(c, None)):`` which are falsy for
    colour code 0 ('x'/'X' == black), silently dropping it; lookups now
    compare against None explicitly.
    """
    mode, g, data, original = rule_tuple
    if mode == "letters":
        for c in data:
            if c == "n":
                # ANSI reset
                mark.do_reset()
                continue
            bit = CHAR_MAP.get(c)
            if bit is not None:
                setattr(mark, bit, True)
                continue
            bit = CHAR_MAP.get(c.lower())
            if bit is not None:
                setattr(mark, bit, False)
                continue
            code = BASE_COLOR_MAP.get(c)
            if code is not None:
                mark.color = Color.from_ansi(code)
                continue
            code = BASE_COLOR_MAP.get(c.lower())
            if code is not None:
                mark.bgcolor = Color.from_ansi(code)
            # anything else is irrelevant and silently ignored
    elif g == BgMode.FG:
        if mode == "numbers":
            mark.color = Color.from_ansi(data)
        elif mode == "name":
            found = COLORS.get(data)
            if found is not None:
                mark.color = Color.from_ansi(found["xterm"])
        elif mode in ("rgb", "hex1", "hex2"):
            mark.color = Color.from_rgb(data["red"], data["green"], data["blue"])
    elif g == BgMode.BG:
        if mode == "numbers":
            mark.bgcolor = Color.from_ansi(data)
        elif mode == "name":
            found = COLORS.get(data)
            if found is not None:
                mark.bgcolor = Color.from_ansi(found["xterm"])
        elif mode in ("rgb", "hex1", "hex2"):
            mark.bgcolor = Color.from_rgb(data["red"], data["green"], data["blue"])
def apply_rules(mark: ProtoStyle, rules: str):
    """Parse *rules* and apply every colour/style token to *mark*."""
    for parsed_token in separate_codes(rules):
        apply_color_rule(mark, parsed_token)
def apply_mxp(mark: ProtoStyle, rules: str):
    """Parse an MXP tag spec (e.g. ``'send href="x"'``) onto *mark*.

    The tag name and attribute text are wrapped into a closed XML
    element, parsed with ElementTree, and the resulting tag name and
    attribute dict are stored on *mark*.
    """
    tag_name, sep, attr_text = rules.partition(" ")
    if sep:
        xml = f"<{tag_name} {attr_text}></{tag_name}>"
    else:
        xml = f"<{tag_name}></{tag_name}>"
    element = ElementTree.fromstring(xml)
    mark.tag = element.tag
    mark.xml_attr = element.attrib
def serialize_colors(s: Style) -> str:
    """Serialize a style object back into its colour-code string.

    BUGFIX: the original body referred to an undefined name ``c`` while
    the parameter was ``s``, raising NameError on every call; all uses
    now go through the parameter.

    NOTE(review): the attributes read here (reset, bits, off_bits,
    fg_mode, fg_color, bg_mode, bg_color) do not match rich's Style --
    the annotation looks stale; confirm the intended argument type.
    """
    if s.reset:
        return "n"
    output = ""
    # Style attribute bits -> letter codes (upper case == switch off).
    for flag, letter in STYLE_REVERSE.items():
        if flag & s.bits:
            output += letter
        if flag & s.off_bits:
            output += letter.upper()
    # Mode 0: classic 8-colour letters (upper case == background).
    if s.fg_mode == 0:
        output += BASE_COLOR_REVERSE.get(s.fg_color)
    if s.bg_mode == 0:
        output += BASE_COLOR_REVERSE.get(s.bg_color).upper()
    letters = bool(output)
    # Mode 1: xterm-256 numeric codes; background written as /<num>.
    if s.fg_mode == 1:
        if letters:
            output += " "
        output += str(s.fg_color)
    if s.bg_mode == 1:
        output += f"/{s.bg_color}"
    return output
def enter_tag(s: Style) -> str:
    """Return the opening tag text for a markup object.

    BUGFIX: the original body used an undefined name ``m``; it now uses
    the parameter.  NOTE(review): TAG_START/TAG_END and the
    ColorMarkup/MXPMarkup classes are expected to be defined elsewhere
    in this module -- confirm.
    """
    if isinstance(s, ColorMarkup):
        return f"{TAG_START}c{serialize_colors(s)}{TAG_END}"
    elif isinstance(s, MXPMarkup):
        if s.attributes:
            attrs = " ".join(
                [f'{k}="{html.escape(v)}"' for k, v in s.attributes.items()]
            )
            return f"{TAG_START}p{s.tag} {attrs}{TAG_END}"
        else:
            return f"{TAG_START}p{s.tag}{TAG_END}"
    else:
        return ""
def exit_tag(s: Style) -> str:
    """Return the closing tag text for a markup object.

    BUGFIX: the original body used an undefined name ``m``; it now uses
    the parameter.
    """
    if isinstance(s, ColorMarkup):
        return f"{TAG_START}c/{TAG_END}"
    elif isinstance(s, MXPMarkup):
        return f"{TAG_START}p/{TAG_END}"
    else:
        return ""
def encode(mstring: Text, errors: str = "strict") -> str:
    """Serialize a styled Text into a flat string with inline markup tags.

    Walks the spans in order, emitting opening/closing tags whenever the
    active style changes; ``tag_stack`` tracks currently-open markup so
    nested tags are closed in the right order.

    NOTE(review): ``errors`` is accepted for codec-API symmetry with
    ``decode`` but is never consulted here.
    """
    output = ""
    tag_stack = list()
    # The style of the span currently being emitted (None = plain text).
    cur = None
    for i, span in enumerate(mstring.spans):
        if isinstance(span.style, Style):
            if cur:
                # we are already inside of a markup!
                if span.style is cur:
                    pass
                else:
                    # moving to a different markup.
                    if span.style.parent is cur:
                        # we moved into a child.
                        output += enter_tag(span.style)
                        tag_stack.append(span.style)
                    else:
                        # We left a tag and are moving into another kind of tag. It might be a parent, an ancestor,
                        # or completely unrelated. Let's find out which, first!
                        ancestors = cur.ancestors(reversed=True)
                        # NOTE(review): `idx` is assigned but never used.
                        idx = None
                        if span.style in tag_stack:
                            # We need to close out of the ancestors we no longer have. A slice accomplishes that.
                            # NOTE(review): this assumes `ancestors` and `tag_stack`
                            # are index-aligned — confirm against Style.ancestors().
                            tags_we_left = ancestors[tag_stack.index(span.style) :]
                            # NOTE(review): this inner `i` shadows the outer loop
                            # index; harmless today since the outer `i` is unused.
                            for i in range(len(tags_we_left) - 1):
                                tag_stack.pop(-1)
                            # now that we know what to leave, let's leave them.
                            for tag in reversed(tags_we_left):
                                output += exit_tag(tag)
                        else:
                            # it's not an ancestor at all, so close out of everything and rebuild.
                            for tag in reversed(tag_stack):
                                output += exit_tag(tag)
                            tag_stack.clear()
                            # Now to enter the new tag...
                            for ancestor in span.style.ancestors(reversed=True):
                                output += enter_tag(ancestor)
                                tag_stack.append(ancestor)
                            output += enter_tag(span.style)
                            tag_stack.append(span.style)
                cur = span.style
            else:
                # We are not inside of a markup tag. Well, that changes now.
                cur = span.style
                for ancestor in span.style.ancestors(reversed=True):
                    tag_stack.append(ancestor)
                tag_stack.append(span.style)
                for tag in tag_stack:
                    output += enter_tag(tag)
        else:
            # we are moving into a None markup...
            if cur:
                for tag in reversed(tag_stack):
                    output += exit_tag(tag)
                tag_stack.clear()
                cur = None
            else:
                # from no markup to no markup. Just append the character.
                pass
        # Then append this span's text
        output += mstring.plain[span.start : span.end]
    # Finalize and exit all remaining tags.
    for tag in reversed(tag_stack):
        output += exit_tag(tag)
    return output
def decode(src, errors: str = "strict") -> Text:
    """Parse a tag-encoded string (the inverse of ``encode``) into a Text.

    Implemented as a small state machine:
      state 0 — scanning plain text for the tag-start byte ``\\x02``;
      state 1 — reading the one-character tag kind ('c' color, 'p' MXP);
      state 2 — reading tag payload up to the tag-end byte ``\\x03``.

    NOTE(review): ``errors`` is accepted for codec-API symmetry but never
    consulted; malformed input (missing tag end) silently truncates.
    """
    # Root style; opening tags push children, closing tags pop to parent.
    current = ProtoStyle()
    state = 0
    remaining = src
    segments: List[Tuple[str, Style]] = list()
    tag = None
    while len(remaining):
        if state == 0:
            idx_start = remaining.find("\002")
            if idx_start != -1:
                # Emit the plain text seen so far under the current style.
                segments.append((remaining[:idx_start], current.convert()))
                remaining = remaining[idx_start + 1 :]
                state = 1
            else:
                segments.append((remaining, current.convert()))
                remaining = ""
        elif state == 1:
            # encountered a TAG START...
            tag = remaining[0]
            remaining = remaining[1:]
            state = 2
        elif state == 2:
            # we are inside a tag. hoover up all data up to TAG_END...
            idx_end = remaining.find("\003")
            opening = True
            if idx_end != -1:
                tag_data = remaining[:idx_end]
                remaining = remaining[idx_end + 1 :]
                # A leading '/' marks a closing tag.
                if tag_data and tag_data[0] == "/":
                    opening = False
                    tag_data = tag_data[1:]
                if opening:
                    current = ProtoStyle(parent=current)
                    if tag == "p":
                        apply_mxp(current, tag_data)
                    elif tag == "c":
                        current.inherit_ansi()
                        apply_rules(current, tag_data)
                else:
                    # NOTE(review): an unbalanced closing tag at the root
                    # would set current to None — confirm inputs are balanced.
                    current = current.parent
                state = 0
            else:
                # malformed data.
                break
    return Text.assemble(*segments)
def ansi_fun_style(code: str) -> Style:
    """Translate a PennMUSH-style ansi() code string into a Style."""
    normalized = (code or "").strip()
    proto = ProtoStyle()
    apply_rules(proto, normalized)
    return proto.convert()
def ansi_fun(code: str, text: Union[Text, str]) -> Text:
    """
    Create a Text from a PennMUSH style ansi() call, such as: ansi(hr,texthere!)
    """
    return ansify(ansi_fun_style(code), text)
def ansify(style: Style, text: Union[Text, str]) -> Text:
    """Apply *style* on top of *text*.

    For a Text, *style* is combined with each existing span's style;
    for a plain str, a single span covering the whole string is created.

    Raises
    ------
    TypeError
        If *text* is neither Text nor str (previously this path silently
        returned None, which broke callers downstream).
    """
    if isinstance(text, Text):
        spans = [Span(s.start, s.end, style + s.style) for s in text.spans]
        return Text(text.plain, spans=spans)
    elif isinstance(text, str):
        spans = [Span(0, len(text), style)]
        return Text(text, spans=spans)
    raise TypeError(f"text must be Text or str, got {type(text).__name__}")
def from_html(text: Union[Text, str], tag: str, **kwargs) -> Text:
    """Wrap *text* in markup for XML/MXP *tag*, with *kwargs* as attributes."""
    mark = ProtoStyle()
    mark.tag = tag
    mark.xml_attr = kwargs
    style = mark.convert()
    if isinstance(text, Text):
        spans = [Span(s.start, s.end, style + s.style) for s in text.spans]
        return Text(text.plain, spans=spans)
    elif isinstance(text, str):
        # Fix: reuse the already-converted `style` instead of calling
        # mark.convert() a second time (assumes convert() is pure — it is
        # called once-or-twice interchangeably in the original).
        spans = [Span(0, len(text), style)]
        return Text(text, spans=spans)
    # Previously fell through and returned None for unsupported types.
    raise TypeError(f"text must be Text or str, got {type(text).__name__}")
def send_menu(text: Union[Text, str], commands=None) -> Text:
    """Wrap *text* in an MXP SEND tag.

    *commands* is a sequence of (command, hint) pairs; the commands and
    hints become pipe-separated ``href``/``hint`` attributes.
    """
    command_pairs = commands if commands is not None else []
    hint_attr = "|".join(pair[1] for pair in command_pairs)
    href_attr = "|".join(pair[0] for pair in command_pairs)
    return from_html(text=text, tag="SEND", href=href_attr, hint=hint_attr)
| StarcoderdataPython |
249007 |
''' A module for representing permutations in Sym(N). '''
from bisect import bisect
from itertools import combinations
from math import factorial
import numpy as np
class Permutation:
    ''' This represents a permutation on 0, 1, ..., N-1. '''

    def __init__(self, perm):
        self.perm = perm
        # A valid permutation must hit every symbol 0..N-1 exactly once.
        assert set(self) == set(range(len(self)))

    def __str__(self):
        return str(self.perm)

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self.perm)

    def __getitem__(self, item):
        return self.perm[item]

    def __call__(self, item):
        return self[item]

    def __iter__(self):
        return iter(self.perm)

    def __len__(self):
        return len(self.perm)

    def __eq__(self, other):
        if isinstance(other, Permutation):
            if len(self) != len(other):
                raise ValueError('Cannot compare permutations defined over different number of elements')
            return self.perm == other.perm
        else:
            return NotImplemented

    def __hash__(self):
        return hash(tuple(self.perm))

    def inverse(self):
        ''' Return the inverse of this permutation. '''
        # Sorting indices by their image puts index i at position perm[i].
        return Permutation(sorted(range(len(self)), key=self))

    def __invert__(self):
        return self.inverse()

    @classmethod
    def from_dict(cls, dictionary, ordering=None):
        ''' Return a Permutation from a dictionary.

        The order of the elements within the dictionary may also be specified. '''
        if ordering is None: ordering = list(dictionary)
        index_lookup = dict((item, index) for index, item in enumerate(ordering))
        return cls([index_lookup[dictionary[item]] for item in ordering])

    def order(self):
        ''' Return the order of this permutation. '''
        identity = Permutation(list(range(len(self))))
        power = self
        i = 1
        # The order is finite for any permutation, so this terminates.
        while power != identity:
            i += 1
            power = power * self
        return i

    def __mul__(self, other):
        if isinstance(other, Permutation):
            if len(self) != len(other):
                raise ValueError('Cannot compose permutations defined over different number of elements')
            return Permutation([self(other(i)) for i in range(len(self))])
        else:
            return NotImplemented

    def __pow__(self, n):
        if n < 0: return (~self)**(-n)
        # Exponentiation by repeated squaring.
        perm = self
        result = Permutation(list(range(len(self))))
        while n:
            if n % 2 == 1:
                result = result * perm
                n = n - 1
            perm = perm * perm
            n = n // 2
        return result

    @classmethod
    def from_index(cls, N, index):
        ''' Return the permutation in Sym(N) with the given index. '''
        # Decode `index` as a factorial-base number, picking one remaining
        # symbol per digit.
        P = []
        f = factorial(N)
        symbols = list(range(N))
        while symbols:
            f = f // len(symbols)
            i, index = divmod(index, f)
            P.append(symbols[i])
            symbols = symbols[:i] + symbols[i+1:]
        return cls(P)

    def index(self):
        ''' Return the index of this permutation in the (sorted) list of all permutations on this many symbols. '''
        symbols = sorted(self.perm)
        index = 0
        for p in self:
            i = bisect(symbols, p) - 1
            index = index * len(symbols) + i
            symbols = symbols[:i] + symbols[i+1:]
        return index

    def matrix(self):
        ''' Return the corresponding permutation matrix.

        That is, a matrix M such that M * e_i == e_{self[i]}. '''
        return np.array([[1 if i == j else 0 for j in self] for i in range(len(self))], dtype=object)

    def is_even(self):
        ''' Return whether this permutation is the composition of an even number of transpositions. '''
        # The parity of the inversion count equals the permutation's parity.
        # (The original summed `1 if a > b else 0` under an identical
        # `if a > b` filter — the conditional expression was redundant.)
        return sum(1 for a, b in combinations(self, r=2) if a > b) % 2 == 0

    def cycle_lengths(self):
        ''' Return the sorted list of cycle lengths of this Permutation.

        This is a total conjugacy invariant. '''
        N = len(self)
        cycle_lengths = []
        seen = set()
        for i in range(N):
            if i not in seen:
                # Walk the cycle containing i, counting its length.
                image = self(i)
                seen.add(image)
                for j in range(1, N+1):
                    if image == i:
                        cycle_lengths.append(j)
                        break
                    image = self(image)
                    seen.add(image)
        return sorted(cycle_lengths)

    def is_conjugate_to(self, other):
        ''' Return whether this permutation in conjugate to other.

        Two permutations are conjugate iff they have the same cycle lengths. '''
        assert isinstance(other, Permutation)
        if len(self) != len(other):
            return False
        return self.cycle_lengths() == other.cycle_lengths()
| StarcoderdataPython |
1959531 | import cv2
import numpy as np
class ColorPiker:
    """Interactive HSV color-range picker backed by a webcam feed.

    Opens a trackbar window for the six HSV bounds; while the preview
    runs, press 's' to store the current bounds in ``selected_colors``.

    NOTE(review): the class name keeps the original spelling ("Piker")
    because renaming it would break existing callers.
    """

    def __init__(self):
        self.cam_id = 0
        self.frameWidth = 640
        self.frameHeight = 480
        self.cap = cv2.VideoCapture(self.cam_id, cv2.CAP_DSHOW)
        self.cap.set(3, self.frameWidth)   # 3 == CAP_PROP_FRAME_WIDTH
        self.cap.set(4, self.frameHeight)  # 4 == CAP_PROP_FRAME_HEIGHT
        self.cap.set(10, 150)              # 10 == CAP_PROP_BRIGHTNESS
        self.window_name = "COLOR HSV VALUES"
        # Trackbar name -> [initial value, maximum value].
        self.default_hsv = {
            "Hue Min": [0, 179],
            "Hue Max": [179, 179],
            "Sat Min": [0, 255],
            "Sat Max": [255, 255],
            "Val Min": [0, 255],
            "Val Max": [255, 255],
        }
        # Each saved entry: [h_min, h_max, s_min, s_max, v_min, v_max].
        self.selected_colors = []

    def _empty(self, a):
        """No-op callback required by cv2.createTrackbar."""
        pass

    def _trackbar(self):
        """Create the trackbar window with one slider per HSV bound."""
        cv2.namedWindow(self.window_name)
        cv2.resizeWindow(self.window_name, 480, 320)
        for key in self.default_hsv.keys():
            cv2.createTrackbar(key, self.window_name, self.default_hsv[key][0], self.default_hsv[key][1], self._empty)
        return 0

    def pick(self):
        """Run the interactive picking loop; return ``selected_colors``."""
        keys = list(self.default_hsv)
        self._trackbar()
        try:
            while True:
                success, img = self.cap.read()
                if not success:
                    # Robustness fix: bail out explicitly instead of letting
                    # cvtColor raise on a None frame from a missing camera.
                    print(f"No webcam is active with id {self.cam_id}!")
                    break
                imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
                h_min = cv2.getTrackbarPos(keys[0], self.window_name)
                h_max = cv2.getTrackbarPos(keys[1], self.window_name)
                s_min = cv2.getTrackbarPos(keys[2], self.window_name)
                s_max = cv2.getTrackbarPos(keys[3], self.window_name)
                v_min = cv2.getTrackbarPos(keys[4], self.window_name)
                v_max = cv2.getTrackbarPos(keys[5], self.window_name)
                lower_hsv = np.array([h_min, s_min, v_min])
                upper_hsv = np.array([h_max, s_max, v_max])
                imgMask = cv2.inRange(imgHSV, lower_hsv, upper_hsv)
                imgResult = cv2.bitwise_and(img, img, mask=imgMask)
                Mask2BGR = cv2.cvtColor(imgMask, cv2.COLOR_GRAY2BGR)
                try:
                    cv2.imshow(f"WebCam({self.cam_id}) original", img)
                    cv2.imshow(f"WebCam({self.cam_id}) Mask", Mask2BGR)
                    cv2.imshow(f"WebCam({self.cam_id}) Result", imgResult)
                    if cv2.waitKey(1) & 0xFF == ord('s'):
                        self.selected_colors.append([h_min, h_max, s_min, s_max, v_min, v_max])
                except Exception:  # narrowed from a bare `except:`
                    # Fixed message grammar ("is activate" -> "is active").
                    print(f"No webcam is active with id {self.cam_id}!")
                    break
        except KeyboardInterrupt:
            pass  # Ctrl-C simply ends the picking session.
        except Exception:
            pass  # narrowed from a bare `except:`; best-effort UI loop.
        return self.selected_colors
""" FOR TESTING --> """
if __name__ == "__main__":
colorPicker = ColorPiker()
colorPicker.pick()
print(colorPicker.selected_colors) | StarcoderdataPython |
1695248 | <reponame>5gconnectedbike/Navio2
'''OpenGL extension OES.sample_shading
This module customises the behaviour of the
OpenGL.raw.GLES2.OES.sample_shading to provide a more
Python-friendly API
Overview (from the spec)
In standard multisample rendering, an implementation is allowed to
assign the same sets of fragment shader input values to each sample.
This can cause aliasing where the fragment shader input values are
used to generate a result that doesn't antialias itself, for example
with alpha-tested transparency.
This extension adds the ability to explicitly request that an
implementation use a minimum number of unique set of fragment
computation inputs when multisampling a pixel. Specifying such a
requirement can reduce aliasing that results from evaluating the
fragment computations too few times per pixel.
This extension adds new global state that controls the minimum
number of samples for which attribute data is independently
interpolated. When enabled, all fragment-shading operations
are executed independently on each sample.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/sample_shading.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.OES.sample_shading import *
from OpenGL.raw.GLES2.OES.sample_shading import _EXTENSION_NAME
def glInitSampleShadingOES():
    '''Return boolean indicating whether this extension is available'''
    from OpenGL import extensions as _extensions
    return _extensions.hasGLExtension(_EXTENSION_NAME)
### END AUTOGENERATED SECTION | StarcoderdataPython |
56931 | <reponame>vijaykumawat256/Prompt-Summarization
def count_calls(func, *args, **kwargs):
| StarcoderdataPython |
11268600 | <gh_stars>10-100
import paddle
import numpy as np
import test_grad.ppdet_resnet as ppdet_resnet
from paddle.regularizer import L2Decay
# Backbone configuration. The original script defined this whole group
# twice; the second group's values (notably freeze_at = 2) were the ones
# in effect, so the dead first group has been removed.
depth = 50
variant = 'd'
return_idx = [1, 2, 3]
dcn_v2_stages = [-1]
freeze_at = 2
freeze_norm = False
norm_decay = 0.
# Input tensor shape (NCHW); x_shape[0] is overwritten with batch_size below.
x_shape = [1, 3, 416, 416]
batch_size = 4
fused_modconv = False
model = ppdet_resnet.ResNet(depth=depth, variant=variant, return_idx=return_idx, dcn_v2_stages=dcn_v2_stages,
                            freeze_at=freeze_at, freeze_norm=freeze_norm, norm_decay=norm_decay)
model.train()
# Tiny learning rate for this gradient-comparison experiment.
base_lr = 0.00000001 * 1.0
base_wd = 0.0005
# base_wd = 0.0
momentum = 0.9
# Whether to clip gradients by global norm (set to e.g. 35.0 to enable).
clip_grad_by_norm = None
# clip_grad_by_norm = 35.0
if clip_grad_by_norm is not None:
    grad_clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=clip_grad_by_norm)
else:
    grad_clip = None
# Disable weight decay entirely when base_wd is effectively zero.
weight_decay = L2Decay(base_wd) if base_wd > 0.0000000001 else None
optimizer = paddle.optimizer.Momentum(parameters=model.parameters(), learning_rate=base_lr,
                                      momentum=momentum, weight_decay=weight_decay, grad_clip=grad_clip)
# Save the initial weights so the run can be reproduced elsewhere.
paddle.save(model.state_dict(), "51_00.pdparams")
# Run a few training steps, recording inputs and outputs per batch for
# cross-framework gradient comparison.
dic = {}
for batch_idx in range(8):
    optimizer.clear_gradients()
    x_shape[0] = batch_size
    x = paddle.randn(x_shape)
    x.stop_gradient = False
    y = model(dict(image=x,))
    # Three feature maps at strides producing 52x52, 26x26 and 13x13 grids.
    y_52x52 = y[0]
    y_26x26 = y[1]
    y_13x13 = y[2]
    dic['batch_%.3d.y_52x52'%batch_idx] = y_52x52.numpy()
    dic['batch_%.3d.y_26x26'%batch_idx] = y_26x26.numpy()
    dic['batch_%.3d.y_13x13'%batch_idx] = y_13x13.numpy()
    dic['batch_%.3d.x'%batch_idx] = x.numpy()
    loss = y_13x13.sum()
    loss.backward()
    # Note: these would be the gradients *before* clipping; Paddle cannot
    # expose the post-clip gradients.
    # dic['batch_%.3d.w_grad'%batch_idx] = model.res5.res5c.branch2c.conv.weight.grad.numpy()
    # dic['batch_%.3d.b_grad'%batch_idx] = model.res5.res5c.branch2c.norm.bias.grad.numpy()
    optimizer.step()
np.savez('51', **dic)
paddle.save(model.state_dict(), "51_08.pdparams")
print(paddle.__version__)
print()
| StarcoderdataPython |
9688785 | <reponame>mluessi/mne-python<filename>mne/tests/test_label.py
import os.path as op
from nose.tools import assert_true
from ..datasets import sample
from .. import label_time_courses
# Locate the sample dataset shipped with the examples, plus the STC and
# label files exercised by the test below.
examples_folder = op.join(op.dirname(__file__), '..', '..', 'examples')
data_path = sample.data_path(examples_folder)
stc_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-lh.stc')
# Left-hemisphere auditory label.
label = 'Aud-lh'
label_fname = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
def test_label_io_and_time_course_estimates():
    """Test IO for STC files
    """
    values, times, vertices = label_time_courses(label_fname, stc_fname)
    n_sources, n_samples = values.shape[0], values.shape[1]
    assert_true(len(vertices) == n_sources)
    assert_true(len(times) == n_samples)
| StarcoderdataPython |
50835 | # -*- coding: utf-8 -*-
"""
/dms/exercisefolder/views_sitemap.py
.. zeigt die Sitemap des aktuellen Lernarchivs an
Django content Management System
<NAME>
<EMAIL>
Die Programme des dms-Systems koennen frei genutzt und den spezifischen
Beduerfnissen entsprechend angepasst werden.
0.01 02.05.2008 Beginn der Arbeit
"""
from django.shortcuts import render_to_response
from django.template import Context
from django.template.loader import get_template
from django.utils.translation import ugettext as _
from dms.queries import get_site_url
from dms.queries import get_container_sitemap
from dms.utils_form import get_folderish_vars_show
from dms.utils_base import show_link
#from dms.roles import *
from dms.edufolder.utils import get_user_support
from dms.edufolder.utils import get_folder_content
from dms.newsboard.utils import get_folder_content as get_newsboard_content
from dms_ext.extension import * # dms-Funktionen ueberschreiben
# -----------------------------------------------------
def exercisefolder_sitemap(request, item_container):
    """ Render the sitemap of this learning archive (Lernarchiv). """
    def get_sitemap(item_container, start, length=200):
        """ Return HTML listing the subordinate learning archives, paged
        from `start` in steps of `length`. """
        tSitemap = get_template('app/edufolder/sitemap.html')
        path_length = len(item_container.container.path)
        containers, count = get_container_sitemap(item_container, start, length,
                                                  False)
        ret = ''
        s = get_site_url(item_container, '')
        for container in containers:
            # Path relative to this archive; '' is the archive itself.
            p = container.path[path_length:]
            if p != '':
                # Indent by nesting depth using '| ' markers.
                n = p.count('/')
                space = n * '| ' + '&nbsp;'
                ret += '%s<a href="%s%sindex.html">%s</a><br />\n' % (space, s, p, p)
        # --- -2 because './' does not count and counting starts at 0
        if start + length > count:
            max = count-2
            next = ''
        else:
            max = start + length
            next = show_link('./?start='+str(start+200), _(u'weiter'),
                             url_class="navLink")
        if start > 0:
            prev = show_link('./?start='+str(start-200), _(u'zurück'),
                             url_class="navLink")
        else:
            prev = ''
        section = Context ( { 'start': start,
                              'max': max,
                              'count': count-1,
                              'prev': prev,
                              'next': next,
                              'links': ret } )
        return tSitemap.render(section)
    app_name = u'edufolder'
    # Paging offset from the query string (Python 2: dict.has_key).
    if request.GET.has_key('start'):
        start = int(request.GET['start'])
    else:
        start = 0
    vars = get_folderish_vars_show(request, item_container, app_name,
                                   get_sitemap(item_container, start),
                                   get_user_support(item_container, request.user))
    vars['text'] = ''
    vars['title'] = _(u'Sitemap <i>dieses</i> Lernarchivs')
    vars['image_url'] = ''
    vars['slot_right_info'] = ''
    return render_to_response ( 'app/base_folderish.html', vars )
| StarcoderdataPython |
4995334 | <reponame>ahameedx/intel-inb-manageability
"""
Central telemetry/logging service for the manageability framework
Copyright (C) 2017-2022 Intel Corporation
SPDX-License-Identifier: Apache-2.0
"""
from .constants import (
STATE_CHANNEL,
CLOUDADAPTER_STATE_CHANNEL,
AGENT,
CONFIGURATION_UPDATE_CHANNEL,
TELEMETRY_UPDATE_CHANNEL,
CLIENT_KEYS,
CLIENT_CERTS
)
from inbm_lib.mqttclient.mqtt import MQTT
from inbm_lib.mqttclient.config import (
DEFAULT_MQTT_HOST, DEFAULT_MQTT_PORT, MQTT_KEEPALIVE_INTERVAL, DEFAULT_MQTT_CERTS)
from inbm_common_lib.constants import TELEMETRY_CHANNEL
from .telemetry_handling import publish_telemetry_update, publish_static_telemetry
from .poller import Poller
import logging
import json
from future import standard_library
standard_library.install_aliases()
# Module-level logger scoped to this module's name.
logger = logging.getLogger(__name__)
def broker_stop(client: MQTT) -> None:
    """Shut down broker, publishing 'dead' event first.

    @param client: broker client
    """
    state_topic = f'{AGENT}/state'
    client.publish(state_topic, 'dead', retain=True)
    # Disconnect the MQTT client once the dead state has been announced.
    client.stop()
def broker_init(poller: Poller, tls: bool = True, with_docker: bool = False) -> MQTT:
    """Set up generic action for message received; subscribe to state channel; publish
    'running' state

    @param poller: receives configuration value updates from on_update
    @param tls: whether to connect to the broker over TLS
    @param with_docker: forwarded to telemetry updates (include Docker info)
    @return: broker client
    """
    client = MQTT(AGENT + "-agent",
                  DEFAULT_MQTT_HOST,
                  DEFAULT_MQTT_PORT,
                  MQTT_KEEPALIVE_INTERVAL,
                  env_config=True,
                  tls=tls,
                  client_certs=str(CLIENT_CERTS),
                  client_keys=str(CLIENT_KEYS))
    client.start()

    def on_message(topic, payload, qos) -> None:
        # When the cloud adapter reports 'running', (re)publish static telemetry.
        logger.info('Message received: %s on topic: %s', payload, topic)
        if topic == CLOUDADAPTER_STATE_CHANNEL and 'running' in payload:
            publish_static_telemetry(client, TELEMETRY_CHANNEL)

    def on_telemetry_update(topic, payload, qos) -> None:
        # Dynamic telemetry requested for the item named in the payload.
        logger.info('Received telemetry update request for: %s', payload)
        publish_telemetry_update(
            client, TELEMETRY_CHANNEL, with_docker, payload)

    def on_update(topic, payload, qos) -> None:
        # Configuration change: forward the JSON payload keyed by the
        # last topic segment to the poller.
        logger.info('Message received: %s on topic: %s', payload, topic)
        poller.set_configuration_value(json.loads(
            payload), 'telemetry/' + topic.split('/')[-1])

    try:
        logger.debug('Subscribing to: %s', STATE_CHANNEL)
        client.subscribe(STATE_CHANNEL, on_message)
        logger.debug('Subscribing to: %s', CONFIGURATION_UPDATE_CHANNEL)
        client.subscribe(CONFIGURATION_UPDATE_CHANNEL, on_update)
        logger.debug('Subscribing to %s', TELEMETRY_UPDATE_CHANNEL)
        client.subscribe(TELEMETRY_UPDATE_CHANNEL, on_telemetry_update)
    except Exception as exception:
        # Best-effort startup: log the failure but still announce running state.
        logger.exception('Subscribe failed: %s', exception)
    client.publish(f'{AGENT}/state', 'running', retain=True)
    return client
| StarcoderdataPython |
3380086 | <filename>autopycoin/models/nbeats.py
"""
N-BEATS implementation
"""
from typing import Callable, Union, Tuple, List, Optional
from typing import List, Optional
import tensorflow as tf
from keras.engine import data_adapter
from .training import UnivariateModel
from ..layers import TrendBlock, SeasonalityBlock, GenericBlock, BaseBlock
from ..layers.nbeats_layers import SEASONALITY_TYPE
from .pool import BasePool
class Stack(UnivariateModel):
    """
    A stack is a series of blocks where each block produces two outputs,
    the forecast and the backcast.

    Inside a stack all forecasts are sum up and compose the stack output.
    In the meantime, the backcast is given to the following block.

    Parameters
    ----------
    blocks : tuple[:class:`autopycoin.models.BaseBlock`]
        Blocks layers. they can be generic, seasonal or trend ones.
        You can also define your own block by subclassing `BaseBlock`.

    Attributes
    ----------
    blocks : tuple[:class:`autopycoin.models.BaseBlock`]
    label_width : int
    input_width : int
    is_interpretable : bool
    stack_type : str

    Examples
    --------
    >>> from autopycoin.layers import TrendBlock, SeasonalityBlock
    >>> from autopycoin.models import Stack, NBEATS
    >>> from autopycoin.losses import QuantileLossError
    ...
    >>> trend_block = TrendBlock(label_width=20,
    ...                          p_degree=2,
    ...                          n_neurons=16,
    ...                          drop_rate=0.1,
    ...                          name="trend_block")
    ...
    >>> seasonality_block = SeasonalityBlock(label_width=20,
    ...                                      forecast_periods=[10],
    ...                                      backcast_periods=[20],
    ...                                      forecast_fourier_order=[10],
    ...                                      backcast_fourier_order=[20],
    ...                                      n_neurons=15,
    ...                                      drop_rate=0.1,
    ...                                      name="seasonality_block")
    ...
    ... # blocks creation
    >>> trend_blocks = [trend_block for _ in range(3)]
    >>> seasonality_blocks = [seasonality_block for _ in range(3)]
    ...
    ... # Stacks creation
    >>> trend_stacks = Stack(trend_blocks, name="trend_stack")
    >>> seasonality_stacks = Stack(seasonality_blocks, name="seasonality_stack")
    ...
    ... # model definition and compiling
    >>> model = NBEATS([trend_stacks, seasonality_stacks], name="interpretable_NBEATS")
    >>> model.compile(loss=QuantileLossError(quantiles=[0.5]))

    Notes
    -----
    input shape:
    N-D tensor with shape: (..., batch_size, time step).
    The most common situation would be a 2D input with shape (batch_size, time step).

    output shape:
    N-D tensor with shape: (..., batch_size, units).
    For instance, for a 2D input with shape (batch_size, units),
    the output would have shape (batch_size, units).
    With a QuantileLossError with 2 quantiles or higher the output would have shape (quantiles, batch_size, units).
    If you add 2 variables, the output would have shape (variables, quantiles, batch_size, units).
    """

    def __init__(
        self,
        blocks: Tuple[BaseBlock, ...],
        apply_quantiles_transpose: bool = False,
        apply_multivariate_transpose: bool = False,
        *args: list,
        **kwargs: dict,
    ):
        super().__init__(
            apply_quantiles_transpose=apply_quantiles_transpose,
            apply_multivariate_transpose=apply_multivariate_transpose,
            *args,
            **kwargs,
        )
        self._blocks = blocks
        self._stack_type = self._set_type()
        self._is_interpretable = self._set_interpretability()

    def call(
        self, inputs: Union[tuple, dict, list, tf.Tensor], **kwargs: dict
    ) -> Tuple[tf.Tensor, ...]:
        """Call method from tensorflow.

        Returns (residual backcast, summed forecast): each block's
        reconstruction is subtracted from the running inputs while its
        forecast is accumulated into the stack output.
        """
        if isinstance(inputs, (tuple, list)):
            inputs = inputs[0]
        outputs = tf.constant(0.0)  # init output
        for block in self.blocks:
            reconstructed_inputs, residual_outputs = block(inputs)
            inputs = tf.subtract(inputs, reconstructed_inputs)
            outputs = tf.add(outputs, residual_outputs)
        return inputs, outputs

    def get_config(self) -> dict:
        """See tensorflow documentation."""
        config = super().get_config()
        config.update({"blocks": self.blocks})
        return config

    def _set_type(self) -> str:
        """Return the type of the stack.

        `CustomStack` if the blocks are not all of the same type."""
        block_type = self.blocks[0].block_type
        for block in self.blocks:
            if block.block_type != block_type:
                return "CustomStack"
        return block_type.replace("Block", "") + "Stack"

    def _set_interpretability(self) -> bool:
        """True if the stack is interpretable else False."""
        # Simplified: all() already yields the boolean the original
        # if/else computed.
        return all(block.is_interpretable for block in self.blocks)

    @property
    def label_width(self) -> int:
        """Return the label width."""
        return self.blocks[0].label_width

    @property
    def input_width(self) -> int:
        """Return the input width."""
        return self.blocks[0].input_width

    @property
    def blocks(self) -> List[BaseBlock]:
        """Return the list of blocks."""
        return self._blocks

    @property
    def stack_type(self) -> str:
        """Return the type of the stack.

        `CustomStack` if the blocks are all differents."""
        return self._stack_type

    @property
    def is_interpretable(self) -> bool:
        """Return True if the stack is interpretable."""
        return self._is_interpretable

    def __repr__(self):
        return self.stack_type
class NBEATS(UnivariateModel):
"""
Tensorflow model defining the N-BEATS architecture.
N-BEATS is a univariate model, see :class:`autopycoin.models.UnivariateModel` for more information.
Its strong advantage resides in its structure which allows us to extract the trend and the seasonality of
temporal series. They are available from the attributes `seasonality` and `trend`.
This is an unofficial implementation of the paper https://arxiv.org/abs/1905.10437.
Parameters
----------
stacks : tuple[:class:`autopycoin.models.Stack`]
Stacks can be created from :class:`autopycoin.models.TrendBlock`,
:class:`autopycoin.models.SeasonalityBlock` or :class:`autopycoin.models.GenericBlock`.
See stack documentation for more details.
Attributes
----------
stacks : tuple[`Tensor`]
seasonality : `Tensor`
Seasonality component of the output.
trend : `Tensor`
Trend component of the output.
stack_outputs : `Tensor`
is_interpretable : bool
nbeats_type : str
label_width : int
input_width : int
Examples
--------
>>> from autopycoin.layers import TrendBlock, SeasonalityBlock
>>> from autopycoin.models import Stack, NBEATS
>>> from autopycoin.losses import QuantileLossError
>>> from autopycoin.data import random_ts
>>> from autopycoin.dataset import WindowGenerator
>>> import tensorflow as tf
>>> import pandas as pd
...
>>> data = random_ts(n_steps=1000,
... trend_degree=2,
... periods=[10],
... fourier_orders=[10],
... trend_mean=0,
... trend_std=1,
... seasonality_mean=0,
... seasonality_std=1,
... batch_size=1,
... n_variables=1,
... noise=True,
... seed=42)
>>> data = pd.DataFrame(data[0].numpy(), columns=['test'])
...
>>> w = WindowGenerator(
... input_width=20,
... label_width=10,
... shift=10,
... test_size=50,
... valid_size=10,
... flat=True,
... batch_size=32,
... preprocessing=lambda x,y: (x, (x, y))
... )
...
>>> w = w.from_array(data=data,
... input_columns=['test'],
... label_columns=['test'])
>>>
>>> trend_block = TrendBlock(label_width=w.label_width,
... p_degree=2,
... n_neurons=16,
... drop_rate=0.1,
... name="trend_block")
>>>
>>> seasonality_block = SeasonalityBlock(label_width=w.label_width,
... forecast_periods=[10],
... backcast_periods=[20],
... forecast_fourier_order=[10],
... backcast_fourier_order=[20],
... n_neurons=15,
... drop_rate=0.1,
... name="seasonality_block")
>>>
>>> trend_blocks = [trend_block for _ in range(3)]
>>> seasonality_blocks = [seasonality_block for _ in range(3)]
>>> trend_stacks = Stack(trend_blocks, name="trend_stack")
>>> seasonality_stacks = Stack(seasonality_blocks, name="seasonality_stack")
>>>
>>> model = NBEATS([trend_stacks, seasonality_stacks], name="interpretable_NBEATS")
>>> model.compile(loss=QuantileLossError(quantiles=[0.5]))
>>> history = model.fit(w.train, verbose=0)
Notes
-----
NBEATS supports the estimation of aleotoric and epistemic errors with:
- Aleotoric interval : :class:`autopycoin.loss.QuantileLossError`
- Epistemic interval : MCDropout
You can use :class:`autopycoin.loss.QuantileLossError` as loss error to estimate the
aleotoric error. Also, run multiple times a prediction with `drop_date` > 0 to estimate
the epistemic error.
*Input shape*
N-D tensor with shape: (batch_size, time step, variables) or (batch_size, time step).
The most common situation would be a 2D input with shape (batch_size, time step).
*Output shape*
Two N-D tensor with shape: (batch_size, time step, variables, quantiles) or (batch_size, time step, quantiles)
or (batch_size, time step).
For instance, for a 2D input with shape (batch_size, units),
the output would have shape (batch_size, units).
With a QuantileLossError with 2 quantiles or higher the output
would have shape (batch_size, units, quantiles).
With a multivariate inputs the output
would have shape (batch_size, units, variates, quantiles).
"""
def __init__(self, stacks: Tuple[Stack, ...], *args: list, **kwargs: dict):
super().__init__(*args, **kwargs)
# Stacks where blocks are defined
self._stacks = stacks
self._is_interpretable = self._set_interpretability()
self._nbeats_type = self._set_type()
def build(
self, input_shape: Union[tf.TensorShape, Tuple[tf.TensorShape, ...]]
) -> None:
"""See tensorflow documentation."""
if isinstance(input_shape, tuple):
input_shape = input_shape[0]
super().build(input_shape)
def compile(
self,
optimizer="rmsprop",
loss=None,
metrics=None,
loss_weights=[0.0, 1.0],
weighted_metrics=None,
run_eagerly=None,
steps_per_execution=None,
**kwargs,
) -> None:
super().compile(
optimizer=optimizer,
loss=loss,
metrics=metrics,
loss_weights=loss_weights,
weighted_metrics=weighted_metrics,
run_eagerly=run_eagerly,
steps_per_execution=steps_per_execution,
**kwargs,
)
def call(
self, inputs: Union[tuple, dict, list, tf.Tensor], **kwargs: dict
) -> tf.Tensor:
"""Call method from tensorflow."""
if isinstance(inputs, (tuple, list)):
inputs = inputs[0]
residual_inputs = tf.identity(inputs)
outputs = tf.constant(0.0)
for stack in self.stacks:
residual_inputs, residual_outputs = stack(residual_inputs)
outputs = tf.math.add(outputs, residual_outputs)
reconstructed_inputs = inputs - residual_inputs
return reconstructed_inputs, outputs
def seasonality(self, data: tf.Tensor) -> tf.Tensor:
"""
Based on the paper, the seasonality is available if
the previous stacks are composed by trend blocks.
Else, it doesn't correspond to seasonality.
Parameters
----------
data : `Tensor`
input data.
Returns
-------
seasonality : `Tensor`
Same shape as call inputs (see notes).
Raises
------
AttributeError
If all previous stacks are not composed
by trend blocks then an error is raised.
AssertionError
if no `SeasonalityStack` are defined.
"""
msg_error = f"""The first stack has to be a `TrendStack`,
hence seasonality doesn't exists . Got {self.stacks}."""
for idx, stack in enumerate(self.stacks):
if stack.stack_type != "TrendStack" and idx == 0:
raise AttributeError(msg_error)
elif stack.stack_type == "SeasonalityStack":
start = idx
elif stack.stack_type == "TrendStack":
continue
else:
break
if "start" not in locals():
raise AttributeError(f"No `SeasonalityStack` defined. Got {self.stacks}")
for stack in self.stacks[:start]:
data, _ = stack(data)
for stack in self.stacks[start : idx + 1]:
data, residual_seas = stack(data)
if "seasonality" not in locals():
seasonality = residual_seas
else:
seasonality += residual_seas
return seasonality
def trend(self, data: tf.Tensor) -> tf.Tensor:
"""
The trend component of the output.
Returns
-------
trend : `Tensor`
Same shape as call inputs (see notes).
Raises
------
AttributeError
Raises an error if previous stacks are not `TrendBlock`.
"""
for idx, stack in enumerate(self.stacks):
if stack.stack_type != "TrendStack":
break
msg = f"""No `TrendStack` defined. Got {self.stacks}.
`TrendStack` has to be defined as first stack."""
if idx == 0:
raise AttributeError(msg)
for stack in self.stacks[: idx + 1]:
data, residual_trend = stack(data)
if "trend" not in locals():
trend = residual_trend
else:
trend += residual_trend
return trend
def get_config(self) -> dict:
"""Get_config from tensorflow."""
return {"stacks": self.stacks}
def _set_interpretability(self) -> bool:
"""check if interpretable or not."""
return all(stack.is_interpretable for stack in self.stacks)
def _set_type(self):
"""Defines the type of Nbeats."""
if self.is_interpretable:
return "InterpretableNbeats"
return "Nbeats"
@property
def label_width(self) -> int:
    """Return the label width, delegated to the first stack."""
    return self.stacks[0].label_width
@property
def input_width(self) -> int:
    """Return the input width, delegated to the first stack.

    Bug fix: this property previously returned ``self.input_width``,
    i.e. it re-entered itself and raised ``RecursionError`` on every
    access.  Delegate to the first stack, mirroring ``label_width``.
    """
    # NOTE: assumes `Stack` exposes `input_width` the same way it exposes
    # `label_width` — confirm against the Stack implementation.
    return self.stacks[0].input_width
@property
def stacks(self) -> list:
    """Return the list of `Stack` layers composing the model."""
    return self._stacks
@property
def is_interpretable(self) -> bool:
    """Return True if the model is interpretable (see `_set_interpretability`)."""
    return self._is_interpretable
@property
def nbeats_type(self) -> str:
    """Return the Nbeats type: "Nbeats" or "InterpretableNbeats"."""
    return self._nbeats_type
def __repr__(self):
    # Represent the model by its type name ("Nbeats"/"InterpretableNbeats").
    return self._nbeats_type
# Accepted ways to specify pool models: ready NBEATS instances or factory
# callables, singly or as lists.  The original nested Unions flatten to
# exactly this type at runtime (typing.Union flattens automatically), so
# the simpler spelling is equivalent.
NbeatsModelsOptions = Union[
    List[NBEATS], NBEATS, List[Callable], Callable,
]
def create_interpretable_nbeats(
    label_width: int,
    forecast_periods: SEASONALITY_TYPE = None,
    backcast_periods: SEASONALITY_TYPE = None,
    forecast_fourier_order: SEASONALITY_TYPE = None,
    backcast_fourier_order: SEASONALITY_TYPE = None,
    p_degree: int = 1,
    trend_n_neurons: int = 252,
    seasonality_n_neurons: int = 2048,
    drop_rate: float = 0.0,
    share: bool = True,
    name: str = "interpretable_NBEATS",
    **kwargs: dict,
):
    """
    Create an interpretable NBEATS model as described in the original paper.

    Two stacks of 3 blocks each are built: the first made only of trend
    blocks, the second only of seasonality blocks.  Within a stack the
    weights can be shared between blocks.

    Parameters
    ----------
    label_width : int
        Past to rebuild. Usually, label_width = n * input width with n between 1 and 7.
    forecast_periods : Tuple[int, ...]
        Compute the fourier serie period in the forecast equation.
        If a list is provided then all periods are taken.
    backcast_periods : Tuple[int, ...]
        Compute the fourier serie period in the backcast equation.
        If a list is provided then all periods are taken.
    forecast_fourier_order : Tuple[int, ...]
        Compute the fourier order. Each order element refers to its respective period.
    backcast_fourier_order : Tuple[int, ...]
        Compute the fourier order. Each order element refers to its respective back period.
    p_degree : int
        Degree of the polynomial function. It needs to be > 0.
    trend_n_neurons : int
        Number of neurons in the fully connected trend layers.
    seasonality_n_neurons : int
        Number of neurons in the fully connected seasonality layers.
    drop_rate : float
        Rate of the dropout layer, used to estimate the epistemic error.
        Expected a value between 0 and 1. Default to 0.
    share : bool
        If True, the weights are shared between blocks inside a stack. Default to True.

    Returns
    -------
    model : :class:`autopycoin.models.NBEATS`
        An interpretable model with two stacks: one composed of 3 `TrendBlock`
        objects and a second composed of 3 `SeasonalityBlock` objects.

    Examples
    --------
    >>> from autopycoin.models import create_interpretable_nbeats
    >>> from autopycoin.losses import QuantileLossError
    >>> model = create_interpretable_nbeats(label_width=3,
    ...                                     forecast_periods=[2],
    ...                                     backcast_periods=[3],
    ...                                     forecast_fourier_order=[2],
    ...                                     backcast_fourier_order=[3],
    ...                                     p_degree=1,
    ...                                     trend_n_neurons=16,
    ...                                     seasonality_n_neurons=16,
    ...                                     drop_rate=0.1,
    ...                                     share=True)
    >>> model.compile(loss=QuantileLossError(quantiles=[0.5]))
    """

    def _new_trend_block() -> TrendBlock:
        # Single place where trend-block construction parameters live.
        return TrendBlock(
            label_width=label_width,
            p_degree=p_degree,
            n_neurons=trend_n_neurons,
            drop_rate=drop_rate,
            name="trend_block",
        )

    def _new_seasonality_block() -> SeasonalityBlock:
        # Single place where seasonality-block construction parameters live.
        return SeasonalityBlock(
            label_width=label_width,
            forecast_periods=forecast_periods,
            backcast_periods=backcast_periods,
            forecast_fourier_order=forecast_fourier_order,
            backcast_fourier_order=backcast_fourier_order,
            n_neurons=seasonality_n_neurons,
            drop_rate=drop_rate,
            name="seasonality_block",
        )

    if share is True:
        # Repeat the SAME instance so weights are shared within the stack.
        trend_blocks = [_new_trend_block()] * 3
        seasonality_blocks = [_new_seasonality_block()] * 3
    else:
        trend_blocks = [_new_trend_block() for _ in range(3)]
        seasonality_blocks = [_new_seasonality_block() for _ in range(3)]

    trend_stacks = Stack(trend_blocks, name="trend_stack")
    seasonality_stacks = Stack(seasonality_blocks, name="seasonality_stack")
    return NBEATS([trend_stacks, seasonality_stacks], name=name, **kwargs)
def create_generic_nbeats(
    label_width: int,
    g_forecast_neurons: int = 524,
    g_backcast_neurons: int = 524,
    n_neurons: int = 524,
    n_blocks: int = 1,
    n_stacks: int = 30,
    drop_rate: float = 0.0,
    share: bool = False,
    name: str = "generic_NBEATS",
    **kwargs: dict,
):
    """
    Create a generic NBEATS model as described in the original paper.

    Within each stack the weights can be shared between blocks.

    Parameters
    ----------
    label_width : int
        Past to rebuild. Usually, label_width = n * input width with n between 1 and 7.
    n_neurons : int
        Number of neurons in the fully connected generic layers.
    n_blocks : int
        Number of blocks per stack.
    n_stacks : int
        Number of stacks in the model.
    drop_rate : float
        Rate of the dropout layer, used to estimate the epistemic error.
        Expected a value between 0 and 1. Default to 0.
    share : bool
        If True, the weights are shared between blocks inside a stack. Default to False.

    Returns
    -------
    model : :class:`autopycoin.models.NBEATS`
        A generic model with `n_stacks` stacks holding `n_blocks` blocks each.

    Examples
    --------
    >>> from autopycoin.models import create_generic_nbeats
    >>> from autopycoin.losses import QuantileLossError
    >>> model = create_generic_nbeats(label_width=3,
    ...                               g_forecast_neurons=16,
    ...                               g_backcast_neurons=16,
    ...                               n_neurons=16,
    ...                               n_blocks=3,
    ...                               n_stacks=3,
    ...                               drop_rate=0.1,
    ...                               share=True)
    >>> model.compile(loss=QuantileLossError(quantiles=[0.5]))
    """

    def _new_generic_block() -> GenericBlock:
        # Single place where generic-block construction parameters live.
        return GenericBlock(
            label_width=label_width,
            g_forecast_neurons=g_forecast_neurons,
            g_backcast_neurons=g_backcast_neurons,
            n_neurons=n_neurons,
            drop_rate=drop_rate,
            name="generic_block",
        )

    generic_stacks = []
    for _ in range(n_stacks):
        if share is True:
            # One fresh block per stack, repeated so its weights are shared
            # within that stack only.
            blocks = [_new_generic_block()] * n_blocks
        else:
            blocks = [_new_generic_block() for _ in range(n_blocks)]
        generic_stacks.append(Stack(blocks, name="generic_stack"))

    return NBEATS(generic_stacks, name=name, **kwargs)
# TODO: finish doc and unit testing.
class PoolNBEATS(BasePool):
    """
    Tensorflow model defining a pool of N-BEATS models.

    As described in the paper https://arxiv.org/abs/1905.10437, the state-of-the-art results
    are reached with a bagging method of N-BEATS models including interpretable and generic ones.
    The aggregation function is used in the `predict` method when possible, i.e. when the
    output shapes are not different.
    As the reconstructed inputs are masked randomly, the aggregation is not performed on them.
    For more information about pool models see :class:`autopycoin.models.Pool`.

    Parameters
    ----------
    label_width : int
        Width of the targets.
        It can be not defined if `nbeats_model` is a list of NBEATS instances.
        Default to None.
    n_models : int
        Number of models inside the pool.
        The minimum value according to the paper to get SOTA results is 18.
        If NBEATS instances are provided then n_models is not used.
        Default to 18.
    nbeats_models : list[callable] or list[NBEATS]
        A list of callables which create a NBEATS model or a list of
        :class:`autopycoin.models.NBEATS` instances.
        If None then a mix of generic and interpretable NBEATS models is used.
        Default to None.
    fn_agg : Callable
        Aggregation function which accepts an `axis` parameter.
        It aggregates the models outputs. Default to mean.
    seed : int
        Used in combination with tf.random.set_seed to create a
        reproducible sequence of tensors across multiple calls.

    Returns
    -------
    outputs : Tuple[Tuple[`Tensor` | QuantileTensor | UnivariateTensor], Tuple[`Tensor` | QuantileTensor | UnivariateTensor]]
        Return the reconstructed inputs and inferred outputs as tuple (reconstructed inputs, outputs).
        Reconstructed inputs is a tuple of tensors as the mask is not the same through models.
        Outputs can be a tuple of tensors or an aggregated tensor if the prediction is used through `predict` method.

    Attributes
    ----------
    see :class:`autopycoin.models.Pool`

    Examples
    --------
    >>> from autopycoin.data import random_ts
    >>> from autopycoin.models import PoolNBEATS, create_interpretable_nbeats
    >>> from autopycoin.dataset import WindowGenerator
    >>> import tensorflow as tf
    >>> import pandas as pd
    ...
    >>> data = random_ts(n_steps=1000,
    ...                  trend_degree=2,
    ...                  periods=[10],
    ...                  fourier_orders=[10],
    ...                  trend_mean=0,
    ...                  trend_std=1,
    ...                  seasonality_mean=0,
    ...                  seasonality_std=1,
    ...                  batch_size=1,
    ...                  n_variables=1,
    ...                  noise=True,
    ...                  seed=42)
    >>> data = pd.DataFrame(data[0].numpy(), columns=['test'])
    ...
    >>> w = WindowGenerator(
    ...        input_width=70,
    ...        label_width=10,
    ...        shift=10,
    ...        test_size=50,
    ...        valid_size=10,
    ...        flat=True,
    ...        batch_size=32,
    ...        preprocessing=lambda x,y: (x, (x, y))
    ...        )
    ...
    >>> w = w.from_array(data=data,
    ...        input_columns=['test'],
    ...        label_columns=['test'])
    ...
    >>> model = PoolNBEATS(
    ...             label_width=10,
    ...             n_models=2,
    ...             nbeats_models=create_interpretable_nbeats,
    ...             )
    >>> model.compile(tf.keras.optimizers.Adam(
    ...    learning_rate=0.015, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=True,
    ...    name='Adam'), loss=[['mse', 'mse'], ['mae', 'mae'], ['mape', 'mape']], metrics=['mae'])
    >>> history = model.fit(w.train, validation_data=w.valid, epochs=1, verbose=0)
    >>> model.predict(w.test.take(1))[1].shape
    (32, 10)

    Notes
    -----
    PoolNBEATS is just a wrapper around nbeats models hence you can use epistemic loss error
    or multivariate inputs.
    This class only applies masks to its inputs.

    *Input shape*
    N-D tensor with shape: (batch_size, time step, variables) or (batch_size, time step).
    The most common situation would be a 2D input with shape (batch_size, time step).

    *Output shape*
    n N-D tensors with shape: (batch_size, time step, variables, quantiles) or (batch_size, time step, quantiles)
    or (batch_size, time step) with n the number of models generated randomly or registered in the constructor.
    For instance, for a 2D input with shape (batch_size, units) and three models,
    the output would have shape
    (((batch_size, units), (batch_size, units), (batch_size, units)), ((batch_size, units), (batch_size, units), (batch_size, units)))
    if call is used else
    (((batch_size, units), (batch_size, units), (batch_size, units)), (batch_size, units)) if predict is used.
    The outputs tensors can be aggregated during `predict` method only if all tensors are similar in shape.
    """

    def __init__(
        self,
        label_width: int = None,
        n_models: int = 18,
        nbeats_models: Union[None, NbeatsModelsOptions] = None,
        fn_agg: Callable = tf.reduce_mean,
        seed: Optional[int] = None,
        **kwargs: dict,
    ):
        # Bug fix: the previous default was a mutable module-level list, and an
        # explicit `nbeats_models=None` was forwarded as-is to BasePool even
        # though the docstring promised "a mix of generic and interpretable"
        # models.  Substitute the documented default here instead.
        if nbeats_models is None:
            nbeats_models = [create_interpretable_nbeats, create_generic_nbeats]
        super().__init__(
            label_width=label_width,
            n_models=n_models,
            models=nbeats_models,
            fn_agg=fn_agg,
            seed=seed,
            **kwargs,
        )

    def compile(
        self,
        optimizer="rmsprop",
        loss=None,
        metrics=None,
        loss_weights=None,
        weighted_metrics=None,
        run_eagerly=None,
        steps_per_execution=None,
        **kwargs,
    ) -> None:
        """Validate the (reconstruction, forecast) loss structure, then defer
        to ``BasePool.compile``."""
        # Bug fix: avoid a mutable default argument.  None selects the
        # historical default of weighting only the forecast output.
        if loss_weights is None:
            loss_weights = [0.0, 1.0]
        self.check_valid_structure(loss, name='loss')
        self.check_valid_structure(loss_weights, name='loss_weights')
        super().compile(
            optimizer=optimizer,
            loss=loss,
            metrics=metrics,
            loss_weights=loss_weights,
            weighted_metrics=weighted_metrics,
            run_eagerly=run_eagerly,
            steps_per_execution=steps_per_execution,
            **kwargs,
        )

    # TODO: test
    def check_valid_structure(self, structure, name):
        """Check if loss and loss_weights are a list of length-2 lists or a
        list of two elements (one per output: reconstruction, forecast)."""
        if not isinstance(structure, (list, tuple)):
            raise ValueError(f'{name} has to be a list or a tuple, got {structure}')
        elif all(isinstance(l, (list, tuple)) for l in structure):
            if any(len(l)!=2 for l in structure):
                raise ValueError(f'In case of list of list, {name} elements has to be length 2, got {structure}')
        elif len(structure) != 2 :
            raise ValueError(f'if {name} is not a list of list, {name} has to be length 2, got {structure}')

    def checks(self, nbeats_models: List[NBEATS]) -> None:
        """Check that `label_width` is identical across model instances."""
        labels_width = [model.label_width for model in nbeats_models]
        # If `label_width` is defined in the init then use it to check models else use the first model value.
        self._label_width = self.label_width or labels_width[0]
        assert all([label_width == self.label_width for label_width in labels_width]), (
            f"`label_width` parameter has to be identical through models and against the value given in the init method. "
            f"Got {labels_width} for models and `label_width` = {self.label_width}"
        )

    def build(
        self, input_shape: Union[tf.TensorShape, Tuple[tf.TensorShape, ...]]
    ) -> None:
        """See tensorflow documentation."""
        # One random mask per model: each model sees the last
        # `input_shape[1] - mask * label_width` time steps of the input.
        mask = tf.random.uniform(
            (self.n_models,),
            minval=0,
            maxval=int(input_shape[1] / self.label_width) or 1,
            dtype=tf.int32,
            seed=self.seed,
        )
        self._mask = input_shape[1] - (mask * self.label_width)
        super().build(input_shape)

    def call(
        self, inputs: Union[tuple, dict, list, tf.Tensor], **kwargs: dict
    ) -> tf.Tensor:
        """Call method from tensorflow Model.

        Make a prediction with every model generated in the constructor,
        each seeing its own masked (truncated) view of the inputs.
        """
        output_fn = lambda idx: self.models[idx](inputs[:, -self._mask[idx] :])
        outputs = tf.nest.map_structure(
            output_fn, [idx for idx in range(self.n_models)]
        )
        return outputs

    @tf.function
    def preprocessing_x(
        self,
        x: Union[None, Union[Union[tf.Tensor, tf.data.Dataset], Tuple[tf.Tensor, ...]]],
    ) -> Union[Tuple[None, None], Tuple[Callable, tuple]]:
        "Apply mask inside `train_step`"
        # Build masks from PoolNBEATS `build` method
        self._maybe_build(x)
        masked_x = None
        if x is not None:
            masked_x = [x[:, -self._mask[idx] :] for idx in range(self.n_models)]
        return masked_x

    @tf.function
    def preprocessing_y(
        self,
        y: Union[None, Union[Union[tf.Tensor, tf.data.Dataset], Tuple[tf.Tensor, ...]]],
    ) -> Union[Tuple[None, None], Tuple[Callable, tuple]]:
        "Apply mask inside `train_step`, `test_step`"
        masked_y = None
        if y is not None:
            # y is (reconstruction target, forecast target); only the
            # reconstruction target is masked, per model.
            masked_y = [
                (y[0][:, -self._mask[idx] :], y[1]) for idx in range(self.n_models)
            ]
        return masked_y

    @tf.function
    def postprocessing_y(self, y):
        "Apply mask inside `predict_step`"
        inputs_reconstructed = [outputs[0] for outputs in y]
        y = [outputs[1] for outputs in y]
        # Quantile outputs may differ in shape across models, so they cannot
        # be aggregated; return them as-is in that case.
        if any(outputs.quantiles for outputs in y):
            return inputs_reconstructed, y
        return inputs_reconstructed, self.fn_agg(y, axis=0)
| StarcoderdataPython |
9668307 | import unittest
from machinetranslation.translator import englishToFrench, frenchToEnglish
class TestTranslatorModule(unittest.TestCase):
    """Unit tests for the translator helpers.

    Covers the None edge case plus a "Hello"/"Bonjour" round trip in both
    directions.
    """

    def test_en_to_fr_with_null(self):
        # A None input must translate to the empty string.
        self.assertEqual(englishToFrench(None), "")

    def test_fr_to_en_with_null(self):
        self.assertEqual(frenchToEnglish(None), "")

    def test_en_to_fr_with_hello(self):
        self.assertEqual(englishToFrench("Hello"), "Bonjour")

    def test_fr_to_en_with_bonjour(self):
        self.assertEqual(frenchToEnglish("Bonjour"), "Hello")
if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    unittest.main()
| StarcoderdataPython |
5096103 | import os
import shutil
import warnings
import random
import logging
import msm_pele.constants as cs
import msm_pele.Helpers.helpers as hp
class EnviroBuilder(object):
    """
    Base class where the needed PELE environment
    is built by creating folders and files.
    """
    def __init__(self, folders, files, args):
        """
        Base class that encodes as attributes
        the software parameters for each stage

        Args:
            folders: folder names to create under the PELE directory
            files: template files to copy into the PELE directory
            args: parsed command-line arguments carrying every setting
        """
        #Main Arguments
        self.folders = folders
        self.ext_temp = args.ext_temp
        self.files = files
        self.system = args.system
        self.box = args.box
        self.one_exit = args.one_exit
        self.noRMSD = args.noRMSD
        self.user_center = args.user_center
        self.solvent = args.solvent
        self.user_radius = args.user_radius
        self.box_type = args.box_type
        self.box_metric = cs.BOX_METRIC if args.box_metric else " "
        self.iterations = args.iterations
        self.forcefield = args.forcefield
        self.water = args.water if args.water else None
        self.residue = args.residue
        self.templates = os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(__file__)), "PeleTemplates"))
        self.restart = args.restart
        self.native = args.native
        self.chain = args.chain
        self.mae_lig = os.path.abspath(args.mae_lig) if args.mae_lig else None
        # Test mode shrinks clusters/iterations to keep runs tiny.
        self.clusters = args.clust = args.clust if not args.test else 2
        self.exit_iters = args.exit_iters if not args.test else 1
        self.eq_struct = args.eq_struct if not args.test else 1
        self.test = args.test
        self.folder = args.folder
        self.pdb = args.pdb
        self.steps = args.steps
        self.nonstandard = args.nonstandard
        self.time = args.time
        self.lagtime = args.lagtime
        self.msm_clust = args.msm_clust
        self.log = '"simulationLogPath" : "$OUTPUT_PATH/logFile.txt",' if args.log else ""
        self.renumber = args.nonrenum
        self.nosasa = args.nosasa
        self.temp = args.temp
        self.sasa = args.sasa
        self.perc_sasa = args.perc_sasa
        self.water_radius = args.water_radius
        self.water_temp = args.water_temp
        self.water_trials = args.water_trials
        self.water_constr = args.water_constr
        #Choose CPUS
        if args.test:
            self.cpus = args.cpus = 4
        elif args.restart == "analise":
            self.cpus = args.cpus = 1
        else:
            self.cpus = args.cpus
        #Build constants for each module
        self.build_sasa_constants()
        self.build_msm_constants()
        self.build_water_constants()
        self.build_path_constants()

    @classmethod
    def build_env(cls, args):
        # Pick the template file set matching test mode and precision level,
        # then build the full environment.
        if args.test and not args.precision:
            self = cls(cs.FOLDERS, cs.FILES_TEST, args)
        elif args.test and args.precision:
            self = cls(cs.FOLDERS, cs.FILES_TEST_XP, args)
        elif args.precision:
            self = cls(cs.FOLDERS, cs.FILES_XP, args)
        elif args.precision2:
            self = cls(cs.FOLDERS, cs.FILES_XP2, args)
        else:
            self = cls(cs.FOLDERS, cs.FILES_SP, args)
        self.create()
        return self

    def build_msm_constants(self):
        """
        Build MSM-related constants (lagtimes, cluster counts, step
        numbers); test mode forces minimal values.
        """
        self.lagtime = 1 if self.test else self.lagtime
        self.lagtimes = None if self.test else [50, 100, 200, 500]
        self.msm_clust = 2 if self.test else self.msm_clust
        if not self.test:
            if self.time: self.steps = 10000
            else: self.steps = self.steps
        else:
            self.steps = 2

    def build_water_constants(self):
        """
        Build water constants to run
        water MC PELE
        """
        if self.water:
            # Locate every requested water ("chain:resnum") and centre the
            # dynamic-water region on their centroid.
            cms = [ hp.find_coords(self.system, water.split(":")[1], water.split(":")[0]) for water in self.water]
            try:
                cm = [str(coord) for coord in hp.find_centroid(cms)]
            except TypeError:
                raise TypeError("Check the specified waters exist")
            water_atoms = [ '"' + water + '"' for water in self.water]
            self.dynamic_water = cs.WATER.format(self.water_radius, ",".join(cm), ",".join(water_atoms),
                                                 self.water_temp, self.water_trials, self.water_constr)
        else:
            self.water = []
            self.dynamic_water = ""

    def build_sasa_constants(self):
        """
        Build SASA related constants for later
        classifying the exit simulation clusters
        """
        self.perc_sasa_min, self.perc_sasa_int, self.perc_sasa_max = self.perc_sasa
        self.sasamin, self.sasamax = self.sasa if self.sasa else [None, None]
        self.sasa = True if not self.nosasa and not self.test else False

    def build_path_constants(self):
        # Derive every file/folder path used by the pipeline from the chosen
        # PELE directory.
        self.template = None
        self.rotamers_file = None
        self.random_num = random.randrange(1, 70000) if not self.test else 1234
        self.license = '''"{}"'''.format(cs.LICENSE)
        if self.test:
            self.equil_steps = 1
        else:
            self.equil_steps = int(cs.EQ_STEPS/self.cpus) if self.cpus < cs.EQ_STEPS else 1
        pele_dir = os.path.abspath("{}_Pele".format(self.residue))
        # "all" restart gets a fresh (non-existing) dir; otherwise reuse the
        # last existing one.
        if not self.folder:
            self.pele_dir = is_repited(pele_dir) if self.restart == "all" else is_last(pele_dir)
        else:
            self.pele_dir = os.path.abspath(self.folder)
        self.system_fix = os.path.join(self.pele_dir, "{}_processed.pdb".format(os.path.splitext(os.path.basename(self.system))[0]))
        # Register any extra user templates so they get copied as well.
        for f in self.ext_temp:
            cs.FILES_NAME.append(os.path.join("DataLocal/Templates/{}/HeteroAtoms/".format(self.forcefield), os.path.basename(f)))
            self.files.append(os.path.basename(f))
        self.adap_ex_input = os.path.join(self.pele_dir, os.path.basename(self.system_fix))
        self.adap_exit_template = os.path.join(cs.DIR, "Templates/adaptive_exit.conf")
        self.adap_ex_output = os.path.join(self.pele_dir, "output_adaptive_exit")
        self.exit_path = os.path.join(self.adap_ex_output, "exit_path{}")
        self.template_folder = os.path.join(self.pele_dir, "DataLocal/Templates/{}/HeteroAtoms/".format(self.forcefield))
        self.obc_tmp = os.path.join(cs.DIR, "Templates/solventParamsHCTOBC.txt")
        self.obc_file = os.path.join(self.pele_dir, "DataLocal/OBC/solventParamsHCTOBC.txt")
        self.results = os.path.join(self.pele_dir, "results")
        self.cluster_output = os.path.join(self.pele_dir, "output_clustering")
        self.adap_l_input = os.path.join(self.pele_dir, "output_clustering/initial_*")
        self.adap_l_output = os.path.join(self.pele_dir, "output_pele")
        self.ad_ex_temp = os.path.join(self.pele_dir, "adaptive_exit.conf")
        self.ad_l_temp = os.path.join(self.pele_dir, "adaptive_long.conf")
        self.pele_exit_temp = os.path.join(self.pele_dir, "pele_exit.conf")
        self.pele_temp = os.path.join(self.pele_dir, "pele.conf")
        self.box_temp = os.path.join(self.pele_dir, "box.pdb")
        self._pele_temp = os.path.join(cs.DIR, "Templates/pele_SP.conf")
        self.clusters_output = os.path.join(self.cluster_output, "clusters_{}_KMeans_allSnapshots.pdb".format(self.clusters))
        self.ligand_ref = os.path.join(self.pele_dir, "ligand.pdb")
        # RMSD reference: user native if given, else the ligand itself,
        # unless RMSD is disabled.
        if self.native:
            self.native = cs.NATIVE.format(os.path.abspath(self.native), self.chain)
        elif not self.noRMSD:
            self.native = cs.NATIVE.format(os.path.abspath(self.ligand_ref), self.chain)
        else:
            self.native = ""
        self.topology = None if self.pdb else os.path.join(self.adap_ex_output, "topologies/topology_0.pdb")

    def update_variable_for_iteration(self, i):
        # Re-point per-iteration output paths (iteration folders are 1-based).
        self.adap_ex_output = os.path.join(self.pele_dir, "output_adaptive_exit/iteration{}".format(i+1))
        self.topology = None if self.pdb else os.path.join(self.adap_ex_output, "topologies/topology_0.pdb")
        self.cluster_output = os.path.join(self.pele_dir, "output_clustering/iteration{}".format(i+1))
        self.clusters_output = os.path.join(self.cluster_output, "clusters_{}_KMeans_allSnapshots.pdb".format(self.clusters))
        self.adap_l_input = os.path.join(self.cluster_output, "initial_*")

    def create(self):
        # A full restart rebuilds folders and files; otherwise only the
        # logger is (re)attached.
        if self.restart == "all":
            self.create_folders()
            self.create_files()
            self.create_logger()
        else:
            self.create_logger()

    def create_folders(self):
        """
        Create pele folders
        """
        for folder in self.folders:
            self.create_dir(self.pele_dir, folder)

    def create_files(self):
        """
        Copy templates
        """
        # Actions
        for file, destination_name in zip(self.files, cs.FILES_NAME):
            self.copy(file, os.path.join(self.pele_dir, destination_name))

    def create_dir(self, base_dir, extension=None):
        """
        Create a directory only if it doesn't already exist
        (a warning is emitted when it does).

        Location:
            base_dir + extension,
            or base_dir if extension is None
        """
        if extension:
            path = os.path.join(base_dir, extension)
            if os.path.isdir(path):
                warnings.warn("Directory {} already exists.".format(path), RuntimeWarning)
            else:
                os.makedirs(path)
        else:
            if os.path.isdir(base_dir):
                warnings.warn("Directory {} already exists.".format(base_dir), RuntimeWarning)
            else:
                os.makedirs(base_dir)

    def copy(self, standard, destination, user=None):
        # If a user file is given it overrides the standard template; in that
        # case `standard` is the destination relative to the PELE directory.
        if user:
            shutil.copy(user, os.path.join(self.pele_dir, standard))
        else:
            shutil.copy(standard, destination)
        return os.path.join(self.pele_dir, standard)

    def create_logger(self):
        # Log file lives inside the PELE dir; append unless a fresh "all"
        # restart truncates it.
        log_name = os.path.join(self.pele_dir, "{}.log".format(self.residue))
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)
        formatter = logging.Formatter("%(asctime)s:%(levelname)s:%(message)s")
        if self.restart == "all":
            file_handler = logging.FileHandler(log_name, mode='w')
        else:
            file_handler = logging.FileHandler(log_name, mode='a')
        file_handler.setFormatter(formatter)
        self.logger.addHandler(file_handler)
def is_repited(pele_dir):
    """Return the first '<base>_Pele[_i]' directory name that does not exist yet.

    The base name is everything before the first 'Pele' chunk; the counter
    starts at (last numeric suffix + 1), or 1 when there is none.
    """
    base = None
    for chunk in pele_dir.split("_"):
        if chunk == "Pele":
            break
        base = "{}_{}".format(base, chunk) if base else chunk
    last_chunk = pele_dir.split("_")[-1]
    counter = int(last_chunk) + 1 if last_chunk.isdigit() else 1
    if not os.path.isdir(pele_dir):
        return pele_dir
    # Name taken: try the next numbered candidate.
    return is_repited("{}_Pele_{}".format(base, counter))
def is_last(pele_dir):
    """Return the last existing directory in the '<base>_Pele[_i]' sequence.

    If `pele_dir` does not exist it is returned as-is; otherwise the
    numbered successors are walked until the first missing one, and the
    last existing name is returned.
    """
    base = None
    for chunk in pele_dir.split("_"):
        if chunk == "Pele":
            break
        base = "{}_{}".format(base, chunk) if base else chunk
    last_chunk = pele_dir.split("_")[-1]
    counter = int(last_chunk) + 1 if last_chunk.isdigit() else 1
    if not os.path.isdir(pele_dir):
        return pele_dir
    candidate = "{}_Pele_{}".format(base, counter)
    if not os.path.isdir(candidate):
        # Successor missing: pele_dir is the last existing one.
        return pele_dir
    return is_last(candidate)
| StarcoderdataPython |
6699971 | <reponame>TheVinhLuong102/thinc
from typing import Tuple, List, Callable, Sequence
from murmurhash import hash_unicode
from ..model import Model
from ..config import registry
from ..types import Ints2d
# Layer signature: sequences of token strings in, one int array per sequence out.
InT = Sequence[Sequence[str]]
OutT = List[Ints2d]
@registry.layers("strings2arrays.v1")
def strings2arrays() -> Model[InT, OutT]:
    """Create a layer that turns string sequences into arrays of hash ids."""
    model: Model[InT, OutT] = Model("strings2arrays", forward)
    return model
def forward(model: Model[InT, OutT], Xs: InT, is_train: bool) -> Tuple[OutT, Callable]:
    """Hash every token and return one (n_tokens, 1) uint64 array per sequence."""
    arrays: OutT = []
    for seq in Xs:
        hashed = model.ops.asarray2i([hash_unicode(token) for token in seq], dtype="uint64")
        arrays.append(model.ops.reshape2i(hashed, -1, 1))

    def backprop(dY: OutT) -> InT:
        # Hashing is not differentiable: no gradient flows back to the strings.
        return []

    return arrays, backprop
| StarcoderdataPython |
11398967 | <filename>main2.py
import os
from ReaCombiner import file_utils
from ReaCombiner import gui
from ReaCombiner import db
# noinspection SpellCheckingInspection
def getHome():
    """Return the user's home directory path.

    Checks HOME (Unix) first, then USERPROFILE (Windows); when neither is
    set, asks the user to browse for the DB location.
    """
    for env_key in ("HOME", "USERPROFILE"):
        if env_key in os.environ:
            return os.environ[env_key]
    return file_utils.browseDir('DB location')
if __name__ == '__main__':
    # Open/create the SQLite database in the user's home directory, then
    # launch the GUI preloaded with the stored projects.
    db.createConnection(getHome().replace('\\', '/') + '/sqlitedb')
    gui.createMyWindow()
    gui.showMyWindow(db.loadProjects())
| StarcoderdataPython |
11353141 | import subprocess
class ParallelExecutor:
    """Run subprocesses in parallel, capping how many run at once.

    Processes are started with ``subprocess.Popen`` and reaped in FIFO
    order: when the cap is reached, the oldest running process is awaited
    before a new one starts.
    """

    def __init__(self, limit: int):
        """Create an executor allowing at most `limit` concurrent processes."""
        assert limit > 0, "Limit must be greater than zero"
        self.limit: int = limit
        self.__executing: list = []   # processes still running, oldest first
        self.__finished: list = []    # processes already waited on

    def execute(self, *args, **kwargs):
        """Start a new subprocess with the given Popen arguments.

        If the concurrency limit is reached, block until the oldest
        running process finishes before launching the new one.
        """
        if len(self.__executing) >= self.limit:
            proc = self.__executing.pop(0)
            proc.wait()
            self.__finished.append(proc)
        self.__executing.append(subprocess.Popen(*args, **kwargs))

    def finish(self):
        """Wait for all running processes; return a copy of every finished one.

        Bug fix: the previous implementation iterated ``__executing``
        without draining it, so a second ``finish()`` (or a later
        ``execute()``/``finish()``) re-waited the same processes and
        appended duplicates to ``__finished``.  Now the running list is
        emptied, making ``finish()`` idempotent.
        """
        while self.__executing:
            proc = self.__executing.pop(0)
            proc.wait()
            self.__finished.append(proc)
        return list(self.__finished)

    def clear(self):
        """Wait for any outstanding processes, then reset all bookkeeping."""
        self.finish()
        self.__finished.clear()
        self.__executing.clear()
| StarcoderdataPython |
3254143 | import os
import numpy as np
from nilt_base.settingsreader import SetupTool
from morph_tools import setup_morpher, single_image_morpher
from grating_helper import get_morphing_info_from_specs
from glob import glob
# Bootstrap the settings tool for this job and reuse its logger.
st = SetupTool("grating_morpher")
log = st.log
def fmod(n):
    """Bump values below 5 up by one; leave larger values unchanged."""
    return n + 1 if n < 5 else n
# Read the morphing job configuration from the "morph" settings section.
specification_path = st.read_input("morph.specifications", datatype=str)
positions_path = st.read_input("morph.positions", datatype=str)
optimized_dir = st.read_input("morph.optimized_dir", datatype=str)
image_name = st.read_input("morph.image_name", datatype=str)
scale = st.read_input("morph.scale", datatype=float)
select_ids = st.read_input("morph.select_ids", datatype=list, default_value=None)
# Guard: morph.parameters.im_sz must NOT be provided — the image size is
# inferred from the specifications.  Bug fix: the original raised the
# ValueError inside the same try-block whose `except ValueError` caught
# it, so the guard was always swallowed and never fired; `else` raises
# only when the key IS present.  (Assumes st.read_input raises ValueError
# for a missing key — confirm against SetupTool.)
try:
    st.read_input("morph.parameters.im_sz")
except ValueError:
    pass
else:
    raise ValueError("morph.parameters.im_sz must not be set. It is inferred from specifications")
# Optional extra keyword arguments forwarded to setup_morpher.
# NOTE(review): `parameters` may be None here, and `**parameters` below
# would then raise TypeError — confirm "morph.parameters" is always set.
parameters = st.settings.get("morph").get("parameters", None)
specs = get_morphing_info_from_specs(specification_path, positions_path, n_mod_fun=fmod)
# Optionally restrict the run to an explicit subset of spec ids.
if select_ids:
    selected = {}
    for sel_id in select_ids:
        selected[sel_id] = specs[sel_id]
    specs = selected
gen_outdir = st.output_folder
i = 0
for name_id, spec in specs.items():
    i += 1
    log.info(f"Processing {name_id} - {i}/{len(specs)}")
    #try:
    # Each id must have exactly a "_left" and a "_right" optimized folder.
    folders = glob(os.path.join(optimized_dir, name_id + "_*"))
    assert len(folders) == 2, f"Got more than two folder for single key: {folders}"
    # Morph from the right-hand design (source) towards the left-hand
    # design (target).
    target_image = os.path.join(optimized_dir, name_id + "_left", image_name)
    source_image = os.path.join(optimized_dir, name_id + "_right", image_name)
    assert os.path.isfile(source_image), f"Source image, {source_image}, does not exist"
    assert os.path.isfile(target_image), f"Source image, {target_image}, does not exist"
    id_folder = os.path.join(gen_outdir, f"morphed_{name_id}")
    if not os.path.isdir(id_folder):
        os.makedirs(id_folder)
    # One (length, width) pair per intermediate image; the spec lists run
    # from the target dimensions (first) to the source dimensions (last).
    needed_dimensions = [(l, w) for l, w in zip(spec["lengths"], spec["widths"])]
    source_dim = (spec["lengths"][-1], spec["widths"][-1])
    target_dim = (spec["lengths"][0], spec["widths"][0])
    morph_class_trained = setup_morpher(source_image, target_image, output_folder=id_folder, **parameters)
    log.info(f"Generating {len(needed_dimensions)} images")
    for j, dim in enumerate(needed_dimensions):
        name = name_id + f"_im_{j:03d}"
        im = single_image_morpher(morph_class_trained, dim, source_dim, target_dim, scale, name=name, save_images=True)
    #except Exception as e:
    #    log.error(f"Processing of {name_id} FAILED! Trying next! Got error: {e}")
log.info(f"Done, output saved to: {gen_outdir}")
| StarcoderdataPython |
9762113 | from os.path import join, dirname
from ctapipe.utils import get_dataset
from ctapipe.io.eventfilereader import EventFileReader, \
EventFileReaderFactory, HessioFileReader
def test_event_file_reader():
    """EventFileReader is abstract: direct instantiation must raise TypeError."""
    instantiated = True
    try:
        EventFileReader(config=None, tool=None)
    except TypeError:
        instantiated = False
    if instantiated:
        raise TypeError("EventFileReader should raise a TypeError when "
                        "instantiated due to its abstract methods")
def test_hessio_file_reader():
    """Check path metadata and the first event of the gamma test file."""
    dataset = get_dataset("gamma_test.simtel.gz")
    reader = HessioFileReader(None, None, input_path=dataset)
    assert reader.directory == dirname(dataset)
    assert reader.extension == ".gz"
    assert reader.filename == "gamma_test.simtel"
    first_event = next(reader.read())
    assert first_event.r0.tels_with_data == {38, 47}
def test_get_event():
    """get_event works by index and, with the flag, by event id."""
    dataset = get_dataset("gamma_test.simtel.gz")
    reader = HessioFileReader(None, None, input_path=dataset)
    by_index = reader.get_event(2)
    assert by_index.count == 2
    assert by_index.r0.event_id == 803
    by_id = reader.get_event(803, True)
    assert by_id.count == 2
    assert by_id.r0.event_id == 803
def test_get_num_events():
    """num_events reflects the file content and honours max_events."""
    dataset = get_dataset("gamma_test.simtel.gz")
    reader = HessioFileReader(None, None, input_path=dataset)
    assert reader.num_events == 9
    reader.max_events = 2
    assert reader.num_events == 2
def test_event_file_reader_factory():
    """The factory resolves the right reader class for a simtel file."""
    factory = EventFileReaderFactory(None, None)
    factory.input_path = get_dataset("gamma_test.simtel.gz")
    reader_cls = factory.get_class()
    reader = reader_cls(None, None)
    assert reader.num_events == 9
| StarcoderdataPython |
9619181 | from collections import namedtuple
from enigma_machine.conf.defaults import ALPHABET
Wire = namedtuple("Wire", "l_contact r_contact")
class Rotor:
def __init__(self, wheel_id, contact_mapping, notch, window="A", ring_setting="A", alphabet=ALPHABET):
    """ Initialize Rotor

    Args:
        wheel_id (str): Identifier for the Rotor
        contact_mapping (str): Index ordered mapping from alphabet onto
            inbound contacts
        notch (str): Letter associated with position of notch on Rotor ring
        window (str): Initial window position of Rotor
        ring_setting (str): Rotor ring setting
        alphabet (str): Alphabet used for Rotor
    """
    # Assignment order matters: the notch setter below validates against
    # self.alphabet, so the alphabet must be set first.
    self.alphabet = alphabet
    self.wheel_id = wheel_id
    self.wiring = contact_mapping
    self.notch = notch
    # configure() (defined elsewhere) presumably applies the window and
    # ring setting — confirm against the rest of the class.
    self.configure(window, ring_setting)
@property
def alphabet(self):
    # The alphabet this rotor's contacts and settings are defined over.
    return self._alphabet
@alphabet.setter
def alphabet(self, alphabet):
    # Validate: the alphabet must be a non-empty string.
    assert isinstance(alphabet, str), "Alphabet must be string"
    assert len(alphabet) > 0, "Alphabet must be non-zero length"
    self._alphabet = alphabet
@property
def notch(self):
return self._notch
@notch.setter
def notch(self, notch_pos):
""" Set the position of the notch on the ring
Sets the letter position of the notch on the ring for the rotor. Also
updates and records the turnover position (Letter visible in window)
when notch is active and triggers the next rotor to step.
Args:
notch_pos (str): Letter position of the notch on the ring
"""
assert (len(notch_pos) == 1), 'Notch position must be single digit'
assert isinstance(notch_pos, str), 'Notch postion must be of type string'
assert notch_pos in self.alphabet, 'Notch position must be member of alphabet'
self._notch = notch_pos.upper()
# Notch position is 8 letter positions advanced from turnover in window
self.turnover = self.alphabet[(self.alphabet.index(self._notch) - 8) % len(self.alphabet)]
@property
def ring_setting(self):
return self._ring_setting
@ring_setting.setter
def ring_setting(self, setting):
assert (len(setting) == 1), 'Ring setting must be a single letter'
assert isinstance(setting, str), 'Ring setting must be of type string'
assert setting in self.alphabet, 'Ring setting must be member of alphabet'
self._ring_setting = setting.upper()
@property
def window(self):
return self._window
@window.setter
def window(self, window):
assert (len(window) == 1), 'Window setting must be a single letter'
assert isinstance(window, str), 'Window setting must be of type string'
assert window in self.alphabet, 'Window setting must be member of alphabet'
self._window = window.upper()
# Offset the core wiring based on selected window position
self.core_offset = (self.alphabet.index(self.window) - self.alphabet.index(self.ring_setting)) % len(self.alphabet)
@property
def wiring(self):
# Return forward pass mapping by default
return sorted(self._wiring, key=lambda x: x.r_contact)
@wiring.setter
def wiring(self, contact_mapping):
""" Set the wiring from a string mapping of alphabet onto inbound contacts
Iterates over the provided mapping of alphabet from the right contacts through
the scrambled wiring onto the left contacts. Populates a wiring array with
all Wire mappings such that they can be used to encode letters during
forward or reverse passes on the rotor.
All wires are established disregarding the ring setting (equivalent to
assuming a default ring setting of A).
Args:
contact_mapping (string): String containing ordered mapping of contacts
"""
assert (len(contact_mapping) == len(self.alphabet)), f"Argument must contain {len(self.alphabet)} letters"
assert isinstance(contact_mapping, str), 'Argument must be of type string'
self._wiring = []
for idx in range(len(self.alphabet)):
self._wiring.append(Wire(contact_mapping[idx], self.alphabet[idx]))
def configure(self, window="A", ring_setting="A"):
""" Configure the rotor ring or window setting
Args:
window (str): Window setting for the rotor
ring_setting (str): Ring setting for the ring relative to wiring core
"""
# Configure ring setting first
self.ring_setting = ring_setting
# Then set window position
self.window = window
def step(self):
""" Step the rotor position
Returns:
(bool) True if window is on a the turnover letter position
"""
# Increment the core_offset and wrap around to 0 after after final letter
turned = self.window == self.turnover
self.core_offset = (self.core_offset + 1) % len(self.alphabet)
self.window = self.alphabet[self.core_offset]
return turned
def encode(self, letter, forward=True):
""" Encode a letter passing through the rotor
Args:
letter (str): Letter to encode
forward (bool): Encoding forward pass or reverse through the rotor
Returns:
(string) Letter associated with the output index encoded by the
rotor
"""
assert len(letter) == 1, 'Letter must be a single letter'
assert isinstance(letter, str), 'Letter must be of type str'
assert isinstance(forward, bool), 'forward must be of type boolean'
# Target wire for input letter shifted by positon of wiring core and
# inverse wiring mapping if backward pass through rotor
wiring_map = self.wiring if forward else sorted(self.wiring, key=lambda x: x.l_contact)
wire = wiring_map[self.core_offset]
# Get output letter from encoding on wiring core
encoded_letter = self.alphabet.index(wire.l_contact if forward else wire.r_contact)
# Get index of the output letter shifted by the position of the wiring core
output_index = encoded_letter - self.core_offset % len(self.alphabet)
# Return letter associated with the output index
return self.alphabet[output_index]
| StarcoderdataPython |
9727140 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# #########################################################################
# Copyright (c) 2016, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2016. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
"""Defines methods for reconstructing data from the :mod:`.acquisition` module.
The algorithm module contains methods for reconstructing tomographic data
including gridrec, SIRT, ART, and MLEM. These methods can be used as benchmarks
for custom reconstruction methods or as an easy way to access reconstruction
algorithms for developing other methods such as noise correction.
.. note::
Using `tomopy <https://github.com/tomopy/tomopy>`_ is recommended instead
of these functions for heavy computation.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import logging
import numpy as np
from xdesign.acquisition import thv_to_zxy
logger = logging.getLogger(__name__)
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2016, UChicago Argonne, LLC."
__docformat__ = 'restructuredtext en'
__all__ = ['art', 'sirt', 'mlem', 'update_progress']
def update_progress(progress):
    """Draw a process bar in the terminal.

    Parameters
    -------------
    progress : float
        The percentage completed e.g. 0.10 for 10%
    """
    nbars = int(progress * 10)
    percent = progress * 100
    filled = '#' * nbars
    empty = ' ' * (10 - nbars)
    print('\r[{0}{1}] {2:.2f}%'.format(filled, empty, percent), end='')
    if progress == 1:
        print('')
def get_mids_and_lengths(x0, y0, x1, y1, gx, gy):
    """Return the midpoints and intersection lengths of a line and a grid.

    Parameters
    ----------
    x0,y0,x1,y1 : float
        Two points which define the line. Points must be outside the grid
    gx,gy : :py:class:`np.array`
        Defines positions for the gridlines

    Return
    ------
    xm,ym : :py:class:`np.array`
        Coordinates along the line within each intersected grid pixel.
    dist : :py:class:`np.array`
        Lengths of the line segments crossing each pixel
    """
    # avoid upper-right boundary errors
    # Nudge axis-parallel lines by a tiny epsilon so the parametric divisions
    # below never divide by zero.
    if (x1 - x0) == 0:
        x0 += 1e-6
    if (y1 - y0) == 0:
        y0 += 1e-6
    # vector lengths (ax, ay)
    # Parametric positions along the line (0 at (x0,y0), 1 at (x1,y1)) where
    # it crosses each vertical gridline (ax) and horizontal gridline (ay).
    ax = (gx - x0) / (x1 - x0)
    ay = (gy - y0) / (y1 - y0)
    # edges of alpha (a0, a1)
    # Restrict to the parameter interval that is both inside the grid and on
    # the segment between the two endpoints.
    ax0 = min(ax[0], ax[-1])
    ax1 = max(ax[0], ax[-1])
    ay0 = min(ay[0], ay[-1])
    ay1 = max(ay[0], ay[-1])
    a0 = max(max(ax0, ay0), 0)
    a1 = min(min(ax1, ay1), 1)
    # sorted alpha vector
    # Merge the in-range crossings from both gridline families; consecutive
    # alpha values bound one line segment lying within a single pixel.
    cx = (ax >= a0) & (ax <= a1)
    cy = (ay >= a0) & (ay <= a1)
    alpha = np.sort(np.r_[ax[cx], ay[cy]])
    # lengths
    xv = x0 + alpha * (x1 - x0)
    yv = y0 + alpha * (y1 - y0)
    lx = np.ediff1d(xv)
    ly = np.ediff1d(yv)
    dist = np.sqrt(lx**2 + ly**2)
    # indexing
    # Midpoint of each segment in the line parameter, mapped back to (x, y);
    # callers use these midpoints to locate the pixel each segment lies in.
    mid = alpha[:-1] + np.ediff1d(alpha) / 2.
    xm = x0 + mid * (x1 - x0)
    ym = y0 + mid * (y1 - y0)
    return xm, ym, dist
def art(
    gmin,
    gsize,
    data,
    theta,
    h,
    init,
    niter=10,
    weights=None,
    save_interval=None
):
    """Reconstruct data using ART algorithm. :cite:`Gordon1970`.

    Parameters
    ----------
    gmin, gsize : 2-sequences
        Origin and extent of the reconstruction grid in global coordinates.
    data, theta, h : :py:class:`np.array`
        Measured line integrals and the probe coordinate of each ray; all
        three must contain the same number of elements.
    init : :py:class:`np.array`
        Initial image guess; updated IN PLACE, one ray at a time.
    niter : int
        Number of full passes over the data.
    weights : :py:class:`np.array`, optional
        Accepted but currently unused by the update step.
    save_interval : int, optional
        Snapshot ``init`` into the returned archive every ``save_interval``
        iterations; defaults to ``niter`` (return only the final image).

    Returns
    -------
    :py:class:`np.array` or list of :py:class:`np.array`
        The reconstructed image, or the list of snapshots when
        ``save_interval`` differs from ``niter``.
    """
    assert data.size == theta.size == h.size, "theta, h, must be" \
        "the equal lengths"
    data = data.ravel()
    theta = theta.ravel()
    h = h.ravel()
    if weights is None:
        weights = np.ones(data.shape)
    if save_interval is None:
        save_interval = niter
    archive = list()
    # Convert from probe to global coords
    srcx, srcy, detx, dety = thv_to_zxy(theta, h)
    # grid frame (gx, gy)
    sx, sy = init.shape
    gx = np.linspace(gmin[0], gmin[0] + gsize[0], sx + 1, endpoint=True)
    gy = np.linspace(gmin[1], gmin[1] + gsize[1], sy + 1, endpoint=True)
    midlengths = dict()  # cache the result of get_mids_and_lengths
    for n in range(niter):
        if n % save_interval == 0:
            archive.append(init.copy())
        # update = np.zeros(init.shape)
        # nupdate = np.zeros(init.shape, dtype=np.uint)
        update_progress(n / niter)
        for m in range(data.size):
            # get intersection locations and lengths
            # The ray geometry never changes between iterations, so the
            # pixel intersections are computed once per ray and cached.
            if m in midlengths:
                xm, ym, dist = midlengths[m]
            else:
                xm, ym, dist = get_mids_and_lengths(
                    srcx[m], srcy[m], detx[m], dety[m], gx, gy
                )
                midlengths[m] = (xm, ym, dist)
            # convert midpoints of line segments to indices
            ix = np.floor(sx * (xm - gmin[0]) / gsize[0]).astype('int')
            iy = np.floor(sy * (ym - gmin[1]) / gsize[1]).astype('int')
            # simulate acquistion from initial guess
            dist2 = np.dot(dist, dist)
            if dist2 != 0:
                # keep only segments whose midpoints fall inside the grid
                ind = (dist != 0) & (0 <= ix) & (ix < sx) \
                    & (0 <= iy) & (iy < sy)
                sim = np.dot(dist[ind], init[ix[ind], iy[ind]])
                # project this ray's residual back onto the pixels it
                # crossed immediately (row-by-row update, unlike sirt)
                upd = np.true_divide((data[m] - sim), dist2)
                init[ix[ind], iy[ind]] += dist[ind] * upd
    archive.append(init.copy())
    update_progress(1)
    if save_interval == niter:
        return init
    else:
        return archive
def sirt(
    gmin,
    gsize,
    data,
    theta,
    h,
    init,
    niter=10,
    weights=None,
    save_interval=None
):
    """Reconstruct data using SIRT algorithm. :cite:`Gilbert1972`.

    Parameters
    ----------
    gmin, gsize : 2-sequences
        Origin and extent of the reconstruction grid in global coordinates.
    data, theta, h : :py:class:`np.array`
        Measured line integrals and the probe coordinate of each ray; all
        three must contain the same number of elements.
    init : :py:class:`np.array`
        Initial image guess; updated IN PLACE once per iteration.
    niter : int
        Number of full passes over the data.
    weights : :py:class:`np.array`, optional
        Accepted but currently unused by the update step.
    save_interval : int, optional
        Snapshot ``init`` into the returned archive every ``save_interval``
        iterations; defaults to ``niter`` (return only the final image).

    Returns
    -------
    :py:class:`np.array` or list of :py:class:`np.array`
        The reconstructed image, or the list of snapshots when
        ``save_interval`` differs from ``niter``.
    """
    assert data.size == theta.size == h.size, "theta, h, must be" \
        "the equal lengths"
    data = data.ravel()
    theta = theta.ravel()
    h = h.ravel()
    if weights is None:
        weights = np.ones(data.shape)
    if save_interval is None:
        save_interval = niter
    archive = list()
    # Convert from probe to global coords
    srcx, srcy, detx, dety = thv_to_zxy(theta, h)
    # grid frame (gx, gy)
    sx, sy = init.shape
    gx = np.linspace(gmin[0], gmin[0] + gsize[0], sx + 1, endpoint=True)
    gy = np.linspace(gmin[1], gmin[1] + gsize[1], sy + 1, endpoint=True)
    midlengths = dict()  # cache the result of get_mids_and_lengths
    for n in range(niter):
        if n % save_interval == 0:
            archive.append(init.copy())
        # Unlike art(), residuals are accumulated over ALL rays first and
        # the image is corrected once at the end of the iteration.
        update = np.zeros(init.shape)
        nupdate = np.zeros(init.shape, dtype=np.uint)
        update_progress(n / niter)
        for m in range(data.size):
            # get intersection locations and lengths
            # Ray geometry is iteration-invariant, so cache per ray index.
            if m in midlengths:
                xm, ym, dist = midlengths[m]
            else:
                xm, ym, dist = get_mids_and_lengths(
                    srcx[m], srcy[m], detx[m], dety[m], gx, gy
                )
                midlengths[m] = (xm, ym, dist)
            # convert midpoints of line segments to indices
            ix = np.floor(sx * (xm - gmin[0]) / gsize[0]).astype('int')
            iy = np.floor(sy * (ym - gmin[1]) / gsize[1]).astype('int')
            # simulate acquistion from initial guess
            dist2 = np.dot(dist, dist)
            if dist2 != 0:
                # keep only segments whose midpoints fall inside the grid
                ind = (dist != 0) & (0 <= ix) & (ix < sx) \
                    & (0 <= iy) & (iy < sy)
                sim = np.dot(dist[ind], init[ix[ind], iy[ind]])
                upd = np.true_divide((data[m] - sim), dist2)
                update[ix[ind], iy[ind]] += dist[ind] * upd
                nupdate[ix[ind], iy[ind]] += 1
        # avoid dividing by zero for pixels no ray touched this iteration
        nupdate[nupdate == 0] = 1
        init += np.true_divide(update, nupdate)
    archive.append(init.copy())
    update_progress(1)
    if save_interval == niter:
        return init
    else:
        return archive
def mlem(gmin, gsize, data, theta, h, init, niter=10):
    """Reconstruct data using MLEM algorithm.

    Parameters
    ----------
    gmin, gsize : 2-sequences
        Origin and extent of the reconstruction grid in global coordinates.
    data, theta, h : :py:class:`np.array`
        Measured line integrals and the probe coordinate of each ray; all
        three must contain the same number of elements.
    init : :py:class:`np.array`
        Initial image guess; multiplicatively updated IN PLACE each
        iteration and returned.
    niter : int
        Number of full passes over the data.

    Returns
    -------
    :py:class:`np.array`
        The reconstructed image (same array object as ``init``).
    """
    assert data.size == theta.size == h.size, "theta, h, must be" \
        "the equal lengths"
    data = data.ravel()
    theta = theta.ravel()
    h = h.ravel()
    # Convert from probe to global coords
    srcx, srcy, detx, dety = thv_to_zxy(theta, h)
    # grid frame (gx, gy)
    sx, sy = init.shape
    gx = np.linspace(gmin[0], gmin[0] + gsize[0], sx + 1, endpoint=True)
    gy = np.linspace(gmin[1], gmin[1] + gsize[1], sy + 1, endpoint=True)
    midlengths = dict()  # cache the result of get_mids_and_lengths per ray
    for n in range(niter):
        update = np.zeros(init.shape)
        sumdist = np.zeros(init.shape)
        update_progress(n / niter)
        for m in range(data.size):
            # get intersection locations and lengths (geometry is
            # iteration-invariant, so cache per ray index)
            if m in midlengths:
                xm, ym, dist = midlengths[m]
            else:
                xm, ym, dist = get_mids_and_lengths(
                    srcx[m], srcy[m], detx[m], dety[m], gx, gy
                )
                midlengths[m] = (xm, ym, dist)
            # convert midpoints of line segments to indices
            ix = np.floor(sx * (xm - gmin[0]) / gsize[0]).astype('int')
            iy = np.floor(sy * (ym - gmin[1]) / gsize[1]).astype('int')
            # CONSISTENCY FIX: mask out-of-grid midpoints like art()/sirt()
            # do; previously negative indices wrapped around silently and
            # overlarge ones raised IndexError.
            ind = (dist != 0) & (0 <= ix) & (ix < sx) \
                & (0 <= iy) & (iy < sy)
            # BUG FIX: accumulate the *masked* lengths. `dist` (unmasked)
            # has a different shape from the masked index arrays whenever
            # any entry of `ind` is False, so the original `+= dist` broke.
            sumdist[ix[ind], iy[ind]] += dist[ind]
            sim = np.dot(dist[ind], init[ix[ind], iy[ind]])
            if not sim == 0:
                # multiplicative MLEM ratio: measured / simulated
                upd = np.true_divide(data[m], sim)
                update[ix[ind], iy[ind]] += dist[ind] * upd
        init[sumdist > 0] *= np.true_divide(
            update[sumdist > 0], sumdist[sumdist > 0] * sy
        )
    update_progress(1)
    return init
| StarcoderdataPython |
11322529 | #!/usr/bin/env python
# -*- coding: windows-1251 -*-
# Copyright (C) 2005 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# 3. All advertising materials mentioning features or use of this
# software must display the following acknowledgment:
# "This product includes software developed by
# <NAME> <<EMAIL>>."
#
# 4. Redistributions of any form whatsoever must retain the following
# acknowledgment:
# "This product includes software developed by
# <NAME> <<EMAIL>>."
#
# THIS SOFTWARE IS PROVIDED BY <NAME> ``AS IS'' AND ANY
# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <NAME> OR
# ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
__rev_id__ = """$Id: Row.py,v 1.6 2005/08/11 08:53:48 rvk Exp $"""
import BIFFRecords
from Deco import *
from Worksheet import Worksheet
import Style
import Cell
import ExcelFormula
import datetime as dt
class Row(object):
    """A single worksheet row.

    Holds the row's cells, formatting state (height, outline level,
    visibility) and produces the BIFF byte stream for both the ROW record
    and the contained cell records. Python 2 code: relies on ``unicode``.
    """
    # NOTE(review): this list mixes method names with instance attribute
    # names; kept verbatim from the original layout for compatibility.
    __slots__ = ["__init__",
                 "__adjust_height",
                 "__adjust_bound_col_idx",
                 "__excel_date_dt",
                 "get_height_in_pixels",
                 "set_style",
                 "get_xf_index",
                 "get_cells_count",
                 "get_min_col",
                 "get_max_col",
                 "get_str_count",
                 "get_row_biff_data",
                 "get_cells_biff_data",
                 "get_index",
                 "write",
                 "write_blanks",
                 # private variables
                 "__idx",
                 "__parent",
                 "__parent_wb",
                 "__cells",
                 "__min_col_idx",
                 "__max_col_idx",
                 "__total_str",
                 "__xf_index",
                 "__has_default_format",
                 "__height_in_pixels",
                 # public variables
                 "height",
                 "has_default_height",
                 "level",
                 "collapse",
                 "hidden",
                 "space_above",
                 "space_below"]

    #################################################################
    ## Constructor
    #################################################################
    def __init__(self, index, parent_sheet):
        self.__idx = index
        self.__parent = parent_sheet
        self.__parent_wb = parent_sheet.get_parent()
        self.__cells = []
        self.__min_col_idx = 0
        self.__max_col_idx = 0
        self.__total_str = 0          # count of string cells in this row
        self.__xf_index = 0x0F        # 0x0F == default (no explicit) style
        self.__has_default_format = 0
        self.__height_in_pixels = 0x11

        self.height = 0x00FF
        self.has_default_height = 0x00
        self.level = 0
        self.collapse = 0
        self.hidden = 0
        self.space_above = 0
        self.space_below = 0

    def __adjust_height(self, style):
        """Grow the cached pixel height to fit the given style's font."""
        twips = style.font.height
        points = float(twips)/20.0
        # Cell height in pixels can be calcuted by following approx. formula:
        # cell height in pixels = font height in points * 83/50 + 2/5
        # It works when screen resolution is 96 dpi
        pix = int(round(points*83.0/50.0 + 2.0/5.0))
        if pix > self.__height_in_pixels:
            self.__height_in_pixels = pix

    def __adjust_bound_col_idx(self, *args):
        """Widen the [min, max] column range to include every given index."""
        for arg in args:
            if arg < self.__min_col_idx:
                self.__min_col_idx = arg
            elif arg > self.__max_col_idx:
                self.__max_col_idx = arg

    def __excel_date_dt(self, date):
        """Convert a date/time/datetime to an Excel serial date number."""
        if isinstance(date, dt.date) and (not isinstance(date, dt.datetime)):
            epoch = dt.date(1899, 12, 31)
        elif isinstance(date, dt.time):
            # bare times are anchored to Excel's day 1
            date = dt.datetime.combine(dt.datetime(1900, 1, 1), date)
            epoch = dt.datetime(1900, 1, 1, 0, 0, 0)
        else:
            epoch = dt.datetime(1899, 12, 31, 0, 0, 0)
        delta = date - epoch
        xldate = delta.days + float(delta.seconds) / (24*60*60)
        # Add a day for Excel's missing leap day in 1900
        if xldate > 59:
            xldate += 1
        return xldate

    def get_height_in_pixels(self):
        return self.__height_in_pixels

    @accepts(object, Style.XFStyle)
    def set_style(self, style):
        """Apply a whole-row style and register it with the workbook."""
        self.__adjust_height(style)
        self.__xf_index = self.__parent_wb.add_style(style)

    def get_xf_index(self):
        return self.__xf_index

    def get_cells_count(self):
        return len(self.__cells)

    def get_min_col(self):
        return self.__min_col_idx

    def get_max_col(self):
        # BUG FIX: previously returned self.__min_col_idx (copy-paste error).
        return self.__max_col_idx

    def get_str_count(self):
        return self.__total_str

    def get_row_biff_data(self):
        """Serialize this row's ROW record (not the cells) to BIFF bytes."""
        height_options = (self.height & 0x07FFF)
        height_options |= (self.has_default_height & 0x01) << 15

        options = (self.level & 0x07) << 0
        options |= (self.collapse & 0x01) << 4
        options |= (self.hidden & 0x01) << 5
        options |= (0x00 & 0x01) << 6
        options |= (0x01 & 0x01) << 8
        # bit 7 flags whether an explicit row format (XF) is present
        if self.__xf_index != 0x0F:
            options |= (0x01 & 0x01) << 7
        else:
            options |= (0x00 & 0x01) << 7
        options |= (self.__xf_index & 0x0FFF) << 16
        # NOTE(review): `0x00 & x` is always 0, so space_above/space_below
        # never reach the record; looks like it should be `(x & 0x01)` —
        # left unchanged pending confirmation against the BIFF spec.
        options |= (0x00 & self.space_above) << 28
        options |= (0x00 & self.space_below) << 29

        return BIFFRecords.RowRecord(self.__idx, self.__min_col_idx,
                                     self.__max_col_idx, height_options,
                                     options).get()

    def get_cells_biff_data(self):
        return ''.join([ cell.get_biff_data() for cell in self.__cells ])

    def get_index(self):
        return self.__idx

    @accepts(object, int, (str, unicode, int, float, dt.datetime, dt.time, dt.date, ExcelFormula.Formula), Style.XFStyle)
    def write(self, col, label, style):
        """Write one value into column `col`, dispatching on its type.

        Strings become StrCell (empty strings a BlankCell), numbers a
        NumberCell, dates/times a NumberCell holding the Excel serial date,
        and anything else is treated as a Formula.
        """
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(col)
        if isinstance(label, (str, unicode)):
            if len(label) > 0:
                self.__cells.extend([ Cell.StrCell(self, col, self.__parent_wb.add_style(style), self.__parent_wb.add_str(label)) ])
                self.__total_str += 1
            else:
                self.__cells.extend([ Cell.BlankCell(self, col, self.__parent_wb.add_style(style)) ])
        elif isinstance(label, (int, float)):
            self.__cells.extend([ Cell.NumberCell(self, col, self.__parent_wb.add_style(style), label) ])
        elif isinstance(label, (dt.datetime, dt.time)):
            self.__cells.extend([ Cell.NumberCell(self, col, self.__parent_wb.add_style(style), self.__excel_date_dt(label)) ])
        else:
            self.__cells.extend([ Cell.FormulaCell(self, col, self.__parent_wb.add_style(style), label) ])

    @accepts(object, int, int, Style.XFStyle)
    def write_blanks(self, c1, c2, style):
        """Write a run of styled blank cells spanning columns c1..c2."""
        self.__adjust_height(style)
        self.__adjust_bound_col_idx(c1, c2)
        self.__cells.extend([ Cell.MulBlankCell(self, c1, c2, self.__parent_wb.add_style(style)) ])
| StarcoderdataPython |
6559018 | <reponame>SYU15/cloudless
"""
Cloudless image build command line interface.
"""
import os
import sys
import click
from cloudless.cli.utils import NaturalOrderAliasedGroup
from cloudless.cli.utils import handle_profile_for_cli
from cloudless.util.image_build_configuration import ImageBuildConfiguration
from cloudless.testutils.image_builder import ImageBuilder
def add_image_build_group(cldls):
    """
    Add commands for the image_build command group.
    """
    def _builder_for(ctx, config):
        # Shared by every subcommand below: reject directory paths (the
        # original duplicated this check in each command), then construct
        # the ImageBuilder for the given configuration file.
        if os.path.isdir(config):
            click.echo("Configuration must be a file, not a directory!")
            sys.exit(1)
        config_obj = ImageBuildConfiguration(config)
        return ImageBuilder(ctx.obj['CLIENT'], config=config_obj)

    @cldls.group(name='image-build', cls=NaturalOrderAliasedGroup)
    @click.option('--dev/--no-dev', default=False, help="Development mode.")
    @click.pass_context
    def image_build_group(ctx, dev):
        """
        Tools to build and test instance images.

        Commands to interact with machine images.
        """
        handle_profile_for_cli(ctx)
        click.echo('image group with provider: %s' % ctx.obj['PROVIDER'])
        ctx.obj['DEV'] = dev

    @image_build_group.command(name="deploy")
    @click.argument('config')
    @click.pass_context
    # pylint:disable=unused-variable
    def image_build_deploy(ctx, config):
        """
        deploy an image given a configuration file.
        """
        image_builder = _builder_for(ctx, config)
        service, state = image_builder.deploy()
        click.echo('Successfully deployed! Log in with:')
        for instance in ctx.obj['CLIENT'].service.get_instances(service):
            click.echo("ssh -i %s %s@%s" % (state["ssh_private_key"], state["ssh_username"],
                                            instance.public_ip))

    @image_build_group.command(name="configure")
    @click.argument('config')
    @click.pass_context
    # pylint:disable=unused-variable
    def image_build_configure(ctx, config):
        """
        configure an image given a config file.
        """
        image_builder = _builder_for(ctx, config)
        image_builder.configure()
        click.echo('Configure complete!')

    @image_build_group.command(name="check")
    @click.argument('config')
    @click.pass_context
    # pylint:disable=unused-variable
    def image_build_check(ctx, config):
        """
        check an image given a configuration file.
        """
        image_builder = _builder_for(ctx, config)
        image_builder.check()
        click.echo('Check complete!')

    @image_build_group.command(name="cleanup")
    @click.argument('config')
    @click.pass_context
    # pylint:disable=unused-variable
    def image_build_cleanup(ctx, config):
        """
        cleanup images given a configuration file.
        """
        image_builder = _builder_for(ctx, config)
        image_builder.cleanup()
        click.echo('Cleanup complete!')

    @image_build_group.command(name="run")
    @click.argument('config')
    @click.pass_context
    # pylint:disable=unused-variable
    def image_build_run(ctx, config):
        """
        Build an image given a configuration file. Runs all steps in order and saves the image at
        the end.

        This is the only way to save images, which ensures that every saved image had all the tests
        pass.
        """
        image_builder = _builder_for(ctx, config)
        # mock-aws runs against the in-memory provider used by the test suite
        if ctx.obj['PROVIDER'] == "mock-aws":
            image_builder.run(mock=True)
        else:
            image_builder.run()
        click.echo('Build complete!')
| StarcoderdataPython |
11379686 | """Tests for elections_lk."""
import unittest
from elections_lk import party_color
class TestCase(unittest.TestCase):
    """Tests."""

    def test_party_to_rgb_color(self):
        """Test."""
        cases = (
            ('UNP', (0, 0.5, 0)),
            ('NDF', (0, 0.5, 0)),
        )
        for party, expected_rgb in cases:
            self.assertEqual(
                expected_rgb,
                party_color.get_rgb_color(party),
            )

    def test_party_to_rgba_color(self):
        """Test."""
        cases = (
            ('UNP', 0.45, (0, 0.5, 0, 0)),
            ('NDF', 1.0, (0, 0.5, 0, 1)),
        )
        for party, vote_fraction, expected_rgba in cases:
            self.assertEqual(
                expected_rgba,
                party_color.get_rgba_color(party, vote_fraction),
            )
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
37491 | <gh_stars>1-10
from src.fft_from_image.ChainGeneration import ChainGeneration
import numpy as np
class ThueMorse(ChainGeneration):
    """Thue-Morse binary chain: repeatedly append the bit-complement."""

    def __init__(self, repeat, tm_num):
        ChainGeneration.__init__(self, repeat)
        self.tm_num = tm_num

    @staticmethod
    def tm_construct(seq):
        """Return the element-wise binary complement of *seq*."""
        return [(bit + 1) % 2 for bit in seq]

    def sequence(self):
        """Build the Thue-Morse word of tm_num doublings, stripe-expanded."""
        bits = [0]
        for _ in range(self.tm_num):
            bits = bits + self.tm_construct(bits)
        return np.repeat(bits, self.repeat)
class Fibonacci(ChainGeneration):
    """Fibonacci word chain and fast Fibonacci-number computation."""

    def __init__(self, repeat, fib_num):
        ChainGeneration.__init__(self, repeat)
        self.fib_num = fib_num

    def fib_number(self):
        """Return the fib_num-th Fibonacci number (fast-doubling method)."""
        n = self.fib_num
        i = h = 1
        j = k = 0
        while n > 0:
            if n % 2 == 1:
                # fold the current doubling matrix into the accumulator
                t = j * h
                j = i * h + j * k + t
                i = i * k + t
            # square the doubling matrix for the next bit of n
            t = h * h
            h = 2 * k * h + t
            k = k * k + t
            # BUG FIX: `int(n / 2)` goes through float true-division, which
            # loses precision for n > 2**53; integer floor-division is exact.
            n = n // 2
        return j

    def sequence_generator(self):
        """Build the fib_num-th Fibonacci word over {0, 1} by concatenation."""
        seq1 = [1]
        seq2 = [0]
        seq = seq2 + seq1
        for i in range(self.fib_num - 2):
            seq = seq2 + seq1
            seq1 = seq2
            seq2 = seq
        return np.array(seq)

    def sequence(self):
        """Return the Fibonacci word with each symbol stripe-expanded."""
        return np.repeat(self.sequence_generator(), self.repeat)
class Periodic(ChainGeneration):
    """Strictly alternating 1/0 chain of a given length."""

    def __init__(self, repeat, num):
        ChainGeneration.__init__(self, repeat)
        self.num = num

    def sequence_generator(self):
        """Return a float array 1,0,1,0,... of length num."""
        pattern = np.zeros(self.num)
        pattern[::2] = 1.0
        return pattern

    def sequence(self):
        return np.repeat(self.sequence_generator(), self.repeat)
class Random(ChainGeneration):
    """Random binary chain with a fixed number of 1-stripes."""

    def __init__(self, repeat, num, stripes1_count):
        ChainGeneration.__init__(self, repeat)
        self.num = num
        self.stripes1_count = stripes1_count

    def sequence_generator(self):
        """Return a random permutation of stripes1_count ones among num slots."""
        base = np.zeros(self.num)
        base[:self.stripes1_count] = 1.0
        return np.random.permutation(base)

    def sequence(self):
        return np.repeat(self.sequence_generator(), self.repeat)
class Heated(ChainGeneration):
    """Smooth cosine profile normalized into [0, 1]."""

    def __init__(self, repeat):
        ChainGeneration.__init__(self, repeat)

    def cos_sequence(self):
        """One full cosine period over `repeat` samples, scaled to [0, 1]."""
        angles = np.linspace(0, 2 * np.pi, self.repeat)
        return (np.cos(angles) + 1) / 2
class Custom(ChainGeneration):
    """Chain loaded from a text file, min-max normalized into [0, 1]."""

    def __init__(self, file_name, repeat=1):
        ChainGeneration.__init__(self, repeat)
        # use the last column of the (transposed) data file as the raw signal
        self.tmp = np.transpose(np.loadtxt(file_name))[-1]
        self.data = self.tmp - np.min(self.tmp)
        self.data /= np.max(self.data)

    def sequence(self):
        return self.data
class Phason:
    """Introduces phason flips (local 1-stripe shifts) into a base chain."""

    def __init__(self, sequence_type, repeat, num, phason_parameter):
        # 'F' = Fibonacci word, 'P' = periodic, 'R' = randomized Fibonacci
        if sequence_type == 'F':
            self.f = Fibonacci(1, num)
            self.seq = self.f.sequence()
        elif sequence_type == 'P':
            self.p = Periodic(1, num)
            self.seq = self.p.sequence()
        elif sequence_type == 'R':
            # total length and number of 1-stripes taken from Fibonacci numbers
            struct = Fibonacci(1, num).fib_number()
            stripes1 = Fibonacci(1, num - 2).fib_number()
            self.p = Random(1, struct, stripes1)  # randomized Fibonacci
            self.seq = self.p.sequence()
        else:
            raise ValueError('No more types supported at the moment')
        self.repeat = repeat
        self.len = len(self.seq)
        # positions of 1-stripes eligible for a phason flip
        self.where_one = self.find_all_phasons(self.seq)
        self.phason_parameter = phason_parameter
        self.sequence_type = sequence_type
        # < 1 is interpreted as a fraction of eligible stripes,
        # >= 1 as an absolute number of sequential flips
        if phason_parameter < 1:
            self.phasons_count = int(phason_parameter * len(self.where_one))
        else:
            self.phasons_count = phason_parameter

    def find_all_phasons(self, seq):
        """Return indices of 1s whose (cyclic) next 1 is not adjacent."""
        a = np.argwhere(seq == 1).T[0]
        # cyclic gap from each 1 to the next one; the last wraps around
        b = np.concatenate((np.diff(a), np.array([(self.len - a[-1] + a[0])])))
        # keep only positions whose gap is > 1 (a shift right stays legal)
        return np.compress(np.where(b == 1, 0, 1) == 1, a)

    def sequence_shuffling(self, seq):
        """Apply the configured phason flips; returns (sequence, positions).

        NOTE(review): mutates self.seq in place via make_shufling's alias.
        """
        if self.sequence_type == "R":
            # randomized chains are regenerated rather than flipped
            phason_pos = np.argwhere(self.p.sequence_generator() == 1)
            print(phason_pos)
            return self.seq, phason_pos
        else:
            if self.phason_parameter < 1:
                # flip a random subset of eligible stripes all at once
                phasons_pos = np.random.permutation(self.find_all_phasons(seq))[0:self.phasons_count]
                seq = self.make_shufling(phasons_pos)
            else:
                # flip one randomly chosen eligible stripe per round,
                # re-scanning eligibility after every flip
                collect_phasons = np.zeros(self.phasons_count)
                for i in range(self.phasons_count):
                    phasons_pos = np.random.permutation(self.find_all_phasons(seq))[0]
                    seq = self.make_shufling(phasons_pos)
                    collect_phasons[i] = phasons_pos
                phasons_pos = collect_phasons
            return seq, phasons_pos

    def make_shufling(self, stripe_position):
        """Shift the 1 at stripe_position one slot right (cyclically).

        NOTE(review): `seq` aliases self.seq, so the stored chain itself is
        modified; the (misspelled) name is kept for API compatibility.
        """
        seq = self.seq
        seq[(stripe_position + 1) % len(seq)] = 1
        seq[stripe_position] = 0
        return seq

    def sequence(self, seq):
        """Stripe-expand an already-shuffled chain."""
        return np.repeat(seq, self.repeat)
# Library-only module: nothing to execute when run as a script.
if __name__ == "__main__":
    pass
| StarcoderdataPython |
class SurchargeList(list):
    """A list of surcharge/price pairs with a convenience grand total."""

    @property
    def total(self):
        """Sum of the `price` attribute across all contained items."""
        return sum(item.price for item in self)
class SurchargePrice():
    """Pairs a surcharge object with its calculated price."""

    # class-level defaults, preserved for attribute access on the class itself
    surcharge = None
    price = None

    def __init__(self, surcharge, price):
        self.surcharge, self.price = surcharge, price
class SurchargeApplicator():
    """Computes which surcharges apply to a basket and at what price."""

    def __init__(self, request=None, context=None):
        self.context = context
        self.request = request

    def get_surcharges(self, basket, **kwargs):
        """
        For example::

            return (
                PercentageCharge(percentage=D("2.00")),
                FlatCharge(excl_tax=D("20.0"), incl_tax=D("20.0")),
            )

        Surcharges must implement the minimal API in ``oscar.apps.checkout.surcharges.BaseSurcharge``.
        Note that you can also make it a model if you want, just like shipping methods.
        """
        return ()

    def get_applicable_surcharges(self, basket, **kwargs):
        """Return a SurchargeList of applicable surcharges, or None if empty."""
        applicable = []
        for surcharge in self.get_surcharges(basket=basket, **kwargs):
            if not self.is_applicable(surcharge=surcharge, basket=basket, **kwargs):
                continue
            applicable.append(
                SurchargePrice(
                    surcharge,
                    surcharge.calculate(basket=basket, **kwargs)
                )
            )
        if applicable:
            return SurchargeList(applicable)
        return None

    def is_applicable(self, surcharge, basket, **kwargs):
        """
        Checks if surcharge is applicable to certain conditions
        """
        return True
| StarcoderdataPython |
6477755 | """
**DEPRECATED**
A print function that pretty prints sympy Basic objects.
:moduleauthor: <NAME>
Usage
=====
Once the extension is loaded, Sympy Basic objects are automatically
pretty-printed.
As of SymPy 0.7.2, maintenance of this extension has moved to SymPy under
sympy.interactive.ipythonprinting, any modifications to account for changes to
SymPy should be submitted to SymPy rather than changed here. This module is
maintained here for backwards compatablitiy with old SymPy versions.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import warnings
def load_ipython_extension(ip):
    """Deprecated entry point: pretty-printing now lives in SymPy itself."""
    message = ("The sympyprinting extension has moved to `sympy`, "
               "use `from sympy import init_printing; init_printing()`")
    warnings.warn(message)
| StarcoderdataPython |
6459278 | """
The module contains functions related to data aggretion
"""
# Python libs
from datetime import datetime
from dateutil.relativedelta import relativedelta
# Django libs
from django.db.models import Avg
# Seshdash
from seshdash.models import Daily_Data_Point, Sesh_Site, Sesh_Alert
from seshdash.utils.time_utils import get_date_dashed
from seshdash.utils.reporting import get_measurement_unit
def get_avg_field_year(site, field):
    """Return the average of *field* over the past year of Daily_Data_Point rows.

    Args:
        site: Sesh_Site instance the data points belong to.
        field: name of the numeric model field to average.

    Returns:
        The average value, or None when no rows match the range.
    """
    now = datetime.now()
    year_before = now - relativedelta(years=1)
    aggregates = Daily_Data_Point.objects.filter(
        date__range=[get_date_dashed(year_before), get_date_dashed(now)],
        site=site,
    ).aggregate(Avg(field))
    # Django names the aggregate key "<field>__avg"; reading it directly
    # replaces the original `.values()[0]`, which fails on Python 3
    # (dict_values is not subscriptable).
    return aggregates['{}__avg'.format(field)]
def get_alerts_for_year(site):
    """Return the Sesh_Alert queryset for *site* covering the last 12 months."""
    # NOTE: when using datetime objects, date__range does not include the
    # last date.
    end = datetime.now()
    start = end - relativedelta(years=1)
    return get_alerts_for_range(site, start, end)
def get_alerts_for_range(site, start_date=None, end_date=None):
    """Return the Sesh_Alert queryset for *site* between two dates.

    Args:
        site: Sesh_Site the alerts belong to.
        start_date: beginning of the range; defaults to one day before end_date.
        end_date: end of the range; defaults to now.

    Returns:
        A queryset of matching Sesh_Alert rows (default window: the last day).
    """
    # Fill defaults independently so a caller may supply just one bound;
    # the original only handled the both-None case and crashed otherwise.
    if end_date is None:
        end_date = datetime.now()
    if start_date is None:
        start_date = end_date - relativedelta(days=1)
    # date__range expects (earliest, latest); the original default produced
    # [now, yesterday] - a reversed range that matches no rows.
    return Sesh_Alert.objects.filter(
        date__range=[get_date_dashed(start_date), get_date_dashed(end_date)],
        site=site,
    )
def get_historical_dict(column='daily_pv_yield'):
    """Package per-site historical data for *column* plus yearly summaries.

    Args:
        column: name of the Daily_Data_Point field to report per day.

    Returns:
        A list with one dict per Sesh_Site containing the site's daily
        points for *column*, the measurement unit, the yearly alert count,
        and yearly PV-yield / power-consumption averages.
    """
    unit = get_measurement_unit(column)
    historical_data = []
    # (Removed an unused `Daily_Data_Point.objects.all()` assignment that was
    # immediately overwritten inside the loop.)
    for site in Sesh_Site.objects.all():
        # One entry per recorded day for this site.
        site_historical_data = [
            {
                "date": get_date_dashed(point.date),
                "count": getattr(point, column),
                "point_id": point.id,
            }
            for point in Daily_Data_Point.objects.filter(site=site.id)
        ]
        historical_data.append({
            "site_id": site.id,
            "site_name": site.site_name,
            "site_historical_data": site_historical_data,
            "data_unit": unit,
            "number_of_alerts": get_alerts_for_year(site).count(),
            "average_pv_yield": get_avg_field_year(site, 'daily_pv_yield'),
            "average_power_consumption_total": get_avg_field_year(site, 'daily_power_consumption_total'),
        })
    return historical_data
| StarcoderdataPython |
167067 | <gh_stars>1-10
import os
import pandas as pd
import numpy as np
from path import Path
from poor_trader import indicators
from poor_trader.config import SYSTEMS_PATH
def _trim_quotes(symbol, df_group_quotes):
df_quotes = df_group_quotes.filter(regex='^{}_'.format(symbol))
df_quotes.columns = [_.replace(symbol + '_', '') for _ in df_quotes.columns]
df_quotes = df_quotes.loc[df_quotes['Date'].dropna().index]
return df_quotes
def run_atr_channel_breakout(symbols, df_group_quotes, prefix='ATRChannel', top=7, bottom=3, sma=120):
    """ATR-channel breakout system; LONG/SHORT/HOLD labels cached as a pickle.

    Returns (cache_name, DataFrame of position labels, one column per symbol).
    """
    name = '{}{}|{}|{}'.format(prefix, top, bottom, sma)
    cache_path = SYSTEMS_PATH / '{}.pkl'.format(name)
    if os.path.exists(cache_path):
        # Cached run: reuse previously computed positions.
        return name, pd.read_pickle(cache_path)

    df_positions = pd.DataFrame()
    for symbol in symbols:
        print('Running', symbol)
        df_quotes = _trim_quotes(symbol, df_group_quotes)
        channel = indicators.atr_channel(df_quotes, top=top, bottom=bottom, sma=sma, symbol=symbol)
        close = df_quotes.Close
        # Enter when the close crosses up through the channel top on this bar.
        enter_long = np.logical_and(close > channel.top,
                                    close.shift(1) < channel.top.shift(1))
        # Exit when the close falls below the channel bottom or its midline.
        exit_long = np.logical_or(close < channel.bottom, close < channel.mid)
        signals = pd.DataFrame(index=df_quotes.index)
        signals[symbol] = np.where(enter_long, 'LONG', np.where(exit_long, 'SHORT', 'HOLD'))
        df_positions = df_positions.join(signals, how='outer')
    df_positions.to_pickle(cache_path)
    return name, df_positions
def run_atr_channel_breakout_sma(symbols, df_group_quotes, prefix='ATRChannelSMA', top=7, bottom=3, sma=120, fast=100, slow=150):
    """ATR-channel breakout filtered by an SMA trend; results cached as a pickle.

    Returns (cache_name, DataFrame of LONG/SHORT/HOLD labels per symbol).
    """
    fname = '{}{}|{}|{}|{}|{}'.format(prefix, top, bottom, sma, fast, slow)
    fpath = SYSTEMS_PATH / '{}.pkl'.format(fname)
    if os.path.exists(fpath):
        # Cached run: reuse previously computed positions.
        return fname, pd.read_pickle(fpath)
    else:
        df_positions = pd.DataFrame()
        for symbol in symbols:
            print('Running', symbol)
            df_quotes = _trim_quotes(symbol, df_group_quotes)
            df_atr_channel = indicators.atr_channel(df_quotes, top=top, bottom=bottom, sma=sma, symbol=symbol)
            df_sma = indicators.SMA_cross(df_quotes, fast=fast, slow=slow, symbol=symbol)
            df = pd.DataFrame(index=df_quotes.index)
            # LONG: uptrend (fast SMA above slow, close above fast SMA) AND the
            # close crossed up through the ATR-channel top on this bar.
            long_condition = np.logical_and(np.logical_and(df_sma.FastSMA > df_sma.SlowSMA, df_quotes.Close > df_sma.FastSMA),
                                            np.logical_and(df_quotes.Close > df_atr_channel.top, df_quotes.Close.shift(1) < df_atr_channel.top.shift(1)))
            # SHORT: close fell below the channel bottom or its midline.
            short_condition = np.logical_or(df_quotes.Close < df_atr_channel.bottom, df_quotes.Close < df_atr_channel.mid)
            df[symbol] = np.where(long_condition, 'LONG', np.where(short_condition, 'SHORT', 'HOLD'))
            df_positions = df_positions.join(df, how='outer')
        df_positions.to_pickle(fpath)
        return fname, df_positions
def run_dcsma(symbols, df_group_quotes, prefix='DonchianSMA', high=50, low=50, fast=100, slow=150):
    """Donchian-channel breakout filtered by an SMA trend; cached as a pickle.

    Returns (cache_name, DataFrame of LONG/SHORT/HOLD labels per symbol).
    """
    name = '{}{}|{}|{}|{}'.format(prefix, high, low, fast, slow)
    cache_path = SYSTEMS_PATH / '{}.pkl'.format(name)
    if os.path.exists(cache_path):
        # Cached run: reuse previously computed positions.
        return name, pd.read_pickle(cache_path)

    df_positions = pd.DataFrame()
    for symbol in symbols:
        print('Running', symbol)
        df_quotes = _trim_quotes(symbol, df_group_quotes)
        donchian = indicators.donchian_channel(df_quotes, high=high, low=low, symbol=symbol)
        sma = indicators.SMA_cross(df_quotes, fast=fast, slow=slow, symbol=symbol)
        close = df_quotes.Close
        uptrend = np.logical_and(sma.FastSMA > sma.SlowSMA, close > sma.FastSMA)
        # Channel expanding upward while the lower bound holds.
        channel_rising = np.logical_and(donchian.high.shift(1) < donchian.high,
                                        donchian.low.shift(1) <= donchian.low)
        # Channel dropping while the upper bound does not make a new high.
        channel_falling = np.logical_and(donchian.low.shift(1) > donchian.low,
                                         donchian.high.shift(1) >= donchian.high)
        signals = pd.DataFrame(index=df_quotes.index)
        signals[symbol] = np.where(np.logical_and(uptrend, channel_rising), 'LONG',
                                   np.where(channel_falling, 'SHORT', 'HOLD'))
        df_positions = df_positions.join(signals, how='outer')
    df_positions.to_pickle(cache_path)
    return name, df_positions
def run_slsma(symbols, df_group_quotes, prefix='SLSMA', st_fast=5, st_slow=10, s_fast=40, s_slow=60, l_fast=100, l_slow=120):
    """Triple-SMA (shortest/short/long) crossover system; cached as a pickle.

    Returns (cache_name, DataFrame of LONG/SHORT/HOLD labels per symbol).
    """
    fname = '{}{}|{}|{}|{}|{}|{}'.format(prefix, st_fast, st_slow, s_fast, s_slow, l_fast, l_slow)
    fpath = SYSTEMS_PATH / '{}.pkl'.format(fname)
    if os.path.exists(fpath):
        # Cached run: reuse previously computed positions.
        return fname, pd.read_pickle(fpath)
    else:
        df_positions = pd.DataFrame()
        for symbol in symbols:
            print('Running', symbol)
            df_quotes = _trim_quotes(symbol, df_group_quotes)
            shortest_sma = indicators.SMA_cross(df_quotes, fast=st_fast, slow=st_slow, symbol=symbol)
            short_sma = indicators.SMA_cross(df_quotes, fast=s_fast, slow=s_slow, symbol=symbol)
            long_sma = indicators.SMA_cross(df_quotes, fast=l_fast, slow=l_slow, symbol=symbol)
            df = pd.DataFrame(index=df_quotes.index)
            # LONG: long-term uptrend (long fast SMA above long slow SMA, and
            # short fast SMA above long fast SMA) AND any of the three
            # crossovers fires on this bar (longest, short, or - provided the
            # short pair is bullish - the shortest pair).
            long_condition = np.logical_and(np.logical_and(long_sma.FastSMA > long_sma.SlowSMA, short_sma.FastSMA > long_sma.FastSMA),
                                            np.logical_or(long_sma.FastCrossoverSlow == 1,
                                                          np.logical_or(short_sma.FastCrossoverSlow == 1,
                                                                        np.logical_and(short_sma.FastSMA > short_sma.SlowSMA,
                                                                                       shortest_sma.FastCrossoverSlow == 1))))
            # SHORT: the short-term average dropped below the long-term one.
            short_condition = short_sma.FastSMA < long_sma.FastSMA
            df[symbol] = np.where(long_condition, 'LONG', np.where(short_condition, 'SHORT', 'HOLD'))
            df_positions = df_positions.join(df, how='outer')
        df_positions.to_pickle(fpath)
        return fname, df_positions
| StarcoderdataPython |
78773 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from a2c_ppo_acktr.distributions import Bernoulli, Categorical, DiagGaussian
from a2c_ppo_acktr.utils import init
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
class Policy(nn.Module):
    """Actor-critic policy: a feature-extractor base plus a distribution head.

    The base is chosen from the observation shape (CNN for images, MLP for
    vectors, PNN when requested); the distribution head is chosen from the
    action-space type (Discrete/Box/MultiBinary).
    """

    def __init__(self, obs_shape, action_space, base=None, base_kwargs=None, use_pnn=False):
        super(Policy, self).__init__()
        if base_kwargs is None:
            base_kwargs = {}
        if base is None:
            # Pick a default torso by observation rank unless one is supplied.
            if use_pnn:
                base = PNNConvBase
            elif len(obs_shape) == 3:
                base = CNNBase
            elif len(obs_shape) == 1:
                base = MLPBase
            else:
                raise NotImplementedError

        self.base = base(obs_shape[0], **base_kwargs)

        # Dispatch on the class name so gym spaces need not be imported here.
        if action_space.__class__.__name__ == "Discrete":
            num_outputs = action_space.n
            self.dist = Categorical(self.base.output_size, num_outputs)
        elif action_space.__class__.__name__ == "Box":
            num_outputs = action_space.shape[0]
            self.dist = DiagGaussian(self.base.output_size, num_outputs)
        elif action_space.__class__.__name__ == "MultiBinary":
            num_outputs = action_space.shape[0]
            self.dist = Bernoulli(self.base.output_size, num_outputs)
        else:
            raise NotImplementedError

    @property
    def is_recurrent(self):
        # Delegated to the torso, which owns the optional GRU.
        return self.base.is_recurrent

    @property
    def recurrent_hidden_state_size(self):
        """Size of rnn_hx."""
        return self.base.recurrent_hidden_state_size

    def forward(self, inputs, rnn_hxs, masks):
        # The policy is used through act/get_value/evaluate_actions only.
        raise NotImplementedError

    def act(self, inputs, rnn_hxs, masks, deterministic=False):
        """Sample (or take the mode of) an action; returns value estimate too."""
        value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
        dist = self.dist(actor_features)

        if deterministic:
            action = dist.mode()
        else:
            action = dist.sample()

        action_log_probs = dist.log_probs(action)
        dist_entropy = dist.entropy().mean()

        return value, action, action_log_probs, rnn_hxs

    def get_value(self, inputs, rnn_hxs, masks):
        """Return only the critic's value estimate."""
        value, _, _ = self.base(inputs, rnn_hxs, masks)
        return value

    def evaluate_actions(self, inputs, rnn_hxs, masks, action):
        """Return value, log-prob of *action*, and mean entropy (for PPO/A2C losses)."""
        value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
        dist = self.dist(actor_features)

        action_log_probs = dist.log_probs(action)
        dist_entropy = dist.entropy().mean()

        return value, action_log_probs, dist_entropy, rnn_hxs
class NNBase(nn.Module):
    """Common torso base; optionally wraps per-step features in a GRU.

    Subclasses produce features; this class owns the recurrent plumbing,
    including re-segmenting flattened rollouts at episode boundaries.
    """

    def __init__(self, recurrent, recurrent_input_size, hidden_size):
        super(NNBase, self).__init__()

        self._hidden_size = hidden_size
        self._recurrent = recurrent

        if recurrent:
            self.gru = nn.GRU(recurrent_input_size, hidden_size)
            # Orthogonal weights, zero biases for every GRU parameter.
            for name, param in self.gru.named_parameters():
                if 'bias' in name:
                    nn.init.constant_(param, 0)
                elif 'weight' in name:
                    nn.init.orthogonal_(param)

    @property
    def is_recurrent(self):
        return self._recurrent

    @property
    def recurrent_hidden_state_size(self):
        # Size 1 is a placeholder so downstream storage code always has a
        # (possibly trivial) hidden-state dimension.
        if self._recurrent:
            return self._hidden_size
        return 1

    @property
    def output_size(self):
        return self._hidden_size

    def _forward_gru(self, x, hxs, masks):
        # Single-step case: one feature row per environment.
        if x.size(0) == hxs.size(0):
            # masks zero the hidden state where an episode just ended.
            x, hxs = self.gru(x.unsqueeze(0), (hxs * masks).unsqueeze(0))
            x = x.squeeze(0)
            hxs = hxs.squeeze(0)
        else:
            # x is a (T, N, -1) tensor that has been flatten to (T * N, -1)
            N = hxs.size(0)
            T = int(x.size(0) / N)

            # unflatten
            x = x.view(T, N, x.size(1))

            # Same deal with masks
            masks = masks.view(T, N)

            # Let's figure out which steps in the sequence have a zero for any agent
            # We will always assume t=0 has a zero in it as that makes the logic cleaner
            has_zeros = ((masks[1:] == 0.0) \
                            .any(dim=-1)
                            .nonzero()
                            .squeeze()
                            .cpu())

            # +1 to correct the masks[1:]
            if has_zeros.dim() == 0:
                # Deal with scalar
                has_zeros = [has_zeros.item() + 1]
            else:
                has_zeros = (has_zeros + 1).numpy().tolist()

            # add t=0 and t=T to the list
            has_zeros = [0] + has_zeros + [T]

            hxs = hxs.unsqueeze(0)
            outputs = []
            for i in range(len(has_zeros) - 1):
                # We can now process steps that don't have any zeros in masks together!
                # This is much faster
                start_idx = has_zeros[i]
                end_idx = has_zeros[i + 1]

                # Reset hidden state at the segment start where masks are zero.
                rnn_scores, hxs = self.gru(
                    x[start_idx:end_idx],
                    hxs * masks[start_idx].view(1, -1, 1))

                outputs.append(rnn_scores)

            # assert len(outputs) == T
            # x is a (T, N, -1) tensor
            x = torch.cat(outputs, dim=0)
            # flatten
            x = x.view(T * N, -1)
            hxs = hxs.squeeze(0)

        return x, hxs
class CNNBase(NNBase):
    """Nature-CNN feature extractor with a linear critic head."""

    def __init__(self, num_inputs, recurrent=False, hidden_size=512):
        super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)

        relu_gain = nn.init.calculate_gain('relu')

        def init_relu(module):
            return init(module, nn.init.orthogonal_,
                        lambda x: nn.init.constant_(x, 0), relu_gain)

        self.main = nn.Sequential(
            init_relu(nn.Conv2d(num_inputs, 32, 8, stride=4)), nn.ReLU(),
            init_relu(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(),
            init_relu(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(), Flatten(),
            init_relu(nn.Linear(32 * 7 * 7, hidden_size)), nn.ReLU())

        def init_plain(module):
            return init(module, nn.init.orthogonal_,
                        lambda x: nn.init.constant_(x, 0))

        self.critic_linear = init_plain(nn.Linear(hidden_size, 1))

        self.train()

    def forward(self, inputs, rnn_hxs, masks):
        # Pixel observations are rescaled from [0, 255] to [0, 1].
        features = self.main(inputs / 255.0)

        if self.is_recurrent:
            features, rnn_hxs = self._forward_gru(features, rnn_hxs, masks)

        return self.critic_linear(features), features, rnn_hxs
class MLPBase(NNBase):
    """Two-layer tanh MLP torso with separate actor and critic branches."""

    def __init__(self, num_inputs, recurrent=False, hidden_size=64):
        super(MLPBase, self).__init__(recurrent, num_inputs, hidden_size)

        # With a GRU, the MLP consumes its hidden state rather than the
        # raw observation.
        in_features = hidden_size if recurrent else num_inputs

        def init_tanh(module):
            return init(module, nn.init.orthogonal_,
                        lambda x: nn.init.constant_(x, 0), np.sqrt(2))

        def two_layer_tanh():
            return nn.Sequential(
                init_tanh(nn.Linear(in_features, hidden_size)), nn.Tanh(),
                init_tanh(nn.Linear(hidden_size, hidden_size)), nn.Tanh())

        self.actor = two_layer_tanh()
        self.critic = two_layer_tanh()
        self.critic_linear = init_tanh(nn.Linear(hidden_size, 1))

        self.train()

    def forward(self, inputs, rnn_hxs, masks):
        features = inputs
        if self.is_recurrent:
            features, rnn_hxs = self._forward_gru(features, rnn_hxs, masks)

        hidden_critic = self.critic(features)
        hidden_actor = self.actor(features)

        return self.critic_linear(hidden_critic), hidden_actor, rnn_hxs
##### Added for CCM #######
class ScaleLayer(nn.Module):
    """Multiply the input by a single learnable scalar."""

    def __init__(self, init_value=1e-3):
        super().__init__()
        self.scale = nn.Parameter(torch.FloatTensor([init_value]))

    def forward(self, x):
        return self.scale * x
class PNNBase(NNBase):
    """Single PNN column skeleton: three conv layers plus one FC layer.

    ``t`` lists the layer specs: [in_channels, out_channels, kernel, stride]
    for each conv layer and [in_features, out_features] for the FC layer.
    ``topology``/``output_shapes``/``input_shapes`` are kept for building the
    lateral adapters in PNNConvBase.
    """

    def __init__(self, t, recurrent=False, hidden_size=512):
        super(PNNBase, self).__init__(recurrent, hidden_size, hidden_size)

        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                               constant_(x, 0), nn.init.calculate_gain('relu'))

        self.conv1 = init_(nn.Conv2d(t[0][0], t[0][1], t[0][2], stride=t[0][3]))
        self.conv2 = init_(nn.Conv2d(t[1][0], t[1][1], t[1][2], stride=t[1][3]))
        self.conv3 = init_(nn.Conv2d(t[2][0], t[2][1], t[2][2], stride=t[2][3]))
        self.fc = init_(nn.Linear(t[3][0], t[3][1]))
        # Optional pooling after conv1; only set by some subclasses (e.g. grid).
        self.mp = None
        self.relu = nn.ReLU()
        self.flatten = Flatten()

        # (kernel, stride) of conv2/conv3 plus FC width; used when sizing the
        # U adapters of later columns.
        self.topology = [
            [t[1][2], t[1][3]],
            [t[2][2], t[2][3]],
            t[3][1]
        ]
        self.output_shapes = [x[1] for x in t]
        self.input_shapes = [x[0] for x in t]

    def layers(self, i, x):
        """Apply layer *i* (0-3) to *x*; layer 3 flattens before the FC."""
        if i == 0:
            if not self.mp:
                return self.relu(self.conv1(x))
            else:
                return self.mp(self.relu(self.conv1(x)))
        elif i == 1:
            return self.relu(self.conv2(x))
        elif i == 2:
            return self.relu(self.conv3(x))
        elif i == 3:
            return self.fc(self.flatten(x))

    def forward(self, x):
        """Run all four layers, returning every intermediate activation."""
        outs = []
        for i in range(4):
            x = self.layers(i, x)
            outs.append(x)
        return outs
class PNNColumnAtari(PNNBase):  # Use this for atari environments
    """PNN column using the Nature-CNN layer sizes (Atari observations)."""

    def __init__(self, num_inputs, recurrent=False, hidden_size=512):
        # Conv specs are [in_channels, out_channels, kernel, stride]; the last
        # entry is the FC layer's [in_features, out_features].
        layer_specs = [
            [num_inputs, 32, 8, 4],
            [32, 64, 4, 2],
            [64, 32, 3, 1],
            [32 * 7 * 7, hidden_size],
        ]
        super(PNNColumnAtari, self).__init__(layer_specs, recurrent, hidden_size)
class PNNColumnGrid(PNNBase):  # Use this for grid environments
    """PNN column sized for small grid-world observations."""

    def __init__(self, num_inputs, recurrent=False, hidden_size=64):
        layer_specs = [[num_inputs, 16, 2, 1], [16, 32, 2, 1], [32, 64, 2, 1], [64, 64]]
        super(PNNColumnGrid, self).__init__(layer_specs, recurrent, hidden_size)

        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                               constant_(x, 0), nn.init.calculate_gain('relu'))

        # Down-sample after conv1 and prepend an extra Linear+Tanh stage to
        # the FC head built by PNNBase.
        self.mp = nn.MaxPool2d((2, 2))
        self.fc = nn.Sequential(
            init_(nn.Linear(hidden_size, 64)),
            nn.Tanh(),
            self.fc,
        )
class PNNConvBase(NNBase):
    """Progressive Neural Network torso: a growing list of columns with
    lateral adapter connections (alpha scale, V 1x1 reduction, U projection)
    feeding each earlier column's activations into the newest column.
    """

    def __init__(self, num_inputs, recurrent=False, grid=False, hidden_size=512):
        super(PNNConvBase, self).__init__(recurrent, hidden_size, hidden_size)
        self.columns = nn.ModuleList([])
        self.num_inputs = num_inputs
        self.hidden_size = hidden_size
        self.recurrent = recurrent
        # One ModuleList of adapters per previous column, filled by new_task().
        self.alpha = nn.ModuleList([])
        self.V = nn.ModuleList([])
        self.U = nn.ModuleList([])
        self.flatten = Flatten()
        self.grid = grid
        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                               constant_(x, 0))
        if grid:
            self.critic_linear = nn.Sequential(
                init_(nn.Linear(self.hidden_size, 64)),
                nn.Tanh(),
                init_(nn.Linear(64, 1))
            )
        else:
            # Bug fix: the original called nn.linear (lowercase), which does
            # not exist and raised AttributeError whenever grid=False.
            self.critic_linear = init_(nn.Linear(self.hidden_size, 1))
        self.train()
        # Three conv layers plus one FC layer per column.
        self.n_layers = 4

    def forward(self, x, rnn_hxs, masks):
        assert self.columns, 'PNN should at least have one column (missing call to `new_task` ?)'
        # x = (x / 255.0)
        # Layer 0 of every column runs on the raw input.
        inputs = [self.columns[i].layers(0, x) for i in range(len(self.columns))]
        for l in range(1, self.n_layers):
            outputs = [self.columns[0].layers(l, inputs[0])]
            for c in range(1, len(self.columns)):
                # Combine this column's own activation with the adapted
                # activation of the previous column: U(relu(V(relu(a * h)))).
                pre_col = inputs[c - 1]
                cur_out = self.columns[c].layers(l, inputs[c])
                a = self.alpha[c - 1][l - 1]
                a_h = F.relu(a(pre_col))
                V = self.V[c - 1][l - 1]
                V_a_h = F.relu(V(a_h))
                U = self.U[c - 1][l - 1]
                if l == self.n_layers - 1:  # FC layer
                    V_a_h = self.flatten(V_a_h)
                    U_V_a_h = U(V_a_h)
                    out = F.relu(cur_out + U_V_a_h)
                    outputs.append(out)
                else:
                    U_V_a_h = U(V_a_h)  # conv layers
                    out = F.relu(cur_out + U_V_a_h)
                    outputs.append(out)
            inputs = outputs
        # Features come from the newest (last) column.
        x = inputs[-1]
        if self.is_recurrent:
            x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)
        return self.critic_linear(x), x, rnn_hxs

    def new_task(self):  # adds a new column to pnn
        """Append a fresh column, plus lateral adapters from the previous one."""
        if self.grid:
            new_column = PNNColumnGrid(self.num_inputs, self.recurrent, self.hidden_size)
        else:
            new_column = PNNColumnAtari(self.num_inputs, self.recurrent, self.hidden_size)
        self.columns.append(new_column)
        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
                               constant_(x, 0))
        if len(self.columns) > 1:
            pre_col, col = self.columns[-2], self.columns[-1]
            a_list = []
            V_list = []
            U_list = []
            for l in range(1, self.n_layers):
                a = ScaleLayer(0.01)
                # V halves the channel count of the previous column's output.
                map_in = pre_col.output_shapes[l - 1]
                map_out = int(map_in / 2)
                v = init_(nn.Conv2d(map_in, map_out, 1))
                if l != self.n_layers - 1:  # conv -> conv, last layer
                    cur_out = col.output_shapes[l]
                    size, stride = pre_col.topology[l - 1]
                    u = init_(nn.Conv2d(map_out, cur_out, size, stride=stride))
                else:
                    # FC adapter: half of the column's flattened input width.
                    input_size = int(col.input_shapes[-1] / 2)
                    hidden_size = self.hidden_size
                    u = init_(nn.Linear(input_size, hidden_size))
                a_list.append(a)
                V_list.append(v)
                U_list.append(u)
            a_list = nn.ModuleList(a_list)
            V_list = nn.ModuleList(V_list)
            U_list = nn.ModuleList(U_list)
            self.alpha.append(a_list)
            self.V.append(V_list)
            self.U.append(U_list)

    def freeze_columns(self, skip=None):  # freezes the weights of previous columns
        """Stop gradients for every column except those whose index is in *skip*."""
        if skip is None:
            skip = []
        for i, c in enumerate(self.columns):
            if i not in skip:
                for params in c.parameters():
                    params.requires_grad = False

    def parameters(self, col=None):
        """All parameters, or only those of column *col* when given."""
        if col is None:
            return super(PNNConvBase, self).parameters()
        return self.columns[col].parameters()
| StarcoderdataPython |
8136113 | from setuptools import setup
# Packaging metadata for the hh-deep-deep service.
setup(
    name='hh-deep-deep',
    url='https://github.com/TeamHG-Memex/hh-deep-deep',
    packages=['hh_deep_deep'],
    include_package_data=True,
    # Runtime dependencies; pykafka is pinned to a known-good release.
    install_requires=[
        'pykafka==2.6.0',
        'tldextract',
    ],
    # Installs the `hh-deep-deep-service` console command.
    entry_points = {
        'console_scripts': [
            'hh-deep-deep-service=hh_deep_deep.service:main',
        ],
    },
    classifiers=[
        'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
    ],
)
| StarcoderdataPython |
5062523 | #!/usr/bin/python
# (c) 2020, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This module implements the operations for ONTAP MCC Mediator.
# The Mediator is supported for MCC IP configs from ONTAP 9.7 or later.
# This module requires REST APIs for Mediator which is supported from
# ONTAP 9.8 (DW) or later
'''
na_ontap_mcc_mediator
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_mcc_mediator
short_description: NetApp ONTAP Add and Remove MetroCluster Mediator
extends_documentation_fragment:
- netapp.ontap.netapp.na_ontap
version_added: 20.9.0
author: NetApp Ansible Team (@carchi8py) <<EMAIL>>
description:
- Add and remove ONTAP MCC Mediator
options:
state:
choices: ['present', 'absent']
description:
- "Whether MCCIP Mediator is present or not."
default: present
type: str
mediator_address:
description:
- ip address of the mediator
type: str
required: true
mediator_user:
description:
- username of the mediator
type: str
required: true
mediator_password:
description:
- password of the mediator
type: str
required: true
'''
EXAMPLES = """
- name: Add ONTAP MCCIP Mediator
na_ontap_mcc_mediator:
state: present
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ <PASSWORD> }}"
mediator_address: mediator_ip
mediator_user: metrocluster_admin
mediator_password: <PASSWORD>!
- name: Delete ONTAP MCCIP Mediator
na_ontap_mcc_mediator:
state: absent
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ <PASSWORD> }}"
mediator_user: metrocluster_admin
mediator_password: <PASSWORD>!
"""
RETURN = """
"""
from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
class NetAppOntapMccipMediator(object):
    """
    Mediator object for Add/Remove/Display

    Wraps the ONTAP REST `cluster/mediators` endpoint behind an Ansible
    module with the usual present/absent semantics.
    """

    def __init__(self):
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            mediator_address=dict(required=True, type='str'),
            mediator_user=dict(required=True, type='str'),
            # no_log keeps the password out of Ansible's logs.
            mediator_password=dict(required=True, type='str', no_log=True),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.rest_api = OntapRestAPI(self.module)
        self.use_rest = self.rest_api.is_rest()
        # The mediator API is REST-only; fail early on ZAPI-only clusters.
        if not self.use_rest:
            self.module.fail_json(msg=self.rest_api.requires_ontap_9_6('na_ontap_mcc_mediator'))

    def add_mediator(self):
        """
        Adds an ONTAP Mediator to MCC configuration
        """
        api = 'cluster/mediators'
        params = {
            'ip_address': self.parameters['mediator_address'],
            'password': self.parameters['mediator_password'],
            'user': self.parameters['mediator_user']
        }
        dummy, error = self.rest_api.post(api, params)
        if error:
            self.module.fail_json(msg=error)

    def remove_mediator(self, current_uuid):
        """
        Removes the ONTAP Mediator from MCC configuration

        current_uuid: uuid of the existing mediator record (from get_mediator).
        """
        api = 'cluster/mediators/%s' % current_uuid
        params = {
            'ip_address': self.parameters['mediator_address'],
            'password': self.parameters['mediator_password'],
            'user': self.parameters['mediator_user'],
            'uuid': current_uuid
        }
        dummy, error = self.rest_api.delete(api, params)
        if error:
            self.module.fail_json(msg=error)

    def get_mediator(self):
        """
        Determine if the MCC configuration has added an ONTAP Mediator

        Returns the first mediator's uuid, or None when none is configured.
        """
        api = "cluster/mediators"
        message, error = self.rest_api.get(api, None)
        if error:
            self.module.fail_json(msg=error)
        if message['num_records'] > 0:
            return message['records'][0]['uuid']
        return None

    def apply(self):
        """
        Apply action to MCC Mediator

        Computes create/delete from current state vs. desired state and
        performs it unless running in check mode.
        """
        current = self.get_mediator()
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.na_helper.changed:
            if self.module.check_mode:
                pass
            else:
                if cd_action == 'create':
                    self.add_mediator()
                elif cd_action == 'delete':
                    self.remove_mediator(current)
        self.module.exit_json(changed=self.na_helper.changed)
def main():
    """Entry point: build the mediator module object and run it."""
    NetAppOntapMccipMediator().apply()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
329911 | import logging
from .taxid import TaxId
# Maps database identifier prefixes (as found in TaxId strings) to the URL
# pieces needed to build a resolvable link:
#   full URI = url_prefix + bare_id + url_suffix
ID_PREFIXES = {
    # "http://": {"url_prefix": "http://", "url_suffix": ""},
    # "https://": {"url_prefix": "https://", "url_suffix": ""},
    "W:": {"url_prefix": "http://wikipedia.org/wiki/", "url_suffix": ""},
    "NBN:": {"url_prefix": "https://data.nbn.org.uk/Taxa/", "url_suffix": ""},
    "GBIF:": {"url_prefix": "http://www.gbif.org/species/", "url_suffix": ""},
    "AFD:": {
        "url_prefix": "http://www.environment.gov.au/biodiversity/abrs/online-resources/fauna/afd/taxa/",
        "url_suffix": "",
    },
    "https://cmecscatalog.org/cmecs/classification/aquaticSetting/": {
        "url_prefix": "https://cmecscatalog.org/cmecs/classification/aquaticSetting/",
        "url_suffix": "",
    },
    "FBC:SLB:SpecCode:": {
        "url_prefix": "http://sealifebase.org/Summary/SpeciesSummary.php?id=",
        "url_suffix": "",
    },
    "INAT_TAXON:": {"url_prefix": "https://inaturalist.org/taxa/", "url_suffix": ""},
    "GEONAMES:": {"url_prefix": "http://www.geonames.org/", "url_suffix": ""},
    "INAT:": {
        "url_prefix": "https://www.inaturalist.org/observations/",
        "url_suffix": "",
    },
    "WD:": {"url_prefix": "https://www.wikidata.org/wiki/", "url_suffix": ""},
    # bioinfo is the one entry with a non-empty suffix (".htm").
    "bioinfo:ref:": {
        "url_prefix": "http://bioinfo.org.uk/html/b",
        "url_suffix": ".htm",
    },
    "GAME:": {
        "url_prefix": "https://public.myfwc.com/FWRI/GAME/Survey.aspx?id=",
        "url_suffix": "",
    },
    "ALATaxon:": {"url_prefix": "https://bie.ala.org.au/species/", "url_suffix": ""},
    "doi:": {"url_prefix": "https://doi.org/", "url_suffix": ""},
    "NCBI:": {
        "url_prefix": "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=",
        "url_suffix": "",
    },
    "BioGoMx:": {
        "url_prefix": "http://gulfbase.org/biogomx/biospecies.php?species=",
        "url_suffix": "",
    },
    "FBC:FB:SpecCode:": {
        "url_prefix": "http://fishbase.org/summary/",
        "url_suffix": "",
    },
    "IRMNG:": {
        "url_prefix": "http://www.marine.csiro.au/mirrorsearch/ir_search.list_species?sp_id=",
        "url_suffix": "",
    },
    "NCBITaxon:": {
        "url_prefix": "http://purl.obolibrary.org/obo/NCBITaxon_",
        "url_suffix": "",
    },
    "ENVO:": {"url_prefix": "http://purl.obolibrary.org/obo/ENVO_", "url_suffix": ""},
    "OTT:": {
        "url_prefix": "https://tree.opentreeoflife.org/opentree/ottol@",
        "url_suffix": "",
    },
    "ITIS:": {
        "url_prefix": "http://www.itis.gov/servlet/SingleRpt/SingleRpt?search_topic=TSN&search_value=",
        "url_suffix": "",
    },
    "WORMS:": {
        "url_prefix": "http://www.marinespecies.org/aphia.php?p=taxdetails&id=",
        "url_suffix": "",
    },
    "urn:lsid:biodiversity.org.au:apni.taxon:": {
        "url_prefix": "http://id.biodiversity.org.au/apni.taxon/",
        "url_suffix": "",
    },
    "EOL:": {"url_prefix": "http://eol.org/pages/", "url_suffix": ""},
    "EOL_V2:": {
        "url_prefix": "https://doi.org/10.5281/zenodo.1495266#",
        "url_suffix": "",
    },
    "IF:": {
        "url_prefix": "http://www.indexfungorum.org/names/NamesRecord.asp?RecordID=",
        "url_suffix": "",
    },
    "DOI:": {"url_prefix": "https://doi.org/", "url_suffix": ""},
}
class URIMapper:
    """Translate between db prefixes (e.g. "W:") and their URL prefixes."""

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.map = ID_PREFIXES

    def is_valid_db_prefix(self, db_prefix):
        """True when *db_prefix* has a known URL mapping."""
        return db_prefix in self.map

    def get_uri_prefix_from_db_prefix(self, db_prefix):
        """Return the URL prefix for *db_prefix*; raise ValueError when unknown."""
        if not self.is_valid_db_prefix(db_prefix):
            raise ValueError("No known URI for prefix ", db_prefix)
        return self.map[db_prefix]["url_prefix"]

    def get_db_prefix_from_uri(self, uri):
        """Return the first db prefix whose URL prefix occurs in *uri*."""
        for db_prefix, entry in self.map.items():
            if entry["url_prefix"] in uri:
                return db_prefix
        raise ValueError("No known prefix for URI ", uri)
class URIManager:
    """Convert between TaxId-style identifiers and full resolvable URIs."""

    def __init__(self, mapper=None):
        # Default is created per-instance; the original used a shared
        # `mapper=URIMapper()` default evaluated once at class definition.
        self.logger = logging.getLogger(__name__)
        self.mapper = mapper if mapper is not None else URIMapper()

    def get_uri_from_taxid(self, taxid):
        """Return the full URI for *taxid* (a "<db> <id>" pair).

        Raises ValueError when the taxid's source has no known mapping.
        """
        try:
            db, id = taxid.split()
            uri_prefix = self.mapper.get_uri_prefix_from_db_prefix(db)
            # Bug fix: the url_suffix (e.g. ".htm" for bioinfo refs) was
            # previously dropped, producing broken links for such sources.
            uri_suffix = self.mapper.map[db].get("url_suffix", "")
            return "{}{}{}".format(uri_prefix, id, uri_suffix)
            # return "<{}{}>".format(uri_prefix, id)
        except Exception as e:
            raise ValueError("Cannot format URI for source ", taxid.get_prefix())

    def get_taxid_from_uri(self, uri):
        """Return a TaxId parsed from *uri*, or None when the prefix is unknown."""
        try:
            db_prefix = self.mapper.get_db_prefix_from_uri(uri)
            uri_prefix = self.mapper.get_uri_prefix_from_db_prefix(db_prefix)
        except ValueError:
            logging.getLogger(__name__).error(
                "Cannot get taxid from URI {}".format(uri)
            )
            return None
        # (The original also called uri.strip(""), a no-op, before replace.)
        id = uri.replace(uri_prefix, "")
        # Bug fix: strip the url_suffix too so only the bare id remains.
        uri_suffix = self.mapper.map[db_prefix].get("url_suffix", "")
        if uri_suffix and id.endswith(uri_suffix):
            id = id[:-len(uri_suffix)]
        # id = uri.strip("<>").replace(uri_prefix, "")
        return TaxId(db_prefix, id)
| StarcoderdataPython |
347055 | from rest_framework import serializers
from .models import Marks
from .models import MaxRankBuckets
class MarksSerializer(serializers.ModelSerializer):
    """Serialize every field of the Marks model."""

    class Meta:
        model = Marks
        fields = "__all__"
class MaxRankBucketsSerializer(serializers.ModelSerializer):
    """Serialize every field of the MaxRankBuckets model."""

    class Meta:
        model = MaxRankBuckets
        fields = "__all__"
224761 | import numpy as np
import matplotlib.pyplot as plt
import newton
def f1(x, y):
    """First residual of the system: zero where x^3 = 2*y."""
    return x ** 3 - 2 * y
def f2(x, y):
    """Unit-circle residual: zero exactly when (x, y) lies on the circle."""
    radius_sq = x ** 2 + y ** 2
    return radius_sq - 1
def f(xx):
    """Vector residual [f1, f2] evaluated at xx = (x, y)."""
    x, y = xx[0], xx[1]
    return np.array([f1(x, y), f2(x, y)])
def df(xx):
    """Jacobian of f at xx = (x, y): rows are the gradients of f1 and f2."""
    x, y = xx[0], xx[1]
    row_f1 = [3 * x ** 2, -2]
    row_f2 = [2 * x, 2 * y]
    return np.array([row_f1, row_f2])
if __name__ == "__main__":
    # Plot the zero contours of f1 and f2, then overlay the Newton iteration
    # paths obtained from three different starting points.
    xmin, xmax, ymin, ymax = -3, 3, -3, 3
    plt.xlim(xmin, xmax)
    plt.ylim(ymin, ymax)

    x = np.linspace(xmin, xmax)
    y = np.linspace(ymin, ymax)
    xmesh, ymesh = np.meshgrid(x, y)
    z1 = f1(xmesh, ymesh)
    z2 = f2(xmesh, ymesh)
    # Level-0 contours: the curves f1 = 0 (red) and f2 = 0 (black).
    plt.contour(xmesh, ymesh, z1, colors="r", levels=[0])
    plt.contour(xmesh, ymesh, z2, colors="k", levels=[0])

    solver = newton.Newton(f, df)
    initinals = [np.array([1, 1]), np.array([-1, -1]), np.array([1, -1])]
    markers = ["+", "*", "x"]
    for x0, m in zip(initinals, markers):
        sol = solver.solve(x0)
        # solver.path_ appears to record each iterate; scatter them with a
        # per-start marker (TODO confirm against the newton module).
        plt.scatter(solver.path_[:, 0], solver.path_[:, 1], color="k", marker=m)
        print(sol)
    plt.show()
| StarcoderdataPython |
1800197 | r"""
Permutation group homomorphisms
AUTHORS:
- <NAME> (2006-03-21): first version
- <NAME> (2008-06): fixed kernel and image to return a group,
instead of a string.
EXAMPLES::
sage: G = CyclicPermutationGroup(4)
sage: H = DihedralGroup(4)
sage: g = G([(1,2,3,4)])
sage: phi = PermutationGroupMorphism_im_gens(G, H, map(H, G.gens()))
sage: phi.image(G)
Subgroup of (Dihedral group of order 8 as a permutation group) generated by [(1,2,3,4)]
sage: phi.kernel()
Subgroup of (Cyclic group of order 4 as a permutation group) generated by [()]
sage: phi.image(g)
(1,2,3,4)
sage: phi(g)
(1,2,3,4)
sage: phi.codomain()
Dihedral group of order 8 as a permutation group
sage: phi.codomain()
Dihedral group of order 8 as a permutation group
sage: phi.domain()
Cyclic group of order 4 as a permutation group
"""
#*****************************************************************************
# Copyright (C) 2006 <NAME> and <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.misc.superseded import deprecation
from sage.categories.morphism import Morphism
from sage.groups.perm_gps.permgroup import PermutationGroup, PermutationGroup_generic
class PermutationGroupMorphism(Morphism):
    """
    A set-theoretic map between PermutationGroups.
    """
    def _repr_type(self):
        """
        Returns the type of this morphism. This is used for printing
        the morphism.

        EXAMPLES::

            sage: G = PSL(2,7)
            sage: D, iota1, iota2, pr1, pr2 = G.direct_product(G)
            sage: pr1._repr_type()
            'Permutation group'
        """
        return "Permutation group"

    def range(self):
        """
        Returns the codomain of this morphism. This method is
        deprecated. Please use :meth:`codomain` instead.

        EXAMPLES::

            sage: G = PSL(2,7)
            sage: D, iota1, iota2, pr1, pr2 = G.direct_product(G)
            sage: pr1.range()
            doctest:...: DeprecationWarning: range is deprecated. Please use codomain instead.
            See http://trac.sagemath.org/10334 for details.
            Permutation Group with generators [(3,7,5)(4,8,6), (1,2,6)(3,4,8)]
        """
        deprecation(10334, 'range is deprecated. Please use codomain instead.')
        return self.codomain()

    def kernel(self):
        """
        Returns the kernel of this homomorphism as a permutation group.

        EXAMPLES::

            sage: G = CyclicPermutationGroup(4)
            sage: H = DihedralGroup(4)
            sage: g = G([(1,2,3,4)])
            sage: phi = PermutationGroupMorphism_im_gens(G, H, [1])
            sage: phi.kernel()
            Subgroup of (Cyclic group of order 4 as a permutation group) generated by [(1,2,3,4)]

        ::

            sage: G = PSL(2,7)
            sage: D = G.direct_product(G)
            sage: H = D[0]
            sage: pr1 = D[3]
            sage: G.is_isomorphic(pr1.kernel())
            True
        """
        # Delegates the computation to GAP's Kernel and wraps the result as
        # a subgroup of the domain.
        return self.domain().subgroup(gap_group=self._gap_().Kernel())

    def image(self, J):
        """
        J must be a subgroup of G. Computes the subgroup of H which is the
        image of J.

        EXAMPLES::

            sage: G = CyclicPermutationGroup(4)
            sage: H = DihedralGroup(4)
            sage: g = G([(1,2,3,4)])
            sage: phi = PermutationGroupMorphism_im_gens(G, H, map(H, G.gens()))
            sage: phi.image(G)
            Subgroup of (Dihedral group of order 8 as a permutation group) generated by [(1,2,3,4)]
            sage: phi.image(g)
            (1,2,3,4)

        ::

            sage: G = PSL(2,7)
            sage: D = G.direct_product(G)
            sage: H = D[0]
            sage: pr1 = D[3]
            sage: pr1.image(G)
            Subgroup of (The projective special linear group of degree 2 over Finite Field of size 7) generated by [(3,7,5)(4,8,6), (1,2,6)(3,4,8)]
            sage: G.is_isomorphic(pr1.image(G))
            True
        """
        H = self.codomain()
        if J in self.domain():
            # J is a single element: wrap it in a group, map it through GAP,
            # and return the generator of the (cyclic) image subgroup.
            J = PermutationGroup([J])
            G = self._gap_().Image(J)
            return H.subgroup(gap_group=G).gens()[0]
        else:
            G = self._gap_().Image(J)
            return H.subgroup(gap_group=G)

    def __call__(self, g):
        """
        Some python code for wrapping GAP's Images function but only for
        permutation groups. Returns an error if g is not in G.

        EXAMPLES::

            sage: G = CyclicPermutationGroup(4)
            sage: H = DihedralGroup(4)
            sage: phi = PermutationGroupMorphism_im_gens(G, H, map(H, G.gens()))
            sage: g = G([(1,3),(2,4)]); g
            (1,3)(2,4)
            sage: phi(g)
            (1,3)(2,4)
        """
        return self.image(g)
class PermutationGroupMorphism_id(PermutationGroupMorphism):
    # Placeholder subclass for identity morphisms; no behaviour is
    # specialised beyond the generic PermutationGroupMorphism.
    pass
class PermutationGroupMorphism_from_gap(PermutationGroupMorphism):
    def __init__(self, G, H, gap_hom):
        """
        This is a Python trick to allow Sage programmers to create a group
        homomorphism using GAP using very general constructions. An example
        of its usage is in the direct_product instance method of the
        PermutationGroup_generic class in permgroup.py.

        Basic syntax:

        PermutationGroupMorphism_from_gap(domain_group,
        range_group,'phi:=gap_hom_command;','phi') And don't forget the
        line: from sage.groups.perm_gps.permgroup_morphism import
        PermutationGroupMorphism_from_gap in your program.

        EXAMPLES::

            sage: from sage.groups.perm_gps.permgroup_morphism import PermutationGroupMorphism_from_gap
            sage: G = PermutationGroup([[(1,2),(3,4)], [(1,2,3,4)]])
            sage: H = G.subgroup([G([(1,2,3,4)])])
            sage: PermutationGroupMorphism_from_gap(H, G, gap.Identity)
            Permutation group morphism:
              From: Subgroup of (Permutation Group with generators [(1,2)(3,4), (1,2,3,4)]) generated by [(1,2,3,4)]
              To:   Permutation Group with generators [(1,2)(3,4), (1,2,3,4)]
              Defn: Identity
        """
        # Both ends of the morphism must be permutation groups.
        if not all(isinstance(X, PermutationGroup_generic) for X in [G, H]):
            raise TypeError, "Sorry, the groups must be permutation groups."
        PermutationGroupMorphism.__init__(self, G, H)
        # The underlying GAP homomorphism object does all the real work.
        self._gap_hom = gap_hom

    def _repr_defn(self):
        """
        Returns the definition of this morphism. This is used when
        printing the morphism.

        EXAMPLES::

            sage: from sage.groups.perm_gps.permgroup_morphism import PermutationGroupMorphism_from_gap
            sage: G = PermutationGroup([[(1,2),(3,4)], [(1,2,3,4)]])
            sage: H = G.subgroup([G([(1,2,3,4)])])
            sage: phi = PermutationGroupMorphism_from_gap(H, G, gap.Identity)
            sage: phi._repr_defn()
            'Identity'
        """
        # Collapse GAP's (possibly multi-line) printout to one line.
        return str(self._gap_hom).replace('\n', '')

    def _gap_(self, gap=None):
        """
        Returns a GAP version of this morphism.

        EXAMPLES::

            sage: from sage.groups.perm_gps.permgroup_morphism import PermutationGroupMorphism_from_gap
            sage: G = PermutationGroup([[(1,2),(3,4)], [(1,2,3,4)]])
            sage: H = G.subgroup([G([(1,2,3,4)])])
            sage: phi = PermutationGroupMorphism_from_gap(H, G, gap.Identity)
            sage: phi._gap_()
            Identity
        """
        return self._gap_hom

    def __call__(self, g):
        """
        Some python code for wrapping GAP's Images function but only for
        permutation groups. Returns an error if g is not in G.

        EXAMPLES::

            sage: G = PSL(2,7)
            sage: D = G.direct_product(G)
            sage: H = D[0]
            sage: pr1 = D[3]
            sage: [pr1(g) for g in G.gens()]
            [(3,7,5)(4,8,6), (1,2,6)(3,4,8)]
        """
        # Compute the image in GAP and coerce it back into the codomain.
        return self.codomain()(self._gap_().Image(g))
class PermutationGroupMorphism_im_gens(PermutationGroupMorphism):
    def __init__(self, G, H, gens=None, images=None):
        """
        Some python code for wrapping GAP's GroupHomomorphismByImages
        function but only for permutation groups. Can be expensive if G is
        large. Returns "fail" if gens does not generate self or if the map
        does not extend to a group homomorphism, self - other.

        EXAMPLES::

            sage: G = CyclicPermutationGroup(4)
            sage: H = DihedralGroup(4)
            sage: phi = PermutationGroupMorphism_im_gens(G, H, map(H, G.gens())); phi
            Permutation group morphism:
              From: Cyclic group of order 4 as a permutation group
              To:   Dihedral group of order 8 as a permutation group
              Defn: [(1,2,3,4)] -> [(1,2,3,4)]
            sage: g = G([(1,3),(2,4)]); g
            (1,3)(2,4)
            sage: phi(g)
            (1,3)(2,4)
            sage: images = ((4,3,2,1),)
            sage: phi = PermutationGroupMorphism_im_gens(G, G, images)
            sage: g = G([(1,2,3,4)]); g
            (1,2,3,4)
            sage: phi(g)
            (1,4,3,2)

        AUTHORS:

        - <NAME> (2006-02)
        """
        if not all([isinstance(X, PermutationGroup_generic) for X in [G, H]]):
            raise TypeError, "Sorry, the groups must be permutation groups."
        # Historically the images were passed through the ``images``
        # keyword; the current convention is to pass them as the third
        # positional argument (``gens``).  The old calling convention is
        # still accepted but deprecated (trac #10334).
        if images is not None:
            deprecation(10334, 'only the images need to be specified')
        else:
            images = gens
        PermutationGroupMorphism.__init__(self, G, H)
        # Coerce each image into the codomain once, up front.
        self._images = [H(img) for img in images]

    def _repr_defn(self):
        """
        Returns the definition of this morphism. This is used when
        printing the morphism.

        EXAMPLES::

            sage: G = CyclicPermutationGroup(4)
            sage: H = DihedralGroup(4)
            sage: phi = PermutationGroupMorphism_im_gens(G, H, map(H, G.gens()))
            sage: phi._repr_defn()
            '[(1,2,3,4)] -> [(1,2,3,4)]'
        """
        return "%s -> %s"%(self.domain().gens(), self._images)

    def _gap_(self):
        """
        Returns a GAP representation of this morphism.

        EXAMPLES::

            sage: G = CyclicPermutationGroup(4)
            sage: H = DihedralGroup(4)
            sage: phi = PermutationGroupMorphism_im_gens(G, H, map(H, G.gens()))
            sage: phi._gap_()
            GroupHomomorphismByImages( Group( [ (1,2,3,4) ] ), Group(
            [ (1,2,3,4), (1,4)(2,3) ] ), [ (1,2,3,4) ], [ (1,2,3,4) ] )
        """
        # Build the GAP homomorphism lazily, from the stored generator
        # images, each time it is requested.
        return self.domain()._gap_().GroupHomomorphismByImages(self.codomain(), self.domain().gens(), self._images)
def is_PermutationGroupMorphism(f):
    """
    Returns True if the argument ``f`` is a PermutationGroupMorphism.

    EXAMPLES::

        sage: from sage.groups.perm_gps.permgroup_morphism import is_PermutationGroupMorphism
        sage: G = CyclicPermutationGroup(4)
        sage: H = DihedralGroup(4)
        sage: phi = PermutationGroupMorphism_im_gens(G, H, map(H, G.gens()))
        sage: is_PermutationGroupMorphism(phi)
        True
    """
    # Any subclass (im_gens, from_gap, id) also passes this check.
    return isinstance(f, PermutationGroupMorphism)
| StarcoderdataPython |
3467319 | <reponame>neural-reckoning/decoding_sound_location
from base import *
def compute_confusion_matrix(analysis, estimator):
    """
    Accumulate a column-normalised confusion matrix for ``estimator``
    over the analysis' shuffled train/test splits.

    Entry [g, t] is the fraction of trials whose true ITD fell in bin t
    that were decoded into bin g (bins taken from the analysis settings).
    """
    n_shuffles = analysis.settings['num_shuffles']
    edges = analysis.moresettings['itd_bins']
    n_bins = len(edges) - 1
    confmat = zeros((n_bins, n_bins))
    shuffled = analysis(analysis.shuffled_results, estimator, n_shuffles)
    for k in xrange(n_shuffles):
        _, true_itd, guessed_itd, _ = shuffled[k]
        # Map values onto bin indices; values landing past the last edge
        # are clamped into the final bin.
        true_idx = digitize(true_itd, edges) - 1
        guess_idx = digitize(guessed_itd, edges) - 1
        true_idx[true_idx == n_bins] = n_bins - 1
        guess_idx[guess_idx == n_bins] = n_bins - 1
        for t, g in zip(true_idx, guess_idx):
            confmat[g, t] += 1
    # Normalise each column into a conditional distribution, guarding
    # against empty columns (division by zero).
    col_totals = reshape(sum(confmat, axis=0), (1, n_bins))
    col_totals[col_totals == 0] = 1
    confmat /= col_totals
    return confmat
def show_confusion_matrices(analysis, estimator_types):
    """
    Draw one confusion-matrix subplot per estimator in ``estimator_types``
    (a sequence of (factory, name) pairs) on a single figure titled with
    the analysis basename.  Axes are labelled in microseconds.
    """
    edges = analysis.moresettings['itd_bins']
    centres = 0.5*(edges[1:]+edges[:-1])
    figure()
    suptitle(analysis.settings['basename'])
    layout = subplot_size[len(estimator_types)]
    # Axis extent in microseconds (sign flipped, as in the extent below).
    lo = -amin(centres)*second/usecond
    hi = -amax(centres)*second/usecond
    for index, (make_estimator, label) in enumerate(estimator_types):
        estimator = make_estimator(analysis.fm_orig)
        subplot(*layout+(index+1,))
        confmat = compute_confusion_matrix(analysis, estimator)
        imshow(confmat, origin='lower left', interpolation='nearest',
               aspect='auto', extent=(lo, hi, lo, hi))
        ylabel('Estimated location ($\mu$s)')
        xlabel('Location ($\mu$s)')
        title(label)
| StarcoderdataPython |
#!/usr/bin/env python2
"""A test program to test action servers for the JACO and MICO arms."""
import roslib; roslib.load_manifest('kinova_demo')
import rospy
import actionlib
import kinova_msgs.msg
import geometry_msgs.msg
import tf
import std_msgs.msg
import math
from kinova_msgs.srv import *
import argparse
prefix = 'j2s7s300_'
nbJoints = 7
interactive = True
def joint_position_client(angle_set):
    """
    Send a seven-joint angle goal to the arm's joint-angles action server
    and block until it completes (up to 100 s).

    ``angle_set`` must provide at least seven angle values, read from
    indices 0..6 onto goal.angles.joint1 .. joint7.
    Returns the action result.
    """
    address = '/' + prefix + 'driver/joints_action/joint_angles'
    client = actionlib.SimpleActionClient(
        address, kinova_msgs.msg.ArmJointAnglesAction)
    client.wait_for_server()
    goal = kinova_msgs.msg.ArmJointAnglesGoal()
    # Copy the seven target angles onto the joint1..joint7 fields.
    for joint_index in range(7):
        setattr(goal.angles, 'joint%d' % (joint_index + 1),
                angle_set[joint_index])
    client.send_goal(goal)
    client.wait_for_result(rospy.Duration(100.0))
    # Hand back whatever the action server reported.
    return client.get_result()
def argumentParser(argument):
    """Parse the robot-type argument and update the module-level config.

    Parameters:
        argument: list of argument strings to parse, or None to parse
            sys.argv (argparse's default behaviour).

    Returns:
        The parsed argparse.Namespace (previously nothing was returned;
        existing callers that ignore the return value are unaffected).
    """
    # Bug fix: without this ``global`` declaration the assignments below
    # only created function-local variables, so the module-level defaults
    # for ``prefix`` and ``nbJoints`` were never updated from the command
    # line.
    global prefix, nbJoints
    parser = argparse.ArgumentParser(description='Drive robot joint to command position')
    parser.add_argument('kinova_robotType', metavar='kinova_robotType', type=str, default='j2n6a300',
                        help='kinova_RobotType is in format of: [{j|m|r|c}{1|2}{s|n}{4|6|7}{s|a}{2|3}{0}{0}]. eg: j2n6a300 refers to jaco v2 6DOF assistive 3fingers. Please be noted that not all options are valided for different robot types.')
    args_ = parser.parse_args(argument)
    prefix = args_.kinova_robotType + "_"
    # The fourth character of the robot type encodes the joint count (4/6/7).
    nbJoints = int(args_.kinova_robotType[3])
    return args_
def ZeroTorque():
    """Calibrate the joint torque sensors to zero at the current pose.

    Prints one torque sample before and one after calling the driver's
    ``set_zero_torques`` service so the effect can be checked by eye.
    The arm is expected to already be in a candle-like (vertical) pose.
    Returns None early if the service call fails.
    """
    #move robot to candle like pose
    #result = joint_position_client([180]*7)
    print ("torque before setting zero")
    # Subscribe just long enough to receive and print a single torque
    # message, then unsubscribe.
    topic_name = '/' + prefix + 'driver/out/joint_torques'
    sub_once = rospy.Subscriber(topic_name, kinova_msgs.msg.JointAngles, printTorqueVaules)
    rospy.wait_for_message(topic_name, kinova_msgs.msg.JointAngles, timeout=2)
    sub_once.unregister()
    #call zero torque
    service_address = '/' + prefix + 'driver/in/set_zero_torques'
    rospy.wait_for_service(service_address)
    try:
        zeroTorques = rospy.ServiceProxy(service_address, ZeroTorques)
        zeroTorques()
    except rospy.ServiceException, e:
        print "Service call failed: %s"%e
        return None
    # Give the driver a moment to apply the new calibration.
    rospy.sleep(0.5)
    print "torque after setting zero"
    # Print one torque sample after zeroing, for comparison.
    sub_once = rospy.Subscriber(topic_name, kinova_msgs.msg.JointAngles, printTorqueVaules)
    rospy.wait_for_message(topic_name, kinova_msgs.msg.JointAngles, timeout=2)
    sub_once.unregister()
def runCOMParameterEstimation():
    """Trigger the driver's centre-of-mass parameter estimation routine.

    Blocks until the ``run_COM_parameters_estimation`` service is
    available, then invokes it.  Returns None if the call fails.
    """
    service_address = '/' + prefix + 'driver/in/run_COM_parameters_estimation'
    rospy.wait_for_service(service_address)
    try:
        runEstimation = rospy.ServiceProxy(service_address, RunCOMParametersEstimation)
        runEstimation()
    except rospy.ServiceException, e:
        print "Service call failed: %s"%e
        return None
def printTorqueVaules(torques):
    """Topic callback: print the seven joint torque values on one line.

    (The 'Vaules' typo in the name is kept because the subscribers in
    ``ZeroTorque`` reference this function by name.)
    """
    print "Torque - {}, {}, {}, {}, {}, {}, {}".format(torques.joint1,
                    torques.joint2, torques.joint3, torques.joint4,
                    torques.joint5, torques.joint6, torques.joint7)
# Interactive calibration sequence: move to a candle-like pose, zero the
# torque sensors, then run the COM parameter estimation routine.
if __name__ == '__main__':
    try:
        args = argumentParser(None)
        rospy.init_node('torque_compensated_mode')
        if (interactive == True):
            nb = raw_input('Moving robot to candle like position, press return to start')
        # Candle pose: all seven joints commanded to 180 degrees.
        result = joint_position_client([180]*7)
        if (interactive == True):
            nb = raw_input('Setting torques to zero, press return')
        #test zero torque
        ZeroTorque()
        if (interactive == True):
            nb = raw_input('Sarting COM parameters estimation, press return')
        runCOMParameterEstimation()
    except rospy.ROSInterruptException:
        print "program interrupted before completion"
| StarcoderdataPython |
6406407 | <reponame>minshenglin/nfs-ganesha-tools
import rados
import cephfs
import xattr
import errno
import os
import sys
import ganesha
class CephHandler():
    """Thin convenience wrapper around a RADOS cluster connection.

    Connects on construction using /etc/ceph/ceph.conf and exposes
    whole-object read/write helpers plus a CephFS handle in ``self.fs``.
    """

    def __init__(self):
        self.cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
        self.cluster.connect()
        self.fs = CephfsHandler(self.cluster)

    def createPool(self, name):
        """Create pool ``name`` unless it already exists."""
        if name not in self.cluster.list_pools():
            self.cluster.create_pool(name)

    def read(self, pool, name):
        """Return the full contents of object ``name`` in ``pool``.

        Returns None when either the pool or the object does not exist.
        """
        if pool not in self.cluster.list_pools():
            return None
        ioctx = self.cluster.open_ioctx(pool)
        # if object not in pool
        existing = [obj.key for obj in ioctx.list_objects()]
        if name not in existing:
            ioctx.close()
            return None
        data = ioctx.read(name)
        ioctx.close()
        return data

    def write(self, pool, name, content):
        """Overwrite object ``name`` in ``pool`` with ``content``."""
        handle = self.cluster.open_ioctx(pool)
        handle.write_full(name, content)
        handle.close()
class CephfsHandler():
    """Helper around libcephfs: mounts CephFS on top of an existing rados
    cluster handle and offers mkdir / quota / sync operations."""

    def __init__(self, cluster):
        self.fs = cephfs.LibCephFS()
        self.fs.create_with_rados(cluster)
        self.fs.init()
        self.fs.mount()

    def mkdir(self, path, mode=0o755):
        """Create ``path``; True on success or if it already exists,
        False on any other error."""
        try:
            self.fs.mkdir(path, mode)
        except cephfs.ObjectExists:
            # Already there: treated as success.
            return True
        except Exception:
            return False
        return True

    def setQuotaBytes(self, path, value):
        """Set the maximum byte quota xattr on ``path``."""
        return self.__setQuota(path, "bytes", value)

    def __setQuota(self, path, kind, value):
        # Only the two quota xattrs CephFS understands are accepted.
        if kind not in ("bytes", "files"):
            return False
        attr_name = "ceph.quota.max_" + kind
        self.fs.setxattr(path=path, name=attr_name, value=value,
                         flags=xattr.XATTR_CREATE)
        return True

    def sync(self):
        """Flush pending filesystem operations."""
        self.fs.sync_fs()
if __name__ == '__main__':
    # Demo: build an NFS-Ganesha export configuration and store it as
    # object "export" in the "nfs-ganesha" pool, then read it back.
    ceph = CephHandler()
    client = ganesha.Client(["192.168.15.100"],
                            access_type=ganesha.AccessType.RW,
                            squash=ganesha.Squash.No_Root_Squash)
    client2 = ganesha.Client(["192.168.15.0/24"],
                             access_type=ganesha.AccessType.RO,
                             squash=ganesha.Squash.Root_Squash)
    fsal = ganesha.CephfsFsal()
    # Bug fix: ``RgwFsal`` was referenced unqualified, which raises
    # NameError at runtime (only ``import ganesha`` exists; the line
    # above uses ganesha.CephfsFsal the same way).
    # NOTE(review): assumes the ganesha module exposes RgwFsal -- confirm.
    # This deliberately replaces the CephfsFsal instance created above,
    # matching the original code's last-assignment-wins behaviour.
    fsal = ganesha.RgwFsal("nfs", "30GAEOGMTRX0SKWBAD19", "DGMsovPHztquIllIKDJNVvf931xke97ABLsobpTI")
    export = ganesha.Export(1234, "/test", [client, client2], fsal)
    export2 = ganesha.Export(7891, "/test2", [client, client2], fsal)
    config = ganesha.GaneshaConfig([export, export2])
    ceph.write("nfs-ganesha", "export", str(config))
    print(ceph.read("nfs-ganesha", "export"))
| StarcoderdataPython |
9617659 | from cbpro_client import cbpro_client
from logger import logger
import json
import sys
import string
@cbpro_client
@logger
def check_stats(cbpro_client, logger, product = 'BTC-USD'):
    """ Check 24-hour stats of a trading pair.

    The ``cbpro_client`` and ``logger`` arguments are injected by the
    decorators above.

    Params:
        - product (pair): string, default 'BTC-USD'
    Return:
        - 24hr stats response, e.g.
        {
            "open": "6745.61000000",
            "high": "7292.11000000",
            "low": "6650.00000000",
            "volume": "26185.51325269",
            "last": "6813.19000000",
            "volume_30day": "1019451.11188405"
        }
        On failure the exchange instead returns a dict containing a
        "message" key, which is logged as a warning here.
    """
    logger.info("Getting price")
    resp = cbpro_client.get_product_24hr_stats(product)
    logger.info("Last Price: {}".format(resp.get("last")))
    # The exchange signals errors by including a "message" key in the body.
    # (Idiom fix: membership test directly on the dict, not on .keys().)
    if 'message' in resp:
        logger.warning("message in keys?")
    else:
        logger.info("Stats Response: {}".format(resp))
    return resp
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.