id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
107507 | import urllib.request
from bs4 import BeautifulSoup
from django.core.exceptions import ObjectDoesNotExist
import re
from standard.models import *
from projects.models import *
dwc_url = 'http://rs.tdwg.org/dwc/terms/'
simple_dwc_url ='http://rs.tdwg.org/dwc/terms/simple/'
def get_dwc_html(url=dwc_url):
    """Fetch the Darwin Core terms page and return the open response handle."""
    return urllib.request.build_opener().open(url)
def get_terms_table(html):
    """Return the <tbody> element containing the 'Begin Terms Table' marker.

    Returns None when no matching table body is found.
    """
    soup = BeautifulSoup(html, 'html.parser')
    marker = re.compile('Begin Terms Table')
    for body in soup.find_all('tbody'):
        if body.find_all(string=marker):
            return body
    return None
def get_dwc_terms():
    """
    Function to scrape Darwin Core terms from the Darwin Core website.
    Class is set to None where Class is empty.
    :return: [(0_name, 1_identifier, 2_class, 3_definition, 4_comments, 5_details), ... ]
    names = [t[0] for t in result_list]
    """
    result_list = []
    html = get_dwc_html()
    terms_table = get_terms_table(html)
    tr_list = terms_table.find_all('tr')
    # Each term occupies six consecutive <tr> rows of the table.
    term_chunks = [tr_list[i:i+6] for i in range(0, len(tr_list), 6)]
    for t in term_chunks:  # removed redundant `[0:]` copy of the list
        # Row 0 holds "Term Name: <name>"; strip that 11-character prefix.
        tname = t[0].string[11:].strip()
        tidentifier, tclass, tdefinition, tcomments, tdetails = [
            t[i].td.next_sibling.contents for i in range(1, 6)]
        # An empty Class cell means the row defines a class itself.
        tclass = tclass[0] if tclass else None
        result_list.append(
            (tname, tidentifier[0], tclass, tdefinition[0], tcomments[0], tdetails[0]))
    return result_list
def chunks(l, n):
    """Yield successive n-sized slices of sequence l (last may be shorter)."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def ppterms(terms):
    """Pretty-print scraped term tuples, one block per term.

    The parameter was previously named ``dict``, shadowing the builtin;
    in-module callers pass it positionally, so the rename is safe.

    :param terms: iterable of 6-tuples
        (name, identifier, class, definition, comments, details)
    """
    pstring = 'Name: {}\nIdentifier: {}\nClass: {}\nDefinition: {}\nComments: {}\nDetails: {}\n------------------------'
    for t in terms:
        try:
            print(pstring.format(t[0], t[1], t[2], t[3], t[4], t[5]))
        except UnicodeEncodeError:
            # Some consoles cannot encode every scraped character; note and continue.
            print('Unable to print {}'.format(t[0]))
def get_my_dwc():
    """Return the project's Darwin Core terms (project id 10) as a list."""
    return list(Term.objects.filter(projects=10))
def compare_terms():
    """Compare scraped Darwin Core term ids against the local project's term URIs.

    NOTE(review): each element of the returned list is a one-element list
    wrapping a set; returning the bare sets would be simpler — confirm
    against callers before changing.
    """
    dwc = get_dwc_terms()  # list of tuples
    mydwc = get_my_dwc()  # list of objects
    dwc_ids = set([t[1] for t in dwc])  # ids should be unique. len(set) should equal len(list)
    mydwc_ids = set([t.uri for t in mydwc])
    return [[dwc_ids - mydwc_ids], [dwc_ids & mydwc_ids], [mydwc_ids - dwc_ids]]  # [left_comp, intersection, right_comp]
def import_dwc_terms():
    """Create or update local Term rows from the scraped Darwin Core terms.

    Each term is linked to the 'Darwin Core' project through ProjectTerm.
    Terms are matched on their URI; existing terms are updated in place.
    """
    dwc = get_dwc_terms()
    dwc_project_object = Project.objects.get(short_name='Darwin Core')
    for t in dwc:
        # Empty Class cell -> the row defines a class; 'all' -> record-level term.
        if t[2] in [None, '']:
            category = TermCategory.objects.get(name='Class')
        elif t[2] == 'all':
            category = TermCategory.objects.get(name='Record')
        else:
            category = TermCategory.objects.get(uri=t[2])
        try:
            # (0_name, 1_identifier, 2_class, 3_definition, 4_comments, 5_details)
            myt = Term.objects.get(uri=t[1])
            myt.name = t[0]
            myt.category = category
            myt.definition = t[3]
            # NOTE(review): this update branch sets `examples` while the create
            # branch below sets `example` (and also sets `remarks`, which this
            # branch never updates) — one of the field names is likely wrong;
            # confirm against the Term model.
            myt.examples = t[4]
            myt.save()
            if dwc_project_object not in myt.projects.all():
                ProjectTerm.objects.create(
                    project=dwc_project_object,
                    term=myt,
                    native=True,
                )
        except ObjectDoesNotExist:
            # No term with this URI yet: create it with standard defaults.
            myt = Term.objects.create(
                name=t[0],
                uri=t[1],
                category=category,
                definition=t[3],
                example=t[4],
                remarks=t[5],
                status=TermStatus.objects.get(name='standard'),
                data_type=TermDataType.objects.get(name='string'),
                uses_controlled_vocabulary=False
            )
            ProjectTerm.objects.create(
                project=dwc_project_object,
                term=myt,
                native=True,
            )
def import_dwc_classes():
    """Create or refresh TermCategory rows for Darwin Core class terms.

    Only scraped rows with an empty Class cell (i.e. rows that define a
    class themselves) are considered; categories are matched on name.
    """
    for term in get_dwc_terms():
        name, uri, term_class, definition = term[0], term[1], term[2], term[3]
        if term_class not in ['', None]:
            continue  # ordinary term, not a class definition
        try:
            category_object = TermCategory.objects.get(name=name)
        except ObjectDoesNotExist:
            TermCategory.objects.create(
                name=name,
                uri=uri,
                description=definition,
                is_occurrence=False,
                tree_visibility=True
            )
        else:
            category_object.uri = uri
            category_object.description = definition
            category_object.save()
# | Classes Terms |
# Simple | 13 169 | 182
# Aux | 2 16 | 18
# -----------------------------
# 15 185 | 200
def dwc_summary():
    """Print the classes/terms count table for the Darwin Core project.

    Expected totals (see the comment table above): 15 classes and 185 terms
    overall; simple DwC 13/169, auxiliary DwC 2/16.
    """
    dwc = Term.objects.filter(projects__short_name='Darwin Core')  # all dwc terms = 200
    dwc_classes = dwc.filter(is_class=True)  # classes = 15
    dwc_terms = dwc.filter(is_class=False)  # terms that are not classes = 185
    sdwc = dwc.exclude(category__name='ResourceRelationship').exclude(category__name='MeasurementOrFact')  # simple = 182
    sdwc_classes = sdwc.filter(is_class=True)  # simple dwc classes = 13
    sdwc_terms = sdwc.filter(is_class=False)  # simple dwc terms = 169
    adwc = dwc.filter(category__name__in=['ResourceRelationship', 'MeasurementOrFact'])  # aux dwc = 18
    adwc_classes = adwc.filter(is_class=True)  # aux dwc classes = 2
    adwc_terms = adwc.filter(is_class=False)  # aux dwc terms = 16
    row1 = '       | Classes  Terms |'
    row2 = 'Simple |  {}       {}  | {}'
    row3 = 'Aux    |   {}       {}  |  {}'
    row4 = '-----------------------------'
    row5 = '          {}      {}  | {}'
    print(row1)
    print(row2.format(sdwc_classes.count(), sdwc_terms.count(), sdwc.count()))
    print(row3.format(adwc_classes.count(), adwc_terms.count(), adwc.count()))
    print(row4)
    print(row5.format(dwc_classes.count(), dwc_terms.count(), dwc.count()))
def print_dwc_terms():
    """Print Darwin Core terms grouped under category headings.

    NOTE(review): each heading prints the whole category group, but the
    query filters only on the first name (``c[0]``) — terms belonging to
    e.g. 'LivingSpecimen' are never listed. If grouping was intended,
    ``category__name__in=c`` is likely what was meant; confirm.
    """
    categories = [['Record'], ['Occurrence'], ['Organism'], ['MaterialSample', 'LivingSpecimen', 'PreservedSpecimen',
                                                             'FossilSpecimen'], ['Event', 'HumanObservation', 'MachineObservation'], ['Location'],
                  ['GeologicalContext'], ['Identification'], ['Taxon']]
    dwc = Term.objects.filter(projects__short_name='Darwin Core')
    for c in categories:
        print_string = ', '.join(c)
        print(print_string)
        # Only the first category of the group is queried (see note above).
        terms = dwc.filter(category__name=c[0]).filter(is_class=False).order_by('name')
        for t in terms:
            print(t)
from mlp.models import *
def update_mlp_terms():
    """
    Procedure to read mlp.models and from that add terms. Not working yet: need way to select data_type based on
    field type, e.g. CharField == string
    :return:
    """
    bio = Biology
    arch = Archaeology
    # get a unique set of terms from all relevant models
    field_list = list(set([f for f in bio._meta.get_fields()] + [f for f in arch._meta.get_fields()]))
    # iterate over fields, check if each is in the DB, if not add, if yes check if assoc. with project
    active_status = TermStatus.objects.get(name='active')
    mlp = Project.objects.get(paleocore_appname='mlp')
    string = TermDataType.objects.get(name='string')
    for field in field_list:
        # NOTE(review): 'name' inside defaults is redundant (it is already the
        # lookup key), and relation fields may lack a `description` attribute —
        # confirm before relying on this for all field types.
        t, created = Term.objects.get_or_create(name=field.name, defaults={'name': field.name,
                                                                           'status': active_status,
                                                                           'data_type': string,
                                                                           'definition': field.description})
        # NOTE(review): created unconditionally, so repeated runs produce
        # duplicate ProjectTerm rows; `pt` is also never used.
        pt = ProjectTerm.objects.create(term=t, project=mlp)
| StarcoderdataPython |
135679 | import time
debug = False
def error_fx(text):
    '''The default error handling, print the text to the console.
    replace with your own function if you want, have it print to your
    wx application or whatever.'''
    # Bug fix: `sys` was used here but never imported anywhere in this
    # module (only `time` is imported at the top).
    import sys
    sys.stderr.write(text)
def output_fx(text):
    '''The default output handling, print text to the console.
    replace with your own function if you want, like have it print to
    a text control in your wx application.'''
    # `print(text)` behaves identically to the old `print text` statement
    # under Python 2 (parentheses around a single argument) and also works
    # under Python 3.
    print(text)
def input_fx(text):
    '''The default user input handler, use raw_input, if you like you
    can replace this with your own function, like have it read from a
    text control.

    NOTE: this module is Python 2 — `raw_input` must stay (Python 2's
    `input()` would eval the user's text).
    '''
    return raw_input(text)
def check_killed():
    '''Hook used by the pyBASIC IDE to abort runaway program threads.

    The standalone interpreter never kills a program, so this default
    always reports "not killed"; the IDE replaces it at runtime.
    '''
    return False
def var_replace(string, var_dict):
    '''
    Replaces variables the user is using ($asdf) with python
    understood ones ( %(asdf)s ), picking the conversion character
    from the type of the variable's current value.

    NOTE(review): floats map to ")d" here, which truncates them when the
    resulting format string is applied — presumably unintended, but kept
    to preserve behaviour; confirm before changing to ")f".
    '''
    terminators = [" ", ",", "\'", "\"", ".", ";", ":", "!", "?"]

    def type_suffix(value):
        # One conversion suffix per supported type; unknown types add nothing.
        if type(value) == type(0.0):
            return ")d"
        if type(value) == type(0):
            return ")i"
        if type(value) == type(""):
            return ")s"
        return ""

    #string = string.replace("\\$", "|DOLLAR SIGN|")
    newstring = ""
    in_var = False
    curr_var = ""
    for char in string:
        # If we are in a var add the current char to the curr var
        if in_var and char not in terminators:
            curr_var += char
        # The start of a variable
        if char == '$':
            in_var = True
            newstring += "%("
        # The end of a var: close it with the type-based suffix
        elif in_var and char in terminators:
            newstring += type_suffix(var_dict[curr_var.strip()])
            newstring += char
            curr_var = ""
            in_var = False
        else:
            newstring += char
    # if closed without finishing variable
    if in_var:
        newstring += type_suffix(var_dict[curr_var.strip()])
    return newstring.replace("|DOLLAR SIGN|", "$")
def get_labels(td):
    '''Map each label name in the token document to its line index.

    'START' always maps to line 0.
    '''
    labels = {"START": 0}
    for position, line in enumerate(td):
        if line[0] in ("LBL", "LABEL"):
            labels[line[1]] = position
    return labels
def error(text, line):
    '''Report an interpreter error with its line number via error_fx.

    The first parameter was renamed from ``str`` to stop shadowing the
    builtin; all in-module calls are positional, so callers are unaffected.
    '''
    error_fx("Error Line %d: %s" % (line, text))
def debug_msg(str):
    '''Emit the message via output_fx only when the module-level ``debug``
    flag is set.

    NOTE(review): the parameter name shadows the builtin ``str``.
    '''
    if debug:
        output_fx(str)
def process_line(index, line, label_list, var_dict):
    '''
    Processes a line of basic to run. Returns the new index along with
    the new variable list.

    ``line`` is a token list whose first element is the statement keyword;
    ``label_list`` maps label names to line indices (see get_labels).
    The caller (run) increments the index after each line, which is why
    GOTO/IF compensate with -1 below.
    '''
    if line[0] in ["STOP"]:
        # Force out of bounds = program stops (run() catches the IndexError)
        index = -100
    # Print statment
    if line[0] in ["PRINT"]:
        try:
            # Substitute $vars, then eval the resulting expression string.
            output_fx( eval(var_replace(line[1], var_dict)%(var_dict)) )
        except KeyError:
            error("No such variable", index)
        except ValueError:
            error("Value Error",index)
        except TypeError:
            error("Type Error", index)
    # Clear Statment: crude screen clear by printing 100 blank lines
    if line[0] in ["CLEAR", "CLS"]:
        for i in range(0,100):
            output_fx("")
    # If statment: recursively process the THEN (line[2]) or ELSE (line[3]) token list
    if line[0] in ["IF"]:
        #debug_msg(var_replace(line[1], var_dict) %(var_dict))
        #debug_msg(eval(var_replace(line[1], var_dict)%(var_dict))))
        if eval(var_replace(line[1], var_dict)%(var_dict)):
            index, var_dict = process_line(index, line[2], label_list, var_dict)
        else:
            index, var_dict = process_line(index, line[3], label_list, var_dict)
        # Compensate for the caller's unconditional increment
        index -= 1
    # Goto Statment: -1 because the caller will increment
    if line[0] in ["GOTO"]:
        index = label_list[line[1]] -1
    # Define Let Statment: evaluate RHS and bind it to the variable name
    if line[0] in ["LET"]:
        try:
            mystr = var_replace(line[2], var_dict)
            x = eval(mystr %(var_dict))
            var_dict[line[1]] = x
        except ValueError:
            error("ValueError", index)
        except TypeError:
            error("Type Error", index)
    # Define Input Statment: prompt, then store as float when possible
    if line[0] in ["INPUT"]:
        x = input_fx(line[1] + "\n")
        try:
            x = float(x)
        except ValueError:
            x = str(x)
        var_dict[line[2]] = x
    debug_msg(var_dict)
    index += 1
    return index, var_dict
def run(td):
    '''
    Runs a BASIC program given a token document.

    Executes lines one at a time via process_line until the index runs
    off the end of the document (an IndexError ends the program), then
    prints exit diagnostics.
    '''
    debug_msg("Lines List:\n"+str(td)+"\n")
    start_time=time.time()
    index = 0 #Current line in file.
    running = True
    label_list = get_labels(td)
    var_dict = {}
    while running:
        try:
            line = td[index]
            index, var_dict = process_line(index, line, label_list,
                                           var_dict)
            if check_killed():
                #Stop by making a long line (out-of-range index ends the loop)
                print "Killed"
                index = len(td)
        except IndexError:
            # Reaching past the last line is the normal termination path.
            running = False
    end_time=time.time()
    output_fx("\n\n")
    output_fx("--------------------------------")
    output_fx("Program exited normally.")
    debug_msg("Debug Mode ON:")
    debug_msg("Variables: " + str(var_dict))
    debug_msg("Labels: " + str(label_list))
    debug_msg("Uptime: " + str(end_time - start_time) + " seconds")
| StarcoderdataPython |
1696387 | """
Grab bag of tests implemented for the various CASA routines. This
isn't a systematic unit test, but if you write something useful put it
here. This collection for be for tests that can be run with only the
pipeline itself in place. There are other test files in the scripts/
directory.
"""
#region Imports and definitions
import os
import glob
import logging
import numpy as np
from scipy.special import erfc
import pyfits # CASA has pyfits, not astropy
# Analysis utilities
import analysisUtils as au
# Pipeline versionining
from .pipelineVersion import version as pipeVer
# CASA stuff
from . import casaStuff
# Pipeline CASA routines
from . import casaCubeRoutines as ccr
from . import casaMaskingRoutines as cma
from . import casaMosaicRoutines as cmr
from . import casaFeatherRoutines as cfr
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
#endregion
def test_estimate_noise():
    """
    Test the noise estimation routine.

    Draws unit-variance Gaussian noise and checks that each estimator
    recovers sigma = 1 within tolerance, logging an error otherwise.
    """
    tol = 1e-2
    # Bug fix: np.random.randn requires integer dimensions; passing the
    # float 1e5 raises TypeError on modern NumPy.
    vec = np.random.randn(100000)
    mad_est = cma.estimate_noise(vec, method='mad')
    std_est = cma.estimate_noise(vec, method='std')
    chauv_est = cma.estimate_noise(vec, method='chauv')
    chauvmad_est = cma.estimate_noise(vec, method='chauvmad')
    logger.info("mad estimate accuracy: "+str(np.abs(mad_est-1.0)))
    if np.abs(mad_est - 1.0) > tol:
        logger.error("mad estimate exceeds tolerance.")
    logger.info("std estimate accuracy: "+str(np.abs(std_est-1.0)))
    if np.abs(std_est - 1.0) > tol:
        logger.error("std estimate exceeds tolerance.")
    logger.info("chauv estimate accuracy: "+str(np.abs(chauv_est-1.0)))
    if np.abs(chauv_est - 1.0) > tol:
        logger.error("chauv estimate exceeds tolerance.")
    logger.info("chauvmad estimate accuracy: "+str(np.abs(chauvmad_est-1.0)))
    if np.abs(chauvmad_est - 1.0) > tol:
        logger.error("chauvmad estimate exceeds tolerance.")
    return(None)
| StarcoderdataPython |
153496 | __version__ = "0.1.0"
__version_info__ = (0, 1, 0)
import loggerpy
logger_level = loggerpy.Level.DEBUG
def get_version():
    """Return the package version string (module-level ``__version__``)."""
    return __version__
# Field names expected in each geographic record (Italian administrative
# data: CAP = postal code, provincia/regione = province/region,
# frazioni/localita = hamlets/localities).
DATA_FIELD = [
    "id",
    "CAP",
    "city",
    "provincia",
    "provincia_iso",
    "regione",
    "stato",
    "stato_iso",
    "latitude",
    "longitude",
    "frazioni",
    "localita"
]
| StarcoderdataPython |
101552 | <reponame>ValeriaRibeiroDev/CursoEmVideo-Scripts-Python<gh_stars>0
# Beginner exercise: prompt the user for their birth date (day, month, year).
# All answers are kept as strings, exactly as typed.
dia=input ('Qual é o dia que você nasceu?')  # day of birth
mês=input ('Qual é o mês que você nasceu?')  # month of birth
ano=input ('Qual é o ano que você nasceu?')  # year of birth
| StarcoderdataPython |
array = [7, 5, 9, 0, 3, 1, 6, 2, 4, 8]
# Selection sort: repeatedly move the smallest remaining element to the front.
for front in range(len(array)):
    smallest = front
    for candidate in range(front + 1, len(array)):
        if array[candidate] < array[smallest]:
            smallest = candidate
    array[front], array[smallest] = array[smallest], array[front]
print(array)
# swap example ##########################
# Tuple unpacking swaps the two elements without a temporary variable.
array = [3, 5]
array[1], array[0] = array[0], array[1]
print(array)
##########################################
| StarcoderdataPython |
3200125 | import pycom
import time
import config
class LED:
    """Drive the on-board RGB LED with a small set of status colours.

    Colours are 0xRRGGBB integers passed to pycom.rgbled; all output is
    suppressed when config.LEDS is falsy.
    """
    SUCCESS = 0x00ff00
    ERROR = 0xff0000
    TRANSMIT = 0x0000ff
    PROCESSING = 0xffff00

    def set(self, color, timeout=None):
        """Show *color*; optionally hold it for *timeout* seconds."""
        if not config.LEDS:
            return
        pycom.rgbled(color)
        if timeout is None:
            return
        time.sleep(timeout)

    def success(self, timeout=None):
        """Flash the success (green) colour."""
        self.set(LED.SUCCESS, timeout)

    def error(self, timeout=None):
        """Flash the error (red) colour."""
        self.set(LED.ERROR, timeout)

    def transmit(self, timeout=None):
        """Flash the transmit (blue) colour."""
        self.set(LED.TRANSMIT, timeout)

    def processing(self, timeout=None):
        """Flash the processing (yellow) colour."""
        self.set(LED.PROCESSING, timeout)
| StarcoderdataPython |
179800 | """
Node for Panda3d that renders a floor.
@author <NAME>
"""
from os import path
from pathlib import Path
from panda3d.core import GeomVertexFormat, Geom, GeomVertexData, GeomVertexWriter, GeomTriangles, GeomNode, \
TextureAttrib, RenderState, SamplerState
from direct.showbase.Loader import Loader
class FloorNode(GeomNode):
    """Geometry node rendering a 10x10 textured floor quad at z=0.

    The quad is built from two triangles (six vertices) with the
    checkerboard texture tiled 10 times in each direction.
    """
    def __init__(self, base):
        # Load texture; nearest-neighbour filtering keeps the checkerboard
        # edges crisp instead of blurred.
        tex = Loader(base).loadTexture((Path(path.realpath(__file__)).parent.parent.parent / "res/images/checkerboard.png").absolute())
        tex.setMagfilter(SamplerState.FT_nearest)
        tex.setMinfilter(SamplerState.FT_nearest)
        # Set up vertex data: position (v3) + texture coordinate (t2),
        # written in lockstep — one texcoord per vertex, in order.
        vdata = GeomVertexData("floor_data", GeomVertexFormat.get_v3t2(), Geom.UHStatic)
        vdata.setNumRows(6)
        vertex = GeomVertexWriter(vdata, "vertex")
        texcoord = GeomVertexWriter(vdata, "texcoord")
        # First triangle: (-5,-5) (-5,5) (5,5)
        vertex.addData3(-5, -5, 0)
        texcoord.addData3(0, 0, 0)
        vertex.addData3(-5, 5, 0)
        texcoord.addData3(0, 10, 0)
        vertex.addData3(5, 5, 0)
        texcoord.addData3(10, 10, 0)
        # Second triangle: (5,5) (5,-5) (-5,-5)
        vertex.addData3(5, 5, 0)
        texcoord.addData3(10, 10, 0)
        vertex.addData3(5, -5, 0)
        texcoord.addData3(10, 0, 0)
        vertex.addData3(-5, -5, 0)
        texcoord.addData3(0, 0, 0)
        # Create primitive referencing the six vertices above
        prim = GeomTriangles(Geom.UHStatic)
        prim.addVertices(0, 1, 2)
        prim.addVertices(3, 4, 5)
        geom = Geom(vdata)
        geom.add_primitive(prim)
        # Initialize geometry node with the texture applied via render state
        GeomNode.__init__(self, "floor")
        attrib = TextureAttrib.make(tex)
        state = RenderState.make(attrib)
        self.addGeom(geom, state)
| StarcoderdataPython |
3216764 | #!/usr/bin/env python3
"""
** Allows you to format the text color. **
------------------------------------------
Specifically allows you to choose from a reduced list,
the highlighting color and the text color.
"""
import colorama
from context_printer.memory import get_lifo
colorama.init() # for windows
def _str_to_color(color):
r"""
** Normalize the name of the color. **
Parameters
----------
color : str
The name of the color that we want to normalize.
Returns
-------
style : str
The color scheme (DIM, NORMAL, BRIGHT).
color : str
The normalized color (BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE).
Raises
------
KeyError
If the color is not in one of the possible solutions.
Examples
--------
>>> from context_printer.color import _str_to_color
>>> _str_to_color('blue')
('NORMAL', 'BLUE')
>>> _str_to_color('Pink\n')
('BRIGHT', 'MAGENTA')
>>>
"""
colors = {
'BLACK': ('DIM', 'BLACK'),
'NOIR': ('DIM', 'BLACK'),
'CHARCOAL': ('NORMAL', 'BLACK'),
'COAL': ('NORMAL', 'BLACK'),
'CHARBON': ('NORMAL', 'BLACK'),
'GRAY': ('BRIGHT', 'BLACK'),
'GREY': ('BRIGHT', 'BLACK'),
'GRIS': ('BRIGHT', 'BLACK'),
'SILVER': ('DIM', 'WHITE'),
'ARGENT': ('DIM', 'WHITE'),
'WHITE': ('BRIGHT', 'WHITE'),
'BLANC': ('BRIGHT', 'WHITE'),
'BURGUNDY': ('DIM', 'RED'),
'BORDEAUX': ('DIM', 'RED'),
'RED': ('NORMAL', 'RED'),
'ROUGE': ('NORMAL', 'RED'),
'OLIVE': ('DIM', 'GREEN'),
'GREEN': ('NORMAL', 'GREEN'),
'VERT': ('NORMAL', 'GREEN'),
'MINT': ('BRIGHT', 'GREEN'),
'MENTHE': ('BRIGHT', 'GREEN'),
'LIME': ('BRIGHT', 'GREEN'),
'CIRTON VERT': ('BRIGHT', 'GREEN'),
'ORANGE': ('DIM', 'YELLOW'),
'BROWN': ('DIM', 'YELLOW'),
'MARRON': ('DIM', 'YELLOW'),
'OCHRE': ('DIM', 'YELLOW'),
'OCRE': ('DIM', 'YELLOW'),
'GOLD': ('NORMAL', 'YELLOW'),
'OR': ('NORMAL', 'YELLOW'),
'MUSTARD': ('NORMAL', 'YELLOW'),
'MOUTARDE': ('NORMAL', 'YELLOW'),
'YELLOW': ('BRIGHT', 'YELLOW'),
'JAUNE': ('BRIGHT', 'YELLOW'),
'NAVY BLUE': ('DIM', 'BLUE'),
'BLEU MARINE': ('DIM', 'BLUE'),
'BLUE': ('NORMAL', 'BLUE'),
'BLEU': ('NORMAL', 'BLUE'),
'INDIGO': ('DIM', 'MAGENTA'),
'PURPLE': ('NORMAL', 'MAGENTA'),
'VIOLET': ('NORMAL', 'MAGENTA'),
'POURPRE': ('NORMAL', 'MAGENTA'),
'MAGENTA': ('BRIGHT', 'MAGENTA'),
'PINK': ('BRIGHT', 'MAGENTA'),
'ROSE': ('BRIGHT', 'MAGENTA'),
'FUCHSIA': ('BRIGHT', 'MAGENTA'),
'TEAL': ('DIM', 'CYAN'),
'BLEU CANARD': ('DIM', 'CYAN'),
'CANARD': ('DIM', 'CYAN'),
'SKY BLUE': ('NORMAL', 'CYAN'),
'BLEU CIEL': ('NORMAL', 'CYAN'),
'CYAN': ('BRIGHT', 'CYAN'),
'TURQUOISE': ('BRIGHT', 'CYAN'),
}
color = str(color).strip().upper()
if color not in colors:
raise KeyError(f'{color} is not a color that is part of the {set(colors)} list')
return colors[color]
def colorize(color, text, *, kind='fg'):
    """
    ** Adds the flags that allow to format the color text. **

    Parameters
    ----------
    color : str or tuple
        A color name, or a (style, color) pair as returned by
        ``_str_to_color``.
    text : str
        The text to wrap with color escape codes.
    kind : str, optional
        'fg' colors the text itself, 'bg' the background.

    Returns
    -------
    formatted_text : str
        *text* surrounded by the start flags and the reset flag.
    """
    if not isinstance(color, tuple):
        color = _str_to_color(color)
    style_name, color_name = color
    style = getattr(colorama.Style, style_name)
    if kind == 'fg':
        palette = colorama.Fore
    elif kind == 'bg':
        palette = colorama.Back
    else:
        raise ValueError(f"'kind' can only take the values 'fg' or 'bg', not '{kind}'")
    return style + getattr(palette, color_name) + text + colorama.Style.RESET_ALL
def format_text(text, **formatting):
    r"""
    ** Format the text with the provided parameters. **

    Parameters
    ----------
    text : str
        Text to be printed, on a single line (no \n character).
    color : str, optional
        Foreground color of the text.
    bg : str, optional
        Background highlight color.
    blink : boolean, optional
        Make the text blink (not supported by every console).
    bold : boolean, optional
        Print the text in boldface.
    underline : boolean, optional
        Print the text underlined.

    Examples
    --------
    >>> from context_printer.color import format_text
    >>> format_text('text')
    'text'
    >>>
    """
    if 'color' in formatting:
        text = colorize(formatting['color'], text)
    if 'bg' in formatting:
        text = colorize(formatting['bg'], text, kind='bg')
    # ANSI SGR codes, applied innermost-first: blink(5), bold(1), underline(4).
    for flag, code in (('blink', '5'), ('bold', '1'), ('underline', '4')):
        if formatting.get(flag, False):
            text = '\033[' + code + 'm' + text + '\033[0m'
    return text
def get_section_header(header_car='█ ', *, partial=False):
    """
    ** Retrieves the tag from the beginning of the current section. **

    Renders one formatted ``header_car`` per nesting level, using each
    level's stored formatting context. With ``partial=True`` the innermost
    level is skipped and replaced by blank padding of the same width.
    """
    header = ''
    # Walk the context stack outermost-first, skipping the current context
    # itself ([-2::-1]); in partial mode also stop before element 0.
    for context in get_lifo().lifo[-2::-1] if not partial else get_lifo().lifo[-2:0:-1]:
        header += format_text(header_car, **context)
    if partial:
        header += ' '*len(header_car)
    return header
| StarcoderdataPython |
def make_subject(sector: str, year: str, q: str):
    """Build the subject line for a sector/quarter report."""
    return '[업종: {}] {}년도 {}분기'.format(sector, year, q)
def make_strong_tag(value: str):
    """Wrap *value* in an HTML <strong> tag."""
    return '<strong>' + value + '</strong>'
def make_p_tag(value: str):
    """Wrap *value* in an HTML <p> tag."""
    return '<p>' + value + '</p>'
def make_img_tag(name: str, src: str):
    """Build an HTML <img> tag with *src* as source and *name* as alt text."""
    return '<img src="{}" alt="{}">'.format(src, name)
def make_new_line(repeat: int = 1):
    # Returns `repeat` empty-looking paragraph tags used as vertical spacing.
    # NOTE(review): the character inside the <p> may be a non-breaking space
    # rather than a plain space — kept byte-identical; confirm before editing.
    return '<p> </p>' * repeat
def replace_template_str(template: str, values: dict, parameter_format: list = None):
    """Substitute ``{{key}}``-style placeholders in *template* with their values.

    Bug fix: the previous version re-substituted into the *original*
    template on every loop iteration, so only the last key's replacement
    survived (and an empty *values* returned None instead of the template).

    :param template: text containing placeholders.
    :param values: mapping of placeholder name -> replacement string.
    :param parameter_format: two-element list with the opening and closing
        delimiters; defaults to ['{{', '}}'].
    :return: the template with all placeholders replaced.
    """
    import re
    if parameter_format is None:
        parameter_format = ['{{', '}}']
    result = template
    for key, value in values.items():
        # NOTE: the delimiters and key are used as a regex pattern, and the
        # value as a regex replacement (backslashes in values are special).
        pattern = parameter_format[0] + key + parameter_format[1]
        result = re.sub(pattern, value, result)
    return result
class Template:
    """Builds a simple HTML fragment from a title, an image and a description."""
    _title: str = ''
    _description: str = ''
    _image: str = ''

    def __init__(self, title: str, image: dict, description: str = ''):
        """
        Args:
            title: heading text of the fragment.
            image: dict with 'name' (alt text) and 'src' keys.
            description: optional body text.
        """
        self.set_title(title)
        self.set_image(image['name'], image['src'])
        self.set_description(description)

    def set_title(self, title: str):
        """Store the title wrapped in a <p> tag; empty titles are ignored."""
        if title:
            self._title = make_p_tag(title)
        return self

    def set_image(self, name: str, src: str):
        """Store the image as an <img> tag; requires both name and src."""
        if name and src:
            self._image = make_img_tag(name, src)
        return self

    def set_description(self, description: str):
        """Store the description wrapped in a <p> tag; empty values are ignored."""
        if description:
            self._description = make_p_tag(description)
        return self

    def make(self):
        """Assemble the final HTML string: title, spacer, description, image."""
        pieces = [
            self._title,
            make_new_line(1),
            self._description,
            self._image,
            make_new_line(3),
        ]
        return ''.join(pieces)
| StarcoderdataPython |
1701059 | <filename>ttracker/model/thread_logger/deck.py
from ttracker.model.items.deck import DeckList
class CreateDeckV3:
    """Thread-log event: a deck list was created (API v3 payload)."""
    def __init__(self, payload):
        # Wrap the raw payload describing the new deck in the DeckList model.
        self.deck = DeckList(payload)
class GetDeckListsV3:
    """Thread-log event: the full collection of deck lists was fetched."""
    def __init__(self, payload):
        self.deck_lists = self.get_deck_lists(payload)
    def get_deck_lists(self, payload):
        """Wrap each raw deck entry of the payload in a DeckList model."""
        return [DeckList(deck) for deck in payload]
class GetPreconDecksV3:
    """Thread-log event: preconstructed decks fetched; payload is ignored."""
    def __init__(self, payload):
        # Payload intentionally unused — precon decks carry no tracked state yet.
        pass
class UpdateDeckV3:
    """Thread-log event: an existing deck list was updated (API v3 payload)."""
    def __init__(self, payload):
        # Wrap the updated deck payload in the DeckList model.
        self.deck = DeckList(payload)
| StarcoderdataPython |
1633683 | <filename>sphinxpapyrus/docxbuilder/nodes/description.py
# -*- coding: utf-8 -*-
"""
Translate docutils node description formatting.
each description start will processed with visit() and finished with depart()
"""
from docutils.nodes import Node
from sphinxpapyrus.docxbuilder.translator import DocxTranslator
node_name = "description"
def visit(visitor: DocxTranslator, node: Node):
    """Begin a description node: point the translator at the current table cell."""
    assert isinstance(visitor, DocxTranslator)
    assert isinstance(node, Node)
    current = visitor.tables[-1]
    # Table state layout: [table, row, col, ...]
    cell = current[0].cell(current[1], current[2])
    visitor.p_parents.append(cell)
    visitor.p = cell.paragraphs[0]
def depart(visitor: DocxTranslator, node: Node):
    """Finish a description node: leave the cell and advance to the next column."""
    assert isinstance(visitor, DocxTranslator)
    assert isinstance(node, Node)
    visitor.p_parents.pop()
    visitor.p = None
    visitor.tables[-1][2] += 1
| StarcoderdataPython |
12863 | from .common import *
HEADER = r'''\usepackage{tikz}
\definecolor{purple}{cmyk}{0.55,1,0,0.15}
\definecolor{darkblue}{cmyk}{1,0.58,0,0.21}
\usepackage[colorlinks,
linkcolor=black,
urlcolor=darkblue,
citecolor=purple]{hyperref}
\urlstyle{same}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{conjecture}[theorem]{Conjecture}
\newtheorem{claim}[theorem]{Claim}
\theoremstyle{definition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{remark}[theorem]{Remark}
'''
def render_pdfmeta(authors, title):
    """Render the hypersetup block carrying PDF title/author metadata."""
    names = authors_list(authors, short=True)
    return (
        '\\hypersetup{%\n'
        f'    pdftitle = {{{title}}},\n'
        f'    pdfauthor = {{{names}}}}}\n'
    )
def render_author(author):
    """Render \\author plus optional \\address and \\email commands."""
    parts = [render_command('author', author['name'])]
    if 'affiliation' in author:
        parts.append(render_command(
            'address', ", ".join(v for v in author['affiliation'].values())))
    if 'email' in author:
        parts.append(render_command('email', author['email']))
    return ''.join(parts)
def render_funding(funds):
    """Collect all grant notes into a single \\thanks command."""
    notes = [grant['note'] for grant in funds if 'note' in grant]
    return render_command('thanks', '\n'.join(notes))
def render_acks(acks):
    """Render the Acknowledgements subsection with the trimmed text."""
    body = acks.strip()
    return '\\subsection*{Acknowledgements}\n\n' + body + '\n'
def header(data, cname=None, classoptions=None, **kwargs):
    """Render the LaTeX preamble up to and including \\maketitle.

    :param data: paper metadata dict (title, authors, optional shorttitle,
        funding, abstract, keywords).
    :param cname: document class name; defaults to 'amsart'.
    :param classoptions: document class options. The pseudo-option
        'noheader' suppresses the package/theorem HEADER block.
    :param kwargs: 'include' may list extra files to include.
    """
    if cname is None:
        cname = 'amsart'
    # Bug fix: the old signature used a mutable default ([]) and
    # .remove() mutated the caller's list in place; copy defensively.
    classoptions = list(classoptions) if classoptions is not None else []
    if 'noheader' in classoptions:
        classoptions.remove('noheader')
        include_header = False
    else:
        include_header = True
    headers = [
        render_command(
            'documentclass',
            cname,
            ','.join(classoptions)),
        render_encs]
    if include_header:
        headers.append(HEADER)
    if 'include' in kwargs:
        headers += [include(file) for file in kwargs['include']]
    shorttitle = data['shorttitle'] if 'shorttitle' in data else ''
    headers += [
        render_pdfmeta(data['authors'], data['title']),
        begin_document,
        render_command('title', data['title'], shorttitle),
        '\n'.join(map(render_author, data['authors']))]
    if 'funding' in data:
        headers.append(render_funding(data['funding']))
    if 'abstract' in data:
        headers.append(render_abstract(data['abstract']))
    if 'keywords' in data:
        headers.append(render_keywords(data['keywords']))
    headers += [maketitle, '']
    return '\n'.join(headers)
def footer(data, bib):
    """Render acknowledgements, the bibliography, and \\end{document}."""
    parts = ['']
    if 'acknowledgements' in data:  # and not anonymous:
        parts.append(render_acks(data['acknowledgements']))
    if bib:
        parts.append(render_bib('alphaurl', bib))
    parts.append(end_document)
    return '\n'.join(parts)
| StarcoderdataPython |
1616788 | #!/usr/bin/env python3
from collections import defaultdict
MAX_N = 10 ** 6 + 1
def main():
    """Read the game input from stdin and print Bob's winning color, or -1.

    Alice plays color ``a``; a color ``c`` beats it if its running count
    never falls behind ``a``'s while scanning the sequence.
    """
    n, a = ints()
    colors = list(ints())
    # Count of each color
    counts = [0] * MAX_N
    # Map from count -> {Set of colors with this count...}
    index = defaultdict(set)
    index[0] = set(colors)
    for c in colors:
        k = counts[c]
        # Check if color c is...
        # (1) previously eliminated (count flagged -1)
        # (2) loses in this iteration which is the first encounter
        if k == -1 or k < counts[a]:
            continue
        # Update the count of color c and move it between count buckets
        counts[c] = k + 1
        index[k].remove(c)
        index[k + 1].add(c)
        if c == a:
            # Alice's color advanced: everything still at the old count loses.
            for x in index[k]:
                counts[x] = -1
            del index[k]
        else:
            assert counts[c] >= counts[a]
    # Any surviving color other than a is a valid answer for Bob.
    sols = [c for c in range(MAX_N) if counts[c] > 0 and c != a]
    if len(sols) > 0:
        check(colors, a, sols[0])
        print(sols[0])
    else:
        print(-1)
def check(colors, a, b):
    """Checks that Bob wins with color b if Alice chooses color a."""
    occurrences_a = sum(1 for c in colors if c == a)
    occurrences_b = sum(1 for c in colors if c == b)
    assert occurrences_a <= occurrences_b, '%d is not a valid solution!' % b
def ints():
    """Returns a generator of integers from the next input line."""
    tokens = input().split()
    return (int(token) for token in tokens)
if __name__ == '__main__':
main()
| StarcoderdataPython |
5888 | from mushroom_rl.utils.plots import PlotItemBuffer, DataBuffer
from mushroom_rl.utils.plots.plot_item_buffer import PlotItemBufferLimited
class RewardPerStep(PlotItemBuffer):
    """
    Plot item showing the reward obtained at every step.
    """
    def __init__(self, plot_buffer):
        """
        Constructor.

        Args:
            plot_buffer (DataBuffer): data buffer to be used.
        """
        super().__init__("Step_Reward", [dict(data_buffer=plot_buffer)])
class RewardPerEpisode(PlotItemBuffer):
    """
    Plot item showing the accumulated reward per episode.
    """
    def __init__(self, plot_buffer):
        """
        Constructor.

        Args:
            plot_buffer (DataBuffer): data buffer to be used.
        """
        super().__init__("Episode_Reward", [dict(data_buffer=plot_buffer)])
class Actions(PlotItemBufferLimited):
    """
    Plot item showing the agent's actions, with optional limit lines.
    """
    def __init__(self, plot_buffers, maxs=None, mins=None):
        """
        Constructor.

        Args:
            plot_buffers (list): data buffers to be used;
            maxs (list, None): max value per data buffer; a None entry
                draws no max line;
            mins (list, None): min value per data buffer; a None entry
                draws no min line.
        """
        super().__init__("Actions", plot_buffers, maxs=maxs, mins=mins)
class Observations(PlotItemBufferLimited):
    """
    Plot item showing the observations, with optional limit lines.
    """
    def __init__(self, plot_buffers, maxs=None, mins=None, dotted_limits=None):
        """
        Constructor.

        Args:
            plot_buffers (list): data buffers to be used;
            maxs (list, None): max value per data buffer; a None entry
                draws no max line;
            mins (list, None): min value per data buffer; a None entry
                draws no min line;
            dotted_limits (list, None): per-buffer booleans; True draws the
                corresponding limit dotted instead of solid.
        """
        super().__init__("Observations", plot_buffers, maxs=maxs,
                         mins=mins, dotted_limits=dotted_limits)
class LenOfEpisodeTraining(PlotItemBuffer):
    """
    Plot item showing the length of each training episode.
    """
    def __init__(self, plot_buffer):
        """
        Constructor.

        Args:
            plot_buffer (DataBuffer): data buffer to be used.
        """
        super().__init__("Len of Episode", [dict(data_buffer=plot_buffer)])
| StarcoderdataPython |
1767006 | # Generated by Django 3.0.5 on 2020-10-28 10:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the web_desc and web_foot fields to the SysConfigs model.

    NOTE(review): ``null='Flase'`` and ``preserve_default='Flase'`` are
    truthy *strings* (a likely typo for ``False``), so they do not behave
    like ``null=False``. Because this migration may already be applied,
    the values are left untouched — confirm intent before regenerating.
    """
    dependencies = [
        ('configs', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='sysconfigs',
            name='web_desc',
            field=models.CharField(help_text='主页下方的副标题', max_length=200, null='Flase', verbose_name='系统描述'),
            preserve_default='Flase',
        ),
        migrations.AddField(
            model_name='sysconfigs',
            name='web_foot',
            field=models.CharField(help_text='整个系统目标页尾说明', max_length=200, null='Flase', verbose_name='系统页尾'),
            preserve_default='Flase',
        ),
    ]
| StarcoderdataPython |
1707710 | """
File: show_results.py
Author: <NAME>
TFG
"""
import argparse
import os
import keras
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator
from sklearn import metrics
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.metrics import roc_curve, auc
from vis.utils import utils
from vis.visualization import visualize_saliency
from show_results_binary import get_class
def plot_roc_curve(y_score, y, fname):
    """
    Plots ROC curve

    Parameters
    ----------
    y_score: Predicted class
    y: True class
    fname: File name where the ROC curve will be stored

    Returns
    -------
    None; the figure is written to *fname*.
    """
    fpr_keras, tpr_keras, thresholds_keras = roc_curve(y, y_score)
    auc_keras = auc(fpr_keras, tpr_keras)
    plt.figure(1)
    # Diagonal reference line for a no-skill classifier.
    plt.plot([1.02, -0.02], [-0.02, 1.02], 'k--', lw=2)
    # Reversed x limits: the x-axis is specificity (= 1 - FPR).
    plt.xlim([1.02, -0.02])
    plt.ylim([-0.02, 1.02])
    plt.plot(1 - fpr_keras, tpr_keras, label='AUC = {0:0.2f}'.format(auc_keras))
    plt.xlabel('Specificity')
    plt.ylabel('Sensitivity')
    plt.title('ROC curve')
    plt.legend(loc='best')
    plt.savefig(fname)
def plot_saliency_map(model, x, fname):
    """
    Plots the model's average saliency map on the test set

    Parameters
    ----------
    model: Deep-learning binary model
    x: Test images (assumed 512x512 — TODO confirm against the model input)
    fname: File name to store the saliency map

    Returns
    -------
    None; the averaged map is written to *fname*.
    """
    # Find the index of the to be visualized layer above
    layer_index = utils.find_layer_idx(model, 'dense_3')
    # Swap softmax with linear to get better results
    model.layers[layer_index].activation = keras.activations.linear
    model = utils.apply_modifications(model)
    # Average the saliency maps of the first m test images.
    saliency = np.zeros((512, 512))
    m = 100
    for i in range(m):  # Get input
        input_image = x[i]
        print(i)
        saliency += visualize_saliency(model, layer_index, filter_indices=0, seed_input=input_image)
    saliency /= m
    fig = plt.figure()
    # Min-max normalize to [0, 255] for display.
    cax = plt.imshow(((saliency - saliency.min()) / (saliency.max() - saliency.min()) * 255).astype(np.uint8),
                     cmap='jet')
    cbar = fig.colorbar(cax, ticks=[0, 110, 220])
    cbar.ax.set_yticklabels(['Low', 'Medium', 'High'])  # horizontal colorbar
    plt.savefig(fname)
def plot_tsne(model, x, y, fname):
    """
    Plot a 2-D t-SNE projection of intermediate model features on the train set.

    Parameters
    ----------
    model: deep-learning binary model (must contain a layer named 'dense_1')
    x: train images
    y: train labels
    fname: file name where the t-SNE plot will be saved
    Returns
    -------
    None
    """
    # First apply PCA to reduce to 30 dims
    pca = PCA(n_components=30)
    # Then TSNE to reduce to 2 dims with 1000 iterations and learning rate of 200
    tsne = TSNE(n_components=2, n_iter=1000, learning_rate=200)
    # Get the output of layer 'dense_1' (1024 features) to reduce the dimension of that output
    layer_name = 'dense_1'
    intermediate_output = model.get_layer(layer_name).output
    # Truncated model that emits the intermediate features.
    # NOTE(review): relies on a bare 'keras' name imported elsewhere in this
    # file; the compile() call before predict() looks unnecessary -- confirm.
    intermediate_model = keras.Model(inputs=model.input, outputs=intermediate_output)
    intermediate_model.compile(optimizer=keras.optimizers.Adam(lr=0.0001),
                               loss='binary_crossentropy',
                               metrics=['acc'])
    # Get the features generated when passing X data
    features = intermediate_model.predict(x)
    # Apply PCA and t-SNE
    pca_result = pca.fit_transform(features)
    tsne_result = tsne.fit_transform(pca_result)
    # Prepare data to be visualized
    tsne_data = dict()
    tsne_data['tsne-2d-one'] = tsne_result[:, 0]
    tsne_data['tsne-2d-two'] = tsne_result[:, 1]
    tsne_data['y'] = get_class(y)  # human-readable class labels for the hue
    # Visualize the data reduced to 2 dimensions
    plt.figure(figsize=(16, 10))
    sns.scatterplot(
        x="tsne-2d-one", y="tsne-2d-two",
        hue="y",
        palette=sns.hls_palette(2, l=.6, s=.7),
        data=tsne_data,
        legend="full",
        alpha=0.3
    )
    plt.savefig(fname)
def plot_cm(y_test, y_pred):
    """
    Print specificity, sensitivity, precision, F1-score and the raw
    confusion-matrix counts (TP, TN, FP, FN) of a binary prediction.

    Parameters
    ----------
    y_test: True class labels
    y_pred: Predicted scores, thresholded at 0.5
    Returns
    -------
    None
    """
    # Binarise the raw scores at the 0.5 decision threshold.
    predicted = np.array([score > 0.5 for score in y_pred])
    tn, fp, fn, tp = metrics.confusion_matrix(y_test, predicted).ravel()
    report = {
        'Sensitivity': metrics.recall_score(y_test, predicted),  # tp / (tp + fn)
        'Specificity': tn / (tn + fp),
        'Precision': metrics.precision_score(y_test, predicted),
        'F1-Score': metrics.f1_score(y_test, predicted),
        'TP': tp,
        'TN': tn,
        'FP': fp,
        'FN': fn,
    }
    print('############################################')
    for label, value in report.items():
        print(label + ': ', value)
    print()
def main():
    """Command-line entry point: load a trained binary Keras model, evaluate
    it on the train/test image directories and produce confusion-matrix
    stats plus ROC / t-SNE / saliency-map plots."""
    ap = argparse.ArgumentParser()
    ap.add_argument("-d", "--directory", default=None, help="path to the directory where the images are stored")
    ap.add_argument("-m", "--model", default=None, help="path to the file where the model is stored")
    ap.add_argument("-o", "--output", default='ad_vs_mci_vs_cn2.png',
                    help="output filename where the images will be stored")
    args = ap.parse_args()
    base_dir = None
    model_file = None
    output = args.output
    # Both --directory and --model are effectively required; bail out early
    # with a message instead of raising.
    if args.directory is not None:
        if not os.path.isdir(args.directory):
            print("Directory \'%s\' does not exist" % args.directory)
            return
        base_dir = args.directory
    else:
        print("You must specify the directory where the images are stored (see help).")
        return
    if args.model is not None:
        if not os.path.isfile(args.model):
            print("File \'%s\' does not exist" % args.model)
            return
        model_file = args.model
    else:
        print("You must specify the file where the model is stored (see help).")
        return
    # Load the model architecture and its weights
    model = load_model(model_file)
    print(model.summary())
    # NOTE(review): augmentation parameters are configured here although the
    # generator is only used below to read images for evaluation -- confirm
    # whether augmentation during evaluation is intended.
    train_datagen = ImageDataGenerator(
        rotation_range=8,
        shear_range=np.pi / 16,
        width_shift_range=0.10,
        height_shift_range=0.10,
        zoom_range=0.08,
        horizontal_flip=False,
        vertical_flip=False,
    )
    test_datagen = ImageDataGenerator()
    # Set the batch size and calculate the number of steps per epoch
    input_size = 512
    batch_size = 8
    train_dir = os.path.join(base_dir, 'train')
    test_dir = os.path.join(base_dir, 'test')
    train_generator = train_datagen.flow_from_directory(
        train_dir,
        target_size=(input_size, input_size),
        batch_size=batch_size,
        class_mode='binary',
        shuffle=True
    )
    test_generator = test_datagen.flow_from_directory(
        test_dir,
        target_size=(input_size, input_size),
        batch_size=batch_size,
        class_mode='binary',
        shuffle=True
    )
    print(test_generator.class_indices)
    nb_train_samples = len(train_generator.filenames)
    nb_test_samples = len(test_generator.filenames)
    # Materialise both generators into in-memory arrays so the same images
    # can be reused for evaluation, prediction and the plots below.
    x_train = []
    y_train = []
    x_test = []
    y_test = []
    batches = 0
    for x_batch, y_batch in train_generator:
        for i in range(len(y_batch)):  # Get input
            x_train.append(x_batch[i])
            y_train.append(y_batch[i])
        batches += 1
        if batches >= nb_train_samples / batch_size:
            # we need to break the loop by hand because
            # the generator loops indefinitely
            break
    batches = 0
    for x_batch, y_batch in test_generator:
        for i in range(len(y_batch)):  # Get input
            x_test.append(x_batch[i])
            y_test.append(y_batch[i])
        batches += 1
        if batches >= nb_test_samples / batch_size:
            # we need to break the loop by hand because
            # the generator loops indefinitely
            break
    # NOTE(review): test_generator.classes lists labels in unshuffled order,
    # while y_test above was collected with shuffle=True -- printed for
    # reference only; the orders do not correspond.
    print(test_generator.classes)
    x_train = np.array(x_train)
    x_test = np.array(x_test)
    y_train = np.array(y_train)
    y_test = np.array(y_test)
    # Get the score of the model with test dataset
    _, train_accuracy = model.evaluate(x_train, y_train, batch_size=batch_size)
    _, test_accuracy = model.evaluate(x_test, y_test, batch_size=batch_size)
    print('Train accuracy: %.3f, Test accuracy: %.3f' % (train_accuracy, test_accuracy))
    print(output)
    # Flip scores and labels so that original class 0 becomes the "positive"
    # class for the confusion matrix and ROC curve below.
    y_pred = model.predict(x_test)
    y_pred = 1 - y_pred
    y_test2 = np.zeros(y_test.shape)
    idx_ad = np.where(y_test == 0)[0]
    y_test2[idx_ad] = 1
    y_test = y_test2
    plot_cm(y_test, y_pred)
    print('Plotting ROC curve...')
    plot_roc_curve(y_pred, y_test, fname='ROC_curve-' + output)
    print('Plotting t-SNE...')
    plot_tsne(model, x_train, y_train, fname='t_SNE-' + output)
    print('Plotting saliency map...')
    plot_saliency_map(model, x_test, fname='saliency_map-' + output)
if __name__ == '__main__':
    """
    Entry point: configure the TensorFlow session (GPU 0 only, on-demand
    memory growth) and run the evaluation.  (The previous docstring here
    was copy-pasted from an unrelated template-matching project.)
    """
    # GPU memory growth and just use GPU 0
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # only see the gpu 0
    config = tf.ConfigProto()  # TF1-style session configuration
    config.gpu_options.allow_growth = True  # allocate GPU memory on demand
    sess = tf.Session(config=config)
    set_session(sess)  # set this TensorFlow session as the default session for Keras
    main()
| StarcoderdataPython |
93896 | """TDD support for a function cleaning cluttered HTML code.
"""
import re
import unittest
CLUTTERED = ''' <P CLASS="western"><A NAME="ScID:17"></A><A NAME="ScID:120"></A><!-- Climb up rope to safety. Back in ship for hyperspace. --><FONT COLOR="#000000"><SPAN STYLE="text-decoration: none"><FONT FACE="monospace"><FONT SIZE=3><SPAN STYLE="font-style: normal"><SPAN STYLE="font-weight: normal"><SPAN STYLE="background: #ffffff">And
now for something </SPAN></SPAN></SPAN></FONT></FONT></SPAN></FONT><FONT COLOR="#000000"><SPAN STYLE="text-decoration: none"><FONT FACE="monospace"><FONT SIZE=3><I><SPAN STYLE="font-weight: normal"><SPAN STYLE="background: #ffffff">completely</SPAN></SPAN></I></FONT></FONT></SPAN></FONT><FONT COLOR="#000000"><SPAN STYLE="text-decoration: none"><FONT FACE="monospace"><FONT SIZE=3><SPAN STYLE="font-style: normal"><SPAN STYLE="font-weight: normal"><SPAN STYLE="background: #ffffff">
different.</SPAN></SPAN></SPAN></FONT></FONT></SPAN></FONT></P>
'''
CLEANED = '<P CLASS="western"><A NAME="ScID:17"></A><A NAME="ScID:120"></A><!-- Climb up rope to safety. Back in ship for hyperspace. -->And now for something <I>completely</I> different.</P>'
CONVERTED = '<P CLASS="western"><A NAME="ScID:17"></A><A NAME="ScID:120"></A><!-- Climb up rope to safety. Back in ship for hyperspace. -->And now for something [i]completely[/i] different.</P>'
STRIPPED = '<P CLASS="western"><A NAME="ScID:17"></A><A NAME="ScID:120"></A><!-- Climb up rope to safety. Back in ship for hyperspace. -->And now for something completely different.</P>'
def clean(text):
    """Remove FONT/SPAN markup clutter and collapse whitespace.

    Strips every ``<font ...>``, ``</font>``, ``<span ...>`` and
    ``</span>`` tag case-insensitively, joins lines with spaces and
    collapses runs of spaces into single spaces (trimming the ends).
    """
    # '</*' matches both opening and closing tags.  IGNORECASE replaces the
    # previous pair of separate lower-/upper-case passes and additionally
    # fixes mixed-case variants such as <Font> that those passes missed.
    text = re.sub('</*font.*?>', '', text, flags=re.IGNORECASE)
    text = re.sub('</*span.*?>', '', text, flags=re.IGNORECASE)
    text = text.replace('\n', ' ')
    while '  ' in text:
        text = text.replace('  ', ' ').strip()
    return text
def convert(text):
    """Convert HTML italic/bold tags into BBCode-style markers.

    ``<i>``/``<em>`` become ``[i]``, ``<b>`` becomes ``[b]``; upper- and
    lower-case variants of each tag are handled.
    """
    replacements = [
        ('<i>', '[i]'), ('<I>', '[i]'),
        # Bug fix: the opening <em>/<EM> tags were previously left
        # untouched even though their closing counterparts were converted.
        ('<em>', '[i]'), ('<EM>', '[i]'),
        ('</i>', '[/i]'), ('</I>', '[/i]'),
        ('</em>', '[/i]'), ('</EM>', '[/i]'),
        ('<b>', '[b]'), ('<B>', '[b]'),
        ('</b>', '[/b]'), ('</B>', '[/b]'),
    ]
    for old, new in replacements:
        text = text.replace(old, new)
    return text
def strip(text):
    """Remove all BBCode italic/bold markers from *text*."""
    for marker in ('[i]', '[/i]', '[b]', '[/b]'):
        text = text.replace(marker, '')
    return text
class Cleaner(unittest.TestCase):
    """Unit tests for the HTML cleaning pipeline.

    Each test feeds the expected output of the previous stage into the
    next: CLUTTERED -> clean() -> CLEANED -> convert() -> CONVERTED
    -> strip() -> STRIPPED.
    """
    def test_cleaner(self):
        # clean() must remove FONT/SPAN clutter and collapse whitespace.
        result = clean(CLUTTERED)
        # print(result)
        self.assertEqual(result, CLEANED)
    def test_converter(self):
        # convert() turns <i>/<b> style tags into BBCode markers.
        result = convert(CLEANED)
        # print(result)
        self.assertEqual(result, CONVERTED)
    def test_stripper(self):
        # strip() removes the BBCode markers entirely.
        result = strip(CONVERTED)
        # print(result)
        self.assertEqual(result, STRIPPED)
def main():
    """Run the unit-test suite for the HTML cleaning helpers."""
    unittest.main()
if __name__ == '__main__':
main()
| StarcoderdataPython |
3283861 | import requests
from itertools import chain
from bs4 import BeautifulSoup, Tag
BASE_URL = 'https://mvnrepository.com/artifact/{group}/{artifact}/{number}'
class MvnRepository:
    """Thin HTML-scraping client for mvnrepository.com artifact pages."""
    def __init__(self, http_compression=True):
        # One shared session gives connection pooling across page fetches.
        self._session = requests.Session()
        if not http_compression:
            # Ask the server for an uncompressed response body.
            self._session.headers.update({'Accept-Encoding': 'identity'})
    def fetch_licenses(self, name, number):
        """Return the license names for artifact *name* at version *number*.

        Tries every artifact-id variation of *name* until a page both
        exists (HTTP 200) and lists at least one license.  Returns []
        when nothing matches; raises for HTTP errors other than 404.
        """
        for artifact_id in name.artifact_id_variations:
            response = self.__get_page(name.group_id, artifact_id, number)
            if response.ok:
                licenses = self.__process_html(response.text)
                if licenses:
                    return licenses
            elif response.status_code != 404:
                response.raise_for_status()
        return []
    def __get_page(self, group_id, artifact_id, number):
        # Fetch the artifact/version page for the given Maven coordinates.
        return self._session.get(BASE_URL.format_map({
            'group': group_id,
            'artifact': artifact_id,
            'number': number
        }))
    def __process_html(self, html):
        # Returns the license names, or None when the page has no
        # 'License' table header at all.
        # NOTE(review): bs4's 'text=' keyword is the legacy spelling of
        # 'string=' -- confirm against the pinned bs4 version.
        soup = BeautifulSoup(html, 'html.parser')
        license_tag = soup.find('th', text='License')
        if license_tag:
            return self.__find_licenses(self.__sibling_tags(license_tag))
    @staticmethod
    def __find_licenses(tags):
        # Collect the text of every <span class="b lic"> below the tags.
        license_tags = chain.from_iterable(i.find_all('span', class_='b lic') for i in tags)
        return [i.text for i in license_tags]
    @staticmethod
    def __sibling_tags(tag):
        # Following siblings of *tag*, skipping plain text nodes.
        return (i for i in tag.next_siblings if isinstance(i, Tag))
| StarcoderdataPython |
3314369 | <reponame>Conchsk/mlapt
# Connection settings for the local HDFS endpoint.
HDFS_HOST = '127.0.0.1'
HDFS_PORT = 9870  # NOTE(review): presumably the NameNode HTTP/WebHDFS port -- confirm
HDFS_USER = 'conch'
| StarcoderdataPython |
1652967 | <filename>ds/backpropogation.py
import numpy as np
# Worked example of a single backpropagation step through the tiny network
#   yhat = w3 * tanh(w2 * tanh(w1 * x))
x = 5.0
y = 7.0
lmb = 0.1  # learning rate
w1 = 1.0
w2 = -1.0
w3 = 2.0
# Forward pass
u = np.tanh(w1*x)
z = np.tanh(w2*u)
yhat = w3*z
# Backward pass for the squared-error loss L = (yhat - y)^2
dL_yhat = 2 * (yhat - y)  # NOTE(review): computed but never used below
dL_w3 = 2 * (yhat - y) * z  # chain rule: dL/dw3 = dL/dyhat * z
# Gradient-descent update for w3 only (w1 and w2 are not updated here)
w3 = w3 - lmb * dL_w3
| StarcoderdataPython |
3322349 | from flask_login import UserMixin, AnonymousUserMixin
from flask_bcrypt import generate_password_hash
from datetime import datetime
from services.web_application.web_app.myapp import database
'''
# How to add roles to a user?
# CODE:
role = Role.query.filter_by(name='MyRole').first()
user = User.query.filter_by(username='admin').first()
user.roles.append(role)
database.session.commit()
'''
# Association table backing the many-to-many User <-> Role relationship.
# NOTE(review): the table is named 'role_users' while the variable is
# 'roles_users' -- confirm the naming is intentional before renaming either.
roles_users = database.Table(
    'role_users',
    database.Column('user_id', database.Integer(), database.ForeignKey('user.id')),
    database.Column('role_id', database.Integer(), database.ForeignKey('role.id'))
)
class AnonymousUser(AnonymousUserMixin):
    """Stand-in user object for visitors that are not logged in.

    Mirrors the ``User`` interface (flags, ``get_id``, ``has_role``) so
    view code can treat both user kinds uniformly.
    """

    def __init__(self):
        # An anonymous visitor never carries any roles.
        self.roles = []

    @property
    def is_authenticated(self):
        """An anonymous visitor is never authenticated."""
        return False

    @property
    def is_active(self):
        """An anonymous visitor has no active account."""
        return False

    @property
    def is_anonymous(self):
        return True

    def get_id(self):
        """There is no database id for an anonymous visitor."""
        return None

    @staticmethod
    def has_role(*args):
        """An anonymous visitor identifies with no role whatsoever."""
        return False
class User(UserMixin, database.Model):
    """Application user account backed by the ``user`` table."""
    id = database.Column(database.Integer, primary_key=True)
    username = database.Column(database.String(32), unique=True, nullable=False)
    # bcrypt hash of the password, stored as raw bytes
    password = database.Column(database.Binary(128), nullable=False)
    email = database.Column(database.String(250), unique=True, nullable=False)
    joined_on = database.Column(database.DateTime, default=datetime.now)
    is_admin = database.Column(database.Boolean, default=False)
    roles = database.relationship('Role', secondary=roles_users,
                                  backref=database.backref('users', lazy='dynamic'))
    @property
    def is_authenticated(self):
        return True
    @property
    def is_active(self):
        return True
    @property
    def is_anonymous(self):
        return False
    def get_id(self):
        return self.id
    def has_role(self, role):
        """Returns 'True' if the user identifies with the specified role.
        :param role: A role name or 'Role' instance"""
        if isinstance(role, str):
            return role.upper() in [user_role.name.upper() for user_role in self.roles]
        # Bug fix: 'Role' instances used to fall through and return None,
        # despite the docstring promising support for them.
        return role in self.roles
    # Required for administrative interface
    def __unicode__(self):
        return self.username
    def __init__(self, username, email, password, is_admin=False):
        self.username = username
        self.email = email
        # Bug fix: restore the bcrypt hashing call -- the source contained a
        # corrupted '<PASSWORD>password_hash' placeholder (a syntax error).
        self.password = generate_password_hash(password)
        self.is_admin = is_admin
    def __repr__(self):
        return self.username
class Role(database.Model):
    """Named permission group that users can be associated with via the
    'role_users' association table."""
    id = database.Column(database.Integer, primary_key=True)
    name = database.Column(database.String(64), unique=True, nullable=False)
    description = database.Column(database.String(255), nullable=True)
    def __init__(self, name, description):
        self.name = name
        self.description = description
    def __repr__(self):
        return self.name
| StarcoderdataPython |
1785967 | #!/usr/bin/env python3
# coding:utf-8
from setuptools import setup
# Minimal packaging configuration for the 'easy_util' helper library.
setup(name='easy_util',
      version='0.0.dev1',
      description='Easy util provide memory usage, flush animation during calculation and so on.',
      author='Mogu',
      author_email='<EMAIL>',  # NOTE(review): placeholder left by dataset anonymisation
      url='https://github.com/Moguf/easy_util',
      license='MIT',
      packages=['easyutil'],
      install_requires=[],)  # no runtime dependencies
| StarcoderdataPython |
3363645 | import pytest
from collections import defaultdict
from coffea import processor
from functools import partial
import numpy as np
def test_accumulators():
    """Exercise the legacy coffea accumulator classes (value, list, set,
    dict, defaultdict, column) and their '+=' merge semantics."""
    a = processor.value_accumulator(float)
    a += 3.
    assert a.value == 3.
    assert a.identity().value == 0.
    # With a factory, identity() reproduces the factory's initial value.
    a = processor.value_accumulator(partial(np.array, [2.]))
    a += 3.
    assert np.array_equal(a.value, np.array([5.]))
    assert np.array_equal(a.identity().value, np.array([2.]))
    l = processor.list_accumulator(range(4))
    l += [3]
    l += processor.list_accumulator([1, 2])
    assert l == [0, 1, 2, 3, 3, 1, 2]
    b = processor.set_accumulator({'apples', 'oranges'})
    b += {'pears'}
    b += 'grapes'  # a bare string is added as a single element, not char-wise
    assert b == {'apples', 'oranges', 'pears', 'grapes'}
    c = processor.dict_accumulator({'num': a, 'fruit': b})
    c['num'] += 2.
    # Merging adds missing keys and accumulates existing ones.
    c += processor.dict_accumulator({
        'num2': processor.value_accumulator(int),
        'fruit': processor.set_accumulator({'apples', 'cherries'}),
    })
    assert c['num2'].value == 0
    assert np.array_equal(c['num'].value, np.array([7.]))
    assert c['fruit'] == {'apples', 'oranges', 'pears', 'grapes', 'cherries'}
    d = processor.defaultdict_accumulator(float)
    d['x'] = 0.
    d['x'] += 4.
    d['y'] += 5.
    d['z'] += d['x']
    d['x'] += d['y']
    assert d['x'] == 9.
    assert d['y'] == 5.
    assert d['z'] == 4.
    assert d['w'] == 0.  # missing keys fall back to the default factory
    e = d + c  # NOTE(review): result unused -- presumably only checks '+' works
    f = processor.defaultdict_accumulator(lambda: 2.)
    f['x'] += 4.
    assert f['x'] == 6.
    f += f
    assert f['x'] == 12.
    assert f['y'] == 2.
    a = processor.column_accumulator(np.arange(6).reshape(2,3))
    b = processor.column_accumulator(np.arange(12).reshape(4,3))
    a += b
    assert a.value.sum() == 81
def test_new_accumulators():
    """Same scenarios as test_accumulators, but via the functional
    processor.accumulate() API operating on plain Python containers."""
    a = processor.accumulate((0., 3.))
    assert a == 3.
    a = processor.accumulate((
        np.array([2.]),
        3.,
    ))
    assert np.array_equal(a, np.array([5.]))
    l = processor.accumulate((
        list(range(4)),
        [3],
        [1, 2],
    ))
    assert l == [0, 1, 2, 3, 3, 1, 2]
    b = processor.accumulate((
        {'apples', 'oranges'},
        {'pears'},
        {'grapes'},
    ))
    assert b == {'apples', 'oranges', 'pears', 'grapes'}
    c = processor.accumulate((
        {'num': a, 'fruit': b},
        {'num': 2.},
        {
            'num2': 0,
            'fruit': {'apples', 'cherries'},
        }
    ))
    assert c['num2'] == 0
    assert np.array_equal(c['num'], np.array([7.]))
    assert c['fruit'] == {'apples', 'oranges', 'pears', 'grapes', 'cherries'}
    d = processor.accumulate((
        defaultdict(float),
        {"x": 4., "y": 5.},
        {"z": 4., "x": 5.},
    ))
    assert d['x'] == 9.
    assert d['y'] == 5.
    assert d['z'] == 4.
    # this is different than old style!
    with pytest.raises(KeyError):
        d['w']
    e = processor.accumulate((d, c))  # NOTE(review): result unused
    f = processor.accumulate((
        defaultdict(lambda: 2.),
        defaultdict(lambda: 2, {"x": 4.}),
    ))
    assert f['x'] == 4.
    assert f['y'] == 2.
    # this is different than old style!
    f = processor.accumulate([f], f)
    assert f['x'] == 8.
    assert f['y'] == 4.
    assert f['z'] == 2.
    a = processor.accumulate((
        processor.column_accumulator(np.arange(6).reshape(2,3)),
        processor.column_accumulator(np.arange(12).reshape(4,3)),
    ))
    assert a.value.sum() == 81
def test_accumulator_types():
    """accumulate() normalises dict subclasses to plain dict output, and
    raises ValueError when a defaultdict is mixed with a dict subclass."""
    class MyDict(dict):
        pass
    out = processor.accumulate((
        {"x": 2},
        MyDict({"x": 3}),
    ))
    assert type(out) is dict
    with pytest.raises(ValueError):
        processor.accumulate((
            defaultdict(lambda: 2),
            MyDict({"x": 3}),
        ))
    # The same holds regardless of the argument order.
    out = processor.accumulate((
        MyDict({"x": 3}),
        {"x": 2},
    ))
    assert type(out) is dict
    with pytest.raises(ValueError):
        processor.accumulate((
            MyDict({"x": 3}),
            defaultdict(lambda: 2),
        ))
| StarcoderdataPython |
4811783 | import numpy as np
def log_gaussian(x, mean, sigma):
    """
    Evaluate the log-density of a normal distribution.

    Parameters
    ----------
    x: float or array-like
        Point(s) at which the log-density is evaluated
    mean: float
        Mean of the normal distribution
    sigma: float
        Standard deviation of the normal distribution

    Returns
    -------
    log_pdf: float or array-like
        log N(x | mean, sigma)
    """
    # log pdf = -(x - mean)^2 / (2 sigma^2) - log(sqrt(2 pi) * sigma)
    squared_deviation = (x - mean) ** 2
    normalisation = np.log(np.sqrt(2 * np.pi) * sigma)
    return -squared_deviation / (2 * sigma ** 2) - normalisation
def log_gaussian2d(size, x, y, x_cm, y_cm, width, length, psi):
    """
    Evaluate the log of a rotated two-dimensional Gaussian.

    Parameters
    ----------
    size: float
        Integral of the 2D Gaussian
    x, y: float or array-like
        Position at which the log gaussian is evaluated
    x_cm, y_cm: float
        Center of the 2D Gaussian
    width, length: float
        Standard deviations along the two principal axes
    psi: float
        Orientation (rotation angle) of the 2D Gaussian

    Returns
    -------
    log_pdf: float or array-like
        Log of the 2D Gaussian evaluated at (x, y)
    """
    # Coefficients of the rotated quadratic form.
    inv_two_w2 = 1. / (2. * width ** 2)
    inv_two_l2 = 1. / (2. * length ** 2)
    a = np.cos(psi) ** 2 * inv_two_l2 + np.sin(psi) ** 2 * inv_two_w2
    b = np.sin(2 * psi) * (inv_two_w2 - inv_two_l2) / 2.
    c = np.cos(psi) ** 2 * inv_two_w2 + np.sin(psi) ** 2 * inv_two_l2
    dx = x - x_cm
    dy = y - y_cm
    quad_form = a * dx ** 2 - 2 * b * dx * dy + c * dy ** 2
    # Normalisation constant of the bivariate Gaussian, scaled by 'size'.
    norm = 1. / (2 * np.pi * width * length)
    return -quad_form + np.log(norm) + np.log(size)
| StarcoderdataPython |
3204381 | <reponame>Kamil732/DK-team<filename>backend/project/accounts/api/pagination.py
from rest_framework import pagination
from rest_framework.response import Response
class CustomerImagesPagination(pagination.PageNumberPagination):
    """Page-number pagination for customer images, 20 items per page."""
    page_size = 20
    def get_paginated_response(self, data):
        # Trimmed payload: only the next page number (or None on the last
        # page) and the results -- no 'count'/'previous' like DRF's default.
        return Response({
            'next': self.page.next_page_number() if self.page.has_next() else None,
            'results': data,
        })
4824881 | <filename>kronos/settings.py
import os
import sys
from django.conf import settings
# Directory the process was started from, used as the project root on disk.
PROJECT_PATH = os.getcwd()
# The already-imported parent package of the active settings module, e.g.
# 'myproject' for SETTINGS_MODULE == 'myproject.settings'.
PROJECT_MODULE = sys.modules['.'.join(settings.SETTINGS_MODULE.split('.')[:-1])]
| StarcoderdataPython |
1718229 | <reponame>Sheetal0601/InterviewBit
# Numbers of length N and value less than K
# https://www.interviewbit.com/problems/numbers-of-length-n-and-value-less-than-k/
#
# Given a set of digits (A) in sorted order, find how many numbers of length B are possible
# whose value is less than number C.
#
# NOTE: All numbers can only have digits from the given set.
#
# Examples:
#
# Input:
# 3 0 1 5
# 1
# 2
# Output:
# 2 (0 and 1 are possible)
#
# Input:
# 4 0 1 2 5
# 2
# 21
# Output:
# 5 (10, 11, 12, 15, 20 are possible)
#
# Constraints:
#
# 1 <= B <= 9, 0 <= C <= 1e9 & 0 <= A[i] <= 9
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class Solution:
    # @param A : list of integers
    # @param B : integer
    # @param C : integer
    # @return an integer
    def n_to_l(self, A):
        """Split non-negative integer A into its list of decimal digits."""
        l = []
        while A:
            l.append(A % 10)
            A //= 10
        return list(reversed(l))

    def calc_less_then(self, A):
        """For each digit d in 0..9, count how many digits of A are < d."""
        less_than = [0] * 10
        lc = 0
        for i in range(10):
            less_than[i] = lc
            lc += int(i in A)
        return less_than

    def solve(self, A, B, C):
        """Count length-B numbers built only from digits in A whose value < C.

        A leading zero is disallowed only for multi-digit numbers; the
        single number '0' itself is valid when B == 1.
        """
        less_than = self.calc_less_then(A)
        list_c = self.n_to_l(C)
        # Bug fix: the leading-zero exclusion previously also applied when
        # B == 1, so e.g. solve([0, 1, 5], 1, 2) returned 1 instead of 2
        # ('0' is a valid single-digit number per the problem statement).
        lead = (0 in A) and B > 1
        if len(list_c) > B:
            # Every B-length candidate is numerically smaller than C.
            if B == 1:
                return len(A)
            # All digit strings minus those starting with a leading zero.
            # Integer ** replaces the original math.pow, whose float result
            # loses precision once it exceeds 2**53.
            return len(A) ** B - lead * len(A) ** (B - 1)
        if len(list_c) < B or not A:
            return 0
        # Digit-by-digit walk along C: at each position count the numbers
        # that use a strictly smaller digit there (free choice afterwards),
        # then continue only if C's own digit is available in A.
        res = 0
        for e in list_c:
            B -= 1
            if less_than[e] != 0:
                res += (less_than[e] - lead) * len(A) ** B
            if e not in A:
                break
            lead = 0  # zero is only forbidden at the first position
        return res
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # | StarcoderdataPython |
70365 | <filename>lhotse/recipes/timit.py
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corporation (Author: <NAME>)
# Apache 2.0
import glob
import logging
import zipfile
from collections import defaultdict
from concurrent.futures.thread import ThreadPoolExecutor
from pathlib import Path
from typing import Dict, Optional, Union
from tqdm import tqdm
from lhotse import validate_recordings_and_supervisions
from lhotse.audio import Recording, RecordingSet
from lhotse.supervision import SupervisionSegment, SupervisionSet
from lhotse.utils import Pathlike, urlretrieve_progress
def download_timit(
    target_dir: Pathlike = ".",
    force_download: bool = False,
    base_url: Optional[str] = "https://data.deepai.org/timit.zip",
) -> None:
    """
    Download and unzip the dataset TIMIT.
    :param target_dir: Pathlike, the path of the dir to store the dataset.
    :param force_download: bool, if True, download the zips no matter if the zips exists.
    :param base_url: str, the URL of the TIMIT dataset to download.
    """
    target_dir = Path(target_dir)
    target_dir.mkdir(parents=True, exist_ok=True)
    zip_name = "timit.zip"
    zip_path = target_dir / zip_name
    corpus_dir = zip_path.with_suffix("")
    completed_detector = corpus_dir / ".completed"
    # Skip everything when a previous run finished successfully.
    if completed_detector.is_file():
        logging.info(f"Skipping {zip_name} because {completed_detector} exists.")
        return
    if force_download or not zip_path.is_file():
        urlretrieve_progress(
            base_url, filename=zip_path, desc=f"Downloading {zip_name}"
        )
    with zipfile.ZipFile(zip_path) as zip_file:
        corpus_dir.mkdir(parents=True, exist_ok=True)
        for names in zip_file.namelist():
            zip_file.extract(names, str(corpus_dir))
    # Bug fix: write the completion marker. It was checked above but never
    # created, so the early-exit path could never trigger and every rerun
    # re-extracted the archive.
    completed_detector.touch()
def prepare_timit(
    corpus_dir: Pathlike,
    output_dir: Optional[Pathlike] = None,
    num_phones: int = 48,
    num_jobs: int = 1,
) -> Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]:
    """
    Returns the manifests which consists of the Recodings and Supervisions.
    :param corpus_dir: Pathlike, the path of the data dir.
    :param output_dir: Pathlike, the path where to write and save the manifests.
    :param num_phones: int=48, the number of phones (60, 48 or 39) for modeling and 48 is regarded as the default value.
    :param num_jobs: int, worker-thread count.  NOTE(review): the executor
        created below is never submitted to, so this parameter currently
        has no effect -- confirm before relying on it.
    :return: a Dict whose key is the dataset part, and the value is Dicts with the keys 'audio' and 'supervisions'.
    """
    corpus_dir = Path(corpus_dir)
    assert corpus_dir.is_dir(), f"No such directory: {corpus_dir}"
    if output_dir is not None:
        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)
    manifests = defaultdict(dict)
    dataset_parts = ["TRAIN", "DEV", "TEST"]
    # A phone mapping is only needed when collapsing to 48 or 39 phones;
    # for 60 phones the transcripts are used as-is (empty dict).
    phones_dict = {}
    if num_phones in [60, 48, 39]:
        phones_dict = get_phonemes(num_phones)
    else:
        raise ValueError("The value of num_phones must be in [60, 48, 39].")
    dev_spks, test_spks = get_speakers()
    # NOTE(review): 'ex' is never used -- the loop below runs sequentially
    # despite the thread pool being created.
    with ThreadPoolExecutor(num_jobs) as ex:
        for part in dataset_parts:
            wav_files = []
            if part == "TRAIN":
                print("starting....")
                wav_files = glob.glob(str(corpus_dir) + "/TRAIN/*/*/*.WAV")
                # filter the SA (dialect sentences)
                wav_files = list(
                    filter(lambda x: x.split("/")[-1][:2] != "SA", wav_files)
                )
            elif part == "DEV":
                # DEV is carved out of the TEST directory by speaker id.
                wav_files = glob.glob(str(corpus_dir) + "/TEST/*/*/*.WAV")
                # filter the SA (dialect sentences)
                wav_files = list(
                    filter(lambda x: x.split("/")[-1][:2] != "SA", wav_files)
                )
                wav_files = list(
                    filter(lambda x: x.split("/")[-2].lower() in dev_spks, wav_files)
                )
            else:
                # TEST keeps only the canonical core-test speakers.
                wav_files = glob.glob(str(corpus_dir) + "/TEST/*/*/*.WAV")
                # filter the SA (dialect sentences)
                wav_files = list(
                    filter(lambda x: x.split("/")[-1][:2] != "SA", wav_files)
                )
                wav_files = list(
                    filter(lambda x: x.split("/")[-2].lower() in test_spks, wav_files)
                )
            logging.debug(f"{part} dataset manifest generation.")
            recordings = []
            supervisions = []
            for wav_file in tqdm(wav_files):
                # Build an id of the form '<speaker>-<utterance>' from the path.
                items = str(wav_file).strip().split("/")
                idx = items[-2] + "-" + items[-1][:-4]
                speaker = items[-2]
                transcript_file = Path(wav_file).with_suffix(".PHN")
                if not Path(wav_file).is_file():
                    logging.warning(f"No such file: {wav_file}")
                    continue
                if not Path(transcript_file).is_file():
                    logging.warning(f"No transcript: {transcript_file}")
                    continue
                text = []
                with open(transcript_file, "r") as f:
                    lines = f.readlines()
                    for line in lines:
                        # Each .PHN line ends with the phone symbol.
                        phone = line.rstrip("\n").split(" ")[-1]
                        if num_phones != 60:
                            phone = phones_dict[str(phone)]
                        text.append(phone)
                # 'h#' marks silence at utterance boundaries.
                text = " ".join(text).replace("h#", "sil")
                recording = Recording.from_file(path=wav_file, recording_id=idx)
                recordings.append(recording)
                segment = SupervisionSegment(
                    id=idx,
                    recording_id=idx,
                    start=0.0,
                    duration=recording.duration,
                    channel=0,
                    language="English",
                    speaker=speaker,
                    text=text.strip(),
                )
                supervisions.append(segment)
            recording_set = RecordingSet.from_recordings(recordings)
            supervision_set = SupervisionSet.from_segments(supervisions)
            validate_recordings_and_supervisions(recording_set, supervision_set)
            if output_dir is not None:
                supervision_set.to_json(output_dir / f"supervisions_{part}.json")
                recording_set.to_json(output_dir / f"recordings_{part}.json")
            manifests[part] = {
                "recordings": recording_set,
                "supervisions": supervision_set,
            }
    return manifests
def get_phonemes(num_phones):
    """
    Return the phone-mapping dictionary for the requested phone-set size.

    :param num_phones: the number of phones for modeling (60, 48 or 39).
        For 48 and 39 the returned dict maps each of the original 60
        TIMIT phones onto its collapsed phone ('q' maps to the empty
        string); for any other value (including 60) an empty dict is
        returned and the transcripts are kept unchanged.
    """
    if num_phones == 48:
        logging.debug("Using 48 phones for modeling!")
        # Collapse the 60-phone TIMIT inventory into the standard 48-phone set.
        return {
            "sil": "sil", "aa": "aa", "ae": "ae", "ah": "ah", "ao": "ao",
            "aw": "aw", "ax": "ax", "ax-h": "ax", "axr": "er", "ay": "ay",
            "b": "b", "bcl": "vcl", "ch": "ch", "d": "d", "dcl": "vcl",
            "dh": "dh", "dx": "dx", "eh": "eh", "el": "el", "em": "m",
            "en": "en", "eng": "ng", "epi": "epi", "er": "er", "ey": "ey",
            "f": "f", "g": "g", "gcl": "vcl", "h#": "sil", "hh": "hh",
            "hv": "hh", "ih": "ih", "ix": "ix", "iy": "iy", "jh": "jh",
            "k": "k", "kcl": "cl", "l": "l", "m": "m", "n": "n",
            "ng": "ng", "nx": "n", "ow": "ow", "oy": "oy", "p": "p",
            "pau": "sil", "pcl": "cl", "q": "", "r": "r", "s": "s",
            "sh": "sh", "t": "t", "tcl": "cl", "th": "th", "uh": "uh",
            "uw": "uw", "ux": "uw", "v": "v", "w": "w", "y": "y",
            "z": "z", "zh": "zh",
        }
    if num_phones == 39:
        logging.debug("Using 39 phones for modeling!")
        # Collapse the 60-phone TIMIT inventory into the standard 39-phone set.
        return {
            "sil": "sil", "aa": "aa", "ae": "ae", "ah": "ah", "ao": "aa",
            "aw": "aw", "ax": "ah", "ax-h": "ah", "axr": "er", "ay": "ay",
            "b": "b", "bcl": "sil", "ch": "ch", "d": "d", "dcl": "sil",
            "dh": "dh", "dx": "dx", "eh": "eh", "el": "l", "em": "m",
            "en": "n", "eng": "ng", "epi": "sil", "er": "er", "ey": "ey",
            "f": "f", "g": "g", "gcl": "sil", "h#": "sil", "hh": "hh",
            "hv": "hh", "ih": "ih", "ix": "ih", "iy": "iy", "jh": "jh",
            "k": "k", "kcl": "sil", "l": "l", "m": "m", "ng": "ng",
            "n": "n", "nx": "n", "ow": "ow", "oy": "oy", "p": "p",
            "pau": "sil", "pcl": "sil", "q": "", "r": "r", "s": "s",
            "sh": "sh", "t": "t", "tcl": "sil", "th": "th", "uh": "uh",
            "uw": "uw", "ux": "uw", "v": "v", "w": "w", "y": "y",
            "z": "z", "zh": "sh",
        }
    logging.debug("Using 60 phones for modeling!")
    return {}
def get_speakers():
    """Return the TIMIT speaker splits as ``(dev_spk, test_spk)``.

    ``test_spk`` is the canonical 24-speaker core test set and
    ``dev_spk`` the canonical 50-speaker development set.
    """
    # List of test speakers (24-speaker core test set).
    test_spk = (
        "fdhc0 felc0 fjlm0 fmgd0 fmld0 fnlp0 fpas0 fpkt0 "
        "mbpm0 mcmj0 mdab0 mgrt0 mjdh0 mjln0 mjmp0 mklt0 "
        "mlll0 mlnt0 mnjm0 mpam0 mtas1 mtls0 mwbt0 mwew0"
    ).split()
    # List of dev speakers (50-speaker development set).
    dev_spk = (
        "fadg0 faks0 fcal1 fcmh0 fdac1 fdms0 fdrw0 fedw0 fgjd0 fjem0 "
        "fjmg0 fjsj0 fkms0 fmah0 fmml0 fnmr0 frew0 fsem0 majc0 mbdg0 "
        "mbns0 mbwm0 mcsh0 mdlf0 mdls0 mdvc0 mers0 mgjf0 mglb0 mgwt0 "
        "mjar0 mjfc0 mjsw0 mmdb1 mmdm2 mmjr0 mmwh0 mpdf0 mrcs0 mreb0 "
        "mrjm4 mrjr0 mroa0 mrtk0 mrws1 mtaa0 mtdt0 mteb0 mthc0 mwjg0"
    ).split()
    return dev_spk, test_spk
| StarcoderdataPython |
3360486 | from flask import Flask
from flask_bcrypt import Bcrypt
from flask_graphql import GraphQLView
import config
from gql import schema
# Application factory-less setup: single Flask app exposing one GraphQL endpoint.
app = Flask(__name__)
bcrypt = Bcrypt(app)  # NOTE(review): unused below -- presumably imported by other modules; confirm
app.debug = config.DEBUG
# Database settings come from the local config module.
app.config["SQLALCHEMY_DATABASE_URI"] = config.DB_URI
app.config[
    "SQLALCHEMY_TRACK_MODIFICATIONS"] = config.SQLALCHEMY_TRACK_MODIFICATIONS
# graphiql=True additionally serves the in-browser GraphiQL IDE at /graphql.
app.add_url_rule(
    "/graphql",
    view_func=GraphQLView.as_view("graphql",
                                  schema=schema.schema,
                                  graphiql=True),
)
| StarcoderdataPython |
3394857 | <reponame>Kaufi-Jonas/VaRA-Tool-Suite<filename>varats/varats/tables/code_centrality_table.py
"""Module for code centrality tables."""
import logging
import typing as tp
from pathlib import Path
import pandas as pd
from tabulate import tabulate
from varats.data.reports.blame_interaction_graph import (
create_blame_interaction_graph,
CIGNodeAttrs,
)
from varats.data.reports.blame_report import BlameReport
from varats.paper_mgmt.case_study import (
newest_processed_revision_for_case_study,
)
from varats.project.project_util import get_local_project_gits
from varats.table.table import Table, wrap_table_in_document, TableDataEmpty
from varats.table.tables import TableFormat
from varats.utils.git_util import (
ChurnConfig,
calc_commit_code_churn,
create_commit_lookup_helper,
CommitRepoPair,
UNCOMMITTED_COMMIT_HASH,
)
LOG = logging.Logger(__name__)
class TopCentralCodeCommitsTable(Table):
    """Table showing commits with highest commit interaction graph node
    degrees.

    'Code centrality' of a commit is computed below as its degree in the
    commit interaction graph minus the number of lines it inserted.
    """
    NAME = "top_central_code_commits_table"
    def __init__(self, **kwargs: tp.Any):
        super().__init__(self.NAME, **kwargs)
    def tabulate(self) -> str:
        """Render the top-N most code-central commits for the configured
        case study (N defaults to 10 via the 'num_commits' kwarg)."""
        case_study = self.table_kwargs["table_case_study"]
        num_commits = self.table_kwargs.get("num_commits", 10)
        project_name = case_study.project_name
        # Use the newest revision that has a processed BlameReport.
        revision = newest_processed_revision_for_case_study(
            case_study, BlameReport
        )
        if not revision:
            raise TableDataEmpty()
        churn_config = ChurnConfig.create_c_style_languages_config()
        cig = create_blame_interaction_graph(project_name, revision
                                            ).commit_interaction_graph()
        commit_lookup = create_commit_lookup_helper(project_name)
        repo_lookup = get_local_project_gits(project_name)
        def filter_nodes(node: CommitRepoPair) -> bool:
            # Skip uncommitted changes and commits the lookup cannot resolve.
            if node.commit_hash == UNCOMMITTED_COMMIT_HASH:
                return False
            return bool(commit_lookup(node))
        nodes: tp.List[tp.Dict[str, tp.Any]] = []
        for node in cig.nodes:
            node_attrs = tp.cast(CIGNodeAttrs, cig.nodes[node])
            commit = node_attrs["commit"]
            if not filter_nodes(commit):
                continue
            _, insertions, _ = calc_commit_code_churn(
                Path(repo_lookup[commit.repository_name].path),
                commit.commit_hash, churn_config
            )
            if insertions == 0:
                # Zero churn is unexpected for a real commit; warn and fall
                # back to 1 so the centrality below stays meaningful.
                LOG.warning(f"Churn for commit {commit} is 0.")
                insertions = 1
            nodes.append(({
                "commit_hash": commit.commit_hash.hash,
                "degree": cig.degree(node),
                "insertions": insertions,
            }))
        data = pd.DataFrame(nodes)
        # Central commits interact a lot relative to how much code they add.
        data["code_centrality"] = data["degree"] - data["insertions"]
        data.set_index("commit_hash", inplace=True)
        top_degree = data["code_centrality"].nlargest(num_commits)
        degree_data = pd.DataFrame.from_dict({
            "commit": top_degree.index.values,
            "centrality": top_degree.values,
        })
        # Sort by centrality (desc), breaking ties by commit hash (asc).
        degree_data.sort_values(["centrality", "commit"],
                                ascending=[False, True],
                                inplace=True)
        if self.format in [
            TableFormat.LATEX, TableFormat.LATEX_BOOKTABS, TableFormat.LATEX_RAW
        ]:
            table = degree_data.to_latex(
                index=False,
                multicolumn_format="c",
                multirow=True,
                caption=f"Top {num_commits} Central Code Commits"
            )
            return str(table) if table else ""
        return tabulate(degree_data, degree_data.columns, self.format.value)
    def wrap_table(self, table: str) -> str:
        """Wrap the rendered table into a landscape LaTeX document."""
        return wrap_table_in_document(table=table, landscape=True)
| StarcoderdataPython |
100074 | from pykeepass import PyKeePass
# Demo script: open KeePass test databases and dump their contents.
kp4_pass = "<PASSWORD>"  # placeholder -- real credential redacted
kp4 = PyKeePass("keepass_v4_test.kdbx", password=kp4_pass)
kp3_pass = "<PASSWORD>"
# NOTE(review): kp3 also opens the *v4* test database -- presumably this was
# meant to be a v3 .kdbx file; confirm the intended filename.
kp3 = PyKeePass("keepass_v4_test.kdbx", password=kp3_pass)
kp = kp4  # the database exercised by the dump below
divider = "#" * 50
print("list all groups:")
print(kp.groups)
print(divider)
print("for each group, list all entries:")
for group in kp.groups:
    print(group, " entries:")
    print(group.entries)
    print()
print(divider)
print("for each entry in each group, list entry details:")
for group in kp.groups:
    print(group, " entries:")
    for entry in group.entries:
        print(entry, " details:")
        print(" TITLE: ", entry.title)
        print(" USERNAME: ", entry.username)
        print(" PASSWORD: ", entry.password)
        print(" URL: ", entry.url)
        print(" NOTES: ", entry.notes)
        print(" LAST MODIFIED: ", entry.mtime)
        # print(" TAGS: ", entry.tags) # not available for test with KeePassXC
        print(" CUSTOM PROPERTIES:")
        for prop_key, prop_value in entry.custom_properties.items():
            print(" " + prop_key + ": " + prop_value)
        print()
print(divider)
| StarcoderdataPython |
1777742 | import unittest
from iterable_collections import collect
class TestPop(unittest.TestCase):
    """Verify collect(...).pop() matches plain list.pop() semantics for each
    supported source container, and that the collection shrinks accordingly."""

    def _assert_pop_behaviour(self, make_source):
        """Pop from a fresh 10-element collection and compare with list.pop()."""
        c = collect(make_source())
        self.assertEqual(c.pop(), list(make_source()).pop())
        self.assertEqual(c.len(), 9)
        c.pop()
        self.assertEqual(c.len(), 8)

    def test_list(self):
        self._assert_pop_behaviour(lambda: list(range(10)))

    def test_set(self):
        self._assert_pop_behaviour(lambda: set(range(10)))

    def test_tuple(self):
        self._assert_pop_behaviour(lambda: tuple(range(10)))

    def test_iterator(self):
        self._assert_pop_behaviour(lambda: iter(range(10)))

    def test_dict(self):
        # Iterating a dict yields its keys, so pop() returns the last key.
        c = collect({'a': 1, 'b': 2})
        self.assertEqual(c.pop(), list({'a': 1, 'b': 2}).pop())

    def test_dict_items(self):
        c = collect({'a': 1, 'b': 2}.items())
        self.assertEqual(c.pop(), list({'a': 1, 'b': 2}.items()).pop())

    def test_enumerate(self):
        c = collect(list(range(10))).enumerate()
        self.assertEqual(c.pop(), list(enumerate(range(10))).pop())
        self.assertEqual(c.len(), 9)
        c.pop()
        self.assertEqual(c.len(), 8)
| StarcoderdataPython |
3274249 | <gh_stars>0
from .travis_logs import TravisLogsStorage
| StarcoderdataPython |
3215104 | <filename>tests/components/deconz/test_light.py
"""deCONZ light platform tests."""
from unittest.mock import Mock, patch
from homeassistant import config_entries
from homeassistant.components import deconz
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.setup import async_setup_component
import homeassistant.components.light as light
from tests.common import mock_coro
# Fixture payloads mimicking a deCONZ REST-API state dump.
# Light "1" exposes brightness + xy color; light "2" only color temperature.
LIGHT = {
    "1": {
        "id": "Light 1 id",
        "name": "Light 1 name",
        "state": {
            "on": True, "bri": 255, "colormode": "xy", "xy": (500, 500),
            "reachable": True
        },
        "uniqueid": "00:00:00:00:00:00:00:00-00"
    },
    "2": {
        "id": "Light 2 id",
        "name": "Light 2 name",
        "state": {
            "on": True, "colormode": "ct", "ct": 2500, "reachable": True
        }
    }
}
# Group "1" references both lights; group "2" has no lights and should be
# skipped by the platform.
GROUP = {
    "1": {
        "id": "Group 1 id",
        "name": "Group 1 name",
        "type": "LightGroup",
        "state": {},
        "action": {},
        "scenes": [],
        "lights": [
            "1",
            "2"
        ],
    },
    "2": {
        "id": "Group 2 id",
        "name": "Group 2 name",
        "state": {},
        "action": {},
        "scenes": [],
        "lights": [],
    },
}
# A plug-in unit: must NOT be mapped to a light entity.
SWITCH = {
    "1": {
        "id": "Switch 1 id",
        "name": "Switch 1 name",
        "type": "On/Off plug-in unit",
        "state": {}
    }
}
# Minimal config-entry data used to construct the mocked gateway.
ENTRY_CONFIG = {
    deconz.const.CONF_ALLOW_CLIP_SENSOR: True,
    deconz.const.CONF_ALLOW_DECONZ_GROUPS: True,
    deconz.config_flow.CONF_API_KEY: "ABCDEF",
    deconz.config_flow.CONF_BRIDGEID: "0123456789",
    deconz.config_flow.CONF_HOST: "1.2.3.4",
    deconz.config_flow.CONF_PORT: 80
}
async def setup_gateway(hass, data, allow_deconz_groups=True):
    """Load the deCONZ light platform.

    ``data`` is the fake REST state dump (``{"lights": ..., "groups": ...}``)
    returned by the mocked session; ``allow_deconz_groups`` toggles the
    CONF_ALLOW_DECONZ_GROUPS option.  Returns the configured DeconzGateway.
    """
    from pydeconz import DeconzSession
    loop = Mock()
    session = Mock()
    # NOTE: mutates the module-level ENTRY_CONFIG shared between tests.
    ENTRY_CONFIG[deconz.const.CONF_ALLOW_DECONZ_GROUPS] = allow_deconz_groups
    config_entry = config_entries.ConfigEntry(
        1, deconz.DOMAIN, 'Mock Title', ENTRY_CONFIG, 'test',
        config_entries.CONN_CLASS_LOCAL_PUSH)
    gateway = deconz.DeconzGateway(hass, config_entry)
    gateway.api = DeconzSession(loop, session, **config_entry.data)
    gateway.api.config = Mock()
    hass.data[deconz.DOMAIN] = {gateway.bridgeid: gateway}
    # Serve the fake state dump while the library loads its parameters.
    with patch('pydeconz.DeconzSession.async_get_state',
               return_value=mock_coro(data)):
        await gateway.api.async_load_parameters()
    await hass.config_entries.async_forward_entry_setup(config_entry, 'light')
    # To flush out the service call to update the group
    await hass.async_block_till_done()
    return gateway
async def test_platform_manually_configured(hass):
    """Test that we do not discover anything or try to set up a gateway."""
    # Manual YAML configuration of the deconz platform must be a no-op.
    assert await async_setup_component(hass, light.DOMAIN, {
        'light': {
            'platform': deconz.DOMAIN
        }
    }) is True
    assert deconz.DOMAIN not in hass.data
async def test_no_lights_or_groups(hass):
    """Test that no lights or groups entities are created."""
    # An empty state dump must yield no deconz ids and no HA states.
    gateway = await setup_gateway(hass, {})
    assert not hass.data[deconz.DOMAIN][gateway.bridgeid].deconz_ids
    assert len(hass.states.async_all()) == 0
async def test_lights_and_groups(hass):
    """Test that lights or groups entities are created."""
    with patch('pydeconz.DeconzSession.async_put_state',
               return_value=mock_coro(True)):
        gateway = await setup_gateway(
            hass, {"lights": LIGHT, "groups": GROUP})
        # Both lights and the non-empty group become entities; the empty
        # group ("Group 2") is skipped.
        assert "light.light_1_name" in gateway.deconz_ids
        assert "light.light_2_name" in gateway.deconz_ids
        assert "light.group_1_name" in gateway.deconz_ids
        assert "light.group_2_name" not in gateway.deconz_ids
        assert len(hass.states.async_all()) == 4
        lamp_1 = hass.states.get('light.light_1_name')
        assert lamp_1 is not None
        assert lamp_1.state == 'on'
        assert lamp_1.attributes['brightness'] == 255
        assert lamp_1.attributes['hs_color'] == (224.235, 100.0)
        light_2 = hass.states.get('light.light_2_name')
        assert light_2 is not None
        assert light_2.state == 'on'
        assert light_2.attributes['color_temp'] == 2500
        gateway.api.lights['1'].async_update({})
        # Exercise every turn_on/turn_off parameter combination; async_put_state
        # is patched, so these only verify the service calls do not raise.
        await hass.services.async_call('light', 'turn_on', {
            'entity_id': 'light.light_1_name',
            'color_temp': 2500,
            'brightness': 200,
            'transition': 5,
            'flash': 'short',
            'effect': 'colorloop'
        }, blocking=True)
        await hass.services.async_call('light', 'turn_on', {
            'entity_id': 'light.light_1_name',
            'hs_color': (20, 30),
            'flash': 'long',
            'effect': 'None'
        }, blocking=True)
        await hass.services.async_call('light', 'turn_off', {
            'entity_id': 'light.light_1_name',
            'transition': 5,
            'flash': 'short'
        }, blocking=True)
        await hass.services.async_call('light', 'turn_off', {
            'entity_id': 'light.light_1_name',
            'flash': 'long'
        }, blocking=True)
async def test_add_new_light(hass):
    """Test successful creation of light entities."""
    gateway = await setup_gateway(hass, {})
    # Named 'mock_light' so the local does not shadow the module-level
    # 'homeassistant.components.light as light' import.
    mock_light = Mock()
    mock_light.name = 'name'
    mock_light.register_async_callback = Mock()
    # Simulate the gateway signalling a newly discovered light device.
    async_dispatcher_send(
        hass, gateway.async_event_new_device('light'), [mock_light])
    await hass.async_block_till_done()
    assert "light.name" in gateway.deconz_ids
async def test_add_new_group(hass):
    """Test successful creation of group entities."""
    gateway = await setup_gateway(hass, {})
    group = Mock()
    group.name = 'name'
    group.register_async_callback = Mock()
    # Simulate the gateway signalling a newly discovered group.
    async_dispatcher_send(
        hass, gateway.async_event_new_device('group'), [group])
    await hass.async_block_till_done()
    assert "light.name" in gateway.deconz_ids
async def test_do_not_add_deconz_groups(hass):
    """Test that deCONZ groups are ignored when the option disallows them."""
    gateway = await setup_gateway(hass, {}, allow_deconz_groups=False)
    group = Mock()
    group.name = 'name'
    group.register_async_callback = Mock()
    async_dispatcher_send(
        hass, gateway.async_event_new_device('group'), [group])
    await hass.async_block_till_done()
    # With CONF_ALLOW_DECONZ_GROUPS disabled, no entity may be created.
    assert len(gateway.deconz_ids) == 0
async def test_no_switch(hass):
    """Test that a switch doesn't get created as a light entity."""
    # "On/Off plug-in unit" devices belong to the switch platform.
    gateway = await setup_gateway(hass, {"lights": SWITCH})
    assert len(gateway.deconz_ids) == 0
    assert len(hass.states.async_all()) == 0
async def test_unload_light(hass):
    """Test that it works to unload switch entities."""
    gateway = await setup_gateway(hass, {"lights": LIGHT, "groups": GROUP})
    await gateway.async_reset()
    # Group.all_lights will not be removed
    assert len(hass.states.async_all()) == 1
| StarcoderdataPython |
1629860 | from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.http import HttpResponse, HttpResponseRedirect
from django.db.models import F, Q
from django.shortcuts import redirect, render
from django.views import generic
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from heritagesites.filters import HeritageSiteFilter
from heritagesites.forms import HeritageSiteForm, SearchForm
from heritagesites.models import CountryArea, HeritageSite, HeritageSiteJurisdiction, Location
from django_filters.views import FilterView
@login_required()
def index(request):
    """Plain-text landing view for the heritagesites app (login required)."""
    return HttpResponse("Hello, world. You're at the UNESCO Heritage Sites index page.")
class AboutPageView(generic.TemplateView):
    """Static 'about' page."""
    template_name = 'heritagesites/about.html'
@method_decorator(login_required, name='dispatch')
class CountryAreaDetailView(generic.DetailView):
    """Detail page for a single country/area (login required)."""
    model = CountryArea
    context_object_name = 'country'
    template_name = 'heritagesites/country_area_detail.html'

    def dispatch(self, *args, **kwargs):
        return super().dispatch(*args, **kwargs)

    def get_object(self):
        return super().get_object()
@method_decorator(login_required, name='dispatch')
class CountryAreaListView(generic.ListView):
    """Paginated list of countries/areas, ordered by name (login required)."""
    model = CountryArea
    context_object_name = 'countries'
    template_name = 'heritagesites/country_area.html'
    paginate_by = 20

    def dispatch(self, *args, **kwargs):
        return super().dispatch(*args, **kwargs)

    def get_queryset(self):
        # select_related avoids N+1 queries on the status/location FKs.
        queryset = CountryArea.objects.select_related('dev_status', 'location')
        return queryset.order_by('country_area_name')
class HomePageView(generic.TemplateView):
    """Site home page."""
    template_name = 'heritagesites/home.html'
class LocationListView(generic.ListView):
    """UNSD locations ordered by region / sub-region / intermediate region."""
    model = Location
    context_object_name = 'locations'
    template_name = 'heritagesites/location.html'

    def get_queryset(self):
        ordering = (
            'region__region_name',
            'sub_region__sub_region_name',
            'intermediate_region__intermediate_region_name',
        )
        return Location.objects.select_related(
            'region', 'sub_region', 'intermediate_region'
        ).order_by(*ordering)
class OceaniaListView(generic.ListView):
    """Heritage sites located in Oceania (UNSD region id 5)."""
    model = HeritageSite
    context_object_name = 'sites'
    template_name = 'heritagesites/oceania.html'
    paginate_by = 10

    def get_queryset(self):
        oceania_sites = HeritageSite.objects.select_related(
            'heritage_site_category'
        ).filter(countries__location__region__region_id=5)
        return oceania_sites.order_by(
            'countries__location__sub_region__sub_region_name',
            'countries__country_area_name',
            'site_name'
        )
class OceaniaDetailView(generic.DetailView):
    """Detail page for a single Oceania heritage site."""
    model = HeritageSite
    context_object_name = 'site'
    template_name = 'heritagesites/oceania_detail.html'

    def get_object(self):
        return super().get_object()
@method_decorator(login_required, name='dispatch')
class SiteCreateView(generic.View):
    """Create a HeritageSite plus one jurisdiction row per selected country."""
    model = HeritageSite
    form_class = HeritageSiteForm
    success_message = "Heritage Site created successfully"
    template_name = 'heritagesites/site_new.html'

    def dispatch(self, *args, **kwargs):
        return super().dispatch(*args, **kwargs)

    def post(self, request):
        """Validate the submitted form; persist the site and its jurisdictions."""
        form = HeritageSiteForm(request.POST)
        if not form.is_valid():
            return render(request, 'heritagesites/site_new.html', {'form': form})
        site = form.save(commit=False)
        site.save()
        for country in form.cleaned_data['countries']:
            HeritageSiteJurisdiction.objects.create(
                heritage_site=site, country_area=country)
        # redirect(model_instance) resolves via the model's get_absolute_url().
        return redirect(site)

    def get(self, request):
        """Render an empty creation form."""
        return render(request, 'heritagesites/site_new.html',
                      {'form': HeritageSiteForm()})
# NOTE: dead code kept for reference -- a CreateView-based alternative to the
# generic.View implementation above.  Safe to delete once no longer useful.
'''
class SiteCreateView(generic.CreateView):
    model = HeritageSite
    form_class = HeritageSiteForm
    template_name = 'heritagesites/site_new.html'
    # fields = '__all__' <-- superseded by form_class
    # success_url = ''
    def form_valid(self, form):
        site = form.save(False)
        site.save()
        for country in form.cleaned_data['countries']:
            HeritageSiteJurisdiction.objects.create(heritage_site=site, country_area=country)
        return HttpResponseRedirect(site.get_absolute_url())
        #return HttpResponseRedirect(self.get_success_url())
'''
@method_decorator(login_required, name='dispatch')
class SiteDeleteView(generic.DeleteView):
    """Confirm-and-delete view for a HeritageSite.

    Overrides delete() so dependent HeritageSiteJurisdiction rows are removed
    before the site itself.
    """
    model = HeritageSite
    success_message = "Heritage Site deleted successfully"
    success_url = reverse_lazy('site')
    context_object_name = 'site'
    template_name = 'heritagesites/site_delete.html'
    def dispatch(self, *args, **kwargs):
        return super().dispatch(*args, **kwargs)
    def delete(self, request, *args, **kwargs):
        self.object = self.get_object()
        # Delete HeritageSiteJurisdiction entries
        # (must happen before deleting the site they reference).
        HeritageSiteJurisdiction.objects \
            .filter(heritage_site_id=self.object.heritage_site_id) \
            .delete()
        self.object.delete()
        return HttpResponseRedirect(self.get_success_url())
class SiteDetailView(generic.DetailView):
    """Detail page for a single heritage site."""
    model = HeritageSite
    context_object_name = 'site'
    template_name = 'heritagesites/site_detail.html'

    def get_object(self):
        return super().get_object()
class PaginatedFilterView(generic.View):
    """
    View mixin that strips the default 'page' key out of the GET querystring
    and exposes the remainder as a 'querystring' template context variable.
    https://stackoverflow.com/questions/51389848/how-can-i-use-pagination-with-django-filter
    """
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        params = self.request.GET
        if params:
            remaining = params.copy()
            # Only drop 'page' when it carries a truthy value.
            if remaining.get('page'):
                del remaining['page']
            context['querystring'] = remaining.urlencode()
        return context
class SiteFilterView(PaginatedFilterView, FilterView):
    """Filterable, paginated heritage-site list driven by HeritageSiteFilter."""
    model = HeritageSite
    # form_class = SearchForm
    filterset_class = HeritageSiteFilter
    context_object_name = 'site_list'
    template_name = 'heritagesites/site_filter.html'
    paginate_by = 30
# NOTE: dead code kept for reference -- the plain ListView this app used
# before switching to SiteFilterView.  Safe to delete once no longer useful.
'''
class SiteListView(generic.ListView):
    model = HeritageSite
    context_object_name = 'sites'
    template_name = 'heritagesites/site.html'
    paginate_by = 30
    def get_queryset(self):
        return HeritageSite.objects \
            .select_related('heritage_site_category') \
            .order_by('site_name')
'''
@method_decorator(login_required, name='dispatch')
class SiteUpdateView(generic.UpdateView):
    """Update a HeritageSite and reconcile its country jurisdictions."""
    model = HeritageSite
    form_class = HeritageSiteForm
    # fields = '__all__' <-- superseded by form_class
    context_object_name = 'site'
    # pk_url_kwarg = 'site_pk'
    success_message = "Heritage Site updated successfully"
    template_name = 'heritagesites/site_update.html'

    def dispatch(self, *args, **kwargs):
        return super().dispatch(*args, **kwargs)

    def form_valid(self, form):
        """Save the site, then sync HeritageSiteJurisdiction rows so they
        match exactly the countries selected on the form."""
        site = form.save(commit=False)
        site.save()

        # Evaluate the existing jurisdiction ids ONCE into a set: O(1)
        # membership tests instead of repeated queryset containment checks.
        old_ids = set(
            HeritageSiteJurisdiction.objects
            .values_list('country_area_id', flat=True)
            .filter(heritage_site_id=site.heritage_site_id)
        )
        new_countries = form.cleaned_data['countries']
        new_ids = {country.country_area_id for country in new_countries}

        # Insert jurisdictions for newly selected countries.
        for country in new_countries:
            if country.country_area_id not in old_ids:
                HeritageSiteJurisdiction.objects \
                    .create(heritage_site=site, country_area=country)

        # Delete jurisdictions for countries that are no longer selected.
        for old_id in old_ids - new_ids:
            HeritageSiteJurisdiction.objects \
                .filter(heritage_site_id=site.heritage_site_id, country_area_id=old_id) \
                .delete()

        return HttpResponseRedirect(site.get_absolute_url())
        # return redirect('heritagesites/site_detail', pk=site.pk)
| StarcoderdataPython |
1797906 | '''
Created on 08/20/2014
@<NAME>
'''
import argparse
import glob
import os
import sys
import time
import stat
from subprocess import Popen
from subprocess import call
import shlex
import shutil
from pprint import pprint
import re
def main():
parser = argparse.ArgumentParser(prog='dmp_run_pipeline_in_batch.py', description='Run DMP IMPACT Pipeline In Batch', usage='%(prog)s [options]')
parser.add_argument("-i", "--dmsRunInfo", action="store", dest="dmsRunInfo", required=True, metavar='RunIlluminaProcess.pl', help="Full path to RunIlluminaProcess.pl.")
parser.add_argument("-z", "--pipeline", action="store", dest="pipeline", required=True, metavar='RunInformation.txt', help="Full path to the file containing dms database dump for each Run.")
parser.add_argument("-c", "--dmpConf", action="store", dest="dmpConf", required=True, metavar='dmp_impact.conf', help="Full Path to dmp_impact.conf file")
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=True, help="make lots of noise [default]")
parser.add_argument("-sc", "--svconf", action="store", dest="svconfig", required=False, metavar='configuration_cv3.txt', help="Full Path to configuration_cv3.txt file")
parser.add_argument("-p", "--process", action="store", nargs='*', dest="process", required=True, metavar='1', help="Number of process to be used to run pipeline; Multiple process are separated by space")
parser.add_argument("-o", "--outDir", action="store", dest="outdir", required=True, metavar='/somepath/output', help="Full Path to the output dir.")
args = parser.parse_args()
#Check how many process the analysis should be ran for
if(args.verbose):
print "Checking the number of process given for analysis."
if (args.process.__len__() > 1):
process = ",".join(args.process)
else:
process = "".join(args.process)
if(process.startswith('3')):
RunForThree(args)
def RunForThree(args):
    """Run the IMPACT pipeline for every run listed in the dmsRunInfo CSV.

    Each CSV line is "fastqLocation,poolName,BamLocation".  For each pool:
    skip it if its output directory exists; otherwise create the directory,
    symlink the pool's BAM/BAI files into it, write InputBam.list, copy the
    pipeline (and optional SV) configuration files, then launch
    args.pipeline and wait for it to finish.  Python 2 code.
    """
    if(args.verbose):
        print "Going to Run IMPACT pipeline for all Runs in ", args.dmsRunInfo,"\n"
    #Open fof of bam and make a list
    with open(args.dmsRunInfo, 'r') as filecontent:
        for line in filecontent:
            data = line.rstrip('\n').split(",")
            fastqLocation = data[0]
            poolName = data[1]
            BamLocation = data[2]
            poolOutput = args.outdir + "/" + poolName
            if(args.verbose):
                print "Will try to run the process on " , poolName,".\n"
            #make the output dir
            if os.path.isdir(poolOutput):
                if(args.verbose):
                    print "The output directory ", poolOutput, " exists & thus we will skip the run ", poolName
                continue
            else:
                #print "test\n"#
                if(args.verbose):
                    print"Making directory & changing directory to", poolOutput," to run the Pipeline.\n"
                os.mkdir(poolOutput)
                call(['chmod', '755', poolOutput])
                os.chdir(poolOutput)
            grepPattern = BamLocation + "/*.bam"
            filelist = glob.glob(grepPattern)
            #Srt bam list
            if(args.verbose):
                print "Making the InputBam.list file\n"
            InputBamList = poolOutput + "/InputBam.list"
            InputBam = open(InputBamList,'w')
            call(['chmod', '755', InputBamList])
            for bamFile in filelist:
                baiFile = bamFile.replace('.bam','.bai')
                fileName = os.path.basename(bamFile)
                # Pool BAMs are named <base>_MD.bam; recover <base> here.
                baseName = re.search('(.*)_MD.*', fileName).group(1)
                srtBam = baseName + ".bam"
                srtBai = baseName + ".bai"
                mdBam = baseName + "_MD.bam"
                mdBai = baseName + "_MD.bai"
                destsrtBam = poolOutput + "/" + srtBam
                destsrtBai = poolOutput + "/" + srtBai
                InputBam.write("%s\n" % destsrtBam)
                destmdBam = poolOutput + "/" + mdBam
                destmdBai = poolOutput + "/" + mdBai
                print bamFile,"\n"
                print baiFile,"\n"
                print destsrtBam,"\n"
                print destmdBam,"\n"
                print destsrtBai,"\n"
                print destmdBai,"\n"
                #Softlink files
                # The same source BAM/BAI backs both the sorted and the
                # mark-duplicates destination names.
                if os.path.isfile(bamFile):
                    #print "test1\n"
                    if(args.verbose):
                        print "Making symbolic soft links for bam files.\n"
                    os.symlink(bamFile, destsrtBam)
                    os.symlink(bamFile, destmdBam)
                if os.path.isfile(baiFile):
                    #print "test2\n"
                    if(args.verbose):
                        print "Making symbolic soft links for index bai files.\n"
                    os.symlink(baiFile, destsrtBai)
                    os.symlink(baiFile, destmdBai)
            InputBam.close()
            #sys.exit()
            dstDmpConf = ""
            dstSVconf = ""
            if(args.dmpConf):
                #assert not os.path.isabs(args.dmpConf)
                if(args.verbose):
                    print "Copying the pipeline configuration file.\n"
                dstDmpConf = os.path.join(poolOutput, os.path.basename(args.dmpConf))
                shutil.copy(args.dmpConf, dstDmpConf)
            if(args.svconfig):
                #assert not os.path.isabs(args.svconfig)
                if(args.verbose):
                    print "Copying the SV configuration file.\n"
                dstSVconf = os.path.join(poolOutput, os.path.basename(args.svconfig))
                shutil.copy(args.svconfig, dstSVconf)
            if(args.verbose):
                print "Making cmd to run pipeline.\n"
            if(args.svconfig):
                cmd = args.pipeline + " -c " + dstDmpConf + " -sc " + dstSVconf + " -d " + fastqLocation + " -o " + poolOutput
            else:
                cmd = args.pipeline + " -c " + dstDmpConf + " -d " + fastqLocation + " -o " + poolOutput
            print "cmd:",cmd,"\n"
            print "Running Pipeline for pool " ,poolName,".\n"
            cmd_args = shlex.split(cmd)
            # Blocking run: wait for this pool's pipeline before the next one.
            proc = Popen(cmd_args)
            proc.wait()
            print "ReturnCode:", proc.returncode
            print "Finished Running Pipleine for pool ", poolName, ".\n"
            #sys.exit()
if __name__ == "__main__":
    # Time the whole batch run for reporting.  (A trailing dataset artifact
    # fused onto the final line has been removed.)
    start_time = time.time()
    main()
    end_time = time.time()
    print("Elapsed time was %g seconds" % (end_time - start_time))
3212572 | <filename>galois_field/exceptions.py
#!/usr/bin/python3
class FFOperationException(Exception):
    """
    Finite Field Element Operation Exception
    """

    def __init__(self, op="", msg=""):
        # BUG FIX: pass the message to Exception.__init__ so that str(exc)
        # and tracebacks show it (previously str(exc) was empty).
        super().__init__(msg)
        self.operator = op  # the operator that failed, e.g. "+" or "*"
        self.message = msg
class PrimeFieldNoFitException(Exception):
    """
    No fitting available in prime field
    """

    def __init__(self):
        self.message = (
            "There is no fitting in prime field. Use the correct polynom's degree 1"
        )
        # BUG FIX: forward the message so str(exc) is not empty.
        super().__init__(self.message)
class FPNegativeDegreeNotAllowed(Exception):
    """
    Negative degree is unallowed
    """

    def __init__(self):
        self.message = "Negative degree polynom is not allowed"
        # BUG FIX: forward the message so str(exc) is not empty.
        super().__init__(self.message)
| StarcoderdataPython |
1709084 | from setuptools import setup
# Package metadata for the journal-cli command-line tool.
# (A dataset artifact fused onto the closing parenthesis has been removed.)
setup(
    name='journal-cli',  # The PyPI package name.
    version='0.3',  # Bump this for every new release.
    scripts=['journal']  # Installed script name == the command users run.
)
3349041 | import unittest
import time
from collections.abc import Iterable
import numpy as np
import openmdao.api as om
from openmdao.utils.mpi import MPI
from openmdao.utils.array_utils import evenly_distrib_idxs, take_nth
from openmdao.utils.assert_utils import assert_near_equal, assert_warning
try:
from parameterized import parameterized
except ImportError:
from openmdao.utils.assert_utils import SkipParameterized as parameterized
try:
from openmdao.vectors.petsc_vector import PETScVector
except ImportError:
PETScVector = None
def _test_func_name(func, num, param):
args = []
for p in param.args:
if isinstance(p, str):
p = {p.replace('.', '_')}
elif not isinstance(p, Iterable):
p = {p}
for item in p:
try:
arg = item.__name__
except:
arg = str(item)
args.append(arg)
return func.__name__ + '_' + '_'.join(args)
class PathCompEx(om.ExplicitComponent):
    """Discrete-value component: output is the input string with this
    component's pathname and a trailing '/' appended."""
    def __init__(self, s=''):
        super().__init__()
        # Initial value used for both the discrete input and output.
        self.s = s
    def setup(self):
        self.add_discrete_input('x', val=self.s)
        self.add_discrete_output('y', val=self.s)
    def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
        # Append our own pathname so tests can trace which component ran.
        discrete_outputs['y'] = discrete_inputs['x'] + self.pathname + '/'
class SerialTests(unittest.TestCase):
    """Single-process checks for fan-out/fan-in models with parallel groups.

    MPITests (below) re-runs every test here under MPI on two processes.
    """

    @parameterized.expand([(3, 'par.C1.x', True),
                           (3, 'par.C2.x', True),
                           (3, 'indeps.x', False)],
                          name_func=_test_func_name)
    def test_fan_out(self, size, toset, auto):
        """Fan one input out to two parallel components; check values/totals."""
        p = om.Problem()
        model = p.model
        if not auto:
            ivc = model.add_subsystem('indeps', om.IndepVarComp())
            ivc.add_output('x', np.ones(size))
        par = model.add_subsystem('par', om.ParallelGroup())
        par.add_subsystem('C1', om.ExecComp('y = 3 * x', x=np.ones(size), y=np.ones(size)), promotes_inputs=['x'])
        par.add_subsystem('C2', om.ExecComp('y = 5 * x', x=np.ones(size), y=np.ones(size)), promotes_inputs=['x'])
        if not auto:
            model.connect('indeps.x', 'par.x')
        p.setup()
        inval = np.arange(size) + 1.0
        p[toset] = inval
        p.run_model()
        np.testing.assert_allclose(p.get_val('par.C1.y', get_remote=True), inval * 3.)
        np.testing.assert_allclose(p.get_val('par.C2.y', get_remote=True), inval * 5.)
        of = ['par.C1.y', 'par.C2.y']
        if auto:
            wrt = ['par.x']
        else:
            wrt = ['indeps.x']
        J = p.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
        np.testing.assert_allclose(J['par.C1.y', wrt[0]], 3. * np.eye(size))
        np.testing.assert_allclose(J['par.C2.y', wrt[0]], 5. * np.eye(size))

    @parameterized.expand([(3, 'par.C1.x', True, True), (3, 'par.C1.x', True, False),
                           (3, 'par.C2.x', True, True), (3, 'par.C2.x', True, False),
                           (3, 'C3.x', True, True), (3, 'C3.x', True, False),
                           (3, 'indeps.x', False, True), (3, 'indeps.x', False, False)],
                          name_func=_test_func_name)
    def test_fan_out_with_dup(self, size, toset, auto, before):
        # this connects an auto_ivc to 3 variables. 2 are under a parallel group and 1 is
        # duplicated in all procs
        p = om.Problem()
        model = p.model
        if not auto:
            ivc = model.add_subsystem('indeps', om.IndepVarComp())
            ivc.add_output('x', np.ones(size))
        c3 = om.ExecComp('y = 4. * x', x=np.ones(size), y=np.ones(size))
        if before:
            model.add_subsystem('C3', c3, promotes_inputs=['x'])
        par = model.add_subsystem('par', om.ParallelGroup(), promotes_inputs=['x'])
        par.add_subsystem('C1', om.ExecComp('y = 3 * x', x=np.ones(size), y=np.ones(size)), promotes_inputs=['x'])
        par.add_subsystem('C2', om.ExecComp('y = 5 * x', x=np.ones(size), y=np.ones(size)), promotes_inputs=['x'])
        if not before:
            model.add_subsystem('C3', c3, promotes_inputs=['x'])
        if not auto:
            model.connect('indeps.x', 'x')
        p.setup()
        inval = np.arange(size) + 1.0
        p[toset] = inval
        p.run_model()
        np.testing.assert_allclose(p.get_val('par.C1.y', get_remote=True), inval * 3.)
        np.testing.assert_allclose(p.get_val('par.C2.y', get_remote=True), inval * 5.)
        np.testing.assert_allclose(p.get_val('C3.y', get_remote=True), inval * 4.)
        of = ['par.C1.y', 'par.C2.y', 'C3.y']
        if auto:
            wrt = ['x']
        else:
            wrt = ['indeps.x']
        J = p.compute_totals(of=of, wrt=wrt, return_format='flat_dict')
        np.testing.assert_allclose(J['par.C1.y', wrt[0]], 3. * np.eye(size))
        np.testing.assert_allclose(J['par.C2.y', wrt[0]], 5. * np.eye(size))
        np.testing.assert_allclose(J['C3.y', wrt[0]], 4. * np.eye(size))
        # try with absolute names
        if not auto:
            J = p.compute_totals(of=of, wrt=['indeps.x'], return_format='flat_dict')
            np.testing.assert_allclose(J['par.C1.y', 'indeps.x'], 3. * np.eye(size))
            np.testing.assert_allclose(J['par.C2.y', 'indeps.x'], 5. * np.eye(size))
            np.testing.assert_allclose(J['C3.y', 'indeps.x'], 4. * np.eye(size))

    @parameterized.expand([(3, 'par.C1.x', 'par.C2.x', True),
                           (3, 'par.C1.x', 'par.C2.x', True),
                           (3, 'indeps1.x', 'indeps2.x', False)],
                          name_func=_test_func_name)
    def test_fan_in(self, size, toset1, toset2, auto):
        """Two parallel components feed one summing component."""
        p = om.Problem()
        model = p.model
        if not auto:
            ivc = model.add_subsystem('indeps1', om.IndepVarComp())
            ivc.add_output('x', np.ones(size))
            ivc = model.add_subsystem('indeps2', om.IndepVarComp())
            ivc.add_output('x', np.ones(size))
        par = model.add_subsystem('par', om.ParallelGroup())
        par.add_subsystem('C1', om.ExecComp('y = 3 * x', x=np.ones(size), y=np.ones(size)))
        par.add_subsystem('C2', om.ExecComp('y = 5 * x', x=np.ones(size), y=np.ones(size)))
        model.add_subsystem('sum', om.ExecComp('z = x + y', x=np.ones(size), y=np.ones(size), z=np.ones(size)))
        if not auto:
            model.connect('indeps1.x', 'par.C1.x')
            model.connect('indeps2.x', 'par.C2.x')
        model.connect('par.C1.y', 'sum.x')
        model.connect('par.C2.y', 'sum.y')
        p.setup()
        inval1 = np.arange(size) + 1.0
        inval2 = (np.arange(size) + 1.0)[::-1]
        p[toset1] = inval1
        p[toset2] = inval2
        p.run_model()
        np.testing.assert_allclose(p['sum.z'], inval1 * 3. + inval2 * 5.)

    def test_discrete_fan_out(self):
        """Discrete fan-out with matching defaults: both outputs get pathnames."""
        p = om.Problem()
        model = p.model
        par = model.add_subsystem('par', om.ParallelGroup(), promotes=['x'])
        par.add_subsystem('C1', PathCompEx(), promotes=['x'])
        par.add_subsystem('C2', PathCompEx(), promotes=['x'])
        p.setup()
        p.run_model()
        self.assertEqual(p.get_val('par.C1.y', get_remote=True), 'par.C1/')
        self.assertEqual(p.get_val('par.C2.y', get_remote=True), 'par.C2/')

    def test_discrete_fan_out_conflicting_defaults(self):
        # BUG FIX: this method was also named 'test_discrete_fan_out', which
        # silently shadowed the test above so it never ran.  Renamed so both
        # tests execute.
        p = om.Problem()
        model = p.model
        par = model.add_subsystem('par', om.ParallelGroup(), promotes=['x'])
        par.add_subsystem('C1', PathCompEx('foo'), promotes=['x'])
        par.add_subsystem('C2', PathCompEx('bar'), promotes=['x'])
        try:
            p.setup()
        except Exception as err:
            self.assertEqual(str(err), "<model> <class Group>: The following inputs, ['par.C1.x', 'par.C2.x'], promoted to 'x', are connected but their metadata entries ['val'] differ. Call <group>.set_input_defaults('x', val=?), where <group> is the Group named 'par' to remove the ambiguity.")
        else:
            self.fail("Exception expected.")

    def test_obj_using_input_name(self):
        class Phase(om.Group):
            def setup(self):
                self.add_subsystem('C1', om.ExecComp('y=.5*x'))
                self.add_subsystem('C2', om.ExecComp('y=g*x'))
                self.add_subsystem('C3', om.ExecComp('y=-x'))
                # this is the culprit. Bug when objective is added in group using input name
                self.add_objective('C2.g')
                self.add_design_var('C2.g')
                self.connect('C1.y', 'C2.x')
                self.connect('C2.y', 'C3.x')

        p = om.Problem()
        indep = p.model.add_subsystem('indep', om.IndepVarComp('x'))
        indep.add_output('g')
        p.model.add_subsystem('phase0', Phase())
        p.model.connect('indep.x', 'phase0.C1.x')
        p.model.add_design_var('indep.x')
        p.model.add_constraint('phase0.C3.y', equals=0.0)
        p.setup(force_alloc_complex=True)
        p['indep.x'] = [9.9]
        p['indep.g'] = 9.80665
        p.run_model()
        totals = p.check_totals(compact_print=True, method='cs', out_stream=None)
        for key, meta in totals.items():
            np.testing.assert_allclose(meta['abs error'][0], 0.)
@unittest.skipUnless(MPI and PETScVector, "MPI and PETSc are required.")
class MPITests(SerialTests):
    """Re-run every SerialTests case under MPI on two processes."""
    N_PROCS = 2
| StarcoderdataPython |
3291078 | <reponame>droctothorpe/robin
import os
from flask_migrate import Migrate, upgrade
from app import create_app, db
from app.models import Channel
# Application-factory pattern: build the app from FLASK_CONFIG (or "default").
app = create_app(os.getenv("FLASK_CONFIG") or "default")
migrate = Migrate(app, db)  # wire Flask-Migrate onto the app's database
@app.shell_context_processor
def make_shell_context():
    """Expose the db handle and the Channel model in `flask shell` sessions."""
    return {"db": db, "Channel": Channel}
@app.cli.command()
def deploy():
    """Run deployment task"""
    # Apply any pending database migrations up to the latest revision.
    upgrade()
| StarcoderdataPython |
14152 | <gh_stars>1-10
# Generated by Django 2.1.4 on 2018-12-21 21:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the posts app: Author, Post and Tag models."""

    initial = True

    dependencies = [
    ]

    operations = [
        # Author: simple named entity referenced by Post.author.
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField()),
            ],
        ),
        # Post: blog entry with metadata and engagement counters.
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.TextField()),
                ('date', models.DateTimeField()),
                ('wordcount', models.IntegerField()),
                ('in_series', models.BooleanField()),
                ('views', models.IntegerField()),
                ('faves', models.IntegerField()),
                ('comments', models.IntegerField()),
                ('votes', models.IntegerField()),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='posts.Author')),
            ],
        ),
        # Tag: label attached to posts through the M2M field added below.
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField()),
            ],
        ),
        migrations.AddField(
            model_name='post',
            name='tags',
            field=models.ManyToManyField(to='posts.Tag'),
        ),
    ]
| StarcoderdataPython |
1717423 | lev=[]
# Load the ASCII maze from map.txt into a 2-D grid of one-character strings;
# floor tiles '.' are stored as the string '0'.
with open('map.txt') as f:
    for line in f:
        tmp = []
        for letter in line:
            if letter != '\n':
                tmp.append(letter.replace('.', '0'))
        lev.append(tmp)
ly = len(lev)      # grid height (rows)
lx = len(lev[0])   # grid width -- assumes the map has at least one row
for i in lev:
    print(i)
# Locate the goal marker 'x'; fx/fy end up as its column/row.
fx = 0
fy = 0
for line in lev:
    try:
        fx = line.index('x')
        break
    except:  # ValueError when 'x' is not in this row
        pass
    fy += 1
print(fx, fy)
# Seed the distance map: the goal cell gets the highest score.
lev[fy][fx] = 1000
# Flood-fill distance scores outward from the goal: each reachable floor cell
# gets int(neighbour score) - 1, so scores decrease with distance from 'x'.
i = 0
poves = []
check = [[fx, fy]]          # frontier/visited cells as [x, y]
ox, oy = fx, fy
xindex, yindex = 0, 0       # NOTE(review): only used by the commented-out fallback below
while ox*oy > i:            # NOTE(review): iteration bound looks arbitrary (goal_x*goal_y passes)
    for asdf in check:      # check grows during iteration; Python also visits appended items
        fx, fy = asdf[0], asdf[1]
        poves = []
        # Four neighbours as [row, col, score-of-current-cell].
        coords = [[fy, fx+1, lev[fy][fx]], [fy, fx-1, lev[fy][fx]], [fy+1, fx, lev[fy][fx]], [fy-1, fx, lev[fy][fx]]]
        for it in coords:
            coord = lev[it[0]][it[1]]
            # Only unscored floor cells (still strings) are expanded;
            # walls 'w' and the player 'p' are skipped.
            if not coord == 'w' and not coord == 'p' and not type(coord) == int:
                poves.append(it)
        for it in poves:
            lev[it[0]][it[1]] = int(lev[fy][fx])-1
            check.append([it[1], it[0]])
    i += 1
'''for line in lev:
xindex=0
print(yindex, xindex)
for coord in line:
if not coord == 'w' and not coord == 'p' and not type(coord) == int:
lev[yindex][xindex] = (1000-(fy-yindex))-(fx-xindex)
xindex+=1
yindex+=1
for i in lev:
print(i)
''' #add as fallback code if main method fails?
# Greedy walk: from the player 'p', repeatedly step onto the neighbouring cell
# with the highest score until the goal score (1000) is reached.
px, py = 0, 0
poves = []
n = 0
for i in lev:
    print(i)
# Locate the player marker 'p' (same scan pattern as for 'x' above).
for line in lev:
    try:
        px = line.index('p')
        break
    except:  # ValueError when 'p' is not in this row
        pass
    py += 1
while True:
    poves = []
    print(px, py)
    try:
        # Collect walkable neighbours as (x, y, score).
        if not lev[py][px+1] == 'w':
            poves.append((px+1, py, int(lev[py][px+1])))
        if not lev[py][px-1] == 'w':
            poves.append((px-1, py, int(lev[py][px-1])))
        if not lev[py+1][px] == 'w':
            poves.append((px, py+1, int(lev[py+1][px])))
        if not lev[py-1][px] == 'w':
            poves.append((px, py-1, int(lev[py-1][px])))
        # Pick the neighbour with the highest score.
        high = sorted(poves, key=lambda poves: poves[2])
        high = high[len(high)-1]
        lev[py][px] = -1-n   # mark the trail with negative step numbers
        px = high[0]
        py = high[1]
        if lev[py][px] == 1000:
            print("'AI' has found its way to the end!")
            exit()
        lev[py][px] = 'p'
        n += 1
    except Exception as e:
        # Hit when no walkable neighbour exists (empty poves -> IndexError) or a
        # marker cell fails int() conversion.
        print("Failed to find space!")
        print(str(e))
        exit()
for i in lev:
    print(i)
input()
| StarcoderdataPython |
16600 | <filename>lite/tests/unittest_py/pass/test_conv_elementwise_fuser_pass.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
sys.path.append('.')
from auto_scan_test import FusePassAutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
from test_conv_util import UpdatePaddingAndDilation, ConvOutputSize, ConvTransposeOutputSize
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
class TestConvElementwiseFuse(FusePassAutoScanTest):
    """Auto-scan test for fusing conv2d / conv2d_transpose with elementwise_add.

    Fix vs. original: in the transpose branch, ``conv_output_w`` was computed
    from ``oh`` (height) instead of ``ow`` — a copy-paste bug that made the
    width constraint depend on the output height. Also removed an unused
    ``CxxConfig()`` local and normalized C-style ``(int)(x)`` casts.
    """

    def __init__(self, *args, **kwargs):
        FusePassAutoScanTest.__init__(self, *args, **kwargs)
        # ARM and X86 FP32/NCHW targets, 1 and 4 threads.
        self.enable_testing_on_place(
            TargetType.ARM, [PrecisionType.FP32],
            DataLayoutType.NCHW,
            thread=[1, 4])
        self.enable_testing_on_place(
            TargetType.X86, [PrecisionType.FP32],
            DataLayoutType.NCHW,
            thread=[1, 4])
        # All supported OpenCL precision/layout combinations plus the host fallback.
        opencl_places = [
            Place(TargetType.OpenCL, PrecisionType.FP16,
                  DataLayoutType.ImageDefault),
            Place(TargetType.OpenCL, PrecisionType.FP16,
                  DataLayoutType.ImageFolder),
            Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
            Place(TargetType.OpenCL, PrecisionType.Any,
                  DataLayoutType.ImageDefault),
            Place(TargetType.OpenCL, PrecisionType.Any,
                  DataLayoutType.ImageFolder),
            Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
            Place(TargetType.Host, PrecisionType.FP32)
        ]
        self.enable_testing_on_place(places=opencl_places)

    def is_program_valid(self,
                         program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:
        """All drawn programs are valid; constraints are enforced via assume()."""
        return True

    def sample_program_configs(self, draw):
        """Draw a random conv (or conv_transpose) + elementwise_add program."""
        # conv or conv_transpose
        Transpose = draw(st.sampled_from([True, False]))
        # conv param or conv_transpose param
        in_shape = draw(
            st.lists(
                st.integers(
                    min_value=3, max_value=128),
                min_size=3,
                max_size=3))
        in_shape = [draw(st.integers(min_value=1, max_value=4))] + in_shape
        weight_shape = draw(
            st.lists(
                st.integers(
                    min_value=1, max_value=8), min_size=4, max_size=4))
        paddings = draw(
            st.lists(
                st.integers(
                    min_value=0, max_value=2), min_size=2, max_size=2))
        dilations = draw(st.sampled_from([[2, 2]]))
        groups = draw(st.sampled_from([1, 2, in_shape[1]]))
        padding_algorithm = draw(st.sampled_from(["VALID", "SAME"]))
        strides = draw(st.sampled_from([[1, 1], [2, 2]]))
        # output_padding (transpose only) must stay below max(stride, dilation).
        output_padding = draw(
            st.sampled_from([[], [
                draw(
                    st.integers(
                        min_value=0,
                        max_value=max(strides[0], dilations[0]) - 1)), draw(
                    st.integers(
                        min_value=0,
                        max_value=max(strides[1], dilations[1]) - 1))
            ]]))
        scale_in = draw(st.floats(min_value=0.001, max_value=0.1))
        scale_out = draw(st.floats(min_value=0.001, max_value=0.1))
        # Bias length must match the op's output channel count.
        if Transpose:
            bias_sample_shape = weight_shape[1] * groups
        else:
            bias_sample_shape = weight_shape[0]
        elementwise_bias_shape = [bias_sample_shape]
        conv_out_shape = []
        paddings_, dilations_ = UpdatePaddingAndDilation(
            in_shape, weight_shape, paddings, dilations, groups,
            padding_algorithm, strides)
        if Transpose:
            assume(in_shape[1] == weight_shape[0])
            assume(in_shape[1] % groups == 0)  # TODO
            if len(output_padding):
                assume(output_padding[0] < max(strides[0], dilations_[0]))
                assume(output_padding[1] < max(strides[1], dilations_[1]))
            conv_out_shape = [in_shape[0], weight_shape[1] * groups]
            oh, ow = ConvTransposeOutputSize(in_shape, weight_shape,
                                             dilations_, paddings_, strides)
            if len(output_padding):
                oh = oh + output_padding[0]
                ow = ow + output_padding[1]
            conv_out_shape = conv_out_shape + [int(oh), int(ow)]
            assume(oh > 0 and ow > 0)
            if len(output_padding):
                # Sanity-check that the transpose output maps back to the input
                # size via the forward-conv formula.
                # NOTE(review): adding output_padding again here (oh/ow already
                # include it above) and using paddings[0]+paddings[1] for both
                # dims mirrors the original generator -- confirm against the
                # operator's padding semantics.
                conv_output_h = (oh + output_padding[0] + paddings[0] +
                                 paddings[1] -
                                 (dilations[0] *
                                  (weight_shape[2] - 1) + 1)) / strides[0] + 1
                # BUGFIX: width must be derived from ow, not oh.
                conv_output_w = (ow + output_padding[1] + paddings[0] +
                                 paddings[1] -
                                 (dilations[1] *
                                  (weight_shape[3] - 1) + 1)) / strides[1] + 1
                assume(in_shape[2] == int(conv_output_h))
                assume(in_shape[3] == int(conv_output_w))
        else:
            assume(in_shape[1] == weight_shape[1] * groups)
            assume(weight_shape[0] % groups == 0)
            conv_out_shape = [in_shape[0], weight_shape[0]]
            oh, ow = ConvOutputSize(in_shape, weight_shape, dilations_,
                                    paddings_, strides)
            conv_out_shape = conv_out_shape + [int(oh), int(ow)]
            assume(oh > 0 and ow > 0)
        conv_type = ""
        conv_attrs = {}
        if Transpose:
            conv_type = "conv2d_transpose"
            conv_attrs = {
                "data_format": 'nchw',
                "dilations": dilations,
                "padding_algorithm": padding_algorithm,
                "groups": groups,
                "paddings": paddings,
                "strides": strides,
                "Scale_in": scale_in,
                "Scale_out": scale_out,
                "output_size": [],
                "output_padding": output_padding
            }
        else:
            conv_type = "conv2d"
            conv_attrs = {
                "data_format": 'nchw',
                "dilations": dilations,
                "padding_algorithm": padding_algorithm,
                "groups": groups,
                "paddings": paddings,
                "strides": strides,
                "Scale_in": scale_in,
                "Scale_out": scale_out
            }
        conv_op = OpConfig(
            type=conv_type,
            inputs={"Input": ["input_data"],
                    "Filter": ["filter_data"]},
            outputs={"Output": ["conv_output_data"]},
            attrs=conv_attrs)
        elementwise_add_op = OpConfig(
            type="elementwise_add",
            inputs={"X": ["conv_output_data"],
                    "Y": ["add_bias_data"]},
            outputs={"Out": ["output_data"]},
            attrs={"axis": 1})
        ops = [conv_op, elementwise_add_op]
        self.ops = ops
        program_config = ProgramConfig(
            ops=ops,
            weights={
                "filter_data": TensorConfig(shape=weight_shape),
                "add_bias_data": TensorConfig(shape=elementwise_bias_shape)
            },
            inputs={"input_data": TensorConfig(shape=in_shape)},
            outputs=["output_data"])
        return program_config

    def sample_predictor_configs(self):
        """After fusing, only the conv op should remain; tolerances (1e-4, 1e-5)."""
        return self.get_predictor_configs(), [self.ops[0].type], (1e-4, 1e-5)

    def add_ignore_pass_case(self):
        pass

    def test(self, *args, **kwargs):
        self.run_and_statis(
            quant=False,
            max_examples=500,
            passes=["lite_conv_elementwise_fuser_pass"])
if __name__ == "__main__":
    # Empty argv keeps unittest from trying to parse external CLI arguments.
    unittest.main(argv=[''])
| StarcoderdataPython |
131833 | <gh_stars>1-10
###############################################################################
# Caleydo - Visualization for Molecular Biology - http://caleydo.org
# Copyright (c) The Caleydo Team. All rights reserved.
# Licensed under the new BSD license, available at http://caleydo.org/license
###############################################################################
from builtins import str
import phovea_server.plugin
import phovea_server.range
import phovea_server.util
from phovea_server.dataset_def import to_idtype_description
import itertools
_providers_r = None
def _providers():
    """Lazily instantiate and cache all registered 'dataset-provider' plugins."""
    global _providers_r
    if _providers_r is None:
        _providers_r = [p.load().factory() for p in phovea_server.plugin.list('dataset-provider')]
    return _providers_r
def iter():
    """
    An iterator over all known datasets, chained across every provider.
    NOTE: intentionally(?) shadows the builtin ``iter`` at module level.
    :return: iterator of datasets
    """
    return itertools.chain(*_providers())
def list_datasets():
    """
    List all known datasets (materializes the provider iterator).
    :return: list of datasets
    """
    return list(iter())
def get(dataset_id):
    """
    Look up a dataset by its id across all providers.
    :param dataset_id: id of the dataset
    :return: the dataset, or None if no provider knows it
    """
    # Lazily query providers in order; stop at the first hit.
    candidates = (provider[dataset_id] for provider in _providers())
    return next((hit for hit in candidates if hit is not None), None)
def add(desc, files=None, id=None):
    """
    adds a new dataset to this storage
    :param desc: the dict description information
    :param files: a list of FileStorage (defaults to an empty list)
    :param id: optional the unique id to use
    :return: the newly created dataset or None if an error occurred
    """
    # Avoid the shared mutable-default-argument pitfall: fresh list per call.
    files = [] if files is None else files
    # First provider that accepts the upload wins.
    for p in _providers():
        r = p.upload(desc, files, id)
        if r:
            return r
    return None
def update(dataset, desc, files=None):
    """
    updates the given dataset
    :param dataset: a dataset or a dataset id
    :param desc: the dict description information
    :param files: a list of FileStorage (defaults to an empty list)
    :return: the updated (or newly created) dataset
    """
    # Avoid the shared mutable-default-argument pitfall: fresh list per call.
    files = [] if files is None else files
    old = get(dataset) if isinstance(dataset, str) else dataset
    if old is None:
        # Unknown dataset: fall back to creating it.
        return add(desc, files)
    r = old.update(desc, files)
    return r
def remove(dataset):
    """
    Remove the given dataset.
    :param dataset: a dataset or a dataset id
    :return: True if some provider removed it, False otherwise
    """
    target = get(dataset) if isinstance(dataset, str) else dataset
    if target is None:
        return False
    # any() short-circuits on the first provider that reports success,
    # exactly like the original early-return loop.
    return any(p.remove(target) for p in _providers())
def list_idtypes():
    """Collect the id-type descriptions of all datasets plus those known to the mapping graph."""
    tmp = dict()
    for d in list_datasets():
        for idtype in d.to_idtype_descriptions():
            tmp[idtype['id']] = idtype
    # also include the known elements from the mapping graph
    mapping = get_mappingmanager()
    for idtype_id in mapping.known_idtypes():
        tmp[idtype_id] = to_idtype_description(idtype_id)
    return list(tmp.values())
def get_idmanager():
    """Return the registered 'idmanager' plugin instance."""
    return phovea_server.plugin.lookup('idmanager')
def get_mappingmanager():
    """Return the registered 'mappingmanager' plugin instance."""
    return phovea_server.plugin.lookup('mappingmanager')
| StarcoderdataPython |
164306 | #!/usr/bin/env python
import sys
import time
import obd
import json
import os
# NOTE: Python 2 script (print statements). Connects to an OBD-II adapter,
# optionally at a port given as the first CLI argument, then refreshes a
# terminal dashboard forever (Ctrl-C to stop).
if len(sys.argv) == 1:
    connection = obd.OBD()
else:
    connection = obd.OBD(sys.argv[1])
os.system('clear')
while True:
    print 'Car Information: '
    print 'Speed : ' + \
        str(connection.query(obd.commands.SPEED).value.to("mph"))
    print 'RPM : ' + str(connection.query(obd.commands.RPM).value)
    print 'Fuel Level: ' + str(connection.query(obd.commands.FUEL_LEVEL).value)
    print 'Engine Temp : ' + \
        str(connection.query(obd.commands.COOLANT_TEMP).value.to("degF"))
    print '\n'
    print 'Diagonstic Information: '
    print 'Stored DTCs: ' + str(connection.query(obd.commands.GET_DTC).value)
    print 'UpTme: ' + str(connection.query(obd.commands.RUN_TIME).value)
    print '\n'
    print 'Weather Information:'
    print 'Air Temp: ' \
        + str(connection.query(obd.commands.AMBIANT_AIR_TEMP).value.to("degF"))
    print 'Barometric Pressure: ' \
        + str(connection.query(obd.commands.BAROMETRIC_PRESSURE).value)
    # Refresh every 5 seconds.
    time.sleep(5)
    os.system('clear')
| StarcoderdataPython |
1617554 | <filename>samtranslator/model/preferences/deployment_preference_collection.py<gh_stars>0
from .deployment_preference import DeploymentPreference
from samtranslator.model.codedeploy import CodeDeployApplication
from samtranslator.model.codedeploy import CodeDeployDeploymentGroup
from samtranslator.model.iam import IAMRole
from samtranslator.model.intrinsics import fnSub, is_instrinsic
from samtranslator.model.update_policy import UpdatePolicy
from samtranslator.translator.arn_generator import ArnGenerator
import copy
CODE_DEPLOY_SERVICE_ROLE_LOGICAL_ID = 'CodeDeployServiceRole'
CODEDEPLOY_APPLICATION_LOGICAL_ID = 'ServerlessDeploymentApplication'
CODEDEPLOY_PREDEFINED_CONFIGURATIONS_LIST = ["Canary10Percent5Minutes",
"Canary10Percent10Minutes",
"Canary10Percent15Minutes",
"Canary10Percent30Minutes",
"Linear10PercentEvery1Minute",
"Linear10PercentEvery2Minutes",
"Linear10PercentEvery3Minutes",
"Linear10PercentEvery10Minutes",
"AllAtOnce"
]
class DeploymentPreferenceCollection(object):
    """
    This class contains the collection of all global and
    specific / per function deployment preferences. It includes ways to add
    the deployment preference information from the SAM template and how to
    generate the update policy (and dependencies of the update policy) for
    each function alias. Dependencies include the codedeploy cloudformation
    resources.
    """

    def __init__(self):
        """
        This collection stores an internal dict of the deployment preferences for each function's
        deployment preference in the SAM Template.
        """
        self._resource_preferences = {}
        # Shared CodeDeploy resources referenced by every deployment group.
        self.codedeploy_application = self._codedeploy_application()
        self.codedeploy_iam_role = self._codedeploy_iam_role()

    def add(self, logical_id, deployment_preference_dict):
        """
        Add this deployment preference to the collection
        :raise ValueError if an existing logical id already exists in the _resource_preferences
        :param logical_id: logical id of the resource where this deployment preference applies
        :param deployment_preference_dict: the input SAM template deployment preference mapping
        """
        if logical_id in self._resource_preferences:
            raise ValueError("logical_id {logical_id} previously added to this deployment_preference_collection".format(
                logical_id=logical_id))
        self._resource_preferences[logical_id] = DeploymentPreference.from_dict(logical_id, deployment_preference_dict)

    def get(self, logical_id):
        """
        :rtype: DeploymentPreference object previously added for this given logical_id
        """
        return self._resource_preferences.get(logical_id)

    def any_enabled(self):
        """
        :return: boolean whether any deployment preferences in the collection are enabled
        """
        return any(preference.enabled for preference in self._resource_preferences.values())

    def can_skip_service_role(self):
        """
        If every one of the deployment preferences have a custom IAM role provided, we can skip creating the
        service role altogether.
        :return: True, if we can skip creating service role. False otherwise
        """
        return all(preference.role for preference in self._resource_preferences.values())

    def enabled_logical_ids(self):
        """
        :return: only the logical id's for the deployment preferences in this collection which are enabled
        """
        return [logical_id for logical_id, preference in self._resource_preferences.items() if preference.enabled]

    def _codedeploy_application(self):
        """Create the single CodeDeploy application resource (Lambda platform)."""
        codedeploy_application_resource = CodeDeployApplication(CODEDEPLOY_APPLICATION_LOGICAL_ID)
        codedeploy_application_resource.ComputePlatform = 'Lambda'
        return codedeploy_application_resource

    def _codedeploy_iam_role(self):
        """Create the service role CodeDeploy assumes, with the AWS managed Lambda policy."""
        iam_role = IAMRole(CODE_DEPLOY_SERVICE_ROLE_LOGICAL_ID)
        iam_role.AssumeRolePolicyDocument = {
            'Version': '2012-10-17',
            'Statement': [{
                'Action': ['sts:AssumeRole'],
                'Effect': 'Allow',
                'Principal': {'Service': ['codedeploy.amazonaws.com']}
            }]
        }
        iam_role.ManagedPolicyArns = [
            ArnGenerator.generate_aws_managed_policy_arn('service-role/AWSCodeDeployRoleForLambda')
        ]
        return iam_role

    def deployment_group(self, function_logical_id):
        """
        :param function_logical_id: logical_id of the function this deployment group belongs to
        :return: CodeDeployDeploymentGroup resource
        """
        deployment_preference = self.get(function_logical_id)
        deployment_group = CodeDeployDeploymentGroup(self.deployment_group_logical_id(function_logical_id))
        if deployment_preference.alarms is not None:
            deployment_group.AlarmConfiguration = {'Enabled': True,
                                                   'Alarms': [{'Name': alarm} for alarm in
                                                              deployment_preference.alarms]}
        deployment_group.ApplicationName = self.codedeploy_application.get_runtime_attr('name')
        # Roll back automatically on failure, alarm trip, or manual stop.
        deployment_group.AutoRollbackConfiguration = {'Enabled': True,
                                                      'Events': ['DEPLOYMENT_FAILURE',
                                                                 'DEPLOYMENT_STOP_ON_ALARM',
                                                                 'DEPLOYMENT_STOP_ON_REQUEST']}
        # deepcopy: _replace_deployment_types mutates lists/dicts in place.
        deployment_group.DeploymentConfigName = self._replace_deployment_types(copy.deepcopy(
            deployment_preference.deployment_type))
        deployment_group.DeploymentStyle = {'DeploymentType': 'BLUE_GREEN',
                                            'DeploymentOption': 'WITH_TRAFFIC_CONTROL'}
        deployment_group.ServiceRoleArn = self.codedeploy_iam_role.get_runtime_attr("arn")
        # A user-supplied role overrides the generated service role.
        if deployment_preference.role:
            deployment_group.ServiceRoleArn = deployment_preference.role
        return deployment_group

    def _replace_deployment_types(self, value):
        """Recursively rewrite predefined config names to CodeDeployDefault.Lambda<Name> Fn::Sub forms."""
        if isinstance(value, list):
            for i in range(len(value)):
                value[i] = self._replace_deployment_types(value[i])
            return value
        elif is_instrinsic(value):
            # Intrinsic functions are dicts; rewrite their nested values.
            for (k, v) in value.items():
                value[k] = self._replace_deployment_types(v)
            return value
        else:
            if value in CODEDEPLOY_PREDEFINED_CONFIGURATIONS_LIST:
                return fnSub("CodeDeployDefault.Lambda${ConfigName}", {"ConfigName": value})
            return value

    def update_policy(self, function_logical_id):
        """Build the UpdatePolicy for the given function's alias."""
        deployment_preference = self.get(function_logical_id)
        return UpdatePolicy(
            self.codedeploy_application.get_runtime_attr('name'),
            self.deployment_group(function_logical_id).get_runtime_attr('name'),
            deployment_preference.pre_traffic_hook,
            deployment_preference.post_traffic_hook,
        )

    def deployment_group_logical_id(self, function_logical_id):
        """Logical id of the deployment group generated for this function."""
        return function_logical_id + 'DeploymentGroup'

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return NotImplemented

    def __ne__(self, other):
        if isinstance(other, self.__class__):
            return not self.__eq__(other)
        return NotImplemented

    def __hash__(self):
        # NOTE(review): instance attributes include dicts/resource objects, so
        # this may raise TypeError if actually hashed -- confirm intended use.
        return hash(tuple(sorted(self.__dict__.items())))
| StarcoderdataPython |
1788933 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
file : LISTA_base.py
author: xhchrn
email : <EMAIL>
date : 2019-02-18
A base class for all LISTA networks.
"""
import numpy as np
import numpy.linalg as la
import tensorflow as tf
import sys, os
import time
import utils.train
class LISTA_base(object):
    """
    Implementation of deep neural network model.

    Base class for all LISTA networks: subclasses build the layers and the
    inference graph; this class provides checkpoint save/load and the staged
    training loop.

    Fix vs. original: ``load_trainable_variables`` collected the non-variable
    checkpoint entries in ``other`` but never returned them, while
    ``do_training`` uses its return value as a dict (``state.get('done', [])``)
    — resuming from a checkpoint would crash on ``None``. It now returns
    ``other``.
    """

    def __init__(self):
        pass

    def setup_layers(self):
        """Build the network's layers. Implemented by subclasses."""
        pass

    def inference(self):
        """Build the inference graph. Implemented by subclasses."""
        pass

    def save_trainable_variables(self, filename, **kwargs):
        """
        Save trainable variables in the model to an npz file with the current
        value of each variable in ``self.vars_in_layer``.
        :filename: File name of saved file ('.npz' appended if missing).
        :kwargs: Other arguments that we want to save.
        """
        # Carry forward any previously stored training state (e.g. 'done'/'log').
        state = getattr(self, 'state', {})
        save = dict()
        for v_tuple in self.vars_in_layer:
            for v in v_tuple:
                save[str(v.name)] = v
        # file name suffix check
        if not filename.endswith(".npz"):
            filename = filename + ".npz"
        save.update(self._scope)  # NOTE(review): assumes self._scope is a mapping -- confirm
        save.update(state)
        np.savez(filename, **save)

    def load_trainable_variables(self, filename):
        """
        Load trainable variables from a saved npz file, assigning matching
        variables in place.
        :filename: Checkpoint file name ('.npz' appended if missing).
        :returns: dict of entries in the file that did not match any variable
                  (e.g. the saved training state such as 'done'/'log').
        """
        other = dict()
        # file name suffix check
        if filename[-4:] != '.npz':
            filename = filename + '.npz'
        if not os.path.exists(filename):
            raise ValueError(filename + ' not exists')
        tv = dict([(str(v.name), v) for v_tuple in self.vars_in_layer for v in v_tuple])
        for k, d in np.load(filename).items():
            if k in tv:
                print('restoring ' + k)
                tf.compat.v1.assign(tv[k], d)
            else:
                other[k] = d
        # BUGFIX: do_training expects the non-variable state back.
        return other

    def do_training(self, stages, data_set, savefn, scope,
                    val_step, maxit, better_wait):
        """
        Train the model stage by stage.
        :stages     : List of tuples (name, loss_, nmse_, loss_val_, nmse_val_,
                      opt_, var_list) obtained via `utils.train.setup_training`.
        :data_set   : dataset object whose update() draws the next batch.
        :savefn     : String. Path where the trained model is saved.
        :scope      : Name of the variable scope (currently unused here).
        :val_step   : Integer. How many steps between two validations.
        :maxit      : Integer. Max number of iterations in each training stage.
        :better_wait: Integer. Jump to next stage if no better validation NMSE
                      after this many iterations.
        """
        if not savefn.endswith(".npz"):
            savefn += ".npz"
        if os.path.exists(savefn):
            sys.stdout.write('Pretrained model found. Loading...\n')
            state = self.load_trainable_variables(savefn)
        else:
            state = {}
        done = state.get('done', [])
        log = state.get('log', [])
        for name, loss_, nmse_, loss_val_, nmse_val_, opt_, var_list in stages:
            start = time.time()
            # Skip stage done already.
            if name in done:
                sys.stdout.write('Already did {}. Skipping\n'.format(name))
                continue
            # print stage information
            var_disc = 'fine tuning ' + ','.join([v.name for v in var_list])
            print(name + ' ' + var_disc)
            nmse_hist_val = []
            for i in range(maxit + 1):
                data_set.update()
                opt_.minimize(loss=loss_, var_list=var_list)
                nmse_tr_dB = 10. * np.log10(nmse_())
                loss_tr = loss_()
                if i % val_step == 0:
                    nmse_val = nmse_val_()
                    loss_val = loss_val_()
                    if np.isnan(nmse_val):
                        raise RuntimeError('nmse is nan. exiting...')
                    nmse_hist_val = np.append(nmse_hist_val, nmse_val)
                    db_best_val = 10. * np.log10(nmse_hist_val.min())
                    nmse_val_dB = 10. * np.log10(nmse_val)
                    sys.stdout.write("\r| i={i:<7d} | loss_tr={loss_tr:.6f} | "
                                     "nmse_tr/dB={nmse_tr_db:.6f} | loss_val ={loss_val:.6f} | "
                                     "nmse_val/dB={nmse_val_db:.6f} | (best={db_best_val:.6f})"
                                     .format(i=i, loss_tr=loss_tr, nmse_tr_db=nmse_tr_dB,
                                             loss_val=loss_val, nmse_val_db=nmse_val_dB,
                                             db_best_val=db_best_val))
                    sys.stdout.flush()
                if i % (10 * val_step) == 0:
                    age_of_best = (len(nmse_hist_val) -
                                   nmse_hist_val.argmin() - 1)
                    # If nmse has not improved for a long time, jump to the
                    # next training stage.
                    if age_of_best * val_step > better_wait:
                        print('')
                        break
                if i % (100 * val_step) == 0:
                    print('')
            done = np.append(done, name)
            # TODO: add log
            end = time.time()
            time_log = 'Took me {totaltime:.3f} minutes, or {time_per_interation:.1f} ms per iteration'.format(
                totaltime=(end - start) / 60, time_per_interation=(end - start) * 1000 / i)
            print(time_log)
            state['done'] = done
            state['log'] = log
            self.save_trainable_variables(savefn, **state)
| StarcoderdataPython |
1732730 | <filename>Solutions/mailroom/mailroom_fp/test_mailroom.py<gh_stars>0
# test-mailroom.py
import pytest
from random import randint, SystemRandom
from string import ascii_letters as letters
# from mailroom_mfr import (
from mailroom_parallel import (
load_donordb,
add_donation,
tally_report,
)
@pytest.fixture(scope='module')
def db():
    """Module-scoped fixture: the donor database loaded once per test module."""
    return load_donordb()
def test_add_donation(db):
    """Adding two donations under a fresh random donor records both amounts."""
    # Random 20-letter name avoids colliding with donors already in the db.
    random_donor_name = ''.join(SystemRandom().choice(letters) for _ in range(20))
    random_amount_1 = randint(0, 999999)
    add_donation(db, random_donor_name, random_amount_1)
    random_amount_2 = randint(0, 999999)
    add_donation(db, random_donor_name, random_amount_2)
    assert random_donor_name in db.keys()
    assert random_amount_1 in db[random_donor_name]
    assert random_amount_2 in db[random_donor_name]
def test_tally_report(db):
    """tally_report returns (total, count, mean) consistent with the raw donation list."""
    doner = 'Aristotle'  # NOTE(review): assumed to exist in the loaded donor db
    donation_total, num_gifts, average_gift = tally_report(db[doner])
    assert donation_total == sum(db[doner])
    assert num_gifts == len(db[doner])
    assert average_gift == donation_total / num_gifts
| StarcoderdataPython |
3325408 | <filename>backend/board/serializer.py
from rest_framework import serializers
from .models import Post, Comment,Category
from users.models import User
class CategorySerializer(serializers.ModelSerializer):
    """Serializer exposing a category's name and its post count."""

    class Meta:
        model = Category
        fields = ('name', 'post_num')
class CommentSerializer(serializers.ModelSerializer):
    """Serializer for comments; timestamps formatted, related objects rendered as strings."""

    created_at = serializers.DateTimeField('%Y/%m/%d %H:%M:%S')
    modified_at = serializers.DateTimeField('%Y/%m/%d %H:%M:%S')
    # Related objects are flattened via attribute lookups on the relation.
    post = serializers.CharField(source="post.get_absolute_url")
    author = serializers.CharField(source="author.get_full_name")

    class Meta:
        model = Comment
        fields = ('id', 'post', 'author', 'text', 'created_at', 'modified_at')
class PostSerializer(serializers.ModelSerializer):
    """Detail serializer for posts (includes content)."""

    created = serializers.DateTimeField('%Y/%m/%d %H:%M:%S')  # customize created format
    # author is a ForeignKey, so it needs separate handling: it is rendered
    # through the related model (via get_full_name) rather than directly.
    author = serializers.CharField(source="author.get_full_name")
    category = serializers.CharField(source="category.get_name", default='X')

    class Meta:
        model = Post
        fields = ('id', 'title', 'content', 'created', 'author', 'category',)
class PostListSerializer(serializers.ModelSerializer):
    """List serializer for posts (omits the content field)."""

    created = serializers.DateTimeField('%Y/%m/%d %H:%M:%S')
    author = serializers.CharField(source="author.get_full_name")
    # Serializing an empty value raises an error, so a default is provided.
    category = serializers.CharField(source="category.get_name", default='X')

    class Meta:
        model = Post
        fields = ('id', 'title', 'created', 'author', 'category', )
| StarcoderdataPython |
3251045 | # -*- coding: utf-8 -*-
#
# Copyright 2017-2018 AVSystem <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
from framework.lwm2m_test import *
from .dm.utils import DataModel
class Test401_UDPChannelSecurity_PreSharedKeyMode(DataModel.Test):
    """LwM2M-1.0-int-401: Register and READ over a DTLS session in PSK mode."""

    PSK_IDENTITY = b'test-identity'
    PSK_KEY = b'test-key'

    def setUp(self):
        # Imported for availability -- presumably to fail fast when the
        # pymbedtls bindings are missing; the name itself is unused. TODO confirm.
        from pymbedtls import PskSecurity
        self.setup_demo_with_servers(servers=[Lwm2mServer(coap.DtlsServer(psk_key=self.PSK_KEY, psk_identity=self.PSK_IDENTITY))],
                                     extra_cmdline_args=['--identity',
                                                         str(binascii.hexlify(self.PSK_IDENTITY), 'ascii'),
                                                         '--key', str(binascii.hexlify(self.PSK_KEY), 'ascii')],
                                     auto_register=False)

    def runTest(self):
        # 1. DTLS Session is established
        # 2. Registration message (CoAP POST) is sent from LwM2M
        #    Client to LwM2M Server.
        # 3. Client receives Success message (2.01 Created) from the Server.
        #
        # A. In test step 2 & 3, Registration command of the Client on the Server
        #    is performed successfully over the DTLS session
        self.assertDemoRegisters()
        # 4. READ (CoAP GET) on the Instance of the Device Object is
        #    performed using the default TLV data format (cf Test
        #    LwM2M-1.0-int-203)
        # 5. Server receives success message (2.05 Content) and the
        #    requested values (encrypted)
        #
        # B. In test step 4 & 5 the READ command work successfully over the
        #    DTLS session.
        self.test_read('/%d/0' % OID.Device)
| StarcoderdataPython |
1614629 | <filename>test.py
import i3
import unittest
import platform
py3 = platform.python_version_tuple() > ('3',)
class ParseTest(unittest.TestCase):
    """Tests for i3's message/event type parsing helpers."""

    def setUp(self):
        # Each list holds the same type expressed as name, int and numeric string.
        self.msg_types = ['get_tree', 4, '4']
        self.event_types = ['output', 1, '1']

    def test_msg_parse(self):
        """All spellings of a message type parse to the same int."""
        msg_types = []
        for msg_type in self.msg_types:
            msg_types.append(i3.parse_msg_type(msg_type))
        # Compare each element to its neighbour (index -1 wraps around).
        for index in range(-1, len(msg_types) - 1):
            self.assertEqual(msg_types[index], msg_types[index+1])
            self.assertIsInstance(msg_types[index], int)

    def test_event_parse(self):
        """All spellings of an event type parse to the same string."""
        event_types = []
        for event_type in self.event_types:
            event_types.append(i3.parse_event_type(event_type))
        for index in range(-1, len(event_types) - 1):
            self.assertEqual(event_types[index], event_types[index+1])
            self.assertIsInstance(event_types[index], str)

    def test_msg_type_error(self):
        """Out-of-range or unknown message types raise MessageTypeError."""
        border_lower = -1
        border_higher = len(i3.MSG_TYPES)
        values = ['joke', border_lower, border_higher, -100, 100]
        for val in values:
            self.assertRaises(i3.MessageTypeError, i3.parse_msg_type, val)
            self.assertRaises(i3.MessageTypeError, i3.parse_msg_type, str(val))

    def test_event_type_error(self):
        """Out-of-range or unknown event types raise EventTypeError."""
        border_lower = -1
        border_higher = len(i3.EVENT_TYPES)
        values = ['joke', border_lower, border_higher, -100, 100]
        for val in values:
            self.assertRaises(i3.EventTypeError, i3.parse_event_type, val)
            self.assertRaises(i3.EventTypeError, i3.parse_event_type, str(val))

    def test_msg_error(self):
        """If i3.yada doesn't pass, see http://bugs.i3wm.org/report/ticket/693"""
        self.assertRaises(i3.MessageError, i3.focus)  # missing argument
        self.assertRaises(i3.MessageError, i3.yada)   # doesn't exist
        self.assertRaises(i3.MessageError, i3.meh, 'some', 'args')
class SocketTest(unittest.TestCase):
    """Tests for the i3 IPC Socket class (requires a running i3 instance)."""

    def setUp(self):
        pass

    def test_connection(self):
        """Connecting to a nonexistent socket path raises ConnectionError."""
        def connect():
            return i3.Socket('/nil/2971.socket')
        self.assertRaises(i3.ConnectionError, connect)

    # NOTE(review): the default argument is evaluated once at class-definition
    # time, so the default socket is created on import -- confirm intended.
    def test_response(self, socket=i3.default_socket()):
        """A get_workspaces query returns a non-empty structure with names."""
        workspaces = socket.get('get_workspaces')
        self.assertIsNotNone(workspaces)
        for workspace in workspaces:
            self.assertTrue('name' in workspace)

    def test_multiple_sockets(self):
        """Several sockets can be open and queried concurrently."""
        socket1 = i3.Socket()
        socket2 = i3.Socket()
        socket3 = i3.Socket()
        for socket in [socket1, socket2, socket3]:
            self.test_response(socket)
        for socket in [socket1, socket2, socket3]:
            socket.close()

    def test_pack(self):
        """Packed messages are bytes on Python 3."""
        packed = i3.default_socket().pack(0, "haha")
        if py3:
            self.assertIsInstance(packed, bytes)
class GeneralTest(unittest.TestCase):
    """Tests for the i3 module's dynamic attribute access and helpers."""

    def setUp(self):
        pass

    def test_getattr(self):
        """Unknown attributes resolve to callable command wrappers."""
        func = i3.some_attribute
        self.assertTrue(callable(func))
        socket = i3.default_socket()
        self.assertIsInstance(socket, i3.Socket)

    def test_success(self):
        """i3.success maps reply dicts (or lists of them) to booleans or errors."""
        data = {'success': True}
        self.assertEqual(i3.success(data), True)
        self.assertEqual(i3.success([data, {'success': False}]), [True, False])
        data = {'success': False, 'error': 'Error message'}
        self.assertIsInstance(i3.success(data), i3.MessageError)

    def test_container(self):
        """i3.container builds a criteria string (key order is unspecified)."""
        container = i3.container(title='abc', con_id=123)
        output = ['[title="abc" con_id="123"]',
                  '[con_id="123" title="abc"]']
        self.assertTrue(container in output)

    def test_criteria(self):
        # 'clasS' spelling avoids the Python keyword clash with 'class'.
        self.assertTrue(i3.focus(clasS='xterm'))

    def test_filter1(self):
        """Filtering on nodes=[] returns only leaf windows."""
        windows = i3.filter(nodes=[])
        for window in windows:
            self.assertEqual(window['nodes'], [])

    def test_filter2(self):
        """Filtering on focused=False returns unfocused nodes, some with children."""
        unfocused_windows = i3.filter(focused=False)
        parent_count = 0
        for window in unfocused_windows:
            self.assertEqual(window['focused'], False)
            if window['nodes'] != []:
                parent_count += 1
        self.assertGreater(parent_count, 0)

    def test_filter_function_wikipedia(self):
        """You have to have a Wikipedia tab opened in a browser."""
        func = lambda node: 'Wikipedia' in node['name']
        nodes = i3.filter(function=func)
        self.assertTrue(nodes != [])
        for node in nodes:
            self.assertTrue('free encyclopedia' in node['name'])
if __name__ == '__main__':
    # Build one suite per TestCase class and run everything with verbose output.
    loader = unittest.TestLoader()
    test_suits = [loader.loadTestsFromTestCase(case)
                  for case in (ParseTest, SocketTest, GeneralTest)]
    unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(test_suits))
| StarcoderdataPython |
"""https://github.com/lw/BluRay/wiki/MPLS"""
__all__ = ['MoviePlaylist']
from abc import ABC, abstractmethod
from io import BufferedReader
from pprint import pformat
from struct import unpack
from typing import Any, Dict, Tuple
class MplsObject(ABC):
    """Common behaviour shared by every parsed MPLS structure.

    Holds the open buffered reader and provides the big-endian integer
    helpers used by concrete ``load`` implementations.
    """

    mpls: BufferedReader

    def __init__(self, mpls: BufferedReader) -> None:
        self.mpls = mpls
        super().__init__()

    def __repr__(self) -> str:
        # Dump attributes in declaration order for readable debug output.
        return pformat(vars(self), sort_dicts=False)

    @abstractmethod
    def load(self):
        """Parse this object's fields from the underlying stream."""

    def _get_pos(self) -> int:
        """Return the current byte offset within the stream."""
        return self.mpls.tell()

    def _unpack_byte(self, n: int) -> Tuple[Any, ...]:
        """Read *n* bytes and decode them as one big-endian unsigned integer.

        Supported widths: 1 (unsigned char), 2 (unsigned short),
        4 (unsigned int) and 8 (unsigned long long).
        """
        width_to_format: Dict[int, str] = {1: '>B', 2: '>H', 4: '>I', 8: '>Q'}
        raw = self.mpls.read(n)
        return unpack(width_to_format[n], raw)
class MoviePlaylist(MplsObject):
    """Header of an MPLS movie playlist file.

    See https://github.com/lw/BluRay/wiki/MPLS
    """

    type_indicator: str               # always 'MPLS'
    version_number: str               # e.g. '0100' / '0200' / '0300'
    playlist_start_address: int
    playlist_mark_start_address: int
    extension_data_start_address: int

    def load(self):
        """Parse the fixed 40-byte MPLS header from the start of the stream.

        Returns:
            self, so calls can be chained.

        Raises:
            ValueError: if the reader is not positioned at offset 0 — the
                header is only meaningful at the very start of the file.
                (ValueError subclasses Exception, so existing broad handlers
                keep working; the old code raised a bare Exception with a
                garbled message.)
        """
        pos = self._get_pos()
        if pos != 0:
            raise ValueError('MoviePlaylist: load() must be called at the start of the mpls file!')
        self.type_indicator = self.mpls.read(4).decode('utf-8')   # 4 bytes - 32 bits
        self.version_number = self.mpls.read(4).decode('utf-8')   # 4 bytes - 32 bits
        self.playlist_start_address, = self._unpack_byte(4)       # 4 bytes - 32 bits
        self.playlist_mark_start_address, = self._unpack_byte(4)  # 4 bytes - 32 bits
        self.extension_data_start_address, = self._unpack_byte(4) # 4 bytes - 32 bits
        self.mpls.read(20)                                        # 20 bytes - 160 bits - Reserved
        return self
| StarcoderdataPython |
"""TuneBlade API Client."""
import logging
import asyncio
import socket
from typing import Optional
import aiohttp
import async_timeout
TIMEOUT = 10
_LOGGER: logging.Logger = logging.getLogger(__package__)
HEADERS = {"Content-type": "application/json; charset=UTF-8"}
class TuneBladeApiClient:
    """Thin async client for the TuneBlade remote-control HTTP API."""

    def __init__(
        self, host: str, port: str, device_id: str, username: str, password: str, airplay_password: str, session: aiohttp.ClientSession, auth
    ) -> None:
        """Store connection settings and build the device endpoint URL.

        Args:
            host: address of the TuneBlade HTTP server.
            port: port of the TuneBlade HTTP server (string, used in the URL).
            device_id: AirPlay device id, or "Master" for the master channel.
            username: HTTP username (stored but not used by the requests below).
            password: HTTP password (stored but not used by the requests below).
            airplay_password: password sent with connect/volume commands.
            session: shared aiohttp session used for all requests.
            auth: NOTE(review) accepted but never stored or used — confirm intent.
        """
        self._host = host
        self._port = port
        self._username = username
        self._password = password
        # Restored from an anonymisation artifact ("<PASSWORD>") that made
        # the original file a syntax error.
        self._airplay_password = airplay_password
        self._session = session
        if device_id == "Master":
            self._url = "http://" + host + ":" + port + "/master"
        else:
            self._url = "http://" + host + ":" + port + "/devices/" + device_id

    async def async_get_data(self) -> dict:
        """Fetch the current state of the device from the API."""
        return await self.api_wrapper("get", self._url)

    async def async_conn(self, value: str) -> None:
        """Set the connection status (e.g. "Connect"/"Disconnect") of the device."""
        await self.api_wrapper(
            "put",
            self._url,
            data={"Password": self._airplay_password, "Status": value},
            headers=HEADERS,
        )

    async def async_set_volume(self, volume: float) -> None:
        """Set device volume; *volume* is a 0.0-1.0 fraction scaled to 0-100."""
        await self.api_wrapper(
            "put",
            self._url,
            data={"Password": self._airplay_password, "Volume": str(int(volume * 100))},
            headers=HEADERS,
        )

    async def async_set_volume_master(self, volume: float) -> None:
        """Set the master volume, forcing the master channel to connect."""
        await self.api_wrapper(
            "put",
            self._url,
            data={"Status": "Connect", "Volume": str(int(volume * 100))},
            headers=HEADERS,
        )

    async def api_wrapper(
        self, method: str, url: str, data: Optional[dict] = None, headers: Optional[dict] = None
    ) -> dict:
        """Issue an HTTP request, logging (not raising) on failure.

        Only "get" returns the decoded JSON body; all other methods return None.
        """
        # Avoid the mutable-default-argument pitfall: bind fresh objects per call.
        data = {} if data is None else data
        headers = {} if headers is None else headers
        try:
            async with async_timeout.timeout(TIMEOUT):
                if method == "get":
                    response = await self._session.get(url, headers=headers)
                    return await response.json()
                elif method == "put":
                    await self._session.put(url, headers=headers, json=data)
                elif method == "patch":
                    await self._session.patch(url, headers=headers, json=data)
                elif method == "post":
                    await self._session.post(url, headers=headers, json=data)
        except asyncio.TimeoutError as exception:
            _LOGGER.error(
                "Timeout error fetching information from %s - %s",
                url,
                exception,
            )
        except (KeyError, TypeError) as exception:
            _LOGGER.error(
                "Error parsing information from %s - %s",
                url,
                exception,
            )
        except (aiohttp.ClientError, socket.gaierror) as exception:
            _LOGGER.error(
                "Error fetching information from %s - %s",
                url,
                exception,
            )
        except Exception as exception:  # pylint: disable=broad-except
            _LOGGER.error("Something really wrong happened! - %s", exception)
| StarcoderdataPython |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 10 13:17:52 2020
@author: <NAME>, Finnish Meteorological Institute
@licence: MIT licence Copyright
"""
import matplotlib
import numpy
import os
import pathlib
import seaborn
from Data import Data
from FileSystem import FileSystem
from PlotTweak import PlotTweak
from Simulation import Simulation
from decimal import Decimal
class Plot:
    """Plotting helpers for UCLALES-SALSA simulation output (timeseries,
    profile contours, bin proportions/masses, size distributions, 2-D
    animations).

    NOTE(review): every method here is written WITHOUT ``self`` and is
    invoked as ``Plot.method(...)`` directly on the class; calling one on
    a Plot *instance* would raise a TypeError. Consider decorating them
    with @staticmethod.
    """

    def getVerticalLine(ax, x, color='k', linestyle='--'):
        """Draw a vertical marker line at *x* on *ax*."""
        ax.axvline(x, color=color, linestyle=linestyle)

    def getHorizontalLine(ax, y, color='k', linestyle='--'):
        """Draw a horizontal marker line at *y* on *ax*."""
        ax.axhline(y, color=color, linestyle=linestyle)

    def getTimeseries(ax,
                      simulation: Simulation,
                      muuttuja,
                      conversionFactor=1.0):
        """Plot variable *muuttuja* from the TS dataset of one simulation
        (or each simulation in a list) onto *ax*.

        Returns *ax*; returns None when the variable is missing.
        """
        if isinstance(simulation, list):
            # Recurse over every simulation, drawing on the same axis.
            for simulationInstance in simulation:
                ax = Plot.getTimeseries(ax, simulationInstance, muuttuja, conversionFactor)
            return ax
        ts = simulation.getTSDataset()
        try:
            dataset = ts[muuttuja]
        except KeyError:
            print("KeyError")
            return None
        # conversion (e.g. unit scaling) before plotting
        dataset = dataset * conversionFactor
        dataset.plot(ax=ax,
                     color=simulation.getColor(),
                     label=simulation.getLabel(),
                     linewidth=simulation.getLineWidth())
        return ax

    # REVISEu
    def getTimeseriesOfProfile(ax,
                               simulation: Simulation,
                               muuttuja,
                               levels=None,
                               useLogaritmic=False,
                               useColorBar=False,
                               colors=None):
        """Filled (time x height) contour plot of a profile-statistics
        variable.

        Returns (ax, im, levels); implicitly returns None when the
        variable is missing from the PS dataset.
        """
        ps = simulation.getPSDataset()
        try:
            data = ps[muuttuja]
        except KeyError:
            print("KeyError", simulation, muuttuja, "Plot.getTimeseriesOfProfile")
            return
        if useLogaritmic:
            if levels is None:
                # NOTE(review): `levels` is immediately overwritten with
                # rangePotenssi on the next line — presumably the integer
                # exponent range is wanted; confirm.
                levels, rangePotenssi, minimiPotenssi, maksimiPotenssi = Data.getLogScale(data.values)
                levels = rangePotenssi
            data.values = numpy.log10(data.values)
        im = data.plot.contourf("time", "zt", ax=ax, levels=levels, add_colorbar=useColorBar, colors=colors)
        return ax, im, levels

    def getContourLine(ax,
                       simulation: Simulation,
                       muuttuja,
                       value,
                       color="black",
                       epsilon=1e-12):
        """Draw a single contour of *muuttuja* at *value* (within +-epsilon)."""
        ps = simulation.getPSDataset()
        try:
            data = ps[muuttuja]
        except KeyError:
            print("KeyError", simulation, muuttuja, "Plot.getContourLine")
            return
        data.plot.contour(x="time", y="zt", ax=ax, colors=color, vmin=value - epsilon, vmax=value + epsilon)
        return ax

    def getColorBar(im, ax, levels=None):
        """Attach a horizontal colorbar for *im* to axis *ax*; when log-scale
        *levels* are given, ticks are labelled as powers of ten."""
        cb = matplotlib.pyplot.colorbar(im, cax=ax, ticks=levels, orientation='horizontal')  # , pad=0.21
        if levels is not None:
            cb.ax.set_xticklabels([r"$10^{" + str(int(elem)) + "}$" for elem in levels])
            colorbarLabelListShowBoolean = Data.getIntegerExponentsAsBoolean(levels)
            cb = PlotTweak.hideColorbarXLabels(cb, colorbarLabelListShowBoolean)
        cb.ax.tick_params(labelsize=36)

    # REVISE
    def getTimeseriesOfProportions(axes,
                                   simulation: Simulation,
                                   muuttuja,
                                   mode="inCloud", cmap="bright", limit=1e-6, height=None, packing=None,
                                   timeStartH=2.05, timeEndH=48, analysis=False,
                                   fontsize=None, useLegend=True,
                                   figurePrefix="",
                                   kuvakansio="/home/aholaj/OneDrive/000_WORK/000_ARTIKKELIT/000-Manuscript-ICE/kuvat/bini/",
                                   kuvakansioPDF="/home/aholaj/OneDrive/000_WORK/000_ARTIKKELIT/000-Manuscript-ICE/figures_pdf",
                                   filenamePDF="figure6"):
        """Per size bin, plot the fraction of B-bin particles that are
        aerosol / cloud / ice, plus total number relative to its initial
        value. *axes* must contain one axis per plotted bin.

        Returns the axes, or the string "FileNotFound" when the simulation's
        datasets are unavailable.

        NOTE(review): parameters `muuttuja`, `cmap`, `analysis`,
        `figurePrefix`, `kuvakansio*`, `filenamePDF` are accepted but never
        used in this body — confirm whether they are vestigial.
        """
        print(mode, end=" ")
        if height is not None:
            print(height)
        else:
            print()
        ps = simulation.getPSDataset()
        if ps is None:
            return "FileNotFound"
        ts = simulation.getTSDataset()
        if ts is None:
            return "FileNotFound"
        # Restrict to the requested simulation-time window (hours -> seconds).
        timeStartInd = Data.getClosestIndex(ps.time.values, timeStartH * 3600)
        timeEndInd = Data.getClosestIndex(ps.time.values, timeEndH * 3600)
        ps = ps.isel(time=slice(timeStartInd, timeEndInd))
        try:
            # Select the vertical region of interest, then average over height.
            if mode == "inCloud":
                # ps = ps.sel(zt = slice(665,745)).mean(dim = "zt")
                ps = ps.where((ps.P_rl > limit) & (ps.P_ri > limit), drop=True).mean(dim="zt", skipna=True)  # ps.where(ps.zt > ts.zb).where(ps.zt < ts.zc).mean(dim = "zt")
            elif mode == "belowCloud":
                # ps = ps.sel(zt = slice(5,410)).mean(dim = "zt")
                ps = ps.where(ps.P_rl < limit, drop=True).where(ps.zt < ts.zb, drop=True).mean(dim="zt", skipna=True)  # .where(ps.P_rl < 1e-6, drop = True)
            elif mode == "aboveCloud":
                ps = ps.where(ps.zt > ts.zc, drop=True).mean(dim="zt", skipna=True)  # .where(ps.P_rl < 1e-6, drop = True)
            elif mode == "height":
                ps = ps.sel(zt=height, method='nearest')
        except KeyError:
            return
        # Convert the time coordinate from seconds to hours.
        ps = ps.assign_coords(time=(ps.time / 3600))
        try:
            aero = ps["P_Nabb"]
            cloud = ps["P_Ncbb"]
            ice = ps["P_Nibb"]
        except KeyError:
            return
        # Give all three variables a common bin-dimension name so they can
        # be summed together.
        newname = "dryRadiusBinB"
        aero = aero.rename({"aeb": newname})
        cloud = cloud.rename({"clb": newname})
        ice = ice.rename({"icb": newname})
        total = aero + cloud + ice
        if packing is not None:
            # Collapse bins >= packing into a single aggregate bin.
            for daatta in aero, cloud, ice, total:
                daatta[:, packing] = numpy.sum(daatta[:, packing:], axis=1)
        # NOTE(review): with the default packing=None this line raises
        # TypeError (None + 1); the function appears to assume packing is
        # always given — confirm.
        binNumber = min(numpy.shape(total.values)[1], packing + 1)
        matplotlib.rcParams['lines.linewidth'] = 6
        yTicks = [0, 0.5, 1]
        # NOTE(review): this map object is a one-shot iterator; it is
        # exhausted by the first set_yticklabels call below, so bins after
        # the first get empty labels (axes[2] is re-labelled explicitly at
        # the end) — confirm this is intended.
        yTickLabels = map(str, yTicks)
        matplotlib.pyplot.subplots_adjust(hspace=0.05, wspace=0.05)
        xLabelListShow = numpy.arange(8, 48 + 1, 8)
        xLabelListShow = numpy.insert(xLabelListShow, 0, 2)
        xLabelListMajorLine = numpy.arange(4, 48 + 1, 4)
        xLabelListMajorLine = numpy.insert(xLabelListMajorLine, 0, 2)
        for bini in range(binNumber):
            ax = axes[bini]
            aeroBin = aero[:, bini]
            cloudBin = cloud[:, bini]
            iceBin = ice[:, bini]
            totalBin = total[:, bini]
            # Phase fractions of the bin total, plus total relative to t0.
            aeroFrac = aeroBin / totalBin
            cloudFrac = cloudBin / totalBin
            iceFrac = iceBin / totalBin
            totalBinRelative = totalBin / totalBin.values[0]
            aeroFrac.plot(ax=ax, color="#e6194B")
            cloudFrac.plot(ax=ax, color="#000075")
            iceFrac.plot(ax=ax, color="#42d4f4")
            totalBinRelative.plot(ax=ax, color="black")
            ax.set_yticks(yTicks)
            ax.set_yticklabels(yTickLabels)
            ax.set_ylim(0, 1.5)
            ax.set_title("")
            matplotlib.pyplot.setp(ax.get_yticklabels()[1], visible=False)
            if packing is not None and bini == (binNumber - 1):
                bininame = str(bini + 1) + " - 7"
            else:
                bininame = str(bini + 1)
            if useLegend:
                legend_handles = [matplotlib.patches.Patch(facecolor="black",
                                                           label=" ".join(["Bin", bininame + ",", "Total", r"$N_0$", str(int(totalBin.values[0])) + ",", "Min", r"$N$", str(int(numpy.min(totalBin))), "$(kg^{-1})$"]))]
                legend = ax.legend(handles=legend_handles, loc="best", fontsize=fontsize)
                ax.add_artist(legend)
                if bini == 0:
                    # Shared colour legend above the first panel only.
                    header_handles = [matplotlib.patches.Patch(facecolor="#e6194B", label="Aerosol"),
                                      matplotlib.patches.Patch(facecolor="#000075", label="Cloud"),
                                      matplotlib.patches.Patch(facecolor="#42d4f4", label="Ice")]
                    header_legend = ax.legend(handles=header_handles, loc=(0.3, 1.05), ncol=3, frameon=True, framealpha=1.0, fontsize=fontsize)
                    ax.add_artist(header_legend)
            ########## END USELEGEND
            # Bottom row (panels 2 and 3) carries the x-axis label.
            if bini in [2, 3]:
                setXlabel = True
            else:
                setXlabel = False
            ax = PlotTweak.setXTicksLabelsAsTime(ax, ps.time.values, xLabelListShow=xLabelListShow, xLabelListMajorLine=xLabelListMajorLine, setXlabel=setXlabel)
            if bini in [0, 1]:
                ax.set_xticklabels([])
        axes[2].set_yticklabels([str(item) for item in yTicks])
        for tick in axes[2].get_yticklabels():
            print(tick)
            tick.set_visible(True)
        return axes

    # REVISE
    def getTimeseriesOfBinMass(ax,
                               simulation : Simulation,
                               muuttuja,
                               height,
                               cmap="OrRd", relative=True, limiter=1e-3):
        """Plot the relative change of every size bin of *muuttuja* at one
        height, skipping bins that stay near zero throughout.

        Returns the axis, None when every bin was skipped, or the string
        "FileNotFound" when the PS dataset is unavailable.

        NOTE(review): `relative` is accepted but never used.
        """
        ps = simulation.getPSDataset()
        if ps is None:
            return "FileNotFound"
        zt = ps.zt
        # Nearest model level to the requested height; fixed time window.
        psSliced = ps.sel(zt=height, method="nearest").isel(time=slice(61, 1440))
        try:
            data = psSliced[muuttuja]
        except KeyError:
            return
        aerosolsUsed = False
        AerosolAbins = False
        AerosolBbins = False
        parallelAeroB = None
        parallelAeroA = None
        ################################
        # NOTE(review): aerosolsUsed is set only for "P_Naba"; for
        # "P_Nabb" the AerosolBbins branch is taken with aerosolsUsed
        # False, so parallelCloudB below is never assigned and the loop
        # would raise NameError — looks like the first test was meant to
        # cover both A and B aerosol variables; confirm.
        if muuttuja == "P_Naba":
            aerosolsUsed = True
        if muuttuja == "P_Naba":
            AerosolAbins = True
        elif muuttuja == "P_Nabb":
            AerosolBbins = True
        if aerosolsUsed:
            # Cloud-droplet bins paired with the aerosol bins.
            parallelCloudA = psSliced["P_Ncba"]
            parallelCloudB = psSliced["P_Ncbb"]
            if AerosolAbins:
                parallelAeroB = psSliced["P_Nabb"]
            elif AerosolBbins:
                parallelAeroA = psSliced["P_Naba"]
        biniTieto = muuttuja[-1].upper()  # 'A' or 'B' bin regime tag
        ################################
        dataAllBins = data
        dataAllBins = dataAllBins.assign_coords(time=(dataAllBins.time / 3600))
        size = numpy.shape(dataAllBins.values)[1]
        colorpalette = seaborn.color_palette(cmap, 10)
        skip = 0
        aero = None
        # Feature toggles for which parallel populations are included in
        # the denominator of the relative change.
        includeOtherAero = False
        includeParallelOthercloud = False
        includeParallelCloud = True
        label = biniTieto + " bin |" + r"$N_0\ (\#/kg)$"
        legend_elements = [matplotlib.patches.Patch(facecolor="white", label=label)]
        for bini in range(size):
            plottable = dataAllBins[:, bini]
            vertailuData = numpy.zeros(numpy.shape(plottable.values))
            if Data.isCloseToEpsilon(plottable, limiter):
                # Bin is effectively empty for the whole run; skip it.
                skip += 1
                continue
            # Index offsets between A-bin and B-bin populations (B bins
            # start three bins later than A bins).
            if AerosolAbins:
                parallelBaeroBini = bini - 3
                parallelAcloudBini = bini - 3
                parallelBcloudBini = bini - 3
            elif AerosolBbins:
                parallelAaeroBini = bini + 3
                parallelAcloudBini = bini
                parallelBcloudBini = bini
            if aerosolsUsed:  # and (parallelbini >= 0):
                if includeOtherAero:
                    if AerosolAbins and parallelBaeroBini > 0:
                        aero = parallelAeroB[:, parallelBaeroBini]
                    elif AerosolBbins:
                        aero = parallelAeroA[:, parallelAaeroBini]
                    vertailuData = vertailuData + aero.values
                if includeParallelOthercloud:
                    if AerosolAbins and parallelBcloudBini > 0:
                        parallelOtherCloud = parallelCloudB[:, parallelBcloudBini]
                    elif AerosolBbins and parallelAcloudBini > 0:
                        parallelOtherCloud = parallelCloudA[:, parallelAcloudBini]
                    vertailuData = vertailuData + parallelOtherCloud.values
                if includeParallelCloud:
                    if AerosolAbins:
                        parallelCloud = parallelCloudA[:, parallelAcloudBini]
                    elif AerosolBbins:
                        parallelCloud = parallelCloudB[:, parallelBcloudBini]
                    vertailuData = vertailuData + parallelCloud.values
            denom = plottable.values + vertailuData
            plottable, lahtoarvo = Data.getRelativeChange(plottable, denominator=denom, limiter=limiter)
            color = Data.getColorBin(colorpalette, bini, plottable)
            plottable.plot(ax=ax, color=color)
            # Scientific notation for extreme starting values.
            if lahtoarvo > 1000 or lahtoarvo < 0.1:
                label = '{0:8d}{1:11.1E}'.format(bini + 1, Decimal(lahtoarvo))
            else:
                label = '{0:8d}{1:11.1f}'.format(bini + 1, lahtoarvo)
            legend_elements.append(matplotlib.patches.Patch(facecolor=color, label=label))
        if skip == size:
            # Nothing was plotted; close the figure and signal "no plot".
            matplotlib.pyplot.close()
            return None
        # matplotlib.pyplot.axvline( 2, color = "k" , linestyle = "--" )
        # matplotlib.pyplot.legend()
        ax.legend(handles=legend_elements, loc='best', frameon=True, framealpha=1.0)
        heightTosi = str(int(zt.sel(zt=height, method='nearest').values))
        matplotlib.pyplot.title("zt =" + heightTosi + "(m)")
        # print(time.values)
        ax = PlotTweak.setXTicksLabelsAsTime(ax, plottable["time"].values, startPoint=8)
        # matplotlib.pyplot.ylim(0, 5)
        ax.set_yscale('log')
        return ax

    # REVISE
    def getSizeDistributionHeightTimeSpecified(ax,
                                               simulation : Simulation,
                                               muuttuja,
                                               label=None,
                                               color="b",
                                               height=745, timeH=2.5):
        """Log-log size distribution of *muuttuja* at one height and time.

        NOTE(review): `dataarray.longname` assumes the variable carries a
        'longname' attribute (UCLALES-SALSA convention) — confirm.
        """
        ps = simulation.getPSDataset()
        if ps is None:
            return "FileNotFound"
        ps = ps.sel(zt=height).sel(time=timeH * 3600, method="nearest")
        try:
            dataarray = ps[muuttuja]
        except KeyError:
            return
        if label is None:
            label = dataarray.longname
        dataarray.plot.line(ax=ax, color=color, marker="o", label=label)  # aero "#e6194B" cloud "#000075" ice "#42d4f4"
        matplotlib.pyplot.legend()
        ax.set_yscale("log")
        ax.set_xscale("log")
        return ax

    ### getAnimation2D NEEDS REVISION ####
    def getAnimation2D(ax,
                       simulation : Simulation,
                       muuttuja="S_Nc",
                       kuvakansio="/home/aholaj/OneDrive/000_WORK/000_ARTIKKELIT/000-Manuscript-ICE/kuvat/anim",
                       useAverage=False, ytValue=0, useLog=False):
        """Render one PNG per timestep of an (x, z) slice of *muuttuja* and
        stitch them into an animated GIF with ImageMagick's ``convert``.

        NOTE(review): ``plot_alustus`` and ``saveFig`` are neither defined
        nor imported in this module — as written this function raises
        NameError at runtime; confirm their intended source. The *ax*
        parameter is also immediately shadowed inside the loop.
        """
        nc = simulation.getNCDataset()
        if nc is None:
            return "FileNotFound"
        try:
            dataAnim = nc[muuttuja]  # .sel(zt = slice(395, 850))
        except KeyError:
            return
        # Time coordinate seconds -> hours.
        dataAnim = dataAnim.assign_coords(time=(dataAnim.time / 3600))
        print(" ")
        print("animate", muuttuja)
        if useLog:
            # Masked log10 so non-positive cells become 0 instead of NaN/inf.
            dataAnim.values = numpy.ma.log10(dataAnim.values).filled(0)
        if useAverage:
            dataAnim = dataAnim.mean(dim="yt")
        else:
            dataAnim = dataAnim.sel(yt=ytValue, method="nearest")
        dataAnim = dataAnim.sel(time=slice(2.5, 49))
        timeSpan = numpy.shape(dataAnim)[0]
        subkuvakansio = FileSystem.createSubfolder(kuvakansio, muuttuja)
        for i in range(timeSpan):
            fig, ax = plot_alustus()
            plottable = dataAnim.isel(time=i)
            plottable.plot(x="xt", y="zt", ax=ax, add_colorbar=False, cmap="Blues_r")  # , levels = levels
            ax.set_title("time = " + "{:5.1f} (h)".format(plottable.time.values))
            ax.set_ylabel("height (m)")
            ax.set_xlabel("East-west displacement of cell centers (m)")
            saveFig(subkuvakansio, muuttuja + "_{:04d}".format(i))
        # Stitch the per-frame PNGs into a GIF, restoring the original cwd.
        origDir = os.getcwd()
        os.chdir(subkuvakansio)
        os.system("convert -delay 50 -loop 0 *.png animation.gif")
        os.chdir(origDir)
| StarcoderdataPython |
import pandas as pd
import numpy as np
import xgboost as xgb
from collections import Counter
import random
from sklearn.metrics import accuracy_score, confusion_matrix
# from xgboost.sklearn import XGBClassifier
from sklearn.model_selection import train_test_split
# from sklearn import cross_validation, metrics
from sklearn.model_selection import RandomizedSearchCV
from sklearn import metrics
def F1_score(confusion_max):
    """Compute per-class precision/recall/F1 from a confusion matrix and
    return the squared macro-average F1.

    :param confusion_max: square confusion matrix as a numpy 2-D array
        (rows = true class, columns = predicted class; column slicing
        ``confusion_max[:, i]`` requires a numpy array, not a list).
    :return: ``(mean(F1)) ** 2`` over all classes.

    Also prints the per-class F1/precision/recall lists as a side effect.
    """
    precision = []
    recall = []
    F1 = []
    class_num = len(confusion_max)
    for i in range(class_num):
        temp_row = confusion_max[i]
        TP = temp_row[i]
        FN_sum = sum(temp_row)            # all true members of class i (TP + FN)
        temp_column = confusion_max[:, i]
        FP_sum = sum(temp_column)         # all predictions of class i (TP + FP)
        # max(..., 1) guards the all-zero row/column case (then TP is 0 too).
        pre = TP / max(FP_sum, 1)
        rec = TP / max(FN_sum, 1)
        # Bug fix: the old code divided by max(pre + rec, 1), which silently
        # deflated F1 whenever precision + recall < 1. Divide by the true
        # denominator, guarding only against 0/0.
        f1 = (2 * pre * rec) / (pre + rec) if (pre + rec) > 0 else 0.0
        F1.append(f1)
        precision.append(pre)
        recall.append(rec)
    print("F1")
    print(F1)
    print("precision")
    print(precision)
    print("recall")
    print(recall)
    F_score = ((1 / len(F1)) * sum(F1)) ** 2
    return F_score
def decode(encode_list):
    """Map encoded class labels (0-7) back to the original service-type codes.

    Labels outside 0-7 are silently skipped, exactly as the original
    if-chain did (it appended nothing for unknown values).
    """
    label_to_service = {
        0: 89950166,
        1: 89950167,
        2: 89950168,
        3: 99999825,
        4: 99999826,
        5: 99999827,
        6: 99999828,
        7: 99999830,
    }
    return [label_to_service[code] for code in encode_list if code in label_to_service]
# ---------------------------------------------------------------------------
# Training / submission script: 10-fold XGBoost on service-type class 4 data,
# majority-voting the per-fold predictions and patching them into an
# existing LightGBM baseline submission file.
# ---------------------------------------------------------------------------
# raw_data = pd.read_csv(r"E:\CCFDF\plansmatching\data\raw data\final_data\train4_final.csv",
#                        encoding="utf-8", low_memory=False)
raw_data = pd.read_csv(r"/data/projects/CCFDF_18/data/train4_final.csv",
                       encoding="utf-8", low_memory=False)
# NOTE(review): only the first 100 rows are kept — this looks like a
# debugging leftover; confirm before a real training run.
raw_data = raw_data.iloc[0:100]
# raw_data = pd.read_csv(r"/Users/peterlee/Documents/CCFDF18/final_data/class_2.csv",
#                        encoding="utf-8", low_memory=False)
# num_total = len(raw_data)
# random_site = random.sample(range(num_total), round(num_total*0.001))
# raw_data = raw_data.iloc[random_site]
# para_list = ['is_mix_service', 'online_time', '1_total_fee', '2_total_fee', '3_total_fee', '4_total_fee',
#              'month_traffic', 'many_over_bill', 'contract_type', 'contract_time',
#              'is_promise_low_consume', 'net_service', 'pay_times', 'pay_num', 'last_month_traffic',
#              'local_trafffic_month', 'local_caller_time', 'service1_caller_time', 'service2_caller_time', 'gender',
#              'age', 'complaint_level', 'former_complaint_num', 'former_complaint_fee', 'user_id']
# Full engineered-feature column list; the trailing 'user_id' is dropped
# below before training.
para_list = ['is_mix_service', 'online_time', '1_total_fee', '2_total_fee', '3_total_fee', '4_total_fee', 'fee_distance',
             '1_total_fee_norm', '2_total_fee_norm', '3_total_fee_norm', '4_total_fee_norm',
             'month_traffic', 'many_over_bill', 'contract_type', 'contract_time',
             'is_promise_low_consume', 'net_service', 'pay_times', 'pay_num', 'pay_mean', 'last_month_traffic',
             'local_trafffic_month', 'local_caller_time', 'service1_caller_time', 'service2_caller_time', 'gender',
             'age', 'complaint_level', 'former_complaint_num', 'former_complaint_fee',
             'fee_mean', 'fee_std', 'fee_fluctuate', 'fee_mean_2',
             'service_caller_time_fluctuate', 'service_caller_time_mean', 'online_time_norm', 'fee_mean_norm', 'fee_std_norm',
             'fee_fluctuate_norm', 'fee_distance_norm', 'month_traffic_norm', 'contract_time_norm', 'pay_num_norm',
             'last_month_traffic_norm', 'local_trafffic_month_norm', 'local_caller_time_norm',
             'service1_caller_time_norm', 'service2_caller_time_norm', 'age_norm', 'former_complaint_num_norm',
             'former_complaint_fee_norm', 'fee_mean_2_norm', 'service_caller_time_fluctuate_norm', 'service_caller_time_mean_norm',
             'month_traffic_precentage', 'contract_time_precentage',
             'pay_times_precentage', 'pay_num_precentage', 'last_month_traffic_precentage',
             'local_trafffic_month_precentage', 'local_caller_time_precentage', 'service1_caller_time_precentage',
             'service2_caller_time_precentage',
             'user_id']
label = raw_data["service_type_encode"].tolist()
par_list = para_list[:len(para_list) - 1]  # drop the trailing 'user_id'
select_data = raw_data[par_list]
# data_submit_raw = pd.read_csv(r"E:\CCFDF\plansmatching\data\raw data\final_data\test4_final.csv",
#                               encoding="utf-8", low_memory=False)
data_submit_raw = pd.read_csv(r"/data/projects/CCFDF_18/data/test4_final.csv",
                              encoding="utf-8", low_memory=False)
data_submit = data_submit_raw[par_list]
submit_label_encode = []  # per-fold predictions on the submission set
F1_list = []              # per-fold squared macro-F1 on the held-out fold
nrow = len(label)
for ixval in range(10):
    # 10-fold cross-validation split by row index modulo 10.
    idxtest = [a for a in range(nrow) if a % 10 == ixval % 10]
    idxtrain = [a for a in range(nrow) if a % 10 != ixval % 10]
    label_train = [label[r] for r in idxtrain]
    label_test = [label[r] for r in idxtest]
    data_train = select_data.iloc[idxtrain]
    data_test = select_data.iloc[idxtest]
    # label_train, label_test, data_train, data_test = train_test_split(label, raw_data[par_list], test_size=0.02)
    m_class = xgb.XGBClassifier(learning_rate=0.1, n_estimators=1500, max_depth=7, min_child_weight=6, gamma=0,
                                subsample=0.8, n_jobs=-1, reg_alpha=0.05, reg_lambda=0.05,
                                colsample_bytree=0.8, objective='multi:softmax', num_class=8, seed=27)
    # Train on this fold.
    m_class.fit(data_train, label_train)
    test_8 = m_class.predict(data_test)
    print(test_8)
    print("Accuracy : %.2f" % accuracy_score(label_test, test_8))
    confusion_mat = confusion_matrix(label_test, test_8)
    print("Test confusion matrix")
    print(confusion_mat)
    F_sc = F1_score(confusion_mat)
    F1_list.append(F_sc)
    print("test F1_score")
    print(F_sc)
    # Predict the submission set with this fold's model (vote later).
    submit_label_encode.append(m_class.predict(data_submit))
print("F1_list")
print(F1_list)
# Majority vote across the 10 fold models, one row per submission sample.
vote_combine = np.array(submit_label_encode).transpose()
vote_num = len(vote_combine)
vote_final = [Counter(vote_combine[i]).most_common(1)[0][0] for i in range(vote_num)]
decode_list = decode(vote_final)
user_id_4 = data_submit_raw["user_id"]
# Patch the voted class-4 predictions into the LightGBM baseline submission.
# submit_result = pd.read_csv(r"E:\CCFDF\plansmatching\data\raw data\final_data\result_test\lgb_baseline.csv",
#                             encoding="utf-8", low_memory=False)
submit_result = pd.read_csv(r"/data/projects/CCFDF_18/data/lgb_baseline.csv", encoding="utf-8", low_memory=False)
origin_id = submit_result["user_id"].tolist()
origin_result = submit_result["current_service"].tolist()
num_4 = len(user_id_4)
for i in range(num_4):
    # NOTE(review): list.index is O(n) per lookup; a dict of id -> position
    # would make this loop linear overall.
    origin_result[origin_id.index(user_id_4[i])] = decode_list[i]
final_da = pd.DataFrame({"user_id": origin_id, "current_service": origin_result})
# final_da.to_csv(r"E:\CCFDF\plansmatching\data\raw data\final_data\result_test\XGBoost_vote.csv", index=False)
# final_da.to_csv(r"/data/projects/CCFDF_18/data/XGB_vote.csv", index=False) | StarcoderdataPython |
170109 | <gh_stars>1-10
from comet_ml import OfflineExperiment # needed at top for Comet plugin
from collections import defaultdict, OrderedDict
import torch
import torch.nn as nn
import tqdm
import time
from sklearn.metrics import f1_score, precision_score, recall_score
import torch.nn.functional as F
from utils import *
import configparser
LABEL_MAPPING = {0: 'hateful', 1: 'abusive', 2: 'normal', 3: 'spam'}
DEBUG = False
config = configparser.ConfigParser()
config.read(os.path.join(ROOT_DIR, 'config.ini'))
class ExperimentBuilder(nn.Module):
    def __init__(self, network_model, device, hyper_params, data_map,
                 train_data, valid_data, test_data, experiment_flag,
                 data_provider, experiment):
        """
        Initializes an ExperimentBuilder object. Such an object takes care of running training and evaluation of a deep net
        on a given dataset. It also takes care of saving per epoch models and automatically inferring the best val model
        to be used for evaluating the test set metrics.

        :param network_model: nn.Module with reset_parameters() and a
            layer_dict (used by forward_pass_helper).
        :param device: torch device computations are sent to.
        :param hyper_params: dict; keys used here: 'experiment_name',
            'seed', 'num_epochs', 'learning_rate', 'results_dir'.
        :param data_map: mapping used during preprocessing of raw data.
        :param train_data/valid_data/test_data: raw data splits.
        :param experiment_flag: int selecting the forward-pass routing.
        :param data_provider: provider object for batching the data.
        :param experiment: comet_ml experiment handle for logging.
        """
        super(ExperimentBuilder, self).__init__()
        self.experiment = experiment  # comet experiment
        self.experiment_flag = experiment_flag
        self.experiment_name = hyper_params['experiment_name']
        self.model = network_model
        self.model.reset_parameters()  # re-initialize network parameters
        self.device = device
        self.seed = hyper_params['seed']
        self.num_epochs = hyper_params['num_epochs']
        self.starting_epoch = 0
        self.state = dict()
        self.data_provider = data_provider
        self.data_map = data_map
        # Raw splits are kept; processed splits are filled by preprocess_data().
        self.train_data_raw = train_data
        self.valid_data_raw = valid_data
        self.test_data_raw = test_data
        self.train_data = []
        self.valid_data = []
        self.test_data = []
        self.train_data_tweets = None
        self.valid_data_tweets = None
        self.test_data_tweets = None
        self.confusion_matrix = torch.zeros(4, 4)  # number of classes
        # build extra layer of model
        # NOTE(review): preprocess_data() is defined outside this view —
        # presumed to populate the processed splits above; confirm.
        self.preprocess_data()
        self.criterion = nn.CrossEntropyLoss().to(self.device)  # send the loss computation to the GPU
        self.optimizer = torch.optim.Adam(self.model.parameters(), weight_decay=1e-4, lr=hyper_params["learning_rate"])
        # self.scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=self.num_epochs, eta_min=1e-4)
        self.scheduler = None
        # Generate the directory names
        self.experiment_folder = hyper_params['results_dir']
        self.experiment_saved_models = os.path.abspath(os.path.join(self.experiment_folder, "saved_models"))
        # Set best models to be at 0 since we are just starting
        self.best_val_model_idx = 0
        self.best_val_model_criteria = 0.
        if not os.path.exists(self.experiment_folder):  # If experiment directory does not exist
            os.makedirs(self.experiment_folder)  # create the experiment directory
        if not os.path.exists(self.experiment_saved_models):
            os.makedirs(self.experiment_saved_models)  # create the experiment saved models directory
def get_num_parameters(self):
total_num_params = 0
for param in self.parameters():
total_num_params += np.prod(param.shape)
return total_num_params
    def forward_pass_helper(self, x):
        """Forward a batch through the network, routing on self.experiment_flag.

        *x* is a (tweet_input, feature_input) pair.

        - flags 2 and 4: encode tweet and feature inputs separately and
          classify their concatenation;
        - flag 5: encode the tweet plus a summed user-timeline encoding;
        - any other flag (experiments 1 & 3): tweet input only.

        :return: logits from the model's 'fc_layer'.
        """
        tweet_input, feature_input = x[0], x[1]
        if self.experiment_flag == 2 or self.experiment_flag == 4:
            # tweet level processing
            feature_input = feature_input.to(self.device)
            tweet_input = tweet_input.to(self.device)
            feature_out = self.model.forward(feature_input, layer_key='feature', flatten_flag=True)
            tweet_out = self.model.forward(tweet_input, layer_key='tweet', flatten_flag=True)
            # Concatenate on CPU, then move back to the device for the head.
            out = torch.cat((tweet_out.cpu(), feature_out.cpu()), 1).to(self.device)
            return self.model.layer_dict['fc_layer'](out)
        elif self.experiment_flag == 5:
            # tweet level processing
            tweet_input = tweet_input.to(self.device)
            tweet_out = self.model.forward(tweet_input, layer_key='tweet', flatten_flag=True)
            # NOTE(review): np.sum over the list of per-item tensors reduces
            # them with +; the following .cpu() call assumes the result is a
            # tensor, which requires a non-empty feature_input — confirm.
            feature_out = np.sum([self.model.forward(item.to(self.device), layer_key='user-timeline').cpu() for item in feature_input])
            out = torch.cat((tweet_out.cpu(), feature_out.cpu()), 1).to(self.device)
            return self.model.layer_dict['fc_layer'](out)
        else:  # experiments 1 & 3
            tweet_input = tweet_input.to(self.device)
            out = self.model.forward(tweet_input, flatten_flag=True, layer_key='tweet')  # forward the data in the model
            return self.model.layer_dict['fc_layer'](out)
    def run_train_iter(self, x, y, stats, experiment_key='train'):
        """
        Run one optimisation step on a single batch and record its metrics.

        (Docstring fixed: this method returns None — metrics are appended
        to *stats* in place, not returned.)

        :param x: (tweet_input, feature_input) pair as consumed by
            forward_pass_helper.
        :param y: target label tensor for the batch.
        :param stats: dict of metric-name -> list, appended to in place.
        :param experiment_key: prefix for the stat keys (default 'train').
        """
        # sets model to training mode
        # (in case batch normalization or other methods have different procedures for training and evaluation)
        self.train()
        self.optimizer.zero_grad()  # set all weight grads from previous training iters to 0
        y = y.to(self.device)
        out = self.forward_pass_helper(x)  # forward the data in the model
        loss = self.criterion(out, y)
        loss.backward()  # backpropagate to compute gradients for current iter loss
        self.optimizer.step()  # update network parameters
        _, predicted = torch.max(out.data, 1)  # get argmax of predictions
        accuracy = np.mean(list(predicted.eq(y.data).cpu()))  # compute accuracy
        stats['{}_acc'.format(experiment_key)].append(accuracy)
        stats['{}_loss'.format(experiment_key)].append(loss.data.detach().cpu().numpy())
        self.compute_f_metrics(stats, y, predicted, experiment_key)
def run_evaluation_iter(self, x, y, stats, experiment_key='valid'):
"""
Receives the inputs and targets for the model and runs an evaluation iterations. Returns loss and accuracy metrics.
:param x: The inputs to the model. A numpy array of shape batch_size, channels, height, width
:param y: The targets for the model. A numpy array of shape batch_size, num_classes
:return: the loss and accuracy for this batch
"""
self.eval() # sets the system to validation mode
y = y.to(self.device)
out = self.forward_pass_helper(x)
loss = self.criterion(out, y)
_, predicted = torch.max(out.data, 1) # get argmax of predictions
accuracy = np.mean(list(predicted.eq(y.data).cpu()))
if experiment_key == 'test':
for t, p in zip(y.data.view(-1), predicted.cpu().view(-1)):
self.confusion_matrix[t.long(), p.long()] += 1
stats['{}_acc'.format(experiment_key)].append(accuracy) # compute accuracy
stats['{}_loss'.format(experiment_key)].append(loss.data.detach().cpu().numpy())
self.compute_f_metrics(stats, y, predicted, experiment_key)
return predicted
    def save_model(self, model_save_dir, model_save_name, model_idx):
        """
        Save the full network state dict as a per-epoch checkpoint.

        (Docstring fixed: the old one documented parameters this method
        does not take.)

        :param model_save_dir: Directory to store the checkpoint in.
        :param model_save_name: Base file name; the epoch index is appended.
        :param model_idx: Epoch index used in the checkpoint file name.
        """
        # Save state each epoch
        path = os.path.join(model_save_dir, "{}_{}.pt".format(model_save_name, str(model_idx)))
        torch.save(self.state_dict(), f=path)
    def load_model(self, model_save_dir, model_save_name, model_idx):
        """
        Load a checkpoint previously written by save_model into this network.

        :param model_save_dir: Directory the checkpoint was stored in.
        :param model_save_name: Base file name used when saving.
        :param model_idx: Epoch index embedded in the checkpoint file name.
        """
        path = os.path.join(model_save_dir, "{}_{}.pt".format(model_save_name, str(model_idx)))
        self.load_state_dict(torch.load(f=path))
    def remove_excess_models(self):
        """Delete every saved checkpoint except the best-validation epoch's.

        Checkpoint files are assumed to be named '<base>_<epoch>.pt'; the
        epoch is parsed from the suffix and compared to best_val_model_idx.
        """
        dir_list_list = [dir_names for (_, dir_names, _) in os.walk(self.experiment_folder)]
        for dir_list in dir_list_list:
            if 'saved_models' in dir_list:
                path = os.path.abspath(os.path.join(self.experiment_folder, 'saved_models'))
                file_list_list = [file_names for (_, _, file_names) in os.walk(path)]
                for file_list in file_list_list:
                    for file in file_list:
                        # Parse the epoch index out of '<base>_<epoch>.pt'.
                        epoch = file.split('_')[-1]
                        epoch = epoch.replace('.pt', '')
                        if int(epoch) != self.best_val_model_idx:
                            os.remove(os.path.join(path, file))
@staticmethod
def compute_f_metrics(stats, y_true, predicted, type_key):
f1score_overall = f1_score(
y_true.cpu().detach().numpy(),
predicted.cpu().detach().numpy(),
average='weighted'
)
stats[type_key + '_f_score'].append(f1score_overall)
precision_overall = precision_score(
y_true.cpu().detach().numpy(),
predicted.cpu().detach().numpy(),
average='weighted'
)
stats[type_key + '_precision'].append(precision_overall)
recall_overall = recall_score(
y_true.cpu().detach().numpy(),
predicted.cpu().detach().numpy(),
average='weighted'
)
stats[type_key + '_recall'].append(recall_overall)
f1scores = f1_score(
y_true.cpu().detach().numpy(),
predicted.cpu().detach().numpy(),
average=None
)
precision = precision_score(
y_true.cpu().detach().numpy(),
predicted.cpu().detach().numpy(),
average=None
)
recall = recall_score(
y_true.cpu().detach().numpy(),
predicted.cpu().detach().numpy(),
average=None
)
for i in range(len(f1scores)):
stats[type_key + '_f_score_' + LABEL_MAPPING[i]].append(f1scores[i])
stats[type_key + '_precision_' + LABEL_MAPPING[i]].append(precision[i])
stats[type_key + '_recall_' + LABEL_MAPPING[i]].append(recall[i])
def save_best_performing_model(self, epoch_stats, epoch_idx):
criteria = epoch_stats['valid_f_score_hateful']
if criteria > self.best_val_model_criteria: # if current epoch's mean val acc is greater than the saved best val acc then
self.best_val_model_criteria = criteria # set the best val model acc to be current epoch's val accuracy
self.best_val_model_idx = epoch_idx # set the experiment-wise best val idx to be the current epoch's idx
@staticmethod
def iter_logs(stats, start_time, index):
# Log results to terminal
out_string = "".join(["{}: {:0.4f}\n".format(key, value)
for key, value in stats.items() if key != 'epoch'])
epoch_elapsed_time = (time.time() - start_time) / 60 # calculate time taken for epoch
epoch_elapsed_time = "{:.4f}".format(epoch_elapsed_time)
print("\n===Epoch {}===\n{}===Elapsed time: {} mins===".format(index, out_string, epoch_elapsed_time))
    def extract_sample_data(self, sample_ids):
        """
        Turn a batch of sample ids into model-ready tensors via self.data_map.

        The second element of the returned pair depends on self.experiment_flag:
        4 -> topic-word tensor, 2 -> context-tweet tensor, 1/3 -> the tweet
        tensor repeated, 5 -> list of per-tweet timeline tensors.
        NOTE(review): any other flag value falls off the end and returns None.
        """
        embedded_tweets = []
        embedded_context_tweets = []
        embedded_topic_words = []
        embedded_timeline_list = []
        for _id in sample_ids:
            embedded_tweet = self.data_map[_id]['embedded_tweet']
            if self.experiment_flag == 2:
                # concatenates retweet/favorite to tweet
                retweet_count, favorite_count = self.data_map[_id]['retweet_count'], self.data_map[_id]['favorite_count']
                features = torch.Tensor([[retweet_count, favorite_count] for _ in range(np.array(embedded_tweet).shape[0])])
                embedded_tweet = np.concatenate((embedded_tweet, features), -1)
                # adds context tweet
                embedded_context_tweets.append(self.data_map[_id]['embedded_context_tweet'])
            if self.experiment_flag == 3:
                # concatenates retweet/favorite to tweet
                retweet_count, favorite_count = self.data_map[_id]['retweet_count'], self.data_map[_id]['favorite_count']
                features = torch.Tensor([[retweet_count, favorite_count] for _ in range(np.array(embedded_tweet).shape[0])])
                embedded_tweet = np.concatenate((embedded_tweet, features), -1)
            if self.experiment_flag == 4:
                embedded_topic_words.append(self.data_map[_id]['embedded_topic_words'])
            if self.experiment_flag == 5:
                # NOTE(review): reassigned on every iteration, so only the LAST
                # sample's timeline survives the loop -- confirm intentional.
                embedded_timeline_list = [torch.Tensor(tweet).float()
                                          for tweet in self.data_map[_id]['embedded_user_timeline']]
            # append main tweet
            embedded_tweets.append(embedded_tweet)
        # get all tweets as corpus and do LDA from that
        if self.experiment_flag == 4:
            return torch.Tensor(embedded_tweets).float(), torch.Tensor(embedded_topic_words).float()
        if self.experiment_flag == 2:
            return torch.Tensor(embedded_tweets).float(), torch.Tensor(embedded_context_tweets).float()
        elif self.experiment_flag == 1 or self.experiment_flag == 3:  # experiments 1 and 3
            return torch.Tensor(embedded_tweets).float(), torch.Tensor(embedded_tweets).float()
        elif self.experiment_flag == 5:
            return torch.Tensor(embedded_tweets).float(), embedded_timeline_list
@staticmethod
def flatten_embedding(out):
"""
Flattens tweet embedding to have dim (batch_size, 1, embed_dim)
:param out:
:return:
"""
out = F.max_pool1d(out, out.shape[-1])
out = out.permute([0, 2, 1])
return out
    def build_model(self, data_sample):
        """
        Lazily build the model's layers from the shapes of one preprocessed
        batch, then move the model onto the configured device(s).

        :param data_sample: Tuple (embedded_tweet, features_tweet) as produced
                            by extract_sample_data().
        """
        # build model
        embedded_tweet, features_tweet = data_sample  # first element, tuple, first value in tuple
        self.model.build_layers(embedded_tweet.shape, 'tweet')
        # Dummy zero tensors are forwarded only to discover output shapes.
        embedded_tweet_out = self.model.forward(torch.zeros(embedded_tweet.shape), layer_key='tweet')
        if self.experiment_flag == 1 or self.experiment_flag == 3:
            out = embedded_tweet_out
        elif self.experiment_flag == 2 or self.experiment_flag == 4:
            self.model.build_layers(features_tweet.shape, 'feature')
            features_tweet_out = self.model.forward(torch.zeros(features_tweet.shape), layer_key='feature')
            out = torch.cat((embedded_tweet_out, features_tweet_out), 1)
        elif self.experiment_flag == 5:
            self.model.build_layers(embedded_tweet.shape, 'user-timeline')
            user_timeline_tweet_out = self.model.forward(torch.zeros(embedded_tweet.shape), layer_key='user-timeline')
            out = torch.cat((embedded_tweet_out, user_timeline_tweet_out), 1)
        self.model.build_fc_layer(out.shape)
        # send model to device
        if torch.cuda.device_count() > 1:
            # NOTE(review): the model is wrapped in DataParallel and then
            # immediately unwrapped via .module, which discards the parallel
            # wrapper -- confirm multi-GPU execution is really intended.
            self.model = nn.DataParallel(self.model).cuda()
            self.model.to(self.device)
            self.model = self.model.module
        else:
            self.model.to(self.device)  # sends the model from the cpu to the gpu
def extract_tweets(self, ids):
return [(self.data_map[_id]['tweet'],self.data_map[_id]['label']) for _id in ids]
def preprocess_data(self):
print("Preprocessing train data")
start = time.time()
self.train_data = [(self.extract_sample_data(x), y) for x, y in self.train_data_raw]
self.train_data_tweets = [self.extract_tweets(x) for x, y in self.train_data_raw]
self.build_model(self.train_data[0][0])
print("Preprocessing train data finished in {:.2f} minutes".format((time.time() - start) / 60))
print("Preprocessing valid data")
self.valid_data = [(self.extract_sample_data(x), y) for x, y in self.valid_data_raw]
self.valid_data_tweets = [self.extract_tweets(x) for x, y in self.valid_data_raw]
print("Preprocessing test data")
self.test_data = [(self.extract_sample_data(x), y) for x, y in self.test_data_raw]
self.test_data_tweets = [self.extract_tweets(x) for x, y in self.test_data_raw]
    def run_experiment(self):
        """
        Runs experiment train and evaluation iterations, saving the model and best val model and val model accuracy after each epoch.

        After all epochs, reloads the best-validation checkpoint, evaluates on
        the test split, writes per-epoch and final CSVs, and prunes all other
        checkpoints.
        :return: Tuple (train_stats, test_stats) of aggregated metrics.
        """
        train_stats = OrderedDict()
        for epoch_idx in range(self.num_epochs):
            epoch_start_time = time.time()
            epoch_stats = defaultdict(list)
            with tqdm.tqdm(total=len(self.train_data)) as pbar_train:  # create a progress bar for training
                for x, y in self.train_data:  # get data batches
                    self.run_train_iter(x=x, y=y, stats=epoch_stats)  # take a training iter step
                    pbar_train.update(1)
                    pbar_train.set_description(
                        "{} Epoch {}: f-score-hateful: {:.4f}, accuracy: {:.4f}".format('Train', epoch_idx,
                                                                                        np.mean(epoch_stats[
                                                                                                    'train_f_score_hateful']),
                                                                                        np.mean(
                                                                                            epoch_stats['train_acc'])))
            with tqdm.tqdm(total=len(self.valid_data)) as pbar_val:  # create a progress bar for validation
                for x, y in self.valid_data:  # get data batches
                    self.run_evaluation_iter(x=x, y=y, stats=epoch_stats)  # run a validation iter
                    pbar_val.update(1)  # add 1 step to the progress bar
                    pbar_val.set_description(
                        "{} Epoch {}: f-score-hateful: {:.4f}, accuracy: {:.4f}".format('Valid', epoch_idx,
                                                                                        np.mean(epoch_stats[
                                                                                                    'valid_f_score_hateful']),
                                                                                        np.mean(
                                                                                            epoch_stats['valid_acc'])))
            # learning rate
            if self.scheduler is not None:
                self.scheduler.step()
            epoch_stats['learning_rate'] = self.optimizer.param_groups[0]['lr']
            # save to train stats: collapse each per-iteration list to its mean
            # (safe to reassign existing keys while iterating in Python 3).
            for key, value in epoch_stats.items():
                epoch_stats[key] = np.mean(value)
                if not DEBUG:
                    self.experiment.log_metric(name=key, value=epoch_stats[key], step=epoch_idx)
            epoch_stats['epoch'] = epoch_idx
            train_stats["epoch_{}".format(epoch_idx)] = epoch_stats
            if DEBUG:
                self.iter_logs(epoch_stats, epoch_start_time, epoch_idx)
            self.save_model(model_save_dir=self.experiment_saved_models,
                            model_save_name="train_model",
                            model_idx=epoch_idx)
            self.save_best_performing_model(epoch_stats=epoch_stats, epoch_idx=epoch_idx)
        ### EXPERIMENTS END ###
        # save train statistics
        prepare_output_file(filename="{}/{}".format(self.experiment_folder, "train_statistics_{}.csv".format(self.seed)),
                            output=list(train_stats.values()))
        print("Generating test set evaluation metrics with best model index {}".format(self.best_val_model_idx))
        self.load_model(model_save_dir=self.experiment_saved_models,
                        model_idx=self.best_val_model_idx,
                        model_save_name="train_model")
        test_stats = defaultdict(list)
        with tqdm.tqdm(total=len(self.test_data)) as pbar_test:  # ini a progress bar
            for i, (x, y) in enumerate(self.test_data):  # sample batch
                preds = self.run_evaluation_iter(x=x, y=y, stats=test_stats, experiment_key='test')
                pbar_test.update(1)  # update progress bar status
                pbar_test.set_description("loss: {:.4f}, accuracy: {:.4f}".format(np.mean(test_stats['test_loss']),
                                                                                  np.mean(test_stats['test_acc'])))
                # Dump every prediction next to its gold label and tweet text.
                for j, pred in enumerate(preds):
                    print("Pred: {} Label: {}\n{}\n".format(pred,
                                                            self.test_data_tweets[i][j][1],
                                                            self.test_data_tweets[i][j][0]))
        print(self.confusion_matrix)
        # save to test stats
        for key, value in test_stats.items():
            test_stats[key] = np.mean(value)
            if not DEBUG:
                self.experiment.log_metric(name=key, value=test_stats[key])
        # Merge the test metrics with the best epoch's train/valid metrics
        # into one row for results.csv.
        merge_dict = dict(list(test_stats.items()) +
                          list(train_stats["epoch_{}".format(self.best_val_model_idx)].items()))
        merge_dict['epoch'] = self.best_val_model_idx
        merge_dict['seed'] = self.seed
        merge_dict['title'] = self.experiment_name
        merge_dict['num_epochs'] = self.num_epochs
        for key, value in merge_dict.items():
            if isinstance(value, float):
                merge_dict[key] = np.around(value, 4)
        print(merge_dict)
        prepare_output_file(filename="{}/{}".format(self.experiment_folder, "results.csv"),
                            output=[merge_dict])
        self.remove_excess_models()
        return train_stats, test_stats
| StarcoderdataPython |
87576 | <reponame>rkulyn/telegram-pig-latin-bot
from functools import lru_cache
from .constants import VOWELS
from .decorators import check_if_word_capitalized
from .rules import vowel_rule, h_rule, consonant_rule
@lru_cache(maxsize=50)
@check_if_word_capitalized
def translate(word):
    """
    Translate a single word into Pig Latin.

    Dispatches to the vowel, "h", or consonant rule based on the word's
    first letter; an empty word translates to the empty string.

    Args:
        word (str): Word to translate.

    Returns:
        (str): Translated word.
    """
    if not word:
        return ""

    initial = word[0].lower()
    if initial in VOWELS:
        return vowel_rule(word)
    if initial == "h":
        # Words starting with "h" get their own special rule.
        return h_rule(word)
    return consonant_rule(word)
| StarcoderdataPython |
1639822 | <reponame>sermonis/three-globe-flight-line
import os

# Serve the current working directory over HTTP on port 8000.
# Fix: "SimpleHTTPServer" is the Python 2 module name; it was renamed to
# "http.server" in Python 3, so the old command fails on modern interpreters.
command = "python -m http.server 8000"
os.system(command)
| StarcoderdataPython |
1610325 | from pathlib import Path
import moonleap.resource.props as P
from moonleap import extend, rule
from moonleap.verbs import has
from titan.react_pkg.reactapp import ReactApp
from .props import get_context
@rule("react-app", has, "routes:module")
def react_app_has_routes_module(react_app, routes_module):
routes_module.add_template_dir(Path(__file__).parent / "templates", get_context)
@extend(ReactApp)
class ExtendReactApp:
    # Give every ReactApp a child "routes" module resource property.
    routes_module = P.child(has, "routes:module")
| StarcoderdataPython |
1711723 | <gh_stars>0
from collections import defaultdict, OrderedDict, namedtuple
from decimal import Decimal
from operator import itemgetter
from billy import db
# Bill sub-list fields whose lengths we aggregate statistics over.
KEYS = 'versions actions documents votes sponsors'.split()
class SaneReprList(list):
    """A list whose repr shows only its length, keeping debug output readable."""

    def __repr__(self):
        return '<SaneReprList: {} elements>'.format(len(self))
class Summarizer(object):
    """Aggregate count statistics (mean/median/mode/percentages) over bills.

    NOTE(review): print_report() uses Python 2 print statements, so this
    class only runs under Python 2.
    """
    def __init__(self, spec={}):
        # NOTE(review): mutable default argument; benign here because the
        # spec is only read, but the dict is shared across instances.
        self.spec = spec
    def build(self, keys=KEYS):
        # Builds self.counts: key -> {list length: SaneReprList of bill ids}.
        # NOTE(review): the `keys` parameter is immediately shadowed by the
        # hard-coded list below, so passing custom keys has no effect.
        listdict = lambda: defaultdict(SaneReprList)
        counts = defaultdict(listdict)
        keys = 'versions actions documents votes sponsors'.split()
        for bill in db.bills.find(self.spec):
            for k in keys:
                counts[k][len(bill[k])].append(bill['_id'])
        self.counts = dict(counts)
        return dict(counts)
    def count(self):
        # Total number of bills matching the spec.
        return db.bills.find(self.spec).count()
    def max_ids(self):
        '''Yield the key, maximum value length, and the id of the
        bill in which the max was found for each key in KEYS. In
        other words, if TAB0000001 has the most actions (345), then
        one tuple yielded from this generator would be:
        ('actions', 345, 'TAB0000001')
        '''
        for k, v in self.counts.items():
            max_ = max(v)
            id_ = v[max_]
            yield k, max_, id_
    def mean(self, key):
        # Weighted mean: sum(length * #bills at that length) / total bills.
        # NOTE(review): under Python 2 this is integer (floor) division.
        counts = self.counts[key]
        sum_ = sum(k * len(v) for (k, v) in counts.items())
        return sum_ / self.count()
    def median(self, key):
        # NOTE(review): this returns half the number of *distinct* lengths,
        # not the median length -- looks wrong; confirm intended behavior.
        counts = self.counts[key]
        if 1 < len(counts):
            counts = self.counts[key]
            div, mod = divmod(len(counts), 2)
            return div
        else:
            return list(counts).pop()
    def mode(self, key):
        # NOTE(review): computes the midrange (max+min)/2, not the mode
        # (most frequent value) -- confirm intended behavior.
        counts = self.counts[key]
        if 1 < len(counts):
            return (max(counts) + min(counts)) / 2
        else:
            return list(counts).pop()
    def percentages(self, key):
        '''Returns an OrderedDict where the keys are the numbers of
        actions/votes found and the values are the percentages of how
        many bills had that number of actions out of the total number
        of bills.
        '''
        counts = self.counts[key]
        sum_ = Decimal(self.count())
        items = ((k, (len(v) / sum_) * 100) for (k, v) in counts.items())
        # itemgetter with a reversing slice sorts by (percentage, value) desc.
        sorter = itemgetter(slice(None, None, -1))
        items = sorted(items, key=sorter, reverse=True)
        return OrderedDict(items)
    def report(self):
        # Bundle all four statistics per key into a named tuple.
        Stats = namedtuple('Stats', 'mean median mode percentages')
        methods = [self.mean, self.median, self.mode, self.percentages]
        return dict((key, Stats(*[meth(key) for meth in methods])) for key in KEYS)
    def print_report(self):
        # Python 2 print statements: not Python 3 compatible.
        tab = '    '
        for k, v in self.report().items():
            print
            print repr(k)
            for key in ('mean', 'median', 'mode'):
                print tab, key, '->', getattr(v, key)
            print
            print tab, 'Percentage breakdown'
            for value, percentage in v.percentages.items():
                print tab * 2, value, "{0:.2f}".format(percentage)
if __name__ == '__main__':
    # import pprint
    # pprint.pprint(get_counts())
    # Build count statistics over all bills and print the summary report.
    x = Summarizer()
    x.build()
    x.print_report()
| StarcoderdataPython |
1745611 | import numpy as np
import pandas as pd
import pytest
from featuretools.primitives import CityblockDistance, GeoMidpoint, IsInGeoBox
def test_cityblock():
    """CityblockDistance returns Manhattan (L1) great-circle distances.

    Checks the default unit and unit='kilometers' against reference values
    (the default is presumably miles -- TODO confirm against the primitive).
    """
    primitive_instance = CityblockDistance()
    latlong_1 = pd.Series([(i, i) for i in range(3)])
    latlong_2 = pd.Series([(i, i) for i in range(3, 6)])
    primitive_func = primitive_instance.get_function()
    # Reference distances for the default unit.
    answer = pd.Series([414.56051391, 414.52893691, 414.43421555])
    given_answer = primitive_func(latlong_1, latlong_2)
    np.testing.assert_allclose(given_answer, answer, rtol=1e-09)
    # Same point pairs, measured in kilometers.
    primitive_instance = CityblockDistance(unit='kilometers')
    primitive_func = primitive_instance.get_function()
    answer = primitive_func(latlong_1, latlong_2)
    given_answer = pd.Series([667.1704814, 667.11966315, 666.96722389])
    np.testing.assert_allclose(given_answer, answer, rtol=1e-09)
def test_cityblock_nans():
    """Any NaN in either coordinate pair must yield a NaN distance."""
    func = CityblockDistance().get_function()
    points_a = [(0, 0), (1, 1), (1, 1), (np.nan, 3), (4, np.nan), (np.nan, np.nan)]
    points_b = [(2, 2), (3, 3), (np.nan, np.nan), (np.nan, 5), (6, np.nan), (np.nan, np.nan)]
    # First two pairs are fully defined; the remaining four contain NaNs.
    expected = pd.Series([276.37367594, 276.35262728] + [np.nan] * 4)
    computed = func(points_a, points_b)
    np.testing.assert_allclose(expected, computed, rtol=1e-09)
def test_cityblock_error():
    """Constructing CityblockDistance with an unknown unit must raise."""
    with pytest.raises(ValueError, match='Invalid unit given'):
        CityblockDistance(unit='invalid')
def test_midpoint():
    """Midpoints of diametrically opposite corner points land on the origin."""
    func = GeoMidpoint().get_function()
    corners = pd.Series([(-90, -180), (90, 180)])
    opposite = pd.Series([(+90, +180), (-90, -180)])
    for lat, lon in func(corners, opposite):
        assert lat == 0.0
        assert lon == 0.0
def test_midpoint_floating():
    """Mirrored fractional coordinates also average to the origin."""
    func = GeoMidpoint().get_function()
    first = pd.Series([(-45.5, -100.5), (45.5, 100.5)])
    mirrored = pd.Series([(+45.5, +100.5), (-45.5, -100.5)])
    for lat, lon in func(first, mirrored):
        assert lat == 0.0
        assert lon == 0.0
def test_midpoint_zeros():
    """The midpoint of two identical zero coordinates stays at the origin."""
    func = GeoMidpoint().get_function()
    zeros_a = pd.Series([(0, 0), (0, 0)])
    zeros_b = pd.Series([(0, 0), (0, 0)])
    for lat, lon in func(zeros_a, zeros_b):
        assert lat == 0.0
        assert lon == 0.0
def test_midpoint_nan():
    """A NaN endpoint propagates NaN into both midpoint coordinates."""
    func = GeoMidpoint().get_function()
    nans = pd.Series([(np.nan, np.nan), (np.nan, np.nan)])
    origin = pd.Series([(0, 0), (0, 0)])
    for lat, lon in func(nans, origin):
        assert np.isnan(lat)
        assert np.isnan(lon)
def test_isingeobox():
    """Points inside the (-5,-5)..(5,5) box are True; NaN coordinates are False."""
    points = pd.Series([(1, 2), (5, 7), (-5, 4), (2, 3), (0, 0),
                        (np.nan, np.nan), (-2, np.nan), (np.nan, 1)])
    func = IsInGeoBox((-5, -5), (5, 5)).get_function()
    expected = pd.Series([True, False, True, True, True, False, False, False])
    assert np.array_equal(func(points), expected)
def test_boston():
    """Only Somerville falls inside a Lynn-to-Dedham box around Boston."""
    nyc = (40.7128, -74.0060)
    san_francisco = (37.7749, -122.4194)
    somerville = (42.3876, -71.0995)
    beijing = (39.9042, 116.4074)
    cape_town = (-33.9249, 18.4241)
    cities = pd.Series([nyc, san_francisco, somerville, beijing, cape_town])
    lynn_ma = (42.4668, -70.9495)
    dedham_ma = (42.2436, -71.1677)
    func = IsInGeoBox(lynn_ma, dedham_ma).get_function()
    expected = pd.Series([False, False, True, False, False])
    assert np.array_equal(func(cities), expected)
| StarcoderdataPython |
164289 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
##
## mds.py
##
## Created on: Dec 3, 2017
## Author: <NAME>
## E-mail: <EMAIL>
##
# print function as in Python3
#==============================================================================
from __future__ import print_function
from minds.check import ConsistencyChecker
from minds.data import Data
from minds.minds1 import MinDS1Rules
from minds.mp92 import MP92Rules
from minds.options import Options
from minds.mxsatl import MaxSATLits
from minds.mxsatls import MaxSATLitsSep
from minds.mxsatsp import MaxSATSparse
from minds.satr import SATRules
from minds.satl import SATLits
from minds.satls import SATLitsSep
from minds.twostage import TwoStageApproach
import os
import resource
import six
import sys
#
#==============================================================================
def show_info():
    """
    Print the tool banner (name and author) to standard output.
    """
    banner = (
        'c MDS: miner of (optimal) decision sets',
        'c author: <NAME> [email:<EMAIL>]',
        '',
    )
    print('\n'.join(banner))
#
#==============================================================================
def do_two_stage(data, options):
    """
    Run the prime-based approach.

    :param data: parsed dataset object.
    :param options: parsed command-line options.
    """
    ruler = TwoStageApproach(data, options)
    covers = ruler.compute()
    # save result to a CSV file
    if options.rdump:
        # NOTE(review): `primes` is undefined in this scope, so enabling
        # rdump raises a NameError here. The prime implicants presumably
        # live on `ruler` -- confirm the intended reference and fix.
        data.dump_result(primes, covers)
#
#==============================================================================
def do_single_stage(data, options):
    """
    Run a single-stage SAT/MaxSAT-based approach.

    Selects a solver class from options.approach, computes the rule covers,
    and (in verbose mode) prints cover statistics including an optional
    default rule chosen by total sample weight.

    :param data: parsed dataset object.
    :param options: parsed command-line options.
    """
    if options.approach == 'mp92':
        solver = MP92Rules(data, options)  # MP92 approach
    elif options.approach in ('satr', 'sr'):
        solver = SATRules(data, options)  # MinDS2 and MinDS2*
    elif options.approach in ('satl', 'sl'):
        solver = SATLits(data, options)  # Opt
    elif options.approach in ('satls', 'sls'):
        solver = SATLitsSep(data, options)  # OptSep
    elif options.approach in ('mxsatls', 'mls'):
        solver = MaxSATLitsSep(data, options)  # MOptSep
    elif options.approach in ('mxsatsparse', 'msparse', 'sparse'):
        solver = MaxSATSparse(data, options)  # Sparse
    elif options.approach in ('mxsatl', 'ml'):
        solver = MaxSATLits(data, options)  # MOpt
    else:
        solver = MinDS1Rules(data, options)  # MinDS1
    covers = solver.compute()
    # dealing with default rules (if any)
    if options.verb:
        wghts, label = [], None
        if options.default:
            # if the 'default' option is given: pick the label covering the
            # largest total sample weight as the default rule.
            for lb in six.iterkeys(covers):
                wghts.append(tuple([lb, sum(data.wghts[i] for i in solver.samps[lb])]))
            label = max(wghts, key=lambda p: p[1])[0]
        else:
            # for the sparse model only:
            # checking if there are default rules
            # and selecting the best among them
            for lb, premise in six.iteritems(covers):
                if len(premise) == 1 and len(premise[0]) == 0:
                    wghts.append(tuple([lb, sum(data.wghts[i] for i in solver.samps[lb])]))
            if wghts:
                label = max(filter(lambda p: len(covers[p[0]]) != 1 or covers[p[0]] != [], wghts), key=lambda p: p[1])[0]
        if label is not None:
            print('c1 cover: true => {0}'.format(': '.join(data.fvmap.opp[label])))
        print('c2 cover size: {0}'.format(sum([len(p) for p in six.itervalues(covers)])))
        print('c2 cover wght: {0}'.format(solver.cost))
        if hasattr(solver, 'accy'):
            print('c2 accy filtr: {0:.2f}%'.format(solver.accy))
        if hasattr(solver, 'accy_tot'):
            print('c2 accy total: {0:.2f}%'.format(solver.accy_tot))
        print('c2 cover time: {0:.4f}'.format(solver.time))
#
#==============================================================================
if __name__ == '__main__':
    # parsing command-line options
    options = Options(sys.argv)
    # showing head
    show_info()
    # parsing data: from the first positional file, or from stdin
    if options.files:
        data = Data(filename=options.files[0], mapfile=options.mapfile,
                    separator=options.separator, ranges=options.ranges)
    else:
        data = Data(fpointer=sys.stdin, mapfile=options.mapfile,
                    separator=options.separator)
    if options.verb:
        print('c0 # of samps: {0} ({1} weighted)'.format(sum(data.wghts), len(data.samps)))
        print('c0 # of feats: {0} ({1} binary)'.format(len(data.names) - 1, len(list(filter(lambda x: x > 0, data.fvmap.opp.keys()))) - len(data.feats[-1])))
        print('c0 # of labls: {0}'.format(len(data.feats[-1])))
        used_time = resource.getrusage(resource.RUSAGE_SELF).ru_utime
        print('c0 parse time: {0:.4f}'.format(used_time))
        print('')
    if options.noccheck == False:
        # phase0: consistency check -- drop samples with conflicting labels
        checker = ConsistencyChecker(data, options)
        if checker.status and checker.do() == False:
            checker.remove_inconsistent()
            if options.verb:
                print('c0 data set is inconsistent')
                print('c0 filtering out {0} samples ({1} left)'.format(data.samps_filt, len(data.samps)))
                print('c0 filtering out {0} weights ({1} left)'.format(data.wghts_filt, sum(data.wghts)))
                print('c0 check time: {0:.4f}'.format(checker.time))
                print('')
            if options.cdump:
                checker.dump_consistent()
        if checker.status == False:
            print('c0 not enough classes => classification makes no sense')
            sys.exit(1)
    # Dispatch to the two-stage or single-stage pipeline.
    if options.approach in ('2', '2stage', 'pbased'):
        do_two_stage(data, options)
    else:  # single-phase models (sat, mp92, minds1, opt, etc)
        do_single_stage(data, options)
    if options.verb:
        total_time = resource.getrusage(resource.RUSAGE_CHILDREN).ru_utime + resource.getrusage(resource.RUSAGE_SELF).ru_utime
        print('c3 total time: {0:.4f}'.format(total_time))
| StarcoderdataPython |
1792031 | from ifem import test_solve_system
# Smoke-run the ifem linear-system self-test, then the uniform-bar
# application self-test (both raise on failure).
test_solve_system()
from applications import test_uniform_bar
test_uniform_bar()
| StarcoderdataPython |
195113 | """A basic example of using the SQLAlchemy Sharding API.
Sharding refers to horizontally scaling data across multiple
databases.
The basic components of a "sharded" mapping are:
* multiple databases, each assigned a 'shard id'
* a function which can return a single shard id, given an instance
to be saved; this is called "shard_chooser"
* a function which can return a list of shard ids which apply to a particular
instance identifier; this is called "id_chooser". If it returns all shard ids,
all shards will be searched.
* a function which can return a list of shard ids to try, given a particular
Query ("query_chooser"). If it returns all shard ids, all shards will be
queried and the results joined together.
In this example, four sqlite databases will store information about weather
data on a database-per-continent basis. We provide example shard_chooser,
id_chooser and query_chooser functions. The query_chooser illustrates
inspection of the SQL expression element in order to attempt to determine a
single shard being requested.
The construction of generic sharding routines is an ambitious approach
to the issue of organizing instances among multiple databases. For a
more plain-spoken alternative, the "distinct entity" approach
is a simple method of assigning objects to different tables (and potentially
database nodes) in an explicit way - described on the wiki at
`EntityName <http://www.sqlalchemy.org/trac/wiki/UsageRecipes/EntityName>`_.
"""
| StarcoderdataPython |
1783390 | from django.urls import path, include
from rest_framework import routers
from .viewsets import NewsReadOnlyModelViewSet, NewsRetrieveModelViewSet
# Register the news viewsets on a DRF SimpleRouter (no API root view).
router = routers.SimpleRouter()
router.register('news', NewsReadOnlyModelViewSet)
router.register('retrieve', NewsRetrieveModelViewSet)
# Expose every router-generated route at the URLConf root.
urlpatterns = [
    path('', include(router.urls)),
]
| StarcoderdataPython |
3359863 | import tensorflow as tf
import numpy as np
############################################################################################################
# Convolution layer Methods
def __conv2d_p(name, x, w=None, num_filters=16, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
               initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0):
    """
    Convolution 2D Wrapper
    :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
    :param x: (tf.tensor) The input to the layer (N, H, W, C).
    :param w: (tf.tensor) pretrained weights (if None, it means no pretrained weights)
    :param num_filters: (integer) No. of filters (This is the output depth)
    :param kernel_size: (integer tuple) The size of the convolving kernel.
    :param padding: (string) The amount of padding required.
    :param stride: (integer tuple) The stride required.
    :param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
    :param l2_strength:(weight decay) (float) L2 regularization parameter.
    :param bias: (float) Amount of bias. (if not float, it means pretrained bias)
    :return out: The output of the layer. (N, H', W', num_filters)
    """
    with tf.variable_scope(name):
        stride = [1, stride[0], stride[1], 1]
        kernel_shape = [kernel_size[0], kernel_size[1], x.shape[-1], num_filters]
        with tf.name_scope('layer_weights'):
            # Fix: identity comparison with None (was `w == None`), matching
            # __depthwise_conv2d_p; `==` on a tensor is not a plain None test.
            if w is None:
                w = __variable_with_weight_decay(kernel_shape, initializer, l2_strength)
            __variable_summaries(w)
        with tf.name_scope('layer_biases'):
            # A float bias means "create a fresh variable"; anything else is
            # treated as a pretrained bias tensor.
            if isinstance(bias, float):
                bias = tf.get_variable('biases', [num_filters], initializer=tf.constant_initializer(bias))
            __variable_summaries(bias)
        with tf.name_scope('layer_conv2d'):
            conv = tf.nn.conv2d(x, w, stride, padding)
            out = tf.nn.bias_add(conv, bias)
        return out
def conv2d(name, x, w=None, num_filters=16, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
           initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0,
           activation=None, batchnorm_enabled=False, max_pool_enabled=False, dropout_keep_prob=-1,
           is_training=True):
    """
    This block is responsible for a convolution 2D layer followed by optional (non-linearity, dropout, max-pooling).
    Note that: "is_training" should be passed by a correct value based on being in either training or testing.
    NOTE(review): when dropout_keep_prob != -1, is_training must be a tf.bool
    tensor (it is fed to tf.cond), not a Python bool -- confirm callers.
    :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
    :param x: (tf.tensor) The input to the layer (N, H, W, C).
    :param num_filters: (integer) No. of filters (This is the output depth)
    :param kernel_size: (integer tuple) The size of the convolving kernel.
    :param padding: (string) The amount of padding required.
    :param stride: (integer tuple) The stride required.
    :param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
    :param l2_strength:(weight decay) (float) L2 regularization parameter.
    :param bias: (float) Amount of bias.
    :param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
    :param batchnorm_enabled: (boolean) for enabling batch normalization.
    :param max_pool_enabled: (boolean) for enabling max-pooling 2x2 to decrease width and height by a factor of 2.
    :param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
    :param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
    :return: The output tensor of the layer (N, H', W', C').
    """
    with tf.variable_scope(name) as scope:
        # The scope object (not the string) is forwarded as the inner `name`;
        # tf.variable_scope accepts an existing scope, so variables nest here.
        conv_o_b = __conv2d_p(scope, x=x, w=w, num_filters=num_filters, kernel_size=kernel_size, stride=stride,
                              padding=padding,
                              initializer=initializer, l2_strength=l2_strength, bias=bias)
        if batchnorm_enabled:
            conv_o_bn = tf.layers.batch_normalization(conv_o_b, training=is_training)
            if not activation:
                conv_a = conv_o_bn
            else:
                conv_a = activation(conv_o_bn)
        else:
            if not activation:
                conv_a = conv_o_b
            else:
                conv_a = activation(conv_o_b)
        # Branch bodies for tf.cond: dropout only applies while training.
        def dropout_with_keep():
            return tf.nn.dropout(conv_a, dropout_keep_prob)
        def dropout_no_keep():
            return tf.nn.dropout(conv_a, 1.0)
        if dropout_keep_prob != -1:
            conv_o_dr = tf.cond(is_training, dropout_with_keep, dropout_no_keep)
        else:
            conv_o_dr = conv_a
        conv_o = conv_o_dr
        if max_pool_enabled:
            conv_o = max_pool_2d(conv_o_dr)
        return conv_o
def __depthwise_conv2d_p(name, x, w=None, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
                         initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0):
    """
    Depthwise Convolution 2D wrapper (one filter per input channel).
    :param name: (string or scope) Variable scope for the layer's variables.
    :param x: (tf.tensor) The input to the layer (N, H, W, C).
    :param w: (tf.tensor) pretrained depthwise weights (None creates new ones).
    :param kernel_size: (integer tuple) The size of the convolving kernel.
    :param padding: (string) The amount of padding required.
    :param stride: (integer tuple) The stride required.
    :param initializer: (tf.contrib initializer) The initialization scheme.
    :param l2_strength: (float) L2 regularization parameter (weight decay).
    :param bias: (float) Amount of bias. (if not float, it means pretrained bias)
    :return out: The output of the layer; the channel count stays C.
    """
    with tf.variable_scope(name):
        stride = [1, stride[0], stride[1], 1]
        # Depthwise kernels have channel multiplier 1: one filter per channel.
        kernel_shape = [kernel_size[0], kernel_size[1], x.shape[-1], 1]
        with tf.name_scope('layer_weights'):
            if w is None:
                w = __variable_with_weight_decay(kernel_shape, initializer, l2_strength)
            __variable_summaries(w)
        with tf.name_scope('layer_biases'):
            if isinstance(bias, float):
                bias = tf.get_variable('biases', [x.shape[-1]], initializer=tf.constant_initializer(bias))
            __variable_summaries(bias)
        with tf.name_scope('layer_conv2d'):
            conv = tf.nn.depthwise_conv2d(x, w, stride, padding)
            out = tf.nn.bias_add(conv, bias)
        return out
def depthwise_conv2d(name, x, w=None, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
                     initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0, activation=None,
                     batchnorm_enabled=False, is_training=True):
    """Depthwise 2D convolution block: conv -> optional batch-norm -> optional activation."""
    with tf.variable_scope(name) as scope:
        out = __depthwise_conv2d_p(name=scope, x=x, w=w, kernel_size=kernel_size, padding=padding,
                                   stride=stride, initializer=initializer, l2_strength=l2_strength, bias=bias)
        # Apply each optional stage in order; skipping a stage passes the
        # tensor through unchanged (exactly the original if/else lattice).
        if batchnorm_enabled:
            out = tf.layers.batch_normalization(out, training=is_training)
        if activation:
            out = activation(out)
        return out
def depthwise_separable_conv2d(name, x, w_depthwise=None, w_pointwise=None, width_multiplier=1.0, num_filters=16,
                               kernel_size=(3, 3),
                               padding='SAME', stride=(1, 1),
                               initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, biases=(0.0, 0.0),
                               activation=None, batchnorm_enabled=True,
                               is_training=True):
    """Implementation of depthwise separable 2D convolution as in the MobileNet paper.

    A depthwise conv (spatial filtering per channel) is followed by a 1x1
    pointwise conv (channel mixing); width_multiplier scales the output depth.
    Returns (depthwise_output, pointwise_output).
    """
    # MobileNet width multiplier: shrink/grow the pointwise output channels.
    total_num_filters = int(round(num_filters * width_multiplier))
    with tf.variable_scope(name) as scope:
        conv_a = depthwise_conv2d('depthwise', x=x, w=w_depthwise, kernel_size=kernel_size, padding=padding,
                                  stride=stride,
                                  initializer=initializer, l2_strength=l2_strength, bias=biases[0],
                                  activation=activation,
                                  batchnorm_enabled=batchnorm_enabled, is_training=is_training)
        # 1x1 pointwise convolution mixes channels after the spatial pass.
        conv_o = conv2d('pointwise', x=conv_a, w=w_pointwise, num_filters=total_num_filters, kernel_size=(1, 1),
                        initializer=initializer, l2_strength=l2_strength, bias=biases[1], activation=activation,
                        batchnorm_enabled=batchnorm_enabled, is_training=is_training)
    return conv_a, conv_o
############################################################################################################
# Fully Connected layer Methods
def __dense_p(name, x, w=None, output_dim=128, initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0,
              bias=0.0):
    """
    Fully connected layer
    :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
    :param x: (tf.tensor) The input to the layer (N, D).
    :param w: optional pretrained weight matrix; created when None.
    :param output_dim: (integer) It specifies H, the output second dimension of the fully connected layer [ie:(N, H)]
    :param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
    :param l2_strength:(weight decay) (float) L2 regularization parameter.
    :param bias: (float) Amount of bias. (if not float, it means pretrained bias)
    :return out: The output of the layer. (N, H)
    """
    n_in = x.get_shape()[-1].value
    with tf.variable_scope(name):
        # Fix: use identity comparison with None. `w == None` is both
        # unidiomatic (PEP 8 E711) and unsafe for array/tensor-like
        # arguments whose __eq__ may not return a plain bool.
        if w is None:
            w = __variable_with_weight_decay([n_in, output_dim], initializer, l2_strength)
        __variable_summaries(w)
        # A float bias means "create a fresh variable"; anything else is a
        # pretrained bias tensor passed through unchanged.
        if isinstance(bias, float):
            bias = tf.get_variable("layer_biases", [output_dim], tf.float32, tf.constant_initializer(bias))
        __variable_summaries(bias)
        output = tf.nn.bias_add(tf.matmul(x, w), bias)
        return output
def dense(name, x, w=None, output_dim=128, initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0,
          bias=0.0,
          activation=None, batchnorm_enabled=False, dropout_keep_prob=-1,
          is_training=True
          ):
    """
    This block is responsible for a fully connected followed by optional (non-linearity, dropout, max-pooling).
    Note that: "is_training" should be passed by a correct value based on being in either training or testing.
    :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
    :param x: (tf.tensor) The input to the layer (N, D).
    :param w: optional pretrained weight matrix; created when None.
    :param output_dim: (integer) It specifies H, the output second dimension of the fully connected layer [ie:(N, H)]
    :param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
    :param l2_strength:(weight decay) (float) L2 regularization parameter.
    :param bias: (float) Amount of bias.
    :param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
    :param batchnorm_enabled: (boolean) for enabling batch normalization.
    :param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
    :param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
    :return out: The output of the layer. (N, H)
    """
    with tf.variable_scope(name) as scope:
        dense_o_b = __dense_p(name=scope, x=x, w=w, output_dim=output_dim, initializer=initializer,
                              l2_strength=l2_strength,
                              bias=bias)
        if batchnorm_enabled:
            dense_o_bn = tf.layers.batch_normalization(dense_o_b, training=is_training)
            if not activation:
                dense_a = dense_o_bn
            else:
                dense_a = activation(dense_o_bn)
        else:
            if not activation:
                dense_a = dense_o_b
            else:
                dense_a = activation(dense_o_b)
        def dropout_with_keep():
            return tf.nn.dropout(dense_a, dropout_keep_prob)
        def dropout_no_keep():
            return tf.nn.dropout(dense_a, 1.0)
        # NOTE(review): tf.cond requires `is_training` to be a boolean
        # *tensor* when dropout is enabled (batch norm also accepts a Python
        # bool) — confirm callers pass a placeholder in that case.
        if dropout_keep_prob != -1:
            dense_o_dr = tf.cond(is_training, dropout_with_keep, dropout_no_keep)
        else:
            dense_o_dr = dense_a
        dense_o = dense_o_dr
    return dense_o
def dropout(x, dropout_keep_prob, is_training):
    """Dropout layer: keeps neurons with probability `dropout_keep_prob`
    during training and is a no-op at inference (keep prob 1.0).
    A `dropout_keep_prob` of -1 disables dropout entirely.
    """
    def dropout_with_keep():
        return tf.nn.dropout(x, dropout_keep_prob)
    def dropout_no_keep():
        return tf.nn.dropout(x, 1.0)
    # NOTE(review): tf.cond needs `is_training` to be a boolean tensor
    # (e.g. a placeholder), not a Python bool — confirm at call sites.
    if dropout_keep_prob != -1:
        output = tf.cond(is_training, dropout_with_keep, dropout_no_keep)
    else:
        output = x
    return output
def flatten(x):
    """
    Flatten a (N,H,W,C) input into (N,D) output. Used for fully connected layers after conolution layers
    :param x: (tf.tensor) representing input
    :return: flattened output
    """
    # All non-batch dimensions must be statically known here; a dynamic
    # H/W/C would make v.value None and break the product.
    all_dims_exc_first = np.prod([v.value for v in x.get_shape()[1:]])
    o = tf.reshape(x, [-1, all_dims_exc_first])
    return o
############################################################################################################
# Pooling Methods
def max_pool_2d(x, size=(2, 2), stride=(2, 2), name='pooling'):
    """
    Max pooling 2D Wrapper
    :param x: (tf.tensor) The input to the layer (N,H,W,C).
    :param size: (tuple) The spatial size of the pooling window.
    :param stride: (tuple) specifies the stride of pooling.
    :param name: (string) Scope name.
    :return: With the defaults, the input halved in both width and height (N,H/2,W/2,C).
    """
    size_x, size_y = size
    stride_x, stride_y = stride
    return tf.nn.max_pool(x, ksize=[1, size_x, size_y, 1], strides=[1, stride_x, stride_y, 1], padding='VALID',
                          name=name)
def avg_pool_2d(x, size=(2, 2), stride=(2, 2), name='avg_pooling'):
    """
    Average pooling 2D Wrapper
    :param x: (tf.tensor) The input to the layer (N,H,W,C).
    :param size: (tuple) The spatial size of the pooling window.
    :param stride: (tuple) specifies the stride of pooling.
    :param name: (string) Scope name.
    :return: With the defaults, the input halved in both width and height (N,H/2,W/2,C).
    """
    size_x, size_y = size
    stride_x, stride_y = stride
    return tf.nn.avg_pool(x, ksize=[1, size_x, size_y, 1], strides=[1, stride_x, stride_y, 1], padding='VALID',
                          name=name)
############################################################################################################
# Utilities for layers
def __variable_with_weight_decay(kernel_shape, initializer, wd):
    """
    Create a variable with L2 Regularization (Weight Decay)
    :param kernel_shape: the size of the convolving weight kernel.
    :param initializer: The initialization scheme, He et al. normal or Xavier normal are recommended.
    :param wd:(weight decay) L2 regularization parameter.
    :return: The weights of the kernel initialized. The L2 loss is added to the loss collection.
    """
    w = tf.get_variable('weights', kernel_shape, tf.float32, initializer=initializer)
    collection_name = tf.GraphKeys.REGULARIZATION_LOSSES
    # Skip the collection add when the scope is being reused: the loss term
    # for these weights was already registered on first creation.
    if wd and (not tf.get_variable_scope().reuse):
        weight_decay = tf.multiply(tf.nn.l2_loss(w), wd, name='w_loss')
        tf.add_to_collection(collection_name, weight_decay)
    return w
# Summaries for variables
def __variable_summaries(var):
    """
    Attach a lot of summaries to a Tensor (for TensorBoard visualization):
    mean, standard deviation, min, max and a value histogram.
    :param var: variable to be summarized
    :return: None
    """
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)
| StarcoderdataPython |
127300 | import claripy
from ..errors import SimMemoryError
def obj_bit_size(o):
    """Size in bits of a stored memory object: 8 bits per byte for raw
    bytes, otherwise the claripy AST's own .size()."""
    # `type(...) is bytes` (not isinstance) mirrors how SimMemoryObject
    # distinguishes raw byte storage from AST storage.
    return len(o) * 8 if type(o) is bytes else o.size()
class SimMemoryObject(object):
    """
    A MemoryObjectRef instance is a reference to a byte or several bytes in
    a specific object in SimSymbolicMemory. It is only used inside
    SimSymbolicMemory class.
    """
    def __init__(self, obj, base, length=None, byte_width=8):
        """Wraps `obj` (raw bytes or a claripy AST) stored at address `base`.

        :param obj: the backing value; raw bytes require byte_width == 8.
        :param base: start address of the object in memory.
        :param length: length in memory units; derived from obj's bit size
            when None.
        :param byte_width: bits per addressable unit of the architecture.
        """
        if type(obj) is bytes:
            assert byte_width == 8
        elif not isinstance(obj, claripy.ast.Base):
            raise SimMemoryError('memory can only store claripy Expression')
        self.is_bytes = type(obj) == bytes
        self._byte_width = byte_width
        self.base = base
        self.object = obj
        self.length = obj_bit_size(obj) // self._byte_width if length is None else length
    def size(self):
        """Size of the object in bits."""
        return self.length * self._byte_width
    def __len__(self):
        # len() reports bits, matching size().
        return self.size()
    @property
    def last_addr(self):
        """Address of the last byte covered by this object."""
        return self.base + self.length - 1
    def includes(self, x):
        """True iff address `x` falls within [base, base + length)."""
        return 0 <= x - self.base < self.length
    def bytes_at(self, addr, length, allow_concrete=False):
        """Extracts `length` units starting at address `addr`.

        :param allow_concrete: when True and the backing store is raw bytes,
            returns bytes instead of wrapping them in a claripy BVV.
        """
        # Fast path: the request covers the whole object exactly.
        if addr == self.base and length == self.length:
            return claripy.BVV(self.object) if not allow_concrete and self.is_bytes else self.object
        if self.is_bytes:
            start = addr - self.base
            end = start + length
            o = self.object[start:end]
            return o if allow_concrete else claripy.BVV(o)
        # AST path: compute inclusive [high:low] bit indices for the claripy
        # extract. Bit 0 is the least-significant bit, so the byte at the
        # lowest address maps to the highest bit positions (MSB-first).
        obj_size = self.size()
        left = obj_size - (addr-self.base)*self._byte_width - 1
        right = left - length*self._byte_width + 1
        return self.object[left:right]
    def _object_equals(self, other):
        # ASTs are compared by cache_key (structural identity) rather than
        # `==`, which would build a symbolic comparison expression.
        if self.is_bytes != other.is_bytes:
            return False
        if self.is_bytes:
            return self.object == other.object
        else:
            return self.object.cache_key == other.object.cache_key
    def _length_equals(self, other):
        # Lengths may themselves be symbolic; same cache_key trick as above.
        if type(self.length) != type(other.length):
            return False
        if type(self.length) is int:
            return self.length == other.length
        else:
            return self.length.cache_key == other.length.cache_key
    def __eq__(self, other):
        if type(other) is not SimMemoryObject:
            return NotImplemented
        return self.base == other.base and \
               self._object_equals(other) and \
               self._length_equals(other)
    def __hash__(self):
        obj_hash = hash(self.object) if self.is_bytes else self.object.cache_key
        return hash((obj_hash, self.base, hash(self.length)))
    def __ne__(self, other):
        return not self == other
    def __repr__(self):
        return "MO(%s)" % self.object
| StarcoderdataPython |
1645664 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `rmdawn` package."""
import pytest
from click.testing import CliRunner
from cli import rmdawn_cli
@pytest.fixture
def response():
    """Sample pytest fixture.
    See more at: http://doc.pytest.org/en/latest/fixture.html
    """
    # Placeholder scaffold: yields None; replace with a real response object.
def test_content(response):
    """Sample pytest test function with the pytest fixture as an argument."""
    # Intentionally empty scaffold — add assertions against `response` here.
def test_command_line_interface():
    """Test the CLI."""
    # NOTE(review): the runner is created but never invoked, so the CLI is
    # not actually exercised — presumably `runner.invoke(rmdawn_cli)` plus
    # exit-code assertions were intended here.
    runner = CliRunner()
3366890 | """
module docs go here
"""
| StarcoderdataPython |
3355450 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from designate.openstack.common import log
from designate.network_api.base import NetworkAPI
LOG = log.getLogger(__name__)
POOL = dict([(str(uuid.uuid4()), '192.168.2.%s' % i) for i in xrange(0, 254)])
ALLOCATIONS = {}
def _format_floatingip(id_, address):
return {
'region': 'RegionOne',
'address': address,
'id': id_
}
def allocate_floatingip(tenant_id, floatingip_id=None):
    """
    Allocates a floating ip from the pool to the tenant.

    Moves the entry from the module-level POOL into ALLOCATIONS[tenant_id]
    and returns the formatted floating-IP dict.
    """
    ALLOCATIONS.setdefault(tenant_id, {})
    # Python 2 only: POOL.keys() returns an indexable list here (the module
    # also uses xrange). Picks an arbitrary pool entry when no id is given.
    id_ = floatingip_id or POOL.keys()[0]
    ALLOCATIONS[tenant_id][id_] = POOL.pop(id_)
    values = _format_floatingip(id_, ALLOCATIONS[tenant_id][id_])
    LOG.debug("Allocated to id_ %s to %s - %s" % (id_, tenant_id, values))
    return values
def deallocate_floatingip(id_):
    """
    Deallocate a floatingip

    Returns the IP with the given id from whichever tenant holds it back to
    the free POOL; raises KeyError when no tenant has it allocated.
    """
    LOG.debug('De-allocating %s' % id_)
    for tenant_id, allocated in ALLOCATIONS.items():
        if id_ in allocated:
            POOL[id_] = allocated.pop(id_)
            break
    else:
        # for/else: only reached when no tenant held the id.
        raise KeyError('No such FloatingIP %s' % id_)
def reset_floatingips():
    """Returns every allocated floating IP back to the free POOL."""
    LOG.debug('Resetting any allocations.')
    # Python 2 only: items() returns a list snapshot, so popping from the
    # dict while iterating is safe; under Python 3 the dict view would
    # raise RuntimeError.
    for tenant_id, allocated in ALLOCATIONS.items():
        for key, value in allocated.items():
            POOL[key] = allocated.pop(key)
class FakeNetworkAPI(NetworkAPI):
    """In-memory NetworkAPI backend for tests: serves floating IPs from the
    module-level POOL/ALLOCATIONS dicts instead of a real network service."""
    __plugin_name__ = 'fake'
    def list_floatingips(self, context, region=None):
        """Lists allocated floating IPs.

        Admin contexts see every tenant's allocations; others only their
        own. `region` is accepted for interface compatibility but ignored.
        """
        if context.is_admin:
            data = []
            for tenant_id, allocated in ALLOCATIONS.items():
                data.extend(allocated.items())
        else:
            data = ALLOCATIONS.get(context.tenant, {}).items()
        formatted = [_format_floatingip(k, v) for k, v in data]
        LOG.debug('Returning %i FloatingIPs: %s' %
                  (len(formatted), formatted))
        return formatted
| StarcoderdataPython |
3278172 | <filename>Python3/0747-Largest-Number-At-Least-Twice-of-Others/soln.py
class Solution:
    def dominantIndex(self, nums):
        """
        Returns the index of the largest element if it is at least twice as
        large as every other element, otherwise -1 (LeetCode 747).

        :type nums: List[int]
        :rtype: int

        Fixes the original single-pass scan, which skipped duplicates of the
        maximum (`num != first`) when tracking the runner-up, so e.g.
        [3, 3] wrongly reported index 0 as dominant.
        """
        # Track the largest value, the largest among the rest (duplicates of
        # the max included), and the index of the max.
        first, second, idx = float('-inf'), float('-inf'), 0
        for i, num in enumerate(nums):
            if num > first:
                idx = i
                first, second = num, first
            elif num > second:
                second = num
        # For a single-element list second stays -inf, so the check passes.
        return idx if first >= second * 2 else -1
184388 | <reponame>ExpressAI/eaas_client<filename>setup.py
from setuptools import setup, find_packages
import codecs
import eaas
import eaas.client
# Package metadata, console entry point and runtime dependencies for the
# `eaas` client distribution (version is sourced from the package itself).
setup(
    name="eaas",
    version=eaas.__version__,
    description="Evaluation as a Service for Natural Language Processing",
    long_description=codecs.open("README.md", encoding="utf-8").read(),
    long_description_content_type="text/markdown",
    url="https://github.com/ExpressAI/EaaS_API",
    author="ExpressAI",
    license="Apache 2.0",
    classifiers=[
        "Intended Audience :: Developers",
        "Topic :: Text Processing",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3",
    ],
    packages=find_packages(),
    entry_points={
        "console_scripts": [
            "eaas-cli=eaas.eaas_cli:main",
        ],
    },
    install_requires=[
        "nltk>=3.2",
        "numpy",
        "scipy",
        "matplotlib",
        "scikit-learn",
        "pandas",
        "tqdm",
        "requests"
    ],
    include_package_data=True,
)
| StarcoderdataPython |
130218 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import ast
import contextlib
import fnmatch
import json
import os
import pipes
import re
import shlex
import shutil
import stat
import subprocess
import sys
import tempfile
import zipfile
# Some clients do not add //build/android/gyp to PYTHONPATH.
import md5_check # pylint: disable=relative-import
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
from pylib.constants import host_paths
COLORAMA_ROOT = os.path.join(host_paths.DIR_SOURCE_ROOT,
'third_party', 'colorama', 'src')
# aapt should ignore OWNERS files in addition the default ignore pattern.
AAPT_IGNORE_PATTERN = ('!OWNERS:!.svn:!.git:!.ds_store:!*.scc:.*:<dir>_*:' +
'!CVS:!thumbs.db:!picasa.ini:!*~:!*.d.stamp')
_HERMETIC_TIMESTAMP = (2001, 1, 1, 0, 0, 0)
_HERMETIC_FILE_ATTR = (0644 << 16L)
@contextlib.contextmanager
def TempDir():
  """Context manager yielding a fresh temporary directory.

  The directory and everything inside it is removed when the block exits,
  whether normally or via an exception.
  """
  path = tempfile.mkdtemp()
  try:
    yield path
  finally:
    shutil.rmtree(path)
def MakeDirectory(dir_path):
  """Creates dir_path (and any missing parents) if it does not exist.

  The previous version swallowed *every* OSError, hiding real failures such
  as permission errors or the path existing as a regular file. Only the
  benign "directory already exists" case is ignored now.
  """
  try:
    os.makedirs(dir_path)
  except OSError:
    # Re-raise unless the directory is actually there (already-exists race).
    if not os.path.isdir(dir_path):
      raise
def DeleteDirectory(dir_path):
  """Recursively deletes dir_path; silently does nothing when it is absent."""
  if not os.path.exists(dir_path):
    return
  shutil.rmtree(dir_path)
def Touch(path, fail_if_missing=False):
  """Creates `path` (and parent directories) if needed and bumps its mtime.

  :param fail_if_missing: when True, raise instead of creating a new file.
  """
  if fail_if_missing and not os.path.exists(path):
    raise Exception(path + ' doesn\'t exist.')
  MakeDirectory(os.path.dirname(path))
  # Opening in append mode creates the file without clobbering contents;
  # utime(None) sets both atime and mtime to now.
  with open(path, 'a'):
    os.utime(path, None)
def FindInDirectory(directory, filename_filter):
  """Recursively collects files under `directory` whose basename matches
  the fnmatch pattern `filename_filter`; returns full joined paths."""
  found = []
  for root, _dirnames, filenames in os.walk(directory):
    found.extend(os.path.join(root, name)
                 for name in fnmatch.filter(filenames, filename_filter))
  return found
def FindInDirectories(directories, filename_filter):
  """Union of FindInDirectory() results over every directory given."""
  found = []
  for directory in directories:
    found += FindInDirectory(directory, filename_filter)
  return found
def ParseGnList(gn_string):
  """Parses a GN-style list literal (e.g. '["a", "b"]') into a Python list."""
  # TODO(brettw) bug 573132: This doesn't handle GN escaping properly, so any
  # weird characters like $ or \ in the strings will be corrupted.
  #
  # The code should import build/gn_helpers.py and then do:
  #   parser = gn_helpers.GNValueParser(gn_string)
  #   return return parser.ParseList()
  # As of this writing, though, there is a CastShell build script that sends
  # JSON through this function, and using correct GN parsing corrupts that.
  #
  # We need to be consistent about passing either JSON or GN lists through
  # this function.
  return ast.literal_eval(gn_string)
def ParseGypList(gyp_string):
  """Parses a GYP list: either a bracketed GN/JSON-style list or a
  shell-style whitespace-separated string."""
  # The ninja generator doesn't support $ in strings, so use ## to
  # represent $.
  # TODO(cjhopman): Remove when
  #       https://code.google.com/p/gyp/issues/detail?id=327
  #       is addressed.
  gyp_string = gyp_string.replace('##', '$')
  if gyp_string.startswith('['):
    return ParseGnList(gyp_string)
  return shlex.split(gyp_string)
def CheckOptions(options, parser, required=None):
  """Calls parser.error() for every required option that is still None."""
  for option_name in required or ():
    if getattr(options, option_name) is None:
      parser.error('--%s is required' % option_name.replace('_', '-'))
def WriteJson(obj, path, only_if_changed=False):
  """Serializes `obj` as pretty-printed, key-sorted JSON to `path`.

  When only_if_changed is True the file is rewritten only if the new
  serialization differs from the current contents (preserves mtime).
  """
  new_dump = json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))
  if only_if_changed and os.path.exists(path):
    with open(path, 'r') as oldfile:
      if oldfile.read() == new_dump:
        return
  with open(path, 'w') as outfile:
    outfile.write(new_dump)
def ReadJson(path):
  """Deserializes the JSON file at `path` and returns the resulting object."""
  with open(path) as jsonfile:
    return json.load(jsonfile)
class CalledProcessError(Exception):
  """This exception is raised when the process run by CheckOutput
  exits with a non-zero exit code."""
  def __init__(self, cwd, args, output):
    """Records the working directory, argv list and combined output of the
    failed command for the error message."""
    super(CalledProcessError, self).__init__()
    self.cwd = cwd
    self.args = args
    self.output = output
  def __str__(self):
    # A user should be able to simply copy and paste the command that failed
    # into their shell.
    # NOTE: pipes.quote is the Python 2-era spelling; shlex.quote replaced
    # it in Python 3 (pipes was removed in 3.13).
    copyable_command = '( cd {}; {} )'.format(os.path.abspath(self.cwd),
        ' '.join(map(pipes.quote, self.args)))
    return 'Command failed: {}\n{}'.format(copyable_command, self.output)
# This can be used in most cases like subprocess.check_output(). The output,
# particularly when the command fails, better highlights the command's failure.
# If the command fails, raises a build_utils.CalledProcessError.
def CheckOutput(args, cwd=None, env=None,
                print_stdout=False, print_stderr=True,
                stdout_filter=None,
                stderr_filter=None,
                fail_func=lambda returncode, stderr: returncode != 0):
  """Runs `args` and returns its stdout; raises CalledProcessError on failure.

  Unlike subprocess.check_output(), stderr is captured and included in the
  error text, optional filters may rewrite stdout/stderr first, and the
  `fail_func(returncode, stderr)` policy decides what counts as failure.
  """
  if not cwd:
    cwd = os.getcwd()
  child = subprocess.Popen(args,
      stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=env)
  stdout, stderr = child.communicate()
  # Filters run before the failure check so fail_func sees filtered stderr.
  if stdout_filter is not None:
    stdout = stdout_filter(stdout)
  if stderr_filter is not None:
    stderr = stderr_filter(stderr)
  if fail_func(child.returncode, stderr):
    raise CalledProcessError(cwd, args, stdout + stderr)
  if print_stdout:
    sys.stdout.write(stdout)
  if print_stderr:
    sys.stderr.write(stderr)
  return stdout
def GetModifiedTime(path):
  """Returns the mtime of `path`; for symlinks, the newer of link and target."""
  # For a symlink, the modified time should be the greater of the link's
  # modified time and the modified time of the target.
  return max(os.lstat(path).st_mtime, os.stat(path).st_mtime)
def IsTimeStale(output, inputs):
  """True when `output` is missing or older than any path in `inputs`."""
  if not os.path.exists(output):
    return True
  newest_allowed = GetModifiedTime(output)
  return any(GetModifiedTime(path) > newest_allowed for path in inputs)
def IsDeviceReady():
  """Returns True when `adb get-state` reports an attached, ready device."""
  device_state = CheckOutput(['adb', 'get-state'])
  return device_state.strip() == 'device'
def CheckZipPath(name):
  """Raises unless `name` is a canonical, relative zip member path.

  Rejects '..' traversal / redundant separators (normpath changes them) and
  absolute paths, guarding extraction against zip-slip style entries.
  """
  if os.path.normpath(name) != name:
    raise Exception('Non-canonical zip path: %s' % name)
  if os.path.isabs(name):
    raise Exception('Absolute zip path: %s' % name)
def IsSymlink(zip_file, name):
  """Returns True when the zip entry `name` was archived as a symlink."""
  zi = zip_file.getinfo(name)
  # The two high-order bytes of ZipInfo.external_attr represent
  # UNIX permissions and file type bits.
  # NOTE: `16L` is Python 2 long-literal syntax; this module predates py3.
  return stat.S_ISLNK(zi.external_attr >> 16L)
def ExtractAll(zip_path, path=None, no_clobber=True, pattern=None,
               predicate=None):
  """Extracts zip members into `path` (cwd by default).

  :param no_clobber: raise if an extracted file already exists on disk.
  :param pattern: optional fnmatch pattern; non-matching members are skipped.
  :param predicate: optional callable(name) -> bool filter on member names.
  """
  if path is None:
    path = os.getcwd()
  elif not os.path.exists(path):
    MakeDirectory(path)
  with zipfile.ZipFile(zip_path) as z:
    for name in z.namelist():
      # Directory entries end with '/' and carry no data.
      if name.endswith('/'):
        continue
      if pattern is not None:
        if not fnmatch.fnmatch(name, pattern):
          continue
      if predicate and not predicate(name):
        continue
      CheckZipPath(name)
      if no_clobber:
        output_path = os.path.join(path, name)
        if os.path.exists(output_path):
          raise Exception(
              'Path already exists from zip: %s %s %s'
              % (zip_path, name, output_path))
      if IsSymlink(z, name):
        # Symlink entries store their target as the member data; recreate
        # the link instead of extracting a regular file.
        dest = os.path.join(path, name)
        MakeDirectory(os.path.dirname(dest))
        os.symlink(z.read(name), dest)
      else:
        z.extract(name, path)
def AddToZipHermetic(zip_file, zip_path, src_path=None, data=None,
                     compress=None):
  """Adds a file to the given ZipFile with a hard-coded modified time.
  Args:
    zip_file: ZipFile instance to add the file to.
    zip_path: Destination path within the zip file.
    src_path: Path of the source file. Mutually exclusive with |data|.
    data: File data as a string.
    compress: Whether to enable compression. Default is take from ZipFile
        constructor.
  """
  assert (src_path is None) != (data is None), (
      '|src_path| and |data| are mutually exclusive.')
  CheckZipPath(zip_path)
  # Fixed timestamp and permission bits keep the archive bit-for-bit
  # reproducible regardless of when/where it is built.
  zipinfo = zipfile.ZipInfo(filename=zip_path, date_time=_HERMETIC_TIMESTAMP)
  zipinfo.external_attr = _HERMETIC_FILE_ATTR
  if src_path and os.path.islink(src_path):
    zipinfo.filename = zip_path
    zipinfo.external_attr |= stat.S_IFLNK << 16L # mark as a symlink
    # Symlinks are stored with their target path as the member data.
    zip_file.writestr(zipinfo, os.readlink(src_path))
    return
  if src_path:
    # NOTE: `file()` is Python 2 only (open() in Python 3).
    with file(src_path) as f:
      data = f.read()
  # zipfile will deflate even when it makes the file bigger. To avoid
  # growing files, disable compression at an arbitrary cut off point.
  if len(data) < 16:
    compress = False
  # None converts to ZIP_STORED, when passed explicitly rather than the
  # default passed to the ZipFile constructor.
  compress_type = zip_file.compression
  if compress is not None:
    compress_type = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
  zip_file.writestr(zipinfo, data, compress_type)
def DoZip(inputs, output, base_dir=None):
  """Creates a zip file from a list of files.
  Args:
    inputs: A list of paths to zip, or a list of (zip_path, fs_path) tuples.
    output: Destination .zip file.
    base_dir: Prefix to strip from inputs.
  """
  input_tuples = []
  for tup in inputs:
    # NOTE: `basestring` is Python 2 only (str in Python 3).
    if isinstance(tup, basestring):
      tup = (os.path.relpath(tup, base_dir), tup)
    input_tuples.append(tup)
  # Sort by zip path to ensure stable zip ordering.
  input_tuples.sort(key=lambda tup: tup[0])
  with zipfile.ZipFile(output, 'w') as outfile:
    for zip_path, fs_path in input_tuples:
      AddToZipHermetic(outfile, zip_path, src_path=fs_path)
def ZipDir(output, base_dir):
  """Creates a zip file from a directory."""
  paths = [os.path.join(root, filename)
           for root, _, files in os.walk(base_dir)
           for filename in files]
  DoZip(paths, output, base_dir)
def MatchesGlob(path, filters):
  """Truthy iff any glob pattern in `filters` matches `path`.

  A falsy `filters` value is returned as-is (preserving the original
  short-circuit semantics for callers that test truthiness).
  """
  if not filters:
    return filters
  return any(fnmatch.fnmatch(path, pattern) for pattern in filters)
def MergeZips(output, inputs, exclude_patterns=None, path_transform=None):
  """Merges several zips into `output`, first-wins on duplicate names.

  :param exclude_patterns: glob patterns; matching destination names are
      dropped.
  :param path_transform: callable(name, src_zip) -> destination name.
  """
  path_transform = path_transform or (lambda p, z: p)
  added_names = set()
  with zipfile.ZipFile(output, 'w') as out_zip:
    for in_file in inputs:
      with zipfile.ZipFile(in_file, 'r') as in_zip:
        # HACK: pokes a private zipfile attribute to skip CRC verification
        # while reading — presumably for speed; confirm before changing.
        in_zip._expected_crc = None
        for info in in_zip.infolist():
          # Ignore directories.
          if info.filename[-1] == '/':
            continue
          dst_name = path_transform(info.filename, in_file)
          already_added = dst_name in added_names
          if not already_added and not MatchesGlob(dst_name, exclude_patterns):
            AddToZipHermetic(out_zip, dst_name, data=in_zip.read(info))
          added_names.add(dst_name)
def PrintWarning(message):
  """Prints `message` to stdout with a WARNING prefix (Python 2 print)."""
  print 'WARNING: ' + message
def PrintBigWarning(message):
  """Prints a warning framed by banner lines so it stands out in build logs."""
  print '***** ' * 8
  PrintWarning(message)
  print '***** ' * 8
def GetSortedTransitiveDependencies(top, deps_func):
  """Gets the list of all transitive dependencies in sorted order.
  There should be no cycles in the dependency graph.
  Args:
    top: a list of the top level nodes
    deps_func: A function that takes a node and returns its direct dependencies.
  Returns:
    A list of all transitive dependencies of nodes in top, in order (a node will
    appear in the list at a higher index than all of its dependencies).
  """
  def Node(dep):
    return (dep, deps_func(dep))
  # First: find all deps
  unchecked_deps = list(top)
  all_deps = set(top)
  while unchecked_deps:
    dep = unchecked_deps.pop()
    new_deps = deps_func(dep).difference(all_deps)
    unchecked_deps.extend(new_deps)
    all_deps = all_deps.union(new_deps)
  # Then: simple, slow topological sort.
  # Repeatedly emit any node whose remaining deps are all already emitted.
  # Python 2 only: items() returns a list snapshot, so deleting from the
  # dict inside the loop is safe (a py3 dict view would raise RuntimeError).
  sorted_deps = []
  unsorted_deps = dict(map(Node, all_deps))
  while unsorted_deps:
    for library, dependencies in unsorted_deps.items():
      if not dependencies.intersection(unsorted_deps.keys()):
        sorted_deps.append(library)
        del unsorted_deps[library]
  return sorted_deps
def GetPythonDependencies():
  """Gets the paths of imported non-system python modules.
  A path is assumed to be a "system" import if it is outside of chromium's
  src/. The paths will be relative to the current directory.
  """
  # Python 2 only: sys.modules.itervalues() (values() in Python 3).
  module_paths = (m.__file__ for m in sys.modules.itervalues()
                  if m is not None and hasattr(m, '__file__'))
  abs_module_paths = map(os.path.abspath, module_paths)
  assert os.path.isabs(host_paths.DIR_SOURCE_ROOT)
  non_system_module_paths = [
      p for p in abs_module_paths if p.startswith(host_paths.DIR_SOURCE_ROOT)]
  def ConvertPycToPy(s):
    # Depend on the source file, not the (volatile) compiled bytecode.
    if s.endswith('.pyc'):
      return s[:-1]
    return s
  non_system_module_paths = map(ConvertPycToPy, non_system_module_paths)
  non_system_module_paths = map(os.path.relpath, non_system_module_paths)
  return sorted(set(non_system_module_paths))
def AddDepfileOption(parser):
  """Registers --depfile on either an optparse or argparse parser."""
  # TODO(agrieve): Get rid of this once we've moved to argparse.
  add = parser.add_option if hasattr(parser, 'add_option') else parser.add_argument
  add('--depfile',
      help='Path to depfile. Must be specified as the action\'s first output.')
def WriteDepfile(path, dependencies):
  """Writes a Makefile-style depfile: '<path>: dep1 dep2 ...\\n'."""
  with open(path, 'w') as depfile:
    depfile.write('%s: %s\n' % (path, ' '.join(dependencies)))
def ExpandFileArgs(args):
  """Replaces file-arg placeholders in args.
  These placeholders have the form:
    @FileArg(filename:key1:key2:...:keyn)
  The value of such a placeholder is calculated by reading 'filename' as json.
  And then extracting the value at [key1][key2]...[keyn].
  Note: This intentionally does not return the list of files that appear in such
  placeholders. An action that uses file-args *must* know the paths of those
  files prior to the parsing of the arguments (typically by explicitly listing
  them in the action's inputs in build files).
  """
  new_args = list(args)
  file_jsons = dict()
  r = re.compile('@FileArg\((.*?)\)')
  for i, arg in enumerate(args):
    match = r.search(arg)
    if not match:
      continue
    # Only a trailing placeholder is supported; anything after it is an error.
    if match.end() != len(arg):
      raise Exception('Unexpected characters after FileArg: ' + arg)
    lookup_path = match.group(1).split(':')
    file_path = lookup_path[0]
    # Cache each referenced file so it is parsed at most once per call.
    if not file_path in file_jsons:
      file_jsons[file_path] = ReadJson(file_path)
    expansion = file_jsons[file_path]
    for k in lookup_path[1:]:
      expansion = expansion[k]
    # Keep any prefix before the placeholder, substitute the looked-up value.
    new_args[i] = arg[:match.start()] + str(expansion)
  return new_args
def CallAndWriteDepfileIfStale(function, options, record_path=None,
                               input_paths=None, input_strings=None,
                               output_paths=None, force=False,
                               pass_changes=False,
                               depfile_deps=None):
  """Wraps md5_check.CallAndRecordIfStale() and also writes dep & stamp files.
  Depfiles and stamp files are automatically added to output_paths when present
  in the |options| argument. They are then created after |function| is called.
  By default, only python dependencies are added to the depfile. If there are
  other input paths that are not captured by GN deps, then they should be listed
  in depfile_deps. It's important to write paths to the depfile that are already
  captured by GN deps since GN args can cause GN deps to change, and such
  changes are not immediately reflected in depfiles (http://crbug.com/589311).
  """
  if not output_paths:
    raise Exception('At least one output_path must be specified.')
  input_paths = list(input_paths or [])
  input_strings = list(input_strings or [])
  output_paths = list(output_paths or [])
  python_deps = None
  if hasattr(options, 'depfile') and options.depfile:
    python_deps = GetPythonDependencies()
    # List python deps in input_strings rather than input_paths since the
    # contents of them does not change what gets written to the depfile.
    input_strings += python_deps
    output_paths += [options.depfile]
  stamp_file = hasattr(options, 'stamp') and options.stamp
  if stamp_file:
    output_paths += [stamp_file]
  # Depfile/stamp writing happens inside the stale callback so they are only
  # touched when the wrapped action actually runs.
  def on_stale_md5(changes):
    args = (changes,) if pass_changes else ()
    function(*args)
    if python_deps is not None:
      all_depfile_deps = list(python_deps)
      if depfile_deps:
        all_depfile_deps.extend(depfile_deps)
      WriteDepfile(options.depfile, all_depfile_deps)
    if stamp_file:
      Touch(stamp_file)
  md5_check.CallAndRecordIfStale(
      on_stale_md5,
      record_path=record_path,
      input_paths=input_paths,
      input_strings=input_strings,
      output_paths=output_paths,
      force=force,
      pass_changes=True)
| StarcoderdataPython |
1613772 | <reponame>hbasria/bitresource
from bitutils.objects import Exchange
from registry import Registry
class ResourceRegistry(Registry):
    """Registry that, for resources exposing a `name` attribute, also
    registers a matching Exchange in the module-level exchange_registry
    before falling back to the base Registry naming."""
    def get_object_name(self, data):
        if hasattr(data, 'name'):
            exchange_code = getattr(data, 'name')
            # Side effect: every named resource registers an Exchange keyed
            # by the same code in exchange_registry (defined at module level).
            exchange_registry.register(Exchange(code=exchange_code), name=exchange_code)
            return exchange_code
        return super(ResourceRegistry, self).get_object_name(data)
exchange_registry = Registry()
currency_registry = Registry()
market_registry = Registry()
resource_registry = ResourceRegistry()
# from bitresource import bitpay
# from bitresource import bitstamp
from bitresource import bittrex
from bitresource import binance
from bitresource import blockchain
# from bitresource import coinbase
from bitresource import coindesk
# from bitresource import kraken
| StarcoderdataPython |
3231733 | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Example for commanding robot with position control using moveit planner
"""
from pyrobot import Robot
from pyrobot.utils.util import MoveitObjectHandler
from pyrobot.utils.move_group_interface import MoveGroupInterface
import time
import numpy as np
import rospy
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point
def main():
    """Plans a MoveIt trajectory for a Sawyer arm to a fixed target pose
    (with a table registered as a collision obstacle) and replays the
    planned joint waypoints one by one."""
    config = dict(moveit_planner="ESTkConfigDefault")
    bot = Robot(
        "sawyer",
        use_arm=True,
        use_base=False,
        use_camera=False,
        use_gripper=True,
        arm_config=config,
    )
    obstacle_handler = MoveitObjectHandler()
    # Add a table
    # position and orientation (quaternion: x, y, z, w) of the table
    pose = [0.8, 0.0, -0.1, 0.0, 0.0, 0.0, 1.0]
    # size of the table (x, y, z)
    size = (1.35, 2.0, 0.1)
    obstacle_handler.add_table(pose, size)
    target_pos = np.array([0.45251711, 0.16039618, 0.08021886])
    # 3x3 array passed as the target orientation — presumably a rotation
    # matrix (end-effector pointing roughly downward); confirm against the
    # make_plan_pose API.
    target_ori = np.array(
        [
            [-0.00437824, 0.99994626, 0.00939675],
            [0.99998735, 0.0044013, -0.00243524],
            [-0.00247647, 0.00938597, -0.99995288],
        ]
    )
    bot.arm.go_home()
    time.sleep(1)
    traj = bot.arm.make_plan_pose(target_pos, target_ori)
    time.sleep(1)
    print(traj)
    # Execute the plan waypoint-by-waypoint without re-planning each step.
    for joints in traj:
        bot.arm.set_joint_positions(joints, plan=False)
        time.sleep(1)
    # bot.arm.set_ee_pose(target_pos, target_ori, plan=True)
    # time.sleep(1)
    # bot.arm.go_home()
if __name__ == "__main__":
main()
| StarcoderdataPython |
1741841 | <reponame>SirLegolot/hide.me
#Steganography for hiding text inside images
from PIL import Image
import imghdr
import math
import ast
import random
# the structure of the for loops for the encoding functions was inspired by:
# https://hackernoon.com/simple-image-steganography-in-python-18c7b534854f
# particularly how an iterator is used to iterate through the bits of the secret message
class StegoImg():
    def __init__(self, filePath):
        """Records the image path, its detected format (via imghdr) and its
        band mode string (e.g. 'L' for grayscale, 'RGB' for color)."""
        self.filePath = filePath
        self.imgType = imghdr.what(filePath)
        with Image.open(filePath) as tmpImg:
            self.imgMode = "".join(tmpImg.getbands())
### Encode Section ###
def getASCII(self,s):
binary = ""
for char in s:
binary+=str(bin(ord(char))[2:].zfill(8))
return binary
def alterPixel(self, value, bit):
listValue = list(bin(value)[2:])
listValue[-1] = bit
binResult = ""
for num in listValue:
binResult+=str(num)
result = int(binResult, 2)
return result
    def randomize(self, width, height, binMsg, pixelWeight):
        """Picks ceil(len(binMsg) / pixelWeight) distinct random pixel
        coordinates restricted to the right half of the image
        (x in [width//2, width-1], y in [0, height-1]).

        NOTE(review): the returned list order is nondeterministic because it
        is materialized from a set; decoding must rely on the stored key,
        not on any implied ordering.
        """
        numChoices = math.ceil(len(binMsg)/pixelWeight)
        coordSet = set()
        while len(coordSet)<numChoices:
            newCoord = (random.randint(width//2,width-1), random.randint(0,height-1))
            if newCoord not in coordSet:
                coordSet.add(newCoord)
        coordList = list(coordSet)
        return coordList
def jumble(self, lst, key):
s = str(lst).replace(" ","").replace("[","").replace("]","").replace("(","").replace(")","")
jumbledString = ""
for i in range(len(s)):
jumbledString+=chr(((ord(s[i])+ord(key[i%len(key)]))%126)+33)
return jumbledString
def encodeMsgLinearLSB_L(self, imagePath, msg, savePath):
binMsg = self.getASCII(msg)
iterBinMsg = iter(binMsg)
im = Image.open(imagePath)
width, height = im.size
if width*height<len(binMsg):
raise Exception("Cannot hide a message that is too large for the image")
newIm = im.copy()
zeroCount = 0
for row in range(width):
for col in range(height):
grd = im.getpixel((row,col))
nextBit = next(iterBinMsg,0)
newGrd = self.alterPixel(grd, nextBit)
if nextBit == "1":
zeroCount = 0
else:
zeroCount +=1
newIm.putpixel((row, col), (newGrd))
if zeroCount >20:
tmpPathString = imagePath.split("/")[-1].split(".")
newImagePath = savePath+"/"+tmpPathString[0]+"_adjusted."+tmpPathString[1]
newIm.save(newImagePath)
adjImg = StegoImg(newImagePath)
return adjImg
tmpPathString = imagePath.split("/")[-1].split(".")
newImagePath = savePath+"/"+tmpPathString[0]+"_adjusted."+tmpPathString[1]
newIm.save(newImagePath)
adjImg = StegoImg(newImagePath)
return adjImg
def encodeMsgRandomLSB_L(self, imagePath, msg, key, savePath):
binMsg = self.getASCII(msg)
iterBinMsg = iter(binMsg)
im = Image.open(imagePath)
width, height = im.size
if width*height/2<len(binMsg):
raise Exception("Cannot hide a message that is too large for the image")
coordList = self.randomize(width, height, binMsg, 1)
keyString = self.jumble(coordList, key)
self.encodeMsgLinearLSB_L(imagePath,keyString, savePath)
tmpPathString = imagePath.split("/")[-1].split(".")
newImagePath = savePath+"/"+tmpPathString[0]+"_adjusted."+tmpPathString[1]
newIm = Image.open(newImagePath)
finalIm = newIm.copy()
for coord in coordList:
grd = newIm.getpixel(coord)
newGrd = self.alterPixel(grd, next(iterBinMsg,0))
finalIm.putpixel(coord, (newGrd))
finalIm.save(newImagePath)
adjImg = StegoImg(newImagePath)
return adjImg
def encodeMsgLinearLSB_RGB(self, imagePath, msg, savePath):
binMsg = self.getASCII(msg)
iterBinMsg = iter(binMsg)
im = Image.open(imagePath)
width, height = im.size
if width*height*3<len(binMsg):
raise Exception("Cannot hide a message that is too large for the image")
newIm = im.copy()
zeroCount = 0
for row in range(width):
for col in range(height):
r,g,b = im.getpixel((row,col))
nextBit = next(iterBinMsg,0)
newR = self.alterPixel(r, nextBit)
if nextBit == "1":
zeroCount = 0
else:
zeroCount +=1
nextBit = next(iterBinMsg,0)
newG = self.alterPixel(g, nextBit)
if nextBit == "1":
zeroCount = 0
else:
zeroCount +=1
nextBit = next(iterBinMsg,0)
newB = self.alterPixel(b, nextBit)
if nextBit == "1":
zeroCount = 0
else:
zeroCount +=1
newIm.putpixel((row, col), (newR, newG, newB))
if zeroCount >20:
tmpPathString = imagePath.split("/")[-1].split(".")
newImagePath = savePath+"/"+tmpPathString[0]+"_adjusted."+tmpPathString[1]
newIm.save(newImagePath)
adjImg = StegoImg(newImagePath)
return adjImg
tmpPathString = imagePath.split("/")[-1].split(".")
newImagePath = savePath+"/"+tmpPathString[0]+"_adjusted."+tmpPathString[1]
newIm.save(newImagePath)
adjImg = StegoImg(newImagePath)
return adjImg
def encodeMsgRandomLSB_RGB(self, imagePath, msg, key, savePath):
binMsg = self.getASCII(msg)
iterBinMsg = iter(binMsg)
im = Image.open(imagePath)
width, height = im.size
if width*height*3/2<len(binMsg):
raise Exception("Cannot hide a message that is too large for the image")
coordList = self.randomize(width, height, binMsg, 3)
keyString = self.jumble(coordList, key)
self.encodeMsgLinearLSB_RGB(imagePath,keyString, savePath)
tmpPathString = imagePath.split("/")[-1].split(".")
newImagePath = savePath+"/"+tmpPathString[0]+"_adjusted."+tmpPathString[1]
newIm = Image.open(newImagePath)
finalIm = newIm.copy()
for coord in coordList:
r,g,b = newIm.getpixel(coord)
newR = self.alterPixel(r, next(iterBinMsg,0))
newG = self.alterPixel(g, next(iterBinMsg,0))
newB = self.alterPixel(b, next(iterBinMsg,0))
finalIm.putpixel(coord, (newR, newG, newB))
finalIm.save(newImagePath)
adjImg = StegoImg(newImagePath)
return adjImg
def encodeMsgLinearLSB_RGBA(self, imagePath, msg, savePath):
binMsg = self.getASCII(msg)
iterBinMsg = iter(binMsg)
im = Image.open(imagePath)
width, height = im.size
if width*height*3<len(binMsg):
raise Exception("Cannot hide a message that is too large for the image")
newIm = im.copy()
zeroCount = 0
for row in range(width):
for col in range(height):
r,g,b,a = im.getpixel((row,col))
nextBit = next(iterBinMsg,0)
newR = self.alterPixel(r, nextBit)
if nextBit == "1":
zeroCount = 0
else:
zeroCount +=1
nextBit = next(iterBinMsg,0)
newG = self.alterPixel(g, nextBit)
if nextBit == "1":
zeroCount = 0
else:
zeroCount +=1
nextBit = next(iterBinMsg,0)
newB = self.alterPixel(b, nextBit)
if nextBit == "1":
zeroCount = 0
else:
zeroCount +=1
newIm.putpixel((row, col), (newR, newG, newB))
if zeroCount >20:
tmpPathString = imagePath.split("/")[-1].split(".")
newImagePath = savePath+"/"+tmpPathString[0]+"_adjusted."+tmpPathString[1]
newIm.save(newImagePath)
adjImg = StegoImg(newImagePath)
return adjImg
tmpPathString = imagePath.split("/")[-1].split(".")
newImagePath = savePath+"/"+tmpPathString[0]+"_adjusted."+tmpPathString[1]
newIm.save(newImagePath)
adjImg = StegoImg(newImagePath)
return adjImg
def encodeMsgRandomLSB_RGBA(self, imagePath, msg, key, savePath):
binMsg = self.getASCII(msg)
iterBinMsg = iter(binMsg)
im = Image.open(imagePath)
width, height = im.size
if width*height*3/2<len(binMsg):
raise Exception("Cannot hide a message that is too large for the image")
coordList = self.randomize(width, height, binMsg, 3)
keyString = self.jumble(coordList, key)
self.encodeMsgLinearLSB_RGBA(imagePath,keyString, savePath)
tmpPathString = imagePath.split("/")[-1].split(".")
newImagePath = savePath+"/"+tmpPathString[0]+"_adjusted."+tmpPathString[1]
newIm = Image.open(newImagePath)
finalIm = newIm.copy()
for coord in coordList:
r,g,b,a = newIm.getpixel(coord)
newR = self.alterPixel(r, next(iterBinMsg,0))
newG = self.alterPixel(g, next(iterBinMsg,0))
newB = self.alterPixel(b, next(iterBinMsg,0))
finalIm.putpixel(coord, (newR, newG, newB, a))
finalIm.save(newImagePath)
adjImg = StegoImg(newImagePath)
return adjImg
### Decode section ###
def getString(self, bits):
binLetters = []
for i in range(0,len(bits), 8):
if i+8<len(bits):
binLetters.append(bits[i:i+8])
else:
binLetters.append(bits[i:])
msg = ""
for i in range(len(binLetters)):
if "1" in binLetters[i]:
msg += chr(int(binLetters[i],2))
return msg
def unjumble(self, s, key):
unjumbledString = ""
for i in range(len(s)):
unjumbledString+=chr((ord(s[i])-ord(key[i%len(key)])-33)%126)
unjumbledList = list(map(int,unjumbledString.split(",")))
coordList = []
for i in range(0,len(unjumbledList),2):
coordList.append((unjumbledList[i],unjumbledList[i+1]))
return coordList
def decodeMsgLinearLSB_L(self):
im = Image.open(self.filePath)
width, height = im.size
bits = ""
# structure of the for loops for the decoding functions was also inspired by:
# https://hackernoon.com/simple-image-steganography-in-python-18c7b534854f
# this was altered to be much quicker by creating a flag of 20+ zeroes
# to signify that the entire message data has been read
for row in range(width):
for col in range(height):
grd = im.getpixel((row,col))
bits+=bin(grd)[-1]
try:
if bits[-20:]==("0"*20):
msg = self.getString(bits)
return msg
except:
pass
msg = self.getString(bits)
return msg
def decodeMsgRandomLSB_L(self, key):
keyString = self.decodeMsgLinearLSB_L()
keyList = self.unjumble(keyString, key)
im = Image.open(self.filePath)
width, height = im.size
bits = ""
for item in keyList:
grd = im.getpixel(item)
bits+=bin(grd)[-1]
msg = self.getString(bits)
return msg
def decodeMsgLinearLSB_RGB(self):
im = Image.open(self.filePath)
width, height = im.size
bits = ""
for row in range(width):
for col in range(height):
r,g,b = im.getpixel((row,col))
bits+=bin(r)[-1]
bits+=bin(g)[-1]
bits+=bin(b)[-1]
try:
if bits[-20:]==("0"*20):
msg = self.getString(bits)
return msg
except:
pass
msg = self.getString(bits)
return msg
def decodeMsgRandomLSB_RGB(self, key):
keyString = self.decodeMsgLinearLSB_RGB()
keyList = self.unjumble(keyString, key)
im = Image.open(self.filePath)
width, height = im.size
bits = ""
for item in keyList:
r,g,b = im.getpixel(item)
bits+=bin(r)[-1]
bits+=bin(g)[-1]
bits+=bin(b)[-1]
msg = self.getString(bits)
return msg
def decodeMsgLinearLSB_RGBA(self):
im = Image.open(self.filePath)
width, height = im.size
bits = ""
for row in range(width):
for col in range(height):
r,g,b,a = im.getpixel((row,col))
bits+=bin(r)[-1]
bits+=bin(g)[-1]
bits+=bin(b)[-1]
try:
if bits[-20:]==("0"*20):
msg = self.getString(bits)
return msg
except:
pass
msg = self.getString(bits)
return msg
def decodeMsgRandomLSB_RGBA(self, key):
keyString = self.decodeMsgLinearLSB_RGBA()
keyList = self.unjumble(keyString, key)
im = Image.open(self.filePath)
width, height = im.size
bits = ""
for item in keyList:
r,g,b,a = im.getpixel(item)
bits+=bin(r)[-1]
bits+=bin(g)[-1]
bits+=bin(b)[-1]
msg = self.getString(bits)
return msg
### Overarching encode/decode functions ###
def encode(self, msg, savePath, method="linearLSB", key=None):
if method=="linearLSB":
if self.imgType not in {"png", "tiff", "bmp"}:
raise Exception("Image must be a lossless compression format ('png', 'tiff', 'bmp') for LSB. The image type was '{}'".format(self.imgType))
if self.imgMode == "L":
newImg = self.encodeMsgLinearLSB_L(self.filePath, msg, savePath)
elif self.imgMode == "RGB":
newImg = self.encodeMsgLinearLSB_RGB(self.filePath, msg, savePath)
elif self.imgMode == "RGBA":
newImg = self.encodeMsgLinearLSB_RGBA(self.filePath, msg, savePath)
else:
raise Exception("Image must be have one of these modes: 'L', 'RGB', or 'RGBA'. The mode of selected image was '{}'".format(self.imgMode))
return newImg
elif method=="randomLSB":
if self.imgType not in {"png", "tiff", "bmp"}:
raise Exception("Image must be a lossless compression format ('png', 'tiff', 'bmp') for LSB. The image type was '{}'".format(self.imgType))
if self.imgMode == "L":
newImg = self.encodeMsgRandomLSB_L(self.filePath, msg, key, savePath)
elif self.imgMode == "RGB":
newImg = self.encodeMsgRandomLSB_RGB(self.filePath, msg, key, savePath)
elif self.imgMode == "RGBA":
newImg = self.encodeMsgRandomLSB_RGBA(self.filePath, msg, key, savePath)
else:
raise Exception("Image must be have one of these modes: 'L', 'RGB', or 'RGBA'. The mode of selected image was '{}'".format(self.imgMode))
return newImg
elif method=="DFT":
pass
# perform discrete fourier transform LSB
# IDK how to do this
elif method=="DCT":
pass
# perform discrete cosine transform LSB
# IDK how to do this
def decode(self, method="linearLSB", key = None):
if method=="linearLSB":
if self.imgType not in {"png", "tiff", "bmp"}:
raise Exception("Image must be a lossless compression format ('png', 'tiff', 'bmp') for LSB. The image type was '{}'".format(self.imgType))
if self.imgMode == "L":
message = self.decodeMsgLinearLSB_L()
elif self.imgMode == "RGB":
message = self.decodeMsgLinearLSB_RGB()
elif self.imgMode == "RGBA":
message = self.decodeMsgLinearLSB_RGBA()
else:
raise Exception("Image must be have one of these modes: 'L', 'RGB', or 'RGBA'. The mode of selected image was '{}'".format(self.imgMode))
elif method=="randomLSB":
if self.imgType not in {"png", "tiff", "bmp"}:
raise Exception("Image must be a lossless compression format ('png', 'tiff', 'bmp') for LSB. The image type was '{}'".format(self.imgType))
if self.imgType not in {"png", "tiff", "bmp"}:
raise Exception("Image must be a lossless compression format ('png', 'tiff', 'bmp') for LSB. The image type was '{}'".format(self.imgType))
if self.imgMode == "L":
message = self.decodeMsgRandomLSB_L(key)
elif self.imgMode == "RGB":
message = self.decodeMsgRandomLSB_RGB(key)
elif self.imgMode == "RGBA":
message = self.decodeMsgRandomLSB_RGBA(key)
else:
raise Exception("Image must be have one of these modes: 'L', 'RGB', or 'RGBA'. The mode of selected image was '{}'".format(self.imgMode))
elif method=="DFT":
pass
# perform discrete fourier transform LSB
# IDK how to do this
elif method=="DCT":
pass
# perform discrete cosine transform LSB
# IDK how to do this
return message | StarcoderdataPython |
3256984 | #!/usr/bin/env python
import os, sys, time
# Require at least one directory argument (note: this file is Python 2 -
# print-statement syntax).
if len(sys.argv) < 2:
    print "usage: %s directory..." % sys.argv[0]
    sys.exit(1)
def get_date(file_name):
    """Return the ctime string for the numeric timestamp that follows the
    last '-' in *file_name*, or "" when there is no usable timestamp."""
    i = file_name.rfind('-')
    if i == -1:
        return ""
    try:
        t = int(file_name[i+1:])
    except ValueError:
        # suffix after the last '-' is not a number; treat file as undated
        # (the original crashed here)
        return ""
    return time.ctime(t)
# For each directory argument, list its files with the date decoded from the
# trailing "-<timestamp>" suffix.
# NOTE(review): `dir` shadows the builtin of the same name.
for dir in sys.argv[1:]:
    print dir
    for file_name in sorted(os.listdir(dir)):
        print " %s - %s" % (file_name, get_date(file_name))
| StarcoderdataPython |
3270332 | # 青岛啤酒活动,联通每天领3次共90M流量
# https://www.52pojie.cn/thread-950775-1-1.html
import requests as r
import time
def f1(num):
    # Perform one data-traffic redemption for phone number *num* against the
    # China Unicom (10010) promotion API: request an SMS captcha, then submit
    # the captcha the user types in.
    data1 = {
        'phoneVal': num,
        'type': '21'
    }
    # Request the SMS captcha for this number
    print(r.post('https://m.10010.com/god/AirCheckMessage/sendCaptcha', data=data1).text)
    # Redeem the data allowance using the captcha entered by the user
    # (the input() prompt is a user-facing Chinese string and is left as-is)
    print(r.get('https://m.10010.com/god/qingPiCard/flowExchange?number=%s&type=21&captcha=%s' % (
        num, input('验证码: ').strip())).text)
# Prompt for the phone number, then run the three daily redemptions.
phonenumber = input("请输入手机号:")
for i in range(3):
    print('==第{}次领取=='.format(i + 1))
    f1(phonenumber)
    if i == 2:
        print('今天已经领完啦!请明天再来吧。')
        print('程序将在5s后退出。。。')
        # Fix: the message promises a 5 second exit delay, but the original
        # slept 65 seconds here.
        time.sleep(5)
    else:
        # the API rate-limits redemptions, so wait between attempts
        print('请等待65s后面还有{}次哦'.format(2 - i))
        time.sleep(65)
1738600 | from .CompDesc import CompDesc
from .functionToolbox import *
| StarcoderdataPython |
151094 | import gtimer as gt
from rlkit.core import logger
from ROLL.online_LSTM_replay_buffer import OnlineLSTMRelabelingBuffer
import rlkit.torch.vae.vae_schedules as vae_schedules
import ROLL.LSTM_schedule as lstm_schedules
from rlkit.torch.torch_rl_algorithm import (
TorchBatchRLAlgorithm,
)
import rlkit.torch.pytorch_util as ptu
from torch.multiprocessing import Process, Pipe
from threading import Thread
from test_latent_space.test_LSTM import compare_latent_distance
from test_latent_space.test_LSTM2 import test_lstm_traj
from test_latent_space.test_masked_traj import test_masked_traj_lstm
import os
import os.path as osp
import numpy as np
from multiworld.core.image_env import unormalize_image, normalize_image
class OnlineLSTMAlgorithm(TorchBatchRLAlgorithm):
    """Batch RL algorithm that interleaves policy training with online
    training of an original-image VAE and a segmentation LSTM.

    The replay buffer must be an OnlineLSTMRelabelingBuffer; latents are
    refreshed after each (re)training round of the generative models.
    """

    def __init__(
        self,
        env_id,
        vae_original,
        lstm_segmented,
        vae_trainer_original,
        lstm_trainer_segmented,
        *base_args,
        vae_save_period=1,
        lstm_save_period=1,
        vae_training_schedule=vae_schedules.never_train,
        lstm_training_schedule=lstm_schedules.never_train,
        lstm_test_N=500,
        lstm_segmentation_method='color',
        oracle_data=False,
        parallel_vae_train=False,
        vae_min_num_steps_before_training=0,
        uniform_dataset=None,
        keep_train_segmentation_lstm=False,
        keep_train_original_vae=True,
        **base_kwargs
    ):
        super().__init__(*base_args, **base_kwargs)
        assert isinstance(self.replay_buffer, OnlineLSTMRelabelingBuffer)
        self.vae_original = vae_original
        self.lstm_segmented = lstm_segmented
        self.vae_trainer_original = vae_trainer_original
        self.lstm_trainer_segmented = lstm_trainer_segmented
        # attach the models to their trainers so trainer.model is consistent
        self.vae_trainer_original.model = self.vae_original
        self.lstm_trainer_segmented.model = self.lstm_segmented
        self.vae_save_period = vae_save_period
        self.lstm_save_period = lstm_save_period
        self.vae_training_schedule = vae_training_schedule
        self.lstm_training_schedule = lstm_training_schedule
        self.oracle_data = oracle_data
        self.parallel_vae_train = parallel_vae_train
        self.vae_min_num_steps_before_training = vae_min_num_steps_before_training
        self.uniform_dataset = uniform_dataset
        # lazily-initialized state for the (optional) parallel training path
        self._vae_training_process = None
        self._update_subprocess_vae_thread = None
        self._vae_conn_pipe = None
        self.keep_train_segmentation_lstm = keep_train_segmentation_lstm
        self.keep_train_original_vae = keep_train_original_vae
        # below is just used for testing the segmentation vae.
        self.env_id = env_id
        self.lstm_test_N = lstm_test_N
        self.lstm_segmentation_method = lstm_segmentation_method

    def _train(self):
        # Run the normal batch-RL training loop, then tear down any
        # parallel-training resources.
        super()._train()
        self._cleanup()

    def _end_epoch(self, epoch):
        # Train/evaluate the generative models once per epoch, before the
        # base class bookkeeping.
        # self.check_replay_buffer()
        self._train_vae(epoch)
        gt.stamp('vae training')
        super()._end_epoch(epoch)

    def _log_stats(self, epoch):
        self._log_vae_stats()
        super()._log_stats(epoch)

    def to(self, device):
        self.vae_original.to(device)
        self.lstm_segmented.to(device)
        super().to(device)

    def _get_snapshot(self):
        # Extend the base snapshot with both generative models.
        snapshot = super()._get_snapshot()
        assert 'vae' not in snapshot
        snapshot['vae_original'] = self.vae_original
        snapshot['lstm_segmented'] = self.lstm_segmented
        return snapshot

    """
    debug code
    """
    def check_replay_buffer(self):
        # Debug helper: sample a batch and visually inspect decoded
        # observations/goals. Not called in normal training (see _end_epoch).
        batch = self.replay_buffer.random_batch(
            self.batch_size)
        rewards = batch['rewards']
        terminals = batch['terminals']
        obs = batch['observations']
        actions = batch['actions']
        next_obs = batch['next_observations']
        goals = batch['resampled_goals']
        print("obs: ", type(obs))
        print("obs shape: ", obs.shape)
        decoded_obs = self.eval_env._decode(obs, self.eval_env.vae_original)
        for idx in range(10):
            self.eval_env.show_obs(decoded_obs[idx], "sac policy obs")
        print("next_obs: ", type(next_obs))
        print("next obs shape: ", next_obs.shape)
        decoded_next_obs = self.eval_env._decode(next_obs, self.eval_env.vae_original)
        for idx in range(10):
            self.eval_env.show_obs(decoded_next_obs[idx], "sac policy next_obs")
        decoded_goal = self.eval_env._decode(goals, self.eval_env.lstm_segmented)
        for idx in range(10):
            self.eval_env.show_obs(decoded_goal[idx], "sac policy goal")

    """
    VAE-specific Code
    """
    def _train_vae(self, epoch):
        # Train the VAE/LSTM according to their schedules. Always trains
        # during the warm-up epochs before RL starts (epoch <= rl_start_epoch-1).
        if self.parallel_vae_train and self._vae_training_process is None:
            self.init_vae_training_subprocess()
        should_train, amount_to_train = self.vae_training_schedule(epoch)
        _, lstm_amount_to_train = self.lstm_training_schedule(epoch)
        rl_start_epoch = int(self.min_num_steps_before_training / (
            self.num_expl_steps_per_train_loop * self.num_train_loops_per_epoch
        ))
        print(" _train_vae called, should_train, amount_to_train", should_train, amount_to_train)
        if should_train or epoch <= (rl_start_epoch - 1):
            if self.parallel_vae_train:
                # NOTE(review): this branch references
                # OnlineVaeAlgorithmSegmented, which is not defined in this
                # module - confirm the parallel path is actually used/works.
                assert self._vae_training_process.is_alive()
                # Make sure the last vae update has finished before starting
                # another one
                if self._update_subprocess_vae_thread is not None:
                    self._update_subprocess_vae_thread.join()
                self._update_subprocess_vae_thread = Thread(
                    target=OnlineVaeAlgorithmSegmented.update_vae_in_training_subprocess,
                    args=(self, epoch, ptu.device)
                )
                self._update_subprocess_vae_thread.start()
                self._vae_conn_pipe.send((amount_to_train, epoch))
            else:
                if self.keep_train_original_vae:
                    _train_vae(
                        self.vae_trainer_original,
                        self.replay_buffer,
                        epoch,
                        amount_to_train,
                        key='image_observation'
                    )
                    _test_vae(
                        self.vae_trainer_original,
                        epoch,
                        self.replay_buffer,
                        vae_save_period=self.vae_save_period,
                        uniform_dataset=self.uniform_dataset,
                        save_prefix='r_original_'
                    )
                if self.keep_train_segmentation_lstm:
                    _train_lstm(
                        lstm_trainer=self.lstm_trainer_segmented,
                        replay_buffer=self.replay_buffer,
                        epoch=epoch,
                        batches=lstm_amount_to_train,
                        oracle_data=False,
                        key='image_observation_segmented'
                    )
                    _test_lstm(
                        lstm_trainer=self.lstm_trainer_segmented,
                        epoch=epoch,
                        replay_buffer=self.replay_buffer,
                        env_id=self.env_id,
                        lstm_save_period=self.lstm_save_period,
                        uniform_dataset=None,
                        save_prefix='r_lstm_' ,
                        lstm_test_N=self.lstm_test_N,
                        lstm_segmentation_method=self.lstm_segmentation_method
                    )
            # we only refresh goals if the segmentation lstm (used for goal sampling) has changed
            self.replay_buffer.refresh_latents(epoch, refresh_goals=self.keep_train_segmentation_lstm)

    def _log_vae_stats(self):
        logger.record_dict(
            self.vae_trainer_original.get_diagnostics(),
            prefix='vae_trainer_original/',
        )
        logger.record_dict(
            self.lstm_trainer_segmented.get_diagnostics(),
            prefix='lstm_trainer_segmented/',
        )

    def _cleanup(self):
        # Close IPC resources created for parallel VAE training, if any.
        if self.parallel_vae_train:
            self._vae_conn_pipe.close()
            self._vae_training_process.terminate()

    def init_vae_training_subprocess(self):
        # Spawn the background VAE-training process and hand it the trainer.
        # NOTE(review): this references self.vae, which __init__ never sets
        # (the attribute is now self.vae_original) - this parallel path looks
        # broken after the rename; confirm before enabling parallel_vae_train.
        self._vae_conn_pipe, process_pipe = Pipe()
        self._vae_training_process = Process(
            target=subprocess_train_vae_loop,
            args=(
                process_pipe,
                self.vae,
                self.vae.state_dict(),
                self.replay_buffer,
                self.replay_buffer.get_mp_info(),
                ptu.device,
            )
        )
        self._vae_training_process.start()
        self._vae_conn_pipe.send(self.vae_trainer)

    def update_vae_in_training_subprocess(self, epoch, device):
        # Pull updated weights back from the training subprocess and re-test.
        # NOTE(review): same concern as above - self.vae / self.vae_trainer
        # are not defined by __init__.
        self.vae.__setstate__(self._vae_conn_pipe.recv())
        self.vae.to(device)
        _test_vae(
            self.vae_trainer,
            epoch,
            self.replay_buffer,
            vae_save_period=self.vae_save_period,
            uniform_dataset=self.uniform_dataset,
        )
def _train_vae(vae_trainer, replay_buffer, epoch, batches=50, oracle_data=False, key='image_observation'):
    """Run one VAE training epoch, sampling batches from the replay buffer
    (or letting the trainer use its own data when *oracle_data* is set)."""
    sampler = None if oracle_data else replay_buffer.random_vae_training_data
    vae_trainer.train_epoch(
        epoch,
        sample_batch=sampler,
        batches=batches,
        from_rl=True,
        key=key,
    )
def _train_lstm(lstm_trainer, replay_buffer, epoch, batches=50, oracle_data=False, key='image_observation_segmented'):
    """Run one LSTM training epoch on segmented observations, sampling from
    the replay buffer (or trainer-provided data when *oracle_data* is set)."""
    sampler = None if oracle_data else replay_buffer.random_lstm_training_data
    lstm_trainer.train_epoch(
        epoch,
        sample_batch=sampler,
        batches=batches,
        from_rl=True,
        key=key,
    )
def _test_vae(vae_trainer, epoch, replay_buffer, vae_save_period=1, uniform_dataset=None, save_prefix='r'):
    """Run the VAE evaluation pass for *epoch* and dump diagnostics.

    Reconstructions/samples are saved every *vae_save_period* epochs under
    *save_prefix*; skew-fit statistics are dumped when the buffer prioritizes
    VAE samples and *uniform_dataset* is given.
    """
    save_imgs = epoch % vae_save_period == 0
    log_fit_skew_stats = replay_buffer._prioritize_vae_samples and uniform_dataset is not None
    if uniform_dataset is not None:
        replay_buffer.log_loss_under_uniform(uniform_dataset, vae_trainer.batch_size, rl_logger=vae_trainer.vae_logger_stats_for_rl)
    vae_trainer.test_epoch(
        epoch,
        from_rl=True,
        save_reconstruction=save_imgs,
        save_prefix=save_prefix
    )
    if save_imgs:
        # Turn the reconstruction prefix ('r...') into the sample prefix
        # ('s...') by replacing only the FIRST 'r'. The original replaced
        # every 'r', so e.g. 'r_original_' became 's_osiginal_'.
        sample_save_prefix = save_prefix.replace('r', 's', 1)
        vae_trainer.dump_samples(epoch, save_prefix=sample_save_prefix)
        if log_fit_skew_stats:
            replay_buffer.dump_best_reconstruction(epoch)
            replay_buffer.dump_worst_reconstruction(epoch)
            replay_buffer.dump_sampling_histogram(epoch, batch_size=vae_trainer.batch_size)
        if uniform_dataset is not None:
            replay_buffer.dump_uniform_imgs_and_reconstructions(dataset=uniform_dataset, epoch=epoch)
def _test_lstm(lstm_trainer, epoch, replay_buffer, env_id, lstm_save_period=1, uniform_dataset=None,
        save_prefix='r', lstm_segmentation_method='color', lstm_test_N=500, key='image_observation_segmented'):
    """Run the segmentation-LSTM evaluation pass plus env-specific latent
    distance / trajectory tests against pre-recorded datasets under $PJHOME.

    NOTE(review): lstm_segmentation_method and lstm_test_N are accepted but
    the segmentation name and dataset size N below are hard-coded per
    environment - confirm whether the parameters should be honored.
    """
    batch_sampler = replay_buffer.random_lstm_training_data
    save_imgs = epoch % lstm_save_period == 0
    log_fit_skew_stats = replay_buffer._prioritize_vae_samples and uniform_dataset is not None
    if uniform_dataset is not None:
        replay_buffer.log_loss_under_uniform(uniform_dataset, lstm_trainer.batch_size, rl_logger=lstm_trainer.vae_logger_stats_for_rl)
    lstm_trainer.test_epoch(
        epoch,
        from_rl=True,
        key=key,
        sample_batch=batch_sampler,
        save_reconstruction=save_imgs,
        save_prefix=save_prefix
    )
    if save_imgs:
        # Replace only the FIRST 'r' to build the sample prefix; the original
        # replaced every 'r' in the string (same bug as in _test_vae).
        sample_save_prefix = save_prefix.replace('r', 's', 1)
        lstm_trainer.dump_samples(epoch, save_prefix=sample_save_prefix)
        if log_fit_skew_stats:
            replay_buffer.dump_best_reconstruction(epoch)
            replay_buffer.dump_worst_reconstruction(epoch)
            replay_buffer.dump_sampling_histogram(epoch, batch_size=lstm_trainer.batch_size)
        if uniform_dataset is not None:
            replay_buffer.dump_uniform_imgs_and_reconstructions(dataset=uniform_dataset, epoch=epoch)
    # Environment-specific latent-space sanity checks on pre-recorded data.
    m = lstm_trainer.model
    pjhome = os.environ['PJHOME']
    seg_name = 'seg-' + 'color'
    if env_id in ['SawyerPushNIPSEasy-v0', 'SawyerPushHurdle-v0', 'SawyerPushHurdleMiddle-v0']:
        N = 500
        data_file_path = osp.join(pjhome, 'data/local/pre-train-lstm', '{}-{}-{}-0.3-0.5.npy'.format(env_id, seg_name, N))
        puck_pos_path = osp.join(pjhome, 'data/local/pre-train-lstm', '{}-{}-{}-0.3-0.5-puck-pos.npy'.format(env_id, seg_name, N))
        if osp.exists(data_file_path):
            all_data = np.load(data_file_path)
            puck_pos = np.load(puck_pos_path)
            all_data = normalize_image(all_data.copy())
            compare_latent_distance(m, all_data, puck_pos, save_dir=logger.get_snapshot_dir(), obj_name='puck',
                save_name='online_lstm_latent_distance_{}.png'.format(epoch))
    elif env_id == 'SawyerDoorHookResetFreeEnv-v1':
        N = 1000
        seg_name = 'seg-' + 'unet'
        data_file_path = osp.join(pjhome, 'data/local/pre-train-lstm', 'vae-only-{}-{}-{}-0-0.npy'.format(env_id, seg_name, N))
        door_angle_path = osp.join(pjhome, 'data/local/pre-train-lstm', 'vae-only-{}-{}-{}-0-0-door-angle.npy'.format(env_id, seg_name, N))
        if osp.exists(data_file_path):
            all_data = np.load(data_file_path)
            door_angle = np.load(door_angle_path)
            all_data = normalize_image(all_data.copy())
            compare_latent_distance(m, all_data, door_angle, save_dir=logger.get_snapshot_dir(), obj_name='door',
                save_name='online_lstm_latent_distance_{}.png'.format(epoch))
    elif env_id == 'SawyerPushHurdleResetFreeEnv-v0':
        N = 2000
        data_file_path = osp.join(pjhome, 'data/local/pre-train-lstm', 'vae-only-{}-{}-{}-0.3-0.5.npy'.format(env_id, seg_name, N))
        puck_pos_path = osp.join(pjhome, 'data/local/pre-train-lstm', 'vae-only-{}-{}-{}-0.3-0.5-puck-pos.npy'.format(env_id, seg_name, N))
        if osp.exists(data_file_path):
            all_data = np.load(data_file_path)
            puck_pos = np.load(puck_pos_path)
            all_data = normalize_image(all_data.copy())
            compare_latent_distance(m, all_data, puck_pos, save_dir=logger.get_snapshot_dir(), obj_name='puck',
                save_name='online_lstm_latent_distance_{}.png'.format(epoch))
    test_lstm_traj(env_id, m, save_path=logger.get_snapshot_dir(),
        save_name='online_lstm_test_traj_{}.png'.format(epoch))
    test_masked_traj_lstm(env_id, m, save_dir=logger.get_snapshot_dir(),
        save_name='online_masked_test_{}.png'.format(epoch))
def subprocess_train_vae_loop(
        conn_pipe,
        vae,
        vae_params,
        replay_buffer,
        mp_info,
        device,
):
    """
    The observations and next_observations of the replay buffer are stored in
    shared memory. This loop waits until the parent signals to start vae
    training, trains and sends the vae back, and then refreshes the latents.
    Refreshing latents in the subprocess reflects in the main process as well
    since the latents are in shared memory. Since this is does asynchronously,
    it is possible for the main process to see half the latents updated and half
    not.
    """
    ptu.device = device
    # First message from the parent is the trainer object itself.
    vae_trainer = conn_pipe.recv()
    vae.load_state_dict(vae_params)
    vae.to(device)
    vae_trainer.set_vae(vae)
    # Re-attach the shared-memory buffers in this process.
    replay_buffer.init_from_mp_info(mp_info)
    replay_buffer.env.vae = vae
    # Runs until the parent terminates the process (see _cleanup()).
    while True:
        amount_to_train, epoch = conn_pipe.recv()
        _train_vae(vae_trainer, replay_buffer, epoch, amount_to_train)
        # Ship updated weights back to the parent, then refresh shared latents.
        conn_pipe.send(vae_trainer.model.__getstate__())
        replay_buffer.refresh_latents(epoch)
| StarcoderdataPython |
3391041 | <reponame>czchen/debian-pgcli
from __future__ import print_function
import sys
import logging
from collections import namedtuple
from .tabulate import tabulate
TableInfo = namedtuple("TableInfo", ['checks', 'relkind', 'hasindex',
'hasrules', 'hastriggers', 'hasoids', 'tablespace', 'reloptions', 'reloftype',
'relpersistence'])
log = logging.getLogger(__name__)
use_expanded_output = False
def is_expanded_output():
    # Accessor for the module-level flag controlling expanded (vertical) output.
    return use_expanded_output
TIMING_ENABLED = False
def parse_special_command(sql):
    """Split a backslash command into ``(command, verbose, argument)``.

    A '+' anywhere in the command word marks verbose mode and is stripped
    from the returned command name.
    """
    head, _, tail = sql.partition(' ')
    is_verbose = '+' in head
    return (head.strip().replace('+', ''), is_verbose, tail.strip())
def list_schemas(cur, pattern, verbose):
    """
    Returns (rows, headers, status)
    """
    # Base query; verbose mode adds ACL and description columns.
    sql = '''SELECT n.nspname AS "Name",
    pg_catalog.pg_get_userbyid(n.nspowner) AS "Owner"''' + (''',
    pg_catalog.array_to_string(n.nspacl, E'\\n') AS "Access privileges",
    pg_catalog.obj_description(n.oid, 'pg_namespace') AS "Description"''' if verbose else '') + """
    FROM pg_catalog.pg_namespace n WHERE n.nspname """
    params = []
    if pattern:
        # Filter by the user-supplied (regex) schema pattern.
        _, schema = sql_name_pattern(pattern)
        sql += '~ %s'
        params.append(schema)
    else:
        # Default: hide system schemas.
        sql += "!~ '^pg_' AND n.nspname <> 'information_schema'"
    sql = cur.mogrify(sql + " ORDER BY 1", params)
    log.debug(sql)
    cur.execute(sql)
    if cur.description:
        headers = [x[0] for x in cur.description]
        return [(None, cur, headers, cur.statusmessage)]
def list_objects(cur, pattern, verbose, relkinds):
    """
    Returns (rows, header, status)
    This method is used by list_tables, list_views, and list_indexes
    relkinds is a list of strings to filter pg_class.relkind
    """
    schema_pattern, table_pattern = sql_name_pattern(pattern)
    if verbose:
        verbose_columns = '''
            ,pg_catalog.pg_size_pretty(pg_catalog.pg_table_size(c.oid)) as "Size",
            pg_catalog.obj_description(c.oid, 'pg_class') as "Description" '''
    else:
        verbose_columns = ''
    sql = '''SELECT n.nspname as "Schema",
                    c.relname as "Name",
                    CASE c.relkind
                      WHEN 'r' THEN 'table' WHEN 'v' THEN 'view'
                      WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index'
                      WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special'
                      WHEN 'f' THEN 'foreign table' END
                    as "Type",
                    pg_catalog.pg_get_userbyid(c.relowner) as "Owner"
            ''' + verbose_columns + '''
            FROM pg_catalog.pg_class c
                    LEFT JOIN pg_catalog.pg_namespace n
                      ON n.oid = c.relnamespace
            WHERE c.relkind = ANY(%s) '''
    params = [relkinds]
    if schema_pattern:
        sql += ' AND n.nspname ~ %s'
        params.append(schema_pattern)
    else:
        # No schema pattern: restrict to visible, non-system relations.
        sql += '''
            AND n.nspname <> 'pg_catalog'
            AND n.nspname <> 'information_schema'
            AND n.nspname !~ '^pg_toast'
            AND pg_catalog.pg_table_is_visible(c.oid) '''
    if table_pattern:
        sql += ' AND c.relname ~ %s'
        params.append(table_pattern)
    sql = cur.mogrify(sql + ' ORDER BY 1, 2', params)
    log.debug(sql)
    cur.execute(sql)
    if cur.description:
        headers = [x[0] for x in cur.description]
        return [(None, cur, headers, cur.statusmessage)]
def list_tables(cur, pattern, verbose):
    """List ordinary tables (pg_class.relkind 'r') matching *pattern*."""
    return list_objects(cur, pattern, verbose, ['r', ''])
def list_views(cur, pattern, verbose):
    """List views and special relations (relkind 'v'/'s') matching *pattern*."""
    return list_objects(cur, pattern, verbose, ['v', 's', ''])
def list_sequences(cur, pattern, verbose):
    """List sequences (relkind 'S'/'s') matching *pattern*."""
    return list_objects(cur, pattern, verbose, ['S', 's', ''])
def list_indexes(cur, pattern, verbose):
    """List indexes (relkind 'i'/'s') matching *pattern*."""
    return list_objects(cur, pattern, verbose, ['i', 's', ''])
def list_functions(cur, pattern, verbose):
    """List functions matching *pattern*; returns (rows, headers, status)."""
    if verbose:
        verbose_columns = '''
            ,CASE
                 WHEN p.provolatile = 'i' THEN 'immutable'
                 WHEN p.provolatile = 's' THEN 'stable'
                 WHEN p.provolatile = 'v' THEN 'volatile'
            END as "Volatility",
            pg_catalog.pg_get_userbyid(p.proowner) as "Owner",
          l.lanname as "Language",
          p.prosrc as "Source code",
          pg_catalog.obj_description(p.oid, 'pg_proc') as "Description" '''
        verbose_table = ''' LEFT JOIN pg_catalog.pg_language l
                                ON l.oid = p.prolang'''
    else:
        verbose_columns = verbose_table = ''
    sql = '''
        SELECT  n.nspname as "Schema",
                p.proname as "Name",
                pg_catalog.pg_get_function_result(p.oid)
                  as "Result data type",
                pg_catalog.pg_get_function_arguments(p.oid)
                  as "Argument data types",
                 CASE
                    WHEN p.proisagg THEN 'agg'
                    WHEN p.proiswindow THEN 'window'
                    WHEN p.prorettype = 'pg_catalog.trigger'::pg_catalog.regtype
                        THEN 'trigger'
                    ELSE 'normal'
                END as "Type" ''' + verbose_columns + '''
        FROM    pg_catalog.pg_proc p
                LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
                ''' + verbose_table + '''
        WHERE  '''
    schema_pattern, func_pattern = sql_name_pattern(pattern)
    params = []
    if schema_pattern:
        sql += ' n.nspname ~ %s '
        params.append(schema_pattern)
    else:
        # No schema given: only functions visible on the search path.
        sql += ' pg_catalog.pg_function_is_visible(p.oid) '
    if func_pattern:
        sql += ' AND p.proname ~ %s '
        params.append(func_pattern)
    if not (schema_pattern or func_pattern):
        # Bare \df: hide system functions.
        sql += ''' AND n.nspname <> 'pg_catalog'
                   AND n.nspname <> 'information_schema' '''
    sql = cur.mogrify(sql + ' ORDER BY 1, 2, 4', params)
    log.debug(sql)
    cur.execute(sql)
    if cur.description:
        headers = [x[0] for x in cur.description]
        return [(None, cur, headers, cur.statusmessage)]
def describe_table_details(cur, pattern, verbose):
    """
    Returns (rows, headers, status)
    """
    # This is a simple \d command. No table name to follow.
    if not pattern:
        sql = """SELECT n.nspname as "Schema", c.relname as "Name",
                    CASE c.relkind WHEN 'r' THEN 'table'
                        WHEN 'v' THEN 'view'
                        WHEN 'm' THEN 'materialized view'
                        WHEN 'i' THEN 'index'
                        WHEN 'S' THEN 'sequence'
                        WHEN 's' THEN 'special'
                        WHEN 'f' THEN 'foreign table'
                    END as "Type",
                    pg_catalog.pg_get_userbyid(c.relowner) as "Owner"
                FROM pg_catalog.pg_class c
                LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
                WHERE c.relkind IN ('r','v','m','S','f','')
                    AND n.nspname <> 'pg_catalog'
                    AND n.nspname <> 'information_schema'
                    AND n.nspname !~ '^pg_toast'
                    AND pg_catalog.pg_table_is_visible(c.oid)
                ORDER BY 1,2 """
        log.debug(sql)
        cur.execute(sql)
        if cur.description:
            headers = [x[0] for x in cur.description]
            return [(None, cur, headers, cur.statusmessage)]
    # This is a \d <tablename> command. A royal pain in the ass.
    schema, relname = sql_name_pattern(pattern)
    where = []
    params = []
    # NOTE(review): `pattern` is truthy here (the bare-\d case returned
    # above), so this visibility clause looks unreachable - confirm intent.
    if not pattern:
        where.append('pg_catalog.pg_table_is_visible(c.oid)')
    if schema:
        where.append('n.nspname ~ %s')
        params.append(schema)
    if relname:
        where.append('c.relname ~ %s')
        params.append(relname)
    sql = """SELECT c.oid, n.nspname, c.relname
             FROM pg_catalog.pg_class c
             LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
             """ + ('WHERE ' + ' AND '.join(where) if where else '') + """
             ORDER BY 2,3"""
    sql = cur.mogrify(sql, params)
    # Execute the sql, get the results and call describe_one_table_details on each table.
    log.debug(sql)
    cur.execute(sql)
    if not (cur.rowcount > 0):
        return [(None, None, None, 'Did not find any relation named %s.' % pattern)]
    results = []
    for oid, nspname, relname in cur.fetchall():
        results.append(describe_one_table_details(cur, nspname, relname, oid, verbose))
    return results
def describe_one_table_details(cur, schema_name, relation_name, oid, verbose):
    """Describe a single relation (table, view, index, sequence or foreign
    table), mirroring psql's describe.c.

    Returns a (title, rows, headers, status) tuple; `rows` is the column
    listing and `status` the assembled footer text (indexes, constraints,
    rules, triggers, inheritance, ...).
    """
    if verbose:
        suffix = """pg_catalog.array_to_string(c.reloptions || array(select
        'toast.' || x from pg_catalog.unnest(tc.reloptions) x), ', ')"""
    else:
        suffix = "''"

    # Relation-level facts: relkind plus the has-index/rules/triggers flags
    # that gate the footer sections below.
    sql = """SELECT c.relchecks, c.relkind, c.relhasindex,
        c.relhasrules, c.relhastriggers, c.relhasoids,
        %s,
        c.reltablespace,
        CASE WHEN c.reloftype = 0 THEN ''
            ELSE c.reloftype::pg_catalog.regtype::pg_catalog.text
        END,
        c.relpersistence
        FROM pg_catalog.pg_class c
        LEFT JOIN pg_catalog.pg_class tc ON (c.reltoastrelid = tc.oid)
        WHERE c.oid = '%s'""" % (suffix, oid)

    # Create a namedtuple called tableinfo and match what's in describe.c
    log.debug(sql)
    cur.execute(sql)
    if cur.rowcount > 0:
        tableinfo = TableInfo._make(cur.fetchone())
    else:
        return (None, None, None, 'Did not find any relation with OID %s.' % oid)

    # If it's a sequence, fetch its values and store them for later.
    seq_values = None
    if tableinfo.relkind == 'S':
        sql = '''SELECT * FROM "%s"."%s"''' % (schema_name, relation_name)
        log.debug(sql)
        cur.execute(sql)
        if not (cur.rowcount > 0):
            return (None, None, None, 'Something went wrong.')
        seq_values = cur.fetchone()

    # Get column info.  Fixed result positions: 0 attname, 1 type,
    # 2 default, 3 attnotnull, 4 attnum, 5 attcollation, 6 indexdef,
    # 7 attfdwoptions; verbose adds 8 attstorage, 9 attstattarget,
    # 10 col_description.
    sql = """SELECT a.attname, pg_catalog.format_type(a.atttypid, a.atttypmod),
        (SELECT substring(pg_catalog.pg_get_expr(d.adbin, d.adrelid) for 128)
        FROM pg_catalog.pg_attrdef d WHERE d.adrelid = a.attrelid AND d.adnum =
        a.attnum AND a.atthasdef), a.attnotnull, a.attnum, (SELECT c.collname
        FROM pg_catalog.pg_collation c, pg_catalog.pg_type t WHERE c.oid =
        a.attcollation AND t.oid = a.atttypid AND a.attcollation <>
        t.typcollation) AS attcollation"""
    if tableinfo.relkind == 'i':
        sql += """, pg_catalog.pg_get_indexdef(a.attrelid, a.attnum, TRUE)
            AS indexdef"""
    else:
        sql += """, NULL AS indexdef"""
    if tableinfo.relkind == 'f':
        sql += """, CASE WHEN attfdwoptions IS NULL THEN '' ELSE '(' ||
            array_to_string(ARRAY(SELECT quote_ident(option_name) || ' '
            || quote_literal(option_value) FROM
            pg_options_to_table(attfdwoptions)), ', ') || ')' END AS
            attfdwoptions"""
    else:
        sql += """, NULL AS attfdwoptions"""
    if verbose:
        sql += """, a.attstorage"""
        sql += """, CASE WHEN a.attstattarget=-1 THEN NULL ELSE
            a.attstattarget END AS attstattarget"""
        # Column comment, only for relkinds that support it; selected only
        # under verbose so the row indexing below stays consistent.
        if (tableinfo.relkind == 'r' or tableinfo.relkind == 'v' or
                tableinfo.relkind == 'm' or tableinfo.relkind == 'f' or
                tableinfo.relkind == 'c'):
            sql += """, pg_catalog.col_description(a.attrelid,
                a.attnum)"""
    sql += """ FROM pg_catalog.pg_attribute a WHERE a.attrelid = '%s' AND
        a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum; """ % oid

    log.debug(sql)
    cur.execute(sql)
    res = cur.fetchall()

    # NOTE(review): title is computed but not returned (the first tuple
    # element has always been None); kept for parity with describe.c.
    title = (tableinfo.relkind, schema_name, relation_name)

    # Set the column names.
    headers = ['Column', 'Type']

    show_modifiers = False
    if (tableinfo.relkind == 'r' or tableinfo.relkind == 'v' or
            tableinfo.relkind == 'm' or tableinfo.relkind == 'f' or
            tableinfo.relkind == 'c'):
        headers.append('Modifiers')
        show_modifiers = True

    if tableinfo.relkind == 'S':
        headers.append("Value")

    if tableinfo.relkind == 'i':
        headers.append("Definition")

    if tableinfo.relkind == 'f':
        headers.append("FDW Options")

    if verbose:
        headers.append("Storage")
        if (tableinfo.relkind == 'r' or tableinfo.relkind == 'm' or
                tableinfo.relkind == 'f'):
            headers.append("Stats target")
        # Column comments, if the relkind supports this feature.
        if (tableinfo.relkind == 'r' or tableinfo.relkind == 'v' or
                tableinfo.relkind == 'm' or
                tableinfo.relkind == 'c' or tableinfo.relkind == 'f'):
            headers.append("Description")

    view_def = ''
    # /* Check if table is a view or materialized view */
    if ((tableinfo.relkind == 'v' or tableinfo.relkind == 'm') and verbose):
        sql = """SELECT pg_catalog.pg_get_viewdef('%s'::pg_catalog.oid, true)""" % oid
        log.debug(sql)
        cur.execute(sql)
        if cur.rowcount > 0:
            view_def = cur.fetchone()

    # Prepare the cells of the table to print.
    cells = []
    for i, row in enumerate(res):
        cell = []
        cell.append(row[0])   # Column
        cell.append(row[1])   # Type
        if show_modifiers:
            modifier = ''
            if row[5]:
                modifier += ' collate %s' % row[5]
            if row[3]:
                modifier += ' not null'
            if row[2]:
                modifier += ' default %s' % row[2]
            cell.append(modifier)

        # Sequence
        if tableinfo.relkind == 'S':
            cell.append(seq_values[i])

        # Index column.
        # BUG FIX: the original tested TableInfo.relkind (the class), not
        # the fetched tableinfo instance.
        if tableinfo.relkind == 'i':
            cell.append(row[6])

        # /* FDW options for foreign table column, only for 9.2 or later */
        if tableinfo.relkind == 'f':
            cell.append(row[7])

        if verbose:
            storage = row[8]
            if storage[0] == 'p':
                cell.append('plain')
            elif storage[0] == 'm':
                cell.append('main')
            elif storage[0] == 'x':
                cell.append('extended')
            elif storage[0] == 'e':
                cell.append('external')
            else:
                cell.append('???')

            if (tableinfo.relkind == 'r' or tableinfo.relkind == 'm' or
                    tableinfo.relkind == 'f'):
                cell.append(row[9])

            # /* Column comments, if the relkind supports this feature. */
            if (tableinfo.relkind == 'r' or tableinfo.relkind == 'v' or
                    tableinfo.relkind == 'm' or
                    tableinfo.relkind == 'c' or tableinfo.relkind == 'f'):
                cell.append(row[10])
        cells.append(cell)

    # Make Footers
    status = []
    if tableinfo.relkind == 'i':
        # /* Footer information about an index */
        sql = """SELECT i.indisunique, i.indisprimary, i.indisclustered,
            i.indisvalid, (NOT i.indimmediate) AND EXISTS (SELECT 1 FROM
            pg_catalog.pg_constraint WHERE conrelid = i.indrelid AND conindid =
            i.indexrelid AND contype IN ('p','u','x') AND condeferrable) AS
            condeferrable, (NOT i.indimmediate) AND EXISTS (SELECT 1 FROM
            pg_catalog.pg_constraint WHERE conrelid = i.indrelid AND conindid =
            i.indexrelid AND contype IN ('p','u','x') AND condeferred) AS
            condeferred, a.amname, c2.relname, pg_catalog.pg_get_expr(i.indpred,
            i.indrelid, true) FROM pg_catalog.pg_index i, pg_catalog.pg_class c,
            pg_catalog.pg_class c2, pg_catalog.pg_am a WHERE i.indexrelid = c.oid
            AND c.oid = '%s' AND c.relam = a.oid AND i.indrelid = c2.oid;""" % oid
        log.debug(sql)
        cur.execute(sql)

        (indisunique, indisprimary, indisclustered, indisvalid,
         deferrable, deferred, indamname, indtable, indpred) = cur.fetchone()

        if indisprimary:
            status.append("primary key, ")
        elif indisunique:
            status.append("unique, ")
        status.append("%s, " % indamname)

        # /* we assume here that index and table are in same schema */
        status.append('for table "%s.%s"' % (schema_name, indtable))

        if indpred:
            status.append(", predicate (%s)" % indpred)
        if indisclustered:
            status.append(", clustered")
        # BUG FIX: flag *invalid* indexes; the original printed ", invalid"
        # for valid ones.
        if not indisvalid:
            status.append(", invalid")
        if deferrable:
            status.append(", deferrable")
        if deferred:
            status.append(", initially deferred")
        status.append('\n')
        # add_tablespace_footer(&cont, tableinfo.relkind,
        # tableinfo.tablespace, true);

    elif tableinfo.relkind == 'S':
        # /* Footer information about a sequence */
        # /* Get the column that owns this sequence */
        sql = ("SELECT pg_catalog.quote_ident(nspname) || '.' ||"
               "\n pg_catalog.quote_ident(relname) || '.' ||"
               "\n pg_catalog.quote_ident(attname)"
               "\nFROM pg_catalog.pg_class c"
               "\nINNER JOIN pg_catalog.pg_depend d ON c.oid=d.refobjid"
               "\nINNER JOIN pg_catalog.pg_namespace n ON n.oid=c.relnamespace"
               "\nINNER JOIN pg_catalog.pg_attribute a ON ("
               "\n a.attrelid=c.oid AND"
               "\n a.attnum=d.refobjsubid)"
               "\nWHERE d.classid='pg_catalog.pg_class'::pg_catalog.regclass"
               "\n AND d.refclassid='pg_catalog.pg_class'::pg_catalog.regclass"
               "\n AND d.objid=%s \n AND d.deptype='a'" % oid)
        log.debug(sql)
        cur.execute(sql)
        result = cur.fetchone()
        status.append("Owned by: %s" % result[0])
        # /*
        # * If we get no rows back, don't show anything (obviously). We should
        # * never get more than one row back, but if we do, just ignore it and
        # * don't print anything.
        # */

    elif (tableinfo.relkind == 'r' or tableinfo.relkind == 'm' or
            tableinfo.relkind == 'f'):
        # /* Footer information about a table */

        if tableinfo.hasindex:
            sql = "SELECT c2.relname, i.indisprimary, i.indisunique, i.indisclustered, "
            sql += "i.indisvalid, "
            sql += "pg_catalog.pg_get_indexdef(i.indexrelid, 0, true),\n "
            sql += ("pg_catalog.pg_get_constraintdef(con.oid, true), "
                    "contype, condeferrable, condeferred")
            sql += ", c2.reltablespace"
            sql += ("\nFROM pg_catalog.pg_class c, pg_catalog.pg_class c2, "
                    "pg_catalog.pg_index i\n")
            sql += " LEFT JOIN pg_catalog.pg_constraint con ON (conrelid = i.indrelid AND conindid = i.indexrelid AND contype IN ('p','u','x'))\n"
            sql += ("WHERE c.oid = '%s' AND c.oid = i.indrelid AND i.indexrelid = c2.oid\n"
                    "ORDER BY i.indisprimary DESC, i.indisunique DESC, c2.relname;") % oid
            log.debug(sql)
            cur.execute(sql)   # BUG FIX: execute() returns None; drop assignment
            if cur.rowcount > 0:
                status.append("Indexes:\n")
            for row in cur:
                # /* untranslated index name */
                status.append('    "%s"' % row[0])

                # /* If exclusion constraint, print the constraintdef */
                if row[7] == "x":
                    status.append(row[6])
                else:
                    # /* Label as primary key or unique (but not both) */
                    if row[1]:
                        status.append(" PRIMARY KEY,")
                    elif row[2]:
                        if row[7] == "u":
                            status.append(" UNIQUE CONSTRAINT,")
                        else:
                            status.append(" UNIQUE,")
                    # /* Everything after "USING" is echoed verbatim */
                    indexdef = row[5]
                    usingpos = indexdef.find(" USING ")
                    if usingpos >= 0:
                        indexdef = indexdef[(usingpos + 7):]
                    status.append(" %s" % indexdef)

                    # /* Need these for deferrable PK/UNIQUE indexes */
                    if row[8]:
                        status.append(" DEFERRABLE")
                    if row[9]:
                        status.append(" INITIALLY DEFERRED")

                # /* Add these for all cases */
                if row[3]:
                    status.append(" CLUSTER")
                if not row[4]:
                    status.append(" INVALID")
                status.append('\n')
                # printTableAddFooter(&cont, buf.data);
                # /* Print tablespace of the index on the same line */
                # add_tablespace_footer(&cont, 'i',
                #   atooid(PQgetvalue(result, i, 10)), false);

        # /* print table (and column) check constraints */
        if tableinfo.checks:
            sql = ("SELECT r.conname, "
                   "pg_catalog.pg_get_constraintdef(r.oid, true)\n"
                   "FROM pg_catalog.pg_constraint r\n"
                   "WHERE r.conrelid = '%s' AND r.contype = 'c'\n"
                   "ORDER BY 1;" % oid)
            log.debug(sql)
            cur.execute(sql)
            if cur.rowcount > 0:
                status.append("Check constraints:\n")
            for row in cur:
                # /* untranslated contraint name and def */
                status.append("    \"%s\" %s" % row)
                status.append('\n')

        # /* print foreign-key constraints (there are none if no triggers) */
        if tableinfo.hastriggers:
            sql = ("SELECT conname,\n"
                   " pg_catalog.pg_get_constraintdef(r.oid, true) as condef\n"
                   "FROM pg_catalog.pg_constraint r\n"
                   "WHERE r.conrelid = '%s' AND r.contype = 'f' ORDER BY 1;" %
                   oid)
            log.debug(sql)
            cur.execute(sql)
            if cur.rowcount > 0:
                status.append("Foreign-key constraints:\n")
            for row in cur:
                # /* untranslated constraint name and def */
                status.append("    \"%s\" %s\n" % row)

        # /* print incoming foreign-key references (none if no triggers) */
        if tableinfo.hastriggers:
            sql = ("SELECT conname, conrelid::pg_catalog.regclass,\n"
                   " pg_catalog.pg_get_constraintdef(c.oid, true) as condef\n"
                   "FROM pg_catalog.pg_constraint c\n"
                   "WHERE c.confrelid = '%s' AND c.contype = 'f' ORDER BY 1;" %
                   oid)
            log.debug(sql)
            cur.execute(sql)
            if cur.rowcount > 0:
                status.append("Referenced by:\n")
            for row in cur:
                status.append("    TABLE \"%s\" CONSTRAINT \"%s\" %s\n" % row)

        # /* print rules */
        if tableinfo.hasrules and tableinfo.relkind != 'm':
            sql = ("SELECT r.rulename, trim(trailing ';' from pg_catalog.pg_get_ruledef(r.oid, true)), "
                   "ev_enabled\n"
                   "FROM pg_catalog.pg_rewrite r\n"
                   "WHERE r.ev_class = '%s' ORDER BY 1;" %
                   oid)
            log.debug(sql)
            cur.execute(sql)
            if cur.rowcount > 0:
                # BUG FIX: fetch once -- the cursor can only be iterated a
                # single time, but we classify the same rows 4 times.
                rule_rows = cur.fetchall()
                for category in range(4):
                    have_heading = False
                    for row in rule_rows:
                        # BUG FIX: reset per row; previously a stale True
                        # could leak across iterations (or be unbound).
                        list_rule = ((category == 0 and row[2] == 'O') or
                                     (category == 1 and row[2] == 'D') or
                                     (category == 2 and row[2] == 'A') or
                                     (category == 3 and row[2] == 'R'))
                        if not list_rule:
                            continue
                        if not have_heading:
                            if category == 0:
                                status.append("Rules:")
                            if category == 1:
                                status.append("Disabled rules:")
                            if category == 2:
                                status.append("Rules firing always:")
                            if category == 3:
                                status.append("Rules firing on replica only:")
                            have_heading = True
                        # /* Everything after "CREATE RULE " is echoed verbatim */
                        # BUG FIX: slice the string (was `ruledef += 12`,
                        # a TypeError, and a 2-arg list.append).
                        ruledef = row[1][12:]
                        status.append("    %s" % ruledef)

    if view_def:
        # /* Footer information about a view */
        status.append("View definition:\n")
        status.append("%s \n" % view_def)

        # /* print rules */
        if tableinfo.hasrules:
            sql = ("SELECT r.rulename, trim(trailing ';' from pg_catalog.pg_get_ruledef(r.oid, true))\n"
                   "FROM pg_catalog.pg_rewrite r\n"
                   "WHERE r.ev_class = '%s' AND r.rulename != '_RETURN' ORDER BY 1;" % oid)
            log.debug(sql)
            cur.execute(sql)
            if cur.rowcount > 0:
                status.append("Rules:\n")
                for row in cur.fetchall():
                    # /* Everything after "CREATE RULE " is echoed verbatim */
                    # BUG FIX: slice, don't add 12 to a string.
                    ruledef = row[1][12:]
                    status.append(" %s\n" % ruledef)

    # /*
    # * Print triggers next, if any (but only user-defined triggers). This
    # * could apply to either a table or a view.
    # */
    if tableinfo.hastriggers:
        sql = ("SELECT t.tgname, "
               "pg_catalog.pg_get_triggerdef(t.oid, true), "
               "t.tgenabled\n"
               "FROM pg_catalog.pg_trigger t\n"
               "WHERE t.tgrelid = '%s' AND " % oid)
        sql += "NOT t.tgisinternal"
        sql += "\nORDER BY 1;"
        log.debug(sql)
        cur.execute(sql)
        if cur.rowcount > 0:
            # /*
            # * split the output into 4 different categories. Enabled triggers,
            # * disabled triggers and the two special ALWAYS and REPLICA
            # * configurations.
            # */
            trigger_rows = cur.fetchall()   # BUG FIX: cursor reused 4 times
            for category in range(4):
                have_heading = False
                for row in trigger_rows:
                    # /* Check if this trigger falls into the current category */
                    tgenabled = row[2]
                    list_trigger = False
                    if category == 0:
                        if tgenabled == 'O' or tgenabled == True:
                            list_trigger = True
                    elif category == 1:
                        if tgenabled == 'D' or tgenabled == False:
                            list_trigger = True
                    elif category == 2:
                        if tgenabled == 'A':
                            list_trigger = True
                    elif category == 3:
                        if tgenabled == 'R':
                            list_trigger = True
                    if not list_trigger:
                        continue

                    # /* Print the category heading once */
                    if not have_heading:
                        if category == 0:
                            status.append("Triggers:")
                        elif category == 1:
                            status.append("Disabled triggers:")
                        elif category == 2:
                            status.append("Triggers firing always:")
                        elif category == 3:
                            status.append("Triggers firing on replica only:")
                        status.append('\n')
                        have_heading = True

                    # /* Everything after "TRIGGER" is echoed verbatim */
                    tgdef = row[1]
                    triggerpos = tgdef.find(" TRIGGER ")
                    if triggerpos >= 0:
                        # BUG FIX: slice the definition (was assigning the
                        # integer offset to tgdef).
                        tgdef = tgdef[triggerpos + 9:]
                    status.append("    %s\n" % tgdef)

    # /*
    # * Finish printing the footer information about a table.
    # */
    if (tableinfo.relkind == 'r' or tableinfo.relkind == 'm' or
            tableinfo.relkind == 'f'):
        # /* print foreign server name */
        if tableinfo.relkind == 'f':
            # /* Footer information about foreign table */
            sql = ("SELECT s.srvname,\n"
                   " array_to_string(ARRAY(SELECT "
                   " quote_ident(option_name) || ' ' || "
                   " quote_literal(option_value) FROM "
                   " pg_options_to_table(ftoptions)), ', ') "
                   "FROM pg_catalog.pg_foreign_table f,\n"
                   " pg_catalog.pg_foreign_server s\n"
                   "WHERE f.ftrelid = %s AND s.oid = f.ftserver;" % oid)
            log.debug(sql)
            cur.execute(sql)
            row = cur.fetchone()

            # /* Print server name */
            status.append("Server: %s\n" % row[0])

            # /* Print per-table FDW options, if any */
            if row[1]:
                # BUG FIX: `ftoptions` was an undefined name; the options
                # string is the second column of the fetched row.
                status.append("FDW Options: (%s)\n" % row[1])

        # /* print inherited tables */
        sql = ("SELECT c.oid::pg_catalog.regclass FROM pg_catalog.pg_class c, "
               "pg_catalog.pg_inherits i WHERE c.oid=i.inhparent AND "
               "i.inhrelid = '%s' ORDER BY inhseqno;" % oid)
        log.debug(sql)
        cur.execute(sql)
        spacer = ''
        if cur.rowcount > 0:
            status.append("Inherits")
            for row in cur:
                # BUG FIX: format the relation name, not the 1-tuple repr.
                status.append("%s: %s,\n" % (spacer, row[0]))
                spacer = ' ' * len('Inherits')

        # /* print child tables */
        sql = ("SELECT c.oid::pg_catalog.regclass FROM pg_catalog.pg_class c,"
               " pg_catalog.pg_inherits i WHERE c.oid=i.inhrelid AND"
               " i.inhparent = '%s' ORDER BY"
               " c.oid::pg_catalog.regclass::pg_catalog.text;" % oid)
        log.debug(sql)
        cur.execute(sql)

        if not verbose:
            # /* print the number of child tables, if any */
            if cur.rowcount > 0:
                # BUG FIX: escape the backslash in \d+ and restore the
                # missing space ("listthem" -> "list them").
                status.append("Number of child tables: %d (Use \\d+ to list "
                              "them.)\n" % cur.rowcount)
        else:
            spacer = ''
            if cur.rowcount > 0:
                status.append('Child tables')

                # /* display the list of child tables */
                for row in cur:
                    status.append("%s: %s,\n" % (spacer, row[0]))
                    spacer = ' ' * len('Child tables')

        # /* Table type */
        if tableinfo.reloftype:
            status.append("Typed table of type: %s\n" % tableinfo.reloftype)

        # /* OIDs, if verbose and not a materialized view */
        if verbose and tableinfo.relkind != 'm':
            status.append("Has OIDs: %s\n" %
                          ("yes" if tableinfo.hasoids else "no"))

        # /* Tablespace info */
        # add_tablespace_footer(&cont, tableinfo.relkind,
        # tableinfo.tablespace, true);

    # /* reloptions, if verbose */
    if verbose and tableinfo.reloptions:
        status.append("Options: %s\n" % tableinfo.reloptions)

    return (None, cells, headers, "".join(status))
def sql_name_pattern(pattern):
    """
    Convert a psql-style wildcard pattern into the SQL regex pattern(s)
    used in a WHERE clause.

    Returns: schema_pattern, table_pattern

    >>> sql_name_pattern('foo*."b""$ar*"')
    ('^(foo.*)$', '^(b"\\\\$ar\\\\*)$')
    """
    in_quotes = False
    table_pat = ''
    schema_pat = None
    pos, end = 0, len(pattern)

    while pos < end:
        ch = pattern[pos]
        if ch == '"':
            # A doubled quote inside a quoted section is a literal quote.
            if in_quotes and pos + 1 < end and pattern[pos + 1] == '"':
                table_pat += '"'
                pos += 1
            else:
                in_quotes = not in_quotes
        elif not in_quotes and ch.isupper():
            table_pat += ch.lower()
        elif not in_quotes and ch == '*':
            table_pat += '.*'
        elif not in_quotes and ch == '?':
            table_pat += '.'
        elif not in_quotes and ch == '.':
            # Schema/name separator: everything so far was the schema part.
            schema_pat, table_pat = table_pat, ''
        else:
            # Dollar is always escaped, whether inside quotes or not;
            # other regex metacharacters only when quoted.
            if ch == '$' or in_quotes and ch in '|*+?()[]{}.^\\':
                table_pat += '\\'
            table_pat += ch
        pos += 1

    if table_pat:
        table_pat = '^(' + table_pat + ')$'
    if schema_pat:
        schema_pat = '^(' + schema_pat + ')$'

    return schema_pat, table_pat
def show_help(cur, arg, verbose):  # All the parameters are ignored.
    """Return the table of special commands and their descriptions."""
    headers = ['Command', 'Description']
    body = [meta[1] for _cmd, meta in sorted(CASE_SENSITIVE_COMMANDS.items())
            if meta[1]]
    return [(None, body, headers, None)]
def change_db(cur, arg, verbose):
    """Connect to a different database — not supported yet."""
    raise NotImplementedError
def expanded_output(cur, arg, verbose):
    """Flip the global expanded-display flag and report the new state."""
    global use_expanded_output
    use_expanded_output = not use_expanded_output
    state = u"on." if use_expanded_output else u"off."
    return [(None, None, None, u"Expanded display is " + state)]
def toggle_timing(cur, arg, verbose):
    """Flip the global command-timing flag and report the new state."""
    global TIMING_ENABLED
    TIMING_ENABLED = not TIMING_ENABLED
    state = "on." if TIMING_ENABLED else "off."
    return [(None, None, None, "Timing is " + state)]
# Maps each backslash command to (executor, [usage, description]).  The
# executor is either a callable taking (cur, arg, verbose) or a SQL string.
# FIX: all keys/usages now spell the backslash as '\\'; spellings like '\d'
# only worked because \d is an unrecognized escape, and raise
# DeprecationWarning/SyntaxWarning on modern Python.  String values are
# unchanged.
CASE_SENSITIVE_COMMANDS = {
    '\\?': (show_help, ['\\?', 'Help on pgcli commands.']),
    '\\c': (change_db, ['\\c database_name', 'Connect to a new database.']),
    '\\l': ('''SELECT datname FROM pg_database;''', ['\\l', 'list databases.']),
    '\\d': (describe_table_details, ['\\d [pattern]', 'list or describe tables, views and sequences.']),
    '\\dn': (list_schemas, ['\\dn[+] [pattern]', 'list schemas']),
    '\\x': (expanded_output, ['\\x', 'Toggle expanded output.']),
    '\\timing': (toggle_timing, ['\\timing', 'Toggle timing of commands.']),
    '\\dt': (list_tables, ['\\dt[+] [pattern]', 'list tables.']),
    '\\di': (list_indexes, ['\\di[+] [pattern]', 'list indexes.']),
    '\\dv': (list_views, ['\\dv[+] [pattern]', 'list views.']),
    '\\ds': (list_sequences, ['\\ds[+] [pattern]', 'list sequences.']),
    '\\df': (list_functions, ['\\df[+] [pattern]', 'list functions.'])
}
# Commands matched case-insensitively (consulted only after a miss in
# CASE_SENSITIVE_COMMANDS); same (executor, [usage, description]) shape.
NON_CASE_SENSITIVE_COMMANDS = {
    'describe': (describe_table_details, ['DESCRIBE [pattern]', '']),
}
def execute(cur, sql):
    """Execute a special command and return the results. If the special command
    is not supported a KeyError will be raised.
    """
    command, verbose, arg = parse_special_command(sql)

    # Look up the command in the case-sensitive dict; fall back to the
    # non-case-sensitive dict.  A KeyError from the second lookup propagates
    # to the caller, signalling an unsupported command.
    # (FIX: removed the `global` declarations -- reading module-level names
    # never requires them.)
    try:
        command_executor = CASE_SENSITIVE_COMMANDS[command][0]
    except KeyError:
        command_executor = NON_CASE_SENSITIVE_COMMANDS[command.lower()][0]

    # If the command executor is a function, then call the function with the
    # args. If it's a string, then assume it's an SQL command and run it.
    if callable(command_executor):
        return command_executor(cur, arg, verbose)
    elif isinstance(command_executor, str):
        cur.execute(command_executor)
        if cur.description:
            headers = [x[0] for x in cur.description]
            return [(None, cur, headers, cur.statusmessage)]
        else:
            return [(None, None, None, cur.statusmessage)]
if __name__ == '__main__':
    import psycopg2
    con = psycopg2.connect(database='misago_testforum')
    cur = con.cursor()
    table = sys.argv[1]
    # BUG FIX: describe_table_details yields 4-tuples
    # (title, rows, headers, status); unpacking three values raised
    # ValueError on every run.
    for title, rows, headers, status in describe_table_details(cur, table, False):
        print(tabulate(rows, headers, tablefmt='psql'))
        print(status)
import glob
import logging
import urllib.request
from discord.ext import commands
from discord import Embed, Color
from config import config
from log import DiscordHandler
description = """
Solving your image needs
"""

logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
# SECURITY FIX: removed an unconditional import-time HTTP request to a
# hard-coded IP (log.info(urllib.request.urlopen("http://163.172.163.196"))).
# It served no functional purpose, blocked startup on network failure, and
# leaked the host's address to an unknown third party on every import.
class ImageMan(commands.Bot):
    """Discord bot that serves image commands loaded from the cogs/ package."""

    def __init__(self):
        super().__init__(command_prefix="im ", description=description,
                         pm_help=None)

    async def on_ready(self):
        """Log the successful login and load all cogs."""
        log.info(f"Logged in as {self.user.name}")
        await self.load_cogs()

    async def log_error(self, exception, title="Error"):
        """Log *exception* locally and post it to the configured error channel."""
        error_embed = Embed(title=title, color=Color.red())
        error_embed.description = exception
        error_channel = self.get_channel(config.channels.errors)
        log.error(exception)
        await error_channel.send(embed=error_embed)

    async def load_cogs(self):
        """Load every extension found under cogs/, reporting failures."""
        files = glob.glob("cogs/*.py")
        # FIX: normalise both path separators so cog module names resolve
        # on Windows as well as POSIX.
        module_names = [name.replace("\\", "/").replace("/", ".")[:-3]
                        for name in files]
        for module in module_names:
            try:
                self.load_extension(module)
                log.info(f"[+] Loaded {module}")
            except Exception as e:
                # FIX: only ImportError has .name; build the report from the
                # exception type so other failures don't raise AttributeError.
                await self.log_error(f"{type(e).__name__}: {e}",
                                     title="Could not load cog")
                log.error(f"[-] Could not load {module}")

    async def on_command_error(self, ctx, error):
        # Try provide some user feedback instead of logging all errors.
        if isinstance(error, commands.CommandNotFound):
            return  # No need to log unknown commands or return feedback
        if isinstance(error, commands.MissingRequiredArgument):
            # Missing arguments are likely human error so do not need logging
            parameter_name = error.param.name
            return await ctx.send(f"\N{NO ENTRY SIGN} Required argument "
                                  f"{parameter_name} was missing")
        elif isinstance(error, commands.CheckFailure):
            return await ctx.send("\N{NO ENTRY SIGN} You do not have "
                                  "permission to use that command")
        elif isinstance(error, commands.CommandOnCooldown):
            retry_after = round(error.retry_after)
            # FIX: the wrapped literal was missing a space ("tryagain").
            return await ctx.send(f"\N{HOURGLASS} Command is on cooldown, try "
                                  f"again after {retry_after} seconds")
        # All errors below this need reporting and so do not return
        if isinstance(error, commands.ArgumentParsingError):
            # Provide feedback & report error
            # FIX: missing space ("whileattempting").
            await ctx.send("\N{NO ENTRY SIGN} An issue occurred while "
                           "attempting to parse an argument")
        elif isinstance(error, commands.BadArgument):
            # FIX: missing space ("argumentfailed").
            await ctx.send("\N{NO ENTRY SIGN} Conversion of an argument "
                           "failed")
        else:
            await ctx.send("\N{NO ENTRY SIGN} An error occured during "
                           "execution, the error has been reported.")
        extra_context = {
            "discord_info": {
                "Channel": ctx.channel.mention,
                "User": ctx.author.mention,
                "Command": ctx.message.content
            }
        }
        if ctx.guild is not None:
            # We are NOT in a DM
            extra_context["discord_info"]["Message"] = (
                f'[{ctx.message.id}](https://discordapp.com/channels/'
                f'{ctx.guild.id}/{ctx.channel.id}/{ctx.message.id})'
            )
        else:
            extra_context["discord_info"]["Message"] = f"{ctx.message.id} (DM)"
        log.exception(error, extra=extra_context)
if __name__ == "__main__":
    bot = ImageMan()
    # Attach the Discord log handler before connecting so startup problems
    # are forwarded to the error channel too.
    log.addHandler(DiscordHandler(bot))
    bot.run(config.token)
################################################################################
# Copyright 2016-2021 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell cop-
# ies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IM-
# PLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNE-
# CTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
################################################################################
from .Common import printExit, printWarning, versionIsCompatible
from .SolutionStructs import Solution, ProblemSizes, ProblemType
from . import __version__
from . import Common
from . import SolutionLibrary
try:
import yaml
except ImportError:
printExit("You must install PyYAML to use Tensile (to parse config files). See http://pyyaml.org/wiki/PyYAML for installation instructions.")
try:
import msgpack
except ImportError:
print("Message pack python library not detected. Must use YAML backend instead.")
###################
# Writing functions
###################
def write(filename_noExt, data, format="yaml"):
    """Write *data* to ``filename_noExt`` with a format-appropriate extension.

    ``format`` may be "yaml" or "msgpack"; anything else aborts via printExit.
    """
    if format == "msgpack":
        writeMsgPack(filename_noExt + ".dat", data)
    elif format == "yaml":
        writeYAML(filename_noExt + ".yaml", data)
    else:
        printExit("Unrecognized format {}".format(format))
def writeYAML(filename, data, **kwargs):
    """Write *data* to *filename* as YAML, with explicit document markers
    unless the caller overrides them."""
    # Fill in the default dump options only where the caller left them unset.
    kwargs.setdefault("explicit_start", True)
    kwargs.setdefault("explicit_end", True)
    kwargs.setdefault("default_flow_style", None)

    with open(filename, "w") as f:
        yaml.dump(data, f, **kwargs)
def writeMsgPack(filename, data):
    """Writes data to file in Message Pack format."""
    # Binary mode: msgpack serializes to bytes.
    with open(filename, "wb") as f:
        msgpack.pack(data, f)
def writeSolutions(filename, problemSizes, solutions):
    """Write a solutions YAML file: version header, problem sizes, then the
    flattened solution dictionaries."""
    # Flatten the nested solution groups into plain dictionaries, replacing
    # enum-like objects with their serializable values.
    solutionStates = []
    for group in solutions:
        for solution in group:
            state = solution.getAttributes()
            ptype = state["ProblemType"].state
            ptype["DataType"] = ptype["DataType"].value
            ptype["DestDataType"] = ptype["DestDataType"].value
            ptype["ComputeDataType"] = ptype["ComputeDataType"].value
            state["ProblemType"] = ptype
            solutionStates.append(state)

    # Emit the header lines by hand, then dump the solution states.
    with open(filename, "w") as f:
        f.write("- MinimumRequiredVersion: %s\n" % __version__)
        f.write("- ProblemSizes:\n")
        if problemSizes:
            for sizeRange in problemSizes.ranges:
                f.write(" - Range: %s\n" % sizeRange)
            for problemExact in problemSizes.exacts:
                # FIXME-problem, this ignores strides:
                f.write(" - Exact: %s\n" % str(problemExact))
        yaml.dump(solutionStates, f, default_flow_style=None)
###############################
# Reading and parsing functions
###############################
def readYAML(filename):
    """Return the parsed YAML contents of *filename* (safe loader)."""
    with open(filename, "r") as f:
        return yaml.load(f, yaml.SafeLoader)
def parseSolutionsFile(filename):
    """Read *filename* and parse it as a solutions file."""
    data = readYAML(filename)
    return parseSolutionsData(data, filename)
def parseSolutionsData(data, srcFile="?"):
    """Parse problem sizes and solutions from the data of a solutions file.

    Returns a (problemSizes, solutions) tuple.
    """
    if len(data) < 3:
        # FIX: closed the unbalanced parenthesis in the message.
        printExit("Solution file {} is missing required fields (len = {} < 3)".format(srcFile, len(data)))

    versionString = data[0]["MinimumRequiredVersion"]
    if not versionIsCompatible(versionString):
        # BUG FIX: arguments were swapped -- the file name was printed as
        # the version and vice versa.
        printWarning("Version = {} in solution file {} does not match Tensile version = {}" \
            .format(versionString, srcFile, __version__) )

    if "ProblemSizes" not in data[1]:
        printExit("Solution file {} doesn't begin with ProblemSizes".format(srcFile))

    problemSizesConfig = data[1]["ProblemSizes"]

    solutions = []
    for solutionState in data[2:]:
        # force redo the deriving of parameters, make sure old version logic yamls can be validated
        solutionState["AssignedProblemIndependentDerivedParameters"] = False
        solutionState["AssignedDerivedParameters"] = False
        solutions.append(Solution(solutionState))

    problemType = solutions[0]["ProblemType"]
    problemSizes = ProblemSizes(problemType, problemSizesConfig)
    return (problemSizes, solutions)
def parseLibraryLogicFile(filename):
    """Read *filename* and parse it as a library logic file."""
    data = readYAML(filename)
    return parseLibraryLogicData(data, filename)
def parseLibraryLogicData(data, srcFile="?"):
    """Parse the data of a library logic file.

    Returns (scheduleName, deviceNames, problemType, solutions, indexOrder,
    exactLogic, rangeLogic, newLibrary, architectureName).
    """
    if len(data) < 9:
        printExit("Library logic file {} is missing required fields (len = {} < 9)".format(srcFile, len(data)))

    versionString = data[0]["MinimumRequiredVersion"]
    scheduleName = data[1]
    # Older files store the architecture as a bare string; newer ones as a dict.
    architectureName = data[2] if isinstance(data[2], str) else data[2]["Architecture"]
    deviceNames = data[3]
    problemTypeState = data[4]
    solutionStates = data[5]
    indexOrder = data[6]
    exactLogic = data[7]
    rangeLogic = data[8]

    if not versionIsCompatible(versionString):
        # BUG FIX: arguments were swapped -- the file name was printed as
        # the version and vice versa.
        printWarning("Version = {} in library logic file {} does not match Tensile version = {}" \
            .format(versionString, srcFile, __version__) )

    # unpack problemType
    problemType = ProblemType(problemTypeState)

    # unpack solutions
    solutions = []
    for solutionState in solutionStates:
        if solutionState["KernelLanguage"] == "Assembly":
            solutionState["ISA"] = Common.gfxArch(architectureName)
        else:
            solutionState["ISA"] = (0, 0, 0)
        # force redo the deriving of parameters, make sure old version logic yamls can be validated
        solutionState["AssignedProblemIndependentDerivedParameters"] = False
        solutionState["AssignedDerivedParameters"] = False
        solutionObject = Solution(solutionState)
        if solutionObject["ProblemType"] != problemType:
            printExit("ProblemType in library logic file {} doesn't match solution: {} != {}" \
                .format(srcFile, problemType, solutionObject["ProblemType"]))
        solutions.append(solutionObject)

    newLibrary = SolutionLibrary.MasterSolutionLibrary.FromOriginalState(data, solutions)

    return (scheduleName, deviceNames, problemType, solutions, indexOrder, \
        exactLogic, rangeLogic, newLibrary, architectureName)
def rawLibraryLogic(data):
    """Return the fields of a library-logic document as a tuple.

    The first nine positional entries are unpacked individually; any
    remaining entries are collected into the trailing ``otherFields`` list.
    """
    (versionString, scheduleName, architectureName, deviceNames,
     problemTypeState, solutionStates, indexOrder, exactLogic,
     rangeLogic) = data[:9]
    # Idiom fix: slice instead of the manual index-copy loop.
    otherFields = list(data[9:])

    return (versionString, scheduleName, architectureName, deviceNames,
            problemTypeState, solutionStates, indexOrder, exactLogic,
            rangeLogic, otherFields)
#################
# Other functions
#################
def createLibraryLogic(schedulePrefix, architectureName, deviceNames, logicTuple):
    """Creates the data for a library logic file suitable for writing to YAML.

    Args:
        schedulePrefix: schedule name written into the file (eg "vega10").
        architectureName: GPU architecture label.
        deviceNames: list of device names the logic applies to.
        logicTuple: (problemType, solutions, indexOrder, exactLogic,
            rangeLogic[, tileSolutions, tileSelectionIndices, extra]).

    Returns:
        A list in the library logic file layout, ready for YAML dumping.
    """
    problemType = logicTuple[0]
    solutions = logicTuple[1]
    indexOrder = logicTuple[2]
    exactLogic = logicTuple[3]
    rangeLogic = logicTuple[4]
    tileSelection = bool(len(logicTuple) > 5 and logicTuple[5])

    def _serializeDataTypes(problemTypeState):
        # Convert the DataType enum members to their plain values, in place,
        # so the state is YAML-serializable.  (Previously this conversion was
        # written out three times verbatim.)
        for key in ("DataType", "DestDataType", "ComputeDataType"):
            problemTypeState[key] = problemTypeState[key].value

    def _serializeSolution(solution):
        # One solution dict with its ProblemType flattened to plain values.
        solutionState = solution.getAttributes()
        solutionState["ProblemType"] = solutionState["ProblemType"].state
        _serializeDataTypes(solutionState["ProblemType"])
        return solutionState

    data = []
    # Tensile version
    data.append({"MinimumRequiredVersion": __version__})
    # schedule name
    data.append(schedulePrefix)  # change from Tensile to vega10
    data.append(architectureName)
    # schedule device names
    data.append(deviceNames)
    # problem type
    problemTypeState = problemType.state
    _serializeDataTypes(problemTypeState)
    data.append(problemTypeState)
    # solutions (tile-selection solutions, when present, are appended after
    # the regular ones, matching the original ordering)
    solutionList = [_serializeSolution(solution) for solution in solutions]
    if tileSelection:
        solutionList.extend(_serializeSolution(solution) for solution in logicTuple[5])
    data.append(solutionList)
    # index order
    data.append(indexOrder)
    # exactLogic: mapping {problemSize: solutionInfo} flattened to [[size, info], ...]
    data.append([[list(key), value] for key, value in exactLogic.items()])
    # rangeLogic
    data.append(rangeLogic)
    if tileSelection:
        data.append({"TileSelectionIndices": logicTuple[6]})
    else:
        data.append(None)
    data.append(logicTuple[7])
    return data
| StarcoderdataPython |
3367621 | <filename>testing/test_all.py
#!/usr/bin/env python
#
# Launch all testing scripts.
#
# Author: <NAME>, <NAME>
# Last Modif: 2014-06-11
import os
import getopt
import sys
from numpy import loadtxt
import commands
# get path of the toolbox
status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
# append path that contains scripts, to be able to load modules
sys.path.append(path_sct + '/scripts')
import sct_utils as sct
# define nice colors
class bcolors:
    """ANSI terminal escape sequences used to colorize test status output."""
    HEADER = '\033[95m'   # magenta
    OKBLUE = '\033[94m'   # blue
    OKGREEN = '\033[92m'  # green
    WARNING = '\033[93m'  # yellow
    FAIL = '\033[91m'     # red
    ENDC = '\033[0m'      # reset to the terminal's default color
# Print without new carriage return
# ==========================================================================================
def print_line(string):
    """Print *string* without a trailing newline and flush immediately.

    Used so a status tag (e.g. [OK]) can be appended on the same line later.
    """
    # sys is already imported at module level; the function-local
    # 'import sys' the original carried was redundant.
    sys.stdout.write(string)
    sys.stdout.flush()
def print_ok():
    """Print a green '[OK]' status tag."""
    # print() calls (single argument) behave identically under Python 2 and 3;
    # the original 'print "..."' statements were Python-2-only syntax.
    print("[" + bcolors.OKGREEN + "OK" + bcolors.ENDC + "]")

def print_warning():
    """Print a yellow '[WARNING]' status tag."""
    print("[" + bcolors.WARNING + "WARNING" + bcolors.ENDC + "]")

def print_fail():
    """Print a red '[FAIL]' status tag."""
    print("[" + bcolors.FAIL + "FAIL" + bcolors.ENDC + "]")
def write_to_log_file(fname_log, string):
    """Write *string* (plus a trailing newline) to *fname_log*, overwriting it.

    Args:
        fname_log: path of the log file to (re)create.
        string: text to store (typically a test script's captured output).
    """
    # 'with' guarantees the handle is closed even if the write raises.
    with open(fname_log, 'w') as f:
        f.write(string + '\n')
def test_function(folder_test, dot_lines):
    """Run the shell test script of *folder_test*, report and log the result.

    Returns the script's exit status (0 on success).
    """
    log_name = folder_test + ".log"
    print_line('Checking ' + folder_test + dot_lines)
    # The test script must run from inside its own folder.
    os.chdir(folder_test)
    status, output = commands.getstatusoutput('./test_' + folder_test + '.sh')
    os.chdir('../')
    if status == 0:
        print_ok()
    else:
        print_fail()
    write_to_log_file(log_name, output)
    return status
# START MAIN
# ==========================================================================================
# Run every test folder in sequence, collecting each exit status.
status = []
status.append( test_function('sct_segmentation_propagation',' .............. ') )
status.append( test_function('sct_register_to_template',' .................. ') )
status.append( test_function('sct_register_multimodal',' ................... ') )
status.append( test_function('sct_warp_atlas2metric',' ..................... ') )
status.append( test_function('sct_estimate_MAP_tracts',' ................... ') )
status.append( test_function('sct_dmri_moco',' ............................. ') )
# Function-call form so the script parses under Python 3 as well; with a
# single argument the output is identical under Python 2.
print(str(status))
print("done!\n")
| StarcoderdataPython |
3374490 | <reponame>andreportela/qas_intrusion_detection<filename>qas_experimental_evaluation_project/ransomware.py<gh_stars>0
from os import listdir, remove
from os.path import isfile, join
from random import seed, randint
import time
seed(1)
folder = "medical_records/"
first_file_index = 0
seconds_to_sleep = 1
def start():
    """Repeatedly delete a random file from `folder`, sleeping between passes.

    Destructive by design: this simulates ransomware-like behaviour so the
    intrusion-detection experiment has an attack to observe.  Loops forever.
    """
    global seconds_to_sleep
    while True:
        time.sleep(seconds_to_sleep)
        # Snapshot of the regular files currently present in the target folder.
        all_files = [file_name for file_name in listdir(folder) if isfile(join(folder, file_name))]
        number_of_files = len(all_files)
        if number_of_files == 0:
            # Nothing left to delete: back off so the folder can refill.
            seconds_to_sleep += 0.7
            print(f"folder is empty, trying to avoid that by increasing sleeping time to {seconds_to_sleep}")
            continue
        last_file_index = number_of_files - 1
        # Pick a victim uniformly at random (first_file_index is 0).
        random_index = randint(first_file_index, last_file_index)
        file_marked_to_deletion = all_files[random_index]
        remove(join(folder, file_marked_to_deletion))
        print(f"deleted {file_marked_to_deletion}")
| StarcoderdataPython |
1782047 | '''
Base driver class
'''
import pandas as pd
import requests
import json
from copy import deepcopy
import pyperclip
import math
import re
import inspect
import yaml
import itertools
from datetime import datetime
import warnings
import functools
from textwrap import dedent
from datapungi_fed import generalSettings #NOTE: projectName
#import generalSettings #NOTE: projectName
from datapungi_fed import utils #NOTE: projectName
#import utils #NOTE: projectName
class driverCore():
    r'''
    Given a dbGroupName and its default db, starts a factory of query functions - ie, a function for
    each db in the group. If dbGroupName is empty, return the list of dbGroups, dbs in the group, and their parameters
    '''
    def __init__(self,dbGroupName='', baseRequest={},connectionParameters={},userSettings={}):
        #TODO: place defaultQueryFactoryEntry in yaml
        # Load the group's db parameters (url suffix, json key, query params) and its default db.
        self._dbParams, self.defaultQueryFactoryEntry = self._getDBParameters(dbGroupName)
        self._ETDB = extractTransformDB(baseRequest,connectionParameters,userSettings) #a generic query is started
        # One ready-to-call query function per database of the group, keyed by short name.
        self._ETFactory = extractTransformFactory(dbGroupName,self._ETDB,self._dbParams,self.defaultQueryFactoryEntry)
        self._driverMeta = driverMetadata()(dbGroupName)
        self.__setdoc__(dbGroupName)
    def __getitem__(self,dbName):
        # driver['dbName'] -> the query function bound to that database.
        return(self._ETFactory.extractTransformFactory[dbName])
    def __call__(self,*args,**kwargs):
        # Calling the driver itself queries the group's default database.
        out = self._ETFactory.extractTransformFactory[self.defaultQueryFactoryEntry](*args,**kwargs)
        return(out)
    def __setdoc__(self,dbGroupName):
        # Build a human-readable __doc__ for this instance: every db of the
        # group, its parameters, and the default query database.
        if dbGroupName == '':
            self.__doc__ = 'Returns the metadata of the dataset groups and their databases. Do not need inputs.'
        else:
            self.__doc__ = 'Queries the databases of {} \n \n'.format(dbGroupName)
            for entry in self.__docParams__:
                self.__doc__ += '- {short name}: {description} \n'.format(**entry)
                self.__doc__ += ' parameters: {}\n'.format(str(entry['parameters']))
                self.__doc__ += ' official database name: {}\n'.format(entry['database'])
            self.__doc__ += '\nDefault query database: {}\n'.format(self.defaultQueryFactoryEntry)
            self.__doc__ += "Sample functions: \n-data.{dbGroupName}() (default) \n-data.{dbGroupName}['{db}']() (query the {db} database)".format(**{'dbGroupName':dbGroupName.lower(),'db':self.defaultQueryFactoryEntry})
            self.__doc__ += "\n\nNOTE: don't need to pass most parameters. Eg, api_key and file_type (json)."
    def __str__(self):
        return(self.__doc__)
    def _getDBParameters(self,dbGroupName = ''):
        r'''
        The parameters of each database in the group (if empty returns all groups x databases)
        '''
        dataPath = utils.getResourcePath('/config/datasetlist.yaml')
        with open(dataPath, 'r') as yf:
            datasetlist = yaml.safe_load(yf)
        if dbGroupName == '':
            defaultDB = {}
            return((datasetlist,defaultDB))
        #get the entry of the group:
        selected = list(filter( lambda x: x['group'] == dbGroupName , datasetlist))[0]
        defaultDB = selected.get('default query','')
        datasets = selected.get('datasets',{})
        # api_key and file_type are filled in automatically by the driver, so
        # they are not exposed as user-facing query parameters.
        removeCases = lambda array: list(filter( lambda x: x not in ['api_key','file_type'] , array ))
        dbParams = { entry['short name'] : { 'urlSuffix' : entry['database'] , 'json key': entry['json key'], 'params': removeCases(entry['parameters']) } for entry in datasets }
        self.__docParams__ = datasets #parameters used to write a doc string for the class instance.
        return((dbParams,defaultDB))
class extractTransformFactory():
    r'''
    given a groupName of databases, constructs dictionary of functions querying all of its databases
    '''
    def __init__(self,dbGroupName,ETDB,dbParams,defaultQueryFactoryEntry):
        if dbGroupName:
            self.dbGroupName = dbGroupName
            self.dbParams = dbParams
            self.ETDB = ETDB
            self.ETDB(self.dbGroupName,self.dbParams) #update the connector to the databases with parameters specific to the collection of dbs.
            # One query function per database short name.
            self.extractTransformFactory = { dbName : self.selectDBQuery(self.query, dbName) for dbName in self.dbParams.keys() }
            self.defaultQueryFactoryEntry = defaultQueryFactoryEntry #the entry in query factory that __call__ will use.
        else:
            # Empty group: no databases, hence an empty factory.
            self.extractTransformFactory = {}
    def query(self,*args,**kwargs):
        # Thin passthrough to the shared DB connector.
        return( self.ETDB.query(*args,**kwargs) )
    def selectDBQuery(self,queryFun,dbName):
        r'''
        Fix a generic query to a query to dbName, creates a lambda that, from
        args/kwargs creates a query of the dbName
        '''
        fun = functools.partial(queryFun,dbName)
        # Positional/keyword user arguments are remapped to request params first.
        lfun = lambda *args,**kwargs: fun(**self.getQueryArgs(dbName,*args,**kwargs))
        #add quick user tips
        lfun.options = self.dbParams[dbName]['params']
        return(lfun)
    def getQueryArgs(self,dbName,*args,**kwargs):
        r'''
        Map args and kwargs to driver args
        '''
        #paramaters to be passed to a requests query:
        paramArray = self.dbParams[dbName]['params']
        # Positional args are matched to the db's parameter names in order,
        # then overridden/extended by matching keyword args.
        params = dict(zip(paramArray,args))
        paramsAdd = {key:val for key, val in kwargs.items() if key in paramArray}
        params.update(paramsAdd)
        #non query options (eg, verbose)
        otherArgs = {key:val for key, val in kwargs.items() if not key in paramArray}
        return({**{'params':params},**otherArgs})
class extractTransformDB():
    r'''
    Functions to connect and query a db given its dbName and dbParams (see yaml in config for these).
    '''
    def __init__(self,baseRequest={},connectionParameters={},userSettings={}):
        '''
        loads generic parametes (ie api key, location fo data.)
        '''
        self._connectionInfo = generalSettings.getGeneralSettings(connectionParameters = connectionParameters, userSettings = userSettings )
        self._baseRequest = self.getBaseRequest(baseRequest,connectionParameters,userSettings)
        self._lastLoad = {} #data stored here to assist functions such as clipcode
        self._transformData = transformExtractedData()
        self._getCode = transformIncludeCodeSnippet()
        self._cleanCode = "" #TODO: improvable - this is the code snippet producing a pandas df
    def __call__(self,dbGroup,dbParams):
        r'''
        A call to an instance of the class Loads specific parameters of the dbs of dbGroup
        '''
        self.dbGroup = dbGroup
        self.dbParams = dbParams
    def query(self,dbName,params={},file_type='json',verbose=False,warningsOn=True):
        r'''
        Query the database dbName and return the cleaned output.

        Args:
            params (dict): overrides for the request parameters (eg dates, limit)
            file_type (str): response format requested from the API (json default)
            verbose (bool): if True return dict(dataFrame, request, code) instead of just the DataFrame
            warningsOn (bool): if True, warn on events such as exceeding the download limit
        '''
        # get requests' query inputs
        warningsList = ['countPassLimit'] # warn on this events.
        prefixUrl = self.dbParams[dbName]['urlSuffix']
        output = self.queryApiCleanOutput(prefixUrl, dbName, params, warningsList, warningsOn, verbose)
        return(output)
    def queryApiCleanOutput(self,urlPrefix,dbName,params,warningsList,warningsOn,verbose):
        r'''
        Core steps of querying and cleaning data. Notice, specific data cleaning should be
        implemented in the specific driver classes
        Args:
            self - should containg a base request (url)
            urlPrefix (str) - a string to be appended to request url (eg, https:// ...// -> https//...//urlPrefix?)
            params (dict) - usually empty, override any query params with the entries of this dictionary
            warningsList (list) - the list of events that can lead to warnings
            warningsOn (bool) - turn on/off driver warnings
            verbose (bool) - detailed output or short output
        '''
        #get data
        query = self.getBaseQuery(urlPrefix,params)
        # Only 'params' and 'url' are valid requests.get arguments; the helper
        # entries (params_dict) are filtered out here.
        retrivedData = requests.get(** { key:entry for key, entry in query.items() if key in ['params','url'] } )
        #clean data
        df_output,self._cleanCode = self.cleanOutput(dbName,query,retrivedData)
        #print warning if there is more data the limit to download
        for entry in warningsList:
            self._warnings(entry,retrivedData,warningsOn)
        #short or detailed output, update _lastLoad attribute:
        output = self.formatOutputupdateLoadedAttrib(query,df_output,retrivedData,verbose)
        return(output)
    def getBaseQuery(self,urlPrefix,params):
        r'''
        Return a dictionary of request arguments.
        Args:
            urlPrefix (str) - string appended to the end of the core url (eg, series -> http:...\series? )
            dbName (str) - the name of the db being queried
            params (dict) - a dictionary with request paramters used to override all other given parameters
        Returns:
            query (dict) - a dictionary with 'url' and 'params' (a string) to be passed to a request
        '''
        query = deepcopy(self._baseRequest)
        #update query url
        query['url'] = query['url']+urlPrefix
        query['params'].update(params)
        # Keep the dict form around (params_dict) for the cleaners, then
        # flatten the params to a single "k1=v1&k2=v2" string for requests.
        query['params_dict'] = query['params']
        query['params'] = '&'.join([str(entry[0]) + "=" + str(entry[1]) for entry in query['params'].items()])
        return(query)
    def formatOutputupdateLoadedAttrib(self,query,df_output,retrivedData,verbose):
        '''Return the DataFrame (default) or, when verbose, a dict holding the
        DataFrame, the raw request and a reproducing code snippet; either way
        cache the result in self._lastLoad (used eg by clipcode).'''
        if verbose == False:
            self._lastLoad = df_output
            return(df_output)
        else:
            code = self._getCode.transformIncludeCodeSnippet(query,self._baseRequest,self._connectionInfo.userSettings,self._cleanCode)
            output = dict(dataFrame = df_output, request = retrivedData, code = code)
            self._lastLoad = output
            return(output)
    def cleanOutput(self,dbName,query,retrivedData):
        r'''
        This is a placeholder - specific drivers should have their own cleaning method
        this generates self._cleanCode
        '''
        transformedOutput = self._transformData(self.dbGroup,dbName,self.dbParams,query,retrivedData)
        return(transformedOutput)
    def getBaseRequest(self,baseRequest={},connectionParameters={},userSettings={}):
        r'''
        Write a base request. This is the information that gets used in most requests such as getting the userKey
        '''
        if baseRequest =={}:
            connectInfo = generalSettings.getGeneralSettings(connectionParameters = connectionParameters, userSettings = userSettings )
            return(connectInfo.baseRequest)
        else:
            return(baseRequest)
    def _warnings(self,warningName,inputs,warningsOn = True):
        # Dispatch point for driver warnings; silently returns when disabled.
        if not warningsOn:
            return
        if warningName == 'countPassLimit':
            '''
            warns if number of lines in database exceeds the number that can be downloaded.
            inputs = a request result of a FED API
            '''
            _count = inputs.json().get('count',1)
            _limit = inputs.json().get('limit',1000)
            if _count > _limit:
                warningText = 'NOTICE: dataset exceeds download limit! Check - count ({}) and limit ({})'.format(_count,_limit)
                warnings.warn(warningText)
class transformExtractedData():
    r'''
    Converts raw API responses into pandas DataFrames and, for each
    conversion, also returns a small code snippet reproducing it (shown in
    verbose mode so users can copy a standalone pandas recipe).

    Dispatch is by database group: 'Series' and 'Geo' payloads need extra
    reshaping; every other group is a flat "json key -> list of records".
    '''
    def __call__(self, dbGroup, dbName, dbParams, query, retrivedData):
        '''Route the response to the cleaner of its database group.'''
        if dbGroup == 'Series':
            return(self.cleanOutputSeries(dbName, dbParams, query, retrivedData))
        if dbGroup == 'Geo':
            return(self.cleanOutputGeo(dbName, dbParams, query, retrivedData))
        else:
            return(self.cleanOutput(dbName, dbParams, query, retrivedData))

    def _attachMeta(self, df_output, dataKey, jsonPayload):
        '''Attach every top-level entry of jsonPayload except dataKey to the
        DataFrame as df_output._meta (silencing the pandas attribute warning).'''
        warnings.filterwarnings("ignore", category=UserWarning)
        setattr(df_output, '_meta', dict(filter(lambda entry: entry[0] != dataKey, jsonPayload.items())))
        warnings.filterwarnings("always", category=UserWarning)

    def cleanOutput(self, dbName, dbParams, query, retrivedData):  # categories, releases, sources, tags
        '''Flat case: the records live under retrivedData.json()[json key].'''
        dataKey = dbParams[dbName]['json key']
        cleanCode = "df_output = pd.DataFrame( retrivedData.json()['{}'] )".format(dataKey)
        df_output = pd.DataFrame(retrivedData.json()[dataKey])  # TODO: deal with xml
        self._attachMeta(df_output, dataKey, retrivedData.json())
        return((df_output, cleanCode))

    def cleanOutputSeries(self, dbName, dbParams, query, retrivedData):  # series
        '''Series case: like the flat case, but 'observations' payloads are
        reshaped into a date-indexed numeric column named after the series.'''
        dataKey = dbParams[dbName]['json key']
        cleanCode = "df_output = pd.DataFrame( retrivedData.json()['{}'] )".format(dataKey)
        df_output = pd.DataFrame(retrivedData.json()[dataKey])  # TODO: deal with xml
        if dbName == 'observations':
            seriesID = query['params_dict']['series_id']
            # Drop rows whose value is not numeric, index by date, and label
            # the value column with the requested series id.
            df_output = (df_output[['date','value']]
                .assign( dropRow = lambda df: pd.to_numeric(df['value'],errors='coerce') )
                .dropna()
                .drop('dropRow',axis=1)
                .assign(value=lambda df: df['value'].astype('float'), date=lambda df: pd.to_datetime(df['date'] ) )
                .set_index('date')
                .rename({'value':seriesID},axis='columns'))
            codeAddendum = f'''\n
            df_output = (df_output[['date','value']]
                .assign( dropRow = lambda df: pd.to_numeric(df['value'],errors='coerce') )
                .dropna()
                .drop('dropRow',axis=1)
                .assign(value=lambda df: df['value'].astype('float'), date=lambda df: pd.to_datetime(df['date'] ) )
                .set_index('date')
                .rename({{'value': '{seriesID}' }},axis='columns'))
            '''
            cleanCode += dedent(codeAddendum)
        # Attach metadata last so it survives the reshaping above.
        self._attachMeta(df_output, dataKey, retrivedData.json())
        return((df_output, cleanCode))

    def cleanOutputGeo(self, dbName, dbParams, query, retrivedData):  # shapes, regional series/data
        if dbName == 'series' or dbName == 'data':
            # Regional data arrives as {date: [records]}; flatten it and tag
            # every record with its date.
            cleanCode = "includeDate = lambda key, array: [ dict(**entry,**{'_date':key}) for entry in array ]"
            cleanCode += "\ndictData = [ includeDate(key,array) for key,array in retrivedData.json()['meta']['data'].items() ]"
            cleanCode += "\ndictDataFlat = [item for sublist in dictData for item in sublist]"
            cleanCode += "\ndf_output = pd.DataFrame( dictDataFlat )"
            includeDate = lambda key, array: [ dict(**entry,**{'_date':key}) for entry in array ]
            dictData = [ includeDate(key,array) for key,array in retrivedData.json()['meta']['data'].items() ]
            dictDataFlat = [item for sublist in dictData for item in sublist]
            df_output = pd.DataFrame( dictDataFlat )
            jsonMeta = retrivedData.json()['meta']
            self._attachMeta(df_output, 'data', jsonMeta)
            return((df_output, cleanCode))
        # BUG FIX: the 'shapes' branch used to set dataKey and then fall off
        # the end of the function, returning None.  It now builds a DataFrame
        # like the other Geo databases; only the key's origin differs (the
        # requested shape name instead of the configured json key).
        if dbName == 'shapes':
            dataKey = query['params_dict']['shape']
        else:
            dataKey = dbParams[dbName]['json key']
        cleanCode = "df_output = pd.DataFrame( retrivedData.json()['{}'] )".format(dataKey)
        df_output = pd.DataFrame(retrivedData.json()[dataKey])  # TODO: deal with xml
        self._attachMeta(df_output, dataKey, retrivedData.json())
        return((df_output, cleanCode))
class transformIncludeCodeSnippet():
    # Builds, from an executed query, a standalone python snippet (api-key
    # loading header + requests call + pandas cleanup) that reproduces it.
    def transformIncludeCodeSnippet(self,query,baseRequest,userSettings={},pandasCode=""):
        '''Return the full snippet: key-loading header followed by the
        request/cleanup code.'''
        #load code header - get keys
        apiCode = self.getApiCode(query,userSettings)
        #load request's code
        queryCode = self.getQueryCode(query,baseRequest,pandasCode)
        return(apiCode + queryCode)
    def getQueryCode(self,query,baseRequest,pandasCode=""):
        '''Write the requests.get(...) part of the snippet, with the explicit
        api key replaced by the variable "key" defined in the header.'''
        queryClean = {'url':query['url'],'params':query['params']} #passing only these two entries of query; params_dict is dropped.
        queryClean['url'] = 'url'
        queryClean['params']=queryClean['params'].replace(baseRequest['params']['api_key'],'{}')+'.format(key)' #replace explicit api key by the var "key" poiting to it.
        queryCode = '''\
        query = {}
        retrivedData = requests.get(**query)
        {} #replace json by xml if this is the request format
        '''
        queryCode = dedent(queryCode).format(json.dumps(queryClean),pandasCode)
        # json.dumps quotes everything; patch the spots that must remain
        # python variables in the generated snippet.
        queryCode = queryCode.replace('"url": "url"', '"url": url')
        queryCode = queryCode.replace('.format(key)"', '".format(key)')
        queryCode = queryCode.replace('"UserID": "key"', '"UserID": key') #TODO: need to handle generic case, UserID, api_key...
        return(queryCode)
    def getApiCode(self,query,userSettings):
        r'''
        The base format of a code that can be used to replicate a driver using Requests directly.
        '''
        # NOTE(review): if the except branch runs before apiKeyLabel is bound,
        # passToCode below raises NameError; also the else-branch reassigns
        # userSettings to a path string before indexing it - confirm intent.
        try:
            url = query['url']
            if userSettings:
                apiKeyPath = userSettings['ApiKeysPath']
                apiKeyLabel = userSettings["ApiKeyLabel"]
            else:
                userSettings = generalSettings.getGeneralSettings( ).userSettings['ApiKeysPath']
                apiKeyPath = userSettings['ApiKeysPath']
                apiKeyLabel = userSettings["ApiKeyLabel"]
        except:
            url = " incomplete connection information "
            apiKeyPath = " incomplete connection information "
        #userSettings = utils.getUserSettings()
        #pkgConfig = utils.getPkgConfig()
        # the key file's extension decides how the snippet loads it (yaml/json/env)
        storagePref = apiKeyPath.split('.')[-1]
        passToCode = {'ApiKeyLabel': apiKeyLabel, "url":url, 'ApiKeysPath':apiKeyPath} #userSettings["ApiKeyLabel"]
        code = self.apiCodeOptions(storagePref)
        code = code.format(**passToCode)
        return(code)
    def apiCodeOptions(self,storagePref):
        r''''
        storagePref: yaml, json, env
        '''
        if storagePref == 'yaml':
            code = '''\
            import requests
            import yaml
            import pandas as pd
            apiKeysFile = '{ApiKeysPath}'
            with open(apiKeysFile, 'r') as stream:
                apiInfo= yaml.safe_load(stream)
                url,key = apiInfo['{ApiKeyLabel}']['url'], apiInfo['{ApiKeyLabel}']['key']
            '''
        elif storagePref == 'json':
            code = '''\
            import requests
            import json
            import pandas as pd
            # json file should contain: {"BEA":{"key":"YOUR KEY","url": "{url}" }
            apiKeysFile = '{ApiKeysPath}'
            with open(apiKeysFile) as jsonFile:
                apiInfo = json.load(jsonFile)
                url,key = apiInfo['{ApiKeyLabel}']['url'], apiInfo['{ApiKeyLabel}']['key']
            '''
        else: #default to env
            code = '''\
            import requests
            import os
            import pandas as pd
            url = "{url}"
            key = os.getenv("{ApiKeyLabel}")
            '''
        return(dedent(code))
    def clipcode(self):
        r'''
        Copy the string to the user's clipboard (windows only)
        '''
        # NOTE(review): this class never sets self._lastLoad (extractTransformDB
        # does), so this always falls into the except branch - confirm whether
        # clipcode was meant to live on the class that owns _lastLoad.
        try:
            pyperclip.copy(self._lastLoad['code'])
        except:
            print("Loaded session does not have a code entry. Re-run with verbose option set to True. eg: v.drivername(...,verbose=True)")
class driverMetadata():
    """Returns the query metadata (method name + parameters) of a db group.

    The original implementation repeated the exact same dictionary in five
    branches; all branches differed only in the method name, and only the
    fallback ('datasetlist') was actually different.
    """

    # Request parameters shared by every metadata entry.
    _PARAMS = {'file_type': 'json', 'realtime_start': '', 'realtime_end': '', 'tag_names': '', 'exclude_tag_names': '', 'tag_group_id': '', 'search_text': '', 'limit': '', 'offset': '', 'order_by': '', 'sort_order': ''}

    def __call__(self, dbGroup):
        # Every known group exposes the same 'tags' entry; anything else
        # (including the empty group name) falls back to 'datasetlist'.
        if dbGroup in ('Categories', 'Releases', 'Series', 'Sources', 'Tags'):
            method = "tags"
        else:
            method = "datasetlist"
        self.metadata = [{
            "displayName": method,
            # Name of driver main function - run with getattr(data, method)()
            "method": method,
            # Fresh copy so callers mutating params cannot corrupt the template.
            "params": dict(self._PARAMS),
        }]
        return(self.metadata)
if __name__ == '__main__':
case = driverCore(dbGroupName = 'Series')
print(case('gdp',verbose=True)) | StarcoderdataPython |
170873 | ## pip install librosa
#
import time
import matplotlib.pyplot as plt
import librosa
import librosa.display
# wav 采样率转换
def convert_wav(file, rate=16000):
    """Resample a wav file to *rate* Hz and save it next to the original.

    Returns the path of the new file ("<name>_new.wav").
    NOTE(review): librosa.output.write_wav was removed in librosa 0.8 -
    confirm the pinned librosa version, or switch to soundfile.write.
    """
    import os  # stdlib; local import keeps the module's import list unchanged
    signal, sr = librosa.load(file, sr=None)
    new_signal = librosa.resample(signal, sr, rate)
    # splitext is safer than str.split('.wav'): it handles upper-case
    # extensions and paths containing '.wav' elsewhere.
    out_path = os.path.splitext(file)[0] + "_new.wav"
    librosa.output.write_wav(out_path, new_signal, rate)  # 保存为音频文件 -> save as audio file
    return out_path
def waveplot(file, duration=15):
    """Load up to *duration* seconds of audio and display its waveform."""
    y, sr = librosa.load(file, duration=duration)
    plt.figure(figsize=(12, 8))
    plt.subplot(3, 1, 1)
    librosa.display.waveplot(y, sr=sr)
    plt.title('Monophonic')
    print(sr)  # sample rate librosa loaded at (it resamples to 22050 Hz by default)
    print(y)   # raw amplitude samples
    plt.show()
file = "/Users/chenwei/Library/Logs/jj.wav"
# time.clock() was removed in Python 3.8; perf_counter() is the documented
# replacement for wall-clock interval timing.
s = time.perf_counter()
# print(convert_wav(file))
waveplot(file)
print("usage: ", time.perf_counter() - s)
# import sox
# def upsample_wav(file="/home/em/jj.wav", rate="16000"):
# tfm = sox.Transformer()
# tfm.rate(rate)
# out_path = file.split('.wav')[0] + "_hr.wav"
# tfm.build(file, out_path)
# return out_path
# upsample_wav()
| StarcoderdataPython |
1730346 | from Core.IFactory import IFactory
from Regs.Block_C import RC120
class RC120Factory(IFactory):
    """Factory that builds RC120 register-block objects from a raw line."""

    def create_block_object(self, line):
        """Create an RC120, load *line* into its register list and return it."""
        block = RC120()
        block.reg_list = line
        self.rc120 = block  # keep the last built block reachable from the factory
        return block
| StarcoderdataPython |
17996 | <gh_stars>1-10
#zadanie 1
# Build the first 50 Fibonacci numbers, starting from 1, 1.
ciag = [1, 1]
while len(ciag) < 50:
    ciag.append(ciag[-1] + ciag[-2])
print(ciag)
#zadanie 2
# Reads comma-separated words from the user, asks for a length threshold and
# reports which items are longer than it.
wpisane=str(input("Proszę wpisać dowolne słowa po przecinku "))
zmienne=wpisane.split(",")
def funkcja(*args):
    '''Checks the length of the words and keeps only those longer than the
    user-supplied threshold; returns (percentage kept, all items, kept items).

    NOTE(review): the function is invoked below as funkcja(zmienne), so args
    holds a single element - the whole list - and len(arg) measures the list
    length, not a word length; funkcja(*zmienne) was probably intended.
    '''
    lista=[]
    lista2=[]
    wartosc = int(input("Proszę wpisać jakąś wartość "))
    for arg in args:
        lista.append(arg)
        dlugosc=len(arg)  # NOTE(review): unused - len(arg) is recomputed below
        if len(arg)>wartosc:
            lista2.append(arg)
    procenty=(len(lista2)/len(lista))*100  # ZeroDivisionError when called with no args
    return procenty,lista,lista2
print(funkcja(zmienne))
#zadanie 3
# Attempts to build a list without repeated neighbouring values from user input.
# NOTE(review): list(input(...)) splits the input into single characters, so
# the commas themselves become elements - input().split(",") was probably
# intended; confirm against the exercise statement.
liczby=list(input("Proszę wpisać liczby po przecinku: "))
unikalna_lista=[]
n=1
a=liczby[n]  # NOTE(review): 'a' is bound once to liczby[1] and never updated - likely a bug
unikalna_lista.append(liczby[0])
while n<len(liczby):
    # NOTE(review): compares against unikalna_lista[n-1], whose index drifts
    # from liczby's as soon as an element is skipped, and then appends the
    # fixed 'a' instead of liczby[n] - the dedup logic looks broken; confirm.
    if liczby[n]!=unikalna_lista[n-1]:
        unikalna_lista.append(a)
    n+=1
1743918 | from gym_TD.utils import logger
import time
def PPO_train_single(ppo, state, action, next_state, reward, done, info, writer, title, config):
    """Record one environment transition; flush and learn once enough
    trajectories are buffered.

    Returns the losses from ppo.learn() when a training step ran, else None.
    """
    # Penalize actions the environment had to correct.
    if (action != info['RealAction']).any():
        reward -= 0.3
    ppo.record_single(state, action, reward, done)
    # Guard clauses: nothing else to do until the horizon fills...
    if ppo.len_trajectory % config.horizon != 0:
        return None
    ppo.flush_single(next_state)
    logger.debug('P', 'PPO_train_single: flush one trajectory')
    # ...and until all actors have contributed a trajectory.
    if ppo.num_trajectories != config.num_actors:
        return None
    logger.debug('P', 'PPO_train: start training')
    started = time.perf_counter()
    losses = ppo.learn()
    elapsed = time.perf_counter() - started
    logger.verbose('P', 'PPO_train: finish training, used {} seconds', elapsed)
    return losses
def PPO_train(ppo, states, actions, next_states, rewards, dones, infos, writer, title, config):
    """Batched variant of PPO_train_single: record a batch of transitions and
    learn once the horizon fills.

    Returns the losses from ppo.learn() when a training step ran, else None.
    """
    # Penalize every action the environment had to correct.
    for idx, act in enumerate(actions):
        if (act != infos[idx]['RealAction']).any():
            rewards[idx] -= 0.3
    ppo.record(states, actions, rewards, dones)
    if ppo.len_trajectory % config.horizon != 0:
        return None
    ppo.flush(next_states)
    logger.debug('P', 'PPO_train: flush trajectories')
    logger.debug('P', 'PPO_train: start training')
    started = time.perf_counter()
    losses = ppo.learn()
    logger.verbose('P', 'PPO_train: finish training, used {} seconds', time.perf_counter() - started)
    return losses
def PPO_loss_parse(losses, writer, title):
    """Flatten per-trajectory loss tuples, log each component to the summary
    writer, and return the components grouped by name.

    Each record is (surrogate, value_function, entropy, total_loss, step).
    """
    flat = [record for trajectory in losses for record in trajectory]
    surr = [record[0] for record in flat]
    vf = [record[1] for record in flat]
    ent = [record[2] for record in flat]
    ls = [record[3] for record in flat]
    step = [record[4] for record in flat]
    for s, v, e, l, t in zip(surr, vf, ent, ls, step):
        writer.add_scalar(title+'/Surrogate', s, t)
        writer.add_scalar(title+'/ValueFunction', v, t)
        writer.add_scalar(title+'/Entropy', e, t)
        writer.add_scalar(title+'/Loss', l, t)
    return {
        'SurrogateLoss': surr,
        'ValueFunctionLoss': vf,
        'Entropy': ent,
        'TotalLoss': ls
    }
def PPO_model(env, env_name, map_size, config):
    """Create the policy network and PPO trainer matching the environment.

    Args:
        env: gym TD environment (its observation/action spaces are read).
        env_name: environment id; 'TD-def*' builds a UNet defender model,
            'TD-atk*' an FCN attacker model.
        map_size: unused here; kept for interface compatibility with callers.
        config: hyperparameter namespace (provides .device among others).

    Returns:
        A PPO.PPO agent, or None when env_name is not recognized (an error
        is logged first).
    """
    import PPO
    import Net
    ppo = None
    if env_name.startswith('TD-def'):
        net = Net.UNet(
            env.observation_space.shape[0], 64,
            env.observation_space.shape[1], env.observation_space.shape[2],
            4, 1
        ).to(config.device)
        ppo = PPO.PPO(
            None, None, net,
            env.observation_space.shape[2],
            (),
            config
        )
    elif env_name.startswith('TD-atk'):
        net = Net.FCN(
            env.observation_space.shape[0],
            env.observation_space.shape[1], env.observation_space.shape[2],
            [4, *env.action_space.shape], [1]
        ).to(config.device)
        ppo = PPO.PPO(
            None, None, net,
            env.observation_space.shape,
            env.action_space.shape,
            config
        )
    else:
        # BUG FIX: previously 'ppo' was unbound on this path, so the return
        # below raised UnboundLocalError right after logging.  Now the
        # failure is explicit: log and return None.  (If logger.error already
        # aborts in this project, the return is simply never reached.)
        logger.error('P', 'Unknown Environment {} ({})', env, type(env))
    return ppo
| StarcoderdataPython |
3240024 |
# author: https://blog.furas.pl
# date: 2020.07.16
# link: https://stackoverflow.com/questions/62921395/pandas-include-key-to-json-file/
import requests
import pandas as pd
import json
url = 'http://www.fundamentus.com.br/resultado.php'
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36"}
response = requests.get(url, headers=headers)
# First table on the page holds the stock fundamentals.
dfs = pd.read_html(response.text)
table = dfs[0]
table.to_json('table7.json', orient='records', indent=2)
# Read the file back.  'with' closes the handle (the original left it open),
# and json.load replaces the manual read()+loads() pair.
with open('table7.json', 'r') as jasonfile:
    jason_object = json.load(jasonfile)
# Re-key the records by their 'Papel' (ticker) field, removing it from the
# per-record payload.
new_data = dict()
for item in jason_object:
    key = item.pop('Papel')
    new_data[key] = item
print(new_data)
| StarcoderdataPython |
3220702 | <reponame>corycrean/moveit2
import os
import yaml
from launch import LaunchDescription
from launch.actions import ExecuteProcess
from launch_ros.actions import Node
from ament_index_python.packages import get_package_share_directory
import xacro
def load_file(package_name, file_path):
    """Return the text of *file_path* inside *package_name*'s share directory,
    or None when the file cannot be read."""
    share_dir = get_package_share_directory(package_name)
    full_path = os.path.join(share_dir, file_path)
    try:
        with open(full_path, "r") as handle:
            return handle.read()
    except EnvironmentError:  # parent of IOError, OSError *and* WindowsError where available
        return None
def load_yaml(package_name, file_path):
    """Parse the YAML file *file_path* inside *package_name*'s share directory,
    returning its contents or None when the file cannot be read."""
    share_dir = get_package_share_directory(package_name)
    full_path = os.path.join(share_dir, file_path)
    try:
        with open(full_path, "r") as handle:
            return yaml.safe_load(handle)
    except EnvironmentError:  # parent of IOError, OSError *and* WindowsError where available
        return None
def generate_launch_description():
    """Assemble the launch description for the MoveIt Servo pose-tracking demo.

    Loads the Panda URDF/SRDF plus servo, pose-tracking, kinematics and
    joint-limit parameter files, then declares RViz, the robot state
    publisher, a static world->panda_link0 transform, the pose-tracking
    demo node, ros2_control, and the controller spawner processes.
    """
    # Get URDF and SRDF
    robot_description_config = xacro.process_file(
        os.path.join(
            get_package_share_directory("moveit_resources_panda_moveit_config"),
            "config",
            "panda.urdf.xacro",
        )
    )
    robot_description = {"robot_description": robot_description_config.toxml()}
    robot_description_semantic_config = load_file(
        "moveit_resources_panda_moveit_config", "config/panda.srdf"
    )
    # NOTE(review): load_file/load_yaml return None on a missing file; that
    # None would propagate into the node parameters below unchecked.
    robot_description_semantic = {
        "robot_description_semantic": robot_description_semantic_config
    }
    # Get parameters for the Pose Tracking node
    pose_tracking_yaml = load_yaml("moveit_servo", "config/pose_tracking_settings.yaml")
    pose_tracking_params = {"moveit_servo": pose_tracking_yaml}
    # Get parameters for the Servo node
    servo_yaml = load_yaml(
        "moveit_servo", "config/panda_simulated_config_pose_tracking.yaml"
    )
    servo_params = {"moveit_servo": servo_yaml}
    kinematics_yaml = load_yaml(
        "moveit_resources_panda_moveit_config", "config/kinematics.yaml"
    )
    joint_limits_yaml = {
        "robot_description_planning": load_yaml(
            "moveit_resources_panda_moveit_config", "config/joint_limits.yaml"
        )
    }
    # RViz
    rviz_config_file = (
        get_package_share_directory("moveit_servo")
        + "/config/demo_rviz_pose_tracking.rviz"
    )
    rviz_node = Node(
        package="rviz2",
        executable="rviz2",
        name="rviz2",
        # prefix=['xterm -e gdb -ex run --args'],
        output="log",
        arguments=["-d", rviz_config_file],
        parameters=[
            robot_description,
            robot_description_semantic,
            kinematics_yaml,
            joint_limits_yaml,
        ],
    )
    # Publishes tf's for the robot
    robot_state_publisher = Node(
        package="robot_state_publisher",
        executable="robot_state_publisher",
        output="screen",
        parameters=[robot_description],
    )
    # A node to publish world -> panda_link0 transform
    static_tf = Node(
        package="tf2_ros",
        executable="static_transform_publisher",
        name="static_transform_publisher",
        output="log",
        arguments=["0.0", "0.0", "0.0", "0.0", "0.0", "0.0", "world", "panda_link0"],
    )
    pose_tracking_node = Node(
        package="moveit_servo",
        executable="servo_pose_tracking_demo",
        # prefix=['xterm -e gdb -ex run --args'],
        output="screen",
        parameters=[
            robot_description,
            robot_description_semantic,
            kinematics_yaml,
            pose_tracking_params,
            servo_params,
            joint_limits_yaml,
        ],
    )
    # ros2_control using FakeSystem as hardware
    ros2_controllers_path = os.path.join(
        get_package_share_directory("moveit_resources_panda_moveit_config"),
        "config",
        "panda_ros_controllers.yaml",
    )
    ros2_control_node = Node(
        package="controller_manager",
        executable="ros2_control_node",
        parameters=[robot_description, ros2_controllers_path],
        output={
            "stdout": "screen",
            "stderr": "screen",
        },
    )
    # Load controllers
    load_controllers = []
    for controller in ["panda_arm_controller", "joint_state_broadcaster"]:
        load_controllers += [
            ExecuteProcess(
                cmd=["ros2 run controller_manager spawner {}".format(controller)],
                shell=True,
                output="screen",
            )
        ]
    return LaunchDescription(
        [
            rviz_node,
            static_tf,
            pose_tracking_node,
            ros2_control_node,
            robot_state_publisher,
        ]
        + load_controllers
    )
| StarcoderdataPython |
1649649 | <filename>utils/logConf.py
import logging
# Log line layout: timestamp, source file:line, padded level name, message.
# NOTE(review): this module-level name shadows the builtin ``format``.
format="%(asctime)s [%(filename)s:%(lineno)d] %(levelname)-8s %(message)s"
# Configuring at import time applies this handler/format process-wide
# (module has an intentional import side effect).
logging.basicConfig(level=logging.DEBUG, format=format)
1641187 | from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from rdflib import Graph, Namespace, Literal
from os.path import abspath, dirname, join
# Destination for the serialized activity hierarchy, resolved relative to
# this script's location so it works from any working directory.
hierarchy_path = join(dirname(abspath(__file__)),
                      '../resources/activity_hierarchy.rdf')
def save_hierarchy(g, path):
    """Serialize graph *g* as N-Triples, sort the rows, and write to *path*."""
    with open(path, 'wb') as out_file:
        serialized = g.serialize(format='nt')
        # Collapse the doubled newlines the serializer emits and drop the
        # trailing blank line, then sort triples for a deterministic file.
        cleaned = serialized.replace(b'\n\n', b'\n').strip()
        out_file.write(b'\n'.join(sorted(cleaned.split(b'\n'))))
def main():
    """Build the INDRA activity hierarchy graph and save it as sorted N-Triples."""
    indra_ns = 'http://sorger.med.harvard.edu/indra/'
    rn = Namespace(indra_ns + 'relations/')
    en = Namespace(indra_ns + 'entities/')
    g = Graph()
    isa = rn.term('isa')
    # Each (child, parent) pair declares one child-isa-parent triple.
    hierarchy_pairs = [
        ('transcription', 'activity'),
        ('catalytic', 'activity'),
        ('gtpbound', 'activity'),
        ('kinase', 'catalytic'),
        ('phosphatase', 'catalytic'),
    ]
    for child, parent in hierarchy_pairs:
        g.add((en.term(child), isa, en.term(parent)))
    save_hierarchy(g, hierarchy_path)
if __name__ == '__main__':
    main()
| StarcoderdataPython |
22930 | from pytracetable.core import tracetable
# Public API of the package: re-export only ``tracetable``.
__all__ = [
    'tracetable',
]
| StarcoderdataPython |
1750825 | import os.path
import re
# Read the package version from the VERSION file shipped next to this module:
# exactly one line may contain a dotted version number.
with open(os.path.join(os.path.dirname(__file__), 'VERSION')) as f:
    version_content = [line for line in f if re.search(r'([\d.]+)', line)]
if len(version_content) != 1:
    raise RuntimeError('Invalid format of VERSION file.')
# BUGFIX: strip() drops the trailing newline that file iteration preserves,
# so __version__ compares cleanly against plain version strings.
__version__ = version_content[0].strip()
| StarcoderdataPython |
1798842 | <reponame>82ndAirborneDiv/BMGAP
#!/usr/bin/env python3.4
### Phylogeny Building Tool v1
### Import Modules ###
import sys
from Bio import SeqIO
from Bio.Phylo.TreeConstruction import DistanceCalculator
from Bio.Phylo.TreeConstruction import _DistanceMatrix
from Bio.Phylo.TreeConstruction import DistanceTreeConstructor
from Bio import Phylo
import os
import string
import re
import operator
import csv
import pprint as pp
import locale
import argparse
import datetime
import json
import time
import shutil
import math
import collections
import urllib3
from multiprocessing import Pool,Process, Queue
import tempfile
import sqlite3
from random import randint
from subprocess import *
from scipy import stats
import numpy as np
from operator import attrgetter
#import matplotlib.pyplot as plt
# Locale text encoding used to decode subprocess (mash) output bytes.
encoding = locale.getdefaultlocale()[1]
# Shared connection pool for the BMGAP REST API calls (import side effect).
http = urllib3.PoolManager()
##PATHS###
SCRIPT_PATH = os.path.realpath(__file__)
DIR_PATH = os.path.dirname(SCRIPT_PATH)
home_dir = os.path.expanduser("~")
# Hard-coded helper-script and reference locations under the user's home;
# presumably specific to the original author's environment — TODO confirm.
mask_map_script = "{}/ML/tools/mask_mapped_aln/mask_mapped_aln.py".format(home_dir)
adjust_size_script = "{}/ML/tools/mask_mapped_aln/adjust_partition_size.py".format(home_dir)
pacbio_ref_sketch = "{}/ML/Projects/NadavTopaz/Scripts/lib/pacbio_references.msh".format(home_dir)
reference_files = "{}/ML/Projects/NadavTopaz/Scripts/reference_pacbios".format(home_dir)
#GENOME_DIR = "{}/ML/Projects/NadavTopaz/All_nm/fastas".format(home_dir)
# print(home_dir)
# print(mask_map_script)
# print(GENOME_DIR)
# print(adjust_size_script)
# Set by set_output() at startup; read by the snippy/tree-building helpers.
OUTPUT_DIR = ""
VERBOSITY = False
# NOTE(review): weights and fasta_extensions — only fasta_extensions is
# visibly used in this file (manual genome-directory filtering in main()).
weights = {"1":1.0,
           "2":0,
           "3":0}
fasta_extensions = [".fa",".fasta",".fna"]
def set_output(output):
    """Create the run-wide output directory and record it in OUTPUT_DIR.

    Parameters
    ----------
    output : str
        Path of the directory that all downstream steps write into.
    """
    global OUTPUT_DIR
    OUTPUT_DIR = output
    if os.path.isdir(output):
        print("Output Directory",output,"aleady exists, not creating")
    else:
        # BUGFIX: os.makedirs instead of os.system("mkdir ...") — no shell
        # involved, safe with spaces/metacharacters in the path, and creates
        # missing parent directories as well.
        os.makedirs(output)
        print("Created Output Directory",output)
def pick_reference(query_sketch, threads):
    """Return the path of the PacBio reference closest to the query sketch.

    Runs ``mash dist`` between the bundled PacBio reference sketch and
    *query_sketch*, keeping the reference with the smallest distance seen.
    """
    raw = check_output(["mash","dist","-p",threads,pacbio_ref_sketch,query_sketch],shell=False)
    best_dist = 100.0
    best_ref = ""
    for raw_line in re.split(b"\n", raw.rstrip()):
        fields = raw_line.decode(encoding).split("\t")
        ref_assembly = fields[0]
        query_name = fields[1]  # parsed but not needed for the minimum search
        mash_dist = float(fields[2])
        # Strict < keeps the first of any tied-minimum references.
        if mash_dist < best_dist:
            best_dist = mash_dist
            best_ref = ref_assembly
    return os.path.join(reference_files, best_ref)
def pick_genomes(query_sketch,mash_db,threads,max_num,force_max):
    """Select up to *max_num* background genomes per query by Mash distance.

    Runs ``mash dist`` between *mash_db* and *query_sketch*, then, per
    query, keeps the closest hits — skipping self-hits and hits sharing the
    query's basename prefix (text before the first underscore).

    Parameters
    ----------
    query_sketch : str
        Path to the Mash sketch of the query assemblies.
    mash_db : str
        Path to the Mash sketch database of candidate genomes.
    threads : str
        Thread count forwarded to ``mash dist -p``.
    max_num : int
        Maximum number of hits kept per query.
    force_max : bool
        Accepted for interface compatibility; currently unused.

    Returns
    -------
    dict
        {"all_genomes": [hit paths], "details": {hit: {"query", "dist"}}}
    """
    print("Calculating Distances")
    mash_results_dict = {}
    mash_results = check_output(["mash","dist","-p",threads,mash_db,query_sketch],shell=False)
    mash_result = re.split(b"\n",mash_results.rstrip())
    for line in mash_result:
        query_name = line.decode(encoding).split("\t")[1]
        query_name = os.path.basename(query_name)
        if query_name not in mash_results_dict:
            mash_results_dict[query_name] = {"mash_results":{},"mash_dists":[]}
        hit = line.decode(encoding).split("\t")[0]
        # BUGFIX: hit_path was only assigned when the hit contained "/",
        # leaving it stale (or undefined) otherwise; default it to the hit.
        hit_path = hit
        if "/" in hit:
            hit = os.path.basename(hit)
        if "_" in hit:
            hit_basename = hit.split("_")[0]
        else:
            hit_basename = hit
        # BUGFIX: query_basename was undefined (NameError or a stale value
        # from a previous iteration) when the name had no underscore.
        if "_" in query_name:
            query_basename = query_name.split("_")[0]
        else:
            query_basename = query_name
        # Skip self-hits and hits from the same isolate prefix.
        if query_basename == hit_basename:
            continue
        if query_name == hit:
            continue
        mash_dist = float(line.decode(encoding).split("\t")[2])
        p_val = line.decode(encoding).split("\t")[3]
        match_hash = line.decode(encoding).split("\t")[4]
        mash_score = mash_dist
        mash_results_dict[query_name]["mash_results"][hit] = {"score":mash_score,"p_val":p_val,"hash":match_hash,"hit":hit,"path":hit_path}
        mash_results_dict[query_name]["mash_dists"].append(mash_score)
    final_genomes = {"all_genomes":[],"details":{}}
    for query in mash_results_dict:
        count = 0
        # Walk each query's hits from closest to farthest.
        for hit,_ in sorted(mash_results_dict[query]["mash_results"].items(),key=lambda x: float(x[1]["score"])):
            # BUGFIX: the dedup check compared the hit *name* against a list
            # of hit *paths*, so it could never fire; test the details dict
            # (keyed by hit name) instead.
            if hit not in final_genomes["details"]:
                score = mash_results_dict[query]["mash_results"][hit]["score"]
                if count < max_num:
                    hit_path = mash_results_dict[query]["mash_results"][hit]["path"]
                    final_genomes["all_genomes"].append(hit_path)
                    final_genomes["details"][hit] = {"query":query,"dist":score}
                    count+=1
    print(final_genomes["all_genomes"])
    print(len(final_genomes["all_genomes"]))
    # NOTE: dead diagnostic code that recomputed per-query scores (and could
    # KeyError on hits skipped for other queries) was removed here.
    return final_genomes
def mash_sketch(threads,genome_dir,temp_dir,sketch_info):
    """Sketch every file in *genome_dir* into one Mash sketch under *temp_dir*.

    NOTE(review): the incoming ``sketch_info`` argument is ignored and
    rebuilt from scratch before being returned.

    Returns a dict: {"sketch_dict": {"path": <.msh path>}, "temp_dir": temp_dir}.
    """
    print("Running Mash")
    threads = threads
    kmer_size = 32
    sketch_size = 10000
    sketch_info_dict = {}
    # shell=True with interpolated paths: required here for the "{}/*" glob,
    # but paths with spaces/metacharacters would break the command.
    call(["mash sketch -k {} -p {} -s {} -o {} {}/*".format(kmer_size,threads,sketch_size,os.path.join(temp_dir,"nm_sketch"),genome_dir)], shell=True)
    sketch_info_dict["path"] = temp_dir+"/nm_sketch.msh"
    sketch_info = {"sketch_dict":sketch_info_dict,"temp_dir":temp_dir}
    return sketch_info
def mash_sketch_list(threads, mash_assembly_list, output_dir, proj_name, temp_dir):
    """Sketch every assembly listed in *mash_assembly_list* into one Mash DB.

    Writes the list to a temp file for ``mash sketch -l``, names the output
    sketch with a time-derived suffix for uniqueness, removes the temp list,
    and returns the path of the resulting ``.msh`` file.
    """
    print("Running Mash")
    kmer_size = 32
    sketch_size = 10000
    list_path = os.path.join(temp_dir, "temp_assembly_list")
    with open(list_path, "w") as handle:
        handle.write("".join("{}\n".format(obj) for obj in mash_assembly_list))
    mash_assembly_list = list_path
    # Fractional part of the current time keeps repeated runs from colliding.
    unique_time = str(time.time()).split(".")[1]
    out_sketch_name = "{}_{}".format("BMGAP_DATA_MASH_DB", unique_time)
    output_sketch = os.path.join(output_dir, out_sketch_name)
    call(["mash sketch -k {} -p {} -l {} -s {} -o {}".format(kmer_size,threads,mash_assembly_list,sketch_size,output_sketch)], shell=True)
    call(["rm {}".format(mash_assembly_list)], shell=True)
    return("{}.msh".format(output_sketch))
def make_mash_matrix(threads,genome_list,output_dir,proj_name,temp_dir):
    """Sketch *genome_list* and return an all-vs-all Mash distance matrix.

    The matrix is returned as tab-separated text: a header row of genome
    names (first cell empty) followed by one row per genome with its name
    and its distance to every genome in header order.
    """
    print("Running Mash")
    kmer_size = 32
    sketch_size = 10000
    with open(os.path.join(temp_dir,"temp_assembly_list"),"w") as f:
        for obj in genome_list:
            f.write("{}\n".format(obj))
    mash_assembly_list = os.path.join(temp_dir,"temp_assembly_list")
    unique_time = str(time.time()).split(".")[1]
    out_sketch_name = "{}_{}".format(proj_name,unique_time)
    output_sketch = os.path.join(output_dir,out_sketch_name)
    call(["mash sketch -k {} -p {} -l {} -s {} -o {}".format(kmer_size,threads,mash_assembly_list,sketch_size,output_sketch)], shell=True)
    call(["rm {}".format(mash_assembly_list)], shell=True)
    output_sketch = "{}.msh".format(output_sketch)
    # Self-vs-self distance table over the new sketch.
    mash_results = check_output(["mash","dist","-p",threads,output_sketch,output_sketch],shell=False)
    mash_result = re.split(b"\n",mash_results.rstrip())
    headers = []
    data_set = {}
    for line in mash_result:
        line = line.decode(encoding)
        # Reduce paths to bare sample names (basename before first dot).
        query = os.path.basename(line.split("\t")[0]).split(".")[0]
        subject = os.path.basename(line.split("\t")[1]).split(".")[0]
        score = float(line.split("\t")[2])
        if query not in data_set:
            data_set[query] = {}
        if subject not in data_set[query]:
            data_set[query][subject] = score
        if query not in headers:
            headers.append(query)
    i=0
    final_text="\t"
    header_dict={}
    for query in sorted(headers):
        header_dict[i] = query
        i+=1
        final_text+="{}\t".format(query)
    final_text+="\n"
    # BUGFIX: str.replace returns a new string; the original discarded the
    # result, so trailing tabs were never actually removed.
    final_text = final_text.replace("\t\n","\n")
    for query in sorted(data_set):
        final_text+="{}\t".format(query)
        for i in range(0,len(headers)):
            current_score = data_set[query][header_dict[i]]
            final_text+="{}\t".format(current_score)
        final_text+="\n"
    # BUGFIX (same as above), hoisted out of the loop: the pattern only
    # occurs at line ends, so one pass over the finished text is equivalent.
    final_text = final_text.replace("\t\n","\n")
    return(final_text)
def call_snippy(ref, file_path):
    """Run snippy for one assembly against *ref*.

    The snippy output directory is named after the sample: the file's
    basename with any ``.fasta`` suffix removed, truncated at the first
    underscore. Always returns True (used as a pool-completion marker).
    """
    basename = file_path.split("/")[-1]
    sample = basename.replace(".fasta", "") if ".fasta" in basename else basename
    if "_" in sample:
        sample = sample.split("_")[0]
    out_dir = os.path.join(OUTPUT_DIR, "snippy_dir", "{}".format(sample))
    call(
        ["snippy", "--outdir", out_dir, "--cpus", "1", "--ref", ref,
         "--ctgs", file_path, "--force"],
        shell=False,
    )
    return True
def snippy_check(snippy_dir):
    """Return entries in *snippy_dir* whose size is below the directory mean.

    Used as a heuristic for snippy runs that produced truncated output and
    should be re-run.

    Parameters
    ----------
    snippy_dir : str
        Directory whose immediate entries are inspected via ``os.stat``.

    Returns
    -------
    list of str
        Entry names with a byte size strictly below the mean size; empty
        for an empty directory (the original divided by zero there).
    """
    entries = os.listdir(snippy_dir)
    if not entries:
        # BUGFIX: avoid ZeroDivisionError when the directory is empty.
        return []
    size_dict = {}
    total_size = 0.0
    for snp_file in entries:
        size = os.stat(os.path.join(snippy_dir, snp_file)).st_size
        total_size += float(size)
        size_dict[snp_file] = size
    avg_size = total_size / len(entries)
    return [name for name, size in size_dict.items() if size < avg_size]
def run_snippy(final_genomes,threads,query_assemblies,dir_flag):
    """Run snippy for all background and query assemblies in parallel.

    Dispatches one call_snippy() job per assembly on a process pool, then
    re-runs (serially) any job whose output looks truncated per
    snippy_check(). Returns the shared snippy output directory path.

    NOTE(review): ``dir_flag`` is accepted but unused here.
    """
    processes = int(threads)
    pool = Pool(processes)
    snippy_dir = os.path.join(OUTPUT_DIR,"snippy_dir")
    snippy_list = []
    if os.path.isdir(snippy_dir):
        print("Snippy Directory",snippy_dir,"aleady exists, not creating")
    else:
        # NOTE(review): shell mkdir; breaks on paths with spaces.
        os.system("mkdir {}".format(snippy_dir))
        print("Created Output Directory",snippy_dir)
    for file_path in final_genomes["all_genomes"]:
        snippy_list.append(file_path)
    for file_path in query_assemblies:
        snippy_list.append(file_path)
    ref = final_genomes["ref"]
    # Fan out one snippy run per assembly; result.get() blocks until done
    # (and would re-raise any worker exception).
    snippy_time = [pool.apply_async(call_snippy,args=(ref,in_file)) for in_file in snippy_list]
    output = [result.get() for result in snippy_time]
    pool.terminate()
    # Serial retry for outputs flagged as suspiciously small.
    redo_list = snippy_check(snippy_dir)
    if len(redo_list) > 0:
        for obj in redo_list:
            for item in snippy_list:
                if obj in item:
                    call_snippy(ref,item)
    return snippy_dir
# def update_mash_sketch(mash_db,assembly_list):
# mash_info_assemblies = {}
# mash_info = check_output(["mash info {}".format(mash_db)],shell=True)
# pp.pprint(mash_info)
# mash_info = mash_info.decode(encoding)
# mash_info_lines = mash_info.split("\n")
# print(mash_info_lines)
# for line in mash_info_lines:
# print(line)
# if line.strip() == "":
# continue
# else:
# if "[Hashes]" in line:
# continue
# line = line.replace(" ","***")
# line_items_pre = line.split("***")
# line_items = []
# for obj in line_items_pre:
# if obj.strip() != "":
# line_items.append(obj)
# if len(line_items) == 4:
# mash_info_assemblies[line_items[2]] = ""
# pp.pprint(mash_info_assemblies)
# check_set = []
# for genome_file_path in assembly_list:
# genome_file = genome_file_path.split("\\")[-1]
# if genome_file not in mash_info_assemblies:
# check_set.append(genome_file_path)
# return(check_set)
# def paste_sketch(threads,mash_db,input_file,temp_dir):
# print("Running Mash")
# threads = threads
# in_file_name = input_file.split("\\")[-1]
# kmer_size = 32
# sketch_size = 10000
# call(["mash sketch -k {} -p {} -s {} -o {} {}".format(kmer_size,threads,sketch_size,os.path.join(temp_dir,"input_sketch.msh"),input_file)], shell=True)
# sketch_path = os.path.join(temp_dir,"input_sketch.msh")
# call(["mash paste {} {}".format(mash_db,sketch_path)],shell=True)
# print("added {} to mash db".format(in_file_name))
# call(["rm {}".format(os.path.join(temp_dir,"input_sketch.msh"))],shell=True)
def call_bmgap_api():
    """Fetch all BMGAP sample records and index usable assemblies by ID.

    Pages through the BMGAP REST API (1000 records per page), skips records
    that are QC-flagged or lack an assembly path, and returns
    {identifier: {"lab_id", "assembly_path", "assembly_file"}}.
    """
    final_data = {} #set up dict to hold our final data
    #since there is no direct way to filter by run using the API yet, we will use a different approach
    #we will pull all of the data from BMGAP, and then filter it ourselves by the run that we want
    #since there is no way to pull all of the data from BMGAP, we will do one API call with default settings to get the count the total # of records, then another to pull all of those records
    url_1 = 'http://amdportal-sams.cdc.gov/bmgap-api/samples' #first url
    #this is the actual API request below. REST APIs usually have two options (GET and POST). GET is when we want to get data, POST is when we want to submit data. Either one can also return data.
    request = http.request("GET",url_1) #request is a httpresponse object, we want the data stored in it, and we want to decode it from bytes to utf-8 unicode
    request_data = json.loads(request.data.decode('utf-8')) #this handles the decoding, and it converts the json to a python dictionary "request_data"
    #the response has three main categories: "docs" (record data), "total" (record count) and "limit" (page size, 50 by default)
    total_records = request_data["total"] #get total record count
    pages = math.ceil(total_records/1000)
    print("grabbing {} BMGAP records across {} pages".format(total_records,pages)) #print how many records we will get in the next API call
    merged_data = []
    for i in range(1,pages+1):
        print("getting page {}".format(i))
        url_2 = 'http://amdportal-sams.cdc.gov/bmgap-api/samples?page={}&perPage={}'.format(i,1000) #Now that we know how many records exist, we will pull them all by adding the perPage filter
        request = http.request("GET",url_2)
        request_data = json.loads(request.data.decode('utf-8')) #override our previous request_data with the total records
        merged_data.append(request_data["docs"])
    # Count fetched records as a sanity check against "total".
    total = 0
    for obj in merged_data:
        for record in obj:
            total+=1
    print("got {} BMGAP records".format(total)) #make sure we got them all by printing the count of the records
    # Keep only records that have mash data, are not QC-flagged, and carry
    # an assembly path.
    for obj in merged_data:
        for record in obj:
            if "mash" in record:
                if "QC_flagged" in record: #if the record has been QC flagged
                    if record["QC_flagged"]: #true means it was flagged as bad quality
                        continue #skip
                    else:
                        if "assemblyPath" in record:
                            assembly_path = record["assemblyPath"]
                        else:
                            continue
                        lab_id = record["Lab_ID"]
                        bmgap_id = record["identifier"]
                        assembly_file = assembly_path.split("/")[-1]
                        final_data[bmgap_id] = {"lab_id":lab_id,"assembly_path":assembly_path,"assembly_file":assembly_file}
    return final_data
def error(error_dict):
    """Print every queued error (prefix, then message) and abort the run.

    *error_dict* maps message -> prefix, matching how main() populates it.
    """
    for message in error_dict:
        prefix = error_dict[message]
        print(prefix, message)
    exit()
def main():
    """Command-line entry point for Automated Phylogeny Builder v1.

    Workflow: parse arguments, create the output directory, sketch the query
    assemblies, pick (or accept) background genomes and a reference, then
    build either a full SNP phylogeny (snippy -> gubbins -> RAxML) or, with
    --fast, a Mash-distance neighbour-joining tree.
    """
    ### Main Arg Parse ###
    parser = argparse.ArgumentParser(description="Automated Phylogeny Builder v1")
    parser.add_argument('-d','--indir',help="Input Directory: Directory of FASTA files to analyze")
    parser.add_argument('-o','--out',help="Output Directory", required=True)
    parser.add_argument('-t','--threads',help="Number of max threads to use (default=1)",default="1")
    parser.add_argument('-b','--mash_db',help="Provide prebuilt mash DB, otherwise build from scratch")
    parser.add_argument('-f','--fast',help="Fast option for distance based neighbor joining tree", action="store_true")
    parser.add_argument('-m','--max_num',help="Maximum number of isolates to include (default=50)",default="50")
    parser.add_argument('-g','--genomes',help="Provide genome directory to build tree with instead of automatically picking, requires -r flag")
    parser.add_argument('-r','--reference',help="Required with -g flag; provide reference to use for phylogeny when providing genome directory")
    parser.add_argument('-s','--snippy',help="existing snippy dir, requires -g and -r")
    parser.add_argument('-p','--proj_name',help="project prefix - will be used to label all files associated with project", required=True)
    args = vars(parser.parse_args())
    start_time = time.time()
    ### Print Args ###
    print ("Running with the following parameters:")
    for arg in args:
        print (arg,":",args[arg])
    ### Set Output (Create if doesn't exist already) ###
    set_output(args["out"])
    ### Initialize variables ###
    automatic_selection = True
    threads = args["threads"]
    error_dict = {}
    temp_dir = tempfile.mkdtemp()
    project_name = args["proj_name"]
    dir_flag = False
    mash_assembly_list = []
    max_num = int(args["max_num"])
    # BUGFIX: force_max is consumed by pick_genomes() below but was never
    # defined, so the automatic-selection path died with a NameError.
    force_max = False
    if args["fast"]:
        need_ref = False
    else:
        need_ref = True
    if args["mash_db"]:
        mash_db = args["mash_db"]
    if args["indir"]:
        input_dir = args["indir"]
    query_assemblies = []
    ### Validate mutually-dependent arguments ###
    if args["snippy"]:
        if not args["genomes"]:
            error_dict["snippy dir provided without genome dir, exiting"] = "Input error: "
            error(error_dict)
        if not args["reference"]:
            error_dict["snippy dir provided without reference, exiting"] = "Input error: "
            error(error_dict)
        automatic_selection = False
    if args["genomes"] and args["reference"]:
        input_dir = args["genomes"]
        reference = args["reference"]
        dir_flag = True
        automatic_selection = False
    if args["genomes"] and not args["reference"]:
        error_dict["Genome dir provided without reference, exiting"] = "Input error: "
        error(error_dict)
    if args["reference"] and not args["genomes"]:
        error_dict["Reference provided without genome directory, exiting"] = "Input error: "
        error(error_dict)
    # Robustness: with neither -d nor -g there is no input to iterate below.
    if not args["indir"] and not args["genomes"]:
        error_dict["No input directory (-d) or genome directory (-g) provided, exiting"] = "Input error: "
        error(error_dict)
    in_file_counter = 0
    for in_file in os.listdir(input_dir):
        in_file_path = os.path.join(input_dir,in_file)
        query_assemblies.append(in_file_path)
        in_file_counter +=1
    # Budget of background genomes available to each query.
    max_num_per_query = (max_num-in_file_counter)/in_file_counter
    query_sketch = mash_sketch_list(threads,query_assemblies,OUTPUT_DIR,project_name,temp_dir)
    if need_ref:
        ref_path = pick_reference(query_sketch,threads)
    if not args["mash_db"]:
        # No prebuilt DB: pull assembly paths from BMGAP and sketch them.
        bmgap_data = call_bmgap_api()
        for record in bmgap_data:
            mash_assembly_list.append(bmgap_data[record]["assembly_path"])
        mash_db = mash_sketch_list(threads,mash_assembly_list,OUTPUT_DIR,project_name,temp_dir)
    if automatic_selection:
        final_genomes = pick_genomes(query_sketch,mash_db,args["threads"],int(max_num_per_query),force_max)
        if need_ref:
            final_genomes["ref"] = ref_path
            print(ref_path)
    else:
        # User supplied a genome directory and reference explicitly.
        final_genomes = {"all_genomes":[],"details":{},"ref":args["reference"]}
        for infile in os.listdir(args["genomes"]):
            for ext in fasta_extensions:
                if ext in infile:
                    infile_path = os.path.join(args["genomes"],infile)
                    if infile_path not in final_genomes["all_genomes"]:
                        final_genomes["all_genomes"].append(infile_path)
                    continue
    if not args["fast"]:
        ### Full SNP phylogeny: snippy -> snippy-core -> gubbins -> RAxML ###
        if not args["snippy"]:
            snippy_dir = run_snippy(final_genomes,args["threads"],query_assemblies,dir_flag)
        else:
            snippy_dir = args["snippy"]
        redo_list = snippy_check(snippy_dir)
        for obj in redo_list:
            print(obj)
            for genome in os.listdir(input_dir):
                print(genome)
                if obj in genome:
                    print("found")
                    # BUGFIX: this loop referenced the undefined names
                    # ``genome_dir`` and (in the automatic path) ``reference``;
                    # use the validated input_dir and the selected reference.
                    redo_obj = os.path.join(input_dir,genome)
                    call_snippy(final_genomes["ref"],redo_obj)
        call(["snippy-core --prefix={}_core --aformat=fasta {}/*".format(project_name,snippy_dir)], shell=True)
        p2 = Popen(["mv {}_core* {}".format(project_name,snippy_dir)], shell=True)
        p2.wait()
        p3 = Popen(["python3 {} {}/{}_core.full.aln -o {}".format(mask_map_script,snippy_dir,project_name,OUTPUT_DIR)], shell=True)
        p3.wait()
        masked_aln_file = "{}/{}_core.full_masked.aln".format(OUTPUT_DIR,project_name)
        partition_file = "{}/{}_core.full_partition.txt".format(OUTPUT_DIR,project_name)
        print("gubbins")
        p4 = Popen(["run_gubbins.py -c {} -i 10 -u -p {}/gubbins_masked -v -t raxml {}".format(args["threads"],OUTPUT_DIR,masked_aln_file)], shell=True)
        p4.wait()
        gubbins_phylip_file = "{}/gubbins_masked.filtered_polymorphic_sites.phylip".format(OUTPUT_DIR)
        p5 = Popen(["python3 {} {} {}".format(adjust_size_script,gubbins_phylip_file,partition_file)], shell=True)
        p5.wait()
        abs_output = os.path.abspath(OUTPUT_DIR)
        print("raxml")
        p6 = Popen(["raxmlHPC-PTHREADS -s {} -w {} -n {}_out --asc-cor=stamatakis -q {} -m GTRGAMMAX -T {} -N autoMRE -p 6420662893125220392 -f a -x 7125452922221827660".format(gubbins_phylip_file,abs_output,project_name,partition_file,args["threads"])], shell=True)
        p6.wait()
    else:
        ### Fast path: all-vs-all Mash distances -> neighbour-joining tree ###
        mash_matrix = make_mash_matrix(threads,final_genomes["all_genomes"],OUTPUT_DIR,project_name,temp_dir)
        i=2
        matrix = []
        names = []
        firstLine = True
        mash_matrix_lines = mash_matrix.split("\n")
        for line in mash_matrix_lines:
            if line.strip() != "":
                if firstLine:
                    print(line)
                    current_names = line.split("\t")
                    for obj in current_names:
                        if len(obj) > 0:
                            names.append(obj)
                    firstLine = False
                else:
                    # Build the lower-triangular matrix Biopython expects:
                    # row k keeps its first k entries (self-distance last).
                    sub_matrix = []
                    values = line.split("\t")
                    for q in range(1,i):
                        val = float(values[q])
                        sub_matrix.append(val)
                    matrix.append(sub_matrix)
                    i+=1
        print("building tree")
        dm = _DistanceMatrix(names,matrix)
        constructor = DistanceTreeConstructor(method="nj")
        tree = constructor.nj(dm)
        Phylo.write([tree],"my_tree.tree","newick")
if __name__ == "__main__":
    main()
| StarcoderdataPython |
171835 | <reponame>mdaal/rap
import numpy as np
import copy
import sys
def circle_fit(loop):
S21 = loop.z
Freq = loop.freq
LargeCircle = 10
def pythag(m,n):
'''compute pythagorean distance
sqrt(m*m + n*n)'''
return np.sqrt(np.square(m) + np.square(n))
def eigen2x2(a,b,c):
'''a,b,c - matrix components [[a c]
[c d]]
d1,d2 - eigen values where |d1| >= |d2|
(Vx,Vy) - unit eigen vector of d1, Note: (-Vy,Vx) is eigen vector for d2
'''
disc = pythag(a-b,2*c) # discriminant
d1 = max(a+b + disc, a+b - disc)/2
d2 = (a*b-c*c)/d1
if np.abs(a-d1) > np.abs(b-d1):
f = pythag(c,d1-a)
if f == 0.0:
Vx = 1.
Vy = 0.
else:
Vx = c/f
Vy = (d1-a)/f
else:
f = pythag(c,d1-b)
if f == 0.0:
Vx = 1.
Vy = 0.
else:
Vx = (d1-b)/f
Vy = c/f
return d1,d2,Vx,Vy
def F(x,y,a,b):
''' computes and returns the value of the objective fuction.
do this for the case of a large circle and a small circle '''
if (np.abs(a) < LargeCircle) and (np.abs(b) < LargeCircle): # Case of Small circle
xx = x - a
yy = y - b
D = pythag(xx,yy)
r = D.mean()
return (np.square(D - r)).mean()
else: # Case of Large circle
a0 = a - x.mean()
b0 = b - y.mean()
d = 1.0/pythag(a0,b0)
dd = d*d
s = b0*d
c = a0*d
xx = x - x.mean()
yy = y - y.mean()
z = np.square(xx) + np.square(yy)
p = xx*c + yy*s
t = d*z - 2.0*p
g = t/(1+np.sqrt(1.+d*t))
W = (z+p*g)/(2.0+d*g)
Z = z
return Z.mean() - W.mean()*(2.0+d*d*W.mean())
def GradHessF(x,y,a,b):
'''Compute gradient of F, GradF = [F1,F2] and Hessian of F, HessF = [[A11 A12]
A12 A22]]
at point p = [a,b].
Note Hessian is symmetric.
'''
if (np.abs(a) < LargeCircle) and (np.abs(b) < LargeCircle): # Case of Small circle
xx = x - a
yy = y - b
r = pythag(xx,yy)
u = xx/r
v = yy/r
Mr = r.mean()
Mu = u.mean()
Mv = v.mean()
Muu = (u*u).mean()
Mvv = (v*v).mean()
Muv = (u*v).mean()
Muur = (u*u/r).mean()
Mvvr = (v*v/r).mean()
Muvr = (u*v/r).mean()
F1 = a + Mu * Mr - x.mean()
F2 = b + Mv * Mr - y.mean()
A11 = 1.0 - Mu * Mu - Mr * Mvvr
A22 = 1.0 - Mv * Mv - Mr * Muur
A12 = -1.0 * Mu * Mv + Mr * Muvr
else: # Case of Large circle
a0 = a - x.mean()
b0 = b - y.mean()
d = 1.0/pythag(a0,b0)
dd = d*d
s = b0*d
c = a0*d
xx = x - x.mean()
yy = y - y.mean()
z = np.square(xx) + np.square(yy)
p = xx*c + yy*s
t = 2.0*p - d*z
w = np.sqrt(1.0-d*t)
g = -1.0*t/(1.0+w)
g1 = 1.0/(1.0+d*g)
gg1 = g*g1
gg2 = g/(2.0 + d * g)
aa = (xx+g*c)/w
bb = (yy+g*s)/w
X = (xx*gg1).mean()
Y = (yy*gg1).mean()
R = (z+t*gg2).mean()
T = (t*gg1).mean()
W = (t*gg1*gg2).mean()
AA = (aa*aa*g1).mean()
BB = (bb*bb*g1).mean()
AB = (aa*bb*g1).mean()
AG = (aa*gg1).mean()
BG = (bb*gg1).mean()
GG = (g*gg1).mean()
U = (T-b*W)*c*0.5 - X + R*c*0.5
V = (T-b*W)*s*0.5 - Y + R*s*0.5
F1 = d * ((dd*R*U - d*W*c + T*c)*0.5 - X)
F2 = d * ((dd*R*V - d*W*s + T*s)*0.5 - Y)
UUR = ((GG-R*0.5)*c + 2.0*(AG-U))*c + AA
VVR = ((GG-R*0.5)*s + 2.0*(BG-V))*s + BB
UVR = ((GG-R*0.5)*c + (AG-U))*s + (BG-V)*c + AB
A11 = dd*(U*(2.0*c-dd*U) - R*s*s*0.5 - VVR*(1.0+dd*R*0.5))
A22 = dd*(V*(2.0*s-dd*V) - R*c*c*0.5 - UUR*(1.0+dd*R*0.5))
A12 = dd*(U*s + V*c + R*s*c*0.5 - dd*U*V + UVR*(1.0 + dd*R*0.5))
return F1,F2,A11,A22,A12
def sigma(x,y,loop_var):
'''estimate of Sigma = square root of RSS divided by N
gives the root-mean-square error of the geometric circle fit'''
dx = x-loop_var.a
dy = x-loop_var.b
loop_var.sigma = (pythag(dx,dy)-loop_var.r).mean()
def CircleFitByChernovHoussam(x,y, init, lambda_init):
REAL_EPSILON = sys.float_info.epsilon
REAL_MAX = sys.float_info.max
IterMAX=200
check_line= True
#dmin = 1.0
ParLimit2 = 100.
epsilon = 1.e+7*REAL_EPSILON
factor1 = 32.
factor2 = 32.
ccc = 0.4
factorUp = 10.
factorDown = 0.1
new = copy.copy(init)
#new = sys.modules[__name__].loop() #This is how to access the loop class from inside this function
#old = loop()
new.s = F(x,y,init.a,init.b) # compute root mean square error
F1,F2,A11,A22,A12 = GradHessF(x,y,init.a,init.b) # compute gradient vector and Hessian matrix
new.Gx = F1
new.Gy = F2
new.g = pythag(F1,F2) # The gradient vector and its norm
lambda_ = lambda_init
sBest = gBest = progess = REAL_MAX
enough = False
i = 0
ii = 0
while not enough:
if i > 0:
# evaluate the progress made during the previous iteration
progress = (np.abs(new.a - old.a)+np.abs(new.b - old.b))/(np.square(old.a) + np.square(old.b) + 1.0)
old = copy.copy(new)
i = i+1
if i > IterMAX: #termination due to going over the limit
enough = True
break
d1,d2,Vx,Vy = eigen2x2(A11,A22,A12) #eigendecomposition of the Hessian matrix
dmin = min(d1,d2) #recording the smaller e-value
AB = pythag(old.a,old.b) + 1.0 # approximation to the circle size
# main stopping rule: terminate iterations if
# the gradient vector is small enough and the
# progress is not substantial
if (old.g < factor1*REAL_EPSILON) and (progress<epsilon):
#print('primary stopping rule')
enough = True
break
# secondary stopping rule (prevents some stupid cycling)
if (old.s >= sBest) and (old.g >= gBest):
print(old.s, sBest, old.g, gBest)
#print('secondary stopping rule')
enough = True
break
if (sBest > old.s):
sBest = old.s # updating the smallest value of the objective function found so far
if (gBest > old.g):
gBest = old.g # updating the smallest length of the gradient vector found so far
G1 = Vx*F1 + Vy*F2 # rotating the gradient vector
G2 = Vx*F2 - Vy*F1 # (expressing it in the eigensystem of the Hessian matrix)
while not enough: # starting point of an "inner" iteration (adjusting lambda)
# enforcing a lower bound on lambda that guarantees that
# (i) the augmented Hessian matrix is positive definite
# (ii) the step is not too big (does not exceed a certain
# fraction of the circle size) the fraction is defined by
# the factor "ccc")
if lambda_ < (np.abs(G1)/AB/ccc) - d1:
lambda_ = np.abs(G1)/AB/ccc - d1
if lambda_ < (np.abs(G2)/AB/ccc) - d2:
lambda_ = np.abs(G2)/AB/ccc - d2
# compute the step (dX,dY) by using the current value of lambda
dX = old.Gx*(Vx*Vx/(d1+lambda_)+Vy*Vy/(d2+lambda_)) + old.Gy*Vx*Vy*(1.0/(d1+lambda_)-1.0/(d2+lambda_))
dY = old.Gx*Vx*Vy*(1.0/(d1+lambda_)-1.0/(d2+lambda_)) + old.Gy*(Vx*Vx/(d2+lambda_)+Vy*Vy/(d1+lambda_))
# updating the loop parameter
new.a = old.a - dX
new.b = old.b - dY
if (new.a==old.a) and (new.b==old.b): #if no change, terminate iterations
enough = True
break
#check if the circle is very large
if np.abs(new.a)>ParLimit2 or np.abs(new.b)>ParLimit2:
#when the circle is very large for the first time, check if
#the best fitting line gives the best fit
if check_line: # initially, check_line= True, then it is set to zero
#compute scatter matrix
dx = x - x.mean()
dy = y - y.mean()
Mxx = (dx*dx).sum()
Myy = (dy*dy).sum()
Mxy = (dy*dx).sum()
dL1,dL2,VLx,VLy = eigen2x2(Mxx,Myy,Mxy) # eigendecomposition of scatter matrix
#compute the third mixed moment (after rotation of coordinates)
dx = (x - x.mean())*VLx + (y - y.mean())*VLy
dy = (y - y.mean())*VLx - (x - x.mean())*VLy
Mxxy = (dx*dx*dy).sum()
#rough estimate of the center to be used later to recoved from the wrong valley
if Mxxy > 0.0:
R = ParLimit2
else:
R = -ParLimit2
aL = -VLy*R
bL = VLx*R
check_line = False
# check if the circle is in the wrong valley
if (new.a*VLy - new.b*VLx)*R>0.0:
# switch to the rough circle estimate (precomupted earlier)
new.a = aL;
new.b = bL;
new.s = F(x,y,new.a,new.b) # compute the root-mean-square error
# compute the gradient vector and Hessian matrix
F1,F2,A11,A22,A12 = GradHessF(x,y,new.a,new.b)
# the gradient vector and its norm
new.Gx = F1;
new.Gy = F2;
new.g = pythag(F1,F2)
lambda_ = lambda_init #reset lambda
sBest = gBest = REAL_MAX #reset best circle characteristics
break
# compute the root-mean-square error
new.s = F(x,y,new.a,new.b)
# compute the gradient vector and Hessian matrix
F1,F2,A11,A22,A12 = GradHessF(x,y,new.a,new.b)
# the gradient vector and its norm
new.Gx = F1
new.Gy = F2
new.g = pythag(F1,F2)
# check if improvement is gained
if new.s < sBest*(1.0+factor2*REAL_EPSILON): #yes, improvement
lambda_ *= factorDown # reduce lambda
break
else:
ii += 1
if ii > IterMAX: #termination due to going over the limit
enough = True
break
lambda_ *= factorUp #increace lambda
continue
old.r = pythag(x - old.a, y - old.b).mean()
old.outer_iterations = i
old.inner_iterations = ii
exit_code = 0
if old.outer_iterations > IterMAX:
exit_code = 1
if old.inner_iterations > IterMAX:
exit_code = 2
if (dmin <= 0.0) and (exit_code==0):
exit_code = 3
old.circle_fit_exit_code = exit_code
sigma(x,y,old) #adds old.sigma value
return old
# NOTE(review): this run of statements continues a scope that starts above the
# visible region (the commented-out `self.` references suggest it was lifted
# out of a method), and indentation in this dump is flattened — confirm the
# real nesting against the original file before restructuring.
# First seed for the circle fit: start the center at the origin of the S21 plane.
x = S21.real
y = S21.imag
loop.a = 0#guess.real#0
loop.b = 0#guess.imag #0
lambda_init = 0.001
#self.loop = CircleFitByChernovHoussam(x,y, self.loop, lambda_init)
# The retry branch is unconditionally taken: the original guard on
# circle_fit_exit_code is commented out, so the normalized fit below always runs.
if True: #self.loop.circle_fit_exit_code != 0:
#print('Circle Fit Failed! Trying again...')
#another initial guess
# Normalize S21 by the magnitude of the mean of a few early samples so the
# fitter works on O(1)-scale data, and seed the center at the data centroid.
norm = np.abs(S21[1:5].mean())
S21 = S21/norm
guess = np.mean(S21)
loop.a = guess.real#0
loop.b = guess.imag #0
lambda_init = 0.001
x = S21.real
y = S21.imag
# Run the Chernov-Houssam circle fit and copy every fitted attribute onto
# `loop` in place (__dict__.update keeps the existing object identity, so
# other references to `loop` see the results).
loop.__dict__.update(CircleFitByChernovHoussam(x,y, loop, lambda_init).__dict__)
# Undo the normalization: rescale the fitted center (a, b), radius r, and the
# stored trace z back to the original S21 units.
loop.a = loop.a*norm
loop.b = loop.b*norm
loop.r = loop.r*norm
loop.z = S21*norm
# A non-zero exit code here means the fit failed even after the retry.
if loop.circle_fit_exit_code != 0:
print('!!!!!!!!!!!!!! Circle Fit Failed Again! Giving Up...')
| StarcoderdataPython |
# -*- coding: utf-8 -*-
"""Top-level package for Visualate."""

# Package metadata consumed by setup tooling and ``help()``.
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "0.1.0"
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.