'''
Function:
    配置文件
'''
import os
# Font used for all in-game text.
FONTPATH = os.path.join(os.getcwd(), 'G2/resources/font/font.ttf')
# Images: one bullet sprite per firing direction.
BULLET_IMAGE_PATHS = {
    direction: os.path.join(os.getcwd(), 'G2/resources/images/bullet/bullet_%s.png' % direction)
    for direction in ('up', 'down', 'left', 'right')
}
# Enemy tank sprites: 4 tank types ('1'..'4'), each with 4 animation frames.
ENEMY_TANK_IMAGE_PATHS = {
    str(tank_type): [
        os.path.join(os.getcwd(),
                     'G2/resources/images/enemyTank/enemy_%d_%d.png' % (tank_type, frame))
        for frame in range(4)
    ]
    for tank_type in range(1, 5)
}
# Player tank sprites: two players, each with 3 upgrade-level frames.
PLAYER_TANK_IMAGE_PATHS = {
    'player%d' % player: [
        os.path.join(os.getcwd(),
                     'G2/resources/images/playerTank/tank_T%d_%d.png' % (player, level))
        for level in range(3)
    ]
    for player in (1, 2)
}
# Power-up ("food") icons; the key doubles as the filename suffix.
FOOD_IMAGE_PATHS = {
    name: os.path.join(os.getcwd(), 'G2/resources/images/food/food_%s.png' % name)
    for name in ('boom', 'clock', 'gun', 'iron', 'protect', 'star', 'tank')
}
# Home base sprite: [intact, destroyed].
HOME_IMAGE_PATHS = [
    os.path.join(os.getcwd(), 'G2/resources/images/home/' + filename)
    for filename in ('home1.png', 'home_destroyed.png')
]
# Terrain tile images.
SCENE_IMAGE_PATHS = {
    # BUG FIX: 'brick' previously pointed at 'G2/Gresources/...' (typo) —
    # a path that does not exist; every other resource lives under 'G2/resources/'.
    'brick': os.path.join(os.getcwd(), 'G2/resources/images/scene/brick.png'),
    'ice': os.path.join(os.getcwd(), 'G2/resources/images/scene/ice.png'),
    'iron': os.path.join(os.getcwd(), 'G2/resources/images/scene/iron.png'),
    'river1': os.path.join(os.getcwd(), 'G2/resources/images/scene/river1.png'),
    'river2': os.path.join(os.getcwd(), 'G2/resources/images/scene/river2.png'),
    'tree': os.path.join(os.getcwd(), 'G2/resources/images/scene/tree.png')
}
# Miscellaneous UI / effect images; the key doubles as the filename stem.
OTHER_IMAGE_PATHS = {
    name: os.path.join(os.getcwd(), 'G2/resources/images/others/%s.png' % name)
    for name in ('appear', 'background', 'boom_dynamic', 'boom_static',
                 'gameover', 'logo', 'mask', 'protect', 'tip', 'gamebar')
}
# Sound effects; the key doubles as the filename stem.
AUDIO_PATHS = {
    name: os.path.join(os.getcwd(), 'G2/resources/audios/%s.wav' % name)
    for name in ('add', 'bang', 'blast', 'fire', 'Gunfire', 'hit', 'start')
}
# Screen geometry (pixels).
WIDTH = 630
HEIGHT = 630
BORDER_LEN = 3
GRID_SIZE = 24
PANEL_WIDTH = 150
TITLE = '坦克大战'
# Levels.
# BUG FIX: a dataset artifact ("| StarcoderdataPython") was fused onto the end
# of this assignment, making the line a syntax error; it has been removed.
LEVELFILEDIR = os.path.join(os.getcwd(), 'G2/modules/levels')
1651253 | """ pixtosky - A module to perform coordinate transformation from pixel coordinates
in one image to pixel coordinates in another frame
:Authors: <NAME>
:License: :doc:`LICENSE`
PARAMETERS
----------
inimage : str
full filename with path of input image, an extension name ['sci',1] should be
provided if input is a multi-extension FITS file
outimage : str, optional
full filename with path of output image, an extension name ['sci',1] should be
provided if output is a multi-extension FITS file. If no image gets
specified, the input image will be used to generate a default output
WCS using stwcs.distortion.util.output_wcs().
direction : str
Direction of transform (forward or backward). The 'forward' transform
takes the pixel positions (assumed to be from the 'input' image) and determines
their position in the 'output' image. The 'backward' transform converts
the pixel positions (assumed to be from the 'output' image) into pixel
positions in the 'input' image.
Optional Parameters
-------------------
x : float, optional
X position from image
y : float, optional
Y position from image
coords : str, deprecated
[DEPRECATED] full filename with path of file with x,y coordinates
Filename given here will be *ignored* if a file has been specified
in `coordfile` parameter.
coordfile : str, optional
full filename with path of file with starting x,y coordinates
colnames : str, optional
comma separated list of column names from 'coordfile' files
containing x,y coordinates, respectively. Will default to
first two columns if None are specified. Column names for ASCII
files will use 'c1','c2',... convention.
separator : str, optional
non-blank separator used as the column delimiter in the coordfile file
precision : int, optional
Number of floating-point digits in output values
output : str, optional
Name of output file with results, if desired
verbose : bool
Print out full list of transformation results (default: False)
RETURNS
-------
outx : float
X position of transformed pixel. If more than 1 input value, then it
will be a numpy array.
outy : float
Y position of transformed pixel. If more than 1 input value, then it
will be a numpy array.
NOTES
-----
This module performs a full distortion-corrected coordinate transformation
based on all WCS keywords and any recognized distortion keywords from the
input image header.
Usage
-----
It can be called from within Python using the syntax::
>>> from drizzlepac import pixtopix
>>> outx,outy = pixtopix.tran("input_flt.fits[sci,1]",
...                           "output_drz.fits[sci,1]", "forward", 100, 100)
EXAMPLES
--------
1. The following command will transform the position 256,256 from
'input_flt.fits[sci,1]' into a position on the output image
'output_drz.fits[sci,1]' using::
>>> from drizzlepac import pixtopix
>>> outx,outy = pixtopix.tran("input_file_flt.fits[sci,1]",
...                           "output_drz.fits[sci,1]", "forward", 256, 256)
2. The set of X,Y positions from 'output_drz.fits[sci,1]' stored as
the 3rd and 4th columns from the ASCII file 'xy_sci1.dat'
will be transformed into pixel positions from 'input_flt.fits[sci,1]'
and written out to 'xy_flt1.dat' using::
>>> from drizzlepac import pixtopix
>>> x,y = pixtopix.tran("input_flt.fits[sci,1]", "output_drz.fits[sci,1]",
"backward", coordfile='xy_sci1.dat', colnames=['c3','c4'],
output="xy_flt1.dat")
"""
from __future__ import absolute_import, division, print_function # confidence medium
import os,copy
import warnings
import numpy as np
from stsci.tools import fileutil, teal
from . import wcs_functions
from . import util
from stwcs import wcsutil, distortion
# Task metadata consumed by TEAL and the help-text helpers below.
# This is specifically NOT intended to match the package-wide version information.
__version__ = '0.1'
__version_date__ = '1-Mar-2011'
__taskname__ = 'pixtopix'
def tran(inimage,outimage,direction='forward',x=None,y=None,
         coords=None, coordfile=None,colnames=None,separator=None,
         precision=6, output=None,verbose=True):
    """ Primary interface to perform coordinate transformations in pixel
    coordinates between 2 images using STWCS and full distortion models
    read from each image's header.

    Coordinates may come either from the ``x``/``y`` arguments (scalar,
    list, or ndarray) or from an ASCII ``coordfile``.  Returns
    ``(outx, outy)`` — scalars when a single x/y pair was passed,
    otherwise lists.
    """
    single_coord = False
    # Only use value provided in `coords` if nothing has been specified for coordfile
    if coords is not None and coordfile is None:
        coordfile = coords
        # Force the DeprecationWarning to be shown once, then restore filtering.
        warnings.simplefilter('always',DeprecationWarning)
        warnings.warn("Please update calling code to pass in `coordfile` instead of `coords`.",
                      category=DeprecationWarning)
        warnings.simplefilter('default',DeprecationWarning)
    if coordfile is not None:
        if colnames in util.blank_list:
            colnames = ['c1','c2']
        # Determine columns which contain pixel positions
        cols = util.parse_colnames(colnames,coordfile)
        # read in columns from input coordinates file
        xyvals = np.loadtxt(coordfile,usecols=cols,delimiter=separator)
        if xyvals.ndim == 1: # only 1 entry in coordfile
            xlist = [xyvals[0].copy()]
            ylist = [xyvals[1].copy()]
        else:
            xlist = xyvals[:,0].copy()
            ylist = xyvals[:,1].copy()
        del xyvals
    else:
        # Normalize x/y input (ndarray, scalar, or list) into parallel lists.
        if isinstance(x,np.ndarray):
            xlist = x.tolist()
            ylist = y.tolist()
        elif not isinstance(x,list):
            # A single scalar pair — remember so the result can be unwrapped.
            xlist = [x]
            ylist = [y]
            single_coord = True
        else:
            xlist = x
            ylist = y
    # start by reading in WCS+distortion info for each image
    im1wcs = wcsutil.HSTWCS(inimage)
    if im1wcs.wcs.is_unity():
        print("####\nNo valid input WCS found in {}.\n Results may be invalid.\n####\n".format(inimage))
    if util.is_blank(outimage):
        # No output image specified: build a default output WCS from every
        # SCI extension of the input exposure.
        fname,fextn = fileutil.parseFilename(inimage)
        numsci = fileutil.countExtn(fname)
        chips = []
        for e in range(1,numsci+1):
            chips.append(wcsutil.HSTWCS(fname,ext=('sci',e)))
        if len(chips) == 0:
            # No SCI extensions at all — fall back to the input WCS itself.
            chips = [im1wcs]
        im2wcs = distortion.utils.output_wcs(chips)
    else:
        im2wcs = wcsutil.HSTWCS(outimage)
    if im2wcs.wcs.is_unity():
        print("####\nNo valid output WCS found in {}.\n Results may be invalid.\n####\n".format(outimage))
    # Setup the transformation
    p2p = wcs_functions.WCSMap(im1wcs,im2wcs)
    # Anything that doesn't start with 'f'/'F' is treated as backward.
    if direction[0].lower() == 'f':
        outx,outy = p2p.forward(xlist,ylist)
    else:
        outx,outy = p2p.backward(xlist,ylist)
    if isinstance(outx,np.ndarray):
        outx = outx.tolist()
        outy = outy.tolist()
    # add formatting based on precision here...
    xstr = []
    ystr = []
    fmt = "%."+repr(precision)+"f"
    for ox,oy in zip(outx,outy):
        xstr.append(fmt%ox)
        ystr.append(fmt%oy)
    # Print the table unless verbosity is off AND results go to a file anyway.
    if verbose or (not verbose and util.is_blank(output)):
        print('# Coordinate transformations for ',inimage)
        print('# X(in) Y(in) X(out) Y(out)\n')
        for xs,ys,a,b in zip(xlist,ylist,xstr,ystr):
            print("%.4f %.4f %s %s"%(xs,ys,a,b))
    # Create output file, if specified
    if output:
        f = open(output,mode='w')
        f.write("# Coordinates converted from %s\n"%inimage)
        for xs,ys in zip(xstr,ystr):
            f.write('%s %s\n'%(xs,ys))
        f.close()
        print('Wrote out results to: ',output)
    if single_coord:
        # Unwrap a one-pair result back to plain scalars.
        outx = outx[0]
        outy = outy[0]
    return outx,outy
#--------------------------
# TEAL Interface functions
#--------------------------
def run(configObj):
    """TEAL hook: unpack a TEAL ``configObj`` and delegate to :func:`tran`."""
    # `coords` is deprecated and may be absent from newer parameter files.
    if 'coords' in configObj:
        legacy_coords = util.check_blank(configObj['coords'])
    else:
        legacy_coords = None
    src_file = util.check_blank(configObj['coordfile'])
    col_names = util.check_blank(configObj['colnames'])
    col_sep = util.check_blank(configObj['separator'])
    out_file = util.check_blank(configObj['output'])
    out_image = util.check_blank(configObj['outimage'])
    tran(configObj['inimage'], out_image,
         direction=configObj['direction'],
         x=configObj['x'], y=configObj['y'],
         coords=legacy_coords, coordfile=src_file,
         colnames=col_names, separator=col_sep,
         precision=configObj['precision'],
         output=out_file, verbose=configObj['verbose'])
def help(file=None):
    """
    Print out syntax help for running astrodrizzle

    Parameters
    ----------
    file : str (Default = None)
        If given, write out help to the filename specified by this parameter
        Any previously existing file with this name will be deleted before
        writing out the help.
    """
    helpstr = getHelpAsString(docstring=True, show_ver=True)
    if file is None:
        print(helpstr)
    else:
        # Recreate the file from scratch rather than truncating in place.
        if os.path.exists(file):
            os.remove(file)
        # FIX: use a context manager so the handle is closed even if the
        # write raises (the original open/write/close leaked on error).
        with open(file, mode='w') as f:
            f.write(helpstr)
def getHelpAsString(docstring=False, show_ver=True):
    """
    return useful help from a file in the script directory called
    __taskname__.help
    """
    install_dir = os.path.dirname(__file__)
    taskname = util.base_taskname(__taskname__, '')
    htmlfile = os.path.join(install_dir, 'htmlhelp', taskname + '.html')
    helpfile = os.path.join(install_dir, taskname + '.help')
    # An HTML rendering exists and plain text was not explicitly requested:
    # hand back a file:// URL instead of inlined text.
    if not docstring and os.path.exists(htmlfile):
        return 'file://' + htmlfile
    if show_ver:
        helpString = os.linesep + \
            ' '.join([__taskname__, 'Version', __version__,
                      ' updated on ', __version_date__]) + 2*os.linesep
    else:
        helpString = ''
    if os.path.exists(helpfile):
        helpString += teal.getHelpFileAsString(taskname, __file__)
    elif __doc__ is not None:
        helpString += __doc__ + os.linesep
    return helpString
# Replace the module docstring with the dynamically generated help text
# (TEAL convention for drizzlepac tasks).
__doc__ = getHelpAsString(docstring = True, show_ver = False)
| StarcoderdataPython |
3250745 | <filename>meutils/request_utils/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : MeUtils.
# @File : http_utils
# @Time : 2020/11/12 11:49 上午
# @Author : yuanjie
# @Email : <EMAIL>
# @Software : PyCharm
# @Description :
import requests
from loguru import logger
from tenacity import retry, stop_after_delay, stop_after_attempt, wait_fixed
@retry(wait=wait_fixed(3),  # wait 3 seconds before each retry
       stop=stop_after_delay(7) | stop_after_attempt(3),  # give up after 7s OR 3 attempts
       retry_error_callback=lambda log: logger.error(log),
       reraise=True)
# @lru_cache()
def request(url=None, json=None, parser=lambda x: x, encoding=None, **kwargs):
    """Issue an HTTP request with retry.

    :param url: target URL.
    :param json: JSON body; when given the request is a POST, otherwise a GET.
    :param parser: when None, return the raw Response; otherwise return
        ``parser(r.json())``.
    :param encoding: response encoding override; defaults to the detected
        apparent encoding.
    :param kwargs: extra keyword arguments forwarded to ``requests.request``
        (e.g. ``timeout``, ``params``, ``proxies``, ``headers``).
    :return: parsed JSON (via ``parser``) or the raw Response.
    """
    method = 'post' if json is not None else 'get'  # 特殊情况除外
    logger.info(f"Request Method: {method}")
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE '
    }
    # Let callers extend/override the default headers via kwargs.
    headers.update(kwargs.pop('headers', {}))
    # BUG FIX: **kwargs were accepted but silently dropped; forward them so
    # options such as timeout/params/proxies actually take effect.
    r = requests.request(method, url, json=json, headers=headers, **kwargs)
    r.encoding = encoding if encoding else r.apparent_encoding
    if parser is None:
        return r
    return parser(r.json())
| StarcoderdataPython |
1744420 | <filename>hero/myproject/pipelines.py<gh_stars>0
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pymongo import MongoClient
import json
class MyprojectPipeline(object):
    """Filter pipeline: pass items through only when both 'title' and
    'href' are present; otherwise drop them (returns None).
    """

    def process_item(self, item, spider):
        # Guard clause: an item missing either field is discarded.
        if item.get('title') is None or item.get('href') is None:
            return None
        return item
class MongoDBPipeline(object):
    """
    Write each item into the 'zufang' MongoDB collection.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Pull the connection settings from the Scrapy settings object.
        cls.MONGODB_SETTINGS = crawler.settings.get('MONGODB_SETTINGS')
        return cls()

    def open_spider(self, spider):
        cfg = self.MONGODB_SETTINGS
        dbname = cfg['db']
        self.client = MongoClient(host=cfg['host'],
                                  port=cfg['port'],
                                  username=cfg['username'],
                                  password=cfg['password'],
                                  authSource=dbname,
                                  authMechanism='SCRAM-SHA-1')
        self.db = self.client[dbname]
        self.collection = self.db['zufang']

    def close_spider(self, spider):
        # Release the connection pool once the spider finishes.
        self.client.close()

    def process_item(self, item, spider):
        self.collection.insert_one(dict(item))
        # FIX: the original line had dataset residue fused onto it, which made
        # this a syntax error.  Return the item for any later pipeline stages.
        return item
1649827 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
import requests
import multiprocessing
import models
from gevent.pool import Pool
from sqlalchemy.exc import IntegrityError, InvalidRequestError
from helpers import random_str
from webs.douban import parsers
from config import sqla
from . import get_main_movies_base_data
# Base URLs for Douban detail pages; the numeric douban_id is appended.
douban_movie_url = 'http://movie.douban.com/subject/'
douban_celebrity_url = 'http://movie.douban.com/celebrity/'
cookies = {
    # Refilled with a fresh random 11-character value before each request
    # (see create_requests_and_save_datas) so Douban sees a new browser id.
    'bid': ''
}
def _get_or_create(session, model_class, create_kwargs, lookup_kwargs):
    """Try to INSERT ``model_class(**create_kwargs)``; on a unique-constraint
    conflict roll back and return the existing row found via ``lookup_kwargs``.
    """
    try:
        obj = model_class(**create_kwargs)
        session.add(obj)
        session.commit()
        return obj
    except (IntegrityError, InvalidRequestError):
        session.rollback()
        return session.query(model_class).filter_by(**lookup_kwargs).one()


def create_requests_and_save_datas(douban_id):
    """Fetch the Douban detail page for ``douban_id``, parse it, and persist
    the video record together with its celebrities, genres, countries,
    languages and photos.  Returns silently on a non-200 response.
    """
    session = sqla['session']
    # Rotate the 'bid' cookie so Douban treats each request as a new browser.
    cookies['bid'] = random_str(11)
    r = requests.get(
        douban_movie_url + str(douban_id),
        cookies=cookies,
        timeout=10
    )
    if r.status_code != 200:
        return
    data = parsers.movie.start_parser(r.text)
    data['douban_url'] = r.url

    # Split the parsed celebrities into their three roles.
    directors = data.pop('directors', [])
    director_douban_ids = set(d['douban_id'] for d in directors)
    playwrights = data.pop('playwrights', [])
    playwright_douban_ids = set(p['douban_id'] for p in playwrights)
    actors = data.pop('actors', [])
    actor_douban_ids = set(a['douban_id'] for a in actors)
    celebrities = directors + playwrights + actors
    # (The original also built the union of all three id sets but never used
    # it; that dead local has been removed.)

    # Insert (or fetch existing) Celebrity rows, keyed by their douban_id.
    douban_id_celebrity_obj_dict = {}
    for celebrity in celebrities:
        celebrity_douban_id = celebrity['douban_id']
        if celebrity_douban_id is None:
            continue
        try:
            celebrity_obj = models.Celebrity(**celebrity)
            session.add(celebrity_obj)
            session.commit()
        except (IntegrityError, InvalidRequestError):
            session.rollback()
            # .first() (not .one()) kept from the original: a missing row
            # after rollback yields None here rather than raising.
            celebrity_obj = session.query(models.Celebrity).filter_by(
                douban_id=celebrity_douban_id
            ).first()
        douban_id_celebrity_obj_dict[celebrity_douban_id] = celebrity_obj

    video = session.query(models.Video).filter_by(douban_id=douban_id).one()
    video.directors.clear()
    video.playwrights.clear()
    video.actors.clear()
    # BUG FIX: the loop variable was misspelled ``celeBrity_obj`` while the
    # body appended ``celebrity_obj``, so every relation received the stale
    # object left over from the insert loop above instead of the current one.
    for celebrity_douban_id, celebrity_obj in douban_id_celebrity_obj_dict.items():
        if celebrity_douban_id in director_douban_ids:
            video.directors.append(celebrity_obj)
        if celebrity_douban_id in playwright_douban_ids:
            video.playwrights.append(celebrity_obj)
        if celebrity_douban_id in actor_douban_ids:
            video.actors.append(celebrity_obj)
    session.commit()

    # query.update(data) cannot be used here: Video is a joined (multi-table)
    # mapping and columns of both the subject and the movie table are touched.
    video.genres.clear()
    video.countries.clear()
    video.languages.clear()
    session.commit()
    table_name = video.__tablename__
    if table_name == 'movies':
        genre_class = models.MovieGenre
    elif table_name == 'tvs':
        genre_class = models.TVGenre
    elif table_name == 'animations':
        genre_class = models.AnimationGenre
    # NOTE(review): any other table name leaves ``genre_class`` unbound — the
    # original had the same gap; confirm Video is limited to these three.

    # Attach the many-to-many attributes first; their insert path may roll
    # back, which would discard any plain-column changes made beforehand.
    for k, v in data.items():
        if k == 'genres':
            for genre in v:
                video.genres.append(
                    _get_or_create(session, genre_class, genre,
                                   {'name': genre['name']}))
        elif k == 'countries':
            for country in v:
                video.countries.append(
                    _get_or_create(session, models.Country, country,
                                   {'name': country['name']}))
        elif k == 'languages':
            for language in v:
                video.languages.append(
                    _get_or_create(session, models.Language, language,
                                   {'name': language['name']}))
    session.commit()

    # Plain column values are set in a second pass so the rollbacks above
    # cannot wipe them out.
    for k, v in data.items():
        if k not in ('genres', 'countries', 'languages'):
            if k in ('aliases', 'thumbnail_photos'):
                v = str(v)  # list-valued fields are stored stringified
            setattr(video, k, v)
    session.commit()

    # Fetch and store the photo page for this title.
    r = requests.get(
        douban_movie_url + str(douban_id) + '/all_photos',
        cookies=cookies,
        timeout=10
    )
    photo_data = parsers.movie_photo.start_parser(r.text)
    for k, v in photo_data.items():
        setattr(video, k, str(v))
    video.is_detail = True
    session.commit()
    # BUG FIX: str.join requires strings — douban_id is typically an int and
    # the parsed title may be absent (None); coerce both before joining.
    print(','.join([table_name, str(douban_id), str(data.get('title'))]))
def task(douban_ids, pool_number):
    """Crawl the detail pages for all ``douban_ids`` concurrently using a
    gevent pool of size ``pool_number``; blocks until every job finishes.
    """
    worker_pool = Pool(pool_number)
    for movie_id in douban_ids:
        worker_pool.spawn(create_requests_and_save_datas, douban_id=movie_id)
    worker_pool.join()
| StarcoderdataPython |
3293705 | <reponame>skostya64/Music_store_test
class CheckBassNamePage:
    """Page-object helper for musicstore.de: navigate to 4-string electric
    bass guitars, filter by brand 'Epiphone', and check the brand on each
    product page.
    """

    def __init__(self, driver):
        # Selenium WebDriver instance shared by all page actions.
        self.driver = driver

    def click_menu_bass_guitars(self):
        # Open the top-level "Bass" category.
        self.driver.find_element_by_xpath("//a[@href='https://www.musicstore.de/ru_OT/EUR/-/cat-BASS']").click()

    def click_electric_bass_guitars(self):
        # Drill down into electric bass guitars.
        self.driver.find_element_by_xpath("//a[@href='https://www.musicstore.de/ru_OT/EUR/-/-/cat-BASS-BASEBASS']").click()

    def click_four_strings(self):
        # Narrow the listing to 4-string models.
        self.driver.find_element_by_xpath("//a[@href='https://www.musicstore.de/ru_OT/EUR/-/4-/cat-BASS-BASEB4']").click()

    def select_name_brand(self):
        # Open the manufacturer facet and apply the 'Epiphone' filter.
        self.driver.find_element_by_xpath("//span[text() = 'Производитель']").click()
        self.driver.find_element_by_xpath("//span[@title = 'Epiphone']").click()
        self.driver.find_element_by_xpath("//span[@class = 'apply btn btn-ms-std btn-lg']").click()

    def check_name_brand_in_products(self):
        # NOTE(review): the same fixed tile id is re-located on every
        # iteration, and clicking a tile navigates away from the listing —
        # the re-found elements are likely stale or identical; confirm this
        # loop actually visits distinct products.
        for i in range(len(self.driver.find_elements_by_xpath("//div[@id = 'tile-product-BAS0008210-000']"))):
            self.driver.find_elements_by_xpath("//div[@id = 'tile-product-BAS0008210-000']")[i].click()
            # NOTE(review): `.text` of an <img> element is normally empty —
            # presumably the 'alt'/'title' attribute was intended; verify.
            brand_name = self.driver.find_element_by_xpath("//img[@title = 'Epiphone']").text
            assert brand_name == "Epiphone"
| StarcoderdataPython |
3248873 | import logging
from time import sleep
from django.conf import settings
from django.core.management.base import BaseCommand
import arrow
from kegbot.reader import TapReader
from kegbot.tasks import record_pulses
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Poll every tap reader forever: expire stale pulse counts, otherwise
    ship the accumulated pulses off to the ``record_pulses`` task.
    """

    def handle(self, *args, **opts):
        readers = TapReader.get_readers()
        while True:
            # Pulses older than DISPENSE_EXPIRE seconds are considered stale.
            # NOTE(review): newer arrow releases use .shift() for relative
            # offsets; .replace(seconds=-N) is the legacy spelling — confirm
            # the pinned arrow version supports it.
            cutoff = arrow.now().replace(seconds=-settings.DISPENSE_EXPIRE)
            for reader in readers:
                if not reader.first_pulse:
                    continue
                if reader.last_pulse and reader.last_pulse < cutoff:
                    logger.debug('reset pulses')
                    reader.reset_pulses()
                else:
                    payload = reader.as_payload()
                    logger.debug('send payload {}'.format(payload))
                    record_pulses.delay(payload=payload)
            sleep(1)
| StarcoderdataPython |
193873 | # -*- coding: utf-8 -*-
import tweepy
import os
from airpoldata import NOX, P2, highorlow, highorlowNOX, highorlowp2
#this script runs the twitterbot
# Authenticate against the Twitter API using credentials from the environment.
auth = tweepy.OAuthHandler(os.environ['CONSUMER_KEY'], os.environ['CONSUMER_SECRET'])
auth.set_access_token(os.environ['ACCESS_TOKEN'], os.environ['ACCESS_TOKEN_SECRET'])
api = tweepy.API(auth)
# Compose the status text from the current pollution readings.
# NOTE(review): this runs before the None-check below, so highorlowNOX /
# highorlowp2 are called even when NOX/P2 are None — confirm those helpers
# tolerate missing readings.
airnow = ('The #airpollution now is '+ (highorlow(highorlowNOX(NOX), highorlowp2(P2))) + '. Particulate matter <2.5 um = ' + str(P2)+ u'µg/m³, Nitrous oxides concentration = ' + str(NOX) + u'µg/m³')
# In DEBUG mode, echo the tweet locally instead of only posting it.
if os.environ.get("DEBUG"):
    print (airnow)
# Only post when both pollutant readings are available.
# NOTE(review): prefer `is not None` over `!= None` here.
if P2 != None and NOX != None:
    api.update_status(status=airnow)
| StarcoderdataPython |
3395850 | <reponame>sivasanarul/amfe_topopt<filename>amfe/parametric/morphing/morpher/__init__.py
#
# Copyright (c) 2018 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, <EMAIL>.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
"""
Morphing module.
"""
# -- import base morpher (important for subclassing) --
from .basemorpher import *
# -- import geometric morpher --
from .cuboidmorpher import *
from .cylindermorpher import *
from .rectanglemorpher import *
| StarcoderdataPython |
189988 | from django.db import models
from .auth.custom_user_model import AbstractBaseUser
from django.utils.translation import ugettext_lazy as _
# Create your models here.
class Usermaster(AbstractBaseUser):
    """Custom auth user mapped onto the pre-existing ``usermaster`` table.

    ``managed = False``: Django never creates or migrates this table.  The
    database column names differ from the attribute names, hence the
    explicit ``db_column`` on every field.
    """
    user_no = models.IntegerField(db_column='userno', primary_key=True)  # Field name made lowercase.
    user_status_code = models.CharField(db_column='userstatuscode', max_length=10)  # Field name made lowercase.
    name = models.CharField(db_column='name', max_length=100)  # Field name made lowercase.
    id = models.CharField(db_column='id', max_length=100, blank=True, null=True)  # Field name made lowercase.
    user_password = models.CharField(db_column='userpassword', max_length=1000, blank=True, null=True)  # Field name made lowercase.
    gender = models.CharField(db_column='gender', max_length=1, blank=True, null=True)  # Field name made lowercase.
    mobile_phone_no = models.CharField(db_column='mobilephoneno', max_length=30, blank=True, null=True)  # Field name made lowercase.
    email_address = models.CharField(db_column='emailaddress', max_length=100, blank=True, null=True)  # Field name made lowercase.
    is_email_receive = models.CharField(db_column='isemailreceive', max_length=5)  # Field name made lowercase.
    enter_join_time = models.DateTimeField(db_column='enterjointime')  # Field name made lowercase.
    last_update_uno = models.IntegerField(db_column='lastupdateuno')  # Field name made lowercase.
    last_update_time = models.DateTimeField(db_column='lastupdatetime')  # Field name made lowercase.

    # Authentication key used by AbstractBaseUser.
    USERNAME_FIELD = 'user_no'
    # No extra fields prompted for by createsuperuser.
    REQUIRED_FIELDS = []
    # Transient (non-database) attribute.  NOTE(review): presumably holds a
    # plaintext password between form handling and hashing — confirm usage.
    temp_password = None

    class Meta:
        managed = False
        db_table = 'usermaster'
        #verbose_name = _('user')
        #verbose_name_plural = _('users')
# class Usermaster(models.Model):
# userno = models.IntegerField(blank=True, null=True)
# userstatuscode = models.CharField(max_length=10, blank=True, null=True)
# name = models.CharField(max_length=100)
# id = models.CharField(max_length=100)
# userpassword = models.CharField(max_length=1000, blank=True, null=True)
# gender = models.CharField(max_length=1, blank=True, null=True)
# mobilephoneno = models.CharField(max_length=30, blank=True, null=True)
# emailaddress = models.CharField(max_length=100, blank=True, null=True)
# isemailreceive = models.CharField(max_length=5)
# enterjointime = models.DateTimeField()
# lastupdateuno = models.IntegerField(blank=True, null=True)
# lastupdatetime = models.DateTimeField()
#
# class Meta:
# managed = False
# db_table = 'usermaster'
| StarcoderdataPython |
59110 | <gh_stars>1-10
# Copyright (C) 2020 TU Dresden
# Licensed under the ISC license (see LICENSE.txt)
#
# Authors: <NAME>, <NAME>
from mocasin.common.platform import Platform, Processor
from mocasin.platforms.topologies import fullyConnectedTopology
from mocasin.platforms.platformDesigner import PlatformDesigner
from mocasin.platforms.utils import simpleDijkstra as sd
from hydra.utils import instantiate
class DesignerPlatformCoolidge(Platform):
    """Five-cluster platform modeled after the Kalray MPPA3 Coolidge.

    Each cluster holds 16 compute PEs (``processor_0``) joined by a fully
    connected NoC plus one security core (``processor_1``), sharing an L2;
    all five clusters share a common RAM.  The topology and latency numbers
    (!) should ultimately come from the MPPA3 Coolidge sheet published by
    Kalray.
    """

    def __init__(
        self,
        processor_0,
        processor_1,
        name="coolidge",
        symmetries_json=None,
        embedding_json=None,
    ):
        super(DesignerPlatformCoolidge, self).__init__(
            name, symmetries_json, embedding_json
        )
        # workaround until Hydra 1.1: configs may hand us config nodes
        # instead of already-built Processor objects.
        if not isinstance(processor_0, Processor):
            processor_0 = instantiate(processor_0)
        if not isinstance(processor_1, Processor):
            processor_1 = instantiate(processor_1)

        designer = PlatformDesigner(self)
        designer.setSchedulingPolicy("FIFO", 1000)
        designer.newElement("coolidge")

        cluster_names = []
        for i in range(5):
            cluster_name = "cluster_{0}".format(i)
            designer.newElement(cluster_name)
            cluster_names.append(cluster_name)

            # 16 compute cores on a fully connected NoC.
            designer.addPeClusterForProcessor(f"cluster_{i}_0", processor_0, 16)
            # Each cluster owns 17 PEs overall (16 compute + 1 security
            # core), hence the i*17 naming offset.
            pe_names = ["processor_{:04d}".format(i * 17 + k) for k in range(16)]
            designer.createNetworkForCluster(
                f"cluster_{i}_0",
                f"noc_{i}",
                fullyConnectedTopology(pe_names),
                sd,
                40000.0,
                100,
                150,
                100,
                60,
            )

            # One security core per cluster, sharing the L2 with the NoC.
            designer.addPeClusterForProcessor(f"cluster_{i}_1", processor_1, 1)
            designer.addCommunicationResource(
                f"L2_{i}",
                [f"cluster_{i}_0", f"cluster_{i}_1"],
                500,
                1500,
                float("inf"),
                float("inf"),
                frequencyDomain=600000.0,
            )
            designer.finishElement()

        # RAM shared across all five clusters.
        designer.addCommunicationResource(
            "RAM",
            cluster_names,
            1000,
            3000,
            float("inf"),
            float("inf"),
            frequencyDomain=10000,
        )
        designer.finishElement()
| StarcoderdataPython |
3256375 | # from .ddaig import DDAIG
# from .daeldg import DAELDG
# from .vanilla import Vanilla
from .crossgrad import CrossGrad
from .dddcian import CIAN
from .CAN import CAN
from .mixture import mixup
from .ADV import ADV
from .EpiDG import EpiDG
from .MetaReg import MetaReg
from .FeatureCritic import FCDG
# from .MLDG_tmp import MLDG
from .MASF import MASF
# from .MLDG_tmp_V1 import MLDGV1
from .MetaSGD import MetaSGD
from .MLDG import MLDG
3337705 | <reponame>97littleleaf11/mypyc-benchmarks
"""Utilities for generation markdown."""
from reporting.common import BENCHMARKS_DIR
def mypy_commit_link(commit: str) -> str:
    """Return a markdown link to a mypy commit, labelled with its short hash."""
    return '[{}](https://github.com/python/mypy/commit/{})'.format(commit[:12], commit)
def benchmark_link(benchmark: str, link_name: str = '') -> str:
    """Return a markdown link to a benchmark's report page; the label
    defaults to the benchmark name itself.
    """
    label = link_name if link_name else benchmark
    return '[%s](%s/%s.md)' % (label, BENCHMARKS_DIR, benchmark)
def bold(s: str) -> str:
    """Wrap *s* in markdown bold markers; empty strings pass through unchanged."""
    return '**%s**' % s if s else s
| StarcoderdataPython |
26314 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-04-06 16:37
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Blog posts plus Comments attached to blogs or problems.

    Auto-generated by Django 1.10.4 — edit with care.
    """

    initial = True

    dependencies = [
        # The Comment/Blog author FKs target the swappable user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('problem', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Blog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=128, verbose_name='Title')),
                ('text', models.TextField(verbose_name='Text')),
                ('visible', models.BooleanField(default=False, verbose_name='Visible')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Created time')),
                ('edit_time', models.DateTimeField(auto_now=True, verbose_name='Edit time')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-edit_time'],
            },
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(verbose_name='Text')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Created time')),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                # A comment belongs to either a blog or a problem (both nullable).
                ('blog', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.Blog')),
                ('problem', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='problem.Problem')),
            ],
            options={
                'ordering': ['-create_time'],
            },
        ),
    ]
| StarcoderdataPython |
1627093 | # -*- coding: utf-8 -*-
"""Input and output functions for BEL graphs.
PyBEL provides multiple lossless interchange options for BEL. Lossy output formats are also included for convenient
export to other programs. Notably, a *de facto* interchange using Resource Description Framework (RDF) to match the
ability of other existing software is excluded due the immaturity of the BEL to RDF mapping.
"""
from .api import dump, load
from .aws import from_s3, to_s3
from .bel_commons_client import from_bel_commons, to_bel_commons
from .biodati_client import from_biodati, to_biodati
from .cx import from_cx, from_cx_file, from_cx_gz, from_cx_jsons, to_cx, to_cx_file, to_cx_gz, to_cx_jsons
from .emmaa import from_emmaa
from .extras import to_csv, to_gsea, to_sif
from .fraunhofer_orientdb import from_fraunhofer_orientdb
from .gpickle import from_bytes, from_pickle, to_bytes, to_pickle
from .graphdati import (
from_graphdati, from_graphdati_file, from_graphdati_gz, from_graphdati_jsons, to_graphdati, to_graphdati_file,
to_graphdati_gz, to_graphdati_jsonl, to_graphdati_jsonl_gz, to_graphdati_jsons,
)
from .graphml import to_graphml
from .hetionet import from_hetionet_file, from_hetionet_gz, from_hetionet_json, get_hetionet
from .hipathia import from_hipathia_dfs, from_hipathia_paths, to_hipathia, to_hipathia_dfs
from .indra import (
from_biopax, from_indra_pickle, from_indra_statements, from_indra_statements_json, from_indra_statements_json_file,
to_indra_statements, to_indra_statements_json, to_indra_statements_json_file,
)
from .jgif import (
from_cbn_jgif, from_cbn_jgif_file, from_jgif, from_jgif_file, from_jgif_gz, from_jgif_jsons, post_jgif, to_jgif,
to_jgif_file, to_jgif_gz, to_jgif_jsons,
)
from .jupyter import to_jupyter, to_jupyter_str
from .lines import from_bel_script, from_bel_script_url
from .neo4j import to_neo4j
from .nodelink import (
from_nodelink, from_nodelink_file, from_nodelink_gz, from_nodelink_jsons, to_nodelink,
to_nodelink_file, to_nodelink_gz, to_nodelink_jsons,
)
from .pynpa import to_npa_dfs, to_npa_directory
from .spia import to_spia_dfs, to_spia_excel, to_spia_tsvs
from .tsv import to_edgelist, to_tsv
from .umbrella_nodelink import to_umbrella_nodelink, to_umbrella_nodelink_file, to_umbrella_nodelink_gz
| StarcoderdataPython |
1649614 | <reponame>drschwabe/prototype
# Copyright (c) 2020 <NAME>
# Distributed under the MIT software license, see the accompanying# file LICENSE or http://www.opensource.org/licenses/mit-license.php
from moneysocket.protocol.layer import ProtocolLayer
from moneysocket.protocol.transact.consumer_nexus import ConsumerTransactNexus
class ConsumerTransactLayer(ProtocolLayer):
    """Consumer-side transaction layer of a moneysocket protocol stack.

    Wraps each nexus announced from the layer below in a
    ConsumerTransactNexus and forwards invoice/preimage/provider
    notifications up to the owning stack.
    """

    def __init__(self, stack, above_layer):
        super().__init__(stack, above_layer, "CONSUMER_TRANSACT")
        # The stack must implement the notification callbacks this layer
        # forwards to; fail fast at construction time if it does not.
        assert "notify_preimage_cb" in dir(stack)
        assert "notify_invoice_cb" in dir(stack)
        assert "notify_provider_cb" in dir(stack)

    def announce_nexus_from_below_cb(self, below_nexus):
        """Wrap a newly announced lower nexus and announce it upward."""
        consumer_transact_nexus = ConsumerTransactNexus(below_nexus, self)
        self._track_nexus(consumer_transact_nexus, below_nexus)
        self._track_nexus_announced(consumer_transact_nexus)
        self.notify_app_of_status(consumer_transact_nexus, "NEXUS_ANNOUNCED")
        self.announce_nexus_above_cb(consumer_transact_nexus)

    def notify_invoice_cb(self, consumer_transact_nexus, bolt11,
                          request_reference_uuid):
        """Forward a received bolt11 invoice to the stack."""
        self.stack.notify_invoice_cb(consumer_transact_nexus, bolt11,
                                     request_reference_uuid)

    def notify_preimage_cb(self, consumer_transact_nexus, preimage,
                           request_reference_uuid):
        """Forward a received payment preimage to the stack."""
        self.stack.notify_preimage_cb(consumer_transact_nexus, preimage,
                                      request_reference_uuid)

    def request_invoice(self, nexus_uuid, msats, description):
        """Request an invoice for `msats` from the given nexus.

        Returns (request_uuid, None) on success, or (None, error_message)
        when the nexus is not currently online.
        """
        if nexus_uuid not in self.nexuses:
            return None, "nexus not online"
        nexus = self.nexuses[nexus_uuid]
        # Fix: the description argument was previously discarded and an
        # empty string was sent to the nexus instead.
        request_uuid = nexus.request_invoice(msats, description)
        return request_uuid, None

    def request_pay(self, nexus_uuid, bolt11):
        """Ask the given nexus to pay a bolt11 invoice.

        Returns (request_uuid, None) on success, or (None, error_message)
        when the nexus is not currently online.
        """
        if nexus_uuid not in self.nexuses:
            return None, "nexus not online"
        nexus = self.nexuses[nexus_uuid]
        request_uuid = nexus.request_pay(bolt11)
        return request_uuid, None

    def notify_provider_cb(self, consumer_transact_nexus, msg):
        """Forward a provider-info notification to the stack."""
        self.stack.notify_provider_cb(consumer_transact_nexus, msg)
| StarcoderdataPython |
3364586 | <reponame>xxchenxx/dlrm<filename>metrics/pac.py
'''
Metrics:
PAC Bayesian flatness with respect to input and weight
'''
import os
import time
import copy
import torch
import numpy as np
import torch.nn.functional as F
from dlrm_s_pytorch import unpack_batch
__all__ = ['eval_pac_weight', 'eval_pac_input']
@torch.no_grad()
def evaluate_function_noise(xloader, dlrm, network, noise, loss_fn_wrap, use_gpu=True, ndevices=1):
    """Mean loss over up to 50 batches with multiplicative Gaussian noise
    applied to the dense inputs (model weights are left untouched).

    `noise` scales the per-element standard deviation relative to |X|.
    """
    dlrm.eval()
    max_batches = 50
    device = torch.cuda.current_device()
    total_loss = 0
    for batch_idx, input_batch in enumerate(xloader):
        if batch_idx >= max_batches:
            break
        X, lS_o, lS_i, T, W, CBPP = unpack_batch(input_batch)
        # Perturb only the dense features; std is proportional to magnitude.
        noisy_X = torch.normal(mean=X, std=X.abs() * noise)
        Z = network(
            noisy_X,
            lS_o,
            lS_i,
            use_gpu,
            device,
            ndevices=ndevices,
        )
        total_loss += loss_fn_wrap(Z, T, use_gpu, device).detach().cpu().numpy()
    # Matches the original: normalizes by the cap (50), not by the number of
    # batches actually seen.
    return total_loss / max_batches
@torch.no_grad()
def evaluate_function(xloader, dlrm, network, loss_fn_wrap, use_gpu=True, ndevices=1):
    """Mean unperturbed loss of `dlrm` over up to 50 batches of `xloader`."""
    dlrm.eval()
    max_batches = 50
    device = torch.cuda.current_device()
    total_loss = 0
    for batch_idx, input_batch in enumerate(xloader):
        if batch_idx >= max_batches:
            break
        X, lS_o, lS_i, T, W, CBPP = unpack_batch(input_batch)
        Z = network(
            X,
            lS_o,
            lS_i,
            use_gpu,
            device,
            ndevices=ndevices,
        )
        total_loss += loss_fn_wrap(Z, T, use_gpu, device).detach().cpu().numpy()
    # Matches the original: normalizes by the cap (50), not by the number of
    # batches actually seen.
    return total_loss / max_batches
def eval_pac_weight(
        dlrm, xloader, network, loss_fn_wrap, train_mode=False, num_batch=5, use_gpu=True, ndevices=1,
        beta=0.1,
        max_search_times=20,
        iteration_times=15,
        sigma_min=0.,
        sigma_max=5.,
        eps=1e-3):
    """Binary-search the weight-noise scale sigma at which multiplicative
    Gaussian perturbation of the 2-D/4-D weight tensors keeps the mean loss
    within (1 + beta) of the unperturbed loss (PAC-Bayes flatness w.r.t.
    weights).

    NOTE(review): `train_mode` and `num_batch` are accepted but never used
    (evaluate_function caps at 50 batches internally) — confirm intent.
    """
    original_weight = copy.deepcopy(dlrm.state_dict())
    device = torch.cuda.current_device()
    original_loss = evaluate_function(xloader, dlrm, network, loss_fn_wrap, use_gpu=use_gpu, ndevices=ndevices)  # numpy array
    # Tolerated loss: beta-relative degradation of the clean loss.
    max_loss = (1 + beta) * original_loss
    for episode in range(max_search_times):
        sigma_new = (sigma_max + sigma_min) / 2
        loss_list = []
        for step in range(iteration_times):
            # generate perturbed weight
            perturb_weight = {}
            for key in original_weight.keys():
                if 'mask' in key:
                    # mask that represents network structure.
                    perturb_weight[key] = original_weight[key]
                else:
                    # Only perturb matrix/conv-shaped tensors (dims 2 or 4);
                    # biases and 1-D params are copied unchanged.
                    if len(original_weight[key].size()) in [2,4]:
                        perturb_weight[key] = torch.normal(mean = original_weight[key], std = sigma_new * (original_weight[key].abs()))
                    else:
                        perturb_weight[key] = original_weight[key]
            dlrm.load_state_dict(perturb_weight)
            perturb_loss = evaluate_function(xloader, dlrm, network, loss_fn_wrap, use_gpu=use_gpu, ndevices=ndevices)
            loss_list.append(perturb_loss)
        loss_mean = np.mean(np.array(loss_list))
        print('current-sigma = {}, tolerent loss = {}, current loss = {}'.format(sigma_new, max_loss, loss_mean))
        #compare with original_loss
        if loss_mean <= max_loss and (sigma_max - sigma_min) < eps:
            # NOTE(review): this success path returns sigma itself and does
            # NOT restore the unperturbed weights, while the fall-through
            # below restores weights and returns 1/sigma**2 — confirm which
            # contract callers expect.
            return sigma_new
        else:
            if loss_mean > max_loss:
                sigma_max = sigma_new
            else:
                sigma_min = sigma_new
    # Restore the clean weights after the (possibly unconverged) search.
    dlrm.load_state_dict(original_weight)
    return 1 / sigma_new**2
def eval_pac_input(
        dlrm, xloader, network, loss_fn_wrap, train_mode=False, num_batch=5, use_gpu=True, ndevices=1,
        beta=0.1,
        max_search_times=20,
        iteration_times=15,
        sigma_min=0.,
        sigma_max=5.,
        eps=1e-3):
    """Binary-search the input-noise scale sigma at which multiplicative
    Gaussian perturbation of the dense inputs keeps the mean loss within
    (1 + beta) of the unperturbed loss (PAC-Bayes flatness w.r.t. inputs).

    NOTE(review): `train_mode` and `num_batch` are accepted but never used
    — confirm intent.
    """
    # NOTE(review): weights are never modified in this function, so the
    # save/restore of the state dict below is presumably defensive.
    original_weight = copy.deepcopy(dlrm.state_dict())
    device = torch.cuda.current_device()
    original_loss = evaluate_function(xloader, dlrm, network, loss_fn_wrap, use_gpu=use_gpu, ndevices=ndevices)  # numpy array
    # Tolerated loss: beta-relative degradation of the clean loss.
    max_loss = (1 + beta) * original_loss
    for episode in range(max_search_times):
        sigma_new = (sigma_max + sigma_min) / 2
        loss_list = []
        for step in range(iteration_times):
            perturb_loss = evaluate_function_noise(xloader, dlrm, network, sigma_new, loss_fn_wrap, use_gpu=use_gpu, ndevices=ndevices)
            loss_list.append(perturb_loss)
        loss_mean = np.mean(np.array(loss_list))
        print('current-sigma = {}, tolerent loss = {}, current loss = {}'.format(sigma_new, max_loss, loss_mean))
        #compare with original_loss
        if loss_mean <= max_loss and (sigma_max - sigma_min) < eps:
            # NOTE(review): success returns sigma itself; the fall-through
            # below returns 1/sigma**2 — same inconsistency as
            # eval_pac_weight, confirm which is intended.
            return sigma_new
        else:
            if loss_mean > max_loss:
                sigma_max = sigma_new
            else:
                sigma_min = sigma_new
    dlrm.load_state_dict(original_weight)
    return 1 / sigma_new**2
| StarcoderdataPython |
# !/usr/bin/env python3

# Interactive speech-to-text CLI: records from a (selectable) microphone,
# sends the audio to the Google Speech Recognition API, and optionally
# copies the transcript to the clipboard.

# Check for missing dependencies which are: SpeechRecognition, Pandas and PyAudio
try:
    import os
    import speech_recognition as sr
    from pandas.io.clipboard import copy as cp
except ImportError as e:
    exit(f"\033[91mMissing dependency: {e.name}. Please check your installation!")

# Colors used to make the terminal look nicer
GREEN, YELLOW, RED, CYAN, PURPLE, RESET = '\033[92m', '\033[93m', '\033[91m', '\033[96m', '\033[95m', '\033[0m'
restart = "yes"

# Always check for ctrl+c
try:
    # Create recognizer and microphone
    r = sr.Recognizer()
    m = sr.Microphone()
    mics = m.list_microphone_names()

    # Clear up all debug messages from PyAudio and print the heading
    os.system('cls' if os.name == 'nt' else 'clear')
    print(""
          f"\n ███████{RED}╗{RESET} █████{YELLOW}╗{RESET} ███████{GREEN}╗{RESET}████████{CYAN}╗{RESET}██████{PURPLE}╗{RESET} "
          f"\n ██{RED}╔════╝{RESET}██{YELLOW}╔══{RESET}██{YELLOW}╗{RESET}██{GREEN}╔════╝{CYAN}╚══{RESET}██{CYAN}╔══╝{RESET}██{PURPLE}╔══{RESET}██{PURPLE}╗{RESET}"
          f"\n █████{RED}╗{RESET} ███████{YELLOW}║{RESET}███████{GREEN}╗ {RESET}██{CYAN}║{RESET} ██████{PURPLE}╔╝{RESET}"
          f"\n ██{RED}╔══╝{RESET} ██{YELLOW}╔══{RESET}██{YELLOW}║{GREEN}╚════{RESET}██{GREEN}║ {RESET}██{CYAN}║{RESET} ██{PURPLE}╔══{RESET}██{PURPLE}╗{RESET}"
          f"\n ██{RED}║{RESET} ██{YELLOW}║{RESET} ██{YELLOW}║{RESET}███████{GREEN}║{RESET} ██{CYAN}║{RESET} ██{PURPLE}║ {RESET}██{PURPLE}║{RESET}"
          f"\n {RED}╚═╝ {YELLOW}╚═╝ ╚═╝{GREEN}╚══════╝ {CYAN}╚═╝ {PURPLE}╚═╝ ╚═╝{RESET}"
          "\n by DevEmperor\n\n")

    # Check if there are no input devices and then exit
    if len(mics) == 0:
        exit(f"[{RED}-{RESET}] Could't find any microphone/soundcard on your device. Exiting! :-(")

    # Ask the user if he wants to choose the mic manually
    if input(f"[{CYAN}?{RESET}] Do you want to choose your microphone manually? [yes/NO] ").lower() == "yes":
        print(f"[{GREEN}+{RESET}] These microphones/soundcards were found:")
        for index, name in enumerate(mics):  # List all devices
            print(f" {index} = {name}")
        while True:
            mic_index = input(f"[{CYAN}?{RESET}] Which input device do you want to use? ")
            if mic_index.isdigit():
                if int(mic_index) > len(mics) - 1:
                    # Index out of range -> ask again.
                    continue
                m = sr.Microphone(device_index=int(mic_index))  # Override the microphone with another device_index
                break
            continue

    # Load the list of supported language codes shipped next to the script.
    with open(os.path.dirname(os.path.realpath(__file__)) + "/language_codes.dat", "r") as f:
        codes = f.read().splitlines()  # Read all language-codes
    while True:
        lang = input(f"[{CYAN}?{RESET}] Enter the language code of your language [en-US]: ")
        if lang == "":  # default language
            lang = "en-US"
        elif len(lang) == 2 and not any(lang in s for s in codes):  # check for short codes
            print(f"[{RED}-{RESET}] I don't know this language...")
            continue
        elif len(lang) != 2 and lang not in codes:  # check for long codes
            print(f"[{RED}-{RESET}] I don't know this language...")
            continue
        break

    # Anything but an explicit "no" enables the clipboard copy.
    copy = input(f"[{CYAN}?{RESET}] Should I copy the result to your clipboard? [YES/no] ")
    if copy.lower() == "no":
        copy = False
    else:
        copy = True

    input(f"[{CYAN}?{RESET}] Press any key to start...\n")  # last confirmation

    while restart.lower() != "no":  # always repeat the recording/recognation
        print(f"[{GREEN}+{RESET}] Now start talking and I'll try to recognize what you said...")
        with m as source:
            try:
                r.pause_threshold = 2  # Stop after 2 seconds of silence
                audio = r.listen(m, timeout=10)  # Start listening...
            except sr.WaitTimeoutError:  # ... and exit if there isn't any signal
                exit(f"[{RED}-{RESET}] Didn't get any input signal. Exiting! :-(")
        print(f"[{GREEN}+{RESET}] Recognizing...")
        try:
            result = str(r.recognize_google(audio, language=lang)).strip('\n')  # Start recognization...
            result = result[0].upper() + result[1:]  # (to start with a capital letter)
            print(f"[{GREEN}+{RESET}] I think you said:\n\n\033[1m{result}\n")  # ...print the result...
            if copy:
                cp(result)  # ...and copy it to your system clipboard
                print(f"[{GREEN}+{RESET}] Copied this result to your clipboard! ;-)")
        except sr.UnknownValueError:  # Error if nothing was said...
            print(f"[{RED}-{RESET}] Couldn't understand your speech!")
        except sr.RequestError as e:  # ...or if Google couldn't handle the request
            print(f"[{RED}-{RESET}] Could not request results from Google Speech Recognition service; {e}")
        restart = input(f"[{CYAN}?{RESET}] Do you want me to recognize something else? [YES/no] ")  # Ask for repeat

    print(f"{GREEN}Have a nice day!")
    exit(0)
except KeyboardInterrupt:
    print(f"\n{RED}Exiting... Bye!")
| StarcoderdataPython |
3211389 | <gh_stars>1-10
import logging
# Define the name of the logger for the package
logger = logging.getLogger('swallow')

# Process exit codes used across the package.
EXIT_SUCCESS = 0
EXIT_IO_ERROR = 11        # file-system / I/O failure
EXIT_USER_INTERRUPT = 21  # aborted by the user (e.g. Ctrl-C)
| StarcoderdataPython |
3355396 | from protorpc import messages
from protorpc import protojson
import bleach
import grow
import os
import requests
try:
from HTMLParser import HTMLParser
except ImportError:
from html.parser import HTMLParser
class Error(Exception):
    """Base exception for this preprocessor module."""
class AttributeMessage(messages.Message):
    """Allow-list of HTML attributes permitted on one tag during
    bleach sanitization of job descriptions."""
    tag = messages.StringField(1)
    attributes = messages.StringField(2, repeated=True)
class JazzhrPreprocessor(grow.Preprocessor):
    """Grow preprocessor that mirrors open JazzHR job postings into a
    local Grow collection, sanitizing their HTML along the way.

    Stale documents whose jobs are no longer open are deleted (except
    files prefixed with an underscore, e.g. _blueprint.yaml).
    """
    KIND = 'jazzhr'
    # JazzHR (formerly "Resumator") v1 API endpoints; placeholders are
    # filled in at request time.
    JOBS_URL = 'https://api.resumatorapi.com/v1/jobs/status/open/confidential/false/private/false?apikey={api_key}'
    JOB_URL = 'https://api.resumatorapi.com/v1/jobs/{job_id}?apikey={api_key}'

    class Config(messages.Message):
        # Preprocessor configuration as declared in the pod spec.
        api_key = messages.StringField(1)
        jobs_collection = messages.StringField(2)
        allowed_html_tags = messages.StringField(4, repeated=True)
        allowed_html_attributes = messages.MessageField(AttributeMessage, 5, repeated=True)

    def bind_jobs(self, api_key, collection_path):
        """Fetch the list of open jobs and sync them into the collection.

        Raises Error on a non-200 API response.
        """
        url = JazzhrPreprocessor.JOBS_URL.format(api_key=api_key)
        resp = requests.get(url)
        if resp.status_code != 200:
            raise Error('Error requesting -> {}'.format(url))
        content = resp.json()
        self._bind(collection_path, content)

    def _parse_entry(self, item):
        """Normalize one raw job dict in place and return it."""
        if item.get('title'):
            # Grow uses $title as a document's built-in title field.
            item['$title'] = item.pop('title')
        # Salary and applicant details are stripped before publishing.
        if item.get('maximum_salary'):
            del item['maximum_salary']
        if item.get('minimum_salary'):
            del item['minimum_salary']
        if item.get('job_applicants'):
            del item['job_applicants']
        if item.get('content'):
            item['content'] = self._parse_content(item.get('content'))
        if item.get('compliance'):
            for i, row in enumerate(item['compliance']):
                item['compliance'][i]['description'] = \
                    self._parse_content(row['description'])
        return item

    def _parse_content(self, content):
        """Unescape HTML entities, then strip disallowed tags/attributes
        with bleach (only when allowed_html_tags is configured)."""
        # NOTE(review): HTMLParser.unescape is deprecated; html.unescape is
        # the modern equivalent — confirm before upgrading Python.
        parser = HTMLParser()
        content = parser.unescape(content)
        tags = self.config.allowed_html_tags
        if tags:
            attributes = {}
            if self.config.allowed_html_attributes:
                for attribute in self.config.allowed_html_attributes:
                    attributes[attribute.tag] = attribute.attributes
            content = bleach.clean(
                content, tags=tags, attributes=attributes, strip=True)
        return content

    def _get_single_job(self, item):
        """Fetch the full job record for one job-list entry.

        Raises Error on a non-200 API response.
        """
        api_key = self.config.api_key
        url = JazzhrPreprocessor.JOB_URL.format(
            api_key=api_key, job_id=item['id'])
        resp = requests.get(url)
        if resp.status_code != 200:
            raise Error('Error requesting -> {}'.format(url))
        content = resp.json()
        return content

    def _bind(self, collection_path, items):
        """Write one <job id>.yaml per job and delete stale documents."""
        existing_paths = self.pod.list_dir(collection_path)
        existing_basenames = [path.lstrip('/') for path in existing_paths]
        new_basenames = []
        for item in items:
            item = self._get_single_job(item)
            item = self._parse_entry(item)
            path = os.path.join(collection_path, '{}.yaml'.format(item['id']))
            self.pod.write_yaml(path, item)
            self.pod.logger.info('Saving -> {}'.format(path))
            new_basenames.append(os.path.basename(path))
        basenames_to_delete = set(existing_basenames) - set(new_basenames)
        for basename in basenames_to_delete:
            # Skip deleting _blueprint, etc.
            if basename.startswith('_'):
                continue
            path = os.path.join(collection_path, basename)
            self.pod.delete_file(path)
            self.pod.logger.info('Deleting -> {}'.format(path))

    def run(self, *args, **kwargs):
        """Preprocessor entry point invoked by Grow."""
        self.bind_jobs(
            self.config.api_key,
            self.config.jobs_collection)
| StarcoderdataPython |
138824 | '''
Created by <NAME> 2020
# Read in WAV file into Python Class
sound1 = AudioProcessing('input.wav')
# Set the speed of the audio
sound1.set_audio_speed(0.5)
# Set the pitch of the audio
sound1.set_audio_pitch(2)
# Reverse the content of the audio
sound1.set_reverse()
# Add an echo to the audio
sound1.set_echo(1)
# Applies a bandpass filter between the (<low>, <high>) range of frequencies
sound.set_bandpass(50, 2600)
# Save the resulting processed audio data into a file
sound1.save_to_file('out.wav')
'''
import sys, wave
import numpy as np
from numpy import array, int16
from scipy.signal import lfilter, butter
from scipy.io.wavfile import read,write
from scipy import signal
import random
class AudioProcessing(object):
    """In-memory WAV processing: speed, pitch, echo, volume, reverse and
    Butterworth filtering over 16-bit PCM sample data.

    Each set_* method rewrites self.audio_data in place; chain them and
    finish with save_to_file().
    """
    __slots__ = ('audio_data', 'sample_freq')

    def __init__(self, input_audio_path):
        # NOTE(review): samples are used as loaded; the stereo->mono
        # conversion is commented out, so multi-channel behavior of the
        # set_* methods is unverified — confirm expected inputs.
        self.sample_freq, self.audio_data = read(input_audio_path)
        # self.audio_data = AudioProcessing.convert_to_mono_audio(self.audio_data)

    def save_to_file(self, output_path):
        '''Writes a WAV file representation of the processed audio data'''
        write(output_path, self.sample_freq, array(self.audio_data, dtype = int16))

    def set_audio_speed(self, speed_factor):
        '''Sets the speed of the audio by a floating-point factor'''
        # Resample by index stepping: factor > 1 speeds up (drops samples),
        # factor < 1 slows down (repeats samples).
        sound_index = np.round(np.arange(0, len(self.audio_data), speed_factor))
        self.audio_data = self.audio_data[sound_index[sound_index < len(self.audio_data)].astype(int)]

    def set_echo(self, delay):
        '''Applies an echo that is 0...<input audio duration in seconds> seconds from the beginning'''
        output_audio = np.zeros(len(self.audio_data))
        output_delay = delay * self.sample_freq
        for count, e in enumerate(self.audio_data):
            # NOTE(review): for count < delay the index below is negative,
            # so Python indexing wraps and mixes in samples from the END of
            # the buffer — confirm this is intended.
            output_audio[count] = e + self.audio_data[count - int(output_delay)]
        self.audio_data = output_audio

    def set_volume(self, level):
        '''Sets the overall volume of the data via floating-point factor'''
        output_audio = np.zeros(len(self.audio_data))
        for count, e in enumerate(self.audio_data):
            output_audio[count] = (e * level)
        self.audio_data = output_audio

    def set_reverse(self):
        '''Reverses the audio'''
        self.audio_data = self.audio_data[::-1]

    def set_audio_pitch(self, n, window_size=2**13, h=2**11):
        '''Sets the pitch of the audio to a certain threshold'''
        # Shift by n semitones: time-stretch by 1/factor, then resample back
        # to the original duration at the new pitch.
        factor = 2 ** (1.0 * n / 12.0)
        self._set_stretch(1.0 / factor, window_size, h)
        self.audio_data = self.audio_data[window_size:]
        self.set_audio_speed(factor)

    def _set_stretch(self, factor, window_size, h):
        """Phase-vocoder style time stretch by `factor` using Hanning
        windows of `window_size` and hop size `h`."""
        phase = np.zeros(window_size)
        hanning_window = np.hanning(window_size)
        result = np.zeros(int(len(self.audio_data) / factor + window_size))
        for i in np.arange(0, len(self.audio_data) - (window_size + h), h*factor):
            # Two potentially overlapping subarrays
            a1 = self.audio_data[int(i): int(i + window_size)]
            a2 = self.audio_data[int(i + h): int(i + window_size + h)]
            # The spectra of these arrays
            s1 = np.fft.fft(hanning_window * a1)
            s2 = np.fft.fft(hanning_window * a2)
            # Rephase all frequencies
            phase = (phase + np.angle(s2/s1)) % 2*np.pi
            a2_rephased = np.fft.ifft(np.abs(s2)*np.exp(1j*phase))
            i2 = int(i / factor)
            result[i2: i2 + window_size] += hanning_window*a2_rephased.real
        # normalize (16bit)
        result = ((2 ** (16 - 4)) * result/result.max())
        self.audio_data = result.astype('int16')

    def set_lowpass(self, cutoff_low, order=5):
        '''Applies a low pass filter'''
        # Butterworth design + zero-phase filtering (filtfilt).
        nyquist = self.sample_freq / 2.0
        cutoff = cutoff_low / nyquist
        x, y = signal.butter(order, cutoff, btype='lowpass', analog=False)
        self.audio_data = signal.filtfilt(x, y, self.audio_data)

    def set_highpass(self, cutoff_high, order=5):
        '''Applies a high pass filter'''
        nyquist = self.sample_freq / 2.0
        cutoff = cutoff_high / nyquist
        x, y = signal.butter(order, cutoff, btype='highpass', analog=False)
        self.audio_data = signal.filtfilt(x, y, self.audio_data)

    def set_bandpass(self, cutoff_low, cutoff_high, order=5):
        '''Applies a band pass filter'''
        cutoff = np.zeros(2)
        nyquist = self.sample_freq / 2.0
        cutoff[0] = cutoff_low / nyquist
        cutoff[1] = cutoff_high / nyquist
        x, y = signal.butter(order, cutoff, btype='bandpass', analog=False)
        self.audio_data = signal.filtfilt(x, y, self.audio_data)

    @staticmethod
    def convert_to_mono_audio(input_audio):
        '''Returns a numpy array that represents the mono version of a stereo input'''
        output_audio = []
        temp_audio = input_audio.astype(float)
        for e in temp_audio:
            # Average the two channels sample by sample.
            output_audio.append((e[0] / 2) + (e[1] / 2))
        return np.array(output_audio, dtype = 'int16')
# Example
# applyDSP_worsenRand("1.wav", "out.wav")
def applyDSP_worsenRand(file_in, file_out):
    """Apply a randomized chain of slight degradations (volume, optional
    echo, speed, band/high-pass filtering) to file_in and write file_out."""
    sound1 = AudioProcessing(file_in)
    vol = 1-random.randint(-5, 5)/100
    echo = random.randint(3, 5)/100
    # NOTE(review): randint(5, 5) always returns 5, so speed is the
    # constant 0.95 — possibly meant randint(-5, 5); confirm.
    speed = 1-random.randint(5, 5)/100
    sound1.set_volume(vol)
    if random.randint(0, 1) == 1:
        sound1.set_echo(echo) # can cause audio crackling
    sound1.set_audio_speed(speed)
    band_1 = random.randint(50, 200)
    band_2 = random.randint(3000, 10000)
    highpass = random.randint(10, 350)
    sound1.set_highpass(highpass)
    sound1.set_bandpass(band_1, band_2)
    sound1.save_to_file(file_out)
| StarcoderdataPython |
3204560 | import warnings
import numpy as np
from .utils import *
from .exceptions import NotEvaluatedError, NotSupposedToHappenError
class TreeNode:
    """Decision tree node
    We would not want to define another node in the constructor
    so the API requires you to set the left and right
    node attributes manually.
    Params
    ------
    data: tuple of (X,y) where X and y are `ndarray`s
        Training data
    idx: int
        Node id
    depth: int
        Depth of node in the tree it is in
    """
    def __init__(self,
                 data=None, categorical_features=None,
                 idx=None,
                 depth=None):
        self.data = gen(*data)
        self.idx = idx
        self.depth = depth
        self.categorical_features = categorical_features
        self.qn = None        # threshold/category of the chosen question
        self.col = None       # feature column index of the chosen question
        self.pred = None      # prediction value; non-None marks a leaf
        self.left = None
        self.right = None
        self.criteria = None  # "gini" or "mse", set by subclasses
        self.evaluated = False

    def split(self, max_depth, features_to_select, splits_to_select):
        """
        Performs a split on the self.data.
        Returns none if no split should be done.
        This method makes self become 'evaluated'.
        Criterion = gini
        Returns
        -------
        left_data: ndarray
            Feature
        right_data: ndarray
            Feature
        """
        # 1. Get the features and the targets
        features, y = next(self.data)
        if features.ndim == 1:
            features = features[:, None]
        # 2. Check conditions to not split.
        # Terminal nodes must provide prediction value.
        if len(np.unique(y)) == 1:
            self.pred = np.unique(y)[0]
            self.evaluated = True
            return None, None
        elif self.depth == max_depth:
            # Majority class for classification, mean target for regression.
            self.pred = np.argmax(np.bincount(
                y)) if self.criteria == "gini" else np.mean(y)
            self.evaluated = True
            return None, None
        elif self.depth > max_depth:
            raise NotSupposedToHappenError
        # 3. Prepare features to loop through.
        feature_indices_to_select = select_features(
            n_features=features.shape[1], method=features_to_select)
        # 4. For categorical features, we want to perform a one-time
        # calculation of the no. of classes. This will be used later
        # when calculating the gini coefficient. Also, set it to empty
        # list if there're no categorical features.
        n_classes = np.max(y)+1 if self.criteria == "gini" else None
        if self.categorical_features is None:
            self.categorical_features = []
        # 5. Prepare arrays to store criteria and thresholds
        # from looping through features and thresholds.
        # xxx means 'category' (categorical) or 'split' (numerical).
        best_xxx_from_every_feature = [0]*features.shape[1]
        best_gain_from_every_feature = [0]*features.shape[1]
        # 6. Before looping through to get criteria, find the dataset's
        # initial criterion.
        criterion_initial = calculate_criterion_initial(self.criteria, y)
        print(f"Criterion initial: {criterion_initial}")
        # 7. Loop through every feature and threshold
        for col_num, feature in enumerate(features.T):
            if col_num not in feature_indices_to_select:
                continue
            if len(np.unique(feature)) == 1:
                # FIXME what happens if there is only one feature
                # and that feature has only 1 unique number?
                # NOTE(review): this uses bincount/argmax even when
                # self.criteria == "mse" (regression) — confirm intended.
                warnings.warn(
                    f"Encountered only one unique feature in col {col_num}")
                self.pred = np.argmax(np.bincount(y))
                self.evaluated = True
                return None, None
            criterion_gains_for_one_feature = []
            # 7a. Working with categorical features
            if col_num in self.categorical_features:
                print()
                print(f"X[:,{col_num}]: {feature}")
                print(f" y: {y}")
                print()
                # i) Loop through every threshold. For categorical features,
                # thresholds are the categories themselves.
                for category in np.unique(feature):
                    left, right = y[feature == category], y[feature != category]
                    criterion = calculate_criterion(
                        self.criteria, left, right, n_classes)
                    weights = np.array([
                        len(left)/len(feature),
                        1-len(left)/len(feature)])
                    weighted_criterion = np.sum(weights * criterion)
                    criterion_gain = criterion_initial - weighted_criterion
                    criterion_gains_for_one_feature.append(criterion_gain)
                    print(left, right)
                    print(f"Criterion gain: {criterion_gain}")
                # v)
                # NOTE(review): the categorical branch stores the INDEX into
                # np.unique(feature), while the numerical branch stores the
                # threshold VALUE; self.qn is later compared against raw
                # feature values — confirm this index is the intended value.
                best_category_index = np.argmax(
                    criterion_gains_for_one_feature)
                best_xxx_from_every_feature[col_num] = best_category_index
                best_gain_from_every_feature[col_num] = \
                    criterion_gains_for_one_feature[best_category_index]
            # 7b. Working with numerical features
            else:
                # i) Sort
                sort_indices = np.argsort(feature)
                y_sorted = y[sort_indices]
                # ii) Find uniques in feature
                unique_samples = np.unique(feature)
                n_unique_samples = len(unique_samples)
                print()
                print(f"X[:,{col_num}]: {unique_samples}")
                print(f" y: {y_sorted}")
                print()
                # iii)
                split_indexes_to_try = select_split_indices(n_unique_samples,
                                                            splits_to_select)
                # iv) Loop through every threshold. For categorical features,
                # thresholds are the categories themselves.
                for split_index in split_indexes_to_try:
                    left, right = np.split(y_sorted, [split_index])
                    criterion = calculate_criterion(
                        self.criteria, left, right, n_classes)
                    weights = np.array([
                        split_index/n_unique_samples,
                        1-split_index/n_unique_samples])
                    weighted_criterion = np.sum(weights * criterion)
                    criterion_gain = criterion_initial - weighted_criterion
                    criterion_gains_for_one_feature.append(criterion_gain)
                    print(left, right)
                    print(f"Criterion gain: {criterion_gain}")
                # v)
                best_split_index = np.argmax(criterion_gains_for_one_feature)
                best_gain_from_every_feature[col_num] = \
                    criterion_gains_for_one_feature[best_split_index]
                best_xxx_from_every_feature[col_num] = \
                    unique_samples[best_split_index]
        print()
        print(
            f"Best criterion gain from every feature:\n{best_gain_from_every_feature}")
        print(
            f"Best split/category from every feature:\n{best_xxx_from_every_feature}")
        # 8. If there are no useful splits, make this node a terminal
        if np.max(best_gain_from_every_feature) < 0.05:
            self.pred = np.argmax(np.bincount(
                y)) if self.criteria == "gini" else np.mean(y)
            self.evaluated = True
            return None, None
        # 9. Find the right question to ask
        self.col = np.argmax(best_gain_from_every_feature)
        self.qn = best_xxx_from_every_feature[self.col]
        print()
        print(f"Best question to ask: Is X[:,{self.col}]<={self.qn}")
        # 10. Split the features into left and right
        # based on the above question.
        best_feature = features[:, self.col]
        if self.col in self.categorical_features:
            left_indices = best_feature == self.qn
            right_indices = best_feature != self.qn
        else:
            left_indices = best_feature <= self.qn
            right_indices = best_feature > self.qn
        left_X_y = features[left_indices], y[left_indices]
        right_X_y = features[right_indices], y[right_indices]
        self.evaluated = True
        return left_X_y, right_X_y

    def __call__(self, x):
        """Calls a node recursively until it hits a prediction"""
        if x.ndim != 1:
            raise ValueError("x must be dim 1")
        if self.pred is not None:
            return self.pred
        elif self.col in self.categorical_features and x[self.col] == self.qn:
            return self.left(x)
        elif self.col not in self.categorical_features and x[self.col] <= self.qn:
            return self.left(x)
        else:
            return self.right(x)

    def __repr__(self):
        # Unevaluated nodes show placeholders; evaluated nodes show the
        # chosen question and child kinds.
        if not self.evaluated:
            return f"\n Id: {self.idx}\n" + \
                f"Depth: {self.depth}\n\n" + \
                f" Qn: ?\n" + \
                f" Left: ?\n" + \
                f"Right: ?\n\n" + \
                f" Pred: ?\n"
        else:
            return f"\n Id: {self.idx}\n" + \
                f"Depth: {self.depth}\n\n" + \
                f" Qn: {self._qn}?\n" + \
                f" Left: {self._left}\n" + \
                f"Right: {self._right}\n\n" + \
                f" Pred: {self._pred}\n"

    @property
    def is_branch(self):
        """The `self.pred` value determines if node is branch"""
        if self.evaluated:
            return self.pred is None
        else:
            raise NotEvaluatedError

    @property
    def is_leaf(self):
        """The `self.pred` value determines if node is leaf"""
        if self.evaluated:
            return not self.is_branch
        else:
            raise NotEvaluatedError

    @property
    def _qn(self):
        """Internal property method used for representing object"""
        if self.qn is None:
            return ""
        else:
            return f"Is X[:,{self.col}] <= {self.qn}?"

    @property
    def _left(self):
        """Internal property method used for representing object"""
        if isinstance(self.pred, int):
            return "-"
        elif self.left is None:
            return "?"
        elif self.left.evaluated is False:
            return "?"
        elif self.left.is_leaf:
            return "(leaf)"
        else:
            return "(branch)"

    @property
    def _right(self):
        """Internal property method used for representing object"""
        if isinstance(self.pred, int):
            return "-"
        elif self.right is None:
            return "?"
        elif self.right.evaluated is False:
            return "?"
        elif self.right.is_leaf:
            return "(leaf)"
        else:
            return "(branch)"

    @property
    def _pred(self):
        """Internal property method used for representing object"""
        if self.pred is None:
            return "-"
        else:
            return self.pred
class ClassificationTreeNode(TreeNode):
    """TreeNode specialised for classification (Gini impurity criterion)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.criteria = "gini"
class RegressionTreeNode(TreeNode):
    """TreeNode specialised for regression (mean-squared-error criterion)."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.criteria = "mse"
    # The previous __repr__ override only delegated to super().__repr__()
    # and has been removed; inheritance provides identical behavior.
class Stack:
    """A minimal LIFO stack backed by a Python list.

    The top of the stack is the end of the underlying list.
    """

    def __init__(self):
        self.data = []

    def push(self, *values):
        """Push one or more values onto the stack, left to right."""
        for value in values:
            self.data.append(value)

    def pop(self):
        """Remove and return the most recently pushed value."""
        return self.data.pop()

    def __len__(self):
        return len(self.data)

    def __repr__(self):
        return repr(self.data)

    @property
    def is_not_empty(self):
        """True while at least one value remains on the stack."""
        return bool(self.data)
| StarcoderdataPython |
1781517 | """Pre-configured commands."""
from . import ffmpeg, icecast, sox
from ._aria2 import Aria2
from .icecast import Icecast
from ._redis import Redis
# Pre-built shell command templates. Multiline strings are intended to be
# whitespace-split into argv; placeholders like {port} are filled with
# str.format at call time.

# Endless signed-16-bit-LE silence source on stdout.
SRC_SILENCE = "ffmpeg -re -f s16le -i /dev/zero -f s16le -"
# Decode an input file to raw stereo 44.1 kHz s16le on stdout.
SRC_FILE = "ffmpeg -ac 2 -i (unknown) -f s16le -ar 44.1k -acodec pcm_s16le -"
# Same as SRC_FILE but loudness-normalized (EBU R128 via loudnorm).
# Fix: the command previously began with "fmpeg" (typo for "ffmpeg").
PYP_NORMED = """ffmpeg -ac 2 -i (unknown) -af loudnorm=I=-16:TP=-1.5:LRA=11
 -ac 2 -f s16le -ar 44.1k -acodec pcm_s16le -"""
# Encode raw s16le from stdin to MP3 and stream over UDP.
DST_UDP = """ffmpeg -re -ac 2 -ar 44.1k -f s16le -i -
 -vn -acodec mp3 -q:a 0 -f mp3 udp://{ipaddress}:{port}"""
# Encode raw s16le from stdin to MP3 and push to an Icecast mount.
DST_ICECAST = """ffmpeg -re -ac 2 -ar 44.1k -f s16le -i -
 -vn -acodec mp3 -q:a 0 -f mp3
 icecast://source:{password}@{host}:{port}/{mount}"""
# Play raw s16le from stdin on the local soundcard (SoX `play`).
DST_SPEAKER = """play -t raw -r 44.1k -e signed-integer
 -b 16 --endian little -c 2 -"""
| StarcoderdataPython |
3322313 | import krakenex
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
import pandas
import seaborn
import datetime
pandas.set_option('display.width', 1000)
import pprint
pp = pprint.PrettyPrinter()
import itertools
# Every ~5 seconds: pull Kraken ticker data, build a directed currency
# exchange graph with fee-adjusted conversion rates, and print the
# round-trip rate of one fixed ETH -> BTC -> GBP cycle (a rate > 1 would
# indicate an arbitrage opportunity).
# NOTE(review): this is Python 2 code (dict.iteritems, print statement).
for index in itertools.count():
    now = datetime.datetime.now()
    k = krakenex.API()
    k.load_key('lost.key')
    asset_pairs = k.query_public("AssetPairs")['result']
    key_string = ','.join(asset_pairs)
    ticker = k.query_public("Ticker",{'pair': key_string})['result']
    # pp.pprint(asset_pairs)
    # pp.pprint(ticker)
    exchange_graph = nx.DiGraph()
    for asset_key, asset in asset_pairs.iteritems():
        # Skip dark-pool pairs (".d" suffix) and anything involving DASH.
        if asset_key.endswith('.d') or 'DASH' in asset_key:
            continue
        asset_name = str(asset_key.split('.')[0])
        base = str(asset['base'])
        quote = str(asset['quote'])
        assert asset_name == base + quote, '%s != %s + %s' % (asset_name, base, quote)
        assert asset_name in ticker, '`%s` not in ticker.' % asset_name
        # 'c' is Kraken's last-trade [price, volume] pair.
        last_price, last_volume = map(float, ticker[asset_name]['c'])
        fees = asset['fees']
        # Convert the first fee tier's percentage into a multiplicative
        # factor, e.g. 0.26% -> 0.9974.
        fee = 1-fees[0][1]/100.
        assert last_price > 0
        # Add both conversion directions, each discounted by the fee.
        exchange_graph.add_edge(base, quote, rate=last_price*fee)
        exchange_graph.add_edge(quote, base, rate=1. / last_price*fee)
    # pp.pprint(exchange_graph.edges())
    # pp.pprint(exchange_graph.nodes())
    # plt.figure(figsize=(12,12))
    # # pos = nx.spring_layout(exchange_graph)
    # pos = nx.circular_layout(exchange_graph)
    # nx.draw_networkx(
    # exchange_graph.to_undirected(),
    # pos,
    # node_color='orange',
    # with_labels=True,
    # font_size=8,
    # node_size=1000,
    # )
    # edge_labels = {e: '%.2e' % exchange_graph.get_edge_data(*e)['last_price'] for e in exchange_graph.edges()}
    # nx.draw_networkx_edge_labels(
    # exchange_graph,
    # pos,
    # edge_labels=edge_labels,
    # font_size=8,
    # bbox=dict(alpha=1, ec='w', fc='w', boxstyle='round', pad=0.1),
    # )
    # plt.show()
    # Fixed three-currency cycle: ETH -> BTC -> GBP -> ETH.
    cycle = ('XETH', 'XXBT', 'ZGBP')
    edges = tuple(zip(cycle[:-1], cycle[1:]) + [(cycle[-1], cycle[0])])
    rates = [exchange_graph.get_edge_data(*e)['rate'] for e in edges]
    # Product of the cycle's rates: > 1 means a (fee-adjusted) profit loop.
    rate = np.product(rates)
    print now, cycle, edges, rates, rate
    import time
    time.sleep(5)
# cycle_df = pandas.DataFrame({'cycle': map(tuple, nx.simple_cycles(exchange_graph))})
#
# def get_edges(row):
# cycle = row['cycle']
# return tuple(zip(cycle[:-1], cycle[1:]) + [(cycle[-1], cycle[0])])
# cycle_df['edges'] = cycle_df.apply(get_edges, axis=1)
#
# def get_rates(row):
# edges = row['edges']
# return [exchange_graph.get_edge_data(*e)['rate'] for e in edges]
# cycle_df['rates'] = cycle_df.apply(get_rates, axis=1)
#
# def get_rate(row):
# rates = row['rates']
# return np.product(rates)
# cycle_df['rate'] = cycle_df.apply(get_rate, axis=1)
#
# cycle_df['length'] = cycle_df.apply(lambda row: len(row['cycle']), axis=1)
#
# # print df.ix[df[df['length']==3]['rate'].idxmax()]
#
# records = []
# for index, row in cycle_df.iterrows():
# for currency in row['cycle']:
# records.append((currency, row['length'], row['rate']))
# df = pandas.DataFrame.from_records(records, columns=['currency', 'length', 'rate'])
#
# fg = seaborn.FacetGrid(
# data=df,
# col='currency',
# col_order=sorted(df['currency'].unique()),
# col_wrap=5,
# sharex=True,
# sharey=True,
# margin_titles=True
# )
#
# def facet(data, color):
# plt.scatter(data['length'], data['rate'], color=color, s=10)
#
# for (i, j, k), data in fg.facet_data():
# if k == 0:
# ax = fg.facet_axis(i, j)
# ax.axhline(y=1.0, linestyle='--', color='r', linewidth=1.)
#
# fg.map_dataframe(facet)
# fg.set_xlabels('cycle length')
# fg.set_ylabels('rate')
# fg.set(ylim=(1., 1.1))
# fg.set(xlim=(2, 8))
# plt.suptitle(str(now))
# plt.subplots_adjust(left=0.065, bottom=0.07, right=0.95, top=0.92, wspace=0.1, hspace=0.12)
# plt.show()
| StarcoderdataPython |
80268 | <filename>server_expects/matchers.py
# -*- coding: utf-8 -*-
from expects.matchers import Matcher, default_matcher
from .resources import (
package as package_resource,
host as host_resource,
path as path_resource
)
class _be_installed(Matcher):
    """Matches when a package resource reports itself as installed."""

    def _match(self, package):
        resource = self._resource_for(package)
        reasons = []
        if resource.version is not None:
            reasons.append('{!r} version is installed'.format(resource.current_version))
        # TODO: This error could be from a raised exception
        # instead of this obscure state.
        if hasattr(resource, 'failure_message'):
            reasons.append(resource.failure_message)
        return resource.is_installed, reasons

    def _resource_for(self, package):
        # Anything already exposing `is_installed` is treated as a resource.
        return package if hasattr(package, 'is_installed') else package_resource(package)
class _be_reachable(Matcher):
    """Matches when a host resource can be reached over the network."""

    def _match(self, host):
        resource = self._resource_for(host)
        reasons = [] if resource.is_resolvable else ['cannot be resolved']
        return resource.is_reachable, reasons

    def _resource_for(self, host):
        # Anything already exposing `is_reachable` is treated as a resource.
        return host if hasattr(host, 'is_reachable') else host_resource(host)
class _be_accessible(Matcher):
    """Matches when the given instance reports itself as accessible."""

    def _match(self, instance):
        # No diagnostic reasons are collected for this matcher.
        return (instance.is_accessible, [])
class _exists(Matcher):
    """Matches when the path resource exists on the filesystem."""

    def _match(self, path):
        resource = self._resource_for(path)
        return resource.exists, []

    def _resource_for(self, path):
        # Anything already exposing `exists` is treated as a resource.
        return path if hasattr(path, 'exists') else path_resource(path)
class _be_a_file(Matcher):
    """Matches when the path exists and is a regular file."""

    def _match(self, path):
        resource = self._resource_for(path)
        checks = (
            (not resource.exists, 'does not exist'),
            (resource.is_a_directory, 'is a directory'),
        )
        reasons = [message for failed, message in checks if failed]
        return resource.is_a_file, reasons

    def _resource_for(self, path):
        return path if hasattr(path, 'exists') else path_resource(path)
class _be_a_directory(Matcher):
    """Matches when the path exists and is a directory."""

    def _match(self, path):
        resource = self._resource_for(path)
        checks = (
            (not resource.exists, 'does not exist'),
            (resource.is_a_file, 'is a file'),
        )
        reasons = [message for failed, message in checks if failed]
        return resource.is_a_directory, reasons

    def _resource_for(self, path):
        return path if hasattr(path, 'exists') else path_resource(path)
class have_owner(Matcher):
    """Matches when the path's owner satisfies the expected value/matcher."""

    def __init__(self, expected):
        self._expected = expected

    def _match(self, path):
        resource = self._resource_for(path)
        reasons = [] if resource.exists else ['does not exist']
        matched, _ = default_matcher(self._expected)._match(resource.owner)
        return matched, reasons

    def _resource_for(self, path):
        return path if hasattr(path, 'exists') else path_resource(path)
class have_group(Matcher):
    """Matches when the path's group satisfies the expected value/matcher."""

    def __init__(self, expected):
        self._expected = expected

    def _match(self, path):
        resource = self._resource_for(path)
        reasons = [] if resource.exists else ['does not exist']
        matched, _ = default_matcher(self._expected)._match(resource.group)
        return matched, reasons

    def _resource_for(self, path):
        return path if hasattr(path, 'exists') else path_resource(path)
class have_mode(Matcher):
    """Matches when the path's permission mode satisfies the expected value."""

    def __init__(self, expected):
        self._expected = expected

    def _match(self, path):
        resource = self._resource_for(path)
        if resource.exists:
            reasons = ['has mode {}'.format(oct(resource.mode))]
        else:
            reasons = ['does not exist']
        matched, _ = default_matcher(self._expected)._match(resource.mode)
        return matched, reasons

    def _resource_for(self, path):
        return path if hasattr(path, 'exists') else path_resource(path)
# Module-level singleton instances: matchers taking no parameters are used
# directly in expectations, e.g. ``expect(pkg).to(be_installed)``.
be_installed = _be_installed()
be_reachable = _be_reachable()
be_accessible = _be_accessible()
exists = _exists()
be_a_file = _be_a_file()
be_a_directory = _be_a_directory()
# Public API; the parameterised matchers (have_*) are exported as classes.
__all__ = [
    'be_installed',
    'be_reachable',
    'be_accessible',
    'exists',
    'be_a_file',
    'be_a_directory',
    'have_owner',
    'have_group',
    'have_mode'
]
| StarcoderdataPython |
1725324 | """CLI"""
import sys
import datetime
import click
from advent_of_code import tools
today = datetime.date.today()
# Advent of Code opens on December 1st, so the current year only becomes a
# valid puzzle year once December starts.
MAX_YEAR = today.year if today.month == 12 else today.year - 1
# Root command group; subcommands register themselves via @aoc.command(...).
@click.group()
def aoc():
    """Advent of Code CLI"""
    pass
@aoc.command("solve")
@click.argument("year", type=click.IntRange(min=2015, max=MAX_YEAR))
@click.argument("day", type=click.IntRange(min=1, max=25))
@click.option(
    "--file", "-f", type=click.File(lazy=True), required=True,
    help="Path to file from which to read this.")
def solve(year, day, file=None):
    """Runs the appropriate solution for the given year and day."""
    # Look up the solver function registered for this puzzle.
    func_to_call = tools.get_func(year, day)
    # NOTE(review): with required=True above, click always supplies `file`,
    # so this stdin fallback is unreachable dead code -- either the option
    # was meant to be optional or the branch can be removed; confirm intent.
    if file is None:
        for _ in range(2):
            next(sys.stdin)
        inp = sys.stdin # ignore the first two inputs.
        output = func_to_call(inp)
    else:
        output = func_to_call(file)
    print(output)
@aoc.command("create")
@click.argument("year", type=click.IntRange(min=2015, max=MAX_YEAR))
@click.argument("day", type=click.IntRange(min=1, max=25))
def create(year, day):
    """Creates a new templatized solution file"""
    # Scaffold the solution module, then report where it was written.
    location = tools.create_file(year, day)
    print(f"Created a new solution at {location}")
| StarcoderdataPython |
1735448 | """
# My first app
Here's our first attempt at using data to create a table:
"""
import streamlit as st
import pandas as pd
# Column-wise construction of the demo table; Streamlit's "magic" renders
# the bare `df` expression at the end of this block.
df = pd.DataFrame.from_dict({
    'first column': [1, 2, 3, 4],
    'second column': [10, 20, 30, 40]
})
df
137777 | from .packet_buffer import PacketBuffer
from zlib import compress
from minecraft.networking.types import (
VarInt, Enum
)
class Packet(object):
    """Base class for a Minecraft protocol packet.

    Subclasses declare an `id' and a `definition' (or override the
    corresponding class methods); this class then provides generic
    reading, writing (with optional zlib compression) and repr support.
    """
    packet_name = "base"
    id = None
    definition = None
    # To define the packet ID, either:
    # 1. Define the attribute `id', of type int, in a subclass; or
    # 2. Override `get_id' in a subclass and return the correct packet ID
    # for the given ConnectionContext. This is necessary if the packet ID
    # has changed across protocol versions, for example.
    @classmethod
    def get_id(cls, context):
        return cls.id
    # To define the network data layout of a packet, either:
    # 1. Define the attribute `definition', a list of fields, each of which
    # is a dict mapping attribute names to data types; or
    # 2. Override `get_definition' in a subclass and return the correct
    # definition for the given ConnectionContext. This may be necessary
    # if the layout has changed across protocol versions, for example; or
    # 3. Override the methods `read' and/or `write_fields' in a subclass.
    # This may be necessary if the packet layout cannot be described as a
    # simple list of fields.
    @classmethod
    def get_definition(cls, context):
        return cls.definition
    def __init__(self, context=None, **kwargs):
        # Assigning `context' also resolves `id' and `definition' via the
        # property setter below; remaining keywords become field values.
        self.context = context
        self.set_values(**kwargs)
    @property
    def context(self):
        return self._context
    @context.setter
    def context(self, _context):
        self._context = _context
        self._context_changed()
    def _context_changed(self):
        # Re-resolve the (possibly protocol-version-dependent) packet id and
        # field definition whenever the connection context changes.
        if self._context is not None:
            self.id = self.get_id(self._context)
            self.definition = self.get_definition(self._context)
        else:
            self.id = None
            self.definition = None
    def set_values(self, **kwargs):
        """Set packet fields from keyword arguments; returns self."""
        for key, value in kwargs.items():
            setattr(self, key, value)
        return self
    def read(self, file_object):
        """Populate this packet's fields from `file_object' per `definition'."""
        for field in self.definition:
            for var_name, data_type in field.items():
                value = data_type.read_with_context(file_object, self.context)
                setattr(self, var_name, value)
    # Writes a packet buffer to the socket with the appropriate headers
    # and compressing the data if necessary
    def _write_buffer(self, socket, packet_buffer, compression_threshold):
        # compression_threshold of None means compression is disabled
        if compression_threshold is not None:
            # Chained comparison: compress only when the payload exceeds the
            # threshold AND the threshold is not the -1 "disabled" sentinel.
            if len(packet_buffer.get_writable()) > compression_threshold != -1:
                # compress the current payload
                packet_data = packet_buffer.get_writable()
                compressed_data = compress(packet_data)
                packet_buffer.reset()
                # write out the length of the uncompressed payload
                VarInt.send(len(packet_data), packet_buffer)
                # write the compressed payload itself
                packet_buffer.send(compressed_data)
            else:
                # write out a 0 to indicate uncompressed data
                packet_data = packet_buffer.get_writable()
                packet_buffer.reset()
                VarInt.send(0, packet_buffer)
                packet_buffer.send(packet_data)
        VarInt.send(len(packet_buffer.get_writable()), socket) # Packet Size
        socket.send(packet_buffer.get_writable()) # Packet Payload
    def write(self, socket, compression_threshold=None):
        """Serialize this packet (id header + fields) and send it on `socket'."""
        # buffer the data since we need to know the length of each packet's
        # payload
        packet_buffer = PacketBuffer()
        # write packet's id right off the bat in the header
        VarInt.send(self.id, packet_buffer)
        # write every individual field
        self.write_fields(packet_buffer)
        self._write_buffer(socket, packet_buffer, compression_threshold)
    def write_fields(self, packet_buffer):
        # Write the fields comprising the body of the packet (excluding the
        # length, packet ID, compression and encryption) into a PacketBuffer.
        for field in self.definition:
            for var_name, data_type in field.items():
                data = getattr(self, var_name)
                data_type.send_with_context(data, packet_buffer, self.context)
    def __repr__(self):
        # NOTE(review): the local name `str' shadows the builtin inside this
        # method; harmless here, but worth renaming.
        str = type(self).__name__
        if self.id is not None:
            str = '0x%02X %s' % (self.id, str)
        elif hasattr(self, "packet_id"):
            str = 'pkt: 0x%02X %s' % (self.packet_id, str)
        fields = self.fields
        if fields is not None:
            inner_str = ', '.join('%s=%s' % (a, self.field_string(a))
                                  for a in fields if hasattr(self, a))
            str = '%s(%s)' % (str, inner_str)
        return str
    @property
    def fields(self):
        """ An iterable of the names of the packet's fields, or None. """
        if self.definition is None:
            return None
        return (field for defn in self.definition for field in defn)
    def field_string(self, field):
        """ The string representation of the value of the given named field
        of this packet. Override to customise field value representation.
        """
        value = getattr(self, field, None)
        enum_class = self.field_enum(field, self.context)
        if enum_class is not None:
            name = enum_class.name_from_value(value)
            if name is not None:
                return name
        return repr(value)
    @classmethod
    def field_enum(cls, field, context=None):
        """ The subclass of 'minecraft.networking.types.Enum' associated with
        this field, or None if there is no such class.
        """
        enum_name = ''.join(s.capitalize() for s in field.split('_'))
        if hasattr(cls, enum_name):
            enum_class = getattr(cls, enum_name)
            if isinstance(enum_class, type) and issubclass(enum_class, Enum):
                return enum_class
| StarcoderdataPython |
4816998 | # import unittest, os, subprocess, time, datetime, random, string, re
# from selenium import webdriver
# from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC
# from selenium.webdriver.common.by import By
# from selenium.webdriver.common.keys import Keys
# from bs4 import BeautifulSoup
# from nbmessages import APPLICATION_DATA_DIR
# from . import get_driver, BaseAcceptanceTester
# class TestSecurity(unittest.TestCase):
# """Ensure other users cannot create messages"""
# def setUp(self):
# # runuser -l jovyan -c '/opt/conda/bin/jupyter notebook --ip 0.0.0.0 --NotebookApp.token=""'
# os.system(f'mkdir -p {APPLICATION_DATA_DIR}/mboard')
# os.system(f'mkdir -p {APPLICATION_DATA_DIR}/test')
# os.seteuid(1000)
# self.proc = subprocess.Popen(['jupyter', 'notebook', '--ip', '0.0.0.0', '--NotebookApp.token=""'])
# time.sleep(2)
# self.driver = get_driver()
# self.driver.get('http://127.0.0.1:8888')
# def tearDown(self):
# os.seteuid(0)
# self.proc.terminate()
# os.system(f'rm -rf {APPLICATION_DATA_DIR}/mboard {APPLICATION_DATA_DIR}/test')
# def get_soup(self):
# source = self.driver.page_source
# soup = BeautifulSoup(source, 'html.parser')
# return soup
# def test_non_owners_cannot_write(self):
# board = 'test'
# author = 'author'
# body = 'test'
# m_id = 'm1'
# admin_tab = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.LINK_TEXT, 'nbmessages (Admin)')))
# admin_tab.click()
# # make sure we select a message board first
# select_board = WebDriverWait(self.driver, 5).until(EC.element_to_be_clickable((By.ID, 'select-message-board')))
# for option in select_board.find_elements_by_tag_name('option'):
# if option.text == board:
# option.click()
# break
# messages = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.LINK_TEXT, 'Messages')))
# messages.click()
# # fill in the name
# name = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.NAME, 'author')))
# name.send_keys(author)
# # fill in the message ID
# message_id_el = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.NAME, 'message_id')))
# message_id_el.send_keys(m_id)
# # fill in the body
# message_body_el = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.NAME, 'message_body')))
# message_body_el.send_keys(body)
# # submit, save, and close
# # (By.XPATH, '//*[@id="nbmessage-admin"]/button')
# submit_el = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.CSS_SELECTOR, '#nbmessage-admin>button')))
# submit_el.click()
# time.sleep(1)
# # (By.XPATH, '//*[@id="save-message"]')
# save_el = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.ID, 'save-message')))
# save_el.click()
# time.sleep(1)
# self.driver.save_screenshot('/opt/nbmessages/tests/acceptance/screenshots/security.png')
# soup = self.get_soup()
# res = soup.findAll(text='You dont have permissions to create messages for message board = test')
# assert len(res) > 1
| StarcoderdataPython |
1634662 |
import os
import time
import uuid
import threading
import xml.etree.ElementTree as ET
from XSTAF.core.logger import LOGGER
class Run(object):
    """A single execution record of a test case (start/end time, status,
    and a numeric result code)."""

    # Numeric result codes.
    NotRun = 0b10000000
    Fail = 0b00000001
    Pass = 0b00000000
    # Bidirectional mapping: code -> label and label -> code.
    Results = {
        NotRun: "not run",
        Fail: "fail",
        Pass: "pass",
        "not run": NotRun,
        "fail": Fail,
        "pass": Pass,
    }

    def __init__(self):
        self.start = ""
        self.end = ""
        self.status = ""
        self._result = self.NotRun

    @property
    def result(self):
        """Numeric result code of this run."""
        return self._result

    @result.setter
    def result(self, value):
        if value not in self.Results:
            LOGGER.warning("unacceptable result: %s" % repr(value))
            return
        # Accept either a label or a numeric code; always store the code.
        self._result = self.Results[value] if isinstance(value, str) else value

    @property
    def pretty_result(self):
        """Human-readable label for the current result."""
        return self.Results[self.result]
class TestCase(object):
    """A single test case with a unique time-based ID and recorded runs."""

    # Serializes ID generation so concurrent constructions cannot collide.
    mutex = threading.Lock()

    def __init__(self):
        # Make a unique ID for each test case instance; the short sleep
        # guarantees distinct uuid1 timestamps while the lock is held.
        with self.mutex:
            self._ID = uuid.uuid1()
            time.sleep(0.01)
        self.name = ""
        self.command = ""
        self.auto = False
        self.timeout = 600
        self.description = ""
        self.data = ""
        self._runs = {}

    def runs(self):
        """Yield recorded runs ordered by their start time."""
        # Bug fix: dict.keys() returns a view on Python 3 and has no .sort();
        # sorted() works on both Python 2 and 3.
        for run_id in sorted(self._runs):
            yield self._runs[run_id]

    def add_run(self, run):
        #we use task start time as id
        self._runs[run.start] = run

    def get_run(self, id):
        """Return the run whose start time equals `id' (KeyError if absent)."""
        return self._runs[id]

    def remove_run(self, id):
        """Forget the run whose start time equals `id'."""
        del self._runs[id]

    def remove_all_runs(self):
        """Forget every recorded run."""
        self._runs = {}

    @property
    def ID(self):
        return self._ID

    @ID.setter
    def ID(self, id):
        self._ID = id
class TestSuite(object):
    """A collection of TestCase objects parsed from a suite XML file.

    Two formats are supported, detected by the XML root tag: the legacy
    pyanvil "XMLTestCollection" format and the native "TestSuite" format
    (which also carries previously recorded runs).
    """

    def __init__(self, test_suite_file):
        self.test_suite_file = test_suite_file
        self._testcases = {}
        self._parse_and_build()

    def _parse_and_build(self):
        """Detect the XML flavour by its root tag and build the test cases."""
        self.name = os.path.basename(self.test_suite_file)
        xml_tree = ET.parse(self.test_suite_file)
        root_element = xml_tree.getroot()
        if root_element.tag == "XMLTestCollection":
            #pyanvil test scenarios
            self._parse_pyanvil_test_suite(root_element)
        elif root_element.tag == "TestSuite":
            self._parse_test_suite(root_element)

    def _parse_pyanvil_test_suite(self, root_element):
        """Build test cases from the legacy pyanvil XMLTestCollection format."""
        testcase_elements = root_element.findall("TestList/ToolCase")
        for testcase_element in testcase_elements:
            testcase = TestCase()
            testcase.data = testcase_element.attrib["name"]
            #pyanvil case do not have a global id, use system gen id
            #testcase.ID = testcase.name
            executable = testcase_element.find("Executable").text
            parameters = testcase_element.find("Parameters").text
            testcase.command = executable+" "+parameters
            testcase.auto = True
            if not testcase_element.find("Timeout") is None:
                testcase.timeout = int(testcase_element.find("Timeout").text)
            if not testcase_element.find("Description") is None:
                testcase.name = testcase_element.find("Description").text
            self._testcases[testcase.ID] = testcase

    def _parse_test_suite(self, root_element):
        """Build test cases (including recorded runs) from the native format."""
        testcases_element = root_element.find("TestCases")
        testcase_elements = testcases_element.findall("TestCase")
        for testcase_element in testcase_elements:
            testcase = TestCase()
            testcase.ID = uuid.UUID(testcase_element.find("ID").text)
            testcase.name = testcase_element.find("Name").text
            testcase.command = testcase_element.find("Command").text
            #optional
            if not testcase_element.find("Auto") is None:
                auto = testcase_element.find("Auto").text
                if auto.upper() == "TRUE":
                    testcase.auto = True
                else:
                    testcase.auto = False
            if not testcase_element.find("Timeout") is None:
                testcase.timeout = int(testcase_element.find("Timeout").text)
            if not testcase_element.find("Description") is None:
                testcase.description = testcase_element.find("Description").text
            if not testcase_element.find("Data") is None:
                # Bug fix: the original assigned the <Data> text to
                # `description', clobbering the description parsed above.
                testcase.data = testcase_element.find("Data").text
            #test run results
            runs_element = testcase_element.find("Runs")
            run_elements = runs_element.findall("Run")
            for run_element in run_elements:
                run = Run()
                run.start = run_element.find("Start").text
                run.end = run_element.find("End").text
                run.result = run_element.find("Result").text
                run.status = run_element.find("Status").text
                if run.status is None:
                    run.status = ""
                testcase.add_run(run)
            self._testcases[testcase.ID] = testcase

    def testcases(self):
        """Yield the test cases ordered by ID."""
        # Bug fix: dict.keys() returns a view on Python 3 and has no .sort();
        # sorted() works on both Python 2 and 3.
        for testcase_id in sorted(self._testcases):
            yield self._testcases[testcase_id]

    def testcase_number(self):
        """Number of test cases in the suite."""
        return len(self._testcases)

    def get_testcase(self, testcase_id):
        """Return the test case with the given ID (KeyError if absent)."""
        return self._testcases[testcase_id]
152429 | <reponame>rivamarco/alexa-apis-for-python
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_smapi_model.v1.isp.subscription_information import SubscriptionInformation
from ask_smapi_model.v1.isp.product_type import ProductType
from ask_smapi_model.v1.isp.purchasable_state import PurchasableState
from ask_smapi_model.v1.isp.publishing_information import PublishingInformation
from ask_smapi_model.v1.isp.privacy_and_compliance import PrivacyAndCompliance
# NOTE: This model follows the auto-generated SMAPI SDK pattern; the
# serialization metadata below is consumed by the SDK (de)serializer.
class InSkillProductDefinition(object):
    """
    Defines the structure for an in-skill product.
    :param version: Version of in-skill product definition.
    :type version: (optional) str
    :param object_type:
    :type object_type: (optional) ask_smapi_model.v1.isp.product_type.ProductType
    :param reference_name: Developer selected in-skill product name. This is for developer reference only, it can be used to filter query results to identify a matching in-skill product.
    :type reference_name: (optional) str
    :param purchasable_state:
    :type purchasable_state: (optional) ask_smapi_model.v1.isp.purchasable_state.PurchasableState
    :param subscription_information:
    :type subscription_information: (optional) ask_smapi_model.v1.isp.subscription_information.SubscriptionInformation
    :param publishing_information:
    :type publishing_information: (optional) ask_smapi_model.v1.isp.publishing_information.PublishingInformation
    :param privacy_and_compliance:
    :type privacy_and_compliance: (optional) ask_smapi_model.v1.isp.privacy_and_compliance.PrivacyAndCompliance
    :param testing_instructions: Special instructions provided by the developer to test the in-skill product.
    :type testing_instructions: (optional) str
    """
    # Attribute name -> fully-qualified type, used when deserializing JSON.
    deserialized_types = {
        'version': 'str',
        'object_type': 'ask_smapi_model.v1.isp.product_type.ProductType',
        'reference_name': 'str',
        'purchasable_state': 'ask_smapi_model.v1.isp.purchasable_state.PurchasableState',
        'subscription_information': 'ask_smapi_model.v1.isp.subscription_information.SubscriptionInformation',
        'publishing_information': 'ask_smapi_model.v1.isp.publishing_information.PublishingInformation',
        'privacy_and_compliance': 'ask_smapi_model.v1.isp.privacy_and_compliance.PrivacyAndCompliance',
        'testing_instructions': 'str'
    }  # type: Dict
    # Python attribute name -> JSON property name.
    attribute_map = {
        'version': 'version',
        'object_type': 'type',
        'reference_name': 'referenceName',
        'purchasable_state': 'purchasableState',
        'subscription_information': 'subscriptionInformation',
        'publishing_information': 'publishingInformation',
        'privacy_and_compliance': 'privacyAndCompliance',
        'testing_instructions': 'testingInstructions'
    }  # type: Dict
    supports_multiple_types = False
    def __init__(self, version=None, object_type=None, reference_name=None, purchasable_state=None, subscription_information=None, publishing_information=None, privacy_and_compliance=None, testing_instructions=None):
        # type: (Optional[str], Optional[ProductType], Optional[str], Optional[PurchasableState], Optional[SubscriptionInformation], Optional[PublishingInformation], Optional[PrivacyAndCompliance], Optional[str]) -> None
        """Defines the structure for an in-skill product.
        :param version: Version of in-skill product definition.
        :type version: (optional) str
        :param object_type:
        :type object_type: (optional) ask_smapi_model.v1.isp.product_type.ProductType
        :param reference_name: Developer selected in-skill product name. This is for developer reference only, it can be used to filter query results to identify a matching in-skill product.
        :type reference_name: (optional) str
        :param purchasable_state:
        :type purchasable_state: (optional) ask_smapi_model.v1.isp.purchasable_state.PurchasableState
        :param subscription_information:
        :type subscription_information: (optional) ask_smapi_model.v1.isp.subscription_information.SubscriptionInformation
        :param publishing_information:
        :type publishing_information: (optional) ask_smapi_model.v1.isp.publishing_information.PublishingInformation
        :param privacy_and_compliance:
        :type privacy_and_compliance: (optional) ask_smapi_model.v1.isp.privacy_and_compliance.PrivacyAndCompliance
        :param testing_instructions: Special instructions provided by the developer to test the in-skill product.
        :type testing_instructions: (optional) str
        """
        self.__discriminator_value = None  # type: str
        self.version = version
        self.object_type = object_type
        self.reference_name = reference_name
        self.purchasable_state = purchasable_state
        self.subscription_information = subscription_information
        self.publishing_information = publishing_information
        self.privacy_and_compliance = privacy_and_compliance
        self.testing_instructions = testing_instructions
    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict"""
        # Recursively serializes nested models, enums, lists and dicts.
        result = {}  # type: Dict
        for attr, _ in six.iteritems(self.deserialized_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else
                    x.value if isinstance(x, Enum) else x,
                    value
                ))
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else
                    (item[0], item[1].value)
                    if isinstance(item[1], Enum) else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal"""
        # Generated models compare by full attribute dict, not identity.
        if not isinstance(other, InSkillProductDefinition):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal"""
        return not self == other
| StarcoderdataPython |
3365721 | <reponame>marshall/gaia
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
try:
from marionette import (expected,
Wait)
from marionette.by import By
from marionette.marionette import Actions
except:
from marionette_driver import (expected,
Wait)
from marionette_driver.by import By
from marionette_driver.marionette import Actions
from gaiatest.apps.base import Base
from gaiatest.apps.base import PageRegion
from gaiatest.apps.homescreen.regions.bookmark_menu import BookmarkMenu
class Collection(Base):
    """Page object for an opened homescreen smart collection."""
    name = 'Smart Collections'
    _apps_locator = (By.CSS_SELECTOR, 'gaia-grid .icon:not(.placeholder)')
    _close_button_locator = (By.ID, 'close')
    def __init__(self, marionette):
        Base.__init__(self, marionette)
        # Wait for the collection app to become the foreground app before
        # switching the marionette frame to it.
        Wait(self.marionette).until(lambda m: self.apps.displayed_app.name == self.name)
        self.apps.switch_to_displayed_app()
        Wait(self.marionette).until(expected.elements_present(*self._apps_locator))
    @property
    def applications(self):
        """All non-placeholder app tiles shown in the collection grid."""
        return [self.Result(self.marionette, app) for app in self.marionette.find_elements(*self._apps_locator)]
    class Result(PageRegion):
        """A single app/bookmark tile inside the collection grid."""
        # Modal dialog locators
        _modal_dialog_save_locator = (By.ID, "bookmark-cloudapp")
        @property
        def name(self):
            # The visible label of the tile.
            return self.root_element.text
        def tap(self):
            """Open the tapped app and switch the frame to it."""
            app_name = self.name
            self.root_element.tap()
            # Wait for the displayed app to be that we have tapped
            Wait(self.marionette).until(lambda m: self.apps.displayed_app.name == app_name)
            self.apps.switch_to_displayed_app()
            # Wait for title to load (we cannot be more specific because the aut may change)
            Wait(self.marionette).until(lambda m: m.title)
        def long_tap_to_install(self):
            # A 2-second long-press opens the install/save modal dialog.
            Actions(self.marionette).long_press(self.root_element, 2).perform()
        def tap_save_to_home_screen(self):
            """Confirm the save dialog; returns the bookmark editing menu."""
            element = Wait(self.marionette).until(expected.element_present(
                *self._modal_dialog_save_locator))
            Wait(self.marionette).until(expected.element_displayed(element))
            element.tap()
            return BookmarkMenu(self.marionette)
| StarcoderdataPython |
3314497 | import argparse
import logging
import traceback
from typing import List
from sys import exit
import click
from cli.echo import Echo
from common.errors import BaseError
from vm_providers.factory import get_provider_for
from vm_providers.errors import ProviderNotFound
from common import definitions
logger = logging.getLogger(__name__)
@click.command(name="microk8s", context_settings=dict(
    ignore_unknown_options=True,
    allow_extra_args=True,
))
@click.option('-h', '--help', is_flag=True)
@click.pass_context
def cli(ctx, help):
    """Top-level dispatcher: handles install/uninstall/stop locally and
    forwards every other command to the MicroK8s VM."""
    try:
        if help and len(ctx.args) == 0:
            show_help()
            exit(0)
        elif help:
            # Forward --help to the underlying microk8s.* command.
            ctx.args.append("--help")
        if len(ctx.args) == 0:
            show_error()
            exit(1)
        if ctx.args[0] == 'install':
            install(ctx.args[1:])
            exit(0)
        elif ctx.args[0] == 'uninstall':
            uninstall()
            exit(0)
        elif ctx.args[0] == 'stop':
            # Run the in-VM stop first, then power down the VM itself.
            run(ctx.args)
            stop()
            exit(0)
        else:
            run(ctx.args)
            exit(0)
    except BaseError as e:
        Echo.error(str(e))
        exit(e.get_exit_code())
    except Exception as e:
        Echo.error("An unexpected error occurred.")
        Echo.info(str(e))
        # Bug fix: traceback.print_exc() prints to stderr and returns None,
        # so the original passed None to Echo.info(); format_exc() returns
        # the traceback text instead.
        Echo.info(traceback.format_exc())
        exit(254)
def show_error():
    """Print the minimal usage banner shown when no command is given."""
    click.echo(
        """Usage: microk8s [OPTIONS] COMMAND [ARGS]...
Options:
  --help Shows the available COMMANDS."""
    )
msg = """Usage: microk8s [OPTIONS] COMMAND [ARGS]...
Options:
--help Show this message and exit.
Commands:
install Installs MicroK8s. Use --cpu, --mem, --disk to appoint resources.
uninstall Removes MicroK8s"""
click.echo(msg)
commands = _get_microk8s_commands()
for command in commands:
if command in definitions.command_descriptions:
click.echo(" {:<15} {}".format(command, definitions.command_descriptions[command]))
else:
click.echo(" {:<15}".format(command))
if len(commands) == 2:
click.echo("")
click.echo("Install and start MicroK8s to see the full list of commands.")
def _show_install_help():
    """Print usage help for the ``install`` subcommand with its defaults."""
    defaults = (definitions.DEFAULT_CORES, definitions.DEFAULT_MEMORY, definitions.DEFAULT_DISK)
    Echo.info("""Usage: microk8s install OPTIONS
Options:
  --help Show this message and exit.
  --cpu Cores used by MicroK8s (default={})
  --mem RAM in GB used by MicroK8s (default={})
  --disk Maximum volume in GB of the dynamicaly expandable hard disk to be used (default={})
  -y, --assume-yes Automatic yes to prompts""".format(*defaults))
def install(args) -> None:
    """Install MicroK8s in a multipass VM sized by --cpu/--mem/--disk."""
    if "--help" in args:
        _show_install_help()
        return
    parser = argparse.ArgumentParser("microk8s install")
    parser.add_argument('--cpu', default=definitions.DEFAULT_CORES, type=int)
    parser.add_argument('--mem', default=definitions.DEFAULT_MEMORY, type=int)
    parser.add_argument('--disk', default=definitions.DEFAULT_DISK, type=int)
    parser.add_argument('-y', '--assume-yes', action='store_true', default=definitions.DEFAULT_ASSUME)
    args = parser.parse_args(args)
    vm_provider_name: str = 'multipass'
    vm_provider_class = get_provider_for(vm_provider_name)
    echo = Echo()
    try:
        vm_provider_class.ensure_provider()
    except ProviderNotFound as provider_error:
        if provider_error.prompt_installable:
            # Install the provider automatically under --assume-yes on a tty,
            # otherwise ask the user interactively.
            # NOTE(review): the trailing `not args.assume_yes` in the elif is
            # redundant (the first branch already consumes assume_yes on a
            # tty), and non-tty runs with --assume-yes fall through to the
            # re-raise -- confirm whether that is intended.
            if echo.is_tty_connected() and args.assume_yes:
                vm_provider_class.setup_provider(echoer=echo)
            elif echo.is_tty_connected() and echo.confirm(
                "Support for {!r} needs to be set up. "
                "Would you like to do that it now?".format(provider_error.provider)
            ) and not args.assume_yes:
                vm_provider_class.setup_provider(echoer=echo)
            else:
                raise provider_error
        else:
            raise provider_error
    instance = vm_provider_class(echoer=echo)
    instance.launch_instance(vars(args))
    echo.info("MicroK8s is up and running. See the available commands with 'microk8s --help'.")
def uninstall() -> None:
    """Destroy the MicroK8s VM and remove it from the provider."""
    vm_provider_name = "multipass"
    vm_provider_class = get_provider_for(vm_provider_name)
    echo = Echo()
    try:
        vm_provider_class.ensure_provider()
    except ProviderNotFound as provider_error:
        # NOTE(review): unlike run(), a non-prompt-installable
        # ProviderNotFound is swallowed here and execution continues to
        # instance.destroy() -- confirm whether a re-raise was intended.
        if provider_error.prompt_installable:
            if echo.is_tty_connected():
                echo.warning((
                    "MicroK8s is not running. VM provider {!r} has been removed."
                    .format(provider_error.provider)))
                # NOTE(review): returns 1 from a `-> None` function; callers
                # ignore the value, but annotation and return disagree.
                return 1
            else:
                raise provider_error
    instance = vm_provider_class(echoer=echo)
    instance.destroy()
    echo.info("Thank you for using MicroK8s!")
def stop() -> None:
    """Stop the MicroK8s VM if it is currently running."""
    provider_class = get_provider_for("multipass")
    provider_class.ensure_provider()
    instance = provider_class(echoer=Echo())
    if instance.get_instance_info().is_running():
        instance.stop()
def run(cmd):
    """Run a MicroK8s subcommand inside the VM.

    Fixes over the original:
      * no longer mutates the caller's ``cmd`` list in place;
      * ``ProviderNotFound`` is re-raised instead of being silently swallowed
        when it cannot be handled (matching ``uninstall``/``install``);
      * the false ``-> None`` annotation (the function returns 1) is dropped.

    Args:
        cmd: command and arguments, e.g. ``["kubectl", "get", "pods"]``.
            The first element is mapped to the ``microk8s.<name>`` snap alias.

    Returns:
        1 when MicroK8s is not installed (interactive terminal), else None.
    """
    vm_provider_name = "multipass"
    vm_provider_class = get_provider_for(vm_provider_name)
    echo = Echo()
    try:
        vm_provider_class.ensure_provider()
    except ProviderNotFound as provider_error:
        if provider_error.prompt_installable and echo.is_tty_connected():
            echo.warning("MicroK8s is not installed. Please run 'microk8s install'.")
            return 1
        raise provider_error
    instance = vm_provider_class(echoer=echo)
    # Build a prefixed copy instead of mutating the caller's list.
    full_cmd = ["microk8s.{}".format(cmd[0])] + list(cmd[1:])
    instance.run(full_cmd)
def _get_microk8s_commands() -> List:
    """Return the MicroK8s subcommands available inside the VM.

    Falls back to ``["start", "stop"]`` when the VM is not running or the
    VM provider is not installed.

    Fixes over the original: the unused exception binding is removed and
    each entry is decoded once instead of twice.
    """
    vm_provider_name = "multipass"
    vm_provider_class = get_provider_for(vm_provider_name)
    echo = Echo()
    try:
        vm_provider_class.ensure_provider()
        instance = vm_provider_class(echoer=echo)
        instance_info = instance.get_instance_info()
        if not instance_info.is_running():
            return ["start", "stop"]
        # `ls -1 /snap/bin/` output is raw bytes, one snap alias per line.
        output = instance.run('ls -1 /snap/bin/'.split(), hide_output=True)
        names = (entry.decode() for entry in output.split())
        return [name.replace('microk8s.', '')
                for name in names if name.startswith('microk8s')]
    except ProviderNotFound:
        return ["start", "stop"]
if __name__ == '__main__':
cli()
| StarcoderdataPython |
1787837 | <reponame>kennethreitz-archive/wolfram
import os
from fabric.api import *
# NOTE(review): removed stray statement `os.f` — it raised AttributeError the
# moment this fabfile was imported and had no conceivable effect.
def scrub():
    """Death to the bytecode! Also removes packaging build output."""
    # Delete every compiled .pyc under the project tree.
    local("find . -name \"*.pyc\" -exec rm '{}' ';'")
    # Drop the dist/ and build/ packaging directories.
    local('rm -fr dist build')
#def docs():
# """Build docs."""
# os.system('make html')
# os.chdir('_build/html')
# os.system('sphinxtogithub .')
# os.system('git add -A')
# os.system('git commit -m \'documentation update\'')
# os.system('git push origin gh-pages') | StarcoderdataPython |
4821914 | # coding=utf-8
__author__ = 'kohlmannj'
import os
import Ity
import FilePaths
from Corpus import Corpus
from CorpusText import CorpusText
__all__ = ["Corpus", "CorpusText", "FilePaths"]
def get_models(metadata_root_path=None):
    """List the entries of the metadata root directory.

    Defaults to ``Ity.metadata_root`` when no path is supplied.
    """
    root = Ity.metadata_root if metadata_root_path is None else metadata_root_path
    return os.listdir(root)
def get_corpora(root_path=None, metadata_root_path=None):
    """
    Returns a dict mapping corpus_name (str) -> corpus data available to Ity.
    (Note: despite the original wording, the return value is a dict, not a
    list; each value is ``{"texts": [...], "corpora": {...nested...}}``.)
    A corpus is available if, for a folder existing in Ity.corpus_root, a
    folder of the same name in Ity.metadata_root also exists. Why yes, this is
    an incredibly naïve check!
    :return: dict of available corpora keyed by corpus name
    """
    # When the caller supplies an explicit metadata root we treat it as the
    # metadata folder for *every* corpus instead of expecting one subfolder
    # per corpus.
    check_for_individual_corpora_metadata = True
    if root_path is None:
        root_path = Ity.corpus_root
    if metadata_root_path is None:
        metadata_root_path = Ity.metadata_root
    else:
        check_for_individual_corpora_metadata = False
    available_corpora = {}
    # Return the empty dict if the root path is outside Ity.corpus_root.
    # (commonprefix is a *string* prefix check, not a path-aware one —
    # NOTE(review): os.path.commonpath would be stricter; verify intent.)
    common_corpora_prefix = os.path.commonprefix([
        root_path, Ity.corpus_root
    ])
    common_metadata_prefix = os.path.commonprefix([
        metadata_root_path, Ity.metadata_root
    ])
    if (
        common_corpora_prefix != Ity.corpus_root or
        common_metadata_prefix != Ity.metadata_root
    ):
        return available_corpora
    for corpus_name in os.listdir(root_path):
        corpus_path = os.path.join(root_path, corpus_name)
        # Only include the corpus if we have its corpus folder and metadata
        # folder.
        if check_for_individual_corpora_metadata:
            corpus_metadata_path = os.path.join(metadata_root_path, corpus_name)
        else:
            corpus_metadata_path = metadata_root_path
        if (
            corpus_name not in available_corpora and
            os.path.exists(corpus_path) and
            os.path.isdir(corpus_path) and
            os.path.exists(corpus_metadata_path) and
            os.path.isdir(corpus_metadata_path)
        ):
            # Get the files in this corpus.  Subdirectories are treated as
            # nested corpora via the recursive call below.
            corpus_data = dict(
                texts=[],
                corpora=get_corpora(
                    root_path=corpus_path,
                    metadata_root_path=os.path.join(
                        Ity.metadata_root,
                        corpus_name
                    )
                )
            )
            # Populate corpus_data["texts"] with file names minus extensions.
            for file_name in os.listdir(corpus_path):
                file_path = os.path.join(
                    corpus_path,
                    file_name
                )
                if not os.path.exists(file_path):
                    continue
                elif os.path.isfile(file_path):
                    corpus_data["texts"].append(
                        os.path.splitext(file_name)[0]
                    )
            available_corpora[corpus_name] = corpus_data
    return available_corpora
| StarcoderdataPython |
3254480 | import os
import sys
import logging
sys.path.append(os.path.abspath("."))
print(sys.path)
# import own libs
from src.models.model_manager import *
from src.models.evaluate_model import evaluate
from src.data.data_utils import get_train_and_validation_generator
from src.models.model_utils import get_callbacks
from src.visualization.utils import plot_history
from src.utils_io import Console_and_file_logger, ensure_dir
from keras.backend.tensorflow_backend import set_session
from collections import Counter
# import external libs
import json
from argparse import ArgumentParser
import yaml
import numpy as np
import tensorflow as tf
global config
def train():
    """
    Training entrance; all heavy work is done here.

    Reads the module-level ``config`` dict (set in ``__main__`` from the YAML
    file) rather than taking a parameter.  Builds the data generators, an
    inverse-frequency class-weight map, compiles and fits the model, then
    plots history and runs evaluation.
    :return: None
    """
    logging.info('training starts')
    # get train generator
    train_generator, validation_generator = get_train_and_validation_generator(path_to_data=config['data_dir'],
                                                                               validation_split=config[
                                                                                   'validation_split'],
                                                                               image_size=(config['input_image_width'],
                                                                                           config[
                                                                                               'input_image_height']),
                                                                               batch_size_train=config[
                                                                                   'batch_size_train'],
                                                                               batch_size_val=config['batch_size_val'],
                                                                               class_mode=config['class_mode'],
                                                                               color_mode = config['color_mode'])
    # Inverse-frequency class weights: the most populous class gets weight 1.0,
    # rarer classes proportionally more.
    counter = Counter(train_generator.classes)
    max_val = float(max(counter.values()))
    class_weights = {class_id: max_val / num_images for class_id, num_images in counter.items()}
    logging.info('Class weights: {0}'.format(class_weights))
    # get model
    logging.info('input shape: {}'.format(config['input_shape']))
    aliases, model = get_model(config)
    logging.info(model.summary())
    # compile model
    model.compile(loss=config['loss_function'],
                  optimizer=get_optimizer(),
                  metrics=config['metrics'])
    callbacks = get_callbacks(config)
    logging.info('len train_generator: {}'.format(str(len(train_generator))))
    logging.info('Batch-size: {}'.format(config['batch_size_train']))
    # model fit with generator
    history = model.fit_generator(train_generator, steps_per_epoch=len(train_generator),
                                  epochs=int(config['epochs']), verbose=1
                                  , callbacks=callbacks,
                                  validation_data=validation_generator,
                                  validation_steps=20, class_weight=class_weights, max_queue_size=10, workers=1,
                                  use_multiprocessing=False,
                                  shuffle=True, initial_epoch=0)
    plot_history(history=history, config=config)
    # NOTE(review): __main__ validates config['test_dir_image'] but this call
    # uses config['test_dir'] — confirm which key evaluate() really needs.
    evaluate(config['test_dir'])
if __name__ == '__main__':
    #gpu_options = tf.GPUOptions(allow_growth=True)
    #session_config =tf.ConfigProto(allow_soft_placement=True, log_device_placement=False, gpu_options=gpu_options)
    # NOTE(review): `config` is first a tf.ConfigProto here and is later
    # rebound to the YAML params dict that train() reads — same name, two
    # unrelated meanings; consider renaming one of them.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
    config.log_device_placement = False
    session = tf.Session(config=config)
    set_session(session)
    # Define argument parser
    parser = ArgumentParser()
    # define arguments and default values to parse
    # define tha path to your config file
    # NOTE(review): `default=` is moot with required=True — the flag must
    # always be given explicitly.
    parser.add_argument("--config", "-c", help="Define the path to config.yml",
                        default="config/experiments/inception_v3_base.yml", required=True)
    parser.add_argument("--working_dir", help="Define the absolute path to the project root",
                        default="../../", required=False)
    # parser.add_argument("--modelskiptraining", help="Skip Training", default="None", required=False)
    args = parser.parse_args()
    print(args.config)
    # Make sure the config exists
    assert os.path.exists(
        args.config), "Config does not exist {}!, Please create a config.yml in root or set the path with --config.".format(
        args.config)
    # Load config
    # SECURITY: yaml.load without an explicit Loader can construct arbitrary
    # Python objects from the file — prefer yaml.safe_load for plain configs.
    params = yaml.load(open(args.config, "r"))
    # Make sure that source and destination are set
    assert {"batch_size_train", "epochs", "data_dir", "test_dir_image", "experiment_name"} <= set(
        params.keys()), "Configuration is incomplete! Please define data_dir and test_dir_image in config.yml"
    # Make sure source folder exists
    assert os.path.exists(params["data_dir"]), "Path to train src {} does not exist!".format(params["data_dir"])
    assert os.path.exists(params["test_dir_image"]), "Path to test src {} does not exist!".format(params["test_dir_image"])
    # Define central logger, set name and logging level
    Console_and_file_logger(logfile_name=params["experiment_name"], log_lvl="INFO")
    logging.info('Starting experiment {}'.format(params["experiment_name"]))
    logging.info(json.dumps(params, indent=2))
    # train() reads this module-level name.
    config = params
    train()
| StarcoderdataPython |
3243301 | <filename>xaikit/adapters/__init__.py
import sklearn
from xaikit.adapters.sklearn_adapter import SklearnModelAdapter
def create_model_adapter(external_model):
    """
    Brings an external model under the xAIkit interface.

    Fix: the original silently returned ``None`` for unsupported model
    types, which only surfaced later as an opaque AttributeError at the
    call site; an explicit TypeError is raised instead.

    Raises:
        TypeError: if no adapter exists for the model's type.
    """
    if isinstance(external_model, sklearn.base.BaseEstimator):
        return SklearnModelAdapter(external_model)
    raise TypeError(
        "No xAIkit adapter available for model of type {!r}".format(
            type(external_model).__name__
        )
    )
| StarcoderdataPython |
# Demo Python Strings - Slicing Strings
'''
Negative Indexing
Use negative indexes to start the slice from the end of the string:
'''
# Get the characters from position 5 to position 1, starting the count from the end of the string:
b = "Hello, World!"
print("string original: ", b)
print(b[-5:-2])
# Includes the character at position -9 and excludes the character at position -4
print(b[-9:-4])
# A one-character slice: the second-to-last character only
print(b[-2:-1])
1788284 | """Resources for nbvewerbot functionality"""
import os
import re
import logging
import pickle
import dotenv
import praw
# Relevant directories
SRC_DIR = os.path.dirname(__file__)
RESOURCES_DIR = os.path.join(SRC_DIR, "resources.d")
PROJECT_DIR = os.path.realpath(os.path.join(SRC_DIR, ".."))
# Logging
LOGFILE_PATH = os.path.join(PROJECT_DIR, "nbviewerbot.log")
LOGGER = logging.getLogger("nbviewerbot")
# Reddit auth info from PROJECT_DIR/.env
DOTENV_PATH = os.path.join(SRC_DIR, ".env")
dotenv.load_dotenv(DOTENV_PATH)
# Reddit authentication
def get_reddit_auth_kwargs():
    """Build the keyword arguments for praw.Reddit from the environment.

    Expects CLIENT_ID, CLIENT_SECRET, USERNAME and PASSWORD to be set
    (see https://github.com/reddit-archive/reddit/wiki/OAuth2-Quick-Start-Example);
    a fixed user agent is always added.

    Raises:
        KeyError: naming the first missing environment variable.
    """
    credential_keys = ("client_id", "client_secret", "username", "password")
    kwargs = {key: os.environ.get(key.upper()) for key in credential_keys}
    kwargs["user_agent"] = "python:nbviewerbot:v0.1.0 (by /u/jd_paton)"
    missing = [key for key, value in kwargs.items() if value is None]
    if missing:
        raise KeyError(
            "{} not found in environment variables. "
            "Have you filled in your .env file?".format(missing[0].upper())
        )
    return kwargs
def load_reddit():
    """
    Authenticate with Reddit using credentials taken from the environment.

    Returns
    -------
    praw.Reddit : the authenticated Reddit client

    See also: utils.get_reddit_auth_kwargs
    """
    client = praw.Reddit(**get_reddit_auth_kwargs())
    # Touching user.me() forces the login round-trip and logs the account name.
    LOGGER.info(
        "Successfully authenticated with Reddit as {}".format(
            client.user.me().name
        )
    )
    return client
# Templates (for use with string.format)
# TODO: Convert these all to string.Template
NBVIEWER_URL_TEMPLATE = "https://nbviewer.jupyter.org/url/{}"
BINDER_URL_TEMPLATE_NO_FILEPATH = "https://mybinder.org/v2/gh/{}/{}"
BINDER_URL_TEMPLATE_WITH_FILEPATH = (
"https://mybinder.org/v2/gh/{}/{}?filepath={}"
)
_comment_footer = """
------
^(I am a bot.)
[^(Feedback)](https://www.reddit.com/message/compose/?to=jd_paton) ^(|)
[^(GitHub)](https://github.com/JohnPaton/nbviewerbot) ^(|)
[^(Author)](https://johnpaton.net/)
"""
COMMENT_TEMPLATE_SINGLE = (
"""
I see you've posted a GitHub link to a Jupyter Notebook! GitHub doesn't
render large Jupyter Notebooks, so just in case, here is an
[nbviewer](https://nbviewer.jupyter.org/) link to the notebook:
{}
Want to run the code yourself? Here is a [binder](https://mybinder.org/)
link to start your own Jupyter server and try it out!
{}
"""
+ _comment_footer
)
COMMENT_TEMPLATE_MULTI = (
"""
I see you've posted GitHub links to Jupyter Notebooks! GitHub doesn't
render large Jupyter Notebooks, so just in case here are
[nbviewer](https://nbviewer.jupyter.org/) links to the notebooks:
{}
Want to run the code yourself? Here are [binder](https://mybinder.org/)
links to start your own Jupyter server!
{}
"""
+ _comment_footer
)
# Regexes
_url_rx = "^http.*"
URL_RX = re.compile(_url_rx)
# Subreddit lists
SUBREDDITS_TEST = [
"testingground4bots",
"bottestingplace",
"bottesting",
"bottest",
]
SUBREDDITS_RELEVANT_PATH = os.path.join(RESOURCES_DIR, "subreddits.txt")
with open(SUBREDDITS_RELEVANT_PATH, "r") as h:
_raw = h.readlines()
# strip whitespace and drop empty lines
SUBREDDITS_RELEVANT = [sub.strip() for sub in _raw]
SUBREDDITS_RELEVANT = [sub for sub in SUBREDDITS_RELEVANT if sub]
SUBREDDITS_RELEVANT += SUBREDDITS_TEST
SUBREDDITS_ALL = ["all"]
| StarcoderdataPython |
3273386 | # Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for
# full license information.
import getopt
class OptionError(Exception):
    """Command-line option error; the offending message is kept in ``value``."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "{!r}".format(self.value)
def get_iothub_opt(argv, connection_string, device_id):
    """Parse ``-c/--connectionstring`` and ``-d/--deviceid`` from argv.

    Values already passed in are kept when the matching option is absent.
    Raises OptionError on ``-h``, on unparsable options, or when the final
    connection string lacks a ``HostName`` segment.
    """
    if argv:
        try:
            parsed, _ = getopt.getopt(
                argv, "hd:c:", [
                    "connectionstring=", "deviceid="])
        except getopt.GetoptError as get_opt_error:
            raise OptionError("Error: %s" % get_opt_error.msg)
        for name, value in parsed:
            if name == '-h':
                raise OptionError("Help:")
            if name in ("-c", "--connectionstring"):
                connection_string = value
            elif name in ("-d", "--deviceid"):
                device_id = value
    if connection_string.find("HostName") < 0:
        raise OptionError(
            "Error: Hostname not found, not a valid connection string")
    return connection_string, device_id
def get_iothub_opt_with_module(argv, connection_string, device_id, module_id):
    """Parse ``-c``, ``-d`` and ``-m`` (plus long forms) from argv.

    Identical to :func:`get_iothub_opt` with an extra ``-m/--moduleid``
    option.  Raises OptionError on ``-h``, parse failures, or a connection
    string without a ``HostName`` segment.
    """
    if argv:
        try:
            parsed, _ = getopt.getopt(
                argv, "hd:c:m:", [
                    "connectionstring=", "deviceid=", "moduleid="])
        except getopt.GetoptError as get_opt_error:
            raise OptionError("Error: %s" % get_opt_error.msg)
        for name, value in parsed:
            if name == '-h':
                raise OptionError("Help:")
            if name in ("-c", "--connectionstring"):
                connection_string = value
            elif name in ("-d", "--deviceid"):
                device_id = value
            elif name in ("-m", "--moduleid"):
                module_id = value
    if connection_string.find("HostName") < 0:
        raise OptionError(
            "Error: Hostname not found, not a valid connection string")
    return connection_string, device_id, module_id
def get_iothub_opt_configuration_id(
        argv,
        connection_string,
        configuration_id):
    """Parse ``-c/--connectionstring`` and ``--configurationid`` from argv.

    BUG FIX: the original tested ``opt in ("--connectionstring")`` — a
    *string*, not a tuple (missing comma) — i.e. a substring check.  It
    matched ``-c`` only because "-c" happens to occur inside
    "--connectionstring", and no short option could ever reach the
    configuration-id branch.  Proper tuples are used below.

    Raises:
        OptionError: on ``-h``, parse failures, a connection string without
            ``HostName``, or a missing configuration id.
    """
    if len(argv) > 0:
        try:
            opts, args = getopt.getopt(
                argv, "hd:c:", [
                    "connectionstring=", "configurationid="])
        except getopt.GetoptError as get_opt_error:
            raise OptionError("Error: %s" % get_opt_error.msg)
        for opt, arg in opts:
            if opt == '-h':
                raise OptionError("Help:")
            elif opt in ("-c", "--connectionstring"):
                connection_string = arg
            elif opt in ("--configurationid",):
                configuration_id = arg
            # NOTE: "-d" is still accepted by getopt (parity with the sibling
            # parsers) but has no meaning here and is ignored, as before.
    if connection_string.find("HostName") < 0:
        raise OptionError(
            "Error: Hostname not found, not a valid connection string")
    if configuration_id is None:
        raise OptionError(
            "Error: configurationid required parameter")
    return connection_string, configuration_id
| StarcoderdataPython |
3371013 | '''Python file for the traversal abstract class
This is an abstract class inherited from the NodeVisitor class to allow to kwargs
'''
from ...nodes import AST
from ...nodes import NodeVisitor
class Traversal(NodeVisitor):
    """Abstract AST traversal: dispatches each node to ``visit_<NodeType>``.

    Subclasses implement one ``visit_<NodeType>(node, **kwargs)`` method per
    node class they care about; anything unhandled raises NotImplementedError
    with a descriptive message.
    """

    def visit(self, node: AST, **kwargs):
        """Dispatch *node* to its type-specific visit method and return its result."""
        node_name = type(node).__name__
        handler = getattr(self, "visit_{}".format(node_name), self.__visit_method_error)
        return handler(node, **kwargs)

    def __visit_method_error(self, node: AST, **kwargs):
        """Fallback handler: raise NotImplementedError naming the missing method."""
        node_name = type(node).__name__
        kwarg_desc = ','.join('({},{})'.format(k, v) for k, v in kwargs.items())
        message = '\n'.join([
            f"Visit method for {node_name} not implemented",
            f"Please implement the method visit_{node_name}",
            f"Did not expect kwargs, {kwarg_desc}",
        ])
        raise NotImplementedError(message)
3203682 | <filename>train_wqx/transformer_model.py<gh_stars>0
import os
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
import time
# import matplotlib.pyplot as plt
import math
import yaml
# from fast_ctc_decode import beam_search, viterbi_search
# import parasail
from collections import deque, defaultdict, OrderedDict
import re
class SelfAttention(nn.Module):
    """Multi-head scaled-dot-product attention with optional RealFormer
    residual attention scores and optional cross-attention (encoder K/V).

    Note: ``position_embedding_type`` is stored but never used in this
    implementation.
    """
    def __init__(self,
                 num_attention_heads=12,
                 hidden_size=768,
                 attention_probs_dropout_prob=0.1,
                 position_embedding_type='absolute',
                 ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        # Assumes hidden_size is divisible by num_attention_heads; otherwise
        # all_head_size below silently differs from hidden_size.
        self.attention_head_size = int(hidden_size / num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(hidden_size, self.all_head_size)
        self.key = nn.Linear(hidden_size, self.all_head_size)
        self.value = nn.Linear(hidden_size, self.all_head_size)
        self.dropout = nn.Dropout(attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type

    def transpose_for_scores(self, x):
        # [bs, N, heads, head_size]
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        # [bs, heads, N, head_size]
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=False,
        attention_scores_res=None,
    ):
        mixed_query_layer = self.query(hidden_states)
        # Cross attention: keys/values come from the encoder states.
        if encoder_hidden_states is not None:
            mixed_key_layer = self.key(encoder_hidden_states)
            mixed_value_layer = self.value(encoder_hidden_states)
            attention_mask = encoder_attention_mask
        # Plain self attention: keys/values come from hidden_states itself.
        else:
            mixed_key_layer = self.key(hidden_states)
            mixed_value_layer = self.value(hidden_states)
            attention_mask = attention_mask
        # shape: [bs, N, all_head_size] -> [bs, heads, N, head_size]
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # shape = [bs, heads, N, N]
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / np.sqrt(self.attention_head_size)
        # RealFormer: add the previous layer's raw attention scores as a
        # residual, and pass the accumulated scores on to the next layer.
        if attention_scores_res is not None:
            attention_scores = attention_scores + attention_scores_res
            attention_scores_res = attention_scores
        # Additive mask (large negative values) cancels padding positions that
        # exist only because sequences were padded to a common length.
        if attention_mask is not None:
            attention_scores = attention_scores + attention_mask
        attention_probs = nn.Softmax(dim=-1)(attention_scores)
        # from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        # v: context_layer.shape = [bs, N, hidden_size]
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()  # shape: [bs, heads, N, head_size] -> [bs, N, heads, head_size]
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        # Zero out padded positions (disabled):
        # context_layer = context_layer * (attention_mask[:, 0, 0, :] > -5000).type_as(context_layer).unsqueeze(-1)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        # The last tuple element is always the RealFormer residual (or None).
        if attention_scores_res is not None:
            outputs += (attention_scores_res,)
        else:
            outputs += (None,)
        return outputs
class SelfOutput(nn.Module):
    """Post-attention block: projection, dropout, then residual + LayerNorm."""

    def __init__(self,
                 hidden_size=768,
                 layer_norm_eps=1e-12,
                 hidden_dropout_prob=0.1,
                 ):
        super().__init__()
        # Submodule names are part of the checkpoint format; keep them stable.
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.LayerNorm = nn.LayerNorm(hidden_size, eps=layer_norm_eps)
        self.dropout = nn.Dropout(hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        projected = self.dropout(self.dense(hidden_states))
        # Residual connection followed by layer normalisation (post-norm).
        return self.LayerNorm(projected + input_tensor)
class Attention(nn.Module):
    """SelfAttention followed by its output projection/residual block."""

    def __init__(self,
                 num_attention_heads=12,
                 hidden_size=768,
                 attention_probs_dropout_prob=0.1,
                 position_embedding_type='absolute',
                 layer_norm_eps=1e-12,
                 hidden_dropout_prob=0.1,
                 ):
        super().__init__()
        self.self = SelfAttention(
            num_attention_heads=num_attention_heads,
            hidden_size=hidden_size,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            position_embedding_type=position_embedding_type,
        )
        self.output = SelfOutput(
            hidden_size=hidden_size,
            layer_norm_eps=layer_norm_eps,
            hidden_dropout_prob=hidden_dropout_prob,
        )

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=False,
        attention_scores_res=None,
    ):
        attn_results = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            output_attentions,
            attention_scores_res,
        )
        projected = self.output(attn_results[0], hidden_states)
        # Keep any trailing entries (attention probs / RealFormer residual).
        return (projected,) + attn_results[1:]
class Intermediate(nn.Module):
    """Feed-forward expansion: Linear followed by a named activation."""

    def __init__(self,
                 hidden_size=768,
                 intermediate_size=1024,
                 hidden_act='gelu',
                 ):
        super().__init__()
        self.dense = nn.Linear(hidden_size, intermediate_size)
        # Resolve the activation by name from torch.nn.functional (e.g. F.gelu).
        self.intermediate_act_fn = getattr(F, hidden_act)

    def forward(self, hidden_states):
        return self.intermediate_act_fn(self.dense(hidden_states))
class Output(nn.Module):
def __init__(self,
hidden_size=768,
intermediate_size=1024,
hidden_dropout_prob=0.1,
layer_norm_eps=1e-12,
):
super().__init__()
self.dense = nn.Linear(intermediate_size, hidden_size)
self.dropout = nn.Dropout(hidden_dropout_prob)
self.LayerNorm = nn.LayerNorm(hidden_size, eps=layer_norm_eps)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TransformerLayer(nn.Module):
    """One encoder-style Transformer layer: attention block + feed-forward
    block, with RealFormer residual attention scores threaded through."""
    def __init__(self,
                 num_attention_heads=12,
                 hidden_size=768,
                 attention_probs_dropout_prob=0.1,
                 hidden_act='gelu',
                 intermediate_size=1024,
                 hidden_dropout_prob=0.1,
                 position_embedding_type='absolute',
                 layer_norm_eps=1e-12,
                 ):
        super().__init__()
        self.attention = Attention(
            num_attention_heads=num_attention_heads,
            hidden_size=hidden_size,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            position_embedding_type=position_embedding_type,
            layer_norm_eps=layer_norm_eps,
            hidden_dropout_prob=hidden_dropout_prob,
        )
        self.intermediate = Intermediate(
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
        )
        self.output = Output(
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            hidden_dropout_prob=hidden_dropout_prob,
            layer_norm_eps=layer_norm_eps,
        )

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=False,
        attention_scores_res=None,
    ):
        hidden_states = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            output_attentions,
            attention_scores_res,
        )
        attention_output = hidden_states[0]
        # The RealFormer residual is always the last tuple element
        # (None when the feature is disabled).
        attention_scores_res = hidden_states[-1]
        hidden_states = self.intermediate(attention_output)
        hidden_states = self.output(hidden_states, attention_output)
        # Returns (layer_output, attention_scores_res) for the next layer.
        return hidden_states, attention_scores_res
class TransformerLayerDecoder(nn.Module):
    """Decoder-style Transformer layer: self-attention, cross-attention over
    encoder states, then a feed-forward block; RealFormer residuals for both
    attentions are threaded through separately."""
    def __init__(self,
                 num_attention_heads=12,
                 hidden_size=768,
                 attention_probs_dropout_prob=0.1,
                 hidden_act='gelu',
                 intermediate_size=1024,
                 hidden_dropout_prob=0.1,
                 position_embedding_type='absolute',
                 layer_norm_eps=1e-12,
                 ):
        super().__init__()
        self.attention_self = Attention(
            num_attention_heads=num_attention_heads,
            hidden_size=hidden_size,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            position_embedding_type=position_embedding_type,
            layer_norm_eps=layer_norm_eps,
            hidden_dropout_prob=hidden_dropout_prob,
        )
        self.attention_encoder = Attention(
            num_attention_heads=num_attention_heads,
            hidden_size=hidden_size,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            position_embedding_type=position_embedding_type,
            layer_norm_eps=layer_norm_eps,
            hidden_dropout_prob=hidden_dropout_prob,
        )
        self.intermediate = Intermediate(
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
        )
        self.output = Output(
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            hidden_dropout_prob=hidden_dropout_prob,
            layer_norm_eps=layer_norm_eps,
        )

    def forward(
        self,
        hidden_states,  # queries
        attention_mask=None,  # self-attention mask; needed for translation-style decoders, not for the zero-shot video temporal-prediction use case
        head_mask=None,  # per-head mask; rarely needed
        encoder_hidden_states=None,  # keys/values for cross-attention
        encoder_attention_mask=None,  # cross-attention mask; cancels text padding of uneven lengths
        output_attentions=False,  # whether to also return attention maps (normally off)
        attention_scores_res_self=False,  # previous layer's self-attention scores (RealFormer)
        attention_scores_res_encoder=False,  # previous layer's cross-attention scores (RealFormer)
    ):
        # NOTE(review): the two RealFormer defaults are False rather than None;
        # SelfAttention only checks `is not None`, so False behaves as an
        # always-on zero residual — confirm this is intended.
        hidden_states = self.attention_self(
            hidden_states,
            attention_mask,
            head_mask,
            None,
            None,
            output_attentions,
            attention_scores_res_self,
        )
        hidden_states, attention_scores_res_self = hidden_states
        # attention_mask passed here is ignored inside SelfAttention for the
        # cross step (it uses encoder_attention_mask when K/V are external).
        hidden_states = self.attention_encoder(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            output_attentions,
            attention_scores_res_encoder,
        )
        attention_output, attention_scores_res_encoder = hidden_states
        hidden_states = self.intermediate(attention_output)
        hidden_states = self.output(hidden_states, attention_output)
        return hidden_states, attention_scores_res_self, attention_scores_res_encoder
class VisionLanguageDecoder(nn.Module):
    """Stack of decoder layers: vision features attend to language features."""
    def __init__(self,
                 num_transformer_decoder_layers=4,
                 num_attention_heads=12,
                 hidden_size=768,
                 attention_probs_dropout_prob=0.1,
                 hidden_act='gelu',
                 intermediate_size=1024,
                 hidden_dropout_prob=0.1,
                 position_embedding_type='absolute',
                 layer_norm_eps=1e-12,
                 real_former=False,
                 ):
        super().__init__()
        self.transformer = nn.ModuleList([
            TransformerLayerDecoder(
                num_attention_heads=num_attention_heads,
                hidden_size=hidden_size,
                attention_probs_dropout_prob=attention_probs_dropout_prob,
                hidden_act=hidden_act,
                intermediate_size=intermediate_size,
                hidden_dropout_prob=hidden_dropout_prob,
                position_embedding_type=position_embedding_type,
                layer_norm_eps=layer_norm_eps,
            )
            for _ in range(num_transformer_decoder_layers)
        ])
        self.real_former = real_former

    def forward(self,
                vision_feature,  # bs, N_v, 768
                language_feature,  # bs, N_l, 768
                language_mask,  # bs, N_l (1 = real token, 0 = padding)
                vision_attention_mask=None,
                language_attention_mask=None,
                ):
        # Self-attention mask; needed for translation-style decoders, not for
        # zero-shot video temporal prediction — default to no masking (0).
        if vision_attention_mask is None:
            vision_attention_mask = 0
        # Cross-attention mask; cancels the effect of padding text to a
        # common length (padding positions get a large negative additive bias).
        # NOTE(review): this produces shape (bs, N_l, 1); attention scores are
        # (bs, heads, N_v, N_l), so broadcasting only lines up if N_v == N_l.
        # The usual form is language_mask[:, None, None, :] — verify.
        if language_attention_mask is None:
            language_attention_mask = (1 - language_mask[..., None]) * -9999
        hidden_states = vision_feature
        # RealFormer starts the residual accumulators at 0; otherwise None
        # disables the residual path inside SelfAttention.
        if self.real_former:
            attention_scores_res_self = 0
            attention_scores_res_encoder = 0
        else:
            attention_scores_res_self = None
            attention_scores_res_encoder = None
        for i, layer_module in enumerate(self.transformer):
            hidden_states = layer_module(
                hidden_states=hidden_states,  # queries
                attention_mask=vision_attention_mask,  # query-side mask
                encoder_hidden_states=language_feature,  # keys/values
                encoder_attention_mask=language_attention_mask,  # key/value mask
                attention_scores_res_self=attention_scores_res_self,  # self RealFormer residual
                attention_scores_res_encoder=attention_scores_res_encoder,  # cross RealFormer residual
            )
            hidden_states, attention_scores_res_self, attention_scores_res_encoder = hidden_states
        return hidden_states
if __name__ == '__main__':
from rich import print
self_attention = SelfAttention()
self_output = SelfOutput()
attention = Attention()
intermediate = Intermediate()
output = Output()
transformer_layer = TransformerLayer()
transformer_decoder_layer = TransformerLayerDecoder()
x0 = torch.rand(10, 13, 768)
att_realformer0 = None
x1, att_realformer1 = transformer_layer(x0, attention_scores_res=att_realformer0)
x2, att_realformer2 = transformer_layer(x1, attention_scores_res=att_realformer1)
x3, att_realformer3 = transformer_layer(x2, attention_scores_res=att_realformer2)
print(x0.shape, )
print(x1.shape, att_realformer1.shape if att_realformer1 is not None else att_realformer1)
print(x2.shape, att_realformer2.shape if att_realformer2 is not None else att_realformer2)
print(x3.shape, att_realformer3.shape if att_realformer3 is not None else att_realformer3)
# hidden_states,
# attention_mask=selfattention_mask,
# head_mask=None,
# encoder_hidden_states=input_encoder,
# encoder_attention_mask=encoderattention_mask,
# output_attentions=False,
# attention_scores_res_self=attention_scores_res_self,
# attention_scores_res_encoder=attention_scores_res_encoder,
x = torch.rand(10, 7, 768)
x0 = torch.rand(10, 13, 768)
att_realformer0, att_realformer00 = 0, 0
x1, att_realformer1, att_realformer11 = transformer_decoder_layer(x0, encoder_hidden_states=x, attention_scores_res_self=att_realformer0, attention_scores_res_encoder=att_realformer00)
x2, att_realformer2, att_realformer22 = transformer_decoder_layer(x1, encoder_hidden_states=x, attention_scores_res_self=att_realformer1, attention_scores_res_encoder=att_realformer11)
x3, att_realformer3, att_realformer33 = transformer_decoder_layer(x2, encoder_hidden_states=x, attention_scores_res_self=att_realformer2, attention_scores_res_encoder=att_realformer22)
print(x0.shape, )
print(x1.shape, att_realformer1.shape if att_realformer1 is not None else att_realformer1)
print(x2.shape, att_realformer2.shape if att_realformer2 is not None else att_realformer2)
print(x3.shape, att_realformer3.shape if att_realformer3 is not None else att_realformer3)
pass | StarcoderdataPython |
1691938 | <gh_stars>1-10
# -*- encoding: utf-8 -*-
"""
Insertion Sort em Python
by Ed
"""
def insertion_sort(lista):
    """Sort *lista* in place with insertion sort; prints and returns it."""
    for upper in range(1, len(lista)):
        current = lista[upper]
        pos = upper - 1
        # Shift every larger element one slot to the right, then drop
        # the current element into the gap.
        while pos >= 0 and current < lista[pos]:
            lista[pos + 1] = lista[pos]
            pos -= 1
        lista[pos + 1] = current
    print(lista)
    return lista
import ast

while True:
    valor_ini = input("Digite os números do seu vetor, como 10,20,30, digite:")
    # SECURITY FIX: the original ran eval() on raw user input, which executes
    # arbitrary code.  ast.literal_eval accepts only Python literals, so
    # "10,20,30" still parses to [10, 20, 30] but code injection is rejected.
    valores = ast.literal_eval("[" + valor_ini + "]")
    print(insertion_sort(valores))
1720780 | <reponame>rtu715/NAS-Bench-360
from typing import Any, Dict, Union, Sequence
import boto3
import os
import tempfile
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.datasets as dset
from torch.optim.lr_scheduler import CosineAnnealingLR
from collections import Counter
from sklearn.metrics import classification_report, confusion_matrix
from determined.pytorch import (
PyTorchTrial,
PyTorchTrialContext,
DataLoader,
LRScheduler,
PyTorchCallback
)
from data import BilevelDataset
from model_search import Network
from optimizer import EG
from utils import AttrDict, accuracy, AverageMeter, calculate_stats
from data_utils.load_data import load_data
from data_utils.download_data import download_from_s3
TorchData = Union[Dict[str, torch.Tensor], Sequence[torch.Tensor], torch.Tensor]
class GenotypeCallback(PyTorchCallback):
    """Determined callback that prints the search network's genotype
    (the currently derived architecture) after every validation pass."""
    def __init__(self, context):
        # The first wrapped model is the DARTS search Network (see GAEASearchTrial).
        self.model = context.models[0]

    def on_validation_end(self, metrics):
        # Log the architecture decode so search progress can be inspected.
        print(self.model.genotype())
class GAEASearchTrial(PyTorchTrial):
    """Determined trial implementing GAEA bi-level architecture search.

    Shared network weights are trained with SGD while the architecture
    mixture weights are trained with exponentiated-gradient (EG) steps on
    a held-out split of the training data (see ``train_batch``).
    """
    def __init__(self, trial_context: PyTorchTrialContext) -> None:
        self.context = trial_context
        self.hparams = AttrDict(trial_context.get_hparams())
        # Last epoch seen by train_batch; used to reshuffle the held-out
        # (architecture-update) indices exactly once per epoch.
        self.last_epoch = 0
        self.download_directory = self.download_data_from_s3()
        # Per-task (n_classes, in_channels) pairs for the supported tasks.
        dataset_hypers = {'ECG': (4, 1), 'satellite': (24, 1), 'deepsea': (36, 4)}
        n_classes, in_channels = dataset_hypers[self.hparams.task]
        if self.hparams.task == 'deepsea':
            # deepsea is multi-label, hence BCE-with-logits and no accuracy.
            criterion = nn.BCEWithLogitsLoss().cuda()
            self.accuracy = False
        else:
            criterion = nn.CrossEntropyLoss().cuda()
            self.accuracy = True
        # Initialize the models.
        self.model = self.context.wrap_model(
            Network(
                self.hparams.init_channels,
                n_classes,
                self.hparams.layers,
                criterion,
                self.hparams.nodes,
                k=self.hparams.shuffle_factor,
                in_channels=in_channels,
            )
        )
        total_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)/ 1e6
        print('Parameter size in MB: ', total_params)
        # Initialize the optimizers and learning rate scheduler.
        self.ws_opt = self.context.wrap_optimizer(
            torch.optim.SGD(
                self.model.ws_parameters(),
                self.hparams.learning_rate,
                momentum=self.hparams.momentum,
                weight_decay=self.hparams.weight_decay,
            )
        )
        self.arch_opt = self.context.wrap_optimizer(
            EG(
                self.model.arch_parameters(),
                self.hparams.arch_learning_rate,
                # Renormalization mapping architecture weights back onto the simplex.
                lambda p: p / p.sum(dim=-1, keepdim=True),
            )
        )
        self.lr_scheduler = self.context.wrap_lr_scheduler(
            lr_scheduler=CosineAnnealingLR(
                self.ws_opt,
                self.hparams.scheduler_epochs,
                self.hparams.min_learning_rate,
            ),
            step_mode=LRScheduler.StepMode.STEP_EVERY_EPOCH,
        )
    def download_data_from_s3(self):
        '''Download data from s3 to store in temp directory'''
        s3_bucket = self.context.get_data_config()["bucket"]
        # One directory per distributed rank so workers do not collide.
        download_directory = f"/tmp/data-rank{self.context.distributed.get_rank()}"
        s3 = boto3.client("s3")
        os.makedirs(download_directory, exist_ok=True)
        download_from_s3(s3_bucket, self.hparams.task, download_directory)
        #download_directory = '.'
        self.train_data, self.val_data, _ = load_data(self.hparams.task, download_directory, True)
        return download_directory
    def build_training_data_loader(self) -> DataLoader:
        """
        For bi-level NAS, we'll need each instance from the dataloader to have one image
        for training shared-weights and another for updating architecture parameters.
        """
        bilevel = BilevelDataset(self.train_data)
        self.train_data = bilevel
        print('Length of bilevel dataset: ', len(bilevel))
        return DataLoader(bilevel, batch_size=self.context.get_per_slot_batch_size(), shuffle=True, num_workers=2)
    def build_validation_data_loader(self) -> DataLoader:
        """Return a loader over the evaluation split (no shuffling)."""
        valset = self.val_data
        return DataLoader(valset, batch_size=self.context.get_per_slot_batch_size(), shuffle=False, num_workers=2)
    def train_batch(
        self, batch: TorchData, epoch_idx: int, batch_idx: int
    ) -> Dict[str, torch.Tensor]:
        """One bi-level step: a shared-weight update, then (after warm-up) an arch update."""
        if epoch_idx != self.last_epoch:
            # New epoch: reshuffle which samples feed the architecture update.
            self.train_data.shuffle_val_inds()
        self.last_epoch = epoch_idx
        x_train, y_train, x_val, y_val = batch
        if self.hparams.task == 'deepsea':
            # BCEWithLogitsLoss requires float targets.
            y_train = y_train.float()
            y_val = y_val.float()
        # Train shared-weights
        for a in self.model.arch_parameters():
            a.requires_grad = False
        for w in self.model.ws_parameters():
            w.requires_grad = True
        loss = self.model._loss(x_train, y_train)
        self.context.backward(loss)
        self.context.step_optimizer(
            optimizer=self.ws_opt,
            clip_grads=lambda params: torch.nn.utils.clip_grad_norm_(
                params,
                self.context.get_hparam("clip_gradients_l2_norm"),
            ),
        )
        arch_loss = 0.0
        # Train arch parameters
        # NOTE(review): architecture updates only start after epoch 10, so the
        # first epochs act as weight-only warm-up — confirm this matches intent.
        if epoch_idx > 10:
            for a in self.model.arch_parameters():
                a.requires_grad = True
            for w in self.model.ws_parameters():
                w.requires_grad = False
            arch_loss = self.model._loss(x_val, y_val)
            self.context.backward(arch_loss)
            self.context.step_optimizer(self.arch_opt)
        return {
            "loss": loss,
            "arch_loss": arch_loss,
        }
    def evaluate_full_dataset(
        self, data_loader: torch.utils.data.DataLoader
    ) -> Dict[str, Any]:
        """Dispatch to the task-specific evaluation routine."""
        if self.hparams.task == 'ECG':
            return self.evaluate_full_dataset_ECG(data_loader)
        elif self.hparams.task == 'satellite':
            return self.evaluate_full_dataset_satellite(data_loader)
        elif self.hparams.task == 'deepsea':
            return self.evaluate_full_dataset_deepsea(data_loader)
        return None
    def evaluate_full_dataset_ECG(
        self, data_loader: torch.utils.data.DataLoader
    ) -> Dict[str, Any]:
        """ECG evaluation: per-patient majority vote, scored by macro F1 over 4 classes."""
        loss_avg = AverageMeter()
        all_pred_prob = []
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                input, target = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.model._loss(input, target)
                loss_avg.update(loss, n)
                all_pred_prob.append(logits.cpu().data.numpy())
        all_pred_prob = np.concatenate(all_pred_prob)
        all_pred = np.argmax(all_pred_prob, axis=1)
        ## vote most common
        final_pred = []
        final_gt = []
        pid_test = self.val_data.pid
        for i_pid in np.unique(pid_test):
            # Aggregate all segments of one patient into a single prediction.
            tmp_pred = all_pred[pid_test == i_pid]
            tmp_gt = self.val_data.label[pid_test == i_pid]
            final_pred.append(Counter(tmp_pred).most_common(1)[0][0])
            final_gt.append(Counter(tmp_gt).most_common(1)[0][0])
        ## classification report
        tmp_report = classification_report(final_gt, final_pred, output_dict=True)
        print(confusion_matrix(final_gt, final_pred))
        f1_score = (tmp_report['0']['f1-score'] + tmp_report['1']['f1-score'] + tmp_report['2']['f1-score'] +
                    tmp_report['3']['f1-score']) / 4
        results = {
            "loss": loss_avg.avg,
            "score": f1_score,
        }
        return results
    def evaluate_full_dataset_satellite(
        self, data_loader: torch.utils.data.DataLoader
    ) -> Dict[str, Any]:
        """Satellite evaluation: average loss plus top-1/top-5 accuracy."""
        acc_top1 = AverageMeter()
        acc_top5 = AverageMeter()
        loss_avg = AverageMeter()
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                input, target = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.model._loss(input, target)
                top1, top5 = accuracy(logits, target, topk=(1,5))
                acc_top1.update(top1.item(), n)
                acc_top5.update(top5.item(), n)
                loss_avg.update(loss, n)
        results = {
            "loss": loss_avg.avg,
            "top1_accuracy": acc_top1.avg,
            "top5_accuracy": acc_top5.avg,
        }
        return results
    def evaluate_full_dataset_deepsea(
        self, data_loader: torch.utils.data.DataLoader
    ) -> Dict[str, Any]:
        """Deepsea (multi-label) evaluation: mean AUC and mean AP over labels."""
        loss_avg = AverageMeter()
        test_predictions = []
        test_gts = []
        with torch.no_grad():
            for batch in data_loader:
                batch = self.context.to_device(batch)
                input, target = batch
                n = input.size(0)
                logits = self.model(input)
                loss = self.model._loss(input, target.float())
                loss_avg.update(loss, n)
                # Sigmoid converts per-label logits into independent probabilities.
                logits_sigmoid = torch.sigmoid(logits)
                test_predictions.append(logits_sigmoid.detach().cpu().numpy())
                test_gts.append(target.detach().cpu().numpy())
        test_predictions = np.concatenate(test_predictions).astype(np.float32)
        test_gts = np.concatenate(test_gts).astype(np.int32)
        stats = calculate_stats(test_predictions, test_gts)
        mAP = np.mean([stat['AP'] for stat in stats])
        mAUC = np.mean([stat['auc'] for stat in stats])
        results = {
            "test_mAUC": mAUC,
            "test_mAP": mAP,
        }
        return results
    def build_callbacks(self):
        """Register the callback that prints the derived genotype after validation."""
        return {"genotype": GenotypeCallback(self.context)}
| StarcoderdataPython |
3247147 | <reponame>faezezps/SiMQC<gh_stars>10-100
import argparse
import json
import nltk
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import pairwise_distances
from hotpot import config
from hotpot.tfidf_retriever.utils import STOPWORDS
def build_all_dev_rankings_file(out_file, docs_json, k):
    """Attach the top-k TF-IDF-ranked paragraphs to every SQuAD dev question
    and dump the resulting question list to *out_file* as JSON."""
    print("loading data...")
    with open(config.SQUAD_DEV_FILE, 'r') as f:
        squad_docs = json.load(f)['data']
    questions = []
    for doc in squad_docs:
        for par in doc['paragraphs']:
            for qa in par['qas']:
                questions.append({
                    'qid': qa['id'],
                    'question': qa['question'],
                    'answers': [ans['text'] for ans in qa['answers']],
                })
    with open(docs_json, 'r') as f:
        corpus = json.load(f)
    paragraphs = [p for doc_pars in corpus.values() for p in doc_pars]
    vectorizer = TfidfVectorizer(strip_accents="unicode", stop_words=STOPWORDS)
    para_matrix = vectorizer.fit_transform(paragraphs)
    question_matrix = vectorizer.transform([q['question'] for q in questions])
    # Cosine distance: smaller means more similar, so keep the k smallest.
    dist = pairwise_distances(question_matrix, para_matrix, "cosine")
    nearest = np.argpartition(dist, kth=list(range(k)), axis=1)[:, :k]
    for row, question in zip(nearest, questions):
        question['paragraphs'] = [paragraphs[i] for i in row]
    with open(out_file, 'w') as f:
        json.dump(questions, f)
if __name__ == '__main__':
    # CLI entry point: positional arguments are <docs_json> <out_file> <top_k>.
    parser = argparse.ArgumentParser(description='Encode all DrQA-SQuAD data')
    parser.add_argument('docs_json', help='model directory to use for ranking')
    parser.add_argument('out_file', help="filename to dump the top-k dataset")
    parser.add_argument('top_k', type=int)
    # parser.add_argument('--all-dev', action='store_true')
    args = parser.parse_args()
    build_all_dev_rankings_file(args.out_file, args.docs_json, args.top_k)
| StarcoderdataPython |
3238078 | <reponame>Horta/limix-tool
from .nh2 import recovery_true_heritability as nh2
| StarcoderdataPython |
29879 | <gh_stars>0
# -*- coding: utf-8 -*-
from .._protos.public.uac import Organization_pb2 as _Organization
from .._protos.public.common import CommonService_pb2 as _CommonCommonService
class CollaboratorType:
    """Bundle of collaborator-type settings applied to an organization.

    Each attribute is either ``None`` (treated as the read-only default by
    the consumer) or the name of a collaborator-type enum value such as
    ``"READ_WRITE"``.
    """

    def __init__(self, global_collaborator_type=None, default_repo_collaborator_type=None,
                 default_endpoint_collaborator_type=None, default_dataset_collaborator_type=None):
        # Mirror every constructor argument onto an attribute of the same
        # name, preserving the declaration order of the attributes.
        for attr_name, attr_value in (
            ('global_collaborator_type', global_collaborator_type),
            ('default_repo_collaborator_type', default_repo_collaborator_type),
            ('default_endpoint_collaborator_type', default_endpoint_collaborator_type),
            ('default_dataset_collaborator_type', default_dataset_collaborator_type),
        ):
            setattr(self, attr_name, attr_value)
class Organization:
    """
    Object representing an Organization.
    """
    def __init__(self, conn, msg):
        # conn: backend connection used for follow-up REST calls.
        # msg: the protobuf Organization message this object mirrors.
        self.conn = conn
        self.msg = msg
        self.id = msg.id
        self.name = msg.name
    @classmethod
    def _create(cls, conn, name, desc=None, collaborator_type=None, global_can_deploy=False):
        # Create a new organization on the backend and return a wrapper for it.
        Message = _Organization.SetOrganization
        msg = cls._create_msg(name, desc, collaborator_type, global_can_deploy)
        response = conn.make_proto_request("POST",
                                           "/api/v1/uac-proxy/organization/setOrganization",
                                           body=Message(organization=msg))
        org = conn.must_proto_response(response, Message.Response).organization
        print("created new Organization: {}".format(org.name))
        return cls(conn, org)
    @classmethod
    def _create_msg(cls, name, desc, collaborator_type, global_can_deploy):
        # Build the protobuf Organization message from plain Python arguments.
        Message = _Organization.Organization
        if not collaborator_type:
            collaborator_type = CollaboratorType()
        if global_can_deploy:
            can_deploy_value = _CommonCommonService.TernaryEnum.Ternary.TRUE
        else:
            can_deploy_value = _CommonCommonService.TernaryEnum.Ternary.FALSE
        msg = Message(name=name, description=desc, global_can_deploy=can_deploy_value)
        # Copy each collaborator-type attribute onto the protobuf message,
        # translating enum names to values; unset fields default to READ_ONLY.
        for key in collaborator_type.__dict__:
            try:
                attr = getattr(collaborator_type, key)
                if not attr:
                    value = _CommonCommonService.CollaboratorTypeEnum.CollaboratorType.READ_ONLY
                else:
                    value = _CommonCommonService.CollaboratorTypeEnum.CollaboratorType.Value(attr)
                setattr(msg, key, value)
            except ValueError:
                # Raised by .Value() for names outside the enum.
                unknown_value_error = "Unknown value specified for {}. Possible values are READ_ONLY, READ_WRITE."
                raise ValueError(unknown_value_error.format(key))
        return msg
    @classmethod
    def _get_by_name(cls, conn, name):
        # Look up an existing organization by its name.
        Message = _Organization.GetOrganizationByName
        msg = Message(org_name=name)
        response = conn.make_proto_request("GET",
                                           "/api/v1/uac-proxy/organization/getOrganizationByName",
                                           params=msg)
        org = conn.must_proto_response(response, Message.Response).organization
        return cls(conn, org)
    # NOTE(review): the string below is a stray class-level statement, not a
    # docstring; it documents add_member and should ideally live inside it.
    """
    Adds member to an organization
    Parameters
    ----------
    share_with : str
        Represents email or username.
    """
    def add_member(self, share_with):
        Message = _Organization.AddUser
        response = self.conn.make_proto_request("POST",
                                                "/api/v1/uac-proxy/organization/addUser",
                                                body=Message(org_id=self.id, share_with=share_with))
        # NOTE(review): the parsed status is not returned; callers only see a
        # failure via the exception raised inside must_proto_response.
        status = self.conn.must_proto_response(response, Message.Response).status
3277163 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""NIM REST API Python Client -- Team component"""
from __future__ import absolute_import
import json
from netease_im import util
from netease_im.components import base
from netease_im.util import is_str_type
__author__ = "<NAME>"
__email__ = "<EMAIL>"
class TeamComponent(base.BaseComponent):
    """Component dealing with all team related matters."""
    def create(self, **kwargs):
        """
        Create a team (group).
        """
        util.require_keys(kwargs, ['tname', 'owner', 'members', 'msg', 'magree', 'joinmode'], False)
        # members must be a JSON array string of accids, e.g. ["zhangsan"]
        if not is_str_type(kwargs['members']):
            kwargs['members'] = json.dumps(kwargs['members'])
        return self.post_request('/team/create.action', data=kwargs)
    def add(self, **kwargs):
        """
        Invite members into a team.
        """
        util.require_keys(kwargs, ['tid', 'owner', 'members', 'msg', 'magree'], False)
        # members must be a JSON array string of accids, e.g. ["zhangsan"]
        if not is_str_type(kwargs['members']):
            kwargs['members'] = json.dumps(kwargs['members'])
        return self.post_request('/team/add.action', data=kwargs)
    def kick(self, **kwargs):
        """
        Kick members out of a team.
        """
        util.require_keys(kwargs, ['tid', 'owner'], False)
        if 'member' not in kwargs and 'members' not in kwargs:
            raise ValueError("either 'member' or 'members' must be set")
        if 'members' in kwargs and not is_str_type(kwargs['members']):
            kwargs['members'] = json.dumps(kwargs['members'])
        return self.post_request('/team/kick.action', data=kwargs)
    def remove(self, **kwargs):
        """
        Dismiss (delete) a team.
        """
        util.require_keys(kwargs, ['tid', 'owner'], False)
        return self.post_request('/team/remove.action', data=kwargs)
    def update(self, **kwargs):
        """
        Edit team profile information.
        """
        util.require_keys(kwargs, ['tid', 'owner'], False)
        return self.post_request('/team/update.action', data=kwargs)
    def query(self, **kwargs):
        """
        Query team info and the member list.
        """
        util.require_keys(kwargs, ['tids', 'ope'], False)
        # tids must be a JSON array string of team ids
        if not is_str_type(kwargs['tids']):
            kwargs['tids'] = json.dumps(kwargs['tids'])
        return self.post_request('/team/query.action', data=kwargs)
    def query_detail(self, **kwargs):
        """
        Get detailed information about a team.
        """
        util.require_keys(kwargs, 'tid', False)
        return self.post_request('/team/queryDetail.action', data=kwargs)
    def get_mark_read_info(self, **kwargs):
        """
        Get read-receipt details for an already-read team message.
        """
        util.require_keys(kwargs, ['tid', 'msgid', 'fromAccid'], False)
        return self.post_request('/team/getMarkReadInfo.action', data=kwargs)
    def change_owner(self, **kwargs):
        """
        Transfer team ownership.
        """
        util.require_keys(kwargs, ['tid', 'owner', 'newowner', 'leave'], False)
        return self.post_request('/team/changeOwner.action', data=kwargs)
    def add_manager(self, **kwargs):
        """
        Appoint team managers (administrators).
        """
        util.require_keys(kwargs, ['tid', 'owner', 'members'], False)
        if not is_str_type(kwargs['members']):
            kwargs['members'] = json.dumps(kwargs['members'])
        return self.post_request('/team/addManager.action', data=kwargs)
    def remove_manager(self, **kwargs):
        """
        Remove team managers (administrators).
        """
        util.require_keys(kwargs, ['tid', 'owner', 'members'], False)
        if not is_str_type(kwargs['members']):
            kwargs['members'] = json.dumps(kwargs['members'])
        return self.post_request('/team/removeManager.action', data=kwargs)
    def join_teams(self, **kwargs):
        """
        List the teams a given user has joined.
        """
        util.require_keys(kwargs, ['accid'], False)
        return self.post_request('/team/joinTeams.action', data=kwargs)
    def update_team_nick(self, **kwargs):
        """
        Update a member's nickname inside a team.
        """
        util.require_keys(kwargs, ['tid', 'owner', 'accid', 'nick'], False)
        return self.post_request('/team/updateTeamNick.action', data=kwargs)
    def mute_team(self, **kwargs):
        """
        Toggle message notifications for a team.
        """
        util.require_keys(kwargs, ['tid', 'accid', 'ope'], False)
        return self.post_request('/team/muteTeam.action', data=kwargs)
    def mute_tlist(self, **kwargs):
        """
        Mute or unmute a team member.
        """
        util.require_keys(kwargs, ['tid', 'owner', 'accid', 'mute'], False)
        return self.post_request('/team/muteTlist.action', data=kwargs)
    def leave(self, **kwargs):
        """
        Leave a team voluntarily.
        """
        util.require_keys(kwargs, ['tid', 'accid'], False)
        return self.post_request('/team/leave.action', data=kwargs)
    def mute_tlist_all(self, **kwargs):
        """
        Mute the whole team at once.
        """
        util.require_keys(kwargs, ['tid', 'owner'], False)
        if 'mute' not in kwargs and 'muteType' not in kwargs:
            raise ValueError("either 'mute' or 'muteType' must be set")
        return self.post_request('/team/muteTlistAll.action', data=kwargs)
    def list_team_mute(self, **kwargs):
        """
        Get the list of muted members of a team.
        """
        util.require_keys(kwargs, ['tid', 'owner'], False)
        return self.post_request('/team/listTeamMute.action', data=kwargs)
| StarcoderdataPython |
3330363 | <filename>src/main.py
# import some modules to be used in py-spark SQL
from pyspark import SparkConf
from pyspark.sql import SparkSession
from pyspark.sql import HiveContext
from tools import buildFiles
from latentsFactorsModel import generateUsersItensVectors
from mips import process
# Initialize Saprk Session
import os
# Pipeline driver: build the combined input file if it is missing, compute
# user/item latent factors, then run the MIPS step on those factors.
DATA_PATH = '../data/'
RESULTS_PATH = '../results/'
INPUT_FILE_NAME = 'inputData-topNmillion.txt'
fileNames = [DATA_PATH + 'combined_data_1.txt', DATA_PATH + 'combined_data_2.txt', DATA_PATH + 'combined_data_3.txt', DATA_PATH + 'combined_data_4.txt']
# Only (re)build the merged input file when it does not exist yet.
if not os.path.isfile(RESULTS_PATH + INPUT_FILE_NAME):
    buildFiles(fileNames, RESULTS_PATH + INPUT_FILE_NAME)
usersFactors, itensFactors = generateUsersItensVectors(RESULTS_PATH + INPUT_FILE_NAME)
# NOTE(review): the pandas conversions below are unused downstream; they look
# like leftovers from debugging, same as the commented-out code that follows.
usersDataframe = usersFactors.toPandas()
itensDataframe = itensFactors.toPandas()
process(usersFactors, itensFactors)
#print(usersDataframe)
#print(itensDataframe)
#f = open(USERS_FILE_NAME, "w")
#for user in users:
#    f.write(str(user) + ',')
#f.close()
#print(users)
#spark=SparkSession.builder.appName("PySpark_Testing").getOrCreate()
#sc = spark.sparkContext
#sqlContext = HiveContext(sc)
4834219 | <filename>packit_service/service/api/koji_builds.py<gh_stars>0
# MIT License
#
# Copyright (c) 2019 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from http import HTTPStatus
from logging import getLogger
try:
from flask_restx import Namespace, Resource
except ModuleNotFoundError:
from flask_restplus import Namespace, Resource
from packit_service.service.api.utils import response_maker
from packit_service.service.api.parsers import indices, pagination_arguments
from packit_service.models import KojiBuildModel, optional_time
logger = getLogger("packit_service")
koji_builds_ns = Namespace("koji-builds", description="Production builds")
@koji_builds_ns.route("")
class KojiBuildsList(Resource):
    @koji_builds_ns.expect(pagination_arguments)
    @koji_builds_ns.response(HTTPStatus.PARTIAL_CONTENT, "Koji builds list follows")
    def get(self):
        """ List all Koji builds. """
        # Pagination window parsed from the request by indices().
        first, last = indices()
        result = []
        for build in KojiBuildModel.get_range(first, last):
            build_dict = {
                "build_id": build.build_id,
                "status": build.status,
                "build_submitted_time": optional_time(build.build_submitted_time),
                "chroot": build.target,
                "web_url": build.web_url,
                # from old data, sometimes build_logs_url is same and sometimes different to web_url
                "build_logs_url": build.build_logs_url,
                "pr_id": build.get_pr_id(),
                "branch_name": build.get_branch_name(),
                "release": build.get_release_tag(),
            }
            project = build.get_project()
            if project:
                # Only builds with an associated project expose repo metadata.
                build_dict["project_url"] = project.project_url
                build_dict["repo_namespace"] = project.namespace
                build_dict["repo_name"] = project.repo_name
            result.append(build_dict)
        resp = response_maker(
            result,
            status=HTTPStatus.PARTIAL_CONTENT.value,
        )
        # Content-Range is 1-indexed for client-side pagination.
        resp.headers["Content-Range"] = f"koji-builds {first + 1}-{last}/*"
        return resp
@koji_builds_ns.route("/<int:id>")
@koji_builds_ns.param("id", "Koji build identifier")
class KojiBuildItem(Resource):
    @koji_builds_ns.response(HTTPStatus.OK, "OK, koji build details follow")
    @koji_builds_ns.response(
        HTTPStatus.NOT_FOUND.value, "No info about build stored in DB"
    )
    def get(self, id):
        """A specific koji build details. From koji_build hash, filled by worker."""
        builds_list = KojiBuildModel.get_all_by_build_id(str(id))
        if not builds_list.first():
            return response_maker(
                {"error": "No info about build stored in DB"},
                status=HTTPStatus.NOT_FOUND.value,
            )
        # NOTE(review): several rows can share a build id; only the first is shown.
        build = builds_list[0]
        build_dict = {
            "build_id": build.build_id,
            "status": build.status,
            "build_start_time": optional_time(build.build_start_time),
            "build_finished_time": optional_time(build.build_finished_time),
            "build_submitted_time": optional_time(build.build_submitted_time),
            "chroot": build.target,
            "web_url": build.web_url,
            # from old data, sometimes build_logs_url is same and sometimes different to web_url
            "build_logs_url": build.build_logs_url,
            "pr_id": build.get_pr_id(),
            "branch_name": build.get_branch_name(),
            "ref": build.commit_sha,
            "release": build.get_release_tag(),
        }
        project = build.get_project()
        if project:
            build_dict["project_url"] = project.project_url
            build_dict["repo_namespace"] = project.namespace
            build_dict["repo_name"] = project.repo_name
        build_dict["srpm_logs"] = build.srpm_build.logs if build.srpm_build else None
        return response_maker(build_dict)
| StarcoderdataPython |
3350989 | <reponame>fabiograssiotto/rashid
import sys
import os
import shutil
import time
from pathlib import Path
class Logger(object):
    """Per-module file logger.

    Depending on *module*, the constructor opens:
      - Main:   a single output/main.log
      - Model:  one .log and one .tex file per step, named <model>-<step>
      - Memory: a single output/memory.log shared by all steps
    The output/log and output/tex folders are wiped so each run starts clean.
    """
    MODULES_MAIN = 'Main'
    MODULES_MODEL = 'Model'
    MODULES_MEMORY = 'Memory'
    MODEL_ID = 'ID'
    MODEL_EDD = 'EDD'
    MODEL_SAM = 'SAM'
    MODEL_TOM = 'TOM'

    def __init__(self, module, steps, model=None):
        self.terminal = sys.stdout
        # Logging and LaTeX folders; remove contents from a previous run.
        log_folder = Path("output/log/")
        tex_folder = Path("output/tex/")
        shutil.rmtree(log_folder, ignore_errors=True)
        shutil.rmtree(tex_folder, ignore_errors=True)
        # makedirs also creates the parent "output" directory used below.
        os.makedirs(log_folder, exist_ok=True)
        os.makedirs(tex_folder, exist_ok=True)
        self.modellog = []
        self.modeltex = []
        self.memlog = []
        self.max_step = steps
        if module == Logger.MODULES_MAIN:
            # BUG FIX: the original "output\main.log" contained a literal
            # backslash, so on POSIX the file landed outside the output dir.
            self.mainlog = open(Path("output") / "main.log", "w+")
        elif module == Logger.MODULES_MODEL:
            for i in range(steps):
                log_file = model + "-" + str(i + 1) + ".log"
                tex_file = model + "-" + str(i + 1) + ".tex"
                self.modellog.append(open(log_folder / log_file, "w+"))
                self.modeltex.append(open(tex_folder / tex_file, "w+"))
        elif module == Logger.MODULES_MEMORY:
            # BUG FIX: the file was previously re-opened (and truncated) once
            # per step, leaving several competing handles on the same path.
            # All steps now share a single handle.
            memfile = open(Path("output") / "memory.log", "w+")
            self.memlog = [memfile] * steps
        self.module = module

    def write(self, message, step, logtoterm=False):
        """Append *message* to the log for *step* (1-based); optionally echo it."""
        if logtoterm:
            self.terminal.write(message + '\n')
        if self.module == Logger.MODULES_MAIN:
            self.mainlog.write(message + '\n')
        elif self.module == Logger.MODULES_MODEL:
            self.modellog[step - 1].write(message + '\n')
        elif self.module == Logger.MODULES_MEMORY:
            self.memlog[step - 1].write(message + '\n')

    def write_tex(self, message, step):
        """Append *message* to the LaTeX file for *step* (Model module only)."""
        self.modeltex[step - 1].write(message + '\n')

    def flush(self):
        # Flush every open handle so partial runs still leave readable logs
        # (the original flush() was a no-op kept only for file-like duck typing).
        for handle in [getattr(self, 'mainlog', None), *self.modellog,
                       *self.modeltex, *self.memlog]:
            if handle is not None:
                handle.flush()
| StarcoderdataPython |
88772 | <gh_stars>0
# "Assembunny" interpreter (Advent of Code 2016, day 23 part B style):
# executes the program in 23B.in with register a = 12 and prints a.
with open('23B.in', 'r') as f:
    insts = [s.split() for s in f]
p = 0
r = {'a': 12, 'b': 0, 'c': 0, 'd': 0}
def get_value(v):
    """Return *v* as an int literal, or the value of the register it names."""
    try:
        return int(v)
    except ValueError:  # FIX: was a bare except; only non-numeric operands fall through
        return r[v]
while p < len(insts) and p >= 0:
    inst = insts[p][0]
    if inst == 'tgl':
        # Toggle the instruction at the target offset: one-arg ops flip
        # inc<->dec, two-arg ops flip jnz<->cpy.
        n_p = p + int(r[insts[p][1]])
        if n_p < len(insts) and n_p >= 0:
            if len(insts[n_p]) == 2:
                insts[n_p][0] = 'dec' if insts[n_p][0] == 'inc' else 'inc'
            elif len(insts[n_p]) == 3:
                insts[n_p][0] = 'cpy' if insts[n_p][0] == 'jnz' else 'jnz'
    if inst == 'cpy':
        r[insts[p][2]] = get_value(insts[p][1])
    elif inst == 'dec':
        r[insts[p][1]] -= 1
    elif inst == 'inc':
        r[insts[p][1]] += 1
    elif inst == 'mul':
        # Non-standard extension: fused multiply-accumulate into a register.
        r[insts[p][3]] += get_value(insts[p][1]) * get_value(insts[p][2])
    elif inst == 'jnz':
        if get_value(insts[p][1]) != 0:
            # Jump taken: skip the p += 1 fall-through below.
            p += get_value(insts[p][2])
            continue
    p += 1
# FIX: the original used the Python 2 print statement ("print r['a']").
print(r['a'])
| StarcoderdataPython |
3264826 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
# Pelican static-site configuration for a personal blog published on GitHub Pages.
from __future__ import unicode_literals
AUTHOR = '<NAME>'
SITENAME = '<NAME>'
SITESUBTITLE = 'Data Scientist'
SITEURL = ''
BIO = 'Data scientist. Iron Man fan.'
PROFILE_IMAGE = 'avatar.jpg'
PATH = 'content'
STATIC_PATHS = ['images', 'pdfs', 'extra']
# Files under extra/ are copied to the site root (CNAME, favicons).
EXTRA_PATH_METADATA = {
    'extra/CNAME': {'path': 'CNAME'},
    'extra/favicon.png': {'path': 'favicon.png'},
    'extra/apple-touch-icon.png': {'path': 'apple-touch-icon.png'}
}
FAVICON = EXTRA_PATH_METADATA['extra/favicon.png']['path']
APPLE_ICON = EXTRA_PATH_METADATA['extra/apple-touch-icon.png']['path']
PLUGIN_PATHS = ['pelican-plugins']
PLUGINS = ['assets']
# Disable caching and versioning of static resources since GitHub pages
# caches stuff for only 10 mins
ASSET_CONFIG = (
    ('url_expire', False),
    ('manifest', False),
    ('cache', False),
)
TIMEZONE = 'America/Chicago'
DEFAULT_LANG = 'en'
DEFAULT_DATE_FORMAT = '%B %-d, %Y'
THEME = 'pelican-hyde'
DISPLAY_PAGES_ON_MENU = True
LOAD_CONTENT_CACHE = False
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Obfuscate email with: http://www.jottings.com/obfuscator/
# Then replace your `coded` and `key` js variables below
EMAIL_CODED = 'dZ8Z.8J.kZ3ME38@Jk8EF.LZk'
EMAIL_KEY = '<KEY>'
# Social widget
SOCIAL = (
    # Email obfuscated using the above variables
    ('email', ''),
    ('github-square', 'https://github.com/jagmoreira'),
    ('linkedin', 'https://www.linkedin.com/in/joao-moreira'),
)
DEFAULT_PAGINATION = 10
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
YEAR_ARCHIVE_SAVE_AS = 'posts/{date:%Y}/index.html'
1729936 | #!/usr/bin/python -u
# -*- coding: utf-8 -*-
# v 0.0.4 Tlek
import random
import datetime
import sys
from libs import respond
from libs import sql_query as sql
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from telegram import KeyboardButton, ReplyKeyboardMarkup
# The bot token is passed as the first CLI argument and shared with the
# respond helper module so it can issue API calls itself.
token = str(sys.argv[1])
respond.token = token
updater = Updater(token=token)
dispatcher = updater.dispatcher
# Reply keyboards: default (share/stats) and registration (start only).
kb_def = [[KeyboardButton('/share')], [KeyboardButton('/stats')]]
kb_reg = [[KeyboardButton('/start')]]
kb_markup = ReplyKeyboardMarkup(kb_def, resize_keyboard=True)
kb_markup_reg = ReplyKeyboardMarkup(kb_reg, resize_keyboard=True)
def startCommand(bot, update):
    """Handle /start: greet the user and register them if not seen before."""
    try:
        respond.welcome(bot, update, kb_markup)
        if not update.message.from_user.is_bot:
            res = sql.user(update.message.from_user.id)
            if len(res) == 0:
                # First contact: persist the user record.
                sql.recordUser(update.message.from_user.id,
                               update.message.from_user.first_name,
                               datetime.datetime.now().strftime("%H:%M:%S"),
                               update.message.chat_id, 0)
    except Exception as e:
        # NOTE(review): errors are only printed, never reported to the user.
        print(e)
def textMessage(bot, update):
    """Handle a plain text message: rate-limit, store it and notify other users."""
    try:
        allow = False
        res = sql.user(update.message.from_user.id)
        if len(res) == 0:
            # Unknown user: ask them to /start first.
            respond.register(bot, update, kb_markup_reg)
        elif abs(int(res[0][0][3:5]) - int(datetime.datetime.now().strftime("%M"))) > 0:
            # Minute field of the stored activity time differs from now,
            # i.e. the per-minute rate limit has expired.
            sql.recordActivity(update.message.from_user.id, datetime.datetime.now().strftime("%H:%M:%S"))
            sql.recordChatID(update.message.from_user.id, update.message.chat_id)
            allow = True
        else:
            respond.wait(bot, update, kb_markup)
        if allow:
            respond.received(bot, update, kb_markup, update.message.text)
            sql.recordMessage(update.message.from_user.id,
                              datetime.datetime.now().strftime("%Y-%m-%d"),
                              datetime.datetime.now().strftime("%H:%M:%S"),
                              update.message.text)
            res = sql.messageID(update.message.from_user.id, update.message.text)
            sql.recordHistory(update.message.from_user.id, res[0], 0)
            res = sql.browsed()
            # NOTE(review): 'reset' is assigned but never used.
            reset = True
            # Notify everyone who had exhausted the message pool that new
            # content is available (except the author themselves).
            for data in res:
                if data[1] != str(update.message.from_user.id): #data[1] - user_id
                    respond.notify(bot, data[0], kb_markup) #data[0] - chat_id
            sql.resetBrowsedExcept(update.message.from_user.id)
    except Exception as e:
        print(e)
def individualreq(bot, update, args):
    """Handle /share, /help and /stats, dispatching on the command text."""
    try:
        # NOTE(review): 'id' shadows the builtin; it holds the command name
        # with the leading '/' stripped.
        id = update.message.text
        id = id[1:]
        if id == 'share':
            res = sql.user(update.message.from_user.id)
            if len(res) == 0:
                respond.register(bot, update, kb_markup_reg)
            else:
                sql.recordChatID(update.message.from_user.id, update.message.chat_id)
                res = sql.messages()
                found = False
                # Pick a random message the user has not seen yet, discarding
                # already-seen candidates as we go.
                while (not found) and (len(res)!=0):
                    i = random.randint(0,len(res)-1)
                    if not sql.in_history(update.message.from_user.id, res[i][0]):
                        found = True
                    else:
                        res.pop(i)
                if len(res)!=0:
                    sql.recordHistory(update.message.from_user.id, res[i][0], res[i][3]+1)
                    response = u"[O]: " + res[i][2]
                    if len(res) == 1:
                        # That was the last unseen message: mark pool exhausted.
                        sql.recordBrowsed(update.message.from_user.id)
                else:
                    sql.recordBrowsed(update.message.from_user.id)
                    response = u"Простите, вы уже все посмотрели. Можете теперь сами мне написать, мы прочитаем!"
                bot.send_message(chat_id=update.message.chat_id, text=response, reply_markup=kb_markup)
        elif id == 'help':
            respond.help(bot, update, kb_markup)
            sql.recordChatID(update.message.from_user.id, update.message.chat_id)
        elif id == 'stats':
            res = sql.user(update.message.from_user.id)
            if len(res) == 0:
                respond.register(bot, update, kb_markup_reg)
            else:
                res = sql.stats(update.message.from_user.id)
                if len(res) == 0:
                    respond.empty(bot, update, kb_markup)
                else:
                    for stat in res:
                        respond.stats(bot, update, kb_markup, stat[0], stat[1]) # stat[0] - text_sent | stat[1] - views
    except Exception as e:
        print(e)
# Wire up the handlers and start long polling (idle() blocks until SIGINT).
start_command_handler = CommandHandler('start', startCommand)
text_message_handler = MessageHandler(Filters.text, textMessage)
dispatcher.add_handler(start_command_handler)
dispatcher.add_handler(text_message_handler)
dispatcher.add_handler(CommandHandler(['share', 'help', 'stats'], individualreq, pass_args=True))
updater.start_polling(clean=True)
updater.idle()
78101 | # -*- coding: utf-8 -*-
# upd dist
import sys
import sqlite3
import time
from typing import List, Dict
from collections import namedtuple, defaultdict
from common import upcase_regex, pos2pos, penn2pos, is_stop_ngram
# One row of the frequency dump: surface form, lemma, sublemma, POS tag,
# absolute frequency ('abs') and ARF (average reduced frequency).
Record = namedtuple('Record', ['word', 'lemma', 'sublemma', 'tag', 'abs', 'arf'])
def create_tables(db):
    """(Re)create the word/lemma/sublemma tables, dropping any existing ones."""
    statements = (
        'DROP TABLE IF EXISTS word',
        'DROP TABLE IF EXISTS lemma',
        'DROP TABLE IF EXISTS sublemma',
        'CREATE TABLE lemma (value TEXT, pos TEXT, count INTEGER, arf INTEGER, is_pname INTEGER, PRIMARY KEY(value, pos))',
        'CREATE TABLE sublemma (value TEXT, lemma TEXT, pos TEXT, count INTEGER, PRIMARY KEY (value, lemma, pos), FOREIGN KEY (lemma, pos) REFERENCES lemma(value, pos))',
        'CREATE TABLE word (value TEXT, lemma TEXT, sublemma TEXT, pos TEXT, count INTEGER, arf INTEGER, PRIMARY KEY (value, lemma, sublemma, pos), FOREIGN KEY (lemma, sublemma, pos) REFERENCES sublemma(lemma, value, pos))',
    )
    cursor = db.cursor()
    for sql_text in statements:
        cursor.execute(sql_text)
def get_lemma_total(rows: List[Record]):
    """Total absolute frequency over all word records of one lemma."""
    total = 0
    for record in rows:
        total += record.abs
    return total
def get_lemma_arf(rows: List[Record]):
    """Total ARF (average reduced frequency) over all word records of one lemma."""
    total = 0
    for record in rows:
        total += record.arf
    return total
def proc_line(cur, item: Record, curr_lemma: Record, words: List[Record], sublemmas: Dict[str, int]):
    """Accumulate one input record; flush the previous lemma group on change.

    Records are assumed to arrive grouped by (lemma, tag).  While *item*
    continues the current group it is only buffered; when a new group starts,
    the buffered lemma, its sublemmas and its word forms are written to the
    database, merging counts into existing rows on primary-key conflicts.

    Returns the updated (words, sublemmas, curr_lemma) accumulator state.
    """
    # NOTE(review): pos_imp is resolved as a global here although run()
    # receives it as a parameter — confirm a module-level pos_imp exists.
    if curr_lemma is None or item.lemma != curr_lemma.lemma or item.tag != curr_lemma.tag:
        if len(words) > 0:
            try:
                #print(f' ---> INSERT LEMMA {curr_lemma}')
                cur.execute('INSERT INTO lemma (value, pos, count, arf, is_pname) VALUES (?, ?, ?, ?, ?)',
                            [curr_lemma.lemma, pos_imp(curr_lemma.tag), get_lemma_total(words), get_lemma_arf(words), int(upcase_regex.match(curr_lemma.lemma) is not None)])
            except sqlite3.IntegrityError:
                # Duplicate (value, pos): merge the counts into the existing row.
                print('Duplicate lemma record {}'.format(curr_lemma))
                print('UPDATE lemma SET count = count + %s, arf = arf + %s WHERE value = %s AND pos = %s' % (get_lemma_total(words), get_lemma_arf(words), curr_lemma.lemma, pos_imp(curr_lemma.tag)))
                cur.execute('UPDATE lemma SET count = count + ?, arf = arf + ? WHERE value = ? AND pos = ?', [get_lemma_total(words), get_lemma_arf(words), curr_lemma.lemma, pos_imp(curr_lemma.tag)])
            for s in sublemmas:
                try:
                    cur.execute('INSERT INTO sublemma (value, lemma, pos, count) VALUES (?, ?, ?, ?)', (s, curr_lemma.lemma, pos_imp(curr_lemma.tag), sublemmas[s]))
                except sqlite3.IntegrityError:
                    # Same merge strategy for duplicate sublemma rows.
                    print('Duplicate sublemma: {}'.format(s))
                    print('UPDATE sublemma SET count = count + {} WHERE value = {} AND lemma = {} AND pos = {}'.format(sublemmas[s], s, curr_lemma.lemma, pos_imp(curr_lemma.tag)))
                    cur.execute('UPDATE sublemma SET count = count + ? WHERE value = ? AND lemma = ? AND pos = ?', (sublemmas[s], s, curr_lemma.lemma, pos_imp(curr_lemma.tag)))
            for w in words:
                try:
                    cur.execute('INSERT INTO word (value, lemma, sublemma, pos, count, arf) VALUES (?, ?, ?, ?, ?, ?)', [w.word, w.lemma, w.sublemma, pos_imp(w.tag), w.abs, w.arf])
                except sqlite3.IntegrityError:
                    # And for duplicate word rows.
                    print('Duplicate word {}'.format(w))
                    print('UPDATE word SET count = count + %s, arf = arf + %s WHERE value = %s AND lemma = %s AND pos = %s' % (w.abs, w.arf, w.word, w.lemma, pos_imp(w.tag)))
                    cur.execute('UPDATE word SET count = count + ?, arf = arf + ? WHERE value = ? AND lemma = ? AND pos = ?', (w.abs, w.arf, w.word, w.lemma, pos_imp(w.tag)))
        # Start a new accumulator for the next lemma/tag group.
        curr_lemma = item
        words = []
        sublemmas = defaultdict(lambda: 0)
    words.append(item)
    return words, sublemmas, curr_lemma
def run(db, pos_imp):
    # Rebuild the word/lemma/sublemma tables from the raw `colcounts` table.
    # NOTE(review): the `pos_imp` parameter shadows the module-level global of
    # the same name that proc_line() reads; both are assigned the same function
    # in __main__, so behavior matches, but the indirection is fragile.
    create_tables(db)
    cur1 = db.cursor()
    cur2 = db.cursor()
    # ORDER BY groups rows by (lemma, sublemma, tag) so proc_line can flush a
    # lemma exactly when its key changes.  The WHERE clause skips the
    # 'X@-------------' tag (presumably an unknown/filler tag -- TODO confirm).
    cur1.execute(
        "SELECT col0, col2, col3, col4, `count` AS abs, arf "
        "FROM colcounts "
        "WHERE col4 <> 'X@-------------' "
        "ORDER BY col2, col3, col4, col0")
    curr_lemma = None
    words: List[Record] = []
    sublemmas: Dict[str, int] = defaultdict(lambda: 0)
    num_stop = 0
    for item in cur1:
        item = Record(*item)
        if is_stop_ngram(item.lemma):
            num_stop += 1
            continue
        words, sublemmas, curr_lemma = proc_line(cur2, item, curr_lemma, words, sublemmas)
        sublemmas[item.sublemma] += 1
    # Sentinel record whose key never matches forces the final flush.
    proc_line(cur2, Record(None, None, None, None, None, None), curr_lemma, words, sublemmas) # proc the last element
    print('num stop words: {}'.format(num_stop))
if __name__ == '__main__':
    # BUG FIX: sys.argv always contains at least the script name, so the
    # original check `len(sys.argv) < 1` could never trigger; require the
    # database path argument explicitly.
    if len(sys.argv) < 2:
        print('Missing database path')
        sys.exit(1)
    # Optional second argument selects the tag-mapping function.
    if len(sys.argv) > 2:
        if sys.argv[2] == 'penn':
            pos_imp = penn2pos
        else:
            print('Unknown PoS tag type {0}'.format(sys.argv[2]))
            sys.exit(1)
    else:
        pos_imp = pos2pos
    with sqlite3.connect(sys.argv[1]) as db:
        t0 = time.time()
        # Journaling off + one big transaction: bulk-load speed over safety.
        db.execute('PRAGMA journal_mode = OFF')
        db.execute('BEGIN TRANSACTION')
        run(db, pos_imp)
        db.commit()
        print('Done in {0}'.format(time.time() - t0))
| StarcoderdataPython |
4759 | from django.apps import AppConfig
from django.contrib.admin.apps import AdminConfig
from django.contrib.auth.apps import AuthConfig
from django.contrib.contenttypes.apps import ContentTypesConfig
from django.contrib.sessions.apps import SessionsConfig
from django.db.models.signals import post_migrate
from django_celery_results.apps import CeleryResultConfig
from geotrek.common.utils.signals import check_srid_has_meter_unit, pm_callback
class GeotrekConfig(AppConfig):
    """
    Base class to handle table move on right schemas, and load SQL files
    !! WARNING !! need to create subclass in geotrek.myapp.apps for project apps,
    and create subclasses here for external subclasses
    """
    def ready(self):
        # Run pm_callback after every migration of this app; dispatch_uid keeps
        # the connection unique so repeated ready() calls don't double-register.
        post_migrate.connect(pm_callback, sender=self, dispatch_uid='geotrek.core.pm_callback')
        # Fail fast at startup if the configured SRID unit is not meters.
        check_srid_has_meter_unit()
class AuthGeotrekConfig(AuthConfig, GeotrekConfig):
    """
    bind for django.contrib.auth
    (adds Geotrek's post_migrate handling to the stock auth app config)
    """
    pass
class ContenttypeGeotrekConfig(ContentTypesConfig, GeotrekConfig):
    """
    bind for django.contrib.contenttype
    (adds Geotrek's post_migrate handling to the stock contenttypes config)
    """
    pass
class SessionsGeotrekConfig(SessionsConfig, GeotrekConfig):
    """Bind for django.contrib.sessions with Geotrek's post_migrate handling."""
    pass
class AdminGeotrekConfig(AdminConfig, GeotrekConfig):
    """Bind for django.contrib.admin with Geotrek's post_migrate handling."""
    pass
class CeleryGeotrekConfig(GeotrekConfig, CeleryResultConfig):
    """Bind for django_celery_results.

    NOTE(review): unlike the other bindings above, GeotrekConfig comes first
    in the MRO here -- confirm whether that ordering is intentional.
    """
    pass
class EasyThumbnailsGeotrekConfig(GeotrekConfig):
    """Geotrek app config applied to the external easy_thumbnails package."""
    # `name` points at the external package, so this config replaces its default.
    name = 'easy_thumbnails'
    verbose_name = 'Easy thumbnails'
| StarcoderdataPython |
4827579 | # Polyline drawing in codeskulptor
import simplegui
# Shared mutable state: the sequence of clicked points, in click order.
polyline = []

def click(pos):
    """Mouse handler: record one clicked position."""
    global polyline
    polyline.append(pos)

def clear():
    """Button handler: forget all recorded points."""
    global polyline
    polyline = []

def draw(canvas):
    """Draw handler: a lone point, or segments joining consecutive clicks."""
    if len(polyline) == 1:
        canvas.draw_point(polyline[0], "White")
    for prev_pt, cur_pt in zip(polyline, polyline[1:]):
        canvas.draw_line(prev_pt, cur_pt, 2, "White")
# Wire the handlers into a 300x200 CodeSkulptor frame and start the GUI loop.
frame = simplegui.create_frame("Echo click", 300, 200)
frame.set_mouseclick_handler(click)
frame.set_draw_handler(draw)
frame.add_button("Clear", clear)
frame.start()
| StarcoderdataPython |
class Solution:
    """
    @param nums: an array with positive and negative numbers
    @param k: an integer
    @return: the maximum average
    """
    def maxAverage(self, nums, k):
        # Binary-search the answer: an average `mid` is achievable iff some
        # subarray of length >= k has average >= mid.  Converge to 1e-6.
        if not nums:
            return 0
        lo, hi = min(nums), max(nums)
        while lo + 1e-6 < hi:
            mid = (lo + hi) / 2
            if self.valid_average(nums, mid, k):
                lo = mid
            else:
                hi = mid
        return lo

    def valid_average(self, nums, mid, k):
        # After subtracting `mid` from every element, ask whether any
        # subarray of length >= k has non-negative sum.
        running = 0       # sum of (nums[0..idx] - mid)
        prefix = 0        # sum of (nums[0..idx-k] - mid)
        best_prefix = 0   # minimum prefix sum seen so far
        for idx, value in enumerate(nums):
            running += value - mid
            if idx >= k - 1 and running >= 0:
                return True
            if idx >= k:
                prefix += nums[idx - k] - mid
                best_prefix = min(best_prefix, prefix)
                if running - best_prefix >= 0:
                    return True
        return False
# class Solution:
# """
# @param nums: an array with positive and negative numbers
# @param k: an integer
# @return: the maximum average
# """
# def maxAverage(self, nums, k):
# if not nums:
# return 0
# presum = [0] * (len(nums) + 1)
# for i, num in enumerate(nums):
# presum[i + 1] = presum[i] + num
# max_average = sum(nums[:k]) / k
# for i in range(len(nums) - k + 1):
# for j in range(i + k, len(nums) + 1):
# max_average = max(max_average, (presum[j] - presum[i]) / (j - i))
# return max_average
# class Solution:
# """
# @param nums: an array with positive and negative numbers
# @param k: an integer
# @return: the maximum average
# """
# def maxAverage(self, nums, k):
# n = len(nums)
# ans = sum(nums[:k]) / k
# for start in range(n - k + 1):
# for end in range(start + k - 1, n):
# subarray = nums[start:end + 1]
# average = sum(subarray) / (end - start + 1)
# ans = max(ans, average)
# return ans | StarcoderdataPython |
4831851 | <filename>test.py
import argparse
from matplotlib import pyplot as plt
import torch
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from torch.autograd import Variable
from PIL import Image
import cv2
import os
import json
import math
from utils import get_model, WarmUpLR, my_eval
from dataset import get_imagefolder_train_loader, get_imagefoler_val_loader
import conf
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-model', type=str, required=True, help='model type')
    parser.add_argument('-weights', type=str, required=True, help='the weights file you want to test')
    parser.add_argument('-gpu', action="store_true", help='use gpu or not')
    parser.add_argument('-data_path', type=str, required=True, help='test data path')
    args = parser.parse_args()
    model = get_model(model_type=args.model, use_gpu=args.gpu)
    # BUG FIX: the original passed `args.gpu` as the second positional argument
    # of load_state_dict(), i.e. as `strict` -- so running without -gpu silently
    # tolerated missing/unexpected checkpoint keys.  Map the checkpoint to the
    # right device instead and keep strict key checking.
    state_dict = torch.load(args.weights, map_location='cuda' if args.gpu else 'cpu')
    model.load_state_dict(state_dict)
    my_eval(model, args.data_path, args.gpu)
1701929 | <filename>md5Ripper.py<gh_stars>0
'''
* Program: md5Ripper ;
* File: md5Ripper.py ;
* Author: F0r3bod1n' ;
* Version: v1.0 ;
'''
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import os.path
import codecs
import hashlib
import colorama
# Command-line interface: target hash (required) and dictionary path.
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--md5", dest = "md5hash", type = str, required = True, help = "MD5 Hash to decrypt.")
parser.add_argument("-d", "--dictionary", dest = "dictionary", type = str, default = "passwords.txt", help = "Path to passwords dictionary.")
_args = parser.parse_args()
# Enable ANSI colors on Windows terminals.
colorama.init()
# NOTE(review): opened at import time, in "r+" mode although it is only read,
# before check_args() validates the path, and never explicitly closed.
fDict = codecs.open(_args.dictionary, "r+", encoding = "cp1251")
def _md5(string):
result = hashlib.md5(str(string).encode())
return result.hexdigest()
def check_args():
    # Validate the CLI arguments; run main() only if everything checks out.
    hasError = False
    if len(_args.md5hash) != 32:
        # An MD5 hex digest is always 32 characters.
        print(colorama.Fore.RED + "[-] Value of argument -m/--md5 is not valid." + colorama.Style.RESET_ALL)
        hasError = True
    if not os.path.exists(_args.dictionary):
        # NOTE(review): unreachable in practice -- fDict is opened at import
        # time, which already raises if the dictionary path does not exist.
        print(colorama.Fore.RED + "[-] Path to dictionary not found." + colorama.Style.RESET_ALL)
        hasError = True
    if not hasError:
        main()
    else:
        sys.exit(0)
def main():
    """Try every dictionary entry against the target hash; report the first match."""
    # Hex digests are lowercase; normalize the user-supplied hash so an
    # uppercase MD5 still matches (the original compared case-sensitively).
    target = _args.md5hash.lower()
    # splitlines() also strips '\r', so CRLF-encoded dictionaries work; the
    # original split on '\n' and left the '\r' attached, breaking every match.
    # (Also removed: `del hash`/`del l`, which shadowed the builtin `hash`.)
    for candidate in fDict.read().splitlines():
        if _md5(candidate) == target:
            print(colorama.Fore.GREEN + "[+] Success! {0}:{1}.".format(candidate, target) + colorama.Style.RESET_ALL)
            sys.exit(0)
    print(colorama.Fore.RED + "[-] Hash decrypting failed. No matches found." + colorama.Style.RESET_ALL)
3368007 | from typing import *
import numpy as np
from itertools import combinations, product, permutations
from scipy.special import loggamma
def rss_objective( y_test: np.ndarray, y_predicted: np.ndarray) -> float:
    """Residual sum of squares between predictions and test values."""
    residuals = y_predicted - y_test
    return np.sum(residuals ** 2)
def rmse_objective( y_test: np.ndarray, y_predicted: np.ndarray) -> float:
    """Mean squared error between predictions and test values.

    NOTE(review): despite the function name, no square root is taken (this is
    MSE, not RMSE), and the original docstring ("residual sum of squares") was
    also wrong.  Callers may rely on the current value, so behavior is kept.
    """
    return np.mean((y_predicted - y_test)**2)
def corr_objective( y_test: np.ndarray, y_predicted: np.ndarray) -> float:
    """Negative Pearson correlation (negated so that minimizing improves fit)."""
    flat_pred = y_predicted.flat[:]
    flat_test = y_test.flat[:]
    return -np.corrcoef(flat_pred, flat_test)[0, 1]
def new_obj(y_test, y_predicted, f):
    """Scaled negative-correlation objective: f * corr_objective(y_test, y_predicted).

    The RMSE term was already commented out in the original; the leftover
    debug statement print("here") has been removed.
    """
    return f * corr_objective(y_test, y_predicted)  # + rmse_objective(y_test, y_predicted)
def poisson_log_lik_objective(y_test: np.ndarray, y_predicted: np.ndarray) -> float:
    """Negative Poisson log-likelihood (omitting the constant log(y!) term).

    $l(\lambda;x)=\sum\limits^n_{i=1}x_i \text{ log }\lambda-n\lambda$
    """
    log_lik_terms = y_test * np.log(y_predicted) - y_predicted
    return -np.sum(log_lik_terms)
def llf(y, mu, r):
    """Negative log-likelihood of a negative binomial with mean `mu`,
    parameterized via size = mu / r (a1/a2 in the original)."""
    size = mu / r
    denom = mu + size
    terms = (loggamma(y + size) - loggamma(y + 1) - loggamma(size)
             + size * np.log(size) + y * np.log(mu)
             - (y + size) * np.log(denom))
    return -np.sum(terms)
def nb_loglik3(y, mu, psi):
    """Negative NB log-likelihood parameterized by mean `mu` and overdispersion `psi`."""
    inv_psi = 1. / psi
    log_denom = np.log(1 + mu * psi)
    gamma_ratio = loggamma(y + inv_psi) - loggamma(inv_psi) - loggamma(y + 1)
    dispersion_term = -inv_psi * log_denom
    mean_term = y * (np.log(mu) - log_denom + np.log(psi))
    return -np.sum(gamma_ratio + dispersion_term + mean_term)
def nb_loglik(y, mu, r):
    """
    Continuous negative binomial negative log-likelihood with constant
    overdispersion `r`. Numerically stable implementation.

    Arguments
    ---------
    y: float or np.ndarray
        The values to evaluate the loglikelihood on.
    mu: float or np.ndarray
        The mean parameter of the negative binomial distribution (predictions).
    r: float or np.ndarray
        Constant overdispersion; converted internally to psi = r / mu, where
        psi corresponds to (VAR[x] - E[x]) / E[x]**2.

    Returns
    -------
    The negative of the NB log-likelihood (scalar, suitable for minimization).

    Note
    ----
    For the continuous NB likelihood see Robinson & Smyth, Biostatistics 2007.
    Stability at extreme inputs was checked empirically, without theoretical
    guarantees.
    """
    psi = r / mu
    inv_psi = 1. / psi
    log_denom = np.log(1 + mu * psi)
    gamma_ratio = loggamma(y + inv_psi) - loggamma(inv_psi) - loggamma(y + 1)
    return -np.sum(gamma_ratio
                   - inv_psi * log_denom
                   + y * (np.log(mu) - log_denom + np.log(psi)))
def split_list(lista: List, split_size: Tuple[int, int]) -> Iterator[Sequence[Any]]:
    """Yield every split of `lista` into two groups of the given sizes.

    Args
    ----
    lista: list
        The list to be split.
    split_size: Tuple[int, int]
        Sizes of the two groups; their sum should be len(lista).

    Yields
    ------
    Tuples (first_group, second_group), e.g. ((1, 2, 3), (4, 5)), ((1, 2, 4), (3, 5)), ...
    """
    first_size, second_size = split_size
    for first_group in combinations(lista, first_size):
        remaining = tuple(set(lista) - set(first_group))
        for second_group in combinations(remaining, second_size):
            yield first_group, second_group
def bool_from_interval(intervals_ixs: List[int], boundaries: np.ndarray, simmetry: bool=True) -> np.ndarray:
    '''Build a boolean index selecting the data that falls in the given intervals.

    Args
    ----
    intervals_ixs: list
        Indexes of the intervals to include; e.g. [0, 3] keeps data with
        boundaries[0] <= ix < boundaries[1] or boundaries[3] <= ix < boundaries[4].
    boundaries: np.ndarray
        Borders of the intervals.
    simmetry: bool
        If True, duplicate the mask for the symmetry-constrained problem.

    Returns
    -------
    bool_filter: np.ndarray of bool
        A boolean array usable for fancy indexing.
    '''
    interval_of_ix = np.digitize(np.arange(max(boundaries)), boundaries) - 1
    selected = np.in1d(interval_of_ix, intervals_ixs)
    if simmetry:
        return np.hstack([selected, selected])
    return selected
def cross_validate(A: np.ndarray, b: np.ndarray, mask: np.ndarray, boundaries: np.ndarray, alpha_beta_grid: List[List[float]],
                   score_f: Callable, reconstructor_class: Callable) -> List[List[float]]:
    """Slow but exhaustive crossvalidation by naive grid search and no optimization warmstart
    Args
    ----
    A: np.ndarray
        the design matrix (as returned by a variant of tomography.prepare_regression function)
    b: np.ndarray
        the observation vector (as returned by a variant of tomography.prepare_regression function)
    mask: np.ndarray
        grayscale mask
    boundaries: np.ndarray
        array constaining the borders of the intervals of b corresponding to different projections (starting from 0)
    alpha_beta_grid : List[List[float]]
        a list of list containing the alpha, beta values to try
    score_f: Callable
        function taking two arguments (b_test, b_train) and returing the score to be calulated
    reconstructor_class: class default(ReconstructorFast)
        should be either Reconstructor or ReconstructorFast Note: class not instance
    Returns
    -------
    all_scores: List[List[float]]
        the result of calling score_f for every possible split for every element of the grid
    """
    b1 = b / b.max() # do this normalization in case b was not already normalized
    # it makes sure we are working with the same scale for all the splits
    all_scores = [] # type: List
    for alpha, beta in alpha_beta_grid:
        scores = []
        print("alpha: %s beta: %s" % (alpha, beta))
        # NOTE(review): the number of projections is hard-coded to 5 here
        # (leave-one-out over range(5)); confirm len(boundaries) - 1 == 5.
        for (train_list, test_list) in split_list(list(range(5)), (4, 1)):
            trainset_bool = bool_from_interval(train_list, boundaries)
            testset_bool = bool_from_interval(test_list, boundaries)
            A_train, b_train = A[trainset_bool, :], b1[trainset_bool]
            A_test, b_test = A[testset_bool, :], b1[testset_bool]
            # The grayscale mask is thresholded at 0.2 to a binary mask here.
            reconstructor = reconstructor_class(alpha=alpha, beta=beta, mask=(mask > 0.2).astype(int))
            # reconstructor.fit(...).x.value: assumes a cvxpy-style result object -- TODO confirm.
            result = np.array(reconstructor.fit(b_train, A_train).x.value).flat[:]
            # Score on the held-out projection: compare observed vs predicted b.
            scores.append(score_f(b_test, A_test.dot(result)))
        all_scores.append(scores)
    return all_scores
94458 | <reponame>star2dust/MOSEK-MATLAB
"""
This setup.py file is a mish-mash of hacks that should make it work
under most circumstances. The core problems of the installation
process are
- Installation across different platforms
- Installation in user-defined locations
- Handling shared library dependencies
The extension modules depend on a set of dynamic libraries, namely
MOSEK and Cilk. These must be placed so they can be seen from the
extension module binaries.
==Windows==
On windows, a `A.dll` can locate `B.dll` if
- `B.dll` is in the `PATH`, or
- `B.dll` is in the current working directory
==OS X==
On OS X a program `P` that links to `A.dylib`, which in turn links to `B.dylib` can load `B.dylib` if
- `P` defines an RPATH and `A.dylib` is linked with `B.dylib` as `@rpath/B.dylib`, or
- `A.dylib` is linked with an absolute path to `B.dylib`.
==Linux==
On Linux a program `P` that links to `A.so`, which in turn links to `B.so` can load `B.so` if
- `P` defines an RPATH where `B.so` is located, including a relative path using `$ORIGIN`, or
- `B.so` is in the `LD_LIBRARY_PATH`.
"""
from distutils.core import setup
from distutils.core import Extension
import distutils.command.build_ext
import distutils.command.install
from distutils import log
import platform,sys,shutil
import os,os.path
import subprocess
import ctypes
import sysconfig
class InstallationError(Exception): pass
# NOTE(review): this only warns on non-Python-3 interpreters; it does not exit.
major,minor,_,_,_ = sys.version_info
if major != 3:
    print ("Python 3.0+ required, got %d.%d" % (major,minor))
instdir = os.path.abspath(os.path.join(__file__,'..'))
# The MOSEK binary directory is expected two levels up from this file, in bin/.
mosekinstdir = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','bin'))
ver = ('9','0')
# Shared libraries (Cilk runtime + MOSEK) to bundle, keyed by platform id.
# NOTE(review): there is no 'linux32x86' entry, although sysid below can take
# that value -- a 32-bit Linux install would fail with a KeyError later.
libd = {
    'osx64x86' : [ 'libcilkrts.5.dylib', 'libmosek64.%s.%s.dylib'%ver, ],
    'linux64x86' : [ 'libcilkrts.so.5', 'libmosek64.so.%s.%s' %ver, ],
    'win64x86' : [ 'cilkrts20.dll', 'mosek64_%s_%s.dll' %ver, ],
    'win32x86' : [ 'cilkrts20.dll', 'mosek%s_%s.dll' %ver, ]
}
# Detect the platform identifier used as key into libd; pointer size
# distinguishes 32- from 64-bit interpreters.
sysid = None
if platform.system() == 'Darwin': sysid = 'osx64x86'
elif platform.system() == 'Linux':
    if ctypes.sizeof(ctypes.c_void_p) == 8: sysid = 'linux64x86'
    else: sysid = 'linux32x86'
elif platform.system() == 'Windows':
    if ctypes.sizeof(ctypes.c_void_p) == 8: sysid = 'win64x86'
    else: sysid = 'win32x86'
if sysid is None:
    raise InstallationError("Unsupported system")
def osxfixlibs(libfile, libs, prefix=''):
    """
    Replace path in the dynamic library reference in the DYLIB
    `libfile` for all libraries listed in `libs` (disregarding their
    current paths in the `libfile`).
    To see current library references, use `otool -L LIBNAME`.
    Example: If `prefix` is `@rpath/` the DYLIB `libfile` contains a
    reference to `/Users/jdoe/libs/libtest.dylib`, and `libtest.dylib`
    is listed in libs, the reference will be changed to
    `@rpath/libtest.dylib`.
    """
    # otool -L output: the first line is the file itself; each following line
    # is "<path> (compatibility ...)", so take the first space-separated token.
    L = [ l.strip().split(' ')[0] for l in subprocess.check_output(['otool','-L',libfile]).decode('utf-8').split('\n')[1:] ]
    # Map basename -> full path as currently recorded in the dylib.
    d = { os.path.basename(f) : f for f in L }
    args = []
    for l in libs:
        if l in d:
            args.extend([ '-change', d[l],prefix+l])
    # Only invoke install_name_tool when there is something to rewrite.
    if len(args) > 0:
        cmd = [ 'install_name_tool' ]
        cmd.extend(args)
        cmd.append(libfile)
        subprocess.call(cmd)
def patchelf(libfile,rpath):
    """
    Replace the `RPATH` entry in the SharedObject `libfile`.
    Requires the external `patchelf` tool to be available on PATH.
    """
    subprocess.call(['patchelf','--set-rpath',rpath,libfile])
class install(distutils.command.install.install):
    """
    Extend the default install command, adding an additional operation
    that installs the dynamic MOSEK libraries.
    """
    def install_libs(self):
        # Copy the MOSEK/Cilk shared libraries next to the installed package
        # and record the MOSEK installation path for runtime discovery.
        mskdir = os.path.join(self.install_lib,'mosek')
        with open(os.path.join(mskdir,'mosekorigin.py'),'wt') as f:
            f.write('__mosekinstpath__ = {0}\n'.format(repr(mosekinstdir)))
        if platform.system() == 'Darwin':
            # macOS: rewrite dylib references to absolute paths in the package dir.
            for dirpath,dirnames,filenames in os.walk(self.install_lib):
                for fname in filenames:
                    if fname.endswith('.so'):
                        osxfixlibs(os.path.join(dirpath,fname),['libmosek64.%s.%s.dylib'%ver,'libcilkrts.5.dylib'], mskdir + '/')
            for lib in libd[sysid]:
                log.info("copying %s -> %s/" % (os.path.join(mosekinstdir,lib),mskdir))
                shutil.copy(os.path.join(mosekinstdir,lib),mskdir)
                osxfixlibs(os.path.join(mskdir,lib),['libmosek64.%s.%s.dylib'%ver,'libcilkrts.5.dylib'], mskdir + '/')
        elif platform.system() == 'Linux':
            #for dirpath,dirnames,filenames in os.walk(self.install_lib):
            #    for fname in filenames:
            #        if fname.endswith('.so'):
            #            patchelf(os.path.join(dirpath,fname),rpath=os.path.join('$ORIGIN',os.path.relpath(mskdir,dirpath)))
            for lib in libd[sysid]:
                log.info("copying %s -> %s/" % (os.path.join(mosekinstdir,lib),mskdir))
                shutil.copy(os.path.join(mosekinstdir,lib),mskdir)
            # NOTE(review): this preload file is rewritten for all platforms by
            # install_preloader(), which run() executes right afterwards.
            with open(os.path.join(mskdir,'_mskpreload.py'),'wt') as f:
                f.write('import ctypes,os.path\n')
                f.write('DLLS = map(ctypes.CDLL,[ \n\t%s ])\n' % ',\n\t'.join([ "os.path.join(os.path.dirname(__file__),'%s')" % l for l in libd[sysid] ]))
        else:
            # Windows: just copy the DLLs; the preloader ctypes-loads them.
            for lib in libd[sysid]:
                log.info("copying %s -> %s/" % (os.path.join(mosekinstdir,lib),mskdir))
                shutil.copy(os.path.join(mosekinstdir,lib),mskdir)
    def install_preloader(self):
        # Write _mskpreload.py, which ctypes-loads the bundled shared
        # libraries before the extension modules need them.
        mskdir = os.path.join(self.install_lib,'mosek')
        log.info("writing %s" % os.path.join(mskdir,'_mskpreload.py'))
        with open(os.path.join(mskdir,'_mskpreload.py'),'wt') as f:
            f.write('import ctypes,os.path\n')
            for lib in libd[sysid]:
                f.write('ctypes.CDLL(os.path.join(os.path.dirname(__file__),"%s"))\n' % lib)
    def run(self):
        # Standard install first, then the two library-related steps.
        distutils.command.install.install.run(self)
        self.execute(self.install_libs, (), msg="Fixing library paths")
        self.execute(self.install_preloader, (), msg="Installing preloader module")
class build_ext(distutils.command.build_ext.build_ext):
    """
    Extend the default `build_ext` command replacing the extension
    building functionality with one that simply copies a pre-built
    extension module.
    """
    def build_extension(self,ext):
        # Instead of compiling, copy each pre-built binary listed in
        # ext.sources into the build tree under the extension's package path.
        tgtdir = os.path.join(self.build_lib,*ext.name.split('.')[:-1])
        try: os.makedirs(tgtdir)
        except OSError: pass  # directory already exists
        for s in ext.sources:
            log.info("copying %s -> %s" % (s,tgtdir))
            shutil.copy(s,tgtdir)
# Platform-specific filenames of the pre-built extension binaries to ship.
pyextsuffix = sysconfig.get_config_var('SO' if platform.system() == 'Windows' else "SHLIB_SUFFIX")
if platform.system() == 'Windows':
    # hack because Anaconda forgot to include python3.lib for python3.5
    # (hence separate per-version cp36/cp37 binaries instead of abi3)
    pfname = 'win32' if sysid == 'win32x86' else 'win_amd64'
    msksources = [
        'mosek/_msk.cp36-'+pfname+pyextsuffix,
        'mosek/_msk.cp37-'+pfname+pyextsuffix ]
    fragmentssources = [
        'mosek/fusion/impl/fragments.cp36-'+pfname+pyextsuffix,
        'mosek/fusion/impl/fragments.cp37-'+pfname+pyextsuffix ]
else:
    # Non-Windows wheels use the stable-ABI (abi3) binaries.
    msksources = ['mosek/_msk.abi3'+pyextsuffix]
    fragmentssources = ['mosek/fusion/impl/fragments.abi3'+pyextsuffix ]
def runsetup():
    # Single distutils entry point; `install` and `build_ext` are the custom
    # command classes above that copy pre-built binaries instead of compiling.
    setup( name = 'Mosek',
           version = '9.0.86',
           #install_requires = ['numpy'],
           packages = [ 'mosek', 'mosek.fusion','mosek.fusion.impl' ],
           ext_modules = [ Extension('mosek._msk',
                                     sources = msksources),
                           Extension('mosek.fusion.impl.fragments',
                                     sources = fragmentssources) ],
           cmdclass = { 'install' : install,
                        'build_ext' : build_ext },
           description = 'Mosek/Python APIs',
           long_description = 'Interface for MOSEK',
           author = '<NAME>',
           author_email = "<EMAIL>",
           license = "See license.pdf in the MOSEK distribution",
           url = 'http://www.mosek.com',
           )
if __name__ == '__main__':
    # Run setup from the directory containing this file so the relative
    # binary paths in msksources/fragmentssources resolve correctly.
    os.chdir(os.path.abspath(os.path.dirname(__file__)))
    runsetup()
| StarcoderdataPython |
3230636 | from django.apps import AppConfig
class PollConfig(AppConfig):
    """AppConfig for the polls app (referenced from INSTALLED_APPS)."""
    name = "polls"
| StarcoderdataPython |
3263118 | #!/usr/bin/env python3
import sys
import numpy as np
# For each file of one-hex-value-per-line, print "<basename>\t<median delta>".
for fname in sys.argv[1:]:
    # int(..., 16) tolerates surrounding whitespace, so no stripping is needed.
    # (The original built a stripped generator and then discarded it unused,
    # re-parsing the raw lines -- the dead code is simply removed here.)
    with open(fname) as f:
        values = [int(line, 16) for line in f]
    deltas = np.diff(values)
    print(fname.split('.')[0], np.median(deltas), sep='\t')
| StarcoderdataPython |
1798232 | from osrsmath.general.skills import *
import unittest
class TestExperience(unittest.TestCase):
    """experience(level) maps a skill level to its cumulative XP requirement."""
    def test_experience_for_levels_below_1_raises(self):
        self.assertRaises(ValueError, lambda:experience(0))
        self.assertRaises(ValueError, lambda:experience(-3))
    def test_experience_for_levels_above_level_cap_with_no_flag_raises(self):
        # 99 is the normal cap; levels above it require virtual_levels=True.
        self.assertRaises(ValueError, lambda:experience(100, virtual_levels=False))
        self.assertRaises(ValueError, lambda:experience(112, virtual_levels=False))
    def test_experience_for_levels_above_virtual_cap_raises(self):
        # Even with virtual levels, 127+ is beyond the virtual cap of 126.
        self.assertRaises(ValueError, lambda:experience(127))
        self.assertRaises(ValueError, lambda:experience(140))
    def test_experience_for_levels_below_level_cap(self):
        self.assertEqual(experience(85), 3_258_594)
        self.assertEqual(experience(34), 20_224)
    def test_experience_for_levels_above_virtual_cap_with_flag(self):
        self.assertEqual(experience(100, virtual_levels=True), 14_391_160)
        self.assertEqual(experience(112, virtual_levels=True), 47_221_641)
class TestLevel(unittest.TestCase):
    """level(xp) is the inverse of experience(level): XP -> skill level."""
    def test_experience_below_zero_raises(self):
        self.assertRaises(ValueError, lambda:level(-1))
    def test_experience_of_zero_is_lowest_level(self):
        self.assertEqual(level(0), 1)
    def test_experience_above_level_cap_returns_max_level_without_flag(self):
        self.assertEqual(level(14_000_000, virtual_levels=False), 99)
        self.assertEqual(level(200_000_000, virtual_levels=False), 99)
    def test_experience_above_level_cap_with_flag(self):
        # 200m XP (the experience cap) corresponds to virtual level 126.
        self.assertEqual(level(14_000_000, virtual_levels=True), 99)
        self.assertEqual(level(112_000_000, virtual_levels=True), 120)
        self.assertEqual(level(200_000_000, virtual_levels=True), 126)
    def test_experience_above_maximum_experience_raises(self):
        self.assertRaises(ValueError, lambda:level(200_000_001))
        self.assertRaises(ValueError, lambda:level(252_532_523))
    def test_experience_within_bounds(self):
        self.assertEqual(level(40_000), 40)
        self.assertEqual(level(700_000), 69)
        self.assertEqual(level(9_000_000), 95)
    def test_invertability(self):
        # level(experience(l)) == l for every real level.
        small_experience = 1
        for l in range(1, 99+1):
            with self.subTest(level=l):
                self.assertEqual(level(experience(l)), l)
    def test_experience_just_over_level_same_level(self):
        small_experience = 1
        for l in range(1, 99+1):
            with self.subTest(level=l):
                self.assertEqual(level(experience(l) + small_experience), l)
    def test_experience_just_under_level_is_previous_level(self):
        # One XP short of a level's threshold belongs to the previous level.
        small_experience = 1
        for l in range(2, 99+1):
            with self.subTest(level=l):
                if l == 1:
                    self.assertRaises(ValueError, lambda:level(experience(l) - small_experience))
                else:
                    self.assertEqual(level(experience(l) - small_experience), l - 1)
1790007 | ## @ingroup Components-Energy-Storages-Batteries-Constant_Mass
# Lithium_Ion_LiNiMnCoO2_18650.py
#
# Created: Feb 2020, <NAME>
# Modified: Sep 2021, <NAME>
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import SUAVE
from SUAVE.Core import Units , Data
from .Lithium_Ion import Lithium_Ion
from SUAVE.Methods.Power.Battery.Cell_Cycle_Models.LiNiMnCoO2_cell_cycle_model import compute_NMC_cell_state_variables
from SUAVE.Methods.Power.Battery.compute_net_generated_battery_heat import compute_net_generated_battery_heat
import numpy as np
import os
from scipy.integrate import cumtrapz
from scipy.interpolate import RegularGridInterpolator
## @ingroup Components-Energy-Storages-Batteries-Constant_Mass
class Lithium_Ion_LiNiMnCoO2_18650(Lithium_Ion):
""" Specifies discharge/specific energy characteristics specific
18650 lithium-nickel-manganese-cobalt-oxide battery cells
Assumptions:
Convective Thermal Conductivity Coefficient corresponds to forced
air cooling in 35 m/s air
Source:
Automotive Industrial Systems Company of Panasonic Group, Technical Information of
NCR18650G, URL https://www.imrbatteries.com/content/panasonic_ncr18650g.pdf
convective heat transfer coefficient, h
Jeon, Dong Hyup, and Seung Man Baek. "Thermal modeling of cylindrical
lithium ion battery during discharge cycle." Energy Conversion and Management
52.8-9 (2011): 2973-2981.
thermal conductivity, k
Yang, Shuting, et al. "A Review of Lithium-Ion Battery Thermal Management
System Strategies and the Evaluate Criteria." Int. J. Electrochem. Sci 14
(2019): 6077-6107.
specific heat capacity, Cp
(axial and radial)
<NAME>, et al. "A Review of Lithium-Ion Battery Thermal Management
System Strategies and the Evaluate Criteria." Int. J. Electrochem. Sci 14
(2019): 6077-6107.
# Electrode Area
Muenzel, Valentin, et al. "A comparative testing study of commercial
18650-format lithium-ion battery cells." Journal of The Electrochemical
Society 162.8 (2015): A1592.
Inputs:
None
Outputs:
None
Properties Used:
N/A
"""
    def __defaults__(self):
        # Set cell geometry, electrical ratings and thermal properties for an
        # 18650 NMC (LiNiMnCoO2) cell, then build the discharge surrogate from
        # the packaged experimental data (see class docstring for sources).
        self.tag                              = 'Lithium_Ion_LiNiMnCoO2_Cell'
        self.cell.diameter                    = 0.0185                                                   # [m]
        self.cell.height                      = 0.0653                                                   # [m]
        self.cell.mass                        = 0.048 * Units.kg                                         # [kg]
        # Lateral surface plus one end cap (0.5 * pi * d^2 = two half end areas).
        self.cell.surface_area                = (np.pi*self.cell.height*self.cell.diameter) + (0.5*np.pi*self.cell.diameter**2)  # [m^2]
        self.cell.volume                      = np.pi*(0.5*self.cell.diameter)**2*self.cell.height
        self.cell.density                     = self.cell.mass/self.cell.volume                          # [kg/m^3]
        self.cell.electrode_area              = 0.0342                                                   # [m^2]
        self.cell.max_voltage                 = 4.2                                                      # [V]
        self.cell.nominal_capacity            = 3.55                                                     # [Amp-Hrs]
        self.cell.nominal_voltage             = 3.6                                                      # [V]
        self.cell.charging_voltage            = self.cell.nominal_voltage                                # [V]
        self.watt_hour_rating                 = self.cell.nominal_capacity  * self.cell.nominal_voltage  # [Watt-hours]
        self.specific_energy                  = self.watt_hour_rating*Units.Wh/self.cell.mass            # [J/kg]
        self.specific_power                   = self.specific_energy/self.cell.nominal_capacity          # [W/kg]
        self.resistance                       = 0.025                                                    # [Ohms]
        self.specific_heat_capacity           = 1108                                                     # [J/kgK]
        self.cell.specific_heat_capacity      = 1108                                                     # [J/kgK]
        self.cell.radial_thermal_conductivity = 0.4                                                      # [J/kgK]
        self.cell.axial_thermal_conductivity  = 32.2                                                     # [J/kgK] # estimated
        # Interpolated discharge map built from the raw test data.
        battery_raw_data                      = load_battery_results()
        self.discharge_performance_map        = create_discharge_performance_map(battery_raw_data)
        return
def energy_calc(self,numerics,battery_discharge_flag = True ):
'''This is an electric cycle model for 18650 lithium-nickel-manganese-cobalt-oxide
battery cells. The model uses experimental data performed
by the Automotive Industrial Systems Company of Panasonic Group
Sources:
Internal Resistance Model:
<NAME>., <NAME>., <NAME>., and <NAME>., "Combined State of Charge and State of
Health estimation over lithium-ion battery cellcycle lifespan for electric
vehicles,"Journal of Power Sources, Vol. 273, 2015, pp. 793-803.
doi:10.1016/j.jpowsour.2014.09.146,URLhttp://dx.doi.org/10.1016/j.jpowsour.2014.09.146.
Battery Heat Generation Model and Entropy Model:
Jeon, <NAME>, and <NAME>. "Thermal modeling of cylindrical lithium ion
battery during discharge cycle." Energy Conversion and Management 52.8-9 (2011):
2973-2981.
Assumtions:
1) All battery modules exhibit the same themal behaviour.
Inputs:
battery.
I_bat (max_energy) [Joules]
cell_mass (battery cell mass) [kilograms]
Cp (battery cell specific heat capacity) [J/(K kg)]
t (battery age in days) [days]
T_ambient (ambient temperature) [Kelvin]
T_current (pack temperature) [Kelvin]
T_cell (battery cell temperature) [Kelvin]
E_max (max energy) [Joules]
E_current (current energy) [Joules]
Q_prior (charge throughput) [Amp-hrs]
R_growth_factor (internal resistance growth factor) [unitless]
inputs.
I_bat (current) [amps]
P_bat (power) [Watts]
Outputs:
battery.
current_energy [Joules]
cell_temperature [Kelvin]
resistive_losses [Watts]
load_power [Watts]
current [Amps]
battery_voltage_open_circuit [Volts]
cell_charge_throughput [Amp-hrs]
internal_resistance [Ohms]
battery_state_of_charge [unitless]
depth_of_discharge [unitless]
battery_voltage_under_load [Volts]
'''
# Unpack varibles
battery = self
I_bat = battery.inputs.current
P_bat = battery.inputs.power_in
electrode_area = battery.cell.electrode_area
As_cell = battery.cell.surface_area
T_current = battery.pack_temperature
T_cell = battery.cell_temperature
E_max = battery.max_energy
E_current = battery.current_energy
Q_prior = battery.cell_charge_throughput
battery_data = battery.discharge_performance_map
I = numerics.time.integrate
D = numerics.time.differentiate
# ---------------------------------------------------------------------------------
# Compute battery electrical properties
# ---------------------------------------------------------------------------------
# Calculate the current going into one cell
n_series = battery.pack_config.series
n_parallel = battery.pack_config.parallel
n_total = battery.pack_config.total
Nn = battery.module_config.normal_count
Np = battery.module_config.parallel_count
n_total_module = Nn*Np
if battery_discharge_flag:
I_cell = I_bat/n_parallel
else:
I_cell = -I_bat/n_parallel
# State of charge of the battery
initial_discharge_state = np.dot(I,P_bat) + E_current[0]
SOC_old = np.divide(initial_discharge_state,E_max)
# Make sure things do not break by limiting current, temperature and current
SOC_old[SOC_old < 0.] = 0.
SOC_old[SOC_old > 1.] = 1.
T_cell[T_cell<272.65] = 272.65
T_cell[T_cell>322.65] = 322.65
battery.cell_temperature = T_cell
battery.pack_temperature = T_cell
# ---------------------------------------------------------------------------------
# Compute battery cell temperature
# ---------------------------------------------------------------------------------
# Determine temperature increase
sigma = 139 # Electrical conductivity
n = 1
F = 96485 # C/mol Faraday constant
delta_S = -496.66*(SOC_old)**6 + 1729.4*(SOC_old)**5 + -2278 *(SOC_old)**4 + 1382.2 *(SOC_old)**3 + \
-380.47*(SOC_old)**2 + 46.508*(SOC_old) + -10.692
i_cell = I_cell/electrode_area # current intensity
q_dot_entropy = -(T_cell)*delta_S*i_cell/(n*F)
q_dot_joule = (i_cell**2)/sigma
Q_heat_gen = (q_dot_joule + q_dot_entropy)*As_cell
q_joule_frac = q_dot_joule/(q_dot_joule + q_dot_entropy)
q_entropy_frac = q_dot_entropy/(q_dot_joule + q_dot_entropy)
# Compute cell temperature
T_current = compute_net_generated_battery_heat(n_total,battery,Q_heat_gen,numerics)
# Power going into the battery accounting for resistance losses
P_loss = n_total*Q_heat_gen
P = P_bat - np.abs(P_loss)
# Compute State Variables
V_ul = compute_NMC_cell_state_variables(battery_data,SOC_old,T_cell,I_cell)
# Li-ion battery interal resistance
R_0 = 0.01483*(SOC_old**2) - 0.02518*SOC_old + 0.1036
# Voltage under load:
V_oc = V_ul + (I_cell * R_0)
# ---------------------------------------------------------------------------------
# Compute updates state of battery
# ---------------------------------------------------------------------------------
# Possible Energy going into the battery:
energy_unmodified = np.dot(I,P)
# Available capacity
capacity_available = E_max - battery.current_energy[0]
# How much energy the battery could be overcharged by
delta = energy_unmodified -capacity_available
delta[delta<0.] = 0.
# Power that shouldn't go in
ddelta = np.dot(D,delta)
# Power actually going into the battery
P[P>0.] = P[P>0.] - ddelta[P>0.]
E_bat = np.dot(I,P)
E_bat = np.reshape(E_bat,np.shape(E_current)) #make sure it's consistent
# Add this to the current state
if np.isnan(E_bat).any():
E_bat=np.ones_like(E_bat)*np.max(E_bat)
if np.isnan(E_bat.any()): #all nans; handle this instance
E_bat=np.zeros_like(E_bat)
E_current = E_bat + E_current[0]
# Determine new State of Charge
SOC_new = np.divide(E_current, E_max)
SOC_new[SOC_new<0] = 0.
SOC_new[SOC_new>1] = 1.
DOD_new = 1 - SOC_new
# Determine new charge throughput (the amount of charge gone through the battery)
Q_total = np.atleast_2d(np.hstack(( Q_prior[0] , Q_prior[0] + cumtrapz(I_cell[:,0], x = numerics.time.control_points[:,0])/Units.hr ))).T
# If SOC is negative, voltage under load goes to zero
V_ul[SOC_new < 0.] = 0.
# Pack outputs
battery.current_energy = E_current
battery.cell_temperature = T_current
battery.pack_temperature = T_current
battery.cell_joule_heat_fraction = q_joule_frac
battery.cell_entropy_heat_fraction = q_entropy_frac
battery.resistive_losses = P_loss
battery.load_power = V_ul*n_series*I_bat
battery.current = I_bat
battery.voltage_open_circuit = V_oc*n_series
battery.cell_voltage_open_circuit = V_oc
battery.cell_current = I_cell
battery.cell_charge_throughput = Q_total
battery.heat_energy_generated = Q_heat_gen*n_total_module
battery.internal_resistance = R_0*n_series
battery.state_of_charge = SOC_new
battery.depth_of_discharge = DOD_new
battery.voltage_under_load = V_ul*n_series
battery.cell_voltage_under_load = V_ul
return battery
def append_battery_unknowns(self, segment):
    """Copy the mission solver's battery unknowns into the propulsion conditions.

    Assumptions:
        The state-of-charge unknown carries one row fewer than the
        conditions array: the first control point holds the (known)
        initial condition and is never solved for.

    Inputs:
        segment.state.unknowns.battery_cell_temperature   [Kelvin]
        segment.state.unknowns.battery_state_of_charge    [unitless]
        segment.state.unknowns.battery_current            [Amperes]

    Outputs:
        segment.state.conditions.propulsion.battery_cell_temperature  [Kelvin]
        segment.state.conditions.propulsion.battery_state_of_charge   [unitless]
        segment.state.conditions.propulsion.battery_current           [Amperes]

    Properties Used:
        N/A
    """
    unknowns   = segment.state.unknowns
    conditions = segment.state.conditions.propulsion

    # Rows after the first are overwritten; row 0 keeps the initial condition.
    conditions.battery_cell_temperature[1:, :] = unknowns.battery_cell_temperature[1:, :]
    conditions.battery_state_of_charge[1:, 0]  = unknowns.battery_state_of_charge[:, 0]
    conditions.battery_current                 = unknowns.battery_current
    return
def append_battery_residuals(self, segment, network):
    """Pack the NMC-cell residuals (predicted minus actual) for the mission solver.

    Assumptions:
        The SOC unknown skips the first control point (fixed initial
        condition), so the actual SOC is sliced from row 1 onward.

    Inputs:
        segment.state.conditions.propulsion:
            battery_state_of_charge    [unitless]
            battery_cell_temperature   [Kelvin]
            battery_current            [Amperes]
        segment.state.unknowns.
            battery_state_of_charge    [unitless]
            battery_cell_temperature   [Kelvin]
            battery_current            [Amperes]

    Outputs:
        None (residuals written onto segment.state.residuals.network)

    Properties Used:
        None
    """
    conditions = segment.state.conditions.propulsion
    unknowns   = segment.state.unknowns
    residuals  = segment.state.residuals.network

    # Residual = solver prediction minus value produced by the physics model.
    residuals.SOC         = unknowns.battery_state_of_charge  - conditions.battery_state_of_charge[1:, :]
    residuals.temperature = unknowns.battery_cell_temperature - conditions.battery_cell_temperature
    residuals.current     = unknowns.battery_current          - conditions.battery_current
    return
def append_battery_unknowns_and_residuals_to_segment(self, segment, initial_voltage,
                                                     initial_battery_cell_temperature, initial_battery_state_of_charge,
                                                     initial_battery_cell_current):
    """Seed the mission solver unknowns for a segment flown with this network.

    Assumptions:
        None

    Inputs:
        initial_voltage                   [volts]   (accepted for interface
                                                     compatibility; unused here)
        initial_battery_cell_temperature  [Kelvin]
        initial_battery_state_of_charge   [unitless]
        initial_battery_cell_current      [Amperes]

    Outputs:
        None

    Properties Used:
        N/A
    """
    unknowns   = segment.state.unknowns
    n_parallel = self.pack_config.parallel

    # SOC uses one fewer row: the first control point is the initial condition.
    unknowns.battery_state_of_charge  = initial_battery_state_of_charge  * unknowns.ones_row_m1(1)
    unknowns.battery_cell_temperature = initial_battery_cell_temperature * unknowns.ones_row(1)
    # Pack current = cell current times the number of parallel strings.
    unknowns.battery_current          = initial_battery_cell_current * n_parallel * unknowns.ones_row(1)
    return
def compute_voltage(self, state):
    """Compute the under-load voltage of an NMC cell pack from the segment state.

    Assumptions:
        None

    Source:
        N/A

    Inputs:
        self  - battery data structure                 [unitless]
        state - segment unknowns defining the voltage  [unitless]

    Outputs:
        V_ul - pack voltage under load                 [volts]

    Properties Used:
        N/A
    """
    battery         = self
    performance_map = battery.discharge_performance_map
    n_series        = battery.pack_config.series
    n_parallel      = battery.pack_config.parallel

    propulsion = state.conditions.propulsion
    SOC        = propulsion.battery_state_of_charge
    T_cell     = propulsion.battery_cell_temperature
    # Per-cell current: pack current shared across the parallel strings.
    I_cell     = propulsion.battery_current / n_parallel

    # Keep the battery's temperature in sync with the segment state.
    battery.cell_temperature = T_cell

    # Cell-level voltage from the discharge response surface, scaled to the pack.
    V_ul_cell = compute_NMC_cell_state_variables(performance_map, SOC, T_cell, I_cell)
    return n_series * V_ul_cell
def update_battery_state_of_health(self, segment, increment_battery_cycle_day=False):
    """ This is an aging model for 18650 lithium-nickel-manganese-cobalt-oxide batteries.
    Source:
        Schmalstieg, Johannes, et al. "A holistic aging model for Li (NiMnCo) O2
        based 18650 lithium-ion batteries." Journal of Power Sources 257 (2014): 325-334.
    Assumptions:
        None
    Inputs:
        segment.conditions.propulsion.
            battery_cycle_day             [unitless]
            battery_cell_temperature      [Kelvin]
            battery_voltage_open_circuit  [Volts]
            battery_charge_throughput     [Amp-hrs]
            battery_state_of_charge       [unitless]
    Outputs:
        segment.conditions.propulsion.
            battery_capacity_fade_factor     (internal resistance growth factor) [unitless]
            battery_resistance_growth_factor (capactance (energy) growth factor) [unitless]
    Properties Used:
        N/A
    """
    n_series = self.pack_config.series
    # Per-cell quantities feeding the empirical aging correlations
    SOC     = segment.conditions.propulsion.battery_state_of_charge
    V_ul    = segment.conditions.propulsion.battery_voltage_under_load / n_series   # per-cell voltage under load
    t       = segment.conditions.propulsion.battery_cycle_day                       # calendar age [days]
    Q_prior = segment.conditions.propulsion.battery_cell_charge_throughput[-1, 0]   # cumulative cell charge throughput [Ah]
    Temp    = np.mean(segment.conditions.propulsion.battery_cell_temperature)       # mean cell temperature over the segment [K]
    # aging model
    delta_DOD = abs(SOC[0][0] - SOC[-1][0])    # depth-of-discharge swing across the segment
    rms_V_ul  = np.sqrt(np.mean(V_ul**2))      # RMS cell voltage (cycling-stress proxy)
    # Calendar-aging coefficients (empirical fit, Schmalstieg et al. 2014)
    alpha_cap = (7.542*np.mean(V_ul) - 23.75) * 1E6 * np.exp(-6976/(Temp))
    alpha_res = (5.270*np.mean(V_ul) - 16.32) * 1E5 * np.exp(-5986/(Temp))
    # Cycle-aging coefficients (empirical fit)
    beta_cap = 7.348E-3 * (rms_V_ul - 3.667)**2 + 7.60E-4 + 4.081E-3*delta_DOD
    beta_res = 2.153E-4 * (rms_V_ul - 3.725)**2 - 1.521E-5 + 2.798E-4*delta_DOD
    # Capacity fade / resistance growth: calendar term ~ t^0.75, cycling term ~ charge throughput
    E_fade_factor   = 1 - alpha_cap*(t**0.75) - beta_cap*np.sqrt(Q_prior)
    R_growth_factor = 1 + alpha_res*(t**0.75) + beta_res*Q_prior
    # Aging is monotonic: capacity never recovers, resistance never shrinks
    segment.conditions.propulsion.battery_capacity_fade_factor     = np.minimum(E_fade_factor, segment.conditions.propulsion.battery_capacity_fade_factor)
    segment.conditions.propulsion.battery_resistance_growth_factor = np.maximum(R_growth_factor, segment.conditions.propulsion.battery_resistance_growth_factor)
    if increment_battery_cycle_day:
        segment.conditions.propulsion.battery_cycle_day += 1  # update battery age by one day
    return
def create_discharge_performance_map(battery_raw_data):
    """Build the discharge/charge response surfaces for LiNiMnCoO2 cells.

    Source:
        N/A

    Assumptions:
        N/A

    Inputs:
        battery_raw_data - raw experimental discharge curves

    Outputs:
        battery_data - interpolable performance map

    Properties Used:
        N/A
    """
    # Resample the raw curves onto a regular grid, then wrap them in interpolators.
    processed = process_raw_data(battery_raw_data)
    return create_response_surface(processed)
def create_response_surface(processed_data):
    """Wrap the processed voltage/temperature grids in regular-grid interpolators.

    The grid axes mirror the layout produced by process_raw_data:
    5 current levels (0-8), 6 ambient temperatures (0-50 plus a 272.65
    offset), and 15 SOC samples.
    NOTE(review): the 272.65 K offset looks like it was meant to be
    273.15 (0 deg C) -- confirm against the raw data source.
    """
    current_axis     = np.linspace(0, 8, 5)
    temperature_axis = np.linspace(0, 50, 6) + 272.65
    soc_axis         = np.linspace(0, 1, 15)
    grid = (current_axis, temperature_axis, soc_axis)

    battery_map             = Data()
    battery_map.Voltage     = RegularGridInterpolator(grid, processed_data.Voltage)
    battery_map.Temperature = RegularGridInterpolator(grid, processed_data.Temperature)
    return battery_map
def _resample_curve(curve, n_points=15):
    """Resample one raw 2-column curve onto n_points evenly spaced samples.

    curve[:, 0] is the independent variable, curve[:, 1] the measurement.
    Returns an (n_points, 2) array whose first column is normalized to [0, 1].
    """
    max_x = max(curve[:, 0])
    x = np.linspace(0, max_x, n_points)
    y = np.interp(x, curve[:, 0], curve[:, 1])
    out = np.zeros((n_points, 2))
    out[:, 0] = x / max_x
    out[:, 1] = y
    return out

def process_raw_data(raw_data):
    """ Takes raw data and formats voltage as a function of SOC, current and temperature
    Source
        N/A
    Assumptions:
        N/A
    Inputs:
        raw_data - object with Voltage and Temperature curve collections,
                   indexed first by current level, then by temperature
    Outputs:
        processed_data - Data() with (5, 6, 15, 2) resampled grids
    Properties Used:
        N/A
    """
    processed_data = Data()
    # Grids: current level x operating temperature x 15 SOC samples x (SOC, value)
    processed_data.Voltage     = np.zeros((5, 6, 15, 2))
    processed_data.Temperature = np.zeros((5, 6, 15, 2))
    # Reshape Data: resample each raw curve onto the common 15-point grid.
    # (The original code contained a no-op bare statement `raw_data.Voltage`
    # here and duplicated the resampling loop; both are consolidated below.)
    for i, Amps in enumerate(raw_data.Voltage):
        for j, Deg in enumerate(Amps):
            processed_data.Voltage[i, j, :, :] = _resample_curve(Deg)
    for i, Amps in enumerate(raw_data.Temperature):
        for j, Deg in enumerate(Amps):
            processed_data.Temperature[i, j, :, :] = _resample_curve(Deg)
    return processed_data
def load_battery_results():
    '''Load experimental raw data of NMC cells
    Source:
        Automotive Industrial Systems Company of Panasonic Group, Technical Information of
        NCR18650G, URL https://www.imrbatteries.com/content/panasonic_ncr18650g.pdf
    Assumptions:
        N/A
    Inputs:
        N/A
    Outputs:
        battery_data
    Properties Used:
        N/A
    '''
    # Resolve the data file relative to this module so the cwd doesn't matter.
    this_dir = os.path.dirname(os.path.abspath(__file__))
    raw_data_path = this_dir + os.path.sep + 'NMC_Raw_Data.res'
    return SUAVE.Input_Output.SUAVE.load(raw_data_path)
| StarcoderdataPython |
3387849 | <gh_stars>1-10
#!/usr/bin/env python
#
# Protein Engineering Analysis Tool DataBase (PEATDB)
# Copyright (C) 2010 <NAME> & <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# <NAME>
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
try:
from Plugins import Plugin
except:
from PEATDB.Plugins import Plugin
import math, numpy, sys, os, copy
import csv
import matplotlib
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from PEATDB.Ekin.Fitting import Fitting
from PEATSA import Core
class PCAPlugin(Plugin):
    """PCA plugin"""
    # PEATDB plugin for principal component analysis of a CSV data matrix.
    # NOTE(review): Python 2 only (print statements, `print >>`, cmp-based
    # list.sort) -- do not run under Python 3 without porting.
    #capabilities = ['gui']
    menuentry = 'PCA Plugin'
    gui_methods = {}
    about = 'This plugin allows you to do PCA'

    def main(self, parent=None, DB=None):
        # Plugin entry point: only store the DB handle when run headless.
        if parent==None:
            self.DB = DB
        else:
            return

    def writeToFile(self, matrix, filename):
        # Dump a Core.Matrix instance to disk in its CSV representation.
        stream = open(filename, 'w')
        stream.write(matrix.csvRepresentation())
        stream.close()
        return

    def doPCA(self,m,standardize=True):
        '''Performs pca on the Core.Matrix.Matrix instance m.
        Returns:
            eigenvalues - A numpy 1-D array.
            eigenvectors - A numpy 2-D array
            transformedData - A numpy 2-D array'''
        print >>sys.stderr, 'Calculating mean vector'
        data = numpy.array(m.matrix)
        if standardize==True:
            data = self.standardize(data)
        # Column-wise mean of the data matrix.
        average = numpy.zeros(m.numberOfColumns())
        for row in data:
            row = numpy.array(row, dtype=numpy.float)
            average = average + row
        average /= m.numberOfRows()
        temp = zip(m.columnHeaders(), average)
        print >>sys.stderr, 'Result: '
        for el in temp:
            print >>sys.stderr, '\t%s: %lf' % tuple(el)
        print >>sys.stderr, '\nMean-Centering'
        # Subtract the column means so the covariance is computed about zero.
        data = data - numpy.tile(average, [m.numberOfRows(),1])
        print >>sys.stderr, 'Calculating covariance matrix'
        cov = numpy.cov(data, rowvar=0)
        print >>sys.stderr, 'Performing eigenvalue decomposition'
        eigenvalues, eigenvectors = numpy.linalg.linalg.eig(cov)
        eigenvectors = eigenvectors.astype(numpy.float32)
        print >>sys.stderr, 'Sorting'
        # Order components by descending eigenvalue (Python 2 cmp-sort).
        x = range(len(eigenvalues))
        x.sort(lambda x,y: cmp(eigenvalues[x], eigenvalues[y]), reverse=True)
        eigenvalues = eigenvalues[x]
        eigenvectors = eigenvectors[:,x]
        print >>sys.stderr, 'Complete'
        # Project the centered data onto the principal axes (scores).
        z = numpy.dot(data, eigenvectors)
        return eigenvalues, eigenvectors, z

    def standardize(self, data):
        """standardize data"""
        # Replace each column with its z-scores (zero mean, unit variance).
        import scipy.stats as st
        newdata = copy.deepcopy(data)
        i=0
        for col in zip(*data):
            newdata[:,i] = st.zs(col)
            i+=1
        print newdata
        return newdata

    def plotResults(self,evals,evecs,b,m):
        """Plot results to help visualize components"""
        # b holds the transformed data (scores); m supplies labels/raw values.
        data = numpy.array(m.matrix)
        labels = m.columnHeaders()
        plt.rc('font', family='monospace')
        #plot first 2 PCs in 3d score plot
        '''x,y,z = b[:,0], b[:,1], b[:,2]
        f=plt.figure()
        ax = Axes3D(f)
        ax.scatter(x,y,zs=z,marker='o',lw=2,alpha=0.5,c='b',s=30)
        ax.set_xlabel('PC1')
        ax.set_ylabel('PC2')
        ax.set_zlabel('PC3')'''
        #f.subplots_adjust(hspace=1,wspace=1)
        f=plt.figure()
        i=1
        # Scatter up to 6 components against (up to 10) original variables.
        length = len(data[0])
        if length>6: length=6
        for i in range(0,length):
            ax=f.add_subplot(3,3,i+1)
            c=0
            lines=[]
            for v in zip(*data):
                c+=1
                if c>10: break
                #v = [float(j)/(max(v)-min(v)) for j in v]
                l=ax.plot(b[:,i],v,'x',mew=1,alpha=0.2)
                lines.append(l)
            ax.set_title('Ev%s' %str(i+1))
            i+=1
        f.legend(lines,labels,loc='lower right')
        # Scree plot of the eigenvalues in the last subplot slot.
        ax=f.add_subplot(337)
        ind=numpy.array(range(len(evals)))
        ax.plot(ind,evals,'-o',lw=2)
        ax.set_xlabel('Eigenvalues')
        f.savefig('PCAresults.png')
        f.subplots_adjust(hspace=0.4,wspace=0.4)
        print 'Eigenvalues: ', evals
        print 'Eigenvectors: ', evecs
        plt.show()
        return

    def test(self):
        # Self-test: build a synthetic correlated dataset, write it to CSV,
        # then run PCA on it and plot the results.
        features=['stab','act','solv','res','asa']
        x = numpy.random.normal(2, 6, 500)
        y = numpy.random.normal(4, 1, 500)
        #y = [i+numpy.random.normal(2,0.3) for i in x]
        z = [i+numpy.random.normal(2,0.24) for i in y]
        #z = numpy.random.normal(4, 1, 500)
        s = numpy.random.gamma(4, 1, 500)
        t = numpy.random.gamma(4, 1, 500)
        filename = 'testdata.csv'
        f=open(filename,'w')
        cw=csv.writer(f)
        cw.writerow(features)
        for i in zip(x,y,z,s,t):
            cw.writerow(i)
        f.close()
        '''A,X = Fitting.doFit(expdata=zip(x,y), model='Linear',silent=True)
        fitx = numpy.arange(min(x),max(x),1)
        fity = X.getFitLine(fitx)
        A,X1 = Fitting.doFit(expdata=zip(x,z), model='Linear',silent=True)
        fitz = X.getFitLine(fitx)'''
        f=plt.figure()
        ax = Axes3D(f)
        ax.scatter(x,y,zs=z,marker='o',lw=2,alpha=0.5,c='b',s=30)
        #ax.plot(fitx,fity,zs=fitz,alpha=0.6,lw=2,c='r',label='fit xyz')
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('z')
        ax.legend()
        f.subplots_adjust(hspace=1,wspace=1)
        m = Core.Matrix.matrixFromCSVFile(filename)
        evals, evecs, z = self.doPCA(m)
        self.plotResults(evals, evecs, z, m)
        return
def main():
    # Command-line driver: run PCA on a CSV file (or the synthetic self-test)
    # and write the eigenvectors and transformed basis back out as CSV.
    # NOTE(review): Python 2 only (print statements).
    import os
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-f", "--file", dest="file",
                        help="Open a local db")
    parser.add_option("-t", "--test", dest="test", action='store_true',
                        help="test func", default=False)
    parser.add_option("-s", "--start", dest="start", default=0, type="int",
                        help="start")
    parser.add_option("-e", "--end", dest="end", default=0, type="int",
                        help="end")
    parser.add_option("-z", "--standardize", dest="standardize", action='store_true',
                        help="end", default=False)
    opts, remainder = parser.parse_args()
    P = PCAPlugin()
    if opts.file != None and os.path.exists(opts.file):
        r = Core.Matrix.matrixFromCSVFile(opts.file)
        if opts.start != None:
            # Drop the leading identifier column(s) before the analysis.
            m = r[:, opts.start:]
        print 'There are %d samples and %d variables (dof)' % (m.numberOfRows(), m.numberOfColumns())
        evals, eigenvectors, z = P.doPCA(m, opts.standardize)
        P.plotResults(evals, eigenvectors, z, m)
        #Write out vectors
        ev = Core.Data.Matrix.Matrix(rows=list(eigenvectors))
        ev.addColumn(m.columnHeaders(), 0)
        headers = ['Variables']
        for i in range(m.numberOfColumns()):
            headers.append('Ev%d' % i)
        ev.setColumnHeaders(headers)
        P.writeToFile(ev, 'Eigenvectors.csv')
        #Write out new basis
        basis = Core.Matrix.Matrix(rows=list(z))
        basis.addColumn(r.column(0), 0)
        #basis.addColumn(r.column(1), 1)
        headers.pop(0)
        # Re-attach the identifier column header in front of the Ev headers.
        headers = r.columnHeaders()[:1] + tuple(headers)
        basis.setColumnHeaders(headers)
        P.writeToFile(basis, 'NewBasis.csv')
    if opts.test == True:
        P.test()
# Run the command-line driver when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1612556 | import unittest
from query_tools.base_tokenizer import WikidataTokenizer, DBpediaTokenizer
from query_tools.base_query import WikidataQuery
from templates.wikidata_template import WikidataTemplate
class TestQuery(unittest.TestCase):
    # Round-trip tests for compressing/decompressing Wikidata SPARQL queries.

    def testWikidataQuery(self):
        # Same query in full-URI form and in prefixed (compressed) form.
        query_1 = "SELECT DISTINCT ?uri WHERE { <http://www.wikidata.org/entity/Q4072104> <http://www.wikidata.org/prop/direct/P184> ?uri }"
        compressed_query_1 = "SELECT DISTINCT ?uri WHERE { wd:Q4072104 wdt:P184 ?uri }"
        q1 = WikidataQuery(query_1)
        cq1 = WikidataQuery(compressed_query_1)
        # Compression state must be detected from the query text itself.
        self.assertFalse(q1.is_compressed())
        self.assertTrue(cq1.is_compressed())
        # compress()/decompress() must map between the two representations.
        self.assertEqual(q1.compress(), cq1.get_query())
        self.assertEqual(cq1.decompress(), q1.get_query())
class TestTokenizer(unittest.TestCase):
    # Encode/decode tests for the SPARQL tokenizers. "Encoding" turns a query
    # into a whitespace-separated token vocabulary (brack_open, var_uri, ...);
    # "decoding" must reconstruct an equivalent query string.

    def testWikidataTokenizer(self):
        tokenizer = WikidataTokenizer()
        # Full-URI and compressed forms must encode to the same token string.
        q1 = WikidataQuery("SELECT DISTINCT ?uri WHERE { <http://www.wikidata.org/entity/Q4072104> <http://www.wikidata.org/prop/direct/P184> ?uri }")
        cq1 = WikidataQuery("SELECT DISTINCT ?uri WHERE { wd:Q4072104 wdt:P184 ?uri }")
        encoded_query_1 = "select distinct var_uri where brack_open wd_q4072104 wdt_p184 var_uri brack_close"
        self.assertEqual(tokenizer.encode(q1), encoded_query_1)
        self.assertEqual(tokenizer.encode(cq1), encoded_query_1)
        # Decoding restores the compressed, lowercased query.
        decoded_query_1 = "select distinct ?uri where { wd:Q4072104 wdt:P184 ?uri }"
        self.assertEqual(tokenizer.decode(encoded_query_1).get_query(), decoded_query_1)
        encoded_query_2 = "select distinct var_uri where brack_open wd_q3025443 wdt_p86 var_uri brack_close"
        decoded_query_2 = "select distinct ?uri where { wd:Q3025443 wdt:P86 ?uri }"
        self.assertEqual(tokenizer.decode(encoded_query_2).get_query(), decoded_query_2)
        # Qualifier statement with FILTER/contains and a numeric literal.
        query_3 = "SELECT ?value WHERE { <x> p:P2128 ?s . ?s ps:P2128 ?x filter(contains(?x,'162.0')) . ?s pq:P459 ?value}"
        encoded_query_3 = "select var_value where brack_open placeholder_x p_p2128 var_s sep_dot var_s ps_p2128 var_x filter attr_open contains attr_open var_x sep_comma apstrph_162_dot_0_apstrph attr_close attr_close sep_dot var_s pq_p459 var_value brack_close"
        decoded_query_3 = "select ?value where { <x> p:P2128 ?s . ?s ps:P2128 ?x filter ( contains ( ?x , '162.0' ) ) . ?s pq:P459 ?value }"
        q3 = WikidataQuery(query_3)
        self.assertEqual(encoded_query_3, tokenizer.encode(q3))
        self.assertEqual(tokenizer.decode(encoded_query_3).get_query(), decoded_query_3)
        # ASK query with a comparison operator inside a FILTER.
        query_string_4 = "ASK WHERE { wd:Q658 wdt:P1108 ?obj filter(?obj < 1.2) }"
        query_4 = WikidataQuery(query_string_4)
        encoded_query_4 = "ask where brack_open wd_q658 wdt_p1108 var_obj filter attr_open var_obj math_lt 1_dot_2 attr_close brack_close"
        decoded_query_4 = "ask where { wd:Q658 wdt:P1108 ?obj filter ( ?obj < 1.2 ) }"
        self.assertEqual(tokenizer.encode(query_4), encoded_query_4)
        self.assertEqual(tokenizer.decode(encoded_query_4).get_query(), decoded_query_4)

    def testWikidataTokenizerWithStringCases(self):
        # String literals and language filters must survive the round trip,
        # including queries that carry template placeholders like <obj_1>.
        tokenizer = WikidataTokenizer()
        query_string_5 = "SELECT DISTINCT ?sbj ?sbj_label WHERE { ?sbj wdt:P31 wd:Q427626 . ?sbj rdfs:label ?sbj_label . FILTER(CONTAINS(lcase(?sbj_label), 'variety')) . FILTER (lang(?sbj_label) = 'en') } LIMIT 25"
        query_5 = WikidataQuery(query_string_5)
        encoded_query_5 = "select distinct var_sbj var_sbj_label where brack_open var_sbj wdt_p31 wd_q427626 sep_dot var_sbj rdfs_label var_sbj_label sep_dot filter attr_open contains attr_open lcase attr_open var_sbj_label attr_close sep_comma apstrph_variety_apstrph attr_close attr_close sep_dot filter attr_open lang attr_open var_sbj_label attr_close math_eq apstrph_en_apstrph attr_close brack_close limit 25"
        decoded_query_5 = "select distinct ?sbj ?sbj_label where { ?sbj wdt:P31 wd:Q427626 . ?sbj rdfs:label ?sbj_label . filter ( contains ( lcase ( ?sbj_label ) , 'variety' ) ) . filter ( lang ( ?sbj_label ) = 'en' ) } limit 25"
        self.assertEqual(encoded_query_5, tokenizer.encode(query_5))
        self.assertEqual(decoded_query_5, tokenizer.decode(encoded_query_5).get_query())
        query_string_6 = WikidataTemplate(query_string_5).get_query_template(query_5)
        query_6 = WikidataQuery(query_string_6)
        encoded_query_6 = "select distinct var_sbj var_sbj_label where brack_open var_sbj wdt_p31 placeholder_obj_1 sep_dot var_sbj rdfs_label var_sbj_label sep_dot filter attr_open contains attr_open lcase attr_open var_sbj_label attr_close sep_comma placeholder_str_value attr_close attr_close sep_dot filter attr_open lang attr_open var_sbj_label attr_close math_eq apstrph_en_apstrph attr_close brack_close limit 25"
        decoded_query_6 = "select distinct ?sbj ?sbj_label where { ?sbj wdt:P31 <obj_1> . ?sbj rdfs:label ?sbj_label . filter ( contains ( lcase ( ?sbj_label ) , <str_value> ) ) . filter ( lang ( ?sbj_label ) = 'en' ) } limit 25"
        self.assertEqual(encoded_query_6, tokenizer.encode(query_6))
        self.assertEqual(decoded_query_6, tokenizer.decode(encoded_query_6).get_query(), )

    def testDBpediaTokenizer(self):
        # NOTE(review): smoke test only -- decodes a set of DBpedia token
        # strings and prints them; no assertions are made on the output.
        encoded_query = "SELECT DISTINCT var_uri where brack_open dbr_Mad_River_ attr_open California attr_close dbo_city var_uri brack_close"
        encoded_query_2 = "ask where brack_open dbr_Island_Barn_Reservoir dbo_areaTotal var_a1 sep_dot dbr_Arab_League dbo_areaTotal var_a2 sep_dot filter attr_open var_a1math_gtvar_a2 attr_close brack_close"
        encoded_query_3 = "SELECT DISTINCT COUNT attr_open var_uri attr_close where brack_open var_uri dbp_distributor dbr_Electronic_Arts brack_close"
        encoded_query_4 = "SELECT DISTINCT var_uri where brack_open dbr_Up_All_Night_ attr_open One_Direction_album attr_close dbp_writer var_uri sep_dot dbr_Air_Guitar_ attr_open McBusted_song attr_close dbo_writer var_uri brack_close"
        encoded_query_5 = "SELECT DISTINCT COUNT attr_open var_uri attr_close where brack_open var_x dbo_builder dbr_Department_of_Public_Works_and_Highways_ attr_open Philippines attr_close sep_dot var_x dbo_builder var_uri brack_close"
        encoded_query_6 = "SELECT DISTINCT COUNT attr_open var_uri attr_close where brack_open var_x dbo_team dbr_İzmir_Büyükşehir_Belediyesi_GSK_ attr_open men's_ice_hockey attr_close sep_dot var_x dbo_formerTeam var_uri sep_dot var_uri a dbo_SportsTeam brack_close"
        encoded_query_7 = "SELECT DISTINCT var_uri where brack_open var_x dbo_hometown dbr_Île-de-France_ attr_open region attr_close sep_dot var_x dbp_genre var_uri sep_dot var_x a dbo_Band brack_close"
        encoded_query_8 = "SELECT DISTINCT var_uri where brack_open dbr_ZFS_ attr_open z/OS_file_system attr_close dbp_developer var_uri sep_dot dbr_Maqetta dbo_author var_uri brack_close"
        encoded_query_9 = "select distinct var_uri where brack_open brack_open var_uri dbo_field dbr_Jazz sep_dot brack_close union brack_open var_uri dc:description var_s sep_dot filter regex attr_open var_s,'dbr_Jazz','i' attr_close brack_close var_uri dbo_award dbr_Academy_Awards sep_dot brack_close"
        encoded_query_10 = "ASK where brack_open dbr_ attr_open 12538 attr_close _1998_OH dbo_discoverer dbr_Near_Earth_Asteroid_Tracking brack_close"
        encoded_query_11 = "ask where brack_open dbr_Alexis_Denisof dbo_spouse var_spouse sep_dot var_spouse rdfs_label var_name sep_dot filter attr_open regex attr_open var_name,'dbo_Station' attr_close attr_close brack_close"
        tokenizer = DBpediaTokenizer()
        print(tokenizer.decode(encoded_query).get_query(add_prefixes=False))
        print(tokenizer.decode(encoded_query_2).get_query(add_prefixes=False))
        print(tokenizer.decode(encoded_query_3).get_query(add_prefixes=False))
        print(tokenizer.decode(encoded_query_4).get_query(add_prefixes=False))
        print(tokenizer.decode(encoded_query_5).get_query(add_prefixes=False))
        print(tokenizer.decode(encoded_query_6).get_query(add_prefixes=False))
        print(tokenizer.decode(encoded_query_7).get_query(add_prefixes=False))
        print(tokenizer.decode(encoded_query_8).get_query(add_prefixes=False))
        print(tokenizer.decode(encoded_query_9).get_query(add_prefixes=False))
        print(tokenizer.decode(encoded_query_10).get_query(add_prefixes=False))
        print(tokenizer.decode(encoded_query_11).get_query())
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
1634542 | <reponame>QuickBase/QB_APIs<gh_stars>1-10
##################### SCRIPT FOR UPDATING TABLES IN QB APPS WITH DATA FROM SQL SERVER #####################
################################## written by <NAME> ##################################
## INSTRUCTIONS
# Modify the variables in the next section as specified below. All variables need to be inside single quotation marks, except sql_query:
#table_id ID of the table in QB app where you want to insert the data. Example: 'bp3dm9xwf'
#token User token in QB assigned to the app where you want to insert the data. Example: '<KEY>'
#cols List of QB column IDs where the data needs to be send.
# Numbers need to be separated by commas and the column order should correspond to the column order in the SQL query.
# Example: [6, 7, 8, 9, 10] will send the first column from the SQL query to the column with ID 6 in QuickBase.
#sql_query SQL query text. The query needs to be preceded and followed by three single quotation marks. Example: '''SELECT * FROM MyTable'''
#realm_hostname QB realm hostname, for example 'mycompany.quickbase.com
#log_table_id ID of the table in QB app where you want to insert your log data (optional)
#log_cols List of QB column IDs where the log data needs to be send.(optional)
#LASTLY, please do a Ctrl+F to search for the "Replace below!" string so that you can see the Driver & Server values that you should replace with your own. Additionally, we provide you with "Option 2" to write your log data to a SQL table (instead of a QB table) and you will need a Database & Schema name if you want to go this route
## VARIABLES
table_id = ''                # QB table ID receiving the data, e.g. 'bp3dm9xwf'
token = ''                   # QB user token assigned to the target app
cols = [6, 7, 8, 9, 10]      # QB field IDs, in the same order as the SQL columns
sql_query = ''' '''          # SQL query text used to pull the source data
realm_hostname = ''          # e.g. 'mycompany.quickbase.com'
log_table_id = ''            # optional: QB table ID for run-log records
log_cols = [6, 7, 8, 9, 10]  # optional: QB field IDs for the log columns
### IMPORTING PACKAGES
import pandas as pd
import sqlalchemy as sqla
import numpy as np
import requests
import unicodedata
from datetime import datetime
import urllib
import json
### EXTRACT
# Build a trusted-connection ODBC string for SQL Server.
conn_str = (
    #Replace below!... input your SQL Server driver
    r'Driver=ODBC Driver 13 for SQL Server;'
    #Replace below!... input your server name
    r'Server=YOUR_SERVER_HERE;'
    r'Trusted_Connection=yes;'
    #r'UID=username;'
    #r'PWD=password'
)
# URL-encode the ODBC string so it can ride inside the SQLAlchemy URL.
quoted_conn_str = urllib.parse.quote_plus(conn_str)
engine = sqla.create_engine('mssql+pyodbc:///?odbc_connect={}'.format(quoted_conn_str))
# Pull the source rows into a dataframe; the connection is closed on exit.
with engine.connect() as sql_conn:
    mydata = pd.read_sql(sql_query, sql_conn)
del conn_str, sql_conn
print('Data import completed')
### TRANSFORM
# Replacing diacritics with ASCII characters
def decoder(x):
    """Strip diacritics from *x*: decompose to NFKD and drop non-ASCII code points."""
    decomposed = unicodedata.normalize('NFKD', x)
    ascii_bytes = decomposed.encode('ascii', 'ignore')
    return ascii_bytes.decode('utf-8')
# Coerce every column to string and strip diacritics so QuickBase accepts it.
for column in mydata.columns:
    mydata[column] = mydata[column].astype(str).apply(decoder)
# Rename the dataframe columns to the target QuickBase field IDs.
str_cols = [str(x) for x in cols]
mydata.columns = str_cols
### LOAD
# Deleting all records from the existing QB table
print('Sending Delete Records API request...')
headers = {'QB-Realm-Hostname': realm_hostname, 'Authorization': 'QB-USER-TOKEN ' + token}
# The where clause {3.GT.0} matches every record (presumably field 3 is the
# built-in Record ID# -- confirm in the app schema).
# NOTE(review): eval(json.dumps(...)) is a no-op round-trip on this literal dict.
data = eval(json.dumps({"from": table_id, "where": r'{3.GT.0}'}))
r1 = requests.delete(url='https://api.quickbase.com/v1/records', headers=headers, json=data)
# Preparing for export
step = 50000  # max records sent per Insert/Update API request
dflength = len(mydata.index)
iter_np = np.arange(0, dflength, step)
iter = list(iter_np)  # chunk start offsets; NOTE(review): shadows builtin iter()
def slice_df(mydata, start_line):
    """Return the chunk of ``mydata`` rows [start_line, start_line + step).

    Relies on the module-level ``step`` chunk size; the final chunk may be
    shorter than ``step``.  (The previous version bound the result to a
    local named ``slice``, shadowing the builtin; removed.)
    """
    end_line = start_line + step
    return mydata.iloc[start_line:end_line, :]
# Total number of Insert/Update requests needed for all chunks.
req_total = int(np.ceil(dflength / step))
req_nr = 1
# Record whether the initial delete succeeded (HTTP 200).
if str(r1) == '<Response [200]>':
    errorlog1 = '0 no error '
else:
    errorlog1 = 'FAILED to delete records '
errorlog2 = ""
# Loading Data to a QB table
for i in iter :
    slice = slice_df(mydata, i)
    print('Sending Insert/ Update Records API request ' + str(req_nr) + ' out of ' + str(req_total))
    # QuickBase expects each cell as {"<field id>": {"value": <cell>}}.
    df_json = slice.to_json(orient='records')
    df_json = json.loads(df_json)
    df_json = [{key: {"value": value} for key, value in item.items()} for item in df_json]
    headers = {'QB-Realm-Hostname': realm_hostname, 'Authorization': 'QB-USER-TOKEN ' + token}
    data = {"to": table_id, "data": df_json}
    r = requests.post(url='https://api.quickbase.com/v1/records', headers = headers, json = data)
    print(str(r))
    # Accumulate one status token per request for the run log.
    if str(r) == '<Response [200]>':
        err_code = '0 no error '
    else:
        err_code = 'ERROR import failed '
    errorlog2 += err_code
    req_nr += 1
print('Delete records request: ' + errorlog1 + "\nExport to Quickbase requests: " + errorlog2)
# Creating log data
log_data = {'Upload Date': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            'Process Name': "QB Upload",
            'TableID': table_id,
            'Delete-records Error Code': errorlog1,
            'Insert/Update-records Error Code': errorlog2}
log_data = pd.DataFrame(log_data, index=[0])
# OPTION 1.... Loading log data to a QB table
str_cols = [str(x) for x in log_cols]
log_data.columns = str_cols
log_json = log_data.to_json(orient='records')
log_json = json.loads(log_json)
log_json = [{key: {"value": value} for key, value in item.items()} for item in log_json]
headers = {'QB-Realm-Hostname': realm_hostname, 'Authorization': 'QB-USER-TOKEN ' + token}
data = {"to": log_table_id, "data": log_json}
r3 = requests.post(url='https://api.quickbase.com/v1/records', headers=headers, json=data)
if str(r3) == '<Response [200]>':
    print('Log data table has been updated')
else:
    print('FAILED to upload log data to QB')
# OPTION 2.... Loading log data to a SQL table... uncomment the below lines if you want to leverage!
#conn_str = (
#    #Replace below!... input your SQL Server driver
#    r'Driver=ODBC Driver 13 for SQL Server;'
#    #Replace below!... input your server name
#    r'Server=YOUR_SERVER_HERE;'
#    #Replace below!... input your database name
#    r'Database=YOUR_DATABASE_HERE;'
#    r'Trusted_Connection=yes:'
#    #r'UID=username;'
#    #r'PWD=password'
#    )
#
#quoted_conn_str = urllib.parse.quote_plus(conn_str)
#engine = sqla.create_engine('mssql+pyodbc:///?odbc_connect={}'.format(quoted_conn_str))
#
#with engine.connect() as sql_conn:
#    log_data.to_sql(name='python_upload_log',
#                    con=sql_conn,
#                    #Replace below!... input your schema name
#                    schema='YOUR_SCHEMA_NAME',
#                    if_exists='append',
#                    index=False,
#                    dtype={'Upload Date': sqla.types.DateTime(),
#                           'Process Name': sqla.types.String(),
#                           'TableID': sqla.types.String(),
#                           'Delete-records Error Code': sqla.types.String(),
#                           'Insert/Update-records Error Code': sqla.types.String()}
#                    )
print('Process completed')
89546 |
def fxp(number, fractionalBits):
    """
    Return a fixed-point binary representation of *number* with the
    fractional part rounded to *fractionalBits* bits.

    The result is a string of 1's and 0's with a dot separating the
    integer part (left) from the rounded fractional part (right).
    Assumes *number* is non-negative.
    """
    # Scale so the fraction becomes an integer, then round to nearest.
    quantized = int(round(number * 2 ** fractionalBits))
    bits = bin(quantized)[2:]  # drop the "0b" prefix
    integerBits = len(bits) - fractionalBits
    if integerBits >= 0:
        return bits[:integerBits] + "." + bits[integerBits:]
    # Value below 1 and narrower than the requested precision:
    # left-pad the fraction with zeros.
    return "." + "0" * (-integerBits) + bits
# Demo: tabulate fxp() output for a few sample values at several precisions.
# NOTE(review): Python 2 print statements -- this script requires Python 2.
data = [ 9.2, 1.0/3, 0.171875, 23.47 ]
# first print a row with our data.
print "  ",
for v in data:
    print "%-30f " % v,
print
# for each fractional bitwidth, print out the fxp strings for the data.
for f in [8, 9, 12, 15, 16, 24]:
    print "%2d" % f,
    for v in data:
        print "%-30s " % fxp(v, f),
    print
| StarcoderdataPython |
3289366 | <reponame>ksshanam/sahara<gh_stars>0
# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import mock
from sahara.service.api import v10 as api
from sahara.service.validations import cluster_template_schema as ct_schema
from sahara.tests.unit.service.validation import utils as u
# Minimal valid payload for a cluster-template update request.
SAMPLE_DATA = {
    'name': 'testname',
    'plugin_name': 'fake',
    'hadoop_version': '0.1',
    'is_public': False,
    'is_protected': False
}
class TestClusterTemplateUpdateValidation(u.ValidationTestCase):
    # Validation tests for the cluster-template UPDATE JSON schema.

    def setUp(self):
        super(TestClusterTemplateUpdateValidation, self).setUp()
        # Validation helper expects these attributes on the test case.
        self._create_object_fun = mock.Mock()
        self.scheme = ct_schema.CLUSTER_TEMPLATE_UPDATE_SCHEMA
        api.plugin_base.setup_plugins()

    def test_cluster_template_update_nothing_required(self):
        # An empty update body must validate: no field is mandatory on update.
        self._assert_create_object_validation(
            data={}
        )

    def test_cluster_template_update_schema(self):
        # The update schema must match the create schema except for "required".
        create = copy.copy(ct_schema.CLUSTER_TEMPLATE_SCHEMA)
        update = copy.copy(ct_schema.CLUSTER_TEMPLATE_UPDATE_SCHEMA)
        # No required items for update
        self.assertEqual([], update["required"])
        # Other than required, schemas are equal
        del update["required"]
        del create["required"]
        self.assertEqual(create, update)

    def test_cluster_template_update(self):
        # A well-formed body validates; an unknown extra key is rejected.
        self._assert_create_object_validation(
            data=SAMPLE_DATA
        )
        extra = copy.copy(SAMPLE_DATA)
        extra['dog'] = 'fido'
        self._assert_create_object_validation(
            data=extra,
            bad_req_i=(1, "VALIDATION_ERROR",
                       "Additional properties are not allowed "
                       "('dog' was unexpected)")
        )
| StarcoderdataPython |
113571 | import dataclasses
from typing import Any, ClassVar, Dict, Iterable, Optional
from dbdaora.data_sources.fallback import FallbackDataSource
@dataclasses.dataclass
class DictFallbackDataSource(FallbackDataSource[str]):
    """In-memory fallback data source backed by a plain dict.

    Keys are strings joined from parts with ``key_separator``; values are
    arbitrary ``dict`` payloads.  Intended for tests / single-process use.
    """

    db: Dict[Optional[str], Dict[str, Any]] = dataclasses.field(
        default_factory=dict
    )
    key_separator: ClassVar[str] = ':'

    def make_key(self, *key_parts: str) -> str:
        """Join the non-empty parts into a single separator-delimited key."""
        return self.key_separator.join(part for part in key_parts if part)

    async def get(self, key: str) -> Optional[Dict[str, Any]]:
        """Return the entry stored under *key*, or None when absent."""
        try:
            return self.db[key]
        except KeyError:
            return None

    async def put(self, key: str, data: Dict[str, Any], **kwargs: Any) -> None:
        """Store *data* under *key*, overwriting any previous entry."""
        self.db[key] = data

    async def delete(self, key: str) -> None:
        """Remove *key* if present; deleting a missing key is a no-op."""
        if key in self.db:
            del self.db[key]

    async def query(self, key: str, **kwargs: Any) -> Iterable[Dict[str, Any]]:
        """Return all stored entries (the *key* argument is ignored here)."""
        return self.db.values()
| StarcoderdataPython |
1673387 | <gh_stars>1-10
def super_fibonacci(n, m):
    """Return the n-th term of the m-step ("super") Fibonacci sequence.

    The sequence starts with a single 0 followed by m ones; every later
    term is the sum of the preceding m terms (m=2 yields the classic
    Fibonacci series).

    :param n: 0-based index of the requested term (n >= 0).
    :param m: number of preceding terms summed per new term (m >= 1).
    :return: the n-th sequence value as an int.
    """
    fib = [0] + [1] * m
    for i in range(m + 1, n + 1):
        # sum() over a slice replaces the original manual accumulation loop.
        fib.append(sum(fib[i - m:i]))
    return fib[n]
# Spot-check a few (n, m) pairs.
print(super_fibonacci(2, 1))
print(super_fibonacci(3, 5))
print(super_fibonacci(8, 2))
print(super_fibonacci(9, 3)) | StarcoderdataPython |
3231996 | from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.layers import *
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import ModelCheckpoint
import numpy as np
import os
import sys
nnscript = os.path.abspath('../../scripts')
sys.path.append(nnscript)
from nnom_utils import *
# File name of the trained Keras model and the directory it is saved into.
model_name = 'mnist_simple_trained_model.h5'
save_dir = os.path.join(os.getcwd(), 'saved_models')
def image_to_cfile(data, label, size, file='image.h'):
    """Emit `size` randomly sampled images and their labels as a C header.

    Each image becomes an ``IMG<i>`` macro of comma-separated int8 pixel
    values and an ``IMG<i>_LABLE`` macro (spelling kept for consistency
    with the arrays referencing these macros below), followed by
    ``img``/``label`` arrays that stitch the macros together.
    Samples are drawn from the first 1000 entries of *data*/*label*.
    """
    with open(file, 'w') as header:
        count = size
        for idx in range(count):
            # One independent random draw per emitted image (same call
            # order as the original, so seeded runs are reproducible).
            pick = np.random.randint(0, 1000)
            header.write('#define IMG%d {'% (idx))
            np.round(data[pick]).flatten().tofile(header, sep=", ", format="%d")
            header.write('} \n')
            header.write('#define IMG%d_LABLE'% (idx))
            header.write(' %d \n \n' % label[pick])
        header.write('#define TOTAL_IMAGE %d \n \n'%(count))
        header.write('static const int8_t img[%d][%d] = {' % (count, data[0].flatten().shape[0]))
        header.write('IMG0')
        for idx in range(1, count):
            header.write(',IMG%d'%(idx))
        header.write('};\n\n')
        header.write('static const int8_t label[%d] = {' % (count))
        header.write('IMG0_LABLE')
        for idx in range(1, count):
            header.write(',IMG%d_LABLE'%(idx))
        header.write('};\n\n')
def octave_conv2d(xh, xl, ch=12):
    """One octave-convolution block over a high/low-frequency tensor pair.

    Implements the two octave-convolution equations:
        YH = f(XH; WH->H) + upsample(f(XL; WL->H), 2)
        YL = f(XL; WL->L) + f(pool(XH, 2); WH->L)

    :param xh: high-frequency (full-resolution) input tensor.
    :param xl: low-frequency (half-resolution) input tensor.
    :param ch: number of output channels for every convolution path.
    :return: (yh, yl) output tensor pair.
    """
    def _conv(tensor):
        # All four paths share the same 3x3 / stride-1 / 'same' convolution.
        return Conv2D(ch, kernel_size=(3, 3), strides=(1, 1), padding='same')(tensor)

    # Layer creation order matches the original (keeps auto-generated
    # Keras layer names and weight ordering identical).
    high_to_high = _conv(xh)       # f(XH; WH->H)
    low_to_low = _conv(xl)         # f(XL; WL->L)
    low_to_high = UpSampling2D(size=(2, 2))(_conv(xl))        # upsample(f(XL; WL->H), 2)
    high_to_low = MaxPool2D(pool_size=(2, 2), padding='same')(_conv(xh))  # f(pool(XH,2); WH->L)
    yh = add([high_to_high, low_to_high])
    yl = add([low_to_low, high_to_low])
    return yh, yl
def train(x_train, y_train, x_test, y_test, batch_size= 64, epochs = 100):
    """Build, compile and fit the octave-convolution MNIST classifier.

    The best model (by validation accuracy) is checkpointed to
    save_dir/model_name; the Keras training History is returned.
    NOTE(review): Input, ReLU, Model, Softmax and K are assumed to come
    from the wildcard imports at the top of the file — confirm they
    resolve for the installed Keras version.
    """
    inputs = Input(shape=x_train.shape[1:])
    # Stem convolution; its output feeds both frequency branches.
    x = Conv2D(12, kernel_size=(3, 3), strides=(1, 1), padding='same')(inputs)
    xh = ReLU()(x)
    xl = MaxPool2D((2,2),strides=(2,2), padding="same")(x)
    # Octave block 1.
    xh, xl = octave_conv2d(xh, xl, 12)
    # Downsample both branches.
    xh = MaxPool2D()(xh)
    xl = MaxPool2D()(xl)
    # Octave block 2.
    xh, xl = octave_conv2d(xh, xl, 12)
    # Reduce xh's spatial dimensions to match xl before merging.
    xh = MaxPool2D()(xh)
    x = concatenate([xh, xl], axis=-1)
    # Shrink the feature map ('valid' padding) before the dense head.
    x = Conv2D(12, kernel_size=(3, 3), strides=(1, 1), padding='valid')(x)
    x = Flatten()(x)
    x = Dense(96)(x)
    x = Dropout(0.2)(x)
    x = ReLU()(x)
    x = Dense(10)(x)
    predictions = Softmax()(x)
    model = Model(inputs=inputs, outputs=predictions)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    model.summary()
    # Ensure the checkpoint directory exists.
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    model_path = os.path.join(save_dir, model_name)
    # Checkpoint only the best validation-accuracy weights.
    checkpoint = ModelCheckpoint(filepath=model_path,
                monitor='val_acc',
                verbose=0,
                save_best_only='True',
                mode='auto',
                period=1)
    callback_lists = [checkpoint]
    history = model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=2,
              validation_data=(x_test, y_test),
              shuffle=True, callbacks=callback_lists)
    # Free the graph/session so the caller can reload the checkpoint cleanly.
    del model
    K.clear_session()
    return history
# Script entry point: train the model, regenerate image.h / weights.h and
# plot the accuracy curves.
if __name__ == "__main__":
    # Work around GPU memory-allocation errors by growing memory on demand.
    # NOTE(review): `tf` and `plt` are not imported explicitly in this file;
    # presumably they come from `from nnom_utils import *` — verify.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)
    epochs = 5
    num_classes = 10
    # The data, split between train and test sets:
    (x_train, y_train_num), (x_test, y_test_num) = mnist.load_data()
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')
    # Convert class vectors to binary class matrices (one-hot).
    y_train = keras.utils.to_categorical(y_train_num, num_classes)
    y_test = keras.utils.to_categorical(y_test_num, num_classes)
    # Reshape to 4-D (batch, height, width, channels) as Conv2D expects.
    x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)
    x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1)
    print('x_train shape:', x_train.shape)
    # Scale pixel values from 0~255 down to 0~1.
    x_test = x_test/255
    x_train = x_train/255
    print("data range", x_test.min(), x_test.max())
    # Select a few images (scaled back to 0~127 int8 range) for image.h.
    image_to_cfile(x_test*127, y_test_num, 10, file='image.h')
    # Train and checkpoint the best-accuracy model.
    history = train(x_train, y_train, x_test, y_test, batch_size=64, epochs=epochs)
    # Reload the best checkpoint.
    model_path = os.path.join(save_dir, model_name)
    model = load_model(model_path)
    # Evaluate (helper from nnom_utils).
    evaluate_model(model, x_test, y_test)
    # Export the quantized weights header (helper from nnom_utils).
    generate_model(model, np.vstack((x_train, x_test)), name="weights.h")
    # Plot training vs validation accuracy over the epochs.
    acc = history.history['acc']
    val_acc = history.history['val_acc']
    plt.plot(range(0, epochs), acc, color='red', label='Training acc')
    plt.plot(range(0, epochs), val_acc, color='green', label='Validation acc')
    plt.title('Training and validation accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()
| StarcoderdataPython |
1634404 |
import datetime
import os
import re
from typing import List, Optional, Sequence
from stramp.docstruct import DocFile, DocSection
# https://orgmode.org/worg/dev/org-syntax.html
# Headline: one or more leading '*', one horizontal-space char (not CR/LF),
# then non-blank text; captures (stars, title).
re_heading = re.compile(br'(?m)^(\*+)[^\S\r\n](\S.*)$')
# Greater-block delimiter: optional indent, '#+begin_NAME'/'#+end_NAME'
# (case-insensitive); captures (begin|end, NAME).
re_greater_block_delimiter = re.compile(br'(?mi)^[^\S\r\n]*#\+(begin|end)_(\S+)(?:[^\S\r\n].*)?$')
def find_headings(data: bytes) -> List[DocSection]:
    """Scan raw Org-mode bytes and return its sections in document order.

    The returned list starts with a synthetic level-0 '(ROOT)' section
    spanning the whole buffer, followed by one DocSection per headline
    found outside greater blocks (#+begin_.../#+end_...).  Only
    start_offset is set on the real headings; end_offset and parent links
    are filled in later by link_sections.
    """
    headings = [DocSection(
        text='(ROOT)',
        level=0,
        start_offset=0,
        end_offset=len(data))]
    i = 0
    while i < len(data):
        m_heading = re_heading.search(data, i)
        m_block = re_greater_block_delimiter.search(data, i)
        if m_heading is None and m_block is None:
            break
        if m_block is not None and (m_heading is None or m_block.start() < m_heading.start()):
            # A greater block starts before the next headline: skip forward
            # to its matching #+end_ so headline-like lines inside the block
            # are ignored.  NOTE(review): nested blocks of the same type are
            # not tracked — the first matching #+end_ closes the block.
            i = m_block.end()
            while i < len(data):
                m = re_greater_block_delimiter.search(data, i)
                if m is None:
                    i = len(data)
                    break
                i = m.end()
                if m[1].lower() == b'end' and m[2].lower() == m_block[2].lower():
                    # Closing delimiter of the same block type found.
                    break
            continue
        if m_heading is not None:
            # Record the headline: level = number of stars, text = title.
            headings.append(DocSection(
                text=m_heading[2].decode('UTF-8'),
                level=len(m_heading[1]),
                start_offset=m_heading.start()))
            i = m_heading.end()
    return headings
def link_sections(headings: Sequence[DocSection]) -> DocSection:
    """Link a flat, document-ordered list of sections into a tree.

    ``headings`` must start with the level-0 root element (as produced by
    ``find_headings``).  Each section's ``parent`` is set, placeholder
    sections are inserted for skipped levels (e.g. a '***' headline right
    under a '*'), and ``end_offset`` is filled in for every section closed
    by a following sibling or ancestor.

    :return: the root section.
    """
    h_prev = None  # type: Optional[DocSection]
    root = None  # type: Optional[DocSection]
    for h in headings:
        if h.level == 0:
            # The root must come first, and only once.
            assert h_prev is None
            root = h
        elif h.level > h_prev.level:
            # Descendant of h_prev.
            h.parent = h_prev
            # Insert empty placeholder sections for any skipped levels.
            for level in range(h_prev.level + 1, h.level):
                # BUG FIX: the original called DocSection(level, '', ...)
                # positionally; per the keyword construction order used in
                # find_headings (text, level, start_offset), that bound the
                # integer to the `text` field and '' to `level`, breaking
                # the level comparisons below.  Keyword arguments express
                # the intent unambiguously.
                hx = DocSection(text='', level=level,
                                start_offset=h_prev.start_offset)
                hx.parent = h.parent
                h.parent = hx
        else:
            # Sibling or ancestor level: climb up to the new parent...
            hx = h_prev.parent
            while hx.level > h.level - 1:
                hx = hx.parent
            h.parent = hx
            # ...and close every section that ends where this one starts.
            hx = h_prev
            while hx.level >= h.level:
                hx.end_offset = h.start_offset
                hx = hx.parent
        h_prev = h
    # Sections still open at end-of-document end where the root ends.
    hx = h_prev
    while hx is not None:
        hx.end_offset = root.end_offset
        hx = hx.parent
    return root
def load_file(doc: DocFile):
    """Read the document's bytes from disk and build its section tree.

    Falls back to ``doc.file_path`` when no explicit data path is set, and
    records the raw bytes, stat result and read timestamp on *doc*.
    """
    if doc.file_data_path is None:
        doc.file_data_path = doc.file_path
    with doc.file_data_path.open('rb') as stream:
        doc.file_bytes = stream.read()
        doc.file_stat = os.stat(stream.fileno())
        doc.file_read_datetime = datetime.datetime.utcnow()
        # Parse the headings and link them into a tree in one pass.
        doc.root_heading = link_sections(find_headings(doc.file_bytes))
| StarcoderdataPython |
107272 | from model.network import LeNet5
from saliency.vanilla_gradient import save_vanilla_gradient
from model.data import mnist_train_test_sets
import numpy as np
# Load the MNIST train/test split, already preprocessed by the helper.
train_images, train_labels, test_images, test_labels = mnist_train_test_sets()
# Load the LeNet-5 network with pretrained weights (~98% test accuracy).
net = LeNet5(weights_path="15epoch_weights.pkl")
# Uncomment if you want to train or test
# net.train(training_data=train_images, training_labels=train_labels,
#           batch_size=32, epochs=3, weights_path='weights.pkl')
# net.test(test_images, test_labels)
# Uncomment if you want to filter the inputs by class instead
# target_image_class = 7
# target_image_indexes = [i for i in range(len(test_labels))
#                         if np.argmax(test_labels[i]) == target_image_class]
# target_images = [test_images[index] for index in target_image_indexes]
# target_labels = [test_labels[index] for index in target_image_indexes]
# Generate vanilla-gradient saliency maps for the first 10 training images.
target_images = train_images[:10]
target_labels = train_labels[:10]
save_vanilla_gradient(network=net, data=target_images, labels=target_labels)
| StarcoderdataPython |
1628042 | <gh_stars>1-10
# COUNTING VALLEYS HACKERRANK SOLUTION:
# creating a function to calculate the number of valleys.
def countingValleys(steps, path):
    """Count the valleys traversed during a hike.

    The hiker starts at sea level; 'U' raises the altitude by one and 'D'
    lowers it by one.  A valley is a maximal stretch spent strictly below
    sea level, so exactly one valley begins each time the hiker steps DOWN
    from sea level (0) to altitude -1.

    :param steps: number of characters of `path` to process.
    :param path: string of 'U'/'D' moves.
    :return: the number of valleys traversed.
    """
    current_level = 0
    valley_count = 0
    for i in range(steps):
        if path[i] == 'U':
            current_level += 1
        elif path[i] == 'D':
            current_level -= 1
            # BUG FIX: the original counted *every* arrival at level -1,
            # including climbs from -2 back to -1 inside the same valley,
            # over-counting (e.g. "UDDDUDUU" returned 3 instead of 1).
            # A valley is entered only on a downward step from sea level.
            if current_level == -1:
                valley_count += 1
    return valley_count
# Read the step count and the path string from stdin.
steps = int(input().strip())
path = input()
# Compute the number of valleys crossed for the final output.
result = countingValleys(steps, path)
print(result) | StarcoderdataPython |
26597 | <reponame>Fairy-Phy/Relium
import random
from Relium import calcurate, classes, parser, constant
"""
1ラインづつランダムな位置に表示していきます
"""
source_file = r""
target_start_offset = 31999
target_end_offset = 34666
avgbpm = 180
# ノーツの高さの最大値(上げすぎると見えなくなります)
max_laneheight = 370
beat = 4
sample_set = 1
sample_index = 0
volume = 64
effects = 1
## Main ##
parsed_map = parser.parsefile(source_file)
# Keep only the hit objects inside the configured time window.
target_hitobjects = [output for output in parsed_map.HitObjects if output.offset >= target_start_offset and output.offset <= target_end_offset]
result_object = classes.ParsedBeatmap([], [])
# Collapse notes sharing an offset with the previous note (chords) into a
# single note.  BUG FIX: the original removed elements from the list while
# iterating over it, which silently skips the element following every
# removal; build a new list instead.
deduped_hitobjects = []
last_process_offset = None
for target_hitobject in target_hitobjects:
    if target_hitobject.offset != last_process_offset:
        deduped_hitobjects.append(target_hitobject)
    last_process_offset = target_hitobject.offset
target_hitobjects = deduped_hitobjects
for target_hitobject_i in range(len(target_hitobjects)):
    target_hitobject = target_hitobjects[target_hitobject_i]
    if target_hitobject_i == 0:
        # First note: pin it with an "infinite" BPM point, then freeze
        # scrolling one millisecond later.
        result_object.TimingPoints.append(classes.TimingPoint(target_hitobject.offset, constant.inf_bpm, beat, sample_set, sample_index, volume, False, effects))
        result_object.TimingPoints.append(classes.TimingPoint(target_hitobject.offset + 1, constant.zero_bpm, beat, sample_set, sample_index, volume, False, effects))
    elif target_hitobject_i == len(target_hitobjects) - 1:
        # Last note: jump to a random lane height 1 ms before it, then
        # restore the normal scroll speed on the note.  (The original also
        # carried an `if target_hitobject_i == 0` branch here, unreachable
        # inside this elif; it was removed.)
        result_object.TimingPoints.append(classes.TimingPoint(target_hitobject.offset - 1, calcurate.line_notesposition(avgbpm, random.uniform(1, max_laneheight)), beat, sample_set, sample_index, volume, False, effects))
        result_object.TimingPoints.append(classes.TimingPoint(target_hitobject.offset, calcurate.timingpoint(avgbpm), beat, sample_set, sample_index, volume, False, effects))
    else:
        # Middle notes: random height 1 ms before, "infinite" BPM on the
        # note, scroll freeze 1 ms after.  (Unreachable `i == 0` branch
        # removed here as well; `max_laneheight` replaces the literal 370
        # for consistency with the configuration above — same value.)
        result_object.TimingPoints.append(classes.TimingPoint(target_hitobject.offset - 1, calcurate.line_notesposition(avgbpm, random.uniform(1, max_laneheight)), beat, sample_set, sample_index, volume, False, effects))
        result_object.TimingPoints.append(classes.TimingPoint(target_hitobject.offset, constant.inf_bpm, beat, sample_set, sample_index, volume, False, effects))
        result_object.TimingPoints.append(classes.TimingPoint(target_hitobject.offset + 1, constant.zero_bpm, beat, sample_set, sample_index, volume, False, effects))
# Apparently the output path is resolved relative to the console's working
# directory.
parser.parsesave(result_object, "export.txt")
| StarcoderdataPython |
3327441 | # -*- coding: utf-8 -*-
"""
Created on Sat Aug 21 14:36:08 2021
@author: Administrator
"""
#%%
# =============================================================================
# =============================================================================
# # 문제 11 유형(DataSet_11.csv 이용)
# 구분자 : comma(“,”), 470 Rows, 4 Columns, UTF-8 인코딩
# 세계 각국의 행복지수를 비롯한 여러 정보를 조사한 DS리서치는
# 취합된 자료의 현황 파악 및 간단한 통계분석을 실시하고자 한다.
# 컬 럼 / 정 의 / Type
# Country / 국가명 / String
# Happiness_Rank / 당해 행복점수 순위 / Double
# Happiness_Score / 행복점수 / Double
# year / 년도 / Double
# =============================================================================
# =============================================================================
#%%
import pandas as pd
data11=pd.read_csv('Dataset_11.csv')
#%%
# =============================================================================
# Q1. The analysis should use only countries with happiness scores recorded
#     for 3 consecutive years.  How many countries lack 3 consecutive years
#     of data?  (Country names differing by even one character count as
#     distinct; exclude the incomplete countries from all later analyses.)
# =============================================================================
data11.columns
# ['Country', 'Happiness_Rank', 'Happiness_Score', 'year']
q1_agg=data11.groupby('Country').apply(len)
len(q1_agg[q1_agg < 3])
# Countries without 3 consecutive years of records: 20
q1_tab=pd.pivot_table(data=data11,
                   index='Country',
                   columns='year',
                   values='Happiness_Score')
q1_tab2=pd.pivot_table(data=data11,
                   index='Country',
                   columns='year',
                   values='Happiness_Score',
                   aggfunc='count')
con_list=q1_agg[q1_agg < 3].index
q1=data11[~data11.Country.isin(con_list)]
len(data11) # 470
len(q1) # 438
#%%
# =============================================================================
# Q2. Using the 2017 and 2015 scores, compute each country's change rate
#     ((2017 score - 2015 score) / 2) and report the 3 countries with the
#     highest rate, ordered by happiness score.
# =============================================================================
q1_tab=pd.pivot_table(data=data11,
                   index='Country',
                   columns='year',
                   values='Happiness_Score')
q2=q1_tab.dropna()
q2.loc[:, 'ratio']=(q2.loc[:, 2017]-q2.loc[:,2015])/2
q2['ratio'].nlargest(3).index
# (answer) ['Latvia', 'Romania', 'Togo']
#%%
# =============================================================================
# Q3. Test whether the yearly mean happiness scores differ significantly.
#     Use the appropriate test — its statistic follows an F distribution
#     with 2 degrees of freedom — and report the statistic to 4 decimals.
# =============================================================================
# (hint)
# from statsmodels.formula.api import ols
# from statsmodels.stats.anova import anova_lm
from scipy.stats import f_oneway
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
f_oneway(q2[2015].dropna(), q2[2016].dropna(), q2[2017].dropna())
# F_onewayResult(statistic=0.004276725037689305,
#                pvalue=0.9957324489944479)
# H0: all group means are equal (mu1 = mu2 = mu3)
# H1: at least one group mean differs
ols1=ols('Happiness_Score~C(year)', data=q1).fit()
anova_lm(ols1)
#              df      sum_sq   mean_sq         F    PR(>F)
# C(year)      2.0    0.011198  0.005599  0.004277  0.995732   (between groups)
# Residual   435.0  569.472307  1.309132       NaN       NaN   (within groups)
# (answer) 0.004277 -> 0.0042
# Post-hoc pairwise comparison (Tukey HSD):
from statsmodels.stats.multicomp import pairwise_tukeyhsd
multi_out=pairwise_tukeyhsd(q1['Happiness_Score'], q1['year'])
print(multi_out)
#%%
# =============================================================================
# =============================================================================
# # 문제 12 유형(DataSet_12.csv 이용)
# 구분자 : comma(“,”), 5000 Rows, 7 Columns, UTF-8 인코딩
# 직장인의 독서 실태를 분석하기 위해서 수도권 거주자 5000명을
# 대상으로 간단한 인적 사항과 연간 독서량 정보를 취합하였다.
# 컬 럼 / 정 의 / Type
# Age / 나이 / String
# Gender / 성별(M: 남성) / String
# Dependent_Count / 부양가족 수 / Double
# Education_Level / 교육 수준 / String
# is_Married / 결혼 여부(1: 결혼) / Double
# Read_Book_per_Year / 연간 독서량(권) / Double
# Income_Range / 소득 수준에 따른 구간(A < B < C < D < E)이며 X는
# 정보 누락 / String
# =============================================================================
# =============================================================================
import pandas as pd
data12=pd.read_csv('Dataset_12.csv')
data12.columns
#%%
# =============================================================================
# Q1. Run a Pearson correlation over the numeric columns and report the
#     coefficient of the variable most strongly correlated with yearly
#     book count (rounded to 3 decimals).
# =============================================================================
data12.corr().drop('Read_Book_per_Year')['Read_Book_per_Year'].abs().nlargest(1)
# (answer) 0.797
#%%
# =============================================================================
# Q2. Test whether mean yearly book count differs between graduate-degree
#     holders (master's or doctorate) and the rest with an independent
#     two-sample t-test assuming equal variances; report the p-value
#     rounded to 3 decimals.
# =============================================================================
# NOTE: the Korean literals below are the dataset's category labels for
# "master's" and "doctorate" — they must not be translated.
data12["is_grad"] = (data12["Education_Level"].isin(["석사", "박사"]) + 0)
from scipy.stats import ttest_ind
stat, p = ttest_ind(data12.loc[data12["is_grad"] == 0, "Read_Book_per_Year"],
                    data12.loc[data12["is_grad"] == 1, "Read_Book_per_Year"],
                    equal_var = True)
round(p, 3)
# (answer) 0.269
#%%
# =============================================================================
# Q3. Fit a multiple linear regression with yearly book count as the
#     response and the other numeric variables as predictors (using only
#     rows with a bachelor's degree or higher and a known income range).
#     Holding other predictors fixed, how many more books does a
#     40-year-old read than a 30-year-old?  (Round to an integer.)
#     NOTE(review): the stated bachelor+/income filter is not actually
#     applied in the code below — the model is fit on all of data12.
# =============================================================================
# (hint)
# from statsmodels.formula.api import ols
var_list=data12.columns[data12.dtypes != 'object'].drop('Read_Book_per_Year')
form='Read_Book_per_Year~'+'+'.join(var_list)
ols1=ols(form, data=data12).fit()
ols1.summary()
# Age coefficient (0.7894) times the 10-year difference:
q3_ans=0.7894 * 10
# (answer) 7.894
# (answer) 8
#%%
# =============================================================================
# =============================================================================
# # 문제 13 유형(DataSet13_train.csv / DataSet13_test.csv 이용)
# 구분자 :
# comma(“,”), 1500 Rows, 10 Columns, UTF-8 인코딩 /
# comma(“,”), 500 Rows, 10 Columns, UTF-8 인코딩
# 전국의 데이터 분석가 2000명을 대상으로 이직 관련 설문조사를 실시하였다.
# 설문 대상자의 특성 및 이직 의사와 관련 인자를 면밀히 살펴보기 위해 다양한
# 분석을 실시하고자 한다.
# 컬 럼 / 정 의 / Type
# city_development_index / 거주 도시 개발 지수 / Double
# gender / 성별 / String
# relevent_experience / 관련 직무 경험 여부(1 : 유경험) / Integer
# enrolled_university / 대학 등록 형태(1 : 풀타임/파트타임) / Integer
# education_level / 교육 수준 / String
# major_discipline / 전공 / String
# experience / 경력 / Double
# last_new_job / 현 직장 직전 직무 공백 기간 / Double
# training_hours / 관련 직무 교육 이수 시간 / Double
# target / 이직 의사 여부(1 : 의사 있음) / Integer
# =============================================================================
# =============================================================================
import pandas as pd
data13 = pd.read_csv("Dataset_13_train.csv")
#%%
# =============================================================================
# Q1. (train set) Compute the Pearson correlation between experience and
#     the gap before the current job, separately for each gender, and
#     report the larger coefficient (rounded to 2 decimals).
# =============================================================================
data13.columns
# ['city_development_index', 'gender', 'relevent_experience',
#  'enrolled_university', 'education_level', 'major_discipline',
#  'experience', 'last_new_job', 'training_hours', 'target']
data13.groupby('gender')[['experience', 'last_new_job']].corr()
# (answer) 0.45
#%%
# =============================================================================
# Q2. (train set) Test the independence of prior data-analysis experience
#     and job-change intent with a chi-square test; report the p-value
#     rounded to 2 decimals.
#     - STEM majors only
#     - residents of highly developed cities only (city development index
#       above the 85th percentile)
#     - cast target to string before use
# =============================================================================
# (1) convert the target column to string
q2=data13.copy()
q2['target']=q2['target'].astype(str)
q2['target'].dtype
# (2) filter the rows matching the conditions
q2['major_discipline'].value_counts()
base=q2['city_development_index'].quantile(0.85)
q2_1=q2[(q2['major_discipline']=='STEM') &
        (q2['city_development_index'] > base)]
# (3) chi-square test of independence between the two categorical columns
from scipy.stats import chi2_contingency
q2_tab=pd.crosstab(index=q2_1.relevent_experience,
                   columns=q2_1.target)
q2_out=chi2_contingency(q2_tab)[1]
# (41.16381604042102,
#  1.3999022544385146e-10,
#  1,
#  array([[213.35891473,  73.64108527],
#         [745.64108527, 257.35891473]]))
round(q2_out,2)
# (answer) 0.64
# NOTE(review): the captured chi2_contingency output above shows
# p ~= 1.4e-10, which rounds to 0.00 — this recorded answer looks wrong.
#%%
# =============================================================================
# Q3. (train set) HR wants to predict who intends to change jobs.  Fit a
#     decision tree with target as the response and the non-string
#     variables as predictors (all defaults), evaluate its accuracy on
#     "Dataset_13_test.csv", and report it rounded to 2 decimals.
# =============================================================================
# (hint)
# from sklearn.tree import DecisionTreeClassifier
# random_state = 123
x_var= data13.columns[data13.dtypes != 'object'].drop('target')
from sklearn.tree import DecisionTreeClassifier
dt=DecisionTreeClassifier(random_state=123).fit(data13[x_var], data13.target)
test=pd.read_csv('Dataset_13_test.csv')
dt.score(test[x_var], test.target)
# (answer) 0.672
# (answer) 0.67 (when the job-change variable is cast to string)
#%%
# =============================================================================
# =============================================================================
# # 문제 14 유형(DataSet_14.csv 이용)
#
# 구분자 : comma(“,”), 2000 Rows, 9 Columns, UTF-8 인코딩
#
# 온라인 교육업체 싱글캠퍼스에서 런칭한 교육 플랫폼을 보다
# 체계적으로 운영하기 위해 2014년부터 2016년 동안 개설된 강좌
# 2000개를 대상으로 강좌 실적 및 고객의 서비스 분석을 실시하려고
# 한다. 관련 데이터는 다음과 같다.
#
# 컬 럼 / 정 의 / Type
# id / 강좌 일련번호 / Double
# published / 강과 개설일 / String
# subject / 강좌 대주제 / String
# level / 난이도 / String
# price / 가격(만원) / Double
# subscribers / 구독자 수(결제 인원) / Double
# reviews / 리뷰 개수 / Double
# lectures / 강좌 영상 수 / Double
# duration / 강좌 총 길이(시간) / Double
# =============================================================================
# =============================================================================
import pandas as pd
data14 = pd.read_csv("Dataset_14.csv")
#%%
# =============================================================================
# Q1. How many courses have total revenue of at least 100 million KRW and
#     a review rate of at least 10%?
#     - revenue = price * subscribers (price is in units of 10,000 KRW,
#       hence the 10000 threshold below)
#     - review rate = reviews / subscribers
# =============================================================================
data14["income"] = data14["price"] * data14["subscribers"]
data14["review_rate"] = data14["reviews"] / data14["subscribers"]
data14.head(2)
sum((data14["income"] >= 10000) & (data14["review_rate"] >= 0.1))
# (answer) 59
#%%
# =============================================================================
# Q2. For Web Development courses opened in 2016, report the Pearson
#     correlation between course price and subscriber count (rounded to
#     2 decimals), to check whether pricier courses attract fewer users.
# =============================================================================
data14["published"] = pd.to_datetime(data14["published"])
data14["year"] = data14["published"].dt.year
data14.head(2)
data14_sub = data14.loc[(data14["year"] == 2016) & (data14["subject"] == "Web Development"), ]
data14_sub.head(2)
data14_sub[["price", "subscribers"]].corr()
round(0.034392, 2)
# (answer) 0.03
#%%
# =============================================================================
# Q3. Use one-way ANOVA to test whether the mean review rate differs by
#     course opening year; report the test statistic rounded to 1 decimal.
# =============================================================================
# (hint)
# from statsmodels.formula.api import ols
# from statsmodels.stats.anova import anova_lm
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
model = ols(formula = "review_rate ~ C(year)", data = data14).fit()
anova_lm(model)
round(18.542038, 1)
# (answer) 18.5
#%%
# =============================================================================
# =============================================================================
# # 문제 05 유형(Dataset_05_Mart_POS.csv / 이용)
#
# =============================================================================
# Dataset_05_Mart_POS.csv
# 구분자 : comma(“,”), 20488 Rows, 3 Columns, UTF-8 인코딩
# =============================================================================
#
# 원룸촌에 위치한 A마트는 데이터 분석을 통해 보다 체계적인 재고관리와
# 운영을 하고자 한다. 이를 위해 다음의 두 데이터 세트를 준비하였다.
#
# 컬 럼 / 정 의 / Type
# Member_number / 고객 고유 번호 / Double
# Date / 구매일 / String
# itemDescription / 상품명 / String
# =============================================================================
# Dataset_05_item_list.csv
# 구분자 : comma(“,”), 167 Rows, 4 Columns, UTF-8 인코
# =============================================================================
#
# 컬 럼 / 정 의 / Type
# prod_id / 상품 고유 번호 / Double
# prod_nm / 상품명 / String
# alcohol / 주류 상품 여부(1 : 주류) / Integer
# frozen / 냉동 상품 여부(1 : 냉동) / Integer
# =============================================================================
# =============================================================================
pos=pd.read_csv('Dataset_05_Mart_POS.csv')
list1=pd.read_csv('Dataset_05_item_list.csv')
#%%
# =============================================================================
# Q1. (POS data) On the date with the most items sold overall, how many
#     units of that day's best-selling product were sold?
# =============================================================================
# Tip: items-per-date is simply the frequency of each date value.
q1=pos['Date'].value_counts().idxmax()
# 2015-01-21  96
pos[pos['Date'] == q1]['itemDescription'].value_counts().head(1)
# (answer) 7
#%%
# =============================================================================
# Q2. (POS + item list) Hypothesis: alcohol purchases concentrate on
#     Fridays/Saturdays.  Compare the mean daily count of alcohol items
#     bought on Fri/Sat vs the other days with an independent two-sample
#     t-test (first quarter, Jan-Mar only; unequal variances) and report
#     the p-value rounded to 2 decimals.
# =============================================================================
# (1) derive weekday and month columns (month is needed to extract Q1)
pd.to_datetime(pos['Date']).dt.year
pd.to_datetime(pos['Date']).dt.month
pd.to_datetime(pos['Date']).dt.day
pd.to_datetime(pos['Date']).dt.day_name(locale='ko_kr')
q2=pos.copy()
q2['day']=pd.to_datetime(q2['Date']).dt.day_name(locale='ko_kr')
q2['month']=pd.to_datetime(q2['Date']).dt.month
# (2) join in the item attributes (including the alcohol flag)
q2_merge=pd.merge(q2, list1,
                  left_on='itemDescription',
                  right_on='prod_nm', how='left')
# (3) flag Fri/Sat rows — the Korean literals are the day names produced
#     by day_name(locale='ko_kr') and must not be translated
q2_merge['week']=0
q2_merge.loc[q2_merge.day.isin(['금요일','토요일']), 'week']=1
q2_merge.columns
# (4) independent two-sample t-test
# - filter down to January-March
q2_merge2=q2_merge[q2_merge.month.isin([1,2,3])]
from scipy.stats import ttest_ind
# daily counts of alcohol items purchased, split by the Fri/Sat flag
q2_tab=pd.pivot_table(q2_merge2, index='Date',
                      columns='week',
                      values='alcohol',
                      aggfunc='sum')
q2_out=ttest_ind(q2_tab[0].dropna(),
                 q2_tab[1].dropna(),
                 equal_var=False)
q2_out.pvalue
# (answer) 0.023062611047582393 -> 0.02
#%%
# =============================================================================
# Q3. Designate the 10 best-selling products of the year as core products
#     and use one-way ANOVA to test whether their mean daily sales count
#     differs by weekday; report the p-value rounded to 2 decimals.
# =============================================================================
# (hint)
# from statsmodels.formula.api import ols
# from statsmodels.stats.anova import anova_lm
# (1) extract the top-10 products
pr_list=pos['itemDescription'].value_counts().head(10).index
q3=pos[pos['itemDescription'].isin(pr_list)]
# (2) daily counts of core-product sales, then attach the weekday
q3_tab=pd.pivot_table(data=q3, index='Date',
                      values='itemDescription',
                      aggfunc='count')
q3_tab.reset_index(inplace=True)
q3_tab['day']=\
pd.to_datetime(q3_tab['Date']).dt.day_name(locale='ko_kr')
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
ols1=ols('itemDescription~day', data=q3_tab).fit()
anova_lm(ols1)
# (answer) 0.518128 -> 0.52
| StarcoderdataPython |
53110 | """empty message
Revision ID: 2d70b2b7f421
Revises: <PASSWORD>
Create Date: 2017-01-07 15:40:46.326596
"""
# revision identifiers, used by Alembic.
revision = '2d70b2b7f421'
# NOTE(review): '<PASSWORD>' looks like a dataset-anonymization placeholder —
# restore the real parent revision id before running this migration.
down_revision = '<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply the migration: drop the company registration/tax-number columns."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('company', 'com_number')
    op.drop_column('company', 'tax_number')
    ### end Alembic commands ###
def downgrade():
    """Revert the migration: restore both columns as nullable integers."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('company', sa.Column('tax_number', sa.INTEGER(), autoincrement=False, nullable=True))
    op.add_column('company', sa.Column('com_number', sa.INTEGER(), autoincrement=False, nullable=True))
    ### end Alembic commands ###
| StarcoderdataPython |
1773355 | """Test prepare run module."""
from math import isnan
import pytest
from haddock.gear.prepare_run import (
get_expandable_parameters,
populate_mol_parameters,
populate_topology_molecule_params,
)
from haddock.gear.yaml2cfg import read_from_yaml_config
from haddock.modules.topology.topoaa import DEFAULT_CONFIG
# Default topoaa configuration, expanded from the module's YAML defaults.
DEFAULT_DICT = read_from_yaml_config(DEFAULT_CONFIG)
@pytest.mark.parametrize(
    "inp,expected",
    [
        (
            {
                "autohis": None,
                "mol1": {"nhisd", "hisd_1", "hisd_2", "nhise", "hise_1"},
                },
            {"hisd_1", "hisd_2", "hise_1"},
            )
        ]
    )
def test_get_expandable_parameters_topoaa(inp, expected):
    """Only the numbered (expandable) parameter names are extracted."""
    result = get_expandable_parameters(inp, DEFAULT_DICT, "topoaa", 20)
    assert result == expected
def test_populate_topoaa_molecules():
    """Missing mol2 parameters are created and filled with defaults."""
    topoaa = {
        "molecules": ["file1.pdb", "file2.pdb"],
        "mol1": {"cyclicpept": True},
        }
    populate_topology_molecule_params(topoaa)
    assert "mol2" in topoaa
    # Segment ids are assigned alphabetically per molecule.
    assert topoaa["mol1"]["prot_segid"] == "A"
    assert topoaa["mol2"]["prot_segid"] == "B"
    # Explicit user values survive; defaults fill the gaps.
    assert topoaa["mol1"]["cyclicpept"] is True
    assert topoaa["mol2"]["cyclicpept"] is False
    for mol in ("mol1", "mol2"):
        assert isnan(topoaa[mol]["hisd_1"])
        assert isnan(topoaa[mol]["hise_1"])
        assert topoaa[mol]["nhise"] == 0
        assert topoaa[mol]["nhisd"] == 0
def test_populate_topoaa_molecules_2():
    """A user-defined mol2 is kept while mol1 is created from defaults."""
    topoaa = {
        "molecules": ["file1.pdb", "file2.pdb"],
        "mol2": {"cyclicpept": True, "prot_segid": "D"},
        }
    populate_topology_molecule_params(topoaa)
    assert "mol1" in topoaa
    # The user-chosen segid 'D' is preserved; mol1 gets the first free letter.
    assert topoaa["mol1"]["prot_segid"] == "A"
    assert topoaa["mol2"]["prot_segid"] == "D"
    assert topoaa["mol1"]["cyclicpept"] is False
    assert topoaa["mol2"]["cyclicpept"] is True
    for mol in ("mol1", "mol2"):
        assert isnan(topoaa[mol]["hisd_1"])
        assert isnan(topoaa[mol]["hise_1"])
        assert topoaa[mol]["nhise"] == 0
        assert topoaa[mol]["nhisd"] == 0
def test_populate_topoaa_molecules_3():
    """Segid assignment skips letters already claimed by the user."""
    topoaa = {
        "molecules": ["file1.pdb", "file2.pdb", "file3.pdb"],
        "mol2": {"cyclicpept": True, "prot_segid": "C"},
        }
    populate_topology_molecule_params(topoaa)
    assert "mol1" in topoaa
    # mol2 keeps 'C'; the generated ids fill in around it.
    expected_segids = {"mol1": "A", "mol2": "C", "mol3": "B"}
    for mol, segid in expected_segids.items():
        assert topoaa[mol]["prot_segid"] == segid
def test_populate_topoaa_molecules_4():
    """Generated segids continue past a user-claimed letter ('A' here)."""
    topoaa = {
        "molecules": ["file1.pdb", "file2.pdb", "file3.pdb", "file4.pdb"],
        "mol3": {"cyclicpept": True, "prot_segid": "A"},
        }
    populate_topology_molecule_params(topoaa)
    assert "mol1" in topoaa
    # mol3 keeps 'A'; the remaining molecules take B, C, D in order.
    expected_segids = {"mol1": "B", "mol2": "C", "mol3": "A", "mol4": "D"}
    for mol, segid in expected_segids.items():
        assert topoaa[mol]["prot_segid"] == segid
def test_populate_mol_params():
    """Per-molecule params are expanded once per molecule declared in topoaa."""
    params = {
        "topoaa": {"molecules": ["file1.pdb", "file2.pdb", "file3.pdb"]},
        "flexref": {"mol_fix_origin_2": True},
        "caprieval": {},
        }
    populate_mol_parameters(params)
    # One parameter per molecule (1..3) and nothing beyond that.
    for i in (1, 2, 3):
        assert f"mol_fix_origin_{i}" in params["flexref"]
        assert f"mol_shape_{i}" in params["flexref"]
    # Idiomatic membership test (was `assert not ("..." in d)`).
    assert "mol_fix_origin_4" not in params["flexref"]
    assert "mol_shape_4" not in params["flexref"]
    # A pre-existing user value is preserved, not overwritten.
    assert params["flexref"]["mol_fix_origin_2"] is True
    # Modules that take no mol parameters stay untouched.
    assert not params["caprieval"]
| StarcoderdataPython |
120135 | import argparse
import itertools
import logging
import os
import time
from types import SimpleNamespace
import falcon
import pandas
import torch
from falcon_cors import CORS
import waitress
import numpy as np
import json
import re
from torch.utils.data import DataLoader
from tqdm import tqdm
from data import Data
from model import BertSupportNetX
from utils import load_torch_model
from tools.utils import convert_to_tokens
# Both config names map to the same network class; the map exists so the
# JSON config's `model_type` field selects the implementation.
MODEL_MAP={
    "bert": BertSupportNetX,
    "bertxl": BertSupportNetX
}
logging.basicConfig(level=logging.INFO, format='%(asctime)-18s %(message)s')
logger = logging.getLogger()
# Fully permissive CORS policy for the falcon app (development setting).
cors_allow_all = CORS(allow_all_origins=True,
                      allow_origins_list=['*'],
                      allow_all_headers=True,
                      allow_all_methods=True,
                      allow_credentials_all_origins=True
                      )
# Command-line options: server port and the model-config JSON path.
parser = argparse.ArgumentParser()
parser.add_argument(
    '-p', '--port', default=58081,
    help='falcon server port')
parser.add_argument(
    '-c', '--config_file', default='config/bert_config-xl.json',
    help='model config file')
args = parser.parse_args()
# Path consumed by TorchResource.__init__ when the resource is created.
model_config=args.config_file
# def result_to_json(string, tags):
# item = {"string": string, "entities": []}
# entity_name = ""
# entity_start = 0
# idx = 0
# i = -1
# zipped = zip(string, tags)
# listzip = list(zipped)
# last = len(listzip)
# for char, tag in listzip:
# i += 1
# if tag == 3:
# item["entities"].append({"word": char, "start": idx, "end": idx+1, "type":'s'})
# elif tag == 0:
# entity_name += char
# entity_start = idx
# elif tag == 1:
# if (entity_name != "") and (i == last):
# entity_name += char
# item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": 'bms'})
# entity_name = ""
# else:
# entity_name += char
# elif tag == 2: # or i == len(zipped)
# entity_name += char
# item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": 'bms'})
# entity_name = ""
# else:
# entity_name = ""
# entity_start = idx
# idx += 1
# return item
#
class TorchResource:
    """Falcon resource answering reading-comprehension queries with BERT.

    On construction it loads the JSON model config, the tokenizer data
    pipeline, and the trained weights; GET and POST requests both funnel
    into :meth:`bert_classification`.
    """

    def __init__(self):
        logger.info("...")
        # 0. Load config (attribute access via SimpleNamespace).
        with open(model_config) as fin:
            self.config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))
        if torch.cuda.is_available():
            self.device = torch.device('cuda')
        else:
            self.device = torch.device('cpu')
        # 1. Load data pipeline (tokenizer vocab lives next to the model).
        self.data = Data(vocab_file=os.path.join(self.config.model_path, 'vocab.txt'),
                         max_seq_len=self.config.max_seq_len,
                         model_type=self.config.model_type, config=self.config)
        # 2. Load model weights and move to the chosen device.
        self.model = MODEL_MAP[self.config.model_type](self.config)
        self.model = load_torch_model(
            self.model, model_path=os.path.join(self.config.model_path, 'model.bin'))
        self.model.to(self.device)
        logger.info("###")

    def flatten(self, ll):
        """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
        return list(itertools.chain(*ll))

    def cleanall(self, content):
        """Remove every ASCII space from *content*.

        str.replace with no count already replaces all occurrences; the
        original passed a redundant 10**10 count argument.
        """
        return content.replace(" ", "")

    def process_context(self, line):
        """Split *line* into clause-level sentences for the QA model.

        Returns ``[[first_clause, all_clauses]]`` — the nested layout the
        data loader expects for a single-paragraph context.
        """
        line = line.replace("·", "", 100)
        spans = re.split('([,。])', line)
        if len(spans) <= 2:
            # NOTE(review): this fallback re-splits with the same pattern as
            # above, so it can never change the result — presumably a
            # different delimiter set was intended; confirm and fix.
            spans = re.split('([,。])', line)
        if len(spans) <= 2:
            spans = re.split('([;;,。,])', line)
        assert len(spans) > 2, spans
        # Re-attach each captured delimiter to the clause that precedes it.
        spans_sep = []
        for i in range(len(spans) // 2):
            spans_sep.append(spans[2 * i] + spans[2 * i + 1])
        assert len(spans_sep) > 0, spans
        return [[spans_sep[0], spans_sep]]

    def bert_classification(self, content, question):
        """Run the full QA pipeline for one (context, question) pair.

        The pair is wrapped in the dataset's on-disk JSON layout, written
        to a timestamped file under data/ (the Data loader only reads from
        disk; NOTE(review): these files are never cleaned up), re-loaded,
        and fed through the model.

        Returns ``{"data": {"answer": {...}, "sp": {...}}}``.
        """
        logger.info('1:{}'.format( content))
        conv_dic = {}
        conv_dic['_id'] = 0
        conv_dic['context'] = self.process_context(content)
        conv_dic['question'] = question
        conv_dic["answer"] = ""
        conv_dic['supporting_facts'] = []
        rows = [conv_dic]
        filename = "data/{}.json".format(time.time())
        with open(filename, 'w', encoding='utf8') as fw:
            json.dump(rows, fw, ensure_ascii=False, indent=4)
        exam, feats, dataset = self.data.load_file(filename, False)
        data_loader = DataLoader(dataset, batch_size=self.config.batch_size)
        self.model.eval()
        answer_dict = {}
        sp_dict = {}
        tqdm_obj = tqdm(data_loader, ncols=80)
        for step, batch in enumerate(tqdm_obj):
            batch = tuple(t.to(self.device) for t in batch)
            start_logits, end_logits, type_logits, sp_logits, start_position, end_position = self.model(*batch)
            batchsize = batch[0].size(0)
            # batch[5] holds the example ids.
            answer_dict_ = convert_to_tokens(exam, feats, batch[5], start_position.data.cpu().numpy().tolist(),
                                             end_position.data.cpu().numpy().tolist(),
                                             np.argmax(type_logits.data.cpu().numpy(), 1))
            answer_dict.update(answer_dict_)
            predict_support_np = torch.sigmoid(sp_logits).data.cpu().numpy()
            for i in range(predict_support_np.shape[0]):
                cur_sp_pred = []
                cur_id = batch[5][i].item()
                for j in range(predict_support_np.shape[1]):
                    if j >= len(exam[cur_id].sent_names):
                        break
                    # Keep every sentence whose support probability clears
                    # the configured threshold.
                    if predict_support_np[i, j] > self.config.sp_threshold:
                        cur_sp_pred.append(exam[cur_id].sent_names[j])
                sp_dict.update({cur_id: cur_sp_pred})
        # Strip spaces reintroduced by detokenization.
        new_answer_dict = {}
        for key, value in answer_dict.items():
            new_answer_dict[key] = value.replace(" ", "")
        prediction = {'answer': new_answer_dict, 'sp': sp_dict}
        return {"data": prediction}

    def _allow_cors(self, resp):
        """Set the permissive CORS headers shared by GET and POST handlers."""
        resp.set_header('Access-Control-Allow-Origin', '*')
        resp.set_header('Access-Control-Allow-Methods', '*')
        resp.set_header('Access-Control-Allow-Headers', '*')
        resp.set_header('Access-Control-Allow-Credentials', 'true')

    def on_get(self, req, resp):
        """Handle GET requests with ?c=<context>&q=<question> params."""
        logger.info("...")
        self._allow_cors(resp)
        content = req.get_param('c', True)
        question = req.get_param('q', True)
        resp.media = self.bert_classification(content, question)
        logger.info("###")

    def on_post(self, req, resp):
        """Handles POST requests with a JSON body {"context", "question"}."""
        self._allow_cors(resp)
        resp.set_header("Cache-Control", "no-cache")
        data = req.stream.read(req.content_length)
        data = data.decode('utf-8')
        jsondata = json.loads(data)
        content = jsondata['context']
        question = jsondata['question']
        resp.media = self.bert_classification(content, question)
        logger.info("###")
if __name__=="__main__":
    # Stand up the Falcon WSGI app behind waitress; `/z` is the only route.
    api = falcon.API(middleware=[cors_allow_all.middleware])
    api.req_options.auto_parse_form_urlencoded = True
    api.add_route('/z', TorchResource())
    waitress.serve(api, port=args.port, threads=48, url_scheme='http')
| StarcoderdataPython |
2930 | <reponame>youngmg1995/NES-Music-Maker
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 17:14:19 2020
@author: Mitchell
nesm_generator.py
~~~~~~~~~~~~~~~~~
This file serves as a script for using our pre-trained VAE model to generate
brand new NES music soundtracks. NOTE - using the reduced model we only
generate the first melodic voice for each track rather than each of the four
voices present in an NESM track. To do so we first reconstruct our model using
the file VAE class defined in `VAE.py` and the same parameters used in
`model_training`. Then we use functions from the file `generation_utils` to
have our trained model create entirely new and original NES music.
"""
# Imports
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# NOTE - nesmdb folder manually added to environment libraries
from dataset_utils import load_training
from VAE import VAE
from generation_utils import generate_seprsco, latent_SVD, get_latent_vecs,\
plot_track, filter_tracks
import nesmdb
from nesmdb.vgm.vgm_to_wav import save_vgmwav
import tensorflow as tf
import numpy as np
import os, json
### Load Mappings
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Parameters for shape of dataset (note these are also used for model def.)
measures = 8
measure_len = 96
# load data (also returns the label <-> int mappings used for decoding)
training_foldername = '../../nesmdb24_seprsco/train/'
train_save_filename = 'transformed_dataset.json'
dataset , labels2int_map , int2labels_map = \
    load_training(training_foldername, train_save_filename,
                  measures = measures, measure_len = measure_len)
### Reinitiate Model
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
### Model Parameters (must match the values used in model_training)
latent_dim = 124
# one output unit per label, minus the padding entry
input_dim = len(int2labels_map) - 1
dropout = .1
maxnorm = None
vae_b1 , vae_b2 = .02 , .1
print('Reinitiating VAE Model')
# Build Model
model = VAE(latent_dim, input_dim, measures, measure_len, dropout,
            maxnorm, vae_b1 , vae_b2)
# Reload Saved Weights from the training checkpoints
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "model_ckpt")
model.load_weights(checkpoint_prefix)
model.build(tf.TensorShape([None, measures, measure_len, ]))
# Print Summary of Model
model.summary()
### Sample Latent Variable Distributions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Here we use SVD to more effectively sample from the orthogonal components
# of our latent space
# Parameters for sampling
num_songs = 10
print('Generating Latent Samples to Generate {} New Tracks'.format(num_songs))
# Grab distributions of dataset over latent space
# Have to run in batches due to size of the dataset
batch_size = 300
latent_vecs = get_latent_vecs(model, dataset, batch_size)
# Sample from normal distribution (one latent vector per requested song)
rand_vecs = np.random.normal(0.0, 1.0, (num_songs, latent_dim))
# perform SVD, projecting the random draws onto the dataset's components
plot_eigenvalues = True
sample_vecs = latent_SVD(latent_vecs, rand_vecs, plot_eigenvalues)
### Generate New Tracks
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create new seprsco tracks using our model and the random samples
# Seprsco files can later be converted to valid NES music format
# Parameters for track generation (specifically filtering)
p_min = .5
print('Generating New Tracks from Latent Samples')
# Decode samples using VAE
decoded_tracks = model.decoder(sample_vecs)
# Plot first decoded track
print("Example Model Generated Track")
plot_track(decoded_tracks[0])
# Filter Track (drops notes below probability p_min)
decoded_tracks = filter_tracks(decoded_tracks, p_min)
# Plot first filtered track
print("Example Filtered Track")
plot_track(decoded_tracks[0])
# Convert tracks to seprsco format
print('Converting Model Output to Seprsco')
seprsco_tracks = generate_seprsco(decoded_tracks, int2labels_map)
### Convert to WAV
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Convert seprsco tracks to WAV files so we can listen!!!
print('Converting Seprsco to WAV Audio')
wav_tracks = [nesmdb.convert.seprsco_to_wav(track) for track in seprsco_tracks]

### Save WAV Files
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Save our wav tracks to appropriate files (be sure not to overwrite existing)
# Also save latent variables so we can reproduce songs we like
# The output folder is needed by BOTH save branches below, so it is bound
# unconditionally: previously it was only assigned inside `if save_wav:`,
# which raised NameError when only `save_latent_var` was enabled.
wav_folder = 'model_gen_files/'

# Save WAV tracks
save_wav = False
if save_wav:
    print('Saving Generated WAV Audio Tracks')
    for i in range(len(wav_tracks)):
        wav_file = wav_folder + 'VAE_NESM_{}.wav'.format(i)
        save_vgmwav(wav_file, wav_tracks[i])

# Save Latent Variables (keyed by the filename the track would be saved as)
save_latent_var = False
if save_latent_var:
    print('Saving Latent Variables for Generated Tracks')
    latent_filename = os.path.join(wav_folder, "latent_variables.json")
    with open(latent_filename, 'w') as f:
        json.dump({
            'VAE_NESM_{}.wav'.format(i): sample_vecs[i].tolist()
            for i in range(sample_vecs.shape[0])
        }, f)

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#----------------------------------END FILE------------------------------------
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
3394943 | import numpy as np
from copy import copy
from typing import Union, Tuple, Iterable, Sequence
from ._constants import DimsMode
from ...util.event import EmitterGroup
class Dims:
    """Dimensions object modeling multi-dimensional slicing, cropping, and
    displaying in Napari

    Parameters
    ----------
    init_ndim : int, optional
        Initial number of dimensions

    Attributes
    ----------
    events : EmitterGroup
        Event emitter group
    range : list of 3-tuple
        List of tuples (min, max, step), one for each dimension
    point : list of float
        List of floats, one for each dimension
    interval : list of 2-tuple
        List of tuples (min, max), one for each dimension
    mode : list of DimsMode
        List of DimsMode, one for each dimension
    display : list of bool
        List of bool indicating if dimension displayed or not, one for each
        dimension
    ndim : int
        Number of dimensions
    displayed : list of int
        Array of the displayed dimensions
    """

    def __init__(self, init_ndim=0):
        super().__init__()
        # Events: `axis` fires when a single dimension changes, `ndim` when
        # the number of dimensions changes.
        self.events = EmitterGroup(source=self, auto_connect=True, axis=None,
                                   ndim=None)
        self._range = []
        self._point = []
        self._interval = []
        self._mode = []
        self._display = []
        self.ndim = init_ndim

    def __str__(self):
        return "~~".join(map(str, [self.range, self.point, self.interval,
                                   self.mode, self.display]))

    @property
    def range(self):
        """list of 3-tuple: List of tuples (min, max, step), one for each
        dimension
        """
        return copy(self._range)

    @range.setter
    def range(self, range):
        if range == self.range:
            return
        self.ndim = len(range)
        self._range = range

    @property
    def point(self):
        """list of float: List of floats, one for each dimension
        """
        return copy(self._point)

    @property
    def interval(self):
        """list of 2-tuple: List of tuples (min, max), one for each dimension
        """
        return copy(self._interval)

    @property
    def mode(self):
        """list of DimsMode: List of DimsMode, one for each dimension
        """
        return copy(self._mode)

    @property
    def display(self):
        """list: List of bool indicating if dimension displayed or not, one for
        each dimension
        """
        return copy(self._display)

    @property
    def ndim(self):
        """Returns the number of dimensions

        Returns
        -------
        ndim : int
            Number of dimensions
        """
        return len(self.point)

    @ndim.setter
    def ndim(self, ndim):
        # `self.ndim` is derived from `self._point`, so capture it once
        # before mutating the backing lists.
        old_ndim = self.ndim
        if ndim > old_ndim:
            # New dimensions are prepended with default range/point/interval.
            for i in range(old_ndim, ndim):
                self._range.insert(0, (0.0, 1.0, 0.01))
                self._point.insert(0, 0.0)
                self._interval.insert(0, (0.3, 0.7))
                self._mode.insert(0, DimsMode.POINT)
                self._display.insert(0, False)
            # Notify listeners that the number of dimensions have changed
            self.events.ndim()
            # Notify listeners of which dimensions have been affected.
            # (Computed from old_ndim: the original compared against the
            # already-updated self.ndim, which made this loop a no-op.)
            for axis_changed in range(ndim - old_ndim):
                self.events.axis(axis=axis_changed)
        elif ndim < old_ndim:
            # Keep the last `ndim` entries. Using a positive start index so
            # shrinking to ndim == 0 works ([-0:] would keep everything).
            self._range = self._range[old_ndim - ndim:]
            self._point = self._point[old_ndim - ndim:]
            self._interval = self._interval[old_ndim - ndim:]
            self._mode = self._mode[old_ndim - ndim:]
            self._display = self._display[old_ndim - ndim:]
            # Notify listeners that the number of dimensions have changed
            self.events.ndim()

    @property
    def displayed(self):
        """Returns the displayed dimensions

        Returns
        -------
        displayed : list
            Displayed dimensions
        """
        displayed = [i for i, d in enumerate(self.display) if d is True]
        return displayed

    @property
    def indices(self):
        """Tuple of slice objects for slicing arrays on each layer. There is
        one slice object for each layer
        """
        slice_list = []
        z = zip(self.mode, self.display, self.point, self.interval, self.range)
        for (mode, display, point, interval, _range) in z:
            if mode == DimsMode.POINT or mode is None:
                if display:
                    # Displayed dimensions are kept whole.
                    slice_list.append(slice(None, None, None))
                else:
                    slice_list.append(int(round(point)))
            elif mode == DimsMode.INTERVAL:
                # The original checked `display` here but both branches were
                # byte-identical, so the check has been dropped.
                if interval is None:
                    slice_list.append(slice(None))
                else:
                    slice_list.append(slice(int(round(interval[0])),
                                            int(round(interval[1]))))
        return tuple(slice_list)

    def set_range(self, axis: int, range: Sequence[Union[int, float]]):
        """Sets the range (min, max, step) for a given axis (dimension)

        Parameters
        ----------
        axis : int
            Dimension index
        range : tuple
            Range specified as (min, max, step)
        """
        # NOTE: the parameter name shadows builtins.range; kept because
        # callers may pass it by keyword.
        if self.range[axis] != range:
            self._range[axis] = range
            self.events.axis(axis=axis)

    def set_point(self, axis: int, value: Union[int, float]):
        """Sets the point at which to slice this dimension

        Parameters
        ----------
        axis : int
            Dimension index
        value : int or float
            Value of the point
        """
        if self.point[axis] != value:
            self._point[axis] = value
            self.events.axis(axis=axis)

    def set_interval(self, axis: int, interval: Sequence[Union[int, float]]):
        """Sets the interval used for cropping and projecting this dimension

        Parameters
        ----------
        axis : int
            Dimension index
        interval : tuple
            Interval specified with (min, max)
        """
        if self.interval[axis] != interval:
            self._interval[axis] = interval
            self.events.axis(axis=axis)

    def set_mode(self, axis: int, mode: DimsMode):
        """Sets the mode: POINT or INTERVAL

        Parameters
        ----------
        axis : int
            Dimension index
        mode : POINT or INTERVAL
            Whether dimension is in the POINT or INTERVAL mode
        """
        if self.mode[axis] != mode:
            self._mode[axis] = mode
            self.events.axis(axis=axis)

    def set_display(self, axis: int, display: bool):
        """Sets the display boolean flag for a given axis

        Parameters
        ----------
        axis : int
            Dimension index
        display : bool
            Bool which is `True` for display and `False` for slice or project.
        """
        if self.display[axis] != display:
            self._display[axis] = display
            self.events.axis(axis=axis)

    def _set_2d_viewing(self):
        """Display only the last two dimensions; slice/project the rest."""
        for i in range(len(self.display)-2):
            self.set_display(i, False)
        if len(self.display) >= 2:
            self.set_display(-1, True)
            self.set_display(-2, True)
| StarcoderdataPython |
131208 | # encoder.py
# <NAME>
# Encoder Class Adapted from py-gaugette rotary encoder library
import RPi.GPIO as GPIO
import time
import math
import threading
class Encoder:
    """Quadrature rotary-encoder reader polled or driven via GPIO interrupts.

    Tracks raw quadrature `steps` (4 per detent cycle) and exposes them as
    whole `cycles` via :meth:`get_cycles`.
    """

    def __init__(self, pin_a, pin_b):
        self.pin_a = pin_b  # Our Encoders are inverted,
        self.pin_b = pin_a  # which is why this looks swapped
        GPIO.setmode(GPIO.BCM)
        GPIO.setwarnings(False)
        GPIO.setup(self.pin_a, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.setup(self.pin_b, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        self.steps = 0
        self.last_delta = 0
        self.r_seq = self.rotation_sequence()
        # A full detent cycle is four quadrature transitions.
        self.steps_per_cycle = 4
        self.remainder = 0

    def rotation_state(self):
        """Return the raw 2-bit pin state (B in bit 1, A in bit 0)."""
        a_state = GPIO.input(self.pin_a)
        b_state = GPIO.input(self.pin_b)
        r_state = a_state | b_state << 1
        return r_state

    def rotation_sequence(self):
        """Return the Gray-decoded sequence number (0-3) of the pin state."""
        a_state = GPIO.input(self.pin_a)
        b_state = GPIO.input(self.pin_b)
        r_seq = (a_state ^ b_state) | b_state << 1
        return r_seq

    def update(self):
        """Sample the pins and accumulate the signed step delta."""
        delta = 0
        r_seq = self.rotation_sequence()
        if (r_seq != self.r_seq):
            delta = (r_seq - self.r_seq) % 4
            if delta == 3:
                # A -1 move wraps to 3 under mod 4.
                delta = -1
            elif delta == 2:
                # Ambiguous two-step jump: assume same direction as before.
                delta = int(math.copysign(delta, self.last_delta))
            self.last_delta = delta
            self.r_seq = r_seq
            self.steps += delta

    def get_steps(self):
        """Return and reset the accumulated raw step count."""
        steps = self.steps
        self.steps = 0
        return steps

    def get_cycles(self):
        """Return whole detent cycles; fractional steps carry over."""
        self.remainder += self.get_steps()
        cycles = self.remainder // self.steps_per_cycle
        self.remainder %= self.steps_per_cycle
        return cycles

    def start(self):
        """Attach edge-detect interrupts so updates happen automatically."""
        def isr(channel):
            # RPi.GPIO invokes callbacks with the channel number; the
            # original zero-argument callback raised TypeError on every edge.
            self.update()
        GPIO.add_event_detect(self.pin_a, GPIO.BOTH, isr)
        GPIO.add_event_detect(self.pin_b, GPIO.BOTH, isr)
class Worker(threading.Thread):
    """Daemon thread that polls an Encoder at a fixed interval."""

    def __init__(self, pin_a, pin_b):
        threading.Thread.__init__(self)
        self.daemon = True
        self.lock = threading.Lock()
        self.encoder = Encoder(pin_a, pin_b)
        self.stopping = False
        self.delta = 0
        # Seconds between polls of the encoder pins.
        self.delay = 0.001

    def run(self):
        # Poll until stop() flips the flag.
        while True:
            if self.stopping:
                break
            self.encoder.update()
            time.sleep(self.delay)

    def stop(self):
        """Ask the polling loop to exit after its current iteration."""
        self.stopping = True

    def get_steps(self):
        """Return (and reset) the encoder's accumulated step count."""
        return self.encoder.get_steps()
1674228 | <gh_stars>0
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.shortcuts import redirect, render, render_to_response

from .forms import NewUserForm
def index (request):
    # Landing page; no context is passed to the template.
    return render_to_response('laravel_course/index.html')
def logout_request(request):
    """Log the current user out, flash a message, and go to the homepage."""
    logout(request)
    messages.info(request, "Logged out successfully!")
    return redirect("laravel_course:homepage")
def login_request(request):
    """Render the login form and authenticate POST submissions.

    The module previously defined ``login_request`` twice; the first,
    GET-only definition was dead code because this one shadowed it, so
    the two are merged — a GET request (or a failed POST) falls through
    to the blank-form render at the bottom.
    """
    if request.method == 'POST':
        form = AuthenticationForm(request=request, data=request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                messages.info(request, f"You are now logged in as {username}")
                return redirect('/')
            else:
                messages.error(request, "Invalid username or password.")
        else:
            messages.error(request, "Invalid username or password.")
    form = AuthenticationForm()
    return render(request = request,
                  template_name = "laravel_course/login.html",
                  context={"form":form})
1628958 | <gh_stars>1-10
def init_actions_(service, args):
    """
    Return the dependency graph between this service's actions.

    Each key is an action name and its value lists the actions that must
    complete first (see ACTION_DEPS in this module for the expected shape).
    """
    # Simple default: 'test' may only run after 'install' has completed.
    return {'test': ['install']}
def test(job):
    """
    Test recurring actions with hanging jobs.

    Executes a blueprint whose recurring action hangs, waits past the
    configured job timeout, and verifies that no jobs were recorded in
    that window. The outcome string is written to
    ``job.service.model.data.result`` and the repo is destroyed on exit.
    """
    import sys
    import time
    import json
    RESULT_OK = 'OK : %s'
    RESULT_FAILED = 'FAILED : %s'
    RESULT_ERROR = 'ERROR : %s %%s' % job.service.name
    model = job.service.model
    model.data.result = RESULT_OK % job.service.name
    failures = []
    repos = []
    try:
        expected_nr_of_jobs = 0
        ays_client = j.clients.atyourservice.get()
        repo_name = 'sample_repo_recurring'
        repos.append(repo_name)
        bp_name = 'test_recurring_actions_hanging_jobs.yaml'
        execute_bp_res = ays_client.api.ays.executeBlueprint(data={}, blueprint=bp_name, repository=repo_name)
        if execute_bp_res.status_code == 200:
            # create run
            data = json.loads(ays_client.api.ays.createRun(data={}, repository=repo_name).text)
            runid = data['key']
            # execute run and wait out the hang window
            start_time = time.time()
            data = json.loads(ays_client.api.ays.executeRun(data={}, runid=runid, repository=repo_name).text)
            time.sleep(35)  # 30 seconds configured job timeout + 5 seconds
            end_time = time.time()
            nr_of_jobs = len(j.core.jobcontroller.db.jobs.find(actor='test_recurring_actions_1', service='hanging',
                                                               action='execute_hanging_job', fromEpoch=start_time,
                                                               toEpoch=end_time))
            if nr_of_jobs != expected_nr_of_jobs:
                failures.append('Wrong number of jobs found. Expected [%s] found [%s]' % (expected_nr_of_jobs, nr_of_jobs))
        else:
            failures.append('Failed to execute blueprint [%s]' % bp_name)
        if failures:
            model.data.result = RESULT_FAILED % '\n'.join(failures)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; test failures are still captured in the model.
        model.data.result = RESULT_ERROR % str(sys.exc_info()[:2])
    finally:
        job.service.save()
        if repos:
            for repo in repos:
                try:
                    ays_client.api.ays.destroyRepository(data={}, repository=repo)
                except Exception as e:
                    j.logger.logging.error('Error while destroying repo %s. Error %s' % (repo, e))
185807 | from django.contrib import admin
from .models import JobListing
class JobListingAdmin(admin.ModelAdmin):
    """Admin configuration for JobListing: changelist columns, filter, order."""
    # Columns shown on the changelist page.
    list_display = ("title", "employer_name", "status", "created")
    # Sidebar filter on the listing's workflow status.
    list_filter = ("status",)
    # Newest listings first.
    ordering = ("-created",)

admin.site.register(JobListing, JobListingAdmin)
| StarcoderdataPython |
# Prompt for the user's sex until a valid single letter (M/F) is entered.
# Using [:1] instead of [0] so empty or whitespace-only input yields ''
# (which fails validation and re-prompts) instead of raising IndexError.
sexo = str(input('Informe seu sexo [M/F]: ')).strip().upper()[:1]
# After .upper() only 'M'/'F' can ever match, so lowercase letters need
# not be listed (the original's 'MmFf' substring test was redundant).
while sexo not in ('M', 'F'):
    sexo = str(input('Informação incorreta, digite novamente [M/F]: ')).strip().upper()[:1]
print(f'Obrigado! Seu sexo foi computado como {sexo}!')
96434 | <reponame>ApproxEng/approxeng.holochassis
from abc import ABCMeta, abstractmethod
from math import asin, pi, sqrt
from time import sleep
from approxeng.chassis import rotate_vector, Motion, DeadReckoning, rotate_point
from approxeng.chassis.dynamics import MotionLimit
from euclid import Vector2, Point2
class Waypoints:
    """Drives a robot through a list of world-space waypoints in order.

    Each waypoint is reached by repeatedly re-issuing a drive command; the
    drive's on_approach callback flips ``finished`` once the robot is
    within ``min_distance`` of the current target.
    """

    def __init__(self, drive, sleep_time=0.1, speed=200, turn_speed=pi, min_distance=10):
        # True once the current waypoint has been reached.
        self.finished = False
        # Seconds between successive drive-command refreshes.
        self.sleep_time = sleep_time
        self.drive = drive
        self.speed = speed
        self.turn_speed = turn_speed
        self.min_distance = min_distance

    def follow(self, waypoints):
        """Visit each (x, y) waypoint in turn, recursing on the tail."""
        if len(waypoints) == 0:
            return
        else:
            self.finished = False
            # Re-issue the drive command each tick until the drive's
            # approach callback marks this waypoint as reached.
            while not self.finished:
                sleep(self.sleep_time)
                def on_approach():
                    self.finished = True
                self.drive.drive_at_world(waypoints[0][0], waypoints[0][1], speed=self.speed, on_approach=on_approach)
            self.follow(waypoints[1:])
class Drive:
"""
High level class to manage the robot's motion, aggregates the chassis, motors and a bit of planning logic.
"""
__metaclass__ = ABCMeta
def __init__(self, chassis, counts_per_revolution=1.0):
"""
Create a new Drive instance
:param chassis:
A :class:`approxeng.holochassis.chassis.HoloChassis` used to compute kinematics
:param counts_per_revolution:
Counts per revolution used by the dead reckoning code
"""
self.chassis = chassis
# Maximum translation speed in mm/s
self.max_trn = chassis.get_max_translation_speed()
# Maximum rotation speed in radians/2
self.max_rot = chassis.get_max_rotation_speed()
self.front = 0.0
self.dead_reckoning = DeadReckoning(chassis=chassis, counts_per_revolution=counts_per_revolution)
self.motion_limit = None
def set_motion_limit(self, accel_time):
"""
Set a motion limit, or remove an existing one. The limit fixes the maximum rate of change in the requested
motion.
:param accel_time:
Either None to set no limit, or a number of seconds which will set the minimum time required to go from
a standing start to full speed in any component of the requested motion.
"""
if accel_time is None:
self.motion_limit = None
else:
self.motion_limit = MotionLimit(
linear_acceleration_limit=self.max_trn / accel_time,
angular_acceleration_limit=self.max_rot / accel_time)
def set_motion(self, motion):
"""
Set the motor speeds according to the supplied motion relative to the robot's front.
:param motion:
A motion, in robot space and relative to self.front. Any motion limit defined will be applied
to the supplied motion. If this is None nothing will be done.
"""
if motion is None:
return
if self.front != 0.0:
motion = Motion(translation=rotate_vector(motion.translation, self.front), rotation=motion.rotation)
if self.motion_limit is not None:
motion = self.motion_limit.limit_and_return(motion)
self.set_wheel_speeds_from_motion(motion)
def reset_dead_reckoning(self):
"""
Reads encoder values, then resets the pose held by the dead reckoning module. We do this because otherwise
any hardware implementations which track absolute position will lead to a huge incorrect reading for the first
dead reckoning period after a startup.
"""
self.update_dead_reckoning()
self.dead_reckoning.reset()
def estimated_pose(self):
"""
Return the pose from the dead reckoning managed by this class. Convenience method so we don't have to do
e.g. drive.dead_reckoning.pose all the time
:return:
"""
return self.dead_reckoning.pose
def drive_at(self, x, y, speed, turn_speed=pi, min_distance=10, on_approach=None):
"""
Set and return a motion to get to a target specified relative to the robot's coordinate system. Note
that the 'front' is added when the motion is applied to the robot, so this implicitly is relative to that,
with positive y axis in the direction of the robot's front.
:param x:
X coordinate of the target in mm
:param y:
Y coordinate of the target in mm
:param speed:
Desired linear speed
:param turn_speed:
If a motion can't be calculated then turn on the spot instead, at turn_speed radians per second
:param min_distance:
If defined, and the target is closer, then stop
:param on_approach:
If defined, and min_distance is defined and satisfied, call this function when we hit min_distance
:return:
The :class:`approxeng.holochassis.chassis.Motion` that was applied.
"""
# Explicitly cast to floats in case we're not...
x = float(x)
y = float(y)
speed = float(speed)
turn_speed = float(turn_speed)
min_distance = float(min_distance)
if min_distance is not None and sqrt(x * x + y * y) < min_distance:
motion = Motion(translation=Vector2(0, 0), rotation=0)
if on_approach is not None:
on_approach()
else:
if x == 0:
# Straight ahead, avoid future division by zero!
motion = Motion(translation=Vector2(0, speed), rotation=0)
elif abs(y) < abs(x) or y <= 0:
# Turn first without moving
if x > 0:
motion = Motion(translation=Vector2(0, 0), rotation=turn_speed)
else:
motion = Motion(translation=Vector2(0, 0), rotation=-turn_speed)
else:
radius = y * y / x
# Angle is clockwise rotation
angle = asin(x / y)
arc_length = angle * radius
print(x, y, angle, arc_length)
motion = Motion(translation=Vector2(0, speed), rotation=angle * speed / arc_length)
self.set_motion(motion)
return motion
def drive_at_world(self, x, y, speed, turn_speed=pi, min_distance=10, on_approach=None):
"""
Similar to drive_at, but x and y are specified in world coordinates, and the method uses the dead reckoning
logic to map from world to robot coordinates
:param x:
:param y:
:param speed:
:param turn_speed:
:param min_distance:
:param on_approach:
:return:
"""
p = self.dead_reckoning.pose.position
target_point = Point2(x=x - p.x, y=y - p.y)
target_point = rotate_point(target_point, -self.front - self.dead_reckoning.pose.orientation)
return self.drive_at(x=target_point.x, y=target_point.y, speed=speed, turn_speed=turn_speed,
min_distance=min_distance, on_approach=on_approach)
@abstractmethod
def set_wheel_speeds_from_motion(self, motion):
"""
Set wheel speeds based on a :class:`approxeng.holochassis.chassis.Motion` instance
"""
pass
    @abstractmethod
    def update_dead_reckoning(self):
        """
        Read angles from the motors and use them to update the current dead reckoning pose.

        NOTE(review): implementations presumably need to be called frequently for the
        integration to stay accurate — confirm against concrete subclasses.

        :returns:
            A :class:`approxeng.holochassis.chassis.Pose` containing the current dead reckoning pose
        """
        pass
| StarcoderdataPython |
18302 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# os and sys are required by the third_party fallback below; the original
# omitted them, so the fallback path raised NameError instead of locating ply.
import os
import sys
import unittest

import mojo_lexer

# Try to load the ply module, if not, then assume it is in the third_party
# directory.
try:
  # Disable lint check which fails to find the ply module.
  # pylint: disable=F0401
  from ply import lex
except ImportError:
  # This assumes this file is in src/mojo/public/tools/bindings/pylib/parse/.
  module_path, module_name = os.path.split(__file__)
  third_party = os.path.join(module_path, os.pardir, os.pardir, os.pardir,
                             os.pardir, os.pardir, os.pardir, 'third_party')
  sys.path.append(third_party)
  # pylint: disable=F0401
  from ply import lex
# This (monkey-patching LexToken to make comparison value-based) is evil, but
# we'll do it anyway. (I'm pretty sure ply's lexer never cares about comparing
# for object identity.)
def _LexTokenEq(self, other):
  """Value-based equality for ply LexTokens: all four fields must match."""
  return (self.type == other.type and self.value == other.value and
          self.lineno == other.lineno and self.lexpos == other.lexpos)
lex.LexToken.__eq__ = _LexTokenEq
def _MakeLexToken(type, value, lineno=1, lexpos=0):
  """Makes a LexToken with the given parameters. (Note that lineno is 1-based,
  but lexpos is 0-based.)"""
  token = lex.LexToken()
  token.type = type
  token.value = value
  token.lineno = lineno
  token.lexpos = lexpos
  return token
def _MakeLexTokenForKeyword(keyword, **kwargs):
  """Makes a LexToken for the given keyword."""
  token_type = keyword.upper()
  token_value = keyword.lower()
  return _MakeLexToken(token_type, token_value, **kwargs)
class MojoLexerTest(unittest.TestCase):
  """Tests mojo_lexer (in particular, Lexer)."""

  def __init__(self, *args, **kwargs):
    unittest.TestCase.__init__(self, *args, **kwargs)
    # Clone all lexer instances from this one, since making a lexer is slow.
    self._zygote_lexer = lex.lex(mojo_lexer.Lexer("my_file.mojom"))

  def testValidSingleKeywords(self):
    """Tests valid, single keywords."""
    # assertEqual is used throughout (assertEquals is a deprecated alias,
    # removed in Python 3.12).
    keywords = ("handle", "data_pipe_consumer", "data_pipe_producer",
                "message_pipe", "import", "module", "struct", "interface",
                "enum")
    for keyword in keywords:
      self.assertEqual(self._SingleTokenForInput(keyword),
                       _MakeLexTokenForKeyword(keyword))

  def testValidSingleTokens(self):
    """Tests valid, single (non-keyword) tokens."""
    # (input text, expected token type) pairs, covering names, literals and
    # every punctuation token the lexer knows.
    cases = (("asdf", "NAME"),
             ("@123", "ORDINAL"),
             ("456", "INT_CONST_DEC"),
             ("0765", "INT_CONST_OCT"),
             ("0x01aB2eF3", "INT_CONST_HEX"),
             ("123.456", "FLOAT_CONST"),
             ("'x'", "CHAR_CONST"),
             ("\"hello\"", "STRING_LITERAL"),
             ("+", "PLUS"),
             ("-", "MINUS"),
             ("*", "TIMES"),
             ("/", "DIVIDE"),
             ("%", "MOD"),
             ("|", "OR"),
             ("~", "NOT"),
             ("^", "XOR"),
             ("<<", "LSHIFT"),
             (">>", "RSHIFT"),
             ("=", "EQUALS"),
             ("=>", "RESPONSE"),
             ("(", "LPAREN"),
             (")", "RPAREN"),
             ("[", "LBRACKET"),
             ("]", "RBRACKET"),
             ("{", "LBRACE"),
             ("}", "RBRACE"),
             ("<", "LANGLE"),
             (">", "RANGLE"),
             (";", "SEMI"),
             (",", "COMMA"),
             (".", "DOT"))
    for text, token_type in cases:
      self.assertEqual(self._SingleTokenForInput(text),
                       _MakeLexToken(token_type, text))

  def _TokensForInput(self, text):
    """Gets a list of tokens for the given input string."""
    lexer = self._zygote_lexer.clone()
    lexer.input(text)
    rv = []
    while True:
      tok = lexer.token()
      if not tok:
        return rv
      rv.append(tok)

  def _SingleTokenForInput(self, text):
    """Gets the single token for the given input string. (Raises an exception
    if the input string does not result in exactly one token.)"""
    toks = self._TokensForInput(text)
    assert len(toks) == 1
    return toks[0]
# Run the test suite when executed directly.
if __name__ == "__main__":
  unittest.main()
| StarcoderdataPython |
1765456 | <gh_stars>100-1000
'''
Register modules here. Module-specific parameters in the config .ini file
can be added under a section with the same name as the module.
2019-2020 <NAME>
'''
# set up Celery configuration
import celery_worker
from .LabelUI.app import LabelUI
from .Database.app import Database
from .FileServer.app import FileServer
from .UserHandling.app import UserHandler
from .Reception.app import Reception
from .ProjectAdministration.app import ProjectConfigurator
from .ProjectStatistics.app import ProjectStatistics
from .DataAdministration.app import DataAdministrator
from .StaticFiles.app import StaticFileServer
from .AIDEAdmin.app import AIDEAdmin
from .ModelMarketplace.app import ModelMarketplace
from .TaskCoordinator.app import TaskCoordinator
#TODO
from .AIController.app import AIController
from .AIWorker.app import AIWorker
# Maps module names to their implementing classes. Per the module docstring,
# each name can also appear as a section in the config .ini file to carry
# module-specific parameters.
REGISTERED_MODULES = {
    'LabelUI': LabelUI,
    'AIController': AIController,
    'AIWorker': AIWorker,
    'Database': Database,
    'FileServer': FileServer,
    'UserHandler': UserHandler,
    'Reception': Reception,
    'ProjectConfigurator': ProjectConfigurator,
    'ProjectStatistics': ProjectStatistics,
    'DataAdministrator': DataAdministrator,
    'StaticFileServer': StaticFileServer,
    'AIDEAdmin': AIDEAdmin,
    'ModelMarketplace': ModelMarketplace,
    'TaskCoordinator': TaskCoordinator
}
4833451 | from ..web import Links
from . import WebEntity
__all__ = ['Image']
class Image (WebEntity):
    """
    Represents an image posted to a board.
    """

    def __init__ (self, board, tim, ext, filename, md5, fsize, w, h):
        """
        Stores the raw image metadata used to build URLs and representations.
        """
        attributes = dict(board=board, tim=tim, ext=ext, filename=filename,
                          md5=md5, fsize=fsize, w=w, h=h)
        for name, value in attributes.items():
            setattr(self, name, value)

    def __str__ (self):
        """
        Returns a string representation of the object.
        """
        return self.url

    def __repr__ (self):
        """
        Returns a string representation of the object fit for eval.
        """
        field_reprs = ', '.join(repr(value) for value in (
            self.board, self.tim, self.ext, self.filename,
            self.md5, self.fsize, self.w, self.h))
        return '{}({})'.format(type(self).__name__, field_reprs)

    @property
    def url (self):
        """
        Returns an url to the image.
        """
        path = '/{}/{}{}'.format(self.board, self.tim, self.ext)
        return Links.createImageURL (path)

    @property
    def apiurl (self):
        """
        Returns an url to the image, included for webcache download.
        """
        return self.url
| StarcoderdataPython |
118870 | from pyradioconfig.parts.jumbo.calculators.calc_synth import CALC_Synth_jumbo
class CALC_Synth_nixi(CALC_Synth_jumbo):
    # Nixi synth calculations are inherited unchanged from the Jumbo part;
    # presumably this subclass exists only so a part-specific calculator
    # class resolves for 'nixi' — confirm against the part registry.
    pass
197693 | <filename>build/cpp/verify_runtime_deps.py
#!/usr/bin/env python2.7
# Copyright 2018 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import sys
def has_packaged_file(needed_file, deps):
    """Returns True if the given file could be found in the given deps.

    Fix: the original fell off the end of the function and returned None for
    "not found"; callers only tested truthiness, but this now always returns
    a proper bool.
    """
    return any(
        needed_file == os.path.normpath(packaged['source'])
        for dep in deps
        for packaged in dep['files'])


def has_missing_files(runtime_files, package_deps):
    """Returns True if a runtime file is missing from the given deps.

    Prints one line per missing file so the build log shows everything that
    needs fixing, not just the first problem.
    """
    # Renamed from 'has_missing_files' (which shadowed this very function) and
    # 'file' (which shadowed the builtin).
    missing = False
    for runtime_file in runtime_files:
        # Some libraries are only known to GN as ABI stubs, whereas the real
        # runtime dependency is generated in parallel as a ".so.impl" file.
        if (not has_packaged_file(runtime_file, package_deps) and
                not has_packaged_file('%s.impl' % runtime_file, package_deps)):
            print('No package dependency generates %s' % runtime_file)
            missing = True
    return missing
def main():
    """Verify that every runtime dependency of a prebuilt library is packaged.

    Reads the GN-generated runtime-deps list and the library's SDK manifest,
    checks each runtime file against the package dependencies, and writes a
    stamp file on success.

    :return: 1 if any runtime file is missing; None (exit code 0) otherwise.
    """
    parser = argparse.ArgumentParser(
        "Verifies a prebuilt library's runtime dependencies")
    parser.add_argument(
        '--root-build-dir',
        help='Path to the root build directory',
        required=True)
    parser.add_argument(
        '--runtime-deps-file',
        help='Path to the list of runtime deps',
        required=True)
    parser.add_argument(
        '--manifest',
        help='Path to the target\'s SDK manifest file',
        required=True)
    parser.add_argument(
        '--stamp', help='Path to the stamp file to generate', required=True)
    args = parser.parse_args()

    # Read the list of runtime dependencies generated by GN.
    def normalize_dep(dep):
        return os.path.normpath(os.path.join(args.root_build_dir, dep.strip()))
    with open(args.runtime_deps_file, 'r') as runtime_deps_file:
        # Materialized as a list: on Python 3 a bare map() is a one-shot
        # iterator, which would also break the 'deps += [atom]' below.
        runtime_files = [normalize_dep(line)
                         for line in runtime_deps_file.readlines()]

    # Read the list of package dependencies for the library's SDK incarnation.
    with open(args.manifest, 'r') as manifest_file:
        manifest = json.load(manifest_file)
    atom_id = manifest['ids'][0]
    def find_atom(id):
        return next(a for a in manifest['atoms'] if a['id'] == id)
    atom = find_atom(atom_id)
    # List comprehension instead of map(): 'map(...) += [atom]' raises
    # TypeError under Python 3.
    deps = [find_atom(a) for a in atom['deps']]
    deps += [atom]

    # Check whether all runtime files are available for packaging.
    if has_missing_files(runtime_files, deps):
        return 1

    with open(args.stamp, 'w') as stamp:
        stamp.write('Success!')
# Propagate main()'s return value as the process exit code.
if __name__ == '__main__':
    sys.exit(main())
| StarcoderdataPython |
149740 | __author__ = "<NAME>"
__copyright__ = "Copyright 2022, <NAME>"
__credits__ = ["<NAME>"]
__license__ = "mit"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
from ..item.PDFObject import PDFObject
from ..item.PDFDictionary import PDFDictionary
from ..item.PDFList import PDFList
from ..item.PDFStream import PDFStream
from ..item.PDFReference import PDFReference
from ..pdf.PDF import PDF
def group_contents(pdf: PDF):
    """Groups contents objects contained in the same page object.

    Pages whose /Contents entry is a list of more than one stream get their
    streams concatenated (space-separated) into the first stream object; the
    now-redundant stream objects are removed from the PDF.

    :param pdf: The PDF file for which to group contents objects.
    :type pdf: PDF
    """
    # Find pages with contents list.
    def any_page_with_contents_list(_, item):
        # Matches /Type /Page dictionaries whose /Contents is a list of 2+ refs.
        return (
            type(item) == PDFObject and type(item.value) == PDFDictionary and
            b"Type" in item and item[b"Type"] == b"Page" and
            b"Contents" in item and type(item[b"Contents"]) == PDFList and
            len(item[b"Contents"]) > 1
        )
    objects_to_remove = []
    for _, item in pdf.find(any_page_with_contents_list):
        # Decode and concatenate all referenced content streams in order.
        grouped_contents = b" ".join([
            pdf.get(reference).decode_stream()
            for reference in item[b"Contents"]
        ])
        # The first content object will receive the grouped contents stream.
        first_content = pdf.get(item[b"Contents"][0])
        first_content.stream = PDFStream(grouped_contents)
        # The stream is stored decoded, so stale filter metadata must go.
        if b"Filter" in first_content:
            first_content.value.delete(b"Filter")
        if b"DecodeParms" in first_content:
            first_content.value.delete(b"DecodeParms")
        # Every stream after the first is now redundant.
        for index in range(1, len(item[b"Contents"])):
            objects_to_remove.append(item[b"Contents"][index].obj_num)
        item.value[b"Contents"] = PDFReference(first_content)
    # Remove contents that have been grouped.
    def any_grouped_contents(_, item):
        return type(item) == PDFObject and item.obj_num in objects_to_remove
    to_remove = [index for index, _ in pdf.find(any_grouped_contents)]
    # Pop from the end first so earlier indices stay valid.
    to_remove.sort(reverse=True)
    for index in to_remove:
        pdf.pop(index)
| StarcoderdataPython |
183211 | <reponame>bmjhit/tf-estimator-tutorials
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import apache_beam as beam
import tensorflow as tf
import tensorflow_transform as tft
import tensorflow_transform.coders as tft_coders
from tensorflow_transform.beam import impl
from tensorflow_transform.beam.tft_beam_io import transform_fn_io
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import dataset_metadata
###########################################################
# Preprocess Reuter Dataset
###########################################################
def get_paths(file_pattern, test_mode=False):
    """Return the files matching *file_pattern* (e.g. './data/*.sgm').

    In test mode only the first matched path is returned.
    """
    import tensorflow as tf
    matched = tf.gfile.Glob(file_pattern)
    return matched[:1] if test_mode else matched
def get_articles(file_path):
    """Parse one Reuters SGML file into a list of article dicts.

    Each dict has 'title', 'content' and 'topics' keys; articles with any
    unparsed field are dropped.

    file_path = './data/reut2-000.sgm'
    """
    import bs4
    import tensorflow as tf
    data = tf.gfile.GFile(file_path).read()
    soup = bs4.BeautifulSoup(data, "html.parser")
    articles = []
    for raw_article in soup.find_all('reuters'):
        article = {
            'title': get_title(raw_article),
            'content': get_content(raw_article),
            'topics': get_topics(raw_article),
        }
        # Skip articles where a field failed to parse. NOTE(review): the []
        # check looks vestigial — get_topics returns '' rather than an empty
        # list; confirm before removing.
        if None not in article.values():
            if [] not in article.values():
                articles.append(article)
    return articles
def get_title(article):
    """Return the article's ASCII-encoded title, or None when absent."""
    node = article.find('text').title
    if node is None:
        return None
    return node.text.encode('ascii', 'ignore')
def get_content(article):
    """Return the article body as newline-joined sentences, or None if absent.

    NOTE(review): written for Python 2 — under Python 3 .encode() yields
    bytes, so the str-argument .replace() calls below would fail.
    """
    import nltk
    content = article.find('text').body
    if content != None:
        content = content.text.encode('ascii', 'ignore')
        # Strip the Reuters end-of-article marker, then flatten line breaks.
        content = content.replace('\n Reuter\n\x03', '')
        content = content.replace('\n', ' ')
        try:
            content = '\n'.join(nltk.sent_tokenize(content))
        except LookupError:
            # The punkt tokenizer data is fetched lazily on first use.
            nltk.download('punkt')
            content = '\n'.join(nltk.sent_tokenize(content))
    return content
def get_topics(article):
    """Return the article's topics as one comma-separated string ('' if none)."""
    names = [node.text.encode('ascii', 'ignore')
             for node in article.topics.children]
    return ','.join(names) if names else ''
###########################################################
# TensorFlow Transform
###########################################################
def get_metadata():
    """Return DatasetMetadata describing the raw inputs: three scalar string
    features (title, content, topics)."""
    from tensorflow_transform.tf_metadata import dataset_schema
    from tensorflow_transform.tf_metadata import dataset_metadata
    metadata = dataset_metadata.DatasetMetadata(dataset_schema.Schema({
        'title': dataset_schema.ColumnSchema(
            tf.string, [], dataset_schema.FixedColumnRepresentation()),
        'content': dataset_schema.ColumnSchema(
            tf.string, [], dataset_schema.FixedColumnRepresentation()),
        'topics': dataset_schema.ColumnSchema(
            tf.string, [], dataset_schema.FixedColumnRepresentation()),
    }))
    return metadata
def preprocess_fn(input_features):
    """tf.Transform preprocessing: pass raw fields through and attach
    Universal Sentence Encoder embeddings.

    Fix: the original stored the content embedding under 'title_embed' and
    the title embedding under 'content_embed' (the variable names were
    swapped relative to the columns being embedded); each embedding now
    lands under its own key.
    """
    import tensorflow_transform as tft
    title_embed = tft.apply_function(get_embed_title, input_features['title'])
    content_embed = tft.apply_function(get_embed_content, input_features['content'])
    output_features = {
        'topics': input_features['topics'],
        'title': input_features['title'],
        'content': input_features['content'],
        'title_embed': title_embed,
        'content_embed': content_embed,
    }
    return output_features
def get_embed_title(
    title,
    module_url='https://tfhub.dev/google/universal-sentence-encoder/1'):
    """Embed a batch of title strings with the Universal Sentence Encoder."""
    import tensorflow as tf
    import tensorflow_hub as hub
    encoder = hub.Module(module_url)
    embeddings = encoder(title)
    return embeddings
def get_embed_content(
    content, delimiter='\n',
    module_url='https://tfhub.dev/google/universal-sentence-encoder/1'):
    """Embed a batch of documents: each document is split into sentences on
    *delimiter*, every sentence is embedded with the Universal Sentence
    Encoder, and the sentence embeddings are mean-pooled into one vector per
    document."""
    import tensorflow as tf
    import tensorflow_hub as hub
    module = hub.Module(module_url)
    def _map_fn(t):
        # t is a scalar string tensor holding one newline-joined document.
        t = tf.cast(t, tf.string)
        t = tf.string_split([t], delimiter).values
        e = module(t)
        # Mean-pool over the sentence axis to get a fixed-size vector.
        e = tf.reduce_mean(e, axis=0)
        return tf.squeeze(e)
    embed = tf.map_fn(_map_fn, content, dtype=tf.float32)
    return embed
###########################################################
# Write data to files or bq table
###########################################################
def to_bq_row(entry):
    """Round embedding floats to 3 decimals, in place, and return the row.

    Keeps rows compact for BigQuery insertion.
    """
    for key in ('title_embed', 'content_embed'):
        entry[key] = [round(float(value), 3) for value in entry[key]]
    return entry
def get_bigquery_schema():
    """
    Returns a bigquery schema.
    """
    from apache_beam.io.gcp.internal.clients import bigquery
    table_schema = bigquery.TableSchema()
    # (name, type, mode) for every output column.
    for name, col_type, mode in (('topics', 'string', 'nullable'),
                                 ('title', 'string', 'nullable'),
                                 ('content', 'string', 'nullable'),
                                 ('title_embed', 'float', 'repeated'),
                                 ('content_embed', 'float', 'repeated')):
        field = bigquery.TableFieldSchema()
        field.name = name
        field.type = col_type
        field.mode = mode
        table_schema.fields.append(field)
    return table_schema
###########################################################
# Dataflow Pipeline
###########################################################
def run(pipeline_options, known_args):
    """Build and run the Beam pipeline: parse Reuters SGML articles, analyze
    and transform them with tf.Transform, then write the transform fn, a
    BigQuery table, and (optionally) TFRecords / debug text output."""
    pipeline = beam.Pipeline(options=pipeline_options)
    with impl.Context(known_args.transform_temp_dir):
        # One element per article across all matched SGML files.
        articles = (
            pipeline
            | 'Get Paths' >> beam.Create(get_paths(known_args.file_pattern))
            | 'Get Articles' >> beam.Map(get_articles)
            | 'Get Article' >> beam.FlatMap(lambda x: x)
        )
        dataset = (articles, get_metadata())
        # Analyze computes the transform graph; TransformDataset applies it.
        transform_fn = (
            dataset
            | 'Analyse dataset' >> impl.AnalyzeDataset(preprocess_fn)
        )
        transformed_data_with_meta = (
            (dataset, transform_fn)
            | 'Transform dataset' >> impl.TransformDataset()
        )
        transformed_data, transformed_metadata = transformed_data_with_meta
        # Persist the transform fn so serving can replay the preprocessing.
        transform_fn | 'Export Transform Fn' >> transform_fn_io.WriteTransformFn(
            known_args.transform_export_dir)
        (
            transformed_data
            | 'Convert to Insertable data' >> beam.Map(to_bq_row)
            | 'Write to BigQuery table' >> beam.io.WriteToBigQuery(
                project=known_args.bq_project,
                dataset=known_args.bq_dataset,
                table=known_args.bq_table,
                schema=get_bigquery_schema(),
                create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
                write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE)
        )
        if known_args.enable_tfrecord:
            transformed_data | 'Write TFRecords' >> beam.io.tfrecordio.WriteToTFRecord(
                file_path_prefix='{0}/{1}'.format(known_args.tfrecord_export_dir, 'reuter'),
                file_name_suffix='.tfrecords',
                coder=tft_coders.example_proto_coder.ExampleProtoCoder(transformed_metadata.schema))
        if known_args.enable_debug:
            transformed_data | 'Debug Output' >> beam.io.textio.WriteToText(
                file_path_prefix=known_args.debug_output_prefix, file_name_suffix='.txt')
    job = pipeline.run()
    # Only block for completion on the local DirectRunner.
    if pipeline_options.get_all_options()['runner'] == 'DirectRunner':
        job.wait_until_finish()
| StarcoderdataPython |
172919 | <reponame>SanUni2020/ProjetoIntegrador-I
from flask import Flask, render_template, request, url_for, redirect
from flask_sqlalchemy import SQLAlchemy
# Flask application backed by a local SQLite database of comments.
app = Flask(__name__, template_folder='templates')
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///comentarios.sqlite3'
db = SQLAlchemy(app)
class Comentario(db.Model):
    # ORM model for a single visitor comment.
    id = db.Column('id', db.Integer, primary_key=True, autoincrement=True)
    # Author name (max 50 chars) and comment body (max 300 chars).
    nome = db.Column(db.String(50), nullable=False)
    comentario = db.Column(db.String(300), nullable=False)
    def __init__(self, nome, comentario):
        self.nome = nome
        self.comentario = comentario
@app.route('/')
def index():
    """Render the home page listing every stored comment."""
    return render_template('index.html', comentarios=Comentario.query.all())
@app.route("/mensagem")
def mensagem():
return render_template("mensagem.html")
@app.route("/obrigado")
def obrigado():
return render_template("obrigado.html")
@app.route('/<id>')
def comenta_pelo_id(id):
    """Render the index page scoped to a single comment looked up by id."""
    comenta = Comentario.query.get(id)
    # NOTE(review): index() renders this same template with 'comentarios=',
    # while here it receives 'comenta' — verify index.html handles both.
    return render_template('index.html', comenta=comenta)
@app.route('/novo', methods=['GET', 'POST'])
def add():
    """Show the new-comment form; persist the submitted comment on POST."""
    if request.method != 'POST':
        return render_template('novo.html')
    novo = Comentario(request.form['nome'], request.form['comentario'])
    db.session.add(novo)
    db.session.commit()
    return redirect(url_for('index'))
@app.route('/edita/<int:id>', methods=['GET', 'POST'])
def edit(id):
    """Show the edit form for a comment; apply submitted changes on POST."""
    comenta = Comentario.query.get(id)
    if request.method != 'POST':
        return render_template('edita.html', comenta=comenta)
    comenta.nome = request.form['nome']
    comenta.comentario = request.form['comentario']
    db.session.commit()
    return redirect(url_for('index'))
@app.route('/delete/<int:id>')
def delete(id):
    """Remove the comment with the given id, then return to the listing."""
    alvo = Comentario.query.get(id)
    db.session.delete(alvo)
    db.session.commit()
    return redirect(url_for('index'))
if __name__ == '__main__':
    # Create the tables on first run, then serve with the debug reloader.
    db.create_all()
    app.run(debug=True)
| StarcoderdataPython |
3201918 | import pandas as pd
import numpy as np
from .extractors import TfIdfExtractor
from .encoders import GenericLabelBinarizer
from typing import List, Tuple
from .extractors import BaseExtractor
from .encoders import BaseEncoder
def check_data_alignment(labeled_df, unlabeled_df):
    """Validate that the provided dataframes carry the columns the store
    expects; raises ValueError on the first problem found."""
    if labeled_df is None and unlabeled_df is None:
        raise ValueError("Labeled and unlabeled data cannot be both None.")
    if labeled_df is not None:
        for column in ["annotation_unit_id", "text", "label", "train"]:
            if column not in labeled_df:
                print(f"Missing required column {column} in the labeled dataframe.")
                raise ValueError("Received labeled data doesn't contain required columns.")
    if unlabeled_df is not None:
        for column in ["annotation_unit_id", "text"]:
            if column not in unlabeled_df:
                print(f"Missing required column {column} in the unlabeled dataframe.")
                raise ValueError("Received unlabeled data doesn't contain required columns.")
class BasicStore():
    """In-memory store pairing labeled/unlabeled text dataframes with a
    feature extractor and a label encoder that are kept in sync with the
    data as it is appended or annotated."""
    # Class-level placeholders; real per-instance values are set in __init__.
    __labeled_df = None
    __unlabeled_df = None
    __extractor: BaseExtractor = None
    __encoder: BaseEncoder = None
    def __init__(self, extractor: BaseExtractor=None, encoder: BaseEncoder=None):
        """Create an empty store; defaults to TF-IDF features and a generic
        label binarizer when no extractor/encoder is supplied."""
        super().__init__()
        self.__labeled_df = pd.DataFrame(columns=[ 'annotation_unit_id', 'text', 'label', 'train' ])
        self.__unlabeled_df = pd.DataFrame(columns=[ 'annotation_unit_id', 'text' ])
        # Cannot do as a default parameter, otherwise same instance would be shared
        # across different stores.
        if extractor is None: extractor = TfIdfExtractor()
        if encoder is None: encoder = GenericLabelBinarizer()
        self.__extractor = extractor.assign_store(self)
        self.__encoder = encoder .assign_store(self)
    def append_data(self, labeled_df=None, unlabeled_df=None):
        """Append new labeled and/or unlabeled rows, then forward the deltas
        to the extractor and (for labeled data) the encoder."""
        check_data_alignment(labeled_df, unlabeled_df)
        if labeled_df is not None:
            self.__labeled_df = self.__labeled_df.append(labeled_df, ignore_index=True, sort=False)
        if unlabeled_df is not None:
            self.__unlabeled_df = self.__unlabeled_df.append(unlabeled_df, ignore_index=True, sort=False)
        # Send the delta to the extractor, so it can decide what to do
        self.__extractor.extract(labeled_delta=labeled_df, unlabeled_delta=unlabeled_df)
        # Only call re-encode if there is new labeled data.
        if labeled_df is not None:
            self.__encoder.encode(labeled_delta_df=labeled_df)
    def update_with_annotation(self, labeled_delta: pd.DataFrame):
        """Move annotations from the unlabeled dataframe to the train."""
        # Add the new labels to the labeled dataframe
        self.__labeled_df = self.__labeled_df.append(labeled_delta, ignore_index=True, sort=False)
        # Find the filter of data to be now removed from the unlabeled set
        # (note: 'filter' shadows the builtin; local only).
        filter = ~self.__unlabeled_df['text'].isin(labeled_delta['text'])
        # Filter it out from the unlabeled dataset
        self.__unlabeled_df = self.__unlabeled_df[filter]
        self.__extractor.refit()
        self.__encoder.refit()
    @property
    def train_df(self):
        """Labeled rows marked as training data."""
        return self.__labeled_df[ self.__labeled_df['train'] == True ]
    @property
    def test_df(self):
        """Labeled rows marked as test data."""
        return self.__labeled_df[ self.__labeled_df['train'] == False ]
    @property
    def labeled_df(self):
        """All labeled rows (train and test)."""
        return self.__labeled_df
    @property
    def unlabeled_df(self):
        """Rows not yet annotated."""
        return self.__unlabeled_df
    @property
    def labeled_Xs(self) -> List[int]:
        """Extracted features for the labeled rows."""
        return self.__extractor.labeled_Xs
    @property
    def unlabeled_Xs(self) -> List[int]:
        """Extracted features for the unlabeled rows."""
        return self.__extractor.unlabeled_Xs
    @property
    def Ys(self) -> List[int]:
        """Encoded labels for the labeled rows."""
        return self.__encoder.Ys
    @property
    def XYs(self) -> Tuple[List[int], List[int]]:
        """(features, labels) pair for all labeled rows."""
        return (self.labeled_Xs, self.Ys)
    @property
    def word_to_index_map(self):
        """Vocabulary mapping exposed by the extractor."""
        return self.__extractor.word_to_index_map
    @property
    def index_to_label_map(self) -> dict:
        """Mapping from encoded label index to label name."""
        return self.__encoder.index_to_label_map
    @property
    def available_labels(self) -> list:
        """All label names known to the encoder."""
        return list(self.index_to_label_map.values())
    # TODO: performance could be improved
    # train_filter could be replaced with the index position or other more efficient techniques
    # (e.g. having two dataframe directly). We are avoiding index since at the moment we don't
    # want to assume that index are coherent (e.g. for 10 elements, index is 0...9, that could
    # not be the case sometime). This is future work, but probably the easiest is to have two
    # dataframes directly (anyway, that's how stuff look externally), with the caveaut of merging
    # them on labeled stuff.
    def _filter_XYs(self, train: bool):
        """Return the (features, labels) restricted to the train or test split."""
        train_filter = self.__labeled_df['train'] == train
        # We have to ravel: https://stackoverflow.com/questions/29778035/scipy-sparse-csr-matrix-row-filtering-how-to-properly-achieve-it
        to_keep_train = np.ravel(np.array(train_filter))
        filtered_Xs = self.labeled_Xs[to_keep_train, :]
        filtered_Ys = self.Ys[train_filter]
        return filtered_Xs, filtered_Ys
    @property
    def train_XYs(self) -> Tuple[List[int], List[int]]:
        """(features, labels) pair for the training split."""
        Xs = self._filter_XYs(True)
        return Xs
    @property
    def test_XYs(self) -> Tuple[List[int], List[int]]:
        """(features, labels) pair for the test split."""
        Xs = self._filter_XYs(False)
        return Xs
# def get_data(self, split_type=None):
#
# if split_type not in [None, "train", "test", "unlabeled"]:
# raise ValueError(f"split_type must be None or one of 'train', 'test', or 'unlabeled', received {split_type}")
#
# if split_type is None:
# return self.__df
# else:
# return self.__df[ self.__df['split'] == split_type ]
# def get_annotation_unit_ids(self, split_type):
# df = self.get_data(split_type=split_type)
# return df['annotation_unit_id'].unique()
# def get_labeled_data(self):
# annotation_ids = set(self.get_annotation_unit_ids('label'))
# instance_idx = self.__df.loc[ self.__df['annotation_unit_id' ].isin( annotation_ids )].index
# return self.__Xs[instance_idx, :], self.__Ys[instance_idx, :]
#
# def get_test_data(self):
# instance_idx = self.df.loc[self.df['annotation_unit_id'].isin(self.test_idx)].index
# return self.features[instance_idx, :], self.labels[instance_idx, :]
#
# def get_unlabeled_data(self):
# instance_idx = self.df.loc[self.df['annotation_unit_id'].isin(self.unlabeled_unit_idx)].index
# return self.features[instance_idx, :], instance_idx | StarcoderdataPython |
3317622 | # MIT License
# Copyright 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Functions that transform the raw data into trainable data."""
from itertools import product, starmap
from functools import partial
from typing import List, Tuple
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances, cosine_similarity
from tqdm import tqdm
def get_claim_map(
    n:int,
    source_locations: np.ndarray,
    bhw: Tuple[int, int, int],
    model_src_vals: List[np.ndarray],
) -> np.ndarray:
    """Build a per-pixel source "claim map".

    For every pixel, the ``n`` sources closest to it (Euclidean distance in
    pixel coordinates) split the pixel: each selected source's claim is its
    non-negative model flux divided by the summed flux of the selected
    sources, computed independently per band.

    Fixes vs. the original: removes two bare ``except:`` clauses whose debug
    prints re-raised ``ValueError("")``, and replaces the side-effecting
    ``for _ in starmap(...): pass`` pipeline with a plain loop. Numeric
    behavior is unchanged.

    :param n: maximum number of sources that may claim a pixel.
    :param source_locations: (y, x) source positions, shape [n_sources, 2].
    :param bhw: (bands, height, width); kept for interface compatibility —
        the actual shape is taken from ``model_src_vals``.
    :param model_src_vals: one [bands, height, width] model array per source.
    :return: float32 array of shape [height, width, bands, n]; the last axis
        lists claims nearest-source-first, padded with zeros when fewer than
        ``n`` sources exist, and falls back to a uniform 1/n split for
        zero-flux bands.
    """
    n_bands, height, width = model_src_vals[0].shape
    output_array = np.zeros((height, width, n_bands, n), dtype=np.float32)

    for i in range(height):
        for j in range(width):
            # Indices of the (at most n) sources nearest to this pixel.
            dists = np.linalg.norm(source_locations - np.array([i, j]), axis=1)
            closest = np.argsort(dists)[:n]

            # Per-source flux in every band at (i, j): shape [m, bands].
            flux = np.array([model_src_vals[k][:, i, j] for k in closest])
            if flux.shape[0] < n:
                # Fewer than n sources exist: pad with zero-flux rows.
                flux = np.pad(
                    flux,
                    ((0, n - flux.shape[0]), (0, 0)),
                    mode="constant",
                    constant_values=0,
                )

            # Negative model flux carries no claim.
            flux = np.clip(flux, a_min=0, a_max=None)
            totals = flux.sum(axis=0)  # per-band total over selected sources
            with np.errstate(invalid="ignore", divide="ignore"):
                normed = flux / totals
            # Bands with no flux at all (0/0 -> NaN) get a uniform split.
            normed[np.isnan(normed)] = 1.0 / n
            output_array[i, j, ...] = normed.T  # [bands, n]

    return output_array
# ==============================================================================
# Discretize claim vector directions
# ==============================================================================
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# ENCODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def get_claim_vector_magnitudes_single_pixel(
    neighborhood_vectors: np.ndarray,  # [8, 2] unit vectors toward the 8-connected neighbors
    claim_vector_magnitude: np.ndarray,  # [h, w, b, 8], written in place
    claim_map: np.ndarray,  # [h, w, b, 8], written in place
    model_vals: List[np.ndarray],  # one [bands, h, w] model array per source
    src_centers: np.ndarray,  # [n_srcs, 2] (y, x) source centers
    y: int,
    x: int,
    b: int
) -> None:
    """Encode pixel (y, x, b): for each of the 8 neighborhood directions pick the
    best-matching source and store its projected distance and its flux claim.

    Mutates claim_vector_magnitude[y, x, b, :] and claim_map[y, x, b, :];
    returns None.
    """
    # Offsets from this pixel to every source center.
    relative_vectors = src_centers - np.array([y, x])
    # Per-source flux at this pixel/band, clipped to be non-negative.
    src_fluxes = np.array([max(model_vals[i][b, y, x], 0) for i in range(len(model_vals))])
    max_flux = src_fluxes.max()
    # Max-normalized flux; stays all-zero when no source has positive flux here.
    normed_flux = src_fluxes / max_flux if max_flux > 0 else src_fluxes
    flx_sum = src_fluxes.sum()
    uniform_dist = np.ones_like(src_fluxes) / src_fluxes.shape[0]
    # Sum-to-one flux fractions, falling back to uniform when total flux is zero.
    normed_sum_to_one = src_fluxes / src_fluxes.sum() if flx_sum > 0 else uniform_dist
    # Direction agreement between each neighborhood direction and each source offset.
    # NOTE(review): cosine_similarity / euclidean_distances are presumably
    # sklearn.metrics.pairwise -- confirm against the file's imports.
    cosine_measure = cosine_similarity(neighborhood_vectors, relative_vectors)
    euclidean_distance = euclidean_distances(neighborhood_vectors, relative_vectors)
    # Per-direction distance normalization; the 1e-5 floor avoids division by zero.
    euclidean_norm = np.maximum(euclidean_distance.max(axis=1, keepdims=True), 1e-5)
    normed_euclidean_distance = euclidean_distance / euclidean_norm
    # Score favors sources that are aligned with the direction, close, and bright.
    metric = cosine_measure * (1 - normed_euclidean_distance) * (normed_flux[np.newaxis, :])
    closest_srcs = np.argmax(metric, axis=1)  # winning source index per direction
    selected_srcs = relative_vectors[closest_srcs, :]
    # Scalar projection of each chosen source offset onto its unit direction.
    _claim_magnitudes = (selected_srcs * neighborhood_vectors).sum(axis=1)
    # Directions that picked the same source split that source's claim evenly.
    idxs, counts = np.unique(closest_srcs, return_counts=True)
    coefs = np.reciprocal(counts.astype(np.float32))
    _claim_map = np.array(list(map(
        lambda i: coefs[idxs==i][0] * normed_sum_to_one[i],
        closest_srcs
    )))
    claim_vector_magnitude[y, x, b, :] = _claim_magnitudes
    claim_map[y, x, b, :] = _claim_map
def get_claim_vector_image_and_map_discrete_directions(
    source_locations: np.ndarray,
    bkg: np.ndarray,
    bhw: Tuple[int, int, int],
    model_src_vals: List[np.ndarray],
):
    """Encode the whole image into discrete-direction claim-vector magnitudes
    and claim maps, both shaped [h, w, b, 8]. `bkg` is accepted for interface
    parity but not read here."""
    n_bands, height, width = bhw
    # Unit vectors toward the 8-connected neighbors; [1:] drops the (0, 0) entry.
    directions = np.array(list(product([0, -1, 1], [0, -1, 1]))[1:], dtype=np.float32)
    directions = directions / np.linalg.norm(directions, axis=-1)[:, np.newaxis]
    claim_vector_magnitude = np.zeros([height, width, n_bands, 8], dtype=np.float32)
    claim_map = np.zeros([height, width, n_bands, 8], dtype=np.float32)
    src_centers = np.stack(np.nonzero(source_locations), axis=1)  # [n_srcs, 2]
    # Per-pixel encoder writes into the two output arrays in place.
    for row in range(height):
        for col in range(width):
            for band in range(n_bands):
                get_claim_vector_magnitudes_single_pixel(
                    directions,
                    claim_vector_magnitude,
                    claim_map,
                    model_src_vals,
                    src_centers,
                    row,
                    col,
                    band,
                )
    return claim_vector_magnitude, claim_map
# ENCODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# DECODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def decode_discrete_vectors_single_pixel(
    output:np.ndarray, # [n, h, w, b], accumulated in place
    neighborhood_vectors: np.ndarray, # [8, 2] unit direction vectors
    flux:np.ndarray, # [h, w, b]
    claim_vector_magnitude:np.ndarray, # [h, w, b, 8]
    claim_map:np.ndarray, # [h, w, b, 8]
    src_centers:np.ndarray, # [n, 2]
    y:int,
    x:int,
    b:int
) -> None:
    """Decode pixel (y, x, b): rebuild each of the 8 claim vectors from its
    stored magnitude, match it to the nearest source center, and accumulate the
    pixel's claimed flux shares into `output`. Mutates `output`; returns None."""
    pixel_flux = flux[y, x, b]
    pixel_magnitudes = claim_vector_magnitude[y, x, b, :].copy()
    pixel_claim_map = claim_map[y, x, b, :].copy()
    # Reconstruct each claim vector as magnitude * unit direction.
    relative_vectors = neighborhood_vectors * pixel_magnitudes[:, np.newaxis]
    relative_centers = src_centers - np.array([y, x])
    distances = euclidean_distances(relative_vectors, relative_centers) # [n_neighborhood, n_centers]
    closest_src = np.argmin(distances, axis=1)
    distributed_flux = pixel_flux * pixel_claim_map
    def update_output(src_idx:int, flx:float):
        # Accumulate this direction's flux share onto its claimed source.
        output[src_idx, y, x, b] += flx
    for _ in starmap(update_output, zip(closest_src, distributed_flux)):
        pass
def get_sources_discrete_directions(
    flux_image: np.ndarray, # [h, w, b]
    claim_vector_magnitude: np.ndarray, # [h, w, b, 8]
    claim_map: np.ndarray, # [h, w, b, 8]
    background_map: np.ndarray, # [h, w]
    center_of_mass: np.ndarray, # [h, w]
    bkg_thresh_coef: float = 0.7,
) -> np.ndarray: # [n, h, w, b]
    """Decode a full image: find source centers from the center-of-mass map,
    then split every pixel's flux among them via the discrete-direction claim
    vectors. Returns a per-source flux cube [n, h, w, b].

    NOTE: `background_map` and `bkg_thresh_coef` are currently unused because
    the background filter at the bottom is commented out.
    """
    y, x, b = flux_image.shape
    # Source centers = local maxima of the center-of-mass map.
    src_locations = non_maximum_suppression(7, 0.1, center_of_mass) # [h, w]
    # +0.5 shifts integer pixel coordinates to pixel midpoints.
    src_centers = np.stack(np.nonzero(src_locations), axis=1) + 0.5 # [n, 2]
    output = np.zeros([src_centers.shape[0], y, x, b], dtype=np.float32)
    # Unit vectors toward the 8-connected neighbors; [1:] drops (0, 0).
    neighborhood_vectors = np.array(list(product([0, -1, 1], [0, -1, 1]))[1:], dtype=np.float32)
    neighborhood_vectors /= np.linalg.norm(neighborhood_vectors, axis=-1)[:, np.newaxis]
    idxs = product(range(y), range(x), range(b))
    decode_f = partial(
        decode_discrete_vectors_single_pixel,
        output,
        neighborhood_vectors,
        flux_image,
        claim_vector_magnitude,
        claim_map,
        src_centers
    )
    # Drive the per-pixel decoder for its side effects on `output`.
    for _ in starmap(decode_f, idxs):
        pass
    #filter out background pixels
    #bkg_filter = background_map[np.newaxis, :, :, np.newaxis] > bkg_thresh_coef
    #return output * bkg_filter
    return output
# DECODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ==============================================================================
# Discretize claim vector directions
# ==============================================================================
# ==============================================================================
# Closest n-sources claim vector
# ==============================================================================
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# ENCODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def get_n_closest_claim_vectors_single_pixel(
    claim_vectors: np.ndarray, # [h, w, b, n, 2], written in place
    claim_map: np.ndarray, # [h, w, b, n], written in place
    model_vals: List[np.ndarray], # one [bands, h, w] model array per source
    src_centers: np.ndarray, # [n_srcs, 2]
    n: int,
    y: int,
    x: int,
    b: int,
) -> None:
    """Encode pixel (y, x, b): store offsets to the n nearest sources and the
    flux fraction each claims. Mutates claim_vectors/claim_map; returns None."""
    relative_vectors = src_centers - np.array([y, x]) # [n_srcs, 2]
    relative_distances = np.linalg.norm(relative_vectors, axis=-1) # [n_srcs,]
    raw_closest_sources = np.argsort(relative_distances)[:n] # [min(n_srcs, n), ]
    # When fewer than n sources exist, repeat the last selected index to pad.
    num_pad = n - raw_closest_sources.shape[0]
    if num_pad > 0:
        n_closest_sources = np.pad(raw_closest_sources, (0, num_pad), mode="edge")
    else:
        n_closest_sources = raw_closest_sources
    selected_srcs = relative_vectors[n_closest_sources]
    # Non-negative flux each candidate source contributes at this pixel/band.
    src_fluxes = np.array([max(model_vals[i][b, y, x], 0) for i in raw_closest_sources])
    sum_flux = src_fluxes.sum()
    if sum_flux > 0:
        normed_flux = src_fluxes / sum_flux
    else:
        # Zero-flux pixel: fall back to a uniform split among the candidates.
        raw_n = raw_closest_sources.shape[0]
        normed_flux = np.ones([raw_n], dtype=np.float32) / raw_n
    # Duplicated (padded) indices share their source's claim equally.
    idxs, counts = np.unique(n_closest_sources, return_counts=True)
    coefs = np.reciprocal(counts.astype(np.float32))
    claim = np.array(list(map(
        lambda i: coefs[idxs==i][0] * normed_flux[i==raw_closest_sources][0],
        n_closest_sources
    )))
    claim_vectors[y, x, b, ...] = selected_srcs
    claim_map[y, x, b, ...] = claim
def get_n_closest_claim_vectors(
    source_locations: np.ndarray,
    bkg: np.ndarray,
    bhw: Tuple[int, int, int],
    model_src_vals: List[np.ndarray],
    n: int,
) -> Tuple[np.ndarray, np.ndarray]:  # [h, w, b, n], [h, w, b, n, 2]
    """Encode every pixel with its n-closest-source claim vectors and claim
    maps. `bkg` is accepted for interface parity but not read here."""
    n_bands, height, width = bhw
    src_centers = np.stack(np.nonzero(source_locations), axis=1)  # [n_srcs, 2]
    claim_vector = np.zeros([height, width, n_bands, n, 2], dtype=np.float32)
    claim_map = np.zeros([height, width, n_bands, n], dtype=np.float32)
    # Per-pixel encoder writes into the two output arrays in place.
    for row in range(height):
        for col in range(width):
            for band in range(n_bands):
                get_n_closest_claim_vectors_single_pixel(
                    claim_vector,
                    claim_map,
                    model_src_vals,
                    src_centers,
                    n,
                    row,
                    col,
                    band,
                )
    return claim_vector, claim_map
# ENCODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# DECODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def decode_n_closest_sources_single_pixel(
    output:np.ndarray,
    flux:np.ndarray,
    claim_vector:np.ndarray,
    claim_map:np.ndarray,
    src_centers:np.ndarray,
    y:int,
    x:int,
    b:int,
) -> None:
    """Split the flux of pixel (y, x, b) among the sources nearest to each of
    its claim vectors, accumulating into `output` in place."""
    center_offsets = src_centers - np.array([y, x])
    vectors = claim_vector[y, x, b, ...].copy()  # [n, 2]
    # Owner of each claim vector = the closest source center.
    gaps = euclidean_distances(vectors, center_offsets)  # [n, n_src_centers]
    owners = np.argmin(gaps, axis=1)
    shares = flux[y, x, b] * claim_map[y, x, b, ...].copy()  # [n,]
    for owner, share in zip(owners, shares):
        output[owner, y, x, b] += share
def decode_n_closest_sources(
    flux:np.ndarray,
    claim_vector:np.ndarray,
    claim_map:np.ndarray,
    src_centers:np.ndarray,
    background_map: np.ndarray, # [h, w]
    center_of_mass: np.ndarray, # [h, w]
    bkg_thresh_coef: float = 0.7,
) -> np.ndarray:
    """Decode per-source flux cubes [n, h, w, b] from n-closest claim data.

    NOTE: the `src_centers` argument is immediately overwritten by centers
    derived from `center_of_mass`; `background_map` and `bkg_thresh_coef`
    are not read in the current implementation.
    """
    peaks = non_maximum_suppression(7, 0.1, center_of_mass)  # [h, w]
    # +0.5 shifts the detected peaks to pixel midpoints.
    src_centers = np.stack(np.nonzero(peaks), axis=1) + 0.5  # [n, 2]
    height, width, n_bands = flux.shape
    output = np.zeros([src_centers.shape[0], height, width, n_bands], dtype=np.float32)
    pixel_idxs = product(range(height), range(width), range(n_bands))
    for row, col, band in tqdm(pixel_idxs, total=height * width * n_bands):
        decode_n_closest_sources_single_pixel(
            output, flux, claim_vector, claim_map, src_centers, row, col, band
        )
    return output
# DECODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ==============================================================================
# Closest n-sources claim vector
# ==============================================================================
# ==============================================================================
# Closest flux-weighted n-sources claim vector
# ==============================================================================
# vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# ENCODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def get_n_closest_fw_claim_vectors_single_pixel(
    claim_vectors: np.ndarray, # [h, w, b, n, 2], written in place
    claim_map: np.ndarray, # [h, w, b, n], written in place
    model_vals: List[np.ndarray], # list(n) of per-source [bands, h, w] models
    src_centers: np.ndarray, # [n, 2]
    n: int,
    y: int,
    x: int,
    b: int,
) -> None:
    """Encode pixel (y, x, b): rank sources by a combined closeness/brightness
    score, keep the top n, and store their offsets and flux claims.
    Mutates claim_vectors/claim_map; returns None."""
    relative_vectors = src_centers - np.array([y, x]) # [n_srcs, 2]
    relative_distances = np.linalg.norm(relative_vectors, axis=-1) # [n_srcs,]
    # NOTE(review): if the farthest source is at distance 0 (single source on
    # this exact pixel) this is 0/0 -> NaN; confirm that cannot occur upstream.
    normed_distances = relative_distances / relative_distances.max() # [n_srcs, ]
    src_fluxes = np.array([max(model_vals[i][b, y, x], 0) for i in range(len(model_vals))]) # [n_srcs, ]
    max_flux = src_fluxes.max()
    if max_flux <= 0:
        # Zero-flux pixel: uniform weights for both normalizations.
        normed_flux = np.ones([src_fluxes.shape[0]]) / src_fluxes.shape[0] # [n_srcs, ]
        normed_sum_to_one = np.ones([src_fluxes.shape[0]]) / src_fluxes.shape[0] # [n_srcs, ]
    else:
        normed_flux = src_fluxes / src_fluxes.max() # [n_srcs, ]
        normed_sum_to_one = src_fluxes / src_fluxes.sum() # [n_srcs, ]
    # Higher score = closer and brighter; take the n best.
    metric = (1 - normed_distances) * normed_flux # [n_srcs, ]
    top_srcs = np.argsort(-metric)[:n] # [min(n, n_srcs), ]
    # Pad by repeating the last index when fewer than n sources exist.
    num_pad = n - top_srcs.shape[0]
    if num_pad > 0:
        n_closest_sources = np.pad(top_srcs, (0, num_pad), mode="edge") # [n, ]
    else:
        n_closest_sources = top_srcs # [n, ]
    selected_srcs = relative_vectors[n_closest_sources] # [n, 2]
    src_fluxes = np.array([max(model_vals[i][b, y, x], 0) for i in top_srcs]) # [min(n, n_srcs), ]
    sum_flux = src_fluxes.sum()
    if sum_flux > 0:
        normed_flux = src_fluxes / sum_flux # [min(n, n_srcs), ]
    else:
        # NOTE(review): divides by n rather than the actual candidate count, so
        # these weights do not sum to 1 when n_srcs < n (the analogous function
        # above divides by raw count) -- confirm which is intended.
        normed_flux = np.ones([src_fluxes.shape[0]], dtype=np.float32) / n # [min(n, n_srcs), ]
    # Duplicated (padded) indices split their source's claim evenly.
    idxs, counts = np.unique(n_closest_sources, return_counts=True) # [min(n, n_srcs), ], [min(n, n_srcs), ]
    coefs = np.reciprocal(counts.astype(np.float32)) # [min(n, n_srcs), ]
    claim = np.array(list(map(
        lambda i: coefs[idxs==i][0] * normed_flux[i==top_srcs][0],
        n_closest_sources
    )))
    claim_vectors[y, x, b, ...] = selected_srcs
    claim_map[y, x, b, ...] = claim
def get_n_closest_fw_claim_vectors_maps(
    source_locations: np.ndarray,
    bkg: np.ndarray,
    bhw: Tuple[int, int, int],
    model_src_vals: List[np.ndarray],
    n: int,
) -> Tuple[np.ndarray, np.ndarray]:  # [h, w, b, n], [h, w, b, n, 2]
    """Encode every pixel with flux-weighted n-closest claim vectors and claim
    maps. `bkg` is accepted for interface parity but not read here."""
    n_bands, height, width = bhw
    src_centers = np.stack(np.nonzero(source_locations), axis=1)  # [n_srcs, 2]
    claim_vector = np.zeros([height, width, n_bands, n, 2], dtype=np.float32)
    claim_map = np.zeros([height, width, n_bands, n], dtype=np.float32)
    # Per-pixel encoder writes into the two output arrays in place.
    for row in range(height):
        for col in range(width):
            for band in range(n_bands):
                get_n_closest_fw_claim_vectors_single_pixel(
                    claim_vector,
                    claim_map,
                    model_src_vals,
                    src_centers,
                    n,
                    row,
                    col,
                    band,
                )
    return claim_vector, claim_map
# ENCODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# DECODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def decode_n_closest_fw_sources_single_pixel(
    output:np.ndarray,
    flux:np.ndarray,
    claim_vector:np.ndarray,
    claim_map:np.ndarray,
    src_centers:np.ndarray,
    y:int,
    x:int,
    b:int,
) -> None:
    """Split the flux of pixel (y, x, b) among the sources nearest to each of
    its flux-weighted claim vectors, accumulating into `output` in place."""
    center_offsets = src_centers - np.array([y, x])
    vectors = claim_vector[y, x, b, ...].copy()  # [n, 2]
    # Owner of each claim vector = the closest source center.
    gaps = euclidean_distances(vectors, center_offsets)  # [n, n_src_centers]
    owners = np.argmin(gaps, axis=1)
    shares = flux[y, x, b] * claim_map[y, x, b, ...].copy()  # [n,]
    for owner, share in zip(owners, shares):
        output[owner, y, x, b] += share
def decode_n_closest_fw_sources(
    flux:np.ndarray,
    claim_vector:np.ndarray,
    claim_map:np.ndarray,
    src_centers:np.ndarray
) -> np.ndarray:
    """Decode per-source flux cubes [n_srcs, h, w, b] from flux-weighted
    n-closest claim data.

    NOTE(review): a second function with this exact name is defined later in
    this module and shadows this one at import time -- confirm which definition
    callers are meant to get.
    """
    height, width, n_bands = flux.shape
    output = np.zeros([src_centers.shape[0], height, width, n_bands], dtype=np.float32)
    pixel_idxs = product(range(height), range(width), range(n_bands))
    for row, col, band in tqdm(pixel_idxs, total=height * width * n_bands):
        decode_n_closest_fw_sources_single_pixel(
            output, flux, claim_vector, claim_map, src_centers, row, col, band
        )
    return output
# DECODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ==============================================================================
# Closest flux-weighted n-sources claim vector
# ==============================================================================
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ==============================================================================
# Closest n-sources claim vector avg map
# ==============================================================================
# ENCODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def get_n_closest_avg_claim_vector_single_pixel(
    claim_vectors: np.ndarray, # [h, w, n, 2], written in place
    claim_map: np.ndarray, # [h, w, n], written in place
    model_vals: List[np.ndarray], # one [bands, h, w] model array per source
    src_centers: np.ndarray, # [n_srcs, 2]
    n: int,
    y: int,
    x: int,
) -> None:
    """Encode pixel (y, x): store offsets to the n nearest sources and a claim
    map built from each source's flux fraction averaged over all bands.

    Mutates claim_vectors[y, x] and claim_map[y, x]; returns None.
    """
    relative_vectors = src_centers - np.array([y, x]) # [n_srcs, 2]
    relative_distances = np.linalg.norm(relative_vectors, axis=-1) # [n_srcs,]
    raw_closest_sources = np.argsort(relative_distances)[:n] # [min(n_srcs, n), ]
    num_pad = n - raw_closest_sources.shape[0]
    # BUG FIX: the original branches were inverted -- padding was skipped
    # exactly when it was needed (fewer than n sources), so the assignment to
    # claim_vectors[y, x] below failed on a shape mismatch. This now matches
    # the padding logic of the other *_single_pixel encoders in this module.
    if num_pad > 0:
        n_closest_sources = np.pad(raw_closest_sources, (0, num_pad), mode="edge")
    else:
        n_closest_sources = raw_closest_sources
    selected_srcs = relative_vectors[n_closest_sources]
    claim_vectors[y, x, ...] = selected_srcs
    def get_normed_src_fluxes(band:int):
        # Sum-to-one flux fractions of the candidate sources in one band,
        # falling back to uniform when the pixel has no flux in that band.
        src_fluxes = np.array([max(model_vals[i][band, y, x], 0) for i in raw_closest_sources])
        sum_flux = src_fluxes.sum()
        if sum_flux > 0:
            normed_flux = src_fluxes / sum_flux
        else:
            # BUG FIX: use the actual candidate count (the original used n),
            # so per-band rows always stack to a rectangular array even when
            # fewer than n sources exist.
            raw_n = raw_closest_sources.shape[0]
            normed_flux = np.ones([raw_n], dtype=np.float32) / raw_n
        return normed_flux
    n_bands = model_vals[0].shape[0]
    # Average each source's flux fraction over all bands, then re-normalize.
    avg_flux_contrib = np.array(list(map(get_normed_src_fluxes, range(n_bands)))).mean(axis=0)
    normed_avg_flux_contrib = avg_flux_contrib / avg_flux_contrib.sum()
    # Duplicated (padded) indices split their source's claim evenly.
    idxs, counts = np.unique(n_closest_sources, return_counts=True)
    coefs = np.reciprocal(counts.astype(np.float32))
    claim = np.array(list(map(
        lambda i: coefs[idxs==i][0] * normed_avg_flux_contrib[i==raw_closest_sources][0],
        n_closest_sources
    )))
    claim_map[y, x, ...] = claim
def get_n_closest_avg_claim_vector(
    source_locations: np.ndarray,
    bkg: np.ndarray,
    bhw: Tuple[int, int, int],
    model_src_vals: List[np.ndarray],
    n: int,
) -> Tuple[np.ndarray, np.ndarray]: # [h, w, n, 2], [h, w, n]
    """Encode every pixel with band-averaged n-closest claim vectors/maps.

    Returns:
        claim_vector [h, w, n, 2] and claim_map [h, w, n].
    `bkg` is accepted for interface parity but not read here.
    """
    _, y, x = bhw
    src_ys, src_xs = np.nonzero(source_locations)
    src_centers = np.array([src_ys, src_xs]).T # [n_srcs, 2]
    idxs = product(range(y), range(x))
    claim_vector = np.zeros([y, x, n, 2], dtype=np.float32)
    claim_map = np.zeros([y, x, n], dtype=np.float32)
    encode_f = partial(
        get_n_closest_avg_claim_vector_single_pixel,
        claim_vector,
        claim_map,
        model_src_vals,
        src_centers,
        n,
    )
    # Drive the per-pixel encoder for its in-place side effects.
    for _ in starmap(encode_f, idxs):
        pass
    # BUG FIX: the original ended with the bare expression
    # `claim_vector, claim_map` (no `return`) and so returned None,
    # unlike every other encoder wrapper in this module.
    return claim_vector, claim_map
# ENCODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# DECODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def decode_n_closest_avg_claim_vector_single_pixel(
    output:np.ndarray,
    flux:np.ndarray,
    claim_vector:np.ndarray,
    claim_map:np.ndarray,
    src_centers:np.ndarray,
    y:int,
    x:int,
    b:int,
) -> None:
    """Split the flux of pixel (y, x, b) among the sources nearest to each of
    its band-averaged claim vectors (claim_vector/claim_map carry no band
    axis), accumulating into `output` in place."""
    center_offsets = src_centers - np.array([y, x])
    vectors = claim_vector[y, x, ...].copy()  # [n, 2]
    # Owner of each claim vector = the closest source center.
    gaps = euclidean_distances(vectors, center_offsets)  # [n, n_src_centers]
    owners = np.argmin(gaps, axis=1)
    shares = flux[y, x, b] * claim_map[y, x, ...].copy()  # [n,]
    for owner, share in zip(owners, shares):
        output[owner, y, x, b] += share
def decode_n_closest_fw_sources(
    flux:np.ndarray, # [h, w, b]
    claim_vector:np.ndarray, # band-averaged variant: [h, w, n, 2]
    claim_map:np.ndarray, # band-averaged variant: [h, w, n]
    src_centers:np.ndarray # [n_srcs, 2]
) -> np.ndarray: # [n_srcs, h, w, b]
    """Decode per-source flux cubes using the band-averaged (avg claim vector)
    single-pixel decoder.

    NOTE(review): this re-uses the name `decode_n_closest_fw_sources` already
    defined earlier in this module and shadows it at import time. Given the
    surrounding "avg map" section and the single-pixel function it dispatches
    to, it was presumably meant to have a distinct `avg` name -- confirm before
    renaming, since callers currently resolve to THIS definition.
    """
    y, x, b = flux.shape
    output = np.zeros([src_centers.shape[0], y, x, b], dtype=np.float32)
    idxs = product(range(y), range(x), range(b))
    decode_f = partial(
        decode_n_closest_avg_claim_vector_single_pixel,
        output,
        flux,
        claim_vector,
        claim_map,
        src_centers,
    )
    # Drive the per-pixel decoder for its side effects on `output`.
    for _ in starmap(decode_f, tqdm(idxs, total=y*x*b)):
        pass
    return output
# DECODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ==============================================================================
# Closest n-sources claim vector avg map
# ==============================================================================
# ==============================================================================
# Closest n-sources claim vector limit bands
# ==============================================================================
# ENCODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def get_n_closest_claim_vector_map_limit_bands_single_pixel(
    claim_vectors: np.ndarray, # [h, w, n, 2], written in place
    claim_map: np.ndarray, # [h, w, b, n], written in place
    model_vals: List[np.ndarray], # one [bands, h, w] model array per source
    src_centers: np.ndarray, # [n_srcs, 2]
    n: int,
    n_bands: int,
    y: int,
    x: int,
) -> None:
    """Encode pixel (y, x): band-shared offsets to the n nearest sources plus a
    per-band claim map restricted to the first n_bands bands.
    Mutates claim_vectors/claim_map; returns None."""
    # Claim vectors ============================================================
    relative_vectors = src_centers - np.array([y, x]) # [n_srcs, 2]
    relative_distances = np.linalg.norm(relative_vectors, axis=-1) # [n_srcs,]
    raw_closest_sources = np.argsort(relative_distances)[:n] # [min(n_srcs, n), ]
    num_pad = n - raw_closest_sources.shape[0]
    # Repeat the last selected index when fewer than n sources exist.
    if num_pad > 0:
        n_closest_sources = np.pad(raw_closest_sources, (0, num_pad), mode="edge") # [n,]
    else:
        n_closest_sources = raw_closest_sources # [n,]
    selected_srcs = relative_vectors[n_closest_sources] # [n, 2]
    claim_vectors[y, x, ...] = selected_srcs
    # Claim vectors ============================================================
    # Claim maps ===============================================================
    raw_n = raw_closest_sources.shape[0]
    def get_normed_src_fluxes(band:int):
        # Sum-to-one flux fractions for one band (uniform when no flux here).
        src_fluxes = np.array([max(model_vals[i][band, y, x], 0) for i in raw_closest_sources])
        sum_flux = src_fluxes.sum()
        if sum_flux > 0:
            normed_flux = src_fluxes / sum_flux
        else:
            normed_flux = np.ones([raw_n], dtype=np.float32) / raw_n
        return normed_flux
    band_normed_flux = np.array(list(map(get_normed_src_fluxes, range(n_bands)))) # [n_bands, min(n_src, n)]
    # Pad columns to width n by repeating the last candidate's fractions.
    if num_pad > 0:
        padded_band_normed_flux = np.pad(band_normed_flux, ((0, 0), (0, num_pad)), mode="edge")
    else:
        padded_band_normed_flux = band_normed_flux
    # Duplicated (padded) indices split their source's claim evenly.
    idxs, counts = np.unique(n_closest_sources, return_counts=True) # [min(n_srcs, n), ], [min(n_srcs, n), ]
    coefs = np.reciprocal(counts.astype(np.float32)) # [min(n_srcs, n), ]
    coef_map = np.array([coefs[idxs==i][0] for i in n_closest_sources])[np.newaxis, :] #[1, n]
    # NOTE(review): leftover debugging guard -- the bare `except` hides the real
    # error type and `raise ValueError(...)` replaces the original exception;
    # consider removing it or chaining with `raise ... from e`.
    try:
        claim = padded_band_normed_flux * coef_map
    except:
        print("raw_closest_sources: ", raw_closest_sources.shape)
        print("selected_srcs_shape: ", selected_srcs.shape)
        print("band_normed_flux: ", band_normed_flux.shape)
        print("padded_band_normed_flux: ", padded_band_normed_flux.shape)
        print("coefs: ", coefs.shape)
        print("coef_map: ", coef_map.shape)
        raise ValueError("Things Broke! Oh Man!")
    claim_map[y, x, ...] = claim
    # Claim maps ===============================================================
def get_n_closest_claim_vector_map_limit_bands(
    source_locations: np.ndarray,
    bkg: np.ndarray,
    bhw: Tuple[int, int, int],
    model_src_vals: List[np.ndarray],
    n: int,
    n_bands:int,
) -> Tuple[np.ndarray, np.ndarray]:  # [h, w, bands, n], [h, w, bands, n, 2]
    """Encode band-shared claim vectors [h, w, n, 2] and claim maps limited to
    the first n_bands bands [h, w, n_bands, n]. `bkg` is accepted for interface
    parity but not read here."""
    _, height, width = bhw  # the full band count from bhw is not used here
    src_centers = np.stack(np.nonzero(source_locations), axis=1)  # [n_srcs, 2]
    claim_vector = np.zeros([height, width, n, 2], dtype=np.float32)
    claim_map = np.zeros([height, width, n_bands, n], dtype=np.float32)
    # Per-pixel encoder writes into the two output arrays in place.
    for row in range(height):
        for col in range(width):
            get_n_closest_claim_vector_map_limit_bands_single_pixel(
                claim_vector,
                claim_map,
                model_src_vals,
                src_centers,
                n,
                n_bands,
                row,
                col,
            )
    return claim_vector, claim_map
# ENCODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# DECODER vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def decode_n_closest_claim_vector_map_limit_bands_single_pixel(
    output:np.ndarray, # [n_srcs, h, w, b], accumulated in place
    flux:np.ndarray, # [h, w, b]
    claim_vector:np.ndarray, # indexed as [y, x, b, ...] below
    claim_map:np.ndarray, # indexed as [y, x, b, ...] below
    src_centers:np.ndarray, # [n_srcs, 2]
    y:int,
    x:int,
    b:int,
) -> None:
    """Distribute the flux of pixel (y, x, b) to the sources nearest each of
    its claim vectors. Mutates `output`; returns None.

    NOTE(review): this indexes claim_vector[y, x, b, ...], but the limit-bands
    encoder above produces claim vectors shaped [h, w, n, 2] with no band
    axis -- confirm what the caller actually passes in.
    """
    pixel_flux = flux[y, x, b]
    pixel_vectors = claim_vector[y, x, b, ...].copy() # [n, 2]
    pixel_claim_map = claim_map[y, x, b, ...].copy() # [n,]
    relative_centers = src_centers - np.array([y, x])
    distances = euclidean_distances(pixel_vectors, relative_centers) #[n, n_src_centers]
    closest_srcs = np.argmin(distances, axis=1)
    distributed_flux = pixel_flux * pixel_claim_map
    def update_output(src_idx:int, flx:float):
        # Accumulate this claim's flux share onto its owning source.
        output[src_idx, y, x, b] += flx
    for _ in starmap(update_output, zip(closest_srcs, distributed_flux)):
        pass
def decode_n_closest_claim_vector_map_limit_bands(
    flux:np.ndarray,
    claim_vector:np.ndarray,
    claim_map:np.ndarray,
    src_centers:np.ndarray
) -> np.ndarray:
    """Decode per-source flux cubes [n_srcs, h, w, b] from limit-bands claim
    data by driving the per-pixel decoder over every (y, x, band)."""
    y, x, b = flux.shape
    output = np.zeros([src_centers.shape[0], y, x, b], dtype=np.float32)
    idxs = product(range(y), range(x), range(b))
    # CONSISTENCY FIX: dispatch to this variant's own single-pixel decoder.
    # The original called decode_n_closest_fw_sources_single_pixel, leaving
    # decode_n_closest_claim_vector_map_limit_bands_single_pixel unused; the
    # two bodies are currently identical, so behavior is unchanged.
    decode_f = partial(
        decode_n_closest_claim_vector_map_limit_bands_single_pixel,
        output,
        flux,
        claim_vector,
        claim_map,
        src_centers,
    )
    for _ in starmap(decode_f, tqdm(idxs, total=y*x*b)):
        pass
    return output
# DECODER ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# ==============================================================================
# Closest n-sources claim vector limit bands
# ==============================================================================
def get_claim_vector_image_and_map(
    source_locations: np.ndarray,
    bkg: np.ndarray,
    bhw: Tuple[int, int, int],
    model_src_vals: List[np.ndarray],
):
    """Encode each pixel's claim over its 8-connected neighborhood.

    For every (pixel, band): background or source-free pixels get unit offset
    vectors and a uniform 1/8 claim map; otherwise each neighbor is matched to
    its nearest positive-flux source and claims that source's share of the
    pixel's flux.

    Returns:
        claim_vector_image [h, w, b, 8, 2], claim_map_image [h, w, b, 8].
    """
    # Updates claim_vector_image and claim_map_image in place
    def single_pixel_vector(
        claim_vector_image: np.ndarray,
        claim_map_image: np.ndarray,
        centers: np.ndarray,
        bkg: np.ndarray,
        i: int,
        j: int,
        b: int,
    ) -> None:
        # Flux each source model contributes at (i, j) in band b.
        ijb_src_flux = np.array([m[b, i, j] for m in model_src_vals])
        ijb_src_flux_mask = ijb_src_flux > 0
        if bkg[i, j, 0] > 0.9 or ijb_src_flux_mask.sum() == 0:
            # Background or source-free pixel: unit offsets, uniform claims.
            idxs = list(product([-1, 0, 1], [-1, 0, 1]))
            idxs.remove((0, 0))
            claim_vector_image[i, j, b, ...] = np.array(idxs)
            claim_map_image[i, j, b, ...] = np.array([1 / 8 for _ in range(8)])
        else:
            connected_idxs = list(product([i, i - 1, i + 1], [j, j - 1, j + 1]))
            connected_idxs.remove((i, j))
            connected_array = np.array(connected_idxs)
            # Positive-flux fractions, normalized to sum to one.
            ijb_normed_src_flux = (ijb_src_flux * ijb_src_flux_mask) / (
                ijb_src_flux * ijb_src_flux_mask
            ).sum()
            def closest_center(centers: np.array, flux_mask: np.ndarray, idx: np.ndarray):
                # Nearest center among sources with positive flux at this pixel.
                dist = np.linalg.norm(centers - idx, axis=1)
                masked_dist = np.where(flux_mask, dist, np.inf)
                return centers[np.argmin(masked_dist)]
            closest_f = partial(closest_center, centers, ijb_src_flux_mask)
            closest_sources = np.array(list(map(closest_f, connected_array)))
            claim_vector = connected_array - closest_sources  # [8, 2]
            claim_vector_image[i, j, b, ...] = claim_vector
            def convert_to_claim_map(
                centers: np.ndarray, normed_flux: np.ndarray, src: np.ndarray
            ):
                # Total normalized flux of the source(s) matching `src`.
                return (
                    (src == centers).all(axis=1).astype(np.float32) * ijb_normed_src_flux
                ).sum()
            convert_to_map_f = partial(convert_to_claim_map, centers, ijb_normed_src_flux)
            raw_claim_map = np.array(list(map(convert_to_map_f, closest_sources)))
            claim_map = raw_claim_map / raw_claim_map.sum()
            claim_map_image[i, j, b, ...] = claim_map
    n_bands, height, width = bhw
    claim_vector_image = np.zeros([height, width, n_bands, 8, 2], dtype=np.float32)
    # BUG FIX: `np.float` (a deprecated alias for the builtin float) was removed
    # in NumPy 1.24 and raises AttributeError there; use float32 to match the
    # claim-vector array above.
    claim_map_image = np.zeros([height, width, n_bands, 8], dtype=np.float32)
    src_ys, src_xs = np.nonzero(source_locations)
    centers = np.array([src_ys, src_xs]).T  # [n, 2]
    single_pixel_f = partial(
        single_pixel_vector, claim_vector_image, claim_map_image, centers, bkg
    )
    idxs = product(range(height), range(width), range(n_bands))
    # Drive the per-pixel encoder for its in-place side effects.
    for _ in starmap(single_pixel_f, idxs):
        pass
    return claim_vector_image, claim_map_image
# use peak_local_max?, its much faster
def non_maximum_suppression(kernel_size: int, threshold: float, image: np.ndarray):
    """Keep only local maxima of `image` over a kernel_size x kernel_size window.

    Values below `threshold` are zeroed first; a pixel survives only when it
    equals the maximum of its (zero-padded) window. Returns a new array and
    leaves the input untouched.
    """
    # BUG FIX: threshold a copy instead of mutating the caller's array in place.
    image = np.where(image < threshold, 0, image)
    pad = (kernel_size - 1) // 2
    padded = np.pad(image, pad)
    output = np.zeros_like(image)
    # BUG FIX: the original iterated range(padded.shape[i] - kernel_size), which
    # equals image.shape[i] - 1 and silently skipped the last row and column, so
    # peaks there could never be detected.
    for y in range(image.shape[0]):
        for x in range(image.shape[1]):
            output[y, x] = padded[y : y + kernel_size, x : x + kernel_size].max()
    # Keep a pixel only where it equals its own window maximum.
    output[image != output] = 0
    return output
def get_sources(
    flux_image: np.ndarray, # [h, w, b]
    claim_vectors: np.ndarray, # [h, w, b, 8, 2]
    claim_maps: np.ndarray, # [h, w, b, 8]
    background_map: np.ndarray, # [h, w]
    center_of_mass: np.ndarray, # [h, w]
) -> np.ndarray: # [n, h, w, b]
    """Decode 8-neighborhood claim vectors/maps back into per-source flux.

    Source centers are the local maxima of `center_of_mass`; for every
    non-background pixel the flux is distributed to the sources its 8
    neighbors' claim vectors point at. Returns [n, h, w, b].
    """
    src_locations = non_maximum_suppression(7, 0.1, center_of_mass) # [h, w]
    src_centers = np.stack(np.nonzero(src_locations), axis=1) # [n, 2]
    n_srcs = src_centers.shape[0]
    height, width, bands = flux_image.shape
    src_image = np.zeros([n_srcs, height, width, bands], dtype=np.float32)
    def distribute_source_flux(i, j, b):
        # Skip confidently-background pixels entirely (leaves zeros).
        if background_map[i, j] > 0.9:
            return
        # The 8-connected neighborhood of (i, j), center excluded.
        adj_idxs = list(product([i, i - 1, i + 1], [j, j - 1, j + 1]))
        adj_idxs.remove((i, j))
        adj_idx_array = np.array(adj_idxs)
        # Claimed source position implied by each neighbor's claim vector.
        pixel_claim_vectors = (
            adj_idx_array - claim_vectors[i, j, b, ...].copy()
        ) # [8, 2]
        pixel_claim_map = claim_maps[i, j, b, :].copy() # [8,]
        pixel_flux = flux_image[i, j, b] # [0,]
        def closest_center(k): # k of 8
            # Index of the source center nearest to claim point k.
            dist = np.linalg.norm(src_centers - pixel_claim_vectors[k, :], axis=1)
            closest_center = np.argmin(dist)
            return closest_center
        pixel_claim_src_idxs = np.array(list(map(closest_center, range(8))))
        def nth_flux(i):
            # Flux share of source i: total claim of neighbors that picked it.
            claim_mask = pixel_claim_src_idxs == i # [8,]
            claim = (pixel_claim_map * claim_mask).sum() # [0,]
            return claim * pixel_flux # [0,]
        src_separation = np.array(list(map(nth_flux, range(n_srcs)))) # [n, ]
        src_image[:, i, j, b] = src_separation
    idxs = tqdm(
        product(range(height), range(width), range(bands)), total=height * width * bands
    )
    # Drive the per-pixel distributor for its side effects on src_image.
    for _ in starmap(distribute_source_flux, idxs):
        pass
    return src_image # [n, h, w, b]
# NOTE(review): trailing dataset-viewer artifacts ("StarcoderdataPython",
# "Subsets and Splits", ...) were not Python and broke parsing; converted
# to this comment.