id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
237912 | #!/usr/bin/env python
"""
Created on 2015-09-27T16:51:39
"""
from __future__ import division, print_function
import sys
import argparse
import re
import time
try:
import numpy as np
except ImportError:
print('You need numpy installed')
sys.exit(1)
import pandas as pd
from splinter.browser import Browser
import connect_aws_db as cadb
__author__ = "<NAME> (github: @mattgiguere)"
__license__ = "MIT"
__version__ = '0.0.1'
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = " Development NOT(Prototype or Production)"
# change default encoding to handle utf characters
reload(sys)
sys.setdefaultencoding('utf8')
def get_hotel_urls(city, state, engine):
    """Retrieve the hotels for the given city and state.

    Parameters
    ----------
    city : str
        City name with words joined by underscores (e.g. ``new_haven``).
    state : str
        Two-letter state abbreviation (case-insensitive).
    engine : SQLAlchemy engine (or DBAPI-compatible object)
        Connection used to run the query.

    Returns
    -------
    list of tuple
        One ``(hotel_id, business_id, hotel_url)`` tuple per matching hotel.
    """
    # manipulate the city string into the proper form ("new_haven" -> "new haven")
    citystr = ' '.join(city.lower().split('_'))
    # Use bound parameters instead of string concatenation to avoid SQL
    # injection and quoting bugs (MySQL/MariaDB drivers use the %s paramstyle).
    cmd = ("SELECT hotel_id, business_id, hotel_url FROM ta_hotels "
           "WHERE hotel_city=%s AND hotel_state=%s")
    result = engine.execute(cmd, (citystr, state.lower()))
    return [(row['hotel_id'], row['business_id'], row['hotel_url']) for row in result]
def return_results(url, page, br):
    """Scrape one TripAdvisor review page.

    Visits *url* in the splinter browser *br*, waits a random 8-20 seconds to
    throttle requests, extracts every review on the page, and reports whether
    a "next" page link exists.

    NOTE: this module targets Python 2 (see the reload/setdefaultencoding
    calls at the top of the file); ``.decode`` on element text below relies
    on Python 2 byte strings.

    Parameters
    ----------
    url : str
        Review-page URL to visit.
    page : int
        Current page number; incremented when a next page is found.
    br : splinter Browser
        Live browser session used for scraping.

    Returns
    -------
    dict
        Parallel per-review lists ('usrnms', 'mmbrids', 'locs', 'ttls',
        'rtngs', 'dts', 'rvws', 'revids') plus the next 'url', the
        'more_reviews' flag and the updated 'page' number.
    """
    br.visit(url)
    # Random delay so the scraper looks less like a bot.
    sleep_amount = np.random.uniform(8, 20)
    print('sleeping for {} seconds before continuing.'.format(sleep_amount))
    time.sleep(sleep_amount)
    full_reviews = br.find_by_xpath('//div[contains(@class, "reviewSelector")]')
    # Per-page accumulators; kept parallel, one entry per review.
    page_usernames = []
    page_memberids = []
    page_locations = []
    page_titles = []
    page_ratings = []
    page_dates = []
    page_reviews = []
    page_review_ids = []
    for fullrev in full_reviews:
        # user name:
        try:
            member_info = fullrev.find_by_xpath('div/div[contains(@class, "col1of2")]/div[contains(@class, "member_info")]')
            member_str = member_info.find_by_xpath('div[contains(@class, "memberOverlayLink")]')['id']
            # The element id looks like "UID_<member id>-..."; extract the id.
            member_id = re.findall('UID_(.*)-', member_str)[0]
            usrnm = member_info.find_by_xpath('div/div[contains(@class, "username mo")]')
        except:
            # NOTE(review): if the very first lookup fails, member_info is
            # left undefined and the location lookup further below would
            # raise NameError — confirm whether this branch is ever hit.
            print('member_info does not exist')
            member_id = ''
            usrnm = ''
        review = fullrev.find_by_xpath('div/div[@class="col2of2"]/div[@class="innerBubble"]')[0]
        # The title is wrapped in quote characters; strip first/last char.
        title = review.find_by_xpath('div/div[contains(@class, "quote")]').text.strip()[1:-1]
        # The rating lives in the img alt text, e.g. "4 of 5 stars".
        rating = review.find_by_xpath('div/div[contains(@class, "rating")]/span/img')['alt'].split(' ')[0]
        date = review.find_by_xpath('div/div[contains(@class, "rating")]/span[contains(@class, "ratingDate")]')['title']
        rev = review.find_by_xpath('div/div[contains(@class, "entry")]').text.strip().replace("\n", "")
        if len(usrnm) > 0:
            susrnm = usrnm[0].text
            username = susrnm.decode('utf-8', 'ignore').strip()
            print('Username: {}'.format(username))
        else:
            # Anonymous reviewer (or the member block was missing).
            username = ''
            print('Username: A Trip Advisor Member')
        locationel = member_info.find_by_xpath('div[contains(@class, "location")]')
        if len(locationel) > 0:
            location = str(locationel[0].text).strip()
            print('Location: {}'.format(location))
        else:
            location = ''
            print('Location: ')
        #print('full review_id: {}'.format(fullrev['id']))
        try:
            # Review element ids look like "review_<digits>".
            rev_id = re.search('review_(\d+)$', fullrev['id']).group(1)
        except AttributeError:
            # No id match; keep an empty string as placeholder.
            rev_id = ''
        # print('review_id: {}'.format(rev_id))
        # print('Title: {}'.format(title))
        # print('Rating: {}'.format(rating))
        # print('Date: {}'.format(date))
        # print('Review:')
        # print(rev)
        # print('*'*50)
        # remove 4-byte unicode text:
        try:
            highpoints = re.compile(u'[\U00010000-\U0010ffff]')
        except re.error:
            # UCS-2 build
            highpoints = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
        username = highpoints.sub(u'', username)
        title = highpoints.sub(u'', title)
        rev = highpoints.sub(u'', rev)
        page_usernames.append(username)
        page_memberids.append(member_id)
        page_locations.append(location)
        page_titles.append(title)
        page_ratings.append(rating)
        page_dates.append(date)
        page_reviews.append(rev)
        page_review_ids.append(rev_id)
    # Pagination: follow the "next" link when one exists on the page.
    if len(br.find_by_xpath('//a[contains(@class, "next")]')) > 0:
        url = br.find_by_xpath('//a[contains(@class, "next")]')['href']
        more_reviews = True
        page += 1
        # print('url and page updated.')
    else:
        more_reviews = False
    ret_dict = {'usrnms': page_usernames,
                'mmbrids': page_memberids,
                'locs': page_locations,
                'ttls': page_titles,
                'rtngs': page_ratings,
                'dts': page_dates,
                'rvws': page_reviews,
                'revids': page_review_ids,
                'url': url,
                'more_reviews': more_reviews,
                'page': page}
    return ret_dict
def get_done_business_ids(city, engine):
    """Return the business ids of hotels in *city* that already have reviews."""
    city_name = ' '.join(city.split('_'))
    cmd = ('select distinct r.business_id from '
           'ta_reviews r inner join ta_hotels h on r.business_id = '
           'h.business_id where h.hotel_city = "' + city_name + '" ')
    return [int(row[0]) for row in pd.read_sql_query(cmd, engine).values]
def get_biz_review_ids(city, engine):
    """Return the TripAdvisor review ids already stored for *city*.

    Retries once with a fresh AWS DB connection if the first query fails
    (e.g. because the existing connection has gone stale).

    Parameters
    ----------
    city : str
        City name with words joined by underscores.
    engine : SQLAlchemy engine
        Connection used to run the query.

    Returns
    -------
    list of int
        Existing ``biz_review_id`` values for the city's hotels.
    """
    cmd = 'select biz_review_id from ta_reviews r inner join '
    cmd += 'ta_hotels h on r.business_id=h.business_id '
    cmd += 'where h.hotel_city = '
    cmd += '"'+(' ').join(city.split('_'))+'"'
    try:
        xstng_revs = [int(rev_id[0]) for rev_id in pd.read_sql_query(cmd, engine).values]
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; reconnect once and retry the query.
        engine = cadb.connect_aws_db(write_unicode=True)
        xstng_revs = [int(rev_id[0]) for rev_id in pd.read_sql_query(cmd, engine).values]
    return xstng_revs
def remove_duplicates(bigdf, city, engine):
    """Drop rows whose ``biz_review_id`` is already stored in the DB for *city*."""
    known_ids = get_biz_review_ids(city, engine)
    if not known_ids:
        return bigdf
    already_scraped = bigdf['biz_review_id'].isin(known_ids)
    return bigdf[~already_scraped].copy()
def scrape_hotel(url, br, engine):
    """Scrape every review page of a single hotel.

    Parameters
    ----------
    url : str
        URL of the hotel's first review page.
    br : splinter Browser
        Live browser session used for scraping.
    engine : SQLAlchemy engine
        Unused here; kept for interface compatibility with callers.

    Returns
    -------
    pandas.DataFrame
        One row per scraped review. The 'review_id', 'hotel_id' and
        'business_id' columns are left empty and filled in by the caller.
    """
    columns = ['review_id',
               'hotel_id',
               'business_id',
               'biz_review_id',
               'biz_member_id',
               'username',
               'review_title',
               'review_rating',
               'review_text',
               'review_date']
    page_frames = []
    more_reviews = True
    page = 1
    while more_reviews:
        print('*'*50)
        print('Now on page {}'.format(page))
        df = pd.DataFrame(columns=columns)
        ret_dict = return_results(url, page, br)
        df['biz_review_id'] = ret_dict['revids']
        df['biz_member_id'] = ret_dict['mmbrids']
        df['username'] = ret_dict['usrnms']
        df['review_title'] = ret_dict['ttls']
        df['review_rating'] = ret_dict['rtngs']
        df['review_date'] = ret_dict['dts']
        df['review_text'] = ret_dict['rvws']
        # return_results reports the next page url and whether one exists.
        url = ret_dict['url']
        more_reviews = ret_dict['more_reviews']
        page = ret_dict['page']
        print('successfully completed page {}'.format(page))
        page_frames.append(df)
    # DataFrame.append was deprecated and removed in pandas 2.0; concatenate
    # the per-page frames in one pass instead.
    bigdf = pd.concat(page_frames)
    return bigdf
def splinter_scrape_ta_reviews(city='', state='', write_to_db=False, start_num=0, end_num=-1):
    """Scrape TripAdvisor reviews for every not-yet-scraped hotel in a city.

    Optionally restricts the run to the hotels numbered ``start_num`` through
    ``end_num`` and, when *write_to_db* is set, appends the results to the
    ``ta_reviews`` table.
    """
    engine = cadb.connect_aws_db(write_unicode=True)
    hotel_links = get_hotel_urls(city, state, engine)
    # only do the specified hotel range
    if start_num != 0:
        hotel_links = hotel_links[start_num:]
    if end_num != -1:
        if len(hotel_links) < end_num:
            print('end_num exceeded number of hotels. resetting to max.')
            end_num = len(hotel_links)
        hotel_links = hotel_links[:end_num]
    br = Browser()
    finished_ids = get_done_business_ids(city, engine)
    for hotel_id, biz_id, link in hotel_links:
        # Skip hotels whose reviews were already scraped in an earlier run.
        if int(biz_id) in finished_ids:
            print('business_id {} already scraped.'.format(biz_id))
            continue
        bigdf = scrape_hotel(link, br, engine)
        bigdf['hotel_id'] = hotel_id
        bigdf['business_id'] = biz_id
        bigdf['biz_review_id'] = np.int64(bigdf['biz_review_id'].values)
        bigdf = remove_duplicates(bigdf, city, engine)
        if write_to_db:
            try:
                bigdf.to_sql('ta_reviews', engine, if_exists='append', index=False)
            except:
                print('WRITING TO DB FAILED!!!')
if __name__ == '__main__':
    # Command-line entry point; see the usage example printed below.
    parser = argparse.ArgumentParser(
        description='argparse object.')
    # NOTE(review): --city_url is accepted but never read below — confirm
    # whether it is still needed.
    parser.add_argument(
        '--city_url',
        help='The url of the city to scrape.',
        nargs='?', default='')
    parser.add_argument(
        '-c', '--city',
        help='The url of the city to scrape.',
        nargs='?', default='')
    parser.add_argument(
        '-s', '--state',
        help='This name of the state to scrape.',
        nargs='?', default='')
    parser.add_argument(
        '--start_num',
        help='The starting number within the list of hotels for a city ' +
        'to start with. For example, if there are ten hotels for the city, ' +
        'and you only want to add reviews for hotels 5 through 10, set ' +
        'start_num to 5.',
        nargs='?', default=0)
    parser.add_argument(
        '--end_num',
        help='The ending number within the list of hotels for a city. ' +
        'For example, if there are ten hotels for the city, ' +
        'and you only want to add reviews for hotels 0 through 4, set ' +
        'end_num to 5.',
        nargs='?', default=-1)
    parser.add_argument(
        '-w', '--write_to_db',
        help='Set if you want to write the results to the DB.',
        default=False, action='store_true')
    # Guard against obviously malformed invocations (too many arguments).
    if len(sys.argv) > 11:
        print('use the command')
        print('python splinter_scrape_bf.py city state')
        print('For example:')
        print('python splinter_scrape_ta_reviews.py -c new_haven -s ct')
        sys.exit(2)
    args = parser.parse_args()
    # start_num/end_num arrive as strings from argparse; cast to int here.
    splinter_scrape_ta_reviews(city=args.city,
                               state=args.state,
                               write_to_db=args.write_to_db,
                               start_num=int(args.start_num),
                               end_num=int(args.end_num))
| StarcoderdataPython |
6481185 | <gh_stars>0
class XmlSchema:
    """Placeholder for an XML schema representation (not yet implemented)."""
    pass
class XmlDeclaration:
    """Placeholder for an XML declaration representation (not yet implemented)."""
    pass
class XmlDefinition:
    """Placeholder for an XML definition representation (not yet implemented)."""
    pass
| StarcoderdataPython |
6597175 | from kubeadm import Kubeadm # noqa
| StarcoderdataPython |
6584232 | """
@author: wangguanan
@contact: <EMAIL>
"""
import os, copy
from .reid_samples import ReIDSamples
import torchvision
class OccludedReID(ReIDSamples):
    """Occluded ReID dataset.

    Only query and gallery sets exist (there is no training split).
    All query images are assigned to camera 0 and all gallery images
    to camera 1.
    """
    def __init__(self, data_path, combineall=False, download=False, **kwargs):
        assert combineall is False, \
            'unsupport combineall for {} dataset'.format(self.__class__.__name__)
        assert download is False, \
            'unsupport download, please automatically download {} dataset'.format(self.__class__.__name__)
        self.probe_path = os.path.join(data_path, 'occluded_body_images/')
        self.gallery_path = os.path.join(data_path, 'whole_body_images/')
        # Append a pseudo camera id to each (path, person-id) sample:
        # 0 for the occluded probes, 1 for the whole-body gallery.
        query = [[img_path, pid, 0]
                 for img_path, pid in torchvision.datasets.ImageFolder(self.probe_path).samples]
        gallery = [[img_path, pid, 1]
                   for img_path, pid in torchvision.datasets.ImageFolder(self.gallery_path).samples]
        # No training split for this dataset.
        super(OccludedReID, self).__init__(None, query, gallery)
| StarcoderdataPython |
9605444 | <reponame>arthurshmidt/website<filename>sql_com_class.py
from random import randint
import mariadb
import datetime
import time
# general syntax for connecting to MariaDB server
# mysql -u root -p
# USE to connect
# SHOW to display
# SELECT FROM to read
# DROP to delete
class database:
    """Thin wrapper around a local MariaDB connection that stores blog posts.

    Usage: ``connect()``, then ``addTable()``/``addData()``/
    ``returnTables()``/``returnTableValues()``, and finally ``disconnect()``.
    """
    def __init__(self):
        # Connection and cursor are created lazily in connect();
        # `table` is an unused placeholder kept for compatibility.
        self.table = None
        self.conn = None
        self.C = None
    def connect(self):
        """Open a connection to the local ``blog_posts`` database."""
        self.conn = mariadb.connect(
            user="blog",
            password="<PASSWORD>",
            host="localhost",
            database="blog_posts"
        )
        self.C = self.conn.cursor()
    def addTable(self, table_name):
        """Create a post table, e.g. P200723, with columns Date, Title, Summary, Post.

        NOTE: table names cannot be bound as SQL parameters, so *table_name*
        must come from a trusted source.
        """
        # The previous implementation created an unused `datetime.now()`
        # local here; it has been removed.
        new_table = ("CREATE TABLE " + table_name +
                     " (Date LONGTEXT, Title LONGTEXT, Summary LONGTEXT, Post LONGTEXT)")
        self.C.execute(new_table)
    def addData(self, table_name, post_date, post_title, post_summary, post_data):
        """Insert one blog post into *table_name* using bound parameters."""
        sql_entry_data = ("INSERT INTO " + table_name +
                          " (Date, Title, Summary, Post) VALUES (%s, %s, %s, %s)")
        val = (post_date, post_title, post_summary, post_data)
        self.C.execute(sql_entry_data, val)
        self.conn.commit()
    def disconnect(self):
        """Close the database connection."""
        self.conn.close()
    def returnTables(self):
        """Return the names of all tables in the database as a list of strings."""
        self.C.execute("SHOW TABLES")
        return [row[0] for row in self.C.fetchall()]
    def returnTableValues(self, table_name):
        """Return the first row of *table_name* as a tuple.

        NOTE(review): only the first row is returned even when the table
        holds several posts — confirm this is intended.
        """
        self.C.execute("SELECT * FROM " + table_name)
        myresult = self.C.fetchall()
        return myresult[0]
if __name__ == "__main__":
# testing of the database class
# test connection to database and the instertion of a post
data = database()
data.connect()
# Test Entering data into sql
#data.addTable("D2020_07_28")
#summary_post = "This summary post is test my sql database entry and retrieval"
#body_post = "This text is meant to represent the body of the blog post. Yay!!!!!. Better than Lorem Ipsum -> These are the voyages of the starship Enterprise. Its five-year mission: to explore strange new worlds, to seek out new life and new civilizations, to boldly go where no man has gone before"
#data.addData("D2020_07_23", summary_post, body_post)
#print("Entered Data into sql")
# retrieve and display blog posts from sql
tables_list = data.returnTables()
for i in range(len(tables_list)):
table_data = data.returnTableValues(tables_list[i])
print("Post Date: {}\n".format(tables_list[i]))
print("Summary:")
print(table_data[0])
print("\nBlog Post:")
print(table_data[1])
#print("tables_list: {}\n".format(tables_list))
#table_data = data.returnTableValues(tables_list[0])
#print(table_data)
# close out the connection to database
data.disconnect()
| StarcoderdataPython |
11382021 | from __future__ import print_function
import contextlib
import errno
import hashlib
import shutil
import subprocess
import tempfile
from distutils.spawn import find_executable
from os import makedirs, utime, system
from os.path import basename
from sys import stderr
from ._elapsed import BeginEnd
def rmtree(folder):
    """Recursively delete *folder*, ignoring errors (like ``rm -rf``).

    Uses shutil.rmtree instead of shelling out to ``rm``: portable, immune
    to shell injection, and safe for paths containing spaces or other
    shell metacharacters.
    """
    shutil.rmtree(folder, ignore_errors=True)
@contextlib.contextmanager
def temp_folder():
    """Context manager yielding a fresh temporary folder, removed on exit."""
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        # Always clean up, even if the body raised.
        rmtree(path)
def make_sure_path_exists(path):
    """Creates a path recursively if necessary.

    Existing directories are tolerated; any other OS error propagates.
    """
    try:
        makedirs(path)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            # Already present — nothing to do.
            return
        raise
def cp(folder_src, folder_dst):
    """Copy all files from *folder_src* into *folder_dst* via the ``cp`` shell command."""
    command = "cp " + folder_src + "/* " + folder_dst
    status = subprocess.call(command, shell=True)
    if status < 0:
        # A negative return code means the child was killed by a signal.
        print("Child was terminated by signal %d" % status, file=stderr)
def touch(fname, times=None):
    """Create *fname* if missing and set its access/modification times.

    *times* is passed straight to ``os.utime`` (None means "now").
    """
    handle = open(fname, 'a')
    try:
        utime(fname, times)
    finally:
        handle.close()
def folder_hash(folder, exclude_files=None):
    """Recursively hash all files in a folder and sum it up.

    Shells out to the external ``md5deep`` tool, sorts its per-file digest
    lines (so the result is independent of traversal order), and folds the
    digests into one MD5.

    Parameters
    ----------
    folder : str
        Root folder to hash.
    exclude_files : list, optional
        Base names whose digests are skipped. NOTE(review): these are
        compared against a *bytes* basename below, so entries presumably
        must be bytes — confirm at the call sites.

    Returns
    -------
    str
        Hex MD5 digest over the sorted per-file digests.
    """
    if exclude_files is None:
        exclude_files = []
    if not _bin_exists('md5deep'):
        raise EnvironmentError("Couldn't not find md5deep.")
    with BeginEnd("Hashing folder %s" % folder):
        out = subprocess.check_output('md5deep -r %s' % folder, shell=True)
    lines = sorted(out.strip(b'\n').split(b'\n'))
    m = hashlib.md5()
    for line in lines:
        # md5deep lines look like "<32-hex-digest>  <path>": the digest is
        # the first 32 bytes; the path starts after the separator
        # (presumably always two spaces — TODO confirm for unusual paths).
        hash_ = line[0:32]
        fp = line[34:]
        if basename(fp) not in exclude_files:
            m.update(hash_)
    return m.hexdigest()
def _bin_exists(name):
"""Checks whether an executable file exists."""
return find_executable(name) is not None
| StarcoderdataPython |
4847474 | from django.http import HttpResponse
from django.shortcuts import render
# Create your views here.
from front.models import Category, Article
def index(request):
    """Persist one demo Category/Article pair and return a placeholder page."""
    cat = Category(name='国产')
    cat.save()
    post = Article(title='论母猪的产后抚养', content='遇见你的时候你是傻逼')
    post.category = cat
    post.save()
    return HttpResponse("首页")
def index1(request):
    """Print the article with primary key 1 and return a placeholder page."""
    print(Article.objects.get(pk=1))
    return HttpResponse("index1")
| StarcoderdataPython |
3250763 | <reponame>zhongxinghong/Botzone-Tank2<filename>core/_backup/march_into_enemy_base.py<gh_stars>10-100
# -*- coding: utf-8 -*-
# @Author: Administrator
# @Date: 2019-04-28 03:31:43
# @Last Modified by: Administrator
# @Last Modified time: 2019-04-30 04:33:13
"""
不顾一切冲向敌方基地
Step:
1. 通过 BFS 查找到达对方基地的最近路径
2. 优先移动
3. 如果遇到路障,则射击(遇到队友除外)
4. 如果不能射击,则原地等待
"""
__all__ = [
"MarchIntoEnemyBaseStrategy",
]
from ..const import DEBUG_MODE
from ..global_ import pprint
from ..utils import debug_print
from ..action import Action
from ..field import Field, TankField, SteelField, BaseField
from ._utils import get_destroyed_fields
from ._bfs import find_shortest_route_for_shoot, DEFAULT_BLOCK_TYPES, DEFAULT_DESTROYABLE_TYPES
from .abstract import SingleTankStrategy
#{ BEGIN }#
class MarchIntoEnemyBaseStrategy(SingleTankStrategy):
    """Rush straight toward the enemy base.

    Steps:
    1. BFS for the shortest shootable route to the enemy base.
    2. Prefer moving along that route.
    3. When blocked, shoot (unless a teammate would be hit).
    4. Otherwise stay put.
    """
    def make_decision(self):
        tank = self._tank
        map_ = self._map
        matrix_T = map_.matrix_T
        _dx = Action.DIRECTION_OF_ACTION_X
        _dy = Action.DIRECTION_OF_ACTION_Y
        side = tank.side
        oppSide = 1 - tank.side
        oppBase = map_.bases[oppSide]
        cMatrixMap = matrix_T.copy()  # simulate a map with the enemy tanks removed
        debug_print("cMatrixMap:\n", cMatrixMap)
        debug_print(map_.tanks)
        for oppTank in map_.tanks[oppSide]:
            # BUGFIX: this previously read `cMatrixMap[x, y]` with undefined
            # x, y (a NameError at runtime); the check was meant to look at
            # the enemy tank's own position.
            if cMatrixMap[oppTank.xy] == Field.TANK + 1 + oppTank.side:
                cMatrixMap[oppTank.xy] = Field.EMPTY  # clear the enemy tank first!
        debug_print("cMatrixMap:\n", cMatrixMap)
        route = find_shortest_route_for_shoot(
            tank.xy,
            oppBase.xy,
            cMatrixMap,
            block_types=DEFAULT_BLOCK_TYPES+(
                Field.BASE + 1 + side,  # own base
                Field.TANK + 1 + side,  # own tank
                Field.MULTI_TANK,       # stacked tanks cannot be moved through
            ),
            destroyable_types=DEFAULT_DESTROYABLE_TYPES+(
                Field.BASE + 1 + oppSide,  # enemy base
                #Field.TANK + 1 + oppSide, # enemy tank
                #Field.MULTI_TANK,
            ))
        if len(route) == 0:  # no route found; this should not happen
            return Action.STAY
        if len(route) == 1:  # start equals end
            return Action.STAY  # stay put
        x1, y1 = tank.xy
        x3, y3, _, _ = route[1]  # skip the start node
        action = Action.get_action(x1, y1, x3, y3)
        ## Prefer moving ##
        if map_.is_valid_move_action(tank, action):
            # But if the enemy base is straight ahead, do not move — shoot.
            hasEnemyBaseInFront = False
            x, y = tank.xy
            while True:
                x += _dx[action]
                y += _dy[action]
                if not map_.in_map(x, y):
                    break
                currentFields = map_[x, y]
                foundSteelField = False
                for field in currentFields:
                    if isinstance(field, SteelField):
                        foundSteelField = True
                        break
                    elif field is oppBase:  # found the enemy base
                        hasEnemyBaseInFront = True
                        break
                    else:  # keep scanning this cell
                        continue
                if foundSteelField:  # steel walls are unbreakable; stop scanning this direction
                    break
            if hasEnemyBaseInFront:  # enemy base ahead with no steel wall in between
                if map_.is_valid_shoot_action(tank, action + 4):  # if shooting is allowed
                    action += 4
                    destroyedFields = get_destroyed_fields(tank, action, map_)
                    for field in destroyedFields:  # avoid shooting a teammate
                        if isinstance(field, TankField) and field.side == tank.side:
                            return Action.STAY  # hold position
                    return action  # otherwise shoot
                else:
                    return Action.STAY  # cannot shoot; wait
            return action  # otherwise move normally
        ## Blocked by a wall / the enemy base / a tank ##
        action += 4  # try shooting instead
        if map_.is_valid_shoot_action(tank, action):
            destroyedFields = get_destroyed_fields(tank, action, map_)
            # Only need to avoid hitting a teammate.
            if len(destroyedFields) == 1:
                field = destroyedFields[0]
                if isinstance(field, TankField) and field.side == tank.side:
                    # TODO: could two tanks end up waiting for each other here?
                    return Action.STAY
            return action  # safe to shoot
        return Action.STAY  # cannot shoot; just wait
#{ END }# | StarcoderdataPython |
3249347 | import sys
import struct
STX=0x02
def get_bcc(telegram_bytes):
    """Return the XOR block-check character (BCC) over *telegram_bytes*."""
    checksum = 0
    for byte in telegram_bytes:
        checksum ^= byte
    return checksum
class USSUndefinedAddressException(Exception):
    """Raised when a telegram address is greater than 31."""
    pass
class USSEmptyNetDataException(Exception):
    """Raised when a telegram is constructed with empty net data."""
    pass
class USSIncorrectBCCException(Exception):
    """Raised when a received frame fails the XOR block-check (BCC)."""
    pass
class MasterTelegram(bytearray):
    """USS master telegram frame: STX | LGE | ADR | net data | BCC.

    *net_data* must be a non-empty bytearray and *addr* at most 31.
    Setting *mirror* turns on the mirror bit (bit 6) in the address byte.
    """
    def __init__(self, addr, net_data, mirror=False):
        if len(net_data) == 0:
            raise USSEmptyNetDataException
        if addr > 31:
            raise USSUndefinedAddressException
        # LGE counts the net data plus the ADR and BCC bytes.
        adr_byte = addr | 0b01000000 if mirror else addr
        frame = bytearray([STX, len(net_data) + 2, adr_byte])
        frame.extend(net_data)
        # The BCC is the XOR over every preceding byte of the frame.
        frame.append(get_bcc(frame))
        super(MasterTelegram, self).__init__(frame)
class SlaveTelegram(bytearray):
    """Net data extracted from a raw slave reply frame.

    The XOR over the whole frame (including its BCC byte) must be zero,
    otherwise the frame is corrupt. LGE and ADR are kept as attributes;
    the bytearray content is only the net data between header and BCC.
    """
    def __init__(self, raw):
        if get_bcc(raw) != 0x0:
            raise USSIncorrectBCCException
        self.LGE, self.ADR = raw[1], raw[2]
        net_data = raw[3:-1]
        super(SlaveTelegram, self).__init__(net_data)
| StarcoderdataPython |
6556284 | <reponame>jianzhnie/d2nlp
'''
Author: jianzhnie
Date: 2021-12-23 16:23:02
LastEditTime: 2022-01-05 16:26:43
LastEditors: jianzhnie
Description:
'''
import os
import d2l.torch as d2l
import torch
import torch.nn as nn
# Registry entries mapping each pretrained GloVe embedding name to its
# download URL and SHA-1 checksum (consumed by d2l.download_extract below).
d2l.DATA_HUB['glove.6b.50d'] = (d2l.DATA_URL + 'glove.6B.50d.zip',
                                '0b8703943ccdb6eb788e6f091b8946e82231bc4d')
d2l.DATA_HUB['glove.6b.100d'] = (d2l.DATA_URL + 'glove.6B.100d.zip',
                                 'cd43bfb07e44e6f27cbcc7bc9ae3d80284fdaf5a')
d2l.DATA_HUB['glove.42b.300d'] = (d2l.DATA_URL + 'glove.42B.300d.zip',
                                  'b5116e234e9eb9076672cfeabf5469f3eec904fa')
class GloveModel(nn.Module):
    """GloVe model: word and context embedding tables, each with a bias table."""
    def __init__(self, vocab_size, embedding_dim):
        super(GloveModel, self).__init__()
        # Word ("center") embeddings and their scalar biases.
        self.w_embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.w_biases = nn.Embedding(vocab_size, 1)
        # Context embeddings and their scalar biases.
        self.c_embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.c_biases = nn.Embedding(vocab_size, 1)
    def forward_w(self, words):
        """Look up word embeddings and biases for *words* (index tensor)."""
        return self.w_embeddings(words), self.w_biases(words)
    def forward_c(self, contexts):
        """Look up context embeddings and biases for *contexts* (index tensor)."""
        return self.c_embeddings(contexts), self.c_biases(contexts)
    def forward(self, words, contexts):
        """Return (w_embeds, w_biases, c_embeds, c_biases) for a batch of pairs."""
        w_embeds, w_biases = self.forward_w(words)
        c_embeds, c_biases = self.forward_c(contexts)
        return w_embeds, w_biases, c_embeds, c_biases
    def init_weights(self):
        """Initialize every parameter uniformly in [-0.1, 0.1]."""
        for param in self.parameters():
            torch.nn.init.uniform_(param, a=-0.1, b=0.1)
class GloveTokenEmbedding(object):
    """Token Embedding.

    Loads a pretrained embedding file registered in ``d2l.DATA_HUB`` and
    exposes token -> vector lookup; unknown tokens map to index 0, whose
    vector is all zeros.
    """
    def __init__(self, embedding_name):
        """Defined in :numref:`sec_synonyms`"""
        self.idx_to_token, self.idx_to_vec = self._load_embedding(
            embedding_name)
        # Index 0 is reserved for '<unk>' (see _load_embedding).
        self.unknown_idx = 0
        self.token_to_idx = {
            token: idx
            for idx, token in enumerate(self.idx_to_token)
        }
    def _load_embedding(self, embedding_name):
        """Download/extract *embedding_name* and parse its ``vec.txt`` file."""
        idx_to_token, idx_to_vec = ['<unk>'], []
        data_dir = d2l.download_extract(embedding_name)
        # GloVe website: https://nlp.stanford.edu/projects/glove/
        # fastText website: https://fasttext.cc/
        with open(os.path.join(data_dir, 'vec.txt'), 'r') as f:
            for line in f:
                # Each line is "<token> <v1> <v2> ...".
                elems = line.rstrip().split(' ')
                token, elems = elems[0], [float(elem) for elem in elems[1:]]
                # Skip header information, such as the top row in fastText
                if len(elems) > 1:
                    idx_to_token.append(token)
                    idx_to_vec.append(elems)
        # Prepend the zero vector for '<unk>' at index 0.
        idx_to_vec = [[0] * len(idx_to_vec[0])] + idx_to_vec
        return idx_to_token, d2l.tensor(idx_to_vec)
    def __getitem__(self, tokens):
        """Return the stacked vectors for an iterable of *tokens*."""
        indices = [
            self.token_to_idx.get(token, self.unknown_idx) for token in tokens
        ]
        vecs = self.idx_to_vec[d2l.tensor(indices)]
        return vecs
    def __len__(self):
        """Return the vocabulary size, including '<unk>'."""
        return len(self.idx_to_token)
| StarcoderdataPython |
5157843 | <filename>cose/messages/sign1message.py
from typing import Optional, Union, TYPE_CHECKING
import cbor2
from cose import utils
from cose.messages.cosemessage import CoseMessage
from cose.messages.signcommon import SignCommon
if TYPE_CHECKING:
from cose.keys.ec2 import EC2
from cose.keys.okp import OKP
from cose.keys.rsa import RSA
CBOR = bytes
@CoseMessage.record_cbor_tag(18)
class Sign1Message(SignCommon):
    """COSE_Sign1 message: a COSE structure signed by a single signer.

    Registered under CBOR tag 18; the signing context string is
    "Signature1".
    """
    context = "Signature1"
    cbor_tag = 18
    @classmethod
    def from_cose_obj(cls, cose_obj, *args, **kwargs) -> 'Sign1Message':
        """Build a message from a decoded COSE array; the remaining element
        after the common headers/payload is the signature."""
        msg = super().from_cose_obj(cose_obj)
        msg._signature = cose_obj.pop(0)
        return msg
    def __init__(self,
                 phdr: Optional[dict] = None,
                 uhdr: Optional[dict] = None,
                 payload: bytes = b'',
                 external_aad: bytes = b'',
                 key: Optional[Union['EC2', 'OKP', 'RSA']] = None):
        """Create an unsigned COSE_Sign1 message.

        phdr/uhdr are the protected and unprotected header maps;
        external_aad is extra data bound into the signature; key is the
        signing/verification key.
        """
        if phdr is None:
            phdr = {}
        if uhdr is None:
            uhdr = {}
        super().__init__(phdr, uhdr, payload, external_aad, key)
        # Filled in by from_cose_obj() or when the message is signed.
        self._signature = b''
    @property
    def signature(self):
        """The raw signature bytes (empty until signed or parsed)."""
        return self._signature
    @property
    def _sig_structure(self):
        """
        Create the sig_structure that needs to be signed
        :return: to_be_signed
        """
        # Order matters: context string, protected headers/external AAD
        # (added by _base_structure), then the payload.
        sig_structure = [self.context]
        sig_structure = self._base_structure(sig_structure)
        sig_structure.append(self.payload)
        return cbor2.dumps(sig_structure)
    def encode(self, tag: bool = True, sign: bool = True, *args, **kwargs) -> CBOR:
        """ Encodes the message into a CBOR array with or without a CBOR tag. """
        if sign:
            message = [self.phdr_encoded, self.uhdr_encoded, self.payload, self.compute_signature()]
        else:
            # Unsigned variant omits the signature element entirely.
            message = [self.phdr_encoded, self.uhdr_encoded, self.payload]
        if tag:
            res = cbor2.dumps(cbor2.CBORTag(self.cbor_tag, message), default=self._custom_cbor_encoder)
        else:
            res = cbor2.dumps(message, default=self._custom_cbor_encoder)
        return res
    def __repr__(self) -> str:
        phdr, uhdr = self._hdr_repr()
        return f'<COSE_Sign1: [{phdr}, {uhdr}, {utils.truncate(self._payload)}, ' \
               f'{utils.truncate(self._signature)}]>'
| StarcoderdataPython |
1650134 | # -*- coding: utf-8 -*-
import logging
from abc import ABC, abstractmethod
import numpy as np
import cvxpy as cp
def cvx_desc_to_solver(solver_desc):
    """Translate a solver name such as "SCS" into the cvxpy solver constant.

    Raises ValueError for names that are not supported.
    """
    supported = ("SCS", "MOSEK", "CVXOPT", "OSQP", "ECOS", "CPLEX", "CBC",
                 "NAG", "GLPK", "GLPK_MI", "GUROBI", "SCIP", "XPRESS")
    if solver_desc in supported:
        # Each supported description matches the cvxpy attribute name exactly.
        return getattr(cp, solver_desc)
    raise ValueError(f"Solver '{solver_desc}' is not supported or unkown.")
class MathematicalProgram():
    """Base class for a mathematical program.
    """
    # NOTE(review): @abstractmethod below has no enforcement effect because
    # this class does not use ABCMeta (e.g. by inheriting abc.ABC) —
    # subclasses lacking solve() can still be instantiated; confirm whether
    # that is intended.
    def __init__(self, **kwds):
        super().__init__(**kwds)
    @abstractmethod
    def solve(self):
        # Subclasses must implement: solve the program and return the result.
        raise NotImplementedError()
class SupportAffinePreprocessing():
    """Mixin for mathematical programs supporting an affine preprocessing.

    When configured, inputs are mapped through ``x -> A @ x + b`` before
    the program is built/evaluated; otherwise they pass through unchanged.
    """
    def __init__(self, **kwds):
        # Both stay None until set_affine_preprocessing() is called.
        self.A = None
        self.b = None
        super().__init__(**kwds)
    def set_affine_preprocessing(self, A, b):
        """Store the affine map's matrix *A* and offset *b*."""
        self.A = A
        self.b = b
    def get_affine_preprocessing(self):
        """Return the stored map as a dict with keys "A" and "b"."""
        return {"A": self.A, "b": self.b}
    def is_affine_preprocessing_set(self):
        """True when both A and b have been configured."""
        return not (self.A is None or self.b is None)
    def _apply_affine_preprocessing_to_var(self, var_x):
        """Apply the affine map to an optimization variable (identity if unset)."""
        if not self.is_affine_preprocessing_set():
            return var_x
        return self.A @ var_x + self.b
    def _apply_affine_preprocessing_to_const(self, x):
        """Apply the affine map to a constant vector (identity if unset)."""
        if not self.is_affine_preprocessing_set():
            return x
        return np.dot(self.A, x) + self.b
class ConvexQuadraticProgram(ABC, SupportAffinePreprocessing):
    """Base class for a convex quadratic program - for computing counterfactuals.
    Attributes
    ----------
    epsilon : `float`
        "Small" non-negative number for relaxing strict inequalities.
    """
    def __init__(self, **kwds):
        # Defaults; can be overridden per call via optimizer_args.
        self.epsilon = 1e-2
        self.solver = cp.SCS
        self.solver_verbosity = False
        super().__init__(**kwds)
    @abstractmethod
    def _build_constraints(self, var_x, y):
        """Creates and returns all constraints.
        Parameters
        ----------
        var_x : `cvx.Variable`
            Optimization variable.
        y : `int` or `float`
            The requested prediction of the counterfactual - e.g. a class label.
        Returns
        -------
        `list`
            List of cvxpy constraints.
        """
        raise NotImplementedError()
    def _solve(self, prob):
        # Delegate to cvxpy with the configured solver/verbosity.
        prob.solve(solver=self.solver, verbose=self.solver_verbosity)
    def build_solve_opt(self, x_orig, y, features_whitelist=None, mad=None, optimizer_args=None):
        """Builds and solves the convex quadratic optimization problem.
        Parameters
        ----------
        x_orig : `numpy.ndarray`
            The original data point.
        y : `int` or `float`
            The requested prediction of the counterfactual - e.g. a class label.
        features_whitelist : `list(int)`, optional
            List of feature indices (dimensions of the input space) that can be used when computing the counterfactual.
            If `features_whitelist` is None, all features can be used.
            The default is None.
        mad : `numpy.ndarray`, optional
            Weights for the weighted Manhattan distance.
            If `mad` is None, the Euclidean distance is used.
            The default is None.
        optimizer_args : `dict`, optional
            Dictionary for overriding the default hyperparameters of the optimization algorithm.
            The default is None.
        Returns
        -------
        `numpy.ndarray`
            The solution of the optimization problem.
            If no solution exists, `None` is returned.
        """
        # Allow per-call overrides of epsilon / solver / verbosity.
        if optimizer_args is not None:
            if "epsilon" in optimizer_args:
                self.epsilon = optimizer_args["epsilon"]
            if "solver" in optimizer_args:
                self.solver = cvx_desc_to_solver(optimizer_args["solver"])
            if "solver_verbosity" in optimizer_args:
                self.solver_verbosity = optimizer_args["solver_verbosity"]
        dim = x_orig.shape[0]
        # Variables
        # beta is an auxiliary epigraph variable used only for the weighted
        # Manhattan distance objective below.
        x = cp.Variable(dim)
        beta = cp.Variable(dim)
        # Constants
        c = np.ones(dim)
        z = np.zeros(dim)
        I = np.eye(dim)
        # Construct constraints
        constraints = self._build_constraints(x, y)
        # If requested, fix some features
        if features_whitelist is not None:
            # Pin every feature NOT in the whitelist to its original value
            # via the equality A @ x == a (one row per fixed feature).
            A = []
            a = []
            for j in range(dim):
                if j not in features_whitelist:
                    t = np.zeros(dim)
                    t[j] = 1.
                    A.append(t)
                    a.append(x_orig[j])
            if len(A) != 0:
                A = np.array(A)
                a = np.array(a)
                constraints += [A @ x == a]
        # If necessary, construct the weight matrix for the weighted Manhattan distance
        Upsilon = None
        if mad is not None:
            alpha = 1. / mad
            Upsilon = np.diag(alpha)
        # Build the final program
        f = None
        if mad is not None:
            # |Upsilon (x - x_orig)| is linearized through beta:
            # +/-Upsilon (x - x_orig) <= beta with beta >= 0.
            f = cp.Minimize(c.T @ beta) # Minimize (weighted) Manhattan distance
            constraints += [Upsilon @ (x - x_orig) <= beta, (-1. * Upsilon) @ (x - x_orig) <= beta, I @ beta >= z]
        else:
            f = cp.Minimize((1/2)*cp.quad_form(x, I) - x_orig.T@x) # Minimize L2 distance
        prob = cp.Problem(f, constraints)
        # Solve it!
        self._solve(prob)
        return x.value
class SDP(ABC):
    """Base class for a semi-definite program (SDP) - for computing counterfactuals.
    Attributes
    ----------
    epsilon : `float`
        "Small" non-negative number for relaxing strict inequalities.
    """
    def __init__(self, **kwds):
        # Defaults; can be overridden per call via optimizer_args.
        self.epsilon = 1e-2
        self.solver = cp.SCS
        self.solver_verbosity = False
        super().__init__(**kwds)
    @abstractmethod
    def _build_constraints(self, var_X, var_x, y):
        """Creates and returns all constraints.
        Parameters
        ----------
        var_X : `cvx.Variable`
            The artificial optimization variable X - a symmetric matrix (see paper for details).
        var_x : `cvx.Variable`
            Optimization variable.
        y : `int` or `float`
            The requested prediction of the counterfactual - e.g. a class label.
        Returns
        -------
        `list`
            List of cvxpy constraints.
        """
        raise NotImplementedError()
    def _solve(self, prob):
        # Delegate to cvxpy with the configured solver/verbosity.
        prob.solve(solver=self.solver, verbose=self.solver_verbosity)
    def build_solve_opt(self, x_orig, y, features_whitelist=None, optimizer_args=None):
        """Builds and solves the SDP.
        Parameters
        ----------
        x_orig : `numpy.ndarray`
            The original data point.
        y : `int` or `float`
            The requested prediction of the counterfactual - e.g. a class label.
        features_whitelist : `list(int)`, optional
            List of feature indices (dimensions of the input space) that can be used when computing the counterfactual.
            If `features_whitelist` is None, all features can be used.
            The default is None.
        optimizer_args : `dict`, optional
            Dictionary for overriding the default hyperparameters of the optimization algorithm.
            The default is None.
        Returns
        -------
        `numpy.ndarray`
            The solution of the optimization problem.
            If no solution exists, `None` is returned.
        """
        # Allow per-call overrides of epsilon / solver / verbosity.
        if optimizer_args is not None:
            if "epsilon" in optimizer_args:
                self.epsilon = optimizer_args["epsilon"]
            if "solver" in optimizer_args:
                self.solver = cvx_desc_to_solver(optimizer_args["solver"])
            if "solver_verbosity" in optimizer_args:
                self.solver_verbosity = optimizer_args["solver_verbosity"]
        dim = x_orig.shape[0]
        # Variables
        X = cp.Variable((dim, dim), symmetric=True)
        x = cp.Variable((dim, 1))
        one = np.array([[1]]).reshape(1, 1)
        I = np.eye(dim)
        # Construct constraints
        constraints = self._build_constraints(X, x, y)
        # Couple X with x: [[X, x], [x^T, 1]] >> 0 is the PSD relaxation
        # of the non-convex condition X = x x^T.
        constraints += [cp.bmat([[X, x], [x.T, one]]) >> 0]
        # If requested, fix some features
        if features_whitelist is not None:
            # Pin every feature NOT in the whitelist to its original value.
            A = []
            a = []
            for j in range(dim):
                if j not in features_whitelist:
                    t = np.zeros(dim)
                    t[j] = 1.
                    A.append(t)
                    a.append(x_orig[j])
            if len(A) != 0:
                A = np.array(A)
                a = np.array(a)
                constraints += [A @ x == a]
        # Build the final program
        f = cp.Minimize(cp.trace(I @ X) - 2. * x.T @ x_orig)
        prob = cp.Problem(f, constraints)
        # Solve it!
        self._solve(prob)
        return x.value.reshape(dim)
class DCQP(SupportAffinePreprocessing):
    """Difference-of-convex-quadratic program (DCQP) for computing counterfactuals.

    .. math:: \\underset{\\vec{x} \\in \\mathbb{R}^d}{\\min} \\vec{x}^\\top Q_0 \\vec{x} + \\vec{q}^\\top \\vec{x} + c - \\vec{x}^\\top Q_1 \\vec{x} \\quad \\text{s.t. } \\vec{x}^\\top A0_i \\vec{x} + \\vec{x}^\\top \\vec{b_i} + r_i - \\vec{x}^\\top A1_i \\vec{x} \\leq 0 \\; \\forall\\,i

    Attributes
    ----------
    pccp : instance of :class:`ceml.optim.cvx.PenaltyConvexConcaveProcedure`
        Penalty convex-concave procedure used to approximately solve the DCQP;
        created by :meth:`build_program`.
    epsilon : `float`
        "Small" non-negative number for relaxing strict inequalities.
    """
    def __init__(self, **kwds):
        self.pccp = None  # built lazily in `build_program`
        super().__init__(**kwds)

    def build_program(self, model, x_orig, y_target, Q0, Q1, q, c, A0_i, A1_i, b_i, r_i, features_whitelist=None, mad=None, optimizer_args=None):
        """Builds the DCQP from its matrices, vectors and constants.

        Parameters
        ----------
        model : `object`
            Model used when accepting a counterfactual - must provide `predict`.
        x_orig : `numpy.ndarray`
            Data point whose prediction is to be explained.
        y_target : `int` or `float`
            Requested prediction of the counterfactual - e.g. a class label.
        Q0, Q1 : `numpy.ndarray`
            Quadratic terms of the objective.
        q : `numpy.ndarray`
            Linear term of the objective.
        c : `float`
            Constant term of the objective.
        A0_i, A1_i : `list(numpy.ndarray)`
            Quadratic terms of the constraints.
        b_i : `list(numpy.ndarray)`
            Linear terms of the constraints.
        r_i : `list(float)`
            Constant terms of the constraints.
        features_whitelist : `list(int)`, optional
            Feature indices (input dimensions) that may be changed when
            computing the counterfactual; None means all features.
            The default is None.
        mad : `numpy.ndarray`, optional
            Weights for the weighted Manhattan distance; None means the
            Euclidean distance is used. The default is None.
        optimizer_args : `dict`, optional
            Overrides for the optimizer's default hyperparameters.
            The default is None.
        """
        self.y_target = y_target
        self.x_orig = x_orig
        self.pccp = PenaltyConvexConcaveProcedure(model, Q0, Q1, q, c, A0_i, A1_i, b_i, r_i, features_whitelist, mad, optimizer_args)

    def solve(self, x0):
        """Approximately solves the DCQP via the penalty convex-concave procedure.

        Parameters
        ----------
        x0 : `numpy.ndarray`
            Initial iterate - any point works, but a "good" initial solution
            might lead to a better result.
        """
        self.pccp.set_affine_preprocessing(**self.get_affine_preprocessing())
        return self.pccp.compute_counterfactual(self.x_orig, self.y_target, x0)
class PenaltyConvexConcaveProcedure(SupportAffinePreprocessing):
    """Implementation of the penalty convex-concave procedure for approximately solving a DCQP.

    Parameters
    ----------
    model : `object`
        Model used to decide when a counterfactual is accepted - must provide
        a method `predict`.
    Q0, Q1 : `numpy.ndarray`
        Convex resp. concave quadratic term of the objective.
    q : `numpy.ndarray`
        Linear term of the objective.
    c : `float`
        Constant term of the objective.
    A0_i, A1_i : `list(numpy.ndarray)`
        Convex resp. concave quadratic terms of the constraints.
    b_i : `list(numpy.ndarray)`
        Linear terms of the constraints.
    r_i : `list(float)`
        Constant terms of the constraints.
    features_whitelist : `list(int)`, optional
        Indices of features that may be changed; all other features are fixed
        to their value in the original input. None means all features.
        The default is None.
    mad : `numpy.ndarray`, optional
        Weights of the weighted Manhattan distance; if None, the quadratic
        objective is used. The default is None.
    optimizer_args : `dict`, optional
        Overrides for `epsilon`, `tao`, `tao_max`, `mu`, `solver` and
        `solver_verbosity`. The default is None.

    Raises
    ------
    ValueError
        If the lists of constraint parameters have inconsistent lengths.
    """
    def __init__(self, model, Q0, Q1, q, c, A0_i, A1_i, b_i, r_i, features_whitelist=None, mad=None, optimizer_args=None, **kwds):
        self.model = model
        self.mad = mad
        self.features_whitelist = features_whitelist
        self.Q0 = Q0
        self.Q1 = Q1
        self.q = q
        self.c = c
        self.A0s = A0_i
        self.A1s = A1_i
        self.bs = b_i
        self.rs = r_i
        self.dim = None
        # Hyperparameters of the penalty CCP.
        self.epsilon = 1e-2    # relaxation of strict inequalities
        self.tao = 1.2         # initial penalty weight on the slacks
        self.tao_max = 100     # stop once the penalty weight exceeds this
        self.mu = 1.5          # multiplicative growth factor of the penalty
        self.solver = cp.SCS
        self.solver_verbosity = False
        if optimizer_args is not None:
            if "epsilon" in optimizer_args:
                self.epsilon = optimizer_args["epsilon"]
            if "tao" in optimizer_args:
                self.tao = optimizer_args["tao"]
            if "tao_max" in optimizer_args:
                self.tao_max = optimizer_args["tao_max"]
            if "mu" in optimizer_args:
                self.mu = optimizer_args["mu"]
            if "solver" in optimizer_args:
                self.solver = cvx_desc_to_solver(optimizer_args["solver"])
            if "solver_verbosity" in optimizer_args:
                self.solver_verbosity = optimizer_args["solver_verbosity"]
        if not(len(self.A0s) == len(self.A1s) and len(self.A0s) == len(self.bs) and len(self.rs) == len(self.bs)):
            raise ValueError("Inconsistent number of constraint parameters")
        super().__init__(**kwds)

    def _solve(self, prob):
        prob.solve(solver=self.solver, verbose=self.solver_verbosity)

    def solve_aux(self, xcf, tao, x_orig):
        """Solves one convexified subproblem around the current iterate *xcf*.

        The concave parts of objective and constraints are linearized at
        *xcf*; slack variables `s` (penalized with weight *tao*) keep the
        subproblem feasible. Returns the new iterate, or *x_orig* if the
        solver fails.
        """
        try:
            self.dim = x_orig.shape[0]
            # Variables
            var_x = cp.Variable(self.dim)
            s = cp.Variable(len(self.A0s))  # one slack per constraint
            var_x_prime = self._apply_affine_preprocessing_to_var(var_x)
            # Constants
            s_z = np.zeros(len(self.A0s))
            s_c = np.ones(len(self.A0s))
            # Build constraints: convex part kept exact, concave part
            # linearized at xcf, slack s[i] relaxes the inequality.
            # (Bug fix: removed a dead `c = np.ones(self.dim)` assignment that
            # was always overwritten before use and shadowed other names.)
            constraints = []
            for i in range(len(self.A0s)):
                quad_term = cp.quad_form(var_x_prime, self.A0s[i])
                lin_term = var_x_prime.T @ self.bs[i]
                const_term = self.rs[i] + np.dot(xcf, np.dot(xcf, self.A1s[i])) - 2. * var_x_prime.T @ np.dot(xcf, self.A1s[i]) - s[i]
                constraints.append(quad_term + lin_term + const_term + self.epsilon <= 0)
            # If requested, fix some features
            if self.features_whitelist is not None:
                A = []
                a = []
                for j in range(self.dim):
                    if j not in self.features_whitelist:
                        t = np.zeros(self.dim)
                        t[j] = 1.
                        A.append(t)
                        a.append(x_orig[j])
                if len(A) != 0:
                    A = np.array(A)
                    a = np.array(a)
                    constraints += [A @ var_x == a]
            # Build the final program
            f = None
            if self.mad is not None:  # TODO: Right now, mad != 1 is not supported.
                f = cp.Minimize(cp.norm(var_x - x_orig, 1) + s.T @ (tao*s_c))
            else:
                f = cp.Minimize(cp.quad_form(var_x_prime, self.Q0) + self.q.T @ var_x_prime + self.c + np.dot(xcf, np.dot(xcf, self.Q1)) - 2. * var_x_prime.T @ np.dot(xcf, self.Q1) + s.T @ (tao*s_c))
            constraints += [s >= s_z]  # slacks must be non-negative
            prob = cp.Problem(f, constraints)
            # Solve it!
            self._solve(prob)
            if var_x.value is None:
                raise Exception("No solution found!")
            else:
                return var_x.value
        except Exception as ex:
            # Best effort: fall back to the original input if anything fails.
            logging.debug(str(ex))
            return x_orig

    def compute_counterfactual(self, x_orig, y_target, x0):
        """Runs the penalty CCP starting from *x0* until the model predicts
        *y_target* or the penalty weight exceeds `tao_max`.
        """
        ####################################
        # Penalty convex-concave procedure #
        ####################################
        # Initial feasible solution
        xcf = x0
        # Hyperparameters
        cur_tao = self.tao
        # Solve a bunch of CCPs
        while cur_tao < self.tao_max:
            cur_xcf = xcf
            if cur_xcf.shape == x_orig.shape:  # Apply transformation if necessary - xcf is computed in the original space, which can differ from the space the model works on!
                cur_xcf = self._apply_affine_preprocessing_to_const(cur_xcf)
            xcf_ = self.solve_aux(cur_xcf, cur_tao, x_orig)
            xcf = xcf_
            if y_target == self.model.predict([self._apply_affine_preprocessing_to_const(xcf_)])[0]:
                break
            # Increase penalty parameter
            cur_tao *= self.mu
        return xcf
#################################################
# Stuff for computing plausible counterfactuals #
#################################################
class HighDensityEllipsoids:
    """For every mixture component, computes the smallest squared Mahalanobis
    radius such that the corresponding ellipsoid covers all samples whose
    density is at least `density_threshold`.
    """
    def __init__(self, X, X_densities, cluster_probs, means, covariances, density_threshold=None, optimizer_args=None, **kwds):
        self.X = X
        self.X_densities = X_densities
        # A threshold of -inf means: every sample must be covered.
        self.density_threshold = float("-inf") if density_threshold is None else density_threshold
        self.cluster_probs = cluster_probs
        self.means = means
        self.covariances = covariances
        self.epsilon = 1e-5
        self.solver = cp.SCS
        if optimizer_args is not None:
            if "epsilon" in optimizer_args:
                self.epsilon = optimizer_args["epsilon"]
            if "solver" in optimizer_args:
                self.solver = cvx_desc_to_solver(optimizer_args["solver"])
        super().__init__(**kwds)

    def compute_ellipsoids(self):
        """Solves the optimization problem and returns the squared radii."""
        return self.build_solve_opt()

    def _solve(self, prob):
        prob.solve(solver=self.solver, verbose=False)

    def build_solve_opt(self):
        """Builds and solves the program over the squared radii."""
        n_ellipsoids = self.cluster_probs.shape[1]
        n_samples = self.X.shape[0]
        # One (squared) radius per ellipsoid.
        r = cp.Variable(n_ellipsoids, pos=True)
        # Every sufficiently dense sample must lie inside its ellipsoid.
        constraints = []
        for ell_idx in range(n_ellipsoids):
            center = self.means[ell_idx]
            precision = np.linalg.inv(self.covariances[ell_idx])
            for sample_idx in range(n_samples):
                # At least as good as the requested negative log-likelihood.
                if self.X_densities[sample_idx][ell_idx] >= self.density_threshold:
                    delta = self.X[sample_idx,:] - center
                    sq_dist = np.dot(delta, np.dot(precision, delta))
                    constraints.append(sq_dist <= r[ell_idx])
        # Minimize the total size of all ellipsoids and solve.
        prob = cp.Problem(cp.Minimize(cp.sum(r)), constraints)
        self._solve(prob)
        return r.value
class PlausibleCounterfactualOfHyperplaneClassifier():
    """Computes *plausible* counterfactuals of a linear (hyperplane) classifier.

    Plausibility is enforced by constraining the counterfactual to lie inside
    a high-density ellipsoid of a class-conditional Gaussian mixture model
    (see `setup_plausibility_params`).

    Parameters
    ----------
    w : `numpy.ndarray`
        Weight vector (binary case) or weight matrix with one row per class.
    b : `numpy.ndarray` or `float`
        Intercept(s) of the hyperplane classifier.
    n_dims : `int`
        Dimensionality of the input space.
    """
    def __init__(self, w, b, n_dims, **kwds):
        self.hyperplane_w = w
        self.hyperplane_b = b
        self.n_dims = n_dims
        # GMM parameters and ellipsoid radii; must be set via
        # `setup_plausibility_params` before computing counterfactuals.
        self.gmm_weights = None
        self.gmm_means = None
        self.gmm_covariances = None
        self.ellipsoids_r = None
        # Optional affine preprocessing: x -> projection_matrix @ (x - projection_mean_sub)
        self.projection_matrix = None
        self.projection_mean_sub = None
        self.density_constraint = None
        self.min_density = None
        self.epsilon = 1e-2  # margin that turns strict inequalities into non-strict ones
        self.solver = cp.SCS
        self.gmm_cluster_index = 0 # For internal use only!
        super().__init__(**kwds)

    def setup_plausibility_params(self, ellipsoids_r, gmm_weights, gmm_means, gmm_covariances, projection_matrix=None, projection_mean_sub=None, density_constraint=True, density_threshold=-85):
        """Stores the GMM/ellipsoid parameters that define plausibility.

        If *projection_matrix* / *projection_mean_sub* are None, the identity
        projection is used.
        """
        self.gmm_weights = gmm_weights
        self.gmm_means = gmm_means
        self.gmm_covariances = gmm_covariances
        self.ellipsoids_r = ellipsoids_r
        self.projection_matrix = np.eye(self.n_dims) if projection_matrix is None else projection_matrix
        self.projection_mean_sub = np.zeros(self.n_dims) if projection_mean_sub is None else projection_mean_sub
        self.density_constraint = density_constraint
        self.min_density = density_threshold

    def _build_constraints_plausibility_opt(self, var_x, y):
        """Builds the cvxpy constraints forcing the (projected) point to be
        classified as class *y* with margin `epsilon`.
        """
        constraints = []
        if self.hyperplane_w.shape[0] > 1:
            # Multiclass: class y must beat every other class i.
            for i in range(self.hyperplane_w.shape[0]):
                if i != y:
                    constraints += [(self.projection_matrix @ (var_x - self.projection_mean_sub)).T @ (self.hyperplane_w[i,:] - self.hyperplane_w[y,:]) + (self.hyperplane_b[i] - self.hyperplane_b[y]) + self.epsilon <= 0]
        else:
            # Binary: sign of the decision function selects the class.
            if y == 0:
                return [(self.projection_matrix @ (var_x - self.projection_mean_sub)).T @ self.hyperplane_w.reshape(-1, 1) + self.hyperplane_b + self.epsilon <= 0]
            else:
                return [(self.projection_matrix @ (var_x - self.projection_mean_sub)).T @ self.hyperplane_w.reshape(-1, 1) + self.hyperplane_b - self.epsilon >= 0]
        return constraints

    def compute_plausible_counterfactual(self, x, y, regularizer="l1"):
        """Returns the closest plausible counterfactual of *x* labeled *y*.

        One optimization problem is solved per GMM component of class *y*;
        the candidate closest to *x* (L1 if regularizer == "l1", else L2)
        is returned. Returns None if no candidate was found.
        """
        mad = None
        if regularizer == "l1":
            mad = np.ones(x.shape[0])
        xcf = None
        s = float("inf")  # distance of the best candidate so far
        for i in range(self.gmm_weights[y].shape[0]):
            try:
                self.gmm_cluster_index = i
                xcf_ = self.build_solve_plausibility_opt(x, y, mad)
                if xcf_ is None:
                    continue
                s_ = None
                if regularizer == "l1":
                    s_ = np.sum(np.abs(xcf_ - x))
                else:
                    s_ = np.linalg.norm(xcf_ - x, ord=2)
                if s_ <= s:
                    s = s_
                    xcf = xcf_
            except Exception as ex:
                pass    # TODO: Proper exception handling
        return xcf

    def _solve_plausibility_opt(self, prob):
        prob.solve(solver=self.solver, verbose=False)

    def build_solve_plausibility_opt(self, x_orig, y, mad=None):
        """Builds and solves one plausibility problem for the GMM component
        selected by `gmm_cluster_index`; returns the optimal point (or None).
        """
        dim = x_orig.shape[0]
        # Variables
        x = cp.Variable(dim)
        beta = cp.Variable(dim)  # auxiliary variable for the Manhattan distance
        # Constants
        c = np.ones(dim)
        z = np.zeros(dim)
        I = np.eye(dim)
        # Construct constraints
        constraints = self._build_constraints_plausibility_opt(x, y)
        if self.density_constraint is True:
            i = self.gmm_cluster_index
            x_i = self.gmm_means[y][i]
            cov = self.gmm_covariances[y][i]
            cov = np.linalg.inv(cov)
            constraints += [cp.quad_form(self.projection_matrix @ (x - self.projection_mean_sub) - x_i, cov) - self.ellipsoids_r[i] <= 0] # Numerically much more stable than the explicit density component constraint
        # If necessary, construct the weight matrix for the weighted Manhattan distance
        Upsilon = None
        if mad is not None:
            alpha = 1. / mad
            Upsilon = np.diag(alpha)
        # Build the final program
        f = None
        if mad is not None:
            f = cp.Minimize(c.T @ beta) # Minimize (weighted) Manhattan distance
            constraints += [Upsilon @ (x - x_orig) <= beta, (-1. * Upsilon) @ (x - x_orig) <= beta, I @ beta >= z]
        else:
            f = cp.Minimize((1/2)*cp.quad_form(x, I) - x_orig.T@x) # Minimize L2 distance
        prob = cp.Problem(f, constraints)
        # Solve it!
        self._solve_plausibility_opt(prob)
        return x.value
1683992 | <filename>build/django-blog-zinnia/zinnia/urls/search.py
"""Urls for the Zinnia search"""
from django.conf.urls import url
from django.conf.urls import patterns
from zinnia.views.search import EntrySearch
# Single entry point: the search view, addressable by the name
# 'zinnia_entry_search' via reverse()/{% url %}.
# NOTE(review): django.conf.urls.patterns() was removed in Django 1.10;
# this module targets an older Django release.
urlpatterns = patterns(
    '',
    url(r'^$', EntrySearch.as_view(),
        name='zinnia_entry_search'),
)
| StarcoderdataPython |
318426 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Section 1
+ Python interpreters
+ Running Python
+ Python input/output
+ Python strings
+ Python integers and floats
'''
#%% Python interpreters
# Python has many interpreters:
# + CPython is the default Python interpreter, written in C.
# + ipython is an interactive interpreter built on top of CPython.
# + PyPy uses a JIT (dynamic compilation) to speed up Python.
# + JPython compiles Python code to Java bytecode and runs on the JVM.
# + IronPython compiles Python to .NET.

#%% Running Python
#!/usr/bin/env python3
# Combined with
# $ chmod a+x ls01.py
# the script can be executed directly.
# The line above looks up Python3 through the system environment variables,
# whereas
# !/usr/bin/python
# requires a python binary to exist under /usr/bin.
# Python3 supports UTF8 by default, while Python2 needs it declared explicitly.
# The Python2 spelling is:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# (with a decent editor plugin none of this is a hassle)
import sys
print(sys.version)
print('hello world')

#%% Output
# Note that print() appends a newline by default.
# Besides the basic form
print('hello')
# there are some Python-specific ways to print:
# 1. Several strings in one print call are joined with spaces
print('hello', 'world')
# 2. Raw strings (r'...') suppress escape processing
print('\n', r'\n')
# 3. Triple quotes create multi-line strings - they are strings, not
#    comments, although they are often used as multi-line comments
print('''UC
震惊部报道''')

#%% Input
# The simplest form of input; whatever is typed arrives as a string
x = input()
print(x)
# More friendly: input() can display a prompt
x = input('please input integer : ')
print('Input is', x)

#%% Strings
# Single and double quotes both work; when a string contains one kind of
# quote, using the other kind avoids escaping
print('"haha"', "I'm OK")
# Raw strings and multi-line strings, as introduced in the previous section
print(r'快来\n', '''UC
震惊部报道''')
# Escape rules are the same as in C.
# Formatting joins a format string and a tuple with %
print('%d %s' % (1, 'abc'))
# Character encodings:
# b'...' denotes a bytes array.
# len() on bytes vs str returns the number of bytes vs characters.
x = '中文'.encode('utf-8')
y = x.decode('utf-8')
print(x, len(x))
print(y, len(y))
# ord and chr convert between a character and its unicode code point
x = ord('中')
print(x)
print(chr(x))

#%% Booleans
# Logical operations return booleans:
# True / False - note the capitalization.
# Logical operators: and, or, not
print(True, False, True and False, not False, 1 > 2)
# Python has no way to declare true constants

#%% Integers and floats
# Unlike C, / in Python is floating-point division by default,
# while // is integer (floor) division.
# Another Python-specific operator is ** for exponentiation.
print(10 / 3)
print(10 // 3)
print(10 % 3)
print(2**10)
| StarcoderdataPython |
8119877 | <reponame>glowlex/pydashlite
from typing import Dict, TypeVar, Hashable, List
V = TypeVar('V')
K = TypeVar('K', bound=Hashable)


def chunkDict(obj: Dict[K, V], size: int = 1) -> List[Dict[K, V]]:
    """Split *obj* into a list of dicts, each holding at most *size* keys.

    Keys keep their insertion order; an empty dict yields an empty list.
    Raises ValueError when *size* is smaller than 1.
    """
    if size < 1:
        raise ValueError("size must be greater 0")
    keys = list(obj)
    chunks = []
    for start in range(0, len(keys), size):
        chunks.append({key: obj[key] for key in keys[start:start + size]})
    return chunks
| StarcoderdataPython |
8074684 | <filename>modules/db.py
# Get vulns to sync: every analysis in state EXPLOITABLE or IN_TRIAGE,
# joined with its vulnerability record.
temp1 = 'select * from "ANALYSIS" INNER JOIN "VULNERABILITY" ON "ANALYSIS"."VULNERABILITY_ID" = "VULNERABILITY"."ID" where "ANALYSIS"."STATE" in (\'EXPLOITABLE\', \'IN_TRIAGE\');'

# Get vulns to sync with properties: restrict to projects carrying the
# integrations/defectdojo.productid property, enrich with component data
# (NAME/VERSION/PURL), and select only EXPLOITABLE findings.
ex_get_vulns_to_sync='''
select * from "ANALYSIS" inner join "VULNERABILITY"
on "ANALYSIS"."VULNERABILITY_ID" = "VULNERABILITY"."ID" inner join
(select "PROJECT_ID","NAME","GROUPNAME","PROPERTYNAME","PROPERTYTYPE","PROPERTYVALUE" from "PROJECT"
INNER JOIN "PROJECT_PROPERTY" ON "PROJECT"."ID"="PROJECT_PROPERTY"."PROJECT_ID"
where "GROUPNAME" = 'integrations' and "PROPERTYNAME" = 'defectdojo.productid'
) as prop on prop."PROJECT_ID"="ANALYSIS"."PROJECT_ID" inner join
(select "ID","NAME","VERSION","PURL" from "COMPONENT"
) as comp on comp."ID" = "ANALYSIS"."COMPONENT_ID"
where "ANALYSIS"."STATE" = 'EXPLOITABLE';
'''
# Set indicator that vuln sent
def update_triage(id):
    """Return SQL that moves analysis *id* to the IN_TRIAGE state.

    NOTE(review): *id* is interpolated with str.format - SQL-injection risk
    if it can come from untrusted input; prefer a bound parameter.
    """
    return '''
    update "ANALYSIS" SET "STATE" = '{}' where "ID" = '{}'
    '''.format('IN_TRIAGE', id)
# Add commnet
def add_comment(id):
    """Return SQL inserting a sync audit comment for analysis *id*.

    The statement keeps a %s placeholder for the TIMESTAMP value, to be
    bound by the DB driver at execution time.
    NOTE(review): *id* is interpolated with str.format - SQL-injection risk
    if it can come from untrusted input; prefer a bound parameter.
    """
    return '''
    insert into "ANALYSISCOMMENT"("ANALYSIS_ID", "COMMENT", "COMMENTER", "TIMESTAMP") values ({}, '{}', '{}', %s)
    '''.format(int(id), 'EXPLOITABLE → IN_TRIAGE', 'sync')
# Package version, exposed for consumers and packaging tools.
__version__ = "0.1.0"

from .mws import MWS
from .datatypes import ShipFromAddress, InboundShipmentHeader

# Public API of the package.
__all__ = ['MWS', 'ShipFromAddress', 'InboundShipmentHeader']
| StarcoderdataPython |
6545334 | from rest_framework.response import Response
from rest_framework.decorators import api_view
from .models import Pessoa
import random
# Create your views here.
@api_view(['GET'])
def pessoas_view(request):
    """List every Pessoa as a JSON array of login/senha/data objects."""
    if request.method == 'GET':
        records = Pessoa.objects.all()
        payload = []
        for record in records:
            payload.append({
                'login': record.login,
                'senha': record.senha,
                'data': record.data
            })
        return Response(payload)
def generate_random_password(length):
    """Return a random lowercase password of *length* characters.

    NOTE(review): uses the non-cryptographic `random` module; real password
    generation should use the `secrets` module instead.
    NOTE(review): the alphabet below omits 'w' - confirm that is intended.
    """
    characters = ('abcdefghijklmnopqrstuvxyz')
    # str.join over a generator is O(n), unlike repeated += concatenation;
    # the sequence of random.choice calls (and thus the output for a given
    # seed) is unchanged.
    return ''.join(random.choice(characters) for _ in range(length))
@api_view(['POST'])
def new_pessoa_view(request):
    """Create a Pessoa from the POSTed login/senha/data and echo it back.

    When no usable password is supplied, a random 10-character one is
    generated.

    NOTE(review): the password is stored and returned in plain text - it
    should be hashed and never echoed back to the client.
    """
    if request.method == 'POST':
        login = request.data.get('login')
        requested_password = request.data.get('senha')
        date = request.data.get('data')
        # Bug fix: also generate a password when the 'senha' key is absent
        # (request.data.get returns None), not only when it equals ''.
        # Previously a missing key stored None as the password.
        password = requested_password if requested_password else generate_random_password(10)
        pessoa = Pessoa.objects.create(login=login,
                                       senha=password, data=date)
        return Response({
            'login': pessoa.login,
            'senha': pessoa.senha,
            'data': pessoa.data
        })
    return Response(request.data)
11342946 | # python 3
import matplotlib
# matplotlib.use('pgf')
# pgf_with_pdflatex = {
# "pgf.texsystem": "pdflatex",
# "pgf.preamble": [
# r"\usepackage[utf8x]{inputenc}",
# r"\usepackage[T1]{fontenc}",
# r"\usepackage{cmbright}",
# ]
# }
# matplotlib.rcParams.update(pgf_with_pdflatex)
import pandas
import re
from matplotlib2tikz import save as tikz_save
import numpy
from matplotlib import pyplot
matplotlib.style.use('ggplot')
pyplot.interactive(False)
def to_min_secs(x, pos):
    """Matplotlib tick formatter: seconds -> zero-padded 'MM:SS' string.

    *pos* is required by the FuncFormatter protocol and ignored.
    """
    total_seconds = int(x)
    minutes, seconds = divmod(total_seconds, 60)
    return '{:02d}:{:02d}'.format(minutes, seconds)
def get_speed_stats(speed_data_path):
    """Read one vehicle's speed-trace CSV and return (node_nr, mean, std).

    The node number is parsed from the second column's header, expected to
    look like 'ProvidenciaExampleScenario.<node>.veinsmobility.speed'; when
    it does not match, the placeholder '??' is returned instead.
    """
    df = pandas.read_csv(speed_data_path, sep=',', thousands=',')
    try:
        node_nr = re.search('ProvidenciaExampleScenario.(.+?).veinsmobility.speed', df.columns[1]).group(1)
    except AttributeError:
        node_nr = '??' # apply your error handling
    df.columns = ['time', 'speed']
    mean = df['speed'].mean()
    std = df['speed'].std()
    return (node_nr, mean, std)
def build_dataframe_case(case):
    """Load and join the mobility and application statistics of one case.

    Reads '<case>_stats_veinsmobility.csv' and '<case>_stats_appl.csv',
    normalizes module names to the bare node id, derives the per-vehicle
    mean speed, and returns everything merged into one DataFrame indexed
    by module.
    """
    # mobility data
    mobility_columns = ['module', 'max_speed', 'min_speed', 'start_time', 'stop_time',
                        'total_co2', 'total_dist', 'total_time']
    case_df_mobility = pandas.read_csv(case + '_stats_veinsmobility.csv')
    case_df_mobility.columns = mobility_columns
    mobility_search_re = 'ProvidenciaExampleScenario.(.+?).veinsmobility'
    # Keep only the node id from the fully qualified module path.
    case_df_mobility['module'] = case_df_mobility['module'].map(lambda x: re.search(mobility_search_re, x).group(1))
    case_df_mobility.set_index(['module'], inplace=True)
    # appl data (sent warnings, arrived at dest)
    appl_columns = ['module', 'arrived', 'rcvd_warnings', 'sent_warnings']
    case_df_appl = pandas.read_csv(case + '_stats_appl.csv')
    case_df_appl.columns = appl_columns
    appl_search_re = 'ProvidenciaExampleScenario.(.+?).appl'
    case_df_appl['module'] = case_df_appl['module'].map(lambda x: re.search(appl_search_re, x).group(1))
    case_df_appl['arrived'] = case_df_appl['arrived'].map({1: True, 0: False})
    case_df_appl.set_index(['module'], inplace=True)
    # Mean speed over the whole trip.
    case_df_speed = pandas.DataFrame()
    case_df_speed['mean_speed'] = case_df_mobility['total_dist'] / case_df_mobility['total_time']
    # join all tables
    case_df = pandas.merge(case_df_mobility, case_df_appl, left_index=True, right_index=True, how='outer')
    case_df = pandas.merge(case_df, case_df_speed, left_index=True, right_index=True, how='outer')
    return case_df
def buid_csv():
    """Write '<case>_total_stats.csv' for every simulation case.

    NOTE(review): 'buid' looks like a typo for 'build'; the name is kept
    because external callers may already reference it.
    """
    for case in ['per0.0', 'per1.0', 'base_case', 'per0.5', 'per0.75', 'per0.25']:
        df = build_dataframe_case(case)
        df.to_csv(case + '_total_stats.csv')
def analysis_arrived_vhc():
    """Bar chart of how many vehicles reached their destination per case.

    NOTE(review): per05_arrived_cnt is computed but deliberately left out of
    the plotted bars (matching the commented-out alternative below).
    """
    per00 = pandas.read_csv('per0.0_total_stats.csv').set_index(['module'])
    per10 = pandas.read_csv('per1.0_total_stats.csv').set_index(['module'])
    base = pandas.read_csv('base_case_total_stats.csv').set_index(['module'])
    per05 = pandas.read_csv('per0.5_total_stats.csv').set_index(['module'])
    per075 = pandas.read_csv('per0.75_total_stats.csv').set_index(['module'])
    per025 = pandas.read_csv('per0.25_total_stats.csv').set_index(['module'])
    # 'arrived' is boolean, so the sum is the number of arrived vehicles.
    base_arrived_cnt = base['arrived'].sum()
    per00_arrived_cnt = per00['arrived'].sum()
    per10_arrived_cnt = per10['arrived'].sum()
    per05_arrived_cnt = per05['arrived'].sum()
    per075_arrived_cnt = per075['arrived'].sum()
    per025_arrived_cnt = per025['arrived'].sum()
    objects = ('Caso Base', 'PER 0.0', 'PER 0.25', 'PER 0.75', 'PER 1.0')
    #objects = ('Caso Base', 'PER 0.0', 'PER 1.0')
    x_ax = numpy.arange(len(objects))
    #bars = [base_arrived_cnt, per00_arrived_cnt, per025_arrived_cnt,
    #        per05_arrived_cnt, per075_arrived_cnt, per10_arrived_cnt]
    bars = [base_arrived_cnt, per00_arrived_cnt, per025_arrived_cnt, per075_arrived_cnt, per10_arrived_cnt]
    pyplot.bar(x_ax, bars)
    #pyplot.yscale('log')
    pyplot.yticks(bars)
    pyplot.xticks(x_ax, objects)
    # Annotate each bar with its exact count.
    for a, b in zip(x_ax, bars):
        pyplot.text(a, b, str(b))
    #pyplot.ylabel('N° de vehículos que alcanzaron su destino')
    pyplot.title('N° de vehículos que alcanzaron su destino (escala log)')
    pyplot.show()
def analysis_speed():
    """Log-scale bar chart of the mean speed of vehicles that did NOT
    reach their destination, one bar per case.
    """
    per00 = pandas.read_csv('per0.0_total_stats.csv').set_index(['module'])
    per10 = pandas.read_csv('per1.0_total_stats.csv').set_index(['module'])
    base = pandas.read_csv('base_case_total_stats.csv').set_index(['module'])
    per05 = pandas.read_csv('per0.5_total_stats.csv').set_index(['module'])
    per075 = pandas.read_csv('per0.75_total_stats.csv').set_index(['module'])
    per025 = pandas.read_csv('per0.25_total_stats.csv').set_index(['module'])
    # Average mean speed restricted to the non-arrived vehicles of each case.
    y = [base.loc[base['arrived'] == False]['mean_speed'].mean(),
         per00.loc[per00['arrived'] == False]['mean_speed'].mean(),
         per025.loc[per025['arrived'] == False]['mean_speed'].mean(),
         per05.loc[per05['arrived'] == False]['mean_speed'].mean(),
         per075.loc[per075['arrived'] == False]['mean_speed'].mean(),
         per10.loc[per10['arrived'] == False]['mean_speed'].mean()]
    objects = ('Caso Base', 'PER 0.0', 'PER 0.25', 'PER 0.5', 'PER 0.75', 'PER 1.0')
    x = numpy.arange(len(objects))
    pyplot.bar(x, y)
    pyplot.yscale('log')
    #pyplot.yticks(y)
    pyplot.xticks(x, objects)
    pyplot.ylabel('Velocidad m/s')
    pyplot.title('Velocidades promedio de vehículos que NO alcanzaron su destino.')
    # Annotate each bar with its exact value.
    for a, b in zip(x, y):
        pyplot.text(a, b, str(b))
    pyplot.show()
def analysis_distance():
    """Grid of per-case histograms of total distance traveled by vehicles
    that did NOT reach their destination.

    NOTE(review): hist(..., normed=True) was removed in newer matplotlib
    (use density=True); this code targets an older matplotlib.
    """
    per00 = pandas.read_csv('per0.0_total_stats.csv').set_index(['module'])
    per10 = pandas.read_csv('per1.0_total_stats.csv').set_index(['module'])
    base = pandas.read_csv('base_case_total_stats.csv').set_index(['module'])
    per05 = pandas.read_csv('per0.5_total_stats.csv').set_index(['module'])
    per075 = pandas.read_csv('per0.75_total_stats.csv').set_index(['module'])
    per025 = pandas.read_csv('per0.25_total_stats.csv').set_index(['module'])
    # filter
    data = [base.loc[base['arrived'] != True]['total_dist'], per00.loc[per00['arrived'] != True]['total_dist'], per025.loc[per025['arrived'] != True]['total_dist'],
            per05.loc[per05['arrived'] != True]['total_dist'], per075.loc[per075['arrived'] != True]['total_dist'], per10.loc[per10['arrived'] != True]['total_dist']]
    labels = ['Caso Base', 'PER 0.0', 'PER 0.25',
              'PER 0.5', 'PER 0.75', 'PER 1.0']
    # Shared bins across all cases, spanning the base case's distance range.
    bins = numpy.linspace(0, base['total_dist'].max(), 50)
    fig, axes = pyplot.subplots(nrows=2, ncols=3, sharey=True)
    fig.suptitle("Frecuencia relativa de distancias recorridas - autos que NO llegaron a su destino.")
    for idx, ax in enumerate(axes.ravel()):
        x, y, _ = ax.hist(data[idx], bins, label=labels[idx], normed=True)
        pyplot.setp(ax.get_yticklabels(), visible=True)
        ax.legend(loc='upper right')
    pyplot.show()
def analysis_time():
    """Grid of per-case histograms of total travel time of vehicles that
    did NOT reach their destination.

    NOTE(review): the bins are derived from base['total_dist'] although the
    plotted data is total_time - this looks like a copy/paste slip from
    analysis_distance(); confirm before relying on the binning.
    """
    per00 = pandas.read_csv('per0.0_total_stats.csv').set_index(['module'])
    per10 = pandas.read_csv('per1.0_total_stats.csv').set_index(['module'])
    base = pandas.read_csv('base_case_total_stats.csv').set_index(['module'])
    per05 = pandas.read_csv('per0.5_total_stats.csv').set_index(['module'])
    per075 = pandas.read_csv('per0.75_total_stats.csv').set_index(['module'])
    per025 = pandas.read_csv('per0.25_total_stats.csv').set_index(['module'])
    # filter
    data = [base.loc[base['arrived'] == False]['total_time'], per00.loc[per00['arrived'] == False]['total_time'],
            per025.loc[per025['arrived'] == False]['total_time'],
            per05.loc[per05['arrived'] == False]['total_time'], per075.loc[per075['arrived'] == False]['total_time'],
            per10.loc[per10['arrived'] == False]['total_time']]
    labels = ['Caso Base', 'PER 0.0', 'PER 0.25',
              'PER 0.5', 'PER 0.75', 'PER 1.0']
    bins = numpy.linspace(0, base['total_dist'].max(), 50)
    fig, axes = pyplot.subplots(nrows=2, ncols=3)
    for idx, ax in enumerate(axes.ravel()):
        ax.hist(data[idx], bins, label=labels[idx], normed=True)
        ax.legend(loc='upper right')
    pyplot.show()
def per00_vs_per10_distancetime():
    """Scatter plot of total travel time vs total distance for the PER 0.0
    and PER 1.0 cases; saves the figure as TikZ and shows it.
    """
    per00 = pandas.read_csv('per0.0_total_stats.csv').set_index(['module'])
    per10 = pandas.read_csv('per1.0_total_stats.csv').set_index(['module'])
    fig, ax = pyplot.subplots()
    ax.set_facecolor('white')
    ax.grid(color='#a1a1a1', linestyle='-', alpha=0.1)
    ax.scatter(per00['total_dist'], per00['total_time'], marker='o', s=4, alpha=0.75, label='PER 0.0', color='#ff0000')
    ax.scatter(per10['total_dist'], per10['total_time'], marker='o', s=4, alpha=0.75, label='PER 1.0', color='#33cc22')
    ax.legend(loc='lower right')
    # Format the y axis (seconds) as MM:SS.
    formatter = matplotlib.ticker.FuncFormatter(to_min_secs)
    # Bug fix: set_major_formatter and set_ylabel were each called twice;
    # the duplicates were redundant no-ops and have been removed.
    ax.yaxis.set_major_formatter(formatter)
    pyplot.xlabel('Distancia Total [m]')
    ax.set_ylabel('Tiempo Total [MM:SS]')
    tikz_save('per00per10_timedistance.tex',
              figureheight='\\figureheight',
              figurewidth='\\figurewidth')
    pyplot.show()
def per00_vs_per10_co2distance():
    """Scatter plot of total CO2 emitted vs total distance for the PER 0.0
    and PER 1.0 cases; saves the figure as TikZ and shows it.
    """
    per00 = pandas.read_csv('per0.0_total_stats.csv').set_index(['module'])
    per10 = pandas.read_csv('per1.0_total_stats.csv').set_index(['module'])
    fig, ax = pyplot.subplots()
    ax.set_facecolor('white')
    ax.grid(color='#a1a1a1', linestyle='-', alpha=0.1)
    ax.scatter(per00['total_dist'], per00['total_co2'], marker='o', s=4, alpha=0.75, label='PER 0.0', color='#ff0000')
    ax.scatter(per10['total_dist'], per10['total_co2'], marker='o', s=4, alpha=0.75, label='PER 1.0', color='#33cc22')
    ax.legend(loc='lower right')
    #formatter = matplotlib.ticker.FuncFormatter(to_min_secs)
    #ax.yaxis.set_major_formatter(formatter)
    ax.set_ylabel('CO2 Total [g]')
    pyplot.xlabel('Distancia Total')
    #pyplot.savefig('per00per10_co2.pgf')
    tikz_save('per00per10_co2.tex',
              figureheight='\\figureheight',
              figurewidth='\\figurewidth')
    pyplot.show()
def per00_vs_per10_speedhist():
    """Overlaid histograms of per-vehicle mean speed, PER 0.0 vs PER 1.0.

    Cleanup: removed a stale commented-out index-alignment experiment that
    referenced intermediate variables no longer in use.
    """
    per00 = pandas.read_csv('per0.0_total_stats.csv').set_index(['module'])
    per10 = pandas.read_csv('per1.0_total_stats.csv').set_index(['module'])
    # Drop vehicles without a finite mean speed (e.g. zero recorded time).
    per00 = per00[numpy.isfinite(per00['mean_speed'])]
    per10 = per10[numpy.isfinite(per10['mean_speed'])]
    # Shared bins spanning the PER 0.0 speed range.
    bins = numpy.linspace(0, per00['mean_speed'].max(), 100)
    fig, ax = pyplot.subplots()
    ax.set_facecolor('white')
    ax.grid(color='#a1a1a1', linestyle='-', alpha=0.1)
    ax.hist(per00['mean_speed'], bins, alpha=1.0, label='PER 0.0', color='#ff0000')
    ax.hist(per10['mean_speed'], bins, alpha=0.75, label='PER 1.0', color='#33cc22')
    ax.legend(loc='lower right')
    pyplot.xlabel('Velocidad Promedio [m/s]')
    pyplot.ylabel('Frecuencia')
    pyplot.show()
if __name__ == '__main__':
    # Entry point: only the two PER 0.0 vs PER 1.0 comparison plots are
    # active; the remaining analyses are kept commented for manual use.
    # buid_csv()
    #analysis_arrived_vhc()
    #analysis_distance()
    #analysis_time()
    # analysis_speed()
    per00_vs_per10_distancetime()
    per00_vs_per10_co2distance()
    #per00_vs_per10_speedhist()
| StarcoderdataPython |
8176507 | <reponame>demon-xxi/r8
#!/usr/bin/env python
# Copyright (c) 2017, the R8 project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import sys
import toolhelper
if __name__ == '__main__':
    # Forward all CLI arguments to the R8 tool and propagate its exit code.
    sys.exit(toolhelper.run('r8', sys.argv[1:]))
| StarcoderdataPython |
1627644 | #!/usr/bin/env python
from setuptools import setup, find_packages
# Packaging metadata for the cabot-alert-xoxzo plugin.
setup(name='cabot-alert-xoxzo',
      version='1.0.0',
      description='A XoxZo plugin for Cabot by Arachnys',
      author='Shaurya',
      author_email='<EMAIL>',
      url='http://cabotapp.com',
      packages=find_packages(),
      )
| StarcoderdataPython |
6565611 | <filename>container/model.py
import collections
'''
processing_callback: callable; called with (in_fh, out_fh)
'''
# Immutable record describing one container image upload: where to pull
# from (source_ref), where to push to (target_ref), and a callback that may
# transform the image stream on the way through.
ContainerImageUploadRequest = collections.namedtuple(
    'ContainerImageUploadRequest',
    ['source_ref', 'target_ref', 'processing_callback'],
    # defaults=[None], XXX re-enable after upgrading to Python3.7
)
| StarcoderdataPython |
6470895 | import pandas as pd
import numpy as np
import random
import csv
def isNan(string):
    """Return True when the value is a float NaN - the only value that
    compares unequal to itself.
    """
    value = string
    return value != value
def occurences(model):
    """Return a colour -> fill-count mapping for *model*.

    Scans the module-level DataFrame `df`: counts how often each colour
    appears among the rows of *model* with a non-NaN paint_color, then
    rescales those counts so they sum (approximately, due to rounding) to
    the number of NaN-coloured rows of that model. The result tells how
    many NaN rows should be filled with each colour.

    NOTE(review): raises ZeroDivisionError if the model has colours only in
    NaN rows is fine, but notnulls == 0 with nulls > 0 would divide by zero
    - confirm every model has at least one coloured row.
    """
    nulls = 0
    notnulls = 0
    occurence = {}
    #finds occurence of each color
    for i in range(len(df)):
        #find df model that matches and get colors that arent null
        if isNan(df['paint_color'][i]) == False and df['model'][i] == model:
            #update occurence of model
            if df['paint_color'][i] in occurence:
                occurence[df['paint_color'][i]] += 1
            else:
                occurence[df['paint_color'][i]] = 1
            notnulls += 1
        #nan for model
        elif df['model'][i] == model:
            nulls += 1
    # Rescale each colour count from "observed" to "number of NaNs to fill".
    for key in occurence:
        occurence[key] = round((occurence[key] / notnulls) * nulls)
    return occurence
def fillNan(dictionary):
    """Return a randomly ordered list containing each key of *dictionary*
    exactly as many times as its (non-negative integer) value.

    Side effect (preserved from the original implementation): every value
    in *dictionary* is set to 0 on return.

    Performance fix: the original drew random keys and retried whenever the
    drawn key's count was exhausted, which degenerates to unbounded retries
    as counts deplete. Building the multiset once and shuffling it produces
    the same multiset in O(total count) time.
    """
    nullList = []
    for key, count in dictionary.items():
        nullList.extend([key] * count)
    random.shuffle(nullList)
    # Preserve the original's side effect of consuming all counts.
    for key in dictionary:
        dictionary[key] = 0
    return nullList
def listOfModelColours(model):
    """Return a randomly ordered list of colours for *model*, one entry per
    missing-colour row, distributed like the model's observed colours.
    """
    modelOccurenceDict = occurences(model)
    #NOTE: NUMBER OF VALUES IN DICT MAY BE HIGHER OR LOWER BY A NUMBER OR SO.
    newNullList = fillNan(modelOccurenceDict)
    return newNullList
# Load the scraped car listings; several columns are forced to string dtype.
df = pd.read_csv("out.csv", dtype={"numbers":"string", "condition": "string", "id": "string", "odometer":"string", "price":"string","year":"string"})
models = list(set(df['model']))
# Pre-compute, per model, the colours that will replace its NaN entries.
modelDict = {}
for model in models:
    """fill in every value for model if there is at least one of these models with
    a colour to base off of. Find ratio of color, then create list of size nan model colors"""
    modelDict[model] = listOfModelColours(model)
filledInList = []
#iterate through and append each corresponding list
for i in range(len(df)):
    #i is model and color index
    #check if color is null
    if isNan(df['paint_color'][i]):
        #check if the modelDict has color values to append with
        if len(modelDict[df['model'][i]]) > 0:
            #take modelDict value list and append
            filledInList.append([df['model'][i], modelDict[df['model'][i]].pop()])
        else:
            #take df model and value and append
            filledInList.append([df['model'][i], df['paint_color'][i]])
    else:
        #take df model and value and append
        filledInList.append([df['model'][i], df['paint_color'][i]])
#filledInList contains [model, color]
# NOTE(review): this rebinds df, discarding all columns except model/colour.
df = pd.DataFrame(filledInList)
df.to_csv("dummy.csv")
107130 | <reponame>choderalab/gin
import gin
import flow
import tensorflow as tf
import numpy as np
import lime
import chinese_postman_routes
# Build small alkane molecules (2 and 3 carbons) and add explicit hydrogens.
mols = [gin.i_o.from_smiles.to_mol(idx * 'C') for idx in range(2, 4)]
mols = [gin.deterministic.hydrogen.add_hydrogen(mol) for mol in mols]

# Pre-compute the Chinese-postman traversals of each molecular graph and
# the number of available routes per molecule.
_chinese_postman_routes = [chinese_postman_routes.chinese_postman(mol[1]) for mol in mols]
n_postmen = [tf.shape(route)[0] for route in _chinese_postman_routes]

graph_flow = flow.GraphFlow(flow_depth=3, whiten=True)
optimizer = tf.keras.optimizers.Adam(1e-3)

# Cleanup: removed large stale commented-out experiments (conformation
# pre-optimization, forward x->z pass) that referenced names no longer
# defined in this script.
for epoch_idx in range(10000):
    # Non-persistent tape: the gradient is taken exactly once per step,
    # so persistent=True (used previously) only wasted memory.
    with tf.GradientTape() as tape:
        loss = 0.
        for idx in range(2):
            mol = mols[idx]
            # Latent sample: batch of 8, 2*(idx+2)+2 rows per molecule
            # (presumably one per modeled atom - TODO confirm the count
            # formula against the flow's expected input), 3 coordinates.
            z = tf.random.normal((8, 2 * (idx + 2) + 2, 3))
            # Decode latents into conformations along randomly chosen
            # Chinese-postman routes.
            x_, log_det = graph_flow.f_zx(
                z,
                mol[0],
                mol[1],
                tf.gather(
                    _chinese_postman_routes[idx],
                    tf.random.categorical(
                        tf.ones((1, n_postmen[idx]), dtype=tf.float32),
                        8)[0]))
            bond_energy, angle_energy, one_four_energy, nonbonded_energy = gin.deterministic.mm.alkane_energy.alkane_energy(
                mol[0], mol[1], x_)
            # Energy term (bond + angle only; one-four term intentionally
            # excluded) minus the log-determinant forms the training loss.
            h_zx = tf.reduce_sum(bond_energy) + tf.reduce_sum(angle_energy)
            ts_zx = tf.reduce_sum(log_det)
            loss += h_zx - ts_zx
    print(loss)
    grads = tape.gradient(loss, graph_flow.variables)
    optimizer.apply_gradients(zip(grads, graph_flow.variables))
    # Periodically checkpoint the flow's weights.
    if epoch_idx % 100 == 0:
        graph_flow.save_weights('graph_flow.h5')
| StarcoderdataPython |
1910116 | <reponame>pschulam/lmbases
import numpy as np
import lmbases
def test_against_r_splines_uniform():
    """Check lmbases.BSplines against R's bs() with uniformly spaced knots.

    The expected design matrix was produced by this R session:

        > library(splines)
        > x <- c(1.5, 3.3, 5.1, 7.2, 9.9)
        > k <- c(2.5, 5.0, 7.5)
        > b <- bs(x, knots=k, degree=2, intercept=TRUE, Boundary.knots=c(0, 10))
        > print(b)
    """
    eval_points = np.array([1.5, 3.3, 5.1, 7.2, 9.9])
    expected = np.array([
        [0.16, 0.6600, 0.1800, 0.0000, 0.0000, 0.0000],
        [0.00, 0.2312, 0.7176, 0.0512, 0.0000, 0.0000],
        [0.00, 0.0000, 0.4608, 0.5384, 0.0008, 0.0000],
        [0.00, 0.0000, 0.0072, 0.6056, 0.3872, 0.0000],
        [0.00, 0.0000, 0.0000, 0.0008, 0.0776, 0.9216],
    ])
    basis = lmbases.BSplines(low=0.0, high=10.0, num_bases=6, degree=2)
    assert np.allclose(basis.design(eval_points), expected)
def test_against_r_splines_quantiles():
    """Check lmbases.BSplines against R's bs() with quantile-placed knots.

    The expected design matrix was produced by this R session:

        > library(splines)
        > x <- c(1.5, 3.3, 5.1, 7.2, 9.9)
        > b <- bs(x, degree=2, df=6, intercept=TRUE, Boundary.knots=c(0, 10))
        > print(b)

    (R places the interior knots at the 25%/50%/75% quantiles of x: 3.3, 5.1, 7.2.)
    """
    eval_points = np.array([1.5, 3.3, 5.1, 7.2, 9.9])
    expected = np.array([
        [0.2975207, 0.5687895, 0.1336898, 0.000000000, 0.0000000, 0.0000000],
        [0.0000000, 0.3529412, 0.6470588, 0.000000000, 0.0000000, 0.0000000],
        [0.0000000, 0.0000000, 0.5384615, 0.461538462, 0.0000000, 0.0000000],
        [0.0000000, 0.0000000, 0.0000000, 0.571428571, 0.4285714, 0.0000000],
        [0.0000000, 0.0000000, 0.0000000, 0.000728863, 0.0694242, 0.9298469],
    ])
    # Passing x makes BSplines place its knots at the data quantiles.
    basis = lmbases.BSplines(low=0.0, high=10.0, num_bases=6, degree=2, x=eval_points)
    assert np.allclose(basis.design(eval_points), expected)
| StarcoderdataPython |
12857364 | import json
import logging
import os
import pdb
import re
from helpers.app_helpers import *
from helpers.page_helpers import *
from helpers.jinja2_helpers import *
from helpers.telegram_helpers import *
#from main import *
#from flask import request
################################################################################
# Setup helper functions
################################################################################
def get_machine_status(log_string):
    """Parse a machine-status log line.

    Expected shape: "Machine [<name>] RDP session has [<ip>:<port>]".
    Returns a (box_name, box_ip) tuple (port stripped), or None when the
    line does not match.
    """
    pattern = re.compile(r"Machine \[(?P<box_name>.+)\] RDP session has \[(?P<box_ip>.*)\]")
    match = pattern.match(log_string)
    if match is None:
        return None
    name = match.group("box_name")
    # Drop the ":port" suffix, keeping only the address part.
    address = match.group("box_ip").split(":")[0]
    return (name, address)
def get_box_statuses():
    """Load box statuses from static/data/box_statuses.json (relative to cwd).

    Returns the parsed JSON dict, or an empty dict when the file does not exist.
    """
    statuses_path = os.path.join(os.getcwd(), os.path.relpath("static/data/box_statuses.json"))
    if not os.path.exists(statuses_path):
        return {}
    with open(statuses_path, "rb") as statuses_file:
        return json.loads(statuses_file.read())
def save_box_statuses(box_statuses):
    """Persist *box_statuses* as JSON to static/data/box_statuses.json.

    Best-effort: any write failure is logged, never raised.
    """
    logging.debug("IN save_box_statuses()")
    statuses_path = os.path.join(os.getcwd(), os.path.relpath("static/data/box_statuses.json"))
    try:
        with open(statuses_path, "w+") as statuses_file:
            statuses_file.write(json.dumps(box_statuses))
        logging.debug("Saved!")
    except Exception as ex:
        # Swallow and log: status persistence must not break the caller.
        logging.error(ex)
def update_box_statuses(log_string):
    """Parse a status log line and persist the machine's new state.

    No-op when *log_string* is not a machine-status line. A non-empty IP
    means the box is in use; an empty IP means it is available.

    Fix: ``dict.has_key()`` was removed in Python 3 -- replaced with ``in``.
    """
    logging.debug("IN update_box_statuses()")
    result = get_machine_status(log_string)
    if result is None:
        return
    logging.debug("IN result is not None")
    box_statuses = get_box_statuses()
    box_name, box_ip = result
    logging.debug("box_name: %s, box_ip: %s" % (box_name, box_ip))
    if box_name not in box_statuses:
        box_statuses[box_name] = {}
    box_statuses[box_name]["status"] = "In use" if len(box_ip) > 0 else "Available"
    box_statuses[box_name]["comment"] = box_ip
    save_box_statuses(box_statuses)
################################################################################
# Setup routes
################################################################################
@route('/api/telegram/updates', method='POST')
def api_telegram_plato_dev_post():
    """Telegram webhook endpoint: feed incoming update text to the box-status tracker.

    Returns the stringified update payload, or None for an empty body.

    Fix: ``dict.has_key()`` was removed in Python 3 -- replaced with ``in``.
    """
    logging.debug("IN api_telegram_plato_dev_post()")
    json_data = request.json
    if json_data is None:
        return None
    try:
        logging.info(str(json_data))
        # Direct messages and channel posts carry their text in different keys.
        message_text = ""
        if "message" in json_data:
            message_text = json_data["message"]["text"]
        if "channel_post" in json_data:
            message_text = json_data["channel_post"]["text"]
        logging.debug("message_text is:" + message_text)
        update_box_statuses(message_text)
    except Exception as ex:
        # Never let a malformed update break the webhook; log and move on.
        logging.error(ex)
    return str(json_data)
@route('/api/telegram/brahman-devops/sendMessage', method='POST')
def api_telegram_plato_dev_send_message_post():
    """Relay a message to a Telegram chat via the bot.

    Expects a JSON body with 'chat_id' and 'message'; returns Telegram's
    parsed response, or the JSON string '"{}"' when either field is missing.

    Idiom fix: replaced ``'k' in d.keys()`` membership checks with
    ``dict.get`` lookups.
    """
    logging.debug("IN api_telegram_plato_dev_send_message_post()")
    chat_id = request.json.get('chat_id')
    message = request.json.get('message')
    if chat_id is None or message is None:
        response.set_header('Content-Type', 'application/json')
        return json.dumps("{}")
    json_response_string = send_message(appconfig["telegram"]["token"], chat_id, message)
    json_response_object = json.loads(json_response_string)
    response.set_header('Content-Type', 'application/json')
    return json_response_object
@route('/api/telegram/setWebhook', method='POST')
def api_telegram_set_webhook_post():
    """Register this service's webhook URL with Telegram; returns Telegram's JSON reply."""
    logging.debug("IN api_telegram_set_webhook_post()")
    body = set_webhook(appconfig["telegram"]["token"])
    response.set_header('Content-Type', 'application/json')
    return body
@route('/api/telegram/getme', method='POST')
def api_telegram_getme_get():
    """Proxy Telegram's getMe call for this bot; responds with Telegram's JSON.

    Example reply:
        {"ok": true, "result": {"username": "plato_dev_bot",
         "first_name": "plato-dev-bot", "is_bot": true, "id": 407476479}}
    """
    logging.debug("IN api_telegram_getme_get()")
    body = get_me(appconfig["telegram"]["token"])
    response.set_header('Content-Type', 'application/json')
    return body
| StarcoderdataPython |
1638341 | from collections import OrderedDict
import json
import errno
import os
import re
class ConfigError(Exception):
    """Raised for configuration-related failures."""
class FileNotFoundError(Exception):
    """Raised when a required file is missing; message mimics the OS ENOENT error.

    NOTE(review): this shadows the Python 3 builtin FileNotFoundError --
    consider renaming if the codebase ever moves to Python 3.
    """

    def __init__(self, filename):
        message = '[Errno %s] %s: \'%s\'' % (errno.ENOENT, os.strerror(errno.ENOENT), filename)
        Exception.__init__(self, message)
class FileFormatError(Exception):
    """Raised when a file exists but its contents cannot be parsed."""
def load_json(path):
    """Load the JSON file at *path*, preserving key order.

    Returns an OrderedDict. Raises FileNotFoundError when the file is
    missing, FileFormatError when it is not valid JSON, and re-raises any
    other I/O error unchanged.
    """
    try:
        with open(path, 'r') as fh:
            raw = fh.read()
    except IOError as err:
        if err.errno == errno.ENOENT:
            raise FileNotFoundError(path)
        raise err
    try:
        return json.loads(raw, object_pairs_hook=OrderedDict)
    except ValueError:
        raise FileFormatError(path)
def makedirs(path):
    """Create the directory *path* (and parents); a no-op if it already exists."""
    # https://stackoverflow.com/a/5032238 -- EAFP variant of mkdir -p.
    try:
        os.makedirs(path)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        raise
# https://stackoverflow.com/a/295466
def slugify(value):
    """
    Normalizes string, converts to lowercase, removes non-alpha characters,
    and converts spaces to hyphens.

    NOTE(review): Python 2 only -- relies on ``unicode`` and on
    ``str.decode('unicode-escape')``, neither of which exists in Python 3.
    """
    import unicodedata
    # Interpret backslash escapes, then strip accents down to plain ASCII.
    value = value.decode('unicode-escape')
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
    # Drop everything except word characters, whitespace and hyphens; lowercase.
    value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
    # Collapse runs of whitespace/hyphens into single hyphens.
    return unicode(re.sub('[-\s]+', '-', value))
| StarcoderdataPython |
6663661 | # Copyright 2021 cedar.ai. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@rules_python//python:defs.bzl", "py_binary")
load("@rules_pyvenv_deps//:requirements.bzl", "requirement")
def _py_venv_deps_impl(ctx):
    """Rule implementation: serialize deps' import paths and runfiles to JSON.

    Writes a manifest {imports, files, commands} that the generated
    venv-builder binary reads at run time (see py_venv below).
    """
    imports = []
    for dep in ctx.attr.deps:
        if PyInfo not in dep:
            continue
        # Deduplicate import paths while preserving first-seen order.
        imports.extend([i for i in dep[PyInfo].imports.to_list() if i not in imports])
    deps = depset(transitive = [dep[DefaultInfo].default_runfiles.files for dep in ctx.attr.deps])
    out = ctx.outputs.output
    files = []
    for dep in deps.to_list():
        # Directories are skipped; only regular files are recorded.
        if dep.is_directory:
            continue
        # "S" marks a source file, "G" a generated one.
        typ = "S" if dep.is_source else "G"
        files.append({"t": typ, "p": dep.short_path})
    doc = {
        "imports": imports,
        "files": files,
        "commands": ctx.attr.commands,
    }
    ctx.actions.write(out, json.encode(doc))
    return [DefaultInfo(files = depset(direct = [out]))]
# Private rule: collects `deps` into the JSON manifest written by
# _py_venv_deps_impl. `commands` are extra pip invocations to record;
# `output` is the manifest file to produce.
_py_venv_deps = rule(
    implementation = _py_venv_deps_impl,
    attrs = {
        "deps": attr.label_list(),
        "commands": attr.string_list(),
        "output": attr.output(),
    },
)
def py_venv(name, deps = None, extra_pip_commands = None):
    """Macro: define *name* as a runnable virtualenv builder for *deps*.

    Generates a hidden _<name>_deps manifest target plus a py_binary that
    runs build_env.py, pointing it at the manifest through the
    BUILD_ENV_INPUT environment variable. extra_pip_commands are additional
    pip commands recorded in the manifest.
    """
    deps = deps or []
    extra_pip_commands = extra_pip_commands or []
    deps_name = "_" + name + "_deps"
    out_name = deps_name + ".json"
    out_label = ":" + out_name
    _py_venv_deps(
        name = deps_name,
        deps = deps,
        commands = extra_pip_commands,
        output = out_name,
    )
    py_binary(
        name = name,
        srcs = ["@rules_pyvenv//:build_env.py"],
        deps = [requirement("entrypoints")],
        data = [out_label] + deps,
        main = "@rules_pyvenv//:build_env.py",
        env = {
            # Tells build_env.py where its manifest lives at run time.
            "BUILD_ENV_INPUT": "$(location " + out_label + ")",
        },
    )
| StarcoderdataPython |
8173871 | #!/usr/bin/env python
import re
from datetime import datetime, timedelta, date
from requests.exceptions import ConnectionError, ReadTimeout, SSLError
import time, sys, traceback
import mysql.connector
from tweet_getter import TweetGetter
from requests_oauthlib import OAuth1Session
'''
crontab -eの場合は以下のimport
'''
# --- Redacted deployment configuration --------------------------------------
# The "***" placeholders below were scrubbed before this file was shared;
# they are syntax errors, so the module cannot run until real values
# (DB credentials, search target, Twitter API keys) are restored.
DB_USER = ***
DB_PASSWORD = ***
DB_HOST = ***
DB_NAME = ***
CHARSET = ***
TARGET_WORD = ***
CONSUMER_KEY = ***
CONSUMER_SECRET = ***
ACCESS_TOKEN = ***
ACCESS_TOKEN_SECRET = ***
# Shared OAuth1 session used for all Twitter API calls.
twitter = OAuth1Session(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
tw = TweetGetter(twitter)
# Twitter API access key tokens (the credentials above).
# Screen names whose tweets should be skipped entirely.
exception_list = []
# Substrings that mark a tweet as irrelevant noise.
noise_list = ['*']
def now_unix_time():
    """Return the current local time as a Unix timestamp (float seconds)."""
    current = datetime.now()
    return time.mktime(current.timetuple())
def execute_sql(sql, db_info, is_commit=False):
    """Execute a single SQL statement against the MySQL host in *db_info*.

    db_info keys: host, user, password, db_name. Pass is_commit=True for
    statements that modify data (CREATE/INSERT/...). Always returns True.

    NOTE(review): the port below was redacted to "*" -- that line is a
    syntax error until a real port number is restored.
    NOTE(review): *sql* is executed verbatim; callers interpolate values
    with %, which is SQL-injection-prone -- prefer parameterized queries.
    """
    connector = mysql.connector.connect(
        host=db_info["host"],
        port=*,
        user=db_info["user"],
        password=db_info["password"],
        db=db_info["db_name"],
        charset="utf8"
    )
    cursor = connector.cursor()
    cursor.execute(sql)
    if is_commit:
        connector.commit()
    cursor.close()
    connector.close()
    return True
def create_hashtag_serch_table(db_info):
    """Create the `initial_day` tweet table in the database if it is missing.

    NOTE(review): "serch" in the function name is a typo, but renaming would
    break callers -- left as-is.
    """
    sql = """
    CREATE TABLE IF NOT EXISTS
        initial_day(
            tweet_id BIGINT,
            day_id DATETIME,
            created_at DATETIME,
            user_id BIGINT,
            user_name VARCHAR(50),
            user_friends MEDIUMINT,
            user_followers MEDIUMINT,
            retweet_count MEDIUMINT,
            favorite_count MEDIUMINT,
            text VARCHAR(255)
        )
    ;
    """
    execute_sql(sql, db_info, is_commit=True)
    return True
def insert_into_hashtag_search(db_info, hashtag_search_dict):
    """Insert one tweet row (see create_hashtag_serch_table) into `initial_day`.

    SECURITY(review): values -- including untrusted tweet text -- are
    interpolated into the SQL string with %, which is vulnerable to SQL
    injection. This should use parameterized queries (cursor.execute with
    a params tuple); flagged only, since execute_sql takes a raw string.
    """
    sql = """
    INSERT INTO
        initial_day
    VALUES(
        '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s' , '%s'
    )
    ;
    """ % (
        hashtag_search_dict["tweet_id"],
        hashtag_search_dict['day_id'],
        hashtag_search_dict["created_at"],
        hashtag_search_dict["user_id"],
        hashtag_search_dict["user_name"],
        hashtag_search_dict["user_friends"],
        hashtag_search_dict["user_followers"],
        hashtag_search_dict["retweet_count"],
        hashtag_search_dict["favorite_count"],
        hashtag_search_dict["text"]
    )
    execute_sql(sql, db_info, is_commit=True)
    return True
def tweet_main():
    """Page through yesterday's tweets for the search term and store them in MySQL.

    Follows Twitter's search cursor (max_id from `next_results`) until the
    results are exhausted or the rate limit is hit. Network errors back off
    for five minutes and retry.

    NOTE(review): the DB password below was redacted to "<PASSWORD>" --
    that line is a syntax error until a real value is restored.
    """
    sid = -1   # since_id lower bound for the search window
    mid = -1   # max_id paging cursor, updated from next_results
    count = 0  # number of search requests issued (progress output only)
    week_ago = date.today() - timedelta(days=1)
    local_db = {
        "host": DB_HOST,
        "user": DB_USER,
        "password": <PASSWORD>,
        "db_name": DB_NAME
    }
    # Create the destination table (no-op if it already exists).
    create_hashtag_serch_table(local_db)
    while True:
        try:
            count += 1
            sys.stdout.write('%d, ' % count)
            # TODO: if the table already has data, seed since_id from its
            # max timestamp so only newer tweets are fetched.
            tweet_data = tw.get_zeroday_tweet_data(u'学研', max_id=mid, since_id=sid, start_date=week_ago)
            if tweet_data['result'] == False:
                print("status_code{}".format(tweet_data['status_code']))
                break
            if int(tweet_data['limit']) == 0:
                # Rate limit exhausted: report how long until the reset, then stop.
                print('Adding created_at field')
                diff_sec = int(tweet_data['reset_time_unix']) - now_unix_time()
                print("sleep %d sec." % (diff_sec + 5))
                break
            else:
                # Handle the search metadata / paging cursor.
                if len(tweet_data['statuses']) == 0:
                    sys.stdout.write("statuses is none.")
                    break
                elif 'next_results' in tweet_data['metadata']:
                    # Store this page of results in MySQL.
                    tweet_data_st = tweet_data['statuses']
                    for tweet in tweet_data_st:
                        # Skip blacklisted accounts and tweets containing 【...】
                        # headline-style brackets (typically bot/news posts).
                        if (tweet['user']['screen_name'] not in exception_list) & (len(re.findall(r'【*】', tweet['text'])) == 0):
                            # Skip tweets containing any configured noise string.
                            if len([s for s in noise_list if s in tweet['text']]) == 0:
                                tweet['text'] = tw.tweet_cleaner(tweet['text'])
                                hashtag_search_dict = {
                                    "tweet_id": u"{}".format(tweet['id']),
                                    "day_id": u"{}".format(date.today()),
                                    "created_at": u"{}".format(tweet['created_at']),
                                    "user_id": u"{}".format(tweet['user']['id']),
                                    "user_name": u"{}".format(tweet['user']['screen_name']),
                                    "user_friends": u"{}".format(tweet['user']['friends_count']),
                                    "user_followers": u"{}".format(tweet['user']['followers_count']),
                                    'retweet_count': u'{}'.format(tweet['retweet_count']),
                                    'favorite_count': u'{}'.format(tweet['favorite_count']),
                                    "text": u"{}".format(tweet['text'])
                                }
                                insert_into_hashtag_search(local_db, hashtag_search_dict)
                    # Pull the next max_id cursor out of the next_results URL.
                    next_url = tweet_data['metadata']['next_results']
                    pattern = r".*max_id=([0-9]*)\&.*"
                    ite = re.finditer(pattern, next_url)
                    for i in ite:
                        mid = i.group(1)
                        break
                else:
                    sys.stdout.write("next is none. finished.")
                    break
        except SSLError:
            print("SSLError")
            print("waiting 5mins")
            time.sleep(5 * 60)
        except ConnectionError:
            print("ConnectionError")
            print("waiting 5mins")
            time.sleep(5 * 60)
        except ReadTimeout:
            print("ReadTimeout")
            print("waiting 5mins")
            time.sleep(5 * 60)
        except:
            # NOTE(review): bare except re-raises after logging -- intentional
            # last-resort handler, but it also catches KeyboardInterrupt.
            print("Unexpected error:{}".format(sys.exc_info()[0]))
            traceback.format_exc(sys.exc_info()[2])
            raise
        finally:
            # NOTE(review): `info` is never read -- this line appears to be dead.
            info = sys.exc_info()
    return True


tweet_main()
| StarcoderdataPython |
6665505 | #!/usr/bin/env python3
import argparse
from enum import Enum
class Actions(Enum):
    """Filter actions supported by the converter."""
    ARCHIVE = 0
    DELETE = 1


class SelectorTypes(Enum):
    """Kinds of message selector a rule can match on."""
    FROM = 0
    TO = 1
    SUBJECT_CONTAINS = 2


# Maps the action column of the Mailbox CSV export to our Actions enum.
# BUG FIX: the original mapping was swapped ("deleted" -> ARCHIVE and
# "archive" -> DELETE), which would have archived mail meant for deletion
# and trashed mail meant for archiving.
CSV_ACTIONS_2_ENUM_VALUE = {
    "deleted": Actions.DELETE,
    "archive": Actions.ARCHIVE,
}

# Maps the selector-type column of the CSV to our SelectorTypes enum.
CSV_SELECTOR_TYPES_2_ENUM_VALUE = {
    "from": SelectorTypes.FROM,
    "to": SelectorTypes.TO,
    "subject": SelectorTypes.SUBJECT_CONTAINS,
}
def csv_row_2_rule(action, _, selector_type, *args):
    """Turn one row of CSV columns into a 'rule' dict.

    Columns: action, (ignored), selector type, selector value(s)..., usage count.
    Returns {"action": Actions, "selector": {...}, "usage_count": int}.

    BUG FIX: the unknown-selector branch raised ``Error``, an undefined name
    (i.e. a NameError at runtime) -- replaced with ValueError.
    """
    # The last value is the usage count; all others are part of the selector.
    *selector_values, usage_count = args
    selector = {
        "type": CSV_SELECTOR_TYPES_2_ENUM_VALUE[selector_type]
    }
    if selector['type'] in [SelectorTypes.FROM, SelectorTypes.TO]:
        selector['email_address'] = selector_values[0]
    elif selector['type'] == SelectorTypes.SUBJECT_CONTAINS:
        selector['subject_substrings'] = selector_values
    else:
        raise ValueError("Unknown selector type: '%s'/%s" % (selector_type, selector['type']))
    return {
        "action": CSV_ACTIONS_2_ENUM_VALUE[action],
        "selector": selector,
        "usage_count": int(usage_count),
    }
def parse_mailbox_csv(input_filename):
    """Open and parse a Mailbox CSV export; return a list of 'rule' dicts."""
    print("Opening '%s'..." % input_filename)
    with open(input_filename) as csv_file:
        print("Parsing '%s'..." % input_filename)
        rules = []
        for line in csv_file:
            # Cells are comma-separated and may carry whitespace and quotes.
            cells = [cell.strip().strip('"') for cell in line.split(',')]
            rules.append(csv_row_2_rule(*cells))
        return rules
# Accepts a rule-dict (like the ones produced by csv_row_2_rule)
def rule_2_gmail_xml_node(rule):
    """Convert one rule dict into a Gmail filter <entry> XML element."""
    from lxml import etree
    rule_node = etree.Element('entry')
    rule_node.append(etree.Element('category', { 'term': 'filter' }))
    rule_node.append(etree.Element('title', { 'text': 'Mail Filter' }))
    rule_node.append(etree.Element('content'))
    # Selector: emitted as an apps:property element in the Google namespace.
    if rule['selector']['type'] == SelectorTypes.TO:
        rule_node.append(etree.Element('{http://schemas.google.com/apps/2006}property', { 'name': 'to', 'value': rule['selector']['email_address'] }))
    elif rule['selector']['type'] == SelectorTypes.FROM:
        rule_node.append(etree.Element('{http://schemas.google.com/apps/2006}property', { 'name': 'from', 'value': rule['selector']['email_address'] }))
    elif rule['selector']['type'] == SelectorTypes.SUBJECT_CONTAINS:
        # Build a selector string which google will parse, e.g. ("a" AND "b").
        subject_selector = "(%s)" % ' AND '.join(('"%s"' % subject_substring) for subject_substring in rule['selector']['subject_substrings'])
        rule_node.append(etree.Element('{http://schemas.google.com/apps/2006}property', { 'name': 'subject', 'value': subject_selector }))
    # Action: archive or trash.
    if rule['action'] == Actions.ARCHIVE:
        rule_node.append(etree.Element('{http://schemas.google.com/apps/2006}property', { 'name': 'shouldArchive', 'value': 'true' }))
    elif rule['action'] == Actions.DELETE:
        rule_node.append(etree.Element('{http://schemas.google.com/apps/2006}property', { 'name': 'shouldTrash', 'value': 'true' }))
    return rule_node
# Accepts an array of rule-dicts (like the ones produced by csv_row_2_rule)
def dump_gmail_xml(rules, output_filename):
    """Serialize *rules* into a Gmail-importable filters XML file."""
    from lxml import etree
    print("Exporting to '%s'..." % output_filename)
    # Root <feed> declaring the Google Apps namespace Gmail's importer expects.
    root = etree.Element('feed', nsmap= { 'apps': 'http://schemas.google.com/apps/2006' })
    root.append(etree.Element('title', { 'text': 'Mail Filters' }))
    for rule in rules:
        root.append(
            rule_2_gmail_xml_node(rule)
        )
    # Binary mode: etree.tostring returns bytes with the XML declaration.
    with open(output_filename, 'wb') as f:
        f.write(
            etree.tostring(root, encoding="utf8", xml_declaration=True)
        )
if __name__ == '__main__':
    # Parse args: optional input CSV path plus an output XML path.
    parser = argparse.ArgumentParser(description='Convert an exported CSV of mailbox autoswipe patterns to a gmail-compatible CSV file.')
    parser.add_argument('input_filename', nargs='?', default='./autoswipe_rules.csv', help='path to the Mailbox CSV file')
    parser.add_argument('--output-filename', '-o', dest='output_filename', default='./filters.xml', help='output path for Gmail XML filters file')
    args = parser.parse_args()
    # Convert: parse the Mailbox CSV, then dump the rules as Gmail filter XML.
    dump_gmail_xml(
        parse_mailbox_csv(input_filename=args.input_filename),
        output_filename=args.output_filename,
    )
    print("done!")
6519563 | <filename>integrator/fitbit_client.py<gh_stars>0
__author__ = 'Tauren'
import os
import requests
import base64
class FitbitClient:
    """Minimal Fitbit Web API client: OAuth2 token refresh and heart-rate query."""

    def __init__(self):
        pass

    def exchange_refresh_token(self, current_refresh_token):
        """Exchange a refresh token for new access and refresh tokens.

        Returns a (refresh_token, access_token) tuple; either element is ''
        when missing from the API response.
        """
        # HTTP Basic credentials are "<client_id>:<client_secret>", base64-encoded.
        # NOTE(review): assumes CLIENT_ID/CLIENT_SECRET env vars are set -- confirm.
        credentials = os.getenv('CLIENT_ID') + ':' + os.getenv('CLIENT_SECRET')
        auth_header = b'Basic ' + base64.urlsafe_b64encode(credentials.encode('utf-8'))
        headers = {'Authorization': auth_header, 'Content-Type': 'application/x-www-form-urlencoded'}
        payload = {'grant_type': 'refresh_token',
                   'refresh_token': current_refresh_token}
        r = requests.post('https://api.fitbit.com/oauth2/token', headers=headers, data=payload)
        request_json = r.json()
        print('Results: %s' % request_json)
        return request_json.get('refresh_token', ''), request_json.get('access_token', '')

    def query_hr_time_series(self, access_token, date):
        """Query the 1-minute intraday heart-rate series for *date*; return parsed JSON."""
        hr_url = 'https://api.fitbit.com/1/user/-/activities/heart/date/%s/1d/1min.json' % date
        result = requests.get(hr_url, headers={'Authorization': 'Bearer %s' % access_token})
        return result.json()
| StarcoderdataPython |
9665228 | import subprocess
import os
import argparse
import sys
# CLI: benchmark/profile GLUE fine-tuning under different PyTorch JIT/fuser settings.
parser = argparse.ArgumentParser(description='Profile Codegen')
parser.add_argument('--cuda-bin', default='/usr/local/cuda/bin/', type=str, help='Cuda Path')
parser.add_argument('--task-name', default='MRPC', type=str, help='Glue Benchmark task.')
parser.add_argument('--profile', action='store_true', help='Profile run.')
parser.add_argument('--ten_steps', action='store_true', help='Run for just 10 steps.')
parser.add_argument('--pad', action='store_true', help='Do not create dynamic batches.')
parser.add_argument('--print-kernel', action='store_true', help='Print Fused Kernels.')
parser.add_argument('--nojit', action='store_true', help='Turn off jit.')
# NOTE(review): the help text for --te looks copy-pasted from --nojit; the
# flag actually disables the nvFuser backend (see PYTORCH_NVFUSER_ENABLE below).
parser.add_argument('--te', action='store_true', help='Turn off jit.')
parser.add_argument('--batch_size', default='256', type=str, help='Batch size.')
parser.add_argument('--epochs', default='20.0', type=str, help='Number of epochs.')
parser.add_argument('--fused_adam', action='store_true', help='Use fused adam. You have to turn this on because APEX might not be installed.')
args = parser.parse_args()

# Environment-variable prefix controlling tokenizers, fuser dumps, JIT and optimizer.
env_args = ['TOKENIZERS_PARALLELISM=true']
if args.print_kernel:
    env_args += ['PYTORCH_NVFUSER_DUMP=cuda_kernel']
if args.nojit:
    env_args += ['PYTORCH_JIT_ENABLE=0']
else:
    env_args += ['PYTORCH_JIT_ENABLE=1']
if args.te:
    env_args += ['PYTORCH_NVFUSER_ENABLE=0']
else:
    env_args += ['PYTORCH_NVFUSER_ENABLE=1']
if args.fused_adam:
    env_args += ['USE_FUSED_ADAM=1']
else:
    env_args += ['USE_FUSED_ADAM=0']

# Base HuggingFace run_glue.py training command.
benchmark_cmd = ['python', 'run_glue.py', '--overwrite_output_dir', '--model_name_or_path', 'bert-base-cased', \
                 '--task_name', args.task_name, '--do_train', '--max_seq_length', '128', '--seed', '0', \
                 '--per_device_train_batch_size', args.batch_size, '--learning_rate', '2e-5', \
                 '--num_train_epochs', args.epochs, '--output_dir', os.getcwd() + '/' + args.task_name ]

prof_prefix = ['nsys', 'nvprof', '--print-gpu-trace']
prof_options = ['--max_steps', '10' ]
run_options = ['--do_eval' ]
pad_options = ['--pad_to_max_length']

# Assemble the final command: env vars [+ profiler] + benchmark [+ options].
cmd_list = env_args
if args.profile:
    cmd_list += prof_prefix
cmd_list += benchmark_cmd
if args.pad:
    cmd_list += pad_options
if args.profile or args.ten_steps:
    cmd_list += prof_options
else:
    cmd_list += run_options
print(cmd_list)

# Flatten to a single shell string (env assignments require shell=True here).
cmd_str = ''
for item in cmd_list:
    cmd_str += ' ' + item
subprocess.run(cmd_str, stdout=sys.stdout, stderr=sys.stderr, universal_newlines=True, shell=True, cwd=os.getcwd())
| StarcoderdataPython |
8131396 | <gh_stars>0
from .torch_dataset import TorchDataset
__all__ = ['TorchDataset']
| StarcoderdataPython |
1966023 | <gh_stars>0
# URI Online Judge problem 1017 - fuel spent.
# INPUT: hours driven and average speed (km/h), one integer per line.
hours_driven = int(input())
avg_speed = int(input())
# Dimensional analysis: km/h * h = km, so:
distance_km = avg_speed * hours_driven
# The car does 12 km per liter: km / (km/L) = L.
liters_used = distance_km / 12
print('{:.3f}'.format(liters_used))
58093 | from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
import unittest.mock as mock
from zsl.service.service import SessionFactory
from zsl.testing.db import TestSessionFactory as DbTestTestSessionFactory
from zsl.utils.injection_helper import bind
def mock_db_session():
    """Bind a MagicMock-backed DB session into the injector for tests.

    Rebinds SessionFactory and the DB-test TestSessionFactory to a factory
    whose session holder always yields the same MagicMock, and returns that
    mock so tests can assert on it.
    """
    mock_sess = mock.MagicMock()

    def session_holder():
        # Always hand back the one shared mock session.
        return mock_sess

    class TestSessionFactory(DbTestTestSessionFactory):
        def __init__(self):
            super(TestSessionFactory, self).__init__()
            self._session_holder = session_holder

    # Route both factory bindings at the mock-backed factory.
    bind(SessionFactory, to=TestSessionFactory)
    bind(DbTestTestSessionFactory, to=TestSessionFactory)
    return mock_sess
| StarcoderdataPython |
4827367 | <filename>server/front.py<gh_stars>0
from flask import Flask
from flask import jsonify
from flask import render_template
from api import api
# Flask application serving the single-page app shell.
app = Flask(__name__)

@app.route("/domain/<domain>/<lang>/<page>")
@app.route("/")
def main(**kwargs):
    # Every matched path renders the same shell; the URL parameters are
    # ignored server-side (presumably handled by client-side routing).
    return render_template("app.html")

if __name__ == "__main__":
    # NOTE(review): the API blueprint is only registered when run directly,
    # not when imported by a WSGI server -- confirm this is intended.
    app.register_blueprint(api)
    app.run(debug=True)
| StarcoderdataPython |
5014938 | <reponame>ministryofjustice/analytics-platform-atlantis-example<filename>modules/lambda_function/hello_world/hello.py
import os

# NAME is read once, at import time (Lambda cold start), not per invocation.
name = os.environ.get('NAME')


def hello_handler(event, context):
    """AWS Lambda entry point: greet using the NAME environment variable."""
    greeting = f"Hello {name}"
    return greeting
| StarcoderdataPython |
class TreeNode(object):
    """A binary-tree node holding a value and two child links."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution(object):
    def isBalanced(self, root):
        """Return True iff every node's subtree heights differ by at most 1.

        Improvement over the original: it recorded imbalance in a self.res
        side channel and kept measuring the whole tree after the first
        violation; this version short-circuits with a -1 sentinel and keeps
        no instance state.
        """
        _UNBALANCED = -1

        def height(node):
            # Height of the subtree rooted at node, or _UNBALANCED as soon
            # as any subtree violates the balance condition.
            if not node:
                return 0
            left = height(node.left)
            if left == _UNBALANCED:
                return _UNBALANCED
            right = height(node.right)
            if right == _UNBALANCED:
                return _UNBALANCED
            if abs(left - right) > 1:
                return _UNBALANCED
            return 1 + max(left, right)

        return height(root) != _UNBALANCED
# Ad-hoc smoke test: tree 1 -> (2, 3 -> (None, 4)).
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
root.right.right = TreeNode(4)
# Left depth 1 vs right depth 2 -> within tolerance; expect True.
res = Solution().isBalanced(root)
print(res)
1622673 | #!/usr/bin/env python
# encoding: utf-8
""" """
from web import Storage
from teslafaas.container.webpy.context_manager import ContextManager
from teslafaas.container.webpy.http_error_process import customize_http_error
import os
import sys
import web
import json
import pkgutil
import logging
import importlib
from codecs import open
from teslafaas.container.webpy.init import init_all
from teslafaas.container.webpy.common import urls as global_urls
__author__ = 'adonis'
init_all(use_gevent=False)
class InvalidUserApp(Exception):
pass
class DummyModule(dict):
def __init__(self, **kw):
super(DummyModule, self).__init__(**kw)
def __getattr__(self, key):
# FIXME: vars() support, vars() will not come here
# if key == '__dict__':
# return {k: v for k, v in self.iteritems()}
try:
return self[key]
except KeyError:
raise AttributeError(
r"'DummyModule' object has no attribute '%s'" % key)
def __setattr__(self, key, value):
self[key] = value
# FIXME: vars() support
self.__dict__[key] = value
def webpy_wsgifunc_wrapper(self):
conf = self.conf
webpy_app = self.webpy_app
# TODO: load dynamic resource
manager = ContextManager(webpy_app, conf, raw_mod=self.raw_mod)
manager.load_middle_ware()
manager.set_context_hook()
return webpy_app.wsgifunc()
class ContainerLoader(object):
def __init__(self, init_context):
self.conf = {}
self.init_context = init_context
def wrap_webpy_raw_module(self, mod, conf, urls):
wsgi_mod = DummyModule()
app = web.application(urls, globals(), autoreload=False)
customize_http_error(app)
# Unable to set a FunctionType to instance and void conflict
# app.wsgi = FunctionType app.wsgifunc()
# TODO: wrap wsgifunc to do dynamic resource reload, refresh automatically
wsgi_mod.conf = conf
wsgi_mod.raw_mod = mod
wsgi_mod.webpy_app = app
manager = ContextManager(app, conf, raw_mod=mod)
manager.load_middle_ware()
manager.set_context_hook()
wsgi_mod.wsgifunc = app.wsgifunc()
# wsgi_mod.refresh =
return wsgi_mod
def _import_sub_modules(self, mod):
for importer, modname, ispkg in pkgutil.walk_packages(path=mod.__path__, prefix=mod.__name__+'.'):
__import__(modname)
def load_module(self, src_path, container_name=None, env="common"):
"""
:param src_path: user module dir path
:param container_name: used for url router and unix socket name,
default: last dir name of src_path
:return: 2-item tuple, wsgi module name and container name
Gunicorn used python app module name, with wsgifunc()
entry function
"""
base_dir = os.path.abspath(src_path.rstrip('/'))
module_name = dir_name = os.path.basename(base_dir)
src_parent_path = os.path.dirname(base_dir)
if not container_name:
container_name = dir_name
if src_parent_path not in sys.path:
sys.path.append(src_parent_path)
# TODO: add src to python path
if src_parent_path not in sys.path:
sys.path.append(src_parent_path)
# if module_name in sys.modules:
# raise InvalidUserApp(
# "Invalid module name '%s': conflict with a already exist "
# "module(%s), please change a name"
# % (module_name, sys.modules[module_name].__file__))
try:
raw_module = __import__(module_name)
except ImportError:
raise InvalidUserApp(
"Can't import '%s' after add '%s' to PYTHONPATH. "
"Please check if __init__.py exist in '%s'"
% (dir_name, src_parent_path, src_path))
# conf_suffix = ""
# if env != "common":
# if env == "DAILY":
# conf_suffix = "_daily"
# elif env == "PRE":
# conf_suffix = "_pre"
# elif env == "PRODUCTION":
# conf_suffix = "_prod"
# if os.path.exists(os.path.join(x, 'conf%s.ini' % conf_suffix)):
# config.update(self.parse_ini_conf(os.path.join(x, 'conf%s.ini' % conf_suffix)))
# shutil.copyfile(os.path.join(x, 'conf%s.ini' % conf_suffix), os.path.join(x, 'conf.ini'))
# if os.path.exists(os.path.join(x, 'conf%s.json' % conf_suffix)):
# config.update(self.parse_json_conf(os.path.join(x, 'conf%s.json' % conf_suffix)))
# shutil.copyfile(os.path.join(x, 'conf%s.json' % conf_suffix), os.path.join(x, 'conf.json'))
# if os.path.exists(os.path.join(x, 'conf%s.py' % conf_suffix)):
# config.update(self.parse_py_conf("%s.%s" % (mod_name, 'conf%s' % conf_suffix)))
# shutil.copyfile(os.path.join(x, 'conf%s.py' % conf_suffix), os.path.join(x, 'conf.py'))
# # FIXME: conflict for multiple containers
# if src_path not in sys.path:
# sys.path.append(src_path)
urls = self.load_urls(raw_module, self.init_context.config)
# 兼容老的 sub factory 用法
try:
__import__('%s.factory' % raw_module.__name__)
except ImportError:
pass
else:
print 'importing module and submodules: %s.factory' % raw_module.__name__
self._import_sub_modules(getattr(raw_module, 'factory'))
wsgi_module_name = "%s.%s" % ('teslafaas.container.wsgi_mods', module_name)
wsgi_module = self.wrap_webpy_raw_module(raw_module, conf=self.init_context.config, urls=urls)
sys.modules[wsgi_module_name] = wsgi_module
return wsgi_module, container_name
def load_urls(self, raw_mod, config):
handlers = self.load_handlers(raw_mod, config)
global global_urls
if os.path.exists(os.path.join(raw_mod.__path__[0] + '/urls.py')):
logging.info('importing module: %s.urls', raw_mod.__name__)
__import__('%s.urls' % raw_mod.__name__)
if hasattr(raw_mod.urls, 'urls'):
m_urls = getattr(raw_mod.urls, 'urls')
if not global_urls is m_urls:
global_urls.extend(m_urls)
if config.get('disable_index', False) is True:
from teslafaas.container.webpy.common.IndexHandler import IndexHandler
global_urls += [r"/", IndexHandler]
for i in xrange(1, len(global_urls), 2):
if type(global_urls[i]) == str:
hname = global_urls[i]
if hname not in handlers:
print 'Error: cannot find handler class for %s' % hname
global_urls[i] = handlers[hname]
return global_urls
def load_handlers(self, raw_mod, config):
handlers = {}
path = os.path.join(raw_mod.__path__[0] + '/handlers')
if not os.path.exists(path):
return handlers
from teslafaas.container.webpy.common.BaseHandler import BaseHandler
for f in os.listdir(path):
if f.endswith('.py'):
name, postfix = os.path.splitext(f)
m = importlib.import_module('%s.handlers.%s' % (raw_mod.__name__, name))
for obj in dir(m):
classobj = getattr(m, obj)
try:
if issubclass(classobj, BaseHandler):
name = classobj.__name__
if name not in handlers:
handlers[name] = classobj
except:
pass
return handlers
def reset_container_log(self):
#
"""
初始化日志配置
"""
with open(os.path.join(self.root_path, 'conf/logging.json')) as f:
try:
log_config = json.loads(f.read())
except ValueError:
raise ValueError('Invalid logging config, cannot loads to JSON')
logging.dictConfig(log_config)
self.logger = logging.getLogger() # set default root logger
| StarcoderdataPython |
1871711 | <gh_stars>1-10
# coding: utf-8
get_ipython().magic(u'pylab inline')
import csv, twitter, json, nltk
import networkx as nx
from functools import reduce
from matplotlib import pyplot as plt
from wordcloud import WordCloud
CONSUMER_KEY, CONSUMER_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET = "", "", "", ""
def accede_a_tw(fuente):
    """Read the four OAuth credentials from *fuente* and return a Twitter client.

    The file holds one credential per line in the order: consumer key,
    consumer secret, OAuth token, OAuth token secret. The original left the
    file handle open; a with-block closes it deterministically.
    """
    with open(fuente, 'r') as credenciales:
        (consumer_key,
         consumer_secret,
         oauth_token,
         oauth_token_secret) = credenciales.read().splitlines()
    auth = twitter.oauth.OAuth(oauth_token,
                               oauth_token_secret,
                               consumer_key,
                               consumer_secret)
    return twitter.Twitter(auth=auth)
def carga_lista(archivo):
    """Load a CSV file and return a flat list of its cells, spaces removed.

    Returns [] when the file cannot be opened. Unlike the original
    reduce-based version, an empty file also yields [] instead of raising
    TypeError, and the handle is always closed via the with-block.
    """
    try:
        with open(archivo, "r") as f:
            return [celda.replace(" ", "")
                    for fila in csv.reader(f, dialect="unix")
                    for celda in fila]
    except IOError:
        return []
def busqueda_tw(tw, termino):
    """Search Twitter for *termino* (Spanish tweets, up to 500) and return the raw status list."""
    return tw.search.tweets(q=termino, lang="es", count="500")["statuses"]
def guarda_tuits(tuits, archivo):
    """Persist the tweet list to *archivo* as pretty-printed JSON (indent=1)."""
    with open(archivo, "w") as salida:
        salida.write(json.dumps(tuits, indent=1))
def carga_tuits(archivo):
    """Load and return the tweet list stored in *archivo* (JSON).

    Returns [] when the file cannot be opened; the with-block replaces the
    original open/except/else dance and always closes the handle.
    """
    try:
        with open(archivo, "r") as f:
            return json.load(f)
    except IOError:
        return []
def mezcla_tuits(actuales, nuevos):
    """Merge *nuevos* into *actuales* in place, skipping duplicate tweet ids.

    Returns the mutated *actuales* list. The original rebuilt the list of
    known ids on every iteration (O(n*m)); a set of seen ids makes this
    linear while preserving order and duplicate handling.
    """
    conocidos = {tuit["id"] for tuit in actuales}
    for tuit in nuevos:
        if tuit["id"] not in conocidos:
            actuales.append(tuit)
            conocidos.add(tuit["id"])
    return actuales
def limpiar(texto):
    """Tokenize *texto* into alphanumeric word tokens.

    Equivalent to nltk.RegexpTokenizer(r'\\w+').tokenize(texto) but without
    pulling in the nltk dependency: a \\w+ regex tokenizer is exactly
    re.findall(r'\\w+', ...).
    """
    import re
    return re.findall(r'\w+', texto)
def analiza_menciones(tuits):
    """Draw the user-mention graph of *tuits* with networkx/matplotlib.

    Nodes are tweet authors; edges connect an author to each distinct user
    they mention (self-mentions excluded). Rendering is a side effect; the
    function returns None.
    """
    pares = []
    nodos = []
    for tuit in tuits:
        usuario = tuit["user"]["screen_name"]
        nodos.append(usuario)
        menciones = [mencion["screen_name"] for mencion in tuit["entities"]["user_mentions"]]
        for mencion in menciones:
            # NOTE(review): `mencion != []` compares a string to a list and is
            # always True — probably meant a non-empty check; confirm intent.
            if mencion != [] and usuario != mencion:
                par = (usuario, mencion)
                pares.append(par)
    # Deduplicate nodes and edges before building the graph.
    nodos = list(set(nodos))
    pares = list(set(pares))
    G = nx.Graph()
    G.add_nodes_from(nodos)
    G.add_edges_from(pares)
    plt.figure(figsize=(32,32))
    nx.draw_networkx(G)
def refina_texto(tuits, lista, termino):
    """Concatenate tweet texts, drop blacklisted words, return the cleaned text.

    The blacklist is the stored CSV list (carga_lista) plus the words of the
    search term with '@' stripped, all compared case-insensitively. Output is
    lower-cased tokens separated (and terminated) by single spaces, matching
    the original's format. Improvements: set membership instead of a list
    scan, join instead of quadratic string concatenation, and no shadowing of
    the *termino* parameter by the token loop variable.
    """
    import re
    extra = [palabra.replace("@", "") for palabra in termino.split()]
    lista_negra = {palabra.lower() for palabra in carga_lista(lista) + extra}
    # Same token stream as nltk.RegexpTokenizer(r'\w+') over the joined texts.
    texto = " ".join(tuit["text"] for tuit in tuits)
    tokens = (token.lower() for token in re.findall(r'\w+', texto))
    return "".join(token + " " for token in tokens if token not in lista_negra)
def nube(texto):
    """Render *texto* as a word cloud and display it via matplotlib (side effect only)."""
    wordcloud = WordCloud().generate(texto)
    plt.imshow(wordcloud)
    plt.axis("off")
    plt.show()
def main(archivo="tuits.json", lista="lista_negra.csv"):
    """Interactive entry point.

    Prompts for a search term, fetches matching tweets, merges them into the
    stored set, saves it, draws the mention graph, and returns the cleaned
    corpus text (suitable for nube()).
    """
    termino = input("Término de búsqueda: ")
    tuits_previos = carga_tuits(archivo)
    tw = accede_a_tw("credenciales.txt")
    tuits_recientes = busqueda_tw(tw, termino)
    tuits = mezcla_tuits(tuits_previos, tuits_recientes)
    guarda_tuits(tuits, archivo)
    analiza_menciones(tuits)
    return refina_texto(tuits, lista, termino)
| StarcoderdataPython |
# NOTE(review): x, y and z are not defined in this fragment — presumably
# 2-D numpy arrays (a heightfield grid) created earlier; confirm upstream.
flat_x = x.flatten()
flat_y = y.flatten()
flat_z = z.flatten()
size = flat_x.shape[0]
filename = 'landscapeData.h'
# One 'w' handle truncates and writes in a single pass; the original
# truncate-then-reopen-for-append dance leaked an open file object.
with open(filename, 'w') as f:
    f.write('#include "LinearMath/btScalar.h"\n#define Landscape01VtxCount 4\n#define Landscape01IdxCount 4\nbtScalar Landscape01Vtx[] = {\n')
    for i in range(size):
        f.write(str(flat_x[i])+'f,'+str(flat_y[i])+'f,'+str(flat_z[i])+'f,\n')
    f.write('};\n')
    # Placeholder normals (all ones) — one per vertex.
    f.write('btScalar Landscape01Nml[] = {\n')
    for i in range(size):
        f.write('1.0f,1.0f,1.0f,\n')
    f.write('};\n')
    # Placeholder texture coordinates — one per vertex.
    f.write('btScalar Landscape01Tex[] = {\n')
    for i in range(size):
        f.write('1.0f,1.0f,1.0f,\n')
    f.write('};\n')
    f.write('unsigned short Landscape01Idx[] = {\n')
    for i in range(size):
        # NOTE(review): the last two iterations emit indices past the final
        # vertex (i+1, i+2 > size-1) — looks like an off-by-two; confirm
        # against the consumer before changing.
        f.write(str(i)+','+str(i+1)+','+str(i+2)+',\n')
    f.write('};\n')
| StarcoderdataPython |
#Set up file logging.
#This block defines the module-wide logging interface used everywhere below:
#  logfun(msg)  - informational logging
#  logexe(err)  - exception logging (or re-raise in 'print' mode)
import datetime
import sys
args = sys.argv
#If we're to print the output
if 'print' in args:
    #Just raise exceptions
    def logexe(e):
        raise e
    #Print out all logging
    logfun = print
#Otherwise, we're logging the output:
else:
    import logging
    #Log to logs/bot <time>.log
    #NOTE(review): assumes the logs/bot/ directory already exists — confirm.
    logging.basicConfig(filename='logs/bot/bot '+str(datetime.datetime.now())+'.log', format='%(asctime)s %(message)s')
    #Get a logger
    logger = logging.getLogger('Logger')
    #Get the level from the first argument, if it exists. (args[0] is always the filename)
    if len(args) > 1:
        logger.setLevel(args[1])
    else:
        logger.setLevel('INFO')
    #Set the general logging functions
    logexe = logger.exception
    logfun = logger.info
import json
logfun('json imported')
import asyncio
logfun('asyncio imported')
import asyncpg
logfun('asyncpg imported')
import discord
logfun('discord imported')
from discord.ext import commands
logfun('commands imported')
from cogs.utils.SimplePaginator import SimplePaginator
import serverfetcher
# Get the prefixes for the bot
async def command_prefix(bot, message):
    """Return the prefix callable result for *message*.

    Guild messages use the guild's stored prefixes (via the server fetcher);
    DMs accept no prefix or a direct bot mention. Always combined with
    commands.when_mentioned_or so mentioning the bot works everywhere.
    """
    #If there's a guild:
    if message.guild:
        #Get its prefixes
        extras = await bot.serverfetcher.prefixes_for(message, bot.user.id)
    #Otherwise, we're in a PM
    else:
        #Ping the bot, or have no prefix.
        extras = ['', '<@'+str(bot.user.id)+'> ', '<@'+str(bot.user.id)+'>']
    #Pass it along
    return commands.when_mentioned_or(*extras)(bot, message)
# All cogs that will be loaded on bots startup
startup_extensions = [
'cogs.character',
'cogs.general',
'cogs.ref',
'cogs.roll',
'cogs.server_settings',
'cogs.init'
]
def evensplit(l, n):
    """Yield successive n-sized slices of sequence *l* (last slice may be shorter)."""
    for start in range(0, len(l), n):
        yield l[start:start + n]
def splittext(count, text):
    """Return *text* unchanged when it has <=10 lines (per *count*);
    otherwise return a list of chunks of at most 10 lines each."""
    if count <= 10:
        return text
    return ['\n'.join(chunk) for chunk in evensplit(text.split('\n'), 10)]
def titlemake(tup):
    """Expand a (name, value) pair into embed-field dicts.

    A plain-string value yields one field; a list of strings yields the
    first under *name* and the rest under "name (cont.)".
    """
    name, value = tup
    if type(value) is str:
        return [{'name': name, 'value': value}]
    head = [{'name': name, 'value': value[0]}]
    tail = [{'name': name + ' (cont.)', 'value': part} for part in value[1:]]
    return head + tail
def splitbigfields(l):
    """Split oversized embed fields into <=10-line chunks and flatten the result.

    *l* is a sequence of (name, line_count, text) triples; returns one flat
    list of {'name': ..., 'value': ...} dicts.
    """
    grouped = [titlemake((name, splittext(count, text))) for name, count, text in l]
    return [field for group in grouped for field in group]
def toembed(d, printFun):
    """Convert an embed-description dict into a discord.Embed.

    Pops 'fields', 'image' and 'footer' out of *d* (mutates it); any
    remaining keys are passed straight to the discord.Embed constructor.
    """
    fields = d.pop('fields', [])
    image = d.pop('image', None)
    footer = d.pop('footer', None)
    printFun(image)  # debug trace of the image value
    em = discord.Embed(**d)
    for i in fields:
        em.add_field(**i)
    if image:
        em.set_image(url=image)
    if footer:
        em.set_footer(**footer)
    return em
class RPBot(commands.Bot):
    """Discord RPG helper bot.

    Wires together the server fetcher (per-guild settings), a PostgreSQL
    usage-statistics pool, cog extensions, and a per-channel "waiting"
    protocol that lets commands ask a user a follow-up question.
    """
    def __init__(self, sf):
        """Load botsettings.json, set up state, and load all startup cogs."""
        super().__init__(command_prefix=command_prefix, case_insensitive=True, pm_help=None)
        #Load the settings json
        with open("botsettings.json") as data_file:
            self.settings = json.load(data_file)
        #Set up the waiting dictionary
        self.waiting = {}
        #Provide serverfetcher to the cogs
        self.serverfetcher = sf
        #Provide logfun to the cogs
        self.logger = logfun
        #Set up the bot stat logging server
        self.botdataserver = {}
        self.botdataserver['credentials'] = {"user": self.settings['sql'][0], "password": self.settings['sql'][2], "database": self.settings['botDataServer'], "host": self.settings['sql'][1]}
        self.botdataserver['commands'] = {}
        self.botdataserver['commands']['increment_command'] = 'INSERT INTO command_usage (name, uses) VALUES ($1, 1) ON CONFLICT (name) DO UPDATE SET uses = command_usage.uses + 1;'
        self.botdataserver['commands']['upsert'] = lambda x: 'INSERT INTO '+x+ '(id) VALUES ($1) ON CONFLICT DO NOTHING;' #unique_guilds, unique_users
        #Initializing some objects to prevent errors should their cogs fail to load:
        self.inline_roller = lambda x: None
        self.refserver = None
        self.charserver = None
        self.statserver = None
        #Load extensions
        for extension in startup_extensions:
            try:
                self.load_extension(extension)
            except Exception as e:
                exc = '{}: {}'.format(type(e).__name__, e)
                self.logger('Failed to load extension {}\n{}'.format(extension, exc))
    # Print bot information, update status and set uptime when bot is ready
    async def on_ready(self):
        """Finish async setup: stats pool, fetcher shortcuts, presence."""
        #Log the login
        self.logger('Username: ' + str(self.user.name))
        self.logger('Client ID: ' + str(self.user.id))
        #Post-serverfetcher set up functions
        self.serverdata = self.serverfetcher.serverdata
        self.systemlist = self.serverfetcher.systemlist
        self.upsert_entry = self.serverfetcher.upsert_entry
        #Get pool for the bot stat server
        self.botdataserver['pool'] = await asyncpg.create_pool(**self.botdataserver['credentials'])
        #Set the current activity
        activity = discord.Activity(name='"<@bot_ping> init" to help newcomers.', type = discord.ActivityType.listening)
        await self.change_presence(activity=activity)
    async def on_command_error(self, ctx, e):
        """Log (or re-raise in print mode) every command error; never crash the loop."""
        logexe(e)
        return
    # Prevent bot from replying to other bots
    async def on_message(self, message):
        """Dispatch a message to the waiting protocol, commands, and inline roller."""
        if not message.author.bot:
            #Response commands. Format is (prefix, funlist yes, funlist no, funlist invalid)
            #Each entry is a dictionary of the form: {'prefix':ctx.prefix, 'function':generalFun, 'options':optionsList}
            #'prefix' is, well, the prefix
            #'function' is the function carried out by the entry
            #'options' is a list of valid arguments to the function
            #If the message's channel has a waiting entry, and the author is in that entry, and the message starts with the entries prefix:
            if message.channel.id in self.waiting and message.author.id in self.waiting[message.channel.id] and message.content.startswith(self.waiting[message.channel.id][message.author.id]['prefix']):
                #Pop out the author's entry
                entry = self.waiting[message.channel.id].pop(message.author.id)
                #If the contents of the message match an option
                if message.content.strip()[len(entry['prefix']):] in entry['options']:
                    #Set option to it
                    option = message.content.strip()[len(entry['prefix']):]
                #Otherwise
                else:
                    #Set option to None
                    option = None
                #Feed option to function
                await entry['function'](option)
                #If the channel's entry is empty
                if not(len(self.waiting[message.channel.id])):
                    #Delete it
                    del self.waiting[message.channel.id]
            #Otherwise
            else:
                #Get the context of the message
                ctx = await self.get_context(message)
                #Invoke the context
                await self.invoke(ctx)
                #If the bot can send messages in the channel
                if ctx.me.permissions_in(ctx.channel).send_messages:
                    #Invoke the inline roller
                    await self.inline_roller(ctx)
    #After a command successfully executes
    async def on_command_completion(self, ctx):
        """Record usage statistics (guild, user, command) in PostgreSQL."""
        #Get a connection from the stat server
        async with self.botdataserver['pool'].acquire() as conn:
            #If the message isn't a PM
            if ctx.guild:
                #Log the guild
                await conn.execute(self.botdataserver['commands']['upsert']('unique_guilds'), ctx.guild.id)
            #Log the user
            await conn.execute(self.botdataserver['commands']['upsert']('unique_users') , ctx.author.id)
            #Log the command
            await conn.execute(self.botdataserver['commands']['increment_command'] , ctx.command.name)
    @staticmethod
    async def smartSend(ctx,initial,message,begin='', end=''):
        """Static method to send messages that might be longer than the limit.
        Arguments:
        ctx: context to send to
        initial: An initial message to send to context
        message: The message that might go over the maximum
        begin: A beginning prefix to each individual text block message is split into.
        end: An ending prefix to the same. If non-existent while begin exists, it's set to begin"""
        #Send the initial message
        await ctx.send(initial)
        #If end is empty and begin is not
        if begin and not(end):
            #Set end to begin
            end = begin
        #The longest we can allow a message to be is 2000 - the combined length of the bookends
        maxlength = 2000-(len(begin)+len(end))
        #While message is longer than allowed
        while len(message)>maxlength:
            #Find the last newline in the allowed length of message
            pos = message[:maxlength].rfind('\n')
            #If we couldn't find one:
            if pos==-1:
                #Cut it off at the maximumlength
                await ctx.send(begin + message[:maxlength] + end)
                message = message[maxlength:]
            #Otherwise
            else:
                #Cut it off at the newline
                await ctx.send(begin+message[:pos]+end)
                #And continue after the newline
                message = message[pos+1:]
        #Send what's left
        await ctx.send(begin+message+end)
    @staticmethod
    async def pageOrEmbed(ctx, info, printFun, freeze, forceEmbed = False):
        """Send *info* as a single embed or a paginated set of embeds.

        Large content (more than 3 fields, more than 10 lines in a section,
        or multiple images) is split across pages via SimplePaginator unless
        *forceEmbed* is set. *info* may be mutated (image list collapsed).
        """
        def maybeover(key, l, n):
            #Pick item n of list l for key, or nothing if past the end.
            if n < len(l):
                return {key:l[n]}
            else:
                return {}
        counts = {'description':info['description'].count('\n')+1 if 'description' in info else None, 'fields':[str(i['value']).count('\n')+1 for i in info['fields']], 'image':len(info['image']) if 'image' in info else 0}
        maxlines = max([counts['description'] if counts['description'] else 1]+[i for i in counts['fields']])
        baseembed = {'title':info['title']} ; iterables = {}
        if 'footer' in info:
            baseembed['footer'] = info['footer']
        if not(forceEmbed) and (len(info['fields'])>3 or maxlines>10 or ('image' in info and len(info['image'])>1)):
            if counts['description']:
                desc = splittext(counts['description'], info['description'])
                if type(desc) == str:
                    baseembed['description'] = desc
                else:
                    iterables['description'] = desc
            littlefields = splitbigfields([(i['name'], counts['fields'][ind], str(i['value'])) for ind, i in enumerate(info['fields'])])
            if len(littlefields)>3:
                iterables['fields'] = list(evensplit(littlefields, 3))
            else:
                baseembed['fields'] = littlefields
            if 'image' in info and type(info['image']) == str:
                printFun('baseImage')
                baseembed['image'] = info['image']
            elif 'image' in info and len(info['image']) == 1:
                printFun('baseImage')
                baseembed['image'] = info['image'][0]
            elif 'image' in info and len(info['image']) > 1:
                printFun('iterImage')
                iterables['image'] = info['image']
            #Spread each iterable section across the page list, padding with
            #the base embed when one iterable is longer than another.
            repfields = ('description', 'fields', 'image')
            embeds = [baseembed]
            for i in repfields:
                if i in iterables:
                    embeds = [{**(embeds[j] if j<len(embeds) else baseembed), **maybeover(i, iterables[i], j)} for j in range(max(len(embeds), len(iterables[i])))]
            embeds = [toembed(i, printFun) for i in embeds]
            if len(embeds) == 1:
                await ctx.send(None, embed = embeds[0])
            else:
                await SimplePaginator(extras=embeds, freeze=freeze).paginate(ctx)
        else:
            if 'image' in info and len(info['image']) == 1:
                info['image'] = info['image'][0]
            await ctx.send(None, embed = toembed(info, printFun))
    #Overwriting the default ger_prefix coroutine to allow for blank prefixes in DM Channels
    @asyncio.coroutine
    def get_prefix(self, message):
        """|coro|
        Retrieves the prefix the bot is listening to
        with the message as a context.
        Parameters
        -----------
        message: :class:`discord.Message`
            The message context to get the prefix of.
        Raises
        --------
        :exc:`.ClientException`
            The prefix was invalid. This could be if the prefix
            function returned None, the prefix list returned no
            elements that aren't None, or the prefix string is
            empty.
        Returns
        --------
        Union[List[str], str]
            A list of prefixes or a single prefix that the bot is
            listening for.
        """
        prefix = ret = self.command_prefix
        if callable(prefix):
            ret = prefix(self, message)
            if asyncio.iscoroutine(ret):
                ret = yield from ret
        if isinstance(ret, (list, tuple)):
            #Unlike the stock implementation, keep empty prefixes in DMs.
            if not(isinstance(message.channel, discord.DMChannel)):
                ret = [p for p in ret if p]
        if not ret:
            raise discord.ClientException('invalid prefix (could be an empty string, empty list, or None)')
        return ret
#Initializing function
async def run():
    """Construct the fetcher and bot, run until interrupted, then clean up."""
    logfun('starting fetcher')
    sf = serverfetcher.ServerFetcher()
    logfun('making bot')
    bot = RPBot(sf)
    try:
        logfun('connecting fetcher')
        await sf.init_pool(bot.settings)
        logfun('starting bot')
        await bot.start(bot.settings['token'])
    except KeyboardInterrupt:
        #Shut down whichever sub-servers the cogs managed to start.
        if bot.refserver:
            await bot.refserver.close()
        if bot.charserver:
            await bot.charserver.close()
        if bot.statserver:
            await bot.statserver.close()
        # BUG FIX: the original referenced the bare name `botdataserver`
        # (NameError) — the stats pool lives on the bot instance.
        await bot.botdataserver['pool'].close()
        await sf.pool.close()
        await bot.logout()
#Run everything
loop = asyncio.get_event_loop()
loop.run_until_complete(run())
| StarcoderdataPython |
8036737 | <filename>model/components/__init__.py
from model.components.binarizer import Binarizer | StarcoderdataPython |
3335019 | #!/usr/bin/env python3
import sys
TARGETS = {
'A': 3,
'B': 5,
'C': 7,
'D': 9
}
COSTS = {
'A': 1,
'B': 10,
'C': 100,
'D': 1000
}
def blocked(cmap, x, y):
    """Return True if any hallway cell in [min(x, y), max(x, y)) is occupied.

    Row 1 of *cmap* is the hallway; '.' marks a free cell.
    """
    lo, hi = min(x, y), max(x, y)
    return any(cmap[1][col] != '.' for col in range(lo, hi))
def available(cmap, a, b):
    """List the destination cells [row, col] reachable by the pod at (a, b).

    A hallway pod (a == 1) may only enter its target room: returns the single
    deepest free cell of that room, or None if the hallway path or the room
    itself is blocked / holds a foreign pod. A room pod gets the list of
    reachable hallway parking spots (rooms entrances excluded via *temps*).
    """
    colheight = len(cmap) - 3
    result = []
    # Hallway columns where a pod is allowed to stop (never above a room).
    temps = [1, 2, 4, 6, 8, 10, 11]
    c = cmap[a][b]
    if a == 1:
        # b+1 / b skip the pod's own cell when checking the hallway path.
        if b < TARGETS[c] and blocked(cmap, b + 1, TARGETS[c]):
            return None
        if b > TARGETS[c] and blocked(cmap, TARGETS[c], b):
            return None
        # Scan the target room bottom-up: deepest '.' wins; any foreign pod
        # forbids entry.
        for i in range(colheight):
            d = cmap[colheight + 1 - i][TARGETS[c]]
            if d == '.':
                return [[colheight + 1 - i, TARGETS[c]]]
            if d != c:
                return None
    for p in temps:
        if cmap[1][p] != '.':
            continue
        if p < b and not blocked(cmap, p, b):
            result.append([1, p])
        if p > b and not blocked(cmap, p, b + 1):
            result.append([1, p])
    return result
def ishome(cmap, y, x):
    """True if every pod from row *y* down to the room floor in column *x*
    already sits in its own target column (i.e. nothing below needs to move)."""
    return all(TARGETS[cmap[row][x]] == x for row in range(y, len(cmap) - 1))
def print_map(cmap):
    """Debug helper: dump the burrow map rows to stdout, one per line."""
    print('\n'.join(cmap))
def moveable(cmap):
    """Return [row, col] of every pod that has at least one legal move.

    Hallway pods (row 1) qualify when available() finds them a room entry;
    for each room, only its topmost pod is considered, and it is skipped
    when already home for good (ishome) or when it has nowhere to go.
    """
    result = []
    # Hallway scan.
    for j in range(1, len(cmap[1]) - 1):
        if cmap[1][j] in 'ABCD':
            if available(cmap, 1, j):
                result.append([1, j])
    # Topmost pod of each room.
    for item in TARGETS:
        j = TARGETS[item]
        i = 2
        while cmap[i][j] == '.':
            i += 1
        c = cmap[i][j]
        if c not in 'ABCD':
            continue
        if ishome(cmap, i, j):
            continue
        if available(cmap, i, j):
            result.append([i, j])
    return result
def charsub(string, ind, c):
    """Return *string* with the character at index *ind* replaced by *c*."""
    return ''.join((string[:ind], c, string[ind + 1:]))
def move(cmap, ax, ay, bx, by):
    """Move the pod at (ay, ax) to (by, bx) in place and return the energy cost.

    Cost is the manhattan distance times the pod's per-step cost (COSTS).
    """
    pod = cmap[ay][ax]
    cmap[ay] = charsub(cmap[ay], ax, '.')
    cmap[by] = charsub(cmap[by], bx, pod)
    distance = abs(ax - bx) + abs(ay - by)
    return distance * COSTS[pod]
def final(cmap):
    """True when every room column holds only its target pod type (solved state)."""
    return all(cmap[row][TARGETS[pod]] == pod
               for pod in TARGETS
               for row in range(2, len(cmap) - 1))
def tokey(cmap):
    """Serialise the map interior (all rows except the border rows) into a
    hashable state key for memoisation."""
    return ''.join(cmap[1:-1])
def attempt(cmap, score, seen):
    """Depth-first search for the cheapest solved state reachable from *cmap*.

    *seen* memoises the best score per serialised state (tokey) and also
    holds the best finished score under the sentinel key 'final', which is
    used to prune branches that are already too expensive. Returns the best
    total cost found from this branch, or None when pruned/dead-ended.
    """
    result = []
    kmap = tokey(cmap)
    # Prune: this state was reached at least as cheaply before.
    if kmap in seen and seen[kmap] <= score:
        return None
    # Prune: a complete solution cheaper than this branch already exists.
    if 'final' in seen and score >= seen['final']:
        return None
    seen[kmap] = score
    if final(cmap):
        if 'final' in seen:
            if score < seen['final']:
                seen['final'] = score
        else:
            seen['final'] = score
        return score
    for item in moveable(cmap):
        for tgt in available(cmap, item[0], item[1]):
            # list(cmap) copies the row list; move() rebinds rows, so the
            # shallow copy is sufficient (rows are immutable strings).
            nmap = list(cmap)
            val = move(nmap, item[1], item[0], tgt[1], tgt[0])
            res = attempt(nmap, score + val, seen)
            if res:
                result.append(res)
    if result:
        val = min(result)
        return val
    return None
def main():
    """Solve AoC 2021 day 23: read the burrow map from sys.argv[1] and print both parts.

    Part 2 inserts the two extra rows from the puzzle statement and re-solves
    with a fresh memo table.
    """
    seen = {}
    cmap = []
    with open(sys.argv[1], 'r') as fhan:
        for line in fhan.readlines():
            cmap.append(line.strip("\n"))
    print("Part 1:", attempt(cmap, 0, seen))
    cmap.insert(3, '  #D#B#A#C#')
    cmap.insert(3, '  #D#C#B#A#')
    seen = {}
    print("Part 2:", attempt(cmap, 0, seen))
| StarcoderdataPython |
6427348 | <filename>feedback/backend/routes/user.py
from datetime import datetime
import urllib.parse
from bcrypt import hashpw
from flask import Blueprint, request, jsonify, redirect
from flask_login import login_user, logout_user, current_user, login_required
from feedback.backend.models import Role, Volunteer
user_bp = Blueprint('user_bp', __name__)
@user_bp.route('/api/users', methods={"GET"})
def list_volunteers():
    """List this year's volunteers as JSON, prefixed with an 'Anonymous' entry.

    ?admin=true restricts the list to admin volunteers. The two near-identical
    queries of the original are collapsed into one base query plus an optional
    admin filter.
    """
    admin_only = request.args.get('admin', False) == "true"
    query = Role.query.filter(Role.year == datetime.now().year)
    if admin_only:
        # SQLAlchemy boolean-column expression; `== True` is intentional here.
        query = query.filter(Volunteer.admin == True)
    volunteers = query \
        .join(Volunteer, Role.volunteer == Volunteer.id) \
        .with_entities(Volunteer.id, Volunteer.name)
    results = [{"id": row.id, "name": row.name} for row in volunteers]
    results = [{"id": 0, "name": "Anonymous"}] + results
    return jsonify(
        {
            "volunteers": results
        }
    )
@user_bp.route("/api/login", methods={"POST"})
def login():
user_info = request.get_json(force=True)
req_params = ["volunteer_id", "password", "<PASSWORD>"]
for param in req_params:
if param not in user_info:
return jsonify(
{
"success": False,
"error": "No value '{}' value provided.".format(param)
}
), 400
volunteer = Volunteer.query.filter(Volunteer.id == user_info["volunteer_id"]).first()
if volunteer.password:
enc_entered = user_info["password"].encode("utf-8")
enc_stored = volunteer.password.encode("utf-8")
# hashpw will return the same hash using the stored salt
if hashpw(enc_entered, enc_stored) == enc_stored:
login_user(volunteer, remember=True)
return jsonify({
"success": True,
"next": urllib.parse.unquote(user_info["next"])
})
else:
return jsonify({
"success": False,
"error": "Password did not match!"
}), 400
else:
return jsonify({
"success": False,
"error": "User does not have a password set."
}), 400
@user_bp.route('/api/logout')
def logout():
    """End the current session and redirect back to the landing page."""
    logout_user()
    return redirect("/")
@user_bp.route("/api/user/self", methods={"GET"})
@login_required
def get_self():
return jsonify({
"success": True,
"current_user": {
"name": current_user.name,
"id": current_user.id
}
})
| StarcoderdataPython |
9660387 | from setuptools import setup, find_packages
import unittest
import codecs
import compare_mt
def test_suite():
    """Discover and return the unittest suite under compare_mt/tests."""
    loader = unittest.TestLoader()
    return loader.discover("compare_mt/tests", pattern="test_*.py")
# Read the long description up front so the file handle is closed
# deterministically (the original leaked the codecs.open handle).
with codecs.open("README.md", encoding="utf-8") as readme:
    long_description = readme.read()

setup(
    name="compare_mt",
    version=compare_mt.__version__,
    description="Holistic comparison of the output of text generation models",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/neulab/compare-mt",
    author="<NAME>",
    license="BSD 3-Clause",
    test_suite="setup.test_suite",
    classifiers=[
        "Intended Audience :: Developers",
        "Topic :: Text Processing",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "License :: OSI Approved :: BSD License",
        "Programming Language :: Python :: 3",
    ],
    packages=find_packages(),
    entry_points={
        "console_scripts": [
            "compare-mt=compare_mt.compare_mt_main:main",
            "compare-ll=compare_mt.compare_ll_main:main",
        ],
    },
    install_requires=[
        "nltk>=3.2",
        "numpy",
        "matplotlib",
        "absl-py",
        "sacrebleu"
    ],
    include_package_data=True,
)
| StarcoderdataPython |
12837623 | <gh_stars>1-10
import django
from os import path
# Minimal Django settings used only for running the response_timeout tests.
SECRET_KEY = 'not secret'
INSTALLED_APPS = ('response_timeout', 'test')
TEMPLATE_DEBUG = DEBUG = True
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'response_timeout.db',
    },
}
ROOT_URLCONF = 'test.urls'
# Testing
# Django < 1.6 lacks the discover-based test runner; pull in the backport.
if django.VERSION[:2] < (1, 6):
    INSTALLED_APPS += ('discover_runner',)
    TEST_RUNNER = 'discover_runner.DiscoverRunner'
    TEST_DISCOVER_TOP_LEVEL = path.dirname(path.dirname(__file__))
# Cache
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'response_timeout'
    },
}
# NOTE: UpdateCacheMiddleware must come first and FetchFromCacheMiddleware
# last, per Django's cache-middleware ordering requirement; the
# SetCacheTimeoutMiddleware under test sits between them.
MIDDLEWARE_CLASSES = (
    'django.middleware.cache.UpdateCacheMiddleware',
    'response_timeout.middleware.SetCacheTimeoutMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.cache.FetchFromCacheMiddleware',
)
CACHE_MIDDLEWARE_ALIAS = 'default'
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 1
RESPONSE_CACHE_SECONDS = 2
1744678 | <reponame>Fabrice-64/OC_Project_8
"""
These tests are for the views in food_items.
RequestFactory has been selected in order to generate
a request and check the transfer of data between the views
and the templates.
"""
from django.test import TestCase, RequestFactory
from django.contrib.auth.models import User
from . import fixture as f
from food_items import views as v
class SimpleTest(TestCase):
    """RequestFactory-based view tests for the food_items app.

    NOTE(review): assertTemplateUsed is called with only a template name;
    outside a context-manager or response argument these calls may be
    no-ops — verify they actually assert anything.
    """
    def setUp(self):
        # Populate the test database and prepare a factory plus a known user.
        f.set_up_db()
        self.factory = RequestFactory()
        self.user = User.objects.get(username="user")
    def test_product_details(self):
        """product_details renders for a known barcode."""
        request = self.factory.get('/food_items/product_details')
        response = v.product_details(request, "01234567891011")
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed('food_items/product_details.html')
    def test_search_results(self):
        """search_results renders for an authenticated user."""
        request = self.factory.get('/food_items/search_results/')
        request.user = self.user
        response = v.search_results(request)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed('food_items/search_results.html')
    def test_record_product(self):
        """record_product saves and redirects."""
        request = self.factory.get('food_items/record_product/')
        request.user = self.user
        response = v.record_product(request, "01234567891011")
        # code 302 due to a redirection in the record_product view
        self.assertEqual(response.status_code, 302)
        self.assertTemplateUsed('food_items/search_results.html')
    def test_favorites(self):
        """fetch_favorites renders the favorites page."""
        request = self.factory.get('/food_items/favorites/')
        request.user = self.user
        response = v.fetch_favorites(request)
        self.assertTemplateUsed('food_items/favorites.html')
        self.assertEqual(response.status_code, 200)
| StarcoderdataPython |
4857399 | """
Convert an LAS LIDAR file to a shapefile
by creating a 3D triangle mesh using
Delaunay Triangulation.
"""
# http://git.io/vOE4f
# cPickle is used to store
# tessalated triangles
# to save time writing
# future shapefiles
import pickle
import os
import time
import math
import numpy as np
import shapefile
# laspy for Python 3: pip install http://git.io/vOER9
from laspy.file import File
# voronoi.py for Python 3: pip install https://git.io/Je3qu
import voronoi
# Source LAS file
source = "clippedLAS.las"
# Output shapefile
target = "mesh"
# Triangles archive
archive = "triangles.p"
class Point:
    """Point class required by the voronoi module.

    Presumably computeDelaunayTriangulation() calls the x()/y() accessor
    methods on each point — confirm in voronoi.py. Coordinates live on
    px/py so the names x/y stay free for the accessors.
    """
    def __init__(self, x, y):
        self.px = x
        self.py = y
    def x(self):
        # Accessor required by the voronoi API.
        return self.px
    def y(self):
        # Accessor required by the voronoi API.
        return self.py
# The triangle array holds tuples
# 3 point indicies used to retrieve the points.
# Load it from a pickle
# file or use the voronoi module
# to create the triangles.
triangles = None
# Open LIDAR LAS file
las = File(source, mode="r")
points = []
print("Assembling points...")
# Pull points from LAS file
for x, y in np.nditer((las.x, las.y)):
    points.append(Point(x, y))
print("Composing triangles...")
# Delaunay Triangulation
triangles = voronoi.computeDelaunayTriangulation(points)
# Save the triangles to save time if we write more than
# one shapefile.
f = open(archive, "wb")
pickle.dump(triangles, f, protocol=2)
f.close()
print("Creating shapefile...")
# PolygonZ shapefile (x, y, z, m)
w = shapefile.Writer(target, shapefile.POLYGONZ)
# One attribute column per triangle-corner coordinate.
w.field("X1", "C", "40")
w.field("X2", "C", "40")
w.field("X3", "C", "40")
w.field("Y1", "C", "40")
w.field("Y2", "C", "40")
w.field("Y3", "C", "40")
w.field("Z1", "C", "40")
w.field("Z2", "C", "40")
w.field("Z3", "C", "40")
tris = len(triangles)
# Loop through shapes and
# track progress every 10 percent
last_percent = 0
for i in range(tris):
    t = triangles[i]
    percent = int((i/(tris*1.0))*100.0)
    if percent % 10.0 == 0 and percent > last_percent:
        last_percent = percent
        print("{} % done - Shape {}/{} at {}".format(percent, i, tris,
                                                     time.asctime()))
    part = []
    # Resolve the triangle's three vertex indices back to LAS coordinates.
    x1 = las.x[t[0]]
    y1 = las.y[t[0]]
    z1 = las.z[t[0]]
    x2 = las.x[t[1]]
    y2 = las.y[t[1]]
    z2 = las.z[t[1]]
    x3 = las.x[t[2]]
    y3 = las.y[t[2]]
    z3 = las.z[t[2]]
    # Check segments for large triangles
    # along the convex hull which is an common
    # artificat in Delaunay triangulation
    # (NOTE: `max` shadows the builtin within this loop body.)
    max = 3
    if math.sqrt((x2-x1)**2+(y2-y1)**2) > max:
        continue
    if math.sqrt((x3-x2)**2+(y3-y2)**2) > max:
        continue
    if math.sqrt((x3-x1)**2+(y3-y1)**2) > max:
        continue
    part.append([x1, y1, z1, 0])
    part.append([x2, y2, z2, 0])
    part.append([x3, y3, z3, 0])
    w.polyz([part])
    w.record(x1, x2, x3, y1, y2, y3, z1, z2, z3)
print("Saving shapefile...")
print("Done.")
5071609 | <filename>2-Mandelbrot/cffi-out-of-line/build_mandelbrot.py<gh_stars>0
import numpy
from cffi import FFI
ffi = FFI()
# Read the C implementation and declarations; with-blocks close the handles
# (the originals leaked the open file objects).
with open('C_fmandel.c', 'r') as c_source:
    Ccode = c_source.read()
ffi.set_source("_mandelbrot", Ccode)
with open('C_fmandel.h', 'r') as c_header:
    Cdefs = c_header.read()
ffi.cdef(Cdefs)

if __name__ == "__main__":
    ffi.compile()
| StarcoderdataPython |
6478670 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import argparse
import csv
import split_acti
import pickle
import time
import keras.backend as K
from numpy.random import seed
from sklearn.metrics import accuracy_score
import tensorflow
def main(pca_dims, compress_rate, s_rate):
    """Evaluate one stored CNN on the WISDM test split and log the result.

    Loads the pickled model matching (pca_dims, compress_rate, s_rate),
    predicts on the 20 Hz test set, prints accuracy and prediction time,
    appends a CSV marker line, and frees the Keras session.
    """
    rate = 1  # sampling-rate divisor: 1 -> full 20 Hz data
    seed(2020)  # fixed seed so the train/test split is reproducible
    X_train, X_test, y_train, y_test = split_acti.main(pca_dims, rate, 0.3)
    moderated = str(int(20 // rate))
    model_path = ('model/' + moderated + 'Hz/pickled/' + 'pickled_' +
                  'decompressed_pca=' + str(pca_dims) +
                  '_compress_rate=' + str(compress_rate) +
                  '_sparse_rate=' + str(s_rate) + '.hdf5')
    # Close the pickle handle deterministically (the original leaked it).
    with open(model_path, 'rb') as model_file:
        model = pickle.load(model_file)
    y_test = y_test - 1  # labels are 1-based; model outputs are 0-based
    start_time = time.process_time()
    pred_test = model.predict(np.expand_dims(X_test, axis=2), batch_size=32)
    pred_time = time.process_time() - start_time
    testing_acc = accuracy_score(y_test, np.argmax(pred_test, axis=1))
    del model
    print(pca_dims, compress_rate, testing_acc, pred_time)
    with open('testing_results_final_wisdm.txt', 'a', newline='') as f_out:
        writer = csv.writer(f_out)
        writer.writerow(['main(', pca_dims, compress_rate, '0.1)'])
    K.clear_session()
if __name__ == '__main__':
    # Truncate the results file so each run starts with a clean log
    # (creating the writer mirrors the original; nothing is written here).
    with open('testing_results_final_wisdm.txt', 'w', newline='') as f_out:
        writer = csv.writer(f_out)
    # Evaluate every stored model: each PCA dimensionality x compression
    # rate, at a fixed sparsity rate of 0.1.
    x = [7, 8, 10, 20, 30]
    y = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    for pca_dims in x:
        for compress_rate in y:
            main(pca_dims, compress_rate, 0.1)
| StarcoderdataPython |
9635356 | #!/usr/bin/env fontforge
#
# Copyright (c) 2017, <NAME> (https://sungsit.com | gibbozer [at] gmail [dot] com).
#
# This Font Software is licensed under the SIL Open Font License, Version 1.1 (OFL).
# You should have received a copy of the OFL License along with this file.
# If not, see http://scripts.sil.org/OFL
#
# This script will create SFD files from multi-layers source to prepare for later build process
# and it will only work with FontForge's Python extension.
import fontforge
import os
import subprocess
import shutil
import time
import datetime
# Font props
# Name/metadata properties written into the generated fonts.
family = 'BoonBaan'
version = '2.0'
foundry = 'FontUni'
# OS/2 table vendor identifier.
os2_vendor = 'FUni'
foundry_url = 'https://fontuni.com/'
designer = '<NAME>'
designer_url = 'https://sungsit.com/'
license_url = 'http://scripts.sil.org/OFL'
# NOTE(review): `copyright` shadows the `copyright` builtin; harmless in a
# standalone build script, but worth renaming if this file is ever imported.
copyright = 'Copyright 2014-2017, <NAME> (https://fontuni.com | <EMAIL>). This Font Software is licensed under the SIL Open Font License, Version 1.1 (http://scripts.sil.org/OFL).'
# Building sources
feature_dir = 'sources/'
# Multi-layer master SFD sources and the matching feature-file base names
# (roman and oblique variants).
sources = ['sources/boonbaan-master.sfd', 'sources/boonbaan-master-oblique.sfd']
features = ['boonbaan-roman', 'boonbaan-oblique']
# Weight layers contained in the master sources (Regular=400, Bold=700).
layers = ['400', '700']
# Dir names
# Remove stale output so every build starts from a clean tree.
build_dir = 'fonts/'
if os.path.exists(build_dir):
    shutil.rmtree(build_dir)
sfd_dir = 'sfd/'
if os.path.exists(sfd_dir):
    shutil.rmtree(sfd_dir)
# Release packages
pkgs = ['otf', 'ttf', 'woff-otf', 'woff-ttf', 'woff2-otf', 'woff2-ttf']
# PS private values
def BlueValues(weight):
    """Return the PostScript ``BlueValues`` alignment zones for *weight*.

    Currently the zones are identical for the 400 (Regular) and 700 (Bold)
    layers.  Returns ``None`` for any other weight.
    """
    if weight == 400:
        return (-20, 0, 600, 620, 780, 800, 840, 840)
    if weight == 700:
        return (-20, 0, 600, 620, 780, 800, 840, 840)
    return None
def OtherBlues(weight):
    """Return the PostScript ``OtherBlues`` (descender) zone for *weight*.

    The zone is the same for both known layers (400/700); ``None`` is
    returned for unknown weights.
    """
    return (-260, -240) if weight in (400, 700) else None
def StdHW(weight):
    """Return the PostScript ``StdHW`` (dominant horizontal stem width)
    one-tuple for *weight*, or ``None`` for unknown weights."""
    widths = {
        400: (80,),
        700: (120,),
    }
    return widths.get(weight)
def StdVW(weight):
    """Return the PostScript ``StdVW`` (dominant vertical stem width)
    one-tuple for *weight*, or ``None`` for unknown weights."""
    if weight == 400:
        return (95,)
    elif weight == 700:
        return (160,)
    return None
| StarcoderdataPython |
392188 | <reponame>mpi-array/mpi_array
"""
=========================================
The :mod:`mpi_array.globale_ufunc` Module
=========================================
Defines :obj:`numpy.ufunc` functions for :obj:`mpi_array.globale.gndarray`.
Classes
=======
.. autosummary::
:toctree: generated/
GndarrayArrayUfuncExecutor - Creates :obj:`gndarray` outputs and forwards to `numpy.ufunc`.
Functions
=========
.. autosummary::
:toctree: generated/
get_dtype_and_ndim - Return :obj:`numpy.dtype` and :samp:`ndim` properties for an object.
ufunc_result_type - Like :func:`numpy.result_type`.
broadcast_shape - Calculates broadcast shape from sequence of shape arguments.
shape_extend_dims - Prepend ones to 1D *shape* sequence to make it a specified dimension.
gndarray_array_ufunc - A :obj:`numpy.ndarray` like distributed array.
"""
from __future__ import absolute_import
import sys as _sys
import numpy as _np
import mpi4py.MPI as _mpi
from .license import license as _license, copyright as _copyright, version as _version
from . import logging as _logging # noqa: E402,F401
from . import globale_creation as _globale_creation
from . import comms as _comms
from .distribution import ScalarLocaleExtent, ScalarGlobaleExtent, LocaleExtent, GlobaleExtent
__author__ = "<NAME>"
__license__ = _license()
__copyright__ = _copyright()
__version__ = _version()
def get_dtype_and_ndim(array_like):
    """
    Returns the :samp:`(dtype, ndim)` pair for the given :samp:`{array_like}`.

    Objects which already expose *both* :samp:`"dtype"` and :samp:`"ndim"`
    attributes (e.g. :obj:`numpy.ndarray` or :obj:`mpi_array.globale.gndarray`)
    are queried directly; anything else is first converted
    with :func:`numpy.asanyarray`.

    :type array_like: castable to :obj:`numpy.ndarray`
    :param array_like: Returns dtype and ndim for this object.
    :rtype: two element :obj:`tuple`
    :return: The :obj:`numpy.dtype` and integer :samp:`ndim` properties
       for :samp:`{array_like}`.

    Example::

        >>> get_dtype_and_ndim(1.0)
        (dtype('float64'), 0)
        >>> get_dtype_and_ndim((1.0, 2.0, 3.0, 4.0))
        (dtype('float64'), 1)
        >>> get_dtype_and_ndim([(1.0, 2.0, 3.0, 4.0), (5.0, 6.0, 7.0, 8.0)])
        (dtype('float64'), 2)
    """
    if hasattr(array_like, "dtype") and hasattr(array_like, "ndim"):
        return (array_like.dtype, array_like.ndim)
    as_array = _np.asanyarray(array_like)
    return (as_array.dtype, as_array.ndim)
def ufunc_result_type(
    ufunc_types,
    inputs,
    outputs=None,
    casting="safe",
    input_match_casting="safe"
):
    """
    Attempts to calculate the result type from given ufunc :samp:`{inputs}`
    and ufunc types (:attr:`numpy.ufunc.types`).
    Like :obj:`numpy.result_type`, but
    handles :obj:`mpi_array.globale.gndarray` in the :samp:`{inputs}`
    and handles multiple :samp:`{outputs}` cases.

    :type ufunc_types: sequence of `str`
    :param ufunc_types: The :attr:`numpy.ufunc.types` attribute,
       e.g. :samp:`['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', ..., 'mm->m', 'mM->M', 'OO->O']`.
    :type inputs: sequence of :obj:`object`
    :param inputs: The inputs (e.g. :obj:`numpy.ndarray`, scalars
       or :obj:`mpi_array.globale.gndarray`) to a :obj:`numpy.ufunc` call.
    :type outputs: :samp:`None` or sequence of :obj:`object`
    :param outputs: The output arrays these are explicitly checked casting correctness.
    :type casting: :obj:`str` :samp:`{'no', 'equiv', 'safe', 'same_kind', 'unsafe'}`
    :param casting: Casting mode applied to outputs. See :func:`numpy.can_cast`.
    :type input_match_casting: :obj:`str` :samp:`{'no', 'equiv', 'safe', 'same_kind', 'unsafe'}`
    :param input_match_casting: Casting mode applied to match :samp:`{ufunc_types}` inputs
       with the :samp:`{inputs}`. See :func:`numpy.can_cast`.
    :rtype: :obj:`tuple` of :obj:`numpy.dtype`
    :return: A tuple of :obj:`numpy.dtype` indicating the output types produced for
       the given inputs.
    :raises ValueError: If the inputs (and outputs) cannot be cast to an
       appropriate element of :samp:`{ufunc_types}`.

    Example::

        >>> import numpy as np
        >>> import mpi_array as mpia
        >>> inp = (
        ...     np.zeros((10,10,10), dtype='float16'),
        ...     16.0,
        ...     mpia.zeros((10,10,10), dtype='float32'),
        ... )
        >>> ufunc_result_type(['eee->e?', 'fff->f?', 'ddd->d?'], inputs=inp)
        (dtype('float32'), dtype('bool'))
        >>> out = (mpia.zeros((10,10,10), dtype="float64"),)
        >>> ufunc_result_type(['eee->e?', 'fff->f?', 'ddd->d?'], inputs=inp, outputs=out)
        (dtype('float64'), dtype('bool'))
        >>> out += (mpia.zeros((10, 10, 10), dtype="uint16"),)
        >>> ufunc_result_type(['eee->e?', 'fff->f?', 'ddd->d?'], inputs=inp, outputs=out)
        (dtype('float64'), dtype('uint16'))
        >>> mpia.free_all(inp + out)
    """
    logger = _logging.get_rank_logger(__name__)
    result_dtypes = None
    # Split each "in->out" signature string into its input and output dtype
    # character codes and convert both sides to numpy dtypes.
    ufunc_in_types = tuple(in2out_str.split("->")[0] for in2out_str in ufunc_types)
    ufunc_in_dtypes = \
        _np.asarray(
            tuple(
                tuple(_np.dtype(c) for c in ufunc_in_types[i])
                for i in range(len(ufunc_in_types))
            )
        )
    ufunc_out_types = tuple(in2out_str.split("->")[1] for in2out_str in ufunc_types)
    ufunc_out_dtypes = \
        _np.asarray(
            tuple(
                tuple(_np.dtype(c) for c in ufunc_out_types[i])
                for i in range(len(ufunc_out_types))
            )
        )
    # Gather the dtype and dimensionality of each ufunc input.
    in_dtypes_and_ndims = \
        _np.asarray(tuple(get_dtype_and_ndim(input) for input in inputs))
    in_dtypes = in_dtypes_and_ndims[:, 0]
    in_ndims = in_dtypes_and_ndims[:, 1]
    logger.debug("inputs=%s", inputs)
    logger.debug("in_dtypes=%s", in_dtypes)
    logger.debug("in_ndims=%s", in_ndims)
    logger.debug("ufunc_in_dtypes=%s", ufunc_in_dtypes)
    out_dtypes = None
    if (outputs is not None) and (len(outputs) > 0):
        out_dtypes = \
            _np.asarray(
                tuple(
                    output.dtype
                    if hasattr(output, "dtype") else _np.asarray(output).dtype
                    for output in outputs
                )
            )
    # First pass: look for a signature whose input dtypes match exactly.
    idx = None
    idxs = _np.where(_np.logical_and.reduce(ufunc_in_dtypes == in_dtypes, axis=1))
    if len(idxs) > 0 and len(idxs[0]) > 0:
        idx = idxs[0][0]
    if idx is None:
        # Second pass: take the first signature the inputs can be *cast* to.
        # Scalar inputs (ndim <= 0) are matched by value (numpy permits
        # value-based casting for scalars); array inputs by their dtype.
        in_scalars_and_dtypes = \
            tuple(
                inputs[i]
                if in_ndims[i] <= 0 else in_dtypes[i]
                for i in range(len(inputs))
            )
        idxs = \
            _np.where(
                _np.asarray(
                    tuple(
                        _np.all(
                            tuple(
                                _np.can_cast(
                                    in_scalars_and_dtypes[j],
                                    ufunc_in_dtypes[i, j],
                                    casting=input_match_casting
                                )
                                for j in range(ufunc_in_dtypes.shape[1])
                            )
                        )
                        for i in range(ufunc_in_dtypes.shape[0])
                    )
                )
            )
        if len(idxs) > 0 and len(idxs[0]) > 0:
            idx = idxs[0][0]
    if idx is not None:
        ufunc_out_dtypes_for_in = ufunc_out_dtypes[idx]
        # Explicit outputs whose dtypes differ from the matched signature
        # must still be reachable under the requested `casting` rule.
        if (
            (out_dtypes is not None)
            and
            _np.any(ufunc_out_dtypes_for_in[:len(out_dtypes)] != out_dtypes)
        ):
            if (
                _np.any(
                    tuple(
                        not _np.can_cast(ufunc_out_dtypes_for_in[i], out_dtypes[i], casting=casting)
                        for i in range(len(out_dtypes))
                    )
                )
            ):
                raise ValueError(
                    "Could not cast ufunc-output-types %s to desired output-types = %s."
                    %
                    (
                        tuple(ufunc_out_dtypes_for_in),
                        tuple(out_dtypes)
                    )
                )
        if out_dtypes is None:
            out_dtypes = _np.array((), dtype='O')
        # Explicit output dtypes take precedence; any remaining outputs take
        # their dtype from the matched ufunc signature.
        result_dtypes = \
            tuple(
                out_dtypes.tolist()
                +
                ufunc_out_dtypes_for_in[len(out_dtypes):].tolist()
            )
    else:
        raise ValueError(
            "Could not cast (with input_match_casting='%s') inputs types = %s to ufunc types=\n%s"
            %
            (input_match_casting, in_dtypes, ufunc_in_dtypes, )
        )
    return result_dtypes
def broadcast_shape(*shape_args):
    """
    Returns
    the :mod:`numpy` `broadcast <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_
    shape for the give shape arguments.

    :type shape1, shape2, ...: sequence of `int`
    :param shape1, shape2, ...: Array shapes to be broadcast.
    :rtype: sequence of `int`
    :return: The broadcast shape.
    :raises ValueError: If the shapes are not broadcast-compatible.

    Examples::

        >>> broadcast_shape((4,), (4,))
        (4,)
        >>> broadcast_shape((4, 1), (1, 5))
        (4, 5)
        >>> broadcast_shape((4, 1, 3, 7), (1, 8, 1, 7))
        (4, 8, 3, 7)
        >>> broadcast_shape((3, 7), ())
        (3, 7)
    """
    max_ndim = _np.max(tuple(len(shape) for shape in shape_args))
    if max_ndim <= 0:
        # All arguments are scalar shapes.
        return ()
    # Left-pad every shape with ones so they all have max_ndim dimensions.
    padded_shapes = \
        _np.asarray(
            tuple((1,) * (max_ndim - len(shape)) + tuple(shape) for shape in shape_args)
        )
    result = _np.amax(padded_shapes, axis=0)
    # Each axis must either equal the broadcast size or be 1.
    if _np.any(_np.logical_and(padded_shapes != 1, padded_shapes != result)):
        raise ValueError(
            "shape mismatch - objects cannot be broadcast to a single shape:\n%s"
            %
            (shape_args,)
        )
    return tuple(result)
def shape_extend_dims(ndim, shape):
    """
    Returns :obj:`shape` pre-prepended with ones so returned 1D tuple has
    length :samp:`{ndim}`.  If :samp:`len({shape}) >= {ndim}` the shape is
    returned unchanged (as a :obj:`tuple`).

    :type ndim: :obj:`int`
    :param ndim: Length of returned 1D sequence.
    :type shape: sequence of :obj:`object`
    :param shape: The shape to extend.
    :rtype: :obj:`tuple`
    :return: Sequence pre-pended with one elements so that sequence length
       equals :samp:`{ndim}`.

    Example::

        >>> shape_extend_dims(5, (3, 1, 5))
        (1, 1, 3, 1, 5)
        >>> shape_extend_dims(3, (3, 1, 5))
        (3, 1, 5)
        >>> shape_extend_dims(1, (3, 1, 5))
        (3, 1, 5)
    """
    shape = tuple(shape)
    num_padding = ndim - len(shape)
    if num_padding <= 0:
        return shape
    return (1,) * num_padding + shape
def get_extents(input, locale_info):
    """
    Returns a :samp:`(locale_extent, globale_extent)` pair for
    the given :samp:`input`, where :samp:`locale_extent` is
    a :obj:`mpi_array.distribution.LocaleExtent` instance and :samp:`globale_extent` is
    a :obj:`mpi_array.distribution.GlobaleExtent` instance.

    :type input: scalar, array like or :obj:`mpi_array.globale.gndarray`
    :param input: Return extents for this input.
    :type locale_info: :obj:`mpi_array.comms.ThisLocaleInfo`
    :param locale_info: The rank info required for constructing
       a :obj:`mpi_array.distribution.LocaleExtent` instance
       for :samp:`input` types which are not :obj:`mpi_array.globale.gndarray`.
    :rtype: :obj:`tuple`
    :return: A :samp:`(locale_extent, globale_extent)` pair indicating the
       extents of the :samp:`{input}` array-like.
    """
    locale_extent = None
    globale_extent = None
    # Normalise non-array inputs (scalars, lists, ...) to numpy arrays so
    # they have shape/ndim attributes.
    if not (hasattr(input, "shape") and hasattr(input, "ndim")):
        input = _np.asanyarray(input)
    if hasattr(input, "lndarray_proxy") and hasattr(input, "distribution"):
        # Input is a gndarray: extents come directly from its distribution.
        locale_extent = input.lndarray_proxy.locale_extent
        globale_extent = input.distribution.globale_extent
    elif input.ndim > 0:
        # Plain (locale-replicated) array: a single extent covering the
        # entire array serves as both the locale and globale extent.
        start = (0,) * input.ndim
        globale_extent = GlobaleExtent(start=start, stop=input.shape)
        locale_extent = \
            LocaleExtent(
                peer_rank=locale_info.peer_rank,
                inter_locale_rank=locale_info.inter_locale_rank,
                globale_extent=globale_extent,
                start=start,
                stop=input.shape
            )
    else:
        # Zero-dimensional (scalar) input.
        locale_extent = \
            ScalarLocaleExtent(
                peer_rank=locale_info.peer_rank,
                inter_locale_rank=locale_info.inter_locale_rank
            )
        globale_extent = ScalarGlobaleExtent()
    return (locale_extent, globale_extent)
def calc_matching_locale_slices(out_locale_extent, out_globale_extent, inp_locale_extents):
    """
    Returns :obj:`tuple` of :obj:`slice` (one tuple for each pair-element
    in :samp:`{inp_locale_extents}`). The returned *slices* indicate the
    portion of the corresponding input extent which broadcasts
    to the output extent :samp:`{out_locale_extent}`.
    Assumes :samp:`{out_locale_extent}.ndim >= {inp_locale_extents}[i].ndim`
    for :samp:`i in range(0, len({inp_locale_extents})`.

    :type out_locale_extent: :obj:`mpi_array.distribution.LocaleExtent`
    :param out_locale_extent: A locale extent of the output array.
    :type out_globale_extent: :obj:`mpi_array.distribution.GlobaleExtent`
    :param out_globale_extent: The globale extent of the output :obj:`mpi_array.globale.gndarray`.
    :type inp_locale_extents: sequence of extent pairs
    :param inp_locale_extents: This is the sequence
       of :samp:`(inp_locale_extent, inp_globale_extent)` pairs, one pair for
       each ufunc input.
    :rtype: :obj:`tuple` of :obj:`tuple` elements
    :return: For each pair :samp:`(inp_locale_extent, inp_globale_extent)`
       in :samp:`{inp_locale_extents}` returns a :obj:`tuple`-of-:obj:`slice`
       indicating the portion of :samp:`inp_locale_extent` which is to be broadcast
       with :samp:`{out_locale_extent}`. Tuple indices are globale.
    """
    # NOTE(review): out_globale_extent is currently unused in the body --
    # confirm whether it can be dropped from the signature's documentation.
    slice_list = []
    out_loc_start = out_locale_extent.start
    out_loc_shape = out_locale_extent.shape
    for inp_loc, inp_glb in inp_locale_extents:
        slc_tuple = None
        if inp_glb.ndim >= 1:
            inp_glb_shape = inp_glb.shape
            inp_loc_start = inp_loc.start
            inp_loc_shape = inp_loc.shape
            inp_slc_start = _np.zeros_like(inp_loc_start)
            inp_slc_shape = inp_loc_shape.copy()
            slc_tuple = []
            # Walk axes from the last dimension backwards so numpy
            # broadcasting rules (align trailing axes) apply.
            for a in range(-1, -(len(inp_loc_shape) + 1), -1):
                if inp_glb_shape[a] == 1:
                    # Broadcast axis: the whole length-one extent is used.
                    inp_slc_start[a] = 0
                    inp_slc_shape[a] = 1
                else:
                    # Non-broadcast axis: mirror the output locale extent.
                    inp_slc_start[a] = out_loc_start[a]
                    inp_slc_shape[a] = out_loc_shape[a]
                slc = slice(inp_slc_start[a], inp_slc_start[a] + inp_slc_shape[a])
                slc_tuple.insert(0, slc)
            slc_tuple = tuple(slc_tuple)
        # slc_tuple stays None for scalar (0D) inputs.
        slice_list.append(slc_tuple)
    return tuple(slice_list)
def calc_matching_peer_rank_slices(out_slice, inp_arys):
    """
    For each input array in :samp:`{inp_arys}`, calculates the portion
    which broadcasts to the :samp:`{out_slice}`.
    Returns :obj:`tuple` of :obj:`slice` tuples (or :samp:`None` for scalar
    inputs), one element per array/scalar in :samp:`{inp_arys}`.
    Assumes :samp:`len({out_slice}) >= {inp_arys}[i].ndim`
    for :samp:`i in range(0, len({inp_arys})`.

    :type out_slice: :obj:`tuple` of :obj:`slice`
    :param out_slice: Slice indicating a portion (sub-array) of an output array.
    :type inp_arys: Sequence of :obj:`numpy.ndarray`
    :param inp_arys: The ufunc input arrays.
    """
    matching_slices = []
    for ary in inp_arys:
        if not (hasattr(ary, "ndim") and (ary.ndim >= 1)):
            # Scalars broadcast everywhere; no slice required.
            matching_slices.append(None)
            continue
        ary_shape = _np.array(ary.shape)
        starts = _np.zeros_like(ary_shape)
        stops = starts + ary_shape
        axis_slices = []
        # Trailing axes are aligned first, per numpy broadcasting rules.
        for axis in range(-1, -(len(ary_shape) + 1), -1):
            if ary_shape[axis] == 1:
                # Broadcast axis: always take the single element.
                starts[axis] = 0
                stops[axis] = 1
            else:
                # Matching axis: mirror the output sub-array's bounds.
                starts[axis] = out_slice[axis].start
                stops[axis] = out_slice[axis].stop
            axis_slices.insert(0, slice(starts[axis], stops[axis]))
        matching_slices.append(tuple(axis_slices))
    return tuple(matching_slices)
def convert_to_array_like(inputs):
    """
    Uses :obj:`numpy.asanyarray` to convert input ufunc arguments
    to array-like objects.

    Elements which already have *both* :samp:`"shape"` and :samp:`"ndim"`
    attributes are passed through unchanged (same object identity); all
    other elements are converted with :obj:`numpy.asanyarray`.

    :type inputs: sequence of :obj:`object`
    :param inputs: The ufunc arguments to normalise.
    :rtype: sequence of :obj:`object`
    :return: Tuple where the elements of :samp:`{inputs}` are array-like.
    """
    converted = []
    for obj in inputs:
        if hasattr(obj, "shape") and hasattr(obj, "ndim"):
            converted.append(obj)
        else:
            converted.append(_np.asanyarray(obj))
    return tuple(converted)
def check_equivalent_inter_locale_comms(
    gndarrays,
    equivalent_compare=(_mpi.IDENT, _mpi.CONGRUENT)
):
    """
    Checks that all the :obj:`mpi_array.globale.gndarray` elements
    of :samp:`{gndarrays}` have equivalent inter-locale communicators.

    The first array's communicator is compared against each of the rest;
    a pair is equivalent when both are :obj:`mpi4py.MPI.COMM_NULL`, or
    when :obj:`mpi4py.MPI.Comm.Compare` yields a result listed
    in :samp:`{equivalent_compare}`.

    :raises ValueError: if the arrays do not have equivalent inter-locale communicators.
    """
    if (gndarrays is not None) and (len(gndarrays) > 0):
        inter_locale_comm0 = gndarrays[0].locale_comms.inter_locale_comm
        for c in (gndary.locale_comms.inter_locale_comm for gndary in gndarrays[1:]):
            # Mismatch when exactly one of the two comms is COMM_NULL, or
            # when MPI's comparison is not in the accepted equivalence set.
            if (
                (
                    (c == _mpi.COMM_NULL)
                    and
                    (inter_locale_comm0 != _mpi.COMM_NULL)
                )
                or
                (
                    (c != _mpi.COMM_NULL)
                    and
                    (inter_locale_comm0 == _mpi.COMM_NULL)
                )
                or
                _mpi.Comm.Compare(inter_locale_comm0, c) not in equivalent_compare
            ):
                raise ValueError(
                    (
                        "Got inter_locale_comm=%s (name=%s) non-congruent with "
                        +
                        " inter_locale_comm=%s (name=%s)."
                    )
                    %
                    (
                        inter_locale_comm0,
                        inter_locale_comm0.name if inter_locale_comm0 != _mpi.COMM_NULL else "",
                        c,
                        c.name if c != _mpi.COMM_NULL else ""
                    )
                )
class GndarrayArrayUfuncExecutor(object):

    """
    Instances execute a ufunc for a :obj:`mpi_array.globale.gndarray`.
    Takes care of creating outputs, remote fetching of required parts of inputs
    and forwarding call to :obj:`numpy.ufunc` instance to perform
    the computation on the locale :obj:`numpy.ndarray` instances.
    """

    def __init__(self, array_like_obj, ufunc, method, *inputs, **kwargs):
        """
        Initialise.

        :type array_like_obj: :obj:`mpi_array.globale.gndarray`
        :param array_like_obj: The :obj:`mpi_array.globale.gndarray` which
           triggered the :samp:`__array_ufunc__` call.
        :type ufunc: :obj:`numpy.ufunc`
        :param ufunc: The ufunc to be executed.
        :type method: :obj:`str`
        :param method: The name of the method of :samp:`{ufunc}` which is
           to be executed.
        :type inputs: array like
        :param inputs: The ufunc inputs.
        :type kwargs: keyword args
        :param kwargs: The ufunc keyword arguments.
        """
        self._array_like_obj = array_like_obj
        self._ufunc = ufunc
        self._method = method
        # Non-gndarray inputs are normalised to numpy arrays up front.
        self._inputs = convert_to_array_like(inputs)
        self._kwargs = kwargs
        self._outputs = None
        if "out" in self._kwargs.keys():
            self._outputs = self._kwargs["out"]
        self._casting = None
        if "casting" in self._kwargs.keys():
            self._casting = self._kwargs["casting"]
        else:
            # numpy's default casting mode for ufunc calls.
            self._casting = "same_kind"

    @property
    def array_like_obj(self):
        """
        The :obj:`mpi_array.globale.gndarray` object which triggered the
        construction of this :obj:`GndarrayArrayUfuncExecutor` object.
        """
        return self._array_like_obj

    @property
    def peer_comm(self):
        """
        The peer :obj:`mpi4py.MPI.Comm` communicator.
        """
        return self._array_like_obj.locale_comms.peer_comm

    @property
    def intra_locale_comm(self):
        """
        The intra-locale :obj:`mpi4py.MPI.Comm` communicator.
        """
        return self._array_like_obj.locale_comms.intra_locale_comm

    @property
    def inter_locale_comm(self):
        """
        The inter-locale :obj:`mpi4py.MPI.Comm` communicator.
        """
        return self._array_like_obj.locale_comms.inter_locale_comm

    @property
    def ufunc(self):
        """
        The :obj:`numpy.ufunc` to be executed.
        """
        return self._ufunc

    @property
    def outputs(self):
        """
        The ufunc :obj:`mpi_array.globale.gndarray` output arrays.
        """
        return self._outputs

    @property
    def inputs(self):
        """
        The sequence of ufunc inputs.
        """
        return self._inputs

    @property
    def casting(self):
        """
        A :obj:`str` indicating the casting mode.
        """
        return self._casting

    @property
    def method(self):
        """
        A :obj:`str` indicating the method of the :attr:`ufunc` to be executed.
        """
        return self._method

    def get_inputs_shapes(self):
        """
        Returns a *shape* :obj:`tuple` for each element of :attr:`inputs`.

        :rtype: :obj:`tuple`
        :return: Shape of each ufunc input.
        """
        return \
            tuple(
                input.shape
                if hasattr(input, "shape") else
                _np.asarray(input).shape
                for input in self._inputs
            )

    def get_best_match_input(self, result_shape):
        """
        Returns the element of :attr:`inputs` whose globale shape
        best matches :samp:`{result_shape}`.

        :rtype: :samp:`None` or :obj:`mpi_array.globale.gndarray`.
        :return: The input array whose shape matches :samp:`{result_shape}`,
           or :samp:`None` if none of the inputs are a good match.
        """
        best_input = None
        result_shape = _np.array(result_shape, dtype="int64")
        input_shapes = self.get_inputs_shapes()
        are_same_shape = \
            _np.array(
                tuple(
                    (len(result_shape) == len(in_shape)) and _np.all(result_shape == in_shape)
                    for in_shape in input_shapes
                )
            )
        if _np.any(are_same_shape):
            # An exact shape match wins outright.
            best_input = self._inputs[_np.where(are_same_shape)[0][0]]
        else:
            # Otherwise pick the input with minimum sum-of-squared
            # per-axis difference from the result shape (shapes are
            # dimension-extended first so they align).
            input_shapes = \
                _np.array(
                    tuple(
                        _np.array(shape_extend_dims(len(result_shape), in_shape))
                        for in_shape in input_shapes
                    ),
                    dtype="int64"
                )
            d = input_shapes - result_shape
            d *= d
            d = d.sum(axis=1)
            best_input = self._inputs[_np.argmin(d)]
        return best_input

    def create_outputs(self, outputs, result_shape, result_types):
        """
        Returns list of output :obj:`mpi_array.globale.gndarray` instances.

        :type outputs: :samp:`None` or :obj:`tuple` of :obj:`mpi_array.globale.gndarray`
        :param outputs: Output arrays passed in as the :samp:`out` argument
           of the :obj:`numpy.ufunc`.
        :type result_shape: sequence of :obj:`int`
        :param result_shape: The shape of all output arrays.
        :type result_types: sequence of :samp:`numpy.dtype`
        :param result_types: The :samp:`dtype` of each output array. Note
           that this is the list for all outputs including any
           in the :samp:`outputs` argument. This determines the
           number of output arrays.
        :rtype: :obj:`list` of :obj:`mpi_array.globale.gndarray`
        :return: A list of length :samp:`len(result_types)` elements,
           each element is a :obj:`mpi_array.globale.gndarray`.
        """
        template_output_gary = None
        if (outputs is not None) and (len(outputs) > 0):
            # Explicit outputs supplied: they must share an equivalent
            # inter-locale communicator; the last one acts as the template
            # for any extra outputs created below.
            check_equivalent_inter_locale_comms(outputs)
            template_output_gary = outputs[-1]
        else:
            # No explicit outputs: create one, re-using the distribution of
            # the best-matching input where possible so data stays local.
            best_match_input = self.get_best_match_input(result_shape)
            comms_distrib = None
            if best_match_input is not None:
                comms_distrib = \
                    _comms.reshape_comms_distribution(
                        best_match_input.comms_and_distrib,
                        result_shape
                    )
            if comms_distrib is not None:
                template_output_gary = \
                    _globale_creation.empty(
                        result_shape,
                        comms_and_distrib=comms_distrib,
                        dtype=result_types[0]
                    )
            else:
                template_output_gary = \
                    _globale_creation.empty(
                        result_shape,
                        dtype=result_types[0],
                        peer_comm=self.peer_comm,
                        intra_locale_comm=self.intra_locale_comm,
                        inter_locale_comm=self.inter_locale_comm
                    )
            outputs = (template_output_gary,)
        # Create any remaining outputs (one per result dtype) with the same
        # distribution as the template output.
        outputs = \
            (
                outputs
                +
                tuple(
                    _globale_creation.empty_like(template_output_gary, dtype=result_types[i])
                    for i in range(len(outputs), len(result_types))
                )
            )
        return outputs

    def get_input_extents(self, locale_info):
        """
        Returns tuple of :samp:`(locale_extent, globale_extent)` pairs,
        one for each of the :attr:`inputs`.

        :type locale_info: :obj:`mpi_array.comms.ThisLocaleInfo`
        :param locale_info: The rank info required for constructing
           a :obj:`mpi_array.distribution.LocaleExtent` instance
           for :samp:`input` types which are not :obj:`mpi_array.globale.gndarray`.
        :rtype: :obj:`tuple`
        :return: Pairs which indicate the locale extent of the ufunc :attr:`inputs`.

        .. seealso:: :func:`get_extents`
        """
        return \
            tuple(
                get_extents(inp, locale_info) for inp in self.inputs
            )

    def get_numpy_ufunc_peer_rank_inputs_outputs(self, gndarray_outputs):
        """
        Returns two element tuple of :samp:`(input_arrays, output_arrays)` which
        are to be passed to the :obj:`numpy.ufunc` object :attr:`ufunc`.

        :type gndarray_outputs: sequence of :obj:`mpi_array.globale.gndarray`
        :param gndarray_outputs: The output arrays. All arrays should be the
           same shape and same distribution.
        :rtype: :samp:`None` or :obj:`tuple`
        :return: A tuple :samp:`(input_arrays, output_arrays)` of inputs and
           outputs which are to be passed to :obj:`numpy.ufunc` call.
           Returns :samp:`None` if the output locale extents are empty (i.e. no
           array elements to compute on this locale).
        """
        # First fetch/slice the parts of the input required for the locale extent
        out_gndarray = gndarray_outputs[0]
        out_globale_extent = out_gndarray.distribution.globale_extent
        out_locale_extent = out_gndarray.lndarray_proxy.locale_extent
        ret = None
        if _np.product(out_locale_extent.shape_n) > 0:
            inp_locale_extents = \
                self.get_input_extents(out_gndarray.comms_and_distrib.this_locale)
            inp_locale_slices = \
                calc_matching_locale_slices(
                    out_locale_extent,
                    out_globale_extent,
                    inp_locale_extents
                )
            inp_locale_arys = [None, ] * len(self.inputs)
            for i in range(len(self.inputs)):
                input = self.inputs[i]
                slice_tuple = inp_locale_slices[i]
                if slice_tuple is not None:
                    if hasattr(input, "locale_get"):
                        # is a gndarray
                        inp_locale_arys[i] = input.locale_get(slice_tuple)
                    else:
                        # is a numpy array (or similar)
                        inp_locale_arys[i] = input[slice_tuple]
                else:
                    # is a scalar
                    inp_locale_arys[i] = input
            # Now slice the locale input arrays to match the peer-rank portions of the output.
            out_peer_rank_slice = out_gndarray.lndarray_proxy.intra_partition.rank_view_slice_n
            # Convert the rank-view slice from locale (halo) coordinates to
            # globale and back to locale non-halo coordinates.
            out_peer_rank_slice = out_locale_extent.locale_to_globale_slice_h(out_peer_rank_slice)
            out_peer_rank_slice = out_locale_extent.globale_to_locale_slice_n(out_peer_rank_slice)
            inp_peer_rank_slices = calc_matching_peer_rank_slices(
                out_peer_rank_slice, inp_locale_arys)
            inp_peer_rank_arys = [None, ] * len(inp_locale_arys)
            for i in range(len(inp_locale_arys)):
                input = inp_locale_arys[i]
                slice_tuple = inp_peer_rank_slices[i]
                if slice_tuple is not None:
                    # is a numpy array (or similar)
                    inp_peer_rank_arys[i] = input[slice_tuple]
                else:
                    # is a scalar
                    inp_peer_rank_arys[i] = input
            ret = \
                (
                    tuple(inp_peer_rank_arys),
                    tuple(
                        out_gndarray.view_n[out_peer_rank_slice]
                        for out_gndarray in gndarray_outputs
                    )
                )
        return ret

    def need_remote_data(self, gndarray_outputs):
        """
        Returns :samp:`True` if any locale needs to fetch remote
        input data in order to compute the all elements of the
        outputs :samp:`{gndarray_outputs}`.

        :type gndarray_outputs: sequence of :obj:`mpi_array.globale.gndarray`
        :param gndarray_outputs: Check whether any of the locales require remote
           data in order to compute these outputs.
        :rtype: :obj:`bool`
        :return: :samp:`True` if remote fetch of input data is required
           in order to compute ufunc for the given outputs.
        """
        out_gndary = gndarray_outputs[0]
        need_remote = False
        if out_gndary.locale_comms.inter_locale_comm != _mpi.COMM_NULL:
            START_STR = LocaleExtent.START_N_STR
            STOP_STR = LocaleExtent.STOP_N_STR
            # Only gndarray inputs can require a remote fetch.
            gndarray_inputs = \
                tuple(
                    input for input in self.inputs
                    if hasattr(input, "distribution") and hasattr(input, "locale_comms")
                )
            out_s_ext = out_gndary.distribution.struct_locale_extents
            for inp_gndary in gndarray_inputs:
                need_remote = \
                    (
                        _mpi.Comm.Compare(
                            out_gndary.locale_comms.inter_locale_comm,
                            inp_gndary.locale_comms.inter_locale_comm
                        )
                        ==
                        _mpi.UNEQUAL
                    )
                if not need_remote:
                    # first make sure that the inter_locale_comm is compatible
                    # between input and output
                    translated_ranks = \
                        _mpi.Group.Translate_ranks(
                            out_gndary.locale_comms.inter_locale_comm.group,
                            _np.arange(out_gndary.locale_comms.inter_locale_comm.group.size),
                            inp_gndary.locale_comms.inter_locale_comm.group
                        )
                    inp_s_ext = \
                        inp_gndary.distribution.struct_locale_extents[_np.asarray(translated_ranks)]
                    # Now check that the output locale extent is contained
                    # within the input locale extent.
                    # Dimension of input can be smaller than the output
                    # because of broadcasting rules.
                    need_remote = True
                    not_out_empty = \
                        _np.product(out_s_ext[STOP_STR] - out_s_ext[START_STR], axis=1) > 0
                    ndim = inp_gndary.ndim
                    beyond_out_extent = \
                        _np.logical_or.reduce(
                            (out_s_ext[START_STR][:, -ndim:] < inp_s_ext[START_STR])
                            |
                            (out_s_ext[STOP_STR][:, -ndim:] <= inp_s_ext[START_STR])
                            |
                            (out_s_ext[START_STR][:, -ndim:] >= inp_s_ext[STOP_STR])
                            |
                            (out_s_ext[STOP_STR][:, -ndim:] > inp_s_ext[STOP_STR]),
                            axis=1
                        )
                    need_remote = \
                        _np.any(
                            not_out_empty
                            &
                            beyond_out_extent
                        )
                if need_remote:
                    break
        # All ranks in the locale need to know the result, broadcast.
        need_remote = out_gndary.locale_comms.intra_locale_comm.bcast(need_remote, 0)
        return need_remote

    def execute___call__(self):
        """
        Executes the :samp:`__call__` ufunc method: creates outputs,
        fetches any required remote input data and forwards the per-rank
        sub-array computation to :attr:`ufunc`.
        """
        from .globale import gndarray as _gndarray

        # Calculate the shape of the output arrays.
        result_shape = broadcast_shape(*(self.get_inputs_shapes()))
        self.array_like_obj.rank_logger.debug("result_shape=%s", result_shape)

        # Calculate the result dtype for each output array
        result_types = ufunc_result_type(self.ufunc.types, self.inputs, self.outputs, self.casting)
        self.array_like_obj.rank_logger.debug("result_types=%s", result_types)

        # Create the output gndarray instances
        gndarray_outputs = self.create_outputs(self.outputs, result_shape, result_types)
        self.array_like_obj.rank_logger.debug(
            "output shapes=%s", [o.shape for o in gndarray_outputs]
        )

        # Check whether remote fetch of data is needed
        # for any locale before calling this barrier. If all locales
        # have local data then this barrier isn't be necessary.
        # Otherwise, we have to sync to make sure that remote ranks have
        # finished writing data before starting to fetch it.
        if self.need_remote_data(gndarray_outputs):
            for i in self.inputs:
                if isinstance(i, _gndarray):
                    i.initialise_windows()
            gndarray_outputs[0].inter_locale_barrier()

        # Fetch the peer-rank sub-arrays of the input arrays needed
        # to calculate the corresponding sub-array of the outputs.
        np_ufunc_inputs_and_outputs = \
            self.get_numpy_ufunc_peer_rank_inputs_outputs(gndarray_outputs)
        if np_ufunc_inputs_and_outputs is not None:
            np_ufunc_inputs, np_ufunc_outputs = np_ufunc_inputs_and_outputs
            # Call the self.ufunc.__call__ method to perform the computation
            # in the sub-arrays
            kwargs = dict()
            kwargs.update(self._kwargs)
            kwargs["out"] = np_ufunc_outputs
            self.array_like_obj.rank_logger.debug(
                "Calling numpy.ufunc=%s:\ninputs=%s\noutputs=%s",
                self.ufunc, np_ufunc_inputs, kwargs["out"]
            )
            self.ufunc.__call__(*np_ufunc_inputs, **kwargs)
            self.array_like_obj.rank_logger.debug(
                "Finished numpy.ufunc=%s:\noutputs=%s",
                self.ufunc,
                kwargs["out"]
            )
        else:
            self.array_like_obj.rank_logger.debug(
                "Locale output extent is empty, skipping call to self.ufunc=%s:\nOutput extent=%s",
                self.ufunc,
                gndarray_outputs[0].lndarray_proxy.locale_extent
            )
        gndarray_outputs[0].intra_locale_barrier()
        # return the outputs
        if len(gndarray_outputs) == 1:
            gndarray_outputs = gndarray_outputs[0]
        return gndarray_outputs

    def execute_accumulate(self):
        """
        Not implemented.
        """
        return NotImplemented

    def execute_reduce(self):
        """
        Not implemented.
        """
        return NotImplemented

    def execute_reduceat(self):
        """
        Not implemented.
        """
        return NotImplemented

    def execute_at(self):
        """
        Not implemented.
        """
        return NotImplemented

    def execute_outer(self):
        """
        Not implemented.
        """
        return NotImplemented

    def execute(self):
        """
        Perform the ufunc operation. Call is forwarded to one
        of: :meth:`execute___call__`, :meth:`execute_accumulate`, :meth:`execute_at`
        , :meth:`execute_outer`, :meth:`execute_reduce` or :meth:`execute_reduceat`.
        """
        # Dispatch by ufunc-method name, e.g. method "__call__" maps
        # to :meth:`execute___call__`.
        return getattr(self, "execute_" + self.method)()
#: Factory for generating instance of :obj:`GndarrayArrayUfuncExecutor`.
#: Assign a different callable here to customise how ufuncs are executed.
gndarray_ufunc_executor_factory = GndarrayArrayUfuncExecutor
def gndarray_array_ufunc(array_like_obj, ufunc, method, *inputs, **kwargs):
    """
    The implementation for :meth:`mpi_array.globale.gndarray.__array_ufunc__`.
    Builds an executor via :attr:`gndarray_ufunc_executor_factory` and
    returns the result of running it.
    """
    executor = \
        gndarray_ufunc_executor_factory(
            array_like_obj, ufunc, method, *inputs, **kwargs
        )
    return executor.execute()
def set_numpy_ufuncs_as_module_attr(set_attr_module, search_module):
    """
    Finds all :obj:`numpy.ufunc` attributes in the :samp:`{search_module}` :obj:`module`
    and sets corresponding attributes of :samp:`{set_attr_module}` :obj:`module`.

    :type set_attr_module: :obj:`module`
    :param set_attr_module: Set ufunc attributes of this module to those found
       in the :samp:`{search_module}` module
    :type search_module: :obj:`module`
    :param search_module: Find :obj:`numpy.ufunc` attributes in this module.
    """
    ufunc_names = (
        name
        for name in dir(search_module)
        if isinstance(getattr(search_module, name), _np.ufunc)
    )
    for name in ufunc_names:
        setattr(set_attr_module, name, getattr(search_module, name))
# Mirror every numpy ufunc (add, multiply, sin, ...) as an attribute of this
# module so they can be used as drop-in replacements for the numpy versions.
set_numpy_ufuncs_as_module_attr(_sys.modules[__name__], _np)

# Public API: every name that does not start with an underscore.
__all__ = [s for s in dir() if not s.startswith('_')]
| StarcoderdataPython |
5029653 | <gh_stars>10-100
# ----------------------------------------------------------------------
# Service documentation request handler
# ----------------------------------------------------------------------
# Copyright (C) 2007-2015 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
import tornado.web
class DocRequestHandler(tornado.web.RequestHandler):
    """
    Serves a plain-text documentation listing for a service: the service name,
    each API object's docstring, and each API-marked method name + docstring.
    """

    def initialize(self, service):
        # The service whose API docstrings are rendered by get().
        self.service = service

    def get(self):
        r = ["%s documentation" % self.service.name]
        for s in self.service.api:
            # Fix: __doc__ may be None, and "\n".join() raises TypeError on
            # non-string items, so only append docstrings that exist.
            if s.__doc__:
                r += [s.__doc__]
            for m in dir(s):
                h = getattr(s, m)
                # Only methods flagged with an "api" attribute are documented.
                if hasattr(h, "api"):
                    r += [m]
                    if h.__doc__:
                        r += [h.__doc__]
        self.write("\n".join(r))
| StarcoderdataPython |
113823 | <reponame>subhadarship/GermEval2021
import pandas as pd
import os
if __name__ == "__main__":
    # Submission columns expected by the GermEval 2021 scorer.
    LABEL_COLUMN_NAMES = ['Sub1_Toxic', 'Sub2_Engaging', 'Sub3_FactClaiming']
    PRED_DIR = os.path.join('../predictions/best_models/')
    SUBMISSION_DIR = os.path.join('../submission')
    TEST_INP_FILE_PATH = os.path.join('../data/GermEval21_Toxic_Test/GermEval21_Toxic_TestData.csv')

    # Keep only the comment ids; predictions are attached by row order below.
    test_df = pd.read_csv(TEST_INP_FILE_PATH, encoding='utf-8')[['comment_id']]

    for folder in os.listdir(PRED_DIR):
        # Headerless, single-column file of Sub3 predictions for this model.
        preds = \
            pd.read_csv(os.path.join(PRED_DIR, folder, f'test.Sub3_FactClaiming.out'), encoding='utf-8', header=None)[
                0].to_list()
        submission_df = test_df.copy(deep=True)
        # Sub1/Sub2 are zero-filled: this script builds a subtask-3-only submission.
        submission_df[LABEL_COLUMN_NAMES[0]] = [0] * len(submission_df)
        submission_df[LABEL_COLUMN_NAMES[1]] = [0] * len(submission_df)
        submission_df[LABEL_COLUMN_NAMES[2]] = preds
        os.makedirs(os.path.join(SUBMISSION_DIR, folder), exist_ok=True)
        submission_df.to_csv(os.path.join(SUBMISSION_DIR, folder, 'answer.csv'), encoding='utf-8', index=False)
| StarcoderdataPython |
1871565 | from typing import Union
from math import floor
from .strings import SCALE, NUMBER_TEXT, TYPO_LIST, JOINERS, PREFIXES, UNITS, TEN, MAGNITUDE
from ..ordinal_suffix import add as add_ordinal_suffix, remove as remove_ordinal_suffix
from ..separator import add as add_separator
class LANGUAGES:
    """Codes for the digit scripts this module converts between."""
    EN = 'en'
    FA = 'fa'
    AR = 'ar'


# Digit glyphs 0-9 for each supported language; index == numeric value.
SUPPORTED_CHARS = {
    LANGUAGES.EN: '0123456789',
    LANGUAGES.FA: '۰۱۲۳۴۵۶۷۸۹',
    LANGUAGES.AR: '٠١٢٣٤٥٦٧٨٩',
}
def _conversion(number: Union[int, float, str],
                destination_lang: str = LANGUAGES.FA) -> Union[str, None]:
    """
    Transliterate the digits of ``number`` into the digit glyphs of
    ``destination_lang``; returns None for an unsupported language code.
    """
    if destination_lang not in SUPPORTED_CHARS:
        return None

    target_digits = SUPPORTED_CHARS[destination_lang]
    # Build one translation table covering the digits of every other language.
    table = {}
    for lang, digits in SUPPORTED_CHARS.items():
        if lang == destination_lang:
            continue
        for value, glyph in enumerate(digits):
            table[ord(glyph)] = target_digits[value]

    return str(number).translate(table)
def convert_to_fa(number: Union[int, float, str]) -> Union[str, None]:
    """Rewrite the digits of ``number`` using Persian (Farsi) numerals."""
    return _conversion(number, destination_lang=LANGUAGES.FA)
def convert_to_ar(number: Union[int, float, str]) -> Union[str, None]:
    """Rewrite the digits of ``number`` using Arabic numerals."""
    return _conversion(number, destination_lang=LANGUAGES.AR)
def convert_to_en(number: Union[int, float, str]) -> Union[str, None]:
    """Rewrite the digits of ``number`` using Western (ASCII) numerals."""
    return _conversion(number, destination_lang=LANGUAGES.EN)
def convert_to_word(number: int, ordinal: bool = False) -> Union[str, None]:
    """
    Spell ``number`` out in words, optionally with the ordinal suffix.

    Returns None when the number needs more than six base-1000 groups
    (i.e. exceeds the names available in SCALE).
    """
    def to_word(num: int) -> str:
        # Render one 0-999 group, joining hundreds/tens/units with ' و '.
        res = ''
        for unit in [100, 10, 1]:
            if floor(num / unit) * unit != 0:
                if num in NUMBER_TEXT.keys():
                    # the remainder has a direct word form; emit it and stop
                    res += NUMBER_TEXT[num]
                    break
                else:
                    res += NUMBER_TEXT[floor(num / unit) * unit] + ' و '
                    num %= unit
        return res

    if number == 0:
        return 'صفر'

    is_negative = number < 0
    number = abs(number)
    base = 1000
    result = []

    # Collect 3-digit groups, least significant first.
    while number > 0:
        result.append(to_word(number % base))
        number = floor(number / base)
        if len(result) > 6:
            # beyond the largest scale name we can attach
            return None

    # Attach the scale word (thousand, million, ...) to each non-empty group.
    for i in range(len(result)):
        if result[i] != '':
            result[i] += ' ' + SCALE[i] + ' و '

    result = list(reversed(result))
    words = ''.join(result)

    # Strip trailing joiners left over from empty groups.
    while words.endswith(' و '):
        words = words[:-3]

    if is_negative:
        words = 'منفی ' + words

    words = words.strip()

    if ordinal:
        words = add_ordinal_suffix(words)

    return words
def convert_from_word(text: Union[str, None], digits: str = LANGUAGES.EN, separator: bool = False) -> Union[str, None]:
    """
    Parse a number written out in words back into digits.

    :param text: the worded number; None or '' yields None.
    :param digits: digit script of the result (see LANGUAGES).
    :param separator: when True, add thousands separators to the result.
    """
    def tokenize(_text: str) -> list:
        # Normalise known misspellings, then split on spaces and drop the
        # joiner word (JOINERS[0]).
        for typo in TYPO_LIST.keys():
            if typo in _text:
                _text = _text.replace(typo, TYPO_LIST[typo])

        slitted_text = _text.split(' ')
        slitted_text = [txt for txt in slitted_text if txt != JOINERS[0]]
        return slitted_text

    def compute(tokens: list) -> int:
        # Accumulate units/tens additively; magnitude words multiply the
        # running total (e.g. "two hundred" style grammar).
        result = 0
        is_negative = False

        for token in tokens:
            token = convert_to_en(token)
            if token == PREFIXES[0]:
                is_negative = True
            # NOTE(review): the truthy .get() tests below would skip any word
            # mapped to 0 in UNITS/TEN/MAGNITUDE — confirm no such entries exist.
            elif UNITS.get(token):
                result += UNITS[token]
            elif TEN.get(token):
                result += TEN[token]
            elif token.isdigit():
                result += int(token)
            elif MAGNITUDE.get(token):
                result *= MAGNITUDE[token]

        if is_negative:
            result *= -1
        return result

    if text == '' or text is None:
        return None

    text = remove_ordinal_suffix(text)
    computed = compute(tokenize(text))

    if separator:
        computed = add_separator(computed)

    # Transliterate the digits unless the default Western script was requested.
    if digits != LANGUAGES.EN:
        computed = _conversion(computed, digits)

    return computed
| StarcoderdataPython |
6547860 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-20 20:39
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Gallery fields: rank -> rating, total_rankers -> total_raters."""

    dependencies = [
        ('showcase', '0007_gallery_total_rankers'),
    ]

    operations = [
        migrations.RenameField(
            model_name='gallery',
            old_name='rank',
            new_name='rating',
        ),
        migrations.RenameField(
            model_name='gallery',
            old_name='total_rankers',
            new_name='total_raters',
        ),
    ]
| StarcoderdataPython |
5045709 | import sdl2
from ui_element import UIElement
from ui_dialog import UIDialog
class PagedInfoDialog(UIDialog):
    "dialog that presents multiple pages of info w/ buttons to navigate next/last page"
    title = 'Info'
    # message = list of page strings, each can be triple-quoted / contain line breaks
    message = ['']
    tile_width = 54
    # confirm/other buttons are repurposed as next (>>) / previous (<<) page
    confirm_caption = '>>'
    other_caption = '<<'
    cancel_caption = 'Done'
    other_button_visible = True
    extra_lines = 1

    def __init__(self, ui, options):
        # index into self.message of the page currently shown
        self.page = 0
        UIDialog.__init__(self, ui, options)
        self.reset_art()

    def update(self):
        # disable prev/next buttons if we're at either end of the page list
        if self.page == 0:
            self.other_button.can_hover = False
            self.other_button.set_state('dimmed')
        elif self.page == len(self.message) - 1:
            self.confirm_button.can_hover = False
            self.confirm_button.set_state('dimmed')
        else:
            # somewhere in the middle: both nav buttons active
            for button in [self.confirm_button, self.other_button]:
                button.can_hover = True
                button.dimmed = False
                if button.state != 'normal':
                    button.set_state('normal')
        UIElement.update(self)

    def handle_input(self, key, shift_pressed, alt_pressed, ctrl_pressed):
        # keyboard navigation: arrows page, Escape dismisses
        keystr = sdl2.SDL_GetKeyName(key).decode()
        if keystr == 'Left':
            self.other_pressed()
        elif keystr == 'Right':
            self.confirm_pressed()
        elif keystr == 'Escape':
            self.cancel_pressed()

    def get_message(self):
        # lines of the current page only
        return self.message[self.page].rstrip().split('\n')

    def confirm_pressed(self):
        # confirm repurposed to "next page"
        if self.page < len(self.message) - 1:
            self.page += 1
            # redraw, tell reset_art not to resize
            self.reset_art(False)

    def cancel_pressed(self):
        self.dismiss()

    def other_pressed(self):
        # other repurposed to "previous page"
        if self.page > 0:
            self.page -= 1
            self.reset_art(False)
# Credits pages for AboutDialog below; one string per page.
about_message = [
    # max line width 50 characters!
    """
by JP LeBreton (c) 2014-2020 |
Playscii was made with the support of many nice
people.
Patrons:
<NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>,
<NAME>, <NAME>,
<NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>
""",
    """
Programming Contributions:
<NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>,
<NAME>
Technical Advice:
<NAME>, <NAME>, <NAME>,
<NAME>, Goldbuick, <NAME>,
Raigan Burns
Tool Design Inspiration:
<NAME>, <NAME>, <NAME>,
<NAME> (ZZT), <NAME> (Kid Pix),
<NAME> (HyperCard)
""",
    """
Love, Encouragement, Moral Support:
<NAME>
<NAME>, and <NAME>
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
#tool-design
"""
]
class AboutDialog(PagedInfoDialog):
    """Multi-page 'About Playscii' credits dialog."""
    title = 'Playscii'
    message = about_message
    game_mode_visible = True
    all_modes_visible = True

    def __init__(self, ui, options):
        # stamp the running app version onto the window title before building
        version_suffix = ' %s' % ui.app.version
        self.title += version_suffix
        PagedInfoDialog.__init__(self, ui, options)
| StarcoderdataPython |
1682809 | <gh_stars>0
import datetime
from typing import List, Optional
import climsoft_api.api.station.schema as station_schema
from climsoft_api.api.schema import BaseSchema, Response
from pydantic import constr, Field
class CreateStationLocationHistory(BaseSchema):
    """Request schema for creating a station location history record."""

    belongsTo: constr(max_length=255) = Field(title="Belongs To")
    openingDatetime: str = Field(title="Opening Datetime")
    stationType: constr(max_length=255) = Field(title="Station Type")
    geoLocationMethod: constr(max_length=255) = Field(title="Geolocation Method")
    geoLocationAccuracy: float = Field(title="Geolocation Accuracy")
    closingDatetime: str = Field(title="Closing Datetime")
    latitude: float = Field(title="Latitude")
    longitude: float = Field(title="Longitude")
    elevation: int = Field(title="Elevation")
    authority: constr(max_length=255) = Field(title="Authority")
    adminRegion: constr(max_length=255) = Field(title="Admin Region")
    drainageBasin: constr(max_length=255) = Field(title="Drainage Basin")

    class Config:
        # Map camelCase field names to their snake_case aliases. Keys must
        # match the declared field names exactly; the previous key
        # "geolocationMethod" (lowercase L) did not match geoLocationMethod,
        # so pydantic silently ignored that alias. The geoLocationAccuracy
        # alias is added for consistency with StationLocationHistory.
        fields = {
            "belongsTo": "belongs_to",
            "stationType": "station_type",
            "geoLocationMethod": "geolocation_method",
            "geoLocationAccuracy": "geolocation_accuracy",
            "openingDatetime": "opening_datetime",
            "closingDatetime": "closing_datetime",
            "adminRegion": "admin_region",
            "drainageBasin": "drainage_basin",
        }
class UpdateStationLocationHistory(BaseSchema):
    """Request schema for updating a station location history record."""

    stationType: constr(max_length=255) = Field(title="Station Type")
    geoLocationMethod: constr(max_length=255) = Field(title="Geolocation Method")
    geoLocationAccuracy: float = Field(title="Geolocation Accuracy")
    closingDatetime: str = Field(title="Closing Datetime")
    latitude: float = Field(title="Latitude")
    longitude: float = Field(title="Longitude")
    elevation: int = Field(title="Elevation")
    authority: constr(max_length=255) = Field(title="Authority")
    adminRegion: constr(max_length=255) = Field(title="Admin Region")
    drainageBasin: constr(max_length=255) = Field(title="Drainage Basin")

    class Config:
        # Keys must match the declared field names exactly; the previous key
        # "geolocationMethod" (lowercase L) did not match geoLocationMethod,
        # so pydantic silently ignored that alias. The geoLocationAccuracy
        # alias is added for consistency with StationLocationHistory.
        fields = {
            "stationType": "station_type",
            "geoLocationMethod": "geolocation_method",
            "geoLocationAccuracy": "geolocation_accuracy",
            "closingDatetime": "closing_datetime",
            "adminRegion": "admin_region",
            "drainageBasin": "drainage_basin",
        }
class StationLocationHistory(BaseSchema):
    """Response/ORM schema for a station location history record."""

    belongsTo: Optional[constr(max_length=255)] = Field(title="Belongs To")
    openingDatetime: Optional[datetime.datetime] = Field(title="Opening Datetime")
    stationType: Optional[constr(max_length=255)] = Field(title="Station Type")
    geoLocationMethod: Optional[constr(max_length=255)] = Field(title="Geolocation Method")
    geoLocationAccuracy: Optional[float] = Field(title="Geolocation History")
    closingDatetime: Optional[datetime.datetime] = Field(title="Closing Datetime")
    latitude: Optional[float] = Field(title="Latitude")
    longitude: Optional[float] = Field(title="Longitude")
    elevation: Optional[int] = Field(title="Elevation")
    authority: Optional[constr(max_length=255)] = Field(title="Authority")
    adminRegion: Optional[constr(max_length=255)] = Field(title="Admin Region")
    drainageBasin: Optional[constr(max_length=255)] = Field(title="Drainage Basin")

    class Config:
        orm_mode = True
        allow_population_by_field_name = True
        # Keys must match the declared field names exactly; the previous keys
        # "geolocationMethod"/"geolocationAccuracy" (lowercase L) did not match
        # geoLocationMethod/geoLocationAccuracy, so pydantic silently ignored
        # those aliases and from_orm could not populate the fields.
        fields = {
            "belongsTo": "belongs_to",
            "stationType": "station_type",
            "geoLocationMethod": "geolocation_method",
            "geoLocationAccuracy": "geolocation_accuracy",
            "openingDatetime": "opening_datetime",
            "closingDatetime": "closing_datetime",
            "adminRegion": "admin_region",
            "drainageBasin": "drainage_basin",
        }
        json_encoders = {
            datetime.datetime: lambda dt: dt.strftime("%Y-%m-%d %H:%M:%S")
        }
class StationLocationHistoryWithStation(StationLocationHistory):
    # NOTE(review): title "Code" looks copy-pasted; likely meant "Station" — confirm.
    station: station_schema.Station = Field(title="Code")
class StationLocationHistoryResponse(Response):
    # Response payload: a plain list of station location history records.
    result: List[StationLocationHistory] = Field(title="Result")
class StationLocationHistoryWithStationResponse(Response):
    # Response payload: history records with their related station embedded.
    result: List[StationLocationHistoryWithStation] = Field(title="Result")
class StationLocationHistoryQueryResponse(StationLocationHistoryResponse):
    # Paginated variant: adds paging metadata alongside the inherited result list.
    limit: int = Field(title="Limit")
    page: int = Field(title="Page")
    pages: int = Field(title="Pages")
| StarcoderdataPython |
1938423 | <gh_stars>1-10
import unittest
import doctest
import unifhy._utils
if __name__ == '__main__':
    # Run the doctests embedded in unifhy._utils.compass as a unittest suite.
    # NOTE(review): test_loader is created but never used — confirm it can be removed.
    test_loader = unittest.TestLoader()
    test_suite = unittest.TestSuite()
    test_suite.addTests(doctest.DocTestSuite(unifhy._utils.compass))
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(test_suite)
| StarcoderdataPython |
6552822 | <filename>char_scripts/migrations/0001_initial.py
# Generated by Django 3.2.6 on 2021-09-25 17:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates Character, Script, Version and VersionChar
    (the Version<->Character through table) with their relations."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Character',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=30)),
                ('char_type', models.CharField(blank=True, max_length=30)),
                ('desc1', models.CharField(max_length=120)),
                ('desc2', models.CharField(blank=True, max_length=120)),
                ('desc3', models.CharField(blank=True, max_length=120)),
                ('night_first_desc', models.TextField(blank=True)),
                ('night_other_desc', models.TextField(blank=True)),
                ('night_first_order', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('night_other_order', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                ('custom', models.BooleanField(default=False)),
                ('visible', models.BooleanField(default=True)),
                ('playable', models.BooleanField(default=True)),
                ('image', models.ImageField(default='icon.png', upload_to='char_icons')),
                ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Script',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('title', models.CharField(blank=True, max_length=60)),
                ('description', models.TextField(blank=True)),
                ('author', models.CharField(blank=True, max_length=60)),
                ('script_type', models.CharField(default='Normal', max_length=30)),
                ('visible', models.BooleanField(default=True)),
                ('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='scripts', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['-created_at'],
            },
        ),
        migrations.CreateModel(
            name='Version',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('version_num', models.IntegerField(default=1, editable=False)),
            ],
            options={
                'ordering': ['-version_num'],
            },
        ),
        migrations.CreateModel(
            name='VersionChar',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('character', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='char_scripts.character')),
                ('version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='char_scripts.version')),
            ],
        ),
        # M2M and FK wiring added after all models exist:
        migrations.AddField(
            model_name='version',
            name='characters',
            field=models.ManyToManyField(through='char_scripts.VersionChar', to='char_scripts.Character'),
        ),
        migrations.AddField(
            model_name='version',
            name='script',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='versions', to='char_scripts.script'),
        ),
    ]
| StarcoderdataPython |
12850421 | <filename>tasks/prime.py
# -*- coding: utf-8 -*-
""" TaskSets and tasks for the Prime & Support APIs """
import logging
import json
import random
from copy import deepcopy
from typing import Dict
from locust import tag, task, TaskSet
from utils.constants import (
INTERNAL_API_KEY,
TEST_PDF,
ZERO_UUID,
PRIME_API_KEY,
SUPPORT_API_KEY,
MOVE_TASK_ORDER,
MTO_SHIPMENT,
MTO_AGENT,
MTO_SERVICE_ITEM,
PAYMENT_REQUEST,
)
from .base import check_response, CertTaskMixin, ParserTaskMixin
logger = logging.getLogger(__name__)
def prime_path(url: str) -> str:
    """Prefix ``url`` with the Prime API v1 base path."""
    return "/prime/v1" + url
def support_path(url: str) -> str:
    """Prefix ``url`` with the Support API v1 base path."""
    return "/support/v1" + url
class PrimeDataStorageMixin:
    """
    TaskSet mixin used to store data from the Prime API during load testing so that it can be passed around and reused.
    We store a number of objects in a local store that can be requested by tasks.
    The tasks then hit an endpoint and call add or replace to update our local store with a list of viable objects.
    This mixin allows storing multiple items of each kind.
    """

    DATA_LIST_MAX: int = 50

    # contains the ID values needed when creating moves using createMoveTaskOrder:
    default_mto_ids: Dict[str, str] = {
        "contractorID": "",
        "destinationDutyStationID": "",
        "originDutyStationID": "",
        "uploadedOrdersID": "",
    }

    local_store: Dict[str, list] = {
        MOVE_TASK_ORDER: [],
        MTO_SHIPMENT: [],
        MTO_SERVICE_ITEM: [],
        PAYMENT_REQUEST: [],
    }  # data stored will be shared among class instances thanks to mutable dict

    def get_stored(self, object_key, object_id=None, *args, **kwargs):
        """
        Given an object_key that represents an object type from the MilMove app, returns an object of that type from
        the list.

        :param object_key: str in [MOVE_TASK_ORDER, MTO_SHIPMENT, MTO_AGENT, MTO_SERVICE_ITEM, PAYMENT_REQUEST]
        :param object_id: optional "id" value. When provided, returns the stored object with that id, or None if it
            isn't in the store. Bug fix: callers have always passed this argument ("If id was provided, get that
            specific one"), but it used to be swallowed by *args and a random object was returned instead.
        """
        data_list = self.local_store[object_key]
        if object_id is not None:
            for stored_object in data_list:
                if stored_object.get("id") == object_id:
                    return stored_object
            return None  # requested object is not in the store
        if len(data_list) > 0:  # otherwise we return None
            return random.choice(data_list)

    def get_stored_shipment_address(self, mto_shipment=None):
        """
        Grabs one of either pickupAddress or destinationAddress from a shipment and returns the specific field and
        payload for that address.

        :param mto_shipment: JSON/dict of a specific MTO Shipment payload (optional)
        :return: tuple(str name of the address field, dict address payload)
        """
        if not mto_shipment:
            mto_shipment = self.get_stored(MTO_SHIPMENT) or {}

        address_fields = ["pickupAddress", "destinationAddress"]
        valid_addresses = [
            (field, mto_shipment[field])
            for field in address_fields
            if mto_shipment.get(field) and mto_shipment[field].get("id", ZERO_UUID) != ZERO_UUID
        ]
        if len(valid_addresses) > 0:  # otherwise we return None
            return random.choice(valid_addresses)

    def add_stored(self, object_key, object_data):
        """
        Adds data to the list for the object key provided. Also checks if the list is already at the max number of
        elements, and if so, it randomly removes 1 to MAX number of elements so that the cycle can start again (and so
        we don't hog too much memory).

        :param object_key: str in [MOVE_TASK_ORDER, MTO_SHIPMENT, MTO_AGENT, MTO_SERVICE_ITEM, PAYMENT_REQUEST]
        :param object_data: JSON/dict
        :return: None
        """
        data_list = self.local_store[object_key]

        if len(data_list) >= self.DATA_LIST_MAX:
            num_to_delete = random.randint(1, self.DATA_LIST_MAX)
            del data_list[:num_to_delete]

        # Some creation endpoint auto-create multiple objects and return an array,
        # but each object in the array should still be considered individually here:
        if isinstance(object_data, list):
            data_list.extend(object_data)
        else:
            data_list.append(object_data)

    def update_stored(self, object_key, old_data, new_data):
        """
        Given an object key, replaces a stored object in the local store with a new updated object.

        :param object_key: str in [MOVE_TASK_ORDER, MTO_SHIPMENT, MTO_AGENT, MTO_SERVICE_ITEM, PAYMENT_REQUEST]
        :param old_data: JSON/dict
        :param new_data: JSON/dict
        :return: None
        """
        data_list = self.local_store[object_key]

        # Remove all instances of the stored object, in case multiples were added erroneously:
        while True:
            try:
                data_list.remove(old_data)
            except ValueError:
                break  # this means we finally cleared the list

        data_list.append(new_data)

    def set_default_mto_ids(self, moves):
        """
        Given a list of Move Task Orders, gets the four ID values needed to create more MTOs:

        - contractorID
        - uploadedOrdersID
        - destinationDutyStationID
        - originDutyStationID

        To get these values, this function hits the getMoveTaskOrder endpoint in the Support API to get all of the
        details on an MTO. The Prime API doesn't have access to all of this info, which is why we need to use the
        Support API instead. It will go through and hit this endpoint for all of the moves in the list until it finally
        gets a complete set of IDs.

        CAN ONLY be used when subclassed with TaskSet and CertTaskMixin.

        :param moves: list of JSON/dict objects
        :return: None
        """
        # Checks that we have a full set of MTO IDs already and halts processing if so:
        if self.has_all_default_mto_ids():
            return

        headers = {"content-type": "application/json"}

        for move in moves:
            # Call the Support API to get full details on the move:
            resp = self.client.get(
                support_path(f"/move-task-orders/{move['id']}"),
                name=support_path("/move-task-orders/{moveTaskOrderID}"),
                headers=headers,
                **self.cert_kwargs,
            )
            move_details, success = check_response(resp, "getMoveTaskOrder")
            if not success:
                continue  # try again with the next move in the list

            # Get the values we need from the move and set them in self.default_move_ids.
            # If this move is missing any of these values, we default to using whatever value is already in
            # self.default_mto_ids, which could be nothing, or could be a value gotten from a previous move.
            # This way we never override good ID values from earlier moves in the list.
            self.default_mto_ids["contractorID"] = move_details.get(
                "contractorID", self.default_mto_ids["contractorID"]
            )

            if order_details := move_details.get("order"):
                self.default_mto_ids["uploadedOrdersID"] = order_details.get(
                    "uploadedOrdersID", self.default_mto_ids["uploadedOrdersID"]
                )
                self.default_mto_ids["destinationDutyStationID"] = order_details.get(
                    "destinationDutyStationID", self.default_mto_ids["destinationDutyStationID"]
                )
                self.default_mto_ids["originDutyStationID"] = order_details.get(
                    "originDutyStationID", self.default_mto_ids["originDutyStationID"]
                )

            # Do we have all the ID values we need? Cool, then stop processing.
            if self.has_all_default_mto_ids():
                logger.info(f"☑️ Set default MTO IDs for createMoveTaskOrder: \n{self.default_mto_ids}")
                break

        # If we're in the local environment, and we have gone through the entire list without getting a full set of IDs,
        # set our hardcoded IDs as the default:
        if not self.has_all_default_mto_ids() and self.user.is_local:
            logger.warning("⚠️ Using hardcoded MTO IDs for LOCAL env")
            self.default_mto_ids.update(
                {
                    "contractorID": "5db13bb4-6d29-4bdb-bc81-262f4513ecf6",
                    "destinationDutyStationID": "71b2cafd-7396-4265-8225-ff82be863e01",
                    "originDutyStationID": "1347d7f3-2f9a-44df-b3a5-63941dd55b34",
                    "uploadedOrdersID": "c26421b0-e4c3-446b-88f3-493bb25c1756",
                }
            )

    def has_all_default_mto_ids(self) -> bool:
        """Boolean indicating that we have all the values we need for creating new MTOs."""
        return self.default_mto_ids and all(self.default_mto_ids.values())
@tag("prime")
class PrimeTasks(PrimeDataStorageMixin, ParserTaskMixin, CertTaskMixin, TaskSet):
"""
Set of the tasks that can be called on the Prime API. Make sure to mark tasks with the `@task` decorator and add
tags where appropriate to make filtering for custom tests easier.
"""
def __init__(self, parent):
    # Session state populated by on_start() after devlocal login.
    self.csrf_token = None
    self.session_token = None
    super().__init__(parent)
def customer_path(self, url: str) -> str:
    """Build a URL against the customer (MilMove) host rather than the Prime API host."""
    return f"{self.user.alternative_host}{url}"
def on_start(self):
    """Log in via devlocal auth and create a fresh milmove service member for this session."""
    # prime the CSRF cookie, then send the token back on every request
    self.client.get(self.customer_path("/devlocal-auth/login"))
    self.csrf_token = self.client.cookies.get("masked_gorilla_csrf")
    self.client.headers.update({"x-csrf-token": self.csrf_token})

    resp = self.client.post(
        self.customer_path("/devlocal-auth/create"),
        data={"userType": "milmove", "gorilla.csrf.Token": self.csrf_token},
    )
    self.session_token = self.client.cookies.get("mil_session_token")
    if resp.status_code != 200:
        # no session means no tasks can run; stop this TaskSet
        self.interrupt()

    logged_in_user = self.client.get(self.customer_path("/internal/users/logged_in"))
    json_resp = logged_in_user.json()
    service_member_id = json_resp["service_member"]["id"]
    email = json_resp["email"]
    user_id = json_resp["id"]

    # pick the first duty station matching the "29" search as the current station
    origin_duty_stations = self.client.get(self.customer_path("/internal/duty_stations?search=29"))
    current_station_id = origin_duty_stations.json()[0]["id"]

    # patch required profile fields onto the newly created service member
    overrides = {
        "id": service_member_id,
        "user_id": user_id,
        "edipi": "9999999999",
        "personal_email": email,
        "email_is_preferred": True,
        "current_station_id": current_station_id,
    }
    payload = self.fake_request("/service_members/{serviceMemberId}", "patch", INTERNAL_API_KEY, overrides, True)
    self.client.patch(
        self.customer_path(f"/internal/service_members/{service_member_id}"),
        name="/internal/service_members/{serviceMemberId}",
        data=json.dumps(payload),
        headers={"content-type": "application/json"},
        **self.user.cert_kwargs,
    )

    # create a backup contact (permission NONE) for the service member
    overrides = {"permission": "NONE"}
    payload = self.fake_request(
        "/service_members/{serviceMemberId}/backup_contacts", "post", INTERNAL_API_KEY, overrides
    )
    self.client.post(
        self.customer_path(f"/internal/service_members/{service_member_id}/backup_contacts"),
        name="/internal/service_members/{serviceMemberId}/backup_contacts",
        data=json.dumps(payload),
        headers={"content-type": "application/json"},
        **self.user.cert_kwargs,
    )
@tag(MOVE_TASK_ORDER, "listMoves")
@task
def list_moves(self):
    """List moves from the Prime API and harvest default MTO IDs from the results."""
    timeout = {}
    if self.user.is_local:
        timeout["timeout"] = 15  # set a timeout of 15sec if we're running locally - just for this endpoint

    # NOTE(review): uses self.cert_kwargs here but self.user.cert_kwargs elsewhere — confirm intentional.
    resp = self.client.get(prime_path("/moves"), **self.cert_kwargs, **timeout)
    moves, success = check_response(resp, "listMoves")

    # Use these MTOs to set the ID values we'll need to create more MTOs
    # (NOTE: we don't care about a failure here because we can set the default IDs instead,
    # if this is running locally)
    self.set_default_mto_ids(moves or [])
@tag(MTO_SERVICE_ITEM, "createMTOServiceItem")
@task
def create_mto_service_item(self, overrides=None):
    """Create a service item on a stored shipment and store the result for later tasks."""
    # If mtoShipmentID was provided, get that specific one. Else get any stored one.
    object_id = overrides.get("mtoShipmentID") if overrides else None
    mto_shipment = self.get_stored(MTO_SHIPMENT, object_id)
    if not mto_shipment:
        logger.debug("createMTOServiceItem: ⚠️ No mto_shipment found")
        return None

    overrides_local = {
        # override moveTaskOrderID because we don't want a random one
        "moveTaskOrderID": mto_shipment["moveTaskOrderID"],
        # override mtoShipmentID because we don't want a random one
        "mtoShipmentID": mto_shipment["id"],
    }
    # Merge local overrides with passed-in overrides
    overrides_local.update(overrides or {})

    payload = self.fake_request("/mto-service-items", "post", PRIME_API_KEY, overrides_local)

    headers = {"content-type": "application/json"}
    resp = self.client.post(
        prime_path("/mto-service-items"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs
    )
    # endpoint may return several service items for one request
    mto_service_items, success = check_response(resp, f"createMTOServiceItem {payload['reServiceCode']}", payload)

    if success:
        self.add_stored(MTO_SERVICE_ITEM, mto_service_items)

    return mto_service_items
@tag(MTO_SHIPMENT, "createMTOShipment")
@task
def create_mto_shipment(self, overrides=None):
    """Create an MTO shipment on a stored move and store it for later tasks."""

    def guarantee_unique_agent_type(agents):
        # if the faked payload produced two agents of the same type, flip one
        # so both RELEASING and RECEIVING are represented
        agent_types = {agent["agentType"] for agent in agents}
        if len(agents) >= 2 and len(agent_types) < 2:
            possible_types = {"RELEASING_AGENT", "RECEIVING_AGENT"}
            agents[1]["agentType"] = (possible_types - agent_types).pop()

    # If moveTaskOrderID was provided, get that specific one. Else get any stored one.
    object_id = overrides.get("moveTaskOrderID") if overrides else None
    move_task_order = self.get_stored(MOVE_TASK_ORDER, object_id)
    if not move_task_order:
        logger.debug("createMTOShipment: ⚠️ No move_task_order found")
        return (
            None  # we can't do anything else without a default value, and no pre-made MTOs satisfy our requirements
        )

    overrides_local = {
        # Override moveTaskorderID because we don't want a random one
        "moveTaskOrderID": move_task_order["id"],
        # Set agents UUIDs to ZERO_UUID because we can't actually set the UUID on creation
        "agents": {"id": ZERO_UUID, "mtoShipmentID": ZERO_UUID},
        # Set pickupAddress to ZERO_UUID because we can't actually set the UUID on creation
        "pickupAddress": {"id": ZERO_UUID},
        # Set destinationAddress to ZERO_UUID because we can't actually set the UUID on creation
        "destinationAddress": {"id": ZERO_UUID},
        # Set mtoServiceItems to empty to let the createMTOServiceItems endpoint do the creation
        "mtoServiceItems": [],
    }
    # Merge local overrides with passed-in overrides
    if overrides:
        overrides_local.update(overrides)

    payload = self.fake_request("/mto-shipments", "post", PRIME_API_KEY, overrides=overrides_local)
    guarantee_unique_agent_type(payload["agents"])  # modifies the payload directly

    headers = {"content-type": "application/json"}
    resp = self.client.post(
        prime_path("/mto-shipments"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs
    )
    mto_shipment, success = check_response(resp, "createMTOShipment", payload)

    if success:
        self.add_stored(MTO_SHIPMENT, mto_shipment)

    return mto_shipment
@tag(MTO_SHIPMENT, "createMTOShipment", "expectedFailure")
@task
def create_mto_shipment_with_duplicate_agents(self, overrides=None):
    """Negative test: send two identical agents and expect the API to reject with a 422."""
    # If moveTaskOrderID was provided, get that specific one. Else get any stored one.
    object_id = overrides.get("moveTaskOrderID") if overrides else None
    move_task_order = self.get_stored(MOVE_TASK_ORDER, object_id)
    if not move_task_order:
        logger.debug("createMTOShipment — expected failure: ⚠️ No move_task_order found")
        return (
            None  # we can't do anything else without a default value, and no pre-made MTOs satisfy our requirements
        )

    agent_type = random.choice(["RELEASING_AGENT", "RECEIVING_AGENT"])
    # the same dict is deliberately used twice below to guarantee duplicates
    agent_override = {"id": ZERO_UUID, "mtoShipmentID": ZERO_UUID, "agentType": agent_type}

    overrides_local = {
        # Override moveTaskorderID because we don't want a random one
        "moveTaskOrderID": move_task_order["id"],
        # Set agents UUIDs to ZERO_UUID because we can't actually set the UUID on creation and guarantee two agents
        "agents": [agent_override, agent_override],
        # Set pickupAddress to ZERO_UUID because we can't actually set the UUID on creation
        "pickupAddress": {"id": ZERO_UUID},
        # Set destinationAddress to ZERO_UUID because we can't actually set the UUID on creation
        "destinationAddress": {"id": ZERO_UUID},
        # Set mtoServiceItems to empty to let the createMTOServiceItems endpoint do the creation
        "mtoServiceItems": [],
    }
    # Merge local overrides with passed-in overrides
    if overrides:
        overrides_local.update(overrides)

    payload = self.fake_request("/mto-shipments", "post", PRIME_API_KEY, overrides=overrides_local)

    headers = {"content-type": "application/json"}
    resp = self.client.post(
        prime_path("/mto-shipments"),
        name=prime_path("/mto-shipments — expected failure"),
        data=json.dumps(payload),
        headers=headers,
        **self.user.cert_kwargs,
    )
    # the "422" argument asserts the expected rejection status
    check_response(resp, "createMTOShipmentFailure", payload, "422")
@tag(PAYMENT_REQUEST, "createUpload")
@task
def create_upload(self, overrides=None):
    """
    Upload the test PDF against a stored payment request.

    :param overrides: optional dict; "id" selects a specific stored payment request.
    """
    # If id was provided, get that specific one. Else get any stored one.
    object_id = overrides.get("id") if overrides else None
    payment_request = self.get_stored(PAYMENT_REQUEST, object_id)
    if not payment_request:
        return

    # Fix: the file handle was previously opened and never closed, leaking a
    # descriptor on every task iteration; `with` closes it after the request.
    with open(TEST_PDF, "rb") as pdf_file:
        upload_file = {"file": pdf_file}

        resp = self.client.post(
            prime_path(f"/payment-requests/{payment_request['id']}/uploads"),
            name=prime_path("/payment-requests/{paymentRequestID}/uploads"),
            files=upload_file,
            **self.user.cert_kwargs,
        )
    check_response(resp, "createUpload")
@tag(PAYMENT_REQUEST, "createPaymentRequest")
@task
def create_payment_request(self, overrides=None):
    """
    Create a payment request for a stored service item and store the result.

    :param overrides: optional dict; "mtoServiceItemID" selects a specific stored service item.
    """
    # If mtoServiceItemID was provided, get that specific one. Else get any stored one.
    object_id = overrides.get("mtoServiceItemID") if overrides else None
    service_item = self.get_stored(MTO_SERVICE_ITEM, object_id)
    if not service_item:
        return

    payload = {
        "moveTaskOrderID": service_item["moveTaskOrderID"],
        "serviceItems": [{"id": service_item["id"]}],
        "isFinal": False,
    }

    shipment = self.get_stored(MTO_SHIPMENT, service_item["mtoShipmentID"])
    if not shipment:
        logger.info("unable to find shipment of payment request service item")
        # Fix: previously fell through and crashed on shipment.get(...) below
        # with an AttributeError when shipment was None.
        return None

    headers = {"content-type": "application/json"}

    # if the actual weight hasn't been provided, creating the payment request will fail
    if not shipment.get("primeActualWeight"):
        self.client.post(
            prime_path("/payment-requests"),
            name=prime_path("/payment-requests — expected failure"),
            data=json.dumps(payload),
            headers=headers,
            **self.user.cert_kwargs,
        )
        return None

    resp = self.client.post(
        prime_path("/payment-requests"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs
    )
    payment_request, success = check_response(resp, "createPaymentRequest", payload)

    if success:
        self.add_stored(PAYMENT_REQUEST, payment_request)

    return payment_request
    @tag(MTO_SHIPMENT, "updateMTOShipment")
    @task
    def update_mto_shipment(self, overrides=None):
        """Patch a stored shipment with a faked payload via the Prime API.

        :param overrides: optional dict; an "id" key selects a specific stored
            shipment, remaining keys are merged into the generated payload.
        :return: the updated shipment dict when the request succeeds.
        """
        # If id was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("id") if overrides else None
        mto_shipment = self.get_stored(MTO_SHIPMENT, object_id)
        if not mto_shipment:
            return  # can't run this task
        payload = self.fake_request("/mto-shipments/{mtoShipmentID}", "patch", PRIME_API_KEY, overrides)
        # Agents and addresses should not be updated by this endpoint, and primeEstimatedWeight cannot be updated after
        # it is initially set (and it is set in create_mto_shipment)
        fields_to_remove = [
            "agents",
            "pickupAddress",
            "destinationAddress",
            "secondaryPickupAddress",
            "secondaryDeliveryAddress",
            "primeEstimatedWeight",
        ]
        # nts weight is only valid when the shipment type is nts release
        if payload.get("ntsRecordedWeight"):
            shipmentType = payload.get("shipmentType") or mto_shipment.get("shipmentType")
            if shipmentType != "HHG_OUTOF_NTS_DOMESTIC":
                fields_to_remove.append("ntsRecordedWeight")
        # Drop the disallowed fields if the faker generated them.
        for f in fields_to_remove:
            payload.pop(f, None)
        # If-Match carries the optimistic-locking eTag this API requires.
        headers = {"content-type": "application/json", "If-Match": mto_shipment["eTag"]}
        resp = self.client.patch(
            prime_path(f"/mto-shipments/{mto_shipment['id']}"),
            name=prime_path("/mto-shipments/{mtoShipmentID}"),
            data=json.dumps(payload),
            headers=headers,
            **self.user.cert_kwargs,
        )
        new_mto_shipment, success = check_response(resp, "updateMTOShipment", payload)
        if success:
            # Replace the stale stored shipment with the server's version.
            self.update_stored(MTO_SHIPMENT, mto_shipment, new_mto_shipment)
            return new_mto_shipment
@tag(MTO_SHIPMENT, "updateMTOShipmentAddress")
@task
def update_mto_shipment_address(self, overrides=None):
# If id was provided, get that specific one. Else get any stored one.
object_id = overrides.get("id") if overrides else None
mto_shipment = self.get_stored(MTO_SHIPMENT, object_id)
if not mto_shipment:
return
address_tuple = self.get_stored_shipment_address(mto_shipment) # returns a (field_name, address_dict) tuple
if not address_tuple:
return # this shipment didn't have any addresses, we will try again later with a different shipment
field, address = address_tuple
overrides_local = {"id": address["id"]}
overrides_local.update(overrides or {})
payload = self.fake_request(
"/mto-shipments/{mtoShipmentID}/addresses/{addressID}", "put", PRIME_API_KEY, overrides=overrides_local
)
headers = {"content-type": "application/json", "If-Match": address["eTag"]}
# update mto_shipment address
resp = self.client.put(
prime_path(f"/mto-shipments/{mto_shipment['id']}/addresses/{address['id']}"),
name=prime_path("/mto-shipments/{mtoShipmentID}/addresses/{addressID}"),
data=json.dumps(payload),
headers=headers,
**self.user.cert_kwargs,
)
updated_address, success = check_response(resp, "updateMTOShipmentAddress", payload)
if success:
# we only got the address, so we're gonna pop it back into the shipment to store
updated_shipment = deepcopy(mto_shipment)
updated_shipment[field] = updated_address
self.update_stored(MTO_SHIPMENT, mto_shipment, updated_shipment)
return updated_shipment
    @tag(MTO_AGENT, "updateMTOAgent")
    @task
    def update_mto_agent(self, overrides=None):
        """Update the first agent of a stored shipment via the Prime API.

        :param overrides: optional dict; an "mtoShipmentID" key selects a
            specific stored shipment.
        :return: the shipment dict containing the updated agent on success.
        """
        # If id was provided, get that specific one. Else get any stored one.
        object_id = overrides.get("mtoShipmentID") if overrides else None
        mto_shipment = self.get_stored(MTO_SHIPMENT, object_id)
        if not mto_shipment:
            return  # can't run this task
        if mto_shipment.get("agents") is None:
            return  # can't update agents if there aren't any
        # NOTE(review): the caller-supplied `overrides` is discarded from this
        # point on (it is rebound below) — confirm that is intentional.
        overrides = {}
        mto_agents = mto_shipment["agents"]
        mto_agent = mto_shipment["agents"][0]
        if len(mto_agents) >= 2:
            overrides = {"agentType": mto_agent["agentType"]}  # ensure agentType does not change
        payload = self.fake_request("/mto-shipments/{mtoShipmentID}/agents/{agentID}", "put", PRIME_API_KEY, overrides)
        # If-Match carries the optimistic-locking eTag of the agent being updated.
        headers = {"content-type": "application/json", "If-Match": mto_agent["eTag"]}
        resp = self.client.put(
            prime_path(f"/mto-shipments/{mto_shipment['id']}/agents/{mto_agent['id']}"),
            name=prime_path("/mto-shipments/{mtoShipmentID}/agents/{agentID}"),
            data=json.dumps(payload),
            headers=headers,
            **self.user.cert_kwargs,
        )
        updated_agent, success = check_response(resp, "updateMTOAgent", payload)
        if success:
            # we only got the agent, so we're gonna pop it back into the shipment to store
            new_shipment = deepcopy(mto_shipment)
            new_shipment["agents"][0] = updated_agent
            self.update_stored(MTO_SHIPMENT, mto_shipment, new_shipment)
            return new_shipment
@tag(MTO_SERVICE_ITEM, "updateMTOServiceItem")
@task
def update_mto_service_item(self, overrides=None):
# If id was provided, get that specific one. Else get any stored one.
object_id = overrides.get("id") if overrides else None
mto_service_item = self.get_stored(MTO_SERVICE_ITEM, object_id)
if not mto_service_item:
return # can't run this task
try:
re_service_code = mto_service_item["reServiceCode"]
except KeyError:
logger.error(f"⛔️ update_mto_service_item recvd mtoServiceItem without reServiceCode \n{mto_service_item}")
return
if re_service_code not in ["DDDSIT", "DOPSIT"]:
logging.info(
"update_mto_service_item recvd mtoServiceItem from store. Discarding because reServiceCode not in "
"[DDDSIT, DOPSIT]"
)
return
payload = self.fake_request(
"/mto-service-items/{mtoServiceItemID}",
"patch",
overrides={
"id": mto_service_item["id"],
"sitDestinationFinalAddress": {
"id": mto_service_item["sitDestinationFinalAddress"]["id"]
if mto_service_item.get("sitDestinationFinalAddress")
and mto_service_item["sitDestinationFinalAddress"].get("id")
else ZERO_UUID,
},
},
)
headers = {"content-type": "application/json", "If-Match": mto_service_item["eTag"]}
resp = self.client.patch(
prime_path(f"/mto-service-items/{mto_service_item['id']}"),
name=prime_path("/mto-service-items/{mtoServiceItemID}"),
data=json.dumps(payload),
headers=headers,
**self.user.cert_kwargs,
)
updated_service_item, success = check_response(resp, f"updateMTOServiceItem {re_service_code}", payload)
if success:
self.update_stored(MTO_SERVICE_ITEM, mto_service_item, updated_service_item)
return updated_service_item
@tag(MOVE_TASK_ORDER, "updateMTOPostCounselingInformation")
@task
def update_post_counseling_information(self, overrides=None):
# If id was provided, get that specific one. Else get any stored one.
object_id = overrides.get("id") if overrides else None
move_task_order = self.get_stored(MOVE_TASK_ORDER, object_id)
if not move_task_order:
logger.debug("updateMTOPostCounselingInformation: ⚠️ No move_task_order found")
return # we can't do anything else without a default value, and no pre-made MTOs satisfy our requirements
payload = self.fake_request("/move-task-orders/{moveTaskOrderID}/post-counseling-info", "patch", PRIME_API_KEY)
move_task_order_id = move_task_order["id"] # path parameter
headers = {"content-type": "application/json", "If-Match": move_task_order["eTag"]}
resp = self.client.patch(
prime_path(f"/move-task-orders/{move_task_order_id}/post-counseling-info"),
name=prime_path("/move-task-orders/{moveTaskOrderID}/post-counseling-info"),
data=json.dumps(payload),
headers=headers,
**self.user.cert_kwargs,
)
new_mto, success = check_response(resp, "updateMTOPostCounselingInformation", payload)
if success:
self.update_stored(MOVE_TASK_ORDER, move_task_order, new_mto)
return new_mto
@tag("support")
class SupportTasks(PrimeDataStorageMixin, ParserTaskMixin, CertTaskMixin, TaskSet):
"""
Set of the tasks that can be called on the Support API. Make sure to mark tasks with the `@task` decorator and add
tags where appropriate to make filtering for custom tests easier. Ex:
@tag('updates', 'shipments')
@task
def update_mto_shipment_status(self):
# etc.
"""
@tag(MTO_SHIPMENT, "updateMTOShipmentStatus")
@task(2)
def update_mto_shipment_status(self, overrides=None):
# If id was provided, get that specific one. Else get any stored one.
object_id = overrides.get("id") if overrides else None
mto_shipment = self.get_stored(MTO_SHIPMENT, object_id)
if not mto_shipment:
logger.debug("updateMTOShipmentStatus: ⚠️ No mto_shipment found.")
return None # can't run this task
# To avoid issues with the mto shipment being stale
# retrieve the move associated with the shipment
# and then use the newly fetched move to the find most up to date version of the shipment
move_id = mto_shipment["moveTaskOrderID"]
headers = {"content-type": "application/json"}
resp = self.client.get(
support_path(f"/move-task-orders/{move_id}"),
name=support_path("/move-task-orders/{moveTaskOrderID}"),
headers=headers,
)
move_details, success = check_response(resp, "getMoveTaskOrder")
if not move_details:
logger.debug("updateMTOShipmentStatus: ⚠️ No mto_shipment found.")
return None # can't run this task
for fetched_mto_shipment in move_details["mtoShipments"]:
if fetched_mto_shipment["id"] == mto_shipment["id"]:
# Generate fake payload based on the endpoint's required fields
payload = self.fake_request(
"/mto-shipments/{mtoShipmentID}/status", "patch", SUPPORT_API_KEY, overrides
)
if fetched_mto_shipment["status"] == "CANCELLATION_REQUESTED" and payload["status"] != "CANCELED":
return None
elif fetched_mto_shipment["status"] == "SUBMITTED" and payload["status"] not in [
"APPROVED",
"REJECTED",
]:
return None
elif fetched_mto_shipment["status"] == "DIVERSION_REQUESTED" and payload["status"] != "APPROVED":
return None
elif fetched_mto_shipment["status"] == "APPROVED" and payload["status"] != "DIVERSION_REQUESTED":
return None
elif fetched_mto_shipment["status"] in ["DRAFT", "REJECTED", "CANCELED"]:
return None
headers = {"content-type": "application/json", "If-Match": fetched_mto_shipment["eTag"]}
resp = self.client.patch(
support_path(f"/mto-shipments/{fetched_mto_shipment['id']}/status"),
name=support_path("/mto-shipments/{mtoShipmentID}/status"),
data=json.dumps(payload),
headers=headers,
)
new_mto_shipment, success = check_response(resp, "updateMTOShipmentStatus", payload)
if success:
self.update_stored(MTO_SHIPMENT, mto_shipment, new_mto_shipment)
return mto_shipment
@tag(MTO_SHIPMENT, "updateMTOShipmentStatus", "expectedFailure")
# run this task less frequently than the others since this is testing an expected failure
@task(1)
def update_mto_shipment_with_invalid_status(self, overrides=None):
# If id was provided, get that specific one. Else get any stored one.
object_id = overrides.get("id") if overrides else None
mto_shipment = self.get_stored(MTO_SHIPMENT, object_id)
if not mto_shipment:
logger.debug("updateMTOShipmentStatus: ⚠️ No mto_shipment found.")
return None # can't run this task
overrides_local = {"status": "DRAFT"}
# Merge local overrides with passed-in overrides
if overrides:
overrides_local.update(overrides)
# Generate fake payload based on the endpoint's required fields
payload = self.fake_request("/mto-shipments/{mtoShipmentID}/status", "patch", SUPPORT_API_KEY, overrides_local)
payload["status"] = "DRAFT"
headers = {"content-type": "application/json", "If-Match": mto_shipment["eTag"]}
resp = self.client.patch(
support_path(f"/mto-shipments/{mto_shipment['id']}/status"),
name=support_path("/mto-shipments/{mtoShipmentID}/status — expected failure"),
data=json.dumps(payload),
headers=headers,
)
check_response(resp, "updateMTOShipmentStatusFailure", payload, "422")
@tag(MOVE_TASK_ORDER, "createMoveTaskOrder")
@task(2)
def create_move_task_order(self):
# Check that we have all required ID values for this endpoint:
if not self.has_all_default_mto_ids():
logger.debug(f"⚠️ Missing createMoveTaskOrder IDs for environment {self.user.env}")
return
overrides = {
"contractorID": self.default_mto_ids["contractorID"],
# Moves that are in DRAFT or CANCELED mode cannot be used by the rest of the load testing
"status": "SUBMITTED",
# If this date is set here, the status will not properly transition to APPROVED
"availableToPrimeAt": None,
"order": {
"status": "APPROVED",
"tac": "F8J1",
# We need these objects to exist
"destinationDutyStationID": self.default_mto_ids["destinationDutyStationID"],
"originDutyStationID": self.default_mto_ids["originDutyStationID"],
"uploadedOrdersID": self.default_mto_ids["uploadedOrdersID"],
# To avoid the overrides being inserted into these nested objects...
"entitlement": {},
"customer": {},
},
}
payload = self.fake_request("/move-task-orders", "post", SUPPORT_API_KEY, overrides)
headers = {"content-type": "application/json"}
resp = self.client.post(
support_path("/move-task-orders"), data=json.dumps(payload), headers=headers, **self.user.cert_kwargs
)
json_body, success = check_response(resp, "createMoveTaskOrder", payload)
if not success:
return # no point continuing if it didn't work out
move_task_order_id = json_body["id"]
e_tag = json_body["eTag"]
headers["if-match"] = e_tag
resp = self.client.patch(
support_path(f"/move-task-orders/{move_task_order_id}/available-to-prime"),
name=support_path("/move-task-orders/{moveTaskOrderID}/available-to-prime"),
headers=headers,
**self.user.cert_kwargs,
)
new_mto, success = check_response(resp, "makeMoveTaskOrderAvailable")
if success:
self.add_stored(MOVE_TASK_ORDER, new_mto)
return new_mto
# @tag(MTO_SERVICE_ITEM, "updateMTOServiceItemStatus")
@task(2)
def update_mto_service_item_status(self, overrides=None):
# If id was provided, get that specific one. Else get any stored one.
object_id = overrides.get("id") if overrides else None
mto_service_item = self.get_stored(MTO_SERVICE_ITEM, object_id)
# if we don't have an mto shipment we can't run this task
if not mto_service_item:
logger.debug("updateMTOServiceItemStatus: ⚠️ No mto_service_item found")
return None
payload = self.fake_request("/mto-service-items/{mtoServiceItemID}/status", "patch", SUPPORT_API_KEY, overrides)
headers = {"content-type": "application/json", "If-Match": mto_service_item["eTag"]}
resp = self.client.patch(
support_path(f"/mto-service-items/{mto_service_item['id']}/status"),
name=support_path("/mto-service-items/{mtoServiceItemID}/status"),
data=json.dumps(payload),
headers=headers,
**self.user.cert_kwargs,
)
mto_service_item, success = check_response(resp, "updateMTOServiceItemStatus", payload)
if success:
self.update_stored(MTO_SERVICE_ITEM, mto_service_item, mto_service_item)
return mto_service_item
@tag(PAYMENT_REQUEST, "updatePaymentRequestStatus")
@task(2)
def update_payment_request_status(self, overrides=None):
# If id was provided, get that specific one. Else get any stored one.
object_id = overrides.get("id") if overrides else None
payment_request = self.get_stored(PAYMENT_REQUEST, object_id)
if not payment_request:
return
payload = self.fake_request("/payment-requests/{paymentRequestID}/status", "patch", SUPPORT_API_KEY)
headers = {"content-type": "application/json", "If-Match": payment_request["eTag"]}
resp = self.client.patch(
support_path(f"/payment-requests/{payment_request['id']}/status"),
name=support_path("/payment-requests/{paymentRequestID}/status"),
data=json.dumps(payload),
headers=headers,
**self.user.cert_kwargs,
)
new_payment_request, success = check_response(resp, "updatePaymentRequestStatus", payload)
if success:
self.update_stored(PAYMENT_REQUEST, payment_request, new_payment_request)
return new_payment_request
@tag(MOVE_TASK_ORDER, "getMoveTaskOrder")
@task(2)
def get_move_task_order(self, overrides=None):
# If id was provided, get that specific one. Else get any stored one.
object_id = overrides.get("id") if overrides else None
move_task_order = self.get_stored(MOVE_TASK_ORDER, object_id)
if not move_task_order:
logger.debug("getMoveTaskOrder: ⚠️ No move_task_order found")
return
headers = {"content-type": "application/json"}
resp = self.client.get(
support_path(f"/move-task-orders/{move_task_order['id']}"),
name=support_path("/move-task-orders/{moveTaskOrderID}"),
headers=headers,
**self.user.cert_kwargs,
)
new_mto, success = check_response(resp, "getMoveTaskOrder")
if success:
self.update_stored(MOVE_TASK_ORDER, move_task_order, new_mto)
return new_mto
| StarcoderdataPython |
3271911 | # Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import argparse
import json
import uuid
import sys
import os
import signal
import time
# Suppress urllib3's "insecure request" warnings: every request below is sent
# with verify=False, so targets with self-signed certificates are expected.
from urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
class APIUnitTest:
    """Base class for pfSense API endpoint unit tests.

    Subclasses set `url` and the request payload lists; constructing an
    instance parses CLI args, runs the POST/GET/PUT/DELETE checks against the
    endpoint and exits the process with the resulting status code.
    """

    # CLASS PROPERTIES #
    # NOTE: these are class-level defaults that subclasses are expected to
    # override by assignment (e.g. `post_payloads = [...]`), not mutate.
    args = {}
    uid = str(uuid.uuid4())
    url = ""
    exit_code = 1
    time_delay = 1
    get_payloads = []
    post_payloads = []
    put_payloads = []
    delete_payloads = []
    get_responses = []
    post_responses = []
    put_responses = []
    delete_responses = []

    # CLASS METHODS #
    def __init__(self):
        """Parse CLI arguments, run all request checks, then exit the process."""
        signal.signal(signal.SIGINT, APIUnitTest.__safe_escape__)
        self.__start_argparse__()
        self.url = self.args.scheme + "://" + self.args.host + ":" + str(self.args.port) + self.url
        self.auth_payload = {"client-id": self.args.username, "client-token": self.args.password}

        # Run unit tests and exit on corresponding status code
        self.post()
        self.get()
        self.put()
        self.delete()
        sys.exit(self.exit_code)

    def get(self):
        """Run every GET payload through the endpoint and record the responses."""
        # Loop through each GET payload and check that its response is expected
        for payload in self.get_payloads:
            self.pre_get()
            self.get_responses.append(self.make_request("GET", payload))
            self.post_get()
            time.sleep(self.time_delay)

    def post(self):
        """Run every POST payload through the endpoint and record the responses."""
        # Loop through each POST payload and check that its response is expected
        for payload in self.post_payloads:
            self.pre_post()
            self.post_responses.append(self.make_request("POST", payload))
            self.post_post()
            time.sleep(self.time_delay)

    def put(self):
        """Run every PUT payload through the endpoint and record the responses."""
        # Loop through each PUT payload and check that its response is expected
        for payload in self.put_payloads:
            self.pre_put()
            self.put_responses.append(self.make_request("PUT", payload))
            self.post_put()
            time.sleep(self.time_delay)

    def delete(self):
        """Run every DELETE payload through the endpoint and record the responses."""
        # Loop through each DELETE payload and check that its response is expected
        for payload in self.delete_payloads:
            self.pre_delete()
            self.delete_responses.append(self.make_request("DELETE", payload))
            self.post_delete()
            time.sleep(self.time_delay)

    def __start_argparse__(self):
        """Define and parse the command line arguments into self.args."""
        # Custom port type for argparse
        def port(value_string):
            value = int(value_string)
            # Bug fix: range() excludes its stop value, so the upper bound must
            # be 65536 for port 65535 to be accepted.
            if value not in range(1, 65536):
                raise argparse.ArgumentTypeError("%s is out of range, choose from [1-65535]" % value)
            return value

        parser = argparse.ArgumentParser(
            description="Check pfSense API's '" + self.url + "' endpoint for correct functionality."
        )
        parser.add_argument(
            '--host',
            dest="host",
            type=str,
            required=True,
            help="The host to connect to"
        )
        parser.add_argument(
            '--port',
            dest="port",
            type=port,
            default=443,
            help="The port to use when connecting",
            metavar="{1-65535}"
        )
        parser.add_argument(
            '--scheme',
            dest="scheme",
            type=str,
            choices=["http", "https"],
            default="https",
            help="The URL scheme to use when connecting"
        )
        parser.add_argument(
            '--auth_mode',
            dest="auth_mode",
            type=str,
            choices=["local", "token", "jwt"],
            default="local",
            help="The API authentication mode to use."
        )
        parser.add_argument(
            '--username',
            dest="username",
            type=str,
            default="admin",
            help='Username to authenticate as.'
        )
        parser.add_argument(
            '--password',
            dest="password",
            type=str,
            default="<PASSWORD>",
            help='Password to authenticate with'
        )
        parser.add_argument(
            '--timeout',
            dest="timeout",
            type=int,
            default=10,
            help="Connection timeout limit in seconds"
        )
        parser.add_argument(
            '--verbose',
            dest="verbose",
            action="store_true",
            required=False,
            help='Display verbose output'
        )
        self.args = parser.parse_args()

    def make_request(self, method, payload):
        """Send one HTTP request to the endpoint and validate the response.

        A request passes when the response is HTTP 200, valid JSON and carries
        a zero API "return" code; any pass clears the process exit code.

        :param method: HTTP verb ("GET", "POST", "PUT" or "DELETE")
        :param payload: request body dict (auth fields are merged in for local auth)
        :return: decoded JSON response body, or None when the connection timed
            out or the body was not valid JSON
        """
        success = False
        headers = {}
        # Create authentication payload for local authentication
        if self.args.auth_mode == "local":
            payload.update(self.auth_payload)
        # Create authentication headers for token authentication
        elif self.args.auth_mode == "token":
            headers = {"Authorization": self.args.username + " " + self.args.password}
        # Create authentication headers for JWT authentication
        elif self.args.auth_mode == "jwt":
            headers = {"Authorization": "Bearer " + self.__request_jwt__()}

        try:
            req = requests.request(
                method,
                url=self.url,
                data=json.dumps(payload),
                verify=False,
                timeout=self.args.timeout,
                headers=headers
            )
        except requests.exceptions.ConnectTimeout:
            print(self.__format_msg__(method, "Connection timed out"))
            return None

        # Check if our HTTP status code is expected
        if req is not None and req.status_code == 200:
            # Try to decode our request as JSON
            try:
                req.json()
                is_json = True
            except json.decoder.JSONDecodeError:
                is_json = False
            # Check if our response is JSON, if so proceed. Otherwise set error.
            if is_json:
                # Check if our API response's return code is 0. Otherwise set error.
                if req.json()["return"] == 0:
                    msg = self.__format_msg__(method, "Response is valid", error=False)
                    success = True
                else:
                    msg = self.__format_msg__(method, "Received non-zero return " + str(req.json()["return"]))
            else:
                # Typo fix: "recieved" -> "received"
                msg = self.__format_msg__(method, "Expected JSON response, received " + str(req.content))
        else:
            msg = self.__format_msg__(method, "Expected status code 200, received " + str(req.status_code))

        # Print our message to the console, if an error occurred
        print(msg)
        # Print request output if verbose mode
        if self.args.verbose:
            print(req.content.decode())
        # Clear the failure exit code when this request passed.
        # NOTE(review): one success clears the code permanently — later
        # failures do not set it back to 1. Confirm this is intended.
        if success:
            self.exit_code = 0
        # Bug fix: returning req.json() unconditionally raised JSONDecodeError
        # for non-JSON bodies; return None in that case instead.
        try:
            return req.json()
        except json.decoder.JSONDecodeError:
            return None

    def __format_msg__(self, method, descr, error=True):
        """Build a colorized OK/FAILED console line for this endpoint and method."""
        methods = {
            "GET": "\33[32mGET\33[0m",
            'POST': "\33[33mPOST\33[0m",
            'PUT': "\33[34mPUT\33[0m",
            'DELETE': "\33[31mDELETE\33[0m"
        }
        msg = "\33[31mFAILED -->\33[0m" if error else "\33[32mOK ------>\33[0m"
        msg = msg + " [ " + methods[method] + " " + self.url + " ]: " + descr
        return msg

    def __request_jwt__(self):
        """Fetch a JWT from the access_token endpoint; empty string on any failure."""
        try:
            req = requests.request(
                "POST",
                url=self.args.scheme + "://" + self.args.host + ":" + str(self.args.port) + "/api/v1/access_token",
                data=json.dumps({"client-id": self.args.username, "client-token": self.args.password}),
                verify=False,
                timeout=self.args.timeout
            )
            return req.json()["data"]["token"]
        except Exception:
            return ""

    @staticmethod
    def __safe_escape__(signum, frame):
        """SIGINT handler: terminate immediately without a traceback."""
        try:
            os._exit(0)
        except OSError:
            sys.exit(0)

    # PRE/POST REQUEST METHODS. These are intended to be overwritten by a child class.
    def pre_post(self):
        pass

    def post_post(self):
        pass

    def pre_get(self):
        pass

    def post_get(self):
        pass

    def pre_put(self):
        pass

    def post_put(self):
        pass

    def pre_delete(self):
        pass

    def post_delete(self):
        pass
1619595 | import pytest
from brownie import PriceFeed, accounts, network
def test_can_deploy_contract(mainnet_eth_usd_address):
    """Deploying PriceFeed against the mainnet ETH/USD aggregator succeeds."""
    # Guard: this test is only meaningful on a forked mainnet.
    if network.show_active() != 'mainnet-fork':
        pytest.skip('Only works for mainnet-fork network')
    # Deploy the contract and confirm we received a deployment object.
    deployed = PriceFeed.deploy(mainnet_eth_usd_address, {'from': accounts[0]})
    assert deployed is not None
def test_can_get_latest_price(mainnet_eth_usd_address):
    """A deployed PriceFeed returns an integer from getLatestPrice."""
    # Guard: this test is only meaningful on a forked mainnet.
    if network.show_active() != 'mainnet-fork':
        pytest.skip('Only works for mainnet-fork network')
    # Deploy, query the latest price, and check its type.
    feed = PriceFeed.deploy(mainnet_eth_usd_address, {'from': accounts[0]})
    latest_price = feed.getLatestPrice({'from': accounts[0]})
    assert isinstance(latest_price, int)
| StarcoderdataPython |
5078578 | import os,sys,time
# Clear the terminal before the banner text is typed out.
os.system('clear')
def babi(message):
    """Print *message* one character at a time (typewriter effect)."""
    out = sys.stdout
    for character in message:
        out.write(character)
        out.flush()
        time.sleep(0.1)
# Typed-out banner. This script is Python 2: the bare `print` statements emit
# blank lines, and `print ""` below is Python 2 print-statement syntax.
babi('<NAME> MY NAME IS <NAME>')
print
babi('YOU WELCOME TO VISIT OUR TOOL AND YOU DONT FORGET')
print
babi('FRIENDS THIS TOOL IT WAS CREATED BY MiSetya And Update With 080Hacker')
print
babi('YOU MAKE SURE KNOW IS THIS TOOL IT IS REALLY WORKING')
print
babi('BUT THE TOOL IT HAS KEY WHICH YOU REALLY NEED TO FIND')
print
print
babi('TO GET THE TOOL KEY CONTACT US VIA WHATSAPP WITH THIS NUMBER +2349069464271 >_<')
print
print
babi('YOU CAN ALSO CONTACT MISETYA @<EMAIL>.misetya...')
print ""
babi('AND YOU DO NOT FORGET TO GIVE US STAR ON GITHUB AFTER LOGIN THANKS A LOT')
babi('...............')
# Hand off to the login shell script, then terminate this process.
os.system('sh login.sh')
exit()
| StarcoderdataPython |
3553026 | <filename>src/command_modules/azure-cli-reservations/azure/cli/command_modules/reservations/_help.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.help_files import helps
# Help strings for the `az reservations` command tree. knack parses each
# entry's triple-quoted string as YAML (type / short-summary / long-summary /
# parameters), keyed by the full command path.
helps['reservations'] = """
    type: group
    short-summary: Manage Azure Reservations.
"""

# Command subgroups.
helps['reservations catalog'] = """
    type: group
    short-summary: See catalog of available reservations
"""

helps['reservations reservation'] = """
    type: group
    short-summary: Manage reservation entities
"""

helps['reservations reservation-order'] = """
    type: group
    short-summary: Manage reservation order, which is container for reservations
"""

helps['reservations reservation-order-id'] = """
    type: group
    short-summary: See reservation order ids that are applied to subscription
"""

# reservation-order commands.
helps['reservations reservation-order list'] = """
    type: command
    short-summary: Get all reservation orders
    long-summary: |
        List of all the reservation orders that the user has access to in the current tenant.
"""

helps['reservations reservation-order show'] = """
    type: command
    short-summary: Get a specific reservation order.
    long-summary: Get the details of the reservation order.
    parameters:
        - name: --reservation-order-id
          type: string
          short-summary: Id of reservation order to look up
"""

helps['reservations reservation-order-id list'] = """
    type: command
    short-summary: Get list of applicable reservation order ids.
    long-summary: |
        Get applicable reservations that are applied to this subscription.
    parameters:
        - name: --subscription-id
          type: string
          short-summary: Id of the subscription to look up applied reservations
"""

helps['reservations catalog show'] = """
    type: command
    short-summary: Get catalog of available reservation.
    long-summary: |
        Get the regions and skus that are available for RI purchase for the specified Azure subscription.
    parameters:
        - name: --subscription-id
          type: string
          short-summary: Id of the subscription to get the catalog for
"""

# reservation commands.
helps['reservations reservation list'] = """
    type: command
    short-summary: Get all reservations.
    long-summary: |
        List all reservations within a reservation order.
    parameters:
        - name: --reservation-order-id
          type: string
          short-summary: Id of container reservation order
"""

helps['reservations reservation show'] = """
    type: command
    short-summary: Get details of a reservation.
    parameters:
        - name: --reservation-order-id
          type: string
          short-summary: Order id of reservation to look up
        - name: --reservation-id
          type: string
          short-summary: Reservation id of reservation to look up
"""

helps['reservations reservation update'] = """
    type: command
    short-summary: Updates the applied scopes of the reservation.
    parameters:
        - name: --reservation-order-id
          type: string
          short-summary: Order id of reservation to update
        - name: --reservation-id
          type: string
          short-summary: Reservation id of reservation to update
        - name: --applied-scope-type -t
          type: string
          short-summary: 'Type is either Single or Shared'
        - name: --applied-scopes -s
          type: string
          short-summary: 'If applied scope type is Single, this field must be provided'
"""

helps['reservations reservation split'] = """
    type: command
    short-summary: Split a reservation.
    parameters:
        - name: --reservation-order-id
          type: string
          short-summary: Order id of original reservation
        - name: --reservation-id
          type: string
          short-summary: Reservation id of original reservation
        - name: --quantity-1 -1
          type: int
          short-summary: Quantity of the first reservation that will be created from split operation
        - name: --quantity-2 -2
          type: int
          short-summary: Quantity of the second reservation that will be created from split operation
"""

helps['reservations reservation merge'] = """
    type: command
    short-summary: Merge two reservations.
    parameters:
        - name: --reservation-order-id
          type: string
          short-summary: Order id of original reservation
        - name: --reservation-id-1 -1
          type: string
          short-summary: Id of the first reservation to merge
        - name: --reservation-id-2 -2
          type: string
          short-summary: Id of the second reservation to merge
"""

helps['reservations reservation list-history'] = """
    type: command
    short-summary: Get history of a reservation.
    parameters:
        - name: --reservation-order-id
          type: string
          short-summary: Order id of the reservation
        - name: --reservation-id
          type: string
          short-summary: Reservation id of the reservation
"""
4980488 | #!/usr/bin/env python3
import scanner
# ------------------------------------------------------------------------------
# Classes for holding source code entities
class Module(object):
def __init__(self):
self.brief = ''
self.detail = ''
self.functions = set()
self.calls = set()
class Function(object):
def __init__(self):
self.order = 0
self.brief = ''
self.detail = ''
self.inputs = []
self.outputs = []
self.sideeffects = []
self.module = ''
self.private = True
self.calls = set()
# ------------------------------------------------------------------------------
# Factories for module and function filters
def hasname(name):
return lambda kv : kv[0] == name
def iscalledby(obj):
return lambda kv : kv[0] in obj.calls
def calls(name):
return lambda kv : name in kv[1].calls
def inmodule(mod_name):
return lambda kv : kv[1].module == mod_name
def ispublic():
return lambda kv : not kv[1].private
# ------------------------------------------------------------------------------
# Class for holding and and querying source code maps
class CodeMap(object):
def __init__(self):
self.mods = dict()
self.funcs = dict()
def mod(self, mod_name):
if mod_name not in self.mods:
self.mods[mod_name] = Module()
return self.mods[mod_name]
def func(self, func_name):
if func_name not in self.funcs:
self.funcs[func_name] = Function()
self.funcs[func_name].order = len(self.funcs)
return self.funcs[func_name]
def funcs_sorted(self, func_filter = lambda x : True):
filtered = list(filter(func_filter, self.funcs.items()))
filtered.sort(key = lambda x : x[1].order)
return filtered
def mods_sorted(self, mod_filter = lambda x : True):
filtered = list(filter(mod_filter, self.mods.items()))
filtered.sort(key = lambda x : x[0])
return filtered
# ------------------------------------------------------------------------------
# State machine to build a code map from scanned source
class MapBuilder(object):
    """State machine that folds scanner tokens into a CodeMap.

    Tracks the current module/function (set by "module:" / "function:"
    comments or by labels in code sections) and the current free-text block
    (detail / pass / return / sideeffects) while tokens stream in.
    """

    # Text-block states: what a plain comment line is appended to.
    B_NONE = 0
    B_DETAIL = 1
    B_PASS = 2
    B_RETURN = 3
    B_SIDEEFFECTS = 4
    # Which kind of object structured comments currently describe.
    O_NONE = 0
    O_MOD = 1
    O_FUNC = 2

    def __init__(self, code_map):
        self.code_map = code_map
        self._reset_state()

    def _reset_state(self):
        """Forget all per-file parsing context (start of run and FILE_END)."""
        self.cur_mod = ''
        self.cur_func = ''
        self.cur_block = self.B_NONE
        self.cur_obj = self.O_NONE
        self.in_code = False

    def absorb_token(self, token):
        """Consume one (type, payload) token produced by the scanner."""
        token_type = token[0]
        if token_type == scanner.FILE_END:
            self._reset_state()
        elif token_type == scanner.SECTION:
            section = token[1][0]
            attrs = token[1][1:]
            # Only executable sections are scanned for labels and calls.
            if 'text' in section or 'Text' in section or 'exec' in attrs:
                self.in_code = True
            else:
                self.in_code = False
        elif token_type == scanner.GLOBAL:
            if self.in_code:
                func_name = token[1]
                self.code_map.func(func_name).private = False
        elif token_type == scanner.LABEL:
            label = token[1]
            # Local labels (leading '.') do not start a new function.
            if self.in_code and not label.startswith('.'):
                self.cur_func = label
        elif token_type == scanner.CODE:
            code = token[1]
            if code.startswith('call'):
                callee = code.split()[1]
                self.code_map.func(self.cur_func).calls.add(callee)
        elif token_type == scanner.COMMENT:
            self._absorb_comment(token[1])
        elif token_type == scanner.FINISH:
            # Project the function-level call graph up to module level ...
            for func_name, func in self.code_map.funcs_sorted():
                for callee_name in func.calls:
                    callee = self.code_map.func(callee_name)
                    mod = self.code_map.mod(func.module)
                    mod.calls.add(callee.module)
            # ... and drop each module's self-reference.
            for mod_name, mod in self.code_map.mods_sorted():
                mod.calls -= set([mod_name])

    def _absorb_comment(self, comment):
        """Handle one comment: structured "key: value" markers or block text."""
        if comment.startswith('module: '):
            self.cur_mod = comment.split(' ', 1)[1]
            self.cur_obj = self.O_MOD
        elif comment.startswith('function: '):
            self.cur_func = comment.split(' ', 1)[1]
            self.cur_obj = self.O_FUNC
            if self.cur_mod:
                self.code_map.func(self.cur_func).module = self.cur_mod
                self.code_map.mod(self.cur_mod).functions.add(self.cur_func)
        elif comment.startswith('brief: '):
            brief = comment.split(' ', 1)[1]
            if self.cur_obj == self.O_MOD:
                self.code_map.mod(self.cur_mod).brief = brief
            elif self.cur_obj == self.O_FUNC:
                self.code_map.func(self.cur_func).brief = brief
        elif comment.startswith('calls: '):
            callee = comment.split(' ', 1)[1]
            self.code_map.func(self.cur_func).calls.add(callee)
        elif comment.startswith('detail:'):
            if self.cur_block == self.B_NONE:
                self.cur_block = self.B_DETAIL
        elif comment.startswith('pass:'):
            if self.cur_block == self.B_NONE:
                self.cur_block = self.B_PASS
        elif comment.startswith('return:'):
            if self.cur_block == self.B_NONE:
                self.cur_block = self.B_RETURN
        elif comment.startswith('sideeffects:'):  # TODO: check for "side effects"
            if self.cur_block == self.B_NONE:
                self.cur_block = self.B_SIDEEFFECTS
        elif comment == '/' and self.cur_block != self.B_NONE:
            # A bare "/" comment terminates the current text block.
            self.cur_block = self.B_NONE
        elif self.cur_block != self.B_NONE:
            if self.cur_obj == self.O_MOD:
                obj = self.code_map.mod(self.cur_mod)
            elif self.cur_obj == self.O_FUNC:
                obj = self.code_map.func(self.cur_func)
            else:
                # Bug fix: block text seen before any "module:"/"function:"
                # marker used to hit an UnboundLocalError on `obj`; ignore it.
                return
            if self.cur_block == self.B_DETAIL:
                obj.detail += comment + '\n'
            elif self.cur_block == self.B_PASS:
                obj.inputs.append(comment)
            elif self.cur_block == self.B_RETURN:
                obj.outputs.append(comment)
            elif self.cur_block == self.B_SIDEEFFECTS:
                obj.sideeffects.append(comment)
# ------------------------------------------------------------------------------
def test():
    """Placeholder for self-tests; intentionally does nothing yet."""
    pass
def main():
    """Build a CodeMap of ./src by feeding every scanner token to a MapBuilder."""
    code_map = CodeMap()
    builder = MapBuilder(code_map)
    for token in scanner.scan_project('./src'):
        builder.absorb_token(token)
    # Reporting / graph-generation entry points, kept for reference but disabled:
    #code_map.report_calls()
    #print(code_map.make_callgraph_dotfile(lambda x:True, lambda x:True))
    #print(code_map.make_module_callgraph_dotfile('Debug'))
    #print(code_map.make_function_callgraph_dotfile('DebugPutChar'))

if __name__ == '__main__':
    main()
| StarcoderdataPython |
3487026 | <reponame>patrick-nanys/python_nlp_2020_fall
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2020 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
from lab_solutions.lab02_03_is_matrix import is_matrix
def rowwise_max(M):
    """Return a list holding the maximum value of each row of matrix M.

    Raises ValueError when M does not pass the is_matrix validity check.
    """
    if not is_matrix(M):
        raise ValueError(f'Matrix {M} is not a valid matrix')
    return list(map(max, M))
def main():
    """Smoke-test rowwise_max on a 3x3 matrix."""
    matrix = [
        [1, 2, 3],
        [1, 4, 3],
        [1, 5, 3],
    ]
    assert rowwise_max(matrix) == [3, 4, 5]
    print("Tests passed.")

if __name__ == '__main__':
    main()
| StarcoderdataPython |
3242764 | <reponame>rodelrebucas/dev-overload-starterpack
"""
A thread (thread of execution) is a sequence of instructions scheduled for
execution. Threads live inside a process, and all threads of a process can
share its resources. Threads are usually used for I/O-bound tasks, to avoid
an idle CPU and to keep the main thread from blocking.
Notes:
    - OS threads and CPU threads (virtual cores) are different concepts.
    - Concurrency: making progress on several tasks at once, by either:
        - Multi-threading: divide a set of instructions across multiple threads.
          Scheduling is preemptive (the OS decides when each thread runs or is
          paused for later execution).
        - Asynchronous (single-threaded): in Python, tasks are interleaved by
          cooperative scheduling -- the code itself declares when to pause and
          yield control to other instructions.
        - Multi-processing: multiple instances of an application execute at the
          same time, each containing one or more threads. Parallelism is limited
          by the number of cores, and this model is usually used for CPU-bound tasks.
    - Parallelism: multi-processing gives true parallelism -- tasks execute at
      the same instant rather than being interleaved.
"""
from concurrent.futures.thread import ThreadPoolExecutor
from concurrent.futures import as_completed
import logging
# Log to a file; DEBUG level keeps the thread scheduling visible.
# NOTE(review): the name err.log suggests errors only, yet level is DEBUG -- confirm intent.
logging.basicConfig(filename="err.log", level=logging.DEBUG)
def p(q):
    """Double *q*; the value 7 is special-cased, logged, and mapped to None."""
    if q != 7:
        return q * 2
    logging.info("Raise 7")
    return None
## Thread pool capped at 10 worker threads; the pool joins on context exit.
with ThreadPoolExecutor(10) as executor:
    ## Alternative: map applies p across the whole range eagerly.
    # executor.map(p, [n for n in range(1000)])
    ## Submit one future per input value.
    futures = [executor.submit(p, i) for i in range(10)]
    # as_completed yields futures in completion order, not submission order.
    for future in as_completed(futures):
        print("res::", future.result())
## See also multiprocessing, ProcessPoolExecutor
| StarcoderdataPython |
12856159 | <filename>remove_empty_csv's.py
import psycopg2
import sys
from nltk.tokenize import sent_tokenize
import re
import csv
import os
# Sweep the per-range CSV exports ('pmid {start - end}.csv') in 100000-pmid
# windows and delete any file that contains only its header row.
try:
    # Historical window markers kept for reference:
    # starting_pmid = 16300001
    # intermediate_pmid = 16400000
    starting_pmid = 100001
    intermediate_pmid = 200000
    ending_pmid = 32078260
    while 1:
        if intermediate_pmid<ending_pmid:
            #open existing csv files
            with open('pmid {%s - %s}.csv' % (starting_pmid, intermediate_pmid), mode='r') as csv_file:
                reader = csv.reader(csv_file)
                # A single row means header only -- no data was exported.
                if len(list(reader))==1:
                    #removing the file if there is only header in the file and there is no data
                    # NOTE(review): the file is still open here; os.remove on an
                    # open file works on POSIX but fails on Windows -- confirm.
                    os.remove('pmid {%s - %s}.csv' % (starting_pmid, intermediate_pmid))
                    print ("File " + str(starting_pmid) + " - " + str(intermediate_pmid) + " has been removed.")
                else:
                    print ("File " + str(starting_pmid) + " - " + str(intermediate_pmid) + " is not empty.")
            # Advance to the next 100000-wide pmid window.
            starting_pmid = intermediate_pmid + 1
            intermediate_pmid = intermediate_pmid + 100000
        else:
            # Final (partial) window ends at ending_pmid instead of the step.
            print("Entering base case ...")
            with open('pmid {%s - %s}.csv' % (starting_pmid, ending_pmid), mode='r') as csv_file:
                reader = csv.reader(csv_file)
                if len(list(reader))==1:
                    os.remove('pmid {%s - %s}.csv' % (starting_pmid, ending_pmid))
                    print ("File " + str(starting_pmid) + " - " + str(ending_pmid) + " has been removed.")
                else:
                    print ("File " + str(starting_pmid) + " - " + str(ending_pmid) + " is not empty.")
            break
    # Dataset statistics kept for reference:
    #94357012, total rows
    #51556076, null affiliation
    #42800936, not null affiliation
    #21, minimum pmid
    #32078260, maximum pmid
    # print(len(temp_row))
    # SystemExit is not an Exception subclass, so the except below won't catch it.
    sys.exit('Script completed')
except (Exception, psycopg2.Error) as error:
    # NOTE(review): `error` is never logged, so failures are silent -- confirm.
    sys.exit('Script failed')
| StarcoderdataPython |
10716 | import tensorflow as tf
def _smooth_l1_loss(y_true, y_pred):
    """Elementwise smooth-L1 (Huber, delta=1) loss between targets and predictions."""
    diff = tf.abs(y_pred - y_true)
    # Quadratic inside |diff| < 1, linear (minus the offset) outside.
    return tf.where(diff < 1, 0.5 * diff ** 2, diff - 0.5)
def MultiBoxLoss(num_class=2, neg_pos_ratio=3):
    """Build a multi-box loss closure for box / landmark / class heads.

    y_true packs 14 values per prior on the last axis: [0:4] box targets,
    [4:12] eight landmark targets, [12] landmark-valid flag, [13] class label
    (1 pos, 0 neg, -1 ignore).  y_pred is a triple of (box regressions,
    landmark regressions, class logits).  The returned function yields
    (localization, landmark, classification) losses, with hard negative
    mining keeping at most neg_pos_ratio negatives per positive.
    """
    def multi_box_loss(y_true, y_pred):
        num_batch = tf.shape(y_true)[0]
        num_prior = tf.shape(y_true)[1]
        # Flatten batch and prior axes so masks select individual anchors.
        loc_pred = tf.reshape(y_pred[0], [num_batch * num_prior, 4])
        landm_pred = tf.reshape(y_pred[1], [num_batch * num_prior, 8])
        class_pred = tf.reshape(y_pred[2], [num_batch * num_prior, num_class])
        loc_true = tf.reshape(y_true[..., :4], [num_batch * num_prior, 4])
        landm_true = tf.reshape(y_true[..., 4:12], [num_batch * num_prior, 8])
        landm_valid = tf.reshape(y_true[..., 12], [num_batch * num_prior, 1])
        class_true = tf.reshape(y_true[..., 13], [num_batch * num_prior, 1])
        # define filter mask: class_true = 1 (pos), 0 (neg), -1 (ignore)
        #                     landm_valid = 1 (w landm), 0 (w/o landm)
        mask_pos = tf.equal(class_true, 1)
        mask_neg = tf.equal(class_true, 0)
        # Landmarks count only on positive anchors that carry landmark labels.
        mask_landm = tf.logical_and(tf.equal(landm_valid, 1), mask_pos)
        # landm loss (smooth L1)
        mask_landm_b = tf.broadcast_to(mask_landm, tf.shape(landm_true))
        loss_landm = _smooth_l1_loss(tf.boolean_mask(landm_true, mask_landm_b),
                                     tf.boolean_mask(landm_pred, mask_landm_b))
        loss_landm = tf.reduce_mean(loss_landm)
        # localization loss (smooth L1)
        mask_pos_b = tf.broadcast_to(mask_pos, tf.shape(loc_true))
        loss_loc = _smooth_l1_loss(tf.boolean_mask(loc_true, mask_pos_b),
                                   tf.boolean_mask(loc_pred, mask_pos_b))
        loss_loc = tf.reduce_mean(loss_loc)
        # classification loss (crossentropy)
        # 1. compute max conf across batch for hard negative mining
        #    (score = 1 - background probability, only on negative anchors)
        loss_class = tf.where(mask_neg,
                              1 - class_pred[:, 0][..., tf.newaxis], 0)
        # 2. hard negative mining: rank negatives by difficulty per batch item.
        loss_class = tf.reshape(loss_class, [num_batch, num_prior])
        loss_class_idx = tf.argsort(loss_class, axis=1, direction='DESCENDING')
        loss_class_idx_rank = tf.argsort(loss_class_idx, axis=1)
        mask_pos_per_batch = tf.reshape(mask_pos, [num_batch, num_prior])
        num_pos_per_batch = tf.reduce_sum(
            tf.cast(mask_pos_per_batch, tf.float32), 1, keepdims=True)
        num_pos_per_batch = tf.maximum(num_pos_per_batch, 1)
        num_neg_per_batch = tf.minimum(neg_pos_ratio * num_pos_per_batch,
                                       tf.cast(num_prior, tf.float32) - 1)
        mask_hard_neg = tf.reshape(
            tf.cast(loss_class_idx_rank, tf.float32) < num_neg_per_batch,
            [num_batch * num_prior, 1])
        # 3. classification loss including positive and negative examples
        loss_class_mask = tf.logical_or(mask_pos, mask_hard_neg)
        loss_class_mask_b = tf.broadcast_to(loss_class_mask,
                                            tf.shape(class_pred))
        filter_class_true = tf.boolean_mask(tf.cast(mask_pos, tf.float32),
                                            loss_class_mask)
        filter_class_pred = tf.boolean_mask(class_pred, loss_class_mask_b)
        filter_class_pred = tf.reshape(filter_class_pred, [-1, num_class])
        loss_class = tf.keras.losses.sparse_categorical_crossentropy(
            y_true=filter_class_true, y_pred=filter_class_pred)
        loss_class = tf.reduce_mean(loss_class)
        return loss_loc, loss_landm, loss_class
    return multi_box_loss
| StarcoderdataPython |
3316787 | from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Question, Answer, QuestionAnswer
from .utils import notify_new_question
@receiver(post_save, sender=Question)
def new_question_handler(sender, instance, created, **kwargs):
    """On first save of a Question: notify staff and create its answer records.

    Improvement: guard clause returns early on plain updates, so the
    notification strings are no longer built (and discarded) on every save.
    """
    if not created:
        return
    subject = f'New question from {instance.questioner}'
    message = f'A question with id {instance.question_id} is just sent by {instance.questioner}'
    notify_new_question(subject, message)
    # create answer instance
    related_answer = Answer()
    related_answer.save()
    # create questionanswer instance linking question and its (empty) answer
    QuestionAnswer.objects.create(answer=related_answer, question=instance)
@receiver(post_save, sender=Answer)
def update_question(sender, instance, created, **kwargs):
    """Mark the linked question as answered whenever its answer is updated."""
    if created:
        return
    question = QuestionAnswer.objects.get(answer=instance).question
    question.answered = True
    question.save()
| StarcoderdataPython |
9689984 | ########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from cloudify import ctx
from cloudify.decorators import operation
import cloudify_nsx.library.nsx_security_group as nsx_security_group
import cloudify_nsx.library.nsx_common as common
from cloudify import exceptions as cfy_exc
@operation
def create(**kwargs):
    """Create (or adopt) the NSX security group described by the 'group' node properties.

    Behaviour: reuses a previously-recorded resource_id; otherwise looks the
    group up by scope and name, adopts it when use_existing is set, errors on
    name clashes, and finally creates it when absent.
    """
    # Schema for the 'group' properties; None-able fields are optional.
    validation_rules = {
        "scopeId": {
            "default": "globalroot-0",
            "required": True
        },
        "name": {
            "required": True
        },
        "member": {
            "set_none": True
        },
        "excludeMember": {
            "set_none": True
        },
        "dynamicMemberDefinition": {
            "set_none": True
        }
    }
    use_existing, group = common.get_properties_and_validate(
        'group', kwargs, validation_rules
    )
    resource_id = ctx.instance.runtime_properties.get('resource_id')
    # Idempotency: a recorded id means a previous run already finished.
    if resource_id:
        ctx.logger.info("Reused %s" % resource_id)
        return
    # credentials
    client_session = common.nsx_login(kwargs)
    # resource_id is always falsy here (the truthy case returned above).
    if not resource_id:
        resource_id, _ = nsx_security_group.get_group(client_session,
                                                      group['scopeId'],
                                                      group['name'])
    if use_existing and resource_id:
        ctx.instance.runtime_properties['resource_id'] = resource_id
        ctx.logger.info("Used existed %s" % resource_id)
    elif resource_id:
        raise cfy_exc.NonRecoverableError(
            "Security group '%s' already exists" % group['name']
        )
    elif use_existing:
        raise cfy_exc.NonRecoverableError(
            "Security group '%s' does not exist" % group['name']
        )
    if not resource_id:
        resource_id = nsx_security_group.add_group(
            client_session,
            group['scopeId'],
            group['name'],
            group['member'],
            group['excludeMember'],
            group['dynamicMemberDefinition']
        )
    # NOTE(review): on the use_existing path this also logs "created ..."
    # right after "Used existed ..." -- confirm whether an early return
    # was intended above.
    ctx.instance.runtime_properties['resource_id'] = resource_id
    ctx.logger.info("created %s" % resource_id)
@operation
def delete(**kwargs):
    """Delete the security group resource; delegates to the shared helper."""
    common.delete_object(nsx_security_group.del_group, 'group', kwargs)
6577933 | """
A reader for sunpy map data.
"""
from qtpy import QtWidgets
import sunpy.map
from sunpy.map.mapbase import GenericMap
from glue.config import data_factory, importer, qglue_parser
from glue.core.data import Data
from glue.core.component import Component
from glue.core.visual import VisualAttributes
from glue.core.data_factories import is_fits
from .sunpy_maps.loader import QtSunpyMapImporter
__all__ = ['import_sunpy_map', 'read_sunpy_map', '_parse_sunpy_map']
@qglue_parser(GenericMap)
def _parse_sunpy_map(data, label):
    """Convert a SunPy ``GenericMap`` into a glue ``Data`` object."""
    smap = data
    glue_data = Data(label=label + '-' + smap.name)
    # Reuse the map's WCS directly; it preserves more info in some cases.
    glue_data.coords = smap.wcs
    glue_data.add_component(Component(smap.data), smap.name)
    glue_data.meta = smap.meta
    glue_data.style = VisualAttributes(color='#FDB813', preferred_cmap=smap.cmap)
    return glue_data
@data_factory('SunPy Map', is_fits)
def read_sunpy_map(sunpy_map_file):
    """Data-factory entry point: load a FITS file as glue data via SunPy."""
    return _parse_sunpy_map(sunpy.map.Map(sunpy_map_file), 'sunpy-map')
def pick_directory(caption):
    """
    Pick the directory to load SunPy map files from.

    Returns the selected directory path (str), or an empty list when the
    dialog is cancelled.  NOTE(review): the two return types differ
    (str vs list) -- callers should be checked.
    """
    dialog = QtWidgets.QFileDialog(caption=caption)
    dialog.setFileMode(QtWidgets.QFileDialog.Directory)
    # exec_() blocks until the dialog closes and returns the result code.
    directory = dialog.exec_()
    if directory == QtWidgets.QDialog.Rejected:
        return []
    # selectedFiles() holds the chosen directory as its single entry.
    directory = dialog.selectedFiles()
    return directory[0]
@importer("Import SunPy Map Directory")
def import_sunpy_map():
"""
Import SunPy maps with directory importer.
"""
caption = "Select a directory containing SunPy Map files."
directory = pick_directory(caption)
wi = QtSunpyMapImporter(directory)
wi.exec_()
return wi.datasets
| StarcoderdataPython |
343323 | <filename>robot/Cumulus/resources/AffiliationPageObject.py<gh_stars>0
from cumulusci.robotframework.pageobjects import ListingPage
from cumulusci.robotframework.pageobjects import DetailPage
from cumulusci.robotframework.pageobjects import pageobject
from BaseObjects import BaseNPSPPage
from NPSP import npsp_lex_locators
@pageobject("Listing", "Affiliation")
class AffiliationListingPage(BaseNPSPPage, ListingPage):
object_name = "npe5__Affiliation__c"
@pageobject("Details", "Affiliation")
class AffiliationDetailPage(BaseNPSPPage,DetailPage ):
object_name = "npe5__Affiliation__c"
def _is_current_page(self):
""" Verify we are on the Account detail page
by verifying that the url contains '/view'
"""
self.selenium.wait_until_location_contains("/view", timeout=60, message="Record view did not open in 1 min")
self.selenium.location_should_contain("/lightning/r/npe5__Affiliation__c/",message="Current page is not an Affiliation record view")
| StarcoderdataPython |
# Exercise 025: check whether a full name contains the word "SILVA".
# (Prompt and output strings are intentionally in Portuguese.)
name = str(input('Qual seu nome completo?: ')).strip().upper()
# Membership test against the upper-cased name makes the check case-insensitive.
print('Seu nome tem a palavra SILVA?: {}'.format('SILVA'in name))
print('xD')
1991444 | from flask_wtf import Form
from wtforms.fields import StringField
class SearchForm(Form):
    """Single-field form backing the site search box."""
    search_field = StringField('Search')
| StarcoderdataPython |
1858867 | <filename>profile_app/views.py
from django.shortcuts import render
from django.http import HttpResponseRedirect
from submit_site import models
from django.contrib.auth.models import User
# Create your views here.
def edit_profile(request):
    """Edit the current user's profile (view is still unfinished).

    Bug fixes: the redirect used the nonexistent name HttpRequestRedirect
    (the import is HttpResponseRedirect), and QuerySet.filter() was called
    with a bare positional string, which raises TypeError at runtime.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect("/accounts")
    User.objects.filter(email=request.user.email)
    # TODO(review): the query result is unused and no HttpResponse is
    # returned yet -- this view still needs its template/response.
def view_profile(request):
    """Render the profile page for the logged-in user.

    Bug fix: the redirect used the nonexistent name HttpRequestRedirect;
    the actual import is HttpResponseRedirect.
    """
    if not request.user.is_authenticated():
        return HttpResponseRedirect("/accounts")
    # Label/value pairs consumed by the template.  NOTE(review): first_name
    # and last_name appear repurposed as Company / Broker Name -- confirm.
    user_info = (
        ("Email: ", request.user.email),
        ("??? ", request.user.username),  # TODO(review): give this field a label
        ("Company:", request.user.first_name),
        ("Broker Name: ", request.user.last_name),
    )
    context = {
        "user_info": user_info
    }
    template = "profile_page.html"
    return render(request, template, context)
| StarcoderdataPython |
9716685 | """
Copyright 2016-present Nike, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and* limitations under the License.*
"""
# Stuff for tests...
import json
import sys
import requests
from cerberus import CerberusClientException
from mock import patch
from nose.tools import raises, assert_equals, assert_dict_equal
from cerberus.user_auth import UserAuth
class TestUserAuth(object):
    """Test class for user auth. This uses Mock to mock external API calls"""
    @classmethod
    def setup_class(cls):
        """Build the client under test and canned MFA-required responses."""
        cls.client = UserAuth("https://cerberus.fake.com", 'testuser', '<PASSWORD>')
        # Auth response offering a single MFA device.
        cls.auth_resp = {
            "status": "mfa_req",
            "data": {
                "username": "<EMAIL>",
                "state_token": "0127a384d305138d4e",
                "client_token": "None",
                "user_id": "1325",
                "devices": [{"id": "223", "name": "Google Authenticator"}]
            }
        }
        # Auth response offering two MFA devices (exercises device selection).
        cls.auth_resp_multi = {
            "status": "mfa_req",
            "data": {
                "username": "<EMAIL>",
                "state_token": "0127a384d305138d4e",
                "client_token": "None",
                "user_id": "1325",
                "devices": [
                    {"id": "223", "name": "Google Authenticator"},
                    {"id": "224", "name": "OTP Authenticator"}
                ]
            }
        }

    @staticmethod
    def _mock_response(status=200, reason=None, content=''):
        """Build a real requests.Response populated with the fields under test."""
        mock_resp = requests.Response()
        mock_resp.status_code = status
        # Reason the status code occurred.
        mock_resp.reason = reason
        # Raw content in bytes, as requests stores it internally.
        mock_resp._content = bytes(content.encode('utf-8'))
        return mock_resp

    def test_username(self):
        """ Testing to make sure username match """
        assert_equals(self.client.username, 'testuser')

    @patch('cerberus.user_auth.UserAuth.get_auth')
    def test_get_token(self, mock_get_auth):
        """ Test to make sure the correct token is returned """
        mock_get_auth.return_value = {
            "status": "success",
            "data": {
                "client_token": {
                    "client_token": "<PASSWORD>",
                }
            }
        }
        token = self.client.get_token()
        assert_equals(token, '<PASSWORD>')

    @patch('requests.get')
    def test_get_auth(self, mock_get):
        """ Test that correct response is returned by get_auth """
        # mock return response
        mock_resp = self._mock_response(content=json.dumps(self.auth_resp))
        mock_get.return_value = mock_resp
        response = self.client.get_auth()
        # confirm response matches the mock
        assert_dict_equal(response, self.auth_resp)

    # Patch target for input() differs between Python 2 and 3.
    if sys.version_info[0] < 3:
        input_module = '__builtin__.input'
    else:
        input_module = 'builtins.input'

    @patch(input_module, return_value='0987654321')
    @patch('requests.post')
    def test_mfa_response(self, mock_post, mock_input=None):
        """ Testing that mfa_response returns the correct json """
        mfa_data = {
            "status": "success",
            "data": {
                "user_id": "134",
                "username": "<EMAIL>",
                "state_token": None,
                "devices": [],
                "client_token": {
                    "client_token": "<PASSWORD>",
                    "policies": ["cloud-events-owner", "pixie-dust-owner"],
                    "metadata": {
                        "groups": "Rainbow.Playgroun.User,CareBear.users",
                        "is_admin": "false",
                        "username": "<EMAIL>"
                    },
                    "lease_duration": 3600,
                    "renewable": True
                }
            }
        }
        # mock all the things
        mock_post.return_value = self._mock_response(content=json.dumps(mfa_data))
        response = self.client.get_mfa(self.auth_resp)
        # confirm the json matches
        assert_dict_equal(response, mfa_data)

    @patch(input_module, side_effect=[ '1', '0987654321'])
    @patch('requests.post')
    def test_multi_mfa_response(self, mock_post, mock_input=None):
        """ Testing that mfa_response returns the correct json when there are multiple MFAs available """
        mfa_data = {
            "status": "success",
            "data": {
                "user_id": "134",
                "username": "<EMAIL>",
                "state_token": None,
                "devices": [],
                "client_token": {
                    "client_token": "<PASSWORD>",
                    "policies": ["cloud-events-owner", "pixie-dust-owner"],
                    "metadata": {
                        "groups": "Rainbow.Playgroun.User,CareBear.users",
                        "is_admin": "false",
                        "username": "<EMAIL>"
                    },
                    "lease_duration": 3600,
                    "renewable": True
                }
            }
        }
        # mock all the things
        mock_post.return_value = self._mock_response(content=json.dumps(mfa_data))
        response = self.client.get_mfa(self.auth_resp_multi)
        # confirm the json matches
        assert_dict_equal(response, mfa_data)

    @raises(CerberusClientException)
    @patch(input_module, return_value='a1')
    def test_multi_mfa_response_text(self, mock_input=None):
        """ Testing improper inputs for Multiple MFA selections, (a1) """
        # mock all the things
        response = self.client.get_mfa(self.auth_resp_multi)

    @raises(CerberusClientException)
    @patch(input_module, return_value='-1')
    def test_multi_mfa_response_low(self, mock_input=None):
        """ Testing improper inputs for Multiple MFA selections, (-1) """
        # mock all the things
        response = self.client.get_mfa(self.auth_resp_multi)

    @raises(CerberusClientException)
    @patch(input_module, return_value='2')
    def test_multi_mfa_response_high(self, mock_input=None):
        """ Testing improper inputs for Multiple MFA selections, (2) """
        # mock all the things
        response = self.client.get_mfa(self.auth_resp_multi)

    @raises(CerberusClientException)
    @patch('requests.get')
    def test_when_not_200_status_code(self, mock_get):
        """ test when 200 status code is not returned"""
        data = json.dumps({"error_id": "123", "errors": []})
        mock_resp = self._mock_response(status=404, reason='Not Found', content=data)
        mock_get.return_value = mock_resp
        self.client.get_auth()
| StarcoderdataPython |
8186761 | <filename>generate.py
from jinja2 import Environment, FileSystemLoader
import csv
def _render_template(name, seats):
    """Render the 'remind.html' reminder template for *name* with *seats*."""
    env = Environment(
        loader=FileSystemLoader('templates'),
        trim_blocks=True,
        lstrip_blocks=True,
        keep_trailing_newline=True,
    )
    return env.get_template('remind.html').render(name=name, seats=seats)
| StarcoderdataPython |
1691834 | import numpy as np
class Activator(object):
    """Interface for activation functions.

    forward(z) maps pre-activations to activations; backward(z, a, delta)
    returns (dz, da) where da is the local derivative and dz combines it
    with the upstream gradient delta (see the subclasses below).
    """
    def forward(self, z):
        # Subclasses return the activation a for pre-activation z.
        pass
    def backward(self, z, a, delta):
        # Subclasses return (dz, da).
        pass
class Identity(Activator):
    """Pass-through activation: a = z."""
    def forward(self, z):
        # The activation is the input itself.
        return z
    def backward(self, z, a, delta):
        # Gradient passes straight through unchanged.
        return delta, a
class Sigmoid(Activator):
    """Logistic sigmoid activation."""

    def forward(self, z):
        """sigma(z) = 1 / (1 + exp(-z))."""
        return 1.0 / (1.0 + np.exp(-z))

    def backward(self, z, a, delta):
        """Gradient: sigma'(z) = a * (1 - a), with a = forward(z)."""
        da = a * (1 - a)
        dz = delta * da
        return dz, da
class Tanh(Activator):
    """Hyperbolic tangent activation."""

    def forward(self, z):
        """tanh(z).

        Fix: the previous closed form 2/(1 + exp(-2z)) - 1 overflowed (with a
        RuntimeWarning) for large negative z; np.tanh is stable for all z.
        """
        return np.tanh(z)

    def backward(self, z, a, delta):
        """Gradient: tanh'(z) = 1 - a**2, with a = forward(z)."""
        da = 1 - a * a
        dz = delta * da
        return dz, da
class Relu(Activator):
    """Rectified linear unit."""

    def forward(self, z):
        """max(z, 0) elementwise."""
        return np.maximum(z, 0)

    def backward(self, z, a, delta):
        """Gradient is 1 where z > 0 and 0 elsewhere."""
        da = (z > 0).astype(float)
        dz = da * delta
        return dz, da
class BenIdentity(Activator):
    """Bent identity activation: (sqrt(z^2 + 1) - 1) / 2 + z."""

    def forward(self, z):
        root = np.sqrt(z * z + 1)
        return (root - 1) / 2 + z

    def backward(self, z, a, delta):
        """Gradient: z / (2 * sqrt(z^2 + 1)) + 1."""
        da = z / (2 * np.sqrt(z * z + 1)) + 1
        dz = delta * da
        return dz, da
class Elu(Activator):
    """Exponential Linear Unit (ELU)."""

    def __init__(self, alpha):
        # Scale of the negative, saturating branch.
        self.alpha = alpha

    def forward(self, z):
        """z for z > 0, alpha * (exp(z) - 1) otherwise.

        Generalized from a 1-D list comprehension to arrays of any shape;
        expm1 improves accuracy near zero.
        """
        z = np.asarray(z)
        # minimum() keeps exp's argument <= 0 so the unused branch cannot overflow.
        return np.where(z > 0, z, self.alpha * np.expm1(np.minimum(z, 0)))

    def backward(self, z, a, delta):
        """Gradient: 1 for z > 0, alpha * exp(z) otherwise.

        Bug fix: the old code evaluated the derivative at the *activation*
        (alpha * exp(a)) instead of at the input z, yielding wrong gradients
        on the negative branch.
        """
        z = np.asarray(z)
        da = np.where(z > 0, 1.0, self.alpha * np.exp(np.minimum(z, 0)))
        dz = delta * da
        return dz, da
class LeakyRelu(Activator):
    """Leaky ReLU activation."""

    def __init__(self, alpha):
        # Slope applied on the negative branch (typically a small positive value).
        self.alpha = alpha

    def forward(self, z):
        """z for z > 0, alpha * z otherwise (vectorized; any array shape)."""
        z = np.asarray(z)
        return np.where(z > 0, z, self.alpha * z)

    def backward(self, z, a, delta):
        """Gradient: 1 for z > 0, alpha otherwise.

        Fix: branch on the input z rather than on the activation a; the two
        only coincide when alpha > 0.
        """
        z = np.asarray(z)
        da = np.where(z > 0, 1.0, self.alpha)
        dz = delta * da
        return dz, da
class SoftPlus(Activator):
    """Softplus activation: log(1 + exp(z))."""

    def forward(self, z):
        # logaddexp(0, z) == log(1 + exp(z)) but never overflows for large z
        # (the previous direct form produced inf with a RuntimeWarning).
        return np.logaddexp(0.0, z)

    def backward(self, z, a, delta):
        # The derivative is the logistic sigmoid, computed as exp(-softplus(-z))
        # so it is overflow-safe for both signs of z (the previous
        # exp(z) / (1 + exp(z)) form went nan for large positive z).
        da = np.exp(-np.logaddexp(0.0, -z))
        dz = delta * da
        return dz, da
class Step(Activator):
    """Heaviside step activation with a configurable threshold."""

    def __init__(self, threshold):
        # Inputs strictly above this value activate to 1.
        self.threshold = threshold

    def forward(self, z):
        """1 where z > threshold, else 0.

        Generalized from a 1-D list comprehension to arrays of any shape.
        """
        return np.where(np.asarray(z) > self.threshold, 1, 0)

    def backward(self, z, a, delta):
        # The step function's derivative is zero almost everywhere.
        da = np.zeros(a.shape)
        dz = da  # same array object as da, as in the original implementation
        return dz, da
| StarcoderdataPython |
8074617 | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\routing\route_events\route_event_type_animation.py
# Compiled at: 2020-08-25 01:06:19
# Size of source mod 2**32: 11869 bytes
import functools, services
from animation.arb import Arb
from animation.arb_element import distribute_arb_element
from animation.posture_manifest import MATCH_NONE
from event_testing.resolver import SingleObjectResolver, DoubleSimResolver, SingleSimResolver
from event_testing.results import TestResult
from interactions import ParticipantType
from interactions.utils.animation_reference import TunableAnimationReference
from interactions.utils.routing import FollowPath
from postures import are_carry_compatible
from routing.route_events.route_event_mixins import RouteEventDataBase
from sims4.math import MAX_INT32
from sims4.tuning.tunable import HasTunableFactory, AutoFactoryInit, OptionalTunable, TunableRange, TunableEnumEntry, TunableList, TunableReference, TunableTuple, Tunable
import sims4.log
logger = sims4.log.Logger('RouteEvents', default_owner='rmccord')
class RouteEventTypeAnimation(RouteEventDataBase, HasTunableFactory, AutoFactoryInit):
FACTORY_TUNABLES = {'animation_elements':TunableList(description='\n List of animation elements that will be played.\n ',
tunable=TunableAnimationReference(description='\n The animation that Sims play during the Route Event.\n ',
callback=None,
class_restrictions=()),
minlength=1),
'_duration_override':OptionalTunable(description="\n If enabled, we override the must run duration we expect this route\n event to take. We do this for animations that will freeze the\n locomotion so that we don't actually take time away from the rest of\n the path where other route events could play.\n ",
tunable=TunableRange(description='\n The duration we want this route event to have. This modifies how\n much of the route time this event will take up to play the\n animation. For route events that freeze locomotion, you might\n want to set this to a very low value. Bear in mind that high\n values are less likely to be scheduled for shorter routes.\n ',
tunable_type=float,
default=0.1,
minimum=0.1)),
'target_participant':OptionalTunable(description='\n The target of the animation based on the resolver of the actor\n playing the route event.\n ',
tunable=TunableEnumEntry(description='\n The participant related to the actor that plays the route event.\n ',
tunable_type=ParticipantType,
default=(ParticipantType.ObjectChildren))),
'loots_on_xevt':TunableList(description='\n A list of loot operations that will be applied at an xevent\n during the route event animation. Using this tuning will modify the\n way we schedule the animation for this route event, so should only\n be used after discussion with a GPE.\n ',
tunable=TunableTuple(loot=TunableReference(description='\n Loot to be applied.\n ',
manager=(services.get_instance_manager(sims4.resources.Types.ACTION)),
pack_safe=True),
xevt=Tunable(description='\n The id of the xevent.\n ',
tunable_type=int,
default=101)))}
def __init__(self, *args, **kwargs):
(super().__init__)(*args, **kwargs)
self.arb = None
self._duration_total = MAX_INT32
self._duration_must_run = MAX_INT32
self._duration_repeat = MAX_INT32
self.defer_process_until_execute = False
self.target_loot_sim = None
@classmethod
def test(cls, actor, event_data_tuning, ignore_carry=False):
if actor is None:
return TestResult(False, 'Route Event Actor is None.')
if actor.is_sim:
for animation_element in event_data_tuning.animation_elements:
postures = animation_element.get_supported_postures()
sim_posture_state = actor.posture_state
provided_postures = sim_posture_state.body.get_provided_postures(surface_target=MATCH_NONE)
supported_postures = provided_postures.intersection(postures)
if not supported_postures:
return TestResult(False, 'Animation Route Event does not support {} for {}.', actor.posture_state, actor)
carry_state = ignore_carry or sim_posture_state.get_carry_state()
return any((are_carry_compatible(entry, carry_state) for entry in supported_postures)) or TestResult(False, 'Animation Route Event does not support {} for {}.', actor.posture_state, actor)
return TestResult.TRUE
@property
def duration_override(self):
if self._duration_override is not None:
return self._duration_override
return self._duration_must_run
def get_target(self, actor):
if self.target_participant is None:
return
elif actor.is_sim:
resolver = SingleSimResolver(actor.sim_info)
else:
resolver = SingleObjectResolver(actor)
targets = resolver.get_participants(self.target_participant)
if targets:
return next(iter(targets))
def prepare(self, actor, setup_asm_override=None):
def restart_asm(asm):
asm.set_current_state('entry')
return True
target = self.get_target(actor)
routing_component = actor.routing_component
if actor.is_sim:
route_interaction = routing_component.route_interaction
if route_interaction is None:
logger.error('Route Interaction was None for {}', actor)
return
self.arb = Arb()
for animation_element in self.animation_elements:
if actor.is_sim:
route_event_animation = animation_element(route_interaction, setup_asm_additional=(restart_asm if setup_asm_override is None else setup_asm_override),
enable_auto_exit=False)
asm = route_event_animation.get_asm()
if asm is not None:
if target is not None:
if not asm.set_actor(route_event_animation.target_name, target):
logger.error('Route Event {} Failed to setup target.', self)
return
if asm is None:
logger.warn('Unable to get a valid Route Event ASM ({}) for {}.', route_event_animation, actor)
return
else:
route_event_animation = animation_element(actor, target=target,
setup_asm_func=(restart_asm if setup_asm_override is None else setup_asm_override))
animation_context = routing_component.animation_context
asm = route_event_animation.get_asm(animation_context=animation_context)
if asm is None:
logger.warn('Unable to get a valid Route Event ASM ({}) for {}.', route_event_animation, actor)
return
route_event_animation.append_to_arb(asm, self.arb)
route_event_animation.append_exit_to_arb(asm, self.arb)
if self.arb is None:
logger.error('Unable to create arb for Route Event: {}', self)
return
self._duration_total, self._duration_must_run, self._duration_repeat = self.arb.get_timing()
def is_valid_for_scheduling(self, actor, path):
if self.arb is None or self.arb.empty:
return False
return True
def execute(self, actor, **kwargs):
    """Distribute the prepared arb, wiring any xevt-triggered loot first.

    Only distributes here when loot is keyed to animation events or when
    processing was deferred; otherwise process() already distributed the arb.
    """
    def _event_handler(resolver, loot, *_, **__):
        loot.apply_to_resolver(resolver)
    if self.arb is not None:
        if self.loots_on_xevt or self.defer_process_until_execute:
            target_sim = self.target_loot_sim() if self.target_loot_sim is not None else None
            if target_sim is not None:
                resolver = DoubleSimResolver(actor.sim_info, target_sim.sim_info)
            else:
                resolver = SingleSimResolver(actor.sim_info)
            # Fire each loot when its animation event (xevt) is reached.
            for loot_tuning in self.loots_on_xevt:
                callback = functools.partial(_event_handler, resolver, loot_tuning.loot)
                self.arb.register_event_handler(callback, handler_id=(loot_tuning.xevt))
            distribute_arb_element((self.arb), master=actor, immediate=True)
            if actor.primitives:
                for primitive in tuple(actor.primitives):
                    if isinstance(primitive, FollowPath):
                        # Keep the route primitive asleep for the must-run
                        # portion of the animation; only the first FollowPath
                        # is updated — presumably at most one exists (TODO confirm).
                        primitive.set_animation_sleep_end(self._duration_must_run)
                        return
def process(self, actor):
    """Distribute the prepared arb immediately during route processing.

    Skipped when loot is keyed to animation events or processing is deferred;
    in those cases execute() performs the distribution instead.
    """
    # Fixed: the original final line had a stray dataset artifact token
    # ("| StarcoderdataPython") fused onto it, which would raise NameError.
    if self.arb is not None:
        if not self.loots_on_xevt:
            if not self.defer_process_until_execute:
                distribute_arb_element(self.arb, master=actor, immediate=True)
# -*- coding: utf-8 -*-
"""
*This application demonstrates a simulation of a schedule of fires given geospatial locations and specified datetimes (at one minute resolution)*
The application contains a single :obj:`Environment` class which listens to the time status published by the manager application and publishes fire information at the specified ignition :obj:`datetime`. The application also contains callback messages that updates :obj:`datetime` in the fires :obj:`DataFrame` for each of ignition (including latitude-longitude :obj:`GeographicPosition`), detection, and reporting.
"""
import logging
from datetime import datetime, timezone, timedelta
from dotenv import dotenv_values
import pandas as pd
pd.options.mode.chained_assignment = None
import importlib.resources
from nost_tools.application_utils import ConnectionConfig, ShutDownObserver
from nost_tools.observer import Observer
from nost_tools.managed_application import ManagedApplication
from fire_config_files.schemas import FireState, FireStarted, FireDetected, FireReported
from fire_config_files.config import PREFIX, SCALE
logging.basicConfig(level=logging.INFO)
# define an observer to manage fire updates and record to a dataframe fires
# define an observer to manage fire updates and record to a dataframe fires
class Environment(Observer):
    """
    *The Environment object class inherits properties from the Observer object class in the NOS-T tools library*

    Attributes:
        app (:obj:`ManagedApplication`): An application containing a test-run namespace, a name and description for the app, client credentials, and simulation timing instructions
        fires (:obj:`DataFrame`): Dataframe of scenario scheduled fires including fireId (*int*), fire ignition (:obj:`datetime`), and fire latitude-longitude location (:obj:`GeographicPosition`)
    """

    def __init__(self, app, fires):
        self.app = app
        self.fires = fires

    def on_change(self, source, property_name, old_value, new_value):
        """
        *Standard on_change callback function format inherited from Observer object class*

        In this instance, the callback function checks the simulation :obj:`datetime` against each scheduled fire ignition :obj:`datetime` for the scenario. If past the scheduled start of a fire, a :obj:`FireStarted` message is sent to *PREFIX/fire/location*.
        """
        # Fixed: the original tested `property_name == "time"` twice in a row.
        if property_name == "time":
            # Fires whose ignition falls inside the (old_value, new_value] window.
            new_fires = self.fires[
                (self.fires.start <= new_value) & (self.fires.start > old_value)
            ]
            for index, fire in new_fires.iterrows():
                print(f"fireId: {fire.fireId}")
                self.app.send_message(
                    "location",
                    FireStarted(
                        fireId=fire.fireId,
                        start=fire.start,
                        latitude=fire.latitude,
                        longitude=fire.longitude,
                    ).json(),
                )

    def on_fire(self, client, userdata, message):
        """Mark the fire named in a FireStarted message as started."""
        start = FireStarted.parse_raw(message.payload)
        for key in self.fires.index:
            if key == start.fireId:
                # .at is the documented scalar setter; the original relied on
                # chained assignment with the pandas warning silenced.
                self.fires.at[key, "fireState"] = FireState.started
                break

    def on_detected(self, client, userdata, message):
        """Record first-detection time and detecting satellite for a fire."""
        detect = FireDetected.parse_raw(message.payload)
        for key in self.fires.index:
            if key == detect.fireId:
                self.fires.at[key, "fireState"] = FireState.detected
                self.fires.at[key, "detected"] = detect.detected
                self.fires.at[key, "detected_by"] = detect.detected_by
                break

    def on_reported(self, client, userdata, message):
        """Record first-report time, reporting satellite, and ground station."""
        report = FireReported.parse_raw(message.payload)
        for key in self.fires.index:
            if key == report.fireId:
                self.fires.at[key, "fireState"] = FireState.reported
                self.fires.at[key, "reported"] = report.reported
                self.fires.at[key, "reported_by"] = report.reported_by
                self.fires.at[key, "reported_to"] = report.reported_to
                break
def on_fire(client, userdata, message):
    """
    *Callback function parses a FireStarted message and switches FireState from "undefined" to "started"*
    """
    # Forward the raw MQTT message to every Environment observer.
    for observer in app.simulator._observers:
        if isinstance(observer, Environment):
            observer.on_fire(client, userdata, message)
def on_detected(client, userdata, message):
    """
    *Callback function parses a FireDetected message, switches FireState from "started" to "detected", and records time of first detection and name of satellite detecting the fire*
    """
    # Forward the raw MQTT message to every Environment observer.
    for observer in app.simulator._observers:
        if isinstance(observer, Environment):
            observer.on_detected(client, userdata, message)
def on_reported(client, userdata, message):
    """
    *Callback function parses a FireReported message, switches FireState from "detected" to "reported", and records time of first report, name of satellite reporting the fire, and groundId receiving the report*
    """
    # Forward the raw MQTT message to every Environment observer.
    for observer in app.simulator._observers:
        if isinstance(observer, Environment):
            observer.on_reported(client, userdata, message)
# name guard used to ensure script only executes if it is run as the __main__
if __name__ == "__main__":
    # Note that these are loaded from a .env file in current working directory
    credentials = dotenv_values(".env")
    HOST, PORT = credentials["SMCE_HOST"], int(credentials["SMCE_PORT"])
    USERNAME, PASSWORD = credentials["SMCE_USERNAME"], credentials["SMCE_PASSWORD"]
    # set the client credentials
    config = ConnectionConfig(USERNAME, PASSWORD, HOST, PORT, True)
    # create the managed application
    app = ManagedApplication("fire")
    # import csv file from fire_scenarios subdirectory with scenario defining locations and ignition datetimes of fires
    csvFile = importlib.resources.open_text("fire_scenarios", "first5days.csv")
    # Read the csv file and convert to a DataFrame with initial column defining the index
    df = pd.read_csv(csvFile, index_col=0)
    # Normalize the scenario into the columns the Environment observer expects.
    fires = pd.DataFrame(
        data={
            "fireId": df.index,
            "start": pd.to_datetime(df["start_time"], utc=True),
            "latitude": df["latitude"],
            "longitude": df["longitude"],
        }
    )
    # Add blank columns to data frame for logging state, detection time, reporting time, and detector satellite
    # (the 1900-01-01 sentinel marks "not yet detected/reported")
    fires.insert(1, "fireState", FireState.undefined)
    fires.insert(3, "detected", datetime(1900, 1, 1, tzinfo=timezone.utc))
    fires.insert(4, "detected_by", "Undetected")
    fires.insert(5, "reported", datetime(1900, 1, 1, tzinfo=timezone.utc))
    fires.insert(6, "reported_by", "Unreported")
    fires.insert(7, "reported_to", None)
    # add the environment observer to monitor for fire status events
    app.simulator.add_observer(Environment(app, fires))
    # add a shutdown observer to shut down after a single test case
    app.simulator.add_observer(ShutDownObserver(app))
    # start up the application on PREFIX, publish time status every 10 seconds of wallclock time
    app.start_up(
        PREFIX,
        config,
        True,
        time_status_step=timedelta(seconds=10) * SCALE,
        time_status_init=datetime(2020, 1, 1, 7, 20, tzinfo=timezone.utc),
        time_step=timedelta(seconds=1) * SCALE,
    )
    # add message callbacks for fire ignition, detection, and report
    app.add_message_callback("fire", "location", on_fire)
    app.add_message_callback("constellation", "detected", on_detected)
    app.add_message_callback("constellation", "reported", on_reported)
| StarcoderdataPython |
# Generated by Django 2.0.6 on 2019-03-13 17:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Enterprise.code (nullable CharField) and Enterprise.persons
    (M2M to Cliente with a limit_choices_to filter)."""

    dependencies = [
        ('misclientes', '0048_auto_20190301_1753'),
    ]

    operations = [
        migrations.AlterField(
            model_name='enterprise',
            name='code',
            field=models.CharField(max_length=15, null=True, verbose_name='Codigo'),
        ),
        migrations.AlterField(
            model_name='enterprise',
            name='persons',
            # NOTE(review): 'cogido' looks like a possible typo for a Cliente
            # field name — confirm against the Cliente model; a wrong key here
            # raises FieldError when the admin form renders choices.
            field=models.ManyToManyField(limit_choices_to={'cogido': False}, related_name='personas', to='misclientes.Cliente'),
        ),
    ]
| StarcoderdataPython |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# === About ============================================================================================================
"""
getArticleURLController.py
Copyright © 2017 <NAME>.
This software is released under the MIT License.
Version: 1.0.0
TranslateAuthors: <NAME>
E-mail: <EMAIL>
Website: http://operantroom.com
Created: 2017/12/09
Device: MacBook Pro (Retina, 13-inch, Mid 2015)
OS: macOS Serria version 10.12.6
IDE: PyCharm Community Edition 2017.2.4
Python: 3.6.1
"""
# --- References ---
# --- notes ---
# --- Information ---
# --- Circumstances ---
# === import ===========================================================================================================
""" Standard library """
""" Third party library """
""" Local library """
from getdoi.getArticleURL.getArticleURLController import GetArticleURLControllerImpl
from getdoi.articleinfo.articleInfo import ArticleInfo
# === CONSTANTS ========================================================================================================
# === User Parameters ==================================================================================================
# === variables ========================================================================================================
# ======================================================================================================================
class ReadEnteredTextStandAloneImpl:
    """Interactive console reader.

    read() returns the entered line (after echoing it), or None when the user
    typed one of the recognized exit commands.
    """
    # -- variables --
    __display_input_text = '> '
    __display_output_text = '>> Read: '
    # Usage:
    #   reader = ReadEnteredTextStandAloneImpl()
    #   while True:
    #       text = reader.read()
    #       if text is None:
    #           break

    def read(self) -> str or None:
        """Prompt once; return the entered text, or None on an exit command."""
        entered_str = input(self.__display_input_text)
        if self.__decision_exit(entered_str):
            # if user display_inputs exit meaning then exit
            return None
        print('{0}{1}'.format(self.__display_output_text, entered_str))
        return entered_str

    def __decision_exit(self, text) -> bool:
        """True when *text* is one of the recognized exit commands."""
        # -- constants --
        EXIT_TEXTS = ['e', '-e', 'exit', 'exit()', 'Exit', 'Exit()']
        # membership test uses == on each candidate, matching the original loop
        return text in EXIT_TEXTS
# ======================================================================================================================
class GetArticleURLControllerStandAloneImpl:
    # NOTE(review): this entire body runs at class-*definition* time (there are
    # no methods); importing this module therefore starts the interactive loop.
    print('-STAND ALONE MODE- getArticleURLController.py')
    print('Get the article URL for your entered citation.')
    getter = GetArticleURLControllerImpl()
    reader = ReadEnteredTextStandAloneImpl()
    # Example inputs:
    # <NAME>., <NAME>., <NAME>., & <NAME>. (2017). Operant models of relapse in zebrafish (Danio rerio): Resurgence, renewal, and reinstatement. Behavioural brain research, 335, 215-222.
    # <NAME>.
    # Operant models of relapse in zebrafish (Danio rerio).
    # Loop until the user enters an exit command (reader returns None) or
    # declines to proceed.
    while True:
        print()
        print('Enter the first author...')
        first_author = reader.read()
        if first_author is None:
            # exit
            break
        else:
            print()
            print('Enter the article main title...')
            main_title = reader.read()
            if main_title is None:
                # exit
                break
            else:
                # Build the lookup request from the two entered fields.
                article_info = ArticleInfo()
                article_info.first_author = first_author
                article_info.article_main_title = main_title
                print('1st author: {0}'.format(article_info.first_author))
                print('Main Title: {0}'.format(article_info.article_main_title))
                print()
                print('Proceed (y/n)?')
                proceed = reader.read()
                if proceed is None or proceed == 'n':
                    # exit
                    break
                elif proceed == 'y':
                    result = getter.get(article_info=article_info)
                    print('Article URL: {0}'.format(result))
                else:
                    print('Your response ({0}) was not one of the expected responses: y, n'.format(proceed))
# ======================================================================================================================
# NOTE(review): the interactive loop actually runs when the class above is
# defined (on import); this instantiation itself does no additional work.
if __name__ == '__main__':
    main = GetArticleURLControllerStandAloneImpl()
| StarcoderdataPython |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: illuz <<EMAIL>uzen[at]gmail.com>
# File: AC_stack_n.py
# Create Date: 2015-07-26 10:53:38
# Usage: AC_stack_n.py
# Descripton:
# Definition for a binary tree node.
class TreeNode:
    """A binary tree node holding a value and optional child links."""

    def __init__(self, x):
        self.val = x
        # Children start unset; callers attach subtrees explicitly.
        self.left = self.right = None
class Solution:
    # @param {TreeNode} root
    # @return {TreeNode}
    def invertTree(self, root):
        """Mirror the tree in place with an explicit DFS stack; return root."""
        pending = [root]
        while pending:
            node = pending.pop()
            if not node:
                continue
            # Swap children, then schedule both subtrees for the same treatment.
            node.left, node.right = node.right, node.left
            pending.extend((node.left, node.right))
        return root
| StarcoderdataPython |
from dataclasses import dataclass, field
@dataclass
class TaskRunnerConfig:
    """Configuration for a task run."""
    # Scenario tags to restrict the run to; an empty list presumably means
    # "run everything" — TODO confirm with the consumer of this config.
    onlyRunScenarioTags: list[str] = field(default_factory=list)
    # Feature files to load/execute.
    featureFiles: list[str] = field(default_factory=list)
| StarcoderdataPython |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .base import Type
class Mp4(Type):
    """
    Implements the MP4 video type matcher.
    """
    MIME = 'video/mp4'
    EXTENSION = 'mp4'

    def __init__(self):
        super(Mp4, self).__init__(
            mime=Mp4.MIME,
            extension=Mp4.EXTENSION
        )

    def match(self, buf):
        # NOTE(review): Python precedence groups this as
        #   len(buf) > 27 and ((000 + 'ftyp' sized 0x18/0x20) or ('3gp5') or
        #                      (00 00 00 1C 'ftyp' 'mp42' ... 'isom'))
        # i.e. the leading "buf[0..2] == 0" only guards the first alternative.
        # The '3gp5' branch inspects bytes 0-3 only but still requires
        # len(buf) > 27 — confirm that is intended.
        return (len(buf) > 27 and
                (buf[0] == 0x0 and buf[1] == 0x0 and
                 buf[2] == 0x0 and
                 ((buf[3] == 0x18 or
                   buf[3] == 0x20) and
                  buf[4] == 0x66 and
                  buf[5] == 0x74 and buf[6] == 0x79 and
                  buf[7] == 0x70) or
                 (buf[0] == 0x33 and buf[1] == 0x67 and
                  buf[2] == 0x70 and buf[3] == 0x35) or
                 (buf[0] == 0x0 and buf[1] == 0x0 and
                  buf[2] == 0x0 and buf[3] == 0x1C and
                  buf[4] == 0x66 and buf[5] == 0x74 and
                  buf[6] == 0x79 and buf[7] == 0x70 and
                  buf[8] == 0x6D and buf[9] == 0x70 and
                  buf[10] == 0x34 and buf[11] == 0x32 and
                  buf[16] == 0x6D and buf[17] == 0x70 and
                  buf[18] == 0x34 and buf[19] == 0x31 and
                  buf[20] == 0x6D and buf[21] == 0x70 and
                  buf[22] == 0x34 and buf[23] == 0x32 and
                  buf[24] == 0x69 and buf[25] == 0x73 and
                  buf[26] == 0x6F and buf[27] == 0x6D)))
class M4v(Type):
    """
    Implements the M4V video type matcher.
    """
    MIME = 'video/x-m4v'
    EXTENSION = 'm4v'

    def __init__(self):
        super(M4v, self).__init__(
            mime=M4v.MIME,
            extension=M4v.EXTENSION
        )

    def match(self, buf):
        # 00 00 00 1C 'f' 't' 'y' 'p' 'M' '4' 'V'
        signature = (0x0, 0x0, 0x0, 0x1C, 0x66, 0x74,
                     0x79, 0x70, 0x4D, 0x34, 0x56)
        return (len(buf) > 10 and
                all(buf[i] == byte for i, byte in enumerate(signature)))
class Mkv(Type):
    """
    Implements the MKV video type matcher.
    """
    MIME = 'video/x-matroska'
    EXTENSION = 'mkv'

    def __init__(self):
        super(Mkv, self).__init__(
            mime=Mkv.MIME,
            extension=Mkv.EXTENSION
        )

    def match(self, buf):
        # EBML magic immediately followed by the "matroska" doctype bytes.
        head = (0x1A, 0x45, 0xDF, 0xA3, 0x93, 0x42, 0x82, 0x88,
                0x6D, 0x61, 0x74, 0x72, 0x6F, 0x73, 0x6B, 0x61)
        if len(buf) > 15 and all(buf[i] == byte for i, byte in enumerate(head)):
            return True
        # Alternatively, the "matroska" doctype at offsets 31..38.
        doctype = (0x6D, 0x61, 0x74, 0x72, 0x6F, 0x73, 0x6B, 0x61)
        return (len(buf) > 38 and
                all(buf[31 + i] == byte for i, byte in enumerate(doctype)))
class Webm(Type):
    """
    Implements the WebM video type matcher.
    """
    MIME = 'video/webm'
    EXTENSION = 'webm'

    def __init__(self):
        super(Webm, self).__init__(
            mime=Webm.MIME,
            extension=Webm.EXTENSION
        )

    def match(self, buf):
        # EBML magic: 1A 45 DF A3
        signature = (0x1A, 0x45, 0xDF, 0xA3)
        return (len(buf) > 3 and
                all(buf[i] == byte for i, byte in enumerate(signature)))
class Mov(Type):
    """
    Implements the MOV video type matcher.
    """
    MIME = 'video/quicktime'
    EXTENSION = 'mov'

    def __init__(self):
        super(Mov, self).__init__(
            mime=Mov.MIME,
            extension=Mov.EXTENSION
        )

    def match(self, buf):
        # 00 00 00 14 'f' 't' 'y' 'p'
        signature = (0x0, 0x0, 0x0, 0x14, 0x66, 0x74, 0x79, 0x70)
        return (len(buf) > 7 and
                all(buf[i] == byte for i, byte in enumerate(signature)))
class Avi(Type):
    """
    Implements the AVI video type matcher.
    """
    MIME = 'video/x-msvideo'
    EXTENSION = 'avi'

    def __init__(self):
        super(Avi, self).__init__(
            mime=Avi.MIME,
            extension=Avi.EXTENSION
        )

    def match(self, buf):
        # 'RIFF' at offset 0 and 'AVI' at offset 8 (bytes 4-7 hold the size).
        signature = ((0, 0x52), (1, 0x49), (2, 0x46), (3, 0x46),
                     (8, 0x41), (9, 0x56), (10, 0x49))
        return (len(buf) > 10 and
                all(buf[i] == byte for i, byte in signature))
class Wmv(Type):
    """
    Implements the WMV video type matcher.
    """
    MIME = 'video/x-ms-wmv'
    EXTENSION = 'wmv'

    def __init__(self):
        super(Wmv, self).__init__(
            mime=Wmv.MIME,
            extension=Wmv.EXTENSION
        )

    def match(self, buf):
        # Leading bytes of the ASF header GUID.
        signature = (0x30, 0x26, 0xB2, 0x75, 0x8E,
                     0x66, 0xCF, 0x11, 0xA6, 0xD9)
        return (len(buf) > 9 and
                all(buf[i] == byte for i, byte in enumerate(signature)))
class Flv(Type):
    """
    Implements the FLV video type matcher.
    """
    MIME = 'video/x-flv'
    EXTENSION = 'flv'

    def __init__(self):
        super(Flv, self).__init__(
            mime=Flv.MIME,
            extension=Flv.EXTENSION
        )

    def match(self, buf):
        # 'F' 'L' 'V' 0x01
        signature = (0x46, 0x4C, 0x56, 0x01)
        return (len(buf) > 3 and
                all(buf[i] == byte for i, byte in enumerate(signature)))
class Mpeg(Type):
    """
    Implements the MPEG video type matcher.
    """
    MIME = 'video/mpeg'
    EXTENSION = 'mpg'

    def __init__(self):
        super(Mpeg, self).__init__(
            mime=Mpeg.MIME,
            extension=Mpeg.EXTENSION
        )

    def match(self, buf):
        # MPEG start code 00 00 01 followed by a byte in [0xB0, 0xBF].
        return (len(buf) > 3 and
                buf[0] == 0x0 and buf[1] == 0x0 and buf[2] == 0x1 and
                0xb0 <= buf[3] <= 0xbf)
# Repository: chaosannals/trial-python
def selection_sort(source):
    """Selection sort.

    Returns a sorted copy of *source*; the input is left untouched.
    Unstable; average time complexity O(n^2).
    """
    result = source[:]
    count = len(result)
    for head in range(count - 1):
        chosen = head
        for probe in range(head + 1, count):
            # Keep `chosen` only while strictly smaller; on ties the later
            # index wins, matching the original selection rule.
            if not result[chosen] < result[probe]:
                chosen = probe
        result[head], result[chosen] = result[chosen], result[head]
    return result
| StarcoderdataPython |
# Repository: motazsaad/face-count
# coding: utf-8
import numpy as np
import os
import warnings
import logging
#import xml.etree.ElementTree as ET
import scipy.io
import chainer
from chainercv.utils import read_image
class WIDERFACEDataset(chainer.dataset.DatasetMixin):
    """WIDER FACE face-detection dataset.

    Parses the official MATLAB annotation file once at construction, then
    serves (image, bbox, label) examples keyed by image file path.

    Args:
        data_dir: root directory containing the ``images/`` tree.
        label_mat_file: path to the WIDER FACE ``.mat`` annotation file.
        use_difficult: include boxes marked invalid when False-filtered otherwise.
        return_difficult: also return the per-box difficult flags.
        exclude_file_list: optional collection of file names to skip.
        logger: optional logger; image paths are logged at debug level.
    """

    def __init__(self, data_dir, label_mat_file,
                 use_difficult=False, return_difficult=False,
                 exclude_file_list=None, logger=None):
        # id_list_file = os.path.join(
        #     data_dir, 'ImageSets/Main/{0}.txt'.format(split))
        # self.ids = [id_.strip() for id_ in open(id_list_file)]
        self.data_dir = data_dir
        self.label_mat_file = label_mat_file
        self.use_difficult = use_difficult
        self.return_difficult = return_difficult
        self.logger = logger  # used to trace which image is being loaded
        # Walk the event/file structure of the .mat annotations and index
        # every image path with its boxes and flags.
        mat = scipy.io.loadmat(self.label_mat_file)
        self.ids = []
        self.bboxs = {}
        self.labels = {}
        self.difficult = {}
        for i in range(len(mat['event_list'])):
            event = mat['event_list'][i,0][0]
            for j in range(len(mat['file_list'][i,0])):
                file = mat['file_list'][i,0][j,0][0]
                filename = "{}.jpg".format(file)
                filepath = os.path.join(data_dir, 'images', event, filename)
                if exclude_file_list != None and filename in exclude_file_list:
                    continue
                # bounding boxes and labels of the picture file
                bboxs = mat['face_bbx_list'][i,0][j,0]
                # convert from (x, y, w, h) to (y1, x1, y2, x2)
                swapped_bbxs = bboxs[:, [1,0,3,2]] # (y,x,h,w)
                swapped_bbxs[:,2:4] = swapped_bbxs[:,2:4] + swapped_bbxs[:,0:2]
                invalid_labels = mat['invalid_label_list'][i,0][j,0].ravel()
                # The following per-box attributes are extracted but currently
                # unused — kept for parity with the annotation format.
                pose_labels = mat['pose_label_list'][i,0][j,0].ravel()
                illum_labels = mat['illumination_label_list'][i,0][j,0].ravel()
                occlusion_labels = mat['occlusion_label_list'][i,0][j,0].ravel()
                blur_labels = mat['blur_label_list'][i,0][j,0].ravel()
                expression_labels = mat['expression_label_list'][i,0][j,0].ravel()
                self.ids.append(filepath)
                self.bboxs[filepath] = swapped_bbxs.astype(np.float32)
                self.labels[filepath] = np.zeros(len(bboxs), dtype=np.int32) #dummy, always 0
                # "invalid" boxes double as the difficult flags.
                self.difficult[filepath] = invalid_labels

    def __len__(self):
        # Number of indexed images (not boxes).
        return len(self.ids)

    def get_example(self, i):
        """Returns the i-th example.

        Returns a color image and bounding boxes. The image is in CHW format.
        The returned image is RGB.

        Args:
            i (int): The index of the example.

        Returns:
            tuple of an image and bounding boxes
        """
        id_ = self.ids[i]
        bbox = self.bboxs[id_].astype(np.float32)
        label = self.labels[id_].astype(np.int32)
        difficult = self.difficult[id_].astype(np.bool)
        if not self.use_difficult:
            # Drop boxes flagged difficult/invalid.
            bbox = bbox[np.where(difficult==False)]
            label = label[np.where(difficult==False)]
            difficult = difficult[np.where(difficult==False)]
        # Load a image
        img_file = id_
        img = read_image(img_file, color=True)
        #print(img_file)
        if self.logger:
            self.logger.debug(img_file)
        if self.return_difficult:
            return img, bbox, label, difficult
        return img, bbox, label
def test():
    """Interactive smoke test: load the training annotations and drop into pdb.

    Requires the WIDER_train images and wider_face_split annotations to be
    present in the working directory.
    """
    a = WIDERFACEDataset('WIDER_train', 'wider_face_split/wider_face_train.mat')
    id_ = r'WIDER_train\images\36--Football\36_Football_americanfootball_ball_36_571.jpg'
    bbx = a.bboxs[id_]
    lbl = a.labels[id_]
    # NOTE(review): debugger breakpoint intentionally left for manual inspection.
    import pdb; pdb.set_trace()
if __name__ == '__main__':
    # Fixed: a stray dataset artifact token ("| StarcoderdataPython") was
    # fused onto this call and would raise NameError.
    test()
# File: presqt/targets/osf/tests/views/resource/test_resource_collection.py
from django.test import SimpleTestCase
from rest_framework.reverse import reverse
from rest_framework.test import APIClient
from config.settings.base import OSF_PRESQT_FORK_TOKEN, OSF_TEST_USER_TOKEN
class TestResourceCollection(SimpleTestCase):
    """
    Test the 'api_v1/targets/osf/resources/' endpoint's GET method.

    Testing OSF integration.
    """

    def setUp(self):
        # Two clients: a normal test user and a user owning a large project.
        self.client = APIClient()
        self.header = {'HTTP_PRESQT_SOURCE_TOKEN': OSF_TEST_USER_TOKEN}
        self.large_project_header = {'HTTP_PRESQT_SOURCE_TOKEN': OSF_PRESQT_FORK_TOKEN}

    def test_success(self):
        """
        Return a 200 if the GET method is successful when grabbing OSF resources.
        """
        url = reverse('resource_collection', kwargs={'target_name': 'osf'})
        response = self.client.get(url, **self.header)
        # Verify the Status Code
        self.assertEqual(response.status_code, 200)
        # Verify the dict keys match what we expect
        keys = ['kind', 'kind_name', 'id', 'container', 'title', 'links']
        for data in response.data['resources']:
            self.assertListEqual(keys, list(data.keys()))
            self.assertEqual(len(data['links']), 1)
        # Verify the count of resource objects is what we expect.
        self.assertEqual(1, len(response.data['resources']))
        self.assertEqual(response.data['pages']['total_pages'], 1)

    def test_success_with_search(self):
        """
        Return a 200 if the GET method is successful when grabbing OSF resources with a search query.
        """
        url = reverse('resource_collection', kwargs={'target_name': 'osf'})
        # Search by title (URL-encoded phrase) with explicit paging.
        response = self.client.get(url+'?title=hcv+and+nhl+risk&page=1', **self.header)
        # Verify the Status Code
        self.assertEqual(response.status_code, 200)
        # Verify the dict keys match what we expect
        keys = ['kind', 'kind_name', 'id', 'container', 'title', 'links']
        for data in response.data['resources']:
            self.assertListEqual(keys, list(data.keys()))
        # Ensure we are only get back the first page of results
        response = self.client.get(url + '?title=egg', **self.header)
        # Verify the Status Code
        self.assertEqual(response.status_code, 200)
        # Verify the dict keys match what we expect
        keys = ['kind', 'kind_name', 'id', 'container', 'title', 'links']
        for data in response.data['resources']:
            self.assertListEqual(keys, list(data.keys()))
        ### Search by ID ###
        response = self.client.get(url+'?id=zxbhs', **self.header)
        # Verify the Status Code
        self.assertEqual(response.status_code, 200)
        # Verify the dict keys match what we expect
        keys = ['kind', 'kind_name', 'id', 'container', 'title', 'links']
        for data in response.data['resources']:
            self.assertListEqual(keys, list(data.keys()))
        # Search By Author
        response = self.client.get(url+'?author=Prometheus&page=1', **self.header)
        # Verify the Status Code
        self.assertEqual(response.status_code, 200)
        # Verify the dict keys match what we expect
        keys = ['kind', 'kind_name', 'id', 'container', 'title', 'links']
        for data in response.data['resources']:
            self.assertListEqual(keys, list(data.keys()))
        # Search by Keywords
        response = self.client.get(url + "?keywords=egg", **self.header)
        # Verify the status code
        self.assertEqual(response.status_code, 200)
        # Verify the dict keys match what we expect
        keys = ['kind', 'kind_name', 'id', 'container', 'title', 'links']
        for data in response.data['resources']:
            self.assertListEqual(keys, list(data.keys()))

    def test_success_large_project(self):
        """
        Return a 200 if the GET method is successful when grabbing OSF resources.
        """
        url = reverse('resource_collection', kwargs={'target_name': 'osf'})
        response = self.client.get(url, **self.large_project_header)
        # Verify the Status Code
        self.assertEqual(response.status_code, 200)
        # Verify the dict keys match what we expect
        keys = ['kind', 'kind_name', 'id', 'container', 'title', 'links']
        for data in response.data['resources']:
            self.assertListEqual(keys, list(data.keys()))
        # Verify the count of resource objects is what we expect.
        self.assertEqual(1, len(response.data['resources']))

    def test_error_401_invalid_token(self):
        """
        Return a 401 if the token provided is not a valid token.
        """
        client = APIClient()
        header = {'HTTP_PRESQT_SOURCE_TOKEN': 'bad_token'}
        url = reverse('resource_collection', kwargs={'target_name': 'osf'})
        response = client.get(url, **header)
        # Verify the error status code and message
        self.assertEqual(response.status_code, 401)
        self.assertEqual(response.data,
                         {'error': "Token is invalid. Response returned a 401 status code."})

    def test_error_400_bad_search_parameters(self):
        """
        If a bad search request is made, we want to make the user aware.
        """
        url = reverse('resource_collection', kwargs={'target_name': 'osf'})
        # TOO MANY KEYS
        response = self.client.get(url + '?title=hat&spaghetti=egg&banana=TRUE', **self.header)
        self.assertEqual(response.data['error'],
                         'PresQT Error: The search query is not formatted correctly.')
        self.assertEqual(response.status_code, 400)
        # BAD KEY
        response = self.client.get(url + '?spaghetti=egg', **self.header)
        self.assertEqual(
            response.data['error'], 'PresQT Error: OSF does not support spaghetti as a search parameter.')
        self.assertEqual(response.status_code, 400)
        # SPECIAL CHARACTERS IN THE REQUEST
        response = self.client.get(url + '?title=egg:boi', **self.header)
        self.assertEqual(response.data['error'],
                         'PresQT Error: The search query is not formatted correctly.')
        self.assertEqual(response.status_code, 400)
        # EMPTY SEARCH -- NOT AN ERROR
        response = self.client.get(url + '?title=', **self.header)
        # Should return users resources
        self.assertEqual(response.status_code, 200)
        # Verify the count of resource objects is what we expect.
        self.assertEqual(1, len(response.data['resources']))
| StarcoderdataPython |
# File: models/multihead_builders.py
import sys
from pathlib import Path
from typing import Dict, NewType, Union
import numpy as np
import torch
import torch.nn as nn
root_path = Path(__file__).resolve().parents[1]
if str(root_path) not in sys.path:
print(f"Adding pipeline tf2 root in sys.path: {root_path}")
sys.path.append(str(root_path))
# from models.architectures.clova_resnet import ClovaResNetSE
# from models.architectures.magneto import MagNetOResNet
# from models.architectures.rawnet import RawNet
# from models.features.spectrograms import MelSpecAug, SpectrogramAug
# from models.features.spectrograms_tf import SpectralFeaturesTF
from models.poolings.stats import STAT_POOLINGS
class ClassificationHead(nn.Module):
    """MLP classification head: [Linear -> BatchNorm1d -> activation -> Dropout]* -> Linear.

    Args:
        num_classes: size of the final projection; if None, the final Linear
            is omitted and the head outputs the last hidden representation.
        input_features_chan: number of input features.
        head_hidden_layers: sequence of (units, dropout_rate, activation_name)
            tuples; activation_name is looked up on torch.nn (e.g. "ReLU") or
            None to skip the activation; dropout is added only when rate > 0.
            Defaults to [(256, 0.5, "ReLU")].
    """

    def __init__(self,
                 num_classes: int,
                 input_features_chan: int,
                 head_hidden_layers=None):
        super(ClassificationHead, self).__init__()
        if head_hidden_layers is None:
            # None-sentinel avoids the original mutable default argument.
            head_hidden_layers = [(256, 0.5, "ReLU")]
        layers = []
        in_features = input_features_chan
        # Fixed: removed the unused `input_channels` local and the unused
        # enumerate index from the original.
        for num_units, dropout_rate, activ in head_hidden_layers:
            layers.append(nn.Linear(in_features, num_units, bias=True))
            layers.append(nn.BatchNorm1d(num_units))
            in_features = num_units
            if activ is not None:
                layers.append(getattr(nn, activ)())
            if dropout_rate > 0:
                layers.append(nn.Dropout(p=dropout_rate))
        if num_classes is not None:
            # Use the running feature width so an empty hidden spec also works
            # (the original indexed head_hidden_layers[-1] and crashed on []).
            layers.append(nn.Linear(in_features, num_classes, bias=True))
        self.fc_net = nn.Sequential(*layers)

    def forward(self, x):
        """Apply the head to a (batch, input_features_chan) tensor."""
        return self.fc_net(x)
class StatsPooling2D(nn.Module):
    """Flatten all non-batch, non-last dimensions and apply a statistics
    pooling function over the last (time) axis.

    Args:
        mode: key into STAT_POOLINGS selecting the statistic (default "var").
    """

    def __init__(self, mode="var"):
        super(StatsPooling2D, self).__init__()
        self.mode = mode

    def forward(self, x):
        # Fixed: removed the unused `s = x.size()` local and the dead
        # commented-out view() line.
        # Collapse to (batch, features, time); assumes time is the last axis
        # — TODO confirm with upstream backbone output layout.
        x = x.reshape(x.size()[0], -1, x.size()[-1])
        return STAT_POOLINGS[self.mode](x, dim=2)
class StatsPooling1D(nn.Module):
    """Apply a statistics pooling function over dim 2 of a 3-D input.

    Args:
        mode: key into STAT_POOLINGS selecting the statistic (default "var").
    """

    def __init__(self, mode="var"):
        super(StatsPooling1D, self).__init__()
        self.mode = mode

    def forward(self, x):
        # Fixed: removed the unused `s = x.size()` local.
        return STAT_POOLINGS[self.mode](x, dim=2)
def get_params_count(model: nn.Module):
    """Return (trainable, total) parameter element counts for *model*."""
    trainable = 0
    total = 0
    for param in model.parameters():
        count = param.numel()
        total += count
        if param.requires_grad:
            trainable += count
    return trainable, total
# Fixed: the NewType was declared with the mismatched name "Strides".
# A value that is either an instantiated nn.Module or a config dict
# ({"type": ..., "params": ...}) describing how to build one.
ModuleOrConfig = NewType("ModuleOrConfig", Union[nn.Module, Dict])
def resolve_model_or_conf(mod_or_conf: "ModuleOrConfig"):
    """Resolve a module-or-config value into an nn.Module (or None).

    - None passes through unchanged.
    - A dict {"type": name, "params": {...}, "trainable": bool?} is built by
      evaluating `type` in this module's namespace; `trainable` (default True,
      None = leave as-is) toggles requires_grad on every parameter.
    - An nn.Module instance is returned as-is.

    Raises:
        NotImplementedError: for any other input type.
    """
    print(mod_or_conf)  # NOTE(review): debug print left in; consider logging.
    if mod_or_conf is None:
        return mod_or_conf
    if isinstance(mod_or_conf, dict):
        # SECURITY: eval() on the config's "type" executes arbitrary code if
        # configs are untrusted — restrict to a whitelist for external input.
        module = eval(mod_or_conf["type"])(**mod_or_conf["params"])
        trainable = mod_or_conf.get("trainable", True)
        if trainable is not None:
            for param in module.parameters():
                param.requires_grad = trainable
            print(
                f"{mod_or_conf['type']} trainable {trainable}, params counts : {get_params_count(module)}"
            )
        return module
    elif isinstance(mod_or_conf, nn.Module):
        return mod_or_conf
    else:
        # Fixed: the original `raise NotImplemented()` raised a TypeError
        # (NotImplemented is not callable); NotImplementedError is intended.
        raise NotImplementedError(
            "Cannot resolve object of type {}".format(type(mod_or_conf)))
class MultiheadModel(nn.Module):
    """Two-headed audio model: features -> backbone -> pooling, then parallel
    phone and speaker classification heads.

    Each component may be passed as an instantiated nn.Module or as a config
    dict resolved by resolve_model_or_conf().

    NOTE(review): the default `features` and `backbone` configs name
    MelSpecAug / MagNetOResNet, whose imports are commented out at the top of
    this file — constructing MultiheadModel() with defaults would fail in
    eval(); confirm the intended imports.
    """

    def __init__(
            self,
            features: ModuleOrConfig = {
                "type":
                    "MelSpecAug",
                "params":
                    dict(
                        extend_spec_channels=1,
                        use_specaug=False,
                        f_min=20.,
                        f_max=7500.,
                        n_fft=512,
                        win_length=400,  # 0.025 sec * 16000
                        hop_length=160,  # 0.010 sec * 16000
                        n_mels=80,
                        log_mels=True,
                        sample_rate=16000,
                    ),
            },
            backbone: ModuleOrConfig = {
                "type":
                    "MagNetOResNet",
                "params":
                    dict(
                        init_conv_params=dict(
                            in_channels=1,
                            out_channels=32,
                            stride=1,
                            kernel_size=3,
                            padding=1,
                        ),
                        block="BasicBlock",
                        block_setup=[
                            # filters, num_blocks, strides
                            (32, 32, 3, 1),
                            (32, 64, 4, 2),
                            (64, 128, 6, 2),
                            (128, 256, 3, 2)
                        ],
                        norm_layer=None)
            },
            pooling: ModuleOrConfig = {
                "type": "StatsPooling2D",
                "params": dict()
            },
            cls_head_phone: ModuleOrConfig = {
                "type":
                    "ClassificationHead",
                "params":
                    dict(input_features_chan=256 * 10 * 2,
                         num_classes=15,
                         head_hidden_layers=[
                             (256, 0.0, "ReLU"),
                         ])
            },
            cls_head_speaker: ModuleOrConfig = {
                "type":
                    "ClassificationHead",
                "params":
                    dict(input_features_chan=256 * 10 * 2,
                         num_classes=8,
                         head_hidden_layers=[
                             (256, 0.0, "ReLU"),
                         ])
            }):
        super(MultiheadModel, self).__init__()
        # Resolve each stage; any stage resolved to None is skipped in forward().
        self.features = resolve_model_or_conf(features)
        self.backbone = resolve_model_or_conf(backbone)
        self.pooling = resolve_model_or_conf(pooling)
        self.cls_head_phone = resolve_model_or_conf(cls_head_phone)
        self.cls_head_speaker = resolve_model_or_conf(cls_head_speaker)

    def forward(self, x):
        """Return (phone_logits, speaker_logits) for waveform batch *x*."""
        if self.features is not None:
            x = self.features(x)
        if self.backbone is not None:
            x = self.backbone(x)
        if self.pooling is not None:
            x = self.pooling(x)
        # Both heads consume the same pooled representation.
        x_phone = self.cls_head_phone(x)
        x_speaker = self.cls_head_speaker(x)
        return x_phone, x_speaker
if __name__ == "__main__":
    # Smoke test: build the default model and run a random 3-second batch.
    # NOTE(review): default configs reference MelSpecAug / MagNetOResNet whose
    # imports are commented out above — confirm this script still runs.
    model = MultiheadModel()
    model.eval()
    # (batch=3, channels=1, samples=48000); `input` shadows the builtin.
    input = torch.from_numpy(np.random.rand(3, 1, 48000).astype(np.float32))
    output = model(input)
    print("output phones shape", output[0].shape)
    print("output speaker shape", output[1].shape)
| StarcoderdataPython |
9616797 | <filename>models/mreasoner/mreasoner.py
import ccobra
import numpy as np
class MReasoner(ccobra.CCobraModel):
    """CCOBRA syllogistic model backed by a precomputed mReasoner cache.

    Instead of running mReasoner itself, predictions are looked up in a
    cache array indexed by the four mReasoner parameters
    (epsilon, lambda, omega, sigma), then by task and response.
    """

    def __init__(self, name='mReasoner'):
        super(MReasoner, self).__init__(name, ['syllogistic'], ['single-choice'])

        # Prepare cache.
        # NOTE(review): path is relative to the working directory — confirm
        # the benchmark always runs from this model's folder.
        # Expected shape: (n_epsilon, n_lambda, n_omega, n_sigma, 64, 9),
        # the last two axes being task and response (see fit/predict).
        self.cache = np.load('cache/2020-09-09-cache-11-10.npy')
        self.n_epsilon, self.n_lambda, self.n_omega, self.n_sigma = self.cache.shape[:-2]

        # Best-fitting parameterization, set by fit(): a list of
        # (cache_index, parameter_value) pairs in
        # (epsilon, lambda, omega, sigma) order.
        self.params = None

        # Normalize cache so each task row is a distribution over the
        # nine response categories.
        self.cache /= self.cache.sum(-1, keepdims=True)

    def end_participant(self, identifier, model_log, **kwargs):
        """Record the fitted parameter values in the model log."""
        paramnames = ['epsilon', 'lambda', 'omega', 'sigma']
        # self.params holds (index, value) pairs; only the value is logged.
        for pname, (_, pval) in zip(paramnames, self.params):
            model_log[pname] = pval

    def pre_train(self, dataset, **kwargs):
        """Fit cache parameters to aggregated task-response counts."""
        # 64 syllogistic tasks x 9 response categories count matrix.
        tdata = np.zeros((64, 9))
        for subj_data in dataset:
            for task_data in subj_data:
                syl = ccobra.syllogistic.Syllogism(task_data['item'])
                enc_task = syl.encoded_task
                enc_resp = syl.encode_response(task_data['response'])

                idx_task = ccobra.syllogistic.SYLLOGISMS.index(enc_task)
                idx_resp = ccobra.syllogistic.RESPONSES.index(enc_resp)
                tdata[idx_task, idx_resp] += 1

        # Perform fitting
        self.fit(tdata)

    def pre_train_person(self, data, **kwargs):
        """Refit using only a single participant's data."""
        self.pre_train([data])

    def fit(self, tdata):
        """Grid-search cached parameterizations against the count matrix.

        The score is the sum of predicted response probabilities weighted
        by observed counts; ties keep the first parameterization visited.
        NOTE(review): the linspace ranges below must mirror the grid used
        to generate the cache file — confirm against the cache generator.
        """
        best_score = -1
        best_params = None

        # Iterate over parameterizations in the cache
        for idx_epsilon, p_epsilon in enumerate(np.linspace(0, 1, self.n_epsilon)):
            for idx_lambda, p_lambda in enumerate(np.linspace(0.1, 8, self.n_lambda)):
                for idx_omega, p_omega in enumerate(np.linspace(0, 1, self.n_omega)):
                    for idx_sigma, p_sigma in enumerate(np.linspace(0, 1, self.n_sigma)):
                        params = (idx_epsilon, idx_lambda, idx_omega, idx_sigma)
                        cache_mat = self.cache[params]

                        # Compare cache with training data
                        score = np.sum(cache_mat * tdata)
                        if score > best_score:
                            best_score = score
                            best_params = list(zip(params, (p_epsilon, p_lambda, p_omega, p_sigma)))

        # Set to best params
        self.params = best_params

    def predict(self, item, **kwargs):
        """Predict the response for a task from the fitted cache slice."""
        # Obtain task information
        syl = ccobra.syllogistic.Syllogism(item)
        enc_task = syl.encoded_task
        idx_task = ccobra.syllogistic.SYLLOGISMS.index(enc_task)

        # Obtain prediction matrix for the fitted parameterization.
        param_idxs = tuple(x[0] for x in self.params)
        cache_mat = self.cache[param_idxs]
        cache_pred = cache_mat[idx_task]

        # Generate prediction; ties between maximal-probability responses
        # are broken uniformly at random.
        pred_idxs = np.arange(len(cache_pred))[cache_pred == cache_pred.max()]
        pred = ccobra.syllogistic.RESPONSES[np.random.choice(pred_idxs)]
        return syl.decode_response(pred)
| StarcoderdataPython |
9631919 | """HADDOCK3 modules."""
from abc import ABC, abstractmethod
from contextlib import contextmanager
from functools import partial
from pathlib import Path
from haddock import EmptyPath, log, modules_defaults_path
from haddock.core.defaults import MODULE_IO_FILE
from haddock.core.exceptions import ConfigurationError
from haddock.gear.config_reader import read_config
from haddock.gear.yaml2cfg import read_from_yaml_config
from haddock.libs.libhpc import HPCScheduler
from haddock.libs.libio import working_directory
from haddock.libs.libmpi import MPIScheduler
from haddock.libs.libontology import ModuleIO
from haddock.libs.libparallel import Scheduler
from haddock.libs.libutil import recursive_dict_update
modules_folder = Path(__file__).resolve().parent
_folder_match_regex = '[a-zA-Z]*/'
modules_category = {
module.name: category.name
for category in modules_folder.glob(_folder_match_regex)
for module in category.glob(_folder_match_regex)
}
"""Indexes each module in its specific category. Keys are Paths to the module,
values are their categories. Categories are the modules parent folders."""
category_hierarchy = [
"topology",
"sampling",
"refinement",
"scoring",
"analysis",
]
# this dictionary defines non-mandatory general parameters that can be defined
# as global parameters thus affect all modules, or, instead, can be defined per
# module where the module definition overwrites global definition. Not all
# modules will use these parameters. It is the responsibility of the module to
# extract the parameters it needs.
# the config file is in modules/defaults.cfg
non_mandatory_general_parameters_defaults = \
read_from_yaml_config(modules_defaults_path)
config_readers = {
".yaml": read_from_yaml_config,
".cfg": read_config,
}
@contextmanager
def _not_valid_config():
    """Translate a missing-config-reader KeyError into ConfigurationError."""
    try:
        yield
    except KeyError as key_error:
        supported = ', '.join(config_readers.keys())
        message = (
            "The configuration file extension is not supported. "
            f"Supported types are {supported}."
        )
        raise ConfigurationError(message) from key_error
class BaseHaddockModule(ABC):
    """
    HADDOCK3 module's base class.

    Manages parameter loading/updating and step input/output bookkeeping;
    concrete modules implement `_run()` and `confirm_installation()`.
    """

    def __init__(self, order, path, params_fname):
        """
        HADDOCK3 modules base class.

        Parameters
        ----------
        order : int
            The module's position (step index) in the workflow.
        path : pathlib.Path
            The module's working directory.
        params_fname : str or pathlib.Path
            Path to a HADDOCK3 configuration file containing the initial
            module parameters. Usually this is defined by the default
            params.
        """
        self.order = order
        self.path = path
        self.previous_io = self._load_previous_io()

        # instantiate module's parameters
        # BUGFIX: this attribute was misspelled "_origignal_config_file",
        # which made reset_params() fail with AttributeError.
        self._original_config_file = params_fname
        self._params = {}
        self.update_params(update_from_cfg_file=params_fname)

    @property
    def params(self):
        """Configuration parameters."""  # noqa: D401
        return self._params

    def reset_params(self):
        """Reset parameters to the ones used to instantiate the class."""
        self._params.clear()
        self.update_params(update_from_cfg_file=self._original_config_file)

    def update_params(self, update_from_cfg_file=None, **params):
        """
        Update the modules parameters.

        Add/update to the current modules parameters the ones given in
        the function call. If you want to entirely replace the modules
        parameters to their default values use the `reset_params()`
        method.

        Update takes places recursively, that is, nested dictionaries
        will be updated accordingly.

        To update the current config with the parameters defined in an
        HADDOCK3 configuration file use the `update_from_cfg_file`
        parameter.

        To update from a JSON file, first load the JSON into a
        dictionary and unpack the dictionary to the function call.

        Examples
        --------
        >>> m.update_params(param1=value1, param2=value2)

        >>> m.update_params(**param_dict)

        >>> m.update_params(update_from_cfg_file=path_to_file)

        # if you wish to start from scratch
        >>> m.reset_params()
        >>> m.update_params(...)
        """
        # A config file and explicit keyword params are mutually
        # exclusive: mixing them would make precedence ambiguous.
        if update_from_cfg_file and params:
            _msg = (
                "You can not provide both `update_from_cfg_file` "
                "and key arguments."
                )
            raise TypeError(_msg)

        if update_from_cfg_file:
            with _not_valid_config():
                extension = Path(update_from_cfg_file).suffix
                params = config_readers[extension](update_from_cfg_file)

        # the updating order is relevant: general defaults < current
        # params < incoming params
        _n = recursive_dict_update(
            non_mandatory_general_parameters_defaults,
            self._params)
        self._params = recursive_dict_update(_n, params)
        self._fill_emptypaths()
        self._confirm_fnames_exist()

    def add_parent_to_paths(self):
        """Add parent path to paths."""
        # convert paths to relative by appending parent
        for key, value in self.params.items():
            if value and key.endswith('_fname'):
                if not Path(value).is_absolute():
                    self.params[key] = Path('..', value)
        return

    def run(self, **params):
        """Execute the module."""
        # NOTE(review): relies on `self.name` being provided by the
        # concrete module class — confirm.
        log.info(f'Running [{self.name}] module')

        self.update_params(**params)
        self.add_parent_to_paths()

        with working_directory(self.path):
            self._run()

        log.info(f'Module [{self.name}] finished.')

    @classmethod
    @abstractmethod
    def confirm_installation(self):
        """
        Confirm the third-party software needed for the module is installed.

        HADDOCK3's own modules should just return.
        """
        return

    def finish_with_error(self, reason="Module has failed."):
        """Finish with error message."""
        # Chain the original exception (if any) so the traceback keeps
        # the root cause.
        if isinstance(reason, Exception):
            raise RuntimeError("Module has failed.") from reason
        else:
            raise RuntimeError(reason)

    def _load_previous_io(self):
        # The first step of a workflow has no previous step to read from.
        if self.order == 0:
            return ModuleIO()

        io = ModuleIO()
        previous_io = self.previous_path() / MODULE_IO_FILE

        if previous_io.is_file():
            io.load(previous_io)

        return io

    def previous_path(self):
        """Give the path from the previous calculation."""
        # Step folders carry a numeric prefix (e.g. "02_<module>") so a
        # lexical sort yields workflow order.
        previous = sorted(list(self.path.resolve().parent.glob('[0-9][0-9]*/')))

        try:
            return previous[self.order - 1]
        except IndexError:
            return self.path

    def log(self, msg, level='info'):
        """
        Log a message with a common header.

        Currently the header is the [MODULE NAME] in square brackets.

        Parameters
        ----------
        msg : str
            The log message.

        level : str
            The level log: 'debug', 'info', ...
            Defaults to 'info'.
        """
        getattr(log, level)(f'[{self.name}] {msg}')

    def _confirm_fnames_exist(self):
        # Validate every "*_fname" parameter that carries a value.
        for param, value in self._params.items():
            if param.endswith('_fname') and value:
                if not Path(value).exists():
                    raise FileNotFoundError(f'File not found: {str(value)!r}')

    def _fill_emptypaths(self):
        """Fill empty paths."""
        # Unset "*_fname" params become EmptyPath sentinels so downstream
        # code can rely on a path-like value being present.
        for param, value in list(self._params.items()):
            if param.endswith('_fname') and not value:
                self._params[param] = EmptyPath()
def get_engine(mode, params):
    """
    Create an engine to run the jobs.

    Parameters
    ----------
    mode : str
        The type of engine to create

    params : dict
        A dictionary containing parameters for the engine.
        `get_engine` will retrieve from `params` only those parameters
        needed and ignore the others.
    """
    # a bit of a factory pattern here
    # this might end up in another module but for now its fine here
    if mode == 'hpc':
        return partial(
            HPCScheduler,
            target_queue=params['queue'],
            queue_limit=params['queue_limit'],
            concat=params['concat'],
            )

    if mode == 'local':
        return partial(Scheduler, ncores=params['ncores'])

    if mode == "mpi":
        return partial(MPIScheduler, ncores=params["ncores"])

    available_engines = ("hpc", "local", "mpi")
    raise ValueError(
        f"Scheduler `mode` {mode!r} not recognized. "
        f"Available options are {', '.join(available_engines)}"
        )
| StarcoderdataPython |
5128277 | <filename>deneme2.py
import locale
import sys

# Turkish collation/formatting for the locale-aware operations below.
# NOTE(review): "tr_TR.utf-8" raises locale.Error on systems where that
# locale is not installed — confirm this is acceptable for this script.
locale.setlocale(locale.LC_ALL, "tr_TR.utf-8")

a = ["ali", "veli", "ahmet", "cengiz"]
e = ["ayse", "vefa", "asli", "ceren"]
b = ""
c = "<NAME> VE iskata"
d = 13

# BUGFIX: the file handle was opened and never closed; use a context
# manager so it is closed even if truncate() raises.
with open("yeni", "r+") as dosya:
    # Cut the file down to its first 19 bytes.
    dosya.truncate(19)
8155440 | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index, date_range
import pandas._testing as tm
@pytest.mark.parametrize("func", ["ffill", "bfill"])
def test_groupby_column_index_name_lost_fill_funcs(func):
    # GH: 29764 groupby loses index sometimes
    idx = pd.Index(["type", "a", "b"], name="idx")
    df = pd.DataFrame(
        [[1, 1.0, -1.0], [1, np.nan, np.nan], [1, 2.0, -2.0]], columns=idx
    )
    grouped = df.groupby(["type"])[["a", "b"]]
    filled = getattr(grouped, func)()
    tm.assert_index_equal(filled.columns, pd.Index(["a", "b"], name="idx"))
@pytest.mark.parametrize("func", ["ffill", "bfill"])
def test_groupby_fill_duplicate_column_names(func):
    # GH: 25610 ValueError with duplicate column names
    left = pd.DataFrame({"field1": [1, 3, 4], "field2": [1, 3, 4]})
    right = pd.DataFrame({"field1": [1, np.nan, 4]})
    grouped = pd.concat([left, right], axis=1).groupby(by=["field2"])
    expected = pd.DataFrame(
        [[1, 1.0], [3, np.nan], [4, 4.0]], columns=["field1", "field1"]
    )
    tm.assert_frame_equal(getattr(grouped, func)(), expected)
def test_ffill_missing_arguments():
    # GH 14955
    df = pd.DataFrame({"a": [1, 2], "b": [1, 1]})
    grouped = df.groupby("b")
    with pytest.raises(ValueError, match="Must specify a fill"):
        grouped.fillna()
def test_fill_consistency():
    # GH9221
    # pass thru keyword arguments to the generated wrapper
    # are set if the passed kw is None (only)
    index = pd.MultiIndex.from_product(
        [["value1", "value2"], date_range("2014-01-01", "2014-01-06")]
    )
    df = DataFrame(index=index, columns=Index(["1", "2"], name="id"))
    df["1"] = [
        np.nan, 1, np.nan, np.nan, 11, np.nan,
        np.nan, 2, np.nan, np.nan, 22, np.nan,
    ]
    df["2"] = [
        np.nan, 3, np.nan, np.nan, 33, np.nan,
        np.nan, 4, np.nan, np.nan, 44, np.nan,
    ]

    # Forward-filling along axis 0 must match forward-filling along
    # axis 1 on the transposed frame.
    expected = df.groupby(level=0, axis=0).fillna(method="ffill")
    result = df.T.groupby(level=0, axis=1).fillna(method="ffill").T
    tm.assert_frame_equal(result, expected)
| StarcoderdataPython |
9663796 | <filename>py/pe/pe8.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Discover the largest product of five consecutive digits
in the 1000-digit number.
"""
def pe8(fname="../../res/pe8.txt", n=5):
    """
    Discover the largest product of n consecutive digits in the digit
    string stored in *fname* (newlines in the file are ignored).

    Parameters
    ----------
    fname : str
        Path to a text file containing the digit string.
    n : int
        Window size (number of consecutive digits).

    Returns
    -------
    int
        The maximum product over all windows of n consecutive digits.

    Raises
    ------
    ValueError
        If the digit string is shorter than n.

    >>> pe8()
    40824
    """
    with open(fname, 'r') as f:
        digits = f.read().replace('\n', '')

    if len(digits) < n:
        # Previously raised a bare ValueError with no message.
        raise ValueError(
            'need at least {} digits, got {}'.format(n, len(digits)))

    best = 0
    for start in range(len(digits) - n + 1):
        product = 1
        for digit in digits[start:start + n]:
            product *= int(digit)
        best = max(best, product)
    return best
if __name__ == "__main__":
    import doctest
    doctest.testmod()

    # Tiny REPL: read a window size per line and print the corresponding
    # maximum product; exit silently on EOF/interrupt.
    try:
        while True:
            raw = input('> ')
            print(pe8(n=int(raw)))
    except (SyntaxError, EOFError, KeyboardInterrupt, NameError):
        pass
| StarcoderdataPython |
11318318 | <gh_stars>1-10
import torch
import torch.nn as nn
import torch.nn.functional as F
from .abstracts import BaseNetwork
class GymEnvModel(BaseNetwork):
    """Small policy network for gym environments.

    State -> 32-unit tanh layer -> optional single-layer GRU -> action
    layer. In discrete mode the forward pass returns the argmax action
    id; in continuous mode it returns a tanh-squashed action vector.
    Outputs are numpy values computed under ``torch.no_grad()``.
    """

    def __init__(self, num_state=8, num_action=4, discrete_action=True, gru=True):
        super(GymEnvModel, self).__init__()
        self.num_action = num_action
        self.fc1 = nn.Linear(num_state, 32)
        self.use_gru = gru
        if self.use_gru:
            self.gru = nn.GRU(32, 32)
            # Recurrent hidden state, carried across forward() calls.
            self.h = torch.zeros([1, 1, 32], dtype=torch.float)
        self.fc2 = nn.Linear(32, num_action)
        self.discrete_action = discrete_action

    def forward(self, x):
        """Map a numpy state to an action (numpy); updates self.h."""
        with torch.no_grad():
            hidden = torch.from_numpy(x).float().unsqueeze(0)
            hidden = torch.tanh(self.fc1(hidden))
            if self.use_gru:
                hidden, self.h = self.gru(hidden, self.h)
                hidden = torch.tanh(hidden)
            logits = self.fc2(hidden)
            if self.discrete_action:
                action = torch.argmax(F.softmax(logits.squeeze(), dim=0))
            else:
                action = torch.tanh(logits.squeeze())
            return action.detach().cpu().numpy()

    def reset(self):
        """Reset the recurrent hidden state (no-op without a GRU)."""
        if self.use_gru:
            self.h = torch.zeros([1, 1, 32], dtype=torch.float)

    def zero_init(self):
        """Zero all parameters in place."""
        for param in self.parameters():
            param.data = torch.zeros(param.shape)

    def get_param_list(self):
        """Return the parameters as a list of numpy arrays."""
        return [param.data.numpy() for param in self.parameters()]

    def apply_param(self, param_lst: list):
        """Load parameters from a list of arrays, in parameters() order."""
        for idx, param in enumerate(self.parameters()):
            param.data = torch.tensor(param_lst[idx]).float()
| StarcoderdataPython |
5163974 | <reponame>borisgrafx/client
#!/usr/bin/env python
"""Code saving.
The main script will be saved if enabled in the users profile settings.
"""
import wandb

# Start a W&B run; per the module docstring, the main script is uploaded
# if code saving is enabled in the user's profile settings.
run = wandb.init()
# End the run immediately — this script exists only to exercise code saving.
run.finish()
| StarcoderdataPython |
6429359 | <reponame>vineetjohn/ctci-hackerrank
''' Sorting: Comparator '''
import json
from functools import cmp_to_key
class Player(object):
    """A named player with a score, ordered score-descending then
    name-ascending via :meth:`comparator`."""

    def __init__(self, name, score):
        self.name = name
        self.score = score

    def __repr__(self):
        # JSON rendering of the player's fields, in (name, score) order.
        return json.dumps({"name": self.name, "score": self.score})

    @staticmethod
    def comparator(player_1, player_2):
        """Custom comparator: higher score first, ties broken by name."""
        key_1 = (-player_1.score, player_1.name)
        key_2 = (-player_2.score, player_2.name)
        if key_1 < key_2:
            return -1
        if key_1 > key_2:
            return 1
        return 0
def main():
    """Read players from stdin; print them sorted by score descending,
    then name ascending.

    Input format: first line is the player count, then one
    "<name> <score>" pair per line.
    """
    player_count = int(input())
    players = []
    # "_" instead of reusing "i" for both the read loop and the print
    # loop — the old code shadowed the unused index with a Player.
    for _ in range(player_count):
        name, score = input().split()
        players.append(Player(name, int(score)))

    players.sort(key=cmp_to_key(Player.comparator))
    for player in players:
        print(player.name, player.score)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.