index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
8,700 | 9c29f04746de6847ad1bbdf08964d14e6c3766db | from modeltranslation.translator import register, TranslationOptions
from .models import *
@register(PageTitleModel)
class TitleTranslationOptions(TranslationOptions):
fields = (
'name',
)
@register(NewsModel)
class ProjectTranslationOptions(TranslationOptions):
fields = (
'name',
'text',
)
|
8,701 | 3dc4e10145ad42c0168fec3462da0f87c1e661a5 | class Image:
def __init__(self, **kwargs):
self.ClientID = kwargs['ClientID']
self.DealerID = kwargs['DealerID']
self.VIN = kwargs['VIN']
self.UrlVdp = None
self.PhotoURL = kwargs['PhotoURL']
self.VdpActive = None
def __repr__(self):
return f"{self.DealerID} {self.VIN} {self.UrlVdp}"
class VehiclePhoto:
def __init__(self, **kwargs):
self.ClientID = kwargs['ClientID']
self.DealerID = kwargs['DealerID']
self.Domain = kwargs['Domain']
self.VehiclePhotoID = kwargs['VehiclePhotoID']
self.VIN = kwargs['VIN']
self.UrlVdp = kwargs['UrlVdp']
self.UrlImage = kwargs['UrlImage']
def __repr__(self):
return f"{self.VehiclePhotoID} {self.VIN} {self.UrlVdp}" |
8,702 | e8f090a02bfd5ee8a6832351357594af2d6692f9 | import calendar
import json
from datetime import datetime
from datapoller.download import download
from datapoller.settings import *
from messaging.Messaging import sendMessage
from messaging.settings import RABBIT_NOTIFY_QUEUE
from sessioncontroller.utils import is_level_interesting_for_kp
__author__ = 'arik'
sharedDict = {}
def registerModelStorage(dict):
global sharedDict
sharedDict = dict
def updateModel():
(lastLevels, validTime) = download(NOWCAST_DATA_URL)
sharedDict['lastLevels'] = lastLevels
sharedDict['validTime'] = validTime
def hasValidModel():
lastLevels = sharedDict.get('lastLevels')
validTime = sharedDict.get('validTime')
return lastLevels is not None and validTime is not None and \
getTimestamp(validTime) >= getTimestamp(datetime.utcnow())
def processUserLocation(geo_id, geo, kp_level, chat_id, bot):
if hasValidModel() is False:
return
lastLevels = sharedDict.get('lastLevels')
validTime = sharedDict.get('validTime')
level = lastLevels[geo_id]
if kp_level is None or is_level_interesting_for_kp(level, kp_level):
sendMessage(
RABBIT_NOTIFY_QUEUE,
json.dumps({"time": getTimestamp(validTime), "geo": geo, "chat_id": chat_id, "level": level, "bot": bot})
)
def getTimestamp(datetime):
return calendar.timegm(datetime.timetuple())
|
8,703 | e38be2890526c640ba8d9db5a376ff57ba9e0aa2 | import azure.functions as func
import json
from ..common import cosmos_client
def main(req: func.HttpRequest) -> func.HttpResponse:
return func.HttpResponse(
body = json.dumps(cosmos_client.DB.Goals),
mimetype="application/json",
charset="utf-8"
)
# [
# {'amount':1000, 'description': 'foo bar baz prize'},
# {'amount':2000, 'description': 'foo bar baz prize'}
# ] |
8,704 | 254afebcc909c805d1e4972a0910eb4451d1e64e | """Copied from http://svn.sourceforge.jp/svnroot/slothlib/CSharp/Version1/SlothLib/NLP/Filter/StopWord/word/Japanese.txt"""
STOP_WORDS = set(
"""
あそこ
あたり
あちら
あっち
あと
あな
あなた
あれ
いくつ
いつ
いま
いや
いろいろ
うち
おおまか
おまえ
おれ
がい
かく
かたち
かやの
から
がら
きた
くせ
ここ
こっち
こと
ごと
こちら
ごっちゃ
これ
これら
ごろ
さまざま
さらい
さん
しかた
しよう
すか
ずつ
すね
すべて
ぜんぶ
そう
そこ
そちら
そっち
そで
それ
それぞれ
それなり
たくさん
たち
たび
ため
だめ
ちゃ
ちゃん
てん
とおり
とき
どこ
どこか
ところ
どちら
どっか
どっち
どれ
なか
なかば
なに
など
なん
はじめ
はず
はるか
ひと
ひとつ
ふく
ぶり
べつ
へん
ぺん
ほう
ほか
まさ
まし
まとも
まま
みたい
みつ
みなさん
みんな
もと
もの
もん
やつ
よう
よそ
わけ
わたし
ハイ
上
中
下
字
年
月
日
時
分
秒
週
火
水
木
金
土
国
都
道
府
県
市
区
町
村
各
第
方
何
的
度
文
者
性
体
人
他
今
部
課
係
外
類
達
気
室
口
誰
用
界
会
首
男
女
別
話
私
屋
店
家
場
等
見
際
観
段
略
例
系
論
形
間
地
員
線
点
書
品
力
法
感
作
元
手
数
彼
彼女
子
内
楽
喜
怒
哀
輪
頃
化
境
俺
奴
高
校
婦
伸
紀
誌
レ
行
列
事
士
台
集
様
所
歴
器
名
情
連
毎
式
簿
回
匹
個
席
束
歳
目
通
面
円
玉
枚
前
後
左
右
次
先
春
夏
秋
冬
一
二
三
四
五
六
七
八
九
十
百
千
万
億
兆
下記
上記
時間
今回
前回
場合
一つ
年生
自分
ヶ所
ヵ所
カ所
箇所
ヶ月
ヵ月
カ月
箇月
名前
本当
確か
時点
全部
関係
近く
方法
我々
違い
多く
扱い
新た
その後
半ば
結局
様々
以前
以後
以降
未満
以上
以下
幾つ
毎日
自体
向こう
何人
手段
同じ
感じ
""".split()
)
|
8,705 | ceb714e949a72f621aec8b8728fbd1201e22afd1 | """
Copyright (c) Facebook, Inc. and its affiliates.
"""
# fmt: off
"""
Every template contains an ordered list of TemplateObjects.
TemplateObject is defined in template_objects.py
GetMemory templates are written for filters and have an answer_type
They represent the action of fetching from the memory using the filters.
Examples:
[Human, QueryBotCurrentAction],
- human: what are you doing
- human: what are you up to
[Human, QueryBot, MoveTarget],
- human: where you going
- human: where are you heading
"""
from template_objects import *
ANSWER_WITH_CORRECTION = [
## what is this + the thing at location ##
[[Human, What, Is, BlockObjectThis],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, What, Is, BlockObjectThis, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, What, Is, BlockObjectThat],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, What, Is, BlockObjectThat, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
## what size is X + the thing at location ##
[[Human, AskSize, BlockObjectThis],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskSize, BlockObjectThis, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskSize, BlockObjectThis, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskSize, BlockObjectThat],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskSize, BlockObjectThat, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskSize, BlockObjectThat, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
## what color is X + the thing at location ##
[[Human, AskColour, BlockObjectThis],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskColour, BlockObjectThis, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskColour, BlockObjectThis, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskColour, BlockObjectThat],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskColour, BlockObjectThat, AbstractDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskColour, BlockObjectThat, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
# Is X Y ##
[[Human, AskIs, BlockObjectThis, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, AbstractDescription, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, ConcreteDescription, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, AbstractDescription, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, ConcreteDescription, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Size],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, AbstractDescription, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, ConcreteDescription, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, AbstractDescription, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, ConcreteDescription, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Colour],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
## Is X a Y ##
[[Human, AskIs, BlockObjectThis, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThis, AbstractDescription, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
[[Human, AskIs, BlockObjectThat, AbstractDescription, ConcreteDescription],
[HumanReplace, The, AbstractDescription, BlockObjectLocation]],
]
ANSWER_TEMPLATES = [
# 1
## What is X ##
[Human, What, Is, BlockObjectThis],
[Human, What, Is, BlockObjectThis, AbstractDescription],
[Human, What, Is, BlockObjectThat],
[Human, What, Is, BlockObjectThat, AbstractDescription],
# 2
## What is at X ##
[Human, What, Is, BlockObjectLocation],
[Human, What, Is, The, AbstractDescription, BlockObjectLocation],
## What do you see at X ##
[Human, WhatSee, BlockObjectLocation],
# 3
# What size is X ##
[Human, AskSize, BlockObjectThis],
[Human, AskSize, BlockObjectThis, AbstractDescription],
[Human, AskSize, BlockObjectThis, ConcreteDescription],
[Human, AskSize, BlockObjectThat],
[Human, AskSize, BlockObjectThat, AbstractDescription],
[Human, AskSize, BlockObjectThat, ConcreteDescription],
# 4
## what size is X at Y ##
[Human, AskSize, The, AbstractDescription, BlockObjectLocation],
[Human, AskSize, The, ConcreteDescription, BlockObjectLocation],
# 5
# What colour is X ##
[Human, AskColour, BlockObjectThis],
[Human, AskColour, BlockObjectThis, AbstractDescription],
[Human, AskColour, BlockObjectThis, ConcreteDescription],
[Human, AskColour, BlockObjectThat],
[Human, AskColour, BlockObjectThat, AbstractDescription],
[Human, AskColour, BlockObjectThat, ConcreteDescription],
# 6
## what colour is X at Y ##
[Human, AskColour, The, AbstractDescription, BlockObjectLocation],
[Human, AskColour, The, ConcreteDescription, BlockObjectLocation],
# 7
## Is X Y ##
[Human, AskIs, BlockObjectThis, Size],
[Human, AskIs, BlockObjectThis, AbstractDescription, Size],
[Human, AskIs, BlockObjectThis, ConcreteDescription, Size],
[Human, AskIs, BlockObjectThat, Size],
[Human, AskIs, BlockObjectThat, AbstractDescription, Size],
[Human, AskIs, BlockObjectThat, ConcreteDescription, Size],
[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Size],
[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Size],
[Human, AskIs, BlockObjectThis, Colour],
[Human, AskIs, BlockObjectThis, AbstractDescription, Colour],
[Human, AskIs, BlockObjectThis, ConcreteDescription, Colour],
[Human, AskIs, BlockObjectThat, Colour],
[Human, AskIs, BlockObjectThat, AbstractDescription, Colour],
[Human, AskIs, BlockObjectThat, ConcreteDescription, Colour],
[Human, AskIs, The, AbstractDescription, BlockObjectLocation, Colour],
[Human, AskIs, The, ConcreteDescription, BlockObjectLocation, Colour],
# 8
## Is X a Y ##
[Human, AskIs, BlockObjectThis, ConcreteDescription],
[Human, AskIs, BlockObjectThis, AbstractDescription, ConcreteDescription],
[Human, AskIs, BlockObjectThat, ConcreteDescription],
[Human, AskIs, BlockObjectThat, AbstractDescription, ConcreteDescription],
# 9
## IS X at Y Z ##
[Human, AskIs, The, AbstractDescription, BlockObjectLocation, ConcreteDescription],
]
GET_MEMORY_TEMPLATES = [
## What are you Doing (Action name) ##
[Human, QueryBotCurrentAction],
## What are you Building (Action reference object name) ##
[Human, QueryBot, ActionReferenceObjectName],
## Where are you heading (Move target) ##
[Human, QueryBot, MoveTarget],
## Where are you (Bot location) ##
[Human, QueryBot, CurrentLocation],
] + ANSWER_TEMPLATES
|
8,706 | 022c8d6c31ad5494b03bfe93d17396eac25b011e | '''
This program will simulate leveling a DnD character, showing their ending HP, and stats.
'''
import argparse
import csv
import json
import re
import time
from openpyxl import load_workbook
from pandas import DataFrame
from src import classes, util
def import_race_data(file_path):
'''
This method imports data from the inputed CSV and returns a dictionary containing
all of the data formated by race and subrace
Arguments:
:param import_data: (str) The filepath to the data
Returns:
dict: The dictionary of all of the data
'''
retval = {}
# Open csv file and read in all data
with open(file_path) as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
race = row['Race']
subrace = row['Subrace']
if(subrace):
if(race in retval):
if('Subraces' not in retval[race]):
retval[race]['Subraces'] = {}
retval[race]['Subraces'][subrace] = row
else:
retval[race] = {'Subraces':{}}
retval[race]['Subraces'][subrace] = row
else:
retval[race] = row
return retval
def update_mode(args):
'''
This method is the main method for running this program in Update mode.
Update mode takes in a specifically formated XLSX file and outputs a JSON
file containing all of the data for races and subraces needed by the
program in run mode
Arguments:
:param args: (dict) A dictionary containing the needed arguments
Returns:
bool: Whether or not the update completed successfully or not
'''
# Lets first open the workbook
try:
workbook = load_workbook(args['xlsx_file'])
except:
return False
# Now turn the Race sheet into a dataframe
df = DataFrame()
for name in workbook.sheetnames:
if('Race' in name):
df = DataFrame(workbook[name].values)
# If we find nothing, return failure
if(df.empty):
return False
# Lets remove the title row
df.drop(0, axis=0, inplace=True)
df.reset_index(inplace=True, drop=True)
# Now lets get the headers, find the last column, and remove this row
end_col = (df.iloc[0, :].values == None).argmax()
df.drop(df.iloc[:, end_col:], axis=1, inplace=True)
df.columns = list(df.iloc[0, :])
df.drop(0, axis=0, inplace=True)
df.reset_index(inplace=True, drop=True)
# Now lets resize this dataframe to only contain the information we want
# We first scroll down the rows to find the first blank cell, that is the
# end of the rows
end_row = (df.iloc[:, 0].values == None).argmax()
df.drop(df[end_row:].index, axis=0, inplace=True)
# Now let's get the race names and source names
hyperlink_re = re.compile(r'(?<=,")(.+)(?=")')
df['Race'] = df['Race'].apply(
lambda x: x if hyperlink_re.search(x) is None else hyperlink_re.search(x).group(1)
)
df['Source'] = df['Source'].apply(
lambda x: x if hyperlink_re.search(x) is None else hyperlink_re.search(x).group(1)
)
# Now make sure the stat fields are correct integers
# Loop through dataframe so we can assemble the json in the format we want
data = {}
asi_re = re.compile(r'ASI: ([+-]\d) \(x(\d)\)(?:\s{1}\((.+)\))?')
for index, row in df.iterrows():
# First lets index this record into the correct spot in the array
row = dict(row)
race = row['Race']
subrace = row['Subrace']
if(subrace):
if(race in data):
if('Subraces' not in data[race]):
data[race]['Subraces'] = {}
data[race]['Subraces'][subrace] = row
else:
data[race] = {'Subraces':{}}
data[race]['Subraces'][subrace] = row
else:
data[race] = row
# Now that we have added this row, check if there are any special ASI rules to note
if(row['Additional'] is not None):
matches = asi_re.search(row['Additional'])
if(matches):
# We found something
asi = {'size': matches.group(1), 'number': matches.group(2)}
# Check if we have restrictions
if(matches.group(3)):
# We either can put the point into a number of options, or not
# into one stat
if('-' in matches.group(3)):
# We cannot use this stat
asi['not_allowed'] = matches.group(3).split('-')[1]
if('|' in matches.group(3)):
# We can only use one or the other
asi['allowed'] = [x.capitalize() for x in matches.group(3).split(' | ')]
# Now add this to the row of data
if(subrace):
data[race]['Subraces'][subrace]['ASI'] = asi
else:
data[race]['ASI'] = asi
# Done! Let's dump this file
with open('race_data.json', 'w') as fp:
json.dump(data, fp, indent=2)
return True
def run_mode(args):
'''
This method is the main method for running this program in Run mode.
This mode goes through the character simulation
Arguments:
:param args: (dict) A dictionary containing the needed arguments
'''
char = classes.Character(
"Human", None, ['Str','Dex','Con','Int','Wis','Cha'],
classes.StatSelection.ROLL_4D6_DROP_ONE, classes.HPSelection.ROLL_HP,
classes.ASISelection.STRICT_FOCUS
)
print(char.id)
print(char.stats)
char = classes.Character(
"Human", "Variant", ['Str','Dex','Con','Int','Wis','Cha'],
classes.StatSelection.ROLL_3D6, classes.HPSelection.ROLL_HP,
classes.ASISelection.FOCUS_ODD_TO_EVEN
)
print(char.id)
print(char.stats)
if __name__ == "__main__":
# Setup argument parsers and parse arguments
main_parser = argparse.ArgumentParser(description='Character Simulator')
subparsers = main_parser.add_subparsers(help='Mode Help')
update_parser = subparsers.add_parser('update', help='Update Help')
update_parser.add_argument('xlsx_file', type=str, help='Path to the .xlsx race file')
run_parser = subparsers.add_parser('run', help='Run Help')
args = vars(main_parser.parse_args())
# If we are in update mode, update the json file
if('xlsx_file' in args):
update_mode(args)
else:
run_mode(args) |
8,707 | 2294dc21ede759e755e51471705fa8ef784528a7 | import requests
import json
import datetime
from bs4 import BeautifulSoup
from pymongo import MongoClient, UpdateOne
import sys
#usage: python freesound_crawler.py [from_page] [to_page]
SOUND_URL = "https://freesound.org/apiv2/sounds/"
SEARCH_URL = "https://freesound.org/apiv2/search/text/"
AUTORIZE_URL = "https://freesound.org/apiv2/oauth2/authorize"
#freesound account imformation
from freesound_account_info import *
#mongo db imformation
from mongodb_info import *
error = []
MAX_PAGE = 24086
#connect to mongodb, return None if connection failure
def getDB():
try:
client = MongoClient('mongodb://%s:%s@%s:%s/edudata' % (MONGO_USER, MONGO_PASSWORD, MONGO_HOST, MONGO_PORT))
client.server_info()
db = client.edudata
return db.freesound
except Exception as e:
print "Unexpected error:", e
return None
#send request with access token
def sendRequest(url, token):
try:
header = {'Authorization' : "Bearer " + token};
res = requests.get(url, headers = header);
return json.loads( res.text )
except Exception as e:
print "Failed to send request(" , url, "):", e
error.append({'url':url, 'type':'send request'})
return None
def getMaxPage(token):
data = sendRequest(SEARCH_URL,token)
try:
return data['count']/ 15 + 1
except:
print ("Failed to update max page")
return MAX_PAGE
#get sound info with access token
def getSoundInfo( sound_id, token ):
try:
data = {}
sound_data = sendRequest(SOUND_URL + str(sound_id), token)
if sound_data is None:
raise Exception('json is none')
data['_id'] = sound_data[ 'id' ];
data['url'] = sound_data[ 'url' ];
data['title'] = sound_data[ 'name' ];
data['creator'] = sound_data[ 'username' ];
data['createdate'] = sound_data[ 'created' ];
data['description'] = sound_data[ 'description' ];
data['download_url'] = sound_data['download']
data['keyword'] = []
for tag in sound_data[ 'tags' ]:
data['keyword'].append(tag)
data['previews'] = []
for i in sound_data['previews'].keys():
data['previews'].append({i:sound_data['previews'][i]})
data['type'] = sound_data[ 'type' ];
data['bitrate'] = sound_data[ 'bitrate' ];
data['channels'] = sound_data[ 'channels' ];
data['downlaod'] = sound_data[ 'num_downloads' ];
data['license'] = sound_data[ 'license' ];
data['filesize'] = sound_data[ 'filesize' ];
return data;
except Exception as e:
print "Error occurs while getting sound info", sound_id, ": ", sys.exc_info()
print sound_data
return None
#execute queries
def insertDB( db, query):
if query is not None:
result = db.bulk_write(query, ordered = False)
print result.bulk_api_result
def crawling(token, db, page=1, page_to = MAX_PAGE):
header = {'Authorization' : "Bearer " + token};
print "From page", page, "to page", page_to
for i in range(page, page_to + 1):
if i > MAX_PAGE:
print "Meet max page", MAX_PAGE
break;
url = SEARCH_URL + "?page=" + str(i)
list_data = sendRequest(url, token)
try:
update_queries = []
for d in list_data['results']:
data = getSoundInfo( d['id'], token);
if data is None:
error.append({'id': d['id']});
continue
print data
cuurent_time = datetime.datetime.utcnow();
data['update_at'] = cuurent_time
update_queries.append(UpdateOne({'_id':data['_id']}, {'$set': data, '$setOnInsert':{'created_at':cuurent_time}},True))
if db is not None:
insertDB(db, update_queries)
print "Page", i, "is Done"
except Exception as e:
print "Error in page", i, ":", e
error.append({'Exception':e, 'type':'parse data', 'data':list_data})
print list_data
page += 1
if __name__ == '__main__':
db = getDB();
if db is None:
print "No db connected"
exit()
ACCESS_TOKEN = getAccessToken();
if ACCESS_TOKEN is None:
print "Can't get access token"
exit()
MAX_PAGE = getMaxPage(ACCESS_TOKEN)
from_page = 1
to_page = MAX_PAGE
if len(sys.argv) > 1:
from_page = int(sys.argv[1])
if len(sys.argv) > 2:
to_page = int(sys.argv[2])
crawling(ACCESS_TOKEN, db, from_page, to_page)
print "Error log: ",error |
8,708 | 45335fa5d4773bdd0ef3e6c340fe06e84169be5e |
from flask import Flask, send_file
import StringIO
app = Flask(__name__)
@app.route('/')
def index():
strIO = StringIO.StringIO()
strIO.write('Hello from Dan Jacob and Stephane Wirtel !')
strIO.seek(0)
return send_file(strIO,
attachment_filename="testing.txt",
as_attachment=True)
app.run(debug=True)
|
8,709 | 11984027baf6d4c97b2976e4ac49a0e8ec62f893 | from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.models import load_model
import matplotlib.pyplot as plt
import numpy as np
def plot(actual, prediction):
plt.figure(figsize=(16,6))
plt.plot(actual, label='Actual',color='b',linewidth=3)
plt.plot((prediction), label='Prediction',color='y')
print("Plotting")
plt.legend()
plt.show()
timesteps = 2
params = 5
samples = 500000
# load dataset
dataset = read_csv('merged.csv', header=0, usecols = ['time', 'src', 'dst', 'length', 'protocol', 'people'])
values = dataset.values
encoder = LabelEncoder()
values[:,5] = encoder.fit_transform(values[:,5])
values = values.astype('float32')
# normalize features
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
labels = scaled.copy()
scaled = np.delete(scaled, 5, axis=1)
labels = np.delete(labels, 0, axis =1)
labels = np.delete(labels, 0, axis =1)
labels = np.delete(labels, 0, axis =1)
labels = np.delete(labels, 0, axis =1)
labels = np.delete(labels, 0, axis =1)
labels = scaler.fit_transform(labels)
labels = labels[:(samples/timesteps)]
scaled = scaled[:samples]
reframed = np.reshape(scaled,(samples, params))
values = np.reshape(reframed,((samples/timesteps), timesteps,-1))
size = ((len(values))/timesteps)
sizeL = ((len(labels))/timesteps)
test_X = values[:size]
test_y = labels[:sizeL]
model = load_model("test50.h5")
#predicts
yhat = model.predict(test_X)
plot(test_y, yhat)
|
8,710 | 077c596f71aae22e85589fdaf78d5cdae8085443 | from django.conf.urls import url
from . import views
from .import admin
urlpatterns = [
url(r'^$', views.showberanda, name='showberanda'),
url(r'^sentimenanalisis/$', views.showsentimenanalisis, name='showsentimenanalisis'),
url(r'^bantuan/$', views.showbantuan, name='showbantuan'),
url(r'^tweets/', views.get_tweets),
] |
8,711 | 294b0dc7587ecd37887591da5a1afe96a4349f6b | # ?????
c=0
for i in range(12):
if 'r' in input():
c+=1
# ??
print(c) |
8,712 | 1ab690b0f9c34b1886320e1dfe8b54a5ec6cd4d1 | """Support for Deebot Vaccums."""
import logging
from typing import Any, Mapping, Optional
import voluptuous as vol
from deebot_client.commands import (
Charge,
Clean,
FanSpeedLevel,
PlaySound,
SetFanSpeed,
SetRelocationState,
SetWaterInfo,
)
from deebot_client.commands.clean import CleanAction, CleanArea, CleanMode
from deebot_client.commands.custom import CustomCommand
from deebot_client.events import (
BatteryEvent,
CustomCommandEvent,
ErrorEvent,
FanSpeedEvent,
ReportStatsEvent,
RoomsEvent,
StatusEvent,
)
from deebot_client.events.event_bus import EventListener
from deebot_client.models import Room, VacuumState
from deebot_client.vacuum_bot import VacuumBot
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_MAP,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STOP,
StateVacuumEntity,
StateVacuumEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.util import slugify
from .const import (
DOMAIN,
EVENT_CLEANING_JOB,
EVENT_CUSTOM_COMMAND,
LAST_ERROR,
REFRESH_MAP,
REFRESH_STR_TO_EVENT_DTO,
VACUUMSTATE_TO_STATE,
)
from .entity import DeebotEntity
from .hub import DeebotHub
from .util import dataclass_to_dict, unsubscribe_listeners
_LOGGER = logging.getLogger(__name__)
SUPPORT_DEEBOT: int = (
SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_BATTERY
| SUPPORT_SEND_COMMAND
| SUPPORT_LOCATE
| SUPPORT_MAP
| SUPPORT_STATE
| SUPPORT_START
)
# Must be kept in sync with services.yaml
SERVICE_REFRESH = "refresh"
SERVICE_REFRESH_PART = "part"
SERVICE_REFRESH_SCHEMA = {
vol.Required(SERVICE_REFRESH_PART): vol.In(
[*REFRESH_STR_TO_EVENT_DTO.keys(), REFRESH_MAP]
)
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add entities for passed config_entry in HA."""
hub: DeebotHub = hass.data[DOMAIN][config_entry.entry_id]
new_devices = []
for vacbot in hub.vacuum_bots:
new_devices.append(DeebotVacuum(vacbot))
if new_devices:
async_add_entities(new_devices)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_REFRESH,
SERVICE_REFRESH_SCHEMA,
"_service_refresh",
)
class DeebotVacuum(DeebotEntity, StateVacuumEntity): # type: ignore
"""Deebot Vacuum."""
def __init__(self, vacuum_bot: VacuumBot):
"""Initialize the Deebot Vacuum."""
device_info = vacuum_bot.device_info
if device_info.nick is not None:
name: str = device_info.nick
else:
# In case there is no nickname defined, use the device id
name = device_info.did
super().__init__(vacuum_bot, StateVacuumEntityDescription(key="", name=name))
self._battery: Optional[int] = None
self._fan_speed: Optional[str] = None
self._state: Optional[VacuumState] = None
self._rooms: list[Room] = []
self._last_error: Optional[ErrorEvent] = None
async def async_added_to_hass(self) -> None:
"""Set up the event listeners now that hass is ready."""
await super().async_added_to_hass()
async def on_battery(event: BatteryEvent) -> None:
self._battery = event.value
self.async_write_ha_state()
async def on_custom_command(event: CustomCommandEvent) -> None:
self.hass.bus.fire(EVENT_CUSTOM_COMMAND, dataclass_to_dict(event))
async def on_error(event: ErrorEvent) -> None:
self._last_error = event
self.async_write_ha_state()
async def on_fan_speed(event: FanSpeedEvent) -> None:
self._fan_speed = event.speed
self.async_write_ha_state()
async def on_report_stats(event: ReportStatsEvent) -> None:
self.hass.bus.fire(EVENT_CLEANING_JOB, dataclass_to_dict(event))
async def on_rooms(event: RoomsEvent) -> None:
self._rooms = event.rooms
self.async_write_ha_state()
async def on_status(event: StatusEvent) -> None:
self._state = event.state
self.async_write_ha_state()
listeners: list[EventListener] = [
self._vacuum_bot.events.subscribe(BatteryEvent, on_battery),
self._vacuum_bot.events.subscribe(CustomCommandEvent, on_custom_command),
self._vacuum_bot.events.subscribe(ErrorEvent, on_error),
self._vacuum_bot.events.subscribe(FanSpeedEvent, on_fan_speed),
self._vacuum_bot.events.subscribe(ReportStatsEvent, on_report_stats),
self._vacuum_bot.events.subscribe(RoomsEvent, on_rooms),
self._vacuum_bot.events.subscribe(StatusEvent, on_status),
]
self.async_on_remove(lambda: unsubscribe_listeners(listeners))
@property
def supported_features(self) -> int:
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_DEEBOT
@property
def state(self) -> StateType:
"""Return the state of the vacuum cleaner."""
if self._state is not None and self.available:
return VACUUMSTATE_TO_STATE[self._state]
@property
def battery_level(self) -> Optional[int]:
"""Return the battery level of the vacuum cleaner."""
return self._battery
@property
def fan_speed(self) -> Optional[str]:
"""Return the fan speed of the vacuum cleaner."""
return self._fan_speed
@property
def fan_speed_list(self) -> list[str]:
"""Get the list of available fan speed steps of the vacuum cleaner."""
return [level.display_name for level in FanSpeedLevel]
@property
def extra_state_attributes(self) -> Optional[Mapping[str, Any]]:
"""Return entity specific state attributes.
Implemented by platform classes. Convention for attribute names
is lowercase snake_case.
"""
attributes: dict[str, Any] = {}
rooms: dict[str, Any] = {}
for room in self._rooms:
# convert room name to snake_case to meet the convention
room_name = slugify(room.subtype)
room_values = rooms.get(room_name)
if room_values is None:
rooms[room_name] = room.id
elif isinstance(room_values, list):
room_values.append(room.id)
else:
# Convert from int to list
rooms[room_name] = [room_values, room.id]
if rooms:
attributes["rooms"] = rooms
if self._last_error:
attributes[
LAST_ERROR
] = f"{self._last_error.description} ({self._last_error.code})"
return attributes
async def async_set_fan_speed(self, fan_speed: str, **kwargs: Any) -> None:
"""Set fan speed."""
await self._vacuum_bot.execute_command(SetFanSpeed(fan_speed))
async def async_return_to_base(self, **kwargs: Any) -> None:
"""Set the vacuum cleaner to return to the dock."""
await self._vacuum_bot.execute_command(Charge())
async def async_stop(self, **kwargs: Any) -> None:
"""Stop the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.STOP))
async def async_pause(self) -> None:
"""Pause the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.PAUSE))
async def async_start(self) -> None:
"""Start the vacuum cleaner."""
await self._vacuum_bot.execute_command(Clean(CleanAction.START))
async def async_locate(self, **kwargs: Any) -> None:
"""Locate the vacuum cleaner."""
await self._vacuum_bot.execute_command(PlaySound())
async def async_send_command(
self, command: str, params: Optional[dict[str, Any]] = None, **kwargs: Any
) -> None:
"""Send a command to a vacuum cleaner."""
_LOGGER.debug("async_send_command %s with %s", command, params)
if command in ["relocate", SetRelocationState.name]:
_LOGGER.warning("DEPRECATED! Please use relocate button entity instead.")
await self._vacuum_bot.execute_command(SetRelocationState())
elif command == "auto_clean":
clean_type = params.get("type", "auto") if params else "auto"
if clean_type == "auto":
_LOGGER.warning('DEPRECATED! Please use "vacuum.start" instead.')
await self.async_start()
elif command in ["spot_area", "custom_area", "set_water"]:
if params is None:
raise RuntimeError("Params are required!")
if command in "spot_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.SPOT_AREA,
area=str(params["rooms"]),
cleanings=params.get("cleanings", 1),
)
)
elif command == "custom_area":
await self._vacuum_bot.execute_command(
CleanArea(
mode=CleanMode.CUSTOM_AREA,
area=str(params["coordinates"]),
cleanings=params.get("cleanings", 1),
)
)
elif command == "set_water":
_LOGGER.warning("DEPRECATED! Please use water select entity instead.")
await self._vacuum_bot.execute_command(SetWaterInfo(params["amount"]))
else:
await self._vacuum_bot.execute_command(CustomCommand(command, params))
async def _service_refresh(self, part: str) -> None:
    """Manually refresh one part of the device state.

    Resolves *part* to an event DTO and requests a refresh for it; the
    map has its own refresh path, and unknown parts are only logged.
    """
    _LOGGER.debug("Manually refresh %s", part)
    event = REFRESH_STR_TO_EVENT_DTO.get(part)
    if event:
        self._vacuum_bot.events.request_refresh(event)
        return
    if part == REFRESH_MAP:
        self._vacuum_bot.map.refresh()
        return
    _LOGGER.warning('Service "refresh" called with unknown part: %s', part)
|
8,713 | b5dba7c1566721f8bb4ec99bc2f13cae4ade4f0a | #!/usr/bin/python3 -S
# -*- coding: utf-8 -*-
import netaddr
from cargo.fields import MacAddress
from unit_tests.fields.Field import TestField
from unit_tests import configure
class TestMacAddress(configure.NetTestCase, TestField):
    # End-to-end tests for the MacAddress field against the test ORM:
    # construction, insert/select round-trips, and array variants.

    @property
    def base(self):
        # The scalar macaddr field declared on the test model.
        return self.orm.mac

    def test___call__(self):
        # Calling the field with a MAC string wraps it in a netaddr.EUI.
        base = MacAddress()
        self.assertEqual(base.value, base.empty)
        base('08-00-2b-01-02-03')
        self.assertIsInstance(base.value, netaddr.EUI)

    def test_insert(self):
        # Inserted MACs come back normalized to upper-case dashed form.
        self.base('08-00-2b-01-02-03')
        val = self.orm.new().insert(self.base)
        val = getattr(val, self.base.field_name)
        self.assertEqual(str(val.value), '08-00-2B-01-02-03')

    def test_select(self):
        # A full insert + select round-trip preserves the address.
        self.base('08-00-2b-01-02-03')
        self.orm.insert(self.base)
        val = getattr(self.orm.new().desc(self.orm.uid).get(),
                      self.base.field_name)
        self.assertEqual(str(val.value), '08-00-2B-01-02-03')

    def test_array_insert(self):
        # macaddr[] columns accept a list of MAC strings.
        arr = ['08-00-2b-01-02-03', '08-00-2b-01-02-04']
        self.base_array(arr)
        val = self.orm.new().insert(self.base_array)
        val = getattr(val, self.base_array.field_name)
        self.assertListEqual(list(map(str, val.value)),
                             ['08-00-2B-01-02-03', '08-00-2B-01-02-04'])

    def test_array_select(self):
        # Selecting the array back yields the same values as the insert.
        arr = ['08-00-2b-01-02-03', '08-00-2b-01-02-04']
        self.base_array(arr)
        val = getattr(self.orm.new().insert(self.base_array),
                      self.base_array.field_name)
        val_b = getattr(self.orm.new().desc(self.orm.uid).get(),
                        self.base_array.field_name)
        self.assertListEqual(list(map(str, val.value)),
                             list(map(str, val_b.value)))

    def test_type_name(self):
        # SQL type names exposed by the field.
        self.assertEqual(self.base.type_name, 'macaddr')
        self.assertEqual(self.base_array.type_name, 'macaddr[]')
class TestEncMacAddress(TestMacAddress):
    # Re-runs the MacAddress round-trip suite for the encrypted field
    # variant, which is persisted as text rather than macaddr.

    @property
    def base(self):
        return self.orm.enc_mac

    def test_init(self, *args, **kwargs):
        # Construction is covered by the parent class; nothing extra here.
        pass

    def test_type_name(self):
        # Encrypted MACs are stored as text / text[].
        self.assertEqual(self.base.type_name, 'text')
        self.assertEqual(self.base_array.type_name, 'text[]')
if __name__ == '__main__':
    # Run both suites directly: verbose output, stop on first failure.
    configure.run_tests(TestMacAddress,
                        TestEncMacAddress,
                        verbosity=2,
                        failfast=True)
|
8,714 | 5869669f1e3f648c0ddc68683f0b1d2754b40169 | import discord
from collections import Counter
from db import readDB, writeDB
# User-facing status / error message templates. The %s placeholders are
# filled with player names (or a list of names) at the call sites below.
INFO_DB_SUCCESS = 'Database updated successfully!'
ERROR_DB_ERROR = 'Error: Unable to open database for writing'
ERROR_DB_NOT_FOUND = 'Error: Database for specified game does not exist. Check your spelling or use !addgame first.'
ERROR_PLAYER_NOT_FOUND = 'Error: \"%s\" not found in database. Check your spelling or use !addplayer first.'
ERROR_WIN_IN_LOSE = 'Error: \"%s\" already specified as winner.'
ERROR_DUP_LOSER = 'Error: \"%s\" duplicated in losers list'
ERROR_IN_DB = 'Error: \"%s\" is already in the database'
ERROR_SORT_ERROR = 'Error while sorting list. Make sure all players have at least one win or loss.\n'
ERROR_INVALID_SORT = 'Error: Invalid sorting type. Displaying stats as stored.\n'
# desc: function to search a list of lists for a name
# args: name - the name to search the lists for
# searchList - a list of lists to search for a name
# retn: the index of the list containing the name or -1 if not found
def getIndex(name, searchList):
    """Return the index of the first sub-list containing *name*, or -1."""
    for index, entry in enumerate(searchList):
        if name in entry:
            return index
    return -1
# desc: function to round a number up to a specific increment. for example,
# rounding 11 to the nearest multiple of 2 would result in 12
# args: num - the number to round up
# multiple - the increment to round to
# retn: the rounded number
def roundMultiple(num, multiple):
    """Round *num* up to the next multiple of *multiple* (no-op if exact).

    Example: roundMultiple(11, 2) == 12, roundMultiple(10, 2) == 10.
    """
    remainder = num % multiple
    return num if remainder == 0 else num + multiple - remainder
# desc: function to find duplicate items in a list
# args: inputList - a list to search for duplicates
# retn: a list containing the duplicates
def findDuplicates(inputList):
    """Return the values that occur more than once, in first-seen order."""
    tally = {}
    for item in inputList:
        tally[item] = tally.get(item, 0) + 1
    return [item for item, count in tally.items() if count > 1]
# desc: function to update the database
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# winner - a string containing the winner's name
# losers - a list of strings containing the losers' names
# retn: a string indicating success or failure
def incrementStats(msgChannel, statsFile, winner, losers):
    """Apply one game result: +1 win for *winner*, +1 loss per loser.

    Validates that every name exists in the database, that the winner is
    not also listed as a loser, and that the losers list has no
    duplicates before mutating anything. Returns a user-facing status
    string. (msgChannel is currently unused by this function.)
    """
    # read the database
    data = readDB(statsFile)
    # return an error if database not found
    if data == 0:
        return ERROR_DB_NOT_FOUND
    rows = data.rows
    # check if the winner is actually in the database
    if getIndex(winner, rows) < 0:
        print('[ERROR] Winner \"%s\" not found in database' % winner)
        return (ERROR_PLAYER_NOT_FOUND % winner)
    # check if losers are in database
    for loser in losers:
        # get loser index
        loserIndex = getIndex(loser, rows)
        # check against winner to see if the name was duplicated
        if loser == winner:
            print('[ERROR] Winner duplicated in losers field')
            return (ERROR_WIN_IN_LOSE % loser)
        # check if loser was not found in database
        if loserIndex < 0:
            print('[ERROR] Loser \"%s\" not found in database' % loser)
            return (ERROR_PLAYER_NOT_FOUND % loser)
    # check for duplicate losers
    dupList = findDuplicates(losers)
    if len(dupList) > 0:
        print('[ERROR] Duplicate losers found')
        return (ERROR_DUP_LOSER % dupList)
    # update stats if we found the winner and all losers
    # get index, get win count, increment and update
    # NOTE: wins/losses are stored as strings in the CSV-backed rows,
    # hence the int() / str() conversions around the increment.
    winnerIndex = getIndex(winner, rows)
    winnerVal = int(rows[winnerIndex][1])
    rows[winnerIndex][1] = str(winnerVal + 1)
    # same as winner for each loser
    for loser in losers:
        loserIndex = getIndex(loser, rows)
        loserVal = int(rows[loserIndex][2])
        rows[loserIndex][2] = str(loserVal + 1)
    # write the new data to the database file
    if writeDB(statsFile, data.headers, rows):
        return INFO_DB_SUCCESS
    else:
        print('[INFO] Database not updated')
        return ERROR_DB_ERROR
# desc: function to add a player to the database or edit an existing player
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# player - the name of the player to either add to the db or edit
# editType - either 'ADD' or 'EDIT' or 'REMOVE' - sets type of change happening
# wins - the number of wins to assign the player
# losses - the number of losses to assign the player
# retn: a string indicating success or failure
def editPlayer(msgChannel, statsFile, player, editType, wins='0', losses='0'):
    """Add, edit or remove a player record in the stats database.

    editType selects the operation: 'ADD' (fails if the player exists),
    'EDIT' (overwrite wins/losses) or 'REMOVE'. wins/losses are kept as
    strings to match the on-disk row format. Returns a user-facing
    status string. (msgChannel is currently unused by this function.)
    """
    # open up the database
    data = readDB(statsFile)
    # return an error if database not found
    if data == 0:
        return ERROR_DB_NOT_FOUND
    rows = data.rows
    playerIndex = getIndex(player, rows)
    # check if player is already in database
    if editType == 'ADD':
        if playerIndex > -1:
            print('[ERROR] \"%s\" already in database' % player)
            print('[INFO] Database not updated')
            return (ERROR_IN_DB % player)
        else:
            # add player to list and resort (rows stay name-sorted on disk)
            rows.append([player, wins, losses])
            rows.sort(key=lambda name: name[0].capitalize())
            # write the new data to the database file
            if writeDB(statsFile, data.headers, rows):
                print('[INFO] \"%s\" added to database' % player)
                return INFO_DB_SUCCESS
            else:
                print('[INFO] Database not updated')
                return ERROR_DB_ERROR
    elif editType == 'EDIT':
        if playerIndex < 0:
            print('[ERROR] \"%s\" not found in database' % player)
            print('[INFO] Database not updated')
            return (ERROR_PLAYER_NOT_FOUND % player)
        else:
            # overwrite the stored wins/losses, keeping the stored name
            rows[playerIndex] = [rows[playerIndex][0], wins, losses]
            # write the new data to the database file
            if writeDB(statsFile, data.headers, rows):
                print('[INFO] %s\'s data changed' % player)
                return INFO_DB_SUCCESS
            else:
                print('[INFO] Database not updated')
                return ERROR_DB_ERROR
    elif editType == 'REMOVE':
        if playerIndex < 0:
            print('[ERROR] \"%s\" not found in database' % player)
            print('[INFO] Database not updated')
            return (ERROR_PLAYER_NOT_FOUND % player)
        else:
            # delete player from list
            del(rows[playerIndex])
            # write the new data to the database
            if writeDB(statsFile, data.headers, rows):
                print('[INFO] \"%s\" removed from database' % player)
                return INFO_DB_SUCCESS
            else:
                print('[INFO] Database not updated')
                return ERROR_DB_ERROR
# desc: function to display the stats
# args: msgChannel - the channel the invoking message was sent from
# statsFile - the name of the database file
# sortType - the order in which the results should be sorted.
# options are 'WINRATE', 'WINS', 'LOSSES', or 'NAME'.
# will revert to 'NAME' if invalid
# player - NOT IMPLEMENTED - the player to display stats for
# retn: a string formatted with the database stats
def dumpStats(msgChannel, statsFile, sortType='WINRATE', player='ALL'):
    """Build a markdown-formatted stats table for the given database.

    sortType may be 'WINRATE' (default), 'WINS', 'LOSSES' or 'NAME';
    anything else keeps stored order and prefixes a warning. The
    per-player filter is not implemented: any player other than 'ALL'
    yields an empty table. Returns the formatted string (possibly with
    an error/warning message prepended). msgChannel is currently unused.
    """
    # read database
    data = readDB(statsFile)
    # return an error if database not found
    if data == 0:
        return ERROR_DB_NOT_FOUND
    rows = data.rows
    print('[INFO] Sort type is %s' % sortType)
    returnMsg = ''
    # BUG FIX: sendString was only assigned inside the player == 'ALL'
    # branch, so any other player value made `return sendString` raise
    # NameError. Initialize it so the function degrades gracefully.
    sendString = ''
    if sortType == 'WINRATE' or sortType == 'NONE':
        # sort data by win rate
        try:
            rows.sort(key=lambda rate: float(rate[1]) / (float(rate[1]) + float(rate[2])), reverse=True)
        except ZeroDivisionError:
            # a player with 0 wins and 0 losses breaks the rate computation
            print('[ERROR] Tried to divide by zero because of blank player data')
            returnMsg = ERROR_SORT_ERROR
    elif sortType == 'WINS':
        # sort by number of wins and reverse so max is first
        rows.sort(key=lambda wins: float(wins[1]), reverse=True)
    elif sortType == 'LOSSES':
        # sort by number of losses and reverse so max is first
        rows.sort(key=lambda losses: float(losses[2]), reverse=True)
    elif sortType == 'NAME':
        # database is stored sorted by name so dont do anything
        pass
    else:
        print('[ERROR] Invalid sorting type specified. Displaying stats as stored')
        returnMsg = ERROR_INVALID_SORT
    if player == 'ALL':
        # get max player length (used to pad the Name column)
        maxPlayerLen = 0
        for player in rows:
            if len(player[0]) > maxPlayerLen:
                maxPlayerLen = len(player[0])
        # construct a string with all the player info
        playerString = ''
        # adjust start spacing if player length is odd or even to align with pipe
        startSpace = 4 if maxPlayerLen % 2 else 3
        for player in rows:
            playerName = player[0].capitalize().rjust(maxPlayerLen + startSpace)
            winCount = player[1].rjust(7)
            loseCount = player[2].rjust(9)
            # calculate win rate
            if float(winCount) <= 0:
                winRate = '0'
            elif float(loseCount) <= 0:
                winRate = ' 100'
            else:
                winRate = str((float(winCount) / (float(winCount) + float(loseCount))) * 100)
            # truncate win rate and create string with player info
            winRate = winRate[0:4].rjust(9)
            playerString += playerName + winCount + loseCount + winRate + ' %\n'
        # calculate padding for name field and create header final strings
        namePaddingLen = roundMultiple((maxPlayerLen + 2), 2)
        header = ' |' + 'Name'.center(namePaddingLen) + '| Wins | Losses | Win Rate |\n'
        divider = ('-' * len(header)) + '\n'
        sendString = '```md\n' + header + divider + playerString + '```'
    # return the constructed string
    if len(returnMsg) > 0:
        returnMsg = returnMsg + sendString
        return returnMsg
    return sendString
|
8,715 | 7c3798aa9cc5424656572dfaa87f7acb961613eb | #!/usr/bin/python
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import unittest
import logging
from collections import Counter
from utility import token_util
class TestFileReadingFunctions(unittest.TestCase):
    """Tests for token_util counting and vocabulary-building helpers."""

    def setUp(self):
        # Fixture files live next to this module under data/.
        self.data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data")
        self.one_word_per_line_path = os.path.join(self.data_dir, "one_word_per_line.txt")
        self.one_sent_per_line_path = os.path.join(self.data_dir, "one_sent_per_line.txt")
        self.token2id_path = os.path.join(self.data_dir, "token2id.txt")
        self.word_cnt_path_list = [self.one_sent_per_line_path, self.one_word_per_line_path]
        self.logger = logging.getLogger("ReadingFunctions Test Logger")

    def test_token_cnt(self):
        # Counts must be identical regardless of worker count or parallel
        # mode, and counts over multiple files must add up.
        one_word_per_line_counter = Counter({"a_1": 1, "b_2": 2, "c_3": 3, "d_4": 4})
        one_sent_per_line_counter = Counter({"a_1": 1, "b_2": 2, "c_3": 3, "d_4": 4, "e_5": 5, "f_6": 6})
        c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path], separator=None, workers=1, parallel_mode="size")
        self.assertEqual(c, one_word_per_line_counter)
        c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path], separator=None, workers=3, parallel_mode="size")
        self.assertEqual(c, one_word_per_line_counter)
        c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path], separator=None, workers=1, parallel_mode="size")
        self.assertEqual(c, one_sent_per_line_counter)
        c = token_util.gen_token_cnt_from_file([self.one_sent_per_line_path], separator=None, workers=3, parallel_mode="size")
        self.assertEqual(c, one_sent_per_line_counter)
        c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=1, parallel_mode="size")
        self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter)
        c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=3, parallel_mode="size")
        self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter)
        c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=1, parallel_mode="file")
        self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter)
        c = token_util.gen_token_cnt_from_file([self.one_word_per_line_path, self.one_sent_per_line_path], separator=None, workers=3, parallel_mode="file")
        self.assertEqual(c, one_word_per_line_counter + one_sent_per_line_counter)

    def test_gen_token_id_from_file(self):
        # Vocabulary is ordered by descending count and truncated by
        # min_cnt / max_size; accepts either a Counter or a path list.
        one_word_per_line_counter = Counter({"a_1": 1, "b_2": 2, "c_3": 3, "d_4": 4})
        one_sent_per_line_counter = Counter({"a_1": 1, "b_2": 2, "c_3": 3, "d_4": 4, "e_5": 5, "f_6": 6})
        res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter, min_cnt=-1, max_size=-1, separator=None)
        self.assertEqual(res_list, ["f_6", "e_5", "d_4", "c_3", "b_2", "a_1"])
        res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter, min_cnt=2, max_size=-1, separator=None)
        self.assertEqual(res_list, ["f_6", "e_5", "d_4", "c_3"])
        res_list = token_util.gen_token_id_from_file(one_sent_per_line_counter, min_cnt=-1, max_size=2, separator=None)
        self.assertEqual(res_list, ["f_6", "e_5"])
        res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path], min_cnt=-1, max_size=-1, separator=None)
        self.assertEqual(res_list, ["f_6", "e_5", "d_4", "c_3", "b_2", "a_1"])
        res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path], min_cnt=2, max_size=-1, separator=None)
        self.assertEqual(res_list, ["f_6", "e_5", "d_4", "c_3"])
        res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path], min_cnt=-1, max_size=2, separator=None)
        self.assertEqual(res_list, ["f_6", "e_5"])
        # NOTE(review): assertAlmostEqual is numeric-only -- with list
        # arguments it only passes when the lists compare exactly equal,
        # otherwise it raises TypeError (lists cannot be subtracted) and
        # the delta=2 argument is never honoured. An order-tolerant
        # comparison was probably intended here -- confirm and replace.
        res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path, self.one_word_per_line_path], min_cnt=2, max_size=-1, separator=None)
        self.assertAlmostEqual(res_list, ["d_4", "f_6", "c_3", "e_5", "b_2"], delta=2)
        res_list = token_util.gen_token_id_from_file([self.one_sent_per_line_path, self.one_word_per_line_path], min_cnt=-1, max_size=3, separator=None)
        self.assertAlmostEqual(res_list, ["d_4", "f_6", "c_3"], delta=2)

    def test_load_token_id(self):
        # token2id.txt maps one token per line to its line index.
        token2id, id2token = token_util.load_token_id(self.token2id_path)
        self.assertEqual(token2id, {"a_0": 0, "b_1": 1, "c_2": 2, "d_3": 3, "UNK": 4})
        self.assertEqual(id2token, ["a_0", "b_1", "c_2", "d_3", "UNK"])
if __name__ == "__main__":
    # Run the suite directly when this file is executed as a script.
    unittest.main()
|
8,716 | 50c274e0365f2556a46eb58edcd1f0a7301e89db | # -*- coding: utf-8 -*-
#
# RPi.Spark KeyButton Demo
#
# Author: Kunpeng Zhang
# 2018.6.6
#
# See LICENSE for details.
from time import sleep
import RPi.GPIO as GPIO
from JMRPiSpark.Drives.Key.RPiKeyButtons import RPiKeyButtons
from JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_SHORT_MON
from JMRPiSpark.Drives.Key.RPiKeyButtons import DEF_BOUNCE_TIME_NORMAL
########################################################################
# Key buttons include Joystick buttons and Action buttons,
# use BCM mode, there are keyboard layout:
#
# [JOY UP]
# [JOY LEFT] [JOY RIGHT] [ACT_A] [ACT_B]
# [JOY DOWN]
#
class CONFIG_KEY:
    # BCM pin assignments for the Spark keypad (layout shown in the
    # banner comment above this class).
    # Action Buttons BCM_IO_NUM
    BUTTON_ACT_A = 22
    BUTTON_ACT_B = 23
    # Joy Buttons BCM_IO_NUM
    BUTTON_JOY_LEFT = 26
    BUTTON_JOY_RIGHT = 27
    BUTTON_JOY_UP = 5
    BUTTON_JOY_DOWN = 6
    BUTTON_JOY_OK = 24
class demo:
    """Interactive key-button demo: prints press/release events until the
    exit combination (Button A + Joy UP) is detected."""

    # Key driver instance; replaced with a real RPiKeyButtons in __init__.
    _myKey = None

    def __init__(self):
        self._myKey = RPiKeyButtons()

    def _getKeyButtonName(self, keyBtn):
        # Map a BCM pin number back to a human-readable button name.
        if keyBtn == CONFIG_KEY.BUTTON_ACT_A: return "BUTTON_A"
        if keyBtn == CONFIG_KEY.BUTTON_ACT_B: return "BUTTON_B"
        if keyBtn == CONFIG_KEY.BUTTON_JOY_UP: return "JOY_UP"
        if keyBtn == CONFIG_KEY.BUTTON_JOY_DOWN: return "JOY_DOWN"
        if keyBtn == CONFIG_KEY.BUTTON_JOY_RIGHT: return "JOY_RIGHT"
        if keyBtn == CONFIG_KEY.BUTTON_JOY_LEFT: return "JOY_LEFT"
        if keyBtn == CONFIG_KEY.BUTTON_JOY_OK: return "JOY_CENTER"
        # NOTE(review): "UNKNOW" looks like a typo for "UNKNOWN" -- it is a
        # user-visible string, so it is left unchanged here.
        return "UNKNOW"

    def onKeyButtonDown(self, channel):
        print("DOWN:\t{}".format(self._getKeyButtonName(channel)))
        pass

    def onKeyButtonUp(self, channel):
        print("UP:\t{}\n".format(self._getKeyButtonName(channel)))
        pass

    def _callbackKeyButton(self, channel):
        """!
        Key button interrupt event callback function
        Inherit this method to implement your want
        """
        # A raw reading of 0 is dispatched as "down", 1 as "up" -- the
        # buttons appear to be active-low (TODO confirm wiring).
        if self._myKey.readKeyButton(channel) == 0:
            self.onKeyButtonDown(channel)
            return
        if self._myKey.readKeyButton(channel) == 1:
            self.onKeyButtonUp(channel)
            return

    def initKeyButtons(self, mode = "INT"):
        """!
        Init all key buttons interrupt events or query mode.
        Inherit the onKeyButtonDown and onKeyButtonUp to implement your want
        @param mode: Can be { "INT" | "QUERY" }, default is "INT"
        """
        if mode.upper() == "INT":
            try:
                self._myKey.configKeyButtons(
                    enableButtons = [
                        {"id":CONFIG_KEY.BUTTON_ACT_A, "callback":self._callbackKeyButton},
                        {"id":CONFIG_KEY.BUTTON_ACT_B, "callback":self._callbackKeyButton},
                        {"id":CONFIG_KEY.BUTTON_JOY_UP, "callback":self._callbackKeyButton},
                        {"id":CONFIG_KEY.BUTTON_JOY_DOWN, "callback":self._callbackKeyButton},
                        {"id":CONFIG_KEY.BUTTON_JOY_LEFT, "callback":self._callbackKeyButton},
                        {"id":CONFIG_KEY.BUTTON_JOY_RIGHT, "callback":self._callbackKeyButton},
                        {"id":CONFIG_KEY.BUTTON_JOY_OK, "callback":self._callbackKeyButton}
                    ],
                    bounceTime = DEF_BOUNCE_TIME_SHORT_MON )
            except:
                # NOTE(review): bare except silently swallows any setup
                # failure (e.g. GPIO already in use) -- consider logging.
                pass
        if mode.upper() == "QUERY":
            self._myKey.configKeyButtons([
                {"id":CONFIG_KEY.BUTTON_ACT_A, "callback":None},
                {"id":CONFIG_KEY.BUTTON_ACT_B, "callback":None},
                {"id":CONFIG_KEY.BUTTON_JOY_OK, "callback":None},
                {"id":CONFIG_KEY.BUTTON_JOY_UP, "callback":None},
                {"id":CONFIG_KEY.BUTTON_JOY_DOWN, "callback":None},
                {"id":CONFIG_KEY.BUTTON_JOY_LEFT, "callback":None},
                {"id":CONFIG_KEY.BUTTON_JOY_RIGHT, "callback":None}
            ])

    def releaseKeyButtons(self):
        """!
        Release all key button events
        """
        self._myKey.removeKeyButtonEvent([
            CONFIG_KEY.BUTTON_ACT_A,
            CONFIG_KEY.BUTTON_ACT_B,
            CONFIG_KEY.BUTTON_JOY_UP,
            CONFIG_KEY.BUTTON_JOY_DOWN,
            CONFIG_KEY.BUTTON_JOY_LEFT,
            CONFIG_KEY.BUTTON_JOY_RIGHT,
            CONFIG_KEY.BUTTON_JOY_OK
        ])

    def readKeyButton(self, keyBtn):
        """!
        Read key button status, return 0 / 1
        """
        # Double-read with a 20 ms gap debounces the switch: only a
        # stable low reading is reported as pressed (returns 1).
        if self._myKey.readKeyButton( keyBtn ) == 0:
            sleep(0.02)
            return 0 if self._myKey.readKeyButton( keyBtn ) else 1
        return 0

    def readExitButtonStatus(self):
        """!
        Read Exit action ( button A and Joy UP press down same time )
        """
        pressA = self.readKeyButton(CONFIG_KEY.BUTTON_ACT_A)
        pressUp = self.readKeyButton(CONFIG_KEY.BUTTON_JOY_UP)
        return pressA and pressUp

    def run(self):
        # Install interrupt handlers, then poll only for the exit combo;
        # the actual press/release printing happens in the callbacks.
        print("\nPress any key button to test ...\n < JOY UP + Button A to Exit >\n\n")
        self.initKeyButtons("INT")
        while True:
            if self.readExitButtonStatus(): break
            pass
        self.releaseKeyButtons()
        GPIO.cleanup()
if __name__ == "__main__":
    # Run the demo until the exit combination is pressed.
    demo().run()
    print("Key buttons demo is end.")
8,717 | 1a6f84835ec2f5fbbb064aef2cd872c24eb3839d | prompt = "Enter a message and I will repeat it to you: "
message = " "
while message != 'quit':
message = input(prompt)
if message != 'quit':
print(message)
# using the 'flag' variable
prompt = "Enter a message and I will repeat it to you: "
# active is the variable used in this case as flag
active = True
while active:
message = input(prompt)
if message == 'quit':
active = False
else:
print(message)
|
8,718 | 6645887b25d75f4657fb231b80d8ebdec2bac7c9 | from django.shortcuts import render
from django.views.generic import TemplateView
from django.conf import settings
import os, csv
class InflationView(TemplateView):
    """Render the inflation table parsed from ``inflation_russia.csv``."""

    template_name = 'inflation.html'

    def get(self, request, *args, **kwargs):
        """Read the semicolon-separated CSV and expose its header row as
        ``head`` and the remaining rows as ``data`` in the context."""
        file_path = os.path.join(settings.BASE_DIR, 'inflation_russia.csv')
        with open(file_path, newline='', encoding='utf-8') as csvfile:
            reader = csv.reader(csvfile, delimiter=';')
            head = next(reader)
            data = list(reader)
        context = {'head': head, 'data': data}
        return render(request, self.template_name, context)
|
8,719 | e0fc7e5771f6cb8e0638bc8c9549cfe1a92d3d82 | from django.urls import path
from redjit.post.views import MyPost, PostView
# URL routes for the post app.
urlpatterns = [
    # BUG FIX: a comma was missing between the two path() entries,
    # which made this module fail to parse (SyntaxError).
    path('newpost/', MyPost.as_view(), name='newpost'),
    path('subredjit/<subredjit>/<post_id>/', PostView.as_view(), name='post'),
]
8,720 | f971302f39149bcdcbe4237cc71219572db600d4 | import numpy as np
from nn.feedforward_nn import Feed_Forward
class RMSprop(object):
    """RMSprop optimiser for the feed-forward network's weights."""

    def __init__(self, n_in, n_hid, n_out, regularization_coe):
        # The network supplies the gradient/error via gradE().
        self.nn = Feed_Forward(n_in, n_hid, n_out, regularization_coe)

    def set_param(self, param):
        """Pull hyper-parameters out of *param*, falling back to defaults."""
        self.learning_rate = param.get('learning_rate', 0.01)
        self.n_iter = param.get('n_iter', 1000)
        self.rho = param.get('rho', 0.9)
        self.epsilon = param.get('epsilon', 1e-8)

    def set_train_data(self, x: np.array, t: np.array):
        """Attach training inputs and targets to the wrapped network."""
        self.nn.xlist = x
        self.nn.tlist = t

    def update(self, w, **kwargs):
        """Run RMSprop steps on *w* and return the updated weights.

        Keyword arguments are forwarded to set_param (learning_rate,
        n_iter, rho, epsilon).
        """
        self.set_param(kwargs)
        decay = self.rho
        eps = self.epsilon
        lr = self.learning_rate
        sq_avg = 0  # running average of squared gradients
        for _step in range(1, self.n_iter):
            grad, _err = self.nn.gradE(w)
            sq_avg = decay * sq_avg + (1 - decay) * grad * grad
            w -= (lr / (eps + np.sqrt(sq_avg))) * grad
        return w
|
8,721 | c060cdb7730ba5c4d2240b65331f5010cac222fa | import copy
import sys
import os
from datetime import datetime,timedelta
from dateutil.relativedelta import relativedelta
import numpy as np
import pandas
import tsprocClass as tc
import pestUtil as pu
#update parameter values and fixed/unfixed
#--since Joe is so pro-America...
# NOTE: this is a Python 2 script (print statements, unbuffered
# open(..., 'w', 0)); paths are Windows-style with escaped backslashes.
tc.DATE_FMT = '%m/%d/%Y'
#--build a list of template and model-equivalent files
tpl_dir = 'tpl\\'
modin_dir = 'par\\'
tpl_files,modin_files = [],[]
files = os.listdir(modin_dir)
for file in files:
    modin_files.append(modin_dir+file)
    tpl_files.append(tpl_dir+file.split('.')[0]+'.tpl')
modin_files.append('UMD.03\\SWRREF\\SWR_Dataset11.ref')
tpl_files.append('tpl\\SWR_Dataset11.tpl')
#--start and end
model_start = datetime(1996,1,1,hour=12)
obs_start = datetime(1997,1,1,hour=12)
#obs_end = datetime(2010,12,31,hour=12)
obs_end = datetime(year=1999,month=12,day=12,hour=12)
obs_start_str = obs_start.strftime(tc.DATE_FMT)
obs_end_str = obs_end.strftime(tc.DATE_FMT)
date_dir = 'date_files\\'
#--instance
tsproc_infile = 'tsproc_setup.dat'
tsp = tc.tsproc(tsproc_infile,out_file='processed.dat',out_fmt='long')
pest_oblocks,pest_mblocks = [],[]
#--stage sites
# Build a site-name -> reach-number map from the stage stats CSV.
# Names are upper-cased, spaces -> underscores, dashes removed, and a
# trailing 'W' is stripped (presumably a well/weir suffix -- confirm).
stg_obs_file = 'UMD.03\\obsref\\stage\\All_DBHYDRO_stage.smp'
stg_reach_file = 'setup_files\\UMD.03_StageStats.csv'
f = open(stg_reach_file,'r')
reach_dict = {}
header = f.readline()
for line in f:
    raw = line.strip().split(',')
    name = raw[0].upper().replace(' ','_').replace('-','')
    if name.endswith('W'):
        name = name[:-1]
    reach_dict[name] = int(raw[1])
f.close()
#parser = lambda x: datetime.strptime(x,tc.DATE_FMT+' %H:%M:%S')
#stage_df = pandas.read_table(stg_obs_file,header=None,parse_dates=[[1,2]],date_parser=parser,sep='\s*')
#stage_df.columns = ['datetime','site','value']
stage_smp = pu.smp(stg_obs_file,date_fmt=tc.DATE_FMT,pandas=True,load=True)
stage_sites = stage_smp.records.keys()
for site in stage_sites:
    if site not in reach_dict.keys():
        print 'site not found in reach dict',site
# Cross-check the two sources and build parallel name lists.
# NOTE(review): popping from reach_dict while iterating its keys() is
# only safe in Python 2, where keys() returns a list snapshot.
obs_names = []
mod_names = []
reach_numbers = []
smp_site_names = []
for i,site in enumerate(reach_dict.keys()):
    if site not in stage_sites:
        print 'site not found in smp file',site
        reach_dict.pop(site)
    else:
        obs_names.append('ost_{0:03.0f}or'.format(i+1))
        mod_names.append('mst_{0:03.0f}or'.format(i+1))
        reach_numbers.append(reach_dict[site])
        smp_site_names.append(site)
mblocks = tsp.get_mul_series_swr(reach_numbers,None,'UMD.03\\Results\\UMD.stg',model_start,mod_names,swr_file_type='stage')
oblocks = tsp.get_mul_series_ssf(reach_dict.keys(),stg_obs_file,context=tc.PEST_CONTEXT,series_list=obs_names)
assert len(mblocks) == len(oblocks)
#--process each head record individually because of the variable record length
# For each stage site: clip to the overlapping window, convert to
# drawdown, resample weekly, split into baseflow + residual, and
# down-sample to a biweekly series for PEST.
for i,[site,oblock,mblock] in enumerate(zip(smp_site_names,oblocks,mblocks)):
    oblock = [oblock]
    mblock = [mblock]
    #--get the start and end of the observed record
    ostart = stage_smp.records[site].dropna().index[0]
    oend = stage_smp.records[site].dropna().index[-1]
    dstart,dend = max(obs_start,ostart),min(obs_end,oend)
    print site,dstart,dend
    if dend > dstart:
        full_file = date_dir+site+'_stg.dat'
        tc.write_date_file(full_file,dstart,dend,None)
        uniform_days = tsp.new_series_uniform([oblock[0].name],dstart+timedelta(days=7),dend)
        biweekly_days = tsp.new_series_uniform([oblock[0].name],dstart+timedelta(days=14),dend,interval=14,suffix='ub')
        #--model simulated block
        reduced_block = tsp.reduce_time(mblock,dstart,end_dt=dend)
        relative_block = tsp.drawdown(reduced_block,full_file,first=True)
        interp_block = tsp.new_time_base(relative_block,uniform_days)
        filter_block = tsp.baseflow_filter(interp_block)
        diff_block = tsp.difference_2_series(interp_block,filter_block)
        bi_block = tsp.new_time_base(diff_block,biweekly_days,suffix='bi')
        #--copy the final processed block to have the same name as the original
        #renamed_block = tsp.copy_2_series(reduced_block,[site_name+'_r'],role='final',wght=100.0)
        #pest_mblocks.extend(renamed_block)
        renamed_block = tsp.copy_2_series(bi_block,[site],role='final',wght=100.0)
        pest_mblocks.extend(renamed_block)
        # Identical pipeline for the observed series (PEST context).
        reduced_block = tsp.reduce_time(oblock,dstart,end_dt=dend,context=tc.PEST_CONTEXT)
        relative_block = tsp.drawdown(reduced_block,full_file,first=True,context=tc.PEST_CONTEXT)
        interp_block = tsp.new_time_base(relative_block,uniform_days,context=tc.PEST_CONTEXT)
        filter_block = tsp.baseflow_filter(interp_block,context=tc.PEST_CONTEXT)
        diff_block = tsp.difference_2_series(interp_block,filter_block,context=tc.PEST_CONTEXT)
        bi_block = tsp.new_time_base(diff_block,biweekly_days,context=tc.PEST_CONTEXT,suffix='bi')
        #--copy the final processed block to have the same name as the original
        #renamed_block = tsp.copy_2_series(reduced_block,[site_name+'_ro'],role='final',wght=100.0,context=tc.PEST_CONTEXT)
        #pest_oblocks.extend(renamed_block)
        renamed_block = tsp.copy_2_series(bi_block,[site+'_o'],role='final',wght=100.0,context=tc.PEST_CONTEXT)
        pest_oblocks.extend(renamed_block)
#--baseflow obs
# Pair observed/simulated monthly net-flow records; simulated site names
# end in 's' where observed end in a different final character.
bf_obs_file = 'UMD.03\\Results\\UMDNetFlow_observed_Monthly.smp'
bf_mod_file = 'UMD.03\\Results\\UMDNetFlow_simulated_Monthly.smp'
bf_obs_smp = pu.smp(bf_obs_file,load=True,date_fmt=tc.DATE_FMT,pandas=True)
bf_mod_smp = pu.smp(bf_mod_file,load=True,date_fmt=tc.DATE_FMT,pandas=True)
bf_obs_sites = bf_obs_smp.records.keys()
bf_mod_sites = bf_mod_smp.records.keys()
assert len(bf_obs_sites) == len(bf_mod_sites)
bf_mod_sites = []
for osite in bf_obs_sites:
    print osite
    msite = osite[:-1]+'s'
    assert msite in bf_mod_smp.records.keys()
    bf_mod_sites.append(msite)
    print bf_obs_smp.records[osite].shape,bf_mod_smp.records[osite[:-1]+'s'].shape
obs_names = []
mod_names = []
for i,s in enumerate(bf_obs_sites):
    obs_names.append('obf_{0:03.0f}or'.format(i+1))
    mod_names.append('mbf_{0:03.0f}or'.format(i+1))
bf_oblocks = tsp.get_series_ssf(bf_obs_sites,bf_obs_file,block_operation='load_bf_obs',series_list=obs_names,context=tc.PEST_CONTEXT)
bf_mblocks = tsp.get_series_ssf(bf_mod_sites,bf_mod_file,block_operation='load_bf_mod',series_list=mod_names)
time_str = '00:00:00'
for mblock,oblock,site in zip(bf_mblocks,bf_oblocks,bf_obs_sites):
    #--baseflow accumulation
    date_file_name = date_dir+site+'_bf.dat'
    obs_df = bf_obs_smp.records[site].dropna()
    obs_df = obs_df[obs_start:]
    ostart,oend = obs_df.index[0],obs_df.index[-1]
    print site,ostart,oend
    # open(..., 'w', 0): unbuffered write (Python 2 only).
    f = open(date_file_name,'w',0)
    f.write(ostart.strftime(tc.DATE_FMT)+' '+time_str+' '+oend.strftime(tc.DATE_FMT)+' '+time_str+'\n')
    f.close()
    # Accumulated volumes carried at zero weight (informational only).
    vcalc_mblock = tsp.volume_calc([mblock],date_file_name)
    vcalc_oblock = tsp.volume_calc([oblock],date_file_name,context=tc.PEST_CONTEXT)
    vser_mblock = tsp.vol_2_series(vcalc_mblock)
    vser_oblock = tsp.vol_2_series(vcalc_oblock,context=tc.PEST_CONTEXT)
    renamed_mblock = tsp.copy_2_series(vser_mblock,[site[:-1]+'p'],role='final',wght=0.0)
    renamed_oblock = tsp.copy_2_series(vser_oblock,[site[:-2]+'op'],role='final',wght=0.0,context=tc.PEST_CONTEXT)
    pest_mblocks.extend(renamed_mblock)
    pest_oblocks.extend(renamed_oblock)
    #--the raw baseflow series
    renamed_mblock = tsp.copy_2_series([mblock],[site[:-1]+'s'],role='final',wght=100.0)
    renamed_oblock = tsp.copy_2_series([oblock],[site[:-1]+'o'],role='final',wght=100.0,context=tc.PEST_CONTEXT)
    pest_mblocks.extend(renamed_mblock)
    pest_oblocks.extend(renamed_oblock)
# Groundwater heads: same reduce/drawdown/filter/biweekly pipeline as
# the stage records above, applied per head record.
hobs_file = 'UMD.03\\obsref\\head\\heads.smp'
hobs_smp = pu.smp(hobs_file,date_fmt = tc.DATE_FMT,load=True)
hobs_start,hobs_end = hobs_smp.get_daterange(site_name='all',startmin=obs_start,endmax=obs_end)
mobs_file = 'UMD.03\\modref\\head\\mheads.smp'
mobs_smp = pu.smp(mobs_file,date_fmt = tc.DATE_FMT,load=True)
site_names = hobs_smp.records.keys()
#--generate base names for processing
obs_names = []
mod_names = []
for i,s in enumerate(site_names):
    obs_names.append('ogw_{0:03.0f}or'.format(i+1))
    mod_names.append('mgw_{0:03.0f}or'.format(i+1))
#--write the load series block
oblocks = tsp.get_mul_series_ssf(site_names,hobs_file,block_operation='load_heads',context=tc.PEST_CONTEXT,series_list=obs_names)
mblocks = tsp.get_mul_series_ssf(site_names,mobs_file,block_operation='load_heads',series_list=mod_names)
#--process each head record individually because of the variable record length
for i,[site_name,oblock,mblock] in enumerate(zip(site_names,oblocks,mblocks)):
    oblock = [oblock]
    mblock = [mblock]
    #--get the starting and end date of each record within the reduced model sim time
    rstart,rend = hobs_start[site_name],hobs_end[site_name]
    if rend > obs_start:
        #--find the date range for this record and write date files
        dstart,dend = max(obs_start,rstart),min(obs_end,rend)
        print site_name,dstart,dend
        week_file = date_dir+site_name+'_wk.dat'
        full_file = date_dir+site_name+'.dat'
        dry_file = date_dir+site_name+'_dry.dat'
        #tc.write_date_file(week_file,dstart+timedelta(days=7),dend-timedelta(days=7),timedelta(days=7))
        tc.write_date_file(full_file,dstart,dend,None)
        uniform_days = tsp.new_series_uniform([oblock[0].name],dstart+timedelta(days=7),dend)
        biweekly_days = tsp.new_series_uniform([oblock[0].name],dstart+timedelta(days=14),dend,interval=14,suffix='ub')
        #weekly_block = tsp.series_avg(relative_block,week_file,context=tc.PEST_CONTEXT)
        #--observation block
        reduced_block = tsp.reduce_time(oblock,dstart,end_dt=dend,context=tc.PEST_CONTEXT)
        relative_block = tsp.drawdown(reduced_block,full_file,first=True,context=tc.PEST_CONTEXT)
        interp_block = tsp.new_time_base(relative_block,uniform_days,context=tc.PEST_CONTEXT)
        filter_block = tsp.baseflow_filter(interp_block,context=tc.PEST_CONTEXT)
        diff_block = tsp.difference_2_series(interp_block,filter_block,context=tc.PEST_CONTEXT)
        bi_block = tsp.new_time_base(diff_block,biweekly_days,context=tc.PEST_CONTEXT,suffix='bi')
        #--copy the final processed block to have the same name as the original
        #renamed_block = tsp.copy_2_series(reduced_block,[site_name+'_ro'],role='final',wght=100.0,context=tc.PEST_CONTEXT)
        #pest_oblocks.extend(renamed_block)
        renamed_block = tsp.copy_2_series(bi_block,[site_name+'_o'],role='final',wght=100.0,context=tc.PEST_CONTEXT)
        pest_oblocks.extend(renamed_block)
        #--model simulated block
        reduced_block = tsp.reduce_time(mblock,dstart,end_dt=dend)
        relative_block = tsp.drawdown(reduced_block,full_file,first=True)
        interp_block = tsp.new_time_base(relative_block,uniform_days)
        filter_block = tsp.baseflow_filter(interp_block)
        diff_block = tsp.difference_2_series(interp_block,filter_block)
        bi_block = tsp.new_time_base(diff_block,biweekly_days,suffix='bi')
        #--copy the final processed block to have the same name as the original
        #renamed_block = tsp.copy_2_series(reduced_block,[site_name+'_r'],role='final',wght=100.0)
        #pest_mblocks.extend(renamed_block)
        renamed_block = tsp.copy_2_series(bi_block,[site_name],role='final',wght=100.0)
        pest_mblocks.extend(renamed_block)
    else:
        print 'no data for record in reduced sim time:',site_name
    #if i > 100:
    #break
#--write the model run tspoc file
tsp.set_context('model_run')
tsp.tsproc_file = 'tsproc_model_run.dat'
tsp.write_tsproc()
#--write the setup tsproc file
tsp.write_pest(tpl_files,modin_files,pest_oblocks,pest_mblocks,svd=True,parms='pst_components\\params.dat',parm_grp='pst_components\\param_groups.dat')
tsp.set_context(tc.PEST_CONTEXT)
tsp.tsproc_file = 'tsproc_setup.dat'
tsp.write_tsproc()
# Finally run tsproc and add regularisation with PEST utilities.
os.system('tsproc.exe <tsproc_setup.in >tsproc_screen.out')
os.system('addreg1.exe pest.pst umd03.pst')
8,722 | 0bce5d590b96e434cd8aee7531a321bc648c1981 | #!/usr/bin/python
try:
from Queue import Queue
except ImportError: # Python 3
from queue import Queue
class BFSWithQueue:
    """Queue-based breadth-first search with distance bookkeeping.

    After run(), these attributes describe the traversal:

    color : dict, node -> "WHITE"/"GREY"/"BLACK" (private marker)
    distance : dict, node -> hop count from the source of its component
    parent : dict, node -> predecessor in the BFS tree (None for roots)
    dag : directed graph holding the BFS tree edges

    Usage mirrors the other traversal classes in this package::

        algorithm = BFSWithQueue(G)
        algorithm.run(source=0, pre_action=lambda node: order.append(node))
        algorithm.path(source, target)

    Based on Cormen, Leiserson, Rivest and Stein, "Introduction to
    Algorithms", 3rd ed., and
    https://en.wikipedia.org/wiki/Breadth-first_search
    """

    def __init__(self, graph):
        """Initialize per-node bookkeeping and an empty BFS tree."""
        self.graph = graph
        self.color = {node: "WHITE" for node in self.graph.iternodes()}
        self.distance = {node: float("inf") for node in self.graph.iternodes()}
        self.parent = {node: None for node in self.graph.iternodes()}
        self.dag = self.graph.__class__(self.graph.v(), directed=True)
        for node in self.graph.iternodes():   # keep isolated nodes in the tree
            self.dag.add_node(node)

    def run(self, source=None, pre_action=None, post_action=None):
        """Traverse from source, or from every still-white node when None."""
        if source is not None:
            self._visit(source, pre_action, post_action)
            return
        for node in self.graph.iternodes():
            if self.color[node] == "WHITE":
                self._visit(node, pre_action, post_action)

    def _visit(self, node, pre_action=None, post_action=None):
        """Explore one connected component starting at node."""
        self.color[node] = "GREY"
        self.distance[node] = 0
        self.parent[node] = None
        queue = Queue()
        queue.put(node)   # node is GREY now
        if pre_action:   # fired whenever a node is enqueued
            pre_action(node)
        while not queue.empty():
            current = queue.get()
            for edge in self.graph.iteroutedges(current):
                neighbor = edge.target
                if self.color[neighbor] == "WHITE":
                    self.color[neighbor] = "GREY"
                    self.distance[neighbor] = self.distance[current] + 1
                    self.parent[neighbor] = current
                    self.dag.add_edge(edge)
                    queue.put(neighbor)
                    if pre_action:   # fired whenever a node is enqueued
                        pre_action(neighbor)
            self.color[current] = "BLACK"
            if post_action:   # fired once a node is fully expanded
                post_action(current)

    def path(self, source, target):
        """Return the node list from source to target along parent links."""
        route = [target]
        while route[-1] != source:
            previous = self.parent[route[-1]]
            if previous is None:
                raise ValueError("no path to target")
            route.append(previous)
        route.reverse()
        return route
class SimpleBFS:
    """Lightweight breadth-first search recording only the BFS tree.

    parent : dict, reached node -> its predecessor (None for roots)
    dag : directed graph holding the BFS tree edges

    Usage::

        algorithm = SimpleBFS(G)
        algorithm.run(source=0, pre_action=lambda node: order.append(node))
        algorithm.path(source, target)

    Based on Cormen, Leiserson, Rivest and Stein, "Introduction to
    Algorithms", 3rd ed., and
    https://en.wikipedia.org/wiki/Breadth-first_search
    """

    def __init__(self, graph):
        """Create an empty parent map and BFS tree for graph."""
        self.graph = graph
        self.parent = dict()
        self.dag = self.graph.__class__(self.graph.v(), directed=True)
        for node in self.graph.iternodes():   # keep isolated nodes in the tree
            self.dag.add_node(node)

    def run(self, source=None, pre_action=None, post_action=None):
        """Traverse from source, or from every unreached node when None."""
        if source is not None:
            self._visit(source, pre_action, post_action)
            return
        for node in self.graph.iternodes():
            if node not in self.parent:
                self._visit(node, pre_action, post_action)

    def _visit(self, node, pre_action=None, post_action=None):
        """Explore one connected component; membership in parent marks seen."""
        queue = Queue()
        self.parent[node] = None   # mark seen before enqueueing
        queue.put(node)
        if pre_action:   # fired whenever a node is enqueued
            pre_action(node)
        while not queue.empty():
            current = queue.get()
            for edge in self.graph.iteroutedges(current):
                neighbor = edge.target
                if neighbor not in self.parent:
                    self.parent[neighbor] = current   # mark seen before enqueueing
                    self.dag.add_edge(edge)
                    queue.put(neighbor)
                    if pre_action:   # fired whenever a node is enqueued
                        pre_action(neighbor)
            if post_action:   # fired once a node is fully expanded
                post_action(current)

    def path(self, source, target):
        """Return the node list from source to target along parent links."""
        route = [target]
        while route[-1] != source:
            previous = self.parent[route[-1]]
            if previous is None:
                raise ValueError("no path to target")
            route.append(previous)
        route.reverse()
        return route
# EOF
|
8,723 | 96a4659f03879e051af95b5aa9c1e1364015fb86 | #coding=utf-8
import requests,sys
result_url=[]
def main():
    """Probe every URL listed in the file named by sys.argv[1].

    Prints each URL together with its HTTP status code and response body.
    URLs that answer 200 with "MPEGVideo" in the body are appended to the
    module-level ``result_url`` list and written to the global ``file_200``
    handle (opened by the ``__main__`` block before this runs).

    Fixes: the input file was previously opened twice (once into an unused
    ``counts`` variable) and never closed; it is now read once inside a
    context manager.
    """
    with open(sys.argv[1]) as url_file:
        urls = [line.strip("\n") for line in url_file]
    for url in urls:
        try:
            # verify=True keeps TLS certificate checking on; 3-second timeout.
            r = requests.get(url, verify=True, timeout=3)
            print(url + " " + str(r.status_code))
            print(str(r.text))
            if r.status_code == 200 and "MPEGVideo" in r.text:
                result_url.append(url)
        except Exception as e:
            # Best-effort scan: report the failure and move on to the next URL.
            print(str(e))
    for i in result_url:
        print(i)
        file_200.write(i + "\n")
if __name__ == '__main__':
    # Open the results file before main() runs: main() appends each matching
    # URL to this global handle as it finishes.
    file_200=open("result_uWSGI_file.txt","w")
    main()
    file_200.flush()
    file_200.close()
|
8,724 | 3989b4c2a15fa8cd54fef86f9d7150fbd0fb74cf | import os
import sys
import shutil
import re
# Absolute paths on the author's machine: where new files land (Downloads),
# where sorted course files live, and where LaTeX output is produced.
dl_dir = '/home/acarnec/Downloads/'
college_dir = '/home/acarnec/Documents/3rdYear'
latex_dir = '/home/acarnec/Documents/Latex/'
# Three-letter course codes; michaelmas/hilary partition `modules` by term.
modules = ['mta', 'ana', 'met', 'log', 'mat',
           'lin', 'min', 'pol', 'mic', 'mte']
michaelmas = ['mta', 'ana', 'met', 'log', 'mat']
hilary = ['lin', 'min', 'pol', 'mic', 'mte']
# Single-letter document-type codes (the 4th character of a managed filename).
types = ['A', 'H', 'Q', 'N', 'R', 'T', 'S', 'M']
# Types moved outright rather than symlinked (see sort_to_dest_dir below).
nonlinkables = ['Q','R','T', 'S', 'M']
# File extensions the filename regex accepts.
exts = ['pdf', 'tex', 'djvu', 'xlsx', 'epub']
#script, path = sys.argv
def change_directory(path):
    """Make *path* the current working directory."""
    os.chdir(path)
def list_files(path):
    """Return the names of the entries in the directory at *path*."""
    return os.listdir(path)
def move_file_to_dir(f, dest_dir):
    """Move file *f* into *dest_dir* unless dest_dir already holds it."""
    existing = list_files(dest_dir)
    if f in existing:
        return
    shutil.move(f, dest_dir)
def get_key_from_values(f, catalog):
    """Return the directory path whose catalogued file list contains *f*.

    Parameters
    ----------
    f : str
        Filename to look up.
    catalog : dict
        Mapping of directory path -> list of filenames in that directory.

    Returns the first matching path, or None when *f* appears nowhere
    (the original's implicit fall-through behavior, now explicit).

    Fixes: removed the unused ``L_keys``/``L_values`` locals and the
    redundant ``catalog[path]`` lookup (the value is already unpacked).
    """
    for path, files in catalog.items():
        if f in files:
            return path
    return None
def make_sym_links(f, source, dest_dir, catalog):
    """
    Look up the directory that holds *f* in *catalog* and create a symlink
    to it inside *dest_dir*, skipped if dest_dir already has an entry named f.

    NOTE(review): the `source` parameter is immediately overwritten by the
    catalog lookup below, so the value the caller passes is ignored —
    confirm whether the parameter can be dropped at the call sites.
    """
    source = get_key_from_values(f, catalog)
    source_path = f"{source}/{f}"
    dest_path = f"{dest_dir}/{f}"
    ls = os.listdir(dest_dir)
    if f not in ls:
        os.symlink(source_path, dest_path)
def define_regex(module_names=modules, doc_types=types, exts=exts):
    """Build the regex matching managed course filenames.

    A matching name is: a module code, one document-type letter, an
    underscore, any run of non-dot characters, a dot, and an accepted
    extension — e.g. ``mtaA_sheet1.pdf``.

    Parameters
    ----------
    module_names : list of module-code strings (alternation group 1).
    doc_types : list of single type letters (character class).
    exts : list of accepted extensions (alternation group 2).

    Fixes: the original iterated the global ``modules`` inside the loop,
    silently ignoring the ``module_names`` parameter; the manual
    last-element string building (broken for duplicate entries) is replaced
    with ``str.join``.  Output for the default arguments is unchanged.
    """
    letters = ''.join(doc_types)
    module_codes = '|'.join(module_names)
    file_exts = '|'.join(exts)
    regex = r"(" + module_codes + "){1}[" + letters + "]{1}\_[^.]*\.(" + file_exts + ")"
    return regex
def recognize_files(list_of_filenames):
    """Return the filenames that match the course-file naming pattern."""
    pattern = re.compile(define_regex())
    return [name for name in list_of_filenames if pattern.match(name) is not None]
def catalog_files(directory):
    """Walk *directory* and map each sub-directory path to its matched files.

    Returns a dict of directory path -> list of filenames in that directory
    that match the naming pattern from define_regex (may be empty).

    Fixes: the ``os.walk`` tuple element previously named ``filename`` is
    actually the list of sub-directory names (now ``_dirnames``), and the
    dict is built in a single pass instead of being filled and then
    re-filtered.
    """
    catalog = {}
    for dirpath, _dirnames, files in os.walk(directory):
        catalog[dirpath] = recognize_files(files)
    return catalog
def sort_into_modules(catalog, modules=modules, types=types):
    """Group the catalogued filenames by their three-letter module code.

    Returns a dict of module code -> list of matching filenames; files whose
    prefix is not a known code are dropped.
    """
    subject_dict = dict((code, []) for code in modules)
    for file_list in catalog.values():
        for filename in file_list:
            for code in modules:
                if filename[:3] == code:
                    subject_dict[code].append(filename)
    return subject_dict
def sort_into_type(subject_dict, modules=modules, types=types):
    """Group filenames by (module code, document-type letter).

    Returns a dict keyed by (code, letter) tuples; combinations with no
    files are dropped from the result.
    """
    buckets = {(code, letter): [] for code in modules for letter in types}
    for file_list in subject_dict.values():
        for filename in file_list:
            # Characters 0-2 are the module code, character 3 the type letter.
            buckets[(filename[:3], filename[3])].append(filename)
    return {key: names for key, names in buckets.items() if names != []}
def sort_to_dest_dir(subject_type_dict,
                     catalog,
                     dest_dir=college_dir,
                     sym_link_check=nonlinkables,
                     michaelmas=michaelmas):
    """
    Iterates through module type dictionary and specifies destination
    in college_dir in accordance to filename, if file is of a certain
    type, it is moved to destination, otherwise a symbolic link is made.

    NOTE(review): the `dest_dir` and `sym_link_check` parameters are unused;
    the body reads the globals `college_dir` and `nonlinkables` directly —
    confirm whether the parameters should be honored or removed.
    """
    for code_type, files in subject_type_dict.items():
        # code_type is (module_code, type_letter); the term list decides
        # which term folder the file belongs under.
        if code_type[0] in michaelmas:
            destination = f"{college_dir}/Michaelmas_Term/{code_type[0]}/{code_type[1]}"
        else:
            destination = f"{college_dir}/Hilary_Term/{code_type[0]}/{code_type[1]}"
        for f in files:
            if code_type[1] in nonlinkables:
                # Non-linkable document types are moved outright.
                move_file_to_dir(f, destination)
            else:
                # Only PDFs are symlinked; other extensions are left in place.
                if '.pdf' in f:
                    source = get_key_from_values(f, catalog)
                    make_sym_links(f, source, destination, catalog)
                else:
                    pass
def main(sort_origin):
    """Catalog the files under *sort_origin* and file them into college_dir.

    Pipeline: chdir into the origin, build a path->files catalog, group the
    matched files by module and then by (module, type), and finally move or
    symlink them into their destination folders.

    Fix: removed the dead ``script, argument = sys.argv`` unpack — the
    caller already parses sys.argv and this function only needs
    ``sort_origin``.
    """
    # Change to directory to operate upon files
    change_directory(sort_origin)
    # Make a dictionary to track file location in sort_origin
    catalog = catalog_files(sort_origin)
    # Sort into dict with module:files
    subject_dict = sort_into_modules(catalog)
    # Sort into dict with (module, type):files
    subject_type_dict = sort_into_type(subject_dict)
    # Sort files into their respective folders
    sort_to_dest_dir(subject_type_dict, catalog)
# CLI dispatch: argv[1] selects which tree(s) to sort.
# NOTE(review): runs at import time (no __main__ guard) and raises
# ValueError when called without exactly one argument — confirm intended.
script, argument = sys.argv
if argument == 'lat':
    sort_origin = latex_dir
    main(sort_origin)
elif argument == 'dl':
    sort_origin = dl_dir
    main(sort_origin)
elif argument == 'all':
    # Sort both trees in sequence.
    main(sort_origin=latex_dir)
    main(sort_origin=dl_dir)
else:
    # Unknown argument: print usage.
    msg = "dl for downloads\nlat for latex\nall for both"
    print(msg)
|
8,725 | 1508697f93114d7f20182a3e9c1df5617904529a | # import libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import warnings
import pickle
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
import math
from sklearn.model_selection import cross_validate
# --- Heart-disease logistic-regression training script ---
# Loads heart.csv, fits a logistic regression on a stratified 70/30 split,
# computes hold-out accuracy/RMSE plus 10-fold cross-validation averages,
# and pickles the fitted model to model.pkl.

# Load the data and work on a copy so `dataset` stays pristine.
dataset = pd.read_csv('heart.csv')
df = dataset.copy()

# Features are every column except the binary `target` label.
X = df.drop(['target'], axis=1).values
Y = df.target.values

# Feature correlation matrix (kept for exploratory use; not used below).
corr_mat = df.corr()

# Stratified split keeps the class balance identical in train and test.
x_train, x_test, y_train, y_test = \
    train_test_split(X, Y, test_size=0.3, random_state=1234, stratify=Y)

# Fit logistic regression and score it on both partitions.
lr = LogisticRegression()
lr.fit(x_train, y_train)
y_predict = lr.predict(x_test)
train_score = lr.score(x_train, y_train)
test_score = lr.score(x_test, y_test)

# Hold-out accuracy and root-mean-squared error of the 0/1 predictions.
acc_score = accuracy_score(y_test, y_predict)
rmse = math.sqrt(mean_squared_error(y_test, y_predict))

# 10-fold cross-validation on the full data for a less split-dependent
# estimate; average the per-fold train and test scores.
lr_cross = LogisticRegression()
cv_results_lr = cross_validate(lr_cross, X, Y, cv=10, return_train_score=True)
test_cv_avg = np.average(cv_results_lr['test_score'])
train_cv_avg = np.average(cv_results_lr['train_score'])

# Fix: the original passed a bare open() handle to pickle.dump and never
# closed it; the context manager guarantees the file is flushed and closed.
with open('model.pkl', 'wb') as model_file:
    pickle.dump(lr, model_file)
|
8,726 | de77fa677b3b200a41083e609d4da697f9e77f21 | def printall(s):
for i in s:
print i
# Prompt for a word and print its characters one per line.  Python 2:
# raw_input already returns a str, so the str() wrapper is redundant.
n=str(raw_input("Enter Word:- "))
printall(n)
|
8,727 | 7ff029e2f0054146e438f4e4f13269e83e28c469 | import pytest
import kdlc
from shutil import rmtree
import os
# from .context import kdlc
test_generated_dir = os.path.dirname(__file__) + "/generated/"
@pytest.fixture(scope="session")
def my_setup(request):
    """Session-scoped fixture: announce setup and register a finalizer that
    removes the generated-output directory and runs kdlc.cleanup()."""
    print("\nDoing setup")
    def fin():
        # Teardown: drop everything the tests generated, then let kdlc
        # clean up its own state.
        print("\nDoing teardown")
        if os.path.exists(test_generated_dir):
            rmtree(test_generated_dir)
        kdlc.cleanup()
    request.addfinalizer(fin)
|
8,728 | 2e5d66033c2a049ba2423d01792a629bf4b8176d | # -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from typing import Dict
from Tea.core import TeaCore
from alibabacloud_tea_openapi.client import Client as OpenApiClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_endpoint_util.client import Client as EndpointUtilClient
from alibabacloud_cdt20210813 import models as cdt20210813_models
from alibabacloud_tea_util import models as util_models
class Client(OpenApiClient):
    """Auto-generated OpenAPI client for the CDT service, API version
    2021-08-13.

    Each operation exists in four generated flavors: sync/async, each with
    or without explicit RuntimeOptions ("_with_options").  The file header
    says this code is generated — prefer regenerating over hand-editing the
    request/response plumbing.
    """
    def __init__(
        self,
        config: open_api_models.Config,
    ):
        super().__init__(config)
        self._endpoint_rule = ''
        self.check_config(config)
        # Resolve the service endpoint once from region/network/endpoint map.
        self._endpoint = self.get_endpoint('cdt', self._region_id, self._endpoint_rule, self._network, self._suffix, self._endpoint_map, self._endpoint)
    def get_endpoint(
        self,
        product_id: str,
        region_id: str,
        endpoint_rule: str,
        network: str,
        suffix: str,
        endpoint_map: Dict[str, str],
        endpoint: str,
    ) -> str:
        """Pick the endpoint: an explicit value wins, then the per-region
        map, then the product-wide endpoint rules."""
        if not UtilClient.empty(endpoint):
            return endpoint
        if not UtilClient.is_unset(endpoint_map) and not UtilClient.empty(endpoint_map.get(region_id)):
            return endpoint_map.get(region_id)
        return EndpointUtilClient.get_endpoint_rules(product_id, region_id, endpoint_rule, network, suffix)
    # ---- GetCdtServiceStatus -------------------------------------------
    def get_cdt_service_status_with_options(
        self,
        request: cdt20210813_models.GetCdtServiceStatusRequest,
        runtime: util_models.RuntimeOptions,
    ) -> cdt20210813_models.GetCdtServiceStatusResponse:
        UtilClient.validate_model(request)
        req = open_api_models.OpenApiRequest(
            body=UtilClient.to_map(request)
        )
        return TeaCore.from_map(
            cdt20210813_models.GetCdtServiceStatusResponse(),
            self.do_rpcrequest('GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
        )
    async def get_cdt_service_status_with_options_async(
        self,
        request: cdt20210813_models.GetCdtServiceStatusRequest,
        runtime: util_models.RuntimeOptions,
    ) -> cdt20210813_models.GetCdtServiceStatusResponse:
        UtilClient.validate_model(request)
        req = open_api_models.OpenApiRequest(
            body=UtilClient.to_map(request)
        )
        return TeaCore.from_map(
            cdt20210813_models.GetCdtServiceStatusResponse(),
            await self.do_rpcrequest_async('GetCdtServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
        )
    def get_cdt_service_status(
        self,
        request: cdt20210813_models.GetCdtServiceStatusRequest,
    ) -> cdt20210813_models.GetCdtServiceStatusResponse:
        runtime = util_models.RuntimeOptions()
        return self.get_cdt_service_status_with_options(request, runtime)
    async def get_cdt_service_status_async(
        self,
        request: cdt20210813_models.GetCdtServiceStatusRequest,
    ) -> cdt20210813_models.GetCdtServiceStatusResponse:
        runtime = util_models.RuntimeOptions()
        return await self.get_cdt_service_status_with_options_async(request, runtime)
    # ---- OpenCdtService -------------------------------------------------
    def open_cdt_service_with_options(
        self,
        request: cdt20210813_models.OpenCdtServiceRequest,
        runtime: util_models.RuntimeOptions,
    ) -> cdt20210813_models.OpenCdtServiceResponse:
        UtilClient.validate_model(request)
        req = open_api_models.OpenApiRequest(
            body=UtilClient.to_map(request)
        )
        return TeaCore.from_map(
            cdt20210813_models.OpenCdtServiceResponse(),
            self.do_rpcrequest('OpenCdtService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
        )
    async def open_cdt_service_with_options_async(
        self,
        request: cdt20210813_models.OpenCdtServiceRequest,
        runtime: util_models.RuntimeOptions,
    ) -> cdt20210813_models.OpenCdtServiceResponse:
        UtilClient.validate_model(request)
        req = open_api_models.OpenApiRequest(
            body=UtilClient.to_map(request)
        )
        return TeaCore.from_map(
            cdt20210813_models.OpenCdtServiceResponse(),
            await self.do_rpcrequest_async('OpenCdtService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
        )
    def open_cdt_service(
        self,
        request: cdt20210813_models.OpenCdtServiceRequest,
    ) -> cdt20210813_models.OpenCdtServiceResponse:
        runtime = util_models.RuntimeOptions()
        return self.open_cdt_service_with_options(request, runtime)
    async def open_cdt_service_async(
        self,
        request: cdt20210813_models.OpenCdtServiceRequest,
    ) -> cdt20210813_models.OpenCdtServiceResponse:
        runtime = util_models.RuntimeOptions()
        return await self.open_cdt_service_with_options_async(request, runtime)
    # ---- GetCdtCbServiceStatus -----------------------------------------
    def get_cdt_cb_service_status_with_options(
        self,
        request: cdt20210813_models.GetCdtCbServiceStatusRequest,
        runtime: util_models.RuntimeOptions,
    ) -> cdt20210813_models.GetCdtCbServiceStatusResponse:
        UtilClient.validate_model(request)
        req = open_api_models.OpenApiRequest(
            body=UtilClient.to_map(request)
        )
        return TeaCore.from_map(
            cdt20210813_models.GetCdtCbServiceStatusResponse(),
            self.do_rpcrequest('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
        )
    async def get_cdt_cb_service_status_with_options_async(
        self,
        request: cdt20210813_models.GetCdtCbServiceStatusRequest,
        runtime: util_models.RuntimeOptions,
    ) -> cdt20210813_models.GetCdtCbServiceStatusResponse:
        UtilClient.validate_model(request)
        req = open_api_models.OpenApiRequest(
            body=UtilClient.to_map(request)
        )
        return TeaCore.from_map(
            cdt20210813_models.GetCdtCbServiceStatusResponse(),
            await self.do_rpcrequest_async('GetCdtCbServiceStatus', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
        )
    def get_cdt_cb_service_status(
        self,
        request: cdt20210813_models.GetCdtCbServiceStatusRequest,
    ) -> cdt20210813_models.GetCdtCbServiceStatusResponse:
        runtime = util_models.RuntimeOptions()
        return self.get_cdt_cb_service_status_with_options(request, runtime)
    async def get_cdt_cb_service_status_async(
        self,
        request: cdt20210813_models.GetCdtCbServiceStatusRequest,
    ) -> cdt20210813_models.GetCdtCbServiceStatusResponse:
        runtime = util_models.RuntimeOptions()
        return await self.get_cdt_cb_service_status_with_options_async(request, runtime)
    # ---- OpenCdtCbService ----------------------------------------------
    def open_cdt_cb_service_with_options(
        self,
        request: cdt20210813_models.OpenCdtCbServiceRequest,
        runtime: util_models.RuntimeOptions,
    ) -> cdt20210813_models.OpenCdtCbServiceResponse:
        UtilClient.validate_model(request)
        req = open_api_models.OpenApiRequest(
            body=UtilClient.to_map(request)
        )
        return TeaCore.from_map(
            cdt20210813_models.OpenCdtCbServiceResponse(),
            self.do_rpcrequest('OpenCdtCbService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
        )
    async def open_cdt_cb_service_with_options_async(
        self,
        request: cdt20210813_models.OpenCdtCbServiceRequest,
        runtime: util_models.RuntimeOptions,
    ) -> cdt20210813_models.OpenCdtCbServiceResponse:
        UtilClient.validate_model(request)
        req = open_api_models.OpenApiRequest(
            body=UtilClient.to_map(request)
        )
        return TeaCore.from_map(
            cdt20210813_models.OpenCdtCbServiceResponse(),
            await self.do_rpcrequest_async('OpenCdtCbService', '2021-08-13', 'HTTPS', 'POST', 'AK', 'json', req, runtime)
        )
    def open_cdt_cb_service(
        self,
        request: cdt20210813_models.OpenCdtCbServiceRequest,
    ) -> cdt20210813_models.OpenCdtCbServiceResponse:
        runtime = util_models.RuntimeOptions()
        return self.open_cdt_cb_service_with_options(request, runtime)
    async def open_cdt_cb_service_async(
        self,
        request: cdt20210813_models.OpenCdtCbServiceRequest,
    ) -> cdt20210813_models.OpenCdtCbServiceResponse:
        runtime = util_models.RuntimeOptions()
        return await self.open_cdt_cb_service_with_options_async(request, runtime)
|
8,729 | 2c8b8e9767ac8400fb6390e0851d9df10df7cd8c | import os
import torch
from collections import OrderedDict
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import image as mplimg
from torch.nn.functional import upsample
import networks.deeplab_resnet as resnet
from mypath import Path
from dataloaders import helpers as helpers
from maskRCNN.maskrcnn_benchmark.config import cfg
from maskRCNN.demo.predictor_person import COCODemo
from skimage import io
PAD_SIZE = 10
def maskRCNN_model():
    """Build the Mask R-CNN person detector used to seed DEXTR.

    Loads the e2e_mask_rcnn_R_50_FPN_1x config from a hard-coded absolute
    path, forces CPU execution, and returns a COCODemo predictor with a
    0.9 confidence threshold.
    """
    config_file = "/home/raj/data/Raj/IndividualProject/maskRCNN/configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml"
    # update the config options with the config file
    cfg.merge_from_file(config_file)
    # manual override some options
    cfg.merge_from_list(["MODEL.DEVICE", "cpu"])
    coco_demo = COCODemo(
        cfg,
        min_image_size=800,
        confidence_threshold=0.9,
    )
    return coco_demo
def get_maskRCNN_predictions(model, image_path):
    """Run the detector on the image file at *image_path*.

    Returns the (predictions, bbox, masks, heatmap) tuple produced by
    COCODemo.run_on_opencv_image.
    """
    image = io.imread(image_path)
    predictions, bbox, masks, heatmap = model.run_on_opencv_image(image)
    return predictions, bbox, masks, heatmap
# DEXTR configuration: checkpoint name, bounding-box padding for cropping,
# segmentation threshold, and target device (CPU here; CUDA line kept below).
modelName = 'dextr_pascal-sbd'
pad = 50
thres = 0.8
gpu_id = 0
device = torch.device("cpu")
#device = torch.device("cuda:"+str(gpu_id) if torch.cuda.is_available() else "cpu")
# Create the network and load the weights
net = resnet.resnet101(1, nInputChannels=4, classifier='psp')
print("Initializing weights from: {}".format(os.path.join(Path.models_dir(), modelName + '.pth')))
# map_location keeps the checkpoint on CPU regardless of where it was saved.
state_dict_checkpoint = torch.load(os.path.join(Path.models_dir(), modelName + '.pth'),
                                   map_location=lambda storage, loc: storage)
# Remove the prefix .module from the model when it is trained using DataParallel
if 'module.' in list(state_dict_checkpoint.keys())[0]:
    new_state_dict = OrderedDict()
    for k, v in state_dict_checkpoint.items():
        name = k[7:]  # remove `module.` from multi-gpu training
        new_state_dict[name] = v
else:
    new_state_dict = state_dict_checkpoint
net.load_state_dict(new_state_dict)
net.eval()
net.to(device)
# Read image and click the points
#plt.ion()
#plt.axis('off')
#plt.imshow(image)
#plt.title('Click the four extreme points of the objects\nHit enter when done (do not close the window)')
#results = []
def get_extreme_points(BBox):
    """Derive four DEXTR seed points from a detector bounding box.

    Parameters
    ----------
    BBox : sequence whose first element is (x_min, y_min, x_max, y_max).

    Returns
    -------
    np.ndarray of shape (4, 2), int dtype — rows are [top, left, right,
    bottom].  Top/bottom sit on the horizontal midline pushed PAD_SIZE
    pixels outside the box; left/right sit at 95% of the box height pushed
    PAD_SIZE pixels outward (presumably tuned so the side points land near
    a standing person's lower body — TODO confirm against detector output).

    Fix: ``np.int`` (a deprecated alias of the builtin ``int``, removed in
    NumPy 1.24) replaced with ``int``; results are unchanged.  The
    commented-out "mid point"/"original" variants were dead and removed.
    """
    x_min = int(BBox[0][0])
    y_min = int(BBox[0][1])
    x_max = int(BBox[0][2])
    y_max = int(BBox[0][3])
    # Midline of the box, pushed PAD_SIZE px above/below it.
    top = np.array([x_min + (x_max - x_min) * 0.5, y_min - PAD_SIZE])
    bottom = np.array([x_min + (x_max - x_min) * 0.5, y_max + PAD_SIZE])
    # Side points at 95% of the box height, pushed PAD_SIZE px outward.
    left = np.array([x_min - PAD_SIZE, y_min + (y_max - y_min) * 0.95])
    right = np.array([x_max + PAD_SIZE, y_min + (y_max - y_min) * 0.95])
    extreme_points = np.array([top, left, right, bottom]).astype(int)
    return extreme_points
def get_EP_by_mask(mask):
    """Compute the extreme points of a binary mask.

    Parameters
    ----------
    mask : array-like; squeezed to 2-D, nonzero pixels form the object.

    Returns
    -------
    np.ndarray of shape (4, 2), int dtype — rows are [top, left, right,
    bottom] as (x, y) pairs.

    Fix: ``np.int`` (deprecated alias of builtin ``int``, removed in
    NumPy 1.24) replaced with ``int``; results are unchanged.
    NOTE(review): only the bottom point is padded by PAD_SIZE — matches the
    original, but confirm the asymmetry is intentional.
    """
    mask = mask.squeeze()
    # idx[0] holds row (y) indices, idx[1] holds column (x) indices.
    idx = np.nonzero(mask)
    left = [np.min(idx[1]), idx[0][np.argmin(idx[1])]]
    right = [np.max(idx[1]), idx[0][np.argmax(idx[1])]]
    top = [idx[1][np.argmin(idx[0])], np.min(idx[0])]
    bottom = [idx[1][np.argmax(idx[0])], np.max(idx[0]) + PAD_SIZE]
    points = [top, left, right, bottom]
    points = np.array(points).astype(int)
    return points
# Batch inference: for every image under ./ims/, detect a person with
# Mask R-CNN, convert its mask into four extreme points, and feed the image
# plus a point heat map into DEXTR to get a full-resolution segmentation.
# NOTE(review): np.int below is removed in NumPy >= 1.24 — pin NumPy or
# switch to the builtin int.
with torch.no_grad():
    model = maskRCNN_model()
    for path, dirs, files in os.walk("./ims/"):
        for filename in files:
            #extreme_points_ori = np.array(plt.ginput(4, timeout=0)).astype(np.int)
            #extreme_points_ori = np.array(bbox).astype(np.int)
            image_path = path + "/" + filename
            image = np.array(Image.open(image_path))
            # Get the mask for person from maskRCNN and compute the extreme points using the mask
            _, _, mask, _ = get_maskRCNN_predictions(model, image_path)
            extreme_points_ori = get_EP_by_mask(mask)
            #extreme_points_ori = get_extreme_points(BBox)
            #extreme_points_ori = np.array([[205,60],[3,450],[275,475],[560,470]]).astype(np.int)
            # Crop image to the bounding box from the extreme points and resize
            bbox = helpers.get_bbox(image, points=extreme_points_ori, pad=pad, zero_pad=True)
            crop_image = helpers.crop_from_bbox(image, bbox, zero_pad=True)
            resize_image = helpers.fixed_resize(crop_image, (512, 512)).astype(np.float32)
            # Generate extreme point heat map normalized to image values
            extreme_points = extreme_points_ori - [np.min(extreme_points_ori[:, 0]), np.min(extreme_points_ori[:, 1])] + [pad,
                                                                                                                          pad]
            # Rescale the points into the 512x512 crop coordinate frame.
            extreme_points = (512 * extreme_points * [1 / crop_image.shape[1], 1 / crop_image.shape[0]]).astype(np.int)
            extreme_heatmap = helpers.make_gt(resize_image, extreme_points, sigma=10)
            extreme_heatmap = helpers.cstm_normalize(extreme_heatmap, 255)
            # Concatenate inputs and convert to tensor
            input_dextr = np.concatenate((resize_image, extreme_heatmap[:, :, np.newaxis]), axis=2)
            inputs = torch.from_numpy(input_dextr.transpose((2, 0, 1))[np.newaxis, ...])
            # Run a forward pass
            inputs = inputs.to(device)
            outputs = net.forward(inputs)
            outputs = upsample(outputs, size=(512, 512), mode='bilinear', align_corners=True)
            outputs = outputs.to(torch.device('cpu'))
            # Sigmoid over the logits, then map the 512x512 prediction back
            # onto the original image and threshold it into a boolean mask.
            pred = np.transpose(outputs.data.numpy()[0, ...], (1, 2, 0))
            pred = 1 / (1 + np.exp(-pred))
            pred = np.squeeze(pred)
            result = helpers.crop2fullmask(pred, bbox, im_size=image.shape[:2], zero_pad=True, relax=pad) > thres
            #results.append(result)
            results = result
            # Plot the results
            #plt.imshow(//helpers.overlay_masks(image / 255, results))
            #plt.plot(extreme_points_ori[:, 0], extreme_points_ori[:, 1], 'gx')
            out_img = helpers.overlay_masks(image / 255, results)
            mplimg.imsave("./output/output_" + filename, out_img)
|
8,730 | 5848273a76995825f01df53d6beed534e6f9f9fe | #############################################################################
## Crytek Source File
## Copyright (C) 2013, Crytek Studios
##
## Creator: Christopher Bolte
## Date: Oct 31, 2013
## Description: WAF based build system
#############################################################################
from waflib.Configure import conf
def load_linux_x64_common_settings(v):
    """
    Apply the compiler and linker settings shared by every linux_x64
    build configuration to the environment *v*.
    """
    # Every linux_x64 variant compiles with the LINUX64 define.
    v['DEFINES'] += ['LINUX64']
@conf
def load_debug_linux_x64_settings(conf):
    """
    Configure compiler and linker settings for the 'debug' linux_x64
    build variant; currently just the shared linux_x64 settings.
    """
    load_linux_x64_common_settings(conf.env)
@conf
def load_profile_linux_x64_settings(conf):
    """
    Configure compiler and linker settings for the 'profile' linux_x64
    build variant; currently just the shared linux_x64 settings.
    """
    load_linux_x64_common_settings(conf.env)
@conf
def load_performance_linux_x64_settings(conf):
    """
    Configure compiler and linker settings for the 'performance' linux_x64
    build variant; currently just the shared linux_x64 settings.
    """
    load_linux_x64_common_settings(conf.env)
@conf
def load_release_linux_x64_settings(conf):
    """
    Configure compiler and linker settings for the 'release' linux_x64
    build variant; currently just the shared linux_x64 settings.
    """
    load_linux_x64_common_settings(conf.env)
|
8,731 | 7571e86be1077ae0f7ae542824cfcaaa2949dc83 | import numpy as np
from scipy.stats import loguniform
import sys
def generate_parameters(seed):
    """Draw one random hyper-parameter configuration, reproducible by seed.

    Returns a dict with: nfeatures (int, 3..24), lr and penalty
    (log-uniform floats), gamma (uniform float between 0.05 and 0.75),
    and batch (32 or 64).
    """
    np.random.seed(seed)
    params = {}
    params['nfeatures'] = np.random.randint(3, 25)
    params['lr'] = float(loguniform.rvs(0.001, 0.01, size=1))
    params['gamma'] = np.random.uniform(0.75, 0.05)
    params['penalty'] = float(loguniform.rvs(0.00001, 0.1, size=1))
    params['batch'] = np.random.choice([32, 64])
    return params
if __name__ == '__main__':
    # CLI: argv[1] is the integer seed; print the sampled configuration as a
    # ready-to-paste command-line flag string.
    out = generate_parameters(int(sys.argv[1]))
    out_str = '--nfeatures {} --lr {} --gamma {} --penalty {} --batch {}'.format(out['nfeatures'], out['lr'], out['gamma'], out['penalty'], out['batch'])
    print(out_str)
|
8,732 | fe1a9804862942491b11b9baceecd37bf628fbb8 | # addtwo_run-py
"""
Train and test a TCN on the add two dataset.
Trying to reproduce https://arxiv.org/abs/1803.01271.
"""
print('Importing modules')
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
import argparse
import sys
sys.path.append('')
sys.path.append("../../")
from data import AddTwoDataSet
from model import TCN
print('modules imported')
def parse():
    """Build and parse the command-line arguments for the adding-problem run.

    Returns the argparse.Namespace holding dataset sizes, TCN architecture
    hyper-parameters, training options and logging paths.

    NOTE(review): arguments declared with type=bool ('--bias',
    '--leveledinit', '--clip', '--print') treat ANY non-empty string as
    True (bool('False') is True) — consider action='store_true' or an
    explicit str-to-bool converter.
    """
    parser = argparse.ArgumentParser(description='Adding Problem')
    # Dataset sizes and sequence length.
    parser.add_argument(
        '--N_train', type=int, default=50000, metavar='N_train')
    parser.add_argument(
        '--N_test', type=int, default=1000, metavar='N_test')
    parser.add_argument(
        '--seq_length', type=int, default=200, metavar='seq_length')
    parser.add_argument(
        '--batch_size', type=int, default=32, metavar='batch_size')
    # TCN architecture.
    parser.add_argument(
        '--num_layers', type=int, default=8, metavar='num_layers')
    parser.add_argument(
        '--in_channels', type=int, default=2, metavar='in_channels')
    parser.add_argument(
        '--out_channels', type=int, default=1, metavar='out_channels')
    parser.add_argument(
        '--kernel_size', type=int, default=7, metavar='kernel_size')
    parser.add_argument(
        '--res_block_size', type=int, default=30, metavar='res_block_size')
    parser.add_argument(
        '--bias', type=bool, default=True, metavar='bias')
    parser.add_argument(
        '--dropout', type=float, default=0.0, metavar='dropout')
    parser.add_argument(
        '--stride', type=int, default=1, metavar='stride')
    parser.add_argument(
        '--leveledinit', type=bool, default=False, metavar='leveledinit')
    # Training, checkpointing and logging.
    parser.add_argument(
        '--model_save_path', type=str, default='adding_problem/models/tcn_addtwo.pt',
        metavar='model_save_path')
    parser.add_argument(
        '--epochs', type=int, default=10, metavar='epochs')
    parser.add_argument(
        '--lr', type=float, default=2e-3, metavar='lr')
    parser.add_argument(
        '--clip', type=bool, default=False, metavar='clip')
    parser.add_argument(
        '--log_interval', type=int, default=100, metavar='log_interval')
    parser.add_argument(
        '--writer_path', type=str, default='adding_problem/sruns/add_two1',
        metavar='writer_path')
    parser.add_argument(
        '--print', type=bool, default=False, metavar='print')
    parser.add_argument(
        '--num_workers', type=int, default=0, metavar='num_workers')
    args = parser.parse_args()
    return args
def run():
    """Train and evaluate a TCN on the add-two sequence task.

    Builds the datasets and model from the parsed CLI options, trains for
    args.epochs epochs logging to TensorBoard, evaluates on the test set
    after each epoch, and saves the trained weights. Returns 0 on success.
    """
    # These were never imported at module level in this file; import locally
    # so the function is self-contained. NOTE(review): the original project
    # may have used tensorboardX's SummaryWriter — confirm if logs differ.
    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    import torch.optim as optim
    from torch.utils.data import DataLoader
    from torch.utils.tensorboard import SummaryWriter

    torch.manual_seed(1729)
    """ Setup """
    args = parse()
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)
    """ Dataset """
    train_dataset = AddTwoDataSet(N=args.N_train, seq_length=args.seq_length)
    test_dataset = AddTwoDataSet(N=args.N_test, seq_length=args.seq_length)
    train_loader = DataLoader(
        dataset=train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    test_loader = DataLoader(
        dataset=test_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
    """ TCN """
    tcn = TCN(
        num_layers=args.num_layers,
        in_channels=args.in_channels,
        out_channels=args.out_channels,
        kernel_size=args.kernel_size,
        residual_blocks_channel_size=[args.res_block_size] * args.num_layers,
        bias=args.bias,
        dropout=args.dropout,
        stride=args.stride,
        dilations=None,
        leveledinit=args.leveledinit)
    tcn.to(device)
    if args.print:
        print(
            f"""Number of learnable parameters : {
                sum(p.numel() for p in tcn.parameters() if p.requires_grad)}""")
    """ Training parameters"""
    criterion = nn.MSELoss()
    optimizer = optim.Adam(tcn.parameters(), lr=args.lr)
    """ Tensorboard """
    writer = SummaryWriter(args.writer_path)

    for ep in range(1, args.epochs + 1):
        """ TRAIN """
        tcn.train()
        total_loss = 0
        for i, data in enumerate(train_loader):
            x, y = data[0].to(device), data[1].to(device)
            optimizer.zero_grad()
            output = tcn(x)
            loss = F.mse_loss(output, y)
            loss.backward()
            if args.clip:
                # Bug fix: this line previously referenced the undefined names
                # `model` and `clip`. args.clip is a bool flag; a truthy value
                # is used as the max gradient norm (True -> 1.0).
                torch.nn.utils.clip_grad_norm_(tcn.parameters(), float(args.clip))
            optimizer.step()
            total_loss += loss.item()

            if i % args.log_interval == 0:
                cur_loss = total_loss / args.log_interval
                processed = min(i * args.batch_size, args.N_train)
                writer.add_scalar('training_loss', cur_loss, processed)
                if args.print:
                    print(
                        (f"Train Epoch: {ep:2d}"
                         f"[{processed:6d}/{args.N_train:6d}"
                         f"({100.*processed/args.N_train:.0f}%)]"
                         f"\tLearning rate: {args.lr:.4f}\tLoss: {cur_loss:.6f}"))
                total_loss = 0

        """ EVALUATE """
        tcn.eval()
        with torch.no_grad():
            for data in test_loader:
                x, y = data[0].to(device), data[1].to(device)
                output = tcn(x)
                test_loss = criterion(output, y)
            if args.print:
                print(
                    f'\nTest set: Average loss: {test_loss.item():.6f}\n')
            writer.add_scalar('test_loss', test_loss.item(), ep)

    writer.close()
    torch.save(tcn.state_dict(), args.model_save_path)
    print('Finished Training')
    return 0
if __name__ == "__main__":
run()
|
8,733 | 6f216420f641c042bb2772b79c10f904ffa21938 | import pygame
from random import randint, choice
BLACK = (0,0,0)
#----------------------------------------------------------
class Ball(pygame.sprite.Sprite):
    """A rectangular 'ball' sprite with a random initial velocity."""

    def __init__(self, color, width, height):
        """Build the ball surface and pick a random starting velocity."""
        super().__init__()
        # Draw the ball on a surface whose black background is transparent.
        self.image = pygame.Surface([width, height])
        self.image.fill(BLACK)
        self.image.set_colorkey(BLACK)
        pygame.draw.rect(self.image, color, [0, 0, width, height])
        # Horizontal speed is never 0 (the ball must move sideways);
        # vertical speed may be any value in [-8, 8].
        horizontal_speed = choice([v for v in range(-8, 9) if v != 0])
        self.velocity = [horizontal_speed, randint(-8, 8)]
        # Dimensions/location rectangle used for movement and collisions.
        self.rect = self.image.get_rect()

    def update(self):
        """Advance the ball one step along its velocity."""
        self.rect.x += self.velocity[0]
        self.rect.y += self.velocity[1]

    def bounce(self):
        """Reverse horizontal direction and randomise vertical speed."""
        self.velocity[0] = -self.velocity[0]
        self.velocity[1] = randint(-8, 8)

    def destroy(self):
        """Remove the sprite (called when a left/right wall is hit)."""
        self.kill()
#----------------------------------------------------------
|
8,734 | a0310b1bab339064c36ff0fe92d275db7a6c5ba9 | from _math import Vector2, Vector3, Quaternion, Transform, Vector3Immutable, QuaternionImmutable, minimum_distance
from _math import mod_2pi
from math import pi as PI, sqrt, fmod, floor, atan2, acos, asin, ceil, pi, e
import operator
from sims4.repr_utils import standard_repr
import enum
import native.animation
import sims4.hash_util
from singletons import DEFAULT
TWO_PI = PI*2
EPSILON = 1.192092896e-07
QUATERNION_EPSILON = 0.001
MAX_FLOAT = 3.402823466e+38
MAX_UINT64 = 18446744073709551615
MAX_INT64 = 922337203685477580
MAX_UINT32 = 4294967295
MAX_INT32 = 2147483647
MAX_UINT16 = 65535
MAX_INT16 = 32767
POS_INFINITY = float('inf')
NEG_INFINITY = float('-inf')
FORWARD_AXIS = Vector3.Z_AXIS()
UP_AXIS = Vector3.Y_AXIS()
def clamp(lower_bound, x, upper_bound):
    """Return x limited to the closed interval [lower_bound, upper_bound]."""
    if x < lower_bound:
        return lower_bound
    return upper_bound if x > upper_bound else x
def interpolate(a, b, fraction):
    """Linear blend between b (fraction=0) and a (fraction=1).

    NOTE(review): the fraction weights *a*, i.e. interpolate(a, b, 0) == b.
    Callers rely on this reversed convention, so it is preserved.
    """
    return (a * fraction) + (b * (1 - fraction))
def linear_seq_gen(start, stop, step, max_count=None):
    """Yield evenly spaced values from start to stop (inclusive).

    The nominal number of intervals is floor(|stop-start| / step), capped at
    max_count - 1 points when max_count is given. With zero intervals, start
    is yielded, followed by stop when the two differ.
    """
    delta = stop - start
    count = floor(abs(delta / step))
    if max_count is not None:
        count = min(count, max_count - 1)
    if count > 0:
        # count + 1 points, including both endpoints.
        for index in range(count + 1):
            yield start + index * delta / count
    else:
        yield start
        if stop != start:
            yield stop
def deg_to_rad(deg):
    """Convert an angle from degrees to radians."""
    return PI * deg / 180
def rad_to_deg(rad):
    """Convert an angle from radians to degrees."""
    return 180 * rad / PI
def angle_abs_difference(a1, a2):
    """Absolute angular difference between a1 and a2, wrapped to [0, pi]."""
    delta = sims4.math.mod_2pi(a1 - a2)
    # Differences beyond pi come back around the shorter arc.
    return sims4.math.TWO_PI - delta if delta > sims4.math.PI else delta
def vector_dot(a, b):
    """3-D dot product of vectors a and b."""
    return sum((a.x * b.x, a.y * b.y, a.z * b.z))
def vector_dot_2d(a, b):
    """Dot product in the XZ plane (the y component is ignored)."""
    xx = a.x * b.x
    zz = a.z * b.z
    return xx + zz
def vector_cross(a, b):
    """3-D cross product a x b, returned as a new Vector3."""
    cx = a.y * b.z - a.z * b.y
    cy = a.z * b.x - a.x * b.z
    cz = a.x * b.y - a.y * b.x
    return Vector3(cx, cy, cz)
def vector_cross_2d(a, b):
    """Scalar (y-axis) component of the cross product in the XZ plane."""
    lhs = a.z * b.x
    rhs = a.x * b.z
    return lhs - rhs
def vector_normalize(v):
    """Return v scaled to unit length."""
    length = v.magnitude()
    return v / length
def vector_flatten(v):
    """Project v onto the XZ plane (y component zeroed)."""
    return Vector3(v.x, 0, v.z)
def almost_equal(a, b, epsilon=EPSILON):
    """True when a and b differ by strictly less than epsilon."""
    return -epsilon < (a - b) < epsilon
def vector3_almost_equal(v1, v2, epsilon=EPSILON):
    """True when every component of v1 and v2 differs by less than epsilon."""
    return (abs(v1.x - v2.x) < epsilon
            and abs(v1.y - v2.y) < epsilon
            and abs(v1.z - v2.z) < epsilon)
def vector3_almost_equal_2d(v1, v2, epsilon=EPSILON):
    """True when the x and z components of v1 and v2 differ by less than epsilon."""
    return (abs(v1.x - v2.x) < epsilon
            and abs(v1.z - v2.z) < epsilon)
def quaternion_almost_equal(q1, q2, epsilon=QUATERNION_EPSILON):
    """True when q1 and q2 represent nearly the same rotation.

    q and -q encode the same rotation, so both sign conventions are checked.
    """
    same_sign = all(abs(c1 - c2) < epsilon
                    for c1, c2 in ((q1.x, q2.x), (q1.y, q2.y), (q1.z, q2.z), (q1.w, q2.w)))
    if same_sign:
        return True
    flipped_sign = all(abs(c1 + c2) < epsilon
                       for c1, c2 in ((q1.x, q2.x), (q1.y, q2.y), (q1.z, q2.z), (q1.w, q2.w)))
    return flipped_sign
def transform_almost_equal(t1, t2, epsilon=EPSILON, epsilon_orientation=QUATERNION_EPSILON):
    # True when both the translations and orientations of t1/t2 are almost equal.
    # NOTE(review): the DEFAULT check below only fires when a caller passes
    # DEFAULT explicitly — the declared default is QUATERNION_EPSILON, not
    # DEFAULT, so this branch looks like a leftover; confirm intent.
    if epsilon_orientation is DEFAULT:
        epsilon_orientation = epsilon
    return vector3_almost_equal(t1.translation, t2.translation, epsilon=epsilon) and quaternion_almost_equal(t1.orientation, t2.orientation, epsilon=epsilon_orientation)
def transform_almost_equal_2d(t1, t2, epsilon=EPSILON, epsilon_orientation=QUATERNION_EPSILON):
    # XZ-plane variant of transform_almost_equal (translation y is ignored).
    # NOTE(review): as above, the DEFAULT fallback is unreachable with the
    # declared default value — confirm whether DEFAULT was intended.
    if epsilon_orientation is DEFAULT:
        epsilon_orientation = epsilon
    return vector3_almost_equal_2d(t1.translation, t2.translation, epsilon=epsilon) and quaternion_almost_equal(t1.orientation, t2.orientation, epsilon=epsilon_orientation)
def vector3_rotate_axis_angle(v, angle, axis):
    """Rotate vector v by *angle* radians about *axis*."""
    return Quaternion.from_axis_angle(angle, axis).transform_vector(v)
def vector3_angle(v):
    """Heading of v in the XZ plane, in radians (0 along +Z)."""
    x, z = v.x, v.z
    return atan2(x, z)
def angle_to_yaw_quaternion(angle):
    # Quaternion rotating by *angle* radians about the world up (Y) axis.
    return Quaternion.from_axis_angle(angle, UP_AXIS)
def yaw_quaternion_to_angle(q):
    """Extract the signed yaw angle (radians) from a pure-yaw quaternion."""
    if almost_equal(q.y, 0.0):
        return 0
    # w = cos(angle / 2); q.y's sign carries the rotation direction.
    angle = acos(q.w) * 2.0
    return angle if q.y > 0 else -angle
def get_closest_point_2D(segment, p):
    """Closest point to p on *segment*, computed in the XZ plane.

    The returned Vector3 keeps p's original y component.
    """
    seg_start, seg_end = segment[0], segment[1]
    dx = seg_end.x - seg_start.x
    dz = seg_end.z - seg_start.z
    # Parametric position of p's projection, clamped onto the segment.
    t = ((p.x - seg_start.x) * dx + (p.z - seg_start.z) * dz) / (dx * dx + dz * dz)
    t = sims4.math.clamp(0, t, 1)
    return Vector3(seg_start.x + t * dx, p.y, seg_start.z + t * dz)
def invert_quaternion(q):
    """Inverse of quaternion q: conjugate divided by the squared norm."""
    norm_sq = q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w
    d = 1.0 / norm_sq
    return Quaternion(-d * q.x, -d * q.y, -d * q.z, d * q.w)
def get_difference_transform(transform_a, transform_b):
    """Transform of b expressed relative to a's frame of reference."""
    offset = transform_b.translation - transform_a.translation
    inv_a = invert_quaternion(transform_a.orientation)
    relative_rotation = Quaternion.concatenate(transform_b.orientation, inv_a)
    local_offset = Quaternion.transform_vector(inv_a, offset)
    return Transform(local_offset, relative_rotation)
class Location:
    """A position in the world: a transform plus routing surface, optionally
    attached to a parent object/joint (decompiled Sims 4 code)."""
    __qualname__ = 'Location'
    __slots__ = ('transform', 'routing_surface', '_parent_ref', 'joint_name_or_hash', 'slot_hash')

    def __init__(self, transform, routing_surface, parent=None, joint_name_or_hash=None, slot_hash=0):
        self.transform = transform
        self.routing_surface = routing_surface
        # Note: the parent setter below clears routing_surface when a parent
        # is supplied.
        self.parent = parent
        self.joint_name_or_hash = joint_name_or_hash
        self.slot_hash = slot_hash

    def __repr__(self):
        return standard_repr(self, self.transform, self.routing_surface, parent=self.parent, joint_name_or_hash=self.joint_name_or_hash, slot_hash=self.slot_hash)

    def __eq__(self, other):
        # Exact-type equality over transform, parent, routing surface, and
        # the effective slot identity (joint name/hash falls back to slot_hash).
        if type(self) is not type(other):
            return False
        if self.transform != other.transform:
            return False
        if self.parent != other.parent:
            return False
        if self.routing_surface != other.routing_surface:
            return False
        slot_hash0 = self.joint_name_or_hash or self.slot_hash
        slot_hash1 = other.joint_name_or_hash or other.slot_hash
        if slot_hash0 != slot_hash1:
            return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    @property
    def parent(self):
        # Parent is held via a weak reference; returns None once collected.
        if self._parent_ref is not None:
            return self._parent_ref()

    @parent.setter
    def parent(self, value):
        if value is not None:
            self._parent_ref = value.ref()
            # Parented locations derive their surface from the parent chain.
            self.routing_surface = None
        else:
            self._parent_ref = None

    @property
    def joint_name_hash(self):
        # Normalise joint_name_or_hash to a 32-bit hash (0 when unset).
        if self.joint_name_or_hash is None:
            return 0
        if isinstance(self.joint_name_or_hash, int):
            return self.joint_name_or_hash
        return sims4.hash_util.hash32(self.joint_name_or_hash)

    @property
    def world_routing_surface(self):
        # Walk up the parent chain to the surface of the root location.
        if self.parent is not None:
            return self.parent.location.world_routing_surface
        return self.routing_surface

    @property
    def zone_id(self):
        # Surface type 1 encodes the zone id in primary_id; otherwise ask the
        # zone utilities for the current zone.
        if self.world_routing_surface.type == 1:
            return self.world_routing_surface.primary_id
        return sims4.zone_utils.get_zone_id()

    @property
    def level(self):
        return self.world_routing_surface.secondary_id

    @property
    def world_transform(self):
        # Compose this location's transform with parent (and joint) transforms
        # to produce an absolute world-space transform.
        if self.parent is None:
            return self.transform
        transform = self.transform
        parent = self.parent
        if parent.is_part:
            parent_transform = parent.part_owner.transform
        else:
            parent_transform = parent.transform
        if self.joint_name_or_hash is None:
            if transform is None:
                return parent_transform
            return sims4.math.Transform.concatenate(transform, parent_transform)
        joint_transform = native.animation.get_joint_transform_from_rig(self.parent.rig, self.joint_name_or_hash)
        if transform is None:
            return sims4.math.Transform.concatenate(joint_transform, parent_transform)
        local_transform = sims4.math.Transform.concatenate(transform, joint_transform)
        return sims4.math.Transform.concatenate(local_transform, parent_transform)

    def duplicate(self):
        # Shallow copy with the same parent/joint/slot identity.
        return type(self)(self.transform, self.routing_surface, self.parent, self.joint_name_or_hash, self.slot_hash)

    def clone(self, *, transform=DEFAULT, translation=DEFAULT, orientation=DEFAULT, routing_surface=DEFAULT, parent=DEFAULT, joint_name_or_hash=DEFAULT, slot_hash=DEFAULT):
        # Copy with selective overrides; DEFAULT means "keep current value".
        if transform is DEFAULT:
            transform = self.transform
        if transform is not None:
            if translation is DEFAULT:
                translation = transform.translation
            if orientation is DEFAULT:
                orientation = transform.orientation
            transform = Transform(translation, orientation)
        if routing_surface is DEFAULT:
            routing_surface = self.routing_surface
        if parent is DEFAULT:
            parent = self.parent
        if joint_name_or_hash is DEFAULT:
            joint_name_or_hash = self.joint_name_or_hash
        if slot_hash is DEFAULT:
            slot_hash = self.slot_hash
        return type(self)(transform, routing_surface, parent, joint_name_or_hash, slot_hash)
class LinearCurve:
    """Piecewise-linear curve over (x, y) control points (decompiled code)."""
    __qualname__ = 'LinearCurve'
    __slots__ = ('points',)

    def __init__(self, points):
        # Points are sorted in place by x so get() can interpolate in order.
        self.points = points
        self.points.sort(key=lambda i: i[0])

    def get(self, val):
        # Linearly interpolate the y value at x=val, clamping outside the
        # range of control points.
        p_max = len(self.points) - 1
        if val <= self.points[0][0]:
            return self.points[0][1]
        if val >= self.points[p_max][0]:
            return self.points[p_max][1]
        i = p_max - 1
        # NOTE(review): decompiler artifact — the outer `while` behaves like
        # an `if` because the body always returns on its first pass; the
        # inner `while` scans left for the bracketing segment.
        while i > 0:
            while val < self.points[i][0]:
                i -= 1
            p1 = self.points[i]
            p2 = self.points[i + 1]
            percent = (val - p1[0])/(p2[0] - p1[0])
            return (p2[1] - p1[1])*percent + p1[1]
class WeightedUtilityCurve(LinearCurve):
    """LinearCurve whose y values are normalised by max_y and scaled by weight."""
    __qualname__ = 'WeightedUtilityCurve'

    def __init__(self, points, max_y=0, weight=1):
        # max_y == 0 means "derive it from the points".
        if max_y == 0:
            max_y = self._find_largest_y(points)
        scaled_points = [(point[0], point[1] / max_y * weight)
                         for point in points]
        super().__init__(scaled_points)

    def _find_largest_y(self, points):
        # Largest y value, never below 0 (mirrors the original accumulator
        # that started at 0).
        return max([0] + [point[1] for point in points])
class CircularUtilityCurve(LinearCurve):
    """LinearCurve over a circular domain [min_x, max_x]: synthetic endpoint
    points are inserted so interpolation wraps around (decompiled code)."""
    __qualname__ = 'CircularUtilityCurve'

    def __init__(self, points, min_x, max_x):
        super().__init__(points)
        self._min_x = min_x
        self._max_x = max_x
        last_point = self.points[-1]
        distance_to_end = max_x - last_point[0]
        # NOTE(review): the pivot formula below mixes first-point y values in
        # a way that looks like a decompile artifact (self.points[0][1] is
        # used both as a length and as the interpolation base) — confirm
        # against the original before changing.
        total_length = distance_to_end + self.points[0][1]
        distance_to_pivot_point = distance_to_end/total_length
        pivot_y_value = (self.points[0][1] - last_point[1])*distance_to_pivot_point + self.points[0][1]
        # Anchor both ends of the domain at the computed wrap-around value.
        self.points.insert(0, (0, pivot_y_value))
        self.points.insert(len(self.points), (self._max_x, pivot_y_value))

    def get(self, val):
        return super().get(val)
class Operator(enum.Int):
    """Comparison operators with conversions to/from the functions in the
    stdlib `operator` module (decompiled Sims 4 enum)."""
    __qualname__ = 'Operator'
    GREATER = 1
    GREATER_OR_EQUAL = 2
    EQUAL = 3
    NOTEQUAL = 4
    LESS_OR_EQUAL = 5
    LESS = 6

    @staticmethod
    def from_function(fn):
        # Map an `operator` module comparison function to its enum member;
        # returns None implicitly for unknown functions.
        if fn == operator.gt:
            return Operator.GREATER
        if fn == operator.ge:
            return Operator.GREATER_OR_EQUAL
        if fn == operator.eq:
            return Operator.EQUAL
        if fn == operator.ne:
            return Operator.NOTEQUAL
        if fn == operator.le:
            return Operator.LESS_OR_EQUAL
        if fn == operator.lt:
            return Operator.LESS

    @property
    def function(self):
        # The `operator` module function implementing this comparison.
        if self.value == Operator.GREATER:
            return operator.gt
        if self.value == Operator.GREATER_OR_EQUAL:
            return operator.ge
        if self.value == Operator.EQUAL:
            return operator.eq
        if self.value == Operator.NOTEQUAL:
            return operator.ne
        if self.value == Operator.LESS_OR_EQUAL:
            return operator.le
        if self.value == Operator.LESS:
            return operator.lt

    @property
    def inverse(self):
        # Logical complement (e.g. > inverts to <=).
        if self == Operator.GREATER:
            return Operator.LESS_OR_EQUAL
        if self == Operator.GREATER_OR_EQUAL:
            return Operator.LESS
        if self == Operator.EQUAL:
            return Operator.NOTEQUAL
        if self == Operator.NOTEQUAL:
            return Operator.EQUAL
        if self == Operator.LESS_OR_EQUAL:
            return Operator.GREATER
        if self == Operator.LESS:
            return Operator.GREATER_OR_EQUAL

    @property
    def symbol(self):
        # Human-readable operator symbol, used by Threshold.__str__.
        if self == Operator.GREATER:
            return '>'
        if self == Operator.GREATER_OR_EQUAL:
            return '>='
        if self == Operator.EQUAL:
            return '=='
        if self == Operator.NOTEQUAL:
            return '!='
        if self == Operator.LESS_OR_EQUAL:
            return '<='
        if self == Operator.LESS:
            return '<'

    @property
    def category(self):
        # Coarse grouping: GREATER-ish, EQUAL-ish, or LESS-ish.
        if self == Operator.GREATER:
            return Operator.GREATER
        if self == Operator.GREATER_OR_EQUAL:
            return Operator.GREATER
        if self == Operator.EQUAL:
            return Operator.EQUAL
        if self == Operator.NOTEQUAL:
            return Operator.EQUAL
        if self == Operator.LESS_OR_EQUAL:
            return Operator.LESS
        if self == Operator.LESS:
            return Operator.LESS
class InequalityOperator(enum.Int):
    """Subset of Operator restricted to the strict/loose inequalities."""
    __qualname__ = 'InequalityOperator'
    GREATER = Operator.GREATER
    GREATER_OR_EQUAL = Operator.GREATER_OR_EQUAL
    LESS_OR_EQUAL = Operator.LESS_OR_EQUAL
    LESS = Operator.LESS

# Borrow Operator's helpers so InequalityOperator exposes the same API.
# (sims4's enum reload context allows mutating the class after creation.)
with InequalityOperator.__reload_context__(InequalityOperator, InequalityOperator):
    InequalityOperator.from_function = Operator.from_function
    InequalityOperator.function = Operator.function
    InequalityOperator.inverse = Operator.inverse
    InequalityOperator.symbol = Operator.symbol
    InequalityOperator.category = Operator.category
class Threshold:
    """A comparison against a fixed reference value.

    `comparison` is a two-argument predicate (e.g. operator.lt); `value` is
    the right-hand side. An unconfigured threshold always compares False.
    """
    __qualname__ = 'Threshold'
    __slots__ = ('value', 'comparison')

    def __init__(self, value=None, comparison=None):
        self.value = value
        self.comparison = comparison

    def compare(self, source_value):
        """Apply the comparison to source_value; False when unconfigured."""
        if self.value is None or self.comparison is None:
            return False
        return self.comparison(source_value, self.value)

    def compare_value(self, source_value):
        """Like compare(), but compares the .value attributes of both sides."""
        if self.value is None or self.comparison is None:
            return False
        return self.comparison(source_value.value, self.value.value)

    def inverse(self):
        """Return a Threshold testing the logical complement."""
        flipped = Operator.from_function(self.comparison).inverse.function
        return Threshold(self.value, flipped)

    def __str__(self):
        if self.comparison is None:
            return 'None'
        return '{} {}'.format(Operator.from_function(self.comparison).symbol, self.value)

    def __repr__(self):
        return '<Threshold {}>'.format(str(self))

    def __eq__(self, other):
        if not isinstance(other, Threshold):
            return False
        if not self.value == other.value:
            return False
        if not self.comparison == other.comparison:
            return False
        return True

    def __hash__(self):
        return hash((self.value, self.comparison))
|
8,735 | 923a2979df3c37583eec712880ad821541bd898b | import numpy as np
import matplotlib.pyplot as plt
# Raw confusion counts: rows/columns are class indices 0-6.
conf_arr = [[ 2987, 58, 955, 832, 1991, 181, 986],
            [ 142, 218, 195, 44, 235, 11, 27],
            [ 524, 8, 3482, 478, 2406, 708, 588],
            [ 140, 0, 386, 12491, 793, 182, 438],
            [ 368, 15, 883, 635, 6331, 71, 1357],
            [ 77, 0, 942, 394, 223, 4530, 176],
            [ 224, 7, 601, 929, 2309, 99, 5761]]
# Transpose so each row of the working array sums over one class.
conf_arr = np.transpose( np.array(conf_arr) )
# Row-normalise counts into per-class proportions for the colour map.
norm_conf = []
for i in conf_arr:
    a = 0
    tmp_arr = []
    a = sum(i, 0)
    for j in i:
        tmp_arr.append(float(j)/float(a))
    norm_conf.append(tmp_arr)
# Render the normalised matrix as a heat map with the raw counts overlaid.
fig = plt.figure()
plt.clf()
ax = fig.add_subplot(111)
ax.set_aspect(1)
res = ax.imshow(np.array(norm_conf), cmap=plt.cm.jet,
                interpolation='nearest')
width, height = conf_arr.shape
# Annotate every cell with its raw count.
for x in range(width):
    for y in range(height):
        ax.annotate(str(conf_arr[x][y]), xy=(y, x),
                    horizontalalignment='center',
                    verticalalignment='center')
cb = fig.colorbar(res)
# Tick labels are the digit characters '0'..'6'.
alphabet = '0123456789'
plt.xticks(range(width), alphabet[:width])
plt.yticks(range(height), alphabet[:height])
plt.xlabel('Predicted Label')
plt.ylabel('True Label')
plt.savefig('confusion_matrix.png', format='png')
|
8,736 | c664257d64b269002964ce95c05f132e563a65d4 | from __future__ import division
rates = { "GBP-EUR":1.10, "EUR-USD":1.11, "GBP-USD":1.22, "GBP-YEN": 129.36 }
def find(rates, fx):
    """Look up the conversion rate for currency pair *fx* (e.g. "GBP-EUR").

    :param rates: dict mapping "AAA-BBB" pair strings to float rates
    :param fx: the pair string to look up
    :return: the rate, or -1 when the pair is not present (sentinel the
             caller checks to try the reversed pair)
    """
    # Bug fix: the previous bare `except:` swallowed every exception
    # (including typos like a non-dict `rates`); dict.get expresses the
    # intended "missing key -> -1" directly.
    return rates.get(fx, -1)
def getInputs():
    # Prompt for an amount and two 3-letter currency codes (Python 2 only:
    # uses raw_input and print statements). Recurses until the inputs are
    # valid; entering -999 for any field aborts with (0, "", "").
    amount = raw_input("Enter amount: ")
    firstCurrency = raw_input("Enter Currency To Convert From: ")
    secCurrency = raw_input("Enter Currency To Convert To: ")
    try:
        fAmount = float(amount)
        sFirst = str(firstCurrency)
        sSecond = str(secCurrency)
        if fAmount>0 and len(sFirst)==3 and len(sSecond)==3:
            return fAmount, sFirst, sSecond
    except Exception as e:
        print e
    else:
        # Reached when conversion succeeded but the validity check failed.
        print "Please specify a positive number and a Currency Symbol e.g. USD"
    if amount=="-999" or firstCurrency=="-999" or secCurrency=="-999":
        return 0,"","" #Something to escape the recursion
    return getInputs()
def main():
    # Convert an amount between two currencies, trying the reversed pair
    # (1/rate) when the direct pair is missing. Recurses on unknown pairs.
    # (Python 2 only: print statement below.)
    amount,currency1,currency2 = getInputs()
    rate = find(rates,currency1 + "-" + currency2)
    if rate == -1:
        rate = 1/ find(rates,currency2 + "-" + currency1) #Try the other way around
        if rate == -1:
            # Neither direction exists (1/-1 == -1 with true division).
            print "Currency Pair Not Found" #Neither way works, wrong inputs
            return main()
    return "{} {} converted to {} is: {:.2f}".format(amount,currency1,currency2,rate*amount)
print main()
|
8,737 | 1dd5c25cd3b7bc933ba0b63d9a42fdddc92b8531 | import os
import lasagne
import theano
import theano.tensor as T
import numpy as np
from lasagne.layers import Conv2DLayer,\
MaxPool2DLayer,\
InputLayer
from lasagne.nonlinearities import elu, sigmoid, rectify
from lasagne.regularization import l2, regularize_layer_params
from utils.maxpool_multiply import MaxPoolMultiplyLayer
from models.cascade_base import CascadeBase
class FaceTrigger(CascadeBase):
    """Cascade detector network built with Lasagne (Theano backend)."""

    def build_network(self):
        # Returns (out, downsampled_activation_layers, masked_out):
        #   out     — per-pixel sigmoid prediction map
        #   downsampled_activation_layers — cumulative per-cascade gate maps
        #   masked_out — prediction gated by the final cascade mask
        # Shapes/hyperparameters come from self.* set by CascadeBase —
        # assumed configured by the caller (TODO confirm).
        net = lasagne.layers.batch_norm(InputLayer((None, 1) + tuple(self.img_shape),
                                                   self.input_X,
                                                   name='network input'))
        convs = []
        # Build network: conv + max-pool stack, remembering each conv output
        # so a decision branch can be attached to it below.
        for i in range(self.num_cascades):
            net = lasagne.layers.batch_norm(Conv2DLayer(net,
                                                        nonlinearity=elu,
                                                        num_filters=self.num_filters[i],
                                                        filter_size=self.filter_sizes[i],
                                                        pad='same',
                                                        name='conv {}'.format(i + 1)))
            convs.append(net)
            net = MaxPool2DLayer(net,
                                 pool_size=self.pool_sizes[i],
                                 name='Max Pool {} {}'.format(i + 1, i + 2))

        out = Conv2DLayer(net,
                          nonlinearity=sigmoid,
                          num_filters=1,
                          filter_size=1,
                          pad='same',
                          name='prediction layer')

        branches = [None] * self.num_cascades
        # Build branches: one 1x1 sigmoid "decide" head per cascade stage.
        for i in range(self.num_cascades):
            branches[i] = Conv2DLayer(convs[i],
                                      num_filters=1,
                                      filter_size=1,
                                      nonlinearity=sigmoid,
                                      name='decide network {} output'.format(i + 1))

        # Chain the branch outputs: each stage's gate is multiplied by the
        # (pooled) gate of the previous stage, so later stages only fire
        # where every earlier stage fired.
        downsampled_activation_layers = [branches[0]]
        for i in range(self.num_cascades - 1):
            downsampled_activation_layers.append(MaxPoolMultiplyLayer(branches[i + 1],
                                                                      downsampled_activation_layers[-1],
                                                                      self.pool_sizes[i]))
        masked_out = MaxPoolMultiplyLayer(out,
                                          downsampled_activation_layers[-1],
                                          self.pool_sizes[-1])
        return out, downsampled_activation_layers, masked_out
8,738 | 70f2fc6873a78305c74e3c3ad04cb24d72019d56 | i = 0
real_value = 8
# Simple guessing game: the player gets up to 4 attempts (i starts at 0 above).
while i <= 3:
    guess = int(input('Guess: '))
    if guess == real_value:
        print('You Win!')
        break
    else:
        print('You lose')
        i += 1  # bug fix: the counter was never advanced, so the loop never ended
|
8,739 | 876e9f03c908338a247b6bf1f23011e609bbc2a5 | #!/usr/bin/python
__author__ = "morganlnance"
'''
Analysis functions using PyRosetta4
'''
def get_sequence(pose, res_nums=None):
    # type: (Pose, list) -> str
    """
    Return the sequence of the <pose>, or only the residues in <res_nums>
    :param pose: Pose
    :param res_nums: list() of Pose residue numbers
    :return: str(Pose sequence)
    """
    # Whole sequence when no residue subset was requested.
    if res_nums is None:
        return str(pose.sequence())
    letters = [pose.residue(num).name1() for num in res_nums]
    return str(''.join(letters))
def get_atom_pair_distance(pose, res1, atom1, res2, atom2):
    """
    Get the xyz distance between <atom1> of <res1> to <atom2> in <res2> in the <pose>
    :param pose: Pose
    :param res1: int(residue number)
    :param atom1: int(atom number)
    :param res2: int(residue number)
    :param atom2: int(atom number)
    :return: float(xyz distance)
    """
    first_atom = pose.residue(res1).atom(atom1)
    second_atom = pose.residue(res2).atom(atom2)
    # Euclidean distance between the two atoms' coordinates.
    return float(first_atom.xyz().distance(second_atom.xyz()))
def write_fasta_file(pdb_names, pdb_sequences, filename, dump_dir=''):
    """
    Use a list of <pdb_names> and their corresponding <pdb_sequences> to write out a FASTA formatted file
    Need a <filename> to work with. Include a path to a dump directory, if desired
    :param pdb_names: list(pdb names)
    :param pdb_sequences: list(pdb sequences)
    :param filename: str(filename)
    :param dump_dir: str(optional directory to write the file into)
    :return: bool(True on success, False when the lists' lengths differ)
    """
    import os.path

    # ensure that the pdb_names and pdb_sequences lists are the same length
    if len(pdb_names) != len(pdb_sequences):
        return False
    # add .txt to the filename, if needed
    if not filename.endswith(".txt"):
        filename += ".txt"
    # honor dump_dir (previously accepted but ignored); '' leaves the
    # filename unchanged, so existing callers are unaffected
    filename = os.path.join(dump_dir, filename)
    # write out the fasta file
    with open(filename, 'w') as fh:
        for pdb_name, pdb_seq in zip(pdb_names, pdb_sequences):
            # bug fix: the original applied '%' to fh.write()'s return value
            # (None), raising TypeError on the first record; format first
            fh.write(">%s\n%s\n" % (pdb_name, pdb_seq))
    return True
|
8,740 | a9e5d4d48f96974da772f47a4c20ebc96bc31d85 | #! /usr/bin/env python
import os
import glob
import math
from array import array
import sys
import time
import subprocess
import ROOT
# Python 2 only: the leading-zero literals below (01, 02, ...) are plain
# decimal ints in Py2 but a SyntaxError in Python 3.
# Scan points for the Higgs combination: masses, c' values, BRnew values.
mass=[600,700,800,900,1000]
cprime=[01,02,03,05,07,10]
BRnew=[00,01,02,03,04,05]
# For every (mass, cprime, BRnew) combination, merge the per-job ROOT files
# into one ProfileLikelihood output with hadd (invoked via the shell).
for i in range(len(mass)):
    for j in range(len(cprime)):
        for k in range(len(BRnew)):
            command="hadd -f cards_combo/higgsCombinehwwlvj_pval_exp_ggH%03d_combo_%02d_%02d_unbin.ProfileLikelihood.mH%03d.root cards_combo/higgsCombinehwwlvj_pval_exp_ggH%03d_combo_%02d_%02d_unbin_*"%(mass[i],cprime[j],BRnew[k],mass[i],mass[i],cprime[j],BRnew[k]);
            os.system(command);
|
8,741 | 8b7894e274647e48e3a1fe12473937bd6c62e943 | from torch.utils.data import DataLoader
from config import Config
from torchnet import meter
import numpy as np
import torch
from torch import nn
from tensorboardX import SummaryWriter
from Funcs import MAvgMeter
from vae.base_vae import VAE
from vae.data_util import Zinc_dataset
import time
import torch.optim
class Trainer():
    """Training/evaluation loop for a graph VAE (torchnet + tensorboardX)."""

    def __init__(self, model=None, opt=Config()):
        # NOTE(review): Config() as a default argument is evaluated once at
        # class definition and shared across Trainer instances — confirm
        # that is intended.
        self.model = model
        self.opt = opt
        self.criterion = opt.criterion
        self.pred_id = self.opt.predictor_id
        self.optimizer = opt.optimizer(self.model.parameters(), lr=opt.lr)
        self.log_path = opt.LOGS_PATH
        # One TensorBoard run directory per launch, keyed by timestamp.
        self.writer = SummaryWriter(log_dir=self.opt.LOGS_PATH + '/' + time.strftime('%m%d_%H_%M'))
        if opt.use_gpu:
            torch.set_default_tensor_type(torch.cuda.FloatTensor)

    def train(self, train_data, val_data=None):
        # Full training loop: per-batch backprop, periodic TensorBoard
        # logging, optional validation, and LR decay when the epoch loss
        # stops improving.
        print('Now Begin Training!')
        train_loader = DataLoader(train_data, batch_size=self.opt.batch_size, shuffle=True)
        if self.opt.use_gpu:
            self.model.cuda()
        # meter initialize
        loss_meter = meter.AverageValueMeter()
        abs_losses = MAvgMeter(self.pred_id)
        previous_loss = 1e10
        for epoch in range(self.opt.max_epoch):
            loss_meter.reset()
            abs_losses.reset()
            # train
            for i, (N, A, label) in enumerate(train_loader):
                if self.opt.use_gpu:
                    N = N.type(torch.long).cuda()
                    A = A.cuda()
                    label = {key: value.cuda() for key, value in label.items()}
                # label = torch.unsqueeze(label, 1)  # patch for a data-preprocessing issue
                self.optimizer.zero_grad()
                output = self.model(N, A, label)
                loss = output['loss']
                loss.backward()
                self.optimizer.step()
                abs_losses.add(output['visloss'])
                loss_meter.add(loss.data.cpu())
                # tensorboard visualize module
                if i % self.opt.print_feq == self.opt.print_feq - 1:
                    nither = epoch * len(train_loader) + i
                    print('EPOCH:{0},i:{1},loss:{2}'.format(epoch, i, loss.data.cpu()), end=' ')
                    self.writer.add_scalar('train_loss', loss_meter.value()[0], nither)
                    for key in self.pred_id:
                        self.writer.add_scalar(key, abs_losses.value(key)[0], nither)
                        print(key, float(abs_losses.value(key)[0]), end=' ')
                    print('\n')
            if val_data:
                val_loss = self.test(val_data, val=True)
                print('val loss:', val_loss)
                self.writer.add_scalar('val_loss', val_loss, epoch)
            print('!!!!!!now{0},previous{1}'.format(loss_meter.value()[0], previous_loss))
            # Decay the learning rate when the mean epoch loss did not improve.
            if loss_meter.value()[0] >= previous_loss:
                self.opt.lr = self.opt.lr * self.opt.lr_decay
                print('!!!!!LR:', self.opt.lr)
                for param_group in self.optimizer.param_groups:
                    param_group['lr'] = self.opt.lr
            previous_loss = loss_meter.value()[0]

    def test(self, test_data, val=False):
        # Evaluation pass; with val=True returns the mean loss. The non-val
        # result path is currently commented out and returns None.
        # NOTE(review): the model is put into eval() but never switched back
        # to train() (that line is commented out) — confirm before reuse.
        if self.opt.use_gpu:
            self.model.cuda()
        self.model.eval()
        test_loader = DataLoader(test_data, batch_size=self.opt.batch_size, shuffle=True)
        result = []
        loss_meter = meter.AverageValueMeter()
        for i, (H, A, label) in enumerate(test_loader):
            # convert data formats for the configured device
            if self.opt.use_gpu:
                H = H.type(torch.long).cuda()
                A = A.cuda()
                label = {key: value.cuda() for key, value in label.items()}
            # label = torch.unsqueeze(label, 1)  # patch for a data-preprocessing issue
            loss = self.model(H, A, label)['loss']
            loss_meter.add(loss.data.cpu().detach().numpy())
        #
        #
        # if not val:
        #     result.append(score.cpu().detach().numpy())
        #
        #
        #
        # self.model.train()
        if val:
            return loss_meter.value()[0]
        # else:
        #     result = np.stack(result)
        #     return result,loss_meter.value()[0]
# begin main training
# Default to CPU tensors for setup; Trainer switches to CUDA when enabled.
torch.set_default_tensor_type(torch.FloatTensor)
dconfig = Config()
dconfig.optimizer = torch.optim.Adam
dconfig.lr = 5e-3
dconfig.res_connection = True
dconfig.encoder_layers = 40
dconfig.node_feature_dim = 100
dconfig.batch_size = 50
# Machine-specific paths; set load_path to resume from a checkpoint.
zinc_path = '/home/jeffzhu/nips_gail/MCTs/dataset/datasets/zinc_dataset_clean.pkl'
load_path = None
GAVAE = VAE(dconfig)
if load_path:
    GAVAE.load_state_dict(torch.load(load_path))
# 150000 training molecules, 1000 validation; remainder is the test split
# (per Zinc_dataset's signature — TODO confirm).
train_data, val_data, test_data = Zinc_dataset(zinc_path, 150000, 1000, dconfig.predictor_id).Get_data()
VAE_trainer = Trainer(model=GAVAE, opt=dconfig)
print(GAVAE)
VAE_trainer.train(train_data, val_data=val_data)
GAVAE.save()
print('save success')
|
8,742 | 4e86dd74374297c3b0ce8fea93910003dac7d5d7 | import random
from PyQt4.QtGui import (
QWidget, QHBoxLayout, QPushButton, QMainWindow, QIcon, QAction, QShortcut,
QKeySequence, QFileDialog, QMessageBox)
from PyQt4 import QtCore
class Controls(QWidget):
    """Horizontal row of player buttons (Open / Play / Next) with
    keyboard shortcuts (right-arrow = Next, space = Play/Pause)."""

    def __init__(self, parent):
        super(Controls, self).__init__(parent)
        self.layout = QHBoxLayout(self)
        self.openButton = QPushButton('Open', self)
        self.layout.addWidget(self.openButton)
        self.playPauseButton = QPushButton('Play', self)  # TODO implement pausing
        self.layout.addWidget(self.playPauseButton)
        self.nextButton = QPushButton('Next', self)
        self.layout.addWidget(self.nextButton)
        # Right-arrow key triggers Next.
        self.__nextShortcut = QShortcut(QKeySequence.MoveToNextChar, self)
        self.__nextShortcut.activated.connect(self.nextButton.click)
        # Space bar triggers Play/Pause.
        self.__playPauseShortcut = QShortcut(QKeySequence.fromString(' '), self)
        self.__playPauseShortcut.activated.connect(self.playPauseButton.click)
class MainWindow(QMainWindow):
    """Main player window hosting the Controls widget; emits playSong
    with the chosen file path when the user opens an mp3."""
    playSong = QtCore.pyqtSignal(str) # arg is path to file

    def __init__(self, music_dir):
        # music_dir is the starting directory for the Open file dialog.
        super(MainWindow, self).__init__()
        self.__music_dir = music_dir
        self.resize(400, 70)
        self.move(0, 0)
        self.setWindowTitle('Drink')
        self.setWindowIcon(QIcon('icon.png'))
        self.controls = Controls(self)
        self.setCentralWidget(self.controls)
        self.controls.openButton.clicked.connect(self.open)
        self.show()

    def open(self):
        # Ask for an mp3 and announce it via playSong; errors are shown in a
        # modal dialog rather than propagated.
        try:
            fileName = QFileDialog.getOpenFileName(
                self, "Open", self.__music_dir, "Mp3 Files (*.mp3)")
            self.playSong.emit(fileName)
        except Exception as error:
            QMessageBox.critical(self, "Open error", error.message)
|
8,743 | c7768e44464703552f579a1ec68b58fd9746a381 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 24 18:50:16 2018
@author: User
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 23 19:05:42 2018
@author: User
"""
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import lxml
import html5lib
import csv
# Machine-specific input/output locations for one SEC filing.
path = 'E:/Data Science/BI/Rocket Project/0000001750/0000001750__2006-09-01.htm'
path1='E:/Data Science/BI/Rocket Project/0000001750/Output_2006.csv'
#extracting the summary compensation table from html file
# (match="Bonus" selects only tables containing that word)
dfhtml = pd.read_html(path,match="Bonus")
len(dfhtml)
dfhtml
type(dfhtml)
#Converting list to string and removing the NaN
htmltxt=str(dfhtml)
txtnew=htmltxt.replace("NaN","")
print(txtnew)
#writing the list to text file
f=open('E:/Data Science/BI/Rocket Project/0000001750/Output_2006.txt','w')
f.writelines(str(txtnew))
f.close()
#df1=dfhtml[0].replace(np.NaN,np.nan)
# Drop all-NaN columns, then rows with fewer than one non-NaN value.
df2=dfhtml[0].dropna(axis=1, how='all')
df2=df2.dropna(thresh=1)
#df2.iloc[0:2,:] # Displaying the Rows with the Titles only.
|
8,744 | aa24442624aebeb2777f16a826cf59859d7870ba | import torch.nn as nn
from torch.autograd import Variable
import torch
import string
all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)

def letterToIndex(letter):
    """Index of *letter* within all_letters (str.find: -1 when absent)."""
    return all_letters.find(letter)

def letterToTensor(letter):
    """One-hot <1 x n_letters> tensor for a single letter (demo only)."""
    one_hot = torch.zeros(1, n_letters)
    one_hot[0, letterToIndex(letter)] = 1
    return one_hot

def lineToTensor(line):
    """Encode *line* as a <len(line) x 1 x n_letters> stack of one-hot rows."""
    encoded = torch.zeros(len(line), 1, n_letters)
    for position, character in enumerate(line):
        encoded[position, 0, letterToIndex(character)] = 1
    return encoded
class RNN(nn.Module):
    """Minimal character-level RNN: one linear layer produces the next
    hidden state, another produces log-probabilities over categories."""

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        # Both layers consume the concatenated [input, hidden] vector.
        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(input_size + hidden_size, output_size)
        # NOTE(review): LogSoftmax() without dim= is deprecated; dim=1 is
        # presumably intended for the (batch, classes) output — confirm.
        self.softmax = nn.LogSoftmax()

    def forward(self, input, hidden):
        # One time step: combine input and previous hidden state.
        combined = torch.cat((input, hidden), 1)
        hidden = self.i2h(combined)
        output = self.i2o(combined)
        output = self.softmax(output)
        return output, hidden

    def initHidden(self):
        # Zero hidden state for the start of a sequence (batch size 1).
        return Variable(torch.zeros(1, self.hidden_size))
# Binary classifier: medical term vs. common English term.
all_categories = ['Medical Term', 'Common English Term']
n_hidden = 128
n_categories = 2
rnn = RNN(n_letters, n_hidden, n_categories)
# Load pretrained weights (path resolved relative to the working directory).
rnn.load_state_dict(torch.load('medicalTermsModel'))
# Just return an output given a line
def evaluate(line_tensor):
    # Feed the one-hot line tensor through the RNN one character at a time;
    # the output after the last character is the prediction.
    hidden = rnn.initHidden()
    for i in range(line_tensor.size()[0]):
        output, hidden = rnn(line_tensor[i], hidden)
    return output
def predict(input_line, n_predictions=1):
    """Classify *input_line* with the global rnn.

    :return: tuple (input_line, predicted category name) — note that despite
             the loop below, only the LAST of the top n_predictions survives,
             because `predictions` is rebound each iteration (behavior kept
             from the original).
    """
    output = evaluate(Variable(lineToTensor(input_line)))
    # Get top N categories (scores and category indices).
    topv, topi = output.data.topk(n_predictions, 1, True)
    predictions = []
    for i in range(n_predictions):
        value = topv[0][i]
        category_index = topi[0][i]
        predictions.append([value, all_categories[category_index]])
        # Dead-code fix: the original if/else had byte-identical branches;
        # the unconditional assignment preserves its behavior exactly.
        predictions = (str(input_line), str(all_categories[category_index]))
    return predictions
|
8,745 | 7057b882ca1ce2c08e9ba7add5f115636b9b319e | import easyocr
import cv2
import json
import numpy as np
import os
import os.path
import glob
def convert(o):
    """json.dump `default` hook: unwrap numpy scalars to native Python values.

    Raises TypeError for anything that is not a numpy scalar, as json expects.
    """
    if isinstance(o, np.generic):
        return o.item()
    raise TypeError
# One multilingual reader; additional single-language readers are disabled.
readers = [
    easyocr.Reader(['la', 'en', 'de', 'fr', 'es', 'cs', 'is'], gpu = False),
    #easyocr.Reader(['ch_tra'], gpu = False),
    #easyocr.Reader(['fa'], gpu = False),
    #easyocr.Reader(['hi'], gpu = False),
    #easyocr.Reader(['ja'], gpu = False),
    #easyocr.Reader(['ko'], gpu = False),
    #easyocr.Reader(['th'], gpu = False),
]
# Each subdirectory of keyframes/ is one video; its PNG keyframes are OCR'd
# into a single ocr/<video>.json file (skipped if it already exists).
basedir = "keyframes/"
dirs = os.listdir(basedir)
for d in dirs:
    outfile = 'ocr/' + d + '.json'
    if os.path.isfile(outfile):
        print("found " + outfile + ", skipping")
        continue
    files = glob.glob(basedir + d + "/*.png")
    ocr = {}
    for f in files:
        # Frame id is the second-to-last underscore-separated token of the
        # filename — assumes the keyframe naming scheme (TODO confirm).
        i = f.split("_")[-2]
        img = cv2.imread(f)
        results = []
        for reader in readers:
            results = results + reader.readtext(img)
        # Keep only detections with text and confidence >= 0.1.
        h = list(filter(lambda result : len(result) > 2 and len(result[1]) > 0 and result[2] >= 0.1, results))
        if len(h) > 0:
            ocr[i] = h
    # `convert` unwraps numpy scalars that json cannot serialise directly.
    with open(outfile,'w') as f:
        json.dump(ocr, f, indent=1, default=convert)
    print(d)
|
8,746 | c645461effe288a1959b783473d62ff99ca29547 | def test_logsources_model(self):
"""
Comprobacion de que el modelo de la fuente de seguridad coincide con su asociado
Returns:
"""
log_source = LogSources.objects.get(Model="iptables v1.4.21")
self.assertEqual(log_source.get_model(), "iptables v1.4.21")
|
8,747 | 061a78650e2abf6a9d1e4796dd349174a8df5cb8 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
# Copyright © YXC
# CreateTime: 2016-03-09 10:06:02
"""
Example of functions with arbitrary number arguments
"""
def optional_argument_func(arg1='', arg2=''):
    """Print both optional arguments, one per line, labelled by name."""
    for name, value in (("arg1", arg1), ("arg2", arg2)):
        print("{0}:{1}".format(name, value))
def arbitrary_argument_func(*args):
    """Report how many positional arguments were received, then each one."""
    print("Number of arguments:{0}".format(len(args)))
    for position, value in enumerate(args):
        print("Argument {0} is : {1}".format(position, value))
if __name__ == "__main__":
    # Demo: two fixed arguments, then 0-, 1- and 3-argument variadic calls.
    optional_argument_func("Hello", "World")
    arbitrary_argument_func()
    arbitrary_argument_func("hello")
    arbitrary_argument_func("hello", "world", "again")
|
8,748 | 1f4d9f5406b91fd687c0ace8ed29e3c4dfb4d3d2 | n=int(input("val : "))
def fact(n):
    """Return n! computed iteratively (returns 1 for n <= 0)."""
    product = 1
    for factor in range(2, n + 1):
        product *= factor
    return product
print(fact(n)); |
8,749 | 21d2de5719fafd94605f31bc07231644f4be18c5 | from datetime import datetime
from unittest import TestCase
from vpnmupd import versions
class TestClass01(TestCase):
    """Software dependency versions compared."""

    def setUp(self) -> None:
        super().setUp()
        self.any_string = "Some string containing v1.1.1"

    def test_case01(self):
        """A semantic version is extracted from free text."""
        self.assertEqual(versions.extract_version(self.any_string), "1.1.1")

    def test_case02(self):
        """A dotted version maps to its numeric power."""
        self.assertEqual(versions.get_version_power("1.1.1"), 111)

    def test_case03(self):
        """A higher version yields a higher power."""
        self.assertGreater(versions.get_version_power("1.1.1"),
                           versions.get_version_power("0.2.1"))

    def test_case04(self):
        """Calendar-style versions are returned as datetime objects."""
        self.assertTrue(isinstance(versions.get_version_power("2021.1.1"), datetime))

    def test_case05(self):
        """Calendar-style versions compare chronologically."""
        self.assertGreater(versions.get_version_power("2021.1.1"),
                           versions.get_version_power("2020.1.1"))
8,750 | 603d904404ace88205a524d8bfbe3e621b65f425 | #!/usr/bin/python
import os
from subprocess import Popen, PIPE, STDOUT
import time
import re
import telnetlib
from get_sys_info import get_node_list, get_spec_node_list, get_active_tcu, get_ru_list, is_active_ru
g_rg_list = [
'/SGWNetMgr',
'/SS7SGU',
'/MGW_CMRG',
'/MGW_OMURG',
'/Directory',
]
status_dict={
"administrative": "UNLOCKED",
"operational": "ENABLED",
"usage": "ACTIVE",
"procedural": '',
"availability": '',
"unknown": "FALSE",
"alarm": '',
"role": "ACTIVE"
}
def get_mo_status(mo_name):
    """Run `fshascli -s <mo_name>` and parse `key(value)` pairs into a dict."""
    pattern = re.compile(r'(\S*)\((\S*)\)')
    status = {}
    for line in os.popen('fshascli -s ' + mo_name).readlines():
        if len(line) > 1:
            match = pattern.search(line)
            if match:
                status[match.group(1)] = match.group(2)
    return status
def cmp_mo_status(mo_status):
    """Compare *mo_status* against the expected values in ``status_dict``.

    Returns ``(True, '')`` when every attribute except 'role' matches, or
    ``(False, description)`` for the first mismatching attribute.
    The original had a second, unreachable ``return`` after the loop; it has
    been removed (dead code).
    """
    for key, value in mo_status.items():
        if key != 'role' and status_dict[key] != value:
            error_info = " " + key + " should be \"" + status_dict[key] + "\" But is \"" + value + "\""
            return False, error_info
    return True, ''
def is_ru_active(mo_status):
    """True when the status dict reports an ACTIVE role."""
    return mo_status.get('role') == 'ACTIVE'
def check_mo_status(mo_name, mo_status):
    """Print and return whether *mo_status* matches the expected values."""
    ok, error_info = cmp_mo_status(mo_status)
    if ok:
        print("%-40s OK" % (mo_name))
    else:
        print("%-40s NOK:" % (mo_name))
        print(error_info)
    return ok
def check_mo_list(ru_list):
    """Check every *active* RU in *ru_list*; True when all of them pass."""
    all_ok = True
    for ru in ru_list:
        ru_status = get_mo_status(ru)
        if is_ru_active(ru_status):
            all_ok = check_mo_status(ru, ru_status) and all_ok
    return all_ok
def check_rg_status(rg_name):
    """Check a recovery group and, when it is healthy, its active RUs."""
    rg_status = get_mo_status(rg_name)
    ok = check_mo_status(rg_name, rg_status)
    if ok:
        ru_list = get_ru_list(rg_name)
        if ru_list:
            ok = check_mo_list(ru_list) and ok
    return ok
def check_clock():
    """Check the MGW synchronization input reference.

    Returns True when the command output contains both 'yes' and 'ok'.
    Print statements are normalised to single-argument call form, which
    behaves identically under Python 2 and keeps the file Python 3 ready.
    """
    cmd = 'fsclish -c "show mgw synchronization inputreference"'
    ret = os.popen(cmd).read()
    print(ret)
    r_list = ret.split()
    if 'yes' in r_list and 'ok' in r_list:
        print("Clock is ok")
        return True
    else:
        print("=================================================================")
        print("CLOCK IS NOT OK !!!")
        print("=================================================================")
        return False
def is_needed_node_available(node_list):
    """Truthy when at least one TCU (DSP) and one TDM node are present.

    CLA counting is currently disabled (num_cla is hard-coded to 1), so a
    CLA node is assumed to exist. Print statements are normalised to the
    single-argument call form (identical output under Python 2 and 3).
    """
    num_tcu = 0
    num_tdm = 0
    num_cla = 1
    for node in node_list:
        if node.startswith("TCU"):
            num_tcu += 1
        if node.startswith("TDM"):
            num_tdm += 1
        # if node.startswith("CLA"):
        #     num_cla += 1
    if num_tcu == 0:
        print("No Working DSP available")
    if num_tdm == 0:
        print("No Working TDM available")
    if num_cla == 0:
        print("No Working CLA available")
    return num_tcu and num_cla and num_tdm
def check_needed_rg(rg_list):
    """Check every required recovery group; True only if all of them pass."""
    all_ok = True
    for rg in rg_list:
        all_ok = check_rg_status(rg) and all_ok
    return all_ok
def check_node():
    """Check the RG status of every node; False when prerequisites fail."""
    result = True
    node_list = get_node_list()
    if not is_needed_node_available(node_list):
        print("Please first make the node working!")
        # BUG FIX: previously a bare `return` (None) — return an explicit
        # False so callers always receive a boolean. None is falsy, so this
        # is backward-compatible.
        return False
    for node in node_list:
        if not check_rg_status("/"+node):
            result = False
    return result
def check_node_list(node_list):
    """Check the RG status of every node in *node_list*."""
    all_ok = True
    for node in node_list:
        all_ok = check_rg_status("/" + node) and all_ok
    return all_ok
def check_all(node_list_all):
    """Full health check: required RGs, every node, and the clock."""
    ok = check_needed_rg(g_rg_list)
    ok = check_node_list(node_list_all) and ok
    ok = check_clock() and ok
    return ok
def check_for_link(node_list_all):
    """Link-oriented check: TDM nodes, active TCUs and the required RGs.

    The clock is checked for its console output only; its result does not
    affect the return value (matching the original behavior).
    """
    tcu_list = get_spec_node_list(node_list_all, "TCU")
    tdm_list = get_spec_node_list(node_list_all, "TDM")
    active_tcu_list = get_active_tcu(tcu_list)
    ok = check_node_list(tdm_list)
    ok = check_node_list(active_tcu_list) and ok
    ok = check_needed_rg(g_rg_list) and ok
    check_clock()
    return ok
from optparse import OptionParser
if __name__ == '__main__':
    # -a/--all runs the full health check; the default checks only what a
    # traffic link needs (TDM nodes, active TCUs, required RGs).
    usage = "usage: %prog [options] arg"
    parser = OptionParser(usage)
    parser.add_option("-a", "--all",
                      action="store_true", dest="check_all_flag",
                      default=False)
    opts, args = parser.parse_args()
    node_list = get_node_list()
    ret = False
    if(opts.check_all_flag):
        ret = check_all(node_list)
    else:
        ret = check_for_link(node_list)
    # os.system('tail -f /srv/Log/log/syslog | grep srm')
    if ret:
        print ("Check ok")
    else:
        print("Not all check passed, please first check the RU and clock status")
8,751 | 57972e6368aa5749edeab94e45d84f7897ca14ab | """
@file
@brief Various function to clean files.
"""
from __future__ import print_function
import os
import re
def clean_exts(folder=".", fLOG=print, exts=None, fclean=None):
    """
    Cleans files in a folder and subfolders with a given extensions.

    @param      folder      folder to clean
    @param      fLOG        logging function
    @param      exts        extensions to clean
    @param      fclean      if not None, ``fclean(name) -> True`` to clean
    @return                 list of removed files

    If *exts* is None, it will be replaced by
    ``{".pyd", ".so", ".o", ".def", ".obj"}``.
    """
    exts = {".pyd", ".so", ".o", ".def", ".obj"} if exts is None else exts
    removed = []
    for root, _, filenames in os.walk(folder):
        # Never touch build artefacts inside environments or packages.
        if "exe.win" in root or "site-packages" in root or "_venv" in root:
            continue
        for name in filenames:
            if os.path.splitext(name)[-1] not in exts:
                continue
            full_name = os.path.join(root, name)
            if fclean is not None and not fclean(full_name):
                continue
            fLOG("[clean_exts] removing ", full_name)
            os.remove(full_name)
            removed.append(full_name)
    return removed
def clean_files(folder=".", posreg='.*[.]((py)|(rst))$',
                negreg=".*[.]git/.*", op="CR", fLOG=print):
    """
    Cleans line endings (or applies pep8) in a folder and its subfolders.
    Backslashes in relative paths are replaced by ``/`` before the regular
    expressions are applied.

    :param folder: folder to clean
    :param posreg: regular expression to select files to process
    :param negreg: regular expression to skip files to process
    :param op: kind of cleaning to do, options are CR, CRB, pep8
    :param fLOG: logging function
    :return: list of processed files (relative paths)

    The following cleanings are available:

    * ``'CR'``: replaces ``'\\r\\n'`` by ``'\\n'``
    * ``'CRB'``: replaces end of lines ``'\\n'`` by ``'\\r\\n'``
    * ``'pep8'``: applies :epkg:`pep8` convention
    """
    def clean_file_cr(name):
        # Normalise CRLF -> LF; True when the file was rewritten.
        with open(name, "rb") as f:
            content = f.read()
        new_content = content.replace(b"\r\n", b"\n")
        if new_content != content:
            with open(name, "wb") as f:
                f.write(new_content)
            return True
        return False

    def clean_file_cr_back(name):
        # Normalise every line ending to CRLF; True when rewritten.
        # BUG FIX: the previous implementation split on b'\n' and appended
        # b'\r' to every element that did not already end with it —
        # including the empty tail after the final newline — so each run
        # appended a stray '\r' to the file. Normalising through LF first
        # makes the operation correct and idempotent.
        with open(name, "rb") as f:
            content = f.read()
        new_content = content.replace(b"\r\n", b"\n").replace(b"\n", b"\r\n")
        if new_content != content:
            with open(name, "wb") as f:
                f.write(new_content)
            return True
        return False

    if op == 'CR':
        clean_file = clean_file_cr
    elif op == 'CRB':
        clean_file = clean_file_cr_back
    elif op == 'pep8':
        from .code_helper import remove_extra_spaces_and_pep8
        clean_file = remove_extra_spaces_and_pep8
    else:
        raise ValueError(f"Unknown cleaning '{op}'.")

    if posreg and isinstance(posreg, str):
        posreg = re.compile(posreg)
    if negreg and isinstance(negreg, str):
        negreg = re.compile(negreg)

    res = []
    for root, _, files in os.walk(folder):
        for f in files:
            full = os.path.join(root, f)
            rel = os.path.relpath(full, folder)
            fn = rel.replace("\\", "/")
            if posreg is None or posreg.search(fn):
                if negreg is None or not negreg.search(fn):
                    r = clean_file(full)
                    if r and fLOG:
                        fLOG(f"[clean_files] processed '{fn}'")
                    res.append(rel)
    return res
|
8,752 | ce5f91aa04065aac4d4bc7bdbaab3b74c5a85a93 | import unittest2 as unittest
from zope.component import getUtility
from plone.registry.interfaces import IRegistry
from plone.testing.z2 import Browser
from plone.app.testing import SITE_OWNER_NAME, SITE_OWNER_PASSWORD
from openmultimedia.imagewatchdog.configlet import IImageWatchDogSettings
from openmultimedia.imagewatchdog.testing import \
OPENMULTIMEDIA_IMAGEWATCHDOG_FUNCTIONAL_TESTING
class TestConfiglet(unittest.TestCase):
    """Functional tests for the Image WatchDog control-panel configlet.

    The repeated login/navigation boilerplate is factored into private
    helpers; the public test method names and assertions are unchanged.
    """

    layer = OPENMULTIMEDIA_IMAGEWATCHDOG_FUNCTIONAL_TESTING

    def setUp(self):
        self.app = self.layer['app']
        self.portal = self.layer['portal']

    def _open_settings_form(self):
        """Log in as the site owner and open the Image WatchDog settings form."""
        browser = Browser(self.app)
        portal_url = self.portal.absolute_url()
        browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD))
        browser.open(portal_url + '/@@overview-controlpanel')
        browser.getLink('Image WatchDog settings').click()
        return browser

    def _settings(self):
        """Return the registry record proxy for the watchdog settings."""
        registry = getUtility(IRegistry)
        return registry.forInterface(IImageWatchDogSettings)

    def test_default_config(self):
        """ Validate the default values
        """
        settings = self._settings()
        self.assertEqual(settings.source_formats, ['JPEG', 'GIF'])
        self.assertFalse(settings.optimize)
        self.assertFalse(settings.enabled)

    def test_change_config(self):
        """ Saving the form persists the new values
        """
        browser = self._open_settings_form()
        browser.getControl('Optimize PNG').selected = True
        browser.getControl('Enabled').selected = True
        browser.getControl('Save').click()
        settings = self._settings()
        self.assertTrue(settings.optimize)
        self.assertTrue(settings.enabled)

    def test_cancel_config(self):
        """ Cancelling the form leaves the defaults untouched
        """
        browser = self._open_settings_form()
        browser.getControl('Optimize PNG').selected = True
        browser.getControl('Enabled').selected = True
        browser.getControl('Cancel').click()
        settings = self._settings()
        self.assertFalse(settings.optimize)
        self.assertFalse(settings.enabled)

    def test_migrate_button(self):
        """ Check for the migrate button
        """
        browser = self._open_settings_form()
        browser.getControl('Enabled').selected = True
        browser.getControl('Save').click()
        # Re-open: now that the watchdog is enabled there is a Migrate button.
        browser = self._open_settings_form()
        browser.getControl('Optimize PNG').selected = True
        browser.getControl('Migrate').click()
        settings = self._settings()
        self.assertTrue(settings.optimize)
        self.assertTrue(settings.enabled)
8,753 | 67380fb8b1557b0ed6779009e5f9ae93fd81aedd | #!/usr/bin/python3
"""
module that has
fucntions that
shows attributes
"""
def lookup(obj):
    """
    Return the list of attribute and method names of *obj*.
    """
    return dir(obj)
|
8,754 | 28a920072bad1b411d71f7f70cd991cb7dfbeb8c | # -*- coding:utf-8 -*-
import time
class Base:
    def getTime(self):
        """Return the current Unix timestamp (whole seconds) as a string."""
        return str(int(time.time()))
8,755 | df317e914073f5b236f73b616b87f86ae378ef38 | #This is just a test
print("this is something new")
for a in range(10):
    # BUG FIX: the original called sum(a) on an int, which raises TypeError
    # on the first iteration. sum() needs an iterable; print the running
    # total 0 + 1 + ... + a instead so the loop actually completes.
    print(sum(range(a + 1)))
print("the loop worked")
8,756 | b1ae3abb6decf4d70bc2372e70cf4f5b868e805d | # coding: utf-8
# 2019/11/27 @ tongshiwei
import pytest
def test_api(env):
    # `env` is a pytest fixture; its exposed parameters must be exactly
    # these three keys.
    assert set(env.parameters.keys()) == {"knowledge_structure", "action_space", "learning_item_base"}
@pytest.mark.parametrize("n_step", [True, False])
def test_env(env, tmp_path, n_step):
    # Smoke-test a short train/eval run against the KSS environment.
    # NOTE(review): `n_step` and `tmp_path` are accepted but never used, so
    # the parametrize only repeats the same scenario twice — confirm intent.
    from EduSim.Envs.KSS import kss_train_eval, KSSAgent
    agent = KSSAgent(env.action_space)
    kss_train_eval(
        agent,
        env,
        max_steps=20,
        max_episode_num=10,
        level="summary",
    )
8,757 | 93953f025fed2bcabf29433591689c0a7adf9569 | #!/usr/bin/python
#encoding=utf-8
import os, sys
rules = {
'E': ['A'],
'A': ['A+M', 'M'],
'M': ['M*P', 'P'],
'P': ['(E)', 'N'],
'N': [str(i) for i in range(10)],
}
#st: the string to be scanned
#target: the accepting symbol, i.e. the final acceptable state
def back(st, target):
reduced_sets = set()
#cur为当前规约后的字符串,hist为记录的规约规则
def _back(cur, hist, idx):
print "----Enter _back----\n"
print "cur:%s\n" % cur
print "hist:%s\n" % hist
print "idx:%d\n" % idx
ans = []
if cur in reduced_sets:
return []
reduced_sets.add(cur)
hist.append(cur)
if cur == target:
ans.append(hist)
#遍历字符串中每个字符,当根据sub[i:j+1]获取栈中字符串在规约规则中未找到满足条件的规则则递增i,根据子串再次查找规约规则
for i in range(len(cur)):
#从位置j向后扫描token,因为例子中的token都是单字符,所以此处实际省略了词法分析获取token的过程。idx即代表当前栈中的最后一个token的索引。如果当轮查找未找到可以规约的规则则不会递归进入_back函数,递增j,相当于向前看一个token,继续查找规则。如果j遍历到结尾也没有查找到规则则递增i,使用栈中字符串后缀继续查找
for j in range(max(i, idx), len(cur)):
#读取已规约字符串,相当于栈中存在的字符串,j+1位置为lookahead token,sub获取的字符串不包含j+1指向的字符
sub = cur[i:j+1]
print "sub:%s\n" % sub
#遍历每条规则,根据栈中token字符串查找移动或者规约规则
for r in rules:
print "r:%s\n" % r
#查找用于规约的规则,rr为规约规则
for rr in rules[r]:
print "rules[r]:%s rr:%s\n" % (rules[r], rr)
work = False
if i == 0:
#栈中字符串为规约规则的后缀,则shift
work = (work or rr[-(j-i+1):] == sub)
if work:
print "%d|%d|%s|%s|rr[-(j-i+1):] == sub\n" % (i, j, rr, sub)
if j == len(cur) - 1:
#当前栈中已规约字符串是rr规约规则的前缀,则可以进行规约
work = (work or rr[:(j-i+1)] == sub)
if work:
print "%d|%d|%s|%s|rr[:(j-i+1)] == sub\n" % (i, j, rr, sub)
#整个栈中的字符串被包含在某条规约规则中,相当于是一个完整语句中的中间片段,没有头部和尾部,只有整个字符串扫描完毕这种情况才成立,如果字符串还未扫描完,则包含情况不能规约,只有是后缀时才能规约。
if i == 0 and j == len(cur) - 1:
#当前规约规则包含栈中
work = (work or (sub in rr))
if work:
print "%d|%d|%s|%s|sub in rr\n" % (i,j, rr, sub)
#规约规则右边字符串等于栈中字符串,可以规约
work = (work or (sub == rr))
if work:
#cur中需要按照规约规则把规约后字符替换规则右边字符,第三个参数i+len(r)-1决定了_back函数中内层迭代j的起始值。i为当前栈中的token个数,len(r)为规约后的token的长度,i+len(r)即为当前栈中token数目,i+len(r)-1为栈中索引
print "%d|%d|%s|%s|sub == rr\n" % (i,j, rr, sub)
vv = _back(
cur[:i] + r + cur[j+1:]
, hist + ['(%d, %d) %s => %s\n' % (i, j, r,
rr)], i + len(r) -1)
ans += vv
print "\n"
return ans
return _back(st, [], 0)
#1+1) is valid, it can be reduced to E
# 1+1) (0, 0) N => 1
# N+1) (0, 0) P => N
# P+1) (0, 0) M => M*P
# M+1) (0, 0) A => A+M
# A+1) (2, 2) N => 1
# A+N) (2, 2) P => N
# A+P) (2, 2) M => P
# A+M) (0, 2) A => A+M
# A) (0, 0) E => A
# E) (0, 1) P => (E)
# P (0, 0) M => M*P
# M (0, 0) A => A+M
# A (0, 0) E => A
# E
if __name__ == '__main__':
if len(sys.argv) < 2:
print "examples: "
print " %s '1+1)'" % sys.argv[0]
sys.exit()
s = sys.argv[1]
vv = back(s, 'E')
if vv:
print s + ' is valid, it can be reduced to E'
for i in vv:
print '\t' + '\t'.join(map(str, i))
else:
print s + ' is invalid, and cannot be reduced to E'
|
8,758 | 6726c8f1b3ef9a0df74c25c1921203af3aaacb12 | #------------------------------------------------------------------------
#
# @Author : EV2 CHEVALLIER
#
# @Date : 16.09.20
# @Location : École Navale / Chaire de Cyberdéfense des systèmes navals
# @Project : Projet de Fin d'Études
# @Subject : # Real time detection of cyber anomalies upon a NMEA network by using machine learning methods
#
#------------------------------------------------------------------------
# @Title : Training
#------------------------------------------------------------------------
# @Description : # This programm get the training dataset, extract the interesting features ( mean and standard deviation of variations of latitude,
# longitude, heading and distance )
# and put it in a python dictionnary and save it in a binary file with the pickle module.
#------------------------------------------------------------------------
import traitement as tr
import pickle as pk
import model as md
def training(dict):  # NOTE(review): parameter shadows builtin `dict`; kept for interface compatibility
    """Build a per-(speed, heading) statistical model and pickle it.

    For every trajectory file referenced in *dict* (speed -> heading -> path)
    the mean ("µ") and standard deviation ("sigma") of the variations of
    latitude (phi), longitude (g), heading and travelled distance are stored,
    then the whole model is saved to 'model.sauv' with pickle.
    """
    model = {}
    model["µ"] = {}
    model["sigma"] = {}
    for x in dict:  # loop over speeds
        model["µ"][x] = {}
        model["sigma"][x] = {}
        for y in dict[x]:  # loop over headings
            model["µ"][x][y] = {}
            model["sigma"][x][y] = {}
            doc = tr.load(dict[x][y])  # open the json file
            phi_l = doc[0]
            g_l = doc[1]  # lists of phi, g, t
            t_l = doc[2]
            dphi_l = tr.delta(phi_l, t_l)  # compute the variations
            dg_l = tr.delta(g_l, t_l)
            dheading_l = tr.delta(tr.heading(phi_l, g_l), t_l)
            d_distance = tr.delta_distance(phi_l, g_l)
            # Statistics of the feature variations.
            model["µ"][x][y]["phi"] = tr.parameters(dphi_l)["mean"]
            model["µ"][x][y]["g"] = tr.parameters(dg_l)["mean"]
            model["sigma"][x][y]["phi"] = tr.parameters(dphi_l)["standard_deviation"]
            # BUG FIX: previously computed on the raw longitudes (g_l); every
            # other statistic uses the variation series, so use dg_l here too.
            model["sigma"][x][y]["g"] = tr.parameters(dg_l)["standard_deviation"]
            model["µ"][x][y]["heading"] = tr.parameters(dheading_l)["mean"]
            model["µ"][x][y]["distance"] = tr.parameters(d_distance)["mean"]
            model["sigma"][x][y]["heading"] = tr.parameters(dheading_l)["standard_deviation"]
            model["sigma"][x][y]["distance"] = tr.parameters(d_distance)["standard_deviation"]
    with open('model.sauv', 'wb') as model_sauv_file:
        pk.dump(model, model_sauv_file)  # save the model in a binary file
    return model
training(md.model())
|
8,759 | bb3c4039ff224c0ca0305778b938ef969c196033 | from app import app
from flask import render_template, request
from app.models import model, formopener
@app.route('/', methods=['GET', 'POST'])
@app.route('/index')
def index():
    """Render the landing page."""
    return render_template("index.html")
@app.route('/personality', methods=['GET', 'POST'])
def personfont():
    """Recommend a font from the submitted 'personality' form value."""
    # NOTE(review): dict(request.form) maps keys to single values on modern
    # Werkzeug but to lists on older versions — confirm `x` is a plain string.
    user_input=dict(request.form)
    print(user_input)
    x=user_input["personality"]
    print(x)
    output=model.personfont(x)
    print(output)
    return render_template('index2.html', output=output)
|
8,760 | 48369e1ed826a9a50c0fd9f63b7cc10b8225ce2b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Implements the web-service calls of the command,
such as REST APIs and other network-related methods.
""" |
8,761 | 6b32f829648b92da4b638ffd79692ffb86be80fe | import cv2
import os
import numpy as np
import sys
from os.path import expanduser
np.random.seed(0)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Generate artificial videos with one subject in Casia-B')
parser.add_argument('--dataset', type=str, required=False,
default="casiab", choices=['casiab', 'tumgaid', 'other'],
help="Dataset name. Used tho select metadata and default folder. "
"Try 'casiab', 'tumgaid' or 'other'.")
parser.add_argument('--datasetdir', type=str, required=False,
help='Full path to dataset directory')
parser.add_argument('--siltdir', type=str, required=False,
help='Full path to silhouettes directory')
parser.add_argument('--idsdir', type=str, requiered=False,
help="Id file")
parser.add_argument('--outputdir', type=str, required=False,
help='Full path to output directory')
parser.add_argument('--background', type=str, required=False,
help='Full path to background image')
parser.add_argument('--videotypes', type=str, nargs='+', required=False,
help='Types of videos for augmentation')
parser.add_argument('--height', type=int, required=False,
help='Video height.')
parser.add_argument('--width', type=int, required=False,
help='Video width.')
parser.add_argument('--framerate', type=int, required=False,
help='Video frame rate.')
script_path = os.path.dirname(os.path.abspath(__file__))
args = parser.parse_args()
dataset = args.dataset
datasetdir = args.datasetdir
siltdir = args.siltdir
idsdir = args.idsdir
outputdir = args.outputdir
background = args.background
videotypes = args.videotypes
height = args.height
width = args.width
framerate = args.framerate
if dataset == 'casiab':
datasetdir = script_path + "/casiab/" if datasetdir is None else datasetdir
siltdir = script_path + "/casiab_silhouettes/" if siltdir is None else siltdir
idsdir = script_path + "casiab_ids.txt" if idsdir is None else idsdir
outputdir = script_path + "/mupeg_one_person/" if outputdir is None else outputdir
background = script_path + "/casiab_background.png" if background is None else background
videotypes = ["nm-05-090", "nm-06-090"] if videotypes is None else videotypes
height = 240 if height is None else height
width = 320 if width is None else width
framerate = 25 if framerate is None else framerate
elif dataset == 'tumgaid':
datasetdir = script_path + "/tumgaid/" if datasetdir is None else datasetdir
siltdir = script_path + "/tumgaid_silhouettes/" if siltdir is None else siltdir
idsdir = script_path + "tumgaid_ids.txt" if idsdir is None else idsdir
outputdir = script_path + "/mupeg_one_person/" if outputdir is None else outputdir
background = script_path + "/casiab_background.png" if background is None else background
videotypes = ["nm-05-090", "nm-06-090"] if videotypes is None else videotypes
height = 240 if height is None else height
width = 320 if width is None else width
framerate = 25 if framerate is None else framerate
else:
if not all(v is not None for v in [datasetdir, siltdir, outputdir, background, videotypes, height, width, framerate]):
raise argparse.ArgumentTypeError('If you select "others" in dataset, you need to complete all the input arguments.')
generate_one_subject_with_videos(datasetdir, siltdir, idsdir, outputdir, background, videotypes, height, width, framerate)
|
8,762 | e375501e6b815530e61af9181d4cade83d4588ca | #a list of functions/Classes to be inported when a user imports * from swarmpose
__all__ = ['Swarmpose'] |
8,763 | c327f8f7aece1a9c25079613809df52e9a8e7a52 | from rdflib import Graph
from rdflib.plugins.sparql import prepareQuery
def is_file_ontology(file_path):
    """
    Method that, given a file, returns its URI.
    This method is in a separate file in case we want to extract additional metadata if required

    Parameters
    ----------
    @param file_path: path of the candidate ontology

    Returns
    -------
    @return: The URI of the target ontology (if there is one); None when the
        file cannot be parsed or declares no owl:Ontology.
    """
    # load in rdf lib
    try:
        g = Graph()
        g.parse(file_path)
        q1 = prepareQuery('''
            SELECT ?onto
            WHERE {
                ?onto a <http://www.w3.org/2002/07/owl#Ontology>.
            }
            ''')
        # TO DO: extract title, preferred ns.
        # there should be only one ontology per file
        for r in g.query(q1):
            # print("Found that %s is an ontology" % file_path)
            return r.onto
    except Exception:
        # Deliberate best-effort: any unreadable/non-RDF candidate file is
        # treated as "not an ontology" and yields None.
        pass
8,764 | f98f2ef0d94839711b473ad1ca32b85645d4014e | """A lightweight Python wrapper of SoX's effects."""
import shlex
from io import BufferedReader, BufferedWriter
from subprocess import PIPE, Popen
import numpy as np
from .sndfiles import (
FileBufferInput,
FileBufferOutput,
FilePathInput,
FilePathOutput,
NumpyArrayInput,
NumpyArrayOutput,
logger,
)
def mutually_exclusive(*args):
    """True when at most one of *args* is not None."""
    provided = [arg for arg in args if arg is not None]
    return len(provided) < 2
class AudioEffectsChain:
    def __init__(self):
        # Accumulated SoX effect arguments; each effect method appends to it
        # and returns self so calls can be chained.
        self.command = []
def equalizer(self, frequency, q=1.0, db=-3.0):
"""equalizer takes three parameters: filter center frequency in Hz, "q"
or band-width (default=1.0), and a signed number for gain or
attenuation in dB.
Beware of clipping when using positive gain.
"""
self.command.append('equalizer')
self.command.append(frequency)
self.command.append(str(q) + 'q')
self.command.append(db)
return self
def bandpass(self, frequency, q=1.0):
"""bandpass takes 2 parameters: filter center frequency in Hz and "q"
or band-width (default=1.0).
It gradually removes frequencies outside the band specified.
"""
self.command.append('bandpass')
self.command.append(frequency)
self.command.append(str(q) + 'q')
return self
def bandreject(self, frequency, q=1.0):
"""bandreject takes 2 parameters: filter center frequency in Hz and "q"
or band-width (default=1.0).
It gradually removes frequencies within the band specified.
"""
self.command.append('bandreject')
self.command.append(frequency)
self.command.append(str(q) + 'q')
return self
def lowshelf(self, gain=-20.0, frequency=100, slope=0.5):
"""lowshelf takes 3 parameters: a signed number for gain or attenuation
in dB, filter frequency in Hz and slope (default=0.5, maximum=1.0).
Beware of Clipping when using positive gain.
"""
self.command.append('bass')
self.command.append(gain)
self.command.append(frequency)
self.command.append(slope)
return self
def highshelf(self, gain=-20.0, frequency=3000, slope=0.5):
"""highshelf takes 3 parameters: a signed number for gain or
attenuation in dB, filter frequency in Hz and slope (default=0.5).
Beware of clipping when using positive gain.
"""
self.command.append('treble')
self.command.append(gain)
self.command.append(frequency)
self.command.append(slope)
return self
def highpass(self, frequency, q=0.707):
"""highpass takes 2 parameters: filter frequency in Hz below which
frequencies will be attenuated and q (default=0.707).
Beware of clipping when using high q values.
"""
self.command.append('highpass')
self.command.append(frequency)
self.command.append(str(q) + 'q')
return self
def lowpass(self, frequency, q=0.707):
"""lowpass takes 2 parameters: filter frequency in Hz above which
frequencies will be attenuated and q (default=0.707).
Beware of clipping when using high q values.
"""
self.command.append('lowpass')
self.command.append(frequency)
self.command.append(str(q) + 'q')
return self
def limiter(self, gain=3.0):
"""limiter takes one parameter: gain in dB.
Beware of adding too much gain, as it can cause audible
distortion. See the compand effect for a more capable limiter.
"""
self.command.append('gain')
self.command.append('-l')
self.command.append(gain)
return self
def normalize(self):
"""normalize has no parameters.
It boosts level so that the loudest part of your file reaches
maximum, without clipping.
"""
self.command.append('gain')
self.command.append('-n')
return self
def compand(self, attack=0.2, decay=1, soft_knee=2.0, threshold=-20, db_from=-20.0, db_to=-20.0):
"""compand takes 6 parameters:
attack (seconds), decay (seconds), soft_knee (ex. 6 results
in 6:1 compression ratio), threshold (a negative value
in dB), the level below which the signal will NOT be companded
(a negative value in dB), the level above which the signal will
NOT be companded (a negative value in dB). This effect
manipulates dynamic range of the input file.
"""
self.command.append('compand')
self.command.append(str(attack) + ',' + str(decay))
self.command.append(str(soft_knee) + ':' + str(threshold) + ',' + str(db_from) + ',' + str(db_to))
return self
def sinc(self,
high_pass_frequency=None,
low_pass_frequency=None,
left_t=None,
left_n=None,
right_t=None,
right_n=None,
attenuation=None,
beta=None,
phase=None,
M=None,
I=None,
L=None):
"""sinc takes 12 parameters:
high_pass_frequency in Hz,
low_pass_frequency in Hz,
left_t,
left_n,
right_t,
right_n,
attenuation in dB,
beta,
phase,
M,
I,
L
This effect creates a steep bandpass or
bandreject filter. You may specify as few as the first two
parameters. Setting the high-pass parameter to a lower value
than the low-pass creates a band-reject filter.
"""
self.command.append("sinc")
if not mutually_exclusive(attenuation, beta):
raise ValueError("Attenuation (-a) and beta (-b) are mutually exclusive arguments.")
if attenuation is not None and beta is None:
self.command.append('-a')
self.command.append(str(attenuation))
elif attenuation is None and beta is not None:
self.command.append('-b')
self.command.append(str(beta))
if not mutually_exclusive(phase, M, I, L):
raise ValueError("Phase (-p), -M, L, and -I are mutually exclusive arguments.")
if phase is not None:
self.command.append('-p')
self.command.append(str(phase))
elif M is not None:
self.command.append('-M')
elif I is not None:
self.command.append('-I')
elif L is not None:
self.command.append('-L')
if not mutually_exclusive(left_t, left_t):
raise ValueError("Transition bands options (-t or -n) are mutually exclusive.")
if left_t is not None:
self.command.append('-t')
self.command.append(str(left_t))
if left_n is not None:
self.command.append('-n')
self.command.append(str(left_n))
if high_pass_frequency is not None and low_pass_frequency is None:
self.command.append(str(high_pass_frequency))
elif high_pass_frequency is not None and low_pass_frequency is not None:
self.command.append(str(high_pass_frequency) + '-' + str(low_pass_frequency))
elif high_pass_frequency is None and low_pass_frequency is not None:
self.command.append(str(low_pass_frequency))
if not mutually_exclusive(right_t, right_t):
raise ValueError("Transition bands options (-t or -n) are mutually exclusive.")
if right_t is not None:
self.command.append('-t')
self.command.append(str(right_t))
if right_n is not None:
self.command.append('-n')
self.command.append(str(right_n))
return self
def bend(self, bends, frame_rate=None, over_sample=None):
"""TODO Add docstring."""
self.command.append("bend")
if frame_rate is not None and isinstance(frame_rate, int):
self.command.append('-f %s' % frame_rate)
if over_sample is not None and isinstance(over_sample, int):
self.command.append('-o %s' % over_sample)
for bend in bends:
self.command.append(','.join(bend))
return self
def chorus(self, gain_in, gain_out, decays):
"""TODO Add docstring."""
self.command.append("chorus")
self.command.append(gain_in)
self.command.append(gain_out)
for decay in decays:
modulation = decay.pop()
numerical = decay
self.command.append(' '.join(map(str, numerical)) + ' -' + modulation)
return self
def delay(self,
gain_in=0.8,
gain_out=0.5,
delays=None,
decays=None,
parallel=False):
"""delay takes 4 parameters: input gain (max 1), output gain
and then two lists, delays and decays.
Each list is a pair of comma seperated values within
parenthesis.
"""
if delays is None:
delays = list((1000, 1800))
if decays is None:
decays = list((0.3, 0.25))
self.command.append('echo' + ('s' if parallel else ''))
self.command.append(gain_in)
self.command.append(gain_out)
self.command.extend(list(sum(zip(delays, decays), ())))
return self
def echo(self, **kwargs):
"""TODO Add docstring."""
return self.delay(**kwargs)
def fade(self):
    """Apply a fade-in/fade-out envelope (sox ``fade`` effect). Not implemented yet."""
    raise NotImplementedError()
def flanger(self, delay=0, depth=2, regen=0, width=71, speed=0.5, shape='sine', phase=25, interp='linear'):
    """Apply the sox ``flanger`` effect. Not implemented yet; the signature
    mirrors the sox defaults for the eventual implementation."""
    raise NotImplementedError()
def gain(self, db):
    """Apply the sox ``gain`` effect with the given gain in dB."""
    self.command += ['gain', db]
    return self
def mcompand(self):
    """Multi-band compand (sox ``mcompand`` effect). Not implemented yet."""
    raise NotImplementedError()
def noise_reduction(self, amount=0.5):
    """Reduce background noise (sox ``noisered`` workflow). Not implemented yet."""
    # TODO Run sox once with noiseprof on silent portions to generate a noise profile.
    raise NotImplementedError()
def oops(self):
    """Out-of-phase stereo (sox ``oops`` effect). Not implemented yet."""
    raise NotImplementedError()
def overdrive(self, gain=20, colour=20):
    """Apply the sox ``overdrive`` (distortion) effect.

    ``gain`` is in dB; ``colour`` shapes the character of the distortion
    (both default to 20, matching sox).
    """
    self.command.extend(['overdrive', gain, colour])
    return self
def phaser(self,
           gain_in=0.9,
           gain_out=0.8,
           delay=1,
           decay=0.25,
           speed=2,
           triangular=False):
    """Apply the sox ``phaser`` effect.

    Takes input gain (max 1.0), output gain (max 1.0), delay, decay,
    modulation speed, and the LFO shape: triangular (``-t``) when
    ``triangular`` is True, otherwise sinusoidal (``-s``).
    """
    params = ["phaser", gain_in, gain_out, delay, decay, speed]
    params.append('-t' if triangular else '-s')
    self.command.extend(params)
    return self
def pitch(self, shift,
          use_tree=False,
          segment=82,
          search=14.68,
          overlap=12):
    """Apply the sox ``pitch`` effect.

    :param shift: pitch shift amount (cents, per sox convention).
    :param use_tree: when True adds the ``-q`` (quick) flag.
    :param segment: segment length in ms.
    :param search: search window in ms.
    :param overlap: segment overlap in ms.
    """
    self.command.append("pitch")
    if use_tree:
        self.command.append('-q')
    self.command.extend([shift, segment, search, overlap])
    return self
def loop(self):
    """Repeat the audio forever (sox ``repeat -``)."""
    self.command.extend(['repeat', '-'])
    return self
def reverb(self,
           reverberance=50,
           hf_damping=50,
           room_scale=100,
           stereo_depth=100,
           pre_delay=20,
           wet_gain=0,
           wet_only=False):
    """Apply the sox ``reverb`` effect.

    Parameters map one-to-one to sox: reverberance, high-frequency
    damping, room scale, stereo depth, pre-delay, wet gain; ``wet_only``
    adds the ``-w`` flag so only the wet signal is emitted.
    """
    self.command.append('reverb')
    if wet_only:
        self.command.append('-w')
    self.command.extend([reverberance, hf_damping, room_scale,
                         stereo_depth, pre_delay, wet_gain])
    return self
def reverse(self):
    """Play the input backwards (sox ``reverse``); takes no parameters."""
    self.command += ["reverse"]
    return self
def speed(self, factor, use_semitones=False):
    """Apply the sox ``speed`` effect.

    With ``use_semitones=False`` a factor of 2 doubles the speed and
    raises the pitch an octave; the same result comes from factor=1200
    with ``use_semitones=True`` (the value gets a trailing 'c').
    """
    value = str(factor) + "c" if use_semitones else factor
    self.command.append("speed")
    self.command.append(value)
    return self
def synth(self):
    """Generate synthesised audio (sox ``synth`` effect). Not implemented yet."""
    raise NotImplementedError()
def tempo(self,
          factor,
          use_tree=False,
          opt_flag=None,
          segment=82,
          search=14.68,
          overlap=12):
    """Change duration without altering pitch (sox ``tempo``).

    :param factor: time-stretch factor.
    :param use_tree: when True adds the ``-q`` (quick) flag.
    :param opt_flag: optional audio-type hint; only 'l', 'm' or 's' are
        honoured, emitted as ``-l``/``-m``/``-s``.
    :param segment: segment length in ms.
    :param search: search window in ms.
    :param overlap: segment overlap in ms.
    """
    self.command.append("tempo")
    if use_tree:
        self.command.append('-q')
    if opt_flag in ('l', 'm', 's'):
        self.command.append('-%s' % opt_flag)
    self.command.extend([factor, segment, search, overlap])
    return self
def tremolo(self, freq, depth=40):
    """Apply the sox ``tremolo`` effect: modulation frequency and depth (max 100)."""
    self.command.extend(["tremolo", freq, depth])
    return self
def trim(self, positions):
    """Keep only the audio between the given positions (sox ``trim``).

    Positions are forwarded verbatim; sox interprets them as
    start/length pairs.
    """
    self.command.append("trim")
    # TODO: validate that each position is a meaningful sox time spec
    self.command.extend(positions)
    return self
def upsample(self, factor):
    """Upsample by an integer factor (sox ``upsample``)."""
    self.command.extend(["upsample", factor])
    return self
def vad(self):
    """Voice activity detection (sox ``vad`` effect). Not implemented yet."""
    raise NotImplementedError()
def vol(self, gain, type="amplitude", limiter_gain=None):
    """Apply the sox ``vol`` effect.

    :param gain: the volume change (interpreted per ``type``).
    :param type: one of 'amplitude', 'power' or 'dB'.
    :param limiter_gain: optional limiter gain appended after the type.
    :raises ValueError: if ``type`` is not a recognised gain type.
    :returns: self, for chaining.

    Bug fixes: the original never appended ``gain`` to the command (sox
    requires ``vol GAIN [TYPE [LIMITERGAIN]]``), and it leaked a debug
    ``print`` of the whole command list.
    """
    self.command.append("vol")
    self.command.append(gain)
    if type in ("amplitude", "power", "dB"):
        self.command.append(type)
    else:
        raise ValueError("Type has to be dB, amplitude or power.")
    if limiter_gain is not None:
        self.command.append(str(limiter_gain))
    return self
def custom(self, command):
    """Append an arbitrary SoX effect string to the chain.

    Example: ``custom('echo 0.8 0.9 1000 0.3')`` for an echo effect.

    References:
    - https://linux.die.net/man/1/soxexam
    - http://sox.sourceforge.net/sox.html
    - http://tldp.org/LDP/LG/issue73/chung.html
    - http://dsl.org/cookbook/cookbook_29.html
    """
    self.command += [command]
    return self
def __call__(
        self,
        src,
        dst=np.ndarray,
        sample_in=44100,  # used only for arrays
        sample_out=None,
        encoding_out=None,
        channels_out=None,
        allow_clipping=True):
    """Run the accumulated effect chain through the external ``sox`` binary.

    :param src: input — a file path (str), a numpy array, or a BufferedReader.
    :param dst: output — a file path (str), ``np.ndarray`` (the default,
        meaning "return an array"), or a BufferedWriter.
    :param sample_in: input sample rate; only meaningful for array input.
    :param sample_out: output sample rate; defaults to ``sample_in``.
    :param encoding_out: numpy dtype for array output; inferred when None.
    :param channels_out: output channel count; defaults to the input's.
    :param allow_clipping: when True uses ``-V1`` (quieter), else ``-V2``.
    :raises RuntimeError: if sox writes anything to stderr.
    :returns: the processed audio as an ndarray when sox produced stdout
        output, otherwise None (e.g. file-to-file processing).
    """
    # depending on the input, using the right object to set up the input data arguments
    stdin = None
    if isinstance(src, str):
        infile = FilePathInput(src)
        stdin = src
    elif isinstance(src, np.ndarray):
        infile = NumpyArrayInput(src, sample_in)
        stdin = src
    elif isinstance(src, BufferedReader):
        infile = FileBufferInput(src)
        stdin = infile.data  # retrieving the data from the file reader (np array)
    else:
        # unknown input type: sox falls back to the default device ('-d' below)
        infile = None
    # finding out which output encoding to use in case the output is ndarray
    if encoding_out is None and dst is np.ndarray:
        if isinstance(stdin, np.ndarray):
            encoding_out = stdin.dtype.type
        elif isinstance(stdin, str):
            encoding_out = np.float32
    # finding out which channel count to use (defaults to the input file's channel count)
    if channels_out is None:
        if infile is None:
            channels_out = 1
        else:
            channels_out = infile.channels
    if sample_out is None:  # if the output samplerate isn't specified, default to input's
        sample_out = sample_in
    # same as for the input data, but for the destination
    if isinstance(dst, str):
        outfile = FilePathOutput(dst, sample_out, channels_out)
    elif dst is np.ndarray:
        outfile = NumpyArrayOutput(encoding_out, sample_out, channels_out)
    elif isinstance(dst, BufferedWriter):
        outfile = FileBufferOutput(dst, sample_out, channels_out)
    else:
        outfile = None
    # Assemble the full sox invocation; posix=False keeps Windows paths intact.
    cmd = shlex.split(
        ' '.join([
            'sox',
            '-N',
            '-V1' if allow_clipping else '-V2',
            infile.cmd_prefix if infile is not None else '-d',
            outfile.cmd_suffix if outfile is not None else '-d',
        ] + list(map(str, self.command))),
        posix=False,
    )
    logger.debug("Running command : %s" % cmd)
    if isinstance(stdin, np.ndarray):
        # Array input is streamed to sox via stdin in column-major order.
        stdout, stderr = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE).communicate(stdin.tobytes(order='F'))
    else:
        stdout, stderr = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()
    if stderr:
        # NOTE(review): any stderr output (including warnings) is treated as fatal.
        raise RuntimeError(stderr.decode())
    elif stdout:
        outsound = np.frombuffer(stdout, dtype=encoding_out)
        if channels_out > 1:
            # De-interleave into (channels, frames), matching sox's output order.
            outsound = outsound.reshape((channels_out, int(len(outsound) / channels_out)), order='F')
        if isinstance(outfile, FileBufferOutput):
            outfile.write(outsound)
        return outsound
|
8,765 | b08cface601ee07125090f3ae03a3120974688f2 | from PyQt5.QtWidgets import *
import sys
import math
Data = ''
class Button:
    """One calculator key: wraps a QPushButton and routes clicks into the
    shared QLineEdit display, updating the module-global ``Data`` with the
    display text after every press."""

    def __init__(self, text, results):
        # results: the shared QLineEdit acting as the calculator display.
        self.b = QPushButton(str(text))
        self.text = text
        self.results = results
        self.b.clicked.connect(lambda: self.handleInput(
            self.text))  # Important because we need to pass only function name with arguments here that is why we use lambda here

    def handleInput(self, v):
        """Apply key `v` to the current display text.

        The expression is evaluated with ``eval`` — fine for this local GUI,
        but never safe on untrusted input. Any failure (bad expression,
        division by zero, ...) lands in the bare except and shows INVALID!.
        """
        global Data
        try:
            # A fresh key press clears a previous error state.
            if self.results.text() == 'INVALID!':
                self.results.setText("")
            if self.results.text() != '':
                # Refuse an operator/function directly after another operator.
                # NOTE(review): "MOD" appears twice in this list — harmless duplicate.
                if self.results.text()[-1] in ['*', '+', '-', '/'] and v in ['-', '*', '+', '/', '√', 'CBRT', "SIN",
                                                                             "COS", "LOG", "MOD", "TAN", "MOD"]:
                    return
                elif v == 'CBRT':
                    # cube root, rounded to 4 decimals
                    self.results.setText(str(round(float(eval(self.results.text())) ** (1 / 3), 4), ))
                elif v == 'MOD':
                    # NOTE(review): 'MOD' computes absolute value (modulus), not remainder.
                    if '.' in self.results.text():
                        self.results.setText(str(abs(float(self.results.text()))))
                    else:
                        self.results.setText(str(abs(int(self.results.text()))))
                elif v == 'LOG':
                    # base-10 log of the absolute value of the evaluated expression
                    self.results.setText(str(math.log10(abs(float(eval(self.results.text()))))))
                elif v == 'SQUARE':
                    if '.' in self.results.text():
                        self.results.setText(str(float(self.results.text()) ** 2))
                    else:
                        self.results.setText(str(int(self.results.text()) ** 2))
                elif v == "SIN":
                    # trig functions operate in radians
                    self.results.setText(str(math.sin(float(eval(self.results.text())))))
                elif v == "COS":
                    self.results.setText(str(math.cos(float(eval(self.results.text())))))
                elif v == "TAN":
                    self.results.setText(str(math.tan(float(eval(self.results.text())))))
                elif v == 'x!':
                    # factorial is only defined for integers
                    if '.' in str(eval(self.results.text())):
                        self.results.setText("INVALID!")
                    else:
                        self.results.setText(str(math.factorial(abs(int(eval(self.results.text()))))))
                elif self.results.text()[-1] == '/' and v == 0:
                    # block an immediate division by zero
                    return
                elif v == "=":
                    # don't evaluate a dangling operator or decimal point
                    if self.results.text()[-1] in ['*', '-', '.', '+', '/']:
                        return
                    res = eval(self.results.text())
                    self.results.setText(str(res))
                elif v == "AC":
                    self.results.setText("")
                elif v == "DEL":
                    self.results.setText(self.results.text()[:-1])
                elif v == "√" and self.results.text() != '':
                    self.results.setText(str(float(self.results.text()) ** 0.5))
                elif v == "√" and self.results.text() == '':
                    return
                else:
                    # default: append the pressed key to the display
                    current_value = self.results.text()
                    new_value = current_value + str(v)
                    self.results.setText(new_value)
            else:
                # empty display: only digits may start a new expression
                if type(v) == int:
                    current_value = self.results.text()
                    new_value = current_value + str(v)
                    self.results.setText(new_value)
        except:
            # NOTE(review): bare except deliberately maps every failure to INVALID!
            self.results.setText("INVALID!")
        Data = self.results.text()
class Widget1():
    """Basic-mode calculator screen: digits, arithmetic, square root."""

    def setup(self, MainWindow, res):
        """Build the basic-mode grid inside MainWindow, seeding the display with `res`."""
        self.widget = QWidget()
        self.grid = QGridLayout()
        self.results = QLineEdit()
        self.results.setText(res)
        self.cb = QComboBox()
        self.cb.addItems(["Basic Mode", "Advanced Mode"])
        self.grid.addWidget(self.cb, 0, 1, 1, 2)
        self.grid.addWidget(self.results, 1, 0, 2, 4)
        labels = ["AC", "DEL", "√", "/",
                  7, 8, 9, "*",
                  4, 5, 6, "-",
                  1, 2, 3, "+",
                  0, ".", "="]
        row, col = 3, 0
        for label in labels:
            if col > 3:
                col = 0
                row += 1
            key = Button(label, self.results)
            # the zero key spans two columns, everything else one
            span = 2 if label == 0 else 1
            self.grid.addWidget(key.b, row, col, 1, span)
            col += 1
        self.widget.setLayout(self.grid)
        MainWindow.setCentralWidget(self.widget)
class Widget2():
    """Advanced-mode calculator screen: trig, log, mod, square, cbrt, factorial."""

    def setup(self, MainWindow, res):
        """Build the advanced-mode grid inside MainWindow, seeding the display with `res`."""
        self.widget = QWidget()
        self.grid = QGridLayout()
        self.results = QLineEdit()
        self.results.setText(res)
        self.cb = QComboBox()
        self.cb.addItems(["Advance Mode", "Normal Mode"])
        self.grid.addWidget(self.cb, 0, 1, 1, 2)
        self.grid.addWidget(self.results, 1, 0, 2, 4)
        labels = ["AC", "DEL", "SIN", "COS",
                  7, 8, 9, "MOD",
                  4, 5, 6, "TAN",
                  1, 2, 3, "LOG",
                  0, "SQUARE", "CBRT", 'x!']
        row, col = 3, 0
        for label in labels:
            if col > 3:
                col = 0
                row += 1
            self.grid.addWidget(Button(label, self.results).b, row, col, 1, 1)
            col += 1
        self.widget.setLayout(self.grid)
        MainWindow.setCentralWidget(self.widget)
class MainWindow(QMainWindow):
    """Top-level window that swaps between the basic (Widget1) and advanced
    (Widget2) screens, carrying the display text across the swap via the
    module-global ``Data``."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("Calculator")
        self.widget1 = Widget1()
        self.widget2 = Widget2()
        self.startWidget1("")

    def startWidget1(self, res):
        """Show the basic screen, seeding its display with `res`."""
        global Data
        self.widget1.setup(self, res)
        Data = self.widget1.results.text()
        # combo-box change on screen 1 switches to screen 2
        self.widget1.cb.currentIndexChanged.connect(self.selectionchange1)
        self.show()

    def startWidget2(self, res):
        """Show the advanced screen, seeding its display with `res`."""
        global Data
        self.widget2.setup(self, res)
        Data = self.widget2.results.text()
        self.widget2.cb.currentIndexChanged.connect(self.selectionchange2)
        self.show()

    def selectionchange1(self, i):
        # switch basic -> advanced, keeping the current display text
        global Data
        res = Data
        self.startWidget2(res)

    def selectionchange2(self, i):
        # switch advanced -> basic, keeping the current display text
        global Data
        res = Data
        self.startWidget1(res)
if __name__ == "__main__":
    # Standard Qt bootstrap: create the app, show the window, enter the event loop.
    app = QApplication(sys.argv)
    w = MainWindow()
    sys.exit(app.exec_())
|
8,766 | d111f93144a1d2790470365d0ca31bcea17713d7 | import json
# Not fully solved yet.
# Car parks that are covered, plus sports-centre pavilions.
from pprint import pprint
# Load the open-data car-park records once at start-up.
with open('Aparcamientos.json') as data_file:
    data = json.load(data_file)
for x in data['docs']:
    if x['TIPOLOGIA'] == 'Cubierto':
        # covered car park: name only
        print(x['NOMBRE'])
    elif x['TIPOLOGIA'] == 'Pabellón de deportes':
        # sports pavilion: name and type
        print(x['NOMBRE'])
        print(x['TIPOLOGIA'])
|
8,767 | 1f6176e9285d810934ae745cf8759b5cd6f408c8 | import typing
from pydantic import AnyUrl
from .base import FBObject
class MediaPayload(FBObject):
    """Payload for media attachments (audio/file/image/video): just the URL."""
    url: AnyUrl
class Coors(FBObject):
    """Geographic coordinate pair.

    NOTE(review): 'Coors' looks like a typo for 'Coords', but renaming
    would break callers, so the name is kept."""
    lat: float
    long: float
class LocationPayload(FBObject):
    """Payload for location attachments: wraps a coordinate pair."""
    coordinates: Coors
class AttachmentFallback(FBObject):
    """Fallback attachment (e.g. a shared link): title + url, no payload."""
    title: str
    url: AnyUrl
    payload: typing.Any = None
    type: str = 'fallback'
class Attachment(FBObject):
    """Generic message attachment.

    NOTE(review): the payload union uses ``Coors`` directly rather than
    ``LocationPayload`` — confirm which shape the API actually sends."""
    type: str  # template, audio, fallback, file, image, location or video
    payload: typing.Union[MediaPayload, Coors, None]
|
8,768 | 69e8601a387d0987fbb6d1da5ac0f9412fffc63d | import sys
def _summary_lines(prefix, count, total, smallest, biggest):
    """Return the three report lines for one group; 'No' markers when empty.

    Every line carries a trailing comma; the caller strips it from the last.
    """
    if count == 0:
        return [f'{prefix}Sum=0.00,', f'{prefix}Min=No,', f'{prefix}Max=No,']
    return [f'{prefix}Sum={total:.2f},',
            f'{prefix}Min={smallest:.2f},',
            f'{prefix}Max={biggest:.2f},']


# Read N numbers; the 1st, 3rd, 5th... form the "Odd" group (odd positions),
# the 2nd, 4th, 6th... form the "Even" group.
num = int(input())
odd_sum = even_sum = 0.0
odd_count = even_count = 0
odd_min = even_min = float('inf')
odd_max = even_max = float('-inf')
for i in range(num):
    element = float(input())
    if i % 2 == 0:
        odd_count += 1
        odd_sum += element
        odd_min = min(odd_min, element)
        odd_max = max(odd_max, element)
    else:
        even_count += 1
        even_sum += element
        even_min = min(even_min, element)
        even_max = max(even_max, element)

# Bug fix: the original decided whether a group was empty by testing
# ``sum == 0``, so a group whose values cancelled out (or a lone 0.0)
# was wrongly reported as "No". Element counts are used instead.
lines = (_summary_lines('Odd', odd_count, odd_sum, odd_min, odd_max)
         + _summary_lines('Even', even_count, even_sum, even_min, even_max))
lines[-1] = lines[-1].rstrip(',')  # the final line has no trailing comma
for line in lines:
    print(line)
8,769 | 192c44540018b9e1ab857bdbfba6fdb39bb74431 | # -*- coding: utf-8 -*-
import json
import os
import io
import shutil
import pytest
from chi_annotator.algo_factory.common import TrainingData
from chi_annotator.task_center.config import AnnotatorConfig
from chi_annotator.task_center.data_loader import load_local_data
from chi_annotator.task_center.model import Interpreter
from chi_annotator.task_center.model import Trainer
from tests.utils.txt_to_json import create_tmp_test_jsonfile, rm_tmp_file
class TestTrainer(object):
    """Tests for Trainer and Interpreter.

    All test methods are intentionally disabled via the ``ignore_`` prefix
    (pytest only collects ``test_``-prefixed names). Consistency fix: the
    former ``ignor_test_load_default_config`` is renamed to follow the same
    ``ignore_`` convention as its siblings.
    """

    @classmethod
    def setup_class(cls):
        """ setup any state specific to the execution of the given class (which
        usually contains tests).
        """
        pass

    @classmethod
    def teardown_class(cls):
        """ teardown any state that was previously setup with a call to
        setup_class.
        """
        pass

    def ignore_test_load_local_data(self):
        """
        test load local json format data
        :return:
        """
        tmp_path = create_tmp_test_jsonfile("tmp.json")
        train_data = load_local_data(tmp_path)
        rm_tmp_file("tmp.json")
        assert train_data is not None
        assert len(train_data.training_examples) == 1000
        assert "text" not in train_data.training_examples[0].data
        assert "label" in train_data.training_examples[0].data

    def ignore_test_load_config(self):
        """
        test load config
        :return:
        """
        config = AnnotatorConfig(
            filename="chi_annotator/user_instance/examples/classify/spam_email_classify_config.json")
        assert config["name"] == "email_spam_classification"

    def ignore_test_load_default_config(self):
        """
        test load default config
        :return:
        """
        config = AnnotatorConfig()
        assert config["config"] == "config.json"

    def ignore_test_trainer_init(self):
        """
        test trainer
        :return:
        """
        test_config = "tests/data/test_config/test_config.json"
        config = AnnotatorConfig(test_config)
        trainer = Trainer(config)
        assert len(trainer.pipeline) > 0

    def ignore_test_pipeline_flow(self):
        """
        test trainer's train func for pipeline
        :return:
        """
        test_config = "tests/data/test_config/test_config.json"
        config = AnnotatorConfig(test_config)
        trainer = Trainer(config)
        assert len(trainer.pipeline) > 0
        # create tmp train set
        tmp_path = create_tmp_test_jsonfile("tmp.json")
        train_data = load_local_data(tmp_path)
        # rm tmp train set
        rm_tmp_file("tmp.json")
        interpreter = trainer.train(train_data)
        assert interpreter is not None
        out1 = interpreter.parse("点连接拿红包啦")
        # test persist and load
        persisted_path = trainer.persist(config['path'],
                                         config['project'],
                                         config['fixed_model_name'])
        interpreter_loaded = Interpreter.load(persisted_path, config)
        out2 = interpreter_loaded.parse("点连接拿红包啦")
        assert out1.get("classifylabel").get("name") == out2.get("classifylabel").get("name")
        # remove tmp models
        shutil.rmtree(config['path'], ignore_errors=True)

    def ignore_test_trainer_persist(self):
        """
        test pipeline persist, metadata will be saved
        :return:
        """
        test_config = "tests/data/test_config/test_config.json"
        config = AnnotatorConfig(test_config)
        trainer = Trainer(config)
        assert len(trainer.pipeline) > 0
        # char_tokenizer component should been created
        assert trainer.pipeline[0] is not None
        # create tmp train set
        tmp_path = create_tmp_test_jsonfile("tmp.json")
        train_data = load_local_data(tmp_path)
        # rm tmp train set
        rm_tmp_file("tmp.json")
        trainer.train(train_data)
        persisted_path = trainer.persist(config['path'],
                                         config['project'],
                                         config['fixed_model_name'])
        # load persisted metadata
        metadata_path = os.path.join(persisted_path, 'metadata.json')
        with io.open(metadata_path) as f:
            metadata = json.load(f)
        assert 'trained_at' in metadata
        # rm tmp files and dirs
        shutil.rmtree(config['path'], ignore_errors=False)

    def ignore_test_train_model_empty_pipeline(self):
        """
        train model with no component
        :return:
        """
        test_config = "tests/data/test_config/test_config.json"
        config = AnnotatorConfig(test_config)
        config['pipeline'] = []
        tmp_path = create_tmp_test_jsonfile("tmp.json")
        train_data = load_local_data(tmp_path)
        rm_tmp_file("tmp.json")
        with pytest.raises(ValueError):
            trainer = Trainer(config)
            trainer.train(train_data)

    def ignore_test_handles_pipeline_with_non_existing_component(self):
        """
        handle no exist component in pipeline
        :return:
        """
        test_config = "tests/data/test_config/test_config.json"
        config = AnnotatorConfig(test_config)
        config['pipeline'].append("unknown_component")
        tmp_path = create_tmp_test_jsonfile("tmp.json")
        train_data = load_local_data(tmp_path)
        rm_tmp_file("tmp.json")
        with pytest.raises(Exception) as execinfo:
            trainer = Trainer(config)
            trainer.train(train_data)
        assert "Failed to find component" in str(execinfo.value)

    def ignore_test_load_and_persist_without_train(self):
        """
        test save and load model without train
        :return:
        """
        test_config = "tests/data/test_config/test_config.json"
        config = AnnotatorConfig(test_config)
        trainer = Trainer(config)
        assert len(trainer.pipeline) > 0
        # create tmp train set
        tmp_path = create_tmp_test_jsonfile("tmp.json")
        train_data = load_local_data(tmp_path)
        # rm tmp train set
        rm_tmp_file("tmp.json")
        # interpreter = trainer.train(train_data)
        # test persist and load
        persisted_path = trainer.persist(config['path'],
                                         config['project'],
                                         config['fixed_model_name'])
        interpreter_loaded = Interpreter.load(persisted_path, config)
        assert interpreter_loaded.pipeline
        assert interpreter_loaded.parse("hello") is not None
        assert interpreter_loaded.parse("Hello today is Monday, again!") is not None
        # remove tmp models
        shutil.rmtree(config['path'], ignore_errors=False)

    def ignore_test_train_with_empty_data(self):
        """
        test train with empty train data
        :return:
        """
        test_config = "tests/data/test_config/test_config.json"
        config = AnnotatorConfig(test_config)
        trainer = Trainer(config)
        assert len(trainer.pipeline) > 0
        # create tmp train set
        train_data = TrainingData([])
        # rm tmp train set
        trainer.train(train_data)
        # test persist and load
        persisted_path = trainer.persist(config['path'],
                                         config['project'],
                                         config['fixed_model_name'])
        interpreter_loaded = Interpreter.load(persisted_path, config)
        assert interpreter_loaded.pipeline
        assert interpreter_loaded.parse("hello") is not None
        assert interpreter_loaded.parse("Hello today is Monday, again!") is not None
        # remove tmp models
        shutil.rmtree(config['path'], ignore_errors=False)
|
8,770 | a9efa258c223460b2b79861acdde89161706ad9a | '''
Given an infinite sorted array (or an array with unknown size), find if a given number ‘key’ is present in the array. Write a function to return the index of the ‘key’ if it is present in the array, otherwise return -1.
Since it is not possible to define an array with infinite (unknown) size, you will be provided with an interface ArrayReader to read elements of the array. ArrayReader.get(index) will return the number at index; if the array’s size is smaller than the index, it will return Integer.MAX_VALUE.
Example 1:
Input: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30], key = 16
Output: 6
Explanation: The key is present at index '6' in the array.
Example 2:
Input: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30], key = 11
Output: -1
Explanation: The key is not present in the array.
Example 3:
Input: [1, 3, 8, 10, 15], key = 15
Output: 4
Explanation: The key is present at index '4' in the array.
Example 4:
Input: [1, 3, 8, 10, 15], key = 200
Output: -1
Explanation: The key is not present in the array.
'''
import math
class ArrayReader:
    """Interface over a finite array that simulates an 'infinite' sorted
    array: reads past the end return infinity instead of raising."""

    def __init__(self, arr):
        self.arr = arr

    def get(self, index):
        """Return arr[index], or math.inf when index is out of bounds.

        Bug fix: the original used ``index > len(self.arr)``, so
        ``index == len(arr)`` slipped through and raised IndexError.
        """
        if index >= len(self.arr):
            return math.inf
        return self.arr[index]
def search_in_infinite_array(reader, key):
    """Find `key` in an unbounded sorted array via exponential bound growth
    followed by binary search; returns the index or -1.

    Bug fix: the original window update was ``high = (high - low + 1) * 2``,
    dropping the ``high +`` base, so after two doublings the upper bound
    collapsed below ``low`` and larger keys were never found. The window now
    doubles correctly: [0,1] -> [2,5] -> [6,13] -> ...
    """
    low = 0
    high = 1
    while reader.get(high) < key:
        new_low = high + 1
        high = high + (high - low + 1) * 2
        low = new_low
    return binary_search_array(reader, key, low, high)
def binary_search_array(reader, key, low, high):
    """Classic binary search over ``reader.get`` within [low, high];
    returns the matching index or -1."""
    lo, hi = low, high
    while lo <= hi:
        mid = lo + (hi - lo) // 2
        if key == reader.get(mid):
            return mid
        if key > reader.get(mid):
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
# Demo: index of 16 in the sample array (expected output: 6).
reader = ArrayReader([4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
print(search_in_infinite_array(reader, 16))
|
8,771 | 821e89730fde2e12b24b52b04701c1f3501e0d57 | from flask import abort
from flask_restx import Resource, Namespace, Model, fields, reqparse
from infraestructura.lineas_repo import LineasRepo
from infraestructura.equipos_repo import EquiposRepo
from infraestructura.clientes_lep_repo import ClientesLepRepo
from infraestructura.lineaequipoplan_repo import LineaEquipoPlanRepo
# Repositories backing the endpoints of this namespace.
repoLep= LineaEquipoPlanRepo()
repoLepCliente = ClientesLepRepo()
repo = LineasRepo()
repoEquipo = EquiposRepo()
nsLinea = Namespace('lineas', description='Administrador de lineas')
# Serialization model for a line without its id (used on creation).
modeloLineaSinN = Model('LineaSinNumero',{
    'numero': fields.String(),
    'estado': fields.String(),
    'activa': fields.Boolean()
})
# Full line model: same fields plus the database id.
modeloLinea = modeloLineaSinN.clone('Linea', {
    'id': fields.Integer()
})
# Date-range model for searches.
modeloBusqueda = Model('BusquedaFechas', {
    'desde': fields.Date(),
    'hasta': fields.Date()
})
nsLinea.models[modeloLinea.name] = modeloLinea
nsLinea.models[modeloLineaSinN.name] = modeloLineaSinN
nsLinea.models[modeloBusqueda.name] = modeloBusqueda
nuevaLineaParser = reqparse.RequestParser(bundle_errors=True)
nuevaLineaParser.add_argument('numero', type=str, required=True)
nuevaLineaParser.add_argument('estado', type=str, required=True)
# 'activa' is optional: the POST handler derives it from 'estado' anyway.
nuevaLineaParser.add_argument('activa', type=bool, required=False)
editarLineaParser = nuevaLineaParser.copy()
editarLineaParser.add_argument('id', type=int, required=True)
buscarLineasParser = reqparse.RequestParser(bundle_errors=True)
buscarLineasParser.add_argument('desde', type=str, required=True)
buscarLineasParser.add_argument('hasta', type=str, required=True)
@nsLinea.route('/')
class LineasResource(Resource):
    """Collection endpoint: list all lines / create a new one.

    NOTE(review): three view classes in this module share the name
    ``LineasResource``; routing still works because each decorator binds
    the class before the next definition shadows the name."""

    @nsLinea.marshal_list_with(modeloLinea)
    def get(self):
        return repo.get_all()

    @nsLinea.expect(modeloLineaSinN)
    @nsLinea.marshal_with(modeloLinea)
    def post(self):
        data = nuevaLineaParser.parse_args()
        # Derive the boolean flag from the textual state field.
        if(data.estado =="Activada"):
            data.activa = True
        else:
            data.activa = False
        f = repo.agregar(data)
        if f:
            return f, 201
        abort(500)
@nsLinea.route('/<int:id>')
class LineasResource(Resource):
    """Item endpoint: fetch or update a single line by id."""

    @nsLinea.marshal_with(modeloLinea)
    def get(self, id):
        """Return the line with the given id, or 404 if not found."""
        f = repo.get_by_numero(id)
        if f:
            return f, 200
        abort(404)

    @nsLinea.expect(modeloLinea)
    def put(self, id):
        """Update the line identified by the route's ``id``.

        Bug fix: the parameter was named ``numero`` while the route
        declares ``<int:id>``; Flask dispatches view args by keyword, so
        every PUT raised TypeError. Renamed to ``id`` to match the route.
        """
        data = editarLineaParser.parse_args()
        if repo.modificar(id, data):
            return 'Linea modificada', 200
        abort(404)
@nsLinea.route('/baja/<int:id>')
class LineasResource(Resource):
    """Deactivation endpoint: soft-deletes a line and cascades the change."""

    def put(self, id):
        if repo.baja(id):
            # deactivate the line/equipment/plan association
            repoLep.baja_by_linea(id)
            # fetch the association to obtain the equipment id
            # and the id needed for the cliente_lep table
            lineaeqplan = repoLep.buscar_by_linea(id)
            # deactivate the equipment
            repoEquipo.baja(lineaeqplan.equipo_id)
            # deactivate the cliente_lep row
            repoLepCliente.bajalep(lineaeqplan.id)
            return 'Linea dada de baja', 200
        abort(400)
|
8,772 | e4bc2e97b70e2dc91dc86457866ec6b3531ef803 | from pyspark.sql import SQLContext, Row
from pyspark import SparkContext, SparkConf
from pyspark.sql.functions import col
import collections
# Create a Spark Session (the config bit is only for windows)
#conf = SparkConf().setAppName("SQL App").setMaster("local")
sc = SparkContext()  # local SparkContext with default configuration
sqlCtx = SQLContext(sc)  # entry point for DataFrame / SQL operations
def mapper(line):
    """Parse one CSV line into a Row(ID, name, age, numFriends)."""
    cols = line.split(",")
    return Row(ID=int(cols[0]), name=cols[1].encode("utf-8"),
               age=int(cols[2]), numFriends=int(cols[3]))
lines = sc.textFile("fakefriends.csv")
people = lines.map(mapper)
# Infer the schema and register the DataFrame as a table
schemaPeople = sqlCtx.createDataFrame(people).cache()
# NOTE(review): registerTempTable was deprecated in favour of
# createOrReplaceTempView — confirm the target Spark version.
schemaPeople.registerTempTable("people")
# SQL can be run over DataFrames that have been registered as a table
teenagers = sqlCtx.sql("SELECT * FROM people WHERE age >= 13 AND age <= 19")
print(teenagers.dtypes)
for teen in teenagers.collect():
    print(teen)
# Count people per age, highest age first.
schemaPeople.groupBy("age").count().orderBy(col("age").desc()).show()
8,773 | 933f74e4fda0b30bdf70ff3f3dbde2383b10c694 | # -*- coding:utf-8 -*-
'''
Created on 2018/2/23
@author : xxfore
'''
import time
import sys
import re
# Suppress .pyc generation for everything imported after this point.
sys.dont_write_bytecode = True
class TimeUtils(object):
@staticmethod
def convert_timestamp_to_date(timestamp):
time_local = time.localtime(timestamp)
dt = time.strftime("%Y-%m-%d %H:%M:%S",time_local)
return dt
class StringUtils(object):
    """String-cleaning helpers."""

    @staticmethod
    def remove_emoji_from_string(text):
        """Strip every character outside the Basic Multilingual Plane
        (emoji and other supplementary-plane code points) from `text`."""
        supplementary = re.compile(u'[\U00010000-\U0010ffff]')
        return supplementary.sub(u'', text)
8,774 | ca0616694b30f69263db48282bf8b8c130de0fbb | /home/co/Documents/ImageClassifier/tensorflow/tensorflow/contrib/tfprof/__init__.py |
8,775 | 9f34f94422f4847859e9111f34ade2e1274cb543 | """Visit module to add odoo checks
"""
import os
import re
import astroid
import isort
from pylint.checkers import utils
from six import string_types
from .. import misc, settings
# Message catalogue for the Odoo module checker. Keys are pylint message
# codes built from settings.BASE_OMODULE_ID; values are
# (template, symbolic-name, description) triples.
# Fix: corrected typos in the user-facing 'character-not-valid-in-resource-link'
# message ("in in", "a not valid chararter").
ODOO_MSGS = {
    # C->convention R->refactor W->warning E->error F->fatal

    # Visit odoo module with settings.BASE_OMODULE_ID
    'C%d02' % settings.BASE_OMODULE_ID: (
        'Missing ./README.rst file. Template here: %s',
        'missing-readme',
        settings.DESC_DFLT
    ),
    'E%d01' % settings.BASE_OMODULE_ID: (
        '%s %s',
        'rst-syntax-error',
        settings.DESC_DFLT
    ),
    'E%d02' % settings.BASE_OMODULE_ID: (
        '%s error: %s',
        'xml-syntax-error',
        settings.DESC_DFLT
    ),
    'W%d01' % settings.BASE_OMODULE_ID: (
        '%s Dangerous filter without explicit `user_id` in xml_id %s',
        'dangerous-filter-wo-user',
        settings.DESC_DFLT
    ),
    'W%d02' % settings.BASE_OMODULE_ID: (
        '%s Duplicate xml record id "%s" in %s',
        'duplicate-xml-record-id',
        settings.DESC_DFLT
    ),
    'W%d03' % settings.BASE_OMODULE_ID: (
        '%s',
        'javascript-lint',
        settings.DESC_DFLT
    ),
    'W%d04' % settings.BASE_OMODULE_ID: (
        '%s Deprecated <openerp> xml node',
        'deprecated-openerp-xml-node',
        settings.DESC_DFLT
    ),
    'W%d05' % settings.BASE_OMODULE_ID: (
        '%s record res.users without '
        'context="{\'no_reset_password\': True}"',
        'create-user-wo-reset-password',
        settings.DESC_DFLT
    ),
    'W%d06' % settings.BASE_OMODULE_ID: (
        '%s Duplicate id "%s"',
        'duplicate-id-csv',
        settings.DESC_DFLT
    ),
    'W%d07' % settings.BASE_OMODULE_ID: (
        '%s Duplicate xml field "%s" in lines %s',
        'duplicate-xml-fields',
        settings.DESC_DFLT
    ),
    'W%d08' % settings.BASE_OMODULE_ID: (
        '%s Missing newline',
        'missing-newline-extrafiles',
        settings.DESC_DFLT
    ),
    'W%d09' % settings.BASE_OMODULE_ID: (
        '%s Redundant name module reference in xml_ids "%s".',
        'redundant-modulename-xml',
        settings.DESC_DFLT
    ),
    'W%d10' % settings.BASE_OMODULE_ID: (
        '%s Use wrong tabs indentation instead of four spaces',
        'wrong-tabs-instead-of-spaces',
        settings.DESC_DFLT
    ),
    'R%d80' % settings.BASE_OMODULE_ID: (
        'Consider merging classes inherited to "%s" from %s.',
        'consider-merging-classes-inherited',
        settings.DESC_DFLT
    ),
    'W%d50' % settings.BASE_OMODULE_ID: (
        'Same Odoo module absolute import. You should use '
        'relative import with "." '
        'instead of "openerp.addons.%s"',
        'odoo-addons-relative-import',
        settings.DESC_DFLT
    ),
    'W%d40' % settings.BASE_OMODULE_ID: (
        '%s Dangerous use of "replace" from view '
        'with priority %s < %s. '
        'Increase priority or don\'t use "replace". '
        'For more information see https://odoo-development.readthedocs.io/en/latest/dev/xml/inherit.html#collisions-and-priority ',
        'dangerous-view-replace-wo-priority',
        settings.DESC_DFLT
    ),
    'W%d30' % settings.BASE_OMODULE_ID: (
        '%s not used from manifest',
        'file-not-used',
        settings.DESC_DFLT
    ),
    'W%d35' % settings.BASE_OMODULE_ID: (
        'External dependency "%s" without ImportError. More info: '
        'https://odoo-development.readthedocs.io/en/latest/dev/py/external-imports.html'
        '#external-dependencies',
        'missing-import-error',
        settings.DESC_DFLT
    ),
    'W%d36' % settings.BASE_OMODULE_ID: (
        'Missing external dependency "%s" from manifest. More info: '
        'https://github.com/OCA/odoo-community.org/blob/master/website/'
        'Contribution/CONTRIBUTING.rst'
        '#external-dependencies',
        'missing-manifest-dependency',
        settings.DESC_DFLT
    ),
    'W%d38' % settings.BASE_OMODULE_ID: (
        'pass into block except. '
        'If you really need to use the pass consider logging that exception',
        'except-pass',
        settings.DESC_DFLT
    ),
    'W%d37' % settings.BASE_OMODULE_ID: (
        '%s The xml attribute is missing the translation="off" tag %s',
        'xml-attribute-translatable',
        settings.DESC_DFLT
    ),
    'W%d42' % settings.BASE_OMODULE_ID: (
        '%s Deprecated <tree> xml attribute "%s"',
        'xml-deprecated-tree-attribute',
        settings.DESC_DFLT
    ),
    'W%d43' % settings.BASE_OMODULE_ID: (
        '%s Deprecated QWeb directive "%s". Use "t-options" instead',
        'xml-deprecated-qweb-directive',
        settings.DESC_DFLT
    ),
    'W%d39' % settings.BASE_OMODULE_ID: (
        '%s Use <odoo> instead of <odoo><data> or use <odoo noupdate="1">'
        'instead of <odoo><data noupdate="1">',
        'deprecated-data-xml-node',
        settings.DESC_DFLT
    ),
    'W%d44' % settings.BASE_OMODULE_ID: (
        '%s The resource in src/href contains an invalid character',
        'character-not-valid-in-resource-link',
        settings.DESC_DFLT
    ),
}
# Default option values for ModuleChecker below.
# NOTE(review): the prefixes mix DFTL_ and DFLT_ — both mean "default";
# renaming would break references in the checker's options, so kept as-is.
DFTL_README_TMPL_URL = 'https://github.com/OCA/maintainer-tools' + \
    '/blob/master/template/module/README.rst'
DFTL_README_FILES = ['README.rst', 'README.md', 'README.txt']
DFTL_MIN_PRIORITY = 99
# Files supported from manifest to convert
# Extracted from openerp/tools/convert.py:def convert_file
DFLT_EXTFILES_CONVERT = ['csv', 'sql', 'xml', 'yml']
DFLT_EXTFILES_TO_LINT = DFLT_EXTFILES_CONVERT + [
    'po', 'js', 'mako', 'rst', 'md', 'markdown']
# Import roots that never count as missing external dependencies.
DFLT_IMPORT_NAME_WHITELIST = [
    # self-odoo
    'odoo', 'openerp',
    # packages for unit tests only
    'requests_mock',
    # Known external packages of odoo
    'PIL', 'anybox.testing.openerp', 'argparse', 'babel',
    'dateutil', 'decorator', 'docutils', 'faces', 'feedparser',
    'gdata', 'gevent', 'greenlet', 'jcconv', 'jinja2',
    'ldap', 'lxml', 'mako', 'markupsafe', 'mock', 'odf',
    'ofxparse', 'openid', 'passlib', 'pkg_resources',
    'psutil', 'psycogreen', 'psycopg2', 'pyPdf', 'pychart',
    'pydot', 'pyparsing', 'pytz', 'qrcode', 'reportlab',
    'requests', 'serial', 'simplejson', 'six', 'suds',
    'unittest2', 'usb', 'vatnumber', 'vobject', 'werkzeug',
    'wsgiref', 'xlsxwriter', 'xlwt', 'yaml',
]
# Bundled fallback .jslintrc shipped next to this package.
DFTL_JSLINTRC = os.path.join(
    os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
    'examples', '.jslintrc'
)
DFLT_DEPRECATED_TREE_ATTRS = ['colors', 'fonts', 'string']
DFTL_MANIFEST_DATA_KEYS = ['data', 'demo', 'demo_xml', 'init_xml', 'test',
                           'update_xml']
class ModuleChecker(misc.WrapperModuleChecker):
    """Odoo module-level checker.

    Validates module manifests, data files (XML/CSV), imports and external
    dependencies, plus assorted Odoo coding conventions.  The `_check_*`
    methods follow the WrapperModuleChecker convention: they return True on
    success and, on failure, return False after filling `self.msg_args`.
    """
    name = settings.CFG_SECTION
    msgs = ODOO_MSGS
    # pylint plugin options, configurable from the rc file / command line.
    options = (
        ('readme_template_url', {
            'type': 'string',
            'metavar': '<string>',
            'default': DFTL_README_TMPL_URL,
            'help': 'URL of README.rst template file',
        }),
        ('extfiles_to_lint', {
            'type': 'csv',
            'metavar': '<comma separated values>',
            'default': DFLT_EXTFILES_TO_LINT,
            'help': 'List of extension files to check separated by a comma.'
        }),
        ('min-priority', {
            'type': 'int',
            'metavar': '<int>',
            'default': DFTL_MIN_PRIORITY,
            'help': 'Minimum priority number of a view with replace of fields.'
        }),
        ('extfiles_convert', {
            'type': 'csv',
            'metavar': '<comma separated values>',
            'default': DFLT_EXTFILES_CONVERT,
            'help': 'List of extension files supported to convert '
                    'from manifest separated by a comma.'
        }),
        ('import_name_whitelist', {
            'type': 'csv',
            'metavar': '<comma separated values>',
            'default': DFLT_IMPORT_NAME_WHITELIST,
            'help': 'List of known import dependencies of odoo,'
                    ' separated by a comma.'
        }),
        ('jslintrc', {
            'type': 'string',
            'metavar': '<path to file>',
            'default': os.environ.get('PYLINT_ODOO_JSLINTRC') or DFTL_JSLINTRC,
            'help': ('A path to a file that contains a configuration file of '
                     'javascript lint. You can use the environment variable '
                     '"PYLINT_ODOO_JSLINTRC" too. Default: %s' % DFTL_JSLINTRC)
        }),
        ('deprecated_tree_attributes', {
            'type': 'multiple_choice',
            'metavar': '<attributes>',
            'default': DFLT_DEPRECATED_TREE_ATTRS,
            'choices': DFLT_DEPRECATED_TREE_ATTRS,
            'help': 'List of deprecated list view attributes,'
                    ' separated by a comma. Valid values: %s' % ', '.join(
                        DFLT_DEPRECATED_TREE_ATTRS)
        }),
    )

    # Per-message Odoo version applicability (missing-import-error is only
    # emitted up to Odoo 11.0).
    odoo_check_versions = {
        'missing-import-error': {
            'max_odoo_version': '11.0',
        },
    }

    class_inherit_names = []
    @utils.check_messages('consider-merging-classes-inherited')
    def visit_assign(self, node):
        """Track `_name`/`_inherit` class-attribute assignments.

        Accumulates, per (odoo root node, inherited model), the classes that
        inherit the same model, so `close()` can report candidates for
        merging.  Only plain constant assignments inside a class body are
        considered.
        """
        if not self.odoo_node:
            return
        if not self.linter.is_message_enabled(
                'consider-merging-classes-inherited', node.lineno):
            return
        node_left = node.targets[0]
        if not isinstance(node_left, astroid.node_classes.AssignName) or \
                node_left.name not in ('_inherit', '_name') or \
                not isinstance(node.value, astroid.node_classes.Const) or \
                not isinstance(node.parent, astroid.ClassDef):
            return
        if node_left.name == '_name':
            # Remember the declared model name on the class node itself so a
            # later `_inherit` assignment in the same class can see it.
            node.parent.odoo_attribute_name = node.value.value
            return
        _name = getattr(node.parent, 'odoo_attribute_name', None)
        _inherit = node.value.value
        if _name and _name != _inherit:
            # Skip _name='model.name' _inherit='other.model' because is valid
            return
        key = (self.odoo_node, _inherit)
        # Record the file so close() can build a readable relative path.
        node.file = self.linter.current_file
        self.inh_dup.setdefault(key, []).append(node)
def _build_whitelist_module_patterns(self):
known_patterns = []
for known_pattern in self.config.import_name_whitelist:
pattern = known_pattern.replace('*', '.*').replace('?', '.?')
known_patterns.append(re.compile('^' + pattern + '$'))
return known_patterns
    def open(self):
        """Define variables to use cache"""
        # (odoo_node, model_name) -> list of assignment nodes inheriting it.
        self.inh_dup = {}
        patterns = self._build_whitelist_module_patterns()
        self._whitelist_module_patterns = patterns
        super(ModuleChecker, self).open()

    def close(self):
        """Final process get all cached values and add messages"""
        for (odoo_node, class_dup_name), nodes in self.inh_dup.items():
            if len(nodes) == 1:
                # Only one class inherits the model: nothing to merge.
                continue
            path_nodes = []
            for node in nodes[1:]:
                relpath = os.path.relpath(node.file,
                                          os.path.dirname(odoo_node.file))
                path_nodes.append("%s:%d" % (relpath, node.lineno))
            # Report on the first occurrence, listing the duplicates.
            self.add_message('consider-merging-classes-inherited',
                             node=nodes[0],
                             args=(class_dup_name, ', '.join(path_nodes)))

    def _get_odoo_module_imported(self, node):
        """Return the list of odoo addon names imported by `node`
        (Import or ImportFrom), e.g. ['sale'] for
        'from odoo.addons.sale import models'."""
        odoo_module = []
        if isinstance(node, astroid.ImportFrom) and \
                ('openerp.addons' in node.modname or
                 'odoo.addons' in node.modname):
            packages = node.modname.split('.')
            if len(packages) >= 3:
                # from openerp.addons.odoo_module import models
                odoo_module.append(packages[2])
            else:
                # from openerp.addons import odoo_module
                odoo_module.append(node.names[0][0])
        elif isinstance(node, astroid.Import):
            for name, _ in node.names:
                if 'openerp.addons' not in name and 'odoo.addons' not in name:
                    continue
                packages = name.split('.')
                if len(packages) >= 3:
                    # import openerp.addons.odoo_module
                    odoo_module.append(packages[2])
        return odoo_module

    def check_odoo_relative_import(self, node):
        """Flag a module importing itself through odoo.addons.<self>."""
        if self.odoo_module_name in self._get_odoo_module_imported(node):
            self.add_message('odoo-addons-relative-import', node=node,
                             args=(self.odoo_module_name))
@staticmethod
def _is_absolute_import(node, name):
modnode = node.root()
importedmodnode = ModuleChecker._get_imported_module(node, name)
if importedmodnode and importedmodnode.file and \
modnode is not importedmodnode and \
importedmodnode.name != name:
return True
return False
@staticmethod
def _get_imported_module(importnode, modname):
try:
return importnode.do_import_module(modname)
except:
pass
def _is_module_name_in_whitelist(self, module_name):
# Try to find most specific placement instruction match (if any)
# (from isort place_module() method)
parts = module_name.split('.')
module_names_to_check = [
'.'.join(parts[:first_k])
for first_k in range(len(parts), 0, -1)
]
# Check if one of the module name is part of the whitelist.
# For an module name such as 'anybox.testing.openerp', the
# modules names to check will be:
# ['anybox.testing.openerp', 'anybox.testing', 'anybox']
# Only one of them has to be in the whitelist to be accepted.
for module_name_to_check in module_names_to_check:
for pattern in self._whitelist_module_patterns:
if pattern.match(module_name_to_check):
return True
return False
    def _check_imported_packages(self, node, module_name):
        """Check if the import node is a external dependency to validate it.

        Emits missing-import-error when an external package cannot be
        resolved, and missing-manifest-dependency when it is not declared in
        the manifest's external_dependencies['python'] list.
        """
        if not module_name:
            # skip local packages because is not a external dependency.
            return
        if not self.manifest_dict:
            # skip if is not a module of odoo
            return
        if not isinstance(node.parent, astroid.Module):
            # skip nested import sentences
            return
        if self._is_absolute_import(node, module_name):
            # skip absolute imports
            return
        if self._is_module_name_in_whitelist(module_name):
            # ignore whitelisted modules
            return
        # Let isort classify the import; only first/third-party names are
        # treated as external dependencies.
        isort_obj = isort.SortImports(file_contents='')
        import_category = isort_obj.place_module(module_name)
        if import_category not in ('FIRSTPARTY', 'THIRDPARTY'):
            # skip if is not a external library or is a white list library
            return
        relpath = os.path.relpath(
            node.parent.file, os.path.dirname(self.manifest_file))
        if os.path.dirname(relpath) == 'tests':
            # import errors rules don't apply to the test files
            # since these files are loaded only when running tests
            # and in such a case your
            # module and their external dependencies are installed.
            return
        self.add_message('missing-import-error', node=node,
                         args=(module_name,))
        ext_deps = self.manifest_dict.get('external_dependencies') or {}
        py_ext_deps = ext_deps.get('python') or []
        if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:
            # Relative imports never need a manifest entry.
            return
        # Either the dotted name or its top-level package must be declared.
        if module_name not in py_ext_deps and \
                module_name.split('.')[0] not in py_ext_deps:
            self.add_message('missing-manifest-dependency', node=node,
                             args=(module_name,))

    @utils.check_messages('odoo-addons-relative-import',
                          'missing-import-error',
                          'missing-manifest-dependency')
    def visit_importfrom(self, node):
        # 'from x import y': only module-level imports are dependency-checked.
        self.check_odoo_relative_import(node)
        if isinstance(node.scope(), astroid.Module):
            package = node.modname
            self._check_imported_packages(node, package)

    @utils.check_messages('odoo-addons-relative-import',
                          'missing-import-error',
                          'missing-manifest-dependency')
    def visit_import(self, node):
        # 'import x[, y]': check each imported name independently.
        self.check_odoo_relative_import(node)
        for name, _ in node.names:
            if isinstance(node.scope(), astroid.Module):
                self._check_imported_packages(node, name)

    @utils.check_messages('except-pass')
    def visit_tryexcept(self, node):
        """Visit block try except"""
        for handler in node.handlers:
            # Flag `except ...: pass` handlers that bind no name and do
            # nothing else (silent error swallowing).
            if (not handler.name and
                    len(handler.body) == 1 and
                    isinstance(handler.body[0], astroid.node_classes.Pass)):
                self.add_message('except-pass', node=handler)
    def _check_rst_syntax_error(self):
        """Check if rst file there is syntax error
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        rst_files = self.filter_files_ext('rst')
        self.msg_args = []
        for rst_file in rst_files:
            errors = self.check_rst_syntax(
                os.path.join(self.module_path, rst_file))
            for error in errors:
                msg = error.full_message
                res = re.search(
                    r'No directive entry for "([\w|\-]+)"|'
                    r'Unknown directive type "([\w|\-]+)"|'
                    r'No role entry for "([\w|\-]+)"|'
                    r'Unknown interpreted text role "([\w|\-]+)"', msg)
                # TODO: Add support for sphinx directives after fix
                # https://github.com/twolfson/restructuredtext-lint/issues/29
                if res:
                    # Skip directive errors
                    continue
                self.msg_args.append((
                    "%s:%d" % (rst_file, error.line or 0),
                    msg.strip('\n').replace('\n', '|')))
        if self.msg_args:
            return False
        return True

    def _check_missing_readme(self):
        """Check if exists ./README.{rst,md,txt} file
        :return: If exists return True else False
        """
        # msg_args is set up-front: it is only consumed on failure.
        self.msg_args = (self.config.readme_template_url,)
        for readme in DFTL_README_FILES:
            if os.path.isfile(os.path.join(self.module_path, readme)):
                return True
        return False

    def _check_xml_syntax_error(self):
        """Check if xml file there is syntax error
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        self.msg_args = []
        for xml_file in self.filter_files_ext('xml', relpath=True):
            result = self.parse_xml(os.path.join(self.module_path, xml_file))
            # parse_xml returns an error string on failure, a tree otherwise.
            if isinstance(result, string_types):
                self.msg_args.append((
                    xml_file, result.strip('\n').replace('\n', '|')))
        if self.msg_args:
            return False
        return True

    def _get_duplicate_xml_record_id(self, records):
        """Get duplicated records based on attribute id
        :param records list: List of lxml.etree.Element "<record"
        :return: Duplicated items.
            e.g. {record.id: [record_node1, record_node2]}
        :rtype: dict
        """
        all_records = {}
        for record in records:
            # Key by manifest section, id and parent noupdate flag, so the
            # same id in different sections/noupdate scopes is not a clash.
            record_id = "%s/%s_noupdate_%s" % (
                record.attrib.get('section', ''),
                record.attrib.get('id', ''),
                record.getparent().attrib.get('noupdate', '0'),
            )
            all_records.setdefault(record_id, []).append(record)
        # Remove all keys which not duplicated
        records = {}
        for key, items in all_records.items():
            if not len(items) < 2:
                records[key] = items
        return records
    def _check_duplicate_xml_record_id(self):
        """Check duplicated XML-IDs inside of the files of
        each manifest-section treated them separately
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        self.msg_args = []
        xml_records = []
        for fname, section in self._get_manifest_referenced_files().items():
            if os.path.splitext(fname)[1].lower() != '.xml':
                continue
            fname = os.path.join(self.module_path, fname)
            for xml_record in self.get_xml_records(fname):
                # Tag each record with its manifest section ('data', 'demo',
                # ...) so duplicates are only reported within a section.
                xml_record.attrib['section'] = section
                xml_records.append(xml_record)
        for name, fobjs in \
                self._get_duplicate_xml_record_id(xml_records).items():
            self.msg_args.append((
                "%s:%d" % (os.path.relpath(fobjs[0].base, self.module_path),
                           fobjs[0].sourceline),
                name,
                ', '.join([os.path.relpath(fobj.base, self.module_path) +
                           ':' + str(fobj.sourceline)
                           for fobj in fobjs[1:]]),
            ))
        if self.msg_args:
            return False
        return True

    def _check_duplicate_id_csv(self):
        """Check duplicate xml id in ir.model.access.csv files of a odoo module.
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        all_csv_ids = []
        self.msg_args = []
        for csv_file_rel in self.filter_files_ext('csv', relpath=True):
            csv_file = os.path.join(self.module_path, csv_file_rel)
            if os.path.basename(csv_file) == 'ir.model.access.csv':
                all_csv_ids.extend(self.get_field_csv(csv_file))
        # NOTE(review): csv_file_rel below is the last file visited by the
        # loop above, so duplicates spanning several csv files are all
        # attributed to that last file — looks unintended; confirm before
        # relying on the reported path.
        duplicated_ids_csv = self.get_duplicated_items(all_csv_ids)
        for duplicated_id_csv in duplicated_ids_csv:
            self.msg_args.append((csv_file_rel, duplicated_id_csv))
        if duplicated_ids_csv:
            return False
        return True

    def _check_redundant_modulename_xml(self):
        """Check redundant module name in xml file.
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        self.msg_args = []
        for xml_file_rel in self.filter_files_ext('xml', relpath=True):
            xml_file = os.path.join(self.module_path, xml_file_rel)
            # e.g. <record id="module.xml_id"> inside the same module.
            for xml_id, lineno in self.get_xml_redundant_module_name(
                    xml_file, self.module):
                self.msg_args.append(
                    ("%s:%d" % (xml_file_rel, lineno), xml_id))
        if self.msg_args:
            return False
        return True

    def _check_character_not_valid_in_resource_link(self):
        """Check that resources referenced in src/href do not contain an
        invalid character (absolute path without a proper file extension)."""
        self.msg_args = []
        for xml_file in self.filter_files_ext('xml'):
            doc = self.parse_xml(os.path.join(self.module_path, xml_file))
            for name, attr in (('link', 'href'), ('script', 'src')):
                # Skip files with XML syntax errors (parse_xml -> str).
                nodes = (doc.xpath('.//%s[@%s]' % (name, attr))
                         if not isinstance(doc, string_types) else [])
                for node in nodes:
                    resource = node.get(attr, '')
                    ext = os.path.splitext(os.path.basename(resource))[1]
                    if (resource.startswith('/') and not
                            re.search('^[.][a-zA-Z]+$', ext)):
                        self.msg_args.append(("%s:%s" % (xml_file,
                                                         node.sourceline)))
        if self.msg_args:
            return False
        return True
    def _get_duplicate_xml_fields(self, fields):
        """Get duplicated xml fields based on attribute name
        :param fields list: List of lxml.etree.Element "<field"
        :return: Duplicated items.
            e.g. {field.name: [field_node1, field_node2]}
        :rtype: dict
        """
        all_fields = {}
        for field in fields:
            field_xml = field.attrib.get('name')
            if not field_xml:
                continue
            # Two fields only clash when name, context, filter_domain AND
            # parent node all coincide (e.g. search-view fields may repeat a
            # name with different contexts legitimately).
            all_fields.setdefault(
                (field_xml, field.attrib.get('context'),
                 field.attrib.get('filter_domain'),
                 field.getparent()), []).append(field)
        # Keep only the entries that occur at least twice (true duplicates).
        return dict(((name, context, filter_domain, parent_node), nodes) for
                    (name, context, filter_domain, parent_node), nodes in
                    all_fields.items() if len(nodes) >= 2)

    def _check_duplicate_xml_fields(self):
        """Check duplicate field in all record of xml files of a odoo module.
        Important note: this check does not work with inherited views.
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        self.msg_args = []
        for xml_file in self.filter_files_ext('xml', relpath=True):
            for record in self.get_xml_records(
                    os.path.join(self.module_path, xml_file)):
                if record.xpath('field[@name="inherit_id"]'):
                    # Inherited views may repeat fields on purpose.
                    continue
                for xpath in ['field', 'field/*/field',
                              'field/*/field/tree/field',
                              'field/*/field/form/field']:
                    for name, fobjs in self._get_duplicate_xml_fields(
                            record.xpath(xpath)).items():
                        self.msg_args.append((
                            "%s:%d" % (xml_file, fobjs[0].sourceline), name[0],
                            ', '.join([str(fobj.sourceline)
                                       for fobj in fobjs[1:]]),
                        ))
        if self.msg_args:
            return False
        return True

    def _check_dangerous_filter_wo_user(self):
        """Check dangerous filter without a user assigned.
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        xml_files = self.filter_files_ext('xml')
        for xml_file in xml_files:
            ir_filter_records = self.get_xml_records(
                os.path.join(self.module_path, xml_file), model='ir.filters')
            for ir_filter_record in ir_filter_records:
                ir_filter_fields = ir_filter_record.xpath(
                    "field[@name='name' or @name='user_id']")
                # if exists field="name" then is a new record
                # then should be field="user_id" too
                if ir_filter_fields and len(ir_filter_fields) == 1:
                    # TODO: Add a list of msg_args before of return
                    # TODO: Add source lineno in all xml checks
                    self.msg_args = (
                        "%s:%d" % (xml_file, ir_filter_record.sourceline),
                        ir_filter_record.get('id'),)
                    return False
        return True
    @staticmethod
    def _get_priority(view):
        """Return the integer priority of an ir.ui.view record (first
        <field name="priority"> found, via its eval or text), or 0 when
        missing or not a valid integer."""
        try:
            priority_node = view.xpath("field[@name='priority'][1]")[0]
            return int(priority_node.get('eval', priority_node.text) or 0)
        except (IndexError, ValueError):
            # IndexError: If the field is not found
            # ValueError: If the value found is not valid integer
            pass
        return 0

    @staticmethod
    def _is_replaced_field(view):
        """Return True/False whether the view's arch replaces a field or an
        xpath node, or None when the view has no inline XML arch."""
        try:
            arch = view.xpath("field[@name='arch' and @type='xml'][1]")[0]
        except IndexError:
            return None
        replaces = \
            arch.xpath(".//field[@name='name' and @position='replace'][1]") + \
            arch.xpath(".//xpath[@position='replace'][1]")
        return bool(replaces)

    def _check_dangerous_view_replace_wo_priority(self):
        """Check dangerous view defined with low priority
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        self.msg_args = []
        xml_files = self.filter_files_ext('xml')
        for xml_file in xml_files:
            views = self.get_xml_records(
                os.path.join(self.module_path, xml_file), model='ir.ui.view')
            for view in views:
                priority = self._get_priority(view)
                is_replaced_field = self._is_replaced_field(view)
                # Replacing nodes with a low priority view shadows other
                # inheriting views; demand at least config.min_priority.
                if is_replaced_field and priority < self.config.min_priority:
                    self.msg_args.append((
                        "%s:%s" % (xml_file, view.sourceline), priority,
                        self.config.min_priority))
        if self.msg_args:
            return False
        return True

    def _check_create_user_wo_reset_password(self):
        """Check xml records of user without the context
        'context="{'no_reset_password': True}"'
        This context avoid send email and mail log warning
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        self.msg_args = []
        xml_files = self.filter_files_ext('xml')
        for xml_file in xml_files:
            user_records = self.get_xml_records(
                os.path.join(self.module_path, xml_file), model='res.users')
            # if exists field="name" then is a new record
            # then should be context
            self.msg_args.extend([
                ("%s:%s" % (xml_file, user_record.sourceline))
                for user_record in user_records
                if user_record.xpath("field[@name='name']") and
                'no_reset_password' not in (user_record.get('context') or '')])
        if self.msg_args:
            return False
        return True

    def _check_javascript_lint(self):
        """Check javascript lint
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        self.msg_args = []
        for js_file_rel in self.filter_files_ext('js', relpath=True):
            js_file = os.path.join(self.module_path, js_file_rel)
            errors = self.check_js_lint(js_file, self.config.jslintrc)
            for error in errors:
                # check_js_lint yields strings like ':LINE:COL message'.
                self.msg_args.append((js_file_rel + error,))
        if self.msg_args:
            return False
        return True
    def _check_deprecated_data_xml_node(self):
        """Check deprecated <data> xml node inside <odoo> xml node
        :return: False if found <data> xml node inside <odoo> xml node"""
        xml_files = self.filter_files_ext('xml')
        self.msg_args = []
        for xml_file in xml_files:
            doc = self.parse_xml(os.path.join(self.module_path, xml_file))
            odoo_nodes = doc.xpath("/odoo") \
                if not isinstance(doc, string_types) else []
            children, data_node = ((odoo_nodes[0].getchildren(),
                                    odoo_nodes[0].findall('data'))
                                   if odoo_nodes else ([], []))
            # Flag only <odoo> whose single child is <data>: that wrapper is
            # redundant (several <data> siblings are still legitimate).
            if len(children) == 1 and len(data_node) == 1:
                lineno = odoo_nodes[0].sourceline
                self.msg_args.append(("%s:%s" % (xml_file, lineno)))
        if self.msg_args:
            return False
        return True

    def _check_deprecated_openerp_xml_node(self):
        """Check deprecated <openerp> xml node
        :return: False if exists <openerp> node and
                 add list of xml files in self.msg_args
        """
        xml_files = self.filter_files_ext('xml')
        self.msg_args = []
        for xml_file in xml_files:
            doc = self.parse_xml(os.path.join(self.module_path, xml_file))
            openerp_nodes = doc.xpath("/openerp") \
                if not isinstance(doc, string_types) else []
            if openerp_nodes:
                lineno = openerp_nodes[0].sourceline
                self.msg_args.append(("%s:%s" % (xml_file, lineno)))
        if self.msg_args:
            return False
        return True

    def _check_wrong_tabs_instead_of_spaces(self):
        """Check wrong tabs character instead of four spaces.
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        self.msg_args = []
        for type_file in self.config.extfiles_to_lint:
            for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
                ext_file = os.path.join(self.module_path, ext_file_rel)
                countline = 0
                with open(ext_file, 'rb') as fp:
                    for line in fp:
                        countline += 1
                        # A tab in the leading whitespace survives the space
                        # strip but not the tab strip -> indentation tab.
                        line_space_trip = line.lstrip(b' ')
                        if line_space_trip != line_space_trip.lstrip(b'\t'):
                            self.msg_args.append(
                                ("%s:%d" % (ext_file_rel, countline)))
        if self.msg_args:
            return False
        return True

    def _check_missing_newline_extrafiles(self):
        """Check missing newline in other ext files (.xml, .csv, .po)
        :return: False if exists errors and
                 add list of errors in self.msg_args
        """
        self.msg_args = []
        for type_file in self.config.extfiles_to_lint:
            for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
                ext_file = os.path.join(self.module_path, ext_file_rel)
                last_line = ''
                # NOTE: SEEK_END just is supported with 'rb' mode for py3
                with open(ext_file, 'rb') as fp:
                    if os.stat(ext_file).st_size > 1:
                        fp.seek(-2, os.SEEK_END)
                        last_line = fp.readline()
                        if not (last_line.endswith(b'\n') or
                                last_line.endswith(b'\r')):
                            self.msg_args.append((ext_file_rel,))
        if self.msg_args:
            return False
        return True
    def _get_manifest_referenced_files(self):
        """Map every data file listed in the manifest to the manifest key
        ('data', 'demo', ...) that references it."""
        referenced_files = {}
        for data_type in DFTL_MANIFEST_DATA_KEYS:
            for fname in self.manifest_dict.get(data_type) or []:
                referenced_files[fname] = data_type
        return referenced_files

    def _get_xml_referenced_files(self):
        """Map files referenced indirectly from <report xml=.../xsl=...>
        nodes in the manifest's XML files to their manifest section."""
        referenced_files = {}
        for data_type in DFTL_MANIFEST_DATA_KEYS:
            for fname in self.manifest_dict.get(data_type) or []:
                if not fname.endswith('.xml'):
                    continue
                referenced_files.update(
                    self._get_xml_referenced_files_report(fname, data_type)
                )
        return referenced_files

    def _get_xml_referenced_files_report(self, fname, data_type):
        """Collect xml/xsl paths referenced by <report> tags in `fname`,
        stripping the leading addon directory from each path."""
        return {
            # those files are relative to the addon path
            os.path.join(
                *record.attrib[attribute].split(os.sep)[1:]
            ): data_type
            for attribute in ['xml', 'xsl']
            for record in self.parse_xml(
                os.path.join(self.module_path, fname)
            )
            .xpath('//report[@%s]' % attribute)
        }

    def _get_module_files(self):
        """List every convertible data file present in the module tree."""
        module_files = []
        for type_file in self.config.extfiles_convert:
            for ext_file_rel in self.filter_files_ext(type_file, relpath=True):
                module_files.append(ext_file_rel)
        return module_files

    def _check_file_not_used(self):
        """Check if a file is not used from manifest"""
        module_files = set(self._get_module_files())
        referenced_files = set(self._get_manifest_referenced_files()).union(
            set(self._get_xml_referenced_files())
        )
        # Directories whose files are loaded by other mechanisms.
        excluded_dirs = ['static', 'test', 'tests', 'migrations']
        no_referenced_files = [
            f for f in (module_files - referenced_files)
            if f.split(os.path.sep)[0] not in excluded_dirs
        ]
        self.msg_args = no_referenced_files
        return not no_referenced_files

    def _check_xml_attribute_translatable(self):
        """The xml attribute is missing the translation="off" tag
        Example <attribute name="groups">sale.group</attribute>
        """
        # Only meaningful when linting exclusively for Odoo 8.0.
        if (self.linter._all_options['valid_odoo_versions'].config
                .valid_odoo_versions != ['8.0']):
            return True
        self.msg_args = []
        for xml_file in self.filter_files_ext('xml', relpath=True):
            for record in self.get_xml_records(
                    os.path.join(self.module_path, xml_file), None,
                    '//attribute[not(@name="string") and not(@translation)]'):
                self.msg_args.append(
                    ("%s:%d" % (xml_file, record.sourceline), 'xml_id'))
        if self.msg_args:
            return False
        return True
    def _check_xml_deprecated_tree_attribute(self):
        """The tree-view declaration is using a deprecated attribute.
        Example <tree string="Partners"></tree>
        """
        # Each entry: the deprecated attribute, the Odoo versions where it
        # was still valid, and the xpath locating offending <tree> nodes.
        checks = [
            {
                'attr': 'colors',
                'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0', '8.0'},
                'xpath': './/tree[@colors]',
            },
            {
                'attr': 'fonts',
                'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0', '8.0'},
                'xpath': './/tree[@fonts]',
            },
            {
                'attr': 'string',
                'skip_versions': {'4.2', '5.0', '6.0', '6.1', '7.0'},
                'xpath': './/tree[@string]',
            },
        ]
        valid_versions = set(
            self.linter._all_options['valid_odoo_versions'].config
            .valid_odoo_versions)
        # A check applies when its attribute is configured for reporting and
        # at least one targeted version postdates the attribute's validity.
        applicable_checks = [check for check in checks if (
            check['attr'] in self.config.deprecated_tree_attributes and
            bool(valid_versions - check['skip_versions']))]
        self.msg_args = []
        for xml_file in self.filter_files_ext('xml', relpath=True):
            for record in self.get_xml_records(
                    os.path.join(self.module_path, xml_file),
                    model='ir.ui.view'):
                for check in applicable_checks:
                    if record.xpath(check['xpath']):
                        self.msg_args.append((
                            '%s:%d' % (xml_file, record.sourceline),
                            check['attr']))
        if self.msg_args:
            return False
        return True

    def _check_xml_deprecated_qweb_directive(self):
        """Check for use of deprecated QWeb directives t-*-options.
        :return: False if deprecated directives are found, in which case
        self.msg_args will contain the error messages.
        """
        valid_versions = set(self.linter._all_options[
            'valid_odoo_versions'].config.valid_odoo_versions)
        if not valid_versions & {'10.0', '11.0'}:
            # The deprecation only concerns Odoo 10.0/11.0 targets.
            return True
        deprecated_directives = {
            't-esc-options',
            't-field-options',
            't-raw-options',
        }
        directive_attrs = '|'.join('@%s' % d for d in deprecated_directives)
        xpath = '|'.join(
            '/%s//template//*[%s]' % (tag, directive_attrs)
            for tag in ('odoo', 'openerp')
        )
        self.msg_args = []
        for xml_file in self.filter_files_ext('xml', relpath=False):
            doc = self.parse_xml(xml_file)
            if isinstance(doc, string_types):
                # XML syntax error; reported by another check.
                continue
            for node in doc.xpath(xpath):
                # Find which directive was used exactly.
                directive = next(
                    iter(set(node.attrib) & deprecated_directives))
                self.msg_args.append((
                    '%s:%d' % (xml_file, node.sourceline), directive))
        return not bool(self.msg_args)
|
8,776 | 05e57ed95427f0de74ea5b0589c5cd56e4a96f73 | # https://github.com/openai/gym/blob/master/gym/envs/__init__.py#L449
import gym
import numpy as np
from rl_main.conf.names import EnvironmentName, DeepLearningModelName
from rl_main.environments.environment import Environment
from rl_main.main_constants import DEEP_LEARNING_MODEL
class BreakoutDeterministic_v4(Environment):
    """Wrapper around gym's BreakoutDeterministic-v4.

    Removes the FIRE action from the agent-facing action space (FIRE is
    issued automatically on reset and after losing a ball) and preprocesses
    frames to grayscale, downsampled by 2 (210x160x3 -> 105x80).
    """

    def __init__(self):
        self.env = gym.make(EnvironmentName.BREAKOUT_DETERMINISTIC_V4.value)
        super(BreakoutDeterministic_v4, self).__init__()
        self.action_shape = self.get_action_shape()
        self.state_shape = self.get_state_shape()
        # state_shape is (height, width, channels) after downsampling.
        self.cnn_input_height = self.state_shape[0]
        self.cnn_input_width = self.state_shape[1]
        self.cnn_input_channels = self.state_shape[2]
        self.continuous = False
        # Lives remaining at the last observation; -1 until first reset.
        self.last_ball_lives = -1
        self.skipping_state_fq = 3
        self.skipping_state_index = 0

    @staticmethod
    def to_grayscale(img):
        """Convert an RGB frame (H, W, 3) to grayscale using the standard
        luma weights."""
        r, g, b = img[:, :, 0], img[:, :, 1], img[:, :, 2]
        gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
        return gray

    @staticmethod
    def downsample(img):
        """Halve both spatial dimensions by taking every second pixel."""
        return img[::2, ::2]

    @staticmethod
    def transform_reward(reward):
        """Clip the reward to {-1, 0, +1}."""
        return np.sign(reward)

    def preprocess(self, img):
        """Downsample + grayscale a raw frame; shape depends on the model:
        (1, 105, 80) for the CNN, flat 8400 vector for the MLP."""
        gray_frame = self.to_grayscale(self.downsample(img))
        if DEEP_LEARNING_MODEL == DeepLearningModelName.ActorCriticCNN:
            state = np.expand_dims(gray_frame, axis=0)
        elif DEEP_LEARNING_MODEL == DeepLearningModelName.ActorCriticMLP:
            state = gray_frame.flatten()
        else:
            # Unknown model type: no preprocessing defined.
            state = None
        return state

    def get_n_states(self):
        if DEEP_LEARNING_MODEL == DeepLearningModelName.ActorCriticCNN:
            return 1, 105, 80  # input_channels, input_height, input_width
        elif DEEP_LEARNING_MODEL == DeepLearningModelName.ActorCriticMLP:
            return 8400
        else:
            return None

    def get_n_actions(self):
        # One action (FIRE) is removed from the agent's action space.
        return self.env.action_space.n - 1

    @property
    def action_meanings(self):
        action_meanings = self.env.get_action_meanings()
        action_meanings.remove('FIRE')
        return action_meanings

    def get_state_shape(self):
        state_shape = (int(self.env.observation_space.shape[0]/2), int(self.env.observation_space.shape[1]/2), 1)
        return state_shape

    def get_action_shape(self):
        action_shape = self.env.action_space.n - 1
        return action_shape,

    def get_action_space(self):
        return self.env.action_space

    def reset(self):
        """Reset the env, FIRE once to launch the ball, record the lives
        counter and return the preprocessed first frame."""
        self.env.reset()
        next_state, reward, done, info = self.env.step(1)
        self.last_ball_lives = info['ale.lives']
        info["dead"] = False  # if a ball fall down, dead is true
        return self.preprocess(next_state)

    def step(self, action):
        """Map the reduced agent action {0: NOOP, 1: RIGHT, 2: LEFT} to the
        env action, auto-FIRE after a lost ball (penalizing it with -5
        before sign clipping) and return
        (state, reward, adjusted_reward, done, info)."""
        if action == 1:
            env_action = 2
        elif action == 2:
            env_action = 3
        else:
            env_action = 0
        next_state, reward, done, info = self.env.step(env_action)
        if self.last_ball_lives != info['ale.lives']:
            # A ball was lost: relaunch with FIRE and mark the transition.
            env_action = 1
            self.last_ball_lives = info['ale.lives']
            next_state, reward, done, info = self.env.step(env_action)
            info["dead"] = True
            reward = -5.0
        # info["skipping"] = True
        # if self.skipping_state_index == self.skipping_state_fq:
        #     self.skipping_state_index = 0
        #     info["skipping"] = False
        adjusted_reward = self.transform_reward(reward)
        # self.skipping_state_index += 1
        return self.preprocess(next_state), reward, adjusted_reward, done, info

    def render(self):
        self.env.render()

    def close(self):
        self.env.close()
|
8,777 | b54f47de85fe95d47a1b1be921997ad86d7b450d | # nomer7
import no2_modul2 # Atau apapun file-nya yang kamu buat tadi
class MhsTIF(no2_modul2.Mahasiswa):  # note the parent class: Mahasiswa
    """MhsTIF class built from the Mahasiswa class."""

    def kataKanPy(self):
        """Print a short message (method defined directly on MhsTIF)."""
        print('Python is cool.')

# Bug fix: the answer text below was written with a single leading double
# quote per line, which produced unterminated string literals (SyntaxError).
# Rewritten as real comments.
# Q: Do the methods / state come from class Manusia, Mahasiswa, or MhsTIF?
# A: The methods/state that appear come from all of the classes — Manusia,
#    Mahasiswa, and MhsTIF.  This is because MhsTIF is a child class of
#    Mahasiswa, which makes MhsTIF inherit all the properties of Mahasiswa
#    and Manusia.
|
8,778 | e08820ff4fb35a3770fcb110ef7181aad1abbae5 | from django.conf.urls import url
from django.contrib import admin
from comments.api.views import CommentListAPIView, CommentDetailAPIView
# URL routes for the comments API.
urlpatterns = [
    url(r'^$', CommentListAPIView.as_view(), name='list'),
    # Bug fix: a class-based view must be adapted with .as_view(); passing
    # the class itself (as before) raises when Django dispatches the request.
    url(r'^(?P<pk>\d+)/$', CommentDetailAPIView.as_view(), name='detail'),
]
|
8,779 | 00312f57e8a78444937f46cecb62a2b684b4fc91 | a = int(input("Enter no. of over: "))
# Hand-cricket game: the computer sets a target, the player bats.
print("total ball:",a*6 )
import random
# Computer's total score, drawn uniformly from 0..36.
comp_runs = random.randint(0,36)
print("computer's run:" ,comp_runs)
# Target is one more than the computer's score.
comp_runs = comp_runs+1
print("runs need to win:",comp_runs)
# One "chance" per ball: a overs * 6 balls.
chances_1 = a*6
no_of_chances_1 = 0
your_runs = 0
print("-----------------------------------------------\nYour Batting\n")
while no_of_chances_1 < chances_1:
    runs = int(input("Enter Runs for Your Batting Turn: "))
    # Computer "bowls" a guess in 1..6; matching it means you are out.
    comp_bowl = random.randint(1,6)
    if runs == comp_bowl:
        print("Computer Guess: ", comp_bowl)
        print("You are Out. Your Total Runs= ", your_runs, "\n")
        break
    elif runs > 10:
        # Inputs above 10 are rejected and do not consume a ball.
        print("ALERT!! Support No only till 10\n")
        continue
    else:
        your_runs = your_runs + runs
        print("Computer Guess: ", comp_bowl)
        print("Your runs Now are: ", your_runs, "\n")
        if comp_runs < your_runs:
            # Target chased down early; stop batting.
            break
    no_of_chances_1 = no_of_chances_1 + 1
#after the over ends now result time
print("\n-----------------------------------------------\nRESULTS: ")
if comp_runs < your_runs:
    print("You won the Game.")
elif comp_runs == your_runs:
    print("The Game is a Tie")
else:
    print("Computer won the Game.")
8,780 | 2d4187ab5d178efa4920110ccef61c608fdb14c0 | """
# System of national accounts (SNA)
This is an end-to-end example of national accounts sequence,
from output to net lending. It is based on Russian Federation data
for 2014-2018.
Below is a python session transcript with comments.
You can fork [a github repo](https://github.com/epogrebnyak/sna-ru)
to replicate calculations.
"""
"""
## Chart
A short mnemonic chart to accompany the calculations:
```
[controlling for factor income and transfers]
| |
V V
X -> GDP -> GNI -> GNDI = C + S (+ net capital transfers)
| |
Ch + I + Cg + NX S = I + Net lending
|
W + t' + P Always a mystery:
| S - I = NX = Net lending
X - AX (See Open Economy identities below)
```
"""
"""
## Preparations
"""
import pandas as pd
import handout
doc = handout.Handout("handout") # handout: exclude
"""
`eq` function will check identities considering some rounding error.
"""
def eq(df1, df2, precision=0.5) -> bool:
    """Element-wise near-equality of two pandas objects.

    True when every entry of |df1 - df2| is strictly below *precision*.
    """
    gap = (df1 - df2).abs()
    return (gap < precision).all()
"""
Read dataset from file.
"""
df = pd.read_csv("data/sna.csv", index_col=0)
"""
## 1. Output at market prices
Output at market prices is output at basic prices
plus tax on products less subsidy on products.
"""
df["X"] = df.Xb + df.Tp - df.Sp
"""
## 2. Production of goods and services account
Output and import are resources,
consumption, investment (I) and export are uses.
Consumption is intermediate (AX) and final (C).
"""
resources = df.X + df.IM
uses = df.AX + df.C + df.I + df.EX
doc.add_image("res_use.png", "png", width=1) # handout: exclude
doc.show() # handout: exclude
"""
Resources and uses are equal, controlling for
[statistical discrepancy](https://www.stat.fi/meta/kas/tilastollinen_e_en.html).
"""
assert eq(resources, uses + df.desc)
"""
## 3. Gross domestic product (GDP)
There are three ways to calculate a GDP.
With some luck they yield to similar values.
"""
gdp1 = df.X - df.AX
gdp2 = (df.C + df.I - df.IM) + df.EX + df.desc
gdp3 = df.W + df.Tf - df.Sf + df.GP
assert eq(gdp1, gdp2)
assert eq(gdp2, df.GDP)
assert eq(gdp3, df.GDP)
"""```
>> gdp1.divide(10**6).round(1)
2014 79.1
2015 83.1
2016 86.0
2017 92.1
2018 103.9
```"""
"""
## 4. Controlling for income and current transfers from abroad
Gross national income (GNI) is GDP and
net property and labor ("factor") income
form rest of the world (ROW).
"""
gni = (
df.GDP
+ df.ROW_property_income_recieved
- df.ROW_property_income_paid
+ df.ROW_wage_net
)
assert eq(gni.iloc[1:,], df.GNI.iloc[1:,])
"""
Gross national disposable income (GNDI)
is GNI and net current transfers from abroad
"""
gndi = gni + df.CT_recieved - df.CT_paid
assert eq(gndi, df.GNDI)
"""
## 5. Savings
Savings is gross domestic income
less household and government consumption.
"""
S = gndi - (df.HH + df.G)
assert eq(df.C, df.HH + df.G)
assert eq(S, df.S)
"""
Investment is gross fixed capital formation
and change in inventories.
"""
I = df.GFCF + df.inv
assert eq(I, df.I)
"""
## 6. Net lending
Net lending is S-I, and a balance of capital transfers
and a non-produced non-material asset aquisition (K.2).
"""
NL = S + df.d9_recieved - df.d9_paid - I - df.k2
assert eq(NL, df.NL0)
"""
Net lending is an entry value into financial account (flow of funds).
Is usually contains a statistical error, later netted in flow of funds.
"""
"""
## Links
- [SNA 2008 manual](https://unstats.un.org/unsd/nationalaccount/docs/SNA2008.pdf)
- [Russian national accounts data](https://www.gks.ru/folder/210/document/13221)
- [Open economy identitites](https://github.com/hisamsabouni/macroLectures/blob/master/lecture_6.pdf)
"""
doc.show() # handout: exclude
|
8,781 | d1ce6c081dce2e4bdb6087cd61d7f857dbb1348d | a=float.input('Valor da conta')
print('Valor da conta com 10%: R$',(a))
|
8,782 | a92384a6abee9e231092ee0e4dbdb60bafcc9979 | import glob
import csv
import math
import pandas
# this is used to train the model, try different model, generate the csv file of the result
import pandas
import pandas as pd
import pickle
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix
import csv
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import RidgeClassifierCV
import attr
# from pycm import *
from sklearn.metrics import f1_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import cohen_kappa_score
from sklearn import tree
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
import numpy as np
# evaluation_path = '/aul/homes/qli027/projects/RNN/evaluation/random/'
# # activity = ['work','go_back_home','baby_present','entertainment','smoke','alexa','others','print','check_body_condition']
# for i in range (0,9):
# with open(evaluation_path + str(i) +'.csv', 'w') as new:
# realnames = ['model','TP','FN','TN','FP']
# writer = csv.DictWriter(new, fieldnames = realnames)
# writer.writeheader()
# new.close()
def _fitted(estimator, X_train, y_train):
    """Fit *estimator* on the training set and return it (sklearn's fit returns self)."""
    return estimator.fit(X_train, y_train)


def naiveBayes(X_train, y_train):
    """Gaussian naive Bayes classifier fitted on the training data."""
    return _fitted(GaussianNB(), X_train, y_train)


def knn(X_train, y_train):
    """k-nearest-neighbours classifier with sklearn defaults."""
    return _fitted(KNeighborsClassifier(), X_train, y_train)


def decisionTree(X_train, y_train):
    """Decision tree with class-balanced sample weighting."""
    return _fitted(tree.DecisionTreeClassifier(class_weight='balanced'), X_train, y_train)


def svm_linear(X_train, y_train):
    """Linear-kernel SVM, class-balanced."""
    return _fitted(SVC(kernel='linear', class_weight='balanced'), X_train, y_train)


def svm_2(X_train, y_train):
    """Degree-2 polynomial-kernel SVM, class-balanced."""
    return _fitted(SVC(kernel='poly', class_weight='balanced', degree=2, random_state=0), X_train, y_train)


def svm_3(X_train, y_train):
    """Degree-3 polynomial-kernel SVM, class-balanced."""
    return _fitted(SVC(kernel='poly', class_weight='balanced', degree=3, random_state=0), X_train, y_train)


def svm_4(X_train, y_train):
    """Degree-4 polynomial-kernel SVM, class-balanced."""
    return _fitted(SVC(kernel='poly', class_weight='balanced', degree=4, random_state=0), X_train, y_train)


def svm_5(X_train, y_train):
    """Degree-5 polynomial-kernel SVM, class-balanced."""
    return _fitted(SVC(kernel='poly', class_weight='balanced', degree=5, random_state=0), X_train, y_train)


def svm_6(X_train, y_train):
    """Degree-6 polynomial-kernel SVM, class-balanced."""
    return _fitted(SVC(kernel='poly', class_weight='balanced', degree=6, random_state=0), X_train, y_train)


def svm_7(X_train, y_train):
    """Degree-7 polynomial-kernel SVM, class-balanced."""
    return _fitted(SVC(kernel='poly', class_weight='balanced', degree=7, random_state=0), X_train, y_train)


def svm_8(X_train, y_train):
    """Degree-8 polynomial-kernel SVM, class-balanced."""
    return _fitted(SVC(kernel='poly', class_weight='balanced', degree=8, random_state=0), X_train, y_train)


def logisticRegression(X_train, y_train):
    """Logistic regression, class-balanced."""
    return _fitted(LogisticRegression(class_weight='balanced'), X_train, y_train)


def passiveAggressiveClassifier(X_train, y_train):
    """Passive-aggressive linear classifier, class-balanced."""
    return _fitted(PassiveAggressiveClassifier(max_iter=1000, random_state=0, tol=1e-3, class_weight='balanced'), X_train, y_train)


def svm_rbf(X_train, y_train):
    """RBF-kernel SVM, class-balanced."""
    return _fitted(SVC(kernel='rbf', class_weight='balanced'), X_train, y_train)


def random_forest(X_train, y_train):
    """Random forest of 100 depth-2 trees, class-balanced."""
    return _fitted(RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0, class_weight='balanced'), X_train, y_train)


def ridgeClassifierCV(X_train, y_train):
    """Ridge classifier with built-in CV over a small alpha grid, class-balanced."""
    return _fitted(RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1], class_weight='balanced'), X_train, y_train)
def evaluation_result(y_test, y_pred, model,
                      labels=tuple(range(13)),
                      evaluation_path='C:/penv/unsw/csvfiles/labeled/count/useractivity/evaluation/'):
    """Append one-vs-rest confusion counts for each class to per-class CSVs.

    For every class i a row [model, TP, FN, TN, FP] is appended to
    '<evaluation_path><i>.csv'.

    :param y_test: true labels
    :param y_pred: predicted labels
    :param model: model name written in the CSV row
    :param labels: class labels to evaluate (new, defaults to 0..12 as before)
    :param evaluation_path: output folder (new, defaults to the original path)
    """
    cnf_matrix = confusion_matrix(y_test, y_pred, labels=list(labels))
    print(cnf_matrix)
    diag = np.diag(cnf_matrix)
    # One-vs-rest decomposition of the multiclass confusion matrix.
    FP = (cnf_matrix.sum(axis=0) - diag).astype(int)
    FN = (cnf_matrix.sum(axis=1) - diag).astype(int)
    TP = diag.astype(int)
    TN = (cnf_matrix.sum() - (FP + FN + TP)).astype(int)
    print(TP, TN, FP, FN)
    for i in range(len(labels)):
        # newline='' keeps the csv module from writing blank rows on Windows;
        # the explicit close inside the with-block was redundant and removed.
        with open(evaluation_path + str(i) + '.csv', 'a', newline='') as csvfile:
            csv.writer(csvfile).writerow([model, TP[i], FN[i], TN[i], FP[i]])
#
data = pd.read_csv("C:/penv/unsw/csvfiles/labeled/count/useractivity/new.csv")
data = data.dropna()
feature_cols = ['Size', 'Amazon Echo', 'Belkin Motion',
'Belkin Switch', 'Blipcare BloodPressure Meter', 'HP Printer', 'Dropcam', 'Insteon Camera',
'LIFX Smart Bulb', 'NEST Smoke Alarm', 'Netatmo Welcome Camera', 'Netatmo Weather Station',
'PIX-STAR Photo-frame', 'Samsung SmartCam', 'Smart Things', 'TP-Link Day Night Cloud camera',
'TP-Link Smart plug', 'Triby Speaker', 'Withings Smart Baby Monitor', 'Withings Smart scale',
'Withings Aura smart sleep sensor', 'iHome Plug', 'Samsung Galaxy Tab', 'Android Phone 1',
'Laptop', 'MacBook', 'Android Phone 2', 'iPhone', 'MacBook/iPhone']
# feature_cols = [ 'Amazon Echo', 'Belkin Motion',
# 'Belkin Switch','Blipcare BloodPressure Meter','HP Printer','Dropcam','Insteon Camera',
# 'LIFX Smart Bulb', 'NEST Smoke Alarm','Netatmo Welcome Camera', 'Netatmo Weather Station',
# 'PIX-STAR Photo-frame','Samsung SmartCam','Smart Things', 'TP-Link Day Night Cloud camera',
# 'TP-Link Smart plug','Triby Speaker','Withings Smart Baby Monitor','Withings Smart scale',
# 'Withings Aura smart sleep sensor','iHome Plug', 'Samsung Galaxy Tab', 'Android Phone 1',
# 'Laptop', 'MacBook', 'Android Phone 2','iPhone','MacBookiPhone']
# feature_cols = ['Size']
X = data[feature_cols]
scaler = StandardScaler()
X = scaler.fit_transform(X) # Features
y = data['User Activity'] # Target variable
# instantiate the model (using the default parameters)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)
# d = [decisionTree, logisticRegression,knn, svm_linear, svm_2,svm_3,svm_rbf,ridgeClassifierCV,naiveBayes,cnn_3layers,random_forest]
model = decisionTree(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'decisionTree')
model = logisticRegression(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'logisticRegression')
model = knn(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'knn')
model = svm_linear(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'svm_linear')
model = svm_2(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'svm_2')
model = svm_3(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'svm_3')
model = svm_rbf(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'svm_rbf')
model = naiveBayes(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'naiveBayes')
model = random_forest(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'random_forest')
model = ridgeClassifierCV(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'ridgeClassifierCV')
model = passiveAggressiveClassifier(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'passiveAggressiveClassifier')
|
8,783 | 2cbdb828ab6e0ad44154f0c5b2a1d807fd0d2520 | from redis_db import RedisClient
from setting import TEST_URL
import requests
class Test_Proxy():
    """Probes proxies against TEST_URL and updates their score in Redis."""

    def __init__(self):
        # Redis-backed proxy store (scores raised/lowered by the tester).
        self.db = RedisClient()

    def proxy_test(self, proxy):
        """Fetch TEST_URL through *proxy*; promote on success, demote on failure.

        A 200 response marks the proxy fully usable (db.max); connection
        failures and timeouts lower its score (db.decrease).
        """
        proxies = {
            "http": proxy,
            "https": proxy,
        }
        try:
            r = requests.get(TEST_URL, proxies=proxies, timeout=5)
            if r.status_code == 200:
                self.db.max(proxy)
        except (requests.exceptions.ConnectionError,
                requests.exceptions.Timeout):
            # BUG FIX: only ConnectionError was caught before, so a proxy
            # hitting the 5 s timeout crashed the checker instead of being
            # demoted.
            self.db.decrease(proxy)
|
8,784 | 2b746d89d34435eb5f3a5b04da61c5cc88178852 | __author__ = 'simsun'
|
8,785 | afb09f9d5860994f38e8553b19e7ebc339cc2df6 | """
These are data input download and prep scripts. They download and massage the data for the UBM calculations (calc.py)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import time
import urllib
try:
# For Python 3.0 and later
import urllib.request
except ImportError:
# Fall back to Python 2's urllib2
import urllib2
import re
import glob
import os
import arcpy
from arcpy.sa import *
def get_modis(tiles, save_path, months='', years=''):
    """The following script automatically retrieves monthly MODIS16 hdf file from the ntsg website.

    :param tiles: Tile number in format h##v##; based on grid from https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
    :param save_path: name of output file name
    :param months: months of interest; defaults to [1,12]
    :param years: years of interest; defaults to [2000,2015]
    :return: saves files in outpath
    """
    from bs4 import BeautifulSoup
    # BUG FIX: the module imports urllib.request on py3 and falls back to
    # urllib2 on py2, but this body hard-coded the py2 names
    # (urllib2.urlopen / urllib.urlretrieve) and raised NameError on py3.
    try:
        urlopen = urllib.request.urlopen
        urlretrieve = urllib.request.urlretrieve
    except AttributeError:  # Python 2: urllib has no 'request' submodule
        urlopen = urllib2.urlopen
        urlretrieve = urllib.urlretrieve
    if months == '':
        months = [1, 12]
    if years == '':
        years = [2000, 2015]
    mons = [str(i).zfill(2) for i in range(months[0], months[1] + 1)]
    yrs = [str(i) for i in range(years[0], years[1] + 1)]
    for tile in tiles:
        for yr in yrs:
            for m in mons:
                base_url = "http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16A2_MONTHLY.MERRA_GMAO_1kmALB/"
                dir_path = "Y{:}/M{:}/".format(yr, m)
                url = base_url + dir_path
                # Scrape the directory listing for the tile's hdf link.
                soup = BeautifulSoup(urlopen(url), "lxml")
                hdf_name = soup.find_all('', {
                    'href': re.compile('MOD16A2.A{:}M{:}.{:}.105'.format(yr, m, tile), re.IGNORECASE)})
                files = urlretrieve(url + hdf_name[0].text, save_path + hdf_name[0].text)
                print(save_path + hdf_name[0].text)
                # Be polite to the server between downloads.
                time.sleep(0.5)
def get_file_list(save_path, wld='*.105*.hdf'):
    """Return the raw MODIS files under *save_path* that match *wld*.

    Args:
        save_path: folder holding the raw MODIS downloads
        wld: glob wildcard shared by the raw MODIS files

    Returns:
        list of matching file paths
    """
    pattern = os.path.join(save_path, wld)
    return glob.glob(pattern)
def reproject_modis(files, save_path, data_type, eight_day=True, proj=102003):
    """Iterates through MODIS files in a folder reprojecting them.
    Takes the crazy MODIS sinusoidal projection to a user defined projection.
    Args:
        files: list of file paths of MODIS hdf files; created using files = glob.glob(os.path.join(save_path, '*.105*.hdf'))
        save_path: folder to store the reprojected files
        data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'
        eight_day: time span of modis file; Bool where default is true (input 8-day rasters)
        proj: projection of output data by epsg number; default is nad83 zone 12
    Returns:
        Reprojected MODIS files
    ..notes:
        The EPSG code for NAD83 Zone 12 is 26912.
        The EPSG code for Albers Equal Area is 102003
        http://files.ntsg.umt.edu/data/NTSG_Products/MOD16/MOD16_global_evapotranspiration_description.pdf
        https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
        https://lpdaac.usgs.gov/dataset_discovery/modis/modis_products_table/mod16a2_v006<
        https://search.earthdata.nasa.gov/search/granules?p=C1000000524-LPDAAC_ECS&m=36.87890625!-114.50390625!5!1!0!0%2C2&tl=1503517150!4!!&q=MOD16A2+V006&sb=-114.29296875%2C36.80859375%2C-109.96875%2C42.2578125
    """
    import pymodis
    # dictionary to designate a directory
    datadir = {'ET': '/ET/', 'PET': '/PET/', 'LE': '/LE/', 'PLE': '/PLE/'}
    # dictionary to select layer from hdf file that contains the datatype
    matrdir = {'ET': [1, 0, 0, 0], 'LE': [0, 1, 0, 0], 'PET': [0, 0, 1, 0], 'PLE': [0, 0, 0, 1]}
    # check for file folder and make it if it doesn't exist
    if not os.path.exists(save_path + datadir[data_type]):
        os.makedirs(save_path + datadir[data_type])
        print('created {:}'.format(save_path + datadir[data_type]))
    for f in files:
        # NOTE(review): the parsing below assumes Windows-style paths with
        # exactly one backslash and MODIS names like
        # 'MOD16A2.A2014M01.h09v05.105....hdf'; verify before reuse elsewhere.
        year = f.split('\\')[1].split('.')[1][1:5]
        v = f.split('\\')[1].split('.')[2][-2:]  # parse v (cell coordinate) from hdf filename
        h = f.split('\\')[1].split('.')[2][1:3]  # parse h (cell coordinate) from hdf filename
        # names file based on time span of input rasters; 8-day by default
        if eight_day:
            doy = f.split('\\')[1].split('.')[1][-3:]  # parse day of year from hdf filename
            fname = 'A' + year + 'D' + doy + 'h' + h + 'v' + v
            pref = os.path.join(save_path + datadir[data_type] + fname)
        else:
            month = f.split('\\')[1].split('.')[1][-2:]  # parse month from hdf filename
            fname = 'A' + year + 'M' + month + 'h' + h + 'v' + v
            pref = os.path.join(save_path + datadir[data_type] + fname)
        convertsingle = pymodis.convertmodis_gdal.convertModisGDAL(hdfname=f, prefix=pref,
                                                                   subset=matrdir[data_type],
                                                                   res=1000, epsg=proj)
        # [ET,LE,PET,PLE]
        try:
            convertsingle.run()
        except:
            # NOTE(review): bare except hides the underlying GDAL/pymodis
            # error (and would swallow KeyboardInterrupt too); consider
            # narrowing to Exception and logging the cause.
            print(fname + ' failed!')
            pass
def clip_and_fix(path, outpath, data_type, area=''):
    """Clips raster to Utah's Watersheds and makes exception values null.
    Args:
        path: folder of the reprojected MODIS files
        outpath: ESRI gdb to store the clipped files
        data_type: type of MODIS16 data being reprojected; options are 'ET','PET','LE', and 'PLE'
        area: path to polygon used to clip tiles
    """
    # Check out the ArcGIS Spatial Analyst extension license
    arcpy.CheckOutExtension("Spatial")
    arcpy.env.workspace = path
    arcpy.env.overwriteOutput = True
    if area == '':
        area = 'H:/GIS/Calc.gdb/WBD_UT'
    arcpy.env.mask = area
    arcpy.CheckOutExtension("spatial")
    for rast in arcpy.ListRasters():
        # Cells above 32700 are nulled out -- assumed to be the MOD16
        # fill/exception codes; TODO confirm for other products.
        calc = SetNull(arcpy.Raster(rast) > 32700, arcpy.Raster(rast))
        # Output name is rebuilt from fixed slices of the input name, which
        # assumes the 'A2014D001h08v05'-style names from reproject_modis.
        # NOTE(review): rast[10:11] / rast[13:14] keep only ONE digit of the
        # h/v coordinates -- verify this matches the actual name layout.
        calc.save(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
        print(outpath + data_type + rast[1:5] + rast[6:8] + 'h' + rast[10:11] + 'v' + rast[13:14])
def merge_rasts(path, data_type='ET', monthRange='', yearRange='', outpath=''):
    """Mosaics (merges) different MODIS cells into one layer.
    """
    # '' sentinels let callers omit the ranges; defaults cover 2000-2015.
    if monthRange == '':
        monthRange = [1, 12]
    if yearRange == '':
        yearRange = [2000, 2015]
    if outpath == '':
        outpath = path
    arcpy.env.workspace = path
    outCS = arcpy.SpatialReference('NAD 1983 UTM Zone 12N')
    for y in range(yearRange[0], yearRange[-1] + 1):  # set years converted here
        for m in range(monthRange[0], monthRange[-1] + 1):  # set months converted here
            nm = data_type + str(y) + str(m).zfill(2)
            rlist = []
            # Collect every tile raster for this year/month by name prefix.
            for rast in arcpy.ListRasters(nm + '*'):
                rlist.append(rast)
            try:
                # The 'c' suffix marks the combined (mosaicked) product.
                arcpy.MosaicToNewRaster_management(rlist, outpath, nm + 'c', outCS, \
                                                   "16_BIT_UNSIGNED", "1000", "1", "LAST", "LAST")
                print(path + nm + 'c')
            except:
                # NOTE(review): bare except hides the arcpy error (and
                # swallows KeyboardInterrupt); consider narrowing.
                print(nm + ' failed!')
                pass
def scale_modis(path, out_path, scaleby=10000.0, data_type='ET', monthRange=(1, 12), yearRange=(2000, 2014)):
    """Divide each monthly mosaicked MODIS raster by a constant scale factor.

    :param path: directory of the unconverted MODIS tiles; NOTE: not used in
        the body -- inputs are resolved against the current arcpy workspace
    :param out_path: directory to put output in
    :param scaleby: scaling factor for MODIS data; default converts to meters/month
    :param data_type: type of MODIS16 data being scaled; used for file name;
        options are 'ET','PET','LE', and 'PLE'
    :param monthRange: first and last month to process (tuples replace the
        original mutable list defaults; any 2-sequence works)
    :param yearRange: first and last year to process
    :return: None; writes scaled rasters to out_path
    """
    arcpy.CheckOutExtension("spatial")
    for y in range(yearRange[0], yearRange[-1] + 1):  # years converted here
        for m in range(monthRange[0], monthRange[-1] + 1):  # months converted here
            nm = data_type + str(y) + str(m).zfill(2)
            # The 'c' suffix is the mosaicked product written by merge_rasts.
            calc = Divide(nm + 'c', scaleby)
            calc.save(out_path + nm)
def untar(filepath, outfoldername='.', compression='r', deletesource=False):
    """Extract a tar archive and return the names of its members.

    Required: filepath -- the path to the tar archive
    Optional: outfoldername -- the output directory for the files; DEFAULT is directory with tar archive
              compression -- the type of compression used in the archive; DEFAULT is 'r'; use "r:gz" for gzipped archives
              deletesource -- a boolean argument determining whether to remove the archive after extraction; DEFAULT is false
    Output: filelist -- the list of all extract files

    .. warning:: extractall trusts member paths; only use on archives from a
       trusted source (a malicious archive can escape outfoldername via '..').
    """
    import tarfile
    with tarfile.open(filepath, compression) as tfile:
        filelist = tfile.getnames()
        tfile.extractall(path=outfoldername)
    if deletesource:
        try:
            os.remove(filepath)
        except OSError as err:
            # Narrowed from a bare except; chaining keeps the real cause
            # (permissions, missing file, ...) visible to the caller.
            raise Exception("Could not delete tar archive {0}.".format(filepath)) from err
    return filelist
def ungz(filepath, compression='rb', deletesource=False):
    """Decompress a .gz archive next to it and return the decompressed path.

    Required: filepath -- the path to the .gz archive (output path drops '.gz')
    Optional: compression -- mode passed to gzip.open; DEFAULT is 'rb'
              deletesource -- remove the archive after extraction; DEFAULT is False
    Output: path of the decompressed file
    """
    import gzip
    # Both handles are context-managed so they close even if the read or
    # write fails (the original leaked the output handle on error).
    with gzip.open(filepath, compression) as src, open(filepath[:-3], 'wb') as dst:
        dst.write(src.read())
    if deletesource:
        try:
            os.remove(filepath)
        except OSError as err:
            # Narrowed from a bare except; keep the original cause chained.
            raise Exception("Could not delete gz archive {0}.".format(filepath)) from err
    return filepath[:-3]
def replace_hdr_file(hdrfile):
    """
    Overwrite (or create) the .hdr file for a .bil raster with the header Arc expects
    Required: hdrfile -- filepath for .hdr file to replace/create
    Output: None
    """
    # Fixed header for the national 6935x3351 SNODAS .bil grid.
    HDRFILE_STRING = "byteorder M\nlayout bil\nnbands 1\nnbits 16\nncols 6935\nnrows 3351\n\
ulxmap -124.729583333331703\nulymap 52.871249516804028\nxdim 0.00833333333\nydim 0.00833333333\n"
    with open(hdrfile, 'w') as hdr:
        hdr.write(HDRFILE_STRING)
def get_snodas(out_dir, months='', years=''):
    """Downloads daily SNODAS data from ftp. This is slow.
    :param out_dir: directory to store downloaded SNODAS zip files
    :param months: months desired for download
    :param years: years desired for download
    :return: saved zip files in out_dir
    .. note:
        Use polaris: http://nsidc.org/data/polaris/
    """
    import ftplib
    # '' sentinels let callers omit the ranges; defaults cover 2000-2015.
    if months == '':
        months = [1, 12]
    if years == '':
        years = [2000, 2015]
    monnames = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    # Remote month folders are named like '01_Jan'.
    mons = [str(i).zfill(2) + "_" + monnames[i - 1] for i in range(months[0], months[1] + 1)]
    yrs = [str(i) for i in range(years[0], years[1] + 1)]
    for yr in yrs:
        for m in mons:
            # A fresh FTP session is opened for every year/month folder.
            ftp_addr = "sidads.colorado.edu"
            ftp = ftplib.FTP(ftp_addr)
            ftp.login()
            dir_path = "pub/DATASETS/NOAA/G02158/masked/" + yr + "/" + m + "/"
            ftp.cwd(dir_path)
            files = ftp.nlst()
            for f in files:
                # Skip very short listing entries -- presumably '.'-style
                # non-file names; TODO confirm against the server listing.
                if len(f) > 4:
                    save_file = open(out_dir + "/" + f, 'wb')
                    ftp.retrbinary("RETR " + f, save_file.write)
                    save_file.close()
                    print(f)
            ftp.close()
def rename_polaris_snodas(path):
    """Rename raw SNODAS/Polaris downloads in *path* to '<CODE><yyyymmdd><ext>'.

    The 4-letter product code is mapped from the 17-character SNODAS prefix
    and the date is read from the characters right after the 'TNATS' marker.
    Files with an unrecognised prefix are now skipped instead of raising
    KeyError; rename failures are skipped (best-effort) as before.
    """
    # SNODAS product-code prefix -> 4-letter short code used in output names.
    prodcode = {'us_ssmv11038wS__A': 'SPAT', 'us_ssmv11044bS__T': 'SNML', 'us_ssmv11050lL00T': 'SPSB',
                'us_ssmv11034tS__T': 'SWEQ', 'us_ssmv01025SlL00': 'RAIN', 'us_ssmv01025SlL01': 'SNOW',
                'us_ssmv11036tS__T': 'SNOD', 'us_ssmv11039lL00T': 'BSSB'}
    for filename in os.listdir(path):
        if not filename.startswith("us_ssmv"):
            continue
        code = prodcode.get(filename[0:17])
        if code is None:
            # Unknown product prefix: leave the file untouched.
            continue
        # Date digits follow immediately after the 'TNATS' marker.
        yrsrt = filename.find('TNATS') + 5
        yr = filename[yrsrt:yrsrt + 4]
        mo = filename[yrsrt + 4:yrsrt + 6]
        dy = filename[yrsrt + 6:yrsrt + 8]
        try:
            os.rename(os.path.join(path, filename), os.path.join(path, code + yr + mo + dy + filename[-4:]))
        except OSError:
            # Best-effort, as in the original: skip files that cannot be
            # renamed (e.g. collision with an already-renamed target).
            pass
def snow_summary(code, scalingFactor, statistics="SUM", outcellsize='1000', monthRange='', yearRange='',
                 path="H:/GIS/SNODAS/SNWDS/", outpath="H:/GIS/SNODAS.gdb/", area=''):
    """
    summarizes daily SNODAS data to monthly values

    INPUT
    -----
    code = text; prefix of dataset to use; choices are 'RAIN','SWEQ','SNOD','SPAT','BSSB','SNML', or 'SPSB'
    scalingFactor = float; table 1 at http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/
    statistics = text; from arcpy sa CellStatistics; choices are MEAN, MAJORITY, MAXIMUM, MEDIAN, MINIMUM, MINORITY,
        RANGE, STD, SUM, or VARIETY
    monthRange = len 2 list; begin and end month of data you wish to analyze
    yearRange = len 2 list; begin and end year of data you wish to analyze
    path = directory where raw geoTiffs are located
    outpath = directory where final data will be stored
    area = polygon used to clip the projected rasters (defaults to Utah watersheds)

    OUTPUT
    ------
    projected and scaled monthly rasters
    """
    # BUG FIX: the defaults were assigned to unused names ('months'/'years'),
    # so calling with the default '' sentinels crashed on monthRange[0] below.
    if monthRange == '':
        monthRange = [1, 12]
    if yearRange == '':
        yearRange = [2000, 2015]
    g = {}
    arcpy.env.workspace = path
    arcpy.env.overwriteOutput = True
    if area == '':
        area = 'H:/GIS/Calc.gdb/WBD_UT'
    # arcpy.env.mask = area
    # CellStatistics keyword -> 3-letter tag appended to the output name.
    statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',
                 'MINORITY': 'MNR',
                 'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
    for y in range(yearRange[0], yearRange[1] + 1):  # set years converted here
        for m in range(monthRange[0], monthRange[1] + 1):  # set months converted here
            g[code + str(y) + str(m).zfill(2)] = []  # dictionary key: data type + year + month
            for name in sorted(
                    glob.glob(path + code + '*.tif')):  # pick all tiff files from raw data folder of a data type
                rast = os.path.basename(name)
                # Collect the daily rasters belonging to this year/month.
                if rast[0:4] == code and int(rast[4:8]) == y and int(rast[8:10]) == m:
                    g[code + str(y) + str(m).zfill(2)].append(rast)
            if len(g[code + str(y) + str(m).zfill(2)]) > 0:
                # Summarize the daily rasters into one monthly raster.
                cellstats = CellStatistics(g[code + str(y) + str(m).zfill(2)], statistics_type=statistics,
                                           ignore_nodata="DATA")
                div = Divide(cellstats, scalingFactor)  # scale factor, converts to kg/m2 10 then to m 0.001
                calc = Con(div < 0.0, 0.0, div)  # remove negative values
                ifnull = Con(IsNull(calc), 0, calc)  # remove null
                # WKID 102039: USA Contiguous Albers Equal Area (meter units).
                outCS = arcpy.SpatialReference(102039)
                outnm = outpath + rast[0:4] + str(y).zfill(2) + str(m).zfill(2) + statstype[statistics]
                memoryFeature = "in_memory/myMemoryFeature"
                arcpy.ProjectRaster_management(ifnull, memoryFeature, outCS, 'BILINEAR', outcellsize,
                                               'WGS_1984_(ITRF00)_To_NAD_1983', '#', '#')
                # Clip the projected monthly raster to the area of interest.
                extrc = arcpy.sa.ExtractByMask(memoryFeature, area)
                extrc.save(outnm)
                print(outnm)
                arcpy.Delete_management("in_memory")
def totalavg(code, statistics="MEAN", monthRange=[1, 12], yearRange=[2003, 2016],
path="H:/GIS/SNODAS/SNODASproj.gdb/", outpath="H:/GIS/SNODAS/SNODASproj.gdb/"):
"""Summarizes daily raster data into monthly data.
INPUT
-----
code = string with four letters represting data type to summarize (example 'BSSB')
statistics = how data will be summarized; defaults to monthly averages; options are
['MEAN','MAJORITY','MAXIMUM','MEDIAN','MINIMUM','MINORITY','RANGE','STD','SUM','VARIETY']
Most common are 'MEAN','MEDIAN', and 'SUM'
These are inputs that will be used in the ArcPy CellStatistics function.
See http://pro.arcgis.com/en/pro-app/tool-reference/spatial-analyst/cell-statistics.htm for documentation
monthRange = beginning and end months of summary statistics
yearRange = beginning and end years of summary statistics
path = location of geodatabase of data to summarize
outpath = location of geodatabase where output data should be stored
OUTPUT
------
summary raster(s) stored in outpath
"""
g = {}
statstype = {'MEAN': 'AVG', 'MAJORITY': 'MAJ', 'MAXIMUM': 'MAX', 'MEDIAN': 'MED', 'MINIMUM': 'MIN',
'MINORITY': 'MNR',
'RANGE': 'RNG', 'STD': 'STD', 'SUM': 'SUM', 'VARIETY': 'VAR'}
arcpy.env.workspace = path
arcpy.env.overwriteOutput = True
# iterate over month range set here; default is 1 to 12 (Jan to Dec)
for m in range(monthRange[0], monthRange[1] + 1):
# this defines the dictionary key based on data type, month, and year
g[code + '0000' + str(m).zfill(2)] = []
# pick all tiff files from raw data folder of a data type
for rast in arcpy.ListRasters():
yrrng = range(yearRange[0], yearRange[1] + 1) # set years converted here
# create a list of rasters with the right code and month and year
if rast[0:4] == code and int(rast[4:8]) in yrrng and int(rast[8:10]) == m:
g[code + '0000' + str(m).zfill(2)].append(rast) # create a list of rasters for each month
else:
pass
if len(g[code + '0000' + str(m).zfill(2)]) > 0:
# arcpy sa functions that summarize the daily data to monthly data
calc = CellStatistics(g[code + '0000' + str(m).zfill(2)], statistics_type=statistics, ignore_nodata="DATA")
calc.save(code + '0000' + str(m).zfill(2) + statstype[statistics])
print(code + '0000' + str(m).zfill(2) + statstype[statistics])
if __name__ == '__main__':
    # NOTE(review): no main() is defined anywhere in this module, so running
    # it as a script raises NameError; importing the module is unaffected.
    main()
|
def top_crates(text):
    """Solve Advent of Code 2022 day 5 (part 1) for the given puzzle input.

    *text* contains a crate drawing, a blank line, then 'move N from A to B'
    instructions. Crates are moved one at a time, so a lifted group lands in
    reversed order. Returns the string of top crates, left to right.
    """
    stacks_text, instructions = text.split('\n\n')
    drawing = stacks_text.split('\n')
    # One empty stack per numbered column in the drawing's footer line.
    stacks = [[] for _ in drawing[-1].split()]
    # Crate letters sit at columns 1, 5, 9, ...; reading the drawing
    # top-down and inserting at index 0 leaves each list bottom -> top.
    for line in drawing[:-1]:
        for stack_index, col in enumerate(range(1, len(line), 4)):
            crate = line[col]
            if crate != ' ':
                stacks[stack_index].insert(0, crate)
    for instruction in instructions.strip().split('\n'):
        _, count, _, src, _, dst = instruction.split()
        count = int(count)
        src = int(src) - 1
        dst = int(dst) - 1
        moved = stacks[src][-count:]
        stacks[src] = stacks[src][:-count]
        # Reversed because the crane moves one crate at a time.
        stacks[dst].extend(reversed(moved))
    return ''.join(stack[-1] for stack in stacks)


if __name__ == '__main__':
    # BUG FIX: the original shadowed the builtin 'input', leaked the file
    # handle, and ran everything at import time.
    with open('input') as puzzle:
        print(top_crates(puzzle.read()))
|
8,787 | c7ecf8ada74b3e401c2144457d4fa1050f598727 | from collections import deque
# NOTE(review): Python 2 source (xrange, raw_input, print statement) -- it
# will not run under Python 3 as written.
for case in xrange(input()):
    # First line of the case: number of cards in the deck.
    cards = input()
    # Second line: 1-based positions to report; the first value appears to be
    # the count of queries, since only indexes[1:] is printed below -- confirm
    # against the problem statement.
    indexes = map(int, raw_input().split())

    deck = [0 for i in xrange(cards)]
    index = -1
    # Place card i by advancing to the next empty slot, then skipping i-1
    # further empty slots (wrapping around the deck) before dropping it.
    for i in xrange(1, cards + 1):
        while True:
            index = (index + 1)%cards
            if deck[index] == 0:
                break
        for j in xrange(i - 1):
            while True:
                index = (index + 1)%cards
                if deck[index] == 0:
                    break
        deck[index] = i
#--------------------------------------------------
# for case in xrange(input()):
#    k = input()
#    indexes = map(int, raw_input().split())
#
#    deck = deque()
#    for card in xrange(k, 0, -1):
#        deck.appendleft(card)
#        print deck
#        deck.rotate(card - 1)
#        print deck
#--------------------------------------------------
    # Report the card that ended up at each queried (1-based) position.
    print 'Case #%d: %s' % (case + 1, ' '.join(str(deck[i - 1])
                                               for i in indexes[1:]))
|
8,788 | e1c902ef340a0a5538b41a03cc93686e0dd31672 | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from prettytable import PrettyTable
from time import sleep
from customization import *
import urllib.request,json
# Chrome runs headless and in incognito; 'excludeSwitches' silences the
# driver's console logging noise.
chrome_options=webdriver.ChromeOptions()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--incognito")
chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])
# chromePath is expected to come from customization's star import.
chromeBrowser = webdriver.Chrome(chromePath, options=chrome_options)
def bio_shortener(bio):
    """Split *bio* into 30-character lines, each followed by a newline.

    Mirrors the original behaviour exactly: len(bio)//30 + 1 chunks are
    emitted, so the final chunk (possibly empty) also gets a newline.
    """
    chunk_count = int(len(bio) / 30) + 1
    pieces = []
    for _ in range(chunk_count):
        pieces.append(bio[0:30])
        pieces.append("\n")
        bio = bio[30:]
    return ''.join(pieces)
def nb_checker(nb):
    """Return nb.text when *nb* holds a scraped element, else None.

    BUG FIX: the original only compared against the string 'None' and ended
    the else-branch with a bare expression (implicitly returning None), so a
    real None argument crashed with AttributeError on .text.
    """
    if nb is not None and nb != 'None':
        return nb.text
    return None
def quick_search(username):
print("Collecting username information...")
insta_url="https://instagram.com/"+username+"/"
chromeBrowser.get(insta_url)
WebDriverWait(chromeBrowser,5).until(lambda d: d.find_element_by_xpath('//*[@id="loginForm"]/div/div[1]/div/label/input'))
chromeBrowser.find_element_by_xpath('//*[@id="loginForm"]/div/div[1]/div/label/input').send_keys(i_email)
chromeBrowser.find_element_by_xpath('//*[@id="loginForm"]/div/div[2]/div/label/input').send_keys(i_password)
chromeBrowser.find_element_by_xpath('//*[@id="loginForm"]/div[1]/div[3]/button').click()
WebDriverWait(chromeBrowser,10).until(lambda d: d.find_element_by_xpath('//*[@id="react-root"]/section/main/div/div/div/div/button'))
chromeBrowser.find_element_by_xpath('//*[@id="react-root"]/section/main/div/div/div/div/button').click()
try:
instaName=chromeBrowser.find_element_by_class_name('rhpdm').text
except:
instaName="None"
try:
instaBio=chromeBrowser.find_element_by_xpath('/html/body/div[1]/section/main/div/header/section/div[2]/span').text
except:
instaBio="None"
try:
instaPersonalSite=chromeBrowser.find_element_by_xpath('//*[@id="react-root"]/section/main/div/header/section/div[2]/a[1]').text
except NameError:
instaPersonalSite=chromeBrowser.find_element_by_xpath('//*[@id="react-root"]/section/main/div/header/section/div[2]/a').text
except:
instaPersonalSite='None'
sleep(1)
chromeBrowser.get('https://stackoverflow.com/users/')
WebDriverWait(chromeBrowser, 10).until(lambda d: d.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[1]/div[1]/input'))
chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[1]/div[1]/input').send_keys(username)
sleep(1)
try:
Name=chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a')
if str(Name.text.lower())==username.lower():
placeholder=True
except:
placeholder=False
try:
sofLocation=chromeBrowser.find_element_by_class_name('user-location').text
except:
sofLocation='None'
try:
sofUser_tag = chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[3]').text
except:
sofUser_tag='None'
try:
chromeBrowser.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[3]/div[1]/div[1]/div[2]/a').click()
WebDriverWait(chromeBrowser, 10).until(lambda d: d.find_element_by_xpath('/html/body/div[4]/div[2]/div/div[2]/div[1]/div/div[2]/div/div[1]/div/div[2]'))
except:
placeholder=True
try:
sofBio=chromeBrowser.find_element_by_xpath('//*[@id="user-card"]/div/div[2]/div/div[1]/div/div[2]').text
except:
sofBio='None'
githubUrl = "https://api.github.com/users/" + username
try:
with urllib.request.urlopen(githubUrl) as url:
githubData = json.loads(url.read().decode())
gitName=str(githubData['name'])
gitCompany=str(githubData['company'])
gitBlog=str(githubData['blog'])
gitEmail=str(githubData['email'])
gitBio=str(githubData['bio'])
gitTwitter=str(githubData['twitter_username'])
gitLocation=str(githubData['location'])
except:
placeholder=True
pt = PrettyTable(
[' ', ' Instagram ', ' StackOverflow ', ' GitHub '])
pt.add_row(["Name", instaName,"X", gitName])
pt.add_row(["Email", "X","X",gitEmail])
pt.add_row(["Company","X","X", gitCompany])
pt.add_row(["Personal Site", instaPersonalSite,"X", gitBlog])
pt.add_row(["Location", "X", sofLocation, gitLocation])
pt.add_row(["Twitter", "X", "X", gitTwitter])
pt.add_row(["Tags", "X", sofUser_tag, "X"])
pt.add_row(["Biography", bio_shortener(instaBio), bio_shortener(sofBio), bio_shortener(gitBio)])
print(pt)
input()
|
8,789 | 315fed1806999fed7cf1366ef0772318a0baa84d | # settings
import config
# various modules
import sys
import time
import multiprocessing
import threading
from queue import Queue
import time
import os
import signal
import db
import time
from random import randint
# telepot's msg loop & Bot
from telepot.loop import MessageLoop
from telepot import Bot
import asyncio
from handle_msg import handle_msg, get_image
# bot object
# Telegram Bot API client plus a Manager-backed dict that shares mutable
# state (forward target, per-chat counters) across the worker processes.
bot = Bot(config.token)
mgr = multiprocessing.Manager()
shared_dict = mgr.dict()
def thread(fork_process, thread_queue, shared_dict):
    """Worker-thread loop: consume messages from `thread_queue` forever.

    Each thread opens its own DB connection, optionally mirrors traffic to a
    configured 'forward' chat, and dispatches regular messages to
    `handle_msg` and scheduler jobs to `get_image`.
    """
    # `threading.currentThread()` / `getName()` are deprecated aliases, and
    # the original bound the result to a local named `thread`, shadowing
    # this very function.
    cur = threading.current_thread()
    post = db.DB(config.username, config.password, config.dbname, config.host, config.port)
    post.connect()
    print('fork process - %s, thread - %s' % (fork_process, cur.name))
    while 1:
        msg = thread_queue.get()
        # NOTE(review): the format string has three placeholders but four
        # arguments; `post` is silently dropped by str.format — confirm
        # whether it was meant to be logged.
        print('received msg from fork_process - {}, thread - {}, msg - {}'.format(fork_process, cur.name, msg, post))
        # Mirror every update to the configured forward chat, except echoes
        # originating from that chat itself.
        if 'forward' in shared_dict and shared_dict['forward'] != msg['chat']['id']:
            bot.sendMessage(shared_dict['forward'], '{}'.format(msg))
        if 'scheduler' not in msg:
            handle_msg(msg, bot, shared_dict, post)
        else:
            # Scheduler job: ensure the per-chat counter key exists, then
            # fetch an image for the requested keyword.
            if str(msg['chat_id']) + 'n' not in shared_dict:
                shared_dict[str(msg['chat_id']) + 'n'] = 0
            get_image(msg['chat_id'], msg['keyword'], shared_dict, post, bot, False)
def worker(parent_process, fork_queue, shared_dict):
    """Fork entry point: fan messages from the process queue out to threads.

    Starts `config.threads_qt` daemon threads that all consume from one
    intra-process `Queue`, then forwards everything arriving on the
    inter-process `fork_queue` into it until interrupted.
    """
    fork_process = multiprocessing.current_process()
    thread_queue = Queue()
    for i in range(config.threads_qt):
        t = threading.Thread(target=thread, args=(fork_process.name, thread_queue, shared_dict))
        # `Thread.setDaemon()` is deprecated; assign the attribute directly.
        t.daemon = True
        t.start()
    try:
        while 1:
            data = fork_queue.get()
            thread_queue.put(data)
    except KeyboardInterrupt as e:
        pass
def handle(msg):
    # Telepot callback: push every incoming update onto the shared
    # inter-process queue; a worker process picks it up from there.
    fork_queue.put(msg)
# Inter-process queue feeding the worker pool, plus the pool itself:
# `config.forks_qt` daemon processes, each running `worker`.
fork_queue = multiprocessing.Queue()
parent_process = os.getpid()
for i in range(config.forks_qt):
    p = multiprocessing.Process(target=worker, args=(parent_process, fork_queue, shared_dict))
    p.daemon = True
    p.start()
@asyncio.coroutine
def scheduler():
    """Periodically ping each configured group and enqueue an image job.

    Runs forever on the event loop, waiting a random 3100-3999 seconds
    before each group's message.
    """
    while True:
        for i in config.pida_groups:
            # The original used time.sleep() here, which blocks the entire
            # event loop; asyncio.sleep yields control while waiting.
            yield from asyncio.sleep(int(str(3) + str(randint(100, 999))))
            bot.sendMessage(i, 'kuku pidarugi')
            fork_queue.put({'scheduler': 1, 'chat_id': i, 'keyword': 'victoria secret'})
@asyncio.coroutine
def telepuzik():
    # Start telepot's long-polling loop in its own background thread, then
    # yield once so this coroutine completes immediately.
    MessageLoop(bot, handle).run_as_thread()
    yield None
if __name__ == "__main__":
    try:
        # `asyncio.async(...)` became a SyntaxError when `async` turned into
        # a keyword (Python 3.7); `ensure_future` is the long-standing
        # equivalent (available since 3.4.4).
        tasks = asyncio.gather(asyncio.ensure_future(telepuzik()),
                               asyncio.ensure_future(scheduler()))
        loop = asyncio.get_event_loop()
        loop.run_forever()
    except KeyboardInterrupt as e:
        print("keyboard interrupted")
|
8,790 | 68904be892968d4a1d82a59a31b95a8133a30832 | '''
* @Author: Mohammad Fatha.
* @Date: 2021-09-17 19:50
* @Last Modified by: Mohammad Fatha
* @Last Modified time: 2021-09-17 19:55
* @Title: Gambler Game
'''
import random
def gamblerProblem():
    """
    Description:
        Simulates a gambler who starts with a stake and places fair $1 bets
        until he/she goes broke (i.e. has no money), reaches $goal, or has
        made the allowed number of bets.  Tracks wins and bets made and
        prints the results.
    """
    stake = int(input("Enter The Stake Amount:"))
    goal = int(input("Enter The Amount You Want To Win:"))
    bet_made = int(input("Enter The Number Of Bets You Want To Make:"))
    no_of_times_won = 0
    no_of_time_lost = 0
    no_of_bets_made = 0
    # `0 < stake < goal`: per the docstring, play stops when the gambler is
    # broke (stake 0) or the goal is reached; the original `>=`/`<=` bounds
    # allowed one extra round in both cases.
    while 0 < stake < goal and no_of_bets_made < bet_made:
        no_of_bets_made += 1
        gambler_choice = random.randint(0, 1)  # fair coin flip: 0 or 1
        if gambler_choice == 1:  # win: gain $1
            no_of_times_won += 1
            stake = stake + 1
        else:  # loss: lose $1
            no_of_time_lost += 1
            stake = stake - 1
    # Percentages are over the bets actually made (the game may stop early
    # on broke/goal), not the requested count; guard a zero bet count.
    if no_of_bets_made > 0:
        percentage_win = (no_of_times_won / no_of_bets_made) * 100
    else:
        percentage_win = 0
    print("Number Of Times Won", no_of_times_won)
    print("Percentage Of Win", percentage_win)
    print("Percentage Of Loss", 100 - percentage_win)
    print("Number Of Bets Made", no_of_bets_made)


if __name__ == '__main__':
    gamblerProblem()
8,791 | 0f55b598058b65c9dbf9cd4761d1ff6fc7091b19 | __author__ = 'NikolaiEgorov'
def Lad(a1, a2, b1, b2):
    """Return 'YES' if (a1, a2) and (b1, b2) share a coordinate, else 'NO'.

    Presumably a chess check: whether a rook ("ladya") on square (a1, a2)
    attacks square (b1, b2), i.e. they lie on the same row or column.
    """
    # Logical `or` instead of the original bitwise `|`: it short-circuits
    # and states the intent directly.
    if a1 == b1 or a2 == b2:
        return 'YES'
    return 'NO'
# Read the two squares' coordinates from stdin, one integer per line,
# and print the verdict.
a1 = int(input())
a2 = int(input())
b1 = int(input())
b2 = int(input())
print(Lad(a1, a2, b1, b2))
|
8,792 | 5493887e32dbe7ae27eca79d28da8488183b37a3 | import string
fhand = open("romeo-full.txt")
counts = dict()
for line in fhand:
line.tranc |
8,793 | 707c83bc83f606b570af973094574e6675cfc83f | # Copyright (c) 2011-2014 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
r"""Equality Set Projection (ESP).
Non-vertex polytope projection method from
- https://web.archive.org/web/20150103142532/
https://www-control.eng.cam.ac.uk/~cnj22/research/projection.html
- https://infoscience.epfl.ch/record/169768
Very unstable, can not handle complex polytopes.
Reference
=========
\cite{Jones04}
"""
# Created by P. Nilsson, 8/2/11
import pickle
import numpy as np
from scipy import io as sio
from scipy import linalg
from polytope import solvers
class Ridge(object):
    """A ridge.

    Attributes:
      - `E_r`: Equality set of a facet

      - `ar, br`: Affine hull of the facet
        s.t. P_{E_0} = P intersection {x | ar x = br}.
    """

    def __init__(self, E, a, b):
        # Plain data holder: store the equality set and affine-hull data.
        self.E_r, self.ar, self.br = E, a, b
class Ridge_Facet(object):
    """A ridge facet.

    Attributes:
      - `E_r`: Equality set of a ridge

      - `ar,br`: Affine hull of the ridge s.t.

          P_{E_f} intersection {x | ar x = br}

        defines the ridge, where E_f is the
        equality set of the facet.

      - `E_0`: Equality set of a facet

      - `af,bf`: Affine hull of the facet.
    """

    def __init__(self, E_r, ar, br, E_0, af, bf):
        # Plain data holder pairing a ridge with its parent facet.
        self.E_r, self.ar, self.br = E_r, ar, br
        self.E_0, self.af, self.bf = E_0, af, bf
def esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0):
    """Project polytope [C D] x <= b onto C coordinates.

    Projects the polytope [C D] x <= b onto the
    coordinates that correspond to C. The projection of the polytope
    P = {[C D]x <= b} where C is M x D and D is M x K is
    defined as proj(P) = {x in R^d | exist y in R^k s.t Cx + Dy < b}

    @param centered: set True when the origin is already known to lie
        inside the polytope (skips the Chebyshev-center translation)
    @param abs_tol: absolute numerical tolerance
    @param verbose: 0, 1 or 2 for increasing progress output
    @return: `(G, g, E)` with proj(P) = {x | G x <= g} and `E` the list
        of equality sets, one per facet of the projection
    """
    if 'glpk' not in solvers.installed_solvers:
        raise Exception(
            "projection_esp error:"
            " Equality set projection requires `cvxopt.glpk` to run.")
    # Remove zero columns and rows
    nonzerorows = np.nonzero(
        np.sum(np.abs(np.hstack([CC, DD])), axis=1) > abs_tol)[0]
    nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0]
    nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0]
    C = CC[nonzerorows, :].copy()
    D = DD[nonzerorows, :].copy()
    C = C[:, nonzeroxcols]
    D = D[:, nonzeroycols]
    b = bb[nonzerorows].copy()
    # Make sure origo is inside polytope: translate by the Chebyshev
    # center so the algorithm can assume 0 is interior.
    if not centered:
        xc0, yc0, trans = cheby_center(C, D, b)
        if trans:
            b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten()
        else:
            b = b
    else:
        trans = False
    d = C.shape[1]
    k = D.shape[1]
    if verbose > 0:
        print("Projecting from dim " + str(d + k) + " to " + str(d))
    if k == 0:
        # Not projecting
        return C, bb, []
    if d == 1:
        # Projection to 1D: the projection is an interval [x_min, x_max],
        # found with two LPs (minimize and maximize the single coordinate).
        c = np.zeros(d + k)
        c[0] = 1
        G = np.hstack([C, D])
        sol = solvers.lpsolve(c, G, b, solver='glpk')
        if sol['status'] != "optimal":
            raise Exception(
                "esp: projection to 1D is not full-dimensional, "
                "LP returned status " + str(sol['status']))
        min_sol = np.array(sol['x']).flatten()
        min_dual_sol = np.array(sol['z']).flatten()
        sol = solvers.lpsolve(-c, G, b, solver='glpk')
        if sol['status'] != "optimal":
            raise Exception(
                "esp: projection to 1D is not full-dimensional, " +
                "LP returned status " + str(sol['status']))
        max_sol = np.array(sol['x']).flatten()
        max_dual_sol = np.array(sol['z']).flatten()
        # min, max
        x_min = min_sol[0]
        x_max = max_sol[0]
        y_min = min_sol[range(1, k + 1)]
        y_max = max_sol[range(1, k + 1)]
        if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol):
            # Min case, relax constraint a little to avoid infeasibility
            E_min = unique_equalityset(
                C, D, b, np.array([1.]), x_min + abs_tol / 3, abs_tol=abs_tol)
        else:
            E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0]
        if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol):
            # Max case, relax constraint a little to avoid infeasibility
            E_max = unique_equalityset(
                C, D, b, np.array([1.]), x_max - abs_tol / 3, abs_tol=abs_tol)
        else:
            E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0]
        G = np.array([[1.], [-1.]])
        g = np.array([x_max, -x_min])
        # Relocate
        if trans:
            g = g + np.dot(G, xc0)
        # Return zero cols/rows
        E_max = nonzerorows[E_max]
        E_min = nonzerorows[E_min]
        if verbose > 0:
            print(
                "Returning projection from dim " +
                str(d + k) + " to dim 1 \n")
        return G, g, [E_max, E_min]
    # General case: find one facet of the projection by shooting a random
    # ray, then traverse neighboring facets via their ridges until the
    # list L of unexplored ridge-facets is exhausted.
    E = []
    L = []
    E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol)
    ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose)
    for i in range(len(ridge_list)):
        r = ridge_list[i]
        L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf))
    G = af.T
    g = bf
    if verbose > 0:
        print("\nStarting eq set " + str(E_0) + "\nStarting ridges ")
        for rr in L:
            print(str(rr.E_r))
    E.append(E_0)
    while len(L) > 0:
        rid_fac1 = L[0]
        if verbose > 0:
            print("\nLooking for neighbors to " + str(rid_fac1.E_0) +
                  " and " + str(rid_fac1.E_r) + " ..")
        # Cross the ridge to the (unique) adjacent facet.
        E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol)
        if verbose > 0:
            print("found neighbor " + str(E_adj) +
                  ". \n\nLooking for ridges of neighbor..")
        ridge_list = ridge(
            C, D, b, E_adj, a_adj, b_adj,
            abs_tol=abs_tol, verbose=verbose)
        if verbose > 0:
            print("found " + str(len(ridge_list)) + " ridges\n")
        found_org = False
        # Each ridge of the new facet either closes off a pending ridge in
        # L (already seen from the other side) or is queued for later.
        for i in range(len(ridge_list)):
            r = ridge_list[i]
            E_r = r.E_r
            ar = r.ar
            br = r.br
            found = False
            for j in range(len(L)):
                rid_fac2 = L[j]
                A_r = rid_fac2.E_r
                if len(A_r) != len(E_r):
                    continue
                t1 = np.sort(np.array(A_r))
                t2 = np.sort(np.array(E_r))
                if np.sum(np.abs(t1 - t2)) < abs_tol:
                    found = True
                    break
            if found:
                if verbose > 0:
                    print("Ridge " + str(E_r) +
                          " already visited, removing from L..")
                if rid_fac2 == rid_fac1:
                    found_org = True
                L.remove(rid_fac2)
            else:
                if verbose > 0:
                    print("Adding ridge-facet " + str(E_adj) +
                          " " + str(E_r) + "")
                L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj))
        if not found_org:
            # Sanity check: the facet we came from must reappear among the
            # neighbor's ridges, otherwise the traversal is inconsistent.
            print("Expected ridge " + str(rid_fac1.E_r))
            print("but got ridges ")
            for rid in ridge_list:
                print(rid.E_r)
            raise Exception(
                "esp: ridge did not return neighboring ridge as expected")
        G = np.vstack([G, a_adj])
        g = np.hstack([g, b_adj])
        E.append(E_adj)
    # Restore center
    if trans:
        g = g + np.dot(G, xc0)
    # Return zero rows
    # NOTE(review): rebinding the loop variable does not modify E in
    # place — this restoration appears to be a no-op, unlike the explicit
    # `E_max/E_min = nonzerorows[...]` in the 1D branch; confirm.
    for Ef in E:
        Ef = nonzerorows[Ef]
    return G, g, E
def shoot(C, D, b, maxiter=1000, abs_tol=1e-7):
    """Return random equality set of P that projects on a projection facet.

    Returns randomly selected equality set E_0 of P such
    that the projection of the equality set is a facet of the projection.

    @param C: Matrix defining the polytope Cx+Dy <= b
    @param D: Matrix defining the polytope Cx+Dy <= b
    @param b: Vector defining the polytope Cx+Dy <= b
    @param maxiter: give up after this many random directions
    @param abs_tol: absolute numerical tolerance

    @return: `E_0,af,bf`: Equality set and affine hull
    """
    d = C.shape[1]
    k = D.shape[1]
    iter = 0
    while True:
        if iter > maxiter:
            raise Exception(
                "shoot: could not find starting equality set")
        # Shoot a ray in random x-direction `gamma`: maximize the step
        # length r subject to C (r*gamma) + D y <= b.
        gamma = np.random.rand(d) - 0.5
        c = np.zeros(k + 1)
        c[0] = -1
        G = np.hstack([np.array([np.dot(C, gamma)]).T, D])
        sol = solvers.lpsolve(c, G, b, solver='glpk')
        opt_sol = np.array(sol['x']).flatten()
        opt_dual = np.array(sol['z']).flatten()
        r_opt = opt_sol[0]
        y_opt = np.array(opt_sol[range(1, len(opt_sol))]).flatten()
        x_opt = r_opt * gamma
        # Constraints active at the optimum form the candidate equality set.
        E_0 = np.nonzero(
            np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol)[0]
        DE0 = D[E_0, :]
        CE0 = C[E_0, :]
        b0 = b[E_0]
        # Accept only if the set projects onto a single hyperplane in
        # x-space, i.e. a facet of the projection.
        if rank(np.dot(null_space(DE0.T).T, CE0)) == 1:
            break
        iter += 1
    af, bf = proj_aff(CE0, DE0, b0, abs_tol=abs_tol)
    # A dual-degenerate LP may yield a non-unique active set; recompute
    # the equality set from the affine hull in that case.
    if is_dual_degenerate(c, G, b, None, None, opt_sol,
                          opt_dual, abs_tol=abs_tol):
        E_0 = unique_equalityset(C, D, b, af, bf, abs_tol=abs_tol)
    af, bf = proj_aff(C[E_0, :], D[E_0, :], b[E_0])
    if len(bf) > 1:
        raise Exception("shoot: wrong dimension of affine hull")
    return E_0, af.flatten(), bf
def ridge(C, D, b, E, af, bf, abs_tol=1e-7, verbose=0):
    """Compute all ridges of a facet in the projection.

    Input:
    `C,D,b`: Original polytope data

    `E,af,bf`: Equality set and affine hull of a facet in the projection

    Output:
    `ridge_list`: A list containing all the ridges of
        the facet as Ridge objects
    """
    d = C.shape[1]
    k = D.shape[1]
    Er_list = []
    q = C.shape[0]
    E_c = np.setdiff1d(range(q), E)
    # E slices
    C_E = C[E, :]
    D_E = D[E, :]
    b_E = b[E, :]
    # E_c slices
    C_Ec = C[E_c, :]
    D_Ec = D[E_c, :]
    b_Ec = b[E_c]
    # dots: eliminate the y-part of the inactive constraints using the
    # pseudo-inverse / null space of D_E.
    S = C_Ec - np.dot(np.dot(D_Ec, linalg.pinv(D_E)), C_E)
    L = np.dot(D_Ec, null_space(D_E))
    t = b_Ec - np.dot(D_Ec, np.dot(linalg.pinv(D_E), b_E))
    if rank(np.hstack([C_E, D_E])) < k + 1:
        # Degenerate facet: recurse with ESP on a lower-dimensional
        # polytope expressed in coordinates orthogonal to af.
        if verbose > 1:
            print("Doing recursive ESP call")
        u, s, v = linalg.svd(np.array([af]), full_matrices=1)
        sigma = s[0]
        v = v.T * u[0, 0]  # Correct sign
        V_hat = v[:, [0]]
        V_tilde = v[:, range(1, v.shape[1])]
        Cnew = np.dot(S, V_tilde)
        Dnew = L
        bnew = t - np.dot(S, V_hat).flatten() * bf / sigma
        Anew = np.hstack([Cnew, Dnew])
        xc2, yc2, cen2 = cheby_center(Cnew, Dnew, bnew)
        bnew = bnew - np.dot(Cnew, xc2).flatten() - np.dot(Dnew, yc2).flatten()
        Gt, gt, E_t = esp(
            Cnew, Dnew, bnew,
            centered=True, abs_tol=abs_tol, verbose=0)
        if (len(E_t[0]) == 0) or (len(E_t[1]) == 0):
            raise Exception(
                "ridge: recursive call did not return any equality sets")
        for i in range(len(E_t)):
            E_f = E_t[i]
            er = np.sort(np.hstack([E, E_c[E_f]]))
            ar = np.dot(Gt[i, :], V_tilde.T).flatten()
            br0 = gt[i].flatten()
            # Make orthogonal to facet
            ar = ar - af * np.dot(af.flatten(), ar.flatten())
            br = br0 - bf * np.dot(af.flatten(), ar.flatten())
            # Normalize and make ridge equation point outwards
            norm = np.sqrt(np.sum(ar * ar))
            ar = ar * np.sign(br) / norm
            br = br * np.sign(br) / norm
            # Restore center
            br = br + np.dot(Gt[i, :], xc2) / norm
            if len(ar) > d:
                raise Exception("ridge: wrong length of new ridge!")
            Er_list.append(Ridge(er, ar, br))
    else:
        if verbose > 0:
            print("Doing direct calculation of ridges")
        X = np.arange(S.shape[0])
        while len(X) > 0:
            i = X[0]
            X = np.setdiff1d(X, i)
            if np.linalg.norm(S[i, :]) < abs_tol:
                continue
            Si = S[i, :]
            Si = Si / np.linalg.norm(Si)
            if np.linalg.norm(af - np.dot(Si, af) * Si) > abs_tol:
                # Group constraints lying on the same candidate ridge (Q)
                # and those off it (Q_i); drop the Q group from X so each
                # candidate ridge is considered once.
                test1 = null_space(
                    np.vstack([
                        np.hstack([af, bf]),
                        np.hstack([S[i, :], t[i]])]),
                    nonempty=True)
                test2 = np.hstack([S, np.array([t]).T])
                test = np.dot(test1.T, test2.T)
                test = np.sum(np.abs(test), 0)
                Q_i = np.nonzero(test > abs_tol)[0]
                Q = np.nonzero(test < abs_tol)[0]
                X = np.setdiff1d(X, Q)
                # Have Q_i
                Sq = S[Q_i, :]
                tq = t[Q_i]
                # LP feasibility test: a strictly negative tau certifies
                # that the candidate really is a ridge of the facet.
                c = np.zeros(d + 1)
                c[0] = 1
                Gup = np.hstack([-np.ones([Sq.shape[0], 1]), Sq])
                Gdo = np.hstack([-1, np.zeros(Sq.shape[1])])
                G = np.vstack([Gup, Gdo])
                h = np.hstack([tq, 1])
                Al = np.zeros([2, 1])
                Ar = np.vstack([af, S[i, :]])
                A = np.hstack([Al, Ar])
                bb = np.hstack([bf, t[i]])
                sol = solvers._solve_lp_using_cvxopt(
                    c, G, h, A=A, b=bb)
                if sol['status'] == 'optimal':
                    tau = sol['x'][0]
                    if tau < -abs_tol:
                        ar = np.array([S[i, :]]).flatten()
                        br = t[i].flatten()
                        # Make orthogonal to facet
                        ar = ar - af * np.dot(af.flatten(), ar.flatten())
                        br = br - bf * np.dot(af.flatten(), ar.flatten())
                        # Normalize and make ridge equation point outwards
                        norm = np.sqrt(np.sum(ar * ar))
                        ar = ar / norm
                        br = br / norm
                        # accumulate
                        Er_list.append(
                            Ridge(np.sort(np.hstack([E, E_c[Q]])), ar, br))
    return Er_list
def adjacent(C, D, b, rid_fac, abs_tol=1e-7):
    """Compute the (unique) adjacent facet.

    @param rid_fac: A Ridge_Facet object containing the parameters for
        a facet and one of its ridges.

    @return: (E_adj,a_adj,b_adj): The equality set and parameters for
        the adjacent facet such that::

            P_{E_adj} = P intersection {x | a_adj x = b_adj}
    """
    E = rid_fac.E_0
    af = rid_fac.af
    bf = rid_fac.bf
    #
    E_r = rid_fac.E_r
    ar = rid_fac.ar
    br = rid_fac.br
    # shape
    d = C.shape[1]
    k = D.shape[1]
    # E_r slices
    C_er = C[E_r, :]
    D_er = D[E_r, :]
    b_er = b[E_r]
    # stack: maximize ar x on the ridge, stepping slightly off the facet
    # (the `bf * (1 - 0.01)` relaxation) to land on the neighbor.
    c = -np.hstack([ar, np.zeros(k)])
    G = np.hstack([C_er, D_er])
    h = b_er
    A = np.hstack([af, np.zeros(k)])
    sol = solvers._solve_lp_using_cvxopt(
        c, G, h, A=A.T, b=bf * (1 - 0.01))
    if sol['status'] != "optimal":
        # Dump the failing instance for offline debugging before raising.
        print(G)
        print(h)
        print(af)
        print(bf)
        print(ar)
        print(br)
        print(np.dot(af, ar))
        data = {}
        data["C"] = C
        data["D"] = D
        data["b"] = b
        sio.savemat("matlabdata", data)
        with open('polytope.p', 'wb') as f:
            pickle.dump(data, f)
        raise Exception(
            "adjacent: Lp returned status " + str(sol['status']))
    opt_sol = np.array(sol['x']).flatten()
    dual_opt_sol = np.array(sol['z']).flatten()
    x_opt = opt_sol[range(d)]
    y_opt = opt_sol[range(d, d + k)]
    if is_dual_degenerate(
            c.flatten(), G, h, A, bf * (1 - 0.01),
            opt_sol, dual_opt_sol, abs_tol=abs_tol):
        # If degenerate, compute affine hull and take preimage
        E_temp = np.nonzero(np.abs(np.dot(G, opt_sol) - h) < abs_tol)[0]
        a_temp, b_temp = proj_aff(
            C_er[E_temp, :], D_er[E_temp, :], b_er[E_temp],
            expected_dim=1, abs_tol=abs_tol)
        E_adj = unique_equalityset(C, D, b, a_temp, b_temp, abs_tol=abs_tol)
        if len(E_adj) == 0:
            # Dump the instance (including ridge/facet data, 1-based for
            # MATLAB) before failing.
            data = {}
            data["C"] = C
            data["D"] = D
            data["b"] = b
            data["Er"] = E_r + 1
            data["ar"] = ar
            data["br"] = br
            data["Ef"] = E + 1
            data["af"] = af
            data["bf"] = bf
            sio.savemat("matlabdata", data)
            raise Exception(
                "adjacent: equality set computation returned empty set")
    else:
        # Non-degenerate: the active constraints at the optimum are the
        # neighbor's equality set.
        r = np.abs(np.dot(C, x_opt) + np.dot(D, y_opt) - b) < abs_tol
        E_adj = np.nonzero(r)[0]
    C_eadj = C[E_adj, :]
    D_eadj = D[E_adj, :]
    b_eadj = b[E_adj]
    af_adj, bf_adj = proj_aff(C_eadj, D_eadj, b_eadj, abs_tol=abs_tol)
    return E_adj, af_adj, bf_adj
def proj_aff(Ce, De, be, expected_dim=None, abs_tol=1e-7):
    """Affine projection.

    Compute the set aff = {x | Ce x + De y = be} on the form
    aff = ({x | a x = b} intersection {Ce x + De y < be}).

    @param Ce, De, be: polytope parameters
    @param expected_dim: if given, raise unless the computed hull has
        exactly this many equations
    @param abs_tol: absolute numerical tolerance

    @return: Constants `a` and `b`
    """
    # Remove zero columns
    ind = np.nonzero(np.sum(np.abs(De), axis=0) > abs_tol)[0]
    D = De[:, ind]
    if D.shape[1] == 0:
        # No y-dependence left: the x-equations already form the hull.
        a = Ce
        b = be
        a_n, b_n = normalize(a, b)
        if expected_dim is not None:
            if expected_dim != b_n.size:
                raise Exception(
                    "proj_aff: wrong dimension calculated in 1")
        return a_n.flatten(), b_n
    # Eliminate y by multiplying through with a basis of the left null
    # space of D.  (The original computed D.T's shape into unused locals
    # here; removed.)
    nDe = null_space(D.T)
    a = np.dot(nDe.T, Ce)
    b = np.dot(nDe.T, be)
    a_n, b_n = normalize(a, b)
    if expected_dim is not None:
        if expected_dim != b_n.size:
            raise Exception("proj_aff: wrong dimension calculated in 2")
    return a_n, b_n
def is_dual_degenerate(c, G, h, A, b, x_opt, z_opt, abs_tol=1e-7):
    """Return `True` if pair of dual problems is dual degenerate.

    Checks if the pair of dual problems::

      (P): min c'x        (D): max h'z + b'y
           s.t Gx <= h         s.t G'z + A'y = c
               Ax = b                   z <= 0

    is dual degenerate, i.e. if (P) has several optimal solutions.

    Optimal solutions x* and z* are required.

    Input:

    `G,h,A,b`: Parameters of (P)

    `x_opt`: One optimal solution to (P)

    `z_opt`: The optimal solution to (D) corresponding to
        _inequality constraints_ in (P)

    Output:
    `dual`: Boolean indicating whether (P) has many optimal solutions.
    """
    # Work with D x >= d so dual multipliers mu are non-negative.
    D = - G
    d = - h.flatten()
    mu = - z_opt.flatten()  # mu >= 0
    # Active constraints
    I = np.nonzero(np.abs(np.dot(D, x_opt).flatten() - d) < abs_tol)[0]
    # Positive elements in dual opt
    J = np.nonzero(mu > abs_tol)[0]
    # i, j
    i = mu < abs_tol  # Zero elements in dual opt
    i = i.astype(int)
    j = np.zeros(len(mu), dtype=int)
    j[I] = 1  # 1 if active
    # Indices where active constraints have 0 dual opt
    L = np.nonzero(i + j == 2)[0]
    # sizes
    nI = len(I)
    nJ = len(J)
    nL = len(L)
    # constraints
    DI = D[I, :]  # Active constraints
    DJ = D[J, :]  # Constraints with positive lagrange mult
    DL = D[L, :]  # Active constraints with zero dual opt
    dual = 0
    if A is None:
        test = DI
    else:
        test = np.vstack([DI, A])
    # Rank deficiency of the active constraints implies degeneracy.
    if rank(test) < np.amin(DI.shape):
        return True
    else:
        if len(L) > 0:
            # Otherwise check whether the optimal face has positive extent
            # along the zero-multiplier active constraints via an LP.
            if A is None:
                Ae = DJ
            else:
                Ae = np.vstack([DJ, A])
            be = np.zeros(Ae.shape[0])
            Ai = - DL
            bi = np.zeros(nL)
            sol = solvers._solve_lp_using_cvxopt(
                c= - np.sum(DL, axis=0), G=Ai,
                h=bi, A=Ae, b=be)
            if sol['status'] == "dual infeasible":
                # Dual infeasible -> primal unbounded -> value>epsilon
                return True
            if sol['primal objective'] > abs_tol:
                return True
    return False
def unique_equalityset(C, D, b, af, bf, abs_tol=1e-7, verbose=0):
    """Return equality set E with the following property:

    P_E = {x | af x = bf} intersection P

    where P is the polytope C x + D y < b

    The inequalities have to be satisfied with equality everywhere on
    the face defined by af and bf.
    """
    if D is not None:
        A = np.hstack([C, D])
        a = np.hstack([af, np.zeros(D.shape[1])])
    else:
        A = C
        a = af
    E = []
    for i in range(A.shape[0]):
        # Minimize A_i z over the face {a z = bf}; if even the minimum
        # attains b_i, constraint i holds with equality on the whole face.
        A_i = np.array(A[i, :])
        b_i = b[i]
        sol = solvers._solve_lp_using_cvxopt(
            c=A_i, G=A, h=b,
            A=a.T, b=bf)
        if sol['status'] != "optimal":
            raise Exception(
                "unique_equalityset: LP returned status " +
                str(sol['status']))
        if np.abs(sol['primal objective'] - b_i) < abs_tol:
            # Constraint is active everywhere
            E.append(i)
    if len(E) == 0:
        raise Exception("unique_equalityset: empty E")
    return np.array(E)
def unique_equalityset2(C, D, b, opt_sol, abs_tol=1e-7):
    """Alternative equality-set computation starting from `opt_sol`.

    Builds the affine hull from the constraints active at `opt_sol`, then
    solves a single slack-maximization LP to identify every constraint
    active on the whole face, and verifies the result defines the same
    affine hull.
    """
    A = np.hstack([C, D])
    E0 = np.nonzero(np.abs(np.dot(A, opt_sol) - b) < abs_tol)[0]
    af, bf = proj_aff(C[E0, :], D[E0, :], b[E0], expected_dim=1)
    # stack: restrict to the face {af x = bf} via two opposing inequalities.
    ineq = np.hstack([af, np.zeros(D.shape[1])])
    G = np.vstack([A, np.vstack([ineq, -ineq])])
    h = np.hstack([b, np.hstack([bf, -bf])])
    # shape
    m = G.shape[0]
    n = G.shape[1]
    # ht: tighten each constraint by the worst case over a small simplex
    # of perturbations v around the origin.
    e = 1e-3
    v = np.vstack([np.zeros([1, n]), np.eye(n)]).T
    v = v - np.array([np.mean(v, axis=1)]).T
    v = v * e
    ht = h + np.amin(-np.dot(G, v), axis=1)
    # stack: slack variables s measure how far each constraint is from
    # being tight; minimizing sum(s) exposes the always-active ones.
    H1 = np.hstack([G, -np.eye(m)])
    H2 = np.hstack([G, np.zeros([m, m])])
    H3 = np.hstack([np.zeros([m, n]), -np.eye(m)])
    H = np.vstack([H1, np.vstack([H2, H3])])
    h = np.hstack([ht, np.hstack([h, np.zeros(m)])])
    c = np.hstack([np.zeros(n), np.ones(m)])
    sol = solvers.lpsolve(c, H, h, solver='glpk')
    if not sol['status'] == "optimal":
        raise Exception(
            "unique_equalityset: LP returned status " +
            str(sol['status']))
    opt_sol2 = np.array(sol['x']).flatten()
    x = opt_sol2[range(n)]
    s = opt_sol2[range(n, len(opt_sol2))]
    E = np.nonzero(s > abs_tol)[0]
    # NOTE(review): leftover debug output — prints the raw index set on
    # every call; consider removing or gating behind a verbosity flag.
    print(E)
    E = np.sort(E[np.nonzero(E < C.shape[0])])
    # Check that they define the same projection
    at, bt = proj_aff(C[E, :], D[E, :], b[E])
    if bt.size != 1 or np.sum(np.abs(at - af)) + np.abs(bt - bf) > abs_tol:
        raise Exception("unique_equalityset2: affine hulls not the same")
    return E
def cheby_center(C, D, b):
    """Calculate Chebyshev center for the polytope `C x + D y <= b`.

    Input:
    `C, D, b`: Polytope parameters

    Output:
    `x_0, y_0`: The chebyshev centra

    `boolean`: True if a point could be found, False otherwise.
    """
    d = C.shape[1]
    k = D.shape[1]
    A = np.hstack([C, D])
    dim = np.shape(A)[1]
    # Maximize the ball radius r subject to A z + ||A_i||_2 r <= b:
    # the optimal z is the center of the largest inscribed ball.
    c = - np.r_[np.zeros(dim), 1]
    norm2 = np.sqrt(np.sum(A * A, axis=1))
    G = np.c_[A, norm2]
    sol = solvers.lpsolve(c, G, h=b, solver='glpk')
    if sol['status'] == "optimal":
        # Drop the radius component, then split into x- and y-parts.
        opt = np.array(sol['x'][0:-1]).flatten()
        return opt[range(d)], opt[range(d, d + k)], True
    else:
        return np.zeros(d), np.zeros(k), False
def normalize(AA, bb, abs_tol=1e-7):
    """Normalize `A x = b` such that `A'A = 1` and `b > 0`.

    Also, remove duplicate lines.

    @param AA: coefficient array (any shape with AA.size a multiple of
        bb.size)
    @param bb: right-hand side vector
    @param abs_tol: absolute numerical tolerance
    @return: normalized `(A, b)`; `A` is flattened when only one row
        remains
    """
    if AA.size == 0:
        return AA, bb
    # `//`: `/` yields a float in Python 3 and numpy's reshape rejects
    # non-integer dimensions.
    dim = AA.size // bb.size
    A = AA.copy().reshape(bb.size, dim)
    b = bb.copy().reshape(bb.size, 1)
    # Remove zero lines
    keepind = np.nonzero(
        np.sum(np.abs(np.hstack([A, b])), axis=1) > abs_tol)[0]
    A = A[keepind, :]
    b = b[keepind]
    # Normalize each row to unit length with b >= 0.
    # NOTE(review): rows with b == 0 get multiplied by sign(0) == 0 and are
    # zeroed out — confirm whether homogeneous equations can occur here.
    anorm = np.sqrt(np.sum(A * A, axis=1))
    for i in range(len(anorm)):
        A[i, :] = A[i, :] * np.sign(b[i, 0]) / anorm[i]
        b[i, 0] = np.sign(b[i, 0]) * b[i, 0] / anorm[i]
    # Remove duplicate rows
    keep_row = []
    for i in range(len(anorm)):
        unique = True
        for j in range(i + 1, len(anorm)):
            test = (np.sum(np.abs(A[i, :] - A[j, :])) +
                    np.abs(b[i, 0] - b[j, 0]))
            if test < abs_tol:
                unique = False
                break
        if unique:
            keep_row.append(i)
    A_n = A[keep_row, :]
    b_n = b[keep_row, 0]
    # Return flat A if only one row
    if A_n.size == dim:
        A_n = A_n.flatten()
    return A_n, b_n.flatten()
def rank(A, eps=1e-15):
    """Numerical rank of `A`: count singular values above a scaled cutoff."""
    # Threshold scales with the matrix size and largest singular value,
    # the standard SVD-based rank criterion.
    s = linalg.svd(A)[1]
    tol = max(A.shape) * np.amax(s) * eps
    return np.sum(s > tol)
def null_space(A, eps=1e-15, nonempty=False):
    """Returns the null space N_A to matrix A such that A N_A = 0.

    @param A: 2-D array
    @param eps: relative singular-value cutoff
    @param nonempty: if True and the computed null space is empty, fall
        back to the right singular vector(s) of the smallest singular
        value(s)
    """
    u, s, v = linalg.svd(A, full_matrices=1)
    m = A.shape[0]
    n = A.shape[1]
    tol = np.amax([m, n]) * np.amax(s) * eps
    rank = np.sum(s > tol)
    N_space = v[range(rank, n), :].T
    if nonempty and (len(N_space) == 0):
        # Bug fix: the original `np.amax(n - 1, 1)` passed `1` as the
        # *axis* argument and raised on a scalar input; the built-in
        # `max(n - 1, 1)` is what was intended.
        N_space = v[range(max(n - 1, 1), n), :]
    return N_space
|
8,794 | 43b5936ca9368dcae8d41b44fd9dc927fe18c9bc | from django.db import transaction
from django.contrib.auth.models import Group
from drf_yasg import openapi
from drf_yasg.utils import swagger_auto_schema
from rest_framework import status, mixins
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.viewsets import ReadOnlyModelViewSet, GenericViewSet
from rest_access_policy import AccessViewSetMixin
from .models import CustomUsuario, PasswordResetToken
from .serializers import (
GroupSerializer,
CustomUsuarioSerializer,
CustomUsuarioMudarPasswordSerializer,
CustomUsuarioMudarPasswordAposResetSerializer,
CustomUsuarioMudarEmailSerializer,
CustomUsuarioMudarGrupoSerializer,
CustomUsuarioMudarAtivacaoSerializer,
PasswordResetTokenSerializer
)
from .views_access_policies import GroupAccessPolicy, CustomUsuarioAccessPolicy, PasswordResetTokenAccessPolicy
class CustomUsuarioViewSet(AccessViewSetMixin,
                           mixins.CreateModelMixin,
                           mixins.RetrieveModelMixin,
                           mixins.ListModelMixin,
                           GenericViewSet):
    """
    CustomUsuario ViewSet description:

    create: Create a user.

    retrieve: Fetch a user.

    list: List users.

    ativar: Activate a user.

    desativar: Deactivate a user.

    mudar_password_apos_reset: Change the user's password after a reset was
    requested; as a consequence, the token that allowed the change is
    deactivated.

    mudar_password: Update the user's password.

    mudar_email: Update the user's e-mail.

    mudar_grupo: Update the user's group(s).
    """
    access_policy = CustomUsuarioAccessPolicy
    serializer_class = CustomUsuarioSerializer

    def get_queryset(self):
        # Explicit ordering keeps pagination deterministic.
        return CustomUsuario.objects.all().order_by('id')

    def perform_create(self, serializer):
        # Stamp the record with the authenticated user performing the change.
        serializer.save(usuario_modificacao=self.request.user)

    def perform_update(self, serializer):
        serializer.save(usuario_modificacao=self.request.user)

    @swagger_auto_schema(method='patch', manual_parameters=[openapi.Parameter('token',
                                                                              openapi.IN_QUERY,
                                                                              type=openapi.TYPE_STRING,
                                                                              required=True)])
    @transaction.atomic
    @action(detail=True, methods=['patch'], serializer_class=CustomUsuarioMudarPasswordAposResetSerializer)
    def mudar_password_apos_reset(self, request, pk=None):
        # Password change authorized by a reset token in the query string.
        # @transaction.atomic: token deactivation and password change either
        # both commit or both roll back.
        usuario = self.get_object()
        try:
            token = request.query_params['token']
        except KeyError:
            return Response({'status': 'Token não informado.'},
                            status=status.HTTP_400_BAD_REQUEST)
        try:
            token_instance = usuario.password_reset_tokens.get(token=token)
        except PasswordResetToken.DoesNotExist:
            return Response({'status': 'Token inválido.'},
                            status=status.HTTP_400_BAD_REQUEST)
        # Consume the token first so it cannot be reused.
        serializer_token = PasswordResetTokenSerializer(token_instance,
                                                        data={'ativo': False},
                                                        partial=True)
        if serializer_token.is_valid():
            serializer_token.save()
        else:
            return Response(serializer_token.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer_usuario = self.get_serializer(
            usuario,
            data=request.data,
            partial=True
        )
        if serializer_usuario.is_valid():
            serializer_usuario.save()
            return Response({'status': 'A nova senha foi registrada.'},
                            status=status.HTTP_200_OK)
        return Response(serializer_usuario.errors,
                        status=status.HTTP_400_BAD_REQUEST)

    @action(detail=True, methods=['patch'], serializer_class=CustomUsuarioMudarPasswordSerializer)
    def mudar_password(self, request, pk=None):
        # Regular password change for an authenticated session.
        usuario = self.get_object()
        serializer = self.get_serializer(usuario,
                                         data=request.data,
                                         partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response({'status': 'A nova senha foi registrada.'},
                            status=status.HTTP_200_OK)
        return Response(serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST)

    @action(detail=True, methods=['patch'], serializer_class=CustomUsuarioMudarEmailSerializer)
    def mudar_email(self, request, pk=None):
        usuario = self.get_object()
        # Changing e-mail requires re-stating the current password.
        if 'password' not in request.data:
            return Response({'status': 'Para mudar o e-mail é necessário '
                                       'informar a senha atual.'},
                            status=status.HTTP_400_BAD_REQUEST)
        serializer = self.get_serializer(usuario, data=request.data, partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response({'status': 'O e-mail foi alterado com sucesso.'}, status=status.HTTP_200_OK)
        return Response(serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST)

    @action(detail=True, methods=['patch'], serializer_class=CustomUsuarioMudarGrupoSerializer)
    def mudar_grupo(self, request, pk=None):
        usuario = self.get_object()
        serializer = self.get_serializer(usuario, data=request.data, partial=True)
        if serializer.is_valid():
            serializer.save()
            return Response({'status': 'O grupo foi alterado com sucesso.'}, status=status.HTTP_200_OK)
        return Response(serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST)

    @action(detail=True, methods=['patch'], serializer_class=CustomUsuarioMudarAtivacaoSerializer)
    def ativar(self, request, pk=None):
        usuario = self.get_object()
        serializer = self.get_serializer(
            usuario,
            data={'is_active': True},
            partial=True
        )
        if serializer.is_valid():
            serializer.save()
            # Best-effort: keep the linked profile's flag in sync when a
            # profile exists.
            try:
                usuario.perfil.ativo = True
                usuario.perfil.save()
            except Exception:
                print("Não há perfil vinculado ao usuário.")
            return Response({'status': 'Usuário ativado.'},
                            status=status.HTTP_200_OK)
        return Response(serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST)

    @action(detail=True, methods=['patch'], serializer_class=CustomUsuarioMudarAtivacaoSerializer)
    def desativar(self, request, pk=None):
        # Mirror image of `ativar`: flips is_active (and the profile flag)
        # to False.
        usuario = self.get_object()
        serializer = self.get_serializer(
            usuario,
            data={'is_active': False},
            partial=True
        )
        if serializer.is_valid():
            serializer.save()
            try:
                usuario.perfil.ativo = False
                usuario.perfil.save()
            except Exception:
                print("Não há perfil vinculado ao usuário.")
            return Response({'status': 'Usuário desativado.'},
                            status=status.HTTP_200_OK)
        return Response(serializer.errors,
                        status=status.HTTP_400_BAD_REQUEST)
class GroupViewSet(AccessViewSetMixin, ReadOnlyModelViewSet):
    """
    Group ViewSet description:

    list: List groups.

    retrieve: Fetch a group.
    """
    access_policy = GroupAccessPolicy
    serializer_class = GroupSerializer

    def get_queryset(self):
        # Explicit ordering keeps pagination deterministic.
        return Group.objects.all().order_by('id')
class PasswordResetTokenViewSet(AccessViewSetMixin,
                                mixins.CreateModelMixin,
                                mixins.RetrieveModelMixin,
                                mixins.ListModelMixin,
                                GenericViewSet):
    """
    Password Reset Token ViewSet description:

    create: Create a token.

    retrieve: Fetch a token.

    list: List tokens.
    """
    access_policy = PasswordResetTokenAccessPolicy
    serializer_class = PasswordResetTokenSerializer

    def get_queryset(self):
        return PasswordResetToken.objects.all().order_by('id')

    def create(self, request, *args, **kwargs):
        # Same flow as CreateModelMixin.create, but the response carries a
        # fixed status message instead of echoing the serialized token.
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response({'status': 'Token criado. E-mail enviado ao '
                                   'usuário para criação de nova senha.'},
                        status=status.HTTP_201_CREATED, headers=headers)
|
8,795 | d70f77713abf4b35db9de72c1edbf4bf4580b2a4 | from random import shuffle, choice
from typing import Dict, List, Tuple
# Scale degree (1..7 of a major scale) -> semitone offset above the root.
note_to_midi: Dict[int, int] = {
    1: 0,
    2: 2,
    3: 4,
    4: 5,
    5: 7,
    6: 9,
    7: 11,
}

# Inverse mapping: semitone offset above the root -> scale degree.
midi_to_note: Dict[int, int] = {
    0: 1,
    2: 2,
    4: 3,
    5: 4,
    7: 5,
    9: 6,
    11: 7,
}


class Note:
    """A diatonic scale degree, normalised to the range 1..7.

    Degree arithmetic wraps modulo the seven degrees, so Note(8) == Note(1)
    and Note(0) == Note(7).
    """

    num: int  # normalised degree, always in 1..7

    @classmethod
    def choice(cls, *args: int):
        """Build a Note from one of *args*, chosen uniformly at random."""
        return Note(choice(args))

    @classmethod
    def from_midi(cls, midi: int, root: int):
        """Map a MIDI pitch back to a scale degree relative to *root*.

        Raises:
            ValueError: if the pitch class is not part of the major scale
                built on *root*.
        """
        # BUGFIX: the offset within the octave is (midi - root) mod 12.
        # The previous ``midi % root`` only worked for pitches inside the
        # single octave starting at the root, so pitches produced by
        # get_all_possible_midi (other octaves) raised ValueError.
        note = midi_to_note.get((midi - root) % 12)
        if isinstance(note, int):
            return cls(note)
        raise ValueError()

    def __init__(self, num: int):
        # Fold any integer into the canonical 1..7 range.
        while num > 7:
            num -= 7
        while num <= 0:
            num += 7
        self.num = num

    def __int__(self):
        return self.num

    def __repr__(self):
        return str(self.num)

    def __str__(self):
        return f'Note: {self.num}'

    def __hash__(self):
        return hash(self.num)

    def _distance(self, other):
        """Signed degree difference; only defined between Notes."""
        if isinstance(other, Note):
            return self.num - other.num
        raise TypeError()

    def __eq__(self, other):
        return self._distance(other) == 0

    def __lt__(self, other):
        return self._distance(other) < 0

    def __le__(self, other):
        return self._distance(other) <= 0

    def __gt__(self, other):
        return self._distance(other) > 0

    def __ge__(self, other):
        return self._distance(other) >= 0

    def _get_interval(self, interval: int):
        """Return both notes lying *interval* degrees below and above."""
        return {Note(self.num - interval), Note(self.num + interval)}

    def get_unison(self):
        return self._get_interval(0)

    def get_second(self):
        return self._get_interval(1)

    def get_thirds(self):
        return self._get_interval(2)

    def get_forth(self):
        return self._get_interval(3)

    def get_fifth(self):
        return self._get_interval(4)

    def get_sixth(self):
        return self._get_interval(5)

    def get_seventh(self):
        return self._get_interval(6)

    def inv(self):
        # Mirror around degree 3: 1<->5, 2<->4, 3 fixed; 6 and 7 swap
        # after wrapping back into 1..7.
        return Note(6 - self.num)

    def get_next_possible_notes(self, /, leap=True):
        """Neighbouring degrees (steps, plus leaps of a third when *leap*
        is true), shuffled into random order."""
        ret = [Note(self.num - 1), Note(self.num + 1)]
        if leap:
            ret += [Note(self.num - 2), Note(self.num + 2)]
        shuffle(ret)
        return ret

    def __sub__(self, other) -> int:
        """Unsigned interval size, folded so it is at most a fourth."""
        dist = abs(self._distance(other))
        if dist > 3:
            dist = 7 - dist
        return dist

    def get_all_possible_midi(self, root: int) -> List[int]:
        """All MIDI pitches in 0..127 that realise this degree over *root*."""
        midi = self.convert_to_midi(root)
        assert midi >= 0
        ret: List[int] = []
        # Drop to the lowest octave, then climb in 12-semitone steps.
        while midi - 12 >= 0:
            midi -= 12
        while midi <= 127:
            ret.append(midi)
            midi += 12
        return ret

    def convert_to_midi(self, root: int) -> int:
        """MIDI pitch of this degree in the octave starting at *root*."""
        return note_to_midi[self.num] + root
# Degrees on which an inversion is allowed to land.
INVERSE_POSSIBLE_NOTE = {
    Note(2), Note(3), Note(4),
}


def choose_from_inverse_possible_note():
    """Pick one degree, uniformly at random, from INVERSE_POSSIBLE_NOTE."""
    candidates = list(INVERSE_POSSIBLE_NOTE)
    return choice(candidates)
# Every diatonic degree (1..7).
ALL_NOTES = {Note(degree) for degree in range(1, 8)}


def choose_from_all_notes():
    """Pick one degree, uniformly at random, from ALL_NOTES."""
    candidates = list(ALL_NOTES)
    return choice(candidates)
def fill_in_thirds() -> Tuple[Note, Note]:
    """Return a random (first, second) pair where *second* lies a third
    above or below *first*."""
    base = choice(list(ALL_NOTES))
    partner = choice(list(base.get_thirds()))
    return base, partner
|
8,796 | a70dae504a4dfa3997a11e4c605accfab0024318 | from ttkwidgets import CheckboxTreeview
from tkinter import *
from tkinter.ttk import *
from tkinter import messagebox
import json
import os
from DbDataloader import *
class DataLoader():
    """Modal Tkinter window that previews the JSON files produced by the
    DataAcquisition module and loads the checked days into the database,
    either directly or inside a single explicit transaction."""

    def __init__(self,master):
        # Year -> available dates mapping, read from DonneUtile.json.
        self.anne={}
        # Secondary modal window stacked on top of *master*.
        self.master=Toplevel(master)
        master.wait_visibility(self.master)
        self.master.grab_set()
        self.master.minsize(900,680)
        self.master.resizable(width=False,height=True)
        self.tree = CheckboxTreeview(self.master,height=25)
        # NOTE(review): uses chdir into ./DonneJson and back; fragile if
        # anything else changes the process working directory — confirm.
        os.chdir("DonneJson")
        with open("DonneUtile.json","r") as rf:
            self.anne.update(json.load(rf))
            rf.close()  # redundant: the with-statement already closes the file
        os.chdir("..")
        # Newest files first.
        self.Nomfichier=os.listdir("DonneJson")
        self.Nomfichier.sort(reverse=True)
        self.modeTransaction=False
        self.db=DbDataloader(self.modeTransaction,self.master)
        self.main()

    # Function to read the json files already in the DataAcquisition module
    def main(self):
        """Ask whether to load data transactionally, then build the UI."""
        choice= messagebox.askyesno("askquestion","Cliquer sur Oui pour charger les données en mode Trasactionnel")
        if choice :
            self.modeTransaction=True
            self.db.conn.start_transaction()
            self.master.title("Data Loader : Mode Transanction=OUI")
            self.create_widgets()
        else:
            self.modeTransaction=False
            self.master.title("Data Loader : Mode Transanction=NON")
            self.create_widgets("Non Transanction")

    def lireFichier(self):
        """Left panel: list the captured JSON files and preview one in a
        blocking popup window."""
        label_welcome1 = Label(self.master,text="Prévisualiser les données",
                               borderwidth = 7,
                               width = 40,
                               relief="groove"
                               )
        label_welcome1.grid(row = 1, column = 0, padx = 50)
        label_welcome2 = Label(self.master,text="Selectionner le fichier pour la lecture")
        label_welcome2.grid(row = 2, column = 0, )
        listbox = Listbox(self.master, width=40, height=20,selectmode=SINGLE)
        i=0
        # Only list files whose name contains "2" (presumably dated files
        # such as 2021-...; verify against the acquisition naming scheme).
        for fichier in self.Nomfichier:
            if "2" in fichier:
                listbox.insert(i, fichier)
                i=i+1
        def afficherObjet(Obj):
            """Render the JSON file *Obj* as an indented text blob."""
            try:
                os.chdir("DonneJson")
                textFichier={}
                with open(Obj,"r") as rf:
                    textFichier.update(json.load(rf))
                    rf.close()  # redundant under the with-statement
                if textFichier:
                    texte="{\n"
                    for key,val in textFichier.items():
                        b ="\t{\n"
                        c="\t"+str(key)+" :\n"
                        d=""
                        for key1,val1 in val.items():
                            d=str(d)+"\t\t"+str(key1)+" :"+" "+str(val1)+"\n"
                        e="\t},\n"
                        texte=texte+b+c+d+e
                    texte=texte+"}\n"
                    texte=texte+"\n\n\t"+str(len(textFichier))+" Objets eenregistrer dans le fichier "+Obj
                os.chdir("..")
                return texte
            except Exception as e:
                # NOTE(review): an empty file leaves ``texte`` unbound and
                # lands here too, not only a missing file.
                print(e)
                messagebox.showerror(title="Erreur !!!", message="Fichier "+Obj+" introuvable")
        def selected_item():
            """Open the currently selected file in a modal preview window."""
            try:
                if listbox.get(listbox.curselection()):
                    textes=afficherObjet(listbox.get(listbox.curselection()))
                    if textes:
                        fil = Toplevel(self.master)
                        # blocking window: prevents opening identical windows
                        self.master.wait_visibility(fil)
                        fil.grab_set()
                        # end blocking window
                        fil.geometry("600x600")
                        fil.title("Fichier :"+listbox.get(listbox.curselection()))
                        yscroll = Scrollbar(fil)
                        yscroll.pack(side=RIGHT, fill=Y)
                        xscroll = Scrollbar(fil, orient=HORIZONTAL)
                        xscroll.pack(side=BOTTOM, fill=X)
                        text1 = Text(fil,wrap=NONE,height=30, width=100,yscrollcommand=yscroll.set,
                                     xscrollcommand=xscroll.set)
                        text1.config(state="normal")
                        text1.insert("1.0",textes)
                        text1.pack(side=LEFT)
                        yscroll.config(command=text1.yview)
                        xscroll.config(command=text1.xview)
                        fil.mainloop()
                        fil.quit()
            except :
                messagebox.showerror(title="Erreur !!!", message="Vous selectionner un fichier d`abord")
        listbox.grid(row = 3, column = 0, pady =20 )
        btn = Button(self.master, text='Lire Le Fichier', command=selected_item)
        btn.grid(row = 3, column = 1, pady =6 )

    # Function to tick the dates and then save them to the database
    def CaseCocher(self,mode="Transanction"):
        """Right panel: checkbox tree of dates plus Validate (and, in
        transactional mode, COMMIT / ROLLBACK) buttons."""
        style = Style()
        style.configure('W.TButton', font =
                        ('calibri', 15, 'bold', 'underline'),
                        foreground = 'red')
        style.configure('G.TButton', font =
                        ('calibri', 15, 'bold','underline'),
                        foreground = 'green')
        # collect the checked rows
        def getCheckDict(obj):
            # Group checked ids by their first 7 characters (presumably a
            # YYYY-MM month prefix — confirm against the tree item ids).
            selectDate={}
            for t in obj:
                try:
                    selectDate[t[:7]].append(t)
                except:
                    selectDate[t[:7]]=[]
                    selectDate[t[:7]].append(t)
            return selectDate
        def valider():
            if self.tree.get_checked():
                # if the user confirms (in transaction mode)
                choice= messagebox.askyesno("Askquestion!!!","Vous etes sur pour la validation")
                if choice==True:
                    self.db.Alldayselected =getCheckDict(self.tree.get_checked())
                    if self.modeTransaction == False:
                        # non-transactional mode
                        self.db.insertCommunique()
                    else:
                        # transactional mode
                        self.db.insertCommunique()
            else:
                messagebox.showerror(title="Erreur !!!", message="Cocher une case au moins !!!")
        def commit():
            choice= messagebox.askyesno("Askquestion!!!","Vouliez-vouz faire un commit?")
            if choice==True:
                messagebox.showinfo("Info","Mode Commit en cours")
                self.db.conn.commit()
                # Re-open a transaction so further inserts stay transactional.
                self.db.conn.start_transaction()
        def rollback():
            choice= messagebox.askyesno("Askquestion!!!","Vouliez-vouz faire un rollback?")
            if choice==True:
                messagebox.showinfo("Info","Mode rollback en cours ")
                self.db.conn.rollback()
                self.db.conn.start_transaction()
        label_welcomec = Label(self.master,
                               text="La liste des fichiers json obtenus avec leur arborescence",
                               borderwidth = 7,
                               relief="groove")
        label_welcomec.grid(row = 1, column = 3, pady = 8)
        vsb = Scrollbar(self.master, orient="vertical", command=self.tree.yview)
        vsb.place(relx=0.978, rely=0.175, relheight=0.713, relwidth=0.020)
        self.tree.configure(yscrollcommand=vsb.set)
        # Tree: SELECT ALL -> year -> date, built from self.anne.
        self.tree.insert("", "end", "ALL", text="SELECT ALL")
        for key,val in self.anne.items():
            self.tree.insert("ALL", "end", key, text=key)
            for i in val:
                self.tree.insert(key,"end", i, text=i)
        self.tree.grid(row = 3, column = 3, pady = 2)
        button_name=Button(self.master,text="Valider",command=valider)
        button_name.grid(row = 3, column = 4, pady = 2)
        if mode=="Transanction":
            commit_buttoon_name=Button(self.master,text="COMMIT",command=commit,style="G.TButton"
                                       )
            commit_buttoon_name.grid(row = 4, column = 3, pady = 2)
            rollback_buttoon_name=Button(self.master, text = 'ROLLBACK !',
                                         style = 'W.TButton',command=rollback)
            rollback_buttoon_name.grid(row = 4, column = 4, pady = 2)

    def create_widgets(self,mode="Transanction"):
        """Build both panels; *mode* controls the COMMIT/ROLLBACK buttons."""
        self.lireFichier()
        self.CaseCocher(mode)

    def mains(self,obj):
        """Run *obj*'s main loop, rolling back any open transaction on exit.

        NOTE(review): operates on *obj*, not on ``self`` — confirm intent.
        """
        obj.master.mainloop()
        obj.db.conn.rollback()
|
8,797 | b2d5b16c287dc76a088f6e20eca4a16dd0aad00f | from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    """Management command that applies one arithmetic operation (add,
    substract, multiply or divide; default add) over the given integers
    and prints the expression together with its result."""

    help = 'Closes the specified poll for voting'

    def add_arguments(self, parser):
        # Positional arguments: the integer operands.
        parser.add_argument('poll_id', nargs='+', type=int)
        # Named (optional) flags selecting the operation.
        for flag in ('add', 'substract', 'multiply', 'divide'):
            parser.add_argument(
                '--' + flag,
                action='store_true',
                dest=flag,
                default=False,
                help=flag
            )

    def handle(self, *args, **options):
        operands = options['poll_id']
        # Number of operation flags supplied; zero means default to add.
        flags_set = sum(options[name] for name in ['add', 'substract', 'multiply', 'divide'])
        if options['add'] or not flags_set:
            result = sum(operands)
            expr = ''.join('{} + '.format(value) for value in operands)
        elif options['substract']:
            result = operands[0]
            expr = '{} - '.format(operands[0])
            for value in operands[1:]:
                expr += '{} - '.format(value)
                result -= value
        elif options['multiply']:
            result = 1
            expr = ''
            for value in operands:
                expr += '{} × '.format(value)
                result *= value
        elif options['divide']:
            result = operands[0]
            expr = '{} ÷ '.format(operands[0])
            for value in operands[1:]:
                expr += '{} ÷ '.format(value)
                result /= value
        # expr always ends with "<op> ", so trim the trailing operator.
        self.stdout.write(self.style.SUCCESS('{}= {}'.format(expr[:-2], result)))
|
8,798 | a995305cb5589fa0cbb246ae3ca6337f4f2c3ca1 | from django.apps import AppConfig
class ClassromConfig(AppConfig):
    """Django application configuration for the ``classrom`` app."""
    # App label/dotted path used by Django's app registry.
    name = 'classrom'
|
8,799 | 8e74bd0c051b672bf22c2c8dfb03760805b105c5 | """Tests for Node objects."""
import numpy as np
import unittest
import optimus.core as core
import optimus.nodes as nodes
import optimus.util as util
def __relu__(x):
    """Numpy Rectified Linear Unit: elementwise max(x, 0)."""
    return (np.abs(x) + x) / 2
class NodeTests(unittest.TestCase):
    # Unit tests for optimus node types: each test wires core.Input
    # objects into a node, compiles the resulting graph with
    # util.compile, and checks the numerical output against a plain
    # numpy reference computation.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_Node(self):
        pass

    def test_Constant(self):
        # A Constant takes no inputs and emits exactly its stored value.
        n = nodes.Constant(name='test', shape=None)
        n.data.value = 1.0
        n.transform()
        fx = util.compile(inputs=[], outputs=[n.output])
        np.testing.assert_equal(np.array(fx()[0]), 1.0)

    def test_Add(self):
        x1 = core.Input(name='x1', shape=(2, 2))
        x2 = core.Input(name='x2', shape=(2, 2))
        n = nodes.Add(name='accumulate', num_inputs=2)
        n.input_0.connect(x1)
        # Transforming with a dangling input must fail loudly.
        with self.assertRaises(nodes.UnconnectedNodeError):
            n.transform()
        n.input_1.connect(x2)
        # Output shape is only known after transform().
        self.assertIsNone(n.output.shape)
        n.transform()
        self.assertEqual(n.output.shape, (2, 2))
        fx = util.compile(inputs=[x1, x2],
                          outputs=[n.output])
        a = np.array([[3, -1], [3, 7]])
        b = np.array([[1, 2], [3, 4]])
        z = fx(a, b)[0]
        np.testing.assert_equal(z, np.array([[4, 1], [6, 11]]))

    @unittest.skip("Not fully implemented yet.")
    def test_Bincount(self):
        # Bincount is expected to accumulate counts across calls.
        x1 = core.Input(name='x1', shape=(None,))
        n = nodes.Bincount(name='counter', max_int=3)
        n.input.connect(x1)
        n.transform()
        fx = util.compile(inputs=[x1], outputs=[n.counts])
        a = np.array([3, 0, 3, 1])
        np.testing.assert_equal(n.counts.value, np.array([0, 0, 0, 0]))
        np.testing.assert_equal(fx(a)[0], np.array([1, 1, 0, 2]))
        np.testing.assert_equal(fx(a)[0], np.array([2, 2, 0, 4]))

    def test_Concatenate(self):
        x1 = core.Input(name='x1', shape=(2, 2))
        x2 = core.Input(name='x2', shape=(2, 2))
        a = np.array([[3, -1], [3, 7]])
        b = np.array([[1, 2], [3, 4]])
        # Exercise concatenation along each axis.
        for axis in range(2):
            n = nodes.Concatenate(name='concatenate', num_inputs=2, axis=axis)
            n.input_0.connect(x1)
            with self.assertRaises(nodes.UnconnectedNodeError):
                n.transform()
            n.input_1.connect(x2)
            n.transform()
            fx = util.compile(inputs=[x1, x2],
                              outputs=[n.output])
            z = fx(a, b)[0]
            np.testing.assert_equal(z, np.concatenate([a, b], axis=axis))

    def test_Stack(self):
        x1 = core.Input(name='x1', shape=(2, 3))
        x2 = core.Input(name='x2', shape=(2, 3))
        a = np.arange(6).reshape(2, 3)
        b = np.arange(6).reshape(2, 3) + 6
        # axes=None stacks plainly; otherwise the result is transposed.
        for axes in None, (1, 2, 0), (2, 1, 0):
            n = nodes.Stack(name='stack', num_inputs=2, axes=axes)
            n.input_1.connect(x2)
            n.input_0.connect(x1)
            n.transform()
            fx = util.compile(inputs=[x1, x2],
                              outputs=[n.output])
            z = fx(a, b)[0]
            expected = np.array([a, b])
            if axes:
                expected = np.transpose(expected, axes)
            np.testing.assert_equal(z, expected)

    def test_Dimshuffle(self):
        x1 = core.Input(name='x1', shape=(2, 3))
        a = np.zeros([2, 3])
        # 'x' inserts a broadcastable dimension at that position.
        axes = [('x', 0, 1), (0, 1, 'x'), (1, 'x', 0)]
        shapes = [(1, 2, 3), (2, 3, 1), (3, 1, 2)]
        for ax, shp in zip(axes, shapes):
            n = nodes.Dimshuffle('dimshuffle', ax)
            n.input.connect(x1)
            n.transform()
            fx = util.compile(inputs=n.inputs.values(),
                              outputs=n.outputs.values())
            np.testing.assert_equal(fx(a)[0].shape, shp)

    def test_Slice(self):
        x1 = core.Input(name='x1', shape=(2, 3))
        a = np.arange(6).reshape(2, 3)
        # None keeps the full axis, an int indexes into it.
        slices = [(None, 1), (0, None), (1, 0)]
        ans = [a[:, 1], a[0, :], a[1, 0]]
        for slc, ans in zip(slices, ans):
            n = nodes.Slice('slice', slc)
            n.input.connect(x1)
            n.transform()
            fx = util.compile(inputs=n.inputs.values(),
                              outputs=n.outputs.values())
            np.testing.assert_equal(fx(a)[0], ans)

    def test_Log(self):
        x1 = core.Input(name='x1', shape=(2, 2))
        log = nodes.Log('log')
        log.input.connect(x1)
        log.transform()
        fx = util.compile(inputs=log.inputs.values(),
                          outputs=log.outputs.values())
        a = np.array([[3, 1], [4, 7]], dtype=np.float32)
        z = fx(a)[0]
        np.testing.assert_almost_equal(z, np.log(a))

    def test_Multiply(self):
        x1 = core.Input(name='x1', shape=(2, 2))
        a = np.array([[3, -1], [3, 7]])
        # Scalar weight (shape None) and full-shape weight.
        for w, shp in zip([-1, a], [None, a.shape]):
            n = nodes.Multiply(name='gain', weight_shape=shp)
            n.input.connect(x1)
            n.transform()
            fx = util.compile(inputs=n.inputs.values(),
                              outputs=n.outputs.values())
            # Weights start at zero, so the product is zero.
            np.testing.assert_equal(fx(a)[0], np.zeros_like(a))
            n.weight.value = w
            np.testing.assert_equal(fx(a)[0], w*a)
        # Row weight broadcast over axis 0.
        n = nodes.Multiply(name='gain', weight_shape=(1, 2), broadcast=[0])
        n.input.connect(x1)
        n.transform()
        fx = util.compile(inputs=n.inputs.values(),
                          outputs=n.outputs.values())
        np.testing.assert_equal(fx(a)[0], np.zeros_like(a))
        n.weight.value = a[0].reshape(1, -1)
        np.testing.assert_equal(fx(a)[0], a*a[0].reshape(1, -1))

    def test_Max(self):
        x1 = core.Input(name='x1', shape=(2, 2))
        a = np.array([[3, -1], [4, 7]])
        # Expected results for axis=None, 0, 1 respectively.
        res = 7, np.array([4, 7]), np.array([3, 7])
        for idx, axis in enumerate([None, 0, 1]):
            n = nodes.Max('max', axis=axis)
            n.input.connect(x1)
            n.transform()
            fx = util.compile(inputs=n.inputs.values(),
                              outputs=n.outputs.values())
            np.testing.assert_equal(fx(a)[0], res[idx])

    def test_Min(self):
        x1 = core.Input(name='x1', shape=(2, 2))
        a = np.array([[3, -1], [4, 7]])
        res = -1, np.array([3, -1]), np.array([-1, 4])
        for idx, axis in enumerate([None, 0, 1]):
            n = nodes.Min('min', axis=axis)
            n.input.connect(x1)
            n.transform()
            fx = util.compile(inputs=n.inputs.values(),
                              outputs=n.outputs.values())
            np.testing.assert_equal(fx(a)[0], res[idx])

    def test_Sum(self):
        x1 = core.Input(name='x1', shape=(2, 2))
        a = np.array([[3, -1], [4, 7]])
        res = 13, np.array([7, 6]), np.array([2, 11])
        for idx, axis in enumerate([None, 0, 1]):
            n = nodes.Sum('sum', axis=axis)
            n.input.connect(x1)
            n.transform()
            fx = util.compile(inputs=n.inputs.values(),
                              outputs=n.outputs.values())
            np.testing.assert_equal(fx(a)[0], res[idx])

    def test_Mean(self):
        x1 = core.Input(name='x1', shape=(2, 2))
        a = np.array([[3, -1], [4, 7]])
        res = 13 / 4.0, np.array([7, 6]) / 2.0, np.array([2, 11]) / 2.0
        for idx, axis in enumerate([None, 0, 1]):
            n = nodes.Mean('mean', axis=axis)
            n.input.connect(x1)
            n.transform()
            fx = util.compile(inputs=n.inputs.values(),
                              outputs=n.outputs.values())
            np.testing.assert_equal(fx(a)[0], res[idx])

    def test_NormalizeDim(self):
        x1 = core.Input(name='x1', shape=(1, 2, 3))
        a = np.array([[[3, 1, -1], [4, 0, 7]]], dtype=np.float32)
        # L2 norms along axis 0, 1, 2 respectively (axis 0 has a single
        # entry, so it degenerates to the sign of each element).
        expected = [np.sign(a),
                    a / np.sqrt(np.array([25, 1, 50])).reshape(1, 1, 3),
                    a / np.sqrt(np.array([11, 65])).reshape(1, 2, 1)]
        for axis, ans in enumerate(expected):
            n = nodes.NormalizeDim('l2norm', axis=axis, mode='l2')
            n.input.connect(x1)
            n.transform()
            fx = util.compile(inputs=n.inputs.values(),
                              outputs=n.outputs.values())
            np.testing.assert_almost_equal(fx(a)[0], ans)

    def test_SelectIndex(self):
        x1 = core.Input(name='x1', shape=(None, 2))
        idx = core.Input(name='idx', shape=(None,), dtype='int32')
        a = np.array([[3, -1], [4, 7]])
        i = np.array([1, 0])
        # Picks a[row, i[row]] for each row.
        n = nodes.SelectIndex('select')
        n.input.connect(x1)
        n.index.connect(idx)
        n.transform()
        fx = util.compile(inputs=[x1, idx],
                          outputs=n.outputs.values())
        np.testing.assert_equal(fx(a, i)[0], np.array([-1, 4]))

    def test_SquaredEuclidean(self):
        # Batched (2D) and single-vector (1D) cases.
        a1 = np.array([[3, -1], [4, 7]])
        b1 = np.array([[1, -1], [4, 7]])
        a2 = np.array([3, -1])
        b2 = np.array([1, -1])
        z1 = np.power(a1 - b1, 2.0).sum(axis=1)
        z2 = np.power(a2 - b2, 2.0).sum()
        for a, b, z in zip([a1, a2], [b1, b2], [z1, z2]):
            x1 = core.Input(name='x1', shape=a.shape)
            x2 = core.Input(name='x2', shape=b.shape)
            n = nodes.SquaredEuclidean('sqeuclid')
            n.input_a.connect(x1)
            n.input_b.connect(x2)
            n.transform()
            fx = util.compile(inputs=[x1, x2],
                              outputs=n.outputs.values())
            np.testing.assert_equal(fx(a, b)[0], z)

    def test_Product(self):
        a1 = np.array([[3, -1], [4, 7]])
        b1 = np.array([[1, -1], [4, 7]])
        a2 = np.array([3, -1])
        b2 = np.array([1, -1])
        for a, b in zip([a1, a2], [b1, b2]):
            x1 = core.Input(name='x1', shape=a.shape)
            x2 = core.Input(name='x2', shape=b.shape)
            n = nodes.Product('product')
            n.input_a.connect(x1)
            with self.assertRaises(nodes.UnconnectedNodeError):
                n.transform()
            n.input_b.connect(x2)
            self.assertTrue(n.is_ready())
            n.transform()
            fx = util.compile(inputs=[x1, x2],
                              outputs=n.outputs.values())
            # Elementwise product, not a dot product.
            np.testing.assert_equal(fx(a, b)[0], a*b)

    def test_Affine_linear(self):
        x1 = core.Input(name='x1', shape=(None, 2))
        a = np.array([[3, -1], [4, 7]])
        w = np.array([[1, -1], [2, -2], [3, -3]]).T
        b = np.ones(3)
        n = nodes.Affine(
            name='affine',
            input_shape=(None, 2),
            output_shape=(None, 3),
            act_type='linear')
        n.weights.value = w
        n.bias.value = b
        n.input.connect(x1)
        n.transform()
        fx = util.compile(inputs=[x1], outputs=n.outputs.values())
        # Linear activation: output is exactly a.dot(w) + b.
        np.testing.assert_equal(fx(a)[0], np.dot(a, w) + b)

    def test_Affine_relu(self):
        x1 = core.Input(name='x1', shape=(None, 2))
        a = np.array([[3, -1], [4, 7]])
        w = np.array([[1, -1], [2, -2], [3, -3]]).T
        b = np.ones(3)
        n = nodes.Affine(
            name='affine',
            input_shape=(None, 2),
            output_shape=(None, 3),
            act_type='relu')
        n.weights.value = w
        n.bias.value = b
        n.input.connect(x1)
        n.transform()
        fx = util.compile(inputs=[x1], outputs=n.outputs.values())
        np.testing.assert_equal(fx(a)[0], __relu__(np.dot(a, w) + b))

    def test_Affine_dropout(self):
        x1 = core.Input(name='x1', shape=(None, 2))
        dropout = core.Input(name='dropout', shape=None)
        a = np.array([[3, -1], [4, 7]])
        w = np.array([[1, -1], [2, -2], [3, -3]]).T
        b = np.ones(3)
        n = nodes.Affine(
            name='affine',
            input_shape=(None, 2),
            output_shape=(None, 3),
            act_type='linear')
        n.weights.value = w
        n.bias.value = b
        n.enable_dropout()
        n.input.connect(x1)
        # With dropout enabled, the dropout port must also be connected.
        with self.assertRaises(nodes.UnconnectedNodeError):
            n.transform()
        n.dropout.connect(dropout)
        n.transform()
        fx = util.compile(inputs=[x1, dropout], outputs=n.outputs.values())
        # p=0 is a no-op; p=0.9 should zero at least one output (stochastic).
        np.testing.assert_equal(fx(a, 0.0)[0], np.dot(a, w) + b)
        self.assertGreaterEqual(np.equal(fx(a, 0.9)[0], 0.0).sum(), 1)

    def test_Affine_share_params(self):
        x = core.Input(name='x1', shape=(None, 2))
        a = np.array([[3, -1], [4, 7]])
        w = np.array([[1, -1], [2, -2], [3, -3]]).T
        b = np.ones(3)
        n1 = nodes.Affine(
            name='affine',
            input_shape=(None, 2),
            output_shape=(None, 3),
            act_type='linear')
        n2 = nodes.Affine(
            name='affine_copy',
            input_shape=(None, 2),
            output_shape=(None, 3),
            act_type='linear')
        n2.share_params(n1)
        n1.weights.value = w
        n1.bias.value = b
        # Shared parameters: updating n1 must be visible through n2.
        np.testing.assert_equal(n1.weights.value, n2.weights.value)
        np.testing.assert_equal(n1.bias.value, n2.bias.value)
        n2.input.connect(x)
        n2.transform()
        fx = util.compile(inputs=[x], outputs=n2.outputs.values())
        np.testing.assert_equal(fx(a)[0], np.dot(a, w) + b)
        n1.weights.value *= 2
        np.testing.assert_equal(fx(a)[0], np.dot(a, 2*w) + b)

    def test_Conv3D_linear(self):
        x1 = core.Input(name='x1', shape=(None, 1, 2, 3))
        a = np.array([[3, -1], [4, 7], [2, -6]]).reshape(2, 3)
        w = np.array([[[1], [-2]],
                      [[-3], [4]],
                      [[5], [-6]]]).reshape(3, 2, 1)
        b = np.arange(3)
        # Note that convolutions flip the kernels
        z = np.array([[(a*wi[::-1]).sum(axis=0) + bi
                       for wi, bi in zip(w, b)]])
        n = nodes.Conv3D(
            name='conv3d',
            input_shape=(None, 1, 2, 3),
            weight_shape=(3, 1, 2, 1),
            act_type='linear')
        n.weights.value = w.reshape(3, 1, 2, 1)
        n.bias.value = b
        n.input.connect(x1)
        n.transform()
        fx = util.compile(inputs=[x1], outputs=n.outputs.values())
        np.testing.assert_equal(fx(a.reshape(1, 1, 2, 3))[0],
                                z.reshape(1, 3, 1, 3))

    def test_Conv3D_relu(self):
        x1 = core.Input(name='x1', shape=(None, 1, 2, 3))
        a = np.array([[3, -1], [4, 7], [2, -6]]).reshape(2, 3)
        w = np.array([[[1], [-2]],
                      [[-3], [4]],
                      [[5], [-6]]]).reshape(3, 2, 1)
        b = np.arange(3)
        # Note that convolutions flip the kernels
        z = np.array([[(a*wi[::-1]).sum(axis=0) + bi
                       for wi, bi in zip(w, b)]])
        # Reshape from convenience
        a = a.reshape(1, 1, 2, 3)
        z = z.reshape(1, 3, 1, 3)
        n = nodes.Conv3D(
            name='conv3d',
            input_shape=(None, 1, 2, 3),
            weight_shape=(3, 1, 2, 1),
            act_type='relu')
        n.weights.value = w.reshape(3, 1, 2, 1)
        n.bias.value = b
        n.input.connect(x1)
        n.transform()
        fx = util.compile(inputs=[x1], outputs=n.outputs.values())
        np.testing.assert_equal(fx(a)[0], __relu__(z))

    def test_Conv3D_dropout(self):
        x1 = core.Input(name='x1', shape=(None, 1, 2, 3))
        dropout = core.Input(name='dropout', shape=None)
        a = np.array([[3, -1], [4, 7], [2, -6]]).reshape(2, 3)
        w = np.array([[[1], [-2]],
                      [[-3], [4]],
                      [[5], [-6]]]).reshape(3, 2, 1)
        b = np.arange(3)
        # Note that convolutions flip the kernels
        z = np.array([[(a*wi[::-1]).sum(axis=0) + bi
                       for wi, bi in zip(w, b)]])
        # Reshape from convenience
        a = a.reshape(1, 1, 2, 3)
        z = z.reshape(1, 3, 1, 3)
        n = nodes.Conv3D(
            name='conv3d',
            input_shape=(None, 1, 2, 3),
            weight_shape=(3, 1, 2, 1),
            act_type='linear')
        n.enable_dropout()
        n.weights.value = w.reshape(3, 1, 2, 1)
        n.bias.value = b
        n.input.connect(x1)
        with self.assertRaises(nodes.UnconnectedNodeError):
            n.transform()
        n.dropout.connect(dropout)
        n.transform()
        fx = util.compile(inputs=[x1, dropout], outputs=n.outputs.values())
        np.testing.assert_equal(fx(a, 0.0)[0], z)
        self.assertGreaterEqual(np.equal(fx(a, 0.9)[0], 0.0).sum(), 1)

    def test_RadialBasis(self):
        x = core.Input(name='x', shape=(None, 2))
        a = np.array([[3, -1], [4, 7]])
        w = np.array([[1, -1], [2, -2], [3, -3]]).T
        n = nodes.RadialBasis(
            name='radial',
            input_shape=x.shape,
            output_shape=(None, 3))
        n.weights.value = w.reshape(2, 3)
        n.input.connect(x)
        n.transform()
        fx = util.compile(inputs=[x], outputs=n.outputs.values())
        # Squared distance from each input row to each weight column.
        z = np.power(a.reshape(2, 2, 1) - w.reshape(1, 2, 3),
                     2.0).sum(axis=1)
        np.testing.assert_equal(fx(a)[0], z)

    def test_SliceGT(self):
        x = core.Input(name='x', shape=(None,))
        # Keeps only elements strictly greater than `value`.
        n = nodes.SliceGT(name='slice-greater', value=0)
        n.input.connect(x)
        n.transform()
        fx = util.compile(inputs=[x], outputs=n.outputs.values())
        a = np.array([1, -2, 0])
        np.testing.assert_equal(fx(a)[0], np.array([1]))
# Allow running this test module directly without a separate test runner.
if __name__ == "__main__":
    unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.