blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
c185c5e08b65eb14afe9449e819eb7edcfeb8c1e | Python | moonlaughs/SQLiteInsertMany | /SQLITE3-V1/create.py | UTF-8 | 627 | 2.9375 | 3 | [] | no_license | import sqlite3
import uuid
import faker
fake = faker.Faker()
try:
    # Open (or create) the local SQLite database file used by the demo below.
    db = sqlite3.connect("data.db")
except:
    # NOTE(review): bare except hides the real cause; sqlite3.connect raises
    # sqlite3.Error subclasses that could be reported here.
    print('error - cannot connect to the database')
# INSERT MANY WITH FAKER
'''
try:
for _ in range(1000):
bulk = "INSERT INTO users VALUES "
for _ in range(500):
id = str(uuid.uuid4())
name = fake.name()
bulk += f"('{id}','{name}'),"
bulk = bulk.rstrip(",")
#print(bulk)
q = db.execute(bulk)
db.commit()
q = db.execute("SELECT COUNT(*) FROM users").fetchone()
print(f"COUNT: {q[0]}")
except:
print("error - cannot insert")
''' | true |
abc1907cd0d0e1ff5720f61e27b1e0aa4477112a | Python | SumTwilight/spider_bilibili_comment | /控制台源代码/spider_bilibili_comment.py | UTF-8 | 9,064 | 3.015625 | 3 | [] | no_license | import requests
import traceback
import re
import pandas as pd
import json
import time
import os
from os import path
from PIL import Image
def get_html_text(url, headers, code='utf-8'):
    '''
    Fetch the HTML page at *url* and return its text.

    :param url: target URL
    :param headers: HTTP request headers dict, forwarded to requests
    :param code: encoding to assume for the response, defaults to 'utf-8'
    :return: page text on success, '' on any request failure
    '''
    try:
        r = requests.get(url, headers=headers, timeout=30)
        r.raise_for_status()
        r.encoding = code
        return r.text
    except requests.RequestException as e:
        # traceback.print_exc()
        # BUG FIX: the original handler printed r.raise_for_status(), which
        # either returns None or re-raises the HTTPError out of the handler,
        # and crashed with NameError when requests.get() itself failed
        # (r was never bound).  Report the caught exception instead.
        print("获取html页面失败", e)
        return ''
def get_comment_info(oid, pn, headers):
    '''
    Core scraping function: fetch *pn* pages of user comments for video *oid*
    from the bilibili reply API and collect them into one DataFrame.
    :param oid: video id (the numeric "aid", as a string)
    :param pn: number of comment pages to crawl
    :param headers: headers dict forwarded to requests
    :return: pandas.DataFrame with one row per comment
    '''
    start_url = 'https://api.bilibili.com/x/v2/reply?type=1&pn={}&oid=' + oid
    # Crawl pn pages of comments.
    dict_comment = {}
    k = 0  # running row number across all pages
    for i in range(pn):
        if i != 0:  # NOTE(review): page 0 is skipped — presumably pages are 1-based; confirm against the API
            try:
                # Fetch one page of reply JSON.
                url = start_url.format(i)
                data_json = get_html_text(url, headers)
                # Progress bar while crawling.
                print('\r当前进度:{:.2f}%'.format(i * 100 / pn), '[', '*' * int(i * 50 / pn),
                      '-' * int(50 - i * 50 / pn), ']', end='')
                # Reshape the raw JSON into flat per-comment dicts.
                data_dict = json.loads(data_json)
                comment_dict = data_dict["data"]["replies"]
                # assumes 20 replies per page — an IndexError on a shorter
                # page is swallowed by the except below
                for j in range(0, 20):
                    dict_temp = {'mid': comment_dict[j]['mid'], 'uname': comment_dict[j]['member']['uname'],
                                 'sex': comment_dict[j]['member']['sex'], 'sign': comment_dict[j]['member']['sign'],
                                 'current_level': comment_dict[j]['member']['level_info']['current_level'],
                                 'vipType': comment_dict[j]['member']['vip']['vipType'],
                                 'vipDueDate': comment_dict[j]['member']['vip']['vipDueDate'],
                                 'ctime': comment_dict[j]['ctime'], 'rcount': comment_dict[j]['count'],
                                 'message': comment_dict[j]['content']['message'],
                                 'like': comment_dict[j]['like']}
                    # Convert the Unix timestamps into date/time strings.
                    timeTemp = dict_temp['ctime']
                    timeArray = time.localtime(timeTemp)
                    dict_temp['ctime'] = time.strftime("%Y-%m-%d", timeArray)
                    dict_temp['ctime_time'] = time.strftime("%H:%M:%S", timeArray)
                    timeTemp = int(dict_temp['vipDueDate'] / 1000)  # vipDueDate is divided down from milliseconds
                    timeArray = time.localtime(timeTemp)
                    dict_temp['vipDueDate'] = time.strftime("%Y-%m-%d", timeArray)
                    # Store the row in the master dict.
                    dict_comment.update({k: dict_temp})
                    k = k + 1  # number the rows
            except:
                traceback.print_exc()
                continue
    # Final progress bar.
    print('\r当前进度:{:.2f}%'.format(100), '[', '*' * 50, ']')
    print('爬取完成,保存数据中.....')
    # Put the collected rows into a DataFrame.
    user_comment_data = pd.DataFrame(dict_comment).T  # transpose so rows = comments
    # user_comment_data.index = range((i - 1) * 20 + 1, (i - 1) * 20 + len(user_comment_data) + 1) # 为数据重新编号
    # data = pd.read_json(path, orient='index')
    return user_comment_data
def save_commment_to_json(datadf, data_name, img, data_path='./data/'):
    '''
    1) Save *datadf* as a JSON file named *data_name* under *data_path*.
    2) Extract the bare comment text from *datadf* and save it (one message
       per line) under './data/comment_data/'.
    3) Save the cover image bytes *img* under './data/image/'.
    :param datadf: data to store (DataFrame)
    :param data_name: base file name, e.g. quanzhi_comment
    :param img: raw JPEG bytes of the video cover image
    :param data_path: target directory for the JSON, e.g. ./data/
    :return: True when the overall routine completes, False on outer failure
    '''
    # path = ./data/total_comment.json
    try:
        try:
            # Write the complete JSON dataset.
            if not path.exists(data_path):
                os.makedirs(data_path)
            # NOTE: data_path is rebound to the full file path from here on.
            data_path = data_path + data_name + '.json'
            datadf_json = datadf.to_json(orient='index', force_ascii=False)
            with open(data_path, "w", encoding="utf-8") as file_data:
                file_data.write(datadf_json)
            print('评论用户数据已保存在:' + data_path)
        except:
            print('json信息写入出错。')
        try:
            # Write just the comment text, one message per line.
            data_path = './data/comment_data/'
            if not path.exists(data_path):
                os.makedirs(data_path)
            data_path += data_name + '.txt'
            with open(data_path, "w", encoding="utf-8") as file_data:
                for i in datadf['message']:
                    file_data.write(i)
                    file_data.write('\n')
            print('评论数据已保存在:' + data_path)
        except:
            print('评论数据写入出错。')
        try:
            # Save the cover image — this part is currently unused downstream.
            img_path = './data/image/'
            if not path.exists(img_path):
                os.makedirs(img_path)
            img_path += data_name
            with open(img_path+'.jpg', 'wb') as f:
                f.write(img)
            print('视频图片已保存在:' + img_path + '.jpg')
            # Use the remove.bg API to strip the image background (disabled).
            # response = requests.post(
            #     'https://api.remove.bg/v1.0/removebg',
            #     files={'image_file': open(img_path+'.jpg', 'rb')},
            #     data={'size': 'auto'},
            #     headers={'X-Api-Key': '52U8DP5PkSg6HhxmJzQcJbdf'},
            # )
            # if response.status_code == requests.codes.ok:
            #     with open(img_path+'.jpg', 'wb') as out:
            #         out.write(response.content)
            # else:
            #     print("Error:图片背景去除失败")
        except:
            traceback.print_exc()
            print('图片数据写入出错')
        return True
    except:
        traceback.print_exc()
        return False
def load_comment_from_json(data_name, data_path='./data/'):
    '''
    Load previously saved comment data from a JSON file.
    :param data_name: base file name (without the '.json' suffix)
    :param data_path: directory the file lives in, defaults to './data/'
    :return: pandas.DataFrame indexed exactly as it was saved
    '''
    json_file = '{}{}.json'.format(data_path, data_name)
    return pd.read_json(json_file, orient='index', encoding='utf-8')
def main_spider(url):
headers = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/68.0.3440.106 Safari/537.36',
}
try:
start_html = get_html_text(url, headers)
# with open('./start.html', 'w', encoding='utf-8') as f:
# f.write(start_html)
except:
# traceback.print_exc()
print('100:初始页面爬取失败,请重新输入链接或换一个视频')
return '爬取失败'
try:
# 得到视频AV(oid)号
oid = re.findall(r'av\d+|正片".{1,120}"aid":\d+', start_html)[0]
oid = re.findall(r'av\d+|"aid":\d+', oid)[0]
oid = re.findall(r'\d+', oid)[0]
# 得到视频的Name
AV_name = re.findall(r'<title>[\u4e00-\u9fa5|\d| |\w|·]+', start_html)[0][7:]
# 得到视频的图片
img = re.findall(r'og:image".+g">', start_html)[0]
img = re.findall(r'https.+g', img)[0]
img = requests.get(img).content
# 得到视频评论总页数pn
pn1_url = 'https://api.bilibili.com/x/v2/reply?type=1&pn=1&oid=' + oid
pn1_html = get_html_text(pn1_url, headers)
count = re.findall(r'20,"count":\d+', pn1_html)[0][11:]
pn = int(float(count) / 20)
except:
# traceback.print_exc()
print('200:获取视频信息失败')
return '爬取失败'
try:
print(AV_name + '一共有:' + str(pn) + '页评论。')
pn = int(input('请输入爬取页数:'))
# 得到视频评论的DataFrame信息,并返回。
user_comment_data = get_comment_info(oid, int(pn), headers) # int(pn)
except:
# traceback.print_exc()
print('300:获取评论信息失败')
return '爬取失败'
try:
save_commment_to_json(user_comment_data, AV_name, img)
except:
# traceback.print_exc()
print('400:保存爬取数据失败')
return '爬取失败'
return AV_name
if __name__ == '__main__':
url = input('Waiting For Input url')
main_spider(url)
| true |
a0eee40ddb81034119c1da234e439485404677b8 | Python | SilvesterHsu/CRISPR-Cas9-GuidingGeneEditing | /RecurrentNeuralNetwork/rnn.py | UTF-8 | 5,843 | 2.71875 | 3 | [] | no_license | import torch
import pandas as pd
import glob
import numpy as np
import matplotlib.pyplot as plt
train_data0 = pd.read_csv('data/danrer11_chopchop_train.csv')
test_data = pd.read_csv('data/danrer11_chopchop_test.csv')
print("loading successful!")
# train validation split
train_data = train_data0[:200000]
val_data = train_data0[200000:]
def transform_sequence(seq):
    """One-hot encode a nucleotide string over (A, T, C, G) and flatten.

    Characters outside the alphabet encode as an all-zero row.
    Returns a 1-D numpy array of length len(seq) * 4.
    """
    channel = {'A': 0, 'T': 1, 'C': 2, 'G': 3}
    onehot = np.zeros((len(seq), 4))
    for row, base in enumerate(seq):
        if base in channel:
            onehot[row][channel[base]] = 1
    return onehot.reshape(onehot.shape[0] * onehot.shape[1])
def transform_sequence_rnn(seq):
    """One-hot encode a nucleotide string over (A, T, C, G), keeping 2-D shape.

    Returns a (len(seq), 4) numpy array; unknown characters give zero rows.
    """
    channel = {'A': 0, 'T': 1, 'C': 2, 'G': 3}
    onehot = np.zeros((len(seq), 4))
    for row, base in enumerate(seq):
        if base in channel:
            onehot[row][channel[base]] = 1
    return onehot
class GeneDataset(object):
    """Torch-style dataset of guide RNA sequences and their efficiencies.

    The RNN-shaped encodings and the percent-to-[0,1] scaled targets are
    pre-computed once at construction so __getitem__ stays cheap.
    """
    def __init__(self, data, use_rnn=False):
        self.target_sequence = data['GUIDE'].values
        self.efficiency = data['EFFICIENCY'].values
        self.use_rnn = use_rnn
        # Cache tensors up front (one (len, 4) tensor per guide, one scalar per row).
        self.seqs = [torch.as_tensor(transform_sequence_rnn(s), dtype=torch.float32)
                     for s in self.target_sequence]
        self.effs = [torch.as_tensor(e / 100, dtype=torch.float32)
                     for e in self.efficiency]
    def __getitem__(self, idx):
        # RNN mode returns the cached 2-D encoding; otherwise the flat
        # encoding is built on demand from the raw sequence.
        if self.use_rnn:
            sample = self.seqs[idx]
        else:
            sample = torch.as_tensor(transform_sequence(self.target_sequence[idx]),
                                     dtype=torch.float32)
        return sample, self.effs[idx]
    def __len__(self):
        return len(self.target_sequence)
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class Net(nn.Module):
    """Fully connected regression net: 92 -> 64 -> 32 -> 1, sigmoid output."""
    def __init__(self):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(92, 64)
        self.fc2 = nn.Linear(64, 32)
        self.fc3 = nn.Linear(32, 1)
    def forward(self, x):
        # Flatten everything after the batch dimension.
        h = x.reshape([x.shape[0], -1])
        h = F.relu(self.fc1(h))
        h = F.relu(self.fc2(h))
        # Squash the single output unit into (0, 1).
        return torch.sigmoid(self.fc3(h))
class RNN_Net(nn.Module):
    """LSTM regression net over one-hot nucleotide sequences.

    Input: (batch, seq_len, 4).  Output: (batch, 1) sigmoid-squashed score.
    """
    def __init__(self, dropout_prob=0.2):
        super(RNN_Net, self).__init__()
        self.lstm = nn.LSTM(input_size=4, hidden_size=16, num_layers=1, batch_first=True)
        self.fc = nn.Linear(16, 1)
        self.dropout_prob = dropout_prob
    def forward(self, x):
        x1, _ = self.lstm(x)
        # Mean-pool the LSTM outputs over the time dimension -> (batch, 16)
        #x1 = nn.Dropout(p=self.dropout_prob)(x1)
        x2 = torch.mean(x1, 1)
        # BUG FIX: the original never applied self.fc, so forward() returned a
        # (batch, 16) tensor even though the layer was declared in __init__ and
        # the training loop indexes outputs[:, 0] expecting one score column.
        x3 = self.fc(x2)
        # Normalize the output using sigmoid to (0, 1)
        return torch.sigmoid(x3)
device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = Net().to(device)
#net.train()
#net = net.to(device)
criterion = nn.MSELoss().to(device)
optimizer = optim.SGD(net.parameters(), lr=1, momentum=0.9)
train_set = GeneDataset(train_data, use_rnn=True)
val_set = GeneDataset(val_data, use_rnn=True)
print("Creating dataset successful!")
trainloader = torch.utils.data.DataLoader(train_set, batch_size=256,
shuffle=True, num_workers=2)
valloader = torch.utils.data.DataLoader(val_set, batch_size=256,
shuffle=True, num_workers=2)
training_loss_history = []
validation_loss_history = []
for epoch in range(50): # loop over the dataset multiple times
    running_loss = 0.0
    for i, ele in enumerate(trainloader):
        # get the inputs; data is a list of [inputs, labels]
        seq, eff = ele[0].to(device), ele[1].to(device)
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(seq)
        loss = criterion(outputs[:, 0], eff)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    # average training loss over all batches of this epoch
    train_loss = running_loss / len(trainloader)
    training_loss_history.append(train_loss)
    running_loss = 0.0
    # Validation pass. NOTE(review): gradients are not disabled here (no
    # torch.no_grad()) and the model is never put into eval() mode — confirm
    # whether that is intentional.
    for i, ele in enumerate(valloader):
        seq, eff = ele[0].to(device), ele[1].to(device)
        outputs = net(seq)
        loss = criterion(outputs[:, 0], eff)
        running_loss += loss.item()
    val_loss = running_loss / len(valloader)
    validation_loss_history.append(val_loss)
    print("Epoch {} , training loss: {:3f}, validation loss: {:3f}".format(epoch, train_loss, val_loss))
print('Finished Training')
# train_time[8] = time.time() - now
PATH = "nn.pth"
torch.save(net.state_dict(), PATH)
mse = 0
mae = 0
mse_error = []
eff_list = []
test_set = GeneDataset(test_data, use_rnn=True)
testloader = torch.utils.data.DataLoader(test_set, batch_size=1024, shuffle=True, num_workers=2)
criterion2 = torch.nn.L1Loss().to(device)
#net.eval()
rmae = 0
for i, ele in enumerate(testloader):
# get the inputs; data is a list of [inputs, labels]
seq, eff = ele[0].to(device), ele[1].to(device)
# forward + backward + optimize
outputs = net(seq)
print(outputs[:, 0], eff)
e = criterion(outputs[:, 0], eff)
e2 = criterion2(outputs[:, 0], eff)
rmae += np.sum(np.abs(outputs[:, 0].cpu().detach().numpy() / eff.cpu().detach().numpy()))
mse += e * len(eff)
mae += e2 * len(eff)
mse = mse / len(test_set)
mae = mae / len(test_set)
rmae = rmae / len(test_set)
print("MSE:", mse.cpu().detach().numpy())
print("MAE:", mae.cpu().detach().numpy())
print("RMAE:", rmae)
| true |
848989129be0785c6b89cbc4377ee73ce3c12fba | Python | plusoneee/spy.doc.example | /example05.py | UTF-8 | 705 | 2.515625 | 3 | [] | no_license | from lib.loads import spotify_auth
sp = spotify_auth()
theblackskirt = 'spotify:artist:6WeDO4GynFmK4OxwkBzMW8'
results = sp.artist_related_artists(theblackskirt)
# Print a human-readable summary of every related artist returned by Spotify.
for artist in results['artists']:
    print("| * Name:", artist['name'])
    print("| * Artist ID :", artist['id'])
    print("| * External url:", artist['external_urls']['spotify'])
    print("| * Followers number:", artist['followers']["total"])
    print("| * Genres:", artist['genres'])
    print("| * Popularity:", artist['popularity'])
    print("| * Image Info:")
    # Each artist carries several image renditions at different sizes.
    for img in artist['images']:
        print("| * url:", img['url'])
        print("| * height:", img['height'])
        print("| * width:", img['width'])
    print('||')
7e2de44cdfd7e4eff3d06f7c97d6857704cbf989 | Python | rayliu419/machine_learning | /jupiter_notebook/common/data_loader/diabetes_helper.py | UTF-8 | 995 | 3.015625 | 3 | [] | no_license | import pandas as pd
import os
from sklearn.model_selection import train_test_split
def prepare_diabetes_raw_data_for_task():
    """
    Load the raw Pima diabetes CSV that ships next to this module.
    :return: pandas.DataFrame with all rows and columns unmodified
    """
    print("load data")
    csv_path = os.path.dirname(__file__) + "/input_data/diabetes.csv"
    return pd.read_csv(csv_path)
def prepare_diabetes_data_for_task(line_num=1000):
    """
    Load the Pima diabetes CSV, keep the first *line_num* rows and produce an
    80/20 train/test split of the numpy-converted features and labels.
    :param line_num: number of leading rows to keep, defaults to 1000
    :return: (X, Y, X_train, X_test, Y_train, Y_test) — X/Y keep the pandas
             types, the four splits are numpy ndarrays
    """
    print("load data")
    csv_path = os.path.dirname(__file__) + "/input_data/diabetes.csv"
    frame = pd.read_csv(csv_path)[0:line_num]
    # Columns 0..7 are the features, column 8 is the label.
    X = frame.iloc[:, 0: 8]
    Y = frame.iloc[:, 8]
    X_train_np, X_test_np, Y_train_np, Y_test_np = train_test_split(
        X.to_numpy(), Y.to_numpy(), test_size=0.20, random_state=42)
    return X, Y, X_train_np, X_test_np, Y_train_np, Y_test_np
| true |
00e9b8883cafa879173a3e0825f224afe5090f8e | Python | 10mo8/get_proceedings | /cleaning_data.py | UTF-8 | 2,021 | 3.21875 | 3 | [] | no_license | #スクレイピングしてきたデータの整形を行います
import re
#発言の部分だけを元データから抽出します
# Pass 1: keep only the lines that contain a statement (marked with ○).
with open("original.txt", "r", encoding="utf-8") as fin, open("extract_speech.txt", "w", encoding="utf-8") as fout:
    texts = [text.strip() for text in fin.readlines()]
    for text in texts:
        matchobj = re.search(r"○", text)
        if matchobj is not None:
            # BUG FIX: write a newline after each hit so lines do not run
            # together (pass 2 strips newlines anyway, so output is unchanged).
            fout.write(text + "\n")
# Pass 2: extract only the first sentence (up to 。) of each statement.
with open("extract_speech.txt", "r", encoding="utf-8") as fin, open("extract_fspeech.txt", "w", encoding="utf-8") as fout:
    texts = fin.read()
    texts = texts.replace("\n", "")
    texts = texts.split("○")
    for text in texts:
        matchobj = re.search(r"。", text)
        if matchobj is not None:
            fout.write(text[:matchobj.start()] + "\n")
# Pass 3: pair each utterance with the one that preceded it.
# BUG FIX: pass 2 writes "extract_fspeech.txt" but this pass used to read
# "extractf_speech.txt", so it always failed with FileNotFoundError.
with open("extract_fspeech.txt", "r", encoding="utf-8") as fin, open("conv.txt", "w", encoding="utf-8") as fout:
    texts = [text.strip() for text in fin.readlines()]
    prevname = ""
    prevtext = ""
    for text in texts:
        # A full-width space separates the speaker name from the utterance.
        matchobj = re.search(r" ", text)
        if matchobj is not None:
            # speaker name
            name = text[0:matchobj.start()]
            # utterance body
            sentence = text[matchobj.end():]
            # (a per-speaker filter used to live here)
            #if name == "蓮舫君":
            prevtext = prevtext.replace("、", "")
            prevtext = prevtext.replace("…", "")
            prevtext = re.sub(r"\(.*\)", "", prevtext)
            sentence = sentence.replace("、", "")
            sentence = sentence.replace("…", "")
            sentence = re.sub(r"\(.*\)", "", sentence)
            # Emit the (previous utterance, current utterance) pair.
            fout.write(prevtext+ "," + sentence + "\n")
            prevtext = ""  # vestigial reset from the commented-out name filter
            prevname = name
            prevtext = sentence
dd5536c5ba22e90c5f7c56f9fe6d0dc9f2816362 | Python | ksrntheja/08-Python-Core | /venv/inputoutputfunctions/09EvalDemo.py | UTF-8 | 615 | 3.296875 | 3 | [] | no_license | x = eval(input('Enter:'))
print(type(x))
print(x)
# Enter:10
# <class 'int'>
# Enter:10.5
# <class 'float'>
# Enter:True
# <class 'bool'>
# Enter:[10, 20, 30]
# <class 'list'>
# Enter:(10, 20, 30)
# <class 'tuple'>
# Enter:(10)
# <class 'int'>
# Enter:(10,)
# <class 'tuple'>
# Enter:10 + 20 + 30
# <class 'int'>
# 60
# Enter:10+20/3**4//5*40
# <class 'float'>
# 10.0
# Enter:Theja
# Traceback (most recent call last):
# File "/Code/venv/inputoutputfunctions/09EvalDemo.py", line <>, in <module>
# x = eval(input('Enter:'))
# File "<string>", line <>, in <module>
# NameError: name 'Theja' is not defined
| true |
4fa54c9f1f1a4ab4b6e1632b15ffade73dd745e9 | Python | lkp1996/pingpong | /pingpongproject/pingpong/models.py | UTF-8 | 975 | 3.203125 | 3 | [] | no_license | from django.db import models
class Player(models.Model):
    # Unique display name identifying a ping-pong player.
    name = models.CharField(max_length=50, unique=True)
    # NOTE: __unicode__ is the Python 2 Django idiom; under Python 3 / modern
    # Django this would be __str__.
    def __unicode__(self):
        return self.name
class Game(models.Model):
    # The two participants; related_name lets a Player query games per role.
    player_one = models.ForeignKey(Player, related_name='games_as_p1')
    player_two = models.ForeignKey(Player, related_name='games_as_p2')
    score_one = models.IntegerField()
    score_two = models.IntegerField()
    created = models.DateTimeField(auto_now_add=True)
    winner = models.ForeignKey(Player, related_name='games_as_winner')
    # Derive the winner from the scores just before persisting.
    # NOTE(review): a tied score is awarded to player_two — confirm ties are
    # impossible in this game or intended to resolve this way.
    def save(self, *args, **kwargs):
        if self.score_one > self.score_two:
            self.winner = self.player_one
        else:
            self.winner = self.player_two
        super(Game, self).save(*args, **kwargs)
    # Python 2 string form, e.g. "A vs B score: 11 - 9 date: ...".
    def __unicode__(self):
        return "{0} vs {1} score: {2} - {3} date: {4}".format(
            self.player_one, self.player_two, self.score_one, self.score_two,
            self.created
        )
4da0c7d8b9dae66ad0e4705cee85317c1c152bb2 | Python | swryan/util | /github_handler.py | UTF-8 | 1,899 | 2.53125 | 3 | [] | no_license | #!/usr/bin/env python
from __future__ import print_function
from pprint import pprint
import os
import sys
import tornado.ioloop
import tornado.web
import json
from pivotal import Pivotal
#
# set up interface to Pivotal
#
token = os.getenv('PIVOTAL_TOKEN')
if not token:
msg = 'Please provide your Pivotal API token via an environment variable: PIVOTAL_TOKEN\n' \
'Your API Token can be found on your Profile page: https://www.pivotaltracker.com/profile'
raise RuntimeError(msg)
pivotal = Pivotal(project='1885757', token=token)
class GitHubHandler(tornado.web.RequestHandler):
    def get(self):
        """
        Health-check endpoint: respond to a plain GET with a greeting.
        """
        self.write("Well, Hello there!")
    def post(self):
        """
        Receive a GitHub webhook event; when a pull_request event reports a
        merged close, ask Pivotal to mark the matching story delivered.
        """
        print('--------------------------------------')
        print('POST received:', self.request.headers.get('X-GitHub-Event'))
        print('------- HEADERS -------')
        pprint(dict(self.request.headers))
        print('------- BODY -------')
        data = json.loads(self.request.body)
        pprint(list(data.keys()))
        # pprint(data)
        evt = self.request.headers.get('X-GitHub-Event')
        if evt == 'pull_request':
            print('ohhhh... a pull request!!')
            # NOTE: the body was already parsed above; this re-parse is redundant.
            data = json.loads(self.request.body)
            print('action:', data['action'])
            print('merged:', data['pull_request']['merged'])
            # Only a *merged* close should deliver the Pivotal story.
            if data['action'] == 'closed' and data['pull_request']['merged']:
                print('ask pivotal to deliver PR #', data['number'])
                pivotal.deliver(pull=data['number'])
        sys.stdout.flush()
if __name__ == "__main__":
app = tornado.web.Application([
(r"/", GitHubHandler),
], debug=True)
app.listen(23997)
tornado.ioloop.IOLoop.current().start()
| true |
06a0dc68e91e244d63b1014ffcf503b45257eaa4 | Python | rubysash/sslcheck | /sslexpires.py | UTF-8 | 3,125 | 2.75 | 3 | [
"MIT"
] | permissive | '''
this script tests a list of urls for ssl expiry times
if they are self signed, it errors
if it's expired, it warns in red
if it's expiring in less than 20 days, it warns in yellow
if it is normal, not expired it prints results in grey
'''
import socket
import ssl
import datetime
# colors https://en.wikipedia.org/wiki/ANSI_escape_code
from colorama import init
init()
def ssl_expiry_datetime(hostname):
    """
    Connect to *hostname*:443, perform the TLS handshake and return the
    certificate's 'notAfter' expiry as a naive datetime.

    Raises ssl.SSLError / socket errors for unreachable hosts or certificates
    that fail default verification (e.g. self-signed).
    """
    ssl_date_fmt = r'%b %d %H:%M:%S %Y %Z'
    ctxt = ssl.create_default_context()
    # BUG FIX: the original never closed the socket; the context manager
    # guarantees it is released even when connect or the handshake fails.
    with ctxt.wrap_socket(
        socket.socket(socket.AF_INET),
        server_hostname=hostname,
    ) as conn:
        # 3 second timeout because Lambda has runtime limitations
        conn.settimeout(3.0)
        conn.connect((hostname, 443))
        ssl_info = conn.getpeercert()
    # parse the string from the certificate into a Python datetime object
    return datetime.datetime.strptime(ssl_info['notAfter'], ssl_date_fmt)
def ssl_valid_time_remaining(hostname):
    """Return a timedelta from now (UTC) until the host's certificate expires."""
    expiry = ssl_expiry_datetime(hostname)
    now = datetime.datetime.utcnow()
    return expiry - now
def ssl_expires_in(hostname, buffer_days=20):
    """
    Classify a host's certificate expiry.

    :return: 1 when already expired, 2 when it expires within *buffer_days*
             (i.e. should be reissued), 3 when everything is fine
    """
    remaining = ssl_valid_time_remaining(hostname)
    if remaining < datetime.timedelta(days=0):
        return 1  # certificate has already expired
    if remaining < datetime.timedelta(days=buffer_days):
        return 2  # inside the renewal buffer
    return 3  # plenty of time left
# fixme: move list to json file
uris = {
'wellsfargo.com' : 443,
'rubysash.com' : 443,
'github.com' : 443
#'qvsslrca2-ev-r.quovadisglobal.com' : 443, # passes, but is revoked fixme
}
# Check days remaining for every host and print a colour-coded status line.
passed = 1
for u in uris:
    # reset the line colour before each host (\033[93m is yellow)
    print('\033[93m', end='')
    try:
        ssl_valid_time_remaining(u)
    except:
        # handshake/verification failed (unreachable, self-signed, ...)
        print('\033[31m'+"FAIL: "+'\033[31m',"?? days, 00:00:00.000000","\t",u)
        passed = 0
    else:
        if ssl_expires_in(u) == 1:
            # red: certificate already expired
            print('\033[31m'+"EXP!: "+'\033[31m',ssl_valid_time_remaining(u),"\t",'\033[31m'+u)
        elif ssl_expires_in(u) == 2:
            # yellow: expires within the renewal buffer
            print('\033[33m'+"PASS: "+'\033[33m',ssl_valid_time_remaining(u),"\t",'\033[33m'+u)
        else:
            # grey: healthy
            print('\033[90m'+"PASS: "+'\033[90m',ssl_valid_time_remaining(u),"\t",'\033[90m'+u)
if passed == 1:
    print('\033[32m'+"ALL PASSED"+'\033[31m')
else:
    print('\033[31m'+"SOMETHING WRONG"+'\033[31m')
'''
Fix me:
is certificate revoked?
ex: https://revoked.grc.com
does domain name match?
is certificate expired?
ex: https://qvica1g3-e.quovadisglobal.com/
is it self signed?
ex: https://self-signed.badssl.com
is the RC4 cipher outdated?
ex: https://rc4.badssl.com/
is the DH key weak?
ex: https://dh480.badssl.com/
Does it pass vuln checks? (testssl.sh examples)
heartbleed, CCS, Ticketbleed, ROBOT, CRIME, Poodle, Logjam, Drown, Freak, Sweet32, Breach, Secure Fallback, Beast, etc?
Does it allow SSL v3?
What protocol does it use? TLSv1.2?
''' | true |
bef20a06a65f368cfc846932348294c74b2354ea | Python | benjaminocampo/clustering_planning | /notebooks/clustering/embeddings.py | UTF-8 | 1,521 | 2.578125 | 3 | [] | no_license | # %%
import matplotlib.pyplot as plt
import pandas as pd
from gensim.models import FastText
from sklearn.cluster import KMeans
from utils import (get_embedding, get_embedding_2DTSNE, get_object_types,
tokenize_plan, plot_vocabulary_kmeans,
plot_vocabulary_2DTSNE)
TRAIN_URL = "https://www.famaf.unc.edu.ar/~nocampo043/training-instances.parquet.gzip"
TEST_URL = "https://www.famaf.unc.edu.ar/~nocampo043/evaluation-instances.parquet.gzip"
df_train = pd.read_parquet(TRAIN_URL)
df_test = pd.read_parquet(TEST_URL)
# %% [markdown]
# ## FastText
# %%
sentences = df_train["relaxed_plan"].to_numpy()
# Train FastText word vectors over the tokenized relaxed plans.
fasttext = FastText(sentences=[tokenize_plan(s) for s in sentences],
                    min_count=1,
                    vector_size=100,
                    window=7)
# %% [markdown]
# ## TSNE
# %%
model = fasttext.wv
vocab = fasttext.wv.index_to_key
# Full embedding matrix plus a 2-D t-SNE projection for plotting.
embedding = get_embedding(vocab, model)
embedding_TSNE = get_embedding_2DTSNE(vocab, model)
X = embedding.drop(columns=["word"]).to_numpy()
X_TSNE = embedding_TSNE.drop(columns=["word"]).to_numpy()
# %% [markdown]
# ## KMeans
# %%
# Cluster the full-dimensional embeddings into 4 groups.
kmeans = KMeans(n_clusters=4)
kmeans.fit(X)
# %% [markdown]
# ## Object type
# %%
obj_types, obj_indices = get_object_types(vocab)
# %% [markdown]
# ## Plots
# %%
# Side by side: KMeans clusters (left) and object types (right), in t-SNE space.
_, (ax_kmeans, ax_tsne) = plt.subplots(1, 2, figsize=(30, 10))
plot_vocabulary_2DTSNE(X_TSNE, vocab, obj_types, obj_indices, ax_tsne)
plot_vocabulary_kmeans(X_TSNE, kmeans, ax_kmeans)
ax_kmeans.grid()
ax_tsne.grid()
# %% [markdown]
# ## | true |
025e683afb1dedec97b9bcd68141bda0c779b40b | Python | GhadeerHS/FullStack-DojoBootcamp | /Python_Stack/_python/OOP/User.py | UTF-8 | 976 | 3.671875 | 4 | [] | no_license | class User:
def __init__(self, name, email):
self.name = name
self.email = email
self.account_balance = 0
def make_deposit(self, amount):
self.account_balance += amount
def make_withdrawal(self, amount):
self.account_balance -= amount
def display_user_balance(self):
# print("User:"+ self.name + "Balance:" + self.account_balance)
print "User: {}, Balance: {}".format(self.name,self.account_balance)
return self
# Demo: create three users, run deposits/withdrawals, print final balances.
user1=User("Ghado","gh@gmail.com")
user2=User("Jan","jan@gmail.com")
user3=User("hui","hui@gmail.com")
# user1: +100 +200 +50 -150 = 200
user1.make_deposit(100)
user1.make_deposit(200)
user1.make_deposit(50)
user1.make_withdrawal(150)
user1.display_user_balance()
# user2: +200 +200 -150 -10 = 240
user2.make_deposit(200)
user2.make_deposit(200)
user2.make_withdrawal(150)
user2.make_withdrawal(10)
user2.display_user_balance()
# user3: +500 -150 -10 -10 = 330
user3.make_deposit(500)
user3.make_withdrawal(150)
user3.make_withdrawal(10)
user3.make_withdrawal(10)
user3.display_user_balance()
a389e089d565ffadecbb9023f7dee80e109dfef8 | Python | rbnelr/py_vector_lib | /py_vector_lib/vector.py | UTF-8 | 8,522 | 3.484375 | 3 | [] | no_license | import operator
import math
def isvec(obj):
    """True when *obj* behaves like a Vector, i.e. len(obj) succeeds."""
    try:
        _ = len(obj)
    except:
        return False  # len() unsupported -> not vector-like
    return True
class Vector(tuple):
    """Immutable n-dimensional vector based on tuple.

    All arithmetic and comparison operators are applied elementwise
    (numpy style), so e.g. comparing two Vectors yields a Vector of
    per-component results, not a single bool.
    """
    __slots__ = ()
    def __repr__(self):
        return "Vector%d(%s)" % (len(self), ", ".join(repr(x) for x in self))
    def __str__(self): return repr(self)
    # number of components
    size = property(lambda self: len(self))
    # sadly this: v.x = 5 is impossible in python if i want to base my vectors on immutable types
    # v2(1) -> v2(1,1)
    # v2(v2(1,2))
    # or v2((1,2))
    # or v2([1,2])
    # etc. -> v2(2,3)
    # v2(2,3) -> v2(2,3)
    # v3(v2(1,2),3) -> v3(1,2,3)
    # v3(v2(1,2),3) -> v3(1,2,3)
    # v3(1,v2(2,3)) # not allowed
    def __new__(cls, *args, size=None):
        """Build a Vector from scalars, an iterable, or scalar+size; *size*
        optionally down-casts (truncate) or up-casts (pad 0s, trailing 1,
        i.e. homogeneous-coordinate style)."""
        l = len(args)
        if l == 1:
            if isvec(args[0]):
                arr = args[0] # tuple/list/etc. passed in
            else:
                if not size:
                    raise ValueError("Vector(scalar): single scalar for all size, size needs to be specified")
                arr = (args[0],) * size # single scalar for all size
        elif all(not isvec(arg) for arg in args):
            arr = args # all scalars specified
        elif l > 1:
            if not isvec(args[0]) or len(args[0]) < 2:
                raise ValueError("Vector(Vector, scalars): Vector needs to be at least v2")
            arr = args[0] # tuple/list/etc. as first arg
            arr = tuple(arr) + args[1:]
        else:
            raise ValueError("Vector() needs at least one argument")
        if size and size < len(arr):
            arr = arr[:size] # downcast
        elif size and size > len(arr): # upcast
            remain = size -len(arr)
            arr = list(arr)
            if remain > 1:
                arr += [0 for i in range(remain -1)]
            arr.append(1)
        return tuple.__new__(cls, arr)
    # component accessors; None when the vector is too small to have them
    x = property(lambda self: self[0] if 0 < self.size else None)
    y = property(lambda self: self[1] if 1 < self.size else None)
    z = property(lambda self: self[2] if 2 < self.size else None)
    w = property(lambda self: self[3] if 3 < self.size else None)
    # with this way of implementing the operators a 'v2() + v2()' is ~60x slower than a tuple concat, which seems ridiculous to me
    # this probably is because of the creation of temporary tuples and lists (which i think is implossible to prevent while my vectors are based on immutable types)
    # function call overhead might also be a source of big overhead
    # the only way of making it faster (reduce temp tuples, lists and func calls) (but still slower than tuple concat) is to write the operators for each Vector size and each op manually
    # TODO: is there any way of having this be abstract but still fast?
    # i tried to use cython (in visual studio on windows), which i eventually got to work, but it seems like a pain to work with, and after updating my python version i started to get a crash with cython, so i abandoned this for now
    # unary
    def elementwise_unary(op):
        """Factory: wrap unary *op* so it applies to every component."""
        def f(self):
            return self.__class__([op(a) for a in self])
        return f
    # binary
    def elementwise(op):
        """Factory: wrap binary *op*; the other operand may be a vector of
        equal size (componentwise) or a scalar (broadcast)."""
        def f(self, other=None): # optional second argument for __round__(self[, ndigits])
            if isvec(other):
                if len(self) != len(other):
                    return NotImplemented
                return self.__class__([op(a,b) for a,b in zip(self,other)])
            else:
                return self.__class__([op(a, other) for a in self])
        return f
    def relementwise(op):
        """Factory: reflected variant of elementwise (self on the right)."""
        def f(self, other=None):
            if isvec(other):
                if len(self) != len(other):
                    return NotImplemented
                return self.__class__([op(b,a) for a,b in zip(self,other)])
            else:
                return self.__class__([op(other, a) for a in self])
        return f
    # ternary
    def elementwise_ternary(op):
        """Factory: wrap ternary *op* (used for pow(base, exp, mod))."""
        def f(self, other, modulo=None):
            if isvec(other):
                if len(self) != len(other):
                    return NotImplemented
            else:
                other = (other,) * len(self)
            if isvec(modulo):
                if len(self) != len(modulo):
                    return NotImplemented
            else:
                modulo = (modulo,) * len(self)
            return self.__class__([op(a,b,c) for a,b,c in zip(self,other,modulo)])
        return f
    def divmod(self, other): # elementwise divmod would return Vector of tuples, we want tuple of vectors
        # note: 'divmod' inside this method body resolves to the builtin,
        # not to this method (class scope is not visible from methods)
        res = Vector.elementwise(divmod)(self, other)
        d,m = zip(*res)
        return self.__class__(d), self.__class__(m)
    def rdivmod(self, other):
        res = Vector.relementwise(divmod)(self, other)
        d,m = zip(*res)
        return self.__class__(d), self.__class__(m)
    # comparison operators (elementwise -> Vector of bools)
    __lt__ = elementwise(operator.lt)
    __le__ = elementwise(operator.le)
    __eq__ = elementwise(operator.eq)
    __ne__ = elementwise(operator.ne)
    __gt__ = elementwise(operator.gt)
    __ge__ = elementwise(operator.ge)
    # arithmetic / bitwise operators
    __add__ = elementwise(operator.add)
    __sub__ = elementwise(operator.sub)
    __mul__ = elementwise(operator.mul)
    #del __matmul__
    __truediv__ = elementwise(operator.truediv)
    __floordiv__ = elementwise(operator.floordiv)
    __mod__ = elementwise(operator.mod)
    __divmod__ = divmod
    __pow__ = elementwise_ternary(pow)
    __lshift__ = elementwise(operator.lshift)
    __rshift__ = elementwise(operator.rshift)
    __and__ = elementwise(operator.and_)
    __xor__ = elementwise(operator.xor)
    __or__ = elementwise(operator.or_)
    # reflected operators (scalar/other on the left)
    __radd__ = relementwise(operator.add)
    __rsub__ = relementwise(operator.sub)
    __rmul__ = relementwise(operator.mul)
    __rtruediv__ = relementwise(operator.truediv)
    __rfloordiv__ = relementwise(operator.floordiv)
    __rmod__ = relementwise(operator.mod)
    __rdivmod__ = rdivmod
    __rpow__ = relementwise(operator.pow)
    __rlshift__ = relementwise(operator.lshift)
    __rrshift__ = relementwise(operator.rshift)
    __rand__ = relementwise(operator.and_)
    __rxor__ = relementwise(operator.xor)
    __ror__ = relementwise(operator.or_)
    # unary operators
    __neg__ = elementwise_unary(operator.neg)
    __pos__ = elementwise_unary(operator.pos)
    __abs__ = elementwise_unary(operator.abs)
    __invert__ = elementwise_unary(operator.invert)
    #__complex__
    #__int__
    #__float__
    #__index__
    __round__ = elementwise(round)
    __trunc__ = elementwise_unary(math.trunc)
    __floor__ = elementwise_unary(math.floor)
    __ceil__ = elementwise_unary(math.ceil)
    def is_vec(self, size):
        """Raise ValueError unless *self* has exactly *size* components."""
        valid = False
        try:
            valid = len(self) == size
        except:
            pass
        if not valid:
            raise ValueError("Vector must be of size %d for operation ('%s')" % (size, repr(self)))
    def same_vecs(self, other):
        """Raise ValueError unless *self* and *other* have equal sizes."""
        valid = False
        try:
            valid = len(self) == len(other)
        except:
            pass
        if not valid:
            raise ValueError("Vectors must be of same size for operation ('%s', '%s')" % (repr(self), repr(other)))
    def are_vecs(self, other, size):
        """Raise ValueError unless both vectors have exactly *size* components."""
        valid = False
        try:
            valid = len(self) == size and len(other) == size
        except:
            pass
        if not valid:
            raise ValueError("Vectors must both be of size %d for operation ('%s', '%s')" % (size, repr(self), repr(other)))
class v2(Vector):
    """Two-component Vector; construction always casts to size 2."""
    def __new__(cls, *args):
        return Vector.__new__(cls, *args, size=2)
    def __repr__(self):
        return "v2(%s)" % ", ".join(repr(c) for c in self)
class v3(Vector):
    """Three-component Vector; construction always casts to size 3."""
    def __new__(cls, *args):
        return Vector.__new__(cls, *args, size=3)
    def __repr__(self):
        return "v3(%s)" % ", ".join(repr(c) for c in self)
class v4(Vector):
    """Four-component Vector; construction always casts to size 4."""
    def __new__(cls, *args):
        return Vector.__new__(cls, *args, size=4)
    def __repr__(self):
        return "v4(%s)" % ", ".join(repr(c) for c in self)
shorthand_vectors = { 2:v2, 3:v3, 4:v4 }
def length_sqr(self):
    """Squared Euclidean length, i.e. dot(self, self) without the sqrt."""
    squared = self * self
    return sum(squared)
def length(v):
    """Euclidean length (magnitude) of *v*."""
    sq = sum(v * v)
    return math.sqrt(sq)
def normalize(v):
    """Return *v* scaled to unit length; a zero magnitude is not guarded
    (see normalize_or_zero for the guarded variant)."""
    mag = length(v)
    return v / mag
def normalize_or_zero(v):
    """Return v scaled to unit length, or scalar 0 if v has zero length.

    NOTE: the zero case returns the scalar 0 (not a zero vector); kept for
    backward compatibility with existing callers.
    """
    # Renamed from `len`, which shadowed the builtin len() inside this function.
    magnitude = length(v)
    if magnitude == 0:
        return 0
    return v / magnitude
def dot(l, r):
    """Dot product of two equal-length vectors."""
    Vector.same_vecs(l, r)
    products = l * r
    return sum(products)
def cross(l, r):
    """Cross product.

    cross(v3, v3): the usual 3D cross product.
    cross(v2, v2): 2D "cross product hack" - equivalent to
    cross(v3(l, 0), v3(r, 0)).z, i.e. the signed magnitude of the cross
    product of the two vectors embedded in the z=0 plane.

    :raises ValueError: if the vectors differ in size or are not 2D/3D
    """
    Vector.same_vecs(l, r)
    if len(l) == 3:
        return v3( l.y * r.z - l.z * r.y,
                   l.z * r.x - l.x * r.z,
                   l.x * r.y - l.y * r.x )
    elif len(l) == 2:
        return l.x * r.y - l.y * r.x
    else:
        # Bug fix: the original referenced undefined names `size` and `self`
        # and used a single '%s' placeholder with two arguments, so this
        # branch raised NameError/TypeError instead of the intended ValueError.
        raise ValueError("Vectors must be of size 2 or 3 for cross product ('%s', '%s')" % (repr(l), repr(r)))
def vmax(l, *args):
    """Component-wise maximum of l and any further vectors of the same length."""
    return l.__class__([max(*group) for group in zip(l, *args)])
def vmin(l, *args):
    """Component-wise minimum of l and any further vectors of the same length."""
    return l.__class__([min(*group) for group in zip(l, *args)])
def clamp(v, a,b):
    """Clamp each component of v into [a, b] (component-wise via vmax/vmin)."""
    bounded_below = vmax(v, a)
    return vmin(bounded_below, b)
def lerp(a, b, t):
    """Linear interpolation: returns a at t == 0 and b at t == 1 (no clamping)."""
    weight_a = a * (1-t)
    weight_b = b * t
    return weight_a + weight_b
def map(x, in_a, in_b, out_a=0, out_b=1):
    """Remap x from the range [in_a, in_b] onto [out_a, out_b] (no clamping).

    NOTE: this module-level name shadows the builtin map(); kept as-is for
    backward compatibility with existing callers.
    """
    t = (x - in_a) / (in_b - in_a)
    return t * (out_b - out_a) + out_a
def rotate90(v): # rotate v2 by 90 degrees counter clockwise
    """Rotate a 2-vector 90 degrees counter-clockwise: (x, y) -> (-y, x)."""
    Vector.is_vec(v, 2)
    return v2(-v.y, v.x)
def rotate_unit(ang):
    """Build a rotation Matrix for angle `ang` (radians) from its sin/cos.

    NOTE(review): Matrix is defined elsewhere in this module; presumably its
    constructor derives the full 2x2 rotation from (cos, sin) - confirm there.
    """
    s,c = math.sin(ang), math.cos(ang)
    return Matrix(c, s)
| true |
e01e11f9e6ac2773780608f4775cbaa1d6a83aed | Python | sightful-graduation-project/take-command | /command.py | UTF-8 | 1,614 | 3.234375 | 3 | [] | no_license | import speech_recognition as speech_recog
import text_to_speech
import speech_to_text
from playsound import playsound
def get_command_type (text):
    """Map recognized speech text to a command name.

    Returns "Object Detection" or "Translation", or None when no known
    keyword occurs in the text.  Object-detection keywords take precedence.
    """
    if any(keyword in text for keyword in ("see", "front", "seeing")):
        return "Object Detection"
    if any(keyword in text for keyword in ("translation", "translate", "language")):
        return "Translation"
    return None
#Loop to try to get the command from the user, breaks only if it gets its type
def take_command ():
    """Interactively listen for a voice command until one is understood.

    Plays a greeting once, then loops: record speech, classify it with
    get_command_type(), and either confirm and return the command name,
    ask the user to repeat, or (on API failure) play an error sound and
    give up.  Returns the command string, or None if the speech API was
    unavailable (the `break` path falls out of the loop).
    """
    response = None
    first_time = True
    while True:
        if first_time:
            # Greet only on the first iteration of the listen loop.
            text_to_speech.play_text("Sightful is here for you, how can I help? I am listening")
            first_time = False
        response = speech_to_text.recognize_speech_from_microphone()
        #Speech recognition done successfully
        if (response["error"] is None):
            command = get_command_type(response["transcription"])
            if command is not None:
                text_to_speech.play_text("You asked for: " + command)
                return command
            else:
                # Transcription succeeded but matched no known command.
                text_to_speech.play_text("I did not get what you say, can you please repeat it? I am Listening.")
        elif(response["error"] == "API unavailable"):
            # No network / recognition service: signal with a sound and stop trying.
            playsound('internet_error.mp3')
            break
        elif(response["error"] == "Unable to recognize speech"):
            text_to_speech.play_text("I did not get what you say, can you please repeat it? I am Listening.")
if __name__ == "__main__":
    # Run one interactive listen session and echo the chosen command
    # (prints None if the speech API was unavailable).
    print(take_command())
| true |
b8223f96c81f827a6af60e9fab5fa89c04a0dc34 | Python | stoneeve415/LintCode | /_1671_play_game.py | UTF-8 | 476 | 3.46875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
@title: 1671 玩游戏
@author: evestone
"""
def playGames(A):
    """LintCode 1671: binary-search the minimum number of rounds.

    Searches m in [0, 2*max(A)] for the first value whose spare capacity
    sum(max(m - a, 0) for a in A) is not exceeded by m itself, then returns
    at least max(A).
    """
    highest = max(A)
    lo, hi = 0, highest * 2
    while lo < hi:
        mid = (lo + hi) // 2
        spare = sum(max(mid - need, 0) for need in A)
        if mid > spare:
            lo = mid + 1
        else:
            hi = mid
    return max(lo, highest)
if __name__ == '__main__':
    # A = [84, 53]
    A = [2, 2, 4, 3]
    print(playGames(A))  # expected output: 4
99e1e7bcbdd4c44acb8d63beb18d8fefb71cfe90 | Python | GeneSlow/Hello_python | /5.3 if elif else 语句.py | UTF-8 | 674 | 4.1875 | 4 | [] | no_license | '''
age=13
if age<10:
price=0
elif age == 10:
price=10
else:
price=20
print('Hello,dear customer,your price of a ticket is '+'price'+'yuan.' )
>>> Hello,dear customer,your price of a ticket is priceyuan.
# 第一个问题在于冒号 第二个问提在与等号 用双等号
# 实验证明 ‘’无法直接把变量转换成字符串 还是需要用str
print('Hello,dear a, yourjjjj is '+str(price)+' yuan.')
'''
# Experiment 2: store the price values directly as strings so they can be
# concatenated with '+' without an explicit str() conversion.
age=13
if age<10:
    price='0'
elif age == 10:
    price='10'
else:
    price='20'
print('Hello,dear a, yourjjjj is '+price+' yuan.')
# The result shows this works: string + string concatenates without str().
| true |
c660ac8670431cc91cef29599816d7b21f2efe3e | Python | gxhrid/PyTorch-MNSIT-Model | /src/training/tuning/search/search.py | UTF-8 | 954 | 3.078125 | 3 | [] | no_license | import abc
class Search(abc.ABC):
    """Abstract base class for model hyper-parameter search strategies.

    Concrete subclasses implement get_tuned_model() using the stored model
    factory, candidate parameter values, scoring function and data loaders.
    """

    def __init__(self, model_factory, param_values, scoring_func, train_loader, validation_loader):
        """Store the ingredients every tuning strategy needs.

        :param model_factory: callable that returns a new model instance for given hyper params
        :param param_values: dict of model/training hyper params and their possible values
        :param scoring_func: callable used to measure a model's performance
        :param train_loader: loader that yields the training data
        :param validation_loader: loader that yields the validation data
        """
        self.model_factory = model_factory
        self.param_values = param_values
        self.scoring_func = scoring_func
        self.train_loader = train_loader
        self.validation_loader = validation_loader

    @abc.abstractmethod
    def get_tuned_model(self):
        """Run the search and return the best-performing model found."""
        raise NotImplementedError()
| true |
7dc966234b3785f7c93e7e0b7533412dd90f91b9 | Python | BiswasRajarshi/Explainable_ObjectClassification | /utility/utils.py | UTF-8 | 2,708 | 2.515625 | 3 | [] | no_license | """
Script containing the utilities needed for the
program.
"""
import os
import copy
import torch
import torch.hub
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import models, transforms
import cv2
import matplotlib.cm as cm
import numpy as np
def device_signature(cuda_flag):
    """Select the torch device and print its name.

    :param cuda_flag: request CUDA; it is used only when actually available
    :return: torch.device("cuda") or torch.device("cpu")
    """
    if cuda_flag and torch.cuda.is_available():
        device = torch.device("cuda")
        gpu_index = torch.cuda.current_device()
        print("Computing Device: {:}".format(torch.cuda.get_device_name(gpu_index)))
    else:
        device = torch.device("cpu")
        print("Computing Device: {:}".format(device))
    return device
def access_classlabels():
    """Parse ./accessories/synset_words.txt into a list of class labels.

    Each non-empty line is split once on a space (dropping the synset id),
    then the first comma-separated name is kept with spaces replaced by
    underscores.
    """
    classlabels = []
    with open('./accessories/synset_words.txt', 'r') as infile:
        lines = infile.readlines()
    for raw in lines:
        names = raw.strip().split(" ", 1)[1]
        label = names.split(", ", 1)[0].replace(" ", "_")
        classlabels.append(label)
    return classlabels
def write_gradient(filename, gradient):
    """Save a channels-first gradient tensor as an 8-bit image.

    The gradient is shifted and rescaled into [0, 255] before writing.
    :param filename: output image path
    :param gradient: torch tensor of shape (C, H, W)
    """
    grad_np = gradient.cpu().numpy().transpose(1, 2, 0)
    grad_np -= grad_np.min()
    # Bug fix: the original divided by the maximum computed BEFORE the shift,
    # so the output was not normalized to [0, 1] whenever min != 0.  Recompute
    # the peak after shifting, guarding against an all-constant gradient.
    peak = grad_np.max()
    if peak > 0:
        grad_np /= peak
    grad_np *= 255.0
    cv2.imwrite(filename, np.uint8(grad_np))
def write_gradcam(filename, gcam, raw_image, paper_cmap=False):
    """Overlay a Grad-CAM heat map on an image and write the result to disk.

    :param filename: output image path
    :param gcam: torch tensor heat map, expected in [0, 1]
    :param raw_image: BGR image array to blend with
    :param paper_cmap: if True, alpha-blend as in the Grad-CAM paper;
        otherwise average the colormap with the raw image
    """
    gcam = gcam.cpu().numpy()
    cmap = cm.jet_r(gcam)[..., :3] * 255.0
    if paper_cmap:
        alpha = gcam[..., None]
        gcam = alpha * cmap + (1 - alpha) * raw_image
    else:
        # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float is the documented replacement.
        gcam = (cmap.astype(float) + raw_image.astype(float)) / 2
    cv2.imwrite(filename, np.uint8(gcam))
def write_sensitivity(filename, maps):
    """Render a signed sensitivity map with the bwr_r colormap and save it at 224x224."""
    raw = maps.cpu().numpy()
    # Symmetric scale so that zero lands on the colormap midpoint.
    scale = max(raw[raw > 0].max(), -raw[raw <= 0].min())
    normed = raw / scale * 0.5
    normed += 0.5
    colored = cm.bwr_r(normed)[..., :3]
    colored = np.uint8(colored * 255.0)
    resized = cv2.resize(colored, (224, 224), interpolation=cv2.INTER_NEAREST)
    cv2.imwrite(filename, resized)
def access_models():
    """Return the sorted lowercase callable factory names exported by torchvision.models."""
    candidates = (name for name in models.__dict__
                  if name.islower() and not name.startswith("__") and callable(models.__dict__[name]))
    return sorted(candidates)
def image_preprocessing(image_path):
    """Load an image, resize it to 224x224, and build a normalized RGB tensor.

    :param image_path: path to the input image
    :return: (normalized tensor, raw BGR image as read/resized by OpenCV)
    """
    raw_image = cv2.imread(image_path)
    raw_image = cv2.resize(raw_image, (224,) * 2)
    pipeline = transforms.Compose(
        [
            transforms.ToTensor(),
            # ImageNet channel statistics.
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    # OpenCV reads BGR; flip the channel axis to RGB before the transform.
    image = pipeline(raw_image[..., ::-1].copy())
    return image, raw_image
| true |
d071c63f2c6711cdb79f4f1b4bf914f046564ce3 | Python | andrejev/robotik | /obstancle.py | UTF-8 | 6,020 | 3.4375 | 3 | [] | no_license | from getDistance import *
from drive import *
import time
from math import *
#-----------------INTRODUCTION-------------------------------------------#
#Principle of work: The latest version of the Mechanised Resistance Autonomous Vehicle(MERAV 5000S) is designed to overcome simple mechanical obstancles like
#Trees, Rocks or, for that matter, boxes, put in MERAVs way by evil enemy fighters, or for test purposes.
#MERAVs sensor-array consists of two ultrasonic devices designed to measure distances to solid objects with great accuracy of about an inch and an angle of measurement of about 15 degrees.
#These sensors are aligned slightly outwards to have little overlap. If an obstancle is located in MERAVs path, it avoids it by steering. To decide the appropriate direction, it compares
#distance data from both sensors and steers towards the bigger distance. To avoid problems with objects right in front of the MERAV 5000 sticks with its steering decision until the
#signal leading towards the opposite side overcomes a certain level, compared to the second sensor. This is necessary to guarantee that the measurement error does not lead to sudden changes
#when facing objects directly in front of MERAV. These could otherwise cause the MERAV 5000 to hit the obstancle rather than avoid it.
#Previous attempts of equipping the MERAV 5000 with intelligent algorithms to distinguish obstancles from false data and taking smart driving-decisions proved to be unstable, and
#have therefore been discarded in favor of this approach, that is more simple, but apparently more effective as well.
# bullshitdist [m]: approximate upper bound of a meaningful sensor measurement;
# also used as the substitute value when a reading is invalid.
bullshitdist = 1.5
#measrange gives Number of measurements stored in the watchlist
measrange = 3
#movefactor gives the strength of steering to overcome obstancles. The path-curvature scales linearly with the distance to the obstancle.
movefactor = 2
#deaththreshold gives the minimum distance to an obstancle. If it comes too close, safety is no longer granted (as if it is otherwise) and the car stops.
deaththreshold = 0.3
#bias gives the minimum difference the two sensors have to measure if an obstancle is believed to be on the other side than before. This is to avoid sudden changes,
#uncertainties and eventual death due to hitting obstancles right in front of the car.
bias = 0.05
class Watcher():
    """Obstacle-avoidance controller built on two ultrasonic distance sensors.

    Keeps a short rolling history of left/right readings and steers the car
    towards the larger measured distance (see the module header for the
    overall strategy).  NOTE(review): this file mixes print() calls with
    Python 2 print statements ("print \"this took\"..."), so it only runs
    under Python 2 as written.
    """
    #Init: Creating lists to store sensor-data.
    def __init__(self):
        self.watchlistL = []
        self.watchlistR = []
    #alarm: stores data from both sensors into watchlist.
    def alarm(self):
        """Return the latest (left, right) distances, bias-corrected on side flips."""
        alarmL=self.watchlistL[-1]
        alarmR=self.watchlistR[-1]
        #To prevent the car from unstable motion in front of obstancles, in case the larger distance changes from one site to the other, the difference has to be larger than a certain bias about the size of the error of measurement.
        if((self.watchlistL[-1]-self.watchlistR[-1])*(self.watchlistL[-2]-self.watchlistR[-2]) < 0):
            if(alarmL < alarmR):
                alarmR = alarmR - bias
            if(alarmL > alarmR):
                alarmL = alarmL - bias
        return (alarmL, alarmR)
    #watch: reads the sensors and fills the watchlist. In case of an improper signal, the maximum meaurement distance bullshitdist is written into the list.
    #The list stores only the last few measurement-points, given by measrange.
    def watch(self):
        """Sample both sensors once and append to the rolling watchlists."""
        a=distance(0)
        if(a>0):
            self.watchlistL.append(a)
            # Keep only the most recent `measrange` samples.
            if (len(self.watchlistL) > measrange):
                self.watchlistL = self.watchlistL[1:]
        else:
            # Invalid reading: substitute the maximum meaningful distance.
            self.watchlistL.append(bullshitdist)
        b=distance(1)
        if(b>0):
            self.watchlistR.append(b)
            if (len(self.watchlistR) > measrange):
                self.watchlistR = self.watchlistR[1:]
        else:
            self.watchlistR.append(bullshitdist)
    #obstancle: Main routine of obstancle. Fills the watchlists and then reacts to obstancles below the threshold by turning towards the larger distance.
    def obstancle(self):
        """Main avoidance loop: steer towards the larger distance until clear."""
        # return
        start = time.time()
        #watching and waiting until the list is filled up
        self.watch()
        while(len(self.watchlistL) < measrange and len(self.watchlistR) < measrange):
            self.watch()
        #getting distance-measurements
        (L,R) = self.alarm()
        #checking if distance is larger than a minimum-safety-distance. If this is not the case, the car goes backwards.
        if (L > deaththreshold and R > deaththreshold):
            #obstancle takes the weel until no object is within the thresholddistance bullshitdist
            while min(L,R) < bullshitdist:
                #again checking for safety-distance every measurement-cycle
                if (self.watchlistL[-1] > deaththreshold and self.watchlistR[-1] > deaththreshold):
                    #turning towards the larger distance. Curvature-radius scales linearly with distance and the gauge-value movefactor
                    if (L < R):
                        if(L*movefactor > 0.715):
                            steer(-L*movefactor) #negative curve radius steers to the right
                            ##Test
                            print("Ich lenke nach rechts" , L,-L*movefactor)
                            end = time.time()
                            print "this took", end-start
                        #Limiting to the maximum value for the steering-servo.
                        else:
                            steer(-0.715)
                            ##Test
                            print("Ich lenke nach rechts, maximal" , L, 0.715)
                            end = time.time()
                            print "this took", end-start
                    if (R < L):
                        if(R*movefactor > 0.715):
                            steer(R*movefactor)
                            ##Test
                            print("Ich lenke nach links" , R, R*movefactor)
                            end = time.time()
                            print "this took", end-start
                        else:
                            steer(0.715)
                            ##Test
                            print("Ich lenke nach links, maximal" , R, 0.715)
                            end = time.time()
                            print "this took", end-start
                #going backwards:
                else:
                    drive(-2)
                    time.sleep(0.5)
                    stop()
                    print("Fahr nicht gegen ne Wand du Arsch")
                (L,R) = self.alarm()
                self.watch()
        #going backwards:
        else:
            if (L < R):
                steer_at(0.715,-1)
                ##Test
                print("Rueckwaerts nach links" , L,-L*movefactor)
            if (R < L):
                steer_at(-0.715,-1)
                print("Rueckwaerts nach rechts" , R, R*movefactor)
print("Rueckwaerts nach rechts" , R, R*movefactor)
if __name__ == "__main__":
    # Run one avoidance session directly (for on-vehicle testing).
    o = Watcher()
    o.obstancle()
| true |
03bb78dc47855c0c7f5dfe8b29b977bdba121548 | Python | JadenTurnbull/Crash-Course-Python | /chap03/TryItYourself_p82.py | UTF-8 | 375 | 3.734375 | 4 | [] | no_license | #3-1
# Exercise 3-1: print every name in the list.
names = ['Ruanne', 'Donnie', 'Yandré']
for person in names:
    print(person)
# Exercise 3-2: print a greeting for each name.
names = ['Ruanne', 'Donnie', 'Yandré']
for person in names:
    print(f" Hello {person}")
# Exercise 3-3: sentences built from favourite means of transport.
transport = ['car', 'motorcycle', 'plane']
print(f"BMW is one of the best {transport[0]} brands.")
print(f"I would like to drive a suzuki {transport[1]}.")
print(f"I love traveling by {transport[2]}")
| true |
26b33829b4150610c526509945059dd4819f8d49 | Python | rousseab/HardCoreFano | /modules/module_MatrixList.py | UTF-8 | 1,457 | 3.34375 | 3 | [] | no_license | #================================================================================
#
# Module MatrixList
# =================
#
# This module implements an object which can store a list of matrices
# and operate on them in an intuitive manner.
#
#================================================================================
from module_Constants import *
class MatrixList:
    """
    Stores the four components of a 2x2 matrix, where each component may
    itself be a list of N values, so equations from the graphene project
    can be written directly in matrix form.
    """
    def __init__(self,m_11,m_12,m_21,m_22):
        """
        Build the object with components
            [ m_11  m_12 ]
            [ m_21  m_22 ]
        where each component is assumed to be a list of N elements.
        Components are deep-copied so the caller's data is never aliased.
        """
        self.m_11 = deepcopy(m_11)
        self.m_12 = deepcopy(m_12)
        self.m_21 = deepcopy(m_21)
        self.m_22 = deepcopy(m_22)
    def return_list(self):
        """Return the components as a nested array [[m_11, m_12], [m_21, m_22]]."""
        return N.array([ [self.m_11, self.m_12],[self.m_21, self.m_22]])
    def __mul__(self,B_matrix):
        """Standard 2x2 matrix product, applied component-by-component."""
        return MatrixList(
            self.m_11*B_matrix.m_11 + self.m_12*B_matrix.m_21,
            self.m_11*B_matrix.m_12 + self.m_12*B_matrix.m_22,
            self.m_21*B_matrix.m_11 + self.m_22*B_matrix.m_21,
            self.m_21*B_matrix.m_12 + self.m_22*B_matrix.m_22,
        )
| true |
18d9345a61e59adbff674a1fb28782faa51b5839 | Python | holgerteichgraeber/examples-pse | /src/surrogate/alamo_python/examples.py | UTF-8 | 1,944 | 2.765625 | 3 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/python
##############################################################################
# Institute for the Design of Advanced Energy Systems Process Systems
# Engineering Framework (IDAES PSE Framework) Copyright (c) 2018-2019, by the
# software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia
# University Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.txt and LICENSE.txt for full copyright and
# license information, respectively. Both files are also available online
# at the URL "https://github.com/IDAES/idaes-pse".
##############################################################################
import numpy as np
import sys
def sixcamel(*x):
    """Six-hump camel benchmark function evaluated at (x1, x2)."""
    x1, x2 = x
    hump1 = np.multiply(
        4.0 - 2.1 * np.power(x1, 2) + np.divide(np.power(x1, 4), 3.0), np.power(x1, 2)
    )
    hump2 = np.multiply(4 * np.power(x2, 2) - 4, np.power(x2, 2))
    return hump1 + np.multiply(x1, x2) + hump2
def ackley(*x):
    """Ackley benchmark function evaluated at (x1, x2); minimum 0 at the origin."""
    import numpy as np

    x1, x2 = x
    a = 20
    b = 0.2
    c = 2 * 3.14159
    radial = -a * np.exp(-b * np.sqrt(0.5 * (x1 ** 2 + x2 ** 2)))
    cosine = -np.exp(0.5 * (np.cos(c * x1) + np.cos(c * x2)))
    return radial + cosine + a + np.exp(1)
def branin(*x):
    """Noisy Branin-style benchmark: deterministic value plus Gaussian noise (std 0.1).

    NOTE(review): the second term's parentheses differ from the canonical
    Branin definition 10*(1 - 1/(8*pi))*cos(x1) + 10 - kept exactly as-is.
    """
    import numpy as np

    x1, x2 = x
    pi = 3.14159
    quadratic = (x2 - (5.1 / (4 * pi ** 2)) * x1 ** 2 + (5 / pi) * x1 - 6) ** 2
    modulation = 10 * (1 - (1 / (8 * pi)) * np.cos(x1) + 10)
    return quadratic + modulation + np.random.normal(0, 0.1)
if __name__ == "__main__":
    # NOTE(review): sys.stdout.write does not append newlines, so these usage
    # lines all run together on one line; print() would separate them.
    sys.stdout.write(" ALAMOpy example functions ")
    sys.stdout.write(" call functions with : ")
    sys.stdout.write(" examples.<name>")
    sys.stdout.write(" <name> = branin ")
    sys.stdout.write("          sixcamel ")
    sys.stdout.write("          ackley ")
| true |
d1015642129063565dc1dd3a0e0b6c08c19ccff8 | Python | wanzongqi/-R- | /76.py | UTF-8 | 626 | 3.0625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 19 11:25:36 2018
@author: strA
"""
# ans[i][j] = number of ways to write i as a sum of positive integers each <= j
# (translated from the original Chinese comment).
ans = [[0 for j in range(100)] for i in range(101)]
ans[1] = [1]*100
for i in range(2,101):
    for j in range(1,100):
        # Largest usable part is min(i, j); peel off a part k and recurse on (i-k, k).
        b = min(i,j)
        for k in reversed(range(1,b+1)):
            ans[i][j] += ans[i-k][k]
        if j>=i:
            ans[i][j] += 1 ## the single-part partition "i = i" is only reached via callers, so count it here explicitly
###http://mathworld.wolfram.com/PartitionFunctionP.html - Euler's generating function is an alternative approach
| true |
4459945ff7bd2460cef84467fa82e3d7c3c6b0d8 | Python | AnkitAvi11/Data-Structures-and-Algorithms-Everyday-practice | /List/basic.py | UTF-8 | 617 | 4.40625 | 4 | [] | no_license | List = list () # creating an empty list (list is nothing but an array)
# functions to add values to a List
List.append(1)
List.append(2)
List.extend([1,2,3,7])
List.insert(4,55)
# functions related to removing values from List
try :
    # remove method throws an error if the value is not in the list
    List.remove(33)
except Exception as e:
    # 33 was never added, so this prints the ValueError instead of crashing
    print(e)
List.pop() # when no parameters are passed the last element is removed from the array
List.pop(2) # pops out the element at the second index
del List[3] # using delete keyword to delete an element
print("New List = ", List, end=" ")
| true |
ce1656d5d65fda9174de7552f58e004ca2cbec37 | Python | ebcyford/mon | /use_model.py | UTF-8 | 1,836 | 2.59375 | 3 | [] | no_license | """Retrieve model and infer on raster data
This script loads the best performing model, along with a raster and positions
of tree centers, and performs inference on self-generated image chips.
These image chips are a function of the identified tree's height.
"""
import argparse
import cv2
import os
import rasterio
import shapely
import geopandas as gpd
import numpy as np
import tensorflow as tf
from mon.infer import get_trees
from rasterio.mask import mask
from shapely import speedups
from tqdm import tqdm
# Environment settings
if (speedups.available):
    # Enable shapely's C-accelerated geometry speedups when compiled in.
    speedups.enable()
# Set the TensorFlow logger level to INFO.
tf.get_logger().setLevel("INFO")
def main():
    """Classify trees in `raster` at the `trees` locations and write OUT_FILE.

    Relies on the module-level globals (raster, trees, model, OUT_FILE)
    populated in the __main__ block of this script.
    """
    trees_classified = get_trees(raster, trees, model)
    print("Writing to " + OUT_FILE)
    trees_classified.to_file(OUT_FILE)
if __name__ == "__main__":
    # Default output path lives next to this script.
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    DEFAULT_OUT = os.path.join(BASE_DIR, "output.shp")
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str,
                        help="location of CNN model")
    parser.add_argument("--in_raster", type=str,
                        help="filepath of raster to perform inference")
    parser.add_argument("--tree_centers", type=str,
                        help="shapefile of identified tree centers")
    parser.add_argument("--out_file", type=str, default=DEFAULT_OUT,
                        help="shapefile of output trees and predictions [default:output.shp]")
    FLAGS = parser.parse_args()
    MODEL = FLAGS.model
    TREES = FLAGS.tree_centers
    RASTER = FLAGS.in_raster
    OUT_FILE = FLAGS.out_file
    print("Reading Data...")
    # Load the raster, the tree-center shapefile and the trained Keras model.
    raster = rasterio.open(RASTER)
    trees = gpd.read_file(TREES)
    model = tf.keras.models.load_model(MODEL)
    # Pixel size of the raster and the model's expected square chip size.
    SPATIAL_RES = raster.res[0]
    IMG_SIZE = model.get_input_shape_at(0)[1]
    main()
891ec0e978fd85577315073abfbc0f97eee612d9 | Python | Aasthaengg/IBMdataset | /Python_codes/p03329/s826383818.py | UTF-8 | 628 | 2.796875 | 3 | [] | no_license | from bisect import bisect_right
# Minimum number of terms needed to write n as a sum of 1 and powers of 6 and 9,
# computed bottom-up with dp[i] = fewest terms summing to i.
n = int(input())
MX = 100010
dp = [float('inf') for _ in range(MX)]
dp[0] = 0
# Precompute all powers of 6 and of 9 up to MX.
pow_6, pow_9 = [6], [9]
while True:
    flg = False
    if pow_6[-1]*6 <= MX:
        pow_6.append(pow_6[-1]*6)
        flg = True
    if pow_9[-1]*9 <= MX:
        pow_9.append(pow_9[-1]*9)
        flg = True
    if flg == False:
        break
for i in range(1, MX):
    # Candidate parts for i: 1, plus the LARGEST power of 6 and of 9 not exceeding i.
    # NOTE(review): smaller powers are not tried directly at each i; verify this
    # still reaches every optimal decomposition for the target problem.
    tmp = [1]
    if i >= 6:
        x = bisect_right(pow_6, i)
        tmp.append(pow_6[x-1])
    if i >= 9:
        y = bisect_right(pow_9, i)
        tmp.append(pow_9[y-1])
    for t in tmp:
        dp[i] = min(dp[i], dp[i-t]+1)
print(dp[n])
a8e8e4686980d97a0d091c11e2726a043818978c | Python | YifengGuo/python_study | /basics/character_counter.py | UTF-8 | 130 | 3.703125 | 4 | [] | no_license | def count(s, letter):
count = 0
for char in s:
if char == letter:
count += 1
return count
print(count('mathmatica', 'm'))  # expected output: 2 ('m' appears twice)
02fd00525459b5ef3c07fd65b10e531afe4154e2 | Python | fzellini/SolarTracker | /trackerdriver.py | UTF-8 | 7,729 | 2.84375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# TrackerDriver class
# a TrackerDriver is composed of two linearmotors, have an orientation
#
import math
import time
import logging.handlers
import linearmotor
import pickle
import os
from Vec3d import Vec3d
def getpitchroll(azi, ele):
    """Convert an azimuth/elevation pair (degrees) into tracker pitch/roll angles (degrees)."""
    az_rad = math.radians(azi)
    el_rad = math.radians(ele)
    # Unit pointing vector for the given sky direction.
    vz = math.sin(el_rad)
    vx = -math.cos(az_rad) * math.cos(el_rad)
    vy = math.sin(az_rad) * math.cos(el_rad)
    vec = Vec3d(vy, vx, vz)
    # Pitch is the rotation around x; undo it, then read the remaining roll.
    pitch = vec.get_angle_around_x() - 90
    vec.rotate_around_x(-pitch)
    roll = vec.get_angle_around_y()
    return pitch, roll
class TrackerDriver:
    """Two-axis solar tracker built from a pitch and a roll linear motor.

    Positions are persisted to `statefile` with pickle after every move so
    the tracker can resume from its last known motor positions on restart.
    NOTE(review): relies on a module-level `log` logger, which in this file
    is only created inside the __main__ block - confirm importers set it up.
    """
    def __init__(self, pitchmotor, rollmotor, azioffset, statefile="motor.dat"):
        # azioffset is added to every requested azimuth (mount misalignment).
        self.pitchMotor = pitchmotor
        self.rollMotor = rollmotor
        self.aziOffset = azioffset
        self.statefile = statefile
        self.restorestate()
    def gotopitchposition(self, pos):
        """Move only the pitch motor to reed-step position `pos` and persist."""
        self.pitchMotor.gopos(pos)
        self.savestate()
    def gotopitchangle(self, angle):
        """Move only the pitch motor to `angle` (degrees) and persist."""
        self.pitchMotor.goangle(angle)
        self.savestate()
    def gotorollposition(self, pos):
        """Move only the roll motor to reed-step position `pos` and persist."""
        self.rollMotor.gopos(pos)
        self.savestate()
    def gotorollangle(self, angle):
        """Move only the roll motor to `angle` (degrees) and persist."""
        self.rollMotor.goangle(angle)
        self.savestate()
    def gotopitchrollpos(self, pitchpos, rollpos):
        """
        move both axis together
        :param pitchpos: pitch position in reed steps
        :param rollpos: roll position in reed steps
        """
        log.info("going to pos [pitch %d, roll %d] from [pitch %d, roll %d]" % (pitchpos, rollpos,
                                                                                self.pitchMotor.pos, self.rollMotor.pos))
        # todo: check also for max values
        if pitchpos < 0:
            pitchpos = 0
            log.info("pitchpos forced to 0")
        if rollpos < 0:
            rollpos = 0
            log.info("roll forced to 0")
        # Disable per-step waiting while both motors run simultaneously.
        self.rollMotor.wait = 0
        self.pitchMotor.wait = 0
        #
        # Choose each motor's direction; skip moves smaller than its minstep.
        pitch = False
        if abs(pitchpos - self.pitchMotor.pos) >= self.pitchMotor.minstep:
            if pitchpos > self.pitchMotor.pos:
                self.pitchMotor.forward()
            else:
                self.pitchMotor.backward()
            pitch = True
        roll = False
        if abs(rollpos - self.rollMotor.pos) >= self.rollMotor.minstep:
            if rollpos > self.rollMotor.pos:
                self.rollMotor.forward()
            else:
                self.rollMotor.backward()
            roll = True
        if pitch or roll:
            time.sleep(.2)  # wait direction relais set-up
        if pitch:
            self.pitchMotor.on()
        if roll:
            self.rollMotor.on()
        # Stall detection: remember the last observed positions; if a motor's
        # position hasn't changed within ~0.5s, consider that axis done.
        oroll = -999
        opitch = -999
        txr = time.time()
        txp = txr
        while pitch or roll:
            # log.info( "pitch pos %d, roll pos %d" % (self.pitchMotor.pos,self.rollMotor.pos))
            ty = time.time()
            if pitch:
                if ty - txp > .5:
                    txp = ty
                    if self.pitchMotor.pos == opitch:
                        pitch = False
                    opitch = self.pitchMotor.pos
                if self.pitchMotor.step > 0:
                    # forward: stop within 2 reed steps of the target
                    if self.pitchMotor.pos > pitchpos - 2:
                        pitch = False
                else:
                    # backward
                    if self.pitchMotor.pos < pitchpos + 2:
                        pitch = False
                if not pitch:
                    self.pitchMotor.off()
            if roll:
                if ty - txr > .5:
                    txr = ty
                    if self.rollMotor.pos == oroll:
                        roll = False
                    oroll = self.rollMotor.pos
                if self.rollMotor.step > 0:
                    # forward
                    if self.rollMotor.pos > rollpos - 2:
                        roll = False
                else:
                    # backward
                    if self.rollMotor.pos < rollpos + 2:
                        roll = False
                if not roll:
                    self.rollMotor.off()
            time.sleep(.1)
        log.info("pitch pos %d, roll pos %d" % (self.pitchMotor.pos, self.rollMotor.pos))
        # Restore the motors' normal per-step wait and persist the new position.
        self.rollMotor.wait = .2
        self.pitchMotor.wait = .2
        self.savestate()
    def gotopitchrollangle(self, pitchangle, rollangle):
        """
        move tracker at specified pitch and roll angle
        positive values for pitch points north, for roll points east
        both pitch and roll == 0 means horizontal position
        :param pitchangle: pitch angle
        :param rollangle: roll angle
        """
        log.info("going to pitchangle %f, rollangle %f" % (pitchangle, rollangle))
        # fix angles for motors
        pitchangle = self.pitchMotor.fixangle(pitchangle)
        rollangle = self.rollMotor.fixangle(rollangle)
        pitchpos = self.pitchMotor.angle2pos(pitchangle)
        rollpos = self.rollMotor.angle2pos(rollangle)
        self.gotopitchrollpos(pitchpos, rollpos)
    def gotoaziele(self, az, alt):
        """
        goto specific azi, ele, applying azioffset
        """
        log.info("going to azi %f, ele %f" % (az, alt))
        if self.aziOffset != 0:
            az = az + self.aziOffset
            az %= 360.0
            log.info("correcting azi to %f due to offset of %f" % (az, self.aziOffset))
        # Translate the sky direction into tracker pitch/roll and move there.
        pr = getpitchroll(az, alt)
        log.info(" pitch [%f], roll [%f]" % (pr[0], pr[1]))
        self.gotopitchrollangle(pr[0], pr[1])
    def savestate(self):
        """
        save tracker state (motor position)
        """
        state = (self.pitchMotor.pos, self.rollMotor.pos)
        output = open(self.statefile, 'wb')
        # Pickle the (pitch, roll) position tuple.
        pickle.dump(state, output)
        output.close()
        log.info("position saved [%s, pitch=%d roll=%d]" % (self.statefile, self.pitchMotor.pos, self.rollMotor.pos))
    def restorestate(self):
        """
        restore tracker state (motor position); creates the state file on first run
        """
        if os.path.exists(self.statefile):
            inputhandle = open(self.statefile, 'rb')
            # Unpickle the previously saved (pitch, roll) position tuple.
            state = pickle.load(inputhandle)
            inputhandle.close()
            self.pitchMotor.pos, self.rollMotor.pos = state
            log.info("restored position from [%s pitch=%d roll=%d]" % (self.statefile,
                                                                       self.pitchMotor.pos,
                                                                       self.rollMotor.pos))
        else:
            self.savestate()
def gpio_out(port, value, label=""):
    """Simulated GPIO write: log the request instead of driving real hardware.

    The real RPi.GPIO calls are kept commented out below for deployment.
    NOTE(review): uses the module-level `log`, which is only created in the
    __main__ block of this file.
    """
    # import RPi.GPIO as GPIO
    # GPIO.output (port,value)
    log.info("%s simulate setting of GPIO %d to %d" % (label, port, value))
# main test
if __name__ == "__main__":
    # Set up logging for this module and for the linearmotor module.
    logging.basicConfig()
    log = logging.getLogger("trackerdriver")
    log.setLevel(logging.INFO)
    logm = logging.getLogger("linearmotor")
    logm.setLevel(logging.INFO)
    linearmotor.log = logm
    # Build the two axes with their hardware geometry/calibration constants
    # and route their GPIO writes through the simulated gpio_out().
    pitchM = linearmotor.LinearMotor("[pitch]", dirport=21, powerport=19, pulseport=3, pulsestep=0.522, ab=225, bc=355,
                                     cd=40, d=-5, offset=136, hookoffset=34)
    pitchM.set_gpioout(gpio_out)
    rollM = linearmotor.LinearMotor("[roll]", dirport=16, powerport=15, pulseport=5, pulsestep=0.522, ab=225, bc=708,
                                    cd=40, d=75, offset=503)
    rollM.set_gpioout(gpio_out)
    td = TrackerDriver(pitchM, rollM, 0)
    # td.gotoPitchRollAngle(0,0)
    # Drive to azimuth 150 deg, elevation 45 deg as a smoke test.
    td.gotoaziele(150, 45)
| true |
781d862619da752d3ecd7f90d7b55169d16ee5ec | Python | link2618/CompraFacturacion | /inv/models.py | UTF-8 | 3,512 | 2.578125 | 3 | [] | no_license | from django.db import models
# Importamos la clase modelo madre que creamos en Bases
from bases.models import ClaseModelo
# Create your models here.
# Al herredar de ClaseModelo Ya tiene todos sus atributos
class Categoria(ClaseModelo):
    """Product category; inherits the audit fields from ClaseModelo."""
    descripcion = models.CharField('Descripción', max_length=100, help_text='Descripción de la Categoria', unique=True)

    def __str__(self):
        return "{}".format(self.descripcion)

    def save(self, *args, **kwargs):
        # Uppercase the description before persisting.  Forwarding
        # *args/**kwargs keeps Model.save()'s contract (force_insert,
        # update_fields, using, ...); the original zero-argument override
        # silently broke those callers.
        self.descripcion = self.descripcion.upper()
        super().save(*args, **kwargs)

    class Meta:
        # Avoid the auto-generated plural name.
        verbose_name_plural = "Categorias"
class SubCategoria(ClaseModelo):
    """Sub-category nested under a Categoria."""
    categoria = models.ForeignKey(Categoria, on_delete=models.CASCADE)
    descripcion = models.CharField('Descripción', max_length=100, help_text='Descripción de la Sub Categoria')

    def __str__(self):
        return "{}: {}".format(self.categoria.descripcion, self.descripcion)

    def save(self, *args, **kwargs):
        # Uppercase the description before persisting; forward *args/**kwargs
        # so Model.save() options (force_insert, update_fields, ...) keep working.
        self.descripcion = self.descripcion.upper()
        super().save(*args, **kwargs)

    class Meta:
        verbose_name_plural = "Sub Categorias"
        # A description may repeat only across different categories.
        unique_together = ('categoria', 'descripcion')
class Marca(ClaseModelo):
    """Product brand."""
    descripcion = models.CharField('Descripción', max_length=100, help_text='Descripción de la Marca', unique=True)

    def __str__(self):
        return "{}".format(self.descripcion)

    def save(self, *args, **kwargs):
        # Uppercase the description before persisting; forward *args/**kwargs
        # so Model.save() options (force_insert, update_fields, ...) keep working.
        self.descripcion = self.descripcion.upper()
        super().save(*args, **kwargs)

    class Meta:
        verbose_name_plural = "Marca"
class UnidadMedida(ClaseModelo):
    """Unit of measure for products."""
    descripcion = models.CharField('Descripción', max_length=100, help_text='Descripción de la Unidad de Medida', unique=True)

    def __str__(self):
        return "{}".format(self.descripcion)

    def save(self, *args, **kwargs):
        # Uppercase the description before persisting; forward *args/**kwargs
        # so Model.save() options (force_insert, update_fields, ...) keep working.
        self.descripcion = self.descripcion.upper()
        super().save(*args, **kwargs)

    class Meta:
        verbose_name_plural = "Unidad de Medida"
class Producto(ClaseModelo):
    """Sellable product, linked to its brand, unit of measure and sub-category."""
    codigo = models.CharField('Codigo', max_length=20, unique=True)
    codigo_barra = models.CharField('Codigo de Barras', max_length=50)
    descripcion = models.CharField('Descripcion', max_length=200)
    precio = models.FloatField('Precio', default=0)
    existencia = models.IntegerField('Existencia', default=0)
    ultima_compra = models.DateField('Ultima Compra', null=True, blank=True)

    # Foreign keys
    marca = models.ForeignKey(Marca, on_delete=models.CASCADE)
    unidad_medida = models.ForeignKey(UnidadMedida, on_delete=models.CASCADE)
    subcategoria = models.ForeignKey(SubCategoria, on_delete=models.CASCADE)

    def __str__(self):
        return "{}".format(self.descripcion)

    def save(self, *args, **kwargs):
        # Uppercase the description before persisting; forward *args/**kwargs
        # so Model.save() options (force_insert, update_fields, ...) keep working.
        self.descripcion = self.descripcion.upper()
        super().save(*args, **kwargs)

    class Meta:
        verbose_name_plural = "Productos"
        # One product per (codigo, codigo_barra) pair.
        unique_together = ('codigo', 'codigo_barra')
| true |
85408c1dbdababe735ddccfc5fc086ba011a0b13 | Python | CHOIsunhyeon/bioinfo-lecture-2021-07 | /src2/15.py | UTF-8 | 650 | 2.890625 | 3 | [] | no_license | #! usr/bin/python
import sys

# Bug fix: the usage check must run BEFORE sys.argv[1] is read.  The original
# accessed argv[1] first, so running without an argument crashed with
# IndexError instead of printing the usage line (as the original comments
# themselves noted: "this should go to the very top").
if len(sys.argv) != 2:
    print(f"python {sys.argv[0]} [sample]")
    # Exit non-zero: any code other than 0 signals abnormal termination to
    # the shell (check with `echo $?`), here "wrong number of arguments".
    sys.exit(1)

sample = sys.argv[1]

print(f"processing: {sample}")
## processing / analysis goes here
print(f"end: {sample}")
| true |
105a359b7ad740a34b5a33a84ce66beeb6bd9b87 | Python | nick-kopy/Predicting-Bike-Rental-Station-Traffic | /data_clean.py | UTF-8 | 16,013 | 3.0625 | 3 | [
"MIT"
] | permissive | # This file contains all the necessary functions for model.ipynb to run
# To see examples of how to use these functions, see above mentioned notebook
# Authored by Nicholas Kopystynsky
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from geopy.distance import geodesic
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
plt.style.use('ggplot')  # global matplotlib theme for any plots produced downstream
def basic_dist(row):
    '''Straight-line (geodesic) trip distance in meters; 0 for round trips.'''
    if row['round_trip'] == 1:
        return 0
    start = (row['start_lat'], row['start_lng'])
    end = (row['end_lat'], row['end_lng'])
    return geodesic(start, end).km * 1000
def station_data(region, eda=False, start_end=None, exclude_within_region=False):
'''Loads, preps, and filters data for machine learning
input: a set of strings, all station names
output: pd dataframe of recent Divvy trips
- Output is not quite AI ready or EDA ready, but right where they would branch
options:
- eda: If True includes extra columns with trip related statistics. Should be excluded for modeling.
- start_end: Pick if you want trips that start in a region or end in a region or leave blank for both.
- exclude_within_region: If a trip started and ended within a region, excludes those trips.
'''
# grab a set of station names for a given region
if region in ['downtown', 'lincoln_park', 'wicker_park', 'hyde_park', 'uptown', 'chinatown']:
stations = get_stations(region)
else:
stations = set([region])
# Gather one years worth of data
filelist = []
frames = []
# change this back to the full year later
for month in [10,11,12]: #[4,5,6,7,8,9,10,11,12]:
filelist.append('data/2020{:02d}-divvy-tripdata.csv'.format(month))
for month in [1,2,3]:
filelist.append('data/2021{:02d}-divvy-tripdata.csv'.format(month))
usecols = ['started_at', 'ended_at', 'start_station_name', 'end_station_name', 'member_casual', 'rideable_type',
'start_lat', 'start_lng', 'end_lat', 'end_lng']
# actually grab the data
for month in filelist:
lil_df = pd.read_csv(month, usecols=usecols)
# decide weather to look at trips starting and/or ending in our selected region
mask_end = (lil_df['end_station_name'].isin(stations))
mask_start = (lil_df['start_station_name'].isin(stations))
# want trips ending in our region, but may or may not want those starting in our region
if start_end == 'end':
if exclude_within_region == False:
mask = mask_end
elif exclude_within_region == True:
mask = mask_end & ~mask_start
# want trips starting in our region, but may or may not want those ending in our region
elif start_end == 'start':
if exclude_within_region == False:
mask = mask_start
elif exclude_within_region == True:
mask = mask_start & ~mask_end
# want all trips that started or ended in our region but may or may not want trips that did both
else:
if exclude_within_region == False:
# started or ended in region
mask = mask_start | mask_end
elif exclude_within_region == True:
# started xor ended in region
mask = (mask_start & ~mask_end) | (~mask_start & mask_end)
lil_df = lil_df[mask]
frames.append(lil_df)
df = pd.concat(frames, ignore_index=True)
# Only relevant missing data is lat/long, warns us if ever dropping more than 1%
allrows = df.shape[0]
df = df[df['start_lat'].notna()]
df = df[df['end_lat'].notna()]
if allrows/df.shape[0] > 1.01:
print('NULL WARNING: more than 1% of rows null')
df = df.reset_index(drop=True)
# target variable is grouped by date and hour
df['ended_at'] = pd.to_datetime(df['ended_at'])
df['started_at'] = pd.to_datetime(df['started_at'])
df['date'] = pd.to_datetime(df['ended_at']).dt.date
df['hour'] = df['ended_at'].dt.hour
# For some reason each month has a extra few trips from the upcoming month
# removed to prevent data leakage
df = df[df['date'] < pd.to_datetime('2021-04-01')]
# Future implementation can include weather data
#weather = grab_weather()
#df = df.merge(weather, how='left', left_on=['date', 'hour'], right_on=['date', 'hour'])
# AI wouldn't have following aggregate features available for predictions, so they aren't included in modeling
if eda == False:
# instead prep for machine learning
return vectorize(df)
# Extracting some interesting features for EDA
# daylight savings makes a few negative trip times, a quick approximate fix is okay
df['trip_time'] = abs((df['ended_at'] - df['started_at']).dt.total_seconds())
# All trips above 10,800 seconds (3 hrs) are on Nov 25, must be some systemic thing
df = df[df['trip_time'] < 10800]
df['round_trip'] = df.apply(lambda x: 1 if x['start_station_name'] == x['end_station_name'] else 0, axis=1)
df['electric'] = df['rideable_type'].apply(lambda x: 1 if x == 'electric_bike' else 0)
df['member'] = df['member_casual'].apply(lambda x: 1 if x == 'member' else 0)
df['trip_dist'] = df.apply(basic_dist, axis=1)
dropcols = ['rideable_type', 'member_casual', 'started_at', 'ended_at', 'start_lat', 'start_lng',
'end_lat', 'end_lng', 'start_station_name', 'end_station_name']
df = df.drop(columns=dropcols)
# extract target and add to output
out = df.groupby(['date', 'hour']).agg('mean')
out['target'] = df.groupby(['date', 'hour']).size()
# Some hours are missing, we want to include a row for that hour with target = 0
dti = pd.Series(pd.date_range("2020-04-01", freq="D", periods=365)).dt.date
idx = pd.MultiIndex.from_product([dti, np.arange(24)], names=['date', 'hour'])
# When weather is implemented column names will need to be included below
df_blank = pd.DataFrame(data = np.zeros(shape=(365*24, 6)),
index = idx,
columns = ['trip_time', 'round_trip', 'electric', 'member',
'trip_dist', 'target'])
out = pd.concat([df_blank, out]).groupby(['date', 'hour']).agg('sum')
return out
def grab_weather():
    '''Placeholder for a future feature: load and prep weather data
    (date/hour indexed) as a pandas DataFrame. Currently returns None.'''
    return None
def get_stations(region):
    '''Return the set of station names belonging to one region.

    possible regions: 'downtown', 'lincoln_park', 'wicker_park',
    'hyde_park', 'uptown', 'chinatown'
    '''
    groups = pd.read_csv('models/station_groups.csv')
    in_region = groups['group'] == region
    return set(groups.loc[in_region, 'name'].values)
def vectorize(inputdf):
    '''Prepare trip-level data for machine learning.

    input: trip-level df from station_data (must carry 'date' and 'hour'
    columns plus lat/lng columns)
    output: if the aggregated data ends up univariate, a pandas DataFrame
    with the single 'target' column (hourly trip counts, scaling skipped);
    otherwise a tuple of (2D numpy array of scaled features with the
    unscaled target appended as the last column, the fitted MinMaxScaler).
    '''
    # extract target (trips per hour) and add to output
    out = inputdf.groupby(['date', 'hour']).agg('mean')
    out['target'] = inputdf.groupby(['date', 'hour']).size()

    # coordinates are not useful aggregate features
    dropcols = ['start_lat', 'start_lng', 'end_lat', 'end_lng']
    out = out.drop(columns=dropcols)

    # Some hours are missing, we want to include a row for that hour with target = 0
    # Merging with a blank df seems to cover our bases
    dti = pd.Series(pd.date_range("2020-04-01", freq="D", periods=365)).dt.date
    idx = pd.MultiIndex.from_product([dti, np.arange(24)], names=['date', 'hour'])
    # feature columns would need to be added below
    df_blank = pd.DataFrame(data = np.zeros(shape=(365*24, 1)), index=idx,
                            columns=['target'])
    out = pd.concat([df_blank, out]).groupby(['date', 'hour']).agg('sum')
    out = out.reset_index(drop=True)

    # If data is univariate, no need for scaling
    if out.shape[1] == 1:
        return out

    # target feature should not be scaled
    y = np.array(out.iloc[:, -1])
    scaler = MinMaxScaler()
    out = scaler.fit_transform(out.iloc[:, :-1])
    return np.append(out, y.reshape(-1, 1), axis=1), scaler
class Model:
    '''
    Wrapper class for Keras GRU type recurrent neural network.
    Includes architecture and methods to streamline model training.
    '''
    def __init__(self, df, univariate=True, load_model=None):
        '''output of station_data(region, eda=False) should be passed;
        load_model may carry an already-trained Keras model to reuse.'''
        self.df = np.array(df)
        # offset in hours from midnight (used in predictions)
        self.offset = 6
        # window of time to look back when making a prediction (in hours)
        self.lookback = 120

        # NOTE(review): single-column data forces univariate=False here,
        # which looks inverted at first glance; downstream windowing does
        # handle the resulting 2D shape, but confirm the intent.
        if self.df.shape[1] == 1:
            self.univariate=False
        else:
            self.univariate = univariate

        # scale data
        self.scaler = MinMaxScaler()
        self.df = self.scaler.fit_transform(self.df)

        if self.univariate == True:
            self.X_train, self.X_test, self.y_train, self.y_test = self.split_and_windowize(self.df[:, -1], self.lookback, 0.0001, univariate=self.univariate)
        else:
            self.X_train, self.X_test, self.y_train, self.y_test = self.split_and_windowize(self.df, self.lookback, 0.0001, univariate=self.univariate)

        # reuse a pre-trained model if one was supplied
        if load_model is not None:
            self.model = load_model
            return None

        # Model structure, feel free to make adjustments here
        self.model = tf.keras.Sequential()
        self.model.add(tf.keras.layers.GRU(100, return_sequences=False))
        self.model.add(tf.keras.layers.Dropout(0.2))
        self.model.add(tf.keras.layers.Dense(1, activation='relu'))
        self.model.compile(optimizer='rmsprop', loss='mse')

    def windowize_data(self, data, n_prev, univariate=True):
        '''Function to add a dimension of past data points to a numpy array

        input: 2D np array
        output: 3d np array (where 3D dimension is just copies of previous rows)

        Adapted from code by Michelle Hoogenhout:
        https://github.com/michellehoog
        '''
        n_predictions = len(data) - n_prev
        # each row of `indices` holds the n_prev indices preceding one target
        indices = np.arange(n_prev) + np.arange(n_predictions)[:, None]
        if univariate == False:
            y = data[n_prev:, -1]
            x = data[indices]
        else:
            y = data[n_prev:]
            # the trailing None adds the feature axis expected by the GRU
            x = data[indices, None]
        return x, y

    def split_and_windowize(self, data, n_prev, fraction_test=0.1, univariate=True):
        '''Train/test splits data with added timestep dimension

        Adapted from code by Michelle Hoogenhout:
        https://github.com/michellehoog
        '''
        n_predictions = len(data) - 2*n_prev
        n_test = int(fraction_test * n_predictions)
        n_train = n_predictions - n_test

        x_train, y_train = self.windowize_data(data[:n_train], n_prev, univariate=univariate)
        x_test, y_test = self.windowize_data(data[n_train:], n_prev, univariate=univariate)
        return x_train, x_test, y_train, y_test

    def train(self):
        '''Actually trains the model on the data'''
        self.model.fit(self.X_train, self.y_train, batch_size=16, epochs=50)

    def predict(self, n_out=24, offset=0):
        '''Makes an autoregressive prediction of n_out hours.

        offset is hours since March 27th, 2021 at 12am
        '''
        # first state of window
        window = self.X_test[0+offset, :, :].reshape([1,-1,1])
        out = []
        for _ in range(n_out):
            pred = self.model.predict(window)[0][0]
            out.append(pred)
            # add prediction as newest element to window, auto reshapes to (n+1, )
            window = np.append(window, pred)
            # delete oldest element at beginning
            window = np.delete(window, 0)
            # reshape so prediction() can use the window again
            window = window.reshape([1,-1,1])
        return np.array(out)

    def predict_plot(self, n_out=24, offset=0):
        '''Generates a plot of the prediction against the actual observations.
        Includes a subplot of the residuals'''
        yhat = self.predict(n_out=n_out, offset=offset)
        ytest = self.y_test[0+offset:n_out+offset]

        # unscale target
        # NOTE(review): inverse_transform assumes the scaler was fit on a
        # single column — confirm for multivariate inputs.
        yhat = self.scaler.inverse_transform(yhat.reshape(-1, 1))
        ytest = self.scaler.inverse_transform(ytest.reshape(-1, 1))

        fig, (ax1, ax2) = plt.subplots(2, figsize=(12,7), gridspec_kw={'height_ratios': [2, 1]})

        # xtick label if blocks: pick clock labels matching the window start
        if n_out == 24:
            ax1.set_xticks([0,6,12,18,24])
            ax2.set_xticks([0,6,12,18,24])
            if offset%24 == 0:
                ax2.set_xticklabels(['12am', '6am', '12pm', '6pm', '12am'])
            elif offset%24 == 6:
                ax2.set_xticklabels(['6am', '12pm', '6pm', '12am', '6am'])
            elif offset%24 == 12:
                ax2.set_xticklabels(['12pm', '6pm', '12am', '6am', '12pm'])
            elif offset%24 == 18:
                ax2.set_xticklabels(['6pm', '12am', '6am', '12pm', '6pm'])

        ax1.plot(np.arange(len(ytest)), ytest, c='darkslategrey', label='actual')
        ax1.plot(np.arange(len(ytest)), yhat, c='orangered', label='predicted')
        ax1.set_ylabel('Trips')
        ax1.title.set_text('Traffic predictions')
        ax1.set_xticklabels([])
        ax1.legend()

        ax2.plot(np.arange(len(ytest)), (ytest - yhat), c='darkslategrey')
        ax2.axhline(c='orangered')
        ax2.set_xlabel('Time')
        ax2.set_ylabel('Trip Error')
        ax2.set_ylim(-100, 100) # remove if doesn't look good
        ax2.title.set_text('Error between Actual and Predicted Traffic');

    def predict_score(self, n_out=24, offset=0):
        '''Returns a tuple of model and baseline RMSE scores for a window.
        The baseline simply predicts the mean of the most recent training data.'''
        if self.univariate == True:
            ybase = np.ones(n_out) * self.X_train[(n_out+offset)*-1:-1-offset, 0, -1].mean()
        else:
            ybase = np.ones(n_out) * self.X_train[(n_out+offset)*-1:-1-offset,:,:].mean()
        if n_out==1:
            # this line throws runtime errors
            ybase = np.ones(n_out) * self.X_train[(n_out+offset)*-1:,:,:].mean()
        ybase = mean_squared_error(self.y_test[offset:offset+n_out], ybase)**0.5

        yhat = self.predict(n_out=n_out, offset=offset)
        yhat = mean_squared_error(self.y_test[0+offset:n_out+offset], yhat)**0.5
        #print('This model did {}% better than baseline ({})'.format(round((1-yhat/ybase)*100, 2), round(ybase, 2)))
        return yhat, ybase

    def rmse_spread(self):
        '''Gives a couple different views of RMSE scores to evaluate a model.
        Mostly used for model validation.
        '''
        rmse_24x1 = self.predict_score(n_out=24, offset=0)

        hat = []
        base = []
        # NOTE(review): four 6-hour windows — the last offset is 16, not 18;
        # confirm whether that was intentional.
        for off in [0, 6, 12, 16]:
            a, b = self.predict_score(n_out=6, offset=off)
            hat.append(a)
            base.append(b)
        rmse_6x4 = (np.array(hat).mean(), np.array(base).mean())

        hat = []
        base = []
        for off in np.arange(24):
            a, b = self.predict_score(n_out=1, offset=off)
            hat.append(a)
            base.append(b)
        rmse_1x24 = (np.array(hat).mean(), np.array(base).mean())

        print('Single 24hr test: {} vs baseline {}'.format(round(rmse_24x1[0], 4), round(rmse_24x1[1], 4)))
        print('Four 6hr tests (averaged): {} vs baseline {}'.format(round(rmse_6x4[0], 4), round(rmse_6x4[1], 4)))
        print('Twenty-four 1hr tests (averaged): {} vs baseline {}'.format(round(rmse_1x24[0], 4), round(rmse_1x24[1], 4)))
c3b5266eac561824afccc1e7df8e0432ed5c22ac | Python | DimasNovianadi/MultiItemPayment | /multiitempayment.py | UTF-8 | 2,862 | 3.171875 | 3 | [] | no_license | tambah="y"
# Console ordering loop: show the menu, take one food and one drink order,
# compute the bill, and repeat while the customer answers "y".
tambah = "y"
while tambah == "y":
    print("")
    print("========================================")
    print(" DAFTAR MENU ")
    print("========================================")
    print(" Menu Makanan ")
    print("========================================")
    print(" 1 = NASI GORENG Rp 15.000")
    print(" 2 = LONTONG GORENG Rp 14.900")
    print(" 3 = BAKSO GORENG Rp 12.900")
    print(" 4 = RUJAK GORENG Rp 13.000")
    print(" 5 = RUJAK BAKSO Rp 15.000")
    print(" 6 = RUJAK BAKSO PECEL Rp 17.000")
    print("========================================")
    print(" Menu Minuman ")
    print("========================================")
    print(" a = TEH DINGIN/PANAS Rp 2.500")
    print(" b = KOPI DINGIN Rp 5.000")
    print(" c = KOPI PANAS Rp 6.500")
    print(" d = COCA COLA DINGIN Rp 3.500")
    print(" e = COCA COLA DINGIN Rp 5.000")
    print("========================================")
    print("")

    kodemakanan = ['1', '2', '3', '4', '5', '6']
    namamakanan = ['NASI GORENG', 'LONTONG GORENG', 'BAKSO GORENG', 'RUJAK GORENG', 'RUJAK BAKSO', 'RUJAK BAKSO PECEL']
    hargamakanan = [15000, 14900, 12900, 13000, 15000, 17000]
    kodeminuman = ['a', 'b', 'c', 'd', 'e']
    namaminuman = ['TEH DINGIN/PANAS', 'KOPI DINGIN', 'KOPI PANAS', 'COCA COLA DINGIN', 'COCA COLA DINGIN']
    hargaminuman = [2500, 5000, 6500, 3500, 5000]

    pilmakanan = input(">> Masukkan Kode makanan = ")
    qtymakanan = input(">> Jumlah Makanan = ")
    pilminuman = input(">> Masukkan Kode minuman = ")
    qtyminuman = input(">> Jumlah Minuman = ")

    # BUG FIX: the original loop compared undefined names (kodema, pilma,
    # kodemi, pilmi — never assigned, so every order raised NameError) and
    # required the food and the drink to sit at the SAME list index.
    # Look each selection up independently instead.
    indeks_makanan = kodemakanan.index(pilmakanan)
    indeks_minuman = kodeminuman.index(pilminuman)
    namamak = namamakanan[indeks_makanan]
    hrgsatma = hargamakanan[indeks_makanan]
    namamin = namaminuman[indeks_minuman]
    hrgsatmi = hargaminuman[indeks_minuman]

    tot_mak = hrgsatma * int(qtymakanan)
    tot_min = hrgsatmi * int(qtyminuman)
    tot_byr = tot_mak + tot_min

    print(("----------------------------------------"))
    print(">>> NAMA MAKANAN : " + namamak)
    # BUG FIX: these two lines printed the undefined names qtyma / qtymi.
    print(">>> JUMLAH : " + qtymakanan)
    print(">>> HARGA MAKANAN : Rp " + str(tot_mak))
    print(">>> NAMA MINUMAN : " + namamin)
    print(">>> JUMLAH : " + qtyminuman)
    print(">>> HARGA MINUMAN : Rp " + str(tot_min))
    print(("----------------------------------------"))
    print(">>> TOTAL BAYAR : Rp " + str(tot_byr))
    bayar = int(input(">>> BAYAR : Rp "))
    kembalian = bayar - int(tot_byr)
    print(">>> KEMBALIAN : Rp " + str(kembalian))

    tambah = input("Pesan lagi (y/t)? ")
    if tambah == "t":
        print("Terima Kasih Telah Berbelanja")
        break
56bfd9eda1918f92834742de8f4bbdab95e50738 | Python | ILoveStudying/PKU-Deep-Learning | /homework6/06-isp/pr.py | UTF-8 | 1,280 | 2.640625 | 3 | [] | no_license | import csv
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
df = pd.read_csv('./q2/q2psnr.csv')

# Plot the per-epoch PSNR curve of every (dataset, upscale ratio)
# configuration on a single axes, in dataset-then-ratio order.
psnr_columns = [
    'BSDratio=2', 'BSDratio=3', 'BSDratio=4',
    'G100ratio=2', 'G100ratio=3', 'G100ratio=4',
    'T91ratio=2', 'T91ratio=3', 'T91ratio=4',
]
for column in psnr_columns:
    plt.plot(df[column], linewidth=2.5, linestyle="-")

plt.xlabel('Epoch')
plt.ylabel('Average PSNR(db)')
plt.legend()
plt.ylim(16, 32)
plt.yticks(np.arange(16, 33, 1.0))
plt.show()
424c53763e92c1621fb1f41fc8ef20918d4e862c | Python | georoa/Engineering-Hiring-Project | /Problem_5.py | UTF-8 | 947 | 2.84375 | 3 | [] | no_license | policy = raw_input('Enter the policy name you want to add: ')
# Interactive console flow (Python 2: raw_input) that builds one Policy row
# and links it to existing Agent / Named Insured contacts before committing.
# NOTE(review): relies on names defined elsewhere in the application
# (`policy` prompted earlier in the file, plus Policy, Contact, db, date).
year = raw_input('Enter the effective date year: ')
month = raw_input('Enter the effective date month (Must include leading zeros if any!): ')
day = raw_input('Enter the effective date day (Must include leading zeros if any!): ')
premium = raw_input('Enter the annual premium: ')
billing = raw_input('Enter the billing schedule : ')
agent_input = raw_input('Enter the agents name (Contact must exist!): ')
insured_input = raw_input('Enter the insured name (Contact must exist!): ')
p1 = Policy(policy, date(int(year), int(month), int(day)), int(premium))
p1.billing_schedule = billing
# NOTE(review): .first() returns None when no contact matches, so q1.id /
# q2.id below raise AttributeError despite the prompt's warning — consider
# validating before use.
q1 = Contact.query.filter_by(name=agent_input)\
    .filter(Contact.role == 'Agent').first()
p1.agent = q1.id
q2 = Contact.query.filter_by(name =insured_input)\
    .filter(Contact.role == 'Named Insured').first()
p1.named_insured = q2.id
db.session.add(p1)
db.session.commit()
print("Policy added!")
b9633c13e19e68e39e67b982603d9039686bfc27 | Python | nayamama/weather_station | /weather_report/google_api.py | UTF-8 | 3,173 | 3.09375 | 3 | [] | no_license | from requests import request
from urllib.parse import urlencode
class GoogleMapsClient:
    """Thin client for the Google Maps Geocoding and Places web APIs.

    Holds the API key and, once a location has been geocoded, the latitude
    and longitude it resolved to.
    """

    lat = None              # latitude of the last geocoded location
    lng = None              # longitude of the last geocoded location
    data_type = 'json'      # response format requested from the APIs
    location_query = None   # address or zip code supplied at construction
    api_key = None

    def __init__(self, api_key=None, address_or_zip=None, *args, **kwargs):
        """Store the key and, if an address/zip was given, geocode it now.

        Raises
        ------
        Exception
            If no API key is provided.
        """
        super().__init__(*args, **kwargs)
        self.api_key = api_key
        if self.api_key is None:
            raise Exception("API key is required!")
        self.location_query = address_or_zip
        if self.location_query is not None:
            self.extract_lat_lng()

    @staticmethod
    def response(endpoint, params):
        """GET `endpoint` with url-encoded `params` and return the JSON body.

        Raises Exception on any non-2xx status code.
        """
        url_params = urlencode(params)
        url = "{}?{}".format(endpoint, url_params)
        response = request('GET', url)
        # BUG FIX: range(200, 299) excluded status 299; 2xx is 200-299.
        if response.status_code not in range(200, 300):
            raise Exception('The request is failed!')
        return response.json()

    def extract_lat_lng(self, location=None):
        """Geocode an address (or the stored location_query) to (lat, lng).

        Returns the (lat, lng) tuple and caches it on the instance. On a
        failed geocode, returns the API's 'error_message' string instead —
        callers should check the return type.
        """
        geocode = {}
        loc_query = self.location_query
        if location is not None:
            loc_query = location
        endpoint = "https://maps.googleapis.com/maps/api/geocode/{}".format(self.data_type)
        params = {"address": loc_query, "key": self.api_key}
        response = self.response(endpoint, params)
        try:
            geocode = response['results'][0]['geometry']['location']
        # narrowed from a bare except: these are the failures an empty or
        # malformed geocoding response actually produces
        except (KeyError, IndexError, TypeError):
            return response['error_message']
        lat, lng = geocode.get('lat'), geocode.get('lng')
        self.lat = lat
        self.lng = lng
        return lat, lng

    def nearby_search(self, keyword='Chinese Restaurant', location=None, radius=1000):
        """Search for places matching `keyword` within `radius` meters.

        Searches around `location` if given, otherwise around the cached
        coordinates.
        """
        lat, lng = self.lat, self.lng
        if location is not None:
            # BUG FIX: the original called self.extract_lat_lng() without
            # passing `location`, silently re-geocoding the stored query.
            lat, lng = self.extract_lat_lng(location)
        endpoint = f"https://maps.googleapis.com/maps/api/place/nearbysearch/{self.data_type}"
        params = {
            'key': self.api_key,
            'location': f'{lat}, {lng}',
            'radius': radius,
            'keyword': keyword
        }
        return self.response(endpoint, params)

    def detail_place_info(self, place_id=None):
        """Retrieve name, address, phone, and rating for a given place ID."""
        endpoint = "https://maps.googleapis.com/maps/api/place/details/json"
        params = {
            'place_id': f'{place_id}',
            'fields': 'formatted_address,name,rating,formatted_phone_number',
            'key': self.api_key
        }
        return self.response(endpoint, params)

    def get_top_5_places(self):
        """Return detailed info for the 5 highest-rated nearby places."""
        restaurant_list = self.nearby_search()
        # BUG FIX: the original key ('rating' not in k, ...) with
        # reverse=True sorted UNRATED places first; rank rated places by
        # rating (descending) and treat a missing rating as 0.
        top5_restaurant = sorted(restaurant_list['results'], reverse=True,
                                 key=lambda k: k.get('rating', 0))[:5]
        place_id_list = [r['place_id'] for r in top5_restaurant]
        # `pid` instead of `id`, which shadowed the builtin
        stores = [self.detail_place_info(pid)['result'] for pid in place_id_list]
        return stores
| true |
8dd287bb470958f12af03830f0626770eab01bb8 | Python | kromdeniz/miles_to_km | /main.py | UTF-8 | 673 | 3.328125 | 3 | [] | no_license | from tkinter import *
# Tiny tkinter app: type a number of miles, press Calculate, see kilometres.
window = Tk()
window.title("Miles to KM Converter")
window.minsize(width=200, height=140)
window.config(padx=10, pady=25)

# Renamed from `input`, which shadowed the builtin of the same name.
miles_input = Entry(width=2)
miles_input.insert(0, " 0")
miles_input.grid(column=2, row=1)

miles = Label(text="Miles")
miles.grid(column=3, row=1)

is_equal = Label(text="is equal to")
is_equal.grid(column=1, row=2)


def calc():
    """Convert the entered miles to km (3 decimals) and update the label."""
    # local renamed from `calc`, which shadowed this function's own name
    result = round(int(miles_input.get()) * 1.609344, 3)
    km.config(text=result)


km = Label(text="0")
km.grid(column=2, row=2)

km_label = Label(text="Km")
km_label.grid(row=2, column=3)

button = Button(text="Calculate", command=calc)
button.grid(row=3, column=2)

window.mainloop()
f62a36c0d32d0e78ab39fbe59d154566b15f3145 | Python | bian-hengwei/LeetCode | /codes/5.最长回文子串_2.py | UTF-8 | 700 | 3.359375 | 3 | [] | no_license | #
# @lc app=leetcode.cn id=5 lang=python3
#
# [5] 最长回文子串
# @lc code=start
# HashMap method (self-written)
class Solution:
    def longestPalindrome(self, s: str) -> str:
        """Return the longest palindromic substring of s.

        For every start index i, later occurrences of s[i] are tried from
        the rightmost inward; the first palindrome found is the longest one
        starting at i, so the scan can stop there. O(n^2) worst case.
        """
        # BUG FIX: the original read s[0] unconditionally and raised
        # IndexError on an empty string.
        if not s:
            return ""
        # {character: [indices where it appears, ascending]}
        positions = {}
        for i, c in enumerate(s):
            positions.setdefault(c, []).append(i)
        best = s[0]
        for i, c in enumerate(s):
            occ = positions[c]
            # try the farthest matching character first
            for j in range(len(occ) - 1, 0, -1):
                if occ[j] == i:
                    break
                candidate = s[i:occ[j] + 1]
                if candidate == candidate[::-1]:
                    if len(candidate) > len(best):
                        best = candidate
                    # any closer match yields a shorter palindrome, so the
                    # original's continued scanning was wasted work
                    break
        return best
# @lc code=end
| true |
9c0f1ff85bc946718e499bfd4888a317f3649157 | Python | himeldev/Stack_Exchange_Autopsy | /Content_Generation_Model/Diseconomies_of_Scale.py | UTF-8 | 4,310 | 2.71875 | 3 | [] | no_license | import csv
import math
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
def cobb_douglas_answer(x, A, lambda_0, lambda_1):
    """Two-factor Cobb-Douglas production function.

    x is a pair of factor arrays (x[0]: questions, x[1]: answerers); A is
    total factor productivity and lambda_0/lambda_1 the output elasticities.
    """
    factor_0, factor_1 = x[0], x[1]
    return A * np.power(factor_0, lambda_0) * np.power(factor_1, lambda_1)
def cobb_douglas_question(x, A, lambda_0):
    """Single-factor Cobb-Douglas production function: A * x**lambda_0."""
    scaled = np.power(x, lambda_0)
    return A * scaled
figure_count = 1

data_file = open('C:/Users/Himel/Documents/GitHub/Stack_Exchange_Autopsy/Datasets/Site_Monthly_Stats.csv')
csv_data = csv.reader(data_file)


def _analyze_site(site, questions, answers, askers, answerers, users, fig_no):
    """Fit the Cobb-Douglas question/answer models for one site and save a
    scatter + regression + economic-model plot.

    The last accumulated month is discarded (as in the original flow) and
    sites with fewer than 24 remaining months are skipped. Returns the next
    free matplotlib figure number.
    """
    questions = questions[:-1]
    answers = answers[:-1]
    askers = askers[:-1]
    answerers = answerers[:-1]
    users = users[:-1]
    if len(questions) < 24:
        return fig_no  # not enough history to fit reliably

    optimal_parameters_question, covariance_of_parameters_question = curve_fit(
        cobb_douglas_question, askers, questions)
    answer_factors = np.array([questions, answerers])
    optimal_parameters_answer, covariance_of_parameters_answer = curve_fit(
        cobb_douglas_answer, answer_factors, answers,
        bounds=([0, 0, 0], [np.inf, 1.0, 1.0]))

    # project each month's asker/answerer share onto the sorted user counts
    fraction_of_askers = [float(a) / b for a, b in zip(askers, users)]
    fraction_of_answerers = [float(a) / b for a, b in zip(answerers, users)]
    users_sorted = sorted(users)
    potential_askers = [u * sum(fraction_of_askers) / float(len(fraction_of_askers)) for u in users_sorted]
    potential_answerers = [u * sum(fraction_of_answerers) / float(len(fraction_of_answerers)) for u in users_sorted]
    potential_questions = cobb_douglas_question(potential_askers, *optimal_parameters_question)
    potential_answers = cobb_douglas_answer(np.array([potential_questions, potential_answerers]), *optimal_parameters_answer)

    fig = plt.figure(fig_no)
    ax = fig.add_subplot(111)
    answers_per_question = [float(a) / b for a, b in zip(answers, questions)]
    ax.scatter(users, answers_per_question, color='black')
    fit = np.polyfit(users, answers_per_question, 1)
    fit_function = np.poly1d(fit)
    ax.plot(users, fit_function(users), 'b', label='Linear Regression')
    ax.plot(users_sorted, [float(a) / b for a, b in zip(potential_answers, potential_questions)], 'r', label='Economic Model')
    ax.legend(loc='upper right')
    ax.set_xlabel('No. of Users (U)')
    ax.set_ylabel('Avg. No. of Answers per Question (N_a/N_q)')
    fig.savefig('C:/Users/Himel/Documents/GitHub/Stack_Exchange_Autopsy/Figures/Cobb-Douglas/Scale/' + site)
    plt.close(fig)
    return fig_no + 1


first_row = next(csv_data)
current_site = first_row[0]

# per-site accumulators; columns: 2=questions, 3=answers, 6=askers,
# 7=answerers, 14=users
month_no_of_questions = [int(first_row[2])]
month_no_of_answers = [int(first_row[3])]
month_no_of_askers = [int(first_row[6])]
month_no_of_answerers = [int(first_row[7])]
month_no_of_users = [int(first_row[14])]

for row in csv_data:
    if row[0] == current_site:
        month_no_of_questions.append(int(row[2]))
        month_no_of_answers.append(int(row[3]))
        month_no_of_askers.append(int(row[6]))
        month_no_of_answerers.append(int(row[7]))
        month_no_of_users.append(int(row[14]))
    else:
        figure_count = _analyze_site(current_site, month_no_of_questions,
                                     month_no_of_answers, month_no_of_askers,
                                     month_no_of_answerers, month_no_of_users,
                                     figure_count)
        current_site = row[0]
        month_no_of_questions = [int(row[2])]
        month_no_of_answers = [int(row[3])]
        month_no_of_askers = [int(row[6])]
        month_no_of_answerers = [int(row[7])]
        month_no_of_users = [int(row[14])]

# BUG FIX: the original only processed a site when the NEXT site appeared,
# so the final site in the file was silently dropped.
figure_count = _analyze_site(current_site, month_no_of_questions,
                             month_no_of_answers, month_no_of_askers,
                             month_no_of_answerers, month_no_of_users,
                             figure_count)
data_file.close()
fe83e3bb108787ea71b7f814b282122c63632344 | Python | vignesh5698/python-workspace | /PycharmProjects/Numpy/numpy1.py | UTF-8 | 1,213 | 3.75 | 4 | [] | no_license | import numpy as np
# NumPy walkthrough script: array creation, sequences, random numbers,
# reductions, and reshaping/indexing.
list1 = [1, 2, 3]
print(np.array(list1))
arr = np.array(list1)
print("Array:")
print(arr)

# fixed typo: "Sepuence" -> "Sequence"
print("Sequence of n numbers:")
print(np.arange(0, 10))
print("Sequence of n numbers with stepSize 3:")
print(np.arange(0, 100, 3))

print("Print 5 zeros in Array:")
a = np.zeros(5)
print(a)
print("Print zeros in 3*3 Matrix:")
a = np.zeros((3, 3))
print(a)
print("Print 6 one's in 1D Array:")
b = np.ones(6)
print(b)
print("Print 1's in 5*5 Matrix:")
b = np.ones((5, 5))
print(b)

c = np.linspace(0, 100, 40)
# fixed typo: "divisons" -> "divisions"
print("It divides into 40 equal divisions between 0 to 100 (Incl. both end numbers):")
print(c)
c = np.linspace(0, 100, 3)
print("It divides from 0 to 100 into 3 equal divisions incl. both ends:")
print(c)

d = np.random.randint(0, 100)
print(d)
d = np.random.randint(0, 100, (3, 3))
print(d)

f = d.max()
# fixed label: d.max() is the maximum VALUE, not its index
print("Maximum Number:")
print(f)
g = d.min()
print("Minimum number:")
print(g)
h = d.argmax()
print("Maximum number index:")
print(h)
i = d.argmin()
print("Min num index:")
print(i)

# seeding makes the draws reproducible
np.random.seed(13)
e = np.random.randint(0, 50)
print(e)
np.random.seed(100)
v = np.random.randint(1, 100, 10)
x = v.reshape(5, 2)
print(x)
y = v.reshape(2, 5)
print(y)

z = np.arange(0, 100).reshape(10, 10)
print(z)
print(z[4, 3])   # single element: row 4, column 3
print(z[:, 0])   # first column
print(z[1, :])   # second row
c1445a06ab238a9d511486a63d84aec370a60553 | Python | bioelectric-interfaces/nfb_studio | /nfb_studio/serial/json/encoder.py | UTF-8 | 6,640 | 3.4375 | 3 | [] | no_license | """An object-aware JSON encoder."""
import json
from warnings import warn
from typing import Union
from ..hooks import Hooks
def _write_metadata(obj, data: dict) -> dict:
    """Stamp *data* with the class identity of *obj* under "__class__".

    The decoder uses this field to locate the class when reassembling the
    object. An existing "__class__" entry is overwritten with a warning.

    Returns
    -------
    data : dict
        The same dict that was passed in, for chaining.
    """
    if "__class__" in data:
        warn(
            "during serialization of " +
            str(obj) +
            " a \"__class__\" field is being overwritten"
        )

    cls = obj.__class__
    data["__class__"] = {
        "__module__": cls.__module__,
        "__qualname__": cls.__qualname__
    }
    return data
class JSONEncoder(json.JSONEncoder):
"""JSON encoder that provides tools to serialize custom objects.
You can add support for serializing your class in two ways:
- By adding a member function to your class: `def serialize(self) -> dict`;
- By adding an external function `def serialize(obj) -> dict` and passing it in a dict as the `hooks`
parameter. (`hooks` is a dict that matches a class to a serialization function.). This parameter also can accept a
Hooks object or a tuple of two dicts: a serialization dict and a deserialization dict (the latter is ignored).
When serializing an object, this encoder checks if that object's class has a function in `hooks` or has a
callable serialize() attribute. If that is the case, the resulting dict from calling the function will be used in
that object's place in json.
Functions in the `hooks` parameter take precedence over member functions.
.. warning::
JSONEncoder adds a field to the dict, produced from the object, called `__class__`. This field is used in the
JSONDecoder to create an instance of the class, where json data is then deserialized.
See Also
--------
nfb_studio.serialize.decoder.JSONDecoder : An object-aware JSON decoder.
"""
def __init__(self, *,
hooks: Union[dict, tuple, Hooks] = None,
metadata=True,
skipkeys=False,
ensure_ascii=False,
check_circular=True,
allow_nan=True,
sort_keys=False,
indent=None,
separators=None,
**kw):
"""Constructs the JSONEncoder object.
Mostly inherits JSONEncoder parameters from the standard json module, except for `default`, which is not
inherited and is ignored.
Parameters
----------
hooks : dict, tuple, or Hooks object (default: None)
A dict, mapping types to functions that can be used to serialize them in the format `def foo(obj) -> dict`,
a tuple containing such dict as it's element 0, or a `hooks.Hooks` object;
metadata : bool (default: True)
If True, each custom object is serialized with an additional metadata field called `__class__`. This field
is used in the JSONDecoder to create an instance of the class, where json data is then deserialized. If
False, this field is skipped, but the decoder will not be able to deserialize custom objects.
skipkeys : bool (default: False)
If False, then it is a TypeError to attempt encoding of keys that are not str, int, float or None. If
skipkeys is True, such items are simply skipped.
ensure_ascii : bool (default: False)
If True, the output is guaranteed to have all incoming non-ASCII characters escaped. If ensure_ascii is
False, these characters will be output as-is.
check_circular : bool (default: True)
If check_circular is True, then lists, dicts, and custom encoded objects will be checked for circular
references during encoding to prevent an infinite recursion (which would cause an OverflowError). Otherwise,
no such check takes place.
allow_nan : bool (default: True)
If True, then NaN, Infinity, and -Infinity will be encoded as such. This behavior is not JSON specification
compliant, but is consistent with most JavaScript based encoders and decoders. Otherwise, it will be a
ValueError to encode such floats.
sort_keys : bool (default: False)
If True, then the output of dictionaries will be sorted by key; this is useful for regression tests to
ensure that JSON serializations can be compared on a day-to-day basis.
indent : int, str, or None (default: None)
If indent is a non-negative integer or string, then JSON array elements and object members will be
pretty-printed with that indent level. An indent level of 0, negative, or "" will only insert newlines. None
(the default) selects the most compact representation. Using a positive integer indent indents that many
spaces per level. If indent is a string (such as `"\t"`), that string is used to indent each level.
separators : tuple (default: None)
If specified, separators should be an (item_separator, key_separator) tuple. The default is (', ', ': ') if
indent is None and (',', ': ') otherwise. To get the most compact JSON representation, you should specify
(',', ':') to eliminate whitespace.
"""
super().__init__(
skipkeys=skipkeys,
ensure_ascii=ensure_ascii,
check_circular=check_circular,
allow_nan=allow_nan,
sort_keys=sort_keys,
indent=indent,
separators=separators
)
if isinstance(hooks, dict):
self.hooks = hooks
elif isinstance(hooks, tuple): # hooks.Hooks is also a tuple
self.hooks = hooks[0] # Only serialization functions
else:
self.hooks = {}
self.metadata = metadata
    def default(self, o):
        """Implementation of `JSONEncoder`'s `default` method that enables the serialization logic.

        Resolution order: an exact-type hook registered in ``self.hooks`` wins;
        otherwise an object-provided ``serialize()`` method is used; otherwise
        defer to the base ``JSONEncoder`` (which raises TypeError).
        """
        # Hooks are keyed by exact type (no subclass lookup — `type(o)`, not isinstance).
        if type(o) in self.hooks:
            data = self.hooks[type(o)](o)
            if self.metadata:
                # _write_metadata is defined elsewhere in this module; presumably it
                # annotates `data` with type info for round-tripping — TODO confirm.
                _write_metadata(o, data)
            return data
        # Duck-typed fallback: any object exposing a callable `serialize()`.
        if hasattr(o, "serialize") and callable(o.serialize):
            data = o.serialize()
            if self.metadata:
                _write_metadata(o, data)
            return data
        return super().default(o)
| true |
866bbf989d19b8589762738ffd1f484166ff749c | Python | FernandoUrdapilleta/Python101 | /Set.py | UTF-8 | 1,237 | 4.75 | 5 | [] | no_license | # Set
# Set is a collection which is unordered and unindexed. No duplicate members.
thisset = {"apple", "banana", "cherry"}
print(thisset)
print("-------------------------------------------------------------------------------- Line 6")
# Check if "banana" is present in the set:
print("banana" in thisset)
print("-------------------------------------------------------------------------------- Line 11")
# Note: Once a set is created, you cannot change its items, but you can add new items.
#
# Add Items
# Add an item to a set, using the add() method:
thisset.add("orange")
print(thisset)
print("-------------------------------------------------------------------------------- Line 20")
# Add multiple items to a set, using the update() method:
thisset.update(["orange", "mango", "grapes"])
print(thisset)
print("-------------------------------------------------------------------------------- Line 26")
# Get the number of items in a set:
thisset = {"apple", "banana", "cherry"}
print(len(thisset))
print("-------------------------------------------------------------------------------- Line 32")
# Remove Item
# To remove an item in a set, use the remove(), or the discard() method.
thisset.remove("banana")
print(thisset)
| true |
ecf66a3f0cab999f7d64048ff55b1dd8dec62201 | Python | TranslucentSabre/pyChess | /pychess/app/chess.py | UTF-8 | 10,474 | 3.515625 | 4 | [] | no_license | #!/usr/bin/env python3
from colorama import init
from pychess.app.ChessGame import *
import cmd
"""This tries to make raw_input look like input for python 2.7
it does obscure the 2.7 version of input, but I am not using it anyway"""
try:
input = raw_input
except NameError:
pass
class Chess(cmd.Cmd):
intro = "Welcome to pyChess. Type help or ? to list commands.\nWritten by Tim Myers -- Version "+VERSION+"\n"
prompt = "pyChess# "
game = ChessGame()
def emptyline(self):
return
def do_show(self,arg):
"""Display the current board"""
print(self.game.showCurrentBoard())
def do_first(self,arg):
"""Go to the first move in the game"""
self.game.firstMove()
def do_last(self,arg):
"""Go to the last move in the game"""
self.game.lastMove()
def do_next(self,arg):
"""Go to the next move in the game"""
self.game.nextMove()
def do_previous(self,arg):
"""Go to the previous move in the game"""
self.game.previousMove()
def do_restart(self,arg):
"""Restart our current game"""
self.game.restartGame()
def do_move(self,arg):
"""Move a piece, this function takes two chess coordinates and an optional Piece to use for promotion if necessary, the first being the starting square of the piece to move and the second being the ending square of the move.\n
In order to perform a castle move, move the king to the final position required for the castle.
Ex. move b2 b4\n
move e7 f8 Q\n
move e8 c8"""
moves = arg.split()
if len(moves) < 2:
print("Two coordinates are required.")
return
if len(moves) > 3:
print("Only two coordinates and one promotion piece are accepted")
return
if len(moves) == 2:
"""Add the nonexistent promotion piece to the array"""
moves.append(None)
if self.game.twoCoordMove(moves[0], moves[1], moves[2]):
print(self.game.showPendingBoard())
if self._booleanPrompt("Are you sure this is the move you would like to make?"):
self.game.commitTurn()
else:
self.game.cancelTurn()
else:
print(self.game.lastError)
self.game.cancelTurn()
def do_algebra(self,arg):
"""Move a piece, this function takes one move in algebraic notation.\n
Ex. algebra Nf3\n
algebra O-O\n"""
move = arg.split()
if len(move) > 1:
print("Only one argument is valid.")
return
if self.game.algebraicMove(move[0]):
print(self.game.showPendingBoard())
if self._booleanPrompt("Are you sure this is the move you would like to make?"):
self.game.commitTurn()
else:
self.game.cancelTurn()
else:
print(self.game.lastError)
self.game.cancelTurn()
def do_valid(self,arg):
"""Print the valid moves of all pieces in play with no arguments or print the moves of just the piece at the coordiante given.\n
Ex. valid\n
valid f3\n"""
coord = arg.split()
if len(coord) == 1:
coord = coord[0]
moves = self.game.getValidMovesForPieceAtPosition(coord)
self.printValidMoves(coord, moves)
else:
moves = self.game.getAllValidMoves()
for player in [ self.game.whitePlayer, self.game.blackPlayer ]:
print("{0} Pieces:".format(player.color.name))
for piece in player.getAllPieces():
self.printValidMoves(piece.position, moves[piece.position])
print("")
def printValidMoves(self, coord, moves):
print("{0}({1})".format(moves[0], coord))
moves = moves[1]
if len(moves) == 0:
print(" None")
else:
for move in moves:
print(" {0}: ".format(move), end="")
print(*moves[move], sep=", ")
print("")
def do_load(self,arg):
"""Read all games from a file and make them available to be the current game, if no argument is given use the default import file configured,
if one is given use the argument as a filename to read a savegame from."""
if not self.game.loadSaveFile(arg):
print(self.game.lastError)
def do_save(self,arg):
"""Write the current list of games out to a file. This will erase the old savegame file. If no argument is given use the default export file configured,
if one is given use the argument as a filename to write the savegame to."""
if self._booleanPrompt("This will erase the contents of the the export file before writing. Continue?"):
if not self.game.writeSaveFile(arg):
print(self.game.lastError)
def do_config(self,arg):
"""Set or read configuration options. The first argument must be one of the following settings:
import (read/set default import file)
export (read/set default export file)
name (read/set the players real name)
location (read/set the physical location of the player)
strict (read/set strict algebraic parsing mode, if True only exactly formed algebraic notation is accepted)
files (read/set path to the location of save games and configuration
random (read/set whether we have random peices when starting a new game
threshold (read/set allowed piece value variance between players in random mode
If the second argument is given then the argument will be saved as the setting, if it is omitted then
the current value of the setting is printed to the screen."""
#Only split once, this allows the user to supply items with spaces in them
args = arg.split(None,1)
numOfArgs = len(args)
if numOfArgs == 0:
print("You must specify a configuration item to set or read.")
elif numOfArgs > 2:
print("Too many aguments provided.")
else:
if numOfArgs == 1:
value = self.game.getConfigItem(args[0])
if value != None:
print(value)
else:
print(self.game.lastError)
else:
if not self.game.setConfigItem(args[0], args[1]):
print(self.game.lastError)
def do_test(self,arg):
"""Run the unit tests that have been developed for pyChess"""
if(arg == "-v" or arg == "--verbose"):
verbose = True
else:
verbose = False
self.game.runTests(verbose)
def do_quit(self,arg):
"""Stop playing chess"""
return True
do_exit = do_quit
do_EOF = do_quit
def do_pgn(self, arg):
"""Perform various PGN related operations. The first argument must be one of the following keywords:
games : Displays the game index, White Player, Black Player, and Date for each game in the loaded file
select : Requires a further argument which is the game index, this makes that game the current game
new : Start a brand new game and make it the current game
reset : Remove all currently selectable games"""
args = arg.split()
numOfArgs = len(args)
if numOfArgs == 0:
print("You must specify a PGN operation.")
else:
if args[0] == "games":
currentGameIndex = self.game.getCurrentGameIndex()
for game in self.game.getGameHeaders():
if currentGameIndex == game.index:
print("***Current Game***")
print("Index : "+str(game.index+1))
print("Date: "+game.date.value)
print("White Player: "+game.white.value)
print("Black Player: "+game.black.value)
print("")
elif args[0] == "select":
if numOfArgs != 2:
print("You must specify a game index to load.")
return
if self.game.selectGame(int(args[1]) - 1):
if not self.game.readMovesFromCurrentGame():
print("That game had errors while loading moves...")
return
else:
print("Could not select that game...")
elif args[0] == "new":
self.game.startNewGame()
elif args[0] == "reset":
self.game.resetAllGames()
else:
print("You must specify a valid PGN operation.")
def _printTagTuple(self, tagTuple):
print (tagTuple[0]+": "+tagTuple[1])
def do_tags(self, arg):
"""View and set tags for the current game. Takes up to two arguments, the tag name and the tag value respectively.
With no arguments view all tags for the current game.
With one argument view the tag found using the tag name provided.
With two arguments create (or modify) the tag with the name provided using the value provided."""
#Only split once, this allows the user to supply items with spaces in them
args = arg.split(None,1)
numOfArgs = len(args)
if numOfArgs == 0:
for tag in self.game.getTags():
self._printTagTuple(tag)
elif numOfArgs == 1:
self._printTagTuple(self.game.getTag(args[0]))
elif numOfArgs == 2:
self.game.setTag(args[0], args[1])
else:
print("Invalid number of arguments")
def do_delete(self, arg):
"""Delete tags associated with the current game. The first argument must be the string "tag", and the second argument must be the name of the tag to delete.
Deleting on of the tags in the mandatory Seven Tag Roster will reset it to default instead of removing it."""
args = arg.split()
numOfArgs = len(args)
if numOfArgs == 0:
print("You must provide keyword \"tag\" and then a tag name.")
elif numOfArgs == 1:
print("You must provide a tag name.")
elif numOfArgs == 2:
if args[0].lower() != "tag":
print("The keyword \"tag\" must be the first argument.")
else:
self.game.deleteTag(args[1])
else:
print("Too many arguments given")
def help_help(self):
print("Display the help for one of the available commands.")
def _booleanPrompt(self, prompt):
confirmation = input(prompt+" [y/n]:")
if confirmation in ["y" , "Y" , "Yes" , "yes" , "YES"]:
return True
else:
return False
if __name__ == "__main__":
init()
try:
Chess().cmdloop()
except KeyboardInterrupt:
pass
| true |
cbdd9e3863a33e9e4953fa9e6f9af2d47f71e5a2 | Python | igemsoftware2021/TAU_Israel | /modules/promoters/intersect_motifs_2_org_final.py | UTF-8 | 3,956 | 3 | 3 | [] | no_license | import numpy as np
from scipy.stats import stats
import re
import pandas as pd
import os
import xml.etree.ElementTree as et
def extract_pssm_from_xml(fname):
    """Parse a MEME/MAST-style XML file and return ``{motif id: PSSM DataFrame}``.

    Each ``<motif>`` element has an ``id`` of the form ``"<index>-<id>"`` and
    ``<pos>`` children carrying per-nucleotide frequencies. The returned
    DataFrame is indexed by nucleotide ('A', 'C', 'G', 'T') with 1-based
    position numbers as columns.

    :param fname: path to the XML file
    :return: dict mapping motif id (text after the first '-') to its PSSM
    """
    pssms = dict()
    tree = et.parse(fname)
    root = tree.getroot()
    for m in root.findall('.//motif'):
        full_name = m.get('id')
        n = full_name.index('-')
        # Validate that the prefix before '-' is numeric (the value itself and
        # the motif width attribute were unused locals in the original code).
        int(full_name[:n])
        id_num = full_name[n + 1:]
        df = pd.DataFrame(index=['A', 'C', 'G', 'T'])
        for i, pos in enumerate(m.findall('pos')):
            freqs = [pos.get('A'), pos.get('C'), pos.get('G'), pos.get('T')]
            df[i + 1] = np.array(freqs, dtype=float)
        pssms[id_num] = df
    return pssms
def padding_opt(v1, v2):
    """
    inserts uniform distributions at the edges and calculates correlation between the two flattened pssms
    :param v1: the larger flattened vector (as a list)
    :param v2: the shorter flattened vector (as a list)
    :return: the highest correlation and pval between the motifs
    """
    # Number of 4-value (one PSSM column) slots v2 can be shifted by.
    free_slots = int((len(v1) - len(v2)) / 4)
    best_corr, best_pval = -1, -1
    for left in range(free_slots + 1):
        right = free_slots - left
        # Pad with uniform nucleotide columns (0.25 each) on both sides.
        candidate = [0.25] * (4 * left) + v2 + [0.25] * (4 * right)
        cur_corr, cur_pval = stats.spearmanr(v1, candidate)
        if cur_corr > best_corr:
            best_corr, best_pval = cur_corr, cur_pval
    return best_corr, best_pval
def compare_pssms(pssm1, pssm2):
    """
    calculates correlation between 2 pssms
    :param pssm1: pssm for first motif as df
    :param pssm2: pssm for second motif as df
    :return: corr and p-value using spearman correlation
    """
    flat1 = list(pssm1.to_numpy().flatten())
    flat2 = list(pssm2.to_numpy().flatten())
    if len(flat1) == len(flat2):
        # Same width: correlate directly.
        corr, pval = stats.spearmanr(flat1, flat2)
    elif len(flat2) > len(flat1):
        # Different widths: pad the shorter one and take the best alignment.
        corr, pval = padding_opt(flat2, flat1)
    else:
        corr, pval = padding_opt(flat1, flat2)
    return corr, pval
def compare_pssm_sets(pssm1_dict, pssm2_dict):
    """
    make a df of correlations between all motif pairs
    :param pssm1_dict: formatted with the motif name as the key and pssm (as a df) as value
    :param pssm2_dict: same format as the first set of motifs
    :return: a df with pssm1 as rows and pssm2 as columns
    """
    corr_df = pd.DataFrame()
    pval_df = pd.DataFrame()
    # All-pairs comparison: rows come from set 1, columns from set 2.
    for row_name, row_pssm in pssm1_dict.items():
        for col_name, col_pssm in pssm2_dict.items():
            corr, pval = compare_pssms(row_pssm, col_pssm)
            corr_df.loc[row_name, col_name] = corr
            pval_df.loc[row_name, col_name] = pval
    return corr_df, pval_df
def find_selective_and_intergenic(selective_dict, intergenic_dict, final_percent_of_motifs=50):
    """
    find selective motifs to use for ranking the promoter options
    :param selective_dict: formatted with the motif name as the key and pssm (as a df) as value, from the mast xml file
    of 50% highly of the optimised against 50% highly of deoptimized
    :param intergenic_dict: same format, from mast xml of all promoters against intergenic sequences
    :param final_percent_of_motifs: the percent of motifs from the initial set of selective that will be returned (those with the highest correlation)
    :return: a dict of {selective motif name: max(corr with intergenic)} only for the motifs selected according to
    their correlation value and final_percent_of_motifs
    """
    corr_df, pval_df = compare_pssm_sets(selective_dict, intergenic_dict)
    # For each selective motif keep only its best correlation against any intergenic motif.
    max_corr_dict = corr_df.max(axis=1).to_dict()
    keep_count = round(len(max_corr_dict) * final_percent_of_motifs / 100)
    # BUG FIX: when keep_count rounded to 0 the original indexed corr_vals[-1],
    # turning the threshold into the *minimum* correlation and selecting every
    # motif instead of none.
    if keep_count <= 0:
        return {}
    corr_vals = sorted(max_corr_dict.values(), reverse=True)
    th = corr_vals[keep_count - 1]
    return {motif: corr for motif, corr in max_corr_dict.items() if corr >= th}
| true |
f72aee65f4c2a0576a5c967495f8bdba2fe2cc96 | Python | shahidul2k9/problem-solution | /leetcode/_1424_DiagonalTraverseII.py | UTF-8 | 770 | 3.078125 | 3 | [] | no_license | from typing import List
class Solution:
    def findDiagonalOrder(self, nums: List[List[int]]) -> List[int]:
        """Return the elements of a ragged 2-D list in bottom-up anti-diagonal order (LeetCode 1424)."""
        rows = len(nums)
        widest = max(len(row) for row in nums)
        # Bucket values by diagonal index r + c, remembering the source row.
        buckets = [[] for _ in range(rows + widest - 1)]
        for r, row in enumerate(nums):
            for c, value in enumerate(row):
                buckets[r + c].append((r, value))
        # Ragged rows can leave trailing diagonals empty; drop them.
        while not buckets[-1]:
            buckets.pop()
        result = []
        for bucket in buckets:
            # Within a diagonal, emit bottom row first (largest r).
            bucket.sort(key=lambda item: -item[0])
            result.extend(value for _, value in bucket)
        return result
| true |
d82a199601eae092ee6a3d1c645bcfc5410797ac | Python | juzb/torchsupport | /torchsupport/modules/normalization.py | UTF-8 | 2,975 | 2.640625 | 3 | [
"MIT"
] | permissive | import torch
import torch.nn as nn
import torch.nn.functional as func
class PixelNorm(nn.Module):
    """Pixelwise feature normalization: each spatial position's channel vector
    is divided by its p-norm over the channel dimension (dim=1)."""

    def __init__(self, eps=1e-16, p=2):
        super(PixelNorm, self).__init__()
        self.eps = eps
        # BUG FIX: the constructor previously hard-coded self.p = 2, silently
        # ignoring the `p` argument.
        self.p = p

    def forward(self, inputs):
        # eps keeps the division finite for an all-zero channel vector
        # (it was stored but never used in the original forward pass).
        return inputs / (torch.norm(inputs, dim=1, keepdim=True, p=self.p) + self.eps)
class AdaptiveInstanceNorm(nn.Module):
    """Instance normalization whose scale and bias are predicted per-sample
    from a style vector by two linear layers (AdaIN-style)."""

    def __init__(self, in_size, ada_size):
        super(AdaptiveInstanceNorm, self).__init__()
        self.scale = nn.Linear(ada_size, in_size)
        self.bias = nn.Linear(ada_size, in_size)

    def forward(self, inputs, style):
        # Per-sample, per-channel statistics over all spatial positions.
        flat = inputs.view(inputs.size(0), inputs.size(1), 1, 1, -1)
        mu = flat.mean(dim=-1)
        sigma = flat.std(dim=-1)
        gamma = self.scale(style).view(style.size(0), -1, 1, 1)
        beta = self.bias(style).view(style.size(0), -1, 1, 1)
        return gamma * (inputs - mu) / (sigma + 1e-6) + beta
class AdaptiveInstanceNormPP(AdaptiveInstanceNorm):
    """AdaIN with an extra style-scaled correction term built from the
    per-channel means, themselves normalized across the channel dimension."""

    def __init__(self, in_size, ada_size):
        super(AdaptiveInstanceNormPP, self).__init__(in_size, ada_size)
        self.mean_scale = nn.Linear(ada_size, in_size)

    def forward(self, inputs, style):
        flat = inputs.view(inputs.size(0), inputs.size(1), 1, 1, -1)
        # Spatial statistics per channel...
        mu = flat.mean(dim=-1)
        sigma = flat.std(dim=-1)
        # ...and statistics of those channel means across channels.
        mu_of_mu = mu.mean(dim=1, keepdim=True)
        sigma_of_mu = mu.std(dim=1, keepdim=True)
        batch = style.size(0)
        gamma = self.scale(style).view(batch, -1, 1, 1)
        beta = self.bias(style).view(batch, -1, 1, 1)
        mean_gamma = self.mean_scale(style).view(batch, -1, 1, 1)
        normalized = gamma * (inputs - mu) / (sigma + 1e-6) + beta
        correction = mean_gamma * (mu - mu_of_mu) / (sigma_of_mu + 1e-6)
        return normalized + correction
class AdaptiveBatchNorm(nn.Module):
    """Batch normalization with style-predicted affine parameters; the
    predicted scales are re-centered around 1 and the biases around 0
    across the channel dimension."""

    def __init__(self, in_size, ada_size):
        super(AdaptiveBatchNorm, self).__init__()
        self.scale = nn.Linear(ada_size, in_size)
        self.bias = nn.Linear(ada_size, in_size)

    def forward(self, inputs, style):
        # (Removed a dead local: the original computed an unused flattened view.)
        # Batch statistics, per channel/spatial position, over the batch dim.
        mean = inputs.mean(dim=0, keepdim=True)
        std = inputs.std(dim=0, keepdim=True)
        scale = self.scale(style).view(style.size(0), -1, 1, 1)
        scale = scale - scale.mean(dim=1, keepdim=True) + 1
        bias = self.bias(style).view(style.size(0), -1, 1, 1)
        bias = bias - bias.mean(dim=1, keepdim=True)
        return scale * (inputs - mean) / (std + 1e-6) + bias
class AdaptiveLayerNorm(nn.Module):
    """Layer normalization over the channel dimension with style-predicted
    affine parameters, re-centered like AdaptiveBatchNorm."""

    def __init__(self, in_size, ada_size):
        super(AdaptiveLayerNorm, self).__init__()
        self.scale = nn.Linear(ada_size, in_size)
        self.bias = nn.Linear(ada_size, in_size)

    def forward(self, inputs, style):
        # Statistics over channels, per sample and spatial position.
        mu = inputs.mean(dim=1, keepdim=True)
        sigma = inputs.std(dim=1, keepdim=True)
        gamma = self.scale(style).view(style.size(0), -1, 1, 1)
        gamma = gamma - gamma.mean(dim=1, keepdim=True) + 1
        beta = self.bias(style).view(style.size(0), -1, 1, 1)
        beta = beta - beta.mean(dim=1, keepdim=True)
        return gamma * (inputs - mu) / (sigma + 1e-6) + beta
| true |
3b914f147f5b938b4d669844bfde28a91f0ffc29 | Python | Arjun2001/coding | /hackerrank/hackerrank Equal Stacks.py | UTF-8 | 616 | 2.875 | 3 | [] | no_license | h1,h2,h3 = map(int,input().split())
# HackerRank "Equal Stacks": pop cylinders from the top of three stacks until
# all three reach the same total height, then print that height. The stack
# tops are the first list elements (index 0); the preceding input() line
# consumed the three stack sizes, which are not needed here.
h1 = list(map(int, input().split()))
h2 = list(map(int, input().split()))
h3 = list(map(int, input().split()))
s1, s2, s3 = sum(h1), sum(h2), sum(h3)
l1, l2, l3 = len(h1), len(h2), len(h3)
top1 = top2 = top3 = 0
while True:
    # A fully-consumed stack means the only common height left is 0.
    if top1 == l1 or top2 == l2 or top3 == l3:
        print(0)
        break
    if s1 == s2 == s3:
        print(s1)
        break
    # Shrink whichever stack is currently tallest by removing its top cylinder.
    if s1 >= s2 and s1 >= s3:
        s1 -= h1[top1]
        top1 += 1
    elif s2 >= s1 and s2 >= s3:
        s2 -= h2[top2]
        top2 += 1
    elif s3 >= s1 and s3 >= s2:  # BUG FIX: was "s3 >= s1 and s3 >= s1"
        s3 -= h3[top3]
        top3 += 1
| true |
557bf67e1fd4e12da65de3f3a42798f36ff7cf31 | Python | kljoshi/Python | /Exercise/CommaCode.py | UTF-8 | 428 | 4.5625 | 5 | [] | no_license | # Comma Code program
# function that takes in List
# and prints the list item in single line with last item on the list
# seperated by and.
def printWithComma(aList):
    # Print the list items on a single line, comma-separated, with the final
    # item prefixed by "and " ("Comma Code" exercise). No trailing newline.
    last = len(aList) - 1
    for idx, item in enumerate(aList):
        if idx == last:
            print('and ' + item, end='')
        else:
            print(item + ', ', end='')
spam = ['apple', 'banana', 'tofu', 'cat']
printWithComma(spam)
| true |
1debebf194463ecb9ecd27b15c7cb5c21342b89a | Python | nityamall/Python_Projects | /workspace/HelloWorld/TEST/test3.py | UTF-8 | 252 | 3.4375 | 3 | [] | no_license | n=input("ENTER A NO.")
# `n` was read as a string by the preceding input() call; this block prints:
#   - n! (factorial)               when n is even
#   - sum of proper divisors of n  when n is odd
n=int (n)
f=n
b=0
# NOTE(review): `b` is assigned but never used — presumably leftover; confirm before removing.
s=1
l=f
m=0
if(n%2==0):
    # Even: accumulate the factorial of n in s.
    for i in range (1,f+1):
        s=s*i
    print("%d"%s)
else:
    # Odd: sum every proper divisor of n (1 .. n-1) into m.
    for j in range (1,l):
        if(l%j==0):
            m=m+j
    print("%d"%m)
| true |
782e195126de7a742e758f8befa764411b814690 | Python | hsolbrig/pyjsg | /tests/test_basics/parser.py | UTF-8 | 1,129 | 2.625 | 3 | [
"CC0-1.0"
] | permissive | from typing import Callable, Optional
from antlr4 import InputStream, CommonTokenStream
from pyjsg.parser.jsgLexer import jsgLexer
from pyjsg.parser.jsgParser import jsgParser
from pyjsg.parser.jsgParserVisitor import jsgParserVisitor
from pyjsg.parser_impl.generate_python import ParseErrorListener
from pyjsg.parser_impl.jsg_doc_context import JSGDocContext
def parse(text: str, production_rule: str, listener) -> Optional[jsgParserVisitor]:
    """
    Parse text fragment according to supplied production rule and evaluate with listener class.
    Example: parse("{1,*}", "ebnfSuffix", JSGEbnf)
    """
    errors = ParseErrorListener()
    lexer = jsgLexer(InputStream(text))
    lexer.addErrorListener(errors)
    token_stream = CommonTokenStream(lexer)
    token_stream.fill()
    # Lexing already failed; no point running the parser.
    if errors.n_errors:
        return None
    parser = jsgParser(token_stream)
    parser.addErrorListener(errors)
    # Dispatch to the parser method named after the requested production rule.
    parse_tree = getattr(parser, production_rule)()
    visitor = listener(JSGDocContext())
    visitor.visit(parse_tree)
    return None if errors.n_errors else visitor
| true |
4a8375f199b6dc20374f930777ab8b35b71f003a | Python | mingyuchoo/django_study | /algorithm/tests/test_models_binarysearch.py | UTF-8 | 1,182 | 3.265625 | 3 | [
"MIT"
] | permissive | from django.test import TestCase
from algorithm.models import BinarySearchTree
class BinarySearchTestCase(TestCase):
def setUp(self) -> None:
self.bst = BinarySearchTree()
self.array = [21, 14, 28,11, 18, 25, 32, 5, 12, 15, 19, 23, 27, 30, 37]
def tearDown(self) -> None:
pass
def test_insert(self):
for i in self.array:
self.bst.insert(i)
self.assertIsNotNone(self.bst)
def test_find(self):
self.test_insert()
print('test_find > 15 = ', self.bst.find(15))
print('test_find > 17 = ', self.bst.find(17))
def test_delete(self):
self.test_insert()
print('test_delete > 14 = ', self.bst.delete(14))
def test_pre_order_traversal(self):
self.test_insert()
self.bst.pre_order_traversal()
def test_in_order_traversal(self):
self.test_insert()
self.bst.in_order_traversal()
def test_post_order_traversal(self):
self.test_insert()
self.bst.in_order_traversal()
def test_level_order_traversal(self):
self.test_insert()
self.bst.level_order_traversal() | true |
0013c9ea1ddc3f662af8f2ca4a9eda65e3ec554c | Python | Naimulnobel/python-learning | /ignorcasesensitive.py | UTF-8 | 262 | 2.90625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 20 15:03:32 2019
@author: Student mct
"""
import re
r = re.compile(r'nobel', re.I)
m=r.search('nobel is a programmer').group()
print(m)
search1=r.search('Nobel is a programmer').group()
print(search1) | true |
b6a1c9b3a3eab3960c7f43aea7f9441aadba4b54 | Python | pumbaacave/atcoder | /Leetcode/Hard/AutocompleteSystem.py | UTF-8 | 1,090 | 3.25 | 3 | [] | no_license | from collections import defaultdict
class AutocompleteSystem:
    """LeetCode 642: rank stored sentences by frequency (then lexicographically)
    as the user types; '#' terminates a sentence and records it as history."""

    def input_word(self):
        # The sentence typed so far in the current session.
        return ''.join(self.sb)

    def __init__(self, sentences: List[str], times: List[int]):
        self.cnt = defaultdict(int)   # sentence -> number of times typed
        self.sb = []                  # characters of the in-progress sentence
        self.temp_cnt = None          # kept for interface compatibility (unused)
        for word, fre in zip(sentences, times):
            self.cnt[word] = fre

    def input(self, c: str) -> List[str]:
        # '#' ends the sentence: record it in history and reset the buffer.
        if c == "#":
            cur_input = self.input_word()
            if cur_input:
                self.cnt[cur_input] += 1
            self.sb.clear()
            self.temp_cnt = None
            return []
        # Normal flow: extend the prefix and rank all matching sentences.
        self.sb.append(c)
        cur_input = self.input_word()
        bucket = []
        for k, v in self.cnt.items():
            if k.startswith(cur_input):
                bucket.append((-v, k))
        # Sort by (frequency desc, sentence asc) and return the top three.
        # BUG FIX: the original did `sorted(bucket)[:3][1]`, which selected a
        # single (count, sentence) tuple instead of slicing the top three.
        return [sentence for _, sentence in sorted(bucket)[:3]]
# Your AutocompleteSystem object will be instantiated and called as such:
# obj = AutocompleteSystem(sentences, times)
# param_1 = obj.input(c) | true |
80d46535471ab5ca4b3bf6f27532dda578846260 | Python | ml4ai/delphi | /scripts/program_analysis/call_graph.py | UTF-8 | 10,144 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | import pygraphviz as pgv
import sys
import os
def main():
    """CLI entry point: walk a DSSAT source tree, build the subroutine call
    graph, and write it as a Graphviz .dot file plus a flat edge list.

    argv[1] is the source-tree path (with trailing slash); optional argv[2]
    (0/1) keeps Fortran intrinsics in the graph when truthy.
    """
    top_dir_path = sys.argv[1]
    right_idx = top_dir_path[:-1].rfind("/")
    top_dir = top_dir_path[right_idx + 1 : -1]
    ignore_builtins = True
    if len(sys.argv) > 2:
        ignore_builtins = bool(int(sys.argv[2]))
    modules = make_module_table(top_dir_path, ignore_builtins=ignore_builtins)
    subs = [sub for module in modules.values() for sub in module.keys()]
    # Subroutine names appearing in more than one module must be module-qualified.
    non_unique_subs = list(set([sub for sub in subs if subs.count(sub) > 1]))
    subroutines = {
        (mod_name, sub): calls
        for mod_name, module in modules.items()
        for sub, calls in module.items()
    }
    g = pgv.AGraph(directed=True)
    all_edges = list()
    for (mod_name, sub_name), calls in subroutines.items():
        if sub_name in non_unique_subs:
            sub_name = "{}.{}".format(mod_name, sub_name)
            print("SUB is now: {}".format(sub_name))
        for call in calls:
            if call in non_unique_subs:
                # BUG FIX: both qualified names below were built from
                # `sub_name` instead of the callee's own name `call`.
                if call in modules[mod_name].keys():
                    call = "{}.{}".format(mod_name, call)
                else:
                    correct_mod = module_lookup(modules, mod_name, call)
                    call = "{}.{}".format(correct_mod, call)
                print("CALL is now: {}".format(call))
            g.add_edge(sub_name, call)
            all_edges.append((sub_name, call))
    outfile = "{}_call_graph".format(top_dir)
    if not ignore_builtins:
        outfile += "_with_builtins"
    outfile += ".dot"
    g.write(outfile)
    with open("{}_edges.txt".format(top_dir), "w+") as txtfile:
        for (inc, out) in all_edges:
            txtfile.write("{}, {}\n".format(inc, out))
def make_module_table(codebase_path, ignore_builtins=True):
    """Scan every .for file under *codebase_path* and return
    {module name: {subroutine name: [called subroutine names]}}.

    The module name is the file path after the 'dssat-csm/' directory, minus
    the extension, with '/' replaced by '.'. When *ignore_builtins* is true,
    calls to Fortran intrinsics (F_INTRINSICS) are dropped.
    """
    files = [
        os.path.join(root, elm)
        for root, dirs, files in os.walk(codebase_path)
        for elm in files
    ]
    fortran_files = [x for x in files if x.endswith(".for")]
    comment_strs = ["!", "!!", "C"]
    modules = dict()
    for fpath in fortran_files:
        dssat_idx = fpath.find("dssat-csm/")
        module_name = fpath[dssat_idx + 10 : -4].replace("/", ".")
        subroutines = dict()
        with open(fpath, "rb") as ffile:
            cur_calls = list()
            cur_subroutine = None
            for line in ffile.readlines():
                text = line.decode("ascii", errors="replace")
                tokens = text.split()
                # Skip blank lines and Fortran comment lines.
                if (
                    len(tokens) <= 0
                    or tokens[0] in comment_strs
                    or tokens[0].startswith("!")
                ):
                    continue
                if tokens[0] == "SUBROUTINE" or tokens[0] == "FUNCTION":
                    subroutine_name = tokens[1]
                    paren_idx = subroutine_name.find("(")
                    if paren_idx != -1:
                        subroutine_name = subroutine_name[:paren_idx]
                    # BUG FIX: the calls collected so far belong to the routine
                    # that just ended; the original stored them under the *new*
                    # subroutine's name.
                    if cur_subroutine is not None:
                        subroutines[cur_subroutine] = cur_calls
                    cur_calls = list()
                    cur_subroutine = subroutine_name
                if "CALL" in tokens:
                    call_idx = tokens.index("CALL")
                    if len(tokens) > call_idx + 1:
                        call_name = tokens[call_idx + 1]
                        paren = call_name.find("(")
                        if paren != -1:
                            call_name = call_name[:paren]
                        if not ignore_builtins or call_name.lower() not in F_INTRINSICS:
                            cur_calls.append(call_name)
            # BUG FIX: flush the final subroutine of the file, which the
            # original loop never recorded.
            if cur_subroutine is not None:
                subroutines[cur_subroutine] = cur_calls
        modules[module_name] = subroutines
    return modules
def module_lookup(all_modules, curr_mod, func):
    """Walk up the dotted module hierarchy from *curr_mod* until a module whose
    name contains the parent prefix defines *func*; return that module name."""
    parent = curr_mod[: curr_mod.rfind(".")]
    # Check every module sharing the parent prefix before climbing further up.
    for name in all_modules.keys():
        if parent in name and func in all_modules[name]:
            return name
    return module_lookup(all_modules, parent, func)
def print_module_table(mod_table):
    """Pretty-print the {module: {subroutine: [calls]}} table for inspection."""
    for module_name, subs in mod_table.items():
        print("\nMODULE: {}".format(module_name))
        for sub_name, called in subs.items():
            print("\tSUBROUTINE: {}".format(sub_name))
            # Only emit the CALLS header when the routine actually calls something.
            if len(called) > 0:
                print("\t\tCALLS:")
                for callee in called:
                    print("\t\t\t{}".format(callee))
F_INTRINSICS = frozenset(
[
"abs",
"abort",
"access",
"achar",
"acos",
"acosd",
"acosh",
"adjustl",
"adjustr",
"aimag",
"aint",
"alarm",
"all",
"allocated",
"and",
"anint",
"any",
"asin",
"asind",
"asinh",
"associated",
"atan",
"atand",
"atan2",
"atan2d",
"atanh",
"atomic_add",
"atomic_and",
"atomic_cas",
"atomic_define",
"atomic_fetch_add",
"atomic_fetch_and",
"atomic_fetch_or",
"atomic_fetch_xor",
"atomic_or",
"atomic_ref",
"atomic_xor",
"backtrace",
"bessel_j0",
"bessel_j1",
"bessel_jn",
"bessel_y0",
"bessel_y1",
"bessel_yn",
"bge",
"bgt",
"bit_size",
"ble",
"blt",
"btest",
"c_associated",
"c_f_pointer",
"c_f_procpointer",
"c_funloc",
"c_loc",
"c_sizeof",
"ceiling",
"char",
"chdir",
"chmod",
"cmplx",
"co_broadcast",
"co_max",
"co_min",
"co_reduce",
"co_sum",
"command_argument_count",
"compiler_options",
"compiler_version",
"complex",
"conjg",
"cos",
"cosd",
"cosh",
"cotan",
"cotand",
"count",
"cpu_time",
"cshift",
"ctime",
"date_and_time",
"dble",
"dcmplx",
"digits",
"dim",
"dot_product",
"dprod",
"dreal",
"dshiftl",
"dshiftr",
"dtime",
"eoshift",
"epsilon",
"erf",
"erfc",
"erfc_scaled",
"etime",
"event_query",
"execute_command_line",
"exit",
"exp",
"exponent",
"extends_type_of",
"fdate",
"fget",
"fgetc",
"floor",
"flush",
"fnum",
"fput",
"fputc",
"fraction",
"free",
"fseek",
"fstat",
"ftell",
"gamma",
"gerror",
"getarg",
"get_command",
"get_command_argument",
"getcwd",
"getenv",
"get_environment_variable",
"getgid",
"getlog",
"getpid",
"getuid",
"gmtime",
"hostnm",
"huge",
"hypot",
"iachar",
"iall",
"iand",
"iany",
"iargc",
"ibclr",
"ibits",
"ibset",
"ichar",
"idate",
"ieor",
"ierrno",
"image_index",
"index",
"int",
"int2",
"int8",
"ior",
"iparity",
"irand",
"is_iostat_end",
"is_iostat_eor",
"isatty",
"ishft",
"ishftc",
"isnan",
"itime",
"kill",
"kind",
"lbound",
"lcobound",
"leadz",
"len",
"len_trim",
"lge",
"lgt",
"link",
"lle",
"llt",
"lnblnk",
"loc",
"log",
"log10",
"log_gamma",
"logical",
"long",
"lshift",
"lstat",
"ltime",
"malloc",
"maskl",
"maskr",
"matmul",
"max",
"maxexponent",
"maxloc",
"maxval",
"mclock",
"mclock8",
"merge",
"merge_bits",
"min",
"minexponent",
"minloc",
"minval",
"mod",
"modulo",
"move_alloc",
"mvbits",
"nearest",
"new_line",
"nint",
"norm2",
"not",
"null",
"num_images",
"or",
"pack",
"parity",
"perror",
"popcnt",
"poppar",
"precision",
"present",
"product",
"radix",
"ran",
"rand",
"random_number",
"random_seed",
"range",
"rank ",
"real",
"rename",
"repeat",
"reshape",
"rrspacing",
"rshift",
"same_type_as",
"scale",
"scan",
"secnds",
"second",
"selected_char_kind",
"selected_int_kind",
"selected_real_kind",
"set_exponent",
"shape",
"shifta",
"shiftl",
"shiftr",
"sign",
"signal",
"sin",
"sind",
"sinh",
"size",
"sizeof",
"sleep",
"spacing",
"spread",
"sqrt",
"srand",
"stat",
"storage_size",
"sum",
"symlnk",
"system",
"system_clock",
"tan",
"tand",
"tanh",
"this_image",
"time",
"time8",
"tiny",
"trailz",
"transfer",
"transpose",
"trim",
"ttynam",
"ubound",
"ucobound",
"umask",
"unlink",
"unpack",
"verify",
"xor",
]
)
if __name__ == "__main__":
main()
| true |
81c87d6a63e342c996438106dc292f2bb8f1a576 | Python | YoungMaker/Machine-Learning-Analysis-With-CUDA | /optimal-road-trip/generate_euclidan_dataset.py | UTF-8 | 1,666 | 2.796875 | 3 | [
"MIT",
"LicenseRef-scancode-public-domain",
"CC-BY-4.0"
] | permissive | from itertools import combinations
import random
from os import path as pth
def create_point_list(num_points):
    """Return synthetic waypoint names "p0" .. "p{num_points-1}".

    BUG FIX: the original iterated with Python 2's `xrange`, which raises
    NameError on Python 3 (the rest of the file already uses print()).
    """
    return ["p" + str(x) for x in range(num_points)]
def create_tsv_file(all_waypoints, waypoints_file):
    """Write a TSV of every unordered waypoint pair with a random distance.

    Columns: waypoint1, waypoint2, distance_m, duration_s (duration is always 0).
    NOTE(review): pairs are re-read from frozenset keys, so the order of the
    two waypoints within a row is not guaranteed to match insertion order.
    """
    waypoint_distances = {}
    waypoint_durations = {}
    for (waypoint1, waypoint2) in combinations(all_waypoints, 2):
        # Random distance in meters, up to 16,093 km (~10,000 miles).
        # BUG FIX: random.randint requires integer bounds; the original float
        # literal 1.6093e+7 raises a TypeError on modern Python 3.
        waypoint_distances[frozenset([waypoint1, waypoint2])] = random.randint(8, 16093000)
        waypoint_durations[frozenset([waypoint1, waypoint2])] = 0
    print("Saving Waypoints")
    with open(waypoints_file, "w") as out_file:
        out_file.write("\t".join(["waypoint1",
                                  "waypoint2",
                                  "distance_m",
                                  "duration_s"]))
        for pair in waypoint_distances.keys():
            waypoint1, waypoint2 = tuple(pair)
            out_file.write("\n" +
                           "\t".join([waypoint1,
                                      waypoint2,
                                      str(waypoint_distances[pair]),
                                      str(waypoint_durations[pair])]))
if __name__ == '__main__':
random.seed()
i = 400
fname = "my-waypoints_auto"
while i < 1000:
fname = "my-waypoints_auto" + str(i) + ".tsv"
if not pth.isfile(fname):
create_tsv_file(create_point_list(i), "my-waypoints_auto" + str(i) + ".tsv")
i+= 200
| true |
22ac3d789aabc7b0a38ae20096350d521d4541b7 | Python | GandhiNN/Sideka | /desa_nlp.py | UTF-8 | 5,803 | 3.265625 | 3 | [] | no_license | #!/usr/bin/env/python3
# Import required packages
import glob
import nltk
import operator
import csv
import argparse
import os
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from collections import Counter
from string import punctuation
from pprint import pprint
# Import Dictionary, TfidfMode from gensim
from gensim.corpora.dictionary import Dictionary
from gensim.models.tfidfmodel import TfidfModel
# Create a function to choose desa to be analysed
def argument_list():
    """Parse the command line and return the requested 'desa' (village) name."""
    cli = argparse.ArgumentParser(description="Desa NLP Analyzer")
    cli.add_argument("-d", "--desa", required=True,
                     help="Pick your 'Desa' to be analyzed\n")
    parsed = cli.parse_args()
    return str(parsed.desa)
# Define website url dictionary: url
#url = 'http://www.pejeng.desa.id/post/'
# Map each supported 'desa' (village) name to the news archive URL it was scraped from.
url = {'pejeng': 'http://www.pejeng.desa.id/post/',
       'srinanti': 'http://srinanti.desa.id/kategori/kabar/',
       'wonosari': 'http://wonosari.desa.id/kategori/kabar/',
       'kertarahayu': 'http://kertarahayu.desa.id/kategori/kabar/',
       'banyuresmi': 'http://banyuresmi.desa.id/kategori/kabar/'
       }
desa = argument_list()
url_desa = url[desa]
# Load 'scraped' article body
# Articles are expected as pre-scraped .txt files under '<desa>_articles/'.
source_dir = desa + '_articles/'
## PREPROCESS Corpora
## Load all articles, and clean it
# Load the news articles, sorted by last modification time: articles
file_list = sorted(glob.glob(source_dir + '/*.txt'), key=os.path.getmtime)
articles = [open(f, 'r').read() for f in file_list]
# Preprocess articles: lowercasing and tokenizing all words
articles_lower_tokenize = [word_tokenize(t.lower())
                            for t in articles]
# Preprocess articles: removing 'indonesian' stopwords: articles_no_stop
stopwords_indonesian = stopwords.words('indonesian')
articles_no_stop = [[t for t in sublist if t not in stopwords_indonesian]
                     for sublist in articles_lower_tokenize]
# Preprocess articles: removing punctuation
# The '``' and "''" filters drop NLTK's open/close-quote tokens.
articles_no_empty = [[t for t in sublist if t]
                      for sublist in articles_no_stop]
articles_no_empty_intermediate_1 = [[t for t in sublist if '``' not in t]
                                     for sublist in articles_no_empty]
articles_no_empty_intermediate_2 = [[t for t in sublist if '\'\'' not in t]
                                     for sublist in articles_no_empty_intermediate_1]
articles_cleaned = [[t for t in sublist if t not in punctuation]
                     for sublist in articles_no_empty_intermediate_2]
print(len(articles_cleaned))
#print(articles_cleaned[34])
## Simple BAG-OF-WORDS Model
## Looking up top 5 most-common words in the corpora
# Create a counter object: counter
# set(words) deduplicates per article, so this counts document frequency.
counter = Counter([word for words in articles_cleaned for word in set(words)])
print('-----' * 8)
print("Top 10 Words according to frequency:")
print('-----' * 8)
print(counter.most_common(10), '\n')
## TF-IDF Using Gensim
# Create a gensim corpus and then apply Tfidf to that corpus
# Create a (gensim) dictionary object from the articles_cleaned: dictionary
dictionary = Dictionary(articles_cleaned)
# Create a gensim corpus
corpus = [dictionary.doc2bow(article) for article in articles_cleaned]
# Create a tfidf object from corpus
tfidf = TfidfModel(corpus)
print('-----' * 8)
print("TF-IDF Object from Corpus")
print('-----' * 8)
print(tfidf, '\n')
# Checkpoint, print articles_cleaned
print('-----' * 8)
print("Cleaned Articles:")
print('-----' * 8)
print(articles_cleaned[0], '\n')
# Check the tfidf weight in the first document of corpus
# corpus[1] = articles_cleaned[1]
print('-----' * 8)
print('TF-IDF for the first document in the corpus')
print('-----' * 8)
print(tfidf[corpus[1]], '\n')
# Test: getting the word inside a doc and its tf-idf weight
doc = corpus[1]
tfidf_weights = tfidf[doc]
# Sort the weights from highest to lowest: sorted_tfidf_weights
sorted_tfidf_weights = sorted(tfidf_weights, key=lambda w: w[1], reverse=True)
# Print the top 5 weighted words of doc
print('-----' * 8)
print('Top 5 Weighted Words for corpus[1]')
print('-----' * 8)
for term_id, weight in sorted_tfidf_weights[:5]:
    print(dictionary.get(term_id), weight)
# Get the TFIDF Weights of all terms found in corpus
# print as list of tuples, in descending order
print('\n')
# Create a container for the list of tuples: tfidf_tuples
tfidf_tuples = []
# Loop over the cleaned articles
# Get the top-5 of tfidf weight
for i in range(len(articles_cleaned)):
    doc = corpus[i]
    tfidf_weights = tfidf[doc]
    sorted_tfidf_weights = sorted(tfidf_weights, key=lambda w: w[1], reverse=True)
    #sorted_tfidf_weights = sorted(tfidf_weights, key=lambda w: w[1])
    #for term_id, weight in sorted_tfidf_weights[:5]:
    for term_id, weight in sorted_tfidf_weights:
        #tfidf_tuples.append((dictionary.get(term_id), weight))
        tfidf_tuples.append((dictionary.get(term_id), term_id, weight, 'corpus_{}'.format(i+1)))
# Sort the tfidf_tuples based on weight.
# Two stable sorts: first by term (secondary key), then by weight (primary key),
# so ties on weight come out in reverse-alphabetical term order.
#tfidf_tuples.sort(key=operator.itemgetter(1), reverse=True)
tfidf_tuples.sort(key=operator.itemgetter(0), reverse=True)
tfidf_tuples.sort(key=operator.itemgetter(2), reverse=True)
print('-----' * 8)
print('Term and Weight for entire corpora')
print('-----' * 8)
pprint(tfidf_tuples)
# Write results to csv
desa_csv = 'tf_idf_{}.csv'.format(desa)
with open(desa_csv, 'w') as f_out:
    csv_out = csv.writer(f_out)
    csv_out.writerow(['# TF-IDF Weighting From {}'.format(url_desa)])
    csv_out.writerow(['term', 'term_id', 'weight', 'corpus_id'])
    # Since we have already sorted the tfidf_tuples in descending order,
    # only each term's highest-weighted row is written; later duplicates
    # of the same term are skipped.
    seen = set()
    for row in tfidf_tuples:
        if row[0] in seen:
            continue
        seen.add(row[0])
        csv_out.writerow(row)
| true |
64dccec76b336d4edcffb6c05edc6e25d0f2c30d | Python | karatugo/project-euler | /problem10.py | UTF-8 | 327 | 3.421875 | 3 | [] | no_license | from math import sqrt
N = 2000000
sum_of_primes = 0
def is_prime(x):
if x < 2:
return False
for i in range (2, int(sqrt(x)) + 1):
if x %i == 0:
return False
return True
list_of_primes = [x for x in range(N) if is_prime(x)]
for prime in list_of_primes:
#print prime
sum_of_primes += prime
print sum_of_primes | true |
79edd9aa74fb1837c76eb94aa97ff70d8d67f3fe | Python | DannyRH27/RektCode | /Python/Easy/validPalindrome2.py | UTF-8 | 535 | 3.703125 | 4 | [] | no_license | def validPalindrome(s):
if s == s[::-1]:
return True
left, right = 0, len(s)-1
while left < right:
letter1 = s[left]
letter2 = s[right]
if letter1 != letter2:
p = s[0:left] + s[left+1:]
q = s[0:right] + s[right+1:]
break
else:
left +=1
right -=1
if p == p[::-1] or q == q[::-1]:
return True
return False
# Original author's smoke tests: each must hold for a correct implementation.
assert(validPalindrome("aba") == True)
assert(validPalindrome("abca") == True)
assert(validPalindrome("abcbba") == True)
assert(validPalindrome("cbbcc") == True)
c797faee5c17f630e280180e9171725cf7dbb643 | Python | inkyu0103/BOJ | /DataStructure/13975.py | UTF-8 | 467 | 3.0625 | 3 | [] | no_license | # 13975 파일 합치기 3
import sys
import heapq
input = sys.stdin.readline
def sol():
tc = int(input())
for _ in range(tc):
N = int(input())
q = list(map(int, input().split()))
heapq.heapify(q)
answer = 0
while True:
_sum = heapq.heappop(q) + heapq.heappop(q)
answer += _sum
if not q:
break
heapq.heappush(q, _sum)
print(answer)
sol()
| true |
22f0b6a492b434834b8274b7f7c459b659523206 | Python | vivevincere/internet-thoughts | /python/youtube_api.py | UTF-8 | 3,250 | 2.890625 | 3 | [] | no_license | import requests
import os
import heapq
import googleapiclient.discovery
#given a searchTerm, returns a list of videoIDs
def searchForVideos(searchTerm, language,numberOfVideos):
    """Search YouTube for searchTerm and return up to numberOfVideos video IDs.

    Pages through the Data API search.list endpoint 50 results at a time.
    NOTE(review): api_service_name, api_version and DEVELOPER_KEY are module
    globals defined elsewhere -- confirm they are set before calling.
    """
    youtube = googleapiclient.discovery.build(
        api_service_name, api_version, developerKey = DEVELOPER_KEY)
    thelist = []
    nextToken = None
    while numberOfVideos > 0:
        curNum = 50  # search.list caps maxResults at 50 per page
        if numberOfVideos < curNum:
            curNum = numberOfVideos
        request = youtube.search().list(
            part="snippet",
            maxResults = curNum,
            q= searchTerm,
            regionCode="US",
            type = "video",
            pageToken = nextToken,
            relevanceLanguage= language
        )
        response = request.execute()
        # Consume this page's results *before* checking for another page --
        # the original broke out first and silently dropped the final page.
        for parentVideo in response['items']:
            thelist.append(parentVideo['id']['videoId'])
        numberOfVideos -= curNum
        nextToken = response.get('nextPageToken')
        if nextToken is None:
            break
    return thelist
#given a videoID, retrieves a list of comment tuples [likeCount,comment]
def getCommentThread(videoID, numberOfComments):
    """Return up to numberOfComments [likeCount, text] pairs for one video.

    Fetches top-level comments via commentThreads.list ordered by relevance
    (most popular first); switch order to "time" for the most recent instead.
    NOTE(review): api_service_name, api_version and DEVELOPER_KEY are module
    globals defined elsewhere -- confirm they are set before calling.
    """
    youtube = googleapiclient.discovery.build(
        api_service_name, api_version, developerKey = DEVELOPER_KEY)
    thelist = []
    nextToken = None
    while numberOfComments > 0:
        curNum = 100  # commentThreads.list caps maxResults at 100 per page
        if numberOfComments < curNum:
            curNum = numberOfComments
        request = youtube.commentThreads().list(
            part="snippet,id",
            videoId= videoID,
            pageToken = nextToken,
            maxResults = curNum,
            order = "relevance"
        )
        response = request.execute()
        # Consume the page before deciding whether to continue, so the last
        # page is not discarded (the original broke out before appending).
        for parentComment in response['items']:
            snippet = parentComment['snippet']['topLevelComment']['snippet']
            thelist.append([snippet['likeCount'], snippet['textDisplay']])
        numberOfComments -= curNum
        nextToken = response.get('nextPageToken')
        if nextToken is None:
            break
    return thelist
#Gets a list of comment tuples [likeCount,comment]
#searchTerm is the keyword, videoCount is the number of videos to get comments from, language is the 2 character representation of desired language e.g. "en"
def getCommentsFromVideos(searchTerm, videoCount, commentsPerVideo, language):
    """Collect [likeCount, comment] pairs from the top videos matching searchTerm.

    searchTerm is the keyword, videoCount is how many videos to sample,
    language is the two-letter language code (e.g. "en").
    """
    commentList = []
    for video_id in searchForVideos(searchTerm, language, videoCount):
        commentList.extend(getCommentThread(video_id, commentsPerVideo))
    return commentList
#Gets the topCommentCount most liked comments from a list of comment tuples [likeCount, comment]
def getMostLiked(comments, topCommentCount):
    """Return the topCommentCount most-liked entries from [likeCount, comment]
    pairs, as a min-heap-ordered list (smallest like-count at index 0)."""
    top = []
    for entry in comments:
        if len(top) < topCommentCount:
            # Heap not full yet: take everything.
            heapq.heappush(top, entry)
        elif entry[0] > top[0][0]:
            # Beats the current minimum: evict it and keep this entry.
            heapq.heappop(top)
            heapq.heappush(top, entry)
    return top
#comments = getCommentThread("uQYLGiuQqpA",1000)
#print(getMostLiked(comments, 20))
#print(getCommentsFromVideos("BTS", 1, 10, "en"))
# Demo: dump up to 500 "BTS" comments (5 videos x 100) to comments.txt,
# separated by blank lines. Requires API credentials and network access.
with open("comments.txt", "w") as c:
    comments = getCommentsFromVideos("BTS", 5, 100, "en")
    print(len(comments))
    c.writelines(['\n\n'+com[1] for com in comments])
515af20fdd05e162c3c0794618c4753ee47f8727 | Python | dfdazac/gradcam-test | /cifar10_train.py | UTF-8 | 6,696 | 3.234375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
1. Loading and normalizing CIFAR10
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Using ``torchvision``, it’s extremely easy to load CIFAR10.
"""
import torch
import torchvision
import torchvision.transforms as transforms
# Run on GPU when available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
########################################################################
# The output of torchvision datasets are PILImage images of range [0, 1].
# We transform them to Tensors of normalized range [-1, 1].
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=2)
# Human-readable labels for the 10 CIFAR-10 classes, index-aligned with targets.
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
########################################################################
# Let us show some of the training images, for fun.
import numpy as np
# get some random training images
dataiter = iter(trainloader)
# NOTE(review): dataiter.next() is the old iterator API; newer PyTorch
# requires next(dataiter) -- confirm against the pinned torch version.
images, labels = dataiter.next()
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
########################################################################
# 2. Define a Convolution Neural Network
import torch.nn as nn
import torch.nn.functional as F
# Net is the project-local model defined in net.py.
from net import Net
net = Net()
net.to(device)
########################################################################
# 3. Define a Loss function and optimizer
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
########################################################################
# 4. Train the network: standard zero-grad / forward / backward / step loop.
for epoch in range(5):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.item()
        if i % 2000 == 1999:    # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
print('Finished Training')
########################################################################
# 5. Test the network on the test data
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
dataiter = iter(testloader)
# NOTE(review): .next() is the old iterator API; newer PyTorch needs next(dataiter).
images, labels = dataiter.next()
images = images.to(device)
# print images
#imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
outputs = net(images)
########################################################################
# The outputs are energies for the 10 classes.
# Higher the energy for a class, the more the network
# thinks that the image is of the particular class.
# So, let's get the index of the highest energy:
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]
                              for j in range(4)))
########################################################################
# Let us look at how the network performs on the whole dataset.
# no_grad() disables autograd bookkeeping during evaluation.
correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        images, labels = images.to(device), labels.to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
########################################################################
# Hmmm, what are the classes that performed well, and the classes that did
# not perform well (per-class accuracy; the range(4) matches batch_size=4):
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
    for data in testloader:
        images, labels = data
        images, labels = images.to(device), labels.to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        c = (predicted == labels).squeeze()
        for i in range(4):
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1
for i in range(10):
    print('Accuracy of %5s : %2d %%' % (
        classes[i], 100 * class_correct[i] / class_total[i]))
# Persist the trained weights for later reuse (e.g. Grad-CAM inspection).
torch.save(net.state_dict(), "net.pt")
########################################################################
# Okay, so what next?
#
# How do we run these neural networks on the GPU?
#
# Training on GPU
# ----------------
# Just like how you transfer a Tensor on to the GPU, you transfer the neural
# net onto the GPU.
#
# Let's first define our device as the first visible cuda device if we have
# CUDA available:
########################################################################
# The rest of this section assumes that `device` is a CUDA device.
#
# Then these methods will recursively go over all modules and convert their
# parameters and buffers to CUDA tensors:
#
# .. code:: python
#
# net.to(device)
#
#
# Remember that you will have to send the inputs and targets at every step
# to the GPU too:
#
# .. code:: python
#
# inputs, labels = inputs.to(device), labels.to(device)
#
# Why don't I notice a MASSIVE speedup compared to CPU? Because your network
# is really small.
#
# **Exercise:** Try increasing the width of your network (argument 2 of
# the first ``nn.Conv2d``, and argument 1 of the second ``nn.Conv2d`` –
# they need to be the same number), see what kind of speedup you get.
#
# **Goals achieved**:
#
# - Understanding PyTorch's Tensor library and neural networks at a high level.
# - Train a small neural network to classify images
#
# Training on multiple GPUs
# -------------------------
# If you want to see even more MASSIVE speedup using all of your GPUs,
# please check out :doc:`data_parallel_tutorial`.
| true |
68a1c7130a2878a6e28e3effa5a952134cc3598f | Python | WoolseyWorkshop/Article-Documenting-Python-Programs-With-Sphinx | /MySphinxExample/src/sphinx_example.py | UTF-8 | 3,110 | 3.71875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env python3
"""An example Python program with Sphinx style comments.
Description
-----------
An example Python program that demonstrates how to use Sphinx (reStructuredText)
style comments.
Libraries/Modules
-----------------
- *time* Standard Library (https://docs.python.org/3/library/time.html)
- Provides access to the *sleep* function.
- *sensors* Module (local)
- Provides access to the *Sensor* and *TempSensor* classes.
Notes
-----
- Comments are Sphinx (reStructuredText) compatible.
TODO
----
- None.
Author(s)
---------
- Created by John Woolsey on 05/27/2020.
- Modified by John Woolsey on 04/26/2023.
Copyright (c) 2020 Woolsey Workshop. All rights reserved.
Members
-------
"""
# Imports
from time import sleep
import sensors
# Global Constants
DEBUG: bool = True
"""The mode of operation; `False` = normal, `True` = debug."""
MIN_BASE: int = 1
"""The minimum number to map."""
MAX_BASE: int = 10
"""The maximum number to map."""
MIN_MAPPED: int = 0
"""The minimum mapped value."""
MAX_MAPPED: int = 255
"""The maximum mapped value."""
# Functions
def map_range(number: float, in_min: float, in_max: float, out_min: float, out_max: float, constrained: bool = True) -> float:
    """Maps a value from one range to another.

    This function takes a value within an input range and maps it to the
    equivalent value within an output range, maintaining the relative position
    of the value within the range.

    :param number: The value to be mapped.
    :type number: float
    :param in_min: The minimum value of the input range.
    :type in_min: float
    :param in_max: The maximum value of the input range.
    :type in_max: float
    :param out_min: The minimum value of the output range.
    :type out_min: float
    :param out_max: The maximum value of the output range.
    :type out_max: float
    :param constrained: If `True`, the mapped value is constrained to the output
        range; default is `True`.
    :type constrained: bool
    :return: The mapped value.
    :rtype: float
    """
    mapped = out_min  # degenerate input range (in_min == in_max) maps to out_min
    if in_max - in_min != 0:
        mapped = (number - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
    # Bug fix: the original always clamped, silently ignoring `constrained`.
    if constrained:
        if out_min <= out_max:
            mapped = max(min(mapped, out_max), out_min)
        else:
            # Inverted output range: clamp with the bounds swapped.
            mapped = min(max(mapped, out_max), out_min)
    return mapped
def main() -> None:
"""The main program entry."""
if DEBUG:
print("Running in DEBUG mode. Turn off for normal operation.")
# Map numbers
for i in range(MIN_BASE, MAX_BASE + 1):
print(
f"Base: {i:2d}, Mapped: "
f"{round(map_range(i, MIN_BASE, MAX_BASE, MIN_MAPPED, MAX_MAPPED)):3d}"
)
sleep(0.25) # wait 250 milliseconds
# Sensors
sensor: int = sensors.Sensor("MySensor")
print(sensor)
temp_in: int = sensors.TempSensor("Inside")
print(temp_in)
temp_out: int = sensors.TempSensor("Outside", "C")
print(temp_out)
if __name__ == "__main__":  # guard lets Sphinx import the module without running main()
    main()
| true |
90f2ed628410e62bbdbdbbf9daa18741b995b08e | Python | kanwalk1115/DigitalCraftAssignments | /python assignments/factorial.py | UTF-8 | 132 | 3.984375 | 4 | [] | no_license | number= int(input("Enter a number"))
x = 1
for i in range(number):
x = x * (i + 1)
print (f"The factorial of {number} is {x}")
| true |
13255417a34927f01c992ed4903c7b7ea8d36afb | Python | imrehg/csbloch | /Greg/symmetry02.py | UTF-8 | 1,059 | 2.71875 | 3 | [] | no_license | from __future__ import division
from numpy import sqrt
from scipy import *
from physicspy.quantum import *
#~ F1 = 1/2
#~ F2 = 3/2
#~ M1 = -1/2
#~ M2 = -3/2
#~ q = M1-M2
#~ # Andy
#~ print "Andy-3j: ", threej(F1,1,F2,-M1,q,M2)
#~ # Cs-text
#~ print "Cs-3j : ", threej(F2,1,F1,M2,q,-M1)
#~ # Andy
#~ print "Andy-CG: ", sqrt(2*F2+1)*threej(F1,1,F2,-M1,q,M2)
#~ # Cs-text
#~ print "Cs-CG : ", sqrt(2*F1+1)*threej(F2,1,F1,M2,q,-M1)
#~ F1 = 1/2
#~ M1 = -1/2
#~ F2 = 3/2
#~ M2 = -1/2
#~ q = M2-M1
#~ print (2*F2+1)*threej(F1, 1, F2, M1, q, -M2)**2
#~ print ( 2*F1+1)*threej(F2, 1, F1, M2, -q, -M1)**2
#~ F2 = 5/2
#~ M2 = -5/2
#~ (2*F2+1)*threej(3/2,1,F2,-3/2,(3/2-M2),M2)**2
# Angular-momentum quantum numbers for the sum below (true division via
# the __future__ import at the top of this Python 2 file).
J2 = 3/2
F2 = 3
M2 = 0
J1 = 1/2
F1 = 4
M1 = arange(-F1,F1+1,1)
s = 0
# Sum squared Wigner 3-j x 6-j factors (from physicspy.quantum) over all
# magnetic sublevels mi of F1.
for mi in M1:
    s += (2*F2+1)*threej(F1,1,F2,-mi,(mi-M2),M2)**2 * (2*F1+1)*(2*J2+1)*sixj(F2, F1, 1, J1, J2, 7/2)**2
print s
# Second check with different quantum numbers: compare the two symmetric
# orderings of the 3-j symbol (Python 2 print statements).
F1 = 3/2
M1 = -1/2
F2 = 1/2
M2 = -1/2
#~ print (2*F2+1)*threej(F1,1,F2,-M1,(M1-M2),M2)**2
print threej(F1,1,F2,-M1,(M1-M2),M2)**2
print threej(F2,1,F1,M2,-(M2-M1),-M1)**2
| true |
5db1822beddadf561e051a1b457e7f4e13505d67 | Python | AlexRogalskiy/markflow | /markflow/_utils/_utils.py | UTF-8 | 702 | 2.859375 | 3 | [
"Apache-2.0"
] | permissive | import contextlib
import logging
from typing import Iterator
# Public API of this helper module.
__all__ = [
    "get_indent",
    "truncate_str",
    "redirect_info_logs_to_debug",
]

# Suffix appended to strings that truncate_str shortens.
ELLIPSIS = "..."
return len(line) - len(line.lstrip())
def truncate_str(str_: str, length: int) -> str:
if len(str_) <= length:
pass
elif len(ELLIPSIS) >= length:
str_ = "." * length
else:
truncation = max(0, length - len(ELLIPSIS))
str_ = str_[:truncation] + ELLIPSIS
return str_
@contextlib.contextmanager
def redirect_info_logs_to_debug() -> Iterator[None]:
old_info = logging.INFO
logging.INFO = logging.DEBUG
yield
logging.INFO = old_info
| true |
6ce346d61b96f2af72abd87b0930d62ae4c8c507 | Python | recuraki/PythonJunkTest | /atcoder/LeetCodeWeekly/353_a.py | UTF-8 | 339 | 3.234375 | 3 | [] | no_license | from typing import List, Tuple, Optional
from pprint import pprint
from collections import deque, defaultdict
class Solution:
    def theMaximumAchievableX(self, num: int, t: int) -> int:
        """LeetCode 2769: each of the t moves can raise x by 1 while also
        raising num by 1, so the best achievable x exceeds num by 2*t."""
        return num + 2 * t
st = Solution()
# Original author's smoke checks: each prints True when the answer is correct.
print(st.theMaximumAchievableX(num = 4, t = 1)==6)
print(st.theMaximumAchievableX(num = 3, t = 2)==7)
| true |
8abb307008d0154f8077ce5cea5948e33fd56bdd | Python | daigorowhite/avro_evaluate | /src/python/avro_test/avro_evaluater.py | UTF-8 | 1,171 | 2.75 | 3 | [] | no_license | # -*- coding:utf-8 -*-
import cStringIO
import avro.schema
import avro.io
from logging import getLogger, StreamHandler, DEBUG, basicConfig, INFO
from avro_serde import AvroSerde
from datetime import datetime
basicConfig()
logger = getLogger(__name__)
logger.setLevel(INFO)
schemaDescription = """
{"namespace": "example.avro",
"type": "record",
"name": "User",
"fields": [
{"name": "name", "type": "string"},
{"name": "id", "type": ["int", "null"]}
]
}
"""
print(schemaDescription)
schema = AvroSerde.gen_schema(schemaDescription)
data = {u'name':u'たろう', u'id':1}
class AvroEvaluater:
    @staticmethod
    def main():
        """Benchmark: serialize and deserialize `data` try_count times and
        log the elapsed wall-clock time."""
        try_count=100000
        logger.info("try_count= %s " , try_count)
        start_time=datetime.now()
        for n in range(try_count):
            # Serialize the record to its Avro byte representation.
            # NOTE(review): `bytes` shadows the builtin of the same name.
            bytes = AvroSerde.serialize(data, schema)
            # Log the serialized byte length (message text is Japanese: "byte length").
            logger.debug("バイト長:%d" , len(bytes))
            # --- Deserialize and log the round-tripped record.
            logger.debug(AvroSerde.deserialize(bytes,schema))
        end_time=datetime.now()
        logger.info("stime= %s , etime= %s" , start_time , end_time)
        logger.info("elapsed time= %s " , end_time - start_time)
edf80316f8d535f1c3d4846eef19cda4998127e2 | Python | neyudin/eeg_video_pattern_recognition | /EmotivEpoc/render.py | UTF-8 | 5,679 | 2.765625 | 3 | [] | no_license | #!/usr/bin/python
# Renders a window with graph values for each sensor and a box for gyro values.
try:
import psyco
psyco.full()
except ImportError:
print 'No psyco. Expect poor performance. Not really...'
import pygame
import platform
from pygame import FULLSCREEN
if platform.system() == "Windows":
import socket # Needed to prevent gevent crashing on Windows. (surfly / gevent issue #459)
import gevent
from emokit.emotiv import Emotiv
# Maps sensor contact-quality level (as a string) to the RGB line colour used
# when drawing: black=no contact, red=poor, yellow=fair, green=good, blue=best.
quality_color = {
    "0": (0, 0, 0),
    "1": (255, 0, 0),
    "2": (255, 0, 0),
    "3": (255, 255, 0),
    "4": (255, 255, 0),
    "5": (0, 255, 0),
    "6": (0, 255, 0),
    "7": (0, 255, 0),
    "8": (0, 0, 255),
    "9": (0, 0, 255),
    "10": (0, 0, 255),
    "11": (0, 0, 255),
    "12": (0, 0, 255),
    "13": (0, 0, 255),
    "14": (0, 0, 255),
    "15": (0, 0, 255),
    "16": (0, 0, 255),
    "17": (0, 0, 255),
}
# Five-level scale for old-model headsets...
old_quality_color = {
    "0": (0, 0, 0),
    "1": (255, 0, 0),
    "2": (255, 255, 0),
    "3": (0, 255, 0),
    "4": (0, 255, 0),
}
# ...immediately overridden here, making the dict above dead data.
# NOTE(review): remove the rebinding or the old map -- confirm intent.
old_quality_color = quality_color
# NOTE(review): p_scale appears unused in this file.
p_scale = 10
class Grapher(object):
    """
    Worker that draws a line for the sensor value.
    """

    def __init__(self, screen, name, i):
        """
        Initializes graph worker for sensor `name`, stacked at row `i`.
        Relies on the module-global `gheight` (row height) being set first.
        """
        self.screen = screen
        self.name = name
        # NOTE(review): self.range appears unused by the methods below.
        self.range = float(1 << 13)
        self.x_offset = 40
        self.y = i * gheight
        # Ring buffer of (value, quality, old_model) samples to draw.
        self.buffer = [(0, 0, False)]
        font = pygame.font.Font(None, 24)
        self.text = font.render(self.name, 1, (0, 0, 0))
        self.text_pos = self.text.get_rect()
        self.text_pos.centery = self.y + gheight
        self.first_packet = True
        self.y_offset = 0

    def update(self, packet):
        """
        Appends value and quality values to drawing buffer.
        """
        # Cap the buffer at the drawable width (800px window minus label offset).
        if len(self.buffer) == 800 - self.x_offset:
            self.buffer = self.buffer[1:]
        self.buffer.append([packet.sensors[self.name]['value'], packet.sensors[self.name]['quality'], packet.old_model])

    def calc_y(self, val):
        """
        Calculates line height from value, relative to the first sample seen.
        """
        return (val - self.y_offset) / 10 + gheight
        #return 0 - self.y_offset + gheight

    def draw(self):
        """
        Draws a line from values stored in buffer.
        """
        if len(self.buffer) == 0:
            return
        if self.first_packet:
            # Use the first sample as the vertical baseline for this sensor.
            self.y_offset = self.buffer[0][0]
            self.first_packet = False
        pos = self.x_offset, self.calc_y(self.buffer[0][0]) + self.y
        for i, (value, quality, old_model) in enumerate(self.buffer):
            y = self.calc_y(value) + self.y
            #y = self.calc_y(value - self.buffer[i - 1][0]) + self.y
            # Line colour encodes the sensor's contact quality.
            if old_model:
                color = old_quality_color[str(quality)]
            else:
                color = quality_color[str(quality)]
            pygame.draw.line(self.screen, color, pos, (self.x_offset + i, y))
            pos = (self.x_offset + i, y)
        self.screen.blit(self.text, self.text_pos)
def main():
    """
    Creates pygame window and graph drawing workers for each sensor.

    Event loop: ESC/close quits, 'f' toggles fullscreen, 'r' toggles
    recording packets into `recordings`. Gyro values move the cursor box.
    (Python 2 code: note the `except Exception, ex` syntax and py2 map().)
    """
    global gheight
    pygame.init()
    screen = pygame.display.set_mode((800, 600))
    graphers = []
    recordings = []
    recording = False
    record_packets = []
    updated = False
    cursor_x, cursor_y = 400, 300
    # One grapher row per Emotiv EPOC electrode, in this fixed order.
    for name in 'AF3 F7 F3 FC5 T7 P7 O1 O2 P8 T8 FC6 F4 F8 AF4'.split(' '):
        graphers.append(Grapher(screen, name, len(graphers)))
    fullscreen = False
    emotiv = Emotiv(display_output=True)
    # Run the headset reader cooperatively on a gevent greenlet.
    gevent.spawn(emotiv.setup)
    gevent.sleep(0)
    while emotiv.running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                emotiv.close()
                return
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    emotiv.close()
                    return
                elif event.key == pygame.K_f:
                    if fullscreen:
                        screen = pygame.display.set_mode((800, 600))
                        fullscreen = False
                    else:
                        screen = pygame.display.set_mode((800, 600), FULLSCREEN, 16)
                        fullscreen = True
                elif event.key == pygame.K_r:
                    if not recording:
                        record_packets = []
                        recording = True
                    else:
                        recording = False
                        recordings.append(list(record_packets))
                        record_packets = None
        packets_in_queue = 0
        # Drain up to 8 packets per frame so drawing keeps up with the stream.
        try:
            while packets_in_queue < 8:
                packet = emotiv.dequeue()
                if abs(packet.gyro_x) > 1:
                    cursor_x = max(0, min(cursor_x, 800))
                    cursor_x -= packet.gyro_x
                if abs(packet.gyro_y) > 1:
                    cursor_y += packet.gyro_y
                    cursor_y = max(0, min(cursor_y, 600))
                # Python 2: map() is eager, so this updates every grapher.
                map(lambda x: x.update(packet), graphers)
                if recording:
                    record_packets.append(packet)
                updated = True
                packets_in_queue += 1
        except Exception, ex:
            print ex
        if updated:
            screen.fill((225, 225, 225))
            map(lambda x: x.draw(), graphers)
            # White box shows the gyro-driven cursor position.
            pygame.draw.rect(screen, (255, 255, 255), (cursor_x - 5, cursor_y - 5, 10, 10), 0)
            pygame.display.flip()
            updated = False
        gevent.sleep(0)
try:
    # Per-sensor row height: 14 graphs stacked in a 580px area (Python 2
    # integer division here yields 41).
    gheight = 580 / 14
    main()
except Exception, e:
    print e
| true |
73911bad570ffcfc0d94d6f2df303847133a4c97 | Python | mola1129/atcoder | /contest/abc104/C.py | UTF-8 | 1,023 | 2.984375 | 3 | [
"MIT"
] | permissive | d, g = map(int, input().split())
# (問題数,ボーナス)のタプルで保存
p = [tuple(map(int, input().split())) for _ in range(d)]
ans = 1000
# 全完する問題の組み合わせを考える
for i in range(2 ** d):
cnt = 0
total = 0
for j in range(d):
# 全完する場合
if (i >> j) & 1:
# 問題数と得点&ボーナスを追加
cnt += p[j][0]
total += (j + 1) * 100 * p[j][0] + p[j][1]
# まだ目標に届かない場合
if total < g:
# 得点の高い問題から取り掛かるのか最適
for j in range(d - 1, -1, -1):
# 全完しない予定だった問題を考慮する
if (i >> j) & 1:
continue
for k in range(p[j][0]):
if total >= g:
break
# 得点と問題数を追加
total += 100 * (j + 1)
cnt += 1
# 最小のものを求める
ans = min(ans, cnt)
print(ans)
| true |
437c55045d17fe69c19f58d8505f2dea98a25f34 | Python | Pankaj-bhoi/Online-Quiz | /Quiz.py | UTF-8 | 2,656 | 2.96875 | 3 | [] | no_license | #Quiz Questions Link " https://www.indiabix.com/current-affairs/international/ "
x = {'This country hosted the communication exercise "Pacific Endeavor-2018 (PE-18)"\nunder the Multinational Communications Interoperability Program (MCIP), recently.'\
:{1:'South Korea',2:'Nepal',3:'Bangladesh',4:'Pakistan'},
'Rashida Tlaib set to become the 1st Muslim woman to be elected to the parliament of;'\
:{1:'Belgium',2:'Switzerland',3:'United States',4:'France'},
'This country became 3rd Asian nation to get STA 1 status from US.'\
:{1:'North Korea',2:'Philippines',3:'Indonesia',4:'Afghanistan'},
'External Affairs Minister Sushma Swaraj and this country minister discussed the bilateral ties on health, tourism, defence and security.'\
:{1:'Kyrgyzstan',2:'Uzbekistan',3:'Mongolia',4:'Afghanistan'},
'Which country got warning from UNICEF about the outbreak of cholera ?'\
:{1:'Pakistan',2:'Yemen',3:'Indonesia',4:'Iran'},
'India & __________ to cooperate in bamboo sector in Tripura.'\
:{1:'Russia',2:'Germany',3:'Japan',4:'France'},
'The US elevated this country\'s status in the export control regime and designated it as a Strategic Trade Authorization-1 (STA-1) country.'\
:{1:'Japan',2:'South Korea',3:'India',4:'Bangladesh'},
'With which country India sign Pact on Financial And Technical Cooperation?'\
:{1:'Germany',2:'Russia',3:'Israel',4:'Swedan'},
'Invest India and __________ Ministry sSign MoU for Technological Cooperation.'\
:{1:'UK',2:'UAE',3:'Malaysia',4:'Turkey'},
'India and __________ signed an MoU to promote investment facilitation.'\
:{1:'Russia',2:'France',3:'Italy',4:'South Africa'},
}
y = (2,3,4,1,2,3,3,1,2,2)
index = 0
score = 0
print("Welcome to The Quiz Challenge....")
print('There are 10 Questions... For every Questions contains 50 points..')
print("To proceed...press Enter..")
click = input()
print('The Questions is :')
for i,j in x.items():
print(i)
for n,m in j.items():
print(n,'.',m)
answer = int(input('Enter Option :'))
if answer == y[index]:
print('Correct...')
score+=50
index+=1
print('Score :',score)
else:
index+=1
print('Wrong...')
print('Score :',score)
if score>=450:
print("\n")
print('Excelent...')
print('\n')
elif score>=300 and score<450:
print("\n")
print('Very Good...Not Bad...')
print('\n')
elif score>=150 and score<300:
print("\n")
print('Good..')
print('\n')
else:
print("\n")
print('Better Luck Next Time..')
print('\n')
| true |
b888b8edb90f3e283f05a8c0bf0b976dac48c872 | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2205/60653/265351.py | UTF-8 | 273 | 2.671875 | 3 | [] | no_license | import math
m = int(input())
for v in range(0, m):
#a, b = map(int, input().split())
num = int(input())
ans = (math.factorial(num) // math.factorial(num // 2) // math.factorial(num // 2 + 1))%1000000007
if ans == 5200300:
ans = 208012
print(ans) | true |
1c2e0e8cc2aa7d90765d37991dc97b72bf1f8957 | Python | Crzzzhang/Hello-World | /MNIST_Conv.py | UTF-8 | 2,898 | 3.03125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 1 16:51:06 2019
@author: crzzzhang
"""
import tensorflow as tf
import input_data
# Load MNIST with one-hot labels from a local Windows path.
mnist=input_data.read_data_sets('D:\Dataset\MNIST',one_hot=True)
def weight_variable(shape):
    # Truncated normal initializer; mean and stddev are configurable.
    initial=tf.truncated_normal(shape,stddev=0.1)
    return tf.Variable(initial)
def bias_variable(shape):
    # Small positive constant bias to keep ReLU units initially active.
    initial=tf.constant(0.1,shape=shape)
    return tf.Variable(initial)
def conv2d(x,W):
    # 2-D convolution, stride 1, SAME padding (output keeps spatial size).
    return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding='SAME')
def max_pool_2x2(x):
    # 2x2 max pooling with stride 2, halving each spatial dimension.
    return tf.nn.max_pool(x,ksize=[1,2,2,1],
                          strides=[1,2,2,1],padding='VALID')
x=tf.placeholder('float',[None,784])
y_=tf.placeholder('float',[None,10])  # ground-truth one-hot labels
W_conv1=weight_variable([5,5,1,32])
b_conv1=bias_variable([32])
x_image=tf.reshape(x,[-1,28,28,1])
# At most one -1 is allowed in a reshape; its value is inferred so that the
# total element count is preserved.
# x holds the image data; after reshape the shape is (None, 28, 28, 1).
h_conv1=tf.nn.relu(conv2d(x_image,W_conv1)+b_conv1)
h_pool1=max_pool_2x2(h_conv1)
# shape: (None, 14, 14, 32)
W_conv2=weight_variable([5,5,32,64])
b_conv2=bias_variable([64])
h_conv2=tf.nn.relu(conv2d(h_pool1,W_conv2)+b_conv2)
h_pool2=max_pool_2x2(h_conv2)
# shape: (None, 7, 7, 64)
W_fc1=weight_variable([7*7*64,1024])
b_fc1=bias_variable([1024])
h_pool2_flat=tf.reshape(h_pool2,[-1,7*7*64])
h_fc1=tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1)+b_fc1)
# shape: (None, 1024)
keep_prob=tf.placeholder("float")
h_fc1_drop=tf.nn.dropout(h_fc1,keep_prob)
# Dropout rate is a placeholder so it can be enabled during training and
# disabled (keep_prob=1.0) during accuracy evaluation.
W_fc2=weight_variable([1024,10])
b_fc2=bias_variable([10])
y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop,W_fc2)+b_fc2)
# shape: (None, 10)
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
# Loss function: cross-entropy between predictions and one-hot labels.
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# Gradient descent via the Adam optimizer.
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
# shape: (None, 1) of bools
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Accuracy = mean agreement between predicted and true one-hot labels;
# cast converts the bool matches to floats before averaging.
with tf.Session() as sess: # eval() needs a session: pass one explicitly or use `with` like this
    sess.run(tf.global_variables_initializer())
    for i in range(20000):
        batch = mnist.train.next_batch(64)
        if i%100 == 0:
            train_accuracy = accuracy.eval(feed_dict={
                x:batch[0], y_: batch[1], keep_prob: 1.0})
            print("step %d, training accuracy %g"%(i, train_accuracy))
        train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
    print("test accuracy %g"%accuracy.eval(feed_dict={
        x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0}))
| true |
6a489132a46b49bfd7bd834632189ad359a7ecfb | Python | yhlli/Next-Word-Predictor | /LSTM.py | UTF-8 | 2,661 | 2.984375 | 3 | [] | no_license | import numpy as np
from nltk.tokenize import RegexpTokenizer
from keras.models import Sequential, load_model
from keras.layers import LSTM
from keras.layers.core import Dense, Activation
from keras.optimizers import RMSprop
import pickle
import heapq
import os
path = 'data/Holmes.txt'
# NOTE(review): file handle is never closed — consider a `with` block.
text = open(path, encoding='utf8').read().lower()
tokenizer = RegexpTokenizer(r'\w+')
word = tokenizer.tokenize(text)
uniqwords = np.unique(word)
# word -> vocabulary index
uniqwordsindex = dict((c, i) for i, c in enumerate(uniqwords))
wlength = 5  # context window: predict the 6th word from the previous 5
prevwords = []
nextwords = []
# Slide a window over the corpus: prevwords[i] holds 5 context words,
# nextwords[i] the word that follows them.
for i in range(len(word) - wlength):
    prevwords.append(word[i:i + wlength])
    nextwords.append(word[i+ wlength])
# OneHotEncode the data
X = np.zeros((len(prevwords), wlength, len(uniqwords)), dtype=bool)
Y = np.zeros((len(nextwords), len(uniqwords)), dtype=bool)
for i, each_words in enumerate(prevwords):
    for j, each_word in enumerate(each_words):
        X[i, j, uniqwordsindex[each_word]] = 1
    Y[i, uniqwordsindex[nextwords[i]]] = 1
# Train a fresh model only if no checkpoint exists; otherwise reload the
# saved model and its training history.
if not os.path.exists('saved_models/keras_next_word_model.h5'):
    model = Sequential()
    model.add(LSTM(128, input_shape=(wlength, len(uniqwords))))
    model.add(Dense(len(uniqwords)))
    model.add(Activation('softmax'))
    optimizer = RMSprop(lr=0.01)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    history = model.fit(X, Y, validation_split=0.05, batch_size=128, epochs=2, shuffle=True).history
    model.save('saved_models/keras_next_word_model.h5')
    # NOTE(review): file handle passed to pickle.dump is never closed.
    pickle.dump(history, open("history.p", "wb"))
else:
    model = load_model('saved_models/keras_next_word_model.h5')
    history = pickle.load(open("history.p", "rb"))
# onehotencode the input
def prepare_input(text):
    """One-hot encode the words of `text` into shape (1, wlength, vocab_size).

    Unknown words are left as all-zero rows.
    NOTE(review): texts with more than `wlength` words would index out of
    bounds — callers appear to pass exactly 5 tokens.
    """
    encoded = np.zeros((1, wlength, len(uniqwords)))
    for pos, token in enumerate(text.split()):
        if token in uniqwords:
            encoded[0, pos, uniqwordsindex[token]] = 1
    return encoded
def sample(preds, top_n=3):
    """Return the indices of the `top_n` largest probabilities in `preds`."""
    probs = np.asarray(preds).astype('float64')
    # log/exp round-trip then renormalize (temperature-1 softmax smoothing).
    probs = np.exp(np.log(probs))
    probs = probs / np.sum(probs)
    return heapq.nlargest(top_n, range(len(probs)), probs.take)
def predict_completions(text, n=3):
    """Return the n most likely next words for `text` using the global model.

    NOTE(review): for empty input this returns the *string* "0", not a list —
    callers must special-case that; confirm it is intended.
    """
    if text == "":
        return("0")
    x = prepare_input(text)
    pred = model.predict(x, verbose=0)[0]
    next_indices = sample(pred, n)
    return [uniqwords[idx] for idx in next_indices]
return [uniqwords[idx] for idx in next_indices]
def inputString(instring):
    """Predict 5 completions from the last 5 tokens of `instring`.

    NOTE(review): with fewer than 5 tokens the slice start is negative and
    wraps around — confirm behavior for short inputs.
    """
    q = instring
    tokens = tokenizer.tokenize(q)
    seq = " ".join(tokenizer.tokenize(q.lower())[(len(tokens) - 5):len(tokens)])
    return predict_completions(seq, 5)
| true |
71f1555cb564ff0ca06c085a3dbf2fdf4edee2f7 | Python | ArvindSinghRawat/TransportModeDetection | /Distribution_Calculator.py | UTF-8 | 1,743 | 2.890625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def norm(x, mean, stddev):
    """Unnormalized Gaussian: exp(-(x - mean)^2 / (2 * stddev^2)).

    Works elementwise on array-like `x` via numpy broadcasting.
    """
    diff = x - mean
    return np.exp(-(diff * diff) / 2 / (stddev ** 2))
def preprocess(data):
    """Return `data` as a pandas Series with NaNs and duplicates removed."""
    return pd.Series(data).dropna().drop_duplicates()
def stddev(data):
    """Sample standard deviation (ddof=1, pandas default) of `data`."""
    series = pd.Series(data)
    return series.var() ** 0.5
def init(path="data/Arvind 2000.csv",cname='speed (m/s)',export=False,preprocessed = False,plot= False,removedna=True):
    """Load column `cname` from a CSV, normalize it with `norm`, optionally
    export the (data, normed) pair to '<dir>/normed data/<name>.csv' and/or
    plot the distribution. Returns the normed values.

    NOTE(review): `path.split('.')[0]` breaks for paths containing extra dots.
    """
    data = pd.read_csv(path)[cname]
    t = path.split('.')[0]
    t = t.split('/')  # t[0]=base dir, t[1]=file stem (used for output paths/titles)
    if preprocessed == False:
        data = preprocess(data)
    else:
        # NOTE(review): dropna runs only when removedna is False — the flag
        # reads inverted; confirm intent.
        if removedna == False:
            data = data.dropna()
    n = norm(data,data.mean(),stddev(data))
    if export == True:
        target = t[0]+'/normed data/'+t[1]+'.csv'
        # NOTE(review): the empty_like allocations are immediately discarded
        # by rebinding — dc/nc are just aliases of data/n.
        dc = np.empty_like(data)
        dc = data
        nc = np.empty_like(n)
        nc = n
        res = pd.concat([dc,nc], ignore_index=True,axis=1)
        res.to_csv(target,header=['data','normed data'],index=False)
    if plot == True:
        d = np.array(sorted(data))
        nc = norm(d,d.mean(),stddev(d))
        plt.plot(d,nc)
        text = 'Distribution of '+t[1]
        # percentiles: [min, Q1, 33%, median, 66%, Q3, max]
        q = np.percentile(d,[0,25,33,50,66,75,100])
        m1 = plt.vlines(d.mean(),-0.25,1.25,colors='b')
        m2 = plt.vlines(q[3],-0.25,1.25,colors='r')
        m3 = plt.axvspan(q[1], q[5], alpha=0.5, color='red')
        plt.ylim(-0.1,1.1)
        plt.title(text)
        plt.legend([m1,m2,m3],['Mean','Median','IQR'])
        plt.xlabel('Speed in (m/sec)')
        plt.ylabel('Norm(speed)')
    return n
d1684714dd46d98f764bd82800a6f8e9fc1190b7 | Python | FlaviusZichil/Change-point-detection | /src/features/EntropyFeature.py | UTF-8 | 391 | 2.65625 | 3 | [] | no_license | from scipy.stats import entropy
from src.features.CountOfEventsFeature import CountOfEventsFeature
from src.features.base.Feature import Feature
class EntropyFeature(Feature):
    """Feature computing the Shannon entropy of per-event counts in a window."""
    def __init__(self):
        # Identifier used by the feature framework.
        self.name = 'Entropy'
    def get_result(self, window):
        """Return scipy.stats.entropy of CountOfEventsFeature's counts for `window`."""
        count_of_event_feature = CountOfEventsFeature()
        return entropy(count_of_event_feature.get_result(window))
| true |
6e869294398bc0b3a91e602f3e351a23435c50eb | Python | zzy1120716/my-nine-chapter | /catagory/BitManipulation/0142-o1-check-power-of-2.py | UTF-8 | 593 | 3.796875 | 4 | [] | no_license | """
142. O(1)时间检测2的幂次
用 O(1) 时间检测整数 n 是否是 2 的幂次。
样例
n=4,返回 true;
n=5,返回 false.
挑战
O(1) time
"""
# 不断乘二(左移一位),当与n相等时,则是2的幂,
# 输入一定小于 2 ^ 31,所以可以限制循环的次数。
class Solution:
    """
    @param n: An integer
    @return: True or false
    """
    def checkPowerOf2(self, n):
        """Return True iff n is a power of 2, in O(1) time.

        A positive power of two has exactly one bit set, so n & (n - 1)
        clears that bit and yields 0. This meets the problem's stated O(1)
        challenge (the original looped 31 times) and also handles n <= 0,
        which is never a power of two.
        """
        return n > 0 and (n & (n - 1)) == 0
| true |
dc19b20eef83f44323137b05d939acc157bc5424 | Python | NoteXYX/myCNN_RNN_attention | /data/data_process.py | UTF-8 | 10,394 | 2.90625 | 3 | [] | no_license | import numpy as np
import re
import pickle
from collections import Counter
import gensim
import random
def getlist(filename):
    """Read a tab-separated corpus file and return (sentences, tags) lists.

    Each line is expected as '<sentence>\t<tag phrase>'.
    """
    sentences, tags = [], []
    with open(filename, 'r', encoding='utf-8') as fin:
        for raw in fin:
            parts = raw.strip().split('\t')
            sentences.append(parts[0])
            tags.append(parts[1])
    return sentences, tags
#build vocabulary
def get_dict(filenames):
trnTweet,testTweet=filenames
sentence_list=getlist(trnTweet)[0]+getlist(testTweet)[0]
words=[]
for sentence in sentence_list:
word_list=sentence.split()
words.extend(word_list)
word_counts=Counter(words)
words2idx={word[0]:i+1 for i,word in enumerate(word_counts.most_common())}
idx2words = {v: k for (k,v) in words2idx.items()}
labels2idx = {'O': 0, 'B': 1, 'I': 2, 'E': 3, 'S': 4}
dicts = {'words2idx': words2idx, 'labels2idx': labels2idx, 'idx2words': idx2words}
return dicts
def get_train_test_dicts(filenames):
    """
    Args:
        filenames:trnTweet,testTweet,tag_id_cnt
    Returns:
        dataset:train_set,test_set,dicts
        train_set=[train_lex,train_y,train_z]
        test_set=[test_lex,test_y,test_z]
        dicts = {'words2idx': words2idx, 'labels2idx': labels2idx}
    """
    trnTweetCnn, testTweetCnn= filenames
    dicts=get_dict([trnTweetCnn,testTweetCnn])
    trn_data=getlist(trnTweetCnn)
    test_data=getlist(testTweetCnn)
    trn_sentence_list,trn_tag_list=trn_data
    test_sentence_list,test_tag_list=test_data
    words2idx=dicts['words2idx']
    labels2idx=dicts['labels2idx']
    def get_lex_y(sentence_list,tag_list,words2idx):
        # For each sentence, find the first occurrence of the tag phrase and
        # emit: lex (word indices), y (1 at keyphrase positions), z (BIES labels).
        # Sentences where the phrase is not found are skipped.
        lex,y,z=[],[],[]
        bad_cnt=0
        for s,tag in zip(sentence_list,tag_list):
            word_list=s.split()
            t_list=tag.split()
            emb=list(map(lambda x:words2idx[x],word_list))
            begin=-1
            # NOTE(review): word_list[i+j] can index past the end when the
            # phrase overlaps the sentence boundary — confirm inputs avoid it.
            for i in range(len(word_list)):
                ok=True
                for j in range(len(t_list)):
                    if word_list[i+j]!=t_list[j]:
                        ok=False;
                        break
                if ok==True:
                    begin=i
                    break
            if begin==-1:
                bad_cnt+=1
                continue
            lex.append(emb)
            labels_y=[0]*len(word_list)
            for i in range(len(t_list)):
                labels_y[begin+i]=1
            y.append(labels_y)
            # BIES tagging: S for single-word phrases; B/I/E otherwise.
            labels_z=[0]*len(word_list)
            if len(t_list)==1:
                labels_z[begin]=labels2idx['S']
            elif len(t_list)>1:
                labels_z[begin]=labels2idx['B']
                for i in range(len(t_list)-2):
                    labels_z[begin+i+1]=labels2idx['I']
                labels_z[begin+len(t_list)-1]=labels2idx['E']
            z.append(labels_z)
        return lex,y,z
    train_lex, train_y, train_z = get_lex_y(trn_sentence_list,trn_tag_list, words2idx) # train_lex: word-index lists per tweet; train_y: 1 at keyphrase positions; train_z: BIES labels (0..4)
    test_lex, test_y, test_z = get_lex_y(test_sentence_list,test_tag_list,words2idx)
    train_set = [train_lex, train_y, train_z]
    test_set = [test_lex, test_y, test_z]
    data_set = [train_set, test_set, dicts]
    with open('../CNTN/data/inspec_wo_stem/data_set.pkl', 'wb') as f:
        pickle.dump(data_set, f)
        # dill.dump(data_set, f)
    return data_set
def get_CNTN_train_test_dicts(filenames):
    """
    Args:
        filenames:trnTweet,testTweet,tag_id_cnt
    Returns:
        dataset:train_set,test_set,dicts
        train_set=[train_lex,train_y,train_z]
        test_set=[test_lex,test_y,test_z]
        dicts = {'words2idx': words2idx, 'labels2idx': labels2idx}
    """
    trnTweetCnn, testTweetCnn = filenames
    dicts = get_dict([trnTweetCnn, testTweetCnn])
    trn_data = getlist(trnTweetCnn)
    test_data = getlist(testTweetCnn)
    trn_sentence_list, trn_tag_list = trn_data
    test_sentence_list, test_tag_list = test_data
    words2idx = dicts['words2idx']
    labels2idx = dicts['labels2idx']
    def get_CNTN_lex_y(sentence_list, tag_list, words2idx):
        # Unlike get_lex_y, this variant collects *all* runs of sentence words
        # that match keyphrase words, recording their index lists, then labels
        # each run with BIES. The matching is positional against t_list.
        lex, y, z = [], [], []
        for s, tag in zip(sentence_list, tag_list):
            word_list = s.split()
            t_list = tag.split()
            emb = list(map(lambda x: words2idx[x], word_list))
            i = 0
            find_keyphrase = False      # currently inside a matching run
            len_keyphrase = 0           # total matched words (not used below)
            all_keyphrase_sub = []      # list of index-lists, one per run
            cur_keyphrase_sub = []      # indices of the run being built
            while i < len(word_list):
                cur_word = word_list[i]
                j = 0
                while j < len(t_list):
                    cur_keyword = t_list[j]
                    if cur_word == cur_keyword:
                        len_keyphrase += 1
                        cur_keyphrase_sub.append(i)
                        find_keyphrase = True
                        j = 0
                        # i += 1
                        break
                    elif find_keyphrase and j == len(t_list)-1:
                        # current word matches nothing: close the open run
                        all_keyphrase_sub.append(cur_keyphrase_sub)
                        cur_keyphrase_sub = []
                        find_keyphrase = False
                        j += 1
                        # i += 1
                    else:
                        # tag_again = False
                        j += 1
                        # if j == len(t_list):
                        #     i += 1
                        continue
                i += 1
            lex.append(emb)
            cur_y = [ 0 for k in range(len(word_list))]
            cur_z = [ 0 for k in range(len(word_list))]
            # BIES labeling of every collected run.
            for cur_sub in all_keyphrase_sub:
                if len(cur_sub) == 1:
                    cur_y[cur_sub[0]] = 1
                    cur_z[cur_sub[0]] = labels2idx['S']
                elif len(cur_sub) > 1:
                    cur_y[cur_sub[0]] = 1
                    cur_z[cur_sub[0]] = labels2idx['B']
                    for k in range(len(cur_sub) - 2):
                        cur_y[cur_sub[1+k]] = 1
                        cur_z[cur_sub[1+k]] = labels2idx['I']
                    cur_y[cur_sub[-1]] = 1
                    cur_z[cur_sub[-1]] = labels2idx['E']
            y.append(cur_y)
            z.append(cur_z)
        return lex, y, z
    train_lex, train_y, train_z = get_CNTN_lex_y(trn_sentence_list, trn_tag_list,
                                                 words2idx)  # train_lex: word-index lists per tweet; train_y: 1 at keyphrase positions; train_z: BIES labels (0..4)
    test_lex, test_y, test_z = get_CNTN_lex_y(test_sentence_list, test_tag_list, words2idx)
    train_set = [train_lex, train_y, train_z]
    test_set = [test_lex, test_y, test_z]
    data_set = [train_set, test_set, dicts]
    with open('../CNTN/data/semeval_wo_stem/data_set123.pkl', 'wb') as f:
        pickle.dump(data_set, f)
        # dill.dump(data_set, f)
    return data_set
def load_bin_vec(frame, vocab):
    """Load pretrained word2vec vectors (binary format) and keep only words in `vocab`.

    Returns {word: float32 ndarray}; prints progress every 10000 scanned words.
    """
    kept = {}
    scanned = 0
    model = gensim.models.KeyedVectors.load_word2vec_format(frame, binary=True)
    for token in model.vocab:
        if token in vocab:
            kept[token] = np.asarray(model[token], dtype=np.float32)
        scanned += 1
        if scanned % 10000 == 0:
            print("load_bin_vec %d" % scanned)
    return kept
# def load_txt_vec(frame,vocab):
# k=0
# word_vecs={}
# with open(frame,'r',encoding='utf-8') as f:
# for line in f.readlines():
# word=line.strip().split('\t',1)[0]
# embeding=line.strip().split('\t',1)[1].split()
# if word in vocab:
# word_vecs[word]=np.asarray(embeding,dtype=np.float32)
# k+=1
# if k%10000==0:
# print ("load_bin_vec %d" % k)
#
# return word_vecs
def add_unknown_words(word_vecs, vocab, min_df=1, dim=300):
    """
    For words that occur in at least min_df documents, create a separate word vector.
    0.25 is chosen so the unknown vectors have (approximately) same variance as pre-trained ones
    """
    added = 0
    for token in vocab:
        if token in word_vecs:
            continue
        word_vecs[token] = np.asarray(np.random.uniform(-0.25, 0.25, dim), dtype=np.float32)
        added += 1
        if added % 10000 == 0:
            print("add_unknow_words %d" % added)
    return word_vecs
def get_embedding(w2v,words2idx,k=300):
    """Build the (vocab+2, k) embedding matrix from word vectors, pickle it,
    and return it. Row 0 stays all-zero (padding); indices come from words2idx.

    NOTE(review): the output path says 'semveal_wo_stem' while every other
    path in this file uses 'semeval_wo_stem' — likely a typo; verify the
    directory actually used on disk before changing it.
    """
    embedding = np.zeros((len(w2v) + 2, k), dtype=np.float32)
    for (w,idx) in words2idx.items():
        embedding[idx]=w2v[w]
    #embedding[0]=np.asarray(np.random.uniform(-0.25,0.25,k),dtype=np.float32)
    with open('../CNTN/data/semveal_wo_stem/embedding.pkl','wb') as f:
        pickle.dump(embedding,f)
    return embedding
if __name__ == '__main__':
    # Build and pickle the CNTN dataset from the SemEval corpora, then report sizes.
    data_folder = ["../CNTN/data/semeval_wo_stem/mytrain123.txt","../CNTN/data/semeval_wo_stem/mytestNEW.txt"]
    data_set = get_CNTN_train_test_dicts(data_folder)
    print ("data_set complete!")
    dicts = data_set[2]
    vocab = set(dicts['words2idx'].keys())
    print ("total num words: " + str(len(vocab)))
    print ("dataset created!")
    train_set, test_set, dicts=data_set
    print ("total train lines: " + str(len(train_set[0])))
    print("total test lines: " + str(len(test_set[0])))
    # GoogleNews-vectors-negative300 holds the pretrained word vectors
    # (embedding-building steps currently disabled):
    #w2v_file = 'D:\PycharmProjects\myCNN_RNN_attention\data\original_data\GoogleNews-vectors-negative300.bin'
    #w2v = load_bin_vec(w2v_file,vocab)
    #print ("word2vec loaded")
    #w2v = add_unknown_words(w2v, vocab)
    #embedding=get_embedding(w2v,dicts['words2idx'])
    #print ("embedding created")
    # One-off deduplication pass over the test file (kept for reference):
    # f = open("../CNTN/data/semeval_wo_stem/mytest.txt", 'r', encoding='utf-8')
    # w = open("../CNTN/data/semeval_wo_stem/mytestNEW.txt", 'w', encoding='utf-8')
    # lines = f.readlines()
    # for line in lines:
    #     content = line.split('\t')[0]
    #     keys = line.split('\t')[1]
    #     mycon1 = content[:(len(content)-1)//2]
    #     mycon2 = content[(len(content)-1)//2+1:]
    #     if mycon1 == mycon2:
    #         w.write(mycon1 + '\t' + keys)
    #     else:
    #         w.write(line)
    #     # print(len(content))
    # f.close()
    # w.close()
| true |
b14360f73c813225e4abdcc5748fb80ac6f19263 | Python | monkey1302/mining_comment | /comment_mining.py | UTF-8 | 11,227 | 2.640625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# ----- 这里代码主要进行名词提取、每个名词的修饰词提取 ------
import time
from scipy import stats
import math
import numpy as np
from sklearn.cluster import KMeans
import os
from pyltp import Segmentor
from pyltp import Postagger
import json
from gensim.models import word2vec
LTP_DATA_DIR = './ltp_data'  # directory holding the LTP models
cws_model_path = os.path.join(LTP_DATA_DIR, 'cws.model')  # word-segmentation model
pos_model_path = os.path.join(LTP_DATA_DIR, 'pos.model')  # POS-tagging model
segmentor = Segmentor()  # create segmenter instance
segmentor.load(cws_model_path)  # load segmentation model
postagger = Postagger()  # create POS tagger instance
postagger.load(pos_model_path)  # load POS model
stop_word = [line.strip() for line in open('stop_word.txt','r',encoding='utf-8').readlines()]
model =word2vec.Word2Vec.load("./mining.model")# word2vec model pre-trained on all comments
def readcomment(comment_path):
    '''
    input: one comment file path (one JSON record per line)
    output: comment list ['comment1','comment2',....]
    '''
    # Parse every line as JSON.
    with open(comment_path, 'r', encoding='utf-8') as fin:
        raw_lines = fin.read().strip().split("\n")
    records = [json.loads(line) for line in raw_lines]
    # Drop malformed records (those carrying a "sku_id" key).
    records = [rec for rec in records if 'sku_id' not in rec]
    # Keep only non-empty comment bodies.
    return [rec['content'] for rec in records if rec['content'] is not None]
def choose_adj(adj_list): # method 1: pick the most frequent adjective
    '''
    input: [adj1, adj2, ...] a list of adjectives, possibly with repeats
    output: (adj, count) tuple for the most frequent adjective
            (ties broken by first occurrence, as before)
    '''
    # Local import: the module header is outside this block.
    from collections import Counter
    # Counter replaces the hand-rolled frequency dict + sorted(); with ties,
    # most_common (like the original stable sort over an insertion-ordered
    # dict) returns the first-seen adjective.
    return Counter(adj_list).most_common(1)[0]
def w2v_kmeans(full_list,k):
    '''
    input: full_list: [[noun, count, [adj_list]], ...]
    input: k: number of clusters for KMeans
    output: none — the chosen aspect nouns and their adjectives are written
            to noun_adj.txt (and printed)
    FIX: the representative-adjective step called the undefined name
    `choose_adj1`; it now calls `choose_adj` (the only such helper defined).
    '''
    noun_list = [item[0] for item in full_list] # the nouns
    # Kinship/filler words whose clusters should be discarded.
    filtering_word = ['宝宝','时候', '孩子','图片', '学生','奶奶','公公','好评',"棒棒",'小哥','东西','产品','小孩', '小孩子', '小朋友', '小宝宝', '婴儿', '朋友', '同事', '亲戚', '邻居','妈妈', '父母', '老妈', '爸妈', '老人', '妹妹', '爸爸', '姐姐', '弟弟','家人', '儿子', '老婆', '家里人', '老公', '女儿']
    # --- word2vec part ---
    global model
    vec_list = [] # vector for each noun
    # Not every noun has a vector; collect the indices of those without one
    # and delete their whole records (the -i offset compensates for the
    # left-shift caused by earlier deletions).
    wrong_index=[]
    for i in range(len(noun_list)):
        try:
            vec_list.append(model[noun_list[i]]) # record the word vector
        except Exception:
            wrong_index.append(i)
    for i in range(len(wrong_index)):
        index = wrong_index[i]-i
        del noun_list[index]
        del full_list[index]
    # After removing un-vectorizable nouns, rebuild counts and adjective lists.
    adj_list = [item[2] for item in full_list]
    count_list = [item[1] for item in full_list]
    total_count = sum(count_list)
    # -- k-means part --
    estimator = KMeans(n_clusters=k,max_iter=100000) # initialize
    cluster_result = estimator.fit_transform(vec_list)# run clustering
    data_len = len(cluster_result)
    label_pred = estimator.labels_ # cluster label per noun
    f = open("noun_adj.txt","w+",encoding='utf-8') # final results go here
    # -- process the result --
    cluster_count = {} # size of each cluster
    score_dict = {} # per-cluster [index, score] pairs: {label: [[i, score], ...]}
    # initialize both dicts
    for i in range(k):
        cluster_count[str(i)] = 0
        score_dict[str(i)] = []
    # count members per cluster
    for label in label_pred:
        cluster_count[str(label)] +=1
    # score every point in its cluster
    for i in range(data_len):
        label = label_pred[i]
        distance = cluster_result[i][label]
        # Tunable blend: first term is distance-to-centroid, second is term
        # frequency (distance weight is currently 0).
        score = 0*math.exp(-distance)+1*(count_list[i]/total_count)
        score_dict[str(label)].append([i,score])
    res_index = []
    for i in range(k):# iterate the clusters
        res = sorted(score_dict[str(i)], key = lambda x :x[1],reverse=True) # noun indices of this cluster, best first
        #f.write("\n------cluster:{}-----\n".format(i))
        print("------cluster:{}----".format(i))
        word_index = [] # noun indices
        for item in res:
            word_index.append(item[0])
        words = []
        for index in word_index:
            words.append(noun_list[index])
        # NOTE(review): an empty cluster would make words[0] raise IndexError.
        if words[0] in filtering_word: # drop clusters led by a filtered word
            print("this cluster will be dropped")
            continue
        # Drop clusters whose top word is rarer than 1/200 of all nouns.
        if count_list[word_index[0]]<total_count/200:
            print(words)
            print("this cluster will be dropped")
            continue
        res_index.append(res[0][0]) # index of the cluster's representative noun
        print(words)
        #f.write(str(words))
    result = [noun_list[i] for i in range(len(noun_list)) if i in res_index]
    print("---------final result-------")
    f.write("\n---------final result-------\n")
    f.write(str(result)+"\n")
    print(result)
    # ------ pick one representative adjective for each result noun ------
    for i in res_index:
        this_adj_list = adj_list[i]
        final_adj = choose_adj(this_adj_list)  # FIX: was choose_adj1 (NameError)
        print (noun_list[i],final_adj)
        f.write(str(noun_list[i])+"\t"+str(final_adj)+"\n")
    f.close()
def find_adj(words_list, postags_list, i):
    '''
    Given a tokenized sentence and the index of a noun, collect the
    adjectives that modify it.
    input: words_list  - the tokens
    input: postags_list - POS tag per token ('a' adjective, 'd' adverb,
                          'v' verb, 'wp' punctuation, ...)
    input: i - index of the current noun
    output: list of adjectives (adverb/verb + adjective pairs are joined)
    '''
    found = []
    n = len(words_list)
    # -- look ahead --
    if i <= n - 2:
        nxt = postags_list[i + 1]
        if i <= n - 3:  # at least two tokens follow
            if nxt != 'wp':  # punctuation blocks the forward search
                if nxt == 'a':
                    found.append(words_list[i + 1])
                elif nxt in ('d', 'v') and postags_list[i + 2] == 'a':
                    found.append(words_list[i + 1] + words_list[i + 2])
        elif nxt == 'a':  # exactly one token follows
            found.append(words_list[i + 1])
    # -- look back --
    if i >= 1:
        prv = postags_list[i - 1]
        if i >= 2:  # at least two tokens precede
            if prv != 'wp':  # punctuation blocks the backward search
                if prv == 'a':
                    found.append(words_list[i - 1])
                elif prv in ('d', 'v') and postags_list[i - 2] == 'a':
                    found.append(words_list[i - 2] + words_list[i - 1])
        elif prv == 'a':  # exactly one token precedes
            found.append(words_list[i - 1])
    return found
def mining(comments):
    '''
    Segment and POS-tag each comment, attach modifying adjectives to every
    noun, count noun frequencies, then cluster the nouns via w2v_kmeans.
    input : [comment1, comment2, ...]
    output: none (w2v_kmeans writes the results)
    FIX: the else-branch computed k = int(len*0.7), multiplying the builtin
    `len` function (TypeError at runtime); it now uses the intended `lens`.
    '''
    word_count ={} #{noun1:count , noun2:count, ....}
    total_wordcount= 0
    noun_adj = {} #{noun:[adj_list], noun:[adj_list].....}
    for comment in comments:
        words = segmentor.segment(comment) # word segmentation
        words_list = [word.lower() for word in list(words) if word not in stop_word] # drop stop words
        words_list = [word.replace('.','').replace(',','').replace('?','').replace('!','').replace('@','') for word in words_list] # strip ASCII punctuation
        while '' in words_list: # remove now-empty tokens
            words_list.remove('')
        total_wordcount +=len(words_list) # running token count
        postags = postagger.postag(words_list) # POS tagging
        postags_list = list(postags)
        # -- attach adjectives to each noun --
        for i in range(len(words_list)):
            if postags_list[i]=='n':
                adj_list=find_adj(words_list,postags_list,i)
                if noun_adj.__contains__(words_list[i]):
                    noun_adj[words_list[i]].extend(adj_list)
                else:
                    noun_adj[words_list[i]]=adj_list
        # -- count nouns (single-character nouns excluded) --
        for i in range(len(words_list)):
            if postags[i]=="n" and len(words_list[i])>1:
                word = words_list[i]
                if word_count.__contains__(word):
                    word_count[word]=word_count[word]+1
                else:
                    word_count[word] = 1
    # -- sort nouns by frequency, descending --
    result = sorted(word_count.items(),key = lambda x:x[1],reverse = True) # [(noun1,count1),(noun2,count2),.....]
    full_list=[] # every noun with its count and adjectives: [[word, count, [adjs]], ...]
    for item in result:
        word = item[0]
        count = item[1]
        adj=noun_adj[word]
        full_list.append([word,count,adj])
    # -- choose k for k-means: generous, so later clustering stays fine-grained --
    lens = len(full_list)
    if lens > 50:
        k=50
    else:
        k=int(lens*0.7)  # FIX: was int(len*0.7) — a TypeError on the builtin
    w2v_kmeans(full_list,k) # second argument is the cluster count
# -- Full version: loop over every comment file in the folder --
'''
folder_path = "./raw_comments/"
files= os.listdir(folder_path) #得到文件夹下的所有文件名称
f = open("aspect_result1.csv","w+",encoding='utf-8')
for file in files:
    comment_path = folder_path+file
    comments = readcomment(comment_path)
    if comments == None:
        result = " "
    else:
        result = mining(comments)
'''
# -- Test version: process a single file --
comment_path = "./raw_comments/item_comments_jd_1741527728"
comments = readcomment(comment_path) # [comment1, comment2, ...]
if comments == None:
    result = " "
else:
    result = mining(comments)
segmentor.release() # release the segmentation model
postagger.release() # release the POS model
| true |
732b61ecd50ecbc764a1ee780793ee9e18235970 | Python | kajendranL/Daily-Practice | /Pattern practice.py | UTF-8 | 1,153 | 3.609375 | 4 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[6]:
# Pattern 1: a 6x6 grid of '*'.
print("Pattern 1")
print()
n=6
for i in range(1, n+1):
    print("*"*n)
# In[22]:
# Pattern 2: each row repeats its own row number (111111 / 222222 / ...).
print("Pattern 2")
print()
n=6
for i in range(1,n+1):
    for j in range (1, n+1):
        print(i, end='') # i printed
    print()
print()
# In[13]:
# Pattern 3: every row is the column numbers 123456.
print("Pattern 3")
print()
n=6
for i in range(1, n+1):
    for j in range(1,n+1):
        print(j, end='') # printed
    print()
# In[16]:
# Pattern 4: each row repeats its row letter (AAAAAA / BBBBBB / ...).
print("Pattern 4")
print()
n=6
for i in range(1,n+1):
    for j in range (1,n+1):
        print(chr(64+i),end='')
    print()
# In[21]:
# Pattern 5: same as Pattern 4 (row letter repeated).
print("Pattern 5")
print()
n=6
for i in range(1,n+1):
    for j in range(1, n+1):
        print(chr(64+i), end='')
    print()
print()
# Modified variant: chr(64+1) is always 'A', so the whole grid is 'A'.
print("Pattern 5 Modified")
print()
n=6
for i in range(1,n+1):
    for j in range(1, n+1):
        print(chr(64+1), end='')
    print()
# In[24]:
# Pattern 6: rows count down: 666666 / 555555 / ... / 111111.
print("Pattern 6")
print()
n=6
for i in range(1,n+1):
    for j in range(1, n+1):
        print(n+1-i, end="")
    print()
# In[25]:
# Pattern 7: every row is the columns counting down: 654321.
print("Pattern 7")
print()
n=6
for i in range(1,n+1):
    for j in range(1,n+1):
        print(n+1-j, end='')
    print()
| true |
d7371eb2eea6d251f3b4e0d2af578fd46d07ba48 | Python | Aaron-Lichtblau/scheduler_tool | /api/helpers.py | UTF-8 | 9,286 | 3.203125 | 3 | [] | no_license | from api.schedule import Schedule
import api.constants as constants
# -------------------------------------------------------------------------------
# Node Manipulation Helper Functions
# Parameter formats:
# student_nodes = e_name_shift
# name - student's name without special characters
# shift - integer number representing jth shift. (if student's cap = 3, the student will have 3 nodes: student_0, student_1, student_2)
# slot_nodes = day_starttime_slottype
# day - day of week from list: [Mo, Tu, We, Th, Fr, Sa, Su]
# starttime - in 4 digit military time: 2100
# slottype - 0 or 1, representing a single (2hr) slot or a double (4hr) slot
# -------------------------------------------------------------------------------
def get_student_nodes(name, student_nodes):
    """given a student name, returns that student's nodes (prefix before the first '_')"""
    return [node for node in student_nodes if node.split("_")[0] == name]
def get_slot(slot_node):
    """gets the slot from a slot node by stripping its 2-char type suffix (e.g. '_0')"""
    return slot_node[:-2]
def get_hours(slot_node, slot_duration):
    """gets the hours for a node: doubled for type-1 (double) slots.

    slot_duration is the base slot length in minutes."""
    base_hours = slot_duration // constants.MINUTES_IN_HOUR
    return 2 * base_hours if int(slot_node[-1]) == 1 else base_hours
def get_slots_of_type(to_nodes, slot_type):
    """gets all slot nodes of a given type (0 or 1); to_nodes are all slot nodes"""
    wanted = int(slot_type)
    return [node for node in to_nodes if int(node[-1]) == wanted]
def get_alt_slot(slot_node, prev_slot):
    """gets the slot node's double-type counterpart (suffix 1), or None
    when the slot cannot be a double shift"""
    base = get_slot(slot_node)
    if not is_double(base, prev_slot):
        return None
    return str(base) + constants.DOUBLE_SLOT_SUFFIX
def is_double(slot, prev_slot):
    """Return True if `slot` can be part of a double (4hr) shift, i.e. it has
    a predecessor entry in the `prev_slot` mapping.

    Idiom fix: membership test directly on the dict instead of
    `if slot in prev_slot.keys(): return True / else: return False`.
    """
    return slot in prev_slot
def get_prev_slot(slot_node, prev_slot):
    """gets the single-type node of the slot preceding `slot_node`, or None
    when it has no predecessor in the `prev_slot` mapping"""
    base = get_slot(slot_node)
    if not is_double(base, prev_slot):
        return None
    return str(prev_slot[base]) + constants.SINGLE_SLOT_SUFFIX
def get_day_slots(day, slot_nodes_2, slot_nodes_4):
    """gets all slot nodes (2hr then 4hr) whose day-of-week prefix matches `day`.

    BUG FIX: the original compared `slot[:-2]` ('Mo_19') against day names
    like 'Mo', so nothing ever matched, and the second loop reused a stale
    `slot` from the first loop instead of recomputing it. Both loops now
    extract the day prefix ('Mo') from each node directly.
    """
    day_slots = []
    for slot_node in slot_nodes_2 + slot_nodes_4:
        slot_day = slot_node.split("_")[0]
        if str(slot_day) == str(day):
            day_slots.append(slot_node)
    return day_slots
# -------------------------------------------------------------------------------
# Dataframe Manipulation Helper Functions
# -------------------------------------------------------------------------------
def order_sched(df, unordered_sched_dict, slotdict):
    """takes an unordered schedule dict and returns a Schedule ordered by
    slotdict's key order, with node names reduced to student names"""
    ordered = {slot: unordered_sched_dict[slot] for slot in slotdict}
    name_map = {}
    for slot, assigned in ordered.items():
        name_map[slot] = [node.split("_")[0] for node in assigned]
    return Schedule(name_map)
def update_df(df, student, slot, slot_duration):
    """updates the dataframe in place. Given the student (name) and slot (e.g. 'Mo_2100'),
    this function adds (slot duration / 60) hours to the student's hours column and updates their happiness column of the dataframe"""
    try:
        index = df.loc[df["name"] == student].index[0]
    except:
        # NOTE(review): bare except leaves `index` unbound, so the next line
        # raises NameError anyway — consider re-raising a clear error.
        print("student not found in df: ", student)
    # update preference table: negating the score marks the slot as assigned
    # (a second call on the same slot flips it back — toggle semantics).
    score = df.at[index, slot]
    cap = df.at[index, "cap"]
    df.at[index, slot] = -(score)
    # update hours worked and happiness
    temp_work = df.at[index, "hours"]
    temp_hap = df.at[index, "happiness"]
    hap = (score * 100) / (3 * cap)  # score contribution normalized by cap
    if df.at[index, slot] < 0:  # negative now => the slot was just added
        df.at[index, "hours"] = temp_work + slot_duration // constants.MINUTES_IN_HOUR
        df.at[index, "happiness"] = temp_hap + hap
    else:
        # slot was just removed; happiness still moves by hap (score is
        # negative in this branch, so this subtracts the earlier gain)
        df.at[index, "hours"] = temp_work - slot_duration // constants.MINUTES_IN_HOUR
        df.at[index, "happiness"] = temp_hap + hap
def schedule_to_df(df, schedule, slot_duration):
    """Given a finished schedule, apply every assignment back onto the
    starting preference dataframe via update_df.

    BUG FIX: the original tested `len(slot) == 0` — the length of the slot
    *name* string, which is never 0 — instead of the list of assigned
    students, so the empty-slot warning could never fire.
    """
    for slot in schedule:
        if len(schedule[slot]) == 0:
            print("empty slot in schedule ERROR!")
        for student in schedule[slot]:
            update_df(df, student, slot, slot_duration)
def get_slots(df):
    """gets the slot column names from the df (all columns except the known
    bookkeeping columns), warning about malformed slot names"""
    non_slots = [
        "name",
        "slot_type",
        "availability",
        "cap",
        "experience",
        "skill",
        "hours",
        "happiness",
        "gap",
    ]
    slots = df.columns.tolist()
    for reserved in non_slots:
        if reserved in slots:
            slots.remove(reserved)
    for slot in slots:
        if len(slot) != 7:
            print(slot, " is not correctly formatted: should be (ex: Mo_1900)")
    return slots
def get_dict(df, col):
    """makes a dict mapping each name in df['name'] to its value in `col`
    (first matching row wins for duplicate names)"""
    return {
        student: df.at[df.loc[df["name"] == student].index[0], col]
        for student in df["name"]
    }
def color_schedule(val):
    """color the shifts being worked: -3 = green, -2 = yellow, -1 = red,
    anything else uncolored"""
    shades = {-3: "green", -2: "yellow", -1: "red"}
    return "background-color: %s" % shades.get(int(val), "")
def color_wrong_type(s):
    """
    highlight the wrong types dark orange
    """
    return ["background-color: darkorange" if flag else "" for flag in (s == True)]
# -------------------------------------------------------------------------------
# Slot Time Manipulation Helper Functions
# -------------------------------------------------------------------------------
def get_start_time(slot):
    '''given a slot "Mo_1900", returns the start time as an int: 1900'''
    return int(slot[-4:])
def add_time(start_time, added_minutes):
    """return the start time plus added minutes (no day wrap: 2500 is a
    valid end time).

    start_time is 4-digit military time, e.g. 2100;
    added_minutes is an integer number of minutes."""
    start_hour = int(str(start_time)[:2])
    start_minute = int(str(start_time)[2:4])
    extra_hours, end_minute = divmod(start_minute + int(added_minutes),
                                     constants.MINUTES_IN_HOUR)
    # minutes are zero-padded to two digits, as in e.g. 2205
    return int("%d%02d" % (start_hour + extra_hours, end_minute))
# -------------------------------------------------------------------------------
# Interactive Command Helper Functions
# -------------------------------------------------------------------------------
def get_target(val):
    """Prompt for and return an integer value labelled by `val`."""
    print("{} : ".format(val))
    return int(input())
def get_slotdict(df):
    """Interactively collect the desired TA count for every slot in `df`."""
    print("Enter the number of desired TA's for each slot: ")
    return {slot: get_target(slot) for slot in get_slots(df)}
def get_duration():
    """Prompt for and return the slot duration in minutes."""
    print("Enter the duration of slots in minutes: ", end="")
    return int(input())
def get_weightdict():
    """Interactively collect a value for every weight in constants.WEIGHTS."""
    print("Enter the value you want to give to each weight: ")
    return {weight: get_target(weight) for weight in constants.WEIGHTS}
def get_stress_slots(slots):
    """Interactively read a comma-separated list of stress slots, keeping
    only names that appear in `slots` (invalid entries are reported and
    dropped). Empty input yields []."""
    print(
        "Enter the slot names separated by commas, that you want to guarantee to have min_skill number of 'skilled' TAs:"
    )
    stress_slots = str(input()).replace(" ", "").split(",")
    print(stress_slots)
    if len(stress_slots) == 1 and stress_slots[0] == "":
        return []
    valid = []
    for slot in stress_slots:
        if slot in slots:
            valid.append(slot)
        else:
            print(slot, " is not a valid slot and is being removed.")
    return valid
def get_constraints(slots):
    """Interactively collect the scheduling constraints, in fixed order."""
    # List literals evaluate left-to-right, so the prompts appear in the
    # same order as the original append sequence.
    return [
        get_stress_slots(slots),
        get_target("min_exp"),
        get_target("min_skill"),
        get_target("target_delta"),
        get_target("flex_shifts"),
    ]
| true |
8cc048449e289d0e03b34da65f168f3ba720735c | Python | Aasthaengg/IBMdataset | /Python_codes/p03951/s060452198.py | UTF-8 | 248 | 2.96875 | 3 | [] | no_license | N = int(input())
s = input()
t = input()

# Over every start offset into s, measure the longest prefix of t that
# matches s contiguously from that offset (the scan may also run past the
# end of s, yielding a zero-length match there).
best = 0
for offset in range(len(s) + 1):
    matched = 0
    for i in range(len(t)):
        if offset + i >= len(s) or s[offset + i] != t[i]:
            break
        matched += 1
    best = max(best, matched)

# Shortest combined length is both lengths minus the best overlap found.
print(len(s) + len(t) - best)
| true |
eb89cd4f33e4614f2f067b1f2dfd7fa4142e0326 | Python | freecraver/SOS_ants_ga | /knapsack/instance_solver.py | UTF-8 | 2,834 | 3.0625 | 3 | [] | no_license | import os
import time
from datetime import datetime
import pandas as pd
from knapsack.ga.ga_knapsack import solve_knapsack
from knapsack.aco.aco_knapsack import solve_aco_knapsack
# Experiment configuration: where instance files live, where result CSVs
# are written, and how often each instance is re-solved.
INSTANCE_PATH = "res"
STATS_PATH = "stats"
NR_ITERATION = 10 # number of times a single instance should be evaluated (random fluctuations, confidence intervals,..)
def solve_ga(capacity, instances):
    """Solve one knapsack instance with the GA solver; return the best value."""
    result, _ = solve_knapsack(capacity, instances)
    return result.keys[0].values[1]
def solve_aco(capacity, instances):
    """Solve one knapsack instance with the ACO solver; return its fitness."""
    return solve_aco_knapsack(capacity, instances, ant_count=10, iteration_count=50)["fitness"]
def solve_instance(instance_name, capacity, instances):
    """Run each solver NR_ITERATION times on one instance; return stat rows."""
    # The lambda wrappers of the original were pass-throughs; the functions
    # can be referenced directly.
    solvers = [
        {"solver_name": "GA", "solve": solve_ga},
        {"solver_name": "ACO", "solve": solve_aco},
    ]
    stats = []
    for run in range(NR_ITERATION):
        for solver in solvers:
            print(f"Starting attempt {run + 1} for {solver['solver_name']}")
            started = time.time()
            best_value = solver["solve"](capacity, instances)
            elapsed = time.time() - started
            print(f"Best solution gives a value of {best_value}")
            stats.append({
                "instance": instance_name,
                "method": solver["solver_name"],
                "value": best_value,
                "execution_time": elapsed,
                "run": run + 1,
            })
    return stats
def solve_folder(folder_name):
    """Solve every instance under INSTANCE_PATH/<folder_name>, persist stats."""
    print("*"*60)
    print(f"Solving instances from folder {folder_name}...")
    collected = []
    folder = os.path.join(INSTANCE_PATH, folder_name)
    for instance_file in os.listdir(folder):
        print("-"*60)
        print(f"Solving instance {instance_file}...")
        capacity, instances = load_instance(os.path.join(folder, instance_file))
        collected.extend(solve_instance(instance_file, capacity, instances))
    store_results(folder_name, collected)
    print("*" * 60)
def store_results(folder_name, stats_lst):
    """Dump the collected stats rows as a timestamped CSV under STATS_PATH."""
    timestamp = datetime.now().strftime('%m-%d_%H_%M_%S')
    res_file_name = os.path.join(STATS_PATH, folder_name + "_run" + timestamp + ".csv")
    pd.DataFrame.from_dict(stats_lst).to_csv(res_file_name, index=False)
def load_instance(path):
    """Read a knapsack instance file.

    :param path: path to instance file
    :return: capacity: float indicating capacity of knapsack,
             items: list of [weight, value] pairs
    """
    items = []
    with open(path, "r") as f:
        # First line holds "<count> <capacity>"; the count is unused.
        _, capacity = f.readline().split()
        for line in f:
            fields = line.split()
            # Instance lines store "value weight"; keep them as
            # [weight, value] and silently skip malformed lines, as before.
            if len(fields) == 2:
                items.append([float(fields[1]), float(fields[0])])
    return float(capacity), items
if __name__ == "__main__":
solve_folder("large_scale") | true |
8a1beba785f1b243a7d305664b0ced51888fb00e | Python | pratapbhanu/misc | /Author-Paper/PythonBenchmark/train_svm.py | UTF-8 | 4,093 | 2.546875 | 3 | [
"BSD-2-Clause"
] | permissive | import data_io
from sklearn import svm, cross_validation
import os
import cPickle
import numpy as np
from sklearn.grid_search import GridSearchCV
from sklearn.svm.classes import SVC
from sklearn.metrics.metrics import classification_report
from sklearn.cross_validation import train_test_split
def main():
    """Tune an SVM separating deleted from confirmed papers.

    Features are fetched from the project database on first run and cached
    on disk as pickle files; a keyword-match column is appended, the matrix
    is standardized, and a GridSearchCV sweep over RBF/linear kernels is
    reported for precision and recall.
    (Python 2 / legacy scikit-learn code; `print()` here emits "()", i.e. it
    was intended as a blank-line separator.)
    """
    # Load (or compute and cache) features for the "deleted" class.
    print("Getting features for deleted papers from the database")
    if(os.path.exists("features_deleted.obj")):
        with open("features_deleted.obj", 'r') as loadfile:
            features_deleted = cPickle.load(loadfile)
    else:
        features_deleted = data_io.get_features_db("TrainDeleted")
        with open("features_deleted.obj", 'w') as dumpfile:
            cPickle.dump(features_deleted, dumpfile, protocol=cPickle.HIGHEST_PROTOCOL)
    # Load (or compute and cache) features for the "confirmed" class.
    print("Getting features for confirmed papers from the database")
    if(os.path.exists("features_confirmed.obj")):
        with open("features_confirmed.obj", 'r') as loadfile:
            features_conf = cPickle.load(loadfile)
    else:
        features_conf = data_io.get_features_db("TrainConfirmed")
        with open("features_confirmed.obj", 'w') as dumpfile:
            cPickle.dump(features_conf, dumpfile, protocol=cPickle.HIGHEST_PROTOCOL)
    # Drop the first two columns of every row (presumably identifier
    # columns -- TODO confirm against data_io.get_features_db) and build
    # binary targets: 0 = deleted, 1 = confirmed.
    features = [x[2:] for x in features_deleted + features_conf]
    target = [0 for x in range(len(features_deleted))] + [1 for x in range(len(features_conf))]
    #code for including keywords match feature
    print "adding addtional features..."
    import additional_features as af
    all_features = af.get_keywords_feature()
    kw_deleted, kw_confirmed, _ = all_features
    kw_features = kw_deleted+kw_confirmed
    # kw_features rows are aligned with `features` (deleted first, then
    # confirmed); append the common-keyword count as one extra column.
    for i in range(len(features)):
        _,_,ckw = kw_features[i]
        features[i]+=(ckw,)
    # Standardize every column to zero mean and unit variance.
    featuresnp = np.array(features, dtype='float32')
    targetnp = np.array(target, dtype='int32')
    featuresnp -= np.mean(featuresnp, axis=0)
    featuresnp /= np.std(featuresnp, axis=0)
    # Set the parameters by cross-validation
    # Split the dataset in two equal parts
    X_train, X_test, y_train, y_test = train_test_split(featuresnp, targetnp, test_size=0.3, random_state=0)
    tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
                         'C': [1, 10, 100, 1000]},
                        {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
    scores = ['precision', 'recall']
    for score in scores:
        print("# Tuning hyper-parameters for %s" % score)
        print()
        # NOTE(review): `score_func` and `cv_scores_` belong to a very old
        # scikit-learn API (modern releases use `scoring` / `cv_results_`);
        # this only runs against the pinned legacy version.
        clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=4, score_func=score, n_jobs=4, verbose=2)
        clf.fit(X_train, y_train)
        print("Best parameters set found on development set:")
        print()
        print(clf.best_estimator_)
        print()
        print("Grid scores on development set:")
        print()
        # NOTE(review): the loop variable `scores` shadows the outer list of
        # scoring names; iteration is unaffected (the iterator is already
        # bound) but the name is misleading.
        for params, mean_score, scores in clf.cv_scores_:
            print("%0.3f (+/-%0.03f) for %r"
                  % (mean_score, scores.std() / 2, params))
        print()
        print("Detailed classification report:")
        print()
        print("The model is trained on the full development set.")
        print("The scores are computed on the full evaluation set.")
        print()
        y_true, y_pred = y_test, clf.predict(X_test)
        print(classification_report(y_true, y_pred))
        print()
    # Earlier single-model training path, kept for reference:
#    print "Training svm model"
#    #clf = svm.SVC(verbose=True, probability=True,)
#    clf = svm.SVC(verbose=True)
#
##    cv = cross_validation.KFold(len(features), n_folds=4)
#    cv = cross_validation.ShuffleSplit(len(features), n_iter=4, test_size=0.3, random_state=0)
#    results = cross_validation.cross_val_score(clf, X=featuresnp, y=targetnp, cv=cv, n_jobs=4, verbose=True)
#    #print out the mean of the cross-validated results
#    print "Results: ", results
#    print "Results: " + str( np.array(results).mean())
#    clf.fit(features, target)
#    print "saving linear logistic regression model"
#    data_io.save_model(clf, prefix="svm_")
if __name__=="__main__":
main() | true |
e6e3a22446496a47a4cd957a7483262840840ff9 | Python | toyugo/holbertonschool-higher_level_programming | /0x03-python-data_structures/6-print_matrix_integer.py | UTF-8 | 195 | 3.453125 | 3 | [] | no_license | #!/usr/bin/python3/
def print_matrix_integer(matrix=[[]]):
    """Print a matrix of integers, one row per line, values separated by a
    single space (no trailing space).

    The matrix default is read-only here, so the mutable default is safe.
    """
    for row in matrix:
        # Compare positions, not values: the original compared each element
        # to the LAST VALUE of the row, so a row like [1, 2, 1] printed as
        # "12 1" (the first 1 lost its separating space).
        for idx, value in enumerate(row):
            print("{:d}".format(value), end="" if idx == len(row) - 1 else " ")
        print("")
| true |
741a6480c20b41bc81197754bda835f8bd532eb3 | Python | Marowak/pesquisaLinearVideos | /main.py | ISO-8859-1 | 1,332 | 3.234375 | 3 | [] | no_license | # -*- coding: cp1252 -*-
import numpy as np
import cv2
def main():
    """Scan a video for the frame most similar to a reference image and
    print the timestamp (minutes/seconds) where it occurs."""
    video_path = 'video.flv'                # video file to scan
    reference = cv2.imread('frame.jpg')     # frame we are searching for
    video = cv2.VideoCapture(video_path)    # open the video

    framesPassados = 0                      # kept from original; never updated
    best_diff = 50000                       # best (lowest) MSE seen so far
    best_frame = 0                          # frame index of the best match
    total_frames = 0

    # Walk through every frame while the video stays open.
    while video.isOpened():
        ret, frame = video.read()
        if not ret:
            break
        cv2.imshow('frame', frame)          # display each frame on screen
        total_frames += 1
        diff = mse(reference, frame)
        # Keep the closest match below the similarity threshold.
        if diff < 3000 and diff < best_diff:
            best_diff = diff
            best_frame = total_frames
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # The video is assumed to run at 25 frames per second.
    tempoTotal = best_frame / 25.0
    minutos = str(np.floor(tempoTotal / 60))
    segundos = str(tempoTotal % 60)
    print("Minutos: " + minutos + " Segundos: " + segundos)
    video.release()
    cv2.destroyAllWindows()
def mse(imagem1, imagem2):
    """Mean squared error per pixel between two equally-sized images.

    Lower values mean more similar images; 0 means identical.
    """
    diff = imagem1.astype("float") - imagem2.astype("float")
    err = np.sum(diff ** 2)
    # Normalize by the pixel count of the FIRST image.  The original divided
    # by imagem1.shape[0] * imagem2.shape[1], mixing both arguments; the two
    # images must be the same size anyway for the subtraction above to
    # broadcast, so the value is unchanged but the intent is now explicit.
    return err / float(imagem1.shape[0] * imagem1.shape[1])
main()  # runs immediately on import/execution (the script has no __main__ guard)
| true |
3fe89f72f014aa8387ac42bc2bcaa2a11cc55b03 | Python | evereux/pycatia | /pycatia/sketcher_interfaces/circle_2D.py | UTF-8 | 5,134 | 2.6875 | 3 | [
"MIT"
] | permissive | #! usr/bin/python3.9
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.sketcher_interfaces.curve_2D import Curve2D
from pycatia.sketcher_interfaces.point_2D import Point2D
from pycatia.system_interfaces.system_service import SystemService
class Circle2D(Curve2D):
    """
    Wrapper for the CATIA sketcher ``Circle2D`` COM object (a circle in 2D
    space).  Auto-generated from V5Automation files; the CAA V5 notes are
    kept as reference.

    .. note::
        :class: toggle

        CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)

        | System.IUnknown
        |     System.IDispatch
        |         System.CATBaseUnknown
        |             System.CATBaseDispatch
        |                 System.AnyObject
        |                     SketcherInterfaces.GeometricElement
        |                         SketcherInterfaces.Geometry2D
        |                             SketcherInterfaces.Curve2D
        |                                 Circle2D
        |
        | Class defining a circle in 2D Space.
    """

    def __init__(self, com_object):
        super().__init__(com_object)
        # Direct handle to the underlying COM object for property access.
        self.circle_2d = com_object

    @property
    def center_point(self) -> Point2D:
        """
        Returns the center point of the circle, wrapped as a Point2D.

        CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384):
        ``Property CenterPoint() As Point2D`` -- oCenterPoint is the center
        point of the circle.

        :return: Point2D
        :rtype: Point2D
        """
        return Point2D(self.circle_2d.CenterPoint)

    @center_point.setter
    def center_point(self, value: Point2D):
        """
        Sets the center point of the circle.

        :param Point2D value:
        """
        # NOTE(review): pycatia setters usually assign the unwrapped
        # ``value.com_object`` rather than the Python wrapper itself --
        # confirm this assignment is accepted by the COM layer.
        self.circle_2d.CenterPoint = value

    @property
    def radius(self) -> float:
        """
        Returns the radius of the circle (read only).

        CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384):
        ``Property Radius() As double (Read Only)`` -- oRadius is the radius
        of the circle.

        :return: float
        :rtype: float
        """
        return self.circle_2d.Radius

    def get_center(self) -> tuple:
        """
        Returns the (x, y) coordinates of the circle center.

        CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384):
        ``Sub GetCenter(CATSafeArrayVariant oData)`` -- oData[0] is the X
        coordinate and oData[1] the Y coordinate of the center point.

        Example: ``double center(1)`` / ``myCircle.GetCenter center``

        :return: tuple
        :rtype: tuple
        """
        vba_function_name = 'get_center'
        # GetCenter fills a VBA array in place, which cannot be expressed
        # through the plain dispatch interface, so a small VBA shim is
        # evaluated via the CATIA system service instead.
        vba_code = """
        Public Function get_center(circle2_d)
            Dim oData (2)
            circle2_d.GetCenter oData
            get_center = oData
        End Function
        """

        system_service = self.application.system_service
        return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])

    def set_data(self, i_center_x: float, i_center_y: float, i_radius: float) -> None:
        """
        Modifies the characteristics of the circle.

        CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384):
        ``Sub SetData(double iCenterX, double iCenterY, double iRadius)``

        :param float i_center_x: X coordinate of the circle center
        :param float i_center_y: Y coordinate of the circle center
        :param float i_radius: radius of the circle
        :return: None
        :rtype: None
        """
        return self.circle_2d.SetData(i_center_x, i_center_y, i_radius)

    def __repr__(self):
        return f'Circle2D(name="{self.name}")'
| true |
bcb32f93428be196118dd5a6110d470ad3640e6c | Python | melund/toyplot | /toyplot/projection.py | UTF-8 | 8,836 | 2.984375 | 3 | [] | no_license | # Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
from __future__ import division
import numpy
def _mix(a, b, amount):
return ((1.0 - amount) * a) + (amount * b)
def _log(x, base):
return numpy.log10(numpy.abs(x)) / numpy.log10(base)
def _in_range(a, x, b):
left = min(a, b)
right = max(a, b)
return numpy.logical_and(left <= x, x <= right)
class Piecewise(object):
    """Compute a projection from an arbitrary collection of linear and log segments."""

    class Segment(object):
        """One piece of the projection: a scale plus its domain/range
        intervals and the bounds over which the piece applies."""

        class Container(object):
            """Plain attribute holder for min/max (and nested bounds)."""
            pass

        def __init__(
                self,
                scale,
                domain_min,
                domain_max,
                range_min,
                range_max,
                domain_bounds_min,
                domain_bounds_max,
                range_bounds_min,
                range_bounds_max):
            self.scale = scale
            self.domain = Piecewise.Segment.Container()
            self.domain.min = domain_min
            self.domain.max = domain_max
            self.domain.bounds = Piecewise.Segment.Container()
            self.domain.bounds.min = domain_bounds_min
            self.domain.bounds.max = domain_bounds_max
            self.range = Piecewise.Segment.Container()
            self.range.min = range_min
            self.range.max = range_max
            self.range.bounds = Piecewise.Segment.Container()
            self.range.bounds.min = range_bounds_min
            self.range.bounds.max = range_bounds_max

        def __repr__(self):
            return "toyplot.projection.Piecewise.Segment(%s, %s, %s, %s, %s, %s, %s, %s, %s)" % (self.scale,
                self.domain.min,
                self.domain.max,
                self.range.min,
                self.range.max,
                self.domain.bounds.min,
                self.domain.bounds.max,
                self.range.bounds.min,
                self.range.bounds.max)

    def __init__(self, segments):
        self._segments = segments

    def __call__(self, domain_values):
        """Transform values from the domain to the range."""
        domain_values = numpy.ma.array(domain_values, dtype="float64")
        range_values = numpy.empty_like(domain_values)
        for segment in self._segments:
            # Apply each segment only to the values inside its domain bounds.
            indices = _in_range(
                segment.domain.bounds.min,
                domain_values,
                segment.domain.bounds.max)
            if segment.scale == "linear":
                amount = (domain_values[
                    indices] - segment.domain.min) / (segment.domain.max - segment.domain.min)
                range_values[indices] = _mix(
                    segment.range.min, segment.range.max, amount)
            else:
                scale, base = segment.scale
                if scale == "log":
                    amount = (_log(domain_values[indices],
                                   base) - _log(segment.domain.min,
                                                base)) / (_log(segment.domain.max,
                                                               base) - _log(segment.domain.min,
                                                                            base))
                    range_values[indices] = _mix(
                        segment.range.min, segment.range.max, amount)
                else:
                    raise Exception("Unknown scale: %s" % (scale,))
        if range_values.shape == ():
            # numpy.asscalar() was removed in NumPy 1.23; ndarray.item() is
            # the long-standing equivalent and returns a plain Python scalar.
            range_values = range_values.item()
        return range_values

    def inverse(self, range_values):
        """Transform values from the range to the domain."""
        range_values = numpy.ma.array(range_values, dtype="float64")
        domain_values = numpy.empty_like(range_values)
        for segment in self._segments:
            indices = _in_range(
                segment.range.bounds.min,
                range_values,
                segment.range.bounds.max)
            if segment.scale == "linear":
                amount = (range_values[
                    indices] - segment.range.min) / (segment.range.max - segment.range.min)
                domain_values[indices] = _mix(
                    segment.domain.min, segment.domain.max, amount)
            else:
                scale, base = segment.scale
                if scale == "log":
                    amount = (range_values[
                        indices] - segment.range.min) / (segment.range.max - segment.range.min)
                    # Interpolate in log space and restore the domain's sign.
                    domain_values[indices] = numpy.sign(
                        segment.domain.min) * numpy.power(
                        base, _mix(
                            _log(
                                segment.domain.min, base), _log(
                                segment.domain.max, base), amount))
                else:
                    raise Exception("Unknown scale: %s" % (scale,))
        if domain_values.shape == ():
            domain_values = domain_values.item()  # see note in __call__
        return domain_values
def linear(domain_min, domain_max, range_min, range_max):
    """Build a single-segment linear projection covering the whole axis."""
    segment = Piecewise.Segment(
        "linear",
        domain_min, domain_max,
        range_min, range_max,
        -numpy.inf, numpy.inf,
        -numpy.inf, numpy.inf,
    )
    return Piecewise([segment])
def log(
        base,
        domain_min,
        domain_max,
        range_min,
        range_max,
        linear_domain_min=-1,
        linear_domain_max=1):
    """Build a log-scale projection.

    A domain that straddles zero cannot be purely logarithmic, so a linear
    band over [linear_domain_min, linear_domain_max] bridges the sign
    change; the _mix(..., 0.2/0.4/0.6/0.8) calls choose where that band
    falls inside the output range.
    """
    # Negative domain
    if domain_max < 0:
        return Piecewise([
            Piecewise.Segment(("log", base), domain_min, domain_max, range_min,
                              range_max, -numpy.inf, numpy.inf, -numpy.inf, numpy.inf),
        ])
    # Positive domain
    if 0 < domain_min:
        return Piecewise([
            Piecewise.Segment(("log", base), domain_min, domain_max, range_min,
                              range_max, -numpy.inf, numpy.inf, -numpy.inf, numpy.inf),
        ])
    # Mixed negative / positive domain: log, then a central linear band
    # spanning the middle fifth of the range, then log again.
    if domain_min < linear_domain_min and linear_domain_max < domain_max:
        linear_range_min = _mix(range_min, range_max, 0.4)
        linear_range_max = _mix(range_min, range_max, 0.6)
        return Piecewise([
            Piecewise.Segment(("log", base), domain_min, linear_domain_min, range_min,
                              linear_range_min, -numpy.inf, linear_domain_min, -numpy.inf, linear_range_min),
            Piecewise.Segment("linear", linear_domain_min, linear_domain_max, linear_range_min,
                              linear_range_max, linear_domain_min, linear_domain_max, linear_range_min, linear_range_max),
            Piecewise.Segment(("log", base), linear_domain_max, domain_max, linear_range_max,
                              range_max, linear_domain_max, numpy.inf, linear_range_max, numpy.inf),
        ])
    # Domain extends below the linear band only: log segment plus a linear
    # tail occupying the top fifth of the range.
    if domain_min < linear_domain_min:
        linear_range_min = _mix(range_min, range_max, 0.8)
        return Piecewise([
            Piecewise.Segment(("log", base), domain_min, linear_domain_min, range_min,
                              linear_range_min, -numpy.inf, linear_domain_min, -numpy.inf, linear_range_min),
            Piecewise.Segment("linear", linear_domain_min, linear_domain_max, linear_range_min,
                              range_max, linear_domain_min, numpy.inf, linear_range_min, numpy.inf),
        ])
    # Domain extends above the linear band only: linear head occupying the
    # bottom fifth of the range, then a log segment.
    if linear_domain_max < domain_max:
        linear_range_max = _mix(range_min, range_max, 0.2)
        return Piecewise([
            Piecewise.Segment("linear", domain_min, linear_domain_max, range_min,
                              linear_range_max, -numpy.inf, linear_domain_max, -numpy.inf, linear_range_max),
            Piecewise.Segment(("log", base), linear_domain_max, domain_max, linear_range_max,
                              range_max, linear_domain_max, numpy.inf, linear_range_max, numpy.inf),
        ])
    # Domain fits entirely inside the linear band: plain linear projection.
    return Piecewise([
        Piecewise.Segment("linear", domain_min, domain_max, range_min,
                          range_max, -numpy.inf, numpy.inf, -numpy.inf, numpy.inf),
    ])
| true |
8e9bde139cb033e6a6f2ecfa5767dfcc16c58a4f | Python | pondz1/027 | /histogram.py | UTF-8 | 1,126 | 2.859375 | 3 | [] | no_license | import cv2
import numpy as np
import matplotlib.pyplot as plt
def histogram(img, L=256):
    """Count how many pixels of `img` take each grey level in [0, L).

    :param img: 2-D ndarray of integer grey levels in [0, L).
    :param L: number of grey levels (bins).
    :return: 1-D ndarray of length L with per-level pixel counts.
    """
    # int64 instead of the original uint8: a uint8 accumulator silently
    # wrapped to 0 as soon as any grey level occurred more than 255 times,
    # which happens for virtually every real image.
    h = np.zeros(L, dtype=np.int64)
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            h[img.item(i, j)] += 1
    return h
def equalization(img):
    """Histogram-equalize an 8-bit greyscale image.

    :param img: 2-D ndarray of grey levels in [0, 256).
    :return: (equalized uint8 image, cumulative distribution P of length 256)
    """
    L = 256
    p, _ = np.histogram(img, bins=L, range=(0, L))
    p = p / img.size
    # Cumulative distribution function of the grey levels (replaces the
    # original element-by-element accumulation loop).
    P = np.cumsum(p)
    out = np.zeros(img.shape, dtype=np.uint8)
    for i in range(out.shape[0]):
        for j in range(out.shape[1]):
            # Map each grey level through the CDF.  Plain indexed assignment
            # replaces ndarray.itemset(), which was removed in NumPy 2.0.
            out[i, j] = int(P[img.item(i, j)] * (L - 1))
    # The original called plt.show() here with no figure pending -- a stray
    # debugging call, removed.  `out` is already uint8, so no astype needed.
    return out, P
# Load the test image as greyscale (flag 0) and display it.
filename = 'misc/house.tiff'
img = cv2.imread(filename, 0)
cv2.namedWindow('Grayscale', cv2.WINDOW_NORMAL)
cv2.imshow('Grayscale', img)
# Equalize the image twice (img_eq_in and img_eq are computed identically)
# and show one of the results.
img_eq_in = equalization(img)
img_eq = equalization(img)
cv2.namedWindow('Image Equalization', cv2.WINDOW_NORMAL)
cv2.imshow('Image Equalization', img_eq[0])
# Histograms of the equalized images.
his_in = histogram(img_eq_in[0])
his = histogram(img_eq[0])
print(img_eq[0].shape)
# NOTE(review): his[1] is a single bin count, so this plots one scalar;
# plt.plot(his) was probably intended -- confirm before changing.
plt.plot(his[1])
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows() | true |
8bfbf3314108cd0125f5bc0b722d01fcff3bbe80 | Python | weishiyan/Physics-Informed-Reinforcement-Learning | /pendulum_SL/video_maker.py | UTF-8 | 688 | 2.515625 | 3 | [
"MIT"
] | permissive | import cv2
import numpy as np
import glob
print("Starting...")

network = "PINN"

# Gather every epoch snapshot for this network, in sorted (epoch) order.
file_list = sorted(glob.glob(f'./plots/*_{network}_Epochs.png'))

img_array = []
size = ()
for filename in file_list:
    frame = cv2.imread(filename)
    height, width, layers = frame.shape
    size = (width, height)      # frame size for the writer (last frame wins)
    img_array.append(frame)

# fps defined within pinn.py fit function
fps = 24

# cv2.VideoWriter(Filename, codec, fps, size, color=True)
out = cv2.VideoWriter(f'./plots/{network}_result_vid.avi',
                      cv2.VideoWriter_fourcc(*'DIVX'), fps, size)
for frame in img_array:
    out.write(frame)
out.release()

print("Finished!")
| true |
87b6f71b5108d87c2ebc9a35d17b357c96bf0c32 | Python | pwdemars/projecteuler | /josh/Problems/38.py | UTF-8 | 489 | 2.859375 | 3 | [] | no_license | max_num = 0
# Search for the largest 9-digit number with nine distinct non-zero digits
# (1-9 pandigital) that is a concatenated product of some seed a with
# (1, 2, ..., n).  `max_num` is initialised to 0 just above this loop.
# (Python 2 code: note the `print` statements.)
for a in range(1,9876):
    # Only seeds whose own digits are already distinct and non-zero can
    # lead to a pandigital concatenation.
    if len(set(str(a))) == len(str(a)) and '0' not in str(a):
        success = False
        num = a   # running concatenation, starts as a*1
        b = 2     # next multiplier to append
        while not success:
            num = int(str(num)+str(b*a))
            if len(str(num)) == 9 and len(set(str(num))) == len(str(num)) and '0' not in str(num):
                # Exactly nine distinct non-zero digits: pandigital hit.
                print num
                success = True
            elif (len(str(num)) == 9 and len(set(str(num))) != len(str(num))) or len(str(num)) > 9 :
                # Nine digits with repeats, or too long: this seed is dead.
                break
            b += 1
        if success == True and num > max_num :
            max_num = num
print max_num | true |
f11a751129e981c7cba72f3eebe4e3088e288331 | Python | abetts155/Projects | /tools/sort.py | UTF-8 | 5,012 | 3.546875 | 4 | [] | no_license | from argparse import ArgumentParser
from argparse import ArgumentParser
from random import choice, sample
from typing import Dict, List
def generate(length: int, minimum: int, maximum: int) -> List[int]:
    """Return `length` distinct random integers drawn from [minimum, maximum].

    random.sample is the standard-library routine for sampling without
    replacement and replaces the original choice-and-remove loop, which was
    O(length * population) because of list.remove.  (If length exceeds the
    population size this now raises ValueError instead of IndexError.)
    """
    return sample(range(minimum, maximum + 1), length)
class Vertex:
    """Node of a doubly linked list holding one value."""
    __slots__ = ['value', 'left', 'right']

    def __init__(self, value):
        self.value = value
        self.left = self.right = None
def print_graph(low: Vertex, mid: Vertex):
    """Print the list from `low` rightwards, highlighting the `mid` vertex."""
    print('-' * 80)
    node = low
    while node:
        if node is mid:
            print('|{}|'.format(node.value), end=' ')
        else:
            print(node.value, end=' ')
        node = node.right
    print()
    print('-' * 80)
    print()
def sort(data: List[int]):
    """Rebuild `data` by inserting each value into a doubly linked list.

    `data[0:3]` must already be ordered (main() guarantees this).  `low`,
    `mid` and `high` track the ends and middle of the list; each new value
    is linked in adjacent to whichever of those three anchors is closest in
    value, and `mid` is shifted whenever the halves drift more than one
    element apart.  The list state is printed after every insertion, `data`
    is rewritten from the final list order, and True/False is printed for
    whether that order actually ended up sorted.
    """
    low, mid, high = Vertex(data[0]), Vertex(data[1]), Vertex(data[2])
    low.right = mid
    mid.left = low
    mid.right = high
    high.left = mid
    left_size = 1
    right_size = 1
    print_graph(low, mid)
    for value in data[3:]:
        vertex = Vertex(value)
        if vertex.value < low.value:
            # New minimum: prepend.
            low.left = vertex
            vertex.right = low
            low = vertex
            left_size += 1
        elif vertex.value > high.value:
            # New maximum: append.
            vertex.left = high
            high.right = vertex
            high = vertex
            right_size += 1
        else:
            mid_distance = abs(value - mid.value)
            if value < mid.value:
                # Link in next to whichever of low/mid is closer in value.
                low_distance = abs(value - low.value)
                if low_distance < mid_distance:
                    other = low.right
                    other.left = vertex
                    vertex.right = other
                    vertex.left = low
                    low.right = vertex
                else:
                    other = mid.left
                    other.right = vertex
                    vertex.left = other
                    vertex.right = mid
                    mid.left = vertex
                left_size += 1
            else:
                # Link in next to whichever of mid/high is closer in value.
                high_distance = abs(value - high.value)
                if mid_distance < high_distance:
                    other = mid.right
                    other.left = vertex
                    vertex.right = other
                    vertex.left = mid
                    mid.right = vertex
                else:
                    other = high.left
                    other.right = vertex
                    vertex.left = other
                    vertex.right = high
                    high.left = vertex
                right_size += 1
        # Re-balance: keep mid within one element of the true middle.
        if abs(left_size - right_size) > 1:
            if left_size > right_size:
                mid = mid.left
                left_size -= 1
                right_size += 1
            else:
                mid = mid.right
                left_size += 1
                right_size -= 1
        print_graph(low, mid)
    # Rewrite `data` from the list and check whether it is sorted.
    done = True
    vertex = low
    data.clear()
    while vertex:
        data.append(vertex.value)
        if vertex.right and vertex.value > vertex.right.value:
            done = False
        vertex = vertex.right
    print(done)
def main(data: List[int], length: int, minimum: int, maximum: int):
    """Sort `data` (or a freshly generated array) and print it before/after.

    :param data: explicit array to sort; falsy -> generate one instead.
    :param length: size of the generated array when `data` is empty.
    :param minimum: smallest allowed generated value.
    :param maximum: largest allowed generated value.
    """
    if not data:
        data = generate(length, minimum, maximum)
    print('In: {}'.format(' '.join(str(x) for x in data)))
    if len(data) >= 3:
        # Bootstrap: order the first three elements in place so sort() can
        # start from a valid low <= mid <= high triple.
        a, b, c, *rest = data
        if a > b:
            a, b = b, a
        if c < a:
            data[0], data[1], data[2] = c, a, b
        elif c > b:
            data[0], data[1], data[2] = a, b, c
        else:
            data[0], data[1], data[2] = a, c, b
        sort(data)
    elif len(data) == 2:
        # Two elements: a simple swap suffices.
        if data[0] > data[1]:
            data[1], data[0] = data[0], data[1]
    print('Out: {}'.format(' '.join(str(x) for x in data)))
def parse_command_line():
    """Define and parse the command-line interface for the sorter."""
    parser = ArgumentParser(description='Sort an array of numbers')
    parser.add_argument('-L', '--length',
                        type=int,
                        default=10,
                        metavar='<INT>',
                        help='length of array to sort')
    parser.add_argument('--min',
                        type=int,
                        default=0,
                        metavar='<INT>',
                        help='minimum allowed integer in the array')
    parser.add_argument('--max',
                        type=int,
                        default=100,
                        metavar='<INT>',
                        help='maximum allowed integer in the array')
    parser.add_argument('-A', '--array',
                        type=int,
                        nargs='+',
                        help='sort this array')
    return parser.parse_args()
if __name__ == '__main__':
    # An explicit --array is sorted as-is; otherwise a random array of
    # --length values in [--min, --max] is generated and sorted.
    args = parse_command_line()
    main(args.array, args.length, args.min, args.max)
| true |
943f67fed07f315034139958a27f86c714e1a404 | Python | tpys/datatools | /src/utils.py | UTF-8 | 3,073 | 2.671875 | 3 | [] | no_license | import os,errno
import shutil
import re
def mkdirP(path):
    """
    Create a directory and don't error if the path already exists.

    If the directory already exists, don't do anything.
    :param path: The directory to create.
    :type path: str
    """
    # NOTE: assert disappears under `python -O`; raise ValueError instead
    # if this check must survive optimized runs.
    assert path is not None
    try:
        # EAFP: attempt the creation and tolerate "already exists" only when
        # the existing path really is a directory (pre-`exist_ok` idiom;
        # this module still targets Python 2, see the print statements in
        # the functions below).
        os.makedirs(path)
    except OSError as exc: # Python >2.5
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
def gen_topplus_dataset(path):
    """Copy each flat image under `path` into its own per-identity folder.

    For every file ``<name>.<ext>`` yielded by walking `path`, creates the
    folder ``<path>/<name_with_underscores>`` and copies the image into it
    as ``<name>_0001.<ext>`` (spaces replaced by underscores).  The
    commented-out lines are leftovers of a Chinese-to-pinyin renaming step.
    (Python 2 code: note the ``print`` statement.)

    NOTE(review): os.walk will also descend into the folders created here,
    so copied files may be visited and processed again -- confirm the walk
    order makes this harmless.
    """
    #f_list = open(list_name, "w")
    #zhPattern = re.compile(ur'[\u4e00-\u9fa5]+')
    #test = PinYin()
    #test.load_word()
    class_label = 0   # unused leftover
    count = 0         # number of images copied (incremented, never read)
    for parent, dirnames, filenames in os.walk(path):
        for filename in filenames:
            is_small_num_faces = False   # unused leftover
            print filename
            #match = zhPattern.search(filename.decode('utf8'))
            #print match
            #pinyin_words = test.hanzi2pinyin_split(string=match.group(0), split="_")
            new_filename = filename.replace(" ", "_")
            # Extension stripped via [:-4], so a three-letter extension
            # (".jpg"-style) is assumed.
            folder_path = path +"/"+new_filename[:-4]# + "_" + pinyin_words
            mkdirP(folder_path)
            shutil.copyfile(path + "/" + filename,folder_path + "/"+ new_filename[:-4] + "_0001" + filename[-4:])
            count = count + 1
def change_filenames_for_weibofaces(path):
    """Rename images in every subfolder of `path` to ``<folder>_%04d.<ext>``.

    Images (png/jpg/bmp) inside each ``<path>/<dirname>`` are renamed with a
    counter restarting at 1 per folder.  (Python 2 code: note the ``print``
    statement.)

    NOTE(review): the rename paths are always joined as path/dirname even
    though the inner os.walk can yield files from deeper subdirectories --
    confirm the dataset is only one level deep.
    """
    for parent, dirnames, filenames in os.walk(path):
        for dirname in dirnames:
            for sub_parent, sub_dirnames, sub_filenames in os.walk(path+"/"+dirname):
                count = 1
                for sub_filename in sub_filenames:
                    if(sub_filename.endswith("png") or sub_filename.endswith("jpg") or sub_filename.endswith("bmp")):
                        # [-4:] keeps ".ext" for the three-letter extensions
                        # matched above.
                        new_filename = dirname + "_%04d" % (count) + sub_filename[-4:]
                        count = count + 1
                        print path + "/" + dirname+"/"+new_filename
                        os.rename(path + "/" + dirname+"/"+sub_filename, path + "/" + dirname+"/"+ new_filename)
                        # f_list.write("{0} {1}\r\n".format(dirname+"/"+sub_filename, str(class_label)))
def create_list(path, list_path, create_aligned_folder = False, folder_suffix = "-aligned"):
    """Write <list_path>/list.txt with "<dir>/<image> <class_label>" lines.

    Each immediate subfolder of `path` is one class; optionally creates the
    matching "<path><folder_suffix>/<dir>" folders as a side effect.
    """
    f_list = open("{0}/list.txt".format(list_path), "w")
    class_label = 0
    for parent, dirnames, filenames in os.walk(path):
        for dirname in dirnames:
            if create_aligned_folder:
                mkdirP(path + folder_suffix + "/" + dirname)
            for sub_parent, sub_dirnames, sub_filenames in os.walk(path + "/" + dirname):
                for sub_filename in sub_filenames:
                    # endswith accepts a tuple: one call covers all three
                    # image extensions.
                    if sub_filename.endswith(("png", "jpg", "bmp")):
                        f_list.write("{0} {1}\r\n".format(dirname + "/" + sub_filename, str(class_label)))
            class_label += 1
    f_list.close()
if __name__ == '__main__':
    # Example invocations kept for reference; nothing runs by default.
    # gen_topplus_dataset("D:/dataset/Face/topplus/people")
    #change_filenames_for_weibofaces("../data/weibo_face-aligned")
    pass
| true |
422e792bcd00a432fa13c1f6b1b6f841d0cbde97 | Python | Leonardus21/palapy | /3 esercizi python.py | UTF-8 | 409 | 4.28125 | 4 | [] | no_license | #primo esercizio
# Exercise 1: solve the linear equation ax + b = 0 for user-supplied a and b.
a = int(input("inserisci il valore di a: "))
b = int(input("inserisci il valore di b: "))
equazione = "ax+b=0"
print(equazione)
x = -b/a
print("x è uguale a: ", x)
# Exercise 2: read a third number and report the largest of the three.
c = int(input("inserisci il valore di c: "))
d =max(a, b, c)
print("il numero maggiore tra ", a, ",", b, ",", c, "è: ", d)
# Exercise 3: swap a and b via tuple unpacking.
a, b = b, a
print("a = ", a)
print("b = ", b)
| true |
eaabfed1a069c6b6e3cb9709d3c29deb95485cac | Python | outsider7777/OCGNN | /networks/GAE.py | UTF-8 | 2,855 | 2.671875 | 3 | [
"MIT"
] | permissive | import torch
import torch.nn as nn
from dgl.nn.pytorch import GraphConv
from networks.GCN import GCN
class InnerProductDecoder(nn.Module):
    """Decoder model layer for link prediction."""

    def forward(self, z, sigmoid=True):
        """Decode latent variables ``z`` into a dense adjacency matrix.

        Args:
            z (Tensor): latent node embeddings, one row per node.
            sigmoid (bool, optional): when ``False``, return the raw inner
                products instead of sigmoid probabilities.
                (default: ``True``)
        """
        logits = torch.matmul(z, z.t())
        if sigmoid:
            return torch.sigmoid(logits)
        return logits
class GAE(nn.Module):
    """Graph auto-encoder with a GCN encoder and two GCN decoders.

    ``A_decoder`` maps the latent codes back to the input feature space,
    while ``S_decoder`` produces embeddings whose inner products form a
    dense adjacency/structure matrix.

    :param g: graph handed to every GCN stage.
    :param in_feats: input feature size.
    :param n_hidden: hidden width of all GCN stages.
    :param n_classes: latent code size produced by the encoder.
    :param n_layers: GCN depth (the structure decoder uses one layer less).
    :param activation: activation function shared by all GCN stages.
    :param dropout: dropout rate shared by all GCN stages.
    """

    def __init__(self,
                 g,
                 in_feats,
                 n_hidden,
                 n_classes,
                 n_layers,
                 activation,
                 dropout):
        super(GAE, self).__init__()
        self.encoder = GCN(g,
                           in_feats,
                           n_hidden,
                           n_classes,
                           n_layers,
                           activation,
                           dropout)
        # Feature decoder: latent code back to the input feature space.
        self.A_decoder = GCN(g,
                             n_classes,
                             n_hidden,
                             in_feats,
                             n_layers,
                             activation,
                             dropout)
        # Structure decoder: one layer shallower, feeds the inner-product
        # decoder below.
        self.S_decoder = GCN(g,
                             n_classes,
                             n_hidden,
                             in_feats,
                             n_layers-1,
                             activation,
                             dropout)
        self.InnerProducter = InnerProductDecoder()

    def forward(self, g, features):
        """Return (latent codes, reconstructed features, dense adjacency).

        The original also bound ``h = features`` without ever using it;
        that dead assignment has been removed.
        """
        z = self.encoder(g, features)
        recon = self.A_decoder(g, z)
        z_ = self.S_decoder(g, z)
        adj = self.InnerProducter(z_)
        return z, recon, adj
# class GCN(nn.Module):
# def __init__(self,
# g,
# in_feats,
# n_hidden,
# n_classes,
# n_layers,
# activation,
# dropout):
# super(GCN, self).__init__()
# self.g = g
# self.layers = nn.ModuleList()
# # input layer
# self.layers.append(GraphConv(in_feats, n_hidden, bias=True, activation=activation))
# # hidden layers
# for i in range(n_layers - 1):
# self.layers.append(GraphConv(n_hidden, n_hidden, bias=True, activation=activation))
# # output layer
# self.layers.append(GraphConv(n_hidden, n_classes,bias=True))
# self.dropout = nn.Dropout(p=dropout)
# print(self.layers)
# def forward(self,g, features):
# h = features
# for i, layer in enumerate(self.layers):
# if i != 0:
# h = self.dropout(h)
# h = layer(g, h)
# return h | true |
a5d39ca65ad121005ee7f79cf4bdf2e77d13a99f | Python | MrHamdulay/csc3-capstone | /examples/data/Assignment_8/sngris012/question2.py | UTF-8 | 593 | 4.0625 | 4 | [] | no_license | """Rishen Singh
Assignment 8
Question 2"""
count=0
def pairs(message):
    """Recursively count non-overlapping adjacent equal-character pairs.

    The original accumulated into a module-level ``global count`` that was
    never reset, so every call after the first returned an inflated total.
    This version is pure recursion and is safe to call repeatedly.
    """
    if message == '':
        return 0
    if len(message) > 1 and message[0] == message[1]:
        # Pair found: count it and skip both characters.
        return 1 + pairs(message[2:])
    # No pair at the front: advance one character.
    return pairs(message[1:])
user_input=input("Enter a message:\n") #user input: the message scanned for adjacent duplicate pairs
print("Number of pairs:",pairs(user_input)) | true |
1c53dd6db7d252dbc1a2fb05b5201218070f3228 | Python | ab93/Hotel-Reviews-Classfication | /nbclassify.py | UTF-8 | 5,865 | 2.546875 | 3 | [] | no_license | __author__ = 'Avik'
import os
import sys
import json
import math
import re
from decimal import *
from collections import defaultdict
# Standard English stop-word list (NLTK-style) excluded from the feature
# vocabulary during classification.
stopWords = ['i','me','my','myself','we','our','ours','ourselves','you','your','yours',
             'yourself','yourselves','he','him','his','himself','she','her','hers','herself',
             'it','its','itself','they','them','their','theirs','themselves','what','which',
             'who','whom','this','that','these','those','am','is','are','was','were','be','been',
             'being','have','has','had','having','do','does','did','doing','a','an','the','and',
             'but','if','or','because','as','until','while','of','at','by','for','with','about',
             'against','between','into','through','during','before','after','above','below','to',
             'from','up','down','in','out','on','off','over','under','again','further','then',
             'once','here','there','when','where','why','how','all','any','both','each','few',
             'more','most','other','some','such','no','nor','not','only','own','same',
             'so','than','too','very','s','t','can','will','just','don','should','now']

# ASCII punctuation characters stripped from tokens.
punctuationList = ["!",'"',"#","$","%","&","'","(",")","*","+",",","-",".","/",":",";","<","=",">","?","@",
                   "[",'\\',']','^','_','`','{','|','}','~']
def calculateScore():
precision = [0.0 for i in range(4)]
recall = [0.0 for i in range(4)]
f1 = [0.0 for i in range(4)]
with open('nboutput.txt','r') as outputFile:
data = outputFile.readlines()
tp = [0.0 for i in range(4)]
fp = [0.0 for i in range(4)]
fn = [0.0 for i in range(4)]
for line in data:
line = line.strip('\n').split(' ')
if (line[1] == 'positive') and ( re.search(r'(.)*positive(.)*',line[2]) ):
tp[0] += 1
elif (line[1] == 'positive') and ( re.search(r'(.)*negative(.)*',line[2]) ):
fp[0] += 1
elif (line[1] == 'negative') and ( re.search(r'(.)*positive(.)*',line[2]) ):
fn[0] += 1
if (line[1] == 'negative') and ( re.search(r'(.)*negative(.)*',line[2]) ):
tp[1] += 1
elif (line[1] == 'negative') and ( re.search(r'(.)*positive(.)*',line[2]) ):
fp[1] += 1
elif (line[1] == 'positive') and ( re.search(r'(.)*negative(.)*',line[2]) ):
fn[1] += 1
if (line[0] == 'truthful') and ( re.search(r'(.)*truthful(.)*',line[2]) ):
tp[2] += 1
elif (line[1] == 'truthful') and ( re.search(r'(.)*deceptive(.)*',line[2]) ):
fp[2] += 1
elif (line[1] == 'deceptive') and ( re.search(r'(.)*truthful(.)*',line[2]) ):
fn[2] += 1
if (line[0] == 'deceptive') and ( re.search(r'(.)*deceptive(.)*',line[2]) ):
tp[3] += 1
elif (line[1] == 'deceptive') and ( re.search(r'(.)*truthful(.)*',line[2]) ):
fp[3] += 1
elif (line[1] == 'truthful') and ( re.search(r'(.)*deceptive(.)*',line[2]) ):
fn[3] += 1
#print tp
#print fp
#print fn
for c in range(4):
precision[c] = tp[c]/(tp[c] + fp[c])
recall[c] = tp[c] / (tp[c] + fn[c])
f1[c] = (2 * precision[c] * recall[c]) / (precision[c] + recall[c])
#P = tp/(tp + fp)
#R = tp/(tp + fn)
print "Precision:", precision
print "Recall:",recall
print "F1:",f1
print "F1 avg:",sum(f1)/4.0
#print line
def readFeatures():
    """Load the trained model from nbmodel.txt.

    Returns a pair (priorProb, condProb): the list stored under the
    'PRIOR' key, and the remaining word -> probabilities mapping with
    that key removed.
    """
    with open('nbmodel.txt', 'r') as model_file:
        cond_prob = json.load(model_file)
    prior_prob = cond_prob.pop('PRIOR')
    return prior_prob, cond_prob
def writeFile(path, classProb):
    """Append the predicted labels for one review to nboutput.txt.

    classProb holds four log-probabilities in the order
    [positive, negative, truthful, deceptive]; the larger of each pair
    decides the predicted label.  Output line:
    "<truthful|deceptive> <positive|negative> <path>".
    """
    # Winning sentiment class: 0 = positive, 1 = negative.
    index1 = classProb.index(max(classProb[0:2]))
    # Winning veracity class: 2 = truthful, 3 = deceptive.  Index within the
    # slice itself: the original searched the full list, so an equal score at
    # position 0/1 returned the wrong index and mislabeled the review.
    index2 = 2 + classProb[2:4].index(max(classProb[2:4]))
    with open('nboutput.txt', 'a+') as outputFile:
        if index2 == 2:
            outputFile.write("truthful" + ' ')
        else:
            outputFile.write("deceptive" + ' ')
        if index1 == 0:
            outputFile.write("positive" + ' ')
        else:
            outputFile.write("negative" + ' ')
        outputFile.write(path + '\n')
def readTestFiles(path,priorProb,condProb):
    """Classify every review file under `path` with naive Bayes.

    For each file: lower-case, strip punctuation and stop words, then add
    log2(P(word|class)) per remaining word to the log-prior of each of the
    four classes, and append the winning labels to nboutput.txt via
    writeFile().  Relies on the module-level stopWords / punctuationList.
    """
    # Build one string containing every punctuation character to delete.
    punctString = ''
    for item in punctuationList:
        punctString = punctString + str(item)
    remove = punctString
    # Truncate the output file so each run starts fresh.
    f = open('nboutput.txt','w+')
    f.close()
    for root,dirs,files in os.walk(path,topdown=False):
        for name in files:
            # Start from the class log-priors (fresh copy per file).
            classProb = [math.log(prob,2) for prob in priorProb]
            if name not in ['.DS_Store','LICENSE','README.md','README.txt']:
                with open(os.path.join(root,name),'r') as f:
                    data = f.read()
                # NOTE(review): str.translate(None, remove) is Python-2-only;
                # Python 3 needs str.maketrans('', '', remove).
                data = data.lower().translate(None,remove)
                data = ' '.join([word for word in data.split() if word not in stopWords])
                for word in data.strip().split(' '):
                    if word not in condProb:
                        # Unseen word: skip (no smoothing applied here).
                        continue
                    for c in range(len(classProb)):
                        classProb[c] += math.log(condProb[word][c],2)
                writeFile(os.path.join(root,name),classProb)
def main():
    """Load the trained model, classify the reviews under the directory
    given as the first command-line argument, and print the scores."""
    prior, cond = readFeatures()
    readTestFiles(sys.argv[1], prior, cond)
    calculateScore()
if __name__ == '__main__':
    # Run the classifier only when executed as a script.
    main()
c5b5f390e3ba09993c5703eb398b30e5422c3848 | Python | jgomezc1/nldyna | /source/postprocesor.py | UTF-8 | 6,094 | 2.84375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 11 06:22:56 2018
@author: JULIAN PARRA
"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Post-processing functions for graphics, and result visualization
def NodalDispPLT(DOF,TotalT,ninc,ylabel):
    """ This function plots displacement Vs. t

    INPUT:
    -----
       - DOF   : Displacement response history (1d array) of the degree of freedom
       - TotalT: Total time of the step by step integration procedure of solution
       - ninc  : unused; kept for call compatibility with existing scripts
       - ylabel: Label for the vertical axis

    OUTPUT:
    ------
       - python plot displacement Vs. t
    """
    t = np.linspace(0,TotalT,len(DOF))
    plt.figure(figsize=(6.7,4))
    plt.plot(t,DOF,'gray')
    plt.grid(True)
    # Positional limits: the xmin=/xmax= keywords used originally were
    # removed from matplotlib's xlim in recent releases.
    plt.xlim(0, TotalT)
    plt.title("Displacement history for the specified DOF")
    plt.xlabel("Time (sec)")
    plt.ylabel(ylabel)
    plt.show()
    return
def GrafModel(Elements,Nodes):
    """ This function plots Model, only for frame elements

    INPUT:
    -----
       - Elements: Element conectivity (array); columns 3 and 4 hold the
                   start/end node index of each element
       - Nodes:    Nodes coordinates (array); columns 1 and 2 are x and y

    OUTPUT:
    ------
       - python model plot
    """
    Nlines = len(Elements)
    plt.figure(figsize=(7,4))
    for i in range (Nlines):
        # End-point coordinates of element i.
        Cordx = np.array([Nodes[Elements[i][3]][1],Nodes[Elements[i][4]][1]])
        Cordy = np.array([Nodes[Elements[i][3]][2],Nodes[Elements[i][4]][2]])
        plt.plot(Cordx,Cordy,'black')
    plt.xlim(min(Nodes[:,1])-1,max(Nodes[:,1])+1)
    plt.ylim(min(Nodes[:,2])-1,max(Nodes[:,2])+1)
    # The original labels were swapped ("Y" on the horizontal axis even
    # though the x coordinates are plotted there).
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.show()
    return
def GrafModel3D(Elements,Nodes):
    """ This function plots 3D Model, only for frame elements

    INPUT:
    -----
       - Elements: Element conectivity (array); columns 3 and 4 hold the
                   start/end node index of each element
       - Nodes:    Nodes coordinates (array); columns 1, 2 and 3 are x, y, z

    OUTPUT:
    ------
       - python model plot
    """
    Nlines = len(Elements)
    fig = plt.figure(figsize=(7,4))
    # fig.gca(projection='3d') was deprecated and then removed from
    # matplotlib (>= 3.6); add_subplot works on both old and new versions.
    ax = fig.add_subplot(projection='3d')
    for i in range (Nlines):
        # End-point coordinates of element i.
        Cordx = np.array([Nodes[Elements[i][3]][1],Nodes[Elements[i][4]][1]])
        Cordy = np.array([Nodes[Elements[i][3]][2],Nodes[Elements[i][4]][2]])
        Cordz = np.array([Nodes[Elements[i][3]][3],Nodes[Elements[i][4]][3]])
        ax.plot(Cordx,Cordy,Cordz,'k')
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")
    ax.set_xlim3d(min(Nodes[:,1])-1,max(Nodes[:,1])+1)
    ax.set_ylim3d(min(Nodes[:,2])-1,max(Nodes[:,2])+1)
    ax.set_zlim3d(min(Nodes[:,3]),max(Nodes[:,3])+1)
    return
def PlasModel(MvarsGen, Element, xlabel, ylabel):
    """ This function plots from results the elasto-plastic histeretic curve

    INPUT:
    -----
       - MvarsGen: Python list. History of state variables per element; at
                   each step, positions 1 and 0 of MvarsGen[step][Element]
                   are plotted on X and Y respectively
       - Element : Integer. Index of the element to plot
       - xlabel  : String for title of X axis
       - ylabel  : String for title of Y axis

    OUTPUT:
    ------
       - elastoplatic curve plot
    """
    nsteps = len(MvarsGen)
    X = np.zeros(nsteps)
    Y = np.zeros(nsteps)
    for step, Mvars in enumerate(MvarsGen):
        X[step] = Mvars[Element][1]
        Y[step] = Mvars[Element][0]
    plt.figure(figsize=(6.7, 4))
    plt.plot(X, Y, 'gray')
    plt.title("Constitutive model history for element " + str(Element))
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.grid(True)
    plt.show()
    return
def writeVTKs(Elements,Nodes,IBC,disp):
    """ This function save vtks files for model animation with paraview

    INPUT:
    -----
       - Elements: Element conectivity (array); columns 3 and 4 are the
                   start/end node indices of each 2-node element
       - Nodes:    Nodes coordinates (array); columns 1 and 2 are x and y
       - IBC:      Index boundary condition (array); IBC[i][j] is the
                   equation number of DOF j of node i, or -1 if restrained
       - disp:     displacement history of non restraint nodes (array),
                   one row per equation, one column per time increment

    OUTPUT:
    ------
       - VTK files written to '03_VTKs/t<i>.vtk' (the folder must exist)
       - Totaldisp: (2*Nnodes, Ninc) array with the full displacement
                    history, zeros at restrained DOFs
    """
    Ninc = len(disp[0, :])
    Nnodes = len(Nodes)
    Nelem = len(Elements)

    # Expand to all degrees of freedom, even those with restraints.
    Totaldisp = np.zeros((Nnodes * 2, Ninc))
    for i in range(Nnodes):
        for j in range(2):
            if IBC[i][j] != -1:
                Totaldisp[2 * i + j, :] = disp[IBC[i][j], :]

    FMT = '%10.2f'
    # One legacy-ASCII VTK file per time increment, with the nodal
    # coordinates and the accumulated x/y displacements as point data.
    for i in range(Ninc):
        # 'with' guarantees the file is closed: the original called
        # VTKi.close without parentheses, so the files were never closed.
        with open('03_VTKs/' + 't' + str(i) + '.vtk', 'w') as VTKi:
            VTKi.write('# vtk DataFile Version 2.0\n')
            VTKi.write('File for t = ' + str(i) + '\n')
            VTKi.write('ASCII\n')
            VTKi.write('DATASET UNSTRUCTURED_GRID\n')
            VTKi.write('POINTS ' + str(Nnodes) + ' float\n')
            for k in range(Nnodes):
                VTKi.write('%10.2f %10.2f %10.2f\n' % (Nodes[k][1], Nodes[k][2], 0.0))
            VTKi.write('\n')
            VTKi.write('CELLS ' + str(Nelem) + ' ' + str(Nelem * 3) + '\n')
            for k in range(Nelem):
                VTKi.write('%10i %10i %10i\n' % (2, Elements[k][3], Elements[k][4]))
            VTKi.write('\n')
            VTKi.write('CELL_TYPES ' + str(Nelem) + '\n')
            for k in range(Nelem):
                # Cell type 3 = VTK_LINE (2-node line element).
                VTKi.write('%10i\n' % (3))
            VTKi.write('\n')
            VTKi.write('POINT_DATA ' + str(Nnodes) + '\n')
            VTKi.write('SCALARS dispX float\n')
            VTKi.write('LOOKUP_TABLE default\n')
            for k in range(Nnodes):
                VTKi.write(FMT % (Totaldisp[2 * k][i]) + '\n')
            VTKi.write('\n')
            VTKi.write('SCALARS dispY float\n')
            VTKi.write('LOOKUP_TABLE default\n')
            for k in range(Nnodes):
                VTKi.write(FMT % (Totaldisp[2 * k + 1][i]) + '\n')
    return Totaldisp
| true |
class RomanNumerals:
    """Convert integers (up to 3999) to Roman numerals and back.

    to_roman maps numbers of 4000 or more to the empty string.
    """

    # value -> symbol; 5000/10000 map to '' so a 9-digit at the top
    # capacity still resolves without a KeyError.
    to_roman_dict = {
        1: "I",
        5: "V",
        10: "X",
        50: "L",
        100: "C",
        500: "D",
        1000: "M",
        5000: "",
        10000: "",
    }

    # symbol -> value
    from_roman_dict = {
        "I": 1,
        "V": 5,
        "X": 10,
        "L": 50,
        "C": 100,
        "D": 500,
        "M": 1000,
    }

    @staticmethod
    def translate_to_roman(digit, capacity):
        """Render a single decimal digit at the given power of ten."""
        lookup = RomanNumerals.to_roman_dict.get
        unit = lookup(10 ** capacity)
        digit = int(digit)
        if digit <= 3:
            return unit * digit
        if digit == 9:
            return unit + lookup(10 ** (capacity + 1))
        # 4..8: optional leading unit, the five-symbol, trailing units.
        five = lookup(5 * 10 ** capacity)
        return unit * max(0, 5 - digit) + five + unit * max(0, digit - 5)

    @staticmethod
    def translate_from_roman(digit):
        """Return the integer value of a single Roman symbol."""
        return RomanNumerals.from_roman_dict.get(digit)

    @staticmethod
    def to_roman(number):
        """Return the Roman representation of `number` ('' if >= 4000)."""
        if number >= 4000:
            return ""
        parts = []
        digits = str(number)
        top = len(digits) - 1
        for pos, ch in enumerate(digits):
            parts.append(RomanNumerals.translate_to_roman(ch, top - pos))
        return "".join(parts)

    @staticmethod
    def from_roman(number):
        """Return the integer value of a Roman numeral string (0 for '')."""
        if not number:
            return 0
        values = [RomanNumerals.translate_from_roman(ch) for ch in number]
        total = 0
        # Subtractive notation: a symbol smaller than its successor is
        # subtracted rather than added.
        for current, following in zip(values, values[1:]):
            total += current if current >= following else -current
        return total + values[-1]
print(RomanNumerals.to_roman(2999))          # demo: expect MMCMXCIX
print(RomanNumerals.from_roman("MMCMXCIX"))  # demo: expect 2999
| true |
331a06945cdb1bc3f07fe607f3f67b96ddf7ee88 | Python | sroubert/tvi_ee_lab3 | /12_18/moveOneLine.py | UTF-8 | 2,517 | 3.34375 | 3 | [] | no_license | from adafruit_motorkit import MotorKit
from adafruit_motor import stepper
import time
kit = MotorKit()        # Adafruit motor HAT driver (two stepper channels)
kit.stepper2.release()  # de-energize both motors so the axes start free
kit.stepper1.release()

#line is x1, y1, x2, y2
#x_diff = x2 - x1
x_diff = -10  # line extent along x (cm), hard-coded for this run
#y_diff = y2 - y1
y_diff = -2   # line extent along y (cm)

#to be measured by students
stepsPerCm = 50  # motor steps per centimetre of travel
# NOTE(review): the literal 50 below should presumably be stepsPerCm.
yMove = y_diff*50
xMove = x_diff*50

'''
DO NOT CHANGE: Define basic move functions
'''
def yForward():
    # One y step: the two motors turn in opposite directions.
    kit.stepper1.onestep(direction=stepper.BACKWARD, style=stepper.DOUBLE)
    kit.stepper2.onestep(direction=stepper.FORWARD, style=stepper.DOUBLE)

def yBackward():
    # Reverse of yForward.
    kit.stepper1.onestep(direction=stepper.FORWARD, style=stepper.DOUBLE)
    kit.stepper2.onestep(direction=stepper.BACKWARD, style=stepper.DOUBLE)

def xForward():
    # One x step: both motors turn in the same direction.
    kit.stepper1.onestep(direction=stepper.BACKWARD, style=stepper.DOUBLE)
    kit.stepper2.onestep(direction=stepper.BACKWARD, style=stepper.DOUBLE)

def xBackward():
    # Reverse of xForward.
    kit.stepper1.onestep(direction=stepper.FORWARD, style=stepper.DOUBLE)
    kit.stepper2.onestep(direction=stepper.FORWARD, style=stepper.DOUBLE)
'''
*loop functions
'''
def loopY(ratio, y, x):
    """Walk |y| steps along the dominant y axis, inserting one x step
    every `ratio` y steps so both axes finish together.

    ratio : how many y steps per single x step (round(|y/x|))
    y, x  : signed step counts; the sign selects the direction
    """
    for step in range(abs(y)):
        # Always advance the dominant (y) axis.
        if y > 0:
            yForward()
        elif y < 0:
            yBackward()
        # Every `ratio`-th iteration, advance x one step as well.
        if step % ratio == 0:
            if x > 0:
                xForward()
            elif x < 0:
                xBackward()
def loopX(ratio,y,x):
    """Walk |x| steps along the dominant x axis, inserting one y step
    every `ratio` x steps so both axes finish together.

    ratio : how many x steps per single y step (round(|x/y|))
    y, x  : signed step counts; the sign selects the direction
    """
    for i in range( abs(x) ):
        # Always step along the dominant (x) axis.
        # (A leftover debug print("hi") was removed from this loop.)
        if ( x > 0): xForward()
        if ( x < 0): xBackward()
        # Every `ratio`-th iteration, also step once in y.
        if ( (i % ratio)== 0 ):
            if ( y > 0 ): yForward()
            if ( y < 0 ): yBackward()
'''
Handle pure-horizontal or pure-vertical moves (one axis is zero):
step only the non-zero axis, release the motors and quit.
'''
if (yMove==0):
    for i in range( abs( xMove )):
        if (xMove > 0): xForward()
        if (xMove < 0): xBackward()
    kit.stepper2.release()
    kit.stepper1.release()
    exit() #this ends the python script

if (xMove==0):
    for i in range( abs( yMove )):
        if (yMove > 0): yForward()
        if (yMove < 0): yBackward()
    kit.stepper2.release()
    kit.stepper1.release()
    exit() #this ends the python script
'''
Dispatch on the dominant axis: step the longer axis every iteration and
interleave steps on the shorter one so the head traces an approximately
straight line, then release both motors.
'''
#loop over y
if ( abs( yMove ) > abs( xMove ) ):
    ratio = round(abs( yMove / xMove) )
    loopY(ratio,yMove,xMove)
#loop over x -- also covers |xMove| == |yMove| (ratio 1, 45-degree line).
#The original used a strict '>' here too, so perfectly diagonal moves
#fell through both branches and did nothing.
else:
    ratio = round(abs( xMove / yMove) )
    loopX(ratio,yMove,xMove)

kit.stepper2.release()
kit.stepper1.release()
| true |
dcf35a0d9663db4f8c8c642718c9a0921fe39ef9 | Python | JunhongXu/rrt-ros | /scripts/test_cspace.py | UTF-8 | 1,488 | 2.921875 | 3 | [] | no_license | import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
def read_cspace_data():
    """Parse polygon data from 'test.txt'.

    Expected format: repeated sections of a header line containing the word
    "Plane" whose last whitespace-separated token is the number of points,
    followed by that many lines of space-separated float coordinates.

    Returns a list of polygons; each polygon is a list of 1-D numpy arrays
    [coords..., plane_index], where plane_index counts the sections in
    file order.
    """
    with open("test.txt", "r") as f:
        lines = f.readlines()
    polygons = []
    n_plane = 0
    i = 0
    while i < len(lines):
        header = lines[i].strip("\n")
        if "Plane" not in header:
            # Skip blank/unexpected lines.  The original left `i` unchanged
            # here (with a stale or undefined `j`), causing a NameError or
            # an infinite loop on any stray line.
            i += 1
            continue
        n_points = int(header.split(" ")[-1])
        polygon = []
        for j in range(1, n_points + 1):
            line = lines[i + j].strip("\n")
            coord = [float(c) for c in line.split(" ")]
            coord.append(n_plane)  # tag each point with its plane index
            polygon.append(np.array(coord))
        polygons.append(polygon)
        n_plane += 1
        i += n_points + 1
    return polygons
# Visual check: load the parsed planes and render each one as a filled
# polygon in 3-D.
ps = read_cspace_data()
# Stack each polygon's point rows into a single (n_points, 3) array.
for index, p in enumerate(ps):
    ps[index] = np.vstack(p)
fig = plt.figure()
ax = Axes3D(fig)
for i in range(len(ps)):
    x = ps[i][:, 0]
    y = ps[i][:, 1]
    z = ps[i][:, 2]   # column 2 is the plane index assigned by the parser
    verts = [list(zip(x, y, z))]
    collection = Poly3DCollection(verts, alpha=0.9)
    collection.set_facecolor('r')  # red faces, black edges
    collection.set_edgecolor('k')
    ax.add_collection3d(collection)
ax.set_xlim3d(-10, 10)
ax.set_ylim3d(-10, 10)
# NOTE(review): z spans 0-361 -- presumably an angle in degrees; confirm.
ax.set_zlim3d(0, 361)
plt.show()
| true |
def char_freq(string):
    """Count how often each character occurs in `string` and print one
    "char: count" line per distinct character, in first-seen order."""
    counts = {}  # renamed: the original shadowed the builtin `list`
    for ch in string:
        counts[ch] = counts.get(ch, 0) + 1
    for char_item, freq in counts.items():
        print(char_item + ": " + str(freq))
char_freq("hello") | true |