seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
23407226022 | import config
import json
from tqdm import tqdm
from inspect_wikidump import init_inspect
from urllib.parse import unquote
from lxml import etree
from collections import Counter
from sqlitedict import SqliteDict
def iterative_checking(check_func_dict, debug_num=None, verbose=False):
    """Stream the abstract-wiki JSONL file and dump each item's fields to stdout.

    check_func_dict/verbose are currently unused placeholders — TODO confirm
    they were meant to drive per-item validation callbacks.
    debug_num: stop after this many documents (None = whole file).
    """
    total_doc_num = init_inspect.TOTAL_NUM_DOC if debug_num is None else debug_num
    cur_count = 0
    with open(config.ABS_WIKI_FILE, 'rb') as in_f:
        for line in tqdm(in_f, total=total_doc_num):
            if debug_num is not None and debug_num == cur_count:
                break
            item = json.loads(line)
            print(item)
            print(item.keys())
            print(item['text_with_links'])
            print(item['charoffset_with_links'])
            cur_count += 1
    print("Total Count:", cur_count)
# We will need to do this later to check the consistency
# TODO do cross checking for 'whole' and 'abs'
def iterative_cross_checking_abs_whole(debug_num=None):
    """Check that each abstract item's text matches the first long paragraph
    of the corresponding whole-wiki item (files are assumed line-aligned).

    Raises on the first mismatch.  NOTE(review): next() will raise
    StopIteration at EOF when debug_num is None — confirm that is intended.
    """
    total_doc_num = init_inspect.TOTAL_NUM_DOC if debug_num is None else debug_num
    cur_count = 0
    error_count = 0
    with open(config.WHOLE_WIKI_FILE, 'rb') as in_whole_f, open(config.ABS_WIKI_FILE, 'rb') as in_abs_f:
        while True:
            if debug_num is not None and debug_num == cur_count:
                break
            whl_line = next(in_whole_f)
            abs_line = next(in_abs_f)
            if whl_line and abs_line:
                whl_item = json.loads(whl_line)
                abs_item = json.loads(abs_line)
                cur_count += 1
                # First paragraph (after the title entry) longer than 50 chars
                # is taken as the abstract candidate.
                the_para = None
                for whl_para in whl_item['text'][1:]:
                    print(whl_para)
                    if len(''.join(whl_para)) > 50:
                        the_para = whl_para
                        break
                if the_para != abs_item['text_with_links']:
                    print(abs_item['title'])
                    print(whl_item['title'])
                    print(the_para)
                    print(whl_item['text'])
                    print(abs_item['text_with_links'])
                    # print(whl_item['text'][1])
                    error_count += 1
                    raise Exception()
    print(error_count)
    print(cur_count)
    # NOTE(review): ZeroDivisionError if no documents were compared.
    print(error_count / cur_count)
def iterative_cross_checking_abs_whole_from_db(debug_num=None):
    """Same consistency check as iterative_cross_checking_abs_whole, but keyed
    by title over the two SqliteDict databases instead of line alignment.

    NOTE(review): debug_num/total_doc_num are computed but never used here —
    confirm whether early termination was intended.
    """
    total_doc_num = init_inspect.TOTAL_NUM_DOC if debug_num is None else debug_num
    cur_count = 0
    error_count = 0
    with SqliteDict(str(config.ABS_WIKI_DB), flag='r', encode=json.dumps, decode=json.loads) as abs_db, \
            SqliteDict(str(config.WHOLE_WIKI_DB), flag='r', encode=json.dumps, decode=json.loads) as whole_db:
        # Materialize the keys first so the second pass can show tqdm progress.
        titles = []
        for title in tqdm(abs_db.iterkeys(), total=len(abs_db)):
            titles.append(title)
        for title in tqdm(titles):
            abs_item = abs_db[title]
            if title not in whole_db:
                print(f"Title: {title} not in whole_db")
                return
            else:
                whl_item = whole_db[title]
            cur_count += 1
            # Note: unlike the file-based variant this scans from index 0 and
            # uses >= 50 as the paragraph-length threshold.
            the_para = None
            for whl_para in whl_item['text'][0:]:
                # print(whl_para)
                if len(''.join(whl_para)) >= 50:
                    the_para = whl_para
                    break
            if the_para != abs_item['text_with_links']:
                print(abs_item['title'])
                print(whl_item['title'])
                print(the_para)
                print(whl_item['text'])
                print(the_para)
                print(abs_item['text_with_links'])
                # print(whl_item['text'][1])
                error_count += 1
                raise Exception()
    print(error_count)
    print(cur_count)
    print(error_count / cur_count)
if __name__ == '__main__':
    # Ad-hoc entry point: cross-check the first 100 titles from the databases.
    # iterative_checking(None, debug_num=1)
    # iterative_cross_checking_abs_whole(100)
    iterative_cross_checking_abs_whole_from_db(100)
| easonnie/semanticRetrievalMRS | src/inspect_wikidump/inspect_abs_file.py | inspect_abs_file.py | py | 3,998 | python | en | code | 59 | github-code | 13 |
14392500820 | import copy
import urllib
import datetime
import os
import types
from django.http import Http404
from django.conf import settings
from django.utils import translation
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from haystack.utils import Highlighter
def first_words(s, num_chars=100):
    """return a string of length not more than num_chars with the first words of the given string

    appends "..." if the original string is longer than num_chars

    Fixes: the original hard-coded 100 instead of using num_chars, and dropped
    the final word even when the string already fit within the limit.
    """
    if len(s) <= num_chars:
        return s
    # Cut at the limit, then drop the (probably truncated) final word.
    result = ' '.join(s[:num_chars].split()[:-1])
    return result + '...'
def urlencode(d):
    """call urllib.urlencode, but first tries to avoid encoding errors"""
    # Python 2 code: urllib.urlencode and u'' literals.  On failure, retry
    # with every unicode value encoded as UTF-8 bytes.
    try:
        return urllib.urlencode(d)
    except UnicodeEncodeError:
        d = copy.copy(d)  # shallow copy so the caller's dict is not mutated
        for k in d:
            if type(d[k]) == type(u''):
                d[k] = d[k].encode('utf8')
        return urllib.urlencode(d)
class DasaHighlighter(Highlighter):
    """Haystack highlighter that skips boolean operators and negated terms,
    and snaps the highlight window to the very start of the text when the
    best window begins near it."""

    def __init__(self, query, **kwargs):
        # NOTE(review): deliberately does not call Highlighter.__init__ —
        # attributes are only overridden when explicitly passed, so unset
        # kwargs keep the base-class defaults.  Confirm against the installed
        # haystack version.
        self.query = query
        if 'max_length' in kwargs:
            self.max_length = int(kwargs['max_length'])
        if 'html_tag' in kwargs:
            self.html_tag = kwargs['html_tag']
        if 'css_class' in kwargs:
            self.css_class = kwargs['css_class']
        boolean_operators = ['AND', 'OR', 'NOT']
        # Highlight only positive, non-operator query terms (lowercased).
        self.query_words = set([word.lower() for word in self.query.split() if not word.startswith('-') and word not in boolean_operators])

    def find_window(self, highlight_locations):
        """Like the base window, but anchored to 0 when it starts within 20 chars."""
        best_start, best_end = super(DasaHighlighter, self).find_window(highlight_locations)
        # if we are close to the start, we just go to the beginning
        if best_start < 20:
            best_start = 0
        return (best_start, best_end)
def format_date_for_timeglider(d):
    """Render a date as the 'YYYY-MM-DD 01:00:00' timestamp timeglider expects."""
    return '{0:04d}-{1:02d}-{2:02d} 01:00:00'.format(d.year, d.month, d.day)
# Month number (1-based) -> abbreviated, translatable month name.
MONTHS = {
    1: _('Jan.'),
    2: _('Feb.'),
    3: _('Mar.'),
    4: _('Apr.'),
    5: _('May.'),
    6: _('Jun.'),
    7: _('Jul.'),
    8: _('Aug.'),
    9: _('Sep.'),
    10: _('Oct.'),
    11: _('Nov.'),
    12: _('Dec.'),
}
def prettyprint_date(y, m=None, d=None):
    """Format a possibly-partial date for display (Python 2: uses `types`).

    Uses the translated month abbreviation when m is a valid 1-12 int or
    digit string; otherwise falls back to numeric 'd-m-y' style formats.
    """
    if isinstance(m, types.IntType) and int(m) in range(1, 13):
        month = MONTHS[int(m)]
    elif isinstance(m, types.StringTypes) and m.isdigit() and int(m) in range(1, 13):
        month = MONTHS[int(m)]
    else:
        # str(m) is truthy even for m=None ('None') — the 'else' numeric
        # branches below are therefore reached only when month is '' —
        # TODO confirm this is the intended behavior.
        month = str(m)
    if y and m and d:
        if month:
            return u'{month} {d}, {y}'.format(d=d, month=month, y=y)
        else:
            return u'{d}-{m}-{y}'.format(d=d, m=m, y=y)
    elif y and m:
        if month:
            return u'{month} {y}'.format(month=month, y=y)
        else:
            return u'{m}-{y}'.format(m=m, y=y)
    elif y:
        return u'{y}'.format(y=y)
    # NOTE(review): implicitly returns None when y is falsy — confirm callers
    # handle that.
def to_date(y, m=None, d=None):
    """Build a datetime.date from year/month/day, defaulting missing month
    and day to 1.  Returns None when no (truthy) year is given."""
    if not y:
        return None
    return datetime.date(y, m or 1, d or 1)
def sluggify(s):
    """Turn s into a friendlier URL fragment

    removes underscores, and strips forward slashes from beginning and end

    returns:
        a string
    """
    # XXX make this smarter
    slug = s.lower().strip().replace(' ', '-')
    # Collapse any runs of dashes produced by multiple spaces.
    while '--' in slug:
        slug = slug.replace('--', '-')
    # Drop at most one slash from each end.
    if slug.startswith('/'):
        slug = slug[1:]
    if slug.endswith('/'):
        slug = slug[:-1]
    return slug
def slugs2breadcrumbs(ls):
    """given a list of slugs, return a list of (title, url) tuples"""
    # Raises Http404 (via get_page) for any slug without a BasicPage.
    result = []
    for slug in ls:
        page = get_page(slug=slug)
        result.append(page)
    result = [(page.title, page.get_absolute_url()) for page in result]
    return result
def fix_date(s):
    """Expand a bare 4-digit year string to 'YYYY-1-1'; pass anything else through."""
    return '%s-1-1' % s if s.isdigit() and len(s) == 4 else s
def pagebrowser_id(ead_id, archive_id, archiveFile):
    """Join the three identifiers with dashes and normalize '/' and '.' to '-'."""
    raw = '{0}-{1}-{2}'.format(ead_id, archive_id, archiveFile)
    for ch in ('/', '.'):
        raw = raw.replace(ch, '-')
    return raw
def get_page(slug, default=None):
    """Fetch the BasicPage with the given slug.

    Raises Http404 when missing and no default is supplied; otherwise
    returns `default`.  The import is local to avoid a circular import
    at module load time — TODO confirm.
    """
    from dasa import models
    try:
        page = models.BasicPage.objects.get(slug=slug)
    except models.BasicPage.DoesNotExist:
        if default is None:
            msg = 'Could not find BasicPage with slug "%s" - please add one on /admin/dasa/basicpage/' % slug
            raise Http404(msg)
        else:
            return default
    return page
def to_integer(s):
    """Normalize a numeric string such as '1.0' to '1'.

    Non-numeric input is returned unchanged (the broad except mirrors the
    original best-effort behavior).
    """
    try:
        return str(int(float(s)))
    except Exception:
        return s
def print_link_to_pagebrowser(scans, archive='K66a'):
    """return nicely groups set of links to the pagebrowser to the given set of scans"""
    # exclude scans without folioNumber
    scans = [scan for scan in scans if scan.get('folioNumber')]
    if not scans:
        return ''
    # Sort by (archiveFile, folioNumber) via decorate-sort-undecorate.
    scans_to_sort = [(scan.get('archiveFile', ''), scan.get('folioNumber'), scan) for scan in scans]
    scans_to_sort.sort()
    scans = [scan for _archivefile, _folionumber, scan in scans_to_sort]
    # Group runs of consecutive numeric folio numbers within one archive file
    # so they can be rendered as a single "folios a-b" link.
    scan_groups = []
    for scan in scans:
        if scan_groups and \
                scan_groups[-1][-1]['archiveFile'] == scan['archiveFile'] and \
                scan_groups[-1][-1]['folioNumber'] and \
                scan['folioNumber'] and \
                scan_groups[-1][-1]['folioNumber'].isdigit() and \
                scan['folioNumber'].isdigit() and \
                int(scan['folioNumber']) == int(scan_groups[-1][-1]['folioNumber']) + 1:
            scan_groups[-1].append(scan)
        else:
            scan_groups.append([scan])
    # Pick the EAD finding-aid id for the active UI language.
    language_code = translation.get_language()
    ead_id = settings.LANGUAGE2EAD.get(language_code, settings.LANGUAGE_CODE)
    results = []
    archive_id = settings.ARCHIVE_IDS[archive]
    for i, scan_group in enumerate(scan_groups):
        scan = scan_group[0]
        archiveFile = scan['archiveFile']
        pb_url = os.path.join(settings.PAGEBROWSER_PUBLIC_URL, pagebrowser_id(ead_id=ead_id, archive_id=archive_id, archiveFile=archiveFile))
        pb_url += '?page_number=%s' % scan['folioNumber']
        if i > 0 and scan_groups[i - 1][0]['archiveFile'] == archiveFile:
            # Same archive file as the previous group: show folio numbers only.
            if len(scan_group) == 1:
                description = '{folioNumber}'.format(**scan)
            else:
                description = '{folioNumber}-{last_number}'.format(last_number=scan_group[-1]['folioNumber'], **scan)
        else:
            # New archive file: spell out the file name as well.
            if len(scan_group) == 1:
                description = 'file {archiveFile}, folio {folioNumber}'.format(**scan)
            else:
                description = 'file {archiveFile}, folios {folioNumber}-{last_number}'.format(last_number=scan_group[-1]['folioNumber'], **scan)
        results.append('<a href="#" onClick="return openPageBrowser(\'{pb_url}\')">{description}</a>'.format(**locals()))
    return mark_safe(', '.join(results))
def sort_string(s):
    """Sort key for names: leading articles/prefixes ('de ', "'t ", "l'",
    "'s ") are stripped from the front, then the original string is appended
    so that equal keys still compare deterministically."""
    key = s.strip()
    # Each matching prefix is removed in turn (cumulative, no break).
    for prefix in ('de ', "'t ", "l'", "'s "):
        if key.startswith(prefix):
            key = key[len(prefix):]
    return (key + s).lower().strip()
| sejarah-nusantara/site | apps/dasa/utils.py | utils.py | py | 7,105 | python | en | code | 1 | github-code | 13 |
19902468067 | import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import art3d
import numpy as np
def cos(theta):
    # Thin alias so the rotation-matrix formulas read like textbook notation.
    return np.cos(theta)


def sin(theta):
    # Thin alias for np.sin (companion to cos above).
    return np.sin(theta)
def rotation(phi, theta, psi):
    """Return the 3-2-1 (Z-Y-X) Euler rotation matrix R_z(psi) @ R_y(theta) @ R_x(phi)."""
    cphi, sphi = np.cos(phi), np.sin(phi)
    cth, sth = np.cos(theta), np.sin(theta)
    cpsi, spsi = np.cos(psi), np.sin(psi)
    rx = np.array([[1.0, 0.0, 0.0],
                   [0.0, cphi, -sphi],
                   [0.0, sphi, cphi]])
    ry = np.array([[cth, 0.0, sth],
                   [0.0, 1.0, 0.0],
                   [-sth, 0.0, cth]])
    rz = np.array([[cpsi, -spsi, 0.0],
                   [spsi, cpsi, 0.0],
                   [0.0, 0.0, 1.0]])
    return rz @ ry @ rx
def animate(fig_no, phi, theta, psi):
    """Draw a box rotated by the 3-2-1 Euler angles, plus its rotated body
    axes, into subplot `fig_no` of a 2x2 grid on figure 1."""
    # Box half-extents (x, y, z) and axis-arrow length.
    lx = 0.5
    ly = 0.25
    lz = 0.1
    ll = 1
    lmax = np.max(np.array([lx, ly, lz, ll]))
    # The 8 corner vertices of the box, centered at the origin.
    v0 = np.array(
        [
            [-lx, -ly, -lz],
            [lx, -ly, -lz],
            [lx, ly, -lz],
            [-lx, ly, -lz],
            [-lx, -ly, lz],
            [lx, -ly, lz],
            [lx, ly, lz],
            [-lx, ly, lz],
        ]
    )
    # 12 triangular faces (two per box side) as vertex-index triples.
    f = np.array(
        [
            [0, 2, 1],
            [0, 3, 2],
            [1, 2, 6],
            [1, 6, 5],
            [0, 5, 4],
            [0, 1, 5],
            [4, 5, 6],
            [6, 7, 4],
            [3, 7, 6],
            [6, 2, 3],
            [0, 4, 7],
            [7, 3, 0],
        ]
    )
    # Rotate every vertex by R.
    v1 = np.zeros(np.shape(v0))
    [m, n] = np.shape(v1)
    R = rotation(phi, theta, psi)
    for i in range(0, m):
        vec = np.array([v0[i, 0], v0[i, 1], v0[i, 2]])
        vec = R.dot(vec)
        v1[i] = vec
    fig = plt.figure(1)
    ax = fig.add_subplot(2, 2, fig_no, projection='3d')
    pc1 = art3d.Poly3DCollection(v1[f], facecolors='blue', alpha=0.25)
    # ax.add_collection(pc0)
    ax.add_collection(pc1)
    # Rotated body-frame axes: x=red, y=green, z=blue.
    origin = np.array([0, 0, 0])
    dirn_x = np.array([1, 0, 0])
    dirn_x = R.dot(dirn_x)
    dirn_y = np.array([0, 1, 0])
    dirn_y = R.dot(dirn_y)
    dirn_z = np.array([0, 0, 1])
    dirn_z = R.dot(dirn_z)
    ax.quiver(
        origin[0], origin[1], origin[2],
        dirn_x[0], dirn_x[1], dirn_x[2],
        length=1, arrow_length_ratio=0.1,
        normalize=True, color='red',
    )
    ax.quiver(
        origin[0], origin[1], origin[2],
        dirn_y[0], dirn_y[1], dirn_y[2],
        length=1, arrow_length_ratio=0.1,
        normalize=True, color='green',
    )
    ax.quiver(
        origin[0], origin[1], origin[2],
        dirn_z[0], dirn_z[1], dirn_z[2],
        length=1, arrow_length_ratio=0.1,
        normalize=True, color='blue',
    )
    # Subplot title shows the Euler angles truncated to whole degrees.
    fac = 180 / np.pi
    phideg = math.trunc(float(phi * fac))
    thetadeg = math.trunc(float(theta * fac))
    psideg = math.trunc(float(psi * fac))
    subtit = (
        'phi=' + str(phideg) + ';' +
        'theta=' + str(thetadeg) + ';' +
        'psi=' + str(psideg) + ';'
    )
    ax.set_title(subtit)
    ax.set_xlim(-lmax, lmax)
    ax.set_ylim(-lmax, lmax)
    ax.set_zlim(-lmax, lmax)
    ax.axis('off')
if __name__ == '__main__':
    # Demo: four subplots applying one extra 90-degree Euler rotation each.
    phi, theta, psi = 0, 0, 0
    animate(1, phi, theta, psi)
    phi, theta, psi = 0, 0, np.pi / 2
    animate(2, phi, theta, psi)
    phi, theta, psi = 0, np.pi / 2, np.pi / 2
    animate(3, phi, theta, psi)
    phi, theta, psi = np.pi / 2, np.pi / 2, np.pi / 2
    animate(4, phi, theta, psi)
    plt.show()
| kimsooyoung/robotics_python | lec16_3D_rotations/rotation_of_a_box_321_euler.py | rotation_of_a_box_321_euler.py | py | 3,202 | python | en | code | 18 | github-code | 13 |
213995105 | # standard modules
import traceback
import decimal, datetime
# 3rd party modules
import pyodbc
from fastapi import HTTPException, Request
from fastapi.responses import ORJSONResponse
# application modules
from src.config import config
def get_db_cursor():
    """Open a new pyodbc connection and return a cursor on it.

    NOTE(review): the connection object itself is not returned or closed;
    confirm callers are happy relying on the cursor keeping it alive.
    """
    conn = pyodbc.connect(config.db_connection_string)
    return conn.cursor()
async def exception_handler(request: Request, exc: Exception):
    """Global FastAPI exception handler mapping any exception to a JSON error.

    HTTPExceptions keep their own status code and detail; every other
    exception becomes a 500 with its string form as the detail.
    """
    code = 500
    detail = str(exc)
    if isinstance(exc, HTTPException):
        code = exc.status_code
        detail = exc.detail
    # Fix: print_exc() writes the traceback itself and returns None; the
    # original print(traceback.print_exc()) also emitted a spurious "None".
    traceback.print_exc()
    return ORJSONResponse(content={"detail": detail}, status_code=code)
def to_camel_case(snake_str):
    """Convert a snake_case identifier to camelCase ('foo_bar' -> 'fooBar')."""
    first, *rest = snake_str.split('_')
    return first + ''.join(part.title() for part in rest)
def convert(input_value):
    """Recursively convert a DB-row value into a JSON-serializable structure.

    Strings, ints and dates pass through; Decimals become ints; lists are
    converted element-wise; objects are reduced to their __dict__; dict keys
    are camelCased.  Anything else raises a 500 HTTPException.
    """
    if isinstance(input_value, (str, int, datetime.datetime, datetime.date)):
        return input_value
    if isinstance(input_value, decimal.Decimal):
        return int(input_value)
    if isinstance(input_value, list):
        return [convert(element) for element in input_value]
    if hasattr(input_value, '__dict__'):
        input_value = input_value.__dict__
    if isinstance(input_value, dict):
        return {to_camel_case(key): convert(value) for key, value in input_value.items()}
    raise HTTPException(detail=f"Cannot convert type {type(input_value)}", status_code=500)
| nareshh74/voicegen_admin | src/utils.py | utils.py | py | 1,675 | python | en | code | 0 | github-code | 13 |
34030442399 | # Filename: q4_sum_digits.py
# Author: Justin Leow
# Created: 24/1/2013
# Modified: 24/1/2013
# Description: reads an integer between 0 and 1000 and adds all the digits in the integer.
def newString(inputString):
    """Prompt the user with `inputString`; return their entry as a string.

    Typing "quit" exits the program.  Non-numeric input falls back to "10".

    Fixes: the prompt was passed to input() wrapped in a one-element list,
    so the user saw its repr (brackets and quotes); the bare except is
    narrowed to ValueError, which is what int() raises on a bad string.
    """
    tempInput = input(inputString + "; or 'quit' to quit program")
    if tempInput == "quit":
        quit()
    try:
        int(tempInput)
    except ValueError:
        print("Input is not a number. Utilizing default value of 10")
        return "10"
    return tempInput
# main: repeatedly read a number and report the sum of its decimal digits.
while True:
    # newString loops the prompt logic and exits the program on "quit".
    myNumber = newString("Input a positive integer")
    # Sum the digits directly from the string (removed the unused
    # numElements variable and the manual accumulation loop).
    digit_sum = sum(int(digit) for digit in myNumber)
    # output volume
    print("The sum of all the digits in your number is: {0:.0f}! \n".format(digit_sum))
| JLtheking/cpy5python | practical01/q4_sum_digits.py | q4_sum_digits.py | py | 865 | python | en | code | 0 | github-code | 13 |
32276511103 | # exercise 143: Anagrams
# solution through function which has one parameter and is invoked on two strings (logic in main function)
def histogram(s):
    """Return a dict mapping each uppercased character of s to its count."""
    counts = {}
    for ch in s.upper():
        counts[ch] = counts.get(ch, 0) + 1
    return counts
def main():
    """Prompt for two words and report whether they are anagrams (case-insensitive)."""
    first = input('enter the first word: ')
    second = input('enter the second word: ')
    verdict = ('those words are anagrams'
               if histogram(first) == histogram(second)
               else 'those words are NOT anagrams')
    print(verdict)
if __name__ == '__main__':
    # Run the interactive prompt only when executed as a script.
    main()
"""
the words are anagrams if their histograms letters-frequences are equal
dictionaries are equal if they have the same keys and values independently from the order
"""
| sara-kassani/1000_Python_example | books/Python Workbook/dictionaries/ex143.py | ex143.py | py | 749 | python | en | code | 1 | github-code | 13 |
45931553764 | from discord import app_commands, Attachment, Message, Interaction
from discord.ext import commands
import os
class Files(commands.Cog):
    """Cog exposing an /upload slash command that stores attachments per user."""

    def __init__(self, bot):
        self.bot = bot

    @app_commands.command(name="upload", description="Upload a file.")
    async def upload(self, inter: Interaction, file: Attachment) -> None:
        """Save the attachment under uploads/<user id>/<original filename>."""
        path = "uploads/" + str(inter.user.id) + "/"
        await inter.response.send_message(f"Uploading...")
        # Fix: os.mkdir raised FileNotFoundError when the "uploads" parent was
        # missing (and the exists-check was racy); makedirs with exist_ok=True
        # creates the whole tree and is idempotent.
        os.makedirs(path, exist_ok=True)
        await file.save(path + file.filename)
        await inter.followup.send(f"Upload complete.")
async def setup(bot):
await bot.add_cog(Files(bot)) | Mudpuppy12/grue | cogs/files.py | files.py | py | 736 | python | en | code | 0 | github-code | 13 |
31041268483 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 6 11:22:49 2020
@author: likeufo_ah
"""
import matplotlib.pyplot as plt
import numpy as np
from numpy import linalg as la
import operator
import sys
def load_data(filename):
    """Read a whitespace-delimited text file into a list of token lists,
    one inner list per line."""
    with open(filename, "r") as fh:
        lines = fh.readlines()
    return [line.strip().split() for line in lines]
def meanX(dataX):
    """Return the per-column (axis-0) mean of a 2-D data matrix."""
    return np.mean(dataX, axis=0)
def pca(XMat, k):
    """Project XMat onto its top-k principal components.

    Returns (finalData, reconData, featValue, featVec, selectVec):
    the projected data, the reconstruction in the original space, the
    (unsorted) eigenvalues/eigenvectors of the covariance, and the selected
    top-k eigenvector matrix.  Returns None (implicitly) when k > n.
    """
    average = meanX(XMat)
    m, n = np.shape(XMat)
    data_adjust = []
    # Center the data by subtracting the column means.
    avgs = np.tile(average, (m, 1))
    data_adjust = XMat - avgs
    covX = np.cov(data_adjust.T)
    featValue, featVec = np.linalg.eig(covX)
    # Indices of eigenvalues in descending order.
    index = np.argsort(-featValue)
    finalData = []
    if k > n:
        print("k must lower than feature number")
        return
    else:
        # Rows of featVec.T are eigenvectors; keep the k largest.
        selectVec = np.matrix(featVec.T[index[:k]])
        finalData = data_adjust * selectVec.T
        reconData = (finalData * selectVec) + average
    return finalData, reconData, featValue, featVec, selectVec
def knn(traindata, testdata, labels, k=1):
    """Classify one sample by majority vote among its k nearest training rows
    (Euclidean distance)."""
    distances = np.linalg.norm(testdata - traindata, axis=1)
    nearest = distances.argsort()
    votes = {}
    for idx in nearest[:k]:
        label = labels[idx]
        votes[label] = votes.get(label, 0) + 1
    ranked = sorted(votes.items(), key=operator.itemgetter(1), reverse=True)
    return ranked[0][0]
def question1():
    """Show the mean face of the 280 training images (32x32 = 1024 pixels)."""
    data = load_data(file_train)
    dataX = np.array(data).reshape(280, 1024)
    # NOTE(review): np.float is removed in NumPy >= 1.24; plain float is the
    # drop-in replacement.
    dataX = dataX.astype(np.float)
    data_mean = meanX(dataX)
    # Pixels are stored column-major, hence the transpose for display.
    mean_picture = np.array(data_mean).reshape(32, 32).T
    plt.imshow(mean_picture, cmap="gray")
    plt.show()
    # eigenface=pca(dataX,5)[4]
def question1_2(k):
    """Display the first k eigenfaces of the training set, one figure each."""
    data = load_data(file_train)
    dataX = np.array(data).reshape(280, 1024)
    dataX = dataX.astype(np.float)
    p = pca(dataX, k)
    # p[4] is the selected eigenvector matrix, p[1] the reconstruction.
    selectVec, recon = p[4], p[1]
    for i in selectVec:
        a = i.reshape(32, 32).T
        a = a.astype(float)
        plt.imshow(a, cmap="gray")
        plt.show()
def question2(k):
    """Reconstruct test image #2 from its first k principal components and show it."""
    data = load_data(file_test)
    dataY = np.array(data)
    # print(dataY)
    dataY = dataY.astype(np.float)
    p = pca(dataY, k)
    # p[1] holds the reconstructed images; row 2 is the sample displayed.
    reco = p[1]
    reco_pic = reco[2].reshape(32, 32).T.astype(float)
    plt.imshow(reco_pic, cmap="gray")
    plt.title(f"k={k}")
    plt.show()
def question3_1nn(k):
    """1-NN accuracy on the test set after projecting onto the top-k components.

    Returns (accuracy, wrong_labels, true_labels, misclassified_indices).
    """
    traindata = np.array(load_data(file_train))
    traindata = traindata.astype(np.float)
    # load trainset
    testdata = np.array(load_data(file_test))
    testdata = testdata.astype(np.float)
    # load testset
    with open(label_train, "r") as f:
        labels_train = f.read().split()
    # labels of trainset
    with open(label_test, "r") as f:
        labels_test = f.read().split()
    # labels of testset
    pca_train = pca(traindata, k)[0]
    # train_dataset after dimension reduction
    # Center the test data with its own mean, then project it onto the
    # eigenvectors fitted on the training data (pca(...)[4]).
    average = meanX(testdata)
    m, n = np.shape(testdata)
    avgs = np.tile(average, (m, 1))
    testdata = testdata - avgs
    pca_test = testdata * pca(traindata, k)[4].T
    # test dataset after dimension reduction
    '''
    X_values=[i for i in range(1,k)]
    Y_values=[question3_1nn(j) for j in X_values]
    plt.plot(X_values,Y_values)
    '''
    # NOTE(review): `sum` shadows the builtin; rename if this is refactored.
    sum = 0.0
    wrong_value = []
    right_value = []
    index = []
    for i, value in enumerate(pca_test):
        label = knn(pca_train, value, labels_train, k=1)
        label2 = labels_test[i]
        if label == label2:
            sum = sum + 1
        else:
            wrong_value.append(label)
            right_value.append(label2)
            index.append(i)
    accuracy = sum / len(labels_test)
    return accuracy, wrong_value, right_value, index
def question3_image(k):
    """Plot 1-NN accuracy for component counts 1..k-1 (display is deferred)."""
    X_values = [i for i in range(1, k)]
    Y_values = [question3_1nn(j)[0] for j in X_values]
    plt.plot(X_values, Y_values)
def question4(k):
    """Show every misclassified test image for the top-k 1-NN classifier,
    then print and return the accuracy plus the error details."""
    testdata = np.array(load_data(file_test))
    accuracy, wrong_value, right_value, index = question3_1nn(k)
    for i in index:
        pic = np.array(testdata[i])
        pic = pic.reshape(32, 32).T
        pic = pic.astype(float)
        plt.imshow(pic, cmap="gray")
        plt.show()
    print("accuracy:", accuracy)
    return accuracy, wrong_value, right_value, index
if __name__ == "__main__":
if len(sys.argv) < 6:
print("not enough arguments specified")
sys.exit(1)
file_train=sys.argv[1]
label_train=sys.argv[2]
k=int(sys.argv[3])
file_test=sys.argv[4]
label_test=sys.argv[5]
question1()
question1_2(5)
question2(5)
question2(10)
question2(50)
question2(100)
question2(k)# when input k,another image will be shown except for 4 images needed
question3_1nn(100)
question3_image(100)
question4(100)
| ShangGao-forever/Shang_Gao | DATA7703/assignment2.py | assignment2.py | py | 5,074 | python | en | code | 0 | github-code | 13 |
35924431056 | from abc import ABC, abstractmethod
from board import Board
from playeragentinterface import PlayerAgentFactoryInterface
class Game:
    """One game of tic-tac-toe between two player agents; x always moves first."""

    def __init__(self, player_x, player_o):
        # Each player object provides request_move/request_player_name/
        # notify_* methods (see the play loop below).
        self.player_x = player_x
        self.player_o = player_o

    def play(self):
        """Run the move loop until someone wins or the board fills (draw)."""
        mark = 'x'
        current_player = self.player_x
        other_player = self.player_o
        print(f'{current_player.request_player_name()} ({current_player.request_agent_description()}) vs {other_player.request_player_name()} ({other_player.request_agent_description()})')
        board = Board()
        #board.print()
        #print('')
        is_game_won = False
        while not board.is_full():
            (r, c) = current_player.request_move()
            board.set(r, c, mark)
            # Keep the opponent's internal board model in sync.
            other_player.notify_other_players_move(r, c)
            #board.print()
            winner = board.find_winner()
            if winner:
                print(f'{current_player.request_player_name()} ({current_player.request_agent_description()}) defeated {other_player.request_player_name()} ({other_player.request_agent_description()})')
                current_player.notify_game_over('win')
                other_player.notify_game_over('lose')
                is_game_won = True
                break
            # Swap roles and toggle the mark for the next turn.
            (current_player, other_player) = (other_player, current_player)
            mark = 'o' if mark == 'x' else 'x'
        if not is_game_won:
            # Board filled with no winner: notify both sides of the draw.
            current_player.notify_game_over('draw')
            other_player.notify_game_over('draw')
def run(player_agent_factory):
    """Play games back to back forever, building two fresh agents per game."""
    while True:
        player_1 = player_agent_factory.make_player_agent()
        player_2 = player_agent_factory.make_player_agent()
        game = Game(player_1, player_2)
        game.play()
| cfeyer/tictactoe | server.py | server.py | py | 1,786 | python | en | code | 0 | github-code | 13 |
73302333779 | import math
import matplotlib.pyplot as plt
proba = []
X= []
Y = []
print("ok")
# la formule renvoie le resulat multiplié par types^tirées
def formuleInt(types, tirées):
somme = 0
signe = 1
for k in range(types):
# la fonction choose s'appelle "comb" dans python
somme += signe * math.comb(types, k) * (types - k) ** tirées
# pour alterner + et - devant de maniere plus efficace que (-1)^k
signe = -signe
return somme
def main():
    """Interactively find how many draws reach a 98% chance of completing the
    collection, plotting the probability curve along the way."""
    _input = int(input("#Types de cartes : "))
    step = int(input("#ecart : "))
    for i in range(1):
        T = (i + 1) * _input
        tirées = T * 5
        probaInt = 0
        # Loop until 100*probaInt / T^tirées exceeds 98%.  The comparison uses
        # T^(tirées - step) because tirées was already advanced by `step` at
        # the end of the previous iteration, so probaInt is still scaled by
        # the previous exponent.
        while 100 * probaInt <= 98 * T ** (tirées - step):
            probaInt = formuleInt(T, tirées)
            proba.append((100 * probaInt / T ** (tirées)))
            print("\nPersonnes: " + str(1) + "\nTaille de la collection: " + str(T) + "\nCartes tirees: " + str(tirées) + "\nProbabilitée de completer: " + str(100 * probaInt) + "%")
            tirées += step
        print(str(T) + " : " + str(tirées - 1))
        X.append(T)
        Y.append(tirées - 1)
        plt.plot(proba, lw=2, label=str(T))
        proba.clear()
        print(proba)
        plt.title('Probabilité pour ' + str(T) + ' cartes')
        plt.xlabel('Nombre de cartes tirées')
        plt.ylabel('Probabilitée')
        plt.legend()
        plt.show()
'''
for i in range(20):
# on vérifie par échelle de 5% la probabilité
while 20 * probaInt <= i * T ** (tirées - 1):
probaInt = formuleInt(T, tirées)
proba.append((100*probaInt / T ** (tirées)))
xproba.append(i)
tirées += 1
# une fois le nombre de cartes tirées trouvées, on imprime le message
print(str(5*i) + "%:\nCartes Tirées: " + str(tirées - 1) +
"\nPourcentage de Chances: " + str(100*probaInt / T ** (tirées-1)) + "%")
'''
main()
# Plot the 98%-threshold draw count collected in X/Y during main().
plt.plot(X, Y, lw=2)
plt.title('Nombre de cartes nescessaires pour 98% en fonction du nombre total de cartes')
plt.xlabel('Nombre de cartes différentes')
plt.ylabel('Nombre de cartes tirées')
plt.legend()
plt.show() | Aymco/mathenjeans | main2.py | main2.py | py | 2,208 | python | fr | code | 1 | github-code | 13 |
32391659347 | import numpy as np
execfile("trustRegionMethod.py")
execfile("doglegMethod.py")
f = lambda x: (x[0]**2 + x[1] - 11)**2 + (x[0] + x[1]**2 - 7)**2
def gradf(x):
    """Analytic gradient of the Himmelblau function at x = (x0, x1)."""
    u = x[0]**2 + x[1] - 11
    v = x[0] + x[1]**2 - 7
    return np.array([4.0 * x[0] * u + 2.0 * v,
                     2.0 * u + 4.0 * x[1] * v])
def hessianf(x):
    """Analytic Hessian of the Himmelblau function at x = (x0, x1).

    Fix: d2f/dy2 = 2 + 4*(x + y^2 - 7) + 8*y^2 = 12*y**2 + 4*x - 26; the
    original used a coefficient of 3 on x instead of 4.
    """
    dx0x0 = 12*x[0]**2 + 4*x[1] - 42
    dx0x1 = 4*x[0] + 4*x[1]
    dx1x1 = 12*x[1]**2 + 4*x[0] - 26
    return np.array([[dx0x0, dx0x1], [dx0x1, dx1x1]])
def plotResults(f, x0Range, x1Range, solList, title):
    """Contour-plot f over [x0Range] x [x1Range] and overlay each iterate
    path in solList (arrays of (x0, x1) points)."""
    minX0 = x0Range[0]
    maxX0 = x0Range[1]
    minX1 = x1Range[0]
    maxX1 = x1Range[1]
    meshSize = 100
    x0list = np.linspace(minX0, maxX0, meshSize)
    x1list = np.linspace(minX1, maxX1, meshSize)
    X0, X1 = np.meshgrid(x0list, x1list)
    # Evaluate f point-by-point over the mesh.
    Z = np.zeros([meshSize, meshSize])
    for i in range(meshSize):
        for j in range(meshSize):
            Z[i, j] = f([X0[i, j], X1[i, j]])
    plt.figure()
    levels = [10, 40, 80, 160, 320, 640]
    contour = plt.contour(X0, X1, Z, levels, colors='k')
    # Each solution path: connected line plus a marker per iterate.
    for i in range(len(solList)):
        plt.plot(solList[i][:, 0], solList[i][:, 1], '-k')
        plt.plot(solList[i][:, 0], solList[i][:, 1], 'ko')
    plt.clabel(contour, colors='k', fmt='%2.1f', fontsize=12)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title(title)
    plt.show()
# Trust-region parameters: max radius, initial radius, acceptance ratio,
# convergence tolerance, and iteration cap.
maxDelta = 2
Delta0 = 1
eta = .2
TOL = 1e-10
MaxIter = 100
# Run from one start point per quadrant (Himmelblau has four minima).
x0 = [5.5, 5.5]
sol0 = trustRegionMethod(f, gradf, hessianf, doglegMethod, x0, maxDelta, Delta0, eta, TOL, MaxIter)
x0 = [-5.5, 5.5]
sol1 = trustRegionMethod(f, gradf, hessianf, doglegMethod, x0, maxDelta, Delta0, eta, TOL, MaxIter)
x0 = [5.5, -5.5]
sol2 = trustRegionMethod(f, gradf, hessianf, doglegMethod, x0, maxDelta, Delta0, eta, TOL, MaxIter)
x0 = [-5.5, -5.5]
sol3 = trustRegionMethod(f, gradf, hessianf, doglegMethod, x0, maxDelta, Delta0, eta, TOL, MaxIter)
solList = [sol0, sol1, sol2, sol3]
plotResults(f, [-6, 6], [-6, 6], solList, "")
| caleblogemann/MATH565ContinuousOptimization | Homework/midtermExam_2.py | midtermExam_2.py | py | 1,962 | python | en | code | 0 | github-code | 13 |
3093715229 | from ...constants import *
def draw(chart, canvas):
    """Draw the chart title centered horizontally, vertically centered in the
    title band at the top of the chart (reportlab canvas)."""
    text_width = canvas.stringWidth(chart['title'],
                                    "Helvetica", 24)
    # 1.2 is the usual line-height factor for the 24pt font.
    text_height = 24 * 1.2
    left = CHART_WIDTH/2 - text_width/2
    bottom = CHART_HEIGHT - TITLE_HEIGHT/2 + text_height/2
    canvas.setFont("Helvetica", 24)
    canvas.setFillColorRGB(0.25, 0.25, 0.625)
    canvas.drawString(left, bottom, chart['title'])
| chiraag-kakar/ckstats | build/lib/ckstats/renderers/pdf/title.py | title.py | py | 427 | python | en | code | 2 | github-code | 13 |
35658564815 | #!/usr/bin/python3
import sys
import requests
if len(sys.argv) < 2:
    print(sys.argv[0] + ": <url>")
    sys.exit(1)

headers = {'Referer': 'http://www.peter-lustig.com'}
# Fix: the Referer must be sent as an HTTP header.  Passing the dict via
# data= put it in the request body instead, so the spoofed referer never
# reached the server.
r = requests.get(sys.argv[1], headers=headers)
print(r.content)
| balle/python-network-hacks | referer-spoofing.py | referer-spoofing.py | py | 237 | python | en | code | 135 | github-code | 13 |
74468147857 | '''
This class serves as a third class
to create a dataset for the neural network
to train on.
'''
import numpy as np
import os
from angle_utils import get_angles
from openpose_data_for_single_image import get_single_image_data
import cv2
#Creates the two files (train input / labels) that will be used
def create_train_data():
#Using same location as feature-based classifier
input_dir = '../gesture-by-feature-classifier/Dataset'
#Input training files that will be generated for the neural network
output_fileAngles = 'train_angles.npy'
output_fileLabels = 'train_labels.npy'
#Arrays that will be converted to numpy format for training data.
angle_list = []
outputs = []
#Filling the angle_list (training angles array) and the output labels with data
i = 0
for label in os.listdir(input_dir):
for image in os.listdir(input_dir + '/' + label + '/'):
path = input_dir + '/' + label + '/' + image
outputs.append(i)
angle_list.append(get_angles(get_single_image_data(path)))
i += 1
#Convert arrays into numpy format and save them into files.
angle_list = np.array(angle_list)
np.save(output_fileAngles,angle_list)
outputs = np.array(outputs)
np.save(output_fileLabels,outputs)
create_train_data() | AnshKetchum/gesture-classifier | gesture-by-angle-classifier/input_creator.py | input_creator.py | py | 1,321 | python | en | code | 0 | github-code | 13 |
71351745618 | import asyncio
import aiohttp
import pickle
import csv
from bs4 import BeautifulSoup
import re
import argparse
import sys
import getpass
import time
def parse_arguments():
    """Parse the CLI: page range [START, FINISH), batch size, and an optional
    pickle with login credentials (prompted interactively when omitted)."""
    parser = argparse.ArgumentParser(
        description=(
            'Descarga las paginas [START, FINISH) del foro de la facultad.\n'
            'El tamanno default del batch es 10, tener cuidado con este parametro '
            'porque hacerlo muy grande puede hacer que bloqueen la cuenta.\n'
            'Leer el readme para una descripcion mas detrallada de uso y requisitos.'
            'Los archivos de salida se generan automaticamente y se llaman root_START-FINISH.tsv'
            'y child_START-FINISH.tsv'
        )
    )
    parser.add_argument("start", metavar="START", help="primera pagina que se quiere bajar",
                        type=int)
    parser.add_argument("finish", metavar="FINISH", help="ultima pagina que se quiere bajar",
                        type=int)
    parser.add_argument("-b", "--batch_size", default=10, help="cantidad de paginas que se bajan a la vez, default 10",
                        type=int)
    parser.add_argument("-l", "--login_data", help="un pickle con los datos del usuario para realizar la conexion, si se omite el script pide login")
    args = parser.parse_args()
    return args
def extract_data(raw_html):
    """Parse the raw HTML of one forum page.

    Returns (roots, childs): `roots` is a list of dicts with the fields we
    keep for each original post (OP), `childs` a list of dicts with the
    fields we keep for each reply.
    """
    # Collapse inter-tag whitespace before parsing so getText joins cleanly.
    soup = BeautifulSoup(re.sub(r'>\s+<', '><', raw_html), features='html5lib')
    # Original posts ("raiz" divs).
    raices = soup.find_all('div', class_='raiz')
    roots = []
    for raiz in raices:
        temp = {}
        temp['id'] = raiz.attrs['id'].split('_')[1]
        temp['titulo'] = raiz.h1.getText(strip=True)
        temp['autor'] = (
            raiz.find('a', class_='usuario').getText(strip=True)
            if raiz.find('a', class_='usuario') is not None
            else "NO_AUTHOR"
        )
        temp['fecha'] = raiz.find('li', class_='fecha').getText(strip=True)
        temp['tema'] = raiz.find('li', class_='tema').a.getText(strip=True)
        # To get the comment text we must first drop the trailing button list
        # (reply, parent, etc.).
        comentario = raiz.find('div', class_='texto')
        # careful: this mutates the soup — the ul is removed for good
        comentario.ul.decompose()
        text = ' '.join(comentario.stripped_strings)
        temp['mensaje'] = text if len(text) > 0 else 'NO_TEXT'
        temp['current_time'] = time.time()
        roots.append(temp)
    # Replies ("hijo" divs).
    hijos = soup.find_all('div', class_='hijo')
    childs = []
    for hijo in hijos:
        temp = {}
        temp['id'] = hijo.attrs['id'].split('_')[1]
        temp['id_th'] = hijo.attrs['class'][1][1:]
        temp['id_p'] = hijo.parent.attrs['id'].split('_')[1]
        temp['autor'] = (
            hijo.find('a', class_='usuario').getText(strip=True)
            if hijo.find('a', class_='usuario') is not None
            else "NO_AUTHOR"
        )
        temp['fecha'] = hijo.find('em').getText(strip=True)
        # same caveats as above
        comentario = hijo.find('div', class_='texto')
        comentario.ul.decompose()
        text = ' '.join(comentario.stripped_strings)
        temp['mensaje'] = text if len(text) > 0 else 'NO_TEXT'
        temp['current_time'] = time.time()
        childs.append(temp)
    return roots, childs
# async def fetch(session, url):
# async with session.get(url) as response:
# return await response.text()
async def download_page(session, url, root_writer, child_writer):
    """Fetch `url` with the (logged-in) session, parse it, and append the
    extracted root posts / replies to the two CSV writers."""
    async with session.get(url) as response:
        # print(f'\t{url}')
        roots, childs = extract_data(await response.text())
        for root in roots:
            root_writer.writerow(root)
        for child in childs:
            child_writer.writerow(child)
async def download_batch(session, batch, root_writer, child_writer):
    """
    Download every url in *batch* concurrently, writing the scraped rows
    through the shared CSV writers.

    Bug fix: the original tested ``i is 0`` — an identity comparison against
    an int literal, which raises a SyntaxWarning on CPython 3.8+ and is not
    guaranteed to hold; the correct equality test is ``i == 0``.
    """
    tasks = []
    for i, url in enumerate(batch):
        if i == 0:
            # Log only the first url so we can tell which batch is running.
            print(f'\tPrimera url del batch: {url}')
        task = asyncio.ensure_future(
            download_page(session, url, root_writer, child_writer)
        )
        tasks.append(task)
    await asyncio.gather(*tasks)
async def download_all(batches, root_writer, child_writer, login_data):
    """
    Log in to U-Cursos and then download every batch of forum pages, one
    batch after another (each batch itself is fetched concurrently).

    :param batches: iterable of iterables of urls to download
    :param root_writer: csv.DictWriter for top-level posts
    :param child_writer: csv.DictWriter for replies
    :param login_data: truthy to load credentials from 'user_data.pic'
        (a pickled dict); falsy to prompt for them interactively
    """
    async with session.get if False else aiohttp.ClientSession() as session:
        pass
if __name__ == '__main__':
    args = parse_arguments()
    # print(args)
    # sys.exit()
    # N is the number of pages we want to download (i.e. the last offset)
    N = args.finish - args.start
    # M is the number of requests fired at once
    # WARNING: BE CAREFUL MAKING THIS TOO LARGE, THINGS CAN GO VERY WRONG
    M = args.batch_size
    print(f'Cantidad total de requests: {N}')
    print(f'Cantidad de requests a la vez: {M}')
    print(f'Numero de batches: {(N + M - 1) // M}')
    print(f'\nAfirmense cabros...\n')
    # base url; the parentheses are only there to keep the code tidy
    base_url = (
        'https://www.u-cursos.cl/ingenieria/2/foro_institucion/'
        '?id_tema=&offset={}'
    )
    # base_url = 'https://example.com/{}'
    # this builds a generator of generators over every url we want to
    # download; a plain list would only weigh ~100kb, which is not much,
    # but it would still be wasteful
    batches = (
        (
            base_url.format(args.start + j)
            for j
            in range(
                i * M,
                (i + 1) * M if (i + 1) * M < N else N
            )
        )
        for i
        in range((N + M - 1) // M)
    )
    # now the I/O part begins
    with open(f'root_{args.start}-{args.finish}.tsv', 'w') as f_root,\
         open(f'child_{args.start}-{args.finish}.tsv', 'w') as f_child:
        root_fields = ['id', 'titulo', 'autor', 'fecha', 'tema', 'mensaje', 'current_time']
        root_writer = csv.DictWriter(
            f_root,
            fieldnames=root_fields,
            delimiter='\t'
        )
        # better not to write the header so the output files are easier
        # to concatenate with cat
        # root_writer.writeheader()
        child_fields = ['id', 'id_th', 'id_p', 'autor', 'fecha', 'mensaje', 'current_time']
        child_writer = csv.DictWriter(
            f_child,
            fieldnames=child_fields,
            delimiter='\t'
        )
        # same comment as above
        # child_writer.writeheader()
        asyncio.get_event_loop().run_until_complete(
            download_all(batches, root_writer, child_writer, args.login_data)
        )
    print()
    print("Creo que termine, igual revisa que la cantidad de comentarios descargados tenga sentido")
| cc5212/2019-ustalker | intento-de-scrapper/scraper.py | scraper.py | py | 7,788 | python | es | code | 3 | github-code | 13 |
15508700180 | '''
Created on Mar 17, 2014
@author: corwin
'''
import sys
#from PyQt4.QtCore import Qt, QSize
#from PyQt4.QtGui import QApplication, QMainWindow, QWidget, QPainter, QImage, QColor
import PIL.Image
from collections import namedtuple
import math
class BoxGeometry(namedtuple('BoxGeometry', ['x', 'y', 'w', 'h'])):
    """Axis-aligned crop box: origin ``(x, y)`` plus size ``(w, h)``."""

    @classmethod
    def parse(cls, spec):
        """Build a BoxGeometry from a spec string of the form ``"X,Y:WxH"``."""
        origin, size = spec.split(':')
        x_str, y_str = origin.split(',')
        w_str, h_str = size.split('x')
        return BoxGeometry(int(x_str), int(y_str), int(w_str), int(h_str))
class CrosswordBitmap(object):
    """
    Detects a crossword grid in a PNG screenshot (loaded with PIL).

    The image is sampled at the centre of each of the ``ncols`` x ``nrows``
    cells inside ``box_geom``; bright cells are open (" "), dark cells are
    blocked ("X").
    """

    def __init__(self, ncols, nrows,
                 box_geom, png_filename="../../output.png"):
        """
        :param ncols: number of grid columns
        :param nrows: number of grid rows
        :param box_geom: BoxGeometry of the grid inside the image, or a
            falsy value to use the whole image
        :param png_filename: path of the screenshot to analyse
        """
        self.ncols, self.nrows = ncols, nrows
        self.png = PIL.Image.open(png_filename)
        if box_geom:
            self.box = box_geom
        else:
            # Pillow exposes width/height as properties (not methods).
            self.box = BoxGeometry(0, 0, self.png.width, self.png.height)

    def raster(self, fb):
        # Legacy debug helper from the PyQt days: paints per-pixel brightness
        # into a QImage-like frame buffer.  ``QColor`` (and ``self.w`` /
        # ``self.h``) are no longer defined, so this is dead code kept for
        # reference only.
        # Bug fix: Python 2 ``xrange`` replaced with ``range``.
        for x in range(self.w):
            for y in range(self.h):
                c = self._near(x, y) * 4
                fb.setPixel(x, y, QColor(c, c, c).rgb())

    def detect_grid(self):
        """Sample each cell and return the resulting CrosswordGrid."""
        nrows, ncols = self.nrows, self.ncols
        cw = [[0] * ncols for _ in range(nrows)]
        w = self.box.w
        h = self.box.h
        for row in range(nrows):
            for col in range(ncols):
                # Centre of the cell, translated into image coordinates.
                x, y = self._trasnform_xy((col * w + w/2) // ncols,
                                          (row * h + h/2) // nrows)
                # Use the brightest pixel of a 3x3 neighbourhood to be
                # robust against thin grid lines crossing the sample point.
                v = max(self._bright(self.png.getpixel((x+dx, y+dy)))
                        for dx in [-1,0,1]
                        for dy in [-1,0,1])
                is_white = v > 0.8
                cw[row][col] = " " if is_white else "X"
        return CrosswordGrid(cw)

    def _trasnform_xy(self, x, y):
        # Translate box-relative coordinates into image coordinates.
        # (The misspelled name is kept because other methods call it.)
        return (x + self.box.x, y + self.box.y)

    def _near(self, x, y):
        # Binary brightness probe used by the legacy ``raster`` helper.
        # (The original carried an unreachable run-length loop after the
        # return statements; that dead code has been removed.)
        x, y = self._trasnform_xy(x, y)
        if self._bright(self.png.getpixel((x, y))) > 0.8: return 255/4
        return 0

    def _bright(self, qc):
        """Return the normalized (0..1) brightness of an RGB(A) pixel tuple."""
        qc = [x/255. for x in qc]  # normalize 0..255 channels to 0..1
        if len(qc) == 4:  # contains alpha; ignore it
            r, g, b, _ = qc
        else:
            r, g, b = qc
        return math.sqrt((r*r + g*g + b*b) / 3.)
class CrosswordGrid(object):
    """A crossword grid held as a list of rows of one-character cells."""

    def __init__(self, cw):
        self.cw = cw

    def renumber(self):
        """Assign clue numbers (scanning right-to-left within each row) to
        every open cell that starts a horizontal or vertical run."""
        grid = self.cw

        def in_range(row, col):
            return 0 <= row < len(grid) and 0 <= col < len(grid[row])

        def is_vacant(row, col):
            return in_range(row, col) and grid[row][col] == " "

        starts = []
        for i, row in enumerate(grid):
            for j in reversed(range(len(row))):
                horiz = not is_vacant(i, j+1) and is_vacant(i, j) and is_vacant(i, j-1)
                vert = not is_vacant(i-1, j) and is_vacant(i, j) and is_vacant(i+1, j)
                if horiz or vert:
                    starts.append((i, j))
        # Mutate only after scanning, so the numbering does not disturb
        # the vacancy tests above.
        for idx, (i, j) in enumerate(starts):
            grid[i][j] = str(idx + 1)

    def output_json(self, out=sys.stdout):
        """Dump the grid to *out* as commented pseudo-JSON."""
        grid = self.cw
        for row in grid:
            print("#", " ".join("%2s" % cell for cell in row), file=out)
        print("{", file=out)
        print(' "$": {"nrows": %d, "ncols": %d},' % (len(grid), len(grid[0])), file=out)
        as_json = lambda cell: '"x"' if cell == 'X' else cell
        for i, row in enumerate(grid):
            if i > 0: print(",", file=out)
            print(" %d: {" % (i+1), end=' ', file=out)
            print(", ".join("%d: %s" % (j+1, as_json(cell)) for j, cell in enumerate(row)
                            if cell != " "), "}", end=' ', file=out)
        print("}", file=out)
class Style(object):
    """Simple drawing style: a stroke plus an optional fill."""

    Stroke = namedtuple('Stroke', 'style color')
    Fill = namedtuple('Fill', 'style color')
    Fill.__new__.__defaults__ = (None,)   # make Fill's ``color`` optional

    def __init__(self):
        self.stroke = self.Stroke('solid', 0)
        self.fill = self.Fill('transparent')

    def with_(self, **kw):
        """Set the given attributes on self and return self (fluent style).

        Bug fix: ``dict.iteritems()`` is Python 2 only and raises
        AttributeError on Python 3; ``items()`` is the correct call.
        """
        for k, v in kw.items():
            setattr(self, k, v)
        return self

    DEFAULT = None   # replaced right after the class body (needs an instance)

Style.DEFAULT = Style()
def detect_grid_and_output_json(png_filename, json_filename=None, is_cropped=False):
    """
    Detect a 13x13 crossword grid in *png_filename* and write it as
    pseudo-JSON to *json_filename* (stdout when None).

    Bug fix: the original called ``CrosswordBitmap(13, 13, size, size,
    margin_top, png_filename)`` — six positional arguments against a
    constructor that only takes (ncols, nrows, box_geom, png_filename),
    a guaranteed TypeError.  Passing ``None`` as the box means "use the
    whole image".  The output file is now closed deterministically instead
    of leaking the handle.  ``is_cropped`` is accepted for backward
    compatibility but unused.
    """
    wb = CrosswordBitmap(13, 13, None, png_filename)
    cw = wb.detect_grid()
    cw.renumber()
    if json_filename:
        with open(json_filename, 'w') as out:
            cw.output_json(out)
    else:
        cw.output_json(sys.stdout)
if __name__ == '__main__':
    import sys
    import argparse
    a = argparse.ArgumentParser()
    a.add_argument("png-filename")
    a.add_argument('--box', type=str, nargs='?', default=None)
    a.add_argument('--geom', type=str, nargs='?', default='13')
    a = a.parse_args()
    # Parse geom: either 'WxH' or a single number for a square grid.
    geom = a.geom.split('x')
    if len(geom) == 2: w, h = map(int, geom)
    elif len(geom) == 1: w = h = int(geom[0])
    else: raise RuntimeError("invalid size '%s'" % a.geom)
    # Parse box ('X,Y:WxH' crop region; None means "use the whole image")
    if a.box:
        box = BoxGeometry.parse(a.box)
    else:
        box = None
    # 'png-filename' is not a valid Python identifier, hence getattr.
    wb = CrosswordBitmap(w, h, box, png_filename=getattr(a, 'png-filename'))
    cw = wb.detect_grid()
    cw.renumber()
    cw.output_json()
| corwin-of-amber/Web.Crossword | src/cropper/squares.py | squares.py | py | 5,659 | python | en | code | 0 | github-code | 13 |
251361701 | from dolfin import *
import numpy as np
import sympy as sm
import matplotlib.pyplot as plt
class MMS:
    """
    Class for calculating source terms of the KNP-EMI system for given exact
    solutions
    """
    def __init__(self):
        # define symbolic variables
        self.x, self.y, self.t = sm.symbols('x[0] x[1] t')

    def get_exact_solution(self):
        """Return a dict of sympy expressions for the manufactured (exact)
        concentrations and potentials in terms of self.x, self.y, self.t.

        NOTE(review): ``pi`` is presumably provided by the
        ``from dolfin import *`` star import — confirm.
        """
        # define manufactured (exact) solutions
        x = self.x; y = self.y; t = self.t

        # ---------------------------- Non-zero J_M ---------------------------- #
        # sodium (Na) concentration
        Na_i_e = 0.7 + 0.3*sm.sin(2*pi*x)*sm.sin(2*pi*y)*sm.exp(-t)
        Na_e_e = 1.0 + 0.6*sm.sin(2*pi*x)*sm.sin(2*pi*y)*sm.exp(-t)
        # potassium (K) concentration
        K_i_e = 0.3 + 0.3*sm.sin(2*pi*x)*sm.sin(2*pi*y)*sm.exp(-t)
        K_e_e = 1.0 + 0.2*sm.sin(2*pi*x)*sm.sin(2*pi*y)*sm.exp(-t)
        # chloride (Cl) concentration
        Cl_i_e = 1.0 + 0.6*sm.sin(2*pi*x)*sm.sin(2*pi*y)*sm.exp(-t)
        Cl_e_e = 2.0 + 0.8*sm.sin(2*pi*x)*sm.sin(2*pi*y)*sm.exp(-t)
        # potential - nonzero J_M
        phi_i_e = sm.cos(2*pi*x)*sm.cos(2*pi*y)*(1 + sm.exp(-t))
        phi_e_e = sm.cos(2*pi*x)*sm.cos(2*pi*y)

        exact_solutions = {'Na_i_e':Na_i_e, 'K_i_e':K_i_e, 'Cl_i_e':Cl_i_e,\
                           'Na_e_e':Na_e_e, 'K_e_e':K_e_e, 'Cl_e_e':Cl_e_e,\
                           'phi_i_e':phi_i_e, 'phi_e_e':phi_e_e}
        return exact_solutions

    def get_MMS_terms_EMI(self, time):
        """Build (src_terms, exact_sols, init_conds, bndry_terms,
        subdomains_MMS) for the EMI (potentials-only) sub-problem, with all
        symbolic expressions compiled to FEniCS Expressions at *time*."""
        x = self.x; y = self.y; t = self.t
        # get manufactured solution
        exact_solutions = self.get_exact_solution()
        # unwrap exact solutions
        for key in exact_solutions:
            # exec() changed from python2 to python3
            # (injects each exact solution as a module-level global so the
            # expressions below can refer to them by name)
            exec('global %s; %s = exact_solutions["%s"]' % (key, key ,key))

        # --------------------- Calculate components ------------------------ #
        # gradients
        grad_phii, grad_phie = [np.array([sm.diff(foo, x), sm.diff(foo, y)])
                                for foo in (phi_i_e, phi_e_e)]
        # membrane potential
        phi_M_e = phi_i_e - phi_e_e

        # current defined intracellular: - grad(phi_i) dot i_normals [(-1, 0), (1, 0), (0, -1), (0, 1)]
        JMe_i = [grad_phii[0], - grad_phii[0], grad_phii[1], - grad_phii[1]]
        # current defined extracellular: grad(phi_e) dot e_normals [(1, 0), (-1, 0), (0, 1), (0, -1)]
        JMe_e = [grad_phie[0], - grad_phie[0], grad_phie[1], - grad_phie[1]]

        # setup subdomain for internal and external facets - normals (1, 0), (1, 0), (0, 1), (0, 1)
        subdomains_MMS = [('near(x[0], 0)', 'near(x[0], 1)', 'near(x[1], 0)', 'near(x[1], 1)'),
                          ('near(x[0], 0.25)', 'near(x[0], 0.75)', 'near(x[1], 0.25)', 'near(x[1], 0.75)')]

        # --------------------- Calculate source terms ---------------------- #
        # equations for potentials: fE = - F sum(z_k*div(J_k_r)
        f_phi_i = - (sm.diff(grad_phii[0], x) + sm.diff(grad_phii[1], y))
        f_phi_e = - (sm.diff(grad_phie[0], x) + sm.diff(grad_phie[1], y))

        # equation for phi_M: f = C_M*d(phi_M)/dt - (I_M - I_ch)
        fJM = [sm.diff(phi_M_e, t) - foo for foo in JMe_i]
        # coupling condition for I_M: - grad(ui)*n_i = grad(ue)*n_e + g
        fgM = [i - e for i,e in zip(JMe_i, JMe_e)]

        # --------------------- Convert to expressions ---------------------- #
        # exact solutions
        phii_e, phie_e, phiM_e = [Expression(sm.printing.ccode(foo), t=time, degree=4)
                                  for foo in (phi_i_e, phi_e_e, phi_M_e)]
        # exact membrane flux
        JM_e = [Expression(sm.printing.ccode(foo), t=time, degree=4) for foo in JMe_i]
        # source terms
        f_phii, f_phie = [Expression(sm.printing.ccode(foo), t=time, degree=4)
                          for foo in (f_phi_i, f_phi_e)]
        # source term membrane flux
        f_JM = [Expression(sm.printing.ccode(foo), t=time, degree=4) for foo in fJM]
        f_gM = [Expression(sm.printing.ccode(foo), t=time, degree=4) for foo in fgM]
        # initial conditions
        #init_phiM = Expression(sm.printing.ccode(phi_M_e), t=0.0, degree=4)
        # NOTE(review): this passes the compiled Expression ``phiM_e`` to
        # sympy's ccode, while the commented-out line above used the sympy
        # expression ``phi_M_e`` — looks suspicious; confirm intent.
        init_phiM = Expression(sm.printing.ccode(phiM_e), t=0.0, degree=4)
        # exterior boundary terms
        J_e = Expression((sm.printing.ccode(grad_phie[0]), \
                          sm.printing.ccode(grad_phie[1])), t=time, degree=4)

        # --------------------- Gather expressions -------------------------- #
        # exact solutions
        exact_sols = {'phi_i_e':phii_e, 'phi_e_e':phie_e, 'phi_M_e':phiM_e, 'J_M_e':JM_e}
        # source terms
        src_terms = {'f_phi_i':f_phii, 'f_phi_e':f_phie, 'f_J_M':f_JM, 'f_g_M':f_gM}
        # initial conditions
        init_conds = {'phi_M':init_phiM}
        # boundary terms
        bndry_terms = {'J_e':J_e}

        return src_terms, exact_sols, init_conds, bndry_terms, subdomains_MMS

    def get_MMS_terms_KNPEMI(self, time):
        """Build (src_terms, exact_sols, init_conds, bndry_terms,
        subdomains_MMS) for the full KNP-EMI problem (ion concentrations
        Na/K/Cl plus potentials), compiled to FEniCS Expressions at *time*."""
        x = self.x; y = self.y; t = self.t
        # get manufactured solution
        exact_solutions = self.get_exact_solution()
        # unwrap exact solutions
        for key in exact_solutions:
            # exec() changed from python2 to python3
            # (same global-injection trick as in get_MMS_terms_EMI)
            exec('global %s; %s = exact_solutions["%s"]' % (key, key ,key))

        # --------------------- Calculate components ------------------------ #
        # gradients
        grad_Nai, grad_Ki, grad_Cli, grad_phii, grad_Nae, grad_Ke, grad_Cle, grad_phie = \
                [np.array([sm.diff(foo, x), sm.diff(foo, y)])
                for foo in (Na_i_e, K_i_e, Cl_i_e, phi_i_e, Na_e_e, K_e_e, Cl_e_e, phi_e_e)]

        # compartmental fluxes (drift-diffusion; Cl carries opposite charge)
        J_Na_i = - grad_Nai - Na_i_e*grad_phii
        J_Na_e = - grad_Nae - Na_e_e*grad_phie
        J_K_i = - grad_Ki - K_i_e*grad_phii
        J_K_e = - grad_Ke - K_e_e*grad_phie
        J_Cl_i = - grad_Cli + Cl_i_e*grad_phii
        J_Cl_e = - grad_Cle + Cl_e_e*grad_phie

        # membrane potential
        phi_M_e = phi_i_e - phi_e_e

        # membrane flux defined intracellularly
        total_flux_i = - (J_Na_i + J_K_i - J_Cl_i)
        # current defined intracellular: - total_flux_i dot i_normals [(-1, 0), (1, 0), (0, -1), (0, 1)]
        JMe_i = [- total_flux_i[0], total_flux_i[0], - total_flux_i[1], total_flux_i[1]]

        # membrane flux defined extracellularly
        total_flux_e = J_Na_e + J_K_e - J_Cl_e
        # current defined intracellular: total_flux_e dot e_normals [(1, 0), (-1, 0), (0, 1), (0, -1)]
        JMe_e = [total_flux_e[0], - total_flux_e[0], total_flux_e[1], - total_flux_e[1]]

        # ion channel currents
        I_ch_Na = phi_M_e                 # Na
        I_ch_K = phi_M_e                  # K
        I_ch_Cl = phi_M_e                 # Cl
        I_ch = I_ch_Na + I_ch_K + I_ch_Cl # total

        # setup subdomain for internal and external facets - normals (1, 0), (1, 0), (0, 1), (0, 1)
        subdomains_MMS = [('near(x[0], 0)', 'near(x[0], 1)', 'near(x[1], 0)', 'near(x[1], 1)'),
                          ('near(x[0], 0.25)', 'near(x[0], 0.75)', 'near(x[1], 0.25)', 'near(x[1], 0.75)')]

        # --------------------- Calculate source terms ---------------------- #
        # equations for ion cons: f = dk_r/dt + div (J_kr)
        f_Na_i = sm.diff(Na_i_e, t) + sm.diff(J_Na_i[0], x) + sm.diff(J_Na_i[1], y)
        f_Na_e = sm.diff(Na_e_e, t) + sm.diff(J_Na_e[0], x) + sm.diff(J_Na_e[1], y)
        f_K_i = sm.diff(K_i_e, t) + sm.diff(J_K_i[0], x) + sm.diff(J_K_i[1], y)
        f_K_e = sm.diff(K_e_e, t) + sm.diff(J_K_e[0], x) + sm.diff(J_K_e[1], y)
        f_Cl_i = sm.diff(Cl_i_e, t) + sm.diff(J_Cl_i[0], x) + sm.diff(J_Cl_i[1], y)
        f_Cl_e = sm.diff(Cl_e_e, t) + sm.diff(J_Cl_e[0], x) + sm.diff(J_Cl_e[1], y)

        # equations for potentials: fE = - F sum(z_k*div(J_k_r)
        f_phi_i = - ((sm.diff(J_Na_i[0], x) + sm.diff(J_Na_i[1], y))
                   + (sm.diff(J_K_i[0], x) + sm.diff(J_K_i[1], y))
                   - (sm.diff(J_Cl_i[0], x) + sm.diff(J_Cl_i[1], y)))
        f_phi_e = - ((sm.diff(J_Na_e[0], x) + sm.diff(J_Na_e[1], y))
                   + (sm.diff(J_K_e[0], x) + sm.diff(J_K_e[1], y))
                   - (sm.diff(J_Cl_e[0], x) + sm.diff(J_Cl_e[1], y)))

        # equation for phi_M: f = C_M*d(phi_M)/dt - (I_M - I_ch)
        fJM = [sm.diff(phi_M_e, t) + I_ch - foo for foo in JMe_i]
        # coupling condition for I_M: - total_flux_i*n_i = total_flux_e*n_e + g
        fgM = [i - e for i,e in zip(JMe_i, JMe_e)]

        # --------------------- Convert to expressions ---------------------- #
        # exact solutions
        Nai_e, Nae_e, Ki_e, Ke_e, Cli_e, Cle_e, phii_e, phie_e, phiM_e = \
                [Expression(sm.printing.ccode(foo), t=time, degree=4)
                for foo in (Na_i_e, Na_e_e, K_i_e, K_e_e, Cl_i_e, Cl_e_e, phi_i_e, phi_e_e, phi_M_e)]
        # exact membrane flux
        JM_e = [Expression(sm.printing.ccode(foo), t=time, degree=4) for foo in JMe_i]
        # source terms
        f_Nai, f_Nae, f_Ki, f_Ke, f_Cli, f_Cle, f_phii, f_phie = \
                [Expression(sm.printing.ccode(foo), t=time, degree=4)
                for foo in (f_Na_i, f_Na_e, f_K_i, f_K_e, f_Cl_i, f_Cl_e, f_phi_i, f_phi_e)]
        # source term membrane flux
        f_JM = [Expression(sm.printing.ccode(foo), t=time, degree=4) for foo in fJM]
        # source term continuity coupling condition on gamma
        f_gM = [Expression(sm.printing.ccode(foo), t=time, degree=4) for foo in fgM]
        # initial conditions concentrations
        init_Nai, init_Nae, init_Ki, init_Ke, init_Cli, init_Cle, init_phiM = \
                [Expression(sm.printing.ccode(foo), t=time, degree=4)
                for foo in (Na_i_e, Na_e_e, K_i_e, K_e_e, Cl_i_e, Cl_e_e, phi_M_e)]
        # exterior boundary terms
        J_Nae, J_Ke, J_Cle = [Expression((sm.printing.ccode(foo[0]),
                                          sm.printing.ccode(foo[1])), t=time, degree=4)
                              for foo in (J_Na_e, J_K_e, J_Cl_e)]
        # ion channel currents
        # (the sympy objects are rebound to the compiled Expressions here)
        I_ch_Na, I_ch_K, I_ch_Cl = \
                [Expression(sm.printing.ccode(foo), t=time, degree=4)
                for foo in (I_ch_Na, I_ch_K, I_ch_Cl)]

        # --------------------- Gather expressions -------------------------- #
        # exact solutions
        exact_sols = {'Na_i_e':Nai_e, 'K_i_e':Ki_e, 'Cl_i_e':Cli_e,
                      'Na_e_e':Nae_e, 'K_e_e':Ke_e, 'Cl_e_e':Cle_e,
                      'phi_i_e':phii_e, 'phi_e_e':phie_e, 'phi_M_e':phiM_e,
                      'J_M_e':JM_e, 'I_ch_Na':I_ch_Na, 'I_ch_K':I_ch_K,
                      'I_ch_Cl':I_ch_Cl}
        # source terms
        src_terms = {'f_Na_i':f_Nai, 'f_K_i':f_Ki, 'f_Cl_i':f_Cli,
                     'f_Na_e':f_Nae, 'f_K_e':f_Ke, 'f_Cl_e':f_Cle,
                     'f_phi_i':f_phii, 'f_phi_e':f_phie, 'f_J_M':f_JM,
                     'f_g_M':f_gM}
        # initial conditions
        init_conds = {'Na_i':init_Nai, 'K_i':init_Ki, 'Cl_i':init_Cli,
                      'Na_e':init_Nae, 'K_e':init_Ke, 'Cl_e':init_Cle,
                      'phi_M':init_phiM}
        # boundary terms
        bndry_terms = {'J_Na_e':J_Nae, 'J_K_e':J_Ke, 'J_Cl_e':J_Cle}

        return src_terms, exact_sols, init_conds, bndry_terms, subdomains_MMS
| cdaversin/mixed-dimensional-examples | KNPEMI/KNPEMI_MMS.py | KNPEMI_MMS.py | py | 11,362 | python | en | code | 3 | github-code | 13 |
71304613137 | from pacotes.Contato import Contato
from pacotes.ListaEncadeada import ListaEncadeada
from pacotes.Fila import Fila
from pacotes.Pilha import Pilha
# Exercise the three data structures with the same set of contacts.
l1 = ListaEncadeada()
l2 = Fila()
l3 = Pilha()

# One fresh Contato per name, pushed onto the list, queue and stack.
for nome in ("Kelvin", "Richardson", "Erick", "Joaquim"):
    contato = Contato()
    contato.nome = nome
    l1.adicionar(contato)
    l2.adicionar(contato)
    l3.adicionar(contato)
l1.exibir() | rich4rds0n/EstruturaDeDados | main.py | main.py | py | 556 | python | pt | code | 0 | github-code | 13 |
3131238145 | import pandas as pd
import numpy as np
import time
import re
import sys
def clean(p1, p2):
    """
    Clean the movies (*p1*) and ratings (*p2*) CSV files and write the
    results to 'movies_procesadas.csv' / 'ratings_procesados.csv'.

    DATA LOADING: if the data lived on a remote resource, we could read it
    with wget via:
        import wget
        url = 'https://path/to/file'
        filename = wget.download(url)
    For simplicity we assume the files are already local.
    """
    # Load the data
    dfm = pd.read_csv(p1)
    dfr = pd.read_csv(p2)

    """
    TRATAMIENTO VALORES PERDIDOS
    """
    # Drop duplicate rows
    dfm = dfm.drop_duplicates()
    dfr = dfr.drop_duplicates()

    # Missing values in dfm
    # NOTE(review): np.isreal is applied elementwise here to keep rows whose
    # movieId is a real number — confirm it behaves as intended on
    # object-dtype columns.
    dfm = dfm[np.isreal(dfm['movieId'])]
    dfm = dfm.dropna(subset=['movieId'])

    def regex_filter(val):
        # genres must be pipe-separated words, e.g. "Action|Sci-Fi"
        regex = "[a-zA-Z \-]+(\|[a-zA-Z \-]+)*$"
        if val and re.fullmatch(regex, val):
            return True
        return False

    dfm = dfm[dfm['genres'].apply(regex_filter)]

    # Missing values in dfr
    dfr = dfr.dropna(subset=['movieId'])
    dfr = dfr[np.isreal(dfr['movieId'])]

    dfr = dfr.dropna(subset=['rating'])
    # Valid ratings are 0, 0.5, ..., 5.0
    dfr = dfr[(np.isreal(dfr['rating'])) &
              (dfr['rating'].isin(np.arange(0, 5.5, 0.5)))]

    dfr = dfr.dropna(subset=['timestamp'])
    # Timestamps must lie between the epoch and "now"
    dfr = dfr[(np.isreal(dfr['timestamp'])) &
              (dfr['timestamp'] >= 0) &
              (dfr['timestamp'] <= time.time())]

    """
    GUARDAR DATOS ACTUALIZADOS
    """
    # Save the cleaned data
    dfm.to_csv("movies_procesadas.csv", index = False)
    dfr.to_csv("ratings_procesados.csv", index = False)
if __name__ == '__main__':
    # CLI entry point: argv[1] = movies CSV path, argv[2] = ratings CSV path.
    clean(sys.argv[1], sys.argv[2])
| Ludvins/MCD_Practicas_GD | limpieza.py | limpieza.py | py | 1,592 | python | es | code | 0 | github-code | 13 |
74443902736 | #Implementing a stack
class Stack:
    """LIFO stack over a Python list that rejects duplicate entries."""

    def __init__(self):
        self.stack = []

    def push(self, data):
        """Append *data* to the top of the stack.

        Returns True on success, None (after reporting) when *data* is
        already present.
        """
        if data in self.stack:
            print("Duplicate Entry")
            return None
        self.stack.append(data)
        return True

    def pop(self):
        """Remove and return the top element; None when the stack is empty."""
        if not self.stack:
            print("Stack is Empty")
            return None
        return self.stack.pop()

    def printStack(self):
        """Print every element (bottom to top); returns None."""
        if not self.stack:
            print("Stack Empty")
            return None
        print("\nElements of the Stack are\n")
        for element in self.stack:
            print("{} ".format(element))
        return None
# Demo: push a handful of values (including one duplicate), pop the top
# element, then dump the remaining contents.
stack = Stack()
for valor in (4, 7, 8, 9, 10, 10):
    stack.push(valor)
stack.pop()
stack.printStack()
| hashbanger/Python_Advance_and_DS | DataStructures/Traditional/Stack.py | Stack.py | py | 930 | python | en | code | 0 | github-code | 13 |
30073013846 | from plyer import notification
import requests
from bs4 import BeautifulSoup
import time
def notifyMe(title, message):
    """Show a desktop notification (via plyer) with the given title/message."""
    notification.notify(
        title=title,
        message=message,
        # NOTE(review): hard-coded absolute icon path — only valid on the
        # author's machine; consider a path relative to this script.
        app_icon="C:\\Users\\dhira\\Desktop\\notification\\corona.ico",
        timeout=10
    )
def getData(url):
    """Fetch *url* with a blocking GET and return the response body as text."""
    response = requests.get(url)
    return response.text
if __name__ == "__main__":
# notifyMe("Dexzter", "Lets stop the spread of covid-19")
while True:
myHtmlData = getData('https://www.mohfw.gov.in/')
soup = BeautifulSoup(myHtmlData, 'html.parser')
# print(soup.prettify())
myDataStr = ""
for tr in soup.find_all('tbody')[7].find_all('tr'):
myDataStr += tr.get_text()
myDataStr = myDataStr[1:]
itemList = (myDataStr.split("\n\n"))
states = ["Delhi", "Bihar", "West Bengal"]
for item in itemList[0:24]:
dataList = item.split('\n')
if dataList[1] in states:
print(dataList)
nTitle = 'Cases of COVID-19'
nText = f"{dataList[1]}\n Total Cases: {int(dataList[2]) + int(dataList[3])}\n Cured: {dataList[4]} Deaths: {dataList[5]}"
notifyMe(nTitle, nText)
time.sleep(2)
time.sleep(3600)
| dexzter07/notification-on-covid-19 | main.py | main.py | py | 1,287 | python | en | code | 0 | github-code | 13 |
12918622920 | import os
from numericalFunctions import pointwiseXY_C
if( 'CHECKOPTIONS' in os.environ ) :
    options = os.environ['CHECKOPTIONS'].split( )
    if( '-e' in options ) : print( __file__ )   # '-e' option: echo this test file's name

# Location of the C test driver relative to this script.
CPATH = '../../../../Test/UnitTesting/integrate'

# Rebuild and run the C driver; its verbose output lands in CPATH/v,
# which the code below parses and cross-checks against pointwiseXY_C.
os.system( 'cd %s; make -s clean; ./integrationXY -v > v' % CPATH )
def skipBlankLines( ls ) :
    """Drop leading blank lines from *ls*; a list that reduces to a single
    blank line becomes []."""
    index = 0
    for index, line in enumerate( ls ) :
        if line.strip( ) != '' : break
    remainder = ls[index:]
    if ( len( remainder ) == 1 ) and ( remainder[0].strip( ) == '' ) : remainder = []
    return( remainder )
def getIntegerValue( name, ls ) :
    """Parse a '# <name> = <int>' header line at ls[0]; return the tuple
    (remaining lines, value)."""
    prefix = "# %s = " % name
    if ls[0][:len( prefix )] != prefix :
        raise Exception( '%s: missing %s info: "%s"' % ( __file__, name, ls[0][:-1] ) )
    return ( ls[1:], int( ls[0].split( '=' )[1] ) )
def getDoubleValue( name, ls ) :
    """Parse a '# <name> = <float>' header line at ls[0]; return the tuple
    (remaining lines, value)."""
    prefix = "# %s = " % name
    if ls[0][:len( prefix )] != prefix :
        raise Exception( '%s: missing %s info: "%s"' % ( __file__, name, ls[0][:-1] ) )
    return ( ls[1:], float( ls[0].split( '=' )[1] ) )
def compareValues( label, i, v1, v2 ) :
    """Raise unless *v1* and *v2* agree after rounding to ~8 significant
    digits (round-trip through 12 then 7 digits to drop the last bits)."""
    rounded = [ '%.7e' % float( '%.12e' % v ) for v in ( v1, v2 ) ]
    if rounded[0] != rounded[1] :
        print( '<%s> <%s>' % ( rounded[0], rounded[1] ) )
        raise Exception( '%s: values %e and %e diff by %e at %d for label = %s' % ( __file__, v1, v2, v2 - v1, i, label ) )
def getXYData( ls ) :
    """Read a '# length = n' header plus n 'x y' rows from *ls*; return the
    tuple (remaining lines after trailing blanks, pointwiseXY_C curve)."""
    ls, length = getIntegerValue( 'length', ls )
    # One [x, y] float pair per data line.
    points = [ list( map( float, line.split( ) ) ) for line in ls[:length] ]
    curve = pointwiseXY_C.pointwiseXY_C( points, initialSize = len( points ), overflowSize = 10 )
    return( skipBlankLines( ls[length:] ), curve )
def checkIntegration( count, xMin, xMax, data, normedData, sums ) :
    """
    Verify pointwiseXY_C integration against the C driver's output:
    integrate(xMin, xMax) must match the printed 'sum', swapping the limits
    must flip the sign, and normalize() must reproduce the C-normalized
    curve and integrate to 1.
    """
    V = data.integrate( xMin, xMax )
    # 'sums' looks like '... = <sum> invSum = ...': take the number before 'invSum'.
    sum = float( sums.split( 'invSum' )[0].split( '=' )[1] )
    compareValues( 'sum', count, V, sum )
    invV = data.integrate( xMax, xMin )
    # Swapping the integration limits must negate the integral exactly.
    if( V != -invV ) : raise Exception( '%s: at %d V = %g != -invV = %g' % ( __file__, count, V, invV ) )
    norm = data.normalize( )
    if( len( norm ) != len( normedData ) ) : raise Exception( '%s: at %d len( norm ) = %d != len( normedData ) = %d' % \
            ( __file__, count, len( norm ), len( normedData ) ) )
    # Compare both coordinates of every normalized point.
    for i, xy in enumerate( norm ) :
        compareValues( "x norm", count, xy[0], normedData[i][0] )
        compareValues( "x norm", count, xy[1], normedData[i][1] )
    v = norm.integrate( )
    # The normalized curve must integrate to 1 (to near machine precision).
    if( abs( v - 1. ) > 1e-14 ) : raise Exception( '%s: at %d norm = %e != 1.' % ( __file__, count, v ) )
# Parse the C driver's output file and check each record.
f = open( os.path.join( CPATH, 'v' ) )
ls = f.readlines( )
f.close( )

count = 0
while( len( ls ) ) :
    count += 1
    # NOTE(review): record 9 apparently carries two extra leading curves
    # that must be skipped — mirrors the C driver's output format; confirm.
    if( count == 9 ) :
        ls, dummy = getXYData( ls )
        ls, dummy = getXYData( ls )
    ls, xMin = getDoubleValue( 'xMin', ls )
    ls, xMax = getDoubleValue( 'xMax', ls )
    ls, data = getXYData( ls )
    # The line after the curve holds the 'sum = ... invSum = ...' summary.
    ls, sums = ls[1:], ls[0]
    ls, normedData = getXYData( ls )
    checkIntegration( count, xMin, xMax, data, normedData, sums )
| LLNL/gidiplus | numericalFunctions/ptwXY/Python/Test/UnitTesting/integrate/integrationXY.py | integrationXY.py | py | 3,023 | python | en | code | 10 | github-code | 13 |
5409590029 | # -*- coding: utf-8 -*-
import http.server
import threading
import webbrowser
import os
HOST = "0.0.0.0"
PORT = 8000
def run_server():
    """Serve the ./app directory over HTTP on HOST:PORT, blocking forever."""
    # Only share the app folder next to this script.
    web_dir = os.path.join(os.path.dirname(__file__), 'app')
    os.chdir(web_dir)
    handler_cls = http.server.SimpleHTTPRequestHandler
    server = http.server.HTTPServer((HOST, PORT), handler_cls)
    print("Serving HTTP on %s port %s ..." % (HOST, PORT))
    server.serve_forever()
def launch_browser():
    """Open http://localhost:8000/ in the default browser after a 5 s delay
    (gives the server time to come up)."""
    def _open():
        # Opens in a new tab if a browser window is already open.
        webbrowser.open_new_tab('http://localhost:8000/')

    threading.Timer(interval=5, function=_open).start()
if __name__ == "__main__":
launch_browser()
run_server()
| threemonkeybits/geometry-combat | Geometry_Combat.py | Geometry_Combat.py | py | 815 | python | en | code | 0 | github-code | 13 |
6565243813 | import os
import re
import time
import tensorflow as tf
from tensorflow.python.framework.ops import EagerTensor
from src.decoder import CaptionDecoder
from src.utils import prepare_image_for_model
class ModelManager:
"""
Class that orchestrates the usage of a model.
"""
    def __init__(self, encoder, decoder, tokenizer, optimizer, config, saved_models_file_dir='saved_models'):
        """
        :param encoder: image encoder model (produces feature tensors)
        :param decoder: CaptionDecoder-style recurrent decoder
        :param tokenizer: callable tokenizer with get_vocabulary()
            (presumably a keras TextVectorization layer — confirm)
        :param optimizer: tf.keras optimizer applied to both sub-models
        :param config: dict read for 'epochs', 'early_stop_thresh' and
            'max_caption_len'
        :param saved_models_file_dir: directory where checkpoints are stored
        """
        self.encoder = encoder
        self.decoder = decoder
        self.tokenizer = tokenizer
        # Per-position crossentropy; reduction='none' so padding tokens can
        # be masked out in loss_function below.
        self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction='none')
        self.optimizer = optimizer
        self.config = config
        self.saved_models_file_dir = saved_models_file_dir
    def loss_function(self, real_values, pred):
        """Masked sparse categorical crossentropy: positions where the
        target token id is 0 (padding) contribute zero loss.

        NOTE(review): the mean is taken over all positions, masked ones
        included — padding still dilutes the average.
        """
        mask = tf.math.logical_not(tf.math.equal(real_values, 0))
        loss_ = self.loss_object(real_values, pred)
        mask = tf.cast(mask, dtype=loss_.dtype)
        loss_ *= mask
        return tf.reduce_mean(loss_)
loss_plot = []
    @tf.function
    def train_step(self, img_tensor, target) -> tuple:
        """
        Function which performs one train step, from retrieving features from the encoder to generating texts with decoder.
        It also calculates gradients and updates model's parameters with them.
        :param img_tensor: Image from which texts are about to be generated
        :param target: Tokenized target captions, shape (batch, seq_len)
            (indexed as target[:, i] below)
        :return: It outputs loss and total loss (loss divided by the len of the caption)
        """
        loss = 0
        # initializing the hidden state for each batch
        # because the captions are not related from image to image
        hidden = CaptionDecoder.reset_state(target.shape[0], self.decoder.units)
        # initialize the batch of predictions with [[3],[3], ...] i.e. with start tokens
        dec_input = tf.expand_dims([self.tokenizer(['starttoken'])[0][0]] * target.shape[0], 1)

        with tf.GradientTape() as tape:
            features = self.encoder(img_tensor)
            for i in range(1, target.shape[1]):
                # passing the features through the decoder
                predictions, hidden, _ = self.decoder((dec_input, features, hidden))
                loss += self.loss_function(target[:, i], predictions)
                # using teacher forcing: feed the ground-truth token as the
                # next decoder input instead of the prediction
                dec_input = tf.expand_dims(target[:, i], 1)

        total_loss = (loss / int(target.shape[1]))
        # One optimizer step over the joint encoder+decoder parameters.
        trainable_variables = self.encoder.trainable_variables + self.decoder.trainable_variables
        gradients = tape.gradient(loss, trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, trainable_variables))
        return loss, total_loss
    def fit(self, batched_dataset) -> dict:
        """
        A method that fits a passed batched dataset to the model. It iterates through data, performs a train_step method
        (defined above) and calculates loss. Each 100 batches it checks whether save_model (based on self.save_model_check
        method which checks its loss value). After each epoch, it's number, loss and time to complete is printed.
        Additionally, it uses early stopping (threshold is set in the config file)
        :param batched_dataset: Data to be fitted
        :return: history dict with 'epochs' (int) and 'loss' (per-epoch means)
        """
        prev_epoch_loss = 999
        history_dict = {"epochs": 0, "loss": []}
        for epoch in range(self.config['epochs']):
            start = time.time()
            total_loss = 0
            num_steps = 0
            for batch, (img_tensor, target) in enumerate(batched_dataset):
                batch_loss, t_loss = self.train_step(img_tensor, target)
                total_loss += t_loss
                num_steps += 1
                if batch % 100 == 0:
                    # Per-token loss for progress reporting.
                    scaled_batch_los = batch_loss.numpy() / int(target.shape[1])
                    print('Epoch {} Batch {} Loss {:.4f}'.format(
                        epoch + 1, batch, scaled_batch_los))
                    # Checkpoint whenever this loss beats every saved model.
                    if self.save_model_check(scaled_batch_los):
                        self.save_model(scaled_batch_los)
            current_loss = total_loss / num_steps
            history_dict['epochs'] += 1
            history_dict['loss'].append(current_loss.numpy())
            print('Epoch {} Loss {:.6f} Time taken {:.1f} sec'.format(
                epoch + 1,
                current_loss,
                time.time() - start))
            # stop once it has converged
            improvement = prev_epoch_loss - current_loss
            if improvement < self.config['early_stop_thresh']:
                print("Stopping because improvement={} < {}".format(improvement, self.config['early_stop_thresh']))
                break
            prev_epoch_loss = current_loss
        return history_dict
    def save_model(self, loss_value) -> None:
        """
        Saves both encoder and decoder with regard to theirs corresponding losses.
        The checkpoint directory is named 'total_loss_<loss rounded to 3>'.
        :param loss_value: Loss value corresponding to the models that are about to be saved
        """
        model_path = os.path.join(self.saved_models_file_dir, 'total_loss_' + str(round(loss_value, 3)))
        # Strip a leading '/' so the path stays relative (avoids writing to
        # the filesystem root).
        model_path = model_path if model_path[0] != '/' else model_path[1:]  # Permission problem
        self.encoder.save(os.path.join(model_path, 'encoder'))
        self.decoder.save(os.path.join(model_path, 'decoder'))
def save_model_check(self, loss_value) -> bool:
"""
Checks whether a model should be saved. It iterates through already saved models, checks whether the new loss
value is lower than the found ones and if so returns a boolean value that tells to save a model
:param loss_value: Loss value corresponding to the models that are about to be saved
:return: A boolean value describing whether saving can be performed
"""
try:
saved_models_paths = os.listdir(self.saved_models_file_dir)
except FileNotFoundError: # The provided pass does not exist (saved_models dir has not been created yet)
return True # Creates saved_models directory and saves model
saved_models_losses = []
# Iterates through found model paths and checks whether a new model should be saved
for saved_model_path in saved_models_paths:
# List of numbers found in the saved model path
saved_model_loss = re.findall(r"[-+]?(?:\d*\.\d+|\d+)", saved_model_path)
# Exclude any integers
saved_model_loss = [model_loss for model_loss in saved_model_loss if '.' in model_loss]
if saved_model_loss:
saved_models_losses.append(float(saved_model_loss[0])) # If loss is float, appends to the list
if saved_models_losses: # If any model already saved
return loss_value < min(saved_models_losses) # If current model is better than others
return True # No model was saved yet, so save the current one
    def predict(self, data_source) -> list[list]:
        """
        Method that predicts captions given data. Data_source can be either of a form of string (path to an image), or
        a tensor in the shape of [no. images, width, height, no, channels]
        :param data_source: Handled data sources: str (path to an image) tensor, or list of paths to which predictions
        should be made
        :return: List of lists (for each image one list)
        :raises NotImplementedError: for any other data_source type
        """
        if type(data_source) == str:
            # NOTE(review): predict_from_path is not defined in the visible
            # part of this class — presumably this should call
            # predict_from_a_file; confirm.
            generated_captions = self.predict_from_path(data_source)
        elif type(data_source) == EagerTensor:
            generated_captions = self.predict_from_tensor(data_source)
        elif type(data_source) == list:
            generated_captions = self.predict_form_paths_list(data_source)
        else:
            raise NotImplementedError(f"This data source: {data_source} is not yet implemented ")
        return generated_captions
def predict_from_tensor(self, data) -> list[list]:
"""
Iterates through images in a batch and creates predictions from images which are already preprocessed, in the
form of tensor
:param data: Preprocessed images in the form of a tensor with shape [no. images, width, height, no.channels]
:return: List with captions for each image
"""
all_images_outcome = []
data = tf.expand_dims(data, 0) if len(data.shape) == 3 else data # If just one image is passed
for image in data:
image = tf.expand_dims(image, 0) if len(image.shape) == 3 else image # Add batch dim if needed
image_captions = self.predict_image(image)
all_images_outcome.append(image_captions)
return all_images_outcome
def predict_from_a_file(self, file_path) -> list[str]:
"""
First it reads and preprocessses a file with prepare_image_for_model method and the generates captions with
self.predict_image method
:param file_path: File path to an image to generate captions for
:return: List of generated captions for the passed image
"""
image = prepare_image_for_model(file_path)
captions = self.predict_image(image)
return captions
    def predict_image(self, image) -> list[str]:
        """
        Creates captions for a single (batched) preprocessed image by greedy-ish
        sampling: tokens are drawn from the decoder's distribution one at a time
        until 'endtoken' or the configured maximum caption length is reached.
        :param image: Image tensor to generate captions for (assumes a leading
            batch dimension of 1 -- TODO confirm against callers)
        :return: List of generated caption tokens (may include the final 'endtoken')
        """
        outcome = []
        features = self.encoder(image)
        # Fresh decoder hidden state for a batch of one.
        hidden = CaptionDecoder.reset_state(1, self.decoder.units)
        # Seed the decoder with the id of the start-of-caption token.
        dec_input = tf.expand_dims([self.tokenizer(['starttoken'])[0][0]], 0)
        for i in range(self.config['max_caption_len']):
            predictions, hidden, _ = self.decoder((dec_input, features, hidden))
            # Sample the next token id from the predicted distribution.
            predicted_id = tf.random.categorical(predictions, 1)[0][0].numpy()
            outcome.append(self.tokenizer.get_vocabulary()[predicted_id])
            if self.tokenizer.get_vocabulary()[predicted_id] == 'endtoken':
                break
            # Feed the sampled token back in as the next decoder input.
            dec_input = tf.expand_dims([predicted_id], 0)
        return outcome
def predict_form_paths_list(self, paths_list:list) -> list[list]:
generated_captions = []
for file_path in paths_list:
captions_for_image = self.predict_from_a_file(file_path)
generated_captions.append(captions_for_image)
return generated_captions
def predict_from_path(self, path):
generated_captions = []
if os.path.isdir(path):
for file_path in os.listdir(path):
image_path = os.path.join(path, file_path)
one_file_captions = self.predict_from_a_file(image_path)
generated_captions.append(one_file_captions)
else:
generated_captions = self.predict_from_a_file(path)
return generated_captions
| Michalweg/Image_captioning | src/model_manager.py | model_manager.py | py | 10,854 | python | en | code | 0 | github-code | 13 |
7782261596 | import os
import unittest
import tempfile
import clustermgr
from clustermgr.models import LDAPServer
class ViewFunctionsTestCase(unittest.TestCase):
    """Flask view tests for clustermgr, backed by a temporary SQLite database."""
    @classmethod
    def setUpClass(self):
        # NOTE(review): the first parameter of a classmethod is conventionally
        # named `cls`; it receives the class here despite being named `self`.
        clustermgr.app.config.from_object('clustermgr.config.TestingConfig')
        # Create a throwaway SQLite file and point SQLAlchemy at it.
        self.db_fd, clustermgr.app.config['DATABASE'] = tempfile.mkstemp()
        clustermgr.app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \
            clustermgr.app.config['DATABASE']
        self.app = clustermgr.app.test_client()
        with clustermgr.app.app_context():
            clustermgr.application.db.create_all()
    @classmethod
    def tearDownClass(self):
        # Close and delete the temporary database file created in setUpClass.
        os.close(self.db_fd)
        os.unlink(clustermgr.app.config['DATABASE'])
    def xtest_01_add_server_adds_data_to_db(self):
        # Name starts with 'x', so unittest's default 'test' prefix discovery
        # skips it -- effectively a disabled test.
        server_count = LDAPServer.query.count()
        self.app.post('/add_server/', data=dict(host='test.hostname.com',
            port=1389, starttls=True, role='master', server_id=100,
            replication_id=111), follow_redirects=True)
        self.assertEqual(server_count+1, LDAPServer.query.count())
# Allow running this test module directly: `python test_views.py`.
if __name__ == '__main__':
    unittest.main()
| GuillaumeSmaha/cluster-mgr | tests/test_views.py | test_views.py | py | 1,174 | python | en | code | 0 | github-code | 13 |
27328732338 | import socket
import time
import GameWorld as gw
import tiles
import Player_Class as pc
from Screen import Screen
from MenuHandler import MenuHandler
import json
import threading
class Server():
    """Minimal UDP relay: remembers every client that contacts it and forwards
    each received datagram (wrapped in JSON with the sender's address) to all
    other known clients."""
    def __init__(self, ip, port):
        self.ip = ip
        self.port = port
        self.clients = []  # addresses of every peer that has contacted us
        self.mainSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.mainSocket.bind((self.ip, self.port))
        self.mainSocket.setblocking(0)  # non-blocking so the loop can poll self.quitting
        self.quitting = False
        self.lock = threading.Lock()
    def Start(self):
        """Run the receive/forward loop on a daemon thread."""
        self.thread = threading.Thread(target=self.main)
        self.thread.setDaemon(True)
        self.thread.start()
    def main(self):
        """Receive datagrams and rebroadcast them (as JSON) to the other clients."""
        while not self.quitting:
            try:
                # BUG FIX: recvfrom() returns a (bytes, address) tuple; the old
                # code called .decode() on the tuple itself, which raised
                # AttributeError on every received datagram.
                raw, addr = self.mainSocket.recvfrom(1024)
                data = raw.decode()
                self.handleData(data, addr)
                jsonMessage = json.dumps({"data": data, "addr": addr})
                for client in self.clients:
                    if client != addr:
                        self.mainSocket.sendto(jsonMessage.encode(), client)
            except OSError:
                # Non-blocking socket: no datagram ready (or a transient send
                # error); keep polling until asked to quit.
                pass
        self.mainSocket.close()
    def handleData(self, data, addr):
        """Remember previously unseen client addresses."""
        if addr not in self.clients:
            self.clients.append(addr)
    def sendData(self, data):
        """Send *data* to every known client."""
        for client in self.clients:
            # BUG FIX: sendto() requires bytes, not str.
            self.mainSocket.sendto(('data: ' + data + " addr: " + self.ip).encode(), client)
    def Close(self):
        """Ask the receive loop to stop (the loop closes the socket itself)."""
        self.quitting = True
| synctax/Ascii-Arenas | Game Code/Server.py | Server.py | py | 1,237 | python | en | code | 0 | github-code | 13 |
28580254735 | #!/bin/python3
import subprocess
from queue import Queue
import time
from .pipelistener import PipeListener
'''
Interface EngineInterface: une instance de EngineInterface encapsule
une instance réelle du programme de go Engine avec des pipe Unix pour
écrire à son stdin et lire de son stdout et son stderr.
La classe a la méthode ask(cmd) qui permet d'envoyer une commande au
engine via son stdin, et récupère le output principal de son stdout
le output secondaire via son stderr.
La fonction retourne un tuple (contenu de stdout, contenu de stderr)
NOTE: <pipe>.readline() est un appel bloquant il faut donc utiliser
un thread séparé pour lire un nombre inconnu de lignes de stderr.
Engine output 2 lignes exactement sur STDOUT pour chaque commande.
on peut donc faire deux readline() par commande on sait qu'après ces
deux getline, le coup est fini. C'est ce que la fonction ask() fait.
Pour le output sur STDERR, c'est plus compliqué parce qu'on ne sait
pas combien de readline() faire. C'est la raison pour stderr_queue
et PipeListenerThread. De façon asynchrone, les lignes lues de stderr
sont mises dans stderr_queue.
Le main thread peut donc "vider stderr" de façon non-bloquante.
'''
class GTPWrapper(object):
    """Wraps a GTP go-engine subprocess; its stdout and stderr are drained
    asynchronously into queues by PipeListener threads (see module docstring)."""
    def __init__(self, engine_cmd, stdout_queue=None, stderr_queue=None):
        """Start the engine given by *engine_cmd* (an argv list) and attach
        listener threads for its stdout and stderr.

        :param stdout_queue: optional queue to receive stdout lines (a private
            Queue is created when omitted); same for *stderr_queue*.
        """
        print("===Python : GTPWrapper : Starting {} ===".format(engine_cmd[0]))
        self._engine = subprocess.Popen(
            engine_cmd,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            stderr=subprocess.PIPE,
            bufsize=1,
            universal_newlines=True
        )
        # Callers may supply their own queues; otherwise create private ones.
        self.stdout_queue = stdout_queue if stdout_queue is not None else Queue()
        self._stdout_listener = PipeListener(
            input_pipe=self._engine.stdout,
            output_queue=self.stdout_queue
        )
        self._stdout_listener.start()
        self.stderr_queue = stderr_queue if stderr_queue is not None else Queue()
        self._stderr_listener = PipeListener(
            input_pipe=self._engine.stderr,
            output_queue=self.stderr_queue
        )
        self._stderr_listener.start()
    def get_stderr(self):
        """Return the stderr content collected so far (non-blocking)."""
        return self._stderr_listener.get_content()
    def get_stdout(self):
        """Return the stdout content collected so far (non-blocking)."""
        return self._stdout_listener.get_content()
    def ask(self, cmd):
        """Send a GTP command line to the engine's stdin."""
        self._engine.stdin.write(cmd + '\n')
    def quit(self):
        """Ask the engine to quit, stop both listeners and reap the process."""
        self.ask('quit')
        self._stderr_listener.stop()
        self._stdout_listener.stop()
        outs, errs = self._engine.communicate()
    def kill(self):
        """Forcefully kill the engine, stop both listeners and reap the process."""
        self._engine.kill()
        self._stderr_listener.stop()
        self._stdout_listener.stop()
        outs, errs = self._engine.communicate()
| PhilippeCarphin/leela_interface | src/gtpwrapper.py | gtpwrapper.py | py | 2,782 | python | fr | code | 2 | github-code | 13 |
71137865938 | import pandas as pd
# Load the dataset; assumes cars.csv is in the current working directory.
cars=pd.read_csv('cars.csv')
# Problem 2A: first five rows, every second column (positional indexing).
odd=cars.iloc[0:5,0::2]
print(odd)
# Problem 2B: the row with label 0 (presumably the Mazda entry -- depends on the CSV).
MazdaRow=cars.loc[[0]]
print(MazdaRow)
# Problem 2C: the 'cyl' value of the row labelled 23.
cyl=cars.loc[[23],['cyl']]
print(cyl)
# Problem 2D: Model, cyl and gear columns for the rows labelled 1, 28 and 18.
z=cars.loc[[1,28,18],['Model','cyl','gear']]
print(z) | maricarr/Pandas | temp.py | temp.py | py | 283 | python | en | code | 0 | github-code | 13 |
28105391909 | #!/usr/bin/env python
import sys
from intcode import Intcode
class Game:
    """Arcade-cabinet wrapper around an Intcode program (AoC 2019 day 13)."""
    def __init__(self, data, input):
        self.intcode = Intcode(data, input)
        self.screen = {}  # (x, y) -> tile id: 0 empty, 1 wall, 2 block, 3 paddle, 4 ball
        self.rounds = 0
    def find_obj(self, obj):
        """Return the (x, y) of the first tile whose id equals *obj* (None if absent)."""
        for coord in self.screen.keys():
            if self.screen[coord] == obj:
                return coord
    def get_blocks(self):
        """Count block tiles (id 2) in the program's raw output.

        BUG FIX: previously read the module-level ``game`` instead of ``self``,
        so the method only worked on that one global instance.
        """
        pieces = []
        blocks = 0
        for n in range(len(self.intcode.outputs)):
            if n % 3 == 2:  # every third output value is a tile id
                pieces += [self.intcode.outputs[n]]
        for n in range(len(pieces)):
            if pieces[n] == 2:
                blocks += 1
        return blocks
    def draw_screen(self):
        """Consume pending (x, y, tile) output triples, update the screen map
        and print an ASCII rendering of it."""
        outputs = self.intcode.outputs
        self.intcode.outputs = []
        index = 0
        while index < len(outputs):
            x = outputs[index]
            y = outputs[index + 1]
            item = outputs[index + 2]
            index += 3
            self.screen[(x,y)] = item
        screen = ""
        for y in range(25):
            for x in range(41):
                if (x,y) not in self.screen or self.screen[(x,y)] == 0:
                    screen += ' '
                elif self.screen[(x,y)] == 1:
                    screen += 'w'
                elif self.screen[(x,y)] == 2:
                    screen += 'b'
                elif self.screen[(x,y)] == 3:
                    screen += 'p'
                elif self.screen[(x,y)] == 4:
                    screen += 'o'
            screen += '\n'
        print(f"\n{screen}")
    def run_game(self, manual=False):
        """Play the game to completion and return the final score.

        With ``manual=True`` the paddle is driven from stdin ("," left,
        "." right, space neutral, "s" saves the machine memory); otherwise a
        trivial AI keeps the paddle under the ball.
        """
        self.intcode.data[0] = 2  # BUG FIX: was the global ``game``; 2 enables free play
        last_move = ' '
        score = 0
        while self.intcode.is_running():
            self.rounds += 1
            self.intcode.run()
            self.draw_screen()
            ball = self.find_obj(4)
            paddle = self.find_obj(3)
            if (-1,0) in self.screen:
                # BUG FIX: the score lookup used to happen before this membership
                # check, raising KeyError until the score tile first appeared.
                score = self.screen[(-1,0)]
                print(f"score={score} rounds={self.rounds} ball={ball} paddle={paddle}")
            if manual:
                move_s = input("enter move[left=-1,,neutral=0,right=1.]: ")
                if len(move_s) == 0:
                    move_s = last_move
                if move_s == ',':
                    move = -1
                elif move_s == '.':
                    move = 1
                elif move_s == ' ':
                    move = 0
                elif move_s == 's':
                    # Save the current machine memory to a file and re-prompt.
                    name = f"day13-{self.rounds}"
                    data = [self.intcode.data[n] for n in self.intcode.data]
                    open(name, 'w').write(str(data))
                    print(f"wrote file {name}")
                    continue
                else:
                    continue
                last_move = move_s
            else:
                # Simple AI: move the paddle towards the ball's x position.
                if ball[0] < paddle[0]:
                    move = -1
                elif ball[0] > paddle[0]:
                    move = 1
                else:
                    move = 0
            self.intcode.inputs = [move]
        return score
if __name__ == '__main__':
    # Input file may be given as argv[1]; defaults to day13.input.
    name = 'day13.input'
    if len(sys.argv) > 1:
        name = sys.argv[1]
    # Accept both a bare comma-separated program and one wrapped in [...].
    contents = open(name).read().strip()
    if contents.startswith("["):
        contents = contents[1:]
    if contents.endswith("]"):
        contents = contents[:-1]
    data = [int(item.strip()) for item in contents.split(',')]
    # Part 1: run the program untouched and count the block tiles drawn.
    game = Game(data[:], [])
    game.intcode.run()
    print(game.intcode.outputs)
    part1 = game.get_blocks()
    # Part 2: play the game on a fresh machine and report the final score.
    game = Game(data[:], [])
    part2 = game.run_game()
    print(f"part 1: {part1}")
    print(f"part 2: {part2}")
| danschaffer/aoc | 2019/day13.py | day13.py | py | 3,626 | python | en | code | 0 | github-code | 13 |
10943821289 | from transformers import (
CamembertModel,
CamembertTokenizer,
CamembertConfig,
)
import torch
from torch import nn
from .config import CFG
class TextEncoder(nn.Module):
    """CamemBERT-based sentence encoder; returns the CLS-token hidden state."""
    def __init__(self, model_name=CFG.text_encoder_model, pretrained=CFG.pretrained, trainable=CFG.trainable):
        super().__init__()
        if pretrained:
            self.model = CamembertModel.from_pretrained(model_name)
        else:
            # Fresh, randomly initialised weights with the default config.
            self.model = CamembertModel(config=CamembertConfig())
        # Freeze or unfreeze the whole backbone according to `trainable`.
        for p in self.model.parameters():
            p.requires_grad = trainable
        # we are using the CLS token hidden representation as the sentence's embedding
        self.target_token_idx = 0
    def forward(self, input_ids, attention_mask):
        """Encode tokenised text; returns the CLS embedding for each sequence."""
        output = self.model(input_ids=input_ids, attention_mask=attention_mask)
        last_hidden_state = output.last_hidden_state
        return last_hidden_state[:, self.target_token_idx, :]
class ProjectionHead(nn.Module):
    """Projects an encoder embedding into the shared space:
    Linear -> GELU -> Linear -> Dropout, with a residual connection from the
    first projection, followed by LayerNorm."""
    def __init__(
        self,
        embedding_dim,
        projection_dim=CFG.projection_dim,
        dropout=CFG.dropout
    ):
        super().__init__()
        self.projection = nn.Linear(embedding_dim, projection_dim)
        self.gelu = nn.GELU()
        self.fc = nn.Linear(projection_dim, projection_dim)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(projection_dim)
    def forward(self, x):
        projected = self.projection(x)
        out = self.dropout(self.fc(self.gelu(projected)))
        # Residual connection around the second linear layer, then normalise.
        return self.layer_norm(out + projected)
class TextModel(nn.Module):
    """Text tower: CamemBERT encoder followed by a projection head into the
    shared embedding space."""
    def __init__(
        self,
        text_embedding = CFG.text_embedding
    ):
        super().__init__()
        self.text_encoder = TextEncoder()
        self.text_projection = ProjectionHead(embedding_dim=text_embedding)
        self.tokenizer = CamembertTokenizer.from_pretrained(CFG.text_tokenizer)
    def forward(self, batch):
        """Embed a pre-tokenised batch (a dict holding input_ids / attention_mask)."""
        # Getting Text Features
        text_features = self.text_encoder(
            input_ids=batch["input_ids"],
            attention_mask=batch["attention_mask"]
        )
        # Project to the same dim of image encoder
        text_embeddings = self.text_projection(text_features)
        return text_embeddings
def encode_text(self, text):
tokened_word = self.tokenizer(text, padding=True, truncation=True, max_length=CFG.max_length)
text_features = self.text_encoder(
input_ids=torch.tensor(tokened_word["input_ids"]).to(CFG.device),
attention_mask=torch.tensor(tokened_word["attention_mask"]).to(CFG.device)
)
text_embeddings = self.text_projection(text_features)
return text_embeddings | vikimark/Thai-Cross-CLIP | source/model.py | model.py | py | 2,773 | python | en | code | 3 | github-code | 13 |
16498501816 | from django.shortcuts import render,redirect,HttpResponse
from django.contrib.auth.models import User
from django.contrib import messages
from .models import *
def index(request):
    """Render the cascading location-selection page; on POST, persist the
    submitted name + location (resolving the posted FK ids to display names)
    as a UserResponses row."""
    title = "Select Location"  # NOTE(review): unused -- not passed to the template
    country = Country.objects.all()
    d = {'country': country}
    if request.method == "POST":
        name = request.POST['name']
        country = request.POST['country']
        state = request.POST['states']
        city = request.POST['cities']
        district = request.POST['districts']
        # Store human-readable names, not the raw ids posted by the form.
        obj = UserResponses.objects.create(name=name,country=Country.objects.get(id=country).name,state=State.objects.get(id=state).name,city=City.objects.get(id=city).name,district=District.objects.get(id=district).name)
        messages.success(request, 'submitted successfully.')
    return render(request,'index.html',d)
def load_states(request):
    """AJAX endpoint: render the <option> list of states for the chosen country."""
    chosen_country = request.GET.get('country')
    states = State.objects.filter(country_id=chosen_country).order_by('name')
    return render(request, 'states_dropdown_list_options.html', {'states': states})
def load_districts(request):
    """AJAX endpoint: render the <option> list of districts for the chosen state."""
    chosen_state = request.GET.get('state')
    districts = District.objects.filter(state_id=chosen_state).order_by('name')
    return render(request, 'districts_dropdown_list_options.html', {'districts': districts})
def load_cities(request):
    """AJAX endpoint: render the <option> list of cities for the chosen district."""
    chosen_district = request.GET.get('district')
    cities = City.objects.filter(district_id=chosen_district).order_by('name')
    return render(request, 'cities_dropdown_list_options.html', {'cities': cities})
def form (request):
    """Render the free-text location form; on POST, save the submitted values
    directly (unvalidated) as a UserResponses row."""
    title = "Select Location"  # NOTE(review): unused -- not passed to the template
    if request.method == "POST":
        name = request.POST['name']
        country = request.POST['country']
        state = request.POST['state']
        city = request.POST['city']
        obj = UserResponses.objects.create(name=name,country=country,state=state,city=city)
        messages.success(request, 'submitted successfully.')
    return render(request,'form2.html')
def printresponse(request):
    """List every stored UserResponses row on the responses page."""
    responses = UserResponses.objects.all()
    parms = {
        "responses":responses,
    }
return render(request,'responses.html',parms) | palakshivlani-11/django-dropdown-task | dropdown/views.py | views.py | py | 2,194 | python | en | code | 0 | github-code | 13 |
3273557877 | import numpy as np
import cv2
import time
# Open the default camera (device index 0); shared by all helpers below.
cap = cv2.VideoCapture(0)
# Property ids 3 and 4 are the capture frame width and height.
def make_1080p():
    cap.set(3, 1920)
    cap.set(4, 1080)
def make_720p():
    cap.set(3, 1280)
    cap.set(4, 720)
def make_480p():
    cap.set(3, 640)
    cap.set(4, 480)
def change_res(width, height):
    # Set an arbitrary capture resolution on the shared `cap` device.
    cap.set(3, width)
    cap.set(4, height)
def rescale_frame(frame, percent=75):
    """Return *frame* resized to *percent* per cent of its original size."""
    width = int(frame.shape[1] * percent / 100)
    height = int(frame.shape[0] * percent / 100)
    return cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)
# Main capture loop: show the camera feed (scaled to 30 %) in colour and in
# greyscale until the user presses 'q'.
while(True):
    ret, frame = cap.read()
    frame = rescale_frame(frame, 30)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow("frame", frame)
    cv2.imshow("gray", gray)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break
# BUG FIX: release the camera and close the windows instead of leaking them.
cap.release()
cv2.destroyAllWindows()
| CyberrGhostt/PyFacialRecognition | tut2/res-change.py | res-change.py | py | 889 | python | en | code | 1 | github-code | 13 |
383545803 | from __future__ import absolute_import, unicode_literals
import subprocess
import sys
import six
# On Python 2 under Windows a patched Popen from _win_subprocess is used,
# presumably to work around platform-specific subprocess limitations there
# (see _win_subprocess for details); everywhere else the stock one is fine.
if six.PY2 and sys.platform == "win32":
    from . import _win_subprocess
    Popen = _win_subprocess.Popen
else:
    Popen = subprocess.Popen
def run_cmd(cmd):
    """Run *cmd* and return ``(exit_code, stdout, stderr)`` as text.

    If the process cannot be started at all, the OS error number and message
    are returned instead of raising.
    """
    try:
        proc = Popen(
            cmd,
            universal_newlines=True,
            stdin=subprocess.PIPE,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
        )
        out, err = proc.communicate()  # no input is fed to the process
        code = proc.returncode
    except OSError as os_error:
        code, out, err = os_error.errno, "", os_error.strerror
    return code, out, err
# Public API of this helper module.
__all__ = (
    "subprocess",
    "Popen",
    "run_cmd",
)
| alexnathanson/solar-protocol | backend/createHTML/venv-bk/lib/python3.7/site-packages/virtualenv/util/subprocess/__init__.py | __init__.py | py | 696 | python | en | code | 207 | github-code | 13 |
24245561939 | from microbit import *
"""
while True:
""""""
"""
ball_pos = [2, 0]  # ball position [x, y] on the 5x5 LED grid
bar_pos = [2, 3]  # x positions of the paddle's two pixels (drawn on row 4)
isGameOver = False
speed = [0, 1]  # ball velocity [dx, dy] applied on each update
dt = 1000  # 1000 milliseconds [ms] = 1 second between ball updates
start = running_time()
while not isGameOver:
    time = running_time() - start  # elapsed time since the loop started
    # Drawing at the start of each loop iteration is fine too
    # set_pixel(x coordinate, y coordinate, brightness)
    display.clear()
    display.set_pixel(bar_pos[0], 4, 9)
    display.set_pixel(bar_pos[1], 4, 9)
    display.set_pixel(ball_pos[0], ball_pos[1], 9)
    #if time % dt == 0:
    # Advance the ball once per dt (with a ~2 ms tolerance window).
    if time % dt < 2 or time % dt > dt - 2:
        """
        ball_pos の計算処理
        """
        ball_pos[0] += speed[0]
        ball_pos[1] += speed[1]
        # Compute the ball's bounces
        """まずはバーとボールの跳ね返り処理"""
        """ ダメなやつ
        if ball_pos[1] == 3 and ball_pos[0] == 3 and bar_pos[1] == 3:
        """
        if ball_pos[1] == 3 and ball_pos[0] == bar_pos[0]:
            speed[1] *= -1
        elif ball_pos[1] == 3 and ball_pos[0] == bar_pos[1]:
            speed[1] *= -1
        elif ball_pos[1] == 0:
            speed[1] *= -1
    # Move the paddle when a button is pressed
    if button_a.was_pressed():
        bar_pos[0] -= 1
        bar_pos[1] -= 1
    # display.set_pixel(bar_pos[0], 4, 9)
    elif button_b.was_pressed():
        bar_pos[0] += 1
        bar_pos[1] += 1
    # Clamp the paddle at the walls so moving
    # into them does not cause an error
    if bar_pos[0] < 0:
        bar_pos[0] = 0
        bar_pos[1] = 1
    elif bar_pos[1] > 4:
        bar_pos[0] = 3
        bar_pos[1] = 4
    # All calculations done -- drawing here at the end of the loop also works
display.scroll("Game Over!") | irinaka-robodone/master-lesson08-2022 | sample/exception.py | exception.py | py | 1,857 | python | ja | code | 0 | github-code | 13 |
24653070122 | class Animal:
    def __init__(self, domestic, eatsGrass,legs):
        """Store the animal's kind label, grass-eating flag and leg count."""
        self.domestic = domestic  # e.g. "domestic" or "wild"
        self.eatsGrass = eatsGrass  # bool: does it eat grass?
        self.legs = legs  # number of legs
    def displayAnimal(self):
        """Return (not print) a one-line description of the animal."""
        if self.eatsGrass:
            return self.domestic + " has " + str(self.legs) + " legs and eats the grass."
        else:
            return self.domestic + " has " + str(self.legs) + " legs and doesn't eat the grass."
class Mammal(Animal):
    """A mammal; extends Animal with whether it drinks milk.

    BUG FIX: the class previously did not inherit from Animal, so
    super().__init__(domestic, eatsGrass, legs) raised TypeError (object's
    __init__ takes no extra arguments) and displayAnimal() did not exist.
    """
    def __init__(self, domestic, eatsGrass, legs, milk):
        super().__init__(domestic,eatsGrass,legs)
        self.milk = milk  # bool: does it drink milk?
    def displayMammal(self):
        """Print the full description, including the milk-drinking detail."""
        if self.milk:
            print(super().displayAnimal() + "And drinks milk.")
        else:
            print(super().displayAnimal() + "And doesn't drink milk.")
a1=Mammal("domestic", True, 4, False)
a1.displayMammal()
a2=Mammal("domestic", False, 2, True)
a2.displayMammal()
a3=Mammal("wild", True, 4, False)
| albinafrolova/pythonProgramming | animalclass.py | animalclass.py | py | 912 | python | en | code | 0 | github-code | 13 |
9974864355 | """
Sequential Search vs Binary Search:
-----------------------------------
- Sequential Search: O(n)
- Binary Search: O(log(n))
- Binary Search requires a sorted list
- Binary Search is faster than Sequential Search
Search Codes:
-------------
- Sequential Search:
- Search for an element in a list
- Return True if found, False otherwise
- Binary Search:
- Search for an element in a list
- Return True if found, False otherwise
- Requires a sorted list
- Uses the divide and conquer approach
- Compares the element with the middle element of the list
"""
class SearchingAlgorithms:
    """Sequential and binary search over Python lists; every method returns
    True when *number* is present, False otherwise."""
    def sequantialSearchUnordered(self, unorderedLİst, number):
        """Linear scan of an unordered list: O(n)."""
        index = 0
        found = False
        while index < len(unorderedLİst) and not found:
            if unorderedLİst[index] == number:
                found = True
            else:
                index += 1
        return found
    def sequantialSearchOrdered(self, orderedList, number):
        """Linear scan of an ascending-sorted list; stops early once an
        element larger than *number* is seen.

        BUG FIX: the body referenced the misspelled name ``orderedLİst``
        (dotted capital I), which raised NameError on every call.
        """
        index = 0
        found = False
        stop = False
        while index < len(orderedList) and not found and not stop:
            if orderedList[index] == number:
                found = True
            else:
                if orderedList[index] > number:
                    stop = True
                else:
                    index += 1
        return found
    def binarySearch(self, orderedList, number):
        """Binary search over an ascending-sorted list: O(log n)."""
        first = 0
        last = len(orderedList) - 1
        found = False
        while first <= last and not found:
            midPoint = (first + last) // 2
            if orderedList[midPoint] == number:
                found = True
            else:
                if number < orderedList[midPoint]:
                    last = midPoint - 1
                else:
                    first = midPoint + 1
        return found
"""
Hash Table:
-----------
- Hash Table is a data structure that maps keys to values for highly efficient lookup
- Hash Table is known as Dictionary in Python
- O(1) for insert, delete and search
- Hash Table uses a hash function to compute an index into an array of buckets or slots
"""
class HashTable:
    """Separate-chaining hash table mapping string keys to values."""
    def __init__(self, size):
        self.size = size
        self.dataMap = [None] * self.size  # each slot is None or a bucket list
    def hashFunc(self, key):
        """Fold the characters of *key* into a slot index."""
        acc = 0
        for ch in key:
            acc = (acc + ord(ch) * 23) % len(self.dataMap)
        return acc
    def setItem(self, key, value):
        """Append the [key, value] pair to the bucket for *key*."""
        slot = self.hashFunc(key)
        if self.dataMap[slot] is None:
            self.dataMap[slot] = []
        self.dataMap[slot].append([key, value])
    def getItem(self, key):
        """Return the value stored under *key*, or None when absent."""
        bucket = self.dataMap[self.hashFunc(key)]
        if bucket is not None:
            for pair in bucket:
                if pair[0] == key:
                    return pair[1]
        return None
    def getKeys(self):
        """Return every key in the table, in bucket order."""
        return [pair[0] for bucket in self.dataMap if bucket for pair in bucket]
    def printTable(self):
        """Print each slot index and its bucket."""
        for slot, bucket in enumerate(self.dataMap):
            print(slot, '->', bucket)
# Quick demonstration of the hash table above.
myHashTable = HashTable(5)
myHashTable.setItem('apple', 10)
myHashTable.setItem('orange', 20)
myHashTable.setItem('car', 30)
myHashTable.setItem('table', 40)
print(myHashTable.getItem('apple'))
print(myHashTable.getItem('orange'))
myHashTable.printTable()
print(myHashTable.getKeys())
#Two Sum Problem:
#----------------
#Given an array of integers, return indices of the two numbers such that they add up to a specific target.
#You may assume that each input would have exactly one solution, and you may not use the same element twice.
class SumSolution:
    """Three solutions to Two Sum: find indices i != j with
    nums[i] + nums[j] == target (None when no such pair exists)."""
    def twoSum(self, nums, target):
        """Brute force, O(n^2).

        BUG FIX: the inner loop now starts at i + 1 so an element can no
        longer be paired with itself (e.g. nums=[1, 4, 3], target=8 used to
        return [1, 1]).
        """
        for i in range(0, len(nums)):
            for j in range(i + 1, len(nums)):
                if nums[i] + nums[j] == target:
                    return [i, j]
        return None
    def twoSum2(self, nums, target):
        """Membership lookup per element, O(n^2) worst case.

        BUG FIX: guards against pairing an element with itself when its
        complement equals the element and occurs only once in the list.
        """
        for i in range(0, len(nums)):
            complement = target - nums[i]
            if complement in nums:
                j = nums.index(complement)
                if j == i:
                    # Look for another occurrence after i; skip if none exists.
                    try:
                        j = nums.index(complement, i + 1)
                    except ValueError:
                        continue
                return [i, j]
        return None
    # O(n) time and O(n) space: single pass with a value -> index map.
    def twoSum3(self, nums, target):
        myHash = {}
        for index, num in enumerate(nums):
            difference = target - num
            if difference in myHash:
                return [myHash[difference], index]
            myHash[num] = index
# Demo runs (7 + 15 = 22; 2 + 15 = 17; 2 + 11 = 13).
twosum = SumSolution()
print(twosum.twoSum([2, 7, 11, 15], 22))
print(twosum.twoSum2([2, 7, 11, 15], 17))
print(twosum.twoSum3([2, 13, 11, 15], 13))
#Encode and Decode TinyURL:
#--------------------------
#TinyURL is a URL shortening service where you enter a URL such as https://leetcode.com/problems/design-tinyurl and it returns a short URL such as http://tinyurl.com/4e9iAk.
#Design the encode and decode methods for the TinyURL service. There is no restriction on how your encode/decode algorithm should work.
#You just need to ensure that a URL can be encoded to a tiny URL and the tiny URL can be decoded to the original URL.
class Codec:
    """Bidirectional TinyURL codec: shortens long URLs to sequential ids and
    resolves them back."""
    def __init__(self):
        self.encodingMap = {}  # long URL -> short URL
        self.decodingMap = {}  # short URL -> long URL
        self.baseUrl = 'http://tinyurl.com/'
    def encode(self, longUrl):
        """Return the short URL for *longUrl*, minting a new one on first use."""
        existing = self.encodingMap.get(longUrl)
        if existing is not None:
            return existing
        shortUrl = self.baseUrl + str(len(self.encodingMap) + 1)
        self.encodingMap[longUrl] = shortUrl
        self.decodingMap[shortUrl] = longUrl
        return shortUrl
    def decode(self, shorturl):
        """Resolve a previously issued short URL back to the original."""
        return self.decodingMap[shorturl]
# Demo: shorten one URL and resolve it again.
myUrl = Codec()
print(myUrl.encode('https://recepbattal.com/test/leet/code'))
print(myUrl.decode('http://tinyurl.com/1'))
#Brick Wall:
#-----------
#There is a brick wall in front of you. The wall is rectangular and has several rows of bricks. The bricks have the same height but different width.
#You want to draw a vertical line from the top to the bottom and cross the least bricks.
#The brick wall is represented by a list of rows. Each row is a list of integers representing the width of each brick in this row from left to right.
#If your line go through the edge of a brick, then the brick is not considered as crossed. You need to find out how to draw the line to cross the least bricks and return the number of crossed bricks.
#You cannot draw a line just along one of the two vertical edges of the wall, in which case the line will obviously cross no bricks.
class BrickWall:
    """LeetCode 554: find the vertical line crossing the fewest bricks."""
    def leastBricks(self, wall):
        """For every row, record the cumulative gap positions between bricks
        (the wall's right edge is excluded); the line through the most-shared
        gap position crosses the fewest bricks."""
        gap_counts = {0: 0}  # ensures max() below never sees an empty dict
        for row in wall:
            edge = 0
            for brick_width in row[:-1]:  # skip the last brick: its edge is the wall edge
                edge += brick_width
                gap_counts[edge] = gap_counts.get(edge, 0) + 1
        return len(wall) - max(gap_counts.values())
# Demo: the sample wall from the problem statement, then a wall with no gaps.
myWall = BrickWall()
print(myWall.leastBricks([[1, 2, 2, 1],
                          [3, 1, 2],
                          [1, 3, 2],
                          [2, 4],
                          [3, 1, 2],
                          [1, 3, 1, 1]]))
print(myWall.leastBricks([[1],[1],[1]]))
38644865522 | import torch.nn as nn
class CNN(nn.Module):
    """Four Conv-BN-ReLU-MaxPool blocks plus a 10-way linear classifier.

    NOTE(review): forward() only applies layer1-layer3 before the classifier;
    layer4 is constructed but never used -- confirm whether that is intended.
    """
    def __init__(self):
        super(CNN, self).__init__()
        def conv_block(in_ch, out_ch):
            # Conv (3x3, stride 1, pad 2) -> BatchNorm -> ReLU -> 2x2 max-pool.
            return nn.Sequential(
                nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=2),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(),
                nn.MaxPool2d(kernel_size=2, stride=2))
        self.layer1 = conv_block(3, 16)
        self.layer2 = conv_block(16, 32)
        self.layer3 = conv_block(32, 48)
        self.layer4 = conv_block(48, 64)
        self.fc = nn.Linear(1200, 10)  # 48 channels * 5 * 5 after layer3 on 32x32 input
    def forward(self, x):
        out = self.layer3(self.layer2(self.layer1(x)))
        out = out.reshape(out.size(0), -1)  # flatten to (batch, features)
        return self.fc(out)
class AlexNet(nn.Module):
    """AlexNet-style classifier with a 10-way output head."""
    def __init__(self):
        super(AlexNet, self).__init__()
        # Convolutional feature extractor.
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        # Pools the feature map to a fixed 6x6 so varying input sizes work.
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        # Fully connected classifier head with dropout regularisation.
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, 10),
        )
    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        # Flatten to (batch, 256*6*6) before the linear layers.
        x = x.view(x.size(0), 256 * 6 * 6)
        x = self.classifier(x)
return x | fengziyue/CU-Computing-Autonomy | Homework1/models.py | models.py | py | 2,473 | python | en | code | 5 | github-code | 13 |
36603782394 | import datetime
def solution(n, customers):
    """Simulate *n* kiosks serving *customers* and return the maximum number
    of customers handled by any single kiosk.

    Each customer entry is "MM/DD HH:MM:SS duration_minutes" (year fixed to
    2020); every customer goes to the kiosk that frees up earliest.
    """
    answer = 0
    e_time = [datetime.datetime(2020,1,1,0,0,0) for _ in range(n)] # completion time of each kiosk's current work
    key_c = [0 for _ in range(n)] # usage count per kiosk
    match_key = 0
    for custom_info in customers:
        a_date, a_time, s_time = map(str, custom_info.split())
        mon, day = map(int, a_date.split('/'))
        hour, minute, second = map(int, a_time.split(':'))
        s_time = int(s_time)
        c_datetime = datetime.datetime(2020,mon,day,hour,minute,second)
        match_key = e_time.index(min(e_time)) # pick the kiosk that finishes earliest
        key_c[match_key] += 1
        if min(e_time) < c_datetime: # at least one kiosk is already idle
            e_time[match_key] = c_datetime + datetime.timedelta(minutes=s_time) # work starts at the customer's arrival
        else: # every kiosk is still busy
            e_time[match_key] += datetime.timedelta(minutes=s_time) # work starts when the kiosk frees up
    answer = max(key_c)
return answer | majung2/CTpractice | python/2020하반기/2020쿠팡테크캠퍼스리쿠르팅/02.py | 02.py | py | 1,063 | python | ko | code | 0 | github-code | 13 |
72299603218 |
# Maps ontology class CURIEs (UBERON / MP / GO / HP / MPATH / CL, most with a
# PHENOTYPE suffix) to coarse, human-readable category labels.
closure_map = {
    'UBERON:0001434PHENOTYPE': 'Skeletal system',
    'UBERON:0002101PHENOTYPE': 'Limbs',
    'UBERON:0001016PHENOTYPE': 'Nervous system',
    'UBERON:0007811PHENOTYPE': 'Head or neck',
    'MP:0005376': 'Metabolism/homeostasis',
    'UBERON:0004535PHENOTYPE': 'Cardiovascular system',
    'UBERON:0002416PHENOTYPE': 'Integument',
    'UBERON:0004122PHENOTYPE': 'Genitourinary system',
    'UBERON:0000970PHENOTYPE': 'Eye',
    'UBERON:0001015PHENOTYPE': 'Musculature',
    'MPATH:218PHENOTYPE': 'Neoplasm',
    'UBERON:0001007PHENOTYPE': 'Digestive system',
    'UBERON:0002405PHENOTYPE': 'Immune system',
    'UBERON:0002390PHENOTYPE': 'Blood and blood-forming tissues',
    'UBERON:0000949PHENOTYPE': 'Endocrine',
    'UBERON:0001004PHENOTYPE': 'Respiratory system',
    'UBERON:0001690PHENOTYPE': 'Ear',
    'UBERON:0002384PHENOTYPE': 'Connective tissue',
    'UBERON:0000323PHENOTYPE': 'Prenatal development or birth',
    'GO:0040007PHENOTYPE': 'Growth',
    'HP:0025142': 'Symptom',
    'UBERON:0002224PHENOTYPE': 'Thoracic cavity',
    'UBERON:0000310PHENOTYPE': 'Breast',
    'HP:0001608': 'Voice',
    'CL:0000000PHENOTYPE': 'Cellular'
}
def create_closure_bin(fcmap={}):
    """
    Given a facet count dict from golr_query (i.e. map of class ID to count)
    return a new dict that maps original IDs to high level text descriptors.

    It is assumed that the input dict is complete (with closed world assumption).
    i.e. grouping terms already included, and if not included assume = 0

    Return: Tuple of two dictionaries, a label-count map and id-count map
    """
    # Start every known label and CURIE at zero (closed-world assumption).
    lmap = {label: 0 for label in closure_map.values()}
    idmap = {curie: 0 for curie in closure_map}
    for curie, count in fcmap.items():
        label = closure_map.get(curie)
        if label is None:
            continue
        # Several ontologies can merge onto one label; keep the highest count.
        if lmap[label] > count:
            continue
        lmap[label] = count
        idmap[curie] = count
    return lmap, idmap
| monarch-initiative/biolink-api | biolink/api/bio/closure_bins.py | closure_bins.py | py | 2,105 | python | en | code | 61 | github-code | 13 |
19343690694 | from socket import *
from threading import Thread
import time
from sys import getsizeof
from os import _exit
import os
import sys
# Protocol constants shared by the sender routines below.
port = 10080
bufferSize = 1400
headerSize = 48
# [packets sent this interval, packets acked this interval, estimated RTT]
SR_G_AV = [0, 0, 0]
# opens file with file name and type, and returns 0 when file open is failed
def getfile(filename, type):
    """Open *filename* (relative to the script's directory) with mode *type*.

    Returns the open file object, or 0 when the file cannot be opened --
    callers treat 0 as the failure sentinel.
    """
    try:
        return open(os.path.join(sys.path[0], filename), type)
    except (OSError, ValueError):
        # Narrowed from a bare ``except``: OSError covers missing/unreadable
        # files, ValueError covers an invalid mode string.
        return 0
# Add the header to the data with the given packet number
def addheader(pktnum, data):
    """Prefix *data* with the packet number, NUL-padded into a fixed header.

    NOTE(review): the padding loop compares sys.getsizeof(header) -- the
    Python *object* size, which includes interpreter overhead -- against
    headerSize rather than len(header), so the actual header byte length is
    smaller than headerSize. The receiver must use the same convention for
    the framing to line up; confirm before changing this to len().
    """
    header = (str(pktnum)).encode()
    while getsizeof(header) < headerSize:
        header = header + b'\0'
    return header + data
# Writes the contents to the log file
def logfilewrite(flogptr, starttime):
    """Every 2 seconds, append a stats line (elapsed time, estimated RTT,
    send rate, goodput) to *flogptr* and reset the per-interval counters.

    Runs forever; intended to be driven on its own thread. SR_G_AV is the
    module-global [send count, goodput count, estimated RTT] updated by SEM.
    """
    while True:
        time.sleep(2.0)
        timediff = time.time() - starttime
        # Counters accumulate for 2 s, so dividing by 2 yields packets/sec.
        content = f'{timediff:0.3f}' + "\t|\t" + f'{SR_G_AV[2]:0.3f}' + "\t|\t" + str(SR_G_AV[0] / 2) + "pkts/sec" \
                  + "\t|\t" + str(SR_G_AV[1] / 2) + "pkts/sec" + "\n"
        flogptr.write(content)
        flogptr.flush()
        SR_G_AV[0] = 0
        SR_G_AV[1] = 0
# Sender module : sends the packets to Receiver module
# Sender module : sends the packets to Receiver module
def SEM(Socket, ip, wndSize):
    """Sliding-window UDP sender.

    Measures a sample RTT, then repeatedly fills the window with numbered
    packets and processes ACKs: RTT-smoothed timeout (TCP-style EWMA),
    window doubling up to 4, window collapse to 1 on timeout, and halving
    on 3 duplicate ACKs. Uses module globals: port, headerSize, genpacket,
    and SR_G_AV = [send counter, goodput counter, smoothed RTT].
    Runs forever; started as a background Thread from __main__.
    """
    # Measure the sample RTT
    sampstart = time.time()
    Socket.sendto(b'sample', (ip, port))
    recvmsg,_addr = Socket.recvfrom(headerSize)
    if recvmsg == b'sampleOK':
        sampleRTT = time.time() - sampstart
    else:
        print('something went wrong')
        exit(1)
    print('samplertt : ' + str(sampleRTT))
    # Set the values of timeout, avgRTT and devRTT (TCP RTO formula: RTT + 4*dev)
    SR_G_AV[2] = sampleRTT
    devRTT = 0.01
    timeoutVal = SR_G_AV[2] + 4 * devRTT
    # Variables for receiving packet
    recentAck = -1
    recvAck = -1
    duplicateAck = -1
    window = []
    is_first_pkt = 1
    is_first_pkt_sent = 0
    while True:
        # If window has the left space to sent
        while len(window) < wndSize:
            # When window is empty
            if len(window) == 0:
                expectAck = recentAck + 1
            # When window is not empty
            else:
                expectAck = window[len(window) - 1] + 1
            # Sends the generated packet
            Socket.sendto(addheader(expectAck, genpacket), (ip, port))
            if is_first_pkt_sent == 0 and is_first_pkt == 1:
                # Arm the timeout and remember which ACK will be RTT-sampled.
                Socket.settimeout(timeoutVal)
                nowTime = time.time()
                is_first_pkt_sent = 1
                sampleAck = expectAck
            window.append(expectAck)
            SR_G_AV[0] += 1 # Increases the sending rate
        # After sending
        while True:
            # Receive ack from the receiver
            try:
                recvbyte, recvaddr = Socket.recvfrom(headerSize)
            # When timeout occurs, set the timeout again
            # NOTE(review): bare `except` also catches non-timeout errors
            # (e.g. decode/socket errors) and treats them as timeouts.
            except:
                # If the timeout is occurred after the ack receive
                window = []
                wndSize = 1
                is_first_pkt = 1
                is_first_pkt_sent = 0
                break
            recvAck = int(recvbyte.decode())
            SR_G_AV[1] += 1 # Increases the goodput
            # First Ack before the timeout occurs
            if is_first_pkt == 1 and recvAck == sampleAck:
                # Calculate the timeout using the RTT (EWMA: dev 0.75/0.25, RTT 0.875/0.125)
                sampleRTT = time.time() - nowTime
                devRTT = 0.75 * devRTT + 0.25 * abs(sampleRTT - SR_G_AV[2])
                SR_G_AV[2] = 0.875 * SR_G_AV[2] + 0.125 * sampleRTT
                timeoutVal = SR_G_AV[2] + 4 * devRTT
                is_first_pkt = 0
            if time.time() - nowTime > timeoutVal:
                wndSize += 1
                is_first_pkt = 1
                is_first_pkt_sent = 0
            elif wndSize <= 4:
                wndSize *= 2
            if window == []:
                break
            # If the received ack is the right order
            if recvAck == window[0]:
                duplicateAck = 0
                recentAck = recvAck
                window.pop(0)
                break
            # If the received ack is larger than the expected ack
            elif recvAck > window[0]:
                duplicateAck = 0
                recentAck = recvAck
                # pops the elements in the window while the right one is found or window becomes empty
                # NOTE(review): this only pops while the head EQUALS recvAck;
                # heads smaller than recvAck are never popped — confirm intent.
                while len(window) != 0 and recvAck == window[0]:
                    window.pop(0)
                break
            # If Ack is duplicated
            elif recvAck == recentAck:
                duplicateAck += 1
                # For 3 duplicate Ack
                if duplicateAck >= 3:
                    if wndSize != 1:
                        wndSize //= 2 # Reduce the window size as its half
                    window = []
                    duplicateAck = 0
                    # Drain stale ACKs with a shortened timeout before resending.
                    Socket.settimeout(timeoutVal / 2)
                    try:
                        while True:
                            recvbyte, recvaddr = Socket.recvfrom(headerSize)
                            SR_G_AV[1] += 1
                    except:
                        is_first_pkt = 1
                        is_first_pkt_sent = 0
                    break
            # If Ack is smaller than recently recevied packet, just ignore it
# If Ack is smaller than recently recevied packet, just ignore it
if __name__ == "__main__":
    # Generates the packet only exists for null
    # (double the NUL payload until sys.getsizeof reaches bufferSize, then
    # trim so header + payload fits the 1400-byte buffer)
    genpacket = b'\0'
    while getsizeof(genpacket) < bufferSize:
        genpacket += genpacket
    genpacket = genpacket[:bufferSize - headerSize]
    # Gets the ip address and initial window size
    print("Receiver IP address: ", end ="")
    ip = input()
    print("start initial window size : ", end ="")
    wndSize = int(input())
    # Bind the socket and send the starting message
    print("Sending Start")
    senderSocket = socket(AF_INET, SOCK_DGRAM)
    senderSocket.bind(('', 0))  # ephemeral local port chosen by the OS
    senderSocket.sendto(b'send', (ip, port))
    # Open the log file corresponding to port number
    portnum = senderSocket.getsockname()
    portnum = str(portnum[1])
    print(portnum)
    flogptr = getfile(portnum + '_log.txt', 'wt')
    # NOTE(review): getfile returns 0 on failure; flogptr is not checked here.
    startTime = time.time()
    # Runs the log file writing thread
    Thread(target=logfilewrite, args=(flogptr, startTime)).start()
    # Runs the sender module to send the packet
    Thread(target=SEM, args=(senderSocket, ip, wndSize)).start()
    # When user enters stop, exit the program
    # (os._exit is used so the non-daemon worker threads are killed too)
    while True:
        state = input()
        if state == 'stop':
            senderSocket.sendto(b'end', (ip, port))
            flogptr.close()
            senderSocket.close()
            _exit(0)
| sinclairr08/university-courses | 2018-2-computer-networks/HW5/sender.py | sender.py | py | 6,638 | python | en | code | 0 | github-code | 13 |
def maxProfit(prices):
    """Maximum profit with unlimited buy/sell transactions (hold at most one share).

    dp[i][0] = best profit from day i onward when free to buy,
    dp[i][1] = best profit from day i onward while holding a share.

    Fixed: the base case was written `dp[n][0] = dp[n][0] = 0`, leaving
    dp[n][1] at its -1 sentinel instead of 0.
    """
    n = len(prices)
    # Base row dp[n] = [0, 0]; interior rows are overwritten below.
    dp = [[0] * 2 for _ in range(n + 1)]
    for ind in range(n - 1, -1, -1):
        # Free to buy today: either buy now (pay prices[ind]) or skip.
        dp[ind][0] = max(-prices[ind] + dp[ind + 1][1], dp[ind + 1][0])
        # Holding a share: either sell now (gain prices[ind]) or keep holding.
        dp[ind][1] = max(prices[ind] + dp[ind + 1][0], dp[ind + 1][1])
    return dp[0][0]
print(maxProfit([7, 1, 5, 3, 6, 4])) | Fragman228/Algoritmi2 | Дз_26.10/Ефыл_4.py | Ефыл_4.py | py | 459 | python | en | code | 0 | github-code | 13 |
38156423491 | import json
import pandas as pd
from datetime import datetime
import requests
def json2dfConversion(jsonText, intervals):
    """Parse a Yahoo Finance v8 chart JSON payload into an OHLC DataFrame.

    The frame is indexed by a 'Date' column built from the payload's epoch
    timestamps: calendar dates for daily bars ("1d"), full datetimes for
    intraday/other intervals.
    """
    payload = json.loads(jsonText)
    result = payload['chart']['result'][0]
    quote = result['indicators']['quote'][0]
    stamps = result['timestamp']
    frame = pd.DataFrame.from_dict(quote)
    if intervals == "1d":
        convert = lambda ts: datetime.fromtimestamp(ts).date()
    else:
        convert = datetime.fromtimestamp
    frame['Date'] = [convert(ts) for ts in stamps]
    frame.index = frame['Date']
    return frame
def yahooDataV8(sym, start, end=None, interval="1d"): # interval=1m, 2m, 5m, 15m, 30m, 1h, 5d, 1w, 1mo
    """Fetch OHLC history for `sym` from Yahoo Finance's v8 chart API.

    sym      : ticker symbol
    start/end: '%Y-%m-%d' strings; end defaults to today (evaluated per call,
               not frozen at import time as before)
    interval : one of the bar sizes listed above; raises ValueError otherwise

    Returns a DataFrame via json2dfConversion.
    """
    if end is None:
        end = datetime.today().strftime('%Y-%m-%d')
    if interval not in ["1m", "2m", "5m", "15m", "30m", "1h", "1d","5d", "1wk", "1mo"]:
        raise ValueError("Invalid Parameter: interval!")
    header = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9'}
    baseurl = "https://query1.finance.yahoo.com/v8/finance/chart/" + sym
    # Fixed: strftime("%s") is a non-portable glibc extension (fails on
    # Windows); datetime.timestamp() gives the same local-time epoch portably.
    x = int(datetime.strptime(start, '%Y-%m-%d').timestamp())
    y = int(datetime.strptime(end, '%Y-%m-%d').timestamp())
    url = baseurl + "?period1=" + str(x) + "&period2=" + str(y) + "&interval=" + interval +"&events=history"
    print(url)
    jsonOutput = requests.get(url, headers=header)
    return json2dfConversion(jsonOutput.text, interval)
10660373295 | from django import forms
from crispy_forms.helper import *
from crispy_forms.bootstrap import *
from crispy_forms.layout import *
from .models import Welder
from .models import PerformanceQualification
from .models import WelderHistory
from core.models import WelderStampLov
class WelderCreateForm(forms.ModelForm):
    """Create a Welder; the stamp dropdown only offers stamps not already
    assigned to another welder."""
    def __init__(self, *args, **kwargs):
        super(WelderCreateForm, self).__init__(*args, **kwargs)
        # Only stamps that no existing welder currently holds.
        self.fields['welder_stamp'] = forms.ModelChoiceField(queryset=WelderStampLov.objects.exclude(id__in=Welder.objects.values_list('welder_stamp', flat=True)), label=('Welder Stamp'))
        self.helper = FormHelper(self)
        self.helper.form_method = 'POST'
        self.helper.form_class = 'form-horizontal'
        self.helper.label_class = 'col-lg-2'
        self.helper.field_class = 'col-lg-8'
        self.helper.add_input(Submit('submit', 'Save Welder'))

    class Meta:
        model = Welder
        # Fixed: Django requires `exclude` to be a list/tuple; the string
        # '__all__' raises at class-creation time. The intent ("all fields")
        # is spelled with `fields`.
        fields = '__all__'
class WelderUpdateForm(forms.ModelForm):
    """Edit a Welder; the stamp dropdown offers every unassigned stamp plus
    the stamp currently held by the welder being edited."""
    def __init__(self, *args, **kwargs):
        self.current_welder_id = kwargs.pop('current_welder_id', None)
        super(WelderUpdateForm, self).__init__(*args, **kwargs)
        # Combine the available welder_stamp list with the currently assigned stamp
        welder_stamp = WelderStampLov.objects.filter(id__in=Welder.objects.values_list('welder_stamp', flat=True).filter(pk=self.current_welder_id))
        assigned_welder_stamp = WelderStampLov.objects.exclude(id__in=Welder.objects.values_list('welder_stamp', flat=True))
        available_welder_stamp_queryset = welder_stamp | assigned_welder_stamp
        self.fields['welder_stamp'] = forms.ModelChoiceField(queryset=available_welder_stamp_queryset)
        self.helper = FormHelper(self)
        self.helper.form_method = 'POST'
        self.helper.form_class = 'form-horizontal'
        self.helper.label_class = 'col-lg-2'
        self.helper.field_class = 'col-lg-8'
        self.helper.add_input(Submit('submit', 'Update Welder', css_class='btn-default'))

    class Meta:
        model = Welder
        # Fixed: Django requires `exclude` to be a list/tuple; the string
        # '__all__' raises at class-creation time. The intent ("all fields")
        # is spelled with `fields`.
        fields = '__all__'
class PerformanceQualificationCreateForm(forms.ModelForm):
    """ModelForm for creating a PerformanceQualification.

    Rendered by crispy-forms as a horizontal Bootstrap form; the owning
    welder is excluded because the view assigns it.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        helper = FormHelper(self)
        helper.form_method = 'POST'
        helper.form_class = 'form-horizontal'
        helper.label_class = 'col-lg-2'
        helper.field_class = 'col-lg-8'
        helper.add_input(Submit('submit', 'Save Performance Qualification', css_class='btn-default'))
        self.helper = helper

    class Meta:
        model = PerformanceQualification
        exclude = [ 'welder' ]
class PerformanceQualificationUpdateForm(forms.ModelForm):
    """ModelForm for editing a PerformanceQualification.

    Same crispy-forms layout as the create form, with an "Update" submit
    button; the owning welder is excluded because the view assigns it.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        helper = FormHelper(self)
        helper.form_method = 'POST'
        helper.form_class = 'form-horizontal'
        helper.label_class = 'col-lg-2'
        helper.field_class = 'col-lg-8'
        helper.add_input(Submit('submit', 'Update Performance Qualification', css_class='btn-default'))
        self.helper = helper

    class Meta:
        model = PerformanceQualification
        exclude = [ 'welder' ]
class WelderHistoryCreateForm(forms.ModelForm):
    """ModelForm for creating a WelderHistory entry.

    Horizontal Bootstrap layout via crispy-forms; the owning welder is
    excluded because the view assigns it.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        helper = FormHelper(self)
        helper.form_method = 'POST'
        helper.form_class = 'form-horizontal'
        helper.label_class = 'col-lg-2'
        helper.field_class = 'col-lg-8'
        helper.add_input(Submit('submit', 'Save Welder History'))
        self.helper = helper

    class Meta:
        model = WelderHistory
        exclude = [ 'welder' ]
class WelderHistoryUpdateForm(forms.ModelForm):
    """ModelForm for editing a WelderHistory entry.

    Horizontal Bootstrap layout via crispy-forms with an "Update" submit
    button; the owning welder is excluded because the view assigns it.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        helper = FormHelper(self)
        helper.form_method = 'POST'
        helper.form_class = 'form-horizontal'
        helper.label_class = 'col-lg-2'
        helper.field_class = 'col-lg-8'
        helper.add_input(Submit('submit', 'Update Welder History', css_class='btn-default'))
        self.helper = helper

    class Meta:
        model = WelderHistory
        exclude = [ 'welder' ]
11622968171 | #!/usr/bin/env python3
import csv
import os
import math
from tqdm import tqdm
from argparse import ArgumentParser
# CLI: -d points at the iCloud Photos export directory (which contains the
# per-batch CSV metadata files); -c additionally converts HEIC to JPG.
parser = ArgumentParser()
parser.add_argument("-d", "--directory", dest="directory",
                    help="directory path to /Photos/", metavar="DIR")
parser.add_argument("-c", "--convert-HEIC-to-JPG",
                    action="store_true", dest="convert_HEIC_to_JPG", default=False,
                    help="convert HEIC images to JPG")
args = parser.parse_args()

PHOTO_INDEX = {} # mapping from original filename to its new date-prefixed filename (str)

# Column indices of the parsed CSV metadata row (see Img.__init__).
TIME_IDX = 4
AM_PM_IDX = 5
DAY_IDX = 2
MONTH_IDX = 1
YEAR_IDX = 3

# English month name -> month number, for the CSV's textual month column.
MONTH_DICT = {
    "January": 1,
    "February": 2,
    "March": 3,
    "April": 4,
    "May": 5,
    "June": 6,
    "July": 7,
    "August": 8,
    "September": 9,
    "October": 10,
    "November": 11,
    "December": 12
}
class Img():
    """One photo plus the timestamp parsed from its CSV metadata row."""

    def __init__(self, img_name, date_string):
        self.img_name = img_name
        self.date_string = date_string
        self.hours, self.minutes = self.convert_time(date_string[TIME_IDX],
                                                     date_string[AM_PM_IDX])
        self.day = int(date_string[DAY_IDX])
        self.month = MONTH_DICT[date_string[MONTH_IDX]]
        self.year = int(date_string[YEAR_IDX])
        self.formatted_name = self.format_name()

    def convert_time(self, time, am_pm):
        """Convert a 12-hour "H:MM" string plus "AM"/"PM" to (hours, minutes).

        Fixed: the previous float-based version added 12 to *any* PM time,
        so "12:30 PM" became hour 24, and "12:30 AM" wrongly stayed at 12.
        """
        hours_str, minutes_str = time.split(":")
        hours = int(hours_str)
        minutes = int(minutes_str)
        if am_pm == "PM" and hours != 12:
            hours += 12
        elif am_pm == "AM" and hours == 12:
            hours = 0
        return hours, minutes

    def format_name(self):
        """Sortable filename: YYYY-MM-DD_HH-MM_<original name>."""
        return f"{self.year}-{self.month:02d}-{self.day:02d}_{self.hours:02d}-{self.minutes:02d}_{self.img_name}"
def build_photo_index(directory, csv_filename):
    """Read one metadata CSV and record filename -> date-prefixed filename
    mappings into the module-global PHOTO_INDEX.

    Parsing quirk: the file is read with space as the delimiter and '|' as
    quotechar, then each row is re-joined with commas and split again — so
    fields end up separated on both spaces and commas. Presumably this
    matches the iCloud CSV export format; verify against a sample file.
    """
    csv_filepath = os.path.join(directory, csv_filename)
    assert os.path.exists(csv_filepath), f"no such file: {csv_filepath}"
    with open(csv_filepath, newline='') as csvfile:
        reader = csv.reader(csvfile, delimiter=' ', quotechar='|')
        next(reader) # skipping header
        for row in reader:
            r = ",".join(row)
            # First token is the image filename; the rest is the date parts.
            img_name = r.split(",")[0]
            date_string = r.split(",")[1:]
            img = Img(img_name = img_name, date_string = date_string)
            PHOTO_INDEX[img.img_name] = img.formatted_name
def build_index(directory):
    """Populate PHOTO_INDEX from every CSV metadata file in `directory`."""
    assert os.path.exists(directory), f"directory {directory} not found"
    assert os.path.isdir(directory), f"{directory} is not a directory"
    csv_names = [name for name in sorted(os.listdir(directory)) if name.endswith(".csv")]
    for csv_name in csv_names:
        print(f"Processing {csv_name}")
        build_photo_index(directory=directory, csv_filename=csv_name)
def sort_images(directory, convert_HEIC_to_JPG):
    """Rename every non-CSV file in `directory` to its date-prefixed name
    from PHOTO_INDEX; optionally convert renamed .HEIC files to .JPG via
    the external `heif-convert` tool (the .HEIC original is then deleted).

    .MOV files without their own metadata fall back to the metadata of the
    matching .HEIC (Live Photo pairs share a timestamp).
    """
    file_list = sorted(os.listdir(directory))
    for f in tqdm(file_list):
        if not f.endswith(".csv"):
            if PHOTO_INDEX.get(f):
                new_filename = PHOTO_INDEX.get(f)
            elif PHOTO_INDEX.get(f.replace(".MOV", ".HEIC")):
                # for .MOV, use .HEIC metadata if no .MOV metadata is available
                new_filename = PHOTO_INDEX.get(f.replace(".MOV", ".HEIC")).replace(".HEIC", ".MOV")
            else:
                print(f"file {f}: no metadata found, thus keeping this file as is")
                continue
            old_filepath = os.path.join(directory, f)
            new_filepath = os.path.join(directory, new_filename)
            os.rename(old_filepath, new_filepath)
            if convert_HEIC_to_JPG and new_filename.endswith(".HEIC"):
                # Shell out to heif-convert; stdout is discarded to keep tqdm clean.
                os.system(f"heif-convert -q 100 {new_filepath} {new_filepath.replace('.HEIC', '.JPG')} > /dev/null")
                os.remove(new_filepath)
if __name__ == "__main__":
    # First index all CSV metadata, then rename (and optionally convert) files.
    build_index(directory = args.directory)
    sort_images(directory = args.directory,
                convert_HEIC_to_JPG = args.convert_HEIC_to_JPG)
| rgeirhos/linux-sort-iCloud-photos | sort_photos.py | sort_photos.py | py | 3,908 | python | en | code | 5 | github-code | 13 |
import cv2
# # define a video capture object
# vid = cv2.VideoCapture(0)
# while(True):
#     # Capture the video frame
#     # by frame
#     ret, frame = vid.read()
#     # Display the resulting frame
#     cv2.imshow('frame', frame)
#     cv2.waitKey(1)#waits for 1 ms
# Grab a single frame from the default camera and paste an image into a
# user-selected region of it.
cam = cv2.VideoCapture(0)
result,i = cam.read()
# NOTE(review): `result` (capture success flag) is never checked; with no
# camera attached, `i` is None and imshow below will fail — confirm intended.
cv2.imshow("Image",i)
# selectROI returns (x, y, w, h) of the user's rectangle.
r = cv2.selectROI("Select image area of interest",i)
print(r)
k = cv2.imread("./images/download.png")
# Resize the overlay to the ROI's width/height, then paste it in place.
resizedK = cv2.resize(k,(r[2],r[3]))
cv2.imshow("Image",resizedK)
i[r[1]:r[1]+r[3],r[0]:r[0]+r[2]] = resizedK;
cv2.imshow("Final",i)
# Block until a key is pressed so the windows stay visible.
cv2.waitKey(0)
| kunal118/Edge-ai | cameraFeed.py | cameraFeed.py | py | 631 | python | en | code | 0 | github-code | 13 |
23989363539 | """
Script calculates Eurasian snow area index for October-November following the
methods of Peings et al. 2017 in ERA-Interim (land)
Notes
-----
Author : Zachary Labe
Date : 24 July 2019
"""
### Import modules
import datetime
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as sts
import scipy.signal as SS
import read_Reanalysis as MOR
### Define directories
directoryfigure = '/home/zlabe/Desktop/'
directoryoutput = '/home/zlabe/Documents/Research/AMIP/Data/'
### Define time
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
titletime = currentmn + '/' + currentdy + '/' + currentyr
print('\n' '----Calculating Snow Cover Area Index - %s----' % titletime)
#### Alott time series
year1 = 1979
year2 = 2015
years = np.arange(year1,year2+1,1)
### Add parameters
varnames = 'SNC'
def readVar(varnames):
    """Read ERA-Interim surface snow cover and average over Oct-Nov.

    Returns (eraq, lat, lon, lev) where eraq is the October-November mean
    of the monthly SNC field (months indexed 9:11).
    """
    ### Call function to read in ERA-Interim
    lat, lon, time, lev, era = MOR.readDataR('SNC', 'surface', False, True)
    ### Average October and November for the index
    eraq = np.nanmean(era[:, 9:11, :, :], axis=1)
    return eraq, lat, lon, lev
###############################################################################
### Read in data functions
era,lat,lon,lev = readVar(varnames)

### Slice over region of interest for Eurasia (40-80N,35-180E)
latq = np.where((lat >= 40) & (lat <= 80))[0]
lonq = np.where((lon >= 35) & (lon <=180))[0]
latn = lat[latq]
lonn = lon[lonq]
lon2,lat2 = np.meshgrid(lonn,latn)
# Subset latitude first, then longitude (era is [year, lat, lon]).
eralat = era[:,latq,:]
eralon = eralat[:,:,lonq]
eraslice = eralon.copy()
### Calculate sea ice extent
def calcExtent(snowq,lat2):
    """
    Calculate snow cover extent from snow concentration grids following
    the methods of Robinson et al. 1993 [BAMS]

    A grid cell counts as snow-covered when its concentration strictly
    exceeds 50%; its area is 58466.1 * cos(latitude) km^2 (1.9 x 2.5 grid).
    NaN (missing) concentrations never count as covered.

    Parameters
    ----------
    snowq : ndarray, shape (time, lat, lon), snow concentration in percent
    lat2  : ndarray, shape (lat, lon), latitudes in degrees

    Returns
    -------
    ext : ndarray, shape (time,), snow extent in 10^6 km^2
    """
    ### Extent is a binary 0 or 1 for 50% snow threshold
    thresh=50.
    # Vectorized replacement for the original per-cell triple loop
    # (identical result: NaN > thresh evaluates False, so missing data
    # is excluded exactly as before).
    covered = snowq > thresh
    cellarea = 58466.1 * np.cos(np.radians(lat2))
    ext = np.sum(covered * cellarea, axis=(1, 2)) / 1e6
    return ext
### Calculate snow cover area
snowarea = calcExtent(eraslice,lat2)

### Calculate detrended snow index (remove the linear 1979-2015 trend)
snowareaindexdt = SS.detrend(snowarea,type='linear')
#### Save both indices
#np.savetxt(directoryoutput + 'SNA_Eurasia_ON_ERAi.txt',
# np.vstack([years,snowarea]).transpose(),delimiter=',',fmt='%3.1f',
# footer='\n Snow cover index calculated for the' \
# '\n ERA-Interim reanalysis from 1979-2015\n' \
# ' in Oct-Nov (AREA)',newline='\n\n')
#np.savetxt(directoryoutput + 'SNA_Eurasia_ON_ERAi_DETRENDED.txt',
# np.vstack([years,snowareaindexdt]).transpose(),delimiter=',',fmt='%3.1f',
# footer='\n Snow cover index calculated for the' \
# '\n ERA-Interim reanalysis from 1979-2015\n' \
# ' in Oct-Nov ---> detrended data (AREA)',newline='\n\n') | zmlabe/AMIP_Simu | Scripts/calc_SNA_Data_Eurasia_Reanalysis.py | calc_SNA_Data_Eurasia_Reanalysis.py | py | 3,368 | python | en | code | 1 | github-code | 13 |
11708171337 | import tensorflow.keras.backend as K
# from tensorflow.keras.backend import _to_tensor
from tensorflow.keras.losses import binary_crossentropy, mean_squared_error, mean_absolute_error
import tensorflow as tf
def angle_rmse(pred, labels):
    """Per-sample RMSE between argmax-decoded class indices, masking out
    positions whose ground-truth argmax is class 0 (treated as padding).
    NOTE(review): byte-for-byte identical to `lstm_rmse` below — candidates
    for consolidation.
    """
    # calculate mask
    pred = tf.cast(tf.argmax(pred, axis=-1), tf.float32)
    labels = tf.cast(tf.argmax(labels, axis=-1), tf.float32)
    mask = tf.cast(tf.not_equal(labels, 0), tf.float32)
    # apply mask
    labels = labels * mask
    pred = pred * mask
    # calculate score
    score = tf.math.sqrt(mean_squared_error(y_pred=pred, y_true=labels))
    score = tf.reduce_mean(score, axis=1)
    return score
def lstm_rmse(pred, labels):
    """Identical to `angle_rmse`; kept as a separate name, presumably so
    model configs can reference either — confirm before merging.
    """
    # calculate mask
    pred = tf.cast(tf.argmax(pred, axis=-1), tf.float32)
    labels = tf.cast(tf.argmax(labels, axis=-1), tf.float32)
    mask = tf.cast(tf.not_equal(labels, 0), tf.float32)
    # apply mask
    labels = labels * mask
    pred = pred * mask
    # calculate score
    score = tf.math.sqrt(mean_squared_error(y_pred=pred, y_true=labels))
    score = tf.reduce_mean(score, axis=1)
    return score
def kld_loss_masked(labels, predictions):
    """KL divergence summed over the batch axis, masking samples whose label
    distribution sums to zero. NOTE(review): the mask is cast to float64
    while the KL tensor is typically float32 — confirm dtypes at call sites.
    """
    mask = tf.cast(tf.not_equal(tf.reduce_sum(labels, axis=-1), 0), tf.float64)
    kl_loss = tf.losses.kullback_leibler_divergence(labels, predictions)
    kl_loss = kl_loss * mask
    kl_loss = tf.reduce_sum(kl_loss, axis=0)
    return tf.reduce_mean(kl_loss)
def dice_coef_clipped(y_true, y_pred, smooth=1.0):
    """Dice coefficient (in percent) on binarized (rounded) inputs."""
    y_true_f = K.flatten(K.round(y_true))
    y_pred_f = K.flatten(K.round(y_pred))
    intersection = K.sum(y_true_f * y_pred_f)
    return 100. * (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coef(y_true, y_pred, smooth=1.0):
    """Soft Dice coefficient on raw probabilities."""
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_without_background(y_true, y_pred, smooth=1e-7):
    """Soft Dice over all channels except the last (assumed background)."""
    y_true_f = K.flatten(y_true[..., :-1])
    y_pred_f = K.flatten(y_pred[..., :-1])
    intersection = K.sum(y_true_f * y_pred_f)
    denominator = K.sum(y_true_f + y_pred_f)
    return (2. * intersection + smooth) / (denominator + smooth)
def dice_loss_without_background(y_true, y_pred):
    return 1 - dice_without_background(y_true, y_pred)
def bce_dice_softmax(y_true, y_pred):
    """Categorical crossentropy plus Dice loss (background channel excluded)."""
    return tf.keras.losses.CategoricalCrossentropy()(y_true, y_pred) + dice_loss_without_background(y_true, y_pred)
def bootstrapped_crossentropy(y_true, y_pred, bootstrap_type='hard', alpha=0.95):
    """Bootstrapped binary crossentropy: blends the target with the model's
    own prediction ('soft') or its 0.5-thresholded prediction ('hard'),
    weighted by alpha. Expects y_pred as probabilities; they are clipped
    and converted to logits before the sigmoid crossentropy.
    """
    target_tensor = y_true
    prediction_tensor = y_pred
    _epsilon = tf.convert_to_tensor(K.epsilon(), prediction_tensor.dtype.base_dtype)
    prediction_tensor = tf.clip_by_value(prediction_tensor, _epsilon, 1 - _epsilon)
    # logit(p) = log(p / (1 - p))
    prediction_tensor = tf.math.log(prediction_tensor / (1 - prediction_tensor))
    if bootstrap_type == 'soft':
        bootstrap_target_tensor = alpha * target_tensor + (1.0 - alpha) * K.sigmoid(prediction_tensor)
    else:
        bootstrap_target_tensor = alpha * target_tensor + (1.0 - alpha) * K.cast(
            K.sigmoid(prediction_tensor) > 0.5, tf.float32)
    return K.mean(tf.nn.sigmoid_cross_entropy_with_logits(
        labels=bootstrap_target_tensor, logits=prediction_tensor))
def wing_loss(landmarks, labels, w=10.0, epsilon=2.0):
    """Wing loss for landmark regression: logarithmic near zero error,
    linear (offset by c) for errors above w.
    Arguments:
        landmarks, labels: float tensors with shape [batch_size, num_landmarks, 2].
        w, epsilon: a float numbers.
    Returns:
        a float tensor with shape [].
    """
    with tf.name_scope('wing_loss'):
        x = landmarks - labels
        c = w * (1.0 - tf.math.log(1.0 + w / epsilon))
        absolute_x = tf.abs(x)
        losses = tf.where(
            tf.greater(w, absolute_x),
            w * tf.math.log(1.0 + absolute_x / epsilon),
            absolute_x - c
        )
        loss = tf.reduce_mean(tf.reduce_sum(losses, axis=[1, 2]), axis=0)
        return loss
def cos_loss_angle(y_true, y_pred):
    """Angle loss 2*(1 - cos(pred - true)), summed.

    For small differences this approximates (pred - true)^2 while staying
    periodic in 2*pi. (Docstring re-typeset; the original contained garbled
    math glyphs.)
    """
    loss = 2 * (1 - tf.math.cos(y_pred - y_true))
    loss = tf.reduce_sum(loss)
    return loss
def kld_loss(labels, predictions):
    """KL divergence for the angle network: summed over the batch axis,
    then averaged over the remaining axes.
    """
    kl_loss = tf.losses.kullback_leibler_divergence(labels, predictions)
    kl_loss = tf.reduce_sum(kl_loss, axis=0)
    return tf.reduce_mean(kl_loss)
def masked_cos_loss_angle(y_true, y_pred):
    """Cosine angle loss applied only where y_true is non-zero (zeros are
    treated as padding). Commented-out code below is a retained experiment
    with a near-0/near-180-degree alternative loss.
    """
    mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)
    # y_true = tf.gather(y_true, tf.where(mask > 0.0, y_true))
    #
    # y_pred = tf.gather_nd(y_pred, tf.where(mask > 0.0, y_pred), )
    y_true = y_true * mask
    y_pred = y_pred * mask
    # 175 degree threshold to change loss in radians
    # NOTE(review): thresh_175/thresh_5 are currently unused (experiment leftovers).
    thresh_175 = 3.05433
    thresh_5 = 0.0872665
    # loss = 2 * (1 - tf.math.cos(y_pred - y_true)) + 4 - 4 * (1 - tf.math.square(tf.math.cos(y_pred - y_true)))
    # losses = tf.where(
    #     tf.logical_or(tf.greater(y_true, thresh_175), tf.greater(thresh_5, y_true)),
    #     4 - 4 * (1 - tf.math.square(tf.math.cos(y_pred - y_true))),
    #     2 * (1 - tf.math.cos(y_pred - y_true))
    # )
    losses = 2 * (1 - tf.math.cos(y_pred - y_true))
    loss = tf.reduce_mean(tf.reduce_sum(losses, axis=[1, 2]), axis=0)
    # loss = tf.reduce_sum(losses)
    return loss
def online_bootstrapping(y_true, y_pred, pixels=512, threshold=0.5):
    """ Implements Online Bootstrapping crossentropy loss, to train only on hard pixels,
    see https://arxiv.org/abs/1605.06885 Bridging Category-level and Instance-level Semantic Image Segmentation
    The implementation is a bit different as we use binary crossentropy instead of softmax
    SUPPORTS ONLY MINIBATCH WITH 1 ELEMENT!
    NOTE(review): uses the TF1-era `K.tf` alias; on modern TF/Keras this
    attribute no longer exists (would be `tf.nn.top_k` / `tf.gather` / `tf.where`).
    # Arguments
        y_true: A tensor with labels.
        y_pred: A tensor with predicted probabilites.
        pixels: number of hard pixels to keep
        threshold: confidence to use, i.e. if threshold is 0.7, y_true=1, prediction=0.65 then we consider that pixel as hard
    # Returns
        Mean loss value
    """
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    # Keep the `pixels` positions with the largest absolute error...
    difference = K.abs(y_true - y_pred)
    values, indices = K.tf.nn.top_k(difference, sorted=True, k=pixels)
    # ...and among those, only pixels whose error exceeds (1 - threshold).
    min_difference = (1 - threshold)
    y_true = K.tf.gather(K.gather(y_true, indices), K.tf.where(values > min_difference))
    y_pred = K.tf.gather(K.gather(y_pred, indices), K.tf.where(values > min_difference))
    return K.mean(K.binary_crossentropy(y_true, y_pred))
def dice_coef_loss_border(y_true, y_pred):
    """95% plain Dice loss + 5% Dice loss restricted to mask borders."""
    return (1 - dice_coef_border(y_true, y_pred)) * 0.05 + 0.95 * dice_coef_loss(y_true, y_pred)
def bce_dice_loss_border(y_true, y_pred):
    """95% plain Dice loss + 5% binary crossentropy restricted to mask borders."""
    return bce_border(y_true, y_pred) * 0.05 + 0.95 * dice_coef_loss(y_true, y_pred)
def dice_coef_border(y_true, y_pred):
    """Dice coefficient computed only on pixels inside a 21x21 border band
    around the mask boundary (see get_border_mask)."""
    border = get_border_mask((21, 21), y_true)
    border = K.flatten(border)
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    y_true_f = K.gather(y_true_f, tf.where(border > 0.5))
    y_pred_f = K.gather(y_pred_f, tf.where(border > 0.5))
    return dice_coef(y_true_f, y_pred_f)
def bce_border(y_true, y_pred):
    """Binary crossentropy computed only on border-band pixels."""
    border = get_border_mask((21, 21), y_true)
    border = K.flatten(border)
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    y_true_f = K.gather(y_true_f, tf.where(border > 0.5))
    y_pred_f = K.gather(y_pred_f, tf.where(border > 0.5))
    return binary_crossentropy(y_true_f, y_pred_f)
def get_border_mask(pool_size, y_true):
    """Band around the mask edge: max-pool (dilate) foreground and background
    separately; their product is non-zero only near the boundary."""
    negative = 1 - y_true
    positive = y_true
    positive = K.pool2d(positive, pool_size=pool_size, padding="same")
    negative = K.pool2d(negative, pool_size=pool_size, padding="same")
    border = positive * negative
    return border
def dice_coef_loss(y_true, y_pred):
    return 1 - dice_coef(y_true, y_pred)
def dice_coef_loss_bce(y_true, y_pred, dice=0.5, bce=0.5, bootstrapping='hard', alpha=1.):
    """Weighted sum of bootstrapped binary crossentropy and Dice loss."""
    return bootstrapped_crossentropy(y_true, y_pred, bootstrapping, alpha) * bce + dice_coef_loss(y_true, y_pred) * dice
def dice_coef_loss_bce_weighted(y_true, y_pred, dice=0.5, bce=0.5, bootstrapping='hard', alpha=1.):
    """NOTE(review): identical to dice_coef_loss_bce — despite the name, no
    per-pixel weighting is applied. Kept for config compatibility."""
    return bootstrapped_crossentropy(y_true, y_pred, bootstrapping, alpha) * bce + dice_coef_loss(y_true, y_pred) * dice
def mse_masked(y_true, y_pred):
    """MSE with zero-valued targets masked out (treated as padding)."""
    mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)
    # y_true = tf.gather(y_true, tf.where(mask > 0.0, y_true))
    #
    # y_pred = tf.gather_nd(y_pred, tf.where(mask > 0.0, y_pred), )
    y_true = y_true * mask
    y_pred = y_pred * mask
    return mean_squared_error(y_true, y_pred)
def wing_masked(y_true, y_pred):
    """Wing loss with zero-valued targets masked out (treated as padding)."""
    mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)
    # y_true = tf.gather(y_true, tf.where(mask > 0.0, y_true))
    #
    # y_pred = tf.gather_nd(y_pred, tf.where(mask > 0.0, y_pred), )
    y_true = y_true * mask
    y_pred = y_pred * mask
    return wing_loss(y_pred, y_true)
def crossentropy_with_KL(y_true, y_pred):
    """Binary crossentropy plus 0.3 * KL(uniform || prediction), a
    confidence penalty pushing predictions toward uniform."""
    bce = K.binary_crossentropy(y_true, y_pred)
    kl = KL_to_uniform(y_pred)
    return bce + 0.3 * kl
def KL_to_uniform(y_pred):
    """Elementwise uniform * log(uniform / y_pred) for a 2-class uniform prior."""
    channels = 2
    y_pred = K.clip(y_pred, K.epsilon(), 1)
    uniform = K.ones_like(y_pred) / K.cast(channels, K.floatx())
    return uniform * K.log(uniform / y_pred)
def time_crossentropy(labels, pred):
    """Sum of bce_dice losses over the time axis (axis 1); commented-out
    lines are retained alternative formulations."""
    loss = 0
    for i in range(pred.shape[1]):
        loss += dice_coef_loss_bce(labels[:, i], pred[:, i], dice=0.8, bce=0.2, bootstrapping='soft', alpha=1)
        # loss += K.sum(tf.losses.kullback_leibler_divergence(labels[:, i] > 0.02, pred[:, i]))
        # loss += K.binary_crossentropy(labels[:, i], pred[:, i])
    return loss
def make_loss(loss_name):
    """Resolve a loss-function name from the training config to a callable.

    Raises ValueError for unknown names. Fixed: previously the final branch
    only *constructed* `ValueError("Unknown loss.")` without raising it, so
    an unknown name silently returned None. The stray `if` that broke the
    elif chain is also normalized (behavior unchanged — every branch returns).
    """
    if loss_name == 'crossentropy':
        return K.binary_crossentropy
    elif loss_name == 'crossentropy_time':
        return time_crossentropy
    elif loss_name == 'crossentropy_with_kl':
        return crossentropy_with_KL
    elif loss_name == 'crossentropy_boot':
        def loss(y, p):
            return bootstrapped_crossentropy(y, p, 'hard', 0.9)
        return loss
    elif loss_name == 'dice':
        return dice_coef_loss
    elif loss_name == 'bce_dice':
        def loss(y, p):
            return dice_coef_loss_bce(y, p, dice=0.8, bce=0.2, bootstrapping='soft', alpha=1)
        return loss
    elif loss_name == 'bce_dice_softmax':
        return bce_dice_softmax
    elif loss_name == 'boot_soft':
        def loss(y, p):
            return dice_coef_loss_bce(y, p, dice=0.8, bce=0.2, bootstrapping='soft', alpha=0.95)
        return loss
    elif loss_name == 'boot_hard':
        def loss(y, p):
            return dice_coef_loss_bce(y, p, dice=0.8, bce=0.2, bootstrapping='hard', alpha=0.95)
        return loss
    elif loss_name == 'online_bootstrapping':
        def loss(y, p):
            return online_bootstrapping(y, p, pixels=512, threshold=0.7)
        return loss
    elif loss_name == 'dice_coef_loss_border':
        return dice_coef_loss_border
    elif loss_name == 'bce_dice_weighted':
        def loss(y, p):
            return dice_coef_loss_bce_weighted(y, p, dice=0.8, bce=0.2, bootstrapping='soft', alpha=1)
        return loss
    elif loss_name == 'bce_dice_loss_border':
        return bce_dice_loss_border
    elif loss_name == 'mean_squared_error':
        return mean_squared_error
    elif loss_name == 'mean_squared_error_masked':
        return mse_masked
    elif loss_name == 'mean_absolute_error':
        return mean_absolute_error
    elif loss_name == 'wing':
        return wing_masked
    elif loss_name == 'cos_loss':
        return cos_loss_angle
    elif loss_name == 'masked_cos_loss':
        return masked_cos_loss_angle
    elif loss_name == 'kl_loss':
        return kld_loss
    elif loss_name == 'kl_loss_masked':
        return kld_loss_masked
    elif loss_name == 'angle_rmse':
        return angle_rmse
    else:
        raise ValueError(f"Unknown loss: {loss_name}")
| Justdjent/agrivision_challenge | research_code/losses.py | losses.py | py | 12,121 | python | en | code | 0 | github-code | 13 |
def decToRoman(num):
    """Convert a non-negative integer (intended range 0..20) to a Roman numeral.

    Fixed: the `num == 4` check used to sit INSIDE `while num >= 5`, which
    never runs when num is 4 — so 4 rendered as "IIII" and 14 as "XIIII".
    """
    ans = ""
    while num >= 10:
        ans += "X"
        num = num - 10
    # Subtractive forms must be checked before the additive loops.
    if num == 9:
        return ans + "IX"
    if num == 4:
        return ans + "IV"
    while num >= 5:
        ans += "V"
        num = num - 5
    while num > 0:
        ans += "I"
        num = num - 1
    return ans
def romanToDec(num):
    """Convert a Roman numeral using only the letters X, V, I to an integer.

    An 'I' directly before 'X' or 'V' is subtractive. Any other character
    prints an error message and makes the function return -1.
    """
    total = 0
    for pos, ch in enumerate(num):
        if ch == 'X':
            total += 10
        elif ch == 'V':
            total += 5
        elif ch == 'I':
            # Subtractive only when an X or V immediately follows.
            if pos < len(num) - 1 and num[pos + 1] in ('X', 'V'):
                total -= 1
            else:
                total += 1
        else:
            print("Error: Must contain 'X', 'V' and 'I' only. No spaces.")
            return -1
    return total
##main
def validateRoman(num):
    """Return True when `num` parses as a Roman numeral no greater than 20."""
    value = romanToDec(num)
    if value == -1:
        return False
    if value > 20:
        print("Roman numeral must not be greater than 20")
        return False
    return True
# Prompt until both inputs are valid Roman numerals <= 20, then print their sum.
firstNum = input("input first Roman numeral:")
while not validateRoman(firstNum):
    firstNum = input("input first Roman numeral:")
secondNum = input("input second Roman numeral:")
while not validateRoman(secondNum):
    secondNum = input("input second Roman numeral:")
mySum = romanToDec(firstNum) + romanToDec(secondNum)
print(decToRoman(mySum))
import sys
# Baekjoon 14888: n numbers, n-1 operators with given multiplicities;
# find max/min results of inserting operators left-to-right.
n = int(sys.stdin.readline())
A = list(map(int, sys.stdin.readline().split()))
operater = list(map(int, sys.stdin.readline().split()))  # counts of [+, -, *, //]
numberOfOperater = n - 1
# Sentinels just outside the problem's value range (|result| <= 1e9).
maxSolution = -1000000001
minSolution = 1000000001
check = [0] * 4          # how many of each operator are currently in use
solution = [0] * n       # running value after consuming A[0..i]
solution[0] = A[0]
def operate(a, b, x):
    """Apply operator x to (a, b): 0 is +, 1 is -, 2 is *, 3 is integer
    division truncated toward zero (the problem's required semantics,
    unlike Python's floor division for mixed signs)."""
    if x == 0:
        return a + b
    if x == 1:
        return a - b
    if x == 2:
        return a * b
    if x == 3:
        quotient = abs(a) // abs(b)
        return quotient if (a >= 0) == (b >= 0) else -quotient
def dfs(v):
    """Backtracking over operator assignments.

    v is the index of the last number consumed; solution[v] holds the value
    so far. When all n-1 operators are used, update the global max/min.
    """
    global maxSolution
    global minSolution
    global solution
    if(numberOfOperater == v):
        maxSolution = max(maxSolution, solution[v])
        minSolution = min(minSolution, solution[v])
        return
    else:
        for i in range(4):
            # Only use operator i if copies remain; take it, recurse, put it back.
            if check[i] < operater[i]:
                check[i] += 1
                solution[v+1] = operate(solution[v], A[v+1], i)
                dfs(v+1)
                check[i] -= 1
# Explore every assignment starting from A[0], then report the extremes.
dfs(0)
print(maxSolution)
print(minSolution)
8539924394 | import base64
import logging
import re
from datetime import datetime, timedelta
from html import escape
from pathlib import Path
from time import sleep
from phpserialize import serialize, unserialize
from slugify import slugify
from _db import database
from helper import helper
from settings import CONFIG
logging.basicConfig(format="%(asctime)s %(levelname)s:%(message)s", level=logging.INFO)
EPISODE_COVER = True
KEY_MAPPING = {
"Réalisé par": "dtcreator",
"Avec": "dtcast",
"Acteurs": "dtcast",
"Genre": "genres",
"Réalisateur": "dtcreator",
}
TAXONOMIES = {
"movies": [
"genres",
"dtcast",
# "cast_tv",
# "gueststar",
"dtdirector",
# "directors_tv",
# "country",
"dtyear",
],
"tvshows": [
"genres",
# "cast",
"dtcast",
# "gueststar",
# "directors",
"dtcreator",
# "country",
"dtyear",
],
}
class DoothemeHelper:
    def get_episode_title_and_language_and_number(self, episode_title: str) -> list:
        """Split a raw episode heading into [title, language, number].

        Language comes from a trailing "en vf"/"en vostfr" marker (default
        "VO"); the number from an "épisode N" pattern (empty string, plus an
        error-log entry, when no match). The annotation previously claimed
        ``str`` but a 3-element list is returned.
        NOTE(review): this method is redefined verbatim near the bottom of
        the class; the later definition silently wins.
        """
        title = episode_title.lower()
        if title.endswith("en vf"):
            language = "VF"
            title = title.replace("en vf", "").strip()
        elif title.endswith("en vostfr"):
            language = "VOSTFR"
            title = title.replace("en vostfr", "").strip()
        else:
            language = "VO"
        pattern = r"épisode\s(\d+(\.\d+)?)"
        match = re.search(pattern, title)
        if match:
            number = match.group(1)
        else:
            self.error_log(
                msg=f"Unknown episode number for: {title}",
                log_file="toroplay_get_episode_title_and_language_and_number.log",
            )
            number = ""
        title = title.title()
        return [title, language, number]
def generate_trglinks(
self,
server: str,
link: str,
lang: str = "English",
quality: str = "HD",
) -> str:
if "http" not in link:
link = "https:" + link
server_term_id, isNewServer = self.insert_terms(
post_id=0, terms=server, taxonomy="server"
)
lang_term_id, isNewLang = self.insert_terms(
post_id=0, terms=lang, taxonomy="language"
)
quality_term_id, isNewQuality = self.insert_terms(
post_id=0, terms=quality, taxonomy="quality"
)
link_data = {
"type": "1",
"server": str(server_term_id),
"lang": int(lang_term_id),
"quality": int(quality_term_id),
"link": base64.b64encode(bytes(escape(link), "utf-8")).decode("utf-8"),
"date": self.get_timeupdate().strftime("%d/%m/%Y"),
}
link_data_serialized = serialize(link_data).decode("utf-8")
return f's:{len(link_data_serialized)}:"{link_data_serialized}";'
def format_text(self, text: str) -> str:
return text.strip("\n").replace('"', "'").strip()
def error_log(self, msg: str, log_file: str = "failed.log"):
datetime_msg = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
Path("log").mkdir(parents=True, exist_ok=True)
with open(f"log/{log_file}", "a") as f:
print(f"{datetime_msg} LOG: {msg}\n{'-' * 80}", file=f)
def get_season_number(self, strSeason: str) -> int:
strSeason = strSeason.split(" ")[0]
res = ""
for ch in strSeason:
if ch.isdigit():
res += ch
return res
def get_episode_title_and_language_and_number(self, episode_title: str) -> str:
title = episode_title.lower()
if title.endswith("en vf"):
language = "VF"
title = title.replace("en vf", "").strip()
elif title.endswith("en vostfr"):
language = "VOSTFR"
title = title.replace("en vostfr", "").strip()
else:
language = "VO"
pattern = r"épisode\s(\d+(\.\d+)?)"
match = re.search(pattern, title)
if match:
number = match.group(1)
else:
self.error_log(
msg=f"Unknown episode number for: {title}",
log_file="toroplay_get_episode_title_and_language_and_number.log",
)
number = ""
title = title.title()
return [title, language, number]
def get_title_and_season_number(self, title: str) -> list:
title = title
season_number = "1"
try:
for seasonSplitText in CONFIG.SEASON_SPLIT_TEXTS:
if seasonSplitText in title:
title, season_number = title.split(seasonSplitText)
break
except Exception as e:
self.error_log(
msg=f"Failed to find title and season number\n{title}\n{e}",
log_file="toroplay.get_title_and_season_number.log",
)
return [
self.format_text(title),
self.get_season_number(self.format_text(season_number)),
]
def insert_postmeta(self, postmeta_data: list, table: str = "postmeta"):
database.insert_into(
table=f"{CONFIG.TABLE_PREFIX}{table}", data=postmeta_data, is_bulk=True
)
def generate_film_data(
self,
title,
description,
post_type,
trailer_id,
fondo_player,
poster_url,
extra_info,
):
post_data = {
"description": description,
"title": title,
"post_type": post_type,
# "id": "202302",
"youtube_id": f"{trailer_id}",
# "serie_vote_average": extra_info["IMDb"],
# "episode_run_time": extra_info["Duration"],
"fondo_player": fondo_player,
"poster_url": poster_url,
# "category": extra_info["Genre"],
# "stars": extra_info["Actor"],
# "director": extra_info["Director"],
# "release-year": [extra_info["Release"]],
# "country": extra_info["Country"],
}
key_mapping = {
"Réalisé par": "cast",
"Avec": "cast",
"Acteurs": "cast",
"Genre": "category",
"Date de sortie": "annee",
"Réalisateur": "directors",
}
for info_key in key_mapping.keys():
if info_key in extra_info.keys():
post_data[key_mapping[info_key]] = extra_info[info_key]
for info_key in ["cast", "directors"]:
if info_key in post_data.keys():
post_data[f"{info_key}_tv"] = post_data[info_key]
return post_data
def get_timeupdate(self) -> datetime:
timeupdate = datetime.now() - timedelta(hours=7)
return timeupdate
def generate_post(self, post_data: dict) -> tuple:
timeupdate = self.get_timeupdate()
data = (
0,
timeupdate.strftime("%Y/%m/%d %H:%M:%S"),
timeupdate.strftime("%Y/%m/%d %H:%M:%S"),
post_data["description"],
post_data["title"],
"",
"publish",
"open",
"open",
"",
slugify(post_data["title"]),
"",
"",
timeupdate.strftime("%Y/%m/%d %H:%M:%S"),
timeupdate.strftime("%Y/%m/%d %H:%M:%S"),
"",
0,
"",
0,
post_data["post_type"],
"",
0,
)
return data
def insert_post(self, post_data: dict) -> int:
data = self.generate_post(post_data)
post_id = database.insert_into(table=f"{CONFIG.TABLE_PREFIX}posts", data=data)
return post_id
def insert_film(self, post_data: dict) -> int:
try:
post_id = self.insert_post(post_data)
timeupdate = self.get_timeupdate()
postmeta_data = [
(post_id, "_edit_last", "1"),
(post_id, "_edit_lock", f"{int(timeupdate.timestamp())}:1"),
# _thumbnail_id
(post_id, "tr_post_type", "2"),
(post_id, "field_title", post_data["title"]),
# (
# post_id,
# "field_trailer",
# CONFIG.YOUTUBE_IFRAME.format(post_data["youtube_id"]),
# ),
(
post_id,
"poster_hotlink",
post_data["poster_url"],
),
(
post_id,
"backdrop_hotlink",
post_data["fondo_player"],
),
]
if "rating" in post_data.keys():
postmeta_data.append((post_id, "rating", post_data["rating"]))
tvseries_postmeta_data = [
(
post_id,
"number_of_seasons",
"0",
),
(
post_id,
"number_of_episodes",
"0",
),
]
movie_postmeta_data = []
if "annee" in post_data.keys():
annee = (
post_id,
"field_date",
post_data["annee"][0],
)
tvseries_postmeta_data.append(annee)
movie_postmeta_data.append(annee)
if "field_runtime" in post_data.keys():
tvseries_postmeta_data.append(
(
post_id,
"field_runtime",
"a:1:{i:0;i:" + post_data["field_runtime"] + ";}",
)
)
movie_postmeta_data.append(
(post_id, "field_runtime", f"{post_data['field_runtime']}m"),
)
if post_data["post_type"] == "series":
postmeta_data.extend(tvseries_postmeta_data)
else:
postmeta_data.extend(movie_postmeta_data)
self.insert_postmeta(postmeta_data)
for taxonomy in CONFIG.TAXONOMIES[post_data["post_type"]]:
if taxonomy in post_data.keys() and post_data[taxonomy]:
self.insert_terms(
post_id=post_id, terms=post_data[taxonomy], taxonomy=taxonomy
)
return post_id
except Exception as e:
self.error_log(
f"Failed to insert film\n{e}", log_file="toroplay.insert_film.log"
)
def format_condition_str(self, equal_condition: str) -> str:
return equal_condition.replace("\n", "").strip().lower()
def insert_terms(
self,
post_id: int,
terms: str,
taxonomy: str,
is_title: str = False,
term_slug: str = "",
):
terms = [term.strip() for term in terms.split(",")] if not is_title else [terms]
termIds = []
for term in terms:
term_slug = slugify(term_slug) if term_slug else slugify(term)
cols = "tt.term_taxonomy_id, tt.term_id"
table = (
f"{CONFIG.TABLE_PREFIX}term_taxonomy tt, {CONFIG.TABLE_PREFIX}terms t"
)
condition = f't.slug = "{term_slug}" AND tt.term_id=t.term_id AND tt.taxonomy="{taxonomy}"'
be_term = database.select_all_from(
table=table, condition=condition, cols=cols
)
if not be_term:
term_id = database.insert_into(
table=f"{CONFIG.TABLE_PREFIX}terms",
data=(term, term_slug, 0),
)
term_taxonomy_id = database.insert_into(
table=f"{CONFIG.TABLE_PREFIX}term_taxonomy",
data=(term_id, taxonomy, "", 0, 0),
)
termIds = [term_taxonomy_id, True]
else:
term_taxonomy_id = be_term[0][0]
term_id = be_term[0][1]
termIds = [term_taxonomy_id, False]
try:
database.insert_into(
table=f"{CONFIG.TABLE_PREFIX}term_relationships",
data=(post_id, term_taxonomy_id, 0),
)
except:
pass
return termIds
doohelper = DoothemeHelper()
class Dootheme:
    """Imports one scraped film/series (and its links) into the Dootheme DB.

    ``film`` is the scraped record (title, description, poster, extra_info…);
    ``film_links`` maps server → {language → url} for movies, or
    episode-title → {server → url} for series.

    Fixes vs. the previous revision: the bare ``except:`` in ``insert_terms``
    was narrowed to ``except Exception`` and large blocks of dead
    commented-out code were removed; runtime behavior is unchanged.
    """

    def __init__(self, film: dict, film_links: dict):
        self.film = film
        # Default quality when the scraper found none.
        self.film["quality"] = self.film["extra_info"].get("Quality", "HD")
        self.film_links = film_links

    def format_slug(self, slug: str) -> str:
        """Drop apostrophes (straight and curly) before slugification."""
        return slug.replace("’", "").replace("'", "")

    def format_condition_str(self, equal_condition: str) -> str:
        """Lower-case and de-newline a value used inside a SQL condition."""
        return equal_condition.replace("\n", "").strip().lower()

    def insert_postmeta(self, postmeta_data: list, table: str = "postmeta"):
        """Bulk-insert ``(post_id, meta_key, meta_value)`` rows."""
        logging.info(f"Inserting postmeta into table {table}")
        database.insert_into(
            table=f"{CONFIG.TABLE_PREFIX}{table}", data=postmeta_data, is_bulk=True
        )

    def insert_terms(self, post_id: int, terms: list, taxonomy: str):
        """Ensure each term exists and relate it to *post_id*.

        Returns ``[term_id, created?]`` for the LAST term processed
        (historic behavior).
        """
        termIds = []
        for term in terms:
            term_name = self.format_condition_str(term)
            cols = "tt.term_taxonomy_id, tt.term_id"
            table = (
                f"{CONFIG.TABLE_PREFIX}term_taxonomy tt, {CONFIG.TABLE_PREFIX}terms t"
            )
            # NOTE(review): condition built by interpolation of scraped text —
            # prefer parameterized queries if database supports them.
            condition = f't.name = "{term_name}" AND tt.term_id=t.term_id AND tt.taxonomy="{taxonomy}"'
            be_term = database.select_all_from(
                table=table, condition=condition, cols=cols
            )
            if not be_term:
                term_id = database.insert_into(
                    table=f"{CONFIG.TABLE_PREFIX}terms",
                    data=(term, slugify(term), 0),
                )
                termIds = [term_id, True]
                term_taxonomy_id = database.insert_into(
                    table=f"{CONFIG.TABLE_PREFIX}term_taxonomy",
                    data=(term_id, taxonomy, "", 0, 0),
                )
            else:
                term_taxonomy_id = be_term[0][0]
                term_id = be_term[0][1]
                termIds = [term_id, False]
            try:
                database.insert_into(
                    table=f"{CONFIG.TABLE_PREFIX}term_relationships",
                    data=(post_id, term_taxonomy_id, 0),
                )
            except Exception:
                # Duplicate (post, term) relationships are ignored on purpose.
                pass
        return termIds

    def insert_movie_details(self, post_id):
        """Attach the player links (grouped by language) to a movie post."""
        if not self.film_links:
            return
        logging.info("Inserting movie players")
        # Regroup server → {language → link} into language → {server → link}.
        movie_links = {}
        for server_name, server_links in self.film_links.items():
            for language, link in server_links.items():
                if not link:
                    continue
                movie_links.setdefault(language, {})
                movie_links[language][server_name] = link
        postmeta_data = [
            (
                post_id,
                "repeatable_fields",
                self.generate_repeatable_fields(movie_links),
            )
        ]
        if (
            "Country" in self.film["extra_info"].keys()
            and self.film["extra_info"]["Country"]
        ):
            postmeta_data.append(
                (post_id, "Country", self.film["extra_info"]["Country"][0]),
            )
        self.insert_postmeta(postmeta_data)

    def generate_film_data(
        self,
        title,
        description,
        post_type,
        trailer_id,
        fondo_player,
        poster_url,
        extra_info,
    ):
        """Assemble the post dict for movies/shows/seasons/episodes.

        ``trailer_id`` is accepted for signature compatibility but currently
        unused (youtube_id is always the literal "[]").
        """
        post_data = {
            "description": description,
            "title": title,
            "post_type": post_type,
            "youtube_id": "[]",
            "dt_backdrop": fondo_player,
            "dt_poster": poster_url,
        }
        # Map scraped French labels onto Dootheme taxonomy keys.
        for info_key in KEY_MAPPING.keys():
            if info_key in extra_info.keys():
                post_data[KEY_MAPPING[info_key]] = extra_info[info_key].split(",")
        if "Date de sortie" in extra_info.keys():
            post_data["dtyear"] = [extra_info["Date de sortie"]]
        if "dtcreator" in post_data.keys():
            post_data["dtdirector"] = post_data["dtcreator"]
        return post_data

    def get_timeupdate(self) -> datetime:
        """Current time shifted by -7h (site-local timezone offset)."""
        timeupdate = datetime.now() - timedelta(hours=7)
        return timeupdate

    def generate_post(self, post_data: dict) -> tuple:
        """Build the wp_posts row; the "created" dates are backdated 2h."""
        timeupdate = self.get_timeupdate()
        data = (
            0,
            timeupdate.strftime("%Y/%m/%d %H:%M:%S"),
            (timeupdate - timedelta(hours=2)).strftime("%Y/%m/%d %H:%M:%S"),
            post_data["description"],
            post_data["title"],
            "",
            "publish",
            "open",
            "open",
            "",
            slugify(self.format_slug(post_data["title"])),
            "",
            "",
            timeupdate.strftime("%Y/%m/%d %H:%M:%S"),
            (timeupdate - timedelta(hours=2)).strftime("%Y/%m/%d %H:%M:%S"),
            "",
            0,
            "",
            0,
            post_data["post_type"],
            "",
            0,
        )
        return data

    def insert_post(self, post_data: dict) -> int:
        """Insert a wp_posts row and return the new post id."""
        data = self.generate_post(post_data)
        post_id = database.insert_into(table=f"{CONFIG.TABLE_PREFIX}posts", data=data)
        return post_id

    def insert_film_to_database(self, post_data: dict) -> int:
        """Insert a post plus its postmeta and taxonomy terms; return its id."""
        try:
            post_id = self.insert_post(post_data)
            timeupdate = self.get_timeupdate()
            postmeta_data = [
                (
                    post_id,
                    "youtube_id",
                    post_data["youtube_id"],
                ),
                (
                    post_id,
                    "dt_poster",
                    post_data["dt_poster"],
                ),
                (
                    post_id,
                    "dt_backdrop",
                    post_data["dt_backdrop"],
                ),
                (post_id, "original_name", post_data["title"]),
                (post_id, "_edit_last", "1"),
                (post_id, "_edit_lock", f"{int(timeupdate.timestamp())}:1"),
            ]
            tvseries_postmeta_data = [
                (post_id, "ids", post_id),
                (post_id, "clgnrt", "1"),
            ]
            movie_postmeta_data = []
            if "episode_run_time" in post_data.keys():
                movie_postmeta_data.append(
                    (post_id, "runtime", post_data["episode_run_time"]),
                )
            for key in ["episode_run_time", "imdbRating"]:
                if key in post_data.keys():
                    tvseries_postmeta_data.append(
                        (
                            post_id,
                            key,
                            post_data[key],
                        )
                    )
            if post_data["post_type"] == "tvshows":
                postmeta_data.extend(tvseries_postmeta_data)
            else:
                postmeta_data.extend(movie_postmeta_data)
            self.insert_postmeta(postmeta_data)
            for taxonomy in TAXONOMIES[post_data["post_type"]]:
                if taxonomy in post_data.keys() and post_data[taxonomy]:
                    self.insert_terms(
                        post_id=post_id, terms=post_data[taxonomy], taxonomy=taxonomy
                    )
            return post_id
        except Exception as e:
            helper.error_log(f"Failed to insert film\n{e}")

    def insert_root_film(self) -> list:
        """Insert the top-level movie/show post unless it already exists.

        Returns ``[post_id, inserted?]``.
        """
        condition_post_title = self.film["post_title"].replace("'", "''")
        condition = f"""post_title = '{condition_post_title}' AND post_type='{self.film["post_type"]}'"""
        be_post = database.select_all_from(
            table=f"{CONFIG.TABLE_PREFIX}posts", condition=condition
        )
        if not be_post:
            logging.info(f'Inserting root film: {self.film["post_title"]}')
            post_data = self.generate_film_data(
                self.film["post_title"],
                self.film["description"],
                self.film["post_type"],
                self.film["trailer_id"],
                self.film["fondo_player"],
                self.film["poster_url"],
                self.film["extra_info"],
            )
            return [self.insert_film_to_database(post_data), True]
        else:
            return [be_post[0][0], False]

    def update_season_number_of_episodes(self, season_term_id, number_of_episodes):
        """Raise the season's stored episode count if ours is larger."""
        try:
            condition = f"term_id={season_term_id} AND meta_key='number_of_episodes'"
            be_number_of_episodes = database.select_all_from(
                table=f"{CONFIG.TABLE_PREFIX}termmeta",
                condition=condition,
                cols="meta_value",
            )[0][0]
            if int(be_number_of_episodes) < number_of_episodes:
                database.update_table(
                    table=f"{CONFIG.TABLE_PREFIX}termmeta",
                    set_cond=f"meta_value={number_of_episodes}",
                    where_cond=condition,
                )
        except Exception as e:
            helper.error_log(
                msg=f"Error while update_season_number_of_episodes\nSeason {season_term_id} - Number of episodes {number_of_episodes}\n{e}",
                log_file="torotheme.update_season_number_of_episodes.log",
            )

    def generate_repeatable_fields(self, video_links: dict) -> str:
        """PHP-serialize {language: {server: link}} into theme player entries."""
        video_players = {}
        i = 0
        for language, server_links in video_links.items():
            for server_name, link in server_links.items():
                video_players[i] = {
                    "name": f"{language} - {server_name}".upper(),
                    # "dtshcode" embeds via shortcode; "iframe" would take a
                    # raw URL instead of the wrapped CONFIG.IFRAME markup.
                    "select": "dtshcode",
                    "idioma": "",
                    "url": CONFIG.IFRAME.format(link),
                }
                i += 1
        video_players_serialize = serialize(video_players)
        return video_players_serialize.decode("utf-8")

    def format_serie_film_links(self):
        """Regroup raw episode links by episode number.

        Result: {number: {"title": str, "video_links": {language: {server: link}}}}.
        Episodes with no link at all or no parseable number are dropped.
        """
        new_film_links = {}
        for episode_title, episode_links in self.film_links.items():
            is_has_link = False
            for server, link in episode_links.items():
                if link:
                    is_has_link = True
                    break
            if not is_has_link:
                continue
            (
                episode_title,
                language,
                episode_number,
            ) = doohelper.get_episode_title_and_language_and_number(
                episode_title=episode_title
            )
            if not episode_number:
                continue
            new_film_links.setdefault(episode_number, {})
            new_film_links[episode_number]["title"] = episode_title
            new_film_links[episode_number].setdefault("video_links", {})
            new_film_links[episode_number]["video_links"][language] = episode_links
        return new_film_links

    def insert_episodes(self, post_id: int, season_id: int):
        """Insert every not-yet-present episode post of the current season."""
        self.film_links = self.format_serie_film_links()
        for episode_number, episode in self.film_links.items():
            episode_title = episode["title"]
            # Display name follows the theme convention "<serie>: <S>x<E>".
            episode_name = (
                self.film["post_title"]
                + f': {self.film["season_number"]}x{episode_number}'
            )
            condition_post_title = episode_name.replace("'", "''")
            condition = (
                f"""post_title = '{condition_post_title}' AND post_type='episodes'"""
            )
            be_post = database.select_all_from(
                table=f"{CONFIG.TABLE_PREFIX}posts", condition=condition
            )
            if not be_post:
                logging.info(f"Inserting episodes: {episode_name}")
                post_data = self.generate_film_data(
                    episode_name,
                    "",
                    "episodes",
                    self.film["trailer_id"],
                    self.film["fondo_player"],
                    self.film["poster_url"],
                    self.film["extra_info"],
                )
                episode_id = self.insert_post(post_data)
                episode_postmeta = [
                    (episode_id, "temporada", self.film["season_number"]),
                    (episode_id, "episodio", episode_number),
                    (episode_id, "serie", self.film["post_title"]),
                    (episode_id, "episode_name", episode_title),
                    (episode_id, "ids", post_id),
                    (episode_id, "clgnrt", "1"),
                    (
                        episode_id,
                        "repeatable_fields",
                        self.generate_repeatable_fields(episode["video_links"]),
                    ),
                    (episode_id, "_edit_last", "1"),
                    (
                        episode_id,
                        "_edit_lock",
                        f"{int(self.get_timeupdate().timestamp())}:1",
                    ),
                ]
                if EPISODE_COVER:
                    # Reuse the serie poster as the episode cover.
                    episode_postmeta.append(
                        (
                            episode_id,
                            "dt_backdrop",
                            self.film["poster_url"],
                        )
                    )
                self.insert_postmeta(episode_postmeta)

    def insert_season(self, post_id: int):
        """Insert (or find) the season post; return its post id."""
        season_name = self.film["post_title"] + ": Saison " + self.film["season_number"]
        condition_post_title = season_name.replace("'", "''")
        condition = f"""post_title = '{condition_post_title}' AND post_type='seasons'"""
        be_post = database.select_all_from(
            table=f"{CONFIG.TABLE_PREFIX}posts", condition=condition
        )
        if not be_post:
            logging.info(f"Inserting season: {season_name}")
            post_data = self.generate_film_data(
                season_name,
                self.film["description"],
                "seasons",
                self.film["trailer_id"],
                self.film["fondo_player"],
                self.film["poster_url"],
                self.film["extra_info"],
            )
            season_id = self.insert_post(post_data)
            season_postmeta = [
                (season_id, "temporada", self.film["season_number"]),
                (season_id, "serie", self.film["post_title"]),
                (season_id, "dt_poster", self.film["poster_url"]),
                (season_id, "ids", post_id),
                (season_id, "clgnrt", "1"),
                (season_id, "_edit_last", "1"),
                (
                    season_id,
                    "_edit_lock",
                    f"{int(self.get_timeupdate().timestamp())}:1",
                ),
            ]
            self.insert_postmeta(season_postmeta)
            return season_id
        else:
            return be_post[0][0]

    def insert_film(self):
        """Entry point: insert the root post, then season/episodes for shows."""
        (
            self.film["post_title"],
            self.film["season_number"],
        ) = doohelper.get_title_and_season_number(self.film["title"])
        post_id, isNewPostInserted = self.insert_root_film()
        logging.info("Root film ID: %s", post_id)
        if self.film["post_type"] != "tvshows":
            # Movies only get their player links, and only on first insert.
            if isNewPostInserted:
                self.insert_movie_details(post_id)
        else:
            season_term_id = self.insert_season(post_id)
            self.insert_episodes(post_id, season_term_id)
| KiritoU/french-stream_dootheme | dootheme.py | dootheme.py | py | 29,976 | python | en | code | 0 | github-code | 13 |
29816188789 | #!/usr/bin/env python3
"""Coefficient for noise sensitivity evaluation."""
import argparse
import math
import os
import sys
import numpy as np
from scipy.stats import linregress
def load_file(path):
    """Read one float per line from *path* into a 1-D numpy array."""
    with open(path) as handle:
        values = [float(raw.strip()) for raw in handle]
    return np.array(values)
def main():
    """CLI entry point: print slope/intercept for each input series.

    Each file must hold exactly 11 numbers (one per line), interpreted as
    measurements at noise probabilities 0.0, 0.1, ..., 1.0.  A line is fit
    to each series and the intercept-normalized slope is printed.
    """
    parser = argparse.ArgumentParser(__doc__)
    parser.add_argument(
        "files", nargs="+", type=str, help="File with numbers.")
    args = parser.parse_args()
    # Validate inputs up front; any problem aborts with exit status 1.
    non_existing = [path for path in args.files if not os.path.exists(path)]
    if non_existing:
        print(f"Files do not exists: {', '.join(non_existing)}",
              file=sys.stderr)
        exit(1)
    if len(args.files) < 2:
        print("Provide at least two series of numbers", file=sys.stderr)
        exit(1)
    all_series = [load_file(path) for path in args.files]
    for path, series in zip(args.files, all_series):
        if len(series) != 11:
            print(f"Missing measurements in {path}.", file=sys.stderr)
            exit(1)
    # The 11 evenly spaced noise probabilities matching the measurements.
    noise_probailities = np.arange(0, 1.1, 0.1)
    for i, series in enumerate(all_series):
        slope, intercept, r_value, p_value, std_err = linregress(
            noise_probailities, series)
        # NOTE(review): a zero fitted intercept would raise
        # ZeroDivisionError — presumably the inputs exclude that case.
        print(slope / intercept)
if __name__ == "__main__":
    main()
| jlibovicky/char-nmt | noisy_slope.py | noisy_slope.py | py | 1,308 | python | en | code | 1 | github-code | 13 |
17849486958 | from sys import argv
def main():
    """Average per-generation EA statistics across runs and write them out.

    Expects exactly one CLI argument: the log file.  Lines whose first token
    starts with "Run" mark the beginning of a new run; data lines have the
    form "<generation>\t<avg fitness>\t<best fitness>".  The running average
    of both values per generation is written to "<name>_parsed.log".
    """
    if len(argv) < 2:
        print("Error: Too few arguments. Expected 1, found {}".format(len(argv)))
        exit(1)
    elif len(argv) > 2:
        print("Error: Too many arguments. Expected 1, found {}".format(len(argv)))
        exit(1)
    # Exactly one CLI input
    try:
        log_file = open(argv[1], "r")
    except FileNotFoundError:
        print("Error: File \"{}\" not found.".format(argv[1]))
        exit(1)
    run = 0
    index = 0
    x_values = []
    avg_avg_values = []
    avg_best_values = []
    for line in log_file:
        tokens = line.split("\t")
        if tokens[0][:3] == "Run":
            # Next run: restart the per-generation index.
            run += 1
            index = 0
        else:
            try:
                if len(tokens) == 3:
                    if run == 1:
                        # First run defines the x axis and seeds the averages.
                        x_values.append(int(tokens[0]))
                        avg_avg_values.append(float(tokens[1]))
                        avg_best_values.append(float(tokens[2].strip("\n")))
                    else:
                        # Incremental mean: new = (old * (n-1) + x) / n.
                        avg_avg_values[index] *= (run - 1)
                        avg_avg_values[index] += float(tokens[1])
                        avg_avg_values[index] /= run
                        avg_best_values[index] *= (run - 1)
                        avg_best_values[index] += float(tokens[2].strip("\n"))
                        avg_best_values[index] /= run
                    index += 1
            except ValueError:
                # Non-numeric header lines inside a run are skipped on purpose.
                pass
    log_file.close()  # fix: the input handle was previously never closed
    # Fix: argv[1].find(".") returned -1 for names without an extension,
    # silently chopping the final character; only strip a real extension.
    dot = argv[1].rfind(".")
    base = argv[1][:dot] if dot != -1 else argv[1]
    file_name = base + "_parsed.log"
    output_file = open(file_name, "w")
    for i in range(len(x_values)):
        output_file.write("{}\t{}\t{}\n".format(x_values[i], avg_avg_values[i], avg_best_values[i]))
    output_file.close()  # fix: flush and release the handle before reporting
    print("Successfully wrote to {}".format(file_name))
if __name__ == "__main__":
    main()
| Jmgiacone/CS5401 | hw2c/src/parse_log_file.py | parse_log_file.py | py | 1,851 | python | en | code | 0 | github-code | 13 |
72603332497 | from pyspark import SparkConf, SparkContext, RDD
from pyspark.mllib.recommendation import ALS, MatrixFactorizationModel, Rating
import math
# Spark ALS recommender: predict sales from (store*1000+item, bucketed temp).
# NOTE(review): this first conf is immediately overwritten by the next line,
# so "spark.executor.memory" is never applied — confirm this is intentional.
conf = SparkConf().setAppName("Recommender").set("spark.executor.memory", "7g")
conf = SparkConf().setAppName("Recommender").set("spark.storage.memoryFraction", "0.1")
sc = SparkContext(conf=conf)
# get data, make rdd
weather_file = sc.textFile('mini_proc_weather.csv')
weather_data = weather_file.map(lambda l: l.split(','))
# stat_nbr, (year, month, day, avg temp)
weather_data = weather_data.map(lambda l: (int(l[0]), (int(l[1]), int(l[2]), int(l[3]), int(l[4]))))
key_file = sc.textFile('key_store_stat.csv')
key_data = key_file.map(lambda l: l.split(','))
# stat_nbr, store_nbr
key_data = key_data.map(lambda l: (int(l[1]), int(l[0])))
combined_data = key_data.join(weather_data)
store_date_temp = combined_data.map(lambda l: l[1])
# ^ now (store, (YY, MM, DD, avgTemp))
#store_date_temp = store_date_temp.map(lambda l: (str(l[0])+'-'+l[1][0], l[1][1]))
sales_file = sc.textFile('mini_proc_sales.csv')
#[store number, year, month, day, item number, sales]
sales_data = sales_file.map(lambda l: l.split(','))
#[(store #, year, month, day), (item, sales)]
sales_data = sales_data.map(lambda l: ((int(l[0]), int(l[1]), int(l[2]), int(l[3])), (int(l[4]), int(l[5]))))
#[(store #, year, month, day), temp]
store_date_temp = store_date_temp.map(lambda l: ((l[0], l[1][0], l[1][1], l[1][2]), l[1][3]))
sales_temp_data = sales_data.join(store_date_temp)
# ((store, year, month, date), ((item, sales), temp))
# Rating(user, product, rating) = (store*1000+item, temperature, sales):
# the "user" encodes store and item, the "product" is the day's avg temp.
ratings_RDD = sales_temp_data.map(lambda l: Rating(l[0][0]*1000+l[1][0][0], l[1][1], l[1][0][1]))
# ((store*1000+item, temp, sales)
#print(ratings_RDD.take(3))
# train model
#training_RDD, validation_RDD = ratings_RDD.randomSplit([8, 2], 0)
#validation_for_predict_RDD = validation_RDD.map(lambda x: (x[0], x[1]))
#print(training_RDD.collect().take(3))
seed = 5
iterations = 12
regularization_parameter = 0.1
rank = 4
#errors = [0, 0, 0]
#err = 0
#tolerance = 0.02
training_RDD, test_RDD = ratings_RDD.randomSplit([8, 2], 0)
# Three models on temperature bucketed into 5-degree bins with offsets
# 0, +3 and -3; their predictions are averaged below to smooth bin edges.
training_1 = training_RDD.map(lambda l: (l[0], l[1]//5, l[2]))
training_2 = training_RDD.map(lambda l: (l[0], (l[1]+3)//5, l[2]))
training_3 = training_RDD.map(lambda l: (l[0], (l[1]-3)//5, l[2]))
model_1 = ALS.train(training_1, rank, seed=None, iterations=iterations, lambda_=regularization_parameter,\
nonnegative = True)
model_2 = ALS.train(training_2, rank, seed=None, iterations=iterations, lambda_=regularization_parameter,\
nonnegative = True)
model_3 = ALS.train(training_3, rank, seed=None, iterations=iterations, lambda_=regularization_parameter,\
nonnegative = True)
test_for_predict_RDD = test_RDD.map(lambda x: (x[0], x[1], x[1]//5, (x[1]+3)//5, (x[1]-3)//5))
# NOTE(review): model.predict() inside a transformation and the
# .take(3).collect() chain below look unrunnable as written (take() already
# returns a local list) — confirm against the pyspark version in use.
preds = test_for_predict_RDD.map(lambda x: (x[0], x[1], model_1.predict(x[0], x[2]), model_2.predict(x[0], x[3]),\
model_3.predict(x[0], x[4])))
preds = preds.map(lambda x: ((x[0], x[1]), (x[2][2]+x[3][2]+x[4][2])/3))
print(preds.take(3).collect())
rates_and_preds = test_RDD.map(lambda r: ((int(r[0]), int(r[1])), float(r[2])))
#rates_and_preds = rates_and_preds.join(preds)
'''
preds_1 = preds_1.map(lambda x: (x[0], x[1], x[2][2]))
preds_2 = test_for_predict_RDD.map(lambda x: (x[0], x[1], model_2.predict(x[0], x[3])))
preds_2 = preds_2.map(lambda x: (x[0], x[1], x[2][2]))
preds_3 = test_for_predict_RDD.map(lambda x: (x[0], x[1], model_3.predict(x[0], x[4])))
preds_3 = preds_3.map(lambda x: (x[0], x[1], x[2][2]))
preds_all = preds_1.join(preds_2)
preds_all = preds_12.join(preds_3)
preds_all = preds_all.map(lambda x: ((x[0, x[1]]), x[2]))
preds_avg = preds_all.mapValues(lambda x: (x, 1)).reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1]))
preds_avg = preds_avg(lambda x: (x[0], float(x[1][0]/x[1][1])))
pred_list.append((user[0], user[1], meanp))
predictions = sc.parallelize(pred_list)
#predictions = complete_model.predictAll(test_for_predict_RDD).map(lambda r: ((r[0], r[1]), r[2]))
rates_and_preds = test_RDD.map(lambda r: ((int(r[0]), int(r[1])), float(r[2]))).join(predictions)
mae = rates_and_preds.map(lambda r: (abs(r[1][0] - r[1][1]))).mean()
rmse = math.sqrt(rates_and_preds.map(lambda r: (r[1][0] - r[1][1])**2).mean())
logs = rates_and_preds.map(lambda r: (math.log(r[1][1] + 1) - math.log(r[1][0] + 1)))
rmsle = math.sqrt(logs.map(lambda x: x**2).mean())
print("The MAE is {:G}".format(mae))
print("The RMSE is {:G}".format(rmse))
print("The RMSLE is {:G}".format(rmsle))
'''
#print(sales_temp_data.take(3))
#^[((1, 12, 1, 1), ((1, 0), 42)), ((1, 12, 1, 1), ((2, 0), 42)), ((1, 12, 1, 1), ((3, 0), 42))]
15125041514 | class Solution(object):
def removeElement(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
ind = 0
l = len(nums)
while ind<l:
if nums[ind]==val:
ind1 = ind
while ind1<l-1:nums[ind1],ind1 = nums[ind1+1],ind1+1
nums.pop(ind1)
l-=1
else:ind+=1 | My-name-is-Jamshidbek/malumotlar_tuzilmasi_va_algoritmlash | leetcode_learn/learn_array/lissen_3/remove_element.py | remove_element.py | py | 422 | python | en | code | 1 | github-code | 13 |
23213279900 | import math
import os
import librosa
import warnings
import numpy as np
import pandas as pd
from datasets import Dataset
from transformers.file_utils import filename_to_url
def speech_file_to_array(x):
    """Load audio file *x* resampled to 16 kHz and return the waveform array.

    Librosa warnings (e.g. audioread fallback) are suppressed because the
    dataset contains many files that would otherwise flood the log.
    """
    # Fix: dropped the stray "global longest_audio" declaration — that global
    # is never defined or assigned anywhere, so the statement was dead and
    # misleading.  The unused sampling rate is discarded explicitly.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        speech_array, _ = librosa.load(x, sr=16_000)
    return speech_array
def import_torgo(location, test_train, t):
    """Load the TORGO dysarthric-speech corpus from *location*.

    Directory layout assumed: <location>/<speaker>/Session*/wav_arrayMic (or
    wav_headMic) for audio, with matching prompts/<name>.txt transcripts.
    Recordings over 10 s and prompts containing "[", "/" or "xxx" are dropped.

    When test_train is falsy: returns one datasets.Dataset per speaker.
    When truthy: returns an (train, test) pair split 80/20 per speaker;
    the train part is [] unless t == 'train'.
    """
    print('Import Torgo dataset')
    if not test_train:
        # Per-speaker datasets, no split.
        speakers = os.listdir(location)
        dfs = []
        for speaker in speakers:
            df = pd.DataFrame(columns=['id', 'file', 'target'])
            l = os.path.join(location, speaker)
            if os.path.isdir(l):
                sessions = os.listdir(l)
                for session in sessions:
                    if session[0:7] == 'Session':
                        s = os.path.join(l, session)
                        # Prefer the array microphone; fall back to head mic.
                        if os.path.isdir(os.path.join(s, 'wav_arrayMic')):
                            recordings_location = os.path.join(
                                s, 'wav_arrayMic')
                        else:
                            recordings_location = os.path.join(
                                s, 'wav_headMic')
                        recordings = os.listdir(recordings_location)
                        for recording in recordings:
                            # len 8 == "NNNN.wav": skips renamed/derived files.
                            if len(recording) == 8:
                                if os.path.isfile(os.path.join(s, 'prompts', (recording[:-4] + '.txt'))):
                                    sentence = open(os.path.join(
                                        s, 'prompts', (recording[:-4] + '.txt'))).read()
                                    # Skip non-verbal / annotated prompts.
                                    if "[" in sentence or "/" in sentence or "xxx" in sentence:
                                        continue
                                    else:
                                        new_row = {'id': str(speaker), 'file': str(os.path.join(
                                            recordings_location, recording)), 'target': str(sentence)}
                                        # NOTE(review): DataFrame.append was
                                        # removed in pandas 2.x — confirm the
                                        # pinned pandas version.
                                        df = df.append(
                                            new_row, ignore_index=True)
                                else:
                                    continue
            df['speech'] = [speech_file_to_array(x) for x in df['file']]
            # Drop clips longer than 10 seconds.
            for x in df['file']:
                if librosa.get_duration(filename=x) > 10:
                    df.drop(df[df['file'] == x].index, inplace=True)
            df.drop('file', axis=1, inplace=True)
            d = Dataset.from_pandas(df)
            # Keep only the id/speech/target columns in the HF dataset.
            columns_to_remove = []
            for f in d.features.type:
                if str(f.name) not in ['id', 'speech', 'target']:
                    columns_to_remove.append(str(f.name))
            d = d.remove_columns(columns_to_remove)
            dfs.append(d)
        for d in dfs:
            print(d.features)
        return dfs
    if test_train:
        # Pooled 80/20 train/test split (per speaker, preserving order).
        train_ds = pd.DataFrame(columns=['id', 'speech', 'target'])
        test_ds = pd.DataFrame(columns=['id', 'speech', 'target'])
        speakers = os.listdir(location)
        for speaker in speakers:
            df = pd.DataFrame(columns=['id', 'file', 'target'])
            l = os.path.join(location, speaker)
            if os.path.isdir(l):
                sessions = os.listdir(l)
                for session in sessions:
                    if session[0:7] == 'Session':
                        s = os.path.join(l, session)
                        if os.path.isdir(os.path.join(s, 'wav_arrayMic')):
                            recordings_location = os.path.join(
                                s, 'wav_arrayMic')
                        else:
                            recordings_location = os.path.join(
                                s, 'wav_headMic')
                        recordings = os.listdir(recordings_location)
                        for recording in recordings:
                            if len(recording) == 8:
                                if os.path.isfile(os.path.join(s, 'prompts', (recording[:-4] + '.txt'))):
                                    sentence = open(os.path.join(
                                        s, 'prompts', (recording[:-4] + '.txt'))).read()
                                    if "[" in sentence or "/" in sentence or "xxx" in sentence:
                                        continue
                                    else:
                                        new_row = {'id': speaker, 'file': os.path.join(
                                            recordings_location, recording), 'target': sentence}
                                        df = df.append(
                                            new_row, ignore_index=True)
                                else:
                                    continue
            df['speech'] = [speech_file_to_array(x) for x in df['file']]
            for x in df['file']:
                if librosa.get_duration(filename=x) > 10:
                    df.drop(df[df['file'] == x].index, inplace=True)
            # First 80% of this speaker's rows -> train, rest -> test.
            tr_ds, te_ds = np.split(df, [math.ceil(int(.8*len(df)))])
            train_ds = train_ds.append(tr_ds)
            test_ds = test_ds.append(te_ds)
        # NOTE(review): this second >10s pass re-filters train_ds only — the
        # per-speaker pass above already removed those rows; confirm whether
        # test_ds was meant to be filtered here too.
        for x in train_ds['file']:
            if librosa.get_duration(filename=x) > 10:
                train_ds.drop(train_ds[train_ds['file'] == x].index, inplace=True)
        train_ds.drop('file', axis=1, inplace=True)
        test_ds.drop('file', axis=1, inplace=True)
        # Parenthesization: returns ((train-or-[]), test) — the conditional
        # applies only to the first tuple element.
        return Dataset.from_pandas(train_ds) if t=='train' else [], Dataset.from_pandas(test_ds)
| timherzig/asr_dysarthria | script/import_ds/import_torgo.py | import_torgo.py | py | 5,766 | python | en | code | 1 | github-code | 13 |
28187711205 |
from PyQt4 import QtGui,QtCore
class FaderWidget(QtGui.QWidget):
    """Overlay widget that fades a pixmap in over `newWidget` via a QTimeLine."""
    def __init__(self,newWidget):
        QtGui.QWidget.__init__(self, newWidget)
        self.newWidget = newWidget
        # self.new_pix = QtGui.QPixmap(self.newWidget.size())
        # NOTE(review): nothing is ever rendered into this pixmap (the
        # render() call below is commented out), so the fade shows an
        # uninitialized 1000x200 pixmap — confirm this is intentional.
        self.new_pix = QtGui.QPixmap(1000,200)
        self.pix_opacity = 0  # current fade level used by paintEvent
        # self.render(self.newWidget)
        self.timeline = QtCore.QTimeLine()
        self.timeline.valueChanged.connect(self.animate)
        self.timeline.finished.connect(self.close)
        self.timeline.setDuration(1000)  # fade lasts 1000 ms
        self.timeline.start()
        self.show()
    def paintEvent(self, event):
        # Draw the stored pixmap at the current fade opacity.
        self.painter = QtGui.QPainter()
        self.painter.begin(self)
        self.painter.setOpacity(self.pix_opacity)
        self.painter.drawPixmap(0,0,self.new_pix)
        self.painter.end()
    def animate(self, value):
        # Timeline tick: accumulate opacity and request a repaint.
        # NOTE(review): opacity grows by value/2 on EVERY tick, so it can
        # exceed 1.0 long before the timeline finishes — confirm the
        # intended fade curve (a plain "self.pix_opacity = value" would be
        # linear).
        self.pix_opacity = self.pix_opacity + (value/2)
        self.repaint()
| brownharryb/webtydesk | custom_widgets/custom_notify.py | custom_notify.py | py | 950 | python | en | code | 0 | github-code | 13 |
7760757820 | import platform
# Remember the platform: Windows needs the pure-Python dulwich fallback below.
windows = platform.system() == 'Windows'
try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils when setuptools is not installed.
    has_setuptools = False
    from distutils.core import setup
else:
    has_setuptools = True
version_string = '0.5.0'
setup_kwargs = {
    'name': 'gittle',
    'description': 'A high level pure python git implementation',
    'keywords': 'git dulwich pure python gittle',
    'version': version_string,
    'url': 'https://github.com/FriendCode/gittle',
    'license': 'MIT',
    'author': "Aaron O'Mullan",
    'author_email': 'aaron@friendco.de',
    'long_description': """
Gittle is a wrapper around dulwich. It provides an easy and familiar interface to git.
It's pure python (no dependency on the ``git`` binary) and has no other dependencies besides
the python stdlib, dulwich and paramiko (optional).
""",
    'packages': ['gittle', 'gittle.utils'],
    'install_requires': [
        # PyPI
        'paramiko>=1.10.0',
        'pycrypto==2.6',
        'dulwich>=0.9.7',
        'funky>=0.0.2',
    ],
}
try:
    # Run setup with C extensions
    setup(**setup_kwargs)
except SystemExit as exc:
    # setup() raises SystemExit when a dependency's C extension fails to
    # build (typically no Visual Studio on Windows); retry in pure Python.
    import logging
    logging.exception(exc)
    logging.info("retrying installation without VisualStudio...")
    # Remove C dependencies
    install_requires = [r for r in setup_kwargs['install_requires']
                        if r.split('=')[0] not in ('paramiko', 'pycrypto')]
    # Install dulwich as pure Python
    if windows and has_setuptools:
        # Monkey-patch easy_install so that dulwich's own setup.py gets the
        # '--pure' flag, skipping its C speedups.
        from setuptools.command.easy_install import easy_install
        run_setup = easy_install.run_setup
        def _run_setup(self, setup_script, setup_base, args):
            """Alternate run_setup function to pass '--pure' to the
            Dulwich installer on Windows.
            """
            if 'dulwich' in setup_script:
                args.insert(0, '--pure')
            run_setup(self, setup_script, setup_base, args)
        easy_install.run_setup = _run_setup
    # Run setup without C extensions
    setup_kwargs['install_requires'] = install_requires
    setup(**setup_kwargs)
| FriendCode/gittle | setup.py | setup.py | py | 2,087 | python | en | code | 732 | github-code | 13 |
31466466022 | # Follow up for problem "Populating Next Right Pointers in Each Node".
# What if the given tree could be any binary tree? Would your previous solution still work?
# Note:
# You may only use constant extra space.
# For example,
# Given the following binary tree,
# 1
# / \
# 2 3
# / \ \
# 4 5 7
# After calling your function, the tree should look like:
# 1 -> NULL
# / \
# 2 -> 3 -> NULL
# / \ \
# 4-> 5 -> 7 -> NULL
class Node(object):
    """Simple binary-tree node with a level-link ``next`` pointer."""
    def __init__(self, data):
        """Store *data*; children and the level link start out empty."""
        self.data = data
        self.left = None
        self.right = None
        # BUGFIX: both connect functions below read ``node.next`` before it
        # is ever assigned; without this default the first read raised
        # AttributeError.  LeetCode's node type initializes next to None.
        self.next = None
def connect2(node):
    """LC 117: populate each node's ``next`` pointer to its right neighbor.

    Walks the current level through the already-built ``next`` links and
    threads together every child it encounters; the first child found is the
    entry point for the next level.  O(1) extra space.

    BUGFIX: the original descended only via ``node.left``/``node.right`` of
    the leftmost node, so when that node was a leaf the rest of the level
    was never visited and deeper connections were silently skipped; it also
    only looked one sibling ahead (``curr.next``) when linking children.
    """
    while node:
        level_head = None   # first child found on the next level
        tail = None         # last child linked so far
        curr = node
        while curr:
            for child in (curr.left, curr.right):
                if child is None:
                    continue
                if tail is None:
                    level_head = child
                else:
                    tail.next = child
                tail = child
            curr = curr.next  # walk across the current level
        node = level_head     # drop down one level
# a more elagent solution..
def second_connect2(root):
    """LC 117: wire each level's children left-to-right using O(1) space.

    Traverses the current level via the ``next`` links built on the previous
    pass, linking every existing child in order; the first child found
    becomes the starting node of the next level.
    """
    level_start = root
    while level_start:
        next_head = None
        tail = None
        walker = level_start
        while walker:
            for child in (walker.left, walker.right):
                if child is None:
                    continue
                if tail is None:
                    next_head = child
                else:
                    tail.next = child
                tail = child
            walker = walker.next
        level_start = next_head
head = None
if __name__ == "__main__":
    # Build a sample tree by hand.  Note that neither connect function is
    # actually invoked here, so running this module only constructs the tree.
    head = Node(20)
    head.left = Node(10)
    head.left.left = Node(5)
    head.left.right = Node(15)
    head.right = Node(30)
    head.right.left = Node(25)
    head.right.right = Node(35)
| han8909227/leetcode | tree/sibliing_pointer_ii_lc117.py | sibliing_pointer_ii_lc117.py | py | 2,563 | python | en | code | 3 | github-code | 13 |
39216727434 | import cv2
import pyzbar.pyzbar as pyzbar
import time
cap = cv2.VideoCapture(0)
fob = open('atendence.txt','w+')
names = []
def enterDate(z):
    """Record name *z* once: append it to the global ``names`` list and write
    it to the global attendance file handle ``fob``.

    Returns the (shared) ``names`` list.

    NOTE(review): ``checkData`` below calls ``enterData`` (with an 'a'),
    which does not exist in this module and raises NameError at runtime --
    one of the two names is a typo; confirm and unify.
    """
    if z in names:
        pass
    else:
        names.append(z)
        # no-op when z is already a str; presumably meant to coerce z
        z = ''.join(str(z))
        fob.write(z+'\n')
    return names
print('二维码读取中...')
def checkData(data):
data = str(data)
if data in names:
print('已签到')
else:
print('\n'+str(len(names)+1)+'\n'+data)
enterData(data) | cerebrumWeaver/python-example | 摄像头识别二维码2.py | 摄像头识别二维码2.py | py | 483 | python | en | code | 0 | github-code | 13 |
25196503734 | from http.server import BaseHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
port = 8084
address = '0.0.0.0'
class Hendler(BaseHTTPRequestHandler):
    # NOTE(review): the class name looks like a typo for "Handler"; it is
    # referenced by name in main(), so any rename must update both places.
    def do_GET(self):
        """Answer every GET request with a fixed HTML demo page (UTF-8)."""
        self.send_response(200)
        self.send_header('Content-type','text/html')
        self.end_headers()
        content = '''
        <html>
            <head>
                <title>
                    Aula
                </title>
            </head>
            <body>
                <h1>Aula de Redes de Computadores</h1>
                <h2>IFPR Cascavel</h2>
                <input></input>
            </body>
        </html>
        '''
        self.wfile.write(bytes(content, 'utf-8'))
        return
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    # ThreadingMixIn makes the server handle each request in its own thread
    # instead of serially; no extra behavior is needed here.
    pass
def main():
    """Run the threaded HTTP server on (address, port) until interrupted.

    BUGFIX: the server is now constructed outside the try block -- in the
    original, a constructor failure (e.g. port already in use) raised before
    ``server`` was bound, and the KeyboardInterrupt handler would then have
    crashed with NameError on ``server.socket.close()``.
    """
    server = ThreadedHTTPServer((address, port), Hendler)
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print('Exiting server')
        server.socket.close()
if __name__ == "__main__":
main() | pedrobolfe/Redes | server_threads/http_server_lib_threads.py | http_server_lib_threads.py | py | 1,026 | python | en | code | 0 | github-code | 13 |
9783336458 | #!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
from common import *
import PIL
from pix_select import PixSelect
"""
The cost function is defined as
fi = Dt(C(x)pt_i) - tz(x)pr_i
where:
pt_i: The point i observed by ref camera
C(x) = K*T(x): Camera projection matrix
K: intrinsics matrix
T(x): Transform matrix for reference to target camera
tz(x): the z(3rd) row of T
x: 6DoF transform parameter
Dt: The depth image from target camera
"""
class PhotomericError:
    """Direct photometric alignment of reference 3D points against a target
    intensity image, minimized with Gauss-Newton over a 6-DoF transform.

    Attributes set in __init__:
      tar_img  -- target grayscale image (H x W array)
      ref_pts  -- reference points; rows 0..2 are XYZ, row 3 is intensity
      K        -- 3x3 camera intrinsics
      T        -- current 4x4 transform estimate (starts at identity)
      dTdx     -- constant 12x6 derivative of the flattened 3x4 transform
                  w.r.t. the 6 twist parameters
      border   -- margin (pixels) kept from the image edge during projection
    """
    def __init__(self, ref_pts, tar_img, K, show_log = False):
        self.tar_img = tar_img
        self.ref_pts = ref_pts
        self.H, self.W = tar_img.shape
        self.K = K
        self.T = v2T([0,0,0,0,0,0])
        self.dTdx = self.calc_dTdx()
        self.border = 2
        self.show_log = show_log
    def setT(self, T):
        """Set the initial 4x4 transform estimate."""
        self.T = T
    def track(self, max_err = 255):
        """Iteratively refine self.T by Gauss-Newton on photometric residuals.

        max_err clamps per-point residuals (see residuals()).  Stops when the
        mean error drops below 0.01 or improves by less than 0.001, in which
        case the previous estimate is restored and the diagnostic images
        img0/img1 are produced.

        NOTE(review): if the improvement test triggers on the very first
        iteration, ``self.last_T`` has not been assigned yet and this raises
        AttributeError -- confirm callers always provide a poor-enough start.
        """
        self.max_err = max_err
        tar_img = self.tar_img
        ref_pts = self.ref_pts
        K = self.K
        W = self.W
        H = self.H
        last_err = np.inf
        iter = 0
        while(True):
            #transform the reference points to target coordinate
            cur_pts = transform(self.T, ref_pts[0:3,:])
            #Projection points to camera
            reproj_pix = projection(K, cur_pts)
            #Make sure all points are located inside the camera boundaries
            check = range_check(reproj_pix, H, W, self.border)
            cur_pts = cur_pts[:,check]
            reproj_pix = reproj_pix[:,check]
            ref_intensity = ref_pts[3,check]
            #Calcate the residuals
            err, residuals = self.residuals(tar_img, reproj_pix, ref_intensity)
            #Calcate the partial derivative
            dCdT = self.calc_dCdT(K, cur_pts)
            if(self.show_log):
                print("iter:%d, err:%f"%(iter,err))
            iter+=1
            if err < 0.01:
                break
            if last_err - err < 0.001:
                self.T = self.last_T
                self.img0, self.img1 = self.get_proj_err_image(self.tar_img, reproj_pix, ref_intensity)
                #self.img0, self.img1 = reprojection_error_image(tar_depth, ref_depth, self.T, K)
                break
            #Calcate the jacobian
            dDdC = self.calc_dDdC(tar_img, reproj_pix)
            dCdx = np.matmul(dCdT, self.dTdx)
            dDdx = np.matmul(dDdC, dCdx)
            J = dDdx
            residuals = residuals  # NOTE(review): no-op statement
            #Gauss-nowton method
            J = J.reshape(-1,6)
            hessian = np.dot(J.T,J)
            hessian_inv = np.linalg.inv(hessian)
            temp = -np.dot(J.T, residuals)
            dx = np.dot(hessian_inv,temp)
            dT = v2T(dx)
            self.last_T = self.T
            self.T = np.dot(self.T, dT)
            last_err = err
    def calc_dTdx(self):
        """Return the constant 12x6 matrix dT/dx: each column Ai is the
        derivative of the flattened 3x4 transform w.r.t. one twist parameter
        (3 translations, then 3 rotations)."""
        A1 = np.array([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0. ]).reshape([3,4])
        A2 = np.array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0. ]).reshape([3,4])
        A3 = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1. ]).reshape([3,4])
        A4 = np.array([0, 0, 0, 0, 0, 0,-1, 0, 0, 1, 0, 0. ]).reshape([3,4])
        A5 = np.array([0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0, 0. ]).reshape([3,4])
        A6 = np.array([0,-1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0. ]).reshape([3,4])
        dTdx = np.vstack([A1.flatten(),
                          A2.flatten(),
                          A3.flatten(),
                          A4.flatten(),
                          A5.flatten(),
                          A6.flatten()]).T
        return dTdx
    def residuals(self, img, pix, intensity):
        """Return (mean_abs_error, per-point residuals) between the image
        sampled at pix and the reference intensities; NaNs are zeroed and
        residuals are clamped to +/- self.max_err."""
        residuals = interpn(img, pix) - intensity
        residuals = np.nan_to_num(residuals)
        residuals = np.clip(residuals, -self.max_err, self.max_err)
        r = np.abs(residuals)
        e = np.nansum(r)
        return e/(residuals.shape[0]), residuals
    def get_proj_err_image(self, img, pix, intensity):
        """Render (error_image, reprojection_image): per-pixel absolute
        residuals and the reference intensities splatted at rounded pix."""
        residuals = interpn(img, pix) - intensity
        residuals = np.nan_to_num(residuals)
        r = np.abs(residuals)
        error_image = np.zeros_like(img).astype(float)
        reproj_image = np.zeros_like(img).astype(float)
        #reproj_image.fill(np.nan)
        #error_image.fill(np.nan)
        pix = np.around(pix)
        pix = pix.astype(int)
        error_image[pix[1], pix[0]] = r
        reproj_image[pix[1], pix[0]] = intensity
        return error_image, reproj_image
    def calc_dDdC(self, img, pix):
        """Image gradient dD/dC at pix via forward differences in x and y;
        NaNs (out-of-image samples) become zero."""
        pix_x1y0 = pix.copy()
        pix_x1y0[0] = pix_x1y0[0] + 1.
        pix_x0y1 = pix.copy()
        pix_x0y1[1] = pix_x0y1[1] + 1.
        x0y0 = interpn(img, pix)
        x1y0 = interpn(img, pix_x1y0)
        x0y1 = interpn(img, pix_x0y1)
        dx = x1y0 - x0y0
        dy = x0y1 - x0y0
        dDdC = np.nan_to_num(np.dstack([dx.reshape(-1,1),dy.reshape(-1,1)]))
        return dDdC
    def calc_dCdT(self, K, pts):
        """Per-point 2x12 derivative of the pixel projection w.r.t. the
        flattened 3x4 transform, evaluated at camera-frame points pts."""
        x = pts[0,:]
        y = pts[1,:]
        z = pts[2,:]
        z2 = z*z
        xy = x*y
        x2 = x*x
        y2 = y*y
        fx = K[0,0]
        fy = K[1,1]
        dCdT = np.zeros([pts.shape[1], 2, 12])
        dCdT[:,0, 0] = fx*x/z
        dCdT[:,0, 1] = fx*y/z
        dCdT[:,0, 2] = fx
        dCdT[:,0, 3] = fx/z
        dCdT[:,0, 8] = -fx*x2/z2
        dCdT[:,0, 9] = -fx*xy/z2
        dCdT[:,0,10] = -fx*x/z
        dCdT[:,0,11] = -fx*x/z2
        dCdT[:,1, 4] = fy*x/z
        dCdT[:,1, 5] = fy*y/z
        dCdT[:,1, 6] = fy
        dCdT[:,1, 7] = fy/z
        dCdT[:,1, 8] = -fy*xy/z2
        dCdT[:,1, 9] = -fy*y2/z2
        dCdT[:,1,10] = -fy*y/z
        dCdT[:,1,11] = -fy*y/z2
        return dCdT
    def calc_dtzdT(self, pts):
        """Per-point 1x12 derivative of the z-row of the transform (tz) with
        respect to the flattened 3x4 transform.  Not used by track()."""
        x = pts[0,:]
        y = pts[1,:]
        z = pts[2,:]
        dtzdT = np.zeros([pts.shape[1], 1, 12])
        dtzdT[:,0, 8] = x
        dtzdT[:,0, 9] = y
        dtzdT[:,0,10] = z
        dtzdT[:,0,11] = 1
        return dtzdT
if __name__ == "__main__":
    # KITTI sequence-00 intrinsics (pixels).
    fx = 721.538
    fy = 721.538
    cx = 609.559
    cy = 172.854
    K = np.array([[fx,0, cx], [0, fy, cy], [0,0,1]])
    # NOTE(review): hard-coded absolute paths -- this demo only runs on the
    # original author's machine.
    ref_depth = np.load('/home/liu/workspace/VisualLidar/depth/0000.npy')
    ref_img = np.asarray(PIL.Image.open('/media/liu/SSD-PSMU3/kitti_slam/00/image_2/000000.png').convert('L'))
    tar_img = np.asarray(PIL.Image.open('/media/liu/SSD-PSMU3/kitti_slam/00/image_2/000001.png').convert('L'))
    pix = PixSelect(ref_img)
    # NOTE(review): this ref_pts is immediately overwritten on the next
    # line, making the getpts/PixSelect computation dead code.
    ref_pts = getpts(ref_depth, ref_img, pix, K ,1)
    ref_pts = depth2ptsI(ref_depth, ref_img, K ,2)
    matcher = PhotomericError(ref_pts, tar_img, K, True)
    matcher.setT(v2T([0,0,0,0,0,0]))
    # NOTE(review): track() is called twice; the second call re-runs with a
    # tighter residual clamp (10) starting from the first result.
    matcher.track()
    matcher.track(10)
    # Re-project a denser point set with the final transform for display.
    ref_pts = depth2ptsI(ref_depth, ref_img, K ,1)
    cur_pts = transform(matcher.T, ref_pts[0:3,:])
    reproj_pix = projection(K, cur_pts)
    check = range_check(reproj_pix, tar_img.shape[0], tar_img.shape[1], 1)
    cur_pts = cur_pts[:,check]
    reproj_pix = reproj_pix[:,check]
    ref_intensity = ref_pts[3,check]
    img0, img1 = matcher.get_proj_err_image(matcher.tar_img, reproj_pix, ref_intensity)
    fig, axes= plt.subplots(3)
    axes[0].imshow(img0, vmin=0, vmax=30)
    axes[0].set_title('reproj error',loc='left')
    axes[1].imshow(img1, vmin=0, vmax=255)
    axes[1].set_title('ref reproj to tar depth',loc='left')
    axes[2].imshow(matcher.tar_img, vmin=0, vmax=255)
    axes[2].set_title('tar depth',loc='left')
    print(matcher.T)
    # Disabled open3d point-cloud visualization.  NOTE(review): it
    # references ``tar_depth``, which is undefined in this scope, so it
    # would crash if ever re-enabled as-is.
    if(False):
        import open3d as o3d
        pcd0 = o3d.geometry.PointCloud()
        tar_pts = depth2pts(tar_depth, K)
        pcd0.points = o3d.utility.Vector3dVector(tar_pts.T)
        c = np.zeros_like(tar_pts.T)
        c[:,2] = 1
        pcd0.colors = o3d.utility.Vector3dVector(c)
        pcd1 = o3d.geometry.PointCloud()
        ref_pts = depth2pts(ref_depth, K)
        ref_pts = transform(np.linalg.inv(matcher.T), ref_pts)
        pcd1.points = o3d.utility.Vector3dVector(ref_pts.T)
        c = np.zeros_like(ref_pts.T)
        c[:,1] = 1
        pcd1.colors = o3d.utility.Vector3dVector(c)
        o3d.visualization.draw_geometries([pcd0, pcd1])
plt.show() | scomup/VisualLidar | photometric_error.py | photometric_error.py | py | 8,025 | python | en | code | 0 | github-code | 13 |
28326017557 | from odoo import api, fields, models
class CreateAmendmentWizard(models.TransientModel):
    """Wizard that creates an amendment for an existing HR contract.

    NOTE(review): ``@api.multi`` exists only in Odoo <= 12; this module is
    tied to that API generation.
    """
    _name = "hr.contract.amendment.wizard"
    _description = "Create Contract Amendment"

    # Contract being amended; pre-filled from the active record (default_get).
    contract_id = fields.Many2one(
        comodel_name="hr.contract", string="Amendment of"
    )
    # Amendment contract type, restricted to echelon == 'amendment'.
    type_id = fields.Many2one(
        comodel_name="hr.contract.type",
        string="Contract Type",
        domain="[('echelon', '=', 'amendment')]",
        required=True,
        default="",
        copy=False,
    )

    @api.model
    def default_get(self, field_names):
        """Default ``contract_id`` to the contract the wizard was opened on.

        NOTE(review): raises KeyError when 'active_id' is missing from the
        context (wizard launched without an active record) -- confirm that is
        acceptable.
        """
        defaults = super().default_get(field_names)
        contract_id = self.env.context["active_id"]
        defaults["contract_id"] = contract_id
        return defaults

    @api.multi
    def create_amendment(self):
        """Delegate amendment creation to the contract, using the chosen type."""
        self.ensure_one()
        return self.contract_id.create_amendment(self.type_id)
| odoo-cae/odoo-addons-hr-incubator | hr_cae_contract/wizard/create_amendment_wizard.py | create_amendment_wizard.py | py | 871 | python | en | code | 0 | github-code | 13 |
14646969435 | from sqlalchemy import Boolean, Column, ForeignKey, Identity, Integer, Table
from . import metadata
# Auto-generated SQLAlchemy Core table for Stripe's terminal reader
# "process config" resource.
TerminalReaderReaderResourceProcessConfigJson = Table(
    "terminal_reader_reader_resource_process_configjson",
    metadata,
    Column(
        "skip_tipping",
        Boolean,
        comment="Override showing a tipping selection screen on this transaction",
        nullable=True,
    ),
    Column(
        "tipping",
        # NOTE(review): TerminalReaderReaderResourceTippingConfig is neither
        # imported nor defined in this module, so importing this module
        # raises NameError; it is also not a SQLAlchemy column type.  The
        # generator likely intended an Integer FK column here -- confirm.
        TerminalReaderReaderResourceTippingConfig,
        ForeignKey("TerminalReaderReaderResourceTippingConfig"),
        nullable=True,
    ),
    Column("id", Integer, primary_key=True, server_default=Identity()),
)
# NOTE(review): this exported name contains a dot and matches no identifier
# defined above; ``from module import *`` would fail on it.
__all__ = ["terminal_reader_reader_resource_process_config.json"]
| offscale/stripe-sql | stripe_openapi/terminal_reader_reader_resource_process_config.py | terminal_reader_reader_resource_process_config.py | py | 712 | python | en | code | 1 | github-code | 13 |
3079573899 | import numpy as np
from numpy import ndarray
from errors_namedtuple import SurveillanceErrors
from surveillance_data import SurveillanceData
from target import Target
from trace_ import Trace
class MultiFunctionalRadar:
    """Model of a multi-functional radar (MFR)."""
    __slots__ = ("start_tick",
                 "tick",
                 "number",
                 "stable_point",
                 "is_adjustment",
                 "residuals",
                 "surveillance_data",
                 "target_list",
                 "trace_list",
                 "registration")

    def __init__(self,
                 target_list: list,
                 stable_point: ndarray = np.zeros(3),
                 mfr_number: int = 1,
                 errors: SurveillanceErrors = SurveillanceErrors(0, 0)) -> None:
        # Tick at which this MFR starts operating
        self.start_tick = 0.
        # Current time, in ticks
        self.tick = 0.
        # MFR number
        self.number = mfr_number
        # Emplacement (stable) point
        # NOTE(review): the np.zeros(3) default is created once at function
        # definition time and shared by all calls; it is only read here, but
        # confirm no caller mutates it.
        self.stable_point = stable_point
        # Alignment (adjustment) flag: MFR #1 is considered aligned
        self.is_adjustment = True if self.number == 1 else False
        # Correction (residuals) vector
        self.residuals = None
        # Surveillance parameters
        self.surveillance_data = SurveillanceData(errors)
        # Targets this MFR attempts to track
        self.target_list = target_list
        # Traces, one per target
        self.trace_list = [Trace(target=trg,
                                 mfr_number=self.number,
                                 mfr_stable_point=self.stable_point) for trg in target_list]
        # Logged information about every trace of this MFR
        self.registration = []

    def __repr__(self) -> str:
        return f"МФР c номером {self.number!r}, c точкой стояния {self.stable_point!r}. " \
               f"Объект класса {self.__class__.__name__} по адресу в памяти {hex(id(self))}"

    def operate(self, ticks: int) -> None:
        """Main operation algorithm, one call per simulation tick.

        :param ticks: current time in ticks
        :type ticks: int

        :return: None
        """
        # Current time relative to this MFR's start
        self.tick = ticks - self.start_tick
        if self.tick >= 0:
            # Add or remove traces from the MFR's trace list
            self.update_trace_list()
            # Track targets: measure, filter, convert to the MFR local
            # Cartesian coordinate system
            self.tracking()
            # Form messages for the command post (PBU)
            self.update_source_traces()
            # Log the variables of interest
            self.register()

    def update_trace_list(self) -> None:
        """Update the trace list.

        Removes traces for targets that can no longer be tracked, and adds
        traces for trackable targets that had none.

        :return: None
        """
        # Loop over all targets
        for target in self.target_list:
            # Check whether the target can be tracked
            # Target coordinates in the MFR coordinate system
            real_target_coordinates = target.coordinates - self.stable_point
            # If tracking is possible
            if self.surveillance_data.validate_tracking(real_target_coordinates):
                # add a trace unless one already exists for this target
                self.append_trace_for_target(target)
            else:
                # If tracking is impossible but a trace exists, remove it
                self.remove_trace_for_target(target)

    def append_trace_for_target(self, target: Target) -> None:
        """Add a trace for a target that has none yet.

        :param target: target to create a trace for
        :type target: Target

        :return: None
        """
        # If no trace exists for this target yet
        if not any(target is trace.target for trace in self.trace_list):
            # add one
            self.trace_list.append(Trace(target=target,
                                         mfr_number=self.number,
                                         mfr_stable_point=self.stable_point))

    def remove_trace_for_target(self, target: Target) -> None:
        """Remove the trace associated with a target.

        :param target: target whose trace should be removed
        :type target: Target

        :return: None
        """
        self.trace_list = list(filter(lambda trace: trace.target is not target, self.trace_list))

    def tracking(self) -> None:
        """Tracking algorithm.

        :return: None
        """
        for trace in self.trace_list:
            # Only on this trace's update cadence
            if not self.tick % trace.frame_tick:
                # Measurement
                self.create_measurement(trace)
                # Filtering
                trace.filtrate()
                # Conversion to Cartesian coordinates
                self.calculate_trace_to_dec(trace)

    def create_measurement(self, trace: Trace) -> None:
        """Measure target coordinates.

        :param trace: target trace
        :type trace: Trace

        :return: None
        """
        # Convert the target's true position/derivatives to the rectangular
        # Cartesian MFR local coordinate system
        coordinates_dec = trace.target.coordinates - self.stable_point
        velocities_dec = trace.target.velocities
        # Convert the target's true position/derivatives to the MFR
        # biconical coordinate system (BCS)
        dec2bcs = self.surveillance_data.position_antenna_data.dec2bcs
        coordinates_bcs, velocities_bcs = dec2bcs(coordinates_dec, velocities_dec)
        # Pick the standard deviations for BCS coordinates
        sigma_bcs = self.surveillance_data.sigma_bcs
        # Measure the target's biconical coordinates, each modeled as a
        # normally distributed value
        trace.measure(coordinates_bcs, sigma_bcs)

    def calculate_trace_to_dec(self, trace: Trace) -> None:
        """Convert coordinates and covariance matrices to the MFR local
        coordinate system.

        :param trace: target trace
        :type trace: Trace

        :return: None
        """
        # Pick the function converting coordinates and velocities
        bcs2dec = self.surveillance_data.position_antenna_data.bcs2dec
        # Compute coordinates and velocities in the rectangular Cartesian
        # MFR local system, taking corrections into account
        trace.calculate_dec_coord_and_vel(bcs2dec, self.residuals)
        # Pick the function converting covariance matrices
        bsc2dec_for_matrix = self.surveillance_data.position_antenna_data.calc_dec_covariance_matrix_from_bcs
        # Compute covariance matrices in the rectangular Cartesian MFR
        # local system
        trace.calculate_dec_covariance_matrix(bsc2dec_for_matrix)

    def update_source_traces(self) -> None:
        """Refresh the source-trace data consumed by the command post (PBU).

        :return: None
        """
        for trace in self.trace_list:
            # Only on this trace's update cadence
            if not self.tick % trace.frame_tick:
                trace.update_source_trace()

    def register(self) -> None:
        """Log the MFR's operation.

        :return: None
        """
        # Loop over all traces
        for trace in self.trace_list:
            # Only on this trace's update cadence
            if not self.tick % trace.frame_tick:
                # Values we want to log:
                registration_row = [self.tick, self.number, self.is_adjustment, *trace.target.registration, *trace.source_trace.registration]
                self.registration.append(registration_row)
| Igor9rov/TrackingAndIdentificationModel | Model/ModelMFR/multi_functional_radar.py | multi_functional_radar.py | py | 9,286 | python | ru | code | 0 | github-code | 13 |
14430871065 | '''
Write a script that takes a sentence from the user and returns:
- the number of lower case letters
- the number of uppercase letters
- the number of punctuations characters
- the total number of characters
Use a dictionary to store the count of each of the above.
Note: ignore all spaces.
Example input:
I love to work with dictionaries!
Example output:
Upper case: 1
Lower case: 26
Punctuation: 1
'''
def char_analysis(message):
    """Classify every character of *message* (spaces ignored) into upper-case,
    lower-case, numeric and punctuation buckets, plus a total count.

    Returns a dict keyed by the display labels used for printing.
    """
    chars = message.replace(" ", "")
    counts = {"Upper case:": 0, "Lower case:": 0, "Punctuation:": 0,
              "Numbers:": 0, "Total characters:": len(chars)}
    for ch in chars:
        if ch.islower():
            bucket = "Lower case:"
        elif ch.isalpha():
            bucket = "Upper case:"
        elif ch.isnumeric():
            bucket = "Numbers:"
        elif not ch.isalnum():
            bucket = "Punctuation:"
        else:
            continue
        counts[bucket] += 1
    return counts
def print_dictionary(d):
    """Print every key/value pair of *d* on its own line."""
    for key, value in d.items():
        print(key, value)
user_input = input()
print_dictionary(char_analysis(user_input))
| lauramayol/laura_python_core | week_03/labs/09_dictionaries/09_03_count_cases.py | 09_03_count_cases.py | py | 1,160 | python | en | code | 0 | github-code | 13 |
15021678370 | #!/usr/bin/env python3
"""thread.py: The threading manager file for the CarSoft project."""
__author__ = "Rhys Read"
__copyright__ = "Copyright 2019, Rhys Read"
import logging
import threading
class ThreadManager(object):
    """Owns a collection of daemon worker threads.

    The class attribute ``instance`` records the most recently created
    manager so other modules can reach it; creating a second manager logs a
    warning (original behavior preserved).
    """

    instance = None

    def __init__(self):
        if ThreadManager.instance is not None:
            logging.warning('ThreadManager repeat instance occurrence. Please check as this is undesirable.')
        ThreadManager.instance = self
        self.__threads = []

    def add_task(self, func, args: tuple, start=True):
        """Create a daemon thread running ``func(*args)``; start it unless
        ``start`` is False.  Returns the thread."""
        thread = threading.Thread(target=func, args=args, daemon=True)
        self.__threads.append(thread)
        if start:
            thread.start()
        return thread

    def check_threads(self, remove_complete=True):
        """Drop finished threads from the registry when ``remove_complete``.

        BUGFIX: the original removed items from the list while iterating
        over it, which skips the element following every removal and leaves
        some dead threads registered.  Rebuilding the list avoids that.
        """
        if remove_complete:
            self.__threads = [t for t in self.__threads if t.is_alive()]

    def start(self):
        """Start every registered thread that is not currently alive.

        NOTE(review): threading.Thread objects cannot be restarted -- a
        thread that has already finished is not alive and would raise
        RuntimeError here; call check_threads() first.
        """
        for thread in self.__threads:
            if not thread.is_alive():
                thread.start()
| RhysRead/CarSoft | src/thread.py | thread.py | py | 1,052 | python | en | code | 0 | github-code | 13 |
72941958098 |
'''
/*
We are working on a security system for a badged-access room in our company's building.
We want to find employees who badged into our secured room unusually often.
We have an unordered list of names and entry times over a single day. Access times are given as numbers up to four digits in length using 24-hour time,
such as "800" or "2250".
Write a function that finds anyone who badged into the room three or more times in a one-hour period. <= 60, times >= 3
Your function should return each of the employees who fit that criteria, plus the times that they badged in during the one-hour period.
If there are multiple one-hour periods where this was true for an employee, just return the earliest one for that employee.
badge_times = [
["Paul", "1355"], ["Jennifer", "1910"], ["Jose", "835"],
["Jose", "830"], ["Paul", "1315"], ["Chloe", "0"],
["Chloe", "1910"], ["Jose", "1615"], ["Jose", "1640"],
["Paul", "1405"], ["Jose", "855"], ["Jose", "930"],
["Jose", "915"], ["Jose", "730"], ["Jose", "940"],
["Jennifer", "1335"], ["Jennifer", "730"], ["Jose", "1630"],
["Jennifer", "5"], ["Chloe", "1909"], ["Zhang", "1"],
["Zhang", "10"], ["Zhang", "109"], ["Zhang", "110"],
["Amos", "1"], ["Amos", "2"], ["Amos", "400"],
["Amos", "500"], ["Amos", "503"], ["Amos", "504"],
["Amos", "601"], ["Amos", "602"]];
1, 2, 3, 4, 5, 6
Expected output (in any order)
Paul: 1315 1355 1405
Jose: 830 835 855 915 930
Zhang: 10 109 110
Amos: 500 503 504
*/
'''
import collections
def find_people(badge_times):
    """Return {name: times} for employees who badged in three or more times
    within a single one-hour window (earliest such window per employee)."""
    per_person = collections.defaultdict(list)
    for employee, stamp in badge_times:
        per_person[employee].append(stamp)
    offenders = {}
    for employee, stamps in per_person.items():
        if len(stamps) < 3:
            continue
        collect_res(employee, stamps, offenders)
    return offenders
def collect_res(k, v, res):
# 忘记sort了
v.sort(key = lambda x : int(x))
i = 0
n = len(v)
for j in range(n):
# invalid move i
while int(v[j]) - int(v[i]) > 100:
i += 1
# valid move j get max
if j - i + 1 >= 3:
while j < n and int(v[j]) - int(v[i]) <= 100:
j += 1
res[k] = v[i : j]
break
#Jose': ['835', '830', '1615', '1640', '855', '930', '915', '730', '940', '1630'],#
# def parse(s):
# if len(s) <= 2:
# return int(s)
# if len(s) == 3:
# return int(s[0]) * 60 + int(s[1:3])
# if len(s) == 4:
# return int(s[:2]) * 60 + int(s[2:4])
badge_times = [
["Paul", "1355"], ["Jennifer", "1910"], ["Jose", "835"],
["Jose", "830"], ["Paul", "1315"], ["Chloe", "0"],
["Chloe", "1910"], ["Jose", "1615"], ["Jose", "1640"],
["Paul", "1405"], ["Jose", "855"], ["Jose", "930"],
["Jose", "915"], ["Jose", "730"], ["Jose", "940"],
["Jennifer", "1335"], ["Jennifer", "730"], ["Jose", "1630"],
["Jennifer", "5"], ["Chloe", "1909"], ["Zhang", "1"],
["Zhang", "10"], ["Zhang", "109"], ["Zhang", "110"],
["Amos", "1"], ["Amos", "2"], ["Amos", "400"],
["Amos", "500"], ["Amos", "503"], ["Amos", "504"],
["Amos", "601"], ["Amos", "602"],
]
print(find_people(badge_times))
| isabellakqq/Alogorithm | twoPointers/slidingWindow/robinhood.py | robinhood.py | py | 3,405 | python | en | code | 2 | github-code | 13 |
43114959572 | from preprocessor import *
import time
def main():
    """Build user/movie objects from the data files, predict a rating for
    every (user, movie) pair in the test set, and write the predictions to
    output1.txt (one per line)."""
    s = time.time()
    #grab data from files
    actor_data,director_data,genre_data,tags_data,user_tag_data,train_data,test_data = get_dataframes()
    #generate movie and user objects
    movies,users = get_movies(actor_data,director_data,genre_data,tags_data,user_tag_data,train_data,test_data)
    print('Number of users: {}'.format(len(users)))
    print('Number of movies: {}'.format(len(movies)))
    #populate final_ratings with ratings using User.get_rating(movie dictionary, movie to get rating for)
    final_ratings = []
    counter = 0
    #model creation
    for test in test_data.itertuples():
        #get ids from current test
        u_id = test[1]
        mov_id = test[2]
        #get movie & user
        movie = movies.get(mov_id)
        #some movies aren't in the initial grab of movies
        # NOTE(review): when the movie is missing, a message is printed but
        # get_rating is still called with movie=None below -- confirm
        # User.get_rating handles a None movie.
        if movie is None:
            print('couldnt find movie {}'.format(str(mov_id)))
        user = users.get(u_id)
        #gets predicted rating of movie from this user
        pred_rating = user.get_rating(movies, movie)
        final_ratings.append(pred_rating)
        # progress log every 100 predictions
        if (counter % 100) == 0:
            print('done with {}'.format(counter))
        counter += 1
    #write predicted ratings out to file
    with open('output1.txt', 'w') as f:
        for rating in final_ratings:
            f.write("%s\n" % str(rating))

    # Historical benchmark notes from the author:
    #before changing movie attributes to dicts
    #runtime 6748 seconds
    #score 1.13
    #rank 40

    #after changing attributes to dicts
    #rutnime 685 seconds
    #score 1.14
    #rank 42
    e = time.time()
    #takes about 3.5 minutes rn
    print("runtime = " + str(e-s) + " seconds")
if __name__ == '__main__':
main() | jtouma1/CS484_HW4 | src/recommender.py | recommender.py | py | 1,542 | python | en | code | 0 | github-code | 13 |
41890264439 | """
Пользователь вводит две даты в формате ДД.ММ.ГГГГ ЧЧ:ММ. Пользователь вводит
третью дату в формате ДД.ММ.ГГГГ ЧЧ:ММ. Определить, лежит ли дата внутри
временного интервала, образованного первыми двумя датами.
"""
from task_3 import date_check
def date_occurrence(checked_date, checked_date_2, checked_date_3):
    """Print whether the third date lies inside the interval formed by the
    first two dates, regardless of their order ('Входит' / 'Не входит')."""
    lower, upper = sorted((checked_date, checked_date_2))
    if lower <= checked_date_3 <= upper:
        print('Входит')
    else:
        print('Не входит')
if __name__ == "__main__":
    # Prompt for three DD.MM.YYYY HH:MM timestamps (parsed/validated by
    # task_3.date_check) and report whether the third lies between the
    # first two.
    date_1 = date_check(input('Введите первую дату в формате ДД.ММ.ГГГГ ЧЧ:ММ: '))
    date_2 = date_check(input('Введите вторую дату в формате ДД.ММ.ГГГГ ЧЧ:ММ: '))
    date_3 = date_check(input('Введите третью дату, которую нужно определить, в формате ДД.ММ.ГГГГ ЧЧ:ММ: '))
    date_occurrence(date_1, date_2, date_3)
"""
12.02.2021 15:17
12.05.2021 17:01
12.04.2021 20:17
13.12.2021 15:17
"""
| Dkodsy/Practical-minimum | task_5.py | task_5.py | py | 1,264 | python | ru | code | 0 | github-code | 13 |
def vowel_count(phrase):
    """Return frequency map of vowels, case-insensitive.

    >>> vowel_count('rithm school')
    {'i': 1, 'o': 2}
    >>> vowel_count('HOW ARE YOU? i am great!')
    {'o': 2, 'a': 3, 'e': 2, 'u': 1, 'i': 1}
    """
    vowels = {'a', 'e', 'i', 'o', 'u'}
    # renamed from ``dict``, which shadowed the builtin of the same name
    counts = {}
    for ltr in phrase.lower():
        if ltr in vowels:
            counts[ltr] = counts.get(ltr, 0) + 1
    return counts
21872634930 | """
This script is a module called by cwreport.py, it creates the csv file
"""
import yaml
import numpy
# Open the metrics configuration file metrics.yaml and retrive settings
with open("metrics.yaml", 'r') as f:
metrics = yaml.load(f, Loader=yaml.FullLoader)
def requested_overall_statistic(data, statistic=None):
    """Reduce *data* with the statistic requested in metrics.yaml.

    Called by write_to_csv() to collapse a metric's datapoints into one
    number.  Supported statistics (case-insensitive): 'maximum', 'minimum',
    'sum', 'average'.  Anything else falls back to numpy.size (the
    datapoint count), matching the original behavior.

    :param data: array-like of metric datapoints
    :param statistic: optional statistic-name override; when None (the
        default used by all existing callers) the name is read from the
        module-level ``metrics`` configuration loaded from metrics.yaml.
    :return: the reduced value
    """
    user_request = metrics['statistics'] if statistic is None else statistic
    # Dispatch table replaces the original getattr-by-string if/elif chain.
    reducers = {
        'maximum': numpy.max,
        'minimum': numpy.min,
        'sum': numpy.sum,
        'average': numpy.average,
    }
    return reducers.get(user_request.lower(), numpy.size)(data)
def make_csv_header(service):
    """Return the CSV header row for the given service type.

    'ec2' and 'tgwattachment' have fixed headers; every other service gets a
    'Resource Identifier' column plus one column per metric configured in
    metrics.yaml.
    """
    if service == 'ec2':
        return [
            'Name', 'Instance', 'Type', 'Hypervisor', 'Virtualization Type',
            'Architecture', 'EBS Optimized', 'CPUUtilization (Percent)',
            'DiskReadOps (Count)', 'DiskWriteOps (Count)',
            'DiskReadBytes (Bytes)', 'DiskWriteBytes (Bytes)',
            'NetworkIn (Bytes)', 'NetworkOut (Bytes)',
            'NetworkPacketsIn (Count)', 'NetworkPacketsOut (Count)',
        ]
    if service == 'tgwattachment':
        return [
            'Attachment ID', 'TransitGateway ID', 'Resource Type',
            'Resource ID', 'BytesIn', 'BytesOut', 'PacketsIn (Count)',
            'PacketsOut (Count)', 'PacketDropCountBlackhole (Count)',
            'PacketDropCountNoRoute (Count)',
        ]
    header = ['Resource Identifier']
    for metric in metrics['metrics_to_be_collected'][service]:
        header.append(metric['name'] + " (" + metric['unit'] + ")")
    return header
# function to write to csv
def write_to_csv(service, csvwriter, resource, metrics_info):
    """Write one CSV row for *resource*, reducing each metric's datapoints
    in *metrics_info* with requested_overall_statistic and rounding to two
    decimals.  Layout matches the header produced by make_csv_header."""
    if service == 'ec2':
        # get instance name from the 'Name' tag (empty string when absent)
        if resource.tags:
            name_dict = next(
                (i for i in resource.tags if i['Key'] == 'Name'),
                None)
        else:
            name_dict = None
        csvwriter.writerow([
            '' if name_dict is None else name_dict.get('Value'),
            resource.id,
            resource.instance_type,
            resource.hypervisor,
            resource.virtualization_type,
            resource.architecture,
            resource.ebs_optimized,
            numpy.round(requested_overall_statistic(metrics_info['CPUUtilization']), 2),
            numpy.round(requested_overall_statistic(metrics_info['DiskReadOps']), 2),
            numpy.round(requested_overall_statistic(metrics_info['DiskWriteOps']), 2),
            numpy.round(requested_overall_statistic(metrics_info['DiskReadBytes']), 2),
            numpy.round(requested_overall_statistic(metrics_info['DiskWriteBytes']), 2),
            numpy.round(requested_overall_statistic(metrics_info['NetworkIn']), 2),
            numpy.round(requested_overall_statistic(metrics_info['NetworkOut']), 2),
            numpy.round(requested_overall_statistic(metrics_info['NetworkPacketsIn']), 2),
            numpy.round(requested_overall_statistic(metrics_info['NetworkPacketsOut']), 2)
        ])
    elif service == 'tgwattachment':
        # get attachment name from the 'Name' tag; unlike ec2, fall back to
        # the attachment ID when the tag is absent
        if resource['Tags']:
            name_dict = next(
                (i for i in resource['Tags'] if i['Key'] == 'Name'),
                None)
        else:
            name_dict = None
        csvwriter.writerow([
            resource['TransitGatewayAttachmentId'] if name_dict is None else name_dict.get('Value'),
            resource['TransitGatewayId'],
            resource['ResourceType'],
            resource['ResourceId'],
            numpy.round(requested_overall_statistic(metrics_info['BytesIn']), 2),
            numpy.round(requested_overall_statistic(metrics_info['BytesOut']), 2),
            numpy.round(requested_overall_statistic(metrics_info['PacketsIn']), 2),
            numpy.round(requested_overall_statistic(metrics_info['PacketsOut']), 2),
            numpy.round(requested_overall_statistic(metrics_info['PacketDropCountBlackhole']), 2),
            numpy.round(requested_overall_statistic(metrics_info['PacketDropCountNoRoute']), 2)
        ])
    else:
        # Generic services: identifier column followed by the metrics
        # configured in metrics.yaml, in configuration order.
        row_data = [resource]
        for metric in metrics['metrics_to_be_collected'][service]:
            row_data.append(numpy.round(requested_overall_statistic(metrics_info[metric['name']]), 2))
        csvwriter.writerow(row_data)
| k-guo/collect-aws-vpc-cloudwatch-stats | csvconfig.py | csvconfig.py | py | 5,387 | python | en | code | 0 | github-code | 13 |
69905804498 | from setuptools import setup, find_packages
# Long description is taken verbatim from the README.
with open('README.rst') as f:
    description = f.read()

setup(
    name='eelale',
    url='http://github.com/emulbreh/eelale/',
    # NOTE(review): '0.3.0-dev' is not a valid PEP 440 version; modern
    # setuptools normalizes it to 0.3.0.dev0 with a deprecation warning --
    # consider writing it that way explicitly.
    version='0.3.0-dev',
    packages=find_packages(),
    license='MIT License',
    author='',
    maintainer='Johannes Dollinger',
    maintainer_email='emulbreh@googlemail.com',
    description='cross-compiles Python wheels',
    long_description=description,
    include_package_data=True,
    install_requires=[
        'click',
    ],
    entry_points={
        'console_scripts': [
            'eelale = eelale.cli:main',
        ],
    },
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3'
    ]
)
| emulbreh/eelale | setup.py | setup.py | py | 878 | python | en | code | 0 | github-code | 13 |
72161269779 | from sqlalchemy.engine import Engine
from sqlmodel import Session, SQLModel, create_engine, select
from .models import Category, Entry
class Service:
    """Thin persistence layer over a SQLModel engine."""

    engine: Engine

    def __init__(self, connection_string: str = "sqlite://") -> None:
        # Defaults to an in-memory SQLite database.
        self.engine = create_engine(connection_string)

    def init(self):
        """Create all tables registered on the SQLModel metadata."""
        SQLModel.metadata.create_all(self.engine)

    def list_entries(self) -> list[tuple[Entry, Category]]:
        """Return every entry paired with its category."""
        query = select(Entry, Category).where(Entry.category_id == Category.id)
        with Session(self.engine) as session:
            return session.exec(query).all()

    def create_entry(self, entry: Entry) -> Entry:
        """Persist *entry* and return it refreshed with DB-generated values."""
        with Session(self.engine) as session:
            session.add(entry)
            session.commit()
            session.refresh(entry)
            return entry

    def list_categories(self) -> list[Category]:
        """Return all categories."""
        with Session(self.engine) as session:
            return session.exec(select(Category)).all()

    def create_category(self, category: Category) -> Category:
        """Persist *category* and return it refreshed with DB-generated values."""
        with Session(self.engine) as session:
            session.add(category)
            session.commit()
            session.refresh(category)
            return category
| humrochagf/midas | backend/midas/service.py | service.py | py | 1,274 | python | en | code | 0 | github-code | 13 |
12345254692 | import streamlit as st
def app():
    """Render the Streamlit page for the profitability-index calculator."""
    st.title('Profitability Index')

    # Two side-by-side inputs: PV of net annual cash flows and the
    # initial investment.
    col_cash_flows, col_investment = st.columns(2)
    with col_cash_flows:
        pv_cash_flows = st.number_input("Enter the present value of net annual cash flows($): ", min_value=0.0, format='%f')
    with col_investment:
        initial_investment = st.number_input("Enter the initial investment($): ", min_value=0.0, format='%f')

    # PI = PV of cash flows / initial investment; report 0 for a zero
    # investment instead of raising.
    try:
        profitability_index = pv_cash_flows / initial_investment
    except ZeroDivisionError:
        profitability_index = 0

    st.header("**Answer**")
st.subheader("Profitability Index: " + str(round(profitability_index,2))) | andrewdwallo/AccountingCalculator | AccountingCalculator/apps/profitability_index.py | profitability_index.py | py | 742 | python | en | code | 0 | github-code | 13 |
42994498679 | #
# custom_board.py
#
# - For build.address replace VECT_TAB_ADDR to relocate the firmware
# - For build.ldscript use one of the linker scripts in buildroot/share/PlatformIO/ldscripts
#
import pioutil
if pioutil.is_pio_build():
    import marlin

    # Pull the board-specific build options from the PlatformIO config.
    board_config = marlin.env.BoardConfig()

    # Relocate the firmware when a custom vector table address is given.
    firmware_address = board_config.get("build.address", "")
    if firmware_address:
        marlin.relocate_firmware(firmware_address)

    # Switch to a custom linker script when one is configured.
    linker_script = board_config.get("build.ldscript", "")
    if linker_script:
        marlin.custom_ld_script(linker_script)
| MarlinFirmware/Marlin | buildroot/share/PlatformIO/scripts/custom_board.py | custom_board.py | py | 494 | python | en | code | 15,422 | github-code | 13 |
14218463195 | import re
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
#from module.attention import ChannelAttention, SpatialAttention
class DenseLayer(nn.Sequential):
    """Single DenseNet layer: BN-ReLU-Conv1x1 bottleneck followed by
    BN-ReLU-Conv3x3.  The new feature maps are concatenated onto the
    input, so the output has ``num_input_features + growth_rate`` channels.
    """

    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate):
        bottleneck = bn_size * growth_rate
        # Module names (norm1, conv1, ...) are kept identical to the
        # torchvision layout so pretrained state dicts still match.
        super(DenseLayer, self).__init__(OrderedDict([
            ('norm1', nn.BatchNorm2d(num_input_features)),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv1', nn.Conv2d(num_input_features, bottleneck,
                                kernel_size=1, stride=1, bias=False)),
            ('norm2', nn.BatchNorm2d(bottleneck)),
            ('relu2', nn.ReLU(inplace=True)),
            ('conv2', nn.Conv2d(bottleneck, growth_rate,
                                kernel_size=3, stride=1, padding=1, bias=False)),
        ]))
        self.drop_rate = drop_rate

    def forward(self, x):
        grown = super(DenseLayer, self).forward(x)
        if self.drop_rate > 0:
            grown = F.dropout(grown, p=self.drop_rate, training=self.training)
        # Dense connectivity: pass the input through alongside the new maps.
        return torch.cat([x, grown], 1)
class DilatedDenseLayer(nn.Sequential):
    """DenseNet layer whose 3x3 convolution is dilated; ``padding=dilation``
    keeps the spatial size unchanged.  Output is ``cat([input, new], dim=1)``.
    """

    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, dilation=2):
        bottleneck = bn_size * growth_rate
        # Same module names as DenseLayer so checkpoints remain compatible.
        super(DilatedDenseLayer, self).__init__(OrderedDict([
            ('norm1', nn.BatchNorm2d(num_input_features)),
            ('relu1', nn.ReLU(inplace=True)),
            ('conv1', nn.Conv2d(num_input_features, bottleneck,
                                kernel_size=1, stride=1, bias=False)),
            ('norm2', nn.BatchNorm2d(bottleneck)),
            ('relu2', nn.ReLU(inplace=True)),
            ('conv2', nn.Conv2d(bottleneck, growth_rate, kernel_size=3,
                                dilation=dilation, stride=1, padding=dilation, bias=False)),
        ]))
        self.drop_rate = drop_rate

    def forward(self, x):
        grown = super(DilatedDenseLayer, self).forward(x)
        if self.drop_rate > 0:
            grown = F.dropout(grown, p=self.drop_rate, training=self.training)
        return torch.cat([x, grown], 1)
class DenseBlock(nn.Sequential):
    """Stack of dense layers; layer *i* receives all preceding feature maps.

    When ``is_dilated`` is True, DilatedDenseLayer is used with the dilation
    rate cycling through 1, 2, 3 across successive layers.
    """

    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, is_dilated):
        super(DenseBlock, self).__init__()
        for layer_idx in range(num_layers):
            # Each layer sees the block input plus all previously grown maps.
            in_features = num_input_features + layer_idx * growth_rate
            if is_dilated == True:  # strict comparison kept from the original
                new_layer = DilatedDenseLayer(in_features, growth_rate, bn_size, drop_rate,
                                              dilation=layer_idx % 3 + 1)
            else:
                new_layer = DenseLayer(in_features, growth_rate, bn_size, drop_rate)
            self.add_module('denselayer%d' % (layer_idx + 1), new_layer)
class Transition(nn.Sequential):
    """Transition layer between dense blocks: BN -> ReLU -> 1x1 conv
    (channel reduction) -> 2x2 average pooling.
    """
    def __init__(self, num_input_features, num_output_features, stride=2):
        super(Transition, self).__init__()
        self.add_module('norm', nn.BatchNorm2d(num_input_features))
        self.add_module('relu', nn.ReLU(inplace=True))
        self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,
                                          kernel_size=1, stride=1, bias=False))
        # NOTE(review): both branches build an identical stride-2 pool
        # (padding=0 is already the default), so passing stride=1 still halves
        # the spatial resolution.  This looks unintended — confirm before
        # relying on Transition with stride=1.  All in-file callers use the
        # default stride=2.
        if stride == 2:
            self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))
        elif stride == 1:
            self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2, padding=0))
class DenseNet(nn.Module):
    """DenseNet (Huang et al., <https://arxiv.org/pdf/1608.06993.pdf>).

    Stem conv -> dense blocks separated by transitions -> final BN ->
    global average pool -> linear classifier.  ``dilation_config`` selects,
    per block, whether DilatedDenseLayer is used instead of DenseLayer.
    The ``attention`` flag is currently a no-op (module code is commented
    out below).
    """
    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),
                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000,
                 attention=False, dilation_config=(False, False, False, True), no_channels = 3):
        super(DenseNet, self).__init__()
        # First convolution: 7x7/2 stem plus 3x3/2 max pool (4x downsample).
        self.features = nn.Sequential(OrderedDict([
            ('conv0', nn.Conv2d(no_channels, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)),
            ('norm0', nn.BatchNorm2d(num_init_features)),
            ('relu0', nn.ReLU(inplace=True)),
            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1))]))
        self.attention = nn.Sequential()
        # Each denseblock, followed by a channel-halving transition
        # everywhere except after the last block.
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            block = DenseBlock(num_layers=num_layers, num_input_features=num_features,
                               bn_size=bn_size, growth_rate=growth_rate, drop_rate=drop_rate,
                               is_dilated=dilation_config[i])
            self.features.add_module('denseblock%d' % (i + 1), block)
            num_features = num_features + num_layers * growth_rate
            if i != len(block_config) - 1:
                trans = Transition(num_input_features=num_features, num_output_features=num_features // 2)
                self.features.add_module('transition%d' % (i + 1), trans)
                num_features = num_features // 2
            # ---- Attention module (disabled; see commented lines) ----
            if attention:
                pass
            # self.features.add_module('attention%d_1' % (i + 1), ChannelAttention(num_features))
            # self.features.add_module('attention%d_2' % (i + 1), SpatialAttention(num_features))
        # Final batch norm
        self.features.add_module('norm5', nn.BatchNorm2d(num_features))
        # ---- Final Attention module (disabled) ----
        if attention:
            pass
            #self.features.add_module('attention5_1', ChannelAttention(num_features))
            #self.features.add_module('attention5_2', SpatialAttention(num_features))
        # Linear layer
        self.classifier = nn.Linear(num_features, num_classes)
        # Official init from pytorch repo.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight.data)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                nn.init.kaiming_normal_(m.weight.data)
                m.bias.data.zero_()
    def forward(self, x):
        features = self.features(x)
        out = F.relu(features, inplace=True)
        # 7x7 global average pool assumes a 224x224 input; flatten to (N, C).
        out = F.avg_pool2d(out, kernel_size=7, stride=1).view(features.size(0), -1)
        out = self.classifier(out)
        return out
def densenet121(pretrained=False, attention=False, dilation_config=(False, False, False, False), drop_rate=0, **kwargs):
    """
    Densenet-121 model from <https://arxiv.org/pdf/1608.06993.pdf>

    When ``pretrained`` is True, weights are read from the local file
    ``./checkpoints/densenet/densenet121.pth`` and legacy-format keys
    (e.g. ``norm.1.weight``) are remapped to the current layout
    (``norm1.weight``) before loading with ``strict=False``.
    """
    model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), attention=attention,
                     dilation_config=dilation_config, drop_rate=drop_rate, **kwargs)
    if pretrained:
        # Matches legacy dense-layer keys such as
        # 'denseblockX.denselayerY.norm.1.weight'; the two capture groups are
        # concatenated to rebuild the modern 'norm1.weight'-style key.
        pattern = re.compile(
            r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$')
        state_dict = model.state_dict()
        state_dict_pretrained = torch.load('./checkpoints/densenet/densenet121.pth')
        for key in list(state_dict_pretrained.keys()):
            if key not in state_dict:
                res = pattern.match(key)
                if res: # legacy dense-layer key: drop the extra '.'
                    new_key = res.group(1) + res.group(2)
                    state_dict[new_key] = state_dict_pretrained[key]
                else:
                    print('Ignore layer {}'.format(key))
                    continue
            else: # key already matches the current layout
                state_dict[key] = state_dict_pretrained[key]
        model.load_state_dict(state_dict, strict=False)
        print('success in loading weights!')
    return model
def densenet121_CSN(pretrained=False, attention=False, dilation_config=(False, False, False, False), drop_rate=0, **kwargs):
    """
    Densenet-121 backbone for CSN fine-tuning
    (base architecture: <https://arxiv.org/pdf/1608.06993.pdf>).

    When ``pretrained`` is True, loads weights saved from a
    ``nn.DataParallel``-wrapped model and replaces the classifier with a
    2-class head.
    """
    model = DenseNet(num_init_features=64, growth_rate=32, block_config=(6, 12, 24, 16), attention=attention,
                     dilation_config=dilation_config, drop_rate=drop_rate, **kwargs)
    if pretrained:
        state_dict_pretrained = torch.load('./checkpoints/densenet/densenet121.pth')
        # Checkpoints saved from nn.DataParallel prefix every key with
        # 'module.'; strip that prefix so the keys match this (unwrapped)
        # model.  Bug fix: the original iterated the *model's own*
        # state_dict and cut seven characters off its keys, so no
        # pretrained weight was ever loaded.
        new_state_dict = OrderedDict()
        for key, value in state_dict_pretrained.items():
            new_key = key[7:] if key.startswith('module.') else key
            new_state_dict[new_key] = value
        model.load_state_dict(new_state_dict, strict=False)
        # NOTE(review): DenseNet-121's final feature width is 1024, not 64 —
        # confirm the intended input width of this 2-class head.
        num_ftrs = 64
        model.classifier = nn.Linear(num_ftrs, 2)
    return model
| ljarabek/CSN_chexpert | network_base/densenet.py | densenet.py | py | 9,930 | python | en | code | 1 | github-code | 13 |
21268487718 | import sys,json
# Fold each '#'-separated record of the .key file into one JSON object:
# "<field0>#<field5>" -> "<field8>\t<field9>" (later records win on
# duplicate keys), then dump it to the (append-mode) output file.
with open("/home/fux/fux/miRNASNP3/map_utr3_snp/map_utr_02/freq/truncate_altutr_03.key.json","a") as out:
    temp_json = {}
    with open("/home/fux/fux/miRNASNP3/map_utr3_snp/map_utr_02/freq/truncate_altutr_03.key") as infile:
        for raw_line in infile:
            fields = raw_line.strip().split('#')
            composite_key = '{}#{}'.format(fields[0], fields[5])
            temp_json[composite_key] = '{}\t{}'.format(fields[8], fields[9])
json.dump(temp_json,out) | chunjie-sam-liu/miRNASNP-v3 | scr/predict_result/altutr/B-00-truncate-key2json.py | B-00-truncate-key2json.py | py | 460 | python | en | code | 3 | github-code | 13 |
33936141677 | from pathlib import Path
from typing import Union
def load_graph(file: Union[Path, str], fmt="auto", ignore_vp=None, ignore_ep=None,
               ignore_gp=None, directed=True, **kwargs):
    """Load a graph-tool graph from *file*.

    With ``fmt="auto"`` and an edge-list style suffix
    (csv/edgelist/edge/edges/el/txt) the file is parsed via
    ``load_graph_from_csv``; otherwise it is handed to graph-tool's
    generic ``load_graph``.  Extra keyword arguments are forwarded to
    whichever loader is used.
    """
    import warnings

    from graph_tool import load_graph_from_csv

    # graph-tool emits RuntimeWarnings on import in some builds; silence
    # them for the duration of the import only.
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=RuntimeWarning)
        from graph_tool import load_graph

    # Generalized: accept pathlib.Path as well as str here.  The original
    # matched only str, so Path inputs with an edge-list suffix silently
    # bypassed the CSV loader.
    if fmt == 'auto' and isinstance(file, (str, Path)) and \
            Path(file).suffix[1:] in ["csv", "edgelist", "edge", "edges", "el", "txt"]:
        delimiter = kwargs.get("delimiter", None)
        if delimiter is None:
            # CSV defaults to comma; other edge-list formats to a space.
            delimiter = "," if Path(file).suffix == ".csv" else " "
        g = load_graph_from_csv(str(file),
                                directed=directed,
                                eprop_types=kwargs.get("eprop_types", None),
                                eprop_names=kwargs.get("eprop_names", None),
                                # string_vals=kwargs.get("string_vals", False),
                                hashed=kwargs.get("hashed", False),
                                hash_type=kwargs.get("hash_type", "string"),
                                skip_first=kwargs.get("skip_first", False),
                                ecols=kwargs.get("ecols", (0, 1)),
                                csv_options=kwargs.get("csv_options", {
                                    "delimiter": delimiter,
                                    "quotechar": '"'
                                }),
                                )
    else:
        g = load_graph(file,
                       fmt=fmt,
                       ignore_vp=ignore_vp,
                       ignore_ep=ignore_ep,
                       ignore_gp=ignore_gp,
                       **kwargs
                       )
    return g
| NetworkDismantling/review | network_dismantling/common/loaders.py | loaders.py | py | 1,837 | python | en | code | 6 | github-code | 13 |
17055108634 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiSalesLeadsShopleadsCreateModel(object):
    """Request model for creating Koubei shop sales leads.

    Each field is stored in a private ``_name`` attribute and exposed
    through a generated read/write property, mirroring the original
    hand-written accessors one-for-one.
    """

    # Field names in the exact order they are serialized by to_alipay_dict().
    FIELD_NAMES = (
        'address', 'branch_name', 'brand_id', 'category_id', 'city_id',
        'company_name', 'contacts_name', 'contacts_no', 'country_id',
        'district_id', 'ext_info', 'head_shop_name', 'latitude', 'longitude',
        'memo', 'province_id', 'register_date', 'request_id',
    )

    def __init__(self):
        for field_name in self.FIELD_NAMES:
            setattr(self, '_' + field_name, None)

    def to_alipay_dict(self):
        """Serialize all set (truthy) fields into a dict for the gateway."""
        params = dict()
        for field_name in self.FIELD_NAMES:
            value = getattr(self, field_name)
            if value:
                # Nested model objects serialize themselves.
                if hasattr(value, 'to_alipay_dict'):
                    params[field_name] = value.to_alipay_dict()
                else:
                    params[field_name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for falsy input."""
        if not d:
            return None
        o = KoubeiSalesLeadsShopleadsCreateModel()
        for field_name in KoubeiSalesLeadsShopleadsCreateModel.FIELD_NAMES:
            if field_name in d:
                setattr(o, field_name, d[field_name])
        return o


def _shopleads_property(field_name):
    """Create a read/write property backed by the matching '_' attribute."""
    private_name = '_' + field_name

    def getter(self):
        return getattr(self, private_name)

    def setter(self, value):
        setattr(self, private_name, value)

    return property(getter, setter)


# Attach one property per field so attribute access works exactly as it did
# with the original hand-written @property/@x.setter pairs.
for _field_name in KoubeiSalesLeadsShopleadsCreateModel.FIELD_NAMES:
    setattr(KoubeiSalesLeadsShopleadsCreateModel, _field_name,
            _shopleads_property(_field_name))
del _field_name
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/KoubeiSalesLeadsShopleadsCreateModel.py | KoubeiSalesLeadsShopleadsCreateModel.py | py | 9,382 | python | en | code | 241 | github-code | 13 |
39740101817 | import re
import os
import sys
import locale
import datetime
import xlsxwriter
from django.utils.translation import gettext_lazy as _
from . import utils
from . import scramble
from . import minimal_intervals
from .models import *
from .add_excel_info import add_excel_info
# Column headers for the rider 'Registration' sheet read by CrossMgr.
data_headers = (
    'Bib#',
    'LastName', 'FirstName',
    'Team', 'TeamCode',
    'City', 'StateProv',
    'Category', 'Age', 'Gender',
    'License',
    'NatCode', 'UCI ID',
    'Tag', 'Tag2', # These must be last.
)
# Column headers for the '--CrossMgr-Categories' sheet.
category_headers = (
    'Category Type',
    'Name',
    'Gender',
    'Numbers',
    'Start Offset',
    'Race Laps',
    'Race Distance',
    'Race Minutes',
    'Publish',
    'Upload',
    'Series',
)
# Column headers for the '--CrossMgr-Properties' sheet; the order must match
# the row built in add_properties_page().
property_headers = (
    'Event Name',
    'Event Organizer',
    'Event City',
    'Event StateProv',
    'Event Country',
    'Event Date',
    'Scheduled Start',
    'TimeZone',
    'Race Number',
    'Race Discipline',
    'Enable RFID',
    'Distance Unit',
    'Time Trial',
    'RFID Option',
    'Use SFTP',
    'FTP Host',
    'FTP User',
    'FTP Password',
    'FTP Path',
    'FTP Upload During Race',
    'GATrackingID',
    'Road Race Finish Times',
    'No Data DNS',
    'Win and Out',
    'Event Long Name',
    'Email',
    'Google Maps API Key',
)
def get_number_range_str(numbers):
    """Format a collection of ints as a compact range string, e.g. '1-3,7,9-10'.

    Duplicates are removed and the values sorted before consecutive runs
    are collapsed into 'lo-hi' pairs.
    """
    unique = sorted(set(numbers))
    if len(unique) <= 1:
        return ','.join(str(n) for n in unique)
    runs = [[unique[0], unique[0]]]
    for n in unique[1:]:
        if n == runs[-1][1] + 1:
            runs[-1][1] = n           # extend the current consecutive run
        else:
            runs.append([n, n])       # start a new run
    return ','.join(str(lo) if lo == hi else '{}-{}'.format(lo, hi)
                    for lo, hi in runs)
def get_subset_number_range_str(bib_all, bib_subset):
    """Format *bib_subset* as ranges where 'consecutive' means adjacent in
    the sorted order of *bib_all* (not numerically consecutive).

    E.g. with bib_all=[10,20,30,40], [10,20,40] renders as '10-20,40'.
    """
    subset = sorted(bib_subset)
    if len(subset) <= 1:
        return ','.join(str(n) for n in subset)
    ordered = sorted(bib_all)
    index_of = {bib: i for i, bib in enumerate(ordered)}
    # Work in index space so adjacency follows the bib_all ordering.
    indices = [index_of[bib] for bib in subset]
    runs = [[indices[0], indices[0]]]
    for i in indices[1:]:
        if i == runs[-1][1] + 1:
            runs[-1][1] = i
        else:
            runs.append([i, i])
    # Map run endpoints back to actual bib numbers for display.
    return ','.join(
        str(ordered[lo]) if lo == hi else '{}-{}'.format(ordered[lo], ordered[hi])
        for lo, hi in runs
    )
def get_gender_str(g):
    """Return the display label for gender code *g* (0=Men, 1=Women, 2=Open)."""
    labels = ('Men', 'Women', 'Open')
    return labels[g]
def safe_xl(v):
    """Coerce *v* to a type xlsxwriter can write natively.

    None and the native scalar/date types pass through unchanged; anything
    else is stringified.
    """
    xl_native_types = (str, int, float, bool, datetime.datetime, datetime.date)
    return v if v is None or isinstance(v, xl_native_types) else '{}'.format(v)
def write_row_data(ws, row, row_data, format=None):
    """Write one row of values to worksheet *ws*; return the next row index.

    ``format`` may be None (no formatting), a single xlsxwriter format
    applied to every cell, or a list of per-column formats where None (or a
    missing entry) means the cell is written unformatted.
    """
    if format is None:
        for col, value in enumerate(row_data):
            ws.write(row, col, value)
        return row + 1
    per_column = isinstance(format, list)
    for col, value in enumerate(row_data):
        if per_column:
            cell_format = format[col] if col < len(format) else None
        else:
            cell_format = format
        if cell_format is not None:
            ws.write(row, col, value, cell_format)
        else:
            ws.write(row, col, value)
    return row + 1
def add_categories_page( wb, title_format, event ):
    """Write the '--CrossMgr-Categories' control sheet for *event* into *wb*.

    Emits one 'Wave' row per single-category wave, a 'Wave' row plus
    'Component' rows for multi-category waves, and 'Custom' rows for the
    event's custom categories.  Row columns follow category_headers.
    """
    #---------------------------------------------------------------------------------------------------
    # Category information.
    #
    ws = wb.add_worksheet('--CrossMgr-Categories')
    competition = event.competition
    # Categories that actually have eligible competitors in this competition.
    participant_categories = set( Category.objects.filter(pk__in =
        Participant.objects.filter(competition=competition,role=Participant.Competitor).order_by('category__pk').values_list('category__pk',flat=True).distinct()
        )
    )
    exclude_empty_categories = SystemInfo.get_exclude_empty_categories()
    # Get some more reasonable number ranges for the categories.
    def get_category_intervals():
        # Collect each category's bib set (in wave/category order) and let
        # minimal_intervals compute compact, non-overlapping range strings.
        cat_sequence = []
        numbers = []
        for wave in event.get_wave_set().all():
            categories = set( c for c in wave.categories.all() if c in participant_categories ) if exclude_empty_categories else wave.categories.all()
            categories = sorted( categories, key = lambda c: c.sequence )
            if not categories:
                continue
            participants = list( p for p in wave.get_participants_unsorted() if p.license_holder.is_eligible )
            for category in categories:
                numbers.append( set(p.bib for p in participants if p.category == category and p.bib) )
                cat_sequence.append( category )
        intervals = [minimal_intervals.interval_to_str(i) for i in minimal_intervals.minimal_intervals(numbers)]
        return {c:i for c, i in zip(cat_sequence,intervals)}
    category_intervals = get_category_intervals()
    row = write_row_data( ws, 0, category_headers, title_format )
    for wave in event.get_wave_set().all():
        categories = set( c for c in wave.categories.all() if c in participant_categories ) if exclude_empty_categories else wave.categories.all()
        categories = sorted( categories, key = lambda c: c.sequence )
        if not categories:
            continue
        # When categories are ranked together, publish/upload at the Wave
        # level; otherwise at the Component (per-category) level.
        wave_flag = getattr( wave, 'rank_categories_together', False )
        component_flag = not wave_flag
        participants = list( p for p in wave.get_participants_unsorted() if p.license_holder.is_eligible )
        if len(categories) == 1: # If only one category, do not output Component waves.
            for category in categories:
                row_data = [
                    'Wave',
                    category.code,
                    get_gender_str(category.gender),
                    #get_number_range_str( p.bib for p in participants if p.category == category and p.bib ),
                    category_intervals.get(category,''),
                    '{}'.format(getattr(wave,'start_offset','')),
                    wave.laps if wave.laps else '',
                    competition.to_local_distance(wave.distance) if wave.distance else '',
                    getattr(wave, 'minutes', None) or '',
                    True, True, True,
                ]
                row = write_row_data( ws, row, row_data )
        else:
            genders = list( set(c.gender for c in categories) )
            row_data = [
                'Wave',
                wave.name,
                # Mixed-gender waves are reported as 'Open'.
                get_gender_str( 2 if len(genders) != 1 else genders[0] ),
                '', # No ranges here - these come from the categories.
                '{}'.format(getattr(wave,'start_offset','')),
                wave.laps if wave.laps else '',
                competition.to_local_distance(wave.distance) if wave.distance else '',
                getattr(wave, 'minutes', None) or '',
                wave_flag, wave_flag, wave_flag,
            ]
            row = write_row_data( ws, row, row_data )
            for category in categories:
                row_data = [
                    'Component',
                    category.code,
                    get_gender_str(category.gender),
                    category_intervals.get(category,''),
                    '{}'.format(getattr(wave,'start_offset','')),
                    '',
                    '',
                    '',
                    component_flag, component_flag, component_flag,
                ]
                row = write_row_data( ws, row, row_data )
    # Custom categories: bib list is fetched lazily, only if any exist.
    bibs_all = None
    for category in event.get_custom_categories():
        if bibs_all is None:
            bibs_all = event.get_participants().exclude(bib__isnull=True).values_list('bib',flat=True)
        row_data = [
            'Custom',
            category.code,
            get_gender_str(category.gender),
            get_subset_number_range_str( bibs_all, category.get_bibs() ),
            '',
            '',
            '',
            '',
            True, True, True,
        ]
        row = write_row_data( ws, row, row_data )
def add_properties_page( wb, title_format, event, raceNumber ):
    """Write the '--CrossMgr-Properties' control sheet for *event* into *wb*.

    *raceNumber* is the 1-based position of this event within the
    competition.  The row values below must stay in the same order as
    property_headers.
    """
    competition = event.competition
    # Event times are rendered in the server's local timezone.
    server_date_time = timezone.localtime(event.date_time)
    ws = wb.add_worksheet('--CrossMgr-Properties')
    row = write_row_data( ws, 0, property_headers, title_format )
    row_data = [
        '-'.join( (competition.name, event.name) ),
        competition.organizer,
        competition.city,
        competition.stateProv,
        competition.country,
        server_date_time.strftime( '%Y-%m-%d' ),
        server_date_time.strftime( '%H:%M' ),
        str(timezone.get_current_timezone()),
        raceNumber,
        competition.discipline.name,
        competition.using_tags,
        ['km', 'miles'][competition.distance_unit],
        True if event.event_type == 1 else False, # Time Trial
        event.rfid_option,
        competition.use_sftp,
        competition.ftp_host,
        competition.ftp_user,
        # FTP password is obfuscated (not encrypted) before it is written.
        scramble.encode(utils.removeDiacritic(competition.ftp_password)),
        competition.ftp_path,
        competition.ftp_upload_during_race,
        competition.ga_tracking_id,
        event.road_race_finish_times,
        event.dnsNoData,
        getattr(event, 'win_and_out', False),
        '-'.join( [competition.long_name, event.name] ) if competition.long_name else '',
        competition.organizer_email,
        competition.google_maps_api_key,
    ]
    row = write_row_data( ws, row, row_data )
def get_crossmgr_excel( event_mass_start ):
    """Build the CrossMgr import workbook for a mass-start event.

    Returns the workbook as raw .xlsx bytes: a 'Registration' sheet of
    riders plus the '--CrossMgr-Categories' and '--CrossMgr-Properties'
    control sheets.
    """
    competition = event_mass_start.competition
    output = BytesIO()
    # Build the workbook entirely in memory; the caller receives the bytes.
    wb = xlsxwriter.Workbook( output, {'in_memory': True} )
    title_format = wb.add_format( dict(bold = True) )
    #---------------------------------------------------------------------------------------------------
    # Competitor data
    #
    ws = wb.add_worksheet('Registration')
    # Tag/Tag2 are the last two headers; drop them when tags are unused.
    table = [list(data_headers)] if competition.using_tags else [list(data_headers[:-2])]
    for p in event_mass_start.get_participants():
        h = p.license_holder
        row_data = [
            p.bib if p.bib else '',
            h.last_name, h.first_name,
            '{}'.format(p.team_name), p.team.team_code if p.team else '',
            h.city, h.state_prov,
            p.category.code, competition.competition_age(h), get_gender_str(h.gender),
            h.license_code_export,
            h.nation_code, h.get_uci_id_text(),
        ]
        if competition.using_tags:
            row_data.extend( [h.existing_tag, h.existing_tag2] if competition.use_existing_tags else [p.tag, p.tag2] )
        table.append( row_data )
    # Remove columns that are empty for every rider.  Keep the Bib column
    # (col 0) regardless; iterate right-to-left so deletions keep indexes valid.
    for col in range(len(table[0])-1, 0, -1):
        if not any( table[row][col] for row in range(1, len(table)) ):
            for row in range(0, len(table)):
                del table[row][col]
    # Write the rider data.
    write_row_data( ws, 0, table[0], title_format )
    for row in range(1, len(table)):
        write_row_data( ws, row, table[row] )
    table = None
    add_categories_page( wb, title_format, event_mass_start )
    # Race number = 1-based position of this event among the competition's
    # mass-start events.
    raceNumber = 1
    for ms in competition.eventmassstart_set.all():
        if ms == event_mass_start:
            break
        raceNumber += 1
    add_properties_page( wb, title_format, event_mass_start, raceNumber )
    add_excel_info( wb )
    wb.close()
    return output.getvalue()
#------------------------------------------------------------------------------------------------
def get_crossmgr_excel_tt( event_tt ):
    """Build the CrossMgr import workbook for a time-trial event.

    Like get_crossmgr_excel(), but with a leading 'StartTime' column
    (Excel time, i.e. fraction of a day) and riders sorted by start time
    then bib.  Returns the workbook as raw .xlsx bytes.
    """
    competition = event_tt.competition
    output = BytesIO()
    wb = xlsxwriter.Workbook( output, {'in_memory': True} )
    title_format = wb.add_format( dict(bold = True) )
    time_format = wb.add_format( dict(num_format='h:mm:ss') )
    #---------------------------------------------------------------------------------------------------
    # Competitor data
    #
    ws = wb.add_worksheet('Registration')
    table = [['StartTime'] + list(data_headers)] if competition.using_tags else [['StartTime'] + list(data_headers[:-2])]
    participants = list( event_tt.get_participants() )
    # Seeded start lists get per-rider start times; otherwise leave empty.
    start_times = { p: event_tt.get_start_time(p) for p in participants } if event_tt.create_seeded_startlist else {}
    def get_start_time( p ):
        # Large sentinel pushes riders without a start time to the end.
        t = start_times.get(p, None)
        return t.total_seconds() if t is not None else 10000.0*60*24*24
    participants.sort( key=lambda p: (get_start_time(p), p.bib or 9999999) )
    for p in participants:
        # Convert to Excel time which is a fraction of a day.
        start_time = start_times.get(p, None)
        h = p.license_holder
        row_data = [
            start_time.total_seconds() / (24.0*60.0*60.0) if start_time is not None else '',
            p.bib if p.bib else '',
            h.last_name, h.first_name,
            p.team.name if p.team else '', p.team.team_code if p.team else '',
            h.city, h.state_prov,
            p.category.code, competition.competition_age(h), get_gender_str(h.gender),
            h.license_code,
            h.nation_code, h.get_uci_id_text(),
        ]
        if competition.using_tags:
            row_data.extend( [h.existing_tag, h.existing_tag2] if competition.use_existing_tags else [p.tag, p.tag2] )
        table.append( row_data )
    # Remove columns empty for every rider.  Keep Bib and StartTime columns
    # (cols 0-1); iterate right-to-left so deletions keep indexes valid.
    for col in range(len(table[0])-1, 1, -1):
        if not any( table[row][col] for row in range(1, len(table)) ):
            for row in range(0, len(table)):
                del table[row][col]
    # Write the rider data.
    write_row_data( ws, 0, table[0], title_format )
    # Apply the h:mm:ss format to the StartTime column only.
    # NOTE: 'format' shadows the builtin; kept for compatibility.
    format = [time_format]
    for row in range(1, len(table)):
        write_row_data( ws, row, table[row], format )
    table = None
    add_categories_page( wb, title_format, event_tt )
    # TT race numbers continue after all mass-start events.
    raceNumber = 1 + competition.eventmassstart_set.all().count()
    for ms in competition.eventtt_set.all():
        if ms == event_tt:
            break
        raceNumber += 1
    add_properties_page( wb, title_format, event_tt, raceNumber )
    add_excel_info( wb )
    wb.close()
    return output.getvalue()
| esitarski/RaceDB | core/get_crossmgr_excel.py | get_crossmgr_excel.py | py | 12,298 | python | en | code | 12 | github-code | 13 |
8954184310 | # Import dependencies.
import numpy as np
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
############################################################
# Set up database.
############################################################
# Create engine to hawaii.sqlite (source: SQLAlchemy.ipynb file).
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# Reflect an existing database into a new model (source: SQLAlchemy.ipynb file).
Base = automap_base()
# Reflect the tables (source: SQLAlchemy.ipynb file).
# NOTE(review): passing both autoload_with= and reflect= is redundant —
# autoload_with alone triggers reflection; confirm the SQLAlchemy version in use.
Base.prepare(autoload_with=engine, reflect=True)
# Save references to each table (source: SQLAlchemy.ipynb file).
measurement = Base.classes.measurement
station = Base.classes.station
############################################################
# Set up flask app.
############################################################
# Create app.
app = Flask(__name__)
# Define action for user request for index route.
@app.route("/")
def index():
    """Render the landing page listing every available API route."""
    routes = (
        "AVAILABLE ROUTES:<br/>",
        "<br/>",
        "Precipitation data with dates:<br/>",
        "/api/v1.0/precipitation<br/>",
        "<br/>",
        "Stations and names:<br/>",
        "/api/v1.0/stations<br/>",
        "<br/>",
        "Temperature observations from the Waihee station:<br/>",
        "/api/v1.0/tobs<br/>",
        "<br/>",
        "Minimum, average, and maximum temperatures for a given date (please use format yyyy-mm-dd):<br/>",
        "/api/v1.0/yyyy-mm-dd<br/>",
        "<br/>",
        "Minimum, average, and maximum temperatures for a given start and end dates (please use format yyyy-mm-dd):<br/>",
        "/api/v1.0/start:yyyy-mm-dd/end:yyyy-mm-dd",
    )
    return "".join(routes)
############################################################
# Define precipitation route
@app.route("/api/v1.0/precipitation")
def precipitation():
    """Return date/prcp pairs recorded on or after 2016-08-24 as JSON."""
    # Open a session for this request only.
    session = Session(engine)
    rows = session.query(measurement.date, measurement.prcp).filter(measurement.date >= "2016-08-24").all()
    session.close()
    # One {"date": ..., "prcp": ...} record per result row.
    payload = [{"date": date, "prcp": prcp} for date, prcp in rows]
    return jsonify(payload)
############################################################
# Define station route.
@app.route("/api/v1.0/stations")
def stations():
    """Return a flat JSON list of station ids and names."""
    session = Session(engine)
    rows = session.query(station.station, station.name).all()
    session.close()
    # np.ravel flattens [(id, name), ...] into [id, name, id, name, ...].
    return jsonify(list(np.ravel(rows)))
############################################################
# Define TOBs route.
@app.route("/api/v1.0/tobs")
def tobs():
    """Return temperature observations from station USC00519281 since 2016-08-24."""
    session = Session(engine)
    rows = session.query(measurement.date, measurement.prcp, measurement.tobs).\
        filter(measurement.station == 'USC00519281').\
        filter(measurement.date >= '2016-08-24').all()
    session.close()
    # Loop variable renamed so it does not shadow this function's name.
    payload = [
        {'date': date, 'prcp': prcp, 'tobs': tobs_value}
        for date, prcp, tobs_value in rows
    ]
    return jsonify(payload)
############################################################
# Define start date route.
@app.route("/api/v1.0/<start_date>")
def start_date(start_date):
    """Return min/avg/max temperature observations for all dates >= *start_date*.

    The URL parameter must be formatted yyyy-mm-dd.
    """
    session = Session(engine)
    results = session.query(func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\
        filter(measurement.date >= start_date).all()
    session.close()
    # Loop variables renamed: the original shadowed the builtins min/max.
    start_tobs = [
        {'min_temp': low, 'avg_temp': mean, 'max_temp': high}
        for low, mean, high in results
    ]
    return jsonify(start_tobs)
############################################################
# Define start/end date route.
@app.route("/api/v1.0/<start_date>/<end_date>")
def start_end(start_date, end_date):
    """Return min/avg/max temperature observations between two dates (inclusive).

    Both URL parameters must be formatted yyyy-mm-dd.
    """
    session = Session(engine)
    results = session.query(func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\
        filter(measurement.date >= start_date).filter(measurement.date <= end_date).all()
    session.close()
    # Build plain dicts without shadowing builtins; the original bound a local
    # named `list` (and `min`/`max`), hiding the builtins inside the loop.
    start_end_list = [
        {'min_temp': low, 'avg_temp': mean, 'max_temp': high}
        for low, mean, high in results
    ]
    return jsonify(start_end_list)
if __name__ == '__main__':
    # debug=True enables the interactive debugger/reloader; development only.
    app.run(debug=True)
32765580189 | # Config functions for toggling ui status values in config.json
# TODO: Consider refactoring toggle functions into individual on/off functions
import json
def toggle_weather_ui():
    """Flip the 'weather_ui_on' boolean stored in config.json."""
    with open("config.json", "r") as cfg_file:
        settings = json.load(cfg_file)
    settings["weather_ui_on"] = not settings["weather_ui_on"]
    with open("config.json", "w") as cfg_file:
        json.dump(settings, cfg_file, indent=4)
def toggle_news_ui():
    """Flip the 'news_ui_on' boolean stored in config.json."""
    with open("config.json", "r") as cfg_file:
        settings = json.load(cfg_file)
    settings["news_ui_on"] = not settings["news_ui_on"]
    with open("config.json", "w") as cfg_file:
        json.dump(settings, cfg_file, indent=4)
def toggle_time_ui():
    """Flip the 'time_ui_on' boolean stored in config.json."""
    with open("config.json", "r") as cfg_file:
        settings = json.load(cfg_file)
    settings["time_ui_on"] = not settings["time_ui_on"]
    with open("config.json", "w") as cfg_file:
        json.dump(settings, cfg_file, indent=4)
def toggle_ui():
    """Flip 'ui_on' in config.json, syncing every per-widget flag to the new state."""
    with open("config.json", "r") as f:
        config = json.load(f)
    # Both branches of the original set all four flags to the same value, so
    # compute the new state once and apply it uniformly (DRY, same behavior).
    new_state = not config["ui_on"]
    for key in ("weather_ui_on", "news_ui_on", "time_ui_on", "ui_on"):
        config[key] = new_state
    with open("config.json", "w") as f:
        json.dump(config, f, indent=4)
def jarvis_change_ui(*args):
    """
    Show/hide UI elements based on speech input.
    KEYWORD(s): 'show', 'hide'
    """
    keywords = {
        'weather': toggle_weather_ui,
        'time': toggle_time_ui,
        'date': toggle_time_ui,
        'news': toggle_news_ui,
        'headlines': toggle_news_ui
    }
    # Run the toggle mapped to each recognised keyword, in spoken order.
    for word in args[0].split():
        action = keywords.get(word.lower())
        if action is not None:
            action()
    # True tells the main command loop in Jarvis.py to break.
    return True
def main():
    # Placeholder entry point; this module is used via its toggle functions.
    pass
if __name__ == '__main__':
    main()
72263972818 | from Crypto.PublicKey import DSA
from Crypto.Signature import DSS
from Crypto.Hash import SHA256
# Typing
from Crypto.Hash.SHA256 import SHA256Hash
from Crypto.PublicKey.DSA import DsaKey
from Crypto.Signature.DSS import FipsDsaSigScheme
from typing import List
def verify(hashed_message_1: SHA256Hash, hashed_message_2: SHA256Hash, signature_1: str, signature_2: str) -> None:
    """Check both signatures against the stored public key, then show they fail when swapped.

    Fixes: the key file handle was never closed (now a context manager) and a
    leftover debug print of the verifier object was removed.
    """
    with open('./keys/public.key') as file:
        public_key = DSA.import_key(file.read())
    verifier = DSS.new(public_key, 'fips-186-3')
    check_signature(verifier, "# CHECK IF THE SIGNATURE IS VALID FOR BOTH MESSAGES",
                    hashed_message_1, hashed_message_2, signature_1, signature_2)
    check_signature(verifier, "# CHECK THAT THE SIGNATURE IS NOT VALID IF WE SWAP THEM",
                    hashed_message_1, hashed_message_2, signature_2, signature_1)
def check_signature(verifier: FipsDsaSigScheme, message: str,
                    hashed_message_1: SHA256Hash, hashed_message_2: SHA256Hash,
                    signature_1: str, signature_2: str):
    """Print *message*, then report whether both (digest, signature) pairs verify."""
    print(message)
    try:
        # verify() raises ValueError on the first mismatch.
        for digest, sig in ((hashed_message_1, signature_1), (hashed_message_2, signature_2)):
            verifier.verify(digest, sig)
    except ValueError:
        print("-> The messages are not authentic\n")
    else:
        print("-> Both messages are authentic\n")
def sign(keys: DsaKey, hashed_message: SHA256Hash) -> str:
    """Produce a FIPS 186-3 DSA signature over the given digest."""
    return DSS.new(keys, 'fips-186-3').sign(hashed_message)
def hash(message_plain_list: List[str]) -> SHA256Hash:
    """Fold every element of *message_plain_list* into one SHA-256 digest.

    NOTE: keeps the original name even though it shadows the builtin ``hash``.
    """
    digest = SHA256.new(message_plain_list[0].encode())
    for chunk in message_plain_list[1:]:
        digest.update(chunk.encode())
    return digest
def read_file() -> List[str]:
    """Read files/message.txt and return its lines, newlines preserved."""
    with open('files/message.txt') as message_file:
        # Iterating a text file yields one line per element, like readlines().
        return list(message_file)
def create_keys() -> DsaKey:
    """Generate a 2048-bit DSA key pair and persist both halves under ./keys/.

    Robustness fix: create the ./keys directory if missing so the writes below
    cannot fail with FileNotFoundError on a fresh checkout.
    """
    import os
    os.makedirs("./keys", exist_ok=True)
    keys = DSA.generate(2048)
    # Private key
    with open("./keys/private.key", "wb") as file:
        file.write(keys.exportKey("PEM"))  # DER
    # Public key
    public_key = keys.publickey()
    with open("./keys/public.key", "wb") as file:
        file.write(public_key.exportKey("PEM"))  # DER
    return keys
def main() -> None:
    # Generate the keys
    keys = create_keys()
    # Read the message from file
    message_list = read_file()
    # Generate 2 different messages
    hashed_message_1 = hash(message_list)
    # NOTE(review): hash() is written for a list of strings; passing a plain
    # string works only because iterating it yields single characters.
    hashed_message_2 = hash("Another message")
    # Generate both signatures
    signature_1 = sign(keys, hashed_message_1)
    signature_2 = sign(keys, hashed_message_2)
    # Verify that the authors of the hashed messages by its signature
    verify(hashed_message_1, hashed_message_2, signature_1, signature_2)
if __name__ == "__main__":
    main()
| CrisDgrnu/DSA-sign-verifier | DSA.py | DSA.py | py | 2,821 | python | en | code | 0 | github-code | 13 |
41833344235 | import torch
from torch import nn, optim
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.dataset import random_split
import torch.nn.functional as F
import torchsummary
from basicblock import BasicBlock
import os
import cv2
import numpy as np
import time
from datetime import timedelta
class Img2Heatmap(nn.Module):
    """Image-to-heatmap network for panoramic road images.

    A ResNet-style encoder (BasicBlock stages at 64/128/256 channels) feeds a
    decoder that upsamples three intermediate feature maps, fuses them with
    skip connections, and sums the three single-channel outputs into one
    full-resolution map squashed into (0, 1) by a clamped tanh.
    The size comments assume a 3 x 362 x 1608 input; circular padding is used
    along the width — presumably because the images wrap around horizontally
    (panoramas); TODO confirm.
    """
    def __init__(self):
        super(Img2Heatmap, self).__init__()
        # 3, 362, 1608
        self.layer1 = nn.Sequential(
            nn.ReplicationPad2d((0, 0, 3, 3)),
            nn.Conv2d(3, 64, kernel_size=8, stride=2, padding=(0, 3), padding_mode='circular', bias=False),
            nn.BatchNorm2d(64)
        ) # 64, 181, 804
        self.layer2 = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        ) # 64, 90, 402
        self.block64 = nn.Sequential(
            BasicBlock(64, 64, 1),
            BasicBlock(64, 64, 1)
        )
        # 1x1 strided conv matches channels/size for the 64 -> 128 residual stage.
        self.downsample1 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False),
            nn.BatchNorm2d(128)
        )
        self.block128 = nn.Sequential(
            BasicBlock(64, 128, 2, self.downsample1),
            BasicBlock(128, 128, 1)
        ) # 128, 45, 201
        self.downsample2 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=1, stride=2, bias=False),
            nn.BatchNorm2d(256)
        )
        self.block256 = nn.Sequential(
            BasicBlock(128, 256, 2, self.downsample2),
            BasicBlock(256, 256, 1)
        ) # 256, 23, 101
        # 1x1 convs that shrink the concatenated skip features in the decoder.
        self.layer4 = nn.Sequential(
            nn.Conv2d(384, 128, kernel_size=1, stride=1, padding=0),
            nn.ReLU())
        self.layer5 = nn.Sequential(
            nn.Conv2d(192, 64, kernel_size=1, stride=1, padding=0),
            nn.ReLU())
        # Per-scale heads producing one heatmap channel each.
        self.out1_layer = nn.Sequential(
            nn.Conv2d(256, 64, kernel_size=1, stride=1),
            nn.ReLU(),
            nn.Conv2d(64, 1, kernel_size=1, stride=1),
            nn.ReLU()
        )
        self.out2_layer = nn.Sequential(
            nn.Conv2d(128, 64, kernel_size=1, stride=1),
            nn.ReLU(),
            nn.Conv2d(64, 1, kernel_size=1, stride=1),
            nn.ReLU()
        )
        self.out3_layer = nn.Sequential(
            nn.Conv2d(64, 1, kernel_size=1, stride=1),
            nn.ReLU()
        )
        self.tanh = nn.Tanh()
    def forward(self, x):
        """Map images x of shape (N, 3, H, W) to an (N, 1, H, W) heatmap in (0, 1)."""
        _, _, h, w = x.size() # [N, C, H, W] 3, 362, 1608
        #h, w = h // 4, w // 4
        x = self.layer1(x) # 64, 181, 804
        x = self.layer2(x) # 64, 90, 402
        skip1 = self.block64(x) # 64, 90, 402
        skip2 = self.block128(skip1) # 128, 45, 201
        skip3 = self.block256(skip2) # 256, 23, 101
        # out1
        out1 = F.interpolate(skip3, scale_factor=2, mode='bilinear', align_corners=True) # 256, 46, 202
        skip2 = F.pad(skip2, pad=(1, 0, 1, 0), mode='circular') # 128, 45 + 1, 201 + 1
        concat_out1 = torch.cat((out1, skip2), dim=1) # concat_out2 = [N, 384, 46, 202], C = 256 + 128
        # out2
        out2 = F.interpolate(self.layer4(concat_out1), scale_factor=2, mode='bilinear', align_corners=True) # 128, 92, 404
        skip1 = F.pad(skip1, pad=(1, 1, 1, 1), mode='circular') # 64, 90 + 2, 402 + 2
        concat_out3 = torch.cat((out2, skip1), dim=1) # concat_out3 [N, 192, 92, 404], C = 128 + 64
        # out3
        out3 = F.interpolate(self.layer5(concat_out3), size=(h//2, w//2), mode='bilinear', align_corners=True) # 64, 181, 804
        # Resize each scale's head output to full resolution and sum the three maps.
        output = torch.cat(
            [F.interpolate(self.out1_layer(out1), size=(h, w)).unsqueeze(-1),
             F.interpolate(self.out2_layer(out2), size=(h, w)).unsqueeze(-1),
             F.interpolate(self.out3_layer(out3), size=(h, w)).unsqueeze(-1)], dim=-1).sum(dim=-1)
        # Clamp keeps the output strictly inside (0, 1), so log(X) and log(1-X)
        # in the training loss stay finite.
        output = torch.clamp(self.tanh(output), torch.finfo(torch.float32).eps, 1-torch.finfo(torch.float32).eps)
        return output
class MyDataset2(Dataset):
    """Pairs cropped road images (x_path) with binarized heatmap masks (y_path).

    Files are matched by sorted filename order, so both directories must hold
    corresponding files at the same lexicographic positions.
    """
    def __init__(self, x_path, y_path):
        super(MyDataset2, self).__init__()
        self.x_file_names = os.listdir(x_path)
        self.x_file_names.sort()
        self.x_file_list = [os.path.join(x_path, filename) for filename in self.x_file_names]
        self.y_file_names = os.listdir(y_path)
        self.y_file_names.sort()
        self.y_file_list = [os.path.join(y_path, filename) for filename in self.y_file_names]
    def __len__(self):
        # Dataset size equals the number of input images.
        return len(self.x_file_list)
    def __getitem__(self, idx):
        # Crop 75 rows off the top and bottom, then move channels first (HWC -> CHW).
        x = np.transpose(cv2.imread(self.x_file_list[idx], cv2.IMREAD_COLOR)[75:-75, :, :], (2, 0, 1))
        x = torch.tensor(x, dtype=torch.float)
        y = np.expand_dims(cv2.imread(self.y_file_list[idx], cv2.IMREAD_GRAYSCALE)[75:-75, :], axis=0)
        y = torch.tensor(y > 0, dtype=torch.float) # every pixel with value > 0 in the heatmap data is a vehicle
        return x, y
if __name__ == '__main__':
    # image size == 3, 1608, 362
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print("Using {} device".format(device))
    C, H, W = 3, 362, 1608
    image_data_path = './original_img'
    heatmap_data_path = './heatmap'
    dataset_len = len(os.listdir(image_data_path))
    # 90/10 train/validation split over the paired image/heatmap dataset.
    train_dataset, val_dataset = random_split(MyDataset2(image_data_path, heatmap_data_path), [round(dataset_len * 0.9), round(dataset_len * 0.1)])
    train_dataloader = DataLoader(train_dataset, batch_size=12, num_workers=0, shuffle=True)
    val_dataloader = DataLoader(val_dataset, batch_size=12, num_workers=0)
    def FL_of_CornerNet(X, y, alpha=2, beta=4):
        # CornerNet-style focal loss (per the function name): positives are the
        # pixels where y == 1; negatives are weighted by (1 - y)^beta, and the
        # sum is normalized by the number of positive pixels.
        p_inds = y.eq(1).float()
        n_inds = (-p_inds + 1.)
        p_loss = (torch.log(X) * torch.pow(1 - X, alpha) * p_inds).sum()
        n_loss = (torch.log(1 - X) * torch.pow(X, alpha) * torch.pow(1 - y, beta) * n_inds).sum()
        p_num = p_inds.sum()
        return -(p_loss + n_loss) / p_num
    NEW_MODEL = True
    if NEW_MODEL:
        model = Img2Heatmap()
        print('Training New Model')
    else:
        model = torch.load('best_model_img2heatmap.pt')
        print('load model')
    model.to(device)
    torchsummary.summary(model, (C, H, W), batch_size=4, device=device)
    optimizer = optim.Adam(model.parameters(), lr=0.0001)
    epoch = 200
    preval_loss, val_loss = 0.0, 0.0
    total_time, epoch_time, batch_time = time.time(), 0.0, 0.0
    MSE_funcion = nn.MSELoss()  # NOTE(review): unused (and misspelled); training uses FL_of_CornerNet below
    for i in range(epoch):
        epoch_time = time.time()
        print('epoch: {}'.format(i+1))
        model.to(device)
        model.train()
        batch_time = time.time()
        for batch, (X, Y) in enumerate(train_dataloader):
            X, Y = X.to(device), Y.to(device)
            pred = model(X)
            loss = FL_of_CornerNet(pred, Y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Progress log every 100 batches.
            if batch % 100 == 0:
                current = batch * len(X)
                batch_time = time.time() - batch_time
                print(f"loss: {loss.item():>7f} [{current:>5d}/{round(dataset_len * 0.9):>5d}] --- time: {timedelta(seconds=round(batch_time))}")
                batch_time = time.time()
        print('train epoch done')
        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            for batch, (X, Y) in enumerate(val_dataloader):
                X, Y = X.to(device), Y.to(device)
                pred = model(X)
                val_loss += FL_of_CornerNet(pred, Y).item()
        # Checkpoint whenever validation loss improves on the best seen so far.
        if i == 0 or preval_loss > val_loss:
            torch.save(model, 'new_img2heatmap.pt')
            preval_loss = val_loss
            print(f'val_loss: {val_loss} --- val_loss decreased, best model saved.')
        else:
            print(f'val_loss: {val_loss} --- model not saved')
        epoch_time = time.time() - epoch_time
        print(f'time spent {timedelta(seconds=round(epoch_time))} per epoch')
        print('\n')
    print(f'total learning time: {timedelta(seconds=round(time.time() - total_time))}')
| Emcyz/alpha_dl | img2heatmap_train.py | img2heatmap_train.py | py | 8,097 | python | en | code | 0 | github-code | 13 |
# Detect whether any substring of length k repeats within the string.
# Fix: the original never closed either file handle, so output.txt was not
# guaranteed to be flushed; `with` closes (and flushes) both deterministically.
with open('input.txt', 'r') as inp, open('output.txt', 'w') as out:
    n, k = map(int, inp.readline().split(' ')[0:2])
    l = inp.readline().strip()
    seen = set()
    answer = "NO"
    # Slide a window of length k over the string; a repeat answers YES.
    for i in range(n - k + 1):
        s = l[i:i + k]
        if s in seen:
            answer = "YES"
            break
        seen.add(s)
    print(answer)
    out.write(answer)
| esix/competitive-programming | acmp/page-01/0034/main.py | main.py | py | 313 | python | en | code | 15 | github-code | 13 |
11351385961 | '''
要求:将一个有序的数组存入到二叉树中,(该二叉树也是有序)
思路:
1. 找出数组的中间元素,设为根节点。
2. 再将左右部分填入二叉树的左右子树中
'''
class BTree:
    """Binary-tree node: a payload value plus left/right child references."""
    def __init__(self):
        self.data = self.lchild = self.rchild = None
def arrToTree(arr, startIndex, endIndex):
    """Build a height-balanced BST from the sorted slice arr[startIndex:endIndex + 1].

    The middle element becomes the root; the two halves on either side are
    built recursively into the left and right subtrees.
    Returns None for an empty range.
    """
    # Guard clause: empty range -> no subtree.
    if endIndex < startIndex:
        return None
    root = BTree()
    # Floor division replaces the original int((lo + hi + 1) / 2); identical
    # for non-negative indices but the idiomatic integer form.
    mid = (startIndex + endIndex + 1) // 2
    root.data = arr[mid]
    root.lchild = arrToTree(arr, startIndex, mid - 1)
    root.rchild = arrToTree(arr, mid + 1, endIndex)
    return root
def printTreeByMidOrder(root):
    """In-order traversal: print left subtree, then the node, then right subtree.

    Each value is printed followed by ' ,' on one line.
    """
    if root is None:
        return
    printTreeByMidOrder(root.lchild)
    print(root.data, end=' ,')
    printTreeByMidOrder(root.rchild)
if __name__ == "__main__":
arr = [1,2,3,4,5,6,7,8,9,10]
root = arrToTree(arr, 0, len(arr)-1)
print(root.data)
printTreeByMidOrder(root) | DaToo-J/NotesForBookAboutPython | ch3 二叉树/2-arrToTree.py | 2-arrToTree.py | py | 1,355 | python | zh | code | 0 | github-code | 13 |
# Definition for a binary tree node.
class TreeNode:
    """Binary-tree node holding a value and optional left/right children."""
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
    def __str__(self):
        # Render as just the value so level-order printing stays compact.
        return str(self.val)
class Solution:
    """LeetCode 226 — invert (mirror) a binary tree in place."""
    def invertTree(self, root: "TreeNode") -> "TreeNode":
        """Swap the left/right children of every node; returns the same root.

        Fixes: the original followed `if root is None` with a redundant
        `if root:` that could never be false at that point; the annotations
        are quoted so the class no longer needs TreeNode at definition time.
        """
        if root is None:
            return None
        root.left, root.right = root.right, root.left
        self.invertTree(root.left)
        self.invertTree(root.right)
        return root
def traverse(root):
    """Print the tree level by level, one space-separated line per level."""
    level = [root]
    while level:
        print(' '.join(str(node) for node in level))
        # Collect the next level: each node's existing children, left first.
        level = [child
                 for node in level
                 for child in (node.left, node.right)
                 if child]
# Demo: build the LeetCode example tree, print it, invert it, print again.
root = TreeNode(4)
root.left = TreeNode(2)
root.right = TreeNode(7)
root.left.left = TreeNode(1)
root.left.right = TreeNode(3)
root.right.right = TreeNode(9)
root.right.left = TreeNode(6)
print("Input Tree")
traverse(root)
print("Reversed Tree")
Solution().invertTree(root)
traverse(root)
39789266842 | import logging
import os
from collections.abc import Iterable
import numpy as np
from unicore.data import (
Dictionary,
NestedDictionaryDataset,
AppendTokenDataset,
PrependTokenDataset,
RightPadDataset,
TokenizeDataset,
RightPadDataset2D,
RawArrayDataset,
FromNumpyDataset,
EpochShuffleDataset,
)
from unimol.data import (
KeyDataset,
ConformerSampleDockingPoseDataset,
DistanceDataset,
EdgeTypeDataset,
NormalizeDataset,
RightPadDatasetCoord,
LMDBDataset,
CrossDistanceDataset,
NormalizeDockingPoseDataset,
TTADockingPoseDataset,
RightPadDatasetCross2D,
CroppingPocketDockingPoseDataset,
PrependAndAppend2DDataset,
RemoveHydrogenPocketDataset,
)
from unicore import checkpoint_utils
from unicore.tasks import UnicoreTask, register_task
logger = logging.getLogger(__name__)
@register_task("docking_pose")
class DockingPose(UnicoreTask):
"""Task for training transformer auto-encoder models."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument(
"data",
help="downstream data path",
)
parser.add_argument(
"--finetune-mol-model",
default=None,
type=str,
help="pretrained molecular model path",
)
parser.add_argument(
"--finetune-pocket-model",
default=None,
type=str,
help="pretrained pocket model path",
)
parser.add_argument(
"--conf-size",
default=10,
type=int,
help="number of conformers generated with each molecule",
)
parser.add_argument(
"--dist-threshold",
type=float,
default=8.0,
help="threshold for the distance between the molecule and the pocket",
)
parser.add_argument(
"--max-pocket-atoms",
type=int,
default=256,
help="selected maximum number of atoms in a pocket",
)
def __init__(self, args, dictionary, pocket_dictionary):
super().__init__(args)
self.dictionary = dictionary
self.pocket_dictionary = pocket_dictionary
self.seed = args.seed
# add mask token
self.mask_idx = dictionary.add_symbol("[MASK]", is_special=True)
self.pocket_mask_idx = pocket_dictionary.add_symbol("[MASK]", is_special=True)
@classmethod
def setup_task(cls, args, **kwargs):
mol_dictionary = Dictionary.load(os.path.join(args.data, "dict_mol.txt"))
pocket_dictionary = Dictionary.load(os.path.join(args.data, "dict_pkt.txt"))
logger.info("ligand dictionary: {} types".format(len(mol_dictionary)))
logger.info("pocket dictionary: {} types".format(len(pocket_dictionary)))
return cls(args, mol_dictionary, pocket_dictionary)
def load_dataset(self, split, **kwargs):
"""Load a given dataset split.
'smi','pocket','atoms','coordinates','pocket_atoms','pocket_coordinates','holo_coordinates','holo_pocket_coordinates','scaffold'
Args:
split (str): name of the data scoure (e.g., bppp)
"""
data_path = os.path.join(self.args.data, split + ".lmdb")
dataset = LMDBDataset(data_path)
if split.startswith("train"):
smi_dataset = KeyDataset(dataset, "smi")
poc_dataset = KeyDataset(dataset, "pocket")
dataset = ConformerSampleDockingPoseDataset(
dataset,
self.args.seed,
"atoms",
"coordinates",
"pocket_atoms",
"pocket_coordinates",
"holo_coordinates",
"holo_pocket_coordinates",
True,
)
else:
dataset = TTADockingPoseDataset(
dataset,
"atoms",
"coordinates",
"pocket_atoms",
"pocket_coordinates",
"holo_coordinates",
"holo_pocket_coordinates",
True,
self.args.conf_size,
)
smi_dataset = KeyDataset(dataset, "smi")
poc_dataset = KeyDataset(dataset, "pocket")
def PrependAndAppend(dataset, pre_token, app_token):
dataset = PrependTokenDataset(dataset, pre_token)
return AppendTokenDataset(dataset, app_token)
dataset = RemoveHydrogenPocketDataset(
dataset,
"pocket_atoms",
"pocket_coordinates",
"holo_pocket_coordinates",
True,
True,
)
dataset = CroppingPocketDockingPoseDataset(
dataset,
self.seed,
"pocket_atoms",
"pocket_coordinates",
"holo_pocket_coordinates",
self.args.max_pocket_atoms,
)
dataset = RemoveHydrogenPocketDataset(
dataset, "atoms", "coordinates", "holo_coordinates", True, True
)
apo_dataset = NormalizeDataset(dataset, "coordinates")
apo_dataset = NormalizeDataset(apo_dataset, "pocket_coordinates")
src_dataset = KeyDataset(apo_dataset, "atoms")
src_dataset = TokenizeDataset(
src_dataset, self.dictionary, max_seq_len=self.args.max_seq_len
)
coord_dataset = KeyDataset(apo_dataset, "coordinates")
src_dataset = PrependAndAppend(
src_dataset, self.dictionary.bos(), self.dictionary.eos()
)
edge_type = EdgeTypeDataset(src_dataset, len(self.dictionary))
coord_dataset = FromNumpyDataset(coord_dataset)
distance_dataset = DistanceDataset(coord_dataset)
coord_dataset = PrependAndAppend(coord_dataset, 0.0, 0.0)
distance_dataset = PrependAndAppend2DDataset(distance_dataset, 0.0)
src_pocket_dataset = KeyDataset(apo_dataset, "pocket_atoms")
src_pocket_dataset = TokenizeDataset(
src_pocket_dataset,
self.pocket_dictionary,
max_seq_len=self.args.max_seq_len,
)
coord_pocket_dataset = KeyDataset(apo_dataset, "pocket_coordinates")
src_pocket_dataset = PrependAndAppend(
src_pocket_dataset,
self.pocket_dictionary.bos(),
self.pocket_dictionary.eos(),
)
pocket_edge_type = EdgeTypeDataset(
src_pocket_dataset, len(self.pocket_dictionary)
)
coord_pocket_dataset = FromNumpyDataset(coord_pocket_dataset)
distance_pocket_dataset = DistanceDataset(coord_pocket_dataset)
coord_pocket_dataset = PrependAndAppend(coord_pocket_dataset, 0.0, 0.0)
distance_pocket_dataset = PrependAndAppend2DDataset(
distance_pocket_dataset, 0.0
)
holo_dataset = NormalizeDockingPoseDataset(
dataset,
"holo_coordinates",
"holo_pocket_coordinates",
"holo_center_coordinates",
)
holo_coord_dataset = KeyDataset(holo_dataset, "holo_coordinates")
holo_coord_dataset = FromNumpyDataset(holo_coord_dataset)
holo_coord_pocket_dataset = KeyDataset(holo_dataset, "holo_pocket_coordinates")
holo_coord_pocket_dataset = FromNumpyDataset(holo_coord_pocket_dataset)
holo_cross_distance_dataset = CrossDistanceDataset(
holo_coord_dataset, holo_coord_pocket_dataset
)
holo_distance_dataset = DistanceDataset(holo_coord_dataset)
holo_coord_dataset = PrependAndAppend(holo_coord_dataset, 0.0, 0.0)
holo_distance_dataset = PrependAndAppend2DDataset(holo_distance_dataset, 0.0)
holo_coord_pocket_dataset = PrependAndAppend(
holo_coord_pocket_dataset, 0.0, 0.0
)
holo_cross_distance_dataset = PrependAndAppend2DDataset(
holo_cross_distance_dataset, 0.0
)
holo_center_coordinates = KeyDataset(holo_dataset, "holo_center_coordinates")
holo_center_coordinates = FromNumpyDataset(holo_center_coordinates)
nest_dataset = NestedDictionaryDataset(
{
"net_input": {
"mol_src_tokens": RightPadDataset(
src_dataset,
pad_idx=self.dictionary.pad(),
),
"mol_src_distance": RightPadDataset2D(
distance_dataset,
pad_idx=0,
),
"mol_src_edge_type": RightPadDataset2D(
edge_type,
pad_idx=0,
),
"pocket_src_tokens": RightPadDataset(
src_pocket_dataset,
pad_idx=self.pocket_dictionary.pad(),
),
"pocket_src_distance": RightPadDataset2D(
distance_pocket_dataset,
pad_idx=0,
),
"pocket_src_edge_type": RightPadDataset2D(
pocket_edge_type,
pad_idx=0,
),
"pocket_src_coord": RightPadDatasetCoord(
coord_pocket_dataset,
pad_idx=0,
),
},
"target": {
"distance_target": RightPadDatasetCross2D(
holo_cross_distance_dataset, pad_idx=0
),
"holo_coord": RightPadDatasetCoord(holo_coord_dataset, pad_idx=0),
"holo_distance_target": RightPadDataset2D(
holo_distance_dataset, pad_idx=0
),
},
"smi_name": RawArrayDataset(smi_dataset),
"pocket_name": RawArrayDataset(poc_dataset),
"holo_center_coordinates": RightPadDataset(
holo_center_coordinates,
pad_idx=0,
),
},
)
if split.startswith("train"):
nest_dataset = EpochShuffleDataset(
nest_dataset, len(nest_dataset), self.args.seed
)
self.datasets[split] = nest_dataset
def build_model(self, args):
from unicore import models
model = models.build_model(args, self)
if args.finetune_mol_model is not None:
print("load pretrain model weight from...", args.finetune_mol_model)
state = checkpoint_utils.load_checkpoint_to_cpu(
args.finetune_mol_model,
)
model.mol_model.load_state_dict(state["model"], strict=False)
if args.finetune_pocket_model is not None:
print("load pretrain model weight from...", args.finetune_pocket_model)
state = checkpoint_utils.load_checkpoint_to_cpu(
args.finetune_pocket_model,
)
model.pocket_model.load_state_dict(state["model"], strict=False)
return model
| dptech-corp/Uni-Mol | unimol/unimol/tasks/docking_pose.py | docking_pose.py | py | 11,158 | python | en | code | 453 | github-code | 13 |
493736085 | import tornado.web
from tornado.httpclient import HTTPRequest
from emoji_proxy.interfaces import Interfaces
class ProxyHandler(tornado.web.RequestHandler):
    """Proxies GET requests to lifehacker.ru and injects emojis into the body."""
    def initialize(self, ifaces: Interfaces) -> None:
        # Shared interfaces (HTTP client + content filter) are injected via
        # the Application's handler spec.
        self.http_client = ifaces.http_client
        self.content_filter = ifaces.content_filter
    async def get(self, path: str) -> None:
        """Fetch the upstream page at *path* and return it with emojis added."""
        request = HTTPRequest(f'https://lifehacker.ru/{path}')
        response = await self.http_client.fetch(request)
        body = self.content_filter.add_emojis_to_article(response.body)
        self.write(body)
| i-zhivetiev/emoji-proxy | emoji_proxy/proxy_handler.py | proxy_handler.py | py | 573 | python | en | code | 0 | github-code | 13 |
39722447347 | from math import *
# Bulls-and-cows style comparison of two 4-character codes.
# Fix: the original never closed either file handle, so output.txt was not
# guaranteed to be flushed; `with` closes (and flushes) both deterministically.
with open('input.txt', 'r') as inp, open('output.txt', 'w') as out:
    a, b = inp.readline().split(' ')[0:2]
    x, y = 0, 0
    # x: exact position matches; y: character of `a` present elsewhere in `b`.
    for i in range(4):
        if a[i] == b[i]:
            x += 1
        elif a[i] in b:
            y += 1
    print(x, y)
    out.write(str(x) + " " + str(y))
| esix/competitive-programming | acmp/page-01/0013/main.py | main.py | py | 273 | python | en | code | 15 | github-code | 13 |
40034551993 | """
服务器讯息打印
"""
from datetime import datetime, timezone, timedelta
from pyrogram import filters
from bot import bot, emby_line, tz_id
from bot.func_helper.emby import emby
from bot.func_helper.filters import user_in_group_on_filter
from bot.sql_helper.sql_emby import sql_get_emby
from bot.func_helper.fix_bottons import cr_page_server
from bot.func_helper.msg_utils import callAnswer, editMessage
@bot.on_callback_query(filters.regex('server') & user_in_group_on_filter)
async def server(_, call):
    """
    Show the account password, route lines, and configured server info.

    :param _: pyrogram Client (unused)
    :param call: the triggering callback query
    :return: None
    """
    try:
        # Callback data like "server:<line-id>" selects a specific line.
        j = call.data.split(':')[1]
    except IndexError:
        # First view: no line selected yet, show a loading message first.
        send = await editMessage(call, "**▎🌐查询中...\n\nο(=•ω<=)ρ⌒☆ 发送bibo电波~bibo~ \n⚡ 卡住请等待即可.**")
        if send is False:
            return
        keyboard, sever = await cr_page_server()
        if len(tz_id) > 1:
            sever = sever[tz_id[0]]
    else:
        keyboard, sever = await cr_page_server()
        sever = sever[j]
        await callAnswer(call, '🌐查询中...')
    data = sql_get_emby(tg=call.from_user.id)
    if data is None:
        return await editMessage(call, '⚠️ 数据库没有你,请重新 /start录入')
    lv = data.lv
    pwd = data.pwd
    # Levels d/c/e may not view the route line details.
    if lv == "d" or lv == "c" or lv == "e":
        x = ' - **无权查看**'
    else:
        x = f'{emby_line}'
    online = emby.get_current_playing_count()
    text = f'**▎目前线路 & 用户密码 `{pwd}`**\n\n{x}\n\n{sever}· 🎬 在线 | **{online}** 人\n\n **· 🌏 [{(datetime.now(timezone(timedelta(hours=8)))).strftime("%Y-%m-%d %H:%M:%S")}]**'
    await editMessage(call, text, buttons=keyboard)
| mdnoyanred/Sakura_embyboss | bot/modules/panel/server_panel.py | server_panel.py | py | 1,810 | python | en | code | null | github-code | 13 |
41488981191 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from app01 import views
# NOTE(review): patterns() is the pre-Django-1.8 URLconf style; a plain list
# of url() entries replaces it on newer Django versions.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'django_08bbs.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^login/', views.login),
    url(r'^index/', views.index),
    url(r'^addfavor/', views.addfavor),
    url(r'^getreply/', views.getreply),
    url(r'^submitreply/', views.submitreply),
    url(r'^submitchat/', views.submitchat),
    url(r'^getchart/', views.getchart),
    url(r'^getchart2/', views.getchart2),
    url(r'^api/', include('app01.urls')),
)
| zhangjinsi/django_08bbs | django_08bbs/urls.py | urls.py | py | 667 | python | en | code | 0 | github-code | 13 |
13628468079 | from shop.models import Cart, CartItem
def cartProcessor(request):
    """Template context processor exposing the current user's cart.

    Returns a dict with the item count, the cart items, and the cart total.
    Bug fix: a Django context processor must always return a dict — the
    original returned None for anonymous users, which breaks template
    rendering; empty defaults are returned instead.
    """
    if not request.user.is_authenticated:
        return {'cartCount': 0, 'cartList': [], 'cartTotal': 0.0}
    cart, created = Cart.objects.get_or_create(user=request.user)
    cartItems = CartItem.objects.filter(cart=cart)
    qty = 0
    total = 0.0
    for item in cartItems:
        qty += item.quantity
        total += item.amt
    return {'cartCount': qty, 'cartList': cartItems, 'cartTotal': total}
74662663696 | import pygame.font
class Button():
    """A clickable rectangular button rendered with pygame.

    The rect is centered on the screen and the label is rendered once in
    prep_msg(); draw_button() blits both each frame.
    """
    def __init__(self,screen,msg):
        #initialize button attributes
        self.screen=screen
        self.screen_rect=screen.get_rect()
        #set dimensions/properties of the button
        self.width,self.height=200,50
        self.button_color=(0,255,0)
        self.text_color=(255,255,255)
        self.font=pygame.font.SysFont(None,48)
        #Build buttons rect object and center it
        self.rect=pygame.Rect(0,0,self.width,self.height)
        self.center=self.screen_rect.center
        #Button msg should be prepped only once
        self.prep_msg(msg)
    def prep_msg(self,msg):
        #Turn msg into a rendered image and center text on the button.
        self.msg_image = self.font.render(msg, True, self.text_color,
                                          self.button_color)
        self.msg_image_rect = self.msg_image.get_rect()
        self.msg_image_rect.center = self.rect.center
    def draw_button(self):
        #Draw button to screen
        #draws rectangular portion of the button
        self.screen.fill(self.button_color, self.rect)
        #draws text image to screen passing it an image and the rect object associated w image
        self.screen.blit(self.msg_image, self.msg_image_rect)
| Muchiri-cmd/learningpython | Alien Invasion/Alien Invasion/button.py | button.py | py | 1,277 | python | en | code | 1 | github-code | 13 |
15009931138 | import os
import pickle
import paddle
import paddlenlp
from paddle.io import Dataset, DataLoader
import paddle.nn as nn
from conf import MODELNAME
class BanfoDataset(Dataset):
    """Thin ``paddle.io.Dataset`` over pre-tokenized (input_ids, label_ids) pairs."""

    def __init__(self, data, tokenizer):
        super().__init__()
        # Each element of ``data`` is a (input_ids, label_ids) pair.
        self.data = data
        self.tokenizer = tokenizer

    def __getitem__(self, idx):
        ids, labels = self.data[idx]
        return (paddle.to_tensor(ids, dtype='int64'),
                paddle.to_tensor(labels, dtype='int64'))

    def __len__(self):
        return len(self.data)
# Ensure the checkpoint directory exists before training.
if not os.path.exists('models'):
    os.mkdir('models')
paddle.set_device('gpu')
# Build the GPT backbone plus the pretraining (LM) head from the configured model.
gptModel = paddlenlp.transformers.GPTModel.from_pretrained(MODELNAME)
gptModel = paddlenlp.transformers.GPTForPretraining(gptModel)
tokenizer = paddlenlp.transformers.GPTChineseTokenizer.from_pretrained(MODELNAME)
# If a local checkpoint exists, load it over the pretrained weights.
checkpoint = os.path.join('models', 'model_state.pdparams')
if os.path.exists(checkpoint):
    model_state = paddle.load(checkpoint)
    gptModel.set_state_dict(model_state)
# Switch to evaluation mode for the generation smoke test below.
gptModel.eval()
# Sanity-check generation quality on a fixed Chinese prompt before training.
encodedText = tokenizer(text='前段时间我跟一个老大哥一起吃火锅。大哥的孩子,都上学了', return_token_type_ids=False)
ids, _ = gptModel.generate(input_ids=paddle.to_tensor(encodedText['input_ids'], dtype='int64').unsqueeze(0),
                           max_length=16, min_length=1, decode_strategy='sampling')
ids = ids[0].numpy().tolist()
# Convert the generated token ids back into text with the tokenizer.
text = tokenizer.convert_ids_to_string(ids)
print('generation text is {}'.format(text))
# Load the pre-tokenized training data.
with open(os.path.join('preprocessData', 'trainData.pkl'), 'rb') as f:
    data = pickle.load(f)
trainDataLoader = DataLoader(dataset=BanfoDataset(data, tokenizer), batch_size=64, shuffle=True, return_list=True)
# Training hyperparameters.
numEpochs = 100
learningRate = 2e-5
warmupProportion = 0.1
weightDecay = 0.1
maxSteps = (len(trainDataLoader) * numEpochs)
# Linear warmup for the first warmupProportion of steps, then linear decay to 0.
lr_scheduler = paddle.optimizer.lr.LambdaDecay(learningRate,
                                               lambda currentStep, numWarmupSteps=maxSteps * warmupProportion, numTrainingSteps=maxSteps: float(currentStep) / float(max(1, numWarmupSteps)) if currentStep < numWarmupSteps else max(0.0, float(numTrainingSteps - currentStep) / float(max(1, numTrainingSteps - numWarmupSteps))))
# AdamW with global-norm gradient clipping; weight decay is applied only to
# parameters whose names contain neither "bias" nor "norm".
optimizer = paddle.optimizer.AdamW(learning_rate=lr_scheduler,
                                   parameters=gptModel.parameters(),
                                   weight_decay=weightDecay,
                                   grad_clip=nn.ClipGradByGlobalNorm(1.0),
                                   apply_decay_param_fun=lambda x: x in [
                                       p.name for n, p in gptModel.named_parameters()
                                       if not any(nd in n for nd in ["bias", "norm"])
                                   ])
globalStep = 1
save_steps = 100
criterion = paddle.nn.loss.CrossEntropyLoss()
# Back to training mode for the optimization loop.
gptModel.train()
for epoch in range(numEpochs):
    for step, batch in enumerate(trainDataLoader, start=1):
        ids, label = batch
        logits, _ = gptModel.forward(ids, use_cache=True)
        loss = criterion(logits, label)
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        # NOTE(review): newer Paddle versions spell this optimizer.clear_grad();
        # confirm clear_gradients() exists in the pinned Paddle release.
        optimizer.clear_gradients()
        # Periodically log the loss and checkpoint the model.
        if globalStep % save_steps == 0:
            print(globalStep, loss.numpy())
            gptModel.save_pretrained('models')
        globalStep += 1
| WithHades/banfoStyle | train.py | train.py | py | 3,471 | python | en | code | 164 | github-code | 13 |
73210238418 | import unittest
from caninehotel_backend.database import connect_to_mongodb
from caninehotel_backend.modules import room
# Open the MongoDB connection once at import time so the tests below can use it.
connect_to_mongodb()
class TestRoomOperations(unittest.TestCase):
    """Integration tests for the room ODM operations."""

    def test_add(self):
        """Adding a room document should return a truthy result."""
        data = {
            'number': 1,
            'type_room': 'CLASICA',
            'cost': 17.2,
        }
        self.assertTrue(bool(room.operations.add(data)))
if __name__ == 'main':
unittest.main() | HeinerAlejandro/caninehotel | caninehotel_backend/caninehotel_backend/tests/odm/room_operations_test.py | room_operations_test.py | py | 380 | python | en | code | 0 | github-code | 13 |
21253942996 | class Solution:
def isMatch(self, s: str, p: str) -> bool:
n = len(s)
m = len(p)
cache = {}
def dfs(i, j):
if i >= n and j >= m:
return True
if j >= m:
return False
if (i, j) in cache:
return cache[(i, j)]
match = i < n and (s[i] == p[j] or p[j] == '.')
cache[(i, j)] = False
if (j + 1) < m and p[j + 1] == "*":
cache[(i, j)] = dfs(i, j + 2) or (match and dfs(i + 1, j))
if match:
cache[(i, j)] = dfs(i + 1, j + 1)
return cache[(i, j)]
return dfs(0, 0)
# Ad-hoc smoke test executed at import time.
print(Solution().isMatch("aab", "c*a*b"))
| sundar91/dsa | DP/regex-1.py | regex-1.py | py | 728 | python | en | code | 0 | github-code | 13 |
24501859839 | import time
import numpy as np
import tensorflow as tf
def get_train_data(n):
    """Generate ``n`` random samples in [0, 1)^3 with linear targets.

    Targets are y = x @ [0.1, 0.2, 0.3]^T, so a perfectly trained model
    should recover those weights.

    Returns:
        (features, targets): arrays of shape (n, 3) and (n, 1).
    """
    features = np.random.random((n, 3))
    true_weights = np.array([[0.1], [0.2], [0.3]])
    targets = np.dot(features, true_weights)
    return features, targets
def get_w(shape, lumbda):
    """Create a weight variable and register its L2 penalty.

    Args:
        shape: shape of the weight matrix
        lumbda: L2 regularization coefficient (i.e. "lambda",
                misspelled to avoid the Python keyword)
    Returns:
        The newly created tf.Variable; its scaled L2 norm is added to
        the 'losses' collection as a side effect.
    """
    weight = tf.Variable(tf.random_normal(shape, seed=1), dtype=tf.float32)
    penalty = tf.contrib.layers.l2_regularizer(lumbda)(weight)
    tf.add_to_collection('losses', penalty)
    return weight
def train():
    """Fit a 3-10-10-1 stack of matmuls to the synthetic linear data.

    Builds a TF1 graph: input X (batch, 3), target y_ (batch, 1), a chain
    of weight matrices (each registering an L2 penalty via get_w), then
    minimizes MSE + L2 with Adagrad for ``r`` steps, printing the loss
    and the last layer's weights each step.
    """
    r = 800  # number of training iterations
    n = 200  # samples drawn per iteration
    y_ = tf.placeholder(tf.float32, shape=[None, 1])
    X = tf.placeholder(tf.float32, shape=[None, 3])
    # Layer widths: 3 inputs -> 10 -> 10 -> 1 output.  There are no
    # activations between matmuls, so the stack is a single linear map.
    layers_nodes = [3, 10, 10, 1]
    n_layers = len(layers_nodes)
    in_node = layers_nodes[0]
    Y = X
    for i in range(1, n_layers):
        out_node = layers_nodes[i]
        # get_w also adds this layer's L2 penalty to the 'losses' collection.
        w = get_w([in_node, out_node], 0.0001)
        Y = tf.matmul(Y, w)
        in_node = out_node
    # After the loop, `w` refers only to the LAST layer's weights.
    loss_end = tf.reduce_mean(tf.square(y_ - Y))
    tf.add_to_collection('losses', loss_end)
    # Total loss = MSE plus every registered L2 penalty.
    loss = tf.add_n(tf.get_collection("losses"))
    train_step = tf.train.AdagradOptimizer(0.1).minimize(loss)
    with tf.Session() as sess:
        init_variables = tf.global_variables_initializer()
        sess.run(init_variables)
        # Debug peek at one batch of training data before the loop.
        train_x, train_y = get_train_data(n)
        print(train_x, train_y)
        for i in range(r):
            # Fresh random batch every step.
            train_x, train_y = get_train_data(n)
            _, loss_, w_ = sess.run([train_step, loss, w], feed_dict={
                X: train_x,
                y_: train_y})
            print(loss_, '---', w_)
            # Slow the loop down so the printed progress is readable.
            time.sleep(0.1)
# Script entry point.
if __name__ == "__main__":
    train()
| yunsonbai/tensorflow_example | l2Collection.py | l2Collection.py | py | 1,567 | python | en | code | 1 | github-code | 13 |
5145461626 | """
Checks if all source and test files were analyzed by infer.
Run this script in the same directory with ./infer-out
and all src/test dirs.
"""
import re
import os
import sys
from os.path import join
def add_arr_and_dict_to_list(files_list, array, dictionary):
    """Append ``array`` then ``dictionary`` to ``files_list`` and return it.

    Args:
        files_list: (usually empty) list to populate
        array: file paths
        dictionary: file names mapped to occurrence counts
    Returns:
        The same ``files_list`` with the array and dictionary appended,
        in that order.
    """
    files_list.extend([array, dictionary])
    return files_list
def add_file_to_dict(dictionary, file):
    """Record one occurrence of ``file`` in the counting dictionary.

    Args:
        dictionary: file name -> number of instances seen so far
        file: file name to record
    Returns:
        The same dictionary, with ``file``'s count incremented
        (or initialized to 1 on first sighting).
    """
    dictionary[file] = dictionary.get(file, 0) + 1
    return dictionary
def cut_last_three_fields(filename):
    """Return the leading dotted-path field of an infer HTML report name.

    Filenames look like ``path.filename.method(args):return.hashcode.ext``;
    the regex carves the name into four groups and the first group (the
    package-style path prefix, as resolved by the regex engine's
    backtracking) is returned.  Characters outside the group character
    classes -- e.g. spaces inside the argument list -- influence where
    the split lands.
    """
    regex = "([a-z0-9._]+)" \
            "([.][a-zA-Z0-9_$,:;<>.()\[\]-]+\.)" \
            "([a-z0-9]+\.)" \
            "([a-z]+)"
    matched = re.match(regex, filename)
    # The path prefix is the first capture group.
    return matched.group(1)
def get_filename(directory_path):
    """Recover ``name.ext`` from a capture directory that has no child HTML.

    ``directory_path`` looks like ``./infer-out/captured/name.ext.hashcode``;
    the fourth '/'-separated field is split on '.' and the first two
    pieces are rejoined.

    Args:
        directory_path: path to a capture directory with no child HTML
    Returns:
        The ``name.ext`` portion of the directory name.
    """
    captured_dir = directory_path.split("/")[3]
    pieces = captured_dir.split(".")
    return pieces[0] + "." + pieces[1]
def get_filename_from_dir(directory_path):
    """Return the bare file name (text before the first '.') from a capture dir.

    e.g. ``./infer-out/captured/filename.java.hashcode`` -> ``filename``.

    Args:
        directory_path: path to a capture directory
    Returns:
        The extensionless file name.
    """
    captured_dir = directory_path.split("/")[3]
    return captured_dir.split(".")[0]
def preprocess_infer_files(infer_arr, infer_dic):
    """
    Walk ./infer-out/captured and classify each capture directory.

    Args:
        infer_arr: empty array
        infer_dic: empty dictionary
    Returns:
        A list containing
        1) populated array of paths of child HTML files
        2) populated dictionary of filenames of dirs
           w/out child HTML files
    """
    # Traverse through the sub-directories in ./infer-out/captured
    for root, dirs, files in os.walk("./infer-out/captured"):
        for current_directory in dirs:
            directory_path = os.path.join(root, current_directory)
            # Do not enter the "nodes" directory in /captured
            if current_directory == "nodes":
                continue
            # Want to check if current directory has a child HTML file
            has_child_html = False
            # Iterate through the files and directories in current directory
            items = os.listdir(directory_path)
            for item in items:
                # Found a child HTML java file
                if item.endswith(".html"):
                    has_child_html = True
                    # get the path to the filename
                    separated_filename = cut_last_three_fields(item) + "."
                    # append the filename
                    separated_filename += get_filename_from_dir(directory_path)
                    # modify the filename into a valid path
                    final_path = separated_filename.replace(".", "/")
                    infer_arr.append(final_path)
                    # Only the FIRST html file per directory is recorded.
                    break
            # Child HTML not found
            if not has_child_html:
                # Add its filename to captured_dict
                infer_dic = add_file_to_dict(infer_dic,
                                             get_filename(directory_path))
    captured_files_list = []
    captured_files_list = add_arr_and_dict_to_list(captured_files_list,
                                                   infer_arr, infer_dic)
    return captured_files_list
def preprocess_src_and_test_files(src_test_array, src_test_dict):
    """
    Collect every .java file path and name outside ./infer-out.

    Args:
        src_test_array: empty array
        src_test_dict: empty dictionary
    Returns:
        A list containing:
        1) populated array of src and test file paths
        2) populated dictionary of src and test filenames
    """
    # Find all java file paths outside of ./infer-out/captured
    for root, dirs, files in os.walk("."):
        # Ignore any directories within ./infer-out
        if "infer-out" in root:
            continue
        # Check directories outside ./infer-out
        else:
            for dir_name in dirs:
                if "infer-out" not in dir_name:
                    # Make a list of all files outside ./infer-out
                    # (this rebinds os.walk's `files`: each directory's
                    # contents are listed via its parent, so .java files
                    # sitting directly in "." are never picked up --
                    # NOTE(review): confirm that is intended)
                    files = os.listdir(os.path.join(root, dir_name))
                    for file in files:
                        # From the list, get all .java files
                        if file.endswith(".java"):
                            file_path = os.path.join(root, dir_name, file)
                            # Add the file path to src_test_array
                            src_test_array.append(file_path)
                            # Add the filename to src_test_dict
                            java_file = file_path.split("/")[-1]
                            src_test_dict = add_file_to_dict(src_test_dict,
                                                             java_file)
    src_test_list = []
    src_test_list = add_arr_and_dict_to_list(src_test_list,
                                             src_test_array, src_test_dict)
    return src_test_list
def compare_dicts(file, src_test_dict, infer_dict):
    """Consume one occurrence of ``file`` from both counting dictionaries.

    When ``file`` has a positive count in BOTH ``src_test_dict`` and
    ``infer_dict``, both counters are decremented by one.

    Args:
        file: file potentially not analyzed by infer
        src_test_dict: dictionary of src/test file counts
        infer_dict: dictionary of infer file counts
    Returns:
        True when a matching occurrence existed in both dicts, else False.
    """
    present_in_both = file in src_test_dict and file in infer_dict
    if not present_in_both:
        return False
    if src_test_dict[file] <= 0 or infer_dict[file] <= 0:
        return False
    src_test_dict[file] -= 1
    infer_dict[file] -= 1
    return True
def check_list(not_found_list, src_test_dict, infer_dict):
    """
    Checks a list of files potentially not analyzed by infer
    Verifies if a file was analyzed by infer
    by checking the file in the source/test dict & infer dict
    If the file was indeed analyzed, we remove it from the list
    Args:
        not_found_list: a list of files
        src_test_dict: dictionary containing src/test files
        infer_dict: dictionary containing infer files
    Returns:
        the list of remaining files
    """
    index = 0
    # For each file in not_found_list
    # This loop checks if the list is empty
    while len(not_found_list) > 0 and index < len(not_found_list):
        # Check the file in both src/test and infer dictionaries
        # (compare_dicts also decrements both counters on success)
        file_found_in_dicts = compare_dicts(not_found_list[index],
                                            src_test_dict, infer_dict)
        # File was found in both src and infer dictionaries
        if file_found_in_dicts:
            # Remove the file from our list
            # (index intentionally NOT advanced: deletion shifts the
            # next element into the current slot)
            del not_found_list[index]
        else:  # File was not found in both dictionaries
            # Check the next file on the list
            index += 1
    if len(not_found_list) == 0:
        print("All source and test files were analyzed by infer.")
    else:
        print("These source/test files were not analyzed"
              " by infer: {}".format(not_found_list))
    return not_found_list
def compare_arrays(not_found_list, src_test_array, infer_array):
    """Collect source/test files with no counterpart in infer's output.

    A source/test path counts as analyzed when any infer path is a
    substring of it; unmatched paths get their file name appended to
    ``not_found_list``.

    Args:
        not_found_list: list to append unmatched file names to
        src_test_array: source and test file paths
        infer_array: file paths analyzed by infer
    Returns:
        ``not_found_list`` with the names of files infer did not analyze.
    """
    # For each source and test file
    for src_test_path in src_test_array:
        # Bug fix: the found-flag must be reset for EVERY source path.
        # The original only reset it inside the not-found branch, so once
        # one file matched, the flag stayed True and every later
        # unanalyzed file was silently skipped.
        file_found = False
        # Find a corresponding file in the infer array
        for infer_path in infer_array:
            # Infer file path is a substring of the source/test file path
            if infer_path in src_test_path:
                file_found = True
                break
        # File not found in infer array: record its bare file name
        if not file_found:
            not_found_list.append(src_test_path.split("/")[-1])
    return not_found_list
def main():
    """Check whether every source/test file was analyzed by infer.

    Prints either a success message or the list of files that were not
    analyzed (via check_list), and writes that list -- one name per
    line, comma-terminated -- to results.txt.
    """
    # Bug fix: the original opened results.txt and never closed it; the
    # with-block guarantees the handle is flushed and closed on any exit.
    with open("results.txt", "w") as results:
        # Infer's ./infer-out/captured contents: an array of paths for
        # capture dirs that have a child HTML report, and a dict of
        # file-name counts for dirs that do not.
        infer_array = []
        infer_dict = {}
        infer_list = preprocess_infer_files(infer_array, infer_dict)
        # Retrieve the infer array and dictionary from infer_list.
        infer_array = infer_list[0]
        infer_dict = infer_list[1]
        # All .java source/test files outside ./infer-out: an array of
        # paths plus a dict of file-name occurrence counts.
        src_and_test_array = []
        src_and_test_dict = {}
        src_and_test_list = preprocess_src_and_test_files(src_and_test_array,
                                                          src_and_test_dict)
        src_and_test_array = src_and_test_list[0]
        src_and_test_dict = src_and_test_list[1]
        # First pass: substring comparison of the two path arrays.
        not_found_list = compare_arrays([], src_and_test_array, infer_array)
        # Second pass: reconcile the leftovers against the name counts and
        # print the outcome (success message or list of unanalyzed files).
        not_found_list = check_list(not_found_list, src_and_test_dict,
                                    infer_dict)
        if not_found_list:
            for item in not_found_list:
                results.write('{},\n'.format(item))
# Script entry point: the process exit status comes from main() (None -> 0).
if __name__ == "__main__":
    sys.exit(main())
| ucd-plse/Static-Bug-Detectors-ASE-Artifact | scripts/util/infer-coverage.py | infer-coverage.py | py | 11,799 | python | en | code | 5 | github-code | 13 |
43341769717 | '''
-find how many digits are in a given number
-find a digit
--raise number to the power
-Find some of the powered digits
-Compare final number to input number
'''
def is_armstrong(number):
    """Return True if ``number`` is an Armstrong (narcissistic) number.

    A k-digit number is Armstrong when the sum of its digits, each
    raised to the k-th power, equals the number itself
    (e.g. 153 = 1**3 + 5**3 + 3**3).
    """
    # Fixes over the original: the builtin ``sum`` is no longer shadowed
    # by a local accumulator, and the leftover debug print(number) that
    # fired on every call is removed.
    digits = str(number)
    power = len(digits)
    return sum(int(digit) ** power for digit in digits) == number
def generate_armstrong_numbers(highend):
    """Print every Armstrong number in the range [0, highend)."""
    for candidate in range(highend):
        if is_armstrong(candidate):
            print(str(candidate) + " Armstrong number. ")
# NOTE(review): 10**15 iterations -- this run will effectively never finish;
# use a much smaller bound (e.g. 10**6) for a practical demo.
generate_armstrong_numbers(1000000000000000)
| JBoas/python | armstrong.py | armstrong.py | py | 641 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.