seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 โ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k โ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
17057244494 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class OpenIdConfigRequestExt(object):
    """Alipay SDK domain model ``OpenIdConfigRequestExt``.

    Generated-style value object: private fields exposed through plain
    properties plus dict (de)serialization helpers used by the SDK gateway.
    """

    def __init__(self):
        # All fields start unset; they are filled via the property setters
        # or by from_alipay_dict().
        self._biz_id = None
        self._biz_type = None
        self._cal_type = None
        self._execute_mode = None
        self._gray_mode = None
        self._gray_ratio = None
        self._gray_users = None

    @property
    def biz_id(self):
        return self._biz_id

    @biz_id.setter
    def biz_id(self, value):
        self._biz_id = value

    @property
    def biz_type(self):
        return self._biz_type

    @biz_type.setter
    def biz_type(self, value):
        self._biz_type = value

    @property
    def cal_type(self):
        return self._cal_type

    @cal_type.setter
    def cal_type(self, value):
        self._cal_type = value

    @property
    def execute_mode(self):
        return self._execute_mode

    @execute_mode.setter
    def execute_mode(self, value):
        self._execute_mode = value

    @property
    def gray_mode(self):
        return self._gray_mode

    @gray_mode.setter
    def gray_mode(self, value):
        self._gray_mode = value

    @property
    def gray_ratio(self):
        return self._gray_ratio

    @gray_ratio.setter
    def gray_ratio(self, value):
        self._gray_ratio = value

    @property
    def gray_users(self):
        return self._gray_users

    @gray_users.setter
    def gray_users(self, value):
        # The field is a list in the API, so only list values are copied.
        # NOTE(review): non-list values are silently ignored, leaving the
        # previous value in place.
        if isinstance(value, list):
            self._gray_users = list()
            for i in value:
                self._gray_users.append(i)

    def to_alipay_dict(self):
        """Serialize the set fields into a plain dict for the OpenAPI gateway.

        Fields are emitted only when truthy (the generated-SDK convention,
        so falsy-but-set values such as 0 or '' are skipped), and nested
        models are serialized via their own to_alipay_dict().
        """
        params = dict()
        if self.biz_id:
            if hasattr(self.biz_id, 'to_alipay_dict'):
                params['biz_id'] = self.biz_id.to_alipay_dict()
            else:
                params['biz_id'] = self.biz_id
        if self.biz_type:
            if hasattr(self.biz_type, 'to_alipay_dict'):
                params['biz_type'] = self.biz_type.to_alipay_dict()
            else:
                params['biz_type'] = self.biz_type
        if self.cal_type:
            if hasattr(self.cal_type, 'to_alipay_dict'):
                params['cal_type'] = self.cal_type.to_alipay_dict()
            else:
                params['cal_type'] = self.cal_type
        if self.execute_mode:
            if hasattr(self.execute_mode, 'to_alipay_dict'):
                params['execute_mode'] = self.execute_mode.to_alipay_dict()
            else:
                params['execute_mode'] = self.execute_mode
        if self.gray_mode:
            if hasattr(self.gray_mode, 'to_alipay_dict'):
                params['gray_mode'] = self.gray_mode.to_alipay_dict()
            else:
                params['gray_mode'] = self.gray_mode
        if self.gray_ratio:
            if hasattr(self.gray_ratio, 'to_alipay_dict'):
                params['gray_ratio'] = self.gray_ratio.to_alipay_dict()
            else:
                params['gray_ratio'] = self.gray_ratio
        if self.gray_users:
            if isinstance(self.gray_users, list):
                # NOTE(review): serializes list elements in place, mutating
                # self.gray_users as a side effect of serialization.
                for i in range(0, len(self.gray_users)):
                    element = self.gray_users[i]
                    if hasattr(element, 'to_alipay_dict'):
                        self.gray_users[i] = element.to_alipay_dict()
            if hasattr(self.gray_users, 'to_alipay_dict'):
                params['gray_users'] = self.gray_users.to_alipay_dict()
            else:
                params['gray_users'] = self.gray_users
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a response dict; returns None when *d* is empty."""
        if not d:
            return None
        o = OpenIdConfigRequestExt()
        if 'biz_id' in d:
            o.biz_id = d['biz_id']
        if 'biz_type' in d:
            o.biz_type = d['biz_type']
        if 'cal_type' in d:
            o.cal_type = d['cal_type']
        if 'execute_mode' in d:
            o.execute_mode = d['execute_mode']
        if 'gray_mode' in d:
            o.gray_mode = d['gray_mode']
        if 'gray_ratio' in d:
            o.gray_ratio = d['gray_ratio']
        if 'gray_users' in d:
            o.gray_users = d['gray_users']
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/OpenIdConfigRequestExt.py | OpenIdConfigRequestExt.py | py | 4,187 | python | en | code | 241 | github-code | 13 |
def solve(x, k):
    """Round *x* to the nearest multiple of 10**k, one digit at a time.

    Each pass drops the lowest remaining digit via divmod and rounds
    half-up (digits 5-9 carry into the next digit), matching the
    repeated-rounding rule of AtCoder ABC273 B.

    Extracted from main() so the computation is testable without the
    module-level globals / stdin.
    """
    for _ in range(k):
        x, lowest = divmod(x, 10)
        if lowest > 4:
            x += 1
    return x * 10 ** k


def main():
    """Print the answer for globals X and K (set by the entry guard)."""
    print(solve(X, K))


if __name__ == '__main__':
    X, K = map(int, input().split())
    main()
| Shirohi-git/AtCoder | abc271-/abc273_b.py | abc273_b.py | py | 231 | python | en | code | 2 | github-code | 13 |
39051725437 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on: 2019-01-02
@author: Byng Zeng
"""
from tkinter import *
from tkinter.filedialog import askdirectory, askopenfilename
class WebImageCrawlerWindow(object):
    """Tk front-end for the web image crawler.

    Builds a menu bar, a path entry with a Run button, and a scrollable
    list of download entries. All widgets/frames are kept in the
    ``self._wm`` registry, keyed by short names such as 'enPath', 'lbFs'.
    """

    HELP_MENU = (
        '==================================',
        ' Template help',
        '==================================',
        'option: -x xxx',
        ' -x xxx: xxxx',
    )

    def __init__(self, name=None):
        self._name = name
        # Widget registry: short name -> widget/frame instance.
        self._wm = dict()

    def menu_file_open(self):
        """File->Open: ask for a file and insert its path into the path entry."""
        print('menu file open')
        f = askopenfilename()
        enPath = self._wm['enPath']
        enPath.insert(0, f)

    def menu_file_exit(self):
        """File->Exit handler (stub: only logs)."""
        print('menu file exit')

    def menu_about_about(self):
        """About->About handler (stub: only logs)."""
        print('menu about about')

    def create_menu(self, root):
        """Build the File/About menu bar and attach it to *root*."""
        menubar = Menu(root)
        file_menu = Menu(menubar, tearoff = 0)
        file_menu.add_command(label = 'Open', command=self.menu_file_open)
        file_menu.add_command(label = 'Exit', command=self.menu_file_exit)
        about_menu = Menu(menubar, tearoff = 0)
        about_menu.add_command(label = 'About', command=self.menu_about_about)
        menubar.add_cascade(label = 'File', menu = file_menu)
        menubar.add_cascade(label = 'About', menu = about_menu)
        root['menu'] = menubar

    def on_bnPath_click(self):
        """'Run' button handler (stub: echoes the entered path)."""
        print('get path: %s' % self._wm['enPath'].get())

    def create_main_window_frames(self, root):
        """Create and pack the layout frames (pack order defines the layout)."""
        Path = Frame(root)
        Path.pack(side = TOP, fill=X)
        Hdr = Frame(root)
        Hdr.pack(side = TOP, fill=X)
        Fs = Frame(root)
        Fs.pack(side = TOP, fill=X)
        FsList = Frame(Fs)
        FsList.pack(side = LEFT, expand = 1, fill=X)
        SbY = Frame(Fs)
        SbY.pack(side = RIGHT, fill=Y)
        SbX = Frame(root)
        SbX.pack(side = TOP, fill = X)
        self._wm['frmPath'] = Path
        self._wm['frmHdr'] = Hdr
        self._wm['frmFs'] = Fs
        self._wm['frmFsList'] = FsList
        self._wm['frmSbX'] = SbX
        self._wm['frmSbY'] = SbY

    def create_path_widgets(self):
        """Create the 'Path:' label, the path entry and the Run button."""
        frm = self._wm['frmPath']
        lbPath = Label(frm, text = 'Path:')
        lbPath.pack(side = LEFT, expand=1, fill=X)
        enPath = Entry(frm, width = 78)
        enPath.pack(side = LEFT, expand=1, fill=X)
        bnPath = Button(frm, text = 'Run', command = self.on_bnPath_click)
        bnPath.pack(side = LEFT, expand=1, fill=X)
        self._wm['lbPath'] = lbPath
        self._wm['enPath'] = enPath
        self._wm['bnPath'] = bnPath

    def create_header_widgets(self):
        """Create the URL/State/Output column header labels."""
        frm = self._wm['frmHdr']
        #self.chkFsSelAll = Checkbutton(frm, justify=LEFT)
        self.lbFsURL = Label(frm, text = 'URL', width = 32)
        self.lbFsState = Label(frm, text = 'State', width = 8)
        self.lbFsOutput = Label(frm, text = 'Output', width = 32)
        #self.chkFsSelAll.pack(side = LEFT, expand =1, fill = X)
        self.lbFsURL.pack(side = LEFT, expand =1, fill=X)
        self.lbFsState.pack(side = LEFT, expand =1, fill=X)
        self.lbFsOutput.pack(side = LEFT, expand =1, fill=X)

    def create_file_list_widgets(self):
        """Create the list box plus its vertical/horizontal scrollbars."""
        frmFsList = self._wm['frmFsList']
        lbFs = Listbox(frmFsList, height = 38)
        lbFs.pack(side = LEFT, expand = 1, fill = X)
        frmSbY = self._wm['frmSbY']
        sbY = Scrollbar(frmSbY)
        sbY.pack(side = TOP, expand = 1, fill=Y)
        frmSbX = self._wm['frmSbX']
        sbX = Scrollbar(frmSbX, orient = HORIZONTAL)
        sbX.pack(side = TOP, expand = 1, fill=X)
        self._wm['lbFs'] = lbFs
        self._wm['sbY'] = sbY
        self._wm['sbX'] = sbX

    def create_main_window(self, root):
        """Assemble the main window: frames, path bar, header, file list."""
        # create frames for main window.
        self.create_main_window_frames(root)
        # create path widgets.
        self.create_path_widgets()
        # create header of file list.
        self.create_header_widgets()
        # create file list widghts
        self.create_file_list_widgets()

    def add_file_info(self, url, state, output):
        """Append one left-justified 'url state output' row to the list box."""
        lbfs = self._wm['lbFs']
        lbfs.insert(END, '%s%s%s' % (url.ljust(64), state.ljust(12), output.ljust(64)))
        #ChkList = Checkbutton(lbfs, text = '%s%s%s' % (url.ljust(64), state.ljust(12), output.ljust(64)))
        #ChkList = Checkbutton(lbfs, text = '%s%s%s' % (url, state, output))
        #ChkList.pack(side = TOP, expand = 1, fill = X)

    def update_file_list_scrollbar(self):
        """Placeholder; scrollbars are wired directly in update_file_list()."""
        pass

    def update_file_list(self):
        """Fill the list with demo entries and wire both scrollbars."""
        lbfs = self._wm['lbFs']
        sbY = self._wm['sbY']
        sbX = self._wm['sbX']
        for index in range(100):
            self.add_file_info('https://www.toutiao.com/a1245%d.html' % (1000+index),
                               'Waitting', '/home/yingbin/Dowloads/Pstatp/')
            #lbfs.insert(END, index)
        lbfs['yscrollcommand'] = sbY.set
        sbY['command'] = lbfs.yview
        lbfs['xscrollcommand'] = sbX.set
        sbX['command'] = lbfs.xview
        #self.update_file_list_scrollbar()

    def main(self):
        """Build the whole UI and enter the Tk main loop (blocks until close)."""
        top = Tk()
        self._wm['top'] = top
        top.title('WebImageCrawler')
        top.geometry('800x640')
        top.resizable(0, 0)
        self.create_menu(top)
        self.create_main_window(top)
        self.update_file_list()
        top.mainloop()
if __name__ == '__main__':
    # Launch the crawler GUI when executed as a script.
    window = WebImageCrawlerWindow()
    window.main()
16388480711 | import re
from datetime import datetime
from sqlalchemy import Column, Integer, String, DateTime, Boolean, Date, ForeignKey, Double
from sqlalchemy.ext.declarative import as_declarative
from sqlalchemy.orm import relationship, backref, declared_attr
# Example : One to One Relationship
# class Parent(Base):
# __tablename__ = 'parent'
# id = Column(Integer, primary_key=True)
# child_id = Column(Integer, ForeignKey('child.id'))
# child = relationship("Child", backref=backref("parent", uselist=False))
#
# class Child(Base):
# __tablename__ = 'child'
# id = Column(Integer, primary_key=True)
from src.database import Base
@as_declarative()
class Base:
    """Declarative base shared by every ORM model in this module.

    NOTE(review): this class shadows the ``Base`` imported from
    ``src.database`` just above -- confirm which one the project intends
    to use.
    """

    # Audit timestamps maintained automatically on insert/update.
    created_at = Column(DateTime, default=datetime.now)
    updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now)

    __name__: str

    # CamelCase class name -> snake_case table name, generated automatically.
    @declared_attr
    def __tablename__(cls) -> str:
        return re.sub(r'(?<!^)(?=[A-Z])', '_', cls.__name__).lower()
class User(Base):
    """Base user row; ``user_type`` is the polymorphic discriminator for
    joined-table subclasses such as Seller."""

    id = Column(Integer, primary_key=True, index=True)  # member ID
    account = Column(String, nullable=False)  # signup account
    password = Column(String)  # password
    birth = Column(DateTime)  # date of birth
    name = Column(String, nullable=False)  # name
    nickname = Column(String)  # nickname
    login_type = Column(String, nullable=False)  # login type
    profile_image = Column(String)  # profile picture
    user_type = Column(String)  # user type (polymorphic discriminator)

    __mapper_args__ = {
        "polymorphic_on": user_type,
        "polymorphic_identity": "user",
    }
    # seller = relationship("Seller", backref=backref("user", uselist=False)) # User-Seller 1:1
class Seller(User):
    """Seller profile; joined-table inheritance from User (id is the FK)."""

    id = Column(Integer, ForeignKey("user.id"), primary_key=True)  # seller ID
    seller_name = Column(String, nullable=False)  # seller name
    insta_account = Column(String)  # Instagram account
    youtube_account = Column(String)  # YouTube account
    website_url = Column(String)  # website URL
    seller_profile_image = Column(String)  # seller profile picture

    __mapper_args__ = {
        "polymorphic_identity": "seller",
    }

    market = relationship("Market", backref="seller")  # Seller-Market 1:N
    seller_category = relationship("SellerCategory", backref="seller")
class SellerCategory(Base):
    """Category tag attached to a seller.

    NOTE(review): ``id`` is both the primary key and the FK to seller, so
    each seller can have at most one category row -- confirm this is
    intended rather than a composite key.
    """

    id = Column(Integer, ForeignKey("seller.id"), primary_key=True)  # seller ID
    category_name = Column(String, nullable=False)  # category name
class Market(Base):
    """Market row; ``market_type`` discriminates PopupStore / FleeMarket."""

    id = Column(Integer, primary_key=True, index=True)  # market ID
    name = Column(String, nullable=False)  # market name
    open_date = Column(Date, nullable=False)  # opening date
    close_date = Column(Date, nullable=False)  # closing date
    operation_status = Column(Boolean, nullable=False)  # operating or not
    seller_id = Column(Integer, ForeignKey("seller.id"))  # seller ID
    market_type = Column(String)  # market type (polymorphic discriminator)

    __mapper_args__ = {
        "polymorphic_on": market_type,
        "polymorphic_identity": "market",
    }
class PopupStore(Market):
    """Popup store: a market that carries its own venue details."""

    id = Column(Integer, ForeignKey("market.id"), primary_key=True)
    latitude = Column(Double, nullable=False)  # latitude
    longitude = Column(Double, nullable=False)  # longitude
    location = Column(String, nullable=False)  # location/address
    parking_lot = Column(Boolean, nullable=False)  # parking available
    toilet = Column(Boolean, nullable=False)  # toilet available

    __mapper_args__ = {"polymorphic_identity": "popup_store", }
class FleeMarket(Market):
    """Flea market: a market attached to an Event, which holds the venue
    details (location, parking, toilet)."""

    id = Column(Integer, ForeignKey("market.id"), primary_key=True)
    event_id = Column(Integer, ForeignKey("event.id"))

    __mapper_args__ = {
        "polymorphic_identity": "flee_market",
    }
class Event(Base):
    """Event hosting one or more flea markets; carries the venue details."""

    id = Column(Integer, primary_key=True)  # event ID
    name = Column(String, nullable=False)  # event name
    latitude = Column(Double, nullable=False)  # latitude
    longitude = Column(Double, nullable=False)  # longitude
    location = Column(String, nullable=False)  # location/address
    parking_lot = Column(Boolean, nullable=False)  # parking available
    toilet = Column(Boolean, nullable=False)  # toilet available

    flee_market = relationship("FleeMarket", backref="event")
| dongbin98/popple-fastapi | src/models.py | models.py | py | 4,286 | python | en | code | 0 | github-code | 13 |
36069672222 | import os
import re
from collections import defaultdict
dct = defaultdict(dict)  # NOTE(review): never used below (base_dict is used instead)
def listfiles(folder):
    """Yield the path of every ``.py`` file under *folder*, recursively.

    Bug fix: the original iterated ``folders + files``, so a *directory*
    whose name ends in ``.py`` was yielded too and later crashed
    ``read_file`` (``open()`` on a directory raises). Only regular files
    are yielded now.
    """
    for root, _folders, files in os.walk(folder):
        for filename in files:
            if filename.endswith(".py"):
                yield os.path.join(root, filename)
def read_file(path):
    """Return the text content of *path*, or None if it cannot be read.

    Bug fix: the original opened the file *outside* the try block, so an
    unreadable path (missing file, directory, permission error) crashed
    instead of being reported; the whole open+read is now guarded.  The
    broad ``except`` is narrowed to read/decoding failures.
    """
    try:
        with open(path, "r") as f:
            return f.read()
    except (OSError, UnicodeDecodeError) as e:
        # Keep the original best-effort behavior: report and return None.
        print(e)
        return None
def strip_multiline_comments(data):
    """Return *data* with all triple-quoted blocks (both quote styles) removed.

    Bug fix: the original called ``data.replace(m, "")`` for each match,
    always starting from the *original* string, so only the last match was
    actually removed -- and the single-quote pass discarded the results of
    the double-quote pass entirely.  Removals now accumulate.

    Falsy input (None, '') is returned unchanged.
    """
    if not data:
        return data
    cleaned = re.sub(r'""".*?"""', "", data, flags=re.DOTALL)
    cleaned = re.sub(r"'''.*?'''", "", cleaned, flags=re.DOTALL)
    return cleaned
def iterate_over_lines(data):
    """Scan *data* (Python source text) and record import/def information.

    Results are accumulated in the module-level ``imports`` and ``defs``
    defaultdicts (re-bound per file by the driver loop below) rather than
    being returned.
    """
    data_cleaned = strip_multiline_comments(data)
    if data_cleaned:
        for line in data_cleaned.splitlines():
            # Drop comments, doctest prompts, assignments and string literals.
            line_cleaned = re.sub("#.*|>>>.*|=.*|\".*?\"|'.*?'", "", line)
            line_cleaned = line_cleaned.strip()
            if line_cleaned:
                tokens = re.split(r"\s+", line_cleaned)
                tokens = [x for x in tokens if x]
                if re.findall("from|import", tokens[0]):
                    if "import" in tokens:
                        if "from" in line_cleaned:
                            # 'from X import Y' -> imports[X] collects Y (and
                            # comma-separated names).
                            resp = [x for x in re.split("from", line_cleaned) if x]
                            if resp:
                                resp = resp[0]
                                resp = [
                                    x.strip() for x in re.split("import", resp) if x
                                ]
                                # NOTE(review): this tests list *membership* of
                                # the string ',', which is essentially always
                                # true -- confirm a substring test was intended.
                                if "," not in resp:
                                    if len(resp) == 2:
                                        key = resp[0]
                                        imports[key].append(resp[1])
                                    else:
                                        key = resp[0]
                                        imports[key].extend(resp[1].split(","))
                if re.findall("def|class", tokens[0]):
                    if "def" in tokens[0]:
                        # Record the signature text after 'def' up to ')'.
                        match = re.findall("def(.*?\))", line_cleaned)
                        if match:
                            defs["function_defs"].append(match)
# ---- Driver: runs at import time (NOTE(review): no __main__ guard). ----
# Collect every .py path under the current directory, shortest paths first.
file_list = sorted(list(listfiles(".")), key=lambda x: len(x))
base_dict = defaultdict(dict)
for f_name in file_list:
    # Fresh accumulators per file; iterate_over_lines() fills the
    # module-level 'imports' and 'defs' names rebound here.
    imports = defaultdict(list)
    defs = defaultdict(list)
    classes = defaultdict(list)  # NOTE(review): never populated anywhere
    data = read_file(f_name)
    iterate_over_lines(data)
    base_dict[f_name].update({"imports": imports})
    base_dict[f_name].update(defs)
# Report what was found for each file.
for k, v in base_dict.items():
    print(k)
    for key, value in v.items():
        print(key, value)
    print("\n")
| msgoff/Python_Scripts | walk.py | walk.py | py | 2,946 | python | en | code | 0 | github-code | 13 |
5261669810 |
def sum_of_num(numb_array):
    """Return the sum of *numb_array* entries, coercing each one to float."""
    return sum(float(entry) for entry in numb_array)
def extract_numbers(numb_array, degree):
    """Split an equation-side string into constant terms and unknown terms.

    *numb_array* is a raw string such as '5 * X^0 + 4 * X^1'; a new term
    starts at each '+'/'-' sign (or at index 0 for a leading unsigned
    term). Terms in X^0 go into ``numbers``; everything else into
    ``unknown``. Returns only ``numbers`` when *degree* is 0, otherwise
    the (numbers, unknown) pair.

    NOTE(review): the parsing relies on the exact 'c * X^p' spacing; an
    input without '^' or '*' would raise or mis-slice -- verify callers.
    """
    numbers = []
    unknown = []
    i = 0
    while (i < len(numb_array)):
        # A term starts at an explicit sign, or at position 0 when the
        # first '-' (if any) appears after the first '*'.
        if numb_array[i] == '-' or numb_array[i] == '+' or (i == 0 and (numb_array[i:].find('-') > numb_array[i:].find('*') or numb_array[i:].find('-') == -1)):
            j = numb_array.find('^', i ) + 1
            if numb_array[j] == '0':
                # X^0 term: keep only the coefficient part before '*'.
                numbers.append(numb_array[i:numb_array.find('*', i)])
            elif numb_array.find(' ', j) != -1:
                unknown.append(numb_array[i:numb_array.find(' ', j)])
            else:
                # Last term of the string: take everything to the end.
                unknown.append(numb_array[i:])
        i += 1
    if degree == 0:
        return numbers
    return numbers, unknown
def get_discriminant(a, b, c):
    """Return the discriminant of a*x^2 + b*x + c, i.e. b**2 - 4*a*c."""
    squared_term = b ** 2
    correction = 4 * a * c
    return squared_term - correction
def get_reduced_form(numbers, degree):
    """Print the polynomial degree and its reduced form.

    *numbers* lists coefficients from the highest power down to X^0;
    zero coefficients are omitted from the printed form.
    """
    print('Polynomial degree : ' + str(degree))
    pieces = ['Reduced form:']
    for power in range(degree, -1, -1):
        coeff = numbers[degree - power]
        if coeff == 0:
            continue
        if coeff < 0:
            pieces.append(' - ' + str(coeff * -1) + ' * X^' + str(power))
        elif power != degree:
            pieces.append(' + ' + str(coeff) + ' * X^' + str(power))
        else:
            # Leading positive coefficient: no explicit sign.
            pieces.append(' ' + str(coeff) + ' * X^' + str(power))
    print(''.join(pieces) + ' = 0')
def get_results(a, b, discriminant):
    """Print the root(s) of the quadratic given its discriminant.

    Returns 1 for the degenerate case (a == 0 with zero discriminant and
    no solution); otherwise prints the real or complex roots and returns
    None.
    """
    if discriminant == 0:
        if a == 0:
            print('There is no value for X that makes the equation true')
            return 1
        print('Discriminant is zero, there is one solution : ')
        print(float(-b / (2 * a)))
    elif discriminant > 0:
        print('Discriminant is strictly positive, the two solutions are: ')
        root = discriminant ** 0.5
        print(float((-b + root) / (2 * a)))
        print(float((-b - root) / (2 * a)))
    else:
        print('Discriminant is strictly negative, there is no real solution. Instead there is two imaginary solutions: ')
        real_part = (-b / (2 * a))
        imag_part = ((- discriminant) ** 0.5) / (2 * a)
        # imag_part is negative when a < 0; normalize the printed sign.
        if imag_part > 0:
            print(str(real_part) + ' + ' + str(imag_part) + 'i')
            print(str(real_part) + ' - ' + str(imag_part) + 'i')
        else:
            print(str(real_part) + ' - ' + str(imag_part * -1) + 'i')
            print(str(real_part) + ' + ' + str(imag_part * -1) + 'i')
def clean_list(numb_array, part):
    """Return *numb_array* entries as floats, negated for the right side.

    Moving a term across '=' flips its sign, hence the negation when
    *part* is 'right'. Embedded spaces are stripped before conversion.
    """
    sign = -1 if part == 'right' else 1
    return [float(entry.replace(' ', '')) * sign for entry in numb_array]
def get_power(vars_str):
    """Return the integer exponent after the first '^' in *vars_str*."""
    caret_pos = vars_str.find('^')
    return int(vars_str[caret_pos + 1:])
def final_clean(left_x, right_x, numbers, degree):
    """Merge right-hand unknown terms into *left_x* and total the constants.

    Mutates and returns *left_x* (or aliases *right_x* when *left_x* is
    empty). Only powers 1 and 2 are handled; missing powers are inserted
    as 0. Returns just the constant total when *degree* is 0, otherwise
    the (merged_terms, constant_total) pair.
    """
    if left_x:
        for power in (1, 2):
            if power in right_x:
                left_x[power] = left_x.get(power, 0) + right_x[power]
            elif power not in left_x:
                left_x[power] = 0
    else:
        left_x = right_x
    constant_total = 0
    for value in numbers:
        constant_total += value
    if degree == 0:
        return constant_total
    return left_x, constant_total
def clean_vars(numb_array, part):
    """Fold 'coeff * X^power' terms into a {power: coefficient} dict.

    Right-hand-side coefficients are negated (they move across '='), and
    coefficients sharing the same power are summed together.
    """
    sign = -1 if part == 'right' else 1
    combined = {}
    for term in numb_array:
        pieces = term.replace(' ', '').split('*')
        exponent = get_power(pieces[1])
        combined[exponent] = combined.get(exponent, 0) + float(pieces[0]) * sign
    return combined
def get_degree(numb_str):
    """Return the highest exponent found after any 'X^' in *numb_str*.

    Returns -1 when no 'X^' occurs, and -2 for malformed exponents
    (negative, or containing '.' or ',').

    NOTE(review): reads numb_str[i + 1], so a string *ending* in 'X'
    would raise IndexError -- verify inputs.
    """
    i = 0
    degree = -1
    while i < len(numb_str):
        if numb_str[i] == 'X' and numb_str[i + 1] == '^':
            j = i
            # Scan to the end of the token, rejecting negative or
            # non-integer exponents outright.
            while j < len(numb_str) and numb_str[j] != ' ':
                if ((numb_str[j] == '-' and numb_str[j - 1] == '^') or numb_str[j] == '.' or numb_str[j] == ','):
                    return -2
                j += 1
            # NOTE(review): lexicographic string comparison, not a digit
            # check; it happens to accept multi-digit exponents like '10'
            # but confirm it rejects everything it should.
            if numb_str[i + 2: j] >= '0' and numb_str[i + 2:j] <= '9':
                tmp_degree = int(numb_str[i + 2: j])
            else:
                return -2
            if tmp_degree > degree:
                degree = tmp_degree
        i += 1
    return degree
14738119981 | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import os
import math
import numpy as np
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Parameters
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
MAX_CHILD_CNT = 1500
# the max number of children in the file system arborescence
NAMELEN = 5
# the length for any subdirectory
# The name will be left-padded with '0' if needed to reach the length given by 'NAMELEN'.
DEFAULT_IMAGE_TYPE = 'jpg'  # file extension used when none is supplied
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Functions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
def getImageFilepath(
    imageId
    , imageType = DEFAULT_IMAGE_TYPE
    , imageRootDir = None
):
    '''
    DESCRIPTION
        Return the storage path of the image whose identifier is <imageId>.
        To keep a reasonable number of images per directory, images are
        dispatched among sub-directories holding at most <MAX_CHILD_CNT>
        images each; the sub-directory name is derived from the identifier
        and zero-padded to <NAMELEN> characters.
    ARGUMENTS
        imageId
            an integer corresponding to an identifier of an image.
        imageType
            the image file extension (defaults to <DEFAULT_IMAGE_TYPE>).
        imageRootDir
            optional root directory prepended to the returned path.
    RETURN
        A string path "<subdir>/<imageId>.<imageType>", prefixed with
        <imageRootDir> when it is provided. The root directory containing
        all the images is managed outside of this function.
    '''
    if (imageRootDir is not None) and (not isinstance(imageRootDir, str)):
        raise TypeError('ERROR: Invalid type for argument <imageRootDir>: "string" or "None" type was expected')

    def subdirFromImageId(imageId):
        '''
        Return the <NAMELEN>-character, zero-padded name of the
        sub-directory that hosts the image with identifier <imageId>,
        dispatching at most <MAX_CHILD_CNT> images per sub-directory.
        '''
        if ( MAX_CHILD_CNT <= 0 ):
            raise ValueError('ERROR: Invalid value for <MAX_CHILD_CNT> parameter: expected value must be > 0.')
        idx = math.ceil( imageId / MAX_CHILD_CNT )
        dirIdx = str(idx).zfill(NAMELEN)
        return dirIdx

    subdir = subdirFromImageId(imageId)
    filename = str(imageId) + '.' + imageType
    filepath = os.path.join(subdir, filename)
    if (imageRootDir is not None):
        filepath = os.path.join(imageRootDir, filepath)
    return filepath
70166114259 | """Script to update all game logs by year."""
import sys
import requests
from classes.database import Database
from functions.new_game_logs import new_game_logs
from functions.check_duplicate_game_logs import check_duplicate_game_logs
BASE_URL = "http://lookup-service-prod.mlb.com/lookup/json/"
GAME_LOG_EXT = (
"named.sport_%s_game_log_composed.bam"
"?game_type='R'&league_list_id='mlb_hist'&player_id=%s&season=%s"
)
GET_PLAYERS = (
"SELECT id, mlb_id, primary_stat_type"
" FROM players"
" ORDER BY id"
)
GET_GAME_LOGS_HITTING = (
"SELECT players_id, mlb_team_id, opponent_mlb_team_id, game_date, ab, r,"
" h, tb, 2b, 3b, hr, rbi, bb, ibb,so, sb ,cs ,hbp, sac, sf, home_away,"
" game_id, game_year"
" FROM game_logs_hitting"
" WHERE game_year = %s"
" ORDER BY players_id, game_id"
)
GET_GAME_LOGS_PITCHING = (
"SELECT players_id, mlb_team_id, opponent_mlb_team_id, game_date, g, gs,"
" cg, sho, sv, svo, ip, h, r, er, hr, bb, ibb, so, np, s , w, l,"
" home_away, game_id, game_year"
" FROM game_logs_pitching"
" WHERE game_year = %s"
" ORDER BY players_id, game_id"
)
ADD_GAME_LOGS_HITTING = (
"INSERT INTO game_logs_hitting"
" (players_id, mlb_team_id, opponent_mlb_team_id, game_date, ab, r, h, tb,"
" 2b, 3b, hr, rbi, bb, ibb, so, sb, cs, hbp, sac, sf, home_away, game_id,"
" game_year)"
" VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,"
" %s, %s, %s, %s, %s, %s, %s)"
)
ADD_GAME_LOGS_PITCHING = (
"INSERT INTO game_logs_pitching"
" (players_id, mlb_team_id, opponent_mlb_team_id, game_date, g, gs, cg,"
" sho, sv, svo, ip, h, r, er, hr, bb, ibb, so, np, s , w, l, home_away,"
" game_id, game_year)"
" VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,"
" %s, %s, %s, %s, %s, %s, %s, %s, %s)")
# database_players comes in as a list with tuple/list structure:
# (id, mlb_id, primary_stat_type)
def insert_game_logs_by_year(year):
[duplicate_pitchers, duplicate_hitters] = check_duplicate_game_logs()
if (len(duplicate_pitchers) > 0 or len(duplicate_hitters) > 0):
return
"""Insert only new game logs by year."""
db = Database()
database_players = db.query(GET_PLAYERS)
db.__del__
all_pitching_game_log_data = []
all_hitting_game_log_data = []
for player in database_players:
players_id = player[0]
player_mlb_id = player[1]
primary_stat_type = player[2]
if (primary_stat_type == "pitching" or primary_stat_type == "both"):
link = BASE_URL + GAME_LOG_EXT % ("pitching", player_mlb_id, year)
response = requests.get(link).json()
try:
pitching_log_results = (
response["sport_pitching_game_log_composed"]
["sport_pitching_game_log"]["queryResults"]
)
except KeyError:
print("Could not find pitching_log_results")
print(link)
print(players_id)
print(year)
return
game_logs_count = pitching_log_results["totalSize"]
if game_logs_count == "0":
game_logs = []
else:
game_logs = pitching_log_results["row"]
game_logs = (
[game_logs] if game_logs_count == "1" else game_logs
)
for game_log in game_logs:
year = game_log["game_date"].split('-')[0]
game_log_data = (
players_id, game_log["team_id"], game_log["opponent_id"],
game_log["game_date"], game_log["g"], game_log["gs"],
game_log["cg"], game_log["sho"], game_log["sv"],
game_log["svo"], game_log["ip"], game_log["h"],
game_log["r"], game_log["er"], game_log["hr"],
game_log["bb"], game_log["ibb"], game_log["so"],
game_log["np"], game_log["s"], game_log["w"],
game_log["l"], game_log["home_away"], game_log["game_id"],
year
)
all_pitching_game_log_data.append(game_log_data)
if (primary_stat_type == "hitting" or primary_stat_type == "both"):
link = BASE_URL + GAME_LOG_EXT % ("hitting", player_mlb_id, year)
response = requests.get(link).json()
try:
hitting_log_results = (
response["sport_hitting_game_log_composed"]
["sport_hitting_game_log"]["queryResults"]
)
except KeyError:
print("Could not find hitting_log_results")
print(link)
print(players_id)
print(year)
return
game_logs_count = hitting_log_results["totalSize"]
if game_logs_count == "0":
game_logs = []
else:
game_logs = hitting_log_results["row"]
game_logs = (
[game_logs] if game_logs_count == "1" else game_logs
)
for game_log in game_logs:
year = game_log["game_date"].split('-')[0]
game_log_data = (
players_id, game_log["team_id"], game_log["opponent_id"],
game_log["game_date"], game_log["ab"], game_log["r"],
game_log["h"], game_log["tb"], game_log["d"],
game_log["t"], game_log["hr"], game_log["rbi"],
game_log["bb"], game_log["ibb"], game_log["so"],
game_log["sb"], game_log["cs"], game_log["hbp"],
game_log["sac"], game_log["sf"], game_log["home_away"],
game_log["game_id"], year
)
all_hitting_game_log_data.append(game_log_data)
db = Database()
database_pitching_game_log_data = db.query(GET_GAME_LOGS_PITCHING, (year,))
database_hitting_game_log_data = db.query(GET_GAME_LOGS_HITTING, (year,))
new_pitching_game_log_data = new_game_logs(all_pitching_game_log_data,
database_pitching_game_log_data,
23)
new_hitting_game_log_data = new_game_logs(all_hitting_game_log_data,
database_hitting_game_log_data,
21)
new_game_log_count = (
len(new_pitching_game_log_data) + len(new_hitting_game_log_data)
)
print("Number of new game logs", new_game_log_count)
db.insert(ADD_GAME_LOGS_PITCHING, new_pitching_game_log_data, many=True)
db.insert(ADD_GAME_LOGS_HITTING, new_hitting_game_log_data, many=True)
check_duplicate_game_logs()
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Wrong number of arguments")
elif not sys.argv[1].isdigit():
print("Argument should be a year (digit)")
else:
year = sys.argv[1]
insert_game_logs_by_year(year)
| jarrett-pon/mlbscrapper | insert_game_logs_by_year.py | insert_game_logs_by_year.py | py | 7,140 | python | en | code | 0 | github-code | 13 |
73042605779 | # -*- coding: utf-8 -*-
# @Time : 2021/9/26 10:46
# @Author : kanghe
# @Email : 244783726@qq.com
# @File : test_title.py
import allure
import pytest
# Parametrized (name, title) pairs driving the test below.
# NOTE(review): the second name literal appears mojibake-encoded --
# confirm the source file's encoding before changing it.
params = [
    ("tom", "en name"),
    ("ๅผ ไธ", "zh name")
]
# Variables from the parametrize list can be read and used as the
# Allure test-case title (here via the {title} placeholder).
@allure.title("{title}")
@pytest.mark.parametrize("name, title", params)
def test_title(name, title):
    print(f"{name} is testing {title}")
| dengfan2018/python-api-testing | testcase/pytest_learn/test_title.py | test_title.py | py | 420 | python | en | code | 0 | github-code | 13 |
2929369785 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
__NAME__ = 'Griffin Lim Algorithm'
import scipy
import shutil
import numpy as np
import librosa
from librosa import display
from optparse import OptionParser
from matplotlib import pyplot as plt
def griffin_lim(stftm_matrix, shape, min_iter=20, max_iter=50, delta=20):
    """Reconstruct a waveform from an STFT magnitude via Griffin-Lim.

    Starting from uniform random noise of *shape*, repeatedly take the
    STFT, replace its magnitude by *stftm_matrix* while keeping the
    current phase, and invert. Snapshots are collected every *delta*
    iterations starting at *min_iter*, plus the final estimate.

    Returns a list of (waveform, iteration_number) tuples.
    """
    y = np.random.random(shape)
    y_iter = []
    for i in range(max_iter):
        if i >= min_iter and (i - min_iter) % delta == 0:
            y_iter.append((y, i))
        stft_matrix = librosa.core.stft(y)  # e.g. stft_matrix: (1025, 122), same shape as stftm_matrix
        # Element-wise: keep target magnitude, current phase.
        # NOTE(review): divides by |STFT|; all-zero bins would give NaN/inf.
        stft_matrix = stftm_matrix * (stft_matrix / np.abs(stft_matrix))
        y = librosa.core.istft(stft_matrix)  # e.g. (62208,)
    y_iter.append((y, max_iter))  # append the final estimate, labeled max_iter
    return y_iter
if __name__ == '__main__':
    # The original argv-based input was replaced by a hard-coded wave_name.
    wave_name = "sample.wav"
    """
    cmd_parser = OptionParser(usage="usage: %prog <wav-file>")
    cmd_parser.parse_args()
    (opts, argv) = cmd_parser.parse_args()
    if len(argv) != 1:
        cmd_parser.print_help()
        exit(-1)
    """
    # Fixed seed so every run draws the same random initial noise.
    np.random.seed(0)
    # assume 1 channel wav file
    # NOTE(review): only `import scipy` appears above; accessing
    # scipy.io.wavfile may fail unless the submodule was loaded -- confirm.
    sr, data = scipy.io.wavfile.read(wave_name)  # e.g. sr: 16000, data: (62208,) integer samples
    stftm_matrix = np.abs(librosa.core.stft(data))  # magnitude spectrogram, e.g. (1025, 122)
    # Perturb the magnitude with uniform [0, 1) noise before reconstruction.
    stftm_matrix_modified = stftm_matrix + np.random.random(stftm_matrix.shape)
    y_iters = griffin_lim(stftm_matrix_modified, data.shape)
    n_figure = 1 + len(y_iters)
    plt.figure(figsize=(8, 14))
    plt.subplot(n_figure, 1, 1)
    display.waveplot(data, sr=sr)
    plt.title('origin wave')
    for i in range(0, len(y_iters)):
        y, n_iters = y_iters[i]
        store_file = wave_name.replace('.wav', '_griffinlim_iters{iters}.wav'.format(iters=n_iters))
        print('NumIters {}, Audio: {}'.format(n_iters, store_file))
        plt.subplot(n_figure, 1, i + 2)
        display.waveplot(y.astype(np.int16), sr=sr)
        plt.title('reconstructed wave from STFT-M (Iter {})'.format(n_iters))
        # Remove a stale copy first. NOTE(review): shutil.rmtree targets
        # directories; for an existing *file* this silently does nothing.
        shutil.rmtree(store_file, ignore_errors=True)
        scipy.io.wavfile.write(store_file, sr, y.astype(np.int16))
    store_file = wave_name.replace('.wav', '_griffinlim.png')
    print("Waveform image: {}".format(store_file))
    plt.savefig(store_file, dpi=100)
    print('DONE')
| aishoot/Audio_Signal_Processing | 05-GriffinLim/GriffinLim_example.py | GriffinLim_example.py | py | 2,606 | python | en | code | 52 | github-code | 13 |
73147672017 | from tkinter import *
class SoftwareActivationWindow(Tk):
    """Top-level window that collects and validates a 25-character key.

    The actual key check is delegated to the injected callback; this class
    only handles input restriction, button enabling and success/failure
    styling.
    """

    def __init__(self, software_activation_function):
        # Callback taking the key string; truthy return means success.
        self.software_activation = software_activation_function
        # Create window
        self.instantiate_window()
        # Draw the widgets
        self.draw_widgets()

    def instantiate_window(self):
        """Initialize the Tk root and configure its size and grid weights."""
        # Start root window
        super().__init__()
        # Configure root window
        self.title("Activate Your Software")
        self.geometry('650x400')
        self.minsize(650, 400)
        # Make the app responsive
        self.columnconfigure(index=0, weight=1)
        self.columnconfigure(index=1, weight=1)
        self.columnconfigure(index=2, weight=1)
        self.columnconfigure(index=3, weight=1)
        self.rowconfigure(index=0, weight=2)
        self.rowconfigure(index=1, weight=1)
        self.rowconfigure(index=2, weight=1)
        self.rowconfigure(index=3, weight=2)

    def draw_widgets(self):
        """Create and grid the title, key entry and activate button."""
        # Title label
        self.title_label = Label(self, text="Activate Your Software", font=('Arial', 32))
        self.title_label.grid(column = 0, row = 0, columnspan = 4)
        # Enter key label
        self.enter_key_label = Label(self, text="Enter The Key:", font=('Arial', 14))
        self.enter_key_label.grid(column = 1, row = 1, columnspan = 2)
        # Serial key entry; validate_key_input runs on every keystroke.
        self.entry_function_register = self.register(self.validate_key_input)
        self.serial_key_entry = Entry(self, justify='center', validate='key', validatecommand=(self.entry_function_register, '%d', '%S', '%P'), bg='white', fg='black', width='46', font='Arial 17')
        self.serial_key_entry.grid(column = 0, row = 2, columnspan = 4, ipady=10)
        # Activate software button; stays disabled until 25 chars are typed.
        self.activate_software_button = Button(self, text='Activate', state='disabled', command=self.attempt_software_activation, height=2, width=10, font=('Arial', 26))
        self.activate_software_button.grid(column = 2, row = 3, columnspan = 2)

    def validate_key_input(self, action_type, text_change, value_after):
        """Tk 'key' validator: allow only alphanumeric input up to 25 chars.

        action_type is Tk's %d ('1' insert, '0' delete), text_change is
        %S (the text being changed), value_after is %P (the would-be
        value). Returning False rejects the edit.
        """
        # Text entry validation
        if (action_type == '1' and not
                (text_change.isalnum() and
                 len(value_after) <= 25)):
            return False
        # Button activation validation
        if action_type == '1' and len(value_after) == 25:
            self.activate_software_button.config(state='normal')
        elif action_type == '0':
            self.activate_software_button.config(state='disabled')
        # Styling the entry (resets any previous rejection colors)
        self.serial_key_entry.config(bg='white')
        self.serial_key_entry.config(fg='black')
        return True

    def attempt_software_activation(self):
        """Run the activation callback; close on success, flag failure in red."""
        software_key = self.serial_key_entry.get()
        result = self.software_activation(software_key)
        if result:
            # Close window upon success
            self.destroy()
        else:
            # Styling entry after rejection
            self.serial_key_entry.config(bg='red')
            self.serial_key_entry.config(fg='white')
if __name__ == '__main__': # This module is GUI glue meant to be imported, never executed directly
    raise Exception("GUI file run outside main window...") # fail fast if run standalone
| AndreiCravtov/python-software-activation-wrapper | src/client/activategui.py | activategui.py | py | 3,361 | python | en | code | 0 | github-code | 13 |
from sqlalchemy.orm import Session
from models.client import Place
def load_menu(db: Session, place_id: int, username):
    """Fetch a place by primary key and build its menu payload.

    NOTE(review): when ``place_id`` does not exist, ``place`` is None and the
    attribute access below raises AttributeError — confirm callers validate
    the id first.
    """
    place = db.query(Place).get(place_id)
    payload = {
        "username": username,
        "place": place.name,
        "menus": place.menus,
    }
    return payload
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
class FACES(Dataset):
    """Torch Dataset over the FACES image corpus.

    Images live under ``<dataset_path>/<partition>`` and carry their labels in
    the underscore-separated filename ``<id>_<age>_<gender>_<expression>_<set>``;
    only age, gender and expression are used here (commented-out lines show how
    to also load person id and picture set).

    NOTE(review): label indices are derived from the files present in THIS
    partition only, so train/test mappings can disagree if a class is absent
    from one partition — confirm before comparing across partitions.
    """
    def __init__(self, dataset_path, tv_transforms, partition):
        # NOTE(review): __getitem__ joins paths with the ``/`` operator, so
        # dataset_path is presumably a pathlib.Path — verify against callers.
        super().__init__()
        self.dataset_path = dataset_path
        self.partition = partition
        images = []
        # ids = []
        ages = []
        genders = []
        expressions = []
        # picture_sets = []
        # sorted() makes the image order (and hence index->file mapping) deterministic.
        for img in sorted(os.listdir(os.path.join(dataset_path, self.partition))):
            # Read person ID, age, and expression from filename.
            img_labes = img.split("_")
            # ids.append(img_labes[0])
            ages.append(img_labes[1])
            genders.append(img_labes[2])
            expressions.append(img_labes[3])
            # picture_sets.append(img_labes[4].split('.')[0])
            # Save the image.
            images.append(img)
        # Prepare dataset specific information.
        # Map each raw label string to a stable integer id (sorted for determinism).
        # id_lbls = {x:e for e, x in enumerate(sorted(set(ids)))}
        ages_lbls = {x:e for e, x in enumerate(sorted(set(ages)))}
        gender_lbls = {x:e for e, x in enumerate(sorted(set(genders)))}
        expression_lbls = {x:e for e, x in enumerate(sorted(set(expressions)))}
        # pset_lbls = {x:e for e, x in enumerate(sorted(set(picture_sets)))}
        # id_lbl_encoded = [id_lbls[x] for x in ids]
        age_lbl_encoded = [ages_lbls[x] for x in ages]
        gender_lbl_encoded = [gender_lbls[x] for x in genders]
        expression_lbl_encoded = [expression_lbls[x] for x in expressions]
        # pset_lbl_encoded = [pset_lbls[x] for x in picture_sets]
        # self.y = np.stack([id_lbl_encoded, gender_lbl_encoded, age_lbl_encoded, expression_lbl_encoded, pset_lbl_encoded], axis=1)
        # y has shape (num_images, 3): columns are gender, age, expression.
        self.y = np.stack([gender_lbl_encoded, age_lbl_encoded, expression_lbl_encoded], axis=1)
        self.imgs = images
        self.tv_transforms = tv_transforms
        # MTL information.
        self.num_tasks = self.y.shape[1]
        self.task_ids = [i for i in range(self.num_tasks)]
        # self.task_lbl_sizes = [len(set(ids)), len(set(genders)), len(set(ages)),
        #                        len(set(expressions)), len(set(picture_sets))]
        self.task_lbl_sizes = [len(set(genders)), len(set(ages)), len(set(expressions))]
    def __len__(self):
        # Number of samples in this partition.
        return len(self.imgs)
    def __getitem__(self, index):
        # Load as RGB, apply the torchvision transform pipeline, and return
        # the (image, per-task label row) pair.
        imgs = Image.open(self.dataset_path / self.partition / self.imgs[index]).convert('RGB')
        imgs = self.tv_transforms(imgs)
        return imgs, self.y[index]
| geriskenderi/mtl-models | data/faces.py | faces.py | py | 2,599 | python | en | code | 3 | github-code | 13 |
#Programmer: Collin M. Fields
#Date: 11/05/2018
#Purpose: Count the number of words in a text.
def wordCounter(textToCountWords):
    """Return the number of words in *textToCountWords*.

    Bug fix: the original split on a single space character, so consecutive
    spaces, tabs and newlines produced phantom empty "words", and an empty
    string was counted as one word. str.split() with no argument splits on
    any run of whitespace and returns [] for an empty/blank string.
    """
    return len(textToCountWords.split())
| CollinFields/ProjectsWIP | TextProjects/wordCounter.py | wordCounter.py | py | 259 | python | en | code | 0 | github-code | 13 |
import os
from .buf_app import WidgetBufferWithInputs, WidgetList, TextWidget, SimpleInput, WidgetBuffer, BufferHistory, MultiSelectWidget
from .func_register import vim_register
from .vim_utils import SetVimRegister, Normal_GI, Singleton, input_no_throw, escape, win_eval
import vim
from functools import partial
from .log import debug
from .buf_app_filetree import CursorLineBuffer
from .remote_fs import FileSystem
from .windows import GPW, PreviewWindow
class GitCommitter(CursorLineBuffer):
    """Interactive magit-style popup for staging/unstaging files and committing.

    Each list row is rendered as ``"<state> | <path>"`` where state is one of
    ``untrace`` / ``unstage`` / ``stage``; the slices ``item[10:]`` below strip
    that fixed-width prefix to recover the path.

    Key bindings: <space> toggle selection, <cr> apply staging, p preview diff,
    c commit, D discard/remove, e edit file at the previewed line.
    """
    def __init__(self, name="GitCommitter"):
        # The multi-select widget is seeded with (files, preselected) from git status.
        self.mult = MultiSelectWidget(*self.git_stage_files())
        self.widgets = WidgetList("", [
            TextWidget("Press space to select: "),
            self.mult,
        ])
        self.syntax = "gitcommitter"
        options = {
            'minwidth': 50,
            'minheight': 30,
        }
        super().__init__(self.widgets, name, "Git Committer", None, options)
    def git_stage_files(self):
        """Parse ``git status -s`` into display rows plus a selected-map.

        Staged entries start preselected; rows are sorted unstage, untrace,
        then stage.
        """
        lines = FileSystem().eval("git status -s")
        files = []
        selected = {}
        for line in lines:
            line = line.rstrip()
            # Porcelain short format: two status chars, a space, then the path.
            type, file = line[:2], line[3:]
            if type == "??" : file = f"untrace | {file}"
            elif type[1] != " ": file = f"unstage | {file}"
            elif type[1] == " ": file = f"stage | {file}"
            selected[file] = False
            if type[1] == " ":
                selected[file] = True
            files.append(file)
        # NOTE(review): sort keys and the [0:7]/[10:] prefix widths look like
        # padded labels whose spacing may have been collapsed — confirm the
        # literal prefixes line up with the f-strings above.
        sort_map = {'unstage': 1, 'untrace': 2, 'stage ': 3}
        files.sort(key=lambda x: sort_map[x[0:7]])
        return files, selected
    def git_add(self, item):
        # Stage one path.
        FileSystem().command(f"git add {item}")
    def git_unstage(self, item):
        # Unstage one path (keep working-tree changes).
        FileSystem().command(f"git reset HEAD -- {item}")
    def is_git_staged(self, item: str):
        # A row is staged iff its display prefix starts with "stage".
        if item.startswith("stage"): return True
        return False
    def on_space(self):
        """Sync git's index with the current selection, then refresh the list."""
        for item in self.mult.get_selected():
            if not self.is_git_staged(item):
                self.git_add(item[10:])
        for item in self.mult.get_not_selected():
            if "untrace" not in item and self.is_git_staged(item):
                self.git_unstage(item[10:])
        self.mult.reset(*self.git_stage_files())
        self.redraw()
    def on_jump_label(self):
        print ("not implement.")
    @property
    def select_item(self):
        # Display row under the cursor; returns True (a sentinel) on the header line.
        number = self.cur_cursor_line()
        if number < 1: return True
        return self.mult.items[number-1]
    def on_key(self, key):
        """Dispatch a keypress; returns True when the key was handled."""
        # j/k/h/l scroll the preview window while it is visible.
        if key in ['j', 'k', 'h', 'l'] and not GPW.hidden:
            { 'j': GPW.line_down,
            'k': GPW.line_up,
            'h': GPW.page_up,
            'l': GPW.page_down, }[key]()
            return True
        if key == "<space>":
            number = self.cur_cursor_line()
            if number < 1: return True
            self.mult.onselect(number - 1)
            GPW.hide()
            self.redraw()
            return True
        if key == "<cr>":
            GPW.hide()
            self.on_space()
            return True
        if key == "p":
            """preview the changes"""
            number = self.cur_cursor_line()
            if number < 1: return True
            self.git_show(self.mult.items[number-1])
            return True
        if key == "c":
            self.commit()
            return True
        if key == 'D':
            self.remove(self.select_item)
        if key == "e":
            self.start_edit()
            return True
        if super().on_key(key):
            return True
        return False
    def commit(self):
        """Prompt for a commit message and run ``git commit``; aborts on cancel."""
        message = input_no_throw("Commit Message: ")
        if message is None:
            return
        self.close()
        # Escape quote/backslash characters before embedding in the shell command.
        message = escape(message, "\"'\\")
        if FileSystem().command(f'git commit -m "{message}"'):
            print ("Success.")
    def remove(self, item):
        """Discard the item's changes (or delete an untracked file) after a
        ``yes`` confirmation.

        NOTE(review): the f-strings below contain the literal text
        ``(unknown)`` with no placeholder — this looks like a lost
        ``{filename}`` substitution; verify against upstream before relying
        on these commands.
        """
        prompt = ""
        command = ""
        filename = item[10:]
        if "untrace" in item:
            prompt = f"You will remove untrace file `(unknown)`, press `yes` to confirm: "
            command = f"rm -rf (unknown)"
        elif "unstage" in item:
            prompt = f"You will remote all changes in `(unknown)`, press `yes` to confirm: "
            command = f"git checkout -- (unknown)"
        elif "stage" in item:
            prompt = f"You will remote all changes in `(unknown)`, press `yes` to confirm: "
            self.git_unstage(filename)
            command = f"git checkout -- (unknown)"
        if input_no_throw(prompt) == "yes":
            FileSystem().command(command)
            GPW.hide()
            self.on_space() # to save the changes
            self.mult.reset(*self.git_stage_files())
            self.redraw()
    def start_edit(self):
        """Open the file under the cursor, jumping to the source line that
        corresponds to the cursor position inside the diff preview (derived
        by walking back to the nearest ``@@`` hunk header)."""
        number = self.cur_cursor_line()
        if number < 1: return True
        file = self.mult.items[number-1][10:]
        file_line_nr = 1
        if hasattr(GPW.pwin, "wid"):
            wid = GPW.pwin.wid
            preview_line = int(win_eval(wid, 'getpos(".")')[1]) - 1
            preview_text = win_eval(wid, 'getline(1, "$")')
            offset = -1
            # Walk upward counting non-deletion lines until the hunk header.
            while preview_line >= 0:
                line = preview_text[preview_line]
                if line.startswith("@@"): break
                preview_line -= 1
                if not line.startswith("-"): offset += 1
            if preview_line < 0:
                file_line_nr = 1
            else:
                line = preview_text[preview_line]
                # Hunk header looks like "@@ -a,b +c,d @@": take c as the base line.
                file_line_nr = offset + int(line.split("@@")[1].strip().split(" ")[1].strip().split(",")[0][1:])
        GPW.hide()
        self.close()
        FileSystem().edit(file)
        vim.command(f":{file_line_nr}")
    def git_show(self, item):
        """Show the diff for one row: working-tree diff for unstaged entries,
        --cached diff for staged ones; untracked files have no diff."""
        if "untrace" in item:
            print ("Can't show untrace file.")
            return
        if "unstage" in item:
            lines = FileSystem().eval(f"git diff -- {item[10:]}")
        elif "stage" in item:
            lines = FileSystem().eval(f"git diff --cached {item[10:]}")
        self.preview(item[10:], lines)
    def preview(self, file, lines):
        # Render the diff lines in the global preview window with magit syntax.
        position = { 'zindex': 1000, }
        GPW.set_showable([
            PreviewWindow.ContentItem(file, lines, "magit", 1, position)
        ])
        GPW.trigger()
    def on_exit(self):
        # Apply any pending staging changes when the popup closes.
        GPW.hide()
        self.on_space()
@vim_register(command="GitCommit")
def StartGitCommit(args):
    """Vim ``:GitCommit`` entry point: open the interactive committer popup."""
    commit = GitCommitter()
    commit.create()
    commit.show()
| 2742195759/xkvim | xiongkun/plugin/pythonx/Xiongkun/buf_app_git_committer.py | buf_app_git_committer.py | py | 6,565 | python | en | code | 2 | github-code | 13 |
from mmcv.ops import diff_iou_rotated_2d
import torch
if __name__ == '__main__':
    # Smoke test: pairwise rotated-IoU between 3 predicted and 2 ground-truth
    # boxes, each given as (cx, cy, w, h, angle). Requires a CUDA device.
    pred = torch.tensor([[40.0, 50, 20, 20, 0.8], \
                        [40.0, 50, 20, 20, 1], \
                        [40.0, 50, 20, 20, 0.7]]).to('cuda:0')
    gt = torch.tensor([[40.0, 50, 20, 20, 1], \
                        [40.0, 50, 20, 20, 0.8]]).to('cuda:0')
    num_pred = pred.size(0)
    num_gt = gt.size(0)
    # Build the (num_pred * num_gt) cross product so every pred is paired
    # with every gt, then reshape the flat IoU vector back to a matrix.
    pred = pred[:, None].repeat(1, num_gt, 1).reshape(-1, 5)
    gt = gt[None].repeat(num_pred, 1, 1).reshape(-1, 5)
    print(diff_iou_rotated_2d(pred[None], gt[None]).reshape(num_pred, num_gt).shape)
    print(diff_iou_rotated_2d(pred[None], gt[None]).squeeze(0).reshape(num_pred, num_gt).shape)
from collections import deque
class Node:
    """Binary-tree node that additionally carries a ``next`` pointer used to
    chain nodes in level order (see Solution.connectAllSiblings)."""
    def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
        self.val = val          # payload value
        self.left = left        # left child, or None
        self.right = right      # right child, or None
        self.next = next        # successor in the level-order chain, or None
class Solution:
    def connectAllSiblings(self, root):
        """Link every node of a binary tree into a single level-order chain.

        Performs a BFS; after a node is dequeued (and its children enqueued),
        its successor in level order is whatever now sits at the front of the
        queue, so ``next`` is simply the queue head (None for the last node).

        Bug fix: an empty tree used to enqueue None and crash with
        AttributeError on ``node.left``; it now returns None cleanly.

        :param root: root node (anything with val/left/right/next attributes)
        :returns: the same root, with every node's ``next`` pointer set
        """
        if root is None:
            return None
        queue = deque([root])
        while queue:
            node = queue.popleft()
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
            # Head of the queue is the next node in level order; the tail
            # node's next stays None.
            node.next = queue[0] if queue else None
        return root
import math
#CONSTANTES DO SISTEMA
RaioTerra = 6378.173        # Earth radius in km
CentroMassa = 42158         # geostationary orbit radius (centre of mass) in km
velocidadeLuz = 300000000   # speed of light in m/s (kept for future use)


def distancia_satelite(lat_es, lon_es, lon_sat):
    """Slant range (km) between a ground station and a geostationary satellite.

    All angles are given in DEGREES. Bug fix: the original fed degree values
    straight into math.cos/math.sin, which operate in radians; inputs are now
    converted explicitly.
    """
    lat = math.radians(lat_es)
    dlon = math.radians(lon_es - lon_sat)
    # cos of the central angle between station and sub-satellite point.
    cos_gamma = math.cos(lat) * math.cos(dlon)
    return math.sqrt(RaioTerra**2 + CentroMassa**2 - 2*RaioTerra*CentroMassa*cos_gamma)


def angulo_elevacao(lat_es, lon_es, lon_sat):
    """Elevation angle (degrees) of the satellite as seen from the station.

    Bug fix: math.acos returns radians; the result is converted to degrees
    before being combined with the 90-degree offset.
    """
    lat = math.radians(lat_es)
    dlon = math.radians(lon_es - lon_sat)
    k = CentroMassa / RaioTerra
    cos_gamma = math.cos(lat) * math.cos(dlon)
    elevacao = (k*cos_gamma - 1) / math.sqrt(1 + k**2 - 2*k*cos_gamma)
    return 90 - math.degrees(math.acos(elevacao))


def azimute_estacao(lat_es, lon_es, lon_sat):
    """Azimuth (degrees from north) of the satellite from the station."""
    lat = math.radians(lat_es)
    dlon = math.radians(lon_es - lon_sat)
    azimute = (math.cos(dlon) * math.sin(lat)) / math.sqrt(1 - (math.cos(dlon)**2) * (math.cos(lat)**2))
    return 180 - math.degrees(math.acos(azimute))


def main():
    """Interactive entry point: read the station/satellite geometry and print
    the link-geometry report (original prompts and labels preserved)."""
    #-- 1st: station location and satellite longitude
    nomeEstacaoA = input('Nome da localizaรงรฃo da estaรงao terrena - ')
    latitudeEstacaoTerrenaA = int(input("Insere a latitude da estaรงรฃo terrena: "))
    longitudeEstacaoTerrenaA = int(input("Insere a longitude da estaรงรฃo terrena: "))
    longitudeSatelite = int(input("Insere a longitude do satรฉlite: "))
    #-- 2nd: derived link geometry
    distancia_sat_EstTerrenaA = distancia_satelite(latitudeEstacaoTerrenaA, longitudeEstacaoTerrenaA, longitudeSatelite)
    anguloElevacaoA = angulo_elevacao(latitudeEstacaoTerrenaA, longitudeEstacaoTerrenaA, longitudeSatelite)
    azimuteA = azimute_estacao(latitudeEstacaoTerrenaA, longitudeEstacaoTerrenaA, longitudeSatelite)
    print("---------------------------------------------------------------------------------")
    print("Latitude Estaรงรฃo terrena ",nomeEstacaoA,"= ",latitudeEstacaoTerrenaA,"ยบ")
    print("Longitude Estaรงรฃo terrena ",nomeEstacaoA,"= ",longitudeEstacaoTerrenaA,"ยบ")
    print("Longitude Do Satรฉlite = ",longitudeSatelite)
    print()
    print("Distรขncia Entre Estaรงรฃo terrena ",nomeEstacaoA," e o Satรฉlite = ", distancia_sat_EstTerrenaA,"km")
    print("-----------------")
    print("รngulo de Elevaรงรฃo",nomeEstacaoA," = ",anguloElevacaoA,"ยบ")
    print("-----------------")
    print("Azimute da Estaรงรฃo ",nomeEstacaoA," = ",azimuteA,"ยบ")


if __name__ == '__main__':
    main()
| PauloTec/link-sat-lite-em-Python | distancia estacao terrena satelite.py | distancia estacao terrena satelite.py | py | 2,502 | python | pt | code | 0 | github-code | 13 |
import os, glob, sys
import numpy as np
import matplotlib.pyplot as plt
def limiter(a,b):
    # Active slope limiter for the MUSCL reconstruction in KT(); swap the
    # return line to try a different limiter family.
    return minmod(a,b) # more diffusive
    # return superbee(a,b) # less diffusive
    # return vanLeer(a,b)
    # return vanAlbada1(a,b)
# Classic TVD limiter building blocks; all operate elementwise on arrays of
# left/right one-sided slopes and return 0 where the slopes disagree in sign.
def superbee(a,b): return maxmod(minmod(a,2.*b),minmod(2.*a,b))
def maxmod(a,b): return 0.5*(np.sign(a) + np.sign(b)) * np.maximum(np.abs(a),np.abs(b))
def minmod(a,b): return 0.5*(np.sign(a) + np.sign(b)) * np.minimum(np.abs(a),np.abs(b))
def vanLeer(a,b):
    # van Leer limiter expressed via the slope ratio r = a/b (0 where b == 0).
    r = div0(a,b)
    return (r + np.abs(r))/(1 + np.abs(r))
def vanAlbada1(a,b):
    # van Albada (form 1) limiter via the slope ratio r = a/b.
    r = div0(a,b)
    return (r**2 + r)/(r**2 +1)
def minmod2(a, b, c, theta=2):
    """Three-argument generalised minmod limiter (Kurganov-Tadmor form).

    Elementwise: min(theta*a, b, theta*c) where a, b and c are all positive,
    max(theta*a, b, theta*c) where all are negative, and 0 otherwise.

    theta=1 - no increase of total variation
    theta=2 - least dissipative

    Bug fix: np.minimum/np.maximum are binary ufuncs whose third positional
    argument is the ``out`` buffer — the original ``np.minimum(theta*a, b,
    theta*c)`` silently ignored ``theta*c`` in the result and overwrote the
    temporary instead. The three-way reduction is now done explicitly.
    """
    retval = np.zeros_like(a)
    positive_values = (a > 0) * (b > 0) * (c > 0)
    negative_values = (a < 0) * (b < 0) * (c < 0)
    smallest = np.minimum(np.minimum(theta * a, b), theta * c)
    largest = np.maximum(np.maximum(theta * a, b), theta * c)
    retval[positive_values] = smallest[positive_values]
    retval[negative_values] = largest[negative_values]
    return retval
def div0(a, b):
    """Elementwise a/b with every non-finite result (inf, -inf, nan) set to 0.

    :param a: array - Numerator
    :param b: array - Denominator
    :returns: array - a/b, with infs and nans replaced by 0
    """
    with np.errstate(divide='ignore', invalid='ignore'):
        ratio = np.true_divide(a, b)
        # Zero out the entries produced by division by zero (inf, nan).
        ratio[~np.isfinite(ratio)] = 0
    return ratio
def roll(c,step,ax):
    '''
    Periodic shift of array ``c`` along axis ``ax`` via np.roll.
    A step of +1 gives c_{j-1}. A step of -1 gives c_{j+1}.
    The commented-out variant below replicated the edge cell instead of
    wrapping (a non-periodic boundary, tagged ## <BC TAG>).
    '''
    return np.roll(c,step,ax)
    # if ax == 0:
    #     if step == 1:
    #         return np.vstack([c[0],c[:-1]]) ## <BC TAG>
    #     elif step == -1:
    #         return np.vstack([c[1:],c[-1]]) ## <BC TAG>
    # # elif ax == 1: # THIS DIRECTION UNTESTED
    # #     if step == 1:
    # #         return np.hstack([c[:,0],c[:,:-1]])
    # #     elif step == -1:
    # #         return np.hstack([c[:,1:],c[:,-1]])
    # else: print('WARNING: THIS AXIS NOT IMPLEMENTED.')
def flux(c, v):
    """Advective flux c * u(c, v) of the segregation model."""
    return c * velocity(c, v)
def velocity(c, v):
    """Concentration-dependent segregation velocity.

    Size-ratio model: u = v * (1 - 1/(c + (1-c)*R)) with R = 2.5; the simpler
    u = v*(1-c) model is kept below for reference.
    """
    # vel = v*(1-c) # simple segregation model
    size_ratio = 2.5  # size ratio R
    return v * (1 - 1 / (c + (1 - c) * size_ratio))
def KT(c,v,dx,dt,ax):
    """Semi-discrete Kurganov-Tadmor right-hand side dc/dt along axis ``ax``.

    Steps: limited MUSCL slope -> left/right face reconstructions ->
    local maximum wave speeds -> central-upwind (Lax-Friedrichs-like) flux
    difference. ``c`` and ``v`` are expected to already carry the one-cell
    ghost border added by pad().
    """
    # Limited cell-centred slope from the two one-sided differences.
    cx = limiter((c - roll(c,1,ax))/dx, (roll(c,-1,ax) - c)/dx)
    # cx = minmod2((roll(c,-1,ax) - c)/dx,
    #             (roll(c,-1,ax) - roll(c,1,ax))/(2*dx),
    #             (c - roll(c,1,ax))/dx
    #             )
    # Reconstructed interface values: plus/minus = right/left side of a face.
    cplusleft = c - dx/2*cx
    cminusright = c + dx/2*cx
    cplusright = roll(cplusleft,-1,ax)
    cminusleft = roll(cminusright,1,ax)
    vleft = roll(v,1,ax)
    vright = roll(v,-1,ax)
    # Local propagation speeds at the right and left faces.
    aright = np.maximum(np.abs(velocity(cminusright,vright)),np.abs(velocity(cplusright,vright)))
    aleft = np.maximum(np.abs(velocity( cminusleft, vleft)),np.abs(velocity( cplusleft, vleft)))
    # Central flux difference plus the numerical-diffusion correction terms.
    RHS = -( flux( cplusright,vright) +
            flux(cminusright,vright) -
            flux( cplusleft, vleft) -
            flux( cminusleft, vleft) -
            ( aright*(cplusright - cminusright) -
            aleft*(cplusleft - cminusleft) )
            )/(2*dx)
    return RHS
def pad(c, v):
    """Add a one-cell ghost border around both fields.

    Concentration replicates its edge values; velocity is padded with zeros
    (the default constant), which enforces the no-flux boundary condition.
    """
    padded_c = np.pad(c, 1, mode='edge')
    padded_v = np.pad(v, 1, mode='constant')  # constant default fill is zero
    return padded_c, padded_v
def BC(c,v): # apply no flux boundaries ## <BC TAG>
    """Boundary-condition hook. Currently a no-op: the no-flux boundary is
    imposed through the zero-velocity ghost cells added by pad(). The
    commented-out lines record earlier boundary experiments and are kept
    for reference."""
    # KIND OF WORKS
    # c[:padwidth] = 0
    # c[-padwidth:] = 1
    # v[:padwidth] = 0
    # v[-padwidth:] = 0
    # c[1] = div0(c[2]*v[2],v[1])
    # c[0] = div0(c[1]*v[1],v[0])
    # c[-2] = div0(c[-3]*v[-3],v[-2])
    # c[-1] = div0(c[-2]*v[-2],v[-1])
    # v[1] = -div0(c[2]*v[2],c[1])
    # v[0] = 0#-div0(c[1]*v[1],c[0])
    # v[-2] = -div0(c[-3]*v[-3],c[-2])
    # v[-1] = 0#-div0(c[-2]*v[-2],c[-1])
    # print(c[-1,0])
    # c[0] = c[2]
    # c[-1] = c[-3]
    # v[0] = 0
    # v[1] = 0
    # v[2] = 0
    # v[3] = 0
    # v[-2] = 0
    # v[-1] = 0
    # v[1] = -div0(c[2]*v[2],c[1])
    # v[0] = -div0(c[1]*v[1],c[0])
    # v[-2] = -div0(c[-3]*v[-3],c[-2])
    # v[-1] = -div0(c[-2]*v[-2],c[-1])
    # c[-1] = 1
    # c[0] = c[1] = 0
    # c[-1] = c[-2] = 1
    return c, v
def RK4(C,V,dx,dt,ax):
    """Classical 4th-order Runge-Kutta step of the KT scheme.

    Note: returns the INCREMENT dc (callers do ``c += RK4(...)``), unlike
    RK3 below which returns the updated field itself. The ghost border added
    by pad() is stripped before returning.
    """
    c,v = pad(C,V)
    k1 = KT(c, v,dx,dt,ax)
    k2 = KT(c+dt/2*k1,v,dx,dt,ax)
    k3 = KT(c+dt/2*k2,v,dx,dt,ax)
    k4 = KT(c+dt*k3, v,dx,dt,ax)
    dc = dt/6*(k1+2*k2+2*k3+k4)
    return dc[1:-1,1:-1]
def RK3(C, V, dx, dt, ax):
    """Third-order SSP (Shu-Osher) Runge-Kutta step of the KT scheme.

    Returns the UPDATED field (not an increment, unlike RK4/Euler). The
    ghost border added by pad() is stripped before returning.

    Bug fix: the convex-combination weights were the truncated decimals
    0.33333/0.66667; exact 1/3 and 2/3 restore the scheme's conservation
    and third-order accuracy.
    """
    c, v = pad(C, V)
    c1 = c + dt * KT(c, v, dx, dt, ax)
    c2 = 0.75 * c + 0.25 * (c1 + dt * KT(c1, v, dx, dt, ax))
    c3 = (1.0 / 3.0) * c + (2.0 / 3.0) * (c2 + dt * KT(c2, v, dx, dt, ax))
    return c3[1:-1, 1:-1]
def Euler(c,v,dx,dt,ax):
    """Forward-Euler increment dt*KT(c) (callers do ``c += Euler(...)``),
    with the pad() ghost border stripped before returning."""
    c,v = pad(c,v)
    dc = dt*KT(c,v,dx,dt,ax)
    return dc[1:-1,1:-1]
def diffusion(c, D, dx, dt, ax):
    """One explicit Euler step of linear diffusion along axis ``ax``.

    The Laplacian of D*c is formed by applying np.gradient twice
    (second-order central differences, one-sided at the boundaries).
    """
    first_deriv = np.gradient(D * c, dx, axis=ax)
    second_deriv = np.gradient(first_deriv, dx, axis=ax)
    return c + dt * second_deriv
def main():
    """Drive a 1-D segregation test case: advect an initial slab of
    concentration with the RK3/KT scheme plus explicit diffusion, plotting
    ten snapshots of the profile before showing the figure."""
    nx = 1
    ny = 201
    L = 1.0
    c = np.zeros([ny,nx])
    c[ny//4:3*ny//4] = 1.0      # initial slab of concentration 1 in the middle half
    v = -np.ones_like(c)        # uniform downward advection velocity
    CFL = 0.025
    dx = dy = L/(np.maximum(nx,ny)-1)
    dt = CFL*dx*4/(np.max(np.abs(v)))
    t_max = 2.0
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int performs the same truncation here.
    nt = int(t_max/dt)
    D = 5e-3                    # diffusion coefficient
    for i in range(nt):
        # Advection step (RK3 returns the updated field) then diffusion step.
        c = RK3(c,v,dy,dt,ax=0)
        c = diffusion(c,D,dy,dt,ax=0)
        if i%(nt//10)==0:
            plt.plot(c[:,0])
            print(' t = ' + str(i*dt) + ' ', end='\r')
    plt.show()
if __name__=='__main__':
    # Script entry point: run the 1-D demo and report completion.
    main()
    print('\nAll done.')
| benjym/poly-mpm | new_integrator.py | new_integrator.py | py | 5,742 | python | en | code | 13 | github-code | 13 |
# Program to detect multiple alternatives in a class
import re
def parse(text, components):
    """Split *text* into sentence-like chunks and group them by the aircraft
    component each one mentions.

    A chunk that names no component but contains a pronoun (it/they/them) is
    appended to the description(s) recorded for the previous chunk.

    Bug fixes vs. the original:
      * the caller's ``components`` list is no longer mutated in place
        (lower-casing now happens on a local copy);
      * the alternation pattern is built with str.join, so an empty
        component list no longer produces the invalid regex ")";
      * the dead, never-returned ``output`` string builder was removed.

    :param text: free-form report text
    :param components: component names to look for (case-insensitive)
    :returns: dict mapping lower-cased component name to a list of
              {'index': sentence_index, 'sentence': text} entries
    """
    print('RE: TEXT', text)
    # Work on lower-cased copies; do not mutate the caller's list.
    components = [component.lower() for component in components]
    text = text.lower()
    print("Components: ", components)
    # Split the text into chunks ending in '.', '?' or ','.
    sentences = re.findall(r".*?[\.\?\,]", text)
    print('Sentences: ', sentences)
    print(sentences)
    # Alternation matching any known component name, e.g. "(engine|wing)".
    component_alt = "(" + "|".join(components) + ")"
    print("Component Alternatives:", component_alt)
    match_rex = re.compile(component_alt, flags=re.IGNORECASE)
    # NOTE(review): this also matches 'it' inside words such as 'with';
    # consider word-boundary anchors if that proves to be a problem.
    pronoun_rex = re.compile(".*(It|they|them).*", flags=re.IGNORECASE)
    # One description list per component.
    component_desc = {component: [] for component in components}
    # Components whose descriptions were extended by the previous sentence.
    last_appended = []
    for index, sentence in enumerate(sentences):
        components_matched = match_rex.findall(sentence)
        if components_matched:
            for component in components_matched:
                component_desc[component].append({'index': index, 'sentence': sentence})
        elif last_appended and pronoun_rex.match(sentence):
            # Pronoun-only sentence: extend the most recent description(s).
            for component in last_appended:
                component_desc[component][-1]['sentence'] += sentence
        last_appended = components_matched
    return component_desc
| vyshnavkarunonYT/ai-based-flight-debriefing | src/utils/regparser.py | regparser.py | py | 3,471 | python | en | code | 0 | github-code | 13 |
from django.conf.urls import patterns, include, url
from .views import index, db
# URL routes for the translate app. NOTE(review): patterns() and string view
# references were removed in Django 1.10 — this module targets an older Django.
urlpatterns = patterns('',
    # Database-backed translation view; the captured word is passed positionally.
    url(r'^db/(\w+)/', db, name='translate_db'),
    # Rosetta .po-file editing views.
    url(r'^pofile/$', 'rosetta.views.home', name='rosetta-home'),
    url(r'^$', index, name='translate_index'),
    url(r'^download/$', 'rosetta.views.download_file', name='rosetta-download-file'),
    url(r'^select/(?P<langid>[\w\-]+)/(?P<idx>\d+)/$','rosetta.views.lang_sel', name='rosetta-language-selection'),
)
| TechnoServe/SMSBookkeeping | tns_glass/translate/urls.py | urls.py | py | 476 | python | en | code | 0 | github-code | 13 |
from spack import *
import os
class Castep(MakefilePackage):
    """
    CASTEP is a leading code for calculating the properties of materials from
    first principles.

    Spack package for the licensed CASTEP distribution: the tarball is
    expected next to the current working directory (file:// URL), and the
    build uses the Intel MPI/MKL toolchain with MKL-provided FFTs.
    """
    homepage = "http://www.castep.org"
    # Licensed software: the source archive must be supplied locally.
    url = "file://%s/CASTEP-21.11.tar.gz" % os.getcwd()
    licensed = True
    version('21.11', sha256='d909936a51dd3dff7a0847c2597175b05c8d0018d5afe416737499408914728f')
    depends_on('intel-mpi')
    depends_on('intel-mkl')
    depends_on('fftw-api@3')
    def setup_environment(self, spack_env, run_env):
        # Binaries are installed directly into the prefix root.
        run_env.prepend_path('PATH', self.prefix)
    def build(self, spec, prefix):
        """Build with ifort + Intel MPI, using MKL for both FFT and math."""
        with working_dir(self.build_directory):
            make('ROOTDIR={}'.format(self.build_directory),
                 'FFT=mkl',
                 'FFTLIBDIR={}'.format(os.environ['MKLROOT']),
                 'MATHLIBS=mkl',
                 'MATHLIBDIR={}'.format(os.environ['MKLROOT']),
                 'ARCH=linux_x86_64_ifort',
                 'COMMS_ARCH=mpi'
                 )
    def install(self, spec, prefix):
        """Install the built binaries into the Spack prefix."""
        with working_dir(self.build_directory):
            make('ROOTDIR={}'.format(self.build_directory),
                 'INSTALL_DIR={}'.format(prefix),
                 'install')
| epfl-scitas/spack-repo-externals | packages/castep/package.py | package.py | py | 1,235 | python | en | code | 3 | github-code | 13 |
from os import listdir
from os.path import join
from werkzeug.utils import secure_filename
from flask import jsonify
from routes.detect_image import detect_image
import json
#--Methods--
def listmodels():
    """Return a JSON array naming every model bundled under ./static/models."""
    available_models = list(listdir('./static/models'))
    return jsonify(available_models)
def save_image(file):
    """Persist an uploaded file into the app's UPLOAD_FOLDER and return the
    saved path. The filename is sanitised with secure_filename before being
    joined onto the configured upload directory."""
    fname = file.filename
    save_dir = join(_app.config['UPLOAD_FOLDER'], secure_filename(fname))
    file.save(save_dir)
    return save_dir
def detect():
    """Handle a detection request: save the uploaded image, parse the JSON
    'param' form field (model name + thresholds), run detect_image, and
    return the result as JSON."""
    # NOTE(review): ``global`` is unnecessary here — both names are only
    # read, never rebound, in this function.
    global _req
    global _app
    img_dir = save_image(_req.files['file'])
    param = json.loads(_req.form['param'])
    response = jsonify(detect_image(param['model'], float(param['consistency']), float(param['uniqueness']), img_dir))
    return response
#--Routes--
# URL-name -> handler dispatch table used by app_routes().
routes = {
    'models': listmodels,
    'detect': detect
}
#--app & request object--
# Module-level slots populated by app_routes() on each call so the handlers
# above can reach the Flask app config and the current request.
_app = None
_req = None
def app_routes(url, app, req):
    """Dispatch *url* to its registered handler, first stashing the Flask app
    and request objects in module globals for the handlers to use."""
    global _app, _req
    _req = req
    _app = app
    return routes[url]()
'''
N๊ฐ์ ์ซ์๋ก ์ด๋ฃจ์ด์ง ์์ด
๋งจ ์์ ์ซ์๋ฅผ ๋งจ๋ค๋ก ๋ณด๋ด๋ ์์
์ M๋ฒํ์ ๋ ์์ด์ ๋งจ ์์ ์๋ ์ซ์๋?
'''
def order(lst, M):
    """Return the element at the front of *lst* after M rotate-left moves
    (each move sends the front element to the back).

    Improvement: the original popped/appended M times, O(M*N) work that also
    mutated the caller's list; after M rotations the front element is simply
    the one at index M modulo the list length, found in O(1).
    """
    return lst[M % len(lst)]
import sys
sys.stdin = open('input.txt', 'r')  # SWEA-style harness: feed the sample file as stdin
T=int(input())  # number of test cases
for test_case in range(1,T+1):
    N, M = map(int, input().split())  # N: list length (unused), M: rotation count
    lst = list(map(int, input().split()))
    print(f"#{test_case} {order(lst, M)}")
| Seobway23/Laptop | Algorithm/february_class/0220/ํ์ .py | ํ์ .py | py | 467 | python | ko | code | 0 | github-code | 13 |
"""Aliquot Number: the aliquot of a number is defined as the sum of the proper divisors of a number.
Example - 1: aliquot of 15 = 1 + 3 + 5 = 9
Example - 2: aliquot of 30 = 1 + 2 + 3 + 5 + 6 + 10 + 15 = 42
Note : aliquot of any prime is 1.
Write a function that determines the aliquot of a given number. """
def aliquot_number(n: int) -> int:
    """Return the aliquot of n: the sum of its proper divisors.

    Examples: aliquot(15) = 1 + 3 + 5 = 9; any prime maps to 1; aliquot(1) = 0.

    Improvements: divisors are now collected in complementary pairs (d, n//d),
    reducing the original O(n) scan to O(sqrt(n)), and the accumulator no
    longer shadows the builtin ``sum``.

    NOTE(review): n <= 0 returning 1 preserves the original guard, although
    the aliquot is mathematically undefined there.
    """
    if n <= 0:
        return 1
    total = 0
    d = 1
    while d * d <= n:
        if n % d == 0:
            if d != n:              # proper divisors exclude n itself
                total += d
            partner = n // d
            if partner != d and partner != n:
                total += partner
        d += 1
    return total


aliquot_number(1)  # smoke call kept from the original script
| unitinguncle/PythonPrograms | Aliqout Number.py | Aliqout Number.py | py | 526 | python | en | code | 0 | github-code | 13 |
import torch
from torch import nn
from torch.nn.parameter import Parameter
class ECALayer(nn.Module):
    """Efficient Channel Attention (ECA) module.

    Squeezes the spatial dimensions with global average pooling, models local
    cross-channel interaction with a 1-D convolution of kernel ``k_size``, and
    rescales the input by the resulting sigmoid gate.

    Args:
        channel: Number of channels of the input feature map (kept for
            interface compatibility; the conv itself is channel-agnostic).
        k_size: Adaptive selection of kernel size (odd).
    """
    def __init__(self, channel, k_size=3):
        super(ECALayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # (B, C, H, W) -> (B, C, 1, 1): one descriptor per channel.
        squeezed = self.avg_pool(x)
        # Treat the channel axis as a length-C 1-D signal of shape (B, 1, C).
        channel_signal = squeezed.squeeze(-1).transpose(-1, -2)
        attention = self.conv(channel_signal).transpose(-1, -2).unsqueeze(-1)
        # Channel-wise gate in (0, 1), broadcast over the spatial dimensions.
        gate = self.sigmoid(attention)
        return x * gate.expand_as(x)
class GCTLayer(nn.Module):
    """Gated Channel Transformation (GCT) attention layer.

    Computes a per-channel embedding (L2 or L1 energy scaled by ``alpha``),
    normalises it across channels, and gates the input with
    ``1 + tanh(embedding * norm + beta)``. With the default parameters
    (alpha=1, gamma=0, beta=0) the layer is an identity mapping.

    Args:
        num_channels: number of input channels.
        epsilon: numerical-stability constant.
        mode: 'l2' or 'l1' embedding norm.
        after_relu: for 'l1' mode, skip abs() when inputs are already >= 0.
    """
    def __init__(self, num_channels, epsilon=1e-5, mode='l2', after_relu=False):
        super(GCTLayer, self).__init__()
        self.alpha = nn.Parameter(torch.ones(1, num_channels, 1, 1))
        self.gamma = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
        self.beta = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
        self.epsilon = epsilon
        self.mode = mode
        self.after_relu = after_relu

    def forward(self, x):
        if self.mode == 'l2':
            embedding = (x.pow(2).sum((2, 3), keepdim=True) + self.epsilon).pow(0.5) * self.alpha
            norm = self.gamma / (embedding.pow(2).mean(dim=1, keepdim=True) + self.epsilon).pow(0.5)
        elif self.mode == 'l1':
            _x = x if self.after_relu else torch.abs(x)
            embedding = _x.sum((2, 3), keepdim=True) * self.alpha
            norm = self.gamma / (torch.abs(embedding).mean(dim=1, keepdim=True) + self.epsilon)
        else:
            # Bug fix: the original printed a message and called sys.exit()
            # without ever importing sys, which raised a bare NameError.
            raise ValueError('Unknown mode: {!r}'.format(self.mode))
        gate = 1. + torch.tanh(embedding * norm + self.beta)
        return x * gate
#written by Aceroni
#aceroni.com
import asyncio
import os
import discord
from discord.ext import commands
# Bot credentials come from the environment; never hard-code the token.
TOKEN = os.getenv('DISCORD_TOKEN')
intents = discord.Intents.all()
# NOTE(review): Intents.all() already enables these two flags; the explicit
# assignments below are redundant but harmless.
intents.members = True
intents.presences = True
bot = commands.Bot(command_prefix="!", intents=intents)
class Ctf(commands.Cog):
    """Discord cog implementing a per-user CTF quiz state machine.

    Each player advances through GAME_STATES by answering via DM; the
    questions are encoded puzzles (the first looks ROT13-encoded, later ones
    resemble letter-number, Morse and phone-keypad encodings) and the final
    state yields the flag.
    """
    def __init__(self,b):
        self.bot = b
        # Maps discord user id -> current state key in GAME_STATES.
        self.player_states = {}
        # State machine: each state holds its prompt, choices, the expected
        # (upper-cased) answer, the follow-up state, and both responses.
        self.GAME_STATES = {
            "UNINITIATED": {
                "question":"Hello, {name} would you like to play a game?",
                "answers":[],
                "correct_answer":"YES",
                'next_state':"QUESTION_1",
                "incorrect_response":"aww schucks let me know if you want to play",
                "correct_response":"awesome! let me know when you are ready for the next question by using the !ctf command"
            },
            "QUESTION_1":{
                "question":"Jung qnl jnf gur svefg OFvqrfCQK rirag?",
                "answers":["Sevqnl, Bpgbore 7, 2011","Zbaqnl, Whyl 4, 2011","Sevqnl, Abirzore 9, 2012","Fngheqnl, Frcgrzore 28, 2013"],
                "correct_answer":"FRIDAY, OCTOBER 7, 2011",
                "next_state":"QUESTION_2",
                "incorrect_response":"I am sorry that is incorrect, use !ctf to try again",
                "correct_response":"Good job! type !ctf to get the next question"
            },
            "QUESTION_2":{
                "question":"23-8-1-20 23-1-19 20-8-5 20-9-20-12-5 15-6 20-8-5 20-1-12-11 7-9-22-5-14 2-25 7-5-14-5 11-9-13 1-20 20-8-5 6-9-18-19-20 2-19-9-4-5-19-16-4-24 5-22-5-14-20?",
                "answers":["3-15-22-5-18-20 3-1-12-12-9-14-7: 19-5-3-18-5-20-19 15-6 19-15-3-9-1-12 5-14-7-9-14-5-5-18-9-14-7 18-5-22-5-1-12-5-4!","12-5-22-5-12 21-16: 8-15-23 19-5-3-21-18-9-20-25 9-19-14โ20 12-9-11-5 16-12-1-25-9-14-7 1 22-9-4-5-15 7-1-13-5","23-8-25 9-14-6-15-19-5-3 9-19 8-5-12-16-9-14-7 9-20 6-1-9-12โฆ 1-14-4 8-15-23 20-15 6-9-24 9-20","15-16-5-14-9-14-7 18-5-13-1-18-11-19"],
                "correct_answer":"WHY INFOSEC IS HELPING IT FAILโฆ AND HOW TO FIX IT",
                "next_state":"QUESTION_3",
                "incorrect_response":"I am sorry that is incorrect, use !ctf to try again",
                "correct_response":"Good job! type !ctf to get the next question"
            },
            "QUESTION_3":{
                "question":".-- .... . .-. . / -.. .. -.. / - .... . / ..--- ----- ..--- ----- / -... ... .. -.. . ... .--. -.. -..- / . ...- . -. - / - .- -.- . / .--. .-.. .- -.-. . ..--..",
                "answers":["... -- .. - .... / -- . -- --- .-. .. .- .-.. / ... - ..- -.. . -. - / ..- -. .. --- -.","... -- .. - .... / -- . -- --- .-. .. .- .-.. / ... - ..- -.. . -. - / ..- -. .. --- -.","--- .-. . --. --- -. / -.-. --- -. ...- . -. - .. --- -. / -.-. . -. - . .-.",".--- --- . / ..-. .. - --.. .----. ... / --. .- .-. .- --. ."],
                "correct_answer":"ONLINE",
                "next_state":"QUESTION_4",
                "incorrect_response":"I am sorry that is incorrect, use !ctf to try again",
                "correct_response":"Good job! type !ctf to get the next question"
            },
            "QUESTION_4": {
                "question":"9 44 666 0 444 7777 0 8 44 33 0 222 44 2 444 777 6 2 66 0 666 333 0 8 44 33 0 222 333 7 0 777 33 888 444 33 9 0 22 666 2 777 3 0 333 666 777 0 22 7777 444 3 33 7777 7 3 99 0 8 44 444 7777 0 999 33 2 777?",
                "answers":["8 666 7 44 33 777 0 8 444 6 9999 33 66","6 2 4 4 444 33 0 5 2 88 777 33 4 88 444","S6 2 777 444 666 66 0 6 2 777 7777 222 44 2 555 33 55","6 444 222 44 2 33 555 0 555 33 444 22 666 9 444 8 9999"],
                "correct_answer":"MICHAEL LEIBOWITZ",
                "next_state":"FINISHED",
                "incorrect_response":"I am sorry that is incorrect, use !ctf to try again",
                "correct_response":"Good job! type !ctf to get your prize"
            },
            "FINISHED":{
                "flag":"BSidesPDX{s0m3t1m3s_4_C7F_f33ls_l1k3_4_7r1v14l_pur5u17}"
            }
        }
    async def run_quiz(self,ctx,state):
        """Send the question for *state*, wait for the next message in the
        same channel, and advance the player's state on a correct answer."""
        if state == "FINISHED":
            await ctx.send(self.GAME_STATES[state]["flag"])
            return
        channel = ctx.channel
        await ctx.send(self.GAME_STATES[state]["question"].format(name = ctx.author.name) + "\n".join(self.GAME_STATES[state]["answers"]))
        def check(m):
            # Accept only messages from the same (DM) channel.
            return m.channel == channel
        msg = await self.bot.wait_for('message', check = check)
        # Answers are compared case-insensitively via upper-casing.
        if msg.content.upper() == self.GAME_STATES[state]["correct_answer"]:
            self.player_states[ctx.author.id] = self.GAME_STATES[state]["next_state"]
            await ctx.channel.send(self.GAME_STATES[state]["correct_response"])
        else:
            await ctx.channel.send(self.GAME_STATES[state]["incorrect_response"])
    @commands.Cog.listener()
    async def on_message(self,message):
        # Tease anyone trying to shortcut the quiz by asking for the flag.
        if "FLAG" in message.content.upper():
            await message.channel.send("Cmon, you didn't think it would be that easy did you?")
    @commands.command(name = "ctf")
    async def cmd_ctf(self,ctx):
        """
        Starts the quiz to receive the flag for BSidesPDX PDX CTF . Must be used in a direct message with the 0xBill the bot.
        """
        # Ignore other bots and any invocation outside a direct message.
        if ctx.author.bot == True:
            return
        if not isinstance(ctx.channel,discord.DMChannel):
            return
        if ctx.author.id not in self.player_states:
            self.player_states[ctx.author.id] = "UNINITIATED"
        await self.run_quiz(ctx,self.player_states[ctx.author.id])
@bot.event
async def on_ready():
print(f'{bot.user.name} has connected to Discord!')
async def setup(bot):
    # discord.py extension entry point: register the Ctf cog on the bot.
    await bot.add_cog(Ctf(bot))
if __name__ == "__main__":
asyncio.run(setup(bot))
bot.run(TOKEN)
| BSidesPDX/CTF-2022 | misc/100-discordia/src/bot.py | bot.py | py | 5,811 | python | en | code | 0 | github-code | 13 |
25102934966 | from sense_hat import SenseHat
sense = SenseHat()
from time import sleep
# Colour tuples (R, G, B) for the Sense HAT 8x8 LED matrix.
b=(0,0,0)
w=(255,255,255)
r=(255,0,0)
g=(0,255,0)
# Marble start position (column, row) and the game-over flag.
x=2
y=2
game_over = 0
# Maze layout: r = wall, b = open floor, g = the goal cell.
board = [
    [r,r,r,r,r,r,r,r],
    [r,b,b,b,b,b,b,r],
    [b,b,b,b,g,r,b,r],
    [b,r,r,b,r,r,b,r],
    [b,b,b,b,b,b,b,b],
    [b,r,b,r,r,b,b,b],
    [b,b,b,r,b,b,b,r],
    [r,r,b,b,b,r,r,r] ]
def check_wall(x, y, new_x, new_y):
    """Resolve a proposed marble move against the maze walls.

    Try the full move first, then sliding along a single axis, and fall
    back to staying put when every candidate cell is a red wall.
    """
    for cand_x, cand_y in ((new_x, new_y), (x, new_y), (new_x, y)):
        if board[cand_y][cand_x] != r:
            return cand_x, cand_y
    return x, y
# Translate the Sense HAT orientation (pitch/roll, in degrees) into a
# one-cell move of the marble, clamped to the 8x8 board and then checked
# against the maze walls.
def move_marble(pitch, roll, x, y):
    """Return the marble's next (x, y) for the current tilt angles."""
    new_x, new_y = x, y  # default: no movement on either axis
    # Pitch drives horizontal motion; stop at columns 0 and 7.
    if 1 < pitch < 179:
        if x != 0:
            new_x = x - 1
    elif 179 < pitch < 359:
        if x != 7:
            new_x = x + 1
    # Roll drives vertical motion; stop at rows 7 and 0.
    if 1 < roll < 179:
        if y != 7:
            new_y = y + 1
    elif 179 < roll < 359:
        if y != 0:
            new_y = y - 1
    return check_wall(x, y, new_x, new_y)
# Main game loop: read the tilt, move the marble, redraw, and stop once the
# green goal cell has been covered by the marble.
while not game_over:
    pitch = sense.get_orientation()['pitch']
    roll = sense.get_orientation()['roll']
    x,y = move_marble(pitch,roll,x,y)
    board[y][x] = w
    # Flatten the 8x8 grid into the 64-element list set_pixels() expects.
    board_sum = sum(board,[])
    sense.set_pixels(board_sum)
    # Reaching the goal overwrites the only green cell, so its absence from
    # the flattened board means the maze has been solved.
    if g not in board_sum:
        game_over = 1
    else:
        pass
    sleep(0.05)
    # Restore the floor behind the marble before the next frame.
    board[y][x] = b
sense.show_message('u winner')
print("youre winner")
| bleow/CZ1103-IntroToCS_Python | lab6.py | lab6.py | py | 1,575 | python | en | code | 0 | github-code | 13 |
36941751198 | # Caluculate the different ways of climbing the stairs, assuming this person can only climb 1 or 2 steps at a time
# Count the distinct ways to climb a staircase taking 1 or 2 steps at a
# time; climbStairs reduces to the Fibonacci sequence, computed iteratively.
class Solution:
    def climbStairs(self, numStairs):
        """Ways to climb numStairs stairs using steps of size 1 or 2."""
        return self.fib(numStairs + 1)

    def fib(self, n):
        """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1)."""
        seq = [0, 1]
        for _ in range(2, n + 1):
            seq.append(seq[-1] + seq[-2])
        return seq[n]

    def climbStairsMultiple(self, numStairs, numSteps):
        """Variant that expands each recurrence level numSteps times."""
        return self.fib_multiple(numStairs + 1, numSteps)

    def fib_multiple(self, n, m):
        """Fibonacci-like recurrence whose level sum is accumulated m times."""
        result = 0
        for _ in range(m):
            if n <= 1:
                return n
            result += self.fib_multiple(n - 1, m) + self.fib_multiple(n - 2, m)
        return result
# Demo run: prints 44 for 4 stairs with multiplier 2.
print(Solution().climbStairsMultiple(4,2))
print | amandazhuyilan/Breakfast-Burrito | Problems-and-Solutions/python/climbingStairs.py | climbingStairs.py | py | 726 | python | en | code | 3 | github-code | 13 |
69806485777 | import threading
import ELYZA_res
#import LINE_res
#import rinna_res
#import rinna_gptq_res
import talk
import time
from datetime import datetime, timedelta
### for speach recognition
import speech_recognition as sr
### for julius
import socket
import re
import vosk_streaming
# Selector values for the speech-recognition backend used by `chat`.
SPEECH_RECOGNITION_GOOGLE = 0
SPEECH_RECOGNITION_JULIUS = 1
SPEECH_RECOGNITION_VOSK = 2
class chat():
    """Voice chat session: recognises speech with the selected backend,
    generates an LLM reply, and speaks it from a background thread.

    The thread is gated by a threading.Event (`started`); begin()/end()
    toggle it and kill() shuts the thread (and any Julius socket) down.
    """
    def __init__(self, mode):
        # mode selects the recognition backend (see SPEECH_RECOGNITION_*).
        self.mode = mode
        self.started = threading.Event()
        self.alive = True
        self.chat_time = time.time()
        if self.mode == SPEECH_RECOGNITION_GOOGLE:
            ### for speach recognition
            self.r = sr.Recognizer()
            self.mic = sr.Microphone(device_index = 0)
        elif self.mode == SPEECH_RECOGNITION_JULIUS:
            ### for julius
            # IP address of the local environment
            self.host = '127.0.0.1'
            # Port number used to talk to Julius
            self.port = 10500
            # Regex that extracts recognised words from the Julius output
            self.extracted_word = re.compile('WORD="([^"]+)"')
            # Connect to Julius over a TCP socket
            self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.client.connect((self.host, self.port))
            time.sleep(2)
        elif self.mode == SPEECH_RECOGNITION_VOSK:
            ### for vosk
            self.vosk_asr =vosk_streaming.init()
        self.user_message = ''
        self.response = ''
        self.before = ''
        self.data = ''
        # Background worker that runs the listen -> respond -> speak loop.
        self.thread = threading.Thread(target=self.chat_sentence_thread)
        self.thread.start()
    def __del__(self):
        self.kill()
    def begin(self):
        """Reset the conversation context and unblock the worker thread."""
        print("begin")
        self.chat_time = time.time()
        self.before = ''
        self.started.set()
    def end(self):
        """Pause the worker thread after its current iteration."""
        self.started.clear()
        print("\nend")
    def kill(self):
        """Stop the worker thread for good and release backend resources."""
        self.started.set()
        self.alive = False
        self.thread.join()
        if self.mode == SPEECH_RECOGNITION_JULIUS:
            ### for julius
            print('PROCESS END')
            self.client.send("DIE".encode('shift_jis'))
            self.client.close()
    def get_chat_time(self):
        # Timestamp of the last completed chat iteration.
        return self.chat_time
    def llm_chat(self):
        """Recognise one utterance and return the generated LLM reply.

        NOTE(review): the bare `except:` swallows every failure (including
        KeyboardInterrupt) and substitutes the apology message below.
        """
        self.response = 'ๅฃฐใ่ใๅใใพใใใงใใใผ'
        if self.mode == SPEECH_RECOGNITION_GOOGLE:
            ### for speach recognition
            with self.mic as source:
                self.r.adjust_for_ambient_noise(source) # compensate for ambient noise
                audio = self.r.listen(source)
        try:
            self.data = ""
            t1 = time.time()
            if self.mode == SPEECH_RECOGNITION_GOOGLE:
                ### for speach recognition
                self.user_message = self.r.recognize_google(audio, language='ja-JP')
            if self.mode == SPEECH_RECOGNITION_JULIUS:
                ### for julius
                # Read until Julius terminates the recognition block.
                while (self.data.find("</RECOGOUT>\n.") == -1):
                    self.data += str(self.client.recv(1024).decode('shift_jis'))
                # Extract the recognised words
                self.user_message = ""
                for word in filter(bool, self.extracted_word.findall(self.data)):
                    self.user_message += word
            if self.mode == SPEECH_RECOGNITION_VOSK:
                self.user_message = vosk_streaming.get_message(self.vosk_asr)
            t2 = time.time()
            print(self.user_message)
            self.response = ELYZA_res.elyza_response(self.user_message)
            # self.response = LINE_res.line_response(user_message)
            # self.response = rinna_res.rinnna_response(user_message)
            # self.response = rinna_gptq_res.rinna_gptq_response(user_message, self.before)
            t3 = time.time()
            self.before = self.response
            print('talk recognize:', t2 - t1)
            print('response create:', t3 - t2)
        except:
            self.response = 'ใใฟใพใใใใใใใกใฉใใญใใใใพใใผ'
        return self.response
    def chat_sentence_thread(self):
        """Worker loop: speak each reply, then wait to be (re)started."""
        self.started.wait()
        while self.alive:
            talk.read_text(self.llm_chat())
            self.started.wait()
            self.chat_time = time.time()
    def get_user_message(self):
        # Last recognised user utterance.
        return self.user_message
    def get_response(self):
        # Last generated reply.
        return self.response
if __name__ == '__main__':
    # Manual smoke test: start a VOSK-backed chat session and end it after
    # 60 seconds with no completed chat iteration.
    test = chat(SPEECH_RECOGNITION_VOSK)
    test.begin()
    while True:
        time.sleep(1)
        if(time.time() - test.get_chat_time()) > 60:
            test.end()
            break
    test.kill()
| fernangit/win_py_Greeting | LLM_chat.py | LLM_chat.py | py | 4,630 | python | en | code | 0 | github-code | 13 |
10176748855 | import sys
#recipe = { "ingredients": [], "meal": "", "prep_time": }
# Seed recipes: each maps to its ingredient list, the meal it belongs to,
# and its preparation time in minutes.
Sandwich = { "ingredients" : ["ham", "bread", "cheese", "tomatoes"], "meal" : "lunch", "prep_time" : 10}
Cake = { "ingredients" : ["flour", "sugar", "eggs"], "meal" : "dessert", "prep_time" : 60}
Salad = { "ingredients" : ["avocado", "arugula", "tomatoes", "spinach"], "meal" : "lunch", "prep_time" : 15}
cookbook = {"Sandwich" : Sandwich, "Cake" : Cake, "Salad" : Salad}
def print_recipe_names():
    """Print the name of every recipe currently in the cookbook."""
    for name in cookbook:
        print(name)
def print_recipe_details(recipe_name):
    """Print one recipe's details, or a notice when it is unknown."""
    if recipe_name not in cookbook:
        print("Recipe for", recipe_name, "doesn't exist in cookbook!")
        return
    details = cookbook[recipe_name]
    print("Recipe for", recipe_name, ":")
    print("  Ingredients list:", details["ingredients"])
    print("  To be eaten for", details["meal"])
    print("  Takes", details["prep_time"],"minutes of cooking.")
def delete_recipe(recipe_name):
    """Remove a recipe from the cookbook, or print a notice when unknown."""
    if recipe_name not in cookbook:
        print("Recipe for", recipe_name, "doesn't exist in cookbook!")
        return
    del cookbook[recipe_name]
    print(recipe_name, "has been deleted from the cookbook.")
def is_valid_number(value):
    """Return True when *value* parses as an int or a float.

    Blank or whitespace-only input prints a warning and returns False.
    """
    if not value.strip():
        print("The value can't be empty.")
        return False
    try:
        int(value)
        return True
    except ValueError:
        pass
    try:
        float(value)
        return True
    except ValueError:
        pass
    # Both parses failed.  The original's extra "'.' in value" branch was
    # unreachable dead code: it fell through to the same False either way.
    return False
def add_recipe():
    """Interactively prompt for a new recipe and store it in the cookbook.

    Asks for a name, a non-empty list of unique ingredients, a meal type,
    and a numeric preparation time; every prompt loops until valid input
    is supplied.
    """
    new_recipe = {"ingredients" : [], "meal" : None, "prep_time" : None}
    print("Enter a name:")
    while True:
        recipe_name = input()
        if recipe_name.strip() == "":
            print("The value can't be empty.")
        else:
            recipe_name = recipe_name.strip()
            break
    print("Enter ingredients:")
    while True:
        ingredient = input()
        if ingredient == "":
            # A blank line ends ingredient entry, but only once at least one
            # ingredient has been recorded.
            if len(new_recipe["ingredients"]) != 0:
                break
        elif ingredient.strip() == "":
            print("The value can't be empty.")
        elif ingredient in new_recipe["ingredients"]:
            # Typo fix: "differenc" -> "different".
            # NOTE(review): duplicates are checked against the raw input but
            # values are stored stripped, so " ham" slips past "ham".
            print(ingredient, "already exists in the recipe. Please enter a different ingredient.")
        else:
            new_recipe["ingredients"].append(ingredient.strip())
    print("Enter a meal type:")
    while True:
        meal = input()
        if meal.strip() == "":
            print("The value can't be empty.")
        else:
            new_recipe["meal"] = meal.strip()
            break
    print("Enter a preparation time:")
    while True:
        prep_time = input()
        if is_valid_number(prep_time):
            # Stored as the raw string, matching the seeded recipes' ints
            # only loosely; kept as-is for backward compatibility.
            new_recipe["prep_time"] = prep_time
            break
        else:
            print("The value should be a number.")
    cookbook[recipe_name] = new_recipe
def print_option_list():
    """Print the interactive menu of cookbook actions."""
    menu = (
        "List of available option:",
        "  1: Add a recipe",
        "  2: Delete a recipe",
        "  3: Print a recipe",
        "  4: Print the cookbook",
        "  5: Quit",
    )
    for line in menu:
        print(line)
def select_one_option(option):
    """Dispatch one validated menu option (1-5) to the matching action."""
    if option == 1:
        add_recipe()
    elif option == 2:
        print("Please enter a recipe name to delete:")
        # Re-prompt until a non-blank name is given.
        while True:
            recipe_name = input()
            if recipe_name.strip() == "":
                print("The value can't be empty.")
            else:
                recipe_name = recipe_name.strip()
                break
        delete_recipe(recipe_name)
    elif option == 3:
        print("Please enter a recipe name to get its details:")
        # Re-prompt until a non-blank name is given.
        while True:
            recipe_name = input()
            if recipe_name.strip() == "":
                print("The value can't be empty.")
            else:
                recipe_name = recipe_name.strip()
                break
        print_recipe_details(recipe_name)
    elif option == 4:
        print_recipe_names()
    elif option == 5:
        # Option 5 terminates the whole program.
        print("Cookbook closed. Goodbye !")
        sys.exit(0)
def check_prompt_input():
    """Read one menu choice from stdin and run it if it is a valid option."""
    option_list = range(1, 6)
    num = input()
    if num.isdigit():
        option = int(num)
        if option in option_list:
            select_one_option(option)
        else:
            # Digit outside 1-5: show the menu again.
            print("Sorry, this option does not exist.")
            print_option_list()
    else:
        # Non-numeric input: show the menu again.
        print("Sorry, this option does not exist.")
        print_option_list()
if __name__ == "__main__":
print("Welcome to the Python Cookbook !")
print_option_list()
while True:
print("\nPlease select an option:")
check_prompt_input()
| jmcheon/python_module | 00/ex06/recipe.py | recipe.py | py | 3,949 | python | en | code | 0 | github-code | 13 |
24617965622 | """
Test the redis interface for user and docs handling.
"""
import pytest
import os
from lib.data import Data
from lib.ebook import write_epub
# Test-only configuration: points at Redis database 1 so the suite never
# touches the production database.
config = {
    'REDIS_HOST': 'localhost',
    'REDIS_PORT': 6379,
    'REDIS_DATABASE': 1,  # <-- TESTING
    'ADMIN_USER': 'admin',
    'TIME_ZONE': 'Australia/Sydney',
}
data = Data(config, strict=True)
@pytest.mark.integration
def test_write_epub():
    """
    Write the 'eukras/help' document to an EPUB file and check it exists,
    removing any stale file first so the assertion tests this run's output.
    """
    file_path = '/tmp/eukras-help.epub'
    if os.path.exists(file_path):
        os.remove(file_path)
    write_epub('eukras', 'help', file_path)
    assert os.path.exists(file_path)
| eukras/article-wiki | lib/test/test_ebook.py | test_ebook.py | py | 649 | python | en | code | 0 | github-code | 13 |
17814465955 | import re
value = "3113322113"
def describe(match):
    """Render one (run, char) regex group pair as '<run length><char>'."""
    run, char = match
    return str(len(run)) + char
def look_and_say(inp):
    """Apply one look-and-say step: each digit run becomes 'count digit'."""
    runs = re.findall(r"((.)\2*)", inp)
    return "".join(describe(section) for section in runs)
# Part 1: apply 40 look-and-say iterations and report the result length.
for x in range(0, 40):
    value = look_and_say(value)
print(len(value))
# Part 2: continue for 10 more iterations (50 total) and report again.
for x in range(0, 10):
    value = look_and_say(value)
print(len(value))
| QuarkNerd/adventOfCode | 2015/10.py | 10.py | py | 360 | python | en | code | 1 | github-code | 13 |
26589609195 | #!/usr/bin/env python
"""Script to run before releasing a new version."""
import argparse
import os
import subprocess
from rich.progress import Progress
from project_stats import stats
# Project layout and link constants used by the release steps below.
PROJECT_NAME = 'nori_ui'
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_DIR = os.path.join(ROOT_DIR, PROJECT_NAME)
DOCS_DIR = os.path.join(ROOT_DIR, 'docs')
STATS_DIR = os.path.join(ROOT_DIR, 'project_stats')
GITHUB_PATH = f'https://github.com/amorphousWaste/{PROJECT_NAME}'
def get_args() -> dict:
    """Parse the pre-release command-line flags into a plain dict."""
    parser = argparse.ArgumentParser()
    # All three options are boolean skip-flags with the same shape.
    skip_flags = (
        ('--skipdocs', 'Run the pre-release script without generating docs.'),
        ('--skipstats', 'Run the pre-release script without generating stats.'),
        ('--skipblack', 'Run the pre-release script without black linting.'),
    )
    for flag, help_text in skip_flags:
        parser.add_argument(flag, help=help_text, action='store_true')
    return vars(parser.parse_args())
def generate_docs() -> None:
    """Generate pdoc documentation."""
    # pdoc must be able to import the package; point PYTHONPATH at it.
    os.environ['PYTHONPATH'] = PROJECT_DIR
    cmd = [
        'pdoc',
        '--template-directory',
        os.path.join(DOCS_DIR, 'pdoc'),
        '--output-directory',
        DOCS_DIR,
        '--logo',
        f'"{GITHUB_PATH}/blob/main/images/icon_small.png"',
        '--logo-link',
        f'"{GITHUB_PATH}"',
        PROJECT_DIR,
    ]
    # NOTE(review): the bar only ticks before and after the blocking call,
    # so this is a spinner rather than a real progress indicator.
    with Progress(transient=True) as progress:
        task = progress.add_task('Running pdoc...', total=100)
        progress.update(task, advance=1)
        # Call pdoc via subprocess.
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        progress.update(task, advance=100)
        progress.stop()
def generate_stats() -> None:
    """Generate the bot stats."""
    # Delegates entirely to the project_stats package; side effects only.
    bot_stats = stats.BotStats()
    bot_stats.generate_stats_report()
def run_black() -> None:
    """Run black formatting check."""
    # --diff makes this a check-only run: changes are printed, not applied.
    cmd = [
        'black',
        '--skip-string-normalization',
        '--diff',
        '--color',
        '--line-length',
        '79',
        '--target-version',
        'py39',
        ROOT_DIR,
    ]
    print('Running black...')
    # Call black via subprocess.
    output = subprocess.check_output(cmd)
    print(str(output.decode('utf-8')))
def run_prerelease() -> None:
    """Run the pre-release code.

    Each step runs unless its corresponding --skip* flag was passed.
    """
    # Get the arguments.
    args = get_args()
    if not args.get('skipdocs', False):
        generate_docs()
    if not args.get('skipstats', False):
        generate_stats()
    if not args.get('skipblack', False):
        run_black()
if __name__ == '__main__':
    run_prerelease()
| amorphousWaste/nori_ui | prerelease.py | prerelease.py | py | 2,811 | python | en | code | 1 | github-code | 13 |
73389309778 | import logging
import sys
import os
from rubikscube import Cube, HalfTurnMetric
import unittest
import timeit
class TestBenchMarkEnv(unittest.TestCase):
    """Micro-benchmarks for the Rubik's cube primitives.

    Timings are written to the 'BenchLogger' debug log; nothing is
    asserted, so these "tests" always pass.
    """
    def setUp(self):
        # 1e7 iterations per timeit measurement.
        self.trials = int(1e7)
        self.log = logging.getLogger('BenchLogger')
    def test_turn_repr_solved(self):
        """Time turn() + representation() + solved() together."""
        t_turn_repr_solve = timeit.timeit(
            'cube.turn(0);cube.representation();cube.solved()',
            setup='from rubikscube import Cube;cube=Cube.cube_htm()',
            number=self.trials)
        self.log.debug(
            f"time -- turn + repr + solved ::: {t_turn_repr_solve}")
    def test_turn_repr(self):
        """Time turn() + representation()."""
        t_turn_repr = timeit.timeit(
            'cube.turn(0);cube.representation()',
            setup='from rubikscube import Cube;cube=Cube.cube_htm()',
            number=self.trials)
        self.log.debug(f"time -- turn + repr ::: {t_turn_repr}")
    def test_turn(self):
        """Time a bare turn()."""
        t_turn = timeit.timeit(
            'cube.turn(0)',
            setup='from rubikscube import Cube;cube=Cube.cube_htm()',
            number=self.trials)
        self.log.debug(f"time -- turn ::: {t_turn}")
    def test_env(self):
        """Time one step() through the gym-style environment wrapper."""
        t_env = timeit.timeit(
            'env.step(0)',
            setup=
            "from rubikscube import HalfTurnMetric;from env import CubeEnv;env=CubeEnv('half-turn');env.reset()",
            number=self.trials)
        self.log.debug(f"time -- env ::: {t_env}")
if __name__ == "__main__":
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
logging.basicConfig(stream=sys.stderr)
logging.getLogger("BenchLogger").setLevel(logging.DEBUG)
unittest.main()
| h4rr9/rcube | train/tests/test_bench.py | test_bench.py | py | 1,657 | python | en | code | 0 | github-code | 13 |
24997465360 | #ะะพะดัะปั gemes
# ะะตะผะพะฝัััะธััะตั ัะพัะดะฐะฝะธะต ะผะพะดัะปั
def ask_yes_no(question):
    """Ask a yes/no question; keep prompting until 'y' or 'n' is entered."""
    response = None
    while response not in ("y", "n"):
        response = input(question + ' (y/n)? ').lower()
    return response
#
def ask_number(question, low, high):
    """Ask for an integer in [low, high]; re-prompt until one is given.

    The original crashed with an uncaught ValueError on non-numeric input;
    invalid entries are now ignored and the question is asked again.
    """
    response = None
    while response not in range(low, high + 1):
        try:
            response = int(input(question))
        except ValueError:
            # Non-numeric input: leave `response` unchanged and re-prompt.
            continue
    return response
if __name__ == "__main__":
print("ะั ะทะฐะฟัััะธะปะธ ะผะพะดัะปั games")
input("\n\nะะฐะผะธัะต Enter, ััะพะฑั ะฒัะนัะธ.")
| Timyr486786866745/black-jack | BJ/games.py | games.py | py | 690 | python | ru | code | 0 | github-code | 13 |
3498097744 | from django.shortcuts import render
from .models import RestOpening , Resturant
from rest_framework.decorators import api_view
from datetime import datetime
from django.views.decorators.csrf import csrf_exempt
import re
from .serializers import RestOpeningSerializer
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django.db.models import Q ,F
# Create your views here.
# Convert a zero-padded 12-hour "HH:MM" + AM/PM string to 24-hour "HH:MM".
def convert24(str1):
    """Translate e.g. "01:30PM" to "13:30"; "12" is special-cased."""
    suffix = str1[-2:]
    if suffix == "AM":
        # Midnight hour maps to 00; any other AM hour is already correct.
        return "00" + str1[2:-2] if str1[:2] == "12" else str1[:-2]
    if suffix == "PM" and str1[:2] == "12":
        # Noon stays 12; just drop the marker.
        return str1[:-2]
    # Remaining PM hours shift forward by 12, keeping the ":MM" part.
    return str(int(str1[:2]) + 12) + str1[2:5]
# Driver Code
def process_time(t_str):
    """Convert an am/pm time string (e.g. '9:30 pm') to a datetime.time.

    Normalises case and whitespace, zero-pads hour and minute, then
    delegates the 12h -> 24h shift to convert24().
    """
    t_str = t_str.lower().replace(" " ,"")
    # Position of the 'm' in the trailing am/pm marker.
    pm_index = t_str.index('m')
    pm_str = t_str[pm_index-1:pm_index+1].upper()
    data = t_str[:pm_index-1].split(':')
    if len(data) == 1 :
        # No minutes given (e.g. '9pm'): default to :00.
        hour = f"{int(data[0]):02d}"
        minutes = '00'
    else:
        hour = f"{int(data[0]):02d}"
        minutes = f"{int(data[1]):02d}"
    time_str = convert24(((':').join([hour, minutes]))+pm_str)
    time_object = datetime.strptime(time_str, '%H:%M').time()
    return time_object
def parse_datetime(my_date, my_time):
    """Parse query-string date and time into (weekday index, datetime.time).

    *my_date* is either 'YYYY-MM-DD' or a weekday name; *my_time* is either
    24-hour 'H:M' or an am/pm string handled by process_time().

    Note: the weekday index is a str for calendar dates but an int for
    weekday names (kept as-is for backward compatibility with callers).
    """
    # Get the day index.  BUG FIX: the format previously used %M (minute)
    # instead of %m (month), so every calendar date was parsed as January
    # and the weekday came out wrong.
    try:
        int(my_date[0])
        my_day_idx = str(datetime.strptime(my_date, "%Y-%m-%d").weekday())
    except (ValueError, IndexError):
        # Not a calendar date: match the first three letters of a day name.
        day_map = ('mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun')
        my_day_idx = day_map.index(my_date[:3].lower())
    # Get the time and check whether it is in am/pm format.
    my_time = my_time.lower().replace(" ", "")
    if not re.search('m', my_time):
        # Plain 24-hour format: zero-pad and normalise.
        data = my_time.split(':')
        hour = f"{int(data[0]):02d}"
        minutes = f"{int(data[1]):02d}"
        q_time = datetime.strptime((':').join([hour, minutes]), '%H:%M').time()
    else:
        q_time = process_time(my_time)
    return my_day_idx, q_time
def get_unique(ordered_dicts):
    """De-duplicate dicts by 'Name', keeping each name's last occurrence.

    Output order follows the first appearance of each name.
    """
    last_index = {entry['Name']: pos for pos, entry in enumerate(ordered_dicts)}
    return [ordered_dicts[pos] for pos in last_index.values()]
@csrf_exempt
@api_view(["GET"])
def get_available_resturants(request):
    """Return the restaurants open at the query-string 'date' and 'time'.

    Opening windows that wrap past midnight (start > end) are matched by a
    second query; results are de-duplicated by restaurant name.
    """
    q_day , q_time = parse_datetime(request.query_params['date'] , request.query_params['time'])
    # query_set = RestOpening.objects.filter(day= q_day, st_time__lte =q_time , end_time__gt=q_time)
    # Same-day windows: start <= end and start <= t < end.
    list_1 = RestOpening.objects.filter( Q(st_time__lte=F('end_time')), Q(st_time__lte=q_time), end_time__gt=q_time ,day=q_day)
    # Overnight windows: start > end, open when t >= start or t < end.
    list_2 = RestOpening.objects.filter(Q(st_time__gt=F('end_time')), Q(st_time__lte=q_time) | Q(end_time__gt=q_time) ,day=q_day )
    concat_list = list_1 | list_2
    serlized = RestOpeningSerializer(concat_list , many = True)
    return Response(get_unique(serlized.data) , status = status.HTTP_200_OK)
# class GetResturants(APIView):
# @csrf_exempt
# def get(self, request):
# q_day , q_time = parse_datetime(request.query_params['date'] , request.query_params['time'])
# query_set = RestOpening.objects.filter(day= q_day, st_time__lte =q_time , end_time__gte=q_time)
# list_1 = RestOpening.objects.filter(Q(st_time__lte=F('end_time')), Q(st_time__lte=q_time), end_time__gte=q_time)
# list_2 = RestOpening.objects.filter(Q(st_time__gt=F('end_time')), Q(st_time__lte=q_time) | Q(end_time__gte=q_time))
# concat_list = list_1 | list_2
# serlized = RestOpeningSerializer(concat_list , many = True)
# return Response(serlized.data , status = status.HTTP_200_OK)
| abdullahalsaidi16/resturant_opening_hours | api/views.py | views.py | py | 3,908 | python | en | code | 0 | github-code | 13 |
23723605180 | #!/usr/bin/env python3
import pdb, csv, os
from datetime import datetime
from PaySlip import PaySlip
from CsvFile import CsvFile
if __name__ == "__main__":
src_field_names = ['First Name', 'Last Name', 'Annual Salary', 'Super Rate', 'Payment Start Date']
out_field_names = ['Name', 'Pay Period', 'Gross Income', 'Income Tax', 'Net Income', 'Super']
print("\nStarting to read input CSV file ..........")
staff_info_list = CsvFile(os.path.join(os.path.dirname(__file__), 'input.csv'), src_field_names).read()
pay_slip_list = [PaySlip(*item) for item in staff_info_list]
for item in pay_slip_list: print(" {}".format(item))
print("Completed input CSV file reading..........")
# pdb.set_trace()
print("\nWriting following PaySlips into output CSV file ..........")
CsvFile(os.path.join(os.path.dirname(__file__), 'output.csv'), out_field_names).write(pay_slip_list)
print("Completed output CSV file writing..........................\n")
| iascending/pay_slip | src/myob-exercise.py | myob-exercise.py | py | 989 | python | en | code | 0 | github-code | 13 |
39660717314 | # This code contains various helper functions used to process household survey data with pandas
import pandas as pd
import numpy as np
import time
import h5toDF
import imp
import scipy.stats as stats
import math
def round_add_percent(number):
    """Round a number to 2 decimal places and append a percent sign.

    Raises:
        ValueError: if *number* is a string or None.
    """
    # BUG FIX: the original tested `type(number) == None`, which is always
    # False (a type object never equals the value None), so None slipped
    # through to round() and raised TypeError instead of ValueError.
    if isinstance(number, str) or number is None:
        raise ValueError("Not float type, cannot process")
    outnumber = str(round(number, 2)) + '%'
    return outnumber
def remove_percent(input = str):
    """Strip a trailing percent sign and return the numeric part as a float.

    Raises:
        TypeError: if *input* is empty, does not end in '%', or the part
            before the '%' is not parseable as a number.
    """
    # Guard the empty string before indexing; the original raised a bare
    # IndexError on "".  Also uses input[-1] instead of input[len(input)-1].
    if not input or input[-1] != '%':
        raise TypeError("No percent string present")
    try:
        return float(input[:-1])
    except ValueError:
        raise TypeError("Woah, " + input + "'s not going to work. I need a string where everything other than the last character could be a floating point number.")
#Functions based on formulas at http://www.nematrian.com/R.aspx?p=WeightedMomentsAndCumulants
def weighted_variance(df_in, col, weights):
    """Population variance of df_in[col] weighted by df_in[weights].

    NOTE(review): relies on a `weighted_average` helper that is not defined
    in this module's visible code -- confirm it is provided elsewhere.
    Side effect: overwrites/creates the scratch column df_in['sp'].
    """
    wa = weighted_average(df_in, col, weights)
    df_in['sp'] = df_in[weights] * (df_in[col] - wa) ** 2
    n_out = df_in['sp'].sum() / df_in[weights].sum()
    return n_out
def weighted_skew(df_in, col, weights):
    """Weighted skewness of df_in[col]: mean of ((x - mean)/std)**3.

    Same caveats as weighted_variance: depends on the external
    `weighted_average` helper and clobbers the scratch column df_in['sp'].
    """
    wa = weighted_average(df_in, col, weights)
    wv = weighted_variance(df_in, col, weights)
    df_in['sp'] = df_in[weights] * ((df_in[col] - wa) / (math.sqrt(wv))) ** 3
    n_out = df_in['sp'].sum() / df_in[weights].sum()
    return n_out
def weighted_kurtosis(df_in, col, weights, excess = True): #Gives the excess kurtosis
    """Weighted kurtosis of df_in[col]; subtracts 3 when excess=True.

    Same caveats as weighted_variance: depends on the external
    `weighted_average` helper and clobbers the scratch column df_in['sp'].
    """
    wa = weighted_average(df_in, col, weights)
    wv = weighted_variance(df_in, col, weights)
    df_in['sp'] = df_in[weights] * ((df_in[col] - wa) / (math.sqrt(wv))) ** 4
    if excess:
        # Excess kurtosis: a normal distribution scores 0.
        n_out = df_in['sp'].sum() / df_in[weights].sum() - 3
    else:
        n_out = df_in['sp'].sum() / df_in[weights].sum()
    return n_out
def recode_index(df,old_name,new_name): #Recodes index
    """Rename a DataFrame's index from *old_name* to *new_name*.

    Copies the index into a column called *new_name*, drops the column the
    old index turns into after reset_index(), and sets the new column as
    the index.  Note: the first assignment mutates the caller's frame; the
    remaining steps operate on the copy reset_index() returns.
    """
    df[new_name]=df.index
    df=df.reset_index()
    del df[old_name]
    df=df.set_index(new_name)
    return df
def min_to_hour(input, base): #Converts minutes since a certain time of the day to hour of the day
    """Map a Series of minutes-since-hour-*base* to 'HH - HH' bucket labels.

    Builds a lookup for every minute of a 24h span starting at hour *base*
    (wrapping past midnight), then applies it with Series.map.  Note the
    zero-padding is asymmetric around hour 9 by construction.
    """
    timemap = {}
    for i in range(0, 24):
        if i + base < 24:
            # Hours that fall before midnight.
            for j in range(0, 60):
                if i + base < 9:
                    timemap.update({i * 60 + j: '0' + str(i + base) + ' - 0' + str(i + base + 1)})
                elif i + base == 9:
                    timemap.update({i * 60 + j: '0' + str(i + base) + ' - ' + str(i + base + 1)})
                else:
                    timemap.update({i * 60 + j: str(i + base) + ' - ' + str(i + base + 1)})
        else:
            # Hours that wrap past midnight into the next day.
            for j in range(0, 60):
                if i + base - 24 < 9:
                    timemap.update({i * 60 + j: '0' + str(i + base - 24) + ' - 0' + str(i + base - 23)})
                elif i + base - 24 == 9:
                    timemap.update({i * 60 + j: '0' + str(i + base - 24) + ' - ' + str(i + base - 23)})
                else:
                    timemap.update({i * 60 + j:str(i + base - 24) + ' - ' + str(i + base - 23)})
    output = input.map(timemap)
    return output
def all_same(items):
    """Return True when every element equals the first (True for empty)."""
    for item in items:
        if item != items[0]:
            return False
    return True
def to_percent(y, position): #Converts a number to a percent
    """Matplotlib FuncFormatter callback: format tick value *y* as a percent.

    NOTE(review): depends on module globals `found` and `matplotlib` that
    are not defined in this module's visible code -- confirm they are set
    by the importing script.  When `found` is falsy a bare number is
    returned instead of a formatted string.
    """
    global found
    if found:
        # Ignore the passed in position. This has the effect of scaling the default
        # tick locations.
        s = str(100 * y)
        # The percent symbol needs escaping in latex
        if matplotlib.rcParams['text.usetex'] == True:
            return s + r'$\%$'
        else:
            return s + '%'
    else:
        print('No matplotlib')
        return 100 * y
def variable_guide(guide_file):
    ''' loads a categorical variable dictionary as a dataframe. '''
    # Delegates to the project's h5toDF helpers: fetch the guide frame,
    # then convert it into a dict.
    guide = h5toDF.get_guide(guide_file)
    return h5toDF.guide_to_dict(guide)
def load_survey_sheet(file_loc, sheetname):
''' load excel worksheet into dataframe, specified by sheetname '''
return pd.io.excel.read_excel(file_loc, sheetname=sheetname) | psrc/travel-studies | 2014/region/summary/scripts/helpers.py | helpers.py | py | 4,203 | python | en | code | 5 | github-code | 13 |
31346876690 | ## program to find result of arithmatic operations
## using user defined functions +, -,*,/,%,**
##
##input : 2 numbers , opration
##output : Result depending on operation
##operation: functions, conditional stmts
def add2(x, y):
    """Print the sum of x and y."""
    print("The sum is", x + y)

def sub2(x, y):
    """Print the difference of x and y."""
    print("The Difference is", x - y)

def mul2(x, y):
    """Print the product of x and y."""
    print("The product is", x * y)

def div2(x, y):
    """Print the quotient of x and y, guarding against division by zero."""
    if y == 0:
        print("Cannot divide by zero")
    else:
        print("The quotient is", x / y)

def mod2(x, y):
    """Print the remainder of x divided by y, guarding against zero."""
    if y == 0:
        print("Cannot divide by zero")
    else:
        print("The remainder is", x % y)

def pow2(x, y):
    """Print x raised to the power y."""
    print("The power is", x ** y)

# Read the two operands and the operator, then dispatch to the matching
# helper.  The original prompt advertised /, % and ** but silently ignored
# them; those operators are now implemented, and the prompt typo
# "operrator" is fixed.
a = int(input("Enter the first number :"))
b = int(input("Enter the Second number :"))
c = input("Enter the operator (+, -,*,/,%,**) :")
if c == "+":
    add2(a, b)
elif c == "-":
    sub2(a, b)
elif c == "*":
    mul2(a, b)
elif c == "/":
    div2(a, b)
elif c == "%":
    mod2(a, b)
elif c == "**":
    pow2(a, b)
else:
    print("Unknown operator:", c)
| bcshylesh/PythonPrograms | ArithmaticFunction.py | ArithmaticFunction.py | py | 574 | python | en | code | 0 | github-code | 13 |
39476963410 | #!/usr/bin/env python3
from jsread import jsread
from settings import *
import argparse
import sys
sys.path.append("../atp")
from channel import Channel
import socket
import pyinotify
import re
import time
from threading import Thread
class SpeedOrder(Thread):
    """Background thread that periodically turns the latest joystick axes
    into a speed command on the asserv channel.

    Only the most recent (x, y, z) triple is kept; a command is sent every
    *delay* seconds and only when the values actually changed.
    """
    # TODO: use a mutex on x and y, and use a condition in run
    def __init__(self, asserv, delay = DELAY):
        super().__init__()
        self.asserv = asserv
        self.delay = delay
        # Current and last-sent axis values.
        self.x = 0
        self.old_x = 0
        self.y = 0
        self.old_y = 0
        self.z = 0
        self.old_z = 0
    def update(self, x, y, z):
        """Record the newest joystick axis values (sent later by run())."""
        self.x = x
        self.y = y
        self.z = z
    def run(self):
        """Poll every *delay* seconds; push a command when the axes changed."""
        while True:
            time.sleep(self.delay)
            if self.x != self.old_x or self.y != self.old_y or self.z != self.old_z:
                self.old_x = self.x
                self.old_y = self.y
                self.old_z = self.z
                self.send_command(self.x, self.y, self.z)
    def send_command(self, _x, _y, _z):
        """Scale raw 16-bit axis values to speed/rotation/z and send them."""
        #print(_x, _y, _z)
        from math import floor, ceil
        #x = (_x * Vmax) / 32767
        #y = (_y * Vmax) / 32767
        #left = - round((Vmax * (y + x)) / (Vmax + abs(x)))
        #right = - round((Vmax * (y - x)) / (Vmax + abs(x)))
        # Axes arrive as signed 16-bit values; map onto [-Vmax, Vmax],
        # [-Omax, Omax] and [0, Zmax] respectively.
        v = - round((_y * Vmax) / 32767)
        theta = - round((_x * Omax) / 32767)
        z = round((_z * Zmax) / 65536 + Zmax / 2)
        #print("[%+3d %+3d] (%4d)" %(right, left, z))
        print("[%+3d] (%+3d) (%4d)" %(v, theta, z))
        self.asserv.speedOmega(v/100.0, theta/100.0, 1, 1, 1, 1)
class Processor:
    """Bridges joystick events to the robot: axes drive the asserv speed
    thread, rising button edges trigger mother-board actions."""
    def __init__(self, host, port):
        # Channel to the motion controller (asserv), on PORT+5.
        self.sock = socket.socket()
        self.sock.connect((HOST, PORT+5))
        self.asserv_file = self.sock.makefile(mode='rw')
        self.asserv = Channel(self.asserv_file.buffer,
                lambda name, args: name, proto = 'asserv')
        self.states = None
        # Throttling thread for speed commands derived from the axes.
        self.speed = SpeedOrder(self.asserv)
        self.speed.start()
        # Channel to the mother board (actuators), on PORT+6.
        self.sock2 = socket.socket()
        self.sock2.connect((HOST, PORT+6))
        self.mother_file = self.sock2.makefile(mode='rw')
        self.mother = Channel(self.mother_file.buffer,
                lambda name, args: name, proto = 'mother')
    def event(self, axes, buttons):
        """Joystick callback: route axes to the speed thread and button
        presses (0 -> 1 transitions) to robot actions.

        Holding axis 2 negative ("pince" mode) remaps buttons 0-3 from the
        candle actions to the glass-gripper actions.
        """
        #print(axes, buttons)
        self.pince = axes[2] < 0
        self.speed.update(axes[0], axes[1], axes[2])
        if self.states and len(self.states) == len(buttons):
            for i in range(len(buttons)):
                # Act only on a press transition, not while held down.
                if self.states[i] == 0 and buttons[i] == 1:
                    #print("Button %d pressed!" %i)
                    if self.pince:
                        if i == 2:
                            self.mother.sortirPince()
                        elif i == 3:
                            self.mother.getNombreVerres()
                        elif i == 0:
                            self.mother.chopperVerre()
                        elif i == 1:
                            self.mother.lacherVerres()
                    else:
                        if i == 2:
                            self.mother.BougiesOn()
                        elif i == 3:
                            self.mother.BougiesOff()
                        elif i == 0:
                            self.mother.BougiesHitBot()
                        elif i == 1:
                            self.mother.BougiesHitTop()
                    # Buttons 4-7 are mode-independent.
                    if i == 4:
                        self.mother.startAX12()
                    elif i == 5:
                        self.mother.FunnyAction()
                    elif i == 6:
                        self.mother.stopAX12()
                    elif i == 7:
                        self.asserv.stop()
        self.states = buttons
class MyHandler(pyinotify.ProcessEvent):
    """inotify handler that attaches a jsread loop to every joystick
    device node appearing in the watched directory."""
    def my_init(self):
        # pyinotify calls my_init() instead of __init__ for custom setup.
        self.processor = Processor(host, port)
        self.re = re.compile(REGEXP)
    def open(self, name, pathname):
        """Start reading events from *pathname* if it looks like a joystick."""
        if self.re.match(name):
            print("Opening %sโ€ฆ" %pathname)
            # Give udev a moment to finish creating the device node.
            time.sleep(0.1)
            jsread(LIB, pathname, self.processor.event)
    def process_IN_CREATE(self, event):
        # A new device node appeared in the watched directory.
        self.open(event.name, event.pathname)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Control robot with joystick.', add_help = False)
    parser.add_argument('-d', '--dir', dest='devices', help='Dir to watch for new joystick device.')
    parser.add_argument('-l', '--lib', dest='lib', help='Lib to use.')
    parser.add_argument('-h', '--host', dest='host', help='Connect to the specified host.')
    parser.add_argument('-p', '--port', dest='port', help='Base port to compute port to connect.')
    args = parser.parse_args()
    # Fall back to the module-level defaults for any missing option.
    if args.devices:
        devices = args.devices
    else:
        devices = DEVICES
    # NOTE(review): `lib` is computed here but MyHandler.open() calls
    # jsread with the LIB constant -- confirm whether -l is meant to work.
    if args.lib:
        lib = args.lib
    else:
        lib = LIB
    if args.host:
        host = args.host
    else:
        host = HOST
    if args.port:
        port = args.port
    else:
        port = PORT
    # Watch for joystick device nodes created after startup...
    wm = pyinotify.WatchManager()
    handler = MyHandler()
    notifier = pyinotify.Notifier(wm, default_proc_fun=handler)
    wm.add_watch(devices, pyinotify.IN_CREATE)
    # ...and also attach to any joystick already present.
    import glob
    import os.path
    for device in glob.glob(os.path.join(devices, '*')):
        handler.open(os.path.basename(device), device)
    notifier.loop()
| 7Robot-Soft/jsbot | jsbot.py | jsbot.py | py | 5,385 | python | en | code | 0 | github-code | 13 |
13103657254 | # https://www.acmicpc.net/problem/1744
from sys import stdin
from bisect import bisect_left, bisect_right
input = stdin.readline
# BOJ 1744: combine N integers with + or * to maximise the total.
# Greedy: pair negatives together (their products are positive), pair large
# positives together, and simply add every 1 (x + 1 beats x * 1).
N = int(input())
numbers = sorted([int(input()) for _ in range(N)])
ans = 0
has_zero = True if 0 in numbers else False
has_one = True if 1 in numbers else False
# first_zero = count of negatives; [first_one, last_one) = the run of 1s.
first_zero = bisect_left(numbers, 0)
first_one = bisect_left(numbers, 1)
last_one = bisect_right(numbers, 1)
num_ones = last_one - first_one
if first_zero > 0:
    if first_zero % 2 == 1:
        # Odd count of negatives: the largest one is left unpaired; it is
        # absorbed by a zero when available, otherwise added to the answer.
        first_zero -= 1
        if not has_zero:
            ans += numbers[first_zero]
    # Multiply the remaining negatives in adjacent pairs.
    for i in range(1, first_zero, 2):
        ans += numbers[i-1] * numbers[i]
if has_one:
    # Each 1 contributes more as a summand than as a factor.
    ans += num_ones
if last_one < N:
    if (N - last_one) % 2 == 1:
        # Odd count of positives > 1: the smallest one is added alone.
        ans += numbers[last_one]
        last_one += 1
    # Multiply the remaining positives in adjacent pairs.
    for i in range(last_one, N-1, 2):
        ans += numbers[i] * numbers[i+1]
print(ans)
| olwooz/algorithm-practice | practice/2022_08/220830_Baekjoon_1744_BindNumbers/220830_Baekjoon_1744_BindNumbers.py | 220830_Baekjoon_1744_BindNumbers.py | py | 881 | python | en | code | 0 | github-code | 13 |
43592727973 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Date : 2018-02-11 17:20:15
# @Author : fxb1rd (w1589534127@outlook.com)
# @Link : http://
# @Version : $Id$
# Only works on a sorted list.
def binary_search(list, item):
    """Return the index of *item* in the sorted *list*, or None if absent."""
    low = 0
    high = len(list) - 1
    while low <= high:
        # BUG FIX: the midpoint must be the average of the bounds; the
        # original `mid = (low + high)` degenerated the search into a
        # linear scan from the top of the list.
        mid = (low + high) // 2
        guess = list[mid]  # probe the middle element
        if guess == item:
            return mid  # found: return its position
        elif guess > item:
            high = mid - 1  # discard the upper half
        else:
            low = mid + 1  # discard the lower half
    return None  # item is not present
# Demo: search the sample list for a present value (index 3 expected).
my_list = [2,4,5,6,8,9]
print(binary_search(my_list,6))
print(binary_search(my_list,10)) | Fxb1rd/Algorithm_learning | Algorithm_diagram/ไบๅๆฅๆพ.py | ไบๅๆฅๆพ.py | py | 625 | python | en | code | 0 | github-code | 13 |
9753665758 | from main import Main
import itertools
# Hyperparameters
# Fixed training settings shared by every grid-search run.
BATCH = 32
EPOCH = 100
SEED = 5
VAL_RATIO = 0.1
EARLY_STOP = -1
REPORT = 'best'
DEVICE = 'cuda'
MODEL_PATH = ''
# Grid-search candidate lists (one entry each here, so a single run).
slide_win = [20]
dim = [64]
slide_stride = [1]
out_layer_num = [3]
out_layer_inter_dim = [128]
decay = [0]
topk = [20]
dataset = [
    'adasyn_1'
]
# Cartesian product over the candidate lists; each item is one config tuple:
# (slide_win, dim, slide_stride, out_layer_num, out_layer_inter_dim, decay,
#  topk, dataset).
combi = itertools.product(slide_win, dim, slide_stride, out_layer_num, out_layer_inter_dim, decay, topk, dataset)
for item in combi:
    train_config = {
        'batch': BATCH,
        'epoch': EPOCH,
        'slide_win': item[0],
        'dim': item[1],
        'slide_stride': item[2],
        'comment': item[7],
        'seed': SEED,
        'out_layer_num': item[3],
        'out_layer_inter_dim': item[4],
        'decay': item[5],
        'val_ratio': VAL_RATIO,
        'topk': item[6],
        'early_stop': EARLY_STOP,
    }
    # The dataset name doubles as the save path and the run comment.
    env_config={
        'save_path': item[7],
        'dataset': item[7],
        'report': REPORT,
        'device': DEVICE,
        'load_model_path': MODEL_PATH,
    }
    main = Main(train_config, env_config, debug=False)
    main.run()
| CKAbundant/Project | GDN/wrapper.py | wrapper.py | py | 1,104 | python | en | code | 0 | github-code | 13 |
5125727784 | # pypy
import sys
N = int(input())
matrixs : list = []
for i in range(N):
matrixs.append(list(map(int, sys.stdin.readline().split())))
dp = [[0]*N for _ in range(N)]
for i in range(1, N):
for j in range(N-i):
if i == 1:
dp[j][j+i] = matrixs[j][0]*matrixs[j][1]*matrixs[j+1][1]
continue
dp[j][j+i] = 2**32
for k in range(j, j+i):
dp[j][j+i] = min(dp[j][j+i],
dp[j][k]+dp[k+1][j+i]+matrixs[j][0]*matrixs[k][1]*matrixs[j+i][1])
print(dp[0][N-1])
| JeongHooon-Lee/ps_python_rust | 2022_4/11049.py | 11049.py | py | 552 | python | en | code | 0 | github-code | 13 |
73875534097 | import torch
import torch.nn as nn
from modules.view import View
class Encoder(nn.Module):
def __init__(self, latent_size: int):
super().__init__()
self.__sequential_blocks = [
nn.Flatten(start_dim=1),
nn.Linear(28 * 28, 200),
nn.ReLU(),
nn.Linear(200, 200),
nn.ReLU(),
nn.Linear(200, 200),
nn.ReLU(),
nn.Linear(200, latent_size)
]
self.main = nn.Sequential(*self.__sequential_blocks)
def forward(self, input_images: torch.Tensor):
assert input_images.size(1) == 1 and input_images.size(2) == 28 and input_images.size(3) == 28
encoded_latent = self.main(input_images)
return encoded_latent
class Decoder(nn.Module):
def __init__(self, latent_size: int):
super().__init__()
self.__sequential_blocks = [
nn.Linear(latent_size, 200),
nn.ReLU(),
nn.Linear(200, 200),
nn.ReLU(),
nn.Linear(200, 200),
nn.ReLU(),
nn.Linear(200, 28 * 28),
nn.Sigmoid(),
View(-1, 1, 28, 28)
]
self.main = nn.Sequential(*self.__sequential_blocks)
def forward(self, input_latent: torch.Tensor):
decoded_images = self.main(input_latent)
assert decoded_images.size(1) == 1 and decoded_images.size(2) == 28 and decoded_images.size(3) == 28
return decoded_images
| gmum/cwae-pytorch | src/architectures/mnist.py | mnist.py | py | 1,515 | python | en | code | 6 | github-code | 13 |
43822384035 | """
Save segment files to mongodb
format:
word_dict:
{
"word": "ไปฃ้ฉพ",
"length": 2,
"pinyin": {
"vowels": [
"ia",
"ai"
],
"tones": [
"4",
"4"
],
"initials": [
"j",
"d"
]
},
"updated_date": ISODate("2017-05-21T15:43:48.062Z")
}
char_dict:
{
"hanzi": "ไธ",
"unicode": "U+4E1C",
"pinyin": [
"dลng"
],
"updated_date": ISODate("2017-05-15T13:56:20.886Z"),
"freq_level": 1
}
"""
import os
import logging
import concurrent.futures
from utils import word_dict, char_dict, upsert_db, get_pinyin
logger = logging.getLogger(__name__)
executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)
DIR_DICTIONARY = 'dictionaries'
def add_task(func):
def __decorator(coll):
logger.info('Starting deal with %s', coll.full_name)
# func(coll)
executor.submit(func, coll)
return __decorator
@add_task
def insert_word_dict(db_collection):
good = 0
files = ['seg-added-words-v6.txt', 'seg-cn-word-dictionary.txt']
files = [os.path.join(DIR_DICTIONARY, d) for d in files]
for fn in files:
with open(fn, 'r', encoding='utf-8') as fp:
content = [line.strip() for line in fp]
for word in content:
# ๅปๆไธญๆ้ๅท ๏ผ
word.replace('๏ผ', '')
upsert_db(
['word'],
{
'word': word,
'length': len(word),
'pinyin': get_pinyin(word)
},
db_collection
)
good = good + 1
logger.info('Done with %s, success item: %d, failed item: 0',
db_collection.full_name, good)
def _deal_with_char_dict(filename, db_collection):
good, bad = 0, 0
with open(filename, 'r', encoding='utf-8') as fp:
for line in fp:
line = line.strip()
if line.startswith('#'):
continue
result = line.split()
if (len(result) != 4) or ('#' not in result):
bad = bad + 1
logger.warning(
'%s, Invalid line schema, can not insert to db.',
str(result))
continue
upsert_db(
['hanzi'],
{
'unicode':
result[0][:-1] if result[0][-1] == ':' else result[0],
'pinyin':
get_pinyin(result[3]),
'hanzi':
result[3],
},
db_collection
)
good = good + 1
return good, bad
@add_task
def insert_char_dict(db_collection):
files = ['seg-pinyin.txt', 'seg-zdic.txt']
files = [os.path.join(DIR_DICTIONARY, d) for d in files]
# ๅ
ๅญ seg-zdic ็๏ผๅ็จ seg-pinyin ็ๅป่ฆ็
# ๆ ผๅผไธพไพ๏ผU+3469: luรณ # ใฉ
good, bad = _deal_with_char_dict(files[1], db_collection)
append = _deal_with_char_dict(files[0], db_collection)
good, bad = good + append[0], bad + append[1]
logger.info('Done with %s, success item: %d, failed item: %d',
db_collection.full_name, good, bad)
logger.info('Starting deal with frequent hanzi table in %s',
db_collection.full_name)
insert_char_freq(db_collection)
def insert_char_freq(db_collection):
good, bad = 0, 0
level = 0
files = ['seg-ๅธธ็จๆฑๅญ่กจ.txt', 'seg-้็จๆฑๅญ่ง่่กจ.txt']
files = [os.path.join(DIR_DICTIONARY, d) for d in files]
with open(files[0], 'r', encoding='utf-8') as fp:
content = [line.strip() for line in fp if len(line.strip()) == 1]
for d in content:
upsert_db(
['hanzi'],
{
'hanzi': d,
'freq_level': level
},
char_dict
)
good = good + 1
with open(files[1], 'r', encoding='utf-8') as fp:
for line in fp:
line = line.strip()
if line.startswith('#'):
level = level + 1
continue
if len(line) != 1:
logger.warning('%s, Invalid line schema, can not insert to db.',
line)
bad = bad + 1
continue
upsert_db(
['hanzi'],
{
'hanzi': line,
'freq_level': level
},
char_dict
)
good = good + 1
logger.info('Done with %s, success item: %d, failed item: %d',
db_collection.full_name, good, bad)
def run():
insert_char_dict(char_dict)
insert_word_dict(word_dict)
executor.shutdown(wait=True)
if __name__ == '__main__':
run()
| tanx-code/levelup | howtorap/dictionaries/script_save_to_db.py | script_save_to_db.py | py | 5,223 | python | en | code | 0 | github-code | 13 |
43360995144 | from django.conf.urls import patterns, url
from BandList import views
urlpatterns = patterns('',
(r'^$', views.base),
(r'^shows/$', views.shows),
(r'^bands/$', views.bands),
(r'^register/$', views.register),
url(r'home/$', views.home, name='home'),
(r'^accounts/login/$', views.user_login),
(r'^add/$', views.add),
(r'^remove/$', views.remove),
) | Goldielocks/bander | BandList/urls.py | urls.py | py | 358 | python | en | code | 0 | github-code | 13 |
17433034612 | from datetime import time ,datetime, timedelta
def check_time_interval(time1, time2):
fmt = '%H:%M:%S'
# get time interval between time1 and time2 as timedelta object
time_interval = datetime.strptime(str(time1), fmt) - datetime.strptime(str(time2), fmt)
return (time_interval >= timedelta(0))
class Menu:
def __init__(self, name, items, start_time, end_time):
self.name = name
self.items = items
self.start_time = start_time
self.end_time = end_time
def get_start_time(self):
return self.start_time
def get_end_time(self):
return self.end_time
def calculate_bill(self, purchased_items):
bill = 0
for purchased_item in purchased_items:
bill += self.items[purchased_item]
return bill
def __repr__(self):
return ("{} menu available from {} GMT to {} GMT".format(self.name, self.start_time, self.end_time))
brunch = Menu("brunch", {
'pancakes': 7.50, 'waffles': 9.00, 'burger': 11.00, 'home fries': 4.50, 'coffee': 1.50, 'espresso': 3.00, 'tea': 1.00, 'mimosa': 10.50, 'orange juice': 3.50
}, time(11), time(16))
early_bird = Menu("early_bird", {
'salumeria plate': 8.00, 'salad and breadsticks (serves 2, no refills)': 14.00, 'pizza with quattro formaggi': 9.00, 'duck ragu': 17.50, 'mushroom ravioli (vegan)': 13.50, 'coffee': 1.50, 'espresso': 3.00,
}, time(15), time(18))
dinner = Menu("dinner", {
'crostini with eggplant caponata': 13.00, 'ceaser salad': 16.00, 'pizza with quattro formaggi': 11.00, 'duck ragu': 19.50, 'mushroom ravioli (vegan)': 13.50, 'coffee': 2.00, 'espresso': 3.00,
}, time(17), time(23))
kids = Menu("kids", {
'chicken nuggets': 6.50, 'fusilli with wild mushrooms': 12.00, 'apple juice': 3.00
}, time(11), time(21))
print(brunch)
print(brunch.calculate_bill(["pancakes", "home fries","coffee"]))
print(early_bird.calculate_bill(["salumeria plate", "mushroom ravioli (vegan)"]))
class Franchise:
def __init__(self, address, menus):
self.address = address
self.menus = menus
def available_menus(self, check_time):
return [menu for menu in self.menus if (check_time_interval(time(check_time), menu.get_start_time()) and check_time_interval(menu.get_end_time(),time(check_time)))]
def __repr__(self):
return ("Welcome to Franchise at {}".format(self.address))
flagship_store = Franchise("1232 West End Road", [brunch, early_bird, dinner, kids])
new_installment = Franchise("12 East Mulberry Street", [brunch, early_bird, dinner, kids])
flagship_store_menus_available_12pm = flagship_store.available_menus(12)
new_installment_menus_available_12pm = new_installment.available_menus(12)
print("menus at flagship_store at 12 noon: ")
for menu in flagship_store_menus_available_12pm:
print(menu)
print(" ")
print("menus at new_installment at 12 noon: ")
for menu in new_installment_menus_available_12pm:
print(menu)
flagship_store_menus_available_5pm = flagship_store.available_menus(17)
new_installment_menus_available_5pm = new_installment.available_menus(17)
print(" ")
print("menus at flagship_store at 5pm: ")
for menu in flagship_store_menus_available_5pm:
print(menu)
print(" ")
print("menus at new_installment at 5pm: ")
for menu in new_installment_menus_available_5pm:
print(menu)
print(" ")
class Business:
def __init__(self, name, franchises):
self.name = name
self.franchises = franchises
first_business = Business("Basta Fazoolin' with my Heart", [flagship_store, new_installment])
arepas_menu = Menu("arepas_menu", {
'arepa pabellon': 7.00, 'pernil arepa': 8.50, 'guayanes arepa': 8.00, 'jamon arepa': 7.50
}, time(10), time(20))
arepas_place = Franchise("189 Fitzgerald Avenue", [arepas_menu])
new_business = Business("Take a' Arepa", [arepas_place])
| bessilfie-nyame/basta-fazoolin | basta_fazoolin.py | basta_fazoolin.py | py | 3,756 | python | en | code | 0 | github-code | 13 |
28326100947 | from odoo import api, fields, models
from odoo.tools.translate import html_translate
class EventType(models.Model):
_inherit = "event.type"
description = fields.Html(
string="Description",
oldname="note",
translate=html_translate,
sanitize_attributes=False,
readonly=False,
)
class Event(models.Model):
_inherit = "event.event"
department_id = fields.Many2one("hr.department", string="Department")
duration = fields.Float(
string="Duration",
compute="_compute_duration",
store=True,
help="hours",
)
co_organizer_id = fields.Many2one("res.partner", string="Co-Organizer")
state = fields.Selection(readonly=False)
@api.depends("date_begin", "date_end")
@api.multi
def _compute_duration(self):
for event in self:
if event.date_begin and event.date_end:
duration = (
event.date_end - event.date_begin
).total_seconds() / 3600
else:
duration = False
event.duration = duration
@api.onchange("event_type_id")
def _onchange_type(self):
res = super()._onchange_type()
if self.event_type_id.description:
self.description = self.event_type_id.description
return res
@api.multi
def confirm_registrations(self):
for event in self:
for registration in event.registration_ids:
registration.confirm_registration()
class EventRegistration(models.Model):
_inherit = "event.registration"
employee_id = fields.Many2one(
comodel_name="hr.employee", string="Employee", required=False
)
state = fields.Selection(readonly=False)
@api.onchange("employee_id")
def _onchange_employee_id(self):
if self.employee_id:
self.name = self.employee_id.name or self.name
self.email = self.employee_id.work_email or self.email
self.phone = self.employee_id.work_phone or self.phone
self.partner_id = self.employee_id.address_home_id or False
# Note that the partner is overwritten with False if not found,
# to prevent inconsistency between partner and employee
@api.onchange("partner_id")
def _onchange_partner_id(self):
if self.partner_id:
contact_id = self.partner_id.address_get().get("contact", False)
if contact_id:
contact = self.env["res.partner"].browse(contact_id)
employees = self.env["hr.employee"].search(
[("address_home_id", "=", contact.id)]
)
if employees:
self.employee_id = employees[0] or False
else:
self.employee_id = False
else:
self.employee_id = False
# Note that the employee is overwritten with False if not found,
# to prevent inconsistency between partner and employee
| odoo-cae/odoo-addons-hr-incubator | hr_cae_event/models/event.py | event.py | py | 3,049 | python | en | code | 0 | github-code | 13 |
2992872703 | import requests
API_VERSION = '5.131'
def get_upload_url(token, group_id):
"""ะะพะปััะธัั ะฐะดัะตั ะดะปั ะทะฐะณััะทะบะธ ัะพัะพ"""
params = {
'access_token': token,
'v': API_VERSION,
'group_id': group_id
}
response = requests.get(
'https://api.vk.com/method/photos.getWallUploadServer',
params=params
)
response.raise_for_status()
response = response.json()
upload_url = handle_response(response)['upload_url']
return upload_url
def upload_photo(token, group_id, filename):
"""ะะฐะณััะทะธัั ัะพัะพ ะฝะฐ ัะตัะฒะตั"""
url = get_upload_url(token, group_id)
files = {
'photo': (filename, open(filename, 'rb'))
}
response_post = requests.post(url, files=files)
response_post.raise_for_status()
photo_upload = response_post.json()
params = {
'access_token': token,
'v': API_VERSION,
'group_id': group_id,
'photo': photo_upload['photo'],
'server': photo_upload['server'],
'hash': photo_upload['hash'],
}
response = requests.get(
'https://api.vk.com/method/photos.saveWallPhoto',
params=params
)
response.raise_for_status()
response = response.json()
return handle_response(response)
def wall_post(token, group_id, photo, message):
"""ะัะปะพะถะธัั ัะพัะพ ะฝะฐ ััะตะฝั ะณััะฟะฟั"""
photo = photo[0]
photo_id = f'photo{photo["owner_id"]}_{photo["id"]}'
params = {
'access_token': token,
'v': API_VERSION,
'attachments': photo_id,
'message': message,
'owner_id': f'-{group_id}',
'from_group': '1'
}
response = requests.get(
'https://api.vk.com/method/wall.post',
params=params
)
response.raise_for_status()
response = response.json()
handle_response(response)
def handle_response(response):
"""ะะฑัะฐะฑะพัะบะฐ ะพัะฒะตัะฐ ะพั API"""
if 'response' in response:
return response['response']
else:
err = response['error']
raise requests.HTTPError(f'{err["error_code"]}: {err["error_msg"]}')
| dmitry-zharinov/xkcd-publisher | vk.py | vk.py | py | 2,168 | python | en | code | 0 | github-code | 13 |
73130288019 | import time
# from bs4 import BeautifulSoup
from tqdm import tqdm
from definitions import NOVEL_URL, TAG_NAME
from driver import driver
from logger import log
from scraper import collect_chapter_content
def get_chapter_count(url=""):
try:
driver.get(url)
book_name = url.removeprefix(NOVEL_URL + "/").removesuffix("/")
# with open(book_name + '.html', 'w', encoding='utf-8') as fp:
# fp.write(driver.page_source)
time.sleep(1)
elements = driver.find_elements(by= TAG_NAME, value='a')
count = 0
for element in elements:
link = element.get_attribute('href')
if link is None:
continue
if not book_name in link:
continue
if not '/chapter' in link:
continue
count += 1
except:
return get_chapter_count(url)
return count
def get_chapter_content_from_novel(url=""):
count = get_chapter_count(url)
chapter_map = {}
for i in range(1, count+1):
chapter_map[i] = collect_chapter_content(url + '/chapter-' + str(i))
return chapter_map
| tejasmr/ScrapeBoxnovel | chapters.py | chapters.py | py | 1,150 | python | en | code | 0 | github-code | 13 |
1362264455 | ################################
# Program name:
# Author: Tom Gill
# Course: CWCT Python Essentials
# Date: 9/16/2021
# Assignment: MOD01A1 Phone List
# Purpose: Write a program that provides a menu-driven digital contact list to the user. The program should utilize a
# file containing names, phone numbers (number and type - such as Cell, Home, Work, etc.) and email addresses (address
# and type).
#
# The program should open the file (if it exists) and populate the program with the contact data.
# The user should then be able to
# search for the data for a given contact name
# add new contacts
# delete contacts
# add/update/delete phone numbers or email addresses for a contact.
# When the program finishes it should create a file (or overwrite the existing file) with the contact information
# Store any functions you create in a package separate from the 'main' menu-driven program/script and import them as
# needed.
# Global variables and imports
import sys
# Functions
def mainMenu():
phoneBook = open("phoneBook.txt","a")
phoneBook.close()
print("\nWelcome to contacts by TOMOOGLE.")
while True:
print("\n*****Contacts by TOMOOGLE*****",
"\nPlease enter your one of the below selections:",
"\nEnter 1 to search for and view a contact.",
"\nEnter 2 to search for and edit a contact.",
"\nEnter 3 to add a new contact.",
"\nEnter 4 to remove a contact.",
"\nEnter 5 to exit.",)
selection = input("Selection: ")
print("\n")
if selection == "5": # Quit Program
print("\nThank you for using TOMOOGLE contacts.")
sys.exit()
elif selection == "1": # search for and return a contact
print("Please enter contacts last name, first name to view their information.")
name = input("Lastname, Firstname: ")
print("\n")
phoneBook = open("phoneBook.txt", "r")
for line in phoneBook:
if line.find(name) != -1:
print(line)
print("Please note: if the name you searched for returns no results, it was not in your contact list.\n")
elif selection == "2": # search for and edit a contact
print("Please enter the exact information you want to replace.")
print("For example, if you want to replace a phone number, "
"enter the phone number exactly as you entered it.")
search_info = input("Enter information to be corrected: ")
replace_info = input("Now please enter the correct information: ")
with open ("phoneBook.txt", "r") as file:
corrected = file.read()
corrected = corrected.replace(search_info, replace_info)
with open("phoneBook.txt", "w") as file:
file.write(corrected)
print("Correction Made")
elif selection == "3": # add new contact
phoneBook = open("phoneBook.txt", "a")
newContact = input("Please enter contacts Lastname: ")
newContact = newContact + ", " + input("Please enter contacts First name: ")
newContact = newContact + "; " + input("Phone number type: ")
newContact = newContact + ": " + input("Phone number: ")
newContact = newContact + "; " + input("Email Type: ")
newContact = newContact + ": " + input("Email address: ")
phoneBook.write(newContact)
phoneBook.write("\n")
phoneBook.close()
continue
elif selection == "4": # remove a contact
with open("phoneBook.txt", "r") as phoneBook:
lines = phoneBook.readlines()
contact = input("Enter Contact name (Last, First) to remove: ")
with open("phoneBook.txt", "w") as phoneBook:
for line in lines:
if line.find(contact) == -1:
phoneBook.write(line)
print("Removed.")
else:
print("Please enter a valid selection, its not hard (1, 2, 3 or 4).")
mainMenu() | Gillt1/Python_Class | Python Class/M01A1_Phone_list/M01A1_Main.py | M01A1_Main.py | py | 4,400 | python | en | code | 0 | github-code | 13 |
15390625407 | import os
import sys
import _init_paths
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from vit_pytorch_loc.vit_pytorch import ViT
from utils.utils import set_gpu, seed_all, _pil_interp, load_partial_weight
from tqdm import tqdm
import argparse
import math
import ipdb
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='running parameters',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# general parameters for data and model
# data parameters
parser.add_argument('--data_path', default='./datasets/cifar10/', type=str, help='path to ImageNet data')
parser.add_argument('--ckpt_path', default='datasets/pretrained_models/base_p16_224_backbone.pth', type=str, help='path to load checkpoint')
parser.add_argument('--batch_size', default=64, type=int, help='mini-batch size for data loader')
parser.add_argument('--workers', default=4, type=int, help='number of workers for data loader')
parser.add_argument('--crop_pct', default=0.9, type=float, help='crop ratio')
parser.add_argument('--interpolation', default='bicubic', type=str, help='interpolation method')
# model parameters
parser.add_argument('--input_size', default=224, type=int, help='size of input')
parser.add_argument('--patch_size', default=16, type=int, help='size of patch')
parser.add_argument('--num_classes', default=10, type=int, help='num_classes')
parser.add_argument('--dim', default=768, type=int, help='dim')
parser.add_argument('--depth', default=12, type=int, help='depth')
parser.add_argument('--heads', default=12, type=int, help='heads')
parser.add_argument('--mlp_dim', default=3072, type=int, help='mlp_dim')
parser.add_argument('--dropout', default=0.1, type=float, help='dropout')
parser.add_argument('--emb_dropout', default=0.1, type=float, help='emb_dropout')
parser.add_argument('--qkv_bias', default=True, type=bool, help='use qkv_bias')
# training parameters
parser.add_argument('--max_epoch', default=200, type=int, help='max epoch')
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
parser.add_argument('--val_per', default=1, type=int, help='validate per epochs')
parser.add_argument('--val_begin', action='store_true', help='validate before training')
parser.add_argument('--save_path', default='./save/cifar10', type=str, help='path to save checkpoints')
parser.add_argument('--save_per', default=2, type=int, help='save ckpt per epochs')
# other parameters
parser.add_argument('--seed', default=1005, type=int, help='random seed for results reproduction')
parser.add_argument('--gpu', default='0', type=str, help='gpu')
args = parser.parse_args()
print('Called With Args:')
for k,v in sorted(vars(args).items()):
print(' ', k,'=',v)
print()
seed_all(args.seed)
set_gpu(args.gpu)
# build validation dataset
data_path = args.data_path
batch_size = args.batch_size
workers = args.workers
img_size = args.input_size # set img_size = input_size
crop_pct = args.crop_pct
interpolation = args.interpolation
scale_size = int(math.floor(img_size / crop_pct))
normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
train_transform = transforms.Compose([
transforms.Resize(scale_size, _pil_interp(interpolation)),
transforms.CenterCrop(img_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
# normalize,
])
val_transform = transforms.Compose([
transforms.Resize(scale_size, _pil_interp(interpolation)),
transforms.CenterCrop(img_size),
transforms.ToTensor(),
# normalize,
])
train_dataset = datasets.CIFAR10(
root=data_path,
train=True,
transform=train_transform)
val_dataset = datasets.CIFAR10(
root=data_path,
train=False,
transform=val_transform)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size = batch_size, shuffle=False,
num_workers=workers, pin_memory=True
)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size = batch_size, shuffle=False,
num_workers=workers, pin_memory=True
)
# ipdb.set_trace()
# build ViT model
input_size = args.input_size
patch_size = args.patch_size
num_classes = args.num_classes
dim = args.dim
depth = args.depth
heads = args.heads
mlp_dim = args.mlp_dim
dropout = args.dropout
emb_dropout = args.emb_dropout
qkv_bias = args.qkv_bias
v = ViT(
image_size = input_size,
patch_size = patch_size,
num_classes = num_classes,
dim = dim,
depth = depth,
heads = heads,
mlp_dim = mlp_dim,
dropout = dropout,
emb_dropout = emb_dropout,
qkv_bias= qkv_bias
)
print('Building ViT Model:\n{}'.format(v))
print()
# initialize save_path
save_path = args.save_path
if not os.path.isdir(save_path):
print('Creating Saving Path: \'{}\''.format(save_path))
os.makedirs(save_path)
else:
print('\033[1;31mWARNING: Saving Path \'{}\' Already Exist. May Cover Saved Checkpoints\033[0m'.format(save_path))
# load weight
ckpt_path = args.ckpt_path
print('Loading Weights from \'{}\''.format(ckpt_path))
print()
weight = torch.load(ckpt_path)
load_partial_weight(v, weight)
v.cuda()
# build optimizer
max_epoch = args.max_epoch
val_per_epoch = args.val_per
save_per_epoch = args.save_per
lr = args.lr
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(v.parameters(),lr=lr,momentum=0.9,weight_decay=1e-4)
torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,max_epoch/4,eta_min=0.0003)
# validate before training
if args.val_begin:
print('Validating before Training')
v.eval()
correct = 0
total = 0
with torch.no_grad():
for data in tqdm(val_loader, desc='Validating'):
imgs, labels = data
imgs, labels = imgs.cuda(), labels.cuda()
output = v(imgs)
_,predict_labels = torch.max(output.data,1)
predict_labels = predict_labels.view(-1)
correct+= torch.sum(torch.eq(predict_labels,labels)).item()
total+=len(labels)
print('Validated on {} Images, Accuracy: {}%'.format(total, correct/total*100.0))
print()
# run train on cifar10
max_acc = 0.0
for epoch in range(1, max_epoch+1):
v.train()
total_train_loss = 0.0
total_train_acc = 0.0
total_data_num = 0
total_train_correct = 0
for data in tqdm(train_loader, desc='Epoch {}'.format(epoch)):
imgs, labels = data
imgs, labels = imgs.cuda(), labels.cuda()
output = v(imgs)
loss = criterion(output, labels)
total_train_loss += loss * imgs.shape[0]
total_data_num += imgs.shape[0]
_,predict_labels = torch.max(output.data,1)
predict_labels = predict_labels.view(-1)
total_train_correct += torch.sum(torch.eq(predict_labels,labels)).item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_train_loss /= total_data_num
total_train_acc = total_train_correct / total_data_num * 100
print('Training Loss: {}, Training Acc: {}%'.format(total_train_loss, total_train_acc))
# run validation
if (epoch%val_per_epoch==0):
v.eval()
correct = 0
total = 0
with torch.no_grad():
for data in tqdm(val_loader, desc='Validating'):
imgs, labels = data
imgs, labels = imgs.cuda(), labels.cuda()
output = v(imgs)
_,predict_labels = torch.max(output.data,1)
predict_labels = predict_labels.view(-1)
correct+= torch.sum(torch.eq(predict_labels,labels)).item()
total+=len(labels)
val_acc = correct/total*100.0
print('Validated Epoch {} on {} Images, Accuracy: {}%'.format(epoch, total, val_acc))
# print('Final Accuracy: %f%%'%(correct/total*100.0))
# save checkpoint
if val_acc > max_acc:
max_acc = val_acc
save_file = 'max_acc_epoch_' + str(epoch) + '.pth'
save_file_path = os.path.join(save_path, save_file)
torch.save(v.state_dict(), save_file_path)
print('Max_Acc Checkpoint Saved to \'{}\''.format(save_file_path))
if (epoch%save_per_epoch==0):
save_file = 'epoch_' + str(epoch)
save_file_path = os.path.join(save_path, save_file)
torch.save(v.state_dict(), save_file_path)
print('Epoch {} Checkpoint Saved to \'{}\''.format(epoch, save_file_path))
print()
# save final weight
print('Training Finished')
save_file = 'final_epoch_' + str(epoch) + '.pth'
save_file_path = os.path.join(save_path, save_file)
torch.save(v.state_dict(), save_file_path)
print('Final Checkpoint Saved to \'{}\''.format(save_file_path))
# run test on cifar10
print()
print('Testing Fine-tuned ViT on Cifar10 Testset')
v.eval()
correct = 0
total = 0
with torch.no_grad():
for data in tqdm(val_loader):
imgs, labels = data
imgs, labels = imgs.cuda(), labels.cuda()
output = v(imgs)
_,predict_labels = torch.max(output.data,1)
predict_labels = predict_labels.view(-1)
correct+= torch.sum(torch.eq(predict_labels,labels)).item()
total+=len(labels)
print('Tested on {} Images'.format(total))
print('Final Accuracy: %f%%'%(correct/total*100.0))
| Sebastian-X/vit-pytorch-with-pretrained-weights | tools/cifar10_finetune.py | cifar10_finetune.py | py | 10,283 | python | en | code | 5 | github-code | 13 |
42596553634 | import configparser
import random
import requests
import mysql.connector as mysql
import re
import argparse
import platform
import os
import time
parser = argparse.ArgumentParser(description = "GNS3 Management Tool")
parser.add_argument("-o", "--optie", help = "Opties: aanmaken, verwijderen, exporteren, importeren", required = False, default = "")
parser.add_argument("-p", "--projectnaam", help = "Naam van het (nieuwe)project", required = False, default = "")
parser.add_argument("-b", "--bevesteging", help = "Bevestiging", required = False, default = "")
argument = parser.parse_args()
status = False
if argument.optie:
status = True
option = argument.optie
if argument.projectnaam:
status = True
project_name = argument.projectnaam
if argument.bevesteging:
status = True
conformation = argument.bevesteging
sleepcounter = 2
option = ""
start = "on"
# Bepalen van schoonmaak commando op basis van OS
sys = platform.system()
if sys == "Windows":
clear = "cls"
elif sys == "Linux" or "Darwin":
clear = "clear"
# Config inlezen
config = configparser.ConfigParser()
config.read('config.ini')
gns3_server = config['default']['gns3_server']
# Database config ophalen
db = mysql.connect(
host = config['database']['host'],
user = config['database']['user'],
passwd = config['database']['pwd'],
database = config['database']['database'],
)
cursor = db.cursor()
def create ():
go = "on"
os.system(clear)
if argument.projectnaam == "":
print ("Wat is de naam van het nieuwe project?")
project_name = input()
getprojectname = """SELECT name FROM `projects` WHERE `name` = %s"""
cursor.execute(getprojectname, (project_name, ))
fetch = cursor.fetchall()
clean = str(fetch)
sql_projectname = re.sub(r'[^\w\s]', '', clean)
#Als de projectnaam bestaat word het script afgebroken
if project_name == sql_projectname:
os.system(clear)
print ("Project bestaat al. Probeer het opnieuw")
time.sleep (sleepcounter)
go = "off"
if go == "on":
os.system(clear)
if argument.bevesteging == "":
print ("Weet je het zeker dat je een nieuw project wilt starten met de naam " + project_name + " ? (y/n)")
conformation = input()
if conformation == "y":
#Het eerste gedeelte van het project ID genereren
id_first_part = str (random.randint(10000000, 99999999))
id = (id_first_part + "-0405-0607-0809-0a0b0c0d0e0f")
payload = {
"name": project_name,
"project_id": id
}
#API request om het project aan te maken uitvoeren.
headers = {'content-type': 'application/json'}
url = "http://" + gns3_server + ":3080/v2/projects"
r = requests.post(url, json=payload, headers=headers)
#Project naam en ID wegschrijven naar de database
cursor.execute("INSERT INTO `projects` VALUES (NULL, %s, %s)", (project_name, id))
db.commit()
os.system(clear)
print ("Het project is aangemaakt")
time.sleep(sleepcounter)
elif conformation == "n":
os.system(clear)
print ("Taak is afgebroken door de gebruiker")
time.sleep(sleepcounter)
else:
os.system(clear)
print("Input niet herkend. Er zijn geen wijzigingen uitgevoerd")
time.sleep(sleepcounter)
os.system (clear)
if option != "":
exit ()
print ("Wil je nog een project aanmaken? (y/n)")
answer = input()
if answer == "y":
print ()
elif answer == "n":
print ()
else:
os.system(clear)
print ("Input niet herkend, je word doorgewezen naar het hoofdmenu")
time.sleep(sleepcounter)
def remove ():
if argument.projectnaam == "":
print ("Wat is de naam van het project dat je wilt verwijderen?")
project_name = input()
print()
if argument.bevesteging == "":
print ("Weet je het zeker? (y/n)")
conformation = input()
if conformation == "y":
getprojectid = """SELECT project_id FROM `projects` WHERE `name` = %s"""
cursor.execute(getprojectid, (project_name, ))
fetch = cursor.fetchall()
clean = str(fetch)
project_id = clean[3:-4]
headers = {'content-type': 'application/json'}
url = "http://" + gns3_server + ":3080/v2/projects/" + project_id
r = requests.delete(url)
print (r.text)
cursor.execute("DELETE FROM projects WHERE project_id = %s ;", (project_id,))
db.commit()
print ("Project is verwijderd...")
elif conformation == "n":
print ("Taak is afgebroken door de gebruiker")
else:
print ("Input niet herkend er zijn geen wijzigingen toegepast")
# Main menu loop: redraw the menu, read a choice, dispatch to the matching
# action. Relies on module-level globals: start, clear, sleepcounter,
# cursor, db, create() and remove().
while start == "on":
    os.system (clear)
    print ("GNS3 Management Tool")
    print ()
    print ("1 - Lijst met projecten weergeven")
    print ("2 - Project aanmaken")
    print ("3 - Project verwijderen")
    print ("4 - Project exportern")
    print ("5 - Project importern")
    print ("6 - Afsluiten")
    print ()
    print ("Vul het nummer van de optie die je wilt gebruiken.")
    answer = input ()
    if answer == "1":
        print ()
    elif answer == "2":
        create ()
    elif answer == "3":
        remove ()
    elif answer == "4":
        print ()
    elif answer == "5":
        print ()
    elif answer == "6":
        #DB connectie verbreken
        cursor.close()
        db.close()
        os.system (clear)
        print ("Bye, Bye")
        time.sleep (sleepcounter)
        exit ()
    else:
        # The original condition `answer != "1" or "2" or ...` is always
        # true (non-empty string literals are truthy), so it behaved as a
        # catch-all anyway; a plain else expresses the intent correctly.
        os.system (clear)
        print ("Input niet herkend probeer het opnieuw")
        time.sleep (sleepcounter)
| rouwens/Fontys | test/functions.py | functions.py | py | 6,071 | python | nl | code | 0 | github-code | 13 |
import streamlit as st
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
#cosine similarity function is a efficient way to calculate similarity of 2 data
from sklearn.metrics.pairwise import cosine_similarity
#diiflib is used to indentify given input with closest data
import difflib
# Inject CSS so the Streamlit page title looks like an IMDb-style banner.
st.markdown(
    "<style>"
    ".stApp h1 {"
    "font-family: 'Arial', sans-serif;"
    "font-weight: bold;"
    "color: black;"  # Text color for the title
    "background-color: #FFD700;"  # IMDb-like yellow background color
    "border-radius: 10px;"  # Rounded corners
    "padding: 10px 20px;"  # Add some padding for spacing
    "}"
    "</style>",
    unsafe_allow_html=True,
)
# Apply custom CSS styles to the subtitle
# Your existing code for movie recommendation
st.title("Movie Recommender")
st.write("")
st.write("")
st.write("")
st.write("")
# Build the content-based similarity model once per page run:
# concatenate the text features of each movie, TF-IDF-vectorize them, and
# precompute the full pairwise cosine-similarity matrix.
# NOTE(review): movies.csv is assumed to have columns 'index', 'title' and
# the features below -- confirm against the data file.
movies_data = pd.read_csv('movies.csv')
relevant_features = ['genres','keywords','cast','director','tagline']
for feature in relevant_features:
    # Missing feature values become empty strings so concatenation works.
    movies_data[feature] = movies_data[feature].fillna('')
combined_features = movies_data['genres']+' '+movies_data['keywords']+' '+movies_data['tagline']+' '+movies_data['cast']+' '+movies_data['director']
vectorizer = TfidfVectorizer()
feature_vectors = vectorizer.fit_transform(combined_features)
similarity = cosine_similarity(feature_vectors)
csv_file = "titles.csv" # Change this to your CSV file path
titles_df = pd.read_csv(csv_file)
# Extract the list of titles from the DataFrame
titles = titles_df['Titles'].tolist()
# Create a dropdown widget to select a movie title
movie_name = st.selectbox("Movie You watched :", titles)
# Resolve the picked title to the catalogue's exact spelling, rank every
# movie by cosine similarity against it, and render the top suggestions
# as IMDb search links.
if movie_name != "NONE":
    list_of_all_titles = movies_data['title'].tolist()
    find_close_match = difflib.get_close_matches(movie_name, list_of_all_titles)
    close_match = find_close_match[0]
    index_of_the_movie = movies_data[movies_data.title == close_match]['index'].values[0]
    similarity_score = list(enumerate(similarity[index_of_the_movie]))
    sorted_similar_movies = sorted(similarity_score, key = lambda x:x[1], reverse = True)
    st.markdown("<style>@keyframes dust { 0% { transform: translate(0, -10px); opacity: 0; } 100% { transform: translate(0, 0); opacity: 1; } } .dust-in { animation: dust 2.0s ease-in; }</style>", unsafe_allow_html=True)
    st.markdown("<h2>Movies suggested for you:</h2>", unsafe_allow_html=True)
    i = 1
    imdb_search_base_url = "https://www.imdb.com/find?q="  # Define the IMDb search base URL here
    for movie in sorted_similar_movies:
        # Only the first 7 entries produce output; the original kept
        # iterating (and doing DataFrame lookups) over the whole ranked
        # list, so break early -- same rendered output, much less work.
        if i >= 7:
            break
        index = movie[0]
        title_from_index = movies_data[movies_data.index == index]['title'].values[0]
        imdb_search_query = title_from_index.replace(" ", "+")  # Convert movie title to a search query
        if i != 1:
            # Rank 1 is the selected movie itself, so it is skipped.
            st.markdown(f"<div class='dust-in'><h3>{i-1}. <a href='{imdb_search_base_url}{imdb_search_query}' target='_blank'>{title_from_index}</a></h3></div>", unsafe_allow_html=True)
        i += 1
else:
    st.write('Waiting')
from resource.base.handler.lcp import LCP as BaseLCP
from requests import delete as delete_req
from requests import post as post_req
from requests import put as put_req
from document.ebpf_program.catalog import _eBPFProgramCatalogDocument
from document.exec_env import ExecEnvDocument
from lib.response import UnprocEntityResponse
from lib.token import create_token
from utils.log import Log
from utils.sequence import wrap
# Error-message templates, formatted with (exec-env id, hostname, port).
MSG_RESP_NOT_VALID = "Response from LCP({}@{}:{}) not valid"
MSG_REQ_NOT_EXEC = "Request to LCP({}@{}:{}) not executed"
# FIXME parameters add to instance
# TODO check if work everything
class LCP(BaseLCP):
    """Propagates eBPF-program instance changes (create/update/delete) to
    the Local Control Plane agent of the target execution environment via
    its ``/code`` HTTP endpoint.
    """
    def __init__(self, catalog, req, resp):
        # catalog: eBPF program catalog document whose config is sent to
        # the LCP; req/resp: the API request/response being processed.
        # NOTE(review): req_lcp is initialized but never used in this
        # class -- possibly kept for parity with BaseLCP; confirm.
        self.log = Log.get("ebpf-program-instance-lcp")
        self.req = req
        self.resp = resp
        self.req_lcp = {}
        self.catalog = catalog
    @classmethod
    def post(cls, instance, req, resp):
        """Create the program on the LCP (HTTP POST to /code)."""
        def __data(instance, catalog):
            # Payload: instance id + optional interface + catalog config.
            return dict(
                id=instance.meta.id,
                interface=req.get("interface", None),
                **catalog.config.to_dict(),
            )
        cls.__handler(
            instance=instance, req=req, resp=resp, caller=post_req, data=__data
        )
    @classmethod
    def put(cls, instance, req, resp):
        """Update the program on the LCP (HTTP PUT to /code)."""
        def __data(instance, catalog):
            # Same payload shape as post().
            return dict(
                id=instance.meta.id,
                interface=req.get("interface", None),
                **catalog.config.to_dict(),
            )
        cls.__handler(
            instance=instance, req=req, resp=resp, caller=put_req, data=__data
        )
    @classmethod
    def delete(cls, instance, req, resp):
        """Remove the program from the LCP (HTTP DELETE to /code)."""
        def __data(instance, _):
            # Deletion only needs the instance id.
            return {"id": instance.meta.id}
        cls.__handler(
            instance=instance,
            req=req,
            resp=resp,
            caller=delete_req,
            data=__data,
        )
    @classmethod
    def __handler(cls, instance, req, resp, caller, data):
        """Resolve the catalog entry and execution environment referenced
        by *instance*, then forward the request via __apply.

        caller: one of requests' post/put/delete functions.
        data:   callable(instance, catalog) -> JSON payload dict.
        """
        document = _eBPFProgramCatalogDocument
        _id = instance.ebpf_program_catalog_id
        label = "eBPF Program Catalog"
        ebpf_program_catalog = cls.from_doc(document, _id, label, resp)
        exec_env = cls.from_doc(
            document=ExecEnvDocument,
            doc_id=instance.exec_env_id,
            label="Execution Environment",
            resp=resp,
        )
        # Only proceed when both lookups succeeded; from_doc is expected
        # to have added an error to resp otherwise.
        if all([ebpf_program_catalog, exec_env]):
            LCP(catalog=ebpf_program_catalog, req=req, resp=resp).__apply(
                instance=instance, exec_env=exec_env, caller=caller, data=data
            )
    def __apply(self, instance, exec_env, caller, data):
        """Send the HTTP request to the exec-env's LCP /code endpoint and
        merge its JSON response into self.resp, converting transport or
        parse failures into UnprocEntityResponse entries."""
        hostname, port = exec_env.hostname, exec_env.lcp.port
        schema = "https" if exec_env.lcp.https else "http"
        # Optional extra path segment configured on the exec-env.
        ep_lcp = "/" + exec_env.lcp.endpoint if exec_env.lcp.endpoint else ""
        resp_caller = caller(
            f"{schema}://{hostname}:{port}{ep_lcp}/code",
            headers={"Authorization": create_token()},
            json=data(instance, self.catalog),
        )
        if resp_caller.content:
            try:
                # The LCP may answer with a single object or a list; wrap()
                # normalizes it before extending the API response.
                self.resp.extend(wrap(resp_caller.json()))
            except Exception as exception:
                # NOTE(review): the trailing comma makes _msg a 1-tuple,
                # not a string -- it is passed through as-is; confirm the
                # logger/response accept that.
                _msg = (
                    MSG_RESP_NOT_VALID.format(
                        exec_env.meta.id, exec_env.hostname, exec_env.lcp.port
                    ),
                )
                self.log.exception(_msg, exception)
                UnprocEntityResponse(_msg, exception).add(self.resp)
        else:  # noqa F401
            # Empty body: the LCP did not execute the request.
            UnprocEntityResponse(
                MSG_REQ_NOT_EXEC.format(
                    exec_env.meta.id, exec_env.hostname, exec_env.lcp.port
                )
            ).add(
                self.resp
            )  # noqa: E501
| guard-project/cb-manager | resource/ebpf_program/handler/lcp.py | lcp.py | py | 3,789 | python | en | code | 1 | github-code | 13 |
#Various Barcharts Codes
#Code 1
#https://stackoverflow.com/questions/43554521/add-data-label-to-grouped-bar-chart-in-matplotlib
#Code adapted from:
#https://chrisalbon.com/python/matplotlib_grouped_bar_plot.html
#matplotlib online
#Grouped bars
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import requests
import io
# Wide gray "Group A" background bar with four narrower grouped bars
# (Groups B-E) drawn on top of it for each category.
raw_data = {'plan_type': ['A1', 'A2', 'A3', 'A4', 'A5', 'A6'],
        'Group A': [100, 0, 0, 0, 0, 0],
        'Group B': [48, 16, 9, 22, 5, 0],
        'Group C': [18, 28, 84, 34, 11, 0],
        'Group D': [49, 13, 7, 23, 6, 0],
        'Group E': [57, 16, 9, 26, 3, 0]
        }
df = pd.DataFrame(raw_data, columns = ['plan_type', 'Group B', 'Group C', 'Group D', 'Group E'])
df2 =pd.DataFrame(raw_data, columns = ['plan_type', 'Group A'])
# Setting the positions and width for the bars
pos = list(range(len(df['Group B'])))
width = 0.3
# Plotting the bars
fig, ax = plt.subplots(figsize=(8, 5))
#This creates another y-axis that shares the same x-axis
# Create a bar with Group A data,
# in position pos + some width buffer,
plt.bar(pos,
        #using df2['Group A'] data,
        df2['Group A'],
        # of width (8x wider: serves as the background bar)
        width*8,
        # with alpha 1
        alpha=1,
        # with color
        color='gray',
        # with label the first value in plan_type
        label=df2['plan_type'][0])
# Create a bar with Group B data,
# in position pos,
plt.bar(pos,
        #using df['Group B'] data,
        df['Group B'],
        # of width
        width,
        # with alpha 1
        alpha=1,
        # with color
        color='#900C3F',
        # with label the first value in plan_type
        label=df['plan_type'][0])
# Create a bar with Group C data,
# in position pos + some width buffer,
plt.bar([p + width for p in pos],
        #using df['Group C'] data,
        df['Group C'],
        # of width
        width,
        # with alpha 1
        alpha=1.0,
        # with color
        color='#C70039',
        # with label the second value in plan_type
        label=df['plan_type'][1])
# Create a bar with Group D data,
# in position pos + some width buffer,
plt.bar([p + width*2 for p in pos],
        #using df['Group D'] data,
        df['Group D'],
        # of width
        width,
        # with alpha 1
        alpha=1,
        # with color
        color='#FF5733',
        # with label the third value in plan_type
        label=df['plan_type'][2])
# Create a bar with Group E data,
# in position pos + some width buffer,
plt.bar([p + width*3 for p in pos],
        #using df['Group E'] data,
        df['Group E'],
        # of width
        width,
        # with alpha 1
        alpha=1,
        # with color
        color='#FFC300',
        # with label the fourth value in plan_type
        label=df['plan_type'][3])
# Set the y axis label
ax.set_ylabel('Percent')
# Set the chart's title
ax.set_title('Grouped Data', fontweight = "bold")
# Set the position of the x ticks
ax.set_xticks([p + 1.5 * width for p in pos])
# Set the labels for the x ticks
ax.set_xticklabels(df['plan_type'])
# Setting the x-axis and y-axis limits
plt.xlim(min(pos)-width, max(pos)+width*5)
plt.ylim([0, 100] )
#plt.ylim([0, max(df['Group B'] + df['Group C'] + df['Group D'] + df['Group E'])] )
# Adding the legend and showing the plot. Upper center location, 5 columns,
#Expanded to fit on one line.
plt.legend(['Group A','Group B', 'Group C', 'Group D', 'Group E'], loc='upper center', ncol=5, mode='expand', fontsize ='x-small')
#plt.grid() --> This would add a Grid, but I don't want that.
plt.show()
#Code 2
#https://stackoverflow.com/questions/43554521/add-data-label-to-grouped-bar-chart-in-matplotlib
#Code adapted from:
#https://chrisalbon.com/python/matplotlib_grouped_bar_plot.html
#matplotlib online
# Plain 4-way grouped bar chart (no background bar this time).
raw_data = {'plan_type': ['Type 1', 'Type 2', 'Type 3', 'Type 4', 'Type 5', 'Type 6'],
        'Group A': [48, 16, 9, 22, 5, 12],
        'Group B': [18, 28, 84, 34, 11, 36],
        'Group C': [49, 13, 7, 23, 6, 70],
        'Group D': [57, 16, 9, 26, 3, 40]
        }
df = pd.DataFrame(raw_data, columns = ['plan_type', 'Group A', 'Group B', 'Group C', 'Group D'])
# Setting the positions and width for the bars
pos = list(range(len(df['Group A'])))
width = 0.22 #Change to 0.25
# Plotting the bars
fig, ax = plt.subplots(figsize=(10, 5))
#This creates another y-axis that shares the same x-axis
# Create a bar with Group A data,
# in position pos + some width buffer,
# Create a bar with Group B data,
# in position pos,
plt.bar(pos,
        #using df['Group A'] data,
        df['Group A'],
        # of width
        width,
        # with alpha 1
        alpha=1,
        # with color
        color='#900C3F',
        # with label the first value in plan_type
        label=df['plan_type'][0])
# Create a bar with Group B data,
# in position pos + some width buffer,
plt.bar([p + width for p in pos],
        #using df['Group B'] data,
        df['Group B'],
        # of width
        width,
        # with alpha 1
        alpha=1.0,
        # with color
        color='#C70039',
        # with label the second value in plan_type
        label=df['plan_type'][1])
# Create a bar with Group D data,
# in position pos + some width buffer,
plt.bar([p + width*2 for p in pos],
        #using df['Group C'] data,
        df['Group C'],
        # of width
        width,
        # with alpha 1
        alpha=1,
        # with color
        color='#FF5733',
        # with label the third value in plan_type
        label=df['plan_type'][2])
# Create a bar with Group E data,
# in position pos + some width buffer,
plt.bar([p + width*3 for p in pos],
        #using df['Group D'] data,
        df['Group D'],
        # of width
        width,
        # with alpha 1
        alpha=1,
        # with color
        color='#FFC300',
        # with label the fourth value in plan_type
        label=df['plan_type'][3])
# Set the y axis label
ax.set_ylabel('Frequency')
# Set the chart's title
ax.set_title('Grouped Data', fontweight = "bold")
# Set the position of the x ticks
ax.set_xticks([p + 1.5 * width for p in pos])
# Set the labels for the x ticks
ax.set_xticklabels(df['plan_type'])
# Setting the x-axis and y-axis limits
plt.xlim(min(pos)-width, max(pos)+width*4)
plt.ylim([0, 100] )
#plt.ylim([0, max(df['Group A'] + df['Group B'] + df['Group C'] + df['Group D'])] )
# Adding the legend and showing the plot. Upper center location, 5 columns,
#Expanded to fit on one line.
plt.legend(['Group A', 'Group B', 'Group C', 'Group D'], loc='upper center', ncol=5, mode='expand', fontsize ='15')
#plt.grid() --> This would add a Grid, but I don't want that.
textstr = 'Created at \nwww.tssfl.com'
#plt.text(0.02, 0.5, textstr, fontsize=14, transform=plt.gcf().transFigure)
plt.gcf().text(0.02, 0.92, textstr, fontsize=14, color='green') # (0,0) is bottom left, (1,1) is top right
plt.show()
#Code 3
# Same data plotted via pandas' DataFrame.plot.bar, with each bar's height
# annotated above it.
raw_data = {'plan_type': ['Type 1', 'Type 2', 'Type 3', 'Type 4', 'Type 5', 'Type 6'],
        'Group A': [48, 16, 9, 22, 5, 12],
        'Group B': [18, 28, 84, 34, 11, 36],
        'Group C': [49, 13, 7, 23, 6, 70],
        'Group D': [57, 16, 9, 26, 3, 40]
        }
#df2 =pd.DataFrame(raw_data, columns = ['plan_type', 'Group A'])
df = pd.DataFrame(raw_data,
                  columns = ['plan_type', 'Group A', 'Group B', 'Group C', 'Group D'])
fig, ax = plt.subplots(figsize=(10, 6))
#ax = df2.plot.bar(rot=0,color='#E6E9ED',width=1)
ax = df.plot.bar(rot=0, ax=ax, color=["#900C3F", '#C70039', '#FF5733', '#FFC300'],
                 width = 0.85)
# Annotate every non-zero bar with its height, rotated 90 degrees.
for p in ax.patches[0:]:
    h = p.get_height()
    x = p.get_x()+p.get_width()/2.0
    if h != 0:
        ax.annotate("%g" % p.get_height(), xy=(x,h), xytext=(0,4), rotation=90,
                    textcoords="offset points", ha="center", va="bottom")
# Setting the positions and width for the bars
pos = list(range(len(df['Group A'])))
width = 0.22 #Change to 0.25
#ax.set_xlim(-0.5, None)
#ax.margins(y=0)
plt.xlim(min(pos)-width*2, max(pos)+width*2)
plt.ylim([0, 100] )
# Legend spread across the full width above the axes.
ax.legend(ncol=len(df.columns), loc="lower left", bbox_to_anchor=(0,1.02,1,0.08),
          borderaxespad=0, mode="expand", fontsize='15')
ax.set_xticklabels(df["plan_type"])
textstr = 'Created at \nwww.tssfl.com'
#plt.text(0.02, 0.5, textstr, fontsize=14, transform=plt.gcf().transFigure)
plt.gcf().text(0.6, 0.75, textstr, fontsize=14, color='green') # (0,0) is bottom left, (1,1) is top right
plt.show()
#Code 4
# Grouped bars with a matching line drawn through the tops of each group.
# fig, is the whole thing; ax1 is a subplot in the figure,
# so we reference it to plot bars and lines there
fig, ax1 = plt.subplots()
ind = np.arange(3)
width = 0.15
# per dimension
colors = ['#00ff00', '#0000ff', '#ff00ff']
markers = ['x','o','v']
xticklabels = ['50/50', '60/40', '70/30']
#
group1 = [12,6,5]
group2 = [6,8,12]
group3 = [2,4,9]
#
all_groups = [ group1, group2, group3 ]
# plot each group of bars; loop-variable bar_values contains values for bars
for i, bar_values in enumerate( all_groups ):
    # compute position for each bar
    bar_position = width*i
    ax1.bar( ind + bar_position, bar_values, width, color=colors[i] )
# plot line for each group of bars; loop-variable y_values contains values for lines
for i, y_values in enumerate( all_groups ):
    # moves the beginning of a line to the middle of the bar
    additional_space = (width*i) + (width/2);
    # x_values contains list indices plus additional space
    x_values = [ x + additional_space for x,_ in enumerate( y_values ) ]
    # simply plot the values in y_values
    ax1.plot( x_values, y_values, marker=markers[i], color=colors[i] )
plt.setp([ax1], xticks=ind + width, xticklabels=xticklabels)
plt.tight_layout()
plt.show()
#Code 5
# Grouped bars plus a "trend line" connecting the tallest bar of each group.
ind = np.arange(5)
avg_bar1 = (71191,2318,57965,40557,14793)
avg_bar2 = (26826,26615,31364,41088,50472)
avg_bar3 = (36232,38038,38615,39014,40812)
avg_bar4 = (26115,25879,55887,28326,27988)
plt.figure(figsize=(9.5, 6.5), tight_layout=True)
# NOTE: 'Gropu C' below is a typo in the legend label (kept as-is here).
rects1 = plt.bar(ind, avg_bar1, 0.20, color='#900C3F',label='Group A')
rects2 = plt.bar(ind + 0.20, avg_bar2, 0.20, color='#C70039', label='Group B')
rects3 = plt.bar(ind + 0.40, avg_bar3, 0.20, color='#FF5733', label='Gropu C')
rects4 = plt.bar(ind + 0.60, avg_bar4, 0.20, color='#FFC300', label='Group D')
high_point_x = []
high_point_y = []
# For every group of 4 bars, map height -> bar-center-x, sort heights
# descending and keep only the tallest bar's (x, height).
for i in range(0,5):
    single_bar_group={rects1[i].get_height():rects1[i].get_x() + rects1[i].get_width()/2.0,
                      rects2[i].get_height():rects2[i].get_x() + rects2[i].get_width()/2.0,
                      rects3[i].get_height():rects3[i].get_x() + rects3[i].get_width()/2.0,
                      rects4[i].get_height():rects4[i].get_x() + rects4[i].get_width()/2.0}
    height_list = list(single_bar_group.keys())
    height_list.sort(reverse=True)
    for single_height in height_list:
        high_point_y.append(single_height)
        high_point_x.append(single_bar_group[single_height])
        break
trend_line = plt.plot(high_point_x,high_point_y,marker='o', color='mediumblue', label='Trend Line')
plt.xlabel('Categories')
plt.ylabel('Quantities')
plt.title("Grouped Data")
plt.xticks(ind+0.30, ('Type 1', 'Type 2', 'Type 3', 'Type 4', 'Type 5'))
plt.legend(fontsize='15', loc=1)
textstr = 'Created at \nwww.tssfl.com'
#plt.text(0.02, 0.5, textstr, fontsize=14, transform=plt.gcf().transFigure)
plt.gcf().text(0.3, 0.85, textstr, fontsize=14, color='green') # (0,0) is bottom left, (1,1) is top right
plt.show()
#Code 6
# Seaborn barplot with a lineplot overlaid in a custom category order.
myDict = {'Type 1':[3,13,18,16,19,9,13,15,0,2],\
          'Type 2':[23,14,18,24,19,9,14,13,21,22],\
          'Type 3':[38,17,12,15,39,38,23,19,16,16]}
df = pd.DataFrame(myDict)
df_melted = df.melt(value_vars=['Type 1','Type 2','Type 3'])
#Use a lineplot but first, you need to keep the same order because lineplot does not have the order argument as barplot. The steps are:
#1. Create a copy of the dataframe
#2. Set variable to be categorical with the order of ['b','a','c']
#3. lineplot in the same ax
order = ['Type 2', 'Type 1', 'Type 3'] #Try ['a','b','c']
df_2 = df_melted.copy()
df_2['Variable'] = pd.Categorical(df_2['variable'], order)
df_2.sort_values('Variable', inplace=True)
#plot
fig, ax1 = plt.subplots()
sns.barplot(x='variable', y='value', data=df_melted, capsize=0.1, ax=ax1,
            order=order)
sns.lineplot(x='variable', y='value', data=df_2,
             ax=ax1, color='#FF5733', marker='o', linewidth=5, ci=None)
plt.title("Categorical Data")
plt.xlabel("Categories")
plt.ylabel("Quantities")
#plt.grid() --> This would add a Grid, but I don't want that.
textstr = 'Created at \nwww.tssfl.com'
#plt.text(0.02, 0.5, textstr, fontsize=14, transform=plt.gcf().transFigure)
plt.gcf().text(0.2, 0.75, textstr, fontsize=14, color='green') # (0,0) is bottom left, (1,1) is top right
plt.show()
#Code 7
# Horizontal grouped bar chart from a pandas DataFrame.
#Creating dataframe
dataFrame = pd.DataFrame({"Car": ['Land Rover', 'Range Rover', 'BMW', 'Hammer', 'Mercedes', 'Jaguar'],"Cubic Capacity": [2800, 3800, 2800, 4500, 2200, 3400],"Price": [5000, 10000, 6000, 12000, 4000, 6500],
                          })
# NOTE(review): DataFrame.plot.barh below creates its own figure when no
# ax is passed, so this plt.figure call likely has no effect -- confirm.
plt.figure(figsize=(10,6), tight_layout=True)
#Plotting grouped Horizontal Bar Chart with all the columns
dataFrame.plot.barh(x = "Car", title='Car CC and Price', color=("blue", "orange"))
#Display the plotted Horizontal Bar Chart
textstr = 'Created at \nwww.tssfl.com'
#plt.text(0.02, 0.5, textstr, fontsize=14, transform=plt.gcf().transFigure)
plt.gcf().text(0.65, 0.15, textstr, fontsize=14, color='green') # (0,0) is bottom left, (1,1) is top right
plt.subplots_adjust(left=0.20)
plt.legend(loc=1)
plt.show()
plt.clf()
#Code 8
# Three small grouped-bar examples (numpy offsets, xtick labels, and
# DataFrame.plot respectively).
# importing package #Ref https://www.geeksforgeeks.org/create-a-grouped-bar-plot-in-matplotlib/
# https://matplotlib.org/stable/gallery/lines_bars_and_markers/barchart.html
#Code 1
# create data
x = np.arange(5)
y1 = [45, 35, 28, 72, 56]
y2 = [20, 65, 50, 45, 78]
width = 0.40
# plot data in grouped manner of bar type
plt.bar(x-0.2, y1, width)
plt.bar(x+0.2, y2, width)
plt.title("Grouped Data")
textstr = 'Created at \nwww.tssfl.com'
#plt.text(0.02, 0.5, textstr, fontsize=14, transform=plt.gcf().transFigure)
plt.gcf().text(0.22, 0.76, textstr, fontsize=14, color='green') # (0,0) is bottom left, (1,1) is top right
plt.show()
plt.clf()
#Code 2
# create data
x = np.arange(5)
y1 = [45, 35, 28, 72, 56]
y2 = [20, 65, 50, 45, 78]
y3 = [25, 32, 60, 40, 80]
width = 0.2
# plot data in grouped manner of bar type
plt.bar(x-0.2, y1, width, color='green')
plt.bar(x, y2, width, color='cyan')
plt.bar(x+0.2, y3, width, color='orange')
plt.xticks(x, ['Player 1', 'Player 2', 'Player 3', 'Player 4', 'Player 5'])
plt.xlabel("Players")
plt.ylabel("Scores")
plt.legend(["UEFA", "La Liga", "World Cup"])
plt.gcf().text(0.42, 0.79, textstr, fontsize=14, color='green') # (0,0) is bottom left, (1,1) is top right
plt.show()
plt.clf()
#Code 3
# create data
df = pd.DataFrame([['A', 10, 20, 10, 30], ['B', 18, 25, 15, 16], ['C', 12, 15, 19, 6],
                   ['D', 10, 29, 13, 19]],
                  columns=['Streams', 'Group A', 'Group B', 'Group C', 'Group D'])
plt.figure(figsize=(8,5), tight_layout=True)
# plot grouped bar chart
df.plot(x='Streams',
        kind='bar',
        stacked=False,
        title='Grouped Bar Charts')
plt.legend(loc="upper center")
plt.gcf().text(0.15, 0.90, textstr, fontsize=14, color='green') # (0,0) is bottom left, (1,1) is top right
plt.show()
#Code 9: Per Capita GDP 2020
# Download World Bank GDP-per-capita data and draw ranked seaborn barplots:
# top-10 highest, top-10 lowest, and the full global ranking (both orders).
#We use the dataset called "2019.csv" found at https://github.com/fati8999-tech/Data-visualization-with-Python-Using-Seaborn-and-Plotly_-GDP-per-Capita-Life-Expectency-Dataset/blob/master/2019.csv
#Pull the "raw" GitHub content
url = 'https://raw.githubusercontent.com/TSSFL/Dataset_Archives/main/GDP_per_capita_World_Data.csv'
download = requests.get(url).content
#Reading the downloaded content and turning it into a pandas dataframe
# NOTE(review): error_bad_lines was deprecated in pandas 1.3 (removed in
# 2.0) in favour of on_bad_lines -- confirm the pandas version in use.
df = pd.read_csv(io.StringIO(download.decode('utf-8')), error_bad_lines=False, skiprows=4)
print(df.head(5))
#Configure plotting parameters
import seaborn as sns
#plt.style.use('ggplot')
sns.set_style('darkgrid') # darkgrid, white grid, dark, white and ticks
plt.rc('axes', titlesize=18) # fontsize of the axes title
plt.rc('axes', labelsize=14) # fontsize of the x and y labels
plt.rc('xtick', labelsize=13) # fontsize of the tick labels
plt.rc('ytick', labelsize=13) # fontsize of the tick labels
plt.rc('legend', fontsize=13) # legend fontsize
plt.rc('font', size=13)
colors1 = sns.color_palette('pastel')
colors2 = sns.color_palette('deep')
#colors = sns.color_palette("Set2")
df_sorted = df.sort_values('2020',ascending=False)
#Let's plot categorical GDP per capita for top ten countries
plt.figure(figsize=(9.5, 6), tight_layout=True)
sns.barplot(x=df_sorted['2020'],y=df_sorted['Country Name'].head(10),data=df_sorted, color="yellowgreen")
plt.xticks(rotation=90)
plt.title("Countries with Highest GDP per Capita in 2020")
# Annotate each bar with its value and its rank.
for i, v in enumerate(df_sorted['2020'].head(10)):
    plt.text(v+1000, i, str(round(v, 4)), color='steelblue', va="center")
    plt.text(v+30000, i, str(i+1), color='black', va="center")
print(df_sorted['Country Name'].head(10))
print(df_sorted['2020'].head(10))
#plt.subplots_adjust(right=0.3)
textstr = 'Created at \nwww.tssfl.com'
#plt.text(0.02, 0.5, textstr, fontsize=14, transform=plt.gcf().transFigure)
plt.gcf().text(0.02, 0.92, textstr, fontsize=14, color='green') # (0,0) is bottom left, (1,1) is top right
plt.xlabel("GDP per Capita (US$)")
plt.ylabel("Country Name")
plt.show()
plt.clf()
df_sorted = df.sort_values('2020',ascending=False)
#Let's plot categorical GDP per capita for top ten countries (vertical bars)
plt.figure(figsize=(8,6), tight_layout=True)
sns.barplot(x=df_sorted['Country Name'].head(10), y=df_sorted['2020'],data=df_sorted, color="yellowgreen")
plt.xticks(rotation=90)
plt.title("Countries with Highest GDP per Capita in 2020", y = 1.08)
xlocs, xlabs = plt.xticks()
for i, v in enumerate(df_sorted['2020'].head(10)):
    plt.text(xlocs[i] - 0.25, v + 0.05, str(round(v, 4)), color='red', va="center", rotation=45)
plt.gcf().text(0.02, 0.03, textstr, fontsize=14, color='green')
plt.xlabel("Country Name")
plt.ylabel("GDP per Capita (US$)")
plt.show()
plt.clf()
#Let's plot categorical GDP per capita for the ten lowest-ranked countries
df_sorted = df.sort_values('2020',ascending=True)
plt.figure(figsize=(8,6), tight_layout=True)
sns.barplot(x=df_sorted['2020'],y=df_sorted['Country Name'].head(10),data=df_sorted, color="cadetblue")
plt.xticks(rotation=90)
plt.title("Countries with Lowest GDP per Capita in 2020")
for i, v in enumerate(df_sorted['2020'].head(10)):
    plt.text(v+10, i, str(round(v, 4)), color='teal', va="center")
plt.gcf().text(0.8, 0.85, textstr, fontsize=14, color='green')
plt.xlabel("GDP per Capita (US$)")
plt.ylabel("Country Name")
plt.show()
plt.clf()
df_sorted = df.sort_values('2020',ascending=True)
#Same ten lowest countries, vertical bars
plt.figure(figsize=(8,6), tight_layout=True)
sns.barplot(x=df_sorted['Country Name'].head(10), y=df_sorted['2020'],data=df_sorted, color="cadetblue")
plt.xticks(rotation=90)
plt.title("Countries with Lowest GDP per Capita in 2020", y = 1.08)
xlocs, xlabs = plt.xticks()
for i, v in enumerate(df_sorted['2020'].head(10)):
    plt.text(xlocs[i] - 0.25, v + 0.5, str(round(v, 4)), color='crimson', va="center", rotation=90)
plt.gcf().text(0.1, 0.1, textstr, fontsize=14, color='green')
plt.xlabel("Country Name")
plt.ylabel("GDP per Capita (US$)")
plt.show()
plt.clf()
df_sorted = df.sort_values('2020',ascending=True)
#Full global ranking, ascending (rank printed as 226 - position)
plt.figure(figsize=(15,70), tight_layout=True)
sns.barplot(x=df_sorted['2020'],y=df_sorted['Country Name'],data=df_sorted, color="deepskyblue")
plt.xticks(rotation=90)
plt.title("Global GDP per Capita in 2020")
for i, v in enumerate(df_sorted['2020']):
    plt.text(v+1000, i, str(round(v, 4)), color='teal', va="center")
    plt.text(v+22000, i, str(226-(i+1)), color='black', va="center")
plt.gcf().text(0.55, 0.98, textstr, fontsize=14, color='green')
plt.xlabel("GDP per Capita (US$)")
plt.ylabel("Country Name")
plt.show()
plt.clf()
df_sorted = df.sort_values('2020',ascending=False)
#Full global ranking, descending
plt.figure(figsize=(15,70), tight_layout=True)
sns.barplot(x=df_sorted['2020'],y=df_sorted['Country Name'],data=df_sorted, color="deepskyblue")
plt.xticks(rotation=90)
plt.title("Global GDP per Capita in 2020")
for i, v in enumerate(df_sorted['2020']):
    plt.text(v+1000, i, str(round(v, 4)), color='teal', va="center")
    plt.text(v+22000, i, str(i+1), color='black', va="center")
plt.gcf().text(0.1, 0.99, textstr, fontsize=14, color='green')
plt.xlabel("GDP per Capita (US$)")
plt.ylabel("Country Name")
plt.show()
plt.clf()
df_sorted = df.sort_values('2020',ascending=False)[:225]
#Descending ranking restricted to the first 225 rows
plt.figure(figsize=(15,70), tight_layout=True)
sns.barplot(x=df_sorted['2020'],y=df_sorted['Country Name'],data=df_sorted, color="deepskyblue")
plt.xticks(rotation=90)
plt.title("Global GDP per Capita in 2020")
for i, v in enumerate(df_sorted['2020']):
    plt.text(v+1000, i, str(round(v, 4)), color='teal', va="center")
    plt.text(v+22000, i, str(i+1), color='black', va="center")
plt.gcf().text(0.1, 0.99, textstr, fontsize=14, color='green')
plt.xlabel("GDP per Capita (US$)")
plt.ylabel("Country Name")
plt.show()
plt.clf()
| TSSFL/Dataset_Archives | barcharts_demo.py | barcharts_demo.py | py | 21,062 | python | en | code | 0 | github-code | 13 |
ch=input()
def check(ch):
    """Print whether *ch* is a vowel, a consonant, or not a letter."""
    lowercase_letter = ch >= "a" and ch <= "z"
    uppercase_letter = ch >= "A" and ch <= "Z"
    if not (lowercase_letter or uppercase_letter):
        print("invalid")
    elif ch in 'aeiou' or ch in "AIEOU":
        print("Vowel")
    else:
        print("Consonent")
check(ch)
| sinha414tanya/tsinha | vowel_consonent.py | vowel_consonent.py | py | 252 | python | en | code | 1 | github-code | 13 |
import tensorflow as tf
import numpy as np
import os
from datetime import datetime
from numpy import linalg as LA
from convnet import convnet_inference
from resnet_model import resnet_inference
from os import listdir
import pandas as pd
import cifar_input as cifar_data
import my_utils
tf.logging.set_verbosity(tf.logging.WARN)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.reset_default_graph()
# Define the CLI flags only once: reading FLAGS.activation raises when the
# flags are not yet defined (e.g. first run), in which case we define them.
try:
    FLAGS.activation
except:
    FLAGS = tf.app.flags.FLAGS
    tf.app.flags.DEFINE_string('model', 'resnet', '''which model to train: resnet or convnet''')
    tf.app.flags.DEFINE_string('activation', 'elu', '''activation function to use: relu or elu''')
    tf.app.flags.DEFINE_integer('random_seed', 123, '''input the same random seed used for training models''')
    tf.app.flags.DEFINE_boolean('is_tune', False, '''if True, split train dataset (50K) into 45K, 5K as train/validation data. *only use in training models''') # don't change this here
    tf.app.flags.DEFINE_float('lr', 0.1, '''doing nothing here''') # don't change this here
    tf.app.flags.DEFINE_integer('train_batch_size', 128, '''batch_size''')
    tf.app.flags.DEFINE_integer('dataset', 100, '''dataset to evalute''')
    tf.app.flags.DEFINE_integer('resnet_layers', 20, '''number of layers to use in ResNet: 56 or 20; if convnet, make it to 3''')
    tf.app.flags.DEFINE_boolean('use_L2', False, '''whether to use L2 regularizer ''')
    tf.app.flags.DEFINE_integer('version', 2, '''[0: c/t, 1: c/sqrt(t), 2:SGDv1]''')
print ('-'*20 + '\nEvaluations on MU & Theta...\n' + '-'*20)
# Training Parameters
initial_learning_rate = FLAGS.lr
batch_size = FLAGS.train_batch_size
inference = resnet_inference if FLAGS.model == 'resnet'else convnet_inference
# tf Graph input (TF1 graph mode: placeholders fed per batch below)
X = tf.placeholder(tf.float32, [batch_size, 32, 32, 3])
Y = tf.placeholder(tf.float32, [batch_size,])
phase_train = tf.placeholder(tf.bool, name='phase_train')
# do inference
logits = inference(X, num_classes=FLAGS.dataset, num_layers=FLAGS.resnet_layers, activations=FLAGS.activation, phase_train=phase_train) # when resnet you need to pass number of layers
# Define loss and optimizer
W = [var for var in tf.trainable_variables ()]
loss_op = my_utils.cross_entropy_loss_with_l2(logits, Y, W, use_L2=FLAGS.use_L2)
# Call: gradients -- per-variable gradients of the loss, evaluated per batch
grads = tf.gradients(loss_op, W)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=5000)
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False))
sess.run(init)
# Cache the raw CIFAR arrays on disk so reruns skip the dataset pipeline.
cifar_data_dir = './cifar%d_data/raw_data_C%d.npy'%(FLAGS.dataset, FLAGS.dataset)
cifar_label_dir = './cifar%d_data/raw_label_C%d.npy'%(FLAGS.dataset, FLAGS.dataset)
if os.path.isfile(cifar_data_dir) and os.path.isfile(cifar_label_dir):
    raw_data = np.load(cifar_data_dir)
    raw_label = np.load(cifar_label_dir)
else:
    (raw_data, raw_label), (test_data, test_labels) = cifar_data.load_data(FLAGS.dataset, FLAGS.is_tune)
    np.save('./cifar%d_data/raw_data_C%d.npy'%(FLAGS.dataset, FLAGS.dataset), raw_data)
    np.save('./cifar%d_data/raw_label_C%d.npy'%(FLAGS.dataset, FLAGS.dataset), raw_label)
print ('load dataset: [CIFAR%d]'%FLAGS.dataset)
num_batches = raw_data.shape[0]//batch_size
# read all models
random_seed = FLAGS.random_seed
# NOTE(review): the first checkpoint_dir assignment is dead code -- it is
# immediately overwritten by the local './models_...' path on the next line.
checkpoint_dir = '../ImprovedICLR_v2/stagewise_sgd/models_%s-%d_v%d_%s_L2_%s/C%d/exp_%d/'%(FLAGS.model, FLAGS.resnet_layers, FLAGS.version, FLAGS.activation, str(FLAGS.use_L2), FLAGS.dataset, FLAGS.random_seed)
checkpoint_dir = './models_%s-%d_v%d_%s_L2_%s/C%d/exp_%d/'%(FLAGS.model, FLAGS.resnet_layers, FLAGS.version, FLAGS.activation, str(FLAGS.use_L2), FLAGS.dataset, FLAGS.random_seed)
# Collect checkpoint prefixes (strip the '.data-...' suffix) in natural order.
model_dir = [checkpoint_dir + f.split('.data')[0] for f in listdir(checkpoint_dir) if 'data-' in f ] #and '120000' in f]
model_dir = my_utils.natural_sort(model_dir)
# The checkpoint at iteration 200000 is treated as the "optimal" model W*;
# all other checkpoints are compared against it below.
mode_optiomal_dir = model_dir[-1]
mode_optiomal_dir = [m for m in model_dir if '200000' in m ][0]
model_dir = model_dir[:-1]
# get W optimal
saver.restore(sess, mode_optiomal_dir)
load_iter = int(mode_optiomal_dir.split('-')[-1])
W_opt = sess.run(W)
print ('W optimal: %.5f'%(W_opt[0].sum()))
# Average the per-batch loss of W* over the whole training set -> loss_opt.
loss_W_optimal = []
for n in range(num_batches):
    offset = (n) * batch_size
    print ('\rmodel-[%d]-batch-[%d]'%(load_iter, n), end='\r')
    train_batch_data = raw_data[offset:offset+batch_size, ...]
    train_batch_labels = raw_label[offset:offset+batch_size]
    feed_dict = {X: train_batch_data, Y:train_batch_labels, phase_train:False}
    loss_w_optimal_n = sess.run(loss_op, feed_dict)
    loss_W_optimal.append(loss_w_optimal_n)
loss_opt = np.mean(loss_W_optimal) # 0.00001 #
print('model*-[%d]-optimal_loss: %.5f\n'%(load_iter, loss_opt))
save_csv = []
log_iter = []
log_ratio = []
log_mu = []
# For every intermediate checkpoint W_t: accumulate full-dataset mean
# gradients and mean loss, then compute the PL/theta ratios and the
# estimated strong-convexity-style constant mu w.r.t. W*.
for idx, model__ in enumerate(model_dir):
    load_iter = int(model__.split('-')[-1])
    # uncomment the below lines if you want to check less number of points
    #if load_iter not in [10000, 20000, 30000, 40000, 50000, 60000, 70000, 80000, 90000]:
    #    continue
    saver.restore(sess, model__)
    W_t = sess.run(W)
    ratio, loss_W_current = [] ,[]
    mean_grad_sum = [np.zeros((w.shape.as_list())) for w in W]
    for n in range(num_batches):
        offset = (n) * batch_size
        print ('\rmodel-[%d]-batch-[%d]'%(load_iter, n), end='\r')
        train_batch_data = raw_data[offset:offset+batch_size, ...]
        train_batch_labels = raw_label[offset:offset+batch_size]
        feed_dict = {X: train_batch_data, Y:train_batch_labels, phase_train:False}
        grads_n = sess.run(grads, feed_dict)
        loss_n = sess.run(loss_op, feed_dict)
        # compute mean gradients for each layer (divide by "N"(#batches) after for loop)
        mean_grad_sum = my_utils.cumulative_sum(mean_grad_sum, grads_n)
        # save resluts
        loss_W_current.append(loss_n)
    loss_t = np.mean(loss_W_current)
    # ||grad||^2 of the dataset-mean gradient, summed over layers.
    loss_l2_square_t = np.sum([np.square(LA.norm(g_mean/num_batches)) for g_mean in mean_grad_sum ])
    # <grad, W_t - W*> summed over layers.
    ratio_t = np.sum([np.inner(g.flatten()/num_batches, (w_t - w_opt).flatten()) for g, w_t, w_opt in zip(mean_grad_sum, W_t, W_opt) ])
    # compute theta
    pl_i = (loss_l2_square_t)/(loss_t - loss_opt)
    ratio_i = ratio_t/(loss_t - loss_opt)
    # compute mu
    w_diff_norm_square = np.sum([(np.square(LA.norm(w_t - w_opt))) for w_t, w_opt in zip(W_t, W_opt)])
    estimated_mu = (loss_t - loss_opt)/(w_diff_norm_square*2)
    result_outptus = ('model-[%d]-PL:, %.5f, Ratio:, %.5f, grads_l2:%.5f, loss_t: %.5f, mu:, %.5f '%( load_iter, pl_i, ratio_i, loss_l2_square_t, loss_t, estimated_mu))
    print(result_outptus)
    log_iter.append(load_iter)
    log_ratio.append(ratio_i)
    log_mu.append(estimated_mu)
# Persist the per-checkpoint Ratio and mu estimates for later plotting.
df = pd.DataFrame(data={'model':log_iter, 'Ratio':log_ratio, 'mu':log_mu})
if not os.path.exists('./logs_eval/'):
    os.makedirs('./logs_eval/')
df.to_csv('./logs_eval/%s-%d-v%d_%s_C%d_theta_mu_use_L2_%s_exp_%d.csv'%(FLAGS.model, FLAGS.resnet_layers, FLAGS.version, FLAGS.activation, FLAGS.dataset, str(FLAGS.use_L2), FLAGS.random_seed))
| yzhuoning/StagewiseSGD | eval_compute_theta_mu.py | eval_compute_theta_mu.py | py | 7,415 | python | en | code | 3 | github-code | 13 |
20884705523 | import os
from discord.ext import commands
import discord
'''Handles the voice state updates logger for moderation purposes.
Built with Love <3 by Afnan for the Piano Planet Discord Server.'''

# Dictionary of Guild IDs and their corresponding logs channel IDs
# Used to route the logs to the correct logging channel
# Guilds that are not listed here are ignored by the logger
# (the KeyError handler in on_voice_state_update simply returns).
# Format:
# {Guild_Id : Logs_Channel_Id}
VC_LOGS_CHANNEL_IDS = {1012056613798019092:1066056878242680935, # Oofnan's Bot Playground
                       686016539094417478:849320334641463356, # Piano-Planet-Staging
                    }
class voiceChannelsLogger(commands.Cog):
    """Cog that logs voice-channel joins, leaves and moves to a per-guild
    log channel.

    The destination channel id is looked up in the module-level
    VC_LOGS_CHANNEL_IDS mapping; events from guilds that are not listed
    there are ignored.
    """

    def __init__(self, bot) -> None:
        self.bot = bot

    @commands.Cog.listener()
    async def on_voice_state_update(self, member, before, after):
        ## Sending info to the logs channel
        # Getting the logs channel id configured for this guild.
        try:
            logs_channel_id = VC_LOGS_CHANNEL_IDS[member.guild.id]
        except KeyError:
            print("Logging Channel not found for this guild. Please set it up.")
            return

        # Resolve the channel object from the bot's cache. get_channel()
        # returns None when the channel is not cached or was deleted; bail
        # out instead of raising AttributeError on .send() below.
        logs_channel = self.bot.get_channel(logs_channel_id)
        if logs_channel is None:
            print("Configured logs channel %d is not available." % logs_channel_id)
            return

        # Timestamp rendered in Discord's short-time format.
        utc_time_now = discord.utils.utcnow()
        the_time_now = discord.utils.format_dt(utc_time_now, style='T')

        if after.channel and before.channel:
            # Mute/unmute/deafen/undeafen also fire this event with the
            # same channel on both sides -- ignore those.
            if before.channel.id == after.channel.id:
                return
            # Moving between channels
            send_msg = f"[{the_time_now}] {member.mention} has moved from {before.channel.mention} to {after.channel.mention}"
            await logs_channel.send(send_msg)
        elif after.channel is None and before.channel:
            # Leaving the vc
            send_msg = f"[{the_time_now}] {member.mention} has left {before.channel.mention}"
            await logs_channel.send(send_msg)
        elif after.channel:
            # Joining the vc
            send_msg = f"[{the_time_now}] {member.mention} has joined {after.channel.mention}"
            await logs_channel.send(send_msg)

        # For DEBUGGING:
        print("A VC Log message was sent.")

        ## Need to implement a cooldown here:
        # bulk_insert = bulk_insert + '\n' + send_msg
        #1 Check for a cooldown - Otherwise, the Bot might get flagged for spamming
        #2 send the bulk insert and reset it
        #3 or send a single message
async def setup(bot):
    # discord.py extension entry point: called by bot.load_extension()
    # to register this cog.
    await bot.add_cog(voiceChannelsLogger(bot))
22757618115 | # head, eyes, spine, legs, arms
template = """|------
| |
|
|
|
|
|
--------"""
template1 = """|------
| |
| ( )
|
|
|
|
--------"""
template2 = """|------
| |
| (ยฐ ยฐ)
|
|
|
|
--------"""
template3 = """|------
| |
| (ยฐ ยฐ)
| |
| |
|
|
--------"""
template4 = """|------
| |
| (ยฐ ยฐ)
| |
| |
| / \
|
--------"""
template_failed = """|------
| |
| (ยฐ ยฐ)
| __|__
| |
| / \
|
--------"""
topic = input("""Player one, what is the topic?
""")
# Normalise the answer once so the comparison below is case-insensitive.
# (Previously only the guess was lowercased, so an answer typed with any
# capital letter could never be matched.)
word = input("""What is the word?
""").lower()
print("""
""")

# Gallows stage shown after wrong guess #1..#4; the 5th wrong guess ends
# the game with the complete drawing.
stages = [template1, template2, template3, template4]

tries = 1
while tries < 6:
    print(f"The topic is: {topic}")
    attempt = input(f"What is guess #{tries}? ")
    if attempt.lower() == word:
        print("That is correct!")
        break
    elif tries < 5:
        print(stages[tries - 1])
    else:
        print(template_failed)
        print(f"Failed. The correct word was {word}. Try again next time!")
    tries += 1
| SwyftAx/Hangman | hangman.py | hangman.py | py | 1,133 | python | en | code | 0 | github-code | 13 |
13611822367 | """
Exercรญcio
Crie uma funรงรฃo que encontra o primeiro duplicado considerando o segundo
nรบmero como a duplicaรงรฃo. Retorne a duplicaรงรฃo considerada.
Requisitos:
A ordem do nรบmero duplicado รฉ considerada a partir da segunda
ocorrรชncia do nรบmero, ou seja, o nรบmero duplicado em si.
Exemplo:
[1, 2, 3, ->3<-, 2, 1] -> 1, 2 e 3 sรฃo duplicados (retorne 3)
[1, 2, 3, 4, 5, 6] -> Retorne -1 (nรฃo tem duplicados)
[1, 4, 9, 8, ->9<-, 4, 8] (retorne 9)
Se nรฃo encontrar duplicados na lista, retorne -1
"""
lista_de_listas_de_inteiros = [
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[9, 1, 8, 9, 9, 7, 2, 1, 6, 8],
[1, 3, 2, 2, 8, 6, 5, 9, 6, 7],
[3, 8, 2, 8, 6, 7, 7, 3, 1, 9],
[4, 8, 8, 8, 5, 1, 10, 3, 1, 7],
[1, 3, 7, 2, 2, 1, 5, 1, 9, 9],
[10, 2, 2, 1, 3, 5, 10, 5, 10, 1],
[1, 6, 1, 5, 1, 1, 1, 4, 7, 3],
[1, 3, 7, 1, 10, 5, 9, 2, 5, 7],
[4, 7, 6, 5, 2, 9, 2, 1, 2, 1],
[5, 3, 1, 8, 5, 7, 1, 8, 8, 7],
[10, 9, 8, 7, 6, 5, 4, 3, 2, 1],
]
def primeiro_duplicado_tosco(lista):
    """Naive O(n^2) version: return the value whose second occurrence
    appears earliest in the list, or -1 when there are no duplicates."""
    indice_repetido = -1
    for i, atual in enumerate(lista):
        # Positions at or past the current best second-occurrence index
        # can no longer improve the answer.
        if i == indice_repetido:
            break
        for j in range(i + 1, len(lista)):
            if j == indice_repetido:
                break
            if lista[j] == atual:
                if indice_repetido == -1 or j < indice_repetido:
                    indice_repetido = j
                break
    return lista[indice_repetido] if indice_repetido > 0 else indice_repetido
def primeiro_duplicado(lista):
    """Linear version: track seen values in a set and return the first
    value seen twice, or -1 when every element is unique."""
    vistos = set()
    for valor in lista:
        if valor in vistos:
            return valor
        vistos.add(valor)
    return -1
# Exercise the naive O(n^2) implementation on every sample list.
for lista in lista_de_listas_de_inteiros:
    print(primeiro_duplicado_tosco(lista))
| marcosab10/python | curso/exercicio_listas.py | exercicio_listas.py | py | 1,993 | python | pt | code | 0 | github-code | 13 |
15499656392 | #!/usr/bin/python
from math import sqrt
users = {"Angelica": {"Blues Traveler": 3.5, "Broken Bells": 2.0,
"Norah Jones": 4.5, "Phoenix": 5.0,
"Slightly Stoopid": 1.5,
"The Strokes": 2.5, "Vampire Weekend": 2.0},
"Bill": {"Blues Traveler": 2.0, "Broken Bells": 3.5,
"Deadmau5": 4.0, "Phoenix": 2.0,
"Slightly Stoopid": 3.5, "Vampire Weekend": 3.0},
"Chan": {"Blues Traveler": 5.0, "Broken Bells": 1.0,
"Deadmau5": 1.0, "Norah Jones": 3.0,
"Phoenix": 5, "Slightly Stoopid": 1.0},
"Dan": {"Blues Traveler": 3.0, "Broken Bells": 4.0,
"Deadmau5": 4.5, "Phoenix": 3.0,
"Slightly Stoopid": 4.5, "The Strokes": 4.0,
"Vampire Weekend": 2.0},
"Hailey": {"Broken Bells": 4.0, "Deadmau5": 1.0,
"Norah Jones": 4.0, "The Strokes": 4.0,
"Vampire Weekend": 1.0},
"Jordyn": {"Broken Bells": 4.5, "Deadmau5": 4.0, "Norah Jones": 5.0,
"Phoenix": 5.0, "Slightly Stoopid": 4.5,
"The Strokes": 4.0, "Vampire Weekend": 4.0},
"Sam": {"Blues Traveler": 5.0, "Broken Bells": 2.0,
"Norah Jones": 3.0, "Phoenix": 5.0,
"Slightly Stoopid": 4.0, "The Strokes": 5.0},
"Veronica": {"Blues Traveler": 3.0, "Norah Jones": 5.0,
"Phoenix": 4.0, "Slightly Stoopid": 2.5,
"The Strokes": 3.0}}
""" give it data, which neighbor to use, which calculation to use, and
the number of recommendations it should make"""
class recommender:
def __init__(self, data, k=1, metric='pearson', n=5):
self.k = k
self.n = n
self.data = data
self.username2id = {}
self.productid2name = {}
self.metric = metric
if self.metric == 'pearson':
self.fn = self.pearson
#willl have to rewrite this part for our data
def convertProudctID2name(self, id):
# shoot in the product number, gives out the product name, should be easy enough
if id in self.productid2name:
return self.productid2name[id]
else:
return id
def pearson(self, rating1, rating2):
""" my implementation of the pearson correlation formula wiki that shit if you forget what it is"""
# numerator stuff
xy = 0
x = 0
y = 0
n = 0
xx = 0
yy = 0
for key in rating1:
if key in rating2:
x = x + rating1[key]
y = y + rating2[key]
yy = yy + (rating2[key] ** 2)
xx = xx + (rating1[key] ** 2)
n = n + 1
xy = xy + (rating1[key] * rating2[key])
avgxy = (x * y) / n
numerator = xy - avgxy
denomenator = sqrt(abs(xx - ((x**2)/n))) * sqrt(abs(yy - (y**2)/n))
if denomenator == 0:
return 0
else:
return numerator/denomenator
def computeNearestNeighbor(self, username):
""" what user is most like, or closest to the other neighbors"""
distances = []
for instance in self.data:
if instance != username:
distance = self.fn(self.data[username], self.data[instance])
distances.append((instance, distance))
# need to figure out what this is really doing. why aren't we just sorting how we did last time?
distances.sort(key=lambda beerTuple: beerTuple[1], reverse=True)
return distances
def recommend(self, user):
""" shoot me the recommendations mothafucka!"""
recommendations = {}
#get the nearest neighbors + ratings
nearest = self.computeNearestNeighbor(user)
userRatings = self.data[user]
totalDistance = 0.0
#this part will take the range, k, right now is 1, and do this for the 0 to kth user
for i in range(self.k):
totalDistance += nearest[i][1]
for i in range(self.k):
weight = nearest[i][1] / totalDistance
name = nearest[i][0]
neighborRatings = self.data[name]
# now find what the neighbor rated, but the user didnt
for beer in neighborRatings:
if not beer in userRatings:
if beer not in recommendations:
recommendations[beer] = neighborRatings[beer] * weight
# make a list from this dictionary
recommendations = list(recommendations.items())
recommendations = [(self.convertProudctID2name(k), v) for (k, v) in recommendations]
recommendations.sort(key=lambda beerTuple: beerTuple[1], reverse=True)
return recommendations[:self.n]
# Demo: print recommendations for two of the sample users.
r = recommender(users)
print(r.recommend('Jordyn'))
print(r.recommend('Hailey'))
| erikmingo/beerbuddy | recommender.py | recommender.py | py | 4,833 | python | en | code | 1 | github-code | 13 |
73582678417 | from .todo_server import todo_server, mocked_todo_server
from .server_responses import AllTasksServerResponse, Task, TaskServerResponse
from reports import models
class __ReportsManager:
    """Builds report rows (completed/pending task reports plus a counter)
    from to-do server responses, and can reset/refresh/populate the
    report tables as a whole."""

    def save_task(self, response_or_task: TaskServerResponse | Task) -> models.CompletedTaskReport | models.PendingTaskReport | None:
        """Persist one task as a Completed- or PendingTaskReport row.

        Accepts either a single-task server response or a bare Task;
        returns the created model, or None when the input is neither
        type or carries no task.
        """
        task: Task | None
        if isinstance(response_or_task, TaskServerResponse):
            task = response_or_task.content
        elif isinstance(response_or_task, Task):
            task = response_or_task
        else:
            return None

        if task is None:
            return None

        # objects.create() already saves the row; no extra .save() needed.
        if task.is_completed:
            c_model = models.CompletedTaskReport.objects.create(task_pk=task.id,
                                                                task_created_at=task.created_at_as_datetime(),
                                                                task_completed_at=task.completed_at_as_datetime())
            return c_model

        p_model = models.PendingTaskReport.objects.create(task_pk=task.id,
                                                          task_created_at=task.created_at_as_datetime())
        return p_model

    def save_tasks(self, response_or_tasks: AllTasksServerResponse | list[Task]) -> list[models.CompletedTaskReport | models.PendingTaskReport] | None:
        """Persist many tasks via save_task().

        Returns the list of created models (tasks that failed to save are
        silently skipped), or None when the input is neither an
        all-tasks response nor a list, or carries no tasks.
        """
        tasks: list[Task] | None
        if isinstance(response_or_tasks, AllTasksServerResponse):
            tasks = response_or_tasks.content
        elif isinstance(response_or_tasks, list):
            tasks = response_or_tasks
        else:
            return None

        if tasks is None:
            return None

        models_list: list[models.CompletedTaskReport | models.PendingTaskReport] = []

        for task in tasks:
            task_model = self.save_task(task)
            if task_model is None:
                continue
            models_list.append(task_model)

        return models_list

    def count_tasks(self) -> models.TasksCounterReport | None:
        """Rebuild the single TasksCounterReport row from the current
        report tables; returns it, or None on any failure."""
        try:
            # Drop any previous counter rows before recounting.
            self.reset_task_count()
            completed_tasks = self.get_all_completed_tasks()
            pending_tasks = self.get_all_pending_tasks()
            completed_count = completed_tasks.count()
            pending_count = pending_tasks.count()
            task_count = completed_count + pending_count
            c_model = models.TasksCounterReport.objects.create(task_count=task_count,
                                                               completed_count=completed_count,
                                                               pending_count=pending_count)
            c_model.save()
            # Link every report row to the counter via the M2M fields.
            for c_task in completed_tasks:
                c_model.completed_tasks.add(c_task.pk)
            for p_task in pending_tasks:
                c_model.pending_tasks.add(p_task.pk)
            c_model.save()
            return c_model
        except Exception as e:
            print(f'Failed to count tasks: {e}')
            return None

    def reset_task_count(self) -> bool:
        """Delete all TasksCounterReport rows (bulk, then one-by-one as a
        fallback); True when the table ends up empty."""
        if models.TasksCounterReport.objects.all().first() is not None:
            models.TasksCounterReport.objects.all().delete()

        if models.TasksCounterReport.objects.all().first() is not None:
            for t_counter in models.TasksCounterReport.objects.all():
                t_counter.delete()

        counter_cleared = models.TasksCounterReport.objects.all().first() is None

        return counter_cleared

    def reset_database(self) -> bool:
        """Delete all completed/pending report rows and the counter;
        True when every table ends up empty."""
        if models.CompletedTaskReport.objects.all().first() is not None:
            models.CompletedTaskReport.objects.all().delete()

        if models.CompletedTaskReport.objects.all().first() is not None:
            for c_task in models.CompletedTaskReport.objects.all():
                c_task.delete()

        if models.PendingTaskReport.objects.all().first() is not None:
            models.PendingTaskReport.objects.all().delete()

        if models.PendingTaskReport.objects.all().first() is not None:
            for p_task in models.PendingTaskReport.objects.all():
                p_task.delete()

        completed_cleared = models.CompletedTaskReport.objects.all().first() is None
        pending_cleared = models.PendingTaskReport.objects.all().first() is None
        counter_cleared = self.reset_task_count()

        return completed_cleared and pending_cleared and counter_cleared

    def refresh_database(self, response: TaskServerResponse | AllTasksServerResponse) -> bool:
        """Wipe the report tables, store the response's task(s) and
        rebuild the counter; True only if all three steps succeed.

        NOTE: failures are printed but the method keeps going, so a
        partial refresh leaves whatever the later steps produced.
        """
        database_reset = self.reset_database()

        if not database_reset:
            print('Failed to reset database')

        tasks_created: bool = False

        if isinstance(response, TaskServerResponse):
            task = self.save_task(response)
            tasks_created = task is not None
        elif isinstance(response, AllTasksServerResponse):
            tasks = self.save_tasks(response)
            tasks_created = tasks is not None
        else:
            return False

        if not tasks_created:
            print('Failed to create tasks')

        count = self.count_tasks()
        success_counting_tasks = count is not None

        if not success_counting_tasks:
            print('Failed to count tasks')

        return database_reset and tasks_created and success_counting_tasks

    def populate_database(self) -> bool:
        """Fetch all tasks from the live to-do server and refresh."""
        tasks = todo_server.get_all_tasks()

        if tasks is None:
            return False

        return self.refresh_database(tasks)

    def populate_database_with_mocked_data(self) -> bool:
        """Fetch all tasks from the mocked server and refresh."""
        mocked_tasks = mocked_todo_server.get_all_tasks()

        if mocked_tasks is None:
            return False

        return self.refresh_database(mocked_tasks)

    def get_all_completed_tasks(self):
        # QuerySet of every completed-task report row.
        return models.CompletedTaskReport.objects.all()

    def get_all_pending_tasks(self):
        # QuerySet of every pending-task report row.
        return models.PendingTaskReport.objects.all()

    def get_count(self):
        # The most recently created counter row (or None).
        return models.TasksCounterReport.objects.last()
reports_manager = __ReportsManager()
| TR0NZ0D/Distributed-System-Task-Server | ReportsServer/templates/utils/reports_manager.py | reports_manager.py | py | 6,085 | python | en | code | 1 | github-code | 13 |
27151768084 | from sqlite3 import PrepareProtocol
from django.test import TestCase, Client
from django.contrib.auth.models import User
from issue.views import filter_issues
from label.models import Label
from repository.models import Repository
from django.urls import reverse
from issue.models import Issue as Iss
from milestone.models import Milestone
from datetime import date
from project.models import Project
from pullrequest.models import Pullrequest
from history.models import History
class Issue(TestCase):
    """End-to-end tests for the issue views: filtering, CRUD and listing.

    setUp builds a shared fixture of two users, two repositories, labels,
    milestones, projects, pull requests and four issues (ids 11-14).
    """

    def setUp(self):
        user = User.objects.create(id=1, username='testuser1')
        user2 = User.objects.create(id=2, username='testuser2')
        user2.set_password('testuser2')
        user.set_password('testuser1')
        user2.save()
        user.save()
        # (The original also logged in a throwaway Client() here; that
        # instance was discarded, so the login had no effect on any test.)

        # create repositories
        repository = Repository.objects.create(id=1, name='Repo1', status='public', creator=user)
        repository2 = Repository.objects.create(id=2, name='Repo2', status='public', creator=user)
        repository2.save()
        repository.save()

        # labels
        l1 = Label.objects.create(name="plava", description="for feature")
        l2 = Label.objects.create(name="crvena", description="for bug")
        l1.save()
        l2.save()

        # add collaborators
        collaborator1 = User.objects.create(id=3, username='collaborator1')
        repository.developers.add(collaborator1)
        collaborator2 = User.objects.create(id=4, username='collaborator2')
        repository.developers.add(collaborator2)
        repository.developers.add(user)

        # milestones
        m1 = Milestone.objects.create(id=1, title='Milestone1', description='first milestone', status='Opened', created=date.today(), due_date=date.today(), repository=repository)
        m2 = Milestone.objects.create(id=2, title='Milestone2', description='second milestone', status='Opened', created=date.today(), due_date=date.today(), repository=repository)
        m1.save()
        m2.save()

        # projects
        p1 = Project.objects.create(id=1, name='project1', repository=repository)
        p2 = Project.objects.create(id=2, name='project2', repository=repository)
        p1.save()
        p2.save()

        # pull requests
        pr1 = Pullrequest.objects.create(id=1, prRepository=repository)
        pr2 = Pullrequest.objects.create(id=2, prRepository=repository)
        pr1.save()
        pr2.save()

        # issues (ids 11-14) with varying milestones/projects/labels/state
        is1 = Iss.objects.create(id=11, issue_title='Issue1', milestone=m1, description=" issue for authors", state="Opened", opened_by=user, repository=repository)
        is1.assignees.add(user)
        is1.projects.add(p1)
        is1.labels.add(l1)
        is1.labels.add(l2)

        is2 = Iss.objects.create(id=12, issue_title='Issue2', milestone=m2, description=" issue for projects", state="Opened", opened_by=user, repository=repository)
        is2.assignees.add(user)
        is2.projects.add(p1)
        is2.labels.add(l1)

        is3 = Iss.objects.create(id=13, issue_title='task1', milestone=m2, description=" issue for labels", state="Opened", opened_by=user2, repository=repository)
        is3.assignees.add(user)
        is3.projects.add(p2)
        is3.labels.add(l1)

        is4 = Iss.objects.create(id=14, issue_title='task2', milestone=m1, description=" issue for state", state="Closed", opened_by=user, repository=repository)
        is4.assignees.add(user)
        is4.projects.add(p2)
        is4.labels.add(l2)

    def _assert_fixture_issues_exist(self):
        # The original tests re-fetched the four fixture issues into the
        # same throwaway variable (is1) four times; keep only the implicit
        # existence check (get() raises DoesNotExist if a row is missing).
        for issue_pk in (11, 12, 13, 14):
            Iss.objects.get(id=issue_pk)

    def test_filter_by_title(self):
        """'title:' filter matches exactly the one issue named Issue1."""
        self._assert_fixture_issues_exist()
        repository = Repository.objects.get(id=1)
        data = {}
        response = self.client.post(reverse('filter_issues', kwargs={'repo_id': repository.id, 'pk': "title:Issue1"}), data, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTrue('issues' in response.context)
        self.assertEqual(len(response.context['issues']), 1)

    def test_filter_by_title_or_body(self):
        """A bare term searches title and body; 'issue' matches all four."""
        self._assert_fixture_issues_exist()
        repository = Repository.objects.get(id=1)
        data = {}
        response = self.client.post(reverse('filter_issues', kwargs={'repo_id': repository.id, 'pk': "issue"}), data, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTrue('issues' in response.context)
        self.assertEqual(len(response.context['issues']), 4)

    def test_filter_by_project(self):
        """'project:' filter matches the two issues attached to project1."""
        self._assert_fixture_issues_exist()
        repository = Repository.objects.get(id=1)
        data = {}
        response = self.client.post(reverse('filter_issues', kwargs={'repo_id': repository.id, 'pk': "project:project1"}), data, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTrue('issues' in response.context)
        self.assertEqual(len(response.context['issues']), 2)

    def test_filter_by_assigned(self):
        """'assigned:' filter matches all four issues assigned to testuser1."""
        self._assert_fixture_issues_exist()
        repository = Repository.objects.get(id=1)
        data = {}
        response = self.client.post(reverse('filter_issues', kwargs={'repo_id': repository.id, 'pk': "assigned:testuser1"}), data, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTrue('issues' in response.context)
        self.assertEqual(len(response.context['issues']), 4)

    def test_filter_by_label(self):
        """'label:' filter matches the two issues tagged 'crvena'."""
        self._assert_fixture_issues_exist()
        repository = Repository.objects.get(id=1)
        data = {}
        response = self.client.post(reverse('filter_issues', kwargs={'repo_id': repository.id, 'pk': "label:crvena"}), data, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTrue('issues' in response.context)
        self.assertEqual(len(response.context['issues']), 2)

    def test_add_issue(self):
        """An authenticated user can create an issue via the add view."""
        client = Client()
        client.login(username='testuser1', password='testuser1')
        # assignees by username, related objects by id, as the view expects
        assignees = [User.objects.get(id=1).username, User.objects.get(id=2).username]
        projects_ids = [proj.id for proj in Project.objects.all()]
        pullrequests_ids = [pr.id for pr in Pullrequest.objects.all()]
        milestones = Milestone.objects.all()
        data = {
            'title': 'Issue1',
            'description': 'first issue',
            'repository': 1,
            'milestone_id': milestones[0].id,
            'developers': assignees,
            'projects_ids': projects_ids,
            'pullrequests_ids': pullrequests_ids
        }
        response = client.post(reverse('add_issue'), data, follow=True)
        self.assertEqual(response.status_code, 200)

    def test_view_issue(self):
        """The detail view exposes the related collections in its context."""
        issue = Iss.objects.get(issue_title='Issue1')
        data = {}
        response = self.client.post(reverse('view_issue', kwargs={'id': issue.id}), data, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTrue('milestones' in response.context)
        self.assertTrue('developers' in response.context)
        self.assertTrue('projects' in response.context)
        self.assertTrue('pullrequests' in response.context)
        self.assertEqual(len(response.context['milestones']), 2)
        self.assertEqual(len(response.context['developers']), 3)
        self.assertEqual(len(response.context['projects']), 2)
        self.assertEqual(len(response.context['pullrequests']), 2)

    def test_all_issues(self):
        """The global issue list renders successfully."""
        data = {}
        response = self.client.post(reverse('all_issues'), data, follow=True)
        self.assertEqual(response.status_code, 200)

    def test_all_issues_by_repository(self):
        """The per-repository issue list renders successfully."""
        data = {}
        response = self.client.post(reverse('issues', kwargs={'id': 1}), data, follow=True)
        self.assertEqual(response.status_code, 200)

    def test_update_issue(self):
        """An authenticated user can update title/description/state."""
        user = User.objects.create(username='testuser')
        user.set_password('12345')
        user.save()
        client = Client()
        client.login(username='testuser', password='12345')

        issue = Iss.objects.get(id=11)
        issue.issue_title = 'UpdatedTitle'
        issue.description = 'UpdatedDescription'

        assignees = [User.objects.get(id=1).username]
        projects_ids = [proj.id for proj in Project.objects.all()]
        pullrequests_ids = [pr.id for pr in Pullrequest.objects.all()]
        milestones = Milestone.objects.all()
        data = {
            'title': issue.issue_title,
            'description': issue.description,
            'state': 'Close',
            'milestone_id': milestones[0].id,
            'developers': assignees,
            'projects_ids': projects_ids,
            'pullrequests_ids': pullrequests_ids
        }
        response = client.post(reverse('update_issue', kwargs={'id': issue.id}), data, follow=True)
        self.assertEqual(response.status_code, 200)

    def test_delete_issue(self):
        """Deleting an existing issue redirects/renders successfully."""
        issue = Iss.objects.get(id=11)
        data = {}
        response = self.client.post(reverse('delete_issue', kwargs={'id': issue.id}), data, follow=True)
        self.assertEqual(response.status_code, 200)
| marijamilanovic/UksGitHub | Uks/issue/tests/test_views.py | test_views.py | py | 10,482 | python | en | code | 0 | github-code | 13 |
10589093766 | """ Testing admin stuff
"""
import os
import re
import sys
import warnings
import pytest
import country_converter as coco # noqa
# Absolute directory of this test file; the package under test lives one
# level up, so put that directory on sys.path for the import below.
TESTPATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(TESTPATH, ".."))

# The changelog whose latest version header must match coco.__version__.
CHANGELOG_FILE = os.path.join(TESTPATH, "..", "CHANGELOG.md")
def test_version_consistency():
    """Test CHANGELOG.md latest version consistency with module version"""
    # Assumption: version info is in a header line (starting with #);
    # the second capture group holds the version string itself.
    pattern = re.compile(r"(#*.*)(\d+\.\d+\.\d+[a-zA-Z0-9_.]*)")
    version_changelog = None
    with open(CHANGELOG_FILE, "r") as changelog:
        for row in changelog:
            found = pattern.match(row)
            if found:
                version_changelog = found.group(2)
                break
    if version_changelog is None:
        raise ValueError("No version information found in the CHANGELOG file")

    assert (
        coco.__version__ == version_changelog
    ), f"Version module({coco.__version__}) - CHANGELOG.rst do not match({version_changelog})"
| IndEcol/country_converter | tests/test_admin.py | test_admin.py | py | 1,077 | python | en | code | 188 | github-code | 13 |
4776614198 | #functions
from keras.models import Sequential
from keras.layers import Dense, Dropout, BatchNormalization, Activation
from keras.optimizers import Adam
from keras.wrappers.scikit_learn import KerasClassifier
from keras.models import load_model
#read fasta
def read_fasta(fa):
    """Yield (header, sequence) pairs from an iterable of FASTA lines.

    Lines starting with '>' open a new record; all other lines are
    concatenated into the current record's sequence.
    """
    header = None
    chunks = []
    for raw in fa:
        stripped = raw.strip()
        if not stripped.startswith(">"):
            chunks.append(stripped)
            continue
        if header:
            yield header, ''.join(chunks)
        header, chunks = stripped, []
    if header:
        yield header, ''.join(chunks)
#count kmer
def countoverlap(seq, kmer):
    """Count occurrences of kmer in seq, overlapping matches included."""
    return sum(1 for start in range(len(seq)) if seq.startswith(kmer, start))
#get the kmer
def get_kmer(seq):
    """Build a normalized k-mer frequency feature vector for a sequence.

    Features are counts of every 1..5-mer (ordered per kmerre below),
    normalized by the total 1..5-mer count, with extra weight given to
    U/T-start and position-10 A anchors (presumably piRNA signatures --
    verify against the paper/model training code).

    NOTE(review): raises ZeroDivisionError if seq is empty (total == 0),
    and assumes seq is an uppercase A/C/G/T/N string.
    """
    ntarr = ("A","C","G","T")
    kmerArray = []
    kmerre = []
    rst = []
    # fst and pp are assigned but never used.
    fst = 0
    total = 0.0
    pp = 0.0
    item = 0.0
    # kmerArray accumulates all 1-mers ...
    for n in range(4):
        kmerArray.append(ntarr[n])
    # ... then all 2-mers ...
    for n in range(4):
        str1 = ntarr[n]
        for m in range(4):
            str2 = str1 + ntarr[m]
            kmerArray.append(str2)
    ############################################# ... all 3-mers ...
    for n in range(4):
        str1 = ntarr[n]
        for m in range(4):
            str2 = str1 + ntarr[m]
            for x in range(4):
                str3 = str2 + ntarr[x]
                kmerArray.append(str3)
    #############################################
    #change this part for 3mer or 4mer
    for n in range(4):
        str1 = ntarr[n]
        for m in range(4):
            str2 = str1 + ntarr[m]
            for x in range(4):
                str3 = str2 + ntarr[x]
                for y in range(4):
                    str4 = str3 + ntarr[y]
                    kmerArray.append(str4)
    ############################################
    # kmerre is the final feature ordering: for each nucleotide, the
    # 1-mer itself followed by that nucleotide prefixed to every entry of
    # kmerArray (i.e. all 2..5-mers starting with it). The .index()
    # lookups below depend on this exact ordering.
    for i in ntarr:
        kmerre.append(i)
        for m in kmerArray:
            st = i + m
            kmerre.append(st)
    ############################################
    #get the second part of features: overlapping counts per k-mer
    for n in range(len(kmerre)):
        item = countoverlap(seq,kmerre[n])
        total = total + item
        rst.append(item)
    # Boost prefix features when the read starts with T, and features
    # around position 10 when it is an A. These boosts are NOT added to
    # `total`, so they slightly inflate the normalized values on purpose
    # (presumably -- confirm against the training pipeline).
    sub_seq = []
    if seq.startswith("T"):
        sub_seq.append(seq[0:1])
        sub_seq.append(seq[0:2])
        sub_seq.append(seq[0:3])
        sub_seq.append(seq[0:4])
        sub_seq.append(seq[0:5])
    if seq[9:10] == "A":
        sub_seq.append(seq[9:10])
        sub_seq.append(seq[8:10])
        sub_seq.append(seq[7:10])
        sub_seq.append(seq[6:10])
        sub_seq.append(seq[5:10])
        sub_seq.append(seq[9:11])
        sub_seq.append(seq[9:12])
        sub_seq.append(seq[9:13])
        sub_seq.append(seq[9:14])
    for i in sub_seq:
        # Sub-sequences containing N have no feature slot; skip them.
        if "N" not in i:
            inx = kmerre.index(i)
            rst[inx] += 1
    # Normalize every feature by the pre-boost total count.
    for n in range(len(rst)):
        rst[n] = rst[n]/total
    return rst
#prediction
def prediction(dat, sp):
    """Load the species-specific model file selected by sp (1-4) and
    return its class predictions for dat."""
    if sp == 1:
        model_file = 'Ele_piRNN.h5'
    elif sp == 2:
        model_file = 'Dro_piRNN.h5'
    elif sp == 3:
        model_file = 'Rat_piRNN.h5'
    elif sp == 4:
        model_file = 'Hum_piRNN.h5'
    model = load_model(model_file)
    Y = model.predict_classes(dat, verbose=0)
    return Y
#output
def output(Y_pre, ids, dics):
    """Return the subset of dics whose ids were predicted positive (1)."""
    return {ids[pos]: dics[ids[pos]] for pos, label in enumerate(Y_pre) if label == 1}
| bioinfolabmu/piRNN | functions.py | functions.py | py | 2,785 | python | en | code | 2 | github-code | 13 |
39243164559 | # -*- coding: utf-8 -*-
# CPython's default maximum recursion depth is about 1000 frames;
# raise it here because NumberOf1Between1AndN_Solution recurses once
# per number in 1..n.
import sys
sys.setrecursionlimit(30000)
class Solution:
    """Count how many times the decimal digit 1 appears in 1..n."""

    def NumberOf1Between1AndN_Solution(self, n):
        """Return the total number of digit-1 occurrences over 1..n.

        Implemented iteratively; the previous recursive helper needed an
        enlarged recursion limit and still risked overflow for large n.
        """
        return sum(self.count_1(i) for i in range(1, n + 1))

    def count_1(self, a):
        """Return how many decimal digits of a equal 1.

        Fixes a double-count in the original: after the division it added
        1 again whenever the remaining quotient was exactly 1, so e.g.
        count_1(10) returned 2 and count_1(11) returned 3.
        """
        count = 0
        while a > 0:
            if a % 10 == 1:
                count += 1
            a //= 10
        return count
if __name__=="__main__":
s=Solution()
print(s.NumberOf1Between1AndN_Solution(10000)) | RellRex/Sword-for-offer-with-python-2.7 | test31_ๆดๆฐไธญ1ๅบ็ฐ็ๆฌกๆฐ.py | test31_ๆดๆฐไธญ1ๅบ็ฐ็ๆฌกๆฐ.py | py | 775 | python | en | code | 2 | github-code | 13 |
38072030018 | #####################################################################################################
#
# top level jobOptions to run Muon chains in the RTT or standalone
# sets some global variables that adjust the execution of TrigInDetValidation_RTT_Common.py
#
# Jiri.Masik@manchester.ac.uk
#
#####################################################################################################
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
#set athenaCommonFlags.FilesInput to be able to use this job options standalone without RTT
#secondSet of files can be activated by the if statement below
#if athenaCommonFlags.FilesInput()==[]:
# athenaCommonFlags.FilesInput=[
# "root://eosatlas//eos/atlas/atlascerngroupdisk/proj-sit/trigindet/mc15_13TeV.361107.PowhegPythia8EvtGen_AZNLOCTEQ6L1_Zmumu.recon.RDO.e3601_s2576_s2132_r7143/RDO.06718162._000013.pool.root.1"
# ]
###XMLDataSet='TrigInDetValidation_mu_single_mu_100' # <-- RTT jobID
#from AthenaCommon.AppMgr import release_metadata
#d = release_metadata()
##TestMonTool.releaseMetaData = d['nightly name'] + " " + d['nightly release'] + " " + d['date'] + " " + d['platform'] + " " + d['release']
#print d['nightly name']
#if d['nightly name']=='20.1.X.Y.Z-VAL-TrigMC' or d['nightly name']=='20.X.Y-VAL' or d['nightly name']=='21.X.Y' or d['nightly name']=='20.7.X-VAL' or '20.7.3.Y-VAL' in d['nightly name'] or '20.7.4.Y-VAL' in d['nightly name'] :
# print '***JK This is a realease with FTK, will include chains '
#else:
# print '***JK This release does not include FTK, will set doFTK=False'
# doFTK=False
include("TrigInDetValidation/TrigInDetValidation_RTT_Chains.py")
rID=False
if 'doIDNewTracking' in dir() and doIDNewTracking==True:
rID = True
rFTK=False
if 'doFTK' in dir() and doFTK==True:
from TriggerJobOpts.TriggerFlags import TriggerFlags
TriggerFlags.doFTK=True
rFTK=True
(idtrigChainlist, tidaAnalysischains) = muonChains(rID,rFTK)
def resetSigs():
TriggerFlags.Slices_all_setOff()
TriggerFlags.MuonSlice.setAll();
TriggerFlags.MuonSlice.signatures = idtrigChainlist
PdgId=13
include("TrigInDetValidation/TrigInDetValidation_RTT_Common.py")
#if 'doFTK' in dir() and doFTK==True:
## ServiceMgr.TrigFTK_DataProviderSvc.OutputLevel=DEBUG
# ServiceMgr.TrigFTK_DataProviderSvc.TrainingBeamspotX= -0.0497705
# ServiceMgr.TrigFTK_DataProviderSvc.TrainingBeamspotY=1.06299
# ServiceMgr.TrigFTK_DataProviderSvc.TrainingBeamspotZ = 0.0
# ServiceMgr.TrigFTK_DataProviderSvc.TrainingBeamspotTiltX= 0.0 # -1.51489e-05
# ServiceMgr.TrigFTK_DataProviderSvc.TrainingBeamspotTiltY= 0.0 # -4.83891e-05
## topSequence.TrigSteer_HLT.TrigFastTrackFinder_Muon.OutputLevel=DEBUG
## topSequence.TrigSteer_HLT.TrigFastTrackFinder_Muon_IDTrig.FTK_Mode=True
## topSequence.TrigSteer_HLT.TrigFastTrackFinder_Muon_IDTrig.FTK_Refit=False
| rushioda/PIXELVALID_athena | athena/Trigger/TrigValidation/TrigInDetValidation/share/TrigInDetValidation_RTT_topOptions_MuonSlice.py | TrigInDetValidation_RTT_topOptions_MuonSlice.py | py | 2,847 | python | en | code | 1 | github-code | 13 |
19905246837 | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def reorderList(self, head):
        """
        Reorder a singly linked list L0->L1->...->Ln in place into
        L0->Ln->L1->Ln-1->L2->Ln-2->...

        :type head: ListNode
        :rtype: None -- the list is modified in place, nothing is returned.

        Fix: the original ran TWO complete reorder algorithms back to back
        (the split/reverse/merge pass and then a stack-based pass), so the
        second pass scrambled the result of the first.  Only one pass is
        kept.  The helper typo `_revereList` is also corrected.
        """
        if not head or not head.next:
            return
        front, back = self._splitList(head)
        back = self._reverseList(back)
        self._mergeLists(front, back)

    def _splitList(self, head):
        # Split after the midpoint; returns (front_half, back_half).
        fast = head
        slow = head
        while fast and fast.next:
            slow = slow.next
            fast = fast.next
            fast = fast.next
        middle = slow.next
        slow.next = None
        return head, middle

    def _reverseList(self, head):
        # Standard iterative in-place reversal; returns the new head.
        last = None
        currentNode = head
        while currentNode:
            nextNode = currentNode.next
            currentNode.next = last
            last = currentNode
            currentNode = nextNode
        return last

    def _mergeLists(self, a, b):
        # Interleave the nodes of b into a; a's head stays first.
        tail = a
        head = a
        a = a.next
        while b:
            tail.next = b
            tail = tail.next
            b = b.next
            if a:
                a, b = b, a
        return head
# Demo guarded by __main__ so importing this module stays silent.
# Fix: the original called s.partition('aab'), a method that does not exist
# on Solution (leftover from another problem), raising AttributeError at
# import time.
if __name__ == "__main__":
    class _Node(object):
        def __init__(self, val):
            self.val = val
            self.next = None

    nodes = [_Node(i) for i in range(1, 5)]
    for left, right in zip(nodes, nodes[1:]):
        left.next = right
    Solution().reorderList(nodes[0])
    values = []
    node = nodes[0]
    while node:
        values.append(node.val)
        node = node.next
    print(values)  # expected: [1, 4, 2, 3]
| littleliona/leetcode | medium/143.reorder_list.py | 143.reorder_list.py | py | 2,026 | python | en | code | 0 | github-code | 13 |
16723512138 | """Unit tests for the date parsing method"""
import os
import sys
import builtins
from datetime import datetime, timedelta
import mock
from tp_timesheet.date_utils import get_start_date, assert_start_date
from tp_timesheet.config import Config
# Import config fixture from adjacent test
# pylint: disable=(unused-import)
from .test_config import fixture_create_tmp_mock_config
tests_path = os.path.dirname(os.path.abspath(__file__))
src_path = tests_path + "/../"
sys.path.insert(0, src_path)
# fmt: off
# Every strftime pattern that get_start_date() must be able to parse.
# NOTE: the "%-d"/"%-m" (no-padding) directives are a glibc extension and
# are not available in Windows strftime.
TEST_CASES_FORMATS_DMY = [
    # 4-digit year(%Y)
    # slashes
    '%d/%m/%Y',     # "05/02/2022"
    '%-d/%-m/%Y',   # "5/2/2022"
    '%d/%-m/%Y',    # "05/2/2022"
    '%-d/%m/%Y',    # "5/02/2022"
    # hyphens
    '%d-%m-%Y',     # "05-02-2022"
    '%-d-%-m-%Y',   # "5-2-2022"
    '%d-%-m-%Y',    # "05-2-2022"
    '%-d-%m-%Y',    # "5-02-2022"
    # spaces
    '%d %m %Y',     # "05 02 2022"
    '%-d %-m %Y',   # "5 2 2022"
    '%d %-m %Y',    # "05 2 2022"
    '%-d %m %Y',    # "5 02 2022"
    # 2-digit year(%y)
    # slashes
    '%d/%m/%y',     # "05/02/22"
    '%-d/%-m/%y',   # "5/2/22"
    '%d/%-m/%y',    # "05/2/22"
    '%-d/%m/%y',    # "5/02/22"
    # hyphens
    '%d-%m-%y',     # "05-02-22"
    '%-d-%-m-%y',   # "5-2-22"
    '%d-%-m-%y',    # "05-2-22"
    '%-d-%m-%y',    # "5-02-22"
    # spaces
    '%d %m %y',     # "05 02 22"
    '%-d %-m %y',   # "5 2 22"
    '%d %-m %y',    # "05 2 22"
    '%-d %m %y',    # "5 02 22"
]
TEST_CASES_FORMATS_YMD = [
    # 4-digit year(%Y)
    # slashes
    '%Y/%m/%d',     # "2022/02/05"
    '%Y/%-m/%-d',   # "2022/2/5"
    '%Y/%-m/%d',    # "2022/2/05"
    '%Y/%m/%-d',    # "2022/02/5"
    # hyphens
    '%Y-%m-%d',     # "2022-02-05"
    '%Y-%-m-%-d',   # "2022-2-5"
    '%Y-%-m-%d',    # "2022-2-05"
    '%Y-%m-%-d',    # "2022-02-5"
    # spaces
    '%Y %m %d',     # "2022 02 05"
    '%Y %-m %-d',   # "2022 2 5"
    '%Y %-m %d',    # "2022 2 05"
    '%Y %m %-d',    # "2022 02 5"
    # 2-digit year(%y)
    # slashes
    '%y/%m/%d',     # "22/02/05"
    '%y/%-m/%-d',   # "22/2/5"
    '%y/%-m/%d',    # "22/2/05"
    '%y/%m/%-d',    # "22/02/5"
    # hyphens
    '%y-%m-%d',     # "22-02-05"
    '%y-%-m-%-d',   # "22-2-5"
    '%y-%-m-%d',    # "22-2-05"
    '%y-%m-%-d',    # "22-02-5"
    # spaces
    '%y %m %d',     # "22 02 05"
    '%y %-m %-d',   # "22 2 5"
    '%y %-m %d',    # "22 2 05"
    '%y %m %-d',    # "22 02 5"
]
# fmt: on
def test_various_date_formats():
    """
    Check that get_start_date() parses every supported date format
    (TEST_CASES_FORMATS_DMY and TEST_CASES_FORMATS_YMD).
    """
    # A range beyond 6 months is not supported by the current logic; e.g.
    # today 2022-10-10 / target 2023-4-22 would parse as 2022-4-23.
    days_span = 180
    today = datetime.today()
    every_format = TEST_CASES_FORMATS_DMY + TEST_CASES_FORMATS_YMD
    for offset in range(-1 * days_span, days_span + 1):
        target_date = today + timedelta(offset)
        for date_format in every_format:
            query_str = target_date.strftime(date_format)
            parsed = get_start_date(query_str)
            assert (
                parsed.year == target_date.year
            ), f"parsing error, query:{query_str} and parsed:{parsed}"
            assert (
                parsed.month == target_date.month
            ), f"parsing error, query:{query_str} and parsed:{parsed}"
            assert (
                parsed.day == target_date.day
            ), f"parsing error, query:{query_str} and parsed:{parsed}"
def test_assert_start_date(mock_config):
    """
    Exercise assert_start_date() inside and outside the configured
    sanity-check window, with the confirmation prompt answered both ways.
    """
    Config(config_filename=mock_config)
    window = int(Config.SANITY_CHECK_RANGE)
    today = datetime.today().date()

    def check(delta):
        return assert_start_date(today + timedelta(delta))

    # Any date is accepted when the user confirms with "y".
    with mock.patch.object(builtins, "input", lambda _: "y"):
        for delta in range(-5 * window, 5 * window):
            assert check(delta), "start date assertion failed"

    with mock.patch.object(builtins, "input", lambda _: "n"):
        # Dates far in the past, user declines -> rejected.
        for delta in range(-5 * window, -1 * window):
            assert not check(delta), "start date assertion failed"
        # Dates far in the future, user declines -> rejected.
        for delta in range(window + 1, 5 * window):
            assert not check(delta), "start date assertion failed"
        # Dates inside the window need no prompt at all
        # (the test would fail here if one were shown).
        for delta in range(-1 * window, window + 1):
            assert check(delta), "start date assertion failed"
| ThorpeJosh/tp-timesheet | tp_timesheet/tests/test_date_utils.py | test_date_utils.py | py | 5,348 | python | en | code | 4 | github-code | 13 |
3302376037 | import nltk
import re
import signal
from mosestokenizer import MosesSentenceSplitter, MosesTokenizer
from string import punctuation
from text_categorizer import constants, pickle_manager
from text_categorizer.logger import logger
from text_categorizer.SpellChecker import SpellChecker
from text_categorizer.ui import get_documents, progress
from traceback import format_exc
class Preprocessor:
    """Tokenize, spell-check and lemmatize documents via Moses + NLTK WordNet."""

    def __init__(self, mosestokenizer_language_code="en", store_data=False, spell_checker_lang=None, n_jobs=1):
        """
        :param mosestokenizer_language_code: language for the Moses splitter/tokenizer.
        :param store_data: if True, preprocessed documents are appended to a pickle file.
        :param spell_checker_lang: language for the spell checker, or None to disable it.
        :param n_jobs: worker count handed to the spell checker.
        """
        self.mosestokenizer_language_code = mosestokenizer_language_code
        self.splitsents = MosesSentenceSplitter(self.mosestokenizer_language_code)
        self.tokenize = MosesTokenizer(self.mosestokenizer_language_code)
        nltk.download('wordnet', quiet=False)
        self.lemmatizer = nltk.stem.WordNetLemmatizer()
        # Set to True by the signal handler to request a graceful stop.
        self.stop = False
        self.store_data = store_data
        if spell_checker_lang is None:
            logger.info("The spell checker is disabled.")
            self.spell_checker = None
        else:
            logger.info("The spell checker is enabled for %s." % (spell_checker_lang))
            self.spell_checker = SpellChecker(language=spell_checker_lang, n_jobs=n_jobs)

    def preprocess(self, text_field, preprocessed_data_file=None, docs=None):
        """
        Analyze every document's `text_field` into a list of sentences of
        token dicts ({'form', 'lemma', 'upostag'}) stored on
        doc.analyzed_sentences[text_field].  Already-analyzed documents are
        passed through unchanged.  When store_data is True, documents are
        streamed back to `preprocessed_data_file` and a stop signal exits
        the process after the dump completes.
        """
        if self.store_data:
            self._set_signal_handlers()
            logger.info("Send a SIGTERM signal to stop the preprocessing phase. (The preprocessed documents will be stored.)")
        description = "Preprocessing"
        if docs is None:
            docs = get_documents(preprocessed_data_file, description=description)
        else:
            docs = progress(iterable=docs, desc=description, unit="doc")
        if self.store_data:
            metadata = pickle_manager.get_docs_metadata(preprocessed_data_file)
            pda = pickle_manager.PickleDumpAppend(metadata=metadata, filename=preprocessed_data_file)
        # Memoize lemmatization per lower-cased token (WordNet lookups are slow).
        token_to_lemma = dict()
        # Collapse all newline flavours to spaces before sentence splitting.
        pattern = re.compile(r'\r\n|\r|\n')
        for doc in docs:
            if not self.stop and doc.analyzed_sentences.get(text_field) is None:
                text = doc.fields[text_field]
                text = pattern.sub(" ", text)
                sentences = self.splitsents([text])
                sentences = [self.tokenize(sent) for sent in sentences]
                if self.spell_checker is not None:
                    sentences = self.spell_checker.spell_check(sentences)
                analyzed_sentences = []
                for sent in sentences:
                    tokens = []
                    for word in sent:
                        token = word.lower()
                        lemma = token_to_lemma.get(token)
                        if lemma is None:
                            lemma = self.lemmatizer.lemmatize(token)
                            token_to_lemma[token] = lemma
                        token = {
                            'form': word,
                            'lemma': lemma,
                            'upostag': 'PUNCT' if lemma in punctuation else None
                        }
                        tokens.append(token)
                    analyzed_sentences.append(tokens)
                doc.analyzed_sentences[text_field] = analyzed_sentences
            # Every document (analyzed or not) is written back so the dump
            # stays complete even after an early stop.
            if self.store_data:
                pda.dump_append(doc)
        if self.store_data:
            pda.close()
            self._reset_signal_handlers()
        # A requested stop terminates the process once the dump is safe.
        if self.stop:
            exit(0)

    def _signal_handler(self, sig, frame):
        # Request a graceful stop on the first stop signal; later signals
        # are ignored once self.stop is already set.
        if sig in constants.stop_signals:
            if not self.stop:
                print()
                logger.info("Stopping the preprocessing phase.")
                self.stop = True

    def _set_signal_handlers(self):
        # Install our handler for every stop signal, remembering the old
        # handlers so they can be restored afterwards.
        self.old_handlers = dict()
        for sig in constants.stop_signals:
            self.old_handlers[sig] = signal.signal(sig, self._signal_handler)

    def _reset_signal_handlers(self):
        # Restore the handlers saved by _set_signal_handlers().
        for sig, old_handler in self.old_handlers.items():
            signal.signal(sig, old_handler)
        self.old_handlers.clear()
| LuisVilarBarbosa/TextCategorizer | text_categorizer/Preprocessor.py | Preprocessor.py | py | 4,106 | python | en | code | 0 | github-code | 13 |
11163451717 | '''
Tests for basic HTTP request handling
'''
from unittest import TestCase
from urllib import parse
from tornado.web import Application
from tornado.httputil import HTTPHeaders
from tornado.httputil import HTTPConnection
from tornado.httputil import HTTPServerRequest
from f5.handlers import BaseRequestHandler
class TestBuildURL(TestCase):
    """Unit tests for BaseRequestHandler.build_url()."""

    def setUp(self):
        # Tornado's HTTPConnection normally wraps a real stream; stub the
        # close callback so a bare connection object is usable in tests.
        def set_close_callback(_, *args, **kwargs):
            return None
        app = Application()
        app.configuration = {
            'tornado': {'debug': True}
        }
        conn = HTTPConnection()
        conn.set_close_callback = set_close_callback
        self.app = app
        self.conn = conn
        # Defaults used by get_handler() when no overrides are given.
        self.protocol = 'https'
        self.host = 'www.example.com'
        self.prefix = '/a'

    def get_handler(self, protocol=None, host=None, prefix=None):
        """Build a BaseRequestHandler whose request carries the given
        protocol/host and an X-Path-Prefix header (falling back to the
        setUp defaults)."""
        req = HTTPServerRequest(method='GET', uri='/',
            headers=HTTPHeaders({'X-Path-Prefix': prefix or self.prefix})
        )
        req.protocol = protocol or self.protocol
        req.host = host or self.host
        req.connection = self.conn
        return BaseRequestHandler(self.app, req)

    def test_emtpy_string(self):
        '''
        build_url returns <PROTOCOL>://<HOST><PREFIX>
        '''
        handler = self.get_handler()
        self.assertEqual(handler.build_url(''), '{0}://{1}{2}'.format(
            self.protocol, self.host, self.prefix))

    def test_path_prefix(self):
        '''
        build_url returns <PROTOCOL>://<HOST><PREFIX>
        '''
        # The prefix is normalised: a missing leading slash is added and a
        # trailing slash is removed (as the expectations below show).
        base_url = '{0}://{1}'.format(self.protocol, self.host)
        handler = self.get_handler(prefix='/apple')
        self.assertEqual(handler.build_url(''), base_url + '/apple')
        handler = self.get_handler(prefix='/banana/')
        self.assertEqual(handler.build_url(''), base_url + '/banana')
        handler = self.get_handler(prefix='cherry/')
        self.assertEqual(handler.build_url(''), base_url + '/cherry')

    def test_path_parameters(self):
        '''
        build_url constructs parameterized paths
        '''
        # Positional parameters come from a list, named ones from a dict.
        base_url = '{0}://{1}{2}'.format(self.protocol, self.host, self.prefix)
        handler = self.get_handler()
        self.assertEqual(handler.build_url('/item/{0}', ['123']), base_url + '/item/123')
        self.assertEqual(handler.build_url('/item/{id}', {'id': '123'}), base_url + '/item/123')
        self.assertEqual(handler.build_url('/item/{0}/more', [123]), base_url + '/item/123/more')

    def test_query_arguments(self):
        '''
        build_url constructs querystring arguments from a dictionary
        '''
        handler = self.get_handler()

        def get_query_args(url):
            # Parse the querystring back to a dict so the comparison does
            # not depend on argument ordering.
            query_str = parse.urlparse(url).query
            return parse.parse_qs(query_str) if query_str else {}

        url = handler.build_url('/', query={'a': 'apple'})
        self.assertEqual(get_query_args(url), {'a': ['apple']})
        url = handler.build_url('/', query={'a': 'apple', 'b': 'banana'})
        self.assertEqual(get_query_args(url), {'a': ['apple'], 'b': ['banana']})
| brendanberg/f5 | test/test_handlers.py | test_handlers.py | py | 3,113 | python | en | code | 0 | github-code | 13 |
74881411538 | import os
from azure.identity import DefaultAzureCredential
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient
from dotenv import load_dotenv
load_dotenv()
dirname = os.path.dirname(__file__)

# Fix: the results directory was previously created through a variable named
# ``local_path_noleak`` that was immediately reused for the no-leak videos;
# it now has its own name so the three directories are clearly distinct.
# os.makedirs(..., exist_ok=True) also replaces the check-then-create race.
local_path_results = os.path.join(dirname, "../../videos/results")
os.makedirs(local_path_results, exist_ok=True)

# Videos that contain a gas leak.
files_leak = [
    "MOV_1650.mp4",
    "MOV_1669.mp4",
    "MOV_1544.mp4",
    "MOV_1616.mp4",
    "MOV_1546.mp4",
]
local_path_leak = os.path.join(dirname, "../../videos/leak")
os.makedirs(local_path_leak, exist_ok=True)

# Videos without a leak.
files_noleak = ["MOV_1662.mp4", "MOV_1541.mp4", "MOV_1543.mp4"]
local_path_noleak = os.path.join(dirname, "../../videos/noleak")
os.makedirs(local_path_noleak, exist_ok=True)

# Credentials come from the environment (loaded via dotenv above).
connect_str = os.getenv("AZURE_STORAGE_CONNECTION_STRING")
container_client_leak = ContainerClient.from_connection_string(
    connect_str, "mongstad-nov-2022-leak"
)
container_client_noleak = ContainerClient.from_connection_string(
    connect_str, "mongstad-nov-2022-noleak"
)

# Download each blob into its local directory.
for f in files_leak:
    download_file_path = os.path.join(local_path_leak, f)
    print("\nDownloading blob to \n\t" + download_file_path)
    with open(file=download_file_path, mode="wb") as download_file:
        download_file.write(container_client_leak.download_blob(f).readall())

for f in files_noleak:
    download_file_path = os.path.join(local_path_noleak, f)
    print("\nDownloading blob to \n\t" + download_file_path)
    with open(file=download_file_path, mode="wb") as download_file:
        download_file.write(container_client_noleak.download_blob(f).readall())
| equinor/gas-analysis | src/gas_analysis/download_dataset.py | download_dataset.py | py | 1,682 | python | en | code | 0 | github-code | 13 |
22223792994 | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
import csv
import os
import requests
from itemadapter import ItemAdapter
from yaofangwang.settings import IMG_PATH, DEFAULT_REQUEST_HEADERS
class YaofangwangPipeline:
    """Append each scraped medicine item to a CSV file and save its image."""

    def process_item(self, item, spider):
        """
        Persist one item: write its field values as a CSV row and download
        the image referenced by ``item["img_path"]``.

        Returns the item unchanged so later pipelines can keep processing it.

        Fixes vs. the original: the redundant ``img.close()`` inside the
        ``with`` block is gone, the exists-then-mkdir race is replaced by
        ``os.makedirs(..., exist_ok=True)``, and a failed image download now
        raises instead of silently writing an error page to disk.
        """
        # utf-8-sig keeps the Chinese column content readable in Excel.
        with open("./ไธญ่ฅฟ่ฏ.csv", "a+", encoding="utf-8-sig", newline="") as ff:
            csv_file = csv.writer(ff)
            csv_file.writerow(list(dict(item).values()))

        os.makedirs(IMG_PATH, exist_ok=True)

        res = requests.get(item["img_path"], headers=DEFAULT_REQUEST_HEADERS)
        res.raise_for_status()  # fail loudly on HTTP errors
        print(item['name'])
        with open(f"{IMG_PATH}/{item['name']}.gif", "wb+") as img:
            img.write(res.content)
        return item
| qifiqi/codebase | python_codebase/็ฌ่ซ/yaofangwang-ๆชๅฎๆ/yaofangwang/pipelines.py | pipelines.py | py | 994 | python | en | code | 3 | github-code | 13 |
6576197410 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 19 12:50:26 2021
@author: seoleary
Provides a simple example class for implementing a thread for counting down,
in the commented out code at the bottom, this class will implement two threads
that will not execute sequentially because of a built in delay
"""
import threading as th
import time
class myThread(th.Thread):
    """A thread that announces itself, counts down from 5, then announces completion."""

    def __init__(self, name, delay):
        super().__init__()
        self.name = name
        self.delay = delay

    def run(self):
        print('Starting thread %s.' % self.name)
        thread_count_down(self.name, self.delay)
        print('Finished thread %s.' % self.name)
def thread_count_down(name, delay):
    """Count down from 5 to 1, sleeping `delay` seconds before each message."""
    for remaining in range(5, 0, -1):
        time.sleep(delay)
        print('Thread %s is counting down: %i...'%(name, remaining))
'''
from myThread import myThread
thread1 = myThread('A',.5)
thread2 = myThread('B',.5)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
''' | seanmoleary/asynchronous | myThread.py | myThread.py | py | 1,026 | python | en | code | 0 | github-code | 13 |
4064974811 | import sys
from functools import reduce
def solution():
    """Read an integer n from stdin and print the sum 0 + 1 + ... + n.

    Fix: the builtin sum() replaces the needless functools.reduce()/lambda
    combination; the result is identical for every n.
    """
    n = int(sys.stdin.readline())
    print(sum(range(n + 1)))


if __name__ == '__main__':
    solution()
| GoodDonkey/algorithm_study | acmicpc/8393.py | 8393.py | py | 224 | python | en | code | 0 | github-code | 13 |
23160168456 | #!/usr/bin/env python
# coding: utf-8
# In[55]:
import os
import gzip
import numpy as np
import pandas as pd
from keras.datasets import fashion_mnist
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
from sklearn import metrics
# In[56]:
def load_mnist(path, kind='train'):
    """Load Fashion-MNIST `kind` ('train' or 't10k') images and labels from
    the gzip archives found under `path`.

    Returns (images, labels): images is (n, 784) uint8, labels is (n,) uint8.
    """
    label_file = os.path.join(path, '%s-labels-idx1-ubyte.gz.cpgz' % kind)
    image_file = os.path.join(path, '%s-images-idx3-ubyte.gz.cpgz' % kind)
    # Skip the IDX headers (8 bytes for labels, 16 for images).
    with gzip.open(label_file, 'rb') as handle:
        labels = np.frombuffer(handle.read(), dtype=np.uint8, offset=8)
    with gzip.open(image_file, 'rb') as handle:
        raw = np.frombuffer(handle.read(), dtype=np.uint8, offset=16)
    images = raw.reshape(len(labels), 784)
    return images, labels
# In[ ]:
def plot_graph(y_train, y_val, y_test, accuracy_train, accuracy_validation, accuracy_test):
    """
    Plot train/validation/test accuracy (correct counts normalised by the
    size of each label set) against the nine values of C and save the
    figure to SVM_graph.jpg.

    Fix: the original read undefined globals (train_y, val_y, test_y,
    iteration) instead of its own parameters, so it raised NameError when
    called from another scope.
    """
    # X-axis tick labels: C = 10^-4 .. 10^4, matching the training sweep.
    iterations = [str(10.0 ** i) for i in np.arange(-4, 5, dtype=float)]
    plt.figure(num=1)
    plt.plot(accuracy_train / len(y_train), color="red",
             label="train_accuracy")
    plt.plot(accuracy_validation / len(y_val),
             color="green", label="validation_accuracy")
    plt.plot(accuracy_test / len(y_test), color="blue",
             label="test_accuracy")
    plt.xticks(range(9), iterations)
    plt.legend()
    plt.xlabel("C")
    plt.ylabel("Accuracy")
    plt.savefig('SVM_graph.jpg')
# In[57]:
def LinearSVM(train_x, train_y, val_x, val_y, test_x, test_y):
    """
    Train LinearSVC for C = 10^-4 .. 10^4, plot the accuracy curves and
    return (best C by validation accuracy, [train, val, test] accuracies at
    that C).

    Fixes vs. the original: it mixed `accuracy_val` / `validation_accuracy`
    and `test_y` / `y_test`, referenced several undefined names
    (`validation_accuracy_val`, `best`, `train_accuracy`), indexed the
    accuracy arrays with the exponent instead of the array index, and
    called plot_graph() with the wrong number of arguments.
    """
    exponents = np.arange(-4, 5, dtype=float)
    accuracy_train = np.zeros(len(exponents))
    accuracy_val = np.zeros(len(exponents))
    accuracy_test = np.zeros(len(exponents))
    for i in exponents:
        c = 10 ** i
        classifier = LinearSVC(C=c)
        classifier.fit(train_x, train_y)
        index = int(i + 4)  # exponent -4..4 -> array index 0..8
        accuracy_train[index] = np.sum(train_y == classifier.predict(train_x))
        accuracy_val[index] = np.sum(val_y == classifier.predict(val_x))
        accuracy_test[index] = np.sum(test_y == classifier.predict(test_x))
    plot_graph(train_y, val_y, test_y, accuracy_train, accuracy_val, accuracy_test)
    best_exponent = int(accuracy_val.argmax()) - 4
    best_value_c = 10 ** best_exponent
    best_index = best_exponent + 4
    best_element = [
        accuracy_train[best_index] / len(train_y),
        accuracy_val[best_index] / len(val_y),
        accuracy_test[best_index] / len(test_y),
    ]
    print('Best Value of C:', best_value_c)
    return best_value_c, best_element
# In[58]:
def confusion_matrix_svm(max_c, x_train, y_train, x_test, y_test):
    """Fit a LinearSVC with C=max_c, print and return the number of correct
    test predictions together with the test confusion matrix."""
    model = LinearSVC(C=max_c)
    model.fit(x_train, y_train)
    predictions = model.predict(x_test)
    test_accuracy = np.sum(y_test == predictions)
    cm_test = metrics.confusion_matrix(y_test, predictions)
    print('Testing accuracy:', test_accuracy / len(y_test))
    print('Confusion Matrix:', cm_test)
    return test_accuracy, cm_test
# In[59]:
def poly_kernel_svm(max_c, linearSVM, x_train, y_train, x_validation, y_validation, x_test, y_test):
    """
    Compare linear-kernel accuracies (slot 0, taken from `linearSVM` as
    [train, val, test]) against polynomial kernels of degree 2, 3 and 4 at
    C=max_c.

    Returns (result dict, index of the best test accuracy).

    Fixes vs. the original: the loop variable was `d` but the body indexed
    with an undefined `i`, and `classifierclf` was a typo for `classifier`.
    """
    train_accuracy = np.zeros(4)
    train_accuracy[0] = linearSVM[0]
    validation_accuracy = np.zeros(4)
    validation_accuracy[0] = linearSVM[1]
    test_accuracy = np.zeros(4)
    test_accuracy[0] = linearSVM[2]
    SV_number = [0]  # no support-vector count for the linear baseline
    degrees = [2, 3, 4]
    for d in degrees:
        classifier = SVC(kernel='poly', degree=d, C=max_c, gamma='auto')
        classifier.fit(x_train, y_train)
        # Degree d fills slot d-1 (slot 0 holds the linear baseline).
        train_accuracy[d - 1] = np.sum(y_train == classifier.predict(x_train)) / len(y_train)
        validation_accuracy[d - 1] = np.sum(y_validation == classifier.predict(x_validation)) / len(y_validation)
        test_accuracy[d - 1] = np.sum(y_test == classifier.predict(x_test)) / len(y_test)
        SV_number.append(classifier.n_support_)
    result = {
        'train_accuracy': train_accuracy,
        'validation_accuracy': validation_accuracy,
        'test_accuracy': test_accuracy,
        'Number of Support Vectors': SV_number,
    }
    final_test_accuracy = test_accuracy.argmax() + 1
    print(result)
    print(final_test_accuracy)
    return result, final_test_accuracy
# In[62]:
if __name__ == "__main__":
X, Y = load_mnist('/Users/coraljain/Desktop/data/')
x_test, y_test = load_mnist('/Users/coraljain/Desktop/data/', kind='t10k')
images_validation, labels_validation = images[int(0.8*len(images)):],labels[int(0.8*len(labels)):]
images_train, labels_train = images[:int(0.8*len(images))],labels[:int(0.8*len(labels))]
print(len(images_train), len(labels_train), len(images_validation), len(labels_validation), len(images_test), len(labels_test))
maxc, linearSVM = LinearSVM(images_train, labels_train,images_validation, labels_validation, images_test, labels_test)
test_accuracy, confusion_matrix = confusion_matrix_svm(maxc, images_train, labels_train, images_test, labels_test)
degree = poly_kernel_svm(maxc, linearSVM, images_train, labels_train, images_validation, labels_validation,images_test, labels_test)
| coraljain/Machine-Learning-CPT_S-570 | Support Vector Machines.py | Support Vector Machines.py | py | 5,142 | python | en | code | 0 | github-code | 13 |
27834879944 | # Write a class called Converter.
# The user will pass a length and a unit when declaring an object from
# the classโfor example, c = Converter (9,'inches').
# The possible units are inches, feet, yards, miles, kilometers,
# meters, centimeters, and millimeters. For each of these units
# there should be a method that returns the length converted into those units.
# For example, using the Converter object created above,
# the user could call c.feet() and should get 0.75 as the result.
# Conversion factors: number of <unit> in one foot
# (e.g. 12 inches per foot, 0.3048 metres per foot).
scale = {
    'feet' : 1,
    'inch' : 12,
    'yard' : 0.333,
    'mile' : 0.00018939375,
    'mm' : 304.7996952,
    'cm' : 30.47996952,
    'metre': 0.3047996952,
    'km' : 0.0003047996952,
}


class Converter:
    """Convert a length given in any unit of `scale` into feet.

    The converted value is exposed as the `feet` attribute, e.g.
    Converter(12, 'inch').feet == 1.0.
    """

    feet = 0  # class-level default, overwritten per instance in __init__

    def __init__(self, mes, type):
        """
        :param mes: the length value
        :param type: one of the keys of `scale` (name kept for backward
            compatibility even though it shadows the builtin `type` here)

        Fix: the original called the dict -- ``scale(type)`` -- which raises
        TypeError; dictionary lookup uses square brackets.
        """
        self.type = type
        self.feet = mes / scale[type]
def main():
    """Smoke test: 12 inches should come out as exactly one foot."""
    length = Converter(12, 'inch')
    print(length.feet)


main()
30305553585 | import numpy as np
import h5py
def grad(X, Y, W, lambd=0):
    """Gradient of the ridge-regularised least-squares loss: X^T(XW - Y) + lambd*W."""
    residual = np.dot(X, W) - np.asarray(Y)
    return np.dot(np.asarray(X).T, residual) + lambd * W
def decent(W, alpha, grad):
    """One gradient-descent step: move W against `grad` with step size `alpha`."""
    step = alpha * grad
    return W - step
def SSE(Y, Y_pred):
    """Half the sum of squared errors: sum(0.5 * (Y_pred - Y)^2)."""
    errors = np.square(Y_pred - Y)
    return np.sum(0.5 * errors)
def cost_with_regular(Y, Y_pred, lambd, W):
    # Squared-error cost plus a regularisation term.
    # NOTE(review): the penalty is 0.5*lambd*(1 / W^T W), i.e. the *inverse*
    # of the weight norm; a conventional L2 penalty would be 0.5*lambd*W^T W.
    # Kept as written -- confirm the intent with the author before changing.
    return np.sum(0.5 * np.square(Y_pred - Y) + 0.5 * lambd * (1 / np.dot(W.T, W)))
def conjugate_grad(W, X, Y, epsilon=1e-3, epochs=1000000, lambd=0):
    """
    Solve the (ridge-regularised) least-squares problem with the conjugate
    gradient method, starting from W.

    Returns (W, costs), where costs records the SSE after every iteration.
    Iteration stops once the SSE drops below `epsilon` or after `epochs`
    steps.

    NOTE(review): alpha is indexed as alpha[0][0], so W and Y are assumed
    to be (n, 1) column matrices -- confirm against the callers.
    """
    # System matrix of the normal equations: X^T X + lambd*I.
    X_ = np.dot(X.T, X) + lambd * np.eye(X.shape[1])
    r = np.dot(X.T, (Y - np.dot(X, W)))  # initial residual
    p = r  # first search direction
    k = 0
    costs = []
    while k < epochs:
        # print(k)
        alpha = np.dot(r.T, r) / np.dot(np.dot(p.T, X_), p)  # exact line search
        # print(alpha)
        W = W + alpha[0][0] * p
        this_r = r - np.dot(alpha * X_, p)  # updated residual
        cost_ = SSE(Y, np.dot(X, W))
        costs.append(cost_)
        if np.all(cost_ < epsilon):
            return W, costs
        k += 1
        beta = np.dot(this_r.T, this_r) / np.dot(r.T, r)  # Fletcher-Reeves factor
        r = this_r
        p = r + beta * p
    return W, costs
def analytical(X, Y, lambd):
    """Closed-form ridge-regression solution: (X^T X + lambd*I)^-1 X^T Y."""
    gram = np.dot(X.T, X) + lambd * np.eye(X.shape[1])
    return np.dot(np.dot(np.linalg.inv(gram), X.T), Y)
def cost(y, y_pred):
    """Log-likelihood-style score: sum of y * log(y_pred)."""
    log_pred = np.log(y_pred)
    return np.sum(y * log_pred)
def CrossEntropy(y, y_pred):
    """Binary cross-entropy summed over all entries (no averaging)."""
    positive_term = y * np.log(y_pred)
    negative_term = (1 - y) * np.log(1 - y_pred)
    return -1 * np.sum(positive_term + negative_term)
def logistic_forward(W, X):
    """Logistic model output: sigmoid(W @ X)."""
    z = np.dot(W, X)
    return 1 / (1 + np.exp(-1 * z))
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x); works elementwise on arrays."""
    denominator = 1 + np.exp(-1 * x)
    return 1 / denominator
def logistic_grad(X, Y, W, lambd=0, random=False):
    """
    Gradient of the (L2-regularised) logistic loss.

    With random=True, a single randomly chosen sample is used (stochastic
    gradient); otherwise the full-batch gradient is returned.  Both divide
    by X.shape[1], so samples appear to be the *columns* of X -- confirm
    with the callers.
    """
    if random:
        # Pick one sample index; X.T row i is that sample's feature vector.
        i = np.random.randint(0, X.shape[1])
        p = X.T
        p = p[i]
        return p * np.squeeze((logistic_forward(W, X) - Y))[i] + (lambd * W) / X.shape[1]
    # print(a.shape)
    return np.dot(logistic_forward(W, X) - Y, X.T) / X.shape[1] + (lambd * W) / X.shape[1]
def norm(x, axis=0, mu=None, max_=None, min_=None):
    """
    Min-max mean-centering: (x - mu) / (max_ - min_).

    When mu/max_/min_ are supplied (e.g. training-set statistics applied to
    new data) only the normalised array is returned.  Otherwise the
    statistics are computed along `axis` and (normalised, mu, max_, min_)
    is returned.
    """
    shape = (1, -1) if axis == 0 else (-1, 1)
    if mu is not None:
        return (x - mu) / (max_ - min_)
    mu = np.mean(x, axis=axis).reshape(shape)
    max_ = np.max(x, axis=axis).reshape(shape)
    min_ = np.min(x, axis=axis).reshape(shape)
    return (x - mu) / (max_ - min_), mu, max_, min_
def load_data(path):
    """
    Load a comma-separated numeric file; every column but the last forms X,
    the last column is Y.  Both are returned as plain Python lists.

    Fix: ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24; the
    builtin ``float`` is the documented replacement and behaves identically.
    """
    data = np.loadtxt(path, dtype=float, delimiter=",")
    X = data[:, :-1].tolist()
    Y = data[:, -1].tolist()
    return X, Y
def EM_E(k, X, mu, pi, sigma):
    """
    EM E-step for a k-component 2-D Gaussian mixture: compute the
    responsibility matrix gamma (n x k) and the hard assignment per point.

    Returns (gamma, labels) where labels[i] is the argmax component of
    gamma[i].
    """
    n = len(X)
    gamma = np.zeros((n, k))
    for i in range(0, n):
        # Normalising constant: total mixture density at point i.
        tmp = 0
        for j in range(0, k):
            tmp += pi[j] * norm_2(X[i], mu[j], sigma[j])
        for j in range(0, k):
            gamma[i, j] = pi[j] * norm_2(X[i], mu[j], sigma[j]) / tmp
    labels = np.argmax(gamma, axis=1)
    return gamma, labels
def EM_M(num_of_k, X, gamma):
    """
    EM M-step for a 2-D Gaussian mixture: given responsibilities `gamma`
    (n x k), recompute the component means, covariances and mixing weights.

    Returns (mu ndarray (k,2), sigma ndarray (k,2,2), pi list of length k).
    """
    n = len(X)
    # Effective number of points assigned to each component.
    N = np.sum(gamma, axis=0)
    mu = np.dot(gamma.T, X) / N.reshape(num_of_k, 1)
    sigma = np.zeros((num_of_k, 2, 2))
    for component in range(num_of_k):
        acc = np.zeros((2, 2))
        for j in range(n):
            diff = X[j] - mu[component]
            acc += gamma[j, component] * np.outer(diff, diff)
        sigma[component] = (acc / N[component]).tolist()
    pi = (N / n).tolist()
    return mu, sigma, pi
def norm_2(X, mu, sigma):
    """Density of a 2-D Gaussian N(mu, sigma) evaluated at X
    (sigma is cast to float32, matching the original behaviour)."""
    sigma = np.asarray(sigma, dtype=np.float32)
    diff = X - mu
    exponent = -0.5 * np.dot(diff.T, np.dot(np.linalg.inv(sigma), diff))
    normaliser = 2 * np.pi * np.sqrt(np.linalg.det(sigma))
    return 1 / normaliser * np.exp(exponent)
def EM_lnp(num_of_k, X, mu, pi, sigma):
    """Log-likelihood ln p(X) of the data under the current mixture parameters."""
    n = len(X)
    ans = 0
    for i in range(n):
        sum = 0  # NOTE: shadows the builtin `sum` within this function
        for j in range(num_of_k):
            sum += pi[j] * norm_2(X[i], mu[j], sigma[j])
        ans += np.log(sum)
    return ans
def PCA(X, k):
    """
    Principal component analysis via SVD of the covariance matrix.

    Returns (top-k eigenvalues, top-k eigenvectors as rows, feature means).
    """
    sample_count = len(X)
    mean = np.mean(X, axis=0)
    centred = X - mean
    S = np.dot(centred.T, centred) / sample_count
    # lambd, u = np.linalg.eig(S)
    _, eigvals, eigvecs = np.linalg.svd(S)
    if len(eigvals) < len(S):
        # Pad with zeros when SVD returns fewer singular values than features.
        eigvals = eigvals.tolist() + [0] * (len(S) - len(eigvals))
    return eigvals[:k], eigvecs[:k], mean
def psnr(img1, img2):
    """
    Peak signal-to-noise ratio (dB) between two 8-bit images.

    Both images are scaled to [0, 1] first, hence PIXEL_MAX = 1.

    Fix: the inline comment here had been split across two lines by
    encoding damage, leaving a bare non-comment line (a syntax error); it
    is rejoined and translated to English.
    """
    mse = np.mean((img1 / 255. - img2 / 255.) ** 2)
    # Guard against log(0): for (near-)identical images the MSE approaches
    # zero, so return the 100 dB cap instead of dividing by zero below.
    if mse < 1.0e-10:
        return 100
    PIXEL_MAX = 1
    return 20 * np.log10(PIXEL_MAX / np.sqrt(mse))
3980108084 | import os
import data
import header
import threading
def scan_destination_for_mp4_files():
    """
    scans the destination folder and collects all mp4 files names
    :return: void
    """
    # NOTE(review): the substring test matches any name containing "mp4",
    # not only files with a ".mp4" extension -- confirm that is intended.
    for file in os.listdir(header.source_folder_path):
        if "mp4" in file:
            header.source_files_list.append(str(file))
def extract_data_from_mp4_for_all_files_in_the_folder(self):
    """
    extracting all meta data from source files via exiftool
    :param self: unused -- kept so existing callers keep working
    :return:
    """
    # One worker thread per video; each shells out to exiftool and redirects
    # the "$accelerometer" stream into "<video-stem>.txt" next to the source.
    # NOTE(review): the command is built by string concatenation, so file or
    # folder names containing shell metacharacters would break (or abuse) it.
    # NOTE(review): threads are started but never joined, so callers cannot
    # tell when the extraction has actually finished.
    threads = []
    for file in header.source_files_list:
        threads.append(threading.Thread(target=os.system, args=("exiftool -ee -a -u -g -b -p \"$accelerometer\" "+header.source_folder_path+"\\"+str(file)+" > "+header.source_folder_path+"\\"+(str(file).split('.'))[0]+".txt",)))
    for thread in threads:
        thread.start()
def raw_data_file_hanling():
    # Placeholder -- not implemented yet.
    # NOTE(review): the name has a typo ("hanling"); kept unchanged because
    # callers elsewhere may already reference it.
    pass
def text_file_handling(file):
    """
    Read the extracted-metadata text file that corresponds to `file` (an mp4
    name) and store its contents in header.raw_data under "<stem>.txt".

    Fix: the original opened the file but never closed it; the `with`
    statement guarantees the handle is released even on error.
    """
    stem = (str(file).split('.'))[0]
    path = header.source_folder_path + "\\" + stem + ".txt"
    with open(path, "r") as file_io:
        header.raw_data[stem + ".txt"] = file_io.read()
def read_data_from_text_meta_data(self):
    """
    Spawn one thread per source file to load its extracted metadata text
    into header.raw_data (see text_file_handling).
    :param self: unused -- kept so existing callers keep working
    """
    # NOTE(review): threads are started but never joined; header.raw_data
    # may still be filling in when this function returns.
    threads = []
    for file in header.source_files_list:
        threads.append(threading.Thread(target=text_file_handling, args=(str(file),)))
    for thread in threads:
        thread.start()
| 311725154/TelemetryPyExtractor | mission.py | mission.py | py | 1,322 | python | en | code | 0 | github-code | 13 |
23007067189 | import asyncio
import discord
import frosch2010_Console_Utils as fCU
async def send_edit_embed_msg(term, term_words, tabuLanguage, channel):
    """
    Post the edit-card embed for `term` to `channel` and attach the four
    reaction controls; returns the sent message.

    Fix: the last add_reaction() call had its emoji string literal split
    across two source lines (an encoding/extraction artifact), which is a
    syntax error; the literal is rejoined onto one line.
    """
    embed = discord.Embed(title=tabuLanguage.tabu_card_term_prefix + term, description=tabuLanguage.tabu_edit_description, color=0x22a7f0)
    embed.add_field(name="###############################", value=term_words.replace(",", "\n"), inline=True)
    botMessage = await channel.send(embed=embed)
    await botMessage.add_reaction("โ๏ธ")
    await botMessage.add_reaction("โ๏ธ")
    await botMessage.add_reaction("๐")
    # TODO(review): the emoji literal below was corrupted by encoding damage
    # in the source; restore the originally intended emoji here.
    await botMessage.add_reaction("โ")
    return botMessage
async def remove_user_from_edit_list_if_possible(user, tabuVars):
    """
    Drop every piece of per-user edit state for `user` from tabuVars and
    delete the associated Discord messages where possible; deletion
    failures are logged, not raised.
    """
    # Pending "delete word" prompt.
    if user.id in tabuVars.tabu_edit_delete_word_list:
        del tabuVars.tabu_edit_delete_word_list[user.id]
    # Pending "delete card" prompt (also removes the bot's prompt message).
    if user.id in tabuVars.tabu_edit_delete_card_list:
        try:
            await tabuVars.tabu_edit_delete_card_list[user.id].delete()
        except:
            fCU.log_In_Console("Failed to delete 'edit delete card'-message.", "EDITSYS-RMU", "err")
        del tabuVars.tabu_edit_delete_card_list[user.id]
    # Open edit session: release the term entry derived from the session's
    # second message, then delete every message belonging to the session.
    if user.id in tabuVars.tabu_edit_messages_list:
        del tabuVars.tabu_edit_term_list[tabuVars.tabu_edit_messages_list[user.id][1].content.replace(tabuVars.tabu_edit_messages_list[user.id][1].content.split(" ")[0] + " ", "")]
        for msg in tabuVars.tabu_edit_messages_list[user.id][0]:
            try:
                await msg.delete()
            except:
                fCU.log_In_Console("Failed to delete edit message.", "EDITSYS-RMU", "err")
        del tabuVars.tabu_edit_messages_list[user.id]
    # Word buffer for the session.
    if user.id in tabuVars.tabu_edit_word_list:
        del tabuVars.tabu_edit_word_list[user.id]
async def delete_edit_msgs(reaction_msg, edit_msgs):
    """
    Delete every message of an edit session plus the reaction message that
    triggered the cleanup; deletion failures are logged, not raised.
    """
    for msg in edit_msgs:
        try:
            await msg.delete()
        except:
            fCU.log_In_Console("Failed to delete edit message.", "EDITSYS-DEL-MSGS", "err")
    try:
        await reaction_msg.delete()
    except:
        fCU.log_In_Console("Failed to delete edit reaction message.", "EDITSYS-DEL-MSGS", "err")
36574469465 | # Image Censor Application
# Assignment 1 - Image Enhancement in Spatial Domain
# 1. Blacken part of the image
# 2. Darken part of the image
# 3. Brighten pat of the image
import cv2 as cv
import numpy as np
import tkinter as tk
from tkinter import *
from tkinter import filedialog
from PIL import ImageTk, Image
class ImageCensor:
    def __init__(self,root):
        """Store the Tk root window and build the main menu UI immediately."""
        self.root = root
        self.menu()
#---GUI---#
    #---GUI---#
    def menu(self):
        """Build the whole main window: window chrome, title frame and the
        tool frame with all buttons, labels and the lighten/darken slider."""
        #---Setup---#
        self.root.title("Image Censor Application")
        self.root.iconbitmap("assets/photo-editor.ico")
        self.root.resizable(False, False)
        self.root.geometry("430x600+100+100")
        #---End Of Setup---#

        #---Title Frame---#
        self.title_frame = LabelFrame(self.root)
        self.title_frame.grid(row=0, column=0, columnspan=2, padx=20, pady=10)
        self.title_lbl = Label(self.title_frame, text="Image Censor Application", font=("Arial", 24), bg="#fff", width=20)
        self.title_lbl.pack()
        #---End Of Title Frame---#

        #---Tool Frame---#
        self.tool_frame = LabelFrame(self.root)
        self.tool_frame.grid(row=1, column=0, columnspan=2, padx=20, pady=20, sticky=W)
        # Open an image
        # NOTE(review): self.empty_lbl / self.type_lbl are reassigned several
        # times below and hold the result of .grid() (None) in some cases;
        # only the last assignment of each survives.
        self.empty_lbl = Label(self.tool_frame, text=" ").grid(row=1, column=0, pady=2)
        self.img_lbl = Label(self.tool_frame, text="1. Choose An Image To Censor:", font=("Arial", 16))
        self.img_lbl.grid(row=2, column=0, columnspan=2, padx=20, sticky=W)
        self.img_button = Button(self.tool_frame, text='Open File...', command=self.open, bg="#808080", fg="#fff", font=("Arial", 12), width=15)
        self.img_button.grid(row=3, column=0, columnspan=2, padx=20, ipadx=5, ipady=5, sticky=W)
        # Censor type
        self.empty_lbl = Label(self.tool_frame, text=" ").grid(row=4, column=0, pady=2)
        self.type_lbl = Label(self.tool_frame, text="2. Choose Type Of Censor:", font=("Arial", 16))
        self.type_lbl.grid(row=5, column=0, columnspan=2, padx=20, pady=5, sticky=W)
        self.blacken_button = Button(self.tool_frame, text='Blacken Effects', command=self.blacken, bg="#808080", fg="#fff", font=("Arial", 12), width=15)
        self.blacken_button.grid(row=6, column=0, padx=20, ipadx=5, ipady=5, sticky=W)
        self.darken_button = Button(self.tool_frame, text='Darken Effects', command=self.darken, bg="#808080", fg="#fff", font=("Arial", 12), width=15)
        self.darken_button.grid(row=7, column=0, padx=20, pady=5, ipadx=5, ipady=5, sticky=W)
        self.lighten_button = Button(self.tool_frame, text='Lighten Effects', command=self.lighten, bg="#808080", fg="#fff", font=("Arial", 12), width=15)
        self.lighten_button.grid(row=7, column=1, pady=5, ipadx=5, ipady=5, sticky=W)
        self.type_lbl = Label(self.tool_frame, text="Lighten/Darken (%) :", font=("Arial", 12))
        self.type_lbl.grid(row=8, column=0, padx=20, sticky=SW)
        # Strength slider shared by the lighten/darken effects (default 50%).
        self.slide = Scale(self.tool_frame, from_=0, to=100, orient=HORIZONTAL, length=150)
        self.slide.grid(row=8, column=1, ipadx=5, sticky=SW)
        self.slide.set(50)
        # Select ROI (instructions mirror OpenCV's selectROI key bindings)
        self.empty_lbl = Label(self.tool_frame, text=" ").grid(row=9, column=0, pady=2)
        self.type_lbl = Label(self.tool_frame, text="3. Select a Region Of Interest (ROI)", font=("Arial", 16))
        self.type_lbl.grid(row=10, column=0, columnspan=2, padx=20, pady=5, sticky=W)
        self.type_lbl = Label(self.tool_frame, text=" - Apply by pressing SPACE or ENTER button", font=("Arial", 12))
        self.type_lbl.grid(row=11, column=0, columnspan=2, padx=20, sticky=W)
        self.type_lbl = Label(self.tool_frame, text=" - Cancel by pressing C button", font=("Arial", 12))
        self.type_lbl.grid(row=12, column=0, columnspan=2, padx=20, sticky=W)
        # Action buttons
        self.save_button = Button(self.tool_frame, text='Save As...', command=self.save, bg="#808080", fg="#fff", font=("Arial", 12), width=15)
        self.save_button.grid(row=19, column=0, padx=20, pady=20, ipadx=5, ipady=5, sticky=W)
        self.clear_button = Button(self.tool_frame, text='Reset To Original', command=self.clear, bg="#808080", fg="#fff", font=("Arial", 12), width=15)
        self.clear_button.grid(row=19, column=1, pady=20, ipadx=5, ipady=5, sticky=W)
        #---End Of Tool Frame---#
#---Functions---#
# raw_img : the uploaded original image
# img : the image to apply changes
# temp_img : temporary image
# copy_img : copy of raw image
def open(self):
self.filename = filedialog.askopenfilename(
initialdir = "./img",
title = "Choose An Image",
filetypes=(
("JPG files", "*.jpg"),
("PNG files", "*.png"),
("TIF files", "*.tif"),
("All files", "*.*")
)
)
self.raw_img = cv.imread(self.filename)
self.copy_img = self.raw_img.copy()
self.img = self.raw_img
cv.destroyAllWindows()
cv.imshow('Image', self.img)
cv.moveWindow("Image", 550, 250)
def blacken(self):
cv.destroyAllWindows()
(x,y,z) = self.img.shape
self.rectangle = 255*np.ones((x,y,z), dtype="uint8")
self.roi = cv.selectROI(self.img)
self.rectangle[int(self.roi[1]):int(self.roi[1]+self.roi[3]),
int(self.roi[0]):int(self.roi[0]+self.roi[2])] = 0
self.temp_img = cv.bitwise_and(self.rectangle, self.img)
self.img = self.temp_img
cv.destroyAllWindows()
cv.imshow("Image", self.img)
cv.moveWindow("Image", 550, 250)
def darken(self):
cv.destroyAllWindows()
(x,y,z) = self.img.shape
self.rectangle = 255*np.ones((x,y,z), dtype="uint8")
self.roi = cv.selectROI(self.img)
self.rectangle[int(self.roi[1]):int(self.roi[1]+self.roi[3]),
int(self.roi[0]):int(self.roi[0]+self.roi[2])] = 0
#subtraction truncate arithmetic
for i in range(0,x):
for j in range(0,y):
for k in range(0,z):
if self.rectangle[i,j,k] != 255: #ignore not ROI
total = self.img[i,j,k] - (self.slide.get() / 100 * 255) #percentage = x/100 *255
if (total < 0):
self.img[i,j,k]= 0
else:
self.img[i,j,k] = total
cv.destroyAllWindows()
cv.imshow("Image", self.img)
cv.moveWindow("Image", 550, 250)
def lighten(self):
cv.destroyAllWindows()
(x,y,z) = self.img.shape
self.rectangle = 255*np.ones((x,y,z), dtype="uint8")
self.roi = cv.selectROI(self.img)
self.rectangle[int(self.roi[1]):int(self.roi[1]+self.roi[3]),
int(self.roi[0]):int(self.roi[0]+self.roi[2])] = 0
#addition truncate arithmetic
for i in range(0,x):
for j in range(0,y):
for k in range(0,z):
if self.rectangle[i,j,k] != 255: #ignore not ROI
total = self.img[i,j,k] + (self.slide.get() / 100 * 255) #percentage = x/100 *255
if (total > 255):
self.img[i,j,k] = 255
else:
self.img[i,j,k] = total
cv.destroyAllWindows()
cv.imshow("Image", self.img)
cv.moveWindow("Image", 550, 250)
def save(self):
original_file_type = self.filename.split('.')[-1]
filename = filedialog.asksaveasfilename()
filename = filename + "." + original_file_type
save_as_image = self.img
cv.imwrite(filename, save_as_image)
self.filename = filename
def clear(self):
self.img = self.copy_img
cv.destroyAllWindows()
cv.imshow("Image", self.img)
cv.moveWindow("Image", 550, 250)
#---End Of Functions---#
#---End Of Class---#
mainWindow = Tk()
ImageCensor(mainWindow)
mainWindow.mainloop() | tasyadew/image-censor-app | imageCensor.py | imageCensor.py | py | 8,128 | python | en | code | 0 | github-code | 13 |
20266852875 | from csv import reader
import sys
from tkinter import messagebox, ttk
from tkinter import *
import Relay
class solenoid_valve_control(Frame):
    """Tk frame with Pulse/ON/OFF buttons for 8 solenoid valves driven
    through a USB relay (the file-level `Relay` module)."""
    font_size = 20
    sv_num = 8        # number of solenoid-valve channels
    on_time_ms = 100  # default pulse width; shadowed by a StringVar in __init__
    def __init__(self, master=None):
        # Initialize the window (original comment was mojibake-garbled Japanese)
        super().__init__(master)
        self.master = master
        self.master.title('้ป็ฃๅผๆไฝ')
        self.pack()
        self.label1 = ttk.Label(self, text="on time", padding=(5,2))
        self.label1.grid(row=0, column=1, sticky=E)
        # NOTE: rebinds the class-level int attribute to a StringVar for the entry
        self.on_time_ms = StringVar()
        self.time_entry = ttk.Entry(self, textvariable=self.on_time_ms, width = 10, justify=RIGHT)
        self.time_entry.insert(0, "100")
        self.time_entry.grid(row=0, column=2)
        self.label2 = ttk.Label(self, text="[ms]", padding=(5,2))
        self.label2.grid(row=0, column=3, sticky=W)
        self.label_sv_num = []
        self.button_pulse = []
        self.button_on = []
        self.button_off = []
        # One row of widgets per valve; sv_*() are factories returning the
        # actual button callbacks (so each closure captures its channel).
        for i in range(self.sv_num):
            self.label_sv_num.append(ttk.Label(self, text="SV"+str(i+1), padding=(5,2)))
            self.label_sv_num[i].grid(row=i+1, column=0, sticky=E)
            self.button_pulse.append(ttk.Button(self, text="Pulse", command=self.sv_pulse(i)))
            self.button_pulse[i].grid(row=i+1, column=1, sticky=E)
            self.button_on.append(ttk.Button(self, text="ON", command=self.sv_on(i)))
            self.button_on[i].grid(row=i+1, column=2)
            self.button_off.append(ttk.Button(self, text="OFF", command=self.sv_off(i)))
            self.button_off[i].grid(row=i+1, column=3)
    def sv_pulse(self, ch):
        """Return a callback that pulses relay channel ch+1 for the entered on-time."""
        ch = ch+1
        def x():
            # NOTE(review): int() can raise ValueError here, *outside* the try
            # below that the error dialog implies should catch bad input —
            # consider moving it inside the try.
            on_time_ms = int(self.on_time_ms.get())
            try:
                on_time_s = on_time_ms/1000
                Relay.pulse(ch, on_time_s)
            except:
                messagebox.showinfo("ใจใฉใผ", "ใปใon timeใใฎๅ
ฅๅใๅ่งๆฐๅญใซใชใฃใฆใใใ\nใปUSBใชใฌใผใๆฅ็ถใใใฆใใใ\n็ขบ่ชใใฆใใ ใใ")
        return x
    def sv_on(self, ch):
        """Return a callback that switches relay channel ch+1 on."""
        ch = ch+1
        def x():
            try:
                Relay.on(ch)
            except:
                messagebox.showinfo("ใจใฉใผ", "USBใชใฌใผใๆฅ็ถใใใฆใใใ็ขบ่ชใใฆใใ ใใ")
        return x
    def sv_off(self, ch):
        """Return a callback that switches relay channel ch+1 off."""
        ch = ch+1
        def x():
            try:
                Relay.off(ch)
            except:
                messagebox.showinfo("ใจใฉใผ", "USBใชใฌใผใๆฅ็ถใใใฆใใใ็ขบ่ชใใฆใใ ใใ")
        return x
if __name__ == "__main__":
    # Build the Tk root, mount the valve-control frame, run the event loop.
    tk_root = Tk()
    gui = solenoid_valve_control(master=tk_root)
    gui.mainloop()
14629280087 | from hypothesis import given
from swagger_server.models import Leaf
from swagger_server.test.strategies import leaves
@given(leaf_1=leaves(), leaf_2=leaves())
def test_creating_leaves_with_existing_leaf_ids(leaf_1, leaf_2, create_leaf, sample_graph):
    """Posting a second leaf with a duplicate leaf_id must yield 409 and
    leave exactly one node in the graph."""
    leaf_2.leaf_id = leaf_1.leaf_id
    try:
        create_leaf(leaf_1, ensure=True)
        duplicate_response = create_leaf(leaf_2)
        assert duplicate_response.status_code == 409
        assert len(sample_graph.nodes) == 1
    finally:
        # always leave the shared graph empty for the next example
        sample_graph.delete_all()
@given(leaf=leaves())
def test_creating_leaves(leaf, create_leaf, sample_graph):
    """Creating a new leaf returns 201 with the created leaf echoed back."""
    try:
        resp = create_leaf(leaf)
        assert resp.status_code == 201
        assert Leaf.from_dict(resp.json) == leaf
    finally:
        # always leave the shared graph empty for the next example
        sample_graph.delete_all()
| Mykrobe-tools/mykrobe-atlas-distance-api | swagger_server/test/e2e/test_tree_post_controller.py | test_tree_post_controller.py | py | 815 | python | en | code | 0 | github-code | 13 |
19191703005 | import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, LeakyReLU, ConvLSTM2D, Concatenate, Reshape
import random
from tensorflow.keras.models import Model
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import os
from tensorflow.python.keras.callbacks import ModelCheckpoint
import config as cfg
import data_process
from keras_sequence import KerasSequence
def kernel(x):
    """Return a square (x, x) size tuple for Conv/Pool layer arguments."""
    return x, x
class UnFreezeWeight(tf.keras.callbacks.Callback):
    """Keras callback that makes every layer trainable once a given epoch
    is reached (for fine-tuning a partially frozen backbone)."""
    def __init__(self, freeze_before_epoch):
        super().__init__()
        # Epoch index at which all layers become trainable.
        self.freeze_before_epoch = freeze_before_epoch

    def on_epoch_begin(self, epoch, logs=None):
        if self.freeze_before_epoch != epoch:
            return
        # Unfreeze all weights FIRST, then force the cached train function to
        # be rebuilt so the newly-trainable variables are actually optimized.
        # (The original rebuilt the train function before flipping
        # `trainable`, so the rebuild still captured the frozen weight set.)
        print('set trainable to True.')
        for layer in self.model.layers:
            layer.trainable = True
        self.model.make_train_function(force=True)
def create_model(shape: tuple) -> 'keras.models.Model':
    """Build the two-frame model: a truncated ResNet50 backbone applied to
    each frame, stacked on the time axis, then ConvLSTM layers and Dense
    heads ending in a 4-channel sigmoid output.

    shape: per-sample input shape, expected (2, height, width, 3) — two
    consecutive frames (see the __main__ block below).
    """
    inputs = keras.layers.Input(shape=shape)
    """
    conv_1 = Conv2D(64, kernel(3), kernel(1), activation="tanh")
    conv_2 = Conv2D(96, kernel(3), kernel(1), activation="tanh")
    conv_3 = Conv2D(128, kernel(3), kernel(1), activation="tanh")
    """
    # Frozen ImageNet ResNet50; input shape is one frame (inputs[:, 0]).
    resnetCnn = tf.keras.applications.ResNet50(
        include_top=False,
        weights="imagenet",
        input_tensor = None,
        input_shape=inputs[:,0].shape[1:],
        pooling=None,
    )
    resnetCnn.trainable = False
    # Truncate the backbone just before the conv3_block1 stage.
    index = 0
    for layer in resnetCnn.layers:
        if layer.name == 'conv3_block1_1_conv':
            index = resnetCnn.layers.index(layer)
            break
    model = tf.keras.models.Model(resnetCnn.input, resnetCnn.layers[index-1].output)
    """
    index = 0
    for layer in resnetCnn.layers:
        if layer.name == 'conv3_block1_1_conv':
            index = resnetCnn.layers.index(layer)
            break
    del resnetCnn.layers[index:]
    while len(resnetCnn.layers) > index:
        resnetCnn._layers.pop()
    resnetCnn.trainable = False
    """
    """
    x = conv_1(inputs[:, 0])
    x = conv_2(x)
    x = conv_3(x)
    x = MaxPool2D()(x)
    y = conv_1(inputs[:, 1])
    y = conv_2(y)
    y = conv_3(y)
    y = MaxPool2D()(y)
    """
    # Run each frame through the shared backbone.
    x = model(inputs[:, 0])
    #x = MaxPool2D()(x)
    y = model(inputs[:, 1])
    #y = MaxPool2D()(y)
    # Re-add a time axis of length 1 per frame, then stack to (2, H, W, C).
    x = Reshape((1, x.shape[1], x.shape[2], x.shape[3]))(x)
    y = Reshape((1, y.shape[1], y.shape[2], y.shape[3]))(y)
    x = Concatenate(axis=1)([x, y])
    #x = ConvLSTM2D(64, kernel(3), kernel(1), return_sequences=True)(x)
    x = ConvLSTM2D(64, kernel(3), kernel(1), return_sequences=True, activation = LeakyReLU())(x)
    x = ConvLSTM2D(64, kernel(3), kernel(1), return_sequences=False, activation = LeakyReLU())(x)
    x = Dense(32, activation=LeakyReLU())(x)
    # 4 sigmoid channels — one mask per body part (see plot_y below).
    x = Dense(4, activation="sigmoid")(x)
    return keras.models.Model(inputs, x)
def plot_y(y):
    """Display the four per-part masks of *y* (H, W, 4) in a 2x2 grid."""
    fig, axes = plt.subplots(2, 2)
    part_names = ('Tรชte', 'Maillot', 'Bras droit', 'Bras gauche')
    # axes.flat iterates row-major: (0,0), (0,1), (1,0), (1,1) — matching
    # channels 0..3 exactly as the original hand-written version did.
    for channel, (axis, name) in enumerate(zip(axes.flat, part_names)):
        axis.imshow(y[:, :, channel])
        axis.set_title(name)
    plt.show()
if __name__ == '__main__':
    # Build and compile the two-frame model (input: 2 stacked RGB frames).
    modele = create_model((2, cfg.height, cfg.width, 3))
    modele.summary()
    modele.compile(optimizer=keras.optimizers.Adam(), loss='binary_crossentropy')
    # Early stopping on validation loss; unfreeze all weights at epoch 4.
    es = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0001, patience=2, restore_best_weights=True)
    cb = UnFreezeWeight(4)
    entrees = data_process.load_data()
    # Fixed seed so the train/val/test split is reproducible across runs.
    random.seed(42)
    random.shuffle(entrees)
    # Split: 80% train, 18% validation, 2% held-out test.
    train = KerasSequence(entrees[:int(len(entrees)*0.8)])
    validation = KerasSequence(entrees[int(len(entrees)*0.8):int(len(entrees)*0.98)])
    test = entrees[int(len(entrees)*0.98):]
    modele.fit(train, validation_data=validation, epochs=10, callbacks=[es, cb])
    #modele = keras.models.load_model('modele.h5')
    modele.save('modele.h5')
    # Manual visual inspection of the held-out samples.
    for e in test:
        print(e.fichier_2)
        x = e.x()
        y = e.y()
        pred = modele(np.array([x]))
        plt.imshow(x[1])
        plt.show()
        plot_y(y)
        plot_y(pred[0])
        # NOTE(review): drops into the debugger after every sample — intended
        # for interactive inspection only.
        breakpoint()
| Belzerion/SwimDetect | LSTM_ResNet.py | LSTM_ResNet.py | py | 4,303 | python | en | code | 0 | github-code | 13 |
3021846506 | import collections
import datetime
import os
import random
import sys
import struct
import threading
import time
def log_msg(msg):
    """Print *msg* prefixed with a seconds-resolution timestamp."""
    timestamp = str(datetime.datetime.now()).split('.')[0]
    print('{0}: {1}'.format(timestamp, msg))
class VIOSApp(threading.Thread):
    """Base class for a voice-driven app.

    Each app runs as a thread, owns a speech grammar (list of utterance
    choices), and talks to the speech front-end through the shared
    QueueHandler. Only the foregrounded ("active") app synthesizes speech
    and has its grammar active alongside the shell's.
    """
    def __init__(self, _queueHandler):
        threading.Thread.__init__(self)
        self.queueHandler = _queueHandler
        self.name = 'Default'
        # indicates whether initialization has finished
        self.initialized = False
        # indicates whether this app is currently foregrounded
        self.active = False
        # this flag is used to break app out of any loops on system shutdown
        self.interrupted = False
        # indicates if app has finished
        self.exited = False
        # current grammar
        self.choices = []
        # used to disable/reenable grammar
        self.disabledChoices = []
        self.disabledGrammar = False
        # used to re-prompt when app is foregrounded
        self.lastSynthesis = ''
        # used to protect changes to the app's current grammar
        self.grammarLock = threading.Lock()
    def cleanup(self):
        """Reset all state and clear this app's grammar on shutdown/exit."""
        self.initialized = False
        self.active = False
        self.choices = []
        self.disabledChoices = []
        self.disabledGrammar = False
        self.lastSynthesis = ''
        self.exited = True
        # causes grammar-matching to be reset for this instance
        self.queueHandler.grammarMapper.set_grammar(self.instanceId, [])
        # TODO: need to do something about QueueHandler registration here?
    def run(self):
        """Thread entry point: register with the QueueHandler and foreground."""
        # get unique instance id
        self.instanceId = self.queueHandler.get_instance_id()
        # register instance with QueueHandler
        self.queueHandler.register_instance(self.instanceId)
        # this also happens in foreground(), would be nice to reduce to 1 place
        self.queueHandler.grammarMapper.activeApp = self
        self.initialized = True
        self.active = True
        self.exited = False
        self.choices = []
        self.lastSynthesis = ''
    def background(self):
        """Demote this app: it stops synthesizing and its grammar goes dormant."""
        self.queueHandler.grammarMapper.activeApp = None
        self.active = False
    def foreground(self):
        """Promote this app: reactivate its grammar and repeat the last prompt."""
        self.active = True
        self.queueHandler.grammarMapper.activeApp = self
        # re-activate grammar
        self.set_choices(self.choices)
        # re-synthesize last output
        if self.lastSynthesis != '':
            self.synthesize(self.lastSynthesis)
    def disable_grammar(self):
        """Temporarily blank this app's grammar (remember it for re-enable)."""
        self.disabledChoices = self.choices
        self.disabledGrammar = True
        # causes grammar-matching to be reset for this instance
        self.set_choices([])
    def reenable_grammar(self):
        """Restore the grammar saved by disable_grammar()."""
        # causes grammar-matching to be re-enabled for this instance
        self.set_choices(self.disabledChoices)
        self.disabledChoices = []
        self.disabledGrammar = False
    def synthesize(self, text):
        """Speak *text* if foregrounded; always remember it for re-prompting.

        Blocks until the synthesizer reports availability before sending.
        """
        if self.initialized == False:
            return
        # remember text in case it must be re-synthesized when app is foregrounded
        self.lastSynthesis = text
        # if app is backgrounded, don't actually synthesize anything
        if self.active == False:
            # limit a backgrounded app's synthesizes to one per second
            time.sleep(1)
            return
        # start by requesting notification of synthesization availability
        command = self.send_command('synthesisDone')
        # block for confirmation of availability
        self.read(messageId = command.messageId)
        # send synthesis command
        self.queueHandler.write(Message(self.instanceId,
                                        'speechSynth',
                                        self.queueHandler.get_message_id(),
                                        text))
    def trigger_grammar_update(self):
        """Push the merged (shell + active app) grammar to the recognizer."""
        # sort of a hacky way of not executing if shell hasn't initialized yet
        if '1' not in self.queueHandler.instanceRecvDequeDict:
            return
        # get grammar choices through GrammarMapper
        choices = self.queueHandler.grammarMapper.get_grammar()
        setMsg = Message()
        setMsg.instanceId = self.instanceId
        setMsg.type = 'grammarSet'
        setMsg.messageId = self.queueHandler.get_message_id()
        # build grammar choices into comma-delimited string
        choice_str = ''
        for i in range(len(choices)):
            choice_str += choices[i] + ','
        choice_str = choice_str.rstrip(',')
        setMsg.args = choice_str
        # send grammar set
        self.queueHandler.write(setMsg)
        return setMsg
    def set_choices(self, newChoices):
        """Replace this app's grammar under the lock; push it if foregrounded."""
        self.grammarLock.acquire()
        try:
            # remember new grammar
            self.choices = list(newChoices)
            self.queueHandler.grammarMapper.set_grammar(self.instanceId, list(newChoices))
            # do an actual update if app is active
            if self.active:
                self.trigger_grammar_update()
        finally:
            self.grammarLock.release()
        # although we're not actually necessarily generating a Message() on a
        # set_choices anymore, certain app code needs to be able to do blocking
        # reads using a MessageId tied to both the grammar and ongoing synthesis
        # (so that it can stop blocking on a grammar choice once synthesis is
        # complete).
        # it should continue to work for now if we just return a valid message
        # with a unique MessageId each time set_choices() is called.
        return Message('', '', self.queueHandler.get_message_id(), '')
    def send_command(self, type, args = '', messageId = None):
        """Send a typed Message to the front-end; returns the sent Message."""
        if self.initialized == False:
            return
        command = Message()
        command.instanceId = self.instanceId
        command.type = type
        if messageId == None:
            command.messageId = self.queueHandler.get_message_id()
        else:
            command.messageId = messageId
        command.args = args
        # send command
        self.queueHandler.write(command)
        return command
    def read(self, messageId = None, block = True):
        """Read the next message args for this instance (optionally by id)."""
        if self.initialized == False:
            return None
        result = self.queueHandler.read(self.instanceId, messageId = messageId, block = block)
        if result != None:
            return result.args
        return None
    # sets grammar choices, outputs prompt, and blocks until it can return with input
    # set choices to [] to trigger dictation-mode
    # set choices to None to use existing grammar
    def grammar_prompt_and_read(self, newChoices, prompt):
        if self.initialized == False:
            return None
        # previously the grammarset was resulting in a messageId which was then passed
        # to the read() down below ... thereby associating the read with the grammarset.
        # taking that out due to multi-instance handling being moved from .Net-side to
        # just python-side. Not sure if this is going to affect certain blocking call
        # behavior, so just going to test it
        # set instance grammar, if any choices provided
        if newChoices != None:
            self.set_choices(newChoices)
        # start synthesis, if any prompt provided
        if prompt != '':
            self.synthesize(prompt)
        # wait for feedback
        result = self.read()
        if prompt != '':
            self.send_command('break')
        log_msg('grammar_prompt_and_read(): ' + result)
        return result
    def start_dictation(self, endDictationToken, prompt):
        """Prompt (optionally), enter dictation mode, and block for the result."""
        if self.initialized == False:
            return None
        if prompt != '':
            self.synthesize(prompt)
        self.send_command('startDictation,' + endDictationToken)
        # get dictation result
        result = self.read()
        log_msg('start_dictation(): ' + result)
        return result
# used to build list of currently valid grammar choices to send to the recognizer
# also can map a match back to the app it belongs to
class GrammarMapper():
    """Tracks each app instance's grammar, merges them for the recognizer,
    and maps a recognized utterance back to the owning instance.

    Instance id '1' is assumed to be the shell, whose grammar is always live.
    """
    def __init__(self):
        self.instanceDict = {}             # instanceId -> list of grammar choices
        self.instanceLock = threading.Lock()  # NOTE(review): declared but not used below
        self.activeApp = None              # currently foregrounded VIOSApp (or None)
    def register_instance(self, instanceId):
        """Create an empty grammar entry for a new instance."""
        self.instanceDict[instanceId] = []
    def set_grammar(self, instanceId, choices):
        """Replace the stored grammar for *instanceId*."""
        self.instanceDict[instanceId] = choices
    # returns list of current grammar choices from across all apps
    def get_grammar(self):
        # start with shell grammar
        choices = list(self.instanceDict['1'])
        # extend with active app's grammar
        activeAppChoices = None
        if self.activeApp is not None:
            activeAppChoices = list(self.instanceDict[self.activeApp.instanceId])
        # return [] to represent dictation active in app or shell
        if activeAppChoices == [] and self.activeApp.disabledGrammar == False:
            return []
        elif activeAppChoices == None and choices == []:
            return []
        # merge active app and shell grammars
        if activeAppChoices is not None:
            choices += activeAppChoices
        # weed out duplicates between active app and shell
        temp_dict = {}
        new_list = []
        for choice in choices:
            if choice not in temp_dict:
                temp_dict[choice] = choice
                new_list.append(choice)
        return new_list
    # returns the instance id of the app a grammar match belongs to
    def get_instance(self, match):
##        if self.activeAppId is not None:
##            appGrammarList = self.instanceDict[self.activeAppId]
##            if appGrammarList == [] or match in self.instanceDict[self.activeAppId]:
##                return self.activeAppId
##        else:
##            # this is slightly weird since if there is no active app, the only grammar
##            # matches that should occur would be the shell. In fact, isn't get_instance()
##            # really only deciding between the active_app and the shell? Couldn't this
##            # entire function conceivably be reduced to an if-statement? Possibly it
##            # will eventually work differently and multiple apps can be receiving input,
##            # but for now it seems simpler.
        # first instance whose grammar contains the match wins
        for key1, value1 in self.instanceDict.items():
            if match in value1:
                return key1
        return None
    # returns contents of GrammarMapper as a string
    def dump(self):
        """Debug helper: render the active app id and every stored grammar."""
        dump_str = 'activeAppId={0}\n'.format(self.activeApp.instanceId)
        for key1, value1 in self.instanceDict.items():
            dump_str += '\tappId={0}\n'.format(key1)
            for value2 in value1:
                dump_str += '\t\t{0}\n'.format(value2)
        return dump_str
class Message():
    """One IPC message: instanceId|type|messageId|args, framed '>>...<<'."""
    def __init__(self, _instanceId = None, _type = None, _messageId = None, _args = None):
        self.instanceId = _instanceId
        self.type = _type
        self.messageId = _messageId
        self.args = _args
    def from_str(self, rawStr):
        """Parse a '>>a|b|c|d<<' wire string into this instance; returns self.

        Raises Exception on bad framing or field count.
        """
        # verify string format
        if rawStr.startswith('>>') == False or \
           rawStr.endswith('<<') == False:
            raise Exception('Invalid raw string to Message(): ' + rawStr)
        # Slice off exactly the two-character frame markers. The previous
        # lstrip('>')/rstrip('<') removed *all* leading/trailing marker
        # characters, silently truncating args that legitimately ended
        # with '<' (and ids starting with '>').
        trimStr = rawStr[2:-2]
        # split string into fields
        strElems = trimStr.split('|')
        # verify number of fields
        if len(strElems) != 4:
            raise Exception('Invalid number of fields in Message(): ' + rawStr)
        # store fields
        self.instanceId, self.type, self.messageId, self.args = strElems
        return self
    def to_str(self):
        """Serialize to the wire format consumed by from_str()."""
        return '>>' + self.instanceId + '|' + \
               self.type + '|' + \
               self.messageId + '|' + \
               self.args + '<<'
class QueueHandler(threading.Thread):
    """Central message broker between the speech front-end pipes and the
    per-instance app deques. Allocates instance/message ids, demultiplexes
    incoming messages via GrammarMapper, and serializes outgoing writes.
    """
    def __init__(self, _recvPipe, _sendPipe):
        threading.Thread.__init__(self)
        self.recvPipe = _recvPipe
        self.sendPipe = _sendPipe
        self.writeLock = threading.Lock()  # serializes writes on the shared pipe
        self.nextInstanceId = 1
        self.nextMessageId = 1
        self.instanceRecvDequeDict = {}    # instanceId -> deque of pending Messages
        self.instanceLock = threading.Lock()
        # initialize GrammarMapper that helps manage grammars across apps
        # this is attached to QueueHandler for convenient app access
        self.grammarMapper = GrammarMapper()
    def get_instance_id(self):
        """Allocate the next unique instance id (thread-safe)."""
        instanceId = ''
        self.instanceLock.acquire()
        instanceId = str(self.nextInstanceId)
        self.nextInstanceId += 1
        self.instanceLock.release()
        return instanceId
    def get_message_id(self):
        """Allocate the next unique message id (thread-safe)."""
        messageId = ''
        self.instanceLock.acquire()
        messageId = str(self.nextMessageId)
        self.nextMessageId += 1
        self.instanceLock.release()
        return messageId
    def register_instance(self, instanceId):
        """Create the receive deque and grammar entry for a new instance."""
        self.instanceLock.acquire()
        self.instanceRecvDequeDict[instanceId] = collections.deque()
        self.grammarMapper.register_instance(instanceId)
        self.instanceLock.release()
    def run(self):
        """Thread entry point: spin up the daemon reader thread."""
        # start reader thread
        self.readerThread = threading.Thread(target = self.process_reads)
        self.readerThread.setDaemon(True)
        self.readerThread.start()
    # used to wake up sleeping apps on system shutdown
    # possibly should become part of VIOSApp eventually
    def wakeup(self, instanceId):
        self.instanceRecvDequeDict[instanceId].append(Message(instanceId,
                                                              'grammarMatch',
                                                              self.get_message_id(),
                                                              'wakeup'))
    def process_reads(self):
        """Reader loop: deserialize pipe messages and route each to the
        owning instance's deque (grammar matches routed via GrammarMapper)."""
        while True:
            # deserialize a message from incoming pipe
            message = Message().from_str(pipe_read(self.recvPipe))
            # use GrammarMapper to look up receiving app for grammar matches
            instanceRecvDeque = None
            instanceId = None
            try:
                if message.type == 'grammarMatch':
                    instanceId = self.grammarMapper.get_instance(message.args)
                elif message.type == 'dictationResult':
                    instanceId = self.grammarMapper.activeApp.instanceId
                else:
                    # for all other msgs, rely on message's instance id
                    instanceId = message.instanceId
            except:
                log_msg('Caught exception in QueueHandler while looking up instance: {0}'.format(message.to_str()))
            if instanceId is not None:
                instanceRecvDeque = self.instanceRecvDequeDict[instanceId]
            # GrammarMapper should never not return a valid instance
            if instanceRecvDeque == None:
                log_msg('process_reads(): no instanceRecvDeque found. Dumping grammarMapper:\n{0}'.format(self.grammarMapper.dump()))
            # perform proxy function by placing message on instance's deque
            instanceRecvDeque.append(message)
            log_msg('Message delivered to instance {0}'.format(instanceId))
            # drop oldest message if instance's deque exceeds maximum
            if len(instanceRecvDeque) > 10:
                instanceRecvDeque.popleft()
            # avoid busy loop
            time.sleep(.1)
    # performs a blocking or non-blocking Message object read for a given instance
    def read(self, instanceId, messageId = None, block = True):
        # TODO: verify or fix for thread-safety ...
        # reference instance-specific deque
        recvDeque = self.instanceRecvDequeDict[instanceId]
        # Either pull messageId-specific message or just any instance message
        message = None
        if messageId == None:
            try:
                message = recvDeque.pop()
            except:
                # implement blocking message retrieval
                while block:
                    time.sleep(.2)
                    try:
                        message = recvDeque.pop()
                        break
                    except:
                        pass
        else:
            # NOTE: due to a change to handling multiple grammar sets, namely
            # handling it now on the python-side rather than in .Net, I am
            # going to have to hack a change in below to not filter based on
            # MessageId if the message is a grammarMatch. The reason for this
            # is that the .Net code is no longer remembering the mapping
            # from a grammar match to a particular app/message. Now only the
            # python code knows that relationship.
            # Essentially now when an app calls read(messageId), it will always
            # return if there is any grammar match belonging to the app. So,
            # this means the same app could not have two reads blocking on
            # different messageIds. I don't think this currently poses an issue.
            # iterate searching for messageId match
            for msg in recvDeque:
                if msg.messageId == messageId or msg.type == 'grammarMatch' or msg.type == 'dictationResult':
                    # pop matching message from middle of deque
                    message = msg
                    recvDeque.remove(msg)
                    break
            if message == None:
                # implement blocking message retrieval
                while block:
                    time.sleep(.2)
                    for msg in recvDeque:
                        if msg.messageId == messageId or msg.type == 'grammarMatch' or msg.type == 'dictationResult':
                            # pop matching message from middle of deque
                            message = msg
                            recvDeque.remove(msg)
                            break
                    # break out of block loop
                    if message != None:
                        break
        return message
    # thread-protected write on shared pipe
    def write(self, message):
        self.writeLock.acquire()
        pipe_write(self.sendPipe, message.to_str())
        self.writeLock.release()
# writes msg to pipe using simple protocol of length followed by msg
def pipe_write(pipe, writeString):
    """Frame *writeString* as <uint32 length><ascii payload> and write it."""
    log_msg('Sending message: ' + writeString)
    length_header = struct.pack('I', len(writeString))
    pipe.write(length_header + writeString.encode('ascii'))
    pipe.seek(0)
# reads msg from pipe using simple protocol of length followed by msg
def pipe_read(pipe):
    """Read one length-prefixed ascii message from *pipe* and return it.

    Returns '' on a short header or a zero-byte payload read;
    raises on a fully closed connection.
    """
    # read length of expected
    readBytes = pipe.read(4)
    # seek to beginning of stream
    pipe.seek(0)
    # error check
    bytesRead = len(readBytes)
    if len(readBytes) < 4:
        log_msg('Returned {0} bytes, expecting 4.'.format(len(readBytes)))
        if bytesRead == 0:
            # NOTE(review): NameError is a misleading exception type for a
            # connection failure, but callers may rely on it — left as-is.
            raise NameError('Error on connection.')
        return ''
    # convert length
    stringLength = struct.unpack('I', readBytes)[0]
    # read expected number of bytes
    bytesRead = 0
    readBytes = bytes()
    # NOTE(review): initialized as a list but always rebound to bytes before use
    currentReadBytes = []
    while (bytesRead < stringLength):
        currentReadBytes = pipe.read(stringLength - bytesRead)
        # seek to beginning of stream
        pipe.seek(0)
        if len(currentReadBytes) == 0:
            log_msg('0 bytes read error.')
            return ''
        readBytes = readBytes + currentReadBytes
        bytesRead += len(currentReadBytes)
    # convert string
    readString = readBytes.decode('ascii')
    log_msg('Read message: ' + readString)
    return readString
# waits for yes/no (or break)
def pipe_wait_for_confirm(queueHandler, command):
    """Ask the user to confirm *command* with a yes/no grammar choice.

    Delegates to pipe_wait_for_choice (defined elsewhere in this module).
    """
    return pipe_wait_for_choice(queueHandler,
                                'Confirm ' + command + '. Yes or No.',
                                [ 'yes', 'no' ])
| manesajian/VIOS | vioslib.py | vioslib.py | py | 20,297 | python | en | code | 0 | github-code | 13 |
3698195897 | DOUBLE_ISLAND_POINT = 543
# Numeric spot identifiers for Sunshine Coast surf breaks — presumably the
# upstream API's spot ids (TODO confirm against the service these are sent to).
TEA_TREE_NOOSA = 544
COOLUM_BEACH = 545
THE_BLUFF = 546
HAPPYS_CALOUNDRA = 547
AGNES_WATER = 1001
FRASER_ISLAND = 1002
ALEXANDRIA_BAY_NOOSA = 1003
SUNSHINE_BEACH = 1004
PIN_CUSHION_MAROOCHYDORE = 1005
KAWANA = 1006
POINT_CARTWRIGHT = 1007
MOFFATS = 1008
NORTH_STRADBROKE_ISLAND = 1009
SOUTH_STRADBROKE_ISLAND = 1010
| hhubbell/python-msw | msw/spots/australasia/sunshine_coast.py | sunshine_coast.py | py | 342 | python | en | code | 1 | github-code | 13 |
23826411409 | #!/usr/bin/python
# Script to estimate the reef structure underneath the corals. Used to help close the meshes of individual colonies
# extracted from a reef record (e.g. for the Palau data).
from osgeo import gdal
import numpy as np
# import cv2
import matplotlib.pyplot as plt
# import os
# import scipy.ndimage
# import time
from itertools import islice,product
# from scipy.signal import find_peaks
#
from mpl_toolkits.mplot3d import Axes3D
from sklearn import gaussian_process
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
def min2D(data):
    """Minimum of *data*, unless the no-data sentinel 10 sits at the first
    or second-to-last position, in which case 10 is returned unchanged."""
    if 10 in (data[0], data[-2]):
        return 10
    return np.min(data)
def window(seq, n=2):
    """Yield successive width-n tuples sliding over *seq*.

    s -> (s0, s1, ..., s[n-1]), (s1, s2, ..., sn), ...
    """
    iterator = iter(seq)
    win = tuple(islice(iterator, n))
    if len(win) == n:
        yield win
    for elem in iterator:
        win = win[1:] + (elem,)
        yield win
# read the DEM into a numpy array
DEMpth = '/home/nader/scratch/palau/Palau_DEMs/022_pats_dropoff_circle_01/pats_dropoff_022_circle01_DEM.tif'
MAKSpth = '/home/nader/scratch/palau/Pats_01_Colonies/segmented/coral_binary_mask.tif'
ds = gdal.Open(DEMpth)
RB = ds.GetRasterBand(1)
dmat = np.array(RB.ReadAsArray())
# read the coral binary mask the same way
ds = gdal.Open(MAKSpth)
RB = ds.GetRasterBand(1)
mask = np.array(RB.ReadAsArray())
# cv2.imshow("Depth", depth)
# plt.imshow(depth)
# make areas of no data 10m above the surface so they don't interfere with the minimum filter
# (dmat[0][0] is taken as the raster's no-data value)
depth_log = dmat==dmat[0][0]
depth_log = depth_log.astype(int)*(dmat[0][0]-10)
depth = dmat-depth_log
# plt.imshow(depth)
# plt.colorbar()
# plt.show()
# single scanline used for the 1-D inspection plots below
rowN = 2000
dline = depth[:][rowN]
Mline = mask[:][rowN]
# precomputed minimum-filter rasters (window sizes 100 and 200)
minz_100 = np.load('min_filter_100.npy')
minz_200 = np.load('min_filter_200.npy')
mline100 = minz_100[:][rowN]
mline200 = minz_200[:][rowN]
#
# # peaks = find_peaks(-dline,distance=100)
#
# # tst = []
# # wind = window(dline,300)
# # for w in wind:
# #     if w[0]==10:
# #         tst.append(10)
# #     else:
# #         tst.append(min(w))
# plt.plot(dline)
# plt.plot(mline100)
# plt.plot(mline200)
# # plt.plot(tst)
# # plt.plot(peaks[0],dline[peaks[0]])
# plt.plot(Mline)
#
#
# plt.show()
#
# plt.imshow(((depth - minz_100)<0.01).astype(int))
# plt.colorbar()
# plt.show()
# generate training points for GP
# push no-data areas back down so they don't become minima
depth_log = dmat==dmat[0][0]
depth_log = depth_log.astype(int)*(-10)
depth=depth-depth_log
# plt.imshow(depth)
# plt.show()
# training points: pixels where the depth equals the local (100-px) minimum
x0,x01 = np.where(depth==minz_100)
# plt.scatter(x0,x01)
# plt.show()
print(len(x0))
X = np.empty((len(x0),2), int)
y = np.zeros((len(x0)))
for i in range(len(x0)):
    X[i]= [int(x0[i]),int(x01[i])]
    y[i] = depth[X[i][0]][X[i][1]]
# Input space
rs = 100
x1 = np.linspace(X[:,0].min(), X[:,0].max(),num=rs) #p
x2 = np.linspace(X[:,1].min(), X[:,1].max(),num=rs) #q
x = (np.array([x1, x2])).T
# Gaussian-process surface fit through the local-minimum samples
kernel = C(1.0, (1e-3, 1e3)) * RBF([100,100], (1e-7, 1e7))
gp = gaussian_process.GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=15)
gp.fit(X, y)
x1x2 = np.array(list(product(x1, x2)))
y_pred, MSE = gp.predict(x1x2, return_std=True)
X0p, X1p = x1x2[:,0].reshape(rs,rs), x1x2[:,1].reshape(rs,rs)
Zp = np.reshape(y_pred,(rs,rs))
# alternative way to generate equivalent X0p, X1p, Zp
# X0p, X1p = np.meshgrid(x1, x2)
# Zp = [gp.predict([(X0p[i, j], X1p[i, j]) for i in range(X0p.shape[0])]) for j in range(X0p.shape[1])]
# Zp = np.array(Zp).T
fig = plt.figure(figsize=(10,8))
# ax = fig.add_subplot(111)
# pcm = ax.pcolormesh(X0p, X1p, Zp)
# ax.invert_yaxis()
# fig.colorbar(pcm, ax=ax)
# plt.scatter(x0,x1)
# 3-D view of the predicted understory surface
ax = fig.add_subplot(111, projection='3d')
surf = ax.plot_surface(X0p, X1p, Zp, rstride=1, cstride=1, cmap='jet', linewidth=0, antialiased=False)
# plt.scatter(x0,x01)
# plt.show()
plt.figure()
plt.imshow(depth)
plt.colorbar()
plt.scatter(x0,x01)
# plt.show()
# rasterize the GP prediction back into image coordinates
predIm = np.zeros(np.shape(depth))
it = 0
for el in x1x2:
    x_c,y_c = np.round(el).astype(int)
    predIm[x_c][y_c] = y_pred[it]
    it+=1
plt.figure()
plt.imshow(predIm)
plt.show()
# plt.imshow(dmat)
# plt.show()
# print(len(tst),len(dline))
# window_size = 300
# min_img = np.zeros((1,np.shape(depth)[1]-window_size+1))
# for ro in range(0,np.shape(depth)[0]):
#     dline = depth[:][ro]
#     # plt.plot(dline)
#
#     tst = []
#     wind = window(dline,window_size)
#     for w in wind:
#         tst.append(min(w))
#     tst = np.reshape(np.array(tst),(1,np.shape(depth)[1]-window_size+1))
#     min_img = np.vstack((min_img,tst))
#     # plt.plot(dline)
#     # plt.plot(tst)
#     # plt.pause(0.2)
#     # plt.cla()
#
# plt.imshow(min_img)
# plt.show()
29008231150 |
import argparse
import os
from common.functionutil import makedir, removefile, removedir, join_path_names, is_exist_exec, is_exists_hexec, \
list_files_dir, basename, basename_filenoext, fileextension, get_regex_pattern_filename, \
find_file_inlist_with_pattern
from common.exceptionmanager import catch_error_exception
from dataloaders.imagefilereader import ImageFileReader, NiftiReader, DicomReader
BIN_DICOM2NIFTI = '/home/antonio/Libraries/mricron_dcm2niix/dcm2niix'
BIN_DECOMPDICOM = 'dcmdjpeg'
BIN_HR22NIFTI = '/home/antonio/Libraries/image-feature-extraction/build/tools/ConvertHR2'
def main(args):
    """Convert every image in ``args.input_dir`` to compressed NIfTI (.nii.gz).

    The conversion route is chosen from the extension of the first input
    file: '.dcm' (decompress with dcmdjpeg, convert with dcm2niix, then fix
    the flipped dimension), '.hr2' (external ConvertHR2 tool) or '.mhd'
    (re-written with metadata copied from a matching reference file).
    Results are written to ``args.output_dir``.
    """
    def names_output_files(in_name: str):
        # Output name: same basename with a .nii.gz extension.
        return basename_filenoext(in_name) + '.nii.gz'
    list_input_files = list_files_dir(args.input_dir)
    makedir(args.output_dir)
    # The extension of the FIRST file decides how the whole folder is treated.
    files_extension = fileextension(list_input_files[0])
    if files_extension == '.dcm':
        files_type = 'dicom'
        def tmpfile_template(in_name: str):
            # Name of the intermediate (decompressed) dicom file.
            return basename_filenoext(in_name) + '_dec.dcm'
        tmpsubdir = join_path_names(args.input_dir, 'tmp')
        makedir(tmpsubdir)
        if not is_exist_exec(BIN_DICOM2NIFTI):
            message = 'Executable to convert dicom to nifti not found in: %s' % (BIN_DICOM2NIFTI)
            catch_error_exception(message)
        if not is_exists_hexec(BIN_DECOMPDICOM):
            message = 'Executable to decompress dicom not found in: %s' % (BIN_DECOMPDICOM)
            catch_error_exception(message)
    elif files_extension == '.hr2':
        files_type = 'hr2'
    elif files_extension == '.mhd':
        files_type = 'mhd'
        # .mhd conversion needs a reference folder to copy metadata from.
        if not args.input_refdir:
            message = 'need to set argument \'input_refdir\''
            catch_error_exception(message)
        list_input_files = list_files_dir(args.input_dir, '*.mhd')
        list_reference_files = list_files_dir(args.input_refdir)
        pattern_search_infiles = get_regex_pattern_filename(list_reference_files[0])
        # NOTE(review): this checks the HR2 converter inside the '.mhd'
        # branch; it looks like it belongs under '.hr2' — confirm.
        if not is_exist_exec(BIN_HR22NIFTI):
            message = 'Executable to convert hr2 to nifti not found in: %s' % (BIN_HR22NIFTI)
            catch_error_exception(message)
    else:
        message = 'Extension file \'%s\' not known...' % (files_extension)
        catch_error_exception(message)
    # ******************************
    for in_file in list_input_files:
        print("\nInput: \'%s\'..." % (basename(in_file)))
        out_file = join_path_names(args.output_dir, names_output_files(in_file))
        print("Output: \'%s\'..." % (basename(out_file)))
        if files_type == 'dicom':
            case_file = basename(in_file)
            in_tmp_file = join_path_names(tmpsubdir, tmpfile_template(in_file))
            # 1st step: decompress input dicom file
            command_string = BIN_DECOMPDICOM + ' ' + in_file + ' ' + in_tmp_file
            print("%s" % (command_string))
            os.system(command_string)
            # 2nd step: convert decompressed dicom
            command_string = BIN_DICOM2NIFTI + ' -o ' + args.output_dir + ' -f ' + case_file + ' -z y ' + in_tmp_file
            print("%s" % (command_string))
            os.system(command_string)
            # remove tmp input file and aux. .json file
            out_json_file = join_path_names(args.output_dir, basename_filenoext(out_file) + '.json')
            removefile(in_tmp_file)
            removefile(out_json_file)
            # 3rd step: fix dims of output nifti image and header affine info
            # (THE OUTPUT NIFTI BY THE TOOL dcm2niix HAVE ONE DIMENSION FLIPPED)
            out_image = ImageFileReader.get_image(out_file)
            out_image = NiftiReader.fix_dims_image_from_dicom2niix(out_image)
            metadata_affine = NiftiReader.get_image_metadata_info(out_file)
            image_position = DicomReader.get_image_position(in_file)
            # Patch the affine's Y translation with the dicom image position.
            metadata_affine[1, -1] = image_position[1]
            metadata_affine = NiftiReader.fix_dims_image_affine_matrix_from_dicom2niix(metadata_affine)
            print("Fix dims of output nifti: \'%s\', with dims: \'%s\'" % (out_file, out_image.shape))
            ImageFileReader.write_image(out_file, out_image, metadata=metadata_affine)
        elif files_type == 'hr2':
            command_string = BIN_HR22NIFTI + ' ' + in_file + ' ' + out_file
            print("%s" % (command_string))
            os.system(command_string)
        elif files_type == 'mhd':
            inout_image = ImageFileReader.get_image(in_file)
            # Find the reference file matching this input by filename pattern.
            in_reference_file = find_file_inlist_with_pattern(basename(in_file), list_reference_files,
                                                             pattern_search=pattern_search_infiles)
            in_metadata = ImageFileReader.get_image_metadata_info(in_reference_file)
            print("Metadata from file: \'%s\'..." % (basename(in_reference_file)))
            ImageFileReader.write_image(out_file, inout_image, metadata=in_metadata)
    # endfor
    if files_type == 'dicom':
        removedir(tmpsubdir)
if __name__ == "__main__":
    # CLI entry point: positional input/output folders plus an optional
    # reference folder (only needed when converting .mhd files).
    cli = argparse.ArgumentParser()
    cli.add_argument('input_dir', type=str)
    cli.add_argument('output_dir', type=str)
    cli.add_argument('--input_refdir', type=str, default=None)
    parsed_args = cli.parse_args()
    # Echo the parsed arguments before starting the conversion.
    print("Print input arguments...")
    for arg_name, arg_value in vars(parsed_args).items():
        print("\'%s\' = %s" % (arg_name, arg_value))
    main(parsed_args)
| antonioguj/bronchinet | src/scripts_util/convert_images_to_nifti.py | convert_images_to_nifti.py | py | 5,426 | python | en | code | 42 | github-code | 13 |
16682365904 | import os
import pathlib
import time
from prometheus_client import start_http_server, Gauge, Enum
def main():
    """Run a Prometheus exporter publishing the sizes of a fixed set of files.

    Reads the listen port and the polling interval from the environment,
    starts the HTTP metrics endpoint, then loops forever refreshing the
    ``file_size`` gauge for every tracked file.
    """
    # Port the exporter listens on.
    exporter_port: int = int(os.getenv("EXPORTER_PORT", "9876"))
    # Seconds between two metric collections.
    polling_interval_seconds: int = int(os.getenv("POLLING_INTERVAL_SECONDS", "5"))
    # Metric label value -> path of the file whose size is collected.
    file_map = {
        "file_1": "file_1.txt",
        "file_2": "file_2.txt",
        "file_3": "file_3.txt",
    }
    file_size_metric = Gauge(
        name="file_size",
        documentation="file size metrics",
        labelnames=["file_map"]
    )
    # Start the exporter service: Prometheus can scrape ip:<port> for the data.
    start_http_server(exporter_port)
    while True:
        for label, path in file_map.items():
            # Collect the current file size in bytes
            # (raises FileNotFoundError if the file is missing).
            metric = pathlib.Path(path).stat().st_size
            # Write the sample into the gauge, keyed by its label.
            # (The original comment here was mojibake split across two
            # lines, which made the file a syntax error.)
            file_size_metric.labels(label).set(metric)
        # Wait for the next collection cycle.
        time.sleep(polling_interval_seconds)


if __name__ == '__main__':
    main()
| zhengtong0898/notebook | devops/alertmanager/multiple_file/exporter.py | exporter.py | py | 1,054 | python | en | code | 4 | github-code | 13 |
14567353400 | import numpy as np
import cv2

# Load the damaged photo and the mask of pixels to repair
# (non-zero mask pixels are the ones filled in by inpainting).
img = cv2.imread('bgorig.png')
mask = cv2.imread('bgmask.png', 0)  # flag 0: load as single-channel grayscale
# Radius (in pixels) of the neighbourhood considered around each inpainted point.
inpaintRadius = 5
# Run both OpenCV inpainting algorithms for a side-by-side comparison.
dstTelea = cv2.inpaint(img, mask, inpaintRadius, cv2.INPAINT_TELEA)
dstNs = cv2.inpaint(img, mask, inpaintRadius, cv2.INPAINT_NS)
cv2.imshow('Telea', dstTelea)
cv2.imshow('Navier-Stokes', dstNs)
cv2.waitKey(0)  # block until a key is pressed
cv2.destroyAllWindows()
| coollog/VideoBarcode | matcher/infill.py | infill.py | py | 351 | python | en | code | 1 | github-code | 13 |
9821233263 | import datetime
import db.db_handler as database
from flask import request,make_response,jsonify
def GetMaterialOnWS():
    """Return all rows of mat_d_materialonws as a JSON array (HTTP 200).

    Fix: the original never closed the cursor or the connection (its
    sibling endpoints do), leaking a DB connection per request; cleanup
    now happens in a ``finally`` block.
    """
    conn = database.connector()
    cursor = conn.cursor()
    try:
        cursor.execute("SELECT * FROM mat_d_materialonws")
        # Column names come from the cursor description; each row becomes a dict.
        row_headers = [x[0] for x in cursor.description]
        json_data = [dict(zip(row_headers, data)) for data in cursor.fetchall()]
    finally:
        cursor.close()
        conn.close()
    return make_response(jsonify(json_data), 200)
def GetMaterialStockOnWsByIdStock(idStock):
    """Return stock and workstation login/logout info for one stock id.

    Fix: ``idStock`` was concatenated directly into the SQL string
    (SQL-injectable); it is now passed as a bound query parameter.
    """
    conn = database.connector()
    cursor = conn.cursor()
    query = ("SELECT a.id,a.merk,b.workstationCode,b.login,b.logout "
             "FROM mat_d_materialstock a "
             "JOIN mat_d_materialonws01 b ON b.materialStock = a.id "
             "WHERE a.id = %s")
    cursor.execute(query, (idStock,))
    # Build one dict per row keyed by the selected column names.
    row_headers = [x[0] for x in cursor.description]
    json_data = [dict(zip(row_headers, data)) for data in cursor.fetchall()]
    cursor.close()
    conn.close()
    return make_response(jsonify(json_data), 200)
def AddMaterialLogin(idOperasi):
    """Record a material login event on the workstation of operation ``idOperasi``.

    Reads ``idMat`` from the JSON request body and inserts a row into
    cpl_matlogin. Returns {"status": "berhasil"} on success, otherwise
    {"status": "gagal"}.

    Fixes over the original:
    - the INSERT listed five columns but only four %s placeholders, so
      every execute() failed and the function always returned "gagal";
    - the SELECT interpolated ``idOperasi`` into the SQL string
      (SQL-injectable); it now uses a bound parameter;
    - cursor/connection are closed in ``finally`` (the original leaked
      them on the error path).
    """
    conn = database.connector()
    cursor = conn.cursor()
    stasiunKerja = ""
    try:
        # Look up the workstation assigned to this operation.
        cursor.execute("SELECT stasiunKerja FROM cpl_oprlayak WHERE id = %s",
                       (idOperasi,))
        for index in cursor.fetchall():
            stasiunKerja = index[0]
        print("WS : ", stasiunKerja)
        query = ("INSERT INTO cpl_matlogin(stasiunKerja,idMat,waktu,keterangan,status01)"
                 "VALUES(%s,%s,%s,%s,%s)")
        data = request.json
        idMat = data["idMat"]
        waktu = datetime.datetime.now()
        keterangan = "material berhasil login"
        status01 = waktu  # login timestamp doubles as the status01 marker
        values = (stasiunKerja, idMat, waktu, keterangan, status01)
        cursor.execute(query, values)
        conn.commit()
        hasil = {"status": "berhasil"}
    except Exception as e:
        hasil = {"status": "gagal"}
        print("Error", str(e))
    finally:
        cursor.close()
        conn.close()
    return hasil
def GetMaterialLogin():
    """Return every row of cpl_matlogin as a JSON array (HTTP 200)."""
    conn = database.connector()
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM cpl_matlogin")
    # Column names from the cursor description; one dict per fetched row.
    column_names = [column[0] for column in cursor.description]
    payload = [dict(zip(column_names, record)) for record in cursor.fetchall()]
    cursor.close()
    conn.close()
    return make_response(jsonify(payload), 200)
| lunaticXOXO/INKA-Full | backend/material/MaterialOnWorkstation/controller/MaterialOnWorkstationController.py | MaterialOnWorkstationController.py | py | 2,481 | python | en | code | 2 | github-code | 13 |
'''
3. Implement strStr()

Given two strings haystack and needle, return the index (0-based) of the
first occurrence of needle in haystack, or -1 if needle does not occur.
'''


def strStr(haystack: str, needle: str) -> int:
    """Return the first index of ``needle`` inside ``haystack``, or -1.

    An empty needle matches at index 0, mirroring str.find semantics.
    """
    window = len(needle)
    # A needle longer than the haystack can never match.
    if window > len(haystack):
        return -1
    # Slide a window of len(needle) across haystack and compare slices.
    for start in range(len(haystack) - window + 1):
        if haystack[start:start + window] == needle:
            return start
    return -1


print(strStr('hello', 'll'))
print(strStr('aaaaa', 'bba'))
print(strStr('', ''))
| 15149295552/Code | Month06/day21/exercise03.py | exercise03.py | py | 864 | python | zh | code | 1 | github-code | 13 |
#!/usr/bin/env python3
# Python3
#
# Simple array class that dynamically saves temp files to disk to conserve memory
#
import logging
import pickle
from datetime import timedelta
from itertools import islice
from os import makedirs, remove
from os.path import exists
from shutil import rmtree
from time import time

startime = time()

logging.getLogger(__name__).addHandler(logging.NullHandler())


class Array():
    """1D array that spills chunks of ``maxitems`` values to pickle files.

    Values are buffered in memory (``self.a``) and written out as numbered
    chunk files whenever the buffer reaches ``maxitems`` entries, so long
    arrays keep at most one chunk in RAM.

    Fixes over the original:
    - integer ``__getitem__``/``__setitem__``/``__delitem__`` now work for
      items still in the in-memory tail (the original always opened a
      chunk file and crashed for indices past the last dump);
    - the slice branch of ``__setitem__`` was broken (it indexed the
      sliced values with themselves and called a no-op ``set()``); it now
      assigns ``value`` to every index selected by the slice.

    Known limitations (unchanged by design): indices must be non-negative,
    and ``__delitem__`` on an already-dumped chunk leaves that file one
    item short, which skews later position arithmetic and ``__len__``.
    """

    def __init__(self, name="Array", cachedirectory=".cache/", a=None, maxitems=1):
        """Create the array.

        name: label used in log messages.
        cachedirectory: parent folder for this array's unique temp subfolder.
        a: optional iterable of initial values.
        maxitems: in-memory buffer size, in MILLIONS of items.
        """
        # How much data to keep in memory before dumping to disk.
        self.maxitems = int(maxitems * 1e6)
        self.fc = 0  # file counter: number of chunk files written so far
        self.uuid = id(self)  # unique while this instance is alive
        self.name = name
        logging.debug("[largearray.Array] Instance %d %s created | %s" % (self.uuid, self.name, str(timedelta(seconds=time() - startime))))
        # Make a unique subfolder (unique as long as the array exists).
        self.dir = cachedirectory + str(self.uuid)
        if exists(self.dir):
            rmtree(self.dir)
        makedirs(self.dir)
        logging.debug("[largearray.Array] Instance %d caches in %s with %d items per file" % (self.uuid, self.dir, self.maxitems))
        self.path = self.dir + "/temp%d.dat"  # name template of chunk files
        self.hastrim = False
        self.a = []  # in-memory tail: items not yet written to disk
        if a is not None:
            self.extend(a)

    def append(self, n):
        """Append n to the array; spill the buffer to disk at maxitems."""
        if self.hastrim:
            raise Exception("ERROR: Class [array] methods append() and extend() cannot be called after method trim()")
        self.a.append(n)
        if len(self.a) >= self.maxitems:
            logging.debug("[largearray.Array] Instance %d dumps temp %d | %s" % (self.uuid, self.fc, str(timedelta(seconds=time() - startime))))
            with open(self.path % self.fc, 'wb') as pfile:
                pickle.dump(self.a, pfile)  # Dump the data
            self.a = []
            self.fc += 1

    def trim(self):
        """Dump any remaining buffered values to disk (possibly < maxitems).

        NOTE: Only run this after all possible appends and extends are done.
        WARNING: can only be called once; append()/extend() are forbidden
        afterwards (the partial chunk would corrupt the index arithmetic).
        """
        if len(self.a) > 0:
            if self.hastrim:
                raise Exception("ERROR: Class [array] method trim() can only be called once")
            self.hastrim = True
            self.trimlen = len(self.a)  # size of the final, short chunk
            logging.debug("[largearray.Array] Instance %d trims temp %d | %s" % (self.uuid, self.fc, str(timedelta(seconds=time() - startime))))
            with open(self.path % self.fc, 'wb') as pfile:
                pickle.dump(self.a, pfile)  # Dump the data
            self.a = []
            self.fc += 1

    def extend(self, values):
        """Convenience method to append multiple values."""
        for n in values:
            self.append(n)

    def __iter__(self):
        """Iterate all values in order, loading chunk files as needed."""
        for fc in range(self.fc):
            logging.debug("[largearray.Array] Instance %d iterates temp %d | %s" % (self.uuid, fc, str(timedelta(seconds=time() - startime))))
            with open(self.path % fc, 'rb') as pfile:
                yield from pickle.load(pfile)
        yield from self.a

    def __repr__(self):
        """Show only the values currently in memory ('...' marks dumped data)."""
        s = "[..., " if self.fc else "["
        return s + ", ".join(map(str, self.a)) + "]"

    def __getitem__(self, index):
        """Get the item at an integer index or the items of a slice.

        Slices iterate the whole array from the start (islice), so they
        load every chunk up to the end of the slice.
        """
        if isinstance(index, slice):
            return list(islice(self, index.start, index.stop, index.step))
        fc, i = divmod(index, self.maxitems)
        if fc == self.fc:  # item lives in the in-memory tail
            return self.a[i]
        with open(self.path % fc, 'rb') as pfile:
            return pickle.load(pfile)[i]

    def __len__(self):
        """Length of the array, including values on disk."""
        if self.hastrim:
            # All full chunks plus the final short chunk written by trim().
            return (self.fc - 1) * self.maxitems + self.trimlen
        return self.fc * self.maxitems + len(self.a)

    def __delattr__(self, item):
        """``del <array>.a`` deletes the entire array (incl. disk cache)."""
        if item == 'a':
            super().__delattr__('a')
            rmtree(self.dir)
            logging.debug("[largearray.Array] Instance %d deletes all array data | %s" % (self.uuid, str(timedelta(seconds=time() - startime))))
        else:
            super(Array, self).__delattr__(item)

    def __setitem__(self, key, value):
        """Set the item at an integer index, or every item of a slice, to value."""
        if isinstance(key, slice):
            # Assign value to each index selected by the slice.
            for idx in range(*key.indices(len(self))):
                self[idx] = value
            return
        fc, i = divmod(key, self.maxitems)
        if fc == self.fc:  # still buffered in memory
            self.a[i] = value
            return
        # Rewrite the chunk file containing the target index.
        with open(self.path % fc, 'rb') as pfile:
            l = pickle.load(pfile)
        l[i] = value
        remove(self.path % fc)
        with open(self.path % fc, 'wb') as pfile:
            pickle.dump(l, pfile)

    def __delitem__(self, key):
        """Delete the item at an integer index.

        WARNING: deleting from an already-dumped chunk leaves that file one
        item short, skewing later lookups and __len__ (kept as-is).
        """
        fc, i = divmod(key, self.maxitems)
        if fc == self.fc:  # still buffered in memory
            del self.a[i]
            return
        with open(self.path % fc, 'rb') as pfile:
            l = pickle.load(pfile)
        del l[i]
        remove(self.path % fc)
        with open(self.path % fc, 'wb') as pfile:
            pickle.dump(l, pfile)
| logwet/genome-imager | largearray.py | largearray.py | py | 5,693 | python | en | code | 0 | github-code | 13 |
14759201707 | # -*- coding: utf-8 -*-
"""import_settings.py - Contains ImportSettings class definition."""
# This file is part of Telemetry-Grapher.
# Telemetry-Grapher is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Telemetry-Grapher is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY
# without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Telemetry-Grapher. If not, see < https: // www.gnu.org/licenses/>.
__author__ = "Ryan Seery"
__copyright__ = 'Copyright 2019 Max-Planck-Institute for Solar System Research'
__license__ = "GNU General Public License"
import io
import csv
import copy
import pandas as pd
from PyQt5.QtWidgets import (QApplication, QDialog,
QVBoxLayout, QHBoxLayout, QSizePolicy,
QSplitter, QPushButton, QLabel,
QAbstractItemView, QHeaderView,
QTableView, QTableWidget, QTableWidgetItem)
from PyQt5.QtGui import QKeySequence, QIcon, QBrush, QColor
from PyQt5.QtCore import Qt, QSortFilterProxyModel
from telemetry_grapher.classes.internal.pandas_model import PandasModel
class ImportSettings(QDialog):
    """Modal dialog to review/edit per-file CSV parse settings before import.

    Top half: one row per file with editable Datetime Format, Header Row,
    Index Column and Skip Rows cells. Bottom half: a colour-coded preview
    of the selected file. Confirm copies the edited kwargs back into
    ``ui.path_kwargs`` via :meth:`apply_kwargs`.
    """

    def __init__(self, parent, group_files):
        """Build the dialog for ``group_files`` (list of file names).

        ``parent`` is the group tab (gt); its parent chain gives the data
        manager (dm) and the main UI (ui), whose ``path_kwargs`` holds the
        per-path parse settings being edited.
        """
        super().__init__()
        self.parent = parent
        self.setWindowTitle('Review Import Settings')
        self.setWindowIcon(QIcon('rc/satellite.png'))
        gt = self.parent
        dm = gt.parent
        ui = dm.parent
        self.resize(1000,500)
        self.group_files = group_files
        vbox = QVBoxLayout()
        splitter = QSplitter(Qt.Vertical)
        # --- kwargs table: one row per file, 5 columns ---
        self.kwargTable = QTableWidget()
        w = self.kwargTable
        v_header = w.verticalHeader()
        v_header.setDefaultSectionSize(v_header.minimumSectionSize())
        v_header.setSectionResizeMode(QHeaderView.Fixed)
        w.setRowCount(len(self.group_files))
        w.setColumnCount(5)
        w.setHorizontalHeaderLabels(['File',
                                     'Datetime Format',
                                     'Header Row',
                                     'Index Column',
                                     'Skip Rows'])
        h_header = w.horizontalHeader()
        h_header.setSectionResizeMode(0, QHeaderView.Stretch)
        h_header.setSectionResizeMode(1, QHeaderView.Fixed)
        h_header.setSectionResizeMode(2, QHeaderView.Fixed)
        h_header.setSectionResizeMode(3, QHeaderView.Fixed)
        h_header.setSectionResizeMode(4, QHeaderView.Fixed)
        # Selecting a row refreshes the preview; editing a cell validates it.
        w.itemSelectionChanged.connect(self.preview_df)
        w.cellChanged.connect(self.update_path_kwargs)
        splitter.addWidget(w)
        # --- read-only preview table backed by a pandas model ---
        self.previewTable = QTableView()
        self.proxy = QSortFilterProxyModel()
        self.model = PandasModel(pd.DataFrame())
        self.proxy.setSourceModel(self.model)
        self.previewTable.setModel(self.proxy)
        self.previewTable.setEditTriggers(QAbstractItemView.NoEditTriggers)
        v_header = self.previewTable.verticalHeader()
        v_header.setDefaultSectionSize(v_header.minimumSectionSize())
        v_header.hide()
        splitter.addWidget(self.previewTable)
        # --- button row ---
        hbox = QHBoxLayout()
        self.auto_detect_button = QPushButton('Auto-Detect')
        self.auto_detect_button.clicked.connect(self.auto_detect)
        hbox.addWidget(self.auto_detect_button)
        self.reset_button = QPushButton('Reset')
        self.reset_button.clicked.connect(self.reset)
        hbox.addWidget(self.reset_button)
        # Label used for inline validation feedback.
        self.feedback = QLabel()
        self.feedback.setSizePolicy(QSizePolicy.MinimumExpanding,
                                    QSizePolicy.Preferred)
        hbox.addWidget(self.feedback)
        self.ok_button = QPushButton('Confirm')
        self.ok_button.clicked.connect(self.accept)
        self.ok_button.clicked.connect(self.apply_kwargs)
        hbox.addWidget(self.ok_button)
        self.cancel_button = QPushButton('Cancel')
        self.cancel_button.clicked.connect(self.reject)
        hbox.addWidget(self.cancel_button)
        vbox.addWidget(splitter)
        vbox.addLayout(hbox)
        self.setLayout(vbox)
        # Keep a pristine copy for Reset; edit a deep copy until Confirm.
        self.original_kwargs = copy.deepcopy(ui.path_kwargs)
        self.current_kwargs = copy.deepcopy(self.original_kwargs)
        for i, file in enumerate(self.group_files):
            kwargs = self.current_kwargs[gt.path_dict[file]]
            self.kwargTable.setItem(i, 0, QTableWidgetItem(file))
            self.kwargTable.item(i, 0).setFlags(Qt.ItemIsSelectable)
            self.update_row_kwargs(i, kwargs)
        self.kwargTable.setCurrentCell(0, 1)
        self.ok_button.setFocus()
        # In debug mode, auto-accept so the dialog never blocks.
        if self.parent.parent.debug:
            self.accept()

    def update_row_kwargs(self, row, kwargs):
        """Fill columns 1-4 of table ``row`` from a parse-kwargs dict."""
        w = self.kwargTable
        w.setItem(row, 1, QTableWidgetItem(str(kwargs['format'])))
        w.setItem(row, 2, QTableWidgetItem(str(kwargs['header'])))
        w.setItem(row, 3, QTableWidgetItem(str(kwargs['index_col'])))
        w.setItem(row, 4, QTableWidgetItem(str(kwargs['skiprows'])))

    def auto_detect(self):
        """Re-detect parse settings for every selected row.

        Detection is delegated to ``self.parent.interpret_data(path)``,
        which returns (datetime_format, header_row, index_col, skiprows).
        """
        gt = self.parent
        selection = self.kwargTable.selectedIndexes()
        rows = set(sorted(index.row() for index in selection))
        for row in rows:
            file = self.kwargTable.item(row, 0).text()
            path = gt.path_dict[file]
            dtf, r, c, skiprows = gt.interpret_data(path)
            kwargs = {'format':dtf,
                      'header':r,
                      'index_col':c,
                      'skiprows':skiprows}
            self.update_row_kwargs(row, kwargs)

    def reset(self):
        """Restore every row to the kwargs captured when the dialog opened."""
        gt = self.parent
        for i, file in enumerate(self.group_files):
            kwargs = self.original_kwargs[gt.path_dict[file]]
            self.kwargTable.setItem(i, 0, QTableWidgetItem(file))
            self.kwargTable.item(i, 0).setFlags(Qt.ItemIsSelectable)
            self.update_row_kwargs(i, kwargs)

    def update_path_kwargs(self, row, col):
        """Validate the edited cell and store the value in current_kwargs.

        Invalid input restores the previous value, shows a feedback
        message and leaves current_kwargs untouched. Signals are blocked
        while cells are rewritten so this slot does not re-trigger itself.
        """
        gt = self.parent
        pick_kwargs = {1:'format', 2:'header', 3:'index_col', 4:'skiprows'}
        if col not in pick_kwargs: return
        kwarg = pick_kwargs[col]
        file = self.kwargTable.item(row, 0).text()
        path = gt.path_dict[file]
        text = self.kwargTable.item(row, col).text().strip()
        ### input permissions
        # NO INPUT CONTROL ON FORMAT FIELD, SO YOU BETTER KNOW WHAT YOU'RE DOIN
        self.kwargTable.blockSignals(True)
        if kwarg == 'format':
            value = text
        elif kwarg == 'header':
            # Blank or 'none' means "no header row".
            if not text or text.lower() == 'none':
                value = None
            else:
                try:
                    value = int(text)
                except ValueError:
                    self.feedback.setText('Header row must be an integer'
                                          'less than 9 or left blank.')
                    self.kwargTable.setItem(row, col, QTableWidgetItem(
                            str(self.current_kwargs[path][kwarg])))
                    self.kwargTable.blockSignals(False)
                    return
        elif kwarg == 'index_col':
            try:
                value = int(text)
            except ValueError:
                self.feedback.setText('Index column must be an integer.')
                self.kwargTable.setItem(row, col, QTableWidgetItem(
                        str(self.current_kwargs[path][kwarg])))
                self.kwargTable.blockSignals(False)
                return
        elif kwarg == 'skiprows':
            if text.lower() == 'none':
                value = []
            else:
                # Accept single digits 0-9, ignoring commas/spaces/brackets;
                # anything else rejects the whole entry.
                value = []
                for i in text:
                    if i.isdigit() and int(i) not in value:
                        value.append(int(i))
                    elif i in ', []': # ignore commas, spaces, and brackets
                        continue
                    else:
                        self.feedback.setText('Only list of integers from 0-9'
                                              'or "None" allowed.')
                        self.kwargTable.setItem(row, col, QTableWidgetItem(
                                str(self.current_kwargs[path][kwarg])))
                        self.kwargTable.blockSignals(False)
                        return
                value = sorted(value)
            # 'none' or an empty list normalises to None.
            if not value: value = None
        self.feedback.setText('')
        self.kwargTable.setItem(row, col, QTableWidgetItem(str(value)))
        self.kwargTable.blockSignals(False)
        self.current_kwargs[path][kwarg] = value
        self.preview_df()

    def preview_df(self):
        """Show a colour-coded preview of the single selected file.

        Orange column = index column, dark grey = skipped/pre-header rows,
        blue row = header. A multi-row selection clears the preview.
        """
        gt = self.parent
        selection = self.kwargTable.selectedIndexes()
        if selection:
            rows = sorted(index.row() for index in selection)
            # can only preview one row at a time.
            if all(x==rows[0] for x in rows):
                # Populate preview table with preview of selected
                row = selection[0].row()
                file = self.kwargTable.item(row, 0).text()
                path = gt.path_dict[file]
                shown_df = gt.df_preview[path]
                self.model = PandasModel(shown_df)
                self.proxy.setSourceModel(self.model)
                h_header = self.previewTable.horizontalHeader()
                h_header.setSectionResizeMode(0, QHeaderView.ResizeToContents)
                # Highlight selected rows/columns according to parse_kwargs
                header = self.current_kwargs[path]['header']
                index_col = self.current_kwargs[path]['index_col']
                skiprows = self.current_kwargs[path]['skiprows']
                # if skiprows == 'None': skiprows = None
                if index_col is not None:
                    for r in range(len(shown_df.index)):
                        self.model.setData(self.model.index(r,int(index_col)),
                                           QBrush(QColor.fromRgb(255, 170, 0)),
                                           Qt.BackgroundRole)
                if skiprows is not None:
                    for r in skiprows:
                        for c in range(len(shown_df.columns)):
                            self.model.setData(self.model.index(r,c),
                                               QBrush(Qt.darkGray),
                                               Qt.BackgroundRole)
                if header is not None:
                    for r in range(int(header)):
                        for c in range(len(shown_df.columns)):
                            self.model.setData(self.model.index(r,c),
                                               QBrush(Qt.darkGray),
                                               Qt.BackgroundRole)
                    for c in range(len(shown_df.columns)):
                        self.model.setData(self.model.index(int(header),c),
                                           QBrush(QColor.fromRgb(0, 170, 255)),
                                           Qt.BackgroundRole)
            else:
                self.model = PandasModel(pd.DataFrame())
                self.proxy.setSourceModel(self.model)
                # if hasattr(self, 'proxy'): self.proxy.deleteLater()
        else:
            self.model = PandasModel(pd.DataFrame())
            self.proxy.setSourceModel(self.model)
            # if hasattr(self, 'proxy'): self.proxy.deleteLater()

    def keyPressEvent(self, event):
        """Enables single row copy to multirow paste.
        Column dimensions must be the same, using Ctrl+C/V."""
        if event.matches(QKeySequence.Copy):
            selection = self.kwargTable.selectedIndexes()
            if selection:
                rows = sorted(index.row() for index in selection)
                # can only copy one row at a time.
                if all(x==rows[0] for x in rows):
                    columns = sorted(index.column() for index in selection)
                    selection_col_span = columns[-1] - columns[0] + 1
                    # Serialise the selected cells to CSV on the clipboard.
                    table = [[''] * selection_col_span]
                    for index in selection:
                        column = index.column() - columns[0]
                        table[0][column] = index.data()
                    stream = io.StringIO()
                    csv.writer(stream).writerows(table)
                    QApplication.clipboard().setText(stream.getvalue())
        if event.matches(QKeySequence.Paste):
            selection = self.kwargTable.selectedIndexes()
            if selection:
                model = self.kwargTable.model()
                buffer = QApplication.clipboard().text()
                rows = sorted(index.row() for index in selection)
                columns = sorted(index.column() for index in selection)
                selection_col_span = columns[-1] - columns[0] + 1
                # Paste only when clipboard width matches the selection width.
                reader = csv.reader(io.StringIO(buffer), delimiter='\t')
                arr = [row[0].split(',') for row in reader]
                arr = arr[0]
                if selection_col_span == len(arr):
                    for index in selection:
                        column = index.column() - columns[0]
                        model.setData(model.index(index.row(), index.column()),
                                      arr[column])
        # Close dialog from escape key.
        if event.key() == Qt.Key_Escape:
            self.close()

    def apply_kwargs(self):
        """Copy the accepted per-file kwargs back into ``ui.path_kwargs``.

        Skiprows entries at or above the header row are dropped first
        (rows past the header cannot be skipped by the reader).
        """
        gt = self.parent
        dm = gt.parent
        ui = dm.parent
        # read current kwargs into ui.path_kwargs
        for file in self.group_files:
            path = gt.path_dict[file]
            k = self.current_kwargs[path]
            if k['skiprows']:
                k['skiprows'] = [i for i in k['skiprows'] if i > k['header']]
            for kwarg in ('format', 'header', 'index_col', 'skiprows'):
                ui.path_kwargs[path][kwarg] = self.current_kwargs[path][kwarg]
| rysoseeryous/Telemetry-Grapher | classes/manager/import_settings.py | import_settings.py | py | 14,251 | python | en | code | 5 | github-code | 13 |
38235452899 | import pytesseract
from typing import List
from numpy import ndarray
from bpmn_redrawer_backend.bpmn.bpmn_elements import Participant, Element
from bpmn_redrawer_backend.bpmn.predictions import Text
from bpmn_redrawer_backend.commons.utils import get_nearest_element
def get_text_from_img(img: ndarray) -> List[Text]:
    """Extract all the text from an image using OCR with pytesseract

    Parameters
    ----------
    img: ndarray
        The image to use for the text extraction (as Numpy ndarray)

    Returns
    -------
    List[Text]
        The list of detected Text
    """
    text_list = []
    # --psm 12: sparse text with orientation/script detection.
    d = pytesseract.image_to_data(
        img, output_type=pytesseract.Output.DICT, config="--psm 12"
    )
    n_boxes = len(d["level"])
    for i in range(n_boxes):
        text = d["text"][i]
        # Skip OCR noise: empty strings; any non-alphanumeric char before the
        # last one; multi-char tokens whose final char is neither alphanumeric
        # nor '-' / '?'; and tokens made of one repeated character (this last
        # test also drops every single-character token).
        if (
            len(text) == 0
            or any(not c.isalnum() for c in text[:-1])
            or len(text) > 1
            and not (text[-1].isalnum() or text[-1] in "-?")
            or text.lower().count(text[0].lower()) == len(text)
        ):
            continue
        # Bounding box of the accepted word, in image pixel coordinates.
        (x, y, w, h) = (d["left"][i], d["top"][i], d["width"][i], d["height"][i])
        # cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text_list.append(([x, y, w, h], text))
    # cv2.imshow("img", img)
    # cv2.waitKey(0)
    return [Text(txt, *box) for box, txt in text_list]
def link_text(texts: List[Text], elements: List[Element]):
    """Attach each detected Text to its nearest diagram Element.

    Participants first get their anchor point moved to the middle of their
    left edge, then every text snippet is appended to the ``name`` list of
    the closest element. The (mutated) element list is returned.
    """
    for element in elements:
        if isinstance(element, Participant):
            prediction = element.prediction
            # Anchor participants at the vertical midpoint of the left edge.
            prediction.center = (
                prediction.top_left_x,
                prediction.top_left_y + prediction.height / 2,
            )
    for snippet in texts:
        get_nearest_element(snippet.center, elements).name.append(snippet)
    return elements
| PROSLab/BPMN-Redrawer | backend/bpmn_redrawer_backend/api/services/ocr_service.py | ocr_service.py | py | 2,115 | python | en | code | 4 | github-code | 13 |
14578484626 | from time import time
data = 3017957


def josephus(n):
    """Winning seat (1-based) of the classic Josephus game with k=2.

    For n = 2**m + l the survivor sits at 2*l + 1, which is exactly the
    binary representation of n rotated left by one bit.
    """
    highest_power_of_two = 1 << (n.bit_length() - 1)
    return 2 * (n - highest_power_of_two) + 1


print(josephus(data))
class Item:
    """Node of a doubly-linked circle; ``pos`` is the 0-based seat index."""

    def __init__(self, pos):
        self.pos = pos
        self.n = None  # next neighbour (clockwise)
        self.p = None  # previous neighbour (counter-clockwise)

    def steal(self):
        """Remove this node from the circle by relinking its neighbours."""
        self.p.n = self.n
        self.n.p = self.p


def eliminate(n):
    """Simulate the 'steal from directly across' Josephus variant.

    Returns the winning elf's 1-based position.

    Ported from Python 2 to Python 3: ``xrange`` -> ``range`` and the
    list index ``n/2`` -> ``n//2`` (true division made it a float and
    raised TypeError under Python 3).
    """
    circle = [Item(x) for x in range(n)]
    for elf in range(n):
        circle[elf].n = circle[(elf + 1) % n]
        circle[elf].p = circle[(elf - 1) % n]
    current = circle[0]
    mid = circle[n // 2]  # the elf sitting across from `current`
    for elf in range(n - 1):
        mid.steal()
        mid = mid.n
        if (n - elf) % 2 == 1:
            # When the remaining circle size is odd, "across" advances
            # one extra seat.
            mid = mid.n
        current = current.n
    return current.pos + 1


# eliminate(data)
def mathy_part2(n, pos=1):
    """Closed-form survivor (1-based) for the 'steal from across' variant.

    Let p be the largest power of three (times the starting ``pos``) not
    exceeding n.  If n == p the winner is n itself; otherwise the winner
    is n - p for n <= 2p, and grows twice as fast beyond that.
    """
    power = pos
    while power * 3 <= n:
        power *= 3
    if power == n:
        return n
    return n - power + max(n - 2 * power, 0)
| kryptn/Challenges | Advent/2016/day_19/nineteen.py | nineteen.py | py | 865 | python | en | code | 1 | github-code | 13 |
16863779603 | from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import map
from builtins import object
import sys
import networkx as nx
import greedy_chicagoan as gs
import math
import random
# Length (bp) assigned to the gap edge created by a join.
default_gapsize = 100


def same_component(s1, s2, ccd):
    """True when ``s1`` and ``s2`` resolve to the same root in ``ccd``.

    ``ccd`` maps each node to its parent; a root is a value that never
    appears as a key (union-find lookup without path compression).
    """
    root1 = ccd[s1]
    while root1 in ccd:
        root1 = ccd[root1]
    root2 = ccd[s2]
    while root2 in ccd:
        root2 = ccd[root2]
    return root1 == root2


def merge_components(s1, s2, ccd):
    """Union the components of ``s1`` and ``s2`` by linking their roots."""
    root1 = ccd[s1]
    while root1 in ccd:
        root1 = ccd[root1]
    root2 = ccd[s2]
    while root2 in ccd:
        root2 = ccd[root2]
    if root1 != root2:
        ccd[root1] = root2


class ScaffoldEdit(object):
    """A candidate scaffolding edit: a score, joins to add, breaks to undo."""

    def __init__(self, repr):
        # Accept either a (score, breaks, joins) tuple or a dict with
        # 'score', 'joins' and optional 'breaks' keys.
        if type(repr) == tuple:
            self.score, self.breaks, self.joins = repr[0], repr[1], repr[2]
        elif type(repr) == dict:
            self.score = repr['score']
            self.breaks = repr.get('breaks', [])
            self.joins = repr['joins']

    def is_valid(self, links, ccd):
        """Check the edit against the current link state.

        Every break must exist in ``links``; every join endpoint must be
        unlinked (or freed by one of the breaks) and the two endpoints
        must lie in different components.
        """
        freed_ends = []
        for a, b in self.breaks:
            if (a, b) not in links:
                return False
            freed_ends.append(a)
            freed_ends.append(b)
        if not self.joins:
            return True
        for a, b in self.joins:
            if a in links and a not in freed_ends:
                return False
            if b in links and b not in freed_ends:
                return False
            if same_component(a, b, ccd):
                return False
        return True

    def implement(self, links, ccd, g):
        """Apply the edit: remove broken links, then register each join in
        the link table, the component map and the graph ``g``."""
        for a, b in self.breaks:
            if (a, b) in links:
                del links[a, b]
            if (b, a) in links:
                del links[b, a]
            del links[a]
            del links[b]
            if g.has_edge(a, b):
                g.remove_edge(a, b)
            else:
                print("how come there's not edge ?", a, b)
                raise Exception
        for a, b in self.joins:
            links[a, b] = 1
            links[b, a] = 1
            links[a] = b
            links[b] = a
            g.add_edge(a, b, {'length': default_gapsize, 'contig': False})
            merge_components(a, b, ccd)
            if (a, b) not in links:
                isv = False  # dead store kept from the original; never read

    def __repr__(self):
        return "\t".join(map(str, [self.score, self.joins, self.breaks]))
def update_end_distance(end_distance, n, g):
    """Walk the scaffold graph from end-node ``n`` and record, for every
    node reached, its running distance from that end in ``end_distance``
    (keeping the smaller value when an entry already exists).

    Expects ``g`` to be a simple path (every node has at most two
    neighbours); the distance accumulation only makes sense when nodes
    are visited in path order.  Returns the total accumulated distance.

    NOTE: ``print(math.log(-1.0))`` raises ValueError (math domain
    error) — it is used deliberately to abort loudly on bad topology.
    """
    x = 0
    q = [n]
    seen = {}
    last = False  # previously visited node; False before the first hop
    while len(q) > 0:
        m = q.pop(0)
        if last:
            try:
                # Accumulate the edge length from the previous node.
                x += g[last][m]['length']
            except Exception as e:
                print("wtf?", last, m, x, n)
                print(math.log(-1.0))  # force a ValueError crash
        last = m
        if (not m in end_distance) or end_distance[m] > x: end_distance[m] = x
        seen[m] = True
        if len(g.neighbors(m)) > 2:
            # More than two neighbours means this is not a simple path.
            print("topology error:", m, list(g.neighbors(m)))
            print(math.log(-1.0))  # force a ValueError crash
        for l in g.neighbors(m):
            if not l in seen:
                q.append(l)
    return x
    # print end_distance


#def llr(e1,e2):
#    return 1.0

# NOTE(review): module-level constant; not referenced in this chunk — confirm.
L = 200000.0
if __name__=="__main__":
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-d','--debug',default=False,action='store_true')
parser.add_argument('-p','--progress',default=False,action='store_true')
# parser.add_argument('-L','--links')
parser.add_argument('-s','--scaffolds')
# parser.add_argument('-S','--alreadyDone')
parser.add_argument('-b','--besthits')
parser.add_argument('-l','--lengths')
parser.add_argument('-E','--edgefile')
parser.add_argument('-F','--filter')
parser.add_argument('-N','--name')
parser.add_argument('-m','--minscore' ,default=5.0,type=float)
parser.add_argument('--seed',required=False,type=int,default=1, help="Seed for random number generation, use -1 for no seed")
# parser.add_argument('-K','--slices' ,default=1,type=int)
# parser.add_argument('-k','--slice' ,default=0,type=int)
args = parser.parse_args()
if args.seed != -1 :
random.seed(args.seed)
if args.debug:
args.progress=True
if args.progress: log( str(args) )
name_prefix=""
if args.name:
name_prefix=args.name
else:
import idGen as idGen
name_prefix="Scf" + idGen.id()
ll={}
if args.lengths:
f = open(args.lengths)
while True:
l = f.readline()
if not l: break
if l[0]=="#": continue
c=l.strip().split()
ll[c[0]]=int(c[1])
f.close()
besthit={}
if args.besthits:
# besthit={}
if args.besthits:
f = open(args.besthits)
while True:
l = f.readline()
if not l: break
if not l[:5]=="best:": continue
c=l.strip().split()
besthit[c[1]]=c[2:]
# print c[1],besthit[c[1]]
f.close()
if args.progress: print("#Done reading besthits")
linked={}
g=nx.Graph()
if args.scaffolds:
f=open(args.scaffolds)
while True:
l=f.readline()
if not l: break
c=l.strip().split()
if c[0]=="#edge:":
at=eval(" ".join(c[3:]))
a,b=c[1],c[2]
g.add_edge(a,b,at)
if not at['contig']:
linked[a]=1
linked[b]=1
linked[a,b]=1
linked[b,a]=1
# print "#add edge",c[1],c[2],eval(" ".join(c[3:]))
sys.stdout.flush()
sc=1
scaffold={}
for c in nx.connected_components(g):
for cc in c:
scaffold[cc]=sc
scaffold[cc[:-2]]=sc
sc+=1
# Candidate scaffold edits are read from stdin.  Two record types:
#   "link score <id1> <id2> <scores...>"  -> a simple join of two ends
#   "interc: (score, joins, breaks)"      -> a break-and-rejoin move
# Example besthit context line:
#   Scaffold50016_1 Scaffold40593_1 ['Scaffold77744_1.5', ...] ... 1141 455 1 15 1
# Only moves scoring above --minscore are kept.
moves = []
for line in sys.stdin:
    if line.startswith("link score"):
        c = line.strip().split()
        score = max(map(float, c[4:]))
        if score > args.minscore:
            moves.append(ScaffoldEdit({'score': score,
                                       'joins': ((c[2], c[3]),)}))
    elif line.startswith("interc:"):
        c = line.strip().split()
        # SECURITY: eval() of stdin is unsafe on untrusted input;
        # tolerated here because stdin is the output of an earlier
        # HiRise stage.
        x = eval(" ".join(c[1:]))
        if x[0] > args.minscore:
            moves.append(ScaffoldEdit({'score': x[0],
                                       'breaks': x[2],
                                       'joins': x[1]}))
# Apply the highest-scoring moves first.
moves.sort(key=lambda m: m.score, reverse=True)
# ccd maps each end node to its current connected-component id; moves are
# validated against (and update) this numbering as they are applied.
cnx = 1
ccd = {}
for component in nx.connected_components(g):
    for end_node in component:
        ccd[end_node] = cnx
    cnx += 1

for m in moves:
    # Evaluate is_valid() once per move (the original called it twice).
    # Assumes is_valid() is a pure check with no side effects — it
    # appears to be; TODO confirm in ScaffoldEdit.
    verdict = m.is_valid(linked, ccd)
    print(m, verdict)
    # '== True' kept deliberately: is_valid() may return non-boolean
    # diagnostic values that must not count as success.
    if verdict == True:
        m.implement(linked, ccd, g)
# --- Reporting pass -------------------------------------------------------
# For every final scaffold (connected component), print per-edge, per-node
# and summary statistics.  end_distance[n] is filled in by
# update_end_distance() with the distance of end n from one free end of
# its scaffold.
end_distance = {}
sn = 1
# nx.connected_component_subgraphs() was removed in networkx 2.4; this
# generator expression is the documented replacement and also works with
# older releases.
for sg in (g.subgraph(comp).copy() for comp in nx.connected_components(g)):
    ends = []
    bh_stats = {}
    # Degree-1 nodes are the two free ends of a linear scaffold.
    for n in sg.nodes():
        if sg.degree(n) == 1:
            ends.append(n)
    if len(ends) == 0:
        # Circular component: no free end to measure distances from.
        print("why no ends?", sn)
        sn += 1
        continue
    maxx = update_end_distance(end_distance, ends[0], sg)
    # Example besthit record:
    # ['34329.0', '3', '+', '71834554', '71853152', '1', '1', '18598']
    t = 0          # total scaffold span (sum of all edge lengths)
    gap_len = 0    # portion of the span contributed by gap (non-contig) edges
    for s1, s2 in sg.edges():
        t += sg[s1][s2]['length']
        if not sg[s1][s2]['contig']:
            gap_len += sg[s1][s2]['length']
        print("#", sn, s1, s2, sg[s1][s2]['length'], t)
    # NOTE(review): 'n' here is left over from the degree loop above (the
    # last node visited), which looks unintentional — confirm what this
    # column is meant to report.
    print(t, n, "slen", gap_len, t - gap_len)
    node_tlen = 0
    nodes_list = list(sg.nodes())
    nodes_list.sort(key=lambda x: end_distance[x])
    for n in nodes_list:
        # Node names are "<contig>_5" / "<contig>_3"; strip the suffix to
        # recover the contig name and which end this node represents.
        base_name, end_id = n[:-2], n[-1:]
        # Count each contig's length once (at its "5" end only).
        if end_id == "5":
            node_tlen += ll[base_name]
        bh = besthit.get(base_name, False)
        x = -1
        chrom = "-"   # renamed from 'chr' to avoid shadowing the builtin
        if bh:
            chrom = bh[1]
            # Pick the alignment coordinate corresponding to this end,
            # taking strand into account.
            if bh[2] == "+":
                if n[-1:] == "5":
                    x = int(bh[3])
                else:
                    x = int(bh[4])
            if bh[2] == "-":
                if n[-1:] == "5":
                    x = int(bh[4])
                else:
                    x = int(bh[3])
        print("p:", sn, n, end_distance[n], chrom, x, t, ll[base_name], bh)
    print(node_tlen, "node_tlen")
    sn += 1
exit(0)
# Provenance: DovetailGenomics/HiRise_July2015_GR, scripts/hiriseJoin.py