seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
class Repository:
    """File-backed storage for survey answers and questions.

    NOTE(review): the data files are opened at class-definition time and
    published as module globals (`surveys`, `question_file`); the original
    code relied on this, so the pattern is preserved. `question_file` is
    never closed and `surveys` is closed after a single write — TODO confirm
    this single-use lifecycle is intended.
    """
    import os.path
    global surveys, question_file
    # Data directory relative to this module.  The Windows-style suffix
    # (r'.\data') is preserved byte-for-byte to avoid changing the path
    # actually resolved on the target platform.
    path = os.path.dirname(__file__) + r'.\data'
    try:
        surveys = open(path + r"\first_survey.csv", "a+")
        # Skip past the header region before reading entries.
        # TODO confirm the 62-byte offset matches the CSV header length.
        surveys.seek(62)
        question_file = open(path + r"\questions.txt", "r")
    except FileNotFoundError:
        raise FileNotFoundError("Couldn't find file")

    @staticmethod
    def read_database():
        """Yield each stored survey row as a list of its comma-separated fields."""
        for entry in surveys.readlines():
            # Strip the trailing newline, then split into fields.
            yield entry[:-1].split(",")

    @staticmethod
    def read_questions():
        """Return all questions, one per line, with trailing newlines stripped."""
        return [line[:-1] for line in question_file.readlines()]

    @staticmethod
    def write_to_database(answers):
        """Append one 5-field answer row, then close the file (single-use)."""
        surveys.write("{},{},{},{},{}\n".format(answers[0], answers[1], answers[2], answers[3], answers[4]))
        surveys.close()
| Matixo55/SimplePoll | repository/repository.py | repository.py | py | 922 | python | en | code | 0 | github-code | 13 |
class Solution(object):
    """Minimum absolute difference between elements at least x indices apart."""

    def minAbsoluteDifference(self, nums, x):
        """Return min |nums[i] - nums[j]| over pairs with |i - j| >= x, or -1 if none.

        Replaces the original O(n^2) all-pairs scan with an O(n log n)
        sorted window: for each index i, all values at least x positions
        behind it are kept sorted, and only the nearest neighbors of
        nums[i] in that window can minimize the difference.
        """
        import bisect
        if x == 0:
            # i == j is an allowed pair and yields difference 0,
            # matching the brute-force behavior (empty input -> -1).
            return 0 if nums else -1
        best = -1
        window = []  # sorted copy of nums[0 .. i - x]
        for i in range(x, len(nums)):
            bisect.insort(window, nums[i - x])
            pos = bisect.bisect_left(window, nums[i])
            for p in (pos - 1, pos):
                if 0 <= p < len(window):
                    diff = abs(nums[i] - window[p])
                    if best == -1 or diff < best:
                        best = diff
        return best
# Ad-hoc driver: run the solver on one sample (alternatives kept below).
solver = Solution()
nums = [4, 3, 2, 4]
x = 2
'''
nums = [5,3,2,10,15]
x = 1
nums = [1,2,3,4]
x = 3
'''
print(solver.minAbsoluteDifference(nums, x))
| alexandreborgmann/leetcode | MinimumAbsoluteDifferenceBetweenElements.py | MinimumAbsoluteDifferenceBetweenElements.py | py | 661 | python | en | code | 0 | github-code | 13 |
34799497172 | #coding:utf-8
import os
def text_save(content, filename, mode='a'):
    """Write each element of *content* to *filename*, one per line.

    :param content: iterable of values; each is converted with str()
    :param filename: path of the text file to write
    :param mode: file mode, default 'a' (append)
    """
    # Bug fix: use a context manager so the handle is closed even if a
    # write raises; iterate directly instead of indexing by range(len()).
    with open(filename, mode) as out:
        for item in content:
            out.write(str(item) + '\n')
# file_dir = "/home/zx/desktop/华谊化工/"
def get_files(file_dir):
    """Return the part of each file name in *file_dir* before its first dot."""
    return [entry.split('.')[0] for entry in os.listdir(file_dir)]
def main(file_dir):
    """Return the name stems found in *file_dir* (thin wrapper over get_files)."""
    names = get_files(file_dir)
    return names
# text_save(a, '/home/zx/desktop/华谊化工/Annotations.txt')
#
# if __name__ == '__main__':
# main(file_dir) | Xiehuaiqi/python_script | cutimage/makedir1.py | makedir1.py | py | 631 | python | en | code | 0 | github-code | 13 |
11502353956 | import socket
import os
import time
class UDPPing:
    """Minimal UDP ping client: sends numbered PING datagrams and reports RTTs."""

    udp_ip = ""
    udp_port = 0
    packet_loss = 0
    packet_count = 0
    message = ""
    sock = None

    def __init__(self, udp_ip='ibiza.dcc.ufla.br', udp_port=5002, packet_count=20, message="Default"):
        self.udp_ip = udp_ip
        self.udp_port = udp_port
        self.packet_count = packet_count
        self.message = message
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def set_timeout(self, timeout=0.250):
        """Set the receive timeout (seconds) on the underlying socket."""
        self.sock.settimeout(timeout)

    def get_response(self):
        """Return the next datagram payload, or the string 'timeout' on any error."""
        try:
            # Bug fix: the original passed self.udp_port as the recvfrom
            # buffer size; use a sane fixed buffer instead.
            message, addr = self.sock.recvfrom(4096)
            return message
        except Exception:
            return 'timeout'

    def send_message(self):
        """Send the current message and wait for the echo/response."""
        # Bug fix: sendto() requires bytes in Python 3; the original passed
        # a str, which raised TypeError on every send.
        self.sock.sendto(self.message.encode(), (self.udp_ip, self.udp_port))
        return self.get_response()

    def ping(self):
        """Send packet_count pings (1 s apart), print a line per packet plus totals.

        A packet counts as lost when the socket times out or the RTT is >= 250 ms.
        """
        i = 0
        sum_rtt = 0
        while i < self.packet_count:
            time.sleep(1)
            self.message = 'PING ' + str(i)
            send_time_ms = time.time()
            response = self.send_message()
            recv_time_ms = time.time()
            rtt_in_ms = round((recv_time_ms - send_time_ms) * 1000, 3)
            if rtt_in_ms < 250:
                sum_rtt = sum_rtt + rtt_in_ms
            if response == 'timeout' or rtt_in_ms >= 250:
                self.packet_loss = self.packet_loss + 1
                response = 'timeout'
            elif response != 'timeout':
                response = ""
            print(
                str(len(self.message.encode('utf-8'))) + 'bytes ' + self.udp_ip + ':'
                + str(self.udp_port)
                + ' icmp_seq=' + str(i)
                + ' time=' + str(rtt_in_ms)
                + ' ' + response
            )
            i = i + 1
        received = self.packet_count - self.packet_loss
        # Bug fix: guard against ZeroDivisionError when every packet was lost.
        if received:
            print('MEAN RTT: ' + str(sum_rtt / received))
        else:
            print('MEAN RTT: ' + str(0))
        # Bug fix: the loss percentage was divided by a hard-coded 20
        # instead of the configured packet_count.
        print('PACKET LOSS: ' + str((self.packet_loss * 100) / self.packet_count) + '%')
| francislz/college_codes | computer_networks/tp_ping/UDPPing.py | UDPPing.py | py | 2,034 | python | en | code | 2 | github-code | 13 |
import spacy
import json
from tqdm import tqdm

nlp = spacy.load('en_core_web_sm')

# dataset_dir = '/home/hoang/Datasets/MIMIC/'
dataset_dir = '/home/hoang/Datasets/NLMCXR/'

# Bug fix: open files via context managers so handles are always closed
# (the original leaked both the input and output handles).
with open(dataset_dir + 'count_sentence.json', 'r') as infile:
    count_sentence = json.load(infile)

# Map noun-phrase text -> summed sentence counts.
np_count = {}
for sentence, freq in tqdm(count_sentence.items()):
    doc = nlp(sentence)
    for chunk in doc.noun_chunks:
        # dict.get avoids the membership-test + lookup double work, and the
        # loop variable no longer shadows the conventional `np` alias.
        np_count[chunk.text] = np_count.get(chunk.text, 0) + freq

with open(dataset_dir + 'count_nounphrase.json', 'w') as outfile:
    json.dump(np_count, outfile)
| ginobilinie/xray_report_generation | tools/nounphrase_extractor.py | nounphrase_extractor.py | py | 576 | python | en | code | 58 | github-code | 13 |
# Read skate sizes and people sizes, then count how many people can be
# given a skate at least their size.
skates = int(input('\nВведите кол-во коньков: '))
skates_list = []
count = 0
for s in range(1, skates + 1):
    print('Введите размер', s, 'человека: ', end='')
    # Bug fix: sizes were kept as strings, so the >= comparison below was
    # lexicographic ('9' >= '10' is True); store integers instead.
    skates_list.append(int(input()))
legs = int(input('\nВведите кол-во людей: '))
legs_list = []
for l in range(1, legs + 1):
    print('Введите размер', l, 'человека: ', end='')
    legs_list.append(int(input()))
# Bug fix: to truly maximize the number of matched people, process people
# in ascending size order and give each the smallest big-enough skate
# (first-fit on sorted lists is optimal for this matching).
skates_list.sort()
legs_list.sort()
for counts in legs_list:
    for j in range(len(skates_list)):
        if skates_list[j] >= counts:
            skates_list.remove(skates_list[j])
            count += 1
            break
print('\nНаибольшее кол-во людей, которые могут взять ролики: ', count)
# https://www.acmicpc.net/problem/13707
# Baekjoon 13707 (DP); results are reported modulo 1e9 as the problem asks.
# import sys
# input = sys.stdin.readline
MOD = int(1e9)

rows, cols = map(int, input().split())
table = [[0] * (cols + 1) for _ in range(rows + 1)]
for r in range(1, rows + 1):
    for c in range(1, cols + 1):
        # Row 1 is just c; later rows accumulate from above and from the left.
        table[r][c] = c if r == 1 else (table[r - 1][c] + table[r][c - 1]) % MOD
print(table[rows][cols] % MOD)
# Build the smallest number whose digits multiply to n by greedily peeling
# off the largest possible digit factors (9 down to 2).
n = int(input())
if n < 10:
    # NOTE(review): for single-digit n the original prints 10 + n — confirm
    # against the intended problem statement before changing.
    print(10 + n)
else:
    digits = []
    divisor = 9
    while divisor > 1:
        if n % divisor == 0:
            digits.append(str(divisor))
            n = n // divisor
        else:
            divisor = divisor - 1
    # Factors were collected largest-first; smallest number wants them
    # in ascending order, so reverse before joining.
    print(''.join(reversed(digits)))
42588814503 | #!/usr/bin/python3
from os import read
import socket
import threading
import sys
class server_INFO(object):
    """Plain holder for a server address: IP (str) and port (int)."""

    def __init__(self, IP, port) -> None:
        self.IP = str(IP)
        self.port = int(port)
def main():
    """Command-line chat client: register a user name, then relay commands."""
    if len(sys.argv) < 3:
        print(f"\tUsage {sys.argv[0]} <IP> <Port> ")
        exit(-1)
    conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    conn.connect((sys.argv[1], int(sys.argv[2])))
    # Print the server's greeting/banner.
    print(conn.recv(1024).decode())
    # Keep submitting user names until the server accepts one.
    while True:
        line = input("% ")
        conn.send(line.encode())
        reply = conn.recv(1024).decode()
        if reply != "The username is already used!":
            break
        print(reply)
    print(reply)
    # Main command loop: forward each line, echo the response, stop on "exit".
    while True:
        line = input("% ")
        verb = line.split(" ")[0]
        conn.send(line.encode())
        print(conn.recv(1024).decode(), end="")
        if verb == "exit":
            conn.close()
            break


if __name__ == "__main__":
    main()
| axde954e6/NCTU-Intro.2_NP | NP/mid/0712534/P2/client.py | client.py | py | 1,085 | python | en | code | 0 | github-code | 13 |
36146557716 | #!/usr/bin/env python3
import argparse
import atexit
import logging
import os
import random
import requests
import string
import subprocess
import sys
import tempfile
import time
from packaging import version
sys.path.append(f"{os.path.abspath(os.path.dirname(__file__))}/..") # noqa
from lib.commands import ssh, scp, SSHCommandFailed
from lib.common import wait_for, is_uuid
from lib.host import host_data
from lib.pool import Pool
from lib.vm import VM
logging.basicConfig(format='[%(levelname)s] %(message)s', level=logging.INFO)
PXE_CONFIG_DIR = "/pxe/configs/custom"
def pxe_address():
    """Return the PXE server address configured in data.py, or raise."""
    try:
        from data import PXE_CONFIG_SERVER
    except ImportError:
        raise Exception('No address for the PXE server found in data.py (`PXE_CONFIG_SERVER`)')
    return PXE_CONFIG_SERVER
def generate_boot_conf(directory, installer, action):
    """Write the PXE boot.conf file into *directory*.

    :param directory: destination directory for boot.conf
    :param installer: URL of the XCP-ng installer tree
    :param action: 'install', 'upgrade' or 'restore'
    """
    # In case of restore, we disable the text ui from the installer completely,
    # to workaround a bug that leaves us stuck on a confirmation dialog at the
    # end of the operation.
    rt = 'rt=1' if action == 'restore' else ''
    # Idiom fix: build the path with os.path.join instead of string concat.
    with open(os.path.join(directory, 'boot.conf'), 'w') as bootfile:
        bootfile.write(f"""answerfile=custom
installer={installer}
is_default=1
{rt}
""")
# NOTE(review): leading indentation of this function (and of the XML inside
# its f-strings) was lost in extraction; tokens are preserved as-is.
def generate_answerfile(directory, installer, hostname_or_ip, target_hostname, action, hdd, netinstall_gpg_check):
"""Write answerfile.xml into *directory* for an install/upgrade/restore run."""
pxe = pxe_address()
# Root password of the target host, hashed with `openssl passwd -6`
# (SHA-512 crypt) so only the hash lands in the answerfile.
password = host_data(hostname_or_ip)['password']
cmd = ['openssl', 'passwd', '-6', password]
res = subprocess.run(cmd, stdout=subprocess.PIPE)
encrypted_password = res.stdout.decode().strip()
# Generate a random xcp-ng-XXXXX hostname when none was requested.
if target_hostname is None:
target_hostname = "xcp-ng-" + "".join(
random.choice(string.ascii_lowercase) for i in range(5)
)
with open(f'{directory}/answerfile.xml', 'w') as answerfile:
if action == 'install':
answerfile.write(f"""<?xml version="1.0"?>
<installation{netinstall_gpg_check}>
<keymap>fr</keymap>
<primary-disk>{hdd}</primary-disk>
<guest-disk>{hdd}</guest-disk>
<root-password type="hash">{encrypted_password}</root-password>
<source type="url">{installer}</source>
<admin-interface name="eth0" proto="dhcp" />
<timezone>Europe/Paris</timezone>
<hostname>{target_hostname}</hostname>
<script stage="filesystem-populated" type="url">
http://{pxe}/configs/presets/scripts/filesystem-populated.py
</script>
</installation>
""")
elif action == 'upgrade':
answerfile.write(f"""<?xml version="1.0"?>
<installation mode="upgrade"{netinstall_gpg_check}>
<existing-installation>{hdd}</existing-installation>
<source type="url">{installer}</source>
<script stage="filesystem-populated" type="url">
http://{pxe}/configs/presets/scripts/filesystem-populated.py
</script>
</installation>
""")
elif action == 'restore':
answerfile.write(f"""<?xml version="1.0"?>
<restore>
</restore>
""")
else:
raise Exception(f"Unknown action: `{action}`")
def copy_files_to_pxe(mac_address, tmp_local_path):
    """Push the generated boot.conf and answerfile.xml to the PXE server."""
    assert mac_address
    pxe_host = pxe_address()
    remote_dir = f'{PXE_CONFIG_DIR}/{mac_address}/'
    # Start from a clean per-MAC directory.
    clean_files_on_pxe(mac_address)
    ssh(pxe_host, ['mkdir', '-p', remote_dir])
    for filename in ('boot.conf', 'answerfile.xml'):
        scp(pxe_host, f'{tmp_local_path}/{filename}', remote_dir)
def clean_files_on_pxe(mac_address):
    """Remove this MAC's whole config directory on the PXE server."""
    # Guard: an empty MAC would make the rm target the parent directory.
    assert mac_address
    ssh(pxe_address(), ['rm', '-rf', f'{PXE_CONFIG_DIR}/{mac_address}/'])
def clean_bootconf_on_pxe(mac_address):
    """Delete only the boot.conf file for this MAC on the PXE server."""
    assert mac_address
    pxe = pxe_address()
    distant_file = f'{PXE_CONFIG_DIR}/{mac_address}/boot.conf'
    try:
        ssh(pxe, ['rm', '-rf', distant_file])
    except SSHCommandFailed as e:
        # Bug fix: the original concatenated str + Exception, which raises
        # TypeError instead of the intended message. Stringify and chain.
        raise Exception('ERROR: failed to clean the boot.conf file. ' + str(e)) from e
def get_candidate_ips(mac_address):
    """Return IPs the PXE server's ARP table associates with *mac_address*."""
    arp_cmd = ['arp', '-n', '|', 'grep', mac_address, '|', 'awk', '\'{ print $1 }\'']
    return ssh(pxe_address(), arp_cmd).splitlines()
def is_ip_active(ip):
    """True when *ip* answers ping (3 probes, 10 s deadline)."""
    return os.system(f"ping -c 3 -W 10 {ip} > /dev/null 2>&1") == 0
def is_ssh_up(ip):
    """True when a trivial command can be run over SSH on *ip*."""
    try:
        ssh(ip, ['true'], options=['-o "ConnectTimeout 10"'])
    except SSHCommandFailed:
        # Host unreachable or sshd not ready yet.
        return False
    return True
def get_new_host_ip(mac_address):
    """Return the first candidate IP that pings and accepts SSH, else None."""
    candidates = get_candidate_ips(mac_address)
    logging.debug("Candidate IPs: " + ", ".join(candidates))
    return next(
        (ip for ip in candidates if is_ip_active(ip) and is_ssh_up(ip)),
        None,
    )
def is_new_host_ready(ip_address):
    """True when `xe host-list enabled=true` on the target returns a host UUID."""
    try:
        return is_uuid(ssh(ip_address, ['xe', 'host-list', 'enabled=true', '--minimal']))
    except Exception:
        # Any SSH/XAPI hiccup means "not ready yet".
        return False
def check_mac_address(host, mac_address):
    """Raise if *host*'s management-interface MAC differs from *mac_address*."""
    bridge = host.inventory['MANAGEMENT_INTERFACE']
    host_mac_address = host.ssh(['cat', f'/sys/class/net/{bridge}/address'])
    if host_mac_address == mac_address:
        return
    raise Exception(
        f"Unexpected MAC address `{host_mac_address}` for host `{host.hostname_or_ip}`. "
        f"Expected: `{mac_address}`"
    )
def url_checker(url):
    """GET *url*: exit on a network failure, raise on a non-success status."""
    try:
        response = requests.get(url)
    except requests.exceptions.RequestException as e:
        raise SystemExit(f"{url}: URL is not reachable\nErr: {e}")
    # requests.Response is falsy for 4xx/5xx status codes.
    if not response:
        raise Exception(f"{url}: URL is not reachable, status_code: {response.status_code}")
# NOTE(review): leading indentation of this function was lost in extraction;
# tokens are preserved as-is.
def main():
"""Install/upgrade/restore XCP-ng inside an existing VM via PXE boot."""
parser = argparse.ArgumentParser()
parser.add_argument(
"host",
help="hostname or IP address of the host hosting the VM that will be installed/upgraded/restored"
)
parser.add_argument("vm_uuid", help="UUID of an existing VM in which XCP-ng will be installed/upgraded/restored")
parser.add_argument(
"action", metavar='action', choices=['install', 'upgrade', 'restore'], help="install, upgrade or restore"
)
parser.add_argument(
"xcpng_version",
help="target version, used to build the installer URL if none provided via --installer, "
"and also used to check the system version at the end. Example: 8.2.1"
"In case of a restore, specify the version of the installer."
)
parser.add_argument("--installer", help="URL of the installer")
parser.add_argument(
"-t", "--target-hostname",
help="The hostname of the VM in which XCP-ng will be installed. By default "
"a hostname is generated starting with xcp-ng-XXXXX where XXXXX is "
"randomly generated using lowercase characters.")
parser.add_argument("--netinstall-gpg-check", default=False, action='store_true', help="Disable GPG Check")
args = parser.parse_args()
# *** "fail early" checks
pxe = pxe_address() # raises if not defined
if not is_uuid(args.vm_uuid):
raise Exception(f'The provided VM UUID is invalid: {args.vm_uuid}')
if args.xcpng_version[0].isdigit():
xcp_version = args.xcpng_version
else:
raise Exception(f'The version does not seem valid: {args.xcpng_version}')
# Build the XML attribute injected into the answerfile's <installation> tag.
if args.netinstall_gpg_check:
netinstall_gpg_check = " netinstall-gpg-check=\"false\""
else:
netinstall_gpg_check = ""
# *** slower checks (involving network, SSH...)
if not is_ssh_up(args.host):
raise Exception(f"Could not SSH into host `{args.host}`")
try:
pool = Pool(args.host) # will fail if host is not XCP-ng or XAPI doesn't respond yet
except Exception as e:
raise Exception(f"Host `{args.host}` isn't ready or isn't an XCP-ng host")
host = pool.master
assert host.is_enabled()
# Default installer URL is derived from the PXE server and target version.
if not args.installer:
installer = f"http://{pxe}/installers/xcp-ng/{xcp_version}/"
else:
installer = args.installer
try:
url_checker(f"{installer}.treeinfo")
except Exception:
raise Exception(f"No installer found at URL `{installer}`")
vm = VM(args.vm_uuid, host)
vif = vm.vifs()[0]
mac_address = vif.param_get('MAC')
# Generate the PXE config files locally, then push them to the PXE server.
with tempfile.TemporaryDirectory(suffix=mac_address) as tmp_local_path:
logging.info('Generate files: answerfile.xml and boot.conf')
hdd = 'nvme0n1' if vm.is_uefi else 'sda'
generate_answerfile(tmp_local_path, installer, args.host, args.target_hostname, args.action, hdd,
netinstall_gpg_check)
generate_boot_conf(tmp_local_path, installer, args.action)
logging.info('Copy files to the pxe server')
copy_files_to_pxe(mac_address, tmp_local_path)
atexit.register(lambda: clean_files_on_pxe(mac_address))
# Reboot the VM so it PXE-boots into the installer.
if (vm.is_running()):
try:
vm.shutdown(verify=True)
except Exception:
vm.shutdown(force=True, verify=True)
vm.start()
# wait a bit to let the PXE server give the boot configuration to the VM, then disable the specific boot config
time.sleep(20)
clean_bootconf_on_pxe(mac_address)
wait_for(
lambda: get_new_host_ip(mac_address) is not None,
"Waiting for the installation process to complete and the VM to reboot and be up", 3600, 10
)
vm_ip_address = get_new_host_ip(mac_address)
logging.info('The IP address of the installed XCP-ng is: ' + vm_ip_address)
wait_for(lambda: is_new_host_ready(vm_ip_address), "Waiting for XAPI to be ready", 600, 10)
# Verify that the freshly installed system is the VM we targeted and that
# the resulting version matches the requested action.
pool2 = Pool(vm_ip_address)
host2 = pool2.master
host2.inventory = host2._get_xensource_inventory()
check_mac_address(host2, mac_address)
logging.info(f'Target host is started and enabled in version: {host2.xcp_version}')
if args.action == 'restore' and host2.xcp_version >= version.parse(xcp_version):
raise Exception(
f"The installed host ({vm_ip_address}) is not in a previous version. Got: {host2.xcp_version}.\n"
)
elif args.action != 'restore' and host2.xcp_version != version.parse(xcp_version):
raise Exception(
f"The installed host ({vm_ip_address}) is not in the expected version. Got: {host2.xcp_version}.\n"
f"Expected: {xcp_version}."
)
| xcp-ng/xcp-ng-tests | scripts/install_xcpng.py | install_xcpng.py | py | 10,440 | python | en | code | 3 | github-code | 13 |
21571761016 | import params
from google.cloud import datastore, storage, logging
import time
import pickle
import hashlib
import sys
import numpy as np
import portfolioGeneration
import portfolio
import dataAck
import warnings
import numpy as np
import pandas as pd
warnings.filterwarnings("ignore")
import multiprocessing as mp
import autoPortfolioTree
import curveTreeDB
import portfolio
# In[ ]:
print("STARTING OBJECT DOWNLOAD")

## DISABLE TO TEST FACTORS
treeModels = []
tickersSeen = []
dataObjs = curveTreeDB.getValidModels(params.treeModels, returnEntireObject=True)
for item in dataObjs:
    try:
        # Keep only models clearing the in-sample profitability/return bar.
        if item["IS_PROFITABILITY SLIPPAGE"] > 0.51 and item["IS_ANNUALIZED RETURN"] > 0.05:  # and item["IS_BETA"] < 0.15:
            model = item["model"]
            print(model.targetTicker, item["IS_BETA"], item["OOS_BETA"], item["IS_ANNUALIZED RETURN"], item["OOS_ANNUALIZED RETURN"])
            treeModels.append(model)
            if model.targetTicker not in tickersSeen:
                tickersSeen.append(model.targetTicker)
    except:
        continue

print("MODELS ACCEPTED:", len(treeModels))
print("TICKERS ACCEPTED:", len(tickersSeen))

import random
factorToTrade = "VTI"  # tickersSeen[random.randint(0, len(tickersSeen) - 1)]

uniqueModels, modelReturns, modelPredictions, modelSlippageReturns, modelReturnsWithFactor, joinedData = autoPortfolioTree.computeReturnsForUniqueModelsCache(treeModels, factorToTrade)

# Align columns on model hashes and restrict the window to 2008 onward.
cleanedReturns = modelReturns.fillna(0)
cleanedReturns.columns = [item.getHash() for item in uniqueModels]
cleanedReturns = cleanedReturns["2008-01-01":]

cleanedPredictions = modelPredictions.fillna(0)
cleanedPredictions.columns = [item.getHash() for item in uniqueModels]
cleanedPredictions = cleanedPredictions["2008-01-01":]

# Lookup table from model hash back to the model object.
hashToModel = {}
for item in uniqueModels:
    hashToModel[item.getHash()] = item
# cleanedReturns
def storePortfolioInputData(cleanedReturns, cleanedPredictions, hashToModel, joinedData):
    """Pickle the portfolio inputs and upload them to the GCS cache bucket.

    Retries forever with a 10 s back-off on upload failures.
    """
    storageClient = storage.Client('money-maker-1236')
    # Perf: serialize once instead of re-pickling on every retry.
    payload = pickle.dumps((cleanedReturns, cleanedPredictions, hashToModel, joinedData))
    while True:
        try:
            bucket = storageClient.get_bucket(params.validModelsCache)
            blob = storage.Blob(params.validModelsLookup, bucket)
            blob.upload_from_string(payload)
            print("STORING", params.validModelsLookup)
            break
        except Exception:
            # Bug fix: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit, making the retry loop unkillable.
            print("UPLOAD BLOB ERROR:", str(sys.exc_info()))
            time.sleep(10)


##NEED TO STORE
storePortfolioInputData(cleanedReturns, cleanedPredictions, hashToModel, joinedData)
16882491331 | from selenium.webdriver.support.ui import WebDriverWait
from tests.Pages.Components.HeaderComponent import HeaderComponent
from tests.Pages.Components.FooterComponent import FooterComponent
from tests.Pages.Components.AsideFilterSortComponent import AsideFilterSortComponent
from tests.config import blog_list_url
from selenium.webdriver.common.by import By
from utils import wait_for_element
from tests.Locators.BlogListPageLocators import BlogListPageLocators
class BlogListPage(HeaderComponent, FooterComponent, AsideFilterSortComponent):
    """Page object for the blog-list page, composing header/footer/aside components."""

    name = 'blog_list'

    element_locators = {
        'page_title': BlogListPageLocators.PAGE_TITLE,
        'join_button': BlogListPageLocators.JOIN_BUTTON,
        'blog_item': BlogListPageLocators.BLOG_ITEM
    }

    def __init__(self, driver, independent: bool = True):
        """
        independent param: whether driver directory load this page independently (true) or load from another page (e.g., Home Page) as dependency
        """
        super().__init__(driver)
        # Merge every parent component's locators into one dict so lookups
        # on this page object cover the whole page.
        self.element_locators = {
            **self.element_locators,
            **HeaderComponent.element_locators,
            **FooterComponent.element_locators,
            **AsideFilterSortComponent.element_locators
        }
        if independent:
            self.driver.get(blog_list_url)
            # Avoid NoSuchElementException before the SPA has rendered
            # (esp. for find-element-by-link tests); see:
            # https://stackoverflow.com/questions/6936149/how-to-use-find-element-by-link-text-properly-to-not-raise-nosuchelementexcept
            wait_for_element(self.driver, By.ID, 'root')

    def get_number_of_blog_item_displayed(self):
        """Count blog items rendered by the initial fetch (filter 'recent').

        Waiting on the blog-item locator (not just a wrapper) ensures the
        item details finished loading before counting.
        """
        WebDriverWait(self.driver, 500).until(
            lambda drv: drv.find_elements(*self.element_locators['blog_item'])
        )
        items = self.driver.find_elements(*self.element_locators['blog_item'])
        return len(items)
| stsiwo/python-selenium-testing | tests/Pages/BlogListPage.py | BlogListPage.py | py | 2,528 | python | en | code | 0 | github-code | 13 |
73607343696 | import os
import sys
import urllib.parse
import warnings
from flask import Response
import flask_frozen
import click
from ._deployment import deploy as deploy_
from ._shutdown import ShutdownableFreezer, inject_shutdown
def port_option():
    """Shared ``--port`` click option (int, default 8003)."""
    return click.option('--port', type=int, default=8003, help='Port to listen at')
def cname_option():
    """Shared ``--cname/--no-cname`` flag (default: create the CNAME file)."""
    return click.option('--cname/--no-cname', default=True,
                        help='Whether to create the CNAME file, default is to create it')
def path_option(app):
    """Shared ``--path`` option defaulting to ``<app root>/_build``."""
    default_path = os.path.join(app.root_path, '_build')
    return click.option('--path', default=default_path, help='Input path, default _build')
def verbose_option():
    """Shared ``-v/-q`` verbosity flag."""
    return click.option('-v/-q', '--verbose/--quiet',
                        help='Print out page URLs as they are frozen')
def host_option():
    """Shared ``--host`` option for serve-style commands."""
    return click.option('--host', help='Host to listen at when serving')
def freeze_app(app, freezer, path, base_url, verbose):
    """Freeze *app* into static files under *path*; exit(1) on any frozen-flask warning."""
    if not base_url:
        raise click.UsageError('No base URL provided, use --base-url')
    print('Generating HTML...')
    app.config['FREEZER_DESTINATION'] = path
    app.config['FREEZER_BASE_URL'] = base_url
    app.config['SERVER_NAME'] = urllib.parse.urlparse(base_url).netloc
    # Promote Frozen-Flask warnings to errors so broken pages fail the build.
    warnings.filterwarnings('error', category=flask_frozen.FrozenFlaskWarning)
    try:
        for page in freezer.freeze_yield():
            if verbose:
                print('Frozen', page.url, file=sys.stderr)
    except flask_frozen.FrozenFlaskWarning as w:
        print('Error:', w, file=sys.stderr)
        sys.exit(1)
def inject_cname(app):
    """Register a /CNAME route so GitHub Pages picks up the configured domain."""
    @app.route('/CNAME')
    def cname():
        return Response(app.config['SERVER_NAME'],
                        mimetype='application/octet-stream')
def cli(app, *, freezer=None, base_url=None, invoke_cli=True):
    """ Generates command-line interface for the provided app.

    If ``invoke_cli`` is set to ``True`` (the default),
    the cli is invoked right away,
    otherwise it's returned so it can be used further.
    """
    if not freezer:
        freezer = ShutdownableFreezer(app)

    @click.group(context_settings=dict(help_option_names=['-h', '--help']),
                 help=__doc__)
    def command():
        pass

    @command.command()
    @port_option()
    @cname_option()
    @host_option()
    def serve(port, cname, host):
        """Run a debug server"""
        # Workaround for https://github.com/pallets/flask/issues/1907
        auto_reload = app.config.get('TEMPLATES_AUTO_RELOAD')
        if auto_reload or auto_reload is None:
            app.jinja_env.auto_reload = True
        inject_shutdown(app)
        if cname:
            inject_cname(app)
        # Only forward --host when it was actually given.
        kwargs = {} if host is None else {'host': host}
        app.run(port=port, debug=True, **kwargs)

    @command.command()
    @path_option(app)
    @click.option('--base-url', default=base_url,
                  help='URL for the application, used for external links, ' +
                       ('default {}'.format(base_url) if base_url else 'mandatory'))
    @click.option('--serve/--no-serve',
                  help='After building the site, run a server with it')
    @verbose_option()
    @port_option()
    @cname_option()
    @host_option()
    def freeze(path, base_url, serve, port, cname, verbose, host):
        """Build a static site"""
        if cname:
            inject_cname(app)
        freeze_app(app, freezer, path, base_url, verbose=verbose)
        kwargs = {} if host is None else {'host': host}
        if serve:
            freezer.serve(port=port, **kwargs)

    @command.command()
    @path_option(app)
    @click.option('--base-url', default=base_url,
                  help='URL for the application, used for external links, ' +
                       ('default {}'.format(base_url) if base_url
                        else 'mandatory' ' with --freeze'))
    @click.option('--remote', default='origin',
                  help='The name of the remote to push to, default origin')
    @click.option('--push/--no-push', default=None,
                  help='Whether to push the gh-pages branch, deprecated default is to push')
    @click.option('--freeze/--no-freeze', default=True,
                  help='Whether to freeze the site before deploying, default is to freeze')
    @click.option('--show-git-push-stderr', is_flag=True,
                  help='Show the stderr output of `git push` failure, might be dangerous if logs are public')
    @verbose_option()
    @cname_option()
    def deploy(path, base_url, remote, push, freeze,
               show_git_push_stderr, cname, verbose):
        """Deploy the site to GitHub pages"""
        # Implicit pushing is deprecated; warn loudly but keep old behavior.
        if push is None:
            warnings.simplefilter('always')
            msg = ('Using deploy without explicit --push/--no-push is '
                   'deprecated. Assuming --push for now. In future versions '
                   'of elsa, the deploy command will not push to the remote '
                   'server by default. Use --push explicitly to maintain '
                   'current behavior.')
            warnings.warn(msg, DeprecationWarning)
            push = True
        if freeze:
            if cname:
                inject_cname(app)
            freeze_app(app, freezer, path, base_url, verbose=verbose)
        deploy_(path, remote=remote, push=push, show_err=show_git_push_stderr)

    if invoke_cli:
        return command()
    else:
        return command
| pyvec/elsa | elsa/_cli.py | _cli.py | py | 5,710 | python | en | code | 27 | github-code | 13 |
40414084010 | from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_volume
short_description: Create/Delete Cinder Volumes
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Create or Remove cinder block storage volumes
options:
size:
description:
- Size of volume in GB. This parameter is required when the
I(state) parameter is 'present'.
display_name:
description:
- Name of volume
required: true
display_description:
description:
- String describing the volume
volume_type:
description:
- Volume type for volume
image:
description:
- Image name or id for boot from volume
snapshot_id:
description:
- Volume snapshot id to create from
volume:
description:
- Volume name or id to create from
version_added: "2.3"
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
scheduler_hints:
description:
- Scheduler hints passed to volume API in form of dict
version_added: "2.4"
metadata:
description:
- Metadata for the volume
version_added: "2.8"
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Creates a new volume
- name: create a volume
hosts: localhost
tasks:
- name: create 40g test volume
os_volume:
state: present
cloud: mordred
availability_zone: az2
size: 40
display_name: test_volume
scheduler_hints:
same_host: 243e8d3c-8f47-4a61-93d6-7215c344b0c0
'''
RETURNS = '''
id:
description: Cinder's unique ID for this volume
returned: always
type: str
sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
volume:
description: Cinder's representation of the volume object
returned: always
type: dict
sample: {'...'}
'''
from distutils.version import StrictVersion
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs, openstack_cloud_from_module
def _needs_update(module, volume):
'''
check for differences in updatable values, at the moment
openstacksdk only supports extending the volume size, this
may change in the future.
:returns: bool
'''
compare_simple = ['size']
for k in compare_simple:
if module.params[k] is not None and module.params[k] != volume.get(k):
return True
return False
def _modify_volume(module, cloud):
    '''
    Modify an existing volume. The only modification openstacksdk currently
    supports is extending the size; always exits the module with the result.
    '''
    volume = cloud.get_volume(module.params['display_name'])
    diff = {'before': volume, 'after': ''}
    size = module.params['size']
    # Volumes can only grow, never shrink.
    if size < volume.get('size'):
        module.fail_json(
            msg='Cannot shrink volumes, size: {0} < {1}'.format(size, volume.get('size'))
        )
    if not _needs_update(module, volume):
        diff['after'] = volume
        module.exit_json(changed=False, id=volume['id'], volume=volume, diff=diff)
    if module.check_mode:
        # Report what would change without touching the volume.
        diff['after'] = volume
        module.exit_json(changed=True, id=volume['id'], volume=volume, diff=diff)
    cloud.volume.extend_volume(volume.id, size)
    diff['after'] = cloud.get_volume(module.params['display_name'])
    module.exit_json(changed=True, id=volume['id'], volume=volume, diff=diff)
def _present_volume(module, cloud):
    '''Create the volume if missing, otherwise extend it when needed; always exits.'''
    params = module.params
    name = params['display_name']
    if cloud.volume_exists(name):
        current = cloud.get_volume(name)
        if not _needs_update(module, current):
            module.exit_json(changed=False, id=current['id'], volume=current)
        _modify_volume(module, cloud)  # exits the module itself
    diff = {'before': '', 'after': ''}
    volume_args = dict(
        size=params['size'],
        volume_type=params['volume_type'],
        display_name=name,
        display_description=params['display_description'],
        snapshot_id=params['snapshot_id'],
        availability_zone=params['availability_zone'],
    )
    if params['image']:
        volume_args['imageRef'] = cloud.get_image_id(params['image'])
    if params['volume']:
        volume_id = cloud.get_volume_id(params['volume'])
        if not volume_id:
            module.fail_json(msg="Failed to find volume '%s'" % params['volume'])
        volume_args['source_volid'] = volume_id
    # Optional pass-through parameters.
    for optional in ('scheduler_hints', 'metadata'):
        if params[optional]:
            volume_args[optional] = params[optional]
    if module.check_mode:
        diff['after'] = volume_args
        module.exit_json(changed=True, id=None, volume=volume_args, diff=diff)
    volume = cloud.create_volume(wait=params['wait'], timeout=params['timeout'],
                                 **volume_args)
    diff['after'] = volume
    module.exit_json(changed=True, id=volume['id'], volume=volume, diff=diff)
def _absent_volume(module, cloud, sdk):
    '''Delete the volume when present; always exits with changed/diff info.'''
    changed = False
    diff = {'before': '', 'after': ''}
    name = module.params['display_name']
    if cloud.volume_exists(name):
        volume = cloud.get_volume(name)
        diff['before'] = volume
        if module.check_mode:
            module.exit_json(changed=True, diff=diff)
        try:
            changed = cloud.delete_volume(name_or_id=name,
                                          wait=module.params['wait'],
                                          timeout=module.params['timeout'])
        except sdk.exceptions.ResourceTimeout:
            # Deletion was requested but did not finish in time.
            diff['after'] = volume
            module.exit_json(changed=changed, diff=diff)
    module.exit_json(changed=changed, diff=diff)
def main():
    """Entry point: parse module arguments and dispatch on the desired state."""
    argument_spec = openstack_full_argument_spec(
        size=dict(default=None, type='int'),
        volume_type=dict(default=None),
        display_name=dict(required=True, aliases=['name']),
        display_description=dict(default=None, aliases=['description']),
        image=dict(default=None),
        snapshot_id=dict(default=None),
        volume=dict(default=None),
        state=dict(default='present', choices=['absent', 'present']),
        scheduler_hints=dict(default=None, type='dict'),
        metadata=dict(default=None, type='dict')
    )
    module_kwargs = openstack_module_kwargs(
        mutually_exclusive=[
            ['image', 'snapshot_id', 'volume'],
        ],
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, **module_kwargs)
    state = module.params['state']
    # Size is only meaningful (and required) when creating/extending.
    if state == 'present' and not module.params['size']:
        module.fail_json(msg="Size is required when state is 'present'")
    sdk, cloud = openstack_cloud_from_module(module)
    try:
        if state == 'present':
            _present_volume(module, cloud)
        if state == 'absent':
            _absent_volume(module, cloud, sdk)
    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
| RavindraSingh12/Ansible-Runner | venv/lib/python3.6/site-packages/ansible/modules/cloud/openstack/os_volume.py | os_volume.py | py | 7,700 | python | en | code | 2 | github-code | 13 |
11867518761 | import random
class Mp3calar ():
def __init__(self):
self.sarkilar = []
self.calansarki = ""
self.sesduzeyi = 50
self.mp3durumu = True
self.kacsarkivar = 0
self.controller()
self.anamenu()
def controller (self):
if self.calansarki == "":
self.calansarki = "Şarkı Çalmıyor"
if self.calansarki not in self.sarkilar:
self.calansarki = "Şarkı Çalmıyor"
def anamenu (self):
print("****/MP3 Çalara Hoşgeldiniz\****")
while (self.mp3durumu):
self.controller()
print(f"""
Şarkılar: {self.sarkilar}
Çalan Şarkı: {self.calansarki}
Ses Düzeyi: {self.sesduzeyi}
1-Şarkı Ekle
2-Şarkı Sil
3-Şarkı Seç
4-Rastgele Şarkı Seç
5-Ses Arttır
6-Ses Azalt
7-Şarkıyı kapat
8-Kapat
""")
self.menusec = int(input("Yapmak istediğiniz işlemin yanındaki numarayı giriniz: "))
if self.menusec == 1:
print("")
self.sarkiekle = input("Lütfen şarkı ismini giriniz: ")
self.sarkilar.append(self.sarkiekle)
self.kacsarkivar += 1
print("Şarkı başarıyla eklendi")
if self.menusec == 2:
if len(self.sarkilar) <= 0:
print("Şarkınız bulunmamakta")
else:
print("")
id = 1
sarkilarinuzunlugu = len(self.sarkilar)
for i in range(0,sarkilarinuzunlugu):
print(f"{id}) {self.sarkilar[i]}")
id += 1
print("")
self.sarkisil = int(input("Lütfen şarkının yanındaki numarayı yazınız: "))
if self.sarkisil > len(self.sarkilar):
print("")
print("Lütfen geçerli bir sayı giriniz!")
elif self.sarkisil <= 0:
print("")
print("Lütfen geçerli bir sayı giriniz")
else:
self.sarkilar.pop(self.sarkisil -1)
print("")
print("Şarkı başarıyla silindi")
if self.menusec == 3:
if len(self.sarkilar) <= 0:
print("Şarkınız bulunmamakta")
else:
print("")
id = 1
sarkilarinuzunlugu = len(self.sarkilar)
for i in range(0,sarkilarinuzunlugu):
print(f"{id}) {self.sarkilar[i]}")
id += 1
print("")
self.sarkisec = int(input("Lütfen seçmek istediğiniz şarkının yanındaki numarayı giriniz: "))
if self.sarkisec > len(self.sarkilar):
print("")
print("Lütfen geçerli bir sayı giriniz!")
elif self.sarkisec <= 0:
print("")
print("Lütfen geçerli bir sayı giriniz")
else:
self.calansarki = self.sarkilar [self.sarkisec -1]
print("")
print("Şarkı seçildi")
if self.menusec == 4:
if self.kacsarkivar >= 2:
self.rastgelesarkisec = random.choice(self.sarkilar)
self.calansarki = self.rastgelesarkisec
print("")
print(f"Rastgele seçilen şarkı: {self.calansarki}")
else:
print("")
print("Rastgele şarkı seçmek için 2'den fazla şarkınızın olması lazım!")
if self.menusec == 5:
if self.sesduzeyi >= 100:
print("")
print("Ses düzeyi zaten MAX Durumda")
else:
self.sesduzeyi += 10
print("")
print("Ses arttırıldı")
if self.menusec == 6:
if self.sesduzeyi <= 0:
print("")
print("Ses düzeyi zaten MIN Durumda")
else:
print("")
print("Ses arttırıldı")
if self.menusec == 7:
if self.calansarki == "" or self.calansarki == "Şarkı Çalmıyor":
print("")
print("Zaten şarkı çalmıyor")
else:
self.calansarki = "Şarkı Çalmıyor"
print("")
print("Şarkı durduruldu")
if self.menusec == 8:
self.mp3durumu = False
mp3calar = Mp3calar
mp3calar()
| zaFer234/Temel-Python-Projeleri | mp3 çalar.py | mp3 çalar.py | py | 5,017 | python | tr | code | 0 | github-code | 13 |
27961748602 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 25 16:52:51 2020
@author: pradeep
"""
import pandas as pd
import numpy
from numpy import diff
import matplotlib.pyplot as plt
from scipy import signal
from scipy.fftpack import fft, fftshift
import pylab
#import cv2
import scipy.ndimage as ndimage
from scipy.interpolate import UnivariateSpline
from scipy.signal import wiener, filtfilt, butter, gaussian, freqz
from scipy.ndimage import filters
import scipy.optimize as op
import matplotlib.pyplot as plt
fs=1200 # Sampling rate of the signal
path = 'D:\Thesis data\polhemus'
file = 'AB_13_G1_0 - Report1.txt'
position_data= pd.read_csv('AB_13_G1_0 - Report1.txt', sep="\t",skiprows=4)
#position_data.describe()
X= position_data.iloc[:,3][1:]
X=(X.dropna()).to_numpy()
#K=np.multiply(X,100)
time=position_data.iloc[:,2][1:]
t=numpy.linspace(0.1 ,len(X)/fs,len(X))
#def testGauss(t, X, s, fs):
# b = gaussian(39, 10)
# ga = filters.convolve1d(X, b/b.sum())
# plt.plot(t, ga)
# print ("gaerr", ssqe(ga, s, fs))
# return ga
#
#img = ndimage.gaussian_filter(X, order=0)
#plt.imshow(img, interpolation='nearest')
#plt.show()
#img = cv2.imread('opencv_logo.png')
#blur = cv2.Gaussian.Blur(X,(5,5))
#
#plt.subplot(121),plt.imshow(X),plt.title('Original')
#plt.xticks([]), plt.yticks([])
#plt.subplot(122),plt.imshow(blur),plt.title('Blurred')
#plt.xticks([]), plt.yticks([])
#plt.show()
#def smoothListGaussian(X,degree=10):
# window = degree*2-1
# weight = numpy.array([1.0]*window)
# weightGauss = []
# for i in range(window):
# i = i-degree+1
# frac = i/float(window)
# gauss = 1/(numpy.exp((4*(frac))**2))
# weightGauss.append(gauss)
# weight = numpy.array(weightGauss)*weight
# smoothed = [0.0]*(len(X)-window)
# for i in range(len(smoothed)):
# smoothed[i] = sum(numpy.array(X[i:i+window])*weight)/sum(weight)
# return smoothed
#newdata=smoothListGaussian(X)
def smoothList(X, strippedXs=False, degree=25):
if strippedXs == True:
return strippedXs[0:-(len(X)-(len(X)-degree+1))]
smoothed = [0]*(len(X)-degree+1)
for i in range(len(smoothed)):
smoothed[i] = sum(X[i:i+degree])/float(degree)
return smoothed
smoothdata=smoothList(X, strippedXs=False, degree=25)
plt.figure()
plt.plot(t[24:],smoothdata)
plt.title()
def smoothListTriangle(X, strippedXs=False, degree=25):
weight = []
window = degree*2-1
smoothed = [0.0]*(len(X)-window)
for x in range(1, 2*degree):
weight.append(degree-abs(degree-x))
w = numpy.array(weight)
for i in range(len(smoothed)):
smoothed[i] = sum(numpy.array(X[i:i+window])*w)/float(sum(w))
return smoothed
def smoothListGaussian(X, strippedXs=False, degree=25):
window = degree*2-1
weight = numpy.array([1.0]*window)
weightGauss = []
for i in range(window):
i = i-degree+1
frac = i/float(window)
gauss = 1/(numpy.exp((4*(frac))**2))
weightGauss.append(gauss)
weight = numpy.array(weightGauss)*weight
smoothed = [0.0]*(len(X)-window)
for i in range(len(smoothed)):
smoothed[i] = sum(numpy.array(X[i:i+window])*weight)/float(sum(weight))
return smoothed
## DUMMY DATA ###
data = X # [0]*30 # 30 "0"s in a row
#data[15] = 1 # the middle one is "1"
### PLOT DIFFERENT SMOOTHING FUNCTIONS ###
pylab.figure()#figsize=(550/80, 700/80)
pylab.suptitle('Data Smoothing', fontsize=15)
pylab.subplot(4, 1, 1)
p1 = pylab.plot(data )
p1 = pylab.plot(data)
a = pylab.axis()
pylab.axis([a[0], a[1], -.1, 1.1])
pylab.text(2,.3,"raw data", fontsize=14)
pylab.subplot(4, 1, 2)
p1 = pylab.plot(smoothList(data))
p1 = pylab.plot(smoothList(data))
#a = pylab.axis()
#pylab.axis([a[0], a[1], -.1, .4])
pylab.text(2, .3, "moving window average", fontsize=14)
pylab.subplot(4, 1, 3)
p1 = pylab.plot(smoothListTriangle(data))
p1 = pylab.plot(smoothListTriangle(data))
pylab.axis([a[0], a[1], -.1, .4])
pylab.text(2, .3, "moving triangle", fontsize=14)
pylab.subplot(4, 1, 4)
p1 = pylab.plot(smoothListGaussian(data))
p1 = pylab.plot(smoothListGaussian(data))
pylab.axis([a[0], a[1], -.1, .4])
pylab.text(2, .3, "moving gaussian", fontsize=14)
pylab.show()
#pylab.savefig("smooth.png", dpi=80)
from matplotlib import pyplot
series= position_data.iloc[:,3][1:]
#series = read_csv('daily-total-female-births.csv', header=0, index_col=0)
# Tail-rolling average transform
rolling = series.rolling(window=3)
rolling_mean = rolling.mean()
print(rolling_mean.head(10))
# plot original and transformed dataset
series.plot()
rolling_mean.plot(color='red')
pyplot.show() | pradeeps147/Inv-kinematics-Biomachanical- | smoothing.py | smoothing.py | py | 4,734 | python | en | code | 0 | github-code | 13 |
73469392336 | import pandas as pd
import numpy as np
from skimage.io import imread
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from tqdm import tqdm
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, Conv2d, MaxPool2d, Module, Softmax, BatchNorm2d, Dropout
from torch.optim import Adam, SGD
from load_data import mask
from torch.utils.data import DataLoader
import csv
from torch.optim.lr_scheduler import StepLR
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_channels, out_channels, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 3, stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, 1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
def forward(self, input):
residual = input
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
if self.downsample:
residual = self.downsample(residual)
x += residual
x = self.relu(x)
return x
class BottleNeck(nn.Module):
expansion = 4
def __init__(self, in_channels, out_channels, stride=1, downsample=None):
super(BottleNeck, self).__init__()
self.conv1 = nn.Conv2d(in_channels, out_channels, 1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.conv2 = nn.Conv2d(out_channels, out_channels, 3, stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_channels)
self.conv3 = nn.Conv2d(out_channels, out_channels*self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(out_channels*self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
def forward(self, input):
residual = input
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
if self.downsample:
residual = self.downsample(residual)
x += residual
x = self.relu(x)
return x
class Resnet(nn.Module):
# 224*224
def __init__(self, block, num_layer, n_classes=2, input_channels=3):
super(Resnet, self).__init__()
self.in_channels = 64
self.conv1 = nn.Conv2d(input_channels, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 64, num_layer[0])
self.layer2 = self._make_layer(block, 128, num_layer[1], 2)
self.layer3 = self._make_layer(block, 256, num_layer[2], 2)
self.layer4 = self._make_layer(block, 512, num_layer[3], 2)
self.avgpool = nn.AvgPool2d(kernel_size=7, stride=1)
self.fc1 = nn.Linear(block.expansion * 512, block.expansion * 128)
self.fc2 = nn.Linear(block.expansion * 128, block.expansion * 16)
self.fc3 = nn.Linear(block.expansion * 16, n_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
def _make_layer(self, block, out_channels, num_block, stride=1):
downsample = None
if stride != 1 or self.in_channels != out_channels * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.in_channels, out_channels * block.expansion, 1, stride=stride, bias=False),
nn.BatchNorm2d(out_channels * block.expansion)
)
layers = []
layers.append(block(self.in_channels, out_channels, stride, downsample))
self.in_channels = out_channels * block.expansion
for _ in range(1, num_block):
layers.append(block(self.in_channels, out_channels))
return nn.Sequential(*layers)
def forward(self, input):
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
def resnet18(pretrained=False, **kwargs):
model = Resnet(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
def main():
history_path = './history.csv'
num_class = 2
model = resnet18().to(device)
optimizer = SGD(model.parameters(), lr=lr,momentum=0.9, nesterov=True)
# scheduler = StepLR(optimizer, step_size=10, gamma=0.1)
criteon = CrossEntropyLoss()
best_acc, best_epoch = 0, 0
global_step = 0
# viz.line([0], [-1], win='loss', opts=dict(title='loss'))
# viz.line([0], [-1], win='val_acc', opts=dict(title='val_acc'))
for epoch in range(epochs):
correct = 0
total = len(train_loader.dataset)
# scheduler.step()
for step, (x, y) in enumerate(train_loader):
# x: [b, 3, 224, 224], y: [b]
# print(step)
# if step>=2:
# break
x, y = x.to(device), y.to(device)
model.train() # 必须加入model.train(),防止和验证时候BN一样
logits = model(x)
pred = logits.argmax(dim=1)
correct += torch.eq(pred, y).sum().float().item()
loss = criteon(logits, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# viz.line([loss.item()], [global_step], win='loss', update='append') # loss可视化
global_step += 1
train_acc = correct / total
print('train acc:', train_acc, 'epoch=', epoch)
val_acc = evalute(model, val_loader)
val_loss = evalute_loss(model, val_loader)
print('val acc:', val_acc)
print(loss.data.cpu().numpy())
if val_acc > best_acc:
best_epoch = epoch
best_acc = val_acc
torch.save(model.state_dict(), 'best.mdl') # 保存模型权重
# viz.line([val_acc], [global_step], win='val_acc', update='append')
print('best acc:', best_acc, 'best epoch:', best_epoch)
model.load_state_dict(torch.load('best.mdl')) # 加载最好的模型权重
# print('loaded from ckpt!')
test_acc = evalute(model, test_loader) # 验证模型,evalute需要我们自己写
print('test acc:', test_acc)
with open(history_path, "a+") as f:
csv_write = csv.writer(f)
csv_write.writerow([epoch, ';', train_acc, ';', val_acc, ';', loss.data.cpu().numpy(),';', val_loss.cpu().numpy()])
def evalute(model, loader):
model.eval() #必须要加入 model.eval() ,因为训练和测试BN不一致
correct = 0
total = len(loader.dataset)
for step, (x, y) in enumerate(loader):
# if step>=1:
# break
x, y = x.to(device), y.to(device)
with torch.no_grad(): #不需要计算梯度,所以加上不求导,验证集一定要加上这几句话
logits = model(x)
pred = logits.argmax(dim=1)
correct += torch.eq(pred, y).sum().float().item()
return correct / total
def evalute_loss(model, loader):
model.eval() #必须要加入 model.eval() ,因为训练和测试BN不一致
correct = 0
criteon = CrossEntropyLoss()
total = len(loader.dataset)
for step, (x, y) in enumerate(loader):
# if step>=1:
# break
x, y = x.to(device), y.to(device)
with torch.no_grad(): #不需要计算梯度,所以加上不求导,验证集一定要加上这几句话
logits = model(x)
loss = criteon(logits, y)
return loss
batchsz = 128
lr = 1e-6
epochs = 500
device = torch.device('cuda')
torch.manual_seed(1234)
train_db = mask('/home/xiejun/mask_PUF/data/4up/train', 224)
val_db = mask('/home/xiejun/mask_PUF/data/4up/val', 224)
test_db = mask('/home/xiejun/mask_PUF/data/4up/test', 224)
train_loader = DataLoader(train_db, batch_size=batchsz, shuffle=True, num_workers=0)
val_loader = DataLoader(val_db, batch_size=batchsz, num_workers=0)
test_loader = DataLoader(test_db, batch_size=batchsz, num_workers=0)
if __name__ == '__main__':
main()
| AugustusXie-rgb/mask_PUF | Resnet_model.py | Resnet_model.py | py | 8,907 | python | en | code | 0 | github-code | 13 |
74024682579 | #!/usr/bin/env python
from websocket import create_connection
import json
# import subprocess
import os
"""
Prerequisites:
Choco - https://chocolatey.org/install#install-with-cmdexe
Python3 - choco install python
Websocket - pip install websocket-client
"""
ip = "10.8.30.11"
directory = "2019vision"
command = "cd {};make clean;make install".format(directory)
print("Connecting to Raspberry Pi Server @ {}".format(ip))
connection = create_connection("ws://" + ip)
print("Disabling Camera")
connection.send(json.dumps({"type":"visionDown"}))
print("Enabling write")
connection.send(json.dumps({"type":"systemWritable"}))
print("Sending *.cpp & *.h files to Raspberry Pi/{}".format(directory))
os.system("scp *.cpp *.h MAKEFILE pi@{}:{}".format(ip, directory))
print("Connecting to Pi & Building Code")
os.system("ssh -t pi@{} '{}'".format(ip,command))
print("Re-Enabling Camera")
connection.send(json.dumps({"type":"visionUp"}))
| FRC830/2019vision | upload_code.py | upload_code.py | py | 935 | python | en | code | 0 | github-code | 13 |
24398402441 | from django.shortcuts import render,redirect,get_object_or_404
from django.views import generic
from . import models
from . import forms
from accounts import models as accounts_models
from django.urls import reverse_lazy,reverse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
# Create your views here.
class GamesList(generic.ListView):
template_name='games_list.html'
model=models.Game
context_object_name='list'
class GameDetail(generic.DetailView):
template_name='game_detail.html'
model=models.Game
context_object_name='game'
class GameAdd(LoginRequiredMixin,generic.CreateView):
model=models.Game
template_name='game_add.html'
form_class=forms.GameForm
class EditGame(LoginRequiredMixin,generic.UpdateView):
model=models.Game
template_name='game_add.html'
form_class=forms.GameForm
class DeleteGame(LoginRequiredMixin,generic.DeleteView):
model=models.Game
success_url=reverse_lazy('games:list')
template_name='confirm_delete.html'
@login_required
def addComment(request,pk):
game=get_object_or_404(models.Game,pk=pk)
_user=get_object_or_404(accounts_models.User,pk=request.user.pk)
if(request.method=="POST"):
form=forms.CommentForm(request.POST)
if form.is_valid():
comment=form.save(commit=False)
comment.game=game
comment.author=_user
comment.save()
return redirect('games:game',pk=pk)
else:
form=forms.CommentForm()
return render(request,'comment_add.html',{'form':form,'pk':pk})
@login_required
def gameAdd(request):
user=get_object_or_404(accounts_models.User,pk=request.user.pk)
if(request.method=="POST"):
form=forms.GameForm(request.POST,request.FILES)
if form.is_valid():
game=form.save(commit=False)
game.author=user
game.save()
return redirect('games:game',pk=game.pk)
else:
form=forms.GameForm()
return render(request,'game_add.html',{'form':form})
@login_required
def addVersion(request,pk):
user=get_object_or_404(accounts_models.User,pk=request.user.pk)
game=get_object_or_404(models.Game,pk=pk)
if(request.method=="POST"):
form=forms.VersionForm(request.POST,request.FILES)
if form.is_valid():
ver=form.save(commit=False)
ver.author=user
ver.game=game
ver.save()
return redirect('games:game',pk=pk)
else:
form=forms.VersionForm()
return render(request,'version_add.html',{'form':form})
class DeleteVersion(LoginRequiredMixin,generic.DeleteView):
model=models.Version
success_url=reverse_lazy('games:list')
template_name='confirm_delete.html'
@login_required
def addImage(request,pk):
game=get_object_or_404(models.Game,pk=pk)
if(request.method=="POST"):
form=forms.ImageForm(request.POST,request.FILES)
if form.is_valid():
image=form.save(commit=False)
image.game=game
image.save()
return redirect('games:game',pk=pk)
else:
form=forms.ImageForm()
return render(request,'image_add.html',{'form':form,'pk':pk}) | stachurski2k4/GamesZoNe | gameszone/games/views.py | views.py | py | 3,265 | python | en | code | 0 | github-code | 13 |
35265550526 | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, Http404, HttpResponseRedirect, JsonResponse
from django.template import loader
from django.urls import reverse
from django.views import View
from django.utils import timezone
from .models import Question, Choice
from .models import KakaoFriend
from django.core.serializers import serialize
import json
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
def index(request):
# latest_question_list = Question.objects.order_by('-pub_date')[:5]
# template = loader.get_template('polls/index.html')
# context = {
# 'latest_question_list': latest_question_list,
# }
# return HttpResponse(template.render(context, request))
#위 와 같은 코드
latest_question_list = Question.objects.order_by('-pub_date')[:5]
context = {
'latest_question_list' : latest_question_list
}
return render(request, 'polls/index.html', context)
# Create your views here.
def detail(request, question_id):
# try:
# question = Question.objects.get(pk=question_id)
# except Question.DoesNotExist:
# raise Http404("Question does not exit")
# return render(request, 'polls/detail.html', {'question': question})
# 위와 같은 코드 아래와
question = get_object_or_404(Question, pk=question_id)
return render(request, "polls/detail.html", {'question' : question})
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
return render(request, 'polls/detail.html',{
'question' : question,
'error_message' : "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
def results(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/results.html', {'question': question})
# @method_decorator(csrf_exempt, name='dispatch')# 실제로 할땐 없애야함 ( 보안 )
# class testView(View):
# def get(self, request):
# dummy_data = {
# 'name': '죠르디',
# 'type': '공룡',
# 'job': '편의점알바생',
# 'age': 5
# }
# return JsonResponse(dummy_data)
# def post(self, request):
# return HttpResponse("post 요청")
# def put(self, request):
# return HttpResponse("Put 요청을 잘받았다")
# def delete(self, request):
# return HttpResponse("Delete 요청을 잘받았다")
@method_decorator(csrf_exempt, name='dispatch')
class test(View):
def get(self, request):
friends = KakaoFriend.objects.all().order_by('-id')
data = json.loads(serialize('json', friends))
return JsonResponse({'items': data})
def post(self, request):
if request.META['CONTENT_TYPE'] == "application/json":
request = json.loads(request.body)
friend = KakaoFriend(name = request['name'],
type = request['type'],
job = request['job'],
age = request['age'])
else:
friend = KakaoFriend(name = request.POST['name'],
type = request.POST['type'],
job = request.POST['job'],
age = request.POST['age'])
friend.save()
return HttpResponse(status=200)
@method_decorator(csrf_exempt, name='dispatch')# 실제로 할땐 없애야함 ( 보안 )
class API(View):
def get(self, request):
json = {'code': 'get'}
return JsonResponse(json)
def post(self, request):
return HttpResponse("Post 요청을 잘받았다")
# json = {'code': 'post'}
# return JsonResponse(json)
def put(self, request):
json = {'code': 'put'}
return JsonResponse(json)
def delete(self, request):
json = {'code': 'delete'}
return JsonResponse(json)
# @method_decorator(csrf_exempt, name='dispatch')# 실제로 할땐 없애야함 ( 보안 )
# class IndexView(View):
# def get(self, request):
# friends = KakaoFriend.objects.all().order_by('-id')
# data = json.loads(serialize('json', friends))
# return JsonResponse({'items': data})
# def post(self, request):
# if request.META['CONTENT_TYPE'] == "application/json":
# request = json.loads(request.body)
# friend = KakaoFriend(name = request['name'],
# type = request['type'],
# job = request['job'],
# age = request['age'])
# else:
# friend = KakaoFriend(name = request.POST['name'],
# type = request.POST['type'],
# job = request.POST['job'],
# age = request.POST['age'])
# friend.save()
# return HttpResponse(status=200)
# def put(self, request):
# request = json.loads(request.body)
# id = request['id']
# age = request['age']
# friend = get_object_or_404(KakaoFriend, pk=id)
# friend.age = age
# friend.save()
# return HttpResponse(status=200)
# def delete(self, request):
# request = json.loads(request.body)
# id = request['id']
# friend = get_object_or_404(KakaoFriend, pk=id)
# friend.delete()
# return HttpResponse(status=200)
| BakJunGoen/python_study_django | study_django/mysite/polls/views.py | views.py | py | 5,893 | python | en | code | 0 | github-code | 13 |
71454083217 | print()
from openpyxl import Workbook
from openpyxl.drawing.image import Image
wb = Workbook()
ws = wb.active
# ImportError: You must install Pillow to fetch image objects
# 추가 라이브러리 필요 : pip install Pillow
img = Image("./RPAbasic/excel/dog.jpg")
ws.add_image(img, "C3")
wb.save("./RPAbasic/excel/image.xlsx")
print()
| HwangJuu/pythonsource | RPAbasic/excel/14_image.py | 14_image.py | py | 341 | python | en | code | 0 | github-code | 13 |
19466026425 | # Did not work locally. Test accepted.
import string
from string import maketrans, lowercase as lc, uppercase as uc
def rot13(message):
tran = maketrans(lc + uc, lc[13:] + lc[:13] + uc[13:] + uc[:13])
return message.translate(tran)
def main():
new_mess = rot13("Test")
print(new_mess)
if __name__ == "__main__":
main() | turo62/exercise | exercise/codewar/rot13_bp.py | rot13_bp.py | py | 353 | python | en | code | 0 | github-code | 13 |
36785429920 | import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import torch
from torch.nn.parameter import Parameter
import torchvision
import kornia as K
def plot_tensor(t):
plt.imshow(np.array(t.permute(1,2,0)))
def kornia_rotation(img, degrees):
# unsqueeze img
img = img.unsqueeze(0)
# create transformation (rotation)
alpha: float = 45.0 # in degrees
angle: torch.tensor = torch.ones(1) * alpha
# define the rotation center
center: torch.tensor = torch.ones(1, 2)
center[..., 0] = img.shape[3] / 2 # x
center[..., 1] = img.shape[2] / 2 # y
# define the scale factor
scale: torch.tensor = torch.ones(1, 2)
# compute the transformation matrix
M: torch.tensor = K.geometry.get_rotation_matrix2d(center, angle, scale)
_, _, h, w = img.shape
img_warped: torch.tensor = K.geometry.warp_affine(img, M, dsize=(h, w))
return img_warped.squeeze(0)
def show_sample(input, size: tuple = None):
images = torch.stack(input, dim=0)
out = torchvision.utils.make_grid(images, nrow=4, padding=5, pad_value=1)
out_np: np.ndarray = K.utils.tensor_to_image(out)
plt.figure(figsize=size)
plt.imshow(out_np)
plt.axis('off')
plt.show()
def learning_grid(img_dict: dict, title, save=None):
fig = plt.figure(figsize=(15,10))
grid = ImageGrid(fig, 111, # similar to subplot(111)
nrows_ncols=(1, 3), # creates 2x2 grid of axes
axes_pad=(2,10), # pad between axes in inch.
)
for ax, im_name in zip(grid, img_dict.keys()):
# Iterating over the grid returns the Axes.
ax.set_title(im_name, fontsize=20)
im = np.array(img_dict[im_name].permute(1,2,0).detach().cpu())
ax.imshow(im)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.suptitle(title, fontsize=25)
if save:
plt.savefig(save)
plt.close()
else:
plt.show()
def old_create_loss_map(model_constructor, linspace, dataloader, loss_function, save):
loss_maps_path = 'loss_maps'
os.makedirs(loss_maps_path, exist_ok=True)
map_path = f'{loss_maps_path}/{save}.npy'
if not os.path.exists(map_path):
with torch.no_grad():
p_losses = []
for p in linspace:
p_model = model_constructor(init_param_val=p)
for i, data in enumerate(dataloader): # calc for one batch
source_images, target_images = data
source_images, target_images = source_images, target_images
output_images = p_model(source_images)
loss = loss_function(output_images, target_images)
p_losses.append(loss.item())
break
if save:
np.save(map_path, p_losses)
else:
p_losses = np.load(map_path)
return p_losses
def create_loss_map(model_constructor, training_aug_constructor, training_aug_constructor_args,
aug_learnable_params, main_parameter_name, aug_bounds,
linspace, dataloader, loss_function, save):
loss_maps_path = 'loss_maps'
os.makedirs(loss_maps_path, exist_ok=True)
map_path = f'{loss_maps_path}/{save}.npy'
if not os.path.exists(map_path):
with torch.no_grad():
p_losses = []
for p in linspace:
p_aug_learnable_params = aug_learnable_params.copy()
p_aug_learnable_params[main_parameter_name] = Parameter(torch.Tensor([p]))
p_model = model = model_constructor(
aug_constructor=training_aug_constructor,
learnable_params=p_aug_learnable_params,
aug_constructor_args=training_aug_constructor_args,
aug_bounds=aug_bounds
)
for i, data in enumerate(dataloader): # calc for one batch
source_images, target_images = data
source_images, target_images = source_images, target_images
output_images = p_model(source_images)
loss = loss_function(output_images, target_images)
p_losses.append(loss.item())
break
if save:
np.save(map_path, p_losses)
else:
p_losses = np.load(map_path)
return p_losses
| TomBekor/AugmentationsLearning | utils.py | utils.py | py | 4,550 | python | en | code | 0 | github-code | 13 |
32757351479 | from twilio.rest import Client
TWILIO_SID = '' # Add in your Twilio's SID
TWILIO_AUTH_TOKEN = '' # Add in your Twilio's Auth token
TWILIO_VIRTUAL_NUMBER = "" # Add in your Twilio's virtual number
TWILIO_VERIFIED_NUMBER = "" # Add in your phone number to send to
class NotificationManager:
def __init__(self):
self.client = Client(TWILIO_SID, TWILIO_AUTH_TOKEN)
def send_sms(self, message):
message = self.client.messages.create(
body=message,
from_=TWILIO_VIRTUAL_NUMBER,
to=TWILIO_VERIFIED_NUMBER,
)
# Prints if successfully sent.
print(message.sid) | fells/100-Days-Challenge | Day39/notification_manager.py | notification_manager.py | py | 649 | python | en | code | 1 | github-code | 13 |
23082930384 | #Exercício Python 56: Desenvolva um programa que leia o nome, idade e sexo de 4 pessoas.
#No final do programa, mostre: a média de idade do grupo, qual é o nome do homem mais velho e quantas mulheres têm menos de 20 anos.
age = 0
mediage = 0
velho = 0
nomevelho = ''
mav = 0
for p in range(1, 5):
print('----- {}ª PESSOA -----'.format(p))
nome = str(input('Nome: ')).strip()
idade = int(input('Idade: '))
sexo = str(input('Sexo [M/F]: ')).strip()
age += idade
if p == 1 and sexo in 'Mm':
velho == idade
nomevelho = nome
if idade > velho and sexo in 'Mm':
velho = idade
nomevelho = nome
if sexo in 'Ff' and idade < 20:
mav += 1
mediage = age/4
print('A média de idade do grupo é de {}'.format(mediage))
print('O homem mais velho tem {} anos e se chama {}'.format(velho, nomevelho))
print('Ao todo são {} mulheres com menos de 20 anos.'.format(mav))
| nicole-pereira/Python-Desafios | ex056.py | ex056.py | py | 948 | python | pt | code | 0 | github-code | 13 |
3283502993 | import logging
import argparse as arg
# ---------------------
# Logging
# ---------------------
log = logging.getLogger('daikon')
# ---------------------
# Classes
# ---------------------
class Parser(object):
    """Builds the daikon (ElasticSearch CLI) argument parser.

    Construct with the program version, call setup() once to build the
    argparse tree, then get_results() to parse sys.argv.
    """

    def __init__(self, version):
        # The parser is built lazily in setup() so the version string can
        # be injected first.
        self._version = version
        self._main = None

    @staticmethod
    def _add_connection_options(parser, host=True):
        """Attach the shared connection options to *parser*.

        Every (sub)command accepts --cluster and --port; most also accept
        --host. The node status/shutdown subcommands take the hostname as
        a positional argument instead, so they pass host=False.
        """
        parser.add_argument('--cluster')
        if host:
            parser.add_argument('--host')
        parser.add_argument('--port')

    def setup(self):
        """Build the full command tree: index/cluster/node subcommands."""
        self._main = arg.ArgumentParser(description='ElasticSearch CLI v%s' %
                                        (self._version))
        self._main.add_argument('--version', action='version',
                                version=self._version)
        self._add_connection_options(self._main)

        main_sub = self._main.add_subparsers(title='subcommands',
                                             description='valid subcommands',
                                             help='additional help',
                                             dest='main_sub')

        # index subcommands
        index = main_sub.add_parser('index')
        index = index.add_subparsers(title='subcommands',
                                     description='valid subcommands',
                                     help='additional help',
                                     dest='index_name')

        index_create = index.add_parser('create')
        index_create.add_argument('index_create_indexname', metavar='indexname')
        index_create.add_argument('--shards')
        index_create.add_argument('--replicas')
        self._add_connection_options(index_create)

        index_delete = index.add_parser('delete')
        index_delete.add_argument('index_delete_indexname', metavar='indexname')
        self._add_connection_options(index_delete)

        index_open = index.add_parser('open')
        index_open.add_argument('index_open_indexname', metavar='indexname')
        self._add_connection_options(index_open)

        index_close = index.add_parser('close')
        index_close.add_argument('index_close_indexname', metavar='indexname')
        self._add_connection_options(index_close)

        index_status = index.add_parser('status')
        index_status.add_argument('index_status_indexname', metavar='indexname')
        self._add_connection_options(index_status)
        index_status.add_argument('--extended', action='store_true')
        index_status.add_argument('--display', choices=['extended', 'regular'])

        index_list = index.add_parser('list')
        self._add_connection_options(index_list)
        index_list.add_argument('--extended', action='store_true')

        # cluster subcommands
        cluster = main_sub.add_parser('cluster')
        cluster = cluster.add_subparsers(title='subcommands',
                                         description='valid subcommands',
                                         help='additional help',
                                         dest='cluster_name')

        cluster_status = cluster.add_parser('status')
        self._add_connection_options(cluster_status)
        cluster_status.add_argument('--extended', action='store_true')

        cluster_shutdown = cluster.add_parser('shutdown')
        self._add_connection_options(cluster_shutdown)

        # node subcommands
        node = main_sub.add_parser('node')
        node = node.add_subparsers(title='subcommands',
                                   description='valid subcommands',
                                   help='additional help',
                                   dest='node_name')

        node_list = node.add_parser('list')
        self._add_connection_options(node_list)
        node_list.add_argument('--extended', action='store_true')

        node_status = node.add_parser('status')
        node_status.add_argument('node_status_hostname', metavar='hostname')
        self._add_connection_options(node_status, host=False)
        node_status.add_argument('--extended', action='store_true')

        node_shutdown = node.add_parser('shutdown')
        node_shutdown.add_argument('node_shutdown_hostname', metavar='hostname')
        node_shutdown.add_argument('--delay', default=0)
        self._add_connection_options(node_shutdown, host=False)

    def get_results(self):
        """Parse sys.argv using the tree built by setup()."""
        return self._main.parse_args()
| neogenix/daikon | daikon/parser.py | parser.py | py | 5,564 | python | en | code | 56 | github-code | 13 |
41736759785 | import sshconnect
import Initializer
import time
import threading
names = Initializer.language  # localized UI strings
server_settings = Initializer.server_settings  # remote host configuration
client_settings = Initializer.client_settings  # local paths and options
frame = None  # main UI frame; assigned by run()
ssh = None  # SSH client from sshconnect; assigned by run()
def run(fr):
    """Entry point: bind the UI frame, verify SSH works, then start the checks."""
    global frame, ssh
    frame = fr
    ssh = sshconnect.ssh
    try:
        # Cheap probe to confirm the SSH session is alive.
        ssh.exec_command('ls', timeout=3)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed as a "connection failure".
        addstr(Initializer.language['errors']['ConnectionFailed'])
        return
    python_check()
    availability_check()
    # The network probe blocks for seconds; keep the UI responsive.
    check_net_thread = threading.Thread(target=check_network)
    check_net_thread.start()
def cronStart():
    """Back up the crontab, then install the cron task, reporting each outcome."""
    import CronSet
    report = Initializer.language['TracerReport']
    if not CronSet.BackupCron():
        addstr(report['CronBackFall'])
        return
    if CronSet.add_cron_task():
        addstr(report['CronOk'])
    else:
        addstr(report['CronFall'])
def addstr(line):
    """Append one line to the end of the report list on the third tab."""
    report = frame.tab_three.report
    report.InsertItem(report.GetItemCount(), line)
def check_network():
    """Run the remote Telegram-connectivity probe and report its outcome."""
    script = client_settings["Main"]["path_to_program"] + '/NetWorkTest.py'
    stdin, stdout, stderr = ssh.exec_command('python3 ' + script)
    result = stdout.read().decode('utf-8').strip()
    if not result:
        return
    if result == 'NotConnect':
        addstr(Initializer.language['TracerReport']['TelegramNetFall'])
    if result == 'Unauthorized':
        addstr(Initializer.language['TracerReport']['TelegramUnAuth'])
    if result == 'True':
        addstr(Initializer.language['TracerReport']['TelegramCheckOk'])
        cronStart()
        # Fetch the remote test report in the background.
        check_file_thread = threading.Thread(target=get_remote_report)
        check_file_thread.start()
def check_script_location():
    """Verify start.py exists at the configured remote path; report and return True/False."""
    ssh = sshconnect.ssh
    path = client_settings["Main"]["path_to_program"] + '/start.py'
    report = frame.tab_three.report
    # Run `ls` on the expected path and inspect its output for the file name.
    stdin, stdout, stderr = ssh.exec_command('ls ' + path)
    listing = stdout.read().decode('utf-8').strip()
    addstr(names['TracerReport']['AvailabilityCheck'])
    found = 'start.py' in listing
    if found:
        addstr(names['TracerReport']['AvailabilityOK'])
    else:
        addstr(names['TracerReport']['AvailabilityFall'])
    addstr(f"{names['TracerReport']['AvailabilityAdress']}{path}")
    return found
def get_remote_report():
    """Poll the remote host for report/TestReport.txt, print its lines, then delete it."""
    content = ''
    sftp = ssh.open_sftp()
    path = client_settings["Main"]["path_to_program"] + '/report/TestReport.txt'
    addstr(names['TracerReport']['GetReportPromt'])
    timer = 1
    files = sftp.listdir(client_settings["Main"]["path_to_program"] + '/report/')
    # Remove any stale report left over from a previous run so we only
    # pick up a freshly written file.
    if 'TestReport.txt' in files:
        remote_file_path = path
        sftp.remove(remote_file_path)
    # Poll once per second for up to ~59 seconds.
    while True:
        files = sftp.listdir(client_settings["Main"]["path_to_program"] + '/report/')
        if timer > 59:
            addstr(names['TracerReport']['GetReportFall'])
            break
        if 'TestReport.txt' not in files:
            time.sleep(1)
            timer += 1
            continue
        with sftp.open(path, 'r') as f:
            content = f.read().decode('utf-8')
            # The report apparently contains escaped sequences (e.g. \uXXXX);
            # unicode-escape turns them back into text — TODO confirm format.
            decoded_text = content.encode('utf-8').decode('unicode-escape')
            addstr(names['TracerReport']['GetReportOK'])
            decoded_text = decoded_text.splitlines()
            for line in decoded_text:
                addstr(line.strip('"').strip())
            # Consume the report so the next run starts clean.
            remote_file_path = path
            sftp.remove(remote_file_path)
            break
    sftp.close()
def python_check():
    """Probe the remote host for python3; raise RuntimeError if it is missing.

    The original ended with a bare `raise` outside an except block, which
    itself raises RuntimeError("No active exception to re-raise"); raising
    an explicit RuntimeError keeps the same exception type for callers but
    carries a useful message.
    """
    stdin, stdout, stderr = ssh.exec_command('python3 -c "print(\'OK\')"')
    output = stdout.read().decode()
    error = stderr.read().decode()
    addstr(names['TracerReport']['SeekPython'])
    if error:
        addstr(f"{names['TracerReport']['PythonFall']}{error}")
        raise RuntimeError('python3 is not available on the remote host: ' + error)
    stdin, stdout, stderr = ssh.exec_command('python3 --version')
    output = stdout.read().decode()
    addstr(f"{names['TracerReport']['PythonDetected']}{output}")
def availability_check():
    """Verify the remote deployment: start.py present and TestFolder populated.

    Raises RuntimeError when a required file is missing. (The original used
    bare `raise` statements outside an except block, which themselves raise
    RuntimeError, so the exception type seen by callers is unchanged — but
    now with a meaningful message.)
    """
    if not check_script_location():
        raise RuntimeError('start.py was not found at the configured remote path')
    path = client_settings["Main"]["path_to_program"] + '/TestFolder'
    stdin, stdout, stderr = ssh.exec_command('ls ' + path)
    output = stdout.read().decode('utf-8').strip()
    addstr(names['TracerReport']['ContentFind'])
    if 'Must_Send.test' in output:
        # Echo the folder listing into the report.
        for line in output.splitlines():
            addstr(line.strip())
        addstr(names['TracerReport']['ContentFindOK'])
    else:
        addstr(names['TracerReport']['ContentFindFall'])
        raise RuntimeError('Must_Send.test is missing from the remote TestFolder')
def start_test_mode():
    """Placeholder: entering test mode is not implemented yet."""
    pass
def finish_test_mode():
    """Placeholder: leaving test mode is not implemented yet."""
    pass
| fire-neuron/CopyGram | tracer.py | tracer.py | py | 4,829 | python | en | code | 1 | github-code | 13 |
3876677585 | import argparse
import os
from src.lfr.run_benchmark import evaluate_clustering_results
def eval_lfr_k_n_mu(avg_degree_list, n_list, methods, metric='ami', base_path='./'):
    """Evaluate predicted LFR clusterings grouped by (avg_degree, n), varying mu.

    For every benchmark set, scores each method's predictions with *metric*
    and pickles the results under
    results/lfr/<metric>/<method>/<avg_degree>deg/<n>n.pkl.
    """
    # endswith() also copes with an empty base_path, where the previous
    # `base_path[-1]` check raised IndexError.
    if not base_path.endswith('/'):
        base_path = base_path + '/'
    i = 0
    num_benchmark_sets = len(n_list) * len(avg_degree_list)
    for avg_degree in avg_degree_list:
        for n in n_list:
            i += 1
            print(f'Evaluating predicted clusterings for benchmark set {i}/{num_benchmark_sets}...')
            benchmark_dir = base_path + f'data/lfr_benchmark/{avg_degree}deg/{n}n/'
            for j, method in enumerate(methods):
                print(f'Evaluating method {j + 1}/{len(methods)} ({method})...')
                pred_dir = base_path + 'results/lfr/clustering/' + method + f'/{avg_degree}deg/{n}n/'
                results = evaluate_clustering_results(benchmark_dir, pred_dir, metric, variable='mu')
                results_dir = base_path + 'results/lfr/' + metric + '/' + method + f'/{avg_degree}deg/'
                os.makedirs(results_dir, exist_ok=True)
                results.save(results_dir + f'{n}n.pkl')
def eval_lfr_k_mu_n(avg_degree_list, mu_list, methods, metric='ami', base_path='./'):
    """Evaluate predicted LFR clusterings grouped by (avg_degree, mu), varying n.

    Mirror of eval_lfr_k_n_mu with the roles of n and mu swapped; results
    are pickled under
    results/lfr/<metric>/<method>/<avg_degree>deg/<100*mu>mu.pkl.
    """
    # endswith() also copes with an empty base_path, where the previous
    # `base_path[-1]` check raised IndexError.
    if not base_path.endswith('/'):
        base_path = base_path + '/'
    i = 0
    num_benchmark_sets = len(mu_list) * len(avg_degree_list)
    for avg_degree in avg_degree_list:
        for mu in mu_list:
            i += 1
            print(f'Evaluating predicted clusterings for benchmark set {i}/{num_benchmark_sets}...')
            benchmark_dir = base_path + f'data/lfr_benchmark/{avg_degree}deg/{int(100 * mu)}mu/'
            for j, method in enumerate(methods):
                print(f'Evaluating method {j + 1}/{len(methods)} ({method})...')
                pred_dir = base_path + 'results/lfr/clustering/' + method + f'/{avg_degree}deg/{int(100 * mu)}mu/'
                results = evaluate_clustering_results(benchmark_dir, pred_dir, metric, variable='n')
                results_dir = base_path + 'results/lfr/' + metric + '/' + method + f'/{avg_degree}deg/'
                os.makedirs(results_dir, exist_ok=True)
                results.save(results_dir + f'{int(100 * mu)}mu.pkl')
def main(argd):
    """Dispatch to the right LFR evaluation based on parsed CLI arguments.

    Parameters
    ----------
    argd : dict
        Dictionary of parsed command line arguments.
    """
    # --network-sizes and --mixing-params are mutually exclusive; whichever
    # is present decides which evaluation variant runs.
    if argd['network_sizes'] is None:
        eval_lfr_k_mu_n(argd['avg_degrees'][0], argd['mixing_params'][0], argd['methods'][0], argd['metric'],
                        argd['base_path'])
    else:
        eval_lfr_k_n_mu(argd['avg_degrees'][0], argd['network_sizes'][0], argd['methods'][0], argd['metric'],
                        argd['base_path'])
if __name__ == "__main__":
    parser = argparse.ArgumentParser('Evaluate detected LFR clusterings.')
    # action='append' + nargs='+' wraps each option in an extra list level,
    # which is why main() indexes with [0].
    parser.add_argument('--avg-degrees', action='append', nargs='+', type=int,
                        help='List of average degrees of the benchmark graphs')
    # Exactly one of --network-sizes / --mixing-params must be given.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--network-sizes', action='append', nargs='+', type=int,
                       help='List of network-sizes of the benchmark graphs')
    group.add_argument('--mixing-params', action='append', nargs='+', type=float,
                       help='List of mixing-params of the benchmark graphs')
    parser.add_argument('-m', '--methods', action='append', nargs='+', type=str,
                        help='List of methods to be benchmarked.')
    parser.add_argument('--metric', type=str, default='ami', help='Evaluation metric')
    parser.add_argument('--base-path', type=str, default='./', help='Base path of the project directory')
    args = parser.parse_args()
    main(vars(args))
| synwalk/synwalk-analysis | src/scripts/evaluate_lfr.py | evaluate_lfr.py | py | 3,835 | python | en | code | 7 | github-code | 13 |
31625816434 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 17:46:36 2016
@author: ajaver
"""
import pandas as pd
import os
import tables
import numpy as np
import matplotlib.pylab as plt
from collections import OrderedDict
from scipy.signal import savgol_filter
from scipy.signal import medfilt
import sys
sys.path.append('/Users/ajaver/Documents/GitHub/Multiworm_Tracking')
from MWTracker.helperFunctions.timeCounterStr import timeCounterStr
if __name__ == '__main__':
#base directory
#masked_image_file = '/Users/ajaver/Desktop/Videos/Avelino_17112015/MaskedVideos/CSTCTest_Ch5_17112015_205616.hdf5'
#masked_image_file = '/Users/ajaver/Desktop/Videos/Avelino_17112015/MaskedVideos/CSTCTest_Ch3_17112015_205616.hdf5'
masked_image_file = '/Users/ajaver/Desktop/Videos/Avelino_17112015/MaskedVideos/CSTCTest_Ch1_18112015_075624.hdf5'
#masked_image_file = '/Users/ajaver/Desktop/Videos/04-03-11/MaskedVideos/575 JU440 swimming_2011_03_04__13_16_37__8.hdf5'
#masked_image_file = '/Users/ajaver/Desktop/Videos/04-03-11/MaskedVideos/575 JU440 on food Rz_2011_03_04__12_55_53__7.hdf5'
skeletons_file = masked_image_file.replace('MaskedVideos', 'Results1')[:-5] + '_skeletons.hdf5'
intensities_file = skeletons_file.replace('_skeletons', '_intensities')
min_block_size = 1
#get the trajectories table
with pd.HDFStore(skeletons_file, 'r') as fid:
trajectories_data = fid['/trajectories_data']
#at this point the int_map_id with the intensity maps indexes must exist in the table
assert 'int_map_id' in trajectories_data
trajectories_data = trajectories_data[trajectories_data['int_map_id']>0]
grouped_trajectories = trajectories_data.groupby('worm_index_joined')
tot_worms = len(grouped_trajectories)
base_name = skeletons_file.rpartition('.')[0].rpartition(os.sep)[-1].rpartition('_')[0]
progress_timer = timeCounterStr('');
valid_data = OrderedDict()
for index_n, (worm_index, trajectories_worm) in enumerate(grouped_trajectories):
if index_n % 10 == 0:
dd = " Correcting Head-Tail using intensity profiles. Worm %i of %i." % (index_n+1, tot_worms)
dd = base_name + dd + ' Total time:' + progress_timer.getTimeStr()
print(dd)
int_map_id = trajectories_worm['int_map_id'].values
first_frame = trajectories_worm['frame_number'].min()
last_frame = trajectories_worm['frame_number'].max()
#only analyze data that contains at least min_block_size intensity profiles
if int_map_id.size < min_block_size:
continue
#read the worm intensity profiles
with tables.File(intensities_file, 'r') as fid:
worm_int_profile = fid.get_node('/straighten_worm_intensity_median')[int_map_id,:]
#%%
#normalize intensities of each individual profile
frame_med_int = np.median(worm_int_profile, axis=1);
worm_int_profile = worm_int_profile - frame_med_int[:, np.newaxis]
#worm median intensity
median_profile = np.median(worm_int_profile, axis=0).astype(np.float)
valid_data[worm_index] = {'median_profile' : median_profile,
'int_map_id' : int_map_id, 'frame_med_int' : frame_med_int,
'frame_range' : (first_frame,last_frame)}
#%% get valid indexes
traj_groups = {}
joined_indexes = {}
dd = trajectories_data[trajectories_data['worm_label']==1]
grouped_trajectories_N = dd.groupby('worm_index_N')
for index_n, (worm_index, trajectories_worm) in enumerate(grouped_trajectories_N):
joined_indexes[worm_index] = trajectories_worm['worm_index_joined'].unique()
for wi in joined_indexes[worm_index]:
traj_groups[wi] = worm_index
#%%
best_match1 = []
best_match2 = []
length_resampling = len(median_profile)
tot_index = len(valid_data)
prob_data = {}
valid_worm_index = list(valid_data.keys())
valid_worm_order = {x:n for n,x in enumerate(valid_worm_index)}
tot_valid_worms = len(valid_worm_index)
prob_mat = np.full((tot_valid_worms, tot_valid_worms), np.nan)
prob_mat2 = np.full((tot_valid_worms, tot_valid_worms), np.nan)
#%%
rr = (np.arange(20)/19)*0.9 + 0.1
damp_factor = np.ones(131);
damp_factor[:20] = rr
damp_factor[-20:] = rr[::-1]
#%%
for worm_index in valid_data:
first_frame = valid_data[worm_index]['frame_range'][0]
last_frame = valid_data[worm_index]['frame_range'][1]
#filter pausible indexes
other_worms_ind = valid_worm_index[:]
other_worms_ind.remove(worm_index)
#trajectories that do not overlap with the current one
other_worms_ind = [x for x in other_worms_ind
if (last_frame < valid_data[x]['frame_range'][0]) or
(first_frame > valid_data[x]['frame_range'][1])]
if len(other_worms_ind) == 0:
continue
#
other_worm_profile = np.zeros((length_resampling, len(other_worms_ind)))
for w_ii, w_ind in enumerate(other_worms_ind):
other_worm_profile[:,w_ii] = valid_data[w_ind]['median_profile']
trajectories_worm = grouped_trajectories.get_group(worm_index)
int_map_id = valid_data[worm_index]['int_map_id'].copy()
frame_med_int = valid_data[worm_index]['frame_med_int'].copy()
median_profile = valid_data[worm_index]['median_profile'].copy()
with tables.File(intensities_file, 'r') as fid:
worm_int_profile = fid.get_node('/straighten_worm_intensity_median')[int_map_id,:]
worm_int_profile -= frame_med_int[:, np.newaxis]
#for ii in range(worm_int_profile.shape[1]):
# worm_int_profile[:,ii] = medfilt(worm_int_profile[:,ii], 101)
worm_int_profile *= damp_factor
median_profile *= damp_factor
other_worm_profile *= damp_factor[:, np.newaxis]
# median_profile = savgol_filter(median_profile, 15, 3)
# for ii in range(worm_int_profile.shape[0]):
# worm_int_profile[ii,:] = savgol_filter(worm_int_profile[ii,:], 15, 3)
# for ii in range(other_worm_profile.shape[1]):
# other_worm_profile[:,ii] = savgol_filter(other_worm_profile[:,ii], 15, 3)
#
# median_profile = np.diff(median_profile)
# worm_int_profile = np.diff(worm_int_profile, axis=1)
# other_worm_profile = np.diff(other_worm_profile, axis=0)
#%%
# DD_ori = worm_int_profile[:,:,np.newaxis] - other_worm_profile[np.newaxis,:,:]
# DD_inv = worm_int_profile[:,:,np.newaxis] - other_worm_profile[np.newaxis,::-1,:]
#
# DD_ori = np.mean(np.abs(DD_ori), axis = 1)
# DD_inv = np.mean(np.abs(DD_inv), axis = 1)
#
# is_ori = np.mean(np.argmin((DD_ori, DD_inv), axis=0), axis=0)
#
#
# DD_best = np.zeros_like(DD_ori)
# for ii, flag in enumerate(is_ori):
# DD_best[:,ii] = DD_ori[:,ii] if flag else DD_inv[:,ii]
#
#
# worm_prob = -np.mean(DD_best, axis=0)
#p = np.exp(-DD_best)
#z = np.sum(p, axis=1)
#p = p/z[:,np.newaxis]
#worm_prob = np.sum(p,axis=0)/p.shape[0]
#best_ind = np.argmax(p, axis=1)
#worm_prob = np.bincount(best_ind)/best_ind.size
#%%
DD_ori = other_worm_profile - median_profile[:, np.newaxis]
DD_ori = np.mean(np.abs(DD_ori), axis=0)
DD_inv = other_worm_profile - median_profile[::-1, np.newaxis]
DD_inv = np.mean(np.abs(DD_inv), axis=0)
DD_best = np.min((DD_ori, DD_inv), axis=0)
worm_prob2 = -DD_best
wi1 = valid_worm_order[worm_index]
for ii, x in enumerate(other_worms_ind):
wi2 = valid_worm_order[x]
prob_mat2[wi1,wi2] = DD_best[ii]
#best_inv = np.argmin((DD, DD_inv), axis=0)
#DD = np.exp(-DD)
#worm_prob2 = DD/np.sum(DD)
#%%
# #%%
if False:
dd = joined_indexes[traj_groups[worm_index]]
title_str = '%i: %s' % (worm_index, str(dd))
plt.figure()
plt.subplot(1,2,1)
plt.plot(median_profile, 'k')
best1 = np.argsort(worm_prob)[:-4:-1]
for x in best1:
plt.plot(other_worm_profile[:,x], label=other_worms_ind[x])
plt.legend(loc=4)
plt.title(title_str)
plt.subplot(1,2,2)
plt.plot(median_profile, 'k')
best1 = np.argsort(worm_prob2)[:-4:-1]
for x in best1:
plt.plot(other_worm_profile[:,x], label=other_worms_ind[x])
plt.legend(loc=4)
plt.title(title_str)
#%%
#from scipy.signal import medfilt
#worm_int_smooth = np.zeros_like(worm_int_profile)
#
#for ii in range(worm_int_profile.shape[1]):
# worm_int_smooth[:,ii] = medfilt(worm_int_profile[:,ii], 5)
# #worm_int_smooth[:,ii] = savgol_filter(worm_int_profile[:,ii], 15, 3)
#
#
#plt.figure()
#plt.imshow(worm_int_profile.T, interpolation='none', cmap='gray')
#plt.grid('off')
#
#plt.figure()
#plt.imshow(worm_int_smooth.T, interpolation='none', cmap='gray')
#plt.grid('off')
#
#%%
#for traj_ind in joined_indexes:
# plt.figure()
# for w_ind in joined_indexes[traj_ind]:
# xx = valid_data[w_ind]['median_profile']
# plt.subplot(1,2,1)
# plt.plot(xx)
#
# plt.subplot(1,2,2)
# plt.plot(np.diff(savgol_filter(xx, 15, 3)))
#%%
# #%%
# prob_data[worm_index] = {'other_worms_ind':other_worms_ind,
# 'worm_prob':worm_prob, 'worm_prob2':worm_prob2}
#
# if len(worm_prob) == 0:
# continue
# ii = np.argmax(worm_prob)
# best_match1.append((worm_index, other_worms_ind[ii], worm_prob[ii]))
#
# ii = np.argmax(worm_prob2)
# best_match2.append((worm_index, other_worms_ind[ii], worm_prob2[ii]))
# #%%
#
# #%%
# good = ~np.isnan(prob_mat)
# DD = np.exp(-prob_mat[good])
# prob_mat[good] = DD/np.sum(DD)
#
# good = ~np.isnan(prob_mat2)
# DD = np.exp(-prob_mat2[good])
# prob_mat2[good] = DD/np.sum(DD)
#
# plt.figure()
# plt.plot(np.sort(prob_mat[~np.isnan(prob_mat)]), '.')
# plt.plot(np.sort(prob_mat2[~np.isnan(prob_mat2)]), '.')
# #%%
# worm_index = 3
# prob_data[worm_index]
# worm_prob2 = prob_data[worm_index]['worm_prob2']
# other_worms_ind = prob_data[worm_index]['other_worms_ind']
#
#
##%%
#from sklearn.cluster import k_means
#
#tot_prof = len(valid_data)
#
#median_profiles = np.zeros((2*tot_prof, length_resampling))
#for ii, worm_index in enumerate(valid_data.keys()):
# median_profiles[2*ii, :] = valid_data[worm_index]['median_profile']
# median_profiles[2*ii+1, :] = valid_data[worm_index]['median_profile'][::-1] #consider the case that there are wrong head tail assigments
#
##%%
#
#centroid, label, inertia = k_means(median_profiles, 16)
#plt.figure()
#for ii in range(16):
# plt.plot(centroid[ii])
#
| ver228/work-in-progress | work_in_progress/join_trajectories_next_gen/_old/try2joinTrajectoriesInt.py | try2joinTrajectoriesInt.py | py | 11,448 | python | en | code | 0 | github-code | 13 |
29859731817 | from intelmq.lib.bot import ExpertBot
try:
from geolib import geohash
except ImportError:
geohash = None
class GeohashExpertBot(ExpertBot):
    """Compute the geohash from longitude/latitude information, save it to extra.(source|destination)"""
    overwrite: bool = False
    precision: int = 7

    def init(self):
        # geolib is an optional dependency; fail fast if it is missing.
        if not geohash:
            raise ValueError("Library 'geolib' is required, please install it.")

    def process(self):
        event = self.receive_message()
        for prefix in ('source.geolocation.', 'destination.geolocation.'):
            lat_key = prefix + "latitude"
            lon_key = prefix + "longitude"
            # Only encode when both coordinates are present on the event.
            if lat_key in event and lon_key in event:
                encoded = geohash.encode(event[lat_key],
                                         event[lon_key],
                                         precision=self.precision)
                event.add("extra." + prefix + "geohash", encoded,
                          overwrite=self.overwrite)
        self.send_message(event)
        self.acknowledge_message()
BOT = GeohashExpertBot
| certtools/intelmq | intelmq/bots/experts/geohash/expert.py | expert.py | py | 1,166 | python | en | code | 856 | github-code | 13 |
27967884298 | #!python3
"""DJZ Char Counter
Usage:
dzjcount.py [--dir <folder>]
Options:
-h --help Show this screen.
--version Show version.
-d --dir Specify the folder to store DZJ files
"""
import os
import sys
import glob
import re
import math
import statistics as sta
# Global Data
DATA = {
    "name": "大藏经",
    "zizhong_count": 0,  # number of distinct characters over the whole canon
    "pstdev_total": 1.0,  # population std-dev of merged character counts ("balance")
    "all_zizhong": {
        # sample: "大": 101,
    },
    "all_sutra": [
        # sample: { "name": "abc", "book": "10", "order" : "20", "zizhong": { "大": 10, ...}, "pstdev": 1.0},
    ]
}
# -------------- ORM BEGIN --------------
from peewee import SqliteDatabase
from peewee import Model
from peewee import CharField, ForeignKeyField, IntegerField, DoubleField
# The stats database is rebuilt from scratch on every run.
DB_FP = 'sutra_chars.db'
if os.path.exists(DB_FP):
    os.remove(DB_FP)
db = SqliteDatabase(DB_FP)
db.set_autocommit(False)  # writes are batched; dump_result() commits once
class Zangjing_tongji(Model):
    """Per-canon summary row (one row for the whole Tripitaka)."""
    zangjing_ming = CharField(max_length=100, default='大藏经')  # canon name
    zizhong_shu = IntegerField()  # number of distinct characters
    ping_heng_xing = DoubleField()  # balance: pstdev of character counts
    class Meta:
        database = db
class Zangjing_zizhong_tongji(Model):
    """Canon-wide frequency of a single character."""
    zangjing_tongji_id = ForeignKeyField(Zangjing_tongji, related_name='all_zizhong_tongji')
    zizhong = CharField(max_length=100)  # the character (or combined glyph)
    ci_shu = IntegerField()  # occurrence count
    class Meta:
        database = db
class Jingwen_tongji(Model):
    """Per-sutra summary row."""
    zangjing_tongji_id = ForeignKeyField(Zangjing_tongji, related_name='all_jingwen_tongji')
    ce_shu = CharField(max_length=4)  # book (volume) number
    xu_hao = CharField(max_length=5)  # sutra number within the canon
    zizhong_shu = IntegerField()  # number of distinct characters in this sutra
    jingwen_ming = CharField(max_length=100)  # sutra title
    ping_heng_xing = DoubleField()  # balance: pstdev of this sutra's char counts
    class Meta:
        database = db
class Jingwen_zizhong_tongji(Model):
    """Per-sutra frequency of a single character."""
    jingwen_tongji = ForeignKeyField(Jingwen_tongji, related_name='all_zizhong')
    zizhong = CharField(max_length=100)  # the character (or combined glyph)
    ci_shu = IntegerField()  # occurrence count within the sutra
    class Meta:
        database = db
# -------------- SCAN BEGIN --------------
RE_TITLE_LINE = re.compile(r'.*第\s*(\d+)\s*(?:冊|卷)\s*No.\s*(\w+)\s*(.*)$')
PUNCTUATIONS = set([' '] # full-corner space
)
# prepare punctations
def prep_punctations_list():
    """Extend PUNCTUATIONS with ASCII, alphanumerics, box-drawing chars and fuhao.txt entries."""
    ignore_ranges = (
        range(1, 255),                    # control chars / Latin-1 symbols
        range(ord('0'), ord('9') + 1),
        range(ord('a'), ord('z') + 1),
        range(ord('A'), ord('Z') + 1),
        range(ord('─'), ord('╿') + 1),    # all table (box-drawing) symbols
    )
    for code_range in ignore_ranges:
        PUNCTUATIONS.update(chr(v) for v in code_range)
    # fuhao.txt lists additional punctuation marks, one per line.
    with open('fuhao.txt', encoding='utf-8') as f:
        PUNCTUATIONS.update(line.strip() for line in f)
def dump_result():
    """Finalize the statistics in DATA and write everything to the SQLite DB."""
    # calc each sutra's balance (pstdev) and merge all char counts into one dict
    all_zizhong = {}
    for sutra in DATA['all_sutra']:
        sutra['pstdev'] = sta.pstdev(sutra['zizhong'].values())
        for ch, count in sutra['zizhong'].items():
            if ch in all_zizhong:
                all_zizhong[ch] += count
            else:
                all_zizhong[ch] = count
    # canon-wide balance over the merged counts
    DATA['pstdev_total'] = sta.pstdev(all_zizhong.values())
    DATA['all_zizhong'] = all_zizhong
    DATA['zizhong_count'] = len(all_zizhong.keys())
    ################# DATABASE ###################
    # initialize DB (autocommit is off: all saves below go into one transaction)
    db.connect()
    db.create_tables([Jingwen_tongji, Jingwen_zizhong_tongji, Zangjing_tongji, Zangjing_zizhong_tongji])
    db.begin()
    # one summary row for the whole canon
    r_zangjing = Zangjing_tongji(zangjing_ming='大正藏',
                                 zizhong_shu=DATA['zizhong_count'],
                                 ping_heng_xing=DATA['pstdev_total'])
    r_zangjing.save()
    # canon-wide per-character counts
    for sch, count in DATA['all_zizhong'].items():
        r_zangjing_zizhong = Zangjing_zizhong_tongji(zangjing_tongji_id=r_zangjing,
                                                     zizhong=sch,
                                                     ci_shu=count)
        r_zangjing_zizhong.save()
    # per-sutra summary and per-sutra character counts
    for sutra in DATA['all_sutra']:
        r_sutra = Jingwen_tongji(zangjing_tongji_id=r_zangjing,
                                 ce_shu=sutra['book'],
                                 xu_hao=sutra['order'],
                                 zizhong_shu=len(sutra['zizhong']),
                                 jingwen_ming=sutra['name'],
                                 ping_heng_xing=sutra['pstdev'])
        r_sutra.save()
        for sch, count in sutra['zizhong'].items():
            zizhong = Jingwen_zizhong_tongji(jingwen_tongji_id=r_sutra,
                                             zizhong=sch,
                                             ci_shu=count)
            zizhong.save()
    print('DB writing start ...',)
    db.commit()
    print('DONE')
def scan_sutras(folder):
    """Walk folder/NN-part/TNNN-f/T*.txt and feed every sutra file to scan_single_sutra."""
    RE_DIR_PART = re.compile(r'\d+-(\w+)$', re.I)
    RE_DIR_BOOK = re.compile(r'T(\d+)-f$', re.I)
    RE_FP_SUTRA = re.compile(r'T(\d+)n(\w+).txt$', re.I)
    # Parts that are skipped entirely (indexes, catalogs, siddham, etc.).
    skip_parts = {"18-xujingshu", "19-xulushu", "20-xuzhuzong", "21-xitan"}
    for d_part in glob.glob(os.path.join(folder, '??-*')):
        if not os.path.isdir(d_part):
            continue
        if d_part.split(os.path.sep)[-1] in skip_parts:
            continue
        if RE_DIR_PART.search(d_part) is None:
            continue
        for d_book in glob.glob(os.path.join(d_part, 'T*')):
            if not os.path.isdir(d_book) or RE_DIR_BOOK.search(d_book) is None:
                continue
            for f_sutra in glob.glob(os.path.join(d_book, 'T*.txt')):
                if not os.path.isfile(f_sutra):
                    continue
                m = RE_FP_SUTRA.search(f_sutra)
                if m is None:
                    print('WARN: Found unexpected file:', f_sutra)
                    continue
                # Book and sutra number come from the file name itself.
                s_book, s_order = m.groups()
                scan_single_sutra(f_sutra, s_book, s_order)
def scan_single_sutra(fp, s_book, s_order):
    """Count character frequencies for one sutra file and record it in DATA.

    fp      -- path to the sutra text file
    s_book  -- book (volume) number taken from the file path
    s_order -- sutra number taken from the file path
    """
    chs = {}
    first_line = None
    # Defaults in case the file is empty or its title line cannot be parsed;
    # previously an empty file crashed with NameError on v_book below.
    s_name = 'Unknown'
    v_book = '0000'
    v_order = '0000'
    with open(fp, encoding='utf8') as f:
        try:
            for line in f:
                line = line.strip()
                if first_line is None:
                    # The first line is the volume header; extract metadata.
                    first_line = line
                    rem = re.match(RE_TITLE_LINE, line)
                    if rem:
                        v_book, v_order, s_name = rem.groups()
                        if not s_name.strip():
                            s_name = 'Unknown'
                    else:
                        print('WARN: file %s first line unmatched: \"%s\"' % (fp, line))
                        s_name = 'Unknown'
                        v_book = '0000'
                        v_order = '0000'
                if not line or 'No.' in line or 'NO.' in line or 'no.' in line:
                    continue
                # only appears in 'SKIPPED' parts: text not yet digitized
                if '暫未輸入' in line:
                    # BUG FIX: this used the undefined name `sbook`, raising
                    # NameError whenever this branch was actually reached.
                    print('WARN: %s/%s/%s 暫未輸入, skip' % (s_book, s_order, fp))
                    return
                to_wait_bracket = False
                for ch in line:
                    if ch == '[':
                        # [...] sequences denote a single combined glyph or
                        # romanization; collect until the closing bracket.
                        to_wait_bracket = True
                        combo_ch = ''
                        continue
                    if to_wait_bracket:
                        if ch == ']':
                            ch = combo_ch
                            to_wait_bracket = False
                            if combo_ch in {"中阿含 (98)",
                                            "中阿含 (59)",
                                            "燉煌出 S. 700",
                                            "āmlam",
                                            "a-",
                                            "adhyāśsya*",
                                            "ta",
                                            "ka",
                                            "Paramārtha-deva",
                                            "Moksa-deva",
                                            "Mahāyāna-deva"}:
                                # skip it, TODO save these if to be better than 99.99% perfect
                                continue
                        else:
                            combo_ch += ch
                            continue
                    # ch == '' happens for an empty [] pair
                    if ch == '' or ch in PUNCTUATIONS:
                        continue
                    if ch not in chs:
                        chs[ch] = 1
                    else:
                        chs[ch] += 1
        except UnicodeDecodeError as err:
            print(err)
    if v_book != s_book or v_order != s_order:
        pass
        # it's only one exception: 0220 大般若波羅蜜多經(第1卷-第200卷)
    # merge the three files of 大般若波羅蜜多經 into a single record
    if s_order == '0220a':
        s_order = '0220'
        s_name = '大般若波羅蜜多經'
    elif s_order == '0220b' or s_order == '0220c':
        # merge into the record appended for 0220a (the LAST one)
        great_boruo = DATA['all_sutra'][-1]
        for ch, cnt in chs.items():
            if ch in great_boruo['zizhong']:
                great_boruo['zizhong'][ch] += cnt
            else:
                great_boruo['zizhong'][ch] = cnt
        return  # on purpose: no separate record for the continuation files
    DATA['all_sutra'].append({
        'name': s_name,
        'book': s_book,
        'order': s_order,
        'zizhong': chs})
if __name__ == '__main__':
    from docopt import docopt
    args = docopt(__doc__, version='DZJ char counter version 0.1')
    # BUG FIX: docopt stores the flag under '--dir' and its value under
    # '<folder>'; the old lookup args['dir'] raised KeyError whenever
    # --dir was actually passed.
    if args['--dir'] and args['<folder>']:
        folder = args['<folder>']
    else:
        folder = '..\\T'  # default: sibling T directory
    prep_punctations_list()
    scan_sutras(folder)
    dump_result()
| jfding/dzj-zizhong | dzjcount.py | dzjcount.py | py | 9,913 | python | en | code | 0 | github-code | 13 |
74189502417 | from airflow.contrib.hooks.aws_hook import AwsHook
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class StageToRedshiftOperator(BaseOperator):
    """Copies data from S3 into a Redshift staging table via a COPY command.

    `s3_key` is templated, so it may reference fields from the DAG run
    context (e.g. the execution date) for partitioned loads.
    """
    ui_color = '#358140'
    template_fields = ("s3_key",)
    copy_sql = """
        COPY {}
        FROM '{}'
        ACCESS_KEY_ID '{}'
        SECRET_ACCESS_KEY '{}'
        REGION '{}'
        {}
    """

    @apply_defaults
    def __init__(self,
                 redshift_conn_id="",
                 aws_credentials_id="",
                 s3_bucket="",
                 s3_key="",
                 table="",
                 region="",
                 extra_params="",
                 *args, **kwargs):
        super(StageToRedshiftOperator, self).__init__(*args, **kwargs)
        # Connection ids are resolved into hooks lazily, inside execute().
        self.redshift_conn_id = redshift_conn_id
        self.aws_credentials_id = aws_credentials_id
        self.s3_bucket = s3_bucket
        self.s3_key = s3_key
        self.table = table
        self.region = region
        self.extra_params = extra_params

    def execute(self, context):
        """Render the S3 path and run a single COPY statement on Redshift."""
        self.log.info(" StageToRedshiftOperator has started COPY command. It may take multiple mins to finish.")
        creds = AwsHook(self.aws_credentials_id).get_credentials()
        redshift_hook = PostgresHook(postgres_conn_id=self.redshift_conn_id)
        # Resolve the (templated) S3 key against the task context.
        s3_path = "s3://{}/{}".format(self.s3_bucket, self.s3_key.format(**context))
        copy_stmt = StageToRedshiftOperator.copy_sql.format(
            self.table,
            s3_path,
            creds.access_key,
            creds.secret_key,
            self.region,
            self.extra_params
        )
        self.log.info("COPY command has been initiated now. We will inform once command is finished\
                      Thanks for your patience")
        redshift_hook.run(copy_stmt)
        self.log.info(" COPY command has finished successfully. ")
| guptrakeshk/my_music_store_airflow | plugins/operators/stage_redshift.py | stage_redshift.py | py | 2,173 | python | en | code | 1 | github-code | 13 |
9936849467 | from flask import Flask, request
from re import match
from requests import get, post
from keys import APP_ID, API_KEY
app = Flask(__name__)
@app.route('/symptoms', methods=['GET'])
def symptoms():
    """Return the symptom list fetched from Infermedica at import time."""
    return {'symptoms': infermedica_symptoms}, 200
def _validate_diagnosis_args(arguments):
    """Return an error message for an invalid diagnosis payload, or None if valid."""
    if 'gender' not in arguments:
        return 'Missing <gender> argument'
    if str(arguments['gender']).lower() not in ['male', 'female']:
        return '<gender> argument requires value of "male" or "female"'
    if 'age' not in arguments:
        return 'Missing <age> argument'
    if not match(r'^[0-9]+$', str(arguments['age'])):
        return '<age> argument requires integer value'
    if 'symptoms' not in arguments:
        return 'Missing <symptoms> argument'
    if not isinstance(arguments['symptoms'], list):
        return '<symptoms> argument requires list of symptoms'
    return None


@app.route('/diagnosis', methods=['POST'])
def diagnosis():
    """Validate the request payload and proxy it to Infermedica /diagnosis."""
    arguments = request.json if request.json else {}
    # Explicit validation: the original used assert statements, which are
    # stripped under `python -O`. The 400 messages are unchanged.
    error = _validate_diagnosis_args(arguments)
    if error is not None:
        return {'message': error}, 400
    body = {'sex': arguments['gender'].lower(), 'age': int(arguments['age'])}
    # Every listed symptom is marked as present.
    symptom_ids = [{'id': s, 'choice_id': 'present'} for s in arguments['symptoms']]
    body.update({'evidence': symptom_ids})
    diagnosis_request = post(base+'/diagnosis', json=body, headers=__headers).json()
    infermedica_diagnosis = []
    try:
        conditions = diagnosis_request['conditions']
        for condition in conditions:
            infermedica_diagnosis += [{'name': condition['common_name'], 'probability': condition['probability']*100}]
    except KeyError:
        # Missing 'conditions' (or condition fields) means Infermedica
        # rejected the request.
        return {'message': 'Invalid request'}, 400
    return {'conditions': infermedica_diagnosis}, 200
# Authentication headers required by every Infermedica API call.
__headers = {'App-Id': APP_ID, 'App-Key': API_KEY}
base = 'https://api.infermedica.com/v2'
# The symptom catalogue is fetched once at import time; a network failure
# here prevents the application from starting at all.
infermedica_symptoms = []
symptoms_request = get(base+'/symptoms', headers=__headers).json()
for symptom in symptoms_request:
    infermedica_symptoms += [{'id': symptom['id'], 'name': symptom['common_name']}]
if __name__ == '__main__':
    app.run()
17521164587 | from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from scipy.stats import norm
import seaborn as sns
import textwrap
def ConductAnomalyDetection(dataframe,
                            list_of_predictor_variables,
                            anomaly_threshold=0.95,
                            plot_detection_summary=True,
                            summary_plot_size=(20, 20),
                            column_name_for_anomaly_prob='Anomaly Probability',
                            column_name_for_anomaly_flag='Anomaly Detected'):
    """Flag multivariate anomalies using independent per-variable z-scores.

    For every predictor, each observation's |z-score| is mapped through the
    standard-normal CDF; the per-variable values are multiplied into a joint
    "anomaly probability". Rows whose joint probability exceeds
    ``anomaly_threshold`` are flagged.

    Args:
        dataframe (pandas.DataFrame): Data to analyze.
        list_of_predictor_variables (list): Columns to score.
        anomaly_threshold (float, optional): Joint-probability cutoff above
            which a row is flagged. Defaults to 0.95.
        plot_detection_summary (bool, optional): Whether to draw a seaborn
            pairplot of the predictors colored by the flag. Defaults to True.
        summary_plot_size (tuple, optional): Figure size of the summary plot.
        column_name_for_anomaly_prob (str, optional): Output column name for
            the probability. Defaults to 'Anomaly Probability'.
        column_name_for_anomaly_flag (str, optional): Output column name for
            the boolean flag. Defaults to 'Anomaly Detected'.

    Returns:
        pandas.DataFrame: Input data plus the probability and flag columns.
        Rows with missing/inf predictors get NaN probability and False flag.
    """
    # Drop stale output columns so the function can be re-run on its output.
    if column_name_for_anomaly_prob in dataframe.columns:
        dataframe = dataframe.drop(column_name_for_anomaly_prob, axis=1)
    if column_name_for_anomaly_flag in dataframe.columns:
        dataframe = dataframe.drop(column_name_for_anomaly_flag, axis=1)

    # Score only complete rows of the predictors (inf treated as missing).
    dataframe_anomaly = dataframe[list_of_predictor_variables].copy()
    dataframe_anomaly = dataframe_anomaly.replace([np.inf, -np.inf], np.nan)
    dataframe_anomaly = dataframe_anomaly.dropna()

    # Joint probability starts at 1 and is multiplied per predictor.
    # (The original code also computed an unused norm.ppf() z-score here.)
    dataframe_anomaly[column_name_for_anomaly_prob] = 1
    for predictor_variable in list_of_predictor_variables:
        variable_mean = dataframe_anomaly[predictor_variable].mean()
        variable_std = dataframe_anomaly[predictor_variable].std()
        z_scores = (dataframe_anomaly[predictor_variable] - variable_mean) / variable_std
        dataframe_anomaly[column_name_for_anomaly_prob] *= norm.cdf(abs(z_scores))

    # Attach the probability to the original rows; a left join on the index
    # leaves NaN for rows that were dropped as incomplete.
    dataframe = dataframe.merge(
        dataframe_anomaly[[column_name_for_anomaly_prob]],
        how='left',
        left_index=True,
        right_index=True
    )

    # Flag rows whose joint probability EXCEEDS the threshold. (The old
    # inline comment said "below threshold", contradicting the code.)
    dataframe[column_name_for_anomaly_flag] = (
        dataframe[column_name_for_anomaly_prob] > anomaly_threshold
    )

    if plot_detection_summary:
        plt.figure(figsize=summary_plot_size)
        sns.pairplot(
            data=dataframe[list_of_predictor_variables + [column_name_for_anomaly_flag]],
            hue=column_name_for_anomaly_flag
        )
        # Word-wrap long axis labels so they stay readable.
        for ax in plt.gcf().axes:
            ax.set_xlabel(textwrap.fill(ax.get_xlabel(), 40))
            ax.set_ylabel(textwrap.fill(ax.get_ylabel(), 40))
        plt.show()

    return dataframe
# # Test the function
# # dataset = ConductAnomalyDetection(
# # dataframe=pd.read_csv("C:/Users/oneno/OneDrive/Documents/Continuing Education/Udemy/Data Mining for Business in Python/5. Dimension Reduction/houses_to_rent.csv"),
# # list_of_predictor_variables=['area', 'floor', 'parking spaces']
# # )
# dataset = pd.read_csv("C:/Users/oneno/OneDrive/Documents/Continuing Education/Udemy/Data Mining for Business in Python/5. Dimension Reduction/houses_to_rent.csv")
# # Mask some values in the dataset
# dataset = dataset.mask(np.random.random(dataset.shape) < .1)
# dataset = ConductAnomalyDetection(
# dataframe=dataset,
# list_of_predictor_variables=['area', 'floor', 'parking spaces']
# )
| KyleProtho/AnalysisToolBox | Python/DataProcessing/ConductAnomalyDetection.py | ConductAnomalyDetection.py | py | 4,936 | python | en | code | 0 | github-code | 13 |
39109969402 | import time, pickle
from asynch_mb.logger import logger
from asynch_mb.workers_multi_machines.base import Worker
import ray
@ray.remote(num_cpus=3)
class WorkerPolicy(Worker):
    """Ray actor that optimizes the policy on model-generated rollouts.

    It pulls dynamics-model parameters from ``model_ps``, samples imaginary
    trajectories with them, runs a policy-gradient update (PPO or TRPO) and
    pushes the updated policy parameters to ``policy_ps``.
    """

    def __init__(self, model_ps, policy_ps, name, exp_dir, n_itr, stop_cond):
        # model_ps / policy_ps are parameter-server actor handles; the heavy
        # objects below stay None until prepare_start() builds them.
        super().__init__(name, exp_dir, n_itr, stop_cond)
        self.model_ps = model_ps
        self.policy_ps = policy_ps
        self.policy = None
        self.baseline = None
        self.model_sampler = None
        self.model_sample_processor = None

    def prepare_start(self, env_pickle, policy_pickle, baseline_pickle, dynamics_model_pickle, feed_dict, algo_str, config):
        """Build the TF session, unpickle all components, then run one
        step-and-push cycle so the policy server has initial parameters.

        Imports happen inside the method because the actor process must
        construct TF state after Ray has forked it.
        """
        import tensorflow as tf
        self.sess = sess = tf.Session(config=config)
        with sess.as_default():

            """ --------------------- Construct instances -------------------"""

            from asynch_mb.samplers.bptt_samplers.bptt_sampler import BPTTSampler
            from asynch_mb.samplers.base import SampleProcessor
            from asynch_mb.algos.ppo import PPO
            from asynch_mb.algos.trpo import TRPO

            env = pickle.loads(env_pickle)
            policy = pickle.loads(policy_pickle)
            baseline = pickle.loads(baseline_pickle)
            dynamics_model = pickle.loads(dynamics_model_pickle)

            sess.run(tf.initializers.global_variables())

            self.policy = policy
            self.baseline = baseline
            self.model_sampler = BPTTSampler(env=env, policy=policy, dynamics_model=dynamics_model, **feed_dict['model_sampler'])
            self.model_sample_processor = SampleProcessor(baseline=baseline, **feed_dict['model_sample_processor'])
            if algo_str == 'meppo':
                self.algo = PPO(policy=policy, **feed_dict['algo'])
            elif algo_str == 'metrpo':
                self.algo = TRPO(policy=policy, **feed_dict['algo'])
            else:
                raise NotImplementedError(f'got algo_str {algo_str}')

            """ -------------------- Pull pickled model from model parameter server ---------------- """

            # A fresh unpickle is assigned to the sampler (and its vec_env,
            # when present) so the sampler owns its own model instance.
            dynamics_model = pickle.loads(dynamics_model_pickle)
            self.model_sampler.dynamics_model = dynamics_model
            if hasattr(self.model_sampler, 'vec_env'):
                self.model_sampler.vec_env.dynamics_model = dynamics_model

            """ -------------------- Step and Push ------------------- """

            self.step()
            self.push()
            logger.dumpkvs()

        return 1

    def step(self):
        """One policy-improvement iteration on model-simulated rollouts."""
        time_step = time.time()

        """ -------------------- Sampling --------------------------"""

        if self.verbose:
            logger.log("Policy is obtaining samples ...")
        paths = self.model_sampler.obtain_samples(log=True, log_prefix='Policy-')

        """ ----------------- Processing Samples ---------------------"""

        if self.verbose:
            logger.log("Policy is processing samples ...")
        samples_data = self.model_sample_processor.process_samples(
            paths,
            log='all',
            log_prefix='Policy-'
        )

        # paths may be a list of rollouts or a dict of lists keyed by task.
        if type(paths) is list:
            self.log_diagnostics(paths, prefix='Policy-')
        else:
            self.log_diagnostics(sum(paths.values(), []), prefix='Policy-')

        """ ------------------ Policy Update ---------------------"""

        if self.verbose:
            logger.log("Policy optimization...")
        # This needs to take all samples_data so that it can construct graph for meta-optimization.
        self.algo.optimize_policy(samples_data, log=True, verbose=False, prefix='Policy-')

        self.policy = self.model_sampler.policy
        logger.logkv('Policy-TimeStep', time.time() - time_step)

    def step_wrapper(self):
        """Pull latest model params, run one step, push the policy."""
        self.pull()
        self.step()
        self.push()
        return 1, 1

    def pull(self):
        """Fetch dynamics-model parameters from the model parameter server
        and install them in the sampler (and its vec_env, when present)."""
        time_synch = time.time()
        if self.verbose:
            logger.log('Policy is synchronizing...')
        model_params = ray.get(self.model_ps.pull.remote())
        assert isinstance(model_params, dict)
        self.model_sampler.dynamics_model.set_shared_params(model_params)
        if hasattr(self.model_sampler, 'vec_env'):
            self.model_sampler.vec_env.dynamics_model.set_shared_params(model_params)
        logger.logkv('Policy-TimePull', time.time() - time_synch)

    def push(self):
        """Publish current policy parameters to the policy parameter server."""
        time_push = time.time()
        params = self.policy.get_shared_param_values()
        assert params is not None
        self.policy_ps.push.remote(params)
        logger.logkv('Policy-TimePush', time.time() - time_push)

    def log_diagnostics(self, paths, prefix):
        """Delegate diagnostics logging to the policy and the baseline."""
        self.policy.log_diagnostics(paths, prefix)
        self.baseline.log_diagnostics(paths, prefix)
| zzyunzhi/asynch-mb | asynch_mb/workers_multi_machines/metrpo/worker_policy.py | worker_policy.py | py | 4,765 | python | en | code | 12 | github-code | 13 |
39373150746 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 4 12:25:15 2022
@author: admin
"""
import json
import pandas as pd
import numpy as np #We will use often for linear algebra, arras, etc.
import matplotlib.pyplot as plt
# Method 1 to read json data
# NOTE(review): this handle is never closed — the `with` form below is the
# one to prefer.
json_file = open("loan_data_json.json")
data = json.load(json_file)
# Another method for loading json data (context manager closes the file)
with open("loan_data_json.json") as json_file:
    data = json.load(json_file)
# Transform to dataframe
LoanData = pd.DataFrame(data)
# Finding unique values for the purpose column
LoanData["purpose"].unique()
# Describe Data (results are displayed interactively, not stored)
LoanData.describe()
LoanData["int.rate"].describe()
LoanData["fico"].describe()
LoanData["dti"].describe()
# Using exp() to invert the log-transformed annual income column
Income = np.exp(LoanData["log.annual.inc"])
LoanData["annualincome"] = Income
# Arrays: ordered data structures that may hold non-unique items.
# 1D Array
Arr = np.array([1,2,3,4])
# 0D array (scalar)
Arr = np.array(43)
# 2D Array
Arr = np.array([[1, 2, 3,],[4, 5, 6]])
# Using If statements
A = 40
B = 500
C = 1000
if B > A:
    print("B is Greater Than A")
# More conditions: `and` / `or` combine comparisons
if B > A and B < C:
    print("B is Greater Than A But Less Than C")
# When the condition is not met, the else branch runs
A = 40
B = 500
C = 20
if B > A and B < C:
    print("B is Greater Than A But Less Than C")
else:
    print("It is Not")
# Another condition, different values
A = 40
B = 0
C = 30
if B > A and B < C:
    print("B is Greater Than A But Less Than C")
elif B > A and B > C:
    print("B is Greater Than A and C")
else:
    print("No Conditions Met")
# Fico Score classification
# NOTE(review): Fico == 200 falls below every band, so this prints the
# (misspelled) "Uknown" value; scores in [400, 601) are also partly unmapped.
Fico = 200
# fico >= 300 and < 400: 'Very Poor'
# fico >= 400 and ficoscore < 600: 'Poor'
# fico >= 601 and ficoscore < 660: 'Fair'
# fico >= 660 and ficoscore < 780: 'Good
# fico >=780: 'Excellent'
if Fico >= 300 and Fico < 400:
    ficocat = "Very Poor"
elif Fico >= 400 and Fico < 600:
    ficocat = "Poor"
elif Fico >= 601 and Fico < 660:
    ficocat = "Fair"
elif Fico >= 660 and Fico < 780:
    ficocat = "Good"
elif Fico >= 780:
    ficocat = "Excellent"
else:
    ficocat = "Uknown"
print(ficocat)
# For Loops
fruits = ["apple", "pear", "banana", "cherry"]
for x in fruits:
    print(x)
    y = x + " Fruit"
    print(y)
# Loops based on position (index access)
for x in range(0,3):
    y = fruits[x]
    print(y)
# Applying for loops to the loan data: categorize every row's fico score
length = len(LoanData) # number of rows in the dataframe
ficocat = []
for x in range(0,length):
    category = LoanData["fico"][x]
    if category >= 300 and category < 400:
        cat = "Very Poor"
    elif category >= 400 and category < 600:
        cat = "Poor"
    elif category >= 601 and category < 660:
        cat = "Fair"
    elif category >= 660 and category < 700:
        cat = "Good"
    elif category >= 700:
        cat = "Excellent"
    else:
        cat = "Unknown"
    ficocat.append(cat)
ficocat = pd.Series(ficocat)
LoanData["fico.category"] = ficocat
# while loops
# NOTE(review): condition is False on entry (I == 1), so this loop body
# never executes — the author likely meant `while I < 10`.
I = 1
while I > 10:
    print(I)
    I = I +1
# df.loc as conditional assignment:
# df.loc[df[columnname] condition, newcolumnname] = "value if the condition is met"
# New column for interest rates: > 0.12 is "High", otherwise "Low".
LoanData.loc[LoanData["int.rate"] > 0.12, "int.rate.type"] = "High"
LoanData.loc[LoanData["int.rate"] <= 0.12, "int.rate.type"] = "Low"
# Number of loans/rows by fico.category, shown as a bar chart
catplot = LoanData.groupby(["fico.category"]).size()
catplot.plot.bar() # plot bar chart from the catplot series
plt.show()
# Changing colors
#catplot.plot.bar(color = "green", width = 0.1)
Purpplot = LoanData.groupby(["purpose"]).size()
Purpplot.plot.bar(color = (1,0,1) , width = 0.5) # RGB tuples use 0-1 floats (hex strings also work via matplotlib named formats)
plt.show()
# Scatter plots always need an X and a Y
xpoint = LoanData["annualincome"]
ypoint = LoanData["dti"]
plt.scatter(xpoint, ypoint, color ="#4caf50", linewidths = 0.1)
plt.show()
# Writing to csv (index=True keeps the row index as the first column)
LoanData.to_csv("loan_cleaned.csv", index = True)
13947324733 | # python3
'''
BugCam-run.py
BugCam monitors timelapse photography experiments.
It checks a Dropbox folder's pictures, and sends Slack messages if it
finds problems:
- No new picture in specified time period.
- Significantly different brightness from one picture to the next.
TO_DO
- Add apscheduler listener: https://apscheduler.readthedocs.io/en/latest/userguide.html#missed-job-executions-and-coalescing
- Add support to read link and time from config file
- Add browser-based(?) GUI, that displays current image and log.
'''
import argparse
import json
import os
from io import BytesIO
from datetime import datetime
import time
import dropbox
from slackclient import SlackClient
import colors
from PIL import Image, ImageStat
from apscheduler.schedulers.background import BackgroundScheduler
__author__ = 'Natalia Quinones-Olvera'
__email__ = "nquinones@g.harvard.edu"

# Module-level state shared between scheduler runs.
LAST_FILE_MEM = None      # (filename, timestamp) of the newest photo seen so far
DPX_CLIENT = None         # dropbox.Dropbox client, set by init()
SLACK_CLIENT = None       # SlackClient, set by init()
CONFIG = None             # parsed JSON configuration, set by init()
NEWPHOTO_STATUS = None    # 'present' / 'absent', set by check_newphoto()
BRIGHTNESS_STATUS = None  # 'stable' / 'increase' / 'decrease', set by check_brightness()
# .............................FUNCTIONS................................
# ...............................init...................................
def main_argparser():
    """
    Command line argument parser.

    Returns:
        argparse.Namespace with url, time, name and config attributes.
    """
    script_path = os.path.split(os.path.realpath(__file__))[0]
    default_config = os.path.join(script_path, 'config/config.json')

    parser = argparse.ArgumentParser()

    parser.add_argument('url',
                        metavar='<dropbox_url>',
                        help='Dropbox share url of the folder to monitor.',
                        type=str)
    parser.add_argument('time',
                        metavar='<time>',
                        help='''Monitoring time interval in minutes.
                        (Should be the time interval of timelapse.)''',
                        type=int)
    parser.add_argument('name',
                        metavar='<name>',
                        help='''Name of the project being monitored.''')
    parser.add_argument('-c', '--config',
                        metavar='<json>',
                        help='''Path for .json config file. If not specified,
                        it will look in in the script's path for
                        config/config.json''',
                        default=default_config)

    # Parse once; the original called parse_args() twice (once for the zero
    # check and again for the return value), re-doing all the work.
    args = parser.parse_args()
    if args.time == 0:
        parser.error('Time can\'t be 0 minutes! ¯\\_(ツ)_/¯')

    return args
def init(args):
    '''
    Initializes Dropbox and Slack clients. Fetches metadata of Dropbox folder.

    Side effects: sets the module globals CONFIG, DPX_CLIENT and SLACK_CLIENT.

    Returns:
        Dropbox shared-link metadata for the folder at args.url.
    '''
    global CONFIG
    global DPX_CLIENT
    global SLACK_CLIENT
    # NOTE(review): the config file handle is never closed — relies on GC.
    CONFIG = json.load(open(args.config))
    DPX_CLIENT = dropbox.Dropbox(CONFIG['private_tokens']['dropbox'])
    SLACK_CLIENT = SlackClient(CONFIG['private_tokens']['slack_bot'])
    folder_info = DPX_CLIENT.sharing_get_shared_link_metadata(args.url)
    return folder_info
# ...............................general...................................
def get_timestamps(folder_info):
    '''
    Fetches folder metadata from Dropbox and returns a list of
    (filename, time_taken) pairs for every photo, newest first.
    '''
    dir_content = DPX_CLIENT.files_list_folder(folder_info.id,
                                               include_media_info=True,
                                               recursive=False)
    # Keep file entries only (folders lack media metadata).
    stamps = [
        (entry.name, entry.media_info.get_metadata().time_taken)
        for entry in dir_content.entries
        if isinstance(entry, dropbox.files.FileMetadata)
    ]
    return sorted(stamps, key=lambda pair: pair[1], reverse=True)
def download_photo(folder_info, file):
    '''
    Downloads one picture from the Dropbox folder and returns its raw
    contents wrapped in a BytesIO object.
    '''
    dropbox_path = '{0}/{1}'.format(folder_info.path_lower, file)
    _metadata, response = DPX_CLIENT.files_download(dropbox_path)
    return BytesIO(response.content)
def get_brightness(img, mask=None):
    '''
    Converts the image (a bytes-like/file object) to grayscale with PIL and
    returns its mean pixel level.
    '''
    grayscale = Image.open(img).convert('L')
    # mean[0]: average of the single luminance band.
    return ImageStat.Stat(grayscale, mask=mask).mean[0]
# ...............................checks...................................
def check_newphoto(timestamps_list):
    '''
    Sets NEWPHOTO_STATUS to 'present' when the newest (filename, timestamp)
    pair differs from the one remembered from the previous run, otherwise
    'absent'. Remembers the newest pair in LAST_FILE_MEM.
    '''
    global NEWPHOTO_STATUS
    global LAST_FILE_MEM

    newest = timestamps_list[0]
    if newest != LAST_FILE_MEM:
        NEWPHOTO_STATUS = 'present'
        LAST_FILE_MEM = newest
    else:
        NEWPHOTO_STATUS = 'absent'
def check_brightness(folder_info, timestamps_list):
    '''
    Compares the mean brightness of the two most recent photos and sets
    BRIGHTNESS_STATUS to 'decrease', 'increase' or 'stable' using the
    thresholds from the JSON config file.
    '''
    global BRIGHTNESS_STATUS

    thresholds = CONFIG['brightness_threshold']
    newest_name = timestamps_list[0][0]
    previous_name = timestamps_list[1][0]
    newest_level = get_brightness(download_photo(folder_info, newest_name))
    previous_level = get_brightness(download_photo(folder_info, previous_name))
    difference = newest_level - previous_level

    if difference < thresholds['dark']:
        BRIGHTNESS_STATUS = 'decrease'
    elif difference > thresholds['light']:
        BRIGHTNESS_STATUS = 'increase'
    else:
        BRIGHTNESS_STATUS = 'stable'
def checks_response():
    '''
    Acts on the module-level status flags set by the check_* functions:
    prints a console summary line and posts Slack warnings for problems.
    '''
    current_time = datetime.now().isoformat(' ', 'seconds')

    # String comparisons use `==` — the original used `is`, which compares
    # object identity and only appeared to work via CPython string interning.
    # LAST FILE CHECK
    if NEWPHOTO_STATUS == 'absent':
        # local response
        print('{note} {desc: <25} {time}'.format(note=colors.red('WARNING!'),
                                                 desc='No new photo.',
                                                 time=current_time))
        # slack response
        SLACK_CLIENT.api_call("chat.postMessage",
                              channel="monitor_test",
                              text=":warning:\t*{name}*: New photo missing\t{time}".format(name=NAME,
                                                                                           time=current_time))
    # BRIGHTNESS CHECK
    elif NEWPHOTO_STATUS == 'present':
        # decrease in brightness
        if BRIGHTNESS_STATUS == 'decrease':
            # local response
            print('{note} {desc: <25} {time}'.format(note=colors.red('WARNING!'),
                                                     desc='Brightness decrease.',
                                                     time=current_time))
            # slack response
            SLACK_CLIENT.api_call("chat.postMessage",
                                  channel="monitor_test",
                                  text=":warning:\t*{name}*:\tBrightness decrease\t{time}".format(name=NAME,
                                                                                                  time=current_time))
        # increase in brightness
        elif BRIGHTNESS_STATUS == 'increase':
            # local response
            print('{note} {desc: <25} {time}'.format(note=colors.red('WARNING!'),
                                                     desc='Brightness increase.',
                                                     time=current_time))
            # slack response
            SLACK_CLIENT.api_call("chat.postMessage",
                                  channel="monitor_test",
                                  text=":warning:\t*{name}*:\tBrightness increase\t{time}".format(name=NAME,
                                                                                                  time=current_time))
        # stable brightness
        elif BRIGHTNESS_STATUS == 'stable':
            # local response
            print('{note} {desc: <25} {time}'.format(note='ok',
                                                     desc='',
                                                     time=current_time))
# .................................main............................................
def main():
    '''
    One monitoring cycle: fetch the timestamp list, run the new-photo and
    brightness checks, then print/post responses based on the statuses.
    '''
    stamps = get_timestamps(FOLDER_INFO)
    check_newphoto(stamps)
    check_brightness(FOLDER_INFO, stamps)
    checks_response()
# .............................................................................
if __name__ == '__main__':
    arguments = main_argparser()
    FOLDER_INFO = init(arguments)
    NAME = arguments.name

    # Run main() every <time> minutes in a background thread.
    scheduler = BackgroundScheduler(daemon=False)
    scheduler.add_job(main, 'interval', minutes=arguments.time)
    scheduler.start()

    print('# -----------------------------------------------------------')
    print('# BugCam Daemon')
    print('# -----------------------------------------------------------')
    print('# Project: {0}'.format(NAME))
    print('# Monitoring folder: {0}, every {1} minutes.'.format(FOLDER_INFO.name,
                                                                arguments.time))
    print('# Started at: {0}'.format(datetime.now().isoformat(' ', 'seconds')))
    print('# (Press Ctrl+{0} to exit)'.format('Break' if os.name == 'nt' else 'C'))
    print('#')

    # Announce startup in Slack so the team knows monitoring began.
    SLACK_CLIENT.api_call("chat.postMessage",
                          channel="monitor_test",
                          text="*BugCam START:* {name}\t{time}".format(name=NAME,
                                                                       time=datetime.now().isoformat(' ', 'seconds')))

    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        # Not strictly necessary if daemonic mode is enabled but should be done if possible
        print('#')
        print('# Stopped at {0}'.format(datetime.now()))
        print('# -----------------------------------------------------------')
        SLACK_CLIENT.api_call("chat.postMessage",
                              channel="monitor_test",
                              text="*BugCam STOP:* {name}\t{time}".format(name=NAME,
                                                                          time=datetime.now().isoformat(' ', 'seconds')))
        scheduler.shutdown()
| nataquinones/BugCam | BugCam-run.py | BugCam-run.py | py | 11,003 | python | en | code | 1 | github-code | 13 |
24306788924 | import pandas
import pathlib
import numpy
from os import listdir, stat, makedirs
from os.path import isfile, isdir, join, basename, exists
from itertools import chain
from scipy.stats import mannwhitneyu
from scipy.stats import shapiro
MINIMUM_COMMIT_NUMBER = 100        # projects with fewer commit rows are skipped
DATA_DIR = 'data'                  # folder holding the per-project CSV exports
CACHE_DIR = '__cache__'            # local cache directory for HDF5 files
HDF5_CACHE = CACHE_DIR + '/{}.h5'  # cache filename template ('projects', 'smells')
TABLE_FOLDER = 'tables/'           # output folder for generated LaTeX tables
# Mapping from raw smell identifiers (as exported in the CSVs) to the
# human-readable display names used in plots and tables.
_SMELL_PRETTY_NAMES = {
    'ON_THE_FLY': 'On the Fly',
    'COMPLICATED_SETUP_SCENARIOS': 'Complex Scenario',
    'CONDITIONAL_ASSERTION': 'Conditional Assertion',
    'EAGER_TEST': 'Eager Test',
    'HARDCODED_ENVIRONMENT_CONFIGURATIONS': 'Hardcoded Environment',
    'HIDING_TEST_DATA': 'Hidden Test Data',
    'LACK_OF_ENCAPSULATION': 'Lack of Encapsulation',
    'NOISY_LOGGING': 'Noisy Logging',
    'LONG_TEST_STEPS': 'Long Test Steps',
    'MIDDLE_MAN': 'Middle Man',
    'MISSING_ASSERTION': 'Missing Assertion',
    'HARD_CODED_VALUES': 'Hardcoded Values',
    'OVER_CHECKING': 'Over Checking',
    'SENSITIVE_LOCATOR': 'Sensitive Locator',
    'SNEAKY_CHECKING': 'Sneaky Checking',
    'STINKY_SYNCHRONIZATION_SYNDROME': 'Stinky Synchronization',
    'ARMY_OF_CLONES': 'Army of Clones',
    'NARCISSISTIC': 'Narcissistic',
    'MISSING_DOCUMENTATION': 'Missing Documentation',
    'SAME_DOCUMENTATION': 'Same Documentation',
}


def pretty_smell_name(name):
    """Translate a raw smell identifier into its display name.

    A dict lookup replaces the former 20-branch if-chain; the error message
    for unknown identifiers is kept identical.

    Raises:
        RuntimeError: if *name* is not a known smell identifier.
    """
    try:
        return _SMELL_PRETTY_NAMES[name]
    except KeyError:
        raise RuntimeError('Invalid metric name: ' + name) from None
def pretty_name(metric, text):
    """Build a lowercase dash-separated slug '<metric>-<text>' with the
    'number_' prefix and '_value' suffix stripped from the metric."""
    stripped = remove_prefix(metric, 'number_')
    stripped = remove_suffix(stripped, '_value')
    slug = stripped + '-' + text
    return slug.replace('_', '-').replace(' ', '-').lower()
def remove_suffix(string, suffix):
    """Return *string* with *suffix* removed from its end, if present.

    BUGFIX: the old implementation used ``string[:-len(suffix)]`` which,
    for an empty suffix, evaluated to ``string[:0]`` and wrongly returned
    the empty string. An empty suffix now leaves the string unchanged.
    """
    if suffix and string.endswith(suffix):
        return string[:-len(suffix)]
    return string
def remove_prefix(string, prefix):
    """Return *string* with *prefix* removed from its start, if present."""
    if string.startswith(prefix):
        return string[len(prefix):]
    return string
def get_files_list(parent, suffix):
    """List paths of regular files directly under *parent* whose names end
    with *suffix*; return [] when *parent* is not a directory."""
    if not isdir(parent):
        return []
    return [join(parent, name)
            for name in listdir(parent)
            if isfile(join(parent, name)) and name.endswith(suffix)]
def get_project_list():
    """Return project names that have a non-empty '-projects.csv' export
    with at least MINIMUM_COMMIT_NUMBER commit rows (plus the header line).

    The CSV is opened with a context manager so the handle is closed
    deterministically (the old code leaked it via bare open().readlines()).
    """
    projects = []
    for project_file in get_files_list(DATA_DIR, '-projects.csv'):
        if stat(project_file).st_size == 0:
            continue
        with open(project_file) as handle:
            # Header line + at least MINIMUM_COMMIT_NUMBER data rows.
            has_enough_commits = len(handle.readlines()) >= MINIMUM_COMMIT_NUMBER + 1
        if not has_enough_commits:
            continue
        projects.append(remove_suffix(basename(project_file), '-projects.csv'))
    return projects
def load_projects():
    """Load every project's commit data, building/reusing an HDF5 cache.

    Returns:
        pandas.DataFrame: one row per commit, with 'project', 'origin'
        ('industrial' for bgl, else 'open-source') and a parsed 'date'.
    """
    cache_file = HDF5_CACHE.format('projects')
    if not isfile(cache_file):
        print('generating cache for ' + cache_file)
        _generate_cache_folder()
        frames = []
        for project in get_project_list():
            current = pandas.read_csv(join('data', project + '-projects.csv'))
            current['project'] = project
            current['origin'] = 'industrial' if project == 'bgl' else 'open-source'
            current['date'] = current['date'].astype('datetime64[ns]')
            frames.append(current)
        # DataFrame.append was removed in pandas 2.0; a single concat also
        # avoids the quadratic copying of append-in-a-loop.
        df = pandas.concat(frames) if frames else pandas.DataFrame()
        _store_file_in_cache(df, cache_file)
    return _load_cache(cache_file)
def load_smells():
    """Load every project's smell data, building/reusing an HDF5 cache.

    Returns:
        pandas.DataFrame: one row per smell occurrence, with 'project',
        'origin', a parsed 'version' timestamp and display-ready
        'smell_name' values (via pretty_smell_name).
    """
    cache_file = HDF5_CACHE.format('smells')
    if not isfile(cache_file):
        print('generating cache for ' + cache_file)
        _generate_cache_folder()
        frames = []
        for project in get_project_list():
            current = pandas.read_csv(join('data', project + '-smells.csv'))
            current['project'] = project
            current['origin'] = 'industrial' if project == 'bgl' else 'open-source'
            current['version'] = current['version'].astype('datetime64[ns]')
            current['smell_name'] = current['smell_name'].apply(lambda x: pretty_smell_name(x))
            frames.append(current)
        # DataFrame.append was removed in pandas 2.0; concatenate once.
        df = pandas.concat(frames) if frames else pandas.DataFrame()
        _store_file_in_cache(df, cache_file)
    return _load_cache(cache_file)
def get_quantile_values(df, column, number_quantiles=1000):
    """Tabulate quantiles of *column* at evenly spaced levels.

    Args:
        df: source DataFrame.
        column: numeric column to summarize.
        number_quantiles: number of quantile levels (1/n, 2/n, ..., 1).

    Returns:
        pandas.DataFrame indexed 1..number_quantiles with columns
        'Quantile' (the level) and 'Value' (the quantile of the column).
    """
    indexes = list(range(1, number_quantiles + 1))
    quantile_levels = [i / number_quantiles for i in indexes]
    # One vectorized quantile call instead of one full column scan per level.
    values = df[column].quantile(q=quantile_levels)
    return pandas.DataFrame(
        {'Quantile': quantile_levels, 'Value': values.to_numpy()},
        index=indexes,
    )
def load_step_sequences():
    """Read 'total-test-statistics.csv' and return a one-column DataFrame
    ('Step Sizes') of all strictly positive step-sequence sizes."""
    df = pandas.read_csv(join('data', 'total-test-statistics.csv'))
    sequences = df['step_sequences_sizes'].apply(lambda x: _string_to_array(x))
    positive_sizes = [size
                      for sequence in sequences.values
                      for size in sequence
                      if size > 0]
    return pandas.DataFrame(positive_sizes, columns=['Step Sizes'])
def save_table(data, name, groups=None, value=None, labels=None):
    """Render *data* as a LaTeX table and write it to TABLE_FOLDER/<name>.tex.

    Modes (first match wins):
      - groups + value: groupby-describe summary of *value* per *groups*;
      - labels: the listed columns plus *value*;
      - otherwise: the whole frame.

    BUGFIX: the old code did ``labels.append(value)``, mutating the caller's
    list in place on every call; a fresh list is built instead.
    """
    _generate_folder(pathlib.Path(TABLE_FOLDER).resolve())
    if groups and value:
        table = data.groupby(groups)[value].describe().to_latex()
    elif labels:
        columns = labels + [value]
        table = data[columns].to_latex()
    else:
        table = data.to_latex()
    with open(TABLE_FOLDER + name + ".tex", "w") as text_file:
        text_file.write(table)
def _store_file_in_cache(df, cache_file):
    # Overwrite the HDF5 cache file, storing the dataframe under key 'data'.
    df.to_hdf(cache_file, key='data', mode='w')
def _load_cache(filename):
    # Read back the dataframe stored under key 'data' by _store_file_in_cache().
    return pandas.read_hdf(filename, 'data')
def _generate_cache_folder():
    # Ensure the parent directory of the HDF5 cache files exists.
    _generate_folder(pathlib.Path(HDF5_CACHE.format('', '')).parent.resolve())
def _generate_folder(folder):
if not exists(folder):
makedirs(folder)
def _string_to_array(string):
return [int(e) for e in string.replace('[', '').replace(']', '').split(',')]
def normal_distribution(series, name, alpha=0.05):
    """Shapiro-Wilk normality test for *series*.

    BUGFIX: the old body computed the test and silently discarded the result
    (implicitly returning None). The statistic and p-value are now returned;
    existing callers that ignored the return value are unaffected.

    *name* and *alpha* are kept for interface compatibility; *alpha* is the
    significance level against which callers may compare the p-value.
    """
    stat, p = shapiro(series)
    return stat, p
def compare_distribution(data, name, group, series, criteria, alpha=0.05):
    # Run one Mann-Whitney U test per `group` partition (comparing the two
    # `series` categories on `criteria`) and save the results as a LaTeX
    # table named *name* via save_table().
    total = data.groupby([group]).apply(compute_mannwhitneyu, series, criteria, alpha).reset_index()
    save_table(total, name)
def compute_mannwhitneyu(x, series, criteria, alpha):
    """Mann-Whitney U test comparing *criteria* between the two categories
    of *series* found in the frame *x*.

    Returns:
        pandas.Series with 'statistics', 'p-value' and 'reject'
        (True when p < alpha). NaN/False when the test cannot be run.
    """
    category = x[series].unique()
    try:
        stat, p = mannwhitneyu(
            x.loc[x[series] == category[0]][criteria],
            x.loc[x[series] == category[1]][criteria],
        )
        results = {'statistics': stat, 'p-value': p, 'reject': p < alpha}
    except (IndexError, ValueError):
        # Narrowed from a bare `except:` — IndexError when fewer than two
        # categories exist, ValueError for degenerate samples rejected by
        # scipy. Anything else now propagates instead of being swallowed.
        results = {'statistics': numpy.nan, 'p-value': numpy.nan, 'reject': False}
    return pandas.Series(results, index=['statistics', 'p-value', 'reject'])
| kabinja/suit-smells-replication-package | data.py | data.py | py | 6,924 | python | en | code | 0 | github-code | 13 |
# Practice script for strings, loops and lists (comments translated to
# English; Korean identifiers and prompt strings are left untouched).
# 문자열 = 'hello world, my name is python'
# 정수 = 314
# 실수 = 3.14

# for i in 문자열:
#     print(i, end=' ')

# i = 0
# while i < len(문자열):
#     print(문자열[i], end=' ')
#     i += 1

# Problem: count how many times the letter 'o' appears in the string.
문자열 = 'hello world, my name is python'
a = 0
for i in 문자열:
    if i == 'o':
        a += 1
print(a)

# Print every month except the one the user entered (continue skips it).
month = int(input('1~12월중에서 아무 월이나 입력하세요>>'))
for i in range(1,13):
    if i == month:
        continue
    print(i,'월',end=(' '))

# Print months 1 .. (entered month - 1).
월 = int(input('1~12월중에서 아무 월이나 입력하세요>>'))
for i in range(1,월):
    print(i,'월',end=(' '))
print()

# Print months until the entered month is reached (break stops the loop).
break_month = int(input('몇 월부터 스킵할까요??>>'))
for i in range(1,13):
    if i == break_month:
        break
    print(i,'월',end=(' '))

# Multiplication table: rows are i (1..9), columns are j (2..9).
for i in range(1,10):
    for j in range(2,10):
        print(j,'x',i,'=',i*j, end='\t')

#str,int,float,list,tuple,dict,set
# Lists
# A subway with 3 cars, [10,15,12] passengers each
subway1 = 10
subway2 = 15
subway3 = 12
print()
# list: keeps related values together (and can be printed as a whole)
리스트 = [10,15,12,11,22,33,44,55,66]
for i in 리스트:
    print(i, '명',end=(' '))
14204585379 | import time
import os
import requests
import datetime
from bs4 import BeautifulSoup as Soup
from django.core.management.base import BaseCommand
from django.db import IntegrityError, OperationalError
import asyncio
import aiohttp
import logging
from server.utils import getRange
from server.models import Faculties, Groups, Classes
# Allow Django ORM calls from coroutine code (required by the async parser).
os.environ["DJANGO_ALLOW_ASYNC_UNSAFE"] = "true"
# Log everything to parser_log.log, truncating the file on each run.
logging.basicConfig(level=logging.DEBUG, filename='parser_log.log', filemode='w',
                    format="%(asctime)s %(levelname)s %(message)s")
logger = logging.getLogger()  # root logger
class Parse:
def __init__(self):
self.session = requests.session()
self.session.headers = {
'User-Agent': 'Chrome/107.0.0.0 Safari/537.36',
'Accept-Language': 'ru',
}
    async def get_page_data(self, session, link, group, start_parse):
        """Fetch one group's timetable page and upsert its classes.

        Args:
            session: shared aiohttp.ClientSession used for the GET request.
            link: URL of the group's timetable page.
            group: Groups model instance the classes belong to.
            start_parse: date of the Monday the parsed week starts on.
        """
        async with session.get(url=link, headers=self.session.headers) as response:
            response_text = await response.text()  # raw html of the page
            html = Soup(response_text, 'lxml').find(
                class_='vt244b').contents  # STABLE: timetable container; html == [] when the page has no classes at all
            for week in html:  # runs only when there is at least one class; an empty field skips the loop entirely
                time = week.find(
                    class_='vt283').parent.text  # STABLE: tag containing the time range; split into start/end datetimes below
                end = datetime.datetime.strptime(time[-5::], "%H:%M").time()
                start = datetime.datetime.strptime(time[-10:-5], "%H:%M").time()
                for day in week.find_all(class_='vt258'):  # STABLE: one entry per day cell
                    try:
                        date_offset = int(day.parent.get('class')[-1][
                                              -1]) - 1  # STABLE: bs4 returns classes as a list like ['vt239', 'rasp-day', 'rasp-day1']; the trailing digit is the weekday number
                        datepush = start_parse + datetime.timedelta(
                            days=date_offset)  # Monday's date + current day offset
                        name = day.find(class_="vt240").text.strip()  # MOSTLY STABLE: class name
                        type = day.find(
                            class_="vt243").text.strip()  # MOSTLY STABLE: class type is chosen from a fixed list on the site
                        try:
                            teachers = day.find(class_="teacher").text.strip().split(' ')  # UNSTABLE: may be absent
                            teachers = (lambda a, n=2: [' '.join(a[i:i + n]).replace(";", '') for i in range(0, len(a), n)])(
                                teachers)  # pair up tokens into "Surname Initials" teacher entries
                        except AttributeError:
                            # No teacher tag; military/drill classes legitimately have none.
                            teachers = []
                            if name != 'Военная подготовка' and name != 'Строевая подготовка':
                                logging.info(f'{group.group_name, str(datepush)} ERROR, got []:{link}')
                        try:
                            building = None
                            place = day.find(class_="vt242").text.strip()  # UNSTABLE: "Аудитория: <aud>[; <building>]"
                            place = place.split(':')[1].strip().split(';')
                            aud = place[0]
                            if len(place) != 1:
                                try:
                                    building = place[1]
                                except IndexError:
                                    logging.info(f'{group.group_name, str(datepush)} NO BUILDING:{link}')
                        except (AttributeError, IndexError) as error:
                            # No location tag: physical-education ('ФЗ') slots
                            # use the sports grounds; anything else is logged.
                            if week.find(class_='vt283').text == 'ФЗ':
                                aud = 'Спортивные площадки'
                                building = None
                                if name != 'Элективные дисциплины по физической культуре и спорту' and name != 'Физическая культура и спорт':
                                    logging.info(f'{group.group_name, str(datepush)} GOT FZ:{link}')
                            else:
                                aud = None
                                building = None
                                if name != 'Военная подготовка' and name != 'Строевая подготовка':
                                    logging.info(f'{group.group_name, str(datepush)} ERROR, not FZ:{link}')
                        try:
                            # Idempotent upsert: re-parsing the same week does
                            # not duplicate rows.
                            obj, created = Classes.objects.get_or_create(
                                class_name=name,
                                class_audience=aud,
                                class_building=building,
                                class_type=type,
                                class_date=datepush,
                                class_start=start,
                                class_end=end,
                                class_teachers=teachers,
                                group_id=group,
                            )
                            # if created:
                            #     logging.info(f'{"Создана запись с id", obj.id, ". Дата: ", str(datepush)}')
                        except UnboundLocalError:
                            # datepush/name may be unbound when earlier parsing failed.
                            logging.exception('DataError')
                            logging.error(f'{group.group_name, str(datepush), day}')
                        # except OperationalError:
                        #     logging.error(f'{"OperationalError"}')
                    except AttributeError:
                        print(datepush, group.group_name)
                        logging.exception('AttributeError')
                        logging.info(f'{group.group_name, str(datepush), day}')
async def gather_data(self, pfrom, pto, start_parse, end_parse):
# print(pfrom, pto, start_parse, end_parse)
async with aiohttp.ClientSession() as session:
tasks = []
general_url = 'https://www.sut.ru/studentu/raspisanie/raspisanie-zanyatiy-studentov-ochnoy-i-vecherney-form-obucheniya'
for i in range(pfrom, pto + 1): # Мы собираем все Группы от и до какого-то id. ЕСЛИ УКАЗАТЬ ДВА ОДИНАКОВЫХ ЗНАЧЕНИЯ, ТО БУДЕТ ПАРСИНГ ОДНОЙ КОНКРЕТНОЙ ГРУППЫ
group = Groups.objects.get(pk=i)
while_start_parse = start_parse
while end_parse >= while_start_parse:
url = general_url + group.group_link + '&date=' + str(while_start_parse)
task = asyncio.create_task(self.get_page_data(session, url, group, while_start_parse))
tasks.append(task)
while_start_parse += datetime.timedelta(days=7)
await asyncio.gather(*tasks)
def main(self, gfrom, gto, tfrom, tto):
print("FROM MAIN FUNCTION", gfrom, gto, tfrom, tto)
getRange.getRange(gfrom, gto, tfrom, tto, self.gather_data)
def groups(self):
general_url = 'https://www.sut.ru/studentu/raspisanie/raspisanie-zanyatiy-studentov-ochnoy-i-vecherney-form-obucheniya'
html = self.session.get(general_url).text
soup = Soup(html, 'lxml')
for faculty in soup.find_all(class_='vt252'):
try:
Faculties(
faculty_name=faculty.find(class_='vt253').text.strip()
).save()
except IntegrityError:
pass
# fk = Faculties.objects.get(faculty_name=faculty.find(class_='vt253').text.strip()).pk
for group in faculty.find_all(class_='vt256'):
try:
Groups(
group_name=group.get('data-nm'),
group_faculty=Faculties.objects.get(faculty_name=faculty.find(class_='vt253').text.strip()),
group_link=group.get('href'),
).save()
except IntegrityError:
pass
def classes(self):
general_url = 'https://www.sut.ru/studentu/raspisanie/raspisanie-zanyatiy-studentov-ochnoy-i-vecherney-form-obucheniya'
volume = len(Groups.objects.all())
for i in range(1, volume + 1):
group = Groups.objects.get(pk=i)
group_url = self.session.get(general_url + group.group_link + '&date=' + str(group.end_parse)).text
html = Soup(group_url, 'lxml').find(class_='vt244b').contents
while len(html) != 0:
for week in html:
time = week.find(
class_='vt283').parent.text # получения тега, содержащего время. Далее разбиваем на два объекта date с началом и концом
end = datetime.datetime.strptime(time[-5::], "%H:%M").time()
start = datetime.datetime.strptime(time[-10:-5], "%H:%M").time()
for day in week.find_all(class_='vt258'):
date_offset = int(day.parent.get('class')[-1][
-1]) - 1 # этот цикл нужен, чтобы иметь возможность добавить два занятия в одну пару
# try:
name = day.find(class_="vt240").text.strip()
type = day.find(class_="vt243").text.strip()
teachers = day.find(class_="teacher").text.strip().split(' ')
teachers = (lambda a, n=2: [' '.join(a[i:i + n]) for i in range(0, len(a), n)])(
teachers) # превращаем в массив учителей
datepush = group.end_parse + datetime.timedelta(
days=date_offset) # дата понедельника + номер текущего дня
place = day.find(class_="vt242").text.strip()
place = place.split(':')[1].strip().split(';')
aud = place[0]
building = None
if (len(place) == 2):
if '/' in place[1]:
building = place[1][-1]
else:
building = 'k'
Classes(
class_name=name,
class_audience=aud,
class_building=building,
class_type=type,
class_date=datepush,
class_start=start,
class_end=end,
class_teachers=teachers,
group_id=group,
).save()
# except AttributeError:
# pass
group.end_parse += datetime.timedelta(days=7)
group.save(update_fields=["end_parse"])
# Groups.objects.get(group).update(end_parse=F("end_parse") + datetime.timedelta(days=7))
group_url = self.session.get(general_url + group.group_link + '&date=' + str(group.end_parse)).text
html = Soup(group_url, 'lxml').find(class_='vt244b').contents
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('obj', type=str, help='Choose the object of parsing: groups, classes')
parser.add_argument('-gfrom', type=int, default=1)
parser.add_argument('-gto', type=int)
parser.add_argument('-tfrom', type=str)
parser.add_argument('-tto', type=str)
parser.add_argument('-group', type=str)
parser.add_argument('-time', type=str)
def handle(self, *args, **options):
gfrom = options['gfrom']
gto = options['gto']
tfrom = options['tfrom']
tto = options['tto']
group = options['group']
timeq = options['time']
obj = options['obj']
if obj == 'groups':
Parse().groups()
elif obj == 'classes':
starttime = time.time()
if group:
if group == "all":
gto = 'all'
else: gfrom, gto = group, group
if timeq:
if timeq == 'all':
if (datetime.datetime.now().month <= 7):
tfrom = datetime.date(datetime.datetime.now().year, 2, 1)
tto = datetime.date(datetime.datetime.now().year, 8, 1)
else:
tfrom = datetime.date(datetime.datetime.now().year, 9, 1)
tto = datetime.date(datetime.datetime.now().year, 2, 1)
else:
tfrom, tto = timeq, timeq
tto = datetime.datetime.strptime(tto,
'%Y-%m-%d').date() # Выбираем либо дефолтное (конец года?), либо парсим то, что указано пользователем
tfrom = datetime.datetime.strptime(tfrom, '%Y-%m-%d').date()
tfrom = datetime.datetime.fromisocalendar(tfrom.year, tfrom.isocalendar().week,
1).date() # выбор понедельника для выбранной недели начала парсинга
else:
tto = datetime.datetime.strptime(tto,
'%Y-%m-%d').date() # Выбираем либо дефолтное (конец года?), либо парсим то, что указано пользователем
tfrom = datetime.datetime.strptime(tfrom, '%Y-%m-%d').date()
tfrom = datetime.datetime.fromisocalendar(tfrom.year, tfrom.isocalendar().week,
1).date() # выбор понедельника для выбранной недели начала парсинга
Parse().main(gfrom, gto, tfrom, tto)
print(time.time() - starttime)
else:
self.stdout.write('wrong arguments')
"""
Какие нужны функции?
Пока остановимся на двух моделях:
>> Парсинг всех групп. Одноразовая операция
> Получить суп (в этом случае всего один раз) - для этого есть
библиотека. нет смысла строить велосипед
>
>
>> Парсинг расписания у всех групп по порядку
>
>
>
"""
| rikkaar/Bonch-back | server/management/commands/parse.py | parse.py | py | 15,423 | python | ru | code | 0 | github-code | 13 |
7023627884 | from Help import *
from FileHelp import *
#################################################
# specific functions #
#################################################
def StoreData_toPath(Data : dict, filename : str, path="", sort_alphabetic=False):
    """
    Store *Data* as a .mat file at *filename* (joined onto *path* if given).
    :param Data: Data dict of groups/traces to store
    :param filename: target file name; used as-is when *path* is empty
    :param path: optional folder to prepend to *filename*
    :param sort_alphabetic: if True, sort the groups alphabetically first
    """
    if path:
        filename = path_join(path, filename)
    if sort_alphabetic:
        Data = sortData_Alphabetic(Data)
    store_mat(Data, filename)
def StoreData(Data : dict, datestring : str, logname : str, postfix="", sort_alphabetic=False):
    """
    Stores given Data as a .mat file in the logfile folder of *datestring*
    :param Data: Data to Store
    :param datestring: date folder under the logfiles directory
    :param logname: Logname or Filename to store to
    :param postfix: Postfix to append like LogXX_POSTFIX
    :param sort_alphabetic: if True, sort the groups alphabetically first
    """
    filename = logname
    if postfix:
        filename += "_" + postfix
    filepath = path_join(getLogfilefolder(datestring), filename)
    if sort_alphabetic:
        Data = sortData_Alphabetic(Data)
    store_mat(Data, filepath)
def sortData_Alphabetic(Data : dict) -> dict:
    """
    Return a new Data dict with the groups inserted in alphabetical order.
    Existing groups are copied over and their traces sorted; for a key that
    Group_Exists rejects, an empty placeholder group is created instead
    (presumably so downstream tooling always sees the group — TODO confirm
    against TraceHelp).
    """
    from TraceHelp import Group_Exists, Group_sortTraces
    def addgroup(GROUP):
        if (Group_Exists(Data, GROUP)):
            nData[GROUP] = Data[GROUP]
            Group_sortTraces(nData, GROUP)
        else:
            from TraceHelp import Group_Create_Tstartend
            Group_Create_Tstartend(nData, GROUP, 1, t_end=1)
    nData = {}
    for GROUP in sorted(list(Data.keys())):
        addgroup(GROUP)
    #nData.update(Data)
    return nData
def LoadData_byPath(path : str) -> dict:
    """Load a stored Data dict directly from the given .mat file path."""
    return load_mat(path)
def getLogfilefolder(datestring : str):
    """Return the logfile folder path for the given date string."""
    return path_join(PATH.DIR_LOGFILES, datestring)
def getAllOsciLogfiles(datestring : str):
    """Return all .csv logfile names (without path) in the date's log folder."""
    return get_files_in_Folder(getLogfilefolder(datestring), ".csv", withpath=False)
def LoadData(datestring : str, logname : str) -> dict:
    """
    Load processed log data for a given date.
    :param datestring: date folder under the logfiles directory
    :param logname: filename of the log, with or without the .mat extension
    :return: the loaded Data dict, or {} if the file does not exist
    """
    target = path_join(getLogfilefolder(datestring), logname)
    if not target.endswith(".mat"):
        target += ".mat"
    if not file_exists(target):
        print("LoadData: Logfile '{}' not found".format(target))
        return {}
    return LoadData_byPath(target)
def OpenDiadem_Data(Data, tdv_path=None, block=True, name="tmp"):
    """
    Opens Diadem with given Data
    :param Data: Data dict; written (alphabetically sorted) to a temporary .mat file first
    :param tdv_path: optional view/layout file passed on to OpenDiadem
    :param block: if True, wait until DIAdem exits
    :param name: suffix used in the temporary file name ("diadem_<name>.mat")
    :return: None
    """
    path = path_join(PATH.DIR_TMP, "diadem_{}.mat".format(name))
    StoreData_toPath(Data, path, sort_alphabetic=True)
    OpenDiadem(path, tdv_path, block)
def OpenDiadem(logfile_path, tdv_path=None, block=True):
    """
    Launch NI DIAdem on the given logfile, optionally with a view file.
    :param logfile_path: logfile to open; made absolute if necessary
    :param tdv_path: optional view file, also made absolute
    :param block: if True, wait for DIAdem to exit; otherwise detach
    """
    # Open in Diadem
    if not isAbsolutPath(logfile_path):
        logfile_path = getAbsolutePath(logfile_path)
    if tdv_path and not isAbsolutPath(tdv_path):
        tdv_path = getAbsolutePath(tdv_path)
    args = [PATH.PATH_DIADEM, logfile_path]
    if tdv_path:
        args.append(tdv_path)
    import subprocess
    if block:
        # call() blocks until the launched process terminates
        subprocess.call(args)
    else:
        # Popen() returns immediately, leaving DIAdem running
        subprocess.Popen(args)
def OpenExplorer(path):
    """
    Open the Windows Explorer at the given path.
    :param path: folder to show; made absolute first if necessary
    """
    if not isAbsolutPath(path):
        path = getAbsolutePath(path)
    import subprocess
    # Pass the arguments as a list with shell=False: a path containing
    # spaces or shell metacharacters is handed to explorer verbatim instead
    # of being re-parsed (and potentially executed) by cmd.exe, which the
    # old 'explorer {}'.format(path) + shell=True form allowed.
    subprocess.call(["explorer", path])
def DEBUG_Store(Data, tag=None):
    """Dump *Data* as JSON into the tmp dir; *tag* selects a tagged filename."""
    if tag == None:
        filename = FILENAME_TMP_JSON
    else:
        filename = FILENAME_TMP_JSON_TAG.format(tag)
    store_json(Data, filename, PATH.DIR_TMP)
def DEBUG_Load(tag=None):
    """Load a JSON debug dump from the tmp dir; *tag* selects a tagged filename."""
    if tag == None:
        filename = FILENAME_TMP_JSON
    else:
        filename = FILENAME_TMP_JSON_TAG.format(tag)
    return load_json(filename, PATH.DIR_TMP)
def DEBUG_StoreData(Data, tag=None):
    """Store *Data* (alphabetically sorted, as .mat) in the tmp dir for debugging."""
    if tag == None:
        filename = FILENAME_TMP_DATA
    else:
        filename = FILENAME_TMP_DATA_TAG.format(tag)
    #save_json(Data, filename, PATH.DIR_TMP)
    path = path_join(PATH.DIR_TMP, filename)
    StoreData_toPath(Data, path, sort_alphabetic=True)
def DEBUG_LoadData(tag=None):
    """Load a .mat debug dump written by DEBUG_StoreData from the tmp dir."""
    if tag == None:
        filename = FILENAME_TMP_DATA
    else:
        filename = FILENAME_TMP_DATA_TAG.format(tag)
    #return load_json(filename, PATH.DIR_TMP)
    path = path_join(PATH.DIR_TMP, filename)
    return LoadData_byPath(path)
if __name__ == '__main__':
pass | savejeff/ProjektPraktikum_TUD_AUT_QuadTorque | Python/FileModule.py | FileModule.py | py | 3,931 | python | en | code | 0 | github-code | 13 |
38668182440 | from constants import *
import techniques_app
import techniques_test
import copy
class Engine:
    """Drives the sudoku solver: repeatedly applies solving techniques to a
    board until it is complete or no technique makes progress.

    In app mode (test=False) every successful technique application is
    recorded in self.states for later visualisation; in test mode only the
    final board is kept.
    """

    def __init__(self, board, test):
        self.board = board
        if not test:
            self.solved = self.solve_app()
            self.board = self.solved[0]
            self.states = self.solved[1]
        elif test:
            self.solved = self.solve_test()
            self.board = self.solved

    def solve_app(self):
        """Solve while recording state snapshots.

        Returns (board, states); states[0] is the initial grid, each later
        entry is [technique name, grid snapshot, coloring info].
        """
        able_to_solve = True
        board_states = [copy.deepcopy(self.board.board)]
        while(not self.board.is_complete() and able_to_solve):
            # Try techniques in TECHNIQUES order; after any technique
            # changes the board, restart from the first technique again.
            for tech in TECHNIQUES:
                technique = getattr(techniques_app, tech)
                old_board = copy.deepcopy(self.board.board)
                coloring = technique(self.board.board)
                if(old_board != self.board.board):
                    board_states.append([tech, copy.deepcopy(self.board.board), coloring])
                    break
                if(tech == TECHNIQUES[-1] and old_board == self.board.board):
                    # Even the last technique made no progress: give up.
                    able_to_solve = False
                    break
        return (self.board, board_states)

    def solve_test(self):
        """Solve without recording states (uses techniques_test variants)."""
        able_to_solve = True
        while(not self.board.is_complete() and able_to_solve):
            for tech in TECHNIQUES:
                technique = getattr(techniques_test, tech)
                old_board = copy.deepcopy(self.board.board)
                technique(self.board.board)
                if(old_board != self.board.board):
                    break
                if(tech == TECHNIQUES[-1] and old_board == self.board.board):
                    able_to_solve = False
                    break
        return self.board

    def print(self):
        """Delegate printing to the board."""
        self.board.print()
| kamil20018/SudokuSolver | engine.py | engine.py | py | 1,763 | python | en | code | 1 | github-code | 13 |
37677087015 | import datetime
from openpyxl import load_workbook
from openpyxl.styles import NamedStyle, Alignment, Border, Side
# Playing with data in work schedule .xlsx
wb = load_workbook('shift_schedule1.xlsx')
ws1 = wb['Monday']
ws1['C2'] = datetime.date.today()
try:
date_style1 = NamedStyle(name='datetime', number_format='DD.MM.YYYY')
ws1['C2'].style = date_style1
except ValueError:
pass
ws1['C2'].border = Border(bottom=Side(border_style="thin", color="000000"))
ws1['C2'].alignment = Alignment(horizontal='left')
ws1['C3'] = 'Sale'
ws1["M6"].value = '=COUNTIF(Monday[[#This Row],[7:00 AM]:[3:00 PM]],"*")'
ws1['B6'] = 'Michael'
ws1['B7'] = 'Bob'
ws1['B8'] = 'Miranda'
ws1['B9'] = 'Andrew'
ws1['B10'] = 'Jessica'
for row in range(2, ws1.max_row):
ws1['C6'].value = 'manager'
ws1['D6'].value = 'manager'
ws1['E6'].value = 'manager'
ws1['F6'].value = 'manager'
ws1['G6'].value = 'manager'
ws1['H6'].value = 'manager'
ws1['I6'].value = 'dinner'
ws1['I8'].value = 'dinner'
ws1['I9'].value = 'dinner'
ws6 = wb['Saturday']
ws6['C2'] = datetime.date.today() + datetime.timedelta(days=5)
try:
date_style6 = NamedStyle(name='datetime6', number_format='DD.MM.YYYY')
ws6['C2'].style = date_style6
except ValueError:
pass
ws6['C2'].border = Border(bottom=Side(border_style="thin", color="000000"))
ws6['C2'].alignment = Alignment(horizontal='left')
ws6['C3'] = 'Administration'
ws6['M6'].value = '=COUNTIF(Saturday[[#This Row],[7:00 AM]:[3:00 PM]],"*")'
if __name__ == '__main__':
wb.save('shift_schedule1.xlsx')
| AndreiRekaev/Python-practice | 100daysofcode/70-72 openpyxl/schedule_automation.py | schedule_automation.py | py | 1,572 | python | en | code | 0 | github-code | 13 |
16879822667 | from locale import MON_12
def rotate_matrix(m):
    """Rotate the square matrix *m* 90 degrees clockwise, in place.

    Works ring by ring from the outside in; within a ring each step cycles
    four elements (top <- left <- bottom <- right <- top) using one saved
    temporary. Returns the same (mutated) matrix object.
    """
    size = len(m)
    for ring in range(size // 2):
        first, last = ring, size - ring - 1
        for col in range(first, last):
            offset = size - col - 1
            saved = m[ring][col]                 # top
            m[ring][col] = m[offset][ring]       # left  -> top
            m[offset][ring] = m[last][offset]    # bottom -> left
            m[last][offset] = m[col][last]       # right -> bottom
            m[col][last] = saved                 # top   -> right
    return m
def rotate_matrix_swap(m):
    """Rotate the square matrix *m* 90 degrees clockwise, in place, by
    transposing it and then mirroring each row horizontally.

    Returns the same (mutated) matrix object.
    """
    size = len(m)
    # Transpose: swap elements across the main diagonal.
    for row in range(size - 1):
        for col in range(row + 1, size):
            m[row][col], m[col][row] = m[col][row], m[row][col]
    # Reflect: reverse every row.
    for row in range(size):
        for col in range(size // 2):
            m[row][col], m[row][size - col - 1] = m[row][size - col - 1], m[row][col]
    return m
if __name__ == '__main__':
test_cases = [
([[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[7, 4, 1], [8, 5, 2], [9, 6, 3]]),
(
[
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25],
],
[
[21, 16, 11, 6, 1],
[22, 17, 12, 7, 2],
[23, 18, 13, 8, 3],
[24, 19, 14, 9, 4],
[25, 20, 15, 10, 5],
],
),
]
# for m1, expected in test_cases:
# r1 = rotate_matrix(m1)
# # print(r1)
# print(r1 == expected)
for m2, expected in test_cases:
r2 = rotate_matrix_swap(m2)
print(r2)
print(r2 == expected)
| melanietai/leetcode-practice | matrix/rotate_matrix.py | rotate_matrix.py | py | 1,784 | python | en | code | 0 | github-code | 13 |
4506651180 | """Test cases for the models module."""
import json
from typing import Any
import pytest
from haversine import Unit
from haversine import haversine
from helium_api_wrapper import DataObjects as DataObjects
from pytest_mock import MockFixture
from helium_positioning_api.DataObjects import Prediction
from helium_positioning_api.nearest_neighbor import nearest_neighbor
@pytest.fixture
def mock_integration() -> Any:
"""Mock integration for testing.
:return: integration Event
"""
with open("tests/data/integration_events.json") as file:
integrations = json.load(file)
return integrations[0]
@pytest.fixture
def mock_hotspots() -> Any:
"""Mock hotspots.
:return: List of hotspots
:rtype: Any
"""
with open("tests/data/hotspots.json") as file:
hotspot = json.load(file)
return hotspot
def test_nearest_neighbor_model(
module_mocker: MockFixture, mock_integration: Any, mock_hotspots: Any
) -> None:
"""Test for the nearest neighbor model.
:param mocker: Mocker
:param mock_integration: Event
"""
module_mocker.patch(
"helium_positioning_api.Models.get_last_integration",
return_value=transform_integration(mock_integration),
autospec=True,
)
prediction = nearest_neighbor(uuid="92f23793-6647-40aa-b255-fa1d4baec75d")
print(prediction)
assert prediction == Prediction(
uuid="92f23793-6647-40aa-b255-fa1d4baec75d",
lat=37.784056617819544,
lng=-122.39186733984285,
timestamp=1632353389723,
)
assert (
haversine(
(prediction.lat, prediction.lng),
(37.784056617819544, -122.39186733984285),
unit=Unit.KILOMETERS,
)
== 0
)
# def test_midpoint_model() -> None:
# """Test for the midpoint model."""
# prediction = Midpoint().predict(uuid="uuid")
#
# assert (
# haversine(
# prediction, (47.47771443776213, 12.053189171302527), unit=Unit.KILOMETERS
# )
# < 14
# )
def transform_integration(event: dict) -> DataObjects.IntegrationEvent:
    """Build an IntegrationEvent from a raw event dict.

    Each raw hotspot mapping under data.req.body.hotspots is wrapped in an
    IntegrationHotspot and attached to the event under "hotspots" before
    the whole dict is expanded into the IntegrationEvent constructor.
    """
    raw_hotspots = event["data"]["req"]["body"]["hotspots"]
    event["hotspots"] = [
        DataObjects.IntegrationHotspot(**fields) for fields in raw_hotspots
    ]
    return DataObjects.IntegrationEvent(**event)
| emergotechnologies/helium-positioning-api | tests/test_models.py | test_models.py | py | 2,385 | python | en | code | 7 | github-code | 13 |
36221409657 | #!/usr/bin/env python3
import csv
import sys
from datetime import datetime
with open(sys.argv[1], encoding='utf-8-sig') as csvfile:
reader=csv.reader(csvfile)
headers_line=next(reader)
headers=dict()
for i in range(len(headers_line)):
headers[headers_line[i]]=i
print('Date,Payee,Category,Memo,Outflow,Inflow')
for row in reader:
transaction_datetime=datetime.strptime(row[headers['Transaction Date']], '%b %d, %Y %I:%M:%S %p')
date=transaction_datetime.strftime('%m/%d/%Y')
payee='-'.join(row[headers['Location']].split('-')[0:2])
category=''
memo=row[headers['Location']]
outflow=abs(float(row[headers['Amount']].replace('$', '')))
inflow=''
print(date, payee, category, memo, outflow, inflow, sep=',')
| Swandog/parse_ipass_csv | bin/parse.py | parse.py | py | 808 | python | en | code | 0 | github-code | 13 |
11583193939 | """Top level configurations for the app."""
import os
_BASEDIR = os.path.abspath(os.path.dirname(__file__))
# Autowaiter settings
BASE_URL = '127.0.0.1:5000'
# Filesystem
def guarantee_existence(dirs):
    '''
    For each directory in the given list, create it (including any missing
    parent directories) if it does not already exist.

    Uses os.makedirs(..., exist_ok=True) instead of an exists() pre-check:
    the old check-then-create sequence could raise FileExistsError if
    another process created the directory between the two calls.
    '''
    for dirname in dirs:
        os.makedirs(dirname, exist_ok=True)
DATA_DIR = os.path.join(_BASEDIR, 'data')
PREF_DIR = os.path.join(DATA_DIR, 'preferences') # User preferences
MENU_DIR = os.path.join(DATA_DIR, 'menus') # Cached menus
guarantee_existence([DATA_DIR, PREF_DIR, MENU_DIR])
LOG_FILE = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'auto_waiter.log')
DEBUG = True
# ADMINS = frozenset(['youremail@yourdomain.com'])
SECRET_KEY = 'This string will be replaced with a proper key in production.'
DATABASE_CONNECT_OPTIONS = {}
THREADS_PER_PAGE = 6
WTF_CSRF_ENABLED = True
WTF_CSRF_SECRET_KEY = "somethingimpossibletoguess"
# Waiter.com settings
VCS_URL = 'https://www.waiter.com/purestorage-dinner'
NUM_STORES = 4
HAS_SALAD_SPOT = True
NUM_DAYS = 3 # Short week because of the holiday. Change back to 4 next week
API_URL = 'https://www.waiter.com/api/v1'
| davidzheng814/auto-waiter | config.py | config.py | py | 1,225 | python | en | code | 0 | github-code | 13 |
30335891410 | import pandas as pd
from sklearn.model_selection import train_test_split
import joblib
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
print('Cargando archivo csv...')
data = pd.read_csv('./data/google_play_store_apps_reviews_training.csv')
data.head()
def preprocess_data(data):
    """Drop the irrelevant package-name column and normalize review text
    (trim surrounding whitespace, lowercase). Returns a new DataFrame."""
    cleaned = data.drop(columns=['package_name'])
    cleaned['review'] = cleaned['review'].str.lower().str.strip()
    return cleaned
print('Procesando datos...')
data = preprocess_data(data)
# Split into training and testing data
x = data['review']
y = data['polarity']
x, x_test, y, y_test = train_test_split(x,y, stratify=y, test_size=0.25, random_state=42)
# Vectorize text reviews to numbers
vec = CountVectorizer(stop_words='english')
x = vec.fit_transform(x).toarray()
x_test = vec.transform(x_test).toarray()
print('Genero el modelo...')
#Generate model
model = MultinomialNB()
model.fit(x, y)
# Save model
output=joblib.dump(model, 'bayes_model.pkl')
print('Modelo descargado',output) | damiancipolat/Python_emotional_analysis | app_reviews_english/model_generator.py | model_generator.py | py | 1,109 | python | en | code | 0 | github-code | 13 |
47006020464 | def aroonlow2(a1='Close',val1=None,df=None):
ar,df[ar]='Close_ah',None
e1=val1+1; s1=df[a1].shape[0]-1
for i in range(s1,e1,-1):
lis=df[a1][i-val1:i+1]
m1=max(lis)
for counter, value in enumerate(lis):
if (value==m1) :pos=counter
df[ar][i]=100*pos/val1
ya=(df[a1][200:s1]).max().round(0).astype(int)
yi=(df[a1][200:s1]).min().round(0).astype(int)
d1=(ya-yi)/100
df[ar+'_2']=yi+(d1*df[ar]/100)
return df[ar+'_2'] | aa3110/python-trading | biblio/trade/tmp/_t_aroonhigh2.py | _t_aroonhigh2.py | py | 481 | python | en | code | 1 | github-code | 13 |
29232286627 | from . import Customer
from . import Product
from . import Address
from . import Payment
from . import Quote
from . import Order
from .PaymentMethod import Eft as EftPayment
from .Config import Config
class Factory:
    """Factory for the shop's domain objects (customers, products,
    addresses, payments, orders), all sharing one DynamoDB handle."""

    def __init__(self, dydb=None):
        # dydb is passed through to every model constructor
        self.__dydb = dydb

    def order_from_quote(self, quote: Quote.Quote, user_id):
        """Create an Order for the given cognito user, initialised from *quote*."""
        order = Order.Order(dydb=self.__dydb, cognito_id=user_id)
        order.init_from_quote(quote)
        return order

    def new_payment(self, quote: Quote.Quote):
        """Create a Payment from the quote's totals; bank transfers get an
        Eft action attached."""
        print("Quote Currency : ", quote.currency)
        print("Quote Currency Rate: ", quote.currency_rate)
        # quote.currency
        pmt = Payment.Payment(dydb=self.__dydb,
                              amount=quote.total,
                              shipping_amount=quote.shipping_amount,
                              order_number=quote.order_number,
                              rate=quote.currency_rate
                              )
        pmt.method = quote.payment_method
        if quote.payment_method == "banktransfer":
            eft = EftPayment.Eft()
            pmt.action = eft
        return pmt

    def new_customer(self):
        """Create an empty Customer."""
        return self.make("Customer")  # type: Customer.Customer

    def load_customer(self, pk):
        """Load an existing Customer by primary key."""
        customer = Customer.Customer(dydb=self.__dydb)
        customer.load(pk)
        return customer

    def new_product(self):
        """Create an empty Product."""
        return self.make("Product")  # type: Product.Product

    def new_billing_address(self, customer: Customer.Customer):
        """Create a billing Address attached to *customer*.

        NOTE(review): the sk is built from address.pk (the fresh guid set in
        make()) BEFORE pk is overwritten with customer.pk — presumably
        intentional (guid in sk, customer as partition key), but confirm.
        """
        address = self.make("BillingAddress")  # type: Address.Address
        address.sk = "ADDRESS#BILLING#{}".format(address.pk)
        address.pk = customer.pk
        address.customer_pk = customer.pk
        return address

    def new_shipping_address(self, customer: Customer.Customer):
        """Create a shipping Address attached to *customer* (see
        new_billing_address for the pk/sk ordering note)."""
        address = self.make("ShippingAddress")  # type: Address.Address
        address.sk = "ADDRESS#SHIPPING#{}".format(address.pk)
        address.pk = customer.pk
        address.customer_pk = customer.pk
        return address

    def make(self, model):
        """Construct a bare model instance by name; Address variants get a
        fresh guid pk and a type/sk prefix."""
        if model == "Product":
            return Product.Product(dydb=self.__dydb)
        if model == "Customer":
            return Customer.Customer(dydb=self.__dydb)
        if model == "Address":
            address = Address.Address(dydb=self.__dydb)
            address.pk = address.new_guid()
            address.sk = "ADDRESS"
            return address
        if model == "ShippingAddress":
            address = Address.Address(dydb=self.__dydb)
            address.pk = address.new_guid()
            address.type = "shipping"
            address.sk = "ADDRESS#SHIPPING"
            return address
        if model == "BillingAddress":
            address = Address.Address(dydb=self.__dydb)
            address.pk = address.new_guid()
            address.type = "billing"
            address.sk = "ADDRESS#BILLING"
            return address
| Venus713/RPS | libs/models/mpc/Factory.py | Factory.py | py | 2,893 | python | en | code | 0 | github-code | 13 |
7092534837 | from typing import List
class Solution:
def minimumOperations(self, nums: List[int]) -> int:
w=[0,0,0]
for v in nums:
w=[
w[0]+(v!=1),
min(w[:2])+(v!=2),
min(w)+(v!=3)
]
return min(w)
objeto = Solution()
nums = [2,1,3,2,1]
print(objeto.minimumOperations(nums)) | alexandreborgmann/leetcode | SortingThreeGroups.py | SortingThreeGroups.py | py | 377 | python | en | code | 0 | github-code | 13 |
34030512339 | # Filename: q3_find_gcd.py
# Author: Justin Leow
# Created: 22/2/2013
# Modified: 22/2/2013
# Description: Uses a recursive function to compute gcd
##8
##5
def gcd(m, n):
    """Return the greatest common divisor of m and n (Euclid's algorithm).

    Fixes a crash in the original: gcd(m, 0) raised ZeroDivisionError on
    the ``m % n`` test, while mathematically gcd(m, 0) == m.
    """
    if n == 0:
        return m
    if m % n == 0:
        return n
    return gcd(n, m % n)
print(gcd(24,16))
print(gcd(255,25)) | JLtheking/cpy5python | practical04/q3_find_gcd.py | q3_find_gcd.py | py | 282 | python | en | code | 0 | github-code | 13 |
27110602819 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: creates the ``contact`` table used to store
    messages submitted through the site's contact form."""

    dependencies = [
        ('cryptapp', '0008_auto_20160116_1004'),
    ]

    operations = [
        migrations.CreateModel(
            name='contact',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=50)),
                ('email', models.CharField(default=b'', max_length=100)),
                ('message', models.TextField(max_length=200)),
                ('phone', models.CharField(max_length=15)),
                # nullable so a record can exist before it is published
                ('published_date', models.DateTimeField(null=True, blank=True)),
            ],
        ),
    ]
| aditi73/cryptic-mining | cryptapp/migrations/0009_contact.py | 0009_contact.py | py | 809 | python | en | code | 2 | github-code | 13 |
19000634905 | import csv
films = []
strikedOffFilms = {}
if __name__ == '__main__':
# CREATING LIST OF FILMS
print("Creating list of films... ", end='')
with open('films.txt', encoding="utf-8") as filmsFile:
for line in filmsFile:
films.append(line.strip())
print("Success")
# IMPORTING CSV...
print("Curating responses...", end='')
with open('reducing.csv', encoding="utf-8") as responsesFile:
responsesReader = csv.reader(responsesFile, delimiter=';')
responses = []
count = 0
for row in responsesReader:
# Disregard the top response, listing out the order.
if count > 0:
# Disregard the name at the end
responses.append(row[1:len(row) - 1])
count += 1
for response in responses:
for item in response:
if item not in strikedOffFilms:
strikedOffFilms[item] = 0
strikedOffFilms[item] += 1
print("Success")
print("Producing final list... ", end='')
# Sort the films by the amount of strike offs
strikedOffFilmsList = sorted(strikedOffFilms, key=strikedOffFilms.get, reverse=True)
print(strikedOffFilms.get('Snow White and the Seven Dwarves'))
# Get the bottom 64.
bottom64 = strikedOffFilmsList[0:64]
# Remove the bottom 64 from the films list.
for badFilm in bottom64:
films.remove(badFilm)
print("Success")
# We're done!
with open('top64.txt', 'w', encoding="utf-8") as finalFilms:
for goodFilm in films:
line = goodFilm + '\n'
finalFilms.write(line)
print("Printed to top64.txt")
| bchan061/bracket | reducer.py | reducer.py | py | 1,689 | python | en | code | 0 | github-code | 13 |
14793281123 | import pygame
class Shop:
def __init__(self):
self.image = pygame.image.load(r'img/Return_button.jpg')
self.pos = (0, 0)
self.image1 = pygame.image.load(r'img/Click.jpg')
self.pos1 = (50, 100)
self.image2 = pygame.image.load(r'img/yron_v_second.jpg')
self.pos2 = (50, 400)
self.click_cost = 10
self.DPS_cost = 100
def buing(self, balance, cost):
if balance >= cost:
cost *= 2
return int(balance - cost / 2), cost
return balance, cost
def try_to_buy(self, type, balance):
if type == 'click':
new_balance, new_cost = self.buing(balance, self.click_cost)
self.click_cost = new_cost
return new_balance
elif type == 'dps':
new_balance, new_cost = self.buing(balance, self.DPS_cost)
self.DPS_cost = new_cost
return new_balance
return balance
def draw(self, screen, balance):
screen.blit(self.image, self.pos)
screen.blit(self.image1, self.pos1)
screen.blit(self.image2, self.pos2)
self.write(screen, balance)
def write(self, screen, balance):
font = pygame.font.SysFont('Arial.ttf', 80)
text = font.render(f'У вас на счету: {balance}', True, [255, 255, 255])
screen.blit(text, (60, 0)) | Anakkobitskaya/click | Shop.py | Shop.py | py | 1,412 | python | en | code | 0 | github-code | 13 |
13613939580 | class Interval(object):
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Solution(object):
    """Sweep-line merge of overlapping Interval objects."""

    def merge(self, intervals):
        """Merge overlapping (or touching) intervals.

        Every start/end becomes an event; sorting starts (flag 0) before
        ends (flag 1) at the same coordinate makes touching intervals
        coalesce. A running depth counter tells us when a merged region
        opens (0 -> 1) and closes (1 -> 0).
        """
        if not intervals:
            return []
        START, END = 0, 1
        events = []
        for iv in intervals:
            events.append((iv.start, START))
            events.append((iv.end, END))
        events.sort()  # lexicographic: coordinate first, starts before ends
        merged = []
        depth = 0
        region_start = None
        for coord, kind in events:
            if kind == START:
                if depth == 0:
                    region_start = coord
                depth += 1
            else:
                depth -= 1
                if depth == 0:
                    merged.append(Interval(region_start, coord))
        return merged
| clovery410/mycode | leetcode_review/56merge_intervals.py | 56merge_intervals.py | py | 857 | python | en | code | 1 | github-code | 13 |
39182440691 | from django.urls import path
from .views import (
GradeView,
SubjectCreateView,
SubjectEditView,
ChaptersCreateView,
ChapterEditView,
ChapterListView,
SubjectListView,
QuestionCreateView,
QuestionEditView,
QuestionList,
QuestionPaperView,
frquestion,
load_subject_chapter,
chapterlistview,
subjectlistview,
questionview,
)
from django.views.decorators.csrf import csrf_exempt
urlpatterns=[
path('subjects/',SubjectCreateView.as_view()),
path('grades/',GradeView.as_view()),
path('chapters/',ChaptersCreateView.as_view()),
path('subjects/<int:pk>/',SubjectEditView.as_view()),
path('chapters/<int:pk>/',ChapterEditView.as_view()),
path('chapter-list/',csrf_exempt(ChapterListView.as_view())),
path('subject-list/',SubjectListView.as_view()),
path('question/',QuestionCreateView.as_view()),
path('question/<int:pk>/',QuestionEditView.as_view()),
path('question-paper/',QuestionList.as_view()),
path('question-paper/<int:pk>/',QuestionPaperView.as_view()),
path('frq/',frquestion),
path('ajax/load-subject/',load_subject_chapter,name='ajax_load_subjects'),
path('chapterfr/',chapterlistview),
path('subjectfr/',subjectlistview),
path('question-l/',questionview)
] | santhiya107/school | academics/urls.py | urls.py | py | 1,292 | python | en | code | 0 | github-code | 13 |
33346604050 | class Solution:
def subdomainVisits(self, cpdomains: List[str]) -> List[str]:
domain_counts= collections.defaultdict(int)
for domains in cpdomains:
score, _, domain = domains.partition(' ')
score = int(score)
domain_counts[domain] += score
for i, char in enumerate(domain):
if char == '.':
elem = domain[i+1:]
domain_counts[elem] += score
res = []
for key, value in domain_counts.items():
res.append(str(value) + ' ' + key)
return res | BradleyGenao/LeetCode-Solutions | subdomain-visit-count/subdomain-visit-count.py | subdomain-visit-count.py | py | 629 | python | en | code | 0 | github-code | 13 |
40131214450 | from collections import deque as dq
import copy
class Solution:
    def uniquePaths(self, m: int, n: int) -> int:
        """Count monotone lattice paths from the top-left to the bottom-right
        corner of an m x n grid, moving only right or down.

        Replaces the original BFS over a sentinel-padded grid — which also
        left debug ``print`` calls in, deep-copied the grid for a visited
        map, and needed a deque — with the standard single-row DP:
        paths[c] accumulates the number of ways to reach column c of the
        current row (ways from above are already stored there; ways from
        the left are added). O(m*n) time, O(n) space, no imports.
        """
        paths = [1] * n  # top row: exactly one way to reach each cell
        for _ in range(1, m):
            for col in range(1, n):
                paths[col] += paths[col - 1]
        return paths[-1]
| dlwlstks96/codingtest | LeetCode/62_Unique Paths.py | 62_Unique Paths.py | py | 1,229 | python | en | code | 2 | github-code | 13 |
29403849533 | import requests
# URL to send the request to (local test server of the course-management API).
url = "http://127.0.0.1:9000/api/mgr/sq_mgr/"
# Request header information, passed as a dict; the API expects form-encoded bodies.
header = {"Content-Type": "application/x-www-form-urlencoded"}
# Request parameters, passed as a raw form-encoded string.
payload = 'action=modify_course&id=5724&newdata={"name":"初中化学","desc":"初中化学课程","display_idx":"4"}'
# Simulate sending a PUT request by calling requests.put().
res = requests.put(url, data=payload.encode("utf-8"), headers=header)
# The request parameters contain Chinese text, so they must be encoded first, i.e. payload.encode('UTF8').
# Fetch and print the response result as JSON.
print(f"响应的结果为:{res.json()}")
# print(res.status_code)
17059477194 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.BoxExclusiveBase import BoxExclusiveBase
from alipay.aop.api.domain.BoxOrderStatusInfo import BoxOrderStatusInfo
from alipay.aop.api.domain.BoxExclusiveKeyword import BoxExclusiveKeyword
from alipay.aop.api.domain.BoxExclusiveService import BoxExclusiveService
from alipay.aop.api.domain.BoxExclusiveService import BoxExclusiveService
class SearchBrandBoxInfo(object):
    """Alipay SDK domain model for a brand search-box record.

    Plain fields are stored as-is; complex fields are coerced to their
    domain-model types in the property setters.  Serialization order and
    semantics match the original generated code, but the duplicated
    per-field (de)serialization blocks are replaced by one loop over
    ``_FIELDS``.
    """

    # Attribute names in serialization order (dict key == attribute name).
    _FIELDS = ('base_info', 'box_status', 'box_type', 'brand_id', 'channel',
               'ext_info', 'functions_order_info', 'keywords', 'operator_id',
               'operator_type', 'related_accounts', 'related_functions')

    def __init__(self):
        self._base_info = None
        self._box_status = None
        self._box_type = None
        self._brand_id = None
        self._channel = None
        self._ext_info = None
        self._functions_order_info = None
        self._keywords = None
        self._operator_id = None
        self._operator_type = None
        self._related_accounts = None
        self._related_functions = None

    def _coerce_service_list(self, value):
        # Shared setter logic for the two BoxExclusiveService list fields.
        result = list()
        for i in value:
            if isinstance(i, BoxExclusiveService):
                result.append(i)
            else:
                result.append(BoxExclusiveService.from_alipay_dict(i))
        return result

    @property
    def base_info(self):
        return self._base_info

    @base_info.setter
    def base_info(self, value):
        # Coerce plain dicts into the BoxExclusiveBase domain model.
        if isinstance(value, BoxExclusiveBase):
            self._base_info = value
        else:
            self._base_info = BoxExclusiveBase.from_alipay_dict(value)

    @property
    def box_status(self):
        return self._box_status

    @box_status.setter
    def box_status(self, value):
        self._box_status = value

    @property
    def box_type(self):
        return self._box_type

    @box_type.setter
    def box_type(self, value):
        self._box_type = value

    @property
    def brand_id(self):
        return self._brand_id

    @brand_id.setter
    def brand_id(self, value):
        self._brand_id = value

    @property
    def channel(self):
        return self._channel

    @channel.setter
    def channel(self, value):
        self._channel = value

    @property
    def ext_info(self):
        return self._ext_info

    @ext_info.setter
    def ext_info(self, value):
        self._ext_info = value

    @property
    def functions_order_info(self):
        return self._functions_order_info

    @functions_order_info.setter
    def functions_order_info(self, value):
        if isinstance(value, BoxOrderStatusInfo):
            self._functions_order_info = value
        else:
            self._functions_order_info = BoxOrderStatusInfo.from_alipay_dict(value)

    @property
    def keywords(self):
        return self._keywords

    @keywords.setter
    def keywords(self, value):
        if isinstance(value, BoxExclusiveKeyword):
            self._keywords = value
        else:
            self._keywords = BoxExclusiveKeyword.from_alipay_dict(value)

    @property
    def operator_id(self):
        return self._operator_id

    @operator_id.setter
    def operator_id(self, value):
        self._operator_id = value

    @property
    def operator_type(self):
        return self._operator_type

    @operator_type.setter
    def operator_type(self, value):
        self._operator_type = value

    @property
    def related_accounts(self):
        return self._related_accounts

    @related_accounts.setter
    def related_accounts(self, value):
        # Only list input is accepted, mirroring the generated SDK code.
        if isinstance(value, list):
            self._related_accounts = self._coerce_service_list(value)

    @property
    def related_functions(self):
        return self._related_functions

    @related_functions.setter
    def related_functions(self, value):
        if isinstance(value, list):
            self._related_functions = self._coerce_service_list(value)

    def to_alipay_dict(self):
        """Serialize to a plain dict, recursively converting domain objects."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            # Falsy values (None, '', empty list) are omitted, as before.
            if not value:
                continue
            if isinstance(value, list):
                # Convert list elements in place, mirroring the original code.
                for i, element in enumerate(value):
                    if hasattr(element, 'to_alipay_dict'):
                        value[i] = element.to_alipay_dict()
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a SearchBrandBoxInfo from a plain dict (inverse of to_alipay_dict)."""
        if not d:
            return None
        o = SearchBrandBoxInfo()
        # setattr goes through the property setters, so complex fields are coerced.
        for name in SearchBrandBoxInfo._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/SearchBrandBoxInfo.py | SearchBrandBoxInfo.py | py | 8,604 | python | en | code | 241 | github-code | 13 |
7855332654 | # -*- coding: utf-8 -*-
import shutil
from pathlib import Path
# Destination root for the normalized icon tree, next to this script.
dir_icons = Path(__file__).absolute().parent.joinpath("icons")
# Source root: the unpacked AWS asset package (hard-coded local download path).
dir_asset = Path("/Users/sanhehu/Downloads/Asset-Package_01312023.d59bb3e1bf7860fb55d4d737779e7c6fce1e35ae")
# Locate the three top-level asset folders by filename prefix
# (raises IndexError if the package layout changes).
dir_service = [path for path in dir_asset.iterdir() if path.name.startswith("Architecture-Service")][0]
dir_category = [path for path in dir_asset.iterdir() if path.name.startswith("Category")][0]
dir_resource = [path for path in dir_asset.iterdir() if path.name.startswith("Resource")][0]

# Copy only the 64px PNG variants of service/category icons, preserving
# the relative layout.  (Loop variable renamed from `dir`, which shadowed
# the builtin of the same name.)
for src_root, folder in [
    (dir_service, "Service"),
    (dir_category, "Category"),
]:
    for path_src in src_root.glob("**/*64.png"):
        path_dst = dir_icons.joinpath(folder, path_src.relative_to(src_root))
        path_dst.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy(path_src, path_dst)

# Resource icons have no 64px variant, so copy every PNG.
for path_src in dir_resource.glob("**/*.png"):
    path_dst = dir_icons.joinpath("Resource", path_src.relative_to(dir_resource))
    path_dst.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy(path_src, path_dst)
| MacHu-GWU/aws_icons-project | move.py | move.py | py | 1,050 | python | en | code | 0 | github-code | 13 |
396465681 | #Para seguir colaborando en esta misión de salvar al planeta, necesitamos que elabores
# un programa en Python que dado el tamaño de un pez indique si su organismo está
# contaminado. Para ello tendremos 4 opciones:
#Tamaño Normal: Mensaje "Pez en buenas condiciones"
#Tamaño por debajo de lo Normal: Mensaje "Pez con problemas de nutrición"
#Tamaño un poco por encima de lo Normal: Mensaje "Pez con síntomas de organismo contaminado"
#Tamaño sobredimensionado: Mensaje "Pez contaminado"
def clasificar_pez(tamano_normal, tamano_sobredimensionado, tamano_pez):
    """Return the health message for a fish given its measured size.

    tamano_normal: expected size of a healthy fish.
    tamano_sobredimensionado: threshold at/above which the fish is oversized.
    tamano_pez: measured size of the fish under analysis.
    """
    if tamano_pez == tamano_normal:
        return 'Pez en buenas condiciones'
    if tamano_pez >= tamano_sobredimensionado:
        return 'Pez contaminado'
    if tamano_pez < tamano_normal:
        return "Pez con problemas de nutrición"
    # Slightly above normal but below the oversized threshold.
    return "Pez con síntomas de organismo contaminado"


if __name__ == "__main__":
    # Original interactive prompts, kept verbatim; guarded so importing
    # the module has no side effects.
    TamañoNormal = int(input('Ingrese el tamaño normal que debe tener un pez: '))
    TamañoSobredimensionado = int(input('Ingrese el tamano Sobresaliente : '))
    TamañoDelPez = int(input('Ingrese el tamano del pez a analizar: '))
    print(clasificar_pez(TamañoNormal, TamañoSobredimensionado, TamañoDelPez))
| AndreaF5rnandez/Python | Condicionales/Ejer2.py | Ejer2.py | py | 1,033 | python | es | code | 0 | github-code | 13 |
5647461545 | import plyvel
import os
# NOTE: the snippet below shows how the path could be derived from the
# current user instead of being hard-coded (removed the unconditional
# os.environ['USERNAME']/['COMPUTERNAME'] reads, which were unused and
# raise KeyError on non-Windows systems):
# username = os.environ['USERNAME']
# leveldb_data_path = 'C:/Users/' + username + '/AppData/Local/Microsoft/Edge/User Data/Profile 4/Local Storage/leveldb'

# Specify the path to your LevelDB directory (an Edge extension's local store).
db_path = r"C:\Users\alex.fielder\AppData\Local\Microsoft\Edge\User Data\Profile 4\Local Extension Settings\hoimpamkkoehapgenciaoajfkfkpgfop"

# Check if the path is a valid directory before trying to open it.
if not os.path.isdir(db_path):
    print(f"The specified path '{db_path}' is not a valid directory.")
else:
    try:
        # Open the existing LevelDB database; never create a new one here.
        db = plyvel.DB(db_path, create_if_missing=False)
        # Dump every key/value pair in the database.
        for key, value in db:
            print(key, value)
        db.close()
    except Exception as e:
        print(f"An error occurred while accessing the database: {e}")
4122126635 |
import base64
import sys, neat, math, time, sqlite3
from matplotlib import pyplot as plt
import pygame as py
# constant variables
screen_size = [1280, 720]  # simulation window width/height in pixels
border_colour = (255, 255, 255, 255)  # RGBA colour marking off-track pixels
car_size_x = 35  # car sprite width in pixels
car_size_y = 35  # car sprite height in pixels
class Car:
    """A single NEAT-controlled car: sprite, movement, radar sensors, fitness."""

    def __init__(self):
        self.sprite = py.image.load('src/static/sim_content/car.png').convert()
        self.sprite = py.transform.scale(self.sprite, (car_size_x, car_size_y))
        self.rotated_sprite = self.sprite
        # start position on the track (pixels, top-left of the sprite)
        self.position = [465, 610]
        self.angle = 0
        self.speed = 0
        self.speed_set = False  # speed is initialised lazily on first movement()
        # calculates the center point of the sprite
        self.center = [self.position[0] + car_size_x / 2, self.position[1] + car_size_y / 2]
        self.radars = []  # list of [(x, y), distance] sensor readings
        # liveness flag and total distance travelled (drives the fitness reward)
        self.alive = True
        self.distance = 0

    def draw(self, screen):
        """Blit the rotated sprite and its radar overlay onto *screen*."""
        screen.blit(self.rotated_sprite, self.position)
        self.drawRadar(screen)

    def drawRadar(self, screen):
        """Draw each radar beam as a green line with an end-point marker."""
        for radar in self.radars:
            position = radar[0]
            py.draw.line(screen, (0, 255, 0), self.center, position, 1)
            py.draw.circle(screen, (0, 255, 0), position, 5)

    def rotateCenter(self, image, angle):
        """Rotate *image* by *angle* degrees around its center without drift."""
        # rotate, then re-center a copy of the original bounding rectangle
        rectangle = image.get_rect()
        rotated_image = py.transform.rotate(image, angle)
        rotated_rectangle = rectangle.copy()
        rotated_rectangle.center = rotated_image.get_rect().center
        # crop back to the original size, referencing the rotated surface
        rotated_image = rotated_image.subsurface(rotated_rectangle).copy()
        return rotated_image

    def movement(self):
        """Advance the car one tick along its heading, clamped to the window."""
        # the very first call gives the car its starting speed
        if not self.speed_set:
            self.speed = 10
            self.speed_set = True
        # get the rotated sprite and move in the x direction
        self.rotated_sprite = self.rotateCenter(self.sprite, self.angle)
        self.rect = self.rotated_sprite.get_rect()
        self.position[0] += math.cos(math.radians(360 - self.angle)) * self.speed
        # don't let the car go closer than 20 px to the edge of the window
        self.position[0] = max(self.position[0], 20)
        self.position[0] = min(self.position[0], screen_size[0] - 80)
        # increase the distance travelled (used for the fitness reward)
        self.distance += self.speed
        # do the same for the y position
        self.position[1] += math.sin(math.radians(360 - self.angle)) * self.speed
        self.position[1] = max(self.position[1], 20)
        # BUGFIX: clamp y against the window *height*; the original used
        # screen_size[0] (the width, 1280), which effectively disabled
        # this clamp on a 720-px-tall window.
        self.position[1] = min(self.position[1], screen_size[1] - 80)
        # recalculate the center from the new top-left position
        self.center = [int(self.position[0]) + car_size_x / 2, int(self.position[1]) + car_size_y / 2]

    def getReward(self):
        """Fitness reward: distance travelled, scaled by half a car length."""
        return self.distance / (car_size_x / 2)

    def calcCorners(self):
        """Return the four rotated corner points [lt, rt, lb, rb] of the car."""
        length = 0.5 * car_size_x
        left_top = [self.center[0] + math.cos(math.radians(360 - (self.angle + 30))) * length,
                    self.center[1] + math.sin(math.radians(360 - (self.angle + 30))) * length]
        right_top = [self.center[0] + math.cos(math.radians(360 - (self.angle + 150))) * length,
                     self.center[1] + math.sin(math.radians(360 - (self.angle + 150))) * length]
        left_bottom = [self.center[0] + math.cos(math.radians(360 - (self.angle + 210))) * length,
                       self.center[1] + math.sin(math.radians(360 - (self.angle + 210))) * length]
        right_bottom = [self.center[0] + math.cos(math.radians(360 - (self.angle + 330))) * length,
                        self.center[1] + math.sin(math.radians(360 - (self.angle + 330))) * length]
        corners = [left_top, right_top, left_bottom, right_bottom]
        return corners

    def checkCollision(self, sim_track):
        """Mark the car dead if any corner touches the track's border colour."""
        self.alive = True
        for point in self.calcCorners():
            if sim_track.get_at((int(point[0]), int(point[1]))) == border_colour:
                self.alive = False
                break

    def isAlive(self):
        return self.alive

    def calcRadars(self, degree, length):
        """Point *length* px from the center, *degree* off the heading."""
        x = int(self.center[0] + math.cos(math.radians(360 - (self.angle + degree))) * length)
        y = int(self.center[1] + math.sin(math.radians(360 - (self.angle + degree))) * length)
        return x, y

    def checkRadars(self, degree, sim_track):
        """Cast one radar beam until it hits the border colour or 250 px."""
        length = 0
        x, y = self.calcRadars(degree, length)
        # step the beam outwards one pixel at a time
        while not sim_track.get_at((x, y)) == border_colour and length < 250:
            length += 1
            x, y = self.calcRadars(degree, length)
        # record the end point and its distance from the car center
        dist = int(math.sqrt(math.pow(x - self.center[0], 2) + math.pow(y - self.center[1], 2)))
        self.radars.append([(x, y), dist])

    def getData(self):
        """Five radar distances scaled down to small ints for the network."""
        radars = self.radars
        return_values = [0, 0, 0, 0, 0]
        for i, radar in enumerate(radars):
            return_values[i] = int(radar[1] / 30)
        return return_values

    def update(self, sim_track):
        """One simulation tick: move, collision-check, re-cast all radars."""
        self.movement()
        self.checkCollision(sim_track)
        self.radars.clear()
        # radar fan: -90, -45, 0, 45, 90 degrees relative to the heading
        for d in range(-90, 120, 45):
            self.checkRadars(d, sim_track)
class Simulation:
    """Drives NEAT generations of Car objects over a track image.

    When *is_setup* is true the simulation runs headless (no drawing);
    otherwise each frame is rendered to *screen* at 45 FPS.
    """
    def __init__(self, screen, clock, sim_track, gens, is_setup):
        self.screen = screen
        self.clock = clock
        self.track = py.image.load(sim_track)
        self.max_gen = gens
        self.setup = is_setup
        # keep track of different generations
        self.current_generation = 0
        self.generations = []
    def loadConfig(self, config_path):
        """Build a NEAT Config with the default genome/reproduction classes."""
        config = neat.config.Config(neat.DefaultGenome,
                                    neat.DefaultReproduction,
                                    neat.DefaultSpeciesSet,
                                    neat.DefaultStagnation,
                                    config_path)
        return config
    def events(self):
        # exits on quit event (window close or ESC key)
        for event in py.event.get():
            if event.type == py.QUIT:
                self.quit()
            if event.type == py.KEYDOWN:
                if event.key == py.K_ESCAPE:
                    self.quit()
    def quit(self):
        # shut down pygame and terminate the process
        py.quit()
        sys.exit()
    def runSim(self, genomes, config):
        """NEAT eval function: simulate one generation of *genomes*.

        Each genome gets a fresh Car and a feed-forward network; fitness
        accumulates via Car.getReward() until every car has crashed or
        the 6-second generation time limit expires.
        """
        cars = []
        nets = []
        # intialises car and data objects in lists, one pair per genome
        for i, g in genomes:
            net = neat.nn.FeedForwardNetwork.create(g, config)
            nets.append(net)
            g.fitness = 0
            cars.append(Car())
        # appends the different generations
        self.current_generation += 1
        self.generations.append(self.current_generation)
        # counter to limit time (wall-clock start of this generation)
        counter = time.time()
        while True:
            self.events()
            # for each car get the action it takes from its network
            for i, car in enumerate(cars):
                output = nets[i].activate(car.getData())
                choice = output.index(max(output))
                # decides what to do based on car choices
                if choice == 0:
                    # goes left
                    car.angle += 10
                elif choice == 1:
                    # goes right
                    car.angle -= 10
                elif choice == 2:
                    # slows down (never below speed 12)
                    if(car.speed - 2 >= 12):
                        car.speed -= 2
                else:
                    # speeds up
                    car.speed += 2
            # checks to see if car is still alive
            # increases fitness if they are and break loop if not
            still_alive = 0
            for i, car in enumerate(cars):
                if car.isAlive():
                    still_alive += 1
                    car.update(self.track)
                    genomes[i][1].fitness += car.getReward()
            if still_alive == 0:
                break
            # breaks loop after 6 seconds
            counter_check = time.time()
            if counter_check - counter >= 6:
                break
            if not self.setup:
                # draws track and cars on screen
                self.screen.blit(self.track, (0,0))
                for car in cars:
                    if car.isAlive():
                        car.draw(self.screen)
                # displays current generation to the user
                font = py.font.Font(None, 32)
                text = font.render("Generation: " + str(self.current_generation) + " / " + str(self.max_gen), True, (0,0,0))
                self.screen.blit(text, (10, 10))
                py.display.flip()
                self.clock.tick(45)
class SaveData:
    """Collects NEAT run statistics and renders them as PNG graphs."""

    def __init__(self, name, stat_reporter, max_gens):
        self.name = name
        self.stat_reporter = stat_reporter
        self.gens = max_gens

    def formatData(self):
        """Derive per-generation fitness/species series, then render graphs."""
        self.generation_list = list(range(self.gens))
        # raw fitness series straight from the NEAT statistics reporter
        self.mean_fitness = self.stat_reporter.get_fitness_mean()
        self.best_fitness = [genome.fitness for genome in self.stat_reporter.most_fit_genomes]
        # scale down by 100 and round to 2 decimals for readable plots
        self.average_fitnesses = [float("{:.2f}".format(value / 100)) for value in self.mean_fitness]
        self.top_fitnesses = [float("{:.2f}".format(value / 100)) for value in self.best_fitness]
        # highest species id present in each generation's statistics dict
        self.species = [max(gen_data) for gen_data in self.stat_reporter.generation_statistics]
        # loading in graphs
        self.formatImages()

    def graph(self, type, title, file_name, xlabel, ylabel, xdata, ydata, color):
        """Render one scatter or line plot and save it under temporary_storage."""
        plt.title(title)
        # annotate the axes
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        if type == "scatter":
            plt.scatter(xdata, ydata, color=color, s=6)
        elif type == "line":
            plt.plot(xdata, ydata, color=color)
        plt.savefig("src/main/sim/temporary_storage/" + file_name + ".png")
        plt.close()

    def formatImages(self):
        """Produce the three standard graphs for a finished run."""
        self.graph("scatter", "Mean Fitness", "mean_fitness", "Generations", "Fitness",
                   self.generation_list, self.average_fitnesses, "red")
        self.graph("scatter", "Best Fitness", "best_fitness", "Generations", "Fitness",
                   self.generation_list, self.top_fitnesses, "red")
        self.graph("line", "Species Evolution", "species_change", "Generations", "Species ID",
                   self.generation_list, self.species, "red")
| jayCleverly/Neural-Cars | src/main/sim/simulations.py | simulations.py | py | 12,540 | python | en | code | 1 | github-code | 13 |
4122930755 | import torch
from torch_geometric.loader import DataLoader
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support
from sklearn.metrics import accuracy_score, f1_score,roc_auc_score,classification_report
import os, pickle
import time
from datetime import datetime
def joint_loss(opt, outputs, labels):
    """Weighted sum of the classification and regression criteria.

    ``opt.criterion`` is a (classification, regression) pair and
    ``opt.t_lambda`` weights the classification term.
    """
    cls_criterion, reg_criterion = opt.criterion
    cls_term = cls_criterion(outputs[0], labels[0])
    reg_term = reg_criterion(outputs[1].view(-1), labels[1])
    return opt.t_lambda * cls_term + (1.0 - opt.t_lambda) * reg_term
# get criterion and determine the output dim
def get_criterion(opt):
    """Pick the loss for ``opt.task_type`` and set ``opt.output_dim``.

    Classification tasks use cross-entropy, link prediction uses
    BCE-with-logits, distance regression uses L1, and 'joint' returns a
    [classification, regression] pair for joint_loss().
    """
    task = opt.task_type
    if task in ('node-classify', 'direct-classify', 'token-classifier'):
        # direct-classify has a fixed 8-way head; the others use num_labels
        opt.output_dim = 8 if task == 'direct-classify' else opt.num_labels
        return torch.nn.CrossEntropyLoss()
    if task == 'link-binary':
        opt.output_dim = 1
        return torch.nn.BCEWithLogitsLoss()
    if task == 'dist-regression':
        opt.output_dim = 1
        return torch.nn.L1Loss()
    if task == 'joint':
        opt.output_dim = 1
        return [torch.nn.CrossEntropyLoss(), torch.nn.L1Loss()]
    raise Exception('task type error, not supported:{}'.format(task))
def get_target(opt, batch):
    """Pull the ground-truth tensor(s) for the task off *batch*, moved to opt.device."""
    task = opt.task_type
    if task == 'link-binary':
        target = batch.y.to(opt.device)
    elif task == 'node-classify':
        target = batch.y_nrole.to(opt.device)
    elif task == 'dist-regression':
        target = batch.y_dist.to(opt.device)
    elif task == 'direct-classify':
        target = batch.y_direct.to(opt.device)
    elif task == 'joint':
        # joint training needs both the direction labels and the distances
        target = [batch.y_direct.to(opt.device), batch.y_dist.to(opt.device)]
    return target
def get_loss(opt, outputs, labels):
    """Dispatch to joint_loss() for joint training, else apply opt.criterion."""
    if opt.task_type == 'joint':
        return joint_loss(opt, outputs, labels)
    # scalar-output heads are flattened before the loss is applied
    if opt.output_dim == 1:
        outputs = outputs.view(-1)
    return opt.criterion(outputs, labels)
def train(opt, model, mydata):
    """Train *model* on mydata.train_graphs and validate every epoch.

    Whenever validation loss improves during the second half of training,
    the hidden segment vectors for both splits are (re-)exported via
    output_hidden_vect().  Returns the last epoch's validation loss.
    """
    # 1) data loaders
    loader_train = DataLoader(mydata.train_graphs, batch_size=6)
    loader_test = DataLoader(mydata.test_graphs, batch_size=2)
    print(loader_train, loader_test)
    # 2) optimizer and output directory for the best models
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, weight_decay=5e-4)
    opt.dir_path = create_save_dir(opt)  # prepare dir for saving best models
    print('model will be saved to:', opt.dir_path)
    best_loss = 99999.9
    for epoch in range(opt.epochs):
        # training pass
        model.train()
        for _i, graph in enumerate(loader_train, start=0):
            optimizer.zero_grad()  # clear gradients
            outputs = predict_one_batch(opt, model, graph)
            labels = get_target(opt, graph)
            loss = get_loss(opt, outputs, labels)
            loss.backward()  # derive gradients
            optimizer.step()  # update parameters based on gradients
        # validation pass
        model.eval()
        preds, tgts, val_loss = predict_all_batches(opt, model, loader_test)
        print(f'Epoch: {epoch:03d}, Loss: {val_loss:.4f}')
        # only consider "best" checkpoints in the second half of training
        if val_loss < best_loss and epoch > (opt.epochs // 2):
            # BUGFIX: the original never updated best_loss, so every
            # late-epoch model was treated as "best" and re-exported.
            best_loss = val_loss
            print('The best model saved with loss:', val_loss)
            # update the pre-trained vectors
            output_hidden_vect(opt, model, loader_test, 'test', mydata=mydata)
            output_hidden_vect(opt, model, loader_train, 'train', mydata=mydata)
        if opt.task_type in ['dist-regression', 'joint']:
            print('MSE is the val loss',)
        else:
            res_dict = evaluate(preds, tgts, True)
            print(res_dict)
    return val_loss
# only return pred logits, for back propagation
def predict_one_batch(opt, model, graph):
    """Run the model on one batch and return the raw logits (for backprop)."""
    return model(graph.to(opt.device))
# return pred labels, labels, and loss
def predict_all_batches(opt, model, dataloader_test):
    """Run the model over every batch of *dataloader_test*.

    Returns (outputs, targets, val_loss).  For classification tasks the
    outputs/targets are concatenated label tensors; for regression/'joint'
    tasks the lists stay empty and only the accumulated loss is meaningful.
    """
    outputs, targets, val_loss = [],[],0
    for _ii, batch in enumerate(dataloader_test, start=0):
        preds = predict_one_batch(opt,model, batch)
        target = get_target(opt, batch)
        # accumulate the validation loss over all batches
        val_loss += get_loss(opt,preds,target)
        # regression outputs have no discrete labels to collect
        if opt.task_type in ['dist-regression','joint']:
            continue
        preds = torch.argmax(preds, dim=-1)
        outputs.append(preds)
        targets.append(target)
    # numeric return (loss only)
    if opt.task_type in ['dist-regression','joint']:
        return outputs,targets,val_loss
    # category return: one flat tensor of predictions/targets each
    outputs = torch.cat(outputs)
    targets = torch.cat(targets)
    return outputs,targets,val_loss
def output_hidden_vect(opt,model,dataloader,split='test',mydata=None):
    """Export per-segment hidden vectors to a TSV-style file.

    Writes one "<doc_id>\t<seg_id>\t<vector>" line per segment to
    <opt.dir_path>/<opt.network_type>seg_vect_<split>.  When *mydata* is
    given, batch doc indices are mapped back to real document ids via
    mydata.train_idx2id / mydata.test_idx2id.
    """
    doc_ids, seg_ids,seg_vects = [], [],[]
    for _ii, batch in enumerate(dataloader, start=0):
        seg_ids.append(batch.seg_id)
        doc_ids.append(batch.doc_id)
        batch = batch.to(opt.device)
        # model.encode() yields the hidden representation per segment
        vects = model.encode(batch)
        seg_vects.append(vects)
    doc_ids = torch.cat(doc_ids)
    seg_ids = torch.cat(seg_ids)
    seg_vects = torch.cat(seg_vects)
    pairs = zip(doc_ids.detach().cpu().numpy(),
                seg_ids.detach().cpu().numpy(),
                seg_vects.detach().cpu().numpy()
                )
    vect_path = os.path.join(opt.dir_path, opt.network_type+'seg_vect_'+split)
    with open(vect_path, 'w',encoding='utf8') as f:
        for docid,segid,vect in pairs:
            if mydata:
                # map the dataset index back to the original document id
                if split=='train':
                    docid = mydata.train_idx2id[docid]
                else:
                    docid = mydata.test_idx2id[docid]
            f.write(str(docid)+'\t'+str(segid)+'\t'+str(vect).replace('\n','')+'\n')
def test_accu(y_pred,y_truth):
test_correct = y_pred == y_truth # Check against ground-truth labels.
test_acc = int(test_correct.sum()) / int(len(y_truth)) # Derive ratio of correct predictions.
print(str(test_correct.sum()) +' / '+ str(len(y_truth)))
return test_acc
def test_mse(y_pred,y_truth):
return torch.nn.MSELoss()(y_pred, y_truth)
def evaluate(outputs, targets, print_confusion=False):
    """Weighted classification metrics for predicted vs. true label tensors.

    Returns {'num', 'acc', 'f1', 'precision', 'recall'} (values rounded to
    3 decimals); optionally prints the full per-class classification report.
    """
    # move to cpu and convert to numpy for sklearn
    y_pred = outputs.cpu().numpy()
    y_true = targets.cpu().numpy()
    f1 = f1_score(y_true, y_pred, average='weighted')
    precision, recall, fscore, support = precision_recall_fscore_support(y_true, y_pred, average='weighted')
    acc = accuracy_score(y_true, y_pred)
    performance_dict = {'num': len(y_pred), 'acc': round(acc, 3), 'f1': round(f1, 3),
                        'precision': round(precision, 3), 'recall': round(recall, 3)}
    if print_confusion:
        print(classification_report(y_true, y_pred))
    return performance_dict
def create_save_dir(params):
    """Create a timestamped run directory under tmp_dir/ and snapshot the config.

    Writes config.ini (via params.export_to_config) and config.pkl into the
    new directory and returns its path.
    """
    # makedirs(exist_ok=True) avoids the exists()/mkdir() race of the original.
    os.makedirs('tmp_dir', exist_ok=True)
    # Directory name: <network>_<dataset>_<last 6 digits of the timestamp>.
    dir_name = '_'.join([params.network_type, params.dataset_name, str(round(time.time()))[-6:]])
    dir_path = os.path.join('tmp_dir', dir_name)
    os.makedirs(dir_path, exist_ok=True)
    params.export_to_config(os.path.join(dir_path, 'config.ini'))
    # Close the pickle file deterministically (the original leaked the handle).
    with open(os.path.join(dir_path, 'config.pkl'), 'wb') as f:
        pickle.dump(params, f)
    return dir_path
# Save the model
def save_model(params, model, performance_str):
    """Persist the model weights and an eval summary with a timestamp.

    Writes <params.dir_path>/model (the state_dict) and
    <params.dir_path>/eval containing *performance_str* followed by the
    save date ("%d/%m/%Y %H:%M:%S").  (Also strips dataset metadata that
    had been fused onto the final line of the original source.)
    """
    # 1) save the learned model weights
    torch.save(model.state_dict(), os.path.join(params.dir_path, 'model'))
    # current date and time, e.g. "31/01/2023 12:00:00"
    dt_string = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
    # 2) write the performance summary next to the weights
    eval_path = os.path.join(params.dir_path, 'eval')
    with open(eval_path, 'w') as f:
        f.write(str(performance_str) + '\n' + dt_string)
28250132556 | import asyncio
TAX_RATE = 0.05  # sales tax applied to the order subtotal
COMBO_DISCOUNT = 15 # in %, applied to a burger+side+drink combo
class Order:
def __init__(self, inventory):
self.inventory = inventory
self.items = {
"Burgers": {"quantity": 0},
"Sides": {"quantity": 0},
"Drinks": {"quantity": 0},
}
self.subtotal = 0
"""
self.items structure
self.items = {
"Burgers": {
"quantity": int,
id: {
"info": inventory.get_item(id),
"order_quantity": int,
"to_process_quantity": int,
},
}
}
"""
    def get_user_order(self, total_num_of_items):
        """Prompt for item numbers until the user enters 'q'.

        Returns the list of validated item ids (1..total_num_of_items).
        Invalid input is reported and the user is re-prompted.
        """
        print("Please enter the number of items that you would like to add to your order. Enter q to complete your order.")
        order_ids = []
        end_order = False
        while not end_order:
            item_id_order = input("Enter an item number: ")
            if item_id_order == "q":
                end_order = True
                continue
            try:
                item_id_order = int(item_id_order)
            # NOTE(review): bare except — should be `except ValueError:`
            except:
                print("Please enter a valid number.")
                continue
            # accept only ids within the menu range
            valid_id = 0 < item_id_order < total_num_of_items + 1
            if not valid_id:
                print(f"Please enter a number above 0 and below {total_num_of_items + 1}.")
                continue
            order_ids.append(item_id_order)
        return order_ids
    async def place_orders(self, order_ids):
        """Fetch stock level and item info concurrently for every ordered id.

        Returns a list of [stock, item_info] pairs, one per entry in
        *order_ids* (duplicates included), in the original order.
        """
        print("Placing order...")
        get_stock_tasks = []
        item_infos_tasks = []
        # schedule both inventory lookups for each id up front so they overlap
        for id in order_ids:
            stock = asyncio.create_task(self.inventory.get_stock(id))
            info = asyncio.create_task(self.inventory.get_item(id))
            get_stock_tasks.append(stock)
            item_infos_tasks.append(info)
        items_status = []
        # await the (stock, info) pair for each ordered item
        for i in range(len(item_infos_tasks)):
            item = await asyncio.gather(get_stock_tasks[i], item_infos_tasks[i])
            items_status.append(item)
        return items_status
def fill_items(self, items_status):
stock = {}
for item in items_status:
item_stock = item[0]
id = item[1]["id"]
if id not in stock:
stock[id] = item_stock
# item out of stock
if stock[id] < 1:
print(
f"Unfortunately item number {id} is out of stock and has been removed from your order. Sorry!")
# item in stock
else:
category = item[1]["category"]
stock[id] -= 1
if category == "Burgers":
burgers = self.items["Burgers"]
burgers["quantity"] += 1
burgers[id] = burgers.get(id, {})
burgers[id]["info"] = item[1]
burgers[id]["order_quantity"] = burgers[id].get("order_quantity", 0) + 1
burgers[id]["to_process_quantity"] = burgers[id]["order_quantity"]
elif category == "Sides":
sides = self.items["Sides"]
sides["quantity"] += 1
sides[id] = sides.get(id, {})
sides[id]["info"] = item[1]
sides[id]["order_quantity"] = sides[id].get("order_quantity", 0) + 1
sides[id]["to_process_quantity"] = sides[id]["order_quantity"]
elif category == "Drinks":
drinks = self.items["Drinks"]
drinks["quantity"] += 1
drinks[id] = drinks.get(id, {})
drinks[id]["info"] = item[1]
drinks[id]["order_quantity"] = drinks[id].get("order_quantity", 0) + 1
drinks[id]["to_process_quantity"] = drinks[id]["order_quantity"]
def calculate_total_order_price(self):
self.calculate_subtotal()
tax = round(self.subtotal * TAX_RATE, 2)
total_price = round(self.subtotal + tax, 2)
print(f"\nSubtotal: ${self.subtotal}")
print(f"Tax: ${tax}")
print(f"Total: ${total_price}")
return total_price
    async def conclude_order(self, total_price):
        """Ask the user to confirm the purchase; on 'yes', commit the order.

        An order totalling $0 is treated as empty and cancelled outright.
        """
        if total_price == 0:
            print("The order is empty and has been cancelled.")
            return
        while True:
            confirm_order = input(f"Would you like to purchase this order for ${total_price} (yes/no)? ")
            if confirm_order == "yes":
                print("Thank you for your order!")
                # commit the order: reduce inventory stock for every item
                await self.decrement_all_items()
                break
            elif confirm_order == "no":
                print("No problem, please come again!")
                break
            else:
                print("Invalid input.")
    def calculate_subtotal(self):
        """Print the order summary and accumulate self.subtotal.

        Combos (one burger + one side + one drink) are priced first, at a
        discount; whatever remains is priced individually.
        """
        burgers = self.items["Burgers"]
        sides = self.items["Sides"]
        drinks = self.items["Drinks"]
        # a combo needs one item from every category
        num_of_combos = min(
            burgers["quantity"],
            sides["quantity"],
            drinks["quantity"]
        )
        print("Here is a summary of your order: \n")
        while num_of_combos > 0:
            self.add_combo_price_to_subtotal()
            num_of_combos -= 1
        # price the remaining (non-combo) items
        self.add_items_price_to_subtotal()
    def add_combo_price_to_subtotal(self):
        """Price one burger+side+drink combo at the COMBO_DISCOUNT rate,
        add it to the subtotal, and print the combo's receipt lines.

        The priciest still-unprocessed item of each category is consumed
        (its to_process_quantity is decremented by one).
        """
        # Pick the most expensive remaining item id from each category.
        combo_ids = []
        for category_dict in self.items.values():
            combo_ids.append(self.find_most_expensive(category_dict))
        # Consume the chosen items and remember their info dicts by category.
        for category in self.items.values():
            for id, item in category.items():
                if id == "quantity":
                    continue
                if id in combo_ids:
                    item["to_process_quantity"] -= 1
                    # NOTE(review): burger/side/drink are bound here as a loop
                    # side effect; if a category had no eligible item
                    # (find_most_expensive returned 0) the corresponding name
                    # would be unbound below — TODO confirm callers only call
                    # this when a full combo is available.
                    if item["info"]["category"] == "Burgers":
                        burger = item["info"]
                    elif item["info"]["category"] == "Sides":
                        side = item["info"]
                    elif item["info"]["category"] == "Drinks":
                        drink = item["info"]
        # COMBO_DISCOUNT is a percentage, so 10 -> multiply by 0.9.
        discount = 1 - (COMBO_DISCOUNT / 100)
        combo_price = round((burger["price"] + side["price"] + drink["price"]) * discount, 2)
        self.subtotal += combo_price
        side_name = side["size"] + " " + side["subcategory"]
        drink_name = drink["size"] + " " + drink["subcategory"]
        print(f"${combo_price} Burger Combo")
        print(f"    {burger['name']}")
        print(f"    {side_name}")
        print(f"    {drink_name}")
def find_most_expensive(self, item_dict):
most_expensive_item_id = 0
most_expensive_item_price = 0
for id in item_dict:
if id == "quantity":
continue
quantity = item_dict[id]["to_process_quantity"]
if quantity < 1:
continue
price = item_dict[id]["info"]["price"]
if most_expensive_item_id == 0:
most_expensive_item_id = id
most_expensive_item_price = price
if most_expensive_item_price < price:
most_expensive_item_id = id
most_expensive_item_price = price
return most_expensive_item_id
    def add_items_price_to_subtotal(self):
        """Price every item that was not consumed by a combo at full price,
        print one receipt line per item, and add the total to the subtotal.
        Each item's to_process_quantity is zeroed as it is billed."""
        price = 0
        for category in self.items.values():
            for id, item in category.items():
                if id == "quantity":
                    continue
                quantity = item["to_process_quantity"]
                if quantity <= 0:
                    continue
                price += (item["info"]["price"] * quantity)
                item["to_process_quantity"] = 0
                # Burgers are named; sides/drinks are "<size> <subcategory>".
                if item["info"]["category"] == "Burgers":
                    print(f'${item["info"]["price"]} {item["info"]["name"]} * {quantity}')
                else:
                    item_name = item["info"]["size"] + " " + item["info"]["subcategory"]
                    print(f'${item["info"]["price"]} {item_name} * {quantity}')
        self.subtotal += round(price, 2)
async def decrement_all_items(self):
tasks = []
for category in self.items.values():
for id, item in category.items():
if id == "quantity":
continue
quantity = item["order_quantity"]
for _ in range(quantity):
task = asyncio.create_task(self.inventory.decrement_stock(id))
tasks.append(task)
await asyncio.gather(*tasks)
| avk-ho/programming_exp | python/projects/async_point_of_sale_system/order.py | order.py | py | 8,647 | python | en | code | 0 | github-code | 13 |
22765593874 | import json
from django.shortcuts import render
from django.views import View
from django.db import transaction
from django.http import JsonResponse
from django.db.models import Q
from boards.models import Board, Image
class BoardCreateView(View):
    """Create a board post together with its thumbnail and content images."""

    def post(self, request):
        """Expects a JSON body with title, content, post_type, thumb_img
        (one URL) and content_img (list of URLs). Returns 201 on success,
        400 with 'KEY_ERROR' when a required key is missing."""
        try:
            data = json.loads(request.body)
            title = data['title']
            content = data['content']
            post_type = data['post_type']
            thumb_img = data['thumb_img']
            content_img = data['content_img']
            # Board and all of its images are created atomically: if any
            # insert fails, the whole post is rolled back.
            with transaction.atomic():
                board = Board.objects.create(
                    title = title,
                    content = content,
                    type = post_type
                )
                Image.objects.create(
                    img_url = thumb_img,
                    type = 'thumb',
                    board_id = board.id
                )
                for img in content_img:
                    Image.objects.create(
                        img_url = img,
                        type = 'content',
                        board_id = board.id
                    )
            return JsonResponse({'MESSAGE':'board_created'}, status=201)
        except KeyError:
            return JsonResponse({'MESSAGE':'KEY_ERROR'}, status=400)
class BoardReadView(View):
    """Return one board post with its thumbnail and content image URLs."""

    def get(self, request, board_id):
        """404 with 'board_not_exists' when the id is unknown, otherwise a
        200 JSON payload describing the board."""
        try:
            # NOTE(review): exists() + get() issues two queries and is
            # race-prone between them — a try/Board.DoesNotExist would be
            # a single query; confirm before changing behavior.
            if not Board.objects.filter(id=board_id).exists():
                return JsonResponse({'MESSAGE':'board_not_exists'}, status=404)
            board = Board.objects.get(id=board_id)
            thumb_img = Image.objects.get(board_id=board.id, type='thumb')
            content_imgs = Image.objects.filter(board_id=board.id, type='content')
            results = {
                'title' : board.title,
                'content' : board.content,
                'type' : board.type,
                'thumb_img' : thumb_img.img_url,
                'content_imgs' : [
                    content_img.img_url for content_img in content_imgs
                ]
            }
            return JsonResponse(results, status=200)
        except KeyError:
            return JsonResponse({'MESSAGE':'KEY_ERROR'}, status=400)
class MainPageView(View):
    """List the 16 most recent boards of a given type for the main page."""

    def get(self, request):
        """?type=... selects the board category; returns 200 with a list of
        {title, content, thumb} dicts, newest first."""
        try:
            # Renamed from `type`, which shadowed the builtin.
            board_type = request.GET.get('type', None)
            OFFSET = 0
            LIMIT = 16
            boards = Board.objects.filter(type=board_type).order_by('-created_at')[OFFSET:LIMIT]
            results = [
                {
                    'title' : board.title,
                    'content' : board.content,
                    'thumb' : Image.objects.get(board_id=board.id, type='thumb').img_url
                } for board in boards
            ]
            return JsonResponse({'MESSAGE':results}, status=200)
        except KeyError:
            # Message normalized to match the other views' 'KEY_ERROR' responses.
            return JsonResponse({'MESSAGE':'KEY_ERROR'}, status=400)
| jinatra/pedalgrade | boards/views.py | views.py | py | 2,986 | python | en | code | 0 | github-code | 13 |
38340279703 | #!/bin/env python
"""
This script fills up 10 rows of HTML table using the file
RowTemplate.html
with random values, currently.
"""
TEMPLATE = "RowTemplate.html"
PAGE_TEMPLATE = "NewbooTemplate.html"
PAGE_OUTPUT = "NewbooTabled.html"
def one_row(product_rank, product_image, product_link, product_name,
            product_brand, star_ratings, review_count, *args, **kw):
    """Render one HTML table row by filling RowTemplate.html placeholders.

    star_ratings is a 0-100 score; product_score exposes it on a 0-10 scale.
    Extra positional/keyword arguments are accepted (and ignored) so the
    caller can pass its whole locals() dict.

    NOTE(review): format(**locals()) exposes *every* local (including `fo`
    and `row`) to the template, so renaming locals here would change the
    template namespace — keep names stable.
    """
    product_score = star_ratings/10
    with open(TEMPLATE) as fo:
        row = fo.read()
    return row.format(**locals())
def generate_table():
    """Build ten table rows with placeholder product data and write the
    assembled page (PAGE_TEMPLATE with {table_rows} filled) to PAGE_OUTPUT.

    Ratings and review counts decrease with each rank so the rows differ.
    """
    rows = []
    product_image = "./newboo/shoe.jpg"
    product_link = "http://www.newboo.com"
    product_name = "Whatever product you wish for it to be"
    product_brand = "BestBrand"
    star_ratings = 95
    review_count = 67
    for i in range(1, 11):
        product_rank = i
        # one_row absorbs the extra names from locals() via *args/**kw.
        rows.append(one_row(**locals()))
        star_ratings = star_ratings - i
        review_count = review_count - i
    with open(PAGE_TEMPLATE) as fo:
        page = fo.read().format(table_rows='\n'.join(rows))
    with open(PAGE_OUTPUT, 'w') as fw:
        fw.write(page)
# Generate the page only when run as a script (not on import).
if __name__ == '__main__':
    generate_table()
| eddebc/deeps-server | gen_table.py | gen_table.py | py | 1,184 | python | en | code | 0 | github-code | 13 |
70220886418 | import numpy as np
import pandas as pd
import sys
import os
def write_ans(ans, ansfile):
    """Write predictions to *ansfile* as a Kaggle-style CSV.

    Produces a header row ``id,label`` followed by one row per prediction,
    with ids numbered from 1.
    """
    print("Writing answer to %s" % ansfile)
    import csv
    # newline='' lets the csv module control line endings; without it the
    # output gets an extra blank line between rows on Windows.
    with open(ansfile, "w", newline="") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["id", "label"])
        for row_id, label in enumerate(ans, start=1):
            writer.writerow([row_id, label])
def GBC(X_train, Y_train, X_test):
    """Fit a gradient-boosted tree classifier on (X_train, Y_train) and
    return its predictions for X_test.

    Hyperparameters are hard-coded; random_state=42 makes runs repeatable.
    """
    from sklearn.ensemble import GradientBoostingClassifier
    clf = GradientBoostingClassifier(n_estimators=250, learning_rate=0.1, random_state=42, min_samples_split=200, min_samples_leaf=50, max_depth=8, max_features='sqrt', subsample=0.8).fit(X_train, Y_train.ravel())
    return clf.predict(X_test)
if __name__ == "__main__":
hlp = "python3 best.py train.csv test.csv X_train Y_train X_test ans.csv"
if len(sys.argv) != 7:
print(hlp)
sys.exit(0)
X_train_raw = pd.read_csv(sys.argv[3])
Y_train_raw = pd.read_csv(sys.argv[4])
X_test_raw = pd.read_csv(sys.argv[5])
X_train = X_train_raw.values.astype(float)
X_test = X_test_raw.values.astype(float)
Y_train = np.concatenate(([[0]], Y_train_raw.values))
# remove native_country
X_train = X_train[:, :64]
X_test = X_test[:, :64]
# Gradient Tree Boosting Classifier
ans = GBC(X_train, Y_train, X_test)
write_ans(ans, sys.argv[6])
| timlee0119/NTU-Machine-Learning-2019 | income_prediction/best.py | best.py | py | 1,342 | python | en | code | 0 | github-code | 13 |
71276692178 | from __future__ import print_function # use Python 3.0 printing
'''Procedure'''
#1-5 N/A
'''Part 1: Conditionals'''
#6a. This returns true since the first part returns true and the second part
#returns true as well. Since the compound keyword being used is and, the final
#out put is true since true and true is true. My predictions were correct.
#6b. This will return true since both conditions in this compund conditional
#return true and since the compound keyword being used is or, true or true
#returns true. My predictions were correct.
#7. 40 < x and x < 130 and 100 < y and y < 120
#8. x, y = (90, 115)
'''Part 2: if-else structures and print() functions'''
#9a. I predict that the first function call with 10 as the inputed parameter
#will print "10 is below the age limit." and the second function call will
#print "16 is old enough". After each function call, the console should also
#output " Minimum age is 13"
def age_limit_output(age):
'''Parameters: age -> Integer
Conditional checks if age is < 13.
Prints statements depending on whether or not condition is made
Does not return anything'''
AGE_LIMIT = 13 # convention: use CAPS for constants
if age < AGE_LIMIT:
print(age, 'is below the age limit.')
else:
print(age, 'is old enough.')
print(' Minimum age is ', AGE_LIMIT)
#9b.
def report_grade(percent):
'''Parameters: percent -> Integer
Conditional prints depending on whether or not the percent value is < 80
Does not return anything'''
if percent < 80:
print("A grade of " + str(percent) + " does not indicate mastery.")
print("Seek extra practice or help.")
else:
print("A grade of " + str(percent) + " indicates mastery.")
print("Keep up the good work!")
'''Part 3: The in operator and an introduction to collections'''
#10a. True
#10b. False
#11.
def letter_in_word(guess, word):
    '''Parameters: guess -> string, word -> string
    Returns True when *guess* occurs anywhere in *word*, else False.'''
    return guess in word
#12.
def hint(color, secret):
    '''Parameters: color -> string, secret -> list of strings
    Prints whether *color* appears in the secret sequence of colors.
    Does not return anything'''
    # BUG FIX: the original messages hard-coded "red"/"green" no matter
    # which color was queried; report the color actually being checked.
    if color in secret:
        print("The color " + color + " IS in the secret sequence of colors.")
    else:
        print("The color " + color + " IS NOT in the secret sequence of colors.")
'''Conclusion'''
#1. "if" allows block to run if condition/conditions are met. If it's not met,
#it branches to the "else" or "elif". "elif" allows block to run if the first
#condition for the "if" is not met and the condition in the "elif" is met as
#well. "else" runs the block if all prior conditions are not met.
#2. ==, >=, <=, >, <, !=, or, and, not, is, is not, in, not it are some examples
#of boolean operators in python
#3. Ira: Ira is not correct because the print statement is still only executed
#once which means that the amount of time it takes to compile and run the
#program is still the same.
#Jayla: Jayla is correct because that print statement is going to execute
#inevitably so it will save time if you keep track of only one instance of
#it.
#Kendra: Kendra is correct because the program will have less lines of code
#but still perform the same tasks which means that it saves memory for the
#same output.
#1.3.3 Function Test — exercise each helper once per branch.
age_limit_output(10)
age_limit_output(16)
report_grade(79)
report_grade(85)
print(letter_in_word('t', 'secret hangman phrase'))
secret = ['red','red','yellow','yellow','black']
hint('red', secret)
hint('green', secret) | Anshul2004/pythonPer2_2018-2019 | 1.3.3/Kashyap_1.3.3.py | Kashyap_1.3.3.py | py | 3,813 | python | en | code | 0 | github-code | 13 |
16656700610 | import numpy as np
import matplotlib.pyplot as plt
import itertools
import fractions
from fractions import Fraction
from numpy import *
px = []
py = []
def perp(a):
    """Return the 2-D vector *a* rotated 90 degrees counter-clockwise,
    i.e. (x, y) -> (-y, x)."""
    rotated = empty_like(a)
    rotated[0], rotated[1] = -a[1], a[0]
    return rotated
def seg_intersect(a1, a2, b1, b2):
    """Intersection point of the (infinite) lines through segment a1-a2 and
    segment b1-b2, computed with the perpendicular-dot-product method."""
    da = a2 - a1
    db = b2 - b1
    dp = a1 - b1
    # Perpendicular of da, inlined: (x, y) -> (-y, x).
    dap = empty_like(da)
    dap[0] = -da[1]
    dap[1] = da[0]
    return (dot(dap, dp) / dot(dap, db)) * db + b1
def i_to_l(i):
    """Map a chord-endpoint index 0..5 to its label 'A'..'F'.
    Raises KeyError for any other index."""
    labels = {0: 'A', 1: 'B', 2: 'C', 3: 'D', 4: 'E', 5: 'F'}
    return labels[i]
# Monte-Carlo simulation: drop three random chords in a unit circle centred
# at (1, 1) and count how many chord-chord intersections land inside it.
tries = 0
pieces = {}
pieces[0] = 0
pieces[1] = 0
pieces[2] = 0
pieces[3] = 0
# Distinct endpoint orderings observed for each intersection count.
sequence_0 = {}
sequence_1 = {}
sequence_2 = {}
sequence_3 = {}
count = 0
for i in range(1,100000):
    alpha = {}
    # generate 6 random angles
    # NOTE(review): multiplying by pi spans only half a turn, so all points
    # land on the upper semicircle; 2*pi would cover the full circle — TODO
    # confirm intent.
    t = np.random.random_sample(6) * np.pi
    # create a mapping between angles and point identifiers e.g. ABCDEF
    for i in range(0,len(t)):
        alpha[t[i]] = i_to_l(i)
    # s is the labels read in angular order, e.g. "CAFBDE".
    t2 = t.copy()
    t2.sort()
    s = ''
    for a in t2:
        s += alpha[a]
    # generate x,y coordinates from angles (circle centred at (1, 1))
    x = 1 + np.cos(t)
    y = 1 + np.sin(t)
    chord = {}
    chord[1] = []
    chord[2] = []
    chord[3] = []
    tries = tries + 1
    # data structure for chords: consecutive angle pairs form each chord
    for w in range(0,int(len(t)/2)):
        j = w*2
        p1 = array( [ x[j], y[j] ] )
        p2 = array( [ x[j+1], y[j+1] ] )
        chord[w+1].append(p1)
        chord[w+1].append(p2)
    # calculate intersects:
    # first compute the intersection of the two (infinite) lines, then
    # determine whether the intersection point lies within the circle
    intersects = 0
    for h in range(1,4):
        cA = chord[h]
        for f in range(h+1,4):
            #print(str(h)+"->"+str(f))
            cB = chord[f]
            m = seg_intersect(cA[0],cA[1],cB[0],cB[1])
            d = np.linalg.norm(m-[1,1])
            if(d < 1):
                intersects = intersects + 1
    # count number of intersections per try
    pieces[intersects] = pieces[intersects] + 1
    # log permutations (only those whose first label is 'A', to remove
    # rotational duplicates)
    if((intersects == 3) and (s[0] == 'A')):
        if(s in sequence_3.keys()):
            pass
        else:
            sequence_3[s] =1
    if((intersects == 2) and (s[0] == 'A')):
        if(s in sequence_2.keys()):
            pass
        else:
            sequence_2[s] =1
    if((intersects == 1) and (s[0] == 'A')):
        if(s in sequence_1.keys()):
            pass
        else:
            sequence_1[s] =1
    if((intersects == 0) and (s[0] == 'A')):
        if(s in sequence_0.keys()):
            pass
        else:
            sequence_0[s] =1
# results: compare exact configuration-count probabilities against the
# simulated percentages for 0..3 intersections (4..7 pizza pieces).
x = itertools.permutations(['B','C','D','E','F'])
total_perms = len(list(x))
zero = round((pieces[0]/tries) * 100, 2)
one = round((pieces[1]/tries) * 100, 2)
two = round((pieces[2]/tries) * 100, 2)
three = round((pieces[3]/tries) *100, 2)
print("")
print("ZERO INTERSECTIONS (4 PIECES)")
print("-----------------------------")
print(", ".join(sequence_0.keys()))
noc0 = len(sequence_0.keys())
# Exact probability as a reduced fraction of the 5! orderings.
fnoc0 = Fraction(noc0/total_perms).limit_denominator()
print("")
print("NO. OF CONFIGURATIONS: "+str(noc0)+" TOTAL: "+str(total_perms))
print("PROBABILITY: "+str(fnoc0))
print("SIMULATION P(): "+str(zero)+"%")
print("-----------------------------")
print("ONE INTERSECTIONS (5 PIECES)")
print("-----------------------------")
print(", ".join(sequence_1.keys()))
noc1 = len(sequence_1.keys())
fnoc1 = Fraction(noc1/total_perms).limit_denominator()
print("")
print("NO. OF CONFIGURATIONS: "+str(noc1)+" TOTAL: "+str(total_perms))
print("PROBABILITY: "+str(fnoc1))
print("SIMULATION P(): "+str(one)+"%")
print("-----------------------------")
print("TWO INTERSECTIONS (6 PIECES)")
print("-----------------------------")
print(", ".join(sequence_2.keys()))
noc2 = len(sequence_2.keys())
fnoc2 = Fraction(noc2/total_perms).limit_denominator()
print("")
print("NO. OF CONFIGURATIONS: "+str(noc2)+" TOTAL: "+str(total_perms))
print("PROBABILITY: "+str(fnoc2))
print("SIMULATION P(): "+str(two)+"%")
print("-----------------------------")
print("THREE INTERSECTIONS (7 PIECES)")
print("-----------------------------")
print(", ".join(sequence_3.keys()))
noc3 = len(sequence_3.keys())
fnoc3 = Fraction(noc3/total_perms).limit_denominator()
print("")
print("NO. OF CONFIGURATIONS: "+str(noc3)+" TOTAL: "+str(total_perms))
print("PROBABILITY: "+str(fnoc3))
print("SIMULATION P(): "+str(three)+"%")
print("-----------------------------")
print("")
| recap/pizza-cutting | random_pizza_cuts.py | random_pizza_cuts.py | py | 4,425 | python | en | code | 0 | github-code | 13 |
33164100920 | import mysql.connector
# One shared connection and cursor for the whole CLI session.
# NOTE(review): credentials are hard-coded (root, empty password) — move to
# configuration before any non-local use.
mydb = mysql.connector.Connect(
    host = "localhost",
    port = 3306,
    user = "root",
    password = "",
    database = "giveaway"
)
mycursor = mydb.cursor()
def write_user(val):
    """Insert one row into users_bot and commit.
    val: (name, surname, age, city, address, phonenumber, email)."""
    insert_sql = "insert into users_bot(name,surname,age,city,address,phonenumber,email) values(%s,%s,%s,%s,%s,%s,%s)"
    mycursor.execute(insert_sql, val)
    mydb.commit()
def read_users():
    """Return every row of users_bot as a list of tuples."""
    mycursor.execute("select * from users_bot")
    return mycursor.fetchall()
def update_user(val):
    """Update every field of one user and commit.
    val: (name, surname, age, city, address, phonenumber, email, id)."""
    update_sql = "update users_bot set name = %s, surname = %s, age = %s, city = %s, address = %s, phonenumber = %s, email = %s where id = %s"
    mycursor.execute(update_sql, val)
    mydb.commit()
def delete_user(val):
    """Delete the user with the given id and commit. val: (id,)."""
    delete_sql = "delete from users_bot where id = %s"
    mycursor.execute(delete_sql, val)
    mydb.commit()
# Interactive CRUD menu; loops until the user chooses 0 (Exit).
while True:
    choice = int(input("[1]Add user\n[2]List of users\n[3]Update user\n[4]Delete users\n[0]Exit\n"))
    if choice == 1:
        name = input("Insert name: ")
        surname = input("Insert surname: ")
        age = int(input("Insert age: "))
        city = input("Insert city: ")
        address = input("Insert address: ")
        phonenumber = int(input("Insert phone number: "))
        email = input("Insert email: ")
        val = (name, surname, age, city, address, phonenumber, email)
        write_user(val)
    elif choice == 2:
        users = read_users()
        for u in users:
            print(u)
    elif choice == 3:
        id = int(input("Insert user ID: "))
        name = input("Insert new name: ")
        # typo fix: prompt previously read "Insert new urname: "
        surname = input("Insert new surname: ")
        age = int(input("Insert new age: "))
        city = input("Insert new city: ")
        address = input("Insert new address: ")
        phonenumber = int(input("Insert new phone number: "))
        email = input("Insert new email: ")
        val = (name, surname, age, city, address, phonenumber, email, id)
        update_user(val)
    elif choice == 4:
        id = int(input("Insert user id: "))
        val = (id,)
        delete_user(val)
    elif choice == 0:
        break
12110985283 | '''
占位符的使用
'''
import tensorflow as tf
plhd = tf.placeholder(tf.float32,[None,3]) # N rows, 3 columns
data = [[1,2,3],
        [4,5,6],
        [7,8,9]]
# When running a placeholder, concrete data must be fed in, otherwise an error is raised.
with tf.Session() as sess:
    print(sess.run(plhd,feed_dict={plhd:data}))
| 15149295552/Code | Month06/day15/08_placeholder.py | 08_placeholder.py | py | 297 | python | en | code | 1 | github-code | 13 |
39217510132 | from feature_extraction import Featurizer
from model import Model
from sleeptor import Sleeptor
import pandas as pd
from keras.models import load_model
def main():
    """Entry point: either train an LSTM on extracted images (train=True)
    or load the pretrained model, then start the live sleep detector."""
    featurizer = Featurizer()
    # Flip to True to retrain instead of loading the saved model.
    train = False
    if(train):
        images, labels = featurizer.extrac_images()
        print(images.shape)
        print(labels.shape)
        mode = 'serie'
        type_model = 'lstm'
        model = Model(mode, featurizer.size, featurizer.step, featurizer.n_samples_per_video, type_model)
        model.train_lstm(images, labels)
    else:
        model = load_model('data/models/model_final_normalized.h5')
    sleeptor = Sleeptor(featurizer, model)
    sleeptor.live()
# Run the detector only when executed directly.
if __name__ == '__main__':
    main()
| jsanch81/Sleeptor2 | main.py | main.py | py | 716 | python | en | code | 0 | github-code | 13 |
71179041938 | #!/usr/bin/env python3
import argparse
import os
import subprocess
import yaml
import sys
import re
import json
# Arguments section
parser = argparse.ArgumentParser()
# nargs='?' makes the positional optional so its default can actually apply;
# argparse ignores `default` on a required positional argument.
parser.add_argument('template', nargs='?', help='yaml template describing the project',
                    default='template.yaml')
parser.add_argument('--dst-dir', help='the destination directory', default='.')
# Logger class
class AnsiColor:
    """ANSI terminal escape codes; Reset restores the default color."""
    Black = '\u001b[30m'
    Red = '\u001b[31m'
    Green = '\u001b[32m'
    Yellow = '\u001b[33m'
    Blue = '\u001b[34m'
    Magenta = '\u001b[35m'
    Cyan = '\u001b[36m'
    White = '\u001b[37m'
    Reset = '\u001b[0m'
class Log:
    """Colored console logger: cyan info, yellow warnings, red fatal errors."""

    @staticmethod
    def _colored(color, name, msg):
        # Wrap "[name] msg" in the given ANSI color, resetting afterwards.
        return f'{color}[{name}] {msg}{AnsiColor.Reset}'

    @staticmethod
    def info(name, msg):
        print(Log._colored(AnsiColor.Cyan, name, msg))

    @staticmethod
    def warn(name, msg):
        print(Log._colored(AnsiColor.Yellow, name, msg))

    @staticmethod
    def die(name, msg):
        # sys.exit with a string prints it to stderr and exits with status 1.
        sys.exit(Log._colored(AnsiColor.Red, name, msg))
# Template keys
class TemplateKey:
    """Recognized top-level keys of the project YAML template."""
    DevDependencies = 'DevDependencies'
    Dependencies = 'Dependencies'
    ProjectDirs = 'ProjectDirs'
    GitIgnore = 'GitIgnore'
    TsConfig = 'TsConfig'
    Eslint = 'Eslint'
    EslintIgnore = 'EslintIgnore'
    Npm = 'Npm'
# NPM class
class Npm:
    """Thin wrappers around the npm/npx command line (run via the shell)."""

    @staticmethod
    def install(module, dev=False, g=False):
        """Run ``npm install`` for *module* (a package name or list of names).

        dev adds --save-dev, g adds -g (dev wins if both are set).
        A None module is a no-op.
        """
        if module is None:
            return
        commands = ['npm', 'install']
        if dev:
            commands += ['--save-dev']
        elif g:
            commands += ['-g']
        if isinstance(module, str):
            module = [module]
        commands += module
        cmd = ' '.join(commands)
        Log.info('install', f'running {cmd} in {os.getcwd()}')
        subprocess.check_call(cmd, shell=True)

    @staticmethod
    def init(fast_init=True):
        """Run ``npm init`` (with -y when fast_init is True)."""
        # BUG FIX: the original wrote `['npm','init'] + ['-y'] if fast_init
        # else []`, which parses as `(['npm','init'] + ['-y']) if fast_init
        # else []` — fast_init=False produced an empty command string and
        # npm init never ran.
        commands = ['npm', 'init'] + (['-y'] if fast_init else [])
        cmd = ' '.join(commands)
        Log.info('init', f'running {cmd} in {os.getcwd()}')
        subprocess.check_call(cmd, shell=True)

    @staticmethod
    def exec(option):
        """Run ``npx <option>``."""
        cmd = ' '.join(['npx', option])
        Log.info('npx', f'running {cmd} in {os.getcwd()}')
        subprocess.check_call(cmd, shell=True)

    @staticmethod
    def update_npm():
        """Upgrade npm itself globally."""
        Npm.install('npm@latest', g=True)
class OptionMapper:
    """Dispatch table for the template's Npm options: each key maps to a
    handler callable and its positional arguments."""
    mappers = {
        'latest': { 'handler': Npm.update_npm, 'args': [] }
    }
def remove_comments(text):
    """Strip //-line and /*...*/ block comments from JSON-with-comments
    text, leaving string literals (which may contain comment markers)
    untouched. Each removed comment collapses to a single space."""
    pattern = re.compile(
        r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
        re.DOTALL | re.MULTILINE
    )
    # String-literal matches start with a quote and are kept verbatim;
    # comment matches start with '/' and are replaced by one space.
    return pattern.sub(
        lambda m: " " if m.group(0).startswith('/') else m.group(0),
        text,
    )
def write_ignore_file(filename, ignores_list):
    """Write *ignores_list*, one ignore pattern per line, to *filename*."""
    # Include the target path in the log line; the previous f-string
    # interpolated nothing and logged a fixed placeholder instead.
    Log.info('Ignore file', f'writing ignore file list to {filename}')
    with open(filename, 'w') as ignore_file:
        ignores = '\n'.join(ignores_list)
        ignore_file.write(ignores)
def init_project(args):
    """Scaffold a TypeScript/Node project in args.dst_dir from the YAML
    template at args.template: npm init, dependency installation, tsconfig
    patching, eslint/git ignore files and project directories."""
    Log.info('Template', f'opening {args.template}')
    try:
        with open(args.template, 'r') as template:
            config = yaml.safe_load(template)
    except:
        # Bare except is acceptable here only because Log.die exits the
        # process; nothing is silently swallowed.
        Log.die('Template', f'There was an error opening {args.template}')
    os.makedirs(args.dst_dir, exist_ok=True)
    # All subsequent relative paths (tsconfig.json, .gitignore, ...) are
    # created inside the destination directory.
    os.chdir(args.dst_dir)
    # Run any pre-init npm options (e.g. 'latest' upgrades npm itself).
    for key in config.get(TemplateKey.Npm, []):
        mapper = OptionMapper.mappers[key]
        mapper['handler'](*mapper['args'])
    Npm.init()
    Npm.install(config.get(TemplateKey.DevDependencies, None), dev=True)
    Npm.install(config.get(TemplateKey.Dependencies, None))
    if 'tsc' in config.get(TemplateKey.DevDependencies, []):
        Npm.exec('tsc --init')
        if TemplateKey.TsConfig in config.keys():
            # tsc emits a commented tsconfig; strip comments so it parses
            # as strict JSON, then merge the template's compilerOptions.
            with open('tsconfig.json') as ts_config:
                json_data = remove_comments(ts_config.read())
                data = json.loads(json_data)
            Log.info('TSConfig', config[TemplateKey.TsConfig])
            for key, value in config[TemplateKey.TsConfig].items():
                data['compilerOptions'][key] = value
            with open('tsconfig.json', 'w') as ts_config:
                ts_config.write(json.dumps(data))
    if TemplateKey.Eslint in config.keys():
        with open('.eslintrc.yaml', 'w') as eslint:
            eslint.write(yaml.dump(config[TemplateKey.Eslint]))
    if TemplateKey.EslintIgnore in config.keys():
        write_ignore_file('.eslintignore', config[TemplateKey.EslintIgnore])
    if TemplateKey.GitIgnore in config.keys():
        write_ignore_file('.gitignore', config[TemplateKey.GitIgnore])
    if TemplateKey.ProjectDirs in config.keys():
        Log.info('Creating directories', config[TemplateKey.ProjectDirs])
        for dir in config[TemplateKey.ProjectDirs]:
            path = os.path.normpath(os.path.normcase(dir))
            os.makedirs(path, exist_ok=True)
# Parse CLI arguments and scaffold the project when run as a script.
if __name__ == '__main__':
    init_project(parser.parse_args())
| Kaskeeeee/ts-project-initializer | ts-project-init.py | ts-project-init.py | py | 4,988 | python | en | code | 0 | github-code | 13 |
16129259053 | #!/usr/bin/python
"""
Purpose: Reading(Parsing) XML
"""
from pprint import pprint
# Install untangle on the fly if it is missing, then retry the import.
try:
    import untangle
except ModuleNotFoundError as ex:
    print(repr(ex))
    from os import system
    system("pip install untangle --user")
    import untangle
# Parse the catalog and build an isbn -> title mapping.
obj = untangle.parse("books.xml")
# print(obj)
# print(dir(obj))
# print(obj.catalog)
# print(dir(obj.catalog))
# print(obj.catalog.book)
mapping = {}
for each in obj.catalog.book:
    # print(each)
    # print(each.attrib['isbn'])
    isbn = each.get_attribute("isbn")
    title = each.title.cdata
    mapping[isbn] = title
pprint(mapping)
| udhayprakash/PythonMaterial | python3/11_File_Operations/02_structured_files/02_xml/01_xml/f_parse_xml.py | f_parse_xml.py | py | 596 | python | en | code | 7 | github-code | 13 |
74880943058 | import pytest
import seismic_zfp
import segyio
SGZ_FILE = 'test_data/small_4bit.sgz'
SGY_FILE = 'test_data/small.sgy'
def test_read_trace_header():
    """Trace headers from the compressed SGZ file must match segyio's,
    including negative (from-the-end) trace indices."""
    with seismic_zfp.open(SGZ_FILE) as sgzfile:
        with segyio.open(SGY_FILE) as sgyfile:
            for trace_number in range(-5, 25, 1):
                sgz_header = sgzfile.header[trace_number]
                sgy_header = sgyfile.header[trace_number]
                assert sgz_header == sgy_header
def test_read_trace_header_slicing():
    """Header slicing (steps, reverse, open-ended) must agree with segyio."""
    slices = [slice(0, 5, None), slice(0, None, 2), slice(5, None, -1), slice(None, None, 10), slice(None, None, None)]
    with seismic_zfp.open(SGZ_FILE) as sgzfile:
        with segyio.open(SGY_FILE) as sgyfile:
            for slice_ in slices:
                sgy_headers = sgyfile.header[slice_]
                sgz_headers = sgzfile.header[slice_]
                for sgz_header, sgy_header in zip(sgz_headers, sgy_headers):
                    assert sgz_header == sgy_header
def test_header_is_iterable():
    """Iterating the header accessor yields the same sequence as segyio."""
    with seismic_zfp.open(SGZ_FILE) as sgz_file:
        with segyio.open(SGY_FILE) as sgy_file:
            for sgz_header, sgy_header in zip(sgz_file.header, sgy_file.header):
                assert sgz_header == sgy_header
def test_read_bin_header():
    """The binary file header must round-trip through SGZ unchanged."""
    with seismic_zfp.open(SGZ_FILE) as sgzfile:
        with segyio.open(SGY_FILE) as segyfile:
            assert sgzfile.bin == segyfile.bin
| equinor/seismic-zfp | tests/test_header-accessors.py | test_header-accessors.py | py | 1,416 | python | en | code | 57 | github-code | 13 |
40131464620 | # -*- coding: utf-8 -*-
from collections import deque as dq
# BOJ 1021 "rotating queue": count the minimum number of left/right
# rotations needed to pop the m target values in the given order.
n, m = map(int, input().split())
idxList = list(map(int, input().split()))
# n, m = 10, 10
# idxList = [1, 6, 3, 2, 7, 9, 8, 4, 10, 5]
# Slots for non-target positions stay 0; targets sit at index value-1.
q = [0 for _ in range(n)]
q = dq(q) # declare the deque
for i in idxList:
    q[i-1] = i
moveCount = 0
for target in idxList:
    # Target already at the front: pop it for free.
    if q[0] == target:
        q.popleft()
        continue
    targetIdx = 0
    for i in range(len(q)):
        if q[i] == target:
            targetIdx = i
            break
    if targetIdx < len(q)/2: # target is closer to the left end
        for i in range(targetIdx):
            q.rotate(-1) # pull the queue forward (left rotation)
            moveCount += 1
        q.popleft()
    else: # target is closer to the right end
        for i in range((len(q)) - targetIdx):
            q.rotate(1) # push the queue backward (right rotation)
            moveCount += 1
        q.popleft()
print(moveCount)
# while popCount < m:
# print(q, moveCount)
# if q[0] != 0:
# q.popleft()
# popCount += 1
# continue
# leftIdx = 0
# for i in range(len(q)): #왼쪽부터 진행해서 1 찾기
# if q[i] != 0:
# leftIdx = i
# break
# rightIdx = 0
# for i in range(len(q)-1, -1, -1): #오른쪽부터 진행해서 1 찾기
# if q[i] != 0:
# rightIdx = (len(q)-1) - i
# break
# print(leftIdx, rightIdx)
# if leftIdx <= rightIdx: #찾는 값이 왼쪽에 더 가까울때
# for i in range(leftIdx):
# print(q)
# q.rotate(-1) #앞으로 당긴다
# moveCount += 1
# q.popleft()
# popCount += 1
# else: #찾는 값이 오른쪽에 더 가까울때
# for i in range(rightIdx):
# print(q)
# q.rotate(1) #뒤로 민다
# moveCount += 1
# q.popleft()
# popCount += 1
#print(moveCount) | dlwlstks96/codingtest | 백준/자료구조_1021_회전하는 큐.py | 자료구조_1021_회전하는 큐.py | py | 1,958 | python | ko | code | 2 | github-code | 13 |
26053209548 | """
学籍:1213033903
氏名:玉城洵弥
内容:オイラー
"""
import matplotlib
import matplotlib.pyplot as plt
from math import *
def draw(x, y, a, b):
    """Plot two solution curves (x, y) and (a, b) on one axis, mark the
    first curve's sample points, and show the figure."""
    fig = plt.figure()
    graph = fig.add_subplot()
    graph.plot(x, y)
    graph.plot(a, b)
    graph.scatter(x, y, s=30)
    plt.show()
def f1(t, Y1, Y2):
    """Right-hand side of Y1' = Y2 (first equation of the 2nd-order ODE
    y'' + 10y' + 16y = 0 rewritten as a first-order system)."""
    return Y2
def f2(t, Y1, Y2):
    """Right-hand side of Y2' = -16*Y1 - 10*Y2 (second equation of the
    first-order system)."""
    return -16 * Y1 - 10 * Y2
def euler(T, N, Y1, Y2):
    """Integrate the system Y1' = f1, Y2' = f2 over [0, T] with N forward
    Euler steps, starting from (Y1, Y2).

    Returns (x, y1, y2): the time grid and the two solution components,
    each of length N + 1.
    """
    dt = T / N
    x = [0] * (N + 1)
    y1 = [0] * (N + 1)
    y2 = [0] * (N + 1)
    y1[0] = Y1
    y2[0] = Y2
    for j in range(N):
        t = j * dt
        x[j] = t  # current time
        # BUG FIX: evaluate both derivatives at the *old* state before
        # updating. The original updated Y1 first and fed the new Y1 into
        # f2, which is semi-implicit Euler rather than the forward Euler
        # method this exercise implements.
        dY1 = f1(t, Y1, Y2)
        dY2 = f2(t, Y1, Y2)
        Y1 += dt * dY1
        Y2 += dt * dY2
        y1[j + 1] = Y1
        y2[j + 1] = Y2
    x[N] = N * dt  # final time
    return x, y1, y2
# Compare Euler solutions with a fine (N1) and a coarse (N2) step size.
T = 1
N1 = 10; N2 = 5
Y1 = 1; Y2 = 0  # initial conditions y(0) = 1, y'(0) = 0
# Euler method
x, y1, y2 = euler(T, N1, Y1, Y2)
a, b1, b2 = euler(T, N2, Y1, Y2)
draw(x, y1, a, b1)
# print("{0:5d} {1:6.4f} {2:8.6f}".format(N, T/N, y[N]))
| gusuku-oknw/Numerical_analysis | 094.Eular.py | 094.Eular.py | py | 974 | python | en | code | 0 | github-code | 13 |
22654989229 | from datetime import timedelta
import requests
import pandas as pd
import logging
from azure.storage.blob import BlobServiceClient
from azure.core.exceptions import AzureError
from io import StringIO
import credentials
from s3connector import azure_connection_string
import timeit
# Wall-clock timer for the end-of-run elapsed-time report.
start_time = timeit.default_timer()
# Set up logging
logging.basicConfig(filename='company_overviews.log', level=logging.INFO)
api_key = credentials.ALPHA_VANTAGE_API
max_requests_per_minute = 150  # NOTE(review): declared but not enforced anywhere visible
blob_service_client = BlobServiceClient.from_connection_string(azure_connection_string)
container_name = 'historic'
tickers_file = 'tickers.csv'
def send_teams_message(teams_url, message):
    """POST *message* to a Microsoft Teams incoming webhook.

    All errors are logged and swallowed so a notification failure never
    aborts the pipeline.
    """
    # BUG FIX: json is used below but was never imported at module level,
    # so every call raised NameError; import it here to keep the fix
    # self-contained.
    import json

    data = {'text': message}
    headers = {'Content-Type': 'application/json'}
    try:
        response = requests.post(teams_url, headers=headers, data=json.dumps(data))
        response.raise_for_status()
        logging.info(f"Message sent to Teams successfully: {message}")
    except requests.exceptions.HTTPError as http_err:
        logging.error(f"HTTP error occurred when sending message to Teams: {http_err}")
    except requests.exceptions.RequestException as req_err:
        logging.error(f"Request error occurred when sending message to Teams: {req_err}")
    except Exception as e:
        logging.error(f"Unexpected error occurred when sending message to Teams: {e}")
teams_url = 'https://data874.webhook.office.com/webhookb2/9cb96ee7-c2ce-44bc-b4fe-fe2f6f308909@4f84582a-9476-452e-a8e6-0b57779f244f/IncomingWebhook/7e8bd751e7b4457aba27a1fddc7e8d9f/6d2e1385-bdb7-4890-8bc5-f148052c9ef5'
send_teams_message(teams_url, "Script started.")
def retrieve_company_overview(api_key, symbol):
    """Fetch the Alpha Vantage OVERVIEW payload for *symbol*.

    Returns the parsed JSON dict on success; returns None (implicitly)
    when the response is empty or any request error occurs — callers are
    expected to filter out None results.
    """
    url = f'https://www.alphavantage.co/query?function=OVERVIEW&symbol={symbol}&apikey={api_key}'
    try:
        response = requests.get(url)
        response.raise_for_status()
        data = response.json()
        if data:
            print(f"Finished processing symbol '{symbol}'.")
            logging.info(f"Finished processing symbol '{symbol}'.")
            return data
        else:
            # Alpha Vantage returns {} for unknown symbols / throttling.
            logging.warning(f"No data returned for symbol '{symbol}'.")
    except requests.exceptions.HTTPError as http_err:
        logging.error(f"HTTP error occurred: {http_err}")
    except requests.exceptions.RequestException as req_err:
        logging.error(f"Request error occurred: {req_err}")
    except Exception as e:
        logging.error(f"Unexpected error occurred: {e}")
def save_dataframe_to_csv(dataframe, container_name, filename):
    """Serialize *dataframe* to CSV and upload it (overwriting) to the
    given Azure Blob Storage container as *filename*.

    Upload failures are logged and swallowed.
    """
    csv_data = dataframe.to_csv(index=False)
    try:
        container_client = blob_service_client.get_container_client(container_name)
        container_client.upload_blob(name=filename, data=csv_data, overwrite=True)
        # FIX: interpolate the actual blob name; the previous f-strings
        # contained no placeholder and logged a fixed literal instead.
        success_msg = f"Dataframe saved to Azure Blob Storage as '{filename}' successfully."
        logging.info(success_msg)
        send_teams_message(teams_url, success_msg)
    except AzureError as e:
        logging.error(f"Error uploading CSV to Azure Blob Storage: {e}")
# Download the ticker list from blob storage.
try:
    container_client = blob_service_client.get_container_client(container_name)
    blob_client = container_client.get_blob_client(tickers_file)
    tickers_data = blob_client.download_blob().readall().decode('utf-8')
    logging.info("Retrieved tickers data from Azure Blob Storage successfully.")
    send_teams_message(teams_url, "Retrieved tickers data from Azure Blob Storage successfully.")
    tickers_df = pd.read_csv(StringIO(tickers_data))
    tickers_df.rename(columns={'ticker': 'symbol'}, inplace=True)
    tickers_list = tickers_df['symbol'].tolist()
except AzureError as e:
    # NOTE(review): execution continues after this handler, so a failed
    # download leaves tickers_list undefined and the loop below raises
    # NameError — consider exiting here.
    logging.error(f"Error retrieving tickers from Azure Blob Storage: {e}")
# Retrieve company overviews
company_overviews = []
for symbol in tickers_list:
    overview = retrieve_company_overview(api_key, symbol)
    company_overviews.append(overview)
# Filter out None (failed / empty lookups)
company_overviews = [x for x in company_overviews if x is not None]
# Construct DataFrame
company_overviews_df = pd.DataFrame(company_overviews)
# Save to CSV
container_name = 'historic'
save_dataframe_to_csv(company_overviews_df, container_name, 'company_overviews.csv')
# Report total runtime to stdout and Teams.
end_time = timeit.default_timer()
elapsed = end_time - start_time
elapsed = str(timedelta(seconds=elapsed))
print(f"Elapsed time: {elapsed} seconds")
message = f"Company Overview Script finished successfully. Elapsed time: {elapsed} seconds."
send_teams_message(teams_url, message) | ernestprovo23/algov5 | commodities/company_overviews.py | company_overviews.py | py | 4,475 | python | en | code | 0 | github-code | 13 |
17028421055 |
def GreedySearch(SymbolSets, y_probs):
    """Greedy (best-path) decoding.

    Input
    -----
    SymbolSets: list
        all the symbols (the vocabulary without blank)

    y_probs: (# of symbols + 1, Seq_length, batch_size)
        row 0 holds the blank probability.

    Returns
    ------
    forward_path: str
        the per-step argmax sequence with blanks and repeated symbols
        removed.

    forward_prob: scalar (float)
        the forward probability (product of the chosen probabilities)
        of the greedy path.

    Results are computed per batch element; the first element's pair is
    returned, matching the part-1 batch size of 1.
    """
    n_rows, seq_len, batch_size = y_probs.shape
    compressed_paths = []
    path_probs = []
    for b in range(batch_size):
        prob = 1.0
        compressed = ""
        prev_symbol = None
        for t in range(seq_len):
            # Most probable row at this time step; ties keep the first row,
            # matching a strict '>' scan that starts from 0.
            column = [y_probs[m][t][b] for m in range(n_rows)]
            best_p = max(column)
            best_m = column.index(best_p)
            prob *= best_p
            if best_m == 0:
                # Blank: contributes no symbol and breaks any repeat run.
                prev_symbol = None
            else:
                symbol = SymbolSets[best_m - 1]
                if symbol != prev_symbol:
                    compressed += symbol
                prev_symbol = symbol
        compressed_paths.append(compressed)
        path_probs.append(prob)
    return compressed_paths[0], path_probs[0]
##############################################################################
"""Beam Search.
Input
-----
SymbolSets: list
all the symbols (the vocabulary without blank)
y_probs: (# of symbols + 1, Seq_length, batch_size)
Your batch size for part 1 will remain 1, but if you plan to use your
implementation for part 2 you need to incorporate batch_size.
BeamWidth: int
Width of the beam.
Return
------
bestPath: str
the symbol sequence with the best path score (forward probability)
mergedPathScores: dictionary
all the final merged paths with their scores.
"""
# Follow the pseudocode from lecture to complete beam search :-)
blank = 0
    def InitializePaths(SymbolSet, y):
        """Seed the beam from the t=0 distribution `y`.

        Returns the initial path sets (ending in blank / ending in a symbol)
        and their score dicts: the empty path scored with the blank
        probability, plus one single-symbol path per vocabulary entry.
        """
        InitialBlankPathScore = {}
        InitialPathScore = {}
        path = ""
        InitialBlankPathScore[path] = y[int(blank)]
        InitialPathsWithFinalBlank = {path}
        InitialPathsWithFinalSymbol = set()
        for c in range(len(SymbolSet)):
            # y[c+1] because row 0 of y is the blank probability.
            path = SymbolSet[c]
            InitialPathScore[path] = y[c+1]
            InitialPathsWithFinalSymbol.add(path)
        return InitialPathsWithFinalBlank, InitialPathsWithFinalSymbol, InitialBlankPathScore, InitialPathScore
    def ExtendWithBlank(PathWithTerminalBlank,PathsWithTerminalSymbol, y, BlankPathScore, PathScore):
        """Extend every beam path with the blank for this timestep.

        Appending a blank never changes a path's string, so paths from both
        input sets map to the same strings; scores for identical strings
        are summed.
        """
        UpdatedPathWithTerminalBlank = set()
        UpdatedBlankPathScore = {}
        for path in PathWithTerminalBlank:
            UpdatedPathWithTerminalBlank.add(path)
            UpdatedBlankPathScore[path] = BlankPathScore[path] * y[blank]
        # Symbol-terminated paths that absorb a blank collapse onto the same
        # string as an existing blank-terminated path — accumulate the score.
        for path in PathsWithTerminalSymbol:
            if path in UpdatedPathWithTerminalBlank:
                UpdatedBlankPathScore[path] += PathScore[path] * y[blank]
            else:
                UpdatedPathWithTerminalBlank.add(path)
                UpdatedBlankPathScore[path] = PathScore[path] * y[blank]
        return UpdatedPathWithTerminalBlank,UpdatedBlankPathScore
    def ExtendWithSymbol(PathsWithTerminalBlank, PathsWithTerminalSymbol, SymbolSet,y, BlankPathScore, PathScore ):
        """Extend every beam path with each vocabulary symbol for this timestep.

        Extending a symbol-terminated path with the *same* symbol (with no
        blank in between) collapses to the identical string, so scores for
        identical strings are accumulated rather than duplicated.
        """
        UpdatedPathsWithTerminalSymbol = set()
        UpdatedPathScore = {}
        for path in PathsWithTerminalBlank:
            for c in range(len(SymbolSet)):
                newpath = path + SymbolSet[c]
                UpdatedPathsWithTerminalSymbol.add(newpath)
                UpdatedPathScore[newpath] = BlankPathScore[path] * y[c+1]
        for path in PathsWithTerminalSymbol:
            for c in range(len(SymbolSet)):
                # Repeated symbol without an intervening blank: CTC collapses
                # it, so the path string is unchanged.
                if SymbolSet[c] == path[-1]:
                    newpath = path
                else:
                    newpath = path + SymbolSet[c]
                if newpath in UpdatedPathsWithTerminalSymbol:
                    UpdatedPathScore[newpath] += PathScore[path] *y[c+1]
                else:
                    UpdatedPathsWithTerminalSymbol.add(newpath)
                    UpdatedPathScore[newpath] = PathScore[path] *y[c+1]
        return UpdatedPathsWithTerminalSymbol, UpdatedPathScore
    def Prune(PathsWithTerminalBlank, PathsWithTerminalSymbol, BlankPathScore, PathScore, BeamWidth):
        """Keep only the (at most) BeamWidth best-scoring paths.

        `cutoff` is the (BeamWidth+1)-th best score; only scores strictly
        greater than it survive, so ties at the cutoff are dropped and fewer
        than BeamWidth paths may remain.
        """
        PrunedBlankPathScore = {}
        PrunedPathScore = {}
        scorelist = []
        # First gather all the relevant scores
        for p in PathsWithTerminalBlank:
            scorelist.append(BlankPathScore[p])
        for p in PathsWithTerminalSymbol:
            scorelist.append(PathScore[p])
        # Sort and find cutoff score that retains exactly BeamWidth paths
        scorelist.sort(reverse=True)
        if BeamWidth < len(scorelist):
            cutoff = scorelist[BeamWidth]
        else:
            # Fewer candidates than the beam width: keep everything.
            cutoff = scorelist[-1]
        PrunedPathsWithTerminalBlank= set()
        for p in PathsWithTerminalBlank:
            if BlankPathScore[p] > cutoff:
                PrunedPathsWithTerminalBlank.add(p)
                PrunedBlankPathScore[p] = BlankPathScore[p]
        PrunedPathsWithTerminalSymbol = set()
        for p in PathsWithTerminalSymbol:
            if PathScore[p] > cutoff:
                PrunedPathsWithTerminalSymbol.add(p)
                PrunedPathScore[p] = PathScore[p]
        return PrunedPathsWithTerminalBlank, PrunedPathsWithTerminalSymbol, PrunedBlankPathScore, PrunedPathScore
    def MergeIdenticalPaths(PathsWithTerminalBlank, PathsWithTerminalSymbol, BlankPathScore, PathScore):
        """Fold blank-terminated paths into the symbol-terminated set.

        Identical strings (regardless of whether they ended in a blank or a
        symbol) have their scores summed into one final score per string.

        NOTE(review): this aliases — and therefore mutates — the
        PathsWithTerminalSymbol set and PathScore dict passed in, rather than
        copying them.  Harmless here because BeamSearch does not reuse those
        arguments afterwards, but worth confirming before reusing elsewhere.
        """
        MergedPaths = PathsWithTerminalSymbol
        FinalPathScore = PathScore
        for p in PathsWithTerminalBlank:
            if p in MergedPaths:
                FinalPathScore[p] += BlankPathScore[p]
            else:
                MergedPaths.add(p)
                FinalPathScore[p] = BlankPathScore[p]
        return MergedPaths, FinalPathScore
    def BeamSearch(SymbolSet, y, BeamWidth):
        """Run CTC beam search over all timesteps.

        Seeds the beam from t=0, then for each later timestep prunes to the
        beam width and extends every surviving path with blank and with each
        symbol.  Returns the best final string and the dict of all merged
        final path scores.
        """
        PathScore = {}
        BlankPathScore = {}
        symbol,seqlen,batch_size = y.shape
        NewPathsWithTerminalBlank, NewPathsWithTerminalSymbol, NewBlankPathScore,NewPathScore = InitializePaths(SymbolSet, y[:,0,:])
        for t in range(1, seqlen):
            # Prune first, then extend with this timestep's distribution.
            PathsWithTerminalBlank, PathsWithTerminalSymbol, BlankPathScore, PathScore = Prune(NewPathsWithTerminalBlank,
                                                                                              NewPathsWithTerminalSymbol,
                                                                                              NewBlankPathScore, NewPathScore,
                                                                                              BeamWidth)
            NewPathsWithTerminalBlank, NewBlankPathScore = ExtendWithBlank(PathsWithTerminalBlank, PathsWithTerminalSymbol, y[:,t,:], BlankPathScore, PathScore)
            NewPathsWithTerminalSymbol, NewPathScore = ExtendWithSymbol(PathsWithTerminalBlank, PathsWithTerminalSymbol, SymbolSet, y[:, t, :], BlankPathScore, PathScore)
        MergedPaths, FinalPathScore = MergeIdenticalPaths(NewPathsWithTerminalBlank, NewPathsWithTerminalSymbol, NewBlankPathScore, NewPathScore)
        # Highest merged score wins.
        allpath = sorted(FinalPathScore.items(), key = lambda x:x[1], reverse = True)
        BestPath = allpath[0][0]
        return BestPath, FinalPathScore
| sabrinazhong98/CMU-Deep-Learning | hw3p1/mytorch/search.py | search.py | py | 8,135 | python | en | code | 1 | github-code | 13 |
72626135378 | import argparse
import torch
from torchvision import datasets, transforms
from sklearn.metrics import roc_curve, auc
import os
import numpy as np
import matplotlib.pyplot as plt
import MVTec_loader as mvtec
from gradcam import GradCAM
# for dataloader check: pin pin_memory, batch size 32 in original
# Accumulate the per-channel mean of all MVTec training images, iterating over
# every class's train split.  All images share the same size (per the loader),
# so averaging per-image channel means is equivalent to the dataset-wide mean.
mean = 0.
# NOTE(review): `std` is initialised but never accumulated — this script only
# reports the mean.
std = 0.
totlen =0
CLASS_NAMES = ['bottle', 'cable', 'capsule', 'carpet', 'grid',
               'hazelnut', 'leather', 'metal_nut', 'pill', 'screw',
               'tile', 'toothbrush', 'transistor', 'wood', 'zipper']
num_classes = len(CLASS_NAMES)
for i in range(num_classes):
    class_name = mvtec.CLASS_NAMES[i]  # e.g. 'bottle', 'cable', ...
    train_dataset = mvtec.MVTecDataset(class_name=class_name, is_train=True, grayscale=False)
    kwargs = {'num_workers': 4, 'pin_memory': True} if torch.cuda.is_available() else {}
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=32, shuffle=True, **kwargs)
    for images, _ in train_loader:
        batch_samples = images.size(0) # batch size (the last batch can have smaller size!)
        # Flatten each image to (C, H*W) so mean(2) is its per-channel mean.
        images = images.view(batch_samples, images.size(1), -1)
        mean += images.mean(2).sum(0)
    totlen += len(train_loader.dataset)
    print(i, mean/totlen)  # running per-channel mean after this class
mean /= totlen  # final dataset-wide per-channel mean (equal weight per image)
print("mean",mean)
37526433655 | #!/usr/bin/env python3
"""===============================================================================
FILE: forhabits/kostil/calorie-mate.py
USAGE: ./forhabits/kostil/calorie-mate.py
DESCRIPTION:
OPTIONS: ---
REQUIREMENTS: ---
BUGS: ---
NOTES: ---
AUTHOR: Alex Leontiev (alozz1991@gmail.com)
ORGANIZATION:
VERSION: ---
CREATED: 2021-06-24T18:25:33.559884
REVISION: ---
==============================================================================="""
import click
from alex_python_toolbox import google_drive
@click.command()
def calorie_mate():
    """Download the tracking spreadsheet from Google Sheets and echo it."""
    credentials = google_drive.get_creds(
        "../../credentials_google_spreadsheet.json", create_if_not_exist=True)
    sheet_df = google_drive.download_df_from_google_sheets(
        credentials,
        "1fgrbBlrMeAvOy3A8EhwHurZdXf9e7C6_p9yC1MwqBkA")
    click.echo(sheet_df)
# Run the Click command when executed as a script.
if __name__=="__main__":
    calorie_mate()
| nailbiter/pyassistantbot2 | calorie-mate.py | calorie-mate.py | py | 915 | python | en | code | 0 | github-code | 13 |
18252925094 | import cv2
import numpy as np
# Prewitt edge-detection demo: blur, convolve with the horizontal/vertical
# Prewitt kernels, and display the individual and combined responses.
img = cv2.imread('xray.jpeg')
if img is None:
    # cv2.imread returns None (no exception) when the file is missing or
    # unreadable; fail with a clear error instead of a cryptic cvtColor crash.
    raise FileNotFoundError("Could not read 'xray.jpeg'")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_gaussian = cv2.GaussianBlur(gray, (3, 3), 0)  # light denoise before differentiation
# Prewitt kernels: kernelx responds to horizontal edges, kernely to vertical.
kernelx = np.array([[1, 1, 1], [0, 0, 0], [-1, -1, -1]])
kernely = np.array([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]])
# ddepth=-1 keeps uint8 output, so negative responses are clipped to 0.
img_prewittx = cv2.filter2D(img_gaussian, -1, kernelx)
img_prewitty = cv2.filter2D(img_gaussian, -1, kernely)

cv2.imshow("Original Image", img)
cv2.imshow("Prewitt X", img_prewittx)
cv2.imshow("Prewitt Y", img_prewitty)
# cv2.add saturates at 255; the original "x + y" was NumPy uint8 addition,
# which wraps around modulo 256 and corrupted strong edges.
cv2.imshow("Prewitt", cv2.add(img_prewittx, img_prewitty))

cv2.waitKey(0)
cv2.destroyAllWindows()
| arilumintang/Python-Image-Processing-using-opencv | Program/prewitOperator.py | prewitOperator.py | py | 593 | python | en | code | 1 | github-code | 13 |
24089412710 | """Unit tests for wordpress_rest.py."""
import urllib.request, urllib.parse, urllib.error
from flask import get_flashed_messages
from oauth_dropins.webutil.util import json_dumps, json_loads
from oauth_dropins.wordpress_rest import WordPressAuth
from werkzeug.routing import RequestRedirect
from flask_app import app
from . import testutil
from wordpress_rest import WordPress, Add
class WordPressTest(testutil.AppTest):
  """Unit tests for the WordPress REST source.

  All HTTP traffic to the wordpress.com REST API is mocked via
  `expect_urlopen`/mox; no real network calls are made.
  """

  def setUp(self):
    """Create a WordPressAuth entity and a WordPress source for each test."""
    super().setUp()
    self.auth_entity = WordPressAuth(id='my.wp.com',
                                     user_json=json_dumps({
                                       'display_name': 'Ryan',
                                       'username': 'ry',
                                       'avatar_URL': 'http://ava/tar'}),
                                     blog_id='123',
                                     blog_url='http://my.wp.com/',
                                     access_token_str='my token')
    self.auth_entity.put()
    self.wp = WordPress(id='my.wp.com',
                        auth_entity=self.auth_entity.key,
                        url='http://my.wp.com/',
                        domains=['my.wp.com'])

  def expect_new_reply(
      self,
      url='https://public-api.wordpress.com/rest/v1/sites/123/posts/456/replies/new?pretty=true',
      content='<a href="http://who">name</a>: foo bar',
      response='{}', status=200, **kwargs):
    """Set up a mocked POST to the .../replies/new endpoint.

    Keyword args let individual tests override the URL, the posted content,
    the canned response body, and the HTTP status.
    """
    self.expect_urlopen(
      url, response, data=urllib.parse.urlencode({'content': content}),
      status=status, **kwargs)
    self.mox.ReplayAll()

  def test_new(self):
    """WordPress.new() populates name, picture, and domains from the auth entity."""
    self.expect_urlopen(
      'https://public-api.wordpress.com/rest/v1/sites/123?pretty=true',
      json_dumps({}))
    self.mox.ReplayAll()
    w = WordPress.new(auth_entity=self.auth_entity)
    self.assertEqual(self.auth_entity.key, w.auth_entity)
    self.assertEqual('my.wp.com', w.key.id())
    self.assertEqual('Ryan', w.name)
    self.assertEqual(['http://my.wp.com/'], w.domain_urls)
    self.assertEqual(['my.wp.com'], w.domains)
    self.assertEqual('http://ava/tar', w.picture)
  def test_new_with_site_domain(self):
    """A vanity site URL becomes the key id and the first domain."""
    self.expect_urlopen(
      'https://public-api.wordpress.com/rest/v1/sites/123?pretty=true',
      json_dumps({'ID': 123, 'URL': 'https://vanity.domain/'}))
    self.mox.ReplayAll()
    w = WordPress.new(auth_entity=self.auth_entity)
    self.assertEqual('vanity.domain', w.key.id())
    self.assertEqual('https://vanity.domain/', w.url)
    self.assertEqual(['https://vanity.domain/', 'http://my.wp.com/'],
                     w.domain_urls)
    self.assertEqual(['vanity.domain', 'my.wp.com'], w.domains)
  def test_new_site_domain_same_gr_blog_url(self):
    """No duplicate domain when the site URL equals the auth entity's blog URL."""
    self.expect_urlopen(
      'https://public-api.wordpress.com/rest/v1/sites/123?pretty=true',
      json_dumps({'ID': 123, 'URL': 'http://my.wp.com/'}))
    self.mox.ReplayAll()
    w = WordPress.new(auth_entity=self.auth_entity)
    self.assertEqual(['http://my.wp.com/'], w.domain_urls)
    self.assertEqual(['my.wp.com'], w.domains)
  def test_site_lookup_fails(self):
    """A non-403 HTTP error from the site lookup propagates to the caller."""
    self.expect_urlopen(
      'https://public-api.wordpress.com/rest/v1/sites/123?pretty=true',
      'my resp body', status=402)
    self.mox.ReplayAll()
    with self.assertRaises(urllib.error.HTTPError):
      WordPress.new(auth_entity=self.auth_entity)
  def test_site_lookup_api_disabled_error_start(self):
    """An 'API calls disabled' 403 redirects and flashes the Jetpack hint (new())."""
    self.expect_urlopen(
      'https://public-api.wordpress.com/rest/v1/sites/123?pretty=true',
      '{"error": "unauthorized", "message": "API calls to this blog have been disabled."}',
      status=403)
    self.mox.ReplayAll()
    with app.test_request_context():
      with self.assertRaises(RequestRedirect):
        self.assertIsNone(WordPress.new(auth_entity=self.auth_entity))
      self.assertIsNone(WordPress.query().get())
      self.assertIn('enable the Jetpack JSON API', get_flashed_messages()[0])
  def test_site_lookup_api_disabled_error_finish(self):
    """Same 'API calls disabled' handling via the Add.finish() OAuth callback."""
    self.expect_urlopen(
      'https://public-api.wordpress.com/rest/v1/sites/123?pretty=true',
      '{"error": "unauthorized", "message": "API calls to this blog have been disabled."}',
      status=403)
    self.mox.ReplayAll()
    with app.test_request_context():
      with self.assertRaises(RequestRedirect):
        Add('test_site_lookup_api_disabled_error_finish').finish(self.auth_entity)
      self.assertIsNone(WordPress.query().get())
      self.assertIn('enable the Jetpack JSON API', get_flashed_messages()[0])
  def test_create_comment_with_slug_lookup(self):
    """Non-numeric post paths are resolved to a post id via the slug: lookup."""
    self.expect_urlopen(
      'https://public-api.wordpress.com/rest/v1/sites/123/posts/'
      'slug:the-slug?pretty=true',
      json_dumps({'ID': 456}))
    self.expect_new_reply(response=json_dumps({'ID': 789, 'ok': 'sgtm'}))
    resp = self.wp.create_comment('http://primary/post/123999/the-slug?asdf',
                                  'name', 'http://who', 'foo bar')
    # ID field gets converted to lower case id
    self.assertEqual({'id': 789, 'ok': 'sgtm'}, resp)
  def test_create_comment_with_unicode_chars(self):
    """Non-ASCII author names and content are posted correctly."""
    self.expect_new_reply(content='<a href="http://who">Degenève</a>: foo Degenève bar')
    resp = self.wp.create_comment('http://primary/post/456', 'Degenève',
                                  'http://who', 'foo Degenève bar')
    self.assertEqual({'id': None}, resp)
  def test_create_comment_with_unicode_chars_in_slug(self):
    """Non-ASCII slugs are passed through to the slug: lookup."""
    self.expect_urlopen(
      'https://public-api.wordpress.com/rest/v1/sites/123/posts/slug:✁?pretty=true',
      json_dumps({'ID': 456}))
    self.expect_new_reply()
    resp = self.wp.create_comment('http://primary/post/✁', 'name',
                                  'http://who', 'foo bar')
    self.assertEqual({'id': None}, resp)
  def test_create_comment_gives_up_on_invalid_input_error(self):
    """A 400 invalid_input response is returned, not raised."""
    # see https://github.com/snarfed/bridgy/issues/161
    self.expect_new_reply(status=400,
                          response=json_dumps({'error': 'invalid_input'}))
    resp = self.wp.create_comment('http://primary/post/456', 'name',
                                  'http://who', 'foo bar')
    # shouldn't raise an exception
    self.assertEqual({'error': 'invalid_input'}, resp)
  def test_create_comment_gives_up_on_coments_closed(self):
    """A 403 'comments closed' response is returned, not raised."""
    resp = {'error': 'unauthorized',
            'message': 'Comments on this post are closed'}
    self.expect_new_reply(status=403, response=json_dumps(resp))
    # shouldn't raise an exception
    got = self.wp.create_comment('http://primary/post/456', 'name',
                                 'http://who', 'foo bar')
    self.assertEqual(resp, got)
  def test_create_comment_returns_non_json(self):
    """A non-JSON error body still raises HTTPError."""
    self.expect_new_reply(status=403, response='Forbidden')
    self.assertRaises(urllib.error.HTTPError, self.wp.create_comment,
                      'http://primary/post/456', 'name', 'http://who', 'foo bar')
| snarfed/bridgy | tests/test_wordpress_rest.py | test_wordpress_rest.py | py | 6,874 | python | en | code | 649 | github-code | 13 |
18480657996 | # -*- coding: utf-8 -*-
"""
Created 20.01.2021
@author: Gustavo Hernandez-Mejia
"""
from funct_shifts_sympt_wall import *
import matplotlib.pyplot as plt
# import math
# from pylab import *
# from mpl_toolkits.mplot3d import axes3d
import numpy as np
import pandas as pd
import random
# from matplotlib.font_manager import FontProperties
# import csv
# import shelve
import time
# from io import StringIO
# import matplotlib.patches as patches
t1 = time.perf_counter()  # wall-clock start; reported at the end of the run
# NOTE(review): `global` at module level is a no-op in Python — these names
# come from `funct_shifts_sympt_wall` via the star import above.
global person_Tot, Users, V_recep, V_triag, V_nurse_No_Urg, dr_No_Urg_V
global V_nurse_Urg, V_dr_Urg, V_imagin, V_labor, med_test, back_lab, back_time
global Own_Arrive, Suspicion_of_infection, Isolation_needed, Time_scale
global invasiv_prob, neg_press_prob
global User_track_1, Seat_map, day_current, day
# Presence of potential intervention
# Isolation_room = 0
# Emergen_doct = 1
# Shock_room = 0
# roll_up_wall = 0
# Invasiv_room = 0
# negt_pres_room = 0
# emerg_doctor = 0
med_test = 1
actual_user = 0
Time_var = 0   # minute counter within a day; reset at the end of every day
N_days = 365   # simulation horizon in days
# Per-day tallies of newly infected users, split by infection source
# (infected by other patients vs by areas/staff).
N_new_day_from_w = []
N_new_day_work = []
N_new_day = []
N_waiting_H = []
Result_user = []
Result_worker = []
N_day_fr_sta_1 = []
N_day_fr_sta_2 = []
N_day_fr_sta_3 = []
# Per-day infection counts attributed to each staff role/area; the _2/_3
# suffixes are staff shifts 2 and 3, and the port_* lists hold the
# corresponding per-day percentages computed after the simulation.
RECEP_from = []
TRIAG_from = []
TRIAG_U_from = []
N_URG_from = []
N_N_URG_from = []
IMAGI_from = []
LABOR_from = []
DR_URGE_from = []
DR_N_URG_from = []
RECEP_from_2 = []
TRIAG_from_2 = []
TRIAG_U_from_2 = []
N_URG_from_2 = []
N_N_URG_from_2 = []
DR_URGE_from_2 = []
DR_N_URG_from_2 = []
IMAGI_from_2 = []
LABOR_from_2 = []
RECEP_from_3 = []
TRIAG_from_3 = []
TRIAG_U_from_3 = []
N_URG_from_3 = []
N_N_URG_from_3 = []
DR_URGE_from_3 = []
DR_N_URG_from_3 = []
IMAGI_from_3 = []
LABOR_from_3 = []
port_RECEP_from = []
port_TRIAG_from = []
port_TRIAG_U_from = []
port_N_URG_from = []
port_N_N_URG_from = []
port_IMAGI_from = []
port_LABOR_from = []
port_DR_URGE_from = []
port_DR_N_URG_from = []
port_RECEP_from_2 = []
port_TRIAG_from_2 = []
port_TRIAG_U_from_2 = []
port_N_URG_from_2 = []
port_N_N_URG_from_2 = []
port_IMAGI_from_2 = []
port_LABOR_from_2 = []
port_DR_URGE_from_2 = []
port_DR_N_URG_from_2 = []
port_RECEP_from_3 = []
port_TRIAG_from_3 = []
port_TRIAG_U_from_3 = []
port_N_URG_from_3 = []
port_N_N_URG_from_3 = []
port_IMAGI_from_3 = []
port_LABOR_from_3 = []
port_DR_URGE_from_3 = []
port_DR_N_URG_from_3 = []
# Main simulation: one iteration per day.  Within a day, the while loop steps
# minute by minute (Time_var), advancing every active agent and dispatching
# the decision trees for the staff shift that is on duty at that minute.
# NOTE(review): the three shift blocks below are near-identical apart from the
# action_desit_tree / _2 / _3 variant; a shared helper would remove the
# duplication.
for day in range(N_days):
    day_current = day
    while Time_var <= Time_scale:
        #------------------------------------------------------------------------------
        """ Staff shift 1
        """
        if (Time_var >= shift_1[0]) and (Time_var <= shift_1[1]):
            entrance_routine(Time_var)
            for k in range(Num_Aget):
                if Users[k][2] != UNDEF:
                    Curr_time = Users[k][4]
                    Users[k][4] = Curr_time + 1
                if Users[k][4] == Users[k][3]:
                    # "DESITION TREE AREA"
                    area_desit_tree(Users[k],k)
                    #-------------------------------------------
                    # Set position matrix if Waiting area
                    # 1. position in Matrix, where availeable seat
                    if Users[k][2] == WAI_N:
                        Seated = 0
                        while Seated == 0:
                            indx_1 = np.random.randint(0, high=4, size=1)
                            indx_2 = np.random.randint(0, high=10, size=1)
                            if Seat_map[indx_1[0],indx_2[0]] == 0:
                                Seat_map[indx_1[0],indx_2[0]] = Users[k][0]
                                Seated = 1
                elif Users[k][4] == Users[k][6]:
                    # Action desition tree
                    action_desit_tree(Users[k],k,day)
        # NOTE(review): these two calls use the loop variable `k` leaked from
        # the for loop above — confirm this is intentional (last agent only).
        wait_room_routine(k,day_current)
        restore_routine(k)
        #------------------------------------------------------------------------------
        #------------------------------------------------------------------------------
        """ Staff shift 2
        """
        if (Time_var >= shift_2[0]) and (Time_var <= shift_2[1]):
            entrance_routine(Time_var)
            for k in range(Num_Aget):
                if Users[k][2] != UNDEF:
                    Curr_time = Users[k][4]
                    Users[k][4] = Curr_time + 1
                if Users[k][4] == Users[k][3]:
                    # "DESITION TREE AREA"
                    area_desit_tree(Users[k],k)
                    #-------------------------------------------
                    # Set position matrix if Waiting area
                    # 1. position in Matrix, where availeable seat
                    if Users[k][2] == WAI_N:
                        Seated = 0
                        while Seated == 0:
                            indx_1 = np.random.randint(0, high=4, size=1)
                            indx_2 = np.random.randint(0, high=10, size=1)
                            if Seat_map[indx_1[0],indx_2[0]] == 0:
                                Seat_map[indx_1[0],indx_2[0]] = Users[k][0]
                                Seated = 1
                elif Users[k][4] == Users[k][6]:
                    # Action desition tree
                    action_desit_tree_2(Users[k],k,day)
        wait_room_routine(k,day_current)
        restore_routine(k)
        #------------------------------------------------------------------------------
        #------------------------------------------------------------------------------
        """ Staff shift 3
        """
        if (Time_var >= shift_3[0]):
            entrance_routine(Time_var)
            for k in range(Num_Aget):
                if Users[k][2] != UNDEF:
                    Curr_time = Users[k][4]
                    Users[k][4] = Curr_time + 1
                if Users[k][4] == Users[k][3]:
                    # "DESITION TREE AREA"
                    area_desit_tree(Users[k],k)
                    #-------------------------------------------
                    # Set position matrix if Waiting area
                    # 1. position in Matrix, where availeable seat
                    if Users[k][2] == WAI_N:
                        Seated = 0
                        while Seated == 0:
                            indx_1 = np.random.randint(0, high=4, size=1)
                            indx_2 = np.random.randint(0, high=10, size=1)
                            if Seat_map[indx_1[0],indx_2[0]] == 0:
                                Seat_map[indx_1[0],indx_2[0]] = Users[k][0]
                                Seated = 1
                elif Users[k][4] == Users[k][6]:
                    # Action desition tree
                    action_desit_tree_3(Users[k],k,day)
        wait_room_routine(k,day_current)
        restore_routine(k)
        # From day 1 on, the early minutes of the day (Time_var <= shift_3[1])
        # are also handled by shift 3 — presumably the overnight tail of the
        # previous day's shift 3 wrapping past midnight; confirm against
        # funct_shifts_sympt_wall's shift definitions.
        if day_current >= 1:
            if (Time_var <= shift_3[1]):
                entrance_routine(Time_var)
                for k in range(Num_Aget):
                    if Users[k][2] != UNDEF:
                        Curr_time = Users[k][4]
                        Users[k][4] = Curr_time + 1
                    if Users[k][4] == Users[k][3]:
                        # "DESITION TREE AREA"
                        area_desit_tree(Users[k],k)
                        #-------------------------------------------
                        # Set position matrix if Waiting area
                        # 1. position in Matrix, where availeable seat
                        if Users[k][2] == WAI_N:
                            Seated = 0
                            while Seated == 0:
                                indx_1 = np.random.randint(0, high=4, size=1)
                                indx_2 = np.random.randint(0, high=10, size=1)
                                if Seat_map[indx_1[0],indx_2[0]] == 0:
                                    Seat_map[indx_1[0],indx_2[0]] = Users[k][0]
                                    Seated = 1
                    elif Users[k][4] == Users[k][6]:
                        # Action desition tree
                        action_desit_tree_3(Users[k],k,day)
                wait_room_routine(k,day_current)
                restore_routine(k)
        #------------------------------------------------------------------------------
        # time variable for minutes manager
        Time_var = Time_var + 1
    # Day aligment
    print(day+1)
    #------------------------------------------------------------------------------
    #--------------------- Medical Staff ----------------------------------------
    Users_workers = []
    Users_workers_2 = []
    Users_workers_3 = []
    staff_rot = worker_appe_rout(Users_workers,Users_workers_2,Users_workers_3)
    Users_workers = staff_rot[0]
    Users_workers_2 = staff_rot[1]
    Users_workers_3 = staff_rot[2]
    staff_ro_2 = work_statu_rout(Users_workers,Users_workers_2,
                                 Users_workers_3,day_current)
    Users_workers = staff_ro_2[0]
    Users_workers_2 = staff_ro_2[1]
    Users_workers_3 = staff_ro_2[2]
    # curr_worker = Users_workers
    # # Result_worker.extend(curr_worker)
    # cont_use_day_w = 0
    # for i in range(len(Users_workers)):
    #     if (Users_workers[i][6] == (day + 1)):
    #         cont_use_day_w = cont_use_day_w + 1
    # N_new_day_work.append([day,cont_use_day_w])
    #------------------------------------------------------------------------------
    #-------------------------- From Waiting Area ------------------------------
    # End-of-day bookkeeping: count today's newly infected users, split by
    # where the infection was acquired (waiting area vs elsewhere) and by
    # which staff shift was on duty.
    curr_user = Users
    Result_user.extend(curr_user)
    cont_use_day = 0
    for i in range(len(Users)):
        if ((Users[i][1] == 2) and (Users[i][9] != UNDEF) and
            (Users[i][9] == WAI_N+' -PATIENT')):
            cont_use_day = cont_use_day + 1
    N_new_day.append([day,cont_use_day])
    cont_use_day = 0
    for i in range(len(Users)):
        if ((Users[i][1] == 2) and (Users[i][9] != UNDEF) and
            (Users[i][9] != WAI_N+' -PATIENT')):
            cont_use_day = cont_use_day + 1
    N_new_day_from_w.append([day,cont_use_day])
    cont_use_day = 0
    for i in range(len(Users)):
        if ((Users[i][1] == 2) and (Users[i][9] != UNDEF) and
            (Users[i][11] == STAFF_1) and (Users[i][9] != WAI_N+' -PATIENT')):
            cont_use_day = cont_use_day + 1
    N_day_fr_sta_1.append([day,cont_use_day])
    cont_use_day = 0
    for i in range(len(Users)):
        if ((Users[i][1] == 2) and (Users[i][9] != UNDEF) and
            (Users[i][11] == STAFF_2) and (Users[i][9] != WAI_N+' -PATIENT')):
            cont_use_day = cont_use_day + 1
    N_day_fr_sta_2.append([day,cont_use_day])
    cont_use_day = 0
    for i in range(len(Users)):
        if ((Users[i][1] == 2) and (Users[i][9] != UNDEF) and
            (Users[i][11] == STAFF_3) and (Users[i][9] != WAI_N+' -PATIENT')):
            cont_use_day = cont_use_day + 1
    N_day_fr_sta_3.append([day,cont_use_day])
    #------------------------------------------------------------------------------
    #------------------------ From staff ----------------------------------------
    staff_count = [RECEP_from,TRIAG_from,TRIAG_U_from,N_URG_from,N_N_URG_from,
                   DR_URGE_from,DR_N_URG_from,IMAGI_from,LABOR_from]
    staff_count = count_day_staff_rout(Users,day,staff_count)
    RECEP_from = staff_count[0]
    TRIAG_from = staff_count[1]
    TRIAG_U_from = staff_count[2]
    N_URG_from = staff_count[3]
    N_N_URG_from = staff_count[4]
    DR_URGE_from = staff_count[5]
    DR_N_URG_from = staff_count[6]
    IMAGI_from = staff_count[7]
    LABOR_from = staff_count[8]
    staff_cnt_2 = [RECEP_from_2,TRIAG_from_2,TRIAG_U_from_2,N_URG_from_2,
                   N_N_URG_from_2,
                   DR_URGE_from_2,DR_N_URG_from_2,IMAGI_from_2,LABOR_from_2]
    staff_cnt_2 = count_day_staff_rout_2(Users,day,staff_cnt_2)
    RECEP_from_2 = staff_cnt_2[0]
    TRIAG_from_2 = staff_cnt_2[1]
    TRIAG_U_from_2 = staff_cnt_2[2]
    N_URG_from_2 = staff_cnt_2[3]
    N_N_URG_from_2 = staff_cnt_2[4]
    DR_URGE_from_2 = staff_cnt_2[5]
    DR_N_URG_from_2 = staff_cnt_2[6]
    IMAGI_from_2 = staff_cnt_2[7]
    LABOR_from_2 = staff_cnt_2[8]
    staff_cnt_3 = [RECEP_from_3,TRIAG_from_3,TRIAG_U_from_3,N_URG_from_3,
                   N_N_URG_from_3,
                   DR_URGE_from_3,DR_N_URG_from_3,IMAGI_from_3,LABOR_from_3]
    staff_cnt_3 = count_day_staff_rout_3(Users,day,staff_cnt_3)
    RECEP_from_3 = staff_cnt_3[0]
    TRIAG_from_3 = staff_cnt_3[1]
    TRIAG_U_from_3 = staff_cnt_3[2]
    N_URG_from_3 = staff_cnt_3[3]
    N_N_URG_from_3 = staff_cnt_3[4]
    DR_URGE_from_3 = staff_cnt_3[5]
    DR_N_URG_from_3 = staff_cnt_3[6]
    IMAGI_from_3 = staff_cnt_3[7]
    LABOR_from_3 = staff_cnt_3[8]
    #------------------------------------------------------------------------------
    # var_zero()
    # Reset all agents for the next day: fresh random arrival times, four
    # seeded infectious patients (indices 2, 80, 150, 200), empty seat map.
    time_arriv = []
    for i in range(Num_Aget):
        time_arriv.append(random.randint(Active_Period[0], Active_Period[1]))
    time_arriv.sort()
    for i in range(Num_Aget):
        Users[i] = [i+1, 0, UNDEF, 0, 0, time_arriv[i],0, 0, UNDEF, UNDEF, 0, UNDEF]
    Users[2][1] = 1
    Users[80][1] = 1
    Users[150][1] = 1
    Users[200][1] = 1
    Users[2][9] = INFEC
    Users[80][9] = INFEC
    Users[150][9] = INFEC
    Users[200][9] = INFEC
    """------------------Seat Map Waiting Area -------------------------------
    """
    for i in range(Seat_map.shape[0]):
        for j in range(Seat_map.shape[1]):
            Seat_map[i,j] = 0
    Time_var = 0
    # Daily totals of staff-attributed infections (all roles summed), one
    # list per shift; recomputed over all days so far on every iteration.
    cont_from_w = []
    cont_day = 0
    for i in range(len(LABOR_from)):
        cont_day = (RECEP_from[i][1] + TRIAG_from[i][1]
                    + TRIAG_U_from[i][1]
                    + N_URG_from[i][1]
                    + N_N_URG_from[i][1]
                    + DR_URGE_from[i][1]
                    + DR_N_URG_from[i][1]
                    + IMAGI_from[i][1]
                    + LABOR_from[i][1])
        cont_from_w.append([i,cont_day])
        cont_day = 0
    cont_from_w_2 = []
    cont_day = 0
    for i in range(len(LABOR_from_2)):
        cont_day = (RECEP_from_2[i][1] + TRIAG_from_2[i][1]
                    + TRIAG_U_from_2[i][1]
                    + N_URG_from_2[i][1]
                    + N_N_URG_from_2[i][1]
                    + DR_URGE_from_2[i][1]
                    + DR_N_URG_from_2[i][1]
                    + IMAGI_from_2[i][1]
                    + LABOR_from_2[i][1])
        cont_from_w_2.append([i,cont_day])
        cont_day = 0
    cont_from_w_3 = []
    cont_day = 0
    for i in range(len(LABOR_from_3)):
        cont_day = (RECEP_from_3[i][1] + TRIAG_from_3[i][1]
                    + TRIAG_U_from_3[i][1]
                    + N_URG_from_3[i][1]
                    + N_N_URG_from_3[i][1]
                    + DR_URGE_from_3[i][1]
                    + DR_N_URG_from_3[i][1]
                    + IMAGI_from_3[i][1]
                    + LABOR_from_3[i][1])
        cont_from_w_3.append([i,cont_day])
        cont_day = 0
    cont_tot_w = []
    cont_day = 0
    for i in range(len(N_new_day)):
        cont_day = N_new_day[i][1] + N_new_day_from_w[i][1]
        cont_tot_w.append([i,cont_day])
        cont_day = 0
    #------------------------------------------------------------------------------
    #---------------------- From staff Percentage -------------------------------
    staff_count_2 = [port_RECEP_from,port_TRIAG_from,port_TRIAG_U_from,
                     port_N_URG_from,port_N_N_URG_from,port_DR_URGE_from,
                     port_DR_N_URG_from,port_IMAGI_from,port_LABOR_from]
    staff_count_2 = percent_staff_rout(cont_from_w,staff_count,staff_count_2)
    port_RECEP_from = staff_count_2[0]
    port_TRIAG_from = staff_count_2[1]
    port_TRIAG_U_from = staff_count_2[2]
    port_N_URG_from = staff_count_2[3]
    port_N_N_URG_from = staff_count_2[4]
    port_DR_URGE_from = staff_count_2[5]
    port_DR_N_URG_from = staff_count_2[6]
    port_IMAGI_from = staff_count_2[7]
    port_LABOR_from = staff_count_2[8]
    staff_porcet_2 = [port_RECEP_from_2,port_TRIAG_from_2,port_TRIAG_U_from_2,
                      port_N_URG_from_2,port_N_N_URG_from_2,port_DR_URGE_from_2,
                      port_DR_N_URG_from_2,port_IMAGI_from_2,port_LABOR_from_2]
    staff_porcet_2 = percent_staff_rout(cont_from_w_2,staff_cnt_2,
                                        staff_porcet_2)
    port_RECEP_from_2 = staff_porcet_2[0]
    port_TRIAG_from_2 = staff_porcet_2[1]
    port_TRIAG_U_from_2 = staff_porcet_2[2]
    port_N_URG_from_2 = staff_porcet_2[3]
    port_N_N_URG_from_2 = staff_porcet_2[4]
    port_DR_URGE_from_2 = staff_porcet_2[5]
    port_DR_N_URG_from_2 = staff_porcet_2[6]
    port_IMAGI_from_2 = staff_porcet_2[7]
    port_LABOR_from_2 = staff_porcet_2[8]
    staff_porcet_3 = [port_RECEP_from_3,port_TRIAG_from_3,port_TRIAG_U_from_3,
                      port_N_URG_from_3,port_N_N_URG_from_3,port_DR_URGE_from_3,
                      port_DR_N_URG_from_3,port_IMAGI_from_3,port_LABOR_from_3]
    staff_porcet_3 = percent_staff_rout(cont_from_w_3,staff_cnt_3,
                                        staff_porcet_3)
    port_RECEP_from_3 = staff_porcet_3[0]
    port_TRIAG_from_3 = staff_porcet_3[1]
    port_TRIAG_U_from_3 = staff_porcet_3[2]
    port_N_URG_from_3 = staff_porcet_3[3]
    port_N_N_URG_from_3 = staff_porcet_3[4]
    port_DR_URGE_from_3 = staff_porcet_3[5]
    port_DR_N_URG_from_3 = staff_porcet_3[6]
    port_IMAGI_from_3 = staff_porcet_3[7]
    port_LABOR_from_3 = staff_porcet_3[8]
#------------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# plots
# Flatten the per-day [day, value] records into parallel lists for plotting.
pat_tot = []
days_plot = []
staff_plot = []
porcent_recep = []
porcent_triag = []
porcent_tria_u = []
porcent_N_urge = []
porcent_N_N_ur = []
porcent_dr_urg = []
porcent_dr_n_u = []
porcent_labora = []
porcent_imagin = []
for i in range(len(N_new_day)):
    pat_tot.append(N_new_day[i][1])
    staff_plot.append(N_new_day_from_w[i][1])
    days_plot.append(N_new_day[i][0]+1)
    porcent_recep.append(port_RECEP_from[i][1])
    porcent_triag.append(port_TRIAG_from[i][1])
    porcent_tria_u.append(port_TRIAG_U_from[i][1])
    porcent_N_urge.append(port_N_URG_from[i][1])
    porcent_N_N_ur.append(port_N_N_URG_from[i][1])
    porcent_dr_urg.append(port_DR_URGE_from[i][1])
    porcent_dr_n_u.append(port_DR_N_URG_from[i][1])
    porcent_labora.append(port_LABOR_from[i][1])
    porcent_imagin.append(port_IMAGI_from[i][1])
# Figure 1: stacked bars of newly infected patients per day (by staff vs by
# patients).  NOTE(review): `ax` actually holds a matplotlib Figure (from
# plt.figure), not an Axes — the name is misleading but savefig works.
width = 0.5
ax=plt.figure(figsize=(9,5), facecolor='w', edgecolor='k')
p1 = plt.bar(days_plot, staff_plot, width)
p2 = plt.bar(days_plot, pat_tot, width,
             bottom=staff_plot)
#plt.ylabel('Newly infected')
plt.title('Newly infected patients per day (numbers)', fontsize=14)
plt.xticks(days_plot, fontsize=12)
plt.yticks(fontsize=12)
plt.ylim(0, 255)
plt.legend((p1[0], p2[0]), ('By staff', 'By patients'), fontsize=12)
ax.savefig('new_infec_by_p_2.pdf', format='pdf', dpi=1400)
plt.show()
# Figure 2: patient-to-patient infections only.
ax=plt.figure(figsize=(9,5), facecolor='w', edgecolor='k')
p1 = plt.bar(days_plot, pat_tot, width)
#plt.ylabel('Newly infected')
plt.title('Infected users by patients per day (numbers)', fontsize=14)
plt.xticks(days_plot, fontsize=12)
plt.yticks(fontsize=12)
#plt.legend((p1[0]), ('By patients'), fontsize=12)
#ax.savefig('new_infec_patients_2.pdf', format='pdf', dpi=1400)
plt.show()
# Figure 3: stacked proportions of staff-attributed infections, one layer per
# staff role/area; each bar's layers are offset by the running sum below it.
width = 0.65
ax=plt.figure(figsize=(9,6), facecolor='w', edgecolor='k')
plt.bar(days_plot, porcent_N_N_ur, width, label='Non-urgt. nurse')
plt.bar(days_plot, porcent_triag,width, bottom =porcent_N_N_ur, label='Triage')
plt.bar(days_plot, porcent_tria_u, width, label='Triage-urgt',
        bottom = [sum(x) for x in zip(porcent_N_N_ur, porcent_triag)])
plt.bar(days_plot, porcent_recep, width, label='Reception',
        bottom = [sum(x) for x in zip(porcent_N_N_ur, porcent_tria_u,porcent_triag)])
plt.bar(days_plot, porcent_dr_n_u, width, label='Non-urgt. Dr',
        bottom = [sum(x) for x in
                  zip(porcent_N_N_ur, porcent_tria_u,porcent_triag,porcent_recep)])
plt.bar(days_plot, porcent_N_urge, width, label='Urgt. Nurse',
        bottom = [sum(x) for x in
                  zip(porcent_N_N_ur,porcent_tria_u,porcent_triag,porcent_recep,porcent_dr_n_u)])
plt.bar(days_plot, porcent_dr_urg, width, label='Urgt. Dr',
        bottom = [sum(x) for x in
                  zip(porcent_N_N_ur, porcent_tria_u,porcent_triag,porcent_recep,
                      porcent_N_urge, porcent_dr_n_u)])
plt.bar(days_plot, porcent_labora, width, label='Laboratory',
        bottom = [sum(x) for x in
                  zip(porcent_N_N_ur, porcent_tria_u,porcent_triag,porcent_recep,
                      porcent_N_urge, porcent_dr_n_u,porcent_dr_urg)])
plt.bar(days_plot, porcent_imagin, width, label='Imaging',
        bottom = [sum(x) for x in
                  zip(porcent_N_N_ur, porcent_tria_u,porcent_triag,porcent_recep,
                      porcent_N_urge, porcent_dr_n_u,porcent_dr_urg,porcent_labora)])
#plt.bar(days_plot, porcent_dr_urg, width, bottom = porcent_imagin)
#p4 = plt.bar(days_plot, porcent_dr_n_u, width)
#p5 = plt.bar(days_plot, porcent_recep, width)
#p6 = plt.bar(days_plot, porcent_triag, width)
#p7 = plt.bar(days_plot, porcent_tria_u, width)
#plt.ylabel('Newly infected')
plt.title('Proportion of infected patients by medical staff/area (%)',fontsize=14)
plt.xticks(days_plot, fontsize=12)
plt.yticks(fontsize=12)
plt.ylim(0, 105)
#plt.legend((p1[0], p2[0]), ('Non-urgt. nurse', 'By patients'), fontsize=12)
plt.legend(loc='upper left',fontsize=12)
#ax.savefig('new_infec_by_staff_2.pdf', format='pdf', dpi=1400)
plt.show()
# Wall-clock runtime report.
t2 = time.perf_counter()
print(f'\nFinished in {((t2-t1)/60): .2f} minutes')
print(f' " " in {((t2-t1)): .2f} seconds')
| GustavoHdezM/Parallel_test | shifts_sympt_wall.py | shifts_sympt_wall.py | py | 22,889 | python | en | code | 0 | github-code | 13 |
23728078655 | import cv2
import os
from datetime import datetime
class TimelapseEngine:
    """Capture timestamped still frames from the default camera via OpenCV.

    Intended for use as a context manager so the capture device is always
    released:

        with TimelapseEngine("shots") as engine:
            engine.save_image()
    """

    def __init__(self, output_directory: str,
                 resolution_width: int = 1920, resolution_height: int = 1088,
                 number_of_ramp_frames: int = 150):
        # The camera must exist before set_resolution() can configure it.
        self._cap = cv2.VideoCapture(0)
        self._num_of_ramp_frames = number_of_ramp_frames
        self.set_resolution(resolution_width, resolution_height)
        os.makedirs(output_directory, exist_ok=True)
        self._timelapse_directory = output_directory

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always free the camera, even if an error occurred inside the block.
        self._cap.release()

    def save_image(self):
        """Capture one frame, write it to disk, return (directory, filename)."""
        path = self._generate_target_image_path()
        cv2.imwrite(path, self._capture_image())
        return os.path.split(path)

    def set_num_of_ramp_frames(self, number_of_ramp_frames: int):
        self._num_of_ramp_frames = number_of_ramp_frames

    def set_resolution(self, width: int, height: int):
        # The resolution must be a multiple of 32 to preserve color balance,
        # otherwise frames come out blueish.  Related thread:
        # https://stackoverflow.com/questions/60989671/white-blue-balance-error-in-high-resolution-with-opencv-and-picamera-v2
        self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    def _generate_target_image_path(self) -> str:
        timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
        return os.path.join(self._timelapse_directory, f'timelapse-{timestamp}.jpg')

    def _capture_image(self):
        # Let the sensor settle before keeping a frame.
        self._discard_ramp_frames()
        return self._cap.read()[1]

    def _discard_ramp_frames(self):
        # The camera needs a moment to adjust exposure/white balance, so the
        # first few frames are useless and thrown away.
        for _ in range(self._num_of_ramp_frames):
            self._cap.read()
if __name__ == '__main__':
    import sys
    # CLI: required output directory, optionally width, height and ramp-frame
    # count (all three together, hence argc of exactly 2 or 5).
    if len(sys.argv) not in (2, 5):
        print('Invalid arguments')
        print()
        print('Usage:')
        print('timelapse.py <output_directory> [<resolution_width> <resolution_height> <number_of_ramp_frames>]')
        print()
        print('Note: In order to preserve color balance, the resolution must be set to power of 32. For instance:')
        print('- 1920*1088')
        print('- 1280*704 ')
        print('- 640*672')
        sys.exit(1)
    timelapse_directory = sys.argv[1]
    # Context manager guarantees the camera is released after the capture.
    with TimelapseEngine(timelapse_directory) as timelapse_engine:
        if len(sys.argv) == 5:
            res_width = int(sys.argv[2])
            res_height = int(sys.argv[3])
            timelapse_engine.set_resolution(res_width, res_height)
            num_of_ramp_frames = int(sys.argv[4])
            timelapse_engine.set_num_of_ramp_frames(num_of_ramp_frames)
        timelapse_engine.save_image()
| esceer/timelapse-script | src/utils/timelapse.py | timelapse.py | py | 3,031 | python | en | code | 0 | github-code | 13 |
6636281330 | import inflect
def main():
    """Read names until EOF and print an 'Adieu' farewell for all of them."""
    names = get_names()
    p = inflect.engine()
    print("Adieu, adieu, to ", end="")
    # inflect's join renders e.g. ['a', 'b', 'c'] as "a, b, and c".
    print(p.join(names))
def get_names():
    """Collect names from stdin, one per line, until EOF (Ctrl-D)."""
    names = []
    try:
        while True:
            names.append(input("Name: "))
    except EOFError:
        # Emit a newline so output starts cleanly after the Ctrl-D prompt.
        print()
    return names
main() | Larfie/CS50 | adieu/adieu.py | adieu.py | py | 342 | python | en | code | 1 | github-code | 13 |
10038334907 | import smtplib
from email.mime.text import MIMEText
# Minimal SMTP demo: send a plain-text message through Gmail.
body = "This is a test email. How are you?"
msg = MIMEText(body)
msg['From'] = "alaminsrk3@gmail.com"
msg['To'] = "noonesp86@gmail.com"
msg['Subject'] = "Hello"
server = smtplib.SMTP('smtp.gmail.com', 587)
# for secured connection
server.starttls()
# SECURITY: credentials are hard-coded in source. Move them to environment
# variables or a secret store, and rotate this password.
server.login("alaminsrk3@gmail.com", "01989745377as")
server.send_message(msg)
print("Mail sent")
server.quit()
| parvez86/PythonLearn | networking/emailclient.py | emailclient.py | py | 422 | python | en | code | 0 | github-code | 13 |
22102393292 | from collections import deque
from typing import Union, List
import torch
from torch import nn
from torch.distributions import Categorical
import torch.nn.functional as F
from agents.a2c import A2CAgent
from configs import Config
from utils import to_one_hot
class A2CAgentSwim(A2CAgent):
    """A2C agent that mixes a frozen pool of pre-trained policies.

    A trainable "choosing" head sits on top of the frozen trunk: features
    feeding the value head are captured via a forward hook, the head emits a
    softmax over the policy pool, and the pooled policy weights are blended
    accordingly to produce per-action logits.
    """
    def __init__(self, id_: int, model, obs_spaces_shape: tuple, checkpoint,
                 cfg: Config):
        super().__init__(id_, model, obs_spaces_shape, cfg)
        self.test_mode: bool = False
        self.policies_weights, self.policies_biases = None, None
        # Capture the value head's input features on every forward pass.
        self.model.value.register_forward_hook(self._save_features)
        # NOTE: load_model must run before building choosing_layer — the head's
        # output size is len(self.policies_weights) taken from the checkpoint.
        self.load_model(checkpoint)
        self.choosing_layer = nn.Sequential(nn.Linear(self.cfg.h_space, self.cfg.h_space),
                                            nn.ReLU(),
                                            nn.Dropout(),
                                            nn.Linear(self.cfg.h_space, self.cfg.h_space),
                                            nn.ReLU(),
                                            nn.Dropout(),
                                            nn.Linear(self.cfg.h_space, len(self.policies_weights)))
        self.features = None      # last captured value-head input (set by hook)
        self.ope_values = []      # per-step off-policy importance ratios
    def _save_features(self, module, input_, output):
        # Forward hook on model.value: remember the tensor fed into it.
        self.features = input_[0] if isinstance(input_, tuple) else input_
    def train(self, mode: bool = True):
        """Enable training for the choosing head only; the trunk stays frozen."""
        if mode:
            self.training = mode
            self.model.training = mode
            # self.model.value.train()
            self.choosing_layer.train()
            return self
        else:
            return super().train(mode)
    def test(self):
        """Switch into test mode (eval + test_mode flag)."""
        self.test_mode = True
        return self.eval()
    def take_action(self, obs):
        """Select an action for `obs`; in training also record stats for the
        update (probs, entropies, values, OPE importance ratios)."""
        self.obs.append(torch.as_tensor(obs).float())
        # Stack the observation history into a single state tensor.
        state = torch.stack(list(self.obs), dim=-2)
        _, value = self.model(state.to(self.cfg.device))
        # self.features was just refreshed by the forward hook above.
        logits = self.choosing_layer(self.features)
        # Broadcast the pool-choice probabilities over the policy weight dims.
        choose_probs = F.softmax(logits, dim=-1).reshape(*logits.shape, *[1] * len(self.policies_weights.shape[2:]))
        # Convex mix of pooled policy weights/biases according to choose_probs.
        policy_weights = torch.sum(self.policies_weights * choose_probs[..., None], dim=1)
        policy_bias = torch.sum(self.policies_biases * choose_probs, dim=1) if self.policies_biases is not None else 0
        policy_logits = torch.sum(policy_weights * self.features[..., None, None, :], dim=-1) + policy_bias
        dist = Categorical(logits=policy_logits)
        # Epsilon-greedy exploration during training, otherwise sample.
        if self.training and torch.rand((1,)).item() < self.e_greedy:
            *shape, high = policy_logits.shape
            action = torch.randint(high, shape)
        else:
            action = dist.sample()
        if self.training:
            # Per-policy logits/probs for off-policy evaluation ratios.
            policies_logits = torch.matmul(self.features, self.policies_weights.transpose(-1, -2))
            policies_logits = policies_logits + self.policies_biases.unsqueeze(-2)
            probs = F.softmax(logits, dim=-1)
            policies_probs = F.softmax(policies_logits.transpose(2, 1), dim=-1)
            # NOTE(review): one-hot width is players-1 — presumably actions
            # target the other players; confirm against the environment.
            one_hot_action = to_one_hot(action, num_columns=self.cfg.players - 1)
            action_prob = torch.sum(probs * one_hot_action, dim=-1)
            actions_policies_probs = torch.sum(policies_probs * one_hot_action, dim=-1)
            ope_values = actions_policies_probs / action_prob
            # prod of actions probs
            self.ope_values.append(ope_values.prod(-1).T)
            self.probs.append(action_prob)
            self.entropies.append(dist.entropy())
            self.values.append(value)
        return action
    def save_model(self) -> dict:
        """Checkpoint: trunk state dict, choosing head and the policy pool."""
        return dict(model=self.model.state_dict(),
                    choosing_layer=self.choosing_layer,
                    pool_weights=self.policies_weights,
                    pool_biases=self.policies_biases)
    def load_model(self, checkpoint: dict):
        """Restore the trunk and the policy pool; the pool is mandatory."""
        self.model.load_state_dict(checkpoint['model'])
        if [k for k in checkpoint if 'pool_' in k]:
            self.policies_weights = checkpoint['pool_weights']
            self.policies_biases = checkpoint['pool_biases']
        else:
            raise ValueError('Need pool_weights')
        self.model.eval()
    def parameters(self, recurse: bool = True):
        # Only the choosing head and the value head are optimized.
        return list(self.choosing_layer.parameters()) + list(self.model.value.parameters())
| Jlevan25/rl | agents/a2c_poolswim.py | a2c_poolswim.py | py | 4,447 | python | en | code | 0 | github-code | 13 |
24632814419 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import ruamel.yaml
class Database(dict):
    """A dict persisted to a YAML file.

    Loads the file's contents if it exists; otherwise initialises from
    *default* and writes it out. Call save() after mutating to persist.
    """

    yaml = ruamel.yaml.YAML()
    yaml.indent(mapping=2, sequence=4, offset=2)

    def __init__(self, filename, default=None):
        # default=None avoids the shared-mutable-default pitfall; callers that
        # pass a dict explicitly behave exactly as before.
        self.filename = filename
        initial = {} if default is None else default
        if os.path.isfile(filename):
            # Use a context manager so the file handle is closed (the original
            # leaked handles by passing bare open() results to load/dump).
            with open(self.filename, 'rt') as stream:
                super().__init__(Database.yaml.load(stream))
        else:
            super().__init__(initial)
            # save() writes the file, so the original's extra pre-dump of the
            # defaults was redundant.
            self.save()

    def __iter__(self):
        # Kept for interface compatibility; identical to plain dict iteration.
        return super().__iter__()

    def save(self):
        """Write the current contents to self.filename."""
        with open(self.filename, 'wt') as stream:
            Database.yaml.dump(dict(self), stream)
| artemk1337/NetflixShareBot | database.py | database.py | py | 686 | python | en | code | 0 | github-code | 13 |
7734928356 | import xlrd
from common import models
def load(filename, version, volume, rootname):
    """Import a course tree from the first sheet of an Excel workbook.

    The column index of a non-empty cell encodes the node's depth: a value in
    column j creates a node at level j+1 under the node currently on top of
    the stack; empty cells pop the stack. A value in column 0 restarts the
    path from the root.
    NOTE(review): Python 2 code (xrange); assumes the sheet's layout matches
    this push/pop convention — confirm against a sample workbook.
    """
    vol = models.BookVolume.objects.get(name=volume, version__name=version)
    wb = xlrd.open_workbook(filename)
    sheet = wb.sheets()[0]
    root = models.CourseTree.objects.create(volume=vol, name=rootname, level=0)
    parent_stack = [root]
    level = sheet.ncols
    # Row 0 is assumed to be a header and skipped.
    for i in xrange(1, sheet.nrows):
        fields = sheet.row_values(i)
        for j in xrange(0, level):
            if fields[j]:
                if j == 0:
                    # Column 0 restarts the hierarchy at the root.
                    parent_stack = [root]
                node = models.CourseTree.objects.create(
                    volume=vol, name=fields[j], level=j + 1,
                    parent=parent_stack[-1]
                )
                parent_stack.append(node)
            else:
                parent_stack.pop()
else:
parent_stack.pop()
def export(course_id, class_id):
    """Build a nested {id, name, children} tree for a course/class.

    Works in a single pass regardless of row order: a node's children list is
    created on first reference (by itself or by one of its children) and
    shared by identity, so children recorded before their parent row arrives
    are preserved. (The original created a fresh list for every node with
    `parent[ct.id] = []`, dropping any children registered earlier under that
    node's id when rows arrived child-before-parent.)
    """
    qs = models.CourseTree.objects.filter(volume__course=course_id, volume__classes=class_id)
    tree = {}
    children_by_id = {}
    for ct in qs:
        # Reuse the list if a child already created it for this node.
        children = children_by_id.setdefault(ct.id, [])
        if ct.parent_id:
            children_by_id.setdefault(ct.parent_id, []).append(
                dict(id=ct.id, name=ct.name, children=children)
            )
        else:
            # The root node becomes the returned tree.
            tree = dict(id=ct.id, name=ct.name, children=children)
    return tree
| fishmacs/yhbbg | bookbag/portal/coursetree.py | coursetree.py | py | 1,334 | python | en | code | 0 | github-code | 13 |
6948929184 | from typing import *
class Solution:
    def buildArray(self, target: List[int], n: int) -> List[str]:
        """Return the Push/Pop sequence that leaves exactly `target` on the
        stack while reading 1..n from the stream (LeetCode 1441).

        `n` is not needed explicitly: we stop once the last target value has
        been pushed. Skipped numbers each cost a Push immediately undone by a
        Pop.
        """
        operations = []
        current = 0
        for goal in target:
            # Push-then-pop every skipped number between current+1 and goal-1.
            for _ in range(current + 1, goal):
                operations.append('Push')
                operations.append('Pop')
            operations.append('Push')
            current = goal
        return operations
if __name__ == '__main__':
    # Quick manual check: expects ['Push', 'Pop', 'Push', 'Push', 'Push'].
    sol=Solution()
    target=[2,3,4]
    n=4
    print(sol.buildArray(target,n))
| Xiaoctw/LeetCode1_python | 栈/用栈操作构建数组_1441.py | 用栈操作构建数组_1441.py | py | 503 | python | en | code | 0 | github-code | 13 |
29436952478 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import List
def convert_to_absolute(number: float) -> float:
    """Return |number| without using the builtin abs() (exercise constraint)."""
    if number > 0:
        return number
    return -number
def use_prefixes() -> List[str]:
    """Build names by appending 'ack' to each of the prefix letters J..Q.

    Fix: the original accumulated into a local variable named `List`, which
    shadowed the typing.List import; renamed and replaced the manual loop
    with a comprehension.
    """
    prefixes, suffixe = 'JKLMNOPQ', 'ack'
    return [letter + suffixe for letter in prefixes]
def prime_integer_summation() -> int:
    """Return the sum of the first 100 prime numbers (2 through 541).

    Computed with a Sieve of Eratosthenes up to 541 — the 100th prime — so the
    primes no longer need to be hard-coded. The result is 24133, identical to
    summing the literal list.
    """
    limit = 541  # the 100th prime
    is_prime = [True] * (limit + 1)
    is_prime[0] = is_prime[1] = False
    for candidate in range(2, int(limit ** 0.5) + 1):
        if is_prime[candidate]:
            for multiple in range(candidate * candidate, limit + 1, candidate):
                is_prime[multiple] = False
    return sum(value for value, prime in enumerate(is_prime) if prime)
def factorial(number: int) -> int:
    """Return number! iteratively; factorial(0) == factorial(1) == 1."""
    product = 1
    # Counting down from `number` to 2 multiplies the same factors as the
    # original's ascending loop.
    for factor in range(number, 1, -1):
        product *= factor
    return product
def use_continue() -> None:
    """Print the numbers 1..10, skipping 5 (demonstrates the `continue` keyword)."""
    for number in range(1, 11):
        if number == 5:
            continue
        print(number)
def verify_ages(groups: List[List[int]]) -> List[bool]:
    """Return one accept/reject decision per group of visitor ages.

    Rules:
      1. A group must have 4 to 10 members; otherwise it is rejected outright
         (no other rule, including rule 4, can save it).
      2. Every member must be an adult (>= 18).
      3. A group may not contain both a 50-year-old and someone over 70.
      4. A member aged exactly 25 overrides rejections from rules 2 and 3.

    Fix: the original initialized the 50/over-70 flags outside the group loop
    and only reset them when both had fired, leaking state between groups
    (a group with a 50-year-old could wrongly reject a later group containing
    someone over 70). Flags are now reset for every group; results on the
    module's sample data are unchanged.
    """
    acceptance_list = []
    for group in groups:
        # Rule 1: size gate — too-small/too-large groups skip all other rules.
        if not 3 < len(group) <= 10:
            acceptance_list.append(False)
            continue
        is_accepted = True
        has_50 = False
        has_over_70 = False
        for member_age in group:
            if member_age < 18:
                is_accepted = False
                break  # mirrors the original's early exit on a minor
            elif member_age == 50:
                has_50 = True
            elif member_age > 70:
                has_over_70 = True
        if has_50 and has_over_70:
            is_accepted = False  # rule 3
        if 25 in group:
            is_accepted = True   # rule 4 override
        acceptance_list.append(is_accepted)
    return acceptance_list
def main() -> None:
    """Run every exercise demo and print its result (console output in French)."""
    number = -4.325
    print(f"La valeur absolue du nombre {number} est {convert_to_absolute(number)}")
    print(f"La liste des noms générés avec les préfixes est: {use_prefixes()}")
    print(f"La somme des 100 premiers nombre premier est : {prime_integer_summation()}")
    number = 10
    print(f"La factiorelle du nombre {number} est: {factorial(number)}")
    print(f"L'affichage de la boucle est:")
    use_continue()
    # Sample groups exercising every acceptance rule of verify_ages.
    groups = [
        [15, 28, 65, 70, 72], [18, 24, 22, 50, 70], [25, 2],
        [20, 22, 23, 24, 18, 75, 51, 49, 100, 18, 20, 20], [70, 50, 26, 28], [75, 50, 18, 25],
        [13, 25, 80, 15], [20, 30, 40, 50, 60], [75, 50, 100, 28]
    ]
    print(f"Les différents groupes sont: {groups}")
    print(f"L'acceptance des groupes est: {verify_ages(groups)}")
if __name__ == '__main__':
    main()
| INF1007-2021A/c03_ch5_exercices-sebroy-6 | exercice.py | exercice.py | py | 3,477 | python | en | code | 0 | github-code | 13 |
71254488017 | import requests # pip install requests
import json
from typing import Optional, List, Dict, Any
from pydantic import BaseModel, Field, validator # pip install pydantic
from tenacity import retry, stop_after_attempt, wait_fixed # pip install tenacity
from datetime import datetime as dt, timedelta, timezone
# Oregon DEQ air-quality endpoints (oraqi.deq.state.or.us).
STATION_URL = "https://oraqi.deq.state.or.us/ajax/getAllStationsWithoutFiltering"
DATA_URL = "https://oraqi.deq.state.or.us/report/GetMultiStationReportData"
# Both endpoints expect JSON-typed POST requests.
REQUEST_HEADERS = {"Content-Type": "application/json; charset=UTF-8"}
class Channel(BaseModel):
    """One measurement channel (pollutant/metric reading) within a station record."""
    display_name: str = Field(alias="DisplayName")  # server sends "DisplayName"
    id: int
    name: str
    alias: Optional[str]
    value: float
    status: int
    valid: bool
    description: Optional[str]
    value_date: Optional[str]
    units: str
class StationRecord(BaseModel):
    """All channel readings for one station at a single timestamp."""
    datetime: dt
    channels: List[Channel]
    @validator("datetime", pre=True) # pre lets us modify the incoming value before it's parsed by pydantic
    def fix_deq_date(cls, val: str):
        """
        Datetimes reported by DEQ have a reported UTC offset of -05:00 but are actually -08:00.
        First, we'll verify that the dates are still broken in the way we expect them to be;
        Second, we'll fix them before importing. This *should* work during both PST and PDT.

        Note on DEQ website (in footer of interactive reports page):
            Data on this site is presented in Standard Time at the time the measurement ended. There
            is no adjustment for Daylight Saving Time during its use from March to November.

        PST is UTC - 8 hours

        Further note that during DST, the adjusted times will not align with what is shown on the website
        because website times are in PST, but adjusted times may appear in PDT when converted from
        epoch time.
        """
        # Historical -05:00 rewrite, kept for reference; the feed currently
        # reports -07:00/-08:00 which pass through unchanged.
        # if "-05:00" in val: # Confirm data is still being reported with utcoffset -5 hours
        #     return val.replace("-05:00", "-08:00")
        if "-07:00" in val or "-08:00" in val:
            return val
        # Fail loudly if the feed's timezone convention changes again.
        raise Exception(f"Unexpected timezone in datetime: {val}")
"""
station_id: See bottom of this file for a list of valid station ides
from_timestamp, to_timestamp: specify in ISO datetime format: YYYY/MM/DDTHH:MM (e.g. "2018/05/03T00:00")
resolution: 60 for hourly data, 1440 for daily averages. Higher resolutions don't work, sorry, but lower-resolutions, such as 120, 180, 480, 720 will.
agg_method: These will *probably* all work: Average, MinAverage, MaxAverage, RunningAverage, MinRunningAverage, MaxRunningAverage, RunningForword, MinRunningForword, MaxRunningForword
"""
def get_data(station_id: int, from_timestamp: dt, to_timestamp: dt, resolution: int = 60, agg_method: str = "Average") -> List[StationRecord]:
    """Fetch records for one station over [from_timestamp, to_timestamp].

    resolution is minutes per reporting period (60 hourly, 1440 daily);
    agg_method is one of the DEQ report types (e.g. "Average"). Returns the
    records in chronological order. Timestamps are sent as UTC-suffixed
    strings — the site appears timezone-aware.
    """
    # Legacy GET-style query kept for reference; the endpoint now takes JSON.
    # count = 99999 # This should be greater than the number of reporting periods in the data range specified above
    # params = "Sid=" + str(station_id) + "&FDate=" + from_timestamp + "&TDate=" + to_timestamp + "&TB=60&ToTB=" + str(resolution) + "&ReportType=" + \
    # agg_method + "&period=Custom_Date&first=true&take=" + str(count) + "&skip=0&page=1&pageSize=" + str(count)
    # Collect every channel id the station reports, to request them all.
    channel_list: List[int] = list()
    stations = get_station_data()
    for station in stations:
        if station["serialCode"] == station_id:
            for monitor in station["monitors"]:
                channel_list.append(monitor["channel"])
            break
    payload = {
        "monitorChannelsByStationId": {
            str(station_id): channel_list
        },
        "reportName": "multi Station report",
        "startDateAbsolute": from_timestamp.strftime("%Y/%m/%dT%H:%MZ"), # UTC -- site seems timezone-aware
        "endDateAbsolute": to_timestamp.strftime("%Y/%m/%dT%H:%MZ"),
        "startDate": from_timestamp.strftime("%Y/%m/%dT%H:%MZ"),
        "endDate": to_timestamp.strftime("%Y/%m/%dT%H:%MZ"),
        "reportType": agg_method,
        "fromTb": resolution,
        "toTb": resolution,
        "monitorChannelsByStationId[0].Key": str(station_id),
        "monitorChannelsByStationId[0].Value": channel_list
    }
    req = post(DATA_URL, headers=REQUEST_HEADERS, data=json.dumps(payload))
    # Redundant with the raise inside post(), but harmless.
    req.raise_for_status()
    records = req.json()[0]["data"] # type: ignore
    all_records: List[StationRecord] = list()
    for record in records:
        all_records.append(StationRecord(datetime=record["datetime"], channels=record["channels"]))
    # This need not remain here permanently, but for now let's verify the data arrived in chronological order.
    # If this ever fails, we'll sort.
    for i, record in enumerate(all_records[1:]):
        assert all_records[i].datetime < record.datetime
    return all_records
# These fail a lot, so we'll try tenacity
@retry(stop=stop_after_attempt(10), wait=wait_fixed(10), reraise=True)
def post(*args: Any, **kwargs: Any) -> requests.Response:
    """HTTP POST with up to 10 attempts, 10 s apart; raises on error status."""
    response = requests.post(*args, **kwargs)  # type: ignore
    response.raise_for_status()
    return response
@retry(stop=stop_after_attempt(10), wait=wait_fixed(10), reraise=True)
def get(*args: Any, **kwargs: Any) -> requests.Response:
    """HTTP GET with up to 10 attempts, 10 s apart; raises on error status."""
    response = requests.get(*args, **kwargs)  # type: ignore
    response.raise_for_status()
    return response
def get_station_data() -> List[Dict[str, Any]]:
    """Fetch the raw station list; the endpoint expects a bodyless JSON POST."""
    return post(STATION_URL, headers=REQUEST_HEADERS).json()  # type: ignore
def get_station_names() -> Dict[int, str]:
    """Map station serialCode -> human-readable station name."""
    return {station["serialCode"]: station["name"] for station in get_station_data()}
"""
To get a current list of stations, print the output of deq_tools.get_station_names()
These station ids were current as of Sept 2020:
1: 'Tualatin Bradbury Court'
2: 'Portland SE Lafayette'
6: 'Portland Jefferson HS'
7: 'Sauvie Island'
8: 'Beaverton Highland Park'
9: 'Hillsboro Hare Field'
10: 'Carus Spangler Road'
11: 'Salem State Hospital'
12: 'Turner Cascade Junior HS'
13: 'Lyons Marilynn School'
14: 'Albany Calapooia School'
15: 'Sweet Home Fire Department'
16: 'Corvallis Circle Blvd'
17: 'Roseburg Garden Valley'
19: 'Grants Pass Parkside School'
20: 'Medford TV'
22: 'Provolt Seed Orchard'
23: 'Shady Cove School'
24: 'Talent'
26: 'Klamath Falls Peterson School'
27: 'Lakeview Center and M'
28: 'Bend Pump Station'
30: 'Baker City Forest Service'
31: 'Enterprise Forest Service'
32: 'La Grande Hall and N'
33: 'Pendleton McKay Creek'
34: 'The Dalles Cherry Heights School'
35: 'Cove City Hall'
37: 'Hermiston Municipal Airport'
39: 'Bend Road Department'
40: 'Madras Westside Elementary'
41: 'Prineville Davidson Park'
42: 'Burns Washington Street'
44: 'Silverton James and Western'
46: 'John Day Dayton Street'
47: 'Sisters Forest Service'
48: 'Cave Junction Forest Service'
49: 'Medford Welch and Jackson'
50: 'Ashland Fire Department'
56: 'Eugene Amazon Park'
57: 'Cottage Grove City Shops'
58: 'Springfield City Hall'
59: 'Eugene Saginaw'
60: 'Oakridge'
61: 'Eugene Wilkes Drive'
64: 'Portland Cully Helensview'
65: 'Eugene Highway 99'
67: 'Hillsboro Hare Field Sensor'
68: 'Hillsboro Hare Field Meteorology'
69: 'Forest Grove Pacific University'
75: 'Florence Forestry Department'
78: 'Portland Humboldt Sensors'
82: 'Chiloquin Duke Drive'
85: 'Redmond High School'
88: 'Coos Bay Marshfield HS'
90: 'Roseburg Fire Dept'
"""
| eykamp/deq_tools | deq_tools/__init__.py | __init__.py | py | 7,701 | python | en | code | 0 | github-code | 13 |
39130112036 | import json
from os import PathLike
from pathlib import Path
from typing import Dict, Type, Union
import yaml
from ...models.archive import ThreadInfo
from .archive import LocalArchive
from .detect import detect_archive_version
from .v2 import AV2LocalArchive, av2_load_threadInfo_json, av2_load_users_json
from .v3 import AV3LocalArchive, av3_load_info_yaml
__all__ = ("load_archive", "load_archive_thread_info_only")
# Archive format version -> loader class, newest first.
VERSION_CLASS_TABLE: Dict[int, Type[LocalArchive]] = {
    3: AV3LocalArchive,
    2: AV2LocalArchive,
}
def load_archive(
    path: Union[str, PathLike],
    *,
    auto_load: bool = True,
    auto_load_info: bool = True,
    auto_load_history: bool = False,
) -> LocalArchive:
    """Open the archive at *path* with the loader matching its detected version.

    Raises ValueError for non-archive directories or unsupported versions.
    """
    archive_dir = Path(path)
    version = detect_archive_version(archive_dir)
    if version is None:
        raise ValueError("Directory is not an valid archive.")
    archive_cls = VERSION_CLASS_TABLE.get(version)
    if archive_cls is None:
        raise ValueError("Version not supported.")
    return archive_cls(
        archive_dir,
        auto_load=auto_load,
        auto_load_info=auto_load_info,
        auto_load_history=auto_load_history,
    )
def load_archive_thread_info_only(path: Union[str, PathLike]) -> ThreadInfo:
    """Read just the ThreadInfo from an archive, skipping full archive loading.

    Raises ValueError for non-archive directories or unsupported versions.
    """
    path = Path(path)
    version = detect_archive_version(path)
    if version is None:
        raise ValueError("Directory is not an valid archive.")
    if version == 3:
        # v3 stores thread metadata in info.yaml.
        with open(path / "info.yaml", "r", encoding="utf-8") as info_rs:
            return av3_load_info_yaml(yaml.safe_load(info_rs.read()))[0]
    elif version == 2:
        # v2 splits user data and thread info across two JSON files.
        with open(path / "users.json", "r", encoding="utf-8") as users_rs:
            users = av2_load_users_json(json.loads(users_rs.read()))
        with open(path / "threadInfo.json", "r", encoding="utf-8") as info_rs:
            return av2_load_threadInfo_json(json.loads(info_rs.read()), users)[0]
    else:
        raise ValueError("Version not supported.")
| 283375/tieba-thread-archive | src/tieba_thread_archive/local/archive/load.py | load.py | py | 1,912 | python | en | code | 2 | github-code | 13 |
73111810257 | import math
# 5
# Write a function that accepts string from user and print all permutations of that string.
print("!!!TASK 5!!!")
from itertools import permutations
def find_permutations(str):
    """Print every permutation of the characters of *str*, one tuple per line,
    in lexicographic order (the characters are sorted first).

    NOTE: the parameter shadows the builtin `str`; name kept for interface
    compatibility with existing callers.
    """
    # sorted() replaces the original's manual char-list build + in-place sort.
    for permutation in permutations(sorted(str)):
        print(permutation)
find_permutations(input())
| Amayakof/PP2 | lab3/functions/5.py | 5.py | py | 389 | python | en | code | 0 | github-code | 13 |
37867006400 | from django.urls import include, path, re_path
from django.views.generic import TemplateView
from auth_users.views import current_user, change_user_data
urlpatterns = [
    # djoser authentication endpoints (base, JWT, social login).
    path('auth/', include('djoser.urls')),
    path('auth/', include('djoser.urls.jwt')),
    path('auth/', include('djoser.social.urls')),
    path('api/user/', current_user, name='current_user'),
    path('api/user/change/', change_user_data, name='change_user_data'),
    # The include is here because it does not work from inside the engine, due to djoser
    path('assets/', include('assets.urls')),
]
# Catch-all: serve the SPA's index.html for every other route.
urlpatterns += [re_path(r'^.*', TemplateView.as_view(template_name='index.html'))]
| jorgemustafa/gerenciador-de-investimentos | auth_users/urls.py | urls.py | py | 662 | python | en | code | 0 | github-code | 13 |
16027445054 | # 배열최소합
# SWEA 난이도 D3
# N x N 배열
# N 개의 숫자를 골라 합이 최소가 되도록
# 세로로 같은 줄에서 두 개 이상의 숫자를 고를 수 없다.
# 인덱스 배열을 받아 각 줄마다 인덱스 요소들의 합을 반환하는 함수
def select_sum(nums, graph):
    """Sum graph[row][nums[row]] over all rows — the total of one column pick per row."""
    return sum(graph[row][col] for row, col in enumerate(nums))
def dfs(depth, n, visited, nums, graph):
    """Backtrack over column permutations; at full depth, fold the pick's
    total into the module-level minimum `answer`."""
    global answer
    if depth == n:
        answer = min(answer, select_sum(nums, graph))
        return
    for col in range(n):
        if visited[col]:
            continue
        visited[col] = True
        nums.append(col)
        dfs(depth + 1, n, visited, nums, graph)
        nums.pop()
        visited[col] = False
# SWEA "minimum array sum": for each test case, try every one-column-per-row
# assignment (N! permutations via dfs) and report the minimum total.
t = int(input())
for tc in range(1, t+1):
    n = int(input())
    graph = [list(map(int, input().split())) for _ in range(n)]
    visited = [False for _ in range(n)]
    answer = int(10e9)  # "infinity" sentinel (note: 10e9 is 1e10)
    nums = []
    dfs(0, n, visited, nums, graph)
    print(f'#{tc} {answer}')
print(f'#{tc} {answer}') | joonann/ProblemSolving | python/202308/16/배열최소합.py | 배열최소합.py | py | 1,046 | python | ko | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.