seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
26038374198 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 14:24:45 2021
@author: Gustavo
"""
import requests
import time
import re
import ast
import numpy as np
import pandas as pd
import asyncio
import concurrent.futures
import nest_asyncio
import os.path
import random
from collections import defaultdict
import ujson
import functools
import json
# Load endpoint URLs, request headers/params and per-dataset key templates
# from the local config file. These module-level globals are shared by all
# request helpers below (and rewritten by update_bearer_token()).
with open("configWeb.json", "r") as json_file:
    config = json.load(json_file)
SIGN_IN_URL = config['SIGN_IN_URL']
KEYWORD_URL = config['KEYWORD_URL']
SUPPLIERS_URL = config['SUPPLIERS_URL']
PRODUCT_DATABASE_URL = config['PRODUCT_DATABASE_URL']
SIGN_IN_HEADERS = config['SIGN_IN_HEADERS']
SIGN_IN_PAYLOADS = config['SIGN_IN_PAYLOADS']
headers = config['headers']
params = config['params']
list_keys = config['list_keys']
def file_exists(file_name):
    """Return True when *file_name* refers to an existing regular file."""
    return os.path.isfile(file_name)
def get_bearer_token():
    """Sign in and return a fresh 'Bearer <token>' authorization string.

    Parses the token out of the raw response text by position, so it is
    tightly coupled to the sign-in endpoint's exact JSON layout.
    """
    response = requests.post(SIGN_IN_URL, headers=SIGN_IN_HEADERS, params=SIGN_IN_PAYLOADS, timeout=30)
    dic_str = response.text
    # Token value = third quote-delimited field after the literal 'token'.
    token = ' '.join(['Bearer', dic_str.split('token')[1].split('"')[2]])
    return token
def update_bearer_token():
    """Verify the stored bearer token and refresh it if it has expired.

    Probes KEYWORD_URL with the current ``headers``. On HTTP 401 a new
    token is fetched and the whole configuration is persisted back into
    configWeb.json so subsequent runs reuse it. Any other non-200 status
    aborts the program.
    """
    try:
        page = requests.get(KEYWORD_URL, headers=headers, params=params, timeout=20)
        if page.status_code == 200:
            print('Token is valid!')
            return
        # Reuse the exception path below for any non-200 status code.
        raise Exception(page.status_code)
    except Exception as inst:
        if inst.args[0] == 401:
            print('Updating bearer token...')
            headers['authorization'] = get_bearer_token()
            configweb = {'SIGN_IN_URL': SIGN_IN_URL,
                         'KEYWORD_URL': KEYWORD_URL,
                         'SUPPLIERS_URL': SUPPLIERS_URL,
                         'PRODUCT_DATABASE_URL': PRODUCT_DATABASE_URL,
                         'SIGN_IN_HEADERS': SIGN_IN_HEADERS,
                         'SIGN_IN_PAYLOADS': SIGN_IN_PAYLOADS,
                         'headers': headers,
                         'params': params,
                         'list_keys': list_keys}
            with open("configWeb.json", "w") as json_file:
                json.dump(configweb, json_file)
        else:
            # BUG FIX: the original built SystemExit(...) without raising it,
            # silently swallowing every non-401 failure.
            raise SystemExit(f'Error type {inst}')
def get_params(keyword, key, subkey, startFrom = None):
    """Fill the shared request-parameter template for *key* and return it.

    Raises ValueError when *keyword* is not a string.
    """
    if not isinstance(keyword, str):
        raise ValueError('the keyword needs to be a string!')
    params[key][subkey] = keyword
    if startFrom is not None:
        params[key]['from'] = startFrom
    return params[key]
def js_request(keyword, url = KEYWORD_URL, key = 'keyword', subkey = 'search_terms', startFrom = None):
    """GET *url* for *keyword* with up to 3 retries; return the page text.

    Retries on timeout, sleeps an hour on HTTP 429 (rate limit), refreshes
    the bearer token on HTTP 401, and gives up (returns None implicitly)
    on any other request error.
    """
    params = get_params(keyword, key, subkey, startFrom)
    for i in range(3):
        try:
            #page = requests.get(url, headers=headers, proxies={'http': proxy, 'https': proxy}, params=params, timeout=60)
            page = requests.get(url, headers=headers, params=params, timeout=25)
            page.raise_for_status()
        except requests.exceptions.Timeout as etmout:
            if i == 2:
                # NOTE(review): final timeout is deliberately swallowed
                # (SystemExit commented out) — function then returns None.
                pass#raise SystemExit(etmout)
            print('Request timed out. Trying to request again for {}. Try #{} of 3'.format(keyword, i+1))
            time.sleep(1)
        except requests.HTTPError as ehttp:
            # NOTE(review): `i != 4` is always true since i ranges over 0..2;
            # looks like a leftover from an earlier 5-try loop — confirm.
            if ehttp.response.status_code == 429 and i != 4:
                print('Reached request limit. Wait for 1 h.')
                time.sleep(3600)
                print('Requesting for page again. Try #{} of 4'.format(i+1))
            elif ehttp.response.status_code == 401:
                print('The Bearer token is invalid. Getting a new one...')
                try:
                    update_bearer_token()
                    print('Bearer Token has been updated')
                    time.sleep(1)
                except:
                    pass
            else: print(ehttp)
        except requests.exceptions.RequestException as err:
            print(err)
            break
        else:
            # Successful request: hand back the raw response body.
            return page.text
def transform_list_of_dicts(text, data_type = 'keyword', appendKeyword = None):
    """Parse a raw API response body into a list of Python dicts.

    The response is munged with regexes (null -> -99999 sentinel,
    true/false -> True/False) and split into individual record strings
    that are evaluated with ast.literal_eval. Records that fail to parse
    are silently skipped. Returns [] for non-string input.
    """
    if not isinstance(text, str):
        print('Page text returned belongs to {}'.format(type(text)))
        return []
    # JSON null has no Python literal; substitute a sentinel integer that
    # the transform_*_to_pd helpers later map back to NaN.
    text = re.sub(r'(?<=:)null', r'-99999', text)
    if data_type.lower() == 'keyword':
        text = re.sub(r'(?<=:)true', r'True', text)
        text = re.sub(r'(?<=:)false', r'False', text)
        new_text = re.split(r',(?={"score")', text)
        new_text[0] = re.split(r'\[(?={"score")', new_text[0])[1]
        new_text[-1] = new_text[-1].split(']')[0]
    elif data_type.lower() == 'supplier':
        # BUG FIX: replacement must be a raw string — '\g<0>' in a normal
        # string is an invalid escape sequence (DeprecationWarning, and a
        # SyntaxError in future Python versions).
        text = re.sub(r'"total_shipments":\d+\}(?=\]|,)', r'\g<0> !split!', text)
        new_text = text.split('!split!,')
        new_text[-1] = new_text[-1].split('!split!]')[0]
        new_text[0] = new_text[0].split('"data":[')[1]
    elif data_type.lower() == 'product database':
        text = re.sub(r'(?<=:)true', r'True', text)
        text = re.sub(r'(?<=:)false', r'False', text)
        total, new_text = text.split('"products":[')
        new_text = new_text.split('],"categories"')[0]
        new_text = re.split(r',(?={"id")', new_text)
        total_count = int(re.search(r'(?<="total_count":)\d+', total).group(0))
    list_of_dicts = []
    for rep in new_text:
        try:
            data_to_append = ast.literal_eval(rep)
            if appendKeyword is not None:
                data_to_append['Parent Keyword'] = appendKeyword
                # NOTE: total_count only exists for 'product database';
                # for other types this raises and the record is skipped,
                # matching the original behaviour.
                data_to_append['Total NOP for the keyword'] = total_count
            list_of_dicts.append(data_to_append)
        except Exception:
            pass
    return list_of_dicts
def build_list_of_dicts(keyword, url = KEYWORD_URL, key = 'keyword', subkey = 'search_terms'):
    """Fetch and parse API records for *keyword* (or each item of a list).

    Returns a list of dicts; an empty list on request/parsing failure.
    Raises ValueError for an empty keyword list.
    """
    if isinstance(keyword, list):
        if not keyword:
            raise ValueError('Keyword list must not be empty')
        else:
            ans_list = []
            # BUG FIX: the original mapped over `key` (the literal string
            # 'keyword'), i.e. over its characters, instead of the list of
            # keywords that was actually passed in.
            list_dics = map(build_list_of_dicts, keyword)
            for element in list_dics:
                ans_list.extend(element)
            return ans_list
    print('requesting for {}'.format(keyword))
    if key in ['keyword', 'supplier']:
        try:
            data = js_request(keyword, url, key, subkey)
            return transform_list_of_dicts(data, data_type = key)
        except Exception:
            return []
    elif key == 'product database':
        data = []
        # NOTE(review): range(0, 100, 100) yields only 0, so just the first
        # page is fetched — looks like a pagination stub; confirm intent.
        for start in range(0, 100, 100):
            try:
                data.append(js_request(keyword, url, key, subkey, startFrom=start))
            except Exception:
                pass
        ans = []
        if data:
            for rep in data:
                ans.extend(transform_list_of_dicts(rep, data_type = key, appendKeyword = keyword))
        return ans
# Patch the event loop so run_until_complete() can be called even when a
# loop is already running (e.g. inside Jupyter).
nest_asyncio.apply()
async def concurrent_builder_dicts(taxonomy_words, key = 'keyword', m_workers=10):
    """Fan build_list_of_dicts() out over *taxonomy_words* on a thread pool.

    Returns the concatenation of every per-word result list. Failures in
    individual words are swallowed and simply contribute nothing.
    """
    list_of_dicts = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=m_workers) as executor:
        try:
            loop = asyncio.get_event_loop()
        except:
            pass
        try:
            if key == 'keyword':
                futures = [
                    loop.run_in_executor(executor, build_list_of_dicts, word)
                    for word in taxonomy_words
                ]
            elif key == 'product database':
                # functools.partial is needed because run_in_executor only
                # forwards positional arguments to the callable.
                futures = [
                    loop.run_in_executor(executor, functools.partial(build_list_of_dicts, word, PRODUCT_DATABASE_URL, key, 'include_keywords'))
                    for word in taxonomy_words
                ]
        except:
            pass
        for response in await asyncio.gather(*futures):
            try:
                for element in response:
                    list_of_dicts.append(element)
            except:
                pass
    return list_of_dicts
def paralel_builder(taxonomy_words, key = 'keyword', m_workers=10):
    """Synchronous wrapper: run concurrent_builder_dicts() to completion.

    Returns [] if obtaining the event loop or gathering results fails.
    """
    list_dict_keywords = []
    try:
        loop = asyncio.get_event_loop()
        list_dict_keywords = loop.run_until_complete(concurrent_builder_dicts(taxonomy_words, key = key, m_workers=m_workers))
    except:
        pass
    return list_dict_keywords
def remove_duplicates_from_list(lst_keywords, key = 'id'):
    """De-duplicate dicts on d[key], keeping the first occurrence of each.

    Insertion order of first occurrences is preserved.
    """
    seen = {}
    for entry in lst_keywords:
        seen.setdefault(entry[key], entry)
    return list(seen.values())
def save_file(data, file_name, method = 'json'):
    """Serialise *data* to '<file_name>.<extension>'.

    method 'json'/'ujson' writes JSON text via ujson; 'pkl'/'pickle'
    writes a binary pickle. (The original passed the binary handle to
    ujson, which can never work — ujson writes str, not bytes.)
    """
    way = 'w'
    if method == 'ujson':
        extension = 'json'
    else:
        extension = method
    if method in ['pkl', 'pickle']:
        way = 'wb'
    file_name = '.'.join([file_name, extension])
    with open(file_name, way) as file:
        if way == 'wb':
            import pickle  # local import: only needed on the pickle path
            pickle.dump(data, file)
        else:
            ujson.dump(data, file)
def webscraping_keywords(taxonomy_word):
    """Crawl related keywords starting from *taxonomy_word*.

    Repeatedly requests keyword suggestions, seeds the next round with N
    randomly chosen names from the previous batch, and stops once 200,000
    raw records were collected. Returns the de-duplicated record list.
    """
    update_bearer_token()
    N = 3
    temp_names = taxonomy_word
    list_of_keywords = []
    # 2*10**5 = target number of raw (pre-deduplication) records
    while len(list_of_keywords) < 2*10**5:
        if (isinstance(temp_names, list) or isinstance(temp_names, np.ndarray)) and len(temp_names):
            try:
                iter_list = paralel_builder(temp_names)
                if not iter_list:
                    print('Error in getting new keywords! Trying again...')
                    # Restart from the original seed word(s)
                    temp_names = taxonomy_word
                else:
                    list_of_keywords.extend(iter_list)
                    # Pick up to N random results to seed the next round
                    if len(iter_list)<= N:
                        indices = list(range(len(iter_list)))
                    else:
                        indices = random.sample(range(0, len(iter_list)-1), N)
                    iter_list = [iter_list[ind] for ind in indices]
                    temp_names = [d['name'] for d in iter_list]
            except:
                pass
        else :
            # Single (string) seed: request it directly
            temp = build_list_of_dicts(temp_names)
            list_of_keywords.extend(temp)
            temp_names = [d['name'] for d in temp]
    print('------------- Over ---------------')
    ans = remove_duplicates_from_list(list_of_keywords)
    return ans
def webscraping_products(keyword_list):
    """Scrape product-database records for the given keyword(s).

    *keyword_list* may be a single string or a non-empty list of strings.
    Returns a de-duplicated list of product dicts. Raises ValueError for
    any other input type.
    """
    update_bearer_token()
    N = 5
    list_of_products = []
    if isinstance(keyword_list, str):
        list_of_products = paralel_builder(keyword_list, key = 'product database')
    elif isinstance(keyword_list, list) and keyword_list:
        # Process the keywords in batches of N parallel requests; the slice
        # keyword_list[i:i+N] safely truncates at the end of the list.
        for i in range(0, len(keyword_list), N):
            keyword_names = keyword_list[i:i+N]
            try:
                iter_list = paralel_builder(keyword_names, key = 'product database')
                if not iter_list:
                    print('Error in getting products containing the keywords {}!'.format(keyword_names))
                else:
                    list_of_products.extend(iter_list)
            except Exception:
                pass
    else:
        raise ValueError('The keywords provided need to be a string or a list of strings!')
    print('------------- Over ---------------')
    try:
        ans = remove_duplicates_from_list(list_of_products)
    except Exception:
        # BUG FIX: `ans` was left unbound when deduplication failed, so the
        # return below raised NameError; fall back to the raw list instead.
        ans = list_of_products
    return ans
def transform_keyword_to_pd(list_of_data, sort_by = 'exactSearchVolume'):
    """Convert scraped keyword dicts into a DataFrame sorted by *sort_by*.

    NOTE(review): rows are accumulated into the module-level ``list_keys``
    template, so calling this twice mixes rows from both calls — confirm
    callers only invoke it once per run.
    """
    if not isinstance(list_of_data, list) or not list_of_data:
        raise ValueError('The data given must be a non-empty list')
    for element in list_of_data:
        # Keys missing from this record are padded with NaN so all columns
        # stay the same length.
        list_to_null = np.setdiff1d(list(list_keys['keyword'].keys()),list(element.keys()))
        if len(list_to_null):
            for null_key in list_to_null:
                list_keys['keyword'][null_key].append(np.nan)
        for non_null_key, value in element.items():
            # -99999 is the sentinel substituted for JSON null during parsing
            value_to_add = np.nan if isinstance(value,int) and value == -99999 else value
            list_keys['keyword'][non_null_key].append(value_to_add)
    temp_pd = pd.DataFrame( columns = list_keys['keyword'].keys() )
    for key, value in list_keys['keyword'].items():
        if key in ['exactSearchVolume', 'broadSearchVolume']:
            temp_pd[key] = np.array(value).astype(np.int32)
        else: temp_pd[key] = np.asarray(value)
    return temp_pd.sort_values(by=sort_by, ascending=False)#.set_index('name')
def transform_supplier_to_pd(list_of_data, sort_by = 'total_shipments'):
    """Convert scraped supplier dicts into a DataFrame sorted by *sort_by*.

    NOTE(review): like transform_keyword_to_pd, this appends into the
    module-level ``list_keys`` template — repeated calls accumulate rows.
    """
    if not isinstance(list_of_data, list) or not list_of_data:
        raise ValueError('The data given must be a non-empty list')
    for element in list_of_data:
        for non_null_key, value in element.items():
            list_keys['supplier'][non_null_key].append(value)
    temp_pd = pd.DataFrame( columns = list_keys['supplier'].keys() )
    for key, value in list_keys['supplier'].items():
        if key == 'latest_shipment':
            temp_pd[key] = np.array(value).astype(np.int32)
        else: temp_pd[key] = np.asarray(value)
    return temp_pd.sort_values(by=sort_by, ascending=False).set_index('name')
def transform_product_database_to_pd(list_of_data, sort_by = 'net'):
    """Convert scraped product dicts into a DataFrame sorted by *sort_by*.

    NOTE(review): appends into the module-level ``list_keys`` template —
    repeated calls accumulate rows from all calls.
    """
    if not isinstance(list_of_data, list) or not list_of_data:
        raise ValueError('The data given must be a non-empty list')
    for element in list_of_data:
        # Pad keys missing from this record with NaN
        list_to_null = np.setdiff1d(list(list_keys['product database'].keys()),list(element.keys()))
        if len(list_to_null):
            for null_key in list_to_null:
                list_keys['product database'][null_key].append(np.nan)
        for non_null_key, value in element.items():
            # -99999 is the sentinel substituted for JSON null during parsing
            value_to_add = np.nan if isinstance(value,int) and value == -99999 else value
            list_keys['product database'][non_null_key].append(value_to_add)
    temp_pd = pd.DataFrame( columns = list_keys['product database'].keys() )
    for key, value in list_keys['product database'].items():
        if key in ['hasVariants', 'isUnavailable', 'noParentCategory', 'multipleSellers']:
            temp_pd[key] = np.array(value).astype(np.int32)
        else:
            temp_pd[key] = np.asarray(value)
    return temp_pd.sort_values(by=sort_by, ascending=False)#.set_index('name')
| gusvilalima/JungleScoutWeb | productScraper.py | productScraper.py | py | 14,137 | python | en | code | 0 | github-code | 13 |
32842083792 | import glob
import os
import shutil
import sys
import termios
import time
import tty
import uuid
from shutil import copyfile
import subprocess
import common.color
from common.color import WARNING_PREFIX, ERROR_PREFIX, FAIL, WARNING, ENDC, OKGREEN, BOLD, OKBLUE, INFO_PREFIX, OKGREEN
from common.debug import logger
__author__ = 'Sergej Schumilo'
class Singleton(type):
    """Metaclass caching exactly one instance per class.

    The first call constructs the instance; every later call returns it,
    ignoring any new constructor arguments.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
def atomic_write(filename, data):
    """Atomically replace *filename* with *data* (bytes).

    Writes to a uniquely-named temp file, fsyncs it and then moves it into
    place, so readers never observe a half-written file.
    """
    tmp_file = "/tmp/" + str(uuid.uuid4())
    # Context manager guarantees the handle is closed even if write/fsync fails
    # (the original leaked the handle on any exception).
    with open(tmp_file, 'wb') as f:
        f.write(data)
        f.flush()
        os.fsync(f.fileno())
    shutil.move(tmp_file, filename)
def read_binary_file(filename):
    """Return the full contents of *filename* as bytes.

    BUG FIX: the original accumulated binary chunks into a str literal
    (""), which raises TypeError under Python 3; accumulate bytes instead
    and join once at the end (avoids quadratic concatenation too).
    """
    chunks = []
    with open(filename, 'rb') as f:
        while True:
            buf = f.read(1024)
            if len(buf) == 0:
                break
            chunks.append(buf)
    return b"".join(chunks)
def find_diffs(data_a, data_b):
    """Return (first, last) indices at which *data_a* and *data_b* differ.

    Only the overlapping prefix is compared; (0, 0) is returned when the
    compared prefixes are identical.
    """
    first_diff = 0
    last_diff = 0
    found = False
    for i in range(min(len(data_a), len(data_b))):
        # BUG FIX: the original compared data_a[i] against the whole of
        # data_b (element vs sequence), so every position counted as a
        # difference; it also used `first_diff == 0` as a sentinel, which
        # mishandles a first difference at index 0.
        if data_a[i] != data_b[i]:
            if not found:
                first_diff = i
                found = True
            last_diff = i
    return first_diff, last_diff
def prepare_working_dir(directory_path):
    """Wipe and recreate the project working-directory layout.

    Removes any stale shared-memory files belonging to this project name
    and creates the empty corpus/metadata folder tree plus the lock/dump
    files the fuzzer processes expect to exist.
    """
    folders = ["/corpus/regular", "/metadata", "/corpus/crash", "/corpus/kasan", "/corpus/timeout", "/bitmaps",
               "/imports", "/snapshot", '/forced_imports']
    project_name = directory_path.split("/")[-1]
    shutil.rmtree(directory_path, ignore_errors=True)
    # Stale shared-memory segments from a previous run of this project
    for path in glob.glob("/dev/shm/kafl_%s_*" % project_name):
        os.remove(path)
    if os.path.exists("/dev/shm/kafl_tfilter"):
        os.remove("/dev/shm/kafl_tfilter")
    for folder in folders:
        os.makedirs(directory_path + folder)
    # Pre-create empty control files
    open(directory_path+"/filter", "wb").close()
    open(directory_path+"/page_cache.lock", "wb").close()
    open(directory_path+"/page_cache.dump", "wb").close()
    open(directory_path+"/page_cache.addr", "wb").close()
def copy_seed_files(working_directory, seed_directory):
    """Import seed files from *seed_directory* into <workdir>/forced_imports.

    Returns False when the seed directory is empty, True otherwise.
    NOTE(review): when the working directory is empty the function returns
    True WITHOUT copying anything — this looks inverted; confirm intent.
    """
    if len(os.listdir(seed_directory)) == 0:
        return False
    if len(os.listdir(working_directory)) == 0:
        return True
    i = 0
    for (directory, _, files) in os.walk(seed_directory):
        for f in files:
            path = os.path.join(directory, f)
            if os.path.exists(path):
                # Seeds are renamed to sequential seed_00000, seed_00001, ...
                atomic_write(working_directory + "/forced_imports/" + "seed_%05d" % i, read_binary_file(path))
                #copyfile(path, working_directory + "/imports/" + "seed_%05d" % i)
                i += 1
    return True
def print_warning(msg):
    """Print *msg* in bold yellow with a [WARNING] prefix and flush."""
    sys.stdout.write("\033[0;33m\033[1m[WARNING] {}\033[0m\n".format(msg))
    sys.stdout.flush()
def print_fail(msg):
    """Print *msg* in bold red with a [FAIL] prefix and flush."""
    sys.stdout.write("\033[91m\033[1m[FAIL] {}\033[0m\n".format(msg))
    sys.stdout.flush()
def print_pre_exit_msg(num_dots, clrscr=False):
    """Print the yellow 'Terminating Slaves' banner with 1-3 animated dots.

    The dot count cycles with *num_dots*; the dots are padded with spaces
    to a fixed width of 3 so the line length stays constant.
    """
    dots = "." * ((num_dots % 3) + 1)
    dots = dots.ljust(3)
    if clrscr:
        print('\x1b[2J')
    print('\x1b[1;1H' + '\x1b[1;1H' + '\033[0;33m' + "[*] Terminating Slaves" + dots + '\033[0m' + "\n")
def print_exit_msg():
    """Clear the screen and print the green goodbye banner."""
    banner = "[!] Data saved! Bye!"
    print('\x1b[2J' + '\x1b[1;1H' + '\033[92m' + banner + '\033[0m' + "\n")
def is_float(value):
    """Return True if *value* can be parsed by float()."""
    try:
        float(value)
    except ValueError:
        return False
    return True
def is_int(value):
    """Return True if *value* can be parsed by int()."""
    try:
        int(value)
    except ValueError:
        return False
    return True
def getch():
    """Read a single raw character from stdin (no Enter required).

    Temporarily switches the controlling terminal into raw mode and always
    restores the previous settings, even if the read raises.
    """
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(sys.stdin.fileno())
        ch = sys.stdin.read(1)
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def ask_for_permission(data, text, color='\033[91m'):
    """Interactively ask the user to type *data* to confirm an action.

    NOTE(review): the unconditional ``return True`` below disables the
    whole prompt — everything after it is dead code, presumably left in on
    purpose to skip confirmation; confirm before re-enabling.
    """
    return True
    ENDC = '\033[0m'
    print("Enter " + data + text)
    i = 0
    print(len(data) * '_'),
    while True:
        input_char = getch()
        # Check for CTRL+C
        if input_char == chr(0x3):
            print("")
            return False
        # Check for matching character
        if (data[i] == input_char):
            i += 1
            print("\r" + color + data[:i] + ENDC + (len(data) - i) * '_'),
            # Check if we are done here ...
            if i == len(data):
                break
    print("")
    return True
def execute(cmd, cwd, print_output=True, print_cmd=False):
    """Run *cmd* (argument list) in *cwd*, optionally echoing output.

    stderr lines are printed in red; a non-zero exit status prints a
    failure message. NOTE(review): stdout is drained completely before
    stderr, so a child that fills the stderr pipe first can deadlock —
    consider proc.communicate() instead.
    """
    if print_cmd:
        print(OKBLUE + "\t " + "Executing: " + " ".join(cmd) + ENDC)
    proc = subprocess.Popen(cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if print_output:
        while True:
            output = proc.stdout.readline()
            if output:
                print(output),  # NOTE(review): Python-2 style trailing comma; a no-op under Python 3
            else:
                break
        while True:
            output = proc.stderr.readline()
            if output:
                print(FAIL + output.decode("utf-8") + ENDC),
            else:
                break
    if proc.wait() != 0:
        print(FAIL + "Error while executing " + " ".join(cmd) + ENDC)
def to_real_path(relative_path):
return os.path.realpath(os.path.dirname(os.path.realpath(__file__ + "/../")) + "/" + relative_path) | nyx-fuzz/packer | packer/common/util.py | util.py | py | 5,308 | python | en | code | 15 | github-code | 13 |
71163200019 | from tests.TestCode import *
from Contracts.Contract import *
from hypothesis import given
from hypothesis.strategies import booleans
from pytest import raises
from Contracts.ContractLevel import OFF
def test_debugLevel():
    """Smoke-test: the contract-decorated sample functions run and return values."""
    assert QQQ(10,5) == 20
    assert T(2, 5, 2) == 10
    t = TTT(0)
    empty(29)
def test_code():
    """TTT.hey() must violate its postcondition and raise PostConditionError."""
    assert QQQ(10,5) == 20
    assert T(2, 5, 2) == 10
    t = TTT(0)
    #t.x = 2
    with raises(PostConditionError):
        t.hey()
@given(booleans())
def test_require(b):
    """require() raises PreConditionError exactly when the predicate fails."""
    c = require(lambda args: args.x == True)
    v = c(lambda x: x)
    if b == False:
        with raises(PreConditionError):
            v(b)
    else:
        v(b)
@given(booleans())
def test_ensure(b):
    """ensure() raises PostConditionError exactly when the result predicate fails."""
    c = ensure(lambda args, result: result == True)
    v = c(lambda x: x)
    if b == False:
        with raises(PostConditionError):
            v(b)
    else:
        v(b)
@given(booleans())
def test_invariant(b):
    """invariant() must check the class predicate around __init__ and every method."""
    class InvariantTest:
        def __init__(self, x):
            self.x = x
        def set(self, x):
            self.x = x
    c = invariant(lambda self: self.x == True)
    v = c(InvariantTest)
    if b == False:
        # Constructing with a violating value fails the post-invariant
        with raises(PostConditionError):
            O = v(b)
        O = v(True)
        O.x = False
        # Invariant already broken on entry -> precondition failure
        with raises(PreConditionError):
            O.set(b)
        O.x = True
        # Invariant broken by the call itself -> postcondition failure
        with raises(PostConditionError):
            O.set(b)
    else:
        O = v(b)
        O.set(b)
        O.x = False
        with raises(PreConditionError):
            O.set(b)
@types(x = int)
def v(x):
    """Module-level sample whose single argument is type-contracted to int."""
    pass
def test_types():
    """@types must reject a str argument and accept an int."""
    with raises(PreConditionError):
        v("")
    v(2)
39131354811 | import socket
from urllib.parse import urlparse
from 装饰器.无参装饰器 import outer
@outer
def get_url(url):
    """Fetch *url* with a minimal raw-socket HTTP/1.1 GET and print the response."""
    # Parse the URL into components
    url = urlparse(url)
    # Host (network location) part
    host = url.netloc
    # Path part; default to '/' for bare domains
    path = url.path
    if path == '':
        path = '/'
    # Open a TCP connection to port 80 (plain HTTP only)
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, 80))
    # Send the request; 'Connection: close' makes the server end the stream
    client.send('GET {} HTTP/1.1\r\nHost:{}\r\nConnection:close\r\n\r\n'.format(path, host).encode('utf8'))
    data = b''
    while True:
        d = client.recv(1024)
        if d:
            data += d
        else:
            break
    data = data.decode('utf-8')
    # Strip the response headers (currently disabled)
    # data = data.split('\r\n\r\n')[1]
    print(data)
    client.close()
# Demo entry point: fetch the Baidu homepage when run as a script.
if __name__ == '__main__':
    get_url('http://www.baidu.com')
| dsdcyy/python- | python进阶/网络编程/03socket模拟http请求.py | 03socket模拟http请求.py | py | 854 | python | en | code | 0 | github-code | 13 |
16056951661 | from sys import stderr, exit, argv
import numpy as np
from scipy.io import loadmat
import os
from os.path import isfile, isdir, realpath, dirname, exists
def preprocessStringerNeuropixelsData(data_path, output_path):
    """Extract V1 spike times from the Stringer Neuropixels recordings.

    Reads probe-location and spike .mat files, keeps only neurons located
    in primary visual cortex whose rate lies in (minRate, maxRate) and
    whose recording spans at least minRecLength seconds, and writes one
    .npy spike-time file per kept neuron plus validNeurons.npy.
    """
    # Only accept neurons with at least 40 minutes recording length
    minRecLength = 2400.
    rawDATA_DIR = '{}/neuropixels/raw'.format(data_path)
    # rawDATA_DIR = '/data.nst/share/data/stringer_spikes_neuropixels'
    basenameDataFile = 'spks{}_Feb18.mat'
    probeLocationsFileName = 'probeLocations.mat'
    probeBordersFileName = 'probeBorders.mat'
    numberOfProbes = 8
    probeLocations = loadmat('{}/{}'.format(rawDATA_DIR,
                                            probeLocationsFileName))
    probeBorders = loadmat('{}/{}'.format(rawDATA_DIR,
                                          probeBordersFileName), squeeze_me=True)
    # mouseNumber = 1 is Waksman
    mouseNumber = 1
    mouseName = str(
        probeLocations['probeLocations'][0][mouseNumber]['mouseName'][0])
    saveDATA_DIR = '{}/neuropixels/{}/V1'.format(output_path, mouseName)
    if not isdir('{}/neuropixels'.format(output_path)):
        os.mkdir('{}/neuropixels'.format(output_path))
    if not isdir(saveDATA_DIR):
        os.mkdir(saveDATA_DIR)
    if not isdir('{}/spks'.format(saveDATA_DIR)):
        os.mkdir('{}/spks'.format(saveDATA_DIR))
    # print("##### Mouse: {}".format(mouseName))
    spks = loadmat('{}/spks/{}'.format(rawDATA_DIR,
                                       basenameDataFile.format(mouseName)), squeeze_me=True)
    # find detailed areas from which was recorded in the respective mouse (Waksman)
    detailedAreas = np.array([])
    for probeNumber in range(numberOfProbes):
        ccfOntology = [name[0][0] for name in probeLocations['probeLocations']
                       [0][mouseNumber]['probe'][0][probeNumber]['ccfOntology']]
        detailedAreas = np.append(detailedAreas, np.unique(ccfOntology))
    detailedAreas = np.unique(detailedAreas)
    detailedAreasPrimaryVisualCortex = ['VISp2/3', 'VISp4', 'VISp5', 'VISp6b', 'VISp6a']
    # Save only spiketimes of neurons in primary visual cortex, and that satisfy the rate requirements
    validNeurons = []
    for probeNumber in range(numberOfProbes):
        print("### Probe {}".format(probeNumber))
        ccfCoords = probeLocations['probeLocations'][0][mouseNumber]['probe'][0][probeNumber]['ccfCoords']
        ccfOntology = [name[0][0] for name in probeLocations['probeLocations']
                       [0][mouseNumber]['probe'][0][probeNumber]['ccfOntology']]
        unsortedSptimes = spks['spks'][probeNumber][0]
        clusterIdentities = np.array(spks['spks'][probeNumber][1]) - 1  # start at 0 instead of 1
        # cluster heights in microns
        wHeights = spks['spks'][probeNumber][2]
        # Load spikes: group spike times by their cluster identity
        sptimes = [[] for cli in np.unique(clusterIdentities)]
        for sptime, cli in zip(unsortedSptimes, clusterIdentities):
            sptimes[cli] += [float(sptime)]
        Nneurons = len(wHeights)
        for neuron in range(Nneurons):
            # Spacing of electrodes is 20 mm, but two electrodes have the same height
            ccfIndex = int(wHeights[neuron] / 20 * 2)
            detailedArea = ccfOntology[ccfIndex]
            if detailedArea in detailedAreasPrimaryVisualCortex:
                spiketimes_neuron = np.sort(sptimes[neuron])
                t_start = spiketimes_neuron[0]
                t_end = spiketimes_neuron[-1]
                Trec = t_end - t_start
                rate = len(spiketimes_neuron) / Trec
                if (rate < maxRate and Trec > minRecLength) and rate > minRate:
                    validNeurons += [[probeNumber, neuron]]
                    np.save('{}/spks/spiketimes-{}-{}.npy'.format(saveDATA_DIR,
                                                                  probeNumber, neuron), np.sort(spiketimes_neuron))
    # Save dictionary of valid neurons used for the analysis
    np.save('{}/validNeurons.npy'.format(saveDATA_DIR), validNeurons)
def preprocessRetinaData(data_path, output_path):
    """Extract per-neuron spike times (seconds) from the retina recording.

    Writes one .npy spike-time file per neuron plus validNeurons.npy
    listing the neurons whose mean rate lies in (minRate, maxRate).
    """
    sampling_rate = 10000. # 10 kHz
    rawDATA_DIR = '{}/retina/raw/mode_paper_data/unique_natural_movie'.format(data_path)
    saveDATA_DIR = '{}/retina'.format(output_path)
    if not isdir(saveDATA_DIR):
        os.mkdir(saveDATA_DIR)
    if not isdir('{}/spks'.format(saveDATA_DIR)):
        os.mkdir('{}/spks'.format(saveDATA_DIR))
    data = loadmat('{}/data.mat'.format(rawDATA_DIR))
    # Neuron list
    neurons = data['data'][0][0][2][0][0][2][0]
    N_neurons = neurons[-1]
    # find valid neurons with 0.5Hz < rate < 10 Hz and save their spiketimes
    validNeurons = []
    for neuron in range(N_neurons):
        # Convert sample indices to seconds
        spiketimes_neuron = data['data'][0][0][2][0][0][1][0][neuron][0] / sampling_rate
        np.save('{}/spks/spiketimes_neuron{}.npy'.format(saveDATA_DIR, neuron), spiketimes_neuron)
        t_start = spiketimes_neuron[0]
        t_end = spiketimes_neuron[-1]
        Trec = t_end - t_start
        rate = spiketimes_neuron.size/Trec
        if rate > minRate and rate < maxRate:
            validNeurons += [neuron]
    np.save('{}/validNeurons.npy'.format(saveDATA_DIR), validNeurons)
    # Start and end times movie (probably)
    # T_0 = data['data'][0][0][3][0][0][0][0][0] / sampling_rate
    # T_f = data['data'][0][0][3][0][0][1][1][0] / sampling_rate
    # T = T_f - T_0
    # Description
    # data['data'][0][0][0]
    # Date
    # data['data'][0][0][1]
    # Sampling rate
    # print(data['data'][0][0][2][0][0][0])
    # Full data
    # data['data'][0][0][2][0][0][1]
    # Short data, but not really sure what this is. Spiketimes are not the same
    # data['data'][0][0][2][0][0][3]
def preprocessCA1Data(data_path, output_path):
    """Extract per-neuron spike times (seconds) from the CA1 recording.

    Keeps only single-unit clusters; writes one .npy spike-time file per
    such neuron plus validNeurons.npy for those whose mean rate lies in
    (minRate, maxRate).
    """
    rawDATA_DIR = '{}/CA1/raw'.format(data_path)
    saveDATA_DIR = '{}/CA1'.format(output_path)
    if not isdir(saveDATA_DIR):
        os.mkdir(saveDATA_DIR)
    if not isdir('{}/spks'.format(saveDATA_DIR)):
        os.mkdir('{}/spks'.format(saveDATA_DIR))
    data = loadmat('{}/ec014.277.spike_ch.mat'.format(rawDATA_DIR))
    sample_rate = 20000. # 20 kHz sampling rate in seconds
    sptimes = data['sptimes'][0] / sample_rate
    singleunit = data['singleunit'][0]
    end_times = data['t_end'].flatten() / sample_rate
    Nneurons = 85
    validNeurons = []
    for neuron in range(Nneurons):
        # singleunit == 1 marks well-isolated units in the .mat file
        if singleunit[neuron] == 1:
            spiketimes_neuron = sptimes[neuron].flatten()
            np.save('{}/spks/spiketimes_neuron{}.npy'.format(saveDATA_DIR, neuron), spiketimes_neuron)
            t_start = spiketimes_neuron[0]
            t_end = end_times[neuron]
            Trec = t_end - t_start
            rate = spiketimes_neuron.size/Trec
            if rate > minRate and rate < maxRate:
                validNeurons += [neuron]
    np.save('{}/validNeurons.npy'.format(saveDATA_DIR), validNeurons)
def preprocessCultureData(data_path, output_path):
    """Extract per-channel spike times (seconds) from the culture recording.

    Concatenates the five recording text files, converts timestamps to
    seconds, and writes one .npy spike-time file per channel (1..60) plus
    validNeurons.npy for channels with mean rate in (minRate, maxRate).
    """
    rawDATA_DIR = '{}/culture/raw'.format(data_path)
    saveDATA_DIR = '{}/culture'.format(output_path)
    if not isdir(saveDATA_DIR):
        os.mkdir(saveDATA_DIR)
    if not isdir('{}/spks'.format(saveDATA_DIR)):
        os.mkdir('{}/spks'.format(saveDATA_DIR))
    spiketimes1 = np.loadtxt("{}/L_Prg035_txt_nounstim.txt".format(rawDATA_DIR))
    spiketimes2 = np.loadtxt("{}/L_Prg036_txt_nounstim.txt".format(rawDATA_DIR))
    spiketimes3 = np.loadtxt("{}/L_Prg037_txt_nounstim.txt".format(rawDATA_DIR))
    spiketimes4 = np.loadtxt("{}/L_Prg038_txt_nounstim.txt".format(rawDATA_DIR))
    spiketimes5 = np.loadtxt("{}/L_Prg039_txt_nounstim.txt".format(rawDATA_DIR))
    spiketimes = np.append(spiketimes1, spiketimes2, axis=0)
    spiketimes = np.append(spiketimes, spiketimes3, axis=0)
    spiketimes = np.append(spiketimes, spiketimes4, axis=0)
    spiketimes = np.append(spiketimes, spiketimes5, axis=0)
    sample_rate = 24.03846169
    # Column 0: timestamps, column 1: channel/neuron id
    times = spiketimes.transpose()[0]
    neurons = spiketimes.transpose()[1]
    # spiketimes in seconds
    times = times/sample_rate/1000
    validNeurons = []
    for neuron in np.arange(1, 61):
        spiketimes_neuron = times[np.where(neurons == neuron)[0]]
        np.save('{}/spks/spiketimes_neuron{}.npy'.format(saveDATA_DIR, neuron), spiketimes_neuron)
        t_start = spiketimes_neuron[0]
        t_end = spiketimes_neuron[-1]
        Trec = t_end - t_start
        rate = spiketimes_neuron.size/Trec
        if (rate < maxRate and rate > minRate):
            validNeurons += [neuron]
    np.save('{}/validNeurons.npy'.format(saveDATA_DIR), validNeurons)
    len(validNeurons)  # NOTE(review): no-op expression, looks like a debugging leftover
# During preprocessing, only neurons with an average firing rate between minRate and maxRate (in Hz) are considered for the analysis.
minRate = 0.5
maxRate = 10.
# NOTE(review): argv is read at import time, so importing this module
# without CLI arguments raises IndexError — confirm it is script-only.
recorded_system = argv[1]
# If data_path not specified, use analysis_data of the repository
if len(argv) > 2:
    data_path = argv[2]
else:
    CODE_DIR = '{}/..'.format(dirname(realpath(__file__)))
    data_path = '{}/data'.format(CODE_DIR)
# If output_path not specified, use analysis_data of the repository
if len(argv) > 3:
    output_path = argv[3]
else:
    CODE_DIR = '{}/..'.format(dirname(realpath(__file__)))
    output_path = '{}/data'.format(CODE_DIR)
if __name__ == "__main__":
    # Dispatch to the preprocessing routine for the requested recording
    if recorded_system == 'V1':
        preprocessStringerNeuropixelsData(data_path, output_path)
    if recorded_system == 'retina':
        preprocessRetinaData(data_path, output_path)
    if recorded_system == 'CA1':
        preprocessCA1Data(data_path, output_path)
    if recorded_system == 'culture':
        preprocessCultureData(data_path, output_path)
| Priesemann-Group/historydependence | exe/preprocess_data.py | preprocess_data.py | py | 9,605 | python | en | code | 2 | github-code | 13 |
73585505617 | from tkinter import *
from typing import Match
from decimal import *
# Main window and the single-line display shared by every button callback.
root = Tk()
root.title("Simple Calculator")
root.iconbitmap('.../Calc/favicon.ico')
e = Entry(root, width=40, borderwidth=5)
e.grid(row=0, column=0, columnspan=4, padx=10, pady=10)
e.insert(0, "0")
def button_click(number):
    """Append *number* (a digit or '.') to the value shown in the display."""
    current = e.get()
    e.delete(0, END)
    print(current)
    # A '.' arriving on an empty/zero display becomes '0.'
    if number == '.' and (current == '0' or current == None):
        e.insert(0, '0.')
    # A lone leading '0' is replaced so we don't build numbers like '03'
    elif current == '0':
        e.insert(0, str(number))
    # Normal case: concatenate the new digit onto the current display text
    else:
        e.insert(0, str(current) + str(number))
def button_clear():
    """Reset the display to the initial '0'."""
    e.delete(0, END)
    e.insert(0, "0")
def button_dot():
    """Insert a decimal point, refusing a second '.' in the same number."""
    current = e.get()
    # Only one decimal separator is allowed per number
    if '.' in current:
        END  # NOTE(review): bare expression is a no-op, not a Tkinter action
    else:
        button_click('.')
def button_add():
    """Store the displayed value and mark the pending operation as addition."""
    first_number = e.get()
    # f_num/math are module-level state read later by button_equal()
    global f_num
    global math
    math = "addition"
    f_num = first_number
    e.delete(0,END)
    e.insert(0, "0")
def button_equal():
    """Apply the pending operation (set by the operator buttons) to the
    stored value ``f_num`` and the currently displayed value, and show
    the result. Uses Decimal so 0.1 + 0.2 style inputs stay exact.
    """
    second_number = e.get()
    e.delete(0, END)
    try:
        if math == "addition":
            e.insert(0, Decimal(f_num) + Decimal(second_number))
        if math == "subtraction":
            e.insert(0, Decimal(f_num) - Decimal(second_number))
        if math == "multiplication":
            e.insert(0, Decimal(f_num) * Decimal(second_number))
        if math == "division":
            e.insert(0, Decimal(f_num) / Decimal(second_number))
    except (DivisionByZero, InvalidOperation):
        # BUG FIX: dividing by zero (or an unparsable display value)
        # previously raised inside the Tk callback; show 'Error' instead.
        e.insert(0, "Error")
def button_subtract():
    """Store the displayed value and mark the pending operation as subtraction."""
    first_number = e.get()
    # f_num/math are module-level state read later by button_equal()
    global f_num
    global math
    math = "subtraction"
    f_num = first_number
    e.delete(0,END)
    e.insert(0, "0")
def button_multiply():
    """Store the displayed value and mark the pending operation as multiplication."""
    first_number = e.get()
    # f_num/math are module-level state read later by button_equal()
    global f_num
    global math
    math = "multiplication"
    f_num = (first_number)
    e.delete(0,END)
    e.insert(0, "0")
def button_divide():
    """Store the displayed value and mark the pending operation as division."""
    first_number = e.get()
    # f_num/math are module-level state read later by button_equal()
    global f_num
    global math
    math = "division"
    f_num = (first_number)
    e.delete(0,END)
    e.insert(0, "0")
def button_posNeg():
    """Toggle the sign of the number currently shown in the display."""
    current = e.get()
    # Already negative: strip the minus sign
    if '-' in current:
        f_num = current.replace('-', '')
    # Otherwise prepend one
    else:
        f_num= '-'+current
    e.delete(0, END)
    e.insert(0, str(f_num))
#Define the buttons
# NOTE: these assignments rebind the callback names (e.g. button_dot) to
# Button widgets; command= captured the function object first, so the
# callbacks still work after the rebinding.
button_1 = Button(root, text="1", padx=40, pady=20, command=lambda: button_click(1))
button_2 = Button(root, text="2", padx=40, pady=20, command=lambda: button_click(2))
button_3 = Button(root, text="3", padx=40, pady=20, command=lambda: button_click(3))
button_4 = Button(root, text="4", padx=40, pady=20, command=lambda: button_click(4))
button_5 = Button(root, text="5", padx=40, pady=20, command=lambda: button_click(5))
button_6 = Button(root, text="6", padx=40, pady=20, command=lambda: button_click(6))
button_7 = Button(root, text="7", padx=40, pady=20, command=lambda: button_click(7))
button_8 = Button(root, text="8", padx=40, pady=20, command=lambda: button_click(8))
button_9 = Button(root, text="9", padx=40, pady=20, command=lambda: button_click(9))
button_0 = Button(root, text="0", padx=40, pady=20, command=lambda: button_click(0))
button_dot = Button(root, text=".", padx=42, pady=20, command=button_dot)
button_add = Button(root, text="+", padx=39, pady=20, command=button_add)
button_equal = Button(root, text="=", padx=91, pady=20, command=button_equal)
button_clear = Button(root, text="Clear", padx=78, pady=20, command=button_clear)
button_posNeg = Button(root, text="+/-", padx=34, pady=20, command=button_posNeg)
button_subtract = Button(root, text="—", padx=37, pady=20, command=button_subtract)
button_multiply = Button(root, text="X", padx=39, pady=20, command=button_multiply)
button_divide = Button(root, text="/", padx=41, pady=20, command=button_divide)
# Put the buttons on the screen
button_1.grid(row=4, column=0)
button_2.grid(row=4, column=1)
button_3.grid(row=4, column=2)
button_4.grid(row=3, column=0)
button_5.grid(row=3, column=1)
button_6.grid(row=3, column=2)
button_7.grid(row=2, column=0)
button_8.grid(row=2, column=1)
button_9.grid(row=2, column=2)
button_0.grid(row=5, column=0)
button_dot.grid(row=5, column=1)
button_clear.grid(row=1, column=0, columnspan=2)
button_posNeg.grid(row=1, column=2, columnspan=1)
button_add.grid(row=4, column=3)
button_equal.grid(row=5, column=2, columnspan=2)
button_subtract.grid(row=3, column=3)
button_multiply.grid(row=2, column=3)
button_divide.grid(row=1, column=3)
# Disclaimer labels shown below the keypad
myLabel = Label(root, text="WARNING!!", wraplength=350, foreground="red", font=("Arial", 25)).grid(row=6, column=0, columnspan=4)
myLabel2 = Label(root, text="This is just a test version to show Tkinter and Pyinstaller for Python, this program is for demonstrative purposes only!", wraplength=350, foreground="red").grid(row=7, column=0, columnspan=4)
root.mainloop() | amacher/calculator | calculator.py | calculator.py | py | 5,059 | python | en | code | 0 | github-code | 13 |
29343884049 | '''
Numa eleição existem três candidatos. Faça um programa que peça o número total de eleitores.
Peça para cada eleitor votar e ao final mostrar o número de votos de cada candidato.
'''
# Three-candidate election tally (exercise): ask for the total number of
# voters, collect one vote per voter, then report per-candidate counts.
# (User-facing prompt strings are kept in Portuguese — they are runtime text.)
total_eleitores = int(input('Numero de eleitores: '))
# One list per candidate; each vote is appended to the matching list
canditato_A = []
canditato_B = []
canditato_C = []
for i in range(total_eleitores):
    voto = input('Escolha entre os canditador A, B ou C ')
    # Accept upper- or lower-case votes; anything else is silently ignored
    if(voto == 'A' or voto =='a'):
        canditato_A.append(voto)
    elif(voto == 'B' or voto == 'b'):
        canditato_B.append(voto)
    elif(voto == 'C' or voto == 'c'):
        canditato_C.append(voto)
print('O canditato A teve: ',len(canditato_A))
print('O canditato B teve: ',len(canditato_B))
print('O canditato C teve: ',len(canditato_C)) | paulocesarcsdev/ExerciciosPython | 3-EstruturaDeRepeticao/26.py | 26.py | py | 756 | python | pt | code | 0 | github-code | 13 |
21909724545 | """
Implementations of network analyses, like contact networks or adjacency matrices.
"""
import numpy as np
from sklearn.neighbors import KDTree
from pepe.preprocess import circularMask
from pepe.analysis import gSquared
def adjacencyMatrix(centers, radii, contactPadding=5, neighborEvaluations=6):
    """
    Calculate the (unweighted) adjacency matrix, aka neighbor matrix,
    of a set of particles (centers and radii).

    For finding centers and radii, see `pepe.tracking`.

    Uses a kd-tree so that each particle is only tested against its
    `neighborEvaluations` nearest candidates instead of every other
    particle.

    Parameters
    ----------
    centers : np.ndarray[N,2]
        A list of N centers of format [y, x].

    radii : np.ndarray[N]
        A list of N radii, corresponding to each particle center.

    contactPadding : int
        Maximum difference between distance and sum of radii for which
        two particles will still be considered in contact.

    neighborEvaluations : int
        How many of the closest points to query from the kd-tree and test
        as potential contacts. For homogeneous circles, or approximately
        homogenous (< 2:1 size ratio), 6 should be plenty.

    Returns
    -------
    adjMat : np.ndarray[N,N]
        Unweighted adjacency matrix.
    """
    numParticles = len(centers)
    adjMat = np.zeros((numParticles, numParticles))

    # Only nearby particles can possibly touch, so query the kd-tree for
    # the closest neighbors of every center. The +1 accounts for each
    # point finding itself; with very few particles we may not even have
    # that many candidates.
    tree = KDTree(centers, leaf_size=10)
    neighborDists, neighborInds = tree.query(
        centers, k=min(neighborEvaluations + 1, numParticles))

    # Mark a (symmetric) contact whenever the gap between two particles is
    # within the padding of their summed radii.
    for i in range(numParticles):
        for dist, j in zip(neighborDists[i], neighborInds[i]):
            if radii[i] + radii[j] + contactPadding > dist:
                adjMat[i, j] = 1
                adjMat[j, i] = 1

    return adjMat
def weightedAdjacencyMatrix(photoelasticSingleChannel, centers, radii, contactPadding=5, g2MaskPadding=1, contactThreshold=.1, neighborEvaluations=6):
    """
    Calculate a weighted adjacency matrix of a system of particles
    based on the average G^2 of each particle.

    For finding centers and radii, see `pepe.tracking`.

    For unweighted adjacency matrix, see `pepe.analysis.adjacencyMatrix()`.

    Parameters
    ----------
    photelasticSingleChannel : np.uint8[H,W]
        A single channel image in array form of the photoelastic response of particles.

    centers : np.ndarray[N,2]
        A list of N centers of format [y, x].

    radii : np.ndarray[N]
        A list of N radii, corresponding to each particle center

    contactPadding : int
        Maximum difference between distance and sum of radii for which
        two particles will still be considered in contact.

    g2MaskPadding : int
        Number of pixels to ignore at the edge of each particle when calculating the average
        G^2.

    contactThreshold : float
        The neighbor weight value under which edges will be removed from the network,
        as they would be considered too weak to represent anything physical. This will
        help remove particle neighbors that are only barely touching, but not transmitting
        any real force.

    neighborEvaluations : int
        How many of the closest points to find via the kd tree and test
        as potential contacts. For homogeneous circles, or approximately
        homogenous (< 2:1 size ratio), 6 should be plenty.

    Returns
    -------
    adjMat : np.ndarray[N,N]
        Weighted adjacency matrix, normalized so the largest entry is 1.
    """
    # First, we calculate the unweighted network
    unweightedAdjMat = adjacencyMatrix(centers, radii, contactPadding, neighborEvaluations)

    # Calculate g^2 for entire image
    gSqr = gSquared(photoelasticSingleChannel)

    particleGSqrArr = np.zeros(len(centers))

    # Apply a circular mask to each particle, and sum the gradient squared
    # for each one
    for i in range(len(centers)):
        mask = circularMask(photoelasticSingleChannel.shape, centers[i], radii[i] - g2MaskPadding, channels=None)
        # Divide by radius squared, since we could have particles of different sizes
        particleGSqrArr[i] = np.sum(gSqr * mask) / np.sum(mask)

    # Outer product, so average gradients are all multiplied together
    weightedAdjMat = np.multiply.outer(particleGSqrArr, particleGSqrArr) * unweightedAdjMat

    # Clean up the matrix by normalizing the diagonals
    maxValue = np.max(weightedAdjMat)
    for i in range(len(centers)):
        weightedAdjMat[i,i] = maxValue

    weightedAdjMat /= np.max(weightedAdjMat)

    # And remove all edges below the cutoff threshold
    weightedAdjMat[weightedAdjMat < contactThreshold] = 0

    return weightedAdjMat
| Jfeatherstone/pepe | pepe/analysis/NetworkAnalysis.py | NetworkAnalysis.py | py | 5,046 | python | en | code | 1 | github-code | 13 |
74300184338 | import gym
import numpy as np
import random
env = gym.make("Pendulum-v0")
#env = gym.make("MountainCar-v0")
LEARNING_RATE = 0.1
DISCOUNT = 0.95
EPISODES = 24100
SHOW_EVERY = 3000
epsilon = 0.2
START_EPSILON_DECAY = 1
END_EPSILON_DECAY = 25000
epsilon_decay = epsilon/(END_EPSILON_DECAY - START_EPSILON_DECAY)
DISCRETE_OS_SIZE = [20]*len(env.observation_space.high)
#print(DISCRETE_OS_SIZE)
discrete_os_win_size = (env.observation_space.high+1 - env.observation_space.low)/DISCRETE_OS_SIZE
ACTION_CHOICES = 1000
action_space = np.linspace(int(env.action_space.low), int(env.action_space.high), ACTION_CHOICES)
#print(action_space)
'''
q_table = np.random.uniform(low = -2, high = 0, size = (DISCRETE_OS_SIZE + [len(action_space)]))
'''
q_table = np.load(f"qtables_pendulum/8_{24000}-qtable.npy")
#print(q_table.shape)
def get_discrete_state(state):
    """Map a continuous gym observation onto integer bucket indices.

    Uses the module-level `env` bounds and `discrete_os_win_size` bucket
    widths; the returned tuple is used directly as a Q-table index.
    """
    discrete_state = (state - env.observation_space.low)/discrete_os_win_size
    return tuple(discrete_state.astype(int))
count = 0  # episodes that ended with reward >= -0.1 (counted as successes)
for episode in range(EPISODES):
    discrete_state = get_discrete_state(env.reset())
    print(episode)
    render = False
    if episode % SHOW_EVERY == 0:
        # Periodically render the episode, checkpoint the Q-table to disk
        # and report the running success rate.
        render = True
        np.save(f"qtables_pendulum/9_{episode}-qtable.npy", q_table)
        print(f"Success Rate ==> {count*100/(episode+1)}")
    done = False
    while not done:
        # Epsilon-greedy action selection over the discretized action space.
        if np.random.random() > epsilon:
            action = [action_space[np.argmax(q_table[discrete_state])]]
            action_index = np.argmax(q_table[discrete_state])
        else:
            action = [random.choice(action_space)]
            # NOTE(review): np.where returns a tuple of arrays here, unlike
            # the scalar argmax in the greedy branch -- verify the Q-table
            # indexing below behaves as intended on this branch.
            action_index = np.where(action_space == action[0])
        #print(action)
        new_state, reward, done, _ = env.step(action)
        new_discrete_state = get_discrete_state(new_state)
        if render:
            env.render()
        if not done:
            # Standard tabular Q-learning update.
            max_future_q = np.max(q_table[new_discrete_state])
            current_q = q_table[discrete_state + (action_index,)]
            new_q = current_q*(1 - LEARNING_RATE) + LEARNING_RATE*(reward + DISCOUNT * max_future_q)
            q_table[discrete_state + (action_index, )] = new_q
        elif reward >= -0.1:
            # Episode ended close enough to the goal: count it as a success.
            count += 1
            #print(f"We made it on episode {episode} ==> {count*100/(episode+1)}")
            q_table[discrete_state + (action_index, )] = 0
        discrete_state = new_discrete_state
    # Linearly decay epsilon over the configured episode window.
    if END_EPSILON_DECAY >= episode >= START_EPSILON_DECAY:
        epsilon -= epsilon_decay
env.close()
| spacebot29/PendulumRL | GymPendulum-QLearning.py | GymPendulum-QLearning.py | py | 2,495 | python | en | code | 0 | github-code | 13 |
17046175144 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
# Fields of the create-workflow request. This single tuple drives
# __init__, the generated properties and both (de)serialization helpers,
# replacing ~15 lines of hand-written boilerplate per field.
_WORKFLOW_FIELDS = (
    'auth_feature_tables',
    'check_sample_tables',
    'contract_id',
    'description',
    'domain_account',
    'org_info',
    'project_id',
    'project_name',
    'review_history',
)


class AlipaySecurityRiskGravityWorkflowCreateModel(object):
    """Request model for the alipay.security.risk.gravity.workflow.create API.

    Every field defaults to None and is exposed through a plain get/set
    property backed by a '_<name>' attribute, exactly like the other SDK
    request models.
    """

    def __init__(self):
        for name in _WORKFLOW_FIELDS:
            setattr(self, '_' + name, None)

    def to_alipay_dict(self):
        """Serialize all set (truthy) fields to a plain dict.

        Values that expose their own to_alipay_dict() are serialized
        recursively; everything else is passed through unchanged.
        """
        params = dict()
        for name in _WORKFLOW_FIELDS:
            value = getattr(self, name)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[name] = value.to_alipay_dict()
                else:
                    params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for falsy input."""
        if not d:
            return None
        o = AlipaySecurityRiskGravityWorkflowCreateModel()
        for name in _WORKFLOW_FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o


def _workflow_property(name):
    # Plain get/set accessors over the backing '_<name>' attribute,
    # identical in behavior to the hand-written @property pairs they replace.
    attr = '_' + name

    def _get(self):
        return getattr(self, attr)

    def _set(self, value):
        setattr(self, attr, value)

    return property(_get, _set)


for _field in _WORKFLOW_FIELDS:
    setattr(AlipaySecurityRiskGravityWorkflowCreateModel, _field,
            _workflow_property(_field))
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipaySecurityRiskGravityWorkflowCreateModel.py | AlipaySecurityRiskGravityWorkflowCreateModel.py | py | 5,357 | python | en | code | 241 | github-code | 13 |
25146338026 |
# 剪裁1280x720
import os
import cv2
import numpy as np
import numpy.linalg as npl
def colorSim(c1, c2, thr):
    """True when the first three (color) channels of two pixels are closer
    than `thr` in Euclidean distance; any alpha channel is ignored."""
    channelDiff = c1[:3] - c2[:3]
    return npl.norm(channelDiff) < thr
def isVertBlack(img, x):
    """True if column `x` looks black along a coarse vertical sample.

    Samples every 10th row (skipping a 10 px margin at top and bottom) and
    requires each sampled pixel to be within tolerance 30 of pure black.
    """
    for y in range(10, img.shape[0] - 10, 10):
        pixel = img[y, x]
        if (not colorSim(pixel, (0, 0, 0), 30)):
            return False
    return True
def findHorBorder(img):
    """Locate the left/right edges of the non-black content area.

    Scans inwards from both sides in coarse `interval`-pixel steps to
    bracket the border, then refines pixel-by-pixel. Returns (l, r) where
    l is the first non-black column and r is one past the last one.

    NOTE(review): if no non-black column is found within 500 px of an edge
    the coarse loop never breaks, leaving startX as None (left side) or
    stale (right side), and the refinement loop will misbehave -- callers
    assume the image has visible content inside a black frame.
    """
    interval = 10
    startX = None
    l = None
    r = None
    # Coarse scan from the left: first sampled column that is not black.
    for x in range(0, 500, interval):
        if (not isVertBlack(img, x)):
            startX = x - interval
            break
    # Refine: exact first non-black column within the bracketed interval.
    for x in range(startX, startX + interval + 1):
        if (not isVertBlack(img, x)):
            l = x
            break
    # Coarse scan from the right edge inwards.
    for x in range(img.shape[1]-1, img.shape[1]-1 - 500, -interval):
        if (not isVertBlack(img, x)):
            startX = x + interval
            break
    # Refine: r is one past the last non-black column (usable as a slice end).
    for x in range(startX, startX - interval - 1, -1):
        if (not isVertBlack(img, x)):
            r = x + 1
            break
    return (l, r)
# Crop every image in the working directory to its non-black content area
# and resize the result back to 1280x720, overwriting the file in place.
print(os.listdir())
for fname in os.listdir():
    img = cv2.imread(fname)
    # cv2.imread returns None for non-image files; skip those.
    if (np.any(img == None)):
        continue
    h = img.shape[0]
    w = img.shape[1]
    if (h != 720 or w != 1280):
        print(fname)
    l, r = findHorBorder(img)
    print(l, r)
    # Trim one pixel top/bottom plus the detected side borders, then rescale.
    res = cv2.resize(img[1:h-1, l:r], (1280, 720))
    cv2.imwrite(fname, res)
| xdedss/cvmaj | templates/multiple/crop.py | crop.py | py | 1,358 | python | en | code | 3 | github-code | 13 |
16025338977 | from functools import lru_cache
import bson
from core.config import settings
from core.db import get_mongo_worker_client
from pymongo.collection import Collection
from pymongo.database import Database
class MongoDbManager:
    """Thin wrapper around a Mongo database for the worker's collections."""

    def __init__(self, mongo_db: Database):
        # NOTE: despite the attribute name, this holds a Database handle,
        # not a MongoClient.
        self.mongo_client: Database = mongo_db

    def insert_one(self, collection_name: str, data: dict):
        """Insert `data` into the collection; return its ObjectId or None."""
        collection: Collection = self.mongo_client[collection_name]
        result = collection.insert_one(data)
        inserted_id = result.inserted_id
        if inserted_id:
            return inserted_id
        return None

    def update_status_by_id(self, collection_name: str, id_: str, status: int):
        """Set status_id on the document whose _id matches `id_` (hex string)."""
        collection: Collection = self.mongo_client[collection_name]
        filter_ = {'_id': bson.ObjectId(id_)}
        update = {'$set': {'status_id': status}}
        collection.update_one(filter_, update)

    def update_like_by_id(self, collection_name: str, id_: bson.ObjectId):
        """Increment the document's content.likes_count_new counter by one."""
        collection: Collection = self.mongo_client[collection_name]
        filter_ = {'_id': id_}
        update = {'$inc': {'content.likes_count_new': 1}}
        collection.update_one(filter_, update)

    def check_like(self, collection_name: str, user_id: str, review_id: int):
        """Return the like document for (user, review), or None if absent."""
        collection: Collection = self.mongo_client[collection_name]
        filter_ = {'content.user_id': user_id, 'content.review_id': review_id}
        result = collection.find_one(filter=filter_)
        return result
@lru_cache()
def get_db_manager():
    """Build (once, memoized via lru_cache) the MongoDbManager for the
    worker database configured in settings."""
    mongo_client = get_mongo_worker_client()
    mongo_db = mongo_client[settings.worker_mongo_db]
    return MongoDbManager(mongo_db)
| montekrist0/notifications_sprint_1 | worker/src/services/db_manager.py | db_manager.py | py | 1,651 | python | en | code | 1 | github-code | 13 |
7524169549 | from django.contrib import admin
from .models import Model, User
# Register your models here.
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
    """Admin config for User: filter by username, show login info in the
    list view, expose the basic identity fields on the edit form."""
    list_filter = ('username',)
    list_display = ('username', 'email', 'last_login')
    fields = ('username',
              'email',
              'password',
              'first_name',
              'last_name',)
@admin.register(Model)
class ModelAdmin(admin.ModelAdmin):
    """Admin config for Model: read-only id, filter by name/owner, show
    the descriptive and licensing fields."""
    readonly_fields = ('id',)
    list_filter = ('name', 'owner',)
    list_display = ('name', 'description', 'owner', 'license')
    fields = ('id',
              'name',
              'description',
              'owner',
              'authors',
              'model_gltf_file',
              'link',
              'license',
              'license_link')
| opensim-org/opensim-viewer | src/backend/backend/backend/admin.py | admin.py | py | 791 | python | en | code | 7 | github-code | 13 |
44736992014 | import csv
from .helpers import Importer, fetch
def run():
    """Fetch the Code for IATI country-code CSV and import it.

    Appends Kosovo (XK), which is missing upstream, sorts rows by English
    name, then hands everything to the generic Importer.
    """
    url = 'https://codeforiati.org/country-codes/country_codes.csv'
    # (target field, source CSV column) pairs for the Importer.
    lookup = [
        ('code', 'code'),
        ('name_en', 'name_en'),
        ('name_fr', 'name_fr'),
    ]
    r = fetch(url)
    reader = csv.DictReader(r.iter_lines(decode_unicode=True))
    countries = [{
        'code': x['code'],
        'name_en': x['name_en'],
        'name_fr': x['name_fr'],
    } for x in reader]
    countries.append({
        'code': 'XK',
        'name_en': 'Kosovo',
        'name_fr': '',
    })
    countries = sorted(countries, key=lambda x: x['name_en'])
    Importer('Country', 'countries', lookup, source_data=countries)
if __name__ == '__main__':
run()
| codeforIATI/codelist-updater | importers/country.py | country.py | py | 731 | python | en | code | 2 | github-code | 13 |
10545954145 | # from cipherart import logo
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n").lower()
text = input("Type your message:\n").lower()
shift = int(input("Type the shift number:\n"))
def ceasar(start_text, shift_amount, cip_direction):
start_text = ""
for letter in text:
position = alphabet.index(letter)
if direction == "encode":
new_position = position + shift_amount
elif direction == "decode":
new_position = position - shift_amount
new_letter = alphabet[new_position]
start_text += new_letter
print(f"The {cip_direction}d message is {start_text}")
shift = shift % 26 # in case user puts in number greater than 26
ceasar(text, shift, direction)
| antwolfe/Python-Projects | cipher.py | cipher.py | py | 1,089 | python | en | code | 0 | github-code | 13 |
14183302100 | """
This Code Controll The Arena Arena
"""
from Parameters import *
class Arena():
    """Draws the checkerboard playing field and the watermark logo."""
    def __init__(self, gridsize, screen):
        """
        :param gridsize: side length in pixels of one grid square (see Parameters.py)
        :param screen: pygame surface the arena is drawn on
        """
        self.gridSize = gridsize
        self.screen = screen
        # NOTE(review): stored but never read inside this class.
        self.direction = [UP,DOWN, RIGHT, LEFT]
        self.createGrid()
        self.drawLogo()
    def drawLogo(self):
        """
        Draw the 5x5-cell watermark logo in the side panel.
        :return: None
        """
        logo = pygame.image.load("Pictures/Logo.png")
        logo = pygame.transform.scale(logo, (5*GRID_SIZE,5*GRID_SIZE))
        self.screen.blit(logo, (SCREEN_WIDTH+SCREEN_WIDTH2-50,30))
    def createGrid(self):
        """
        Paint the arena as a chess-board grid; the two square colors are
        GRID_COLOR_1 / GRID_COLOR_2 from Parameters.
        :return: None
        """
        for x in range(int(SCREEN_WIDTH/self.gridSize)):
            for y in range(int(SCREEN_HEIGHT/self.gridSize)):
                # Alternate colors on the checkerboard parity of (x+y).
                if (x+y)%2 == 0:
                    r = pygame.Rect((x*self.gridSize, y*self.gridSize), (self.gridSize,self.gridSize))
                    pygame.draw.rect(self.screen, GRID_COLOR_1, r)
                else:
                    rr = pygame.Rect((x*self.gridSize, y*self.gridSize), (self.gridSize,self.gridSize))
                    pygame.draw.rect(self.screen, GRID_COLOR_2, rr)
def drawScore(screen,score,best, gen):
    """
    Draw the score, best score and generation counter in the side panel.
    :param screen: pygame surface to draw on
    :param score: integer, current score
    :param best: integer, best score so far
    :param gen: integer, current generation number
    :return: None
    """
    pygame.font.init()
    font = pygame.font.Font('freesansbold.ttf', 20)
    bestImage = pygame.image.load("Pictures/Throphy.png")
    scoreImage = pygame.image.load("Pictures/Tomatos.png")
    scoreText = font.render("X"+str(score), True, (255, 255, 255))
    bestText = font.render("X" + str(best), True, (255, 255, 255))
    genText = font.render("Gen: " + str(gen), True, (255, 255, 255))
    screen.blit(genText, (SCREEN_WIDTH + SCREEN_WIDTH2 - 40, SCREEN_HEIGHT - 30))
    screen.blit(scoreText, (SCREEN_WIDTH + 170, SCREEN_HEIGHT-30))
    screen.blit(bestText, (SCREEN_WIDTH + 70, SCREEN_HEIGHT - 30))
    screen.blit(bestImage, (SCREEN_WIDTH+40,SCREEN_HEIGHT-40))
    screen.blit(scoreImage, (SCREEN_WIDTH+140,SCREEN_HEIGHT-40))
def drawNeuralNetwork(screen, input, weights, output):
    """
    Visualize the neural network; activations are rescaled to 0-255 so they
    can drive circle opacity.
    :param screen: pygame surface to draw on
    :param input: 24-element vector of the snake's wall/food/body sensors (array)
    :param weights: weights and biases (array with size [[Weights],[Biases]])
    :param output: feedforward result used to pick the direction
    :return: None
    """
    # NOTE(review): relies on the module-level `shape` (layer sizes) and the
    # SCREEN_* constants from Parameters.
    pygame.font.init()
    font = pygame.font.Font('freesansbold.ttf', 13)
    dirList = ["UP", "RIGHT", "DOWN", "LEFT"]
    input = np.array(input).reshape(-1)
    # Normalize inputs to 0-255 (all-zero input maps to zero opacity).
    input = 255*(input-np.min(input))/(np.max(input)-np.min(input)) if sum(input) !=0 else [0 for _ in range(24)]
    top5 = np.argsort(input)[-5:]
    pygame.font.init()
    stepY = 22
    stepX = 60
    #Draw Neuron
    for i in range(len(shape)):
        for j in range(shape[i]):
            # Center each layer vertically; layers advance left-to-right.
            y = int((SCREEN_HEIGHT-shape[i]*stepY)/2) + stepY*j
            x = 20 + SCREEN_WIDTH + i*stepX
            pygame.gfxdraw.filled_circle(screen, x, y, 7, (255,255,255)) # draw neuron
            if i == 0:
                """Draw Basic Circle"""
                pygame.gfxdraw.filled_circle(screen, x, y, 7, (124, 253, 0, input[j]))
            elif i == 1:
                if j in top5:
                    """Draw first layer Green Circle"""
                    # NOTE(review): this rebinds `input` to the first-layer
                    # weights, so later neurons use the rescaled weights
                    # rather than the sensor vector -- confirm intended.
                    input = np.array(weights[0][0][j]).reshape(-1)
                    input = 255 * (input - np.min(input)) / (np.max(input) - np.min(input))
                    pygame.gfxdraw.filled_circle(screen, x, y, 7, (124, 153, 25, input[j]))
            if i < len(shape) - 1:
                """Draw Lines"""
                for z in range(shape[i + 1]):
                    if z % 3 == 0:
                        x2 = 30 + SCREEN_WIDTH + (i + 1) * stepX
                        y2 = int((SCREEN_HEIGHT - shape[i + 1] * stepY) / 2) + stepY * z
                        pygame.gfxdraw.line(screen, x, y, x2, y2, (255, 0, 25, 40))
                    else:
                        x2 = 20 + SCREEN_WIDTH + (i + 1) * stepX
                        y2 = int((SCREEN_HEIGHT - shape[i + 1] * stepY) / 2) + stepY * z
                        pygame.gfxdraw.line(screen, x, y, x2, y2, (0, 255, 155, 40))
            if i == 3:
                """Draw Output Teks and Green Circle indating direction"""
                directionText = font.render(dirList[j], True, (255, 255, 255))
                screen.blit(directionText, (x+20,y))
                # Highlight the chosen direction (argmax of the output layer).
                if j == np.argmax(output):
                    pygame.gfxdraw.filled_circle(screen, x, y, 7, (125, 253, 25, 255))
| Verietoto/Playing-Snake-Using-Artificial_Intelligence-and-Neural_Network | Arena.py | Arena.py | py | 5,298 | python | en | code | 0 | github-code | 13 |
71645480339 | from urllib import response
import discord
from discord.ext import commands, tasks
import random
from itertools import cycle
import os
from discord import Color
from discord.utils import get
import asyncio
token = 'YOUR_API_KEY'
#sets what the bot is intended for, set to all as default
intents = discord.Intents().all()
#sets the prefix for commands
client = commands.Bot(command_prefix = ["."], intents=intents)
client.remove_command('help')
#A list of statuses for the bot to cycle between
status = cycle(['Garrett', 'Garretting'])
#moods for garrett to be in for the day
moods = ["happy", "sad", "mad", "bored", "fearful", "lonely"]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#EVENTS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#runs when the bot is activated
@client.event
async def on_ready():
    """Start the background status/mood loops once the bot is connected."""
    change_status.start()
    update_mood.start()
    print("Garrett")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#COMMANDS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#used to check the latency of the bot
@client.command()
async def ping(ctx):
    """Reply with the bot's websocket latency in milliseconds."""
    await ctx.send(f"Pong! {round(client.latency * 1000)}ms")
#allows the 8ball (or ask command) to function
@client.command(aliases=["8ball", "ask"])
async def _8ball(ctx, *, question):
    """Answer with a random magic-8-ball style response.

    `question` is required by the command signature but never read; the
    special "garrettdance.mp4" entry sends the video instead of text.
    """
    responses = ["Garrett thinks: **You should go for it!**",
                "Garrett thinks: **Maybe you should think on that a little more.**",
                "Garrett thinks: **No.**",
                "Garrett thinks: **Yes.**",
                "Garrett thinks: **It's' your funeral.**",
                "Garrett has no: **Thoughts, only dance.**",
                "Garrett thinks of bagels while you ask your question.",
                "Garrett thinks: **Without a doubt!**",
                "Garrett thinks: **It is certain!**",
                "Garrett thinks: **Most likely!**",
                "Garrett thinks: **Don't count on it.**",
                "Garrett thinks: **Ask again later, I'm napping.**",
                "garrettdance.mp4",
                "Garrett thinks: **nothing**"]
    response = random.choice(responses)
    if response == "garrettdance.mp4":
        await ctx.send(f"{ctx.author.mention}", file=discord.File(r"./garrettdance.mp4"))
    else:
        await ctx.send(f"{ctx.author.mention} {response}")
#the backdoor into the bot that allows you to have the bot send messages to a specific channel
@client.command()
@commands.has_role("mod")
async def sendmsg(ctx, channel, *, message):
    """Relay `message` to the channel with the given numeric ID (mod-only)."""
    await client.get_channel(int(channel)).send(f"{message}")
    await ctx.send(f"{ctx.author.mention} Your message **{message}** has been sent to channel ID **{channel}**!")
#just has the bot send the gif to the client the command was used
@client.command()
async def GarrettDance(ctx):
    """Post the dance video in the channel the command was used in."""
    await ctx.send(f"{ctx.author.mention}", file=discord.File(r"./garrettdance.mp4"))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#TASKS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#changes the bots active status every 30 minutes
@tasks.loop(minutes=30)
async def change_status():
    """Rotate the bot's presence through the `status` cycle every 30 minutes."""
    await client.change_presence(activity=discord.Game(next(status)))
#updates the mood of Garrett every day
@tasks.loop(hours=24)
async def update_mood():
    """Once a day pick a random mood plus a mood-themed reason and post it
    to the hard-coded mood channel."""
    mood = random.choice(moods)
    # Fallback reason, used when the chosen mood has no themed list below.
    reason = "There's no particular reason, just because I'm quirky like that."
    if mood == "happy":
        happyReasons = ["My dance moves are killer today!",
                        "There was a butterfly and I chased it across this field! It was fun!",
                        "I made a friend and we danced together for a while!",
                        "There's no particular reason, just because I'm quirky like that."]
        reason = random.choice(happyReasons)
    elif mood == "sad":
        sadReasons = ["I just can't boogy today...",
                      "I ran out of catfood...",
                      "Someone slipped some catnip in my drink last night...",
                      "Etherium is down 24%...",
                      "I gained 3 pounds...",
                      "There's no particular reason, just because I'm quirky like that.",
                      "My favorite toy just broke..."]
        reason = random.choice(sadReasons)
    elif mood == "mad":
        madReasons = ["I LOST ALL MY MONEY IN CRYPTO!",
                      "There's no particular reason, just because I'm quirky like that.",
                      "Dream is too hot...",
                      "This fucking tail behind me won't leave me alone!"]
        reason = random.choice(madReasons)
    elif mood == "bored":
        boredReasons = ["I no longer feel the need to seek entertainment, I stimulated myself too much with 'toys' and 'catnip' that I have lost all sense of joy in life... I've become a hollow shell with no reason whatsoever... I dance, but with no feeling, I boogy, but with no purpose, no intent... What am I? Also there's nothing good on TV...",
                        "I just want to sleep.",
                        "These toys aren't that much fun anymore.",
                        "There's nothing good to watch on TV.",
                        "I can't think of any dance moves to bust it down to."]
        reason = random.choice(boredReasons)
    elif mood == "fearful":
        fearfulReasons = ["There was a dog right outside my door, what the hell!?",
                          "Fucking vacuume, sucky bitch.",
                          "There was a weird snake thing on the ground, it didn't move and smelled like salt. It was scary!"]
        reason = random.choice(fearfulReasons)
    elif mood == "lonely":
        lonelyReasons = ["No one wants to watch me dance...",
                         "The weird giant human thing that lives in my house went to work.",
                         "The other cats scoffed at me when I tried to talk to them..."]
        reason = random.choice(lonelyReasons)
    # NOTE(review): channel ID is hard-coded; move to config if reused.
    await client.get_channel(1026282390743683215).send(f"""I am feeling **{mood}** today. {reason}""")
#Runs the bot
async def main():
    """Entry point: run the bot with the configured token until shutdown."""
    async with client:
        await client.start(token)
asyncio.run(main())
36569422921 | # 2126 - procurando subsequencias
# algoritmo KMP
def computar_prefixo(padrao, m, prefixo):
    """Fill `prefixo` in place with the KMP failure function of padrao[:m].

    prefixo[i] is the length of the longest proper prefix of padrao[:i+1]
    that is also a suffix of it.
    """
    matched = 0  # length of the current border (prefix == suffix)
    prefixo[0] = 0
    pos = 1
    while pos < m:
        if padrao[pos] == padrao[matched]:
            matched += 1
            prefixo[pos] = matched
            pos += 1
        elif matched:
            # Mismatch: fall back to the next shorter border, keeping pos.
            matched = prefixo[matched - 1]
        else:
            # No border at all for this position.
            prefixo[pos] = 0
            pos += 1
def busca_subsequencia(n1, n2, caso):
    """KMP search of pattern `n1` inside text `n2`; prints the report for
    test case number `caso`.

    Reports the number of occurrences and the 1-based start position of the
    last occurrence, or a "no subsequence" message when there are none.
    """
    m = len(n1)
    n = len(n2)
    prefixo = [0] * m
    computar_prefixo(n1, m, prefixo)
    conta_ocorrencias = 0
    pos_ultima_ocorrencia = -1
    j = 0
    i = 0
    while i < n:
        if n1[j] == n2[i]:
            i += 1
            j += 1
        if j == m:
            # Full match ending at i-1: i-j+1 is its 1-based start.
            conta_ocorrencias += 1
            pos_ultima_ocorrencia = i-j+1
            # Continue searching for overlapping occurrences.
            j = prefixo[j-1]
        elif i < n and n1[j] != n2[i]:
            if j != 0:
                j = prefixo[j - 1]
            else:
                i += 1
    print('Caso #{}:'.format(caso))
    if conta_ocorrencias == 0:
        print('Nao existe subsequencia\n')
    else:
        print('Qtd.Subsequencias: {}'.format(conta_ocorrencias))
        print('Pos: {}\n'.format(pos_ultima_ocorrencia))
def main():
    """Read pattern/text line pairs from stdin until EOF, searching each pair."""
    try:
        caso = 1
        while True:
            n1 = input()
            n2 = input()
            busca_subsequencia(n1, n2, caso)
            caso += 1
    except EOFError:
        # End of input: no more test cases.
        pass
main()
| ltakuno/arquivos | python/URI/Basico/uri2126.py | uri2126.py | py | 1,434 | python | pt | code | 0 | github-code | 13 |
class StackNode():
    """One link of a singly linked stack: a value plus the node beneath it."""

    def __init__(self, value, nxt):
        self.value = value
        self.next = nxt

    def __repr__(self):
        # Value of the node below, if any. The and/or chain means a falsy
        # next-value (e.g. 0 or "") also renders as None, matching the
        # original behavior.
        below = self.next and self.next.value or None
        return f'[{self.value}:{repr(below)}]'
class Stack():
    """Singly linked LIFO stack built from StackNode links."""

    def __init__(self):
        # Top of the stack; None when empty.
        self.top = None

    def push(self, obj):
        """Place `obj` on top of the stack.

        The original branched on empty/non-empty and built the same node
        either way; a new node pointing at the old top (None when empty)
        covers both cases.
        """
        self.top = StackNode(obj, self.top)

    def pop(self):
        """Remove and return the top value, or None if the stack is empty."""
        if self.top is None:
            return None
        node = self.top
        self.top = node.next
        return node.value

    def topp(self):
        """Return the top value without removing it (None when empty)."""
        return self.top.value if self.top is not None else None

    def count(self):
        """Number of items on the stack (O(n) walk of the links)."""
        total = 0
        node = self.top
        while node is not None:
            total += 1
            node = node.next
        return total
| LowTechTurtle/More_Python_THW | data_structure/ex17/ex15_stack.py | ex15_stack.py | py | 1,001 | python | en | code | 0 | github-code | 13 |
11217100976 | from django.urls import path
from . import views
# Blog URL routes: post detail, posts by category, search, plus the
# category/post creation forms.
urlpatterns = [
    path('detail/<int:id>', views.detail_post, name="detail-post"),
    path('category/<ctg_name>', views.category_post, name="category-post"),
    path('search', views.search_post, name='search'),
    path('create/category', views.create_category, name='create-category'),
    path('create/post', views.create_post, name='create-post'),
]
74449852817 | from delegations.models import Billing, Delegation, BusinessExpenses, UsersDelegations
# def createDelegationsCompanionObjects(delegation_id):
# billing = Billing.objects.create(
# FK_delegation=Delegation.objects.get(id_delegation=delegation_id))
# BusinessExpenses.objects.create(FK_billing=Billing.objects.get(id_billing=billing.id_billing))
def getBusinessExpenses(delegation_id):
    """Return the BusinessExpenses row attached to the delegation's billing."""
    billing = Billing.objects.get(
        FK_delegation=Delegation.objects.get(id_delegation=delegation_id))
    return BusinessExpenses.objects.get(FK_billing=billing)
def getParticipantsList(delegation_id):
    """Return the delegation's organizer followed by every linked user.

    The original fetched every UsersDelegations row and filtered in
    Python; filtering in the database returns the same users without the
    full-table scan.
    """
    curr_delegation = Delegation.objects.get(pk=delegation_id)
    participants_list = [curr_delegation.FK_organizer]
    links = UsersDelegations.objects.filter(FK_delegation=curr_delegation)
    participants_list.extend(link.FK_user for link in links)
    return participants_list
| LeviSforza/PO-Projekt | delegations/utils.py | utils.py | py | 925 | python | en | code | 0 | github-code | 13 |
37412146119 | import re
import time
import operator
import sys
sys.path.append("../../")
import kaggle
import popular
training = "../data/train.csv"
extra = "../data/sku_names.csv"
testing = "../data/test.csv"
def train(data, ngram=1):
    """Build per-class n-gram word-count hashes from raw training rows."""
    data = kaggle.format_words(data)
    output = kaggle.word_count_hash(data, ngram)
    return output
def classify(words, model, popularity):
    """Rank every SKU against the query words and return the top 10, best first.

    `popularity` is passed through to score_all. Fixes: the py2-only
    `dict.iteritems()` call is replaced with `items()`, and the unused
    `winner`/`best` locals are removed.
    """
    top_items = 10  # Number of predictions to return
    scores = score_all(words, model, popularity)
    # Descending by score (tie order among equal scores is unspecified).
    ranked = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)
    return [sku for sku, _ in ranked[:top_items]]
#def test(model,
def score_all(query_words, model, popularity):
    """Score every SKU document in `model` against the query words.

    `popularity` is accepted for interface symmetry but not used here.
    Returns {sku: tf/idf score}.
    """
    output = {}
    s = 0.
    idf = inverse_document_frequency(model, query_words, debug=True)
    for sku,target_words in model.items():
        s = score(query_words, target_words, idf)
        output[sku] = s
    return output
def score(query, target, idf, debug=False):
    """Sum of tf/idf contributions of each query word against `target`.

    Words missing from `idf`, or with idf == 0, are treated as idf == 1,
    i.e. they contribute their raw term frequency. `debug` is unused.
    """
    output = 0
    for q in query:
        tf = term_frequency(q, target)
        if q in idf:
            word_idf = idf[q]
        else:
            word_idf = 1.
        if word_idf == 0.:
            word_idf = 1.
        tf_idf = tf/word_idf
        output += tf_idf
    return output
def term_frequency(query, target):
    """Relative frequency of `query` in the `target` word-count mapping.

    An empty document is treated as having one word so the result is 0.0
    rather than a division error.
    """
    total = sum(target.values()) or 1
    return float(target.get(query, 0)) / total
def inverse_document_frequency(model, words, debug=False):
    """Fraction of documents in `model` that contain each word in `words`.

    Returns {word: count/len(model)} as floats. Fix: an empty `model`
    previously raised ZeroDivisionError; it now yields 0.0 for every word.
    `debug` is unused (kept for interface compatibility).
    """
    counts = {w: 0. for w in words}
    for sku, document in model.items():
        for w in words:
            if w in document:
                counts[w] += 1
    # Guard: an empty model would otherwise divide by zero.
    document_count = len(model) or 1
    return {word: count / document_count for word, count in counts.items()}
# get the predictions
def predict_all(data, model, popularity):
    """Classify every query in `data`; returns a list of prediction lists."""
    return [classify(query, model, popularity) for query in data]
def voted_predict_all(data, model1, model2, popularity, w):
    """Predict with both models, then merge the ranked lists via vote().

    ``w`` is forwarded to vote() as its ``weight`` argument; the actual
    blend weights are hard-coded inside vote().
    """
    predictions1 = predict_all(data, model1, popularity)
    predictions2 = predict_all(data, model2, popularity)
    predictions = vote(predictions1, predictions2, popularity, w)
    return predictions
# Returns the mean reciprocal rank and the predictions.
def test(model1, model2, data, class_labels, popularity, w=None):
    """Score voted predictions against the known class labels.

    Returns ``(score, predictions)`` where ``score`` is the mean
    reciprocal rank of the correct answer within each prediction list
    (1.0 for first place, 0.5 for second, ...; 0 when absent).

    Bug fix: voted_predict_all() requires a weight argument; the previous
    call omitted it and raised a TypeError.  ``w`` is added with a
    default of None (vote() currently ignores its weight) so existing
    callers keep working unchanged.
    """
    predictions = voted_predict_all(data, model1, model2, popularity, w)
    # see how many are correct
    sum_score = 0.
    for index, prediction in enumerate(predictions):
        correct_answer = class_labels[index]
        if correct_answer in prediction:
            # reciprocal rank of the correct answer
            sum_score += 1.0 / (prediction.index(correct_answer) + 1)
    predictions_count = float(len(predictions))
    score = sum_score / predictions_count
    return score, predictions
def train_model(csv_file, ngram=1, validation=False):
    """Train an n-gram model plus a popularity hash from a CSV file.

    Column 1 holds the class label (sku) and column 3 the query text.
    ``validation`` is forwarded to the kaggle loaders to select the
    train/validation split ('all' uses every row).

    Returns (model, popularity).
    """
    class_labels_index = 1
    input_data_index = 3
    data = kaggle.file_to_hash(csv_file, class_labels_index, input_data_index, validation)
    model = train(data, ngram)
    data = kaggle.file_to_array(csv_file, validation)
    class_labels = kaggle.slice(data, class_labels_index)
    popularity = popular.popularity_hash(class_labels, data)
    return model, popularity
def test_data(csv_file, class_labels_index, input_data_index, validation, items_count, ngram=1):
    """Load ``(class_labels, tokenized_queries)`` from a CSV file.

    Each query string is formatted then tokenized into n-grams.  Unless
    ``items_count`` is the string 'All', both lists are truncated to the
    first ``items_count`` entries.
    """
    rows = kaggle.file_to_array(csv_file, validation)
    class_labels = kaggle.slice(rows, class_labels_index)
    raw_queries = kaggle.slice(rows, input_data_index)
    formatted_test_data = [
        kaggle.tokenize(kaggle.format_string(raw), ngram) for raw in raw_queries
    ]
    if items_count != 'All':
        class_labels = class_labels[:items_count]
        formatted_test_data = formatted_test_data[:items_count]
    return class_labels, formatted_test_data
def vote(pred1, pred2, popularity, weight):
    """Merge two ranked prediction lists (plus a popularity ranking) into
    a single top-5 list per query.

    For each query, every candidate receives a positional score (1.0 at
    rank 0, falling linearly to 0) from the uni-gram list, the bi-gram
    list and the popularity-sorted list, blended with the hard-coded
    weights below.  ``weight`` is accepted for interface compatibility
    but is not used.

    Bug fix: ``dict.iteritems()`` is Python 2 only and raised
    AttributeError on Python 3; ``dict.items()`` is used instead.  The
    three identical scoring loops were factored into one helper.
    """
    uni_gram_weight = 1.0
    bi_gram_weight = 0.5  # 0.3
    popular_weight = 0.5

    def _accumulate(scored, candidates, length, w):
        # Positional score, decaying linearly with rank, scaled by w.
        for rank, sku in enumerate(candidates):
            s = (1 - (float(rank) / length)) * w
            scored[sku] = scored.get(sku, 0) + s

    output = []
    for index, preds in enumerate(pred1):
        scored = {}
        pop = popular.sort_by_popularity(preds + pred2[index], popularity)
        # NOTE: all three lists are normalised by len(preds), as before.
        length = len(preds)
        _accumulate(scored, preds, length, uni_gram_weight)
        _accumulate(scored, pred2[index], length, bi_gram_weight)
        _accumulate(scored, pop, length, popular_weight)
        # Sort ascending then reverse (not reverse=True) to preserve the
        # original tie-breaking order.
        sorted_scores = sorted(scored.items(), key=operator.itemgetter(1))
        sorted_scores.reverse()
        output.append([sku for sku, _s in sorted_scores[0:5]])
    return output
def real_test(w):
    """Produce predictions for the competition test set.

    Trains the query model (and popularity hash) on the full training
    file plus a second model on the sku-name file, then blends them with
    voted_predict_all.  ``w`` is forwarded to vote() as its weight.
    Note: the test file's query text lives in column 2 (vs column 3 in
    the training file).
    """
    sample_size = 'All'
    model1, popularity = train_model(training, ngram=1, validation='all')
    #_, popularity = train_model(training, ngram=1, validation='all')
    model2, _ = train_model(extra, ngram=1, validation='all')
    _, td = test_data(testing, 1, 2, 'all', sample_size)
    predictions = voted_predict_all(td, model1, model2, popularity, w)
    return predictions
def validation_test():
    """Train on the training split, evaluate on the validation split and
    return a human-readable precision/timing summary string.

    The reported 'precision' is the mean reciprocal rank computed by
    test().
    """
    start = time.time()
    sample_size = 'All'
    model1, popularity = train_model(training, ngram=1, validation=False)
    # popularity is recomputed over the full file, overwriting the value
    # returned just above.
    _, popularity = train_model(training, ngram=1, validation="all")
    model2, _ = train_model(extra, ngram=1, validation="all")
    validation = True
    class_labels, _test_data = test_data(training, 1, 3, validation, sample_size)
    precision, predictions = test(model1, model2, _test_data, class_labels, popularity)
    return "Precision: " + str(precision) + ".\n" + str(len(_test_data)) + " examples.\n Time: " + str(time.time() - start)
| pmiller10/best_buy | best_buy/models/tf_idf.py | tf_idf.py | py | 5,892 | python | en | code | 0 | github-code | 13 |
2051641616 | #
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http:#www.gnu.org/licenses/.
#
from pkg_resources import resource_filename
import json
import os
import numpy as np
import shapely.geometry as sg
class Area:
    """
    Read and manipulate a simulation area defined in a JSON file.

    The constructor expects a path to a JSON file containing a single
    list named "vertices" with the (X, Y) vertices describing the
    simulation area:

    >>> {
    >>>   "vertices": [
    >>>     [0, 0],
    >>>     [0, 75],
    >>>     ...
    >>>   ]
    >>> }
    """
    # Paths starting with this prefix are resolved relative to the
    # bundled smile.resources package instead of the filesystem.
    _RESOURCE = "resource:"

    class InvalidContentError(ValueError):
        """
        JSON content is invalid.
        """
        pass

    def __init__(self, file_path):
        """Load the area polygon from ``file_path``.

        Paths beginning with "resource:" are resolved inside the
        smile.resources package; anything else is treated as a
        filesystem path (with ~ expansion).

        Raises:
            Area.InvalidContentError: if the JSON lacks a 'vertices' key.
        """
        if file_path.startswith(Area._RESOURCE):
            file_path = file_path[len(Area._RESOURCE):]
            file_path = os.path.abspath(os.path.join(resource_filename("smile.resources", ""),
                                                     file_path))
        else:
            file_path = os.path.expanduser(file_path)

        with open(file_path, 'r') as handle:
            content = json.load(handle)
        if 'vertices' not in content:
            raise Area.InvalidContentError('JSON does not contain \'vertices\'')

        # Shapely polygon built from the vertex list.
        self.area = sg.Polygon(content['vertices'])

    def contains(self, point, rtol=1e-5, atol=1e-5):
        """
        Checks whether point is inside or close to the area.

        Args:
            point (point like): The point.
            rtol (float): The relative tolerance parameter.
            atol (float): The absolute tolerance parameter.
        """
        if not isinstance(point, sg.Point):
            point = sg.Point(point)
        if self.area.contains(point):
            return True
        # Points on (or numerically near) the boundary are not strictly
        # "contained" by shapely, so accept them via a distance check.
        distance = self.area.distance(point)
        return np.isclose(distance, 0, rtol=rtol, atol=atol, equal_nan=False)
| goofacz/smile-python | smile/area.py | area.py | py | 2,480 | python | en | code | 0 | github-code | 13 |
4632080505 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 6 12:13:17 2018
@author: pinaki
"""
import pandas as pd
from sklearn.preprocessing import StandardScaler
dataset=pd.read_csv("criminal_train.csv")
X = dataset.iloc[:, 2:71].values   # feature columns
y = dataset.iloc[:, 71].values     # target column (criminal flag)

# Splitting the dataset into the Training set and Test set
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# modern code imports train_test_split from sklearn.model_selection.
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)

# Feature Scaling (fit on train only; reuse the same scaler on test)
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
from collections import OrderedDict
def analyseModel(classifier,X_Test,Y_Test,X_Train,Y_Train,runCrossVal=False,cv=10):
    """Fit a classifier and collect its evaluation metrics.

    Returns an OrderedDict with the classifier type, test-set confusion
    matrix and accuracy; when ``runCrossVal`` is True it also contains
    the per-fold cross-validation accuracies, their mean and std.
    """
    report = OrderedDict()
    report['ClassifierType'] = str(type(classifier))
    classifier.fit(X_Train, Y_Train)
    predictions = classifier.predict(X_Test)
    report['ConfusionMatrix'] = confusion_matrix(Y_Test, predictions)
    report['Accuracy'] = accuracy_score(Y_Test, predictions)
    if runCrossVal:
        fold_scores = cross_val_score(estimator=classifier, X=X_Train, y=Y_Train, cv=cv)
        report['AccuracyList'] = fold_scores
        report['MeanAccuracy'] = fold_scores.mean()
        report['AccuracySD'] = fold_scores.std()
    return report
# List of performance reports, one OrderedDict per classifier.
performanceMonitors=[]

# Fitting XGBoost to the Training set
from xgboost import XGBClassifier
classifier = XGBClassifier()
classifier.fit(X_train, y_train)

# Predicting the Test set results (analyseModel refits below; this
# standalone prediction is kept for interactive inspection)
y_pred = classifier.predict(X_test)
performanceMonitors.append(analyseModel(classifier,X_test,y_test,X_train,y_train,runCrossVal=True,cv=10))

# Fitting Logistic Regression to the Training set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
performanceMonitors.append(analyseModel(classifier,X_test,y_test,X_train,y_train,runCrossVal=True,cv=10))

# Fitting K-Nearest Neighbours to the Training set
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)
performanceMonitors.append(analyseModel(classifier,X_test,y_test,X_train,y_train,runCrossVal=True,cv=10))

# Fitting a linear SVM to the Training set
from sklearn.svm import SVC
classifier = SVC(kernel = 'linear', random_state = 0)
performanceMonitors.append(analyseModel(classifier,X_test,y_test,X_train,y_train,runCrossVal=True,cv=10))

# Fitting a kernel (RBF) SVM to the Training set
from sklearn.svm import SVC
classifier = SVC(kernel = 'rbf', random_state = 0)
performanceMonitors.append(analyseModel(classifier,X_test,y_test,X_train,y_train,runCrossVal=True,cv=10))

# Fitting Naive Bayes to the Training set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
performanceMonitors.append(analyseModel(classifier,X_test,y_test,X_train,y_train,runCrossVal=True,cv=10))

# Fitting a Decision Tree to the Training set
from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 0)
performanceMonitors.append(analyseModel(classifier,X_test,y_test,X_train,y_train,runCrossVal=True,cv=10))

# Fitting a Random Forest to the Training set
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
performanceMonitors.append(analyseModel(classifier,X_test,y_test,X_train,y_train,runCrossVal=True,cv=10))

# Rank the classifiers by mean cross-validation accuracy, best first.
performanceMonitors=sorted(performanceMonitors, key=lambda k: k['MeanAccuracy'],reverse=True)

# Print true positives, false positives, false negatives per classifier.
for i in performanceMonitors:
    cm=i["ConfusionMatrix"]
    print(cm[1][1],cm[0][1],cm[1][0],i["ClassifierType"])
70873002897 | import sys
sys.path.append('../../modules')
from env.tic_tac_toe import TicTacToe
import numpy as np
# ***** welcome messages *****
print('==============================')
print('Welcome to the rollout tic-tac-toe AI.')

# ***** player choice of first-hand or second-hand *****
# Keep prompting until the user types one of the two accepted answers.
while True:
    first_hand = input('Who goes first? (type "me" for me, "ai" for AI)\n')
    if first_hand in ['me', 'ai']:
        break
    else:
        print('Please try again.')

# The cross ('x') side always moves first; assign sides accordingly.
if first_hand == 'me':
    print('You are going first and are assigned the "x" stone.')
    current_side = 'cross'
    human_side = 'cross'
    ai_side = 'circle'
elif first_hand == 'ai':
    print('You are going second and are assigned the "o" stone.')
    current_side = 'cross'
    human_side = 'circle'
    ai_side = 'cross'
print('==============================')
# ***** utility functions *****
def board_to_str(board):
    """Render the board as an ASCII grid: 1 -> 'x', -1 -> 'o', 0 -> ' '."""
    symbols = {0: ' ', -1: 'o', 1: 'x'}
    lines = ['    0 1 2 ', '   -------']
    for row_idx, row in enumerate(board):
        cells = '|'.join(symbols.get(cell, '') for cell in row)
        lines.append(f' {row_idx} |{cells}|')
        lines.append('   -------')
    # No trailing newline: the last line is the bottom divider itself.
    return '\n'.join(lines)
def swap_side(current_side):
    """Return the opposing side name ('cross' <-> 'circle')."""
    return {'cross': 'circle', 'circle': 'cross'}.get(current_side)
def rollout(game, action):
    """Estimate the value of playing ``action`` for the AI from ``game``.

    Copies the board into a scratch TicTacToe, plays ``action`` first and
    then uniformly random moves for both sides until the game ends.
    Returns the terminal reward from the AI's perspective; a human win is
    scored -100 to make losing lines strongly unattractive.

    Bug fix: the first move previously used the global loop variable
    ``a`` instead of the ``action`` parameter — it only worked because
    the caller happened to name its argument ``a``.
    """
    mind_game = TicTacToe()
    mind_game.board = game.board.copy()
    first_action = True
    current_side = ai_side
    while True:
        if first_action:
            _, reward, done = mind_game.step(action, current_side)
            first_action = False
        else:
            _, reward, done = mind_game.step(mind_game.sample_action_space(), current_side)
        if done:
            if current_side == ai_side:
                if reward == 0: return reward
                elif reward == 1: return reward
            elif current_side == human_side:
                if reward == 0: return 0
                elif reward == 1: return -100  # this was able to fix some trouble; undo it and use human first to see some trouble
        current_side = swap_side(current_side)
game = TicTacToe()

# Main game loop: alternate between human input and AI moves until done.
while True:
    print('==============================')
    print(board_to_str(game.board))
    print('==============================')

    # *****
    if current_side == human_side:
        # Human move: parse "row,col" from stdin.
        position = tuple(map(int, input('Your position tuple (row, col): ').split(',')))
        _, reward, done = game.step(position, current_side)
    elif current_side == ai_side:
        # the true AI part: Monte-Carlo rollouts — estimate each legal
        # action by averaging 1500 random playouts, then play the best.
        print('AI is contemplating ...')
        values = np.zeros((len(game.action_space), ))
        for i, a in enumerate(sorted(game.action_space)):
            returns = []
            for n in range(1500):
                returns.append(rollout(game, a))
            values[i] = np.mean(returns)
        _, reward, done = game.step(sorted(game.action_space)[np.argmax(values)], current_side)

    if done:
        # reward is 0 for a draw and 1 when the side that just moved won.
        print('========== GAME HAS ENDED ==========')
        if reward == 0:
            print('Result: draw')
            print(board_to_str(game.board))
            print('====================================')
            break
        elif reward == 1:
            winning_player = 'human' if human_side == current_side else 'AI'
            print(f'Result: {current_side} (held by {winning_player}) wins!')
            print(board_to_str(game.board))
            print('====================================')
            break

    current_side = swap_side(current_side)
| zhihanyang2022/classic_rl | examples/rollout/tic_tac_toe_ai.py | tic_tac_toe_ai.py | py | 3,451 | python | en | code | 0 | github-code | 13 |
14881338269 | from .app import app, ldap_obj
import ldap
def server_dn():
    """Build the DN of the 'DHCP Config' entry from the configured domain.

    Generalized: the configured domain may now have any number of
    dot-separated components (e.g. 'lab.example.com' ->
    'dc=lab,dc=example,dc=com').  Two-component domains produce exactly
    the same DN as before, so existing deployments are unaffected; the
    old code silently dropped everything past the second component.
    """
    domain_components = ','.join(
        'dc=%s' % part
        for part in app.config['openldap_server_domain_name'].split('.')
    )
    return 'cn=DHCP Config,cn=dhcpsrv,%s' % domain_components
def _deep_delete(dn):
    """Recursively delete an LDAP entry and everything beneath it.

    Children are removed first (LDAP refuses to delete non-leaf
    entries); a missing entry is silently ignored.
    """
    try:
        objects = ldap_obj.search_s(dn, ldap.SCOPE_SUBTREE)
    except ldap.NO_SUCH_OBJECT:
        return
    for obj in objects:
        # search_s includes the base entry itself; skip it here so it is
        # deleted last, after all of its descendants.
        if obj[0] != dn:
            _deep_delete(obj[0])
    ldap_obj.delete_s(dn)
def drop_all():
    """Delete every entry under the Hosts, Groups and Subnets OUs,
    keeping the OU container entries themselves."""
    for ou in ['Hosts','Groups','Subnets']:
        objects = ldap_obj.search_s('ou=%s,%s' % (ou, server_dn()), ldap.SCOPE_SUBTREE)
        for obj in objects:
            # Keep the container OU entry; delete only its children.
            if not obj[0].startswith('ou=%s' % ou):
                _deep_delete(obj[0])
| GR360RY/dhcpawn | flask_app/ldap_utils.py | ldap_utils.py | py | 767 | python | en | code | 2 | github-code | 13 |
14982135260 | import csv
from assignment3.min_wise_sample import MinWiseSample
from assignment3.utils import *
from assignment3.packet import *
# One min-wise reservoir per size so the samples can be compared.
reservoir_size_range = [100, 1000, 10000, 100000]
samples = []
addresses = []
ip_freq = dict()   # destination IP -> occurrence count (exact)
ip_amt = 0         # total number of destination IPs kept
k = 10             # size of the top-k comparison at the end

# Define the host IPs you are interested in.
infected_host_ips = ['147.32.84.165']
# Set to true if you want to bypass the host IP check.
scan_all = True

for x in reservoir_size_range:
    samples.append(MinWiseSample(x))

file = "data\capture20110816-2.pcap.netflow.labeled"

# Treat data as a stream.
with open(file, "r") as f:
    reader = csv.reader(f, delimiter=" ")
    for z, line in enumerate(reader):
        # Skip the header row.
        if z < 1:
            continue
        # Split the arguments in the line
        args = line[1].split("\t")
        new_args = remove_empty_strings(args)
        date = line[0] + ' ' + new_args[0]
        # src/dst fields carry "ip:port"; keep only the IP part.
        p = packet(date, new_args[1], new_args[2], new_args[3].split(':')[0], new_args[5].split(':')[0], new_args[6], new_args[7], new_args[8], new_args[9], new_args[10], new_args[11])
        src = p.src
        ip = p.dst
        # Filter the broadcasts and non-ip adresses
        if (ip != "Broadcast") and (ip != "ff02") and (scan_all or src in infected_host_ips):
            # Add to the min-wise sampling pool.
            for sample in samples:
                sample.add(ip)
            # Add to the frequency counter.
            if ip in ip_freq:
                ip_freq[ip] += 1
            else:
                ip_freq[ip] = 1
            # Increment the amount of IP's gathered. This is useful in the future.
            ip_amt += 1
            addresses.append(ip)

# Exact frequencies (ground truth) vs. per-reservoir sampled frequencies.
total_freq_sorted = sort_dict_by_value(ip_freq, ip_amt)
for i, sample in enumerate(samples):
    samples[i] = sample.count_and_sort()

print('Stream reading done.')
print('The file that was read can be found in %s' % file)
print('There are %i destination IP addresses' % ip_amt)

# print('Printing all frequencies:')
# print('Total frequencies, sorted by value in descending order:')
# print(total_freq_sorted)
#
# for i, sample in enumerate(samples):
#     print('Frequencies of samples sampled by using min-wise sampling with a reservoir size of %i, sorted by value in descending order:' % 10**(i+2))
#     print(sample)
print('')

# Compare the top-k of the exact counts with the top-k of each sample.
first_k_all = select_first_k(total_freq_sorted, k)
print('Printing only the top %i frequencies' % k)
print('Top %i highest frequencies of the total frequencies:' % k)
print(first_k_all)
for i, sample in enumerate(samples):
    first_k_sampled = select_first_k(sample, k)
    print('Top {} highest frequencies of the sampled frequencies with reservoir size {}:'.format(k, 10**(i+2)))
    print(first_k_sampled)
    print('This set has %i IP addresses in common with the normal sample, %i of which are in the same position' % (keys_in_common(first_k_all, first_k_sampled), keys_in_same_position(first_k_all, first_k_sampled)))
    print()
22787880421 | class Solution(object):
def isValid(self, s):
a = list()
for s0 in s:
if s0 in ['[', '{', '(']:
a.append(s0)
else:
if len(a) == 0:
return False
s1 = a.pop()
if not (s1 == '[' and s0 == ']' or s1 == '{' and s0 == '}' or s1 == '(' and s0 == ')'):
return False
return True if len(a) == 0 else False
if __name__ == '__main__':
    # Quick manual check: '{[}}' is malformed, so this prints False.
    solution = Solution()
    s = '{[}}'
    print(solution.isValid(s))
| lmb633/leetcode | 20isValid.py | 20isValid.py | py | 572 | python | en | code | 0 | github-code | 13 |
3529928431 | from itertools import product
import time
import warnings
import numpy as np
from numpy.testing import assert_raises, assert_
import pytest
from mrrt.operators import (
TV_Operator,
FiniteDifferenceOperator,
DiagonalOperator,
IdentityOperator,
CompositeLinOp,
BlockDiagLinOp,
BlockColumnLinOp,
BlockRowLinOp,
)
from mrrt.operators.LinOp import (
retrieve_block_out,
retrieve_block_in,
split_block_outputs,
split_block_inputs,
)
from mrrt.utils import config
# Array backends to parametrize the tests over: always NumPy, plus CuPy
# when it is installed and at least one GPU is present.
all_xp = [np]
if config.have_cupy:
    import cupy

    if cupy.cuda.runtime.getDeviceCount() > 0:
        all_xp += [cupy]
def get_loc(xp):
    """Location arguments corresponding to numpy or CuPy case."""
    device = "cpu" if xp is np else "gpu"
    return {"loc_in": device, "loc_out": device}
def get_data(xp, shape=(128, 128)):
    """Deterministic standard-normal test array of the given shape."""
    return xp.random.RandomState(5).randn(*shape)
@pytest.mark.parametrize("xp, order", product(all_xp, ["C", "F"]))
def test_block_diag_identical(xp, order):
    """A BlockDiagLinOp built from n references to one wavelet operator
    shares (not copies) the operator, round-trips data, and supports
    indexing, slicing and item assignment."""
    MDWT_Operator = pytest.importorskip("mrrt.operators.MDWT_Operator")
    filters = pytest.importorskip("pyframelets.separable.filters")
    c = get_data(xp)
    Phi = MDWT_Operator(
        c.shape,
        order=order,
        nd_input=False,
        nd_output=False,
        level=3,
        filterbank=filters.pywt_as_filterbank("db2", xp=xp),
        mode="periodization",
        **get_loc(xp),
    )
    nblocks = 3
    Phi_block_op = BlockDiagLinOp([Phi] * nblocks)
    assert_(len(Phi_block_op.blocks) == nblocks)
    # should have references to the same underlying operator (not copies!)
    assert_(Phi_block_op.blocks[0] is Phi_block_op.blocks[1])

    # test round trip
    x = xp.concatenate((c.ravel(order=order),) * nblocks, axis=0)
    y = Phi_block_op * x
    x2 = Phi_block_op.H * y
    xp.testing.assert_allclose(x, x2, rtol=1e-9, atol=1e-9)

    # access linops directly without the blocks attribute
    assert_(Phi_block_op[0] is Phi)

    # access subset via a slice
    Phi_sl = Phi_block_op[slice(1, None)]
    assert_(Phi_sl.nblocks == (Phi_block_op.nblocks - 1))
    assert_(Phi_sl[-1] is Phi)

    # Change one of the linops dynamically
    Phi_sl[0] = 2 * Phi
    assert_(Phi_sl[0] is not Phi)


@pytest.mark.parametrize("xp, order", product(all_xp, ["C", "F"]))
def test_block_diag_identical_TV(xp, order):
    """Block-diagonal TV operator matches the single-block TV operator's
    forward, adjoint and norm outputs on every block."""
    c = get_data(xp)
    Phi = TV_Operator(
        c.shape, order=order, nd_input=False, nd_output=False, **get_loc(xp)
    )
    nblocks = 3
    Phi_block_op = BlockDiagLinOp([Phi] * nblocks)
    assert_(len(Phi_block_op.blocks) == nblocks)
    # should have references to the same underlying operator (not copies)
    assert_(Phi_block_op.blocks[0] is Phi_block_op.blocks[1])

    # test round trip
    # NOTE(review): both loops below always compare block 0
    # (slices_out[0] / slices_in[0]); slices_out[blk] / slices_in[blk]
    # was probably intended — confirm.
    x = xp.concatenate([c.ravel(order=order)] * nblocks, axis=0)
    y = Phi_block_op * x
    tv_out_ref = Phi * c.ravel(order=order)
    for blk in range(nblocks):
        xp.testing.assert_allclose(y[Phi_block_op.slices_out[0]], tv_out_ref)
    tv_adj_ref = Phi.H * tv_out_ref
    x2 = Phi_block_op.H * y
    for blk in range(nblocks):
        xp.testing.assert_allclose(x2[Phi_block_op.slices_in[0]], tv_adj_ref)
    # test_norm
    xp.testing.assert_allclose(Phi_block_op.norm(x), x2)


@pytest.mark.parametrize(
    "xp, nd_in, nd_out, order1, order2",
    product(
        all_xp,
        [True, False],
        [False],  # MDWT_Operator does not support nd_output = True
        ["C", "F"],
        ["C", "F"],
    ),
)
def test_block_diag(xp, nd_in, nd_out, order1, order2):
    """A heterogeneous BlockDiagLinOp (wavelet + TV blocks) applies each
    block to its own input slice; non-uniform nd_output shapes raise."""
    MDWT_Operator = pytest.importorskip("mrrt.operators.MDWT_Operator")
    filters = pytest.importorskip("pyframelets.separable.filters")
    c = get_data(xp)
    Phi = MDWT_Operator(
        c.shape,
        order=order1,
        nd_input=nd_in,
        nd_output=nd_out,
        level=3,
        filterbank=filters.pywt_as_filterbank("db2", xp=xp),
        mode="periodization",
        **get_loc(xp),
    )
    TV = TV_Operator(
        c.shape, order=order2, nd_input=nd_in, nd_output=nd_out, **get_loc(xp)
    )
    if nd_out:
        # non-uniform nd_output shape not allowed
        assert_raises(
            ValueError, BlockDiagLinOp, [Phi, TV], enforce_uniform_order=False
        )
        return
    B = BlockDiagLinOp([Phi, TV], enforce_uniform_order=False, **get_loc(xp))
    assert_(B.blocks[0] is Phi)
    assert_(B.blocks[1] is TV)

    c2 = xp.concatenate((c.ravel(order=order1), c.ravel(order=order2)), axis=0)
    res = B * c2
    dwt_out = res[B.slices_out[0]]
    tv_out = res[B.slices_out[1]]
    if nd_out:
        dwt_out = dwt_out.reshape(B.shapes_out[0], order=B.blocks[0].order)
        tv_out = tv_out.reshape(B.shapes_out[1], order=B.blocks[1].order)
    xp.testing.assert_allclose(dwt_out, Phi * c)
    xp.testing.assert_allclose(tv_out, TV * c)
# @dec.slow  # TODO: mark as slow
@pytest.mark.parametrize(
    "xp, Op",
    product(
        [np],  # only test concurrent blocks with NumPy
        [
            # MDWT_Operator,  # TODO: fix concurrent operation for MDWT_Operator
            TV_Operator,
            FiniteDifferenceOperator,
            DiagonalOperator,
            "composite",
        ],
    ),
)
def test_concurrent_blocks(xp, Op, verbose=False):
    """Test BlockDiagLinOp with concurrent processing.

    Builds the same 6-block operator in serial and concurrent mode,
    checks shape bookkeeping, verifies both produce identical
    adjoint(forward(x)) results, and warns (rather than fails) when the
    concurrent path is not faster on a multi-core machine.
    """
    MDWT_Operator = pytest.importorskip("mrrt.operators.MDWT_Operator")
    filters = pytest.importorskip("pyframelets.separable.filters")
    c = get_data(xp)
    dec_level = 2
    dwt_mode = "periodization"
    nd_in = nd_out = False  # True case not working with CompositeLinOp
    order = "F"
    nblocks = 6
    c3d = xp.stack([c] * 16, axis=-1)
    op_kwargs = dict(
        order=order,
        nd_input=nd_in,
        nd_output=nd_out,
        level=dec_level,
        filterbank=filters.pywt_as_filterbank("db2", xp=xp),
        mode=dwt_mode,
    )
    op_kwargs.update(get_loc(xp))
    if Op is FiniteDifferenceOperator:
        op_kwargs["use_corners"] = False
    if Op is DiagonalOperator:
        Phi = Op(diag=xp.arange(c3d.size), **op_kwargs)
    elif Op == "composite":
        # test BlockDiagLinop where each LinOp is a CompositeLinOp
        Phi1 = MDWT_Operator(c3d.shape, **op_kwargs)
        D1 = DiagonalOperator(diag=xp.arange(c3d.size), **op_kwargs)
        Phi = CompositeLinOp([Phi1, D1])
    else:
        Phi = Op(c3d.shape, **op_kwargs)

    # define serial Operator
    Phi_B = BlockDiagLinOp([Phi] * nblocks, concurrent=False, **get_loc(xp))
    # define concurrent Operator
    Phi_Bc = BlockDiagLinOp([Phi] * nblocks, concurrent=True, **get_loc(xp))

    assert_(Phi_B.nd_input == Phi.nd_input)
    assert_(Phi_B.nd_output == Phi.nd_output)
    # The block axis is appended for 'F' order, prepended for 'C' order.
    if Phi.nd_input:
        if order == "F":
            assert_(Phi_B.shape_in == Phi.shape_in + (nblocks,))
        elif order == "C":
            assert_(Phi_B.shape_in == (nblocks,) + Phi.shape_in)
    else:
        assert_(Phi_B.shape_in == (Phi.nargin * nblocks,))
    if Phi.nd_output:
        if order == "F":
            assert_(Phi_B.shape_out == Phi.shape_out + (nblocks,))
        elif order == "C":
            assert_(Phi_B.shape_out == (nblocks,) + Phi.shape_out)
    else:
        assert_(Phi_B.shape_out == (Phi.nargout * nblocks,))

    """
    test forward transform
    """
    allc = xp.asfortranarray(xp.stack([c3d] * nblocks, axis=-1))

    # run serial Operator
    tstart_serial = time.time()
    tmp = Phi_B.H * (Phi_B * allc)
    t_serial = time.time() - tstart_serial

    # run concurrent Operator
    tstart = time.time()
    tmpc = Phi_Bc.H * (Phi_Bc * allc)
    t = time.time() - tstart

    # verify equivalent result
    xp.testing.assert_allclose(tmp, tmpc)

    # only if nblocks is large enough and c is big enough
    if not (t < t_serial) and (Op is not DiagonalOperator):
        import os

        ncpus = os.cpu_count()
        if ncpus > 1 and nblocks > 1:
            warnings.warn(
                "test_concurrent_blocks: concurrent case unexpectedly "
                "slower on a machine with {} cpus".format(ncpus)
            )
    if verbose:
        print("Operator: {}".format(Op))
        print("    time (serial) = {}".format(t_serial))
        print("    time (concurrent) = {}".format(t))
        time_ratio = t_serial / t
        print("    speedup factor = {}\n".format(time_ratio))
@pytest.mark.parametrize("xp, order", product(all_xp, ["C", "F"]))
def test_block_column_linop(xp, order):
    """Test BlockColumnLinOp: shared input, stacked outputs, per-block
    output retrieval, adjoint (sum of per-block adjoints) and norm."""
    c = get_data(xp)
    kwargs = dict(squeeze_reps=True, order=order)
    kwargs.update(get_loc(xp))
    D = IdentityOperator(c.size, **kwargs)
    D2 = DiagonalOperator(xp.full(c.size, 2.0, dtype=c.dtype), **kwargs)
    D3 = DiagonalOperator(xp.full(c.size, 3.0, dtype=c.dtype), **kwargs)
    ColOp = BlockColumnLinOp([D, D2, D3])
    assert_(ColOp.nargin == D.nargin == D2.nargin == D3.nargin)
    assert_(ColOp.nargout == D.nargout + D2.nargout + D3.nargout)
    c3 = ColOp * c

    # retrieve single blocks
    c3_0 = retrieve_block_out(c3, ColOp, 0).reshape(c.shape, order=order)
    c3_1 = retrieve_block_out(c3, ColOp, 1).reshape(c.shape, order=order)
    c3_2 = retrieve_block_out(c3, ColOp, 2).reshape(c.shape, order=order)

    # retrieve all blocks at once
    allc = split_block_outputs(c3, ColOp, reshape=False)
    xp.testing.assert_array_equal(allc[0], c3_0.ravel(order=order))
    xp.testing.assert_array_equal(allc[1], c3_1.ravel(order=order))
    xp.testing.assert_array_equal(allc[2], c3_2.ravel(order=order))

    # reshape and compare to expected result of running each individually
    xp.testing.assert_allclose(D * c, allc[0])
    xp.testing.assert_allclose(D2 * c, allc[1])
    xp.testing.assert_allclose(D3 * c, allc[2])

    # only retrieve the last 2 outputs
    c3_sl = split_block_outputs(c3, ColOp, sl=slice(1, 3), reshape=False)
    assert_(len(c3_sl) == 2)

    # Block column operator can't split inputs
    assert_raises(ValueError, split_block_inputs, c, ColOp)
    assert_raises(ValueError, retrieve_block_in, c, ColOp, 0)

    # test adjoint: sum over per-block adjoints of the output slices
    c_adj = ColOp.H * c3
    c_adj_expected = 0
    for n in range(ColOp.nblocks):
        c_adj_expected += ColOp.blocks[n].H * retrieve_block_out(c3, ColOp, n)
    xp.testing.assert_allclose(c_adj, c_adj_expected)

    # test norm()
    xp.testing.assert_allclose(ColOp.norm(c), c_adj)


@pytest.mark.parametrize("xp, order", product(all_xp, ["C", "F"]))
def test_block_row_linop(xp, order):
    """Test BlockRowLinOp: stacked inputs, summed output (1x + 2x + 3x =
    6x here), concatenated adjoint and norm."""
    c = get_data(xp)
    c3 = xp.concatenate((c.ravel(order=order),) * 3)
    kwargs = dict(squeeze_reps=True, order=order)
    kwargs.update(get_loc(xp))
    D = IdentityOperator(c.size, **kwargs)
    D2 = DiagonalOperator(xp.full(c.size, 2.0, dtype=c.dtype), **kwargs)
    D3 = DiagonalOperator(xp.full(c.size, 3.0, dtype=c.dtype), **kwargs)
    RowOp = BlockRowLinOp([D, D2, D3])
    assert_(RowOp.nargout == D.nargin == D2.nargin == D3.nargin)
    assert_(RowOp.nargin == D.nargin + D2.nargin + D3.nargin)
    c3sum = RowOp * c3
    xp.testing.assert_array_equal(c3sum.reshape(c.shape, order=order), c * 6)

    # Block row operator can't split outputs
    assert_raises(ValueError, split_block_outputs, c3sum, RowOp)
    assert_raises(ValueError, retrieve_block_out, c3sum, RowOp, 0)

    # test adjoint
    c3_adj = RowOp.H * c3sum
    c3_adj_expected = xp.concatenate(
        [xp.ravel(B.H * c3sum, order=order) for B in RowOp.blocks]
    )
    xp.testing.assert_allclose(c3_adj, c3_adj_expected)

    # test norm()
    xp.testing.assert_allclose(RowOp.norm(c3), c3_adj)
| mritools/mrrt.operators | mrrt/operators/tests/test_block.py | test_block.py | py | 11,670 | python | en | code | 1 | github-code | 13 |
14551996293 | import sys
NO_LETTER = 0
LETTER_EXISTS = 1
LETTER_TERMINAL = 2
class LetterNode:
    """One trie slot: a terminal marker plus an optional 26-way branch.

    ``is_terminal`` is NO_LETTER while unused, LETTER_EXISTS for an
    interior letter, and LETTER_TERMINAL + word_index when an inserted
    word ends here (the offset recovers which input word it was).
    ``next`` is a 26-element child list, created lazily by Trie.insert.
    """
    def __init__(self):
        self.is_terminal = NO_LETTER
        self.next = None
class Trie:
    """Fixed-alphabet (a-z) trie whose terminal nodes remember the index
    of the inserted word, encoded as ``LETTER_TERMINAL + index``."""

    def __init__(self):
        # Root level: one slot per lowercase letter.
        self.chars = [LetterNode() for _ in range(26)]

    @staticmethod
    def char_num(char):
        """Map 'a'..'z' to 0..25."""
        return ord(char) - ord('a')

    def insert(self, word, ind):
        """Insert ``word``; its last node stores ``LETTER_TERMINAL + ind``.

        Bug fix: the previous version unconditionally overwrote each
        visited node's marker with LETTER_EXISTS, erasing the terminal
        marker of any previously inserted word that is a prefix of this
        one (e.g. inserting 'ab' then 'abc' lost 'ab').  The marker is
        now only raised from NO_LETTER, never downgraded.
        """
        word_list = self.chars
        for i in range(len(word)):
            char = self.char_num(word[i])
            if not word_list[char]:
                word_list[char] = LetterNode()
            # Never downgrade an existing terminal marker.
            if word_list[char].is_terminal == NO_LETTER:
                word_list[char].is_terminal = LETTER_EXISTS
            if i == len(word) - 1:
                word_list[char].is_terminal = LETTER_TERMINAL + ind
            if not word_list[char].next:
                word_list[char].next = [LetterNode() for _ in range(26)]
            word_list = word_list[char].next

    def contains(self, word):
        """Return the terminal marker of ``word``'s last node.

        NO_LETTER (0) if the path is absent (or ``word`` is empty),
        LETTER_EXISTS (1) if it is only a proper prefix of some word, and
        LETTER_TERMINAL + index if ``word`` itself was inserted.
        """
        if not word:
            return NO_LETTER  # robustness: avoid word[-1] on empty input
        word_list = self.chars
        for i in range(len(word) - 1):
            char = self.char_num(word[i])
            if not word_list[char] or not word_list[char].next:
                return NO_LETTER
            word_list = word_list[char].next
        return word_list[self.char_num(word[-1])].is_terminal
# Input: the text, the number of dictionary words, then the words
# themselves (one per line on stdin).
string, ans_len = input(), int(input())
my_trie = Trie()
for i, word in enumerate(sys.stdin.read().splitlines()):
    my_trie.insert(word, i)

answer = ['No' for _ in range(ans_len)]
# Test every substring of length 1..30 starting at each position
# (dictionary words are assumed to be at most 30 characters long).
for i in range(len(string)):
    for j in range(1, 31):
        if i + j > len(string):
            break
        else:
            sub_str = string[i: i + j]
            contains_ind = my_trie.contains(sub_str)
            if contains_ind >= LETTER_TERMINAL:
                # contains() encodes the matched word's input index.
                answer[contains_ind - LETTER_TERMINAL] = 'Yes'
print(*answer, sep='\n')
| StepDan23/MADE_algorithms | hw_14/d.py | d.py | py | 1,772 | python | en | code | 0 | github-code | 13 |
34132412230 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.contrib.postgres.fields
class Migration(migrations.Migration):
    """Auto-generated: add Review.recommended_options (array of play
    options) and Review.text (free-text comment), and extend the choice
    set of Review.characteristics."""

    dependencies = [
        ('statistik', '0006_auto_20151114_0029'),
    ]

    operations = [
        migrations.AddField(
            model_name='review',
            name='recommended_options',
            field=django.contrib.postgres.fields.ArrayField(null=True, size=None, base_field=models.IntegerField(choices=[(0, 'Regular'), (1, 'Random'), (2, 'S-Random'), (3, 'R-Random'), (4, 'Mirror')])),
        ),
        migrations.AddField(
            model_name='review',
            name='text',
            field=models.CharField(blank=True, max_length=256),
        ),
        migrations.AlterField(
            model_name='review',
            name='characteristics',
            field=django.contrib.postgres.fields.ArrayField(null=True, size=None, base_field=models.IntegerField(choices=[(0, 'Scratching'), (1, 'Jacks'), (2, 'Speed Changes'), (3, 'Charge Notes'), (4, 'Scales'), (5, 'Chord Scales'), (6, 'Denim')])),
        ),
    ]
| benhgreen/statistik | statistik/migrations/0007_auto_20151114_0154.py | 0007_auto_20151114_0154.py | py | 1,131 | python | en | code | 3 | github-code | 13 |
7042094650 | import eqsig
import numpy as np
import tempfile
import o3seespy as o3
from o3seespy import extensions
import os
def get_inelastic_response(tmp_file, mass, k_spring, f_yield, motion, dt, xi=0.05, r_post=0.0):
    """Run a nonlinear SDOF time-history analysis with OpenSees.

    A bilinear (Steel01) zero-length spring connects a fixed base node to
    a node carrying ``mass``; the system is excited with the acceleration
    record ``motion`` sampled at ``dt``.  Rayleigh damping is set from the
    first eigenmode to give damping ratio ``xi``.  The generated OpenSees
    commands are written (compressed) to ``tmp_file``.

    Returns a dict of histories: 'time', 'rel_disp', 'rel_accel', 'force'.
    """
    osi = o3.OpenSeesInstance(ndm=2, state=3)

    # Establish nodes
    bot_node = o3.node.Node(osi, 0, 0)
    top_node = o3.node.Node(osi, 0, 0)

    # Fix bottom node
    o3.Fix3DOF(osi, top_node, o3.cc.FREE, o3.cc.FIXED, o3.cc.FIXED)
    o3.Fix3DOF(osi, bot_node, o3.cc.FIXED, o3.cc.FIXED, o3.cc.FIXED)
    # Set out-of-plane DOFs to be slaved
    o3.EqualDOF(osi, top_node, bot_node, [o3.cc.DOF_Y, o3.cc.DOF2D_ROTZ])

    # nodal mass (weight / g):
    o3.Mass(osi, top_node, mass, 0., 0.)

    # Define material
    bilinear_mat = o3.uniaxial_material.Steel01(osi, fy=f_yield, e0=k_spring, b=r_post)

    # Assign zero length element, # Note: pass actual node and material objects into element
    o3.element.ZeroLength(osi, [bot_node, top_node], mats=[bilinear_mat], dirs=[o3.cc.DOF2D_X], r_flag=1)

    # Define the dynamic analysis
    values = list(-1 * motion)  # should be negative
    acc_series = o3.time_series.Path(osi, dt, values)
    o3.pattern.UniformExcitation(osi, o3.cc.X, accel_series=acc_series)

    # set damping based on first eigen mode (stiffness-proportional only)
    angular_freq2 = o3.get_eigen(osi, solver='fullGenLapack', n=1)
    if hasattr(angular_freq2, '__len__'):
        angular_freq2 = angular_freq2[0]
    angular_freq = angular_freq2 ** 0.5
    beta_k = 2 * xi / angular_freq
    o3.rayleigh.Rayleigh(osi, alpha_m=0.0, beta_k=beta_k, beta_k_init=0.0, beta_k_comm=0.0)

    # Run the dynamic analysis
    o3.wipe_analysis(osi)

    newmark_gamma = 0.5
    newmark_beta = 0.25

    # NOTE(review): Newton is configured twice; the second call (after
    # constraints) is the effective one — confirm intentional.
    o3.algorithm.Newton(osi)
    o3.constraints.Transformation(osi)
    o3.algorithm.Newton(osi)
    o3.numberer.RCM(osi)
    o3.system.SparseGeneral(osi)
    o3.integrator.Newmark(osi, newmark_gamma, newmark_beta)
    o3.analysis.Transient(osi)

    o3.test_check.EnergyIncr(osi, tol=1.0e-10, max_iter=10)
    analysis_time = (len(values) - 1) * dt
    analysis_dt = 0.001
    outputs = {
        "time": [],
        "rel_disp": [],
        "rel_accel": [],
        "force": []
    }
    o3.record(osi)
    curr_time = o3.get_time(osi)
    # Step the analysis, sampling the response at the start of each step.
    while curr_time < analysis_time:
        outputs["time"].append(curr_time)
        outputs["rel_disp"].append(o3.get_node_disp(osi, top_node, o3.cc.X))
        outputs["rel_accel"].append(o3.get_node_accel(osi, top_node, o3.cc.X))
        o3.gen_reactions(osi)
        outputs["force"].append(-o3.get_node_reaction(osi, bot_node, o3.cc.X))  # Negative since diff node
        o3.analyze(osi, 1, analysis_dt)
        curr_time = o3.get_time(osi)
    o3.wipe(osi)
    o3.extensions.to_py_file(osi, ofile=tmp_file, compress=True)
    return outputs
def test_can_compress_py_file():
    """End-to-end check that the generated o3seespy command file compresses.

    Runs a small inelastic SDOF analysis, dumps the compressed command file,
    then verifies the output has the expected line count and that repeated
    commands were folded into at least one ``for`` loop.
    """
    mass = 1.0
    f_yield = 1.5  # Reduce this to make it nonlinear
    r_post = 0.0
    rec = 0.3 * np.sin(np.linspace(0, 2, 10))  # short synthetic ground motion
    k_spring = 4 * np.pi ** 2
    tmp_file = tempfile.NamedTemporaryFile(delete=False).name
    outputs = get_inelastic_response(tmp_file, mass, k_spring, f_yield, rec, dt=0.01, xi=0.05, r_post=r_post)
    # FIX: read via a context manager so the handle is closed before unlink
    # (the previous bare open() leaked the file object).
    with open(tmp_file) as compressed_file:
        ofile = compressed_file.read()
    os.unlink(tmp_file)
    assert len(ofile.splitlines()) == 34
    assert 'for ' in ofile
def test_get_fn_name_and_args():
    """The parser should split a call string into its name and raw arguments."""
    call_line = "node(0.0, 1.0, 2, 'water')"
    fn_name, parsed = o3.extensions._get_fn_name_and_args(call_line)
    assert fn_name == 'node'
    # Positional values keep their parsed types; the quoted string stays raw.
    for pos, expected in enumerate([0.0, 1.0, 2, "'water'"]):
        assert parsed[pos] == expected
def test_build_logic_formula():
    """Combining two call lines should express the changing argument via `i`."""
    base = "node(0.0, 1.0, 2, 'water')"
    stepped_up = "node(0.0, 2.0, 2, 'water')"
    assert o3.extensions._build_logic_formula(base, stepped_up) == "node(0.0, 1.0 + 1.0 * i, 2, 'water')"
    stepped_down = "node(0.0, 0.0, 2, 'water')"
    # Note the library's asymmetric spacing for the decreasing case.
    assert o3.extensions._build_logic_formula(base, stepped_down) == "node(0.0, 1.0 -1.0 * i, 2, 'water')"
def test_compress_opy_lines():
    """Smoke test: a run of sequential opy commands should be compressible."""
    commands = ['opy.nodeCoord({})'.format(node_num) for node_num in range(1, 7)]
    new_commands = o3.extensions.compress_opy_lines(commands)
    print(new_commands)
if __name__ == '__main__':
    # Ad-hoc runner: uncomment an individual test to debug it outside pytest.
    # test_get_fn_name_and_args()
    # test_can_compress_py_file()
    # test_build_logic_formula()
    test_compress_opy_lines()
| o3seespy/o3seespy | tests/test_extensions.py | test_extensions.py | py | 4,454 | python | en | code | 16 | github-code | 13 |
33946993135 | """
Зробити такі вибірки з отриманої бази даних:
Знайти 5 студентів із найбільшим середнім балом з усіх предметів.
Знайти студента із найвищим середнім балом з певного предмета.
Знайти середній бал у групах з певного предмета.
Знайти середній бал на потоці (по всій таблиці оцінок).
Знайти які курси читає певний викладач.
Знайти список студентів у певній групі.
Знайти оцінки студентів у окремій групі з певного предмета.
Знайти середній бал, який ставить певний викладач зі своїх предметів.
Знайти список курсів, які відвідує студент.
Список курсів, які певному студенту читає певний викладач.
"""
import sqlite3
from random import randint, choice
from faker import Faker
fake = Faker("uk-UA")  # Ukrainian-locale faker used later to seed the tables

# Rebuild the schema from scratch so the script can be re-run on the same
# database file.  NOTE: SQLite only enforces the declared foreign keys when
# PRAGMA foreign_keys=ON is issued per connection; here they are declarative.
conn = sqlite3.connect("../university.db")
cursor = conn.cursor()

# Students table
cursor.execute("""DROP TABLE IF EXISTS students""")
cursor.execute(
    """
    CREATE TABLE IF NOT EXISTS students (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        name TEXT NOT NULL,
        group_id INTEGER,
        FOREIGN KEY (group_id) REFERENCES groups (id)
    )
    """
)

cursor.execute("""DROP TABLE IF EXISTS groups""")
# Groups table
# FIX: `name` previously had no declared type (plain "name NOT NULL");
# TEXT added for consistency with every other table.
cursor.execute(
    """
    CREATE TABLE IF NOT EXISTS groups (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        name TEXT NOT NULL
    )"""
)

cursor.execute("""DROP TABLE IF EXISTS teachers""")
# Teachers table
cursor.execute(
    """
    CREATE TABLE IF NOT EXISTS teachers (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        name TEXT NOT NULL
    )"""
)

cursor.execute("""DROP TABLE IF EXISTS subjects""")
# Subjects table; each subject is taught by exactly one teacher
cursor.execute(
    """
    CREATE TABLE IF NOT EXISTS subjects (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        subject TEXT NOT NULL,
        teacher_id INTEGER,
        FOREIGN KEY (teacher_id) REFERENCES teachers(id)
    )"""
)

cursor.execute("""DROP TABLE IF EXISTS grades""")
# Grades table; one row per (student, subject, date) mark
cursor.execute(
    """
    CREATE TABLE IF NOT EXISTS grades (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        student_id INTEGER,
        subject_id INTEGER,
        grade INTEGER,
        date DATETIME,
        FOREIGN KEY (student_id) REFERENCES students(id) ON UPDATE CASCADE,
        FOREIGN KEY (subject_id) REFERENCES subjects(id) ON UPDATE CASCADE
    )"""
)

# Persist the schema and close this connection; the seeding code below
# opens its own connection.
conn.commit()
conn.close()
# Re-open the database and seed it with fake data inside one transaction
# (the context manager commits on success).
with sqlite3.connect("../university.db") as con:
    cur = con.cursor()

    # 3 groups with random single-word names.
    sql_to_groups = """INSERT INTO groups (name) VALUES (?)"""
    for _ in range(1, 4):
        cur.execute(sql_to_groups, (fake.word(),))

    # 5 teachers.
    sql_to_teachers = """INSERT INTO teachers (name) VALUES (?)"""
    for _ in range(1, 6):
        cur.execute(sql_to_teachers, (fake.name(),))

    # 8 subjects, each assigned to a random existing teacher (ids 1..5).
    sql_to_subjects = """INSERT INTO subjects (subject, teacher_id) VALUES (?,?)"""
    for _ in range(1, 9):
        cur.execute(sql_to_subjects, (fake.word(), randint(1, 5),))

    # 10 students per group; each student gets 3 grades in each of the
    # 8 subjects.  NOTE: RETURNING requires SQLite >= 3.35.
    for group_id in range(1, 4):
        for _ in range(10):
            cur.execute("INSERT INTO students (name, group_id) VALUES (?, ?) RETURNING id",
                        (fake.name(), group_id))
            student_id = cur.fetchone()[0]
            for subject_id in range(1, 9):
                for _ in range(3):
                    cur.execute(
                        "INSERT INTO grades (student_id, subject_id, grade, date) VALUES (?, ?, ?, ?)",
                        (student_id, subject_id, randint(0, 100), fake.date_this_decade(),))
| F1r3n25/hw6 | create_table.py | create_table.py | py | 4,398 | python | uk | code | 0 | github-code | 13 |
10419599363 | import pickle
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib_venn import venn3
from util import get_args
def main(args):
    """Find sentences where each model's prediction misses the human score.

    A prediction "fails" when it deviates from the human average by at least
    ``args.threshold``.  The three failure sets are visualised as a Venn
    diagram and the failing rows are written to per-model CSV files.
    """
    sis_df = pd.read_csv('./data/sis.csv')
    # NOTE(review): `ling_fts` is loaded but never used below -- confirm
    # whether it can be dropped.  Either way, close the handle properly
    # (the previous bare open() leaked it).
    with open('./ling_fts_new_df.p', 'rb') as pickle_file:
        ling_fts = pickle.load(pickle_file)

    bert_fail_idx = []
    ft_fail_idx = []
    hybrid_fail_idx = []
    for index, row in sis_df.iterrows():
        human_score = row['sentence average score']
        bert_score = row['bert prediction']
        ft_score = row['ft prediction']
        hybrid_score = row['hybrid prediction']
        if abs(human_score - bert_score) >= args.threshold:
            bert_fail_idx.append(index)
        if abs(human_score - ft_score) >= args.threshold:
            ft_fail_idx.append(index)
        if abs(human_score - hybrid_score) >= args.threshold:
            hybrid_fail_idx.append(index)

    set_bert = set(bert_fail_idx)
    set_ft = set(ft_fail_idx)
    set_hybrid = set(hybrid_fail_idx)
    # Sizes of the 7 disjoint regions of the 3-set Venn diagram
    # (digits encode membership in BERT/FT/Hybrid respectively).
    set_100 = set_bert.difference(set_ft).difference(set_hybrid)
    set_010 = set_ft.difference(set_bert).difference(set_hybrid)
    set_001 = set_hybrid.difference(set_bert).difference(set_ft)
    set_110 = set_bert.intersection(set_ft).difference(set_hybrid)
    set_101 = set_bert.intersection(set_hybrid).difference(set_ft)
    set_011 = set_hybrid.intersection(set_ft).difference(set_bert)
    set_111 = set_bert.intersection(set_ft).intersection(set_hybrid)

    venn3(
        subsets=(len(set_100), len(set_010), len(set_110), len(set_001), len(set_101), len(set_011), len(set_111)),
        set_labels=('BERT', 'FT', 'Hybrid'))
    venn_name = './figures/error_analysis_venn.pdf'
    # BUG FIX: plt.savefig takes the output path as its first argument; the
    # old call plt.savefig(fig, venn_name) passed the diagram object as the
    # filename and raised at runtime.
    plt.savefig(venn_name)

    sis_df.iloc[bert_fail_idx].to_csv('./data/error_cases/bert_error_cases.csv', index=False)
    sis_df.iloc[ft_fail_idx].to_csv('./data/error_cases/ft_error_cases.csv', index=False)
    sis_df.iloc[hybrid_fail_idx].to_csv('./data/error_cases/hybrid_error_cases.csv', index=False)
if __name__ == '__main__':
    # Parse CLI options (threshold etc.) and run the error analysis.
    args = get_args()
    main(args)
| jadeleiyu/symmetry_inference | model_evaluation.py | model_evaluation.py | py | 2,142 | python | en | code | 0 | github-code | 13 |
39702778067 | #!/usr/bin/env python3
####################################################
### Installation script for the Juelich KKR code ###
####################################################
# import modules
import os
import sys
import getopt
import shutil
##########################################################################
#global settings:
codeslist = ['kkrhost', 'kkrimp', 'voronoi', 'pkkprime', 'rhoq'] # still to add 'kkrsusc'
##########################################################################
def greeting():
    """Print the welcome banner, documentation links and supported codes."""
    intro = [
        "***********************************************************",
        "Welcome to the Installation script of the Juelich KKR codes.",
        "\nYou can find useful additional information in our KKR wiki: \033[4mhttps://kkr.iff.kfa-juelich.de/doku.php\033[0m",
        "and in our code's documentation: \033[4mhttps://kkr.iffgit.fz-juelich.de/jukkr/index.html\033[0m",
        "\nCurrently this script only supports individual builds of one of these codes:",
    ]
    for line in intro:
        print(line)
    for code in codeslist:
        print(" * {}".format(code))
    print("\nTo build a different code please go to the source/code subdirectory and build the code as in a previous version (see wiki).\n")
    print("***********************************************************\n")
##########################################################################
def goodbye():
    """Print the closing instructions for compiling the configured build."""
    closing = "\n".join([
        "\n***********************************************************",
        "Congratulations you have now set up the build directory.",
        "To compile the code please go to the build directory and type",
        " 'make'",
        " or",
        " 'make -j n'",
        " where n is the number of tasks you want to use in the parallel compilation process.",
        "***********************************************************",
    ])
    print(closing)
##########################################################################
def usage():
    """Print the command-line help text and terminate the program."""
    help_lines = (
        "Acceptable arguments:",
        " -h or --help Print this message and exit.",
        " -i or --interactive Use interactive installation script asking the user for input.",
        " -v or --verbose Verbose mode.",
        " -d or --debug Set up debug mode of the code.",
        " --program=name Set up build of this program. Available are: {}".format(codeslist),
        " --machine=name Use a predefined set of settings for a specific machine where 'name' is one of 'iff', 'claix', 'jureca'.",
        " --compiler=name Use a specific compiler, 'name' could for example be 'mpiifort' or 'gfortran'.",
        " --parallelization=scheme Use either MPI, OpenMP or both (hybrid) parallelization: 'scheme should be one of 'mpi', 'openmp', 'hybrid'.",
        " --flags=flag1,flag2 Add cmake flags manually (can be combined with either -m or the settings with -c and -p).",
    )
    for line in help_lines:
        print(line)
    sys.exit()
##########################################################################
def read_interactive(flags, codeslist):
    """Interactively prompt for code name, compiler, parallelization and flags.

    :param flags: list of cmake flag strings collected so far; extended in place.
    :param codeslist: valid code names the user may choose from.
    :return: tuple (compiler or None, parallelization, flags, code)
    """
    print("Please input the the codename of the code you want to compile (one of {}).".format(codeslist))
    while True:
        code = input()
        if code not in codeslist:
            print("your input is not a valid code name. Please chose a valid scheme.")
        else:
            break
    print("Please input the compiler name (e.g. 'gfortran', 'ifort', 'mpiifort'). Empty input will try the system's default.")
    compiler = input()
    if compiler == '': compiler = None
    print("Please input the parallelization scheme ('serial', 'mpi', 'openmp', 'hybrid')")
    while True:
        parallelization = input()
        if parallelization not in ['serial', 'mpi', 'openmp', 'hybrid']:
            print("your input is not a valid parallelization mode. Please chose a valid scheme.")
        else:
            break
    print("Please input additional flags for cmake (empty input skips adding additional flags)")
    while True:
        print("Next flag? (empty line exists read-in)")
        tmpinput = input()
        if tmpinput == '':
            break
        else:
            # Parallelization is controlled by the dedicated prompt above, so
            # drop any ENABLE_MPI/ENABLE_OMP flag the user types here.
            if 'enable_mpi' in tmpinput.lower() or 'enable_omp' in tmpinput.lower():
                # BUG FIX: the placeholder was never filled -- the old print
                # showed a literal "{}" instead of the rejected flag.
                print("Skipping flag {} since it messes with the parallelization settings.".format(tmpinput))
            else:
                flags.append(tmpinput)
    print("Use debug flags for compilation? (leave empty for 'Release' build)")
    debug = input()
    if debug != '':
        flags.append("CMAKE_BUILD_TYPE=Debug")
    print("Summary of inputs:\n----------\n Compiler: {}\n Parallelization scheme: {}\n Cmake flags: {}\n".format(compiler, parallelization, flags))
    return compiler, parallelization, flags, code
##########################################################################
def read_machine(flags, mname):
    """Load compiler/parallelization presets for a named machine.

    Not implemented yet: prints a notice and aborts the program.

    :param flags: cmake flags collected so far (currently unused).
    :param mname: machine name, e.g. 'iff', 'claix' or 'jureca' (currently unused).
    """
    print("read_machine: Not implemented yet.")
    # FIX: removed the unreachable `return compiler, parallelization, flags`
    # that referenced names never defined in this scope.
    sys.exit()
##########################################################################
def check_dependencies(verbose):
    """Verify that the required build tools are on PATH.

    Aborts the program when 'make' or 'cmake' cannot be found.

    :param verbose: accepted for interface consistency; currently unused.
    """
    print("check-dependencies...")
    # FIX: the make/cmake checks were copy-pasted duplicates (with an
    # "availabale" typo in the prompt); loop over the tools instead.
    for tool in ('make', 'cmake'):
        print("{} available?".format(tool))
        if shutil.which(tool) is None:
            print("Command '{}' not found. Maybe you have to import some modules?".format(tool))
            sys.exit()
        print("OK")
##########################################################################
def create_build_dir(verbose):
    """Create a fresh 'build' directory, archiving any existing one.

    An existing 'build' is renamed to the first free 'build_<n>' instead of
    being deleted.  With verbose=True the user must confirm each step; any
    answer containing 'n' aborts the program.
    """
    # check for existence of build dir
    if 'build' in os.listdir('.'):
        # find the lowest build_<n> name that is still free
        i = 1
        oldbuild = 'build_{}'.format(i)
        while oldbuild in os.listdir('.'):
            i += 1
            oldbuild = 'build_{}'.format(i)
        print("Found old build directory. Moving this to {}".format(oldbuild))
        if verbose:
            print("Continue (will rename old build dir)? [Y/n]")
            answer = input()
            if 'n' in answer.lower(): sys.exit()
        shutil.move('build', oldbuild)
    # create build dir
    if verbose:
        print("Continue (will create build dir)? [Y/n]")
        answer = input()
        if 'n' in answer.lower(): sys.exit()
    os.makedirs('build')
##########################################################################
def run_cmake(compiler, parallelization, flags, verbose, code):
    """Assemble and execute the cmake configure command inside ./build.

    :param compiler: Fortran compiler exported via FC, or None for the default.
    :param parallelization: one of 'serial', 'mpi', 'openmp', 'hybrid' (or None).
    :param flags: extra cmake cache entries, given without the -D prefix.
    :param verbose: if True, ask for confirmation before running cmake.
    :param code: program to compile; must be one of the supported code names.
    """
    from subprocess import call
    # start creating cmake task
    task = "cd build && "
    # add compiler if given
    if compiler is not None:
        task += "FC=$(which {}) ".format(compiler)
    # add cmake command
    task += "cmake "
    # add parallelization flags
    if parallelization=='hybrid':
        task += "-DENABLE_MPI=ON "
        task += "-DENABLE_OMP=ON "
    elif parallelization=='openmp':
        task += "-DENABLE_MPI=OFF "
        task += "-DENABLE_OMP=ON "
    elif parallelization=='mpi':
        task += "-DENABLE_MPI=ON "
        task += "-DENABLE_OMP=OFF "
    else:
        # 'serial' or None: disable both parallel backends
        task += "-DENABLE_MPI=OFF "
        task += "-DENABLE_OMP=OFF "
    # add code compile flags: exactly one COMPILE_* switch is turned ON
    comp_host = 'OFF'
    comp_imp = 'OFF'
    comp_susc = 'OFF'
    comp_pkkr = 'OFF'
    comp_voro = 'OFF'
    comp_rhoq = 'OFF'
    if code=='kkrhost':
        comp_host = 'ON'
    elif code=='kkrimp':
        comp_imp = 'ON'
    elif code=='kkrsusc':
        comp_susc = 'ON'
    elif code=='pkkprime':
        comp_pkkr = 'ON'
    elif code=='voronoi':
        comp_voro = 'ON'
    elif code=='rhoq':
        comp_rhoq = 'ON'
    else:
        # code is None or unknown (e.g. --program was omitted): abort
        print("Error setting code option!")
        sys.exit()
    task += " -DCOMPILE_KKRHOST={} ".format(comp_host)
    task += " -DCOMPILE_KKRIMP={} ".format(comp_imp)
    task += " -DCOMPILE_KKRSUSC={} ".format(comp_susc)
    task += " -DCOMPILE_PKKPRIME={} ".format(comp_pkkr)
    task += " -DCOMPILE_VORONOI={} ".format(comp_voro)
    task += " -DCOMPILE_RHOQ={} ".format(comp_rhoq)
    # add additional flags if given
    for flag in flags:
        task += "-D{} ".format(flag)
    # finalize task and run
    task += '..'
    print("\nNow run cmake command: '{}'".format(task))
    print("\n***********************************************************\n")
    if verbose:
        print("Continue (will run cmake command)? [Y/n]")
        answer = input()
        if 'n' in answer.lower(): sys.exit()
    call(task, shell=True)
##########################################################################
def main(argv):
    """Parse command-line options, then configure the build directory via cmake.

    :param argv: argument list without the program name (sys.argv[1:]).
    """
    # print greeting lines
    greeting()
    # process script options
    try:
        if len(argv)==0: usage()
        opts, args = getopt.getopt(argv, "ivhdm", ["interactive", "verbose", "help", "debug", "machine=", "compiler=", "parallelization=", "flags=","program="])
    except getopt.GetoptError as e:
        print("Error: {}\n".format(e))
        usage()
    # define defaults
    compiler = None
    parallelization = None
    flags = []
    verbose = False
    code = None
    # first check for vebosity level (determines additional printing)
    for opt, arg in opts:
        if opt in ("-v", "--verbose"):
            verbose = True
    # BUG FIX: `opts` is a list of (option, value) tuples, so the old test
    # `"-i" not in opts` was always true and --flags/--program were consumed
    # even in interactive mode.  Test against the option names instead.
    option_names = [name for name, _ in opts]
    interactive = "-i" in option_names or "--interactive" in option_names
    # now deal with different input options:
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
        elif opt in ("-i", "--interactive"):
            compiler, parallelization, flags, code = read_interactive(flags, codeslist)
        elif opt=="--machine":
            compiler, parallelization, flags = read_machine(flags, args)
        else: # read in options from compiler and parallelization info
            if opt=="--compiler":
                compiler = arg
                if compiler=='': compiler = None
            if opt=="--parallelization":
                parallelization = arg
                if parallelization=='': parallelization = None
            # finally allow to add flags additionally
            if not interactive:
                if opt=="--flags":
                    for iarg in arg.split(','):
                        flags.append(iarg)
                if opt=="--program":
                    code = arg
                    if code not in codeslist:
                        print("your input ({}) is not a valid code name. Please chose a valid scheme.".format(code))
                        print("Supported codes: {}".format(codeslist))
            if opt in ("-d", "--debug"):
                flags.append("CMAKE_BUILD_TYPE=Debug")
    # create the build directory ...  (FIX: the step comments were swapped
    # relative to the calls below)
    create_build_dir(verbose)
    # ... then make sure make/cmake are available
    check_dependencies(verbose)
    # run cmake with previously read-in options
    run_cmake(compiler, parallelization, flags, verbose, code)
    # print final messages
    goodbye()
##########################################################################
# run script (strip the program name from argv)
if __name__ == "__main__":
    main(sys.argv[1:])
| JuDFTteam/JuKKR | install.py | install.py | py | 11,093 | python | en | code | 5 | github-code | 13 |
42231872922 | # imports -----------------------------------------
import OpenGL
from OpenGL.GLU import gluLookAt
OpenGL.ERROR_ON_COPY = True
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.constants import GLfloat
import sys, os, random, time
from math import sin, cos, sqrt, pi
import numpy as np
# view settings
(view_rotx, view_roty, view_dist)=(-40.0, 90.0, 2000.0)
# which index from data to draw
curIndex = 0
# change view angle, exit upon ESC
def key(k, x, y):
    """GLUT keyboard callback: zoom with 'a'/'z', step frames with space, quit on ESC.

    :param k: the key that was pressed.
    :param x: mouse x position at the time of the event (unused).
    :param y: mouse y position at the time of the event (unused).
    """
    global view_dist, curIndex
    # NOTE(review): under Python 3 PyOpenGL typically delivers `k` as a bytes
    # object (b'a'), in which case these str comparisons never match --
    # confirm which PyOpenGL/GLUT version this runs against.
    if k == 'a':
        # zoom in
        view_dist *= 0.9
    elif k == 'z':
        # zoom out
        view_dist *= 1.1
    elif k == ' ':
        # advance to the next recorded frame
        curIndex += 1
    elif ord(k) == 27:  # Escape
        glutDestroyWindow(glutGetWindow())
        glutLeaveMainLoop()
        return
    else:
        return
    glutPostRedisplay()
# change view angle
def special(k, x, y):
    """GLUT special-key handler: arrow keys rotate the view by 5 degrees."""
    global view_rotx, view_roty
    # (delta_rotx, delta_roty) per arrow key
    rotation_steps = {
        GLUT_KEY_UP: (5.0, 0.0),
        GLUT_KEY_DOWN: (-5.0, 0.0),
        GLUT_KEY_LEFT: (0.0, 5.0),
        GLUT_KEY_RIGHT: (0.0, -5.0),
    }
    if k not in rotation_steps:
        return
    dx, dy = rotation_steps[k]
    view_rotx += dx
    view_roty += dy
    glutPostRedisplay()
# new window size or exposure
def reshape(width, height):
    """GLUT reshape callback: reset viewport and projection for the new size."""
    # aspect ratio of the window, used to keep the frustum undistorted
    h = float(height) / float(width);
    glViewport(0, 0, width, height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    # symmetric frustum; near=50, far=50000 to fit the whole scene
    glFrustum(-1.0, 1.0, -h, h, 50.0, 50000.0)
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
def initGL():
    """One-time OpenGL state setup: additive blending, no culling or depth test."""
    # set various openGL params
    glDisable(GL_CULL_FACE)
    glDisable(GL_DEPTH_TEST)
    glEnable (GL_BLEND)
    # additive blending so overlapping line segments brighten each other
    glBlendFunc (GL_SRC_ALPHA, GL_ONE)
    glEnable(GL_NORMALIZE)
def draw():
    """GLUT display callback: draw the current DNA frame, the pore ring and a HUD.

    Reads the module globals `drawData` (frames x vertices x 3 array),
    `drawParams` (expects 'pore_r' and 'pore_z') and the view settings.
    """
    global curIndex, drawData, drawParams
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    if 'pore_z' in drawParams:
        pz = drawParams['pore_z']
    else:
        pz = 0
    glPushMatrix()
    # camera: orbit about the origin, scene shifted so the pore is centred
    gluLookAt(0, view_dist, 0, 0, 0, 0, 0, 0, 1);
    glRotatef(view_rotx, 1, 0, 0)
    glRotatef(view_roty, 0, 0, 1)
    glTranslatef(0,0,-pz)
    # then the DNA
    glColor(1,1,1,1)
    glBegin(GL_LINE_STRIP)
    # wrap around when stepping past the last recorded frame
    if curIndex >= drawData.shape[0]:
        curIndex = 0
    for vert in drawData[curIndex,:,:]:
        glVertex3f(vert[0],vert[1],vert[2])
    glEnd()
    # and a pore, if needed
    # NOTE(review): this branch is hard-coded on and reads 'pore_r'/'pore_z'
    # unconditionally, although `pz` above treats 'pore_z' as optional --
    # confirm whether drawParams may ever lack these keys.
    if True:
        glColor(1,0,0,1)
        r = drawParams['pore_r']
        z = drawParams['pore_z']
        glBegin(GL_LINE_STRIP)
        # 20-point ring approximating the pore circle at height z
        for th in np.linspace(0,2*np.pi,20):
            glVertex3f(r*np.cos(th),r*np.sin(th),z)
        glEnd()
    glPopMatrix()
    # now draw things in screen coords
    # GL projects rasterpos, so need to set appropriate proj mat
    glMatrixMode(GL_PROJECTION);
    glPushMatrix();
    glLoadIdentity();
    glOrtho(0,1,0,1,-1, 1);
    glMatrixMode(GL_MODELVIEW);
    glPushMatrix();
    glLoadIdentity();
    glColor(1,1,1,1)
    glRasterPos2f(0.02, 0.02);
    # loop through string and spit out per char
    debugstr = "Index: " + str(curIndex)
    for c in debugstr:
        glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(c));
    # restore both matrix stacks
    glMatrixMode(GL_PROJECTION);
    glPopMatrix()
    glMatrixMode(GL_MODELVIEW);
    glPopMatrix()
    glutSwapBuffers()
    glutPostRedisplay()
def drawData(data, params=None):
    """Store the trajectory to display and start the GLUT main loop (blocking).

    :param data: array of shape (frames, vertices, 3); copied into the module
        global `drawData` that draw() reads.
    :param params: dict with pore geometry ('pore_r', 'pore_z') read by draw().
    """
    global drawData,drawParams
    # NOTE(review): this rebinds the module name `drawData` (this function!)
    # to the array, so the viewer can only be launched once per process.
    drawData = np.copy(data)
    drawParams = params
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH)
    glutInitWindowPosition(0, 0)
    glutInitWindowSize(500, 500)
    glutCreateWindow("DNA Monte Carlo")
    # keep Python running after the window closes instead of exiting the process
    glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_CONTINUE_EXECUTION)
    glutSetOption(GLUT_ACTION_GLUTMAINLOOP_RETURNS, GLUT_ACTION_CONTINUE_EXECUTION)
    initGL()
    glutDisplayFunc(draw)
    glutReshapeFunc(reshape)
    glutKeyboardFunc(key)
    glutSpecialFunc(special)
    glutMainLoop()
| tszalay/DNA-MCMC | Python/sim_draw.py | sim_draw.py | py | 3,819 | python | en | code | 0 | github-code | 13 |
37535280973 | from readMail import *
from config import FROM_EMAIL,EXCEL_CONFIG,CONTENT_EMAIL,LOGO
from excel_manager import *
from sendMail import *
from pdf_gen import *
# Load the tracking spreadsheet: maps a session key to a dict holding the
# student's name, binary-encoded e-mail, send status and row index.
excel_dict = read_pdf_in(EXCEL_CONFIG)

for key, value in excel_dict.items():
    # Check if the row status is Not Sent
    if value['Status'] == 'Not Sent':
        # Make a list of firstnames and username in lowercase
        names = [i.lower() for i in value['User Name'].split(' ')]
        # Decrypt the Student Email (stored as space-separated binary codepoints)
        student_email = ''.join([chr(int(i, base=2)) for i in value['email'].split(' ')])
        print("User email decrypted as {}".format(student_email))
        # Dummy sender fixing code
        # student_email = 'shrishty123chandra@gmail.com'
        # Creating student subject text
        student_subject = '{}_{}_{}'.format(key, *names)
        # Read the email from CONTENT_EMAIL with the subject and save the attachment
        sub_to_file = read_email_from_gmail(CONTENT_EMAIL, student_subject)
        # If the subject is in sub_to_file, then it means that the attachment is saved successfully
        if student_subject in sub_to_file:
            # Modify the saved file with logo and the text
            saved_file = gen_pdf(key, sub_to_file[student_subject][:-4], LOGO)
        else:
            # There has been error and it needs to be exited.
            print("No attachment found in the email")
            print("Exiting")
            break
        # Create the message output body
        msg_text = """Hey {},\n\n\nGood to see the progress, Please find the attachment of your previous session.\n\n\nThank you,\nJohn """.format(value['User Name'])
        # Subject to send to the email
        sub_to_send = "Your course material {}".format(key)
        # Send the email
        print("Sending mail to {}".format(student_email))
        send_mail(FROM_EMAIL, [student_email], sub_to_send, msg_text, saved_file)
        # Update the excel sheet
        print("Updating status in row {}".format(value['index']+2))
        update_excel(EXCEL_CONFIG, value['index']+2)

# NOTE(review): indentation was lost in this copy; the colour refresh and the
# final message are placed after the loop (run once) -- confirm against the
# original script.
# Update the color changes
print("Changing color as per statuses")
change_color(EXCEL_CONFIG)
# Print the final success message
print("Success !!") | Animesh420/automated_email | orchestra.py | orchestra.py | py | 2,300 | python | en | code | 0 | github-code | 13 |
def isMonotonic(array):
    """Return True if `array` is entirely non-increasing or non-decreasing.

    Arrays of length <= 2 (including empty) are trivially monotonic; equal
    adjacent elements never break monotonicity.  Runs in O(n) time.
    """
    # Idiomatic rewrite of the index/helper-based scan: compare each adjacent
    # pair once in each direction (dead commented-out debug print removed).
    pairs = list(zip(array, array[1:]))
    return all(a <= b for a, b in pairs) or all(a >= b for a, b in pairs)
def breaksDirection(direction, currentItem, nextItem):
    """Return True when the step currentItem -> nextItem opposes `direction`.

    A negative `direction` means the sequence has been increasing so far
    (direction is computed as earlier - later), so a positive step breaks it,
    and vice versa.  A zero step never breaks either direction.
    """
    step = currentItem - nextItem
    return step > 0 if direction < 0 else step < 0
if __name__ == "__main__":
    # Quick manual checks: flat/decreasing run (True), then two
    # non-monotonic examples (False, False).
    print(isMonotonic([-1, -5, -10, -1100, -1100, -1101, -9001]))
    print(isMonotonic([1, 2, 0]))
    print(isMonotonic([-1, -5, 10]))
| nssathish/python-dsa | algoexpert/IsMonoticArray_V2.py | IsMonoticArray_V2.py | py | 623 | python | en | code | 0 | github-code | 13 |
# Write a Python program to create and display all combinations of letters,
# selecting each letter from a different key in a dictionary.
# Sample data: {'1': ['a','b'], '2': ['c','d']}
import itertools

# Map each key to its list of candidate letters.
d = {'1': ['a', 'b'], '2': ['c', 'd']}

# Walk the Cartesian product of the per-key letter lists; sorting the keys
# fixes the position each key's letter occupies in every combination.
for letters in itertools.product(*(d[key] for key in sorted(d))):
    print(''.join(letters))
20511182787 | import hashlib
import OpenSSL
import base64
FLAG = 'y0uAr3_S00_K1raK1ra'
SECRET = 'pkuggg::vsylaesl'
with open('cert.pem', 'rb') as f:
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, f.read())
def getflag(token):
    """Derive the personalised flag for `token`.

    The serial is the first 8 hex characters of SHA-256(SECRET + token), so
    every token gets a distinct but reproducible flag string.
    """
    digest = hashlib.sha256((SECRET + token).encode()).hexdigest()
    return 'flag{{{}_{}}}'.format(FLAG, digest[:8])
def checktoken(token):
    """Validate a token of the form "<id>:<base64 signature>".

    Returns the id when the signature verifies against the module certificate;
    malformed tokens and bad signatures alike yield None.
    """
    try:
        uid, encoded_sig = token.split(':', 1)
        signature = base64.b64decode(encoded_sig, validate=True)
        # raises on verification failure
        OpenSSL.crypto.verify(cert, signature, uid.encode(), 'sha256')
    except Exception:
        return None
    return uid
40063655013 | import flask
from flask import request
import requests
# from markupsafe import Markup
# leftover debug output: shows which module provides `request`
print(request.__module__)
app = flask.Flask(__name__)
def topN(n=20, volume_threshold=1_000_000):
    """Return the `n` Binance symbols with the largest absolute 24h change.

    Pulls the 24hr ticker for every symbol, sorts by absolute percent change,
    drops symbols whose (base-asset) volume is below `volume_threshold`, and
    returns the top `n` symbol names.  The parameters generalize the previous
    hard-coded constants; defaults preserve the old behaviour.
    """
    # timeout added so a stalled exchange API cannot hang the page forever
    r = requests.get("https://api.binance.com/api/v3/ticker/24hr", timeout=10)
    stats = [[t.get('symbol'), t.get('priceChangePercent'), t.get('volume')]
             for t in r.json()]
    # FIX: the old local `topN = 20` shadowed this function's own name
    stats.sort(key=lambda row: abs(float(row[1])), reverse=True)
    liquid = [row for row in stats if float(row[2]) > volume_threshold]
    return [row[0] for row in liquid[:n]]
def MACD():
    """Placeholder for MACD-based filtering of symbols; not implemented yet."""
    pass
@app.route('/')
def index():
    """Render the grid page with one TradingView widget per top-mover symbol."""
    symbol = topN()
    elements = []
    for i in range(len(symbol)):
        # One TradingView embed snippet per symbol; literal JS braces are
        # escaped as {{ }} inside the f-string.
        # NOTE(review): every widget div reuses id "tradingview_6c7af" while
        # container_id is the loop index -- confirm the widgets attach to the
        # intended elements in the rendered page.
        elements.append(f"""
        <!-- TradingView Widget BEGIN -->
        <div class="tradingview-widget-container">
        <div id="tradingview_6c7af"></div>
        <script type="text/javascript" src="https://s3.tradingview.com/tv.js"></script>
        <script type="text/javascript">
        new TradingView.widget(
        {{
        "width": 490,
        "height": 305,
        "symbol": "BINANCE:{symbol[i]}",
        "interval": "15",
        "timezone": "Asia/Kolkata",
        "theme": "dark",
        "style": "1",
        "locale": "en",
        "toolbar_bg": "#f1f3f6",
        "enable_publishing": false,
        "hide_side_toolbar": true,
        "allow_symbol_change": true,
        "studies": [
        "Volume@tv-basicstudies"
        ],
        "container_id": "{i}"
        }}
        );
        </script>
        </div>
        <!-- TradingView Widget END -->""")
    # debug: number of widgets generated
    print(len(elements))
    return flask.render_template('creating_grid.html', elements=elements)
@app.route('/top',methods = ['GET' , 'POST'])
def tooop():
    """Render the top-movers page, honouring an optional ?Interval= query arg.

    NOTE(review): the route accepts POST, but `interval` is only assigned on
    GET -- a POST request would hit an undefined name below.  Confirm whether
    POST support is actually needed.
    """
    if request.method == 'GET':
        # leftover debugging hooks
        # import ipdb
        # ipdb.set_trace()
        # print(request.form['Interval'])
        # import ipdb
        # ipdb.set_trace()
        interval = request.args.get('Interval')
        # fall back to 15-minute candles when the query arg is missing/empty
        interval = interval if interval else '15'
        # else:
        # interval = '15'
    symbol = topN()
    elements = []
    for i in range(len(symbol)):
        # One TradingView embed per symbol; literal JS braces escaped as {{ }}.
        elements.append(f"""
        <!-- TradingView Widget BEGIN -->
        <div class="tradingview-widget-container">
        <div id="tradingview_6c7af"></div>
        <script type="text/javascript" src="https://s3.tradingview.com/tv.js"></script>
        <script type="text/javascript">
        new TradingView.widget(
        {{
        "width": 490,
        "height": 305,
        "symbol": "BINANCE:{symbol[i]}",
        "interval": "{interval}",
        "timezone": "Asia/Kolkata",
        "theme": "dark",
        "style": "1",
        "locale": "en",
        "toolbar_bg": "#f1f3f6",
        "enable_publishing": false,
        "hide_side_toolbar": true,
        "allow_symbol_change": true,
        "studies": [
        "Volume@tv-basicstudies"
        ],
        "container_id": "{i}"
        }}
        );
        </script>
        </div>
        <!-- TradingView Widget END -->""")
    # debug: number of widgets generated
    print(len(elements))
    return flask.render_template('top.html',elements=elements)
@app.route('/pinned')
def pinned():
    """Serve the static 'pinned charts' page."""
    return flask.render_template('pinned.html')
@app.route('/buy_sell')
def buy_sell():
    """Serve the static buy/sell page."""
    return flask.render_template('buy_sell.html')
@app.route('/positions')
def positions():
    """Serve the static open-positions page."""
    return flask.render_template('positions.html')
if __name__ == "__main__":
    # Development server; the reloader restarts the app on source changes.
    app.run(use_reloader=True)
    # app.run(host= '127.0.0.1', port = 3500)
| RobinSequeira/CryptoCurrencyScreener | app_grid.py | app_grid.py | py | 3,258 | python | en | code | 0 | github-code | 13 |
5529631391 | '''
Preprocessor for Foliant documentation authoring tool.
Converts EPS images to PNG format.
'''
import re
from pathlib import Path
from hashlib import md5
from subprocess import run, PIPE, STDOUT, CalledProcessError
from foliant.preprocessors.base import BasePreprocessor
class Preprocessor(BasePreprocessor):
    """Foliant preprocessor that converts referenced EPS images to PNG."""

    defaults = {
        'convert_path': 'convert',      # ImageMagick convert binary
        'cache_dir': Path('.epsconvertcache'),
        'image_width': 0,               # 0 means keep the original width
        'targets': [],                  # empty list = apply for every target
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Matches Markdown image refs whose path ends in .eps; the
        # (?!\:\/\/) lookahead rejects URLs so only local files are converted.
        self._source_img_ref_pattern = re.compile(
            r'\!\[(?P<caption>.*)\]\((?P<path>((?!\:\/\/).)*[^\/\s]+\.eps)\)'
        )
        self._cache_path = (self.project_path / self.options['cache_dir']).resolve()
        self._current_dir_path = self.working_dir.resolve()

        self.logger = self.logger.getChild('epsconvert')

        self.logger.debug(f'Preprocessor inited: {self.__dict__}')

    def _process_epsconvert(self, img_caption: str, img_path: str) -> str:
        """Convert one EPS reference and return the replacement reference.

        The PNG is cached under a hash of (image_width, source file bytes),
        so unchanged images are not re-converted on later builds.
        """
        source_img_path = (self._current_dir_path / img_path).resolve()
        self.logger.debug(f'Source image path: {source_img_path}')
        img_hash = md5(f'{self.options["image_width"]}'.encode())
        with open(source_img_path, 'rb') as source_img_file:
            source_img_file_body = source_img_file.read()
            img_hash.update(f'{source_img_file_body}'.encode())
        converted_img_path = (self._cache_path / f'{img_hash.hexdigest()}.png').resolve()
        self.logger.debug(f'Converted image path: {converted_img_path}')
        # NOTE(review): an empty replacement would strip the image from the
        # document entirely -- this line looks truncated in this copy; the
        # expected value is a Markdown ref pointing at converted_img_path.
        converted_img_ref = f''
        if converted_img_path.exists():
            self.logger.debug(f'Converted image already exists')
            return converted_img_ref
        converted_img_path.parent.mkdir(parents=True, exist_ok=True)
        resize_options = ''
        if self.options['image_width'] > 0:
            resize_options = f'-resize {self.options["image_width"]}'
        try:
            command = (
                f'{self.options["convert_path"]} ' +
                f'"{source_img_path}" ' +
                f'{resize_options} ' +
                f'"{converted_img_path}"'
            )
            self.logger.debug(f'Running the command: {command}')
            run(command, shell=True, check=True, stdout=PIPE, stderr=STDOUT)
            self.logger.debug(f'Converted image saved, width: {self.options["image_width"]} (0 means auto)')
        except CalledProcessError as exception:
            self.logger.error(str(exception))
            raise RuntimeError(
                f'Processing of image {img_path} failed: {exception.output.decode()}'
            )
        return converted_img_ref

    def process_epsconvert(self, content: str) -> str:
        """Replace every EPS image reference in `content` with its PNG version."""
        def _sub(source_img_ref) -> str:
            return self._process_epsconvert(
                source_img_ref.group('caption'),
                source_img_ref.group('path')
            )
        return self._source_img_ref_pattern.sub(_sub, content)

    def apply(self):
        """Entry point called by Foliant: process every Markdown file in working_dir."""
        self.logger.info('Applying preprocessor')

        self.logger.debug(f'Allowed targets: {self.options["targets"]}')
        self.logger.debug(f'Current target: {self.context["target"]}')

        # An empty targets list means "apply for every target".
        if not self.options['targets'] or self.context['target'] in self.options['targets']:
            for markdown_file_path in self.working_dir.rglob('*.md'):
                # image paths are resolved relative to each Markdown file
                self._current_dir_path = markdown_file_path.parent.resolve()

                with open(markdown_file_path, encoding='utf8') as markdown_file:
                    content = markdown_file.read()

                processed_content = self.process_epsconvert(content)

                if processed_content:
                    with open(markdown_file_path, 'w', encoding='utf8') as markdown_file:
                        markdown_file.write(processed_content)

        self.logger.info('Preprocessor applied')
| foliant-docs/foliantcontrib.epsconvert | foliant/preprocessors/epsconvert.py | epsconvert.py | py | 3,989 | python | en | code | 0 | github-code | 13 |
4075538437 | #!/usr/bin/env python3
# This worker now expects to receive an input file in runlist format (without
# the header).
import argparse
from pathlib import Path
import shutil
from subprocess import call
import sys
import time
from zeroworker import LockfileListReader, LockfileListWriter
# from zeroworker import ZmqListReader, ZmqListWriter
BASEDIR = '/global/cfs/cdirs/dune/www/data/Module2'
INDIR = f'{BASEDIR}/packetized'
OUTDIR = f'{BASEDIR}/charge_reco/v1'
def get_outpath_(path, outdir: str) -> Path:
    """Map a packet-file path under INDIR to its mirror path under `outdir`.

    The '-packet-' marker in the file name is replaced by '-reco-'; the
    directory structure relative to INDIR is preserved.
    """
    relpath = Path(path).relative_to(INDIR)
    # BUG FIX: the assertion message lacked the f prefix, so "{path}" was
    # printed literally instead of the offending file name (also fixes the
    # "does't" typo).
    assert relpath.name.find('-packet-') != -1, \
        f'"{path}" doesn\'t have "-packet-" in its name, are you sure it\'s a packet file?'
    outname = relpath.name.replace('-packet-', '-reco-')
    out_relpath = relpath.parent.joinpath(outname)
    # joinpath with an absolute `outdir` discards BASEDIR and anchors at
    # `outdir`, which is the intended behaviour here.
    return Path(BASEDIR).joinpath(outdir, out_relpath)
def get_outpath(path) -> Path:
    """Final reco-file destination for a packet file (under OUTDIR)."""
    return get_outpath_(path, OUTDIR)
def get_tmppath(path) -> Path:
    """Scratch reco-file path (OUTDIR + '.tmp') used while the flow runs."""
    return get_outpath_(path, OUTDIR+'.tmp')
def process(chargepath, lightpaths, config):
    """Run the reco flow for one charge file plus its light files.

    Writes to a temp path first and moves into place only on success, so a
    partially written file never lands at the final output path.

    Returns the subprocess return code (0 on success).
    """
    tmppath = get_tmppath(chargepath)
    tmppath.parent.mkdir(parents=True, exist_ok=True)
    tmppath.unlink(missing_ok=True)  # don't want to append!
    print(f'PROCESSING {chargepath}')
    # BUG FIX: the command previously interpolated the undefined name `path`
    # (a local of main(), not visible here) instead of `chargepath`.
    cmd = f'time ./run_module0_flow-{config}.sh {tmppath} {chargepath} {" ".join(lightpaths)}'
    retcode = call(cmd, shell=True)
    if retcode == 0:
        outpath = get_outpath(chargepath)
        outpath.parent.mkdir(parents=True, exist_ok=True)
        shutil.move(tmppath, outpath)
    return retcode
def main():
    """Worker loop: pull runlist lines, process each, log the return code."""
    sys.stdout.reconfigure(line_buffering=True)
    sys.stderr.reconfigure(line_buffering=True)
    ap = argparse.ArgumentParser()
    ap.add_argument('infile')
    ap.add_argument('-c', '--config', default='module3')
    ap.add_argument('--immortal', action='store_true')
    args = ap.parse_args()
    reader = LockfileListReader(args.infile)
    logger = LockfileListWriter(args.infile + '.done')
    with logger:
        while True:
            try:
                path = next(reader)
                # BUG FIX: process() takes (chargepath, lightpaths, config) but
                # was called with only two arguments, raising a TypeError.
                # A runlist line holds the charge file followed by any light
                # files, whitespace-separated.
                # NOTE(review): confirm the runlist column layout against the
                # producer of the input file.
                chargepath, *lightpaths = path.split()
                retcode = process(chargepath, lightpaths, args.config)
                logger.log(f'{path} {retcode}')
            except StopIteration:
                if args.immortal:
                    time.sleep(60)
                else:
                    break
# Script entry point: run the worker loop when invoked directly.
if __name__ == '__main__':
    main()
| lbl-neutrino/calibizer_job | calibizer_worker.py | calibizer_worker.py | py | 2,347 | python | en | code | 0 | github-code | 13 |
39792473322 | #
# TODO:
#
# Add closed connection and re-connection callbacks so callers are aware these
# things happened. The webhook needs this information to tell it to reprocess
# cached message files.
#
# This code is based on the async examples in the pika github repo.
# See https://github.com/pika/pika/tree/master/examples
import pika, pika.channel, pika.spec
from pika.adapters.asyncio_connection import AsyncioConnection
from pika.exchange_type import ExchangeType
import asyncio, json, logging, os
from enum import Enum, auto
# Broker connection settings; these env vars are required and a KeyError is
# raised at import time if any of them is missing.
_user = os.environ['RABBITMQ_DEFAULT_USER']
_passwd = os.environ['RABBITMQ_DEFAULT_PASS']
_host = os.environ['RABBITMQ_HOST']
_port = os.environ['RABBITMQ_PORT']
# %2F is the URL-encoded default vhost "/".
_amqp_url_str = f'amqp://{_user}:{_passwd}@{_host}:{_port}/%2F'
class State(Enum):
    """Lifecycle states of the RabbitMQ connection wrapper."""
    OPENING = auto()
    OPEN = auto()
    CLOSING = auto()
    CLOSED = auto()
class RabbitMQConnection(object):
    """ ======================================================================
    A RabbitMQConnection wraps a RabbitMQ connection and includes code to
    re-open the connection if it closes or the open fails.
    ====================================================================== """
    def __init__(self, channels):
        # channels: TxChannel/RxChannel objects to (re-)open whenever the
        # underlying connection becomes available.
        self._connection = None
        self.state = State.OPENING
        self._stopping = False  # True once stop() has been requested
        self.stopped = False    # True once the connection has fully closed
        self.channels = channels
        logging.debug(f'given channels {self.channels}')
    async def connect(self, delay=0):
        """
        Initiate a connection to RabbitMQ. The connection is not valid until
        self.on_connection_open() is called.

        :param delay: seconds to sleep before attempting to connect; used by
            the reconnect paths to back off between attempts.
        """
        self.state = State.OPENING
        if delay > 0:
            logging.debug(f'Waiting for {delay}s before connection attempt.')
            await asyncio.sleep(delay)
        logging.info(f'Connecting to {_host} as {_user}')
        return AsyncioConnection(
            pika.URLParameters(_amqp_url_str),
            on_open_callback=self.on_connection_open,
            on_open_error_callback=self.on_connection_open_error,
            on_close_callback=self.on_connection_closed)
    def on_connection_open(self, connection):
        """
        Now the connection is open a channel can be opened. Channels
        are 'virtual connections' where all operations are performed.
        """
        logging.info('Connection opened')
        self._connection = connection
        self.state = State.OPEN
        # Re-open every registered channel on the fresh connection.
        for z in self.channels:
            logging.info(f'Opening channel {z}, of type {type(z)}')
            z.open(self._connection)
    def on_connection_open_error(self, _unused_connection, err):
        """
        Opening a connection failed. If the calling code has not requested
        a shutdown via self.stop() then try to reconnect.
        Consider backing off the delay to some maximum value.
        """
        logging.error('Connection open failed: %s', err)
        if not self._stopping:
            # Retry after 30s unless a shutdown was requested.
            asyncio.create_task(self.connect(30))
        else:
            self.state = State.CLOSED
    def on_connection_closed(self, _unused_connection, reason):
        """
        The connection has closed. If the calling code has not requested
        a shutdown via self.stop() then try to reconnect assuming the closure
        is due to an error.
        Consider backing off the delay to some maximum value.
        """
        logging.warning('Connection closed: %s', reason)
        # Channels die with the connection; flag them so callers stop using them.
        for z in self.channels:
            z.is_open = False
        if not self._stopping:
            asyncio.create_task(self.connect(60))
        else:
            self.state = State.CLOSED
            self.stopped = True
    def stop(self) -> None:
        """
        Shut down the connection to RabbitMQ.
        Start by closing the channel. The channel closed callback
        will then ask to close the connection.
        """
        # Idempotent: ignore repeated stop() calls.
        if self._stopping or self.stopped:
            return
        self._stopping = True
        self.state = State.CLOSING
        # This closes the channels automatically.
        self._connection.close()
class TxChannel(object):
    """ ======================================================================
    A TxChannel wraps a RabbitMQ channel devoted to publishing messages to a
    single exchange.
    ====================================================================== """
    def __init__(self, exchange_name, exchange_type, on_ready=None, on_publish_ack=None):
        # on_ready: async callable invoked once the exchange is declared and
        # publishing is possible.
        # on_publish_ack: async callable invoked with the delivery tag when
        # the broker confirms a publish.
        self._exchange_name = exchange_name
        self._exchange_type = exchange_type
        self._on_ready = on_ready
        self._on_publish_ack = on_publish_ack
        self._channel: pika.channel.Channel = None
        self._message_number = 0  # counts published messages; doubles as delivery tag
        self.is_open = False
    def open(self, connection) -> None:
        # Ask pika for a new channel; setup continues in the callback chain:
        # channel -> confirms -> exchange declare -> ready.
        connection.channel(on_open_callback=self.on_channel_open)
    def on_channel_open(self, channel):
        logging.debug(f'Opened tx channel {channel} to server {_amqp_url_str}')
        self._channel = channel
        self._message_number = 0
        self._channel.add_on_close_callback(self.on_channel_closed)
        # Enable publisher confirms so deliveries get acknowledged by the broker.
        self._channel.confirm_delivery(self.on_delivery_confirmation)
        logging.debug(f'Declaring exchange {self._exchange_name}')
        self._channel.exchange_declare(
            exchange=self._exchange_name,
            exchange_type=self._exchange_type,
            durable=True,
            callback=self.on_exchange_declareok)
    def on_channel_closed(self, channel, reason):
        logging.debug(f'Channel {channel} to exchange {self._exchange_name} was closed: {reason}')
        self._channel = None
        self.is_open = False
    def on_exchange_declareok(self, method):
        logging.info(f'Exchange {self._exchange_name} declared ok, ready to send.')
        self.is_open = True
        if self._on_ready is not None:
            asyncio.create_task(self._on_ready(self))
    def publish_message(self, routing_key: str, message) -> int:
        """
        Publish a message to RabbitMQ.

        The message cannot be considered safely accepted by RabbitMQ until
        the on_publish_ack callback passed to the constructor of this class
        has been called.

        All messages are published persistently so they can survive a
        RabbitMQ server restart. The server is not meant to ack receipt
        of a message until it has been written to disk.

        :return: the message number of the published message, or None when
            the channel is not open and the message was dropped.
        """
        if self._channel is None or not self._channel.is_open:
            # NOTE(review): messages are silently dropped (implicit None
            # return) while the channel is down -- confirm callers handle it.
            return
        properties = pika.BasicProperties(
            app_id='broker',
            content_type='application/json',
            delivery_mode = pika.spec.PERSISTENT_DELIVERY_MODE
        )
        self._channel.basic_publish(self._exchange_name, routing_key,
                                    json.dumps(message, ensure_ascii=False),
                                    properties)
        self._message_number += 1
        return self._message_number
    def on_delivery_confirmation(self, method_frame):
        """
        pika calls this to notify that RabbitMQ has accepted a published message.
        The caller will be notified via the on_publish_ack callback passed to the
        constructor of this class.
        """
        if self._on_publish_ack is not None:
            asyncio.create_task(self._on_publish_ack(method_frame.method.delivery_tag))
class RxChannel(object):
    """ ======================================================================
    An RxChannel wraps a RabbitMQ channel devoted to receiving messages from a
    single exchange. A durable queue is declared.
    routing_key is not required for fanout exchanges, but must be set for
    direct exchanges.
    ====================================================================== """
    def __init__(self, exchange_name, exchange_type, queue_name, on_message, routing_key=None):
        # on_message: pika basic_consume callback invoked once per delivery.
        self._exchange_name = exchange_name
        self._exchange_type = exchange_type
        self._queue_name = queue_name
        self._routing_key = routing_key
        self._on_message = on_message
        self._channel: pika.channel.Channel = None
        self.is_open = False
    def open(self, connection) -> None:
        # Setup continues through the callback chain:
        # channel -> exchange declare -> queue declare -> bind -> consume.
        connection.channel(on_open_callback=self.on_channel_open)
    def on_channel_open(self, channel):
        logging.debug(f'Opened rx channel {channel} to server {_amqp_url_str}')
        self._channel = channel
        self._channel.add_on_close_callback(self.on_channel_closed)
        logging.debug(f'Declaring exchange {self._exchange_name}')
        self._channel.exchange_declare(
            exchange=self._exchange_name,
            exchange_type=self._exchange_type,
            durable=True,
            callback=self.on_exchange_declareok)
    def on_channel_closed(self, channel, reason):
        logging.warning(f'Channel {channel} to exchange {self._exchange_name} was closed: {reason}')
        self._channel = None
        self.is_open = False
    def on_exchange_declareok(self, method):
        logging.debug(f'Exchange {self._exchange_name} declared ok, declaring queue {self._queue_name}.')
        # Durable queue so pending messages survive a broker restart.
        self._channel.queue_declare(queue=self._queue_name, durable=True, callback=self.on_queue_declareok)
    def on_queue_declareok(self, q_declare_ok):
        logging.debug(f'Binding queue {self._queue_name} to exchange {self._exchange_name} with routing key {self._routing_key}')
        self._channel.queue_bind(self._queue_name, self._exchange_name, routing_key=self._routing_key, callback=self.on_bindok)
    def on_bindok(self, _unused_frame):
        logging.info('Adding channel cancellation callback, start listening for messages.')
        self._channel.add_on_cancel_callback(self.on_consumer_cancelled)
        self._consumer_tag = self._channel.basic_consume(self._queue_name, self._on_message)
        self.is_open = True
    def on_consumer_cancelled(self, method_frame):
        # The broker cancelled our consumer (e.g. the queue was deleted);
        # close the channel so the connection-level logic can react.
        logging.warning('Consumer was cancelled remotely, shutting down: %r', method_frame)
        if self._channel and self._channel.is_open:
            self._channel.close()
        self.is_open = False
| DPIclimate/broker | src/python/api/client/RabbitMQ.py | RabbitMQ.py | py | 10,143 | python | en | code | 2 | github-code | 13 |
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# BUG FIX: the README was read via open(...).read(), leaking an open file
# handle; use a context manager instead. (No explicit encoding is passed to
# keep the file importable on the Python 2 versions listed in classifiers --
# NOTE(review): add encoding='utf-8' once Python 2 support is dropped.)
with open('README.rst') as readme_file:
    readme = readme_file.read()

setup(
    name='crossbarhttprequests',
    packages=['crossbarhttp'],
    version='0.1.6',
    description='This is a library for connecting to Crossbar.io HTTP Bridge Services using python-requests.',
    author='Yomi Daniels',
    license='MIT',
    author_email='yomid4all@gmail.com',
    url='https://github.com/ydaniels/crossbarhttprequests',
    long_description=readme,
    keywords=['wamp', 'crossbar', 'requests'],
    install_requires=['requests', 'requests_futures'],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: Unix',
        'Operating System :: MacOS',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Utilities',
    ],
)
| ydaniels/crossbarhttprequests | setup.py | setup.py | py | 1,138 | python | en | code | 1 | github-code | 13 |
# Exercise 186: compute the Euclidean distance with math.dist (Python 3.8+).
# Signature: math.dist(p, q)
from math import dist

first_point = (2, 3)
second_point = (-3, 5)
distancia = dist(first_point, second_point)
print(distancia)
| Fhernd/PythonEjercicios | Parte001/ex186_distancia_euclidénea.py | ex186_distancia_euclidénea.py | py | 213 | python | es | code | 126 | github-code | 13 |
38616198386 | from HWdevices.abstract.AbstractGAS import AbstractGAS
from HWdevices.PSI_scheme.libs.parsing import Parser
from HWdevices.PSI_scheme.scheme.command import Command
from HWdevices.PSI_scheme.scheme.scheme_manager import SchemeManager
class GAS(AbstractGAS):
    """GAS device wrapper: builds scheme commands, executes them through the
    SchemeManager and parses the raw device replies."""

    def __init__(self, ID, address):
        super(GAS, self).__init__(ID, address)
        self.scheme_manager = SchemeManager(ID, address)
        self.parser = Parser()

    def get_co2_air(self) -> float:
        """
        Measures CO2 in air.

        :return: measured CO2 in air
        """
        command = Command("get-co2-air")
        value = self.scheme_manager.execute([command])
        return self.parser.parse_co2_air(value)

    def get_small_valves(self) -> str:
        """
        Obtain settings of individual vents of GAS device.

        Represented as one byte, where first 6 bits represent
        vents indexed as in a picture scheme available here:
        https://i.imgur.com/jSeFFaO.jpg

        :return: byte representation of vents settings.
        """
        command = Command("get-small-valves")
        value = self.scheme_manager.execute([command])
        return self.parser.parse_small_valves(value)

    def set_small_valves(self, mode: int) -> bool:
        """
        Changes settings of individual vents of GAS device.

        Can be set by one byte (converted to int), where first 6
        bits represent vents indexed as in a picture scheme
        available here: https://i.imgur.com/jSeFFaO.jpg

        Mode 0 - normal mode, output from GMS goes to PBR (255)
        Mode 1 - reset mode, N2 (nitrogen) goes to PBR (239)
        Mode 2 - no gas input to PBR (249)
        Mode 3 - output of PBR goes to input of PBR (246)

        :param mode: chosen mode (0 to 3)
        :return: True if was successful, False otherwise.
        """
        modes = {0: "11111111", 1: "11101111", 2: "11111001", 3: "11110110"}
        # BUG FIX: this setter issued the "get-small-valves" command, so the
        # requested mode was never actually applied to the device.
        command = Command("set-small-valves", [int(modes[mode], 2)])
        result = self.scheme_manager.execute([command])[0].rstrip()
        return result == 'ok'

    def get_flow(self, repeats: int) -> float:
        """
        Actual flow being send from GAS to the PBR.

        :param repeats: the number of measurement repeats
        :return: The current flow in L/min.
        """
        command = Command("get-flow", [repeats])
        value = self.scheme_manager.execute([command])
        return self.parser.parse_flow(value)

    def get_flow_target(self) -> float:
        """
        Actual desired flow.

        :return: The desired flow in L/min.
        """
        command = Command("get-flow-target")
        value = self.scheme_manager.execute([command])
        return self.parser.parse_flow_target(value)

    def set_flow_target(self, flow: float) -> bool:
        """
        Set flow we want to achieve.

        :param flow: flow in L/min we want to achieve (max given by get_flow_max)
        :return: True if was successful, False otherwise.
        """
        command = Command("set-flow-target", [flow])
        # BUG FIX: the reply was converted to float and then compared with the
        # string 'ok', which is always False; compare the stripped reply text
        # instead (same convention as set_small_valves).
        result = self.scheme_manager.execute([command])[0].rstrip()
        return result == 'ok'

    def get_flow_max(self) -> float:
        """
        Maximal allowed flow.

        :return: The maximal flow in L/min
        """
        command = Command("get-flow-max")
        value = self.scheme_manager.execute([command])
        return self.parser.parse_flow_max(value)

    def get_pressure(self, repeats: int = 5, wait: int = 0) -> float:
        """
        Current pressure.

        :param repeats: the number of measurement repeats
        :param wait: waiting time between individual repeats
        :return: Current pressure in ???
        """
        command = Command("get-pressure", [repeats, wait])
        value = self.scheme_manager.execute([command])
        return self.parser.parse_pressure(value)

    def measure_all(self):
        """
        Measures all basic measurable values in one batched execution.

        :return: dict with keys "co2_air", "flow" and "pressure".
        """
        commands = [Command("get-co2-air"),
                    Command("get-flow", [5]),
                    Command("get-pressure", [5, 0])]
        values = self.scheme_manager.execute(commands)
        result = dict()
        result["co2_air"] = self.parser.parse_co2_air(values[0])
        result["flow"] = self.parser.parse_flow(values[1])
        result["pressure"] = self.parser.parse_pressure(values[2])
        return result
| SmartBioTech/PBRcontrol | HWdevices/PSI_scheme/GAS.py | GAS.py | py | 4,434 | python | en | code | 1 | github-code | 13 |
18717302585 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import json
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import nltk
nltk.download('stopwords')
nltk.download('wordnet')
import multiprocessing as mp
import sys
# The original news documents contain articles which are not json parseable.
# Simply check for url, title, dop and text files.
# The original news documents contain articles which are not json parseable.
# Keep only the known field markers (url/title/dop/text) and drop every other
# quote or backslash so the line becomes valid JSON.
def make_parseable(t):
    """Return *t* with stray quotes and backslashes removed; the five known
    JSON field markers are copied through verbatim."""
    markers = ('"url": "', '", "title": "', '", "dop": "', '", "text": "', '" }')
    pieces = []
    pos = 0
    length = len(t)
    while pos < length:
        ch = t[pos]
        if ch == '"':
            # A quote survives only as part of one of the known markers.
            for marker in markers:
                if t[pos:pos + len(marker)] == marker:
                    pieces.append(marker)
                    pos += len(marker)
                    break
            else:
                pos += 1
        elif ch == '\\':
            # Backslashes are dropped outright.
            pos += 1
        else:
            pieces.append(ch)
            pos += 1
    return ''.join(pieces)
#removing random symbols
def remove_junk(t):
utf=''
utf="".join([x if ord(x) < 128 else ' ' for x in t.lower()])
'''
space_characters=["1","2","3","4","5","6","7","8","9","0","~","`","!","@","#","$","%","^",
"&","*","(",")","_","-","+","=","{","}","[","]","|","\\",":",";","\"","'",",","<",".",">","/","?",]
for i in range(len(t)):
if(t[i] in space_characters):
output=output+" "
else:
output=output+t[i].lower()
'''
return utf
#Removing stopwords
def remove_stop_words(t):
    """Drop English stop words (NLTK list) from whitespace-tokenised text."""
    blacklist = stopwords.words('english')
    kept = [token for token in t.split() if token not in blacklist]
    return ' '.join(kept)
#Lemmatization
def get_lemmatized_text(t):
    """Lemmatise each whitespace token with WordNet and rejoin with spaces."""
    wnl = WordNetLemmatizer()
    return ' '.join(wnl.lemmatize(token) for token in t.split())
#Running and printing in a new file
def do_all(t):
    """Parse one article record (repairing it if needed) and clean its fields.

    :param t: one raw line of the news dump, ideally JSON.
    :return: the parsed dict with "text" and "title" passed through remove_junk.
    """
    try:
        parsed_line = json.loads(t)
    # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # json.loads raises ValueError (JSONDecodeError) for bad input and
    # TypeError for non-string input, so catch exactly those.
    except (ValueError, TypeError):
        parsed_line = json.loads(make_parseable(t))
    parsed_line["text"] = remove_junk(parsed_line["text"])
    # Stop-word removal and lemmatisation are intentionally disabled:
    # parsed_line["text"] = remove_stop_words(parsed_line["text"])
    # parsed_line["text"] = get_lemmatized_text(parsed_line["text"])
    parsed_line["title"] = remove_junk(parsed_line["title"])
    # parsed_line["title"] = remove_stop_words(parsed_line["title"])
    # parsed_line["title"] = get_lemmatized_text(parsed_line["title"])
    return parsed_line
def final_function(t):
    """Clean one raw line and serialise the result back to a JSON string."""
    cleaned = do_all(t)
    return json.dumps(cleaned)
| heroapoorva/Novelty-detection | cleaning_function.py | cleaning_function.py | py | 2,649 | python | en | code | 0 | github-code | 13 |
29581604732 | import requests
import pandas as pd
import re
from bs4 import BeautifulSoup
# Route the query to a retailer site by the first letter of the product name,
# then scrape the product titles for every URL listed in that site's file.
search = input("enter the product")
start_char = search[0]
string = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
          'w', 'x', 'y', 'z']
# BUG FIX: the original slices skipped 'h' (string[8:16]) and 'z'
# (string[16:25]), so searches starting with those letters matched no branch.
A_to_G = string[0:7]   # 'a'..'g'
H_to_P = string[7:16]  # 'h'..'p'
Q_to_Z = string[16:]   # 'q'..'z'
if start_char in A_to_G:
    web = 'https://www.midwayusa.com/'
    products = []
    urls = []
    # `with` closes the URL list file even if a request fails.
    with open('midwayusa.txt', 'r') as f:
        for url in f:
            url = url.rstrip("\n")
            page = requests.get(url)
            soup = BeautifulSoup(page.content, 'html.parser')
            # (Removed two unused `div`/`button` lookups from the original.)
            product_title = soup.find('h1', id='l-product-description').get_text()
            # Collapse embedded newlines/tabs, then trim the edges.
            regex = re.compile(r'[\n\r\t]')
            product_title = regex.sub(" ", product_title)
            product_title = product_title.lstrip().rstrip()
            products.append(product_title)
            urls.append(url)
    product = pd.DataFrame(products, urls)
    product["web"] = web
    product.to_csv('midwayusa.csv')
elif start_char in H_to_P:
    web = 'https://palmettostatearmory.com/'
    title = []
    urls = []
    with open('palmettostatearmory.txt', 'r') as f:
        for url in f:
            url = url.rstrip("\n")
            page = requests.get(url)
            soup = BeautifulSoup(page.content, 'html.parser')
            product_title = soup.find('span', class_="base").get_text()
            title.append(product_title)
            urls.append(url)
    product = pd.DataFrame(title, urls)
    product["web"] = web
    product.to_csv('palmettostatearmory.csv')
elif start_char in Q_to_Z:
    web = 'https://www.brownells.com/'
    title = []
    urls = []
    with open('brownells.txt', 'r') as f:
        for url in f:
            url = url.rstrip("\n")
            page = requests.get(url)
            soup = BeautifulSoup(page.content, 'html.parser')
            products = soup.find('h1', class_='mbm')
            # The title is split over the first and last <span> of the header.
            product_title1 = products.find_all('span')[0].get_text().lstrip().rstrip()
            product_title2 = products.find_all('span')[-1].get_text().lstrip().rstrip()
            product_title = f'{product_title1}{product_title2}'
            title.append(product_title)
            urls.append(url)
    product = pd.DataFrame(title, urls)
    product["web"] = web
    product.to_csv('brownells.csv')
| rupeshjatav/webscrapping | scrap.py | scrap.py | py | 2,420 | python | en | code | 0 | github-code | 13 |
70169019857 | import math
from datetime import datetime, timedelta
from flask import (
Blueprint, render_template, request, redirect, url_for, session
)
from .auth import login_required
from .db import get_db
from .forecast import forecast
from .models.current_inventory import get_current_inventory
from .models.location import get_all_locations, get_location_name_by_id
from .models.product import get_product_out, get_product_low, get_all_products, get_itr, get_ROP, get_product, \
get_order_quantity
bp = Blueprint('insight', __name__, url_prefix="/insight")
@bp.route('/')
@login_required
def get_insight_page():
    """Render the main insight ("ask") page with product/location pickers and
    headline stock-out / critical-stock counts."""
    page_data = {
        'products': list(),
        'locations': list(),
        "count_stockout": 0,
        "count_critical": 0,
        "count_missingsales": 0,
        # NOTE(review): assumes a session 'auth' entry keyed by client IP
        # exists (set at login) -- a missing key raises here; confirm.
        "name": session['auth'][request.remote_addr]
    }
    connection = get_db()
    cursor = connection.cursor()
    try:
        page_data["count_stockout"] = len(get_product_out(cursor))
        page_data["count_critical"] = len(get_product_low(connection, cursor))
        # page_data["count_missingsales"] = len(get_missingdata(cursor))
        page_data['products'] = get_all_products(cursor)
        page_data['locations'] = get_all_locations(cursor)
    except Exception as e:
        # Best-effort: fall back to the zeroed defaults on any DB failure.
        print(e)
    return render_template("ask.html", context=page_data)
@bp.route('/ask/sell/what', methods=["POST"])
def what_should_sell():
    """Recommend up to three products to sell for a location and sale date.

    Products are ranked by inventory turnover ratio (ITR). For a near-term
    sale (within ~3 months) items already at a critical stock level are
    filtered out, since they cannot be restocked in time; for a far-out sale
    they are kept (there is still time to reorder).
    """
    answer = 'Flexchain recommends selling the following products as they have a high inventory turnover ratio in the location you indicated. You still have enough time to order more stocks for those in low supply.'
    form_location = request.form.get('location', None)
    form_date = request.form.get('date', None)
    current_date = datetime.now()
    # BUG FIX: a missing form date left sale_date as None and crashed the
    # datetime comparison below; default to "sell now" instead.
    if form_date:
        sale_date = datetime.strptime(form_date, '%Y-%m-%d')
    else:
        sale_date = current_date
    try:
        connection = get_db()
        cursor = connection.cursor()
        if current_date + timedelta(days=90) > sale_date:
            # Sale within 3 months: rank by ITR and drop critical-level items.
            product_itr = get_itr(cursor)
            product_filter = get_product_low(connection, cursor)
            filtered_location_itr = [itr for itr in product_itr if int(itr['location']) == int(form_location)]
            filtered_itr = [itr for itr in filtered_location_itr if itr['sku'] not in product_filter]
            if filtered_itr:
                answer += "<ol>"
                for product in filtered_itr[0:3]:
                    answer += '<li>' + product['name'] + '</li>'
                answer += "</ol>"
        else:
            # Sale is far out: keep critical items but tag their level so the
            # user knows there is still time to reorder them.
            product_itr = get_itr(cursor)
            product_filter = get_product_low(connection, cursor)
            filtered_location_itr = [itr for itr in product_itr if int(itr['location']) == int(form_location)]
            for itr in filtered_location_itr:
                itr['level'] = 'critical' if itr['sku'] in product_filter else 'stable'
            if filtered_location_itr:
                answer += "<ol>"
                for product in filtered_location_itr[0:3]:
                    answer += '<li>' + product['name'] + '</li>'
                answer += "</ol>"
    except Exception as e:
        # Best-effort: fall back to the generic answer text on DB failure.
        print(e)
    return redirect(url_for('answer_page', answer=answer))
@bp.route('/ask/sell/where', methods=["POST"])
def where_should_sell():
    """For each product ticked in the form, recommend the best location to
    sell or ship from based on inventory turnover ratio (ITR)."""
    answer = ''
    answer_template = "Flexchain recommends selling from or shipping from {}. Data shows that the {} you indicated sells really well in {}."
    form_location = request.form.get('location', None)
    items = list()
    if form_location is None:
        # BUG FIX: a bare `return` made this view return None, which Flask
        # turns into a 500; answer with an explicit message instead.
        return render_template("answers.html", answer='Please select a location first.')
    try:
        connection = get_db()
        cursor = connection.cursor()
        # Collect the SKUs the user ticked in the form.
        products = get_all_products(cursor)
        for p in products:
            include = request.form.get(p["sku"], None)
            if include is not None:
                items.append(p["sku"])
        # Rank the selected items by ITR within the chosen location.
        product_itr = get_itr(cursor)
        filtered_itr = [itr for itr in product_itr if int(itr['location']) == int(form_location)]
        filtered_item_itr = [itr for itr in filtered_itr if itr['sku'] in items]
        skus = [itr['sku'] for itr in filtered_item_itr]
        for i in filtered_item_itr[0:len(items)]:
            location_name = get_location_name_by_id(cursor, i['location'])
            answer += answer_template.format(location_name, i['name'], location_name) + '<br><br>'
        # Items with no ITR data get an explicit "not enough data" line.
        for item in items:
            if item not in skus:
                product = get_product(cursor, item)[0]
                answer += "There is not enough inventory or sales data to provide a recommendation for {}.<br><br>".format(
                    product['product_name']
                )
        cursor.close()
    except Exception as e:
        print(e)
    return render_template("answers.html", answer=answer)
@bp.route('/ask/sell/should', methods=["POST"])
def should_sell_item():
    """Tell the user whether a specific product is worth selling at a
    location, based on its inventory turnover ratio (ITR) ranking there."""
    chosen_itr = None
    answer = '''
    {} has {:0.0f} cycles per year. It is taking {:0.2f} months to sell and replace inventory.
    '''
    form_product = request.form.get('product', None)
    form_location = request.form.get('location', None)
    # Get ITR of specific to store
    try:
        connection = get_db()
        cursor = connection.cursor()
        product_itr = get_itr(cursor)
        filtered_itr = [itr for itr in product_itr if int(itr['location']) == int(form_location)]
        # Find this product's ITR record at the chosen location.
        for itr in filtered_itr:
            if str(itr['sku']) == str(form_product):
                chosen_itr = itr
        if chosen_itr is None:
            answer = 'There is not enough sales and/or inventory data to provide a smart recommendation.'
            return render_template('answers.html', answer=answer)
        # Top-3 SKUs at this location count as "selling well".
        # NOTE(review): `skus` holds raw itr['sku'] values while form_product
        # is a str; if SKUs are ints this membership test never matches --
        # confirm against get_itr()'s return types.
        skus = [itr['sku'] for itr in filtered_itr[0:3]]
        product_name = get_product(cursor, form_product)[0]['product_name']
        location_name = get_location_name_by_id(cursor, form_location)
        answer = answer.format(product_name, chosen_itr['itr'], 12.0 / float(chosen_itr['itr']))
        if form_product in skus:
            answer += '<br>' + 'You should definitely sell {} as it is one of your top selling items in {}.'.format(
                product_name, location_name
            )
        else:
            answer += '<br>' + 'It is best to find another item as {} is not selling well in {}.'.format(
                product_name, location_name
            )
        cursor.close()
    except Exception as e:
        # On failure the unformatted template text is rendered as-is.
        print(e)
    # GET ITR of the rest of the items
    # Return rank of ITR of item with respect to the rest of the store and current inventory level
    # Answer: <Insert name of product> has <ITR> turns per year. It is taking 12/<ITR> months to sell and replace inventory.
    # Answer: If item is in top 3 ITR: You should definitely sell <Insert product name> as it is one of your top selling item in <location>
    # Answer: If item is bottom in ITR rank: It is best to find another item as <insert product name> is not selling well in <location>
    return render_template('answers.html', answer=answer)
@bp.route('/ask/order/quantity', methods=["POST"])
def suggest_order_quantity():
    """Answer how much of a product to order once available inventory drops
    below its reorder point (ROP)."""
    answer = 'Flexchain recommends ordering {:.0f} {} once available inventory is below {:.0f}.'
    form_product = request.form.get('product', None)
    try:
        connection = get_db()
        cursor = connection.cursor()
        rop = get_ROP(connection, form_product)
        product = get_product(cursor, form_product)[0]
        order_quantity = get_order_quantity(connection, cursor, form_product)
        # BUG FIX: a stray fourth argument was passed to .format() (silently
        # ignored by Python, but misleading); the template has three slots.
        answer = answer.format(order_quantity, product['product_name'], rop)
        cursor.close()
    except Exception as e:
        print(e)
        # BUG FIX: previously the raw template (with literal `{}` slots) was
        # rendered on failure; show a readable message instead.
        answer = 'Flexchain could not compute an order quantity for this product right now.'
    return render_template('answers.html', answer=answer)
@bp.route('/ask/order/when', methods=["POST"])
def when_order():
    """Answer when to reorder a product.

    If the product is already at a critical level, recommend ordering now and
    report the 3-month demand forecast. Otherwise walk forward month by month
    until forecast demand pushes available inventory below the reorder point
    (giving up after 12 months).
    """
    # BUG FIX: `answer` was only assigned inside the try block, so the
    # fallback return after an exception raised UnboundLocalError.
    answer = 'Flexchain could not produce a reorder recommendation right now.'
    form_product = request.form.get('product', None)
    try:
        connection = get_db()
        cursor = connection.cursor()
        low_products = get_product_low(connection, cursor)
        low_products_sku = [p['sku'] for p in low_products]
        prod = get_product(cursor, form_product)[0]
        current_inventory = get_current_inventory(cursor, form_product)
        if form_product in low_products_sku:
            # Already critical: order immediately, quote 3-month demand.
            fc = forecast(4, 1, 0, form_product, 3, connection)
            answer = 'Order {} now as current available inventory is in critical level. In the next 3 months, ' \
                     'Flexchain predicts you will have a demand of {} units'
            return render_template('answers.html', answer=answer.format(prod['product_name'], sum(fc)))
        else:
            reorder_point = get_ROP(connection, form_product)
            fc = 0
            months = 0
            # Advance month by month until the cumulative forecast eats the
            # buffer above the reorder point.
            while float(reorder_point) < float(current_inventory) - float(fc):
                if months == 12:
                    return render_template('answers.html', answer="You will not have to order {} for at least another "
                                                                  "year.".format(prod['product_name']))
                months += 1
                fc = sum(forecast(4, 1, 0, form_product, months, connection))
                if fc == 0:
                    return render_template('answers.html', answer='Not enough data exists to determine when to '
                                                                  'reorder {} again.'.format(prod['product_name']))
            # BUG FIX: the message read "inventory to to cover" (duplicated
            # word across the continuation); fixed the wording.
            answer = 'Order {} in {} month(s). You have {} in your current inventory to ' \
                     'cover the demand anticipated for the next {} month(s) which is {:.0f} units. '
            # months-1: the loop increments before the final check, so the
            # last fully-covered month is one earlier.
            return render_template('answers.html',
                                   answer=answer.format(prod['product_name'], months - 1, current_inventory, months, fc))
    except Exception as e:
        print(e)
    return render_template('answers.html', answer=answer)
@bp.route('stock-level')
def get_stock_level_page():
    """List out-of-stock and/or low-stock products (selected by ?type=out|low,
    both when absent), each with a one-month demand forecast and a reorder
    recommendation."""
    page_data = {
        "products": list()
    }
    connection = get_db()
    cursor = connection.cursor()
    product_collection = list()
    if request.args.get("type") == "out":
        product_collection = get_product_out(cursor)
    elif request.args.get("type") == "low":
        product_collection = get_product_low(connection, cursor)
    else:
        # Merge both lists without duplicates. A seen-SKU set replaces the
        # previous O(n^2) inner scan (and also dedupes within each list).
        seen_skus = set()
        for p in get_product_out(cursor) + get_product_low(connection, cursor):
            if p["sku"] not in seen_skus:
                seen_skus.add(p["sku"])
                product_collection.append(p)
    for prod in product_collection:
        fc = forecast(4, 1, 0, prod["sku"], 1, connection)
        if len(fc) != 0:
            prod["demand"] = math.ceil(fc[0])
            rop = get_ROP(connection, prod['sku'])
            order_quantity = get_order_quantity(connection, cursor, prod['sku'])
            prod["action"] = "Order {} units of {} once current inventory hits below {} units.".format(
                int(order_quantity), prod['product_name'], int(rop))
        else:
            prod["demand"] = "Forecast values cannot be generated at this time"
            prod["action"] = ""
    page_data["products"] = product_collection
    return render_template("stock-levels.html", context=page_data)
| gracegreene/Flexchain2.0 | flexchain/insights.py | insights.py | py | 13,780 | python | en | code | 0 | github-code | 13 |
73667101139 | from django.urls import path
from .views import CampaignViewset, SubscriberViewset
# URL routes for the campaign/subscriber API, mapping HTTP verbs onto
# explicit viewset actions.
urlpatterns = [
    path('campaigns/', CampaignViewset.as_view({
        'get': 'get_all_campaigns',
        'post': 'create_campaign',
    })),
    # Resolve a campaign slug to its id.
    path('id/<str:campaign_slug>/', CampaignViewset.as_view({
        'get': 'get_id_from_slug'
    })),
    path('campaigns/<str:campaign_slug>/', CampaignViewset.as_view({
        'get': 'get_single_campaign'
    })),
    path('subscribers/', SubscriberViewset.as_view({
        'get': 'get_all_subscribers',
        'post': 'create_subscriber',
    })),
]
| DevJoshi030/Next-Demo-API | api/urls.py | urls.py | py | 589 | python | en | code | 0 | github-code | 13 |
17974746913 | #クラスを定義(サイコロ)
class Dice:
    """A six-sided die tracked by which value currently faces each direction."""
    # Initialise the die faces from the input value list
    # (order: top, front, right, left, back, bottom).
    def __init__(self, num_list):
        self.top = num_list[0]
        self.front = num_list[1]
        self.right = num_list[2]
        self.left = num_list[3]
        self.back = num_list[4]
        self.bottom = num_list[5]
    # Try every way of rolling the die to find the requested orientation.
    def orders(self, num_top, num_front):
        # A roll sequence that visits every orientation of the die
        # (E/S/R denote rolls east, south and a rightward spin).
        for order in list("RRRRSRRRRSRRRRSRRRRSERRRRERRRRERRRR"):
            if order == "E" :
                self.top, self.left, self.bottom, self.right = \
                self.left, self.bottom, self.right, self.top
                # Once the requested top and front faces match,
                # return the value on the right face.
                if self.top == num_top and self.front == num_front:
                    return self.right
            if order == "S":
                self.top, self.front, self.bottom, self.back = \
                self.back, self.top, self.front, self.bottom
                if self.top == num_top and self.front == num_front:
                    return self.right
            if order == "R":
                self.right, self.front, self.left, self.back = \
                self.back, self.right, self.front, self.left
                if self.top == num_top and self.front == num_front:
                    return self.right
# Read the six face values, then the number of (top, front) queries.
num_list = list(map(int, input().split()))
order_num = int(input())
# Collect one answer per query so they can all be printed at the end.
result = []
dice = Dice(num_list)
for _ in range(order_num):
    num_top, num_front = map(int, input().split())
    result.append(dice.orders(num_top, num_front))
for _ in result:
    print(_)
| takumi-kawauchi/Python | AOJ/11/11_B.py | 11_B.py | py | 1,853 | python | ja | code | 0 | github-code | 13 |
13772267607 | import sys
import os
import mediateur
import dateur
from random import randrange
# Allowed values for each anonymisation option read from reparateur.conf.
o_ordonate = ["oui","non"]
o_id_user = ["hash", "clair"]
o_date = ["mediane", "fixe", "clair"]
o_hours = ["fixe", "clair"]
o_id_item = ["hash", "clair"]
o_price = ["mediane", "moyenne", "clair"]
o_qty = ["moyenne"]
# Selected option per column (filled in by configurateur()).
ordonate = ""
id_user = ""
date = ""
hours = ""
id_item = ""
price = ""
qty = ""
# Input / output file handles (set in main()).
f_source = ""
f_dest = ""
# One random salt per month bucket (index 0-12) for hashing client ids.
sel_client = [str(hash(str(randrange(314159265359)))) for i in range(13)]
def configurateur() :
    """Read reparateur.conf and populate the module-level option globals.

    Each line looks like "name:value;". The value of line N configures
    the N-th option (ordonate, id_user, date, hours, id_item, price,
    qty) and must belong to the corresponding o_* whitelist.
    Returns 1 on success, -1 (after printing an error) on any invalid
    option.
    """
    conf = open("reparateur.conf", "r")
    nb_line = 0
    for line in conf :
        curseur = 0
        # Skip up to and past the ':' separator.
        while line[curseur] != ":" :
            curseur += 1
        curseur += 1
        # Collect the option value up to the ';' terminator.
        option = []
        while line[curseur] != ";" :
            option.append(line[curseur])
            curseur += 1
        option = "".join(option)
        if nb_line == 0 :
            if option in o_ordonate :
                global ordonate
                ordonate = option
            else :
                print("Erreur de configuration : option ordonate.")
                return -1
        elif nb_line == 1 :
            if option in o_id_user :
                global id_user
                id_user = option
            else :
                print("Erreur de configuration : option id_user.")
                return -1
        elif nb_line == 2 :
            if option in o_date :
                global date
                date = option
            else :
                print("Erreur de configuration : option date.")
                return -1
        elif nb_line == 3 :
            if option in o_hours :
                global hours
                hours = option
            else :
                print("Erreur de configuration : option hours.")
                return -1
        elif nb_line == 4 :
            if option in o_id_item :
                global id_item
                id_item = option
            else :
                print("Erreur de configuration : option id_item.")
                return -1
        elif nb_line == 5 :
            if option in o_price :
                global price
                price = option
            else :
                print("Erreur de configuration : option price.")
                return -1
        elif nb_line == 6 :
            if option in o_qty :
                global qty
                qty = option
            else :
                print("Erreur de configuration : option qty.")
                return -1
        nb_line += 1
    conf.close()
    return 1
def first_line(l1, line) :
    """Copy the CSV header line to f_dest, dropping the first column
    when the file is marked as ordered ("oui").

    Always returns False so the caller stops treating subsequent lines
    as the header.
    """
    pos = 0
    if ordonate == "oui" :
        # Skip everything up to and including the first comma.
        pos = line.index(",") + 1
    # Write the header up to (but not including) the newline, then "\n".
    f_dest.write(line[pos:].split("\n", 1)[0])
    f_dest.write("\n")
    return False
def delete(line) :
    """If the data line is flagged for deletion (a '*' just before the
    newline), write a DEL placeholder row to f_dest and return True;
    otherwise return False and write nothing."""
    end = line.index("\n")
    if line[end - 1] == "*" :
        f_dest.write("DEL,1111/11/11,11:11,11111,11.11,11\n")
        return True
    return False
def order(line, curseur) :
    """Skip the order-id column (up to and including its comma) when the
    file is ordered; return the updated cursor position."""
    if ordonate == "oui" :
        curseur = line.index(",", curseur) + 1
    return curseur
def client(line, curseur) :
    """Write the (possibly anonymised) client-id column to f_dest.

    Reads the id starting at *curseur*; in "hash" mode it is salted with
    a per-month salt (the month is peeked from the date column that
    follows) and double-hashed, in "clair" mode it is copied verbatim.
    Returns the cursor positioned just after the trailing comma.
    """
    global id_user
    global f_dest
    user = []
    while line[curseur] != "," :
        user.append(line[curseur])
        curseur += 1
    user = "".join(user)
    if id_user == "hash" :
        # Peek ahead at the date column to pick the per-month salt.
        c = curseur + 1
        annee = []
        while line[c] != "/" :
            annee.append(line[c])
            c += 1
        if "".join(annee) == "2010" :
            # 2010 purchases share a dedicated 13th salt bucket.
            mois = 13
        else :
            c += 1
            mois = 0
            while line[c] != "/" :
                mois = mois * 10 + int(line[c])
                c += 1
        # Double hash of reversed strings with the month salt.
        f_dest.write(str(hash(str(hash(user[::-1] + sel_client[mois-1]))[::-1])))
    elif id_user == "clair" :
        f_dest.write(user)
    f_dest.write(",")
    curseur += 1
    return curseur
def dates(line, curseur, med_tab) :
    """Write the anonymised date column (yyyy/mm/dd) to f_dest.

    The year and month are copied (2010 is collapsed to month 12 and a
    13th bucket); the day is replaced by the per-month median from
    *med_tab*, a fixed "15", or copied in clear depending on the global
    `date` option. Returns the cursor just after the trailing comma.
    """
    global date
    global f_dest
    # Year
    annee = []
    while line[curseur] != "/" :
        f_dest.write(line[curseur])
        annee.append(line[curseur])
        curseur += 1
    f_dest.write(line[curseur])
    curseur += 1
    # Month
    if "".join(annee) == "2010" :
        # 2010 rows use the 13th median bucket and month "12" on output.
        mois = 13
        f_dest.write("12/")
        curseur += 3
    else :
        mois = 0
        while line[curseur] != "/" :
            mois = mois * 10 + int(line[curseur])
            f_dest.write(line[curseur])
            curseur += 1
        f_dest.write(line[curseur])
        curseur += 1
    # Day
    if date == "mediane" :
        jour = str(int(med_tab[mois-1]))
        if int(jour) < 10 :
            # Zero-pad single-digit days.
            jour = "0" + str(jour)
        f_dest.write(jour)
        curseur += 2
    elif date == "fixe" :
        f_dest.write("15")
        curseur += 2
    elif date == "clair" :
        while line[curseur] != "," :
            f_dest.write(line[curseur])
            curseur += 1
    f_dest.write(",")
    curseur += 1
    return curseur
def heure(line, curseur) :
    """Anonymise the time column: write a fixed "13:37" or copy it
    verbatim, then advance the cursor past the trailing comma."""
    if hours == "fixe" :
        f_dest.write("13:37")
        curseur += 5
    elif hours == "clair" :
        comma = line.index(",", curseur)
        f_dest.write(line[curseur:comma])
        curseur = comma
    f_dest.write(",")
    return curseur + 1
def objet(line, curseur) :
    """Anonymise the item-id column: salt-and-hash it or copy it in
    clear, then advance the cursor past the trailing comma."""
    comma = line.index(",", curseur)
    item = line[curseur:comma]
    curseur = comma
    if id_item == "hash" :
        # Fresh random salt per item; double hash of reversed strings.
        sel = str(hash(str(randrange(314159265359))))
        f_dest.write(str(hash(str(hash(item[::-1] + sel))[::-1])))
    elif id_item == "clair" :
        f_dest.write(item)
    f_dest.write(",")
    return curseur + 1
def prix(line, curseur, med_tab) :
    """Write the anonymised price column to f_dest.

    In "clair" mode the value is copied verbatim. Otherwise the line is
    assumed to contain a price interval "[ inf.xx, sup.xx ]"; the
    output is the median bucket med_tab[inf//2] or the arithmetic mean
    of the two bounds. Returns the cursor after the column.
    """
    global price
    global f_dest
    if price == "clair" :
        while line[curseur] != "," :
            f_dest.write(line[curseur])
            curseur += 1
        f_dest.write(",")
        curseur += 1
    else :
        # Skip the interval opening characters, then parse the integer
        # part of the lower bound up to its decimal point.
        curseur += 2
        b_inf = 0
        b_sup = 0
        while line[curseur] != "." :
            b_inf = b_inf * 10 + int(line[curseur])
            curseur += 1
        curseur += 1
        # Skip the fractional part up to the separating comma.
        while line[curseur] != "," :
            curseur += 1
        curseur += 2
        # Integer part of the upper bound.
        while line[curseur] != "." :
            b_sup = b_sup * 10 + int(line[curseur])
            curseur += 1
        if price == "mediane" :
            # NOTE(review): buckets appear to be 2-wide (inf//2) — confirm
            # against mediateur.main()'s table layout.
            f_dest.write(str(med_tab[b_inf//2]))
        elif price == "moyenne" :
            f_dest.write(str((b_sup + b_inf)/2))
        f_dest.write(",")
        # Skip the rest of the interval (".xx]" etc.).
        curseur += 5
    return curseur
def quantite(line, curseur) :
    """Write the anonymised quantity column to f_dest.

    The line is assumed to hold a signed integer interval; in "moyenne"
    mode the integer mean of the two bounds is written. Returns the
    cursor positioned on the end-of-line newline.
    """
    global qty
    global f_dest
    # Skip the two characters opening the interval — TODO confirm format.
    curseur += 2
    b_inf = 0
    b_sup = 0
    neg = 1
    if line[curseur] == "-" :
        neg = -1
        curseur += 1
    while line[curseur] != "," :
        b_inf = b_inf * 10 + int(line[curseur])
        curseur += 1
    b_inf *= neg
    curseur += 2
    neg = 1
    if line[curseur] == "-" :
        neg = -1
        curseur += 1
    while line[curseur] != "[" :
        b_sup = b_sup * 10 + int(line[curseur])
        curseur += 1
    # BUG FIX: the upper bound's sign was parsed but never applied;
    # mirror the b_inf handling so negative upper bounds are honoured.
    b_sup *= neg
    if qty == "moyenne" :
        f_dest.write(str(int(((b_sup + b_inf)/2))))
    # Advance to the newline so the caller knows the line is consumed.
    while line[curseur] != "\n" :
        curseur += 1
    return curseur
def extreme(f_source, f_source2, f_dest):
    """Filter out anonymised rows whose price diverges too much.

    *f_source* is the ground-truth CSV, *f_source2* the anonymised CSV
    in the same row order. A data row is replaced by a DEL placeholder
    when the relative gap between the true and anonymised price
    (column index 4) exceeds 85%; otherwise the anonymised row is
    copied through. Header rows (containing "price") are copied as-is.
    """
    lines = f_source.readlines()
    lines2 = f_source2.readlines()
    for line, line2 in zip(lines, lines2):
        if "price" in line:
            # Header row: copy the anonymised header unchanged.
            f_dest.write(line2)
            continue
        columns = line.split(',')
        columns2 = line2.split(',')
        true_price = float(columns[4])
        anon_price = float(columns2[4])
        high = max(true_price, anon_price)
        low = min(true_price, anon_price)
        # BUG FIX: the original computed max(columns2[4], columns2[4]) —
        # comparing the anonymised price with itself, so the true price
        # never entered the ratio. Also guard against a zero maximum.
        if high != 0 and 1 - low / high > 0.85:
            f_dest.write("DEL,1111/11/11,11:11,11111,11.11,11\n")
        else:
            f_dest.write(line2)
def main() :
    """Anonymise csv/<name>.csv according to reparateur.conf.

    Expects exactly one command-line argument, the base file name.
    Writes an intermediate csv/<name>_tmp.csv, then post-filters it
    against csv/ground_truth.csv into csv/<name>_rep.csv and removes
    the temporary file. Returns -1 on usage/configuration errors.
    """
    if len(sys.argv) == 1 :
        print("Erreur : Pas de fichier précisé.")
        return -1
    elif len(sys.argv) > 2 :
        print("Erreur : Trop d'argument. Seul le nom du fichier est nécessaire.")
        return -1
    else :
        fichier = sys.argv[1]
    if configurateur() == -1 :
        return -1
    else :
        global f_source
        f_source = open("csv/"+fichier+".csv", "r")
        global f_dest
        f_dest = open("csv/"+fichier+"_tmp.csv", "w")
        l1 = True
        # Per-month day medians and price medians used by dates()/prix().
        date_med_tab = dateur.main()
        prix_med_tab = mediateur.main()
        for line in f_source :
            curseur = 0
            if l1 :
                # First line is the header; copy it and clear the flag.
                l1 = first_line(l1, line)
                continue
            if not delete(line) :
                # Process each column in order, threading the cursor.
                curseur = order(line, curseur)
                curseur = client(line, curseur)
                curseur = dates(line, curseur, date_med_tab)
                curseur = heure(line, curseur)
                curseur = objet(line, curseur)
                curseur = prix(line, curseur, prix_med_tab)
                curseur = quantite(line, curseur)
                f_dest.write("\n")
        f_source.close()
        f_dest.close()
        # Drop rows whose anonymised price strays too far from the truth.
        extreme(open("csv/ground_truth.csv", "r"), open("csv/"+fichier+"_tmp.csv", "r"), open("csv/"+fichier+"_rep.csv", "w"))
        os.remove("csv/"+fichier+"_tmp.csv")
if __name__ == "__main__":
    # Run the anonymiser when the script is executed directly.
    # (Fixes extraction garbling: stray metadata was fused onto this line.)
    main()
39737791193 | from control.TestbenchController import TestbenchController
from utils import Logger
from utils import Utils
from utils import BenchConfig
class CPDBench:
    """Registry of datasets, algorithms and metrics, populated through
    decorators and handed to the testbench controller by start()."""

    def __init__(self):
        self._datasets = []
        self._algorithms = []
        self._metrics = []
        self._logger = None

    def _register(self, container, function):
        # Shared decorator backend: remember the function and hand it
        # back unchanged so the decorated definition is untouched.
        container.append(function)
        return function

    def start(self) -> None:
        """Load configuration, set up logging and execute one test run."""
        BenchConfig.load_config()
        self._logger = Logger.get_application_logger()
        self._logger.debug('CPDBench object created')
        self._logger.info("Starting CPDBench")
        self._logger.info(f"Got {len(self._datasets)} datasets, {len(self._algorithms)} algorithms and "
                          f"{len(self._metrics)} metrics")
        bench = TestbenchController()
        bench.execute_testrun(self._datasets, self._algorithms, self._metrics)

    def dataset(self, function):
        """Decorator registering *function* as a dataset provider."""
        return self._register(self._datasets, function)

    def algorithm(self, function):
        """Decorator registering *function* as an algorithm under test."""
        return self._register(self._algorithms, function)

    def metric(self, function):
        """Decorator registering *function* as an evaluation metric."""
        return self._register(self._metrics, function)
| Lucew/CPD-Bench | src/interface/CPDBench.py | CPDBench.py | py | 1,380 | python | en | code | 0 | github-code | 13 |
def shellSort(arr):
    """Sort *arr* in place with Shell sort, then print its elements.

    Starts with a gap of len(arr)//2 and halves it each round, doing a
    gapped insertion sort for every gap size.
    """
    n = len(arr)
    gap = n // 2
    while gap > 0:
        # Gapped insertion sort: arr[0..gap-1] are already in gapped
        # order; insert each subsequent element into its gapped slot.
        for i in range(gap, n):
            # Save arr[i] and open a hole at position i.
            temp = arr[i]
            j = i
            # Shift earlier gap-sorted elements up until the correct
            # location for arr[i] is found.
            while j >= gap and arr[j - gap] > temp:
                arr[j] = arr[j - gap]
                j -= gap
            arr[j] = temp
        # BUG FIX: use integer division; the original `gap /= 2` turned
        # gap into a float, so range(gap, n) raised TypeError on Python 3.
        gap //= 2
    for i in arr:
        print(i, end=" ")
# Demo run: sort a sample list in place and print it.
arr=[3,43,5,2,5,6,2,56,75]
shellSort(arr)
| amanlalwani007/important-python-scripts | shell sort.py | shell sort.py | py | 836 | python | en | code | 0 | github-code | 13 |
72460465617 | #!/usr/bin/env python
import os
import re
from distutils.core import setup, Extension
# SDL release the hard-coded Windows include/lib paths refer to.
SDL_VERSION = '1.2.14'
def get_sources_from_dir(d):
    """Return the paths of the C/C++ sources (*.c / *.cc) directly in *d*.

    Returns a list rather than a lazy iterator so the result behaves the
    same on Python 3 as the original Python 2 map/filter chain did (a
    lazy map object could only be consumed once and may not be accepted
    by distutils' Extension).
    """
    return [os.path.join(d, name)
            for name in os.listdir(d)
            if re.search(r'\.cc?$', name)]
# Base build flags: compile as GNU C99 and add the project root to the
# include path.
extra_compile_args = ['-std=gnu99', '-I%s' % os.getcwd()]
extra_link_args = []
# On Windows, point at a locally installed SDL tree when present.
if os.path.exists('C:\\apps\\SDL-%s' % SDL_VERSION):
    extra_compile_args.append('-IC:\\apps\\SDL-%s\\include' % SDL_VERSION)
    extra_link_args.append('-LC:\\apps\\SDL-%s\\lib' % SDL_VERSION)
sdl_sources = get_sources_from_dir('sdl')
# Native extension module built from the sdl/ sources, linked with SDL.
sdl_module = Extension('SDL',
                       sources = sdl_sources,
                       extra_compile_args = extra_compile_args,
                       extra_link_args = extra_link_args,
                       libraries = ['SDL'])
setup(name = 'pysdl2',
      version = '0.4-next',
      description = 'Python Bindings for SDL',
      author = 'Josh Holtrop',
      author_email = 'pysdl2@gmail.com',
      ext_modules = [sdl_module],
      long_description = 'Python Bindings for SDL',
      license = 'LGPL v2.1')
| holtrop/pysdl2 | setup.py | setup.py | py | 1,052 | python | en | code | 4 | github-code | 13 |
71141528659 | from email import message
from turtle import title
from discord.ext import commands
from requests import get
from libs.help import EmbedHelp
from libs.embed import Embed
from typing import Any
class Explain(commands.Cog):
    """Cog that looks up word definitions via dictionaryapi.dev."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def explain(self, ctx, word=""):
        """Finds Meaning Online"""
        if str(word).strip() == "":
            # No word supplied: show the command's usage help.
            help = EmbedHelp(self.explain, accepted_args=["Word"])
            await ctx.send(help())
        else:
            embed = Embed()
            payload = get(
                f"https://api.dictionaryapi.dev/api/v2/entries/en/{word}"
            ).json()
            if not isinstance(payload, list) and payload.get('title'):
                # API error response: surface its title and message.
                embed.title = payload.get('title')
                embed.description = payload.get('message')
            else:
                payload = payload[0]
                definitions = payload.get('meanings')[0].get('definitions')[0]
                # BUG FIX: the original used an elif-chain here, so at
                # most one field was ever populated; fill in every field
                # the API actually returned.
                if payload.get('word'):
                    word = payload.get('word')
                    embed.title = word[0].upper() + word[1:]
                if definitions.get('definition'):
                    embed.description = f"{definitions.get('definition')}"
                if definitions.get('example'):
                    embed.add_field(
                        name="Example", value=f"{definitions.get('example')}")
                if definitions.get('synonyms'):
                    embed.add_field(
                        name="Synonyms", value=f"{', '.join(definitions.get('synonyms'))}")
                if definitions.get('antonyms'):
                    embed.add_field(
                        name="Antonyms", value=f"{', '.join(definitions.get('antonyms'))}")
                if payload.get('meanings')[0].get('partOfSpeech'):
                    # Fixed user-facing typo: "Speach" -> "Speech".
                    embed.add_field(
                        name="Part of Speech", value=f"{payload.get('meanings')[0].get('partOfSpeech')}")
            await ctx.send(embed())
def setup(bot) -> dict[str, Any]:
    """Build the plugin descriptor consumed by the plugin loader."""
    plugin = Explain(bot)
    return {
        "Object": plugin,
        "name": "Explain It",
        "description": "Adds Ability to use Dictionary Search",
    }
| git-vamp/WitheredBot | plugins/explain_plugin.py | explain_plugin.py | py | 2,226 | python | en | code | 1 | github-code | 13 |
29835220619 | import random
import chess
from engines.game_data import GameData
from engines.zobrist_hash import ZobristHash
# Shared 8x8 board (list of rank rows) and the pool of non-king pieces.
board = [[" " for x in range(8)] for y in range(8)]
piece_list = ["R", "N", "B", "Q", "P"]
def place_kings(brd):
    """Place a white king 'K' and a black king 'k' on random legal
    squares of the 8x8 board *brd* (modified in place).

    Retries until the kings are non-adjacent: either far enough apart,
    or separated by exactly (0, 2) in some order.
    """
    while True:
        wr, wf = random.randint(0, 7), random.randint(0, 7)
        br, bf = random.randint(0, 7), random.randint(0, 7)
        gaps = [abs(wr - br), abs(wf - bf)]
        if sum(gaps) > 2 or set(gaps) == set([0, 2]):
            brd[wr][wf], brd[br][bf] = "K", "k"
            return
def populate_board(brd, wp, bp):
    """Scatter *wp* white and *bp* black random non-king pieces over the
    free squares of *brd*, never dropping a pawn on its promotion rank."""
    for x in range(2):
        if x == 0:
            # White pass: uppercase piece letters.
            piece_amount = wp
            pieces = piece_list
        else:
            # Black pass: same piece set, lowercased.
            piece_amount = bp
            pieces = [s.lower() for s in piece_list]
        while piece_amount != 0:
            piece_rank, piece_file = random.randint(0, 7), random.randint(0, 7)
            piece = random.choice(pieces)
            # Only occupy empty, legal squares; otherwise retry.
            if (
                brd[piece_rank][piece_file] == " "
                and pawn_on_promotion_square(piece, piece_rank) == False
            ):
                brd[piece_rank][piece_file] = piece
                piece_amount -= 1
def fen_from_board(brd):
    """Serialise the 8x8 board into a FEN string (white to move, no
    castling or en-passant rights), terminated by a newline."""
    fen = ""
    for row in brd:
        empties = 0
        for square in row:
            if square == " ":
                empties += 1
                continue
            # Flush the pending run of empty squares, then the piece.
            if empties:
                fen += str(empties)
            fen += square
            empties = 0
        if empties:
            fen += str(empties)
        # Rank separator; a FEN placement has exactly seven slashes.
        if fen.count("/") < 7:
            fen += "/"
    fen += " w - - 0 1\n"
    return fen
def pawn_on_promotion_square(pc, pr):
    """True when piece *pc* is a pawn sitting on its promotion rank
    (white 'P' on rank 0, black 'p' on rank 7)."""
    return (pc == "P" and pr == 0) or (pc == "p" and pr == 7)
def generate_random_fen():
    """Fill the module-level board with two kings plus random piece
    counts (0-15 per side) and return its FEN string."""
    piece_amount_white, piece_amount_black = random.randint(0, 15), random.randint(
        0, 15
    )
    place_kings(board)
    populate_board(board, piece_amount_white, piece_amount_black)
    return fen_from_board(board)
# Positions to exercise the Zobrist hash with (first entry pairs a FEN
# with a move; the rest are plain FENs).
fens = [
    ("rnbqkbr1/pppppppp/7n/8/3P4/6P1/PPP1PP1P/RNBQKBNR w KQq - 1 3", "c1e3"),
    "4N3/2p4R/2b1p3/Pp1P1p2/1KPP4/2r2k2/5P2/6b1 w - - 0 1",
    "8/3p1B2/4P1n1/P1b1N3/1P6/PP4p1/Kp1k1p2/4R3 w - - 0 1",
    "2r5/p7/7N/PP2Pp2/8/1pRp3N/3knP2/3b1K2 w - - 0 1",
    "1NR1n3/r1PPbp1P/B1Kp2N1/PPRn2P1/p1PBp1PQ/1ppp2q1/r1b2p2/5k2 w - - 0 1",
    "3B4/2rP1k1K/QPP2r1p/Pp2q2n/bP1p2pp/pPnPbB1N/Pp2p1R1/6NR w - - 0 1",
    "8/2n1rn1P/PNQpN1Pp/1Pp1pP2/1P1BPpq1/4pbR1/KbRPppB1/4kr2 w - - 0 1",
    "3r1q2/1n2P2N/BKp1p1Pp/1p1Rp1Pb/Rb2P3/BPQP1pk1/1ppPPN2/n5r1 w - - 0 1",
    "6nb/2r1PB2/3pppK1/pQPP1P1P/1PpP1R2/2pP1RNp/N4n1p/qr1kB2b w - - 0 1",
    "R2Q4/6Pp/8/3r4/8/3n3k/2K5/8 w - - 0 1",
    "8/4P3/8/1B6/7r/8/3Bpkq1/2K5 w - - 0 1",
    "r6r/1b2k1bq/8/8/7B/8/8/R3K2R b QK - 3 2",
    "8/8/8/2k5/2pP4/8/B7/4K3 b - d3 5 3",
    "r4rk1/1pp1qppp/p1np1n2/2b1p1B1/2B1P1b1/P1NP1N2/1PP1QPPP/R4RK1 w - - 0 10",
    "3k4/3p4/8/K1P4r/8/8/8/8 b - - 0 1",
    "8/P1k5/K7/8/8/8/8/8 w - - 0 1",
    "5r1B/bpp5/3R2BP/PP1P1p2/p1PP2nP/P1K5/k2p1Q2/1q2NrR1 w - - 0 1",
    "q5k1/P1PB3p/n3r3/1P1Npn2/1PPb1B2/P2pP1p1/K1P1p3/1Q1Rr3 w - - 0 1",
    "1K6/2P1R3/2Bp4/1PN3pp/Pp1QP2P/k1P4P/nN1pRB2/2bb1q1r w - - 0 1",
    "2BNk3/6P1/P1ppqQ2/nb4Pr/PRp1p2P/1PR5/2p1Pp2/K3B2r w - - 0 1",
    "5b2/1P2k1n1/1QK3p1/3P3R/Npp1n3/pPP4r/1pq1PRpP/2BBN3 w - - 0 1",
    "R5R1/bpB3n1/2PP1P1p/1Npkp3/3p2KP/2P3PN/qp6/1rQ3rn w - - 0 1",
    "3rBb2/P1P3P1/pPq1PPPK/4pN1n/r2pP2B/5k2/1Rb3N1/4Q1n1 w - - 0 1",
    "1B3q2/npp2P2/4Rp2/1P2pP1P/pp1k1P2/1b1PRp2/2pKn1r1/5rN1 w - - 0 1",
    "4B2N/p2Q2P1/1P1BnP2/Pbpp3P/1P4np/b2pNk1p/prp5/6K1 w - - 0 1",
    "r1bqkbnr/ppp1pppp/n2p4/8/8/4P3/PPPP1PPP/RNBQK1NR w KQkq - 0 3",
]
game_data_ = GameData(1)
for fen in fens:
    # if type(fen) is tuple:
    # NOTE(review): the loop currently ignores `fen` and always checks
    # this fixed castling position — presumably a debugging override.
    fen1 = "rnbqkbr1/ppppnppp/B3p3/8/4P3/7N/PPPP1PPP/RNBQK2R w KQq - 4 4"
    move = chess.Move.from_uci("e1g1")
    # else:
    #     move = random.choice(list(game_data_.board.legal_moves))
    game_data_.board = chess.Board(fen1)
    # Incremental hash must survive a push/pop round-trip and agree with
    # a hash recomputed from scratch at every step.
    init_hash = game_data_.zobrist_hash.h()
    push_hash = game_data_.push_move(move)
    fresh_push_hash = ZobristHash().from_board(game_data_.board)
    pop__hash = game_data_.pop_move()
    fresh_pop_hash = ZobristHash().from_board(game_data_.board)
    if (
        init_hash != pop__hash
        or init_hash != fresh_pop_hash
        or push_hash != fresh_push_hash
    ):
        print(f"fen : {fen1}")
        print(f"move : {move}")
        print(f"init_hash: {init_hash}\n")
        print(f"push_hash: {push_hash}")
        print(f"fresh_push_hash: {fresh_push_hash}\n")
        print(f"pop__hash: {pop__hash}")
        print(f"fresh_pop_hash: {fresh_pop_hash}")
        break
print("done")
| Marius-likes-coding/chess-engine | chess-bot/test_zobriest.py | test_zobriest.py | py | 4,823 | python | en | code | 0 | github-code | 13 |
36333270615 | import sys
# Read the number of days n and the window length x.
n, x = map(int, sys.stdin.readline().split(' '))
visitors = list(map(int, sys.stdin.readline().split(' ')))

# Sum of the first window of length x.
window = sum(visitors[:x])
best = window
count = 1

# Slide the window one position at a time, tracking the maximum sum and
# how many windows attain it.
for right in range(x, n):
    window += visitors[right] - visitors[right - x]
    if window > best:
        best = window
        count = 1
    elif window == best:
        count += 1

if best == 0:
    print('SAD')
else:
    print(best)
    print(count)
4241619036 | from .models import *
import yfinance as yf
import math
import pandas as pd
import time as tim
from smartapi import SmartConnect
from smartapi import SmartWebSocket
import traceback
from pytz import timezone
import json
from datetime import time, datetime
# import telepot
# bot = telepot.Bot("5448843199:AAEKjMn2zwAyZ5tu8hsLIgsakxoLf980BoY")
# bot.getMe()
# time.sleep(5)
import logging
# Module-wide logger for this data-collection service.
logger = logging.getLogger('dev_log')
class run_strategy():
    """Polls NIFTY option LTPs via the SmartAPI broker connection and
    mirrors them into datamanagement/data.json until market close."""
    def __init__(self, strategy):
        # Strategy parameters object; expected to expose expiry_1/expiry_2
        # (used in run()) — TODO confirm its full contract.
        self.parameters = strategy
        self.ltp_prices={}
        self.times=tim.time()
        # SECURITY NOTE(review): API key and login credentials are
        # hard-coded here; they should be moved to configuration.
        # Retry the broker login up to 100 times, one second apart.
        for i in range(100):
            try:
                self.obj = SmartConnect(api_key='NuTmF22y')
                data = self.obj.generateSession("Y99521", "abcd@1234")
                refreshToken = data['data']['refreshToken']
                self.feedToken = self.obj.getfeedToken()
                break
            except Exception as e:
                print(str(e))
                tim.sleep(1)
    def ltp_nifty_options(self,token_dict,dict_token):
        """Fetch the latest traded price for every open position (deduped
        by token) plus the NIFTY spot, persisting each into data.json."""
        tim.sleep(.5)
        position_opened = positions.objects.filter(status='OPEN')
        # Deduplicate open positions by token.
        positions_opened=[]
        tokens_used=[]
        for i in range(len(position_opened)):
            if position_opened[i].token not in tokens_used:
                tokens_used.append(position_opened[i].token)
                positions_opened.append(position_opened[i])
        for i in range(len(positions_opened)):
            try:
                # Read-modify-write the shared JSON price cache per token.
                with open('datamanagement/data.json') as file:
                    data=json.load(file)
                data[positions_opened[i].token]=self.obj.ltpData("NFO", positions_opened[i].symbol, positions_opened[i].token)['data']['ltp']
                tim.sleep(.5)
                json_object = json.dumps(data, indent = 2)
                with open("datamanagement/data.json","w") as write_file:
                    write_file.write(json_object)
            except Exception as e:
                logger.info(str(e))
                print(str(e))
        # Token "26000" is the NIFTY index spot price.
        with open('datamanagement/data.json') as file:
            data=json.load(file)
        data['26000']=self.obj.ltpData("NSE", 'NIFTY', "26000")['data']['ltp']
        print(data['26000'])
        json_object = json.dumps(data, indent = 2)
        with open("datamanagement/data.json","w") as write_file:
            write_file.write(json_object)
    def calculate_websocket_token(self,token_dict, dict_token):
        """Build the '&'-separated "nse_fo|<token>" subscription string
        expected by the websocket feed."""
        token=""
        lists=[]
        for key,value in token_dict.items():
            lists.append(value)
        token=""
        for i in range(len(lists)):
            token=token+"nse_fo|"+str(lists[i])
            if i==len(lists)-1:
                token=token
            else:
                token=token+'&'
        return token
    def websocket(self,token_dict, dict_token):
        """Poll prices in a loop until 15:40 IST, swallowing and logging
        any per-iteration failure."""
        while True:
            try:
                self.ltp_nifty_options(token_dict,dict_token)
                if time(15, 40) <= datetime.now(timezone("Asia/Kolkata")).time():
                    break
            except Exception:
                print(traceback.format_exc())
    def token_calculations(self, nifty_price, expiry,token_dict,dict_token):
        """Map option symbols to tokens for strikes within +/-600 points
        of the NIFTY spot (rounded to a 50-point grid) for *expiry*."""
        strike_prices = []
        spot = round(nifty_price/50, 0)*50
        low_vix = spot-600
        high_vix = spot+600
        spot_value = low_vix
        while spot_value <= high_vix:
            strike_prices.append(spot_value)
            spot_value += 50
        df = pd.read_csv('datamanagement/scripts.csv')
        # token_dict = {}
        # dict_token = {}
        for i in range(len(df)):
            for j in range(len(strike_prices)):
                symbol = str(expiry)+str(int(strike_prices[j]))
                if symbol in df['symbol'][i]:
                    token_dict[str(df['symbol'][i])] = str(df['token'][i])
                    dict_token[str(df['token'][i])] = str(df['symbol'][i])
        return token_dict, dict_token
    def run(self):
        """Entry point: build token maps for both expiries, reset the
        JSON cache, then poll prices until market close."""
        try:
            price_buy = self.obj.ltpData("NSE", 'NIFTY', "26000")['data']['ltp']
            print(price_buy)
            token_dict, dict_token=self.token_calculations(price_buy, self.parameters.expiry_1,{},{})
            token_dict, dict_token=self.token_calculations(price_buy, self.parameters.expiry_2,token_dict,dict_token)
            with open('datamanagement/data.json') as file:
                data=json.load(file)
            # data[position_opened[i].token]=self.obj.ltpData("NSE", position_opened[i].symbol, position_opened[i].token)['data']['ltp']
            # for key,value in dict_token.items():
            #     data[key]=self.obj.ltpData("NFO", value,key )['data']['ltp']
            json_object = json.dumps(data, indent = 2)
            with open("datamanagement/data.json","w") as write_file:
                write_file.write(json_object)
            value=self.websocket(token_dict, dict_token)
            return value
        except Exception:
            print(traceback.format_exc())
| sudhanshu8833/porfolio_management | datamanagement/data_collection.py | data_collection.py | py | 5,183 | python | en | code | 1 | github-code | 13 |
71473383697 | import unittest
import automatic_conversion_test_base
import numpy as np
import parameterized
import onnx
from onnx import helper
#####################################################################################
# Every test calls _test_op_conversion to downgrade a model from the most recent opset version
# to a early version and runs checker + shape inference on the downgraded model.
####################################################################################
class TestAutomaticDowngrade(automatic_conversion_test_base.TestAutomaticConversion):
    """Downgrades ops/models from recent opsets and re-validates them."""
    def _test_op_downgrade(self, op: str, *args, **kwargs):
        """Run the shared conversion helper in the downgrade direction."""
        self._test_op_conversion(op, *args, **kwargs, is_upgrade=False)
    @parameterized.parameterized.expand(
        [
            "ReduceL1",
            "ReduceL2",
            "ReduceLogSum",
            "ReduceLogSumExp",
            "ReduceMean",
            "ReduceMax",
            "ReduceMin",
            "ReduceProd",
            "ReduceSum",
            "ReduceSumSquare",
        ]
    )
    def test_reduce_ops(self, op) -> None:
        """Downgrade each reduce op (axes provided as an input) from opset 13."""
        # TODO: need to add test cases for missing axes input which depends on this pr:
        # https://github.com/onnx/onnx/pull/5613
        axes = helper.make_tensor(
            "b", onnx.TensorProto.INT64, dims=[3], vals=np.array([0, 1, 2])
        )
        self._test_op_downgrade(
            op,
            from_opset=13,
            input_shapes=[[3, 4, 5], [3]],
            output_shapes=[[1, 1, 1]],
            input_types=[onnx.TensorProto.FLOAT, onnx.TensorProto.INT64],
            initializer=[axes],
        )
    def test_dft20_no_axis(self) -> None:
        """DFT with no axis input downgrades cleanly to opset 19."""
        self._test_model_conversion(
            to_opset=19,
            model="""
            <ir_version: 9, opset_import: [ "" : 20]>
            dft_no_axis (float[N, M, 1] x) => (float[N, M, 2] y)
            {
                y = DFT (x)
            }
            """,
        )
    def test_dft20_initializer_axis(self) -> None:
        """DFT whose axis comes from an initializer-style attribute."""
        self._test_model_conversion(
            to_opset=19,
            model="""
            <ir_version: 9, opset_import: [ "" : 20]>
            dft_no_axis (float[N, M, 1] x, int64 dft_length) => (float[N, K, 2] y)
            <int64 axis = {1}>
            {
                y = DFT (x, dft_length, axis)
            }
            """,
        )
    def test_dft20_constant_axis(self) -> None:
        """DFT whose axis is produced by a Constant node."""
        self._test_model_conversion(
            to_opset=19,
            model="""
            <ir_version: 9, opset_import: [ "" : 20]>
            dft_no_axis (float[N, M, 1] x, int64 dft_length) => (float[N, K, 2] y)
            {
                axis = Constant <value = int64{1}>()
                y = DFT (x, dft_length, axis)
            }
            """,
        )
    def test_dft20_unknown_axis(self) -> None:
        """A runtime-only axis cannot be downgraded; conversion must fail."""
        self._test_model_conversion_fails(
            to_opset=19,
            model="""
            <ir_version: 9, opset_import: [ "" : 20]>
            dft_no_axis (float[N, M, 1] x, int64 dft_length, int64 axis) => (float[P, K, 2] y)
            {
                y = DFT (x, dft_length, axis)
            }
            """,
        )
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| onnx/onnx | onnx/test/version_converter/automatic_downgrade_test.py | automatic_downgrade_test.py | py | 3,187 | python | en | code | 15,924 | github-code | 13 |
10045654395 | #zad1
napis = 'ala ma kota'
# Pair each word of the sentence with its character count.
ls = [(w, len(w)) for w in napis.split()]
print(ls)
#zad2
# zad2: ask how many Fibonacci numbers to produce.
n = int(input('podaj ilosc elementow'))
def fibonacci(n):
    """Yield the first *n* Fibonacci numbers: 1, 1, 2, 3, 5, ..."""
    prev, cur = 0, 1
    for _ in range(n):
        prev, cur = cur, prev + cur
        yield prev
# Materialise the first n Fibonacci numbers and display them.
fib = [x for x in fibonacci(n)]
print(fib)
#zad3
def f1(n):
    """Predicate: True for even numbers, False otherwise."""
    return n % 2 == 0
def f2(f, n):
    """Return the elements of *n* for which predicate *f* is truthy."""
    return [elem for elem in n if f(elem)]
# zad3: build 0..19 and keep only the numbers the f1 predicate accepts.
lst = []
for i in range(20):
    lst.append(i)
print(f2(f1, lst))
#zad4
from math import sqrt
from math import pow
import random
krotka = ()
lista = []
res = []
# Random reference point on the 0..11 grid.
point = (random.randint(0, 11), random.randint(0, 11))
print(point)
# Draw ten random points.
for i in range(10):
    krotka = (random.randint(0, 11), random.randint(0, 11))
    lista.append(krotka)
# BUG FIX: iterate over ALL ten points — the original used range(9) and
# silently skipped the last one.
for i in range(len(lista)):
    # Euclidean distance to the reference point, rounded to 3 decimals.
    krotka = (round(sqrt(pow(lista[i][0] - point[0], 2) + pow(lista[i][1] - point[1], 2)), 3), lista[i])
    print(krotka)
    res.append(krotka)
# Closest points first.
res = sorted(res, key=lambda elem: elem[0])
print(res)
#zad5
from fnmatch import fnmatch
from os import listdir
# Directory to scan; drop the trailing character typed by the user.
sciezka = input('Podaj sciezke ')
sciezka = sciezka[:-1]
# Build a glob pattern like "*.txt" from the bare extension.
rozsz = input('Podaj rozszerzenie')
rozsz = '*.' + rozsz
pliki = listdir(sciezka)
def f( lista, rozsz ):
    """Yield the names in *lista* matching the glob pattern *rozsz*."""
    for name in lista:
        if fnmatch(name, rozsz):
            yield name
lista = [x for x in f(pliki, rozsz)]
# BUG FIX: show the filtered names — the original printed the raw
# directory listing `pliki` and left `lista` unused.
print(lista)
41805750023 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def recoverTree(self, root):
        """Restore a BST in which exactly two node values were swapped.

        :type root: TreeNode
        :rtype: None -- the tree is modified in place
        """
        # The two misplaced nodes found during the in-order walk.
        nodes = [None, None]
        def sol():
            # NOTE(review): `last`/`lastlast` are module-level globals
            # tracking the previous two in-order nodes; this leaks state
            # between calls in the same interpreter — confirm single-use.
            global last
            global lastlast
            last = lastlast = None
            def dfs(x):
                global last
                global lastlast
                if not x:
                    return
                dfs(x.left)
                # First anomaly: previous in-order value exceeds current,
                # and the node before it was still in order.
                if not nodes[0] and last and last.val > x.val:
                    if not lastlast or lastlast.val < last.val:
                        nodes[0] = last
                # Second anomaly: order recovers right after a dip.
                if last and last.val < x.val:
                    if lastlast and lastlast.val > last.val:
                        nodes[1] = last
                lastlast = last
                last = x
                dfs(x.right)
            dfs(root)
            # Handle the case where the second swapped node is the
            # in-order maximum (the anomaly ends at the final node).
            if lastlast and lastlast.val > last.val:
                nodes[1] = last
        sol()
        node1, node2 = nodes
        if node1 and node2:
            # Swap the two values back to restore the BST property.
            node1.val, node2.val = node2.val, node1.val
| superwhd/LeetCode | 99 Recover Binary Search Tree.py | 99 Recover Binary Search Tree.py | py | 1,359 | python | en | code | 1 | github-code | 13 |
15791591230 |
def pageCount(n, p):
    """Minimum page turns needed to reach page *p* of an *n*-page book,
    starting from whichever cover is closer."""
    # The book already opens at the target from either cover.
    if p == 1 or n == p:
        return 0
    from_back = n - p
    if from_back < p:
        # Closer to the back cover.
        if from_back <= 1 and p % 2 != 0:
            return 1
        return from_back // 2
    # Closer to (or equidistant from) the front cover.
    return p // 2
if __name__ == '__main__':
    # HackerRank harness: read n and p from stdin, write the answer to
    # the file named by OUTPUT_PATH.
    # BUG FIX: `os` was used here without ever being imported.
    import os

    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    n = int(input().strip())
    p = int(input().strip())

    result = pageCount(n, p)

    fptr.write(str(result) + '\n')

    fptr.close()
| Joshwa034/testrepo | pages.py | pages.py | py | 494 | python | en | code | 0 | github-code | 13 |
17080884214 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.MedicalUserMessageSubcriptionInfo import MedicalUserMessageSubcriptionInfo
class AlipayCommerceMedicalUsermessageSubscriptionQueryResponse(AlipayResponse):
    """Response wrapper for the medical user-message subscription query."""

    def __init__(self):
        super(AlipayCommerceMedicalUsermessageSubscriptionQueryResponse, self).__init__()
        self._data = None
        self._error_message = None
        self._result_code = None

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, value):
        # Coerce every entry to a MedicalUserMessageSubcriptionInfo,
        # converting plain dicts as needed; non-list values are ignored.
        if isinstance(value, list):
            self._data = [
                item if isinstance(item, MedicalUserMessageSubcriptionInfo)
                else MedicalUserMessageSubcriptionInfo.from_alipay_dict(item)
                for item in value
            ]

    @property
    def error_message(self):
        return self._error_message

    @error_message.setter
    def error_message(self, value):
        self._error_message = value

    @property
    def result_code(self):
        return self._result_code

    @result_code.setter
    def result_code(self, value):
        self._result_code = value

    def parse_response_content(self, response_content):
        """Populate the typed fields from the decoded gateway response."""
        response = super(AlipayCommerceMedicalUsermessageSubscriptionQueryResponse, self).parse_response_content(response_content)
        for field in ('data', 'error_message', 'result_code'):
            if field in response:
                setattr(self, field, response[field])
| alipay/alipay-sdk-python-all | alipay/aop/api/response/AlipayCommerceMedicalUsermessageSubscriptionQueryResponse.py | AlipayCommerceMedicalUsermessageSubscriptionQueryResponse.py | py | 1,742 | python | en | code | 241 | github-code | 13 |
40109589541 | import pytest
from src.problem_0025.listnode import ListNode
from src.problem_0025.problem_0025 import Solution
@pytest.fixture
def solution() -> Solution:
    """Provide a fresh Solution instance to each test."""
    return Solution()
def list_to_listnode(list_: list[int]) -> ListNode:
    """Convert a list to a ListNode object.

    Args:
        list_ (list[int]): The list to convert.

    Returns:
        ListNode: The head of the list (None for an empty input).
    """
    # Build behind a dummy head so the loop needs no special first case.
    dummy = ListNode()
    tail = dummy
    for val in list_:
        tail.next = ListNode(val)
        tail = tail.next
    return dummy.next
def equals(list1: ListNode, list2: ListNode) -> bool:
    """Structurally compare two linked lists node by node."""
    # Walk both lists in lockstep instead of recursing.
    while list1 is not None and list2 is not None:
        if list1.val != list2.val:
            return False
        list1, list2 = list1.next, list2.next
    # Equal only if both lists ended together.
    return list1 is None and list2 is None
def test_case1(solution: Solution) -> None:
    """k=2: pairs are reversed; the trailing single node stays put."""
    head = list_to_listnode([1, 2, 3, 4, 5])
    expected = list_to_listnode([2, 1, 4, 3, 5])
    actual = solution.reverse_k_group(head, 2)
    assert equals(actual, expected)
def test_case2(solution: Solution) -> None:
    """k=3: the first triple is reversed; the short remainder stays."""
    head = list_to_listnode([1, 2, 3, 4, 5])
    expected = list_to_listnode([3, 2, 1, 4, 5])
    actual = solution.reverse_k_group(head, 3)
    assert equals(actual, expected)
def test_case3(solution: Solution) -> None:
    """k=4: the first four nodes are reversed; the last stays."""
    head = list_to_listnode([1, 2, 3, 4, 5])
    expected = list_to_listnode([4, 3, 2, 1, 5])
    actual = solution.reverse_k_group(head, 4)
    assert equals(actual, expected)
| firattamur/leetcode-python | tests/test_problem_0025.py | test_problem_0025.py | py | 1,523 | python | en | code | 0 | github-code | 13 |
4468052369 | from openpyxl.workbook import Workbook
from openpyxl import load_workbook
# Load workbook
wb = load_workbook('./data/regions.xlsx')
new_sheet = wb.create_sheet('ImportedSheet')
active_sheet = wb.active
# Select cell
cell = active_sheet['A1']
# Print selected cell in active sheet
print(cell)
# Print selected cell value in active sheet
print(cell.value)
# Edit exising workbook and save as modified (folder must be in place)
active_sheet['A1'] = 0
wb.save('./data/modified/modified.xlsx') | pchmielecki87/PythonScripts | Excel/Openpyxl/openpyLoadWorkbook.py | openpyLoadWorkbook.py | py | 493 | python | en | code | 0 | github-code | 13 |
15725718510 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Read the sample text and normalise it to lower case.
file = open("./sample.txt","r")
read_file = file.read()
# BUG FIX: lower() must be called on the text, not on the file object
# (the original `file.lower()` raised AttributeError).
read_file = read_file.lower()
file.close()

sampleWordList = []
# Keep this token intact before the comma is stripped as punctuation.
read_file = read_file.replace("2,000th","2000th")
# Replace punctuation and newlines with spaces so split() works cleanly.
puntuationList = [".",",","!","?","-","\'","\"","\n"]
for puntuation in puntuationList:
    read_file = read_file.replace(puntuation," ")
wordList = read_file.split(" ")
# Keep only words longer than five letters.
for word in wordList:
    if len(word)>5:
        sampleWordList.append(word)
print(sampleWordList)

inputword = input("Please input a word which includes more than 5 letters.")
if len(inputword) <=5:
    print("You should input a word which includes more than 5 letters.")
else:
    # BUG FIX: the original loop overwrote `result` on every word, so
    # only the comparison against the LAST word survived (and it raised
    # NameError on an empty list). A membership test is what was meant.
    result = inputword in sampleWordList
    if result == True:
        print(inputword," exists in sampleWordList.")
    else:
        print(inputword," does not exist in sampleWordList.")
17056975494 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.NimitzRange import NimitzRange
class NimitzRangeCond(object):
    """A keyed range condition: pairs a field name with a NimitzRange."""

    # Attribute names handled by the (de)serialization helpers, in output order.
    _FIELDS = ('key', 'range')

    def __init__(self):
        self._key = None
        self._range = None

    @property
    def key(self):
        return self._key

    @key.setter
    def key(self, new_value):
        self._key = new_value

    @property
    def range(self):
        return self._range

    @range.setter
    def range(self, new_value):
        # Accept either a ready NimitzRange instance or its dict representation.
        if isinstance(new_value, NimitzRange):
            self._range = new_value
        else:
            self._range = NimitzRange.from_alipay_dict(new_value)

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict."""
        params = dict()
        for field in self._FIELDS:
            current = getattr(self, field)
            if not current:
                continue
            if hasattr(current, 'to_alipay_dict'):
                params[field] = current.to_alipay_dict()
            else:
                params[field] = current
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a NimitzRangeCond from a dict; return None for empty input."""
        if not d:
            return None
        o = NimitzRangeCond()
        for field in NimitzRangeCond._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/NimitzRangeCond.py | NimitzRangeCond.py | py | 1,387 | python | en | code | 241 | github-code | 13 |
74760616337 | from itertools import product
def shift_picture_to_frame_start(
    frame: list[list[int]],
    pic_width: int,
    pic_height: int,
    pic_x: int,
    pic_y: int
) -> None:
    """Move the picture region at (pic_x, pic_y) to the frame's top-left corner.

    The move is done in place by swapping cells; validation errors raise
    ValueError. Nothing happens for a zero-sized picture or one that is
    already at the origin.
    """
    _validate_input(frame, pic_width, pic_height, pic_x, pic_y)
    if not (pic_width and pic_height):
        return
    if not (pic_x or pic_y):
        return
    # Walk the destination region top-left to bottom-right; since the source
    # index is always >= the destination index, swapping preserves the picture
    # even when the two regions overlap.
    for row in range(pic_height):
        src_row = row + pic_y
        for col in range(pic_width):
            src_col = col + pic_x
            frame[row][col], frame[src_row][src_col] = (
                frame[src_row][src_col], frame[row][col])
def _validate_input(
frame: list[list[int]],
pic_width: int,
pic_height: int,
pic_x: int,
pic_y: int
) -> None:
if frame == [] or any(len(row) == [] for row in frame):
raise ValueError('Cannot shift the picture within an empty frame')
if pic_width < 0 or pic_height < 0 or pic_x < 0 or pic_y < 0:
raise ValueError('Picture parameters cannot be negative values')
frame_width = len(frame[0])
frame_height = len(frame)
if pic_x + pic_width > frame_width or pic_y + pic_height > frame_height:
raise ValueError('The picture must be within the frame')
| ForeverProglamer/game-logic-dev-test-tasks | task-1/core.py | core.py | py | 1,215 | python | en | code | 0 | github-code | 13 |
from selenium import webdriver
import time

# Repeatedly open a news page in several tabs to simulate page views.
url = 'http://c.m.163.com/news/l/107212.html?w=4'

count = 12
while count > 0:
    # One fresh browser per round.
    driver = webdriver.Chrome()
    for j in range(5):
        driver.execute_script('''window.open("%s","_blank");''' % url)
        # driver.refresh()
    print('%s %d' % ('count :', count))
    count = count - 1
    # Fix: quit() releases the whole browser process. The original created a
    # driver per iteration but only close()d one window and quit() just the
    # last driver after the loop, leaking a Chrome instance every round (plus
    # an orphan driver created before the loop that was never used or quit).
    driver.quit()
    time.sleep(5)
print('close')
| liuzhy520/PYWebChecker | pywebrefresher/src/run.py | run.py | py | 454 | python | en | code | 0 | github-code | 13 |
5517187535 | """ Take standard deviation of residuals after explaining them by inputs """
# for grad
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
from numpy.polynomial.polynomial import Polynomial
from itertools import combinations
import statsmodels.api as sm
import numpy as np
import random
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
from .helpers import *
# Fixed RNG seed so network initialization and training are reproducible.
torch.manual_seed(42)
def hetero(X, episodes1, episodes2, lr1, lr2, hidden1, hidden2, lin_dependence=False, debug=False, viz=False):
    """Estimate a directed adjacency matrix by exploiting heteroscedasticity.

    For each variable pair (i, j), a network regresses one variable on the
    other in both directions; a second network then tries to explain the
    standardized squared residuals from the input. The direction whose
    residuals keep the larger unexplained spread receives the edge.

    Args:
        X: data with ``.shape == (n, d)`` whose columns are variables —
           assumed to be a torch tensor (it is re-wrapped via ``torch.tensor``);
           TODO confirm against callers.
        episodes1/episodes2: training episodes for the fit / residual models.
        lr1/lr2: learning rates for the fit / residual models.
        hidden1/hidden2: hidden sizes for the fit / residual models.
        lin_dependence: if True, skip pairs whose OLS slope is insignificant.
        debug: print per-pair diagnostics.
        viz: forwarded to ``train`` for visualization.

    Returns:
        W_est: (d, d) numpy array with ``W_est[i, j] == 1`` for estimated i -> j.
    """
    n, d = X.shape
    W_est = np.zeros((d, d))
    # Score every unordered variable pair once.
    for i, j in combinations(range(d), 2):
        if debug:
            print(i, j)
        x = X[:, i]
        y = X[:, j]
        # Optional pre-filter: drop pairs without a significant linear relation.
        if lin_dependence:
            if sm.OLS(x, y).fit().pvalues[0] > 0.05:
                if debug:
                    plt.scatter(x, y, s=1)
                    plt.show()
                    print('insignificant, skip')
                continue
        # Fit both directions and collect the residual-spread criterion.
        crit_vals = []
        for inp, tar in ((x, y), (y, x)):
            model = NN(hidden=hidden1)
            inpT = torch.tensor(inp, dtype=torch.float32).view(-1, 1)
            tarT = torch.tensor(tar, dtype=torch.float32).view(-1, 1)
            train(model, inpT, tarT, episodes=episodes1, lr=lr1, viz=viz)
            with torch.no_grad():
                # Standardized squared residuals of the first-stage fit.
                res = (tarT - model.forward(inpT))**2
                res = (res-torch.mean(res))/torch.std(res)
            # Second stage: try to explain the residual magnitudes from the input.
            res_model = NN(hidden=hidden2)
            train(res_model, inpT, res, episodes=episodes2, lr=lr2, viz=viz)
            with torch.no_grad():
                if debug:
                    # fix: this diagnostic previously printed unconditionally,
                    # spamming stdout even with debug=False.
                    print(torch.std(res))
                remaining = torch.std(res-res_model.forward(inpT))
            crit_vals.append(remaining.item())
        # Orient the edge toward the direction with the larger unexplained spread.
        if np.argmax(crit_vals) == 0:
            W_est[i, j] = 1
        else:
            W_est[j, i] = 1
        if debug:
            print(crit_vals)
            print(W_est)
            print()
    return W_est
if __name__ == '__main__':
    # Smoke test: two uniform variables with a known causal link 0 -> 1.
    # X = torch.randn(1000, 2)
    X = torch.zeros(3000, 2).uniform_(0, 1)
    X[:, 1] += 0.5*X[:, 0]
    # X[:, 2] += 0.5*X[:, 1]
    # Standardize each column before estimation.
    X = (X-torch.mean(X, axis=0))/torch.std(X, axis=0)
    W_est = hetero(X, episodes1=500, episodes2=500, lr1=1e-2, lr2=1e-2, hidden1=32, hidden2=32, debug=True, viz=True)
    # Expected (up to training noise): W_est[0, 1] == 1 — TODO confirm.
    print(W_est)
# Class notes: building collections with explicit loops vs. comprehensions.
# Example 1: convert human years to "pet years" (7 per year) with a loop.
l = [4,1,11,13]
ages = []
for person in l:
    petyears = person*7
    ages.append(petyears)
print(ages) # [28,7,77,91]
# Same result as a list comprehension.
ages = [person*7 for person in l]
# Comprehension with a filter: only entries younger than 10.
ages = [person*7 for person in l if person < 10]
###############################
# Example 2 — old way: halve every number with an explicit loop.
list_of_numbers = [9, 10, 5 ,100 ,23 ,2]
half_val = []
for x in list_of_numbers:
    half_of_x = x / 2
    half_val.append(half_of_x)
print(half_val)
#############################
# New way: the same transformation as a one-line comprehension.
list_of_numbers = [6, 14, 82, 52, 2, 1, 14]
half_val = [x / 2 for x in list_of_numbers]
print(half_val)
#############################
# Example 3 — dictionary comprehension.
# Old way: build a number -> square mapping with a loop.
list_of_numbers = [100, 67, 23, 45, 11]
square_numbers = {}
for x in list_of_numbers:
    square_numbers[x] = x ** 2
print(square_numbers)
# New way: {key: value} comprehension.
list_of_numbers = [100, 67, 23, 45, 11]
square_numbers = {number: number ** 2 for number in list_of_numbers}
print(square_numbers)
################################
# Example 4 — extracting a column from a nested list (matrix).
matrix = [["_", "X", "_"],
          ["O", "X", "O"],
          ["O", "X", "O"]]
target_column = []
for row in matrix:
    target_column.append(row[1])
print(target_column)
################################
# Same idea as a comprehension (here taking column index 2).
matrix = [["_", "X", "_"],
          ["O", "X", "O"],
          ["O", "X", "O"]]
target_column = [row[2] for row in matrix]
print(target_column)
| cjredmond/class_notes | week2/day1_comprehensions.py | day1_comprehensions.py | py | 1,377 | python | en | code | 0 | github-code | 13 |
5122160948 | import pygame
class Menu:
    """All menu screens for the Ludo game: main, player-count, color pick,
    in-board buttons and the per-color win screen.

    Relies on ``game.settings`` for fonts, colors, geometry and text/drawing
    helpers, and on ``game._check_events()`` to process input inside each
    menu's event loop.
    """
    def __init__(self, game):
        self.game = game
        self.settings = self.game.settings
        self.screen = game.screen
        self.screen_rect = self.screen.get_rect()
        self.initialize_menu_vars()
    def initialize_menu_vars(self):
        """Reset all menu state flags, button colors and shared widgets."""
        self.is_main_menu = False
        self.is_2nd_menu = False
        self.is_3rd_menu = False
        self.is_board_menu = False
        self.is_final_menu = False
        self.start_game_color = self.settings.WHITE
        self.exit_game_color = self.settings.WHITE
        self.roll_the_dice_button_color = self.settings.WHITE
        self.okay_button_color = self.settings.WHITE
        self.two_player_color = self.settings.BLACK
        self.three_player_color = self.settings.BLACK
        self.four_player_color = self.settings.BLACK
        self.selected_red = True
        self.selected_blue = True
        self.selected_green = True
        self.selected_yellow = True
        # Settings for skipped turns
        self.is_turn_skip = False
        self.skipped_turn_text = ""
        # This counter ensures that the game leaves the third menu after all players have chosen their colors
        self.counter = 0
        # To print text to show exactly what color the respective player will be choosing
        self.player_text = ""
        # Initializing the "Roll Dice" button
        self.roll_dice_button_coordinates = (self.settings.screen_center, 2*self.settings.box_size + 7*self.settings.box_size)
        self.roll_dice_button_rect = self.settings.translucent_background_setter(50, "Roll Dice", self.roll_dice_button_coordinates, "n")
        self.roll_dice_button_surface = self.settings.draw_translucent_background(self.roll_dice_button_rect)
        # Initialize the "Ok" button
        self.okay_button_coordinates = (self.settings.screen_center, 2*self.settings.box_size + 7*self.settings.box_size)
        self.okay_button_rect = self.settings.translucent_background_setter(50, "Okay", self.okay_button_coordinates, "n")
        self.okay_button_surface = self.settings.draw_translucent_background(self.okay_button_rect)
        self.okay_button = self.settings.draw_text("Okay",self.settings.MAIN_MENU_FONT_PATH, 50, self.okay_button_color, self.okay_button_coordinates[0], self.okay_button_coordinates[1], "n", False)
    def main_menu(self):
        """Event loop for the title screen (Start Game / Exit)."""
        self.is_main_menu = True
        while self.is_main_menu:
            self.screen.fill(self.settings.GREY)
            self.draw_main_menu()
            self.start_game = self.settings.draw_text("Start Game", self.settings.MAIN_MENU_FONT_PATH,50, self.start_game_color, self.start_text_coordinates[0], self.start_text_coordinates[1], "center", True)
            self.exit_game = self.settings.draw_text("Exit", self.settings.MAIN_MENU_FONT_PATH,50, self.exit_game_color, self.end_text_coordinates[0], self.end_text_coordinates[1], "center", True)
            self.game._check_events()
            pygame.display.flip()
    def second_menu(self):
        """Event loop for choosing the number of players (2, 3 or 4)."""
        self.is_2nd_menu = True
        while self.is_2nd_menu:
            self.screen.fill(self.settings.GREY)
            self.settings.draw_text("Select number of players", self.settings.LUDO_TITLE_FONT_PATH, 65, self.settings.LIGHT_BLUE, self.screen_rect.midtop[0], self.screen_rect.midtop[1], "n", True)
            self.player_2 = self.settings.draw_text("II Players", self.settings.MAIN_MENU_FONT_PATH,50, self.two_player_color, self.screen_rect.centerx - 6*self.settings.box_size, self.screen_rect.centery - self.settings.box_size, "center", True)
            self.player_3 = self.settings.draw_text("III Players", self.settings.MAIN_MENU_FONT_PATH,50, self.three_player_color, self.screen_rect.centerx + 6*self.settings.box_size, self.screen_rect.centery - self.settings.box_size, "center", True)
            self.player_4 = self.settings.draw_text("IV Players", self.settings.MAIN_MENU_FONT_PATH,50, self.four_player_color, self.screen_rect.centerx, self.screen_rect.centery + self.settings.box_size, "center", True)
            self.game._check_events()
            pygame.display.flip()
    def third_menu(self):
        """Event loop where each player picks a race/color in turn."""
        # Breaks out of all the loops then to run the game and calls the function to initialize the tokens for the players
        self.is_3rd_menu = True
        self.game.player.color_for_player = [""]*self.game.player.no_of_players
        while self.is_3rd_menu:
            if self.counter == self.game.player.no_of_players:
                self.is_3rd_menu = False
            # Update the race sprites
            self.game._check_events()
            # Draw the race sprites
            self.screen.fill(self.settings.GREY)
            self.draw_third_menu()
            pygame.display.flip()
    def final_menu(self, color):
        """Event loop for the win screen of the winning *color*."""
        while self.is_final_menu:
            self.game._check_events()
            self.draw_final_menu(color)
            pygame.display.flip()
    def initialize_main_menu(self):
        "Initializes the main menu elements"
        # Initializing the main menu background
        self.main_menu_background = pygame.image.load(self.settings.MAIN_MENU_BACKGROUND_PATH).convert()
        # Initializing the main menu title image
        self.title_image = pygame.image.load(self.settings.MAIN_MENU_TITLE_IMAGE_PATH)
        self.title_image_rect = self.title_image.get_rect()
        self.main_title_surface = self.settings.draw_translucent_background(self.title_image_rect)
        self.main_menu_title_image_coordinates = (self.screen_rect.midtop[0] - self.title_image_rect.midtop[0], self.screen_rect.midtop[1] - self.title_image_rect.midtop[1])
        self.title_image_rect.x, self.title_image_rect.y = self.main_menu_title_image_coordinates
        # Initializng elements for the start and exit buttons
        self.start_text_coordinates = (self.screen_rect.centerx, self.screen_rect.centery - self.settings.box_size)
        self.end_text_coordinates = (self.screen_rect.centerx, self.screen_rect.centery + self.settings.box_size)
        self.start_text_rect = self.settings.translucent_background_setter(50, "Start Game", self.start_text_coordinates, "center")
        self.end_text_rect = self.settings.translucent_background_setter(50, "Exit", self.end_text_coordinates, "center")
        self.start_game_surface = self.settings.draw_translucent_background(self.start_text_rect)
        self.end_game_surface = self.settings.draw_translucent_background(self.end_text_rect)
    def initialize_third_menu(self):
        """Load, scale and position the four race portraits for the color menu."""
        self.dwarf_surface = pygame.transform.scale(pygame.image.load(self.settings.DWARF_CHOOSE_MENU_PATH), (self.settings.third_menu_rect_width,int(1.5*self.settings.third_menu_rect_width)))
        self.orc_surface = pygame.transform.scale(pygame.image.load(self.settings.ORC_CHOOSE_MENU_PATH), (self.settings.third_menu_rect_width,int(1.5*self.settings.third_menu_rect_width)))
        self.lycan_surface = pygame.transform.scale(pygame.image.load(self.settings.LYCAN_CHOOSE_MENU_PATH), (self.settings.third_menu_rect_width,int(1.5*self.settings.third_menu_rect_width)))
        self.elf_surface = pygame.transform.scale(pygame.image.load(self.settings.ELF_CHOOSE_MENU_PATH), (self.settings.third_menu_rect_width,int(1.5*self.settings.third_menu_rect_width)))
        # White is treated as transparent on the portraits.
        self.dwarf_surface.set_colorkey(self.settings.WHITE)
        self.orc_surface.set_colorkey(self.settings.WHITE)
        self.lycan_surface.set_colorkey(self.settings.WHITE)
        self.elf_surface.set_colorkey(self.settings.WHITE)
        self.dwarf_rect = self.dwarf_surface.get_rect()
        self.orc_rect = self.orc_surface.get_rect()
        self.lycan_rect = self.lycan_surface.get_rect()
        self.elf_rect = self.elf_surface.get_rect()
        # Spread the four portraits evenly left-to-right across screen center.
        self.dwarf_rect.centerx, self.dwarf_rect.centery = (self.screen_rect.centerx - 3*self.settings.third_menu_rect_space, self.screen_rect.centery)
        self.orc_rect.centerx, self.orc_rect.centery = (self.screen_rect.centerx - self.settings.third_menu_rect_space, self.screen_rect.centery)
        self.lycan_rect.centerx, self.lycan_rect.centery = (self.screen_rect.centerx + self.settings.third_menu_rect_space, self.screen_rect.centery)
        self.elf_rect.centerx, self.elf_rect.centery = (self.screen_rect.centerx + 3*self.settings.third_menu_rect_space, self.screen_rect.centery)
    def initialize_final_menu(self):
        """Load the four win-screen background images."""
        self.dwarf_win_screen = pygame.image.load(self.settings.DWARF_WIN_SCREEN_PATH).convert()
        self.orc_win_screen = pygame.image.load(self.settings.ORC_WIN_SCREEN_PATH).convert()
        self.lycan_win_screen = pygame.image.load(self.settings.LYCAN_WIN_SCREEN_PATH).convert()
        self.elf_win_screen = pygame.image.load(self.settings.ELF_WIN_SCREEN_PATH).convert()
    def draw_main_menu(self):
        """Blit the title-screen background, title image and button panels."""
        self.screen.blit(self.main_menu_background, (0,0))
        self.screen.blit(self.main_title_surface, self.main_menu_title_image_coordinates)
        self.screen.blit(self.title_image, self.title_image_rect)
        self.screen.blit(self.start_game_surface, self.start_text_rect.topleft)
        self.screen.blit(self.end_game_surface, self.end_text_rect.topleft)
    def draw_third_menu(self):
        """Blit the race portraits and the prompt for the current player."""
        self.screen.blit(self.dwarf_surface, self.dwarf_rect)
        self.screen.blit(self.orc_surface, self.orc_rect)
        self.screen.blit(self.lycan_surface, self.lycan_rect)
        self.screen.blit(self.elf_surface, self.elf_rect)
        self.settings.draw_text(self.player_text, self.settings.MAIN_MENU_FONT_PATH, 25,self.settings.BLACK, self.screen_rect.centerx, self.screen_rect.centery + 5*self.settings.box_size, "center", True)
    def draw_final_menu(self, color):
        """Draw the win screen and epilogue text matching the winning *color*."""
        # red -> dwarfs
        if color == "red":
            self.screen.blit(self.dwarf_win_screen, self.settings.win_screen_coordinates)
            self.settings.draw_text("The dwarfs discover wonderous materials long thought to be lost to the ages in the ruins.",
                                    self.settings.LUDO_TITLE_FONT_PATH,15,self.settings.WHITE, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - 4*self.settings.box_size,
                                    align = "n", isDraw = True)
            self.settings.draw_text("With these materials the Dwarven empire gains technological supremacy over the other primal races.",
                                    self.settings.LUDO_TITLE_FONT_PATH,15,self.settings.WHITE, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - 4*self.settings.box_size + 20,
                                    align = "n", isDraw = True)
            self.settings.draw_text("You are assigned as the Dwarven High King by the dwarven race.",
                                    self.settings.LUDO_TITLE_FONT_PATH,15,self.settings.WHITE, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - 2*self.settings.box_size,
                                    align = "n", isDraw = True)
            self.settings.draw_text("Congratulations stranger, you bring a new age of technological advancements to ARCADIA!",
                                    self.settings.LUDO_TITLE_FONT_PATH,15,self.settings.WHITE, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - 2*self.settings.box_size + 20,
                                    align = "n", isDraw = True)
            # The text that will return you to main menu
            self.settings.draw_text("Press \"Spacebar\" to return back to the main menu...",
                                    self.settings.LUDO_TITLE_FONT_PATH,13,self.settings.LIGHT_RED, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - self.settings.box_size + 10,
                                    align = "n", isDraw = True)
        # green -> orcs
        elif color == "green":
            self.screen.blit(self.orc_win_screen, self.settings.win_screen_coordinates)
            self.settings.draw_text("The Orcs discover the lost art of blood magic in the ancient ruins.",
                                    self.settings.LUDO_TITLE_FONT_PATH,15,self.settings.WHITE, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - 3*self.settings.box_size,
                                    align = "n", isDraw = True)
            self.settings.draw_text("The orcs start using the blood magic to gain physical and mental enhancements, the orc hoard is now unstoppable by the other primal races.",
                                    self.settings.LUDO_TITLE_FONT_PATH,13,self.settings.WHITE, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - 3*self.settings.box_size + 20,
                                    align = "n", isDraw = True)
            self.settings.draw_text("You are assigned as the HEAD WARCHIEF of the orc hoards.",
                                    self.settings.LUDO_TITLE_FONT_PATH,15,self.settings.WHITE, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - 2*self.settings.box_size,
                                    align = "n", isDraw = True)
            self.settings.draw_text("Congratulations stranger, riches await you as you bring endless pillaging and war to ARCADIA!",
                                    self.settings.LUDO_TITLE_FONT_PATH,15,self.settings.WHITE, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - 2*self.settings.box_size + 20,
                                    align = "n", isDraw = True)
            # The text that will return you to main menu
            self.settings.draw_text("Press \"Spacebar\" to return back to the main menu...",
                                    self.settings.LUDO_TITLE_FONT_PATH,13,self.settings.LIGHT_RED, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - self.settings.box_size + 10,
                                    align = "n", isDraw = True)
        # blue -> lycans
        elif color == "blue":
            self.screen.blit(self.lycan_win_screen, self.settings.win_screen_coordinates)
            self.settings.draw_text("The Lycans discover the seeds of life in the ancient ruins. They use the seeds to bring back life to their dying continent,",
                                    self.settings.LUDO_TITLE_FONT_PATH,15,self.settings.WHITE, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - 4*self.settings.box_size,
                                    align = "n", isDraw = True)
            self.settings.draw_text("unfortunately, the cost of using the seeds is at the expense of other continents.",
                                    self.settings.LUDO_TITLE_FONT_PATH,15,self.settings.WHITE, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - 4*self.settings.box_size + 20,
                                    align = "n", isDraw = True)
            self.settings.draw_text("This brings upon an endless famine and death the lycans were previously suffering to the other primal races.",
                                    self.settings.LUDO_TITLE_FONT_PATH,15,self.settings.WHITE, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - 3*self.settings.box_size + 10,
                                    align = "n", isDraw = True)
            self.settings.draw_text("You are assigned as the PRIEST OF LIFE by the Lycan packs. Congratulations stranger,",
                                    self.settings.LUDO_TITLE_FONT_PATH,15,self.settings.WHITE, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - 2*self.settings.box_size,
                                    align = "n", isDraw = True)
            self.settings.draw_text("you bring peace to the lycan continent but mass famine and suffering to the rest of ARCADIA!",
                                    self.settings.LUDO_TITLE_FONT_PATH,15,self.settings.WHITE, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - 2*self.settings.box_size + 20,
                                    align = "n", isDraw = True)
            # The text that will return you to main menu
            self.settings.draw_text("Press \"Spacebar\" to return back to the main menu...",
                                    self.settings.LUDO_TITLE_FONT_PATH,13,self.settings.LIGHT_RED, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - self.settings.box_size + 10,
                                    align = "n", isDraw = True)
        # yellow -> elves
        elif color == "yellow":
            self.screen.blit(self.elf_win_screen, self.settings.win_screen_coordinates)
            self.settings.draw_text("The elves use the ancient power from the ruins to bring upon a new age of supremacy over the other primal races.",
                                    self.settings.LUDO_TITLE_FONT_PATH,15,self.settings.WHITE, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - 3*self.settings.box_size,
                                    align = "n", isDraw = True)
            self.settings.draw_text("You are assigned as ARCH MAGUS KAISER by the elven race. Congratulations stranger, you are now the iron fist ruler of ARCADIA!",
                                    self.settings.LUDO_TITLE_FONT_PATH,14,self.settings.WHITE, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - 2.5*self.settings.box_size,
                                    align = "n", isDraw = True)
            # The text that will return you to main menu
            self.settings.draw_text("Press \"Spacebar\" to return back to the main menu...",
                                    self.settings.LUDO_TITLE_FONT_PATH,13,self.settings.LIGHT_RED, self.settings.final_menu_text_coordinates[0], self.settings.final_menu_text_coordinates[1] - self.settings.box_size - 20,
                                    align = "n", isDraw = True)
    def show_buttons_on_board_menu(self):
        "This function takes care of showing the buttons on the board menu."
        if self.is_turn_skip:
            # A turn was skipped: show the notice and an "Okay" acknowledgement.
            turn_skip_text_color = self.settings.token_color_dictionary[self.game.player.current_player_color]
            self.settings.draw_text(f"{self.skipped_turn_text}", self.settings.MAIN_MENU_FONT_PATH, 25, turn_skip_text_color, self.settings.screen_center, 5*self.settings.box_size, "n", True)
            self.screen.blit(self.okay_button_surface, self.okay_button_rect.topleft)
            self.settings.draw_text("Okay",self.settings.MAIN_MENU_FONT_PATH, 50, self.okay_button_color, self.okay_button_coordinates[0], self.okay_button_coordinates[1], "n", True)
        else:
            # Normal turn: show whose turn it is and the "Roll Dice" button.
            self.game.player.show_current_player()
            self.screen.blit(self.roll_dice_button_surface, self.roll_dice_button_rect.topleft)
            self.rtd_button = self.settings.draw_text("Roll Dice",self.settings.MAIN_MENU_FONT_PATH, 50, self.roll_the_dice_button_color, self.roll_dice_button_coordinates[0], self.roll_dice_button_coordinates[1], "n", True)
import os
# This gets our current location in the file system.
import inspect
HERE_PATH = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# Add the parent directory to sys.path, so we can access the utils easily.
import sys
root_path = os.path.join(HERE_PATH, '..')
sys.path.append(root_path)
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import filetools
from utils.experiment import read_eval
from utils.plot_helpers import save_and_close_figure
# Design figure: global matplotlib styling (tick and label sizes) shared by
# every plot produced below.
fontsize = 22
matplotlib.rc('xtick', labelsize=20)
matplotlib.rc('ytick', labelsize=20)
matplotlib.rcParams.update({'font.size': fontsize})
if __name__ == '__main__':
    # Aggregate accuracy results per problem and per acquisition method, then
    # plot mean accuracy over iterations for each problem.
    problem_names = ['circle', 'sinus']
    method_names = ['random', 'uncertainty_batch_no_repulsion', 'uncertainty_batch', 'uncertainty_single']
    results = {}
    for problem_name in problem_names:
        results[problem_name] = {}
        for method_name in method_names:
            # Each repeat of an experiment leaves one xp_eval.json in this folder.
            foldername = os.path.join(HERE_PATH, 'plot', problem_name, method_name)
            all_eval_file = filetools.list_files(foldername, ['xp_eval.json'])
            accuracy_score = []
            n_support_vector = []
            for eval_file in all_eval_file:
                xp_eval = read_eval(eval_file)
                accuracy_score.append(xp_eval['accuracy_score'])
                n_support_vector.append(xp_eval['n_support_vector'])
            # Summary stats across repeats (axis=0: per-iteration mean/std).
            result = {}
            result['accuracy_score'] = accuracy_score
            result['mean_accuracy_score'] = np.mean(accuracy_score, axis=0)
            result['std_accuracy_score'] = np.std(accuracy_score, axis=0)
            result['n_support_vector'] = n_support_vector
            result['mean_n_support_vector'] = np.mean(n_support_vector, axis=0)
            result['std_n_support_vector'] = np.std(n_support_vector, axis=0)
            results[problem_name][method_name] = result
    #
    figures = []
    axs = []  # NOTE(review): collected nowhere below — apparently unused.
    for problem_name in problem_names:
        fig = plt.figure(figsize=(12, 8))
        all_data = []
        for i, method_name in enumerate(method_names):
            data = results[problem_name][method_name]['accuracy_score']
            data = np.array(data)
            if method_name == 'uncertainty_single':
                # Subsample every 10th step — presumably because this method
                # records per-sample instead of per-batch; TODO confirm.
                data = data[:, 0:-1:10]
            all_data.append(data)
        # Rearrange to (repeats, iterations, methods) as sns.tsplot expects.
        all_data = np.array(all_data)
        all_data = np.swapaxes(all_data, 0, 1)
        all_data = np.swapaxes(all_data, 1, 2)
        ax = sns.tsplot(all_data, time=range(1, 11), condition=method_names, ci=[68, 95])
        # Dashed reference line at perfect accuracy.
        ax.plot([1, 10], [1, 1], color='grey', linestyle='--')
        ax.set_xlabel('Iterations', fontsize=fontsize)
        ax.set_ylabel('Prediction Accuracy', fontsize=fontsize)
        ylim = ax.get_ylim()
        ax.set_ylim([ylim[0], 1 + 0.05 * np.diff(ylim)])
        ax.legend(bbox_to_anchor=(1, 0.35), fontsize=fontsize)
        figures.append(fig)
    #
    # Save one accuracy figure per problem next to its result folders.
    for i, fig in enumerate(figures):
        foldername = os.path.join(HERE_PATH, 'plot', problem_names[i])
        filename = os.path.join(foldername, 'accuracy')
        save_and_close_figure(fig, filename, exts=['.png'])
| croningp/crystal_active_learning | simulation/plot_perf.py | plot_perf.py | py | 3,186 | python | en | code | 4 | github-code | 13 |
17051259244 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class DiscountDetail(object):
    """Value object describing a single discount line for the Alipay API."""

    # Attribute names handled by the (de)serialization helpers, in output order.
    _FIELDS = ('discount_amount', 'discount_desc', 'discount_type',
               'id', 'is_hit', 'is_purchased', 'name')

    def __init__(self):
        self._discount_amount = None
        self._discount_desc = None
        self._discount_type = None
        self._id = None
        self._is_hit = None
        self._is_purchased = None
        self._name = None

    @property
    def discount_amount(self):
        return self._discount_amount

    @discount_amount.setter
    def discount_amount(self, new_value):
        self._discount_amount = new_value

    @property
    def discount_desc(self):
        return self._discount_desc

    @discount_desc.setter
    def discount_desc(self, new_value):
        # Only list inputs are accepted; anything else is silently ignored.
        if isinstance(new_value, list):
            self._discount_desc = list(new_value)

    @property
    def discount_type(self):
        return self._discount_type

    @discount_type.setter
    def discount_type(self, new_value):
        self._discount_type = new_value

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, new_value):
        self._id = new_value

    @property
    def is_hit(self):
        return self._is_hit

    @is_hit.setter
    def is_hit(self, new_value):
        self._is_hit = new_value

    @property
    def is_purchased(self):
        return self._is_purchased

    @is_purchased.setter
    def is_purchased(self, new_value):
        self._is_purchased = new_value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, new_value):
        self._name = new_value

    def to_alipay_dict(self):
        """Serialize every truthy field into a plain dict.

        Objects exposing ``to_alipay_dict`` are serialized recursively;
        list elements of ``discount_desc`` are converted in place.
        """
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            if field == 'discount_desc' and isinstance(value, list):
                for idx, item in enumerate(value):
                    if hasattr(item, 'to_alipay_dict'):
                        value[idx] = item.to_alipay_dict()
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a DiscountDetail from a dict; return None for empty input."""
        if not d:
            return None
        o = DiscountDetail()
        for field in DiscountDetail._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/DiscountDetail.py | DiscountDetail.py | py | 4,216 | python | en | code | 241 | github-code | 13 |
29405519938 | # -*- coding: utf-8 -*-
"""
@author: MD.Nazmuddoha Ansary
"""
from __future__ import print_function
from termcolor import colored
import numpy as np
import matplotlib.pyplot as plt
import cv2
import scipy.signal
from scipy.ndimage import rotate
from scipy.ndimage.measurements import center_of_mass
from scipy.ndimage.interpolation import shift
from skimage.morphology import skeletonize_3d
import os
from matplotlib import gridspec
class Separator(object):
def __init__(self,img_file_path):
self.img_file_path=img_file_path
self.output_dir=None
def __binarizeData(self):
print(colored('# Binarizing Image!!','green'))
img_data = cv2.imread(self.img_file_path,0)
self.plotData(img_data,identifier='Raw Data')
blur = cv2.GaussianBlur(img_data,(5,5),0)
_,thresholded_data = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
thresholded_data[thresholded_data<255]=0
thresholded_data[thresholded_data==255]=1
self.data=1-thresholded_data
def __mapConnectedComponents(self):
print(colored('# Tracking Connected Components !!','green'))
kernel = np.ones((5,5), np.uint8)
dilated_data = cv2.dilate(self.data, kernel, iterations=6)
labeled_data,num_of_components =scipy.ndimage.measurements.label(dilated_data)
diff=self.data*num_of_components - labeled_data
diff[diff<0]=0
self.plotData(diff,identifier='Detected Words/Segments')
print(colored('# Storing Words !!','green'))
self.words=[]
self.sk_words=[]
self.row_sum=[]
self.col_sum=[]
for component in range(1,num_of_components):
idx = np.where(diff==component)
y,h,x,w = np.min(idx[0]), np.max(idx[0]), np.min(idx[1]), np.max(idx[1])
word=np.ones((h-y+1,w-x+1))
idx=(idx[0]-y,idx[1]-x)
word[idx]=0
inv_word=1-word
sk_word=skeletonize_3d(inv_word)/255
#rotate
coords = np.column_stack(np.where(sk_word > 0))
angle = cv2.minAreaRect(coords)[-1]
if angle < -45:
angle = -(90 + angle)
else:
angle = -angle
# rotate the image to deskew it
(h, w) = inv_word.shape[:2]
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(sk_word, M, (w, h),flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
self.row_sum.append(np.sum(1-rotated,axis=1))
self.col_sum.append(np.sum(1-rotated,axis=0))
self.sk_words.append(rotated)
self.words.append(word)
self.words.reverse()
self.sk_words.reverse()
def __nullifySingle(self,line_width=1):
for i in range(len(self.words)):
word=self.words[i]
self.plotData(word)
max_y=np.nanmax(self.col_sum[i])
thresh_y=(max_y-line_width)/max_y
y=self.col_sum[i]/max_y
cols=np.argwhere(y>=thresh_y)
for col in cols:
word[:,col]=1
self.plotData(word)
def preprocessData(self,img_file_path,return_img=False):
self.img_file_path=img_file_path
self.__binarizeData()
self.plotData(self.data,identifier="Binary Data")
self.__mapConnectedComponents()
self.debug_plothist()
#self.__nullifySingle()
if return_img:
return self.data
    def plotData(self,data,plot_now_flag=True,save_plot_flag=False,identifier=None):
        """Show and/or save a 2-D array as a matplotlib image.

        :param data: array to display with imshow.
        :param plot_now_flag: when True, display the figure immediately.
        :param save_plot_flag: when True, save as '<identifier>.png' in
            self.output_dir. NOTE(review): saving with identifier=None would
            raise TypeError on the string concatenation — confirm callers
            always pass an identifier when saving.
        :param identifier: optional figure name/title.
        """
        if identifier:
            plt.figure(identifier)
            plt.title(identifier)
        plt.grid(True)
        plt.imshow(data)
        if plot_now_flag:
            plt.show()
        if save_plot_flag:
            print('Saving {} at {}'.format(identifier+'.png',self.output_dir))
            plt.savefig(os.path.join(self.output_dir,identifier+'.png'))
            plt.clf()
        plt.close()
def debug_plothist(self,plot_now_flag=True,save_plot_flag=False):
for i in range(len(self.words)):
fig = plt.figure('Word/Segment {}'.format(i))
gs = gridspec.GridSpec(2, 2,width_ratios=[10,1],height_ratios=[5,1])
img_ax = plt.subplot(gs[0])
img_ax.imshow(self.words[i]+self.sk_words[i])
x=self.row_sum[i]/np.nanmax(self.row_sum[i])
y=np.flip(np.arange(x.shape[0]))
col_hist = plt.subplot(gs[1])
col_hist.plot(x,y)
y=self.col_sum[i]/np.nanmax(self.col_sum[i])
x=np.arange(y.shape[0])
row_hist = plt.subplot(gs[2])
row_hist.plot(x,y)
if plot_now_flag:
plt.show()
if save_plot_flag:
print('Saving {} at {}'.format(identifier+'.png',self.output_dir))
plt.savefig(os.path.join(self.output_dir,identifier+'.png'))
plt.clf()
plt.close()
# Manual smoke test: segment the bundled sample page.
if __name__=='__main__':
    img_file_path='test.jpg'
    SeparatorObj=Separator(img_file_path)
    SeparatorObj.preprocessData(img_file_path)
| mnansary/pyHOCR | segmentation/script.py | script.py | py | 5,398 | python | en | code | 3 | github-code | 13 |
10977220224 | # dockstring for my file
""" This is tic tac toe console python game """
class GameSession:
    """Console tic-tac-toe session: board state, input validation and rules."""

    def __init__(self):
        self.coords = []          # last coordinates entered by a player
        self.value = 'O'          # mark placed by the current player
        self.end_game = False     # set once somebody wins or the board fills
        self.check_value = False  # True when the last input was accepted
        # current field
        self.dict = [[' ', ' ', ' '],
                     [' ', ' ', ' '],
                     [' ', ' ', ' ']]
        # combination of coordinates that will lead to victory
        self.win_combination = [[[1, 1], [1, 2], [1, 3]],
                                [[1, 1], [2, 1], [3, 1]],
                                [[1, 1], [2, 2], [3, 3]],
                                [[3, 1], [2, 2], [1, 3]],
                                [[1, 3], [2, 3], [3, 3]],
                                [[3, 1], [3, 2], [3, 3]],
                                [[2, 1], [2, 2], [2, 3]],
                                [[1, 2], [2, 2], [3, 2]],
                                ]

    def point_input(self):
        """Read and validate a move; re-prompt recursively on bad input."""
        self.coords = input().split()
        if len(self.coords) != 2:
            print('Введите 2 числа через пробел!\n'
                  'Ожидаются координаты от 1 до 3')
            self.check_value = False
            self.point_input()
            return None
        # ROBUSTNESS FIX: reject non-numeric tokens before int() conversion,
        # which previously raised ValueError and crashed the game.
        if not (self.coords[0].isdigit() and self.coords[1].isdigit()) \
                or int(self.coords[0]) not in (1, 2, 3) \
                or int(self.coords[1]) not in (1, 2, 3):
            print('Неверный формат, повторите ввод!\n'
                  'Ожидаются координаты от 1 до 3')
            self.check_value = False
            self.point_input()
            return None
        if self.dict[int(self.coords[0])-1][int(self.coords[1])-1] != ' ':
            print('Клетка уже занята! измените выбор!')
            self.check_value = False
            self.point_input()
            return None
        self.check_value = True
        return [self.coords[0], self.coords[1]]

    def display_board(self):
        """Display game board on screen."""
        print('y\\x', '', "|", 1, "|", 2, "|", 3)
        print("    ---------------")
        print('  ', '1', "|", self.dict[0][0], "|",
              self.dict[0][1], "|", self.dict[0][2])
        print("    ---------------")
        print('  ', '2', "|", self.dict[1][0], "|",
              self.dict[1][1], "|", self.dict[1][2])
        print("    ---------------")
        print('  ', '3', "|", self.dict[2][0], "|",
              self.dict[2][1], "|", self.dict[2][2])

    def field_update(self):
        """Ask for a move, place the current mark, and redraw the board."""
        self.point_input()
        self.dict[int(self.coords[0])-1][int(self.coords[1])-1] = self.value
        self.display_board()

    def game_process(self):
        """Main loop: alternate players until a win or a draw is detected."""
        # BUG FIX: the original called the module-level global `game` here
        # instead of `self`, coupling the class to a specific global instance.
        self.display_board()
        player = 1
        while self.end_game is not True:
            print(f'Xодит игрок {player}.\nKуда поставить {self.value}?\n'
                  '(введите координаты Х и У от 1 до 3 через пробел):')
            self.field_update()
            # Check every winning line for three identical non-blank marks.
            for each in self.win_combination:
                if self.dict[each[0][0] - 1][each[0][1] - 1] == \
                        self.dict[each[1][0] - 1][each[1][1] - 1] == \
                        self.dict[each[2][0] - 1][each[2][1] - 1] != ' ':
                    self.end_game = True
                    print(f'Player {player} win!')
            # Hand the turn to the other player.
            if player == 1:
                player += 1
                self.value = 'X'
            else:
                player -= 1
                self.value = 'O'
            # Full board with no winner is a draw.
            if ' ' not in self.dict[0] \
                    and ' ' not in self.dict[1] \
                    and ' ' not in self.dict[2]:
                self.end_game = True
                print('Draft')
# Start a single interactive game when the module is run.
game = GameSession()
game.game_process()
| PaulusSE/tic-tac-toe | game_ttt.py | game_ttt.py | py | 4,398 | python | en | code | 0 | github-code | 13 |
22074680915 | #!/usr/bin/env python3
"""
Simple client to get an object from the NPO Frontend API media endpoint. This version accepts explicit key, secret origins.
"""
from npoapi.media import Media
def check_credentials():
    """Fetch one media object from the NPO Frontend API with explicit credentials.

    Positional CLI arguments: apikey, apisecret, origin, and an optional mid
    (defaults to WO_NCRV_026201). Prints the API response so the user can
    verify that the supplied credentials work.
    """
    client = Media().command_line_client(
        description="Get an media object from the NPO Frontend API using provided credentials. This lets you easily "
                    "check whether new credentials do work")
    client.add_argument('apikey', type=str, nargs=1, help='key')
    client.add_argument('apisecret', type=str, nargs=1, help='secret')
    client.add_argument('origin', type=str, nargs=1, help='origin')
    client.add_argument('mid', type=str, nargs='?', help='mid', default="WO_NCRV_026201")
    args = client.parse_args()
    # Authenticate with the user-supplied key/secret/origin triple.
    client.login(args.apikey[0], args.apisecret[0], args.origin[0])
    mid = args.mid
    print(client.get(mid))
    client.exit()
if __name__ == "__main__":
check_credentials()
| npo-poms/pyapi | src/npoapi/bin/npo_check_credentials.py | npo_check_credentials.py | py | 952 | python | en | code | 0 | github-code | 13 |
221561886 |
import torch
from torch.autograd import Function
from torch.autograd import gradcheck
import torch.nn as nn
#import torchvision
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from datamaestro import prepare_dataset
import torch.nn.functional as F
#Dataset et Dataloader
class MonDataset(Dataset):
    """MNIST-style dataset wrapper: pixel values are rescaled to [0, 1] floats."""

    def __init__(self, data, label):
        # Images arrive as 0..255 integers; normalize once at construction.
        self.data = torch.tensor(data, dtype=torch.float) / 255
        self.label = torch.tensor(label, dtype=torch.float)

    def __getitem__(self, index):
        # Return an (example, label) pair for the given index.
        return self.data[index], self.label[index]

    def __len__(self):
        return self.label.shape[0]
# Download/prepare MNIST via datamaestro and wrap it in DataLoaders.
ds = prepare_dataset ( "com.lecun.mnist")
train_images , train_labels = ds.files["train/images"].data() ,ds.files["train/labels"].data()
test_images , test_labels = ds.files["test/images"].data() , ds.files["test/labels"].data()
BATCH_SIZE = 5
data_train = DataLoader(MonDataset(train_images, train_labels), shuffle=True , batch_size=BATCH_SIZE)
data_test = DataLoader(MonDataset(test_images, test_labels), shuffle=True , batch_size=BATCH_SIZE)
#Autoencodeur
"""
class Encodeur(nn.Module):
def __init__(self,dim_in,dim_out) :
super(Model,self).__init__()
self.linear = torch.nn.Linear(dim_in,dim_out)
self.relu = torch.nn.ReLU()
def forward(self,x):
y = self.linear(x)
return self.relu(y)
class Decodeur(nn.Module):
def __init__(self,dim_in,dim_out) :
super(Model,self).__init__()
self.linear = torch.nn.Linear(dim_in,dim_out)
self.sig = torch.nn.Sigmoid()
def forward(self,x):
y = self.linear(x).squeeze()
return self.relu(y)
"""
class Autoencodeur(nn.Module):
    """Tied-weight autoencoder: encoder relu(x @ W + b), decoder sigmoid(y @ W.T + b).

    A single weight matrix ``w`` of shape (dim_in, dim_out) is shared between
    the encoder and the (transposed) decoder — the classic tied-weight setup.
    """

    def __init__(self, dim_in, dim_out):
        # BUG FIX: the original called super(Model, self).__init__() where
        # ``Model`` does not exist, raising NameError on instantiation.
        super().__init__()
        self.dim_in = dim_in
        self.dim_out = dim_out
        self.w = torch.nn.Parameter(torch.randn(self.dim_in, self.dim_out, dtype=torch.float64))
        self.b = torch.nn.Parameter(torch.randn(1, dtype=torch.float64))

    def encodeur(self, x):
        # BUG FIX: ``torch.nn.functionnal`` was a typo and nn.Sequential cannot
        # wrap the *result* of a functional call; use F.linear/F.relu directly.
        # F.linear computes x @ weight.T + bias, so the encoder passes w.T
        # (maps dim_in -> dim_out). Inputs are cast to float64 to match w.
        return F.relu(F.linear(x.double(), self.w.t(), self.b))

    def decodeur(self, y):
        # Decoder mirrors the encoder with the tied (untransposed) weights,
        # mapping dim_out -> dim_in, squashed into (0, 1) by the sigmoid.
        return torch.sigmoid(F.linear(y.double(), self.w, self.b))
# Smoke test: instantiate the model and iterate the training loader once.
# BUG FIX: the original instantiated ``Encodeur``, a class that only exists
# inside the commented-out docstring above (NameError at import time).
encode = Autoencodeur(28, 5)
for x, y in data_train :
    pass
#A tester
#Faire parameter et sequencial, function | Salomelette/AMAL2 | AMAL2/TME3/TME3.py | TME3.py | py | 2,645 | python | en | code | 0 | github-code | 13 |
38025000752 | #!/usr/bin/python3
import sys
def calculate(n, w1, maximum):
    """Scan the first n entries of w1 and return the updated running maximum.

    Keeps an anchor index that tracks a candidate element; the anchor advances
    when its value drops below the running maximum, skips past an equal pair,
    or moves forward when a larger element follows it.
    """
    anchor = 0
    for pos in range(1, n):
        # A stale anchor (below the current maximum) simply jumps forward.
        if w1[anchor] < maximum:
            anchor = pos
            continue
        if w1[anchor] == w1[pos]:
            if anchor != pos:
                anchor = pos + 1
        elif w1[pos] > w1[anchor]:
            maximum = w1[anchor]
            anchor = pos
        elif w1[pos] > maximum:
            maximum = w1[pos]
    # The element still anchored at the end also competes for the maximum.
    if anchor < n:
        maximum = max(maximum, w1[anchor])
    return maximum
def main():
    """Read the problem input and print the result of both calculate passes."""
    n = int(input())
    first = [int(tok) for tok in input().split()]
    second = [int(tok) for tok in input().split()]
    # A single weight is its own answer.
    if n == 1:
        print(first[0])
        return
    best = calculate(n, first, 0)
    best = calculate(n, second, best)
    print(best)
if __name__ == "__main__":
main()
| hmichelova/inf237 | set03/weights.py | weights.py | py | 909 | python | en | code | 0 | github-code | 13 |
74101533136 | import argparse
import csv
import codecs
import glob
import pickle
from tqdm import tqdm
# Command-line interface; the parsed ``args`` is a module-level global used
# by all the minimise_* helpers below.
parser = argparse.ArgumentParser()
parser.add_argument("--source", help="flag for path to data")
parser.add_argument("--target", help="flag for path to destination (must have a folder there)")
parser.add_argument("--include", help="flag for including files containing the given string")
# parser.add_argument("--exclude", help="flag for excluding files containing the given string")
parser.add_argument("--type", help="whether using new cell files, cluster files, or lymphocyte files")
args = parser.parse_args()
def load(src):
    """Read a CSV file (UTF-8) and return its rows as a list of lists,
    with a tqdm progress bar while reading."""
    handle = codecs.open(src, "rU", "utf-8")
    return [row for row in tqdm(csv.reader(handle))]
def minimise_new_cell_file(src):
    """Interactively trim a cell CSV to its key columns and pickle the result.

    Shows the last row so the operator can spot a truncated artifact:
    answering 'yes' to 'Cancel?' aborts; 'yes' to 'Delete?' drops that row.
    The pickle is written next to args.target with the same base name.
    NOTE(review): kept column meanings come from the inline comment below —
    confirm them against the CSV schema.
    """
    input_list = load(src)
    # Display the trailing row so the user can decide whether to drop it.
    print(input_list[len(input_list) - 1])
    to_cancel = input("Cancel? ")
    if to_cancel.lower() == "yes":
        return
    response = input("Delete? ")
    if response.lower() == "yes":
        del input_list[len(input_list) - 1]
    for i, row in enumerate(input_list):
        entry = input_list[i][2:9]
        entry.append(input_list[i][-4]) # xMin, xMax, yMin, yMax, dye 1, dye 2, dye 3, neutral, cell area
        input_list[i] = entry
    print(src.split('/')[-1][:-4] + ".p") # Remove '.csv' extension from src name
    print(len(input_list))
    print(input_list[0])
    file_name = args.target + src.split('/')[-1][:-4] + ".p" # Remove ".csv" extension from src name
    p_file = open(file_name, "wb")
    pickle.dump(input_list, p_file)
    p_file.close()
def minimise_new_cluster_file(src):
    """Interactively trim a cluster CSV to its key columns and pickle the result.

    Same interactive flow as minimise_new_cell_file: shows the last row,
    'Cancel?' aborts, 'Delete?' drops it; writes '<name>.p' under args.target.
    """
    input_list = load(src)
    # Display the trailing row so the user can decide whether to drop it.
    print(input_list[len(input_list) - 1])
    to_cancel = input("Cancel? ")
    if to_cancel.lower() == "yes":
        return
    response = input("Delete? ")
    if response.lower() == "yes":
        del input_list[len(input_list) - 1]
    for i, row in enumerate(input_list):
        input_list[i] = [input_list[i][1]] + input_list[i][3:] # all cells (ignore), Dye 2 cells (cancer clusters), region Area (ignore), xMax, xMin, yMax, yMin
    file_name = args.target + src.split('/')[-1][:-4] + ".p" # Remove ".csv" extension from src name
    p_file = open(file_name, "wb")
    pickle.dump(input_list, p_file)
    p_file.close()
# Annoyingly, the actual sizes of these resulting files in the aggregate lead to 23GB of space. Not ideal, and weirdly less space efficient. I ended up not
# using this function.
def minimise_new_lymphocyte_file(src):
    """Pickle a lymphocyte CSV after the interactive last-row check.

    NOTE(review): the per-row loop copies each row unchanged
    (``input_list[i][:]`` is a full slice), so unlike the other minimise_*
    helpers no columns are actually dropped — confirm this is intentional.
    Per the comment above, the resulting pickles ended up *less* space
    efficient and this function was ultimately not used.
    """
    input_list = load(src)
    # Display the trailing row so the user can decide whether to drop it.
    print(input_list[len(input_list) - 1])
    to_cancel = input("Cancel? ")
    if to_cancel.lower() == "yes":
        return
    response = input("Delete? ")
    if response.lower() == "yes":
        del input_list[len(input_list) - 1]
    for i, row in enumerate(input_list):
        input_list[i] = input_list[i][:] # xMin, xMax, yMin, yMax, dye 3 (cd3), dye 4 (cd8)
    file_name = args.target + src.split('/')[-1][:-4] + ".p" # Remove ".csv" extension from src name
    p_file = open(file_name, "wb")
    pickle.dump(input_list, p_file)
    p_file.close()
def minimise_validation_set_tb(src):
    """
    This file contains all of the patients, unfortunately.
    Need to split up and create multiple output files.
    Need to get name (line[0]), number of dye2 positive cells (line[6]),
    and the Xmax/Xmin/Ymax/Ymin (line[-4:]).
    """
    input_list = load(src)
    # Validate the number of distinct files here
    # Group entries per source filename (column 0), skipping the header row.
    file_data = {}
    for i, row in enumerate(input_list):
        if i == 0:
            continue
        entry = [row[54], row[5], row[4], row[7], row[6]]  # dye2+ cells, xmax, xmin, ymax, ymin
        filename = row[0]
        if filename in file_data:
            file_data[filename].append(entry)
        else:
            file_data[filename] = [entry]
    print("Number of files:", len(file_data.keys()))
    # One pickle per patient; the code is the last backslash-separated token.
    for filename in file_data.keys():
        output_filename = "/Volumes/Dan Media/ValidationDataset/TB_pickle_files/" + filename.split('\\')[-1][0:-4] + ".p"  # get the last token with the patient code
        print(output_filename)
        p_file = open(output_filename, "wb")
        pickle.dump(file_data[filename], p_file)
        p_file.close()
def clear_data(file_data):
    """Append each patient's buffered rows to its per-patient CSV file."""
    print("in clear_data()")
    out_dir = "/Volumes/Dan Media/ValidationDataset/lymphocyte_csv_files/"
    for key, rows in file_data.items():
        # The patient code is the final backslash-separated token of the key.
        patient_code = key.split('\\')[-1]
        with open(out_dir + patient_code + ".csv", 'a') as handle:
            csv.writer(handle).writerows(rows)
def minimise_validation_set_lymphocyte(src):
    """
    This file contains all of the paients. This means a mammoth 100+GB of data
    per file. Have to stream through the file rather than store in intermediate list,
    because there is LITERALLY not enough space to do otherwise.
    Need to split up and create multiple output files.
    Need to get name (line[0]), number of dye 3/4 for CD3/CD8 (respectively, line[22]/line[29]),
    and Xmax/Xmin/Ymax/Ymin (line[4:8]).
    """
    # Validate the number of distinct files here
    file_data = {}
    # Flush the in-memory buffer to disk every `threshold` rows to bound RAM.
    threshold = 2000000
    unique_keys = set()
    reader = csv.reader(codecs.open(src, "rU", "utf-8"))
    for i, row in tqdm(enumerate(reader)):
        # Skip the first row.
        if i == 0:
            continue
        if i % threshold == 0:
            clear_data(file_data)
            file_data = {}
        entry = [row[4], row[5], row[6], row[7], row[22], row[29]]  # xmin, xmax, ymin, ymax, dye3+, dye4+ cells
        filename = row[0]
        if filename not in unique_keys:
            unique_keys.add(filename)
        if filename in file_data:
            file_data[filename].append(entry)
        else:
            file_data[filename] = [entry]
    print("Number of files:", len(unique_keys))
    # Flush whatever remains after the final partial batch.
    clear_data(file_data)
    return unique_keys
def get_all_validation_cohort_codes():
    """Load the cohort CSV and return the Edinburgh (SN) and Japanese (HU) code sets.

    Codes are zero-padded to three digits and prefixed; the all-zero
    placeholder code is excluded from each cohort.
    """
    all_codes = load("/Users/soutar/Documents/Computer Science/CS4098/Patient_codes_validation_cohorts.csv")
    all_codes_japan = set()
    all_codes_edinburgh = set()
    for (SN_code, HU_code) in all_codes[1:]:
        # zfill replaces the manual left-padding loops from the original.
        ecode = "SN" + SN_code.zfill(3)
        jcode = "HU" + HU_code.zfill(3)
        if ecode != "SN000":
            all_codes_edinburgh.add(ecode)
        # BUG FIX: the original compared jcode against "SN000", which an
        # HU-prefixed code can never equal, so the HU000 placeholder was
        # never filtered out of the Japanese cohort.
        if jcode != "HU000":
            all_codes_japan.add(jcode)
    return {"EDINBURGH CODES": all_codes_edinburgh, "JAPANESE CODES": all_codes_japan}
if __name__ == "__main__":
for file in glob.glob(args.source + "*.csv"):
if (args.include is None) or args.include in file:
print("Minimising", file, "...")
if args.type.lower() == "clusters":
minimise_new_cluster_file(file)
elif args.type.lower() == "lymphocytes":
minimise_new_lymphocyte_file(file)
elif args.type.lower() == "cancer cells":
minimise_new_cell_file(file)
# End of file
| danielsoutar/CS4098 | extract.py | extract.py | py | 7,202 | python | en | code | 0 | github-code | 13 |
71454125459 | import pygame
import sys
import os
import random
import json
# Platform independent paths
# Asset directories resolved relative to this file's location.
main_dir = os.path.split(os.path.abspath(__file__))[0]
img_dir = os.path.join(main_dir, 'images')
sound_dir = os.path.join(main_dir, 'audio')
font_dir = os.path.join(main_dir, 'font')
data_dir = os.path.join(main_dir, 'data')
def load_image(img_filename: str, colorkey=None) -> pygame.Surface:
    """Loads an image from an image file and applies transparent color if applicable. A Surface representation of the image is returned."""
    # Attempt to load the user specified image
    img_location = os.path.join(img_dir, img_filename)
    try:
        img = pygame.image.load(img_location).convert()
    except pygame.error as msg:
        print('Failed to load:', img_filename)
        raise SystemExit(msg)
    # Apply color transparency if applicable
    if colorkey is not None:
        # A colorkey of -1 means "sample the top-left pixel's color".
        if colorkey == -1:
            colorkey = img.get_at((0, 0))
        # The pygame.RLEACCEL flag provides better performance on non accelerated displays
        img.set_colorkey(colorkey, pygame.RLEACCEL)
    return img
def load_sound(sound_filename: str):
    """Loads a sound file and returns a corresponding Sound object. If pygame.mixer is not available an instance of a dummy class containing a play method is returned instead."""
    class NoSound:
        """Dummy class used in the event pygame.mixer is not available."""
        def play(self):
            # Intentional no-op so callers can unconditionally call .play().
            None
    if not pygame.mixer:
        return NoSound()
    sound_location = os.path.join(sound_dir, sound_filename)
    try:
        sound = pygame.mixer.Sound(sound_location)
    except pygame.error as msg:
        print('Failed to load:', sound_filename)
        raise SystemExit(msg)
    return sound
def load_font(font_filename: str, font_size: int) -> pygame.font.Font:
    """Loads a font file and returns a Font object with the specified font and size on success. Returns the default pygame font with user specified size otherwise."""
    font_location = os.path.join(font_dir, font_filename)
    try:
        font = pygame.font.Font(font_location, font_size)
    # BUG FIX: a bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # only genuine load failures should trigger the default-font fallback.
    except Exception:
        font = pygame.font.Font(None, font_size)
    return font
def load_data(data_filename: str, permissions: str):
    """Loads a JSON file and returns a dictionary representation of the JSON file.

    Exits via SystemExit (after printing the filename) when the file is
    missing/unreadable or contains invalid JSON.
    """
    # Attempt to load the user specified file
    data_location = os.path.join(data_dir, data_filename)
    try:
        with open(data_location, permissions) as file:
            return json.load(file)
    # BUG FIX: open()/json.load() raise OSError/JSONDecodeError, never
    # pygame.error, so the previous handler was dead code and real failures
    # escaped unreported.
    except (OSError, json.JSONDecodeError) as msg:
        print('Failed to load:', data_filename)
        raise SystemExit(msg)
def save_data(json_data_filename: str, data):
    """Saves the specified data as JSON in the data directory."""
    target = os.path.join(data_dir, json_data_filename)
    with open(target, 'w') as handle:
        json.dump(data, handle)
class Asteroid(pygame.sprite.Sprite):
    # Sprite attributes: image/rect (pygame), velocity ([dx, dy] per frame),
    # explosion_sound (played on destruction).
    def __init__(self):
        """Initialize an instance of the Asteroid class."""
        pygame.sprite.Sprite.__init__(self)
        self.image = load_image('asteroid.png', (255, 255, 255))
        self.rect = self.image.get_rect()
        # TODO: Change velocity to be a random float for move variation and decreased likely hood of overlap
        self.velocity = [random.choice([-2, -1, 1, 2]), random.choice([-2, -1, 1, 2])]
        # Set the initial position to be somewhere random in the game area
        # (the playable area is half the window size — see main's 2x scaling).
        screen_size = pygame.display.get_window_size()
        self.rect.center = [random.randint(0, screen_size[0] / 2), random.randint(0, screen_size[1] / 2)]
        self.explosion_sound = load_sound('explosion.wav')
    def update(self):
        """Moves the asteroid within the game boundaries."""
        # Random movement
        self.rect.move_ip(self.velocity)
        # Check for bounds
        screen_size = pygame.display.get_window_size()
        # Divide x and y of screen size by 2 to get the boundaries prior to scaling up 2x
        x_boundaries = (0, screen_size[0] / 2)
        y_boundaries = (0, screen_size[1] / 2)
        if self.rect.center[0] < x_boundaries[0] or self.rect.center[0] > x_boundaries[1]:
            # Flip the x velocity then move back into the gameview
            self.velocity[0] = -self.velocity[0]
            self.rect.move_ip(self.velocity)
        if self.rect.center[1] < y_boundaries[0] or self.rect.center[1] > y_boundaries[1]:
            # Same bounce behavior for the vertical axis.
            self.velocity[1] = -self.velocity[1]
            self.rect.move_ip(self.velocity)
        # TODO: Add logic to rotate the asteroids
    def explode(self):
        """Plays an explosion sound."""
        # TODO: Visual effect for exploding
        self.explosion_sound.play()
class Player(pygame.sprite.Sprite):
    # The robot-hand cursor sprite; `smashing` tracks the clenched-fist state.
    def __init__(self):
        """Initialize an instance of the Player class."""
        pygame.sprite.Sprite.__init__(self)
        self.image = load_image('player.png', (255, 255, 255))
        self.rect = self.image.get_rect()
        self.smashing = False
    def update(self):
        """Updates the player position based on where the mouse cursor is."""
        pos = pygame.mouse.get_pos()
        # The actual gameview is 1/2 the screen size so divide the mouse coordinates by 2 to account for the 2x scaling
        self.rect.center = (pos[0] / 2, pos[1] / 2)
    def smash(self):
        """Changes the player image to that of the robot hand in a clenched fist state."""
        # Change to image of robot hand clenched into a fist
        self.image = load_image('smash.png', (255, 255, 255))
        self.smashing = True
    def unsmash(self):
        """Changes the player image back to it's original state if the player was attempting a smash previously."""
        if self.smashing:
            self.image = load_image('player.png', (255, 255, 255))
def create_asteroids(nun_asteroids: int) -> list[Asteroid]:
    """Returns a list containing the specified number of freshly spawned asteroids."""
    return [Asteroid() for _ in range(nun_asteroids)]
def has_asteroids(group: pygame.sprite.RenderPlain) -> bool:
    """Returns True when the sprite group still contains at least one Asteroid."""
    return any(type(obj) == Asteroid for obj in group)
# TODO: Separate class for time conversions?
def minutesToMilliseconds(minutes: float) -> float:
    """Converts minutes into milliseconds."""
    return secondsToMilliseconds(minutesToSeconds(minutes))
def minutesToSeconds(minutes: float) -> float:
    """Converts minutes to seconds."""
    seconds_per_minute = 60
    return minutes * seconds_per_minute
def secondsToMilliseconds(seconds: float) -> float:
    """Converts seconds to milliseconds."""
    milliseconds_per_second = 1000
    return seconds * milliseconds_per_second
def secondsToMinutes(seconds: float) -> float:
    """Converts seconds to minutes."""
    return seconds / 60
def millisecondsToSeconds(milliseconds: float) -> float:
    """Converts millseconds to seconds."""
    return milliseconds / 1000
def millisecondsToMinutes(milliseconds: float) -> float:
    """Converts milliseconds to minutes."""
    return secondsToMinutes(millisecondsToSeconds(milliseconds))
def millisecondsToMinutesSecondsFormat(milliseconds: float) -> str:
    """Converts milliseconds to a '{minutes}:{seconds}' display string."""
    whole_minutes = millisecondsToMinutes(milliseconds)
    # The fractional minutes become the seconds component.
    leftover_seconds = minutesToSeconds(whole_minutes - int(whole_minutes))
    # Zero-pad single-digit seconds so e.g. 1:5 renders as 1:05.
    return f'{int(whole_minutes)}:{int(leftover_seconds):02d}'
def main():
    """The main method runs when the script is run and houses the game loop and variables for the game."""
    # Initialize game components
    pygame.init()
    screen = pygame.display.set_mode((480, 320))
    # The gameview surface is half-size and scaled 2x onto the screen.
    gameview = pygame.Surface((int(screen.get_width() / 2), int(screen.get_height() / 2)))
    pygame.display.set_caption('Robot Hand Asteroid Smasher')
    pygame.mouse.set_visible(False)
    clock = pygame.time.Clock()
    background_track = load_sound('background.wav')
    background_track.play(loops=-1)
    # Prepare game objects
    player = Player()
    num_asteroids = 3
    asteroids = create_asteroids(num_asteroids)
    sprites_group = pygame.sprite.RenderPlain((asteroids, player))
    score = 0
    high_score = load_data('high_score.json', 'r')['high_score']
    current_high_score_color = (255, 255, 255)
    time_limit = minutesToMilliseconds(2)
    game_active = True
    while True:
        clock.tick(60)
        for event in pygame.event.get():
            if event.type == pygame.QUIT or event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                pygame.quit()
                sys.exit()
            if event.type == pygame.MOUSEBUTTONDOWN and game_active:
                player.smash()
                hit_list = pygame.sprite.spritecollide(player, sprites_group, False)
                hit_asteroids = [obj for obj in hit_list if type(obj) == Asteroid]
                if len(hit_asteroids) != 0:
                    hit_asteroids[0].explode()
                    # Update the score with 1 points awards for every asteroid destroyed
                    # Showing * 1 for clarity that each asteroid is worth 1 point
                    score += len(hit_asteroids) * 1
                    sprites_group.remove(hit_asteroids)
                # TODO: Clicking and missing an asteroid
                # Check if the group has any asteroids remaining
                if not has_asteroids(sprites_group):
                    # Refill the group with one more asteroid than before
                    num_asteroids += 1
                    asteroids = create_asteroids(num_asteroids)
                    sprites_group.add(asteroids)
            if event.type == pygame.MOUSEBUTTONUP and game_active:
                player.unsmash()
        # TODO: Refill the sprite group with asteroids with one more asteroid then before once all asteroids have been destroyed.
        sprites_group.update()
        gameview.fill((0, 0, 0))
        remaining_time = time_limit - pygame.time.get_ticks()
        if remaining_time <= 0:
            game_active = False
        if game_active:
            if pygame.font:
                font = load_font('Pixeltype.ttf', 16)
                # Score Text
                text = font.render(f'Score: {score}', False, (255, 255, 255))
                textpos = text.get_rect(topleft = (5, 5))
                # Remaining Time Text (turns red in the final 10 seconds)
                remaining_time_text_color = (255, 255, 255)
                if remaining_time < secondsToMilliseconds(10):
                    remaining_time_text_color = (255, 87, 51)
                remaining_time_text = font.render(f'Time: {millisecondsToMinutesSecondsFormat(remaining_time)}', False, remaining_time_text_color)
                remaining_time_text_pos = remaining_time_text.get_rect(topleft = (textpos.x, textpos.y + 10))
                gameview.blit(remaining_time_text, remaining_time_text_pos)
                gameview.blit(text, textpos)
                # High Score Text
                # Check if the user has achieved a new high score
                # Don't alter the original high score, use a local copy instead
                current_high_score = high_score
                if score > current_high_score:
                    current_high_score = score
                    current_high_score_color = (57, 255, 20)
                high_score_text = font.render(f'High Score: {current_high_score}', False, current_high_score_color)
                high_score_textpos = high_score_text.get_rect(topright = (gameview.get_width() - 5, 5))
                gameview.blit(high_score_text, high_score_textpos)
        else:
            if pygame.font:
                # BUG FIX: the extension typo '.tff' made load_font silently
                # fall back to pygame's default font; '.ttf' matches the asset
                # loaded for the in-game HUD above.
                font = load_font('Pixeltype.ttf', 24)
                end_game_text = font.render('Thanks for playing!', False, (255, 255, 255))
                end_game_text_pos = end_game_text.get_rect(center = (gameview.get_width() / 2, gameview.get_height() / 2))
                gameview.blit(end_game_text, end_game_text_pos)
                if score > high_score:
                    # Render the new high score
                    new_high_score_text = font.render(f'New High Score: {score}', False, (57, 255, 20))
                    new_high_score_pos = new_high_score_text.get_rect(center = (gameview.get_width() / 2, gameview.get_height() * 3 / 4))
                    gameview.blit(new_high_score_text, new_high_score_pos)
                    # Save the new high score
                    # NOTE(review): this runs every frame while the end screen
                    # shows (high_score is never updated) — consider saving once.
                    save_data('high_score.json', {"high_score": score})
        sprites_group.draw(gameview)
        scaled_gameview = pygame.transform.scale(gameview, (screen.get_width(), screen.get_height()))
        screen.blit(scaled_gameview, (0, 0))
        pygame.display.update()
if __name__ == '__main__':
main() | tonypham04/Robot-Hand-Asteroid-Smasher | game.py | game.py | py | 13,169 | python | en | code | 0 | github-code | 13 |
25229335373 | import random
from random import seed
import math
random.seed(1)
# ----------------- STEP 1: -----------------
# Network initialisation:
def makeNetwork(inputs, neuronsCount, output):
    """Build a 2-layer network: one hidden layer and one output layer.

    Each neuron is a dict of random weights; the extra trailing weight in
    each list is the bias term.
    """
    hidden = [{'weights': [random.random() for _ in range(inputs + 1)]}
              for _ in range(neuronsCount)]
    final = [{'weights': [random.random() for _ in range(neuronsCount + 1)]}
             for _ in range(output)]
    return [hidden, final]
# ----------------- STEP 2: -----------------
# Forward Propogation
# - Neuron Activation
# - Neuron Transfer
# - Neuron Propagation
# - Neuron Activation. Formula, activation = Σ(weight*input)+bias.
def activate(weights, inputs):
    """Weighted sum of the inputs plus the bias (stored as the last weight)."""
    total = weights[-1]
    # zip truncates at len(weights)-1, ignoring any trailing input columns
    # (e.g. the class label appended to a data row).
    for w, x in zip(weights[:-1], inputs):
        total += w * x
    return total
# - Neuron Transfer. Here, sigmoid function.
def transfer(activation):
    """Sigmoid activation: squashes the value into (0, 1)."""
    z = math.exp(-activation)
    return 1.0 / (1 + z)
# - Neuron Propagation
def forwardPropagate(neuralNetwork, dataRow):
    """Push one input row through every layer; returns the output layer values.

    Side effect: stores each neuron's activation under its 'output' key.
    """
    signal = dataRow
    for layer in neuralNetwork:
        next_signal = []
        for neuron in layer:
            neuron['output'] = transfer(activate(neuron['weights'], signal))
            next_signal.append(neuron['output'])
        signal = next_signal
    return signal
# ----------------- STEP 3: -----------------
# Back Propagation
# - Transfer Derivative
# - Error Backpropagation
# - Transfer Derivative OR simply derivative(sigmoid)
def transferDerivative(output):
    """Derivative of the sigmoid, expressed in terms of its output value."""
    return output * (1 - output)
# - Error Backpropagation
# - Error for each neuron in outlayer: error = (expected - output) * transferDerivative(output)
# - Error for neurons in hidden layer: error = (weight-i * error-j) * transferDerivative(output)
def backPropagateError(neuralNetwork, expected):
    """Walk the layers backwards, storing each neuron's 'delta' error term."""
    last = len(neuralNetwork) - 1
    for i in reversed(range(len(neuralNetwork))):
        layer = neuralNetwork[i]
        if i == last:
            # Output layer: error is the plain target-minus-output difference.
            errors = [expected[j] - neuron['output']
                      for j, neuron in enumerate(layer)]
        else:
            # Hidden layer: error is the delta-weighted sum from the layer above.
            errors = []
            for j in range(len(layer)):
                downstream = 0
                for neuron in neuralNetwork[i + 1]:
                    downstream += (neuron['weights'][j] * neuron['delta'])
                errors.append(downstream)
        for j, neuron in enumerate(layer):
            neuron['delta'] = errors[j] * transferDerivative(neuron['output'])
# ----------------- STEP 4: -----------------
# Training the network
# - Update Weights
# - Train Netwwork
# weightNew = weightOld + learning_rate * error * input
# - Update Weights
def updateWts(neuralNetwork, dataRow, learningRate):
    """Gradient step: nudge each weight by rate * delta * input (bias uses input 1)."""
    for layer_idx, layer in enumerate(neuralNetwork):
        if layer_idx == 0:
            # The first layer feeds on the raw row, label column stripped.
            inputs = dataRow[:-1]
        else:
            inputs = [n['output'] for n in neuralNetwork[layer_idx - 1]]
        for neuron in layer:
            for j, value in enumerate(inputs):
                neuron['weights'][j] += learningRate * neuron['delta'] * value
            neuron['weights'][-1] += learningRate * neuron['delta']
# - Train Netwwork
def trainTheNetwork(neuralNetwork, trainData, learningRate, epochs, outputsCount):
    """Run SGD for the given number of epochs, printing the summed squared error."""
    print('---> learning rate=%.4f' % learningRate)
    for epoch in range(epochs):
        totalError = 0
        for sample in trainData:
            predicted = forwardPropagate(neuralNetwork, sample)
            # One-hot encode the integer class label stored in the last column.
            target = [0 for _ in range(outputsCount)]
            target[sample[-1]] = 1
            # Accumulate this row's squared error before updating weights.
            totalError += sum([(target[k] - predicted[k]) ** 2 for k in range(len(target))])
            backPropagateError(neuralNetwork, target)
            updateWts(neuralNetwork, sample, learningRate)
        print('---> epoch=%d, error=%.4f' % (epoch, totalError))
# ----------------- STEP 5: -----------------
# Predictions
def predict(neuralNetwork, row):
    """Classify one row: the index of the most activated output neuron (arg max)."""
    outputs = forwardPropagate(neuralNetwork, row)
    return max(range(len(outputs)), key=outputs.__getitem__)
from csv import reader
# ----------------- STEP 6: -----------------
# Testing our algorithm with a dataset:
# load CSV
def loadCSVfile(filename):
    """Read a CSV file, skipping blank rows, and return the rows as lists."""
    with open(filename, 'r') as handle:
        return [row for row in reader(handle) if row]
# string to float
def strColumnToFloat(data, column):
    """In-place: parse the given column's strings into floats (whitespace stripped)."""
    for record in data:
        record[column] = float(record[column].strip())
# string to int
def strColumnToInt(dataset, column):
    """In-place: map each distinct class label in `column` to a small int.

    Returns the label -> int dictionary. The assignment of ints to labels
    depends on set iteration order, so it is consistent within one run only.
    """
    labels = set(row[column] for row in dataset)
    dic = {value: i for i, value in enumerate(labels)}
    for row in dataset:
        row[column] = dic[row[column]]
    return dic
# min max stats from data
def minMaxFromData(data):
    """Per-column [min, max] pairs, computed over the transposed rows."""
    stats = []
    for column in zip(*data):
        stats.append([min(column), max(column)])
    return stats
# normalizeData
def normalizeDataset(dataset, minmax):
    """In-place min-max scaling of every column except the last (the class label)."""
    for row in dataset:
        for i, (lo, hi) in enumerate(minmax[:len(row) - 1]):
            row[i] = (row[i] - lo) / (hi - lo)
# k-fold cross validation
def crossValidation(data, folds):
    """Split `data` into `folds` random folds (sampling without replacement).

    Leftover rows (when len(data) is not divisible by folds) are dropped.
    """
    pool = list(data)
    foldSize = len(data) // folds
    dataSplit = []
    for _ in range(folds):
        fold = []
        while len(fold) < foldSize:
            # pop at a random index; keeps the RNG call sequence identical.
            fold.append(pool.pop(random.randrange(len(pool))))
        dataSplit.append(fold)
    return dataSplit
# accuracy score
def accuracyMetric(actual, predicted):
    """Percentage of positions where actual and predicted agree."""
    correct = 0
    for idx in range(len(actual)):
        correct += actual[idx] == predicted[idx]
    return correct / float(len(actual)) * 100
# run algo using cross validation
def runAlgorithm(data, algorithm, foldCount, *args):
    """k-fold cross-validation driver: returns the per-fold accuracy scores."""
    folds = crossValidation(data, foldCount)
    scores = []
    for heldOut in folds:
        # Train on every fold except the held-out one, flattened into rows.
        trainFolds = list(folds)
        trainFolds.remove(heldOut)
        trainRows = sum(trainFolds, [])
        # Copy the held-out rows and blank the label so the algorithm
        # cannot peek at the answer.
        testRows = []
        for row in heldOut:
            rowCopy = list(row)
            testRows.append(rowCopy)
            rowCopy[-1] = None
        predicted = algorithm(trainRows, testRows, *args)
        actual = [row[-1] for row in heldOut]
        scores.append(accuracyMetric(actual, predicted))
    return scores
# Wrapper function of neural network
def backPropagation(train, test, learningRate, numberOfEpochs, numberOfHiddenLayers):
    """Train a fresh network on *train* and return its predictions for *test*."""
    n_inputs = len(train[0]) - 1
    n_outputs = len({row[-1] for row in train})
    network = makeNetwork(n_inputs, numberOfHiddenLayers, n_outputs)
    trainTheNetwork(network, train, learningRate, numberOfEpochs, n_outputs)
    return [predict(network, row) for row in test]
# loading the CSV and evaluating the network with k-fold cross validation
filename = 'seeds_dataset.csv'
dataset = loadCSVfile(filename)
# convert the feature columns to floats
for col in range(len(dataset[0]) - 1):
    strColumnToFloat(dataset, col)
# encode the class column as integers
strColumnToInt(dataset, len(dataset[0]) - 1)
# rescale every feature into [0, 1]
minmax = minMaxFromData(dataset)
normalizeDataset(dataset, minmax)
# hyper-parameters
numberOfFolds = 6
learningRate = 0.3
numberOfEpochs = 500
numberOfHiddenLayers = 5
scores = runAlgorithm(dataset, backPropagation, numberOfFolds, learningRate, numberOfEpochs, numberOfHiddenLayers)
print('Scores List: %s' % scores)
print('Accuracy mean: %.4f%%' % (sum(scores) / float(len(scores))))
| yasharma2301/NeuralNetwork_From_Scratch | neural.py | neural.py | py | 7,752 | python | en | code | 0 | github-code | 13 |
class Solution(object):
    def findContinuousSequence(self, target):
        """Return all runs of consecutive positive integers summing to *target*.

        Sliding-window approach: grow the window [i, j) to the right while the
        sum is short of target, record a hit, then shrink from the left while
        the sum overshoots. Runs are returned in increasing order of their
        first element.
        (Stray debug prints from the original have been removed.)
        """
        # No useful sequence can contain values past target//2 + 1.
        length = target // 2 + 1
        i = 1          # window start (inclusive)
        j = 1          # window end (exclusive)
        result = []
        temp_sum = 0   # running sum of [i, j)
        while i <= j and j <= length:
            # grow the window until the sum reaches or overshoots target
            while j <= length and temp_sum < target:
                temp_sum += j
                j += 1
            if temp_sum == target:
                result.append(list(range(i, j)))
                temp_sum -= i
                i += 1
            # shrink from the left while we overshoot, recording hits
            while i < j and temp_sum > target:
                temp_sum -= i
                i += 1
                if temp_sum == target:
                    result.append(list(range(i, j)))
                    temp_sum -= i
                    i += 1
        return result
# Quick manual check: print all consecutive runs summing to 9.
print(Solution().findContinuousSequence(9))
| lmb633/leetcode | 57findContinuousSequence.py | 57findContinuousSequence.py | py | 924 | python | en | code | 0 | github-code | 13 |
40734928378 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os, json
from datetime import datetime
# Timestamp suffix (YYMMDDHHMMSS) that makes per-run result file names unique.
_timeString = datetime.today().strftime("%y%m%d%H%M%S")
# All paths are derived from the current working directory at import time.
rootDirectory = os.getcwd()
#rootDirectory = os.path.dirname(rootDirectory)
dataDirectory = os.path.join(rootDirectory, "Data")
readingListDirectory = os.path.join(rootDirectory, "ReadingLists")
resultsDirectory = os.path.join(rootDirectory, "Results")
outputDirectory = os.path.join(rootDirectory, "Output")
jsonOutputDirectory = os.path.join(outputDirectory, "JSON")
cblOutputDirectory = os.path.join(outputDirectory, "CBL")
# Databases and configuration files.
dataFile = os.path.join(dataDirectory, "data.db")
cvCacheFile = os.path.join(dataDirectory, "cv.db")
overridesFile = os.path.join(dataDirectory,'SeriesOverrides.json')
configFile = os.path.join(rootDirectory, 'config.ini')
# Per-run output files, stamped with _timeString.
resultsFile = os.path.join(resultsDirectory, "results-%s.txt" % (_timeString))
problemsFile = os.path.join(resultsDirectory, "problems-%s.txt" % (_timeString))
seriesFile = os.path.join(resultsDirectory, "series-%s.txt" % (_timeString))
def checkDirectories():
    """Create every directory this module writes to, if it does not exist yet."""
    required = (
        dataDirectory,
        readingListDirectory,
        resultsDirectory,
        outputDirectory,
        jsonOutputDirectory,
        cblOutputDirectory,
    )
    for folder in required:
        if not os.path.exists(folder):
            os.makedirs(folder)
# Ensure the folder tree exists as a side effect of importing this module.
checkDirectories()
def getJSONData(jsonFile : str) -> dict :
    """Parse *jsonFile* as JSON and return its contents, or None if missing.

    Fix: the file handle was previously opened and never closed (resource
    leak); a context manager now closes it deterministically.
    """
    jsonData = None
    if fileExists(jsonFile):
        with open(jsonFile, 'r') as handle:
            jsonData = json.load(handle)
    return jsonData
def fileExists(file : str) -> bool:
    """True when *file* is a non-None path that exists on disk."""
    return bool(file is not None and os.path.exists(file))
def checkFilePath(string):
    """Create the parent directory of the file path *string* if missing."""
    parent = os.path.dirname(string)
    if not os.path.exists(parent):
        os.makedirs(parent)
def checkFolderPath(string):
    """Create the directory *string* if it does not exist yet."""
    if not os.path.exists(string):
        os.makedirs(string)
| themadman0980/ReadingListManager | readinglistmanager/filemanager.py | filemanager.py | py | 1,955 | python | en | code | 1 | github-code | 13 |
6021059507 | #!/usr/bin/python3
# Write a function that computes the square value of all integers of a matrix.
def square_matrix_simple(matrix=[]):
    """Return a new matrix whose entries are the squares of *matrix*'s entries.

    The input matrix is left untouched.
    """
    return [[value ** 2 for value in row] for row in matrix]
| johnkoye19/alx-higher_level_programming | 0x04-python-more_data_structures/0-square_matrix_simple.py | 0-square_matrix_simple.py | py | 287 | python | en | code | 1 | github-code | 13 |
23178428264 | # coding=UTF-8
__author__ = 'wangtao'
import unittest
import os
import glob
from appium import webdriver
# Resolve *p* relative to this file's directory into an absolute path.
PATH = lambda p: os.path.abspath(
    os.path.join(os.path.dirname(__file__), p))
# NOTE(review): `success` appears unused in this file -- confirm before removing.
success = True
class XcfAndroidTests(unittest.TestCase):
    """Appium UI tests for the xiachufang Android app."""
    def setUp(self):
        # Desired capabilities describing the emulator and the app under test.
        desired_caps = {}
        desired_caps['platformName'] = 'Android'
        desired_caps['platformVersion'] = '5.0'
        desired_caps['deviceName'] = 'Android Emulator'
        desired_caps['appPackage'] = 'com.xiachufang'
        desired_caps['appActivity'] = '.activity.StartPageActivity'
        desired_caps['app'] = PATH('/Users/wangtao/xcf/testapk/xcf_testapk.apk')
        # Connect to a locally running Appium server.
        self.driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    # Class-level test credentials used by the login flow.
    loginPhoneNum = '15901549596'
    loginPassword = '123456'
    def tearDown(self):
        # end the session
        self.driver.quit()
    def phoneNumLoginSuccess(self):
        # Open the "self" (profile) tab, then tap the login entrance.
        ff = self.driver.find_element_by_id('tab_widget_content_self')
        # NOTE(review): find_element_by_id raises NoSuchElementException when the
        # element is missing rather than returning '' -- the else branch below
        # looks unreachable as written; confirm the intended fallback flow.
        if (ff != ''):
            ff.click()
        else:
            ff = self.driver.find_element_by_id('re_login_close')
            ff.click()
            ff = self.driver.find_element_by_id('tab_widget_content_self')
            ff.click()
        ff = self.driver.find_element_by_id('entrance_login')
        ff.click()
| taozitao/UIautomatorForXCF | loginDemo/__init__.py | __init__.py | py | 1,275 | python | en | code | 0 | github-code | 13 |
10635751623 | # CTI-110
# M3HW2 - Software Sales
# Juan Santiago
# 9-21-17
#
#A software company sells a package that retails for $99.
#They offer bulk discounts for volume purchases
#(for example, buying many copies to install in a college classroom).
#The discounts are as follows:
#Quantity 10-19: 10% discount
#Quantity 20-49: 20% discount
#Quantity 50-99: 30% discount
#Quantity 100+: 40% discount
# Read the order size, pick the matching volume-discount tier, print totals.
userNumberOfPackages = int(input("Please enter the number of packages brought: "))
packagePrice = 99

# Discount tiers, checked from the largest quantity threshold down.
discount = 0
for minimumQty, rate in ((100, 0.40), (50, 0.30), (20, 0.20), (10, 0.10)):
    if userNumberOfPackages >= minimumQty:
        discount = rate
        break

subTotal = userNumberOfPackages * packagePrice
discountAmount = discount * subTotal
total = subTotal - discountAmount

print("\nAmount of discount: $" + format(discountAmount, ",.2f") +
      "\nTotal amount: $" + format(total, ",.2f"))
| JSantiago2007/cti110 | M3HW2_SoftwareSales_Santiago.py | M3HW2_SoftwareSales_Santiago.py | py | 1,016 | python | en | code | 0 | github-code | 13 |
18392043824 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 21 13:49:06 2019
@author: rajat
"""
# import the modules used
import cv2
import numpy as np
import scipy.io as scsio
import matplotlib.pyplot as plt
from skimage.transform import AffineTransform
from skimage.measure import ransac
# load the position data from mat file
def load_pos_data(filename, loadRedPos):
    """Load tracked coordinates from a MATLAB .mat file.

    Returns (raw dict, X, Y, extraX, extraY). When *loadRedPos* is True and
    the file carries 'red_x'/'red_y', X/Y are the red-LED track and the
    extras are the body track; otherwise X/Y are the body track and the
    extras are NaN.
    """
    contents = scsio.loadmat(filename)
    body_x = contents['pos_x'][0]
    body_y = contents['pos_y'][0]
    if loadRedPos and 'red_x' in contents.keys() and 'red_y' in contents.keys():
        return contents, contents['red_x'][0], contents['red_y'][0], body_x, body_y
    return contents, body_x, body_y, np.nan, np.nan
# function to get length matched
def get_length_adjusted(posX1, posY1, posX2, posY2):
    """Pad the shorter track with the -1 sentinel so both tracks share a
    length, then return all four arrays as float32."""
    target = max(len(posX1), len(posX2))
    if len(posX1) == target:
        posX2 = np.append(posX2, [-1] * (target - len(posX2)))
        posY2 = np.append(posY2, [-1] * (target - len(posY2)))
    else:
        posX1 = np.append(posX1, [-1] * (target - len(posX1)))
        posY1 = np.append(posY1, [-1] * (target - len(posY1)))
    return (np.array(posX1, dtype=np.float32), np.array(posY1, dtype=np.float32),
            np.array(posX2, dtype=np.float32), np.array(posY2, dtype=np.float32))
# find intersecting coordinates between 2 camera position
def find_intersect_coords(posX1, posY1, posX2, posY2):
    """Length-match both tracks and pick out frames seen by both cameras.

    Returns a dict with the padded tracks, the indices valid in both
    ('common_ind'), and the tracks restricted to those indices ('*_c').
    """
    posX1, posY1, posX2, posY2 = get_length_adjusted(posX1, posY1, posX2, posY2)
    # frames where neither camera reports the -1 "missing" sentinel
    valid = np.where((posX1 != -1) & (posY1 != -1) & (posX2 != -1) & (posY2 != -1))[0]
    data = {
        'posX1': posX1,
        'posY1': posY1,
        'posX2': posX2,
        'posY2': posY2,
        'common_ind': valid,
        'posX1_c': posX1[valid],
        'posY1_c': posY1[valid],
        'posX2_c': posX2[valid],
        'posY2_c': posY2[valid],
    }
    return data
# function to create compatible array
def create_compatible_array(posX, posY):
    """Stack parallel x/y sequences into an (N, 2) float32 array."""
    pairs = [[x_val, y_val] for x_val, y_val in zip(posX, posY)]
    return np.array(pairs, dtype=np.float32)
# functions to generate the arrays used to calculate homography
def create_src_dst_points(srcptX, srcptY, dstptX, dstptY):
    """Build matching (N, 2) source/destination point arrays for homography."""
    return (create_compatible_array(srcptX, srcptY),
            create_compatible_array(dstptX, dstptY))
# function to find inliers using ransac algorithm
def find_inliers(src, dst):
    """Boolean inlier mask for an affine mapping src -> dst, via RANSAC."""
    # Plain least-squares fit; result is not used directly (kept from the
    # original flow).
    model = AffineTransform()
    model.estimate(src, dst)
    # Robust refit: RANSAC rejects outlier correspondences.
    _, inliers = ransac((src, dst), AffineTransform, min_samples=3,
                        residual_threshold=2, max_trials=100)
    return inliers
# get perspective transformed data
def get_transformed_coords(c1_posX, c1_posY, c2_posX, c2_posY, unocc_c1, unocc_c2):
    """Register camera 1 onto camera 2 via a RANSAC-filtered homography.

    Returns (intersect dict, homography, transformed c1 X/Y, c2 X/Y).
    NOTE(review): the tracks are mutated in place (-1 sentinel -> NaN) and
    the returned arrays alias entries of the intersect dict, so callers
    observe those mutations.
    """
    # find preprocessed and intersecting coordinates bw 2 cameras
    intersect_coords = find_intersect_coords(c1_posX, c1_posY, c2_posX, c2_posY)
    c1_posX_c = intersect_coords['posX1_c']
    c1_posY_c = intersect_coords['posY1_c']
    c2_posX_c = intersect_coords['posX2_c']
    c2_posY_c = intersect_coords['posY2_c']
    # change -1 to nan
    c1_posX = intersect_coords['posX1']
    c1_posY = intersect_coords['posY1']
    c2_posX = intersect_coords['posX2']
    c2_posY = intersect_coords['posY2']
    c1_posX[c1_posX==-1] = np.nan
    c1_posY[c1_posY==-1] = np.nan
    c2_posX[c2_posX==-1] = np.nan
    c2_posY[c2_posY==-1] = np.nan
    # create the compatible pos coordinates data format for a single camera that needs to be transformed
    c1_coords = create_compatible_array(c1_posX, c1_posY)
    # create source and destination use to calculate homography
    c1_coords_c, c2_coords_c = create_src_dst_points(c1_posX_c, c1_posY_c, c2_posX_c, c2_posY_c)
    # get inliers using ransac algorithm (outlier correspondences are dropped)
    inliers = find_inliers(c1_coords_c, c2_coords_c)
    intersect_coords['inliers'] = inliers
    c1_coords_c = c1_coords_c[inliers]
    c2_coords_c = c2_coords_c[inliers]
    # find the transformed coordinates
    hg, status = cv2.findHomography(c1_coords_c, c2_coords_c)
    # perform perspective transformation of the whole camera-1 track
    c1_coords_t = cv2.perspectiveTransform(np.array([c1_coords]), hg)[0]
    # transformed points
    c1_posX_t = c1_coords_t[:,0]
    c1_posY_t = c1_coords_t[:,1]
    # change unoccupied pixels to nan
    c1_posX_t[unocc_c1] = np.nan
    c1_posY_t[unocc_c1] = np.nan
    c2_posX[unocc_c2] = np.nan
    c2_posY[unocc_c2] = np.nan
    # return the homography, transformed coordinates
    return intersect_coords, hg, c1_posX_t, c1_posY_t, c2_posX, c2_posY
# function to get 2 camera merged positions
def get_merged_pos_2cams(camA_posX, camA_posY, camB_posX, camB_posY):
    """Register camera A onto camera B and average the two tracks.

    Returns (common coords, homography, transformed A X/Y, B X/Y,
    merged X, merged Y); frames neither camera covered carry -1.
    """
    # frames each camera failed to track (sentinel -1)
    missing_A = np.where(camA_posX == -1)[0]
    missing_B = np.where(camB_posX == -1)[0]
    common, homography, camA_X_t, camA_Y_t, camB_posX, camB_posY = get_transformed_coords(
        camA_posX, camA_posY, camB_posX, camB_posY, missing_A, missing_B)
    # average the overlapping tracks, ignoring NaNs where only one camera saw the animal
    merged_X = np.nanmean(np.transpose(np.vstack((camA_X_t, camB_posX))), axis=1)
    merged_Y = np.nanmean(np.transpose(np.vstack((camA_Y_t, camB_posY))), axis=1)
    # restore the -1 sentinel for frames neither camera covered
    merged_X[np.isnan(merged_X)] = -1.
    merged_Y[np.isnan(merged_Y)] = -1.
    return common, homography, camA_X_t, camA_Y_t, camB_posX, camB_posY, merged_X, merged_Y
# all camera position filenames (left bank of the arena: cams 1-4)
cam1_pos_filename = 'cam1_Pos.mat'
cam2_pos_filename = 'cam2_Pos.mat'
cam3_pos_filename = 'cam3_Pos.mat'
cam4_pos_filename = 'cam4_Pos.mat'
# load the position data for each camera
# NOTE(review): cam1 is loaded with loadRedPos=True, so cam1_posX/Y hold the
# red-LED track when that file carries one -- confirm this is intentional.
_, cam1_posX, cam1_posY, _, _ = load_pos_data(cam1_pos_filename, True)
_, cam2_posX, cam2_posY, _, _ = load_pos_data(cam2_pos_filename, False)
_, cam3_posX, cam3_posY, _, _ = load_pos_data(cam3_pos_filename, False)
_, cam4_posX, cam4_posY, _, _ = load_pos_data(cam4_pos_filename, False)
# get the transformed data, homography matrix and matching vertices
# (stitch order: cam2 onto cam3, then cam1 onto that merge, then cam4)
common_coords_c2c3, hg_c2c3, cam2_posX_t, cam2_posY_t, cam3_posX, cam3_posY, merged_cam23_posX,\
merged_cam23_posY = get_merged_pos_2cams(cam2_posX, cam2_posY, cam3_posX, cam3_posY)
common_coords_c1c2c3, hg_c1c2c3, cam1_posX_t, cam1_posY_t, merged_cam23_posX, merged_cam23_posY,\
merged_cam123_posX, merged_cam123_posY = get_merged_pos_2cams(cam1_posX, cam1_posY, merged_cam23_posX, merged_cam23_posY)
common_coords_c1c2c3c4, hg_c1c2c3c4, cam4_posX_t, cam4_posY_t, merged_cam123_posX, merged_cam123_posY,\
merged_cam1234_posX, merged_cam1234_posY = get_merged_pos_2cams(cam4_posX, cam4_posY, merged_cam123_posX, merged_cam123_posY)
# hold the homography dictionary
hg = {}
hg['cam2cam3'] = hg_c2c3
hg['cam1cam2cam3'] = hg_c1c2c3
hg['cam1cam2cam3cam4'] = hg_c1c2c3c4
# hold the common coordinate information
common_coords = {}
common_coords['cam2cam3'] = common_coords_c2c3
common_coords['cam1cam2cam3'] = common_coords_c1c2c3
common_coords['cam1cam2cam3cam4'] = common_coords_c1c2c3c4
# dict to hold the transformed coordinates
transformed_coords = {}
transformed_coords['cam1_posX'] = cam1_posX_t
transformed_coords['cam1_posY'] = cam1_posY_t
transformed_coords['cam2_posX'] = cam2_posX_t
transformed_coords['cam2_posY'] = cam2_posY_t
transformed_coords['cam4_posX'] = cam4_posX_t
transformed_coords['cam4_posY'] = cam4_posY_t
# dictionary to hold the merged coordinates
merged_coords = {}
merged_coords['cam2_cam3_posX'] = merged_cam23_posX
merged_coords['cam2_cam3_posY'] = merged_cam23_posY
merged_coords['cam1_cam2_cam3_posX'] = merged_cam123_posX
merged_coords['cam1_cam2_cam3_posY'] = merged_cam123_posY
merged_coords['cam1_cam2_cam3_cam4_posX'] = merged_cam1234_posX
merged_coords['cam1_cam2_cam3_cam4_posY'] = merged_cam1234_posY
# plot the processed data (each plt.show() blocks until the window is closed)
plt.figure(1)
plt.scatter(cam3_posX, cam3_posY, s=1)
plt.scatter(cam2_posX_t, cam2_posY_t, s=1)
plt.scatter(merged_cam23_posX, merged_cam23_posY, s=1)
plt.title('cam2 and cam3')
plt.show()
plt.figure(2)
plt.scatter(merged_cam23_posX, merged_cam23_posY, s=1)
plt.scatter(cam1_posX_t, cam1_posY_t, s=1)
plt.scatter(merged_cam123_posX, merged_cam123_posY, s=1)
plt.title('cam1, cam2 and cam3')
plt.show()
plt.figure(3)
plt.scatter(merged_cam123_posX, merged_cam123_posY, s=1)
plt.scatter(cam4_posX_t, cam4_posY_t, s=1)
plt.scatter(merged_cam1234_posX, merged_cam1234_posY, s=1)
plt.title('cam1, cam2, cam3 and cam4')
plt.show()
plt.figure(4)
plt.scatter(cam1_posX_t, cam1_posY_t, s=1)
plt.scatter(cam2_posX_t, cam2_posY_t, s=1)
plt.scatter(cam3_posX, cam3_posY, s=1)
plt.scatter(cam4_posX_t, cam4_posY_t, s=1)
plt.title('cam1, cam2, cam3 and cam4')
plt.show()
# free intermediates no longer needed before processing the second camera bank
del hg_c2c3, hg_c1c2c3, hg_c1c2c3c4
del common_coords_c2c3, common_coords_c1c2c3, common_coords_c1c2c3c4
del cam1_posX_t, cam1_posY_t, cam2_posX_t, cam2_posY_t, cam4_posX_t, cam4_posY_t
del merged_cam23_posX, merged_cam23_posY, merged_cam123_posX, merged_cam123_posY
# all camera position filenames (right bank of the arena: cams 5-8)
cam5_pos_filename = 'cam5_Pos.mat'
cam6_pos_filename = 'cam6_Pos.mat'
cam7_pos_filename = 'cam7_Pos.mat'
cam8_pos_filename = 'cam8_Pos.mat'
# load the position data for each camera
_, cam5_posX, cam5_posY, _, _ = load_pos_data(cam5_pos_filename, True)
_, cam6_posX, cam6_posY, _, _ = load_pos_data(cam6_pos_filename, False)
_, cam7_posX, cam7_posY, _, _ = load_pos_data(cam7_pos_filename, False)
_, cam8_posX, cam8_posY, _, _ = load_pos_data(cam8_pos_filename, False)
# get the transformed data, homography matrix and matching vertices
# (stitch order mirrors the first bank: cam6 onto cam7, then cam5, then cam8)
common_coords_c6c7, hg_c6c7, cam6_posX_t, cam6_posY_t, cam7_posX, \
cam7_posY, merged_cam67_posX, merged_cam67_posY = get_merged_pos_2cams(cam6_posX, cam6_posY, cam7_posX, cam7_posY)
common_coords_c5c6c7, hg_c5c6c7, cam5_posX_t, cam5_posY_t, merged_cam67_posX,\
merged_cam67_posY, merged_cam567_posX, merged_cam567_posY = get_merged_pos_2cams(cam5_posX, cam5_posY, merged_cam67_posX, merged_cam67_posY)
common_coords_c5c6c7c8, hg_c5c6c7c8, cam8_posX_t, cam8_posY_t, merged_cam567_posX,\
merged_cam567_posY, merged_cam5678_posX, merged_cam5678_posY = get_merged_pos_2cams(cam8_posX, cam8_posY, merged_cam567_posX, merged_cam567_posY)
# hold the homography dictionary
hg['cam6cam7'] = hg_c6c7
hg['cam5cam6cam7'] = hg_c5c6c7
hg['cam5cam6cam7cam8'] = hg_c5c6c7c8
# hold the common coordinate information
common_coords['cam6cam7'] = common_coords_c6c7
common_coords['cam5cam6cam7'] = common_coords_c5c6c7
common_coords['cam5cam6cam7cam8'] = common_coords_c5c6c7c8
# dict to hold the transformed coordinates
transformed_coords['cam5_posX'] = cam5_posX_t
transformed_coords['cam5_posY'] = cam5_posY_t
transformed_coords['cam6_posX'] = cam6_posX_t
transformed_coords['cam6_posY'] = cam6_posY_t
transformed_coords['cam8_posX'] = cam8_posX_t
transformed_coords['cam8_posY'] = cam8_posY_t
# dictionary to hold the merged coordinates
merged_coords['cam6_cam7_posX'] = merged_cam67_posX
merged_coords['cam6_cam7_posY'] = merged_cam67_posY
merged_coords['cam5_cam6_cam7_posX'] = merged_cam567_posX
merged_coords['cam5_cam6_cam7_posY'] = merged_cam567_posY
merged_coords['cam5_cam6_cam7_cam8_posX'] = merged_cam5678_posX
merged_coords['cam5_cam6_cam7_cam8_posY'] = merged_cam5678_posY
# plot the processed data
plt.figure(5)
plt.scatter(cam7_posX, cam7_posY, s=1)
plt.scatter(cam6_posX_t, cam6_posY_t, s=1)
plt.scatter(merged_cam67_posX, merged_cam67_posY, s=1)
plt.title('cam6 and cam7')
plt.show()
plt.figure(6)
plt.scatter(merged_cam67_posX, merged_cam67_posY, s=1)
plt.scatter(cam5_posX_t, cam5_posY_t, s=1)
plt.scatter(merged_cam567_posX, merged_cam567_posY, s=1)
plt.title('cam5, cam6 and cam7')
plt.show()
plt.figure(7)
plt.scatter(merged_cam567_posX, merged_cam567_posY, s=1)
plt.scatter(cam8_posX_t, cam8_posY_t, s=1)
plt.scatter(merged_cam5678_posX, merged_cam5678_posY, s=1)
plt.title('cam5, cam6, cam7 and cam8')
plt.show()
plt.figure(8)
plt.scatter(cam5_posX_t, cam5_posY_t, s=1)
plt.scatter(cam6_posX_t, cam6_posY_t, s=1)
plt.scatter(cam7_posX, cam7_posY, s=1)
plt.scatter(cam8_posX_t, cam8_posY_t, s=1)
plt.title('cam5, cam6, cam7 and cam8 transformed')
plt.show()
# free intermediates before the final left/right merge
del hg_c6c7, hg_c5c6c7, hg_c5c6c7c8
del common_coords_c6c7, common_coords_c5c6c7, common_coords_c5c6c7c8
del cam5_posX_t, cam5_posY_t, cam6_posX_t, cam6_posY_t, cam8_posX_t, cam8_posY_t
del merged_cam67_posX, merged_cam67_posY, merged_cam567_posX, merged_cam567_posY
# run stitching on merged cam1234 and merged cam5678 to merge all the cameras
common_coords_allcams, hg_allcams, merged_cam1234_posX_t, merged_cam1234_posY_t, \
merged_cam5678_posX, merged_cam5678_posY, merged_allcams_posX, merged_allcams_posY = \
get_merged_pos_2cams(merged_cam1234_posX, merged_cam1234_posY, merged_cam5678_posX, merged_cam5678_posY)
# homography between left vs right half
hg['cam14cam58'] = hg_allcams
# hold the common coordinate information
common_coords['cam14cam58'] = common_coords_allcams
# dictionary to hold the merged coordinates
merged_coords['cam1234_posX_t'] = merged_cam1234_posX_t
merged_coords['cam1234_posY_t'] = merged_cam1234_posY_t
merged_coords['cam5678_posX'] = merged_cam5678_posX
merged_coords['cam5678_posY'] = merged_cam5678_posY
merged_coords['cam14cam58_posX'] = merged_allcams_posX
merged_coords['cam14cam58_posY'] = merged_allcams_posY
# plot all the merged camera
plt.figure(9)
plt.scatter(merged_cam1234_posX_t, merged_cam1234_posY_t, s=1)
plt.scatter(merged_cam5678_posX, merged_cam5678_posY, s=1)
plt.scatter(merged_allcams_posX, merged_allcams_posY, s=1)
plt.title('All cameras merged')
plt.show()
# save the data (dicts are serialized as pickled .npy object arrays)
np.save('homography.npy', hg)
np.save('common_coords.npy', common_coords)
np.save('transformed_coords.npy', transformed_coords)
np.save('merged_coords.npy', merged_coords)
| rajatsaxena/MultiCameraPositionAlignment | mcpa.py | mcpa.py | py | 14,095 | python | en | code | 1 | github-code | 13 |
33121620336 |
import numpy as np
import scipy.linalg as linalg
from tqdm import tqdm
from numba import njit
# @njit
def ftcs(N, M, T, S_0, S_max, K, r, sigma, optimal_delta=True):
    '''
    Price a European call by an explicit (FTCS) finite-difference scheme.

    N -> number of time steps (ignored when optimal_delta is True)
    M -> number of price-grid intervals
    T -> time to maturity
    S_0 -> spot price; the grid spans [0, 2 * S_0]
    S_max -> NOTE: this argument is immediately overridden with 2 * S_0
    K -> strike, r -> risk-free rate, sigma -> volatility

    Returns (grid, option_value). If the scheme blows up (NaN detected),
    the previous grid is returned together with an interpolated value.
    '''
    S_max = 2 * S_0  # deliberately overrides the S_max argument
    if optimal_delta:
        dt = 0.0005
        N = int(T / dt)
    else:
        dt = T / N  # Time step
    dS = S_max / M  # Space step (kept for reference; unused below)
    all_S = np.linspace(0, S_max, M + 1)  # M+1 equally spaced prices from 0
    interior = np.arange(1, M)            # interior grid indices i = 1..M-1
    # Terminal condition: call payoff max(S - K, 0) at t = T.
    grid = np.zeros(shape=(M + 1, N + 1))
    grid[:, -1] = np.maximum(all_S - K, 0)
    # Explicit-scheme coefficient arrays (the trailing *2 comes from the
    # original derivation).
    alpha = (0.25 * dt * (sigma ** 2 * interior ** 2 - r * interior)) * 2
    beta = (-0.5 * dt * (sigma ** 2 * interior ** 2 + r)) * 2
    gamma = (0.25 * dt * (sigma ** 2 * interior ** 2 + r * interior)) * 2
    # Tridiagonal stepping matrix with linearity-at-the-boundary corrections.
    A = np.diag(alpha[1:], -1) + np.diag(1 + beta) + np.diag(gamma[:-1], 1)
    A[0, 0] += 2 * alpha[0]
    A[0, 1] -= alpha[0]
    A[-1, -1] += 2 * gamma[-1]
    A[-1, -2] -= gamma[-1]
    # March backwards in time from maturity to t = 0.
    for step in range(N - 1, -1, -1):
        previous = grid.copy()
        grid[1:-1, step] = np.dot(A, grid[1:-1, step + 1])
        # Linear extrapolation at both space boundaries.
        grid[0, step] = 2 * grid[1, step] - grid[2, step]
        grid[-1, step] = 2 * grid[-2, step] - grid[-3, step]
        # NOTE(review): this checks column 0 at every step, not the current
        # column -- preserved from the original; confirm intent.
        if np.isnan(grid[len(grid) // 2, 0]):
            print("Abort")
            option_value = np.interp(S_0, all_S, grid[:, 0])
            print(f"Estimated option value: {option_value}")
            return previous, option_value
    option_value = np.interp(S_0, all_S, grid[:, 0])
    return grid, option_value
# grid, value = ftcs(1000, 100, 5/12, 50, 100, 50, 0.06, 0.4)
# grid, value = ftcs(1000, 1000, 1, 50, 200, 50, 0.04, 0.3)
| DaanMoll/ComputationalFinance | assignment3/plots/ftcs_mat.py | ftcs_mat.py | py | 2,232 | python | en | code | 0 | github-code | 13 |
23804444876 | """
My MQTT library for ease of deploying HA agents.
The following entries are expected in the config dictionary:
[main]
mqttServer
mqttPort
mqttUser
mqttPass
mqttSet
mqttState
mqttId
"""
# TODO: augment LWT with interrupt handler to call specified function
import socket, time
import simplejson as json
import paho.mqtt.publish as mqtt
import paho.mqtt.client as mqttclient
import lib.mystat as mystat
class mymqtt:
    """
    MyMQTT provides an MQTT object that will provide a persistent connection
    and handle dispatching callbacks on received messages.
    It also supports multiple subscriptions.
    """
    def do_connect(self):
        """
        Try to connect...and keep trying in the event of a disconnect.
        """
        while True:
            try:
                self.mqttc.connect(self.mqtt_serv, self.mqtt_port)
                break
            except Exception:
                # Fixed: the previous bare `except:` also swallowed
                # KeyboardInterrupt/SystemExit, making this retry loop
                # impossible to interrupt with Ctrl-C.
                time.sleep(15)
        return
    def on_connect(self, client, userdata, flags, rc):
        """
        When we do connect, subscribe to the primary topic.
        Also, publish the LWT to let folks know we are alive.
        """
        client.subscribe(self.set_topic)
        self.publish(topic=self.lwt_topic, payload='Online', retain=True)
        return
    def on_message(self, client, userdata, message):
        """
        Hello? Pass the message to the recipient's own handler.
        """
        userdata.on_message(self, message)
        return
    def add_sub(self, topic):
        """ Fancy clients may need to listen to secondary topics. """
        self.mqttc.subscribe(topic)
        return
    def publish(self, topic='None', payload='None', fmt='plain', retain=False, qos=1):
        """ Take a payload and publish it to the given MQTT topic.
            Format can be 'plain' or 'json'. If json, payload should be a dict.
        """
        if fmt == 'plain':
            # must go out as string
            payload = str(payload)
        else:
            payload = json.dumps(payload)
        # Best effort: don't fail if we publish while disconnected.
        try:
            self.mqttc.publish(topic=topic, payload=payload,
                               qos=qos, retain=retain)
        except Exception:
            # Fixed: narrowed from a bare `except:` so Ctrl-C still works.
            pass
        return
    def update(self, payload='None', fmt='plain', qos=1, retain=False):
        """ Publish an update to the primary state topic. """
        self.publish(topic=self.state_topic, payload=payload,
                     fmt=fmt, qos=qos, retain=retain)
        return
    def loop_forever(self):
        """ You have seen Primer(2004), correct?
            If the loop does stop, kill the stats thread.
        """
        try:
            self.mqttc.loop_forever()
        except BaseException:
            # Deliberately broad (but explicit, not a bare `except:`):
            # KeyboardInterrupt must also shut the stats thread down.
            if hasattr(self, 'stats'):
                # `stats` only exists when the [main] stats option is enabled.
                self.stats.stop()
        return
    def loop_start(self):
        """ t=0 """
        self.mqttc.loop_start()
        return
    def loop_stop(self):
        """ Uh-oh. If it is the end, let's make sure we are all dead. """
        self.mqttc.loop_stop()
        if hasattr(self, 'stats'):
            # Guard: `stats` is absent when disabled in config; calling
            # .stop() unconditionally raised AttributeError in that case.
            self.stats.stop()
        return
    def __init__(self, config, userdata=None):
        """ This is how we start an IOT party.
            - find our client_id (the hostname)
            - generate our topic strings based on the id
            - set instance variables
            - pass the userdata object to handlers
            - configure LWT
            - kick off MQTT start-up
            - start a stats thread to post MQTT stats messages about this device
        """
        self.client_id = socket.gethostname()
        self.mqttc = mqttclient.Client(self.client_id)
        self.mqttc.disable_logger()
        self.set_topic = config['main']['mqttSet'].replace('CID', self.client_id)
        self.state_topic = config['main']['mqttState'].replace('CID', self.client_id)
        self.lwt_topic = "ha/sbc/" + self.client_id + "/LWT"
        self.mqtt_user = config['main']['mqttUser']
        self.mqtt_pass = config['main']['mqttPass']
        self.mqtt_serv = config['main']['mqttServer']
        self.mqtt_port = int(config['main']['mqttPort'])
        self.mqttc.username_pw_set(self.mqtt_user, self.mqtt_pass)
        # pass the userdata object to handlers
        self.mqttc.user_data_set(userdata)
        # set will to be executed if we disconnect prematurely
        self.mqttc.will_set(self.lwt_topic, 'Offline', retain=True)
        self.mqttc.on_connect = self.on_connect
        self.mqttc.on_message = self.on_message
        self.do_connect()
        if (config['main'].getboolean('stats', fallback=True)):
            self.stats = mystat.mystat(self)
        return
| jerobins/dmx-uplights | bin/lib/mymqtt.py | mymqtt.py | py | 4,360 | python | en | code | 0 | github-code | 13 |
6767243788 | #wainwright
import matplotlib.pyplot as plt
import numpy as np
import re
import retrace_path
import create_maze
import path_finding_old
#use haversine distance
def calculateDistance(long1, long2, lat1, lat2):
    """Great-circle (haversine) distance in metres; all inputs in radians."""
    earth_radius = 6371e3  # metres
    delta_long = long1 - long2
    delta_lat = lat1 - lat2
    hav = (np.power(np.sin(0.5 * delta_lat), 2)
           + np.cos(lat1) * np.cos(lat2) * np.power(np.sin(0.5 * delta_long), 2))
    central_angle = 2 * np.arctan2(np.sqrt(hav), np.sqrt(1 - hav))
    return earth_radius * central_angle
def read_route(File):
X=[]
Y=[]
times=[]
with open(File) as file:
n=-1
for line in file: #read csv and split
n+=1
split=line.split(',')
if n==0:
X.append('Start')
else:
X.append(split[0])
Y.append(split[2])
for i in range (0,len(Y)):
s1 = re.findall(r'[0-9]*h',Y[i]) #take hrs
S1=pow(60,2)*float(s1[0].replace('h',''))#seconds
s2 = re.findall(r'[0-9]*\:',Y[i]) #take mins
S2=pow(60,1)*float(s2[0].replace(':',''))#seconds
s3 = re.findall(r'\:[0-9]*',Y[i])#take seconds
S3=pow(60,0)*float(s3[0].replace(':',''))#seconds
S=S1+S2+S3 #sum to get seconds
times.append(S)
total_distance=643302.7844925951 #metres
V=total_distance/np.sum(times)
return V,times,X,Y
def read_peaks(File):
    """Read the peaks CSV, keeping rows flagged in column 9 with '1'.

    Returns (names, latitudes, longitudes, heights) as parallel string lists;
    the first line of the file is treated as a header and skipped.
    """
    places, lat, long, H = [], [], [], []
    with open(File) as handle:
        for row_number, line in enumerate(handle):
            if row_number == 0:
                continue  # header row
            fields = line.split(',')
            if fields[9] == '1':
                places.append(fields[1])
                lat.append(fields[7])
                long.append(fields[8])
                H.append(fields[4])
    return places, lat, long, H
def follow_route(places, lat, long, H, times, X, V):
    """For each leg of the route, compare actual time against expected time.

    Parameters: places/lat/long/H are the summit names, coordinates (degrees)
    and heights; times are per-leg actual times in seconds (times[0] is the
    start); X is the ordered list of visited summit names (X[0] == 'Start');
    V is the mean speed over the whole route in m/s.
    Returns (ratio, D, height_change): per-leg actual/expected time ratios,
    leg distances in metres, and height drops in metres.

    BUG FIX: expected time is distance / speed (d / V); the original computed
    d * V (distance times speed), which is dimensionally wrong and made the
    ratios meaningless.
    """
    ratio = []
    D = []
    height_change = []
    for leg in range(1, len(X)):
        actual_time = times[leg]
        destination = X[leg]
        origin = X[leg - 1]
        for j in range(len(places)):
            if places[j] != destination:
                continue
            for n in range(len(places)):
                if places[n] == origin:
                    dH = float(H[n]) - float(H[j])
                    d = calculateDistance(np.pi / 180 * float(long[n]),
                                          np.pi / 180 * float(long[j]),
                                          np.pi / 180 * float(lat[n]),
                                          np.pi / 180 * float(lat[j]))
                    height_change.append(dH)
                    D.append(d)
            # NOTE(review): assumes every leg's origin appears in `places`;
            # otherwise `d` here would be stale (as in the original).
            expected_time = d / V  # time = distance / speed
            ratio.append(actual_time / expected_time)
    return ratio, D, height_change
#%%
def plot_figures(ratio, height_change, D):
    """Diagnostic plots: pace ratio vs climb, vs leg index, histogram, vs slope."""
    slope_deg = []
    for k in range(len(D)):
        slope_deg.append(np.arctan(height_change[k] / D[k]) * 180 / np.pi)
    plt.figure(1)
    plt.scatter(height_change, ratio)
    plt.show()
    plt.figure(2)
    plt.scatter(np.linspace(0, 210, 210), ratio)
    plt.show()
    plt.figure(3)
    plt.hist(ratio, np.linspace(0, 12, 50))
    plt.show()
    plt.figure(4)
    plt.scatter(slope_deg, ratio)
    plt.show()
#%%
def main(grid,costs,res,friction):
    """Compare Paul Tierney's actual round time against the model's estimate.

    Returns the ratio model_time / actual_time (> 1 means the model is slower).
    NOTE(review): `costs` is accepted but never used here -- confirm before
    removing it from the signature.
    """
    V,times,X,Y = read_route("Paul tierney route.csv")
    places,lat,long,H = read_peaks("coordsOfExtendedWainwrights.csv")
    ratio,D,height_change = follow_route(places,lat,long,H,times,X,V)
    # Actual total time, converted from seconds to days.
    expt=np.sum(times)/3600/24
    print(expt,'days from data PAUL')
    #plot_figures(ratio,height_change,D)
    # Map each visited summit name onto its index in the peaks list.
    places_new=[]
    for i in range(0,len(X)):
        for j in range(0,len(places)):
            if str(X[i])==str(places[j]):
                places_new.append(j)
    # Load precomputed grid indices for every summit.
    with open('indices.csv') as file:
        places=[]
        x_index=[]
        y_index=[]
        for line in file:
            s=line.split(',')
            places.append(s[0])
            x_index.append(s[1])
            y_index.append(s[2])
    # redundant: the with-block above already closed the file
    file.close()
    c=0
    # Path-find between consecutive summits on the maze/grid, accumulating
    # the model's traversal cost (seconds).
    maze=create_maze.main(len(grid[0]),len(grid),0)
    for i in range(0,len(places_new)-1):
        X=places_new[i]
        Y=places_new[i+1]
        start=(int(x_index[X].rstrip()),int(y_index[X].rstrip()))
        end=(int(x_index[Y].rstrip()),int(y_index[Y].rstrip()))
        # print(X,start,'->',Y,end)
        path = path_finding_old.main(maze,start,end)
        cost_new=retrace_path.main(path,grid,res,friction)
        # print(cost_new)
        c+=cost_new
    # Model total time in days, and the slowdown factor vs the real run.
    model=c/3600/24
    print(c/3600/24,' days from model PAUL')
    r=model/expt
    print('so our model is ',r,'times slower')
    return r
# BUG(review): should be `if __name__ == '__main__':` -- as written the guard
# never fires. Note also that main() requires (grid, costs, res, friction),
# so calling main() with no arguments would raise TypeError once fixed.
if __name__=='main':
    main()
| JordanBarton/carrot47 | wainwright.py | wainwright.py | py | 5,797 | python | en | code | 0 | github-code | 13 |
5328833642 | from django.shortcuts import render
from django.template import loader
from django.http import HttpResponse
# Create your views here.
def main(request):
    """Render the blog's landing page."""
    context = {}
    template = loader.get_template('main.html')
    return HttpResponse(template.render(context, request))
def detalle(request, post_id):
    """Render the detail page for the post with primary key *post_id*.

    Fix: removed the leftover debug `print(post_id)` statement.
    """
    template = loader.get_template('detalle.html')
    context = {
        'post_id': post_id,
    }
    return HttpResponse(template.render(context, request))
| billygl/blog | posts/views.py | views.py | py | 506 | python | en | code | 0 | github-code | 13 |
28601505840 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 10 16:07:43 2019
@author: dxh
test convolving two func
"""
import numpy as np
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
from scipy.integrate import quad
import matplotlib as matt
import matplotlib.lines as mlines
from matplotlib import colors
matt.rcParams['font.family'] = 'STIXGeneral'
def poss_ln_gaussian(m1, mu, sigma):
if m1 >= 0:
poss = 1/(m1 * sigma * np.sqrt(2 * np.pi)) * np.exp(-(np.log(m1) - mu)**2/(2*sigma**2))
else:
poss = 0
return poss
def mass_fun_i(m1, a=2.35, mbh_max=80, mbh_min=5):
if m1>=mbh_min and m1<=mbh_max:
N = (mbh_max)**(-a+1)/(1-a) - (mbh_min)**(-a+1)/(1-a) # The intergral function of m**a for normalize the power law function
return m1**(-a)/N
else:
return 0.
poss_mass_fun=np.vectorize(mass_fun_i)
def joint_twofun(tau, t, a=2.35, mbh_max=80, mbh_min=5, sigma =0.2):
return mass_fun_i(tau, a=a, mbh_max=mbh_max, mbh_min=mbh_min) * poss_ln_gaussian(t, mu=np.log(tau), sigma = sigma)
def cov_twofun(t, a=2.35, mbh_max=80, mbh_min=5, sigma =0.2):
inter = quad(joint_twofun, 0, 100, args=(t, a, mbh_max, mbh_min,sigma))[0]
return inter
cov_twofun=np.vectorize(cov_twofun)
# --- Plot the raw power-law vs. its log-normal-smeared version ---
plt.subplots(figsize=(8, 6))
#print cov_twofun(5.5)
#Before conv:
x = np.logspace(np.log10(0.2), np.log10(85),300)
y = poss_mass_fun(x)
#y = poss_ln_gaussian(x,mu=np.log(5.5),sigma=0.2)
plt.plot(x, y ,'--', label = 'power-law distribution')
#plt.xlim(3,20)
#plt.show()
#After conv:
x = np.logspace(np.log10(0.2), np.log10(85),300)
#y = poss_mass_fun(x)
#y = poss_ln_gaussian(x,mu=np.log(5.5),sigma=0.2)
# Narrow smearing kernel (sigma=0.04); each point is a numerical integral, so this is slow.
y = cov_twofun(x, sigma=0.04)
plt.plot(x, y, label = 'power-law convolved with Log-Normal')
plt.xlabel("$m_1 (M_{\odot})$",fontsize=25)
plt.ylabel("P($m$)",fontsize=25)
plt.xlim(3,20)
plt.tick_params(labelsize=15)
plt.legend(prop={'size': 17},loc=1)
plt.savefig("convolving.pdf")
plt.show()
| dartoon/my_code | test_code/convolve_two_fun.py | convolve_two_fun.py | py | 1,996 | python | en | code | 0 | github-code | 13 |
33974877573 | from django.urls import path
from website.views import home,logout_user,register_user,customer_record,delete_record,Add_record,update_record
# URL routes for the website app; pk identifies a customer record.
urlpatterns = [
    # Landing page (also handles login/record listing in the home view).
    path('', home, name='home'),
    path('logout/',logout_user,name='logout'),
    path('register/',register_user,name='register'),
    # Record CRUD, keyed by primary key.
    path('record/<int:pk>/',customer_record,name='record'),
    path('deleterecord/<int:pk>/',delete_record,name='delete-record'),
    path('add_record/',Add_record,name='add_record'),
    path('update_record/<int:pk>', update_record, name='update_record'),
] | HellyModiKalpesh/CRM-internship | website/urls.py | urls.py | py | 524 | python | en | code | 1 | github-code | 13 |
7834216340 | """add checksum columns and revoke token table
Revision ID: b58139cfdc8c
Revises: f2833ac34bb6
Create Date: 2019-04-02 10:45:05.178481
"""
import sqlalchemy as sa
from alembic import op
from journalist_app import create_app
from models import Reply, Submission
from sdconfig import SecureDropConfig
from store import Storage, queued_add_checksum_for_file
from worker import create_queue
# revision identifiers, used by Alembic.
revision = "b58139cfdc8c"
down_revision = "f2833ac34bb6"
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Add nullable checksum columns, create revoked_tokens, and queue checksum backfill jobs."""
    with op.batch_alter_table("replies", schema=None) as batch_op:
        batch_op.add_column(sa.Column("checksum", sa.String(length=255), nullable=True))
    with op.batch_alter_table("submissions", schema=None) as batch_op:
        batch_op.add_column(sa.Column("checksum", sa.String(length=255), nullable=True))
    op.create_table(
        "revoked_tokens",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("journalist_id", sa.Integer(), nullable=True),
        sa.Column("token", sa.Text(), nullable=False),
        sa.ForeignKeyConstraint(["journalist_id"], ["journalists.id"]),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("token"),
    )
    try:
        config = SecureDropConfig.get_current()
    except ModuleNotFoundError:
        # Fresh install, nothing to migrate
        return
    app = create_app(config)
    # we need an app context for the rq worker extension to work properly
    with app.app_context():
        conn = op.get_bind()
        # Backfill submission checksums asynchronously via the rq worker queue,
        # one job per stored file.
        query = sa.text(
            """SELECT submissions.id, sources.filesystem_id, submissions.filename
               FROM submissions
               INNER JOIN sources
               ON submissions.source_id = sources.id
            """
        )
        for (sub_id, filesystem_id, filename) in conn.execute(query):
            full_path = Storage.get_default().path(filesystem_id, filename)
            create_queue(config.RQ_WORKER_NAME).enqueue(
                queued_add_checksum_for_file,
                Submission,
                int(sub_id),
                full_path,
                app.config["SQLALCHEMY_DATABASE_URI"],
            )
        # Same backfill for reply files.
        query = sa.text(
            """SELECT replies.id, sources.filesystem_id, replies.filename
               FROM replies
               INNER JOIN sources
               ON replies.source_id = sources.id
            """
        )
        for (rep_id, filesystem_id, filename) in conn.execute(query):
            full_path = Storage.get_default().path(filesystem_id, filename)
            create_queue(config.RQ_WORKER_NAME).enqueue(
                queued_add_checksum_for_file,
                Reply,
                int(rep_id),
                full_path,
                app.config["SQLALCHEMY_DATABASE_URI"],
            )
def downgrade() -> None:
    """Reverse the migration: drop revoked_tokens and the checksum columns."""
    op.drop_table("revoked_tokens")
    with op.batch_alter_table("submissions", schema=None) as batch_op:
        batch_op.drop_column("checksum")
    with op.batch_alter_table("replies", schema=None) as batch_op:
        batch_op.drop_column("checksum")
| freedomofpress/securedrop | securedrop/alembic/versions/b58139cfdc8c_add_checksum_columns_revoke_table.py | b58139cfdc8c_add_checksum_columns_revoke_table.py | py | 3,226 | python | en | code | 3,509 | github-code | 13 |
12430091908 | # This is a Python Program to compute prime factors of an integer.
n = int(input("\nEnter any Number = "))
print("-------------------------")
print("Prime Factors of {0} are:".format(n))
print("-------------------------")
for i in range (2, n+1):
if n%i == 0:
count = 0
for j in range (1, i+1):
if i%j == 0:
count += 1
if count == 2:
print(j)
print("-------------------------") | Prashant1099/Python-Programming-Examples-on-Mathematical-Functions | 6. Compute Prime Factors of an Integer.py | 6. Compute Prime Factors of an Integer.py | py | 494 | python | en | code | 0 | github-code | 13 |
74675201616 | import os
from .system import System
from ..forcefield import *
from ..topology import *
from .. import logger
class LammpsExporter():
    '''
    LammpsExporter export a non-polarizable :class:`System` to input files for LAMMPS.
    LAMMPS is powerful and flexible. But the input file for LAMMPS is a mess and cannot be handled in an elegant way,
    especially when there are hybrid functional forms.
    Here, only limited types of FF (OPLS-AA and SDK-CG) will be considered.
    The following potential functions are currently supported:
    * :class:`~mstk.forcefield.LJ126Term`
    * :class:`~mstk.forcefield.MieTerm`
    * :class:`~mstk.forcefield.HarmonicBondTerm`
    * :class:`~mstk.forcefield.HarmonicAngleTerm`
    * :class:`~mstk.forcefield.SDKAngleTerm`
    * :class:`~mstk.forcefield.OplsDihedralTerm`
    * :class:`~mstk.forcefield.PeriodicDihedralTerm`
    * :class:`~mstk.forcefield.OplsImproperTerm`
    In order to run simulation of SDK-CG model, the package CG-SDK should be included when compiling LAMMPS.
    '''
    def __init__(self):
        pass
    @staticmethod
    def export(system, data_out, in_out, **kwargs):
        '''
        Generate input files for LAMMPS from a system

        Writes a data file (topology + coefficients) and an input script.
        Raises if the system uses PBC-less topology, virtual sites, or
        unsupported FF terms.

        Parameters
        ----------
        system : System
        data_out : str
        in_out : str
        '''
        if not system.use_pbc:
            raise Exception('PBC required for exporting LAMMPS')
        supported_terms = {LJ126Term, MieTerm,
                           HarmonicBondTerm, MorseBondTerm,
                           HarmonicAngleTerm, SDKAngleTerm, LinearAngleTerm,
                           OplsDihedralTerm, PeriodicDihedralTerm,
                           OplsImproperTerm}
        unsupported = system.ff_classes - supported_terms
        if unsupported != set():
            raise Exception('Unsupported FF terms: %s' % (', '.join(map(lambda x: x.__name__, unsupported))))
        if system.topology.has_virtual_site:
            raise Exception('Virtual sites not supported by LAMMPS')
        if LinearAngleTerm in system.ff_classes:
            logger.warning('LinearAngleTerm not supported by LAMMPS. Exported in harmonic form at 178 degree')
        top = system.topology
        ff = system.ff
        # Morse bond requires hybrid bond styles
        is_morse = MorseBondTerm in system.ff_classes
        # SDK CGFF requires all pairwise parameters and hybrid angle styles
        is_sdk = MieTerm in system.ff_classes or SDKAngleTerm in system.ff_classes
        ### Assign LAMMPS types ####################################
        bond_types = list(ff.bond_terms.values())
        angle_types = list(ff.angle_terms.values())
        dihedral_types = list(ff.dihedral_terms.values())
        improper_types = list(ff.improper_terms.values())
        lmp_types: {str: Atom} = {}  # {'typ': Atom}
        lmp_types_atype: {str: AtomType} = {}  # {'typ': AtomType}
        # Collect one representative Atom (and its AtomType) per distinct atom type.
        for atom in top.atoms:
            if atom.type not in lmp_types:
                lmp_types[atom.type] = atom
                lmp_types_atype[atom.type] = ff.atom_types[atom.type]
        lmp_type_list = list(lmp_types.keys())
        lmp_symbol_list = [atom.symbol or 'UNK' for atom in lmp_types.values()]
        # 1-based LAMMPS type ids of bonds/angles marked fixed, to be constrained with SHAKE.
        lmp_shake_bonds = [i + 1 for i, bterm in enumerate(bond_types) if bterm.fixed]
        lmp_shake_angles = [i + 1 for i, aterm in enumerate(angle_types) if aterm.fixed]
        #############################################################
        string = 'Created by mstk\n'
        string += '\n%i atoms\n' % top.n_atom
        string += '%i bonds\n' % top.n_bond
        string += '%i angles\n' % top.n_angle
        string += '%i dihedrals\n' % top.n_dihedral
        string += '%i impropers\n' % top.n_improper
        string += '\n%i atom types\n' % len(lmp_type_list)
        string += '%i bond types\n' % len(bond_types)
        string += '%i angle types\n' % len(angle_types)
        string += '%i dihedral types\n' % len(dihedral_types)
        string += '%i improper types\n' % len(improper_types)
        if not top.cell.is_rectangular:
            raise Exception('Triclinic box haven\'t been implemented')
        # Cell lengths nm -> Angstrom (script below uses LAMMPS 'real' units).
        box = top.cell.size * 10
        string += '\n0 %10.4f xlo xhi\n' % box[0]
        string += '0 %10.4f ylo yhi\n' % box[1]
        string += '0 %10.4f zlo zhi\n' % box[2]
        string += '\nMasses\n\n'
        for i, (typ, atom) in enumerate(lmp_types.items()):
            string += '%4i %8.3f  # %8s\n' % (i + 1, atom.mass, typ)
        if len(bond_types) > 0:
            string += '\nBond Coeffs  # %s\n\n' % ('hybrid harmonic morse' if is_morse else 'harmonic')
            # Energies: kJ/mol -> kcal/mol via /4.184; lengths nm -> Angstrom via *10.
            for i, bterm in enumerate(bond_types):
                bond_style = '' if not is_morse else 'morse' if type(bterm) is MorseBondTerm else 'harmonic'
                if type(bterm) is HarmonicBondTerm:
                    string += '%4i %8s %12.6f %10.4f  # %s-%s\n' % (
                        i + 1, bond_style, bterm.k / 4.184 / 100, bterm.length * 10, bterm.type1, bterm.type2)
                elif type(bterm) is MorseBondTerm:
                    # Morse width alpha = sqrt(k / depth); /10 converts 1/nm -> 1/Angstrom.
                    alpha = (bterm.k / bterm.depth) ** 0.5
                    string += '%4i %8s %12.6f %10.4f %10.4f  # %s-%s\n' % (
                        i + 1, bond_style, bterm.depth / 4.184, alpha / 10, bterm.length * 10, bterm.type1, bterm.type2)
        if len(angle_types) > 0:
            string += '\nAngle Coeffs  # %s\n\n' % ('hybrid harmonic sdk' if is_sdk else 'harmonic')
            for i, aterm in enumerate(angle_types):
                angle_style = '' if not is_sdk else 'sdk' if type(aterm) is SDKAngleTerm else 'harmonic'
                # theta capped at 178 deg: LAMMPS harmonic form cannot represent linear angles.
                string += '%4i %8s %12.6f %10.4f  # %s-%s-%s\n' % (
                    i + 1, angle_style, aterm.k / 4.184, min(aterm.theta * RAD2DEG, 178),
                    aterm.type1, aterm.type2, aterm.type3)
        if len(dihedral_types) > 0:
            string += '\nDihedral Coeffs  # opls\n\n'
            for i, dterm in enumerate(dihedral_types):
                if type(dterm) is PeriodicDihedralTerm:
                    try:
                        dterm = dterm.to_opls_term()
                    except:
                        raise Exception('Only OPLS type dihedral is implemented')
                # OPLS convention in LAMMPS stores 2*k for each cosine term.
                k1, k2, k3, k4 = map(lambda x: x * 2 / 4.184, [dterm.k1, dterm.k2, dterm.k3, dterm.k4])
                string += '%4i %12.6f %12.6f %12.6f %12.6f  # %s-%s-%s-%s\n' % (
                    i + 1, k1, k2, k3, k4,
                    dterm.type1, dterm.type2, dterm.type3, dterm.type4)
        if len(improper_types) > 0:
            string += '\nImproper Coeffs  # cvff\n\n'
            for i, iterm in enumerate(improper_types):
                # cvff parameters: d = -1, n = 2 (standard OPLS-style improper mapping).
                string += '%4i %12.6f %4i %4i  # %s-%s-%s-%s\n' % (
                    i + 1, iterm.k / 4.184, -1, 2, iterm.type2, iterm.type3, iterm.type1, iterm.type4)
        string += '\nAtoms\n\n'
        for atom in top.atoms:
            typ = atom.type
            x, y, z = atom.position * 10
            string += '%8i %6i %6i %12.6f %10.4f %10.4f %10.4f  # %8s %8s\n' % (
                atom.id + 1, atom.molecule.id + 1, lmp_type_list.index(typ) + 1,
                atom.charge, x, y, z, atom.name, atom.molecule.name)
        if top.n_bond > 0:
            string += '\nBonds\n\n'
            for i, bond in enumerate(top.bonds):
                a1, a2 = bond.atom1, bond.atom2
                btype = bond_types.index(system.bond_terms[bond]) + 1
                string += '%6i %6i %6i %6i  # %s-%s\n' % (
                    i + 1, btype, a1.id + 1, a2.id + 1, a1.name, a2.name)
        if top.n_angle > 0:
            string += '\nAngles\n\n'
            for i, angle in enumerate(top.angles):
                aterm = angle_types.index(system.angle_terms[angle]) + 1
                a1, a2, a3 = angle.atom1, angle.atom2, angle.atom3
                string += '%6i %6i %6i %6i %6i  # %s-%s-%s\n' % (
                    i + 1, aterm, a1.id + 1, a2.id + 1, a3.id + 1, a1.name, a2.name, a3.name)
        if top.n_dihedral > 0:
            string += '\nDihedrals\n\n'
            for i, dihedral in enumerate(top.dihedrals):
                dtype = dihedral_types.index(system.dihedral_terms[dihedral]) + 1
                a1, a2, a3, a4 = dihedral.atom1, dihedral.atom2, dihedral.atom3, dihedral.atom4
                string += '%6i %6i %6i %6i %6i %6i  # %s-%s-%s-%s\n' % (
                    i + 1, dtype, a1.id + 1, a2.id + 1, a3.id + 1, a4.id + 1,
                    a1.name, a2.name, a3.name, a4.name)
        if top.n_improper > 0:
            string += '\nImpropers\n\n'
            # NOTE(review): atom order is permuted to (2, 3, 1, 4) — central-atom-first
            # convention for cvff impropers; confirm against mstk's Improper definition.
            for i, improper in enumerate(top.impropers):
                itype = improper_types.index(system.improper_terms[improper]) + 1
                a1, a2, a3, a4 = improper.atom1, improper.atom2, improper.atom3, improper.atom4
                string += '%6i %6i %6i %6i %6i %6i  # %s-%s-%s-%s\n' % (
                    i + 1, itype, a2.id + 1, a3.id + 1, a1.id + 1, a4.id + 1,
                    a2.name, a3.name, a1.name, a4.name)
        with open(data_out, 'wb') as f:
            f.write(string.encode())
        ### LAMMPS input script #################################################
        # Pre-compute the command fragments that depend on FF flavour.
        cmd_bond = 'hybrid harmonic morse' if is_morse else 'harmonic'
        cmd_angle = 'hybrid harmonic sdk' if is_sdk else 'harmonic'
        cmd_pair_base = 'lj/sdk' if is_sdk else 'lj/cut'
        cmd_pair_suffix = '/coul/long' if system.charged else ''
        cmd_mix = 'arithmetic' if system.ff.lj_mixing_rule == ForceField.LJ_MIXING_LB else 'geometric'
        cmd_tail = 'yes' if ff.vdw_long_range == ForceField.VDW_LONGRANGE_CORRECT else 'no'
        cmd_shift = 'yes' if ff.vdw_long_range == ForceField.VDW_LONGRANGE_SHIFT else 'no'
        # Comment out kspace when the system carries no charges.
        cmd_comment_kspace = '# ' if not system.charged else ''
        cmd_paras = ''
        for i, (typ_i, atom) in enumerate(lmp_types.items()):
            for j, (typ_j, atom) in enumerate(lmp_types.items()):
                if j < i:
                    continue
                atype1 = lmp_types_atype[typ_i]
                atype2 = lmp_types_atype[typ_j]
                try:
                    # LAMMPS CG-SDK does not allow lj mixing. have to write it explicitly
                    vdw = ff.get_vdw_term(atype1, atype2, mixing=is_sdk)
                except:
                    continue
                if is_sdk:
                    if type(vdw) is MieTerm:
                        if vdw.is_sdk:
                            lj_style = f'lj{int(vdw.repulsion)}_{int(vdw.attraction)}'
                        else:
                            raise Exception('MieTerm other than SDK type are not implemented')
                    else:
                        lj_style = 'lj12_6'
                    cmd_paras += 'pair_coeff %4i %4i %8s %9.5f %8.4f  # %8s %8s %s\n' % (
                        i + 1, j + 1, lj_style, vdw.epsilon / 4.184, vdw.sigma * 10,
                        lmp_type_list[i], lmp_type_list[j], ','.join(vdw.comments))
                else:
                    cmd_paras += 'pair_coeff %4i %4i %9.5f %8.4f  # %8s %8s %s\n' % (
                        i + 1, j + 1, vdw.epsilon / 4.184, vdw.sigma * 10,
                        lmp_type_list[i], lmp_type_list[j], ','.join(vdw.comments))
        # Build the SHAKE fix for rigid bonds/angles; comment it out when none are fixed.
        cmd_shake = 'fix SHAKE all shake 0.0001 20 0 b '
        if lmp_shake_bonds:
            cmd_shake += ' '.join(map(str, lmp_shake_bonds))
            if lmp_shake_angles:
                cmd_shake += ' a ' + ' '.join(map(str, lmp_shake_angles))
        else:
            cmd_shake = '# ' + cmd_shake
        # Coarse-grained SDK systems can use a much larger timestep (fs in 'real' units).
        timestep = 10.0 if is_sdk else 1.0
        string = f'''# created by mstk
units real
boundary p p p
atom_style full
bond_style {cmd_bond}
angle_style {cmd_angle}
dihedral_style opls
improper_style cvff
special_bonds lj 0 0 {ff.scale_14_vdw} coul 0 0 {ff.scale_14_coulomb}
pair_style {cmd_pair_base}{cmd_pair_suffix} {ff.vdw_cutoff * 10}
pair_modify mix {cmd_mix} tail {cmd_tail} shift {cmd_shift}
{cmd_comment_kspace}kspace_style pppm 1.0e-4
read_data {os.path.basename(data_out)}
{cmd_paras}
variable T equal 300
variable P equal 1
variable elec equal ecoul+elong
# thermo_style custom step press pe ebond eangle edihed eimp evdwl v_elec
# thermo 10
# minimize 1.0e-4 1.0e-6 200 1000
# reset_timestep 0
{cmd_shake}
fix ICECUBE all momentum 100 linear 1 1 1
velocity all create $T 12345
timestep {timestep}
fix NPT all npt temp $T $T {timestep * 100} iso $P $P {timestep * 500} fixedpoint 0 0 0
thermo_style custom step temp press pe emol evdwl v_elec density
thermo_modify flush yes
thermo 1000
variable slog equal logfreq(10,9,10)
dump TRJ all custom 10 dump.lammpstrj id mol type element q xu yu zu
dump_modify TRJ sort id element {' '.join(lmp_symbol_list)} every v_slog first yes
dump DCD all dcd 10000 dump.dcd
dump_modify DCD unwrap yes
restart 1000000 rst_*
run 1000000
'''
        with open(in_out, 'wb') as f:
            f.write(string.encode())
| z-gong/mstk | mstk/simsys/lmpexporter.py | lmpexporter.py | py | 12,785 | python | en | code | 7 | github-code | 13 |
74673694416 | import numpy as np
import pandas as pd
from packaging import version
from scipy.sparse import csr_matrix
from typing import Mapping, List, Tuple, Union
from sklearn.metrics.pairwise import cosine_similarity
from bertopic.representation._base import BaseRepresentation
from sklearn import __version__ as sklearn_version
class KeyBERTInspired(BaseRepresentation):
    def __init__(self,
                 top_n_words: int = 10,
                 nr_repr_docs: int = 5,
                 nr_samples: int = 500,
                 nr_candidate_words: int = 100,
                 random_state: int = 42):
        """ Use a KeyBERT-like model to fine-tune the topic representations
        The algorithm follows KeyBERT but does some optimization in
        order to speed up inference.
        The steps are as follows. First, we extract the top n representative
        documents per topic. To extract the representative documents, we
        randomly sample a number of candidate documents per cluster
        which is controlled by the `nr_samples` parameter. Then,
        the top n representative documents are extracted by calculating
        the c-TF-IDF representation for the candidate documents and finding,
        through cosine similarity, which are closest to the topic c-TF-IDF representation.
        Next, the top n words per topic are extracted based on their
        c-TF-IDF representation, which is controlled by the `nr_repr_docs`
        parameter.
        Then, we extract the embeddings for words and representative documents
        and create topic embeddings by averaging the representative documents.
        Finally, the most similar words to each topic are extracted by
        calculating the cosine similarity between word and topic embeddings.
        Arguments:
            top_n_words: The top n words to extract per topic.
            nr_repr_docs: The number of representative documents to extract per cluster.
            nr_samples: The number of candidate documents to extract per cluster.
            nr_candidate_words: The number of candidate words per cluster.
            random_state: The random state for randomly sampling candidate documents.
        Usage:
        ```python
        from bertopic.representation import KeyBERTInspired
        from bertopic import BERTopic
        # Create your representation model
        representation_model = KeyBERTInspired()
        # Use the representation model in BERTopic on top of the default pipeline
        topic_model = BERTopic(representation_model=representation_model)
        ```
        """
        self.top_n_words = top_n_words
        self.nr_repr_docs = nr_repr_docs
        self.nr_samples = nr_samples
        self.nr_candidate_words = nr_candidate_words
        self.random_state = random_state
    def extract_topics(self,
                       topic_model,
                       documents: pd.DataFrame,
                       c_tf_idf: csr_matrix,
                       topics: Mapping[str, List[Tuple[str, float]]]
                       ) -> Mapping[str, List[Tuple[str, float]]]:
        """ Extract topics
        Arguments:
            topic_model: A BERTopic model
            documents: All input documents
            c_tf_idf: The topic c-TF-IDF representation
            topics: The candidate topics as calculated with c-TF-IDF
        Returns:
            updated_topics: Updated topic representations
        """
        # We extract the top n representative documents per class
        _, representative_docs, repr_doc_indices, _ = topic_model._extract_representative_docs(c_tf_idf, documents, topics, self.nr_samples, self.nr_repr_docs)
        # We extract the top n words per class
        topics = self._extract_candidate_words(topic_model, c_tf_idf, topics)
        # We calculate the similarity between word and document embeddings and create
        # topic embeddings from the representative document embeddings
        sim_matrix, words = self._extract_embeddings(topic_model, topics, representative_docs, repr_doc_indices)
        # Find the best matching words based on the similarity matrix for each topic
        updated_topics = self._extract_top_words(words, topics, sim_matrix)
        return updated_topics
    def _extract_candidate_words(self,
                                 topic_model,
                                 c_tf_idf: csr_matrix,
                                 topics: Mapping[str, List[Tuple[str, float]]]
                                 ) -> Mapping[str, List[Tuple[str, float]]]:
        """ For each topic, extract candidate words based on the c-TF-IDF
        representation.
        Arguments:
            topic_model: A BERTopic model
            c_tf_idf: The topic c-TF-IDF representation
            topics: The top words per topic
        Returns:
            topics: The `self.nr_candidate_words` per topic
        """
        labels = [int(label) for label in sorted(list(topics.keys()))]
        # Scikit-Learn Deprecation: get_feature_names is deprecated in 1.0
        # and will be removed in 1.2. Please use get_feature_names_out instead.
        if version.parse(sklearn_version) >= version.parse("1.0.0"):
            words = topic_model.vectorizer_model.get_feature_names_out()
        else:
            words = topic_model.vectorizer_model.get_feature_names()
        # Per-topic candidate word indices/scores; argsort gives ascending order,
        # which is reversed ([::-1]) below to yield descending score order.
        indices = topic_model._top_n_idx_sparse(c_tf_idf, self.nr_candidate_words)
        scores = topic_model._top_n_values_sparse(c_tf_idf, indices)
        sorted_indices = np.argsort(scores, 1)
        indices = np.take_along_axis(indices, sorted_indices, axis=1)
        scores = np.take_along_axis(scores, sorted_indices, axis=1)
        # Get top 30 words per topic based on c-TF-IDF score
        topics = {label: [(words[word_index], score)
                          if word_index is not None and score > 0
                          else ("", 0.00001)  # pad missing/zero-score slots with a tiny dummy weight
                          for word_index, score in zip(indices[index][::-1], scores[index][::-1])
                          ]
                  for index, label in enumerate(labels)}
        # Keep only the word strings (drop scores) for the candidate set.
        topics = {label: list(zip(*values[:self.nr_candidate_words]))[0] for label, values in topics.items()}
        return topics
    def _extract_embeddings(self,
                            topic_model,
                            topics: Mapping[str, List[Tuple[str, float]]],
                            representative_docs: List[str],
                            repr_doc_indices: List[List[int]]
                            ) -> Union[np.ndarray, List[str]]:
        """ Extract the representative document embeddings and create topic embeddings.
        Then extract word embeddings and calculate the cosine similarity between topic
        embeddings and the word embeddings. Topic embeddings are the average of
        representative document embeddings.
        Arguments:
            topic_model: A BERTopic model
            topics: The top words per topic
            representative_docs: A flat list of representative documents
            repr_doc_indices: The indices of representative documents
                              that belong to each topic
        Returns:
            sim: The similarity matrix between word and topic embeddings
            vocab: The complete vocabulary of input documents
        """
        # Calculate representative docs embeddings and create topic embeddings
        repr_embeddings = topic_model._extract_embeddings(representative_docs, method="document", verbose=False)
        # Topic embedding = mean of that topic's representative-document embeddings.
        topic_embeddings = [np.mean(repr_embeddings[i[0]:i[-1]+1], axis=0) for i in repr_doc_indices]
        # Calculate word embeddings and extract best matching with updated topic_embeddings
        vocab = list(set([word for words in topics.values() for word in words]))
        word_embeddings = topic_model._extract_embeddings(vocab, method="document", verbose=False)
        sim = cosine_similarity(topic_embeddings, word_embeddings)
        return sim, vocab
    def _extract_top_words(self,
                           vocab: List[str],
                           topics: Mapping[str, List[Tuple[str, float]]],
                           sim: np.ndarray
                           ) -> Mapping[str, List[Tuple[str, float]]]:
        """ Extract the top n words per topic based on the
        similarity matrix between topics and words.
        Arguments:
            vocab: The complete vocabulary of input documents
            topics: The top words per topic
            sim: The similarity matrix between word and topic embeddings
        Returns:
            updated_topics: The updated topic representations
        """
        labels = [int(label) for label in sorted(list(topics.keys()))]
        updated_topics = {}
        for i, topic in enumerate(labels):
            # Select the top_n_words with the highest topic-word cosine similarity,
            # returned in descending similarity order.
            indices = [vocab.index(word) for word in topics[topic]]
            values = sim[:, indices][i]
            word_indices = [indices[index] for index in np.argsort(values)[-self.top_n_words:]]
            updated_topics[topic] = [(vocab[index], val) for val, index in zip(np.sort(values)[-self.top_n_words:], word_indices)][::-1]
        return updated_topics
| MaartenGr/BERTopic | bertopic/representation/_keybert.py | _keybert.py | py | 9,223 | python | en | code | 4,945 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.