seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
32520478741 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 24 13:13:32 2018
@author: Administrator
"""
import wget, time
import os
# Network address of the remote daily trace file to download.
DATA_URL = 'http://164.52.0.183:8000/file/findTrace/2018-12-24.txt'
# DATA_URL = '/home/xxx/book/data.tar.gz'
# Local filename the download is saved under.
out_fname = '2018-12-24.txt'
def download(DATA_URL):
    """Download today's trace file (when not already on disk) and create
    a month-day output directory for the parsed results.

    Args:
        DATA_URL: URL of the remote trace file.

    Returns:
        Name of the created directory, e.g. "Dec-24".
    """
    out_fname = '2018-12-24.txt'
    date = time.ctime()
    # split() without an argument splits on runs of whitespace: ctime()
    # pads single-digit days with an extra space ("Mon Dec  4 ..."), which
    # would make split(' ')[2] an empty string.
    parts = date.split()
    path = parts[1] + '-' + parts[2]
    # Only fetch when missing; the original downloaded unconditionally
    # first, which made this existence check always true.
    if not os.path.exists('./' + out_fname):
        wget.download(DATA_URL, out=out_fname)
    else:
        print("today's data has been download")
    mkdir(path)
    return path
def mkdir(path):
    """Create directory *path* (and parents) if it does not exist.

    Returns True when the directory was created, False when it already
    existed; prints a status message either way.
    """
    if os.path.exists(path):
        # Already present -> leave untouched and report.
        print(path +' 目录已存在')
        return False
    # Missing -> create the full directory chain.
    os.makedirs(path)
    print(path +' 创建成功')
    return True
# (Optional archive extraction / download cleanup is intentionally disabled.)
# Download today's log (if needed) and split it into per-channel CSV rows.
path = download(DATA_URL)
# Use a context manager so the input file is always closed.
with open("./" + out_fname) as file:
    lines = file.readlines()
output = {}
temp = ""
cnt = 0
for line in lines:
    line = line.strip('\n')
    if line.startswith("FPS"):
        # An FPS line closes the current block: append the FPS value to the
        # last `cnt` rows collected for the current channel.
        fps_split = line.split("=")
        fps_temp = fps_split[1]
        for i in range(1, cnt + 1):
            output[temp][-i] += " " + fps_temp
        cnt = 0
    elif line.startswith("ID:dokidoki/mlinkm/"):
        Channel_ID_1200 = line[19:]
        key = Channel_ID_1200 + "_high"
        # setdefault keeps previously collected rows for a returning channel.
        # The original tested the bare channel id against output (never a
        # key), so it reset the list every time the channel reappeared.
        output.setdefault(key, [])
        temp = key
        cnt = 0
    elif line.startswith("ID:EXT-ENC-0/dokidoki/mlinkm/"):
        Channel_ID_500 = line[29:]
        key = Channel_ID_500 + "_low"
        # The original tested Channel_ID_1200 here (wrong variable), which
        # could also raise NameError when a low-rate block appeared first.
        output.setdefault(key, [])
        temp = key
        cnt = 0
    else:
        # Data row for the current channel (KeyError here means the file
        # started with data before any ID header — same as the original).
        output[temp].append(line)
        cnt += 1
# Write one CSV per channel into the dated directory; the context manager
# guarantees every output file is flushed and closed (the original never
# closed them).
for key, value in output.items():
    with open("./" + path + "/" + str(key) + ".csv", "w") as f_file:
        for idx in range(len(value)):
            f_file.write(value[idx].replace(" ", ",") + "\n")
| Y1ran/Pensieve-A3C-Streaming-Adaptive-Bitrate-Model | final/download_data.py | download_data.py | py | 2,603 | python | en | code | 6 | github-code | 36 |
42263303655 | from django import template
from all_products.queryutil import ShirtQuery
register = template.Library()
@register.filter
def shirt_price(shirt):
    """Template filter: price of the first size of *shirt* that has stock.

    Returns None when no size is in stock (Django renders that as empty).
    """
    query = ShirtQuery(shirt)
    for size in query.sizes:
        if query.get_stock(size) > 0:
            return query.get_price(size)
40243466051 | import streamlit as st
import cv2
import time
import os
import tempfile
import matplotlib.pyplot as plt
from src.utils.streamlit import factors
from src.utils.onnx_process import load_model, load_label_map, video_predict
from src.utils.video_process import video_stitch
from src.utils.streamlit import save_uploaded_file
# Inference configuration: exported ONNX detector, its label map, the
# network input resolution and the detection thresholds.
MODEL_PATH = "./results/models/onnx_dive/model.onnx"
LABEL_PATH = "./results/models/onnx_dive/label_map.pbtxt"
MODEL_INPUT_SIZE = (640, 640) # width, height
NUM_CLASSES = 5
# Minimum detection confidence and non-max-suppression IoU threshold.
CONF_THRESHOLD = 0.2
NMS_THRESHOLD = 0.1
##STEP 1 Load Model
# Loading the ONNX session can take a while; show a spinner meanwhile.
with st.spinner(text="Loading Model ... Please be patient!"):
    session, input_name, output_name = load_model(MODEL_PATH)
##STEP 2 Upload Video
st.write("# Upload diving video:\n")
with st.expander("How to Use YOEO"):
    st.write("............")
# create temp dir for storing video and outputs
temp_dir = tempfile.TemporaryDirectory()
temp_path = temp_dir.name
# Accept a single mp4/mov upload at a time.
video_file = st.file_uploader(
    "Choose a File", accept_multiple_files=False, type=["mp4", "mov"]
)
if video_file is not None:
    file_details = {"FileName": video_file.name, "FileType": video_file.type}
    st.write(file_details)
    # Persist the upload to the temp dir so OpenCV/ffmpeg can read it by path.
    video_path = save_uploaded_file(video_file, temp_path)
    st.write(video_path)
    # get fps for optimization slider max value
    fps = round(cv2.VideoCapture(video_path).get(cv2.CAP_PROP_FPS))
    factors_fps = list(factors(fps))
    # user options
    marine_options = st.multiselect(
        "What flora & fauna do you prefer",
        ["Fish", "Coral", "Turtle", "Shark", "Manta Ray"],
        ["Fish", "Coral", "Turtle", "Shark", "Manta Ray"],
        help="Select the flora & fauna you want to be included in the final video",
    )
    # Map the human-readable selections to zero-based class indices via the
    # label map (class ids in the .pbtxt are 1-based, hence `key - 1`).
    label_map = load_label_map(LABEL_PATH)
    new_label_map = {}
    for key, val in label_map.items():
        new_label_map[val["name"].lower().replace('"', "")] = key - 1
    marine_options = [new_label_map[x.lower()] for x in marine_options]
    # user advanced options
    with st.expander("Advanced Options"):
        st.write("###### Leave as default if unsure!")
        opt_val = st.select_slider(
            "Optimization", options=factors_fps, value=max(factors_fps)
        ) # num of frames per sec to do inferencing
        strict_val = st.slider(
            "Trimming Strictness", min_value=0, value=fps
        ) # number of frames prior to keep if current frame is to be kept
        sharpen = st.checkbox("Sharpen Video")
        color_grade = st.checkbox("Color Grade Video")
        yt_link = st.text_input("Enter a Youtube Audio Link")
    # start inferencing
    trim_bt = st.button("Start Auto-Trimming!")
    st.write(trim_bt)
    if trim_bt:
        with st.spinner(text="YOEO working its magic: IN PROGRESS ..."):
            # Run the detector over the sampled frames.
            (
                frame_predictions,
                bbox_class_score,
                orig_frames,
                origi_shape,
                fps,
            ) = video_predict(
                video_path,
                "frames",
                session,
                input_name,
                output_name,
                LABEL_PATH,
                MODEL_INPUT_SIZE,
                NUM_CLASSES,
                CONF_THRESHOLD,
                NMS_THRESHOLD,
                opt_val,
            )
            # Stitch annotated frames back into a video file.
            bbox_video_path = os.path.join(temp_path, "orig_video")
            video_stitch(
                frame_predictions,
                bbox_video_path,
                video_file.name.replace(".mp4", ""),
                origi_shape,
                fps,
            )
            # recode video using ffmpeg
            # (re-encode to H.264 so the browser <video> player can show it)
            video_bbox_filename = os.path.join(bbox_video_path, video_file.name)
            video_bbox_recode_filename = video_bbox_filename.replace(".mp4", "_recoded.mp4")
            os.system(
                "ffmpeg -i {} -vcodec libx264 {}".format(
                    os.path.join(bbox_video_path, video_file.name),
                    video_bbox_recode_filename,
                )
            )
        tab_od, tab_trim, tab_beauty = st.tabs(
            [
                "YOEO's Object Detection Results",
                "Your Trimmed Video",
                "Beautiful Photos Captured By You",
            ]
        )
        with tab_od:
            st.write(video_bbox_filename)
            # st.write(os.listdir(os.path.join(RESULTS_PATH, latest_folder)))
            st.write(video_bbox_recode_filename)
            st.subheader("YOEO's Object Detection Results:")
            st.video(video_bbox_recode_filename)
            st.subheader("Flora & Fauna Detected: ")
            # NOTE(review): these metrics are hard-coded placeholders — they
            # do not reflect the actual detections; confirm before release.
            col1, col2, col3 = st.columns(3)
            col1.metric("# Species Detected", "2")
            col2.metric("Turtle", "1")
            col3.metric("Fish", "23")
        with tab_trim:
            st.subheader("YOEO's Trimmed Video:")
        with tab_beauty:
            st.subheader("YOEO's Beautiful Photos:")
with st.expander("About YOEO"):
    st.write(
        "YOEO (You Only Edit Once) is an object detection model and web application created by data scientists and AI practitioners who are diving enthusiasts!"
    )
    st.write("The Model is trained on ...")
##STEP 3
# st.write("# 3. YOEO working its magic: ")
# st.write("-> to insert model inference and stich algo in progress bar")
# my_bar = st.progress(0)
# for percent_complete in range(100):
# time.sleep(0.1)
# my_bar.progress(percent_complete + 1)
##STEP 4
# st.write("# 4. Objects of interest detected and trimmed video output: ")
# col1, col2, col3 = st.columns(3)
# col1.metric("# Species Detected", "2")
# col2.metric("Turtle", "1")
# col3.metric("Fish", "23")
# st.video(vid_file)
| teyang-lau/you-only-edit-once | streamlit_app_onnx.py | streamlit_app_onnx.py | py | 5,602 | python | en | code | 6 | github-code | 36 |
class Solution(object):
    def countSort(self, nums):
        """Sort *nums* in place and return (value, count) pairs ordered by value.

        The original hand-rolled run-length counter tested ``if last:``
        instead of comparing against a sentinel, so falsy values such as 0
        were silently dropped from the count; collections.Counter has no
        such problem.
        """
        from collections import Counter
        nums.sort()
        return sorted(Counter(nums).items())

    def subsets(self, nums):
        """Return all distinct subsets of *nums* (which may contain duplicates)."""
        results = [[]]
        for n, count in self.countSort(nums):
            # Extend every previously built subset with 1..count copies of n.
            # Iterating a snapshot of results guarantees no duplicates.
            for subset in results[:]:
                for choose_count in range(1, count + 1):
                    results.append(subset + [n] * choose_count)
        return results
if __name__ == '__main__':
    # Ad-hoc smoke test: all distinct subsets of [1, 2, 2].
    mySolution = Solution()
    print(mySolution.subsets([1, 2, 2]))
| luluxing3/LeetCode | lulu/substsII.py | substsII.py | py | 1,031 | python | en | code | 1 | github-code | 36 |
24324519488 | from pathlib import Path
from typing import IO
def sentencepiece_load(file):
    """Load and return a SentencePiece model from *file* (path-like or str)."""
    from sentencepiece import SentencePieceProcessor

    processor = SentencePieceProcessor()
    processor.Load(str(file))
    return processor
# source: https://github.com/allenai/allennlp/blob/master/allennlp/common/file_utils.py#L147 # NOQA
def http_get_temp(url: str, temp_file: IO):
    """Stream *url* into *temp_file*, with a tqdm progress bar when available.

    Returns the response headers. (The original annotated the return type
    as ``None``, which was wrong — http_get() relies on the headers to
    decide whether the payload is a gzipped tarball.)
    """
    import requests
    import warnings
    from urllib3.exceptions import InsecureRequestWarning
    # temporary fix for dealing with this SSL certificate issue:
    # https://github.com/bheinzerling/bpemb/issues/63
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", InsecureRequestWarning)
        req = requests.get(url, stream=True, verify=False)
        req.raise_for_status()
        content_length = req.headers.get('Content-Length')
        total = int(content_length) if content_length is not None else None
        try:
            # The progress bar is optional; fall back silently without tqdm.
            from tqdm import tqdm
            progress = tqdm(unit="B", total=total)
        except ImportError:
            progress = None
        for chunk in req.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive new chunks
                if progress is not None:
                    progress.update(len(chunk))
                temp_file.write(chunk)
        if progress is not None:
            progress.close()
        return req.headers
# source: https://github.com/allenai/allennlp/blob/master/allennlp/common/file_utils.py#L147 # NOQA
def http_get(url: str, outfile: Path, ignore_tardir=False) -> None:
    """Download *url* to *outfile*.

    Gzipped tarballs (per the Content-Type header) are transparently
    unpacked; the archive must contain exactly one member, and that
    member must unpack to *outfile*.
    """
    import tempfile
    import shutil
    with tempfile.NamedTemporaryFile() as temp_file:
        headers = http_get_temp(url, temp_file)
        # we are copying the file before closing it, flush to avoid truncation
        temp_file.flush()
        # shutil.copyfileobj() starts at current position, so go to the start
        temp_file.seek(0)
        outfile.parent.mkdir(exist_ok=True, parents=True)
        if headers.get("Content-Type") != "application/x-gzip":
            # Plain payload: copy the bytes straight to the destination.
            with open(str(outfile), 'wb') as out:
                shutil.copyfileobj(temp_file, out)
        else:
            import tarfile
            archive = tarfile.open(fileobj=temp_file)
            members = archive.getmembers()
            if len(members) != 1:
                raise NotImplementedError("TODO: extract multiple files")
            member = members[0]
            if ignore_tardir:
                # Drop any leading directory stored inside the archive.
                member.name = Path(member.name).name
            archive.extract(member, str(outfile.parent))
            extracted_file = outfile.parent / member.name
            assert extracted_file == outfile, "{} != {}".format(
                extracted_file, outfile)
    return outfile
def load_word2vec_file(word2vec_file, add_pad=False, pad="<pad>"):
    """Load a word2vec file in either text or bin format.

    When *add_pad* is true a padding vector for *pad* is appended; it is
    an error if the padding token is already in the vocabulary.
    """
    from gensim.models import KeyedVectors

    path = str(word2vec_file)
    # ".bin" files use word2vec's binary format; everything else is text.
    vecs = KeyedVectors.load_word2vec_format(path, binary=path.endswith(".bin"))
    if add_pad:
        if pad in vecs:
            raise ValueError("Attempted to add <pad>, but already present")
        add_embeddings(vecs, pad)
    return vecs
def add_embeddings(keyed_vectors, *words, init=None):
    """Append one vector per word to *keyed_vectors*.

    *init* is a factory taking a (rows, cols) shape tuple; it defaults to
    numpy.zeros. Returns the new vocabulary size.
    """
    import numpy as np

    factory = np.zeros if init is None else init
    dim = keyed_vectors.vectors.shape[1]
    keyed_vectors.add_vectors(words, factory((len(words), dim)))
    return keyed_vectors.vectors.shape[0]
| bheinzerling/bpemb | bpemb/util.py | util.py | py | 3,501 | python | en | code | 1,146 | github-code | 36 |
33899004274 | import json
from copy import deepcopy
import numpy as np
import pandas as pd
from CWiPy import settings
from CWiPy.MembershipFunction import MembershipFunction
from CWiPy.Modifier import dict_modifiers
def get_synonyms(word):
    """
    Args:
        word: term to look up; hyphens are mapped to underscores to match
            the cached thesaurus filenames.
    Returns:
        list of dicts with keys 'term' and 'similarity' (-100 to 100)
    Raises:
        OSError: when the cached thesaurus file is missing — load words
            first. (The original docstring named a nonexistent
            ``IOException``.)
    """
    word = word.replace('-', '_')
    data_file = \
        f"{settings.BASE_DIR}/{settings.STATIC_DIR}/thesaurus/{word}.json"
    result = []
    # The context manager closes the file; the original additionally called
    # f.close(), which was redundant.
    with open(data_file) as f:
        thesaurus_data = json.load(f)
    for entry in thesaurus_data["data"]["definitionData"]["definitions"]:
        for synonym in entry["synonyms"]:
            result.append({
                'term': synonym['term'],
                'similarity': int(synonym['similarity']),
            })
    return result
def get_modifiers_synonyms(limit=100):
    """
    Args:
        limit: minimum similarity a synonym must reach to be included
    Returns:
        dict mapping each synonym term to the *set* of modifier names it
        may stand for
    """
    synonym_map = {}
    for modifier in dict_modifiers():
        for entry in get_synonyms(modifier):
            if entry['similarity'] >= limit:
                synonym_map.setdefault(entry['term'], set()).add(modifier)
    return synonym_map
class SyntaxException(BaseException):
    # Raised when a fuzzy query string cannot be parsed.
    pass
class FuzzyQuery:
    """Parse a natural-language fuzzy query such as
    "middle age and very high salary" into crisp numeric range filters,
    rendered either as a SQL WHERE clause (to_sql) or as a boolean pandas
    mask (matching)."""

    def __init__(self, fuzzy_query, fields, limit=None, alpha_cut=None,
                 modifiers_included=None, round_values=None):
        """
        Args:
            fuzzy_query: fuzzy query string
            fields: dict of querying numerical fields: {field_name, {membership_function_name: membership_function}}
            limit: similarity limit for synonyms
            alpha_cut: alpha cut applied for range filtering
            modifiers_included: are modifiers included in query
            round_values: round returning query values
        Raises:
            SyntaxException: on syntax error
        """
        # Fall back to sensible defaults when the caller passed None.
        if limit is None:
            limit = 100
        if alpha_cut is None:
            alpha_cut = 0.5
        if modifiers_included is None:
            modifiers_included = True
        if round_values is None:
            round_values = False
        self.fuzzy_query = fuzzy_query
        self.fields = fields
        self.limit = limit
        self.alpha_cut = alpha_cut
        self.round_values = round_values
        self.modifiers_included = modifiers_included

    def extract_crisp_parameters(self):
        """
        Converts fuzzy_query to crisp query parameters.
        Fuzzy expression structure:
        [composite modifier] [summarizer] [field] [connector]
        [composite modifier] [summarizer] [field] [connector]
        [composite modifier] [summarizer] [field] [connector] ...
        [composite modifier] [summarizer] [field]
        example fuzzy_query: middle age and very high salary
        [connector] = {and, or, but}
        Returns:
            list of [field, lower bound, upper bound, connector]
        """
        EOQ_TOKEN = "~~~END_TOKEN~~~"
        if self.fuzzy_query == "":
            raise SyntaxException("Empty query")
        tokens = list(
            filter(lambda x: len(x) > 0, self.fuzzy_query.split(' ')))
        tokens.append(EOQ_TOKEN)
        modifiers_synonyms = get_modifiers_synonyms(self.limit)
        modifiers = dict_modifiers()
        connectors = ["and", "or", "", "but", EOQ_TOKEN]
        connector_sql = {
            "and": "and",
            "or": "or",
            "but": "and",
            # "" is listed in `connectors`; map it too so it cannot raise
            # KeyError below (the original omitted it).
            "": "",
            EOQ_TOKEN: "",
        }
        expression = []
        result = []
        for token in tokens:
            if token in connectors:
                token = connector_sql[token]
                if self.modifiers_included and len(expression) < 2:
                    raise SyntaxException(
                        f"Empty or incorrect expression {expression}")
                original_expression = expression
                # Tokens were collected left-to-right; reverse so the field
                # comes first, then the membership function, then modifiers.
                expression.reverse()
                if expression[0] not in self.fields.keys():
                    raise SyntaxException(
                        f"Unknown field {expression[0]} in expression "
                        f"{original_expression}")
                field = expression.pop(0)
                mf_name = expression[0]
                if mf_name not in self.fields[field].keys():
                    raise SyntaxException(
                        f"Unknown membership function {mf_name} in expression "
                        f"{original_expression}")
                mf: MembershipFunction = deepcopy(self.fields[field][mf_name])
                expression.pop(0)
                while len(expression) > 0:
                    if expression[0] not in modifiers and expression[0] \
                            not in modifiers_synonyms:
                        raise SyntaxException(
                            f"Unknown modifier {expression[0]} in expression "
                            f"{original_expression}")
                    if expression[0] in modifiers.keys():
                        mf.set_modifier(modifiers[expression[0]](mf.modifier))
                    else:
                        # BUGFIX: modifiers_synonyms maps a synonym to a *set*
                        # of modifier names. The original indexed the set with
                        # [0] (TypeError) and then called the resulting name
                        # string (another TypeError). Pick one name from the
                        # set and look up the actual modifier class.
                        synonym_modifier = next(
                            iter(modifiers_synonyms[expression[0]]))
                        mf.set_modifier(
                            modifiers[synonym_modifier](mf.modifier))
                    expression.pop(0)
                l, r = mf.extract_range(self.alpha_cut)
                result.append([field, l, r, token])
            else:
                expression.append(token)
        return result

    def to_sql(self):
        """
        Returns:
            Constructed SQL where clause
        """
        crisp_query = ""
        params = self.extract_crisp_parameters()
        for (field, l, r, token) in params:
            if self.round_values:
                l, r = int(l), int(r)
            crisp_query += f" {l} <= {field} and {field} <= {r} {token} "
        return crisp_query

    def matching(self, df: pd.DataFrame) -> pd.Series:
        """
        Args:
            df: Querying pandas dataframe
        Returns:
            Boolean Series marking the rows that match the fuzzy query
        """
        params = self.extract_crisp_parameters()
        result_series = pd.Series(np.ones(len(df), dtype=bool))
        connector = ""
        for (field, left, right, next_connector) in params:
            if self.round_values:
                left, right = int(left), int(right)
            matching_series = (left <= df[field]) & (df[field] <= right)
            # The connector emitted with the *previous* clause decides how
            # this clause combines with the accumulated result.
            if connector == "":
                result_series = matching_series
            elif connector == "or":
                result_series = result_series | matching_series
            else:  # and
                result_series = result_series & matching_series
            connector = next_connector
        return result_series
| akali/fuzzy | CWiPy/Syntax.py | Syntax.py | py | 7,092 | python | en | code | 2 | github-code | 36 |
def setup_grid(points: list) -> list:
    """Build a character grid from "x,y" coordinate strings.

    Each listed point is marked '#', everything else is ' '. The grid is
    (max_y + 2) rows by (max_x + 1) columns — one spare row, matching the
    original layout.
    """
    coords = set()
    width = depth = 0
    for point in points:
        parts = point.split(',')
        x, y = int(parts[0]), int(parts[1])
        coords.add((x, y))
        width = max(width, x)
        depth = max(depth, y)
    grid = [[' '] * (width + 1) for _ in range(depth + 2)]
    for x, y in coords:
        grid[y][x] = '#'
    return grid
def merge_lines(a: list, b: list) -> list:
    """Overlay row *a* onto row *b* in place: every non-blank cell of *a*
    overwrites the corresponding cell of *b*. Returns the mutated *b*."""
    for pos, cell in enumerate(a):
        if cell != ' ':
            b[pos] = cell
    return b
def fold_grid(grid: list, instructions: list) -> list:
    """Apply transparent-paper fold instructions ("fold along y=7" /
    "fold along x=5") to *grid* and return the folded grid.

    Non-blank cells from the folded-over half overwrite blanks in the
    kept half (the merge mutates the kept rows in place, as before).
    """
    for instruction in instructions:
        head, _, value = instruction.partition('=')
        axis, line = head[-1], int(value)
        if axis == 'y':
            kept = grid[:line]
            mirrored = grid[line + 1:][::-1]
        elif axis == 'x':
            kept = [row[:line] for row in grid]
            mirrored = [row[line + 1:][::-1] for row in grid]
        else:
            # Unknown axis: leave the grid untouched, like the original.
            continue
        folded = []
        for idx in range(len(mirrored)):
            target = kept[idx]
            for pos, cell in enumerate(mirrored[idx]):
                if cell != ' ':
                    target[pos] = cell
            folded.append(target)
        grid = folded
    return grid
# Input format: coordinate lines, one blank separator line, then the fold
# instructions.
with open('in.txt', 'r') as file:
    lines = file.read().splitlines()
separator = lines.index('')
instructions = lines[separator + 1:]
grid = setup_grid(lines[:separator])
grid = fold_grid(grid, instructions)
# Print the folded grid; the '#' cells spell the puzzle answer.
for line in grid:
    print(''.join(line)) | AG-Guardian/AdventOfCode2021 | Day 13/part2.py | part2.py | py | 1,626 | python | en | code | 0 | github-code | 36 |
from pymongo import MongoClient
# Connect to a local MongoDB instance and open the "mflix" sample database.
client = MongoClient('localhost', 27017)
database = client.mflix
# Aggregation: flatten each movie's cast array, count appearances per actor,
# and sort by appearance count descending.
pipline = [
    {'$unwind':'$cast'},
    {'$group':
    {
        '_id':'$cast',
        'count':{'$sum':1}
    }},
    {
        '$sort':{'count':-1}
    }]
actors = database.movies.aggregate(pipline)
# Each result document looks like {'_id': <actor name>, 'count': <n>}.
for actor in actors:
    print(actor)
| RezaeiShervin/MaktabSharif89 | Shervin_Rezaei_HW18_MaktabSharif89/Shervin_Rezaei_HW18_MaktabSharif89(7).py | Shervin_Rezaei_HW18_MaktabSharif89(7).py | py | 355 | python | en | code | 1 | github-code | 36 |
def tab_zam(file1, var):
    """Rewrite *file1* in place, expanding or collapsing tab characters.

    Args:
        file1: path of the text file to rewrite.
        var: "развернуть" replaces each tab with four spaces;
             "свернуть" replaces each run of four spaces with a tab.
             Anything else prints an error and leaves the file untouched.
    """
    with open(file1, 'r', encoding="utf-8") as file:
        if var == "развернуть":
            # Expand to four spaces so expand/collapse are inverse
            # operations (the original expanded to a single space, which
            # "свернуть" could not undo).
            x = file.read().replace("\t", "    ")
        elif var == "свернуть":
            x = file.read().replace("    ", "\t")
        else:
            print("Некорректный ввод")
            return
    # Write back with the same encoding we read with; the original relied
    # on the platform default encoding, which can fail on non-ASCII text.
    with open(file1, 'w', encoding="utf-8") as file:
        file.write(x)
# Interactive entry point: ask for a file path and the action, then rewrite
# the file accordingly.
file = input("Введите путь к файлу: ")
var = input("Выберите развернуть или свернуть символы табуляции: ")
tab_zam(file, var)
| IlyaOrlov/PythonCourse2.0_September23 | Practice/ssharygina/ssharygina5.5.py | ssharygina5.5.py | py | 596 | python | ru | code | 2 | github-code | 36 |
#PE 7
def nth_prime(n):
    """Return the n-th prime, 1-indexed (nth_prime(1) == 2).

    Trial division against the primes found so far, stopping at sqrt of
    the candidate. The original scanned every divisor up to sqrt(x) with
    no early break and hard-coded a 1,000,000 search bound; this version
    breaks early and grows until it has n primes.
    """
    primes = []
    candidate = 2
    while len(primes) < n:
        is_prime = True
        for p in primes:
            if p * p > candidate:
                # No prime factor <= sqrt(candidate) exists -> prime.
                break
            if candidate % p == 0:
                is_prime = False
                break
        if is_prime:
            primes.append(candidate)
        candidate += 1
    return primes[-1]

if __name__ == '__main__':
    # Project Euler 7: print the 10,001st prime (primes[10000] originally).
    print(nth_prime(10001))
| smailliwniloc/Project-Euler | PE0007.py | PE0007.py | py | 252 | python | en | code | 0 | github-code | 36 |
class Solution(object):
    def findContentChildren(self, g, s):
        """
        :type g: List[int]  greed factors of the children
        :type s: List[int]  cookie sizes
        :rtype: int         number of children that can be satisfied
        """
        # Greedy: hand the smallest sufficient cookie to the least greedy
        # child. Two index pointers replace the original pop(0) calls,
        # which were O(n) each and made the loop quadratic overall.
        g = sorted(g)
        s = sorted(s)
        child = cookie = 0
        while child < len(g) and cookie < len(s):
            if s[cookie] >= g[child]:
                # This cookie satisfies the current child.
                child += 1
            # Either way the cookie is consumed (given away or too small).
            cookie += 1
        return child
if __name__ == '__main__':
    # g = [1, 2, 3]
    # s = [1, 1]
    # g = [1, 2]
    # s = [1, 2, 3]
    # Sample run: greed [10, 9, 8, 7] with cookies [5, 6, 7, 8] -> 2.
    g = [10, 9, 8, 7]
    s = [5, 6, 7, 8]
    print(Solution().findContentChildren(g, s)) | pi408637535/Algorithm | com/study/algorithm/daily/455. Assign Cookies.py | 455. Assign Cookies.py | py | 608 | python | en | code | 1 | github-code | 36 |
25759812026 | #!/usr/bin/env python
import os
import json
from twitter import Api
# Custom import
from datetime import datetime
from datetime import date
import time
import re
import sys
def loadConfig(config_secret):
    """Load Twitter API credentials from a JSON file.

    Sets the module-level credential globals consumed by main(), and
    additionally returns them as a tuple so callers can use the values
    without relying on globals (backward compatible: the original
    returned None, which no caller used).

    Args:
        config_secret: path to a JSON file with keys consumer_key,
            consumer_secret, access_token and access_token_secret.

    Returns:
        (consumer_key, consumer_secret, access_token, access_token_secret)
    """
    # Go to http://apps.twitter.com and create an app.
    # The consumer key and secret will be generated for you after
    global CONSUMER_KEY
    global CONSUMER_SECRET
    # After the step above, you will be redirected to your app's page.
    # Create an access token under the the "Your access token" section
    global ACCESS_TOKEN
    global ACCESS_TOKEN_SECRET
    with open(config_secret, 'r') as cred:
        # json.load reads straight from the file object.
        json_data = json.load(cred)
    CONSUMER_KEY = json_data['consumer_key']
    CONSUMER_SECRET = json_data['consumer_secret']
    ACCESS_TOKEN = json_data['access_token']
    ACCESS_TOKEN_SECRET = json_data['access_token_secret']
    return CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET
# Users to watch for should be a list. This will be joined by Twitter and the
# data returned will be for any tweet mentioning:
# @twitter *OR* @twitterapi *OR* @support.
#USERS = ['@twitter', '@twitterapi', '@support']
# Streaming bounding boxes: [west lon, south lat, east lon, north lat].
LOCATIONS = ['-6.38','49.87','1.77','55.81']
UK = ['-5.95459','49.979488','-0.109863','58.12432'] # United Kingdom
US = ['-123.960279', '33.080519', '-60.996094', '45.336702'] # US
AU = ['105.785815', '-44.513723', '154.301442', '-12.449423'] # Australia
NZ = ['164.772949', '-47.15984', '179.626465', '-33.94336'] # New Zealand
SEA = ['90.825760', '-11.836210', '153.766943', '21.217420'] # South East Asian
AF = ['-25.195408', '-35.880958', '32.812407', '31.960635'] # African
# Rotation order of regions; main() switches region every DAY_CYCLE days.
COUNTRIES = ['UK', 'US', 'AU', 'NZ', 'SEA', 'AF']
DAY_CYCLE = 2
def getLocation(country_code):
    """Map a country code to its (bounding box, rotation index) pair.

    Unknown codes fall back to the UK box with index 0.
    """
    boxes = {
        'UK': (UK, 0),
        'US': (US, 1),
        'AU': (AU, 2),
        'NZ': (NZ, 3),
        'SEA': (SEA, 4),
        'AF': (AF, 5),
    }
    return boxes.get(country_code, (UK, 0))
def write_to_file(filename, text, append=True):
    """Write *text* (stringified) plus a newline to *filename*.

    Appends by default; truncates first when append is False.
    """
    with open(filename, 'a' if append else 'w') as fw:
        fw.write('{}\n'.format(text))
def make_sure_path_exists(path):
    """Best-effort mkdir -p: create *path* and parents, silently ignoring
    any OSError (e.g. when the directory already exists)."""
    try:
        os.makedirs(path)
    except OSError:
        pass
def normalize_tweet_text(tweet_text):
    """Normalize raw tweet text for CSV output.

    Replaces commas/newlines/tabs with spaces (they would break the CSV
    rows), strips http(s) links, and collapses runs of spaces.
    """
    ## Remove comma, linefeed, and tab
    tweet_text = re.sub('[,\n\t]', ' ', tweet_text)
    ## Remove http link from tweet_text
    tweet_text = re.sub('http?([-a-zA-Z0-9@:%_\+.~#?&//=])*', ' ', tweet_text)
    ## Remove multiple spaces
    tweet_text = re.sub(' +', ' ', tweet_text)
    # The original round-tripped through bytes here
    # (encode('utf-8','ignore') followed by str(...)), which on Python 3
    # wraps every tweet in a literal "b'...'"; Python 3 strings are
    # already Unicode, so no conversion is needed.
    return tweet_text
def extract_line(directory, today, line):
    """Parse one raw JSON tweet line and append a CSV summary row.

    Only English, geotagged tweets are kept. On success a row of
    (tweet_id, user_id, timestamp_ms, lat, lon, text, utc_offset) is
    appended to summary_<country_code>_<country>.csv inside *directory*.
    Any parse failure is logged to error_<today>.txt instead of raised.
    """
    line = line.strip()
    line = line.replace('\n', '\\n')
    if line == '':
        return
    line = json.loads(line, strict=False)
    try:
        try:
            lang = line['lang'] # String
            # English only
            if lang != 'en':
                return
        except:
            # Tweets with no 'lang' key fall through and are kept.
            pass
        # Extract line information
        try:
            geo = line['geo'] # String
        except Exception as ex:
            #print('Geo Exception %s' % ex)
            return
        #geo = line['geo'] # Object
        timestamp_ms = line['timestamp_ms'] # Long Integer
        user = line['user'] # Object
        #entities = line['entities'] # Object
        tweet_id = line['id'] # Integer
        tweet_text = line['text'] # String
        retweet_count = line['retweet_count']
        place = line['place']
        # Fallbacks used in the summary filename when 'place' is absent.
        ccode = 'NA'
        cname = 'default'
        if place is not None:
            ccode = place['country_code']
            cname = place['country']
        # Extract user information
        user_id = user['id'] # Integer
        utc_offset = user['utc_offset'] # Integer
        if utc_offset is None:
            utc_offset = ''
        else :
            utc_offset = str(utc_offset).strip()
        #friends_count = user['friends_count'] # Integer
        #followers_count = user['followers_count'] # Integer
        #statuses_count = user['statuses_count'] # Integer
        # Extract entities information
        #hashtags = entities['hashtags'] # Array of String
        #user_mentions = entities['user_mentions'] # Dictionary
        # Extract user_mentions information
        #for user_mention in user_mentions:
        # mentioned_id = user_mention['id']
        #print(str(mentioned_id)+'\n')
        # Print for testing
        #print(str(geo))
        #print(str(timestamp_ms))
        #print(str(user_id))
        #print(str(entities))
        #print(str(tweet_id))
        # For each geotagged tweets
        if geo is not None:
            #print(str(geo))
            try:
                # geo['coordinates'] is [lat, lon] per the Twitter API;
                # stringify each component for the CSV row.
                coordinates = geo['coordinates'] # Array of Float
                gps = []
                for var in coordinates:
                    gps.append(str(var))
            except Exception as ex:
                print('Coordinate Exception {}'.format(ex))
                return
            #print(gps[0])
            #print(gps[1])
            # Normalize text
            tweet_text = normalize_tweet_text(tweet_text)
            # Write all logs
            f_summary = 'summary_{0}_{1}.csv'.format(ccode, cname)
            csv_output = '{0},{1},{2},{3},{4},{5},{6}'.format(tweet_id, user_id, timestamp_ms, gps[0], gps[1], tweet_text, utc_offset)
            if csv_output != '':
                write_to_file(directory + f_summary, csv_output)
            #time.sleep(1)
    except Exception as ex:
        # Any unexpected shape is logged (with the raw line) rather than
        # crashing the stream.
        f_error = '{0}/error_{1}.txt'.format(directory, today)
        make_sure_path_exists(directory)
        with open(f_error, 'a') as fw:
            fw.write('[{0}] Extract Exception {1}\n'.format(str(datetime.now()),ex))
            fw.write('[{0}] {1}\n'.format(str(datetime.now()),line))
##########################
# Main function
##########################
def main():
    """Entry point.

    Two modes, selected by the argument count:
      * <directory> <country_code>  — stream geotagged tweets from Twitter
        for the given region, rotating regions every DAY_CYCLE days and
        appending raw JSON lines to <directory>/logs/log_<code>_<date>.txt.
      * <directory>                 — offline mode: walk *directory* for
        previously collected log_* files and convert them to CSV via
        extract_line().
    """
    arglen = len(sys.argv)
    USING_TWITTER = False
    if arglen == 3:
        directory = sys.argv[1]
        country_code = sys.argv[2]
        LOCATIONS, selected = getLocation(country_code)
        USING_TWITTER = True
    elif arglen == 2:
        directory = sys.argv[1]
    else :
        print('Please give two inputs: directory name and country code {US, UK, AU, NZ, SEA, AF}')
        return
    if directory != '':
        directory = directory + '/'
    if USING_TWITTER:
        loadConfig('config_secret.json')
        # Since we're going to be using a streaming endpoint, there is no need to worry
        # about rate limits.
        api = Api(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
    # api.GetStreamFilter will return a generator that yields one status
    # message (i.e., Tweet) at a time as a JSON dictionary.
    try:
        today = date.today()
        if USING_TWITTER:
            # counter/count_thousands track tweets seen today; str_out
            # buffers JSON lines and is flushed every 25 tweets.
            count_day = 0
            counter = 0
            count_thousands = 0
            print(country_code)
            print(today)
            str_out = ''
            while(True):
                for line in api.GetStreamFilter(locations=LOCATIONS):
                    # warning: "limit"
                    try:
                        if date.today() != today :
                            # Change day
                            today = date.today()
                            try:
                                print('[{0}] Processed {1:,} tweets'.format(str(datetime.now()), count_thousands*1000 + counter))
                                print('--- End of the day ---')
                            except:
                                pass
                            counter = 0
                            count_thousands = 0
                            count_day += 1
                            print(today)
                            # Write remaining data into file
                            if str_out != '':
                                write_to_file(f_complete, str_out)
                                str_out = ''
                            if count_day == DAY_CYCLE:
                                count_day = 0
                                # Change the countries
                                # (break restarts GetStreamFilter with the
                                # next region's bounding box)
                                selected = (selected + 1 ) % len(COUNTRIES)
                                country_code = COUNTRIES[selected]
                                LOCATIONS, selected = getLocation(country_code)
                                print(country_code)
                                break
                        # Write json to file
                        f_complete = '{0}/logs/log_{1}_{2}.txt'.format(directory, country_code, today)
                        #print json.dumps(line)
                        str_out = '{0}{1}\n'.format(str_out, json.dumps(line))
                        # Counter
                        counter = counter + 1
                        if counter % 25 == 0:
                            if str_out != '':
                                write_to_file(f_complete, str_out)
                                str_out = ''
                        if counter % 1000 == 0 and counter > 0:
                            counter = 0
                            count_thousands = count_thousands + 1
                            print('[{0}] Processed {1},000 tweets'.format(str(datetime.now()),count_thousands))
                    except Exception as ex:
                        # Per-line failures are logged and the stream continues.
                        f_error = '{0}/logs/error_{1}.txt'.format(directory, str(today))
                        with open(f_error, 'a') as fw:
                            fw.write('[{0}] Line Exception {1}\n'.format(str(datetime.now()),ex))
                            fw.write('[{0}] {1}\n'.format(str(datetime.now()),line))
        else:
            # Loop through os files
            # and create similar filename but using csv
            # Extract json and write into csv file
            for subdir, dirs, files in os.walk(directory):
                for file in files:
                    if file.startswith('log'):
                        print('[{0}] Processing file : {1}'.format(str(datetime.now()), file))
                        with open(directory + file, 'r') as fin:
                            for line in fin:
                                try:
                                    extract_line(directory, today, line)
                                except:
                                    pass
                        pass
            print('Program finished ')
    except Exception as ex:
        f_error = '{0}/logs/error_{1}.txt'.format(directory, str(today))
        make_sure_path_exists(directory + '/logs')
        write_to_file(f_error, '[{0}] Outer Exception {1}\n'.format(str(datetime.now()),ex))
##########################
# End of Main
##########################
if __name__ == '__main__':
    main() | gunarto90/twitter-stream | stream.py | stream.py | py | 11,136 | python | en | code | 1 | github-code | 36 |
async def is_member(user, guild):
    """Fetch *user* from *guild*.

    *user* may be a raw id given as str/int, or a member-like object
    carrying an ``id`` attribute.
    """
    member_id = int(user) if isinstance(user, (str, int)) else int(user.id)
    return await guild.fetch_member(member_id)
# ANSI escape codes keyed by the single-letter style tags accepted by log().
# "E" resets all attributes and is appended after every message.
TERMINAL_COLORS = {
    "H": "\033[95m", # header
    "BL": "\033[94m", # blue
    "C": "\033[96m", # cyan
    "G": "\033[92m", # green
    "W": "\033[93m", # warning
    "F": "\033[91m", # fail
    "E": "\033[0m", # end
    "B": "\033[1m", # bold
    "U": "\033[4m", # underline
}
def log(*args):
    """Print colour-coded messages to the terminal.

    Each positional argument is a (style, text) pair, where *style* is a
    TERMINAL_COLORS key or a list of keys (combined, e.g. bold + green).
    Each text is printed wrapped in the matching ANSI codes, followed by
    a reset, pairs separated by spaces, and a final blank line.
    """
    # ``*args`` is always a tuple, so the original
    # ``if not isinstance(args, tuple): args = tuple(args)`` was dead code.
    # ``type`` also shadowed the builtin; renamed to ``style``.
    for style, special_string in args:
        if isinstance(style, list):
            prefix = "".join(
                TERMINAL_COLORS.get(style_color.upper()) for style_color in style
            )
        else:
            prefix = TERMINAL_COLORS.get(style.upper())
        print(
            f"{prefix}{special_string}{TERMINAL_COLORS.get('E')}",
            end=" ",
        )
    print("\n")
| cnuebred/pyelectron | src/utils.py | utils.py | py | 1,027 | python | en | code | 1 | github-code | 36 |
40751652782 | import random, math
def generator(
name="problem-n",
cities = 2,
smallAirplanes = 1,
mediumAirplanes = 0,
largeAirplanes = 0,
trains = 1,
railwayFactor = 0.5,
smallTrucksPerCity = 1,
mediumTrucksPerCity = 0,
largeTrucksPerCity = 0,
officesPerCity=1,
smallPackages=10,
mediumPackages=5,
largePackages=10,
goalFactor = 0.5
):
# ==================
# Check input values
# ==================
if goalFactor < 0:
raise InputError("Factor must be larger or equal than zero")
elif goalFactor > 1:
raise InputError("Factor must be less or equal than one")
# ================
# Generate objects
# ================
Cities = []
Trainstations = []
Airports = []
Trucks = { "small": [], "medium": [], "large": [] }
Offices = []
Airplanes = { "small": [], "medium": [], "large": [] }
Trains = []
Railways = {}
Packages = { "small": [], "medium": [], "large": [] }
# Cities
for i in xrange(0,cities):
Cities.append("city-{}".format(i))
Trainstations.append("trainstation-{}".format(i))
Airports.append("airport-{}".format(i))
# Trcuks
a = []
for j in xrange(0,smallTrucksPerCity):
a.append("small-truck-{}-{}".format(i,j))
Trucks['small'].append(a)
a = []
for j in xrange(0,mediumTrucksPerCity):
a.append("medium-truck-{}-{}".format(i,j))
Trucks['medium'].append(a)
a = []
for j in xrange(0,largeTrucksPerCity):
a.append("large-truck-{}-{}".format(i,j))
Trucks['large'].append(a)
# Offices
a = []
for j in xrange(0,officesPerCity):
a.append("office-{}-{}".format(i,j))
Offices.append(a)
# Airplanes
for i in xrange(0,smallAirplanes):
Airplanes["small"].append("small-airplane-{}".format(i))
for i in xrange(0,mediumAirplanes):
Airplanes["medium"].append("medium-airplane-{}".format(i))
for i in xrange(0,largeAirplanes):
Airplanes["large"].append("large-airplane-{}".format(i))
# Trains
for i in xrange(0,trains):
Trains.append("train-{}".format(i))
# Packages
for i in xrange(0,smallPackages):
Packages["small"].append("small-package-{}".format(i))
for i in xrange(0,mediumPackages):
Packages["medium"].append("medium-package-{}".format(i))
for i in xrange(0,largePackages):
Packages["large"].append("large-package-{}".format(i))
# Railways
size = int(math.ceil(cities*railwayFactor))
for i in xrange(0,size):
for j in xrange(0,size):
Railways["Railway-{}-{}".format(i,size-j)] = [i,size-j]
output = "(define (problem {})\n".format(name)
output += " (:domain logistics)\n"
output += " (:objects\n"
output += " "
# =======================
# Creates all the objects
# =======================
row = 1
newRow = 5
# Cities
for i in xrange(0,cities):
output += "{} ".format(Cities[i])
row = row + 1
if row%newRow == 0:
output += "\n "
# Trucks
for j in xrange(0,smallTrucksPerCity):
output += "{} ".format(Trucks["small"][i][j])
row = row + 1
if row%newRow == 0:
output += "\n "
for j in xrange(0,mediumTrucksPerCity):
output += "{} ".format(Trucks["medium"][i][j])
row = row + 1
if row%newRow == 0:
output += "\n "
for j in xrange(0,largeTrucksPerCity):
output += "{} ".format(Trucks["large"][i][j])
row = row + 1
if row%newRow == 0:
output += "\n "
# Offices
for j in xrange(0,officesPerCity):
output += "{} ".format(Offices[i][j])
row = row + 1
if row%newRow == 0:
output += "\n "
# Airplanes
for i in xrange(0,smallAirplanes):
output += "{} ".format(Airplanes["small"][i])
row = row + 1
if row%newRow == 0:
output += "\n "
for i in xrange(0,mediumAirplanes):
output += "{} ".format(Airplanes["medium"][i])
row = row + 1
if row%newRow == 0:
output += "\n "
for i in xrange(0,largeAirplanes):
output += "{} ".format(Airplanes["large"][i])
row = row + 1
if row%newRow == 0:
output += "\n "
# Trains
for i in xrange(0,trains):
output += "{} ".format(Trains[i])
row = row + 1
if row%newRow == 0:
output += "\n "
# Airports
for i in xrange(0,cities):
output += "{} ".format(Airports[i])
row = row + 1
if row%newRow == 0:
output += "\n "
# Train stations
for i in xrange(0,cities):
output += "{} ".format(Trainstations[i])
row = row + 1
if row%newRow == 0:
output += "\n "
# Packages
for i in xrange(0,smallPackages):
output += "{} ".format(Packages["small"][i])
row = row + 1
if row%newRow == 0:
output += "\n "
for i in xrange(0,mediumPackages):
output += "{} ".format(Packages["medium"][i])
row = row + 1
if row%newRow == 0:
output += "\n "
for i in xrange(0,largePackages):
if i == largePackages:
output += "{}".format(Packages["large"][i])
else:
output += "{} ".format(Packages["large"][i])
row = row + 1
if row%newRow == 0:
output += "\n "
output += "\n"
output += " )\n"
output += " (:init\n"
# ======================
# Initialize all objects
# ======================
row = 0
# Cities
for i in xrange(0,cities):
output += " (city {}) ".format(Cities[i])
output += "\n"
# Trucks
for j in xrange(0,smallTrucksPerCity):
output += " (truck {0}) (small-vehicle {0}) (at {0} {1})\n".format(Trucks["small"][i][j], Offices[i][random.randint(0,len(Offices[i])-1)])
for j in xrange(0,mediumTrucksPerCity):
output += " (truck {0}) (medium-vehicle {0}) (at {0} {1})\n".format(Trucks["medium"][i][j], Offices[i][random.randint(0,len(Offices[i])-1)])
for j in xrange(0,largeTrucksPerCity):
output += " (truck {0}) (large-vehicle {0}) (at {0} {1})\n".format(Trucks["large"][i][j], Offices[i][random.randint(0,len(Offices[i])-1)])
# Offices
for j in xrange(0,officesPerCity):
output += " (location {0}) (loc {0} {1})\n".format(Offices[i][j],Cities[i])
output += "\n"
row = 0
# Airports
for i in xrange(0,cities):
output += " (airport {0}) (location {0}) (loc {0} {1})\n".format(Airports[i],Cities[i])
# Airports
for i in xrange(0,cities):
output += " (trainstation {0}) (location {0}) (loc {0} {1})\n".format(Trainstations[i],Cities[i])
# Airplanes
for i in xrange(0,smallAirplanes):
output += " (airplane {0}) (small-vehicle {0}) (at {0} {1})\n".format(Airplanes["small"][i],random.choice(Airports))
if smallAirplanes != 0:
output += "\n"
for i in xrange(0,mediumAirplanes):
output += " (airplane {0}) (medium-vehicle {0}) (at {0} {1})\n".format(Airplanes["medium"][i],random.choice(Airports))
if mediumAirplanes != 0:
output += "\n"
for i in xrange(0,largeAirplanes):
output += " (airplane {0}) (large-vehicle {0}) (at {0} {1})\n".format(Airplanes["large"][i],random.choice(Airports))
if largeAirplanes != 0:
output += "\n"
# Trains
for i in xrange(0,trains):
output += " (train {0}) (large-vehicle {0}) (at {0} trainstation-{1})\n".format(Trains[i],i)
if trains != 0:
output += "\n"
# Railways
for key in Railways:
output += " (railway {0} {1}) (railway {0} {1})\n".format(Trainstations[Railways[key][0]],Trainstations[Railways[key][1]])
if len(Railways) != 0:
output += "\n"
# Packages
for i in xrange(0,smallPackages):
output += " (small-object {0}) (at {0} {1})\n".format(Packages["small"][i],random.choice(random.choice(Offices)))
if smallPackages != 0:
output += "\n"
for i in xrange(0,mediumPackages):
output += " (medium-object {0}) (at {0} {1})\n".format(Packages["medium"][i],random.choice(random.choice(Offices)))
if mediumPackages != 0:
output += "\n"
for i in xrange(0,largePackages):
output += " (large-object {0}) (at {0} {1})\n".format(Packages["large"][i],random.choice(random.choice(Offices)))
if largePackages != 0:
output += "\n"
output += " )\n"
# ===========
# Create goal
# ===========
output += " (:goal\n"
output += " (and\n"
shuffledSmallPackages = random.sample(Packages["small"],int(math.ceil(smallPackages*goalFactor)))
for i in xrange(0,len(shuffledSmallPackages)):
output += " (at {} {})\n".format(shuffledSmallPackages[i],random.choice(random.choice(Offices)))
shuffledMediumPackages = random.sample(Packages["medium"],int(math.ceil(mediumPackages*goalFactor)))
for i in xrange(0,len(shuffledMediumPackages)):
output += " (at {} {})\n".format(shuffledMediumPackages[i],random.choice(random.choice(Offices)))
shuffledLargePackages = random.sample(Packages["large"],int(math.ceil(largePackages*goalFactor)))
for i in xrange(0,len(shuffledLargePackages)):
output += " (at {} {})\n".format(shuffledLargePackages[i],random.choice(random.choice(Offices)))
output += " )\n"
output += " )\n"
output += ")"
return output
def main():
    """Generate a batch of logistics PDDL problems of increasing size.

    For each city count ``c`` the other object counts are scaled as fixed
    fractions of ``c``, and the resulting problem text is written to
    generated-problems/cities/<name>.pddl (the directory must already
    exist — ``open(..., 'w')`` will not create it).
    """
    # Defaults of generator(), kept here for reference:
    # name="problem-n",
    # cities = 2,
    # smallAirplanes = 1,
    # mediumAirplanes = 0,
    # largeAirplanes = 0,
    # trains = 1,
    # railwayFactor = 0.5,
    # smallTrucksPerCity = 1,
    # mediumTrucksPerCity = 0,
    # largeTrucksPerCity = 0,
    # officesPerCity=1,
    # smallPackages=10,
    # mediumPackages=5,
    # largePackages=10,
    # goalFactor = 0.5
    # Problem sizes to generate (number of cities).
    for c in [2,3,4,5,6,7,8,9,10,12,14,16,18,20,25,30,35]:
        name = "city-problem-1-office-{}".format(c)
        prob = generator(
            name=name,
            cities=c,
            smallAirplanes=int(c*0.2),
            mediumAirplanes=int(c*0.5),
            largeAirplanes=int(c*0.3),
            trains=int(c*0.3),
            largeTrucksPerCity=1,
            officesPerCity=1,
            smallPackages=int(c*0.5),
            mediumPackages=int(c*0.4),
            largePackages=int(c*0.1),
            railwayFactor = 0.5
        )
        with open("generated-problems/cities/{}.pddl".format(name), 'w') as f:
            f.write(prob)
if __name__ == "__main__":
    main()
| owodunni-lfischerstrom/tddc17-lab4 | generator.py | generator.py | py | 10,199 | python | en | code | 0 | github-code | 36 |
4292641099 | from django.contrib.auth.models import User
from django.test import TestCase
from note.forms import NoteAddForm, NoteEditForm
from note.models import Note
class NoteFormsTestCase(TestCase):
    """Unit tests for NoteAddForm and NoteEditForm validation and saving."""
    def setUp(self):
        # Arrange: a user to own notes, and valid default form data.
        self.user = User.objects.create_user(username='test_user', password='test_pass')
        self.note_data = {
            'title': 'Test Note',
            'content': 'Test content',
        }
    def test_note_add_form_valid(self):
        # Act: bind the form with valid data
        form = NoteAddForm(data=self.note_data)
        # Assert
        self.assertTrue(form.is_valid())
    def test_note_add_form_invalid(self):
        # Arrange: blank out the required title
        form_data = self.note_data.copy()
        form_data['title'] = ''
        # Act
        form = NoteAddForm(data=form_data)
        # Assert
        self.assertFalse(form.is_valid())
    def test_note_add_form_save(self):
        # Arrange / Act: bind and validate
        form = NoteAddForm(data=self.note_data)
        # Assert
        self.assertTrue(form.is_valid())
        # Act: save with the author attached (form excludes author)
        note = form.save(commit=False)
        note.author = self.user
        note.save()
        # Assert: the note is saved correctly
        self.assertEqual(Note.objects.count(), 1)
        saved_note = Note.objects.first()
        self.assertEqual(saved_note.title, self.note_data['title'])
        self.assertEqual(saved_note.content, self.note_data['content'])
        self.assertEqual(saved_note.author, self.user)
    def test_note_edit_form_valid(self):
        # Arrange: an existing note to edit
        note = Note.objects.create(title='Initial Title', content='Initial content', author=self.user)
        form_data = {
            'title': 'Updated Title',
            'content': 'Updated content',
        }
        # Act
        form = NoteEditForm(data=form_data, instance=note)
        # Assert
        self.assertTrue(form.is_valid())
    def test_note_edit_form_invalid(self):
        # Arrange: an existing note to edit
        note = Note.objects.create(title='Initial Title', content='Initial content', author=self.user)
        form_data = {
            'title': '', # Empty title
            'content': 'Updated content',
        }
        # Act
        form = NoteEditForm(data=form_data, instance=note)
        # Assert
        self.assertFalse(form.is_valid())
    def test_note_edit_form_save(self):
        # Arrange: an existing note to edit
        note = Note.objects.create(title='Initial Title', content='Initial content', author=self.user)
        form_data = {
            'title': 'Updated Title',
            'content': 'Updated content',
        }
        # Act
        form = NoteEditForm(data=form_data, instance=note)
        # Assert
        self.assertTrue(form.is_valid())
        # Act: persist the edit
        updated_note = form.save()
        # Assert: note is updated correctly
        self.assertEqual(updated_note.title, form_data['title'])
        self.assertEqual(updated_note.content, form_data['content'])
| mehdirahman88/django_notes | note/tests/test_forms.py | test_forms.py | py | 2,820 | python | en | code | 0 | github-code | 36 |
8711592711 | import cx_Oracle
class modulo():
    """CRUD helpers for the `secciones` table of the school Oracle DB.

    NOTE: the CRUD methods are written without `self` and are intended to
    be called on the class itself (e.g. ``modulo.crearModulo()``); the
    original signatures are preserved for backward compatibility.

    Bug fix applied to every method: ``conexion``/``cursor`` are
    initialized to ``None`` before the ``try``, and the ``finally`` block
    only closes what was actually opened. Previously, if
    ``cx_Oracle.connect()`` failed, ``cursor`` was unbound and the
    ``finally`` block raised ``NameError``, masking the real error.
    """
    # Default attribute values for a section.
    codigoSeccion = 0
    ramo1 = ""
    ramo2 = ""
    ramo3 = ""
    ramo4 = ""

    def __init__(self, codSec):
        self.codigoSeccion = codSec

    def crearModulo():
        """Prompt for new section data and insert a row into `secciones`."""
        conexion = None
        cursor = None
        try:
            conexion = cx_Oracle.connect(
                user='escuela',
                password='1234',
                dsn='localhost:1521/xe'
            )
            cursor = conexion.cursor()
            codigoSeccion = input("Indique codigo de seccion a crear: ")
            ramo1 = int(input("Indique ramo 1: "))
            ramo2 = int(input("Indique ramo 2: "))
            ramo3 = int(input("Indique ramo 3: "))
            ramo4 = int(input("Indique ramo 4: "))
            cursor.execute(''' insert into secciones (codigosSeccion,ramo1,ramo2,ramo3,ramo4)
            values (:cs,:r1,:r2,:r3,:r4)''',cs=codigoSeccion,r1=ramo1,r2=ramo2,r3=ramo3,r4=ramo4)
            conexion.commit()
            print ("Seccion creada con exito!! ")
        except Exception:
            # Broad catch kept so interactive use never crashes, but it no
            # longer swallows KeyboardInterrupt/SystemExit.
            print ("Error al crear modulo!!")
        finally:
            if cursor is not None:
                cursor.close()
            if conexion is not None:
                conexion.close()

    def editarModulo():
        """Prompt for a section code and update its four `ramo` columns."""
        conexion = None
        cursor = None
        try:
            conexion = cx_Oracle.connect(
                user='escuela',
                password='1234',
                dsn='localhost:1521/xe'
            )
            cursor = conexion.cursor()
            codigoSeccion = input("Indique codigo de seccion a editar: ")
            ramo1 = int(input("Indique nuevo ramo 1: "))
            ramo2 = int(input("Indique nuevo ramo 2: "))
            ramo3 = int(input("Indique nuevo ramo 3: "))
            ramo4 = int(input("Indique nuevo ramo 4: "))
            cursor.execute(''' update secciones set ramo1=:r1, ramo2=:r2,ramo3=:r3,ramo4=:r4
            where codigosSeccion=:cod''',cod=codigoSeccion,r1=ramo1,r2=ramo2,r3=ramo3,r4=ramo4)
            conexion.commit()
            print ("Modulo editado correctamente!!")
        except Exception:
            print ("Error al editar modulo!!")
        finally:
            if cursor is not None:
                cursor.close()
            if conexion is not None:
                conexion.close()

    def eliminarModulo():
        """Prompt for a section code and delete the matching row."""
        conexion = None
        cursor = None
        try:
            conexion = cx_Oracle.connect(
                user='escuela',
                password='1234',
                dsn='localhost:1521/xe'
            )
            cursor = conexion.cursor()
            idS = input("Indique seccion que desea eliminar: ")
            cursor.execute(''' delete from secciones where codigosSeccion=:id ''',id=idS)
            conexion.commit()
            print ("Modulo eliminado correctamente!! ")
        except Exception:
            print ("Error al eliminar modulo!!")
        finally:
            if cursor is not None:
                cursor.close()
            if conexion is not None:
                conexion.close()

    def mostrarModulos():
        """Print every row of `secciones` in a human-readable layout."""
        conexion = None
        cursor = None
        try:
            conexion = cx_Oracle.connect(
                user='escuela',
                password='1234',
                dsn='localhost:1521/xe'
            )
            cursor = conexion.cursor()
            cursor.execute(''' select * from secciones ''')
            res = cursor.fetchall()
            for row in res:
                print("\n|Sección:",row[0], "|Ramos:", row[1],"-",row[2],"-",row[3],"-",row[4])
        except Exception:
            print ("Error al mostrar modulos!! ")
        finally:
            if cursor is not None:
                cursor.close()
            if conexion is not None:
                conexion.close()
2180953342 | from flask import Flask, jsonify, request
import datetime
import fetchNavigationData
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False
@app.route('/api', methods=['GET'])
def index():
    """Return, as JSON, the two station lists between `first` and `second`
    plus a single station `result` chosen by time-difference between the
    two directions.
    """
    first = request.args.get('first', '')
    second = request.args.get('second', '')
    # Station lists in each travel direction; each entry presumably has
    # "time" ('%H:%M') and "name" keys — confirm against fetchNavigationData.
    json1 = fetchNavigationData.fetch_station_list(first, second)
    json2 = fetchNavigationData.fetch_station_list(second, first)
    json2.reverse()
    d = {}
    # d maps station index -> absolute time difference (seconds) between
    # the two directions at that stop.
    for index in range(len(json1)):
        if index < len(json1) and index < len(json2):
            time1 = datetime.datetime.strptime(json2[index]["time"], '%H:%M')
            time2 = datetime.datetime.strptime(json1[index]["time"], '%H:%M')
            diff = time2 - time1
            diff = int(diff.total_seconds())
            if diff < 0:
                diff = -1 * diff
            d[index] = diff
    # NOTE(review): `result` is overwritten on every iteration, so after the
    # loop it holds the station with the LARGEST difference (last of the
    # ascending sort) — confirm whether the smallest was intended. Also,
    # `result` is unbound (NameError) when both lists are empty.
    for k, v in sorted(d.items(), key=lambda x:x[1]):
        result = json1[k]["name"]
        if(len(json2) < len(json1)):
            result = json2[k]["name"]
    json2.reverse()
    return jsonify({
        'result': result,
        'way': [json1, json2]
    })
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80)
| 5ym/smaen | back/module/app.py | app.py | py | 1,208 | python | en | code | 0 | github-code | 36 |
932987117 | from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Piece
@receiver(post_save, sender=Piece)
def save_base64_thumbnail(**kwargs):
    """post_save handler: cache a base64 data-URI thumbnail on the saved Piece.

    Regenerates the thumbnail after every save, then persists it with a
    narrow ``update_fields`` so the guard below can detect (and skip) the
    save triggered by this handler itself.
    """
    update_fields = kwargs["update_fields"]
    # Without this, the signal will be called in an infinite loop.
    if update_fields is not None and "image_b64_thumbnail" in update_fields:
        return
    piece = kwargs["instance"]
    b64thumb = piece.generate_base64_data_thumbnail()
    piece.image_b64_thumbnail = b64thumb
    # Save only the thumbnail field; this re-triggers the signal once, and
    # the guard above terminates the recursion.
    piece.save(update_fields=["image_b64_thumbnail"])
| ChrisCrossCrash/chriskumm.com_django | art/signals.py | signals.py | py | 569 | python | en | code | 0 | github-code | 36 |
28512545517 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
# Utility classes that can be used to generate parse tree patterns. These
# utilities take a sample expression or statement, and return a parse tree that
# uses symbolic names for the nodes. You'll need to then do additional editing on
# the parse tree as needed (for example, replacing a specific value with a pattern).
import parser
from symbol import sym_name
from token import tok_name
from pprint import pprint
# pretty-prints a symbolic parse tree for expr (as for use with 'eval')
# the symbolic names will be strings, so to use this as a constant
# in some code you'll need to replace the quotes with nothing
# (except for the actual string constants ...)
def print_eval_tree(expr):
    """Pretty-print the symbolic parse tree of *expr*, an expression string
    (as for 'eval').

    NOTE: relies on the `parser` and `symbol` stdlib modules, which were
    removed in Python 3.10 — this script requires an older interpreter.
    """
    t = parser.ast2tuple(parser.expr(expr))
    # t = parser.ast2tuple(parser.suite(expr))
    pprint(integer2symbolic(t))
# same as print_eval_tree, except as for use with 'exec' (for definitions, statements, etc)
def print_exec_tree(expr):
    """Pretty-print the symbolic parse tree of *expr*, a statement/suite
    string (as for 'exec').

    NOTE: relies on the `parser` and `symbol` stdlib modules, which were
    removed in Python 3.10 — this script requires an older interpreter.
    """
    t = parser.ast2tuple(parser.suite(expr))
    pprint(integer2symbolic(t))
# take a parse tree represented as a tuple, and return a new tuple
# where the integers representing internal nodes and terminal nodes are
# replaced with symbolic names
def integer2symbolic(fragment):
    """Recursively replace the numeric node ids in a parse-tree tuple with
    dotted symbolic names ('symbol.<name>' for non-terminals,
    'token.<name>' for terminals)."""
    node_id, *children = fragment
    if node_id in sym_name:
        # Non-terminal: convert every child subtree as well.
        converted = tuple(integer2symbolic(child) for child in children)
        return ('symbol.' + sym_name[node_id],) + converted
    if node_id in tok_name:
        # Terminal: keep the token text (and any position info) untouched.
        return ('token.' + tok_name[node_id],) + tuple(children)
    raise ValueError("bad value in parsetree")
# examples of use:
# print_eval_tree("urbansim.gridcell.population**2")
# print_exec_tree("x = urbansim.gridcell.population**2")
s = """def foo(x=5):
y = x+3
return y*2
"""
print_exec_tree(s)
| psrc/urbansim | opus_core/variables/utils/parse_tree_pattern_generator.py | parse_tree_pattern_generator.py | py | 1,935 | python | en | code | 4 | github-code | 36 |
13422218557 | import colors
##################################################################
#This is the module used for testing correctness. It performs #
#safety, liveliness and fairness test on the list of values sent #
#from the monitor. #
##################################################################
def testCorrectness(rows,istoken):
    """Run the safety, liveliness and fairness checks over the monitor's
    event rows and print a colored pass/fail line for each.

    Args:
        rows: event tuples of the form (time, requester, cs_enterer,
              releaser) where unused fields hold the string 'None'.
        istoken: True for token-based algorithms, which skip the
                 release-sync half of the safety test.
    """
    color = colors.bcolors()
    # Safety is the only hard requirement: failure prints in FAIL red,
    # while the other two merely warn.
    if safetyTest(rows,istoken) == True:
        print (color.OKGREEN+'Safety Test Passed!!!')
    else:
        print (color.FAIL+'Safety Test Failed!!!')
    if livelinessTest(rows) == True:
        print (color.OKGREEN + 'Liveliness Test Passed!!!')
    else:
        print (color.WARNING + 'Liveliness Test Failed!!!')
    if fairnessTest(rows) == True:
        print (color.OKGREEN + 'Fairness Test Passed!!!')
    else:
        print (color.WARNING + 'Fairness Test Failed!!!')
    print (color.ENDC)
def safetyTest(rows,istoken):
    """ This function tests the safety property of the algorithm.
    It does that in 2 steps: 1) CSSafe Test, 2) ReleaseSyncTest
    1) CSSafe Test: This test ensures that at any time 'T' only one process uses CS.
    2) ReleaseSync Test: This test ensures that only the process which executed CS, is releasing a resource.
    For token-based algorithms (istoken True) only step 1 is performed.
    """
    csTest = isCSSafe(rows)
    # NOTE(review): token-based runs skip the release-sync check —
    # presumably because possession of the token makes explicit release
    # events unnecessary; confirm against the monitor's logging.
    if istoken:
        return csTest
    releaseTest = isReleaseSync(rows)
    return csTest and releaseTest
def isCSSafe(rows):
    """Mutual-exclusion check: no two CS entries may share a timestamp.

    Returns True when every timestamp (row[0]) has at most one process
    entering the critical section (row[2]); prints each violation found.
    """
    occupant_at_time = {}
    safe = True
    for row in rows:
        timestamp, entering = row[0], row[2]
        if entering == 'None':
            continue
        if timestamp in occupant_at_time:
            print('!!!!!!!!!!!!!!!!' + str(entering) + ' and ' + str(occupant_at_time[timestamp]) + 'are in the CS at the same time T=' + str(timestamp))
            safe = False
        else:
            occupant_at_time[timestamp] = entering
    print("Is CS safe: " + str(safe))
    return safe
def isReleaseSync(rows):
    """Check that releases are synchronized with CS usage.

    A row may record a CS entry (row[2]) and/or a release (row[3]).
    Returns False if a process enters while the CS is still held, or if
    a release comes from a process other than the current holder.
    """
    holder = 'None'
    for row in rows:
        entering, releasing = row[2], row[3]
        if entering != 'None':
            if holder != 'None':
                # Someone entered while the CS was still occupied.
                return False
            holder = entering
        if releasing != 'None':
            if releasing != holder:
                # A process released a CS it did not hold.
                return False
            holder = 'None'
    print("Release is sync")
    return True
def livelinessTest(rows):
    """ This function checks if every process that requests for CS, eventually gets served"""
    # firstEntry: no process has entered the CS yet.
    # requestCount: requests queued while the CS was busy (or before any entry).
    # processInCS: id of the current CS occupant, or 'None'.
    # release: the previous row released the CS.
    firstEntry = True
    requestCount = 0
    processInCS = 'None'
    release = False
    for row in rows:
        if row[1] != 'None':
            # A request is acceptable while the CS is busy (or before the
            # first entry); a request while the CS sits idle means the
            # requester is waiting needlessly -> liveliness violated.
            if firstEntry or processInCS != 'None':
                requestCount += 1
            elif release:
                pass
            else:
                print ("Process " + str(row[1]) + " is unneccessarily waiting for CS at time " + str(row[0]))
                return False
        if row[2] != 'None':
            firstEntry = False
            processInCS = row[2]
        if row[3] == processInCS:
            processInCS = 'None'
            release = True
            continue
        # NOTE(review): this fails when the CS is idle but requests remain
        # outstanding — presumably "pending requests not being served";
        # requestCount is never decremented, so verify against the monitor's
        # row format before relying on this check.
        if (processInCS == 'None' and requestCount != 0 and not firstEntry):
            return False
        release = False
    return True
def fairnessTest(rows):
    """Check FIFO fairness: processes must enter the CS in request order.

    Walks the event rows, queueing each CS request (row[1]) and checking
    that every CS entry (row[2]) belongs to the process at the head of
    the queue.

    Returns:
        True when the service order matches the request order (FIFO),
        False on the first out-of-order entry.
    """
    queue = []
    for row in rows:
        if row[1] != 'None':
            # A new request joins the back of the FIFO queue.
            queue.append(row[1])
            continue
        # Bug fix: the `not queue` guard prevents an IndexError when a
        # process enters the CS without a logged request (previously
        # `queue[0]` crashed on an empty queue). The bcolors instance is
        # now built lazily, only on the failure path.
        if row[2] != 'None' and (not queue or queue[0] != row[2]):
            color = colors.bcolors()
            print (color.WARNING + "Process " + str(row[2]) + "jumped ahead of the queue. Fairness violated at " + str(row[0]) )
            return False
        elif row[2] != 'None':
            queue.remove(row[2])
    return True
| NishanthMuruganandam/AsynchronousSystems | Correctness_Verif_Performance_Measure_DistAlgos/correctnessTester.py | correctnessTester.py | py | 3,322 | python | en | code | 0 | github-code | 36 |
36570276493 | import datetime
import urllib
import urllib.parse
from mpcomp import http_core
try:
import simplejson
from simplejson.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = None
try:
# Try to import from django, should work on App Engine
from django.utils import simplejson
except ImportError:
# Should work for Python2.6 and higher.
import json as simplejson
__author__ = "j.s@google.com (Jeff Scudder)"
PROGRAMMATIC_AUTH_LABEL = "GoogleLogin auth="
AUTHSUB_AUTH_LABEL = "AuthSub token="
OAUTH2_AUTH_LABEL = "Bearer "
# This dict provides the AuthSub and OAuth scopes for all services by service
# name. The service name (key) is used in ClientLogin requests.
AUTH_SCOPES = {
"cl": ( # Google Calendar API
"https://www.google.com/calendar/feeds/",
"http://www.google.com/calendar/feeds/",
),
"gbase": ( # Google Base API
"http://base.google.com/base/feeds/",
"http://www.google.com/base/feeds/",
),
"blogger": ("http://www.blogger.com/feeds/",), # Blogger API
"codesearch": ( # Google Code Search API
"http://www.google.com/codesearch/feeds/",
),
"cp": ( # Contacts API
"https://www.google.com/m8/feeds/",
"http://www.google.com/m8/feeds/",
),
"finance": ("http://finance.google.com/finance/feeds/",), # Google Finance API
"health": ("https://www.google.com/health/feeds/",), # Google Health API
"writely": ( # Documents List API
"https://docs.google.com/feeds/",
"https://spreadsheets.google.com/feeds/",
"https://docs.googleusercontent.com/",
),
"lh2": ("http://picasaweb.google.com/data/",), # Picasa Web Albums API
"apps": ( # Google Apps Domain Info & Management APIs
"https://apps-apis.google.com/a/feeds/user/",
"https://apps-apis.google.com/a/feeds/policies/",
"https://apps-apis.google.com/a/feeds/alias/",
"https://apps-apis.google.com/a/feeds/groups/",
"https://apps-apis.google.com/a/feeds/compliance/audit/",
"https://apps-apis.google.com/a/feeds/migration/",
"https://apps-apis.google.com/a/feeds/emailsettings/2.0/",
),
"weaver": ("https://www.google.com/h9/feeds/",), # Health H9 Sandbox
"wise": ("https://spreadsheets.google.com/feeds/",), # Spreadsheets Data API
"sitemaps": ( # Google Webmaster Tools API
"https://www.google.com/webmasters/tools/feeds/",
),
"youtube": ( # YouTube API
"http://gdata.youtube.com/feeds/api/",
"http://uploads.gdata.youtube.com/feeds/api",
"http://gdata.youtube.com/action/GetUploadToken",
),
"books": ("http://www.google.com/books/feeds/",), # Google Books API
"analytics": ("https://www.google.com/analytics/feeds/",), # Google Analytics API
"jotspot": ( # Google Sites API
"http://sites.google.com/feeds/",
"https://sites.google.com/feeds/",
),
# "local": ("http://maps.google.com/maps/feeds/",), # Google Maps Data API
"code": ("http://code.google.com/feeds/issues",), # Project Hosting Data API
}
class Error(Exception):
    """Base exception for all errors raised by this module."""
    pass
class UnsupportedTokenType(Error):
    """Raised when a token cannot be converted to or from its blob form."""
    pass
class OAuth2AccessTokenError(Error):
    """Raised when an OAuth2 error occurs."""
    def __init__(self, error_message):
        # Human-readable description of the OAuth2 failure.
        self.error_message = error_message
class OAuth2RevokeError(Error):
    """Raised when an OAuth2 token revocation was unsuccessful."""
    def __init__(self, http_response, response_body=None):
        """Sets the HTTP information in the error.
        Args:
            http_response: The response from the server, contains error information.
            response_body: string (optional) specified if the response has already
                been read from the http_response object.
        """
        body = response_body or http_response.read()
        self.status = http_response.status
        self.reason = http_response.reason
        self.body = body
        self.headers = http_core.get_headers(http_response)
        self.error_msg = "Invalid response %s." % self.status
        # Prefer the server-supplied "error" field when the body is JSON.
        try:
            json_from_body = simplejson.loads(body)
            if isinstance(json_from_body, dict):
                self.error_msg = json_from_body.get("error", self.error_msg)
        except (ValueError, JSONDecodeError):
            # NOTE(review): when simplejson is absent, the module sets
            # JSONDecodeError = None, and `except (ValueError, None)` would
            # itself raise TypeError if a non-ValueError is thrown — confirm.
            pass
    def __str__(self):
        return "OAuth2RevokeError(status=%i, error=%s)" % (self.status, self.error_msg)
REQUEST_TOKEN = 1
AUTHORIZED_REQUEST_TOKEN = 2
ACCESS_TOKEN = 3
class OAuth2Token(object):
"""Token object for OAuth 2.0 as described on
<http://code.google.com/apis/accounts/docs/OAuth2.html>.
Token can be applied to a gdata.client.GDClient object using the authorize()
method, which then signs each request from that object with the OAuth 2.0
access token.
This class supports 3 flows of OAuth 2.0:
Client-side web flow: call generate_authorize_url with `response_type='token''
and the registered `redirect_uri'.
Server-side web flow: call generate_authorize_url with the registered
`redirect_url'.
Native applications flow: call generate_authorize_url as it is. You will have
to ask the user to go to the generated url and pass in the authorization
code to your application.
"""
def __init__(
self,
client_id,
client_secret,
scope,
user_agent,
auth_uri="https://accounts.google.com/o/oauth2/auth",
token_uri="https://accounts.google.com/o/oauth2/token",
access_token=None,
refresh_token=None,
revoke_uri="https://accounts.google.com/o/oauth2/revoke",
):
"""Create an instance of OAuth2Token
Args:
client_id: string, client identifier.
client_secret: string client secret.
scope: string, scope of the credentials being requested.
user_agent: string, HTTP User-Agent to provide for this application.
auth_uri: string, URI for authorization endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
token_uri: string, URI for token endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
revoke_uri: string, URI for revoke endpoint. For convenience
defaults to Google's endpoints but any OAuth 2.0 provider can be used.
access_token: string, access token.
refresh_token: string, refresh token.
"""
self.client_id = client_id
self.client_secret = client_secret
self.scope = scope
self.user_agent = user_agent
self.auth_uri = auth_uri
self.token_uri = token_uri
self.revoke_uri = revoke_uri
self.access_token = access_token
self.refresh_token = refresh_token
# True if the credentials have been revoked or expired and can't be
# refreshed.
self._invalid = False
@property
def invalid(self):
"""True if the credentials are invalid, such as being revoked."""
return getattr(self, "_invalid", False)
def _refresh(self, request):
"""Refresh the access_token using the refresh_token.
Args:
request: The atom.http_core.HttpRequest which contains all of the
information needed to send a request to the remote server.
"""
body = urllib.parse.urlencode(
{
"grant_type": "refresh_token",
"client_id": self.client_id,
"client_secret": self.client_secret,
"refresh_token": self.refresh_token,
}
)
headers = {
"user-agent": self.user_agent,
}
http_request = http_core.HttpRequest(
uri=self.token_uri, method="POST", headers=headers
)
http_request.add_body_part(body, mime_type="application/x-www-form-urlencoded")
response = request(http_request)
body = response.read()
if response.status == 200:
self._extract_tokens(body)
else:
self._invalid = True
return response
def _extract_tokens(self, body):
d = simplejson.loads(body)
self.access_token = d["access_token"]
self.refresh_token = d.get("refresh_token", self.refresh_token)
if "expires_in" in d:
self.token_expiry = (
datetime.timedelta(seconds=int(d["expires_in"]))
+ datetime.datetime.now()
)
else:
self.token_expiry = None
def authorize(self, client):
"""Authorize a gdata.client.GDClient instance with these credentials.
Args:
client: An instance of gdata.client.GDClient
or something that acts like it.
Returns:
A modified instance of client that was passed in.
Example:
>>> c = gdata.client.GDClient(source='user-agent')
>>> c = token.authorize(c)
"""
client.auth_token = self
request_orig = client.http_client.request
def new_request(http_request):
response = request_orig(http_request)
if response.status == 401:
refresh_response = self._refresh(request_orig)
if self._invalid:
return refresh_response
self.modify_request(http_request)
return request_orig(http_request)
return response
client.http_client.request = new_request
return client
def modify_request(self, http_request):
"""Sets the Authorization header in the HTTP request using the token.
Returns:
The same HTTP request object which was passed in.
"""
http_request.headers["Authorization"] = "%s%s" % (
OAUTH2_AUTH_LABEL,
self.access_token,
)
return http_request
ModifyRequest = modify_request
def _make_credentials_property(name):
"""Helper method which generates properties.
Used to access and set values on credentials property as if they were native
attributes on the current object.
Args:
name: A string corresponding to the attribute being accessed on the
credentials attribute of the object which will own the property.
Returns:
An instance of `property` which is a proxy for the `name` attribute on the
credentials attribute of the object.
"""
def get_credentials_value(self):
return getattr(self.credentials, name)
def set_credentials_value(self, value):
setattr(self.credentials, name, value)
return property(get_credentials_value, set_credentials_value)
| MicroPyramid/opensource-job-portal | mpcomp/gauth.py | gauth.py | py | 10,945 | python | en | code | 336 | github-code | 36 |
17134241020 | from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
spark.conf.set('spark.sql.parquet.compression.codec', 'snappy')
spark.conf.set('hive.exec.dynamic.partition.mode', 'nonstrict')
spark.conf.set('spark.streaming.stopGracefullyOnShutdown', 'true')
spark.conf.set('hive.exec.max.dynamic.partitions', '3000')
spark.conf.set('hive.support.concurrency', 'true')
from pyspark.sql import functions as f
from pyspark.sql import types as t
# variables globales
class Modelacion_02_feat():
    """Feature-extraction step 02: joins customer contact data and the
    'recorrido' monthly table onto a training population DataFrame.

    Relies on the module-level `spark` session and the `f`/`t` pyspark
    aliases defined at the top of this file.
    """
    def __init__(self):
        # NOTE(review): str1 appears unused inside this class — confirm callers.
        self.str1 = 'First Class'

    def export_table(self, TRAIN_POB_CAP, VAR_MES):
        """Read the source tables for month VAR_MES (yyyyMM as str/int) and
        return the feature table produced by feat_cap()."""
        # Read source tables on the server.
        # Contact master: normalize the state name so both spellings of
        # Veracruz collapse into one value, then drop the raw column.
        datos_contacto = spark.read.table("cd_baz_bdclientes.cd_cte_datos_contacto_master") \
            .select(
                f.col('id_master'),
                f.col('lentidad'),
                f.col('genero'),
                f.col('fecha_nacimiento'),
                f.col('cposta').alias('cod_postal')) \
            .withColumn('entidad', f.when(f.trim(f.col('lentidad')).isin('VERACRUZ', 'VERACRUZ DE IGNACIO DE LA LLAVE'), 'VERACRUZ') \
                        .otherwise(f.trim(f.col('lentidad')))) \
            .drop(f.col('lentidad'))
        # Monthly customer journey table, restricted to the reference month.
        recorrido = spark.read.table("cd_baz_bdclientes.cd_con_cte_recorrido") \
            .select(
                f.col('id_master'),
                f.col('num_periodo_mes').alias('per_ref'),
                f.col('cod_perfil_trx'),
                f.col('saldo'),
                f.col('potencial'),
                f.col('recorrido')) \
            .filter(f.col('per_ref') == str(VAR_MES)) \
            .orderBy(f.col('id_master'))
        # Table-extraction sequence.
        TT_train_feat_ren_ind = self.feat_cap(recorrido, datos_contacto, VAR_MES, TRAIN_POB_CAP)
        respond = TT_train_feat_ren_ind
        return respond

    # Step 1: information extraction for the potentials model.
    def feat_cap(self, recorrido, datos_contacto, VAR_MES, TRAIN_POB_CAP):
        """Left-join demographics and journey features onto TRAIN_POB_CAP.

        Missing joins are backfilled with 'VACIO' (strings) or 0 (numerics).
        """
        # Demographics: map unknown genders ('N'/'E') to 'X' and derive the
        # age in whole years at the first day of VAR_MES.
        _sdm = \
            datos_contacto.alias('A').withColumn('genero', f.when(f.trim(f.col('genero')).isin('N', 'E'), 'X') \
                                                 .otherwise(f.col('genero'))) \
            .withColumn('var_mes', f.to_date(f.lit(str(VAR_MES)+'01'), 'yyyyMMdd')) \
            .withColumn('edad', f.round(f.months_between(f.col('var_mes'), f.col('fecha_nacimiento')) / 12, 0).cast(t.IntegerType())) \
            .select(
                f.col('id_master'),
                f.col('edad'),
                f.col('var_mes'),
                f.col('genero'),
                f.col('cod_postal'),
                f.col('entidad')) \
            .orderBy('id_master')
        TT_train_feat_ren_ind = \
            TRAIN_POB_CAP.alias('A').join(_sdm.alias('B'), f.col('A.id_master') == f.col('B.id_master'), 'left') \
            .join(recorrido.alias('D'), f.col('A.id_master') == f.col('D.id_master'), 'left') \
            .select(
                f.col('A.id_master'),
                f.col('A.per_ref'),
                f.col('A.mto_ing_mes'),
                f.coalesce(f.col('B.genero'), f.lit('VACIO')).alias('genero'),
                f.coalesce(f.col('B.edad'), f.lit(0)).alias('edad'),  # expected > 18
                f.coalesce(f.col('B.entidad'), f.lit('VACIO')).alias('entidad'),
                f.coalesce(f.col('B.cod_postal'), f.lit(0)).alias('cod_postal'),
                f.coalesce(f.col('D.saldo'), f.lit(0)).alias('saldo'),
                f.coalesce(f.col('D.potencial'), f.lit(0)).alias('potencial'),
                f.coalesce(f.col('D.recorrido'), f.lit(0)).alias('recorrido')) \
            .orderBy('id_master')
        # Drop local references to the (lazy) intermediate DataFrames.
        del datos_contacto
        del recorrido
        del _sdm
        return TT_train_feat_ren_ind
| ConMota/app_renta_indirecta_GS | Class_02_feat.py | Class_02_feat.py | py | 3,937 | python | es | code | 0 | github-code | 36 |
39090962511 | import socket
import struct
import textwrap
import sys
INTERFACE_NAME = 'enp0s3'
def format_multi_line(string, size=80):
    """Render a payload for display: hex-escape bytes, then wrap to `size` cols."""
    if isinstance(string, bytes):
        string = ''.join(r'\x{:02x}'.format(byte) for byte in string)
    if size % 2:
        # Keep the wrap width even so escaped byte pairs line up cleanly.
        size -= 1
    return '\n'.join(textwrap.wrap(string, size))
def get_mac_addr(raw_mac_addr):
    """Render 6 raw bytes as an upper-case, colon-separated MAC address."""
    return ':'.join('{:02x}'.format(octet) for octet in raw_mac_addr).upper()
def destruct_ethernet_header(raw_data):
    """Split an Ethernet frame into (dest MAC, src MAC, EtherType, payload)."""
    dest_raw, src_raw, prototype = struct.unpack('! 6s 6s H', raw_data[:14])
    payload = raw_data[14:]
    return get_mac_addr(dest_raw), get_mac_addr(src_raw), prototype, payload
def destruct_ipv4_header(raw_data):
    """Parse an IPv4 header.

    Returns (first_byte, version, ihl_bytes, ttl, proto, src_ip, target_ip, payload).
    """
    first_byte = raw_data[0]
    version = first_byte >> 4
    # IHL is stored in 32-bit words; scale by 4 to get the payload offset.
    ihl = (first_byte & 0b1111) * 4
    ttl, proto, src_raw, target_raw = struct.unpack('! 8x B B 2x 4s 4s', raw_data[:20])
    return (first_byte, version, ihl, ttl, proto,
            get_ip(src_raw), get_ip(target_raw), raw_data[ihl:])
def destruct_tcp_header(raw_data):
    """Parse a TCP header into ports, seq/ack numbers, the six flags and the payload."""
    src_port, dest_port, sequence, acknowledgment, offset_flags = struct.unpack(
        '! H H L L H', raw_data[:14])
    # Data offset lives in the top 4 bits, counted in 32-bit words.
    data_offset = (offset_flags >> 12) * 4
    # Flags, highest bit first: URG, ACK, PSH, RST, SYN, FIN.
    flag_urg, flag_ack, flag_psh, flag_rst, flag_syn, flag_fin = (
        (offset_flags >> shift) & 1 for shift in (5, 4, 3, 2, 1, 0))
    return (src_port, dest_port, sequence, acknowledgment,
            flag_urg, flag_ack, flag_psh, flag_rst, flag_syn, flag_fin,
            raw_data[data_offset:])
def destruct_udp_header(raw_data):
    """Parse a UDP header into (src port, dest port, length, payload)."""
    src_port, dest_port, size = struct.unpack('! H H 2x H', raw_data[:8])
    return src_port, dest_port, size, raw_data[8:]
def destruct_icmp_header(raw_data):
    """Parse an ICMP header into (type, code, checksum, payload)."""
    packet_type, code, checksum = struct.unpack('! B B H', raw_data[:4])
    return packet_type, code, checksum, raw_data[4:]
def destruct_arp_header(raw_data):
    """Parse an ARP packet; MAC/IP fields come back in human-readable form."""
    (hardware_type, protocol_type, hardware_size, protocol_size, opcode,
     src_mac_raw, src_ip_raw, dest_mac_raw, dest_ip_raw) = struct.unpack(
        '! H H B B H 6s 4s 6s 4s', raw_data[:28])
    return (hardware_type, protocol_type, hardware_size, protocol_size, opcode,
            get_mac_addr(src_mac_raw), get_ip(src_ip_raw),
            get_mac_addr(dest_mac_raw), get_ip(dest_ip_raw),
            raw_data[28:])
def decode_http(raw_data):
    """Decode an HTTP payload as UTF-8 text, falling back to the raw bytes.

    The bare ``except`` is narrowed to ``UnicodeDecodeError``: that is the
    only failure mode of ``bytes.decode`` we want to silence; anything else
    (e.g. a non-bytes argument) should surface as a real error.
    """
    try:
        return raw_data.decode('utf-8')
    except UnicodeDecodeError:
        return raw_data
def get_ip(addr):
    """Render 4 raw bytes as dotted-quad IPv4 notation."""
    return '.'.join(str(octet) for octet in addr)
def main():
    """Open a raw AF_PACKET socket on INTERFACE_NAME and pretty-print every
    frame forever (Linux only; requires root/CAP_NET_RAW)."""
    s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.ntohs(3))
    try:
        s.bind((INTERFACE_NAME, 0))
    except OSError:
        # Narrowed from a bare except: only socket/interface errors abort.
        print('Device interface not found')
        sys.exit()
    while True:
        raw_data, addr = s.recvfrom(65535)
        print('=================================')
        eth = destruct_ethernet_header(raw_data)
        print('Ethernet frame:')
        print('Destination Mac: {}, Source Mac: {}, EtherType: {}'.format(eth[0], eth[1], eth[2]))
        print('---------------------------------')
        if eth[2] == 0x0800:
            ipv4 = destruct_ipv4_header(eth[3])
            print('IPv4 header:')
            # BUG FIX: the old format string had a single placeholder for
            # three arguments and labelled the *version* field as "TTL".
            print('Version: {}, IHL: {}, TTL: {}'.format(ipv4[1], ipv4[2], ipv4[3]))
            print('Source IP: {}, Target IP: {}, Protocol: {}'.format(ipv4[5], ipv4[6], ipv4[4]))
            print('---------------------------------')
            # TCP
            if ipv4[4] == 6:
                tcp = destruct_tcp_header(ipv4[7])
                print('TCP:')
                print('Source port: {}, Destination port: {}'.format(tcp[0], tcp[1]))
                print('Flags:')
                print('URG: {}, ACK: {}, PSH: {}'.format(tcp[4], tcp[5], tcp[6]))
                print('RST: {}, SYN: {}, FIN: {}'.format(tcp[7], tcp[8], tcp[9]))
                print('---------------------------------')
                if len(tcp[10]) > 0:
                    # HTTP
                    if tcp[0] == 80 or tcp[1] == 80:
                        print('HTTP data:')
                        try:
                            http = decode_http(tcp[10])
                            # BUG FIX: was str(http[10]) — that indexed a single
                            # character of the decoded payload instead of
                            # splitting the whole payload into lines.
                            http_info = str(http).split('\n')
                            for line in http_info:
                                print('' + str(line))
                        except Exception:
                            print(format_multi_line(tcp[10]))
                    else:
                        print('TCP Data:')
                        print(format_multi_line(tcp[10]))
            # ICMP
            elif ipv4[4] == 1:
                icmp = destruct_icmp_header(ipv4[7])
                print('ICMP:')
                print('Type: {}, Code: {}, Checksum: {},'.format(icmp[0], icmp[1], icmp[2]))
                print('---------------------------------')
                print('ICMP data:')
                print(format_multi_line(icmp[3]))
            # UDP
            elif ipv4[4] == 17:
                udp = destruct_udp_header(ipv4[7])
                print('UDP:')
                print('Source Port: {}, Destination Port: {}, Length: {}'.format(udp[0], udp[1], udp[2]))
            # Other IPv4
            else:
                print('Other IPv4 data:')
                print(format_multi_line(ipv4[7]))
        # ARP
        elif eth[2] == 0x0806:
            arp = destruct_arp_header(eth[3])
            print('ARP:')
            print('Hardware type: {}, Protocol type: {}'.format(arp[0], arp[1]))
            print('Hardware size: {}, Protocol size: {}'.format(arp[2], arp[3]))
            print('Opcode: {}'.format(arp[4]))
            print('Source Mac: {}, Source IP: {}'.format(arp[5], arp[6]))
            print('Dest Mac: {}, Dest IP: {}'.format(arp[7], arp[8]))
            print('---------------------------------')
        else:
            print('Ethernet data:')
            print(format_multi_line(eth[3]))
        print('=================================')
main() | frederon/packet-sniffer | sniffer.py | sniffer.py | py | 6,225 | python | en | code | 0 | github-code | 36 |
33040671101 | import io
from typing import List, Set, Tuple
from clvm import KEYWORD_FROM_ATOM, KEYWORD_TO_ATOM, SExp
from clvm import run_program as default_run_program
from clvm.casts import int_from_bytes
from clvm.EvalError import EvalError
from clvm.operators import OP_REWRITE, OPERATOR_LOOKUP
from clvm.serialize import sexp_from_stream, sexp_to_stream
from clvm_rs import STRICT_MODE, deserialize_and_run_program2, serialized_length
from clvm_tools.curry import curry, uncurry
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.hash import std_hash
from .tree_hash import sha256_treehash
def run_program(
    program,
    args,
    max_cost,
    operator_lookup=OPERATOR_LOOKUP,
    pre_eval_f=None,
):
    """Run `program` against `args`, delegating to clvm's default runner.

    Note the argument order differs from clvm's: here `max_cost` precedes
    the operator lookup table.
    """
    return default_run_program(
        program, args, operator_lookup, max_cost, pre_eval_f=pre_eval_f
    )
# Effectively unbounded cost limit (2**63 - 1) used by Program.run().
INFINITE_COST = 0x7FFFFFFFFFFFFFFF


class Program(SExp):
    """
    A thin wrapper around s-expression data intended to be invoked with "eval".
    """

    @classmethod
    def parse(cls, f) -> "Program":
        # Deserialize a single s-expression from the stream `f`.
        return sexp_from_stream(f, cls.to)

    def stream(self, f):
        # Serialize this program onto the stream `f`.
        sexp_to_stream(self, f)

    @classmethod
    def from_bytes(cls, blob: bytes) -> "Program":
        f = io.BytesIO(blob)
        result = cls.parse(f)  # type: ignore # noqa
        # The blob must contain exactly one serialized program.
        assert f.read() == b""
        return result

    def to_serialized_program(self) -> "SerializedProgram":
        return SerializedProgram.from_bytes(bytes(self))

    def __bytes__(self) -> bytes:
        f = io.BytesIO()
        self.stream(f)  # type: ignore # noqa
        return f.getvalue()

    def __str__(self) -> str:
        # Hex of the canonical serialization.
        return bytes(self).hex()

    def get_tree_hash(self, *args: List[bytes32]) -> bytes32:
        """
        Any values in `args` that appear in the tree
        are presumed to have been hashed already.
        """
        return sha256_treehash(self, set(args))

    def run_with_cost(self, max_cost: int, args) -> Tuple[int, "Program"]:
        # Returns (cost, result); raises if the program exceeds max_cost.
        prog_args = Program.to(args)
        cost, r = run_program(self, prog_args, max_cost)
        return cost, Program.to(r)

    def run(self, args) -> "Program":
        # Convenience wrapper: run with an effectively unlimited cost budget.
        cost, r = self.run_with_cost(INFINITE_COST, args)
        return r

    def curry(self, *args) -> "Program":
        # Bind `args` into this program, producing a new program.
        cost, r = curry(self, list(args))
        return Program.to(r)

    def uncurry(self) -> Tuple["Program", "Program"]:
        # Inverse of curry(); returns (self, 0) when self is not curried.
        r = uncurry(self)
        if r is None:
            return self, self.to(0)
        return r

    def as_int(self) -> int:
        return int_from_bytes(self.as_atom())

    def as_atom_list(self) -> List[bytes]:
        """
        Pretend `self` is a list of atoms. Return the corresponding
        python list of atoms.

        At each step, we always assume a node to be an atom or a pair.
        If the assumption is wrong, we exit early. This way we never fail
        and always return SOMETHING.
        """
        items = []
        obj = self
        while True:
            pair = obj.pair
            if pair is None:
                break
            atom = pair[0].atom
            if atom is None:
                break
            items.append(atom)
            obj = pair[1]
        return items

    def __deepcopy__(self, memo):
        # Round-trip through serialization; cheaper and safer than walking
        # the node graph.
        return type(self).from_bytes(bytes(self))

    # Re-exported so callers can catch Program.EvalError.
    EvalError = EvalError
def _tree_hash(node: SExp, precalculated: Set[bytes32]) -> bytes32:
    """
    Hash values in `precalculated` are presumed to have been hashed already.
    """
    # Atom: either reuse the precalculated hash or hash with the \1 prefix.
    if not node.listp():
        atom = node.as_atom()
        if atom in precalculated:
            return bytes32(atom)
        return bytes32(std_hash(b"\1" + atom))
    # Pair: hash the two subtree hashes with the \2 prefix.
    left = _tree_hash(node.first(), precalculated)
    right = _tree_hash(node.rest(), precalculated)
    return bytes32(std_hash(b"\2" + left + right))
def _serialize(node) -> bytes:
    """Serialize `node` to clvm wire format.

    Uses isinstance rather than an exact type() comparison so subclasses of
    SerializedProgram (which already hold serialized bytes) take the cheap
    path instead of being re-parsed.
    """
    if isinstance(node, SerializedProgram):
        return bytes(node)
    return SExp.to(node).as_bin()
class SerializedProgram:
    """
    An opaque representation of a clvm program. It has a more limited interface than a full SExp
    """

    # Raw serialized clvm bytes; the program is never parsed unless needed.
    _buf: bytes = b""

    @classmethod
    def parse(cls, f) -> "SerializedProgram":
        # Measure the serialized length without parsing, then slice it off.
        length = serialized_length(f.getvalue()[f.tell() :])
        return SerializedProgram.from_bytes(f.read(length))

    def stream(self, f):
        f.write(self._buf)

    @classmethod
    def from_bytes(cls, blob: bytes) -> "SerializedProgram":
        ret = SerializedProgram()
        ret._buf = bytes(blob)
        return ret

    @classmethod
    def from_program(cls, p: Program) -> "SerializedProgram":
        ret = SerializedProgram()
        ret._buf = bytes(p)
        return ret

    def to_program(self) -> Program:
        return Program.from_bytes(self._buf)

    def uncurry(self) -> Tuple["Program", "Program"]:
        # Requires a full parse; delegates to Program.uncurry().
        return self.to_program().uncurry()

    def __bytes__(self) -> bytes:
        return self._buf

    def __str__(self) -> str:
        return bytes(self).hex()

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, str(self))

    def __eq__(self, other) -> bool:
        # Byte-for-byte equality of the serialized form.
        if not isinstance(other, SerializedProgram):
            return False
        return self._buf == other._buf

    def __ne__(self, other) -> bool:
        if not isinstance(other, SerializedProgram):
            return True
        return self._buf != other._buf

    def get_tree_hash(self, *args: List[bytes32]) -> bytes32:
        """
        Any values in `args` that appear in the tree
        are presumed to have been hashed already.
        """
        tmp = sexp_from_stream(io.BytesIO(self._buf), SExp.to)
        return _tree_hash(tmp, set(args))

    def run_safe_with_cost(self, max_cost: int, *args) -> Tuple[int, Program]:
        # STRICT_MODE rejects unknown opcodes instead of treating them as no-ops.
        return self._run(max_cost, STRICT_MODE, *args)

    def run_with_cost(self, max_cost: int, *args) -> Tuple[int, Program]:
        return self._run(max_cost, 0, *args)

    def _run(self, max_cost: int, flags, *args) -> Tuple[int, Program]:
        # when multiple arguments are passed, concatenate them into a serialized
        # buffer. Some arguments may already be in serialized form (e.g.
        # SerializedProgram) so we don't want to de-serialize those just to
        # serialize them back again. This is handled by _serialize()
        serialized_args = b""
        if len(args) > 1:
            # when we have more than one argument, serialize them into a list
            # (\xff starts a pair, \x80 is nil — builds a proper clvm list).
            for a in args:
                serialized_args += b"\xff"
                serialized_args += _serialize(a)
            serialized_args += b"\x80"
        else:
            serialized_args += _serialize(args[0])

        # TODO: move this ugly magic into `clvm` "dialects"
        native_opcode_names_by_opcode = dict(
            ("op_%s" % OP_REWRITE.get(k, k), op) for op, k in KEYWORD_FROM_ATOM.items() if k not in "qa."
        )
        cost, ret = deserialize_and_run_program2(
            self._buf,
            serialized_args,
            KEYWORD_TO_ATOM["q"][0],
            KEYWORD_TO_ATOM["a"][0],
            native_opcode_names_by_opcode,
            max_cost,
            flags,
        )
        return cost, Program.to(ret)


# The canonical nil program.
NIL = Program.from_bytes(b"\x80")
| snight1983/chia-rosechain | chia/types/blockchain_format/program.py | program.py | py | 7,273 | python | en | code | 369 | github-code | 36 |
34347438133 | """Module for quad element with 4 nodes - type 3 in gmsh
"""
from diffuspy.element import Element
import numpy as np
class Quad4(Element):
    """Constructor of a 4-node quadrangle (TYPE 3) element

    Bilinear isoparametric quad for 2-D heat conduction: builds element
    stiffness/capacitance/convection matrices and load vectors via 2x2
    Gauss quadrature. Assumes the Element base class provides `self.surf`,
    `self.xyz` and `self.conn`.
    """
    def __init__(self, eid, model, material):
        super().__init__(eid, model)

        # Nodal coordinates in the natural domain (isoparametric coordinates)
        self.XEZ = np.array([[-1.0, -1.0],
                             [1.0, -1.0],
                             [1.0, 1.0],
                             [-1.0, 1.0]])

        # check if conductivity was assigned
        try:
            self.λ = material.λ[self.surf]
        except AttributeError:
            print('Conductivity (λ) not defined!')
        except KeyError:
            print('Surface ', self.surf,
                  ' with no conductivity (λ) assigned!')

        # check if capacitance material properties were assigned
        # if not, just pass because it maybe not a transient analysis
        # NOTE(review): bare except also hides unrelated errors — consider
        # narrowing to (AttributeError, KeyError).
        try:
            self.ρ = material.ρ[self.surf]
            self.c = material.c[self.surf]
        except:
            pass

        # check if its a boundary element
        if eid in model.bound_ele[:, 0]:
            # index where bound_ele refers to this element
            index = np.where(model.bound_ele[:, 0] == eid)[0]
            # side of the element at the boundary
            self.side_at_boundary = model.bound_ele[index, 1]
            # boundary line where the element side share interface
            self.at_boundary_line = model.bound_ele[index, 2]
        else:
            # interior element: no boundary sides
            self.side_at_boundary = []
            self.at_boundary_line = []

    def shape_function(self, xez):
        """Create the basis function and evaluate them at xez coordinates

        Args:
            xez (array): position in the isoparametric coordinate xi, eta, zeta

        Return:
            N (array): shape functions
        """
        # variables in the natural (iso-parametric) domain
        e1 = xez[0]
        e2 = xez[1]

        # Terms of the shape function
        e1_term = 0.5*(1.0 + self.XEZ[:, 0] * e1)
        e2_term = 0.5*(1.0 + self.XEZ[:, 1] * e2)

        # Basis functions
        # N = [ N_1 N_2 N_3 N_4 ]
        N = e1_term*e2_term
        self.N = np.array(N)

        # Derivative of the shape functions
        # dN = [ dN1_e1 dN2_e1 ...
        #        dN1_e2 dN2_e2 ... ]
        self.dN_ei = np.zeros((2, 4))
        self.dN_ei[0, :] = 0.5 * self.XEZ[:, 0] * e2_term
        self.dN_ei[1, :] = 0.5 * self.XEZ[:, 1] * e1_term
        return self.N, self.dN_ei

    @staticmethod
    def mapping(N, xyz):
        """maps from cartesian to isoparametric.

        Returns the physical (x1, x2) point interpolated from the nodal
        coordinates `xyz` with shape functions `N`.
        """
        x1, x2 = N @ xyz
        return x1, x2

    def jacobian(self, xyz, dN_ei):
        """Creates the Jacobian matrix of the mapping between an element

        Args:
            xyz (array of floats): coordinates of element nodes in cartesian
                coordinates
            dN_ei (array of floats): derivative of shape functions

        Return:
            det_jac (float): determinant of the jacobian matrix
            dN_xi (array of floats): derivative of shape function
                with respect to cartesian system
            arch_length (array of floats): arch length for change of variable
                in the line integral
        """
        # Jac = [ x1_e1 x2_e1
        #         x1_e2 x2_e2 ]
        Jac = dN_ei @ xyz

        det_jac = abs((Jac[0, 0]*Jac[1, 1] -
                       Jac[0, 1]*Jac[1, 0]))

        # jac_inv = [ e1_x1 e2_x1
        #             e1_x2 e2_x2 ]
        jac_inv = np.linalg.inv(Jac)

        # Using Chain rule,
        # N_xi = N_eI * eI_xi (2x8 array)
        dN_xi = np.zeros((2, 4))
        dN_xi[0, :] = (dN_ei[0, :]*jac_inv[0, 0] +
                       dN_ei[1, :]*jac_inv[0, 1])
        dN_xi[1, :] = (dN_ei[0, :]*jac_inv[1, 0] +
                       dN_ei[1, :]*jac_inv[1, 1])

        # Length of the transofmation arch
        # Jacobian for line integral-2.
        # One entry per element side (bottom, right, top, left).
        arch_length = np.array([
            (Jac[0, 0]**2 + Jac[0, 1]**2)**(1/2),
            (Jac[1, 0]**2 + Jac[1, 1]**2)**(1/2),
            (Jac[0, 0]**2 + Jac[0, 1]**2)**(1/2),
            (Jac[1, 0]**2 + Jac[1, 1]**2)**(1/2)
        ])
        return det_jac, dN_xi, arch_length

    def heat_stiffness_matrix(self, t=1):
        """Build the element heat (q) stiffness (k) matrix

        Integrates λ Bᵀ B over the element with 2x2 Gauss quadrature.
        """
        k_q = np.zeros((4, 4))

        gauss_points = self.XEZ / np.sqrt(3.0)

        for gp in gauss_points:
            N, dN_ei = self.shape_function(xez=gp)
            dJ, dN_xi, _ = self.jacobian(self.xyz, dN_ei)

            B = dN_xi

            # Check if condutivity is a function (of position and time)
            if callable(self.λ) is True:
                x1, x2 = self.mapping(N, self.xyz)
                λ = self.λ(x1, x2, t)
            else:
                λ = self.λ

            k_q += λ * (B.T @ B) * dJ
        return k_q

    def heat_capacitance_matrix(self, t=1):
        """Build element matrix (k) due internal thermal energy storage (s)

        Integrates c ρ Nᵀ N over the element (transient analyses only).
        """
        k_s = np.zeros((4, 4))

        gauss_points = self.XEZ / np.sqrt(3.0)

        for gp in gauss_points:
            N, dN_ei = self.shape_function(xez=gp)
            dJ, dN_xi, _ = self.jacobian(self.xyz, dN_ei)

            # check if attribute and surface were assigned correctly
            # NOTE(review): if the except branch runs, `c` stays unbound and
            # the k_s update below raises UnboundLocalError — confirm intent.
            try:
                # Check if specific heat is a function
                if callable(self.c) is True:
                    x1, x2 = self.mapping(N, self.xyz)
                    c = self.c(x1, x2, t)
                else:
                    c = self.c
            except AttributeError:
                print('Specific heat (c) not defined')
            except KeyError:
                print('Surface ', self.surf,
                      ' with no specific heat (c) assigned!')

            # NOTE(review): same unbound-variable caveat applies to ρ.
            try:
                # Check if density is a function
                if callable(self.ρ) is True:
                    x1, x2 = self.mapping(N, self.xyz)
                    ρ = self.ρ(x1, x2, t)
                else:
                    ρ = self.ρ
            except AttributeError:
                print('Density (ρ) not defined')
            except KeyError:
                print('Surface ', self.surf,
                      ' with no density (ρ) assigned!')

            k_s += c*ρ*(np.atleast_2d(N).T @ np.atleast_2d(N))*dJ
        return k_s

    def heat_convection_matrix(self, h, t=1):
        """Build the element matrix (k) due convection boundary (c)

        Line-integrates h Nᵀ N over each element side that lies on a
        boundary line listed in `h` (a callable returning {line: value}).
        """
        k_c = np.zeros((4, 4))

        # 2-point Gauss rule on each of the 4 sides, in isoparametric coords.
        gp = np.array([
            [[-1.0/np.sqrt(3), -1.0],
             [1.0/np.sqrt(3), -1.0]],
            [[1.0, -1.0/np.sqrt(3)],
             [1.0, 1.0/np.sqrt(3)]],
            [[-1.0/np.sqrt(3), 1.0],
             [1.0/np.sqrt(3), 1.0]],
            [[-1.0, -1.0/np.sqrt(3)],
             [-1.0, 1/np.sqrt(3)]]])

        # check if there is convection
        if h is not None:
            # loop for specified boundary conditions
            for key in h(1, 1).keys():
                line = key

                # loop over each boundary line that intersects the element
                # sides
                for ele_boundary_line, ele_side in zip(self.at_boundary_line,
                                                       self.side_at_boundary):
                    # Check if this element is at the line with convection bc
                    if line == ele_boundary_line:
                        # solve the integral with GQ
                        for w in range(2):
                            N, dN_ei = self.shape_function(xez=gp[ele_side, w])
                            _, _, arch_length = self.jacobian(self.xyz, dN_ei)

                            # check if condutance is a function
                            if callable(h) is True:
                                x1, x2 = self.mapping(N, self.xyz)
                                h_v = h(x1, x2, t)[line]
                            else:
                                h_v = h[line]

                            dL = arch_length[ele_side]
                            k_c += h_v * (
                                np.atleast_2d(N).T @ np.atleast_2d(N)) * dL
                    else:
                        # Catch element that is not at boundary
                        continue
        return k_c

    def heat_source_vector(self, σ_q=None, t=1, T_ip=1, dα=1, tol=1e-5):
        """Build the element vector due internal heat (q) source (σ)

        Args:
            T_ip: Nodal temperature form previous iteration i
        """
        gauss_points = self.XEZ / np.sqrt(3.0)

        pq = np.zeros(4)

        # find the average temperature of element
        # check if heat source is nonlinear in T
        # NOTE(review): if T_ip is scalar but σ_q declares 'Temperature',
        # T_avg below is unbound — confirm callers always pass nodal T then.
        if np.size(T_ip) > 1:
            T_avg = np.average(T_ip[self.conn])

        for gp in gauss_points:
            N, dN_ei = self.shape_function(xez=gp)
            dJ, dN_xi, _ = self.jacobian(self.xyz, dN_ei)
            x1, x2 = self.mapping(N, self.xyz)

            if σ_q is not None:
                # Dispatch on the *names of the default values* declared by
                # σ_q — a convention used by this project to tag the source
                # term as reaction- or temperature-dependent.
                if 'Reaction Degree' in σ_q.__defaults__:
                    pq[:] += N[:] * σ_q(x1, x2, t=t, dα=dα) * dJ
                elif 'Temperature' in σ_q.__defaults__:
                    pq[:] += N[:] * σ_q(x1, x2, t=t, T=T_avg) * dJ
                else:
                    pq[:] += N[:] * σ_q(x1, x2, t=t) * dJ
        return pq

    def heat_boundary_flux_vector(self, q_bc, t=1):
        """Build element load vector due q_bc boundary condition

        Line-integrates the imposed flux q_bc over boundary sides.
        """
        # 2-point Gauss rule per side (same layout as heat_convection_matrix).
        gp = np.array([
            [[-1.0/np.sqrt(3), -1.0],
             [1.0/np.sqrt(3), -1.0]],
            [[1.0, -1.0/np.sqrt(3)],
             [1.0, 1.0/np.sqrt(3)]],
            [[-1.0/np.sqrt(3), 1.0],
             [1.0/np.sqrt(3), 1.0]],
            [[-1.0, -1.0/np.sqrt(3)],
             [-1.0, 1/np.sqrt(3)]]])

        p_t = np.zeros(4)

        if q_bc is not None:
            # loop for specified boundary conditions
            for key in q_bc(1, 1).keys():
                line = key

                for ele_boundary_line, ele_side in zip(self.at_boundary_line,
                                                       self.side_at_boundary):
                    # Check if this element is at the line with traction
                    if line == ele_boundary_line:
                        # solve the integral with GQ
                        for w in range(2):
                            N, dN_ei = self.shape_function(xez=gp[ele_side, w])
                            _, _, arch_length = self.jacobian(self.xyz, dN_ei)

                            dL = arch_length[ele_side]
                            x1, x2 = self.mapping(N, self.xyz)

                            p_t[:] += N[:] * q_bc(x1, x2, t)[line] * dL
                    else:
                        # Catch element that is not at boundary
                        continue
        return p_t

    def heat_boundary_convection_vector(self, T_a, h, t=1):
        """Build the element heat vector due convection bc

        Line-integrates h * T_a * N over boundary sides, where T_a is the
        surrounding fluid temperature and h the film coefficient.
        """
        # 2-point Gauss rule per side (same layout as heat_convection_matrix).
        gp = np.array([
            [[-1.0/np.sqrt(3), -1.0],
             [1.0/np.sqrt(3), -1.0]],
            [[1.0, -1.0/np.sqrt(3)],
             [1.0, 1.0/np.sqrt(3)]],
            [[-1.0/np.sqrt(3), 1.0],
             [1.0/np.sqrt(3), 1.0]],
            [[-1.0, -1.0/np.sqrt(3)],
             [-1.0, 1/np.sqrt(3)]]])

        p_c = np.zeros(4)

        # Try compute the vector due convection
        if h is not None:
            # loop for specified boundary condition line
            for key in h(1, 1).keys():
                line = key

                for ele_boundary_line, ele_side in zip(self.at_boundary_line,
                                                       self.side_at_boundary):
                    # Check if this element is at the line with traction
                    if line == ele_boundary_line:
                        # solve the integral with GQ
                        for w in range(2):
                            N, dN_ei = self.shape_function(xez=gp[ele_side, w])
                            _, _, arch_length = self.jacobian(self.xyz, dN_ei)

                            dL = arch_length[ele_side]

                            # check if condutance is a function
                            if callable(h) is True:
                                x1, x2 = self.mapping(N, self.xyz)
                                h_v = h(x1, x2, t)[line]
                            else:
                                h_v = h[line]

                            # check if the surrounded fluid temperature is
                            # a function
                            if callable(T_a) is True:
                                x1, x2 = self.mapping(N, self.xyz)
                                T_a_v = T_a(x1, x2, t)[line]
                            else:
                                T_a_v = T_a[line]

                            p_c[:] += N[:] * h_v * T_a_v * dL
                    else:
                        # Catch element that is not at boundary
                        continue
        return p_c
| nasseralkmim/diffuspy | diffuspy/elements/quad4.py | quad4.py | py | 13,182 | python | en | code | 5 | github-code | 36 |
19358330260 | import subprocess
import numpy as np

# Verification harness: feeds two random matrices to an external C program
# over stdin and checks its product against numpy's result.

# Size of the (square) matrices
n = 8

# Random integer matrices with values in [1, 5]
A = np.random.randint(1, 6, size=(n, n))
B = np.random.randint(1, 6, size=(n, n))

# Run the C program.
# Make sure this matches the actual name of the compiled executable.
program_name = "./bin"
num_threads = 8

# Construct the command to run the C program with arguments
command = [program_name, str(num_threads), str(n)]
process = subprocess.Popen(command, stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE, text=True)

# Serialize both matrices as whitespace-separated rows, A first then B.
input_data = "\n".join(" ".join(str(val) for val in row) for row in A) + \
    "\n" + "\n".join(" ".join(str(val) for val in row) for row in B)

stdout, stderr = process.communicate(input=input_data)

# Check that the execution produced no errors.
if process.returncode != 0:
    print("Error al ejecutar el programa en C:")
    print(stderr)
else:
    # Parse the resulting matrix printed by the C program.
    C = np.array([[float(val) for val in row.split()]
                  for row in stdout.strip().split('\n')])

    # Reference product computed in Python.
    expected_result = np.dot(A, B)

    print("Matriz A:")
    print(A)
    print("Matriz B:")
    print(B)
    print("Matriz C:")
    print(C)
    print("Matriz esperada:")
    print(expected_result)

    # Compare within a small floating-point tolerance.
    tolerance = 1e-6
    if np.allclose(C, expected_result, rtol=tolerance, atol=tolerance):
        print("La multiplicación de matrices es correcta.")
    else:
        print("La multiplicación de matrices es incorrecta.")
| nivalderramas/paralela | matrixMult/matrixComprobator.py | matrixComprobator.py | py | 1,808 | python | es | code | 0 | github-code | 36 |
12371109221 | n = int(input())
array = []
# Read n integers, one per line, from stdin.
for i in range(n):
    array.append(int(input()))
def merge_sort(array):
    """Sort `array` in place using top-down merge sort."""
    def _merge(lo, mid, hi):
        # Merge the two sorted halves array[lo:mid] and array[mid:hi].
        merged = []
        i, j = lo, mid
        while i < mid and j < hi:
            if array[i] < array[j]:
                merged.append(array[i])
                i += 1
            else:
                merged.append(array[j])
                j += 1
        # One of these slices is empty; the other holds the leftovers.
        merged.extend(array[i:mid])
        merged.extend(array[j:hi])
        array[lo:hi] = merged

    def _sort(lo, hi):
        if hi - lo < 2:
            return
        mid = (lo + hi) // 2
        _sort(lo, mid)
        _sort(mid, hi)
        _merge(lo, mid, hi)

    return _sort(0, len(array))
merge_sort(array)
# Emit the sorted values, one per line.
for i in range(n):
    print(array[i])
10513613017 | from django.test import SimpleTestCase
from website.forms import CreateUserForm, SignUpForm, FeedbackForm, PatientForm, DocumentationP, EventForm, MessageForm, RequestForm
from website.models import Patient, SignUp, Feedback, Documentation, Event, Messages, Requests
class TestForms(SimpleTestCase):
    """Smoke-tests that each form class can be instantiated with a data dict.

    NOTE(review): these tests never call form.is_valid() and contain no
    assertions, so they pass vacuously. The data dicts also use
    'model'/'fields' keys, which look like Meta options rather than actual
    form field names — presumably each form's real fields should be bound
    and validated here; confirm against the form definitions.
    """

    def test_create_user_form(self):
        form = CreateUserForm(data={
            'model': ['Patient'],
            'fields': ['dina', 'balua']
        })

    def test_sign_up_form(self):
        form = SignUpForm(data={
            'model': ['SignUp'],
            'fields': ['lior', 'inbar', 16, 'man', 'dinab@gmail', +972855555555, 'Canada', 'write']
        })

    def test_feedback_form(self):
        form = FeedbackForm(data={
            'model': ['Feedback'],
            'fields': ['dina', 'balua', 'message']
        })

    def test_patient_form(self):
        form = PatientForm(data={
            'model': ['Patient'],
            'fields': ['dan']
        })

    def test_documentation_form(self):
        form = DocumentationP(data={
            'model': ['Documentation'],
            'fields': ['inbar', 'balua', 'message', 'meeting', 'diagnosis']
        })

    def test_event_form(self):
        form = EventForm(data={
            'model': ['Event'],
            'fields': ['avihai', 27/7/92]
        })

    def test_message_form(self):
        form = MessageForm(data={
            'model': ['Messages'],
            'fields': ['vika', 18/3/98]
        })

    def test_request_form(self):
        form = RequestForm(data={
            'model': ['Requests'],
            'fields': ['lior', 27/10/1994]
        })
33989498564 | from django.contrib.auth.models import User
from django.shortcuts import render
from profile_app.models import UserProfileInfo
from video_app.models import Video
from comment_app.models import Comment
from django.http import JsonResponse
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def write_comment(request, video_id):
    """Create a Comment on the given video for the logged-in user (POST only)."""
    # Guard clause: reject anything that is not a POST.
    if request.method != 'POST':
        error = {'error': 'Non POST method not allowed'}
        return JsonResponse(error)

    userprofileinfo = UserProfileInfo.objects.get(user=request.user)
    video = Video.objects.get(video_id=video_id)
    comment = Comment(
        text=request.POST.get('text'),
        userprofileinfo=userprofileinfo,
        video=video,
    )
    comment.save()

    return JsonResponse({
        'result': 'success',
        'id': comment.id,
        'text': comment.text,
        'userprofileinfo': comment.userprofileinfo.user.username,
        'date': comment.date,
        'video': comment.video.title,
    })
18760528871 | import numpy as np
import itertools
import cv2
def draw_epipolar_lines(img_left, img_right):
    """Stack the two images side by side and paint evenly spaced horizontal
    lines in cycling colors so epipolar alignment can be eyeballed."""
    palette = itertools.cycle(
        [(255, 0, 0), (0, 0, 255), (0, 255, 0),
         (255, 255, 0), (255, 255, 255), (0, 255, 255)])
    rows = np.shape(img_left)[0]
    divisions = 40.0
    step = int(np.floor(rows / divisions))
    combined = np.hstack([img_left, img_right])
    # Color every `step`-th row; np.hstack copies, so the inputs are untouched.
    for row in range(0, int(divisions * step) - 1, step):
        combined[row, :, :] = next(palette)
    return combined
def rectify_images(left_img, right_img, left_K, right_K, transl_RL_R, rot_RL, crop_parameter):
    """Stereo-rectify an image pair given intrinsics and the right-to-left
    extrinsics; `crop_parameter` is passed through as OpenCV's alpha."""
    # Image sizes as (width, height), which is what OpenCV expects.
    size_left = left_img.shape[0:2][::-1]
    size_right = right_img.shape[0:2][::-1]
    dist = None  # images are assumed already undistorted
    R1, R2, P1, P2, Q, _, _ = cv2.stereoRectify(
        left_K, dist, right_K, dist, size_left, rot_RL, transl_RL_R,
        alpha=crop_parameter)
    maps_left = cv2.initUndistortRectifyMap(left_K, dist, R1, P1, size_left, cv2.CV_16SC2)
    maps_right = cv2.initUndistortRectifyMap(right_K, dist, R2, P2, size_right, cv2.CV_16SC2)
    rect_left = cv2.remap(left_img, maps_left[0], maps_left[1], cv2.INTER_LANCZOS4)
    rect_right = cv2.remap(right_img, maps_right[0], maps_right[1], cv2.INTER_LANCZOS4)
    return rect_left, rect_right
def filter_images(bright_img, no_light_img, treshold=0):
    """Zero out pixels of `bright_img` that are not brighter than the
    no-light reference plus `treshold`, and return the result.

    BUG FIX: the original wrote through an alias of `bright_img`, silently
    mutating the caller's array; we now work on a copy.
    (Parameter name `treshold` [sic] is kept for interface compatibility.)
    """
    mask = bright_img < (no_light_img + treshold)
    filtered_img = bright_img.copy()
    filtered_img[mask] = 0
    return filtered_img
def nothing(x):
    # No-op callback required by cv2.createTrackbar's signature.
    pass
def stereo_SGBM_tuner(img1, img2):
    """Interactive SGBM disparity tuner: two trackbars control the minimum
    disparity and the number of disparities (both in multiples of 16).
    Press ESC to quit.

    Fixes: `num_disp % 16 is 0` compared *identity* instead of value (a
    SyntaxWarning on modern Python, and only accidentally true via small-int
    caching); replaced with ==. Dead locals (win_size, max_disp,
    uniqueness_ratio, shadowed min/num_disp) removed.
    """
    win_name = 'window'
    cv2.namedWindow(win_name)
    cv2.createTrackbar("disparity_min", win_name, 20, 10, nothing)
    cv2.createTrackbar("disparity_num", win_name, 20, 50, nothing)
    block_size = 5
    while 1:
        min_disp = cv2.getTrackbarPos("disparity_min", win_name) * 16
        num_disp = cv2.getTrackbarPos("disparity_num", win_name) * 16
        print(num_disp)
        # SGBM requires the disparity count to be a multiple of 16.
        assert num_disp % 16 == 0
        stereo_SGBM = cv2.StereoSGBM_create(min_disp, num_disp, block_size)
        disp = stereo_SGBM.compute(img2, img1)
        cv2.imshow(win_name, disp)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            cv2.destroyAllWindows()
            break
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Utility module: nothing to run when executed directly.
    pass
38043839602 | import xlrd #读取excel
import xlwt #写入excel
from datetime import date,datetime
def read_excel(name):
    """Convert ../data/<name>.xlsx (first sheet) to ../data/<name>.csv.

    Row 0 is treated as a header and written verbatim; every other row is
    assumed numeric and each cell is truncated to an int (xlrd reads
    numbers as floats).

    Fixes: the output file handle was never closed (now a `with` block);
    the per-cell string concatenation is replaced by a join; dead locals
    and commented-out exploration code removed; comments translated to
    English.
    """
    workbook = xlrd.open_workbook('../data/' + name + '.xlsx')
    # Only one sheet is expected; index 0 is the first.
    sheet = workbook.sheet_by_index(0)
    with open('../data/' + name + '.csv', 'w+') as f:
        for k in range(sheet.nrows):
            rows = sheet.row_values(k)
            if k == 0:
                # Header row: keep the cell values as-is.
                cells = [str(v) for v in rows]
            else:
                # Data rows: coerce the float cell values to ints.
                cells = [str(int(v)) for v in rows]
            print(','.join(cells), file=f)
if __name__ == "__main__":
roads = ['airport','lihua','zhenning','jianshe4','jianshe3','jianshe2','jianshe1']
for i in range(len(roads)):
read_excel(roads[i]) | MrLeedom/TSC_RL | CSP/preprocess/third.py | third.py | py | 1,744 | python | en | code | 7 | github-code | 36 |
74470473064 | """
Project Tasks that can be invoked using using the program "invoke" or "inv"
"""
import os
from invoke import task
# disable the check for unused-arguments to ignore unused ctx parameter in tasks
# pylint: disable=unused-argument
IS_WINDOWS = os.name == "nt"
if IS_WINDOWS:
# setting 'shell' is a work around for issue #345 of invoke
RUN_ARGS = {"pty": False, "shell": r"C:\Windows\System32\cmd.exe"}
else:
RUN_ARGS = {"pty": True}
def get_files():
    """Return the space-separated list of paths the analysis tools run on."""
    targets = (
        "dploy",
        "tests",
        "tasks.py",
    )
    return " ".join(targets)
@task
def setup(ctx):
    """
    Install python requirements
    """
    cmd = "python -m pip install -r requirements.txt"
    ctx.run(cmd, **RUN_ARGS)
@task
def clean(ctx):
    """
    Clean repository using git
    """
    cmd = "git clean --interactive"
    ctx.run(cmd, **RUN_ARGS)
@task
def lint(ctx):
    """
    Run pylint on this module
    """
    files = get_files()
    for tool in ("pylint --output-format=parseable", "flake8"):
        ctx.run("python -m {cmd} {files}".format(cmd=tool, files=files), **RUN_ARGS)
@task
def reformat_check(ctx):
    """
    Run formatting check
    """
    command = "python -m {cmd} {files}".format(cmd="black --check", files=get_files())
    ctx.run(command, **RUN_ARGS)
@task
def reformat(ctx):
    """
    Run formatting
    """
    command = "python -m {cmd} {files}".format(cmd="black", files=get_files())
    ctx.run(command, **RUN_ARGS)
@task
def metrics(ctx):
    """
    Run radon code metrics on this module
    """
    files = get_files()
    # cc = cyclomatic complexity, mi = maintainability index
    for metric in ("cc", "mi"):
        ctx.run("radon {metric} --min B {files}".format(metric=metric, files=files), **RUN_ARGS)
@task()
def test(ctx):
    """
    Test Task
    """
    # Use py.test instead of the recommended pytest so it works on Python 3.3
    ctx.run("py.test --cov-report term-missing --cov=dploy --color=no", **RUN_ARGS)
# pylint: disable=redefined-builtin
# BUG FIX: `default=True` is an invoke *decorator* option (marks this as the
# default task); as a function parameter it swallowed the Context argument
# invoke passes as the first positional.
@task(test, lint, reformat_check, default=True)
def all(ctx):
    """
    Run the default task chain: tests, lint and formatting check.
    """
@task(clean)
def build(ctx):
    """
    Build a single-file executable with pyinstaller (cleans first)
    """
    entry_point = os.path.join("dploy", "__main__.py")
    ctx.run("pyinstaller -n dploy --onefile " + entry_point, **RUN_ARGS)
| arecarn/dploy | tasks.py | tasks.py | py | 2,421 | python | en | code | 68 | github-code | 36 |
33453047943 | from __future__ import print_function
import socket
import sys
import os
import re
import logging
import datetime
"""
FTPClient object requires:
- HOST (IP address or domain)
- PORT (Integer value between 0-99999)
- COMMANDS (List of Strings: LIST|PUT|GET followed by filename)
CTRL+C to exit client
"""
# Usage hint appended to argument-validation error messages.
EXAMPLE_INPUT = "\n - Example input: python client.py <domain/ip> <port> <put filename|get filename|list>"
class FTPClient:
    """Single-command FTP-like client: connects to a server and performs one
    of LIST, PUT <filename> or GET <filename>, logging to client.log."""

    def __init__(self, host, port, command):
        logging.basicConfig(filename='client.log', level=logging.DEBUG)
        self.cli_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # check_* helpers validate and normalize the CLI arguments; each one
        # raises SystemExit (via self.log) on invalid input.
        self.host = self.check_host(host)
        self.port = self.check_port(port)
        self.command = self.check_command(command)
        self.connected = False
        self.protocol_commands = {
            "put": self.put_file,
            "get": self.get_file,
            "list": self.show_list
        }
        self.protocol_errors = {
            "FileAlreadyExists": "File already exists in current directory",
            "FileNotFound": "File could not be found in current directory",
            "FileTooLarge": "File is too large to transfer (over 5GB in size)",
            "FileZeroSized": "File is a zero-sized file (does not contain data)",
            "FileNameTooLong": "Filename of file is too long (over 255 chars)",
            "FileIsDirectory": "File is actually a directory (folder containing files)"
        }
        self.protocol_messages = {
            "FileOkTransfer": "No existing file present, OK to create new file.",
            "FileSizeReceived": "The filesize of file being transferred has successfully been received."
        }

    def log(self, ctype, message):
        # Logs passed message with date and time to client.log.
        # For ctype "ERR", also disconnects and raises SystemExit.
        date = str(datetime.datetime.now()).split(".")[0]
        line = "[%s] %s" % (ctype, message)
        logging.info("%s | %s" % (date, line))
        if ctype == "ERR":
            try:
                self.disconnect()
            except OSError:
                pass
            raise SystemExit("[ERR] %s" % message)
        print(line)

    @staticmethod
    def get_filesize(size_bytes):
        # Converts bytes to larger suffix
        # Returns converted filesize as a string, e.g. "2.00KB"
        sizes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
        i = 0
        # Bug fix: bound the loop by the suffix table itself. The old code
        # used "i < 5" with only 4 suffixes, so sizes[i] raised IndexError
        # for values over ~1TB.
        while size_bytes > 1024 and i < len(sizes) - 1:
            size_bytes = size_bytes / 1024.00
            i += 1
        return "%0.2f%s" % (size_bytes, sizes[i])

    # Arguement Checkers
    def check_command(self, command):
        # Validate the command list: first item must be list/put/get,
        # and put/get require exactly one filename argument.
        cmd_type = command[0].lower()
        if cmd_type not in ["list", "put", "get"]:
            self.log("ERR", "The parameter %s is not supported by this client. Try: %s" % (cmd_type, EXAMPLE_INPUT))
        if (cmd_type == "put" or cmd_type == "get") and len(command) != 2:
            self.log("ERR",
                     "The \"%s\" command must be followed by the <filename> field. Try: %s" % (cmd_type, EXAMPLE_INPUT))
        return command

    def check_host(self, host):
        # Allow "localhost" or hosts made of letters/digits/._- only.
        if host.lower() != "localhost" and (" " in host or not re.match(r"^[a-zA-Z0-9_.-]*$", host)):
            self.log("ERR", "The domain/IP address provided contains spaces and/or special characters. " +
                     "Allowed characters: letters, numbers, periods, dashes and underscores.")
        return host

    def check_port(self, port):
        # Port must be a 1-5 digit positive number; returned as int.
        if not port.isdigit() or not (1 <= len(port) <= 5):
            self.log("ERR", "The port parameter that has been provided is too short/long or is not a numerical value")
        if int(port) < 0:
            self.log("ERR", "The port parameter that has been provided is not a positive numerical value")
        return int(port)

    def start(self):
        # Dispatch the validated command to its handler.
        self.log("OK!", "Client startup initialised.")
        # Parse command list and check if valid command. Also, check if command needs the parameter filename
        if self.command[0] == "list":
            self.protocol_commands[self.command[0]]()
        else:
            self.protocol_commands[self.command[0]](filename=self.command[1])
        # After command execution, notify server of disconnect and close socket on client side.
        # self.disconnect()

    def connect(self):
        try:
            # Try connect to server. If connection refused, log and raise SystemExit
            self.cli_socket.connect((self.host, self.port))
            self.log("CON", "Successfully connected to server at: %s:%s" % (self.host, self.port))
            self.connected = True
        except (socket.gaierror, ConnectionRefusedError) as e:
            self.cli_socket.close()
            self.log("ERR", "An error occurred when connecting to host %s:%s\n%s" % (self.host, self.port, str(e)))

    def disconnect(self):
        # Notify server of disconnect, then close client.
        if self.connected:
            self.connected = False
            self.cli_socket.send(b"DISCONNECT")
            self.log("DIS", "Disconnected from server.")

    # Command execution
    def put_file(self, filename):
        # Upload `filename` to the server ('PUT' protocol command).
        # Bug fix: the validation branches below used to sendall() a protocol
        # error code, but the socket is not connected yet at this point, so
        # sendall raised OSError before the error could even be logged.
        # Validation is local; just log (log("ERR", ...) exits the client).
        if filename not in os.listdir(os.getcwd()):
            self.log("ERR", "FileNotFound: " + self.protocol_errors["FileNotFound"] + " (server).")
        elif len(filename) > 255:
            self.log("ERR", "FileNameTooLong: " + self.protocol_errors["FileNameTooLong"])
        elif os.path.isdir('%s/%s' % (os.getcwd(), filename)):
            self.log("ERR", "FileIsDirectory: " + self.protocol_errors["FileIsDirectory"])
        elif os.path.getsize(('%s/%s' % (os.getcwd(), filename))) > 5368709120:
            self.log("ERR", "FileTooLarge: " + self.protocol_errors["FileTooLarge"])
        elif os.path.getsize(('%s/%s' % (os.getcwd(), filename))) == 0:
            self.log("ERR", "FileZeroSized: " + self.protocol_errors["FileZeroSized"])
        else:
            self.log("OK!", "File '%s' found in client directory. Sending server total file-size." % filename)
            self.connect()
            self.cli_socket.sendall(("PUT " + filename).encode())
            # send client the filesize of file being sent.
            response = self.cli_socket.recv(24).decode()
            if response in self.protocol_errors:
                self.log("ERR", "Server response: \"%s\" - %s" % (response, self.protocol_errors[response]))
            elif response in self.protocol_messages:
                filesize = str(os.path.getsize(os.getcwd() + '/' + filename))
                self.cli_socket.sendall(filesize.encode())
                max_size = self.get_filesize(int(filesize))
                bytes_sent = 0
                upload = open(os.getcwd() + '/' + filename, 'rb')
                data = upload.read(4096)
                while data:
                    bytes_sent += len(data)
                    current_size = self.get_filesize(bytes_sent)
                    print("[UPL] Uploading '%s' [%s / %s]\t" % (filename, current_size, max_size), end='\r')
                    self.cli_socket.sendall(data)
                    data = upload.read(4096)
                self.log("UPL", "Upload Complete '%s' [%s / %s]" % (filename, current_size, max_size))

    def get_file(self, filename):
        # send GET request to server, w/ filename
        self.log("CMD", "Invoking Server Protocol 'GET' command with filename: %s" % filename)
        # If filename exists in client directory, do not continue
        if filename in os.listdir(os.getcwd()):
            self.log("ERR", "FileAlreadyExists: " + self.protocol_errors["FileAlreadyExists"] + " (client).")
        self.connect()
        self.cli_socket.sendall(("GET " + filename).encode())
        # If server responds with a protocol error, log and raise SystemExit
        response = self.cli_socket.recv(1024).decode()
        if response in self.protocol_errors:
            self.log("ERR", "Server response: \"%s\" - %s" % (response, self.protocol_errors[response]))
        elif response in self.protocol_messages:
            self.log("OK!", "Server response: \"%s\" - %s" % (response, self.protocol_messages[response]))
        # Else server has resonded with filesize. Continue with downloading file.
        # NOTE(review): if the server replied with a protocol *message* (e.g.
        # "FileOkTransfer") rather than a raw size, int(response) below raises
        # ValueError — confirm the server always sends the size in this reply.
        file_size = int(response)
        bytes_collected = 0
        max_size = self.get_filesize(file_size)
        download_file = open(filename, 'wb')
        # Write downloded byte data to a file named by filename received form server.
        while bytes_collected < file_size:
            data = self.cli_socket.recv(4096)
            bytes_collected += len(data)
            current_size = self.get_filesize(bytes_collected)
            download_file.write(data)
            print("[DWN] Downloading '%s' [%s / %s]" % (filename, current_size, max_size), end='\r')
        # Once filesize matches the downloaded bytes we have received, close file (download complete).
        download_file.close()
        self.log("DWN", "Download Complete '%s' [%s / %s]" % (filename, current_size, max_size))
        self.log("OK!", "File saved to: %s/%s" % (os.getcwd(), filename))

    def show_list(self):
        # send LIST request to server, w/ no other parameters.
        self.log("CMD", "Invoking Server Protocol 'LIST' command.")
        self.connect()
        self.cli_socket.sendall("LIST".encode())
        # If response is empty, log and raise SystemExit. Else, print response.
        response = self.cli_socket.recv(16384)
        if response:
            self.log("OK!", "Server responded with:\n%s" % response.decode())
        else:
            self.log("ERR", "Server responded without a file list.")
if __name__ == '__main__':
    # Need at least: script name, host, port and a command.
    if len(sys.argv) < 4:
        raise SystemExit("[ERR] The domain/IP and port parameters are required:\n" + EXAMPLE_INPUT)
    host_arg, port_arg = sys.argv[1], sys.argv[2]
    client = FTPClient(host=host_arg, port=port_arg, command=sys.argv[3:])
    client.start()
| denBot/clientserver-ftp-sockets-demo | src/client.py | client.py | py | 10,207 | python | en | code | 0 | github-code | 36 |
24390096374 | from itertools import chain
from . import builder
from .. import options as opts, safe_str, shell
from .common import Builder, choose_builder, SimpleBuildCommand
from ..file_types import HeaderFile, SourceFile
from ..iterutils import iterate
from ..languages import known_langs
from ..path import Path
from ..versioning import detect_version
# Set the source language to C++, since we want to be able to use the C++
# language definition to infer whether a file passed to `moc` is a source or
# header file based on its extension.
with known_langs.make('qtmoc', src_lang='c++') as x:
    x.vars(compiler='MOC', flags='MOCFLAGS')
# Qt resource files (.qrc), compiled by rcc into C++ sources.
with known_langs.make('qrc') as x:
    x.vars(compiler='RCC', flags='RCCFLAGS')
    x.exts(source=['.qrc'])
# Qt Designer UI files (.ui), compiled by uic into C++ headers.
with known_langs.make('qtui') as x:
    x.vars(compiler='UIC', flags='UICFLAGS')
    x.exts(source=['.ui'])
@builder('qtmoc')
def moc_builder(env):
    """Construct the builder for Qt's meta-object compiler (moc)."""
    lang = known_langs['qtmoc']
    return choose_builder(env, lang, (MocBuilder,), default_candidates=['moc'])
class MocBuilder(Builder):
    """Builder wrapper for Qt's meta-object compiler (``moc``)."""

    def __init__(self, env, langinfo, command, found, version_output):
        # Brand and version are inferred from `moc --version` output.
        super().__init__(langinfo.name, *self._parse_brand(version_output))
        name = langinfo.var('compiler').lower()
        mocflags_name = langinfo.var('flags').lower()
        # Extra user-supplied flags come from the $MOCFLAGS variable.
        mocflags = shell.split(env.getvar(langinfo.var('flags'), ''))
        self.transpiler = MocCompiler(
            self, env, command=(name, command, found),
            flags=(mocflags_name, mocflags)
        )

    @staticmethod
    def _parse_brand(version_output):
        # Return (brand, version); brand is 'unknown' when the tool does not
        # identify itself as moc.
        if 'moc' in version_output:
            return 'qt', detect_version(version_output)
        return 'unknown', None

    @staticmethod
    def check_command(env, command):
        # Probe the candidate command; raises if it cannot be executed.
        return env.execute(command + ['--version'], stdout=shell.Mode.pipe,
                           stderr=shell.Mode.devnull)
class MocCompiler(SimpleBuildCommand):
    """Build command that runs ``moc`` to generate C++ from Qt headers/sources."""

    @property
    def deps_flavor(self):
        # moc emits no dependency information.
        return None

    def _call(self, cmd, input, output, flags=None):
        return list(chain(
            cmd, iterate(flags), [input, '-o', output]
        ))

    def default_name(self, input, step):
        # Conventional Qt naming: foo.cpp -> foo.moc for sources,
        # foo.hpp -> moc_foo.cpp for headers.
        if isinstance(input, SourceFile):
            return input.path.stripext('.moc').suffix
        base, leaf = input.path.stripext(
            known_langs['c++'].default_ext('source')
        ).splitleaf()
        return base.append('moc_' + leaf).suffix

    def output_file(self, name, step):
        # moc output is always compiled as C++.
        return SourceFile(Path(name), 'c++')

    def flags(self, options, global_options=None, output=None, mode='normal'):
        # Translate generic bfg9000 options into moc command-line flags.
        flags = []
        for i in options:
            if isinstance(i, opts.include_dir):
                flags.append('-I' + i.directory.path)
            elif isinstance(i, opts.define):
                if i.value:
                    flags.append('-D' + i.name + '=' + i.value)
                else:
                    flags.append('-D' + i.name)
            elif isinstance(i, opts.warning):
                # moc only supports disabling warnings wholesale.
                for j in i.value:
                    if j == opts.WarningValue.disable:
                        flags.append('--no-warnings')
                    else:
                        raise ValueError('unsupported warning level {!r}'
                                         .format(j))
            elif isinstance(i, safe_str.stringy_types):
                flags.append(i)
            else:
                raise TypeError('unknown option type {!r}'.format(type(i)))
        return flags
@builder('qrc')
def qrc_builder(env):
    """Construct the builder for Qt resource (.qrc) files."""
    lang = known_langs['qrc']
    return choose_builder(env, lang, (RccBuilder,), default_candidates=['rcc'])
class RccBuilder(Builder):
    """Builder wrapper for Qt's resource compiler (``rcc``)."""

    def __init__(self, env, langinfo, command, found, version_output):
        # Brand and version are inferred from `rcc --version` output.
        super().__init__(langinfo.name, *self._parse_brand(version_output))
        name = langinfo.var('compiler').lower()
        rccflags_name = langinfo.var('flags').lower()
        # Extra user-supplied flags come from the $RCCFLAGS variable.
        rccflags = shell.split(env.getvar(langinfo.var('flags'), ''))
        self.transpiler = RccCompiler(
            self, env, command=(name, command, found),
            flags=(rccflags_name, rccflags)
        )

    @staticmethod
    def _parse_brand(version_output):
        # Return (brand, version); brand is 'unknown' when the tool does not
        # identify itself as rcc.
        if 'rcc' in version_output:
            return 'qt', detect_version(version_output)
        return 'unknown', None

    @staticmethod
    def check_command(env, command):
        # Probe the candidate command; raises if it cannot be executed.
        return env.execute(command + ['--version'], stdout=shell.Mode.pipe,
                           stderr=shell.Mode.devnull)
class RccCompiler(SimpleBuildCommand):
    """Build command that runs ``rcc`` to compile .qrc resource files to C++."""

    @property
    def deps_flavor(self):
        # rcc dependency output is gcc-style (Makefile fragments).
        return 'gcc'

    def _call(self, cmd, input, output, deps=None, flags=None):
        result = list(chain(cmd, iterate(flags), [input, '-o', output]))
        if deps:
            # Wrap with the rccdep helper so a depfile is also produced.
            return self.env.tool('rccdep')(result, deps)
        return result

    def default_name(self, input, step):
        # foo.qrc -> foo.cpp (default C++ source extension).
        return input.path.stripext(
            known_langs['c++'].default_ext('source')
        ).suffix

    def output_file(self, name, step):
        # rcc output is always compiled as C++.
        return SourceFile(Path(name), 'c++')

    def flags(self, options, global_options=None, output=None, mode='normal'):
        # rcc accepts only plain string flags.
        flags = []
        for i in options:
            if isinstance(i, safe_str.stringy_types):
                flags.append(i)
            else:
                raise TypeError('unknown option type {!r}'.format(type(i)))
        return flags
@builder('qtui')
def qtui_builder(env):
    """Construct the builder for Qt Designer (.ui) files."""
    lang = known_langs['qtui']
    return choose_builder(env, lang, (UicBuilder,), default_candidates=['uic'])
class UicBuilder(Builder):
    """Builder wrapper for Qt's UI compiler (``uic``)."""

    def __init__(self, env, langinfo, command, found, version_output):
        # Brand and version are inferred from `uic --version` output.
        super().__init__(langinfo.name, *self._parse_brand(version_output))
        name = langinfo.var('compiler').lower()
        uicflags_name = langinfo.var('flags').lower()
        # Extra user-supplied flags come from the $UICFLAGS variable.
        uicflags = shell.split(env.getvar(langinfo.var('flags'), ''))
        self.transpiler = UicCompiler(
            self, env, command=(name, command, found),
            flags=(uicflags_name, uicflags)
        )

    @staticmethod
    def _parse_brand(version_output):
        # Return (brand, version); brand is 'unknown' when the tool does not
        # identify itself as uic.
        if 'uic' in version_output:
            return 'qt', detect_version(version_output)
        return 'unknown', None

    @staticmethod
    def check_command(env, command):
        # Probe the candidate command; raises if it cannot be executed.
        return env.execute(command + ['--version'], stdout=shell.Mode.pipe,
                           stderr=shell.Mode.devnull)
class UicCompiler(SimpleBuildCommand):
    """Build command that runs ``uic`` to turn .ui files into ui_*.h headers."""

    @property
    def deps_flavor(self):
        # uic emits no dependency information.
        return None

    def _call(self, cmd, input, output, flags=None):
        command_line = list(cmd)
        command_line.extend(iterate(flags))
        command_line.extend([input, '-o', output])
        return command_line

    def default_name(self, input, step):
        # Conventional Qt naming: foo.ui -> ui_foo.h.
        parent, leaf = input.path.stripext('.h').splitleaf()
        return parent.append('ui_' + leaf).suffix

    def output_file(self, name, step):
        return HeaderFile(Path(name), 'c++')

    def flags(self, options, global_options=None, output=None, mode='normal'):
        # uic accepts only plain string flags.
        result = []
        for opt in options:
            if not isinstance(opt, safe_str.stringy_types):
                raise TypeError('unknown option type {!r}'.format(type(opt)))
            result.append(opt)
        return result
| jimporter/bfg9000 | bfg9000/tools/qt.py | qt.py | py | 7,250 | python | en | code | 73 | github-code | 36 |
20857741707 | #https://leetcode.com/problems/xor-operation-in-an-array/
class Solution:
    def xorOperation(self, n: int, start: int) -> int:
        """Return the XOR of nums[i] = start + 2*i for i in range(n).

        Bug fix: the original built a list, popped its head with slicing and
        crashed with IndexError for n == 0; this accumulates directly and
        returns 0 (the XOR identity) for an empty sequence.
        """
        result = 0
        for i in range(n):
            result ^= start + 2 * i
        return result
12610161351 | #!/usr/bin/python
import sys
#input should be space-separated lines of
#waterfall-like quantites
#with time on the vertical axis
#types on horizontal axis
#bar heights as values
# Emit one "time type value" triple per cell, with a blank line after each
# input row (the blank line separates scans for gnuplot's splot/pm3d).
linecnt = 0
for line in sys.stdin:
    for colcnt, col in enumerate(line.split()):
        print('%d %d %d' % (linecnt, colcnt, float(col)))
    print()
    linecnt += 1
| marcuswanner/randuino | hist2pm3d.py | hist2pm3d.py | py | 387 | python | en | code | 3 | github-code | 36 |
73527120745 | '''
candidate generation: writes a pickle file of candidates
'''
import sys
import nltk
import numpy as np
from ncbi_normalization import load, sample
from ncbi_normalization.parse_MEDIC_dictionary import concept_obj
from normalize import dump_data, load_data, load_mentions
from gensim.models import KeyedVectors
def prepare_embedding_vocab(filename, binary = True, limit = 1000000):
    '''filename: '~/disease-normalization/data/embeddings/wvec_50_haodi-li-et-al.bin'
    1. Read the embedding model with gensim
    2. Order words by their model index
    3. L2-normalize the vectors in place
    4. Build a token->index vocabulary (0 = padding, 1 = OOV)
    5. Build the inverse index->token mapping
    Returns (vector_model, vocabulary, inversed_vocabulary).
    '''
    vector_model = KeyedVectors.load_word2vec_format(filename, binary = binary, limit = limit)
    sorted_items = sorted(vector_model.vocab.items(), key = lambda item: item[1].index)
    words = [word for word, _ in sorted_items]
    vector_model.init_sims(replace = True)
    vocabulary = {"<SPECIAL>": 0, "<OOV>": 1}
    for word in words:
        vocabulary.setdefault(word, len(vocabulary))
    inversed_vocabulary = {index: word for word, index in vocabulary.items()}
    return vector_model, vocabulary, inversed_vocabulary
def load_pretrained_word_embeddings(vocab, embedding_model):
    """vocab: vocabulary from data vectorizer
    embedding_model: model loaded with gensim
    Returns an embedding matrix aligned with vocab indices: row 0 is the
    all-zero padding vector, known words copy their pretrained vector, and
    unknown words keep a small uniform-random initialisation."""
    dim = embedding_model.vectors.shape[1]
    matrix = np.random.uniform(low=-0.05, high=0.05, size=(len(vocab) - 1, dim))
    matrix = np.vstack((np.zeros(shape=(1, dim)), matrix))
    hits = 0
    for word, idx in vocab.items():
        if word in embedding_model.vocab:
            matrix[idx] = embedding_model.get_vector(word)
            hits += 1
    print("Found pretrained vectors for {found} words.".format(found=hits))
    return matrix
def load_concepts(dict_file,order):
    '''
    dict_file: directory to the tsv file of MEDIC dictionary
    order: concept ordering used when building the concept object
    dictionary.loaded format:
    dictionary of entries, key = canonical id, value = named tuple in the form of
    MEDIC_ENTRY(DiseaseID='MESH:D005671', DiseaseName='Fused Teeth',
    AllDiseaseIDs=('MESH:D005671',), AllNames=('Fused Teeth', 'Teeth, Fused')
    Returns (concept, dictionary); concept names are lower-cased in place.
    '''
    # MEDIC dictionary
    dictionary = load.Terminology()
    dictionary.loaded = load.load(dict_file,'MEDIC')
    concept = concept_obj(dictionary,order=order)
    # Lower-case every surface name so later string comparisons are
    # case-insensitive.
    concept.names = [name.lower() for name in concept.names]
    return concept, dictionary
def span_to_sum_of_w2v(spans, vocabulary, pretrained):
    '''
    Represent each text span as the sum of the w2v vectors of its tokens.
    Unknown tokens map to index 1 (<OOV>).
    '''
    vectors = []
    for span in spans:
        tokens = nltk.word_tokenize(span.lower())
        indices = [vocabulary.get(token, 1) for token in tokens]
        span_vector = np.sum(np.array([pretrained[i] for i in indices]), axis=0)
        vectors.append(span_vector)
    return np.array(vectors)
def cosine_similarity_candidates(mention_spans,concept_spans,emb_path,n_cossim):
    '''
    yields list of list of candidates
    n_cossim = number of candidates for each mention
    NOTE(review): n_cossim must be an int for np.argpartition below —
    callers passing sys.argv values need to convert first.
    '''
    # prepare embeddings
    vector_model, vocabulary, inversed_vocabulary = prepare_embedding_vocab(emb_path, binary = True)
    pretrained = load_pretrained_word_embeddings(vocabulary, vector_model)
    # vector representations
    mention_embeddings = span_to_sum_of_w2v(mention_spans,vocabulary,pretrained)
    concept_embeddings = span_to_sum_of_w2v(concept_spans,vocabulary,pretrained)
    from sklearn.preprocessing import normalize
    # L2-normalise both sides so the dot product equals cosine similarity.
    concept_embeddings = normalize(concept_embeddings)
    mention_embeddings = normalize(mention_embeddings)
    dot_product_matrix = np.dot(mention_embeddings,np.transpose(concept_embeddings))
    dot_product_matrix = dot_product_matrix.tolist()
    # For each mention, keep indices of the n_cossim highest similarities.
    candidate_indices = [np.argpartition(np.array(mention_candidates),-n_cossim)[-n_cossim:].tolist() for mention_candidates in dot_product_matrix]
    return candidate_indices
def jaccard_distance_candidates(mention_spans, concept_spans, n_jaccard):
    '''
    For each mention, return the indices of the n_jaccard concepts with the
    SMALLEST Jaccard distance over character sets (i.e. the most similar).

    Bug fix: the original took argpartition(...)[-n:], which selects the
    LARGEST distances — the *least* similar concepts. Candidates must be the
    nearest concepts, so we partition on the smallest distances instead.
    '''
    candidate_indices = []
    for mention in mention_spans:
        distances = np.array([nltk.jaccard_distance(set(mention), set(concept)) for concept in concept_spans])
        # kth = n_jaccard - 1 puts the n_jaccard smallest distances first.
        indices = np.argpartition(distances, n_jaccard - 1)[:n_jaccard].tolist()
        candidate_indices.append(indices)
    return candidate_indices
if __name__ == "__main__":
    '''
    1. prepare concept spans & mention spans
    2. get the candidates based on cosine similarity
    3. get the candidates based on Jaccard distance
    4. prepare (start, end, span), gold standard
    '''
    dict_file = 'data/CTD_diseases.tsv'
    dev_file = 'data/NCBIdevelopset_corpus.txt'
    emb_path = 'data/wvec_50_haodi-li-et-al.bin'
    # Bug fix: argv values are strings, but np.argpartition (used in both
    # candidate generators) requires integer kth values — convert here.
    n_cossim = int(sys.argv[1])
    n_jaccard = int(sys.argv[2])
    save_to = 'data/selected_max200.pickle'
    # (1)
    # concepts
    [potato0,potato1,concept_order,potato2,potato3,potato4] = load_data('data/sampled_dev_set.pickle')
    del potato0, potato1, potato2, potato3, potato4
    concept, dictionary = load_concepts(dict_file,concept_order)
    # mentions
    corpus_dev = load_mentions(dev_file,'dev corpus')
    # (2)
    cossim_candidate_indices = cosine_similarity_candidates(corpus_dev.names,concept.names,emb_path,n_cossim)
    # (3)
    jaccard_candidate_indices = jaccard_distance_candidates(corpus_dev.names,concept.names,n_jaccard)
    # (4) merge the two candidate lists per mention (deduplicated, sorted)
    assert len(cossim_candidate_indices)==len(jaccard_candidate_indices)
    candidates = []
    for cossim,jaccard in zip(cossim_candidate_indices,jaccard_candidate_indices):
        mention_candidates = sorted(list(set(cossim+jaccard)))
        candidates.append(mention_candidates)
    positives_training, positives_dev, positives_dev_truncated = load_data('data/gitig_positive_indices.pickle')
    del positives_training, positives_dev_truncated
    # NOTE(review): `vocabulary` is not defined at module scope — it is local
    # to prepare_embedding_vocab — so this line raises NameError as written.
    # The vocabulary built inside cosine_similarity_candidates likely needs
    # to be returned and reused here; confirm against sample.prepare_positives.
    positives_dev = sample.prepare_positives(positives_dev,nltk.word_tokenize,vocabulary)
    can_val_data = sample.NewDataSet('dev corpus')
    can_val_data.y = []
    can_val_data.mentions = []
    start = 0
    # Flatten candidates into one long label vector; mentions keep their
    # (start, end) slice into it plus the surface span.
    for cans, poss, span in zip(candidates,positives_dev,corpus_dev.names):
        end = start + len(cans)
        (chosen_idx, idces), e_token_indices = poss
        can_val_data.y.extend([1 if can in idces else 0 for can in cans])
        can_val_data.mentions.append((start,end,span))
        start = end
    assert len(can_val_data.mentions)==len(candidates)
    data = [candidates, can_val_data.mentions, can_val_data.y]
    dump_data(save_to,data)
| fshdnc/nor-bert | src/candidate_generation.py | candidate_generation.py | py | 6,793 | python | en | code | 0 | github-code | 36 |
24759427933 | #!/usr/bin/python3
def safe_print_list(my_list=[], x=0):
    """Print up to x elements of my_list on one line, without using len().

    Returns the number of elements actually printed.
    """
    printed = 0
    for idx in range(x):
        try:
            print("{}".format(my_list[idx]), end="")
        except IndexError:
            break
        printed += 1
    print()
    return printed
| Riyo3350G/alx-higher_level_programming | 0x05-python-exceptions/0-safe_print_list.py | 0-safe_print_list.py | py | 256 | python | en | code | 0 | github-code | 36 |
36955740049 | import wttest
from wtscenario import make_scenarios
from wiredtiger import WT_NOTFOUND
# test_prepare_hs05.py
# Test that after aborting prepare transaction, correct update from the history store is restored.
class test_prepare_hs05(wttest.WiredTigerTestCase):
    """After aborting a prepared transaction, verify that the correct older
    update is restored from the history store."""
    conn_config = 'cache_size=50MB'
    # Run under row-store, variable-length and fixed-length column-store.
    format_values = [
        ('column', dict(key_format='r', key=1, value_format='S')),
        ('column-fix', dict(key_format='r', key=1, value_format='8t')),
        ('string-row', dict(key_format='S', key=str(1), value_format='S')),
    ]
    scenarios = make_scenarios(format_values)
    def test_check_prepare_abort_hs_restore(self):
        uri = 'table:test_prepare_hs05'
        create_params = 'key_format={},value_format={}'.format(self.key_format, self.value_format)
        self.session.create(uri, create_params)
        # FLCS stores bytes, the other formats store strings.
        if self.value_format == '8t':
            value1 = 97
            value2 = 98
            value3 = 99
        else:
            value1 = 'a' * 5
            value2 = 'b' * 5
            value3 = 'c' * 5
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(1))
        cursor = self.session.open_cursor(uri)
        key = self.key
        # Commit the first value at timestamp 2.
        self.session.begin_transaction()
        cursor[key] = value1
        cursor.set_key(key)
        self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(2))
        # Commit update and remove operation in the same transaction.
        self.session.begin_transaction()
        cursor[key] = value2
        cursor.set_key(key)
        cursor.remove()
        self.session.commit_transaction('commit_timestamp=' + self.timestamp_str(3))
        # Add a prepared update for the key.
        self.session.begin_transaction()
        cursor[key] = value3
        self.session.prepare_transaction('prepare_timestamp='+ self.timestamp_str(4))
        # Try to evict the page with prepared update. This will ensure that prepared update is
        # written as the on-disk version and the older versions are moved to the history store.
        session2 = self.conn.open_session()
        session2.begin_transaction('ignore_prepare=true')
        cursor2 = session2.open_cursor(uri, None, "debug=(release_evict=true)")
        cursor2.set_key(key)
        if self.value_format == '8t':
            # In FLCS, deleted values read back as 0.
            self.assertEquals(cursor2.search(), 0)
            self.assertEquals(cursor2.get_value(), 0)
        else:
            self.assertEquals(cursor2.search(), WT_NOTFOUND)
        cursor2.reset()
        # This should abort the prepared transaction.
        self.session.rollback_transaction()
        self.session.checkpoint()
        # We should be able to read the older version of the key from the history store.
        self.session.begin_transaction('read_timestamp='+self.timestamp_str(2))
        cursor.set_key(key)
        self.assertEqual(cursor.search(), 0)
        self.assertEqual(cursor.get_value(), value1)
        self.session.rollback_transaction()
        # The latest version should be marked deleted.
        self.session.begin_transaction()
        cursor.set_key(key)
        if self.value_format == '8t':
            # In FLCS, deleted values read back as 0.
            self.assertEquals(cursor.search(), 0)
            self.assertEquals(cursor.get_value(), 0)
        else:
            self.assertEqual(cursor.search(), WT_NOTFOUND)
        self.session.rollback_transaction()
| mongodb/mongo | src/third_party/wiredtiger/test/suite/test_prepare_hs05.py | test_prepare_hs05.py | py | 3,463 | python | en | code | 24,670 | github-code | 36 |
31965515598 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vi: ts=4 sw=4
import pickle
from ..Protocols import *
from scipy.spatial.distance import cdist
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans, MeanShift, estimate_bandwidth, AffinityPropagation, SpectralClustering # Clustering methods
from sklearn.decomposition import PCA
import skimage
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.patches as patches
class cluster(ProtocolMultiple):
    def __init__(self, name='cluster', **kwargs):
        """Protocol that clusters flakes (from per-image .pkl results) by
        their color/shape features. kwargs override the defaults below."""
        self.name = self.__class__.__name__ if name is None else name
        self.default_ext = '.pkl'
        self.run_args = {
            'file_extension' : '.pkl',
            'force' : False,
            'verbosity' : 3,
            'num_jobs' : None,
            'num_clusters' : 20,
            'cluster_method' : 'kmeans', # 'affinity', 'kmeans', 'meanshift'
            'features' : 'all', # 'shape', 'color', 'all'
            'feature_normed_range' : [-2, +2], # range for plotting the normed features
            'bbox_pad' : 0.5,
            'image_contrast' : (0, 1),
            'image_contrast_trim' : None,
            'overlays' : 3,
            }
        self.run_args.update(kwargs)
        # WARNING: This association of features names is hard-coded, and is thus contingent
        # on the current implementation of Protocols.py>flake_analysis
        self.feature_names_color = [
            'g contrast',
            'v contrast',
            'gray',
            'gray std',
            'H',
            'S',
            'V',
            'H std',
            'S std',
            'V std',
            'R',
            'G',
            'B',
            'R std',
            'G std',
            'B std',
            'entropy'
            ]
        # Each color feature also has an "_inner" variant (flake interior).
        self.feature_names_color = self.feature_names_color + ['{}_inner'.format(f) for f in self.feature_names_color]
        # Shape features: perimeter/area ratio, 15 histogram bins, fractal dim.
        self.feature_names_shape = ['P/A'] + ['hist {}'.format(i) for i in range(15)] + ['fractal dimension']
def load_flakes(self, datas, **run_args):
flakes = []
for data in datas:
with open(data.infile, 'rb') as fin:
saved = pickle.load(fin) # 'res_map', 'image_labelmap', 'flakes'
if len(flakes)==0:
h, w = saved['res_map'].shape
for flake in saved['flakes']:
flakes.append(flake)
#self.print_structure(flake)
if run_args['verbosity']>=5:
print(' {} flakes added from image {}'.format(len(saved['flakes']), data.infile))
return flakes
    def load_flakes_parallel(self, datas, **run_args):
        """Load flakes from all result pickles in parallel (joblib) and
        flatten the per-file lists into a single list.
        run_args must include 'num_jobs' (None lets joblib decide)."""
        # Parallelize loading
        # Doesn't seem to actually run faster (likely I/O limited)
        from joblib import Parallel, delayed
        import itertools
        flakes = Parallel(n_jobs=run_args['num_jobs'])(delayed(self.load_flake_pkl)(data.infile) for data in datas)
        flakes = list(itertools.chain.from_iterable(flakes))
        return flakes
def load_flake_pkl(self, infile):
with open(infile, 'rb') as fin:
flakes = pickle.load(fin)['flakes']
return flakes
    def load_features(self, flakes, **run_args):
        """Build the (num_flakes, num_features) feature matrix selected by
        run_args['features'] ('all', 'color' or 'shape'), updating
        self.feature_names from the flakes' own name lists when present."""
        if run_args['features']=='all':
            # Concatenate color and shape features per flake.
            features = [ np.concatenate([flake['flake_color_fea'], flake['flake_shape_fea']]) for flake in flakes ]
            if 'flake_color_fea_names' in flakes[0]:
                self.feature_names_color = flakes[0]['flake_color_fea_names']
            if 'flake_shape_fea_names' in flakes[0]:
                self.feature_names_shape = flakes[0]['flake_shape_fea_names']
            self.feature_names = self.feature_names_color + self.feature_names_shape
        else:
            features = [ flake['flake_{}_fea'.format(run_args['features'])] for flake in flakes ]
            if run_args['features']=='color':
                if 'flake_color_fea_names' in flakes[0]:
                    self.feature_names = flakes[0]['flake_color_fea_names']
                else:
                    self.feature_names = self.feature_names_color
            elif run_args['features']=='shape':
                if 'flake_shape_fea_names' in flakes[0]:
                    self.feature_names = flakes[0]['flake_shape_fea_names']
                else:
                    self.feature_names = self.feature_names_shape
            else:
                # Unknown selector: fall back to whatever names the flakes
                # carry, or an empty list.
                if 'flake_{}_fea_names'.format(run_args['features']) in flakes[0]:
                    self.feature_names = flakes[0]['flake_{}_fea_names'.format(run_args['features'])]
                else:
                    self.feature_names = []
        return np.asarray(features)
    def load_clustering(self, basename, output_dir='./', features_rescaled=None, **run_args):
        """Load the clustering results saved by cluster.run, looking first in
        output_dir and then in the sibling ../cluster/ directory; as a last
        resort recompute minimal per-feature distributions from
        features_rescaled."""
        # Load data aggregated from the "cluster" protocol into a cluster.pkl file
        savefile = self.get_outfile(basename, output_dir, ext=run_args['file_extension'])
        if os.path.exists(savefile):
            with open(savefile, 'rb') as fin:
                clustering = pickle.load(fin)
        else:
            savefile = self.get_outfile(basename, output_dir+'/../cluster/', ext=run_args['file_extension'])
            if os.path.exists(savefile):
                with open(savefile, 'rb') as fin:
                    clustering = pickle.load(fin)
            elif features_rescaled is not None:
                # Manually recompute some minimal aspects of clustering
                # Note: This mostly exists so that select_flakes.run has access to this information
                # even if cluster.run has never been run (and thus cluster.pkl doesn't exist).
                clustering = {}
                vmin, vmax = run_args['feature_normed_range']
                distributions, dist_bin_edges = np.apply_along_axis(lambda x: np.histogram(x, bins=50, range=[vmin,vmax], density=True), 0, features_rescaled)
                clustering['distributions'] = distributions
                clustering['dist_bin_edges'] = dist_bin_edges
            else:
                # NOTE(review): in this branch `clustering` is never assigned,
                # so the return below raises NameError — consider returning
                # None explicitly after printing the error.
                print("Error in cluster.load_clustering: we don't have access to clustering information.")
        return clustering
@run_default
def run(self, datas, output_dir, basename, **run_args):
    '''Aggregate flakes from all input images, cluster them in normalized
    feature space, save the clustering to a .pkl file, and generate
    diagnostic plots (PCA projections, inter-cluster distance map, and one
    summary image per cluster).

    Returns a results dict with runtime, method name, number of clusters
    found, and the list of files saved.
    '''
    results = {}
    clustering = {} # Save results of clustering operation
    # Aggregate results
    ########################################
    flakes = self.load_flakes(datas, **run_args)
    if run_args['verbosity']>=4:
        print(' {:,d} flakes identified in {:d} images'.format(len(flakes), len(datas)))
    features_orig = self.load_features(flakes, **run_args)
    # Clustering
    ########################################
    # Normalize each feature dimension (mean 0, std 1) so all features
    # contribute comparably to the clustering distance metric.
    rescale = StandardScaler()
    features = rescale.fit_transform(features_orig)
    if run_args['verbosity']>=4:
        print(" Clustering {:,d} flakes using '{}'".format(len(flakes), run_args['cluster_method']))
    start = time.time()
    n_jobs = run_args['num_jobs'] if 'num_jobs' in run_args else -1
    if run_args['cluster_method']=='kmeans':
        cluster_result = KMeans(n_clusters=run_args['num_clusters'], random_state=0, n_jobs=n_jobs).fit(features)
    elif run_args['cluster_method']=='meanshift':
        bandwidth = estimate_bandwidth(features, quantile=0.1)#, n_samples=int(features.shape[0]/10))
        cluster_result = MeanShift(bandwidth=bandwidth, bin_seeding=True, n_jobs=n_jobs).fit(features)
    elif run_args['cluster_method']=='affinity':
        cluster_result = AffinityPropagation().fit(features)
    elif run_args['cluster_method']=='spectral':
        # NOTE(review): SpectralClustering exposes no cluster_centers_
        # attribute, so the center-based code below will fail for this
        # method -- confirm before using 'spectral'.
        cluster_result = SpectralClustering(n_clusters=run_args['num_clusters'], n_jobs=n_jobs).fit(features)
    else:
        print("ERROR: clustering method '{}' not recognized.".format(run_args['cluster_method']))
        raise NotImplementedError
    clustering['cluster_result'] = cluster_result
    results['cluster_runtime'] = time.time()-start
    results['cluster_method'] = run_args['cluster_method']
    # Assignments are unsorted by default
    assignment = cluster_result.labels_
    results['num_clusters'] = len(np.unique(assignment))
    clustering['assignment'] = assignment # Label ids for each flake, saying what cluster it belongs to [unsorted indexing]
    if run_args['verbosity']>=4:
        print(" clustering took {:.1f}s ({:d} clusters)".format(results['cluster_runtime'], results['num_clusters']))
    # Sort clusters into a sensible order
    consider_features = np.asarray([flake['flake_color_fea'][:2] for flake in flakes]) # Grayscale and V contrast
    # The average for each cluster gives the position for the center of that cluster (in the feature space)
    central_features = np.zeros([results['num_clusters'], consider_features.shape[1]])
    for i in range(results['num_clusters']):
        cluster_i = np.nonzero(assignment==i)[0]
        # Bug fix: average per feature column (axis=0). Previously np.mean
        # collapsed the selection to a single scalar, broadcasting the same
        # value across the whole row of central_features.
        central_features[i,:] = np.mean(consider_features[cluster_i, :], axis=0)
    # Sort clusters by the overall magnitude of their contrast features
    clustering['sort_indices'] = np.argsort(np.abs(central_features).sum(1))
    clustering['unsort2sort'] = np.unique(clustering['sort_indices'], return_index=True)[1]
    clustering['cluster_centers'] = cluster_result.cluster_centers_[clustering['sort_indices']] # in (normed) feature space coordinates [sorted indexing]
    clustering['cluster_centers_orig'] = rescale.inverse_transform(clustering['cluster_centers']) # in (original) feature space coordinates [sorted indexing]
    clustering['cluster_center_distances'] = cdist(clustering['cluster_centers'], clustering['cluster_centers']) # in (normed) feature space coordinates [sorted indexing]
    # Compute additional things
    ########################################
    # The distribution (histogram) for each feature dimension
    # Since these are normed they should look somewhat Gaussian
    vmin, vmax = run_args['feature_normed_range']
    distributions, dist_bin_edges = np.apply_along_axis(lambda x: np.histogram(x, bins=50, range=[vmin,vmax], density=True), 0, features)
    clustering['distributions'] = distributions
    clustering['dist_bin_edges'] = dist_bin_edges
    # Output results
    ########################################
    # Save cluster results
    outfile = self.get_outfile(basename, output_dir, ext=run_args['file_extension'])
    results['files_saved'] = [
        { 'filename': '{}'.format(outfile) ,
          'description' : 'results of cluster analysis' ,
          'type' : 'data'
        } ,
        ]
    with open(outfile, 'wb') as fout:
        pickle.dump(clustering, fout)
    # Output images
    if run_args['verbosity']>=4:
        print(' Generating PCA 3D projection')
    # Pick a color for each cluster
    norm = mpl.colors.Normalize(vmin=0, vmax=results['num_clusters']-1)
    cmap = mpl.cm.jet
    #cmap = cmap_vge
    m = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
    cluster_colors = [m.to_rgba(index) for index in range(results['num_clusters'])]
    pca = PCA(n_components=3)
    coordinates = pca.fit_transform(features)
    outfile = self.get_outfile(basename, output_dir, ext='-{}.png'.format(run_args['cluster_method']))
    self.plot_pca(outfile, coordinates, assignment, cluster_colors, **run_args)
    if run_args['verbosity']>=4:
        print(' Generating map of distances')
    outfile = self.get_outfile('distances', output_dir, ext='-{}.png'.format(run_args['cluster_method']))
    self.plot_distances(outfile, clustering['cluster_center_distances'], cluster_colors, **run_args)
    if run_args['verbosity']>=4:
        print(' Generating cluster images')
    self.plot_clusters(output_dir, clustering['cluster_centers'], clustering['cluster_centers_orig'], clustering['sort_indices'], distributions, dist_bin_edges, flakes, features, assignment, rescale=rescale, **run_args)
    return results
def plot_pca(self, outfile, coordinates, assignment, cluster_colors, **run_args):
    '''Save a 2x2 panel of PCA projections of the flakes: one 3D scatter plus
    the three 2D pairwise projections, with each flake colored by its cluster
    and each cluster centroid marked with its index number.

    coordinates : (n_flakes, 3) PCA coordinates.
    assignment : cluster label per flake (unsorted indexing).
    cluster_colors : one RGBA color per cluster.
    '''
    # One color per flake, looked up from its cluster assignment
    flake_colors = [cluster_colors[index] for index in assignment]
    # Centroid of each cluster (in PCA coordinates)
    num_clusters = np.max(assignment)+1
    cluster_coordinates = np.zeros([num_clusters, coordinates.shape[1]])
    for i in range(num_clusters):
        cluster_i = np.nonzero(assignment==i)[0]
        cluster_coordinates[i,:] = np.mean(coordinates[cluster_i,:], axis=0)
    cluster_index = range(cluster_coordinates.shape[0])
    plt.rcParams['xtick.labelsize'] = 8
    plt.rcParams['ytick.labelsize'] = 8
    plt.rcParams['axes.labelsize'] = 12
    plt.rcParams['lines.markersize'] = 5
    cmap = run_args['cmap'] if 'cmap' in run_args else 'jet'
    alpha = 0.12  # heavy transparency so dense regions read as darker
    self.fig = plt.figure(figsize=(10,10))
    self.fig.subplots_adjust(left=0.08, right=0.95, bottom=0.08, top=0.95, hspace=0.15, wspace=0.15)
    # Top-right: 3D scatter of all three PCA components
    self.ax = self.fig.add_subplot(2,2,2 , projection='3d')
    self.ax.scatter(coordinates[:,0], coordinates[:,1], coordinates[:,2], c=flake_colors, alpha=0.3)
    self.ax.set_xlabel('$\mathrm{PCA}_1$', labelpad=-4)
    self.ax.set_ylabel('$\mathrm{PCA}_2$', labelpad=-4)
    self.ax.set_zlabel('$\mathrm{PCA}_3$', labelpad=-2)
    self.ax.tick_params(axis='both', which='major', pad=-1)
    self.ax.view_init(elev=30, azim=45-90)
    # Top-left: PCA1 vs PCA3
    self.ax = self.fig.add_subplot(2,2,1)
    self.ax.scatter(coordinates[:,0], coordinates[:,2], c=flake_colors, edgecolors=None, alpha=alpha)
    self.ax.set_xlabel('$\mathrm{PCA}_1$')
    self.ax.set_ylabel('$\mathrm{PCA}_3$')
    self.overlay_cluster_number(cluster_coordinates, cluster_index, 0, 2, cluster_colors)
    xi, xf, yi, yf = self.ax.axis()
    self.ax.text(xi,yf, '{:,d} flakes in {} clusters'.format(len(assignment), len(cluster_colors)), size=10, verticalalignment='top', horizontalalignment='left', alpha=0.5)
    # Bottom-left: PCA1 vs PCA2
    self.ax = self.fig.add_subplot(2,2,3)
    self.ax.scatter(coordinates[:,0], coordinates[:,1], c=flake_colors, cmap=cmap, edgecolors=None, alpha=alpha)
    self.ax.set_xlabel('$\mathrm{PCA}_1$')
    self.ax.set_ylabel('$\mathrm{PCA}_2$')
    self.overlay_cluster_number(cluster_coordinates, cluster_index, 0, 1, cluster_colors)
    # Bottom-right: PCA3 vs PCA2
    self.ax = self.fig.add_subplot(2,2,4)
    self.ax.scatter(coordinates[:,2], coordinates[:,1], c=flake_colors, cmap=cmap, edgecolors=None, alpha=alpha)
    self.ax.set_xlabel('$\mathrm{PCA}_3$')
    self.ax.set_ylabel('$\mathrm{PCA}_2$')
    self.overlay_cluster_number(cluster_coordinates, cluster_index, 2, 1, cluster_colors)
    plt.savefig(outfile, dpi=300)
    plt.close()
def overlay_cluster_number(self, cluster_coordinates, cluster_index, coord1, coord2, cluster_colors):
    '''Mark each cluster centroid on self.ax with a faded dot (outlined in
    the full cluster color) and its cluster index number.'''
    fade = 0.3  # fade=1 keeps the strong color; fade=0 washes out fully to white
    faded_colors = []
    for color in cluster_colors:
        red, green, blue, alpha = color[0], color[1], color[2], color[3]
        faded_colors.append([1 - (1 - red)*fade, 1 - (1 - green)*fade, 1 - (1 - blue)*fade, alpha])
    xs = cluster_coordinates[:, coord1]
    ys = cluster_coordinates[:, coord2]
    self.ax.scatter(xs, ys, s=25, c=faded_colors, edgecolor=cluster_colors, alpha=1)
    for index, (x, y) in enumerate(zip(xs, ys)):
        self.ax.text(x, y, '{}'.format(index), size=3, horizontalalignment='center', verticalalignment='center')
def plot_distances(self, outfile, cluster_center_distances, cluster_colors, plot_buffers=[0.15,0.05,0.15,0.05], **run_args):
    '''Save a heat-map of the pairwise distances between cluster centers
    (in normalized feature space), with color-coded cluster markers (and
    indices, when few enough to be legible) along the top and right edges.
    '''
    plt.rcParams['xtick.labelsize'] = 15
    plt.rcParams['ytick.labelsize'] = 15
    plt.rcParams['axes.labelsize'] = 20
    plt.rcParams['lines.markersize'] = 5
    self.fig = plt.figure( figsize=(8,8), facecolor='white' )
    left_buf, right_buf, bottom_buf, top_buf = plot_buffers
    fig_width = 1.0-right_buf-left_buf
    fig_height = 1.0-top_buf-bottom_buf
    self.ax = self.fig.add_axes( [left_buf, bottom_buf, fig_width, fig_height] )
    #plt.figtext(0,1, 'distances between clusters (in the feature space)', size=15, verticalalignment='top', horizontalalignment='left')
    self.ax.imshow(cluster_center_distances, cmap='viridis')
    self.ax.set_xlabel('cluster index')
    self.ax.set_ylabel('cluster index')
    xi, xf, yi, yf = self.ax.axis()
    s = 0.02
    n = len(cluster_colors)
    # Strip of cluster colors along the top edge
    self.axt = self.fig.add_axes( [left_buf, bottom_buf+fig_height, fig_width, s] )
    self.axt.scatter(range(n), np.ones(n), c=cluster_colors)
    if n<160:
        for i in range(n):
            self.axt.text(i, 1, '{}'.format(i), size=4, horizontalalignment='center', verticalalignment='center')
    self.axt.axis([xi, xf, 0, 2])
    self.axt.axes.get_xaxis().set_visible(False)
    self.axt.axes.get_yaxis().set_visible(False)
    # Matching strip along the right edge
    self.axr = self.fig.add_axes( [left_buf+fig_width, bottom_buf, s, fig_height] )
    self.axr.scatter(np.ones(n), range(n), c=cluster_colors)
    if n<80:
        for i in range(n):
            self.axr.text(1, i, '{}'.format(i), size=4, horizontalalignment='center', verticalalignment='center')
    self.axr.axis([0, 2, yi, yf])
    self.axr.axes.get_xaxis().set_visible(False)
    self.axr.axes.get_yaxis().set_visible(False)
    plt.savefig(outfile, dpi=300)
    # Bug fix: close the figure after saving, as the sibling plot_* methods
    # do; previously every call leaked an open matplotlib figure.
    plt.close(self.fig.number)
def plot_clusters(self, output_dir, cluster_centers, cluster_centers_orig, sort_indices, distributions, dist_bin_edges, flakes, flake_features, assignment, rescale=None, plot_buffers=[0.01,0.0,0.0,0.045], **run_args):
    '''Generate one summary image per cluster (delegating to plot_cluster).

    cluster_centers / cluster_centers_orig use the *sorted* cluster indexing,
    whereas `assignment` labels flakes with the original (unsorted) cluster
    ids; sort_indices maps sorted index -> unsorted index.
    NOTE: plot_buffers is a mutable default argument; it is only read here,
    but callers should not mutate it.
    '''
    plt.rcParams['xtick.labelsize'] = 15
    plt.rcParams['ytick.labelsize'] = 15
    plt.rcParams['axes.labelsize'] = 20
    plt.rcParams['lines.markersize'] = 5
    #for i, feature_vector in enumerate(cluster_centers[:1]): # for testing
    for i, feature_vector in enumerate(cluster_centers):
        # i # [sorted indexing]
        # feature_vector # in (normed) feature space coordinates [sorted indexing]
        feature_vector_orig = cluster_centers_orig[i] # in (original) feature space coordinates [sorted indexing]
        i_before_sort = sort_indices[i] # [unsorted indexing]
        cluster_i = np.nonzero(assignment==i_before_sort)[0] # indices [in unsorted indexing] of all flakes matching this cluster
        flakes_cluster = np.asarray(flakes)[cluster_i] # flakes matching this cluster
        features_cluster = flake_features[cluster_i] # feature vectors matching this cluster
        self.plot_cluster(output_dir, '{:03d}'.format(i), feature_vector, feature_vector_orig, flakes_cluster, features_cluster, distributions, dist_bin_edges, rescale=rescale, plot_buffers=plot_buffers, **run_args)
def plot_cluster(self, output_dir, cluster_name, feature_vector, feature_vector_orig, flakes_cluster, features_cluster, distributions, dist_bin_edges, rescale=None, plot_buffers=[0.01,0.0,0.0,0.045], **run_args):
    ''' Outputs an image showing representative flakes for this cluster.
    flakes_cluster, features_cluster : The subset of flakes (and their features) for this cluster.
    feature_vector, feature_vector_orig : The centroid of this cluster (average of features).
    distributions, dist_bin_edges : The feature distributions (for all flakes).
    If run_args['output_all'] is set, additionally outputs paginated images
    covering every flake in the cluster.
    '''
    num_flakes = len(flakes_cluster)
    # Sort flakes by their distance from the cluster centroid (which is located at position "feature_vector")
    distances = cdist(features_cluster, [feature_vector], metric='euclidean')[:,0]
    sort_indices = np.argsort(distances)
    flakes_cluster = flakes_cluster[sort_indices]
    features_cluster = features_cluster[sort_indices]
    distances = distances[sort_indices]
    if run_args['verbosity']>=5:
        print(' image for cluster {} ({:,d} flakes)'.format(cluster_name, num_flakes))
    # Output a summary (central, generic, peripheral)
    ########################################
    self.fig = plt.figure( figsize=(8,8), facecolor='white' )
    fea_w, fea_h = 0.04, 0.95 # Size of features graphs in sidebar
    plt.figtext(0,1, 'cluster {} ({:,d} flakes)'.format(cluster_name, num_flakes), size=20, verticalalignment='top', horizontalalignment='left')
    # Sidebar that shows the feature vector for the centroid of this cluster
    self._plot_cluster_sidebar(feature_vector, feature_vector_orig, features_cluster, distributions, dist_bin_edges, fea_w=fea_w, fea_h=fea_h, **run_args)
    # Images of example flakes for this cluster
    self._plot_cluster_main(flakes_cluster, distances, fea_w=fea_w, fea_h=fea_h, plot_buffers=plot_buffers, **run_args)
    outfile = os.path.join(output_dir, 'cluster-{}-{}.png'.format(run_args['cluster_method'], cluster_name))
    plt.savefig(outfile, dpi=300)
    plt.close(self.fig.number)
    if 'output_all' in run_args and run_args['output_all']:
        # Paginated output of every flake in the cluster
        ########################################
        nrows, ncols = 8, 7
        num_per_page = nrows*ncols
        num_pages = int(np.ceil(num_flakes/num_per_page))
        for page in range(num_pages):
            num_this_page = num_per_page
            if page==(num_pages-1): # Last page
                num_this_page = num_flakes - (num_pages-1)*num_per_page
            idx_start = page*num_per_page
            idx_end = idx_start+num_this_page
            if run_args['verbosity']>=5:
                print(' page {:d} for cluster {} ({:,d}/{:,d} flakes)'.format(page+1, cluster_name, num_this_page, num_flakes))
            self.fig = plt.figure( figsize=(8,8), facecolor='white' )
            plt.figtext(0,1, 'cluster {} ({:,d}/{:,d} flakes)'.format(cluster_name, num_this_page, num_flakes), size=20, verticalalignment='top', horizontalalignment='left')
            # Sidebar that shows the feature vector for the centroid of this cluster
            if rescale is not None:
                # Since we have access to the scaling between original coordinates for feature vector
                # and the rescale coordinates (avg=0, std=1), we can compute the sidebar for just the
                # flakes being displayed.
                # There are two equivalent ways to get the information for this subset of flakes (this page of results)
                # Method 1: Load features_orig for these flakes, and transform them
                #flakes_page = flakes_cluster[idx_start:idx_end]
                #features_orig = self.load_features(flakes_page, **run_args)
                #features_rescaled = rescale.transform(features_orig)
                # Method 2: Select subset of rescaled features, and inverse_transform them
                features_rescaled = features_cluster[idx_start:idx_end]
                features_orig = rescale.inverse_transform(features_rescaled)
                # Compute centroid for this subset of flakes (this page of results)
                feature_vector_orig = np.average(features_orig, axis=0)
                feature_vector = rescale.transform( [feature_vector_orig] )[0]
                self._plot_cluster_sidebar(feature_vector, feature_vector_orig, features_rescaled, distributions, dist_bin_edges, fea_w=fea_w, fea_h=fea_h, **run_args)
            else:
                self._plot_cluster_sidebar(feature_vector, feature_vector_orig, features_cluster, distributions, dist_bin_edges, fea_w=fea_w, fea_h=fea_h, **run_args)
            self._plot_cluster_page(idx_start, flakes_cluster, distances, fea_w, fea_h, plot_buffers, nrows, ncols, **run_args)
            # NOTE(review): the page filename omits cluster_name, so page files
            # from different clusters appear to overwrite each other -- confirm.
            outfile = os.path.join(output_dir, 'cluster-{}-page{:03d}.png'.format(run_args['cluster_method'], page+1))
            plt.savefig(outfile, dpi=300)
            plt.close(self.fig.number)
def _plot_cluster_page(self, idx, flakes_cluster, distances, fea_w, fea_h, plot_buffers, nrows, ncols, **run_args):
    '''Lay out one page (an nrows x ncols grid) of flake images on the
    current figure, starting at flake index idx.'''
    pad_left, pad_right, pad_bottom, pad_top = plot_buffers
    pad_left += fea_w*(2.2 + 2.3)  # shift right, past the feature sidebar
    grid_width = 1.0 - pad_right - pad_left
    grid_height = 1.0 - pad_top - pad_bottom
    cell = grid_width/ncols
    top = pad_bottom + grid_height
    n_flakes = len(flakes_cluster)
    for row in range(nrows):
        for col in range(ncols):
            if idx < n_flakes:
                position = [pad_left + col*cell, top - (row + 1)*cell, cell, cell]
                self._plot_flake_image(position, flakes_cluster[idx], distances[idx], **run_args)
            idx += 1
def _plot_cluster_main(self, flakes_cluster, distances, fea_w, fea_h, plot_buffers, **run_args):
    '''Plot three bands of example flakes for the cluster summary image:
    "central" (closest to the centroid), "generic" (from the middle of the
    distance-sorted list), and "peripheral" (farthest from the centroid).
    Assumes flakes_cluster/distances are already sorted by distance.'''
    # The total area we have available for plotting flakes
    left_buf, right_buf, bottom_buf, top_buf = plot_buffers
    left_buf += fea_w*( 2.2 + 2.3 )  # shift right, past the feature sidebar
    fig_width = 1.0-right_buf-left_buf
    fig_height = 1.0-top_buf-bottom_buf
    #self.ax = self.fig.add_axes( [left_buf, bottom_buf, fig_width, fig_height] )
    # Central flakes
    nrows, ncols = 3, 7
    w = fig_width/ncols
    idx = 0
    ystart = bottom_buf+fig_height
    plt.figtext(left_buf, ystart, 'central', size=8, verticalalignment='bottom', horizontalalignment='left')
    for irow in range(nrows):
        for icol in range(ncols):
            ax_pos = [left_buf+icol*w, ystart-(irow+1)*w, w, w]
            if idx<len(flakes_cluster):
                self._plot_flake_image(ax_pos, flakes_cluster[idx], distances[idx], **run_args)
            idx += 1
    # Generic flakes
    if idx<len(flakes_cluster):
        ystart = ystart-nrows*w - 0.015
        #nrows, ncols = 2, 6
        w = fig_width/ncols
        # Jump to the middle of the sorted list (without overlapping the other bands)
        idx = max( int( np.clip( len(flakes_cluster)/2, idx, len(flakes_cluster)-nrows*ncols ) ), idx )
        plt.figtext(left_buf, ystart, 'generic', size=8, verticalalignment='bottom', horizontalalignment='left')
        for irow in range(nrows):
            for icol in range(ncols):
                ax_pos = [left_buf+icol*w, ystart-(irow+1)*w, w, w]
                if idx<len(flakes_cluster):
                    self._plot_flake_image(ax_pos, flakes_cluster[idx], distances[idx], **run_args)
                idx += 1
    # Peripheral flakes
    if idx<len(flakes_cluster):
        ystart = ystart-nrows*w - 0.015
        nrows, ncols = 2, 7
        w = fig_width/ncols
        # Jump to the last nrows*ncols flakes (the farthest from the centroid)
        idx = max( len(flakes_cluster)-nrows*ncols, idx )
        plt.figtext(left_buf, ystart, 'peripheral', size=8, verticalalignment='bottom', horizontalalignment='left')
        for irow in range(nrows):
            for icol in range(ncols):
                ax_pos = [left_buf+icol*w, ystart-(irow+1)*w, w, w]
                if idx<len(flakes_cluster):
                    self._plot_flake_image(ax_pos, flakes_cluster[idx], distances[idx], **run_args)
                idx += 1
def _plot_cluster_sidebar(self, feature_vector, feature_vector_orig, features_cluster, distributions, dist_bin_edges, fea_w, fea_h, **run_args):
    '''Draw the left sidebar of a cluster image: the centroid feature vector
    rendered as a vertical heat column and, when there are fewer than 80
    features, the numeric values plus a miniature histogram per feature
    comparing the overall distribution against this cluster's.'''
    # Sidebar that shows the feature vector for the centroid of this cluster
    vmin, vmax = run_args['feature_normed_range']
    self.ax = self.fig.add_axes( [0.0, 0, fea_w, fea_h] )
    vector = np.asarray([feature_vector]).transpose()
    self.ax.imshow(vector, cmap='inferno', aspect='auto', vmin=vmin, vmax=vmax)
    self.ax.set_xticklabels([])
    self.ax.set_yticklabels([])
    xi, xf, yi, yf = self.ax.axis()
    if len(feature_vector)<80:
        for ifea, fea in enumerate(feature_vector):
            # White text on dark (negative-valued) cells for legibility
            if fea<0:
                color = 'white'
            else:
                color = 'k'
            self.ax.text((xi+xf)*0.5, ifea, '{:.2f}'.format(fea), color=color, size=8, verticalalignment='center', horizontalalignment='center')
            self.ax.text(xf, ifea, '{:.3g}'.format(feature_vector_orig[ifea]), size=6, verticalalignment='center', horizontalalignment='left')
            # Miniature histogram (of the entire distribution)
            axc = self.fig.add_axes( [fea_w*2.2, fea_h-(ifea+1)*fea_h/len(feature_vector), fea_w*2.3, fea_h/len(feature_vector)] )
            w = dist_bin_edges[ifea][1]-dist_bin_edges[ifea][0]
            axc.bar( dist_bin_edges[ifea][:-1]+0.5*w, distributions[ifea], width=w, color='b', alpha=0.3 )
            plt.xlim(vmin,vmax)
            # Overlay the histogram for this cluster
            distribution, dist_bin_edge = np.histogram(features_cluster[:,ifea], bins=50, range=[vmin,vmax], density=True)
            # Rescale so the cluster curve matches the height of the overall histogram
            distribution *= np.max(distributions[ifea])/np.max(distribution)
            #axc.bar( dist_bin_edge[:-1]+0.5*w, distribution, width=w, color='purple', alpha=0.2 )
            axc.plot( dist_bin_edge[:-1]+0.5*w, distribution, '-', color='purple', linewidth=0.8, alpha=0.3 )
            # Mark the centroid value; draw a thick marker at the edge if it falls out of range
            axc.axvline(fea, color='purple', linewidth=1)
            if fea<vmin:
                axc.axvline(vmin, color='purple', linewidth=4)
            elif fea>vmax:
                axc.axvline(vmax, color='purple', linewidth=4)
            axc.axes.get_xaxis().set_visible(False)
            axc.axes.get_yaxis().set_visible(False)
            # Label with the feature name (when known) and feature index
            if len(self.feature_names)==len(feature_vector):
                axc.text(vmin, np.max(distributions[ifea]), self.feature_names[ifea], size=4, verticalalignment='top', horizontalalignment='left', alpha=0.25)
            axc.text(vmax, np.max(distributions[ifea]), '{:d}'.format(ifea), size=4, verticalalignment='top', horizontalalignment='right', alpha=0.25)
def _plot_flake_image(self, ax_pos, flake_i, distance, **run_args):
    '''Render one flake into an axes at ax_pos: a contrast-enhanced crop of
    the parent image around the flake, with text annotations (source file,
    flake index and position, size, distance-to-centroid, contrast) and
    geometric overlays controlled by run_args['overlays'].'''
    # Load parent image
    filename = flake_i['infile'].replace('\\', '/') # String replace in case files were saved on another platform.
    img = plt.imread(filename)
    h, w, c = img.shape
    # Define image sub-region that has the flake in it
    y1, y2, x1, x2 = flake_i['bbox']
    # Make the crop border a bit bigger than the flake bounding box
    box_size = (1+run_args['bbox_pad'])*max( abs(x2-x1), abs(y2-y1) )
    # Square crop centered on the bbox center, clipped to the image extent
    x1p = int(np.clip((x1+x2)*0.5 - box_size/2, 0, w))
    x2p = int(np.clip((x1+x2)*0.5 + box_size/2, 0, w))
    y1p = int(np.clip((y1+y2)*0.5 - box_size/2, 0, h))
    y2p = int(np.clip((y1+y2)*0.5 + box_size/2, 0, h))
    box = y1p, y2p, x1p, x2p
    # Adjust image of flake
    flake = img[y1p:y2p , x1p:x2p, :]
    # Contrast range is computed from the full parent image, not just the crop
    in_range = self.get_in_range(img, run_args['image_contrast'], **run_args)
    flake = skimage.exposure.rescale_intensity(flake, in_range=in_range, out_range='dtype')
    # Plot flake
    self.ax = self.fig.add_axes(ax_pos)
    self.ax.axes.get_xaxis().set_visible(False)
    self.ax.axes.get_yaxis().set_visible(False)
    self.ax.imshow(flake)
    xi, xf, yi, yf = self.ax.axis()
    yc, xc = flake_i['center_of_mass']
    # Annotations: source/index/position (top-left), size in um (bottom-left),
    # distance to cluster centroid (bottom-center), contrast (bottom-right)
    s = '{}\nflake{:03d}\n({}, {})'.format(flake_i['infile'], flake_i['index'], int(xc), int(yc))
    self.ax.text(xi, yf, s, color='white', size=3, verticalalignment='top', horizontalalignment='left')
    self.ax.text(xi, yi, '${:.1f} \, \mathrm{{\mu m}}$'.format(flake_i['radius_um']), color='r', size=5, verticalalignment='bottom', horizontalalignment='left')
    self.ax.text((xi+xf)*0.5, yi, '{:.1f}'.format(distance), color='white', size=2, verticalalignment='bottom', horizontalalignment='center')
    self.ax.text(xf, yi, '{:.3f}'.format(flake_i['flake_contrast']), color='orange', size=3, verticalalignment='bottom', horizontalalignment='right')
    # Various overlays on the flake
    # Convert the center-of-mass to crop-local coordinates
    xc -= x1p
    yc -= y1p
    size = flake_i['radius_pixels']
    if run_args['overlays']>=1:
        # Flake contour outline
        c = flake_i['contour']
        xs = (c[:,0] - x1p)
        ys = (c[:,1] - y1p)
        self.ax.plot(xs, ys, '-', linewidth=0.6, color='r', dashes=[4,1], alpha=0.2)
    if run_args['overlays']>=7:
        # Convex hull; note the hull coordinates are stored (row, col), i.e. swapped
        c = flake_i['convex_hull']
        xs = (c[:,1] - x1p)
        ys = (c[:,0] - y1p)
        self.ax.plot(xs, ys, '-', linewidth=0.5, color='g', alpha=0.5)
    if run_args['overlays']>=5:
        # Bounding box of the flake
        rect = patches.Rectangle( ((x1-x1p), (y1-y1p)), (x2-x1), (y2-y1), linewidth=1.0, edgecolor='orange', facecolor='none', alpha=0.5)
        self.ax.add_patch(rect)
    if run_args['overlays']>=3:
        # Cross hair and circle
        rect = patches.Rectangle( (xc-size/2, yc), size, 0, linewidth=0.6, edgecolor='r', facecolor='none', alpha=0.3) # Horizontal bar
        self.ax.add_patch(rect)
        rect = patches.Rectangle( (xc, yc-size/2), 0, size, linewidth=0.6, edgecolor='r', facecolor='none', alpha=0.3) # Vertical bar
        self.ax.add_patch(rect)
        if run_args['overlays']>=5:
            # Circle overlay denoting size
            circ = patches.Circle(xy=(xc,yc), radius=size, linewidth=0.6, edgecolor='r', facecolor='none', alpha=0.3)
            self.ax.add_patch(circ)
def get_in_range(self, data, im_contrast, image_contrast_trim=None, **run_args):
    '''Return the (low, high) intensity range, on a 0-255 scale, used for
    contrast rescaling. When image_contrast_trim is given, the range is
    derived from the image's mean brightness instead of im_contrast, trimmed
    toward the extremes by the given fraction (clipped to at most 0.95).
    NOTE(review): assumes `data` holds 8-bit (0-255) pixel values -- confirm.
    '''
    if image_contrast_trim is not None:
        trim = np.clip(image_contrast_trim, 0, 0.95)
        mean_level = np.average(data)/255  # normalized average brightness
        im_contrast = (mean_level*trim, 1.0 - (1.0 - mean_level)*trim)
    return (im_contrast[0]*255, im_contrast[1]*255)
class select_flakes(cluster):
    '''Protocol that selects flakes matching user-specified feature criteria.

    Inherits the aggregation and plotting machinery from `cluster`, but
    instead of running a clustering algorithm it filters flakes whose
    features fall within the (min, max) ranges given in
    run_args['selection'], and renders them with the same cluster-style
    summary image.
    '''
    def __init__(self, name='select_flakes', **kwargs):
        self.name = self.__class__.__name__ if name is None else name
        self.default_ext = '.pkl'
        # Default run arguments; callers can override any of these via kwargs
        self.run_args = {
                    'file_extension' : '.pkl',
                    'force' : False,
                    'verbosity' : 3,
                    'num_jobs' : None,
                    'num_clusters' : 20,
                    'cluster_method' : 'selection',
                    'features' : 'all', # 'shape', 'color', 'all'
                    'feature_normed_range' : [-2, +2], # range for plotting the normed features
                    'bbox_pad' : 0.5,
                    'image_contrast' : (0, 1),
                    'image_contrast_trim' : None,
                    'overlays' : 3,
                    }
        self.run_args.update(kwargs)
        # WARNING: This association of features names is hard-coded, and is thus contingent
        # on the current implementation of Protocols.py>flake_analysis
        self.feature_names_color = [
            'g contrast',
            'v contrast',
            'gray',
            'gray std',
            'H',
            'S',
            'V',
            'H std',
            'S std',
            'V std',
            'R',
            'G',
            'B',
            'R std',
            'G std',
            'B std',
            'entropy'
            ]
        # Each color feature also has an "_inner" variant (computed on the flake interior)
        self.feature_names_color = self.feature_names_color + ['{}_inner'.format(f) for f in self.feature_names_color]
        self.feature_names_shape = ['P/A'] + ['hist {}'.format(i) for i in range(15)] + ['fractal dimension']
    @run_default
    def run(self, datas, output_dir, basename, **run_args):
        '''Select flakes matching run_args['selection'] and render a
        cluster-style summary image for the selection. Returns an (empty)
        results dict.'''
        results = {}
        # Load all flakes identified by "find_flakes" protocol
        flakes = self.load_flakes(datas, **run_args)
        if run_args['verbosity']>=4:
            print(' {:,d} flakes identified in {:d} images'.format(len(flakes), len(datas)))
        # Compute the rescaling of feature vectors
        features_all_orig = self.load_features(flakes, **run_args)
        rescale = StandardScaler()
        features_all_rescaled = rescale.fit_transform(features_all_orig)
        flakes_selected = self.select(flakes, features_all_orig, features_all_rescaled, **run_args)
        features_orig = self.load_features(flakes_selected, **run_args)
        # Centroid of the selected flakes, in original and rescaled coordinates
        feature_vector_orig = np.average(features_orig, axis=0)
        feature_vector = rescale.transform( [feature_vector_orig] )[0]
        features = rescale.transform(features_orig)
        if run_args['verbosity']>=4:
            print(" Selected {:,d} flakes using '{}'".format(len(flakes_selected), run_args['cluster_method']))
        # Reuse the clustering distributions (or recompute minimal ones) for the sidebar
        clustering = self.load_clustering(basename=basename, output_dir=output_dir, features_rescaled=features, **run_args)
        self.plot_cluster(output_dir, cluster_name='selection', feature_vector=feature_vector, feature_vector_orig=feature_vector_orig, flakes_cluster=flakes_selected, features_cluster=features, distributions=clustering['distributions'], dist_bin_edges=clustering['dist_bin_edges'], rescale=rescale, **run_args)
        return results
    def extract_features(self, feature_name, flakes, flake_features, **run_args):
        '''Return an array holding the named feature for every flake.

        Resolution order:
          1. "<name> std_inner __relative" / "<name> std __relative" are
             computed recursively as the (inner) standard deviation divided
             by the feature value itself.
          2. A key stored directly on each flake dict.
          3. A column of flake_features, located via self.feature_names.
        '''
        # Extract the specified feature, returning a list of that feature
        # for the entire list of flakes
        # Handle special case of relative standard deviation
        if feature_name.endswith(' std_inner __relative'):
            if run_args['verbosity']>=5:
                print(" Computing {}".format(feature_name))
            name = feature_name[:-len(' std_inner __relative')]
            features = self.extract_features(name, flakes, flake_features, **run_args)
            features_std = self.extract_features('{} std_inner'.format(name), flakes, flake_features, **run_args)
            # NOTE(review): divides element-wise; features equal to zero would
            # produce inf/nan entries -- confirm inputs are non-zero.
            return features_std/features
        elif feature_name.endswith(' std __relative'):
            if run_args['verbosity']>=5:
                print(" Computing {}".format(feature_name))
            features = self.extract_features(feature_name[:-len(' std __relative')], flakes, flake_features, **run_args)
            features_std = self.extract_features(feature_name[:-len(' __relative')], flakes, flake_features, **run_args)
            return features_std/features
        # Check if it appears as value associated with each flake object
        if feature_name in flakes[0]:
            if run_args['verbosity']>=5:
                print(" Extracting {} from flakes".format(feature_name))
            return np.asarray( [ f[feature_name] for f in flakes ] )
        # Default: lookup in self.feature_names
        i = self.feature_names.index(feature_name)
        if run_args['verbosity']>=5:
            print(" Extracting {} from flake_features, index {}".format(feature_name, i))
        return flake_features[:,i]
    def select(self, flakes, flake_features_orig, flake_features_rescaled, **run_args):
        '''Return the subset of flakes whose features satisfy every
        (min, max) range in run_args['selection'].

        Keys ending in " __rescaled" are tested against the normalized
        features; all other keys are tested against the original values.
        '''
        # Generate a list of boolean arrays, which are selecting flakes with
        # features within the specified range
        conditions = []
        for key, value in run_args['selection'].items():
            if run_args['verbosity']>=5:
                print(" Adding condition: {} between {} and {}".format(key, value[0], value[1]))
            if key.endswith(' __rescaled'):
                features = self.extract_features(key[:-len(' __rescaled')], flakes, flake_features_rescaled, **run_args)
            else:
                features = self.extract_features(key, flakes, flake_features_orig, **run_args)
            conditions.append( (features>=value[0]) )
            conditions.append( (features<=value[1]) )
        # Keep only flakes that satisfy all conditions simultaneously
        idx = np.where(np.all(conditions, axis=0))[0]
        flakes = np.asarray(flakes)[idx]
        if run_args['verbosity']>=3 and len(flakes)<1:
            print("WARNING: Selection criteria too restrictive. (No flakes meet criteria.)")
        return flakes
| CFN-softbio/SciAnalysis | SciAnalysis/ImAnalysis/Flakes/cluster.py | cluster.py | py | 42,216 | python | en | code | 19 | github-code | 36 |
13347982564 | import bge, json
from bge.logic import globalDict
from random import choice, random
from pprint import pprint
from ast import literal_eval as litev
if not 'player_active' in globalDict.keys():
globalDict['player_active'] = False
def init(cont):
    """ Initializes the character. """
    own = cont.owner
    scene = own.scene
    # Sensors
    autostart = cont.sensors['autostart'].positive
    # Actuators
    track_to = cont.actuators['track_to']
    # Objects
    track_direction = own.childrenRecursive['track_direction']
    # Properties
    #### INITIALIZE ####
    if autostart:
        # Aim the track-to actuator at the helper empty that marks the walk direction
        if track_to.object == None:
            track_to.object = track_direction
            cont.activate(track_to)
        # Randomize the mesh hue so each spawned character looks different
        own.childrenRecursive['char_mesh'].color[0] = random()
        own.childrenRecursive['camera_smooth'].timeOffset = 5
        # The first character initialized becomes the player-controlled one
        if not globalDict['player_active']:
            scene.active_camera = own.childrenRecursive['camera_char']
            own['is_player'] = True
            globalDict['player_active'] = True
    pass
def set_direction(cont):
    """ Sets the direction of track object and properties of character. """
    own = cont.owner
    scene = own.scene
    # Sensors (keyboard input states)
    always = cont.sensors['always'].positive
    up = cont.sensors['up'].positive
    down = cont.sensors['down'].positive
    left = cont.sensors['left'].positive
    right = cont.sensors['right'].positive
    run = cont.sensors['run'].positive
    # Actuators
    track_to = cont.actuators['track_to']
    # Objects
    track_direction = own.childrenRecursive['track_direction']
    # Collect the helper empties ('dir_U', 'dir_UL', ...) that mark the eight movement directions
    directions = [obj for obj in own.childrenRecursive if obj.name.startswith('dir_')]
    if len(directions) > 0:
        # Re-index by object name for direct lookup below
        new_dic = {}
        for obj in directions:
            new_dic[obj.name] = obj
        directions = new_dic
    # Properties
    #### INITIALIZE ####
    if not run:
        own['run'] = False
    # No net movement input: idle, or opposing keys cancel each other out
    if not up and not down and not left and not right or up and down or left and right:
        own['walk'] = False
        own['run'] = False
    # Net movement on at least one axis
    if up and not down or not up and down or left and not right or not left and right:
        own['walk'] = True
        if run:
            own['run'] = True
    # Move the tracking target onto the empty matching the pressed key combination
    if up and not down:
        if not left and not right:
            track_direction.worldPosition = directions['dir_U'].worldPosition
        elif left and not right:
            track_direction.worldPosition = directions['dir_UL'].worldPosition
        elif not left and right:
            track_direction.worldPosition = directions['dir_UR'].worldPosition
    if not up and down:
        if not left and not right:
            track_direction.worldPosition = directions['dir_D'].worldPosition
        elif left and not right:
            track_direction.worldPosition = directions['dir_DL'].worldPosition
        elif not left and right:
            track_direction.worldPosition = directions['dir_DR'].worldPosition
    if not up and not down:
        if left and not right:
            track_direction.worldPosition = directions['dir_L'].worldPosition
        elif not left and right:
            track_direction.worldPosition = directions['dir_R'].worldPosition
    pass
def mov_anim(cont):
    """ Moves the character and animates its armature. """
    own = cont.owner
    scene = own.scene
    # Sensors
    autostart = cont.sensors['autostart'].positive
    is_walk = cont.sensors['is_walk'].positive
    is_run = cont.sensors['is_run'].positive
    # Actuators
    motion = cont.actuators[0]
    # Objects
    char_armature = own.childrenRecursive['char_armature']
    # Properties
    LOOP = bge.logic.KX_ACTION_MODE_LOOP
    blend_in = 5            # frames over which animations blend into each other
    motion_vec = [0, 0, 0]  # per-frame local displacement (x, y, z)
    motion_spd = -0.07      # forward speed along the local Y axis
    #### INITIALIZE ####
    if autostart:
        # Pick the animation range and speed: idle (0-120), walk (130-145), run (150-165)
        if not is_walk:
            char_armature.playAction('character', 0, 120, blendin=blend_in, play_mode=LOOP)
            motion_vec[1] = 0
        elif is_walk and not is_run:
            char_armature.playAction('character', 130, 145, blendin=blend_in, play_mode=LOOP)
            motion_vec[1] = motion_spd
        elif is_walk and is_run:
            char_armature.playAction('character', 150, 165, blendin=blend_in, play_mode=LOOP)
            motion_vec[1] = motion_spd * 2  # running moves twice as fast as walking
        motion.dLoc = motion_vec
        cont.activate(motion)
    pass
def camera_collision(cont):
    """ Avoids the camera to pass through objects.

    Casts a ray from the camera axis toward the camera's desired position
    (``camera_origin``); if geometry blocks the line of sight, the camera is
    pulled in to the hit point so it never clips through walls.
    """
    own = cont.owner
    # Sensors
    always = cont.sensors['always'].positive
    # Objects
    camera = own.childrenRecursive['camera_char']
    axis = own.childrenRecursive['camera_axis']
    origin = own.childrenRecursive['camera_origin']
    # Ray from the axis to the default camera position, capped at their distance.
    dist = axis.getDistanceTo(origin)
    ray = own.rayCast(origin, axis, dist)
    #### INITIALIZE ####
    if always:
        # Idiom fix: identity comparison with None ('is'/'is not'), and the
        # second branch is simply the complement of the first.
        if ray[0] is not None:
            # Obstacle in the way: snap the camera to the hit point.
            camera.worldPosition = ray[1]
        else:
            # Clear line of sight: keep the camera at its default position.
            camera.worldPosition = origin.worldPosition
| BlenderCN-Org/upbge_random_city_generator | char.py | char.py | py | 4,512 | python | en | code | 1 | github-code | 36 |
32281752161 | import identity_server.logic.session.login_session.logged_in_state as lst
import identity_server.logic.session.login_session.waiting_for_permission as wfp
from mongodb.Application import Application
from mongodb.ApplicationAccount import ApplicationAccount
from django.http.response import HttpResponse
import identity_server.logic.session.login_session.login_session_context as ctx
from typing import Type, Union
from django.http.request import HttpRequest
import identity_server.logic.session.session as ssn
class InitialLoginState(ssn.SessionState):
    """
    Session was not started.
    Checks is request is valid, returns login page in case if yes and bad request otherwise.
    """
    def required_request_params(self):
        # Query parameters that must be present for this state to handle a request.
        return [
            'callback_url',
            'client_id'
        ]
    def route(self, request: HttpRequest) -> Union[Type[ssn.SessionState], None]:
        """Short-circuit to LoggedIn when the user already authorized this client;
        otherwise defer to the base-class routing."""
        assert isinstance(self.session_context, ctx.LoginSessionContext)
        data = self._get_request_data(request)
        client_id = data['client_id']
        app = self.get_authorized_app(client_id)
        if app:
            # Cache the previously granted permissions for this client.
            self.session_context.authorized_clients[client_id] = app[0].permissions
            return lst.LoggedIn
        return super().route(request)
    def process_request(self, request: HttpRequest, **kwargs) -> HttpResponse:
        """Render the login page and advance the session to WaitingForPermissions."""
        assert isinstance(self.session_context, ctx.LoginSessionContext)
        data = self._get_request_data(request)
        client_id = data['client_id']
        scope, app_name = self._get_app_info(client_id)
        is_logged_in = self.is_user_logged_in()
        # Persist the request context so later states can issue the redirect.
        self.session_context.assign(
            {'scope': scope, 'callback_url': data['callback_url'], 'client_id': client_id, 'app': app_name})
        self.set_session_state(wfp.WaitingForPermissions)
        return self.render_html(request, 'login_page.html', context={'scope': scope, 'app': app_name, 'clientId': client_id, 'is_logged_in': is_logged_in})
    def is_user_logged_in(self):
        # A session with an empty user_id means nobody has authenticated yet.
        assert isinstance(self.session_context, ctx.LoginSessionContext)
        return self.session_context.user_id != ''
    def get_authorized_app(self, client_id):
        """Return the queryset of existing (user, client) authorizations,
        or implicitly None when no user is logged in."""
        assert isinstance(self.session_context, ctx.LoginSessionContext)
        user_id = self.session_context.user_id
        if user_id:
            authorized_app = ApplicationAccount.objects.filter(
                worker_id=user_id, client_id=client_id)
            return authorized_app
    def _get_app_info(self, client_id) -> Application:
        """
        Makes request to the database to get actual application name associated with given client id

        NOTE(review): despite the -> Application annotation, this returns a
        (permissions, name) tuple; will raise AttributeError when no
        Application matches client_id (first() returns None) — confirm
        callers always pass a known client_id.
        """
        app = Application.objects.filter(client_id=client_id).first()
        return app.permissions, app.name
| aI-lab-glider/oauth2-server-implementation | identity_server/logic/session/login_session/initial_login_state.py | initial_login_state.py | py | 2,766 | python | en | code | 0 | github-code | 36 |
32716485591 | """ Image editing class for head to bot, time-trail, obstacle where
there is only single agent
"""
import datetime
import logging
import rospy
import cv2
from markov.log_handler.logger import Logger
from markov.utils import get_racecar_idx
from mp4_saving import utils
from mp4_saving.constants import (RaceCarColorToRGB,
IconographicImageSize,
TrackAssetsIconographicPngs, RACE_COMPLETE_Y_OFFSET,
RACE_TYPE_TO_VIDEO_TEXT_MAPPING, XYPixelLoc, AWS_DEEPRACER_WATER_MARK,
SCALE_RATIO, FrameQueueData)
from mp4_saving.image_editing_interface import ImageEditingInterface
from mp4_saving.top_view_graphics import TopViewGraphics
LOG = Logger(__name__, logging.INFO).get_logger()
class SingleAgentImageEditing(ImageEditingInterface):
    """ Image editing class for head to bot, time-trail, obstacle where
    there is only single agent
    """
    def __init__(self, racecar_name, racecar_info, race_type):
        """ Initializing the required data for the head to bot, time-trail. This is used for single agent
        Arguments:
            racecar_name (str): name of this racecar; an index suffix (if any) is parsed by get_racecar_idx
            racecar_info (list): list of dict having information of the agent
            race_type (str): Since this class is reused for all the different race_type
        """
        self.racecar_info = racecar_info
        self.race_type = race_type
        racecar_index = get_racecar_idx(racecar_name)
        # A falsy index (None or 0) collapses to 0, which is the intended default.
        self.racecar_index = racecar_index if racecar_index else 0
        # Store the font which we will use to write the phase with
        self.amazon_ember_regular_20px = utils.get_font('AmazonEmber-Regular', 20)
        self.amazon_ember_regular_16px = utils.get_font('AmazonEmber-Regular', 16)
        self.amazon_ember_heavy_30px = utils.get_font('AmazonEmber-Heavy', 30)
        self.amazon_ember_light_18px = utils.get_font('AmazonEmber-Light', 18)
        self.amazon_ember_light_20px = utils.get_font('AmazonEmber-Light', 20)
        self.amazon_ember_light_italic_20px = utils.get_font('AmazonEmber-LightItalic', 20)
        # Job-level flags read once from ROS parameters.
        self.is_racing = rospy.get_param("VIDEO_JOB_TYPE", "") == "RACING"
        self.is_league_leaderboard = rospy.get_param("LEADERBOARD_TYPE", "") == "LEAGUE"
        self.leaderboard_name = rospy.get_param("LEADERBOARD_NAME", "")
        self._total_laps = int(rospy.get_param("NUMBER_OF_TRIALS", 0))
        # The track image as iconography
        self.track_icongraphy_img = utils.get_track_iconography_image()
        # Track image offset
        self.track_loc_offset = XYPixelLoc.TRACK_IMG_WITH_OFFSET_LOC.value if self.is_league_leaderboard \
            else XYPixelLoc.TRACK_IMG_WITHOUT_OFFSET_LOC.value
        # Gradient overlay image
        gradient_img_path = TrackAssetsIconographicPngs.OBSTACLE_OVERLAY_PNG_LEAGUE_LEADERBOARD.value \
            if self.is_league_leaderboard else TrackAssetsIconographicPngs.OBSTACLE_OVERLAY_PNG.value
        # The gradient (with the track baked in) is computed once and reused per frame.
        self.gradient_img = self._plot_track_on_gradient(gradient_img_path)
        self.gradient_alpha_rgb_mul, self.one_minus_gradient_alpha = utils.get_gradient_values(self.gradient_img)
        # Top camera information
        top_camera_info = utils.get_top_camera_info()
        self.top_view_graphics = TopViewGraphics(top_camera_info.horizontal_fov, top_camera_info.padding_pct,
                                                 top_camera_info.image_width, top_camera_info.image_height,
                                                 racecar_info)
    def _edit_major_cv_image(self, major_cv_image, mp4_video_metrics_info):
        """ Apply all the editing for the Major 45degree camera image
        Args:
            major_cv_image (Image): Image straight from the camera
            mp4_video_metrics_info (list): per-agent ROS metric messages for this frame
        Returns:
            Image: Edited main camera image
        """
        # Applying gradient to whole major image and then writing text
        major_cv_image = utils.apply_gradient(major_cv_image, self.gradient_alpha_rgb_mul,
                                              self.one_minus_gradient_alpha)
        # Top left location of the picture
        loc_x, loc_y = XYPixelLoc.SINGLE_AGENT_DISPLAY_NAME_LOC.value
        # Display name (Racer name/Model name), truncated with an ellipsis past 15 chars
        display_name = self.racecar_info[self.racecar_index]['display_name']
        display_name_txt = display_name if len(display_name) < 15 else "{}...".format(display_name[:15])
        major_cv_image = utils.write_text_on_image(image=major_cv_image, text=display_name_txt,
                                                   loc=(loc_x, loc_y), font=self.amazon_ember_regular_20px,
                                                   font_color=RaceCarColorToRGB.White.value,
                                                   font_shadow_color=RaceCarColorToRGB.Black.value)
        # Lap Counter (lap_counter is 0-based; clamp so it never shows past the total)
        loc_y += 30
        current_lap = min(int(mp4_video_metrics_info[self.racecar_index].lap_counter) + 1, self._total_laps)
        lap_counter_text = "{}/{}".format(current_lap, self._total_laps)
        major_cv_image = utils.write_text_on_image(image=major_cv_image, text=lap_counter_text,
                                                   loc=(loc_x, loc_y), font=self.amazon_ember_heavy_30px,
                                                   font_color=RaceCarColorToRGB.White.value,
                                                   font_shadow_color=RaceCarColorToRGB.Black.value)
        # total_evaluation_time (Race time)
        loc_y += 45
        total_eval_milli_seconds = mp4_video_metrics_info[self.racecar_index].total_evaluation_time
        time_delta = datetime.timedelta(milliseconds=total_eval_milli_seconds)
        total_eval_time_text = "Race | {}".format(utils.milliseconds_to_timeformat(time_delta))
        major_cv_image = utils.write_text_on_image(image=major_cv_image, text=total_eval_time_text,
                                                   loc=(loc_x, loc_y), font=self.amazon_ember_light_18px,
                                                   font_color=RaceCarColorToRGB.White.value,
                                                   font_shadow_color=RaceCarColorToRGB.Black.value)
        # Reset counter
        loc_y += 25
        reset_counter_text = "Reset | {}".format(mp4_video_metrics_info[self.racecar_index].reset_counter)
        major_cv_image = utils.write_text_on_image(image=major_cv_image, text=reset_counter_text,
                                                   loc=(loc_x, loc_y), font=self.amazon_ember_light_18px,
                                                   font_color=RaceCarColorToRGB.White.value,
                                                   font_shadow_color=RaceCarColorToRGB.Black.value)
        # Speed (placement differs between evaluation and leaderboard layouts)
        loc_x, loc_y = XYPixelLoc.SPEED_EVAL_LOC.value
        if self.is_league_leaderboard:
            loc_x, loc_y = XYPixelLoc.SPEED_LEADERBOARD_LOC.value
        speed_text = "{} m/s".format(utils.get_speed_formatted_str(mp4_video_metrics_info[self.racecar_index].throttle))
        major_cv_image = utils.write_text_on_image(image=major_cv_image, text=speed_text,
                                                   loc=(loc_x, loc_y), font=self.amazon_ember_light_20px,
                                                   font_color=RaceCarColorToRGB.White.value,
                                                   font_shadow_color=RaceCarColorToRGB.Black.value)
        # Leaderboard name
        if self.is_league_leaderboard:
            loc_x, loc_y = XYPixelLoc.LEADERBOARD_NAME_LOC.value
            major_cv_image = utils.write_text_on_image(image=major_cv_image, text=self.leaderboard_name,
                                                       loc=(loc_x, loc_y), font=self.amazon_ember_regular_16px,
                                                       font_color=RaceCarColorToRGB.White.value,
                                                       font_shadow_color=RaceCarColorToRGB.Black.value)
        # Evaluation type
        loc_x, loc_y = XYPixelLoc.RACE_TYPE_EVAL_LOC.value
        if self.is_league_leaderboard:
            loc_x, loc_y = XYPixelLoc.RACE_TYPE_RACE_LOC.value
        race_text = "race" if self.is_racing else "evaluation"
        evaluation_type_txt = "{} {}".format(RACE_TYPE_TO_VIDEO_TEXT_MAPPING[self.race_type], race_text)
        major_cv_image = utils.write_text_on_image(image=major_cv_image, text=evaluation_type_txt,
                                                   loc=(loc_x, loc_y), font=self.amazon_ember_light_italic_20px,
                                                   font_color=RaceCarColorToRGB.White.value,
                                                   font_shadow_color=RaceCarColorToRGB.Black.value)
        # AWS Deepracer logo at the bottom for the community leaderboard
        if self.is_league_leaderboard:
            major_cv_image = utils.write_text_on_image(image=major_cv_image, text=AWS_DEEPRACER_WATER_MARK,
                                                       loc=XYPixelLoc.AWS_DEEPRACER_WATER_MARK_LOC.value,
                                                       font=self.amazon_ember_regular_16px,
                                                       font_color=RaceCarColorToRGB.White.value,
                                                       font_shadow_color=RaceCarColorToRGB.Black.value)
        # Check if the done flag is set and set the banner appropriately
        if mp4_video_metrics_info[self.racecar_index].done and (int(self._total_laps) >= current_lap):
            # When the cv2 text is written, it automatically drops the alpha value of the image
            rel_y_offset = XYPixelLoc.TRACK_IMG_WITH_OFFSET_LOC.value[1] if self.is_league_leaderboard else 0
            racecomplete_image = utils.get_image(TrackAssetsIconographicPngs.RACE_COMPLETE_OVERLAY_PNG.value,
                                                 IconographicImageSize.RACE_COMPLETE_IMAGE_SIZE.value)
            # Centre the banner horizontally and keep it above the track inset.
            x_offset = major_cv_image.shape[1] - racecomplete_image.shape[1]//2
            y_offset = major_cv_image.shape[0] - RACE_COMPLETE_Y_OFFSET - rel_y_offset - racecomplete_image.shape[0]//2
            major_cv_image = utils.plot_rectangular_image_on_main_image(
                major_cv_image, racecomplete_image, (x_offset, y_offset))
        major_cv_image = cv2.cvtColor(major_cv_image, cv2.COLOR_RGB2BGRA)
        return major_cv_image
    def _plot_track_on_gradient(self, gradient_img_path):
        """ For the given gradient apply the track iconographic image and use this to apply gradient
        on each camera frame. Previously this was done on the top camera which changed every frame. But
        with the track iconographic image set static, adding the track on gradient is more optimized.
        Arguments:
            gradient_img_path (str): Gradient image path
        Returns:
            (Image): Edited gradient image with track image
        """
        gradient_img = utils.get_image(gradient_img_path, IconographicImageSize.FULL_IMAGE_SIZE.value)
        gradient_img = cv2.cvtColor(gradient_img, cv2.COLOR_RGBA2BGRA)
        track_icongraphy_scaled = utils.resize_image(self.track_icongraphy_img, SCALE_RATIO)
        # Normalised alpha channel of the track inset, used for manual alpha blending.
        track_icongraphy_alpha = track_icongraphy_scaled[:, :, 3]/255.0
        # Track image is placed at the bottom right with some offset (only in leaderboard tracks)
        # Negative indices address the bottom-right corner of the gradient image.
        x_min = -(self.track_loc_offset[1] + track_icongraphy_scaled.shape[0])
        x_max = gradient_img.shape[0] - self.track_loc_offset[1]
        y_min = -(self.track_loc_offset[0] + track_icongraphy_scaled.shape[1])
        y_max = gradient_img.shape[1] - self.track_loc_offset[0]
        # This is used as the offset for plotting the agent dots
        self.track_start_loc = (gradient_img.shape[1] + y_min, gradient_img.shape[0] + x_min)
        # Per-channel alpha blend of the track inset onto the gradient.
        for channel in range(0, 4):
            gradient_img[x_min:x_max, y_min:y_max, channel] =\
                (track_icongraphy_alpha * track_icongraphy_scaled[:, :, channel]) + \
                (1 - track_icongraphy_alpha) * (gradient_img[x_min:x_max, y_min:y_max, channel])
        return gradient_img
    def _plot_agents_on_major_cv_image(self, major_cv_image, mp4_video_metrics_info):
        """ Add the agents, obstacles on the track.
        Arguments:
            major_cv_image (Image): Edited image having gradient, text, track
            mp4_video_metrics_info (List): List of ROS metric values of each agent
        Returns:
            Image: Edited image with gradient, text, track and agents with dots
        """
        agents_loc = [(metric.x, metric.y) for metric in mp4_video_metrics_info]
        objects_loc = []
        # Object (obstacle/bot) locations are only published on the first agent's metrics.
        if mp4_video_metrics_info[0].object_locations:
            objects_loc = [(object_loc.x, object_loc.y) for object_loc in mp4_video_metrics_info[0].object_locations]
        return self.top_view_graphics.plot_agents_as_circles(
            major_cv_image, agents_loc, objects_loc, self.track_start_loc)
    def edit_image(self, major_cv_image, metric_info):
        """Entry point: overlay text/gradient, plot agent dots, and return an RGB frame."""
        mp4_video_metrics_info = metric_info[FrameQueueData.AGENT_METRIC_INFO.value]
        major_cv_image = self._edit_major_cv_image(major_cv_image, mp4_video_metrics_info)
        major_cv_image = self._plot_agents_on_major_cv_image(major_cv_image, mp4_video_metrics_info)
        return cv2.cvtColor(major_cv_image, cv2.COLOR_BGRA2RGB)
| aws-deepracer-community/deepracer-simapp | bundle/src/deepracer_simulation_environment/scripts/mp4_saving/single_agent_image_editing.py | single_agent_image_editing.py | py | 13,347 | python | en | code | 79 | github-code | 36 |
42242738770 | #!/usr/bin/env python
import numpy
import scipy.integrate
from pylab import *
datafile="../../../Mathematica/calculated_vals.tsv"
# Load the measurement table: columns are tag, x, e (error?), f (force integrand).
tag,x,e,f = numpy.loadtxt("data.txt",unpack=True)
tags=numpy.unique(tag)
flimit = numpy.zeros(len(tags))
# One curve (and one integrated force value) per unique tag.
for i in range(0,len(tags)):
    itag=tags[i]
    inds = numpy.where(tag == itag)
    xplot=x[inds]
    yplot=-f[inds]*31e-15  # scale factor 31e-15 — units not documented here
    isort=numpy.argsort(xplot)
    xplot = xplot[isort]
    yplot = yplot[isort]
    plot(xplot,yplot)
    # NOTE(review): scipy.integrate.trapz signature is trapz(y, x) — here
    # xplot is passed as y and -yplot as x.  Confirm the swap (and the extra
    # sign flip on yplot) is intentional rather than transposed arguments.
    flimit[i] = scipy.integrate.trapz(xplot,-yplot)
yscale('log')
xscale('log')
savefig('integrands.png')
clf()
# Compare the numerically integrated forces against the Mathematica results.
dist,fpfa,fnaive,fright,ftemp=numpy.loadtxt(datafile,unpack=True)
dist=dist*1e6  # metres -> micrometres
plot(tags,flimit)
plot(dist,fpfa)
plot(dist,fright)
plot(dist,ftemp)
xscale('log')
yscale('log')
show()
| charlesblakemore/opt_lev_analysis | casimir/scuffCode/Comparison/byXi/plot_integrand.py | plot_integrand.py | py | 773 | python | en | code | 1 | github-code | 36 |
33517632306 | from manimlib.imports import *
class Limite4_1 (ThreeDScene):
    """Animated lesson (manimlib) on the limit at infinity of f: R^n -> R,
    illustrated in 3D with f(x, y) = 1 + 1/(x^2 + y^2).

    On-screen text is intentionally in Spanish (the lesson's language) and
    must not be altered.
    """
    def construct (self):
        # Title card plus the formal epsilon-delta definition of the limit at infinity.
        titulo=TextMobject('''Existencia del Límite en Infinito\n
        de Funciones de $\\mathbb{R}^n$ $\\rightarrow$ $\\mathbb{R}$''').scale(1.5)
        text=TextMobject("Sea $f:\\mathbb{R}^{n}\\rightarrow\\mathbb{R}$").move_to(2.2*UP)
        text1=TexMobject(r"\lim_{\vec{x}\rightarrow\infty}f(\vec{x})=L\leftrightarrow\forall\epsilon>0").move_to(1*UP)
        text2=TexMobject(r"\exists\delta>0 \ tq \ \forall \vec{x}\in B^{c}_{\delta}(\vec{0})").move_to(-0.2*UP)
        text3=TexMobject(r"\implies d(f(\vec{x}),L)<\epsilon").move_to(1.4*DOWN)
        G1=VGroup(text,text1,text2,text3)
        # Introduction of the worked example.
        text4=TextMobject('''Veamos el siguiente ejemplo para aterrizar ideas:''')
        text5=TexMobject(r"f:\mathbb{R}^{2}\rightarrow\mathbb{R}")
        text6=TexMobject(r"f(x,y)=1+\frac{1}{x^{2}+y^{2}}").move_to(1.5*DOWN)
        G2=VGroup(text4,text5,text6)
        # Play the 2D exposition, then hand off to the 3D part.
        self.play(Write(titulo))
        self.wait(5.25)
        self.play(FadeOut(titulo))
        self.play(Write(text))
        self.play(Write(text1))
        self.play(Write(text2))
        self.play(Write(text3))
        self.wait(6)
        self.play(FadeOut(G1))
        self.play(Write(text4))
        self.wait(4.6)
        self.play(text4.shift,2*UP,runtime=1.5)
        self.play(Write(text5))
        self.play(Write(text6))
        self.wait(3)
        self.play(FadeOut(G2))
        self.wait()
        self.custom_method()
    def custom_method(self):
        """3D half of the scene: surface, epsilon interval above L=1, delta
        circle around the origin, random sample points, and the conclusion."""
        axes=ThreeDAxes()
        # superficie4 is defined elsewhere in this file: the surface of f(x, y).
        superficie=superficie4()
        text1=TexMobject(r'''f(x,y)=1+\frac{1}{x^{2}+y^{2}}''')
        text1.to_corner(UL)
        text2=TextMobject("Tomemos", " $\epsilon$=0.5")
        text2.to_corner(UL)
        text2[1].set_color(RED)
        text3=TextMobject('''Y notemos que \n
        podemos escoger''').to_corner(UL)
        text3_1=TextMobject("una"," $\\delta>0$").move_to(text3.get_center()+1*DOWN)
        text3_1[1].set_color(YELLOW_C)
        text4=TextMobject('''Tal que la imagen de los\n
        puntos que no \n
        pertenecen a $ B_{\\delta}(\\vec{0})$,''').to_corner(UL)
        text5=TextMobject('''están a una distancia $\\epsilon$\n
        de 1.''').to_corner(UL)
        text5_1=TextMobject('''Es posible hacer lo mismo\n
        con toda $\\epsilon>0$.''').to_corner(UL)
        text6=TextMobject('''Por lo cual:''').to_corner(UL)
        text7=TexMobject(r"\lim_{\vec{x}\rightarrow\infty}f(\vec{x})=1").move_to(text5.get_center()+1*DOWN)
        M=TextMobject("1").move_to(1*UP+0.2*LEFT)
        # Epsilon radii for the vertical interval markers; tweak to taste.
        r=0.5
        r1=1
        linea=Line((0,0,1),(0,0,1+r),stroke_width=6,color=RED)
        linea_1=Line((0,0,1),(0,0,1+r1),stroke_width=6,color=RED)
        # Delta radii for the circles around the origin in the domain plane.
        R=1.7
        R1=R-0.5
        linea1=Line((0,0,0),(R,0,0),stroke_width=6,color=YELLOW_C)
        circulo=Circle(radius=R,color=YELLOW_C)
        circulo1=Circle(radius=R1,color=YELLOW_C)
        def puntosEnSuperficie(rad,lim,num):
            # Draw up to `num` random candidates, keep those in the annulus
            # rad < |(x, y)| < lim, and pair each domain dot (blue) with its
            # image on the surface (red).
            puntosDom = []
            puntosSur = []
            for i in range(num):
                azar = lim*np.random.rand(1,2)[0] + 0.1
                if (rad < np.sqrt(azar[0]**2 + azar[1]**2) < lim):
                    puntosDom.append(Dot(np.array([azar[0], azar[1],0]), color = BLUE))
                    puntosSur.append(Dot(superficie.func(azar[0], azar[1]), color = RED))
            return puntosDom, puntosSur
        puntosD1, puntosS1 = puntosEnSuperficie(R, 5, 6000)
        puntosD2, puntosS2 = puntosEnSuperficie(R1, R, 3000)
        GPuntosD1 = VGroup(*puntosD1)
        GPuntosS1 = VGroup(*puntosS1)
        GPuntosD2 = VGroup(*puntosD2)
        GPuntosS2 = VGroup(*puntosS2)
        ### Animation ###
        self.set_camera_orientation(0.8*np.pi/2, -0.25*np.pi,distance=12)
        self.begin_ambient_camera_rotation(rate=0.001)
        self.play(ShowCreation(axes))
        self.add_fixed_in_frame_mobjects(text1)
        self.add_fixed_in_frame_mobjects(M)
        self.play(Write(text1))
        self.play(ShowCreation(superficie))
        self.wait()
        self.play(FadeOut(text1))
        self.add_fixed_in_frame_mobjects(text2)
        self.play(Write(text2))
        self.play(ShowCreation(linea))
        self.play(FadeOut(text2))
        self.add_fixed_in_frame_mobjects(text3)
        self.play(Write(text3))
        self.add_fixed_in_frame_mobjects(text3_1)
        self.play(Write(text3_1))
        self.play(ShowCreation(linea1))
        self.play(ShowCreation(circulo))
        self.play(FadeOut(text3),FadeOut(text3_1))
        self.add_fixed_in_frame_mobjects(text4)
        self.play(Write(text4))
        self.wait()
        self.play(FadeOut(text4))
        self.play(FadeIn(GPuntosD1))
        self.add_fixed_in_frame_mobjects(text5)
        self.play(Write(text5),FadeOut(linea1))
        self.play(FadeIn(GPuntosS1))
        # Sweep the epsilon marker across the outer sample ring.
        self.play(linea.shift,(R+0.1)*RIGHT,runtime=10)
        self.wait(6.5)
        self.play(FadeOut(text5))
        self.add_fixed_in_frame_mobjects(text5_1)
        self.play(Write(text5_1))
        # Repeat with a larger epsilon and a smaller delta ring.
        self.play(ReplacementTransform(linea,linea_1))
        self.play(ReplacementTransform(circulo,circulo1))
        self.play(FadeIn(GPuntosD2))
        self.play(FadeIn(GPuntosS2))
        self.play(linea_1.shift,(R1+0.1)*RIGHT,runtime=10)
        self.wait(3)
        self.play(FadeOut(text5_1))
        self.add_fixed_in_frame_mobjects(text6)
        self.play(Write(text6))
        self.add_fixed_in_frame_mobjects(text7)
        self.play(Write(text7))
        self.wait(2)
        self.play(FadeOut(text7),FadeOut(text6),FadeOut(axes),FadeOut(M),
        FadeOut(superficie),FadeOut(linea_1),FadeOut(circulo1),FadeOut(GPuntosD1),
        FadeOut(GPuntosS1),FadeOut(GPuntosD2),FadeOut(GPuntosS2))
8721596981 | from scipy.misc import comb
def exp(p, n):
    """Return sum_{k=0..n} C(n, k) * p**k * (1-p)**(n-k).

    This is the total probability mass of a Binomial(n, p) distribution, so
    the result is always 1.0 up to floating-point rounding — the sweep in
    main() uses it as a sanity check.

    Args:
        p: success probability in [0, 1].
        n: number of trials (non-negative int).
    """
    # math.comb replaces scipy.misc.comb, which has been removed from SciPy
    # (its successor is scipy.special.comb).  math.comb is exact integer
    # arithmetic, so it is at least as accurate as comb(..., exact=False).
    from math import comb
    total = 0.0
    for k in range(n + 1):
        total += comb(n, k) * p**k * (1 - p) ** (n - k)
    return total
def main():
    """Sweep a grid of (p, n) pairs and print the binomial-sum check for each."""
    probabilities = [0.3, 0.75, 0.8, 1.0, 0.0, 0.5]
    for p in probabilities:
        n = 1
        while n < 20:
            print('Checking n=%d, p=%f' % (n, p))
            print('Result: %f' % (exp(p, n)))
            n += 1
print('Result: %f' % (exp(p, n)))
if __name__ == '__main__':
main()
| JelteF/statistics | 2/lab2_2_d.py | lab2_2_d.py | py | 395 | python | en | code | 0 | github-code | 36 |
31618957679 | import pickle
import os
import pprint
def save_dict_to_file(output):
    """Parse 'key: value' lines from *output* and build a new file name from
    the parsed invoice fields plus the source file's creation timestamp.

    NOTE(review): this function looks unfinished — it computes new_filename
    but never renames/saves anything and returns None.  It also depends on
    names not defined in this file's visible scope: i_file, my_reverse_date,
    and the datetime module (not imported here); system_text is declared
    global but never used.  Confirm against the full project before calling.
    """
    global system_text
    global list_of_files
    file_ = f'{your_target_folder}/{list_of_files[i_file]}'
    filename, file_extension = os.path.splitext(file_)
    dict_data = {}
    # Each line of *output* is expected to be exactly 'key: value'.
    for line_ in output.splitlines():
        key, value = line_.split(': ')
        dict_data[key] = value
    # st_birthtime is macOS-specific (file creation time).
    file_date_ = os.stat(file_).st_birthtime
    file_date = datetime.datetime.fromtimestamp(file_date_).strftime('%y%m%d_%H%M')
    new_filename = f"{my_reverse_date(dict_data['kuupaev'])}_{dict_data['firma']}_{dict_data['arve nr']}_{file_date}"
def load_dict_from_file(file_name):
    """Unpickle and return the object stored in *file_name*."""
    with open(file_name, 'rb') as handle:
        return pickle.load(handle)
# Folder containing the previously pickled receipt dictionaries.
your_target_folder = '/Volumes/[C] Windows 10 (1)/Users/docha/OneDrive/Leka/Tsekkid for test/Tsekkid aprill'
list_of_files = [fn for fn in os.listdir(your_target_folder)
                 if any(fn.endswith(ext) for ext in ['.pkl',])]
#pprint.pprint(list_of_files)
# Report every date/total field that still contains a comma
# (i.e. values not yet normalised to dot-decimal / ISO form).
for file_ in list_of_files:
    dict_ = load_dict_from_file(f'{your_target_folder}/{file_}')
    for k, v in dict_.items():
        if k == 'kuupaev' or k == 'summa kokku':
            if ',' in v:
                print(file_, k, v)
| dochaauch/Tools_for_buh | Bonus_help.py | Bonus_help.py | py | 1,262 | python | en | code | 0 | github-code | 36 |
72219956265 | # one can of paint covers 5m^2 of wall, given a random height and width of wall
# calculate the minimum cans of paint to buy to fully cover the wall fully.
# define a function to take in inputs for width, height
# calculate the area of wall w*h, then calculate cans to buy rounded up to whole number
# output the number of cans
from math import ceil
def getPaintCans(w, h):
    """Print and return the minimum number of 5 m^2 paint cans needed for a
    w-by-h metre wall.

    Accepts numbers or numeric strings (as read from input()).  Returning the
    count (the original returned None) is backward compatible and lets
    callers reuse the value.
    """
    cans = ceil((float(w) * float(h)) / 5)
    print(f"You need at least {cans} cans of paint to fully cover the {w} by {h} wall.")
    return cans
[width,height]=str.split(input("Input width and height of wall in metres, separated by a comma: \n"),",")
getPaintCans(width,height) | ElliotMonde/py_udemy | print_debug_comment/get_paint_cans.py | get_paint_cans.py | py | 620 | python | en | code | 0 | github-code | 36 |
71685392105 | from turtle import *
def kwadrat(s,col):
pd()
fillcolor(col)
begin_fill()
for _ in range(4):
fd(s)
lt(90)
end_fill()
pu()
def trojkat(s,n,col):
x=position()
fd(s)
for i in range(n,0,-2):
for j in range(i):
kwadrat(s,col)
fd(s)
bk((j+1)*s)
lt(90)
fd(s)
rt(90)
fd(s)
setpos(x)
def filling(s,n):
for _ in range(2):
trojkat(s,n,"red")
rt(90)
bk((n+2)*s)
trojkat(s,n,"green")
rt(90)
bk((n+2)*s)
def kwadraty(n):
pu()
k = 2*n+7
s=480/k
bk(240)
lt(90)
bk(240)
rt(90)
color("yellow")
x=position()
for i in range(k):
for j in range(k):
kwadrat(s,"black")
fd(s)
bk(s*k)
lt(90)
fd(s)
rt(90)
setpos(x)
fd(s)
lt(90)
fd(s)
rt(90)
for _ in range(4):
filling(s,n)
rt(90)
bk(2*s*n+5*s)
tracer(30,0)
kwadraty(12)
update()
done() | chinski99/minilogia | 2010/etap 3/kwadraty.py | kwadraty.py | py | 1,051 | python | en | code | 0 | github-code | 36 |
37502622427 | # https://school.programmers.co.kr/learn/courses/30/lessons/12981
def solution(n, words):
    """English word-chain game (programmers.co.kr #12981).

    n players take turns saying words in order; a player loses when their
    word does not start with the last letter of the previous word, or
    repeats a word already said.

    Args:
        n: number of players.
        words: the words spoken, in order.

    Returns:
        [player_number, turn_number] (both 1-based) of the first offender,
        or [0, 0] when nobody loses.
    """
    seen = {words[0]}
    for i in range(1, len(words)):
        prev, cur = words[i - 1], words[i]
        if prev[-1] != cur[0] or cur in seen:
            cnt = i + 1  # 1-based position of the failing word in the game
            person = cnt % n if cnt % n else n
            # Bug fix: when cnt is an exact multiple of n (player n fails),
            # the original returned turn 0; the correct turn is cnt // n.
            turn = cnt // n + 1 if cnt % n else cnt // n
            return [person, turn]
        seen.add(cur)
    return [0, 0]
| junsgi/Algorithm | Implementation/영어 끝말잇기.py | 영어 끝말잇기.py | py | 399 | python | en | code | 0 | github-code | 36 |
39060387909 | from numpy import arange,log,exp,r_
from matplotlib import pyplot as plt
from scipy.special import gamma
import Cua2008
from numpy import fft,sin,pi
from numpy.random import normal
# Synthetic signal parameters: 60 s at 100 Hz sampling.
duration=60
hf_dt=0.01
mean=0.0
std=1.0
num_samples = int(duration/hf_dt)
t=arange(0,duration,hf_dt)
# NOTE(review): this Gaussian draw is immediately overwritten by the
# deterministic two-tone signal below, so `mean`/`std` have no effect.
noise = normal(mean, std, size=num_samples)
freq=0.1
freq2=20.0
noise=sin(2*pi*t*freq+pi/4)+sin(2*pi*t*freq2+pi/6)
# Spectrum of the test signal (computed but currently unused below).
ft=fft.rfft(noise)
f=fft.rfftfreq(len(noise),hf_dt)
# GP window
Tw=duration
epsilon=0.2
eta=0.05
# Shape and decay of the gamma-like taper window.
b=-epsilon*log(eta)/(1+eta*(log(epsilon)-1))
c=b/(epsilon*Tw)
#a=(exp(1)/(epsilon*Tw))**b
# Normalisation so the window has unit L2 energy.
a=(((2*c)**(2*b+1))/gamma(2*b+1))**0.5
w=a*t**b*exp(-c*t)
plt.figure()
# Shift by 10 s and prepend a zero so the window starts from the origin.
plt.plot(r_[0,t+10],r_[0,w])
#fft
# Cua (2008) envelope coefficient indices for reference; only the last
# assignment survives, and the envelope call below hard-codes Scoeff=12
# rather than using `i`.
i = 0 # Horizontal P-wave acceleration - rock:
i = 6 # Vertical P-wave acceleration - rock:
i = 12 # Horizontal S-wave acceleration - rock: **BAD #<-- S-wave alpha_t_rise for i=12 should be 0.064 instead of 0.64?
i = 19 # Vertical S-wave acceleration - soil:
# Magnitude, epicentral distance (km) and S-wave arrival time for the envelope.
M=5
R=10
TT=10
env=Cua2008.envelope(M,R,t,TT,Pcoeff=0,Scoeff=12)
plt.figure()
plt.plot(t,env)
plt.show() | Ogweno/mylife | misc/windowing_test.py | windowing_test.py | py | 1,076 | python | en | code | 0 | github-code | 36 |
10905562253 | from pydantic import BaseModel
class SourceURL(BaseModel):
    '''Source URL schema'''
    # The original long URL submitted by the client.
    source_url: str
    class Config:
        # Allow building this schema directly from ORM model instances
        # (pydantic v1; renamed to from_attributes in v2).
        orm_mode = True
class URLInfo(SourceURL):
    '''URL Information schema'''
    # Key stored in the database that identifies the shortened URL.
    short_url_key: str
    # Fully-qualified shortened URL returned to the client.
    short_url: str
| ScottyZA/backendend-challenge | url_shortener/schemas.py | schemas.py | py | 255 | python | en | code | 0 | github-code | 36 |
72300794345 | import sys
from osgeo import gdal, osr
class GDALUtilities:
    """
    This class has the following capabilities
    1. Get raster info
    2. Read image band as an array
    3. Reproject a raster

    Each method opens the raster read-only on demand and releases the GDAL
    handle (by dropping the reference) before returning.
    """
    def __init__(self, path):
        # Path to the raster file on disk.
        self.path = path
    def get_raster_info(self):
        """Print driver, size, projection, geotransform and band names."""
        self.dataset = gdal.Open(self.path, gdal.GA_ReadOnly)
        print(
            "Driver: {}/{}\n".format(
                self.dataset.GetDriver().ShortName, self.dataset.GetDriver().LongName
            )
        )
        print(
            "Size is {} x {} x {}\n".format(
                self.dataset.RasterXSize, self.dataset.RasterYSize, self.dataset.RasterCount
            )
        )
        print("Projection is {}\n".format(self.dataset.GetProjection()))
        geotransform = self.dataset.GetGeoTransform()
        if geotransform:
            print("Origin = ({}, {})\n".format(geotransform[0], geotransform[3]))
            print("Pixel Size = ({}, {})\n".format(geotransform[1], geotransform[5]))
        band_count = self.dataset.RasterCount
        for i in range(1, band_count + 1):
            band = self.dataset.GetRasterBand(i)
            band_name = band.GetDescription()
            if band_name:
                print(f"Band {i} Name: {band_name}")
            else:
                print(f"Band {i} has no name.")
        # Bug fix: the original did 'dataset = None', which created a *local*
        # name and left self.dataset (the open GDAL handle) alive.
        self.dataset = None
    def read_image(self, band: int = None):
        """Return the given band (1-based index) as a NumPy array.

        NOTE(review): despite the None default, a band number is effectively
        required — GetRasterBand(None) fails; confirm intended default.
        """
        self.dataset = gdal.Open(self.path, gdal.GA_ReadOnly)
        band = self.dataset.GetRasterBand(band)
        data = band.ReadAsArray()
        self.dataset = None
        return data
    def reproject(
        self, output_path: str = None, target_crs: str = None  # EPSG:4326
    ):
        """Warp the raster to *target_crs* (e.g. 'EPSG:4326') at *output_path*."""
        self.dataset = gdal.Open(self.path, gdal.GA_ReadOnly)
        input_dataset = self.dataset
        target_srs = osr.SpatialReference()
        target_srs.SetFromUserInput(target_crs)
        output_dataset = gdal.Warp(
            output_path, input_dataset, dstSRS=target_srs.ExportToWkt()
        )
        # Release our handle too (the original only cleared the local alias).
        self.dataset = None
        input_dataset = None
output_dataset = None | manojappalla/RSGIS-Tutorials | gdal_tutorials/gdal_utilities.py | gdal_utilities.py | py | 2,135 | python | en | code | 0 | github-code | 36 |
33654948642 | import unittest
from onnx import defs, helper
from onnx.onnx_pb2 import NodeProto
class TestRelu(unittest.TestCase):
    def test_relu(self):
        """Relu must be a registered op, and make_node must build a
        well-formed NodeProto for it."""
        self.assertTrue(defs.has('Relu'))
        node_def = helper.make_node(
            'Relu', ['X'], ['Y'])
        # The original built node_def but never checked it; verify the proto
        # actually carries the op type and the declared input/output names.
        self.assertEqual(node_def.op_type, 'Relu')
        self.assertEqual(list(node_def.input), ['X'])
        self.assertEqual(list(node_def.output), ['Y'])
if __name__ == '__main__':
unittest.main()
| tianyaoZhang/myONNX | onnx/test/relu_test.py | relu_test.py | py | 307 | python | en | code | 0 | github-code | 36 |
20530887810 | #Annalisa Dattilio
#This program will allow the user to enter their size/measurements and be able to find their perfect size across online clothing stores domestically and internationally
mylist = list(("Nike", "Cotton On"))
for x in range(len(mylist)):
print(mylist[x])
#how to show only the sizes for store user selects
user_store_selection = str(input("Please select a store from the list above: "))
#if user_store_selection == "nike":
# print what
#elif user_store_selection == "cotton on":
#print what
#else:
#print("Please only select stores from the list.")
#NIKE(Women's Bottoms)
#https://www.nike.com/size-fit/womens-bottoms-alpha - Nike women's size chart
#Waist size in inches
Nike_waist_size_XXS_low = 21.25
Nike_waist_size_XXS_high = 23.5
Nike_waist_size_XS_low = 23.5
Nike_waist_size_XS_high = 26
Nike_waist_size_S_low = 26
Nike_waist_size_S_high = 29
Nike_waist_size_M_low = 29
Nike_waist_size_M_high = 31.5
Nike_waist_size_L_low = 31.5
Nike_waist_size_L_high = 34.5
Nike_waist_size_XL_low = 34.5
Nike_waist_size_XL_high = 38.5
Nike_waist_size__XXL_low = 38.5
Nike_waist_size__XXL_high = 42.5
#Hip size in inches
Nike_hip_size_XXS_low = 30.5
Nike_hip_size_XXS_high = 33
Nike_hip__size_XS_low = 33
Nike_hip__size_XS_high = 35.5
Nike_hip_size_S_low = 35.5
Nike_hip_size_S_high = 38.5
Nike_hip_size_M_low = 38.5
Nike_hip_size_M_high = 41
Nike_hip_size_L_low = 41
Nike_hip_size_L_high = 44
Nike_hip_size_XL_low = 44
Nike_hip_size_XL_high = 47
Nike_hip_size_XXL_low = 47
Nike_hip_size_XXL_high = 50
#Height in inches
Nike_height_XXS_low = 64
Nike_height_XXS_high = 68
Nike_height_XS_low = 64
Nike_height_XS_high = 68
Nike_height_S_low = 64
Nike_height_S_high = 68
Nike_height_M_low = 64
Nike_height_M_high = 68
Nike_height_L_low = 64
Nike_height_L_high = 68
Nike_height_XL_low = 64
Nike_height_XL_high = 68
Nike_height_XXL_low = 64
Nike_height_XXL_high = 68
user_waist_size = int(input("Enter waist size in centimeters: "))
print(user_waist_size / 2.54, end= " in. ") # the / operator is used to convert centimeters to inches through division
user_waist_size_in = user_waist_size / 2.54
user_hip_size = int(input("\nEnter hip size in centimeters: "))
print(user_hip_size * 0.393701, end= " in. ") # the * operator is used to convert centimeters to inches through multiplication
user_hip_size_in = user_hip_size * 0.393701
user_height = int(input("\nEnter height in centimeters: "))
print(user_height / 2.54, end= " in.")
user_height_in = user_height / 2.54
# Waist size: map the converted measurement onto Nike's chart.
# Ranges are half-open [low, high) so every chart boundary value matches
# exactly one size.  The original used strict '<'/'>' on both ends (boundary
# values matched nothing) and capped XXS at 23 instead of the chart's 23.5
# (see Nike_waist_size_XXS_high above), leaving the 23-23.5 in span sizeless.
if user_waist_size_in >= 21.25 and user_waist_size_in < 23.5:
    print("\nYour waist fits size", "XXS", sep=": ")
elif user_waist_size_in >= 23.5 and user_waist_size_in < 26:
    print("\nYour waist fits size", "XS", sep=": ")
elif user_waist_size_in >= 26 and user_waist_size_in < 29:
    print("\nYour waist fits size", "S", sep=": ")
elif user_waist_size_in >= 29 and user_waist_size_in < 31.5:
    print("\nYour waist fits size", "M", sep=": ")
elif user_waist_size_in >= 31.5 and user_waist_size_in < 34.5:
    print("\nYour waist fits size", "L", sep=": ")
elif user_waist_size_in >= 34.5 and user_waist_size_in < 38.5:
    print("\nYour waist fits size", "XL", sep=": ")
elif user_waist_size_in >= 38.5 and user_waist_size_in <= 42.5:
    print("\nYour waist fits size", "XXL", sep=": ")
# Hip size: same half-open [low, high) mapping against the Nike hip chart
# constants above (the original's strict comparisons left exact boundary
# values such as 33 or 41 with no size).
if user_hip_size_in >= 30.5 and user_hip_size_in < 33:
    print("Your hips fit size", "XXS", sep=": ")
elif user_hip_size_in >= 33 and user_hip_size_in < 35.5:
    print("Your hips fit size", "XS", sep=": ")
elif user_hip_size_in >= 35.5 and user_hip_size_in < 38.5:
    print("Your hips fit size", "S", sep=": ")
elif user_hip_size_in >= 38.5 and user_hip_size_in < 41:
    print("Your hips fit size", "M", sep=": ")
elif user_hip_size_in >= 41 and user_hip_size_in < 44:
    # Output fix: the original printed this one size with sep=":" (missing
    # space), inconsistent with every other size line.
    print("Your hips fit size", "L", sep=": ")
elif user_hip_size_in >= 44 and user_hip_size_in < 47:
    print("Your hips fit size", "XL", sep=": ")
elif user_hip_size_in >= 47 and user_hip_size_in <= 50:
    print("Your hips fit size", "XXL", sep=": ")
#height
if user_height < 64 or user_height > 68:
print("-If your height is below 5ft. 4in. know the pant legs will be slightly longer and may be baggy.\n-If your height is above 5ft. 8 in. know the pants will fit above the ankle")
#SPRINT 2
#COTTON ON(Women's Bottoms)
#https://cottonon.com/us/size-guide.html?gclid=Cj0KCQiA7bucBhCeARIsAIOwr--1ciZ5xW1MV06odv9bOfZkwd0VsflsiPgMhUOBfi0B5G0FwPpOWpcaAjuzEALw_wcB
#waist size
# Cotton On size-chart boundaries (inches).
# NOTE(review): these constants are currently unused — the checks further
# below repeat the same numbers as literals; consider referencing these
# names there instead.
Cottonon_waist_size_xxxs_low = 21.6
Cottonon_waist_size_xxxs_high = 23.6
Cottonon_waist_size_xxs_low = 23.6
Cottonon_waist_size_xxs_high = 25.6
Cottonon_waist_size_xs_low = 25.6
Cottonon_waist_size_xs_high = 27.6
Cottonon_waist_size_s_low = 27.6
Cottonon_waist_size_s_high = 29.6
Cottonon_waist_size_m_low = 29.6
Cottonon_waist_size_m_high = 31.6
Cottonon_waist_size_l_low = 31.6
Cottonon_waist_size_l_high = 33.6
Cottonon_waist_size_xl_low = 33.6
#hip size
Cottonon_hip_size_xxxs_low = 31.6
Cottonon_hip_size_xxxs_high = 33.6
Cottonon_hip_size_xxs_low = 33.6
Cottonon_hip_size_xxs_high = 35.6
Cottonon_hip_size_xs_low = 35.6
# BUG FIX: was misspelled "Cotton_hip_size_xs_high", breaking the
# Cottonon_* naming scheme used by every other constant here.
Cottonon_hip_size_xs_high = 37.6
Cottonon_hip_size_s_low = 37.6
Cottonon_hip_size_s_high = 39.6
Cottonon_hip_size_m_low = 39.6
Cottonon_hip_size_m_high = 41.6
Cottonon_hip_size_l_low = 41.6
Cottonon_hip_size_l_high = 43.6
Cottonon_hip_size_xl_low = 43.6
#WAIST SIZE
# Map the waist measurement onto Cotton On numeric sizes (0-12).
# NOTE(review): `user_waist_size_in` is computed earlier in the script.
if user_waist_size_in > 21.6 and user_waist_size_in < 23.6:
    print("\nYour waist fits size", "0", sep=": ")
if user_waist_size_in > 23.6 and user_waist_size_in < 25.6:
    print("\nYour waist fits size", "2", sep=": ")
if user_waist_size_in > 25.6 and user_waist_size_in < 27.6:
    print("\nYour waist fits size", "4", sep=": ")
if user_waist_size_in > 27.6 and user_waist_size_in < 29.6:
    print("\nYour waist fits size", "6", sep=": ")
if user_waist_size_in > 29.6 and user_waist_size_in < 31.6:
    print("\nYour waist fits size", "8", sep=": ")
if user_waist_size_in > 31.6 and user_waist_size_in < 33.6:
    print("\nYour waist fits size", "10", sep=": ")
# NOTE(review): the size-12 band is only 33.6..33.7 — every other band is
# 2 inches wide, so the upper bound looks like a typo for 35.6; confirm
# against the Cotton On size chart linked above.
if user_waist_size_in > 33.6 and user_waist_size_in < 33.7:
    print("\nYour waist fits size", "12", sep=": ")
#HIP SIZE
if user_hip_size_in > 31.6 and user_hip_size_in < 33.6:
    print("\nYour hips fit size", "0", sep=": ")
if user_hip_size_in > 33.6 and user_hip_size_in < 35.6:
    print("\nYour hips fit size", "2", sep=": ")
if user_hip_size_in > 35.6 and user_hip_size_in < 37.6:
    print("\nYour hips fit size", "4", sep=": ")
if user_hip_size_in > 37.6 and user_hip_size_in < 39.6:
    print("\nYour hips fit size", "6", sep=": ")
if user_hip_size_in > 39.6 and user_hip_size_in < 41.6:
    print("\nYour hips fit size", "8", sep=": ")
if user_hip_size_in > 41.6 and user_hip_size_in < 43.6:
    print("\nYour hips fit size", "10", sep=": ")
# NOTE(review): 43.6..43.7 — same suspected typo as above (likely 45.6).
if user_hip_size_in > 43.6 and user_hip_size_in < 43.7:
    print("\nYour hips fit size", "12", sep=": ")
# BUG FIX: removed the bare expression `user_waist_size != user_hip_size`;
# its boolean result was discarded, so the line was a no-op.
#store does not carry your size. would you like to try a different store? select y to continue or n to quit
answer = input("Would you like to continue? Enter yes or no: ")
# NOTE(review): `answer` is read but never used — the yes/no handling below
# is still unimplemented scaffolding left by the original author.
#if answer == "yes":
    #what here (i want to prompt to run the program again)
#elif answer == "no":
    #what here (if no i want the program to stop and end there)
#else:
    #print("Please enter yes or no.")
#if(Nike_waist_size > 23.5 or Nike < 30
#if out of range, print store does not carry size
#additional websites used for assistance:
#https://www.w3schools.com/python/default.asp
#https://www.folkstalk.com/tech/how-to-ask-a-yes-or-no-question-on-python-with-code-examples/
| ADattilio88/SizeCalculator | integration sprint 1.py | integration sprint 1.py | py | 7,563 | python | en | code | 0 | github-code | 36 |
38660134742 | import logging
import sys
import click
import requests
from bs4 import BeautifulSoup
from telegram.ext import CommandHandler, Filters, MessageHandler, Updater
from tinydb import Query, TinyDB
# Persistent job store: one record per watched search term
# ({search_term, chat_id, known_ads}).
db = TinyDB("db.json")
Job = Query()  # TinyDB query handle, used to look records up by search_term
TELEGRAM_BOT_TOKEN = None  # populated from the --token CLI option in run()
class JobExistsException(Exception):
    """Raised by init_search() when the search term is already watched."""
    pass
def parse_result_item(item):
    """
    Takes a li item containing one search result and parses id, url and price from it.
    Returns a dict containing the results.

    Returns None for li elements that are not plain single-ad rows
    (banners, paginators, promoted blocks).
    """
    # NOTE(review): the second find_all argument is a *set* literal, which
    # BeautifulSoup treats as a class filter — presumably intentional, but
    # {"class": "aditem-main"} would be clearer; confirm it matches.
    main = item.find_all("div", {"aditem-main"})
    price = item.find_all("p", {"aditem-main--middle--price"})
    article = item.find_all("article")
    # Anything without exactly one of each element is not an ad row.
    if len(main) != 1 or len(article) != 1 or len(price) != 1:
        return
    main = main[0]
    article = article[0]
    price = price[0]
    result = {
        "ad_id": article["data-adid"],
        "price": price.text.strip(),
    }
    # The first anchor inside the main block links to the ad's detail page
    # (relative href, so prefix the site root).
    a = main.find_all("a")[0]
    result["url"] = "https://www.ebay-kleinanzeigen.de" + a["href"]
    return result
def execute_search(search_term):
    """
    Runs the search for one search term.
    Returns a list containing all parsed search results.
    """
    # Desktop-browser User-Agent; the site rejects obvious bot requests.
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
    }
    # NOTE(review): the location (s-79249 / l9364) and radius (r20) are
    # hard-coded into the URL — confirm before reusing for another area.
    url = f"https://www.ebay-kleinanzeigen.de/s-79249/{search_term}/k0l9364r20"
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.content, features="html.parser")
    ul = soup.find_all("ul", {"id": "srchrslt-adtable"})
    # Exactly one results table is expected; fail loudly if the page layout
    # changed.
    assert len(ul) == 1
    ul = ul[0]
    items = ul.find_all("li")
    results = []
    for i in items:
        data = parse_result_item(i)
        if data is not None:
            results.append(data)
    if len(results) == 0:
        logging.warning(
            f"No results found for search term '{search_term}'. Check if parser works correctly."
        )
    return results
def init_search(search_term, chat_id):
    """
    Register a new watch job for *search_term* owned by *chat_id*.

    Runs one initial search and records every currently listed ad id as
    already known, so only ads appearing later trigger a notification.

    Raises JobExistsException if the term is already being watched.
    """
    if db.search(Job.search_term == search_term):
        raise JobExistsException
    seen_ids = [entry["ad_id"] for entry in execute_search(search_term)]
    db.insert({"search_term": search_term, "chat_id": chat_id, "known_ads": seen_ids})
def echo(update, context):
    """Fallback text handler: echo any plain message back to its chat."""
    chat_id = update.effective_chat.id
    text = update.message.text
    context.bot.send_message(chat_id=chat_id, text=text)
def start_watching(update, context):
    """
    /start command handler: begin watching a new search term.

    The command arguments are joined into one search term; duplicates are
    reported back instead of being registered twice.
    """
    search_target = "".join(context.args)
    try:
        init_search(search_target, update.effective_chat.id)
        reply = f"Ok, I'll start watching '{search_target}'"
    except JobExistsException:
        reply = "Hm, looks like I'm watching that already."
    context.bot.send_message(chat_id=update.effective_chat.id, text=reply)
def stop_watching(update, context):
    """
    /stop command handler: stop watching a search term.
    """
    search_term = "".join(context.args)
    if db.search(Job.search_term == search_term):
        db.remove(Job.search_term == search_term)
        reply = "Ok. I'll no longer watch " + search_term
    else:
        reply = "I don't think I am watching that."
    context.bot.send_message(chat_id=update.effective_chat.id, text=reply)
def look_for_stuff(context):
    """
    Periodic job-queue callback: re-run every stored search and notify the
    owning chat about ads that have not been seen before.

    Newly seen ad ids are persisted so each ad is reported only once.
    """
    for job in db.all():
        known_ads = set(job["known_ads"])
        results = execute_search(job["search_term"])
        something_new = False
        for r in results:
            if r["ad_id"] not in known_ads:
                message = (
                    f"New item for {job['search_term']} ({r['price']}): {r['url']}"
                )
                context.bot.send_message(chat_id=job["chat_id"], text=message)
                known_ads.add(r["ad_id"])
                something_new = True
        # Persist only when something changed to avoid needless DB writes.
        # (Removed the original dead `else: pass` branch and its stale
        # commented-out "Nothing new" notification.)
        if something_new:
            db.update(
                {"known_ads": list(known_ads)}, Job.search_term == job["search_term"]
            )
def status(update, context):
    """/status command handler: list all currently watched search terms."""
    lines = ["I'm currently watching: \n"]
    lines.extend("- " + job["search_term"] + "\n" for job in db.all())
    context.bot.send_message(chat_id=update.effective_chat.id, text="".join(lines))
# Root click command group; subcommands (run, search) are registered below.
# Intentionally no docstring: click would surface it as --help text.
@click.group()
def cli():
    pass
@cli.command()
@click.option("--token", prompt=True, help="The telegram bot api token")
def run(token):
    # Start the bot: wire up all handlers, schedule the periodic search job
    # and begin long-polling Telegram for updates.
    # BUG FIX: the original assigned TELEGRAM_BOT_TOKEN without `global`,
    # which only created an unused local and left the module-level value
    # untouched; declare it global so the assignment takes effect.
    global TELEGRAM_BOT_TOKEN
    TELEGRAM_BOT_TOKEN = token
    updater = Updater(token=TELEGRAM_BOT_TOKEN, use_context=True)
    dispatcher = updater.dispatcher
    # Re-check all watched searches every 5 minutes, first run immediately.
    updater.job_queue.run_repeating(look_for_stuff, interval=5 * 60, first=0)
    dispatcher.add_handler(MessageHandler(Filters.text & (~Filters.command), echo))
    dispatcher.add_handler(CommandHandler("start", start_watching))
    dispatcher.add_handler(CommandHandler("stop", stop_watching))
    dispatcher.add_handler(CommandHandler("status", status))
    updater.start_polling()
# One-shot CLI helper: run a single search and print the parsed results.
# Intentionally no docstring: click would surface it as --help text.
@cli.command()
@click.argument("searchterm")
def search(searchterm):
    data = execute_search(searchterm)
    click.echo(data)
if __name__ == "__main__":
    cli()  # dispatch to the click CLI (subcommands: run, search)
| NiklasMM/ebk-bot | bot.py | bot.py | py | 5,611 | python | en | code | 0 | github-code | 36 |
38083425705 | #!/usr/bin/env python3
import os
import sys
import math
import struct
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.interconnect import csr_bus
from litex.soc.cores.uart import *
sys.path.append('../')
from periphs.misc import *
from periphs.accel import *
def get_common_ios():
    """Return the I/O constraints shared by every SoC variant.

    The cores are generated with compile_gateware=False, so the Pins()
    entries are symbolic widths only — the real pin mapping is done by
    the project that instantiates the generated Verilog.
    """
    return [
        # clk / rst
        ("clk", 0, Pins(1)),
        ("rst", 0, Pins(1)),
        # serial
        ("serial", 0,
            Subsignal("tx", Pins(1)),
            Subsignal("rx", Pins(1))
        ),
        ("gpio_irq", 0, Pins("1")),
        ("user_led", 15, Pins("1")),
        # SPI master
        ("spi", 0,
            Subsignal("sclk", Pins(1)),
            Subsignal("miso", Pins(1)),
            Subsignal("mosi", Pins(1)),
            Subsignal("csn", Pins(1)),
            Subsignal("irq", Pins(1)),
        ),
        # SPI slave, accel simulator
        ("spi_slave", 0,
            # SPI slave part
            Subsignal("sck", Pins(1)),
            Subsignal("miso", Pins(1)),
            Subsignal("mosi", Pins(1)),
            Subsignal("csn", Pins(1)),
            Subsignal("int1", Pins(1)),
            Subsignal("int2", Pins(1)),
            #Subsignal("irq", Pins(1)),
            Subsignal("led0", Pins(1)),
            Subsignal("led1", Pins(1)),
            Subsignal("led2", Pins(1)),
            Subsignal("led3", Pins(1)),
            Subsignal("led4", Pins(1)),
            Subsignal("led5", Pins(1)),
            Subsignal("led6", Pins(1)),
            Subsignal("led15", Pins(1)),
            # UART part
            Subsignal("tx", Pins(1)),
            Subsignal("rx", Pins(1)),
        ),
        # MailBox sender interface (8-bit data out plus strobe/interrupt)
        ("mbx_snd", 0,
            Subsignal("dout_r", Pins("0 1 2 3 4 5 6 7")),
            Subsignal("dout_re", Pins(1)),
            Subsignal("int_r", Pins(1)),
            Subsignal("int_re", Pins(1)),
        ),
        # MailBox receiver interface (8-bit data/length status plus read strobe)
        ("mbx_rcv", 0,
            Subsignal("din_status", Pins("0 1 2 3 4 5 6 7")),
            Subsignal("len_status", Pins("0 1 2 3 4 5 6 7")),
            Subsignal("rd_r", Pins(1)),
            Subsignal("rd_re", Pins(1)),
            Subsignal("int", Pins(1)),
        ),
    ]
class Platform(XilinxPlatform):
    """Artix-7 (xc7a35tcpg236-1) shell platform used only for core generation."""

    def __init__(self):
        super().__init__("xc7a35tcpg236-1", io=[], toolchain="vivado")
class CRG(Module):
    """Clock/reset generator: drives the `sys` clock domain.

    The external clock is wired straight through combinatorially; the
    external reset is registered once, so `sys` sees it one cycle late.
    """

    def __init__(self, platform, soc_config):
        clk = platform.request("clk")
        rst = platform.request("rst")
        self.clock_domains.cd_sys = ClockDomain()
        # "keep" stops the synthesizer from renaming/merging these nets.
        self.cd_sys.clk.attr.add("keep")
        self.cd_sys.rst.attr.add("keep")
        self.comb += [
            self.cd_sys.clk.eq(clk),
        ]
        # NOTE(review): this is a plain single-FF register, not an
        # AsyncResetSynchronizer (imported at the top of the file but unused
        # here) — confirm the external reset is already synchronous to clk.
        self.sync += [
            self.cd_sys.rst.eq(rst),
        ]
class BaseSoC(SoCCore):
    """SoC used to generate the accel project cores.

    The peripheral set is selected by ``soc_config["platform_name"]``:

    * ``accel_sim_release``: SPI master + accel simulator + optional
      mailboxes + GPIO LED.
    * ``accel_sim``: SPI master + accel simulator + optional mailboxes.
    * ``accel_test``: SPI master + GPIO IRQ input + optional mailboxes.

    Mailboxes are included when ``soc_config["mbx_sender"]`` /
    ``soc_config["mbx_receiver"]`` equal ``"yes"``.
    """

    csr_map = {
        "ctrl": 0,
        "uart": 2,
        "timer0": 3,
    }
    interrupt_map = {
        "uart": 3,
        "timer0": 4,
    }
    mem_map = {
        "rom": 0x00000000,
        "sram": 0x10000000,
        "csr": 0xf0000000,
    }
    csr_map.update(SoCCore.csr_map)
    interrupt_map.update(SoCCore.interrupt_map)

    def __init__(self, platform, soc_config, **kwargs):
        platform.add_extension(get_common_ios())
        sys_clk_freq = soc_config["sys_clk_freq"]
        SoCCore.__init__(self, platform, sys_clk_freq,
            with_uart=True,
            integrated_main_ram_size=0,
            **kwargs)

        # crg
        self.submodules.crg = CRG(platform, soc_config)

        # NOTE: the original duplicated the peripheral wiring verbatim for
        # "accel_sim_release" and "accel_sim"; it is factored into the
        # helpers below. CSR/interrupt numbers and instantiation order are
        # unchanged, so the generated register map is identical.
        platform_name = soc_config["platform_name"]
        if platform_name == "accel_sim_release":
            self._add_spi_master()
            self._add_accel(sys_clk_freq)
            self._add_mailboxes(soc_config)
            # Integrate GPIO LED (release builds only)
            self.submodules.gpio_led = GpioLED(self.platform.request("user_led", 15))
            self.add_csr("gpio_led", 14, allow_user_defined=True)
        elif platform_name == "accel_sim":
            self._add_spi_master()
            self._add_accel(sys_clk_freq)
            self._add_mailboxes(soc_config)
        elif platform_name == "accel_test":
            self._add_spi_master()
            # Integrate GPIO interrupt input (edge detection disabled;
            # "rissing_edge_detect" is the peripheral's own keyword).
            self.submodules.gpio_isr = GpioISR(self.platform.request("gpio_irq", 0), rissing_edge_detect=False)
            self.add_csr("gpio_isr", 11, allow_user_defined=True)
            self.add_interrupt("gpio_isr", 7, allow_user_defined=True)
            self._add_mailboxes(soc_config)

    def _add_spi_master(self):
        # SPI master: CSR 10, IRQ 6, memory-mapped at 0x30000000.
        spi_master = SpiMaster(self.platform.request("spi", 0))
        self.submodules.spi_master = spi_master
        self.add_csr("spi_master", 10, allow_user_defined=True)
        self.add_interrupt("spi_master", 6, allow_user_defined=True)
        self.register_mem("spi_master", 0x30000000, spi_master.bus, 32)
        spi_master.add_source(self.platform)

    def _add_accel(self, sys_clk_freq):
        # Accelerometer simulator core (SPI slave + 115200-baud UART):
        # CSR 11, IRQ 7.
        self.submodules.accel = AccelCore(freq=sys_clk_freq, baud=115200, pads=self.platform.request("spi_slave", 0))
        self.add_csr("accel", 11, allow_user_defined=True)
        self.add_interrupt("accel", 7, allow_user_defined=True)

    def _add_mailboxes(self, soc_config):
        # Optional mailbox sender (CSR 12) and receiver (CSR 13, IRQ 8).
        if soc_config["mbx_sender"] in ["yes"]:
            self.submodules.mbx_snd = MailBoxSenderInf(self.platform.request("mbx_snd", 0))
            self.add_csr("mbx_snd", 12, allow_user_defined=True)
        if soc_config["mbx_receiver"] in ["yes"]:
            self.submodules.mbx_rcv = MailBoxReceiverInf(self.platform.request("mbx_rcv", 0))
            self.add_csr("mbx_rcv", 13, allow_user_defined=True)
            self.add_interrupt("mbx_rcv", 8, allow_user_defined=True)
def main():
    # Generate the SoC Verilog core described by the config file on argv[1].
    # get config
    if len(sys.argv) < 2:
        print("missing config file")
        exit(1)
    # NOTE(review): exec() gives the config file arbitrary code execution;
    # it is trusted input and must define `soc_config` in our globals.
    exec(open(sys.argv[1]).read(), globals())

    # generate core
    platform = Platform()
    platform.name = soc_config["platform_name"]
    soc = BaseSoC(platform, soc_config,
        ident=soc_config["soc_ident"],
        integrated_rom_size=soc_config["rom_size"],
        integrated_sram_size=soc_config["sram_size"],
        cpu_type=soc_config["cpu"],
        cpu_variant=soc_config["cpu_variant"]
    )
    output_dir = "build/" + soc_config["platform_name"]
    build_name = soc_config["platform_name"] + "_core"
    builder = Builder(soc, output_dir=output_dir, compile_gateware=False)
    builder.build(build_name=build_name, regular_comb=False)

    def replace_in_file(filename, _from, _to):
        # Replace every occurrence of _from with _to inside `filename`.
        with open(filename, "r") as file:
            filedata = file.read()
        with open(filename, "w") as file:
            file.write(filedata.replace(_from, _to))

    # Rename the generated memory-init files so they carry the build name,
    # then patch the generated Verilog to reference the new names.
    # BUG FIX: the original built these paths with '.init'.format(...) —
    # str.format() on a placeholder-free string is a no-op, so the argument
    # was dead and the expressions were misleading.
    gateware_dir = output_dir + "/gateware"
    top_verilog = gateware_dir + "/" + build_name + ".v"
    for old_name, new_name in [
        ("mem.init", build_name + ".init"),
        ("mem_1.init", build_name + "_mem_1.init"),
        ("mem_2.init", build_name + "_mem_2.init"),
    ]:
        os.system("mv " + gateware_dir + "/" + old_name + " " + gateware_dir + "/" + new_name)
        replace_in_file(top_verilog, old_name, new_name)


if __name__ == "__main__":
    main()
| kamejoko80/linux-on-litex-vexriscv-legacy | soc_builder/soc_generator.py | soc_generator.py | py | 10,332 | python | en | code | 0 | github-code | 36 |
23928535346 | # This is the python implementation of minesweeper
import random as rand
from tkinter import *
from functools import partial
def create_graph(w, h):
    """Build the adjacency map for a w-by-h board.

    Maps each cell (row, col) to the list of its in-bounds neighbours,
    enumerated clockwise starting from the top-left corner (this matches
    the original neighbour ordering, which callers may rely on).
    """
    # Clockwise offsets: TL, T, TR, R, BR, B, BL, L.
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1),
               (1, 1), (1, 0), (1, -1), (0, -1)]
    adjacency = {}
    for row in range(h):
        for col in range(w):
            adjacency[(row, col)] = [
                (row + dr, col + dc)
                for dr, dc in offsets
                if 0 <= row + dr < h and 0 <= col + dc < w
            ]
    return adjacency
def add_mines(w, h, mineCount):
    """Return an h x w grid (list of rows) with int(mineCount) cells set to 1.

    BUG FIX: the original looped with random 0.91 thresholds and only broke
    out of the *inner* loop when `count < 0`, which (a) placed mineCount+1
    mines because the check ran before placement, and (b) made the number of
    passes nondeterministic. Sampling distinct cell indices places exactly
    the requested number of mines (capped at the board size).
    """
    grid = [[0 for _ in range(w)] for _ in range(h)]
    n_mines = min(int(mineCount), w * h)  # callers may pass a float target
    for cell in rand.sample(range(w * h), n_mines):
        grid[cell // w][cell % w] = 1
    return grid
def count_surrounds(graph, i, j, mines):
    """Return how many neighbours of cell (i, j) contain a mine."""
    return sum(1 for r, c in graph[i, j] if mines[r][c] == 1)
def calc_mines(graph, mines, w, h):
    """Build the h x w grid of button values.

    A cell is -1 when it sits on a mine, otherwise the number of mines in
    its neighbourhood (as given by `graph`).
    """
    return [
        [-1 if mines[row][col] == 1 else count_surrounds(graph, row, col, mines)
         for col in range(w)]
        for row in range(h)
    ]
def reveal_mines(buttons):
    """Show every mine (sunken bomb image) and disable its button.

    Called when the player clicks a mine. Relies on the module-level
    globals `height`, `width`, `mines` and `bombImage` defined in the
    script body below.
    """
    for i in range(height):
        for j in range(width):
            if mines[i][j] == 1:
                buttons[i][j].configure(
                    text="", relief=SUNKEN, image=bombImage)
                buttons[i][j].configure(command='')  # empty command: the button is dead now
def recursive_reveal(buttons, i, j):
    """Flood-fill reveal starting from the zero-valued cell (i, j).

    Reveals neighbouring 0-cells (and bordering 1-cells), then recurses
    into the newly revealed 0-cells. Each revealed 0-cell is overwritten
    with the marker 'd' in the global `button_numbers` grid so it is not
    visited again — this is what terminates the recursion.

    NOTE(review): only cells valued 0 or 1 are revealed here; borders with
    value >= 2 stay hidden — confirm that is the intended behaviour.
    """
    neighbors = graph[i, j]
    grow_list = []
    for neighbor in neighbors:
        if button_numbers[neighbor[0]][neighbor[1]] == 0:
            buttons[neighbor[0]][neighbor[1]].configure(
                relief=SUNKEN, text='0', image=downImg)
            grow_list.append([neighbor[0], neighbor[1]])
            button_numbers[neighbor[0]][neighbor[1]] = 'd'
        elif button_numbers[neighbor[0]][neighbor[1]] == 1:
            buttons[neighbor[0]][neighbor[1]].configure(
                relief=SUNKEN, text='1', image=downImg)
    if len(grow_list) > 0:
        for neighbor in grow_list:
            recursive_reveal(buttons, neighbor[0], neighbor[1])
def grid_callback(i, j, buttons, btn_value):
    """Handle a left-click on board cell (i, j).

    `btn_value` is the cell's precomputed number: -1 = mine, 0 = empty,
    n >= 1 = adjacent-mine count.
    """
    print(i,j);  # NOTE(review): debug print, duplicated at the end of this function
    buttons[i][j].configure(relief=SUNKEN, text=btn_value, image=downImg)
    # Handle the game loss
    if btn_value == -1:
        reveal_mines(buttons)
    # If 0 Recursive Reveal
    if btn_value == 0:
        recursive_reveal(buttons, i, j)
    # Else
    # NOTE(review): per-number colouring below was left disabled by the author.
    # if btn_value == 1:
    #     buttons[i][j].configure(bg='blue')
    # elif btn_value == 2:
    #     buttons[i][j].configure(bg='green')
    # elif btn_value == 3:
    #     buttons[i][j].configure(bg='orange')
    # elif btn_value == 4:
    #     buttons[i][j].configure(bg='purple')
    # elif btn_value == -1:
    #     buttons[i][j].configure(bg='red')
    print(i, j)
# Main Method
# Board configuration: 8x8 cells with an "easy" mine density of 10%.
height = 8
width = 8
# Create the graph
graph = create_graph(width, height)
total_cells = height * width
ez_density = 0.1
mine_number = total_cells * ez_density  # NOTE: a float (6.4) — add_mines treats it as a rough target
print(total_cells, mine_number)
mines = add_mines(width, height, mine_number)
button_numbers = calc_mines(graph, mines, width, height)
# Debug output: neighbour list of one cell and both grids as text.
print(graph[1, 1])
print("Mines")
for i in range(height):
    for j in range(width):
        print(mines[i][j], end='\t')
    print()
print("Button Numbers")
for i in range(height):
    for j in range(width):
        print(button_numbers[i][j], end='\t')
    print()
###            ###
#  : All the GUI Stuff here :   #
###            ###
root = Tk()
Grid.rowconfigure(root, 0, weight=1)
Grid.columnconfigure(root, 0, weight=1)
#Create & Configure frame
frame = Frame(root)
# NOTE(review): grid() immediately followed by pack() on the same widget —
# the pack call takes over as geometry manager; the grid call is redundant.
frame.grid(row=0, column=0)
frame.pack(side=TOP)
# Tile images: raised (hidden), sunken (revealed) and bomb.
img = PhotoImage(file="assets/square_up.png")
downImg = PhotoImage(file="assets/square_down.png")
bombImage = PhotoImage(file="assets/square_bomb.png")
buttons = []
for row_index in range(height):
    buttons.append([])
    Grid.rowconfigure(frame, row_index, weight=1)
    for col_index in range(width):
        btn_value = button_numbers[row_index][col_index]
        Grid.columnconfigure(frame, col_index, weight=1)
        # Create button and add an anonymous function to call callback with it's coordinates
        btn = Button(frame, image=img, height=45, width=45,
            compound=CENTER, state=None, bd=0)
        # Configure the buttons call back to call with position and it's value
        # (defaults i=/j=/value= bind the *current* loop values, avoiding the
        # late-binding-closure pitfall).
        btn.configure(command=lambda i=row_index, j=col_index,
            value=btn_value: grid_callback(i, j, buttons, value))
        btn.grid(row=row_index, column=col_index)
        buttons[row_index].append(btn)
# Add bottom frame with action buttons
menu_frame = Frame(root, bg="#F19C79")
menu_frame.pack(side=BOTTOM)
# NOTE(review): Restart and Main Menu have no command yet (inert buttons).
restart_button = Button(menu_frame, text="Restart")
restart_button.grid(row=0)
menu_button = Button(menu_frame, text="Main Menu")
menu_button.grid(row=0, column=1)
# NOTE(review): quit(x) is the site-module builtin, which raises SystemExit —
# confirm this is intended rather than root.destroy().
quit_button = Button(menu_frame, text="Quit", command=lambda x=1: quit(x))
quit_button.grid(row=0, column=2)
# Add Top Frame with
root.mainloop()
| thalluricheritha/minesweeper | MineSweeperScript.py | MineSweeperScript.py | py | 6,181 | python | en | code | 0 | github-code | 36 |
5881104834 | import array
import binascii
import configparser
import datetime
import io
import logging
import os
import signal
import sys
import time
try:
    import serial
except ImportError:
    # BUG FIX: this flag used to be set unconditionally *after* the
    # try/except, so the "Missing Module pyserial" warning fired even when
    # the import succeeded. It is now defined only on failure; the startup
    # code below tests for its *presence* via `'ModulSerialMissing' in locals()`.
    ModulSerialMissing = True

################################################################################
#   Constants
BUILDVERSION = "V1.0.0"
BUILDDATE = "2017-10-22"
################################################################################
# classes / structs
class mondata:
    """Mutable container for one decoded monitor/telemetry frame.

    All fields are stored as strings exactly as parsed off the wire;
    field meanings below are inferred from their names — TODO confirm
    against the protocol documentation.
    """
    def __init__(self):
        self.viewmode = 0  # display mode selector (0 = default)
        # Decoded frame fields (raw string values).
        self.Lights = "0"
        self.SC1TX = "0"
        self.percAssist = "000"
        self.AWD = "0"
        self.C10 = "0"
        self.Voltage = "000"
        self.Current = "000"
        self.SC1RX = "0"
        self.SC2 = "00"
        self.Speed = "000"
        # Unlabelled data bytes 16..27, grouped in threes.
        self.D1618 = "000"
        self.D1921 = "000"
        self.D2224 = "000"
        self.D2527 = "000"
        self.wsize = "0"  # wheel size -- presumably; TODO confirm
        # Raw TX/RX byte strings of the last exchange.
        self.TX = ""
        self.RX = ""
        # Parameter table (P00..); "---" marks a value not yet read.
        self.PLIST = ["---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---","---"]
################################################################################
# Import external functions
import lib.message as msg
import lib.config as cfg
import lib.osys as osy
import arg as arg
# TODO:
# - TX RX Handlers
# - TX RX Parsers
# - Basic Logging
# - Basic TX Construction mode (Parameter Reading P00 P59)
# - PDCURSES?
################################################################################
# Functions
def signal_handler(signal, frame):
    """SIGINT handler: terminate the program cleanly with exit status 0."""
    raise SystemExit(0)
################################################################################
# beginn main
# init global vars
monitordata= mondata()
# NOTE(review): this sets a *class* attribute on mondata, not a field of the
# `monitordata` instance created above — looks like a typo for
# `monitordata.viewmode = 0`; confirm nothing reads the class attribute.
mondata.viewmode=0
signal.signal(signal.SIGINT, signal_handler)
cfg.read(cfg)  # NOTE(review): passes the cfg module to itself — confirm lib.config expects this
if len(sys.argv) == 1:
    print('\npye-motion ' + BUILDVERSION + ' - ' + BUILDDATE)
    # check for modules which might not be part of the standard python 3 installation
    # (ModulSerialMissing is only defined when the pyserial import failed)
    if 'ModulSerialMissing' in locals():
        print('Missing Module pyserial. Install by typing pye-motion -install')
    print('No command line argument given. type pye-motion -help for valid arguments')
if len(sys.argv) != 1:
    # BUG FIX: the original tests used `sys.argv[1] in ("-help")` etc.
    # ("-help") is just a parenthesised *string*, not a tuple, so `in`
    # performed a substring test and arguments like "-h", "-l" or even "e"
    # incorrectly matched. Compare for equality instead.
    command = sys.argv[1]
    if command == "-help":
        arg.help()
        exit()
    elif command == "-install":
        arg.install()
        exit()
    elif command == "-listen":
        msg.serialOpen(cfg)
        arg.listen(monitordata)
    elif command == "-plisten":
        print("warning: This modus requires to set the LCD into settings mode first. ")
        print("Hold + and - simultaneously to enter settings. ")
        rawtx = input("press enter to continue")
        msg.serialOpen(cfg)
        arg.plisten(monitordata)
    elif command == "-pquery":
        print("warning: This modus requires to set the LCD into settings mode first. ")
        print("Hold + and - simultaneously to enter settings. ")
        rawtx = input("press enter to continue")
        msg.serialOpen(cfg)
        arg.pquery(monitordata)
    elif command == "-speedlimit":
        print("warning: This modus requires to set the LCD into settings mode first. ")
        print("Hold + and - simultaneously to enter settings. ")
        rawtx = input("press enter to continue")
        msg.serialOpen(cfg)
        # optional third argument: the new speed limit value (defaults to 0)
        if len(sys.argv) == 3:
            arg.speedlimit(monitordata, sys.argv[2])
        else:
            arg.speedlimit(monitordata, 0)
        exit()
    else:
        print('Invalid command line argument given. type pye-motion - help for valid arguments')

# sample code for opening, sending, receiving and closing comport
#ser = serial.Serial(port_A, py pybaudrate=baud_A, timeout=1)	# open first serial port
#print ("Port opened: " + ser.portstr)	# check which port was really used
#ser.write("hello world".encode("utf-8"))	# write a string
#receive = ser.read(11)
#print (receive.decode("utf-8"))
#ser.close()	# close port
| nasrudin2468/pye-motion | pye-motion.py | pye-motion.py | py | 4,158 | python | en | code | 4 | github-code | 36 |
32624805079 | from torch import nn
import torch
import numpy as np
import os
class Encoder(nn.Module):
    """Variational encoder producing a (mu, logvar) pair for a quality vector.

    Layer widths depend on the quality-check level:
      level 1: 17 -> 24 -> 8  -> latent_dims
      level 2: 22 -> 36 -> 12 -> latent_dims
      level 3:  8 -> 12 -> latent_dims  (no first linear stage)
    """

    def __init__(self, latent_dims, qc_level):
        super().__init__()
        dims = {
            1: [17, 24, 8, latent_dims],
            2: [22, 36, 12, latent_dims],
            3: [8, 12, latent_dims],
        }.get(qc_level, [])
        if qc_level == 3:
            # Shallower network: linear1 is absent and skipped in forward().
            self.linear1 = None
            self.linear2 = nn.Linear(dims[0], dims[1])
            self.linear2_bn = nn.BatchNorm1d(dims[1])
            self.linear3A = nn.Linear(dims[1], dims[2])
            self.linear3B = nn.Linear(dims[1], dims[2])
        else:
            self.linear1 = nn.Linear(dims[0], dims[1])
            self.linear1_bn = nn.BatchNorm1d(dims[1])
            self.linear2 = nn.Linear(dims[1], dims[2])
            self.linear2_bn = nn.BatchNorm1d(dims[2])
            self.linear3A = nn.Linear(dims[2], dims[3])
            self.linear3B = nn.Linear(dims[2], dims[3])

    def forward(self, x):
        """Encode a batch; returns (mu, logvar), each of shape (batch, latent)."""
        if self.linear1 is not None:
            x = torch.tanh(self.linear1_bn(torch.tanh(self.linear1(x))))
        x = torch.tanh(self.linear2_bn(torch.tanh(self.linear2(x))))
        return self.linear3A(x), self.linear3B(x)
class QualityEncoder(object):
    def __init__(self, device='auto', encoder_type1_path=None, encoder_type2_path=None, encoder_type3_path=None, encoder_dim = [2, 2, 2]):
        """Load the three quality encoders and precompute scoring anchors.

        device: 'auto' selects CUDA when available, otherwise CPU.
        encoder_typeN_path: checkpoint path; defaults to the bundled
            encoder/quality_encoder_typeN.pickle next to this module.
        encoder_dim: latent dimensionality per quality type (1, 2, 3).
            NOTE(review): mutable default list — never mutated here, but
            confirm before changing.
        """
        self.encoder_type1_path = encoder_type1_path
        self.encoder_type2_path = encoder_type2_path
        self.encoder_type3_path = encoder_type3_path
        if not self.encoder_type1_path:
            self.encoder_type1_path = os.path.join(os.path.split(__file__)[0], 'encoder', 'quality_encoder_type1.pickle')
        if not self.encoder_type2_path:
            self.encoder_type2_path = os.path.join(os.path.split(__file__)[0], 'encoder', 'quality_encoder_type2.pickle')
        if not self.encoder_type3_path:
            self.encoder_type3_path = os.path.join(os.path.split(__file__)[0], 'encoder', 'quality_encoder_type3.pickle')
        self.type1_encoder = Encoder(encoder_dim[0], qc_level=1)
        self.type2_encoder = Encoder(encoder_dim[1], qc_level=2)
        self.type3_encoder = Encoder(encoder_dim[2], qc_level=3)
        # Reference quality vectors used as scoring anchors. Row 0 appears to
        # be the best case and row 4 the worst (rows 1-3 intermediate /
        # single-defect cases) -- TODO confirm against the training data.
        self.type1_quality_refs = [
            [0.0, 1.0, 0.0, 0.0, 0.050, 0.250, 0.0, 0.0, 1.0, 0.0, 0.0, 0.050, 0.250, 0.0, 1.0, 0.0, 0.0],
            [0.3, 0.4, 0.3, 0.3, 0.335, 0.475, 0.3, 0.3, 0.4, 0.3, 0.3, 0.335, 0.475, 0.3, 0.7, 0.3, 0.3],
            [1.0, -1.0, 1.0, 1.0, 1.000, 1.000, 5.0, 0.0, 1.0, 0.0, 0.0, 0.050, 0.250, 0.0, 0.0, 1.0, 1.0],
            [0.0, 1.0, 0.0, 0.0, 0.050, 0.250, 0.0, 1.0, -1.0, 1.0, 1.0, 1.000, 1.000, 5.0, 0.0, 1.0, 1.0],
            [1.0, -1.0, 1.0, 1.0, 1.000, 1.000, 5.0, 1.0, -1.0, 1.0, 1.0, 1.000, 1.000, 5.0, 0.0, 1.0, 1.0]
        ]
        self.type2_quality_refs = [
            [0.0, 1.0, 1.0, 0.0, 0.0, 0.200, 0.200, 0.0, 1.0, 1.0, 0.0, 0.0, 0.200, 0.200, 1.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.200, 0.200],
            [0.2, 0.6, 0.6, 0.2, 0.2, 0.360, 0.360, 0.2, 0.6, 0.6, 0.2, 0.2, 0.360, 0.360, 0.6, 0.2, 0.6, 0.6, 0.2, 0.2, 0.360, 0.360],
            [1.0, -1.0, -1.0, 1.0, 1.0, 1.000, 1.000, 0.0, 1.0, 1.0, 0.0, 0.0, 0.050, 0.250, -1.0, 1.0, -1.0, -1.0, 1.0, 1.0, 1.000, 1.000], # good heavy only; worst others
            [0.0, 1.0, 1.0, 0.0, 0.0, 0.050, 0.250, 1.0, -1.0, -1.0, 1.0, 1.0, 1.000, 1.000, -1.0, 1.0, -1.0, -1.0, 1.0, 1.0, 1.000, 1.000], # bad heavy only
            [1.0, -1.0, -1.0, 1.0, 1.0, 1.000, 1.000, 1.0, -1.0, -1.0, 1.0, 1.0, 1.000, 1.000, -1.0, 1.0, -1.0, -1.0, 1.0, 1.0, 1.000, 1.000]
        ]
        self.type3_quality_refs = [
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [1.5, 0.3, 0.9, 1.5, 0.3, 0.9, 0.3, 0.3],
            [5.0, 1.0, 3.0, 0.0, 0.0, 0.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, 5.0, 1.0, 3.0, 1.0, 1.0],
            [5.0, 1.0, 3.0, 5.0, 1.0, 3.0, 1.0, 1.0]
        ]
        if device == 'auto':
            self.device = "cuda" if torch.cuda.is_available() else "cpu"
        else:
            self.device = device
        print('device: ' + self.device)
        self.load_encoder()
        # load_encoder() already refreshes the anchors; this second call is
        # redundant but harmless.
        self._update_reference_latents()
def __call__(self, quality_vectors, normalize=True):
qv = torch.tensor(quality_vectors, dtype=torch.float32).to(self.device)
dim = qv.shape[1]
if dim == 17:
latent = self.type1_encoder(qv)[0].cpu().detach().numpy()
score = self.score_type1_latent(latent)
type = 1
elif dim == 22:
latent = self.type2_encoder(qv)[0].cpu().detach().numpy()
score = self.score_type2_latent(latent)
type = 2
elif dim == 8:
latent = self.type3_encoder(qv)[0].cpu().detach().numpy()
score = self.score_type3_latent(latent)
type = 3
else:
raise 'Unkonw dimension. Valid dimensions are 17 for type 1 quality, 22 for type 2 quality, and 8 for type 3 quality.'
if normalize:
return self.normalize_score(type, score), latent
else:
return score, latent
    def load_encoder(self):
        """(Re)load all three encoder checkpoints and refresh the anchors.

        Weights are mapped onto ``self.device`` and the modules are put in
        eval mode (required for the BatchNorm layers at inference time).
        """
        self.type1_encoder.load_state_dict(torch.load(self.encoder_type1_path, map_location=self.device))
        self.type2_encoder.load_state_dict(torch.load(self.encoder_type2_path, map_location=self.device))
        self.type3_encoder.load_state_dict(torch.load(self.encoder_type3_path, map_location=self.device))
        self.type1_encoder.to(self.device)
        self.type2_encoder.to(self.device)
        self.type3_encoder.to(self.device)
        self.type1_encoder.eval()
        self.type2_encoder.eval()
        self.type3_encoder.eval()
        self._update_reference_latents()
    def _update_reference_latents(self):
        # Re-encode the reference quality vectors into latent anchor points.
        # Row 0 of each refs table is the best reference, row 4 the worst.
        self.type1_latent_points = self.type1_encoder(torch.tensor(self.type1_quality_refs).to(self.device))[0].to('cpu').detach().numpy()
        self.type2_latent_points = self.type2_encoder(torch.tensor(self.type2_quality_refs).to(self.device))[0].to('cpu').detach().numpy()
        self.type3_latent_points = self.type3_encoder(torch.tensor(self.type3_quality_refs).to(self.device))[0].to('cpu').detach().numpy()
        # _score_range[i] holds the raw-score bounds for quality type i+1
        # (max = score of the best anchor, min = score of the worst anchor),
        # used by normalize_score() to map raw scores onto [-10, 10].
        self._score_range = [
            dict(max= self.score_type1_latent([self.type1_latent_points[0]])[0], min=self.score_type1_latent([self.type1_latent_points[4]])[0]),
            dict(max= self.score_type2_latent([self.type2_latent_points[0]])[0], min=self.score_type2_latent([self.type2_latent_points[4]])[0]),
            dict(max= self.score_type3_latent([self.type3_latent_points[0]])[0], min=self.score_type3_latent([self.type3_latent_points[4]])[0])
        ]
def encode_quality(self, quality_vectors):
qv = torch.tensor(quality_vectors, dtype=torch.float32).to(self.device)
dim = qv.shape[1]
if dim == 17:
return self.type1_encoder(torch.tensor(qv, dtype=torch.float32).to(self.device))[0].detach().numpy()
elif dim == 22:
return self.type2_encoder(torch.tensor(qv, dtype=torch.float32).to(self.device))[0].detach().numpy()
elif dim == 8:
return self.type3_encoder(torch.tensor(qv, dtype=torch.float32).to(self.device))[0].detach().numpy()
else:
raise 'Unkonw dimension. Valid dimensions are 17 for type 1 quality, 22 for type 2 quality, and 8 for type 3 quality.'
# def encode_type1_quality(self, quality_vector):
# return self.type1_encoder(torch.tensor(quality_vector, dtype=torch.float32).to(self.device))[0].detach().numpy()
# def encode_type2_quality(self, quality_vector):
# return self.type2_encoder(torch.tensor(quality_vector, dtype=torch.float32).to(self.device))[0].detach().numpy()
# def encode_type3_quality(self, quality_vector):
# return self.type3_encoder(torch.tensor(quality_vector, dtype=torch.float32).to(self.device))[0].detach().numpy()
    def score_type1_latent(self, type1_latents):
        # Score latents against the cached type-1 reference latent points.
        return self.score_func(type1_latents, self.type1_latent_points)
    def score_type2_latent(self, type2_latents):
        # Score latents against the cached type-2 reference latent points.
        return self.score_func(type2_latents, self.type2_latent_points)
    def score_type3_latent(self, type3_latents):
        # Score latents against the cached type-3 reference latent points.
        return self.score_func(type3_latents, self.type3_latent_points)
def normalize_score(self, type, score):
if type <= 0 or type >=4:
return
min = self._score_range[type - 1]['min']
max = self._score_range[type - 1]['max']
return -10 + 20 * ((score - min)/(max - min))
def score_func(self, latent_points, ref_latent_points):
# ref_latent_points = self._ref_latent_points
dist_max = np.linalg.norm(ref_latent_points[0] - ref_latent_points[4])
score = 2 * (1 - (self.dist_func(latent_points, ref_latent_points[0])/dist_max)**0.5)
score = score + 1 * (1 - (self.dist_func(latent_points, ref_latent_points[1])/dist_max)**0.5)
score = score + 1 * (1 - (self.dist_func(latent_points, ref_latent_points[2])/dist_max)**0.5)
score = score - 1 * (1 - (self.dist_func(latent_points, ref_latent_points[3])/dist_max)**0.5)
score = score - 2 * (1 - (self.dist_func(latent_points, ref_latent_points[4])/dist_max)**0.5)
return score
    def dist_func(self, a, b):
        # Row-wise Euclidean distance between two 2-D arrays of latents.
        return np.linalg.norm(a - b, axis=1)
| chiyang/tmasque | tmasque/QualityEncoder.py | QualityEncoder.py | py | 8,839 | python | en | code | 0 | github-code | 36 |
2223043714 | def interleave_str(s1, s2, s3):
# can s3 be formed by interleaving s1 and s2 ?
n, m = len(s1), len(s2)
if len(s3) != n + m:
return False
dp = [False for _ in range(m + 1)]
for i in range(n + 1):
for j in range(m + 1):
if i == j ==0:
dp[j] = True
elif i == 0:
dp[j] = dp[j-1] and s2[j-1] == s3[i+j-1]
elif j == 0:
dp[j] = dp[j] and s1[i-1] == s3[i+j-1]
else:
dp[j] = (dp[j] and s1[i-1] == s3[i+j-1]) or (dp[j-1] and s2[j-1] == s3[i+j-1])
return dp[-1]
print(interleave_str("aabcc", "dbbca", "aadbbcbcac"))
| arrws/leetcode | dynamic/interleave_str.py | interleave_str.py | py | 661 | python | en | code | 0 | github-code | 36 |
22313884437 | from django.core.management.base import BaseCommand
from import_data.models import OuraMember, FitbitMember, GoogleFitMember
from retrospective.tasks import (
update_fitbit_data,
update_oura_data,
update_googlefit_data,
)
import time
import requests
class Command(BaseCommand):
    """Queue background data-update tasks for every Oura, Fitbit and
    Google Fit member, pacing submissions with a short sleep."""

    help = "Updates all data for all members"

    def handle(self, *args, **options):
        # cheat to wake up sleeping worker
        requests.get("https://oh-oura-connect.herokuapp.com/")
        for member in OuraMember.objects.all():
            update_oura_data.delay(member.id)
            print("submitted oura update for {}".format(member.id))
            time.sleep(2)
        for member in FitbitMember.objects.all():
            update_fitbit_data.delay(member.id)
            print("submitted fitbit update for {}".format(member.id))
            time.sleep(2)
        for member in GoogleFitMember.objects.all():
            update_googlefit_data.delay(member.user.oh_id, member.user.user.id)
            print("submitted googlefit update for {}".format(member.id))
            time.sleep(2)
| OpenHumans/quantified-flu | import_data/management/commands/update_data_imports.py | update_data_imports.py | py | 1,148 | python | en | code | 24 | github-code | 36 |
25470793052 |
# Escrito por gilsilva20629@gmail.com _ gilberto.s@escolar.ifrn.edu.br
'''
3) Implemente um programa que leia uma palavra e verifique se a mesma é palíndromo.
Um palíndromo é uma palavra que pode ser lida igualmente de trás pra frente e de frente pra trás. Exemplo: arara.
'''
p = input('Digite uma palavra: ')
# A palindrome reads the same forwards and backwards, so comparing the
# word with its reverse is equivalent to the character-by-character walk.
if p == p[::-1]:
    print('true')
else:
    print('false')
'''
if :
elif
else:
''' | gilsilva20629/REDES-DE-COMPUTADORES | Gilberto_Silva_ead03.py | Gilberto_Silva_ead03.py | py | 491 | python | pt | code | 0 | github-code | 36 |
13918851672 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 3 13:07:13 2018
@author: marcos
"""
import pandas as pd
import csv
import pickle as pkl
import numpy as np
import scipy.stats as sts
from sklearn import preprocessing as prep
# =============================================================================
# Data Manipulation Tool
# =============================================================================
class DMT(object):
    """Data Manipulation Tool: a thin wrapper around a pandas DataFrame.

    Loads a dataset from CSV, JSON or a pickled dict and provides helpers
    for export, class-column extraction, categorical encoding, descriptive
    statistics, min-max normalization and IQR-based outlier splitting.
    """
    def __init__(self, database_file, file_format='csv', sep=',', decimal='.', orient='index'):
        """Load `database_file` according to `file_format`
        ('csv' | 'json' | 'dict', the latter being a pickled dict)."""
        self.file = database_file
        self.file_format = file_format
        self.sep = sep
        self.decimal = decimal
        self.orient = orient
        self.classes = None        # class column extracted by set_class()
        self.minima = None         # per-column minima saved by normalize()
        self.maxima = None         # per-column maxima saved by normalize()
        self.outliers_inf = None   # rows below the lower IQR fence
        self.outliers_sup = None   # rows above the upper IQR fence
        self.normalized = False
        # _index is used for iterator
        self._index = 0
        if self.file_format == 'csv':
            self.df = pd.read_csv(self.file, sep=self.sep)
        elif self.file_format == 'json':
            self.df = pd.read_json(self.file)
        elif self.file_format == 'dict':
            persisted_dict = pkl.load(open(database_file, 'rb'))
            self.df = pd.DataFrame.from_dict(persisted_dict, orient=self.orient)
    ############ I/O and Import/Export Methods ####################
    def print_summary(self):
        """Print an id/name/dtype table for the stored columns."""
        print(' Summary of stored data:')
        print('-------------------------------------')
        print('%8s | %15s | %8s' % ('Id', 'Name', 'Type'))
        print('-------------------------------------')
        for i,col in enumerate(self.df.dtypes):
            print('%8d | %15s | %8s' % (i, self.df.columns[i], col))
        print('-------------------------------------')
        print()
    def save_csv(self, output_file, numeric_only=False):
        """Write the data (optionally numeric columns only) as CSV."""
        if numeric_only:
            data = self.get_numeric_data()
        else:
            data = self.df
        data.to_csv(output_file, sep=self.sep, decimal=self.decimal, quoting=csv.QUOTE_NONNUMERIC, index=False)
    def save_json(self, output_file, orient='index', numeric_only=False):
        """Write the data as JSON.

        NOTE(review): the `orient` parameter is ignored — `self.orient`
        (set at construction) is what is actually used below.
        """
        if numeric_only:
            data = self.get_numeric_data()
        else:
            data = self.df
        data.to_json(output_file, orient=self.orient)
    def save_dict(self, output_file, numeric_only=False):
        """Pickle the data as a dict (orient taken from `self.orient`)."""
        if numeric_only:
            data = self.get_numeric_data()
        else:
            data = self.df
        pkl.dump(data.to_dict(orient=self.orient), open(output_file, 'wb'))
    def get_json(self, numeric_only=False):
        """Return the data serialized as a JSON string."""
        if numeric_only:
            data = self.get_numeric_data()
        else:
            data = self.df
        return data.to_json(orient=self.orient)
    def get_dict(self, numeric_only=False):
        """Return the data as a plain dict."""
        if numeric_only:
            data = self.get_numeric_data()
        else:
            data = self.df
        return data.to_dict(orient=self.orient)
    ############ Column or row manipulation Methods ####################
    def drop_columns(self, col_list):
        """Drop the named columns from the stored DataFrame."""
        self.df = self.df.drop(columns=col_list)
    def set_class(self, column, categorical=True):
        """Extract `column` as the class/target series and remove it
        from the feature DataFrame."""
        if categorical:
            self.set_categorical(column)
        self.classes = self.df[column].copy()
        self.df.drop(columns=[column], inplace=True)
    def is_classes_set(self):
        """True once set_class() has been called."""
        return self.classes is not None
    def get_classes(self):
        """Return the extracted class series (or None)."""
        return self.classes
    # Encode categorical data into integer ids
    def encode_categorical(self):
        # NOTE(review): a single LabelEncoder is re-fit per object column,
        # so the fitted mapping of only the last column survives in `le`.
        le = prep.LabelEncoder()
        for x in self.df.columns:
            if self.df[x].dtypes == 'object':
                self.df[x] = le.fit_transform(self.df[x])
    # Set a column to categorical data
    def set_categorical(self, column):
        self.df[column] = self.df[column].astype(str)
    ########### Magical Methods #################################
    def __len__(self):
        # Number of rows in the stored DataFrame.
        return len(self.df)
    def __str__(self):
        return str(self.df)
    def __getitem__(self, index):
        # Column access, delegated to the underlying DataFrame.
        return self.df[index]
    def __iter__(self):
        # NOTE(review): iteration state (_index) is not reset here, so a
        # second iteration resumes where the first stopped.
        return self
    def __next__(self):
        try:
            result = self.df.loc[self.df.index[self._index]]
        except IndexError:
            raise StopIteration
        self._index += 1
        return result
    ############ Data Transformation Methods ####################
    def get_stats(self, output_format='df'):
        """Compute per-column descriptive statistics.

        `output_format` selects the return type: 'df' (DataFrame),
        'html' (HTML table string) or anything else (plain string).
        Numeric columns get the full set of moments/quantiles; other
        columns only get count/nunique/mode/entropy.
        """
        le = prep.LabelEncoder()
        stats = {}
        for i,a in enumerate(self.df.columns):
            stats[a] = {}
            ## Type
            stats[a]['type'] = self.df.dtypes[i]
            ## Counting
            stats[a]['count'] = self.df[a].count()
            ## Non-unique values
            stats[a]['nunique'] = self.df[a].nunique()
            ## Mode
            mode = self.df[a].mode()
            if len(mode) == 1:
                stats[a]['mode'] = mode[0]
            else:
                # Ambiguous (multi-modal) columns get no mode.
                stats[a]['mode'] = None
            if pd.api.types.is_numeric_dtype(self.df[a]):
                ## Entropy
                # Entropy of the 10-bin histogram of the column values.
                hist = np.histogram(self.df[a])[0]
                p = hist / np.sum(hist)
                stats[a]['entropy'] = sts.entropy(p)
                ## Variance
                stats[a]['variance'] = self.df[a].var()
                ## Average
                stats[a]['average'] = self.df[a].mean()
                ## Dispersion
                if stats[a]['average']:
                    stats[a]['dispersion'] = stats[a]['variance']/stats[a]['average']
                else:
                    # Avoid division by a zero (or NaN-falsy) mean.
                    stats[a]['dispersion'] = 0.0
                ## Standard deviation
                stats[a]['std_dev'] = self.df[a].std()
                ## Minimum and maximum
                stats[a]['min'] = self.df[a].min()
                stats[a]['max'] = self.df[a].max()
                ## Median
                stats[a]['median'] = self.df[a].median()
                ## Skewness and Kurtosis
                stats[a]['skewness'] = self.df[a].skew()
                stats[a]['kurtosis'] = self.df[a].kurt()
                ## Quantiles
                qts = self.df[a].quantile([0.25, 0.5, 0.75])
                stats[a]['quantile1'] = qts[0.25]
                stats[a]['quantile2'] = qts[0.5]
                stats[a]['quantile3'] = qts[0.75]
            else:
                # Non-numeric column: label-encode first so a histogram
                # (and thus an entropy) can still be computed.
                tmp = le.fit_transform(self.df[a])
                hist = np.histogram(tmp)[0]
                p = hist / np.sum(hist)
                stats[a]['entropy'] = sts.entropy(p)
                stats[a]['variance'] = None
                stats[a]['average'] = None
                stats[a]['dispersion'] = None
                stats[a]['std_dev'] = None
                stats[a]['min'] = None
                stats[a]['max'] = None
                stats[a]['median'] = None
                stats[a]['skewness'] = None
                stats[a]['kurtosis'] = None
                stats[a]['quantile1'] = None
                stats[a]['quantile2'] = None
                stats[a]['quantile3'] = None
        stats_df = pd.DataFrame.from_dict(stats, orient=self.orient)
        if output_format == 'df':
            return stats_df
        elif output_format == 'html':
            return '<h2 style="text-align:center">Stored Data Description</h2>' + stats_df.to_html()
        else:
            return 'Stored Data Description\n' + str(stats_df)
    def normalize(self):
        """Min-max normalize all numeric columns in place (idempotent);
        saves minima/maxima so denormalize() can invert the transform."""
        if not self.normalized:
            numeric_data = self.get_numeric_data()
            maxima = numeric_data.max()
            minima = numeric_data.min()
            data_range = maxima - minima
            # Constant columns would divide by zero; map them to range 1.
            data_range[data_range == 0] = 1.0
            numeric_data = (numeric_data - minima) / data_range
            self.df[numeric_data.columns] = numeric_data
            self.minima = minima
            self.maxima = maxima
            self.normalized = True
    def denormalize(self):
        """Invert normalize() using the saved minima/maxima."""
        if self.normalized:
            if (self.minima is not None) and (self.maxima is not None):
                numeric_data = self.get_numeric_data()
                numeric_data = numeric_data * (self.maxima - self.minima) + self.minima
                self.df[numeric_data.columns] = numeric_data
                self.normalized = False
    def split_outliers(self, limQ1=25, limQ3=75, c=1.5):
        """Move rows outside the Tukey fences (q1 - c*iqr, q3 + c*iqr,
        evaluated per column) into outliers_inf / outliers_sup.

        NOTE(review): the final drop() passes positional indices as row
        labels — this assumes a default RangeIndex; verify for data loaded
        with a custom index.
        """
        numeric_data = self.get_numeric_data()
        q1 = np.percentile(numeric_data, limQ1, axis=0)
        q3 = np.percentile(numeric_data, limQ3, axis=0)
        iqr = sts.iqr(numeric_data, axis=0)
        keep = []
        sup = []
        inf = []
        for i in range(len(numeric_data)):
            d = numeric_data.loc[numeric_data.index[i]]
            # A row is a lower outlier if ANY column falls below its fence.
            test_inf = d < q1 - c * iqr
            if test_inf.any():
                inf.append(i)
            else:
                test_sup = d > q3 + c * iqr
                if test_sup.any():
                    sup.append(i)
                else:
                    keep.append(i)
        drop = False
        if len(inf):
            self.outliers_inf = self.df.loc[self.df.index[inf]]
            drop = True
        if len(sup):
            self.outliers_sup = self.df.loc[self.df.index[sup]]
            drop = True
        if drop:
            self.df.drop(inf + sup, inplace=True)
    def get_numeric_data(self):
        # View of the numeric columns only (pandas internal helper).
        return self.df._get_numeric_data()
| mhfribeiro/safra-meta | modules/preprocess/dmt.py | dmt.py | py | 10,322 | python | en | code | 0 | github-code | 36 |
11514166585 | from hashlib import sha1
from json import dump
from os import makedirs
# Catalog served by the API: the list of published app package identifiers.
apps = {
    'apps': [
        'club.postdata.covid19cuba',
        'com.codestrange.www.cuba_weather',
        'com.cubanopensource.todo',
    ]
}
def main():
    """Write api/apps.json and a companion api/apps_hash.json holding the
    SHA-1 of the JSON file's contents (used for cheap change detection)."""
    makedirs('api', exist_ok=True)
    with open('api/apps.json', mode='w', encoding='utf-8') as file:
        dump(apps, file, ensure_ascii=False)
    # Hash the bytes actually written to disk.
    with open('api/apps.json', encoding='utf-8') as file:
        text = file.read()
    digest = sha1(text.encode())
    result = {'hash': digest.hexdigest()}
    with open('api/apps_hash.json', mode='w', encoding='utf-8') as file:
        dump(result, file, ensure_ascii=False)


if __name__ == '__main__':
    main()
| leynier/cubaopenplay.github.io | app/main.py | main.py | py | 725 | python | en | code | 3 | github-code | 36 |
31521216432 | class Solution(object):
def countDigitOne(self, n):
"""
:type n: int
:rtype: int
"""
return self.c(n+1)
def c(self, n):
if n<=10:
return int(n>1)
head=int(str(n)[0])
tail=int(str(n)[1:] or 0)
full=int('1'+'0'*(len(str(n))-1))
if head==1 and tail==0:
return 10*self.c(n/10)+(n/10)
else:
return (full if head>1 else 0) + head*self.c(full) + self.c(tail) + (tail)*(head==1) | szhu3210/LeetCode_Solutions | LC/233.py | 233.py | py | 511 | python | en | code | 3 | github-code | 36 |
18526754583 | import logging
import tqdm
from multiprocessing import Pool
from dsrt.config.defaults import DataConfig
class Padder:
    """Pads every utterance in a dialogue corpus to a fixed length and, for
    hierarchical models, pads each dialogue to a fixed number of turns."""

    def __init__(self, properties, parallel=True, config=None):
        """
        :param properties: dict with 'max-utterance-length' and
            'max-dialogue-length'.
        :param parallel: if True, transform() uses all available cores.
        :param config: data configuration; defaults to a fresh DataConfig().
        """
        self.properties = properties
        # BUG FIX: the default was `config=DataConfig()`, a single shared
        # instance created at import time and reused across all Padders.
        self.config = DataConfig() if config is None else config
        self.parallel = parallel
        self.max_ulen = self.properties['max-utterance-length']
        self.max_dlen = self.properties['max-dialogue-length']
        self.init_logger()

    def init_logger(self):
        """Configure the root logger at the level requested by the config."""
        self.logger = logging.getLogger()
        self.logger.setLevel(self.config['logging-level'])

    def transform(self, dialogues):
        """Pad all dialogues using a multiprocessing pool; returns a new list."""
        self.log('info', 'Padding the dialogues (using max utterance length={} tokens) ...'.format(self.max_ulen))
        # A fully padded placeholder turn, used to pad short dialogues.
        self.empty_turn = [self.config['pad-d']] * (self.properties['max-utterance-length'] + 1)
        chunksize = self.config['chunksize']
        p = Pool() if self.parallel else Pool(1)
        res = []
        total = len(dialogues)
        self.log('info', '[padder running on {} cores]'.format(p._processes))
        for d in tqdm.tqdm(p.imap(self.pad_dialogue, dialogues, chunksize=chunksize), total=total):
            res.append(d)
        p.close()
        p.join()
        return res

    def pad_dialogues(self, dialogues):
        """
        Pad the entire dataset sequentially (no multiprocessing).

        This involves adding padding at the end of each sentence, and in the
        case of a hierarchical model, also adding padding at the end of each
        dialogue, so that every training sample has the same dimension.
        """
        self.log('info', 'Padding the dialogues ...')
        return [self.pad_dialogue(d) for d in dialogues]

    def pad_dialogue(self, dialogue):
        """Pad each utterance to max_ulen + 1 tokens; when the model is
        hierarchical, also pad the dialogue to max_dlen turns.
        Mutates and returns *dialogue*."""
        for i, u in enumerate(dialogue):
            dif = self.max_ulen - len(u) + 1
            dialogue[i] += [self.config['pad-u']] * dif
        # only pad the dialogue if we're training a hierarchical model
        if self.config['hierarchical']:
            dif = self.max_dlen - len(dialogue)
            # BUG FIX: this read `dialogues += ...`, a NameError in this
            # scope — the local is `dialogue`.
            dialogue += [self.empty_turn] * dif
        return dialogue

    ####################
    #    UTILITIES     #
    ####################

    def log(self, priority, msg):
        """
        Convenience logging wrapper.

        NOTE(review): *priority* is currently ignored — every message is
        emitted at CRITICAL so it is never filtered by the configured level.
        The intended mapping (via the levelmap in Config) is not implemented;
        preserved as-is to avoid changing runtime log output.
        """
        self.logger.log(logging.CRITICAL, msg)
| sbarham/dsrt | dsrt/data/transform/Padder.py | Padder.py | py | 2,765 | python | en | code | 1 | github-code | 36 |
22372717524 | import os
import sys
import time
import glob
import numpy as np
import torch
import utils
import logging
import argparse
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torchvision.datasets as dset
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from model_search_lfm import Network, Network_w
from architect_lfm import Architect
from encoder_resnet import *
from types import SimpleNamespace
from torch.utils.tensorboard import SummaryWriter
# Command-line configuration for the CIFAR architecture search run.
parser = argparse.ArgumentParser("cifar")
parser.add_argument('--data', type=str, default='../data',
                    help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=32, help='batch size')
parser.add_argument('--learning_rate_min', type=float,
                    default=0.00025, help='minimum learning rate')
parser.add_argument('--report_freq', type=float,
                    default=1, help='report frequency')
parser.add_argument('--gpu', type=str, default='0', help='gpu device id')
parser.add_argument('--epochs', type=int, default=50,
                    help='num of training epochs')
parser.add_argument('--init_channels', type=int,
                    default=16, help='num of init channels')
parser.add_argument('--layers', type=int, default=8,
                    help='total number of layers')
parser.add_argument('--model_path', type=str,
                    default='saved_models', help='path to save the model')
parser.add_argument('--cutout', action='store_true',
                    default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int,
                    default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float,
                    default=0.3, help='drop path probability')
parser.add_argument('--save', type=str, default='EXP', help='experiment name')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--train_portion', type=float,
                    default=0.5, help='portion of training data')
parser.add_argument('--unrolled', action='store_true',
                    default=False, help='use one-step unrolled validation loss')
# new hyperparams.
# Per-component optimization hyperparameters: w1/w2 are the two weight
# copies, A the architecture params, V the encoder, r the r-vector.
parser.add_argument('--learning_rate_w1', type=float, default=1e-2)
parser.add_argument('--learning_rate_w2', type=float, default=1e-2)
parser.add_argument('--learning_rate_A', type=float, default=1e-3)
parser.add_argument('--learning_rate_V', type=float, default=1e-2)
parser.add_argument('--learning_rate_r', type=float, default=1e-2)
parser.add_argument('--momentum_w1', type=float, default=0.9, help='momentum')
parser.add_argument('--momentum_w2', type=float, default=0.9, help='momentum')
parser.add_argument('--momentum_A', type=float, default=0.9, help='momentum')
parser.add_argument('--momentum_V', type=float, default=0.9, help='momentum')
parser.add_argument('--momentum_r', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay_w1', type=float, default=1e-4)
parser.add_argument('--weight_decay_w2', type=float, default=1e-4)
parser.add_argument('--weight_decay_A', type=float, default=1e-5)
parser.add_argument('--weight_decay_V', type=float, default=1e-4)
parser.add_argument('--weight_decay_r', type=float, default=1e-4)
parser.add_argument('--grad_clip_w1', type=float, default=5)
parser.add_argument('--grad_clip_w2', type=float, default=5)
parser.add_argument('--grad_clip_A', type=float, default=5)
parser.add_argument('--grad_clip_V', type=float, default=5)
parser.add_argument('--grad_clip_r', type=float, default=5)
parser.add_argument('--is_parallel', type=int, default=0)
parser.add_argument('--encoder_size', type=str, default='18')
parser.add_argument('--is_cifar100', type=int, default=0)
parser.add_argument('--resume', type=str, default='')
args = parser.parse_args()
# Each run gets a timestamped experiment directory.
args.save = 'search-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
writer = SummaryWriter(filename_suffix=time.strftime("%Y%m%d-%H%M%S"))
CIFAR_CLASSES = 10
CIFAR100_CLASSES = 100
def save_checkpoint(state, checkpoint=args.save, filename='checkpoint.pth.tar'):
    """Persist the training-state dict under the checkpoint directory."""
    torch.save(state, os.path.join(checkpoint, filename))
def main():
    """Run the LFM architecture search: build model/encoder/r-vector, set up
    one optimizer+scheduler per component (w1, w2, A, V, r), optionally
    resume from a checkpoint, then alternate train/validate per epoch."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    np.random.seed(args.seed)
    if not args.is_parallel:
        torch.cuda.set_device(int(args.gpu))
        logging.info('gpu device = %d' % int(args.gpu))
    else:
        logging.info('gpu device = %s' % args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info("args = %s", args)
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    # model contains w1, w2 and A
    if args.is_cifar100:
        model = Network(args.init_channels, CIFAR100_CLASSES, args.layers, criterion, args.is_parallel, args.gpu)
    else:
        model = Network(args.init_channels, CIFAR_CLASSES, args.layers, criterion, args.is_parallel, args.gpu)
    torch.save(model.w_temp, os.path.join(args.save, 'w_temp.pt'))
    # encoder contains V
    if args.encoder_size == '18':
        encoder = resnet18(pretrained=True).cuda()
    elif args.encoder_size == '34':
        encoder = resnet34(pretrained=True).cuda()
    elif args.encoder_size == '50':
        encoder = resnet50(pretrained=True).cuda()
    elif args.encoder_size == '101':
        encoder = resnet101(pretrained=True).cuda()
    # contains r
    # TODO: check input size
    r_vec = nn.Sequential(nn.Linear(args.batch_size, 1, bias=False)).cuda()
    # Initialize r near all-ones with small Gaussian noise.
    r_vec[0].weight = nn.Parameter(torch.ones_like(r_vec[0].weight) + 1e-3*torch.randn_like(r_vec[0].weight))
    if args.is_parallel:
        args.gpu = '0,1'
        gpus = [int(i) for i in args.gpu.split(',')]
        encoder = nn.parallel.DataParallel(
            encoder, device_ids=gpus, output_device=gpus[1])
        model.w1 = nn.parallel.DataParallel(
            model.w1, device_ids=gpus, output_device=gpus[1])
        model.w2 = nn.parallel.DataParallel(
            model.w2, device_ids=gpus, output_device=gpus[1])
        encoder = encoder.module
        model.w1 = model.w1.module
        model.w2 = model.w2.module
    # logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    # One optimizer per learnable component: SGD for the weight copies,
    # Adam for architecture (A), encoder (V) and r.
    optimizers = SimpleNamespace(
        w1=torch.optim.SGD(
            model.w1.parameters(),
            args.learning_rate_w1,
            momentum=args.momentum_w1,
            weight_decay=args.weight_decay_w1),
        w2=torch.optim.SGD(
            model.w2.parameters(),
            args.learning_rate_w2,
            momentum=args.momentum_w2,
            weight_decay=args.weight_decay_w2),
        A=torch.optim.Adam(
            model.arch_parameters(),
            lr=args.learning_rate_A, betas=(0.5, 0.999),
            weight_decay=args.weight_decay_A),
        V=torch.optim.Adam(
            encoder.parameters(),
            lr=args.learning_rate_V, betas=(0.5, 0.999),
            weight_decay=args.weight_decay_V),
        r=torch.optim.Adam(
            r_vec.parameters(),
            lr=args.learning_rate_r, betas=(0.5, 0.999),
            weight_decay=args.weight_decay_r)
    )
    lr = SimpleNamespace(
        w1=args.learning_rate_w1,
        w2=args.learning_rate_w2,
        A=args.learning_rate_A,
        V=args.learning_rate_V,
        r=args.learning_rate_r
    )
    if args.is_cifar100:
        train_transform, valid_transform = utils._data_transforms_cifar100(args)
    else:
        train_transform, valid_transform = utils._data_transforms_cifar10(args)
    if args.is_cifar100:
        train_data = dset.CIFAR100(root=args.data, train=True,
                                   download=True, transform=train_transform)
    else:
        train_data = dset.CIFAR10(root=args.data, train=True,
                                  download=True, transform=train_transform)
    # Split the training set into search-train and search-valid portions.
    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(args.train_portion * num_train))
    train_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
        pin_memory=False, num_workers=4, drop_last=True)
    valid_queue = torch.utils.data.DataLoader(
        train_data, batch_size=args.batch_size,
        sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
        pin_memory=False, num_workers=4, drop_last=True)
    schedulers = SimpleNamespace(
        w1=torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizers.w1, float(args.epochs), eta_min=args.learning_rate_min),
        w2=torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizers.w2, float(args.epochs), eta_min=args.learning_rate_min),
        A=torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizers.A, float(args.epochs), eta_min=args.learning_rate_min),
        V=torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizers.V, float(args.epochs), eta_min=args.learning_rate_min),
        r=torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizers.r, float(args.epochs), eta_min=args.learning_rate_min)
    )
    architect = Architect(model, encoder, r_vec, args, optimizers, lr)
    start_epoch = 0
    if args.resume:
        checkpoint = torch.load(os.path.join(args.resume, 'checkpoint.pth.tar'))
        start_epoch = checkpoint['epoch']
        optimizers.w1.load_state_dict(checkpoint['optimizer-w1'])
        optimizers.w2.load_state_dict(checkpoint['optimizer-w2'])
        optimizers.A.load_state_dict(checkpoint['optimizer-A'])
        optimizers.V.load_state_dict(checkpoint['optimizer-V'])
        optimizers.r.load_state_dict(checkpoint['optimizer-r'])
        schedulers.w1.load_state_dict(checkpoint['scheduler-w1'])
        schedulers.w2.load_state_dict(checkpoint['scheduler-w2'])
        schedulers.A.load_state_dict(checkpoint['scheduler-A'])
        schedulers.V.load_state_dict(checkpoint['scheduler-V'])
        schedulers.r.load_state_dict(checkpoint['scheduler-r'])
        model = torch.load(os.path.join(args.save, 'weights_model.pt')) if False else torch.load(os.path.join(args.resume, 'weights_model.pt')).cuda()
        encoder = torch.load(os.path.join(args.resume, 'weights_encoder.pt')).cuda()
        r_vec = torch.load(os.path.join(args.resume, 'weights_r.pt')).cuda()
    for epoch in range(start_epoch, args.epochs):
        # Refresh the per-component learning rates from the schedulers.
        for i in schedulers.__dict__:
            lr.__dict__[i] = schedulers.__dict__[i].get_last_lr()[0]
        logging.info('epoch %d lr_w1 %f lr_w2 %f lr_A %f lr_V %f lr_r %f', epoch, lr.w1, lr.w2, lr.A, lr.V, lr.r)
        genotype = model.genotype()
        logging.info('genotype = %s', genotype)
        # TODO: log genotypes to a folder and use some good file format -> make it usable with visualize
        print(F.softmax(model.alphas_normal, dim=-1))
        print(F.softmax(model.alphas_reduce, dim=-1))
        # training
        train_acc, train_obj = train(
            train_queue, valid_queue, model,
            architect, criterion, optimizers, lr)
        logging.info('train_acc %f', train_acc)
        logging.info('train_loss %f', train_obj)
        for i in schedulers.__dict__:
            schedulers.__dict__[i].step()
        # validation
        valid_acc, valid_obj = infer(valid_queue, model, architect, criterion)
        logging.info('valid_acc %f', valid_acc)
        logging.info('valid_loss %f', valid_obj)
        # save for the re-training
        torch.save(model, os.path.join(args.save, 'weights_model.pt'))
        torch.save(encoder, os.path.join(args.save, 'weights_encoder.pt'))
        torch.save(r_vec, os.path.join(args.save, 'weights_r.pt'))
        # BUG FIX: the w1 scheduler was saved under 'scheduler_w1'
        # (underscore) while the resume path above reads 'scheduler-w1',
        # raising KeyError on every resume. Keys are now consistent.
        save_checkpoint({
            'epoch': epoch + 1,
            'scheduler-w1': schedulers.w1.state_dict(),
            'scheduler-w2': schedulers.w2.state_dict(),
            'scheduler-A': schedulers.A.state_dict(),
            'scheduler-V': schedulers.V.state_dict(),
            'scheduler-r': schedulers.r.state_dict(),
            'optimizer-w1': optimizers.w1.state_dict(),
            'optimizer-w2': optimizers.w2.state_dict(),
            'optimizer-A': optimizers.A.state_dict(),
            'optimizer-V': optimizers.V.state_dict(),
            'optimizer-r': optimizers.r.state_dict(),
        })
    writer.close()
def train(train_queue, valid_queue,
          model, architect, criterion, optimizers, lr):
    """One search epoch: for each training batch, draw a validation batch
    and let the architect update W1, W2, V, r and A; track w2-branch
    accuracy/loss. Returns (avg top-1, avg loss)."""
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    # NOTE(review): g_step restarts at 0 every call, so the TensorBoard
    # x-axis resets each epoch.
    g_step = 0
    # for step, ((input, target), (input_val, target_val)) in enumerate(zip(train_queue, valid_queue)):
    for step, (input, target) in enumerate(train_queue):
        model.train()
        architect.encoder.train()
        n = input.size(0)
        input = input.cuda()
        target = target.cuda(non_blocking=True)
        # get a random minibatch from the search queue with replacement
        # NOTE(review): next(iter(...)) builds a fresh DataLoader iterator
        # (and worker pool) on every step — slow, though functionally fine
        # with the random sampler.
        input_val, target_val = next(iter(valid_queue))
        input_val = input_val.cuda()
        target_val = target_val.cuda(non_blocking=True)
        ###Architect.step will perform W1, W2, V, r, and A updates.
        ###because equations are all linked, its better to have their updates in a single place
        ### be careful of leaking gradients!!
        architect.step(input, target, input_val, target_val, unrolled=args.unrolled, save_dir=args.save)
        # TODO: think on using w1, w2, or average results
        logits = model.forward(input, 'w2')
        loss = criterion(logits, target)
        prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
        objs.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)
        writer.add_scalar("train_loss", objs.avg, g_step)
        writer.add_scalar("train_top1", top1.avg, g_step)
        writer.add_scalar("train_top5", top5.avg, g_step)
        if step % args.report_freq == 0:
            logging.info('train (on w2) %03d %e %f %f', g_step, objs.avg, top1.avg, top5.avg)
        g_step += 1
    return top1.avg, objs.avg
def infer(valid_queue, model, architect, criterion):
    """Evaluate the model's w2 branch on the validation queue (no grad).
    Returns (avg top-1, avg loss)."""
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()
    architect.encoder.eval()
    # NOTE(review): g_step resets per call, so TensorBoard steps restart
    # at 0 for every validation pass.
    g_step = 0
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda(non_blocking=True)
            # TODO: w1 or w2 or average the two
            logits = model.forward(input, 'w2')
            loss = criterion(logits, target)
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
            writer.add_scalar("val_top5", top5.avg, g_step)
            writer.add_scalar("val_loss", objs.avg, g_step)
            writer.add_scalar("val_top1", top1.avg, g_step)
            if step % args.report_freq == 0:
                logging.info('valid %03d %e %f %f', g_step, objs.avg, top1.avg, top5.avg)
            g_step += 1
    return top1.avg, objs.avg
# Entry point: create the experiment dir, mirror log output to both
# stdout and <save>/log.txt, then launch the search.
if __name__ == '__main__':
    utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
    log_format = '%(asctime)s %(message)s'
    logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                        format=log_format, datefmt='%m/%d %I:%M:%S %p')
    fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
    fh.setFormatter(logging.Formatter(log_format))
    logging.getLogger().addHandler(fh)
    main()
| importZL/LFM | NAS/darts-lfm/train_search_lfm.py | train_search_lfm.py | py | 15,995 | python | en | code | 0 | github-code | 36 |
7494741687 | """Train a model on Treebank"""
import random
import json
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as sched
import torch.utils.data as data
import utils
from collections import OrderedDict
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from args import get_train_args
from models import investorConferenceAnalyzer
from utils import Treebank, collate_fn
def main(args):
    """Train the sentiment classifier on Treebank data.

    Sets up logging/TensorBoard, seeds RNGs, builds (or restores) the model,
    then runs the epoch loop with periodic EMA-based evaluation and
    checkpointing every ``args.eval_steps`` training samples.
    """
    # Set up logging and devices
    args.save_dir = utils.get_save_dir(args.save_dir, args.name, training=True)
    log = utils.get_logger(args.save_dir, args.name)
    tbx = SummaryWriter(args.save_dir)
    device, args.gpu_ids = utils.get_available_devices()
    log.info(f'Args: {json.dumps(vars(args), indent=4, sort_keys=True)}')
    # Scale batch size by the number of GPUs used by DataParallel.
    args.batch_size *= max(1, len(args.gpu_ids))
    # Set random seed
    log.info(f'Using random seed {args.seed}...')
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    # Get model
    log.info('Building model...')
    model = investorConferenceAnalyzer(args.pce_model, args.num_labels)
    model = nn.DataParallel(model, args.gpu_ids)
    if args.load_path:
        # BUG FIX: this read `log.iofo(...)`, an AttributeError whenever
        # --load_path was supplied.
        log.info(f'Loading checkpoint from {args.load_path}...')
        model, step = utils.load_model(model, args.load_path, args.gpu_ids)
    else:
        step = 0
    model = model.to(device)
    model.train()
    ema = utils.EMA(model, args.ema_decay)
    # Get saver
    saver = utils.CheckpointSaver(args.save_dir,
                                  max_checkpoints=args.max_checkpoints,
                                  metric_name=args.metric_name,
                                  maximize_metric=args.maximize_metric,
                                  log=log)
    # Get optimizer and scheduler (a larger LR for the classifier head).
    optimizer_grouped_params = [
        {'params': model.module.classifier.albert.parameters()},
        {'params': model.module.classifier.classifier.parameters(), 'lr': args.lr_c}
    ]
    optimizer = optim.AdamW(optimizer_grouped_params, args.lr,
                            weight_decay=args.l2_wd)
    # Constant-LR schedule (multiplier 1.0 every step).
    scheduler = sched.LambdaLR(optimizer, lambda s: 1.)
    # Get data loader
    log.info('Building dataset...')
    train_dataset = Treebank(args.train_record_file)
    train_loader = data.DataLoader(train_dataset,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   num_workers=args.num_workers,
                                   collate_fn=collate_fn)
    dev_dataset = Treebank(args.dev_record_file)
    dev_loader = data.DataLoader(dev_dataset,
                                 batch_size=args.batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 collate_fn=collate_fn)
    # Train
    log.info('Training...')
    steps_till_eval = args.eval_steps
    epoch = step // len(train_dataset)
    # BUG FIX: was `while epoch != args.num_epochs`, which loops forever
    # when a restored step already puts epoch past num_epochs.
    while epoch < args.num_epochs:
        epoch += 1
        log.info(f'Starting epoch {epoch}...')
        with torch.enable_grad(), \
                tqdm(total=len(train_dataset)) as progress_bar:
            for input_idxs, token_type_idxs, attention_masks, ys, ids in train_loader:
                # Set up for forward
                input_idxs = input_idxs.to(device)
                token_type_idxs = token_type_idxs.to(device)
                attention_masks = attention_masks.to(device)
                batch_size = input_idxs.size(0)
                optimizer.zero_grad()
                # Forward
                log_p = model(input_idxs, token_type_idxs, attention_masks)
                ys = ys.to(device)
                if args.smoothing:
                    loss = utils.nll_loss_label_smoothing(log_p, ys, args.eps)
                else:
                    loss = F.nll_loss(log_p, ys)
                loss_val = loss.item()
                # Backward
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                ema(model, step // batch_size)
                # Log info (step counts training samples, not batches).
                step += batch_size
                progress_bar.update(batch_size)
                progress_bar.set_postfix(epoch=epoch, NLL=loss_val)
                tbx.add_scalar('train/NLL', loss_val, step)
                tbx.add_scalar('train/LR', optimizer.param_groups[0]['lr'], step)
                steps_till_eval -= batch_size
                if steps_till_eval <= 0:
                    steps_till_eval = args.eval_steps
                    # Evaluate and save checkpoint (using EMA weights).
                    log.info(f'Evaluating at step {step}...')
                    ema.assign(model)
                    results, pred_dict = evaluate(model, dev_loader,
                                                  device, args.dev_eval_file)
                    saver.save(step, model, results[args.metric_name], device)
                    ema.resume(model)
                    # Log to console
                    results_str = ', '.join(f'{k}: {v:05.2f}' for k, v in results.items())
                    log.info(f'Dev {results_str}')
                    # Log to TensorBoard
                    log.info('Visualizing in TensorBoard...')
                    for k, v in results.items():
                        tbx.add_scalar(f'dev/{k}', v, step)
                    utils.visualize(tbx,
                                    pred_dict=pred_dict,
                                    eval_path=args.dev_eval_file,
                                    step=step,
                                    split='dev',
                                    num_visuals=args.num_visuals)
def evaluate(model, data_loader, device, eval_file):
nll_meter = utils.AverageMeter()
model.eval()
pred_dict = {}
# Load eval info
with open(eval_file, 'r') as fh:
gold_dict = json.load(fh)
with torch.no_grad(), \
tqdm(total=len(data_loader.dataset)) as progress_bar:
for input_idxs, token_type_idxs, attention_masks, ys, ids in data_loader:
# Set up for forward
input_idxs = input_idxs.to(device)
token_type_idxs = token_type_idxs.to(device)
attention_masks = attention_masks.to(device)
batch_size = input_idxs.size(0)
# Forward
log_p = model(input_idxs, token_type_idxs, attention_masks)
ys = ys.to(device)
loss = F.nll_loss(log_p, ys)
nll_meter.update(loss.item(), batch_size)
# Log info
progress_bar.update(batch_size)
progress_bar.set_postfix(NLL=nll_meter.avg)
# Get accuracy
p = log_p.exp()
labels = torch.argmax(p, dim=-1)
preds = utils.predict_sentiments(ids.tolist(), labels.tolist())
pred_dict.update(preds)
model.train()
results = utils.eval_dicts(gold_dict, pred_dict)
results_list = [('NLL', nll_meter.avg),
('Acc', results['Acc'])]
results = OrderedDict(results_list)
return results, pred_dict
if __name__ == '__main__':
main(get_train_args()) | Vincent25-Li/Treebank | train.py | train.py | py | 7,389 | python | en | code | 0 | github-code | 36 |
29055282773 | import io
import picamera
import cv2
import numpy
import serial
import time
import RPi.GPIO as gp
####### Servo Motor Contol #######
gp.setmode(gp.BOARD)
gp.setup(11, gp.OUT)
pwm=gp.PWM(11, 50)
pwm.start(3)
port = '/dev/ttyACM0'
Face = 0
turn=1
while(turn):
i=3
while(i):
#Create a memory stream so photos doesn't need to be saved in a file
stream = io.BytesIO()
#Get the picture (low resolution, so it should be quite fast)
#Here you can also specify other parameters (e.g.:rotate the image)
with picamera.PiCamera() as camera:
camera.resolution = (320, 240)
camera.capture(stream, format='jpeg')
print("Captured......................")
#Convert the picture into a numpy array
buff = numpy.fromstring(stream.getvalue(), dtype=numpy.uint8)
#Now creates an OpenCV image
image = cv2.imdecode(buff, 1)
#Load a cascade file for detecting faces
face_cascade = cv2.CascadeClassifier('/home/pi/Desktop/Buddy/haarcascade_frontalface_alt.xml')
#Convert to grayscale
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
#Look for faces in the image using the loaded cascade file
faces = face_cascade.detectMultiScale(gray, 1.1, 5)
#print "Found "+str(len(faces))+" face(s)"
#Draw a rectangle around every found face
for (x,y,w,h) in faces:
Face = 1
cv2.rectangle(image,(x,y),(x+w,y+h),(255,255,0),2)
print("Detected")
ser = serial.Serial(port, 9600, timeout=1)
t=0
while(t<2000):
if(t%10 == 0):
print(t)
t+=1
ser.write(b'0') ## Stop_Detected
with picamera.PiCamera() as camera:
print("Start Video")
camera.start_recording('examplevid.h264')
time.sleep(5)
camera.stop_recording()
print("Stop Video")
#Save the result image
if(i==3):
cv2.imwrite('result1.jpg',image)
if(i==2):
cv2.imwrite('result2.jpg',image)
if(i==1):
cv2.imwrite('result3.jpg',image)
i=i-1
if(Face == 1):
Face = 2
break
################ Move Servo ##################
if(i==0):
pwm.ChangeDutyCycle(3)
#ser.write(b'1') ## Move_Servo_pos1
print("First Pos__________________________")
if(i==2):
pwm.ChangeDutyCycle(5)
#ser.write(b'2') ## Move_Servo_pos2
print("Second Pos__________________________")
if(i==1):
pwm.ChangeDutyCycle(7)
#ser.write(b'3') ## Move_Servo_pos3
print("Third Pos__________________________")
t=0
while(t<200):
if(t%10 == 0):
print(t)
t+=1
print("###############################################");
t=0
while(t<500):
if(t%10 == 0):
print(t)
t+=1
# turn = 0
if(cv2.waitKey(1) & 0xFF == ord('q')):
break
if(Face == 2):
Face = 0
ser = serial.Serial(port, 9600, timeout=1)
t=0
while(t<2000):
if(t%10 == 0):
print(t)
t+=1
ser.write(b'1');
break
| FarhatBuet14/Rescue-BOT | Codes/main.py | main.py | py | 3,571 | python | en | code | 1 | github-code | 36 |
2811075086 | from torch import optim
from torch.distributions import Categorical
import importlib
class Model():
def __init__(self, config, modelParam, env):
self.update_counter = 0
if modelParam['cuda']['use_cuda']:
self.device = f"cuda:{modelParam['cuda']['device_idx']}"
else:
self.device = "cpu"
self.config = config
self.modelParam = modelParam
self.policyNet = self.selectPolicyNet(config, env.size_of_state_space, env.size_of_action_space)
self.policyNet.to(self.device)
self.optimizer = self.selectOptimizer(config)
return
def selectPolicyNet(self, config, size_of_state_space, size_of_action_space):
#Importing the network class based on the config[network] key
module = importlib.import_module("networks." + config['network'])
net = getattr(module, config['network'])(size_of_state_space, size_of_action_space)
return net
def selectOptimizer(self, config):
if config['optimizer'] == 'adam':
optimizer = optim.Adam(self.policyNet.parameters(), lr=config['learningRate']['lr'], weight_decay=config['weight_decay'])
elif config['optimizer'] == 'SGD':
optimizer = optim.SGD(self.policyNet.parameters(), lr=config['learningRate']['lr'],weight_decay=config['weight_decay'])
elif config['optimizer'] == 'RMSprop':
optimizer = optim.RMSprop(self.policyNet.parameters(), lr=config['learningRate']['lr'],weight_decay=config['weight_decay'])
else:
raise Exception('invalid optimizer')
return optimizer
def select_action(self, state):
state = state.to(self.device)
probs = self.policyNet(state)
m = Categorical(probs)
action = m.sample()
log_probs = m.log_prob(action)
return action.item(), log_probs
| ivartz/IN9400_exercises | week14/exercise/policy_learning/utils/model.py | model.py | py | 1,937 | python | en | code | 1 | github-code | 36 |
74207456423 | import argparse
import logging
log_debug = logging.getLogger("debugLog")
_available_commands = ["list"]
def get_parser(parent=None):
# Anomaly commands
conf_file_parser = argparse.ArgumentParser(add_help=False)
conf_file_parser.add_argument('--config_file', '--config_path', help='Path to config file', metavar='[path]',
dest="config_file")
if not parent:
admin = argparse.ArgumentParser(description='Deployment control', prog='deployment control',
parents=[conf_file_parser])
admin.add_argument("--debug", help="Run command in debug mode", dest="debug", action='store_true')
else:
admin = parent.add_parser('admin', help='Deployment control')
# Admin commands
admin_parser = argparse.ArgumentParser(add_help=False)
admin_parser.add_argument("list", help="List %(prog)ss")
admin_parser.add_argument('--host', help='Hostname or ip of target', metavar='[hostname]',
dest='target_host', default='all')
admin_parser.add_argument('--config-path', help='Path to config file', metavar='[path]',
dest="config_path", action='store')
# add more admin commands here
# Admin parser
admin_subcommands = admin.add_subparsers(dest="target")
admin_container = admin_subcommands.add_parser('container', prog='Container', parents=[admin_parser])
admin_node = admin_subcommands.add_parser('node', prog='Node', parents=[admin_parser])
admin_network = admin_subcommands.add_parser('network', prog='Network', parents=[admin_parser])
admin_network.add_argument('--interface', help='Name of interface', type=str, metavar='[NAME]',
dest="target_interface")
admin_deployment = admin_subcommands.add_parser('deployment', prog='Deployment', parents=[admin_parser])
if parent:
return parent
else:
return admin
def parse_arguments(args):
args = vars(args)
unpacked = unpack_targets(args)
unpacked.update(unpack_params(args))
log_debug.debug("Unpacked arguments" + str(unpacked))
return unpacked
def unpack_targets(args):
_unpacked = dict()
for arg in args:
if "target" in arg and args[arg]:
param_split = arg.split("_")
if len(param_split) > 1:
_unpacked[param_split[1]] = args[arg]
else:
_unpacked[arg] = args[arg]
return {"target": _unpacked}
def unpack_params(args):
_unpacked = dict()
for arg in args:
if arg in _available_commands:
return {"action": arg}
| Ydjeen/openstack_anomaly_injection | openstack_anomaly_injection/anomaly_injection/node_control/config/argparser.py | argparser.py | py | 2,668 | python | en | code | 0 | github-code | 36 |
20761353624 | import math
import numpy as np
import statistics
import random
import time
import matplotlib.pyplot as plt
h = 40
limit_number_of_taken_values = 200
nb_of_initial_values = 100
nb_of_Dthet = 100
Dthets = [(i * 1 / nb_of_Dthet) for i in range(nb_of_Dthet)] # thet step for ARL function
# sigs = [(0.5 + i/nb_of_sensors) for i in range(nb_of_sensors)]
# sigs = [2 for i in range(nb_of_sensors)]
sigs = [0.1, 0.5, 1.5]
nb_of_sensors = len(sigs)
def time_before_detection_step_signal(sigs, Dthet, nb_of_iteration, probas=[1] * len(sigs), h=h):
n= len(sigs)
nb_of_values = []
for p in range(nb_of_iteration):
#random.shuffle(sigs)
X_bar = [] # somme y_i -mu_0 / sigma i
nb_of_initial_values =random.randint(200, 200 + n )
for i in range(nb_of_initial_values):
sig = sigs[i % n]
p = random.random()
if p<probas[i % n]:
x = np.random.normal(0, sig)
m = len(X_bar)
for j in range(m):
X_bar[j] = X_bar[j] + x / sig
X_bar.append(x)
# print(reception_error)
# time.sleep(1)
detected = False
i = nb_of_initial_values
while detected is False:
sig = sigs[i % n]
p = random.random()
if p < probas[i % n]:
x = np.random.normal(Dthet, sig)
"""m = len(X_bar)
if m >= limit_number_of_taken_values:
X_bar = X_bar[1:]
m -= 1"""
for j in range(m):
X_bar[j] = X_bar[j] + x / sig
# print((reception_error[j] * (n-j))**2)
X_bar.append(x)
for j in range(m + 1):
if (abs(X_bar[j]) / math.sqrt(m - j + 1) > h):
detected = True
# print(X_bar)
# print(j)
i += 1
nb_of_values.append(i - nb_of_initial_values)
return statistics.mean(nb_of_values), statistics.stdev(nb_of_values), nb_of_values
def necessary_nb_of_value_GSC(sig, Dthet, h):
return math.pow(h * sig / Dthet, 2)
def average_nb_of_necessary_values_before_detection(sig, Dthet, nb_of_iteration):
nb_of_values = []
for p in range(nb_of_iteration):
i = 1
X_bar = [np.random.normal(0, sig)]
mean = statistics.mean(X_bar)
while (mean + (h * sig / math.sqrt(i)) > Dthet):
X_bar.append(np.random.normal(0, sig))
mean = statistics.mean(X_bar)
i += 1
nb_of_values.append(i)
return statistics.mean(nb_of_values)
"""
def plot_theoritical_ARL():
nb_of_Dthet = 100
Dthets = [(0.5 + (i * 2.5 / nb_of_Dthet)) for i in range(1, nb_of_Dthet)]
mean = []
for Dthet in Dthets:
mean.append(necessary_nb_of_value_GSC(sig, Dthet))
stri = ''
for i in range(len(Dthets)):
stri += '(' + str(Dthets[i]) + ',' + str(mean[i]) + ')'
print(stri)
def plot_ARL():
sig = 1
std = []
mean = []
expected = []
h = 10
nb_of_iteration = 1000
Dthet = 0.8
pas = 0.1
Dthets = []
while Dthet < 2:
Dthets.append(Dthet)
a, b = time_before_detection_step_signal([sig], Dthet, int(nb_of_iteration), h)
mean.append(a)
std.append(2.567 * b / math.sqrt(nb_of_iteration))
expected.append(necessary_nb_of_value_GSC(sig, Dthet, h))
# print("ok")
Dthet += pas
pas *= 1.1
nb_of_iteration *= 0.9
stri = ''
stri += '(' + str(Dthets[-1]) + ',' + str((mean[-1] - expected[-1]) * 100 / mean[-1]) + ') +- (0,)'
print(stri)
stri = ''
for i in range(len(Dthets)):
stri += '(' + str(Dthets[i]) + ',' + str((mean[i] - expected[i]) * 100 / mean[i]) + ')'
print(stri)
stri = ''
for i in range(len(Dthets)):
stri += '(' + str(Dthets[i]) + ',' + str(mean[i]) + ')'
print(stri)
stri = ''
for i in range(len(Dthets)):
stri += '(' + str(Dthets[i]) + ',' + str(expected[i]) + ')'
print(stri)
def time_before_detection_linear_signal(sig, slope, nb_of_iteration, h=h):
nb_of_values = []
for p in range(nb_of_iteration):
X_bar = []
for i in range(nb_of_initial_values):
x = np.random.normal(0, sig)
n = len(X_bar)
for j in range(n):
X_bar[j] = X_bar[j] * (n - j) / (n - j + 1) + x / (n - j + 1)
X_bar.append(x)
i = 0
detected = False
while detected is False:
i += 1
x = np.random.normal(slope * i, sig)
n = len(X_bar)
if n >= limit_number_of_taken_values:
X_bar = X_bar[1:]
n -= 1
for j in range(n):
X_bar[j] = X_bar[j] * (n - j) / (n - j + 1) + x / (n - j + 1)
X_bar.append(x)
for j in range(n + 1):
if (abs(X_bar[j]) > h * sig / math.sqrt(n + 1 - j)):
detected = True
# print(X_bar)
# print(j)
nb_of_values.append(i)
nb_of_values.append(i)
return statistics.mean(nb_of_values), statistics.stdev(nb_of_values)
def plot_LGAARL():
std = []
mean = []
nb_of_iteration = 80000
Dthet = 0.0
pas = 0.0005
Dthets = []
while Dthet < 0.4:
Dthets.append(Dthet)
a, b = time_before_detection_linear_signal(1, Dthet, int(nb_of_iteration))
mean.append(a)
std.append(2.567 * b / math.sqrt(nb_of_iteration))
# print("ok")
Dthet += pas
pas *= 1.1
nb_of_iteration *= 0.9
stri = '(' + str(Dthets[-1]) + ',' + str(mean[-1]) + ') +- (0,' + str(std[-1]) + ')'
print(stri)
stri = ''
for i in range(len(Dthets)):
stri += '(' + str(Dthets[i]) + ',' + str(mean[i]) + ') +- (0,' + str(std[i]) + ')'
print(stri)
"""
def main_1(Dthet):
means = []
stds = []
# Dthet = 1
nb_of_iteration = 10000
#h = 10
sigs = [1, 1.5, 2]
nb_of_sensors = len(sigs)
for sig in sigs:
mean, std,z = time_before_detection_step_signal([sig], Dthet, int(nb_of_iteration / math.sqrt(len(sigs))))
means.append(mean)
stds.append(std / math.sqrt(nb_of_iteration / math.sqrt(len(sigs))))
q = 0
for sig in sigs:
q += math.pow(1 / sig, 2)
mean_adapted_one_by_one = 0
std_adapted_one_by_one = 0
i = 0
for sig in sigs:
mean_adapted_one_by_one += math.pow(1 / (q * math.pow(sig, 2)), 2) * means[i]
std_adapted_one_by_one += stds[i] ** 2 * (1 / (q * math.pow(sig, 2))) ** 2
i += 1
mean_adapted_one_by_one *= len(sigs)
std_adapted_one_by_one = math.sqrt(std_adapted_one_by_one)
std_one_by_one = 0
mean_one_by_one = 0
i = 0
for sig in sigs:
std_one_by_one += stds[i] ** 2
mean_one_by_one += means[i] / math.pow(len(sigs), 2)
i += 1
std_one_by_one = math.sqrt(std_one_by_one) / nb_of_sensors
mean_one_by_one *= len(sigs)
q = 0
for m in means:
q += 1 / m
opti = 0
for m in means:
opti += math.pow(1 / (q * m), 2) * m
opti *= len(sigs)
mean, std, z = time_before_detection_step_signal(sigs, Dthet, nb_of_iteration)
"""
print("one by one")
print(mean_one_by_one)
print(2.567 * std_one_by_one)
print("adapted one by one")
print(mean_adapted_one_by_one)
print(2.567 * std_adapted_one_by_one)
print("simultaneously")
print(mean)
print(2.567 * std / math.sqrt(nb_of_iteration))
"""
return mean, 2.567 * std / math.sqrt(
nb_of_iteration), mean_one_by_one, 2.567 * std_one_by_one, mean_adapted_one_by_one, 2.567 * std_adapted_one_by_one, opti
def main_2():
Dthets = [1 + i * 2 / 10 for i in range(0, 10)]
mean_simul = []
std_simul = []
mean_one_one = []
std_one_one = []
mean_adapted = []
std_adapted = []
mean_opti = []
for Dthet in Dthets:
a, b, c, d, e, f, g = main_1(Dthet)
mean_simul.append(a)
std_simul.append(b)
mean_one_one.append(c)
std_one_one.append(d)
mean_adapted.append(e)
std_adapted.append(f)
mean_opti.append(g)
moyenne = 0
for i in range(len(Dthets)):
moyenne += abs((mean_adapted[i] - mean_opti[i]) / mean_opti[i])
plt.plot(Dthets, mean_simul, label='S0 round robin')
plt.plot(Dthets, mean_one_one, label='S1 un par un un')
plt.plot(Dthets, mean_adapted, label='S2 un par un période modifiée')
plt.plot(Dthets, mean_opti, label="S Opt optimum global pour les stratégies un par un")
lower_boundary = []
upper_boundary = []
for i in range(len(Dthets)):
lower_boundary.append(mean_simul[i] - std_simul[i])
upper_boundary.append(mean_simul[i] + std_simul[i])
plt.fill_between(Dthets, lower_boundary, upper_boundary, color='#D3D3D3')
lower_boundary = []
upper_boundary = []
for i in range(len(Dthets)):
lower_boundary.append(mean_one_one[i] - std_one_one[i])
upper_boundary.append(mean_one_one[i] + std_one_one[i])
plt.fill_between(Dthets, lower_boundary, upper_boundary, color='#D3D3D3')
lower_boundary = []
upper_boundary = []
for i in range(len(Dthets)):
lower_boundary.append(mean_adapted[i] - std_adapted[i])
upper_boundary.append(mean_adapted[i] + std_adapted[i])
plt.fill_between(Dthets, lower_boundary, upper_boundary, color='#D3D3D3', label='99% confiance intervalle')
plt.legend()
plt.xlabel("amplitude du changement à detecter")
plt.ylabel("temps moyen avant de lever une alerte de detection de changement")
plt.title("comparaisons de stratégies d'émission pour des problèmes de detection en utilisant la méthode GLR")
plt.show()
def main_3():
Dthet = 1
sigs = [0.1, 0.5, 1.5]
nb_of_iteration = 1000
##### fst approach, nested one
sigmas = [sigs[0],sigs[1]]
for i in range (5):
sigmas.append(sigs[2])
sigmas.append(sigs[1])
for i in range (5):
sigmas.append(sigs[2])
sigmas.append(sigs[1])
for i in range(5):
sigmas.append(sigs[2])
sigmas.append(sigs[1])
for i in range(5):
sigmas.append(sigs[2])
mean = time_before_detection_step_signal(sigmas, Dthet, nb_of_iteration, h=h)
print(mean)
sigs_lengths = 500
means = []
stds = []
for i in range(int(nb_of_iteration/100)):
sigmas = []
for j in range(sigs_lengths):
p = random.random()
if p < 0.04:
sigmas.append(sigs[0])
elif p < 0.2:
sigmas.append(sigs[1])
else:
sigmas.append(sigs[2])
mean = time_before_detection_step_signal(sigmas, Dthet, 100, h=h)
means.append(mean[0])
stds.append(mean[1])
print(statistics.mean(means))
print(statistics.mean(stds))
def comparison_of_different_scheduling(nb_of_first,nb_of_second,sigma_first, sigma_second, first_proba, second_proba, nb_of_cases):
infos = []
for i in range(nb_of_first):
infos.append([sigma_first,first_proba])
for i in range(nb_of_second):
infos.append([sigma_second, second_proba])
nb_of_iteration = 1000
h = 40
Dthet = 0.5
means = []
stds = []
for i in range(nb_of_cases):
random.shuffle(infos)
sigmas = []
probas = []
for elt in infos:
sigmas.append(elt[0])
probas.append(elt[1])
mean, std, z = time_before_detection_step_signal(sigmas, Dthet, nb_of_iteration,probas, h)
means.append(mean)
stds.append(std/math.sqrt(nb_of_iteration))
means = sorted(means)
n = len(means)
to_print = ""
tot = 0
for elt in means:
tot += 1/n
to_print += "(" + str(elt) +"," + str(tot) + ') '
print(to_print)
def comparison_of_two_opposite_schedulings(nb_of_first,nb_of_second,sigma_first, sigma_second, first_proba, second_proba):
nb_of_iteration = 50000
h = 40
Dthet = 1
#### construction of the strategy where in the first time it is always the fisrt cat, then after the second cat..
infos = []
for i in range(nb_of_first):
infos.append([sigma_first, first_proba])
for i in range(nb_of_second):
infos.append([sigma_second, second_proba])
sigmas = []
probas = []
for elt in infos:
sigmas.append(elt[0])
probas.append(elt[1])
mean, std, nb_of_value_before_detection = time_before_detection_step_signal(sigmas, Dthet, nb_of_iteration, probas, h)
print(mean)
nb_of_value_before_detection = sorted(nb_of_value_before_detection)
values = []
nb_of_items = []
values.append(nb_of_value_before_detection.pop(0))
nb_of_items.append(1)
for elt in nb_of_value_before_detection:
if elt ==values[-1]:
nb_of_items[-1] += 1
else:
values.append(elt)
nb_of_items.append(1)
n = len(nb_of_value_before_detection)
to_print = ""
tot = 0
for elt in zip(values, nb_of_items):
tot += elt[1] / n
to_print += "(" + str(elt[0]) + "," + str(tot) + ') '
print(to_print)
pgcd = math.gcd(nb_of_first, nb_of_second)
infos = []
for i in range(int(nb_of_first/pgcd)):
infos.append([sigma_first,first_proba])
for i in range(int(nb_of_second/pgcd)):
infos.append([sigma_second, second_proba])
sigmas = []
probas = []
for elt in infos:
sigmas.append(elt[0])
probas.append(elt[1])
mean, std, nb_of_value_before_detection = time_before_detection_step_signal(sigmas, Dthet, nb_of_iteration, probas,
h)
print(mean)
nb_of_value_before_detection = sorted(nb_of_value_before_detection)
values = []
nb_of_items = []
values.append(nb_of_value_before_detection.pop(0))
nb_of_items.append(1)
for elt in nb_of_value_before_detection:
if elt == values[-1]:
nb_of_items[-1] += 1
else:
values.append(elt)
nb_of_items.append(1)
n = len(nb_of_value_before_detection)
to_print = ""
tot = 0
for elt in zip(values, nb_of_items):
tot += elt[1] / n
to_print += "(" + str(elt[0]) + "," + str(tot) + ') '
print(to_print)
def plot_CDF_of_one_random_solution(nb_of_first,nb_of_second,sigma_first, sigma_second, first_proba, second_proba):
Dthet = 0.5
nb_of_iteration = 10000
h = 40
infos = []
for i in range(nb_of_first):
infos.append([sigma_first, first_proba])
for i in range(nb_of_second):
infos.append([sigma_second, second_proba])
random.shuffle(infos)
sigmas = []
probas = []
for elt in infos:
sigmas.append(elt[0])
probas.append(elt[1])
mean, std, nb_of_value_before_detection = time_before_detection_step_signal(sigmas, Dthet, nb_of_iteration, probas, h)
print(mean)
nb_of_value_before_detection = sorted(nb_of_value_before_detection)
values = []
nb_of_items = []
values.append(nb_of_value_before_detection.pop(0))
nb_of_items.append(1)
for elt in nb_of_value_before_detection:
if elt == values[-1]:
nb_of_items[-1] += 1
else:
values.append(elt)
nb_of_items.append(1)
n = len(nb_of_value_before_detection)
to_print = ""
tot = 0
for elt in zip(values, nb_of_items):
tot += elt[1] / n
to_print += "(" + str(elt[0]) + "," + str(tot) + ') '
print(to_print)
def test():
values = []
for i in range(100000):
values.append(np.random.normal(0,0.1)/0.1)
values = sorted(values)
plt.plot(values)
plt.show()
values = []
for i in range(100000):
values.append(np.random.normal(0, 1))
values = sorted(values)
plt.plot(values)
plt.show()
def function_of_the_performance_according_to_the_error_noise():
Dthet = 1
nb_of_iteration = 10000
h = 40
sigs = [i/10 + 0.1 for i in range(20)]
perfs = []
for sig in sigs:
mean, std, values = time_before_detection_step_signal([sig], Dthet, nb_of_iteration, probas=[1] * len(sigs), h=h)
perfs.append(mean)
plt.plot(sigs,perfs)
plt.show()
if __name__ == "__main__":
# quantify_false_positives(sigs)
# for sig in sigs:
# a, b = time_before_detection_step_signal(sig, 3, 10000, h=10)
# print("########")
# print(sig)
# print(a)
# plot_LGAARL()
#main_3()
"""nb_of_first = 50
nb_of_second = 50
sigma_first = 0.1
sigma_second = 0.1
first_proba = 1
second_proba = 1
nb_of_cases = 2
comparison_of_different_scheduling(nb_of_first, nb_of_second, sigma_first, sigma_second, first_proba, second_proba, nb_of_cases)
"""
function_of_the_performance_according_to_the_error_noise()
#comparison_of_two_opposite_schedulings(nb_of_first, nb_of_second, sigma_first, sigma_second, first_proba, second_proba)
#plot_CDF_of_one_random_solution(nb_of_first, nb_of_second, sigma_first, sigma_second, first_proba, second_proba)
| gwenmaudet/PhD_main | detection_step_signal/GLR.py | GLR.py | py | 17,337 | python | en | code | 0 | github-code | 36 |
74361688425 | from datetime import datetime
import logging
from django.contrib.auth import authenticate
from django.core import serializers
from django.http import HttpResponse, HttpResponseBadRequest
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import login as auth_login
from dispatch.models import ETurtleGroup as Group
from server.dispatch.dispatcher import run_dispatcher
from server.dispatch.models import Courier, Dispatch, Package
from server.utils import api_permission_required, HttpResponseUnauthorized
import json
@csrf_exempt
def loginview(request):
if not request.method=='POST':
return HttpResponseBadRequest("post required")
username = request.POST.get('username' or None)
password = request.POST.get('password' or None)
if not (username and password):
return HttpResponseBadRequest("invalid or missing parameters")
user = authenticate(username=username, password=password)
if user and user.is_active and user.has_perm("dispatch.api_access"):
auth_login(request, user)
return HttpResponse("Logged in")
return HttpResponseUnauthorized('Unathorized')
@api_permission_required
def check_in(request):
courier = Courier.objects.get(id=request.user.id)
courier.state = Courier.STATE_STANDING_BY
courier.save()
run_dispatcher()
return HttpResponse('checked in')
@api_permission_required
def leave(request):
courier = Courier.objects.get(id=request.user.id)
courier.state = Courier.STATE_IDLE
courier.save()
try:
dispatch = Dispatch.objects.get(courier=courier, state=Dispatch.STATE_PENDING)
except Dispatch.DoesNotExist:
pass
else:
#updates the state of the Dispatch
dispatch.state = Dispatch.STATE_REJECTED
dispatch.save()
#updates the state of the Package
dispatch.package.state=Package.STATE_NEW
dispatch.package.save()
run_dispatcher()
return HttpResponse('left')
@api_permission_required
def decline(request):
courier = Courier.objects.get(id=request.user.id)
dispatch = get_object_or_404(Dispatch, courier=courier, state=Dispatch.STATE_PENDING)
#updates the state of the Dispatch
dispatch.state = Dispatch.STATE_REJECTED
dispatch.save()
#updates the state of the Courier
courier.state = Courier.STATE_STANDING_BY
courier.save()
#updates the state of the Package
dispatch.package.state=Package.STATE_NEW
dispatch.package.save()
run_dispatcher()
return HttpResponse('declined')
@api_permission_required
def get(request):
courier = Courier.objects.get(id=request.user.id)
dispatch = get_object_or_404(Dispatch, courier=courier, state=Dispatch.STATE_PENDING)
package = dispatch.package
dump = package.serialize()
response = HttpResponse(dump)
response['Content-Type'] = 'application/json; charset=utf-8'
return response
@api_permission_required
def accept(request):
courier = Courier.objects.get(id=request.user.id)
#get the corresponding Dispatch object
dispatch = get_object_or_404(Dispatch, courier=courier, state=Dispatch.STATE_PENDING)
#updates the state of the pending dispatch
dispatch.state=Dispatch.STATE_SHIPPING
dispatch.save()
#updates the state of the Courier
courier.state = Courier.STATE_SHIPPING
courier.save()
#updates the state of the package
dispatch.package.state=Package.STATE_SHIPPING
dispatch.package.save()
return HttpResponse('accepted')
@api_permission_required
def complete(request):
courier = Courier.objects.get(id=request.user.id)
#get the corresponding Dispatch object
dispatch = get_object_or_404(Dispatch, courier=courier, state=Dispatch.STATE_SHIPPING)
dispatch.state=Dispatch.STATE_SHIPPED
dispatch.save()
#updates the state of the Courier
courier.state = Courier.STATE_IDLE
courier.save()
#updates the state of the package
dispatch.package.state=Package.STATE_SHIPPED
dispatch.package.save()
return HttpResponse('completed')
@api_permission_required
def fail(request):
courier = Courier.objects.get(id=request.user.id)
#get the corresponding Dispatch object
dispatch = get_object_or_404(Dispatch, courier=courier, state=Dispatch.STATE_SHIPPING)
dispatch.state=Dispatch.STATE_FAILED
dispatch.save()
#updates the state of the Courier
courier.state = Courier.STATE_IDLE
courier.save()
#updates the state of the package
dispatch.package.state=Package.STATE_FAILED
dispatch.package.save()
return HttpResponse('failed')
@csrf_exempt
@api_permission_required
def loc_update(request):
if not request.method=='POST':
return HttpResponseBadRequest("post required")
lat = request.POST.get('lat' or None)
lng = request.POST.get('lng' or None)
if not (lat and lng):
return HttpResponseBadRequest("invalid or missing parameters")
courier = Courier.objects.get(id=request.user.id)
courier.lat = lat
courier.lng = lng
courier.last_pos_update = datetime.now()
courier.save()
logger = logging.getLogger('location_logger')
logger.info("%s: %s, %s @ %s" % (courier,lat,lng,courier.last_pos_update.isoformat()))
return HttpResponse('location updated')
@csrf_exempt
@api_permission_required
def c2dmkey_update(request):
if not request.method=='POST':
return HttpResponseBadRequest("post required")
registration_id = request.POST.get('registration_id')
if not registration_id:
return HttpResponseBadRequest("invalid or missing parameters")
courier = Courier.objects.get(id=request.user.id)
courier.c2dmkey = registration_id
courier.save()
logger = logging.getLogger('c2dm_logger')
logger.info("%s: %s @ %s" % (courier,registration_id,datetime.now()))
return HttpResponse('c2dm key updated')
| lepilepi/eturtle | server/api/views.py | views.py | py | 5,950 | python | en | code | 5 | github-code | 36 |
2515032447 | import sys
import seaborn as sns
import pandas as pd
import numpy as np
import scipy.stats
from collections import defaultdict
from matplotlib import pyplot as plt
from sklearn.metrics import r2_score, mean_absolute_error
#plt.style.use('seaborn-whitegrid')
#sns.set_theme()
#Function for creating a dictionary from the epiAneufinder data
def createDictionaryFromTable(table):
snu_dict=table.set_index(['seq', 'start', 'end']).T.to_dict('list')
return(snu_dict)
def calculatePopulationSomies(atac_dict, density_dict):
gain_atac = []
loss_atac = []
base_atac = []
common_keys = set(density_dict).intersection(atac_dict) #filtering for the common CNV locations between the two datasets
sort_common_keys=sorted(common_keys)
filtered_density_dict = {k: v for k, v in density_dict.items() if k in sort_common_keys}
#print(sort_common_keys)
counts=0
for k in sort_common_keys:
#if k[0]!=0: #selecting for all chromosomes
if k[0]!=0: # selecting for all chromosomes
counts=counts+1
#Calculating pseudobulk representation for the scATAC. 0 is loss, 1 is disomic and 2 is gain
#If the user changes notation it should be changed here as well
loss_atac.append(atac_dict[k].count(0) / len(atac_dict[k]))
base_atac.append(atac_dict[k].count(1) / len(atac_dict[k]))
gain_atac.append(atac_dict[k].count(2) / len(atac_dict[k]))
print("Count Bins:",counts)
return(loss_atac, base_atac, gain_atac, filtered_density_dict)
#Function for calculating different metrics between the two datasets and creating a line plot of the pseudoibulk data
def createLinePlot(density_dict, loss_atac, base_atac, gain_atac):
new_base_atac = [x * 2 for x in base_atac]
new_gain_atac = [x * 3 for x in gain_atac]
atac_plot = [sum(x) for x in zip(new_gain_atac, new_base_atac, loss_atac)]
atac_array=np.array(atac_plot)
density_array=[x for x in density_dict.values()]
x = list(range(len(atac_plot)))
plt.plot(x,density_array)
plt.plot(x, atac_plot, color='orange', label="ATAC")
plt.show()
#print(density_array)
print("Pearson Correlation : ",scipy.stats.pearsonr(atac_array, density_array))
print("Spearman Correlation : ", scipy.stats.spearmanr(atac_array, density_array))
print("Kendall Correlation : ", scipy.stats.kendalltau(atac_array, density_array))
if __name__ =="__main__":
density_table=pd.read_csv("/home/katia/Helmholz/epiAneufinder/Hg38_geneDensity.csv", sep="\t")
snu_full=pd.read_csv("/home/katia/Helmholz/epiAneufinder/revisions/SNU601_br15/epiAneufinder_results/results_table.tsv", sep=" ")
snu_dict=createDictionaryFromTable(snu_full)
density_dict=createDictionaryFromTable(density_table)
loss_atac, base_atac, gain_atac , filtered_density_dict= calculatePopulationSomies(snu_dict,density_dict)
#print(filtered_density_dict)
createLinePlot(filtered_density_dict, loss_atac, base_atac, gain_atac) | thek71/epiScripts | calculateCorrelationDensity.py | calculateCorrelationDensity.py | py | 3,000 | python | en | code | 0 | github-code | 36 |
25163328807 | import operator
import pandas as pd
from easul.action import ResultStoreAction
from easul.algorithm import StoredAlgorithm
from easul.algorithm.factor import OperatorFactor
from easul.data import DataSchema, DFDataInput
from easul.step import VisualStep
from easul.visual import Visual
from easul.visual.element import Prediction
from easul.visual.element.container import HorizContainer, CardContainer, Container
from easul.visual.element.journey import JourneyMap
from easul.visual.element.overall import RocCurve, Accuracy, BalancedAccuracy, Ppp, Npp, Sensitivity, Matthews, \
ModelScore
from easul.visual.element.prediction import ProbabilityPlot, LimeTablePlot
from easul.visual.element.overall import Specificity
import os
import numpy as np
# Directory holding the bundled example data files, resolved relative to this module.
EXAMPLE_PATH = os.path.dirname(__file__) + "/support"
# Path to the raw diabetes dataset text file used by load_diabetes (presumably
# the Efron et al. dataset described in the comment block below — see Source URL).
DIABETES_FILE = EXAMPLE_PATH + "/diabetes.txt"
def diabetes_progression_algorithm():
    """Build and fit the example diabetes-progression classifier.

    Trains a logistic-regression ``ClassifierAlgorithm`` on the training
    split returned by :func:`diabetes_progression_dataset` and returns the
    fitted algorithm.
    """
    # Imports kept local so the module can be imported without sklearn/easul
    # algorithm machinery being loaded up front.
    from easul.algorithm import ClassifierAlgorithm
    from sklearn.linear_model import LogisticRegression

    training_data = diabetes_progression_dataset()

    algorithm = ClassifierAlgorithm(
        title="Diabetes progression",
        model=LogisticRegression(max_iter=500),
        schema=training_data.schema,
    )
    algorithm.fit(training_data)
    return algorithm
def diabetes_progression_dataset():
    """Return the 75% training split of the raw diabetes classifier dataset.

    The test split is discarded; only the training portion is used by the
    example algorithms in this module.
    """
    dataset = load_diabetes(raw=True, as_classifier=True)
    training_split, _test_split = dataset.train_test_split(train_size=0.75)
    return training_split
# *Data Set Characteristics:**
# :Number of Instances: 442
#
# :Number of Attributes: First 10 columns are numeric predictive values
#
# :Target: Column 11 is a quantitative measure of disease progression one year after baseline
#
# :Attribute Information:
# - age age in years
# - sex
# - bmi body mass index
# - bp average blood pressure
# - s1 tc, total serum cholesterol
# - s2 ldl, low-density lipoproteins
# - s3 hdl, high-density lipoproteins
# - s4 tch, total cholesterol / HDL
# - s5 ltg, possibly log of serum triglycerides level
# - s6 glu, blood sugar level
#
# Note: Each of these 10 feature variables have been mean centered and scaled by the standard deviation times `n_samples` (i.e. the sum of squares of each column totals 1).
#
# Source URL:
# https://www4.stat.ncsu.edu/~boos/var.select/diabetes.html
#
# For more information see:
# Bradley Efron, Trevor Hastie, Iain Johnstone and Robert Tibshirani (2004) "Least Angle Regression," Annals of Statistics (with discussion), 407-499.
# (https://web.stanford.edu/~hastie/Papers/LARS/LeastAngle_2002.pdf)
def load_diabetes(raw=False, as_classifier=False):
    """Load the classic diabetes dataset wrapped in an EASUL DFDataInput.

    :param raw: when True, read the original (unscaled) values from
        DIABETES_FILE; when False, use sklearn's mean-centred/scaled version.
    :param as_classifier: when True, binarise the target ``y`` into
        progression (> 150) vs no progression and adjust the schema to match.
    :return: a DFDataInput holding the dataframe and its DataSchema.
    """
    import pandas as pd
    if raw:
        # Schema for the raw tab-separated file; values are in natural units.
        schema = DataSchema(
            schema={
                "age": {"type": "number", "help": "Age in years"},
                "sex": {"type": "category", "options": {1: "Male", 2: "Female"}, "help": "Gender",
                        "pre_convert": "integer"},
                "bmi": {"type": "number", "help": "Body mass index"},
                "bp": {"type": "number", "help": "Avg blood pressure"},
                "s1": {"type": "number", "help": "tc, total serum cholesterol"},
                "s2": {"type": "number", "help": "ldl, low-density lipoproteins"},
                "s3": {"type": "number", "help": "hdl, high-density lipoproteins"},
                "s4": {"type": "number", "help": "tch, total cholesterol / HDL"},
                "s5": {"type": "number", "help": "ltg, possibly log of serum triglycerides level"},
                "s6": {"type": "number", "help": "glu, blood sugar level"},
                "y": {
                    "type": "number",
                    "help": "disease progression (<1 yr)"
                }
            },
            y_names=["y"],
        )
        df = pd.read_csv(
            DIABETES_FILE, delimiter="\t"
        )
    else:
        # sklearn's version is mean-centred and scaled, hence the [-1, 1]
        # bounds and the scaled category codes for "sex".
        schema = DataSchema(
            schema={
                "age": {"type": "number", "help": "Age in years", "min": -1, "max": 1},
                "sex": {"type": "category", "options": {-0.04464: "Male", 0.05068: "Female"}, "help": "Gender",
                        "pre_convert": "integer"},
                "bmi": {"type": "number", "help": "Body mass index", "min": -1, "max": 1},
                "bp": {"type": "number", "help": "Avg blood pressure", "min": -1, "max": 1},
                "s1": {"type": "number", "help": "tc, total serum cholesterol", "min": -1, "max": 1},
                "s2": {"type": "number", "help": "ldl, low-density lipoproteins", "min": -1, "max": 1},
                "s3": {"type": "number", "help": "hdl, high-density lipoproteins", "min": -1, "max": 1},
                "s4": {"type": "number", "help": "tch, total cholesterol / HDL", "min": -1, "max": 1},
                "s5": {"type": "number", "help": "ltg, possibly log of serum triglycerides level", "min": -1, "max": 1},
                "s6": {"type": "number", "help": "glu, blood sugar level", "min": -1, "max": 1},
                "y": {
                    "type": "number",
                    "help": "disease progression (<1 yr)"
                }
            },
            y_names=["y"],
        )
        from sklearn.datasets import load_diabetes
        diabetes = load_diabetes()
        df = pd.DataFrame(data=diabetes.data, columns=diabetes.feature_names)
        df["y"] = diabetes.target
    if as_classifier:
        # Replace the numeric target with a binary category: 1 when the
        # progression measure exceeds 150, else 0.
        schema["y"] = {"type": "category", "help": "Boolean flag for disease progression",
                       "pre_convert": "integer", "options": {0: "No progression", 1: "Progression"}}
        df["y"] = df["y"].apply(lambda x: 1 if x > 150 else 0)
    return DFDataInput(data=df, schema=schema)
# Visual elements summarising overall model quality (accuracy, PPV/NPV,
# sensitivity/specificity, Matthews correlation, ROC curve).
model_scope_elements = [
    CardContainer(
        title="The rating below is an average of the accuracies, correlation and AUC scores",
        name="rating_card",
        elements=[
            ModelScore(title="What is the model rating (out of 100)"),
            CardContainer(
                title="The individual aspects of the model can be examined below",
                name="individual_card",
                heading_level=5,
                elements=[
                    HorizContainer(
                        elements=[
                            RocCurve(name="roc", title="ROC curve", width=5, height=5),
                            Container(
                                elements=[
                                    Accuracy(name="accu", title="How accurate is the model overall?",
                                             round_dp=1),
                                    BalancedAccuracy(name="bal_accu",
                                                     title="How accurate if the responses were balanced?",
                                                     round_dp=1),
                                    Ppp(name="ppp", title="Positives correctly identified (PPV)",
                                        round_dp=1),
                                    # Fix: the NPV element previously reused name="ppp"
                                    # (copy-paste from the PPV line), giving two
                                    # elements the same name.
                                    Npp(name="npp", title="Negatives correctly identified (NPV)",
                                        round_dp=1),
                                    Sensitivity(name="sens",
                                                title="True positives out of identified positives (Sensitivity)",
                                                round_dp=1),
                                    Specificity(name="specs",
                                                title="True negatives out of identified negatives (Specificity)",
                                                round_dp=1),
                                    Matthews(name="matt",
                                             title="Prediction correlation (Matthews) (between -1 and 1)",
                                             round_dp=1
                                             )
                                ]
                            )
                        ]
                    )
                ]
            )
        ]
    )
]
# Visual elements for a single prediction: the predicted label, a probability
# plot, and a LIME table explaining how the input values influenced it.
row_scope_elements = [
    HorizContainer(elements=[
        CardContainer(
            title="Prediction and probabilities of survival or death",
            elements=[
                HorizContainer(elements=[
                    Prediction(name="pred", title="Prediction", show_label=True, as_value=False,
                               html_class="bg-info",
                               html_tag="h5"),
                    ProbabilityPlot(name="probs", height=4, width=4, title="Probability plot")
                ]),
                CardContainer(
                    title="Explanation of how supplied values affect the likelihood of this prediction?",
                    name="lime_card",
                    heading_level=5,
                    elements=[
                        LimeTablePlot()
                    ])
            ])
    ])
]
def complex_plan():
    """Build the example "CAP" plan: admission -> catheter check -> ITU/discharge.

    The catheter check is a rule-based score (systolic BP > 90 adds a penalty);
    a truthy decision routes to ITU, otherwise to discharge.

    NOTE(review): several registrations reference names added later in this
    function (e.g. get_step("catheter_check") before add_step("catheter_check"),
    get_source("catheter") before add_source) — presumably Plan.get_* returns a
    lazy reference; confirm against easul.plan.Plan.
    """
    from easul.decision import BinaryDecision
    from easul.plan import Plan
    from easul.visual.element.journey import JourneyMap
    from easul.state import State
    from easul.step import EndStep, StartStep, Step, AlgorithmStep, PreStep, VisualStep
    from easul.visual import Visual
    from easul.action import PreRunStateAction
    complex_plan = Plan(title="CAP")
    # Single piece of state tracking where the patient is in the pathway.
    complex_plan.add_state("admission_state", State(label="admission", default=None))
    complex_plan.add_step("discharge", EndStep(
        title="Discharge",
        actions=[PreRunStateAction(state=complex_plan.get_state("admission_state"), state_value="discharged")]
    ))
    complex_plan.add_step("itu", EndStep(
        title="ITU",
        actions=[PreRunStateAction(state=complex_plan.get_state("admission_state"), state_value="itu")]
    ))
    complex_plan.add_step("admission", StartStep(
        title="Patient admission",
        actions=[PreRunStateAction(state=complex_plan.get_state("admission_state"), state_value="admitted")],
        next_step=complex_plan.get_step("catheter_check")
    ))
    complex_plan.add_step("flowchart", Step(
        title="CAP logic map",
        visual=Visual(
            elements=[
                JourneyMap(route_only=False, start_step="admission")
            ]),
        exclude_from_chart=True
    ))
    complex_plan.add_schema("catheter",
                            DataSchema(
                                schema={
                                    "systolic_bp": {"type": "number"},
                                    "score": {"type": "number"}
                                },
                                y_names=["score"]
                            )
                            )
    from easul.algorithm import ScoreAlgorithm
    complex_plan.add_algorithm("catheter",
                               ScoreAlgorithm(
                                   title="Catheter algorithm",
                                   schema=complex_plan.get_schema("catheter"),
                                   factors=[OperatorFactor(operator=operator.gt, input_field="systolic_bp", value=90,
                                                           penalty=1, title="High systolic BP")]
                               )
                               )
    complex_plan.add_step("catheter_check", AlgorithmStep(
        title="Catheter check",
        actions=[PreRunStateAction(state=complex_plan.get_state("admission_state"), state_value="catheter_check")],
        algorithm=complex_plan.get_algorithm("catheter"),
        source=complex_plan.get_source("catheter"),
        decision=BinaryDecision(
            true_step=complex_plan.get_step("itu"),
            false_step=complex_plan.get_step("discharge")
        )
    ))
    # NOTE(review): "flowchart" is registered a second time here with a
    # different title ("Diabetes logic map") — looks like a copy-paste from the
    # ML variant below; verify whether the second add_step is intended.
    complex_plan.add_step("flowchart", Step(
        title="Diabetes logic map",
        visual=Visual(
            elements=[
                JourneyMap(route_only=False, start_step="admission")
            ]),
        exclude_from_chart=True
    ))
    from easul.source import ConstantSource
    # Constant data feed: systolic_bp=80 means the catheter score stays 0.
    complex_plan.add_source("catheter", ConstantSource(title="Catheter data", data={"systolic_bp": 80}))
    return complex_plan
def complex_plan_with_ml_no_metadata(tempdir):
    """Variant of the ML plan that writes algorithm/visual metadata to *tempdir*.

    :param tempdir: directory path used for the stored algorithm (.eal) and
        the freshly generated visual metadata (.eam) files.
    """
    plan = _complex_plan_with_ml()
    plan.add_algorithm("progression", StoredAlgorithm(filename=tempdir + "/diabetes.eal",
                                                      title="Diabetes progression likelihood",
                                                      definition=diabetes_progression_algorithm
                                                      ))
    plan.add_visual("model_scope", Visual(
        title="Diabetes model scope",
        algorithm=plan.get_algorithm("progression"),
        elements=model_scope_elements,
        metadata_filename=tempdir+"/test_model.eam",
        metadata_dataset="easul.tests.example.diabetes_progression_dataset"
    ))
    plan.add_visual("row_scope", Visual(
        title="Diabetes row scope",
        algorithm=plan.get_algorithm("progression"),
        elements=row_scope_elements,
        metadata_filename=tempdir + "/test_row.eam",
        metadata_dataset="easul.tests.example.diabetes_progression_dataset"
    ))
    return plan
def _complex_plan_with_ml():
    """Shared skeleton for the ML-enabled CAP plan.

    Pathway: admission -> catheter check; a high catheter score routes to an
    ML "progression" check whose decision sends the patient to ITU or
    discharge via intermediate PreSteps.

    NOTE(review): this function references the "progression" algorithm, the
    "progression" source and the "row_scope" visual before/without registering
    them — callers (complex_plan_with_ml / complex_plan_with_ml_no_metadata)
    add the algorithm and visuals after this returns, so Plan.get_* is
    presumably lazy; confirm against easul.plan.Plan.
    """
    from easul.decision import BinaryDecision
    from easul.plan import Plan
    from easul.state import State
    from easul.step import EndStep, StartStep, Step, AlgorithmStep, PreStep, VisualStep
    from easul.visual import Visual
    from easul.action import PreRunStateAction
    import os
    complex_plan_with_ml = Plan(title="CAP")
    # Two state variables: where the patient is, and the ML progression result.
    complex_plan_with_ml.add_state("admission_state", State(label="admission", default=None))
    complex_plan_with_ml.add_state("progression", State(label="progression", default=None))
    complex_plan_with_ml.add_step("discharge", EndStep(
        title="Discharge",
        actions=[PreRunStateAction(state=complex_plan_with_ml.get_state("admission_state"), state_value="discharged")]
    ))
    complex_plan_with_ml.add_step("itu", EndStep(
        title="ITU",
        actions=[PreRunStateAction(state=complex_plan_with_ml.get_state("admission_state"), state_value="itu")]
    ))
    complex_plan_with_ml.add_step("admission", StartStep(
        title="Patient admission",
        actions=[PreRunStateAction(state=complex_plan_with_ml.get_state("admission_state"), state_value="admitted")],
        next_step=complex_plan_with_ml.get_step("catheter_check")
    ))
    complex_plan_with_ml.add_step("flowchart", Step(
        title="CAP logic map",
        visual=Visual(
            elements=[
                JourneyMap(route_only=False, start_step="admission")
            ]),
        exclude_from_chart=True
    ))
    complex_plan_with_ml.add_schema("catheter",
                                    DataSchema(
                                        schema={
                                            "systolic_bp": {"type": "number"},
                                            "score": {"type": "number"}
                                        },
                                        y_names=["score"]
                                    )
                                    )
    # Terminal-adjacent PreSteps record the ML outcome before routing onwards.
    complex_plan_with_ml.add_step("progression_low", PreStep(
        title="Diabetes progression low",
        actions=[PreRunStateAction(state=complex_plan_with_ml.get_state("progression"), state_value="low")],
        next_step=complex_plan_with_ml.get_step("discharge")
    ))
    complex_plan_with_ml.add_step("progression_high", PreStep(
        title="Diabetes progression high",
        actions=[PreRunStateAction(state=complex_plan_with_ml.get_state("progression"), state_value="high")],
        next_step=complex_plan_with_ml.get_step("itu")
    ))
    complex_plan_with_ml.add_step("progression_check", AlgorithmStep(
        algorithm=complex_plan_with_ml.get_algorithm("progression"),
        title="Progression ML",
        actions=[
            PreRunStateAction(state=complex_plan_with_ml.get_state("progression"), state_value="pending"),
            ResultStoreAction()
        ],
        decision=BinaryDecision(
            true_step=complex_plan_with_ml.get_step("progression_high"),
            false_step=complex_plan_with_ml.get_step("progression_low")
        ),
        source=complex_plan_with_ml.get_source("progression"),
        visual=complex_plan_with_ml.get_visual("row_scope")
    ))
    from easul.algorithm import ScoreAlgorithm, StoredAlgorithm
    complex_plan_with_ml.add_algorithm("catheter",
                                       ScoreAlgorithm(
                                           title="Catheter algorithm",
                                           schema=complex_plan_with_ml.get_schema("catheter"),
                                           factors=[
                                               OperatorFactor(title="High blood pressure", operator=operator.gt, input_field="systolic_bp", value=90,
                                                              penalty=1)]
                                       )
                                       )
    complex_plan_with_ml.add_step("catheter_check", AlgorithmStep(
        title="Catheter check",
        actions=[
            PreRunStateAction(state=complex_plan_with_ml.get_state("admission_state"), state_value="catheter_check")],
        algorithm=complex_plan_with_ml.get_algorithm("catheter"),
        source=complex_plan_with_ml.get_source("catheter"),
        decision=BinaryDecision(
            true_step=complex_plan_with_ml.get_step("progression_check"),
            false_step=complex_plan_with_ml.get_step("discharge")
        )
    ))
    from easul.source import ConstantSource
    complex_plan_with_ml.add_source("catheter", ConstantSource(title="Catheter data", data={"systolic_bp": 80}))
    complex_plan_with_ml.add_source("progression", ConstantSource(title="Diabetes progression data", data={}))
    # NOTE(review): second registration under the key "flowchart" — the earlier
    # "CAP logic map" step appears to be superseded; verify this is intended.
    complex_plan_with_ml.add_step("flowchart", Step(
        title="Diabetes logic map",
        visual=Visual(
            elements=[
                JourneyMap(route_only=False, start_step="admission")
            ]),
        exclude_from_chart=True
    ))
    return complex_plan_with_ml
def complex_plan_with_ml():
    """Complete ML-enabled plan using metadata files shipped with the examples.

    Loads the stored diabetes algorithm and the pre-generated model/row scope
    visual metadata from EXAMPLE_PATH, and adds a visual "overview" step.
    NOTE(review): get_visual("model_scope") is used before add_visual — assumed
    to be a lazy reference, as elsewhere in this module.
    """
    plan = _complex_plan_with_ml()
    plan.add_algorithm("progression", StoredAlgorithm(filename=EXAMPLE_PATH + "/metadata/diabetes.eal",
                                                      title="Diabetes progression likelihood",
                                                      definition=diabetes_progression_algorithm
                                                      ))
    plan.add_step("overview", VisualStep(
        title="Model",
        visual=plan.get_visual("model_scope")
    ))
    plan.add_visual("model_scope", Visual(
        title="Diabetes model scope",
        algorithm=plan.get_algorithm("progression"),
        elements=model_scope_elements,
        metadata_filename=EXAMPLE_PATH + "/metadata/model_scope.emd",
        metadata_dataset="easul.tests.example.diabetes_progression_dataset"
    ))
    plan.add_visual("row_scope", Visual(
        title="Diabetes row scope",
        algorithm=plan.get_algorithm("progression"),
        elements=row_scope_elements,
        metadata_filename=EXAMPLE_PATH + "/metadata/row_scope.emd",
        metadata_dataset="easul.tests.example.diabetes_progression_dataset"
    ))
    return plan
# Input schema for the CURB-65 pneumonia severity score: clinical observations
# in, numeric "score" out.
curb65_schema = DataSchema(
    schema={
        "confusion": {"type": "boolean", "required": True},
        "urea": {"type": "number", "required": True},
        "rr": {"type": "number", "required": True},   # respiratory rate
        "sbp": {"type": "number", "required": True},  # systolic blood pressure
        "dbp": {"type": "number", "required": True},  # diastolic blood pressure
        "age": {"type": "number", "required": True},
        "score": {"type": "number", "required": True}
    }, y_names=["score"])
# Example diabetes inputs: one row expected to predict progression, one not.
prog_input_data = {"age": 59, "sex": 2, "bmi": 32.1, "bp": 101, "s1": 157, "s2": 93.2, "s3": 38, "s4": 4, "s5": 4.9,
                   "s6": 87}
no_prog_input_data = {"age": 23, "sex": 1, "bmi": 20.1, "bp": 78, "s1": 77, "s2": 93.2, "s3": 38, "s4": 4, "s5": 4.9,
                      "s6": 37}
def curb65_score_algorithm():
    """Build the CURB-65 severity score: one point per pneumonia risk factor."""
    import operator
    from easul import expression
    from easul.algorithm import logic, factor

    confusion = factor.OperatorFactor(penalty=1, operator=operator.eq, value=1, input_field="confusion", title="Confusion")
    high_urea = factor.OperatorFactor(penalty=1, operator=operator.gt, value=19, input_field="urea", title="High urea")
    high_resp_rate = factor.OperatorFactor(penalty=1, operator=operator.ge, value=30, input_field="rr", title="High respiratory rate")
    # Low blood pressure scores when EITHER systolic < 90 OR diastolic <= 60.
    low_blood_pressure = factor.ExpressionFactor(
        penalty=1,
        title="Low blood pressure",
        expression=expression.OrExpression(
            expressions=[
                expression.OperatorExpression(operator=operator.lt, value=90, input_field="sbp"),
                expression.OperatorExpression(operator=operator.le, value=60, input_field="dbp"),
            ]
        ),
    )
    elderly = factor.OperatorFactor(penalty=1, operator=operator.ge, value=65, input_field="age", title="Age >= 65")
    return logic.ScoreAlgorithm(
        title="CURB65",
        factors=[confusion, high_urea, high_resp_rate, low_blood_pressure, elderly],
        schema=curb65_schema,
        start_score=0,
    )
| rcfgroup/easul | easul/tests/example.py | example.py | py | 21,533 | python | en | code | 1 | github-code | 36 |
496206437 | import os
import pytest
from dagster_aws.emr import EmrJobRunner, emr_pyspark_resource
from dagster_pyspark import pyspark_resource, pyspark_solid
from moto import mock_emr
from dagster import (
DagsterInvalidDefinitionError,
ModeDefinition,
RunConfig,
execute_pipeline,
pipeline,
)
from dagster.seven import mock
from dagster.utils.test import create_test_pipeline_execution_context
@pyspark_solid
def example_solid(context):
    """Parallelize a small name/age dataset and log the first two records."""
    people = [('John', 19), ('Jennifer', 29), ('Adam', 35), ('Henry', 50)]
    spark_context = context.resources.pyspark.spark_context
    first_two = spark_context.parallelize(people).take(2)
    for name, age in first_two:
        context.log.info('%s: %d' % (name, age))
@pyspark_solid(name='blah', description='this is a test', config={'foo': str, 'bar': int})
def other_example_solid(context):
    """Same as example_solid, but registered under the name 'blah' with config."""
    people = [('John', 19), ('Jennifer', 29), ('Adam', 35), ('Henry', 50)]
    spark_context = context.resources.pyspark.spark_context
    first_two = spark_context.parallelize(people).take(2)
    for name, age in first_two:
        context.log.info('%s: %d' % (name, age))
@pipeline(
    mode_defs=[
        # 'prod' runs pyspark on EMR; 'local' uses an in-process Spark context.
        ModeDefinition('prod', resource_defs={'pyspark': emr_pyspark_resource}),
        ModeDefinition('local', resource_defs={'pyspark': pyspark_resource}),
    ]
)
def example_pipe():
    """Pipeline wiring both example solids; solids run independently."""
    example_solid()
    other_example_solid()
def test_local():
    """Execute the example pipeline in 'local' mode with the required config."""
    result = execute_pipeline(
        example_pipe,
        environment_dict={'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},},
        run_config=RunConfig(mode='local'),
    )
    assert result.success
@mock_emr
@mock.patch('dagster_aws.emr.emr.EmrJobRunner.wait_for_steps_to_complete')
def test_pyspark_emr(mock_wait):
    """Run the pipeline in 'prod' mode against a moto-mocked EMR cluster.

    Step completion is mocked out so the test never waits on real EMR steps.
    """
    run_job_flow_args = dict(
        Instances={
            'InstanceCount': 1,
            'KeepJobFlowAliveWhenNoSteps': True,
            'MasterInstanceType': 'c3.medium',
            'Placement': {'AvailabilityZone': 'us-west-1a'},
            'SlaveInstanceType': 'c3.xlarge',
        },
        JobFlowRole='EMR_EC2_DefaultRole',
        LogUri='s3://mybucket/log',
        Name='cluster',
        ServiceRole='EMR_DefaultRole',
        VisibleToAllUsers=True,
    )

    # Doing cluster setup outside of a solid here, because run_job_flow is not yet plumbed through
    # to the pyspark EMR resource.
    job_runner = EmrJobRunner(region='us-west-1')
    context = create_test_pipeline_execution_context()
    cluster_id = job_runner.run_job_flow(context, run_job_flow_args)
    result = execute_pipeline(
        example_pipe,
        environment_dict={
            'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},
            'resources': {
                'pyspark': {
                    'config': {
                        'pipeline_file': __file__,
                        'pipeline_fn_name': 'example_pipe',
                        'cluster_id': cluster_id,
                        'staging_bucket': 'dagster-scratch-80542c2',
                        'region_name': 'us-west-1',
                    }
                }
            },
        },
        run_config=RunConfig(mode='prod'),
    )
    assert result.success
    # Bug fix: `assert mock_wait.called_once` always passed, because accessing an
    # undefined attribute on a Mock auto-creates a (truthy) child Mock. Check the
    # call count explicitly instead.
    assert mock_wait.call_count == 1
def test_bad_requirements_txt():
    """A nonexistent requirements file must raise DagsterInvalidDefinitionError."""
    with pytest.raises(DagsterInvalidDefinitionError) as exc_info:
        execute_pipeline(
            example_pipe,
            environment_dict={
                'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},
                'resources': {
                    'pyspark': {
                        'config': {
                            'requirements_file_path': 'DOES_NOT_EXIST',
                            'pipeline_file': __file__,
                            'pipeline_fn_name': 'example_pipe',
                            'cluster_id': 'some_cluster_id',
                            'staging_bucket': 'dagster-scratch-80542c2',
                            'region_name': 'us-west-1',
                        }
                    }
                },
            },
            run_config=RunConfig(mode='prod'),
        )
    assert 'The requirements.txt file that was specified does not exist' in str(exc_info.value)
    # We have to manually stop the pyspark context here because we interrupted before resources
    # were cleaned up, and so stop() was never called on the spark session.
    from pyspark.sql import SparkSession
    SparkSession.builder.getOrCreate().stop()
@pytest.mark.skip
def test_do_it_live_emr():
    """Manual end-to-end test against a real EMR cluster (skipped in CI).

    Requires AWS credentials and the AWS_EMR_JOB_FLOW_ID environment variable.
    """
    result = execute_pipeline(
        example_pipe,
        environment_dict={
            'solids': {'blah': {'config': {'foo': 'a string', 'bar': 123}}},
            'resources': {
                'pyspark': {
                    'config': {
                        'pipeline_file': __file__,
                        'pipeline_fn_name': 'example_pipe',
                        'cluster_id': os.environ.get('AWS_EMR_JOB_FLOW_ID'),
                        'staging_bucket': 'dagster-scratch-80542c2',
                        'region_name': 'us-west-1',
                    }
                }
            },
        },
        run_config=RunConfig(mode='prod'),
    )
    assert result.success
| helloworld/continuous-dagster | deploy/dagster_modules/libraries/dagster-aws/dagster_aws_tests/emr_tests/test_pyspark.py | test_pyspark.py | py | 5,158 | python | en | code | 2 | github-code | 36 |
24205678140 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 26 13:21:56 2019
@author: nilose
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
from scipy import stats
import scipy.integrate as integrate
def gauss(x, mu, sigma):
    """Normal probability density at x with mean mu and std-dev sigma."""
    normalisation = 1.0 / (np.sqrt(2.0 * np.pi) * sigma)
    return normalisation * np.exp(-0.5 * ((x - mu) ** 2) / (sigma ** 2))
def bigauss(x, mu, sigma, mu2, sigma2):
    """Product of two normal densities evaluated at the same point x."""
    first = gauss(x, mu, sigma)
    second = gauss(x, mu2, sigma2)
    return first * second
def KDE_RSfit(dt_g,dt_cl,outname):
    """Statistically clean a cluster galaxy sample using 2-D KDEs in CMD space.

    Fits Gaussian KDEs of (r, g-r) for galaxies inside R200 ("cluster",
    contaminated) and in an outer annulus ("field"), computes per-galaxy
    field probabilities weighted by a photo-z prior, Monte-Carlo samples
    membership, and writes diagnostic plots plus clean/dirty CSVs prefixed
    with *outname*.

    :param dt_g: galaxy DataFrame — assumed columns include ra, dec, dered_r,
        grModelColor, Column2/Column3 (photo-z and error), obj,
        ClusterMember — TODO confirm against the caller.
    :param dt_cl: cluster record with RA, DEC, Redshift, R500(arcmin).
    :param outname: prefix for all output files.
    :return: copy of *dt_g* with ClusterMember set (1 member, 2 in R200 but
        rejected).
    """
    xdata = dt_cl
    gals = dt_g
    #referencia = 'A85clean-3,7-4r500.csv'
    # R500 arcmin -> degrees; 0.65 presumably converts R500 to R200 — TODO confirm.
    r200 = (xdata['R500(arcmin)']/ 60.0 / 0.65)
    # Magnitude and colour cuts applied to both samples.
    rmin = 13.0
    rmax = 23.0
    grmin = -1.0
    grmax = 4.0
    z = xdata['Redshift']
    ra0 = xdata['RA']
    dec0 = xdata['DEC']
    # Default field annulus: 4-5 R200; rr selects alternative annuli below.
    rFin = 4.0*r200
    rFout = 5.0*r200
    rr=40
    if rr == 1:
        rFin = 3.5*r200
        rFout = 3.8*r200
    if rr == 8:
        rFin = 1.3*r200
        rFout = 1.49*r200
    if rr == 20:
        rFin = 3.0*r200
        rFout = 3.8*r200
    if rr == 30:
        rFin = 5.*r200
        rFout = 5.8*r200
    if rr == 40:
        rFin = 4.*r200
        rFout = 4.8*r200
    # Area ratio normalises field counts to the cluster aperture.
    areaCL = np.pi * r200**2
    areaF = np.pi * (rFout**2 - rFin**2)
    norm = areaCL / areaF
    # Select cluster-region and field-annulus galaxies within the mag/colour cuts.
    galsCL = gals.query('(ra - @ra0)**2 + (dec - @dec0)**2 < (@r200)**2 & dered_r < @rmax & dered_r > @rmin & grModelColor < @grmax & grModelColor > @grmin')
    galsF = gals.query('(ra - @ra0)**2 + (dec - @dec0)**2 < (@rFout)**2 & (ra - @ra0)**2 + (dec - @dec0)**2 > (@rFin)**2 & dered_r < @rmax & dered_r > @rmin & grModelColor < @grmax & grModelColor > @grmin')
    #### Plots the field galaxies
    plt.scatter(galsF['ra'], galsF['dec'], marker='o', color='black', s=4)
    nameid = outname + '-fieldring.png'
    plt.ylabel('DEC (degrees)')
    plt.xlabel('RA (degrees)')
    plt.savefig(nameid, format='png')
    plt.close()
    #### Plots the cluster galaxies
    plt.scatter(galsCL['ra'], galsCL['dec'], marker='o', color='black', s=4)
    nameid = outname + '-clusterregion.png'
    plt.ylabel('DEC (degrees)')
    plt.xlabel('RA (degrees)')
    plt.savefig(nameid, format='png')
    plt.close()
    ####################################
    NgalsF = float(len(galsF))
    NgalsCL = float(len(galsCL))
    # Colour-magnitude coordinates for the cluster-region sample.
    r = galsCL['dered_r']
    gr = galsCL['grModelColor']
    xmin = r.min()
    xmax = r.max()
    ymin = gr.min()
    ymax = gr.max()
    print(xmin)
    print(xmax)
    print(ymin)
    print(ymax)
    print(norm)
    # 100x100 evaluation grid over the CMD for plotting the KDEs.
    X, Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    positions = np.vstack([X.ravel(), Y.ravel()])
    values = np.vstack([r, gr])
    kernelCL = stats.gaussian_kde(values)
    # NOTE(review): assigning into a query() result may trigger pandas'
    # SettingWithCopy warning; works here but is fragile.
    galsCL['kdePDF'] = kernelCL.evaluate(values)
    ##############################################################################
    ### Field KDE
    rField = galsF['dered_r']
    grField = galsF['grModelColor']
    valuesField = np.vstack([rField, grField])
    kernelF = stats.gaussian_kde(valuesField)
    galsCL['kdePDFfield'] = kernelF.evaluate(values) #### field KDE PDF evaluated at the (contaminated) cluster galaxies' CMD positions
    ############################ Probability that a given galaxy is a field galaxy using photoz as prior
    galsCL['prob']=0.0
    galsCL['member']=0.0
    galsCL['prior']=0.0
    meanerror = galsCL['Column3'].std()
    print(meanerror)
    galsclassrest = galsCL.reset_index(drop=True)
   # for i in range(len(galsclass1)):
    for i in range(len(galsCL)):
        # Photo-z prior: probability the galaxy's photo-z Gaussian lies
        # OUTSIDE +-1*meanerror of the cluster redshift.
        mu = galsCL['Column2'].values[i]
        sigma = galsCL['Column3'].values[i]
        integral = integrate.quad(gauss, z - 1*meanerror, z + 1*meanerror , args=(mu,sigma))
        prior = 1 - integral[0]
        #integral2 = integrate.quad(bigauss, -np.inf, np.inf , args=(mu,sigma, z, 0.03))
        #prior2 = 1.0 - integral2[0]
        galsCL['prior'].values[i] = prior
        #galsclass1['prior2'][i] = prior2
        # Field probability: area-normalised field/cluster density ratio times the prior.
        galsCL['prob'].values[i] = norm * galsCL['kdePDFfield'].values[i] * NgalsF / (galsCL['kdePDF'].values[i] * NgalsCL) * prior
    # Same ratio WITHOUT the photo-z prior, kept in a parallel copy.
    galsclassrest['prob'] = norm * galsclassrest['kdePDFfield'] * NgalsF / (galsclassrest['kdePDF'] * NgalsCL)
    ##############################################################################
    ####Plotting the dirty (contaminated) KDE
    Z = np.reshape(kernelCL(positions).T, X.shape)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    figure = ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax])
    #ax.plot(r, gr, 'k.', markersize=2)
    ax.set_xlim(xmin,xmax)
    ax.set_ylim(ymin,ymax)
    ax.scatter(r,gr, marker='.', s=1, color='black')
    cbar = fig.colorbar(figure, ax=ax , use_gridspec=False)
    nameid = outname + '-dirty.png'
    plt.ylabel('g - r')
    plt.xlabel('r')
    cbar.set_label('PDF')
    plt.savefig(nameid, format='png')
    plt.close()
    #plt.show()
    #plt.figure()
    # Both copies (with and without prior) are classified together below.
    df = galsCL.copy()
   # df = df.append(galsclass1, ignore_index=True)
    df = df.append(galsclassrest, ignore_index=True)
    for m in range(1):
        for i in range(int(len(df))):
            # Monte-Carlo membership: 100 draws; keep the galaxy if its field
            # probability loses to a uniform draw at least 68 times (~1 sigma).
            fica=0
            for mcmc in range(100):
                if df['prob'][i] < random.uniform(0,1):
                    fica +=1
            if fica >= 68: #1sigma
                df['member'][i] = 1
                objt=df['obj'][i]
                wh=np.where((gals.ra == df.ra[i]) & (gals.dec == df.dec[i]))[0][0]
#                wh=np.where((gals.obj == objt) ==True)[0]
                gals.ClusterMember[wh]=1
            else:
                df['member'][i] = 0
                wh=np.where((gals.ra == df.ra[i]) & (gals.dec == df.dec[i]))[0][0]
#                wh=np.where((gals.obj == objt) ==True)[0]
                gals.ClusterMember[wh]=2 # not a cluster member, but inside R200
    final=gals.copy()
    clean = df.query('member == 1')
    it = str(m)
    nameid = outname+'_clean.csv'
    clean.to_csv(nameid)
    nameid = outname+'_dirtyWprob.csv'
    df.to_csv(nameid)
    ### Checks normalization of PDFS
    kernelCL.integrate_box([-np.inf,-np.inf],[np.inf,np.inf],maxpts=None)
    kernelF.integrate_box([-np.inf,-np.inf],[np.inf,np.inf],maxpts=None)
    ############################Plots the field data plus the fitted KDE
    ZF = np.reshape(kernelF(positions).T, X.shape)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    figure = ax.imshow(np.rot90(ZF), cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax])
    #ax.plot(rclean, grclean, 'k.', markersize=2)
    ax.set_xlim([xmin, xmax])
    ax.set_ylim([ymin, ymax])
    ax.scatter(rField,grField, marker='.', s=1, color='black')
    cbar = fig.colorbar(figure, ax=ax , use_gridspec=False)
    nameid = outname+ '-field.png'
    plt.ylabel('g - r')
    plt.xlabel('r')
    cbar.set_label('PDF')
    plt.savefig(nameid, format='png')
    plt.close()
    #plt.show()
    ##################################Plots the clean data plus the fitted KDE
    rclean = clean['dered_r']
    grclean = clean['grModelColor']
    valuesclean = np.vstack([rclean, grclean])
    kernelclean = stats.gaussian_kde(valuesclean)
    Zclean = np.reshape(kernelclean(positions).T, X.shape)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    figure = ax.imshow(np.rot90(Zclean), cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax])
    #ax.plot(rclean, grclean, 'k.', markersize=2)
    ax.set_xlim([xmin, xmax])
    ax.set_ylim([ymin, ymax])
    ax.scatter(rclean,grclean, marker='.', s=1, color='black')
    cbar = fig.colorbar(figure, ax=ax , use_gridspec=False)
    nameid = outname + '-clean.png'
    plt.ylabel('g - r')
    plt.xlabel('r')
    cbar.set_label('PDF')
    plt.savefig(nameid, format='png')
    plt.close()
    #plt.show()
    print('##############numeros')
    print('areaCL')
    print(areaCL)
    print('areaF')
    print(areaF)
    print('norm')
    print(norm)
    print('NgalsF')
    print(NgalsF)
    print('NgalsCL')
    print(NgalsCL)
    print('NgalsF*norm')
    print(NgalsF*norm)
    ############################################# Estimator of the clean PDF
    # estclean = (np.rot90(Z)*NgalsCL - np.rot90(ZF)*norm*NgalsF)/(NgalsCL - norm*NgalsF)
    # fig = plt.figure()
    # ax = fig.add_subplot(111)
    # figure = ax.imshow(estclean, cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax])
    # #ax.plot(rclean, grclean, 'k.', markersize=2)
    # ax.set_xlim([xmin, xmax])
    # ax.set_ylim([ymin, ymax])
    # #ax.scatter(rclean,grclean, marker='.', s=1, color='black')
    # cbar = fig.colorbar(figure, ax=ax , use_gridspec=False)
    # nameid = outname + '-theoryPDF.png'
    # plt.ylabel('g - r')
    # plt.xlabel('r')
    # cbar.set_label('PDF')
    # plt.savefig(nameid, format='png')
    # plt.close()
    # #plt.show()
    ############################################# Subtracts the computed clean PDF from the Monte-Carlo sampled one
    # dif = estclean - np.rot90(Zclean)
    # fig = plt.figure()
    # ax = fig.add_subplot(111)
    # figure = ax.imshow(dif, cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax])
    # #ax.plot(rclean, grclean, 'k.', markersize=2)
    # ax.set_xlim([xmin, xmax])
    # ax.set_ylim([ymin, ymax])
    # #ax.scatter(rclean,grclean, marker='.', s=1, color='black')
    # cbar = fig.colorbar(figure, ax=ax , use_gridspec=False)
    # nameid = cl + '-theoryPDF-cleanPDF.png'
    # plt.ylabel('g - r')
    # plt.xlabel('r')
    # cbar.set_label('theoretical PDF - clean sample PDF')
    # plt.savefig(nameid, format='png')
    # plt.close()
    # #plt.show()
return final | NataliaDelCoco/FilamentAnalysis | KDE_RS_V2.py | KDE_RS_V2.py | py | 9,660 | python | en | code | 0 | github-code | 36 |
19739909449 | from vigilo.vigiconf.lib.confclasses.test import Test
class NTPSync(Test):
    """Check whether a host's clock is synchronized with its NTP server (via NRPE)."""
    def add_test(self):
        service_label = "NTP sync"
        self.add_external_sup_service(service_label, "check_nrpe_1arg!check_ntp_time")
        self.add_perfdata_handler(service_label, 'NTP-offset', 'offset', 'offset')
        self.add_graph("NTP Sync", ['NTP-offset'], 'lines', 's')
# vim:set expandtab tabstop=4 shiftwidth=4:
| vigilo/vigiconf | src/vigilo/vigiconf/tests/all/NTPSync.py | NTPSync.py | py | 462 | python | en | code | 3 | github-code | 36 |
20886565147 | #!/usr/bin/env python
"""
Identifies groups of medium order (512, 1152, 1536, 1920, 2187, 6561, 15625, 16807, 78125, 161051)
by connecting to devmirror.lmfdb.xyz and using the stored hashes there.
Usage:
Either provide an input file with hashes to identify, one per line, each of the form N.i
./identify.py -i INPUT_FILE.txt -o OUTPUT_FILE.txt
or provide the input
or provide the input at the command line, separated by newlines
./identify.py < echo "512.1"
Output is written to the designated output file, or sent to stdout (if no output file given)
"""
import os
import sys
import argparse
from collections import defaultdict
from psycopg2 import connect
from psycopg2.sql import SQL, Identifier
## We avoid using the LMFDB to eliminate the dependency on Sage
#opj, ops, ope = os.path.join, os.path.split, os.path.exists
#root = os.getcwd()
## This repo contains an LMFDB folder, and some OSes (like OS X) are not case sensitive
#while not (ope(opj(root, "lmfdb")) and ope(opj(root, "lmfdb", "start-lmfdb.py"))):
# newroot = ops(root)[0]
# if root == newroot:
# raise ModuleNotFoundError("No path to the LMFDB in the current directory")
# root = newroot
#sys.path.append(opj(root, "lmfdb"))
# Importing db from the LMFDB prints a bunch of garbage, so we disable printing for a bit
#savedstdout = sys.stdout
#savedstderr = sys.stderr
#with open(os.devnull, 'w') as F:
# try:
# sys.stdout = F
# sys.stderr = F
# from lmfdb import db
# finally:
# sys.stdout = savedstdout
# sys.stderr = savedstderr
SMALLHASHED = [512, 1152, 1536, 1920, 2187, 6561, 15625, 16807, 78125, 161051]
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help="file containing the hashes to identify, one per line, each of the form N.hsh")
parser.add_argument("-o", "--output", help="file to write the output, lines corresponding to input")
parser.add_argument("hashes", nargs="*", help="input hashes at the command line")
args = parser.parse_args()

# Input priority: positional arguments, then --input file, then stdin.
if args.hashes:
    hashes = args.hashes
elif args.input:
    with open(args.input) as F:
        hashes = list(F)
else:
    hashes = sys.stdin.read().split("\n")
# The following code will need to be updated once gps_groups has hashes and we support identification of larger groups
# Parse each "N.hsh" string into an (order, hash) integer pair.
hashes = [tuple(int(c) for c in hsh.split(".")) for hsh in hashes if hsh.strip()]
hashlookup = defaultdict(list)
## Reduce number of database calls by grouping by order
byord = defaultdict(set)
for N, hsh in hashes:
    # Only orders in SMALLHASHED have precomputed hashes in gps_smallhash.
    if N in SMALLHASHED:
        byord[N].add(hsh)
for N in list(byord):
    byord[N] = sorted(byord[N])
#if len(byord) > 1:
#    query = {"$or": [{"order": N, "hash": ({"$in": L} if len(L) > 1 else L[0])} for (N, L) in byord.items()]}
#else:
#    N = list(byord)[0]
#    L = byord[N]
#    query = {"order": N, "hash": ({"$in": L} if len(L) > 1 else L[0])}
#for rec in db.gps_smallhash.search(query, silent=True):
#    hashlookup[rec["order"], rec["hash"]].append(f'{rec["order"]}.{rec["counter"]}')
# We set up the connection manually using psycopg2 to remove dependencies on the LMFDB and Sage for code running on google cloud.
# Bug fix: only query when at least one hash has a supported order — with an
# empty byord the SQL join produced a malformed "... WHERE " query that crashed
# on execute. The connection is also now closed when we are done with it.
if byord:
    conn = connect(dbname="lmfdb", user="lmfdb", password="lmfdb", host="devmirror.lmfdb.xyz")
    try:
        cur = conn.cursor()
        it = byord.items()
        # One disjunct per order: use = ANY(%s) when several hashes share an order.
        opt1 = SQL("({0} = %s AND {1} = ANY(%s))").format(Identifier("order"), Identifier("hash"))
        opt2 = SQL("({0} = %s AND {1} = %s)").format(Identifier("order"), Identifier("hash"))
        cond = SQL(" OR ").join(opt1 if len(L) > 1 else opt2 for (N, L) in it)
        values = []
        for N, L in it:
            if len(L) > 1:
                values.extend([N, L])
            else:
                values.extend([N, L[0]])
        query = SQL("SELECT {0}, {1}, {2} FROM gps_smallhash WHERE {3}").format(Identifier("order"), Identifier("hash"), Identifier("counter"), cond)
        cur.execute(query, values)
        # Collect every (order, hash) -> ["N.counter", ...] match.
        for vec in cur:
            hashlookup[vec[0], vec[1]].append(f'{vec[0]}.{vec[2]}')
    finally:
        conn.close()
# Unmatched hashes are reported as "N.0"; matches may list several candidates.
out = [hashlookup.get(pair, [f"{pair[0]}.0"]) for pair in hashes]
if args.output:
    # Append to the output file, one pipe-separated line per input hash.
    with open(args.output, "a") as F:
        for opts in out:
            _ = F.write("|".join(opts) + "\n")
else:
    for opts in out:
        print("|".join(opts))
| roed314/FiniteGroups | Code/identify.py | identify.py | py | 4,140 | python | en | code | 2 | github-code | 36 |
22283616647 | #!/Users/tnt/Documents/虚拟环境/Django/bin/python3
# -*- encoding: utf-8 -*-
# Time : 2021/05/27 08:04:03
# Theme : 循环链表
class Node():
    """A single node of a singly linked (circular) list."""

    def __init__(self, data):
        self.data = data
        # Bug fix: the original wrote `self.next = next`, which stored the
        # *builtin* `next` function instead of an empty link.
        self.next = None
class CircularLinkedList():
    """Singly linked circular list: the tail node's ``next`` wraps to ``head``."""

    def __init__(self):
        self.head = None  # entry point; None when the list is empty

    def append(self, data):
        """Insert *data* at the tail (the node whose next is head)."""
        if not self.head:
            # Empty list: the single node points back at itself.
            self.head = Node(data)
            self.head.next = self.head
        else:
            new_node = Node(data)
            cur = self.head
            # Walk until cur is the tail (its next wraps to head).
            while cur.next != self.head:
                cur = cur.next
            # Link tail -> new node -> head.
            cur.next = new_node
            new_node.next = self.head

    def print_list(self):
        """Print each node's data once, starting at head."""
        cur = self.head
        while cur:
            print(cur.data)
            cur = cur.next
            if cur == self.head:
                # We are back at the start: one full lap done.
                break

    def prepend(self, data):
        """Insert *data* before head and make it the new head."""
        new_node = Node(data)
        cur = self.head
        new_node.next = cur
        if not self.head:
            # Empty list: the new node points to itself.
            new_node.next = new_node
        else:
            # Re-point the tail at the new head to keep the circle closed.
            while cur.next != self.head:
                cur = cur.next
            cur.next = new_node
        self.head = new_node

    def remove(self, key):
        """Remove node(s) whose data equals *key*.

        Raises:
            IndexError: if the list is empty.
        """
        if self.head:
            if self.head.data == key:
                cur = self.head
                # Find the tail so it can be re-linked past the old head.
                while cur.next != self.head:
                    cur = cur.next
                if self.head == self.head.next:
                    # Single-element list becomes empty.
                    self.head = None
                else:
                    cur.next = self.head.next
                    self.head = self.head.next
            else:
                cur = self.head
                prev = None
                while cur.next != self.head:
                    prev = cur
                    cur = cur.next
                    if cur.data == key:
                        # Unlink cur and keep scanning from its successor.
                        prev.next = cur.next
                        cur = cur.next
        else:
            raise IndexError("List is None")

    def __len__(self):
        """Number of nodes (one full lap from head)."""
        cur = self.head
        count = 0
        while cur:
            count += 1
            cur = cur.next
            if cur == self.head:
                break
        return count

    def split_list(self):
        """Split the list in half; keep the first half, print both halves."""
        size = len(self)
        if size == 0:
            return None
        if size == 1:
            return self.head
        mid = size // 2
        count = 0
        prev = None
        cur = self.head
        # Advance cur to the first node of the second half.
        while cur and count < mid:
            count += 1
            prev = cur
            cur = cur.next
        # Close the first half into its own circle.
        prev.next = self.head
        split_cllist = CircularLinkedList()
        # Copy the second half into a new circular list.
        while cur.next != self.head:
            split_cllist.append(cur.data)
            cur = cur.next
        split_cllist.append(cur.data)
        self.print_list()
        print("\n")
        split_cllist.print_list()

    def remove_node(self, node):
        """Remove the given *node* object (identity comparison) from the list."""
        # Bug fix: the original head-branch tested `slef.head`, a typo that
        # raised NameError whenever the head node was removed.
        if self.head == node:
            cur = self.head
            while cur.next != self.head:
                cur = cur.next
            if self.head == self.head.next:
                self.head = None
            else:
                cur.next = self.head.next
                self.head = self.head.next
        else:
            cur = self.head
            prev = None
            while cur.next != self.head:
                prev = cur
                cur = cur.next
                if cur == node:
                    prev.next = cur.next
                    cur = cur.next

    def josephus_circle(self, step):
        """Josephus elimination: repeatedly remove every *step*-th node."""
        cur = self.head
        length = len(self)
        while length > 1:
            count = 1
            while count != step:
                cur = cur.next
                count += 1
            # Original (slightly odd) message text is preserved.
            print("KIll" + str(cur.data))
            self.remove_node(cur)
            cur = cur.next
            length -= 1

    def is_circular_linked_list(self, input_list):
        """Return True if *input_list* is circular (tail links back to head)."""
        if input_list.head:
            cur = input_list.head
            while cur.next:
                cur = cur.next
                if cur.next == input_list.head:
                    return True
            # A node with next == None means the chain is open-ended.
            return False
        else:
            return False
# Demo / ad-hoc test driver (runs at import time).
cllist = CircularLinkedList()
cllist.append(1)
# cllist.append(2)
# # cllist.append(3)
# # cllist.append(4)
# NOTE(review): mid-script import of a sibling module; LinkedList is assumed
# to be a plain (non-circular) singly linked list with an `append` method --
# confirm against single_link_list.py.
from single_link_list import LinkedList
llist = LinkedList()
llist.append(1)
llist.append(2)
llist.append(3)
llist.append(4)
# Expected: True (cllist is circular).
print(cllist.is_circular_linked_list(cllist))
print(cllist.is_circular_linked_list(llist)) | Createitv/BeatyPython | 05-PythonAlgorithm/BasicDataStructure/linkedList/circular_linked_lists.py | circular_linked_lists.py | py | 5,011 | python | en | code | 1 | github-code | 36 |
71107235304 | # O(n^2) Time and O(1) Space best, average and worst.
def swap(x, y, arr):
    """Exchange arr[x] and arr[y] in place."""
    temp = arr[x]
    arr[x] = arr[y]
    arr[y] = temp
def selectionSort(array):
    """Sort *array* in place in ascending order and return it.

    Classic selection sort: O(n^2) comparisons in all cases, O(1) extra
    space.

    Bug fixes vs. the original: the inner loop referenced the undefined
    names `arr` and `smallesIdx` (NameError on any non-trivial input);
    both now use the actual parameter/variable names, and the helper
    `swap` call is inlined so the function is self-contained.
    """
    currIdx = 0
    while currIdx < len(array) - 1:
        # Find the index of the smallest element in the unsorted suffix.
        smallestIdx = currIdx
        for x in range(currIdx + 1, len(array)):
            if array[smallestIdx] > array[x]:
                smallestIdx = x
        array[currIdx], array[smallestIdx] = array[smallestIdx], array[currIdx]
        currIdx += 1
    return array
21892894147 | from django.urls import path
from accounts import views
app_name='accounts'
urlpatterns = [
    path('register', views.register, name='register'),
    path('login', views.login, name='login'),
    path('logout', views.logout, name='logout'),
    path('page1', views.page1, name='page1'),
    # Bug fix: the route was 'r^create_view/' -- a leftover regex-style
    # prefix from url(). path() takes a literal route string, so the 'r^'
    # became part of the URL and /create_view/ was unreachable.
    path('create_view/', views.create_view, name='create_view'),
    path('<int:pk>/', views.person_update_view, name='person_change'),
    path('ajax/load-cities/', views.load_cities, name='ajax_load_cities'),  # AJAX
    path('msg/', views.msg, name='msg'),
]
72164631784 | # -*- encoding: utf-8 -*-
# External imports
import requests
import json
import datetime
# ---------------------------------------- Ne pas mettre là
# # Load settings
# with open('settings.json', encoding="utf-8") as f:
# settings = json.load(f)
# # Get the original file
# API_KEY = settings["API_KEY"]
# TOKEN = settings["TOKEN"]
# idList = settings["create_card_default_list_id"]
# idLabels = ["636b89573b1806052382168b", "6371f95494e5ba0140868cdd"]
# name = "Test création Python"
# desc = "Test description"
# ---------------------------------------- Fin du ne pas mettre là
class Trello_API_cards(object):
    """Thin wrapper around the Trello ``/1/cards`` REST API.

    The constructor builds the request for the selected sub-API, performs it
    immediately, and stores the outcome on the instance:

    * success: ``self.status == 'Success'``, ``self.response`` (raw text)
      and ``self.data`` (parsed JSON);
    * failure: ``self.status == 'Error'`` and ``self.error_msg``.

    On init take as arguments :

    - api {str} : name of the API to call
        - "new_card"
        - "update_card"
        - "new_comment"
        - "add_label"
        - "remove_label"
    - API_KEY {str}
    - TOKEN {str}
    - [optional] service {str} : name of the service
    - data {dict} : all informations needed to use the API
    """
    # NOTE(review): if ``api`` is not one of the five known names,
    # self.HTTPmethod / self.url are never set and the request below raises
    # AttributeError. Also ``data={}`` is a mutable default argument.
    def __init__(self, api , API_KEY, TOKEN, service='Trello_Cards', data={}):
        # self.logger = logging.getLogger(service)
        self.endpoint = "https://api.trello.com/1/cards"
        self.service = service
        # Trello authenticates through key/token query parameters.
        self.payload = {
            'key': API_KEY,
            'token': TOKEN
        }
        self.headers = {
            "Accept":"application/json"
        }

        # Dispatch on the requested sub-API: each branch sets the HTTP
        # method, the target URL and the extra payload fields it needs.
        if api == "new_card":
            # New cards are placed at the top of the list, started "now".
            self.payload["pos"] = "top"
            self.payload["start"] = datetime.datetime.now().isoformat()
            self.payload["idList"] = data["idList"]
            self.payload["idLabels"] = data["idLabels"]
            self.payload["name"] = data["name"]
            self.payload["desc"] = data["desc"]
            self.HTTPmethod = "POST"
            self.url = self.endpoint
        elif api == "update_card":
            # Only forward the optional fields actually present in `data`.
            param_list = ["pos", "idList", "idLabels", "name", "desc"]
            for param in param_list:
                if param in data:
                    self.payload[param] = data[param]
            self.HTTPmethod = "PUT"
            self.url = self.endpoint + "/{}".format(data["id"])
        elif api == "new_comment":
            self.payload["text"] = data["text"]
            self.HTTPmethod = "POST"
            self.url = self.endpoint + "/{}/actions/comments".format(data["id"])
        elif api == "add_label":
            self.payload["value"] = data["value"]
            self.HTTPmethod = "POST"
            self.url = self.endpoint + "/{}/idLabels".format(data["id"])
        elif api == "remove_label":
            self.HTTPmethod = "DELETE"
            self.url = self.endpoint + "/{}/idLabels/{}".format(data["id"], data["idLabel"])

        try:
            r = requests.request(self.HTTPmethod, self.url, headers=self.headers, params=self.payload)
            r.raise_for_status()
        except requests.exceptions.HTTPError:
            self.status = 'Error'
            # self.logger.error("{} :: {} :: HTTP Status: {} || Method: {} || URL: {} || Response: {}".format(query, service, r.status_code, r.request.method, r.url, r.text))
            # NOTE(review): this message mentions "Biblionumber" -- it looks
            # copied from a Koha biblio client (see the commented logger
            # lines) and does not describe a Trello failure.
            self.error_msg = "Biblionumber inconnu ou service indisponible"
        except requests.exceptions.RequestException as generic_error:
            self.status = 'Error'
            # self.logger.error("{} :: Koha_API_PublicBiblio_Init :: Generic exception || URL: {} || {}".format(bibnb, url, generic_error))
            self.error_msg = "Exception générique, voir les logs pour plus de détails"
        else:
            # Successful call: keep both the raw body and the decoded JSON.
            self.response = r.content.decode('utf-8')
            self.data = json.loads(self.response)
            self.status = 'Success'
# self.logger.debug("{} :: {} :: Notice trouvée".format(query, service)) | Alban-Peyrat/Trello_API_interface | Trello_API_cards.py | Trello_API_cards.py | py | 3,799 | python | en | code | 0 | github-code | 36 |
70387249063 | from aplication.models import historical_record
from aplication.dto.dto_record import dto_record
import datetime as dt
def register(_record: dto_record):
    """Persist a new, active historical_record for the given turn, stamped with today's date and the current time."""
    entry = historical_record()
    entry.registration_date = dt.date.today()
    entry.registration_time = dt.datetime.now().strftime('%H:%M:%S')
    entry.turn = _record.turn
    entry.active = True
    entry.save()
| GustavoRosario/pass | pj/aplication/controles/record.py | record.py | py | 414 | python | en | code | 0 | github-code | 36 |
22377866084 | from django import forms
from django.db import transaction
from .models import CustomUser
from django.contrib.auth.forms import UserCreationForm,UserChangeForm
class CustomerSignUpForm(UserCreationForm):
    """Sign-up form that creates a CustomUser flagged as a customer."""

    class Meta:
        model = CustomUser
        fields = ('username', 'name', 'email', 'number', 'address')

    @transaction.atomic
    def save(self, commit=True):
        """Create the user with ``is_customer`` set.

        Fix: the original signature was ``save(self)``, which broke the
        ModelForm contract -- any caller passing ``save(commit=False)``
        raised TypeError. ``commit`` defaults to True, matching the old
        always-save behaviour.
        """
        user = super().save(commit=False)
        user.is_customer = True
        if commit:
            user.save()
        return user
class StartupSignUpForm(UserCreationForm):
    """Sign-up form that creates a CustomUser flagged as a startup."""

    class Meta:
        model = CustomUser
        fields = ('username', 'name', 'email', 'number', 'address', 'dipp', 'description')

    @transaction.atomic
    def save(self, commit=True):
        """Create the user with ``is_startup`` set.

        Fix: the original signature was ``save(self)``, which broke the
        ModelForm contract -- any caller passing ``save(commit=False)``
        raised TypeError. ``commit`` defaults to True, matching the old
        always-save behaviour.
        """
        user = super().save(commit=False)
        user.is_startup = True
        if commit:
            user.save()
        return user
37402801837 | from regression_tests import *
class TestDetection_QB64(Test):
    """Checks that fileinfo reports the QB64 compiler for the sample inputs."""

    settings = TestSettings(
        tool='fileinfo',
        input=files_in_dir('inputs'),
        args='--json'
    )

    # NOTE(review): the method name says "autoit" but the assertion checks
    # for the QB64 compiler; likely a copy-paste leftover. Kept unchanged so
    # the externally visible test name stays the same.
    def test_detected_autoit(self):
        self.assertTrue(self.fileinfo.succeeded)
        qb64_recognized = any(
            tool['type'] == 'compiler' and tool['name'] == 'QB64'
            for tool in self.fileinfo.output["tools"]
        )
        self.assertTrue(qb64_recognized)
| avast/retdec-regression-tests | tools/fileinfo/detection/compilers/qb64/test.py | test.py | py | 504 | python | en | code | 11 | github-code | 36 |
36808295769 | import copy
from typing import Tuple, Union
from numbers import Number
import torchio as tio
from torchio.transforms.augmentation import RandomTransform
import torch
import numpy as np
class ReconstructMeanDWI(RandomTransform):
    """Randomly reconstructs a mean DWI volume from a full DWI series.

    Random gradient directions are drawn; diffusion volumes whose b-vectors
    align with those directions (weighted by ``directionality``) are sampled
    and averaged into the image stored under ``mean_dwi_image_name``.
    """

    def __init__(
            self,
            full_dwi_image_name: str = "full_dwi",
            mean_dwi_image_name: str = "mean_dwi",
            bvec_name: str = "grad",
            num_dwis: Union[int, Tuple[int, int]] = 15,
            num_directions: Union[int, Tuple[int, int]] = 1,
            directionality: Union[Number, Tuple[Number, Number]] = 4,
            bval_range: Tuple[Number, Number] = (1e-5, 501.0),
            **kwargs
    ):
        super().__init__(**kwargs)
        self.full_dwi_image_name = full_dwi_image_name
        self.mean_dwi_image_name = mean_dwi_image_name
        self.bvec_name = bvec_name
        self.num_dwis = num_dwis
        self.num_directions = num_directions
        self.directionality = directionality
        self.bval_range = bval_range
        self.args_names = ("full_dwi_image_name", "mean_dwi_image_name", "bvec_name", "num_dwis", "num_directions",
                           "directionality", "bval_range")

    def apply_transform(self, subject: tio.Subject) -> tio.Subject:
        if self.full_dwi_image_name not in subject:
            return subject

        full_dwi_image = subject[self.full_dwi_image_name]
        full_dwi = full_dwi_image.data.numpy()
        grad = full_dwi_image[self.bvec_name].numpy()

        # grad columns: bvec (x, y, z) then bval; keep only volumes whose
        # b-value lies inside bval_range.
        bvals = grad[:, 3]
        bvecs = grad[:, :3]
        mask = (bvals > self.bval_range[0]) & (bvals < self.bval_range[1])
        bvecs = bvecs[mask]
        full_dwi = full_dwi[mask]

        num_dwis = self.get_num_dwis()
        num_directions = self.get_num_directions()
        directionality = self.get_directionality()

        # Random unit directions; sampling probability favours volumes whose
        # gradient aligns with any of them (sharper as directionality grows).
        random_directions = np.random.randn(3, num_directions)
        random_directions = random_directions / np.linalg.norm(random_directions, axis=0, keepdims=True)
        sample_probabilities = np.max(np.abs(bvecs @ random_directions) ** directionality, axis=1)
        sample_probabilities = sample_probabilities / sample_probabilities.sum()
        indices = np.arange(full_dwi.shape[0])
        indices = np.random.choice(indices, size=num_dwis, p=sample_probabilities)
        mean_dwi = np.mean(full_dwi[indices], axis=0, keepdims=True)

        if self.mean_dwi_image_name in subject:
            mean_dwi_image = subject[self.mean_dwi_image_name]
        else:
            mean_dwi_image = copy.deepcopy(full_dwi_image)
            subject.add_image(mean_dwi_image, self.mean_dwi_image_name)
        mean_dwi_image.set_data(mean_dwi)

        return subject

    def get_num_dwis(self):
        """Sample the number of DWIs (quadratically biased toward the low end when a range is given)."""
        if isinstance(self.num_dwis, int):
            return self.num_dwis
        elif isinstance(self.num_dwis, Tuple):
            low, high = self.num_dwis
            sample = np.random.rand()
            sample = sample ** 2
            sample = sample * (high - low + 1) + low
            sample = int(sample)
            return sample
        else:
            raise ValueError(f"Unexpected type {type(self.num_dwis)} for num_dwis")

    def get_num_directions(self):
        """Sample the number of random directions."""
        if isinstance(self.num_directions, int):
            # Bug fix: the original returned self.num_dwis here, so a fixed
            # num_directions silently used the num_dwis setting instead.
            return self.num_directions
        elif isinstance(self.num_directions, Tuple):
            return np.random.randint(self.num_directions[0], self.num_directions[1] + 1)
        else:
            raise ValueError(f"Unexpected type {type(self.num_directions)} for num_directions.")

    def get_directionality(self):
        """Sample the directionality exponent (uniform when a range is given)."""
        if isinstance(self.directionality, Number):
            return self.directionality
        elif isinstance(self.directionality, Tuple):
            return np.random.uniform(self.directionality[0], self.directionality[1])
        else:
            raise ValueError(f"Unexpected type {type(self.directionality)} for directionality")

    def is_invertible(self):
        return False
class ReconstructMeanDWIClassic(RandomTransform):
    """Reconstructs Mean Diffusion Weighted Images. `subset_size` gradients are first selected based
    on their distance to a randomly chosen gradient direction. A random number of images in this subset
    are averaged.

    Args:
        bvec_name: Key for the bvec Tensor in the image dictionary
        subset_size: Upper bound of the uniform random variable of images to average
    """
    def __init__(
            self,
            full_dwi_image_name: str = "full_dwi",
            mean_dwi_image_name: str = "mean_dwi",
            bvec_name: str = "grad",
            subset_size: int = 15,
            bval_range: Tuple[float, float] = (1e-5, 501.0),
            **kwargs
    ):
        super().__init__(**kwargs)
        self.full_dwi_image_name = full_dwi_image_name
        self.mean_dwi_image_name = mean_dwi_image_name
        self.bvec_name = bvec_name
        self.subset_size = subset_size
        self.bval_range = bval_range
        self.args_names = ("full_dwi_image_name", "mean_dwi_image_name", "bvec_name", "subset_size", "bval_range")

    def apply_transform(self, subject: tio.Subject) -> tio.Subject:
        # No-op when the subject lacks the full DWI image.
        if self.full_dwi_image_name not in subject:
            return subject

        full_dwi_image = subject[self.full_dwi_image_name]
        full_dwi = full_dwi_image.data
        grad = full_dwi_image[self.bvec_name]

        # grad columns: bvec (x, y, z) then bval; keep volumes within bval_range.
        bvals = grad[:, 3]
        bvecs = grad[:, :3]
        mask = (bvals > self.bval_range[0]) & (bvals < self.bval_range[1])
        bvecs = bvecs[mask]
        full_dwi = full_dwi[mask]

        # Pick a random reference gradient and take the subset_size b-vectors
        # closest to it (squared Euclidean distance).
        rand_bvec = bvecs[np.random.randint(bvecs.shape[0])]
        dist = torch.sum((bvecs - rand_bvec) ** 2, dim=1)
        closest_indices = np.argsort(dist)[: self.subset_size]

        # Average a random-sized random sample of that subset.
        # NOTE(review): randint(low=1, high=subset_size) never selects the
        # full subset (high is exclusive) -- confirm this is intended.
        number_of_selections = np.random.randint(low=1, high=self.subset_size)
        ids = torch.randperm(closest_indices.shape[0])[:number_of_selections]
        selected_indices = closest_indices[ids]
        mean_dwi = torch.mean(full_dwi[selected_indices], dim=0)

        # Reuse the existing mean image if present, otherwise clone the full
        # DWI image so the spatial metadata is preserved.
        if self.mean_dwi_image_name in subject:
            mean_dwi_image = subject[self.mean_dwi_image_name]
        else:
            mean_dwi_image = copy.deepcopy(full_dwi_image)
            subject.add_image(mean_dwi_image, self.mean_dwi_image_name)
        mean_dwi_image.set_data(mean_dwi.unsqueeze(0))

        return subject

    def is_invertible(self):
        return False
| efirdc/Segmentation-Pipeline | segmentation_pipeline/transforms/reconstruct_mean_dwi.py | reconstruct_mean_dwi.py | py | 6,434 | python | en | code | 1 | github-code | 36 |
13747681752 | import re
def grab_ip(file):
    """Print the IP addresses found in *file*, most frequent first.

    Every dotted-quad token on every line is counted; the (ip, count)
    pairs are then printed in descending (count, ip) order. Returns None.

    Fix: the original ignored its *file* argument and always opened a
    hard-coded local path (shadowing the parameter with the file handle);
    it now reads the path it is given.
    """
    # \d{1,3} also matches out-of-range octets such as 999 -- kept for
    # compatibility with the original pattern.
    ip_pattern = re.compile(r'(?:\d{1,3})\.(?:\d{1,3})\.(?:\d{1,3})\.(?:\d{1,3})')
    occurence = {}
    with open(file) as fh:
        for line in fh:
            for ip in ip_pattern.findall(line):
                occurence[ip] = occurence.get(ip, 0) + 1
    # Sort ascending by (count, ip), then print in reverse for descending order.
    final = sorted(occurence.items(), key=lambda kv: (kv[1], kv[0]))
    for x in reversed(final):
        print(x)
# grab_ip prints its results itself and returns None, so this also prints "None".
print(grab_ip('data'))
| rajekum/Test | bluestackdemo.py | bluestackdemo.py | py | 649 | python | en | code | 0 | github-code | 36 |
26307003757 | # from logger import get_logger
from time import time
import re
class Flow:
    """A named, periodically executed data-collection task.

    A flow is configured from a dict (name, params, run_interval,
    mysql_type, mysql_table); its name must match one of the custom
    methods on this class, which is invoked from `loop_pass` whenever
    `run_interval` seconds have elapsed since the last run.
    """

    def __init__(self, logger, flow):
        self._name = None
        self.LOGGER = logger
        self._flow_config = flow
        self._last_run_timestamp = None
        self._name = str(flow['name'])
        self._params = tuple(flow['params'])
        self._run_interval = float(flow['run_interval'])
        self._mysql_type = str(flow['mysql_type'])
        self._mysql_table = str(flow['mysql_table'])

        # Custom method vars
        self.lan_traffic_usage_first_run = True
        self.interface_usage_list = ['ether1-gateway', 'ether2-master-local']

        # Check if method for flow exists
        if not self.__method_exists(self._name):
            raise Exception('Flow [{}] method not found in flow class'.format(self._name))

    def __method_exists(self, methodName):
        """True if this class has a callable attribute named *methodName*."""
        return hasattr(self.__class__, methodName) and callable(getattr(self.__class__, methodName))

    def __run_method(self, methodName, methodArgs=None):
        """Invoke the flow's custom method, passing *methodArgs* (the API client)."""
        return getattr(self, methodName, None)(methodArgs)

    def get_flow_name(self):
        return self._name

    def get_flow_mysql_type(self):
        return self._mysql_type

    def get_flow_mysql_table(self):
        return self._mysql_table

    def get_params(self):
        return self._params

    # Called each thread loop pass, check if its time to execute method
    # Thread passes active api client object and it is passed to every method
    def loop_pass(self, client):
        if not self._last_run_timestamp or time() - self._last_run_timestamp > self._run_interval:
            self.LOGGER.debug("Running method {}".format(self._name))
            # Run flow custom method
            method_result = self.__run_method(self._name, client)
            self._last_run_timestamp = time()
            self.LOGGER.debug("Method finished successfully")
            if method_result:
                return {'name': self._name,
                        'payload': method_result}

    # Custom flow methods. Called exactly as flow name
    # Method must return tuple with all results in order defined in flow config
    # @RouterOSApiClient
    def dhcp_server_leases(self, client):
        """Collect DHCP leases as (mac, ip, hostname, name, color, bound) tuples."""
        lease_resource = client.get_resource('/ip/dhcp-server/lease/')
        lease_list = lease_resource.get()
        self.LOGGER.debug("Retrieved {} leases".format(len(lease_list)))
        result = []
        for lease in lease_list:
            comment = lease.get('comment') or ''
            # The comment optionally encodes "name;;color".
            try:
                tmp = comment.split(';;')
                name = tmp[0]
                color = tmp[1]
                if not color.startswith('#'):
                    color = '#{}'.format(color)
            except Exception:
                # Fix: was a bare `except:`; narrowed so SystemExit /
                # KeyboardInterrupt are no longer swallowed. Fallback keeps
                # the whole comment as the name with a default color.
                name = comment
                color = '#44dddd'
            result.append(
                (
                    lease.get('mac-address'),
                    lease.get('address'),
                    lease.get('host-name') or 'unknown',
                    name,
                    color,
                    1 if lease.get('status') == 'bound' else 0
                )
            )
        return result

    # @RouterOSApiClient
    def lan_traffic_usage(self, client):
        """Snapshot IP accounting and classify each row as local/upload/download/wan."""
        self.LOGGER.debug("Retrieving lan trafic data")
        traffic_resource = client.get_resource('/ip/accounting/snapshot/')
        traffic_resource.call('take')
        traffic_list = traffic_resource.get()
        self.LOGGER.debug("Got {} lan traffic rows".format(len(traffic_list)))
        res = []
        # Determine traffic type
        # Use lan range regex for checking which host is in LAN
        # Ex '192\.168\.\d{1,3}\.\d{1,3}' for 192.168.0.0/16
        # (raw string: same pattern, avoids invalid-escape warnings)
        lan_regex = re.compile(r'192\.168\.\d{1,3}\.\d{1,3}')
        for traffic in traffic_list:
            source_ip = str(traffic.get('src-address')).strip()
            destination_ip = str(traffic.get('dst-address')).strip()
            bandwidth_count = str(traffic.get('bytes')).strip()
            packet_count = str(traffic.get('packets')).strip()

            if lan_regex.match(source_ip) and lan_regex.match(destination_ip):
                traffic_type = 'local'
                local_ip = source_ip
            elif lan_regex.match(source_ip) and not lan_regex.match(destination_ip):
                traffic_type = 'upload'
                local_ip = source_ip
            elif not lan_regex.match(source_ip) and lan_regex.match(destination_ip):
                traffic_type = 'download'
                local_ip = destination_ip
            else:
                traffic_type = 'wan'
                local_ip = ''

            res.append(
                (
                    self._run_interval,
                    traffic_type,
                    source_ip,
                    destination_ip,
                    local_ip,
                    bandwidth_count,
                    packet_count
                )
            )
        # If its a first run don't return anything (the first snapshot has
        # no meaningful interval to compare against).
        if self.lan_traffic_usage_first_run:
            self.lan_traffic_usage_first_run = False
        else:
            return res

    # @RouterOSApiClient
    def interface_usage(self, client):
        """Collect one-shot monitor-traffic counters for the configured interfaces."""
        self.LOGGER.debug("Retrieving interface traffic data")
        resource = client.get_resource('/interface')
        interface_list = ",".join(self.interface_usage_list)
        interface_traffic_results = resource.call('monitor-traffic',
                                                  arguments={'interface': interface_list, 'once': ''})
        self.LOGGER.debug("Interface traffic data results >> {}".format(interface_traffic_results))
        res = []
        for interface_traffic in interface_traffic_results:
            res.append(
                (
                    interface_traffic['name'],
                    interface_traffic['rx-bits-per-second'],
                    interface_traffic['tx-bits-per-second'],
                    interface_traffic['rx-packets-per-second'],
                    interface_traffic['tx-packets-per-second'],
                    interface_traffic['rx-drops-per-second'],
                    interface_traffic['tx-drops-per-second'],
                    interface_traffic['rx-errors-per-second'],
                    interface_traffic['tx-errors-per-second']
                )
            )
        return res
| ivanpavlina/DataScrapper | lib/flow.py | flow.py | py | 6,396 | python | en | code | 0 | github-code | 36 |
2026891662 | import argparse
import torch
from torch.autograd import Variable
from network_prep import create_loaders, prep_model, create_classifier
def get_input_args():
    """Build and parse the training command-line arguments.

    Returns the argparse Namespace with: data_dir (positional), save_dir,
    arch, learning_rate, hidden_units, output_size, epochs and gpu.
    """
    ap = argparse.ArgumentParser(description='Get NN arguments')
    ap.add_argument('data_dir', type=str, help='mandatory data directory')
    # Optional flags, declared as (name, options) pairs for compactness.
    optional = [
        ('--save_dir', dict(default='', help='Directory to save checkpoint.')),
        ('--arch', dict(default='vgg', help='default architecture, options: vgg, densenet, resnet')),
        ('--learning_rate', dict(default=0.001, type=float, help='default learning rate')),
        ('--hidden_units', dict(default='512', type=str, help='default hidden layer sizes')),
        ('--output_size', dict(default=102, type=int, help='default hidden output_size')),
        ('--epochs', dict(default=3, type=int, help='default training epochs')),
    ]
    for flag, opts in optional:
        ap.add_argument(flag, **opts)
    ap.add_argument('--gpu', default=False, action='store_true', help='use GPU processing')
    return ap.parse_args()
def train_classifier(model, trainloader, validloader, criterion, optimizer, epochs, gpu):
    """Train `model` on `trainloader`, validating every 40 steps.

    Prints running training loss, validation loss and validation accuracy,
    then returns the trained model. Uses CUDA when `gpu` is True and a
    device is available; otherwise falls back to CPU with a warning.
    """
    steps = 0
    print_every = 40  # validation/reporting cadence, in mini-batches
    run_loss = 0
    if gpu and torch.cuda.is_available():
        print('GPU TRAINING')
        model.cuda()
    elif gpu and torch.cuda.is_available() == False:
        print('GPU processing selected but no NVIDIA drivers found... Training under cpu')
    else:
        print('CPU TRAINING')
    for e in range(epochs):
        model.train()
        for images, labels in iter(trainloader):
            steps += 1
            # NOTE(review): Variable() is a no-op since torch 0.4; kept for
            # consistency with the rest of the file.
            images, labels = Variable(images), Variable(labels)
            if gpu and torch.cuda.is_available():
                images, labels = images.cuda(), labels.cuda()
            # Standard step: reset grads, forward, loss, backward, update.
            optimizer.zero_grad()
            out = model.forward(images)
            loss = criterion(out, labels)
            loss.backward()
            optimizer.step()
            run_loss += loss.data.item()
            if steps % print_every == 0:
                # Periodic validation pass (no gradient tracking).
                model.eval()
                acc = 0
                valid_loss = 0
                for images, labels in iter(validloader):
                    images, labels = Variable(images), Variable(labels)
                    if gpu and torch.cuda.is_available():
                        images, labels = images.cuda(), labels.cuda()
                    with torch.no_grad():
                        out = model.forward(images)
                        valid_loss += criterion(out, labels).data.item()
                        # Model emits log-probabilities; exp() recovers
                        # class probabilities for the accuracy check.
                        ps = torch.exp(out).data
                        equality = (labels.data == ps.max(1)[1])
                        acc += equality.type_as(torch.FloatTensor()).mean()
                print("Epoch: {}/{}.. ".format(e+1, epochs),
                      "Training Loss: {:.3f}..".format(run_loss/print_every),
                      "Valid Loss: {:.3f}..".format(valid_loss/len(validloader)),
                      "Valid Accuracy: {:.3f}".format(acc/len(validloader)))
                run_loss = 0
                model.train()
    print('{} EPOCHS COMPLETE. MODEL TRAINED.'.format(epochs))
    return model
def test_classifier(model, testloader, criterion, gpu):
    """Evaluate `model` on `testloader` and print test loss and accuracy.

    Mirrors the validation pass in train_classifier: eval mode, no gradient
    tracking, CUDA when requested and available. Returns None.
    """
    if gpu and torch.cuda.is_available():
        print('GPU TESTING')
        model.cuda()
    elif gpu and torch.cuda.is_available() == False:
        print('CPU processing selected but no NVIDIA drivers found... testing under cpu')
    else:
        print('CPU TESTING')
    model.eval()
    acc = 0
    test_loss = 0
    for images, labels in iter(testloader):
        # NOTE(review): Variable() is a no-op since torch 0.4; kept for
        # consistency with the rest of the file.
        images, labels = Variable(images), Variable(labels)
        if gpu and torch.cuda.is_available():
            images, labels = images.cuda(), labels.cuda()
        with torch.no_grad():
            out = model.forward(images)
            test_loss += criterion(out, labels).data.item()
            # Log-probabilities -> probabilities; argmax gives predictions.
            ps = torch.exp(out).data
            equality = (labels.data == ps.max(1)[1])
            acc += equality.type_as(torch.FloatTensor()).mean()
    print("Test Loss: {:.3f}..".format(test_loss/len(testloader)),
          "Test Accuracy: {:.3f}".format(acc/len(testloader)))
    pass
def save_model_checkpoint(model, input_size, epochs, save_dir, arch, learning_rate, class_idx, optimizer, output_size):
    """Serialize the trained model plus rebuild metadata to <save_dir>/checkpoint.pth.

    The checkpoint stores everything needed to reconstruct the classifier:
    sizes, architecture name, learning rate, the class-to-index mapping,
    the optimizer state and the model/classifier weights.
    """
    import os  # local import keeps this fix self-contained

    saved_model = {
        'input_size': input_size,
        'epochs': epochs,
        'arch': arch,
        # Record the classifier layer widths so the network can be rebuilt.
        'hidden_units': [each.out_features for each in model.classifier if hasattr(each, 'out_features')],
        'output_size': output_size,
        'learning_rate': learning_rate,
        'class_to_idx': class_idx,
        'optimizer_dict': optimizer.state_dict(),
        'classifier': model.classifier,
        'state_dict': model.state_dict()
    }
    # os.path.join handles '', 'dir' and 'dir/' uniformly, replacing the
    # manual length check on save_dir.
    save_path = os.path.join(save_dir, 'checkpoint.pth')
    torch.save(saved_model, save_path)
    print('Model saved at {}'.format(save_path))
def main():
    """End-to-end training pipeline: parse args, build data loaders and model,
    train, evaluate on the test set and save a checkpoint.

    create_loaders / prep_model / create_classifier come from network_prep.
    """
    in_args = get_input_args()
    trainloader, testloader, validloader, class_idx = create_loaders(in_args.data_dir)
    model, input_size = prep_model(in_args.arch)
    model, criterion, optimizer = create_classifier(model, input_size, in_args.hidden_units, in_args.output_size, in_args.learning_rate)
    trained_model = train_classifier(model, trainloader, validloader, criterion, optimizer, in_args.epochs, in_args.gpu)
    test_classifier(trained_model, testloader, criterion, in_args.gpu)
    save_model_checkpoint(trained_model, input_size, in_args.epochs, in_args.save_dir, in_args.arch, in_args.learning_rate, class_idx, optimizer, in_args.output_size)
    pass

if __name__ == '__main__':
    main()
| hikaruendo/udacity | ai programming with python1/train.py | train.py | py | 6,086 | python | en | code | 0 | github-code | 36 |
28516429877 | bins = [2, 3, 6, 10, 20, 50, 100]
# NOTE(review): `bins` is defined on the preceding line of the file.
bins_str = [str(i) for i in bins]
bin_pre = None  # NOTE(review): appears unused below
bin_var = 'establishment.employment_lag1'
# Piecewise-linear spline terms over lagged employment: for each knot an
# indicator expression for the bin and a slope term measured from the knot.
# These are expression *strings* evaluated later by the modelling framework.
lower_bound = ['(%s >= %s)' % (bin_var, bin) for bin in bins]
upper_bound = ['(%s < %s)' % (bin_var, bin) for bin in bins[1:]] + ['']
vars = []
for bin, l, u in zip(bins_str, lower_bound, upper_bound):
    w = 'w%s=%s*%s' % (bin, l, u)
    # strip('*') drops the dangling '*' for the open-ended last bin, whose
    # upper bound is the empty string.
    vars.append( w.strip('*') )
    wslope = 'w%sslope=paris.establishment.w%s*(%s - %s)' % (bin, bin, bin_var, bin)
    vars.append( wslope )
aliases = vars + [
    "dept_id = establishment.disaggregate(building.dept)",
    "insee = establishment.disaggregate(zone.insee, intermediates=[building])",
    "LaDef = numpy.setmember1d(establishment.insee, (92050, 92026, 92062))",
    # NOTE(review): same commune set as LaDef -- possibly a copy-paste
    # placeholder; confirm the intended INSEE codes.
    "CVilNouvel = numpy.setmember1d(establishment.insee, (92050, 92026, 92062))",
    "rmse_ln_emp_ratio = numpy.sqrt(establishment.disaggregate(sector.aggregate(establishment._init_error_ln_emp_ratio**2, function=mean)))",
    "emp_250 = (establishment.employment < 250).astype('i')"
]
| psrc/urbansim | paris/establishment/aliases.py | aliases.py | py | 1,062 | python | en | code | 4 | github-code | 36 |
39056592319 | from numpy import array,zeros
from matplotlib import pyplot as plt
# Parse variance-reduction (VR) values out of a set of inversion log files,
# one per tested rupture velocity, and plot VR vs. rupture velocity.
num = '0016'
path = '/Users/dmelgar/Slip_inv/Amatrice_3Dfitsgeol_final1/output/inverse_models/models/_previous/'
root1 = 'bigkahuna_vrtest3win_vr'
root2 = '.' + num + '.log'

vr = array([1.6, 1.8, 2.0, 2.2, 2.4, 2.6])
vr_static = zeros(len(vr))
vr_insar = zeros(len(vr))
vr_velocity = zeros(len(vr))

for k in range(len(vr)):
    # Scan each log for the three "VR ... = value" lines.
    with open(path + root1 + str(vr[k]) + root2, 'r') as f:
        while True:
            line = f.readline()
            if not line:
                # Bug fix: readline() returns '' forever at EOF, so a log
                # missing the 'VR InSAR' line used to spin indefinitely.
                break
            if 'VR static' in line:
                vr_static[k] = float(line.split('=')[-1])
            elif 'VR velocity' in line:
                vr_velocity[k] = float(line.split('=')[-1])
            elif 'VR InSAR' in line:
                vr_insar[k] = float(line.split('=')[-1])
                break

plt.figure()
# Constant offsets preserved from the original script (presumably to
# separate the curves visually -- confirm with the author).
plt.plot(vr, vr_static + 19)
plt.plot(vr, vr_velocity + 44)
plt.plot(vr, vr_insar + 14)
plt.legend(['GPS', 'SM', 'InSAR'], loc=3)
plt.xlabel('vr (km/s)')
plt.ylabel('VR (%)')
plt.show()
4489955191 | """
Name : test_addmember.py
Author : Tiffany
Time : 2022/8/1 19:02
DESC:
"""
import time
import yaml
from faker import Faker
from selenium import webdriver
from selenium.common import StaleElementReferenceException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
class TestAddMemberFromeContact:
    """Selenium UI tests for WeCom (企业微信) admin: add a member and a
    department from the contacts page. Login is done once per class by
    replaying cookies stored in ../data/cookies.yaml.
    """

    def setup_class(self):
        # Fake Chinese-locale identity data for the new member.
        fake = Faker("zh_CN")
        self.username = fake.name()
        self.acctid = fake.ssn()
        self.mobile = fake.phone_number()
        # Instantiate the browser driver.
        self.driver = webdriver.Chrome()
        self.driver.implicitly_wait(5)
        self.driver.maximize_window()
        # I. Login
        # 1. Open the WeCom admin login page.
        self.driver.get("https://work.weixin.qq.com/wework_admin/loginpage_wx?from=myhome")
        # 2. Load the locally stored cookies.
        cookie = yaml.safe_load(open("../data/cookies.yaml"))
        # 3. Inject the cookies into the session.
        for c in cookie:
            self.driver.add_cookie(c)
        time.sleep(3)
        # 4. Revisit the admin home page, now authenticated.
        self.driver.get("https://work.weixin.qq.com/wework_admin/loginpage_wx?from=myhome")

    def teardown_class(self):
        # NOTE(review): the driver is never quit; consider self.driver.quit().
        pass

    def test_addmember(self):
        """Contacts page: add a member."""
        # Click the contacts menu entry.
        self.driver.find_element(By.ID, "menu_contacts").click()
        # Click the "add member" button; retried because the contacts pane
        # re-renders and can raise StaleElementReferenceException.
        time.sleep(5)
        attempts = 0
        while attempts < 3:
            try:
                self.driver.find_element\
                    (By.XPATH, '//*[@id="js_contacts82"]/div/div[2]/div/div[2]/div[3]/div[1]/a[1]').click()
                time.sleep(5)
                # Fill in name, account id and phone, then save.
                self.driver.find_element(By.ID, "username").send_keys(self.username)
                self.driver.find_element(By.ID, "memberAdd_acctid").send_keys(self.acctid)
                self.driver.find_element(By.ID, "memberAdd_phone").send_keys(self.mobile)
                self.driver.find_elements(By.CLASS_NAME, "js_btn_save")[0].click()
                break
            except StaleElementReferenceException:
                attempts += 1
        # Assert the result: wait for the toast and check its text
        # ("保存成功" = "saved successfully").
        loc_tips = (By.ID, "js_tips")
        WebDriverWait(self.driver, 10, 2).until(expected_conditions.visibility_of_element_located(loc_tips))
        tips_value = self.driver.find_element(*loc_tips).text
        assert tips_value == "保存成功"

    def test_dept_contact(self):
        """Contacts page: add a department."""
        # Click the contacts menu entry.
        self.driver.find_element(By.ID, "menu_contacts").click()
        # Click the "+" button.
        self.driver.find_element(By.XPATH, "//i[@class='member_colLeft_top_addBtn']").click()
        # Choose "add department" (添加部门).
        self.driver.find_element(By.XPATH, "//a[text()='添加部门']").click()
        # Enter the department name.
        self.driver.find_element(By.XPATH, "//input[@name='name']").send_keys(self.username)
        # Pick the parent department (the entry labelled '加加加').
        self.driver.find_element(By.XPATH, "//span[@class='js_parent_party_name']").click()
        self.driver.find_element(By.XPATH, "//div[@class='inputDlg_item']//a[text()='加加加']").click()
        # Confirm (确定).
        self.driver.find_element(By.XPATH, "//a[text()='确定']").click()
        # Assert the result toast ("新建部门成功" = "department created").
        loc_tips = (By.ID, "js_tips")
        WebDriverWait(self.driver, 10, 2).until(expected_conditions.visibility_of_element_located(loc_tips))
        tips_value = self.driver.find_element(*loc_tips).text
        assert tips_value == "新建部门成功"
        pass
| TiffanyWang1108/web_camp | prepare/test_case/test_addmember.py | test_addmember.py | py | 3,698 | python | en | code | 0 | github-code | 36 |
73818234985 | import requests
from bs4 import BeautifulSoup
from database import DataBase
from log import log
from scrape import Scrape
class Flipkart(Scrape):
    """Scrapes smartphone listings from flipkart.com search-result pages.

    NOTE(review): self.soup, self.url2, self.headers, self.page,
    self.listPhone and self.phoneinfo are presumably initialised by the
    Scrape base class -- confirm against scrape.py.
    """

    def formatData(self, soupText):
        """Extract (phone, price, ram) from one product card.

        Args:
            soupText (bs4.BeautifulSoup): the product-card element.

        Returns:
            tuple: (phone, price, ramD) where
                - phone (str | None): product title, or None if absent,
                - price (str | int): price digits as a string ('₹' and ','
                  stripped), or 0 when no price element was found,
                - ramD (str | int): RAM amount as a bare number string, or 0
                  when no RAM spec line was present.
        """
        phone = soupText.find("div", class_="_4rR01T")
        price = soupText.find("div", class_="_30jeq3 _1_WHN1")
        ram = soupText.find_all("li", class_="rgWa7D")
        ramD = 0
        # formatting the phone and price variable and extracting the Ram value
        if price is not None:
            price = price.text
            price = price.replace(",", "")
            price = price.replace("₹", "")
        if price is None:
            price = 0
        if phone is not None:
            phone = phone.text
        # formatting the Ram value: spec lines look like "4 GB RAM | 64 GB ROM";
        # pick the '|'-separated chunk containing "RAM" and strip its units.
        for oneRam in ram:
            if "RAM" in oneRam.text:
                ramList = oneRam.text.split("|")
                for one in ramList:
                    if "RAM" in one:
                        ramD = one
                ramD = ramD.replace("GB", "")
                ramD = ramD.replace("RAM", "")
                ramD = ramD.replace(" ", "")
                ramD = ramD.replace("MB", "")
        return phone, price, ramD

    def scrape(self, hostname):
        """Walk the paginated results, collect unique phones into self.phoneinfo.

        Args:
            self: The instance of the class that the function is being called on.
            hostname: The database host name (passed to DataBase).

        NOTE(review): int(data[2]) will raise for fractional RAM values such
        as "1.5" -- confirm whether such listings can occur.
        """
        self.item = DataBase(hostname).getIndex()
        # '_1LKTO3' is the pagination link class; loop while a next/prev
        # page link exists on the last fetched page.
        while self.soup.find('a', class_='_1LKTO3'):
            log.info("Scrapping flipkart.com website, page no. :" + str(self.page))
            url = self.url2 + str(self.page)
            req = requests.get(url, headers=self.headers)
            self.soup = BeautifulSoup(req.content, 'html.parser')
            box = self.soup.find_all("div", class_="_2kHMtA")
            for onePhone in box:
                data = self.formatData(onePhone)
                # Deduplicate on the full (phone, price, ram) tuple.
                if data not in self.listPhone:
                    self.item += 1
                    self.listPhone.append(data)
                    info = {
                        "_id": self.item,
                        "name": data[0],
                        "price": float(data[1]),
                        "ram": int(data[2])
                    }
                    self.phoneinfo.append(info)
            self.page += 1
        log.info("Scrapping Completed for flipkart.com")
| ujitkumar1/ramranger | src/flipkart_scrape.py | flipkart_scrape.py | py | 3,338 | python | en | code | 0 | github-code | 36 |
35051050576 | import torch
import torchsl
from torchsl._extensions import _has_ops
from ._helpers import *
__all__ = ['lpp']
# ===========================================================
# Locality Preserving Projection
# ===========================================================
# noinspection DuplicatedCode
def lpp(X):
    """Locality Preserving Projection scatter matrices.

    Builds an affinity matrix from an RBF kernel over the samples and returns
    the pair (Sw, Sb) whose generalized eigenproblem yields the LPP projection:
        Sw = X^T (D - W) X   (graph-Laplacian scatter)
        Sb = X^T D X         (degree-weighted scatter)

    Args:
        X (torch.Tensor): sample matrix of shape (num_samples, num_features).

    Returns:
        tuple[torch.Tensor, torch.Tensor]: (Sw, Sb).
    """
    # if _has_ops():
    #     return torchsl.ops.lpp(X, y, y_unique)
    # Removed dead locals from the original (`options`, `num_samples`, and the
    # redundant A -> W alias): none of them affected the result.
    W = torchsl.rbf_kernel(X)   # pairwise affinity (adjacency) matrix
    D = W.sum(-1).diag()        # diagonal degree matrix of the affinity graph
    Sw = X.t().mm(D - W).mm(X)
    Sb = X.t().mm(D).mm(X)
    return Sw, Sb
| inspiros/pcmvda | torchsl/ops/subspace_learning/lpp.py | lpp.py | py | 608 | python | en | code | 1 | github-code | 36 |
34773386089 | from flask import Flask, render_template
from bs4 import BeautifulSoup
import requests, json
def scrapCars():
    """Scrape car brand names from izmostock.com and dump them to data.json.

    Fetches the brand-listing page, collects the text of every <span> inside
    the #page-content container, and writes the list as pretty-printed JSON.
    Network and file I/O only; returns nothing.
    """
    source = requests.get('https://www.izmostock.com/car-stock-photos-by-brand').text
    soup = BeautifulSoup(source, 'lxml')
    my_table = soup.find('div', {'id': 'page-content'})
    # find_all is the current bs4 name; findAll is the deprecated alias.
    cars = [link.text for link in my_table.find_all('span')]
    with open('data.json', 'w', encoding='utf-8') as f:
        json.dump(cars, f, ensure_ascii=False, indent=4)
| tech387-academy-python/PythonAppDemo | webscraper.py | webscraper.py | py | 537 | python | en | code | 0 | github-code | 36 |
72284442024 | import kth_native as nat
import sys
import time
import asyncio
import kth
# def fetch_last_height_async(chain):
# loop = asyncio.get_event_loop()
# fut = loop.create_future()
# nat.chain_fetch_last_height(chain, lambda err, h: fut.set_result((err, h)))
# return fut
def generic_async_1(func, *args):
    """Adapt a callback-style native call with a 1-value callback to a future.

    Invokes ``func(*args, callback)`` and returns a future that resolves with
    the single value the callback receives. Must be called from a coroutine.
    """
    # get_running_loop() guarantees the future belongs to the loop that will
    # await it; get_event_loop() could silently create/bind a different loop
    # when misused outside async context, leaving the future never resolved.
    # NOTE(review): set_result is only safe from the loop's own thread; if
    # kth_native fires callbacks from another thread this should go through
    # loop.call_soon_threadsafe — confirm the native threading model.
    loop = asyncio.get_running_loop()
    fut = loop.create_future()
    func(*args, lambda a: fut.set_result(a))
    return fut
def generic_async_2(func, *args):
    """Adapt a callback-style native call with a 2-value callback to a future.

    Invokes ``func(*args, callback)`` and returns a future that resolves with
    the ``(a, b)`` tuple the callback receives. Must be called from a coroutine.
    """
    # get_running_loop() binds the future to the loop that will await it
    # (see generic_async_1 for the rationale and thread-safety caveat).
    loop = asyncio.get_running_loop()
    fut = loop.create_future()
    func(*args, lambda a, b: fut.set_result((a, b)))
    return fut
def generic_async_3(func, *args):
    """Adapt a callback-style native call with a 3-value callback to a future.

    Invokes ``func(*args, callback)`` and returns a future that resolves with
    the ``(a, b, c)`` tuple the callback receives. Must be called from a
    coroutine.
    """
    # get_running_loop() binds the future to the loop that will await it
    # (see generic_async_1 for the rationale and thread-safety caveat).
    loop = asyncio.get_running_loop()
    fut = loop.create_future()
    func(*args, lambda a, b, c: fut.set_result((a, b, c)))
    return fut
def generic_async_4(func, *args):
    """Adapt a callback-style native call with a 4-value callback to a future.

    Invokes ``func(*args, callback)`` and returns a future that resolves with
    the ``(a, b, c, d)`` tuple the callback receives. Must be called from a
    coroutine.
    """
    # get_running_loop() binds the future to the loop that will await it
    # (see generic_async_1 for the rationale and thread-safety caveat).
    loop = asyncio.get_running_loop()
    fut = loop.create_future()
    func(*args, lambda a, b, c, d: fut.set_result((a, b, c, d)))
    return fut
# async def generic_async_3(func, *args):
# future = asyncio.Future()
# loop = asyncio.get_event_loop()
# def callback(args):
# loop.call_soon_threadsafe(future.set_result, args)
# func(*args, callback)
# callback_args = await future
# return callback_args
##
# Represents the Bitcoin blockchain.
class Chain:
    """Query interface over the node's local copy of the blockchain.

    The camelCase ``get*``/``organize*`` coroutines wrap the callback-based
    native API (``kth_native``, imported as ``nat``) through the module-level
    ``generic_async_*`` adapters; each returns the tuple of values the native
    layer passes to its callback, starting with an error code (0 == success).
    The remaining snake_case methods keep the original callback style.
    """

    def __init__(self, executor, chain):
        ##
        # @private
        self._executor = executor  # owning executor; used by subscriptions
        self._chain = chain        # opaque native chain handle

    # Gets the height of the highest block in the local copy of the blockchain.
    # This number will grow as the node synchronizes with the blockchain.
    # This is an asynchronous method; a callback must be provided to receive the result
    async def getLastHeight(self):
        """Return (error, height) for the chain tip."""
        ret = await generic_async_2(nat.chain_fetch_last_height, self._chain)
        return ret

    # Given a block hash, it queries the chain for the block height.
    async def getBlockHeight(self, hash):
        """Return (error, height) for the block with the given hash."""
        ret = await generic_async_2(nat.chain_fetch_block_height, self._chain, hash)
        return ret

    # Get the block header from the specified height in the chain.
    async def getBlockHeaderByHeight(self, height):
        """Return (error, Header | None, height)."""
        (err, obj, height) = await generic_async_3(nat.chain_fetch_block_header_by_height, self._chain, height)
        if err != 0:
            return (err, None, height)
        return (err, kth.chain.Header.fromNative(obj), height)

    # Get the block header from the specified block hash.
    async def getBlockHeaderByHash(self, hash):
        """Return (error, Header | None, height)."""
        (err, obj, height) = await generic_async_3(nat.chain_fetch_block_header_by_hash, self._chain, hash)
        if err != 0:
            return (err, None, height)
        return (err, kth.chain.Header.fromNative(obj), height)

    # Gets a block from the specified height in the chain.
    async def getBlockByHeight(self, height):
        """Return (error, Block | None, height)."""
        (err, obj, height) = await generic_async_3(nat.chain_fetch_block_by_height, self._chain, height)
        if err != 0:
            return (err, None, height)
        return (err, kth.chain.Block.fromNative(obj), height)

    # Gets a block from the specified hash.
    async def getBlockByHash(self, hash):
        """Return (error, Block | None, height)."""
        (err, obj, height) = await generic_async_3(nat.chain_fetch_block_by_hash, self._chain, hash)
        if err != 0:
            return (err, None, height)
        return (err, kth.chain.Block.fromNative(obj), height)

    # Get a transaction by its hash.
    async def getTransaction(self, hash, require_confirmed):
        """Return (error, Transaction | None, index, height)."""
        (err, obj, index, height) = await generic_async_4(nat.chain_fetch_transaction, self._chain, hash, require_confirmed)
        if err != 0:
            return (err, None, index, height)
        return (err, kth.chain.Transaction.fromNative(obj), index, height)

    # Given a transaction hash, it fetches the height and position inside the block.
    async def getTransactionPosition(self, hash, require_confirmed):
        """Return (error, position, height) for the transaction."""
        ret = await generic_async_3(nat.chain_fetch_transaction_position, self._chain, hash, require_confirmed)
        return ret

    ##
    # Given a block height in the chain, it retrieves the block's associated Merkle block.
    # Args:
    #    height (unsigned int): Block height in the chain.
    #    handler (Callable (error, merkle_block, block_height)): Will be executed when the chain is queried.
    #        * error (int): Error code. 0 if successful.
    #        * merkle_block (MerkleBlock): The requested block's Merkle block.
    #        * block_height (unsigned int): The block's height in the chain.
    def fetch_merkle_block_by_height(self, height, handler):
        # NOTE(review): self._fetch_merkle_block_converter is not defined in
        # this class — presumably supplied elsewhere; verify before relying
        # on this code path.
        self._fetch_merkle_block_handler = handler
        nat.chain_fetch_merkle_block_by_height(self._chain, height, self._fetch_merkle_block_converter)

    ##
    # Given a block hash, it retrieves the block's associated Merkle block.
    # Args:
    #    hash (bytearray): 32 bytes of the block hash.
    #    handler (Callable (error, merkle_block, block_height)): Will be executed when the chain is queried.
    #        * error (int): Error code. 0 if successful.
    #        * merkle_block (MerkleBlock): The requested block's Merkle block.
    #        * block_height (unsigned int): The block's height in the chain.
    def fetch_merkle_block_by_hash(self, hash, handler):
        # NOTE(review): see fetch_merkle_block_by_height about the missing converter.
        self._fetch_merkle_block_handler = handler
        nat.chain_fetch_merkle_block_by_hash(self._chain, hash, self._fetch_merkle_block_converter)

    # ----------------------------------------------------------------------------
    # Note: removed on 3.3.0

    # def _fetch_output_converter(self, e, output):
    #     if e == 0:
    #         _output = Output(output)
    #     else:
    #         _output = None

    #     self._fetch_output_handler(e, _output)

    # ##
    # # Get a transaction output by its transaction hash and index inside the transaction.
    # # Args:
    # #    hash (bytearray): 32 bytes of the transaction hash.
    # #    index (unsigned int): Output index inside the transaction (starting at zero).
    # #    require_confirmed (int): 1 if and only if transaction should be in a block, 0 otherwise.
    # #    handler (Callable (error, output)): Will be executed when the chain is queried.
    # #        * error (int): Error code. 0 if successful.
    # #        * output (Output): Output found.
    # def fetch_output(self, hash, index, require_confirmed, handler):
    #     self._fetch_output_handler = handler
    #     nat.chain_fetch_output(self._chain, hash, index, require_confirmed, self._fetch_output_converter)
    # ----------------------------------------------------------------------------

    async def organizeBlock(self, block):
        """Submit a block to the chain organizer; returns the error code."""
        # void chain_organize_handler(kth_chain_t chain, void* ctx, kth_error_code_t error) {
        ret = await generic_async_1(nat.chain_organize_block, self._chain, block.toNative())
        return ret

    async def organizeTransaction(self, transaction):
        """Submit a transaction to the chain organizer; returns the error code."""
        ret = await generic_async_1(nat.chain_organize_transaction, self._chain, transaction.toNative())
        return ret

    ##
    # Determine if a transaction is valid for submission to the blockchain.
    # Args:
    #    transaction (Transaction): transaction to be checked.
    #    handler (Callable (error, message)): Will be executed after the chain is queried.
    #        * error (int): error code. 0 if successful.
    #        * message (str): string describing the result of the query. Example: 'The transaction is valid'
    def validate_tx(self, transaction, handler):
        nat.chain_validate_tx(self._chain, transaction, handler)

    def _fetch_compact_block_converter(self, e, compact_block, height):
        # NOTE(review): _CompactBlock is not imported/defined in this module;
        # this path raises NameError when hit — confirm where it should come from.
        if e == 0:
            _compact_block = _CompactBlock(compact_block)
        else:
            _compact_block = None
        self._fetch_compact_block_handler(e, _compact_block, height)

    def _fetch_compact_block_by_height(self, height, handler):
        self._fetch_compact_block_handler = handler
        nat.chain_fetch_compact_block_by_height(self._chain, height, self._fetch_compact_block_converter)

    def _fetch_compact_block_by_hash(self, hash, handler):
        self._fetch_compact_block_handler = handler
        nat.chain_fetch_compact_block_by_hash(self._chain, hash, self._fetch_compact_block_converter)

    def _fetch_spend_converter(self, e, point):
        # NOTE(review): Point is not imported/defined in this module — verify.
        if e == 0:
            _spend = Point(point)
        else:
            _spend = None
        self._fetch_spend_handler(e, _spend)

    ##
    # Fetch the transaction input which spends the indicated output. The `fetch_spend_handler`
    # callback will be executed after querying the chain.
    # Args:
    #    output_point (OutputPoint): tx hash and index pair.
    #    handler (Callable (error, input_point)): Will be executed when the chain is queried.
    #        * error (int): Error code. 0 if successful.
    #        * input_point (Point): Tx hash and index pair where the output was spent.
    def fetch_spend(self, output_point, handler):
        self._fetch_spend_handler = handler
        nat.chain_fetch_spend(self._chain, output_point._ptr, self._fetch_spend_converter)

    def _subscribe_blockchain_converter(self, e, fork_height, blocks_incoming, blocks_replaced):
        # Returning False tells the native layer to drop the subscription.
        if self._executor.stopped or e == 1:
            return False
        # NOTE(review): BlockList is not imported/defined in this module — verify.
        if e == 0:
            _incoming = BlockList(blocks_incoming) if blocks_incoming else None
            _replaced = BlockList(blocks_replaced) if blocks_replaced else None
        else:
            _incoming = None
            _replaced = None
        return self._subscribe_blockchain_handler(e, fork_height, _incoming, _replaced)

    def subscribe_blockchain(self, handler):
        self._subscribe_blockchain_handler = handler
        nat.chain_subscribe_blockchain(self._executor._executor, self._chain, self._subscribe_blockchain_converter)

    def _subscribe_transaction_converter(self, e, tx):
        if self._executor.stopped or e == 1:
            return False
        if e == 0:
            # Fix: the original read ``Transacion(tx)`` — a misspelled name that
            # exists nowhere in this module and raised NameError at runtime.
            # Convert via the same path getTransaction() uses.
            # TODO(review): confirm the native layer hands a convertible
            # transaction pointer to this callback.
            _tx = kth.chain.Transaction.fromNative(tx) if tx else None
        else:
            _tx = None
        # NOTE(review): unlike the blockchain converter, the handler's result is
        # not returned to the native layer here — confirm this is intentional.
        self._subscribe_transaction_handler(e, _tx)

    def _subscribe_transaction(self, handler):
        self._subscribe_transaction_handler = handler
        nat.chain_subscribe_transaction(self._executor._executor, self._chain, self._subscribe_transaction_converter)

    def unsubscribe(self):
        nat.chain_unsubscribe(self._chain)

    ##
    # @var history_fetch_handler_
    # Internal callback which is called by the native fetch_history function and marshalls parameters to the managed callback

    ##
    # @var fetch_block_header_handler_
    # Internal callback which is called by the native fetch_block_header function and marshalls parameters to the managed callback

    # ----------------------------------------------------------------------
    # TODO(fernando): implement the following
    # ----------------------------------------------------------------------

    # ##
    # # Get a list of output points, values, and spends for a given payment address.
    # # This is an asynchronous method; a callback must be provided to receive the result
    # #
    # # Args:
    # #    address (PaymentAddress): Wallet to search.
    # #    limit (unsigned int): Max amount of results to fetch.
    # #    from_height (unsigned int): Starting height to search for transactions.
    # #    handler (Callable (error, list)): Will be executed when the chain is queried.
    # #        * error (int): Error code. 0 if and only if successful.
    # #        * list (HistoryList): A list with every element found.
    # def fetch_history(self, address, limit, from_height, handler):
    #     self.history_fetch_handler_ = handler
    #     nat.chain_fetch_history(self._chain, address, limit, from_height, self._history_fetch_handler_converter)

    # def _history_fetch_handler_converter(self, e, l):
    #     if e == 0:
    #         list = HistoryList(l)
    #     else:
    #         list = None
    #     self.history_fetch_handler_(e, list)

    # ##### Stealth
    # def _stealth_fetch_handler_converter(self, e, l):
    #     if e == 0:
    #         _list = StealthList(l)
    #     else:
    #         _list = None
    #     self._stealth_fetch_handler(e, _list)

    # ##
    # # Get metadata on potential payment transactions by stealth filter.
    # # Given a filter and a height in the chain, it queries the chain for transactions matching the given filter.
    # # Args:
    # #    binary_filter_str (string): Must be at least 8 bits in length. example "10101010"
    # #    from_height (unsigned int): Starting height in the chain to search for transactions.
    # #    handler (Callable (error, list)): Will be executed when the chain is queried.
    # #        * error (int): Error code. 0 if and only if successful.
    # #        * list (StealthList): list with every transaction matching the given filter.
    # def fetch_stealth(self, binary_filter_str, from_height, handler):
    #     self._stealth_fetch_handler = handler
    #     binary_filter = Binary.construct_string(binary_filter_str)
    #     nat.chain_fetch_stealth(self._chain, binary_filter._ptr, from_height, self._stealth_fetch_handler_converter)
| k-nuth/py-api | kth/chain/chain.py | chain.py | py | 14,093 | python | en | code | 0 | github-code | 36 |
37502388397 | check = [0] * 10001
# Sieve of Eratosthenes over [0, 10000]: check[i] == 1 marks i as non-prime.
# The original used trial division whose early-exit test (`i * i < j`) was
# inverted (meant `j * j > i`) and therefore never fired, making the pass
# quadratic; this produces the identical check[] array directly.
check = [0] * 10001  # re-initialize defensively; self-contained sieve state
visit = [0] * 10001  # dedup flags for combination sums found later
check[1] = 1  # 1 is not prime (check[0] intentionally left 0, as before)
for i in range(2, 10001):
    if check[i]:
        continue  # i already known composite
    for j in range(i * i, 10001, i):
        check[j] = 1
# n: how many numbers are given, m: how many must be chosen per combination.
n, m = map(int, input().split())
a = list(map(int, input().split()))
# Prime combination sums found so far (deduplicated via the visit[] flags).
ans = []
def rec(depth, idx, value):
    """Choose m numbers from a[idx:] and record each prime sum exactly once.

    depth: how many numbers were already picked; idx: next candidate index;
    value: running sum of the picked numbers. Mutates the module-level
    visit[] flags and ans list.
    """
    global n, ans
    if depth < m:
        # Still need more picks: try every remaining candidate in order.
        for nxt in range(idx, n):
            rec(depth + 1, nxt + 1, value + a[nxt])
        return
    # Exactly m numbers chosen: keep the sum only if it is a new prime.
    if check[value] or visit[value]:
        return
    visit[value] = 1
    ans.append(value)
# Enumerate every m-element combination, collecting prime sums into ans.
rec(0, 0, 0)
if ans:
    # Print the prime sums in increasing order, space separated.
    print(" ".join(map(str, sorted(ans))))
else:
    print(-1)  # no combination summed to a prime
18252621901 | from typing import List
class Solution:
    """Two solutions for LeetCode 1413: minimum start value so that every
    step-by-step prefix sum of startValue + nums stays >= 1."""

    def minStartValue1(self, nums: List[int]) -> int:
        """Binary-search the smallest valid start value.

        O(n log(m*n)) where m bounds |nums[i]|. The original hard-coded
        m = 100 (the LeetCode constraint); deriving it from the input keeps
        the same results in-range and also handles values outside it.
        """
        n = len(nums)
        # Worst case: every element at the negative extreme, so the answer is
        # at most m*n + 1.
        m = max(max(map(abs, nums), default=1), 1)
        left = 1
        right = m * n + 1
        while left < right:
            middle = (left + right) // 2
            # Simulate the prefix sums starting from `middle`.
            total = middle
            is_valid = True
            for num in nums:
                total += num
                if total < 1:
                    is_valid = False
                    break
            if is_valid:
                right = middle  # feasible: try a smaller start value
            else:
                left = middle + 1  # infeasible: must start higher
        return left

    def minStartValue2(self, nums: List[int]) -> int:
        """Direct O(n) solution: 1 minus the minimum prefix sum (clamped to >= 1)."""
        min_val = 0
        total = 0
        for num in nums:
            total += num
            min_val = min(min_val, total)
        return -min_val + 1
# Quick self-check with the sample input from the problem statement;
# both implementations must agree on the answer (5).
solution = Solution()
nums = [-3,2,-3,4,2]
assert solution.minStartValue1(nums) == 5, "Should be 5"
assert solution.minStartValue2(nums) == 5, "Should be 5"
| hujienan/Jet-Algorithm | leetcode/1413. Minimum Value to Get Positive Step by Step Sum/index.py | index.py | py | 951 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.