| seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
74078292179 | from pyhdf.HDF import *
from pyhdf.V import *
from pyhdf.VS import *
from pyhdf.SD import *
import numpy as np
import pprint
from HDFread import HDFread # read HDF files
from decoder import decoderScenario
import cartopy.crs as ccrs
import cartopy.feature as cfeature
# matplotlib: used for plotting charts
import matplotlib.pyplot as plt
# shapely: used for handling point and line data
import shapely.geometry as sgeom
import warnings
import re
import pandas as pd
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import matplotlib.ticker as mticker
import os
def readmac(src,date):
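# Read one MAC HDF granule: flatten its geolocation, the seven Brightness_Temperature
# bands, the cloud-top / tropopause / cloud-fraction fields and the surface fields into
# a per-pixel pandas DataFrame tagged with the given date.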
# print(src)
# src = '../data/MAC/20080101/MAC06S0.A2008001.0000.002.2017074151447.hdf'
hdf = SD(src)
lat = hdf.select('Latitude').get().flatten() # latitude
lon = hdf.select('Longitude').get().flatten() # longitude
time = np.zeros(len(lat))
time[:] = date # date of the granule
bt0 = np.array(hdf.select('Brightness_Temperature').get()[0].flatten()) # brightness temperature, band 0
bt1 = np.array(hdf.select('Brightness_Temperature').get()[1].flatten())
bt2 = np.array(hdf.select('Brightness_Temperature').get()[2].flatten())
bt3 = np.array(hdf.select('Brightness_Temperature').get()[3].flatten())
bt4 = np.array(hdf.select('Brightness_Temperature').get()[4].flatten())
bt5 = np.array(hdf.select('Brightness_Temperature').get()[5].flatten())
bt6 = np.array(hdf.select('Brightness_Temperature').get()[6].flatten())
ctt = np.array(hdf.select('Cloud_Top_Temperature').get().flatten()) # cloud top temperature
cth = np.array(hdf.select('Cloud_Top_Height').get().flatten()) # cloud top height
ctp = np.array(hdf.select('Cloud_Top_Pressure').get().flatten()) # cloud top pressure
th = np.array(hdf.select('Tropopause_Height').get().flatten()) # tropopause height
cf = np.array(hdf.select('Cloud_Fraction').get().flatten()) # cloud fraction
sft = np.array(hdf.select('Surface_Temperature').get().flatten()) # surface temperature
sfp = np.array(hdf.select('Surface_Pressure').get().flatten()) # surface pressure
df1 = pd.DataFrame({'lat': lat, 'lon': lon,'date':time,
'bt0': bt0, 'bt1': bt1,'bt2': bt2,'bt3': bt3,
'bt4': bt4, 'bt5': bt5, 'bt6': bt6,
'ctt': ctt,'cth': cth,'ctp': ctp,
'th': th,'cf': cf,
'sft': sft,'sfp': sfp})
return df1
def readcloudsat1(csFilePath,cs_file_list):
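# Read each CloudSat granule, decode its cloud scenario with decoderScenario, and return
# a DataFrame holding lon, lat and the 125 per-bin cloud type columns (type0..type124).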
csclassdata = np.empty(shape=[0, 127], dtype=float)
for path in cs_file_list:
# file1 = "../../data/CloudSat/2008182053651_11561_CS_2B-CLDCLASS_GRANULE_P1_R05_E02_F00.hdf"
file1 = str(csFilePath + '\\' + path)
hdf = SD(file1)
type = decoderScenario(file1)
lon = HDFread(file1, 'Longitude').flatten()
lat = HDFread(file1, 'Latitude').flatten()
csclass = np.arange(len(lon) * 127, dtype="float").reshape(len(lon), 127)
csclass[:, 0] = lon
csclass[:, 1] = lat
for i in range(125):
csclass[:, 2 + i] = type[:, i]
csclassdata = np.append(csclassdata, csclass, axis=0)
datacsDict = {} # convert the CloudSat data into a dict
for item in range(127):
if item == 0:
a = {'lon': csclassdata[:, 0]}
elif item == 1:
a = {'lat': csclassdata[:, 1]}
else:
a = {'type' + str(item - 2): csclassdata[:, item]}
datacsDict.update(a)
# print(datacsDict)
datacs = pd.DataFrame(datacsDict)
return datacs
def readcloudsat(csFilePath,cs_file_list):
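# Read each CloudSat granule but keep only a per-profile summary: lon, lat, the number of
# cloud layers (CloudLayer vdata), and the type and top height of the first layer.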
csclassdata = np.empty(shape=[0, 5], dtype=float)
for path in cs_file_list:
# file1 = "../../data/CloudSat/2008182053651_11561_CS_2B-CLDCLASS_GRANULE_P1_R05_E02_F00.hdf"
file1 = str(csFilePath + '\\' + path)
hdf = SD(file1)
# type = decoderScenario(file1)
lon = HDFread(file1, 'Longitude').flatten()
lat = HDFread(file1, 'Latitude').flatten()
CloudLayerType = hdf.select('CloudLayerType').get() # read CloudLayerType
CloudLayerTop = hdf.select('CloudLayerTop').get() # read CloudLayerTop
# read CloudLayer
hdfobj = HDF(file1, HC.READ)
vs = hdfobj.vstart()
v = hdfobj.vgstart()
layertype_index = vs.find('CloudLayer') # location of the CloudLayer vdata
nrec = vs.attach(layertype_index).inquire()[0] # total number of records
CloudLayer = np.array(vs.attach(layertype_index).read(nrec)).flatten() # read all records
csclass = np.arange(len(lon) * 5, dtype="float").reshape(len(lon), 5)
csclass[:, 0] = lon
csclass[:, 1] = lat
csclass[:, 2] = CloudLayer
csclass[:, 3] =CloudLayerType[:, 0]
csclass[:, 4] = CloudLayerTop[:, 0]
# for i in range(10):
# csclass[:, 3 + i] = CloudLayerType[:, i]
# for i in range(10):
# csclass[:, 13 + i] = CloudLayerTop[:, i]
csclassdata = np.append(csclassdata, csclass, axis=0)
datacsDict = {} # convert the CloudSat data into a dict
for item in range(5):
if item == 0:
a = {'lon': csclassdata[:, 0]}
elif item == 1:
a = {'lat': csclassdata[:, 1]}
elif item == 2:
a = {'CloudLayer': csclassdata[:, 2]}
elif item == 3:
a = {'type': csclassdata[:, 3]}
else:
a = {'top': csclassdata[:, 4]}
# elif item > 2 and item <= 12:
# a = {'type' + str(item - 3): csclassdata[:, item]}
# else:
# a = {'top' + str(item - 13): csclassdata[:, item]}
datacsDict.update(a)
# print(datacsDict)
datacs = pd.DataFrame(datacsDict)
datacs['CloudLayer'] = datacs['CloudLayer'].astype(int)
datacs['type'] = datacs['type'].astype(int)
# for item in range(0,11):
# if item == 10:
# datacs['CloudLayer'] = datacs['CloudLayer'].astype(int)
# else:
# datacs['type' + str(item)] = datacs['type' + str(item)].astype(int)
return datacs
def DAYlist(start,end):
yearmonthday = pd.date_range(start,end,freq="D").strftime("%Y%m%d").to_list()
return yearmonthday
if __name__ == '__main__':
# pass
days = DAYlist('2008-01-01','2008-12-31')
for date in days:
# date = "20080101"
print('Reading data for', date)
macFilePath = fr'E:\Code\python\cloudclassfication\data\MAC\{date}'
mac_file_list = os.listdir(macFilePath)
cloudsatFilePath = fr'E:\Code\python\cloudclassfication\data\CloudSat\{date}'
cloudsat_file_list = os.listdir(cloudsatFilePath)
# print(mac_file_list)
# MAC data
df0 = pd.DataFrame({})
for i in range(1, len(mac_file_list)):
macPath = "../../data/MAC/" + date + '/' + mac_file_list[i]
# print(macPath)
df1 = readmac(macPath,date)
df0 = pd.concat([df0, df1])
# print(mac_file_list[i] + " ok")
mac_path = '../../data/DoneData/MAC/MAC' + date +'.csv'
df0.to_csv(mac_path,index=False)
# print("共有数据",df0.shape[0],"条")
dfcs = readcloudsat(cloudsatFilePath,cloudsat_file_list)
cs_path = '../../data/DoneData/CloudSat/CS' +date +'.csv'
dfcs.to_csv(cs_path,index=False)
print("ok")
# break
print("全部读取完毕!") | eraevil/cloud_type_classfication | src/1_match/hdf_to_csv.py | hdf_to_csv.py | py | 7,346 | python | en | code | 0 | github-code | 13 |
36588242266 | from heapq import heappush, heappop
class MedianFinder:
def __init__(self):
"""
initialize your data structure here.
A max-heap to store the smaller half of the input numbers
A min-heap to store the larger half of the input numbers
"""
self.minheap, self.maxheap = [], []
def addNum(self, num: int) -> None:
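# Invariant: self.maxheap (values stored negated) keeps the smaller half and may hold
# at most one element more than self.minheap, which keeps the larger half.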
if len(self.minheap) == len(self.maxheap):
if not self.maxheap:
heappush(self.maxheap, -num)
else:
if num > self.minheap[0]: # num should belong to larger half
heappush(self.maxheap, -self.minheap[0])
heappop(self.minheap)
heappush(self.minheap, num)
else: # num belongs to the smaller half
heappush(self.maxheap, -num)
else:
if num > -self.maxheap[0]: # num belongs to the larger half
heappush(self.minheap, num)
else: # num belongs to the smaller half
heappush(self.minheap, -self.maxheap[0])
heappop(self.maxheap)
heappush(self.maxheap, -num)
def findMedian(self) -> float:
if len(self.maxheap) == len(self.minheap):
return (-self.maxheap[0] + self.minheap[0])/2.0
return -self.maxheap[0]
| ysonggit/leetcode_python | 0295_FindMedianfromDataStream.py | 0295_FindMedianfromDataStream.py | py | 1,360 | python | en | code | 1 | github-code | 13 |
10887855732 | import optuna
import pandas as pd
import numpy as np
import xgboost as xgb
from dotenv import dotenv_values
from sklearn.model_selection import train_test_split
config = dotenv_values('../../.env')
train = pd.read_parquet(config["ENGINEERED_DATA"] + "viml_train_V1.parquet")
def amex_metric_mod(y_true, y_pred):
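# Competition-style metric: the mean of (a) the fraction of positives captured in the
# top 4% of predictions (negatives weighted 20x) and (b) the Gini of the predictions
# normalized by the Gini of a perfectly ordered model.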
labels = np.transpose(np.array([y_true, y_pred]))
labels = labels[labels[:, 1].argsort()[::-1]]
weights = np.where(labels[:,0]==0, 20, 1)
cut_vals = labels[np.cumsum(weights) <= int(0.04 * np.sum(weights))]
top_four = np.sum(cut_vals[:,0]) / np.sum(labels[:,0])
gini = [0,0]
for i in [1,0]:
labels = np.transpose(np.array([y_true, y_pred]))
labels = labels[labels[:, i].argsort()[::-1]]
weight = np.where(labels[:,0]==0, 20, 1)
weight_random = np.cumsum(weight / np.sum(weight))
total_pos = np.sum(labels[:, 0] * weight)
cum_pos_found = np.cumsum(labels[:, 0] * weight)
lorentz = cum_pos_found / total_pos
gini[i] = np.sum((lorentz - weight_random) * weight)
return 0.5 * (gini[1]/gini[0] + top_four)
def objective(trial):
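# Optuna objective: hold out 20% of the training data, sample an XGBoost configuration
# from the search space below, train on the rest and return the metric on the hold-out
# split (the study maximizes this value).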
FEATURES = train.columns[:-1]
X_train, X_valid, y_train, y_valid = train_test_split(train[FEATURES],
train["target"],
test_size=0.2)
dtrain = xgb.DMatrix(X_train, label=y_train)
dvalid = xgb.DMatrix(X_valid, label=y_valid)
param = {
"verbosity": 0,
"objective": trial.suggest_categorical("objective", ["reg:logistic", "binary:logistic"]),
"tree_method": "gpu_hist",
# defines booster, gblinear for linear functions.
"booster": trial.suggest_categorical("booster", ["gbtree", "gblinear", "dart"]),
# L2 regularization weight.
"lambda": trial.suggest_float("lambda", 1e-8, 1.0, log=True),
# L1 regularization weight.
"alpha": trial.suggest_float("alpha", 1e-8, 1.0, log=True),
# sampling ratio for training data.
"subsample": trial.suggest_float("subsample", 0.2, 1.0),
# sampling according to each tree.
"colsample_bytree": trial.suggest_float("colsample_bytree", 0.2, 1.0),
}
if param["booster"] in ["gbtree", "dart"]:
# maximum depth of the tree, signifies complexity of the tree.
param["max_depth"] = trial.suggest_int("max_depth", 3, 9, step=2)
# minimum child weight, larger the term more conservative the tree.
param["min_child_weight"] = trial.suggest_int("min_child_weight", 2, 10)
param["eta"] = trial.suggest_float("eta", 1e-8, 1.0, log=True)
# defines how selective algorithm is.
param["gamma"] = trial.suggest_float("gamma", 1e-8, 1.0, log=True)
param["grow_policy"] = trial.suggest_categorical("grow_policy", ["depthwise", "lossguide"])
if param["booster"] == "dart":
param["sample_type"] = trial.suggest_categorical("sample_type", ["uniform", "weighted"])
param["normalize_type"] = trial.suggest_categorical("normalize_type", ["tree", "forest"])
param["rate_drop"] = trial.suggest_float("rate_drop", 1e-8, 1.0, log=True)
param["skip_drop"] = trial.suggest_float("skip_drop", 1e-8, 1.0, log=True)
bst = xgb.train(param, dtrain)
preds = bst.predict(dvalid)
metric = amex_metric_mod(y_valid.values, preds)
return metric
if __name__ == '__main__':
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=1000, timeout=3600)
print("Number of finished trials: ", len(study.trials))
print("Best trial:")
trial = study.best_trial
print(" Value: {}".format(trial.value))
print(" Params: ")
for key, value in trial.params.items():
print(" {}: {}".format(key, value)) | Dael-the-Mailman/ML-Capstone-Project | models/model_6/xgb.py | xgb.py | py | 3,926 | python | en | code | 0 | github-code | 13 |
4278345695 | from .dependencies import *
def get_tadm_picture_callbacks(app):
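# Registers the Dash callbacks for the TADM-pictures tab: carousel preview, uploads to
# blob storage, the pictures table, run re-assignment and picture deletion.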
@app.callback(
Output("tadm-images", "items"),
Input("tadm-pictures-table", "selectionChanged"),
Input("tadm-pictures-table", "rowData"),
)
def get_tadm_pictures(selection, data):
if ctx.triggered_id == "tadm-pictures-table" and data:
items = []
item = add_item_to_carousel(
title="Some ID",
description="Some Photo",
container_name="neumodxsystemqc-tadmpictures",
blob_name=selection[0]["FileId"],
)
items.append(item)
return items
return []
@app.callback(
Output("upload-tadm-message", "children"),
Output("upload-tadm-response", "is_open"),
Input("upload-tadm-pictures", "contents"),
State("upload-tadm-pictures", "filename"),
State("runset-selection-data", "data"),
State("runset-review", "data"),
State("upload-tadm-response", "is_open"),
)
def upload_tadm_image_to_blob_storage(
list_of_contents, list_of_filenames, runset_selection, runset_review, is_open
):
if list_of_contents:
files = {
list_of_filenames[i]: list_of_contents[i]
for i in range(len(list_of_filenames))
}
upload_status = []
for file in files:
"""
Upload file to blob storage
"""
content_type, content_string = files[file].split(",")
file_content = base64.b64decode(content_string)
file_id = str(uuid.uuid4()) + ".jpg"
file_url = save_uploaded_file_to_blob_storage(
file_content, file_id, "neumodxsystemqc-tadmpictures"
)
"""
Create Database Entry
"""
file_payload = {
"userId": session["user"].id,
"runSetId": runset_selection["id"],
"runSetReviewId": runset_review["id"],
"uri": file_url,
"name": file,
"fileid": file_id,
}
tadm_picture_url = os.environ["RUN_REVIEW_API_BASE"] + "TADMPictures"
resp = requests.post(
url=tadm_picture_url, json=file_payload, verify=False
)
upload_status.append(html.Li(file))
# Return a message with the URL of the uploaded file
return upload_status, not is_open
else:
return no_update
@app.callback(
Output("tadm-pictures-table", "rowData"),
Output("tadm-pictures-table", "columnDefs"),
Input("review-tabs", "active_tab"),
Input("upload-tadm-message", "children"),
Input("delete-tadm-picture-response", "is_open"),
Input("update-tadm-run-confirmation", "is_open"),
State("runset-selection-data", "data"),
State("runset-subject-ids", "data"),
State("runset-subject-descriptions", "data"),
)
def get_tadm_picture_table(
active_tab,
message_children,
delete_response_is_open,
update_run_response_is_open,
runset_data,
runset_subject_ids,
runset_subject_descriptions,
):
if (
(ctx.triggered_id == "upload-tadm-message")
or ctx.triggered_id == "review-tabs"
or (
ctx.triggered_id == "delete-tadm-picture-response"
and delete_response_is_open == True
)
or (
ctx.triggered_id == "update-tadm-run-confirmation"
and update_run_response_is_open == True
)
) and active_tab == "tadm-pictures":
"""
Get TADM Picture Info from API
"""
tadm_pictures_url = os.environ[
"RUN_REVIEW_API_BASE"
] + "RunSets/{}/tadmpictures".format(runset_data["id"])
runset = requests.get(tadm_pictures_url, verify=False).json()
"""
Extract Details from each TADM Picture into pandas DataFrame
"""
tadm_picture_data = pd.DataFrame(
columns=[
"Id",
"FileId",
"File Name",
"Uploaded By",
"Upload Date",
"Run Number",
"UserId",
]
)
idx = 0
runset_tadm_ids = runset_subject_ids["Cartridge"]
runset_run_descriptions = runset_subject_descriptions["Run"]
for tadm_picture in runset["tadmPictures"]:
entry = {}
entry["Id"] = tadm_picture["id"]
entry["UserId"] = tadm_picture["validFromUser"]
entry["FileId"] = tadm_picture["fileid"]
entry["File Name"] = tadm_picture["name"]
entry["Uploaded By"] = tadm_picture["runSetReview"]["reviewerName"]
entry["Upload Date"] = tadm_picture["validFrom"]
if tadm_picture["cartridgeId"]:
tadm_id = tadm_picture["cartridgeId"]
runset_tadm_id = [
key for key, val in runset_tadm_ids.items() if val == tadm_id
][0]
run_number = runset_run_descriptions[runset_tadm_id]
entry["Run Number"] = run_number
else:
entry["Run Number"] = tadm_picture["cartridgeId"]
tadm_picture_data.loc[idx] = entry
idx += 1
"""
Create Column Definitions for Table
"""
column_definitions = []
initial_selection = [x for x in tadm_picture_data.columns if "Id" not in x]
for column in tadm_picture_data.columns:
column_definition = {
"headerName": column,
"field": column,
"filter": True,
"sortable": True,
}
if column not in initial_selection:
column_definition["hide"] = True
if "Date" in column:
tadm_picture_data[column] = (
tadm_picture_data[column]
.astype("datetime64")
.dt.strftime("%d %B %Y %H:%M:%S")
)
column_definitions.append(column_definition)
return tadm_picture_data.to_dict(orient="records"), column_definitions
return no_update
@app.callback(
Output("delete-tadm-picture-button", "disabled"),
Input("tadm-pictures-table", "selectionChanged"),
)
def check_tadm_delete_validity(selection):
if ctx.triggered_id == "tadm-pictures-table":
if selection[0]["UserId"] == session["user"].id:
return False
return True
@app.callback(
Output("update-tadm-run-button", "disabled"),
Input("tadm-pictures-table", "selectionChanged"),
)
def check_tadm_run_update_validity(selection):
if ctx.triggered_id == "tadm-pictures-table":
if selection[0]:
return False
return True
@app.callback(
Output("update-tadm-run-selection", "is_open"),
Output("update-tadm-run-options", "options"),
Output("update-tadm-run-options", "value"),
Input("update-tadm-run-button", "n_clicks"),
Input("update-tadm-run-submit", "n_clicks"),
Input("update-tadm-run-cancel", "n_clicks"),
State("update-tadm-run-selection", "is_open"),
State("runset-subject-descriptions", "data"),
State("runset-subject-ids", "data"),
)
def control_update_tadm_run_selection_popup(
update_click,
submit_click,
cancel_click,
is_open,
runset_subject_descriptions,
runset_subject_ids,
):
if runset_subject_descriptions:
runset_tadm_descriptions = runset_subject_descriptions["Run"]
runset_tadm_ids = runset_subject_ids["Cartridge"]
run_options = {}
for runset_tadm_id in runset_tadm_ids:
run_options[runset_tadm_ids[runset_tadm_id]] = runset_tadm_descriptions[
runset_tadm_id
]
if ctx.triggered_id:
return (not is_open, run_options, [x for x in run_options][0])
else:
return is_open, run_options, [x for x in run_options][0]
else:
return no_update
@app.callback(
Output("update-tadm-run-confirmation", "is_open"),
Output("update-tadm-run-message", "children"),
Input("update-tadm-run-submit", "n_clicks"),
State("update-tadm-run-options", "value"),
State("tadm-pictures-table", "selectionChanged"),
State("update-tadm-run-confirmation", "is_open"),
)
def update_tadm_run(submit_button, cartridge_id, rowSelection, is_open):
confirmation_message = ""
if ctx.triggered_id:
update_tadm_picture_run_url = os.environ[
"RUN_REVIEW_API_BASE"
] + "TADMPictures/{}/cartridge".format(rowSelection[0]["Id"])
query_params = {"cartridgeid": cartridge_id}
response = requests.put(
url=update_tadm_picture_run_url, params=query_params, verify=False
)
if response.status_code == 200:
confirmation_message = "Run for TADM Picture was successfully updated."
else:
confirmation_message = (
"Run for TADM Picture was not successful updated."
)
return (not is_open, confirmation_message)
else:
return is_open, confirmation_message
@app.callback(
Output("delete-tadm-picture-confirmation", "is_open"),
Input("delete-tadm-picture-button", "n_clicks"),
Input("delete-tadm-picture-confirm", "n_clicks"),
Input("delete-tadm-picture-cancel", "n_clicks"),
State("delete-tadm-picture-confirmation", "is_open"),
prevent_initial_call=True,
)
def control_delete_tadm_picture_popup(
delete_click, confirm_click, cancel_click, is_open
):
if ctx.triggered_id and "delete" in ctx.triggered_id:
return not is_open
return is_open
@app.callback(
Output("delete-tadm-picture-response", "is_open"),
Input("delete-tadm-picture-confirm", "n_clicks"),
State("tadm-pictures-table", "selectionChanged"),
State("delete-tadm-picture-response", "is_open"),
)
def delete_tadm_picture(confirm_click, selection, is_open):
if ctx.triggered_id == "delete-tadm-picture-confirm":
delete_tadm_picture_url = os.environ[
"RUN_REVIEW_API_BASE"
] + "tadmpictures/{}".format(selection[0]["Id"])
response = requests.delete(url=delete_tadm_picture_url, verify=False)
return not is_open
else:
return is_open
| AaronRipleyQiagen/NeuMoDxRawDataServices.SystemQCDataSync | RunReview/Callbacks/tadm_pictures.py | tadm_pictures.py | py | 11,334 | python | en | code | 0 | github-code | 13 |
24362270366 | import sqlite3
from dataclasses import dataclass
@dataclass
class User:
name: str
age: int
gender: str
# class User:
# def __init__(self, name: str, age: int, gender : str):
# self.name = name
# self.age = age
# self.gender = gender
#
# try:
# connection = sqlite3.connect('sqlite.db')
# except sqlite3.DatabaseError:
# print('Error')
# finally:
# connection.close()
def create_user_table(cur: sqlite3.Cursor):
command = """
CREATE TABLE IF NOT EXISTS users(
id INTEGER PRIMARY KEY,
name TEXT,
age INTEGER,
gender TEXT)
"""
cur.execute(command)
def add_user(cur: sqlite3.Cursor, user: User):
command = """
INSERT INTO users(name, age, gender) VALUES (?, ?, ?)
"""
cur.execute(command, (user.name, user.age, user.gender))
def get_all_users(cur: sqlite3.Cursor):
command = """
SELECT * FROM users
"""
result = cur.execute(command)
return result.fetchall()
def update_user_name(cur: sqlite3.Cursor, user_id: int, name: str):
command = """
UPDATE users SET name = ? WHERE id = ?
"""
cur.execute(command, (name, user_id))
def get_user_by_id(cur: sqlite3.Cursor, user_id: int):
command = """
SELECT id, name, age, gender FROM users WHERE id = ?
"""
result = cur.execute(command, (user_id,))
return result.fetchall()
def delete_all_users(cur: sqlite3.Cursor):
command = """
DELETE FROM users
"""
cur.execute(command)
if __name__ == '__main__':
with sqlite3.connect('sqlite.db') as connection:
cursor = connection.cursor()
create_user_table(cursor)
delete_all_users(cursor)
sergey = User(name="Серёжа", age=16, gender='М')
irina = User(name='Ирина', age=17, gender='Ж')
add_user(cursor, sergey)
add_user(cursor, irina)
users = get_all_users(cursor)
print(users)
update_user_name(cursor, 1, 'Никита')
users = get_all_users(cursor)
print(users)
user = get_user_by_id(cursor, 2)
print(user)
| Den4ik20020/modul4 | modul3/lesson11/1.py | 1.py | py | 2,131 | python | en | code | 0 | github-code | 13 |
12700194915 | # -*- coding: utf-8 -*-
import scrapy
from bs4 import BeautifulSoup
from finance_crawl.items import FinanceCrawlItem
import json
import requests
class TvbsSpider(scrapy.Spider):
name = 'tvbs'
allowed_domains = ['news.tvbs.com.tw']
# start_urls = ['https://news.tvbs.com.tw/news/LoadMoreOverview?limit=30&offset=1&cateid=12&cate=tech&newsid=1012090&newslist=%27%27']
start_urls = ['https://news.tvbs.com.tw/tech']
def parse(self, response):
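# Collect article IDs from the landing page, then hit the LoadMoreOverview JSON endpoint
# 20 times (passing the oldest ID collected so far) to page further back, and finally
# schedule every collected article for parse_product.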
# first time get link
source = BeautifulSoup(response.text, 'lxml')
news_a_tag = source.select('div.content_center_list_box ul#block_pc li a')
links = []
for link in news_a_tag:
link = link.get('href').split('/')[-1]
links.append(link)
# page backwards from the last collected news ID to gather more links
for idx in range(20):
print(links[-1])
api = 'https://news.tvbs.com.tw/news/LoadMoreOverview?limit=30&offset=1&cateid=12&cate=tech&newsid='+ links[-1] +'&newslist=%27%27'
r = requests.get(api)
news_new_links = json.loads(r.text)['news_id_list'][3:].replace("'",'').split(',')
links = links + news_new_links
for news_id in links:
link = 'https://news.tvbs.com.tw/tech/' + news_id
yield scrapy.Request(link, callback = self.parse_product)
def parse_product(self, response):
item = FinanceCrawlItem()
# print(response.url)
source = BeautifulSoup(response.text, 'lxml')
title = source.select_one('h1.margin_b20').text
time = source.select_one('div.icon_time').text
text = source.select_one('div#news_detail_div').text.replace('\n','').replace('\t\t\t\t\t \t\t','')
item['time'] = time
item['title'] = title
item['link'] = response.url
item['text'] = text
return item | plusoneee/crawl.collection | finance_news/finance_crawl/spiders/tvbs.py | tvbs.py | py | 1,878 | python | en | code | 0 | github-code | 13 |
30140707590 | import os
import sys
import time
import cutil
import signal
import logging
from scraper_monitor import scraper_monitor
from models import db_session, Setting, Whatif, NoResultFound, DBSession
from scraper_lib import Scraper
from web_wrapper import DriverSeleniumPhantomJS, DriverRequests
# Create logger for this script
logger = logging.getLogger(__name__)
class Worker:
def __init__(self, scraper, web, whatif_id):
"""
Worker Profile
Run for each item that needs parsing
Each thread has a web instance that is used for parsing
"""
# `web` is what utilizes the profiles and proxying
self.web = web
self.scraper = scraper
self.whatif_id = whatif_id
# Get the sites content as a beautifulsoup object
logger.info("Getting what if {id}".format(id=self.whatif_id))
url = "http://what-if.xkcd.com/{id}/".format(id=self.whatif_id)
response = self.web.get_site(url, page_format='html')
if response is None:
logger.warning("Response was None for url {url}".format(url=url))
else:
parsed_data = self.parse(response)
if len(parsed_data) > 0:
# Add raw data to db
self.scraper.insert_data(parsed_data)
# Remove id from list of comics to get
self.scraper.whatif_ids.remove(self.whatif_id)
# Add success count to stats. Keeps track of how much ref data has been parsed
self.scraper.track_stat('ref_data_success_count', 1)
# Take it easy on the site
time.sleep(1)
def parse(self, soup):
"""
:return: List of items with their details
"""
rdata = self.scraper.archive_list.get(self.whatif_id)
# Parse the items here and return the content to be added to the db
article = self.web.driver.find_element_by_css_selector('article.entry')
rdata['question'] = soup.find('article', {'class': 'entry'}).find('p', {'id': 'question'}).get_text()
whatif_filename = '{base}/{last_num}/{whatif_id}.png'\
.format(base=self.scraper.BASE_SAVE_DIR,
last_num=str(self.whatif_id)[-1],
whatif_id=self.whatif_id)
rdata.update({'whatif_id': self.whatif_id,
'saved_file_location': self.web.screenshot(whatif_filename, element=article)
.replace(self.scraper.BASE_DATA_DIR + os.path.sep, ''),
'time_collected': cutil.get_datetime(),
})
return rdata
class XkcdWhatif(Scraper):
def __init__(self, config_file=None):
super().__init__('xkcd')
self.archive_list = self.load_archive_list()
self.max_id = self.get_latest()
self.last_id_scraped = self.get_last_scraped()
self.whatif_ids = []
def start(self):
"""
Send the ref data to the worker threads
"""
if self.max_id == self.last_id_scraped:
# No need to continue
logger.info("Already have the newest whatif")
return
self.whatif_ids = list(range(self.last_id_scraped + 1, self.max_id + 1))
# Log how many items in total we will be parsing
scraper.stats['ref_data_count'] = len(self.whatif_ids)
# Only ever use 1 thread here
self.thread_profile(1, DriverSeleniumPhantomJS, self.whatif_ids, Worker)
def load_archive_list(self):
"""
Load all the whatifs and store in a dict with the id's as keys
Need to do this since this is the only place where the date posted is listed
"""
rdata = {}
tmp_web = DriverRequests()
url = "http://what-if.xkcd.com/archive/"
try:
soup = tmp_web.get_site(url, page_format='html')
except Exception:
logger.critical("Problem getting whatif archive", exc_info=True)
sys.exit(1)
entries = soup.find_all('div', {'class': 'archive-entry'})
for entry in entries:
try:
_id = int(entry.find('a')['href'].split('/')[-2])
title = entry.find(class_='archive-title').text
posted_at = entry.find(class_='archive-date').text
rdata[_id] = {'posted_at': posted_at,
'title': title,
}
except (AttributeError, ValueError):
logger.critical("Cannot parse data for entry {entry}".format(entry=str(entry)))
return rdata
def get_latest(self):
"""
Get the latest whatif id posted
"""
max_id = max(self.archive_list.keys())
logger.info("Newest upload: {id}".format(id=max_id))
return max_id
def get_last_scraped(self):
"""
Get last whatif scraped
"""
last_scraped_id = db_session.query(Setting).filter(Setting.bit == 0).one().whatif_last_id
if last_scraped_id is None:
last_scraped_id = 0
return last_scraped_id
def log_last_scraped(self):
try:
try:
last_whatif_id = min(self.whatif_ids) - 1
except ValueError:
last_whatif_id = self.max_id
setting = db_session.query(Setting).filter(Setting.bit == 0).one()
setting.whatif_last_id = last_whatif_id
setting.whatif_last_ran = cutil.get_datetime()
db_session.add(setting)
db_session.commit()
except Exception:
logger.exception("Problem logging last whatif scraped")
def insert_data(self, data):
"""
Will handle inserting data into the database
"""
try:
db_session = DBSession()
# Check if whatif is in database, if so update else create
try:
whatif = db_session.query(Whatif).filter(Whatif.whatif_id == data.get('whatif_id')).one()
except NoResultFound:
whatif = Whatif()
whatif.title = data.get('title')
whatif.question = data.get('question')
whatif.whatif_id = data.get('whatif_id')
whatif.saved_file_location = data.get('saved_file_location')
whatif.posted_at = data.get('posted_at')
whatif.time_collected = data.get('time_collected')
db_session.add(whatif)
db_session.commit()
except Exception:
db_session.rollback()
logger.exception("Error adding to db {data}".format(data=data))
def sigint_handler(signal, frame):
logger.critical("Keyboard Interrupt")
sys.exit(0)
if __name__ == '__main__':
signal.signal(signal.SIGINT, sigint_handler)
try:
# Setup the scraper
scraper = XkcdWhatif()
try:
# Start scraping
scraper.start()
scraper.cleanup()
except Exception:
logger.critical("Main Error", exc_info=True)
except Exception:
logger.critical("Setup Error", exc_info=True)
finally:
scraper.log_last_scraped()
try:
# Log stats
scraper_monitor.stop(total_urls=scraper.stats['total_urls'],
ref_data_count=scraper.stats['ref_data_count'],
ref_data_success_count=scraper.stats['ref_data_success_count'],
rows_added_to_db=scraper.stats['rows_added_to_db'])
except NameError:
# If there is an issue with scraper.stats
scraper_monitor.stop()
except Exception:
logger.critical("Scraper Monitor Stop Error", exc_info=True)
scraper_monitor.stop()
| xtream1101/scrape-xkcd | xkcd-whatif.py | xkcd-whatif.py | py | 7,877 | python | en | code | 0 | github-code | 13 |
5408702329 | import os
import datetime
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import (
TemplateView,
ListView,
CreateView,
UpdateView,
)
from django.urls import reverse_lazy
from django.shortcuts import redirect
from items.models import SpotifySession, Comment, Item
from items.services import ItemStorage, SpotifyAPI
class Spotify:
def __init__(self):
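# Reuse the access token cached in SpotifySession while it is still valid; otherwise
# authenticate against the Spotify API and store the fresh token for later requests.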
spotify_session, created = SpotifySession.objects.get_or_create(
client_id=os.getenv("CLIENT_ID")
)
if not created:
spotify_session.token_expires = spotify_session.token_expires.replace(
tzinfo=None
)
spotify_session.token_expires += datetime.timedelta(hours=2)
if not created and datetime.datetime.now() < spotify_session.token_expires:
self.spotify = SpotifyAPI(
client_id=os.getenv("CLIENT_ID"),
client_secret=os.getenv("CLIENT_SECRET"),
access_token=spotify_session.access_token,
token_type=spotify_session.token_type,
token_expires=spotify_session.token_expires,
)
else:
self.spotify = SpotifyAPI(
client_id=os.getenv("CLIENT_ID"),
client_secret=os.getenv("CLIENT_SECRET"),
)
self.spotify.auth()
SpotifySession.objects.filter(client_id=os.getenv("CLIENT_ID")).update(
access_token=self.spotify.access_token,
token_type=self.spotify.token_type,
token_expires=self.spotify.token_expires,
)
class SearchView(Spotify, TemplateView):
template_name = "items/search.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["query"] = self.request.GET["query"]
context["spotify"] = self.spotify.search(self.request.GET["query"], limit=8)
return context
class CommentsListView(ListView):
model = Comment
context_object_name = "comments"
def get_queryset(self):
return super().get_queryset().filter(item=self.kwargs["idx"])
class IsInYourFavorites(ListView):
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.request.user.is_authenticated:
context["liked"] = Item.objects.filter(
idx=self.kwargs["idx"], like=self.request.user
).exists()
else:
context["liked"] = False
return context
class AlbumDetailsView(Spotify, CommentsListView, IsInYourFavorites):
template_name = "items/album_details.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["spotify"] = self.spotify.get_album(self.kwargs["idx"])
context["item_type"] = "album"
return context
class ArtistDetailsView(Spotify, CommentsListView, IsInYourFavorites):
template_name = "items/artist_details.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["spotify"] = self.spotify.get_artist(self.kwargs["idx"])
if "error" not in context["spotify"]:
context["spotify"]["top_tracks"] = self.spotify.get_top_tracks(
self.kwargs["idx"]
)
context["spotify"]["discography"] = self.spotify.get_discography(
self.kwargs["idx"], limit=6
)
context["spotify"]["related"] = self.spotify.get_related_artists(
self.kwargs["idx"], limit=6
)
context["item_type"] = "artist"
return context
class TrackDetailsView(Spotify, CommentsListView, IsInYourFavorites):
template_name = "items/track_details.html"
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["spotify"] = self.spotify.get_track(self.kwargs["idx"])
if "artists" in context["spotify"]:
context["spotify"]["artists"] = self.spotify.get_artists(
[artist["id"] for artist in context["spotify"]["artists"]]
)
context["item_type"] = "track"
return context
class CommentCreateView(LoginRequiredMixin, Spotify, CreateView):
http_method_names = ['post']
model = Comment
fields = ["text"]
def setup(self, request, *args, **kwargs):
if not ItemStorage().create(self.spotify, kwargs["item_type"], kwargs["idx"]):
redirect(reverse_lazy("index"))
return super().setup(request, *args, **kwargs)
def form_valid(self, form):
form.instance.published_by = self.request.user
form.instance.item = Item.objects.get(idx=self.kwargs["idx"])
return super().form_valid(form)
def get_success_url(self):
if self.kwargs["item_type"] == "artist":
return (
reverse_lazy("artist-details", kwargs={"idx": self.kwargs["idx"]})
+ "#comment-form"
)
elif self.kwargs["item_type"] == "album":
return (
reverse_lazy("album-details", kwargs={"idx": self.kwargs["idx"]})
+ "#comment-form"
)
elif self.kwargs["item_type"] == "track":
return (
reverse_lazy("track-details", kwargs={"idx": self.kwargs["idx"]})
+ "#comment-form"
)
else:
return reverse_lazy("index")
class FavoritesEditView(LoginRequiredMixin, UpdateView):
def get_success_url(self):
if self.kwargs["item_type"] == "artist":
return reverse_lazy("artist-details", kwargs={"idx": self.kwargs["pk"]})
elif self.kwargs["item_type"] == "album":
return reverse_lazy("album-details", kwargs={"idx": self.kwargs["pk"]})
elif self.kwargs["item_type"] == "track":
return reverse_lazy("track-details", kwargs={"idx": self.kwargs["pk"]})
else:
return reverse_lazy("index")
class FavoriteSaveView(Spotify, FavoritesEditView):
http_method_names = ['post']
model = Item
fields = []
def setup(self, request, *args, **kwargs):
if not ItemStorage().create(self.spotify, kwargs["item_type"], kwargs["pk"]):
redirect(reverse_lazy("index"))
return super().setup(request, *args, **kwargs)
def form_valid(self, form):
form.instance.like.add(self.request.user)
return super().form_valid(form)
class FavoriteDeleteView(FavoritesEditView):
http_method_names = ['post']
model = Item
fields = []
def form_valid(self, form):
form.instance.like.remove(self.request.user)
return super().form_valid(form)
| titou386/OCmusic | items/views.py | views.py | py | 6,798 | python | en | code | 0 | github-code | 13 |
2864457288 | import httplib
import mock
import stubout
import webtest
from google.apputils import app
from google.apputils import basetest
from simian.mac import admin
from simian.mac import models
from simian.mac.admin import main as gae_main
from simian.mac.common import auth
from tests.simian.mac.common import test
@mock.patch.object(auth, 'IsAdminUser', return_value=True)
@mock.patch.object(admin.template, 'render', return_value='html:)')
class AdminGroupsTest(test.AppengineTest):
def setUp(self):
super(AdminGroupsTest, self).setUp()
self.testapp = webtest.TestApp(gae_main.app)
models.Group(key_name='test group', users=['user1', 'user4']).put()
models.Group(key_name='test group2', users=['user1', 'user2']).put()
def testGet(self, render_mock, *unused_args):
"""Test get()."""
self.testapp.get('/admin/groups', status=httplib.OK)
render_dict = test.GetArgFromCallHistory(render_mock, arg_index=1)
self.assertIn('test group', [x.key().name() for x in render_dict['groups']])
def testPostCreate(self, render_mock, *unused_args):
"""Test post() create action."""
self.testapp.get('/admin/groups', status=httplib.OK)
render_dict = test.GetArgFromCallHistory(render_mock, arg_index=1)
params = {
'xsrf_token': render_dict['xsrf_token'],
'group': 'test group3',
'action': 'create',
'user': 'user5',
}
resp = self.testapp.post('/admin/groups', params, status=httplib.FOUND)
redirect_url = '/admin/groups?msg=Group successfully saved.'
self.assertTrue(resp.location.endswith(redirect_url))
self.assertIn('user5', models.Group.get_by_key_name('test group3').users)
def testPostDeleteNoManMods(self, render_mock, *unused_args):
"""Test post() delete action, no manifest modifications."""
self.testapp.get('/admin/groups', status=httplib.OK)
render_dict = test.GetArgFromCallHistory(render_mock, arg_index=1)
params = {
'xsrf_token': render_dict['xsrf_token'],
'group': 'test group',
'action': 'delete',
}
resp = self.testapp.post('/admin/groups', params, status=httplib.FOUND)
redirect_url = '/admin/groups?msg=Group successfully deleted.'
self.assertTrue(resp.location.endswith(redirect_url))
self.assertNotIn('test group', models.Group.GetAllGroupNames())
def testPostDeleteNoManModsNoGroup(self, render_mock, *unused_args):
"""Test post() delete action, no manifest modifications, no group."""
self.testapp.get('/admin/groups', status=httplib.OK)
render_dict = test.GetArgFromCallHistory(render_mock, arg_index=1)
params = {
'xsrf_token': render_dict['xsrf_token'],
'group': 'test group42',
'action': 'delete',
}
self.testapp.post('/admin/groups', params, status=httplib.NOT_FOUND)
def testPostDeleteWithManMods(self, render_mock, *unused_args):
"""Test post() delete action, manifiest modifications exist."""
models.GroupManifestModification.GenerateInstance(
mod_type='group', target='test group', munki_pkg_name='Firefox').put()
self.testapp.get('/admin/groups', status=httplib.OK)
render_dict = test.GetArgFromCallHistory(render_mock, arg_index=1)
params = {
'xsrf_token': render_dict['xsrf_token'],
'group': 'test group',
'action': 'delete',
}
resp = self.testapp.post('/admin/groups', params, status=httplib.FOUND)
redirect_url = ("/admin/groups?msg=Group not deleted as it's being used "
"for Manifest Modifications.")
self.assertTrue(resp.location.endswith(redirect_url))
self.assertIn('test group', models.Group.GetAllGroupNames())
def testPostChangeAdd(self, render_mock, *unused_args):
"""Test post() change action, add user."""
self.testapp.get('/admin/groups', status=httplib.OK)
render_dict = test.GetArgFromCallHistory(render_mock, arg_index=1)
params = {
'xsrf_token': render_dict['xsrf_token'],
'group': 'test group',
'action': 'change',
'user': 'user7',
'add': '1'
}
resp = self.testapp.post('/admin/groups', params, status=httplib.FOUND)
redirect_url = '/admin/groups?msg=Group successfully modified.'
self.assertTrue(resp.location.endswith(redirect_url))
self.assertIn('user7', models.Group.get_by_key_name('test group').users)
def testPostChangeRemove(self, render_mock, *unused_args):
"""Test post() change action, remove user."""
self.testapp.get('/admin/groups', status=httplib.OK)
render_dict = test.GetArgFromCallHistory(render_mock, arg_index=1)
params = {
'xsrf_token': render_dict['xsrf_token'],
'group': 'test group',
'action': 'change',
'user': 'user4',
'add': '0'
}
resp = self.testapp.post('/admin/groups', params, status=httplib.FOUND)
redirect_url = '/admin/groups?msg=Group successfully modified.'
self.assertTrue(resp.location.endswith(redirect_url))
self.assertNotIn('user4', models.Group.get_by_key_name('test group').users)
def testPostChangeNoGroup(self, render_mock, *unused_args):
"""Test post() change action, group doesn't exist."""
self.testapp.get('/admin/groups', status=httplib.OK)
render_dict = test.GetArgFromCallHistory(render_mock, arg_index=1)
params = {
'xsrf_token': render_dict['xsrf_token'],
'group': 'test group42',
'action': 'change',
'user': 'user4',
'add': '1'
}
self.testapp.post('/admin/groups', params, status=httplib.NOT_FOUND)
def main(unused_argv):
basetest.main()
if __name__ == '__main__':
app.run()
| googlearchive/simian | src/tests/simian/mac/admin/groups_test.py | groups_test.py | py | 5,642 | python | en | code | 334 | github-code | 13 |
26455531432 | import cv2
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
cap = cv2.VideoCapture(0)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
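# Two vertical target lines: one a third of the way in from each side, spanning the
# middle third of the frame.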
line_x = width // 3
line_y_top = height // 3
line_y_bottom = height * 2 // 3
line_height = height // 3
line_color_left = (0, 0, 255)
line_color_right = (0, 0, 255)
left_line_touched = False
right_line_touched = False
with mp_hands.Hands(min_detection_confidence=0.5, min_tracking_confidence=0.5) as hands:
while cap.isOpened():
ret, frame = cap.read()
if not ret:
continue
frame = cv2.flip(frame, 1)
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
results = hands.process(frame_rgb)
left_line_touched = False
right_line_touched = False
if results.multi_hand_landmarks:
hand_landmarks = results.multi_hand_landmarks[0] # take only the first detected hand
mp_drawing.draw_landmarks(frame, hand_landmarks, mp_hands.HAND_CONNECTIONS)
for landmark in hand_landmarks.landmark:
x, y = int(landmark.x * width), int(landmark.y * height)
# check whether the left line is touched
if line_x - 20 < x < line_x + 20 and line_y_top < y < line_y_bottom:
line_color_left = (0, 255, 0) # turn the line green
left_line_touched = True
# check whether the right line is touched
if width - line_x - 20 < x < width - line_x + 20 and line_y_top < y < line_y_bottom:
line_color_right = (0, 255, 0) # turn the line green
right_line_touched = True
cv2.line(frame, (line_x, line_y_top), (line_x, line_y_bottom), line_color_left if left_line_touched else (0, 0, 255), 10)
cv2.line(frame, (width - line_x, line_y_top), (width - line_x, line_y_bottom), line_color_right if right_line_touched else (0, 0, 255), 10)
cv2.imshow('Hand Lines Interaction', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| diegoperea20/hand-interaction-lines | lines.py | lines.py | py | 2,162 | python | en | code | 0 | github-code | 13 |
72124305937 | from django.urls import path
from .views import (
compare_games_view, global_sales_view,
compare_publishers_productions, compare_genres_view,
)
namespace = 'api'
urlpatterns = [
path('compare-games/',compare_games_view, name='compare_games'),
path('global-sale/', global_sales_view, name='global-sale'),
path('compare-publishers-sale/', compare_publishers_productions, name='publishers-sales'),
path('compare-genres-sale/', compare_genres_view, name='compare-genres'),
]
| disciple-zarrin/cloud | analytical_service/api/urls.py | urls.py | py | 507 | python | en | code | 1 | github-code | 13 |
17537016756 | import sv
import vtk
# FROM https://github.com/SimVascular/SimVascular-Tests/blob/master/new-api-tests/graphics/graphics.py
def add_line(renderer, pt1, pt2, color=[1.0, 1.0, 1.0], width=2):
line = vtk.vtkLineSource()
line.SetPoint1(pt1)
line.SetPoint2(pt2)
line.Update()
polydata = line.GetOutput()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(polydata)
mapper.ScalarVisibilityOff()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color[0], color[1], color[2])
actor.GetProperty().SetLineWidth(width)
renderer.AddActor(actor)
def create_segmentation_geometry(renderer, segmentation, color=[1.0, 1.0, 1.0], show_cpts=False, show_center=False):
''' Create geometry for the segmentation points and control points.
'''
#print("---------- gr.create_segmentation_geometry ----------")
coords = segmentation.get_points()
num_pts = len(coords)
#print("[gr.create_segmentation_geometry] num_pts: {0:d}".format(num_pts))
if num_pts == 0:
return
## Create segmentation geometry points and line connectivity.
#
points = vtk.vtkPoints()
points.SetNumberOfPoints(num_pts)
lines = vtk.vtkCellArray()
lines.InsertNextCell(num_pts+1)
n = 0
for pt in coords:
points.SetPoint(n, pt[0], pt[1], pt[2])
lines.InsertCellPoint(n)
n += 1
#_for pt in coords
lines.InsertCellPoint(0)
geom = vtk.vtkPolyData()
geom.SetPoints(points)
geom.SetLines(lines)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(geom)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetLineWidth(2.0)
actor.GetProperty().SetColor(color[0], color[1], color[2])
renderer.AddActor(actor)
## Add center point.
#
if show_center:
center = segmentation.get_center()
#print("gr.create_segmentation_geometry] Center: {0:g} {1:g} {2:g}".format(center[0], center[1], center[2]))
num_pts = 1
points = vtk.vtkPoints()
vertices = vtk.vtkCellArray()
pid = points.InsertNextPoint(center)
vertices.InsertNextCell(1)
vertices.InsertCellPoint(pid)
points_pd = vtk.vtkPolyData()
points_pd.SetPoints(points)
points_pd.SetVerts(vertices)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(points_pd)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color[0], color[1], color[2])
renderer.AddActor(actor)
actor.GetProperty().SetPointSize(5)
renderer.AddActor(actor)
## Add control points.
#
if show_cpts:
try:
coords = segmentation.get_control_points()
except:
coords = []
num_pts = len(coords)
points = vtk.vtkPoints()
vertices = vtk.vtkCellArray()
for pt in coords:
pid = points.InsertNextPoint(pt)
vertices.InsertNextCell(1)
vertices.InsertCellPoint(pid)
#_for pt in coords
points_pd = vtk.vtkPolyData()
points_pd.SetPoints(points)
points_pd.SetVerts(vertices)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(points_pd)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(1.0, 0.0, 0.0)
actor.GetProperty().SetPointSize(5)
renderer.AddActor(actor)
def create_path_geometry(renderer, path, line_color=[0.0, 0.6, 0.0], marker_color=[1.0,0.0,0.0], show_points=False):
''' Create geometry for the path curve and control points.
'''
coords = path.get_curve_points()
num_pts = len(coords)
# Create contour geometry points and line connectivity.
points = vtk.vtkPoints()
points.SetNumberOfPoints(num_pts)
lines = vtk.vtkCellArray()
lines.InsertNextCell(num_pts)
n = 0
for pt in coords:
points.SetPoint(n, pt[0], pt[1], pt[2])
lines.InsertCellPoint(n)
n += 1
#_for pt in coords
geom = vtk.vtkPolyData()
geom.SetPoints(points)
geom.SetLines(lines)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(geom)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetLineWidth(2.0)
actor.GetProperty().SetColor(line_color[0], line_color[1], line_color[2])
renderer.AddActor(actor)
## Show curve points.
if show_points:
points = vtk.vtkPoints()
vertices = vtk.vtkCellArray()
for pt in coords:
pid = points.InsertNextPoint(pt)
vertices.InsertNextCell(1)
vertices.InsertCellPoint(pid)
#_for pt in coords
points_pd = vtk.vtkPolyData()
points_pd.SetPoints(points)
points_pd.SetVerts(vertices)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(points_pd)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(0.0, 0.0, 1.0)
actor.GetProperty().SetPointSize(5)
renderer.AddActor(actor)
## Add control points.
coords = path.get_control_points()
num_pts = len(coords)
points = vtk.vtkPoints()
vertices = vtk.vtkCellArray()
for pt in coords:
pid = points.InsertNextPoint(pt)
vertices.InsertNextCell(1)
vertices.InsertCellPoint(pid)
#_for pt in coords
points_pd = vtk.vtkPolyData()
points_pd.SetPoints(points)
points_pd.SetVerts(vertices)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(points_pd)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(marker_color[0], marker_color[1], marker_color[2])
actor.GetProperty().SetPointSize(8)
renderer.AddActor(actor)
#_create_path_geometry(renderer, path)
def convert_ug_to_polydata(mesh):
'''
Convert mesh to polydata.
'''
geometry_filter = vtk.vtkGeometryFilter()
geometry_filter.SetInputData(mesh)
geometry_filter.Update()
mesh_polydata = geometry_filter.GetOutput()
triangle_filter = vtk.vtkTriangleFilter()
triangle_filter.SetInputData(mesh_polydata)
triangle_filter.Update()
polydata = triangle_filter.GetOutput()
return polydata
def create_contour_geometry(renderer, contour):
""" Create geometry for the contour points and control points.
"""
coords = contour.get_points()
num_pts = len(coords)
## Create contour geometry points and line connectivity.
#
points = vtk.vtkPoints()
points.SetNumberOfPoints(num_pts)
lines = vtk.vtkCellArray()
lines.InsertNextCell(num_pts+1)
n = 0
for pt in coords:
points.SetPoint(n, pt[0], pt[1], pt[2])
lines.InsertCellPoint(n)
n += 1
#_for pt in coords
lines.InsertCellPoint(0)
geom = vtk.vtkPolyData()
geom.SetPoints(points)
geom.SetLines(lines)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(geom)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetLineWidth(2.0)
actor.GetProperty().SetColor(0.0, 0.6, 0.0)
renderer.AddActor(actor)
## Add center point.
#
center = contour.get_center()
num_pts = 1
points = vtk.vtkPoints()
vertices = vtk.vtkCellArray()
pid = points.InsertNextPoint(center)
vertices.InsertNextCell(1)
vertices.InsertCellPoint(pid)
points_pd = vtk.vtkPolyData()
points_pd.SetPoints(points)
points_pd.SetVerts(vertices)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(points_pd)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(1.0, 0.0, 0.0)
actor.GetProperty().SetPointSize(5)
renderer.AddActor(actor)
## Add control points.
#
try:
coords = contour.get_control_points()
num_pts = len(coords)
points = vtk.vtkPoints()
vertices = vtk.vtkCellArray()
for pt in coords:
pid = points.InsertNextPoint(pt)
vertices.InsertNextCell(1)
vertices.InsertCellPoint(pid)
#_for pt in coords
points_pd = vtk.vtkPolyData()
points_pd.SetPoints(points)
points_pd.SetVerts(vertices)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(points_pd)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(1.0, 0.0, 0.0)
actor.GetProperty().SetPointSize(5)
except:
pass
# renderer.AddActor(actor)
def display(renderer_win):
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
interactor.SetRenderWindow(renderer_win)
# Set the window title.
renderer_win.Render()
renderer_win.SetWindowName("SV Python API")
interactor.Start()
def add_geometry(renderer, polydata, color=[1.0, 1.0, 1.0], wire=False, edges=False):
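# Wrap a vtkPolyData in a mapper/actor with the given color and display options
# (wireframe, edge visibility) and add it to the renderer.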
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(polydata)
mapper.SetScalarVisibility(False)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(color[0], color[1], color[2])
#actor.GetProperty().SetPointSize(5)
if wire:
actor.GetProperty().SetRepresentationToWireframe()
actor.GetProperty().SetLineWidth(1.0)
elif not edges:
actor.GetProperty().SetLineWidth(2.0)
if edges:
actor.GetProperty().EdgeVisibilityOn();
else:
actor.GetProperty().EdgeVisibilityOff();
renderer.AddActor(actor)
def add_plane(renderer, center, normal, color=[1.0, 1.0, 1.0], wire=False):
planeSource = vtk.vtkPlaneSource()
planeSource.SetCenter(center);
planeSource.SetNormal(normal)
planeSource.Update()
plane_pd = planeSource.GetOutput()
add_geometry(renderer, plane_pd, color, wire)
def add_sphere(renderer, center, radius, color=[1.0, 1.0, 1.0], wire=False):
sphere = vtk.vtkSphereSource()
sphere.SetCenter(center[0], center[1], center[2])
sphere.SetRadius(radius)
sphere.SetPhiResolution(16)
sphere.SetThetaResolution(16)
sphere.Update()
sphere_pd = sphere.GetOutput()
add_geometry(renderer, sphere_pd, color, wire)
def init_graphics(win_width, win_height):
''' Create renderer and graphics window.
'''
renderer = vtk.vtkRenderer()
renderer_win = vtk.vtkRenderWindow()
renderer_win.AddRenderer(renderer)
renderer.SetBackground(0.8, 0.8, 0.8)
renderer_win.SetSize(win_width, win_height)
#renderer_win.Render()
#renderer_win.SetWindowName("SV Python API")
return renderer, renderer_win | eric-yim/simvascular_scripts | graphics.py | graphics.py | py | 10,604 | python | en | code | 3 | github-code | 13 |
23149198725 | # Evergreen Tweets
# By @5amwiltshire
# Version 1.0
import datetime
import tweepy
import random
import gspread
from _constant import *
from gdocs import sheet_tweets, sheet_log
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True)
user = api.get_user(handle)
# HARDCODED TWEET TIMES
times = [
[7, 8], # in hours, minutes format without leading 0s
[12, 10],
[17, 43]]
# PUBLISH TWEET - DONE
def tweet(msg, log):
# Twitter variables
status = api.update_status(msg)
tweet_id = status.id_str
timestamp = json_serial(status.created_at)
text = status.text
print('tweeting: ' + text)
logger(tweet_id, text, timestamp, log)
# JSON serializer for objects not serializable by default json code
def json_serial(obj):
if isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
raise TypeError ("Type %s not serializable" % type(obj))
# RANDOMISER - DONE
def randomiser(tweets_list, log):
next_id = random.randint(0, len(tweets_list) - 1)
result = tweets_list.pop(next_id)['Tweet']
return result
# LOGGER
def logger(id, msg, timestamp, log):
next_row = len(log) + 2
# for these below, look at why client.update_acell is not working via the gspread package. According to the documentation this is how to update cells
sheet_log.update_acell('A{}'.format(next_row), id)
sheet_log.update_acell('B{}'.format(next_row), msg)
sheet_log.update_acell('C{}'.format(next_row), timestamp)
# CHECKER
# Get tweets from last 7 days
def tweets_in_past_7_days(log, cutoff):
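# Count how many rows of the log sheet have a published timestamp newer than the cutoff.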
row_count = len(log)
n = 0
for x in range(row_count, 0, -1):
tweet_timestamp = datetime.datetime.strptime(log[x - 1]['Published timestamp'], '%Y-%m-%dT%H:%M:%S')
if tweet_timestamp > cutoff:
n += 1
return n
# Log checker
def used_in_prev_7_days(msg, log, cutoff):
recent_log_count = tweets_in_past_7_days(log, cutoff)
recent_log = log[recent_log_count*-1:]
for x in range(recent_log_count):
if msg == recent_log[x - 1]['Tweet']:
return True
return False
# SCHEDULER
def scheduler(data, context): # need these arguments as Google Cloud Functions passes these anyway
now = datetime.datetime.now().replace(microsecond=0)
cutoff = now - datetime.timedelta(days=7, minutes=5)
tweets_list = sheet_tweets.get_all_records()
log = sheet_log.get_all_records()
msg = get_tweet(tweets_list, log, cutoff)
for n in times:
print('Checking ' + str(now.replace(second=0, microsecond=0)) + ' == ' + str(check_time(n[0], n[1])) )
if now.replace(second=0, microsecond=0) == check_time(n[0], n[1]):
if msg is not None:
tweet(msg, log)
else:
print('Posting tweet failed')
else:
print('Time ' + str(n) + ' is not now')
def check_time(hr, min, sec=0, micros=0):
return datetime.datetime.now().replace(hour=hr, minute=min, second=sec, microsecond=micros)
def get_tweet(tweets_list, log, cutoff):
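# Draw random tweets until one is found that has not been posted in the last 7 days;
# returns None once every candidate has been tried.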
check = True
tweet_count = len(tweets_list)
while check:
msg = randomiser(tweets_list, log)
check = used_in_prev_7_days(msg, log, cutoff) # return True or False
tweet_count -= 1
if (check == False) or (tweet_count == 0):
break
if check == False:
return msg
else:
print('No evergreen tweets left!')
| 5amwiltshire/evergreentweets | main.py | main.py | py | 3,680 | python | en | code | 0 | github-code | 13 |
35173312920 | import logging
from typing import Dict, Sequence, TypeVar, Generic, List
from io import StringIO
E = TypeVar('E')
class Element:
class_ = []
output = StringIO()
def __init__(self, *args, **kwargs):
super().__init__()
self.children = []
if args:
for arg in args:
self.children.append(arg)
for k, v in kwargs.items():
setattr(self, k, v)
def append(self, *args):
for arg in args:
self.children.append(arg)
@property
def tag_name(self):
return self.__class__.__name__.lower()
def render_attribute(self, name, value, output):
value = value and value or ""
# logging.debug("render attribute %s=%s" % (name, value))
output.write(" ")
output.write(name)
output.write("=\"")
output.write(str(value))
output.write("\"")
def render_attributes(self, output):
if self.class_:
output.write(" class=\"")
output.write(" ".join(self.class_))
output.write("\"")
if hasattr(self, "attributes") and self.attributes:
try:
for attr in self.attributes:
if hasattr(self, attr):
self.render_attribute(attr, getattr(self, attr), output)
except TypeError as e:
logging.error(e)
@property
def attributes(self):
return []
def as_html(self, output=None):
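# Serialize this element to HTML: open the tag with its attributes, recurse into Element
# children (plain strings are written verbatim), then close the tag and return the result.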
output = not output and StringIO() or output
output.write("<")
output.write(self.tag_name)
self.render_attributes(output)
output.write(">")
for child in self.children:
if isinstance(child, Element):
child.as_html(output)
else:
# logging.debug(child)
if child:
output.write(child)
output.write("</")
output.write(self.tag_name)
output.write(">")
return output.getvalue()
def _as_html(self, output):
pass
def render(self):
pass
class Div(Element):
class_ = []
class Li(Element):
pass
class H1(Element):
pass
class FormInput(Element):
@property
def attributes(self):
return ['id', 'name', "disabled"]
class TextArea(FormInput):
@property
def attributes(self):
return super().attributes + ['rows', 'cols']
class Label(Element):
for_ = None
def render_attributes(self, output: StringIO):
super().render_attributes(output)
if self.for_:
output.write(" for=\"")
output.write(self.for_)
output.write("\"")
class Section(Element):
title: str
class Form(Element):
action: str = ''
class Input(FormInput):
@property
def attributes(self):
return super().attributes + ["value", "type", "placeholder", "checked"]
class Button(Element):
type: str = 'submit'
class HtmlInput:
label: str = None
value: str = None
description: str = None
placeholder: str = None
type: str = 'text'
name: str = None
def __init__(self, **kwargs):
super().__init__()
        for k, v in kwargs.items():
setattr(self, k, v)
def to_dict(self):
return {
'label': self.label,
'value': self.value,
'description': self.description,
'placeholder': self.placeholder,
'type': self.type
}
class TitleHtmlInput(HtmlInput):
name: str = 'title'
label: str = '标题'
value: str = '未命名'
class DefaultValueHtmlInput(HtmlInput):
label: str = '默认值'
class RequiredHtmlInput(HtmlInput):
type: str = 'checkbox'
label: str = '必填项'
description: str = '将所有字段设为 <a href="">必须填</a> 或 非必须填'
class FormFieldTemplate:
name: str = None
attributes = None
validators = None
def to_dict(self):
return {
'name': self.name,
'attributes': [x.to_dict() for x in self.attributes],
            'validators': [x.to_dict() for x in self.validators],
}
class FormInputFieldTemplate(FormFieldTemplate):
@property
def name(self):
return 'single_input'
@property
def attributes(self):
return (
TitleHtmlInput(),
DefaultValueHtmlInput()
)
@property
def validators(self):
        return (
            RequiredHtmlInput(),
        )
| iuantu/openform | app/markup.py | markup.py | py | 4,548 | python | en | code | 9 | github-code | 13 |
73491841936 | import os
import pickle as pkl
import numpy as np
from collections import defaultdict
def sequence_ids(file_path,file_name):
IdsL = [] # L is for denoting list
file_path = os.path.join(file_path,file_name+".fasta")
file = open(file_path,'r')
for line in file:
if line[0] == '>':
IdsL.append(line)
file.close()
return IdsL
def fasta_to_dict(file_path,file_name):
D = defaultdict()
file_path = os.path.join(file_path,file_name+".fasta")
file = open(file_path, 'r')
for line in file:
if line[0] == '>':
temp_id = line
D[temp_id] = ""
continue
else:
temp = line
#temp = line.split('\n')
D[temp_id] = D[temp_id]+temp
file.close()
return D
def gen_file(main_ids_dict, db_ids_query, filename):
"""
    :main_ids_dict: mapping from sequence id to sequence for the main (database) fasta file
    :db_ids_query: ids to be selected from the database
    :filename: file used to store the sequences of the selected ids
"""
file = open(filename,'w')
for id in db_ids_query:
file.write(id)
seq = main_ids_dict[id]
file.write(seq)
file.close()
def load_data(Dir_path, file_name):
"""
load database sequence vectors
"""
temp = open(os.path.join(Dir_path, file_name + ".pkl"), 'rb')
temp_ = pkl.load(temp)
vecs = temp_['sequence_vectors']
vecs = np.asarray(vecs)
family_sizes = temp_['family_sizes']
temp.close()
return (vecs,family_sizes)
def extract_labels(Dir_path, file_name):
"""
script for extracting labels from a fasta file
"""
file_path = os.path.join(Dir_path,file_name+".fasta")
file_in = open(file_path,'r')
# test_ = file_in.readlines()
file_iter = iter(file_in) # iterator
labels = []
while True:
try:
            line = next(file_iter)
if line[0] == ">":
line = line.replace('\n','')
line = line.split('_')
label = int(line[2])
labels.append(label)
except StopIteration:
break
return(labels)
def unique_labels(labels):
#To check unique labels
labels = set(labels)
labels = list(labels)
return labels
def inter_pre_values_per_query(qry_sub_distance, total_relevant, db_family_index):
"""
:param qry_sub_distance: 2D array of subject indices based on their distance with the query.
:return: interpolated precision value at 100 points
"""
precisionAt = np.zeros(101)
found = 0
retreived = 0
# qry_sub_distance # ranked class list to be processed for calculating precision recall values
while (found < total_relevant):
if (db_family_index[0] <= qry_sub_distance[0][retreived] < db_family_index[1]):
found = found + 1
retreived = retreived + 1
recall = float(found) / total_relevant
precision = float(found) / retreived
recall = int(np.ceil(recall * 100))
if precisionAt[recall] < precision:
precisionAt[recall] = precision
for j in range(99, -1, -1):
precisionAt[j] = max(precisionAt[j], precisionAt[j + 1])
return precisionAt
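# Note on the function above: precisionAt[r] ends up holding the interpolated precision at
# recall level r% -- the backward pass enforces p_interp(r) = max_{r' >= r} p(r'), so the
# curve is non-increasing in recall. A caller would typically average these 101-point
# vectors over all queries to obtain an interpolated precision-recall curve.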
def select_node_weights(score, root_labels, unique_labels_persplit, nbrs = 20):
ranked_indices = np.argsort(score)
# print(ranked_indices)
neighbors = ranked_indices[0][0:nbrs]
# print(root_labels)
# print(neighbors)
out = map(root_labels.__getitem__,neighbors)
out = list(out)
right = 0
left = 0
for indx in out:
if indx in unique_labels_persplit[0]:
left += 1
else:
right +=1
# print(left,right)
left_weight = left/float(nbrs)
right_weight = right/float(nbrs)
return (left_weight, right_weight)
| DhananjayKimothi/Supervised-BioRepL | HSuVec_and_BLAST/utils_herierchical_approach.py | utils_herierchical_approach.py | py | 3,811 | python | en | code | 1 | github-code | 13 |
20463715038 | import sys
sys.path.append('../../')
from hydroDL import master, utils
from hydroDL.data import camels
from hydroDL.master import default
from hydroDL.model import rnn, crit, train
import os
import numpy as np
import torch
from collections import OrderedDict
import random
import json
import datetime as dt
## fix the random seeds for reproducibility
randomseed = 111111
random.seed(randomseed)
torch.manual_seed(randomseed)
np.random.seed(randomseed)
torch.cuda.manual_seed(randomseed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
## GPU setting
# which GPU to use when having multiple
traingpuid = 0
torch.cuda.set_device(traingpuid)
## Setting training options here
PUOpt = 0
# PUOpt values and explanations:
# 0: train and test on ALL basins;
# 1 for PUB spatial test, randomly hold out basins;
# 2 for PUR spatial test, hold out a continuous region;
buffOpt = 0
# buffOpt defines the warm-up option for the first year of training forcing data
# 0: do nothing, the first year forcing would only be used to warm up the next year;
# 1: repeat first year forcing to warm up the first year;
# 2: load one more year forcing to warm up the first year
TDOpt = False
# TDOpt: True uses dynamic parameters; False uses static parameters
forType = 'daymet'
# forType defines which forcing product in CAMELS to use: 'daymet', 'nldas', 'maurer'
## Set hyperparameters
EPOCH = 50 # total epochs to train the model
BATCH_SIZE = 100
RHO = 365
HIDDENSIZE = 256
saveEPOCH = 10
Ttrain = [19801001, 19951001] # Training period
# Ttrain = [19891001, 19991001] # PUB/PUR period
Tinv = [19801001, 19951001] # Inversion period for historical forcings
# Tinv = [19891001, 19991001] # PUB/PUR period
Nfea = 12 # number of HBV parameters. 12:original HBV; 13:includes the added dynamic ET para when setting ETMod=True
BUFFTIME = 365 # for each training sample, use BUFFTIME days to warm up the model states
routing = True # Whether to use the routing module for simulated runoff
Nmul = 16 # Multi-component model. How many parallel HBV components to use. 1 means the original HBV.
comprout = False # True is doing routing for each component
compwts = False # True is using weighted average for components; False is the simple mean
pcorr = None # or a list to give the range of precip correction
if TDOpt is True:
# Below options are only for running models with dynamic parameters
tdRep = [1, 13] # When using dynamic parameters, this list defines which parameters to set as dynamic
tdRepS = [str(ix) for ix in tdRep]
# ETMod: if True, use the added shape parameter (index 13) for ET. Default as False.
# Must set below ETMod as True and Nfea=13 when including 13 index in above tdRep list for dynamic parameters
# If 13 not in tdRep list, set below ETMod=False and Nfea=12 to use the original HBV without ET shape para
ETMod = True
Nfea = 13 # should be 13 when setting ETMod=True. 12 when ETMod=False
dydrop = 0.0 # dropout possibility for those dynamic parameters: 0.0 always dynamic; 1.0 always static
staind = -1 # which time step to use from the learned para time series for those static parameters
TDN = '/TDTestforc/'+'TD'+"_".join(tdRepS) +'/'
else:
TDN = '/Testforc/'
# Define root directory of database and output
# Modify these based on your own location of CAMELS dataset
# Following the data download instruction in README file, you should organize the folders like
# 'your/path/to/Camels/basin_timeseries_v1p2_metForcing_obsFlow' and 'your/path/to/Camels/camels_attributes_v2.0'
# Then 'rootDatabase' here should be 'your/path/to/Camels';
# 'rootOut' is the root dir where you save the trained model
rootDatabase = os.path.join(os.path.sep, 'scratch', 'Camels') # CAMELS dataset root directory
camels.initcamels(rootDatabase) # initialize camels module-scope variables in camels.py (dirDB, gageDict) to read basin info
rootOut = os.path.join(os.path.sep, 'data', 'rnnStreamflow') # Model output root directory
## set up different data loadings for ALL, PUB, PUR
testfoldInd = 1
# Which fold to hold out for PUB (10 folds, from 1 to 10) and PUR (7 folds, from 1 to 7).
# It doesn't matter when training on ALL basins (setting PUOpt=0), could always set testfoldInd=1 for this case.
# load CAMELS basin information
gageinfo = camels.gageDict
hucinfo = gageinfo['huc']
gageid = gageinfo['id']
gageidLst = gageid.tolist()
if PUOpt == 0: # training on all basins without spatial hold-out
puN = 'ALL'
TrainLS = gageidLst # all basins
TrainInd = [gageidLst.index(j) for j in TrainLS]
TestLS = gageidLst
TestInd = [gageidLst.index(j) for j in TestLS]
gageDic = {'TrainID':TrainLS, 'TestID':TestLS}
elif PUOpt == 1: # random hold out basins. hold out the fold set by testfoldInd
puN = 'PUB'
# load the PUB basin groups
# randomly divide CAMELS basins into 10 groups and this file contains the basin ID for each group
# located in splitPath
splitPath = 'PUBsplitLst.txt'
with open(splitPath, 'r') as fp:
testIDLst=json.load(fp)
# Generate training ID lists excluding the hold out fold
TestLS = testIDLst[testfoldInd - 1]
TestInd = [gageidLst.index(j) for j in TestLS]
TrainLS = list(set(gageid.tolist()) - set(TestLS))
TrainInd = [gageidLst.index(j) for j in TrainLS]
gageDic = {'TrainID':TrainLS, 'TestID':TestLS}
elif PUOpt == 2:
puN = 'PUR'
    # Divide the CAMELS dataset into 7 continuous PUR regions, as shown in Feng et al., 2021 GRL; 2022 HESSD
# get the id list of each PUR region, save to list
regionID = list()
regionNum = list()
# seven regions including different HUCs
regionDivide = [ [1,2], [3,6], [4,5,7], [9,10], [8,11,12,13], [14,15,16,18], [17] ]
for ii in range(len(regionDivide)):
tempcomb = regionDivide[ii]
tempregid = list()
for ih in tempcomb:
tempid = gageid[hucinfo==ih].tolist()
tempregid = tempregid + tempid
regionID.append(tempregid)
regionNum.append(len(tempregid))
iexp = testfoldInd - 1 #index
TestLS = regionID[iexp] # basin ID list for testing, hold out for training
TestInd = [gageidLst.index(j) for j in TestLS]
TrainLS = list(set(gageid.tolist()) - set(TestLS)) # basin ID for training
TrainInd = [gageidLst.index(j) for j in TrainLS]
gageDic = {'TrainID': TrainLS, 'TestID': TestLS}
# apply buffOpt to handle the warm-up for the first year
if buffOpt ==2: # load more BUFFTIME data for the first year
sd = utils.time.t2dt(Ttrain[0]) - dt.timedelta(days=BUFFTIME)
sdint = int(sd.strftime("%Y%m%d"))
TtrainLoad = [sdint, Ttrain[1]]
TinvLoad = [sdint, Ttrain[1]]
else:
TtrainLoad = Ttrain
TinvLoad = Tinv
## prepare input data
## load camels dataset
if forType == 'daymet':
varF = ['prcp', 'tmean']
varFInv = ['prcp', 'tmean']
else:
varF = ['prcp', 'tmax'] # For CAMELS maurer and nldas forcings, tmax is actually tmean
varFInv = ['prcp', 'tmax']
# the attributes used to learn parameters
attrnewLst = [ 'p_mean','pet_mean','p_seasonality','frac_snow','aridity','high_prec_freq','high_prec_dur',
'low_prec_freq','low_prec_dur', 'elev_mean', 'slope_mean', 'area_gages2', 'frac_forest', 'lai_max',
'lai_diff', 'gvf_max', 'gvf_diff', 'dom_land_cover_frac', 'dom_land_cover', 'root_depth_50',
'soil_depth_pelletier', 'soil_depth_statsgo', 'soil_porosity', 'soil_conductivity',
'max_water_content', 'sand_frac', 'silt_frac', 'clay_frac', 'geol_1st_class', 'glim_1st_class_frac',
'geol_2nd_class', 'glim_2nd_class_frac', 'carbonate_rocks_frac', 'geol_porostiy', 'geol_permeability']
optData = default.optDataCamels # a default dictionary for logging, updated below
# Update the training period and variables
optData = default.update(optData, tRange=TtrainLoad, varT=varFInv, varC=attrnewLst, subset=TrainLS, forType=forType)
# for HBV model training inputs
dfTrain = camels.DataframeCamels(tRange=TtrainLoad, subset=TrainLS, forType=forType)
forcUN = dfTrain.getDataTs(varLst=varF, doNorm=False, rmNan=False)
obsUN = dfTrain.getDataObs(doNorm=False, rmNan=False, basinnorm=False)
# for dPL inversion data, inputs of gA
dfInv = camels.DataframeCamels(tRange=TinvLoad, subset=TrainLS, forType=forType)
forcInvUN = dfInv.getDataTs(varLst=varFInv, doNorm=False, rmNan=False)
attrsUN = dfInv.getDataConst(varLst=attrnewLst, doNorm=False, rmNan=False)
# Unit transformation, discharge obs from ft3/s to mm/day
areas = gageinfo['area'][TrainInd] # unit km2
temparea = np.tile(areas[:, None, None], (1, obsUN.shape[1],1))
obsUN = (obsUN * 0.0283168 * 3600 * 24) / (temparea * (10 ** 6)) * 10**3 # transform to mm/day
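# Unit arithmetic for the line above: 1 ft^3 = 0.0283168 m^3 and 3600*24 s/day give m^3/day;
# dividing by the basin area (km^2 * 10^6 = m^2) yields m/day, and the final * 10^3 converts to mm/day.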
# load potential ET calculated by hargreaves method
varLstNL = ['PEVAP']
usgsIdLst = gageid
if forType == 'maurer':
tPETRange = [19800101, 20090101]
else:
tPETRange = [19800101, 20150101]
tPETLst = utils.time.tRange2Array(tPETRange)
# Modify this as the directory where you put PET
PETDir = rootDatabase + '/pet_harg/' + forType + '/'
ntime = len(tPETLst)
PETfull = np.empty([len(usgsIdLst), ntime, len(varLstNL)])
for k in range(len(usgsIdLst)):
dataTemp = camels.readcsvGage(PETDir, usgsIdLst[k], varLstNL, ntime)
PETfull[k, :, :] = dataTemp
TtrainLst = utils.time.tRange2Array(TtrainLoad)
TinvLst = utils.time.tRange2Array(TinvLoad)
C, ind1, ind2 = np.intersect1d(TtrainLst, tPETLst, return_indices=True)
PETUN = PETfull[:, ind2, :]
PETUN = PETUN[TrainInd, :, :] # select basins
C, ind1, ind2inv = np.intersect1d(TinvLst, tPETLst, return_indices=True)
PETInvUN = PETfull[:, ind2inv, :]
PETInvUN = PETInvUN[TrainInd, :, :]
# process data, do normalization and remove nan
series_inv = np.concatenate([forcInvUN, PETInvUN], axis=2)
seriesvarLst = varFInv + ['pet']
# calculate statistics for normalization and save them to a dictionary
statDict = camels.getStatDic(attrLst=attrnewLst, attrdata=attrsUN, seriesLst=seriesvarLst, seriesdata=series_inv)
# normalize data
attr_norm = camels.transNormbyDic(attrsUN, attrnewLst, statDict, toNorm=True)
attr_norm[np.isnan(attr_norm)] = 0.0
series_norm = camels.transNormbyDic(series_inv, seriesvarLst, statDict, toNorm=True)
series_norm[np.isnan(series_norm)] = 0.0
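# Note (assumption): transNormbyDic appears to standardize each variable with the statistics
# stored in statDict (roughly x_norm = (x - mean) / std, with special transforms for some
# variables in hydroDL); NaNs are zeroed afterwards so they do not propagate into training.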
# prepare the inputs
zTrain = series_norm # used as the inputs for dPL inversion gA along with attributes
xTrain = np.concatenate([forcUN, PETUN], axis=2) # used as HBV forcing
xTrain[np.isnan(xTrain)] = 0.0
if buffOpt == 1: # repeat the first year warm up the first year itself
zTrainIn = np.concatenate([zTrain[:,0:BUFFTIME,:], zTrain], axis=1)
xTrainIn = np.concatenate([xTrain[:,0:BUFFTIME,:], xTrain], axis=1) # repeat forcing to warm up the first year
yTrainIn = np.concatenate([obsUN[:,0:BUFFTIME,:], obsUN], axis=1)
else: # no repeat, original data, the first year data would only be used as warmup for the next following year
zTrainIn = zTrain
xTrainIn = xTrain
yTrainIn = obsUN
forcTuple = (xTrainIn, zTrainIn)
attrs = attr_norm
## Train the model
# define loss function
alpha = 0.25 # a weight for RMSE loss to balance low and peak flow
optLoss = default.update(default.optLossComb, name='hydroDL.model.crit.RmseLossComb', weight=alpha)
lossFun = crit.RmseLossComb(alpha=alpha)
# define training options
optTrain = default.update(default.optTrainCamels, miniBatch=[BATCH_SIZE, RHO], nEpoch=EPOCH, saveEpoch=saveEPOCH)
# define output folder to save model results
exp_name = 'CAMELSDemo'
exp_disp = 'dPLHBV/' + puN + TDN + forType + '/BuffOpt'+str(buffOpt)+'/RMSE_para'+str(alpha)+'/' + str(randomseed) + \
'/Fold' + str(testfoldInd)
exp_info = 'T_'+str(Ttrain[0])+'_'+str(Ttrain[1])+'_BS_'+str(BATCH_SIZE)+'_HS_'+str(HIDDENSIZE)\
+'_RHO_'+str(RHO)+'_NF_'+str(Nfea)+'_Buff_'+str(BUFFTIME)+'_Mul_'+str(Nmul)
save_path = os.path.join(exp_name, exp_disp)
out = os.path.join(rootOut, save_path, exp_info) # output folder to save results
# define and load model
Ninv = zTrain.shape[-1] + attrs.shape[-1]
if TDOpt is False:
# model with all static parameters
model = rnn.MultiInv_HBVModel(ninv=Ninv, nfea=Nfea, nmul=Nmul, hiddeninv=HIDDENSIZE, inittime=BUFFTIME,
routOpt = routing, comprout=comprout, compwts=compwts, pcorr=pcorr)
# dict only for logging
optModel = OrderedDict(name='dPLHBV', nx=Ninv, nfea=Nfea, nmul=Nmul, hiddenSize=HIDDENSIZE, doReLU=True,
Tinv=Tinv, Trainbuff=BUFFTIME, routOpt=routing, comprout=comprout, compwts=compwts,
pcorr=pcorr, buffOpt=buffOpt, TDOpt=TDOpt)
else:
# model with some dynamic parameters
model = rnn.MultiInv_HBVTDModel(ninv=Ninv, nfea=Nfea, nmul=Nmul, hiddeninv=HIDDENSIZE, inittime=BUFFTIME,
routOpt=routing, comprout=comprout, compwts=compwts, staind=staind, tdlst=tdRep,
dydrop=dydrop, ETMod=ETMod)
# dict only for logging
optModel = OrderedDict(name='dPLHBVDP', nx=Ninv, nfea=Nfea, nmul=Nmul, hiddenSize=HIDDENSIZE, doReLU=True,
Tinv=Tinv, Trainbuff=BUFFTIME, routOpt=routing, comprout=comprout, compwts=compwts,
pcorr=pcorr, staind=staind, tdlst=tdRep, dydrop=dydrop,buffOpt=buffOpt, TDOpt=TDOpt, ETMod=ETMod)
# Wrap up all the training configurations to one dictionary in order to save into "out" folder as logging
masterDict = master.wrapMaster(out, optData, optModel, optLoss, optTrain)
master.writeMasterFile(masterDict)
# log statistics for normalization
statFile = os.path.join(out, 'statDict.json')
with open(statFile, 'w') as fp:
json.dump(statDict, fp, indent=4)
# Train the model
trainedModel = train.trainModel(
model,
forcTuple,
yTrainIn,
attrs,
lossFun,
nEpoch=EPOCH,
miniBatch=[BATCH_SIZE, RHO],
saveEpoch=saveEPOCH,
saveFolder=out,
bufftime=BUFFTIME)
| mhpi/dPLHBVrelease | hydroDL-dev/example/dPLHBV/traindPLHBV.py | traindPLHBV.py | py | 13,923 | python | en | code | 5 | github-code | 13 |
39167917299 | # -*- coding: utf-8 -*-
# @Author: Macpotty
# @Date: 2016-03-12 09:58:53
# @Last Modified by: Macsnow
# @Last Modified time: 2017-04-01 08:58:22
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import copy
# V = np.arange(10)
# E = np.random.randint(1, 50, size=[10, 10])
sizePop = 15
V = list(zip(np.random.random(sizePop)*100, np.random.random(sizePop)*100))
E = np.zeros([sizePop, sizePop])
for i in range(0, sizePop):
for j in range(0, sizePop):
if i == j:
E[i, j] = None
else:
E[i, j] = np.sqrt((V[i][0]-V[j][0])**2+(V[i][1]-V[j][1])**2)
class GAUnit:
def __init__(self, sizePop, dimention, bound):
self.dimention = dimention
self.bound = bound
self.fitness = 0
self.geneCode = np.zeros(self.dimention, dtype=int)
self.length = 0
self.sizePop = sizePop
# def initGrapg(self):
# V = list(zip(np.random.random(self.imention)*100, np.random.random(self.dimention)*100))
# E = np.zeros([self.dimention, self.dimention])
# for i in range(0, self.dimention):
# for j in range(0, self.dimention):
# if i == j:
# E[i, j] = None
# else:
# E[i, j] = np.sqrt((V[i][0]-V[j][0])**2+(V[i][1]-V[j][1])**2)
def generate(self):
for i in range(0, self.dimention):
gen = np.random.randint(0, self.dimention - i)
self.geneCode[i] = gen
def geneDecode(self):
self.cityIndex = copy.deepcopy(self.geneCode)
for i in range(0, self.dimention)[::-1]:
for j in range(0, i)[::-1]:
if self.cityIndex[j] <= self.cityIndex[i]:
self.cityIndex[i] += 1
return self.cityIndex
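    # Illustrative decode example: with dimention = 3 and geneCode = [1, 1, 0], the loops above
    # produce cityIndex = [1, 2, 0] -- the inversion-style gene always decodes to a valid
    # permutation of the city indices.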
def evaluateFitness(self):
self.length = 0
global E
self.geneDecode()
for i in range(0, self.dimention):
if i == self.dimention - 1:
pass
else:
self.length += E[self.cityIndex[i], self.cityIndex[i+1]]
self.length += E[1, -1]
self.fitness = self.sizePop/self.length
class GA:
def __init__(self, sizePop, dimention, bound, maxGen, params):
self.sizePop = sizePop
self.maxGen = maxGen
self.dimention = dimention
self.bound = bound
        self.params = params  # [mutation rate, crossover rate, recombination alpha]
self.population = []
self.fitness = np.zeros((self.sizePop, 1))
self.trace = np.zeros((self.maxGen, 2))
self.genTh = 0
self.best = None
self.t = 0
self.done = False
def initUnit(self):
for i in range(0, self.sizePop):
unit = GAUnit(self.sizePop, self.dimention, self.bound)
unit.generate()
self.population.append(unit)
def evaluate(self):
for i in range(0, self.sizePop):
self.population[i].evaluateFitness()
self.fitness[i] = self.population[i].fitness
def selection(self):
newPop = []
fitnessTotal = np.sum(self.fitness)
fitnessSection = np.zeros((self.sizePop, 1))
temp = 0
for i in range(0, self.sizePop):
fitnessSection[i] = temp + self.fitness[i]/fitnessTotal
temp = fitnessSection[i]
for i in range(0, self.sizePop):
seed = np.random.random()
index = 0
for j in range(0, self.sizePop - 1):
if j == 0 and seed < fitnessSection[j]:
index = 0
break
elif seed >= fitnessSection[j] and seed < fitnessSection[j+1]:
index = j + 1
break
newPop.append(self.population[index])
self.population = newPop
def recombination(self):
newPop = []
for i in range(0, self.sizePop, 2):
index1 = np.random.randint(0, self.sizePop-1)
index2 = np.random.randint(0, self.sizePop-1)
while index1 == index2:
index2 = np.random.randint(0, self.sizePop-1)
newPop.append(copy.deepcopy(self.population[index1]))
newPop.append(copy.deepcopy(self.population[index2]))
seed = np.random.random()
if seed < self.params[1]:
swapIndex = np.random.randint(1, self.dimention - 2)
for j in range(swapIndex, self.dimention - 1):
newPop[i].geneCode[j] = int(newPop[i].geneCode[j]+(newPop[i+1].geneCode[j]-newPop[i].geneCode[j])*self.params[2])
newPop[i+1].geneCode[j] = int(newPop[i+1].geneCode[j]+(newPop[i].geneCode[j]-newPop[i+1].geneCode[j])*self.params[2])
# swap
self.population = newPop
def mutation(self):
newPop = []
for i in range(0, self.sizePop):
newPop.append(copy.deepcopy(self.population[i]))
seed = np.random.random()
if seed < self.params[0]:
mutateIndex = np.random.randint(0, self.dimention - 1)
# alpha = np.random.random()
# if alpha < 0.5:
newPop[i].geneCode[mutateIndex] = int((self.bound[1, mutateIndex]-mutateIndex) ** (1 - self.t / self.maxGen))
# else:
# newPop[i].geneCode[mutateIndex] = int((self.dimention-mutateIndex)**(1-self.t/self.maxGen))
self.population = newPop
def bigBang(self):
self.initUnit()
self.evaluate()
self.best = np.max(self.fitness), np.argmax(self.fitness), copy.deepcopy(self.population[np.argmax(self.fitness)])
self.aveFitness = np.mean(self.fitness)
self.trace[self.t, 0] = (1 - self.best[0])/self.best[0]
self.trace[self.t, 1] = (1 - self.aveFitness)/self.aveFitness
while(self.t < self.maxGen - 1):
self.t += 1
self.selection()
# self.recombination()
self.mutation()
self.evaluate()
self.best = np.max(self.fitness), np.argmax(self.fitness), copy.deepcopy(self.population[np.argmax(self.fitness)])
self.aveFitness = np.mean(self.fitness)
self.trace[self.t, 0] = (1 - self.best[0])/self.best[0]
self.trace[self.t, 1] = (1 - self.aveFitness)/self.aveFitness
self.x_data, self.y_data = [], []
for i in self.best[2].cityIndex:
self.x_data.append(V[i][0])
self.y_data.append(V[i][1])
self.x_data.append(V[self.best[2].cityIndex[0]][0])
self.y_data.append(V[self.best[2].cityIndex[0]][1])
yield V[i][0], V[i][1], self.best[2].length
self.done = True
print('done')
for i in range(0, self.sizePop):
print(self.population[i].cityIndex)
class PlotGraph(GA):
def __init__(self, sizePop, dimention, bound, maxGen, params):
GA.__init__(self, sizePop, dimention, bound, maxGen, params)
global V
self.fig = plt.figure(figsize=(10, 10))
self.ax = self.fig.add_subplot(111)
self.ax.set_xlim(0, 100)
self.ax.set_ylim(0, 100)
self.ax.set_title("TSP")
self.x_data, self.y_data = [], []
self.length_text = self.ax.text(2, 95, '')
self.line, = self.ax.plot([], [], 'g-', lw=2)
def init(self):
self.length_text.set_text('')
self.line.set_data([], [])
return self.line
def generate(self):
while not self.done:
self.x_data, self.y_data = [], []
for i in self.best[2].cityIndex:
self.x_data.append(V[i][0])
self.y_data.append(V[i][1])
self.x_data.append(V[self.best[2].cityIndex[0]][0])
self.y_data.append(V[self.best[2].cityIndex[0]][1])
yield V[i][0], V[i][1], self.best[2].length
def func(self, generate):
self.length_text.set_text('length = %.2f' % self.best[2].length)
self.line.set_data(self.x_data, self.y_data)
return self.line
def animationInit(self):
self.draw = animation.FuncAnimation(self.fig, self.func, self.bigBang, init_func=self.init, blit=False, interval=50, repeat=False)
if __name__ == '__main__':
bound = np.tile([[0], [sizePop-1]], sizePop)
ga = PlotGraph(100, sizePop, bound, 2000, [0.1, 0.9, 0.25])
ga.animationInit()
plt.show()
| Thrimbda/python_code | GeneticAlgorithm/GeneticAlgorithm.py | GeneticAlgorithm.py | py | 8,447 | python | en | code | 1 | github-code | 13 |
219482703 | def Qsort(left, right):
if left < right:
pivot = arr[right]
pos = left
for i in range(left, right):
if arr[i] <= pivot:
arr[i], arr[pos] = arr[pos], arr[i]
pos += 1
arr[right], arr[pos] = arr[pos], arr[right]
Qsort(left, pos - 1)
Qsort(pos + 1, right)
if __name__ == "__main__":
arr = [2, 5, 1, 9, 3, 7, 0, 10]
print("Before sort")
print(arr)
Qsort(0, 7)
print("After sort")
print(arr) | ignis535/baekjoon | DFS, BFS, 그래프/퀵정렬.py | 퀵정렬.py | py | 506 | python | en | code | 0 | github-code | 13 |
5584259911 | # Message-queue communication: data is passed between processes as message packets.
# At any given moment only one process can take a value; the queue has an internal locking mechanism,
# so another process may block for a moment, but the blocking time is very short.
# The queue keeps data safe: the same item cannot be fetched by more than one process.
from multiprocessing import Queue,Process
from time import sleep
from random import randint
# Create the message queue
# q = Queue(5): a queue with a custom size of 5; Queue(maxsize=0) is the default and holds as many items as memory allows
#
# def request():
# for i in range(20):
# x = randint(0,100)
# y = randint(0,100)
# q.put((x,y))
#
# def handle():
# while True:
# sleep(0.5)
# try:
# x,y = q.get(timeout = 3)
# except:
# break
# else:
# print("%d + %d = %d"%(x,y,x+y))
#
# p1 = Process(target=request)
# p2 = Process(target=handle)
# p1.start()
# p2.start()
# p1.join()
# p2.join()
# import time
# from multiprocessing import Process, Queue
#
#
# def wahaha(q):
#     print(q.get()) #1 q.get() blocks until the main process calls put(1)
#     q.put(2)  # add the number 2
#
#
# if __name__ == '__main__':
# q = Queue()
# p = Process(target=wahaha, args=[q, ])
# p.start()
#     q.put(1)  # add the number 1
# time.sleep(0.1)
#     print(q.get()) #2 q.get() blocks until the child process calls put(2)
# Producer-consumer model: addresses the imbalance between data supply and demand
# https://www.cnblogs.com/xiao987334176/articles/9036763.html
import time
import random
from multiprocessing import Process, Queue
def producer(q, name, food):
for i in range(5):
        time.sleep(random.random())  # simulate production time
        print('{} produced {}{}'.format(name, food, i))
        q.put('{}{}'.format(food, i))  # put it on the queue
def consumer(q, name):
while True:
        food = q.get()  # take an item from the queue
        if food == 'done': break  # stop the loop once the 'done' sentinel is received
        time.sleep(random.random())  # simulate eating time
        print('{} ate {}'.format(name, food))
if __name__ == '__main__':
    q = Queue()  # create the queue object; without maxsize the queue size is unlimited
p1 = Process(target=producer, args=[q, '康师傅', '红烧牛肉'])
p2 = Process(target=producer, args=[q, '郑师傅', '红烧鱼块'])
    p1.start()  # start the processes
p2.start()
Process(target=consumer, args=[q, 'xiao']).start()
Process(target=consumer, args=[q, 'lin']).start()
    p1.join()  # make sure the producer processes have finished before continuing
p2.join()
    q.put('done')  # add a 'done' sentinel to the queue
q.put('done')
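    # Two 'done' sentinels are needed because there are two consumer processes:
    # each consumer exits its loop after pulling exactly one 'done' from the queue.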
# import time
# import random
# from multiprocessing import Process, JoinableQueue
#
#
# def producer(q, name, food):
# for i in range(5):
#         time.sleep(random.random())  # simulate production time
#         print('{} produced {}{}'.format(name, food, i))
#         q.put('{}{}'.format(food, i))
#     q.join()  # block until every queued item has been marked with task_done
#
#
# def consumer(q, name):
# while True:
#         food = q.get()  # take an item from the queue
#         time.sleep(random.random())  # simulate eating time
#         print('{} ate {}'.format(name, food))
#         q.task_done()  # signal q.join() that one item has been taken and processed
#
#
# if __name__ == '__main__':
#     q = JoinableQueue()  # create a joinable shared process queue
#     # the producers, i.e. the cooks
# p1 = Process(target=producer, args=[q, '康师傅', '红烧牛肉'])
# p2 = Process(target=producer, args=[q, '郑师傅', '红烧鱼块'])
#     p1.start()  # start the processes
# p2.start()
#     # the consumers, i.e. the eaters
# c1 = Process(target=consumer, args=[q, 'xiao'])
# c2 = Process(target=consumer, args=[q, 'lin'])
#     c1.daemon = True  # set the consumers as daemon processes
# c2.daemon = True
#     c1.start()  # start the processes
# c2.start()
#     p1.join()  # make sure the producer processes have finished before continuing
# p2.join() | ivoryli/myproject | class/phase2/current/day02/queue_1.py | queue_1.py | py | 3,933 | python | en | code | 0 | github-code | 13 |
15489289221 | import requests
from django.shortcuts import render
def home(request):
url='http://api.openweathermap.org/data/2.5/weather?q={}&units=imperial&appid=8fc1bb7939c21d3e79b02852cd3d6e79'
if request.method=='POST':
city=request.POST['city']
#city='las vegas'
r=requests.get(url.format(city)).json()
city_weather={
'city': city,
'temperature' : r['main']['temp'],
'description' : r['weather'][0]['description'],
'icon' : r['weather'][0]['icon']
}
        context = {'city_weather': city_weather}
        return render(request, 'mm.html', context)
else:
return render(request,'mm.html')
# Create your views here.
| mrmoon007/Weather-Forecast | home/views.py | views.py | py | 709 | python | en | code | 0 | github-code | 13 |
16184812933 | """
Count the occurrences of a specific number in a sorted array
- Unless the algorithm is designed with O(logN) time complexity, the solution is judged as exceeding the time limit
- Input : N, x (1 <= N <= 1,000,000, -10^9 <= x <= 10^9)
          N integers (-10^9 <= each element's value <= 10^9)
- Output : the number of elements in the sequence whose value is x; print -1 if there is none
"""
from sys import stdin
# Method that counts the number of elements with value x in a sorted sequence
def count_by_value(array, x):
    # number of data points
    n = len(array)
    # compute the index where x first appears
    a = first(array, x, 0, n - 1)
    # case where x does not appear in the sequence
    if a is None:
        return 0  # there are no elements with value x, so return 0
    # compute the index where x last appears
    b = last(array, x, 0, n - 1)
    # return the count
    return b - a + 1
# Binary search method that finds the first position of the target
def first(array, target, start, end):
    if start > end:
        return None
    mid = (start + end) // 2
    # return the index only when this is the leftmost element holding the target value
    if (mid == 0 or target > array[mid - 1]) and array[mid] == target:
        return mid
    # if the target is less than or equal to the middle value, search the left half
    elif array[mid] >= target:
        return first(array, target, start, mid - 1)
    # if the target is greater than the middle value, search the right half
    else:
        return first(array, target, mid + 1, end)
# Binary search method that finds the last position of the target
def last(array, target, start, end):
    if start > end:
        return None
    mid = (start + end) // 2
    # return the index only when this is the rightmost element holding the target value (n is the global input size)
    if (mid == n - 1 or target < array[mid + 1]) and array[mid] == target:
        return mid
    # if the target is smaller than the middle value, search the left half
    elif array[mid] > target:
        return last(array, target, start, mid - 1)
    # if the target is greater than or equal to the middle value, search the right half
    else:
        return last(array, target, mid + 1, end)
n, x = map(int, stdin.readline().split())
array = list(map(int, stdin.readline().split()))
# count the number of data points whose value is x
count = count_by_value(array, x)
if count == 0:
print(-1)
else:
    print(count)
""" ***** Second method - using the binary search library *****
from bisect import bisect_left, bisect_right
from sys import stdin
# Function that returns the number of data points whose value lies in [left_value, right_value]
def count_by_range(array, left_value, right_value):
    right_index = bisect_right(array, right_value)
    left_index = bisect_left(array, left_value)
    return right_index - left_index
n, x = map(int, stdin.readline().split())
array = list(map(int, stdin.readline().split()))
# count the number of data points whose value lies in the range [x, x]
count = count_by_range(array, x, x)
# if no element with value x exists
if count == 0:
    print(-1)
else:
    print(count)
"""
| akana0321/Algorithm | 이것이 코딩테스트다 with 파이썬/Previous_Question_by_Algorithm_Type/specific_number.py | specific_number.py | py | 3,184 | python | ko | code | 0 | github-code | 13 |
74870472016 | N, K = list(map(int, input().split()))
idx, cur = 1, 0  # (unused)
needs = list(map(int, input().split()))
needs = [[i + 1, j] for i, j in enumerate(needs)]  # pair each remaining need with its original 1-based index
while True:
    least = min(needs, key=lambda x: x[1])[1]  # smallest remaining need among the surviving entries
    if least * N < K:
        # all N surviving entries can absorb `least` full rounds before the smallest one runs out
        K -= least * N
        needs = [(i, j - least) for i, j in needs if j > least]  # drop the exhausted entries
        N = len(needs)
    else:
        # the K-th step lands inside the current round over the N survivors
        print(needs[(K - 1) % N][0])
        break
| yeung66/leetcode-everyday | py/ponyai-1.py | ponyai-1.py | py | 391 | python | en | code | 0 | github-code | 13 |
2852591705 | # pip install GitPython mteb beir seaborn
import os
import random
import seaborn as sns
import matplotlib.pyplot as plt
from mteb import MTEB
from mteb.evaluation.evaluators.utils import cos_sim
import numpy as np
import pandas as pd
from sentence_transformers import SentenceTransformer
import torch
if os.path.exists("sim_data.csv"):
data_emb_df = (pd.read_csv("sim_data.csv", index_col=0) * 100).round(0).astype(int)
plt.figure(figsize=(40, 24))
# define the mask to set the values in the upper triangle to True
    mask = np.triu(np.ones_like(data_emb_df, dtype=bool))  # np.bool was removed in recent NumPy versions
heatmap = sns.heatmap(
data_emb_df,
mask=mask,
vmin=data_emb_df.values.min(),
vmax=data_emb_df.values.max(),
annot=True,
cmap='Blues',
fmt='g',
)
heatmap.set_xticklabels(heatmap.get_xmajorticklabels(), fontsize=16)#, fontweight="bold")
heatmap.set_yticklabels(heatmap.get_ymajorticklabels(), fontsize=16)#, fontweight="bold")
# Save
plt.savefig('heatmap_data.pdf', dpi=450, bbox_inches='tight')
exit()
### GLOBAL VARIABLES ###
DATAPATH = "./"
SEED = 42
K_SAMPLES = 100
LEN_KEYS = {
    "text",
    "sentences",
    "sentence1",
    "sentence2",
    "sent1",
    "sent2",
    "query",
    "positive",
    "negative",
    "queries",
    "corpus",
    "machine_summaries",
    "human_summaries",
}
TASK_LIST_CLASSIFICATION = [
"AmazonCounterfactualClassification",
"AmazonPolarityClassification",
"AmazonReviewsClassification",
"Banking77Classification",
"EmotionClassification",
"ImdbClassification",
"MassiveIntentClassification",
"MassiveScenarioClassification",
"MTOPDomainClassification",
"MTOPIntentClassification",
"ToxicConversationsClassification",
"TweetSentimentExtractionClassification",
]
TASK_LIST_CLUSTERING = [
"ArxivClusteringP2P",
"ArxivClusteringS2S",
"BiorxivClusteringP2P",
"BiorxivClusteringS2S",
"MedrxivClusteringP2P",
"MedrxivClusteringS2S",
"RedditClustering",
"RedditClusteringP2P",
"StackExchangeClustering",
"StackExchangeClusteringP2P",
"TwentyNewsgroupsClustering",
]
TASK_LIST_PAIR_CLASSIFICATION = [
"SprintDuplicateQuestions",
"TwitterSemEval2015",
"TwitterURLCorpus",
]
TASK_LIST_RERANKING = [
"AskUbuntuDupQuestions",
"MindSmallReranking",
"SciDocsRR",
"StackOverflowDupQuestions",
]
TASK_LIST_RETRIEVAL = [
"ArguAna",
"ClimateFEVER",
"CQADupstackAndroidRetrieval",
"CQADupstackEnglishRetrieval",
"CQADupstackGamingRetrieval",
"CQADupstackGisRetrieval",
"CQADupstackMathematicaRetrieval",
"CQADupstackPhysicsRetrieval",
"CQADupstackProgrammersRetrieval",
"CQADupstackStatsRetrieval",
"CQADupstackTexRetrieval",
"CQADupstackUnixRetrieval",
"CQADupstackWebmastersRetrieval",
"CQADupstackWordpressRetrieval",
"DBPedia",
"FEVER",
"FiQA2018",
"HotpotQA",
"MSMARCO",
"NFCorpus",
"NQ",
"QuoraRetrieval",
"SCIDOCS",
"SciFact",
"Touche2020",
"TRECCOVID",
]
TASK_LIST_STS = [
"BIOSSES",
"SICK-R",
"STS12",
"STS13",
"STS14",
"STS15",
"STS16",
"STS17",
"STS22",
"STSBenchmark",
]
TASK_LIST_SUMMARIZATION = [
"SummEval",
]
TASK_LIST_EN = (
TASK_LIST_CLASSIFICATION
+ TASK_LIST_CLUSTERING
+ TASK_LIST_PAIR_CLASSIFICATION
+ TASK_LIST_RERANKING
+ TASK_LIST_RETRIEVAL
+ TASK_LIST_STS
+ TASK_LIST_SUMMARIZATION
)
### LOGIC ###
def get_samples_beir(hf_hub_name):
# Somehow needs to be set in the function scope
random.seed(SEED)
from beir.datasets.data_loader import GenericDataLoader as BeirDataLoader
path = os.path.join(DATAPATH, hf_hub_name)
print("GOT PATH", path)
split = "validation" if "MSMARCO" in hf_hub_name else "test"
if not os.path.exists(path):
from beir import util
if "cqadupstack" in hf_hub_name:
hf_hub_name = "cqadupstack"
url = f"https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{hf_hub_name}.zip"
util.download_and_unzip(url, DATAPATH)
corpus, queries, relevant_docs = BeirDataLoader(path).load(split=split)
# Pick shortest k samples
samples = [v["text"] + " " + v["title"] for v in sorted(list(corpus.values()), key=lambda x: len(x["text"]))[:K_SAMPLES]]
# Optionally randomly pick
#samples = [v["text"] + " " + v["title"] for v in random.choices(sorted(list(corpus.values()), key=lambda x: len(x["text"])), k=K_SAMPLES)]
return samples
def load_data(hf_hub_name, subset=None):
"""
Load dataset from Hub via cloning for easy offline usage with HF_DATASETS_OFFLINE=1
Can be replaced with just `load_dataset(hf_hub_name, subset)` if preferred
"""
from datasets import load_dataset
path = os.path.join(DATAPATH, hf_hub_name)
if os.path.exists(path):
dataset = load_dataset(path, subset)
else:
from git import Repo
Repo.clone_from("https://huggingface.co/datasets/mteb/" + hf_hub_name, path)
dataset = load_dataset(path, subset)
return dataset
def get_samples_ds(hf_hub_name):
ds = load_data(hf_hub_name)
# Optionally shuffle
# .shuffle(seed=SEED)
assert "test" in ds, f"No test set for {hf_hub_name}"
len_keys = list(set(ds["test"].features.keys()) & LEN_KEYS)
split = "test"
k = len_keys[0]
if isinstance(ds[split][k][0], str):
# Select K shortest examples
samples = sorted([x for x in ds[split][k]], key=len)[:K_SAMPLES]
elif isinstance(ds[split][k][0], list):
assert isinstance(ds[split][k][0][0], str), f"Too nested: {k}"
# Select K shortest examples
samples = [y for x in ds[split][k] for y in x]
samples = sorted(samples, key=len)[:K_SAMPLES]
# Optionally randomly select
# random.choices(samples, k=K_SAMPLES)
else:
raise ValueError(f"Unknown type {type(ds[split][k])}")
return samples
embeddings = {}
model = SentenceTransformer("sentence-transformers/sentence-t5-xxl")
# Optionally custom selection
# TASKS = ["ArguAna", "ClimateFEVER", "DBPedia", "FEVER", "FiQA2018", "HotpotQA", "NFCorpus", "NQ", "QuoraRetrieval", "SCIDOCS", "SciFact", "Touche2020", "TRECCOVID"]
TASKS = TASK_LIST_EN
for task in MTEB(tasks=TASKS).tasks:
print("Task: ", task)
if "hf_hub_name" in task.description:
hub_name = hub_url = task.description.get("hf_hub_name")
samples = get_samples_ds(hub_name.split("/")[-1])
if "beir_name" in task.description:
hub_name = hub_url = "BeIR/" + task.description.get("beir_name")
samples = get_samples_beir("/".join(hub_name.split("/")[1:]))
embeddings[task.description["name"]] = model.encode(samples)
# Plot 1: Compute all cos sims & then average
"""
data_dict = []
for i, task_1 in enumerate(TASKS):
data_dict.append({task_2: torch.mean(cos_sim(embeddings[task_1], embeddings[task_2])).item() for j, task_2 in enumerate(TASKS)})
data_df = pd.DataFrame(data_dict)
data_df.set_index(data_df.columns, inplace=True)
# Save
data_df.to_csv("data.csv")
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(32, 16))
# define the mask to set the values in the upper triangle to True
mask = np.triu(np.ones_like(data_df, dtype=np.bool))
#heatmap = sns.heatmap(data_df, mask=mask, vmin=-1, vmax=1, annot=True, cmap='Blues')
heatmap = sns.heatmap(data_df, mask=mask, vmin=data_df.values.min(), vmax=data_df.values.max(), annot=True, cmap='Blues')
heatmap.set_title('Similarity of MTEB datasets', fontdict={'fontsize':18}, pad=16)
plt.savefig('heatmap_data.pdf', dpi=300, bbox_inches='tight')
"""
# Plot 2: Average embeddings & then compute cos_sim
data_dict_emb = []
for i, task_1 in enumerate(TASKS):
data_dict_emb.append({task_2: cos_sim(np.mean(embeddings[task_1], axis=0), np.mean(embeddings[task_2], axis=0)).item() for j, task_2 in enumerate(TASKS)})
data_emb_df = pd.DataFrame(data_dict_emb)
data_emb_df.set_index(data_emb_df.columns, inplace=True)
plt.figure(figsize=(36, 24))
# define the mask to set the values in the upper triangle to True
mask = np.triu(np.ones_like(data_emb_df, dtype=bool))
heatmap = sns.heatmap(data_emb_df, mask=mask, vmin=data_emb_df.values.min(), vmax=data_emb_df.values.max(), annot=True, cmap='Blues')
#heatmap.set_title('Similarity of MTEB datasets', fontdict={'fontsize':18}, pad=16)
# Save
data_emb_df.to_csv("sim_data.csv")
plt.savefig('heatmap_data.pdf', dpi=450, bbox_inches='tight')
# Plot 3: Min (/Max) embeddings & then compute cos_sim
"""
data_dict_emb = []
for i, task_1 in enumerate(TASKS):
data_dict_emb.append({task_2: cos_sim(np.min(embeddings[i], axis=0), np.min(embeddings[j], axis=0)).item() for j, task_2 in enumerate(TASKS)})
data_emb_df = pd.DataFrame(data_dict_emb)
data_emb_df.set_index(data_emb_df.columns, inplace=True)
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(32, 16))
# define the mask to set the values in the upper triangle to True
mask = np.triu(np.ones_like(data_emb_df, dtype=np.bool))
heatmap = sns.heatmap(data_emb_df, mask=mask, vmin=data_emb_df.values.min(), vmax=data_emb_df.values.max(), annot=True, cmap='Blues')
heatmap.set_title('Similarity of MTEB datasets', fontdict={'fontsize':18}, pad=16)
plt.savefig('heatmap_data.pdf', dpi=300, bbox_inches='tight')
"""
| embeddings-benchmark/mtebscripts | plotstables/dataset_sim.py | dataset_sim.py | py | 9,421 | python | en | code | 7 | github-code | 13 |
19147172654 | import json
from gdown.download_folder import download_and_parse_google_drive_link # noqa
from gdown.download_folder import get_directory_structure # noqa
def nest_dict(dict1):
result = {}
for k, v in dict1.items():
# for each key call method split_rec which
# will split keys to form recursively
# nested dictionary
split_rec(k, v, result)
return result
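# For example, nest_dict({"a/b.txt": "id1", "a/c/d.txt": "id2"}) returns
# {"a": {"b.txt": "id1", "c": {"d.txt": "id2"}}} -- slash-separated paths become nested dicts.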
def split_rec(k, v, out):
# splitting keys in dict
# calling_recursively to break items on '_'
k, *rest = k.split("/", 1)
if rest:
split_rec(rest[0], v, out.setdefault(k, {}))
else:
out[k] = v
def main():
url = "https://drive.google.com/drive/folders/16CmKNZWIK907Cj5J-_zaEy4YuKOTjupK"
return_code, gdrive_file = download_and_parse_google_drive_link(
url, remaining_ok=True
)
assert return_code
d = {
folder: file_id for file_id, folder in get_directory_structure(gdrive_file, "")
}
d = {k: v for k, v in d.items() if v is not None and "64px" not in k}
print(json.dumps(nest_dict(d), indent=4))
if __name__ == "__main__":
main()
| ethanluoyc/corax | corax/datasets/tfds/vd4rl/generate_vd4rl_file_list.py | generate_vd4rl_file_list.py | py | 1,127 | python | en | code | 27 | github-code | 13 |
6105640987 | import joblib
import shap
from utils.settings import *
from utils.utils import load_data
import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--combined', action='store_true',
help='Path to model which should be explained')
parser.add_argument('-s', '--sample', action='store_true',
help='Sample data to boost performance')
args = parser.parse_args()
print(args)
neural_embedding = True
if args.combined:
model_path = combined_model_path
only_statistics = False
embedding_dim = 768
max_display = 10
else:
model_path = statistics_model_path
only_statistics = True
embedding_dim = 0
max_display = 12
print('Load model from file')
clf = joblib.load(model_path)
print('Load training data')
X_train_vec, _, _ = load_data(training_data_path, neural_embedding_path=training_feature_path,
neural_embedding=neural_embedding, only_statistics=only_statistics,
tfidf_vectorizer=tfidf_vectorizer_path, has_label=True)
if args.sample:
idx = np.random.randint(X_train_vec.shape[0], size=10)
X_train_vec = X_train_vec.tocsr()[idx]
print('Building explainer and calculating SHAP values')
explainer = shap.KernelExplainer(clf.predict, X_train_vec)
shap_values = explainer.shap_values(X_train_vec.tocsr())
features_names = [f'distil_%i'%i for i in range(embedding_dim)] + \
['mtd', 'ps3', 'asl', 'pw6', 'ps1', 'asc', 'wstf1', 'wstf2', 'wstf3', 'wstf4', 'fre_amstad', 'SMOG']
shap.summary_plot(shap_values, X_train_vec, feature_names=features_names, plot_type='bar', max_display=max_display)
| MiriUll/text_complexity | feature_relevance_analysis.py | feature_relevance_analysis.py | py | 1,705 | python | en | code | 3 | github-code | 13 |
36733262750 | #!/usr/bin/python3
""" Takes in a URL and an email, sends a POST request to the passed URL
with the email as a parameter, and displays the body of the response.
"""
if __name__ == "__main__":
from urllib.request import Request, urlopen
from urllib.parse import urlencode
import sys
url = sys.argv[1]
val = {'email': sys.argv[2]}
data = urlencode(val)
data = data.encode('ascii')
req = Request(url, data)
with urlopen(req) as response:
body = response.read()
body_decoded = body.decode('utf-8', "replace")
print(body_decoded)
| SNderi/alx-higher_level_programming | 0x11-python-network_1/2-post_email.py | 2-post_email.py | py | 588 | python | en | code | 0 | github-code | 13 |
10385692015 | import matplotlib
matplotlib.use('Agg')
import seaborn as sns, numpy as np
sns.set();
#np.random.seed(0)
#x = np.random.randn(100)
#sns_plot = sns.distplot(x)
#figure = sns_plot.get_figure()
#figure.savefig('/tmp/yzy.output.png', dpi=400)
import fileinput
import time
# _pos_dict = {
# "U_PREF": 30,
# "V_AUDIENCE_NUM": 102
# }
#
#
# length_require = 147
#
# _pos = []
#
# _label_info = list(range(1, 7, 1))
# _user_info = [8, 16, 18, 19, 21]
# _anchor_info = [36, 41]
# _video_info = list(range(127, 147, 1)) + [_pos_dict["U_PREF"], _pos_dict["V_AUDIENCE_NUM"]]
#
# _pos.extend(_label_info)
# _pos.extend(_user_info)
# _pos.extend(_anchor_info)
# _pos.extend(_video_info)
#
# _filter = {
# "POSID": {
# "index": 7,
# "in": {
# "8002",
# "9002"
# }
# },
# "ZONE": {
# "index": 125,
# "in": {
# "A_US"
# }
# }
# }
# _exp = ["cheez_46", "cheez_45", "cheez_44", "cheez_43"]
# _country = ["US", "IN"]
#
# _pos = {
# "SVE_EXP": 7,
# "SVU_COUNTRY": 15,
# "SVV_COUNTRY": 17
# }
# count_same = dict()
# count_diff = dict()
# for exp in _exp:
# for country in _country:
# key = country + "#" + exp
# count_same[key] = 0
# count_diff[key] = 0
import json
import pandas as pd
l = []
d = {"predictor_id":[], "cal_d":[]}
for line in fileinput.input():
line = line.rstrip('\n')
j = json.loads(line)
d["predictor_id"].append(j["predictor_id"])
d["cal_d"].append(j["cal_d"])
df = pd.DataFrame(d)
#print(df)
df["cal_d"] = df["cal_d"].astype(int)//10
#df["times"] = 1
#print(df)
#print(df[['predictor_id', 'cal_d', "times"]].groupby(['predictor_id', 'cal_d']).agg(['count']))
#for predictor_id in ["liveme_tensorflow_dnn_01", "liveme_tensorflow_dnn_02", "sv_tensorflow_dnn_01"]:
for predictor_id in ["liveme_tensorflow_dnn_01"]:
df_tmp = df[df.predictor_id == predictor_id][["cal_d"]]
arr = np.histogram(df_tmp, bins=[0, 1, 2, 3, 4, 5, 10, 100, 200, 500, 1200, 1400, 1000000000])
print("\t".join([str(k) for k in arr[0]]))
print("\t".join([str(k) for k in arr[1]]))
sns_plot = sns.distplot(df_tmp[["cal_d"]], kde=False)
figure = sns_plot.get_figure()
figure.savefig('/tmp/yzy.ps.log.cal_d.dist.' + predictor_id + '.png', dpi=400)
# if len(line) < length_require:
# continue
#
# is_drop = False
#
# for k, v in _filter.iteritems():
# if line[v["index"]] not in v["in"]:
# is_drop = True
#
# if is_drop:
# continue
#
# res = "\t".join([line[i] if line[i] != "None" else "" for i in _pos])
#
# # st_str = line[5]
# # if not st_str.isdigit():
# # line_time_tm_hour = ""
# # line_time_tm_wday = ""
# # else:
# # line_time = time.gmtime(int(st_str))
# # line_time_tm_hour = str(line_time.tm_hour)
# # line_time_tm_wday = str((line_time.tm_wday + 1) % 7)
# #
# # res += "\t" + line_time_tm_hour + "\t" + line_time_tm_wday
#
# print(res)
#
# for exp in _exp:
# for country in _country:
# key = country + "#" + exp
# if exp in line[_pos["SVE_EXP"]] and country == line[_pos["SVU_COUNTRY"]]:
# if line[_pos["SVU_COUNTRY"]] == line[_pos["SVV_COUNTRY"]]:
# count_same[key] += 1
# else:
# count_diff[key] += 1
#
# print "\t".join(["filter_type", "same_country", "diff_country"])
#
# for key, val in count_same.iteritems():
# print "\t".join([str(key), str(val), str(count_diff[key])])
| alever520/tensorflow-ctr | python/yzy/data/parser/ps_log_parser.py | ps_log_parser.py | py | 3,628 | python | en | code | 0 | github-code | 13 |
4114903898 | from djitellopy import tello
from threading import Thread
from pygame import mixer
import TestKeyboard as kp
import numpy as np
import time
import cv2
import os
kp.init()
mixer.init()
me = tello.Tello()
me.connect()
print(me.get_battery())
global img
me.streamon()
w,h = 360,200
fbRange = [6200,6800]
pid=[0.4,0.4,0]
pError = 0
def findFace(img):
faceCascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(imgGray, 1.2,8)
myFaceListC = []
myFaceListArea = []
for(x,y,w,h) in faces:
mixer.music.load("SNAP.mp3")
# Setting the volume
mixer.music.set_volume(0.7)
# Start playing the song
mixer.music.play()
#cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),2)
cx = x+w //2
cy = y+h //2
area = w*h
#cv2.circle(img,(cx,cy),5,(0,255,0),cv2.FILLED)
myFaceListC.append([cx,cy])
myFaceListArea.append(area)
if len(myFaceListArea) != 0:
i = myFaceListArea.index(max(myFaceListArea))
cv2.imwrite(f'C:/Users/OCG/Desktop/NAVY/Foto Flight{time.time()}.jpg',img)
return img, [myFaceListC[i],myFaceListArea[i]]
else:
return img, [[0,0],0]
def trackFace(me,info,w,pid,pError):
area = info[1]
x,y = info[0]
fb = 0
error = x-w//2
speed = pid[0]*error +pid[1]*(error-pError)
speed = int(np.clip(speed,-100,100))
if area > fbRange[0] and area <fbRange[1]:
fb = 0
elif area >fbRange[1]:
fb =-20
elif area <fbRange[0] and area != 0:
fb = 20
if x == 0:
speed = 0
error = 0
me.send_rc_control(0,fb,0,speed)
return error
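# Note: trackFace is a simple PD (proportional-derivative) controller -- yaw speed is
# pid[0] * error + pid[1] * (error - pError), where error is the horizontal offset of the
# face centre from the frame centre, while fb moves the drone forward/backward to keep the
# detected face area within fbRange.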
def getKeyboardInput():
lr,fb,ud,yv = 0,0,0,0
speed = 50
if kp.getKey("LEFT"): lr = -speed
elif kp.getKey("RIGHT"): lr = speed
if kp.getKey("UP"): fb = speed
elif kp.getKey("DOWN"): fb = -speed
if kp.getKey("w"): ud = speed
elif kp.getKey("s"): ud = -speed
if kp.getKey("a"): yv = speed
elif kp.getKey("d"): yv = -speed
if kp.getKey("q"): me.land()
if kp.getKey("e"): me.takeoff()
if kp.getKey("x"): print(me.get_battery())
return [lr,fb,ud,yv]
while True:
vals = getKeyboardInput()
me.send_rc_control(vals[0],vals[1],vals[2],vals[3])
img = me.get_frame_read().frame
img, info = findFace(img)
pError = trackFace(me,info,w,pid,pError)
print("Area",info[1],"Center",info[0])
cv2.imshow("Image",img)
cv2.waitKey(1)
| Oscar6647/Parker-Drone | FotoDrone.py | FotoDrone.py | py | 2,683 | python | en | code | 0 | github-code | 13 |
28567754298 | """
Module Docstring
"""
import pandas as pd
import plotly.express as px
import snoop
from dash import Dash, dash_table, dcc, html
from snoop import pp
def type_watch(source, value):
return f"type({source})", type(value)
snoop.install(watch_extras=[type_watch])
app = Dash(__name__)
df = pd.read_csv(
"https://raw.githubusercontent.com/plotly/datasets/master/gapminder2007.csv"
)
# App layout
app.layout = html.Div(
[
html.Div(children="My First App with Data and a Graph"),
dash_table.DataTable(data=df.to_dict("records"), page_size=10),
dcc.Graph(figure=px.histogram(df, x="continent", y="lifeExp", histfunc="avg")),
]
)
# Run the app
if __name__ == "__main__":
app.run_server(debug=True)
| miccaldas/oficina | oficina/dash/app.py | app.py | py | 742 | python | en | code | 0 | github-code | 13 |
10446825046 | import torch
import torch.nn as nn
import torch.nn.functional as F
from blocks import *
class ResNet(nn.Module):
def __init__(self, block, num_blocks, in_channel=3, zero_init_residual=False):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(in_channel, 64, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves
# like an identity. This improves the model by 0.2~0.3% according to:
# https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for i in range(num_blocks):
stride = strides[i]
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x, layer=100):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.avgpool(out)
out = torch.flatten(out, 1)
return out
def resnet18(**kwargs):
return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
model_dict = {
'resnet18': [resnet18, 512],
}
class SupConResNet(nn.Module):
"""backbone + projection head"""
def __init__(self, name='resnet18', head='mlp', feat_dim=128):
super(SupConResNet, self).__init__()
model_fun, dim_in = model_dict[name]
self.encoder = model_fun()
if head == 'linear':
self.head = nn.Linear(dim_in, feat_dim)
elif head == 'mlp':
self.head = nn.Sequential(
nn.Linear(dim_in, dim_in),
nn.ReLU(inplace=True),
nn.Linear(dim_in, feat_dim)
)
else:
raise NotImplementedError(
'head not supported: {}'.format(head))
def forward(self, x):
feat = self.encoder(x)
feat = F.normalize(self.head(feat), dim=1)
return feat
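# A hypothetical shape check (not part of the original file); it assumes a CIFAR-sized input
# and is only meant to illustrate the expected output of the projection head.
def _supcon_shape_check():
    model = SupConResNet(name='resnet18', head='mlp', feat_dim=128)
    x = torch.randn(4, 3, 32, 32)   # batch of 4 CIFAR-sized images
    feats = model(x)                 # L2-normalized projections
    assert feats.shape == (4, 128)
    return feats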
class MaskGenerator(nn.Sequential):
def __init__(self, dataset, out_channels=1):
super(MaskGenerator, self).__init__()
# For MNIST
if dataset == "mnist":
input_channel = 1
channel_init = 16
steps = 2
# For CIFAR
elif dataset == "cifar10":
input_channel = 3
channel_init = 32
steps = 3
elif dataset == 'voc2012':
input_channel = 3
channel_init = 64
steps = 3
# channel_current is 'in_channel'
# channel_next is 'out_channel'
channel_current = input_channel
channel_next = channel_init
for step in range(steps):
self.add_module("convblock_down_{}".format(2 * step),
Conv2dBlock(channel_current, channel_next))
self.add_module("convblock_down_{}".format(2 * step + 1),
Conv2dBlock(channel_next, channel_next))
self.add_module("downsample_{}".format(step), DownSampleBlock())
if step < steps - 1:
channel_current = channel_next
channel_next *= 2
self.add_module("convblock_middle", Conv2dBlock(channel_next, channel_next))
channel_current = channel_next
channel_next = channel_current // 2
for step in range(steps):
self.add_module("upsample_{}".format(step), UpSampleBlock())
self.add_module("convblock_up_{}".format(2 * step), Conv2dBlock(channel_current, channel_current))
# Add another convblock_up. However, if the layer is the last layer, do not use ReLU!
if step == steps - 1:
self.add_module(
"convblock_up_{}".format(2 * step + 1), Conv2dBlock(channel_current, channel_next, relu=False)
)
else:
self.add_module("convblock_up_{}".format(2 * step + 1), Conv2dBlock(channel_current, channel_next))
channel_current = channel_next
channel_next = channel_next // 2
if step == steps - 2:
if out_channels is None:
channel_next = input_channel
else:
channel_next = out_channels
def forward(self, x):
for module in self.children():
x = module(x)
x = nn.Tanh()(x) / 2 + 0.5
x = torch.round(x)
return x | khangt1k25/Contrastive-Bottleneck-Segmentation | models.py | models.py | py | 5,678 | python | en | code | 0 | github-code | 13 |
39352537376 | class Time:
"""Represents the time of day.
attributes: hour, minute, second
"""
def __init__(self, hour=0, minute=0, second=0):
"""Initializes a time object.
hour: int
minute: int
second: int or float
"""
self.hour = hour
self.minute = minute
self.second = second
def print_time(self):
"""Prints a string representation of the time."""
print('%.2d:%.2d:%.2d' % (self.hour, self.minute, self.second))
    def is_after(self, time):
        """Returns True if this time is after the given time; False otherwise."""
time1=3600*self.hour+60*self.minute+self.second
time2=3600*time.hour+60*time.minute+time.second
return time1 > time2
    def increment(self, seconds):
        """Increments the time in place without using loops or if statements; returns the new time as a string."""
a=self.second+seconds
b=self.minute+a/60
self.hour+=int(b/60)
self.minute=int(b%60)
self.second=a%60
return '%.2d:%.2d:%.2d' % (self.hour, self.minute, self.second)
    def pureinc(self,seconds):
        """Pure version of increment: works on a copy instead of modifying self and returns the result as a string."""
import copy
time2=copy.copy(self)
time2.increment(seconds)
return '%.2d:%.2d:%.2d' % (time2.hour, time2.minute, time2.second)
def time_to_int(self):
"""Computes the number of seconds since midnight. Time: Time object"""
minutes = self.hour * 60 + self.minute
seconds = minutes * 60 + self.second
return seconds
def __str__(self):
"""Special method for string and useful for debugging. Returns a string representation of the time."""
return '%.2d:%.2d:%.2d' % (self.hour, self.minute, self.second)
def __add__(self, other):
"""Adds two Time objects or a Time object and a number.
other: Time object or number of seconds
"""
#The built-in function isinstance takes a value and a class object, and returns True if the value is an instance of the class.
if isinstance(other, Time):
return self.add_time(other)
else:
return self.increment2(other)
def __radd__(self, other):
"""Adds two Time objects or a Time object and a number."""
return self.__add__(other)
def add_time(self, other):
"""Adds two time objects. Self (for example Object t1), t2: Time. Returns: Time
assert: checks a given invariant and raises an exception if it fails
replace:
if not valid_time(t1) or not valid_time(t2):
raise ValueError('invalid Time object in add_time')"""
assert self.valid_time() and other.valid_time(), "Time is not valid"
seconds = self.time_to_int() + other.time_to_int()
return int_to_time(seconds)
def increment2(self, seconds):
"""Return new Time by adding seconds"""
seconds += self.time_to_int()
return int_to_time(seconds)
def valid_time(self):
"""Return False if time is not correct, if time is correct return true"""
if self.hour < 0 or self.minute < 0 or self.second < 0:
return False
if self.minute >= 60 or self.second >= 60:
return False
return True
def print_attributes(self):
"""print_attributes traverses the dictionary and prints each attribute name and its corresponding value,
If you are not sure whether an object has a particular attribute.
The built-in function getattr takes an object and an attribute name (as a string) and returns the attribute’s value."""
for attr in vars(self):
print(attr, getattr(self, attr))
def int_to_time(seconds):
"""Makes a new Time object. Seconds: int seconds since midnight."""
minutes, second = divmod(seconds, 60)
hour, minute = divmod(minutes, 60)
time = Time(hour, minute, second)
return time
#creation of an object
time = Time(11, 59, 30)
#calling print_time method to print the Time object to a string as time
time.print_time()
#creation of two further time objects, t1 and t2
t1 = Time(16, 59, 59)
t2 = Time(13, 54, 20)
#calling method t1 is after t2 --> true
print("is_after:", t1.is_after(t2))
#increment method. t2 will increase by 60 seconds
print("increment:", t2.increment(60))
#pure function same as increment method
print("pureinc:", t2.pureinc(10))
#convert integer to time
print("int_to_time:", int_to_time(43170))
#convert time to integer
print("time_to_int():", time.time_to_int())
#t1 add with t2
print("add_time:", t1.add_time(t2))
#shorter and easier than increment method
print("increment2:", time.increment2(1337))
#special method __str__: print_time is not necessary anymore
specialMethodTime = Time(9,45)
print("__str__ special method:", specialMethodTime)
#__add__ special method check: + operator on Time objects
m1 = Time(9, 3, 5)
m2 = Time(10, 27, 5)
print("__add__ special method:", m1+m2)
print(m1 + 1345)
print(1345 + m1)
#functions that work with several types are called polymorphic! Example of polymorphism with the built-in function sum:
z1 = Time(7, 43)
z2 = Time(7, 41)
z3 = Time(7, 37)
total = sum([z1, z2, z3])
print("Example of Polymorphism sum([z1, z2, z3]:", total)
#calling method valid_time, check if time is valid
print("Time is:", t1.valid_time())
#calling print_attributes
print(t1.print_attributes())
#Another way to access attributes is the built-in function vars, which takes an object and returns a dictionary that maps from attribute names (as strings) to their values:
print(vars(t2))
| gicanon/class_time | class Time.py | class Time.py | py | 5,702 | python | en | code | 0 | github-code | 13 |
27409432235 | import sys
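# Usage (as implied by the argv handling in main() below):
#   python density_to_wig.py <input_counts.txt> <output.wig>
# where each input line is "<chrom> <position> <count>", whitespace-separated.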
def main():
infile = sys.argv[1]
outfile = sys.argv[2]
with open(infile, 'r') as f:
with open(outfile,'w') as out:
out.write( "track type=wiggle_0" )
current_chrom = None
total = 0
reads = {}
for line in f:
chrom,pos,count = line.strip().split()
pos,count = int(pos),float(count)
# If this is the first entry I've ever read, store the chrom name
if current_chrom is None:
current_chrom = chrom
# If the current line has a different chromosome,
# write out a new header and update the current_chrom variable
if chrom != current_chrom:
if total > 0:
print( current_chrom,total)
out.write( "\n" + "variableStep chrom="+current_chrom )
for my_pos,my_count in sorted(reads.items(),
key=lambda x: x[0]):
out.write("\n"+str(my_pos)+"\t"+str(my_count))
# Reset the variables for the new chromosome
current_chrom = chrom
total = 0
reads = {}
# If the line has non-zero reads at the position,
# write to the wig file, and update the total
if count != 0:
reads[pos] = count
total += count
# Write the final chromosome to the file
print( current_chrom,total )
out.write( "\n" + "variableStep chrom="+chrom )
for my_pos,my_count in sorted(reads.items(),
key=lambda x: x[0]):
out.write("\n"+str(my_pos)+"\t"+str(my_count))
# If the current line has a different chromosome,
# write out a new header and update the current_chrom variable
if chrom != current_chrom:
out.write( "\n" + "variableStep chrom="+chrom )
print( current_chrom,total )
current_chrom = chrom
total = 0
if __name__=="__main__":
main()
| gwlilabmit/MTC_2023_Scripts | Raw Data Analysis Scripts/density_to_wig.py | density_to_wig.py | py | 2,290 | python | en | code | 0 | github-code | 13 |
26414775142 | from ursina import *
import GameConfiguration
from Screens.Screen import Screen
from Graphics.Container import Container
from Graphics.GameButton import GameButton
from utils.Event import Event
from .TestingCategories.Entity.Entity import Entity
from .TestingCategories.UI.UI import UI
from .TestingCategories.Components.Components import Components
from .TestingCategories.Gameplay.Gameplay import Gameplay
from .TestingScreen import TestingScreen
from .Test import Test
from .TestTypes import TestTypes
class Testing(Screen):
def __init__(self):
super().__init__(
False
)
self.screen = TestingScreen(
position=(0.32, -0.1),
scale=(0.7, 0.7),
color=color.gray,
parent=self
)
Test.screen = self.screen
self.category_container = Container(
position=(0, 0.45),
scale=(1.5, 0.35),
parent=self
)
self.tests_container = Container(
position=window.left,
origin=(-0.5, 0),
scale=(0.35, 0.5),
parent=self
)
self.tests_option_container = Container(
position=(-0.22, -0.3),
origin=(0, 0.5),
scale=(0.3, 1),
parent=self
)
self.categories = [Entity(), UI(), Components(), Gameplay()]
self.tests = []
self.screen_info_text = Text(
"",
position=(-0.5, 0.5, -1),
origin=(-0.52, 0.55),
parent=self.screen
)
self.testing_info_text = Text(
"",
position=(0.5, 0.5, -1),
origin=(0.52, 0.55),
parent=self.screen
)
self.testing_info_text.disable()
self.category = None
self.change_category(Entity())
reload_button = GameButton(
"Reload",
parent=self.tests_option_container,
position=(0, 0.57),
scale=(1, 0.07)
)
reload_button.on_click = self.reload_test
x = 0
space_coefficient = 1
scale_x = 1 / (space_coefficient * len(self.categories))
center_offset = (len(self.categories) - 1) * scale_x / 2
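        # Local helper so each button's callback captures its own `category`;
        # a lambda created directly in the loop below would close over the loop
        # variable, and every button would end up selecting the last category.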
def assign_click(button, category):
button.on_click = lambda: self.change_category(category)
for category in self.categories:
button = GameButton(
category.name,
model='quad',
parent=self.category_container
)
button.scale = (scale_x - (scale_x * 0.1), 0.4) if len(self.categories) > 2 else (0.4, 0.4)
button.text_entity.scale = (0.3, 0.5)
button.position = ((x * scale_x) - center_offset, -0.1)
assign_click(button, category)
x += 1
@property
def color(self):
return color.rgb(35, 35, 35)
@property
def fades(self) -> bool:
return False
def change_category(self, category):
if self.category:
self.category.selected_test.unload()
self.category = category
self.category.selected_test.load()
self.load_category()
def load_category(self):
"""
load a category
"""
self.clear_tests()
for test in self.category.tests:
def assign_click(button, index):
clicky = Event("onClick", 0)
clicky += lambda: self.category.select_test(index)
clicky += self.update_screen_text
clicky += self.display_test
button.on_click = clicky
index = self.category.tests.index(test)
button = GameButton(
test.name,
position=(0.53, 0.5 - (index * 0.12)),
scale=(1, 0.1),
parent=self.tests_container
)
assign_click(button, index)
self.tests.append(button)
self.update_screen_text()
self.display_test()
def display_test(self):
"""
Load the selected test
"""
y = 0.5
for button in self.category.selected_test.method_buttons:
button.parent = self.tests_option_container
button.enable()
button.position = (0, y)
y -= 0.06
if self.category.selected_test.type == TestTypes.ScreenTest:
self.tests_option_container.animate_position((-0.22, -0.3), duration=GameConfiguration.fade_time)
self.screen.fade_in(1, GameConfiguration.fade_time)
self.screen_info_text.animate_position((-0.5, 0.5, -1))
self.screen_info_text.animate_scale((1, 1), GameConfiguration.fade_time)
else:
self.tests_option_container.animate_position((0.65, -0.3), duration=GameConfiguration.fade_time)
self.screen.fade_out(0, GameConfiguration.fade_time)
self.screen_info_text.animate_scale((3, 3), GameConfiguration.fade_time)
self.screen_info_text.animate_position((-1, 0.5, -1), GameConfiguration.fade_time)
for variable_handler in self.category.selected_test.variables:
variable_handler.parent = self.tests_option_container
variable_handler.enable()
variable_handler.position = (0, y)
y -= 0.06
    def clear_tests(self):
        for test in self.tests:
            destroy(test)
        # Drop the references so destroyed buttons are not destroyed again
        # the next time a category is loaded.
        self.tests = []
def update_screen_text(self):
self.screen_info_text.text = f"Testing {self.category.name}.{self.category.selected_test.name}"
self.testing_info_text.text = f"Testing Info:\n{self.category.selected_test.info}"
def reload_test(self):
self.category.selected_test.reload()
        self.display_test()
| GDcheeriosYT/Gentrys-Quest-Ursina | Screens/Testing/Testing.py | Testing.py | py | 5,773 | python | en | code | 1 | github-code | 13 |
16621512721 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
from similarity_words import *
import sys
import codecs
import re
import argparse
#reload(sys)
#sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
#sys.stdin = codecs.getreader('utf-8')(sys.stdin)
class duplicateFilter(object):
def __init__(self, threshold = 0.65):
self.thresh = threshold
self.retweetedMemory = []
self.recentTweetMemory = []
self.retweetedMemorySize = 15
self.recentTweetMemorySize = 3000
self.ss = SS2Similar(5)
#
self.logf = open('./removeDups.log','w')
self.logf.write("*"*42 + "\n")
#
self.hist = {}
for i in range(int(threshold*100),101):
self.hist[i] = 0
self.countRepeats = 0
self.countMatches = 0
self.countShort = 0
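    # Matching strategy: `retweetedMemory` is a short sliding window (15) of the
    # most recent unique tweets, while `recentTweetMemory` holds tweets that have
    # already matched something and is checked first on each call (capped at
    # 3000, overflow is logged). Tweets of two or fewer tokens are counted as
    # short and treated as duplicates.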
def isDup(self, id, text):
result = True
        tweetList = [x for x in re.split(r'\W+', text) if x != '']
newPair = [id, tweetList]
if len(newPair[1]) > 2:
# check the memory of recent tweets first
for pair in self.recentTweetMemory:
score = self.ss.similarity(newPair[1], pair[1])
if score > self.thresh:
self.countRepeats += 1
break # one match is all we need
else: # only do this if we finished the above loop without a match
# check agaist unique tweet memory
for pair in self.retweetedMemory:
score = self.ss.similarity(newPair[1], pair[1])
if score > self.thresh:
self.recentTweetMemory.append(pair)
self.hist[int(score*100)] += 1
self.countMatches += 1
break # one match is all we need
else: # only do this if we finish without a match!
result = False
self.retweetedMemory.append(newPair)
if len(self.retweetedMemory) > self.retweetedMemorySize:
self.retweetedMemory.pop(0)
else:
self.countShort += 1
if len(self.recentTweetMemory) > self.recentTweetMemorySize:
self.logf.write(str(self.recentTweetMemory.pop(0)) + '\n')
return result
def writeRetweetsToLog(self):
self.logf.write("Retweet memory dump at end of run (size smaller of (%d,%d)):\n"%(self.countMatches, self.recentTweetMemorySize))
for a in self.recentTweetMemory:
self.logf.write("%s\n"%str(a))
def writeHistToLog(self):
self.logf.write("*"*42 + "\n")
for a in self.hist:
self.logf.write("%d, %s\n"%(a, self.hist[a]))
self.logf.write("\n**************\nHit rates:\n")
self.logf.write(" Original matches: %d\n"%self.countMatches)
self.logf.write(" Repeat matches: %d\n"%self.countRepeats)
self.logf.write(" Short tweets: %d\n"%self.countShort)
self.logf.write(" Total repeated (filtered) Tweets: %d\n"%(
self.countShort+self.countRepeats+self.countMatches))
if __name__ == '__main__':
def args():
args_parser = argparse.ArgumentParser(
description="Command line duplicate and near match filtering")
args_parser.add_argument("-c", "--column", dest="col_txt",
default=2, help="Column containing the text to deplup on [default is 2nd column]")
args_parser.add_argument("-i", "--index-column", dest="col_idx",
default=1, help="Column containing the unique identifier for the item [default is 1st column]")
args_parser.add_argument("-d", "--delimiter", dest="delimiter",
default="|",
help="Delimiter. Default is pipe |")
args_parser.add_argument("-l", "--logging", dest="log_flag", action="store_true",
default=False, help="Log discarded duplicates")
args_parser.add_argument("-t", "--threshold", dest="command_line_threshold", default=65)
return args_parser
options = args().parse_args()
id_txt = int(options.col_txt)-1
id_idx = int(options.col_idx)-1
t = int(options.command_line_threshold)*.01
f = duplicateFilter(t)
for row in sys.stdin:
row = row.strip().split(options.delimiter)
if not f.isDup(row[id_idx],row[id_txt]):
print(options.delimiter.join(row))
# else do something with dups here
f.writeRetweetsToLog()
if options.log_flag:
f.writeHistToLog()
| DrSkippy/Data-Science-45min-Intros | pos-tagging/duplicate_filter.py | duplicate_filter.py | py | 4,989 | python | en | code | 1,560 | github-code | 13 |
37063772289 | # python3
# -*- coding: utf-8 -*-
# @File : rst2md.py
# @Desc : rst & md converter
# @Project : docTools
# @Time : 19-6-3 上午10:42
# @Author : Loopy
# @Contact : peter@mail.loopy.tech
# @License : CC BY-NC-SA 4.0 (subject to project license)
import requests
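# Note: both converters below POST the input file to the public Docverter
# service at http://c.docverter.com/convert and write the converted text back
# to disk (backing up the source when no explicit output path is given).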
def help_md_rst(from_file, to_file, data):
""" rst & md 转化辅助函数 """
response = requests.post(
url="http://c.docverter.com/convert",
data=data,
files={"input_files[]": open(from_file, "rb")},
)
    if response.ok:
        if to_file is None:
            # No explicit output path: back up the source file, then overwrite it in place.
            to_file = from_file
            with open(from_file, "r") as f, open("bak_" + from_file, "w") as f_bac:
                f_bac.write(f.read())
        # response.content is bytes; response.text gives the decoded converted document.
        with open(to_file, "w") as f:
            f.write(response.text)
    else:
        print("response is not ok")
def md_to_rst(from_file, to_file=None):
data = {"to": "rst", "from": "markdown"}
help_md_rst(from_file, to_file, data)
def rst_to_md(from_file, to_file=None):
data = {"to": "markdown", "from": "rst"}
help_md_rst(from_file, to_file, data)
if __name__ == "__main__":
md_to_rst("README.md", "README.rst")
| loopyme/docTools | rst2md.py | rst2md.py | py | 1,237 | python | en | code | 0 | github-code | 13 |
33578949925 | from typing import List
from schemas import BucketSchema, BucketUpdateSchema
from sqlalchemy.orm import Session
from fastapi import HTTPException
from db.models import User as UserModel, Bucket as BucketModel
from db.repository.bucket import (
query_bucket_by_id,
query_bucket_by_user_id,
query_buckets_by_user_id,
add_single_bucket,
add_multiple_buckets,
query_buckets_by_visibility,
)
def create_new_bucket(db: Session, bucket_data: BucketSchema, user_id: int):
"""
@Route: POST /api/v1/bucket
@Description: Create a new bucket in the database
@Args:
db {Session} - Database session
bucket_data {BucketSchema} - Bucket data
user_id {int} - User ID
@Requires Auth: True
"""
try:
new_bucket = add_single_bucket(db, bucket_data, user_id)
if new_bucket:
return {
"status": True,
"message": "New bucket successfully added to bucket list.",
"data": new_bucket,
}
else:
raise HTTPException(status_code=422, detail="Bucket creation failed.")
except Exception as e:
raise HTTPException(detail=str(e), status_code=400)
def create_new_buckets(db: Session, buckets_data: List[BucketSchema], user_id: int):
"""
@Route: POST /api/v1/buckets
@Description: Create new buckets (multiple)
@Args:
db {Session} - Database Session
buckets_data {[BucketSchema]} - List of buckets to be added
user_id {int} - User ID
"""
try:
new_buckets = add_multiple_buckets(db, buckets_data, user_id)
print(new_buckets)
if new_buckets:
return {
"status": True,
"message": f"{len(buckets_data)} buckets successfully added to bucket list.",
"data": buckets_data,
}
else:
raise HTTPException(status_code=422, detail="Buckets creation failed.")
except Exception as e:
raise HTTPException(detail=str(e), status_code=400)
def update_bucket(
db: Session, bucket_data: BucketUpdateSchema, user_id: int, bucket_id: int
):
"""
@Route: PUT /api/v1/bucket/{bucket_id}
@Description: Update an existing bucket
@Args:
db {Session} - Database session
bucket_data {BucketSchema} - Bucket data to be updated
bucket_id {int} - ID of bucket to be updated in the database
user_id {int} - User ID
@Requires Auth: True
"""
existing_bucket = db.query(BucketModel).filter_by(id=bucket_id)
if not existing_bucket.first():
raise HTTPException(detail="Bucket not found.", status_code=404)
existing_bucket.update(bucket_data.dict())
db.commit()
updated_bucket = query_bucket_by_id(db, bucket_id)
return {
"status": True,
"message": "New bucket successfully added to bucket list.",
"data": updated_bucket,
}
def delete_bucket(db: Session, user_id: int, bucket_id: int):
"""
@Route: DELETE /api/v1/bucket/{bucket_id}
@Description: Delete an existing bucket
@Args:
db {Session} - Database session
bucket_id {int} - ID of bucket to be deleted from the database
user_id {int} - User ID
"""
existing_bucket = query_bucket_by_id(db, bucket_id)
if not existing_bucket:
raise HTTPException(detail="Bucket not found.", status_code=404)
db.delete(existing_bucket)
db.commit()
return {
"status": True,
"message": "Bucket deleted successfully",
"data": existing_bucket,
}
def get_all_public_buckets(db: Session):
"""
@Route: GET /api/v1/buckets/public
@Description: Get all public buckets
@Args:
db {Session} - Database session
@Requires Auth: True
"""
public_buckets = query_buckets_by_visibility(db, visibility="public")
if not public_buckets:
raise HTTPException(detail="No public buckets found.", status_code=404)
return {
"status": True,
"message": f"Successfully fetched {len(public_buckets)} buckets.",
"data": public_buckets,
}
| rexsimiloluwah/fastapi-github-actions-test | src/controllers/bucket.py | bucket.py | py | 4,142 | python | en | code | 1 | github-code | 13 |
24556870341 | import enum
from dataclasses import dataclass
from typing import Any, Union
from trees import tree_exceptions
from trees.binary_trees import binary_tree
class Color(enum.Enum):
"""Color definition for Red-Black Tree."""
RED = enum.auto()
BLACK = enum.auto()
@dataclass
class LeafNode(binary_tree.Node):
"""Definition Red-Black Tree Leaf node whose color is always black."""
left = None
right = None
parent = None
color = Color.BLACK
@dataclass
class RBNode(binary_tree.Node):
"""Red-Black Tree non-leaf node definition."""
left: Union["RBNode", LeafNode]
right: Union["RBNode", LeafNode]
parent: Union["RBNode", LeafNode]
color: Color = Color.RED
class RBTree(binary_tree.BinaryTree):
"""Red-Black Tree.
Attributes
----------
root: `Union[RBNode, LeafNode]`
        The root node of the red-black tree.
empty: `bool`
`True` if the tree is `LeafNode`; `False` otherwise.
Methods
-------
search(key: `Any`)
Look for a node based on the given key.
insert(key: `Any`, data: `Any`)
Insert a (key, data) pair into the tree.
delete(key: `Any`)
Delete a node based on the given key from the tree.
inorder_traverse()
Perform In-order traversal.
preorder_traverse()
Perform Pre-order traversal.
postorder_traverse()
Perform Post-order traversal.
get_leftmost(node: `RBNode`)
Return the node whose key is the smallest from the given subtree.
get_rightmost(node: `RBNode`)
Return the node whose key is the biggest from the given subtree.
get_successor(node: `RBNode`)
Return the successor node in the in-order order.
get_predecessor(node: `RBNode`)
Return the predecessor node in the in-order order.
get_height(node: `Optional[RBNode]`)
Return the height of the given node.
Examples
--------
>>> from trees.binary_trees import red_black_tree
>>> tree = red_black_tree.RBTree()
>>> tree.insert(key=23, data="23")
>>> tree.insert(key=4, data="4")
>>> tree.insert(key=30, data="30")
>>> tree.insert(key=11, data="11")
>>> tree.insert(key=7, data="7")
>>> tree.insert(key=34, data="34")
>>> tree.insert(key=20, data="20")
>>> tree.insert(key=24, data="24")
>>> tree.insert(key=22, data="22")
>>> tree.insert(key=15, data="15")
>>> tree.insert(key=1, data="1")
>>> [item for item in tree.inorder_traverse()]
[(1, '1'), (4, '4'), (7, '7'), (11, '11'), (15, '15'), (20, '20'),
(22, '22'), (23, '23'), (24, '24'), (30, '30'), (34, '34')]
>>> [item for item in tree.preorder_traverse()]
    [(20, '20'), (7, '7'), (4, '4'), (1, '1'), (11, '11'), (15, '15'),
    (23, '23'), (22, '22'), (30, '30'), (24, '24'), (34, '34')]
>>> tree.get_leftmost().key
1
>>> tree.get_leftmost().data
'1'
>>> tree.get_rightmost().key
34
>>> tree.get_rightmost().data
"34"
>>> tree.get_height(tree.root)
4
>>> tree.search(24).data
    '24'
>>> tree.delete(15)
"""
def __init__(self):
binary_tree.BinaryTree.__init__(self)
self._NIL: LeafNode = LeafNode(key=None, data=None)
self.root: Union[RBNode, LeafNode] = self._NIL
# Override
def search(self, key: Any) -> RBNode:
"""Look for a node by a given key.
See Also
--------
:py:meth:`trees.binary_trees.binary_tree.BinaryTree.search`.
"""
temp: Union[RBNode, LeafNode] = self.root
while isinstance(temp, RBNode):
if key < temp.key:
temp = temp.left
elif key > temp.key:
temp = temp.right
else: # Key found
return temp
raise tree_exceptions.KeyNotFoundError(key=key)
# Override
def insert(self, key: Any, data: Any):
"""Insert a (key, data) pair into the Red-Black tree.
See Also
--------
:py:meth:`trees.binary_trees.binary_tree.BinaryTree.insert`.
"""
node = RBNode(
key=key,
data=data,
left=self._NIL,
right=self._NIL,
parent=self._NIL,
color=Color.RED,
) # Color the new node as red.
parent: Union[RBNode, LeafNode] = self._NIL
temp: Union[RBNode, LeafNode] = self.root
while isinstance(temp, RBNode): # Look for the insert location
parent = temp
if node.key < temp.key:
temp = temp.left
else:
temp = temp.right
# If the parent is a LeafNode, set the new node to be the root.
if isinstance(parent, LeafNode):
node.color = Color.BLACK
self.root = node
else:
node.parent = parent
if node.key < parent.key:
parent.left = node
else:
parent.right = node
# After the insertion, fix the broken red-black-tree-properties.
self._insert_fixup(node)
# Override
def delete(self, key: Any):
"""Delete the node by the given key.
See Also
--------
:py:meth:`trees.binary_trees.binary_tree.BinaryTree.delete`.
"""
deleting_node: RBNode = self.search(key=key)
original_color = deleting_node.color
# No children or only one right child
if isinstance(deleting_node.left, LeafNode):
replacing_node = deleting_node.right
self._transplant(deleting_node=deleting_node, replacing_node=replacing_node)
# Fixup
if original_color == Color.BLACK:
if isinstance(replacing_node, RBNode):
self._delete_fixup(fixing_node=replacing_node)
# Only one left child
elif isinstance(deleting_node.right, LeafNode):
replacing_node = deleting_node.left
self._transplant(deleting_node=deleting_node, replacing_node=replacing_node)
# Fixup
if original_color == Color.BLACK:
self._delete_fixup(fixing_node=replacing_node)
# Two children
else:
replacing_node = self.get_leftmost(deleting_node.right)
original_color = replacing_node.color
replacing_replacement = replacing_node.right
# The replacing node is not the direct child of the deleting node
if replacing_node.parent != deleting_node:
self._transplant(replacing_node, replacing_node.right)
replacing_node.right = deleting_node.right
replacing_node.right.parent = replacing_node
self._transplant(deleting_node, replacing_node)
replacing_node.left = deleting_node.left
replacing_node.left.parent = replacing_node
replacing_node.color = deleting_node.color
# Fixup
if original_color == Color.BLACK:
if isinstance(replacing_replacement, RBNode):
self._delete_fixup(fixing_node=replacing_replacement)
# Override
def get_leftmost(self, node: RBNode) -> RBNode:
"""Return the leftmost node from a given subtree.
See Also
--------
:py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_leftmost`.
"""
current_node = node
while isinstance(current_node.left, RBNode):
current_node = current_node.left
return current_node
# Override
def get_rightmost(self, node: RBNode) -> RBNode:
"""Return the rightmost node from a given subtree.
See Also
--------
:py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_rightmost`.
"""
current_node = node
while isinstance(current_node.right, RBNode):
current_node = current_node.right
return current_node
# Override
def get_successor(self, node: RBNode) -> Union[RBNode, LeafNode]:
"""Return the successor node in the in-order order.
See Also
--------
:py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_successor`.
"""
if isinstance(node.right, RBNode):
return self.get_leftmost(node=node.right)
parent = node.parent
while isinstance(parent, RBNode) and node == parent.right:
node = parent
parent = parent.parent
return parent
# Override
def get_predecessor(self, node: RBNode) -> Union[RBNode, LeafNode]:
"""Return the predecessor node in the in-order order.
See Also
--------
:py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_predecessor`.
"""
if isinstance(node.left, RBNode):
return self.get_rightmost(node=node.left)
parent = node.parent
while isinstance(parent, RBNode) and node == parent.left:
node = parent
parent = parent.parent
return node.parent
# Override
def get_height(self, node: Union[None, LeafNode, RBNode]) -> int:
"""Return the height of the given node.
See Also
--------
:py:meth:`trees.binary_trees.binary_tree.BinaryTree.get_height`.
"""
if node is None:
return 0
if isinstance(node.left, LeafNode) and isinstance(node.right, LeafNode):
return 0
return max(self.get_height(node.left), self.get_height(node.right)) + 1
def inorder_traverse(self) -> binary_tree.Pairs:
"""Perform In-Order traversal.
In-order traversal traverses a tree by the order:
left subtree, current node, right subtree (LDR)
Yields
------
`Pairs`
The next (key, data) pair in the in-order traversal.
Examples
--------
>>> from trees.binary_trees import red_black_tree
>>> tree = red_black_tree.RBTree()
>>> tree.insert(key=23, data="23")
>>> tree.insert(key=4, data="4")
>>> tree.insert(key=30, data="30")
>>> tree.insert(key=11, data="11")
>>> tree.insert(key=7, data="7")
>>> tree.insert(key=34, data="34")
>>> tree.insert(key=20, data="20")
>>> tree.insert(key=24, data="24")
>>> tree.insert(key=22, data="22")
>>> tree.insert(key=15, data="15")
>>> tree.insert(key=1, data="1")
>>> [item for item in tree.preorder_traverse()]
[(1, '1'), (4, '4'), (7, '7'), (11, '11'), (15, '15'), (20, '20'),
(22, '22'), (23, '23'), (24, '24'), (30, '30'), (34, '34')]
"""
return self._inorder_traverse(node=self.root) # type: ignore
def preorder_traverse(self) -> binary_tree.Pairs:
"""Perform Pre-Order traversal.
Pre-order traversal traverses a tree by the order:
current node, left subtree, right subtree (DLR)
Yields
------
`Pairs`
The next (key, data) pair in the pre-order traversal.
Examples
--------
>>> from trees.binary_trees import red_black_tree
>>> tree = red_black_tree.RBTree()
>>> tree.insert(key=23, data="23")
>>> tree.insert(key=4, data="4")
>>> tree.insert(key=30, data="30")
>>> tree.insert(key=11, data="11")
>>> tree.insert(key=7, data="7")
>>> tree.insert(key=34, data="34")
>>> tree.insert(key=20, data="20")
>>> tree.insert(key=24, data="24")
>>> tree.insert(key=22, data="22")
>>> tree.insert(key=15, data="15")
>>> tree.insert(key=1, data="1")
>>> [item for item in tree.preorder_traverse()]
[(20, "20"), (7, "7"), (4, "4"), (1, "1"), (11, "11"), (15, "15"),
(23, "23"), (22, "22"), (30, "30"), (24, "24"), (34, "34")]
"""
return self._preorder_traverse(node=self.root) # type: ignore
def postorder_traverse(self) -> binary_tree.Pairs:
"""Perform Post-Order traversal.
Post-order traversal traverses a tree by the order:
left subtree, right subtree, current node (LRD)
Yields
------
`Pairs`
The next (key, data) pair in the post-order traversal.
Examples
--------
>>> from trees.binary_trees import red_black_tree
>>> tree = red_black_tree.RBTree()
>>> tree.insert(key=23, data="23")
>>> tree.insert(key=4, data="4")
>>> tree.insert(key=30, data="30")
>>> tree.insert(key=11, data="11")
>>> tree.insert(key=7, data="7")
>>> tree.insert(key=34, data="34")
>>> tree.insert(key=20, data="20")
>>> tree.insert(key=24, data="24")
>>> tree.insert(key=22, data="22")
>>> tree.insert(key=15, data="15")
>>> tree.insert(key=1, data="1")
>>> [item for item in tree.postorder_traverse()]
[(1, "1"), (4, "4"), (15, "15"), (11, "11"), (7, "7"), (22, "22"),
(24, "24"), (34, "34"), (30, "30"), (23, "23"), (20, "20")]
"""
return self._postorder_traverse(node=self.root) # type: ignore
def _left_rotate(self, node_x: RBNode):
node_y = node_x.right # Set node y
if isinstance(node_y, LeafNode): # Node y cannot be a LeafNode
raise RuntimeError("Invalid left rotate")
# Turn node y's subtree into node x's subtree
node_x.right = node_y.left
if isinstance(node_y.left, RBNode):
node_y.left.parent = node_x
node_y.parent = node_x.parent
# If node's parent is a LeafNode, node y becomes the new root.
if isinstance(node_x.parent, LeafNode):
self.root = node_y
# Otherwise, update node x's parent.
elif node_x == node_x.parent.left:
node_x.parent.left = node_y
else:
node_x.parent.right = node_y
node_y.left = node_x
node_x.parent = node_y
def _right_rotate(self, node_x: RBNode):
node_y = node_x.left # Set node y
if isinstance(node_y, LeafNode): # Node y cannot be a LeafNode
raise RuntimeError("Invalid right rotate")
# Turn node y's subtree into node x's subtree
node_x.left = node_y.right
if isinstance(node_y.right, RBNode):
node_y.right.parent = node_x
node_y.parent = node_x.parent
# If node's parent is a LeafNode, node y becomes the new root.
if isinstance(node_x.parent, LeafNode):
self.root = node_y
# Otherwise, update node x's parent.
elif node_x == node_x.parent.right:
node_x.parent.right = node_y
else:
node_x.parent.left = node_y
node_y.right = node_x
node_x.parent = node_y
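    # The two rotations above preserve binary-search-tree ordering; the fixup
    # routines below combine them with recoloring to restore the red-black
    # properties after an insertion or a deletion.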
def _insert_fixup(self, fixing_node: RBNode):
while fixing_node.parent.color == Color.RED:
if fixing_node.parent == fixing_node.parent.parent.left: # type: ignore
parent_sibling = fixing_node.parent.parent.right # type: ignore
# Case 1
if parent_sibling.color == Color.RED: # type: ignore
fixing_node.parent.color = Color.BLACK
parent_sibling.color = Color.BLACK # type: ignore
fixing_node.parent.parent.color = Color.RED # type: ignore
fixing_node = fixing_node.parent.parent # type: ignore
else:
# Case 2
if fixing_node == fixing_node.parent.right: # type: ignore
fixing_node = fixing_node.parent # type: ignore
self._left_rotate(fixing_node)
# Case 3
fixing_node.parent.color = Color.BLACK
fixing_node.parent.parent.color = Color.RED # type: ignore
self._right_rotate(fixing_node.parent.parent) # type: ignore
else:
parent_sibling = fixing_node.parent.parent.left # type: ignore
# Case 4
if parent_sibling.color == Color.RED: # type: ignore
fixing_node.parent.color = Color.BLACK
parent_sibling.color = Color.BLACK # type: ignore
fixing_node.parent.parent.color = Color.RED # type: ignore
fixing_node = fixing_node.parent.parent # type: ignore
else:
# Case 5
if fixing_node == fixing_node.parent.left: # type: ignore
fixing_node = fixing_node.parent # type: ignore
self._right_rotate(fixing_node)
# Case 6
fixing_node.parent.color = Color.BLACK
fixing_node.parent.parent.color = Color.RED # type: ignore
self._left_rotate(fixing_node.parent.parent) # type: ignore
self.root.color = Color.BLACK
def _delete_fixup(self, fixing_node: Union[LeafNode, RBNode]):
while (fixing_node is not self.root) and (fixing_node.color == Color.BLACK):
if fixing_node == fixing_node.parent.left: # type: ignore
sibling = fixing_node.parent.right # type: ignore
# Case 1: the sibling is red.
if sibling.color == Color.RED: # type: ignore
                    sibling.color = Color.BLACK  # type: ignore
fixing_node.parent.color = Color.RED # type: ignore
self._left_rotate(fixing_node.parent) # type: ignore
sibling = fixing_node.parent.right # type: ignore
if isinstance(sibling, LeafNode):
break
# Case 2: the sibling is black and its children are black.
if (sibling.left.color == Color.BLACK) and ( # type: ignore
sibling.right.color == Color.BLACK # type: ignore
):
sibling.color = Color.RED # type: ignore
# new fixing node
fixing_node = fixing_node.parent # type: ignore
# Cases 3 and 4: the sibling is black and one of
# its child is red and the other is black.
else:
# Case 3: the sibling is black and its left child is red.
if sibling.right.color == Color.BLACK: # type: ignore
sibling.left.color = Color.BLACK # type: ignore
sibling.color = Color.RED # type: ignore
self._right_rotate(node_x=sibling) # type: ignore
# Case 4: the sibling is black and its right child is red.
sibling.color = fixing_node.parent.color # type: ignore
fixing_node.parent.color = Color.BLACK # type: ignore
sibling.right.color = Color.BLACK # type: ignore
self._left_rotate(node_x=fixing_node.parent) # type: ignore
# Once we are here, all the violation has been fixed, so
# move to the root to terminate the loop.
fixing_node = self.root
else:
sibling = fixing_node.parent.left # type: ignore
# Case 5: the sibling is red.
if sibling.color == Color.RED: # type: ignore
                    sibling.color = Color.BLACK  # type: ignore
fixing_node.parent.color = Color.RED # type: ignore
self._right_rotate(node_x=fixing_node.parent) # type: ignore
sibling = fixing_node.parent.left # type: ignore
if isinstance(sibling, LeafNode):
break
# Case 6: the sibling is black and its children are black.
if (sibling.right.color == Color.BLACK) and ( # type: ignore
sibling.left.color == Color.BLACK # type: ignore
):
sibling.color = Color.RED # type: ignore
fixing_node = fixing_node.parent # type: ignore
else:
# Case 7: the sibling is black and its right child is red.
if sibling.left.color == Color.BLACK: # type: ignore
sibling.right.color = Color.BLACK # type: ignore
sibling.color = Color.RED # type: ignore
self._left_rotate(node_x=sibling) # type: ignore
# Case 8: the sibling is black and its left child is red.
sibling.color = fixing_node.parent.color # type: ignore
fixing_node.parent.color = Color.BLACK # type: ignore
sibling.left.color = Color.BLACK # type: ignore
self._right_rotate(node_x=fixing_node.parent) # type: ignore
# Once we are here, all the violation has been fixed, so
# move to the root to terminate the loop.
fixing_node = self.root
fixing_node.color = Color.BLACK
def _transplant(
self, deleting_node: RBNode, replacing_node: Union[RBNode, LeafNode]
):
if isinstance(deleting_node.parent, LeafNode):
self.root = replacing_node
elif deleting_node == deleting_node.parent.left:
deleting_node.parent.left = replacing_node
else:
deleting_node.parent.right = replacing_node
replacing_node.parent = deleting_node.parent
def _inorder_traverse(self, node: Union[RBNode, LeafNode]):
if isinstance(node, RBNode):
yield from self._inorder_traverse(node.left)
yield (node.key, node.data)
yield from self._inorder_traverse(node.right)
def _preorder_traverse(self, node: Union[RBNode, LeafNode]):
if isinstance(node, RBNode):
yield (node.key, node.data)
yield from self._preorder_traverse(node.left)
yield from self._preorder_traverse(node.right)
def _postorder_traverse(self, node: Union[RBNode, LeafNode]):
if isinstance(node, RBNode):
yield from self._postorder_traverse(node.left)
yield from self._postorder_traverse(node.right)
yield (node.key, node.data)
| burpeesDaily/python-sample-code | trees/binary_trees/red_black_tree.py | red_black_tree.py | py | 22,400 | python | en | code | 10 | github-code | 13 |
70662700819 | import time
import matplotlib.pyplot as plt
import numpy as np
import math
def pow(x,a):
return math.pow(x,a)
plt.ion()
figure,ax=plt.subplots()
lines,=ax.plot([],[],color="red")
ax.set_autoscaley_on(True)
ax.grid()
X=np.linspace(-1.8,1.8,1000)
a=1
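# Live-update loop: recompute y for the current phase `a`, push the new data
# into the existing Line2D via set_xdata/set_ydata, rescale the axes, and
# force a redraw of the interactive figure each frame.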
while True:
    # Define the function to plot for the current phase `a`
    y = [pow(pow(x, 2), 1 / 3) + 0.9 * pow(3.3 - x * x, 0.5) * np.sin(a * np.pi * x) for x in X]
a=a+0.1
lines.set_xdata(X)
lines.set_ydata(y)
ax.relim()
ax.autoscale_view()
figure.canvas.draw()
figure.canvas.flush_events()
time.sleep(0.01)
| LxmSpirit/PyhtonPycharm | untitled/123321/2.py | 2.py | py | 556 | python | en | code | 0 | github-code | 13 |
3870197291 | import hppfcl
import pinocchio as pin
import numpy as np
from utils_render import create_complex_scene
def reset_objects_placements(scene, transforms):
for s in range(len(scene.collision_objects)):
scene.collision_objects[s].setTransform(transforms[s])
# The scene is made of a box (6 walls) with a bunch of objects inside
shapes, transforms, scene = create_complex_scene()
n_walls = 6
# Initialize scene renderer
scene.init_renderer()
# render the scene
scene.render_scene()
num_collision_objects = len(scene.collision_objects)
print("Number of collision objects", num_collision_objects)
num_possible_collision_pairs = (int)(len(scene.collision_objects)*(len(scene.collision_objects) - 1)/2)
print("Number of possible collision pairs: ", num_possible_collision_pairs)
# Starting velocities of each shape
velocities = []
for i in range(num_collision_objects - n_walls):
v = np.random.rand(6) * 0.25
velocities.append(v)
# We don't want the walls to move
for i in range(n_walls):
velocities.append(np.zeros(6))
# Simulation loop
# You can increase the time horizon of the simulation
T = 100
dt = 0.1 # timestep
# Collision request and result needed for the narrow phase
colreq = hppfcl.CollisionRequest()
colres = hppfcl.CollisionResult()
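# Simulation loop: a brute-force check over all object pairs each step
# (wall-wall pairs are skipped); on contact, each movable object's linear
# velocity is redirected along the contact normal while its angular velocity
# is kept unchanged.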
for t in range(T):
# Render the current scene
scene.render_scene()
# Loop through all collision pairs
for i in range(0, num_collision_objects-1):
for j in range(i+1, num_collision_objects):
# If both object are walls, we don't really care about checking their collision
if i < num_collision_objects - n_walls or j < num_collision_objects - n_walls:
colres.clear()
is_colliding = hppfcl.collide(scene.collision_objects[i], scene.collision_objects[j], colreq, colres)
if (is_colliding):
v1 = velocities[i]
v2 = velocities[j]
contact: hppfcl.Contact = colres.getContact(0)
new_v1 = np.zeros(6)
new_v1[3:] = velocities[i][3:]
new_v1[:3] = -np.linalg.norm(v1[:3]) * contact.normal
new_v2 = np.zeros(6)
new_v2[3:] = velocities[j][3:]
new_v2[:3] = np.linalg.norm(v2[:3]) * contact.normal
if i < num_collision_objects - n_walls:
velocities[i] = new_v1
if j < num_collision_objects - n_walls:
velocities[j] = new_v2
# Update the placements of the shapes based on their velocities
for i in range(num_collision_objects - n_walls):
M = scene.collision_objects[i].getTransform()
# I will be using the first 3 elements of velocities[i] to apply a linear velocity to M
# in the world frame.
# And I will be using the last 3 elements of velocities[i] to apply an angular velocity to M
# in its local frame.
v1 = np.zeros(6)
v2 = np.zeros(6)
v1[:3] = velocities[i][:3]
v2[3:] = velocities[i][3:]
M = pin.exp6(v1*dt) * M * pin.exp6(v2*dt)
scene.collision_objects[i].setTransform(M)
| agimus-project/winter-school-2023 | simulation/sim2_collision/aws_collision.py | aws_collision.py | py | 3,200 | python | en | code | 0 | github-code | 13 |
from django.contrib import admin
# Register your models here.
from app.models import Club, Activity, ClubPartnerPreregister, ClubPartner, Category, ClubSeatActivity, \
ClubSeatActivityPlace, ClubSeat
class AdminClub(admin.ModelAdmin):
list_display = ('name','status',)
prepopulated_fields = {'slug': ('name',)}
readonly_fields = ('updated_at', 'created_at')
admin.site.register(Club,AdminClub)
class AdminClubSeat(admin.ModelAdmin):
list_display = ('club','name','status',)
prepopulated_fields = {'slug': ('name',)}
readonly_fields = ('updated_at', 'created_at')
admin.site.register(ClubSeat,AdminClubSeat)
class AdminActivity(admin.ModelAdmin):
list_display = ('name','category','status',)
list_filter = ('category',)
prepopulated_fields = {'slug': ('name',)}
readonly_fields = ('updated_at', 'created_at')
admin.site.register(Activity, AdminActivity)
class AdminClubSeatActivity(admin.ModelAdmin):
list_display = ('club_seat','activity','status',)
# list_filter = ('club','activity',)
admin.site.register(ClubSeatActivity, AdminClubSeatActivity)
class AdminClubSeatActivityPlace(admin.ModelAdmin):
list_display = ('club_seat_activity','name','description','status',)
# list_filter = ('club','activity',)
admin.site.register(ClubSeatActivityPlace, AdminClubSeatActivityPlace)
class AdminClubPartnerPreregister(admin.ModelAdmin):
list_display = ('club','dni','code','status')
list_filter = ('club',)
admin.site.register(ClubPartnerPreregister, AdminClubPartnerPreregister)
class AdminClubPartner(admin.ModelAdmin):
list_display = ('club','user','dni','code',)
list_filter = ('club','user',)
admin.site.register(ClubPartner, AdminClubPartner)
admin.site.register(Category)
| BrendaManrique/cod-bookingApp | app/admin.py | admin.py | py | 1,799 | python | en | code | 0 | github-code | 13 |
35037941553 | """
0 right
1 down
2 left
3 up
"""
# Qlearning implementation in example 6.6
import gym
import numpy as np
import matplotlib.pyplot as plt
from cliffWalking import CustomEnvironment
float_formatter = "{:.3f}".format
np.set_printoptions(formatter={'float_kind': float_formatter})
env = CustomEnvironment(render_mode = 'rgb_array')
qValue = np.zeros([48, 4])
numEpisodes = 500
stepSizeParameter = 0.5
discountFactor = 0.9
def epsilon_greedy(Q, s, epsilon):
if np.random.random() < epsilon:
return np.random.choice(env.action_space.n)
else:
return np.random.choice(np.argwhere(Q[s] == np.amax(Q[s])).reshape(-1))
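# Tabular Q-learning (off-policy TD control): actions are chosen
# epsilon-greedily, but the update below bootstraps from the greedy value
# max_a Q(s', a) of the next state.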
for episode in range(numEpisodes):
done = False
state = 36
env.reset()
while not done:
action = epsilon_greedy(qValue, state, 0.1)
next_state, reward, done, truncated, info = env.step(action)
nxt = next_state
next_state = next_state[1] * 12 + next_state[0]
ntaction = np.argmax(qValue[next_state])
# print('episode: ', episode, 'action', action, 'nextaction', ntaction, 'nextState', nxt )
# if done == True and next_state == 15 and ntaction == 2:
# print(state, action)
# print('right')
qValue[state, action] += stepSizeParameter*(
reward + discountFactor*qValue[next_state][ntaction] - qValue[state][action])
state = next_state
print('terminated at state', state)
# print(f'qTable after {episode} episodes: \n {qValue}')
for i in range(qValue.shape[0]):
print(f'index [{int(i/12)},{np.mod(i, 12)}] qvalue {qValue[i]}')
| Aditya12123/RL | qCliff.py | qCliff.py | py | 1,658 | python | en | code | 0 | github-code | 13 |
16863741943 | from __future__ import division
from __future__ import print_function
from builtins import map
from builtins import str
from builtins import range
from past.utils import old_div
import sys
import networkx as nx
import chicago_edge_scores as ces
import random
def is_shaved_tail(G,shave_round,shaved_degree,shave_limit):
leaves=[]
r={}
for n in sorted(G.nodes()):
if shave_round.get(n,0)>shave_limit and shaved_degree.get(n,0)==1:
leaves.append(n)
for l in sorted(leaves):
q=[l]
while len(q)>0:
n=q.pop()
r[n]=True
for nn in G.neighbors(n):
if shave_round.get(nn,0)>shave_limit and shaved_degree.get(nn,0)<=2 and (not nn in q) and (not nn in r ):
q.append(nn)
return r
def distance_to_nearest_branch(G,shave_round,shave_limit,trim_degree):
branchpoints=[]
r={}
for n in G.nodes():
#print "#",n, G.degree(n),G.degree(n)==1
if shave_round.get(n,0)>shave_limit and trim_degree.get(n,0)>2:
branchpoints.append(n)
# print leaves
round=1
# print "#",leaves
boundary=list(branchpoints)
next=[]
done=[]
#r={}
for b in boundary: r[b]=round
while len(boundary)>0:
round +=1
next=[]
for n in boundary:
for nn in G.neighbors(n):
if (not nn in boundary+done+next) :
next.append(nn)
# r[nn]=round
for b in next:
r[b] = round
# if len( [nn for nn in G.neighbors(b) if not r.has_key(nn)] )==1: r[b]=round
for b in boundary: done.append(b)
boundary = []
for n in next:
if n in r: boundary.append(n)
# next=[]
# for n in G.nodes():
# if not r.has_key(n): r[n]=round
return r
def distance_to_nearest_leaf(G,shave_round,shave_limit,trim_degree):
leaves=[]
r={}
for n in G.nodes():
#print "#",n, G.degree(n),G.degree(n)==1
if shave_round.get(n,0)<=shave_limit :
r[n]=0
for n in G.nodes():
#print "#",n, G.degree(n),G.degree(n)==1
if trim_degree.get(n,0)==1 and shave_round.get(n,0)>shave_limit:
leaves.append(n)
# print leaves
round=1
# print "#",leaves
boundary=list(leaves)
next=[]
done=[]
# r={}
for b in boundary: r[b]=round
while len(boundary)>0:
round +=1
next=[]
for n in boundary:
for nn in G.neighbors(n):
if (not nn in boundary+done+next):
next.append(nn)
# r[nn]=round
for b in next:
r[b] = round
# if len( [nn for nn in G.neighbors(b) if not r.has_key(nn)] )==1: r[b]=round
for b in boundary: done.append(b)
boundary = []
for n in next:
if n in r: boundary.append(n)
# next=[]
# for n in G.nodes():
# if not r.has_key(n): r[n]=round
return r
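# shave_round: iteratively peels degree-1 leaves off G; each node is labelled
# with (roughly) the round in which the peeling would remove it, and nodes of
# the surviving core all receive the final round number.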
def shave_round(G):
leaves=[]
for n in G.nodes():
#print "#",n, G.degree(n),G.degree(n)==1
if G.degree(n)==1:
leaves.append(n)
# print leaves
round=1
# print "#",leaves
boundary=list(leaves)
next=[]
done=[]
r={}
for b in boundary: r[b]=round
while len(boundary)>0:
round +=1
next=[]
for n in boundary:
for nn in G.neighbors(n):
if (not nn in boundary+done+next):
next.append(nn)
# r[nn]=round
nr={}
for b in next:
if len( [nn for nn in G.neighbors(b) if nn not in r] )==1: nr[b]=round
r.update(nr)
for b in boundary: done.append(b)
boundary = []
for n in next:
if n in r: boundary.append(n)
# next=[]
for n in G.nodes():
if n not in r: r[n]=round
return r
def log(x):
sys.stderr.write(x+"\n")
LOCAL_BRIDGE=1
CUT_ME=2
edge_tags={}
def add_tag(s1,s2,tag):
if s1<s2:
ot = edge_tags.get((s1,s2),set())
ot.add(tag)
edge_tags[s1,s2] = ot
else:
ot = edge_tags.get((s2,s1),set())
ot.add(tag)
edge_tags[s2,s1] = ot
def get_tags(s1,s2):
if s1<s2:
ot = edge_tags.get((s1,s2),set())
return tuple(ot)
else:
ot = edge_tags.get((s2,s1),set())
return tuple(ot)
edge_color_setting="hair"
def edge_tag_to_style(tags,setting=edge_color_setting):
if setting == "hair":
style=""
if "hair" in tags:
style= "color=red"
elif "longHair" in tags:
style= "color=orange"
elif "H" in tags:
style= "color=blue"
elif "Y" in tags:
style= "color=goldenrod"
elif "nearY" in tags:
style= "color=goldenrod4"
elif "bigH" in tags:
style= "color=green"
if "promisc" in tags:
style += " style=dashed"
return style
def printdot(g,gg0,c,n,ll,bh,annot,trim_level={},post_trim_degree={},tag="bad",yDist={},leafDist={},edgeTags=edge_tags):
#def printdot(g,c,n,ll,bh,annot,tag="bad"):
# print ll
import colorsys
chromosomes={}
chr_mins={}
chr_maxs={}
if bh:
sb=[]
for cc in c:
bhi = bh.get(cc,[0,0,0,0,0,0])
sb.append( ( bhi[1],old_div((float(bhi[3])+float(bhi[4])),2.0),bhi[2],cc ) )
chromosomes[bhi[1]]=1
if chr_mins.get(bhi[1],5.0e9)>min( float(bhi[3]), float(bhi[4]) ): chr_mins[bhi[1]]=min( float(bhi[3]), float(bhi[4]) )
if chr_maxs.get(bhi[1],-1.0) <max( float(bhi[3]), float(bhi[4]) ): chr_maxs[bhi[1]]=max( float(bhi[3]), float(bhi[4]) )
sb.sort()
# {'19': 46194830.0, '18': 59221558.0, '8': 96645227.0, '4': 18548230.0, 'X': 102465955.0}
# {}
print("#",chr_mins)
print("#",chr_maxs)
nchrs=len(list(chromosomes.keys()))
i=0
chr_hue={}
for ch in list(chromosomes.keys()):
chr_hue[ch] = old_div(float(i),nchrs)
i+=1
gg = nx.subgraph(g,c)
f=open("%s-%d.txt" % (tag,n), "wt")
nn=1
lab0={}
for x in c:
p=""
d=x
if x[0]=="-":
p="-"
d=x[1:]
# lab0[d]=lab0.get(d,nn)
lab0[d]=lab0.get( d, float(ll.get(d,0)))
#print x,d,p,lab0[d]
nn+=1
lab={}
node_fill={}
for x in c:
p=""
d=x
if x[0]=="-":
p="-"
d=x[1:]
bhi = bh.get(x,False)
if bhi:
lab[x]="{:.1f} {}{}\\n{:.2f}-{:.2f}\\n{}".format( old_div(lab0.get(d,0.0),1000), bhi[1],bhi[2],old_div(float(bhi[3]),1.0e6),old_div(float(bhi[4]),1.0e6), x)
# lab[x]="{:.1f} {}{}\\n{:.2f}-{:.2f}\\n{} {} {} {}".format( lab0.get(d,0.0)/1000, bhi[1],bhi[2],float(bhi[3])/1.0e6,float(bhi[4])/1.0e6,leafDist.get(x,""),yDist.get(x,""), trim_level[x], post_trim_degree.get(x,"") )
if ( chr_maxs.get(bhi[1], (1.0+float(bhi[3])+float(bhi[4])) ) ) ==0.0: #/2.0)-chr_mins.get(bhi[1],0.0))==0.0:
print("wtf?",x,bhi,bhi[1],( chr_maxs.get(bhi[1], (1.0+float(bhi[3])+float(bhi[4])) ) ))
rgb=(0,0,0)
try:
rgb=colorsys.hls_to_rgb( chr_hue[bhi[1]], 0.5, old_div((old_div((float(bhi[3])+float(bhi[4])),2.0) - chr_mins.get(bhi[1],0)),(chr_maxs.get(bhi[1],old_div((1.0+float(bhi[3])+float(bhi[4])),2.0))-chr_mins.get(bhi[1],0.0))) )
except Exception as e:
print(e)
            node_fill[x]= '#%02x%02x%02x' % (int(255*rgb[0]), int(255*rgb[1]), int(255*rgb[2])) #"#{}{}{}".format()
else:
lab[x]="{:.1f}\\n{}".format(old_div(lab0.get(d,0.0),1000),str(x))
node_fill[x]="white"
# lab[x]="{} {}".format( trim_level.get(x,"?"), post_trim_degree.get(x,"?") )
f.write( "graph G {\n")
# f.write( "node [margin=0 fontcolor=blue fontsize=32 width=0.5 shape=circle style=filled]")
f.write( "node [margin=0 fontsize=6 shape=box];\n")
f.write( "edge [ fontsize=6 ];\n")
for x in list(lab.keys()):
f.write( "{0} [label=\"{1}\" fillcolor=\"{2}\" style=\"filled\" color=\"{2}\"] ; \n".format(x,lab[x],node_fill[x]) )
if bh:
last=False
lastx=0.0
lastc=0
for c in sb:
if last and c[0]==last and (c[1]-lastx)<1000000:
last_bhi = bh.get(lastc,False)
this_bhi = bh.get(c[-1],False)
blast_label=str(last_bhi) + str(this_bhi)
if this_bhi and last_bhi :
aa = tuple(last_bhi[1:5])
bb = tuple(this_bhi[1:5])
qd = qdist(aa,bb)
blast_label = "{}".format(qd)
if gg0.has_edge(lastc,c[-1]) and not t.has_edge(lastc,c[-1]):
f.write("\t \"{}\" -- \"{}\" [weight=2 style=dotted label=\"{} {}\" fontcolor=red] ;\n".format(lastc,c[-1],blast_label,int(abs(gg0[lastc][c[-1]]['weight']))))
else:
f.write("\t \"{}\" -- \"{}\" [weight=2 style=dotted label=\"{}\" fontcolor=blue] ;\n".format(lastc,c[-1],blast_label))
last=c[0]
lastx=c[1]
lastc=c[-1]
for e in gg.edges():
# f.write( "\t\"%s\" -- \"%s\";\n" % (lab[e[0]],lab[e[1]]) ) #,gg[e[0]][e[1]]['weight'])
# color="black"
# if annot.get(e,0)&LOCAL_BRIDGE : color="red"
# if annot.get(e,0)&CUT_ME : color="yellow"
f.write( "\t \"%s\" -- \"%s\" [label=\"%d\" weight=1 %s];\n" % ( e[0],e[1],int(abs(gg[e[0]][e[1]]['weight'])),edge_tag_to_style( get_tags(e[0],e[1]) ) ))
f.write( "}\n")
def independent_path(G,a,b,k,t):
q=[a]
l={}
l[a]=0
r=[]
while len(q)>0:
# print a,b,G[a][b],q,r
n=q.pop(0)
r.append(n)
for nn in G.neighbors(n):
if (n==a and nn==b) or (n==b and nn==a): continue
if G[n][nn]['weight']>-t: continue
if nn==b: return True
# print q,[l[i] for i in q]
l[nn] = min(l.get(nn,10000), l[n]+1)
if (not nn in q+r) and (l[nn]<=k):
q.append(nn)
return False
def annotate_edges(t,G,node_list):
an={}
# nn= len(list(t.edges()))
# i=0.0
for a,b in t.edges():
if not independent_path(G,a,b,4,2):
an[a,b]="local_bridge"
an[b,a]="local_bridge"
return an
def pairs_overlap(x,y):
a=min(x[0],x[1])
b=max(x[0],x[1])
c=min(y[0],y[1])
d=max(y[0],y[1])
if a<=c and c<=b: return True
if a<=d and d<=b: return True
if c<=a and a<=d: return True
if c<=b and b<=d: return True
return False
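# qdist: compares two best-hit tuples, where element 0 is treated as the
# reference/chromosome name and elements 2 and 3 as coordinates; returns a
# negative value when the intervals overlap, the smallest endpoint gap when
# they do not, and a large sentinel (1e12) when the references differ.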
def qdist(x,y):
if (not x) or (not y): return (-1)
if x[0]==y[0]:
x,y,w,z = list(map(int,[x[2],x[3],y[2],y[3]]))
ol = pairs_overlap((x,y),(w,z))
if ol:
return(-1*min(
abs(x-w),
abs(x-z),
abs(y-w),
abs(y-z )))
else:
return(min(
abs(x-w),
abs(x-z),
abs(y-w),
abs(y-z )))
else:
return(1.0e12)
def pledge_singletons2(g,sg,thresh=2):
singletons=[]
ccn=0
comp={}
for c in nx.connected_components(sg):
ccn+=1
if len(c)==1: singletons.append(c[0])
for cc in c: comp[cc]=ccn
for s in singletons:
total_weight_by_comp={}
links_by_comp=[]
exemplar_by_comp={}
for n in g.neighbors(s):
ncomp = comp.get(n,-1)
w = abs(g[s][n]['weight'])
if w >= thresh:
links_by_comp.append( (w,ncomp,n) )
exemplar_by_comp[ncomp]=n
links_by_comp.sort(reverse=True)
print("#pledge_stat:",links_by_comp,s)
if len(links_by_comp)==0: continue
if len(links_by_comp)==1 or links_by_comp[0][1]==links_by_comp[1][1]:
n = exemplar_by_comp[links_by_comp[0][1]]
sg.add_edge( s, n, {'weight': -1} )
def pledge_singletons(g,sg,min_combined_weight,min_delta):
singletons=[]
ccn=0
comp={}
for c in nx.connected_components(sg):
ccn+=1
if len(c)==1: singletons.append(c[0])
for cc in c: comp[cc]=ccn
for s in sorted(singletons):
total_weight_by_comp={}
exemplar_by_comp={}
for n in sorted(g.neighbors(s)):
ncomp = comp.get(n,-1)
total_weight_by_comp[ncomp] = total_weight_by_comp.get(ncomp,0.0) + abs(g[s][n]['weight'] )
exemplar_by_comp[ncomp]=n
ncomps = list(total_weight_by_comp.keys())
ncomps.sort(key=lambda x: total_weight_by_comp[x],reverse=True)
best = total_weight_by_comp[ncomps[0]]
delta = 10000.0
if len(ncomps)>1:
delta = total_weight_by_comp[ncomps[0]]-total_weight_by_comp[ncomps[1]]
print("#pledge_stat:",best,delta,s,exemplar_by_comp[ncomps[0]])
if best >= min_combined_weight and delta >= min_delta:
sg.add_edge( s, exemplar_by_comp[ncomps[0]], {'weight': -1} )
if __name__=="__main__":
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-t','--threshold',default=0.0 , type=float)
parser.add_argument('--define_components_only',default=False,action='store_true')
parser.add_argument('-H','--head',default=False,type=int)
parser.add_argument('-D','--outdir',default="link_chunks")
parser.add_argument('-d','--debug',default=False,action='store_true')
parser.add_argument('-p','--progress',default=False,action='store_true')
parser.add_argument('-M','--maxdegree',default=False,type=int)
parser.add_argument('-m','--minlength',default=500,type=int)
parser.add_argument('-S','--silent',default=False,action='store_true')
parser.add_argument('-K','--cutPromisc',default=False,action='store_true')
parser.add_argument('-J','--logH',default=False,action='store_true')
parser.add_argument('-T','--logTags',default=False,action='store_true')
parser.add_argument('-C','--cheat',default=False,action='store_true')
parser.add_argument('-B','--blacklist')
parser.add_argument('-b','--besthits')
parser.add_argument('-c','--nchunks',default=32,type=int)
parser.add_argument('-l','--lengths')
parser.add_argument('-E','--edgefile')
parser.add_argument('-e','--pledgeingedgefile')
parser.add_argument('-L','--maxLength',type=float,default=150000.0)
parser.add_argument('-P','--promisc',type=float,default=0.023)
parser.add_argument('-o','--dotLabel',default="bad")
parser.add_argument('--seed',required=False,type=int,default=1, help="Seed for random number generation, use -1 for no seed")
args = parser.parse_args()
if args.debug:
args.progress=True
if args.seed != -1 :
random.seed(args.seed)
if args.progress: log( str(args) )
print("#"+str(args))
G=nx.Graph()
SG=nx.Graph()
ll={}
if args.lengths:
f = open(args.lengths)
while True:
l = f.readline()
if not l: break
if l[0]=="#": continue
c=l.strip().split()
l = int(c[1])
ll[c[0]]=int(c[1])
if l>= args.minlength:
G.add_node(c[0])
SG.add_node(c[0])
f.close()
if args.progress: print("#Done reading lengths")
besthit={}
if args.besthits:
# besthit={}
if args.besthits:
f = open(args.besthits)
while True:
l = f.readline()
if not l: break
if not l[:5]=="best:": continue
c=l.strip().split()
besthit[c[1]]=c[2:]
# print c[1],besthit[c[1]]
f.close()
if args.progress: print("#Done reading besthits")
if args.edgefile:
f = open(args.edgefile)
else:
f=sys.stdin
while True:
l = f.readline()
if not l: break
if l[0]=="#": continue
c=l.strip().split()
u,v,w = c[0],c[1],float(c[2])
if ( not args.lengths ) or (ll[u]>=args.minlength and ll[v]>=args.minlength):
G.add_edge(u,v,weight=-w)
SG.add_node(u)
SG.add_node(v)
if w >= args.threshold:
SG.add_edge(u,v,weight=-w)
if args.edgefile:
f.close()
if args.pledgeingedgefile:
f = open(args.pledgeingedgefile)
while True:
l = f.readline()
if not l: break
if l[0]=="#": continue
c=l.strip().split()
u,v,w = c[0],c[1],float(c[2])
if ( not args.lengths ) or (ll[u]>=args.minlength and ll[v]>=args.minlength):
G.add_edge(u,v,weight=-w)
f.close()
if args.progress: print("#Done reading edgelist")
bad_nodes=[]
total_discarded_length=0
total_discarded_length1=0
n_discarded1=0
n_discarded2=0
if args.maxdegree:
for n in sorted(SG.nodes()):
print("#dg:", SG.degree(n))
if SG.degree(n)>args.maxdegree:
n_discarded1+=1
bad_nodes.append(n)
total_discarded_length += ll[n]
print("#discard:",n,ll[n],SG.degree(n))
for nn in SG.neighbors(n):
if SG.degree(nn)==1:
n_discarded2+=1
total_discarded_length1+=ll[nn]
for n in bad_nodes:
e_to_remove=[]
for e in SG.edges([n]):
e_to_remove.append(e)
SG.remove_edges_from(e_to_remove)
if args.cutPromisc:
bad_nodes=[]
for n in sorted(SG.nodes()):
print("#ps:", old_div(float(SG.degree(n)), ll[n]), args.promisc,SG.degree(n),ll[n])
print("#pr:", old_div(float(G.degree(n)), ll[n]), args.promisc,G.degree(n),ll[n])
if (old_div(float(G.degree(n)), ll[n]))>args.promisc : # G.degree(n)/ll[n]>args.maxdegree:
n_discarded1+=1
bad_nodes.append(n)
total_discarded_length += ll[n]
print("#discard:",n,ll[n],G.degree(n))
# for nn in G.neighbors(n):
# if G.degree(nn)==1:
# n_discarded2+=1
# total_discarded_length1+=ll[nn]
for n in bad_nodes:
e_to_remove=[]
for e in SG.edges([n]):
e_to_remove.append(e)
#G.remove_edges_from(e_to_remove)
SG.remove_edges_from(e_to_remove)
if args.blacklist:
f=open(args.blacklist)
e_to_remove=[]
while True:
l=f.readline()
if not l: break
c=l.strip().split()
e_to_remove.append((c[1],c[2]))
G.remove_edges_from(e_to_remove)
SG.remove_edges_from(e_to_remove)
if args.cheat:
e_to_remove=[]
for a,b in sorted(SG.edges()):
if not ( a in besthit and b in besthit):
e_to_remove.append((a,b))
else:
aa = tuple(besthit[a][1:5])
bb = tuple(besthit[b][1:5])
qd = qdist(aa,bb)
if qd >= args.maxLength :
e_to_remove.append((a,b))
SG.remove_edges_from(e_to_remove)
if args.progress: print("#total_discarded_length",n_discarded1,n_discarded2,old_div(float(total_discarded_length),1.0e6),old_div(float(total_discarded_length1),1.0e6),old_div(float(total_discarded_length+total_discarded_length1),1.0e6))
promisc = {}
for n in sorted(SG.nodes()):
if args.debug: print("#r:",old_div(float(SG.degree(n)),ll[n]))
if (old_div(float(SG.degree(n)),ll[n]))>args.promisc: promisc[n]=True
tag_tallies={}
bad_tag_tallies={}
strx={"+":0, "-":1}
strings = []
ccn=0
component={}
component_contigs={}
chunk={}
#pledge_singletons(G,SG,args.threshold,2)
pledge_singletons2(G,SG,2)
for c in sorted(nx.connected_components(SG), key=lambda x: " ".join(sorted(x))) :
ccn+=1
print("c:",ccn,ccn%args.nchunks,len(c),sorted(c))
for cc in c:
component[cc]=ccn
chunk[cc]=ccn%args.nchunks
component_contigs[ccn] = tuple(c)
if args.define_components_only: exit(0)
intra_fhs={}
inter_fhs={}
for i in range(args.nchunks):
intra_fhs[i] = open("{}/intra.{}.links".format(args.outdir,i),"wt")
for j in range(i,args.nchunks):
inter_fhs[i,j] = open("{}/inter.{}-{}.links".format(args.outdir,i,j),"wt")
inter_fhs[j,i] = inter_fhs[i,j]
while True:
l = sys.stdin.readline()
if not l: break
if l[0]=="#": continue
c=l.strip().split("\t")
if not (ll[c[0]]>=args.minlength and ll[c[1]]>=args.minlength): continue
# if ( not args.lengths ) or (ll[u]>=args.minlength and ll[v]>=args.minlength):
if args.debug:
try:
print("#",c[0],c[1],component.get(c[0]),component.get(c[1]))
except Exception as e:
print(e)
print("#wtf",l)
chunk1,chunk2 = chunk[c[0]],chunk[c[1]]
comp1,comp2 = component[c[0]],component[c[1]]
if comp1==comp2:
intra_fhs[chunk1].write(l)
else:
inter_fhs[chunk1,chunk2].write(l)
# if component.get(c[0]) and component.get(c[1]) and component.get(c[0])==component.get(c[1]):
# print l.strip()
| DovetailGenomics/HiRise_July2015_GR | scripts/component_chunk_filter.py | component_chunk_filter.py | py | 21,779 | python | en | code | 28 | github-code | 13 |
42828601189 | from clean import preprocess
import sqlite3
import pandas as pd
import spacy
import re
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix,classification_report,accuracy_score
from keras.callbacks import EarlyStopping, ModelCheckpoint
import datetime as dt
import numpy as np
import keras
from keras.models import Sequential
from keras.preprocessing import sequence
from keras.initializers import he_normal
from keras.layers import BatchNormalization, Dense, Dropout, Flatten, LSTM
from keras.layers.embeddings import Embedding
from keras.regularizers import L1L2
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import GridSearchCV
import pickle
import warnings
warnings.filterwarnings("ignore")
class twitter():
def training(self,x,y):
x_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.2,random_state=35)
param_grid = {'C': [0.1, 1, 10, 100, 1000],
'gamma': [1, 0.1, 0.01, 0.001, 0.0001]}
pl = Pipeline([('tfidf',TfidfVectorizer()),('clf',GridSearchCV(SVC(probability=True), param_grid, refit = True, verbose = 3))])
pl.fit(x_train,y_train)
predicts = pl.predict(x_test)
print(confusion_matrix(y_test,predicts))
print(classification_report(y_test,predicts))
print("accuracy::",accuracy_score(y_test,predicts))
with open('model_svc.pkl','wb') as f:
pickle.dump(pl,f)
return pl,accuracy_score(y_test,predicts)
def data_gen(self,data):
y=data['airline_sentiment']
le = LabelEncoder()
y=le.fit_transform(y)
pickle.dump(le,open('label_svm.pkl', 'wb'))
data.text = data.text.apply(lambda x: preprocess(x))
print("number of classes::",len(list(le.classes_)))
nclasses=len(list(le.classes_))
x=data.text
return x,y,nclasses
def find_word_index(self,row,word_index_dict):
holder = []
for word in row.split():
if word in word_index_dict:
holder.append(word_index_dict[word])
else:
holder.append(0)
return holder
def lstm_training(self,x,y):
total_words = []
for sent in x:
words = sent.split()
total_words+=words
from collections import Counter
counter = Counter(total_words)
        top_words_count = int(len(counter) * 0.95)  # keep the most frequent ~95% of words
sorted_words = counter.most_common(top_words_count)
word_index_dict = dict()
i = 1
for word,frequency in sorted_words:
word_index_dict[word] = i
i += 1
text=[]
for t in x:
text.append(self.find_word_index(t,word_index_dict))
        pickle.dump(word_index_dict, open('word_index_dict.pkl', 'wb'))
label_binarizer = LabelBinarizer()
labels = label_binarizer.fit_transform(y)
pickle.dump(label_binarizer,open('label_lstm.pkl', 'wb'))
n_classes = len(label_binarizer.classes_)
x_train,x_test,y_train,y_test = train_test_split(text,labels,test_size=0.1,shuffle=True,random_state=35)
m=0
for ind in text:
i=len(ind)
m=max(m,i)
max_review_length = m
x_train = sequence.pad_sequences(x_train, maxlen=max_review_length)
x_test = sequence.pad_sequences(x_test, maxlen=max_review_length)
print("nclasses:",n_classes)
print("max length:",m)
vocab_size = len(counter.most_common()) + 1
model = Sequential()
# Add Embedding Layer
model.add(Embedding(vocab_size, 32, input_length=max_review_length))
# Add batch normalization
model.add(BatchNormalization())
# Add dropout
model.add(Dropout(0.20))
# Add LSTM Layer
model.add(LSTM(128,return_sequences=True))
model.add(LSTM(64))
# Add dropout
model.add(Dropout(0.20))
# Add Dense Layer
        model.add(Dense(n_classes, activation='softmax'))
# Summary of the model
print("Model Summary: \n")
print(model.summary())
callbacks = [ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', save_best_only=True)]
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])  # categorical loss for the multi-class softmax output
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1,restore_best_weights=True)
results = model.fit(x_train, np.array(y_train), batch_size = 32, epochs = 10, verbose=2, validation_data=(x_test, y_test),callbacks=[early_stop])
test_scores = model.evaluate(x_test,y_test,verbose=1)
accuracy=test_scores[1]
predicts = model.predict(x_test)
# print("accuracy::",accuracy_score(y_test,predicts))
with open('model_lstm.pkl','wb') as f:
pickle.dump(model,f)
return model,accuracy
def main():
data = pd.read_csv("Tweets.csv")
print("done")
t=twitter()
x,y,nclasses=t.data_gen(data)
print("done")
svm_model,accuracy_svm=t.training(x,y)
print("done")
lstm_model,accuracy_lstm=t.lstm_training(x,y)
if(accuracy_lstm>=accuracy_svm):
with open('best_model/model.pkl','wb') as f:
pickle.dump(lstm_model,f)
print("LSTM")
else:
with open('best_model/model.pkl','wb') as f:
pickle.dump(svm_model,f)
print("SVM")
if __name__ == "__main__":
# calling main function
main()
| saireddyavs/Avanov | model/main.py | main.py | py | 5,878 | python | en | code | 0 | github-code | 13 |
9018671678 | import socket
target_host = "127.0.0.1"
target_port = 9997
# Create a socket object
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Send data
client.sendto(b"AAABBBCCC", (target_host, target_port))
# Receive data
data, address = client.recvfrom(4096)
print("Success!")
print(data.decode('utf-8'))
print(address)
client.close() | ryu1998/Security_Practice | base practice/udp_client.py | udp_client.py | py | 373 | python | en | code | 0 | github-code | 13 |
31097820542 | # -*- coding: utf-8 -*-
"""
Created on Thu May 3 08:46:17 2018
@author: Thierry CHAUVIER
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
class T_roc_curve():
"""
Return metrics on ROC curve for a binary classifier
"""
def __init__(self,y_test, y_pred_proba):
        self.nom = "t_roc_curve"
        '''
        The ROC curve can be summarised by a single number, the "area under the curve" (AUROC,
        "Area Under the ROC"), which makes it easier to compare several models.
        A perfect classifier has an AUROC of 1; a random classifier has an AUROC of 0.5.
        '''
y_pred_proba2 = y_pred_proba[:, 1]
[self.fpr, self.tpr, self.thr] = metrics.roc_curve(y_test, y_pred_proba2)
self.AUROC = metrics.auc(self.fpr, self.tpr)
def graph(self):
plt.plot(self.fpr, self.tpr, color='coral', lw=2)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.title('ROC Curve')
        plt.xlabel('1 - specificity', fontsize=14)
        plt.ylabel('Sensitivity', fontsize=14)
print('AUROC = ',self.AUROC)
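# Hedged usage sketch (added for illustration; `clf`, `X_test` and `y_test` are assumed to come
# from a fitted binary scikit-learn classifier and its held-out data):
#     roc = T_roc_curve(y_test, clf.predict_proba(X_test))
#     roc.graph()   # plots the ROC curve and prints the AUROC (1.0 = perfect, 0.5 = random)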
class T_confusion_matrix():
"""
Return metrics on confusion matrix for a binary classifier
"""
def __init__(self,y_test, y_pred):
self.nom = "t_confusion_matrix"
self.cnf_matrix=metrics.confusion_matrix(y_test, y_pred)
self.TN=self.cnf_matrix[0,0]
self.FN=self.cnf_matrix[1,0]
self.FP=self.cnf_matrix[0,1]
self.TP=self.cnf_matrix[1,1]
        '''
        Recall, also called sensitivity, is the true positive rate,
        i.e. the proportion of positives that were correctly identified.
        It is the model's ability to detect all the fires.
        '''
self.recall=self.TP/(self.TP+self.FN)
        '''
        Precision is the proportion of correct predictions among
        the points predicted positive.
        It is the model's ability to raise an alarm only for a real fire.
        '''
self.precision=self.TP/(self.TP+self.FP)
        '''
        The "F-measure" is their harmonic mean.
        '''
self.f_mesure=(2*self.TP)/((2*self.TP)+self.FP+self.FN)
        '''
        Specificity is the true negative rate, i.e. the ability to detect
        every situation in which there is no fire.
        It is a measure complementary to sensitivity.
        '''
self.specificity=self.TN/(self.FP+self.TN)
def graph(self):
from mlxtend.plotting import plot_confusion_matrix
fig, ax = plot_confusion_matrix(conf_mat=self.cnf_matrix,figsize=(20,20))
plt.title('Confusion Matrix')
plt.show()
print("True Negative=",self.TN)
print("True Positive=",self.TP)
print("False Negative=",self.FN)
print("False Positive=",self.FP)
print("recall TP/(TP+FN) = ",self.recall)
print("precision TP/(TP+FP)= ",self.precision)
print("f_mesure (2*TP)/((2*TP)+FP+FN)= ",self.f_mesure)
print("specificity TN/(FP+TN) = ",self.specificity)
def t_confusion_matrix(y_test, y_pred,aff=1):
"""
Return metrics on confusion matrix for a binary classifier
"""
cnf_matrix=metrics.confusion_matrix(y_test, y_pred)
TN=cnf_matrix[0,0]
FN=cnf_matrix[1,0]
FP=cnf_matrix[0,1]
TP=cnf_matrix[1,1]
    '''
    Recall, also called sensitivity, is the true positive rate,
    i.e. the proportion of positives that were correctly identified.
    It is the model's ability to detect all the fires.
    '''
recall=TP/(TP+FN)
    '''
    Precision is the proportion of correct predictions among
    the points predicted positive.
    It is the model's ability to raise an alarm only for a real fire.
    '''
precision=TP/(TP+FP)
    '''
    The "F-measure" is their harmonic mean.
    '''
f_mesure=(2*TP)/((2*TP)+FP+FN)
    '''
    Specificity is the true negative rate, i.e. the ability to detect
    every situation in which there is no fire.
    It is a measure complementary to sensitivity.
    '''
specificity=TN/(FP+TN)
if aff > 0:
from mlxtend.plotting import plot_confusion_matrix
fig, ax = plot_confusion_matrix(conf_mat=cnf_matrix)
plt.show()
print("True Negative=",TN)
print("True Positive=",TP)
print("False Negative=",FN)
print("False Positive=",FP)
print("recall TP/(TP+FN) = ",recall)
print("precision TP/(TP+FP)= ",precision)
print("f_mesure (2*TP)/((2*TP)+FP+FN)= ",f_mesure)
print("specificity TN/(FP+TN) = ",specificity)
return(recall,specificity,precision)
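# Hedged usage sketch (added for illustration, not part of the original module); the labels
# below are made-up values chosen so that TP=3, TN=3, FP=1 and FN=1.
def _example_t_confusion_matrix():
    y_true = [1, 0, 1, 1, 0, 0, 1, 0]
    y_pred = [1, 0, 0, 1, 0, 1, 1, 0]
    # expected: recall = 3/4, specificity = 3/4, precision = 3/4
    return t_confusion_matrix(y_true, y_pred, aff=0)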
def t_variance(X_features,X_components,nb,seuil=0.8,aff=1):
"""
Calculate variance after dimension reduction
"""
    tot_var_X = np.sum(np.var(X_features,axis=0)) # variance of the features
    tot_var_comp_nb = np.sum(np.var(X_components[:,0:nb],axis=0)) # cumulative variance of the first N principal components
    tot_var_comp = np.sum(np.var(X_components,axis=0)) # cumulative variance of all principal components
    explained_variance = np.var(X_components,axis=0) # explained variance of each principal component
if np.sum(explained_variance) == 0:
explained_variance_ratio = 0
else:
        explained_variance_ratio = explained_variance / np.sum(explained_variance) # explained variance ratio of each principal component
    explained_variance_ratio_cumsum = np.cumsum(explained_variance_ratio) # cumulative sum of the explained variance ratios
nb_comp = explained_variance_ratio_cumsum.shape[0]
    # find the minimum number of principal components needed to reach the requested threshold
for i in range(0,explained_variance_ratio_cumsum.shape[0]):
if explained_variance_ratio_cumsum[i] > seuil:
nb_comp = i
break
explained_variance_ratio_cumsum = np.round(explained_variance_ratio_cumsum,2)
if tot_var_comp == 0:
pct_exp = 0
else:
        pct_exp=tot_var_comp_nb/tot_var_comp # fraction of variance explained by the first N principal components
if aff > 0:
print("Nombre des features de X = ",X_features.shape[1])
print("Total variance des features = %0.03f"%(tot_var_X))
print("Total variance des composantes principales = %0.03f"%(tot_var_comp))
print("Total variance des %i premières composantes principales = %0.03f"%(nb,tot_var_comp_nb))
print("Pourcentage de la variance expliquée par les %i premières composantes principales = %0.03f "%(nb,pct_exp))
print("Il faut les %i premières composantes principales pour expliquer au moins %0.00f pct de la variance"%(nb_comp+1,seuil*100))
plt.figure(figsize=(5,5))
plt.plot(explained_variance_ratio_cumsum)
plt.ylabel("% explained variance ration ")
plt.xlabel("Nb components")
plt.show()
return(nb_comp+1,pct_exp)
def t_is_numeric(obj):
"""
    This function tests whether arg is numeric
"""
attrs = ['__add__', '__sub__', '__mul__', '__truediv__', '__pow__']
return all(hasattr(obj, attr) for attr in attrs)
def t_analyze(df):
"""
    This function analyzes each column with different metrics
"""
# Create dataframe which will recieve metrics
v_columns=['nb_lignes','nb_lignes_distinctes','nb_doublons','nb_nan','pct_nan','nb_val_num','pct_val_num','nb_val_alpha','pct_val_alpha',
'Max','Min',"Ecart Type","Moyenne","quantile_25","quantile_50","quantile_75"]
result=pd.DataFrame(np.zeros((len(df.columns.values),len(v_columns))),
index=df.columns.values,
columns=v_columns)
for ind,col in enumerate(df.columns.values):
print("Column : {} : {}".format(ind,col))
result['nb_lignes'][col] = len(df[col])
result['nb_lignes_distinctes'][col] = len(df[col].value_counts())
try:
result['nb_doublons'][col] = len(df[col][df.groupby(col)[col].transform('count') > 1].unique())
except:
pass
result['nb_nan'][col] = df[col].isnull().values.sum()
result['nb_val_num'][col] = df[col][df[col].apply(lambda x: t_is_numeric(x))].count()
result['nb_val_alpha'][col] = result['nb_lignes'][col] - result['nb_val_num'][col] - result['nb_nan'][col]
result['pct_nan'][col]=result['nb_nan'][col]/result['nb_lignes'][col]
result['pct_val_num'][col]=result['nb_val_num'][col]/result['nb_lignes'][col]
result['pct_val_alpha'][col]=result['nb_val_alpha'][col]/result['nb_lignes'][col]
        # compute statistics
if result['nb_val_alpha'][col] == 0:
result['Max'][col]=df[col].max()
result['Min'][col]=df[col].min()
result['Ecart Type'][col]=df[col].std()
result['Moyenne'][col]=df[col].mean()
result['quantile_25'][col]=df[col].quantile(q=0.25)
result['quantile_50'][col]=df[col].quantile(q=0.5)
result['quantile_75'][col]=df[col].quantile(q=0.75)
else:
result['Ecart Type'][col]=np.nan
result['Max'][col]=np.nan
result['Min'][col]=np.nan
result['Moyenne'][col]=np.nan
result['quantile_25'][col]=np.nan
result['quantile_50'][col]=np.nan
result['quantile_75'][col]=np.nan
# detect values data types
dtypeCount =[df.iloc[:,i].apply(type).value_counts() for i in range(df.shape[1])]
dtcount = pd.DataFrame(dtypeCount)
dtcount.columns=dtcount.columns.astype(str)
result = pd.concat([result,dtcount], axis=1)
result = pd.concat([result,df.dtypes], axis=1)
result.rename(columns={0:'pandas_dtypes'}, inplace=True)
result['pandas_dtypes']=result['pandas_dtypes'].astype('str')
result.columns=result.columns.str.replace("'","")
result.columns=result.columns.str.replace("<","")
result.columns=result.columns.str.replace(">","")
print ("End of analyze : ")
return(result)
def main():
pass
# =============================================================================
# Start run
# =============================================================================
if __name__ == "__main__":
main() | tchau2403/p6repo | t_lib_util.py | t_lib_util.py | py | 11,051 | python | fr | code | 0 | github-code | 13 |
71717271059 | #!/usr/bin/env python
# coding: utf-8
'''
Using an iterative method
'''
def fab(n):
n1 = 1
n2 = 1
n3 = 1
if n < 1:
        print('Invalid input!')
return -1
while (n-2) > 0:
n3 = n2 + n1
n1 = n2
n2 = n3
n -= 1
return n3
result = fab(40)
if result != -1:
    print('A total of %d pairs of baby rabbits were born!' % result)
72915493458 | import logging
import os.path
import pytest
from qutebrowser.qt.core import QUrl
from qutebrowser.browser import pdfjs
from qutebrowser.utils import urlmatch
pytestmark = [pytest.mark.usefixtures('data_tmpdir')]
@pytest.mark.parametrize('available, snippet', [
(True, '<title>PDF.js viewer</title>'),
(False, '<h1>No pdf.js installation found</h1>'),
('force', 'fake PDF.js'),
])
def test_generate_pdfjs_page(available, snippet, monkeypatch):
if available == 'force':
monkeypatch.setattr(pdfjs, 'is_available', lambda: True)
monkeypatch.setattr(pdfjs, 'get_pdfjs_res',
lambda filename: b'fake PDF.js')
elif available:
if not pdfjs.is_available():
pytest.skip("PDF.js unavailable")
monkeypatch.setattr(pdfjs, 'is_available', lambda: True)
else:
monkeypatch.setattr(pdfjs, 'is_available', lambda: False)
content = pdfjs.generate_pdfjs_page('example.pdf', QUrl())
print(content)
assert snippet in content
# Note that we get double protection, once because we use QUrl.ComponentFormattingOption.FullyEncoded and
# once because we use qutebrowser.utils.javascript.to_js. Characters like " are
# already replaced by QUrl.
@pytest.mark.parametrize('filename, expected', [
('foo.bar', "foo.bar"),
('foo"bar', "foo%22bar"),
('foo\0bar', 'foo%00bar'),
('foobar");alert("attack!");',
'foobar%22);alert(%22attack!%22);'),
])
def test_generate_pdfjs_script(filename, expected):
expected_open = 'open("qute://pdfjs/file?filename={}");'.format(expected)
actual = pdfjs._generate_pdfjs_script(filename)
assert expected_open in actual
assert 'PDFView' in actual
class TestResources:
@pytest.fixture
def read_system_mock(self, mocker):
return mocker.patch.object(pdfjs, '_read_from_system', autospec=True)
@pytest.fixture
def read_file_mock(self, mocker):
return mocker.patch.object(pdfjs.resources, 'read_file_binary', autospec=True)
def test_get_pdfjs_res_system(self, read_system_mock):
read_system_mock.return_value = (b'content', 'path')
assert pdfjs.get_pdfjs_res_and_path('web/test') == (b'content', 'path')
assert pdfjs.get_pdfjs_res('web/test') == b'content'
read_system_mock.assert_called_with('/usr/share/pdf.js/',
['web/test', 'test'])
def test_get_pdfjs_res_bundled(self, read_system_mock, read_file_mock,
tmpdir):
read_system_mock.return_value = (None, None)
read_file_mock.return_value = b'content'
assert pdfjs.get_pdfjs_res_and_path('web/test') == (b'content', None)
assert pdfjs.get_pdfjs_res('web/test') == b'content'
for path in ['/usr/share/pdf.js/',
str(tmpdir / 'data' / 'pdfjs'),
# hardcoded for --temp-basedir
os.path.expanduser('~/.local/share/qutebrowser/pdfjs/')]:
read_system_mock.assert_any_call(path, ['web/test', 'test'])
def test_get_pdfjs_res_not_found(self, read_system_mock, read_file_mock,
caplog):
read_system_mock.return_value = (None, None)
read_file_mock.side_effect = FileNotFoundError
with pytest.raises(pdfjs.PDFJSNotFound,
match="Path 'web/test' not found"):
pdfjs.get_pdfjs_res_and_path('web/test')
assert not caplog.records
def test_get_pdfjs_res_oserror(self, read_system_mock, read_file_mock,
caplog):
read_system_mock.return_value = (None, None)
read_file_mock.side_effect = OSError("Message")
with caplog.at_level(logging.WARNING):
with pytest.raises(pdfjs.PDFJSNotFound,
match="Path 'web/test' not found"):
pdfjs.get_pdfjs_res_and_path('web/test')
expected = 'OSError while reading PDF.js file: Message'
assert caplog.messages == [expected]
def test_broken_installation(self, data_tmpdir, tmpdir, monkeypatch,
read_file_mock):
"""Make sure we don't crash with a broken local installation."""
monkeypatch.setattr(pdfjs, '_SYSTEM_PATHS', [])
monkeypatch.setattr(pdfjs.os.path, 'expanduser',
lambda _in: tmpdir / 'fallback')
read_file_mock.side_effect = FileNotFoundError
(data_tmpdir / 'pdfjs' / 'pdf.js').ensure() # But no viewer.html
content = pdfjs.generate_pdfjs_page('example.pdf', QUrl())
assert '<h1>No pdf.js installation found</h1>' in content
@pytest.mark.parametrize('path, expected', [
('web/viewer.js', 'viewer.js'),
('build/locale/foo.bar', 'locale/foo.bar'),
('viewer.js', 'viewer.js'),
('foo/viewer.css', 'foo/viewer.css'),
])
def test_remove_prefix(path, expected):
assert pdfjs._remove_prefix(path) == expected
@pytest.mark.parametrize('names, expected_name', [
(['one'], 'one'),
(['doesnotexist', 'two'], 'two'),
(['one', 'two'], 'one'),
(['does', 'not', 'onexist'], None),
])
def test_read_from_system(names, expected_name, tmpdir):
file1 = tmpdir / 'one'
file1.write_text('text1', encoding='ascii')
file2 = tmpdir / 'two'
file2.write_text('text2', encoding='ascii')
if expected_name == 'one':
expected = (b'text1', str(file1))
elif expected_name == 'two':
expected = (b'text2', str(file2))
elif expected_name is None:
expected = (None, None)
assert pdfjs._read_from_system(str(tmpdir), names) == expected
@pytest.fixture
def unreadable_file(tmpdir):
unreadable_file = tmpdir / 'unreadable'
unreadable_file.ensure()
unreadable_file.chmod(0)
if os.access(unreadable_file, os.R_OK):
# Docker container or similar
pytest.skip("File was still readable")
yield unreadable_file
unreadable_file.chmod(0o755)
def test_read_from_system_oserror(tmpdir, caplog, unreadable_file):
expected = (None, None)
with caplog.at_level(logging.WARNING):
assert pdfjs._read_from_system(str(tmpdir), ['unreadable']) == expected
assert len(caplog.records) == 1
message = caplog.messages[0]
assert message.startswith('OSError while reading PDF.js file:')
@pytest.mark.parametrize('available', [True, False])
def test_is_available(available, mocker):
mock = mocker.patch.object(pdfjs, 'get_pdfjs_res', autospec=True)
if available:
mock.return_value = b'foo'
else:
mock.side_effect = pdfjs.PDFJSNotFound('build/pdf.js')
assert pdfjs.is_available() == available
@pytest.mark.parametrize('mimetype, url, enabled, expected', [
# PDF files
('application/pdf', 'http://www.example.com', True, True),
('application/x-pdf', 'http://www.example.com', True, True),
# Not a PDF
('application/octet-stream', 'http://www.example.com', True, False),
# PDF.js disabled
('application/pdf', 'http://www.example.com', False, False),
# Download button in PDF.js
('application/pdf', 'blob:qute%3A///b45250b3', True, False),
])
def test_should_use_pdfjs(mimetype, url, enabled, expected, config_stub):
config_stub.val.content.pdfjs = enabled
assert pdfjs.should_use_pdfjs(mimetype, QUrl(url)) == expected
@pytest.mark.parametrize('url, expected', [
('http://example.com', True),
('http://example.org', False),
])
def test_should_use_pdfjs_url_pattern(config_stub, url, expected):
config_stub.val.content.pdfjs = False
pattern = urlmatch.UrlPattern('http://example.com')
config_stub.set_obj('content.pdfjs', True, pattern=pattern)
assert pdfjs.should_use_pdfjs('application/pdf', QUrl(url)) == expected
def test_get_main_url():
expected = QUrl('qute://pdfjs/web/viewer.html?filename=hello?world.pdf&'
'file=&source=http://a.com/hello?world.pdf')
original_url = QUrl('http://a.com/hello?world.pdf')
assert pdfjs.get_main_url('hello?world.pdf', original_url) == expected
| qutebrowser/qutebrowser | tests/unit/browser/test_pdfjs.py | test_pdfjs.py | py | 8,101 | python | en | code | 9,084 | github-code | 13 |
25328093150 | import pandas as pd
import altair as alt
df = pd.read_csv('data/beer.csv')
df['time'] = pd.to_timedelta(df['time'] + ':00')
df = pd.melt(df,
id_vars=['time', 'beer', 'ml', 'abv'],
value_vars=['Mark', 'Max', 'Adam'],
var_name='name', value_name='quantity'
)
weight = pd.DataFrame({
'name': ['Max', 'Mark', 'Adam'],
'weight': [165, 155, 200]
})
df = pd.merge(df, weight, how='left', on='name')
# standard drink has 17.2 ml of alcohol
df['standard_drink'] = (df['ml'] * (df['abv'] / 100) * df['quantity']) / 17.2
df['cumsum_drinks'] = df.groupby(['name'])['standard_drink'].apply(lambda x: x.cumsum())
df['hours'] = df['time'] - df['time'].min()
df['hours'] = df['hours'].apply(lambda x: x.seconds / 3600)
def ebac(standard_drinks, weight, hours):
# https://en.wikipedia.org/wiki/Blood_alcohol_content
BLOOD_BODY_WATER_CONSTANT = 0.806
SWEDISH_STANDARD = 1.2
BODY_WATER = 0.58
META_CONSTANT = 0.015
def lb_to_kg(weight):
return weight * 0.4535924
n = BLOOD_BODY_WATER_CONSTANT * standard_drinks * SWEDISH_STANDARD
d = BODY_WATER * lb_to_kg(weight)
bac = (n / d - META_CONSTANT * hours)
return bac
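# Hedged sanity check (illustrative only, values are assumptions): one standard drink for a
# 165 lb person after one hour gives roughly
#     ebac(1, 165, 1) = 0.806*1*1.2 / (0.58 * 74.84) - 0.015*1 ≈ 0.022 - 0.015 ≈ 0.007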
df['bac'] = df.apply(
lambda row: ebac(
row['cumsum_drinks'], row['weight'], row['hours']
), axis=1
)
mh = df[df['name'] == 'Max'][['time', 'bac']]
# def timedelta_to_datetime(date, timedelta):
# t = timedelta.to_pytimedelta()
# d = pd.Timestamp(date)
# return (d + t).to_pydatetime()
#
# mh['datetime'] = mh.apply(lambda row: timedelta_to_datetime('2018-04-21', row['time']), axis=1)
ratings = pd.read_csv('data/ratings.csv')
ratings = pd.melt(ratings,
id_vars=['beer'],
value_vars=['Mark', 'Max', 'Adam'],
var_name='name', value_name='rating'
)
df = pd.merge(df, ratings, how='left', on=['name', 'beer'])
(alt.Chart(
df[['rating', 'bac', 'name']],
background='white',
title='12 Beers'
)
.mark_circle(opacity=0.9, size=80)
.encode(x='bac', y='rating', color='name')
.properties(height=400, width=600)
.interactive()
)
| maxhumber/talks | 2018-05-03_data_creationism/03-2_beer.py | 03-2_beer.py | py | 2,082 | python | en | code | 8 | github-code | 13 |
34834733489 | import re
from sqlalchemy import Column, DateTime, ForeignKeyConstraint, Integer, String
from sqlalchemy.orm import relationship, validates
from database import Base
# it seems that github has a limit of:
# * 39 chars for usernames (according to https://github.com/shinnn/github-username-regex)
# * 100 chars for repo names (according to https://github.com/evalEmpire/gitpan/issues/123)
MAX_REPO_LENGTH = 140
class PullRequest(Base):
__tablename__ = 'pull_requests'
# courtesy of the same repo as for MAX_REPO_LENGTH
_REPO_REGEX = re.compile(r'^[a-z\d](?:[a-z\d]|-(?=[a-z\d])){0,38}/[a-z\d](?:[a-z\d]|-(?=[a-z\d])){0,99}$', re.IGNORECASE) # noqa
_URL_REGEX = re.compile(r'^(?:https://)?github.com/(%s)/pull/([0-9]+)$' % (_REPO_REGEX.pattern[1:-1]), re.IGNORECASE) # noqa
repo = Column(String(MAX_REPO_LENGTH), primary_key=True)
number = Column(Integer, primary_key=True)
last_processed_sha = Column(String(40))
STATUSES = ['successful', 'pending', 'failed']
status = Column(String(max([len(status) for status in STATUSES])))
_SHA_REGEX = re.compile(r'^[0-9a-f]{40}$')
# repo is eg moby/moby
# number is eg 34567
def __init__(self, repo, number):
self.repo = repo
self.number = int(number)
@classmethod
def from_url(cls, url):
match = cls._URL_REGEX.match(url)
if not match:
raise RuntimeError('Not a valid PR URL: %s' % (url, ))
return cls(match.group(1), match.group(2))
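    # Hedged example (illustrative): PullRequest.from_url('https://github.com/moby/moby/pull/34567')
    # gives repo='moby/moby', number=34567 and slug 'moby/moby#34567'.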
@validates('repo')
def _validate_repo(self, _key, repo):
if not self._REPO_REGEX.match(repo):
raise AssertionError
return repo
@validates('status')
    def _validate_status(self, _key, status):
if status not in self.STATUSES:
raise AssertionError
return status
@validates('last_processed_sha')
def _validate_last_processed_sha(self, _key, sha):
if sha and not self.__class__.is_valid_sha(sha):
raise AssertionError
return sha
@classmethod
def is_valid_sha(cls, sha):
return cls._SHA_REGEX.match(sha)
@property
def slug(self):
return '%s#%s' % (self.repo, self.number)
@property
def url(self):
return 'https://github.com/%s/pull/%s' % (self.repo, self.number)
def __repr__(self):
return self.slug
checks = relationship('Check', cascade='all,delete')
class Check(Base):
__tablename__ = 'checks'
__table_args__ = (ForeignKeyConstraint(['repo', 'number'], ['pull_requests.repo', 'pull_requests.number']), )
repo = Column(String(MAX_REPO_LENGTH), primary_key=True)
number = Column(Integer, primary_key=True)
context = Column(String(255), primary_key=True)
# counts how many failures have been observed in a row,
# that is, _consecutive_ failures
failure_count = Column(Integer)
# that's the ID of the last processed GH event
last_errored_id = Column(Integer)
# the last time this check got retried _after a failure_
last_retried_at = Column(DateTime())
def __init__(self, pr, context):
self.repo = pr.repo
self.number = pr.number
self.context = context
self.failure_count = 0
| wk8/github_retry | models.py | models.py | py | 3,240 | python | en | code | 0 | github-code | 13 |
34935977015 | from keras.models import model_from_yaml
from keras.optimizers import Adam
from keras.datasets import fashion_mnist
from keras.utils import to_categorical
yaml_file = open('best-gBest-model.yaml', 'r')
loaded_model_yaml = yaml_file.read()
yaml_file.close()
loaded_model = model_from_yaml(loaded_model_yaml)
loaded_model.load_weights('best-gBest-weights.h5')
adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, decay=0.0)
loaded_model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])
loaded_model.save('model_1.h5') # save the model (overwrite weights file with full arch)
(trainX, trainY), (testX, testY) = fashion_mnist.load_data() # load the training and testing data.
testYc = to_categorical(testY)
print(loaded_model.summary())
_, acc = loaded_model.evaluate(testX, testYc, verbose=0)
print('Model Accuracy: %.4f' % acc)
| seamusl/OpenNAS-v1 | pso-util-build_model.py | pso-util-build_model.py | py | 865 | python | en | code | 0 | github-code | 13 |
38324938635 | from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from app.geo import models
class State(serializers.ModelSerializer):
initials = serializers.CharField()
class Meta:
model = models.State
exclude = ('id', )
def create(self, validated_data):
obj, _ = models.State.objects.get_or_create(
initials=validated_data['initials'].strip(),
name=validated_data.get('name', '').strip()
)
return obj
class City(serializers.ModelSerializer):
state = State()
class Meta:
model = models.City
exclude = ('id', )
def create(self, validated_data):
obj = models.City.objects.filter(
name=validated_data['name'].strip(),
state__initials=validated_data['state']['initials'].strip()
).first()
        if not obj:
            state = State(data=validated_data['state'])
            if not state.is_valid():
                raise ValueError('`state` is not valid')
            validated_data['state'] = state.save()
            obj, _ = models.City.objects.get_or_create(
                name=validated_data['name'],
                state=validated_data['state']
            )
        return obj
| shinneider/events-challenge | django-event/app/geo/api_v1/serializer/event.py | event.py | py | 1,304 | python | en | code | 0 | github-code | 13 |
11939258472 | from django.db import models
from django.utils import timezone
# Main model of Elevators
class Elevator(models.Model):
direction = models.CharField(max_length=64,choices=(('up','up'),('down','down'),('ideal','ideal')),default='ideal')
door = models.CharField(max_length=64,choices=(('open','open'),('close','close')),default='close')
running_status = models.CharField(max_length=64,choices=(('start','start'),('stop','stop')),default='stop')
available_status = models.CharField(max_length=64,choices=(('available','available'),('busy','busy')),default='available')
operational = models.BooleanField(default=True)
created = models.DateTimeField(null=True,blank=True)
current_floor = models.IntegerField(default=0)
    # Set the creation timestamp the first time the object is saved
def save(self,*args,**kwargs):
if not self.id:
self.created = timezone.now()
super(Elevator, self).save(*args, **kwargs)
    # Serialize the model for a JSON API response
@property
def serialize(self):
dic = {}
dic['id'] = self.id
dic['direction'] = self.direction
dic['door'] = self.door
dic['running_status'] = self.running_status
dic['current_floor'] = self.current_floor
dic['available_status'] = self.available_status
dic['operational'] = self.operational
dic['next_floors_requested'] = self.get_requested_floors()
return dic
    # Get the list of floors requested next for this elevator
def get_requested_floors(self):
return [i.__dict__['request_floor'] for i in ElevatorRequest.objects.filter(elevator=self)]
# Stores each floor request in the database
class ElevatorRequest(models.Model):
elevator = models.ForeignKey(Elevator, on_delete=models.CASCADE)
request_count = models.IntegerField(default=0)
request_floor = models.IntegerField(default=0,unique=True)
created = models.DateTimeField(null=True,blank=True)
    # Set the creation timestamp the first time the object is saved
def save(self,*args,**kwargs):
if not self.id:
self.created = timezone.now()
super(ElevatorRequest, self).save(*args, **kwargs)
    # Serialize the model for a JSON API response
@property
def serialize(self):
dic = {}
dic['id'] = self.id
dic['elevator'] = self.elevator.id
dic['request_count'] = self.request_count
dic['request_floor'] = self.request_floor
dic['created'] = str(self.created)
return dic
| knowarunyadav/elevator_project | api/models.py | models.py | py | 2,496 | python | en | code | 0 | github-code | 13 |
39406011780 | import sys
import os
import json
import urllib2
import xmltodict
query = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=sra&term='
fetch = 'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=sra&id='
sra_file = open('SRA_IDs.txt', 'r')
for line in sra_file:
# Get the current SRA ID.
sra_id = line.strip()
    print('Retrieving ' + sra_id)
# Make sure our sample directory exists.
if not os.path.exists(sra_id):
os.makedirs(sra_id)
# First perform the entrez query with the SRA database.
response_obj = urllib2.urlopen(query + sra_id + '[Accession]')
response_xml = response_obj.read()
response = xmltodict.parse(response_xml)
query_id = response['eSearchResult']['IdList']['Id']
# Next get the query results. We are only querying a single SRA
# record at a time.
response_obj = urllib2.urlopen(fetch + query_id)
response_xml = response_obj.read()
response = xmltodict.parse(response_xml)
meta_file = open(sra_id + '/' + sra_id + '.sra.json', 'w')
meta_file.write(json.dumps(response))
meta_file.close()
sra_file.close()
| spficklin/Kamiak-GEM | scripts/retrieve_sample_metadata.py | retrieve_sample_metadata.py | py | 1,099 | python | en | code | 0 | github-code | 13 |
19254035492 | import pandas as pd
import plotly.express as px
import numpy as np
import pycountry_convert as pc
import dash
from dash import dcc
from dash import html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
# http://127.0.0.1:8050/ to go to the website
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.LUX])
path = 'HieuWork\CO2_by_capita.xlsx'
df_CO2_country = pd.read_excel(io = path, sheet_name='fossil_CO2_per_capita_by_countr')
nordic_countries = ['Denmark', 'Finland', 'Iceland', 'Norway', 'Sweden', 'Greenland', 'Faroes']
# Convert country name into continent name
def country_to_continent(country_name):
try:
country_alpha2 = pc.country_name_to_country_alpha2(country_name)
country_continent_code = pc.country_alpha2_to_continent_code(country_alpha2)
country_continent_name = pc.convert_continent_code_to_continent_name(country_continent_code)
except:
return 'Unspecified'
return country_continent_name
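# Hedged example (illustrative): country_to_continent('Denmark') -> 'Europe';
# names that pycountry_convert cannot resolve fall back to 'Unspecified'.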
df_CO2_country['Continent'] = df_CO2_country['Country'].apply(lambda x: country_to_continent(x))
region = [{'label':c, 'value': c} for c in ['World', 'Asia', 'Africa', 'Europe', 'North America', 'Nordic', 'Oceania', 'South America']]
year = [{'label': str(c), 'value': c} for c in df_CO2_country.columns[3:]]
#print(year)
app.layout = html.Div(
children = [
html.H1('CO2 emission per capita', style = {'text-align':'center'}),
html.Div(
children = [
html.H3('Choose a region:'),
],
style={'width': '100%', 'margin-left': '50px'}
),
dcc.RadioItems(id = 'region',
options=region,
value = 'World',
inline=False,
style={'margin-left':'50px'},
labelStyle={'display': 'inline-block', 'margin-right':'15px'}, # block for column, inline-block for line
),
html.Br(),
dcc.Graph(id = 'co2_graph', figure = {}, style = {'margin-left':'150px'}),
html.Br(),
html.Div(
children =
[dcc.Slider(min = 1970,
max = 2021,
step = 1,
value = 2021,
marks=None,
tooltip={"placement": "bottom", "always_visible": False},
id = 'year_slider')],
style = {'width': '50%', 'margin-left':'440px'}
),
html.Div(id = 'output_container', children = [], style={'text-align':'center', 'font-size':'25px'})
]
)
@app.callback(
[Output(component_id='output_container', component_property='children'),
Output(component_id = 'co2_graph', component_property='figure')],
[Input(component_id='region', component_property='value'),
Input(component_id='year_slider', component_property='value')]
)
def update_graph(region_slctd,year_slctd): # number of arguments is the same as the number of inputs
print(region_slctd)
print(type(region_slctd))
print(year_slctd)
print(type(year_slctd))
container = ' CO2 emission per capita in {}'.format(year_slctd)
if (region_slctd == 'World'):
df_CO2 = df_CO2_country.copy()
elif (region_slctd == 'Nordic'):
df_CO2 = df_CO2_country[df_CO2_country['Country'].isin(nordic_countries)]
else:
df_CO2 = df_CO2_country[df_CO2_country['Continent'] == region_slctd]
df_CO2 = df_CO2[['Country', year_slctd]]
df_CO2[year_slctd] = np.round(df_CO2[year_slctd], 3)
center_dict = {
'World': dict(lat=0,lon=0),
'Asia': dict(lat=60,lon=150),
'Africa': dict(lat=40, lon=80),
'Europe': dict(lat=40, lon=80)
}
fig = px.choropleth(
data_frame=df_CO2,
locationmode='country names',
locations= 'Country',
color= year_slctd,
range_color=[0, 20],
color_continuous_scale=px.colors.sequential.Aggrnyl,
hover_data={'Country': False},
labels={str(year_slctd): 'CO2 emission per capita'},
hover_name='Country',
basemap_visible=True,
# center = center_dict[region_slctd]
)
fig.update_layout(margin = {'r':0,'t':0,'l':0,'b':0}) # template in ["plotly", "plotly_white", "plotly_dark", "ggplot2", "seaborn", "simple_white", "none"]
return container, fig
if __name__ == '__main__':
app.run_server(debug=True) | HieuPhamNgoc/Data-Science-Project-Group-2 | HieuWork/second.py | second.py | py | 4,508 | python | en | code | 0 | github-code | 13 |
41893672772 | #coding:utf-8
'''
读取基因的gff数据
[geneID:[flag,start,end]]
1.需要基因组gff注释文件
2.ot2gtf脚本处理并且过滤之后的文件
3.每个基因名字的长度信息
eg: Ghir_A09G006360 填15
4.sgRNA的结果文件 sgRNAcas9_report.xls
5.输出文件
'''
def usage():
print("usage:\n")
print("\t"+"-h|--help"+"\t"+"print help information")
print("\t"+"-g|--gff="+"\t"+"gff file path way")
print("\t"+"-s|--sgRNA="+"\t"+"sgRNA file path way")
print("\t"+"-l|--genelength="+"\t"+"length of gene")
print("\t"+"-r|--sequence="+"\t"+"sgRNA sequence path way")
print("\t"+"-o|--outfile="+"\t"+"output file path way")
import sys,re,getopt
try:
opts,args=getopt.getopt(sys.argv[1:],"hg:s:l:o:r:",["help","gff=","sgRNA=","genelength=","outfile=","sequence="])
except getopt.GetoptError:
print("commd parameters is wrong!\n")
sys.exit()
for name,value in opts:
if name in ("-h","--help"):
usage()
sys.exit()
if name in("-g","--gff"):
gfffile=value
if name in("-s","--sgRNA"):
sgRNAfile=value
if name in("-l","--genelength"):
genelength=int(value)
if name in("-o","--outfile"):
outfile=value
if name in("-r","--sequence"):
sgRNAsquencefile=value
with open(gfffile,'r') as genegff:
list1=genegff.readlines()
genelist={}
for i in range(0,len(list1)):
list1[i]=list1[i].strip('\n')
tmp=list1[i].split()
if tmp[2]!="gene":
continue
else:
        # get the corresponding gene ID
genelist[tmp[8][3:18]]=[tmp[6],tmp[3],tmp[4]]
with open(sgRNAfile,'r') as sgRNA:
list2=sgRNA.readlines()
sgRNAlist={}
### assign an initial value to each gene
#with open("11111111111",'w') as out:
for i in range(0,len(list2)):
list2[i]=list2[i].strip('\n')
tmp=list2[i].split()
sgRNAlist[tmp[0][0:genelength]]=[0 for i in range(4)]
        # get the information of the targeted gene
if genelist[tmp[-1]][0]=="+":
sgRNAlist[tmp[0][0:genelength]][0]=tmp[0]
sgRNAlist[tmp[0][0:genelength]][1]=int(tmp[-3])-int(genelist[tmp[-1]][1])
#out.write(tmp[0]+"\t"+tmp[-3]+"\t"+tmp[28]+"\t"+tmp[-1]+"\t"+genelist[tmp[0][0:genelength]][0]+"\t"+genelist[tmp[0][0:genelength]][1]+"\t"+genelist[tmp[0][0:genelength]][2]+"\n")
else:
sgRNAlist[tmp[0][0:genelength]][0]=tmp[0]
sgRNAlist[tmp[0][0:genelength]][1]=int(genelist[tmp[-1]][2])-int(tmp[-3])
## find the sgRNA closest to the 5' end
for i in range(0,len(list2)):
tmp=list2[i].split()
if (genelist[tmp[-1]][0]=="+") and (int(tmp[-3])-int(genelist[tmp[-1]][1]))<sgRNAlist[tmp[0][0:genelength]][1]:
sgRNAlist[tmp[0][0:genelength]][0]=tmp[0]
sgRNAlist[tmp[0][0:genelength]][1]=int(tmp[-3])-int(genelist[tmp[-1]][1])
        ### initialize the 3rd and 4th slots, used to hold the second sgRNA
elif (genelist[tmp[-1]][0]=="-") and (int(genelist[tmp[-1]][2])-int(tmp[-3]))<sgRNAlist[tmp[0][0:genelength]][1]:
sgRNAlist[tmp[0][0:genelength]][0]=tmp[0]
sgRNAlist[tmp[0][0:genelength]][1]=int(genelist[tmp[-1]][2])-int(tmp[-3])
else:
continue
### find the next sgRNA spaced 100bp~200bp away
for i in range(0,len(list2)):
tmp=list2[i].split()
if sgRNAlist[tmp[0][0:genelength]][0]==tmp[0]:
continue
elif (genelist[tmp[-1]][0]=="+") and (int(tmp[-3])-int(genelist[tmp[-1]][1])-sgRNAlist[tmp[0][0:genelength]][1])>=50 and (int(tmp[-3])-int(genelist[tmp[-1]][1])-sgRNAlist[tmp[0][0:genelength]][1])<=200:
sgRNAlist[tmp[0][0:genelength]][2]=tmp[0]
sgRNAlist[tmp[0][0:genelength]][3]=int(tmp[-3])-int(genelist[tmp[-1]][1])
elif (genelist[tmp[-1]][0]=="-") and (int(genelist[tmp[-1]][2])-int(tmp[-3]))-sgRNAlist[tmp[0][0:genelength]][1]>=50 and (int(genelist[tmp[-1]][2])-int(tmp[-3])-sgRNAlist[tmp[0][0:genelength]][1])<=200:
sgRNAlist[tmp[0][0:genelength]][2]=tmp[0]
sgRNAlist[tmp[0][0:genelength]][3]=int(genelist[tmp[-1]][2])-int(tmp[-3])
### write the output
# with open(sys.argv[3],'w') as out:
# for k in sgRNAlist:
# out.write(str(k)+"\t"+str(sgRNAlist[k][0])+"\t"+str(sgRNAlist[k][1])+"\t"+str(sgRNAlist[k][2])+"\t"+str(sgRNAlist[k][3])+"\n")
# extract the sgRNA sequences
#'Ghir_D06G001960_S_1\t3\t25\tGGCTTCTACGAGGAAAGATATGG\t23\t45.0 %\t#\t2\t0\t0\t5\t118\t195\t319\t#\t2\t0\t0\t0\t0\t1\t2\tRepeat_sites_or_bad?\n'
with open(sgRNAsquencefile,'r') as sgRNAseq:
list3=sgRNAseq.readlines()
seq={}
for i in range(1,len(list3)):
seq[list3[i].strip("\n").split("\t")[0]]=list3[i].strip("\n").split("\t")[3]
# reverse complement function
def complement(s):
basecomplement={"A":"T","T":"A","G":"C","C":"G","a":"t","t":"a","g":"c","c":"g"}
letters=[basecomplement[base] for base in s]
return ''.join(letters)
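# Hedged example (illustrative): the reverse complement of a 5'->3' spacer is obtained by
# reversing first and then complementing, e.g. complement("ATGC"[::-1]) == "GCAT".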
## map to each gene
# genename={}
# with open("./../Tf_CDS_and_Tf",'r') as name:
# list4=name.readlines()
# for i in range(0,len(list4)):
# if re.match("^>",list4[i]):
# genename[list4[i].strip("\n").split()[1][0:genelength]]=list4[i].strip("\n").split()[0]
# else:
# continue
# output file
with open(outfile,'w') as out:
for k in sgRNAlist:
        # only a single sgRNA was found
if sgRNAlist[k][2]==0:
seq1="Ttctagctctaaaac"+complement(seq[sgRNAlist[k][0]][0:20][::-1])+"tgcaccagccgggaat"
out.write(">"+str(k)+"\t"+seq[sgRNAlist[k][0]]+"\n"+seq1+"\n")
else:
seq1="Ttctagctctaaaac"+complement(seq[sgRNAlist[k][0]][0:20][::-1])+"tgcaccagccgggaat"
seq2="Ttctagctctaaaac"+complement(seq[sgRNAlist[k][2]][0:20][::-1])+"tgcaccagccgggaat"
out.write(">"+str(k)+"\t"+seq[sgRNAlist[k][0]]+"\t"+seq1+"\t"+seq[sgRNAlist[k][2]]+"\t"+seq2+"\n")
| zpliu1126/Bioinformatic | sgRNAcas9/comparisonsgRNA.py | comparisonsgRNA.py | py | 5,434 | python | en | code | 0 | github-code | 13 |
40336517755 | import os
import numpy as np
from skimage import io
import tensorflow as tf
import matplotlib.pyplot as plt
from PIL import Image
from tqdm import tqdm
from utils.utils import working_directory
from utils.download_data import download_data_material
from utils.dirs import listdir_nohidden
from utils.logger import Logger
from shutil import rmtree
from natsort import natsorted
class DataLoader:
def __init__(self, config):
"""
Args:
data_dir: this folder path should contain both Anomalous and Normal images
"""
self.config = config
self.image_size = self.config.data_loader.image_size
log_object = Logger(self.config)
self.logger = log_object.get_logger(__name__)
dataset_type = self.config.data_loader.dataset_name
if dataset_type == "material":
self.build_material_dataset()
elif dataset_type == "cifar10":
self.build_cifar10_dataset()
elif dataset_type == "mnist":
self.build_mnist_dataset()
def build_material_dataset(self):
self.data_dir = self.config.dirs.data
self.train = "train_{}".format(self.image_size)
self.test = "test_{}".format(self.image_size)
self.test_vis = "test_vis"
self.test_vis_big ="test_vis_big"
self.valid = "valid_{}".format(self.image_size)
self.train_dataset = os.path.join(self.data_dir, self.train)
self.valid_dataset = os.path.join(self.data_dir, self.valid)
self.img_location = os.path.join(self.data_dir, self.test, "imgs/")
self.img_location_vis = os.path.join(self.data_dir, self.test_vis, "imgs/")
self.img_location_vis_big = os.path.join(self.data_dir, self.test_vis_big, "imgs/")
self.tag_location = os.path.join(self.data_dir, self.test, "labels/")
self.tag_location_vis = os.path.join(self.data_dir, self.test_vis, "labels/")
self.tag_location_vis_big = os.path.join(self.data_dir, self.test_vis_big, "labels/")
if not os.path.exists(self.data_dir):
self.logger.info("Dataset is not present. Download is started.")
download_data_material(self.data_dir)
self.data_dir_normal = self.config.dirs.data_normal
self.data_dir_anomalous = self.config.dirs.data_anomalous
# Up until this part only the raw dataset existence is checked and downloaded if not
self.dataset_name = None
# this is to list all the folders
self.dir_names = listdir_nohidden(self.data_dir)
self.test_size_per_img = (
None
) # This will be the number of patches that will be extracted from each test image
# Normal images for the train and validation dataset
normal_imgs = self.data_dir_normal
        # Anomalous images and the tag info regarding the anomaly for the test set
anorm_imgs = self.data_dir_anomalous + "/images/"
anorm_tag_imgs = self.data_dir_anomalous + "/gt/"
norm_img_names = [normal_imgs + x for x in listdir_nohidden(normal_imgs)]
anorm_img_names = [anorm_imgs + x for x in listdir_nohidden(anorm_imgs)]
anorm_tag_names = [anorm_tag_imgs + x for x in listdir_nohidden(anorm_tag_imgs)]
self.norm_img_array = self.create_image_array(norm_img_names, save=False)
self.anorm_img_array = self.create_image_array(anorm_img_names, save=False)
self.anorm_tag_array = self.create_image_array(anorm_tag_names, save=False)
self.image_tag_list = list(zip(self.anorm_img_array, self.anorm_tag_array))
if not self.config.data_loader.validation:
self.populate_train_material()
else:
self.populate_train_valid_material()
if self.config.data_loader.mode == "anomaly":
self.populate_test_material()
if self.config.data_loader.mode == "visualization":
self.populate_test_material_vis()
if self.config.data_loader.mode == "visualization_big":
self.populate_test_material_vis_big()
def build_cifar10_dataset(self):
pass
def build_mnist_dataset(self):
pass
def populate_train_material(self):
# Check if we have the data already
if self.train in self.dir_names:
self.logger.info("Train Dataset is already populated.")
else:
self.logger.info("Train Dataset will be populated")
size = self.config.data_loader.image_size
num_images = 10240
imgs = []
for ind, img in enumerate(self.norm_img_array):
h, w = img.shape[:2]
new_h, new_w = size, size
for idx in range(num_images):
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = img[top : top + new_h, left : left + new_w]
imgs.append(image)
self.logger.debug("{} images generated".format(num_images * (ind + 1)))
# Check if the folder is there
if not os.path.exists(self.train_dataset):
os.mkdir(self.train_dataset)
with working_directory(self.train_dataset):
for idx, img in enumerate(imgs):
im = Image.fromarray(img)
im.save("img_{}.jpg".format(str(idx)))
def populate_train_valid_material(self):
if self.train in self.dir_names and self.valid in self.dir_names:
self.logger.info("Train and Validation datasets are already populated")
else:
# Remove train dataset from the previous run
if os.path.exists(self.train_dataset):
rmtree(self.train_dataset)
self.logger.info("Train and Validations Datasets will be populated")
size = self.config.data_loader.image_size
num_images = 10240
imgs = []
for ind, img in enumerate(self.norm_img_array):
h, w = img.shape[:2]
new_h, new_w = size, size
for idx in range(num_images):
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = img[top : top + new_h, left : left + new_w]
imgs.append(image)
self.logger.debug("{} images generated".format(num_images * (ind + 1)))
# Creation of validation dataset
np.random.seed(self.config.data_loader.random_seed)
validation_list = np.random.choice(51200, 5120) # 10% of the training set
imgs_train = [x for ind, x in enumerate(imgs) if ind not in validation_list]
imgs_valid = [x for ind, x in enumerate(imgs) if ind in validation_list]
# Check if the folder is there
if not os.path.exists(self.train_dataset):
os.mkdir(self.train_dataset)
# Check if the folder is there
if not os.path.exists(self.valid_dataset):
os.mkdir(self.valid_dataset)
with working_directory(self.train_dataset):
for idx, img in enumerate(imgs_train):
im = Image.fromarray(img)
im.save("img_{}.jpg".format(str(idx)))
with working_directory(self.valid_dataset):
for idx, img in enumerate(imgs_valid):
im = Image.fromarray(img)
im.save("img_{}.jpg".format(str(idx)))
def populate_test_material(self):
if self.test in self.dir_names:
self.logger.info("Test Dataset is already populated")
else:
self.logger.info("Test Dataset will be populated")
size = self.config.data_loader.image_size
folder_name = self.test
first_level = os.path.join(self.data_dir, folder_name)
if not os.path.exists(first_level):
os.mkdir(first_level)
img_files = []
tag_files = []
for img_, tag_ in self.image_tag_list:
h, w = img_.shape[:2]
self.w_turns = (w // size) * 2 - 1
self.h_turns = (h // size) * 2 - 1
slide = int(size / 2)
for adv_h in range(self.h_turns):
for adv_w in range(self.w_turns):
image = img_[
adv_h * slide : size + ((adv_h) * slide),
adv_w * slide : ((adv_w) * slide) + size,
]
tag = tag_[
adv_h * slide : size + ((adv_h) * slide),
adv_w * slide : ((adv_w) * slide) + size,
]
img_files.append(image)
tag_files.append(tag)
self.test_size_per_img = self.w_turns * self.h_turns
if not os.path.exists(self.img_location):
os.mkdir(self.img_location)
with working_directory(self.img_location):
for idx, img in enumerate(img_files):
im = Image.fromarray(img)
im.save(
"img_{}_{}.jpg".format(
idx // self.test_size_per_img, idx % self.test_size_per_img
)
)
if not os.path.exists(self.tag_location):
os.mkdir(self.tag_location)
with working_directory(self.tag_location):
for idx, tag in enumerate(tag_files):
im = Image.fromarray(tag)
im.save(
"label_{}_{}.jpg".format(
idx // self.test_size_per_img, idx % self.test_size_per_img
)
)
def populate_test_material_vis(self):
if self.test_vis in self.dir_names:
self.logger.info("Test Dataset is already populated")
else:
self.logger.info("Test Dataset will be populated")
size = self.config.data_loader.image_size
folder_name = self.test_vis
first_level = os.path.join(self.data_dir, folder_name)
if not os.path.exists(first_level):
os.mkdir(first_level)
img_files = []
tag_files = []
for img_, tag_ in self.image_tag_list:
h, w = img_.shape[:2]
self.w_turns = (w // size)
self.h_turns = (h // size)
slide = int(size)
for adv_h in range(self.h_turns):
for adv_w in range(self.w_turns):
image = img_[
adv_h * slide : size + ((adv_h) * slide),
adv_w * slide : ((adv_w) * slide) + size,
]
tag = tag_[
adv_h * slide : size + ((adv_h) * slide),
adv_w * slide : ((adv_w) * slide) + size,
]
img_files.append(image)
tag_files.append(tag)
self.test_size_per_img = self.w_turns * self.h_turns
if not os.path.exists(self.img_location_vis):
os.mkdir(self.img_location_vis)
with working_directory(self.img_location_vis):
for idx, img in enumerate(img_files):
im = Image.fromarray(img)
im.save(
"{}.jpg".format(
idx
)
)
if not os.path.exists(self.tag_location_vis):
os.mkdir(self.tag_location_vis)
with working_directory(self.tag_location_vis):
for idx, tag in enumerate(tag_files):
im = Image.fromarray(tag)
im.save(
"{}.jpg".format(
idx
)
)
def populate_test_material_vis_big(self):
if self.test_vis_big in self.dir_names:
self.logger.info("Test Dataset is already populated")
else:
self.logger.info("Test Dataset will be populated")
size = self.config.data_loader.image_size
folder_name = self.test_vis_big
first_level = os.path.join(self.data_dir, folder_name)
if not os.path.exists(first_level):
os.mkdir(first_level)
img_files = []
tag_files = []
#index_list = [0,6,7,8,9,10,11,15]
index_list = [0,6,7,8,9,10,11,15]
image_tags = [self.image_tag_list[i] for i in index_list]
for img_, tag_ in image_tags:
h, w = img_.shape[:2]
self.w_turns = w - size + 1
self.h_turns = h - size + 1
slide = 1
for adv_h in range(self.h_turns):
for adv_w in range(self.w_turns):
image = img_[
adv_h * slide : size + ((adv_h) * slide),
adv_w * slide : ((adv_w) * slide) + size,
]
tag = tag_[
adv_h * slide : size + ((adv_h) * slide),
adv_w * slide : ((adv_w) * slide) + size,
]
img_files.append(image)
tag_files.append(tag)
self.test_size_per_img = self.w_turns * self.h_turns
if not os.path.exists(self.img_location_vis_big):
os.mkdir(self.img_location_vis_big)
with working_directory(self.img_location_vis_big):
for idx, img in enumerate(img_files):
im = Image.fromarray(img)
im.save(
"{}.jpg".format(
idx
)
)
if not os.path.exists(self.tag_location_vis_big):
os.mkdir(self.tag_location_vis_big)
with working_directory(self.tag_location_vis_big):
for idx, tag in enumerate(tag_files):
im = Image.fromarray(tag)
im.save(
"{}.jpg".format(
idx
)
)
def create_image_array(self, img_names, save=True, file_name="Dataset"):
"""
Args:
img_names:
"""
self.dataset_name = os.path.join(self.data_dir, file_name)
img_array = []
for img in img_names:
im2arr = io.imread(img)
img_array.append(im2arr)
if save:
np.save(self.dataset_name, img_array)
return np.array(img_array)
def get_train_dataset(self):
"""
:param size: size of the image
:return: numpy array of images and corresponding labels
"""
img_list = listdir_nohidden(self.train_dataset)
img_names = tf.constant([os.path.join(self.train_dataset, x) for x in img_list])
self.logger.info("Train Dataset is Loaded")
return img_names
def get_valid_dataset(self):
"""
:param size: size of the image
:return: numpy array of images and corresponding labels
"""
img_list = listdir_nohidden(self.valid_dataset)
img_names = tf.constant([os.path.join(self.train_dataset, x) for x in img_list])
self.logger.info("Validation Dataset is Loaded")
return img_names
def get_test_dataset(self):
"""
:param size: size of the image
:return: numpy array of images and corresponding labels
"""
img_list = listdir_nohidden(self.img_location)
img_names = tf.constant([os.path.join(self.img_location, x) for x in img_list])
tag_list = listdir_nohidden(self.tag_location)
tag_list_merged = [os.path.join(self.tag_location, x) for x in tag_list]
labels = []
for label in tag_list_merged:
im2arr = io.imread(label)
labels.append(1) if np.sum(im2arr) > 5100 else labels.append(0)
labels_f = tf.constant(labels)
self.logger.info("Test Dataset is Loaded")
return [img_names, labels_f]
def get_test_dataset_vis(self):
"""
:param size: size of the image
:return: numpy array of images and corresponding labels
"""
img_list = listdir_nohidden(self.img_location_vis)
img_list = natsorted(img_list)
img_names = tf.constant([os.path.join(self.img_location_vis, x) for x in img_list])
tag_list = listdir_nohidden(self.tag_location_vis)
tag_list = natsorted(tag_list)
tag_list_merged = [os.path.join(self.tag_location_vis, x) for x in tag_list]
labels = []
for label in tag_list_merged:
im2arr = io.imread(label)
labels.append(1) if np.sum(im2arr) > 5100 else labels.append(0)
labels_f = tf.constant(labels)
self.logger.info("Test Dataset is Loaded")
return [img_names, labels_f, tag_list_merged]
def get_test_dataset_vis_big(self):
"""
        :return: [tensor of test image file paths, tensor of binary labels, list of tag file paths]
"""
img_list = listdir_nohidden(self.img_location_vis_big)
img_list = natsorted(img_list)
#img_list = img_list[660345:660345 * 2]
img_names = tf.constant([os.path.join(self.img_location_vis_big, x) for x in img_list])
tag_list = listdir_nohidden(self.tag_location_vis_big)
tag_list = natsorted(tag_list)
#tag_list = tag_list[660345:660345* 2]
tag_list_merged = [os.path.join(self.tag_location_vis_big, x) for x in tag_list]
labels = []
for label in tag_list_merged:
im2arr = io.imread(label)
            labels.append(1 if np.sum(im2arr) > 5100 else 0)
labels_f = tf.constant(labels)
self.logger.info("Test Dataset is Loaded")
return [img_names, labels_f, tag_list_merged]
| yigitozgumus/Polimi_Thesis | utils/DataLoader.py | DataLoader.py | py | 18,319 | python | en | code | 5 | github-code | 13 |
8909799305 | import json
import os
from flask import Blueprint, request, jsonify, send_file, abort
from flask_jwt_extended import jwt_required
analysis = Blueprint("analysis", __name__)
# api/analysis/model/<folder>/<file>
@analysis.route("/model/<folder>/<file>", methods=["GET"])
def serve_model(folder="tfjs_model", file="model.json"):
try:
path = os.path.realpath(os.path.join("ml-model", folder, file))
if file == "model.json":
with open(path, 'r') as f:
j = json.load(f)
return json.dumps(j), 200
else:
return send_file(path, as_attachment=True)
except FileNotFoundError:
abort(404)
| marinov98/Sign-Lang-Tutor | api/routes/analysis.py | analysis.py | py | 700 | python | en | code | 3 | github-code | 13 |
30599368735 | """Exercise from https://exercism.io/my/tracks/python."""
import sys
import time
def flatten(iterable):
"""Returns a flattened list of non-list-like objects from `iterable` in DFS
traversal order.
"""
return list(items_from(iterable))
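# Example: flatten([0, [1, 2], 3, 4]) -> [0, 1, 2, 3, 4]; nested lists are
# traversed depth-first and None items are dropped.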
def items_from(iterable):
"""Genertor that yields every non-list-like objects from `iterable` in DFS
traversal order.
"""
# The base idea is to mimic the python stack with `cursors`. This function
# iterate the inpute `iterable` in DFS order starting from the root
# (`iterable`) and going from the left-most item (``iterable[0]``) to the right-most
# item (``iterable[-1]``). During traversal, two kinds of node will be met,
# list-like (`sub_iterable`) objects and simple `item`s.
#
# PRE When the traversal gets to a sub-iterable (a subtree), a new cursor is
# pused to `cursor_stack`.
# IN When a simple `item` is traversed it's yield.
# POST When a sub-iterable is consumed (the subtree has completely traversed),
# the cursor goes back to the root of the corresponding subtree.
#
# iterable = [0, [1,2], 3, 4]
#
# I
# |
# -------------------
# | | | |
# 0 --I-- 3 4
# | |
# 1 2
#
# This tree contains two `sub_iterables` ('I') and five items ('0', '1', '2', '3', '4').
cursor_stack = [iter(iterable)]
while cursor_stack:
sub_iterable = cursor_stack[-1]
try:
item = next(sub_iterable)
except StopIteration: # post-order
cursor_stack.pop()
continue
try: # pre-order
cursor_stack.append(list_like_iter(item))
except TypeError:
if item is not None:
yield item # in-order
def list_like_iter(item):
"""Returns an iterator of `item` if `item` is considered list-like (non-string iterable)"""
if isinstance(item, str):
raise TypeError("String are not iterable considered list-like objects.")
return iter(item)
def build_deep_list(depth):
"""Returns a list of the form $l_{depth} = [depth-1, l_{depth-1}]$
with $depth > 1$ and $l_0 = [0]$.
"""
sub_list = [0]
for d in range(1, depth):
sub_list = [d, sub_list]
return sub_list
def repr_long_list(_list):
    if len(_list) > 10:
str_begining = str(_list[:5])[1:-1]
str_end = str(_list[-5:])[1:-1]
return "[{}, ..., {}]".format(str_begining, str_end)
else:
return str(_list)
def flatten_str(lst):
return eval('[' + str(lst).replace('[', '').replace(']', '') + ']')
if __name__ == "__main__":
# Default python maximum stack size is around 10**3.
depth = int(sys.argv[1]) if len(sys.argv) > 1 else 10**4
deep_list = build_deep_list(depth)
try:
print(deep_list) # Will fail if depth > max stack size.
except RecursionError as error:
print(error)
flat_list = flatten(deep_list) # Will not fail because of stack limitation.
print(repr_long_list(flat_list))
| cglacet/exercism-python | flatten-array/complete_flatten_array.py | complete_flatten_array.py | py | 3,190 | python | en | code | 5 | github-code | 13 |
41337144145 | import requests
from requests.exceptions import RequestException
from bs4 import BeautifulSoup
import csv
import codecs
from multiprocessing import Pool
import random
import time
import sys
ua_list = [
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
"Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
"Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
]
headers = {'User-agent': random.choice(ua_list)}
def my_soup(url):
response = requests.get(url, headers=headers, timeout=0.5)
bs = BeautifulSoup(response.text, 'lxml')
return bs
def get_page(url):
try:
soup = my_soup(url)
result_li = soup.find_all('li', class_='list-item')
# print(str(result_li[1]))
for i in result_li:
# print(type(i)) <class 'bs4.element.Tag'>
            # i is the content under each li tag; convert i to its string form
page_text = str(i)
page_soup = BeautifulSoup(page_text, 'lxml')
result_href = page_soup.find_all('a', class_='houseListTitle')
            # result_href is the list of <a> tags, so only element 0 exists; get the URL of each listing
# print(result_href[0].attrs['href'])
get_page_detail(result_href[0].attrs['href'])
        # Check whether the next-page tag is empty; if not, recurse to collect the URLs of all pages
result_nextpage = soup.find_all('a', class_='aNxt')
if len(result_nextpage) != 0:
get_page(result_nextpage[0].attrs['href'])
else:
            print('No more pages')
        # result_li is the list containing all li tags; [0], [1], ... access each li
except RequestException:
return ('bad requests')
# print(type(response.text)) <class 'str'>
def my_strip(s):
return str(s).replace(' ', '').replace('\n', '').replace('\t', '').strip()
def find_info(response):
info = BeautifulSoup(str(response), 'lxml')
detail = info.find_all('dd')
return detail
def write_info(result):
with codecs.open('aaaa.csv', 'a', 'utf_8_sig') as f:
writer = csv.writer(f)
writer.writerow(result)
f.close()
def get_url(quyu, m, a, b):
url = 'https://ks.anjuke.com/sale/' + quyu + '/a' + str(a) + '-b' + str(b) + '-m' + str(m) + '/'
return url
# Scrape the listing detail page
def get_page_detail(url):
try:
soup = my_soup(url)
result_list = []
house_title = soup.find_all('h3', class_='long-title')[0]
house_price = soup.find_all('span', class_='light info-tag')[0]
detail_1 = soup.find_all('div', class_='first-col detail-col')
detail_2 = soup.find_all('div', class_='second-col detail-col')
detail_3 = soup.find_all('div', class_='third-col detail-col')
col_1 = find_info(detail_1)
col_2 = find_info(detail_2)
col_3 = find_info(detail_3)
# print(type(my_strip(house_title.text))) <class 'str'>
title = my_strip(house_title.text)
price = my_strip(house_price.text[:-1])
community = my_strip(col_1[0].text)
address = my_strip(col_1[1].find_all('p')[0].text)
time = my_strip(col_1[2].text[:-1])
style = my_strip(col_1[3].text)
house_type = my_strip(col_2[0].text)
floor_area = my_strip(col_2[1].text[:-3])
orientation = my_strip(col_2[2].text)
floor = my_strip(col_2[3].text)
decoration = my_strip(col_3[0].text)
unit_price = my_strip(col_3[1].text[:-4])
result = [title, price, community, address, time, style, house_type, floor_area, orientation, floor, decoration,
unit_price]
'''
['世茂东外滩景观在卖房中小区*,全新毛坯有钥匙看房方便', '140', '世茂东外滩', '玉山-城东-东城大道与景王路交汇处', '2016', '普通住宅', '3室2厅1卫', '96', '南北', '高层(共33层)', '毛坯', '14583']
'''
result_list.append(result)
print(result)
# with open('1.txt', 'a', encoding='UTF-8') as f:
# f.write(str(result)+'\n')
# f.close()
write_info(result)
except RequestException:
return ('bad requests')
if __name__ == '__main__':
    start = time.perf_counter()
    # quyu: district  m: price  a: area  b: layout (number of rooms)
list = []
for quyu in ('chengxikunshan', 'chengnankunshan', 'chengbeikunshan', 'kunshanchengdong', 'shiqukunshan', 'zhoushia',
'bachenga', 'zhangpua', 'lujiab', 'huaqiaob', 'qiandengb', 'zhouzhuanga', 'jinxig', 'dianshanhua'):
for m in (345, 346, 347, 348, 349, 350, 351, 352, 353, 699):
for a in range(307, 315):
for b in range(267, 273):
url = get_url(quyu, m, a, b)
#list.append(url)
get_page(url)
'''
pool = Pool(30)
pool.map(get_page, list)
pool.close()
pool.join()
'''
    end = time.perf_counter()
    print('Program ran for %f seconds' % (end - start))
| otracyleeo/anjuke | anjuke_ks.py | anjuke_ks.py | py | 7,037 | python | en | code | 0 | github-code | 13 |
14386402590 | """functions for updating the pagebrowser"""
"""test this locally like thus:
curl http://localhost:5000/archivefiles/1/2495 -d status=1
curl -X PUT -d status=2 http://localhost:5000/archivefiles/1/2495
"""
import logging
from restrepo import celery_tasks
# dont spoil our log with lots of info about requests handler
x = logging.getLogger("requests")
x.setLevel(logging.WARN)
update_logger = logging.getLogger('restrepo.pagebrowser_update')
update_logger.setLevel(logging.INFO)
def refresh_book(context, *args, **kwargs):
url = context.registry.settings.get('publish_in_pagebrowser.url')
logging.debug('Refreshing book at {url}'.format(**locals()))
return celery_tasks.update_book.delay('refresh', url, *args, **kwargs)
def delete_book(context, *args, **kwargs):
url = context.registry.settings.get('unpublish_in_pagebrowser.url')
logging.debug('Deleting book at {url}'.format(**locals()))
return celery_tasks.update_book.delay('delete', url, *args, **kwargs)
| sejarah-nusantara/repository | src/restrepo/restrepo/pagebrowser/update.py | update.py | py | 997 | python | en | code | 0 | github-code | 13 |
15864172815 | # -*- coding: utf-8 -*-
# This file is part of PyBOSSA.
#
# PyBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBOSSA. If not, see <http://www.gnu.org/licenses/>.
import requests
from urllib import quote
serverurl = "http://serv.cusp.nyu.edu/files/sonyc/citizensound/"
def get_sounds(group):
# url = "https://api.mongolab.com/api/1/databases/sonyc/collections/audio-data/538a43f9e4b0d7a3741b7fe2?apiKey=eFdR9h45nm-AuciNuN6d4G1Pd7NM38NS"
jsonurl = serverurl + "json/" + "clips.json"
#payload = {'group':group}
#res = requests.get(url, params=payload)
res = requests.get(jsonurl)
# print url
data = res.json()
for s in data['sounds']:
s['clip_url'] = serverurl + "audio/" + s['filename'] + ".wav"
s['spec_url'] = serverurl + "spectrograms/" + s['filename'] + ".png"
# print clip_url
# print spec_url
return data['sounds']
| justinsalamon/sonyc-citizensound | sounddata.py | sounddata.py | py | 1,427 | python | en | code | 0 | github-code | 13 |
7517086862 | """
CUDA_VISIBLE_DEVICES=1 nsys profile --force-overwrite true -o "output/nsys" -c cudaProfilerApi -t cuda,cublas,nvtx -e EMIT_NVTX=1 python -c "from boardlaw.multinet import *; profile()"
docker cp boardlaw:/code/output/nsys.qdrep ~/Code/tmp/nsys.qdrep
/usr/local/NVIDIA-Nsight-Compute/nv-nsight-cu-cli -f -o prof/ncomp python -c "import os; os.environ['EMIT_NVTX'] = '1'; from drones.complete import *; step()"
"""
import os
import aljpy
import torch
from functools import wraps
log = aljpy.logger()
def nvtx(f):
name = f'{f.__module__}.{f.__qualname__}'
emit = os.environ.get('EMIT_NVTX') == '1'
if emit:
@wraps(f)
def g(*args, **kwargs):
torch.cuda.nvtx.range_push(name)
try:
return f(*args, **kwargs)
finally:
torch.cuda.nvtx.range_pop()
return g
else:
return f
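# Typical usage (illustrative; `step` is a hypothetical function name):
#
#   @nvtx
#   def step(batch):
#       ...
#
# With EMIT_NVTX=1 each call to `step` shows up as a named NVTX range in the
# nsys timeline; otherwise the decorator returns the function unchanged.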
def nvtxgen(f):
name = f'{f.__module__}.{f.__qualname__}'
emit = os.environ.get('EMIT_NVTX') == '1'
def g(*args, **kwargs):
if emit:
torch.cuda.nvtx.range_push(name)
try:
return (yield from f(*args, **kwargs))
finally:
if emit:
torch.cuda.nvtx.range_pop()
return g
def profilable(f):
@wraps(f)
def g(*args, **kwargs):
if os.environ.get('EMIT_NVTX') == '1':
log.info('Emitting NVTX')
try:
torch.cuda.profiler.cudart().cudaProfilerStart()
with torch.autograd.profiler.emit_nvtx(record_shapes=True):
return f(*args, **kwargs)
finally:
torch.cuda.profiler.cudart().cudaProfilerStop()
else:
return f(*args, **kwargs)
return g | andyljones/boardlaw | rebar/profiling.py | profiling.py | py | 1,746 | python | en | code | 29 | github-code | 13 |
19971509706 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2019/7/7 3:45
@Author : miaoweiwei
@File : test.py
@Software: PyCharm
@Desc :
"""
import tensorflow as tf
hello = tf.constant('Hello, TensorFlow!')
sess = tf.Session()
print(sess.run(hello))
if __name__ == '__main__':
print(tf.__version__)
| miaoweiwei/Smart-Scales | smartscales/test.py | test.py | py | 316 | python | en | code | 0 | github-code | 13 |
19649398432 | # type: str holds UNICODE data
# An encoding is a mapping between bytes and characters
# ASCII encoding: contains 127 characters. 32 -> SPACE
# ANSI (very old, Microsoft) 256 characters (1 byte)
# 'latin-1' 256 characters
# UNICODE encodings
# UTF-8: two consecutive bytes can form a single character.
# UTF-16
f = open("c:\\Users\\denni\\test.txt", "r") # Converted behind the scenes
string = f.read()
print(string) # print implicitly and automatically calls the '__str__' method of the object
f.close()
| Xorbit17/motoblog | open_files_lesson.py | open_files_lesson.py | py | 476 | python | nl | code | 0 | github-code | 13 |
38618931331 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import random
from multiprocessing import Process
import uwsgi
from pyprometheus.contrib.uwsgi_features import UWSGICollector, UWSGIStorage, UWSGIFlushStorage
from pyprometheus.registry import BaseRegistry
from pyprometheus.utils.exposition import registry_to_text
try:
xrange = xrange
except Exception:
xrange = range
def test_uwsgi_collector():
registry = BaseRegistry()
uwsgi_collector = UWSGICollector(namespace="uwsgi_namespace", labels={"env_role": "test"})
registry.register(uwsgi_collector)
collectors = {x.name: x for x in registry.collect()}
metrics_count = sorted(map(lambda x: x.split(" ")[2],
filter(lambda x: x.startswith("# HELP"), [x for x in registry_to_text(registry).split("\n")])))
assert len(metrics_count) == len(set(metrics_count))
assert len(registry_to_text(registry).split("\n")) == 60
assert collectors["uwsgi_namespace:buffer_size_bytes"].get_samples()[0].value == uwsgi.buffer_size
assert collectors["uwsgi_namespace:processes_total"].get_samples()[0].value == uwsgi.numproc
assert collectors["uwsgi_namespace:requests_total"].get_samples()[0].value == uwsgi.total_requests()
for name in ["requests", "respawn_count", "running_time", "exceptions", "delta_requests"]:
assert collectors["uwsgi_namespace:process:{0}".format(name)].get_samples()[0].value == uwsgi.workers()[0][name]
assert uwsgi_collector.metric_name("test") == "uwsgi_namespace:test"
DATA = (
((2, "metric_gauge_name", "", (("label1", "value1"), ("label2", "value2"))), 5),
((3, "metric_counter_name", "", (("label1", "value1"), ("label2", "value2"))), 7),
((5, "metric_summary_name", "_sum", (("label1", "value1"), ("label2", "value2"))), 4),
((7, "metric_summary_name", "_count", (("label1", "value1"), ("label2", "value2"))), 1),
((11, "metric_histogram_name", "_sum", (("label1", "value1"), ("label2", "value2"))), 6),
((12, "metric_histogram_name", "_count", (("label1", "value1"), ("label2", "value2"))), 1),
((13, "metric_histogram_name", "_bucket", (("bucket", "0.005"), ("label1", "value1"), ("label2", "value2"))), 0),
((13, "metric_histogram_name", "_bucket", (("bucket", "0.01"), ("label1", "value1"), ("label2", "value2"))), 0),
((13, "metric_histogram_name", "_bucket", (("bucket", "7.5"), ("label1", "value1"), ("label2", "value2"))), 1),
((13, "metric_histogram_name", "_bucket", (("bucket", "+Inf"), ("label1", "value1"), ("label2", "value2"))), 1),
((2, "metric_gauge_name", "", (("label1", "value3"), ("label2", "value4"))), 5),
((3, "metric_counter_name", "", (("label1", "value3"), ("label2", "value4"))), 7),
((5, "metric_summary_name", "_sum", (("label1", "value3"), ("label2", "value4"))), 4),
((7, "metric_summary_name", "_count", (("label1", "value3"), ("label2", "value4"))), 1),
((11, "metric_histogram_name", "_sum", (("label1", "value3"), ("label2", "value4"))), 6),
((12, "metric_histogram_name", "_count", (("label1", "value3"), ("label2", "value4"))), 1),
((13, "metric_histogram_name", "_bucket", (("bucket", "0.005"), ("label1", "value3"), ("label2", "value4"))), 0),
((13, "metric_histogram_name", "_bucket", (("bucket", 0.01), ("label1", "value3"), ("label2", "value4"))), 0),
((13, "metric_histogram_name", "_bucket", (("bucket", 7.5), ("label1", "value3"), ("label2", "value4"))), 1),
((13, "metric_histogram_name", "_bucket", (("bucket", float("inf")), ("label1", "value3"), ("label2", "value4"))), 1))
def test_uwsgi_storage():
storage = UWSGIStorage(0)
storage2 = UWSGIStorage(0)
    # 100 pages of 4 KiB each -> 409600 bytes of shared memory
assert len(storage.m) == 409600 == 100 * 1024 * 4
assert (storage.get_area_size()) == 14
assert storage.m[15] == "\x00"
with storage.lock():
assert storage.wlocked
assert storage.rlocked
assert not storage.wlocked
assert not storage.rlocked
with storage.rlock():
assert not storage.wlocked
assert storage.rlocked
assert not storage.wlocked
assert not storage.rlocked
assert storage.is_actual
area_sign = storage.get_area_sign()
assert area_sign == storage2.get_area_sign()
storage.m[storage.SIGN_POSITION + 2] = os.urandom(1)
assert not storage.is_actual
s = "keyname"
assert storage.get_string_padding(s) == 5
assert len(s.encode("utf-8")) + storage.get_string_padding(s) == 12
assert storage.validate_actuality()
assert storage.is_actual
assert storage.get_key_position("keyname") == ([14, 18, 25, 33], True)
assert (storage.get_area_size()) == 33
assert storage.get_key_size("keyname") == 24
storage.write_value("keyname", 10)
assert storage.get_value("keyname") == 10.0
storage.clear()
assert storage.get_area_size() == 0 == storage2.get_area_size()
storage.validate_actuality()
assert storage.get_area_size() == 14 == storage2.get_area_size()
storage.write_value(DATA[0][0], DATA[0][1])
assert storage.get_key_size(DATA[0][0]) == 108
assert storage.get_area_size() == 122 == storage2.get_area_size()
assert storage2.get_value(DATA[0][0]) == DATA[0][1] == storage.get_value(DATA[0][0])
for x in DATA:
storage.write_value(x[0], x[1])
assert storage.get_value(x[0]) == x[1]
for x in DATA:
assert storage.get_value(x[0]) == x[1]
assert len(storage) == len(DATA) == 20
assert storage.get_area_size() == 2531
assert not storage2.is_actual
assert storage.is_actual
storage2.validate_actuality()
for x in DATA:
assert storage2.get_value(x[0]) == x[1]
def test_multiprocessing(measure_time, iterations, num_workers):
storage = UWSGIStorage(0)
storage2 = UWSGIStorage(0)
storage3 = UWSGIStorage(0)
ITERATIONS = iterations
with measure_time("multiprocessing writes {0}".format(ITERATIONS)) as mt:
def f1():
for _ in xrange(ITERATIONS):
for x in DATA:
storage.inc_value(x[0], x[1])
def f2():
for _ in xrange(ITERATIONS):
for x in DATA:
storage2.inc_value(x[0], x[1])
def f3():
for _ in xrange(ITERATIONS):
for x in DATA:
storage3.inc_value(x[0], x[1])
workers = []
for _ in xrange(num_workers):
func = random.choice([f1, f2, f3])
p = Process(target=func)
p.start()
workers.append(p)
for x in workers:
x.join()
mt.set_num_ops(ITERATIONS * len(workers) * len(DATA))
with measure_time("multiprocessing reads") as mt:
mt.set_num_ops(3 * len(DATA))
for x in DATA:
assert storage2.get_value(x[0]) == storage.get_value(x[0]) == storage3.get_value(x[0]) == x[1] * ITERATIONS * len(workers)
def test_uwsgi_flush_storage():
storage1 = UWSGIFlushStorage(0)
storage2 = UWSGIFlushStorage(0)
for x in xrange(10):
for k, v in DATA:
storage1.inc_value(k, v)
storage1.get_value(k) == v
storage2.get_value(k) == 0
storage1.flush()
for x in DATA:
storage1.get_value(x[0]) == 0
storage1.persistent_storage.get_value(x[0]) == x[1] * 10
def test_uwsgi_flush_storage_multiprocessing(measure_time, iterations, num_workers):
storage = UWSGIFlushStorage(0)
storage2 = UWSGIFlushStorage(0)
storage3 = UWSGIFlushStorage(0)
ITERATIONS = iterations
with measure_time("flush storage multiprocessing writes {0}".format(ITERATIONS)) as mt:
def f1():
for _ in xrange(ITERATIONS):
for x in DATA:
storage.inc_value(x[0], x[1])
storage.flush()
def f2():
for _ in xrange(ITERATIONS):
for x in DATA:
storage2.inc_value(x[0], x[1])
storage2.flush()
def f3():
for _ in xrange(ITERATIONS):
for x in DATA:
storage3.inc_value(x[0], x[1])
storage3.flush()
workers = []
for _ in xrange(num_workers):
func = random.choice([f1, f2, f3])
p = Process(target=func)
p.start()
workers.append(p)
for x in workers:
x.join()
mt.set_num_ops(ITERATIONS * len(workers) * len(DATA))
storage.flush()
storage2.flush()
storage3.flush()
with measure_time("flush storage multiprocessing reads") as mt:
mt.set_num_ops(3 * len(DATA))
for x in DATA:
assert storage2.get_value(x[0]) == storage.get_value(x[0]) == storage3.get_value(x[0]) == 0
assert storage2.persistent_storage.get_value(x[0]) == storage.persistent_storage.get_value(x[0]) == storage3.persistent_storage.get_value(x[0])
assert storage.persistent_storage.get_value(x[0]) == x[1] * ITERATIONS * len(workers)
def test_uwsgi_storage_metrics(iterations):
registry = BaseRegistry()
storage = UWSGIStorage(0, namespace="namespace", stats=True)
registry.register(storage)
for x in xrange(iterations):
for k, v in DATA:
storage.inc_value(k, v)
collectors = {x.name: x for x in registry.collect()}
metric = collectors["namespace:memory_size"]
assert metric.get_samples()[0].value == storage.get_area_size()
metric = collectors["namespace:num_keys"]
assert metric.get_samples()[0].value == 20
| Lispython/pyprometheus | tests/test_uwsgi_collector.py | test_uwsgi_collector.py | py | 9,606 | python | en | code | 13 | github-code | 13 |
32506153814 | import pandas as pd
import datetime as dt
import geopandas as gpd
import folium
import branca.colormap as cm
from folium.plugins import TimestampedGeoJson
#from folium.features import GeoJsonPopup, GeoJsonTooltip
#Reading json file
bikedata=pd.read_json('/Users/alexanderlindell/Documents/Programmering /Python/Sthlm-EbikeVis-/stations2-copy.json')
#Dict with column names
df={'date':[],'station':[],'occupancy':[],'capacity':[],'long':[],'lat':[]}
df
for _,row in bikedata.iterrows():
for item in row:
for cell in item['data']:
#print(cell['date'])
df['occupancy'].append(cell['occupancy'])
df['capacity'].append(cell['capacity'])
df['date'].append(cell['date'])
df['station'].append(item['id'])
#print(cell['date'],item['coord']['lon'],item['coord']['lat'])
df['long'].append(item['coord']['lon'])
df['lat'].append(item['coord']['lat'])
#Setting up dataframe from dict and fix formatting on timestamp.
data=pd.DataFrame.from_dict(df)
data['date']= pd.to_datetime(df['date'],unit='s')
data['date']=data['date'].dt.strftime('%Y-%m-%dT%H:%M:%S')
data=data.sort_values(by='date')
data
#Setting the status colors for occupancy dependent on capacity.
colorbar = cm.StepColormap(colors=['#15B01A','#FFA500','#FF7F00','#FF4500','#8B0000'], vmin=0,vmax=1)
data['color']=list(colorbar(x/y) for x,y in zip(data['occupancy'],data['capacity']))
#Adjusting size of markers.
def setradius(size):
radius=4 if (size*0.2) < 4 else (size*0.2)
return(radius)
features = [{'type': 'Feature',
'geometry': {'type':'Point', 'coordinates':[row['long'],row['lat']]},
'properties': {'time': row['date'],
'popup':('Occupancy: '+str(row['occupancy'])+'/'+str(row['capacity'])),
'style': {'color' : ''},
'icon': 'circle',
'iconstyle':{'fillColor': row['color'],
'fillOpacity': 1,
'stroke': 'true',
'radius': setradius(row['capacity'])}}
} for _,row in data.iterrows()]
map = folium.Map(location = [59.32760990395156, 18.06760960579676], tiles='Stamen Toner' , zoom_start = 12)
TimestampedGeoJson( features,
add_last_point = True,
period='PT10M',
loop_button=True,
time_slider_drag_update=True,
transition_time = 100,
).add_to(map)
map.add_child(colorbar)
map
map.save('SthlmEBikeVis.html')
| ACRLindell/Sthlm-EbikeVis- | BikeVis.py | BikeVis.py | py | 2,741 | python | en | code | 1 | github-code | 13 |
8933667673 | import pandas as pd
unpickled_df = pd.read_pickle("./mydata.pkl")
# for index, row in unpickled_df.iterrows():
# print("index", index)
# print("row", row)
print(unpickled_df.head())
apps = unpickled_df.iterrows()
count_row = unpickled_df.shape[0]
print("row count is: ", count_row)
# while True:
# try:
# row = next(apps)
# except:
# # print("re iterate")
# apps = unpickled_df.iterrows()
# row = next(apps)
# print("row ", row)
# print("row", row)
# print("obs", row.obs)
# print("action", row.action)
# print("rew", row.rew)
# print("done", row.done) | MzXuan/RL_motion_plan | data/load_test.py | load_test.py | py | 634 | python | en | code | 2 | github-code | 13 |
376664597 | import matplotlib.pyplot as plt
from math import *
def plot(x,y):
fig = plt.figure(figsize=(7,7))
ax = fig.add_axes([0.06, 0.05, 0.6, 0.9])
ax.plot(x,y,'go-')
plt.show()
plt.close('all')
def getFn(x,a0,an,bn):
    f = eval(a0)  # constant (a0) term of the partial sum
for n in range(1,11):
f = f + (eval(an)*cos(n*x))+(eval(bn)*sin(n*x))
return f
def start(a0,an,bn):
end = int(pi*100*2)
xarr,yarr = end*[None],end*[None]
for x in range(0,end,1):
x_offset = (x/100)-round(pi,2)
xarr[x] = x_offset
yarr[x] = getFn(x_offset,a0,an,bn)
plot(xarr,yarr)
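# The coefficients passed by sample() below (a0 = pi/2, an = (2*(-1)**n - 2)/(pi*n*n),
# bn = 0) appear to be the Fourier coefficients of f(x) = |x| on [-pi, pi], so the
# plotted 10-term partial sum should approximate a V shape.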
def sample():
start("pi/2","(((2*((-1)**n))-2))/(pi*n*n)","0") | bleezmo/mat434 | fourier_series.py | fourier_series.py | py | 586 | python | en | code | 0 | github-code | 13 |
688734184 | #!/usr/bin/env python
#
# Modules can't add the same outbox multiple times
#
from I3Tray import *
tray = I3Tray()
from icecube.icetray import I3Module
class DoubleOutboxModule(I3Module):
def __init__(self, context):
I3Module.__init__(self, context)
def Configure(self):
self.AddOutBox("box")
self.AddOutBox("box")
def Physics(self, frame):
self.PushFrame(frame)
# generate empty frames
tray.AddModule("BottomlessSource","bottomless")
# add the module that registers the same outbox twice
tray.AddModule(DoubleOutboxModule,"do")
try:
    tray.Execute(1)
except Exception:
    print("okay, that threw as we would have hoped.")
else:
    raise AssertionError("that should have thrown")
| wardVD/IceSimV05 | src/icetray/resources/test/double_outbox.py | double_outbox.py | py | 674 | python | en | code | 1 | github-code | 13 |
8613816004 | #!/usr/bin/env python3
from pwn import *
from colorama import Fore
offset = input("Specify Offset: ")
buff = 'A' * int(offset)
program_name = input('Please specify the program to overflow. ')
program_name = program_name.strip()
program = ELF(program_name)
function_address = program.symbols['flag']
EBP = b'BBBB'
EBX = 0x0804bf10
EIP = p32(function_address)
FILL = b'CCCC'
Arg1 = 0xdeadbeef
Arg2 = 0xc0ded00d
payload = buff.encode() + p32(EBX) + EBP + EIP + FILL + p32(Arg1) + p32(Arg2)
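# Payload layout (classic 32-bit cdecl stack smash; exactly which saved registers
# get clobbered depends on the target binary, so treat this as a sketch):
#   'A' * offset   padding up to the saved registers
#   EBX / EBP      overwrite saved EBX and saved EBP
#   EIP            saved return address, redirected to flag()
#   FILL           fake return address for flag() itself
#   Arg1, Arg2     arguments flag() expects to find on the stack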
#Remote_or_Local = input('Remote or Local? ')
#if Remote_or_Local == 'Remote' or 'remote':
# binary = remote(remote_ip.strip(), remote_port)
# remote_ip = input("Enter Target IP: ")
# remote_port = int(input("Enter Target Port: "), 10)
#elif Remote_or_Local == 'Local' or 'local':
# binary = process(program_name)
#else:
# print("Enter either \'remote\' or \'local\'")
binary = process(program_name)
print(binary.recv())
binary.sendline(payload)
print(binary.recvall())
| GreyStrawHat/Portfolio | buffer_overflow.py | buffer_overflow.py | py | 987 | python | en | code | 0 | github-code | 13 |
35184342027 | import os
from strategy import *
from base_options import *
from player import MultiPlayer
def IPDRoundRobin(players, num_iter, against_itself=False, return_ranking=False, save_plot=False, save_img=False, DEBUG=False, root=''):
"""Round Robin tournament."""
n = len(players)
p = {obj:[0] * num_iter for obj in players}
yields = {obj:[] for obj in players}
achieves = {obj:[] for obj in players}
for (i, p1) in zip(np.arange(n), players):
if DEBUG:
print("Match progress = {}/{}".format((i+1), players.size))
start = i if against_itself else i+1
for (j, p2) in zip(np.arange(start, n), players[start:]):
p1.clear_history()
p2.clear_history()
p1.play_iter(p2, num_iter)
rew1, yield1, best1 = p1.metrics()
rew2, yield2, best2 = p2.metrics()
yields[p1].append(rew1[-1]/yield1[-1])
yields[p2].append(rew2[-1]/yield2[-1])
achieves[p1].append(rew1[-1]/best1[-1])
achieves[p2].append(rew2[-1]/best2[-1])
if save_plot:
p[p1] += rew1
p[p2] += rew2
if save_plot:
plt.figure(figsize=(12,5))
for i in p:
plt.plot(p[i], label=i.s)
plt.xlabel('Iteration')
plt.ylabel('Cum. reward')
plt.title("Evolution of the game")
plt.legend(bbox_to_anchor=(0,-0.1), ncol=5, loc=2)
if save_img: # we only save images for ipdmp script
plt.savefig('{}/img/ipdmp/ipdmp-evolution-of-game-{}.eps'.format(root,len(p)),format='eps',bbox_inches='tight')
plt.close()
else:
plt.show()
# calculate ranking and matches dataframes
# has to be done after the tournament
ranking_df = pd.DataFrame() # all points gained by players
for (i, p) in zip(np.arange(n), players):
if DEBUG:
print("Point progress = {}/{}".format((i+1), players.size))
points = p.get_points()
cooperate_count, defect_count = p.get_coop_def_count()
df = pd.DataFrame(
[[str(p.s), int(points[-1]), cooperate_count, defect_count, p, p.s.id, 100*np.mean(yields[p]), 100*np.mean(achieves[p])]],
columns=['Player', 'points', 'coop_count', 'defect_count', 'rrp', 'labels', 'yield', 'achieve']
)
ranking_df = ranking_df.append(df)
# ranking_df = ranking_df.sort_values(['points'], ascending=False)
players = np.array(ranking_df.sort_values(['points'], ascending=False)['rrp'])
ranking_df = ranking_df.drop(columns=['rrp']).reset_index(drop=True)
    # computing the ranking is expensive when there are many players, but it is needed to return the rank
if not return_ranking:
return players
return players, ranking_df
def main():
root = os.path.dirname(os.path.abspath(__file__))[:-5]
opt = BaseOptions().parse(BaseOptions.IPDMP)
NUM_ITER = opt.niter
NUM_PLAYERS = opt.nplay
NUM_REPETITIONS = opt.nrep
FIXED = opt.fixed
SAVE_IMG = opt.saveimg
LATEX = opt.latex
np.random.seed(opt.seed) # None = clock, no-number = 100
print("Testing round-robin tournament with {}-people".format(NUM_PLAYERS))
# define k for strategy probabilities
k_strategies = Strategy.generatePlayers(NUM_PLAYERS, replace=(NUM_PLAYERS > Strategy.TOT_STRAT), fixed=FIXED)
#k_strategies = np.array([GRT,GRT, TFT,TFT, TF2T,TF2T, BAD,BAD, NICE,NICE]) # paper test
#k_strategies = np.array([TFT,TFT, TFT, TFT, BAD, BAD, BAD, BAD, NICE,NICE]) # paper test 2
repeated_players = []
for i in range(NUM_REPETITIONS):
# initialize players with given strategies
players = np.array([MultiPlayer(k) for k in k_strategies])
players, ranking_df = IPDRoundRobin(players, NUM_ITER, return_ranking=True,
save_plot=(i==(NUM_REPETITIONS-1)), save_img=SAVE_IMG, root=root) # not against itself, plot last rep.
repeated_players.append(players)
repeated_ranking_df = repeated_ranking_df.append(ranking_df) if i!=0 else ranking_df
# print tables
group = repeated_ranking_df[['points', 'coop_count', 'defect_count']].groupby(repeated_ranking_df.index)
group_mean = group.mean()
group_mean.columns = [str(col) + '_mean' for col in group_mean.columns]
group_std = group.std()
group_std.columns = [str(col) + '_std' for col in group_std.columns]
group_df = group_mean.merge(group_std, left_index=True, right_index=True, how='left')
group_df['coop_perc'] = group_df['coop_count_mean']*100/(group_df['coop_count_mean']+group_df['defect_count_mean'])
group_df['str'] = repeated_ranking_df['Player'][:NUM_PLAYERS]
group_df['yield'] = repeated_ranking_df['yield'][:NUM_PLAYERS]
group_df['achieve'] = repeated_ranking_df['achieve'][:NUM_PLAYERS]
group_df = group_df[['str','points_mean','points_std','yield','achieve',
'coop_count_mean','coop_count_std','defect_count_mean','defect_count_std','coop_perc']] # column reordering
group_df = group_df.sort_values(by=['points_mean'], ascending=False)
if LATEX:
print(group_df.to_latex(index=False))
else:
print(group_df)
# box plot of last match
one_round_results = [p.results for p in players]
one_round = pd.DataFrame(one_round_results).T
meds = one_round.median().sort_values(ascending=False)
one_round = one_round[meds.index]
one_round.boxplot(figsize=(12,5))
plt.xticks(np.arange(NUM_PLAYERS)+1, [players[p].s for p in meds.index], rotation=90)
plt.suptitle('Mean and variance for each type vs the other players \n One complete round')
plt.ylabel('Points')
plt.xlabel('Player')
if SAVE_IMG:
plt.savefig('{}/img/ipdmp/ipdmp-boxplot-single-match-{}.eps'.format(root, NUM_PLAYERS),format='eps',bbox_inches='tight')
plt.close()
else:
plt.show()
# box plot of all points
group_median = group.median().sort_values(by=['points'], ascending=False)
temp_df = pd.DataFrame()
for index in group_median.index:
temp_df = temp_df.append(group.get_group(index))
temp_df['index'] = np.repeat(np.arange(NUM_PLAYERS), NUM_REPETITIONS)
temp_df.boxplot(column='points', by='index', figsize=(12,5))
plt.xticks(np.arange(NUM_PLAYERS)+1, group_df['str'][group_median.index], rotation=90)
plt.suptitle(("Mean and variance for each type at the end of the tournament - {} repetitions").format(NUM_REPETITIONS))
plt.ylabel('Points')
plt.xlabel('Player')
if SAVE_IMG:
plt.savefig('{}/img/ipdmp/ipdmp-boxplot-final-points-{}.eps'.format(root,NUM_PLAYERS),format='eps',bbox_inches='tight')
plt.close()
else:
plt.show()
if __name__ == "__main__":
main()
| eliabntt/iterative_prisoner_dilemma | code/ipdmp.py | ipdmp.py | py | 6,751 | python | en | code | 3 | github-code | 13 |
20850709393 | import json
import os
from urllib.request import Request, urlopen
def _build_header_as_dict():
"""return HTTP request header as dict
token to call API is specified
as a environment variable `SLACK_BOT_USER_TOKEN`.
"""
token = os.environ.get("SLACK_BOT_USER_TOKEN")
if token is None:
raise RuntimeError("環境変数 SLACK_BOT_USER_TOKEN を指定してください")
return {
"Authorization": f"Bearer {token}",
"Content-type": "application/json",
}
def _build_body_as_bytes(channel, message):
"""return HTTP request body as bytes, utf-8 encoded"""
data_dict = {"channel": channel, "text": message}
data_str = json.dumps(data_dict)
return data_str.encode()
def _request_to_api(header, body):
uri = "https://slack.com/api/chat.postMessage"
request = Request(uri, data=body, headers=header, method="POST")
urlopen(request)
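# Illustrative composition of the helpers above (channel and message are made-up
# values):
#
#   header = _build_header_as_dict()
#   body = _build_body_as_bytes("#general", "deploy finished")
#   _request_to_api(header, body)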
| ftnext/diy-slack-post | postslack/http.py | http.py | py | 912 | python | en | code | 3 | github-code | 13 |
21668740888 | # -*- coding: utf-8 -*-
"""
Model PyTorch implementation.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .backbone.lucas import AlignedXception
class Model(nn.Module):
def __init__(self, backbone='lucas',
filters=[32, 64, 128, 256, 256, 512], pi=0.1,
num_classes=1, btn_size=4, image=True, desc=True,
vocab_size=76, desc_output_size=512, final_mlp_list=[256],
fusion_output_filters=512, num_filters_dyn=10, scale_shift=False,
broadcast=False, dmn=False, drop3d=None,
loc=True, s_e=False,
fusion_pointwise=False):
super().__init__()
self.image = image
self.desc = desc
self.vocab_size = vocab_size
self.desc_output_size = desc_output_size
self.final_mlp_list = final_mlp_list
self.fusion_output_filters = fusion_output_filters
self.num_filters_dyn = num_filters_dyn
self.scale_shift = scale_shift
self.broadcast = broadcast
self.dmn = dmn
self.drop3d = drop3d
self.loc = loc
self.s_e = s_e
self.fusion_pointwise = fusion_pointwise
self.prior = -np.log((1-pi)/pi) #Last layer bias initialization prior
self.backbone_name = backbone
if not (image or desc):
raise ValueError('At least one modality should be used')
image_output_size = 0
# Images
if self.image:
BatchNorm = nn.InstanceNorm3d
image_output_size = filters[-1] * btn_size * btn_size * btn_size
if self.backbone_name == 'lucas':
self.backbone = AlignedXception(BatchNorm, filters)
else:
raise ValueError('Backbone not implemented')
# Descriptor
if self.desc:
self.mlp_d = MLP(self.vocab_size, self.desc_output_size, [])
self.scale_shift = Scale_and_shift()
#Dynamic Multimodal Network (DMN)
if self.dmn:
self.adaptative_filter = nn.Linear(
in_features=self.desc_output_size,
out_features=(self.num_filters_dyn * (filters[-1] + (12*int(self.loc)))))
nn.init.xavier_normal_(self.adaptative_filter.weight)
if self.drop3d:
self.dropout3d = nn.Dropout3d(p=self.drop3d)
concat_output_channels = (filters[-1] +
self.desc_output_size*int(self.broadcast) +
self.num_filters_dyn* int(self.dmn) + 12*int(self.loc))
#Squeeze-and-Excitation block
if self.s_e:
self.se_layer = SEBlock(concat_output_channels,
reduction=16)
if self.fusion_pointwise:
self.pointwise = nn.Conv3d(concat_output_channels,
self.fusion_output_filters, 1)
else:
self.fusion_output_filters = concat_output_channels
if self.image:
fusion_output_size = self.fusion_output_filters * btn_size * \
btn_size * btn_size
else:
fusion_output_size = self.desc_output_size
self.mlp_f = MLP(fusion_output_size, num_classes,
self.final_mlp_list, p=[0, 0.2], prior=self.prior,
last=True)
self.relu = nn.ReLU(inplace=True)
def forward(self, x, y=None):
if self.image:
x = self.backbone(x)
if self.desc:
if y is not None:
concat = [x]
y = self.relu(self.mlp_d(y))
if self.scale_shift:
y = self.scale_shift(y)
#Descriptor broadcasted vector
if self.broadcast:
b = y.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
b = b.expand(b.shape[0], b.shape[1],
x.shape[-3], x.shape[-2], x.shape[-1])
concat.append(b)
if self.loc:
N, _, D, H, W = x.shape
loc = generate_spatial_batch(D, H, W)
loc = loc.repeat([N, 1, 1, 1, 1])
concat.append(loc)
if self.dmn:
d_filters = self.adaptative_filter(y)
d_filters = torch.sigmoid(d_filters)
resp = []
for idx, _filter in enumerate(d_filters):
_filter = _filter.view(self.num_filters_dyn,
x.shape[1] + (12*int(self.loc)), 1, 1, 1)
if self.loc:
resp.append(F.conv3d(
input=torch.cat([x[idx], loc[idx]]).unsqueeze(0),
weight=_filter))
else:
resp.append(F.conv3d(
input=x[idx].unsqueeze(0),
weight=_filter))
resp = torch.cat(resp)
if self.drop3d:
resp = self.dropout3d(resp)
concat.append(resp)
x = torch.cat(concat, dim=1)
if self.s_e:
x = self.se_layer(x)
if self.fusion_pointwise:
x = self.pointwise(x)
else:
x = self.relu(self.mlp_d(x))
if self.scale_shift:
x = self.scale_shift(x)
# Combination
x = x.view(x.shape[0], -1)
out = self.mlp_f(x)
return out
class MLP(nn.Module):
def __init__(self, in_dim, out_dim, hidden=[], p=[0], prior=None, last=False):
super().__init__()
in_dim = in_dim
layers = []
p = p * (len(hidden)+1)
if len(hidden) > 0:
for i, h_dim in enumerate(hidden):
layers.append(nn.Dropout(p[i]))
layers.append(nn.Linear(in_dim, h_dim))
layers.append(nn.ReLU())
in_dim = h_dim
layers.append(nn.Dropout(p[-1]))
layers.append(nn.Linear(in_dim, out_dim))
if not last:
layers.append(nn.ReLU())
self.mlp = nn.Sequential(*layers)
if prior:
nn.init.constant_(self.mlp[-1].bias, prior)
def forward(self, x):
return self.mlp(x)
class Scale_and_shift(nn.Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.rand(1))
self.bias = nn.Parameter(torch.zeros(1))
def forward(self, x):
return self.weight * x + self.bias
class SEBlock(nn.Module):
"""
Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,'
https://arxiv.org/abs/1709.01507.
Parameters:
----------
channels : int
Number of channels.
reduction : int, default 16
Squeeze reduction value.
"""
def __init__(self,
channels,
reduction=16):
super(SEBlock, self).__init__()
mid_channels = channels // reduction
self.pool = nn.AdaptiveAvgPool3d(output_size=1)
self.conv1 = nn.Conv3d(in_channels=channels,
out_channels=mid_channels, kernel_size=1,
stride=1, groups=1, bias=False)
self.activ = nn.ReLU(inplace=True)
self.conv2 = nn.Conv3d(in_channels=mid_channels,
out_channels=channels, kernel_size=1,
stride=1, groups=1, bias=False)
nn.init.xavier_normal_(self.conv2.weight)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
w = self.pool(x)
w = self.conv1(w)
w = self.activ(w)
w = self.conv2(w)
w = self.sigmoid(w)
x = x * w
return x
def generate_spatial_batch(featmap_D, featmap_H, featmap_W):
"""Generate additional visual coordinates feature maps.
Function taken from
https://github.com/chenxi116/TF-phrasecut-public/blob/master/util/processing_tools.py#L5
and slightly modified
"""
spatial_batch_val = np.zeros(
(1, 12, featmap_D, featmap_H, featmap_W), dtype=np.float32)
for h in range(featmap_H):
for w in range(featmap_W):
for d in range(featmap_D):
xmin = w / featmap_W * 2 - 1
xmax = (w + 1) / featmap_W * 2 - 1
xctr = (xmin + xmax) / 2
ymin = h / featmap_H * 2 - 1
ymax = (h + 1) / featmap_H * 2 - 1
yctr = (ymin + ymax) / 2
zmin = d / featmap_D * 2 - 1
zmax = (d + 1) / featmap_D * 2 - 1
zctr = (zmin + zmax) / 2
spatial_batch_val[0, :, d, h, w] = (
[xmin, ymin, zmin,
xmax, ymax, zmax,
xctr, yctr, zctr,
1 / featmap_W, 1 / featmap_H, 1/featmap_D])
return torch.from_numpy(spatial_batch_val).cuda()
if __name__ == "__main__":
model = Model()
model.eval()
input = torch.rand(1, 1, 256, 256, 256)
output = model(input)
print(output.size())
| BCV-Uniandes/SAMA | models/model.py | model.py | py | 9,800 | python | en | code | 1 | github-code | 13 |
7063501566 | from flask_login import current_user, login_required
from flask_restful import Resource, fields, marshal
from sqlalchemy.orm import aliased
from app import db
from app.chat.models import Message
from app.users.models import User
chat_fields = {
'recipientId': fields.Integer,
'recipientName': fields.String,
'senderId': fields.Integer,
'senderName': fields.String,
'body': fields.String,
'createdAt': fields.DateTime
}
message_fields = {
'recipientId': fields.Integer,
'senderId': fields.Integer,
'body': fields.String,
'createdAt': fields.DateTime
}
class ChatList(Resource):
@login_required
def get(self):
"""
Endpoint to get all user chats with the label of the last message sent.
"""
Recipient = aliased(User)
Sender = aliased(User)
chats = db.session \
.query(Recipient.id.label('recipientId'),
Recipient.name.label('recipientName'),
Sender.id.label('senderId'),
Sender.name.label('senderName'),
Message.body,
Message.created_at.label('createdAt')) \
.filter((Message.sender_id == current_user.id) | (Message.recipient_id == current_user.id)) \
.join(Recipient, (Recipient.id == Message.sender_id) & (Message.sender_id != current_user.id) |
(Recipient.id == Message.recipient_id) & (Message.recipient_id != current_user.id)) \
.join(Sender, (Sender.id == Message.sender_id)) \
.distinct(Recipient.id) \
.order_by(Recipient.id, Message.created_at.desc()) \
.all()
return [marshal(chat, chat_fields) for chat in chats], 200
class ChatData(Resource):
@login_required
def get(self, user_id):
"""
Endpoint to get all messages sent to and received from the user.
"""
messages = db.session \
.query(Message.recipient_id.label('recipientId'),
Message.sender_id.label('senderId'),
Message.body,
Message.created_at.label('createdAt')) \
.filter((Message.sender_id == current_user.id) & (Message.recipient_id == user_id) |
(Message.sender_id == user_id) & (Message.recipient_id == current_user.id)) \
.all()
return [marshal(message, message_fields) for message in messages], 200
| micpst/chat-app | backend/app/chat/resources.py | resources.py | py | 2,524 | python | en | code | 0 | github-code | 13 |
17493828714 | import numpy as np
import math
def score_function(A, B):
"""
    A and B are pitch-class representations (sets of pitch classes).
    :param A: set of pitch classes
    :param B: set of pitch classes
    :return: similarity score in [-3, 3]
"""
denominator = len(A | B)
if denominator == 0:
# Means both are silence
return 1
AandB = A & B
AorB = A | B
posTerm = len(AandB)
negTerm = len(AorB - AandB)
score = (posTerm - negTerm) / denominator
return 3 * score
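# Examples (sets of pitch classes):
#   score_function({0, 4, 7}, {0, 4, 7}) ->  3.0  (identical chords)
#   score_function({0}, {4})             -> -3.0  (completely disjoint)
#   score_function(set(), set())         ->  1    (both silent)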
# def score_function(orchestra, piano):
# score = 0
# for (pitch_b, type_b) in piano:
# for pitch_a, type_a in orchestra:
# # Same notes
# if pitch_a == pitch_b:
# if
# number_same_notes = len([_ for e in B if (e[0] == pitch) and (e[1] == type)])
# number_same_notes_diff_type = len([_ for e in B if (e[0] == pitch) and (e[1] == type)])
# # Octave
# number_same_notes = len([_ for e in B if (e[0] == pitch) and (e[1] == type)])
# number_same_notes_diff_type = len([_ for e in B if (e[0] == pitch) and (e[1] == type)])
#
# # Different note
#
# return
def nwalign(seqj, seqi, gapOpen, gapExtend, score_matrix):
"""
    Needleman-Wunsch alignment of two sequences of pitch-class sets.
    Returns the list of aligned index pairs (j, i) - j indexing seqj and i
    indexing seqi - in left-to-right order, together with the full score matrix
    built from the gapOpen/gapExtend penalties and score_function (or a
    precomputed score_matrix).
TODO:
Limit search zone
Abort exploration if score becomes too small
"""
UP, LEFT, DIAG, NONE = range(4)
max_j = len(seqj)
max_i = len(seqi)
score = np.zeros((max_i + 1, max_j + 1), dtype='f') - math.inf
pointer = np.zeros((max_i + 1, max_j + 1), dtype='i')
max_i, max_j
pointer[0, 0] = NONE
score[0, 0] = 0.0
pointer[0, 1:] = LEFT
pointer[1:, 0] = UP
# Do we do that ?? Not sure...
score[0, 1:] = gapExtend * np.arange(max_j)
score[1:, 0] = gapExtend * np.arange(max_i)
termScores = []
##################################
# Build score matrix
for i in range(1, max_i + 1):
ci = seqi[i - 1]
        # Constrain the search to a diagonal band (lozenge) of half-width 1000.
        # Faster, and probably sufficient for nearly aligned sequences such as Beethoven/Liszt,
        # but probably does not work in general
j_min = max(i - 1000, 1)
j_max = min(i + 1000, max_j + 1)
for j in range(j_min, j_max):
# for j in range(1, max_j + 1):
cj = seqj[j - 1]
if score_matrix is not None:
termScore = score_matrix[ci, cj]
else:
termScore = score_function(ci, cj)
termScores.append(termScore)
diag_score = score[i - 1, j - 1] + termScore
if pointer[i - 1, j] == UP:
up_score = score[i - 1, j] + gapExtend
else:
up_score = score[i - 1, j] + gapOpen
if pointer[i, j - 1] == LEFT:
left_score = score[i, j - 1] + gapExtend
else:
left_score = score[i, j - 1] + gapOpen
if diag_score >= up_score:
if diag_score >= left_score:
score[i, j] = diag_score
pointer[i, j] = DIAG
else:
score[i, j] = left_score
pointer[i, j] = LEFT
else:
if up_score > left_score:
score[i, j] = up_score
pointer[i, j] = UP
else:
score[i, j] = left_score
pointer[i, j] = LEFT
##################################
# Build aligned indices
pairs = []
previous_coord = None
while True:
p = pointer[i, j]
if p == NONE:
break
if p == DIAG:
i -= 1
j -= 1
pairs.append((j, i))
# if previous_coord is not None:
# pairs.append(previous_coord)
elif p == LEFT:
j -= 1
elif p == UP:
i -= 1
else:
raise Exception('wtf!')
# if (i != len(seqi)) and (j != len(seqj)):
# previous_coord = j, i
# Don't forget last one
# pairs.append(previous_coord)
# return (align_j[::-1], align_i[::-1]), (skip_j[::-1], skip_i[::-1])
return pairs[::-1], score
| qsdfo/orchestration_aws | DatasetManager/DatasetManager/arrangement/nw_align.py | nw_align.py | py | 4,542 | python | en | code | 0 | github-code | 13 |
22021475402 | import pygame
pygame.init()
white = (255,255,255)
black = (0,0,0)
gameDisplay = pygame.display.set_mode((800,600))
pygame.display.set_caption('Slither')
gameExit = False
lead_x = 300
lead_y = 300
lead_x_change = 0 #for continuous pressing of the key, it should move
lead_y_change = 0
clock = pygame.time.Clock()
while not gameExit:
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameExit = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
lead_x_change = -10
lead_y_change = 0 #to avoid diagonal movement
elif event.key == pygame.K_RIGHT:
lead_x_change = 10
lead_y_change = 0
elif event.key == pygame.K_UP:
lead_y_change = -10
lead_x_change = 0
elif event.key == pygame.K_DOWN:
lead_y_change = 10
lead_x_change = 0
if event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
lead_x_change = 0 #if we release the key, the snake should stop moving
lead_x += lead_x_change
lead_y += lead_y_change
gameDisplay.fill(white)
pygame.draw.rect(gameDisplay,black,[lead_x,lead_y,10,10])
pygame.display.update()
clock.tick(10) #frames per second
pygame.quit()
quit()
| yashasviananya/test-5 | test3.py | test3.py | py | 1,425 | python | en | code | 0 | github-code | 13 |
35444486631 | from flask import Flask
from flask_restful import Resource, Api
import json
# it will run this file automatically
# import jx_cpu_kprobe
from subprocess import call
from threading import Thread
import sys
import v2_grpc_client
import threading
app = Flask(__name__)
api = Api(app)
# node2port
node2port = {
"n1": "7777",
"n2": "8888",
"n3": "9999",
}
class UserAgent():
def run_web_server(self):
app.run(debug=True)
class Metrics(Resource):
def get(self, metrics_names, nodes):
# print(metrics_names)
# print(nodes)
results = {}
threads = {}
for node in nodes.split(","):
port = node2port[node]
node_name = node
# print(node, port)
thread = threading.Thread(target=v2_grpc_client.SendQueryMetrics, args=(results, metrics_names, node_name, port))
threads[node] = thread
for thread in threads.values():
thread.start()
for thread in threads.values():
thread.join()
print("results is done")
return results
api.add_resource(Metrics, '/<string:metrics_names>/<string:nodes>')
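# Example request (metric names are placeholders; node names must exist in node2port):
#   GET http://localhost:6000/cpu_usage,mem_usage/n1,n2
# queries the listed nodes in parallel threads and returns the merged results dict.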
if __name__ == '__main__':
app.run(debug=True, port=6000, host='localhost')
| victorliu-sq/theebees | v2/v2_coordinator.py | v2_coordinator.py | py | 1,290 | python | en | code | 0 | github-code | 13 |
71830985299 | L = ['Michael', 'Sarah', 'Tracy', 'Bob', 'Jack']
print (L[0:3])
print (L[:3])
print (L[1:3])
print (L[-1])
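# The four prints above output:
#   ['Michael', 'Sarah', 'Tracy']
#   ['Michael', 'Sarah', 'Tracy']
#   ['Sarah', 'Tracy']
#   Jack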
def trim(num):
if num[:1] == ' ':
return trim(num[1:])
elif num[-1:] == ' ':
return trim(num[:-1])
else:
return num
print (trim(' 123 ')) | amusitelangdan/pythonTest | 20200103py/do_slice.py | do_slice.py | py | 280 | python | en | code | 0 | github-code | 13 |
27049789996 | import os
import numpy as np
import librosa
from matplotlib import pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, ConfusionMatrixDisplay
# Preprocessing
def preprocessing(filename, sample_rate):
print('[Process]: Preprocessing Started')
# Read the input file with specific sampling rate
signal, sr = librosa.load(filename, sr=sample_rate)
# Apply the FIR Band Pass Filter
signal = fir_band_pass(signal, sample_rate, 200,
4000, 100, 100, np.float32)
print('[Process]: Preprocessing Completed')
return signal
# FIR Band Pass Filter
def fir_band_pass(samples, fs, fL, fH, NL, NH, outputType):
fH = fH / fs
fL = fL / fs
# Compute a low-pass filter with cutoff frequency fH.
lpf = np.sinc(2 * fH * (np.arange(NH) - (NH - 1) / 2.))
lpf *= np.blackman(NH)
lpf /= np.sum(lpf)
# Compute a high-pass filter with cutoff frequency fL.
hpf = np.sinc(2 * fL * (np.arange(NL) - (NL - 1) / 2.))
hpf *= np.blackman(NL)
hpf /= np.sum(hpf)
hpf = -hpf
hpf[int((NL - 1) / 2)] += 1
# Convolve both filters.
h = np.convolve(lpf, hpf)
# Applying the filter to a signal s can be as simple as writing
s = np.convolve(samples, h).astype(outputType)
return s
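# The convolution of the low-pass kernel (cutoff fH) with the spectrally inverted
# high-pass kernel (cutoff fL) gives a band-pass response of roughly fL-fH Hz,
# e.g. 200-4000 Hz as used in preprocessing() above.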
# Dataset creation
def build_dataset(dir, sample_rate, frame_length, hop_length):
print('[Process]: Dataset Training Started')
# List for the MFCC features of each .wav file in the dataset
train_dataset_x = []
# List with the labels of each .wav file in the dataset
train_dataset_y = []
# Read each folder inside the dataset folder
for folder in os.listdir(dir):
# Get the label of the digit from the folder name
label = folder.split('_')[1]
# Get all the .wav files inside the folder
fileList = [f for f in os.listdir(
dir+folder) if os.path.splitext(f)[1] == '.wav']
# For each .wav file
for fileName in fileList:
# Read the file with specific sampling rate
audio, sr = librosa.load(dir+folder+'/'+fileName, sample_rate)
# Apply the FIR Band Pass Filter
audio = fir_band_pass(audio, sample_rate, 200,
4000, 100, 100, np.float32)
# Extract the MFCC features
feature = librosa.feature.mfcc(
audio, sample_rate, n_fft=frame_length, hop_length=hop_length)
# Find the mean values for these features
feature = np.mean(feature, axis=1)
# Add them to the list
train_dataset_x.append(feature)
# Add the label to the list
train_dataset_y.append(label)
# Create a Random Forest Classifier and insert the 2 lists for training
rfc = RandomForestClassifier(n_estimators=150).fit(
train_dataset_x, train_dataset_y)
print('[Process]: Dataset Training Completed')
return rfc
# Root Mean Square Energy
def rmse(signal, frame_length, hop_length):
# Get the Root Mean Square Energy
energy = librosa.feature.rms(
signal, frame_length=frame_length, hop_length=hop_length)[0]
print('[Process]: RMSE Calculated')
return np.array(energy)
# Zero Crossing Rate
def zero_crossing_rate(signal, frame_length, hop_length):
# Get the Zero Crossing Rate
zcr = librosa.feature.zero_crossing_rate(
signal, frame_length=frame_length, hop_length=hop_length)[0]
print('[Process]: ZCR Calculated')
return np.array(zcr)
# Background vs Foreground Classifier
def b_vs_f(signal, energy, zcr, frame_length, hop_length):
print('[Process]: Background vs Foreground Classification Started')
# List that seperates background from foreground
bvsf = []
# Thresholds for Energy and Zero Crossing Rate
energy_threshold = np.mean(energy)
zcr_threshold = np.mean(zcr)
# For each frame add 1 to the list if Zero Crossing Rate is under it's threshold and Energy is over it's threshold
# Otherwise add 0 to the list
for i in range(energy.size):
if zcr[i] <= zcr_threshold and energy[i] >= energy_threshold:
bvsf.append(1)
else:
bvsf.append(0)
# List for the recognised digits
numbers = []
# Starting sample
start = 0
# Ending sample
end = frame_length
# For every frame of the signal
for i in range(1, len(bvsf)):
# If the current and previous frame are voiced frames, update the ending sample
if bvsf[i-1] == 1 and bvsf[i] == 1:
end = i*hop_length+frame_length
# Else if the current frame is voiced, the previous is unvoiced and the second previous is voiced, update the ending sample
elif i-2 >= 0 and bvsf[i-2] == 1 and bvsf[i-1] == 0 and bvsf[i] == 1:
end = i*hop_length+frame_length
# Else if the current frame is voiced, the previous is unvoiced and the next is voiced, update the starting and ending sample
elif bvsf[i-1] == 0 and bvsf[i] == 1 and bvsf[i+1] == 1:
start = (i-1)*hop_length
end = start+frame_length
        # Else if the current frame is unvoiced, the previous is voiced and one of the next 4 frames is voiced, update the ending sample
elif bvsf[i-1] == 1 and bvsf[i] == 0 and i+4 < len(bvsf) and (bvsf[i+1] == 1 or bvsf[i+2] == 1 or bvsf[i+3] == 1 or bvsf[i+4] == 1):
end = i*hop_length+frame_length
# Else if the current frame is unvoiced and the previous is voiced, get a part of signal based on the calculated starting and ending samples
elif bvsf[i-1] == 1 and bvsf[i] == 0:
numbers.append(signal[start:end])
print('[Process]: Background vs Foreground Classification Completed')
return numbers
# Digit Recognition
def recognition(rfc, numbers, sample_rate, frame_length, hop_length):
print('[Process]: Digits Recognition Started')
# List for the MFCC features of each recognised digit
features = []
# For each digit of the recognised digits
for number in numbers:
# Extract the MFCC features
feature = librosa.feature.mfcc(
number, sample_rate, n_fft=frame_length, hop_length=hop_length)
# Find the mean values for these features
feature = np.mean(feature, axis=1)
# Add them to the list
features.append(feature)
# Get the prediction from the Random Forest Classifier
prediction = rfc.predict(features)
print('[Process]: Digits Recognition Completed')
print('[Result]: ' + str(prediction))
return prediction
# Plot Graphs
def plots(signal, energy, zcr, sample_rate, frame_length, hop_length):
# Figure 1 (Waveplot, RMSE, ZCR)
fig, ax = plt.subplots(nrows=3, sharex=True,
sharey=True, constrained_layout=True)
librosa.display.waveplot(signal, sr=sample_rate, ax=ax[0])
ax[0].set(title='Waveplot')
ax[0].label_outer()
frames = range(len(energy))
t = librosa.frames_to_time(frames, sr=sample_rate, hop_length=hop_length)
librosa.display.waveplot(signal, sr=sample_rate, alpha=0.5, ax=ax[1])
ax[1].plot(t, energy, color="r")
ax[1].set_ylim((-1, 1))
ax[1].set(title='RMSE')
ax[1].label_outer()
librosa.display.waveplot(signal, sr=sample_rate, alpha=0.5, ax=ax[2])
ax[2].plot(t, zcr, color="r")
ax[2].set_ylim((-1, 1))
ax[2].set(title="ZCR")
ax[2].label_outer()
plt.show()
# Figure 2 (Spectrogram, Mel-Spectrogram, MFCC)
fig, ax = plt.subplots(nrows=3, sharex=False,
sharey=False, constrained_layout=True)
y_to_db = librosa.amplitude_to_db(abs(librosa.stft(signal)))
librosa.display.specshow(
y_to_db, sr=sample_rate, x_axis='time', y_axis='hz', ax=ax[0])
ax[0].set(title='Spectrogram')
ax[0].label_outer()
    mel_spectrogram = librosa.feature.melspectrogram(
        signal, sr=sample_rate, n_fft=frame_length, hop_length=hop_length)
    log_mel_spectrogram = librosa.power_to_db(mel_spectrogram)
    librosa.display.specshow(
        log_mel_spectrogram, x_axis="time", y_axis="mel", sr=sample_rate, ax=ax[1])
ax[1].set(title='Mel-Spectrogram')
mfccs = librosa.feature.mfcc(
y=signal, sr=sample_rate, n_fft=frame_length, hop_length=hop_length)
librosa.display.specshow(mfccs, x_axis='time', sr=sample_rate, ax=ax[2])
ax[2].set(title='MFCC')
plt.show()
# Get the accuracy score for the Background vs Foreground Classifier and Random Forest Classifier
def accuracy(dir, rfc, sample_rate, frame_length, hop_length):
# List with the digits for all files
all_digits = []
# List with all the predictions from Random Forest Classifier
predictions = []
    # List with the digits of all files for which the Background vs Foreground Classifier was correct
new_all_digits = []
# Correct number of digits from Background vs Foreground Classifier
correct_number_of_digits = 0
# Correct recognised digits from Random Forest Classifier
correct_recognised_digits = 0
# Get all the .wav files inside the folder
fileList = [f for f in os.listdir(dir) if os.path.splitext(f)[1] == '.wav']
# For each .wav file
for fileName in fileList:
# List with digits of the file
digits = []
# Get the name of the file
label = fileName.split('.wav')[0]
        # Loop over the characters of the file name in steps of 2
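        # (File names are presumably of the form 'd d d.wav', digits separated by a single character, so stepping by 2 picks out each digit)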
for i in range(0, len(label), 2):
# Add each digit to the list
digits.append(label[i])
# Add the digits list to the list for all the files
all_digits.append(digits)
# Execute preprocessing for the input file
signal = preprocessing(dir+fileName, sample_rate)
# Find the Root Mean Square Energy of the input file
energy = rmse(signal, frame_length, hop_length)
# Find the Zero Crossing Rate of the input file
zcr = zero_crossing_rate(signal, frame_length, hop_length)
        # Separate background from foreground and get the split digits
numbers = b_vs_f(signal, energy, zcr, frame_length, hop_length)
# If the number of digits from the Background vs Foreground Classifier is equal to the file's number of digits
if len(numbers) == len(digits):
# For each digit
for digit in digits:
# Add it to the list
new_all_digits.append(digit)
# Add one to the correct number of digits
correct_number_of_digits = correct_number_of_digits + 1
# Predict the digits found from the background and foreground seperation
prediction = recognition(
rfc, numbers, sample_rate, frame_length, hop_length)
# Add the predicted digits to the predictions list
for digit in prediction:
predictions.append(digit)
# Add the correct recognised digits
correct_recognised_digits = correct_recognised_digits + \
accuracy_score(prediction, digits, normalize=False)
# Print the percentage of the accuracy of Background vs Foreground Classifier
print('[Result]: Background vs Foreground Classifier Accuracy: ' +
str("{:.2f}".format((correct_number_of_digits/len(all_digits)) * 100)) + '%')
    # Print the percentage of the accuracy of the Random Forest Classifier
print('[Result]: Random Forest Classifier Accuracy: ' +
str("{:.2f}".format((correct_recognised_digits/len(new_all_digits))*100))+'%')
# Figure for the confusion matrix
disp = ConfusionMatrixDisplay(confusion_matrix=confusion_matrix(
new_all_digits, predictions), display_labels=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
disp.plot(include_values=True)
plt.title('Confusion Matrix')
plt.show()
| dmatsanganis/Spoken_Digit_Recognition_System | Source/functions.py | functions.py | py | 12,179 | python | en | code | 0 | github-code | 13 |
41509322975 | #!/usr/bin/env python
from typing import List
class Solution:
def merge(self, intervals: List[List[int]]) -> List[List[int]]:
if not intervals:
return []
intervals.sort(key=lambda x: x[0])
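        # Sorting by start point costs O(n log n); the single merge pass below is O(n)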
ans = []
prev = intervals[0]
for ele in intervals[1:]:
if prev[1] >= ele[0]:
prev[1] = max(ele[1],prev[1])
else:
ans.append(prev)
prev = ele
ans.append(prev)
return ans
sol = Solution()
print(sol.merge([[1,3],[2,6],[8,10],[15,18]]))
print(sol.merge([[1,4],[4,5]]))
| aadi58002/leetcode | python/merge-intervals.py | merge-intervals.py | py | 635 | python | en | code | 0 | github-code | 13 |
13614765410 | class Rlist(object):
class EmptyList(object):
def __len__(self):
return 0
empty = EmptyList()
def __init__(self, first, rest=empty):
self.first = first
self.rest = rest
def __len__(self):
return 1 + len(self.rest)
def __getitem__(self, index):
if index == 0:
return self.first
elif self.rest is Rlist.empty:
print('Index out of bounds')
else:
return self.rest[index - 1]
def __repr__(self):
if self.rest is Rlist.empty:
return 'Rlist({0})'.format(self.first)
else:
rest = repr(self.rest)
return 'Rlist({0}, {1})'.format(self.first, rest)
def reverse(rlist):
"""Return an Rlist that is the reverse of the original.
>>> Rlist(1).rest is Rlist.empty
True
>>> rlist = Rlist(1, Rlist(2, Rlist(3)))
>>> reverse(rlist)
Rlist(3, Rlist(2, Rlist(1)))
>>> reverse(Rlist(1))
Rlist(1)
"""
## iterative
new = Rlist.empty
while rlist is not Rlist.empty:
new = Rlist(rlist.first, new)
rlist = rlist.rest
return new
    ## recursive (unreachable as written: the iterative version above already returns)
if rlist.rest is not Rlist.empty:
second, last = rlist.rest, rlist
rlist = reverse(second)
second.rest, last.rest = last, Rlist.empty
return rlist
if __name__ == "__main__":
import doctest
doctest.testmod()
| clovery410/mycode | python/chapter-2/lab8-rlist-5.py | lab8-rlist-5.py | py | 1,429 | python | en | code | 1 | github-code | 13 |
15049011542 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 21 00:18:50 2023
@author: osama
"""
import pandas as pd
import numpy as np
from datetime import date, timedelta
import os
# Directory where the script is located
os.chdir('C:/Work/Research/Data Analysis/Tools/Python_Scripts')
from Data_Analyses_Fns import *
Working_dir = 'C:/Work/Research/LOONE'
os.chdir('%s'%Working_dir)
from Stg_Sto_Ar import Stg_Sto_Ar
M3_Yr = 2007
M3_M = 10
M3_D = 1
D2_Yr = 2007
D2_M = 12
D2_D = 30
St_Yr = 2008
St_M = 1
St_D = 1
En_Yr = 2022
En_M = 12
En_D = 31
# To create File (Average_LO_Storage_LORS20082023)
#Read LO Average Stage (ft)
Working_dir = 'C:/Work/Research/LOONE/LOONE_Data_Pre'
os.chdir('%s'%Working_dir)
LO_Stage = pd.read_csv('./LO_Stage_2023.csv')
# Create Column (EOD Stg(ft,NGVD)) in File (SFWMM_Daily_Outputs_LORS20082023)
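# DF_Date_Range (imported from Data_Analyses_Fns) presumably clips a dataframe to the rows whose 'date' falls
# between the given start and end year/month/day; it is used that way throughout this script.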
LO_Stage = DF_Date_Range(LO_Stage, M3_Yr, M3_M, M3_D, En_Yr, En_M, En_D)
LO_Storage = Stg_Sto_Ar.stg2sto(LO_Stage['Average_Stage'], 0)
LO_SA = Stg_Sto_Ar.stg2ar(LO_Stage['Average_Stage'], 0)
LO_Stg_Sto_SA_df = pd.DataFrame(LO_Stage['date'],columns=['date'])
LO_Stg_Sto_SA_df['Stage_ft'] = LO_Stage['Average_Stage']
LO_Stg_Sto_SA_df['Stage_m'] = LO_Stg_Sto_SA_df['Stage_ft'].values * 0.3048 #ft to m
LO_Stg_Sto_SA_df['Storage_acft'] = LO_Storage
LO_Stg_Sto_SA_df['Storage_cmd'] = LO_Stg_Sto_SA_df['Storage_acft'] * 1233.48 #acft to m3/d
LO_Stg_Sto_SA_df['SA_acres'] = LO_SA #acres
#Read flow data cubic meters per day
Working_dir = 'C:/Work/Research/LOONE/Inflow_Data_2023'
os.chdir('%s'%Working_dir)
S65_total = pd.read_csv('./S65E_total.csv')
S65_total["S65E_tot_cmd"] = S65_total[["S65E_S_FLOW_cfs", "S65EX1_S_FLOW_cfs"]].sum(axis=1)
S71_S = pd.read_csv('./S71_S_FLOW_cmd.csv')
S72_S = pd.read_csv('./S72_S_FLOW_cmd.csv')
S84_S = pd.read_csv('./S84_S_FLOW_cmd.csv')
S127_C = pd.read_csv('./S127_C_FLOW_cmd.csv')
S127_P = pd.read_csv('./S127_P_FLOW_cmd.csv')
S129_C = pd.read_csv('./S129_C_FLOW_cmd.csv')
S129_P = pd.read_csv('./S129_PMP_P_FLOW_cmd.csv')
S133_P = pd.read_csv('./S133_P_FLOW_cmd.csv')
S135_C = pd.read_csv('./S135_C_FLOW_cmd.csv')
S135_P = pd.read_csv('./S135_PMP_P_FLOW_cmd.csv')
S154_C = pd.read_csv('./S154_C_FLOW_cmd.csv')
S191_S = pd.read_csv('./S191_S_FLOW_cmd.csv')
S308 = pd.read_csv('./S308.DS_FLOW_cmd.csv')
S351_S = pd.read_csv('./S351_S_FLOW_cmd.csv')
S352_S = pd.read_csv('./S352_S_FLOW_cmd.csv')
S354_S = pd.read_csv('./S354_S_FLOW_cmd.csv')
FISHP = pd.read_csv('./FISHP_FLOW_cmd.csv')
L8 = pd.read_csv('./L8.441_FLOW_cmd.csv')
S2_P = pd.read_csv('./S2_P_FLOW_cmd.csv')
S3_P = pd.read_csv('./S3_P_FLOW_cmd.csv')
S4_P = pd.read_csv('./S4_P_FLOW_cmd.csv')
Working_dir = 'C:/Work/Research/LOONE/Outflow_Data_2023'
os.chdir('%s'%Working_dir)
S77_S = pd.read_csv('./S77_S_FLOW_cmd.csv')
INDUST = pd.read_csv('./INDUST_FLOW_cmd.csv')
#Read Interpolated TP data
# Data_Interpolation Python Script is used to interpolate TP data for all inflow stations addressed below!
Working_dir = 'C:/Work/Research/LOONE/WQ_Jun23'
os.chdir('%s'%Working_dir)
S65_total_TP = pd.read_csv('./water_quality_S65E_PHOSPHATE, TOTAL AS P_Interpolated.csv')
S71_TP = pd.read_csv('./water_quality_S71_PHOSPHATE, TOTAL AS P_Interpolated.csv')
S72_TP = pd.read_csv('./water_quality_S72_PHOSPHATE, TOTAL AS P_Interpolated.csv')
S84_TP = pd.read_csv('./water_quality_S84_PHOSPHATE, TOTAL AS P_Interpolated.csv')
S127_TP = pd.read_csv('./water_quality_S127_PHOSPHATE, TOTAL AS P_Interpolated.csv')
S133_TP = pd.read_csv('./water_quality_S133_PHOSPHATE, TOTAL AS P_Interpolated.csv')
S135_TP = pd.read_csv('./water_quality_S135_PHOSPHATE, TOTAL AS P_Interpolated.csv')
S154_TP = pd.read_csv('./water_quality_S154_PHOSPHATE, TOTAL AS P_Interpolated.csv')
S191_TP = pd.read_csv('./water_quality_S191_PHOSPHATE, TOTAL AS P_Interpolated.csv')
S308_TP = pd.read_csv('./water_quality_S308C_PHOSPHATE, TOTAL AS P_Interpolated.csv')
FISHP_TP = pd.read_csv('./water_quality_FECSR78_PHOSPHATE, TOTAL AS P_Interpolated.csv')
L8_TP = pd.read_csv('./water_quality_CULV10A_PHOSPHATE, TOTAL AS P_Interpolated.csv')
S4_TP = pd.read_csv('./water_quality_S4_PHOSPHATE, TOTAL AS P_Interpolated.csv')
#Set date range for S65 TP
S65_total_TP = DF_Date_Range(S65_total_TP, M3_Yr, M3_M, M3_D, En_Yr, En_M, En_D)
#Set Date Range
Q_names = ['S65_Q','S71_Q', 'S72_Q','S84_Q','S127_C_Q','S127_P_Q','S129_C_Q','S129_P_Q','S133_P_Q','S135_C_Q','S135_P_Q','S154_Q','S191_Q',
'S308_Q','S351_Q','S352_Q','S354_Q','FISHP_Q','L8_Q','S2_P_Q','S3_P_Q','S4_P_Q','S77_Q','INDUST_Q']
Q_list = {'S65_Q':S65_total,'S71_Q':S71_S,'S72_Q':S72_S,'S84_Q':S84_S,'S127_C_Q':S127_C,'S127_P_Q':S127_P,'S129_C_Q':S129_C,
'S129_P_Q':S129_P,'S133_P_Q':S133_P,'S135_C_Q':S135_C,'S135_P_Q':S135_P,'S154_Q':S154_C,'S191_Q':S191_S,'S308_Q':S308,
'S351_Q':S351_S,'S352_Q':S352_S,'S354_Q':S354_S,'FISHP_Q':FISHP,'L8_Q':L8,'S2_P_Q':S2_P,'S3_P_Q':S3_P,'S4_P_Q':S4_P,
'S77_Q':S77_S,'INDUST_Q':INDUST}
# Identify date range
date = pd.date_range(start = '%s/%s/%s'%(M3_M,M3_D,M3_Yr),end = '%s/%s/%s'%(En_M,En_D,En_Yr),freq = 'D')
# Create Flow Dataframe
Flow_df = pd.DataFrame(date, columns=['date'])
for i in range(len(Q_names)):
x = DF_Date_Range(Q_list[Q_names[i]], M3_Yr, M3_M, M3_D, En_Yr, En_M, En_D)
Flow_df['%s'%Q_names[i]] = x.iloc[:,-1:].values
Flow_df['S127_C_Q'] = Flow_df['S127_C_Q'][Flow_df['S127_C_Q']>=0]
Flow_df['S127_C_Q'] = Flow_df['S127_C_Q'].fillna(0)
Flow_df['S127_In'] = Flow_df[["S127_C_Q", "S127_P_Q"]].sum(axis=1)
Flow_df['S129_C_Q'] = Flow_df['S129_C_Q'][Flow_df['S129_C_Q']>=0]
Flow_df['S129_C_Q'] = Flow_df['S129_C_Q'].fillna(0)
Flow_df['S129_In'] = Flow_df[["S129_C_Q", "S129_P_Q"]].sum(axis=1)
Flow_df['S135_C_Q'] = Flow_df['S135_C_Q'][Flow_df['S135_C_Q']>=0]
Flow_df['S135_C_Q'] = Flow_df['S135_C_Q'].fillna(0)
Flow_df['S135_In'] = Flow_df[["S135_C_Q", "S135_P_Q"]].sum(axis=1)
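# Negative flows at S308, S77, S351, S352, S354 and L8 indicate backflow into the lake: flip their sign and
# keep them as inflow series, while the non-negative values are kept as outflow series below.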
Flow_df['S308_In'] = Flow_df['S308_Q'][Flow_df['S308_Q']<0]
Flow_df['S308_In'] = Flow_df['S308_In'] * -1
Flow_df['S308_In'] = Flow_df['S308_In'].fillna(0)
Flow_df['S77_In'] = Flow_df['S77_Q'][Flow_df['S77_Q']<0]
Flow_df['S77_In'] = Flow_df['S77_In'] * -1
Flow_df['S77_In'] = Flow_df['S77_In'].fillna(0)
Flow_df['S351_In'] = Flow_df['S351_Q'][Flow_df['S351_Q']<0]
Flow_df['S351_In'] = Flow_df['S351_In'] * -1
Flow_df['S351_In'] = Flow_df['S351_In'].fillna(0)
Flow_df['S352_In'] = Flow_df['S352_Q'][Flow_df['S352_Q']<0]
Flow_df['S352_In'] = Flow_df['S352_In'] * -1
Flow_df['S352_In'] = Flow_df['S352_In'].fillna(0)
Flow_df['S354_In'] = Flow_df['S354_Q'][Flow_df['S354_Q']<0]
Flow_df['S354_In'] = Flow_df['S354_In'] * -1
Flow_df['S354_In'] = Flow_df['S354_In'].fillna(0)
Flow_df['L8_In'] = Flow_df['L8_Q'][Flow_df['L8_Q']<0]
Flow_df['L8_In'] = Flow_df['L8_In'] * -1
Flow_df['L8_In'] = Flow_df['L8_In'].fillna(0)
Flow_df['S308_Out'] = Flow_df['S308_Q'][Flow_df['S308_Q']>=0]
Flow_df['S308_Out'] = Flow_df['S308_Out'].fillna(0)
Flow_df['S77_Out'] = Flow_df['S77_Q'][Flow_df['S77_Q']>=0]
Flow_df['S77_Out'] = Flow_df['S77_Out'].fillna(0)
Flow_df['INDUST_Out'] = Flow_df['INDUST_Q'][Flow_df['INDUST_Q']>=0]
Flow_df['INDUST_Out'] = Flow_df['INDUST_Out'].fillna(0)
Flow_df['S351_Out'] = Flow_df['S351_Q'][Flow_df['S351_Q']>=0]
Flow_df['S351_Out'] = Flow_df['S351_Out'].fillna(0)
Flow_df['S352_Out'] = Flow_df['S352_Q'][Flow_df['S352_Q']>=0]
Flow_df['S352_Out'] = Flow_df['S352_Out'].fillna(0)
Flow_df['S354_Out'] = Flow_df['S354_Q'][Flow_df['S354_Q']>=0]
Flow_df['S354_Out'] = Flow_df['S354_Out'].fillna(0)
Flow_df['L8_Out'] = Flow_df['L8_Q'][Flow_df['L8_Q']>=0]
Flow_df['L8_Out'] = Flow_df['L8_Out'].fillna(0)
Flow_df['Inflows'] = Flow_df[["S65_Q", "S71_Q",'S72_Q','S84_Q','S127_In','S129_In','S133_P_Q','S135_In',
'S154_Q','S191_Q','S308_In','S77_In','S351_In','S352_In','S354_In','L8_In','FISHP_Q',
'S2_P_Q','S3_P_Q','S4_P_Q']].sum(axis=1)
Flow_df['Netflows'] = Flow_df['Inflows'] - Flow_df['INDUST_Out']
Flow_df['Outflows'] = Flow_df[["S308_Out", "S77_Out",'S351_Out','S352_Out','S354_Out','INDUST_Out','L8_Out']].sum(axis=1)
TP_names = ['S65_TP','S71_TP','S72_TP','S84_TP','S127_TP','S133_TP','S135_TP','S154_TP','S191_TP','S308_TP','FISHP_TP','L8_TP','S4_TP']
TP_list = {'S65_TP':S65_total_TP,'S71_TP':S71_TP,'S72_TP':S72_TP,'S84_TP':S84_TP,'S127_TP':S127_TP,'S133_TP':S133_TP,'S135_TP':S135_TP,
'S154_TP':S154_TP,'S191_TP':S191_TP,'S308_TP':S308_TP,'FISHP_TP':FISHP_TP,'L8_TP':L8_TP,'S4_TP':S4_TP}
#Create TP Concentrations Dataframe
TP_df = pd.DataFrame(date, columns=['date'])
for i in range(len(TP_names)):
y = DF_Date_Range(TP_list[TP_names[i]], M3_Yr, M3_M, M3_D, En_Yr, En_M, En_D)
TP_df['%s'%TP_names[i]] = y.iloc[:,-1:].values
#Determine TP Loads (mg)
TP_Loads_In = pd.DataFrame(date, columns=['date'])
TP_Loads_In['S65_P_Ld'] = Flow_df['S65_Q'] * TP_df['S65_TP'] * 1000 #(m3/d * mg/L * 1000 = mg/d)
TP_Loads_In['S71_P_Ld'] = Flow_df['S71_Q'] * TP_df['S71_TP'] * 1000
TP_Loads_In['S72_P_Ld'] = Flow_df['S72_Q'] * TP_df['S72_TP'] * 1000
TP_Loads_In['S84_P_Ld'] = Flow_df['S84_Q'] * TP_df['S84_TP'] * 1000
TP_Loads_In['S127_P_Ld'] = Flow_df['S127_In'] * TP_df['S127_TP'] * 1000
TP_Loads_In['S133_P_Ld'] = Flow_df['S133_P_Q'] * TP_df['S133_TP'] * 1000
TP_Loads_In['S135_P_Ld'] = Flow_df['S135_In'] * TP_df['S135_TP'] * 1000
TP_Loads_In['S154_P_Ld'] = Flow_df['S154_Q'] * TP_df['S154_TP'] * 1000
TP_Loads_In['S191_P_Ld'] = Flow_df['S191_Q'] * TP_df['S191_TP'] * 1000
TP_Loads_In['S308_P_Ld'] = Flow_df['S308_In'] * TP_df['S308_TP'] * 1000
TP_Loads_In['FISHP_P_Ld'] = Flow_df['FISHP_Q'] * TP_df['FISHP_TP'] * 1000
TP_Loads_In['L8_P_Ld'] = Flow_df['L8_In'] * TP_df['L8_TP'] * 1000
TP_Loads_In['S4_P_Ld'] = Flow_df['S4_P_Q'] * TP_df['S4_TP'] * 1000
#Calculate the total External Loads to Lake Okeechobee
TP_Loads_In['External_P_Ld_mg'] = TP_Loads_In.sum(axis=1)
# Create File (LO_External_Loadings_3MLag_LORS20082023)
TP_Loads_In_3MLag = DF_Date_Range(TP_Loads_In, M3_Yr, M3_M, M3_D, En_Yr, En_M, En_D)
TP_Loads_In_3MLag_df = pd.DataFrame(TP_Loads_In_3MLag['date'],columns=['date'])
TP_Loads_In_3MLag_df['External_Loads'] = TP_Loads_In_3MLag['External_P_Ld_mg']
#Create File (LO_Inflows_BK_LORS20082023)
LO_Inflows_BK = pd.DataFrame(Flow_df['date'],columns=['date'])
LO_Inflows_BK['Inflows_cmd'] = Flow_df['Inflows']
LO_Inflows_BK = DF_Date_Range(LO_Inflows_BK, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
#Create File (Outflows_consd_20082023)
Outflows_consd= pd.DataFrame(Flow_df['date'],columns=['date'])
Outflows_consd['Outflows_acft'] = Flow_df['Outflows']/1233.48 #acft
Outflows_consd = DF_Date_Range(Outflows_consd, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
# Create File (INDUST_Outflow_20082023)
INDUST_Outflows = pd.DataFrame(Flow_df['date'],columns=['date'])
INDUST_Outflows['INDUST'] = Flow_df['INDUST_Out']
#Create File (Netflows_acft_LORS20082023)
# This is also Column (Net Inflow) in File (SFWMM_Daily_Outputs_LORS20082023)
Netflows = pd.DataFrame(Flow_df['date'],columns=['date'])
Netflows['Netflows_acft'] = Flow_df['Netflows']/1233.48 #acft
Netflows = DF_Date_Range(Netflows, D2_Yr, D2_M, D2_D, En_Yr, En_M, En_D)
# Create File (TotalQWCA_Obs_LORS20082023)
# This is also Column (RegWCA) in File (SFWMM_Daily_Outputs_LORS20082023)
TotalQWCA = pd.DataFrame(Flow_df['date'],columns=['date'])
TotalQWCA['S351_Out'] = Flow_df['S351_Out'] * (35.3147/86400) #cmd to cfs
TotalQWCA['S354_Out'] = Flow_df['S354_Out'] * (35.3147/86400)
TotalQWCA['RegWCA_cfs'] = TotalQWCA.sum(axis=1) #cfs
TotalQWCA['RegWCA_acft'] = TotalQWCA['RegWCA_cfs'] *1.9835 #acft
TotalQWCA = DF_Date_Range(TotalQWCA, D2_Yr, D2_M, D2_D, En_Yr, En_M, En_D)
# Create Column (RegL8C51) in the File (SFWMM_Daily_Outputs_LORS20082023)
L8C51 = pd.DataFrame(Flow_df['date'],columns=['date'])
L8C51['S352_Out'] = Flow_df['S352_Out'].values * (35.3147/86400) #cmd to cfs
L8C51['L8_O_cfs'] = Flow_df['L8_Out'].values * (35.3147/86400) #cmd to cfs
L8C51['L8C51_cfs'] = L8C51.sum(axis=1) #cfs
Working_dir = 'C:/Work/Research/LOONE/Final_Data_20082023'
os.chdir('%s'%Working_dir)
L8C51.to_csv('./L8C51.csv')
#C43 RO C44 RO
#Create Files (C43RO_LORS20082023, C43RO_Monthly_LORS20082023,C44RO_LORS20082023, C44RO_Monthly_LORS20082023)
#As well as Columns C43Runoff and C44Runoff in File (SFWMM_Daily_Outputs_LORS20082023)
Working_dir = 'C:/Work/Research/LOONE/Outflow_Data_2023'
os.chdir('%s'%Working_dir)
S79 = pd.read_csv('./S79.csv')
S79 = S79.fillna(0)
S80 = pd.read_csv('./S80.csv')
S80 = S80.fillna(0)
S79['Q_cmd'] = S79['S79_TOT_FLOW_cfs'] * 0.0283168466 * 86400
S80['Q_cmd'] = S80['S80_S_FLOW_cfs'] * 0.0283168466 * 86400
S79 = DF_Date_Range(S79, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
S80 = DF_Date_Range(S80, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
C43RO_df = pd.DataFrame(S79['date'], columns=['date'])
C44RO_df = pd.DataFrame(S79['date'], columns=['date'])
C43RO = np.zeros(len(C43RO_df.index))
C44RO = np.zeros(len(C44RO_df.index))
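# Back-calculate basin runoff: C43 runoff is estimated as S79 flow minus the S77 release plus any S77 backflow
# (and analogously C44 from S80 and S308), floored at zero.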
for i in range(len(C44RO_df.index)):
if S79['Q_cmd'].iloc[i] - Flow_df['S77_Out'].iloc[i] + Flow_df['S77_In'].iloc[i] < 0:
C43RO[i] = 0
else:
C43RO[i] = S79['Q_cmd'].iloc[i] - Flow_df['S77_Out'].iloc[i] + Flow_df['S77_In'].iloc[i]
for i in range(len(C44RO_df.index)):
if S80['Q_cmd'].iloc[i] - Flow_df['S308_Out'].iloc[i] + Flow_df['S308_In'].iloc[i] < 0:
C44RO[i] = 0
else:
C44RO[i] = S80['Q_cmd'].iloc[i] - Flow_df['S308_Out'].iloc[i] + Flow_df['S308_In'].iloc[i]
C43RO_df['C43RO_cmd'] = C43RO
C44RO_df['C44RO_cmd'] = C44RO
C43RO_df['C43RO_cfs'] = C43RO_df['C43RO_cmd']/(0.0283168466 * 86400)
C44RO_df['C44RO_cfs'] = C44RO_df['C44RO_cmd']/(0.0283168466 * 86400)
Working_dir = 'C:/Work/Research/LOONE/Final_Data_20082023'
os.chdir('%s'%Working_dir)
C43RO_df.to_csv('./C43RO.csv')
C44RO_df.to_csv('./C44RO.csv')
C43RO_df = C43RO_df.set_index(C43RO_df['date'])
C44RO_df = C44RO_df.set_index(C44RO_df['date'])
C43RO_df.index = pd.to_datetime(C43RO_df.index, unit = 'ns')
C44RO_df.index = pd.to_datetime(C44RO_df.index, unit = 'ns')
C43Mon = C43RO_df.resample('M').mean()
C44Mon = C44RO_df.resample('M').mean()
C43Mon.to_csv('./C43RO_Monthly_LORS20082023.csv')
C44Mon.to_csv('./C44RO_Monthly_LORS20082023.csv')
#SLTRIB
#Create File (SLTRIB_Monthly_LORS20082023)
Working_dir = 'C:/Work/Research/LOONE/LOONE_Data_Pre'
os.chdir('%s'%Working_dir)
S48_S = pd.read_csv('./S48_S.csv')
S49_S = pd.read_csv('./S49_S.csv')
S48_S = DF_Date_Range(S48_S, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
S49_S = DF_Date_Range(S49_S, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
SLTRIB = pd.DataFrame(S48_S['date'],columns=['date'])
SLTRIB['SLTRIB_cmd'] = S48_S['S48_S_FLOW_cmd'] + S49_S['S49_S_FLOW_cmd']
SLTRIB['SLTRIB_cfs'] = SLTRIB['SLTRIB_cmd']/(0.0283168466 * 86400)
SLTRIB = SLTRIB.set_index(SLTRIB['date'])
SLTRIB.index = pd.to_datetime(SLTRIB.index, unit = 'ns')
SLTRIBMon = SLTRIB.resample('M').mean()
SLTRIB = SLTRIB.reset_index()
Working_dir = 'C:/Work/Research/LOONE/Final_Data_20082023'
os.chdir('%s'%Working_dir)
SLTRIB.to_csv('./SLTRIB.csv')
SLTRIBMon.to_csv('./SLTRIB_Monthly_LORS20082023.csv')
Basin_RO = pd.DataFrame(SLTRIBMon.index,columns=['date'])
Basin_RO['SLTRIB'] = SLTRIBMon['SLTRIB_cfs'].values * 1.9835 #cfs to acft
Basin_RO['C44RO'] = C44Mon['C44RO_cfs'].values * 86400
Basin_RO['C43RO'] = C43Mon['C43RO_cfs'].values * 86400
Working_dir = 'C:/Work/Research/LOONE/Final_Data_20082023'
os.chdir('%s'%Working_dir)
Basin_RO.to_csv('./Basin_RO_inputs_LORS20082023.csv')
#EAA MIA RUNOFF
#Create File (EAA_MIA_RUNOFF_Inputs_LORS20082023)
Working_dir = 'C:/Work/Research/LOONE/LOONE_Data_Pre'
os.chdir('%s'%Working_dir)
S3_Miami_data = pd.read_csv('./S3_Miami.csv')
S3_Miami_data = DF_Date_Range(S3_Miami_data, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
S3_Miami = S3_Miami_data['S3_FLOW_cfs']
S2_NNR_data = pd.read_csv('./S2_NNR.csv')
S2_NNR_data = DF_Date_Range(S2_NNR_data, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
S2_NNR = S2_NNR_data['S2 NNR_FLOW_cfs']
EAA_MIA_RO = pd.DataFrame(date,columns=['date'])
EAA_MIA_RO['MIA'] = S3_Miami.values
EAA_MIA_RO['NNR'] = S2_NNR.values
EAA_MIA_RO['WPB'] = Flow_df['S352_Out']/(0.0283168466 * 86400)
EAA_MIA_RO['S2PMP'] = Flow_df['S2_P_Q']/(0.0283168466 * 86400)
EAA_MIA_RO['S3PMP'] = Flow_df['S3_P_Q']/(0.0283168466 * 86400)
Working_dir = 'C:/Work/Research/LOONE/Final_Data_20082023'
os.chdir('%s'%Working_dir)
EAA_MIA_RO.to_csv('./EAA_MIA_RUNOFF_Inputs_LORS20082023.csv')
#Weekly Tributary Conditions
#Create File (Trib_cond_wkly_data_LORS20082023)
Working_dir = 'C:/Work/Research/LOONE/LOONE_Data_Pre'
os.chdir('%s'%Working_dir)
#Net RF Inch
RF_data = pd.read_csv('./LAKE_RAINFALL_DATA_2008-2023.csv')
ET_data = pd.read_csv('./LOONE_AVERAGE_ETPI_DATA_2008-2023.csv')
Net_RF = pd.DataFrame(RF_data['date'], columns=['date'])
Net_RF['NetRF_In'] = RF_data['Avg_RF_In'] - ET_data['Avg_ET_In']
Net_RF = Net_RF.set_index(['date'])
Net_RF.index = pd.to_datetime(Net_RF.index, unit = 'ns')
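# 'W-FRI' resampling buckets the daily records into weeks ending on Friday (rainfall is summed; the flow series below are averaged).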
Net_RF_Weekly = Net_RF.resample('W-FRI').sum()
#Net Inflows cfs
Net_Inflows = pd.DataFrame(Flow_df['date'], columns=['date'])
Net_Inflows = DF_Date_Range(Net_Inflows, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
Net_Inflows['Net_Inflows'] = Flow_df['Netflows']/(0.0283168466 * 86400) #cmd to cfs
Net_Inflows = Net_Inflows.set_index(['date'])
Net_Inflows.index = pd.to_datetime(Net_Inflows.index, unit = 'ns')
Net_Inflow_Weekly = Net_Inflows.resample('W-FRI').mean()
# S65 cfs
S65E = pd.DataFrame(Flow_df['date'], columns=['date'])
S65E = DF_Date_Range(S65E, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
S65E['S65E'] = Flow_df['S65_Q']/(0.0283168466 * 86400) #cmd to cfs
S65E = S65E.set_index(['date'])
S65E.index = pd.to_datetime(S65E.index, unit = 'ns')
S65E_Weekly = S65E.resample('W-FRI').mean()
# PI
#TODO
#This is prepared manually
#Weekly data is downloaded from https://www.ncei.noaa.gov/access/monitoring/weekly-palmers/time-series/0804
#State:Florida Division:4.South Central
PI = pd.DataFrame(S65E_Weekly.index,columns=['date'])
PI_data = pd.read_csv('./PI_2008-2023.csv')
PI['PI'] = PI_data['PI']
Trib_Cond_Wkly = pd.DataFrame(S65E_Weekly.index,columns=['date'])
Trib_Cond_Wkly['NetRF'] = Net_RF_Weekly['NetRF_In'].values
Trib_Cond_Wkly['NetInf'] = Net_Inflow_Weekly['Net_Inflows'].values
Trib_Cond_Wkly['S65E'] = S65E_Weekly['S65E'].values
Trib_Cond_Wkly['Palmer'] = PI['PI'].values
Working_dir = 'C:/Work/Research/LOONE/Final_Data_20082023'
os.chdir('%s'%Working_dir)
Trib_Cond_Wkly.to_csv('./Trib_cond_wkly_data_LORS20082023.csv')
#Wind Speed
#Create File (LOWS)
Working_dir = 'C:/Work/Research/LOONE/LOONE_Data_Pre'
os.chdir('%s'%Working_dir)
L001WS = pd.read_csv('./L001_WNDS_MPH.csv')
L005WS = pd.read_csv('./L005_WNDS_MPH.csv')
L006WS = pd.read_csv('./L006_WNDS_MPH.csv')
LZ40WS = pd.read_csv('./LZ40_WNDS_MPH.csv')
L001WS = DF_Date_Range(L001WS, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
L005WS = DF_Date_Range(L005WS, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
L006WS = DF_Date_Range(L006WS, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
LZ40WS = DF_Date_Range(LZ40WS, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
LOWS = pd.DataFrame(L001WS['date'], columns=['date'])
LOWS['L001WS'] = L001WS['L001_WNDS_MPH']
LOWS['L005WS'] = L005WS['L005_WNDS_MPH']
LOWS['L006WS'] = L006WS['L006_WNDS_MPH']
LOWS['LZ40WS'] = LZ40WS['LZ40_WNDS_MPH']
LOWS['LO_Avg_WS_MPH'] = LOWS.mean(axis=1)
LOWS.to_csv('./LOWS.csv')
#RFVol acft
#Create File (RF_Volume_LORS20082023)
RFVol = pd.DataFrame(RF_data['date'],columns=['date'])
RFVol['RFVol_acft'] = (RF_data['Avg_RF_In'].values/12) * LO_Stg_Sto_SA_df['SA_acres'].values
Working_dir = 'C:/Work/Research/LOONE/Final_Data_20082023'
os.chdir('%s'%Working_dir)
RFVol.to_csv('./RFVol_LORS_20082023.csv')
#ETVol acft
#Create File (ETVol_LORS20082023)
ETVol = pd.DataFrame(ET_data['date'],columns=['date'])
ETVol['ETVol_acft'] = (ET_data['Avg_ET_In'].values/12) * LO_Stg_Sto_SA_df['SA_acres'].values
Working_dir = 'C:/Work/Research/LOONE/Final_Data_20082023'
os.chdir('%s'%Working_dir)
ETVol.to_csv('./ETVol_LORS_20082023.csv')
#WCA Stages
#Create File (WCA_Stages_Inputs_LORS20082023)
Working_dir = 'C:/Work/Research/LOONE/LOONE_Data_Pre'
os.chdir('%s'%Working_dir)
Stg_3ANW = pd.read_csv('./Stg_3ANW.csv')
Stg_3ANW = DF_Date_Range(Stg_3ANW, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
Stg_2A17 = pd.read_csv('./Stg_2A17.csv')
Stg_2A17 = DF_Date_Range(Stg_2A17, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
Stg_3A3 = pd.read_csv('./Stg_3A3.csv')
Stg_3A3 = DF_Date_Range(Stg_3A3, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
Stg_3A4 = pd.read_csv('./Stg_3A4.csv')
Stg_3A4 = DF_Date_Range(Stg_3A4, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
Stg_3A28 = pd.read_csv('./Stg_3A28.csv')
Stg_3A28 = DF_Date_Range(Stg_3A28, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
WCA_Stg = pd.DataFrame(Stg_3A28['date'],columns=['date'])
WCA_Stg['3A-NW'] = Stg_3ANW['3A-NW_STG_ft NGVD29'].values
WCA_Stg['2A-17'] = Stg_2A17['2-17_GAGHT_feet'].values
WCA_Stg['3A-3'] = Stg_3A3['3-63_GAGHT_feet'].values
WCA_Stg['3A-4'] = Stg_3A4['3-64_GAGHT_feet'].values
WCA_Stg['3A-28'] = Stg_3A28['3-65_GAGHT_feet'].values
Working_dir = 'C:/Work/Research/LOONE/Final_Data_20082023'
os.chdir('%s'%Working_dir)
WCA_Stg.to_csv('./WCA_Stages_Inputs_LORS20082023.csv')
# Predict Water Temp Function of Air Temp
Working_dir = 'C:/Work/Research/LOONE/LOONE_Data_Pre'
os.chdir('%s'%Working_dir)
Water_Temp_data = pd.read_csv('./Temp_Avg.csv')
L001_AirT = pd.read_csv('./L001_AirT.csv')
L001_AirT = DF_Date_Range(L001_AirT, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
L005_AirT = pd.read_csv('./L005_AirT.csv')
L005_AirT = DF_Date_Range(L005_AirT, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
L006_AirT = pd.read_csv('./L006_AirT.csv')
L006_AirT = DF_Date_Range(L006_AirT, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
LZ40_AirT = pd.read_csv('./LZ40_AirT.csv')
LZ40_AirT = DF_Date_Range(LZ40_AirT, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
Water_Temp_data['L001_WaterT'] = Water_Temp_data[['L001_H2OT_C_1', 'L001_H2OT_C_2', 'L001_H2OT_C_3']].mean(axis=1)
Water_Temp_data['L005_WaterT'] = Water_Temp_data[['L005_H2OT_C_1', 'L005_H2OT_C_2', 'L005_H2OT_C_3']].mean(axis=1)
Water_Temp_data['L006_WaterT'] = Water_Temp_data[['L006_H2OT_C_1', 'L006_H2OT_C_2', 'L006_H2OT_C_3']].mean(axis=1)
Water_Temp_data['LZ40_WaterT'] = Water_Temp_data[['LZ40_H2OT_C_1', 'LZ40_H2OT_C_2', 'LZ40_H2OT_C_3']].mean(axis=1)
Water_Temp_data['WaterT_Mean'] = Water_Temp_data[['L001_WaterT', 'L005_WaterT', 'L006_WaterT', 'LZ40_WaterT']].mean(axis=1) # station-average observed water temperature, used for gap filling below
Water_Temp_data = DF_Date_Range(Water_Temp_data, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
WaterT_pred_df = pd.DataFrame(L001_AirT['date'], columns=['date'])
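# Station-specific linear fits (coefficients presumably estimated offline): predicted water temperature = intercept + slope * air temperature.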
WaterT_pred_df['L001_WaterT_pred'] = 1.862667 + 0.936899 * L001_AirT['L001_AIRT_Degrees Celsius'].values
WaterT_pred_df['L005_WaterT_pred'] = 1.330211 + 0.909713 * L005_AirT['L005_AIRT_Degrees Celsius'].values
WaterT_pred_df['L006_WaterT_pred'] = -0.88564 + 1.01585 * L006_AirT['L006_AIRT_Degrees Celsius'].values
WaterT_pred_df['LZ40_WaterT_pred'] = 0.388231 + 0.980154 * LZ40_AirT['LZ40_AIRT_Degrees Celsius'].values
WaterT_pred_df['WaterT_pred_Mean'] = WaterT_pred_df[['L001_WaterT_pred','L005_WaterT_pred','L006_WaterT_pred','LZ40_WaterT_pred']].mean(axis=1)
WaterT_pred_df_1 = DF_Date_Range(WaterT_pred_df, St_Yr, St_M, St_D, 2020, 8, 25)
WaterT_pred_df_2 = DF_Date_Range(WaterT_pred_df, 2020, 8, 26, En_Yr, En_M, En_D)
Filled_WaterT_1 = np.zeros(len(WaterT_pred_df_1.index))
Filled_WaterT_2 = np.zeros(len(WaterT_pred_df_2.index))
for i in range(len(WaterT_pred_df_1.index)):
    if pd.isna(Water_Temp_data['WaterT_Mean'].iloc[i]):
        Filled_WaterT_1[i] = WaterT_pred_df_1['WaterT_pred_Mean'].iloc[i]
    else:
        Filled_WaterT_1[i] = Water_Temp_data['WaterT_Mean'].iloc[i]
Filled_WaterT_2 = WaterT_pred_df_2['WaterT_pred_Mean']
Filled_WaterT_1df = pd.DataFrame(WaterT_pred_df_1['date'],columns=['date'])
Filled_WaterT_2df = pd.DataFrame(WaterT_pred_df_2['date'],columns=['date'])
Filled_WaterT_1df['Water_T'] = Filled_WaterT_1
Filled_WaterT_2df['Water_T'] = Filled_WaterT_2
Filled_WaterT = pd.concat([Filled_WaterT_1df, Filled_WaterT_2df]).reset_index(drop= True)
Filled_WaterT.to_csv('./Filled_WaterT_20082023.csv')
# TP Observations in Lake
Working_dir = 'C:/Work/Research/Data Analysis/Lake_O_water_Q_data/LO_WQ_June2023'
os.chdir('%s'%Working_dir)
L001_TP = pd.read_csv('./water_quality_L001_PHOSPHATE, TOTAL AS P.csv')
L004_TP = pd.read_csv('./water_quality_L004_PHOSPHATE, TOTAL AS P.csv')
L005_TP = pd.read_csv('./water_quality_L005_PHOSPHATE, TOTAL AS P.csv')
L006_TP = pd.read_csv('./water_quality_L006_PHOSPHATE, TOTAL AS P.csv')
L007_TP = pd.read_csv('./water_quality_L007_PHOSPHATE, TOTAL AS P.csv')
L008_TP = pd.read_csv('./water_quality_L008_PHOSPHATE, TOTAL AS P.csv')
LZ40_TP = pd.read_csv('./water_quality_LZ40_PHOSPHATE, TOTAL AS P.csv')
LO_TP_data = pd.merge(L001_TP,L004_TP, how = 'left', on = 'date')
LO_TP_data = pd.merge(LO_TP_data,L005_TP, how = 'left', on = 'date')
LO_TP_data = pd.merge(LO_TP_data,L006_TP, how = 'left', on = 'date')
LO_TP_data = pd.merge(LO_TP_data,L007_TP, how = 'left', on = 'date')
LO_TP_data = pd.merge(LO_TP_data,L008_TP, how = 'left', on = 'date')
LO_TP_data = pd.merge(LO_TP_data,LZ40_TP, how = 'left', on = 'date')
LO_TP_data = LO_TP_data.loc[:,~LO_TP_data.columns.str.startswith('Unnamed')]
LO_TP_data['Mean_TP'] = LO_TP_data.mean(axis=1)
LO_TP_data = LO_TP_data.set_index(['date'])
LO_TP_data.index = pd.to_datetime(LO_TP_data.index, unit = 'ns')
LO_TP_Monthly = LO_TP_data.resample('M').mean()
LO_TP_Monthly.to_csv('./LO_TP_Monthly.csv')
# Interpolated TP Observations in Lake
Working_dir = 'C:/Work/Research/Data Analysis/Lake_O_water_Q_data/LO_WQ_June2023'
os.chdir('%s'%Working_dir)
L001_TP_Inter = pd.read_csv('./water_quality_L001_PHOSPHATE, TOTAL AS P_Interpolated.csv')
L004_TP_Inter = pd.read_csv('./water_quality_L004_PHOSPHATE, TOTAL AS P_Interpolated.csv')
L005_TP_Inter = pd.read_csv('./water_quality_L005_PHOSPHATE, TOTAL AS P_Interpolated.csv')
L006_TP_Inter = pd.read_csv('./water_quality_L006_PHOSPHATE, TOTAL AS P_Interpolated.csv')
L007_TP_Inter = pd.read_csv('./water_quality_L007_PHOSPHATE, TOTAL AS P_Interpolated.csv')
L008_TP_Inter = pd.read_csv('./water_quality_L008_PHOSPHATE, TOTAL AS P_Interpolated.csv')
LZ40_TP_Inter = pd.read_csv('./water_quality_LZ40_PHOSPHATE, TOTAL AS P_Interpolated.csv')
LO_TP_data_Inter = pd.merge(L001_TP_Inter,L004_TP_Inter, how = 'left', on = 'date')
LO_TP_data_Inter = pd.merge(LO_TP_data_Inter,L005_TP_Inter, how = 'left', on = 'date')
LO_TP_data_Inter = pd.merge(LO_TP_data_Inter,L006_TP_Inter, how = 'left', on = 'date')
LO_TP_data_Inter = pd.merge(LO_TP_data_Inter,L007_TP_Inter, how = 'left', on = 'date')
LO_TP_data_Inter = pd.merge(LO_TP_data_Inter,L008_TP_Inter, how = 'left', on = 'date')
LO_TP_data_Inter = pd.merge(LO_TP_data_Inter,LZ40_TP_Inter, how = 'left', on = 'date')
LO_TP_data_Inter = LO_TP_data_Inter.loc[:,~LO_TP_data_Inter.columns.str.startswith('Unnamed')]
LO_TP_data_Inter['Mean_TP'] = LO_TP_data_Inter.mean(axis=1)
LO_TP_data_Inter = LO_TP_data_Inter.set_index(['date'])
LO_TP_data_Inter.index = pd.to_datetime(LO_TP_data_Inter.index, unit = 'ns')
LO_TP_Monthly_Inter = LO_TP_data_Inter.resample('M').mean()
Max = LO_TP_Monthly_Inter.max(axis=1)
Min = LO_TP_Monthly_Inter.min(axis=1)
LO_TP_Monthly_Inter['Max'] = Max.values
LO_TP_Monthly_Inter['Min'] = Min.values
Working_dir = 'C:/Work/Research/LOONE/Final_Data_20082023'
os.chdir('%s'%Working_dir)
LO_TP_Monthly_Inter.to_csv('./LO_TP_Monthly.csv')
# Interpolated OP Observations in Lake
# Create File (LO_Avg_OP_2008-2022)
Working_dir = 'C:/Work/Research/Data Analysis/Lake_O_water_Q_data/LO_WQ_June2023'
os.chdir('%s'%Working_dir)
L001_OP_Inter = pd.read_csv('./water_quality_L001_PHOSPHATE, ORTHO AS P_Interpolated.csv')
L004_OP_Inter = pd.read_csv('./water_quality_L004_PHOSPHATE, ORTHO AS P_Interpolated.csv')
L005_OP_Inter = pd.read_csv('./water_quality_L005_PHOSPHATE, ORTHO AS P_Interpolated.csv')
L006_OP_Inter = pd.read_csv('./water_quality_L006_PHOSPHATE, ORTHO AS P_Interpolated.csv')
L007_OP_Inter = pd.read_csv('./water_quality_L007_PHOSPHATE, ORTHO AS P_Interpolated.csv')
L008_OP_Inter = pd.read_csv('./water_quality_L008_PHOSPHATE, ORTHO AS P_Interpolated.csv')
LZ40_OP_Inter = pd.read_csv('./water_quality_LZ40_PHOSPHATE, ORTHO AS P_Interpolated.csv')
LO_OP_data_Inter = pd.merge(L001_OP_Inter,L004_OP_Inter, how = 'left', on = 'date')
LO_OP_data_Inter = pd.merge(LO_OP_data_Inter,L005_OP_Inter, how = 'left', on = 'date')
LO_OP_data_Inter = pd.merge(LO_OP_data_Inter,L006_OP_Inter, how = 'left', on = 'date')
LO_OP_data_Inter = pd.merge(LO_OP_data_Inter,L007_OP_Inter, how = 'left', on = 'date')
LO_OP_data_Inter = pd.merge(LO_OP_data_Inter,L008_OP_Inter, how = 'left', on = 'date')
LO_OP_data_Inter = pd.merge(LO_OP_data_Inter,LZ40_OP_Inter, how = 'left', on = 'date')
LO_OP_data_Inter = LO_OP_data_Inter.loc[:,~LO_OP_data_Inter.columns.str.startswith('Unnamed')]
LO_OP_data_Inter['Mean_OP'] = LO_OP_data_Inter.mean(axis=1)
LO_OP_data_Inter = DF_Date_Range(LO_OP_data_Inter, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
Working_dir = 'C:/Work/Research/LOONE/Final_Data_20082023'
os.chdir('%s'%Working_dir)
LO_OP_data_Inter.to_csv('./LO_OP.csv')
# Interpolated NH4 Observations in Lake
#Create File (LO_Avg_NH4_2008-2022)
Working_dir = 'C:/Work/Research/Data Analysis/Lake_O_water_Q_data/LO_WQ_June2023'
os.chdir('%s'%Working_dir)
L001_NH4_Inter = pd.read_csv('./water_quality_L001_AMMONIA-N_Interpolated.csv')
L004_NH4_Inter = pd.read_csv('./water_quality_L004_AMMONIA-N_Interpolated.csv')
L005_NH4_Inter = pd.read_csv('./water_quality_L005_AMMONIA-N_Interpolated.csv')
L006_NH4_Inter = pd.read_csv('./water_quality_L006_AMMONIA-N_Interpolated.csv')
L007_NH4_Inter = pd.read_csv('./water_quality_L007_AMMONIA-N_Interpolated.csv')
L008_NH4_Inter = pd.read_csv('./water_quality_L008_AMMONIA-N_Interpolated.csv')
LZ40_NH4_Inter = pd.read_csv('./water_quality_LZ40_AMMONIA-N_Interpolated.csv')
LO_NH4_data_Inter = pd.merge(L001_NH4_Inter,L004_NH4_Inter, how = 'left', on = 'date')
LO_NH4_data_Inter = pd.merge(LO_NH4_data_Inter,L005_NH4_Inter, how = 'left', on = 'date')
LO_NH4_data_Inter = pd.merge(LO_NH4_data_Inter,L006_NH4_Inter, how = 'left', on = 'date')
LO_NH4_data_Inter = pd.merge(LO_NH4_data_Inter,L007_NH4_Inter, how = 'left', on = 'date')
LO_NH4_data_Inter = pd.merge(LO_NH4_data_Inter,L008_NH4_Inter, how = 'left', on = 'date')
LO_NH4_data_Inter = pd.merge(LO_NH4_data_Inter,LZ40_NH4_Inter, how = 'left', on = 'date')
LO_NH4_data_Inter.to_csv('./LO_NH4_Inter.csv')
#Read clean LO_NH4 data
LO_NH4_Clean_Inter = pd.read_csv('./LO_NH4_Inter.csv')
LO_NH4_Clean_Inter['Mean_NH4'] = LO_NH4_Clean_Inter.mean(axis=1)
LO_NH4_Clean_Inter.to_csv('./LO_NH4_Clean_daily.csv')
LO_NH4_Clean_Inter = LO_NH4_Clean_Inter.set_index(['date'])
LO_NH4_Clean_Inter.index = pd.to_datetime(LO_NH4_Clean_Inter.index, unit = 'ns')
LO_NH4_Monthly_Inter = LO_NH4_Clean_Inter.resample('M').mean()
LO_NH4_Monthly_Inter.to_csv('./LO_NH4_Monthly_Inter.csv')
# Interpolated NO Observations in Lake
#Create File (LO_Avg_NO_2008-2022) and (LO_NO_Obs20082022)
Working_dir = 'C:/Work/Research/Data Analysis/Lake_O_water_Q_data/LO_WQ_June2023'
os.chdir('%s'%Working_dir)
L001_NO_Inter = pd.read_csv('./water_quality_L001_NITRATE+NITRITE-N_Interpolated.csv')
L004_NO_Inter = pd.read_csv('./water_quality_L004_NITRATE+NITRITE-N_Interpolated.csv')
L005_NO_Inter = pd.read_csv('./water_quality_L005_NITRATE+NITRITE-N_Interpolated.csv')
L006_NO_Inter = pd.read_csv('./water_quality_L006_NITRATE+NITRITE-N_Interpolated.csv')
L007_NO_Inter = pd.read_csv('./water_quality_L007_NITRATE+NITRITE-N_Interpolated.csv')
L008_NO_Inter = pd.read_csv('./water_quality_L008_NITRATE+NITRITE-N_Interpolated.csv')
LZ40_NO_Inter = pd.read_csv('./water_quality_LZ40_NITRATE+NITRITE-N_Interpolated.csv')
LO_NO_data_Inter = pd.merge(L001_NO_Inter,L004_NO_Inter, how = 'left', on = 'date')
LO_NO_data_Inter = pd.merge(LO_NO_data_Inter,L005_NO_Inter, how = 'left', on = 'date')
LO_NO_data_Inter = pd.merge(LO_NO_data_Inter,L006_NO_Inter, how = 'left', on = 'date')
LO_NO_data_Inter = pd.merge(LO_NO_data_Inter,L007_NO_Inter, how = 'left', on = 'date')
LO_NO_data_Inter = pd.merge(LO_NO_data_Inter,L008_NO_Inter, how = 'left', on = 'date')
LO_NO_data_Inter = pd.merge(LO_NO_data_Inter,LZ40_NO_Inter, how = 'left', on = 'date')
LO_NO_data_Inter = LO_NO_data_Inter.loc[:,~LO_NO_data_Inter.columns.str.startswith('Unnamed')]
LO_NO_data_Inter['Mean_NO'] = LO_NO_data_Inter.mean(axis=1)
# LO_NO_data_Inter.to_csv('./LO_NO_Clean_daily.csv')
LO_NO_Clean_Inter = LO_NO_data_Inter.copy()  # daily copy (still holding the 'date' column) used for the DIN calculation below
LO_NO_data_Inter = LO_NO_data_Inter.set_index(['date'])
LO_NO_data_Inter.index = pd.to_datetime(LO_NO_data_Inter.index, unit = 'ns')
LO_NO_Monthly_Inter = LO_NO_data_Inter.resample('M').mean()
NO_Max = LO_NO_Monthly_Inter.max(axis=1)
NO_Min = LO_NO_Monthly_Inter.min(axis=1)
LO_NO_Monthly_Inter['Max'] = NO_Max.values
LO_NO_Monthly_Inter['Min'] = NO_Min.values
Working_dir = 'C:/Work/Research/LOONE/Final_Data_20082023'
os.chdir('%s'%Working_dir)
LO_NO_Monthly_Inter.to_csv('./LO_NO_Monthly_Inter.csv')
#Create File (LO_DIN_2008-2022)
date_DIN = pd.date_range(start = '%s/%s/%s'%(St_M,St_D,St_Yr),end = '%s/%s/%s'%(En_M,En_D,En_Yr),freq = 'D')
LO_DIN = pd.DataFrame(date_DIN,columns=['date'])
LO_NH4_Clean_Inter = DF_Date_Range(LO_NH4_Clean_Inter, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
LO_NO_Clean_Inter = DF_Date_Range(LO_NO_Clean_Inter, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
LO_DIN['NH4'] = LO_NH4_Clean_Inter['Mean_NH4'].values
LO_DIN['NO'] = LO_NO_Clean_Inter['Mean_NO'].values
LO_DIN['DIN_mg/m3'] = LO_DIN[['NH4','NO']].sum(axis=1)*1000
Working_dir = 'C:/Work/Research/LOONE/Final_Data_20082023'
os.chdir('%s'%Working_dir)
LO_DIN.to_csv('./LO_DIN.csv')
# Interpolated DO Observations in Lake
#Create File (LO_Avg_DO_2008-2022)
Working_dir = 'C:/Work/Research/Data Analysis/Lake_O_water_Q_data/LO_WQ_June2023'
os.chdir('%s'%Working_dir)
L001_DO_Inter = pd.read_csv('./water_quality_L001_DISSOLVED OXYGEN_Interpolated.csv')
L004_DO_Inter = pd.read_csv('./water_quality_L004_DISSOLVED OXYGEN_Interpolated.csv')
L005_DO_Inter = pd.read_csv('./water_quality_L005_DISSOLVED OXYGEN_Interpolated.csv')
L006_DO_Inter = pd.read_csv('./water_quality_L006_DISSOLVED OXYGEN_Interpolated.csv')
L007_DO_Inter = pd.read_csv('./water_quality_L007_DISSOLVED OXYGEN_Interpolated.csv')
L008_DO_Inter = pd.read_csv('./water_quality_L008_DISSOLVED OXYGEN_Interpolated.csv')
LZ40_DO_Inter = pd.read_csv('./water_quality_LZ40_DISSOLVED OXYGEN_Interpolated.csv')
LO_DO_data_Inter = pd.merge(L001_DO_Inter,L004_DO_Inter, how = 'left', on = 'date')
LO_DO_data_Inter = pd.merge(LO_DO_data_Inter,L005_DO_Inter, how = 'left', on = 'date')
LO_DO_data_Inter = pd.merge(LO_DO_data_Inter,L006_DO_Inter, how = 'left', on = 'date')
LO_DO_data_Inter = pd.merge(LO_DO_data_Inter,L007_DO_Inter, how = 'left', on = 'date')
LO_DO_data_Inter = pd.merge(LO_DO_data_Inter,L008_DO_Inter, how = 'left', on = 'date')
LO_DO_data_Inter = pd.merge(LO_DO_data_Inter,LZ40_DO_Inter, how = 'left', on = 'date')
LO_DO_data_Inter = LO_DO_data_Inter.loc[:,~LO_DO_data_Inter.columns.str.startswith('Unnamed')]
#Read clean LO_DO data
LO_DO_data_Inter['Mean_DO'] = LO_DO_data_Inter.mean(axis=1)
LO_DO_data_Inter = DF_Date_Range(LO_DO_data_Inter, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
Working_dir = 'C:/Work/Research/LOONE/Final_Data_20082023'
os.chdir('%s'%Working_dir)
LO_DO_data_Inter.to_csv('./LO_DO_Clean_daily.csv')
LO_DO_data_Inter = LO_DO_data_Inter.set_index(['date'])
LO_DO_data_Inter.index = pd.to_datetime(LO_DO_data_Inter.index, unit = 'ns')
LO_DO_Monthly_Inter = LO_DO_data_Inter.resample('M').mean()
LO_DO_Monthly_Inter.to_csv('./LO_DO_Monthly_Inter.csv')
#RADT Data in Lake Okeechobee
#Create File (LO_RADT_2008-2022)
Working_dir = 'C:/Work/Research/Data Analysis/Lake_O_Weather_Data/Weather_data_2023'
os.chdir('%s'%Working_dir)
L001_RADT = pd.read_csv('./L001_RADT.csv')
L005_RADT = pd.read_csv('./L005_RADT.csv')
L006_RADT = pd.read_csv('./L006_RADT.csv')
LZ40_RADT = pd.read_csv('./LZ40_RADT.csv')
LO_RADT_data = pd.merge(L006_RADT,L001_RADT, how = 'left', on = 'date')
LO_RADT_data = pd.merge(LO_RADT_data,L005_RADT, how = 'left', on = 'date')
LO_RADT_data = pd.merge(LO_RADT_data,LZ40_RADT, how = 'left', on = 'date')
LO_RADT_data = LO_RADT_data.loc[:,~LO_RADT_data.columns.str.startswith('Unnamed')]
LO_RADT_data['Mean_RADT'] = LO_RADT_data.mean(axis=1)
LO_RADT_data = DF_Date_Range(LO_RADT_data, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
LO_RADT_data.to_csv('./LO_RADT_data_20082022.csv')
#RADP Data in Lake Okeechobee
#Create File (LO_RADP_2008-2022)
Working_dir = 'C:/Work/Research/Data Analysis/Lake_O_Weather_Data/Weather_data_2023'
os.chdir('%s'%Working_dir)
L001_RADP = pd.read_csv('./L001_RADP.csv')
L005_RADP = pd.read_csv('./L005_RADP.csv')
L006_RADP = pd.read_csv('./L006_RADP.csv')
LZ40_RADP = pd.read_csv('./LZ40_RADP.csv')
LO_RADP_data = pd.merge(L006_RADP,L001_RADP, how = 'left', on = 'date')
LO_RADP_data = pd.merge(LO_RADP_data,L005_RADP, how = 'left', on = 'date')
LO_RADP_data = pd.merge(LO_RADP_data,LZ40_RADP, how = 'left', on = 'date')
LO_RADP_data = LO_RADP_data.loc[:,~LO_RADP_data.columns.str.startswith('Unnamed')]
LO_RADP_data['Mean_RADP'] = LO_RADP_data.mean(axis=1)
LO_RADP_data = DF_Date_Range(LO_RADP_data, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
LO_RADP_data.to_csv('./LO_RADP_data_20082022.csv')
# Interpolated Chla Corrected Observations in Lake
Working_dir = 'C:/Work/Research/Data Analysis/Lake_O_water_Q_data/LO_WQ_June2023'
os.chdir('%s'%Working_dir)
L001_Chla_Inter = pd.read_csv('./water_quality_L001_CHLOROPHYLL-A, CORRECTED_Interpolated.csv')
L004_Chla_Inter = pd.read_csv('./water_quality_L004_CHLOROPHYLL-A, CORRECTED_Interpolated.csv')
L005_Chla_Inter = pd.read_csv('./water_quality_L005_CHLOROPHYLL-A, CORRECTED_Interpolated.csv')
L006_Chla_Inter = pd.read_csv('./water_quality_L006_CHLOROPHYLL-A, CORRECTED_Interpolated.csv')
L007_Chla_Inter = pd.read_csv('./water_quality_L007_CHLOROPHYLL-A, CORRECTED_Interpolated.csv')
L008_Chla_Inter = pd.read_csv('./water_quality_L008_CHLOROPHYLL-A, CORRECTED_Interpolated.csv')
LZ40_Chla_Inter = pd.read_csv('./water_quality_LZ40_CHLOROPHYLL-A, CORRECTED_Interpolated.csv')
LO_Chla_data_Inter = pd.merge(L001_Chla_Inter,L004_Chla_Inter, how = 'left', on = 'date')
LO_Chla_data_Inter = pd.merge(LO_Chla_data_Inter,L005_Chla_Inter, how = 'left', on = 'date')
LO_Chla_data_Inter = pd.merge(LO_Chla_data_Inter,L006_Chla_Inter, how = 'left', on = 'date')
LO_Chla_data_Inter = pd.merge(LO_Chla_data_Inter,L007_Chla_Inter, how = 'left', on = 'date')
LO_Chla_data_Inter = pd.merge(LO_Chla_data_Inter,L008_Chla_Inter, how = 'left', on = 'date')
LO_Chla_data_Inter = pd.merge(LO_Chla_data_Inter,LZ40_Chla_Inter, how = 'left', on = 'date')
LO_Chla_data_Inter = LO_Chla_data_Inter.loc[:,~LO_Chla_data_Inter.columns.str.startswith('Unnamed')]
#Read clean LO_Chla data
LO_Chla_data_Inter['Mean_Chla'] = LO_Chla_data_Inter.mean(axis=1)
LO_Chla_data_Inter.to_csv('./LO_Chla_Clean_daily.csv')
#Monthly
LO_Chla_data_Inter = LO_Chla_data_Inter.set_index(['date'])
LO_Chla_data_Inter.index = pd.to_datetime(LO_Chla_data_Inter.index, unit = 'ns')
LO_Chla_Monthly_Inter = LO_Chla_data_Inter.resample('M').mean()
LO_Chla_Monthly_Inter.to_csv('./LO_Chla_Monthly_Inter.csv')
# Interpolated Chla LC Observations in Lake
Working_dir = 'C:/Work/Research/Data Analysis/Lake_O_water_Q_data/LO_WQ_June2023'
os.chdir('%s'%Working_dir)
L001_Chla_LC_Inter = pd.read_csv('./water_quality_L001_CHLOROPHYLL-A(LC)_Interpolated.csv')
L004_Chla_LC_Inter = pd.read_csv('./water_quality_L004_CHLOROPHYLL-A(LC)_Interpolated.csv')
L005_Chla_LC_Inter = pd.read_csv('./water_quality_L005_CHLOROPHYLL-A(LC)_Interpolated.csv')
L006_Chla_LC_Inter = pd.read_csv('./water_quality_L006_CHLOROPHYLL-A(LC)_Interpolated.csv')
L007_Chla_LC_Inter = pd.read_csv('./water_quality_L007_CHLOROPHYLL-A(LC)_Interpolated.csv')
L008_Chla_LC_Inter = pd.read_csv('./water_quality_L008_CHLOROPHYLL-A(LC)_Interpolated.csv')
LZ40_Chla_LC_Inter = pd.read_csv('./water_quality_LZ40_CHLOROPHYLL-A(LC)_Interpolated.csv')
LO_Chla_LC_data_Inter = pd.merge(L001_Chla_LC_Inter,L004_Chla_LC_Inter, how = 'left', on = 'date')
LO_Chla_LC_data_Inter = pd.merge(LO_Chla_LC_data_Inter,L005_Chla_LC_Inter, how = 'left', on = 'date')
LO_Chla_LC_data_Inter = pd.merge(LO_Chla_LC_data_Inter,L006_Chla_LC_Inter, how = 'left', on = 'date')
LO_Chla_LC_data_Inter = pd.merge(LO_Chla_LC_data_Inter,L007_Chla_LC_Inter, how = 'left', on = 'date')
LO_Chla_LC_data_Inter = pd.merge(LO_Chla_LC_data_Inter,L008_Chla_LC_Inter, how = 'left', on = 'date')
LO_Chla_LC_data_Inter = pd.merge(LO_Chla_LC_data_Inter,LZ40_Chla_LC_Inter, how = 'left', on = 'date')
LO_Chla_LC_data_Inter = LO_Chla_LC_data_Inter.loc[:,~LO_Chla_LC_data_Inter.columns.str.startswith('Unnamed')]
#Read clean LO_Chla_LC data
LO_Chla_LC_data_Inter['Mean_Chla_LC'] = LO_Chla_LC_data_Inter.mean(axis=1)
LO_Chla_LC_data_Inter.to_csv('./LO_Chla_LC_Clean_daily.csv')
#Monthly
LO_Chla_LC_data_Inter = LO_Chla_LC_data_Inter.set_index(['date'])
LO_Chla_LC_data_Inter.index = pd.to_datetime(LO_Chla_LC_data_Inter.index, unit = 'ns')
LO_Chla_LC_Monthly_Inter = LO_Chla_LC_data_Inter.resample('M').mean()
LO_Chla_LC_Monthly_Inter.to_csv('./LO_Chla_LC_Monthly_Inter.csv')
#Merge the Chla Data
#Create Files LO_Avg_Chla_2008-2022 and Obs_Chla_LO_2008-2022
# Chla_date = pd.date_range(start = LO_Chla_data_Inter['date'].iloc[0],end =LO_Chla_LC_data_Inter['date'].iloc[-1],freq = 'D')
LO_Chla_data_Inter = DF_Date_Range(LO_Chla_data_Inter, St_Yr, St_M, St_D, 2010, 10, 19)
LO_Chla_df = pd.DataFrame(LO_Chla_data_Inter['date'],columns=['date'])
LO_Chla_df['Chla'] = LO_Chla_data_Inter['Mean_Chla']
LO_Chla_LC_df = pd.DataFrame(LO_Chla_LC_data_Inter['date'],columns=['date'])
LO_Chla_LC_df['Chla'] = LO_Chla_LC_data_Inter['Mean_Chla_LC']
LO_Chla_Merge = pd.concat([LO_Chla_df, LO_Chla_LC_df]).reset_index(drop= True)
LO_Chla_Merge.to_csv('./LO_Merged_Chla.csv')
LO_Chla_Merge = LO_Chla_Merge.set_index(['date'])
LO_Chla_Merge.index = pd.to_datetime(LO_Chla_Merge.index, unit = 'ns')
LO_Chla_Merge_Monthly_Inter = LO_Chla_Merge.resample('M').mean()
LO_Chla_Merge_Monthly_Inter.to_csv('./LO_Chla_Monthly_Inter.csv')
#NO Loads
#Create File (Daily_NOx_External_Loads_2008-2022)
Working_dir = 'C:/Work/Research/Data Analysis/Lake_O_water_Q_data/WQ_Data_May2023'
os.chdir('%s'%Working_dir)
S65_NO = pd.read_csv('./S65E_NO_Interpolated.csv')
S71_NO = pd.read_csv('./S71_NO_Interpolated.csv')
S72_NO = pd.read_csv('./S72_NO_Interpolated.csv')
S84_NO = pd.read_csv('./S84_NO_Interpolated.csv')
S127_NO = pd.read_csv('./S127_NO_Interpolated.csv')
S133_NO = pd.read_csv('./S133_NO_Interpolated.csv')
S135_NO = pd.read_csv('./S135_NO_Interpolated.csv')
S154_NO = pd.read_csv('./S154_NO_Interpolated.csv')
S191_NO = pd.read_csv('./S191_NO_Interpolated.csv')
S308_NO = pd.read_csv('./S308C_NO_Interpolated.csv')
FISHP_NO = pd.read_csv('./FECSR78_NO_Interpolated.csv')
L8_NO = pd.read_csv('./CULV10A_NO_Interpolated.csv')
S4_NO = pd.read_csv('./S4_NO_Interpolated.csv')
NO_names = ['S65_NO','S71_NO','S72_NO','S84_NO','S127_NO','S133_NO','S135_NO','S154_NO','S191_NO','S308_NO','FISHP_NO','L8_NO','S4_NO']
NO_list = {'S65_NO':S65_NO,'S71_NO':S71_NO,'S72_NO':S72_NO,'S84_NO':S84_NO,'S127_NO':S127_NO,'S133_NO':S133_NO,'S135_NO':S135_NO,
'S154_NO':S154_NO,'S191_NO':S191_NO,'S308_NO':S308_NO,'FISHP_NO':FISHP_NO,'L8_NO':L8_NO,'S4_NO':S4_NO}
date_NO = pd.date_range(start = '1/1/2008',end ='12/31/2022',freq = 'D')
NO_df = pd.DataFrame(date_NO, columns=['date'])
for i in range(len(NO_names)):
y = DF_Date_Range(NO_list[NO_names[i]], St_Yr, St_M, St_D, En_Yr, En_M, En_D)
NO_df['%s'%NO_names[i]] = y.iloc[:,-1:].values
Flow_df = DF_Date_Range(Flow_df, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
#Determine NO Loads
NO_Loads_In = pd.DataFrame(date_NO, columns=['date'])
NO_Loads_In['S65_NO_Ld'] = Flow_df['S65_Q'].values * NO_df['S65_NO'].values * 1000
NO_Loads_In['S71_NO_Ld'] = Flow_df['S71_Q'].values * NO_df['S71_NO'].values * 1000
NO_Loads_In['S72_NO_Ld'] = Flow_df['S72_Q'].values * NO_df['S72_NO'].values * 1000
NO_Loads_In['S84_NO_Ld'] = Flow_df['S84_Q'].values * NO_df['S84_NO'].values * 1000
NO_Loads_In['S127_NO_Ld'] = Flow_df['S127_In'].values * NO_df['S127_NO'].values * 1000
NO_Loads_In['S133_NO_Ld'] = Flow_df['S133_P_Q'].values * NO_df['S133_NO'].values * 1000
NO_Loads_In['S135_NO_Ld'] = Flow_df['S135_In'].values * NO_df['S135_NO'].values * 1000
NO_Loads_In['S154_NO_Ld'] = Flow_df['S154_Q'].values * NO_df['S154_NO'].values * 1000
NO_Loads_In['S191_NO_Ld'] = Flow_df['S191_Q'].values * NO_df['S191_NO'].values * 1000
NO_Loads_In['S308_NO_Ld'] = Flow_df['S308_In'].values * NO_df['S308_NO'].values * 1000
NO_Loads_In['FISHP_NO_Ld'] = Flow_df['FISHP_Q'].values * NO_df['FISHP_NO'].values * 1000
NO_Loads_In['L8_NO_Ld'] = Flow_df['L8_In'].values * NO_df['L8_NO'].values * 1000
NO_Loads_In['S4_NO_Ld'] = Flow_df['S4_P_Q'].values * NO_df['S4_NO'].values * 1000
#Calculate the total External Loads to Lake Okeechobee
NO_Loads_In['External_NO_Ld_mg'] = NO_Loads_In.sum(axis=1)
Working_dir = 'C:/Work/Research/LOONE/Final_Data_20082023'
os.chdir('%s'%Working_dir)
NO_Loads_In.to_csv('./LO_External_Loadings_NO.csv')
#Determine Chla Loads
#Create File (Chla_Loads_In_2008-2022)
Working_dir = 'C:/Work/Research/Data Analysis/Lake_O_water_Q_data/WQ_Data_May2023'
os.chdir('%s'%Working_dir)
S65E_Chla = pd.read_csv('./S65E_Chla_Merged.csv')
S65E_Chla = DF_Date_Range(S65E_Chla, St_Yr, St_M, St_D, En_Yr, En_M, En_D)
Chla_Loads_In = pd.DataFrame(date_NO, columns=['date'])
Chla_Loads_In['Chla_Loads'] = Flow_df['Inflows'].values * S65E_Chla['Data'].values
Working_dir = 'C:/Work/Research/LOONE/Final_Data_20082023'
os.chdir('%s'%Working_dir)
Chla_Loads_In.to_csv('./Chla_Loads_In.csv')
#Write Data into csv files
Working_dir = 'C:/Work/Research/LOONE/Final_Data_20082023'
os.chdir('%s'%Working_dir)
#write Avg Stage (ft,m) Storage (acft, m3) SA (acres) to csv
LO_Stg_Sto_SA_df.to_csv('./Average_LO_Storage_3MLagLORS20082023.csv')
#Write S65 TP concentrations (mg/L)
S65_total_TP.to_csv('./S65_TP_3MLag.csv')
# TP External Loads 3 Months Lag (mg)
TP_Loads_In_3MLag_df.to_csv('./LO_External_Loadings_3MLag_LORS20082023.csv')
# Flow dataframe including Inflows, NetFlows, and Outflows (all in m3/day)
Flow_df.to_csv('./Flow_df_3MLag.csv')
#Inflows (cmd)
LO_Inflows_BK.to_csv('./LO_Inflows_BK_LORS20082023.csv')
#Outflows (cmd)
Outflows_consd.to_csv('./Outflows_consd_LORS20082023.csv')
# NetFlows (cmd)
Netflows.to_csv('./Netflows_acft_LORS20082023.csv')
#Total flows to WCAs (acft)
TotalQWCA.to_csv('./TotalQWCA_Obs_LORS20082023.csv')
# INDUST Outflows (cmd)
INDUST_Outflows.to_csv('./INDUST_Outflows.csv')
| osamatarabih/LOONE | Extra Scripts/LOONE_DATA_PREP.py | LOONE_DATA_PREP.py | py | 47,225 | python | en | code | 3 | github-code | 13 |
3019702763 | #!/usr/bin/env python
#
import time
import zmq
import sys
# initialie request argument
i1 = 4
i2 = 7
print(sys.argv, len(sys.argv))
if len(sys.argv) > 1:
i1 = int(sys.argv[1])
if len(sys.argv) > 2:
i2 = int(sys.argv[2])
request = {'i1': i1, 'i2': i2}
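# The server listening on port 5555 is expected to reply with a JSON object whose key 'i3'
# holds the result (presumably i1 + i2, given the script name).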
print("Connecting to 5555")
context = zmq.Context()
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:5555")
print("Sending: {}".format(request))
socket.send_json(request)
response = socket.recv_json()
print("Received: {}".format(response))
print("Answer is {}".format(response['i3']))
| jsk-lecture/software2-Kota-0226 | 0627/zmq/add_two_client.py | add_two_client.py | py | 565 | python | en | code | 0 | github-code | 13 |
24151035424 | import sqlite3
from typing import Any, Optional, List
DATA: List[dict] = [
{'id': 0, 'title': 'A Byte of Python', 'author': 'Swaroop C. H.'},
{'id': 1, 'title': 'Moby-Dick; or, The Whale', 'author': 'Herman Melville'},
{'id': 3, 'title': 'War and Peace', 'author': 'Leo Tolstoy'},
]
class Book:
def __init__(self, id: Optional[int], title: str, author: str, count: int) -> None:
self.id: Optional[int] = id
self.title: str = title
self.author: str = author
self.count: int = count
def __getitem__(self, item: str) -> Any:
return getattr(self, item)
def init_db(initial_records: List[dict]) -> None:
with sqlite3.connect('table_books.db') as conn:
cursor: sqlite3.Cursor = conn.cursor()
cursor.execute(
"""
SELECT name FROM sqlite_master
WHERE type='table' AND name='table_books';
"""
)
exists: Optional[tuple[str,]] = cursor.fetchone()
# now in `exist` we have tuple with table name if table really exists in DB
if not exists:
cursor.executescript(
"""
CREATE TABLE `table_books` (
id INTEGER PRIMARY KEY AUTOINCREMENT,
title TEXT,
author TEXT,
count INTEGER default 0
)
"""
)
cursor.executemany(
"""
INSERT INTO `table_books`
(title, author) VALUES (?, ?)
""",
[
(item['title'], item['author'])
for item in initial_records
]
)
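# Example usage (sketch): seed the table with the module-level DATA and read it back:
#   init_db(DATA)
#   for book in get_all_books():
#       print(book.id, book.title, book.author, book.count)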
def get_all_books() -> List[Book]:
with sqlite3.connect('table_books.db') as conn:
cursor: sqlite3.Cursor = conn.cursor()
cursor.execute(
"""
SELECT * from `table_books`
"""
)
return [Book(*row) for row in cursor.fetchall()]
def add_book(book: Book) -> None:
with sqlite3.connect('table_books.db') as conn:
cursor: sqlite3.Cursor = conn.cursor()
query = f"""
INSERT INTO table_books (title, author) VALUES
(?, ?)
"""
cursor.execute(query, (book.title, book.author))
def get_books(author: str) -> List[Book]:
with sqlite3.connect('table_books.db') as conn:
cursor: sqlite3.Cursor = conn.cursor()
        cursor.execute(
            """
            SELECT * from `table_books` WHERE author = ?
            """,
            (author,)
        )
return [Book(*row) for row in cursor.fetchall()]
def get_book_by_id(id :int) -> Book:
with sqlite3.connect('table_books.db') as conn:
cursor: sqlite3.Cursor = conn.cursor()
cursor.execute(
f"""
SELECT * from `table_books` WHERE id = {id}
"""
)
return Book(*cursor.fetchone())
def update_count_many_books(books: List[Book]) -> None:
with sqlite3.connect('table_books.db') as conn:
cursor: sqlite3.Cursor = conn.cursor()
parameters = []
query = """
UPDATE table_books SET count= :count
WHERE id= :id
"""
for book in books:
parameters.append({
"id": book.id,
"count": book.count + 1
})
cursor.executemany(query, parameters)
def update_count_book(book: Book) -> None:
with sqlite3.connect('table_books.db') as conn:
cursor: sqlite3.Cursor = conn.cursor()
query = """
UPDATE table_books SET count= :count
WHERE id= :id
"""
cursor.execute(query, {"id": book.id, "count": book.count})
| ilnrzakirov/Python_advanced | module_14_mvc/homework/models.py | models.py | py | 3,786 | python | en | code | 0 | github-code | 13 |
22977313105 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 8 21:10:24 2020
@author: mitta
"""
import pulp as p
import time
from datetime import timedelta
x = []
month = [0,1,2,3,4]
m = 3
demand = [0,50,40,70,0]
D = dict(zip(month,demand))
E=5
Hcost=32
Fcost=40
S=200
C=8
OTC=3
OTprice=35
W=6
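# Decision variables (meanings inferred from the constraints below -- not stated in the source):
# w[t] = workforce in month t, h[t]/f[t] = workers hired/fired, x[t] = units produced,
# s[t] = inventory carried out of month t, o[t] = overtime units.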
w = p.LpVariable.dicts('w', month, cat = 'Integer', lowBound = 0)
h = p.LpVariable.dicts('h', month, cat = 'Integer', lowBound = 0)
f = p.LpVariable.dicts('f', month, cat = 'Integer', lowBound = 0)
s = p.LpVariable.dicts('s', month, cat = 'Integer', lowBound = 0)
o = p.LpVariable.dicts('o', month, cat = 'Integer', lowBound = 0)
x = p.LpVariable.dicts('x', month, cat = 'Integer', lowBound = 0)
model = p.LpProblem("Production Planning", p.LpMinimize)
model += S*sum(w[t] for t in month[1:m+1]) + Fcost*sum(f[t] for t in month[1:2+m]) + Hcost*sum(h[t] for t in month[1:2+m]) + W*sum(s[t] for t in month[1:m+1])+OTprice*sum(o[t] for t in month[1:m+1])
model += w[0] == E
model += s[0] == 0
model += w[m+1] == E
for t in month[1:m+1]:
model += x[t] == C*w[t] + o[t]
for t in month[1:m+2]:
model += w[t] == w[t-1] + h[t]-f[t]
for t in month[1:m+1]:
model += s[t] == s[t-1] + x[t]-demand[t]
for t in month[0:m+2]:
model += o[t] <= OTC
start_time = time.monotonic()
model.solve()
end_time = time.monotonic()
#print("Status:",p.LpStatus[model.status])
print("MinCost: ",p.value(model.objective))
print("Duration:",timedelta(seconds=end_time - start_time)) | divyansh99991/LPDAAHW5 | untitled1.py | untitled1.py | py | 1,540 | python | en | code | 0 | github-code | 13 |
18697918450 | from setuptools import setup
package_name = 'wall_following'
setup(
name=package_name,
version='0.0.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages',
['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='nan',
maintainer_email='to@do.com',
description='TODO: Package description',
license='TODO: License declaration',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'sing_vehicle_mode = wall_following.single_vehicle:main',
'head_to_head_mode = wall_following.head_to_head:main'
],
},
)
| cosynus-lix/f1tenth_quickstart_ros2 | src/wall_following/setup.py | setup.py | py | 749 | python | en | code | 1 | github-code | 13 |
70294748178 | from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
import sweetify
from environment.env import DATA_HORA_ZONA, DATA_ANO
from aluno.forms import Aluno_Form, Matricula_Form, Reclamacao_Form, Confirmar_Matricula_Form
from pessoa.forms import Pessoa_Form
from config.views import prepara_foto
from aluno.models import Reclamacao
# Create your views here.
def listar_reclamacao(request):
lista = Reclamacao.objects.select_related('aluno').all()
context = {'lista': lista}
return render(request, 'aluno/listar_reclamacao.html', context)
def efectuar_reclamacao(request):
form = Reclamacao_Form(request.POST or None)
if request.method == 'POST':
if form.is_valid():
recl = form.save(commit=False)
recl.aluno_id = form.cleaned_data['aluno']
recl.save()
sweetify.success(request, 'Reclamação feita com sucesso!...', button='Ok', timer='3100', persistent="Close")
form = Reclamacao_Form()
context = {'form': form}
return render(request, 'aluno/efecturReclamacao.html', context)
"""
[FUNÇÃO QUE VAI FAZER A CONFIRMAÇÃO DE MATRICULA DO ESTUDANTE]
"""
def confirmacao_matricula(request):
form = Confirmar_Matricula_Form(request.POST or None)
if request.method == 'POST':
if form.is_valid():
recl = form.save(commit=False)
recl.aluno_id = form.cleaned_data['aluno']
recl.tremestre_id = form.cleaned_data['tremestre']
recl.save()
sweetify.success(request, 'confirmação feita com sucesso!...', button='Ok', timer='3100', persistent="Close")
context = {'pessoa': form.instance}
return render (request, 'aluno/reciboInscricao.html', context)
context = {'form': form}
return render(request, 'aluno/confirmacao_matricula.html', context)
def adicionarNovoCadastro_aluno(request):
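    # Registers a new student in one request: saves the Pessoa (person) record,
    # the linked Aluno (student) record with its course, and an initial Matricula (enrolment).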
form = Pessoa_Form(request.POST or None)
form2 = Aluno_Form(request.POST or None)
form3 = Matricula_Form(request.POST or None)
if request.method == 'POST':
form = Pessoa_Form(request.POST, request.FILES or None)
if form.is_valid() and form2.is_valid():
curso = request.POST.get('curso')
pessoa = form.save(commit=False)
pessoa.municipio_id = form.cleaned_data.get('municipio')
if len(request.POST['foto']) > 0:
pessoa.foto = prepara_foto(request)
pessoa.save()
else:
pessoa.foto ='user.jpg'
pessoa.save()
dados = form2.save(commit=False)
dados.pessoa_id = pessoa.id
dados.curso_id = curso
dados.save()
resp = form3.save(commit=False)
resp.aluno_id = dados.id
resp.save()
sweetify.success(request, 'Dados registado com sucesso!....', button='Ok', timer='3100', persistent="Close")
context = {'pessoa': form.instance, 'aluno': form2.instance, 'matricula': form3.instance}
return render (request, 'aluno/reciboInscricao.html', context)
context = {'form':form,'form2': form2,'form3':form3}
return render (request, 'aluno/adicionarNovoCadastro-aluno.html', context) | ismaely/kanguito-academic-system | aluno/views.py | views.py | py | 3,291 | python | pt | code | 1 | github-code | 13 |
22642472664 | import codecs
import datetime as dt
import pickle
import matplotlib.pyplot as plt
#For tokenizing sentences
import nltk
import numpy as np
import pandas as pd
from tqdm import tqdm_notebook as tqdm
from tone_count import *
nltk.download('punkt')
plt.style.use('seaborn-whitegrid')
jpn = 'DB/rate_jpn'
infile = open(jpn,'rb')
jpn = pickle.load(infile)
jpn.rename(index={'2009-15-29':'2009-12-29'},inplace=True)
jpn.head(15)
lm = 'E:\\GitRepo\\CB speeches\\data\\list_sent'
infile = open(lm,'rb')
lmdict = pickle.load(infile)
neg = 'E:\\GitRepo\\CB speeches\\data\\negate'
infile = open(neg,'rb')
negate = pickle.load(infile)
print(len(jpn))
## Elapsed time
import time
start = time.time()
temp = [tone_count_with_negation_check(lmdict,x) for x in jpn.text]
temp = pd.DataFrame(temp)
end = time.time()
print(end - start)
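# tone_count_with_negation_check is assumed to return, per statement:
# [word count, n positive words, n negative words, positive words, negative words],
# matching the column order unpacked below.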
jpn['wordcount'] = temp.iloc[:,0].values
jpn['NPositiveWords'] = temp.iloc[:,1].values
jpn['NNegativeWords'] = temp.iloc[:,2].values
#Sentiment Score normalized by the number of words
jpn['sentiment'] = (jpn['NPositiveWords'] - jpn['NNegativeWords']) / jpn['wordcount'] * 100
jpn['Poswords'] = temp.iloc[:,3].values
jpn['Negwords'] = temp.iloc[:,4].values
temp.head()
jpn.head()
pl = pd.DataFrame()
pl['date'] = pd.to_datetime(jpn.index.values,format='%Y-%m-%d')
pl["b"] = pl['date'].apply(lambda x: x.strftime('%Y-%m'))
print(pl["b"])
jpn = pd.merge(jpn, pl, left_on='Index', right_index=True)
jpn.head()
jpn.info()
##
## Net Sentiment analysis
import matplotlib.dates as mdates
NetSentiment = jpn['NPositiveWords'] - jpn['NNegativeWords']
fig = plt.figure(figsize=(20,10))
ax = plt.subplot()
plt.plot(jpn.date, jpn['NPositiveWords'], c='green', linewidth= 1.0)
plt.plot(jpn.date, jpn['NNegativeWords']*-1, c='red', linewidth=1.0)
plt.plot(jpn.date, NetSentiment, c='grey', linewidth=1.0)
plt.title('The number of positive/negative words in statement: Bank of Japan', fontsize=14)
plt.legend(['Positive Words', 'Negative Words', 'Net Sentiment'], prop={'size': 8}, loc=1)
ax.fill_between(jpn.date, NetSentiment, where=(NetSentiment > 0), color='green', alpha=0.3, interpolate=True)
ax.fill_between(jpn.date, NetSentiment, where=(NetSentiment <= 0), color='red', alpha=0.3, interpolate=True)
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y')
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
# Minor ticks every month.
fmt_month = mdates.MonthLocator()
ax.xaxis.set_minor_locator(fmt_month)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
datemin = np.datetime64(jpn.date[0], 'Y')
datemax = np.datetime64(jpn.date[-1], 'Y') + np.timedelta64(1, 'Y')
# plt.xticks(range(len(pl.b)), pl.b, rotation = 'vertical',fontsize=8)
ax.set_xlim(datemin, datemax)
ax.grid(True)
plt.show()
fig.savefig('E:\\GitRepo\\CB speeches\\JPN\\num.png')
# Normalize data
NPositiveWordsNorm = jpn['NPositiveWords'] / jpn['wordcount'] * np.mean(jpn['wordcount'])
NNegativeWordsNorm = jpn['NNegativeWords'] / jpn['wordcount'] * np.mean(jpn['wordcount'])
NetSentimentNorm = (NPositiveWordsNorm - NNegativeWordsNorm)
fig, ax = plt.subplots(figsize=(15,7))
ax.plot(jpn.date, NPositiveWordsNorm, c='green', linewidth= 1.0)
plt.plot(jpn.date, NNegativeWordsNorm, c='red', linewidth=1.0)
plt.title('Counts normalized by the number of words', fontsize=16)
plt.legend(['Count of Positive Words', 'Count of Negative Words'],
prop={'size': 12}, loc = 1)
# format the ticks round to nearest years.
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y')
# format the coords message box
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
# Minor ticks every month.
fmt_month = mdates.MonthLocator()
ax.xaxis.set_minor_locator(fmt_month)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
datemin = np.datetime64(jpn.date[0], 'Y')
datemax = np.datetime64(jpn.date[-1], 'Y') + np.timedelta64(1, 'Y')
# plt.xticks(range(0,len(pl.b),6), pl.b, rotation = 45,fontsize=8)
ax.set_xlim(datemin, datemax)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.grid(True)
plt.show()
fig.savefig('E:\\GitRepo\\CB speeches\\JPN\\norm.png')
## Loading interest rates DB
jpnrate = pd.read_excel(r'DB/rate.xlsx')
# jpnrate['Date'] = pd.to_datetime(jpnrate.date.values,format='%Y-%m-%d')
jpnrate.set_index(['date'])
jpnrate.fillna(method='ffill', inplace=True)
jpnrate.info()
selected_columns = jpnrate[["date","rate"]]
rate_df = selected_columns.copy()
rate_df.rename(columns={"rate": "Rate"}, inplace=True)
datetime_series = pd.to_datetime(rate_df['date'])
datetime_index = pd.DatetimeIndex(datetime_series.values)
rate_df = rate_df.set_index(datetime_index)
print(rate_df.index)
fig, ax = plt.subplots(figsize=(15,7))
plt.title('Official interest rate: Japan', fontsize=16)
ax.plot(rate_df.date, rate_df['Rate'].values, c = 'green', linewidth= 1.0)
# format the ticks round to nearest years.
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y')
# format the coords message box
# format the ticks
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
# Minor ticks every month.
fmt_month = mdates.MonthLocator()
ax.xaxis.set_minor_locator(fmt_month)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))
datemin = np.datetime64(jpn.date[0], 'Y')
datemax = np.datetime64(jpn.date[-1], 'Y') + np.timedelta64(1, 'Y')
# plt.xticks(range(0,len(pl.b),6), pl.b, rotation = 45,fontsize=8)
ax.set_xlim(datemin, datemax)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.grid(True)
plt.show()
fig.savefig('E:\\GitRepo\\CB speeches\\JPN\\rate.png')
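# resample the policy rate to a daily, forward-filled series so each statement
# date below can be matched to the prevailing rate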
rt = rate_df['Rate'].resample('D').ffill()
rt= rt.to_frame()
## Adding interest rate decision
jpn['RateDecision'] = None
jpn['Rate'] = None
for i in range(len(jpn)):
for j in range(len(rt)):
if jpn.date[i] == rt.index.values[j]:
jpn['Rate'][i] = float(rt['Rate'][j+1])
jpn.tail(15)
# check for NaNs (statement dates with no matching daily rate)
jpn[jpn['Rate'].isna()]
jpn['Rate'].fillna(method='bfill', inplace=True)
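# RateDecision encodes the change relative to the next statement:
# 0 = unchanged, 1 = rate hike, -1 = rate cut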
for i in range(len(jpn)-1):
if jpn['Rate'][i] == jpn['Rate'][i+1]:
jpn['RateDecision'][i] = 0
elif jpn['Rate'][i] < jpn['Rate'][i+1]:
jpn['RateDecision'][i] = 1
elif jpn['Rate'][i] > jpn['Rate'][i+1]:
jpn['RateDecision'][i] = -1
jpn[jpn['RateDecision'].isna()]
jpn['RateDecision'].fillna(method='ffill', inplace=True)
jpn.tail(15)
## saving the pickle
jpn_pickle = 'DB/jpn_f'
jpn_fo = open(jpn_pickle,'wb')
pickle.dump(jpn, jpn_fo)
jpn_fo.close()
jpn.to_excel("E:\\GitRepo\\CB speeches\\data\\sent_jpn.xlsx", engine='xlsxwriter')
rate_des_pickle = 'DB/rate_jpn'
rate_des_o = open(rate_des_pickle,'wb')
pickle.dump(jpn, rate_des_o)
rate_des_o.close()
#Speaker window
Fukui = np.logical_and(jpn.index > '2003-03-20', jpn.index < '2008-03-19')
Shirakawa = np.logical_and(jpn.index > '2008-04-09', jpn.index < '2013-03-19')
Kuroda = np.logical_and(jpn.index > '2013-03-20', jpn.index < '2023-04-08')
Speaker = np.logical_or.reduce((Fukui, Kuroda))
# Moving Average
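# window is ~2.5% of all statements; used below to smooth the net-sentiment series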
Window = round(0.025 * len(jpn))
CompToMA = NetSentimentNorm.rolling(Window).mean()
cmin, cmax = None, None
if CompToMA.min() < NetSentimentNorm.min():
cmin = CompToMA.min()
else:
cmin = NetSentimentNorm.min()
if CompToMA.max() > NetSentimentNorm.max():
cmax = CompToMA.max()
else:
cmax = NetSentimentNorm.max()
# Final Plotting Data
fig, ax = plt.subplots(figsize=(15,7))
plt.title('Sentiment analysis evolution', fontsize=16)
ax.scatter(jpn.date, jpn['Rate']*180, c = 'blue', alpha = 0.5)
ax.plot(jpn.date, CompToMA, c = 'red', linewidth= 2.0)
ax.plot(jpn.date, NetSentimentNorm, c = 'green', linewidth= 1, alpha = 0.5)
ax.legend(['Japan Rate', str(str(Window) + ' statements moving average'),
'Net sentiment of individual statements'], prop={'size': 14}, loc = 1)
import datetime
# Format X-axis
import matplotlib.dates as mdates
years = mdates.YearLocator() # every year
months = mdates.MonthLocator() # every month
years_fmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(months)
# Set X-axis and Y-axis range
datemin = np.datetime64(jpn.date[0], 'Y')
datemax = np.datetime64(jpn.date[-1], 'Y') + np.timedelta64(1, 'Y')
ax.set_xlim(datemin, datemax)
# plt.xticks(range(0,len(pl.b),6), pl.b, rotation = 45,fontsize=8)
ax.set_ylim(cmin+5,cmax+5)
# format the coords message box
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.grid(True)
ax.tick_params(axis='both', which='major', labelsize=12)
# Fill speaker
import matplotlib.transforms as mtransforms
trans = mtransforms.blended_transform_factory(ax.transData, ax.transAxes)
theta = 0.9
ax.fill_between(jpn.index, 0, 10, where = Speaker, facecolor='lightblue', alpha=0.5, transform=trans)
# Add text
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
ax.text(0.005, 0.73, "Toshihiko Fukui", transform=ax.transAxes, fontsize=10, verticalalignment='top', bbox=props)
ax.text(0.20, 0.75, "Masaaki Shirakawa", transform=ax.transAxes, fontsize=10, verticalalignment='top', bbox=props)
ax.text(0.63, 0.75, "Haruhiko Kuroda", transform=ax.transAxes, fontsize=10, verticalalignment='top', bbox=props)
# Add annotations
qe_0 = (mdates.date2num(datetime.datetime(2011,8,13)))
qe = (mdates.date2num(datetime.datetime(2013,4,13)))
qe1 = (mdates.date2num(datetime.datetime(2014,10,1)))
qe2 = (mdates.date2num(datetime.datetime(2016,7,29)))
qe3 = (mdates.date2num(datetime.datetime(2018,7,31)))
cov = (mdates.date2num(datetime.datetime(2020,3,16)))
cov1 = (mdates.date2num(datetime.datetime(2020,4,27)))
arrow_style = dict(facecolor='black', edgecolor='white', shrink=0.05)
ax.annotate('QE', xy=(qe_0, 50), xytext=(qe_0, 30), size=11, ha='center', verticalalignment= 'bottom',
arrowprops=dict(arrow_style, shrink=0.05,ls='--', color='gray',lw=0.5))
ax.annotate('QE1', xy=(qe, 50), xytext=(qe, 30), size=11, ha='center', verticalalignment= 'bottom',
arrowprops=dict(arrow_style, shrink=0.05,ls='--', color='gray',lw=0.5))
ax.annotate('QE1+', xy=(qe1, 50), xytext=(qe1, 30), size=11, ha='center', verticalalignment= 'bottom',
arrowprops=dict(arrow_style, shrink=0.05,ls='--', color='gray',lw=0.5))
ax.annotate('QE2', xy=(qe2, 50), xytext=(qe2, 30), size=11, ha='center', verticalalignment= 'bottom',
arrowprops=dict(arrow_style, shrink=0.05,ls='--', color='gray',lw=0.5))
ax.annotate('QE3', xy=(qe3, 40), xytext=(qe3, 25), size=11, ha='center', verticalalignment= 'bottom',
arrowprops=dict(arrow_style, shrink=0.05,ls='--', color='gray',lw=0.5))
ax.annotate('Cov-19', xy=(cov,20), xytext=(cov, 10), size=11, ha='right', verticalalignment= 'bottom',
arrowprops=dict(arrow_style, shrink=0.05,ls='--', color='gray',lw=0.5))
ax.annotate('Cov-19+', xy=(cov1, 20), xytext=(cov1, 10), size=11, ha='left', verticalalignment= 'bottom',
arrowprops=dict(arrow_style, shrink=0.05,ls='--', color='gray',lw=0.5))
plt.show()
fig.savefig('E:\\GitRepo\\CB speeches\\JPN\\sentiment.png')
## TO QUARTERLY
jpn.info()
jpn.index = pd.to_datetime(jpn.index)
b2 = jpn.resample('QS').sum()
b2['ind']=jpn.Index.resample('QS').count()
b2.head(10)
b2.to_excel("E:\\GitRepo\\CB speeches\\data\\eda_JPN.xlsx", sheet_name='JPN',engine='xlsxwriter')
## JPN for topic creation
jpn.to_excel(
"E:\\GitRepo\\CB speeches\\data\\Topic_JPN.xlsx",
sheet_name="JPN",
engine="xlsxwriter",
) | Vedia-JerezDaniel/CB | JPN/Analysis_preliminary.py | Analysis_preliminary.py | py | 11,754 | python | en | code | 0 | github-code | 13 |
1115430051 | # This is file 5.py
#
# def AppendtoList(s):
# l = [1, 4, 9, 10, 23]
# l.append(s)
# return l
#
#
# print(AppendtoList(90))
# l1 = [1, 2, 5, 20]
# print(l1)
#
# l1 = l1 + [90]
# print(l1)
# def getAverage(s):
# avg = sum(s) / len(s)
# return avg
#
#
# s = [1, 4, 9, 10, 23]
# print(getAverage(s))
def removeList():
l1 = [1, 4, 9, 10, 23]
l2 = [4, 9]
l1.remove(l2[0])
l1.remove(l2[1])
return l1
l1 = removeList()
print(l1)
| tripura-kant/Python-Scripting | 250questions/5.py | 5.py | py | 447 | python | en | code | 0 | github-code | 13 |
28126882325 | #20190628
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from tqdm import tqdm
def itx_to_pandas(path):
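    # Parse an Igor Pro-style .itx text file: each "WAVES/D/N=(len) 'name'" header
    # is followed by `len` values; every wave becomes one DataFrame column.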
file = open(path,'r')
a = file.read().splitlines()
file.close()
key = []
values = []
cur = 0
while cur < len(a) - 1 :
if 'WAVES/D/N=' in a[cur]:
key.append(a[cur].split('\'')[1].split('.')[0])
loc_len = int(a[cur].split(')')[0].split('(')[1])
value = a[cur + 2 : cur + 2 + loc_len]
value = [float(line) for line in value]
values.append(value)
cur += loc_len
cur += 2
cur += 1
dfn = pd.DataFrame(values).T
dfn.columns = key
return dfn
def all_itx(wd):
#cwd = os.getcwd()
itxs = [wd + file for file in os.listdir(wd) if file.endswith(".itx")]
itxs.sort()
keys = ['%s.itx' %i for i in range(0,len(itxs))]
frames = [itx_to_pandas(itxs[i]) for i in tqdm(range(0, len(itxs)))]
df_all = pd.concat(frames, keys = keys)
df_all.index.names = ['page','row']
return df_all
def triangle_range(df, page, wave):
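    # return the index range spanned by the wave's extreme values (its max/min positions)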
start_index = min(df.loc[page, wave].idxmax(),
df.loc[page, wave].idxmin())
end_index = max(df.loc[page, wave].idxmax(),
df.loc[page, wave].idxmin())
return start_index, end_index
def resample_itx(df_all, current_array, page, voltage_wave, current_wave, start_index, end_index):
#making tmp df
df_tmp = pd.DataFrame({'voltage':df_all.loc['%s.itx' %page, voltage_wave][start_index:end_index],
'current':df_all.loc['%s.itx' %page, current_wave][start_index:end_index]})
df_tmp.sort_values(by = 'current', inplace = True)
upper, lower = df_tmp.current.max(), df_tmp.current.min()
#making target df
df_target = pd.DataFrame(current_array)
df_target.columns = ['current']
df_target['voltage'] = np.nan
#resample
df_tmp = pd.concat([df_target, df_tmp], sort=False)
df_tmp = df_tmp.reset_index(drop = True)
df_tmp = df_tmp.sort_values(by = 'current')
df_tmp = df_tmp.interpolate()
df_tmp = df_tmp.sort_index()
df_tmp = df_tmp[:len(current_array)]
#padding zero
df_tmp.at[df_tmp.loc[df_tmp.current<lower].index.tolist(), 'voltage'] = np.nan
df_tmp.at[df_tmp.loc[df_tmp.current>upper].index.tolist(), 'voltage'] = np.nan
return df_tmp
def resamp_helper(df_target, df_source, name):
df_tmp = pd.concat([df_target, df_source], sort=False)
df_tmp = df_tmp.reset_index(drop = True)
df_tmp = df_tmp.sort_values(by = name)
df_tmp = df_tmp.interpolate()
df_tmp = df_tmp.sort_index()
df_tmp = df_tmp[:len(df_target)]
return df_tmp
| yypai/ITX-pandas | itx_to_pandas_df.py | itx_to_pandas_df.py | py | 2,738 | python | en | code | 0 | github-code | 13 |
74265554259 | from flask import Flask, render_template, Response, jsonify,request
from Camera import VideoCamera
import cv2
a = 0
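# module-level flag toggled by the form button in index(); it is passed to
# VideoCamera.get_frame(), presumably to switch the camera's processing mode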
app = Flask(__name__)
video_stream = VideoCamera()
@app.route('/',methods=['GET','POST'])
def index():
global a
if request.method == 'POST':
if 'button_name' in request.form:
if a == 0:
a = 1
elif a == 1:
a = 0
return render_template('index.html')
def gen(camera):
while True:
frame = camera.get_frame(a)
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@app.route('/video_feed')
def video_feed():
return Response(gen(video_stream), mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host='127.0.0.1', debug=True, port="5000") | Thulasirobocop/Emotion-Detection | Web Application/app.py | app.py | py | 867 | python | en | code | 0 | github-code | 13 |
17061847084 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ZmEpAePrepayExtParam import ZmEpAePrepayExtParam
class ZhimaCreditEpAeprepayOrderRefundModel(object):
def __init__(self):
self._advance_amount = None
self._advance_currency = None
self._ext_param = None
self._order_id = None
self._order_time_millis = None
self._refund_amount = None
self._refund_balance_amount = None
self._refund_currency = None
self._refund_time = None
self._seller_login_id = None
self._son_order_id = None
self._sub_out_order_id = None
@property
def advance_amount(self):
return self._advance_amount
@advance_amount.setter
def advance_amount(self, value):
self._advance_amount = value
@property
def advance_currency(self):
return self._advance_currency
@advance_currency.setter
def advance_currency(self, value):
self._advance_currency = value
@property
def ext_param(self):
return self._ext_param
@ext_param.setter
def ext_param(self, value):
if isinstance(value, ZmEpAePrepayExtParam):
self._ext_param = value
else:
self._ext_param = ZmEpAePrepayExtParam.from_alipay_dict(value)
@property
def order_id(self):
return self._order_id
@order_id.setter
def order_id(self, value):
self._order_id = value
@property
def order_time_millis(self):
return self._order_time_millis
@order_time_millis.setter
def order_time_millis(self, value):
self._order_time_millis = value
@property
def refund_amount(self):
return self._refund_amount
@refund_amount.setter
def refund_amount(self, value):
self._refund_amount = value
@property
def refund_balance_amount(self):
return self._refund_balance_amount
@refund_balance_amount.setter
def refund_balance_amount(self, value):
self._refund_balance_amount = value
@property
def refund_currency(self):
return self._refund_currency
@refund_currency.setter
def refund_currency(self, value):
self._refund_currency = value
@property
def refund_time(self):
return self._refund_time
@refund_time.setter
def refund_time(self, value):
self._refund_time = value
@property
def seller_login_id(self):
return self._seller_login_id
@seller_login_id.setter
def seller_login_id(self, value):
self._seller_login_id = value
@property
def son_order_id(self):
return self._son_order_id
@son_order_id.setter
def son_order_id(self, value):
self._son_order_id = value
@property
def sub_out_order_id(self):
return self._sub_out_order_id
@sub_out_order_id.setter
def sub_out_order_id(self, value):
self._sub_out_order_id = value
def to_alipay_dict(self):
params = dict()
if self.advance_amount:
if hasattr(self.advance_amount, 'to_alipay_dict'):
params['advance_amount'] = self.advance_amount.to_alipay_dict()
else:
params['advance_amount'] = self.advance_amount
if self.advance_currency:
if hasattr(self.advance_currency, 'to_alipay_dict'):
params['advance_currency'] = self.advance_currency.to_alipay_dict()
else:
params['advance_currency'] = self.advance_currency
if self.ext_param:
if hasattr(self.ext_param, 'to_alipay_dict'):
params['ext_param'] = self.ext_param.to_alipay_dict()
else:
params['ext_param'] = self.ext_param
if self.order_id:
if hasattr(self.order_id, 'to_alipay_dict'):
params['order_id'] = self.order_id.to_alipay_dict()
else:
params['order_id'] = self.order_id
if self.order_time_millis:
if hasattr(self.order_time_millis, 'to_alipay_dict'):
params['order_time_millis'] = self.order_time_millis.to_alipay_dict()
else:
params['order_time_millis'] = self.order_time_millis
if self.refund_amount:
if hasattr(self.refund_amount, 'to_alipay_dict'):
params['refund_amount'] = self.refund_amount.to_alipay_dict()
else:
params['refund_amount'] = self.refund_amount
if self.refund_balance_amount:
if hasattr(self.refund_balance_amount, 'to_alipay_dict'):
params['refund_balance_amount'] = self.refund_balance_amount.to_alipay_dict()
else:
params['refund_balance_amount'] = self.refund_balance_amount
if self.refund_currency:
if hasattr(self.refund_currency, 'to_alipay_dict'):
params['refund_currency'] = self.refund_currency.to_alipay_dict()
else:
params['refund_currency'] = self.refund_currency
if self.refund_time:
if hasattr(self.refund_time, 'to_alipay_dict'):
params['refund_time'] = self.refund_time.to_alipay_dict()
else:
params['refund_time'] = self.refund_time
if self.seller_login_id:
if hasattr(self.seller_login_id, 'to_alipay_dict'):
params['seller_login_id'] = self.seller_login_id.to_alipay_dict()
else:
params['seller_login_id'] = self.seller_login_id
if self.son_order_id:
if hasattr(self.son_order_id, 'to_alipay_dict'):
params['son_order_id'] = self.son_order_id.to_alipay_dict()
else:
params['son_order_id'] = self.son_order_id
if self.sub_out_order_id:
if hasattr(self.sub_out_order_id, 'to_alipay_dict'):
params['sub_out_order_id'] = self.sub_out_order_id.to_alipay_dict()
else:
params['sub_out_order_id'] = self.sub_out_order_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ZhimaCreditEpAeprepayOrderRefundModel()
if 'advance_amount' in d:
o.advance_amount = d['advance_amount']
if 'advance_currency' in d:
o.advance_currency = d['advance_currency']
if 'ext_param' in d:
o.ext_param = d['ext_param']
if 'order_id' in d:
o.order_id = d['order_id']
if 'order_time_millis' in d:
o.order_time_millis = d['order_time_millis']
if 'refund_amount' in d:
o.refund_amount = d['refund_amount']
if 'refund_balance_amount' in d:
o.refund_balance_amount = d['refund_balance_amount']
if 'refund_currency' in d:
o.refund_currency = d['refund_currency']
if 'refund_time' in d:
o.refund_time = d['refund_time']
if 'seller_login_id' in d:
o.seller_login_id = d['seller_login_id']
if 'son_order_id' in d:
o.son_order_id = d['son_order_id']
if 'sub_out_order_id' in d:
o.sub_out_order_id = d['sub_out_order_id']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/ZhimaCreditEpAeprepayOrderRefundModel.py | ZhimaCreditEpAeprepayOrderRefundModel.py | py | 7,346 | python | en | code | 241 | github-code | 13 |
27151539434 | from django.test import TestCase, Client
from repository.models import PRIVATE, Repository
from django.contrib.auth.models import User
from branch.models import Branch
from django.urls import reverse, resolve
class BranchTestCase(TestCase):
def setUp(self):
user = User.objects.create(username="user1", password="user1")
repo = Repository.objects.create(name="repo", status=PRIVATE, creator=user)
Branch.objects.create(name="master", is_default=True, repository=repo)
Branch.objects.create(name="develop", is_default=False, repository=repo)
def test_branch_is_default(self):
master = Branch.objects.get(name="master")
develop = Branch.objects.get(name="develop")
self.assertEqual(master.is_default, True)
self.assertEqual(develop.is_default, False)
def test_branch_name(self):
master = Branch.objects.get(name="master")
develop = Branch.objects.get(name="develop")
self.assertEqual(master.name, "master")
self.assertEqual(develop.name, "develop")
def test_branch_repo(self):
master = Branch.objects.get(name="master")
develop = Branch.objects.get(name="develop")
self.assertEqual(master.repository.name, "repo")
self.assertEqual(develop.repository.name, "repo")
| marijamilanovic/UksGitHub | Uks/branch/tests/test_branch.py | test_branch.py | py | 1,323 | python | en | code | 0 | github-code | 13 |
38449248725 | import random
N = 100000 # number of trials
# one trial of the Monty Hall game
def montyOneTime():
treasure_door = random.randint(1,3)
#print("司会:宝は",treasure_door)
challengers_choice = random.randint(1,3)
#print("ゲスト:最初の選択",challengers_choice)
#司会の誘導
left_door=[1,2,3]
#当たりの場合
if challengers_choice == treasure_door:
left_door.remove(challengers_choice)
mc_ans = random.choice(left_door)
#ハズレの場合
else:
left_door.remove(treasure_door)
left_door.remove(challengers_choice)
mc_ans = left_door[0]
#print("司会:ハズレの箱は",mc_ans)
#Change or Not 変えるか否かはランダムで
r = random.randint(1,2)
if r==1: #変える
left_door = [1,2,3]
left_door.remove(mc_ans)
left_door.remove(challengers_choice)
last_answer = left_door[0]
#print("ゲスト:変更します!")
chg_flag = 1
    else: # stay
        last_answer = challengers_choice
        #print("Guest: I'll stay")
        chg_flag = 0
    #print("Final answer is", last_answer)
    # judge win or lose
if last_answer == treasure_door:
#print("正解です")
hit_flag = 1
else:
#print("残念でした")
hit_flag = 0
return chg_flag, hit_flag
# compute win rates for switching vs. staying
change_counter = 0 # number of trials where the guest switched
change_hit_counter = 0 # wins when the guest switched
no_change_counter = 0 # number of trials where the guest stayed
nochange_hit_counter = 0 # wins when the guest stayed
# repeat N times
for i in range(N):
ch,ok = montyOneTime()
    if ch == 1: # the guest switched
change_counter += 1
if ok == 1:
change_hit_counter += 1
else:
no_change_counter += 1
if ok == 1:
nochange_hit_counter += 1
print("変えたとき (回数=", change_counter, " ヒット数=", change_hit_counter, " 勝率=", change_hit_counter/change_counter*100, "%)")
print("変えないとき(回数=", no_change_counter, " ヒット数=", nochange_hit_counter, " 勝率=", nochange_hit_counter/no_change_counter*100, "%)")
print("トータル (回数=", no_change_counter + change_counter, " ヒット数=", change_hit_counter + nochange_hit_counter, " 勝率=",(change_hit_counter + nochange_hit_counter)/( change_counter + no_change_counter)*100, "%)") | bkh4149/aocchi | monty/monty.py | monty.py | py | 2,492 | python | ja | code | 0 | github-code | 13 |
73876167057 | import torch
class ImagesFromChunksCreator():
def __init__(self, chunk_size: int, image_size: int, inner_noise_dim: int, noise_dim: int):
self.__chunk_size = chunk_size
self.image_size = image_size
self.__inner_noise_dim = inner_noise_dim
self.__noise_dim = noise_dim
def get_images(self, generator: torch.nn.Module, noise: torch.Tensor, device: str) -> torch.Tensor:
samples_count = noise.size(0)
result_images = torch.zeros(samples_count, 3, self.image_size, self.image_size, device=device)
chunks_count = self.image_size // self.__chunk_size
chunk_sizes = self.__chunk_size * torch.ones((samples_count, 1), dtype=torch.float, device=device)
for h_chunk_index in range(chunks_count):
x_start_noise = h_chunk_index * self.__inner_noise_dim
x_end_noise = x_start_noise + self.__noise_dim
x_pos = h_chunk_index*self.__chunk_size
fullface_x = torch.ones((samples_count, 1), dtype=torch.float, device=device) * x_pos
for v_chunk_index in range(chunks_count):
y_pos = v_chunk_index*self.__chunk_size
y_start_noise = v_chunk_index * self.__inner_noise_dim
y_end_noise = y_start_noise + self.__noise_dim
current_noise = noise[:, :, y_start_noise:y_end_noise, x_start_noise:x_end_noise]
fullface_y = torch.ones((samples_count, 1), dtype=torch.float, device=device) * y_pos
current_chunk = generator(current_noise, fullface_x, fullface_y, chunk_sizes)
result_images[:, :, y_pos:(y_pos+self.__chunk_size), x_pos:(x_pos+self.__chunk_size)] = current_chunk.detach()
return result_images
| gmum/LocoGAN | src/utils/images_from_chunks_creator.py | images_from_chunks_creator.py | py | 1,776 | python | en | code | 11 | github-code | 13 |
21700636227 | import logging
from django.core.management.base import BaseCommand
from mooringlicensing.components.main.utils import sticker_export, email_stickers_document
logger = logging.getLogger('mooringlicensing')
class Command(BaseCommand):
help = 'Export and email sticker data'
def handle(self, *args, **options):
updates, errors = sticker_export()
success_filenames, error_filenames = email_stickers_document()
cmd_name = __name__.split('.')[-1].replace('_', ' ').upper()
error_count = len(errors) + len(error_filenames)
err_str = '<strong style="color: red;">Errors: {}</strong>'.format(error_count) if error_count else '<strong style="color: green;">Errors: 0</strong>'
msg = '<p>{} completed. {}. IDs updated: {}.</p>'.format(cmd_name, err_str, updates)
logger.info(msg)
print(msg) # will redirect to cron_tasks.log file, by the parent script
| jmushtaq/mooringlicensing-old | mooringlicensing/management/commands/export_and_email_sticker_data.py | export_and_email_sticker_data.py | py | 920 | python | en | code | 0 | github-code | 13 |
34824871949 | from tkinter import *
from tkinter import ttk
root = Tk()
# Top row, stick means expand in West.
# Different directions are N S E W NE NW etc
# Padding 4 pixels
Label(root,text = "First Name").grid(row = 0,sticky = W, padx = 4)
# A space for user entry
Entry(root).grid(row = 0, column = 1, sticky = E, pady = 4)
Label(root,text = "Last Name").grid(row = 1,sticky = W, padx = 4)
Entry(root).grid(row = 1, column = 1, sticky = E, pady = 4)
Button(root, text = "Submit").grid(row = 3)
root.mainloop()
| miketr33/python-learning | tkinter_with_derekbanas/grid_manager.py | grid_manager.py | py | 509 | python | en | code | 0 | github-code | 13 |
8154509261 | import pytest
from pg_grant import NoSuchObjectError
from pg_grant.query import get_all_table_acls, get_table_acl
expected_acls = {
'public': {
# table1 has default privileges, so None is returned.
'table1': None,
# alice is owner, bob was granted all
'table2': {'alice=arwdDxt/alice', 'bob=ar*wdDxt/alice'},
# view1 has default privileges, so None is returned.
'view1': None,
# alice is owner, bob was granted INSERT
'view2': {'alice=arwdDxt/alice', 'bob=a/alice'},
# mview1 has default privileges, so None is returned.
'mview1': None,
},
}
def as_set(v):
if v is not None:
return set(v)
@pytest.mark.parametrize('name, acls', expected_acls['public'].items())
def test_get_table_acl_visible(connection, name, acls):
"""Find visible (i.e. in search path) tables matching ``name``."""
table = get_table_acl(connection, name)
assert as_set(table.acl) == acls
@pytest.mark.parametrize('schema, name, acls', [
(schema, name, acl)
for schema, d in expected_acls.items()
for name, acl in d.items()
])
def test_get_table_acl_schema(connection, schema, name, acls):
"""Find tables from ``schema`` matching ``name``."""
table = get_table_acl(connection, name, schema)
assert as_set(table.acl) == acls
def test_get_all_table_acls(connection):
"""Get all sequences in all schemas."""
tables = get_all_table_acls(connection)
schemas = {x.schema for x in tables}
assert schemas == {'public', 'information_schema', 'pg_catalog'}
tested = 0
for table in tables:
if table.schema not in expected_acls:
continue
if table.name not in expected_acls[table.schema]:
continue
assert as_set(table.acl) == expected_acls[table.schema][table.name]
tested += 1
assert tested == sum(len(v) for v in expected_acls.values())
def test_no_such_object(connection):
with pytest.raises(NoSuchObjectError):
get_table_acl(connection, 'table3')
| RazerM/pg_grant | tests/query/test_table.py | test_table.py | py | 2,056 | python | en | code | 5 | github-code | 13 |
38924493815 | import threading
import concurrent.futures
from perf.defines import DATA_FEED_CONTAINER, REDIS_CONTAINER, REDIS_EXPORTER_CONTAINER
from perf.state.phase_result_scheduling_state import PhaseResultSchedulingState
from perf.utils import local_now
MAX_RESCHEDULES = 1
class SchedulingState(PhaseResultSchedulingState):
def __init__(self):
super(SchedulingState, self).__init__()
self.pods_work_queue = None
self.pods_done = []
self.pods_per_node = {}
# contains info about oom_score_adj per pid per container per pod. See OOMHandler
self.pids_per_container_per_pod = {}
self.pods_priorities = {}
self.reschedule_events_per_pod = {}
self.last_oom_event_time_per_node = {}
# TODO is this needed?
self.global_lock = threading.Lock()
def init_pods_work_queue(self, work_queue):
self.pods_work_queue = work_queue
def get_last_scheduled_pod(self, node):
if node not in self.pods_per_node or len(self.pods_per_node[node]) == 0:
return None
return self.pods_per_node[node][-1]
def get_node_for_scheduled_pod(self, pod):
for node in self.pods_per_node:
for scheduled_pod in self.pods_per_node[node]:
if scheduled_pod == pod:
return node
return None
def get_containers_per_pod(self, pod):
# TODO make this dynamic
return [DATA_FEED_CONTAINER, REDIS_CONTAINER, REDIS_EXPORTER_CONTAINER]
def get_schedulable_pod_priority(self, node_name):
last_pod = self.get_last_scheduled_pod(node_name)
if last_pod is None:
priority = 10000
else:
# TODO what if key does not exist
last_priority = self.pods_priorities[last_pod]
priority = last_priority - 1
return priority
def add_pod_to_schedule_state(self, pod_name, node_name, priority):
# check all nodes to make sure pod is not scheduled twice
for node in self.pods_per_node:
if pod_name in self.pods_per_node[node]:
raise Exception(f'[Scheduler] Pod {pod_name} is already assigned to node {node}')
# scheduling state update
if node_name in self.pods_per_node:
self.pods_per_node[node_name].append(pod_name)
else:
self.pods_per_node[node_name] = [pod_name]
self.pods_priorities[pod_name] = priority
def remove_pod_from_schedule_state(self, pod_name):
node_name = None
count = 0 # to make sure only 1 pod exists
for node in self.pods_per_node:
if pod_name in self.pods_per_node[node]:
count += 1
node_name = node
if node_name is None:
return
if count > 1:
raise Exception(f'[Scheduler] Found {count} pods with name {pod_name}, should be 1')
self.pods_per_node[node_name].remove(pod_name)
# clean priority
del self.pods_priorities[pod_name]
def pop_or_wait_work_queue(self, pending_futures):
pod_name = None
if len(self.pods_work_queue) == 0:
# check running tasks
# wait for first finished task
print(f'[Scheduler] No pods in queue, waiting for pending futures to finish...')
for _ in concurrent.futures.as_completed(pending_futures.keys()):
self.global_lock.acquire()
# check if it was the last one
all_done = True
for f in pending_futures.keys():
if not f.done():
all_done = False
if len(self.pods_work_queue) == 0:
if all_done:
# all tasks finished and no more queued
self.global_lock.release()
print(f'[Scheduler] All tasks finished')
return None
else:
# continue waiting
print(f'[Scheduler] Continue waiting')
self.global_lock.release()
continue
else:
# continue scheduling
pod_name = self.pods_work_queue.pop()
print(f'[Scheduler] Continue scheduling with {pod_name}')
break
else:
pod_name = self.pods_work_queue.pop()
print(f'[Scheduler] Popped {pod_name}')
if self.global_lock.locked():
self.global_lock.release()
return pod_name
def reschedule_or_complete(self, pod_name, reschedule, reason):
self.remove_pod_from_schedule_state(pod_name)
# decide if move to done schedule state or reschedule for another run
self.global_lock.acquire()
if not reschedule:
print(f'[Scheduler] {pod_name} done, {reason}')
self.pods_done.append(pod_name)
else:
reschedule_counter = len(self.get_reschedule_reasons(pod_name))
if reschedule_counter < MAX_RESCHEDULES:
# reschedule - append to the end of the work queue
print(f'[Scheduler] {pod_name} rescheduled, reason {reason}')
self.inc_reschedule_counter(pod_name, reason)
self.pods_work_queue.append(pod_name)
else:
print(f'[Scheduler] {pod_name} done after max {MAX_RESCHEDULES} reschedule attempts')
self.pods_done.append(pod_name)
self.global_lock.release()
# TODO handle
# exception calling callback for <Future at 0x110fa6760 state=finished returned tuple>
# Traceback (most recent call last):
# File "/usr/local/Cellar/python@3.9/3.9.12/Frameworks/Python.framework/Versions/3.9/lib/python3.9/concurrent/futures/_base.py", line 330, in _invoke_callbacks
# callback(self)
# File "/Users/anov/IdeaProjects/svoe/data_feed/perf/scheduler/scheduler.py", line 305, in done_estimation_callback
# self.scheduling_state.reschedule_or_complete(pod_name, reschedule, reason)
# File "/Users/anov/IdeaProjects/svoe/data_feed/perf/state/scheduling_state.py", line 141, in reschedule_or_complete
# self.global_lock.release()
# RuntimeError: release unlocked lock
def get_reschedule_reasons(self, pod_name):
if pod_name not in self.reschedule_events_per_pod:
return []
return self.reschedule_events_per_pod[pod_name]
def inc_reschedule_counter(self, pod_name, reason):
if pod_name not in self.reschedule_events_per_pod:
self.reschedule_events_per_pod[pod_name] = []
self.reschedule_events_per_pod[pod_name].append(reason)
def find_pod_container_by_pid(self, pid):
for pod in self.pids_per_container_per_pod:
for container in self.pids_per_container_per_pod[pod]:
if pid in self.pids_per_container_per_pod[pod][container]:
return pod, container
return None, None
def get_last_oom_time(self, node):
if node not in self.last_oom_event_time_per_node:
return None
return self.last_oom_event_time_per_node[node]
def mark_last_oom_time(self, node):
self.last_oom_event_time_per_node[node] = local_now()
| dirtyValera/svoe | data_feed/perf/state/scheduling_state.py | scheduling_state.py | py | 7,387 | python | en | code | 12 | github-code | 13 |
19383440824 | import requests as reqs
import os
import sys
def main(fileList :list):
for i, file in enumerate(fileList):
url = file
name = url.split('/')
if name[-1] == '':
name.pop(-1)
name = name[-1]
r = reqs.get(url)
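        # scrape the streamtape link out of the page HTML by string slicing: take the
        # '/streamta.pe/...' URL, patch its last two token characters from the 'xcdd'
        # fragment, and append '&stream=1' to get a direct download URL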
rstr = str(r.content)
rstr = rstr[rstr.find('/streamta.pe/'):]
link = rstr[:rstr.find('<')]
rstr = rstr[rstr.find("xcdd"):]
rstr = rstr[:rstr.find("\\")]
rstr = rstr[-2:]
link = 'https:/' + link[:-2] + rstr + '&stream=1'
checkFile(f"./videos/{name}.mp4")
download(link, str(i+1), f"./videos/{name}.mp4")
sys.stdout.write(f"\nFile {i+1} download complete\n")
print("All downloads are complete")
def download(url, fileIndex, fileName):
with open(fileName, "wb") as w:
print('File opened')
r = reqs.get(url, stream=True)
total = int(r.headers.get("content-length"))
downloaded = 0
sys.stdout.write(f"Downloading file {fileIndex} from {url}\n")
for data in r.iter_content(chunk_size=max(int(total/1000), 1024*1024)):
downloaded += len(data)
percentage = int((downloaded/total)/0.02)
sys.stdout.write(f'\r[{"*"*percentage}{"."*(50-percentage)}]')
w.write(data)
def checkFolder(name :str):
try:
os.makedirs(f"./videos/{name}")
except:
pass
def checkFile(name :str):
if not(os.path.isfile(name)):
with open(name, "x"):
pass
with open ("STToDownload.txt", "r") as t:
t = t.read().split("\n")
name = t[0]
if t[-1] == '':
t.pop(-1)
print(t)
checkFolder('./videos/')
main(t)
| KirppuAapo/StreamTapeDownloader | StreamTapeDownloader.py | StreamTapeDownloader.py | py | 1,706 | python | en | code | 3 | github-code | 13 |