seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
27188462283 | from collections import deque
import sys
input = sys.stdin.readline
def dfs(s):
    """Iterative depth-first search from vertex `s`.

    Uses the module-level adjacency list `connection` (1-indexed) and appends
    the visit order to the module-level `ans_dfs`. Always descends into the
    smallest-numbered unvisited neighbor first.

    Fix: the original scanned all vertices 1..N and tested membership with
    `w in connection[s]` (a list scan) on every step — O(N * deg) per step.
    Iterating the sorted adjacency list gives the same visit order directly.
    """
    visited = [0] * (N + 1)
    stack = []
    visited[s] = 1
    ans_dfs.append(s)
    while True:
        # Pick the smallest unvisited neighbor of the current vertex.
        for w in sorted(connection[s]):
            if visited[w] == 0:
                # Push both the parent and the child so backtracking can
                # retrace the path (mirrors the original stack discipline).
                stack.append(s)
                stack.append(w)
                ans_dfs.append(w)
                s = w
                visited[s] = 1
                break
        else:
            # No unvisited neighbor: backtrack one stack entry, or stop.
            if stack:
                s = stack.pop()
            else:
                break
def bfs(s):
    """Breadth-first search from vertex `s`, smallest-numbered neighbor first.

    Appends the visit order to the module-level `ans_bfs` and returns it.

    Fixes: the original enqueued already-visited vertices (checking `visited`
    only at dequeue time, so the queue could grow to O(E * deg)), and it
    sorted `connection[n]` in place, mutating the shared adjacency list.
    Marking at enqueue time and iterating a sorted copy produces the exact
    same visit order without either side effect.
    """
    visited = [0] * (N + 1)
    queue = deque([s])
    visited[s] = 1
    while queue:
        n = queue.popleft()
        ans_bfs.append(n)
        for k in sorted(connection[n]):
            if visited[k] == 0:
                visited[k] = 1
                queue.append(k)
    return ans_bfs
# BOJ 1260 driver: read N vertices, M undirected edges and start vertex V,
# build the adjacency list, then print DFS and BFS visit orders.
N, M, V = map(int, input().split())
arr = []        # flat list of edge endpoints: [A1, B1, A2, B2, ...]
ans_dfs = []    # DFS visit order (filled by dfs)
ans_bfs = []    # BFS visit order (filled by bfs)
for _ in range(M):
    A, B = map(int, input().split())
    arr.append(A)
    arr.append(B)
# Undirected adjacency list, 1-indexed (slot 0 unused).
connection = [[] for _ in range(N + 1)]
for ar in range(0, len(arr), 2):
    connection[arr[ar]].append(arr[ar + 1])
    connection[arr[ar + 1]].append(arr[ar])
dfs(V)
bfs(V)
print(*ans_dfs)
print(*ans_bfs)
| Nam4o/Algorithm | 백준/Silver/1260. DFS와 BFS/DFS와 BFS.py | DFS와 BFS.py | py | 1,331 | python | en | code | 1 | github-code | 13 |
13230514965 | # You are given an array A of size N, and a number K. You have to find the sum of all the prime numbers in the array, whose value is strictly lesser than K.
def isPrime(n):
    """Return True if `n` is a prime number, else False.

    Fixes over the original:
    - n <= 0 previously returned True (the trial range was empty); any
      n < 2 is now correctly reported as non-prime.
    - trial division now stops at sqrt(n) instead of n - 1, turning the
      check from O(n) into O(sqrt(n)) without changing any prime verdict.
    """
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True
# For each test case: read the array size, the array and K, then print the
# sum of array elements that are prime and strictly less than K.
tests = int(input())
for i in range(tests):
    sum = 0  # NOTE(review): shadows the builtin `sum`; harmless in this script
    size = int(input())
    arr = list(map(int, input().split()))
    k = int(input())
    for i in range(0, size):  # NOTE(review): reuses the outer loop variable `i`
        if (isPrime(arr[i]) == True) and (arr[i] < k):
            sum = sum + arr[i]
    print(sum)
| muskaan190/Python-Codes | Sum Of Files.py | Sum Of Files.py | py | 515 | python | en | code | 1 | github-code | 13 |
7733836214 | import tensorflow as tf
tf.compat.v1.disable_eager_execution()
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import logging
import os
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
import json
import random
from collections import defaultdict
# import s3fs
import h5py
import obspy
from scipy.interpolate import interp1d
from tqdm import tqdm
def py_func_decorator(output_types=None, output_shapes=None, name=None):
    """Decorator factory that wraps a plain-numpy function as a TensorFlow op.

    The wrapped function is executed eagerly via tf.numpy_function; its flat
    outputs are given static shapes (when `output_shapes` is provided) and
    repacked into the nested structure described by `output_types`.
    """

    def decorator(func):
        def call(*args, **kwargs):
            flat_types = tf.nest.flatten(output_types)
            flat_values = tf.numpy_function(func, inp=args, Tout=flat_types, name=name)
            if output_shapes is not None:
                # Attach static shape information lost by numpy_function.
                for value, shape in zip(flat_values, output_shapes):
                    value.set_shape(shape)
            return tf.nest.pack_sequence_as(output_types, flat_values)

        return call

    return decorator
def dataset_map(iterator, output_types, output_shapes=None, num_parallel_calls=None, name=None, shuffle=False):
    """Build a tf.data.Dataset that fetches items by index from `iterator`.

    `iterator` must support len() and integer indexing. Indices 0..len-1 are
    (optionally re-shuffled every epoch and) mapped through `iterator[idx]`
    using a numpy-function wrapper so arbitrary Python readers can feed TF.
    """
    count = len(iterator)
    dataset = tf.data.Dataset.range(count)
    if shuffle:
        dataset = dataset.shuffle(count, reshuffle_each_iteration=True)

    @py_func_decorator(output_types, output_shapes, name=name)
    def fetch_entry(idx):
        return iterator[idx]

    return dataset.map(fetch_entry, num_parallel_calls=num_parallel_calls)
def normalize(data, axis=(0,)):
    """Standardize `data` in place along `axis` (zero mean, unit std).

    data shape: (nt, nsta, nch). Channels with zero variance are left
    centered but unscaled (their std is treated as 1). Returns `data`.
    """
    data -= np.mean(data, axis=axis, keepdims=True)
    scale = np.std(data, axis=axis, keepdims=True)
    # Avoid dividing flat (dead) channels by zero.
    scale = np.where(scale == 0, 1, scale)
    data /= scale
    return data
def normalize_long(data, axis=(0,), window=3000):
    """Standardize a long waveform with a sliding-window mean/std.

    data: array of shape (nt, nar, nch). Mean and std are computed in windows
    of `window` samples (hop = window // 2), linearly interpolated to every
    sample, and used to standardize the trace. If fewer than 3 channels carry
    any signal, the result is scaled up to mimic channel dropout. Returns a
    new array cast back to data's dtype.
    """
    nt, nar, nch = data.shape
    if window is None:
        window = nt
    shift = window // 2
    dtype = data.dtype

    ## std in slide windows
    data_pad = np.pad(data, ((window // 2, window // 2), (0, 0), (0, 0)), mode="reflect")
    t = np.arange(0, nt, shift, dtype="int")
    std = np.zeros([len(t) + 1, nar, nch])
    mean = np.zeros([len(t) + 1, nar, nch])
    for i in range(1, len(std)):
        std[i, :] = np.std(data_pad[i * shift : i * shift + window, :, :], axis=axis)
        mean[i, :] = np.mean(data_pad[i * shift : i * shift + window, :, :], axis=axis)

    t = np.append(t, nt)
    # std[-1, :] = np.std(data_pad[-window:, :], axis=0)
    # mean[-1, :] = np.mean(data_pad[-window:, :], axis=0)
    # Duplicate the edge rows so the interpolation below covers [0, nt].
    std[-1, ...], mean[-1, ...] = std[-2, ...], mean[-2, ...]
    std[0, ...], mean[0, ...] = std[1, ...], mean[1, ...]
    # std[std == 0] = 1.0

    ## normalize data with interplated std
    t_interp = np.arange(nt, dtype="int")
    std_interp = interp1d(t, std, axis=0, kind="slinear")(t_interp)
    # std_interp = np.exp(interp1d(t, np.log(std), axis=0, kind="slinear")(t_interp))
    mean_interp = interp1d(t, mean, axis=0, kind="slinear")(t_interp)
    tmp = np.sum(std_interp, axis=(0, 1))  # per-channel activity; 0 => dead channel
    std_interp[std_interp == 0] = 1.0  # guard against division by zero on flat windows
    data = (data - mean_interp) / std_interp
    # data = (data - mean_interp)/(std_interp + 1e-12)

    ### dropout effect of < 3 channel
    nonzero = np.count_nonzero(tmp)
    if (nonzero < 3) and (nonzero > 0):
        data *= 3.0 / nonzero

    return data.astype(dtype)
def normalize_batch(data, window=3000):
    """Sliding-window standardization for a batch of station waveforms.

    data: array of shape (nsta, nt, nar, nch). Same scheme as normalize_long
    (windowed mean/std, hop = window // 2, linear interpolation along the
    time axis), vectorized over stations, with per-station channel-dropout
    rescaling. Returns the normalized array.
    """
    nsta, nt, nar, nch = data.shape
    if window is None:
        window = nt
    shift = window // 2

    ## std in slide windows
    data_pad = np.pad(data, ((0, 0), (window // 2, window // 2), (0, 0), (0, 0)), mode="reflect")
    t = np.arange(0, nt, shift, dtype="int")
    std = np.zeros([nsta, len(t) + 1, nar, nch])
    mean = np.zeros([nsta, len(t) + 1, nar, nch])
    # NOTE(review): this loop stops at len(t) - 1 while normalize_long uses
    # range(1, len(std)); the final window row is filled by the edge copy
    # below — confirm the off-by-one is intended.
    for i in range(1, len(t)):
        std[:, i, :, :] = np.std(data_pad[:, i * shift : i * shift + window, :, :], axis=1)
        mean[:, i, :, :] = np.mean(data_pad[:, i * shift : i * shift + window, :, :], axis=1)

    t = np.append(t, nt)
    # std[:, -1, :] = np.std(data_pad[:, -window:, :], axis=1)
    # mean[:, -1, :] = np.mean(data_pad[:, -window:, :], axis=1)
    # Duplicate the edge rows so the interpolation below covers [0, nt].
    std[:, -1, :, :], mean[:, -1, :, :] = std[:, -2, :, :], mean[:, -2, :, :]
    std[:, 0, :, :], mean[:, 0, :, :] = std[:, 1, :, :], mean[:, 1, :, :]
    # std[std == 0] = 1

    # ## normalize data with interplated std
    t_interp = np.arange(nt, dtype="int")
    std_interp = interp1d(t, std, axis=1, kind="slinear")(t_interp)
    # std_interp = np.exp(interp1d(t, np.log(std), axis=1, kind="slinear")(t_interp))
    mean_interp = interp1d(t, mean, axis=1, kind="slinear")(t_interp)
    tmp = np.sum(std_interp, axis=(1, 2))  # per-(station, channel) activity
    std_interp[std_interp == 0] = 1.0  # guard against division by zero
    data = (data - mean_interp) / std_interp
    # data = (data - mean_interp)/(std_interp + 1e-12)

    ### dropout effect of < 3 channel
    nonzero = np.count_nonzero(tmp, axis=-1)
    data[nonzero > 0, ...] *= 3.0 / nonzero[nonzero > 0][:, np.newaxis, np.newaxis, np.newaxis]

    return data
class DataConfig:
    """Configuration for waveform windows and label generation.

    All values are class-level defaults; any of them can be overridden by
    passing the attribute name as a keyword argument to the constructor.
    """

    seed = 123
    use_seed = True
    n_channel = 3  # number of waveform components (e.g. E/N/Z)
    n_class = 3  # output classes: noise, P, S
    sampling_rate = 100  # Hz
    dt = 1.0 / sampling_rate
    X_shape = [3000, 1, n_channel]  # input window shape: (nt, nsta, nch)
    Y_shape = [3000, 1, n_class]  # label window shape: (nt, nsta, nclass)
    min_event_gap = 3 * sampling_rate  # minimum separation between stacked events (samples)
    label_shape = "gaussian"  # "gaussian" or "triangle"
    label_width = 30  # pick-label width in samples
    dtype = "float32"

    def __init__(self, **kwargs):
        # Override any class-level default with a keyword argument.
        for k, v in kwargs.items():
            setattr(self, k, v)
class DataReader:
    """Base reader for seismic waveforms in numpy/hdf5/mseed/sac/s3 formats.

    Subclasses implement ``__getitem__``/``dataset`` for training, testing
    and prediction. The ``read_*`` helpers return a meta dict containing at
    least ``"data"`` (shape (nt, nsta, nch)) plus, depending on the source,
    P/S pick lists (``"itp"``/``"its"``), a start time (``"t0"``) and a
    ``"station_id"``.
    """

    def __init__(
        self, format="numpy", config=DataConfig(), response_xml=None, sampling_rate=100, highpass_filter=0, **kwargs
    ):
        self.buffer = {}  # cache of already-read numpy files, keyed by path
        self.n_channel = config.n_channel
        self.n_class = config.n_class
        self.X_shape = config.X_shape
        self.Y_shape = config.Y_shape
        self.dt = config.dt
        self.dtype = config.dtype
        self.label_shape = config.label_shape
        self.label_width = config.label_width
        self.config = config
        self.format = format
        # if "highpass_filter" in kwargs:
        #     self.highpass_filter = kwargs["highpass_filter"]
        self.highpass_filter = highpass_filter
        # self.response_xml = response_xml
        if response_xml is not None:
            self.response = obspy.read_inventory(response_xml)
        else:
            self.response = None
        self.sampling_rate = sampling_rate
        if format in ["numpy", "mseed", "sac"]:
            self.data_dir = kwargs["data_dir"]
            try:
                csv = pd.read_csv(kwargs["data_list"], header=0, sep="[,|\s+]", engine="python")
            except Exception:  # fall back to tab-separated file lists
                csv = pd.read_csv(kwargs["data_list"], header=0, sep="\t")
            self.data_list = csv["fname"]
            self.num_data = len(self.data_list)
        elif format == "hdf5":
            self.h5 = h5py.File(kwargs["hdf5_file"], "r", libver="latest", swmr=True)
            self.h5_data = self.h5[kwargs["hdf5_group"]]
            self.data_list = list(self.h5_data.keys())
            self.num_data = len(self.data_list)
        elif format == "s3":
            # NOTE(review): requires the s3fs import, which is currently
            # commented out at the top of the file — confirm before use.
            self.s3fs = s3fs.S3FileSystem(
                anon=kwargs["anon"],
                key=kwargs["key"],
                secret=kwargs["secret"],
                client_kwargs={"endpoint_url": kwargs["s3_url"]},
                use_ssl=kwargs["use_ssl"],
            )
            self.num_data = 0
        else:
            # Fixed: was `raise (f"...")`, which raises TypeError because a
            # str is not an exception.
            raise ValueError(f"{format} not support!")

    def __len__(self):
        """Number of files/keys available to this reader."""
        return self.num_data

    def read_numpy(self, fname):
        """Read one .npz sample into a meta dict (cached in self.buffer).

        Accepts picks under either the legacy keys ``p_idx``/``s_idx`` or the
        newer ``itp``/``its``; scalar picks are wrapped as [[idx]].
        """
        # try:
        if fname not in self.buffer:
            npz = np.load(fname)
            meta = {}
            if len(npz["data"].shape) == 2:
                # Single-station file: add the station axis -> (nt, 1, nch).
                meta["data"] = npz["data"][:, np.newaxis, :]
            else:
                meta["data"] = npz["data"]
            if "p_idx" in npz.files:
                if len(npz["p_idx"].shape) == 0:
                    meta["itp"] = [[npz["p_idx"]]]
                else:
                    meta["itp"] = npz["p_idx"]
            if "s_idx" in npz.files:
                if len(npz["s_idx"].shape) == 0:
                    meta["its"] = [[npz["s_idx"]]]
                else:
                    meta["its"] = npz["s_idx"]
            if "itp" in npz.files:
                if len(npz["itp"].shape) == 0:
                    meta["itp"] = [[npz["itp"]]]
                else:
                    meta["itp"] = npz["itp"]
            if "its" in npz.files:
                if len(npz["its"].shape) == 0:
                    meta["its"] = [[npz["its"]]]
                else:
                    meta["its"] = npz["its"]
            if "station_id" in npz.files:
                meta["station_id"] = npz["station_id"]
            if "sta_id" in npz.files:
                meta["station_id"] = npz["sta_id"]
            if "t0" in npz.files:
                meta["t0"] = npz["t0"]
            self.buffer[fname] = meta
        else:
            meta = self.buffer[fname]
        return meta
        # except:
        #     logging.error("Failed reading {}".format(fname))
        #     return None

    def read_hdf5(self, fname):
        """Read one dataset (by key) from the opened HDF5 group into a meta
        dict; picks come from the dataset attributes, same key scheme as
        read_numpy."""
        data = self.h5_data[fname][()]
        attrs = self.h5_data[fname].attrs
        meta = {}
        if len(data.shape) == 2:
            meta["data"] = data[:, np.newaxis, :]
        else:
            meta["data"] = data
        if "p_idx" in attrs:
            if len(attrs["p_idx"].shape) == 0:
                meta["itp"] = [[attrs["p_idx"]]]
            else:
                meta["itp"] = attrs["p_idx"]
        if "s_idx" in attrs:
            if len(attrs["s_idx"].shape) == 0:
                meta["its"] = [[attrs["s_idx"]]]
            else:
                meta["its"] = attrs["s_idx"]
        if "itp" in attrs:
            if len(attrs["itp"].shape) == 0:
                meta["itp"] = [[attrs["itp"]]]
            else:
                meta["itp"] = attrs["itp"]
        if "its" in attrs:
            if len(attrs["its"].shape) == 0:
                meta["its"] = [[attrs["its"]]]
            else:
                meta["its"] = attrs["its"]
        if "t0" in attrs:
            meta["t0"] = attrs["t0"]
        return meta

    def read_s3(self, format, fname, bucket, key, secret, s3_url, use_ssl):
        """Open one object from S3 and dispatch to the matching reader."""
        with self.s3fs.open(bucket + "/" + fname, "rb") as fp:
            if format == "numpy":
                meta = self.read_numpy(fp)
            elif format == "mseed":
                meta = self.read_mseed(fp)
            else:
                # Fixed: was `raise (f"...")` (TypeError: str is not an exception).
                raise ValueError(f"Format {format} not supported")
        return meta

    def read_mseed(self, fname, response=None, highpass_filter=0.0, sampling_rate=100, return_single_station=True):
        """Read a miniSEED file into (nt, nsta, nch) float32 data + metadata.

        Traces are merged, optionally corrected for instrument sensitivity,
        resampled to `sampling_rate`, demeaned, optionally high-passed, then
        trimmed to a common time span and assembled per station/component.
        Returns {} when the file cannot be read or contains no usable trace.
        """
        try:
            stream = obspy.read(fname)
            stream = stream.merge(fill_value="latest")
            if response is not None:
                # response = obspy.read_inventory(response_xml)
                stream = stream.remove_sensitivity(response)
        except Exception as e:
            print(f"Error reading {fname}:\n{e}")
            return {}

        tmp_stream = obspy.Stream()
        for trace in stream:
            if len(trace.data) < 10:
                continue

            ## interpolate to 100 Hz
            if abs(trace.stats.sampling_rate - sampling_rate) > 0.1:
                logging.warning(f"Resampling {trace.id} from {trace.stats.sampling_rate} to {sampling_rate} Hz")
                try:
                    trace = trace.interpolate(sampling_rate, method="linear")
                except Exception as e:
                    print(f"Error resampling {trace.id}:\n{e}")

            trace = trace.detrend("demean")

            ## highpass filtering > 1Hz
            if highpass_filter > 0.0:
                trace = trace.filter("highpass", freq=highpass_filter)

            tmp_stream.append(trace)

        if len(tmp_stream) == 0:
            return {}
        stream = tmp_stream

        begin_time = min([st.stats.starttime for st in stream])
        end_time = max([st.stats.endtime for st in stream])
        stream = stream.trim(begin_time, end_time, pad=True, fill_value=0)

        comp = ["3", "2", "1", "E", "N", "U", "V", "Z"]
        order = {key: i for i, key in enumerate(comp)}
        comp2idx = {
            "3": 0,
            "2": 1,
            "1": 2,
            "E": 0,
            "N": 1,
            "Z": 2,
            "U": 0,
            "V": 1,
        }  ## only for cases less than 3 components

        station_ids = defaultdict(list)
        for tr in stream:
            # Trace id minus the last char groups by station+instrument; the
            # last char is the component code.
            station_ids[tr.id[:-1]].append(tr.id[-1])
            if tr.id[-1] not in comp:
                print(f"Unknown component {tr.id[-1]}")

        station_keys = sorted(list(station_ids.keys()))
        nx = len(station_ids)
        nt = len(stream[0].data)
        data = np.zeros([3, nt, nx], dtype=np.float32)
        for i, sta in enumerate(station_keys):
            for j, c in enumerate(sorted(station_ids[sta], key=lambda x: order[x])):
                if len(station_ids[sta]) != 3:  ## less than 3 component
                    j = comp2idx[c]

                if len(stream.select(id=sta + c)) == 0:
                    print(f"Empty trace: {sta+c} {begin_time}")
                    continue

                trace = stream.select(id=sta + c)[0]

                ## accerleration to velocity
                # NOTE(review): assumes a trailing "N" in the channel prefix
                # marks a strong-motion (acceleration) sensor — confirm.
                if sta[-1] == "N":
                    trace = trace.integrate().filter("highpass", freq=1.0)

                tmp = trace.data.astype("float32")
                data[j, : len(tmp), i] = tmp[:nt]

        # if return_single_station and (len(station_keys) > 1):
        #     print(f"Warning: {fname} has multiple stations, returning only the first one {station_keys[0]}")
        #     data = data[:, :, 0:1]
        #     station_keys = station_keys[0:1]

        meta = {
            "data": data.transpose([1, 2, 0]),
            "t0": begin_time.datetime.isoformat(timespec="milliseconds"),
            "station_id": station_keys,
        }
        return meta

    def read_sac(self, fname):
        """Read a SAC (or other obspy-readable) file into (nt, 1, n_channel)
        data with a start-time string."""
        mseed = obspy.read(fname)
        mseed = mseed.detrend("spline", order=2, dspline=5 * mseed[0].stats.sampling_rate)
        mseed = mseed.merge(fill_value=0)
        if self.highpass_filter > 0:
            mseed = mseed.filter("highpass", freq=self.highpass_filter)
        starttime = min([st.stats.starttime for st in mseed])
        endtime = max([st.stats.endtime for st in mseed])
        mseed = mseed.trim(starttime, endtime, pad=True, fill_value=0)

        if abs(mseed[0].stats.sampling_rate - self.config.sampling_rate) > 1:
            logging.warning(
                f"Sampling rate mismatch in {fname.split('/')[-1]}: {mseed[0].stats.sampling_rate}Hz != {self.config.sampling_rate}Hz "
            )

        order = ["3", "2", "1", "E", "N", "Z"]
        order = {key: i for i, key in enumerate(order)}
        comp2idx = {"3": 0, "2": 1, "1": 2, "E": 0, "N": 1, "Z": 2}

        t0 = starttime.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
        nt = len(mseed[0].data)
        data = np.zeros([nt, self.config.n_channel], dtype=self.dtype)
        ids = [x.get_id() for x in mseed]
        for j, id in enumerate(sorted(ids, key=lambda x: order[x[-1]])):
            if len(ids) != 3:
                if len(ids) > 3:
                    logging.warning(f"More than 3 channels {ids}!")
                j = comp2idx[id[-1]]
            data[:, j] = mseed.select(id=id)[0].data.astype(self.dtype)

        data = data[:, np.newaxis, :]
        meta = {"data": data, "t0": t0}
        return meta

    def read_mseed_array(self, fname, stations, amplitude=False, remove_resp=True):
        """Read one continuous miniSEED file for a whole station array.

        `stations` is a dict keyed by trace-id prefix with "component",
        "response" and "unit" entries. Returns a meta dict with stacked
        ``data`` of shape (nsta, nt, 1, nch) (zeros when the read fails),
        plus raw amplitudes when `amplitude` is True.
        """
        data = []
        station_id = []
        t0 = []
        raw_amp = []

        try:
            mseed = obspy.read(fname)
            read_success = True
        except Exception as e:
            read_success = False
            print(e)

        if read_success:
            try:
                mseed = mseed.merge(fill_value=0)
            except Exception as e:
                print(e)

            for i in range(len(mseed)):
                if mseed[i].stats.sampling_rate != self.config.sampling_rate:
                    logging.warning(
                        f"Resampling {mseed[i].id} from {mseed[i].stats.sampling_rate} to {self.config.sampling_rate} Hz"
                    )
                    try:
                        mseed[i] = mseed[i].interpolate(self.config.sampling_rate, method="linear")
                    except Exception as e:
                        print(e)
                        mseed[i].data = mseed[i].data.astype(float) * 0.0  ## set to zero if resampling fails

            if self.highpass_filter == 0:
                try:
                    mseed = mseed.detrend("spline", order=2, dspline=5 * mseed[0].stats.sampling_rate)
                except:
                    logging.error(f"Error: spline detrend failed at file {fname}")
                    mseed = mseed.detrend("demean")
            else:
                mseed = mseed.filter("highpass", freq=self.highpass_filter)

            starttime = min([st.stats.starttime for st in mseed])
            endtime = max([st.stats.endtime for st in mseed])
            mseed = mseed.trim(starttime, endtime, pad=True, fill_value=0)

            order = ["3", "2", "1", "E", "N", "Z"]
            order = {key: i for i, key in enumerate(order)}
            comp2idx = {"3": 0, "2": 1, "1": 2, "E": 0, "N": 1, "Z": 2}

            nsta = len(stations)
            nt = len(mseed[0].data)
            # for i in range(nsta):
            for sta in stations:
                trace_data = np.zeros([nt, self.config.n_channel], dtype=self.dtype)
                if amplitude:
                    trace_amp = np.zeros([nt, self.config.n_channel], dtype=self.dtype)
                empty_station = True
                # sta = stations.iloc[i]["station"]
                # comp = stations.iloc[i]["component"].split(",")
                comp = stations[sta]["component"]
                if amplitude:
                    # resp = stations.iloc[i]["response"].split(",")
                    resp = stations[sta]["response"]

                for j, c in enumerate(sorted(comp, key=lambda x: order[x[-1]])):
                    # Fixed: `resp` only exists when amplitude=True; the
                    # unconditional lookup raised NameError otherwise.
                    resp_j = resp[j] if amplitude else None
                    if len(comp) != 3:  ## less than 3 component
                        j = comp2idx[c]

                    if len(mseed.select(id=sta + c)) == 0:
                        print(f"Empty trace: {sta+c} {starttime}")
                        continue
                    else:
                        empty_station = False

                    tmp = mseed.select(id=sta + c)[0].data.astype(self.dtype)
                    trace_data[: len(tmp), j] = tmp[:nt]
                    if amplitude:
                        # if stations.iloc[i]["unit"] == "m/s**2":
                        if stations[sta]["unit"] == "m/s**2":
                            # Acceleration: integrate to velocity, then high-pass.
                            tmp = mseed.select(id=sta + c)[0]
                            tmp = tmp.integrate()
                            tmp = tmp.filter("highpass", freq=1.0)
                            tmp = tmp.data.astype(self.dtype)
                            trace_amp[: len(tmp), j] = tmp[:nt]
                        # elif stations.iloc[i]["unit"] == "m/s":
                        elif stations[sta]["unit"] == "m/s":
                            tmp = mseed.select(id=sta + c)[0].data.astype(self.dtype)
                            trace_amp[: len(tmp), j] = tmp[:nt]
                        else:
                            # Fixed: the old message referenced a removed loop
                            # index `i` and DataFrame access on a dict.
                            print(f"Error in {sta}\n{stations[sta]['unit']} should be m/s**2 or m/s!")
                    if amplitude and remove_resp:
                        # trace_amp[:, j] /= float(resp[j])
                        trace_amp[:, j] /= float(resp_j)

                if not empty_station:
                    data.append(trace_data)
                    if amplitude:
                        raw_amp.append(trace_amp)
                    station_id.append([sta])
                    t0.append(starttime.datetime.isoformat(timespec="milliseconds"))

        if len(data) > 0:
            data = np.stack(data)
            if len(data.shape) == 3:
                data = data[:, :, np.newaxis, :]
            if amplitude:
                raw_amp = np.stack(raw_amp)
                if len(raw_amp.shape) == 3:
                    raw_amp = raw_amp[:, :, np.newaxis, :]
        else:
            # Read failed or every station was empty: return zero-filled data.
            nt = 60 * 60 * self.config.sampling_rate  # assume 1 hour data
            data = np.zeros([1, nt, 1, self.config.n_channel], dtype=self.dtype)
            if amplitude:
                raw_amp = np.zeros([1, nt, 1, self.config.n_channel], dtype=self.dtype)
            t0 = ["1970-01-01T00:00:00.000"]
            station_id = ["None"]

        if amplitude:
            meta = {"data": data, "t0": t0, "station_id": station_id, "fname": fname.split("/")[-1], "raw_amp": raw_amp}
        else:
            meta = {"data": data, "t0": t0, "station_id": station_id, "fname": fname.split("/")[-1]}
        return meta

    def generate_label(self, data, phase_list, mask=None):
        """Build Gaussian/triangle pick labels shaped like `data`.

        phase_list is [itp_list, its_list] (phase i fills channel i+1); each
        entry is a per-station list of pick-index lists. Channel 0 holds the
        residual "noise" probability 1 - sum(others). NaN picks are skipped.
        """
        # target = np.zeros(self.Y_shape, dtype=self.dtype)
        target = np.zeros_like(data)

        if self.label_shape == "gaussian":
            label_window = np.exp(
                -((np.arange(-self.label_width // 2, self.label_width // 2 + 1)) ** 2)
                / (2 * (self.label_width / 5) ** 2)
            )
        elif self.label_shape == "triangle":
            label_window = 1 - np.abs(
                2 / self.label_width * (np.arange(-self.label_width // 2, self.label_width // 2 + 1))
            )
        else:
            print(f"Label shape {self.label_shape} should be guassian or triangle")
            # Fixed: was a bare `raise` with no active exception, which
            # produces an unhelpful RuntimeError.
            raise ValueError(f"Label shape {self.label_shape} should be guassian or triangle")

        for i, phases in enumerate(phase_list):
            for j, idx_list in enumerate(phases):
                for idx in idx_list:
                    if np.isnan(idx):
                        continue
                    idx = int(idx)
                    # Only stamp windows that fit entirely inside the trace.
                    if (idx - self.label_width // 2 >= 0) and (idx + self.label_width // 2 + 1 <= target.shape[0]):
                        target[idx - self.label_width // 2 : idx + self.label_width // 2 + 1, j, i + 1] = label_window

        target[..., 0] = 1 - np.sum(target[..., 1:], axis=-1)
        if mask is not None:
            target[:, mask == 0, :] = 0
        return target

    def random_shift(self, sample, itp, its, itp_old=None, its_old=None, shift_range=None):
        """Randomly time-shift `sample` and its P/S pick lists.

        Without itp_old/its_old the shift keeps the event inside the trace;
        with them the shift is constrained so a stacked second event stays at
        least `min_event_gap` samples away from the first (shift = 0 when no
        valid shift exists). Returns (shifted_sample, itp, its, shift).
        NOTE(review): relies on self.min_event_gap, which is set by
        DataReader_train but not by the base __init__.
        """
        # anchor = np.round(1/2 * (min(itp[~np.isnan(itp.astype(float))]) + min(its[~np.isnan(its.astype(float))]))).astype(int)
        flattern = lambda x: np.array([i for trace in x for i in trace], dtype=float)
        shift_pick = lambda x, shift: [[i - shift for i in trace] for trace in x]
        itp_flat = flattern(itp)
        its_flat = flattern(its)
        if (itp_old is None) and (its_old is None):
            hi = np.round(np.median(itp_flat[~np.isnan(itp_flat)])).astype(int)
            lo = -(sample.shape[0] - np.round(np.median(its_flat[~np.isnan(its_flat)])).astype(int))
            if shift_range is None:
                shift = np.random.randint(low=lo, high=hi + 1)
            else:
                shift = np.random.randint(low=max(lo, shift_range[0]), high=min(hi + 1, shift_range[1]))
        else:
            itp_old_flat = flattern(itp_old)
            its_old_flat = flattern(its_old)
            itp_ref = np.round(np.min(itp_flat[~np.isnan(itp_flat)])).astype(int)
            its_ref = np.round(np.max(its_flat[~np.isnan(its_flat)])).astype(int)
            itp_old_ref = np.round(np.min(itp_old_flat[~np.isnan(itp_old_flat)])).astype(int)
            its_old_ref = np.round(np.max(its_old_flat[~np.isnan(its_old_flat)])).astype(int)
            # min_event_gap = np.round(self.min_event_gap*(its_ref-itp_ref)).astype(int)
            # min_event_gap_old = np.round(self.min_event_gap*(its_old_ref-itp_old_ref)).astype(int)
            if shift_range is None:
                hi = list(range(max(its_ref - itp_old_ref + self.min_event_gap, 0), itp_ref))
                lo = list(range(-(sample.shape[0] - its_ref), -(max(its_old_ref - itp_ref + self.min_event_gap, 0))))
            else:
                lo_ = max(-(sample.shape[0] - its_ref), shift_range[0])
                hi_ = min(itp_ref, shift_range[1])
                hi = list(range(max(its_ref - itp_old_ref + self.min_event_gap, 0), hi_))
                lo = list(range(lo_, -(max(its_old_ref - itp_ref + self.min_event_gap, 0))))
            if len(hi + lo) > 0:
                shift = np.random.choice(hi + lo)
            else:
                shift = 0

        shifted_sample = np.zeros_like(sample)
        if shift > 0:
            shifted_sample[:-shift, ...] = sample[shift:, ...]
        elif shift < 0:
            shifted_sample[-shift:, ...] = sample[:shift, ...]
        else:
            shifted_sample[...] = sample[...]
        return shifted_sample, shift_pick(itp, shift), shift_pick(its, shift), shift

    def stack_events(self, sample_old, itp_old, its_old, shift_range=None, mask_old=None):
        """Stack a randomly chosen second event onto `sample_old` (in place).

        The second event is normalized, shifted so that it respects
        min_event_gap, and added; pick lists are concatenated per station.
        Returns (sample, itp, its, mask).
        """
        i = np.random.randint(self.num_data)
        base_name = self.data_list[i]
        if self.format == "numpy":
            meta = self.read_numpy(os.path.join(self.data_dir, base_name))
        elif self.format == "hdf5":
            meta = self.read_hdf5(base_name)
        if meta == -1:
            # Fixed: callers unpack four values; this error path previously
            # returned only three, which would raise at the call site.
            return sample_old, itp_old, its_old, mask_old

        sample = np.copy(meta["data"])
        itp = meta["itp"]
        its = meta["its"]
        if mask_old is not None:
            # NOTE(review): assumes meta contains a "mask" entry — the read_*
            # helpers above never set one; verify before passing mask_old.
            mask = np.copy(meta["mask"])
        sample = normalize(sample)
        sample, itp, its, shift = self.random_shift(sample, itp, its, itp_old, its_old, shift_range)

        if shift != 0:
            sample_old += sample
            # itp_old = [np.hstack([i, j]) for i,j in zip(itp_old, itp)]
            # its_old = [np.hstack([i, j]) for i,j in zip(its_old, its)]
            itp_old = [i + j for i, j in zip(itp_old, itp)]
            its_old = [i + j for i, j in zip(its_old, its)]
            if mask_old is not None:
                mask_old = mask_old * mask

        return sample_old, itp_old, its_old, mask_old

    def cut_window(self, sample, target, itp, its, select_range):
        """Crop sample/target to select_range and re-reference pick indices
        to the new window start."""
        shift_pick = lambda x, shift: [[i - shift for i in trace] for trace in x]
        sample = sample[select_range[0] : select_range[1]]
        target = target[select_range[0] : select_range[1]]
        return (sample, target, shift_pick(itp, select_range[0]), shift_pick(its, select_range[0]))
class DataReader_train(DataReader):
    """Training reader: random time-shift plus event-stacking augmentation,
    yielding (sample, target, fname) windows of X_shape/Y_shape."""

    def __init__(self, format="numpy", config=DataConfig(), **kwargs):
        super().__init__(format=format, config=config, **kwargs)
        self.min_event_gap = config.min_event_gap  # used by random_shift
        self.buffer_channels = {}
        # Shift limits keep pick labels clear of the window edges.
        self.shift_range = [-2000 + self.label_width * 2, 1000 - self.label_width * 2]
        self.select_range = [5000, 8000]  # window cut out after augmentation

    def __getitem__(self, i):
        """Return one augmented training example (sample, target, fname)."""
        base_name = self.data_list[i]
        if self.format == "numpy":
            meta = self.read_numpy(os.path.join(self.data_dir, base_name))
        elif self.format == "hdf5":
            meta = self.read_hdf5(base_name)
        if meta == None:
            return (np.zeros(self.X_shape, dtype=self.dtype), np.zeros(self.Y_shape, dtype=self.dtype), base_name)

        sample = np.copy(meta["data"])
        itp_list = meta["itp"]
        its_list = meta["its"]

        sample = normalize(sample)
        if np.random.random() < 0.95:
            # 95%: event window — random shift, stack a second event, label, cut.
            sample, itp_list, its_list, _ = self.random_shift(sample, itp_list, its_list, shift_range=self.shift_range)
            sample, itp_list, its_list, _ = self.stack_events(sample, itp_list, its_list, shift_range=self.shift_range)
            target = self.generate_label(sample, [itp_list, its_list])
            sample, target, itp_list, its_list = self.cut_window(sample, target, itp_list, its_list, self.select_range)
        else:
            ## noise
            # 5%: pure-noise window taken before the earliest P arrival.
            assert self.X_shape[0] <= min(min(itp_list))
            sample = sample[: self.X_shape[0], ...]
            target = np.zeros(self.Y_shape).astype(self.dtype)
            itp_list = [[]]
            its_list = [[]]

        sample = normalize(sample)
        return (sample.astype(self.dtype), target.astype(self.dtype), base_name)

    def dataset(self, batch_size, num_parallel_calls=2, shuffle=True, drop_remainder=True):
        """Wrap this reader in a batched, shuffled tf.data.Dataset."""
        dataset = dataset_map(
            self,
            output_types=(self.dtype, self.dtype, "string"),
            output_shapes=(self.X_shape, self.Y_shape, None),
            num_parallel_calls=num_parallel_calls,
            shuffle=shuffle,
        )
        dataset = dataset.batch(batch_size, drop_remainder=drop_remainder).prefetch(batch_size * 2)
        return dataset
class DataReader_test(DataReader):
    """Evaluation reader: deterministic window cut, no augmentation; yields
    (sample, target, fname, itp, its)."""

    def __init__(self, format="numpy", config=DataConfig(), **kwargs):
        super().__init__(format=format, config=config, **kwargs)
        self.select_range = [5000, 8000]  # fixed evaluation window

    def __getitem__(self, i):
        """Return one labeled evaluation example."""
        base_name = self.data_list[i]
        if self.format == "numpy":
            meta = self.read_numpy(os.path.join(self.data_dir, base_name))
        elif self.format == "hdf5":
            meta = self.read_hdf5(base_name)
        if meta == -1:
            # NOTE(review): this error path returns 3 items (with Y_shape
            # first) while the normal path returns 5 — confirm downstream
            # handling before relying on it.
            return (np.zeros(self.Y_shape, dtype=self.dtype), np.zeros(self.X_shape, dtype=self.dtype), base_name)

        sample = np.copy(meta["data"])
        itp_list = meta["itp"]
        its_list = meta["its"]

        # sample, itp_list, its_list, _ = self.random_shift(sample, itp_list, its_list, shift_range=self.shift_range)
        target = self.generate_label(sample, [itp_list, its_list])
        sample, target, itp_list, its_list = self.cut_window(sample, target, itp_list, its_list, self.select_range)
        sample = normalize(sample)
        return (sample, target, base_name, itp_list, its_list)

    def dataset(self, batch_size, num_parallel_calls=2, shuffle=False, drop_remainder=False):
        """Batched tf.data.Dataset of evaluation windows (unshuffled by default)."""
        dataset = dataset_map(
            self,
            output_types=(self.dtype, self.dtype, "string", "int64", "int64"),
            output_shapes=(self.X_shape, self.Y_shape, None, None, None),
            num_parallel_calls=num_parallel_calls,
            shuffle=shuffle,
        )
        dataset = dataset.batch(batch_size, drop_remainder=drop_remainder).prefetch(batch_size * 2)
        return dataset
class DataReader_pred(DataReader):
    """Prediction reader: yields full-length normalized traces (plus the raw
    amplitudes when amplitude=True) for phase picking."""

    def __init__(self, format="numpy", amplitude=True, config=DataConfig(), **kwargs):
        super().__init__(format=format, config=config, **kwargs)
        self.amplitude = amplitude

    def adjust_missingchannels(self, data):
        """Rescale traces so channels that are entirely zero do not dilute
        the overall amplitude (currently unused; see __getitem__)."""
        tmp = np.max(np.abs(data), axis=0, keepdims=True)
        assert tmp.shape[-1] == data.shape[-1]
        if np.count_nonzero(tmp) > 0:
            data *= data.shape[-1] / np.count_nonzero(tmp)
        return data

    def __getitem__(self, i):
        """Return (sample[, raw_amp], fname, t0, station_id) for one file."""
        base_name = self.data_list[i]

        if self.format == "numpy":
            meta = self.read_numpy(os.path.join(self.data_dir, base_name))
        elif (self.format == "mseed") or (self.format == "sac"):
            # NOTE(review): "sac" is also routed through read_mseed (obspy
            # reads both); read_sac is currently unused.
            meta = self.read_mseed(
                os.path.join(self.data_dir, base_name),
                response=self.response,
                sampling_rate=self.sampling_rate,
                highpass_filter=self.highpass_filter,
                return_single_station=True,
            )
        elif self.format == "hdf5":
            meta = self.read_hdf5(base_name)
        else:
            # Fixed: was `raise (f"...")`, which raises TypeError because a
            # str is not an exception.
            raise ValueError(f"{self.format} does not support!")

        if "data" in meta:
            raw_amp = meta["data"].copy()
            sample = normalize_long(meta["data"])
        else:
            # Unreadable file: fall back to a zero-filled default window.
            raw_amp = np.zeros([3000, 1, 3], dtype=np.float32)
            sample = np.zeros([3000, 1, 3], dtype=np.float32)

        if "t0" in meta:
            t0 = meta["t0"]
        else:
            t0 = "1970-01-01T00:00:00.000"

        if "station_id" in meta:
            station_id = meta["station_id"]
        else:
            # station_id = base_name.split("/")[-1].rstrip("*")
            station_id = os.path.basename(base_name).rstrip("*")

        if np.isnan(sample).any() or np.isinf(sample).any():
            logging.warning(f"Data error: Nan or Inf found in {base_name}")
            sample[np.isnan(sample)] = 0
            sample[np.isinf(sample)] = 0
        # sample = self.adjust_missingchannels(sample)

        if self.amplitude:
            return (sample, raw_amp, base_name, t0, station_id)
        else:
            return (sample, base_name, t0, station_id)

    def dataset(self, batch_size, num_parallel_calls=2, shuffle=False, drop_remainder=False):
        """Batched tf.data.Dataset of prediction inputs."""
        if self.amplitude:
            dataset = dataset_map(
                self,
                output_types=(self.dtype, self.dtype, "string", "string", "string"),
                output_shapes=([None, None, 3], [None, None, 3], None, None, None),
                num_parallel_calls=num_parallel_calls,
                shuffle=shuffle,
            )
        else:
            dataset = dataset_map(
                self,
                output_types=(self.dtype, "string", "string", "string"),
                output_shapes=([None, None, 3], None, None, None),
                num_parallel_calls=num_parallel_calls,
                shuffle=shuffle,
            )
        dataset = dataset.batch(batch_size, drop_remainder=drop_remainder).prefetch(batch_size * 2)
        return dataset
class DataReader_mseed_array(DataReader):
    """Prediction reader for continuous miniSEED files covering an array of
    stations described by a JSON station file."""

    def __init__(self, stations, amplitude=True, remove_resp=True, config=DataConfig(), **kwargs):
        super().__init__(format="mseed", config=config, **kwargs)

        # self.stations = pd.read_json(stations)
        with open(stations, "r") as f:
            self.stations = json.load(f)
        print(pd.DataFrame.from_dict(self.stations, orient="index").to_string())

        self.amplitude = amplitude
        self.remove_resp = remove_resp
        self.X_shape = self.get_data_shape()

    def get_data_shape(self):
        """Probe the first file to fix the (nsta, nt, 1, nch) input shape."""
        fname = os.path.join(self.data_dir, self.data_list[0])
        meta = self.read_mseed_array(fname, self.stations, self.amplitude, self.remove_resp)
        return meta["data"].shape

    def __getitem__(self, i):
        """Return (sample[, raw_amp], fname, t0, station_id) for one file,
        normalized per station and padded/cropped to X_shape."""
        fp = os.path.join(self.data_dir, self.data_list[i])
        # try:
        meta = self.read_mseed_array(fp, self.stations, self.amplitude, self.remove_resp)
        # except Exception as e:
        #     logging.error(f"Failed reading {fp}: {e}")
        #     if self.amplitude:
        #         return (np.zeros(self.X_shape).astype(self.dtype), np.zeros(self.X_shape).astype(self.dtype),
        #             [self.stations.iloc[i]["station"] for i in range(len(self.stations))], ["0" for i in range(len(self.stations))])
        #     else:
        #         return (np.zeros(self.X_shape).astype(self.dtype), ["" for i in range(len(self.stations))],
        #             [self.stations.iloc[i]["station"] for i in range(len(self.stations))])

        sample = np.zeros([len(meta["data"]), *self.X_shape[1:]], dtype=self.dtype)
        sample[:, : meta["data"].shape[1], :, :] = normalize_batch(meta["data"])[:, : self.X_shape[1], :, :]
        if np.isnan(sample).any() or np.isinf(sample).any():
            logging.warning(f"Data error: Nan or Inf found in {fp}")
            sample[np.isnan(sample)] = 0
            sample[np.isinf(sample)] = 0
        t0 = meta["t0"]
        base_name = meta["fname"]
        station_id = meta["station_id"]
        # base_name = [self.stations.iloc[i]["station"]+"."+t0[i] for i in range(len(self.stations))]
        # base_name = [self.stations.iloc[i]["station"] for i in range(len(self.stations))]

        if self.amplitude:
            raw_amp = np.zeros([len(meta["raw_amp"]), *self.X_shape[1:]], dtype=self.dtype)
            raw_amp[:, : meta["raw_amp"].shape[1], :, :] = meta["raw_amp"][:, : self.X_shape[1], :, :]
            if np.isnan(raw_amp).any() or np.isinf(raw_amp).any():
                logging.warning(f"Data error: Nan or Inf found in {fp}")
                raw_amp[np.isnan(raw_amp)] = 0
                raw_amp[np.isinf(raw_amp)] = 0
            return (sample, raw_amp, base_name, t0, station_id)
        else:
            return (sample, base_name, t0, station_id)

    def dataset(self, num_parallel_calls=1, shuffle=False):
        """Unbatched tf.data.Dataset (one element per continuous file)."""
        if self.amplitude:
            dataset = dataset_map(
                self,
                output_types=(self.dtype, self.dtype, "string", "string", "string"),
                output_shapes=([None, *self.X_shape[1:]], [None, *self.X_shape[1:]], None, None, None),
                num_parallel_calls=num_parallel_calls,
            )
        else:
            dataset = dataset_map(
                self,
                output_types=(self.dtype, "string", "string", "string"),
                output_shapes=([None, *self.X_shape[1:]], None, None, None),
                num_parallel_calls=num_parallel_calls,
            )
        dataset = dataset.prefetch(1)
        # dataset = dataset.prefetch(len(self.stations)*2)
        return dataset
###### test ########
def test_DataReader():
    """Smoke test: iterate every reader variant over the test_data fixtures
    and save sample plots to ./test_figures (requires matplotlib)."""
    import os
    import timeit
    import matplotlib.pyplot as plt

    if not os.path.exists("test_figures"):
        os.mkdir("test_figures")

    def plot_sample(sample, fname, label=None):
        # Plot the last channel of station 0 and, optionally, the 3 label curves.
        plt.clf()
        plt.subplot(211)
        plt.plot(sample[:, 0, -1])
        if label is not None:
            plt.subplot(212)
            plt.plot(label[:, 0, 0])
            plt.plot(label[:, 0, 1])
            plt.plot(label[:, 0, 2])
        plt.savefig(f"test_figures/{fname.decode()}.png")

    def read(data_reader, batch=1):
        # Drain the reader's tf.data pipeline in a TF1-style session,
        # plotting every element, and time the whole pass.
        start_time = timeit.default_timer()
        if batch is None:
            dataset = data_reader.dataset(shuffle=False)
        else:
            dataset = data_reader.dataset(1, shuffle=False)
        sess = tf.compat.v1.Session()

        print(len(data_reader))
        print("-------", tf.data.Dataset.cardinality(dataset))
        num = 0
        x = tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()
        while True:
            num += 1
            # print(num)
            try:
                out = sess.run(x)
                if len(out) == 2:
                    sample, fname = out[0], out[1]
                    for i in range(len(sample)):
                        plot_sample(sample[i], fname[i])
                else:
                    sample, label, fname = out[0], out[1], out[2]
                    for i in range(len(sample)):
                        plot_sample(sample[i], fname[i], label[i])
            except tf.errors.OutOfRangeError:
                break
        print("End of dataset")
        print("Tensorflow Dataset:\nexecution time = ", timeit.default_timer() - start_time)

    data_reader = DataReader_train(data_list="test_data/selected_phases.csv", data_dir="test_data/data/")
    read(data_reader)

    # NOTE(review): DataReader expects hdf5_file=/hdf5_group= kwargs; the
    # hdf5=/group= names below would raise KeyError — confirm the intended API.
    data_reader = DataReader_train(format="hdf5", hdf5="test_data/data.h5", group="data")
    read(data_reader)

    data_reader = DataReader_test(data_list="test_data/selected_phases.csv", data_dir="test_data/data/")
    read(data_reader)

    data_reader = DataReader_test(format="hdf5", hdf5="test_data/data.h5", group="data")
    read(data_reader)

    data_reader = DataReader_pred(format="numpy", data_list="test_data/selected_phases.csv", data_dir="test_data/data/")
    read(data_reader)

    data_reader = DataReader_pred(
        format="mseed", data_list="test_data/mseed_station.csv", data_dir="test_data/waveforms/"
    )
    read(data_reader)

    data_reader = DataReader_pred(
        format="mseed", amplitude=True, data_list="test_data/mseed_station.csv", data_dir="test_data/waveforms/"
    )
    read(data_reader)

    # NOTE(review): DataReader_mseed_array parses `stations` with json.load;
    # passing a .csv path here looks inconsistent — verify the fixture format.
    data_reader = DataReader_mseed_array(
        data_list="test_data/mseed.csv",
        data_dir="test_data/waveforms/",
        stations="test_data/stations.csv",
        remove_resp=False,
    )
    read(data_reader, batch=None)
if __name__ == "__main__":
test_DataReader()
| AI4EPS/PhaseNet | phasenet/data_reader.py | data_reader.py | py | 40,501 | python | en | code | 190 | github-code | 13 |
5845511 | import pytest
from django.shortcuts import get_object_or_404
from django.urls import reverse
from pytest_django.asserts import assertTemplateUsed
from apps.notes.models import Note
# This flags all tests in the file as needing database access
# Once setup, the database is cached to be used for all subsequent tests
# and rolls back transactions, to isolate tests from each other.
# This is the same way the standard Django TestCase uses the database.
# However pytest-django also caters for transaction test cases and allows you
# to keep the test databases configured across different test runs.
pytestmark = pytest.mark.django_db
def test_note_string(note):
    """str(Note) should be the note's subject."""
    # Re-fetch by subject so the test also proves the fixture was persisted.
    note = Note.objects.get(subject="Things I Like")
    assert str(note) == f"{note.subject}"
def test_note_content(note, user, folder1):
    """A persisted note keeps every field value set by the fixture."""
    note = Note.objects.filter(subject="Things I Like").get()
    expectedValues = {
        "user": user,
        "folder": folder1,
        "selected": 1,
        "subject": "Things I Like",
        "note": "Ice cream and cookies are nice",
    }
    for key, val in expectedValues.items():
        assert getattr(note, key) == val
def test_index(client):
    """The notes index is reachable by path and by named URL and renders the expected template."""
    response = client.get("/notes/")
    assert response.status_code == 200

    # A single request through the named URL covers both the status and the
    # template assertion (the previous version issued the same GET twice).
    response = client.get(reverse("notes"))
    assert response.status_code == 200
    assertTemplateUsed(response, "notes/content.html")
def test_select(client, user, folder1, note):
    """Selecting a note redirects and flags the note as selected."""
    response = client.get(f"/notes/{note.id}")
    assert response.status_code == 302
    selected_note = get_object_or_404(Note, pk=note.id)
    assert selected_note.selected == 1
def test_add_form(client):
    """GET /notes/add renders the note form."""
    response = client.get("/notes/add")
    assert response.status_code == 200
    assertTemplateUsed(response, "notes/form.html")
def test_add_data(client, folder1):
    """POST /notes/add creates the note and redirects."""
    data = {
        "folder": folder1.id,
        "subject": "Plato",
        "note": "A Greek philosopher",
    }
    response = client.post("/notes/add", data)
    assert response.status_code == 302
    found = Note.objects.filter(subject="Plato").exists()
    assert found
def test_edit_form(client, note):
    """GET the edit page for an existing note renders the note form."""
    response = client.get(f"/notes/{note.id}/edit")
    assert response.status_code == 200
    assertTemplateUsed(response, "notes/form.html")
def test_edit_data(client, folder1, note):
    """POST to the edit page updates the note's fields and redirects."""
    data = {
        "folder": folder1.id,
        "subject": "Descartes",
        "note": "A French philosopher",
    }
    response = client.post(f"/notes/{note.id}/edit", data)
    assert response.status_code == 302
    found = Note.objects.filter(subject="Descartes").exists()
    assert found
def test_delete(client, note):
    """Deleting a note redirects and removes it from the database."""
    response = client.get(f"/notes/{note.id}/delete")
    assert response.status_code == 302
    # Match the fixture's exact subject ("Things I Like"): the previous
    # lowercase "like" never matched any row, so the assertion could not
    # fail even when deletion was broken.
    found = Note.objects.filter(subject="Things I Like").exists()
    assert not found
| jamescrg/minhome | apps/notes/tests/test_notes.py | test_notes.py | py | 2,864 | python | en | code | 0 | github-code | 13 |
16131472923 | from gazpacho import get
# Simple GET: fetch a page and preview the first 50 characters of its HTML.
url = "https://en.wikipedia.org/wiki/Gazpacho"
html = get(url)
print(html[:50], "\n\n")

# get, with optional params
url = "https://httpbin.org/anything"
html2 = get(
    url, params={"foo": "bar", "bar": "baz"}, headers={"User-Agent": "gazpacho"}
)
print(html2, "\n\n")
| udhayprakash/PythonMaterial | python3/16_Web_Services/d_web_scraping/c_gazpacho/b_get_data.py | b_get_data.py | py | 297 | python | en | code | 7 | github-code | 13 |
2097272083 | from logging import root
import functools
from typing import List
# Definition for a Node.
class Node:
    """A node of an N-ary tree: a value plus a list of child nodes."""

    def __init__(self, val=None, children=None):
        self.val = val
        self.children = children
class Solution:
    def preorder(self, root: 'Node') -> List[int]:
        """Return the preorder traversal (node, then children left-to-right).

        Iterative rewrite of the original ``functools.reduce`` one-liner,
        which was hard to read. Handles a None root and nodes whose
        ``children`` attribute is None, exactly as before.
        """
        if root is None:
            return []
        result: List[int] = []
        stack = [root]
        while stack:
            node = stack.pop()
            result.append(node.val)
            # Push children reversed so the leftmost child is popped first.
            stack.extend(reversed(node.children or []))
        return result
| atlanmatrix/Algorithm-Py3 | leetcode/_589_n_ary_tree_preorder_traversal.py | _589_n_ary_tree_preorder_traversal.py | py | 419 | python | en | code | 0 | github-code | 13 |
855234526 | import unittest
from httmock import all_requests, HTTMock
from apihelper import Api
url = 'http://cool-site.com'
@all_requests
def response_content(url, request):
    """HTTMock catch-all handler: return the canned response for the verb.

    Unhandled verbs (e.g. PATCH) fall through and return None.
    """
    if request.method == 'GET':
        return get_response
    elif request.method == 'POST':
        return post_response
    elif request.method == 'HEAD':
        return head_response
    elif request.method == 'PUT':
        return put_response
    elif request.method == 'DELETE':
        return delete_response
class TestApi(unittest.TestCase):
    """Unit tests for the Api wrapper, using HTTMock so no network is hit."""

    def setUp(self):
        self.url = url
        self.test_api = Api(self.url)

    def test_that_base_url_is_set(self):
        self.assertEqual(self.test_api.base_url, self.url)

    def test_that_headers_are_set(self):
        # Custom headers should be merged with the wrapper's defaults.
        headers = {'Authorization': 'Super secret OAuth'}
        api_with_headers = Api(self.url, headers=headers)
        self.assertDictEqual(api_with_headers.headers, {
            'ACCEPT': 'application/json',
            'User-Agent': 'ApiHelper v{}'.format(api_with_headers._version),
            'Authorization': 'Super secret OAuth'
        })

    def test_that_user_agent_is_set(self):
        user_agent = 'Super cool user agent'
        api_with_agent = Api(self.url, user_agent=user_agent)
        self.assertEqual(api_with_agent.headers['User-Agent'], user_agent)

    def test_route_with_leading_slash(self):
        # '/url' and 'url' must resolve to the same absolute URL.
        with HTTMock(response_content):
            res = self.test_api.get('/url')
            self.assertEqual(res.url, '{}/url'.format(self.url))

    def test_route_without_leading_slash(self):
        with HTTMock(response_content):
            res = self.test_api.get('url')
            self.assertEqual(res.url, '{}/url'.format(self.url))

    def test_that_get_request_succeeds(self):
        with HTTMock(response_content):
            res = self.test_api.get('/', {'user': 'nate'})
            self.assertEqual(res.status_code, 200)
            self.assertEqual(res.request.method, 'GET')
            self.assertDictEqual(res.json(), {
                'success': True,
                'message': get_message
            })

    def test_that_post_request_succeeds(self):
        with HTTMock(response_content):
            res = self.test_api.post('/', {'password': 'new_pass'})
            self.assertEqual(res.status_code, 200)
            self.assertEqual(res.request.method, 'POST')
            self.assertDictEqual(res.json(), {
                'success': True,
                'message': post_message
            })

    def test_that_head_request_succeeds(self):
        with HTTMock(response_content):
            res = self.test_api.head('/', {'person': 'john'})
            self.assertEqual(res.status_code, 200)
            self.assertEqual(res.request.method, 'HEAD')
            self.assertDictEqual(res.json(), {
                'success': True,
                'message': head_message
            })

    def test_that_put_request_succeeds(self):
        with HTTMock(response_content):
            res = self.test_api.put('/', {'new_user': 'naiyt'})
            self.assertEqual(res.status_code, 200)
            self.assertEqual(res.request.method, 'PUT')
            self.assertDictEqual(res.json(), {
                'success': True,
                'message': put_message
            })

    def test_that_delete_request_succeeds(self):
        with HTTMock(response_content):
            res = self.test_api.delete('/', {'user': 'nate'})
            self.assertEqual(res.status_code, 200)
            self.assertEqual(res.request.method, 'DELETE')
            self.assertDictEqual(res.json(), {
                'success': True,
                'message': delete_message
            })
# Canned payloads returned by response_content, one per HTTP verb.
get_message = 'You gat!'
get_response = {
    'status_code': 200,
    'content': {
        'success': True,
        'message': get_message
    }
}

post_message = 'You post!'
post_response = {
    'status_code': 200,
    'content': {
        'success': True,
        'message': post_message
    }
}

head_message = 'You head!'
head_response = {
    'status_code': 200,
    'content': {
        'success': True,
        'message': head_message
    }
}

put_message = 'You put!'
put_response = {
    'status_code': 200,
    'content': {
        'success': True,
        'message': put_message
    }
}

delete_message = 'You delete!'
delete_response = {
    'status_code': 200,
    'content': {
        'success': True,
        'message': delete_message
    }
}

if __name__ == '__main__':
    unittest.main()
| naiyt/api-wrapper-helper | test.py | test.py | py | 4,446 | python | en | code | 0 | github-code | 13 |
69850294419 |
"""
Proszę zaimplementować algorytm Prima
"""
from queue import PriorityQueue
import sys
def Prim(g):
    """Run Prim's MST algorithm on adjacency matrix ``g`` (0 = no edge).

    Uses a lazy-deletion priority queue: stale entries are discarded by
    the ``d == dist[u]`` check. Prints the parent array (as before) and
    now also returns it for programmatic use: ``parent[v]`` is v's MST
    neighbour and ``parent[0]`` is None (node 0 is the root).
    """
    n = len(g)
    parent = [None] * n
    dist = [sys.maxsize] * n
    dist[0] = 0
    q = PriorityQueue()
    q.put((0, 0))
    taken = [False] * n
    taken[0] = True
    while not q.empty():
        d, u = q.get()
        taken[u] = True
        if d == dist[u]:  # skip stale (superseded) queue entries
            for i in range(n):
                if g[u][i] != 0 and dist[i] > g[u][i] and not taken[i]:
                    parent[i] = u
                    dist[i] = g[u][i]
                    q.put((g[u][i], i))
    print(parent)
    return parent
# Demo: run Prim's algorithm on two small adjacency-matrix graphs.
print("g:")
g=[[0,5,0,2,0],
   [5,0,7,0,1],
   [0,0,0,3,0],
   [2,7,3,0,2],
   [0,1,0,2,0]]
Prim(g)
print("G:")
G=[[0,5,0,1,0,2],
   [5,0,0,0,7,0],
   [0,0,0,4,0,8],
   [1,0,4,0,0,3],
   [0,7,0,0,0,0],
   [2,0,8,3,0,0]]
Prim(G)
| rogzan/ASD | graphs/019 - MST Prim.py | 019 - MST Prim.py | py | 805 | python | en | code | 0 | github-code | 13 |
28441641829 | from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
class FNN(nn.Module):
    """Fully connected autoencoder-shaped MLP: d -> 64 -> 128 -> 1024 -> 512 -> 256 -> 128 -> d.

    Every layer — including the output layer — is followed by ReLU, so the
    network's outputs are non-negative. ``n`` (the point count) is stored
    for reference but not used in the forward pass.
    """

    def __init__(self, d, n):
        super(FNN, self).__init__()
        self.d = d
        self.n = n
        # Layers are registered in order fc1..fc7 so state_dict keys match
        # existing checkpoints.
        self.fc1 = torch.nn.Linear(self.d, 64)
        self.fc2 = torch.nn.Linear(64, 128)
        self.fc3 = torch.nn.Linear(128, 1024)
        self.fc4 = torch.nn.Linear(1024, 512)
        self.fc5 = torch.nn.Linear(512, 256)
        self.fc6 = torch.nn.Linear(256, 128)
        self.fc7 = torch.nn.Linear(128, self.d)

    def forward(self, x):
        """Apply the seven Linear+ReLU stages in sequence."""
        for layer in (self.fc1, self.fc2, self.fc3, self.fc4,
                      self.fc5, self.fc6, self.fc7):
            x = F.relu(layer(x))
        return x
636965235 | from abc import ABC, abstractmethod
from exceptions import NoPatientInLineException, LogNotFoundException
class BaseController(ABC):
    """Common controller behaviour: open a view, dispatch the chosen option,
    and translate domain exceptions into user-facing messages."""

    @abstractmethod
    def __init__(self, view, controller=None):
        # Subclasses must call super().__init__ to wire up these references.
        self.__view = view
        self.__controller = controller

    def open_view(self, options: dict):
        """Open the view and invoke the handler mapped to the chosen option.

        :param options: maps each selectable option to a zero-argument handler
        """
        self.__view.init_components()
        chosen_option, _ = self.__view.open()
        if chosen_option is not None:
            self.__view.close()
            try:
                options[chosen_option]()
            except NoPatientInLineException:
                self.__view.display_msg('Nenhum paciente na fila!', success=False)
            except LogNotFoundException:
                # Plain string: the previous f-string had no placeholders.
                self.__view.display_msg('Nenhum log encontrado com esse id!', success=False)

    def return_to_main_menu(self):
        """
        Goes back to the system controller view
        :return: None
        """
        self.__view.close()
        if self.__controller is not None:
            return self.__controller.open_main_view()
| p-schlickmann/hospital | controller/base_controller.py | base_controller.py | py | 1,040 | python | en | code | 0 | github-code | 13 |
29523441465 | import re
import urllib.parse
import logging
import ijson
import json
import sqlite3
from helpers import *
# Root directory for parser outputs (SQLite database + extracted JSON).
BASE_PATH = 'parsed'

# Log INFO and above to both a file and the console.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(message)s',
                    handlers=[logging.FileHandler("logs/parser.log"),
                              logging.StreamHandler()])
def create_sql_db():
    """Open (and create if missing) the SQLite DB and all its tables.

    Returns the open connection and a cursor on it. There is one fact table
    (ration_card_details) plus one lookup table per administrative level.
    """
    conn = sqlite3.connect(f'{BASE_PATH}/ration_cards.sqlite')
    cursor = conn.cursor()
    cursor.execute('Create Table if not exists ration_card_details (ration_card_number TEXT NOT NULL PRIMARY KEY, card_type Text, img_url Text, know Text, mobile_number Text, fair_price_shopkeeper_name Text, fps_id Text, village_id Text, plc_id Text, unique_rc_id Text, url Text, family_members_table_id Text)')
    cursor.execute('Create Table if not exists district (id INTEGER NOT NULL PRIMARY KEY, name Text)')
    cursor.execute('Create Table if not exists town (id INTEGER NOT NULL PRIMARY KEY, name Text)')
    cursor.execute('Create Table if not exists village (id TEXT NOT NULL PRIMARY KEY, name Text)')
    cursor.execute('Create Table if not exists fps (id INTEGER NOT NULL PRIMARY KEY, name text)')
    cursor.execute('Create Table if not exists tahsil (id INTEGER NOT NULL PRIMARY KEY, name Text)')
    cursor.execute('Create Table if not exists panchayat (id INTEGER NOT NULL PRIMARY KEY, name Text)')
    cursor.execute('Create Table if not exists family_members_tables (id Text NOT NULL PRIMARY KEY, members_qty INTEGER, sub_table Text)')
    return conn, cursor
def feed_tree(cursor, cat):
    """Populate the administrative lookup tables from a record's categories.

    Each present category level (town/district/fps/village/tahsil/panchayat)
    is upserted as an (id, name) row; INSERT OR IGNORE keeps existing rows.
    The repetitive per-table boilerplate of the original is factored into a
    single local helper.
    """
    def _upsert(table, row_id, row_name):
        # One-line helper: identical query shape for every lookup table.
        cursor.execute(f"INSERT OR IGNORE INTO {table} (id, name) VALUES (?, ?)", (row_id, row_name))

    tw = cat.get('town_wise')
    if tw:
        # 'rural' is row 1, anything else (urban) row 2 — as before.
        _upsert('town', 1 if tw == 'rural' else 2, tw)
    if cat.get('District_Code_PMO'):
        _upsert('district', cat.get('District_Code_PMO', ''), cat.get('District_Name_PMO', ''))
    if cat.get('FPS_CODE_PMO'):
        _upsert('fps', cat.get('FPS_CODE_PMO', ''), cat.get('FPS_Name_PMO', ''))
    if cat.get('Village_Code_PMO'):
        _upsert('village', cat.get('Village_Code_PMO'), cat.get('Village_Name_PMO', ''))
    elif cat.get('Village_Name_PMO'):
        # No code available: fall back to the village name as its own id.
        _upsert('village', cat.get('Village_Name_PMO'), cat.get('Village_Name_PMO', ''))
    if cat.get('Tahsil_Code_PMO'):
        _upsert('tahsil', cat.get('Tahsil_Code_PMO', ''), cat.get('Tahsil_Name_PMO', ''))
    if cat.get('Panchayat_Code_PMO'):
        _upsert('panchayat', cat.get('Panchayat_Code_PMO', ''), cat.get('Panchayat_Name_PMO', ''))
def family_members_table(rcn, fmt, cursor):
    """Upsert one family-members row keyed by ration-card number ``rcn``.

    ``members_qty`` counts every raw entry in ``fmt`` (including empties),
    while ``sub_table`` stores only the truthy entries as JSON.
    Returns the cursor so call sites can rebind it.
    """
    cursor.execute(
        "INSERT OR IGNORE INTO family_members_tables (id, members_qty, sub_table) VALUES (?, ?, ?)",
        (rcn, len(fmt), json.dumps(list(filter(None, fmt)))),
    )
    return cursor
def json_to_sqlite(conn, cursor, paths):
    """Stream each JSON file with ijson and load records into SQLite.

    For every item: insert the ration-card row, store its family-members
    sub-table, and feed the administrative lookup tables. Commits once per
    file; returns the connection.
    """
    for path in paths:
        logging.info(f'Processing {path}...')
        with open(path, 'rb') as f:
            for item in ijson.items(f, "item"):
                # Ration card details
                rcd = item.get('ration_card_details')
                if rcd:
                    # Devanagari keys come straight from the scraped site.
                    rcn = rcd.get('राशनकार्ड संख्या', '')
                    categories = item.get('categories', {})
                    query = """INSERT OR IGNORE INTO ration_card_details
                    (ration_card_number, card_type, img_url, know, mobile_number, fair_price_shopkeeper_name, fps_id, village_id, plc_id, unique_rc_id, url, family_members_table_id)
                    VALUES
                    (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"""
                    # Store family members table
                    # NOTE(review): json.loads would raise TypeError on the
                    # list default when 'sub_table' is missing — confirm it
                    # is always present.
                    fmt = json.loads(item.get('sub_table', []))
                    if fmt:
                        cursor = family_members_table(rcn, fmt, cursor)
                    insert = (
                        rcn,
                        rcd.get('कार्ड का प्रकार', ''),
                        rcd.get('img', ''),
                        rcd.get('पता', ''),
                        rcd.get('मोबाईल संख्या', ''),
                        rcd.get('उचित मूल्य दुकानदार का नाम', ''),
                        categories.get('FPS_CODE_PMO', ''),
                        categories.get('Village_Code_PMO', ''),
                        categories.get('PLC_code', ''),
                        categories.get('Unique_RC_ID', ''),
                        categories.get('url', ''),
                        rcn
                    )
                    cursor.execute(query, insert)
                    feed_tree(cursor, categories)
        conn.commit()
    return conn
def clean_field(f):
    """Normalise a scraped text field.

    Decodes '+' and '%20' into spaces, collapses whitespace runs, and strips
    the ends. Non-string values (e.g. None) are returned unchanged; the
    previous bare ``except Exception`` + ``print`` is narrowed to the two
    errors non-strings actually raise, and logged via the module's logger.
    """
    try:
        f = f.replace('+', ' ')
        f = f.replace('%20', ' ')
        f = re.sub(r'\s+', ' ', f)  # raw string: '\s' is a deprecated escape otherwise
        f = f.strip()
    except (AttributeError, TypeError) as e:
        logging.warning('clean_field skipped non-string value %r: %s', f, e)
    return f
def clean_decode(obj):
    """Return a copy of *obj* whose values are cleaned and percent-decoded."""
    return {key: urllib.parse.unquote(clean_field(value)) for key, value in obj.items()}
def main():
    """Parse every extracted JSON file into the ration-card SQLite database."""
    path = f'{BASE_PATH}/extracted/'
    paths = get_json_files_paths(path)
    logging.info(f'Dumping results into {path}...')
    conn, cursor = create_sql_db()
    conn = json_to_sqlite(conn, cursor, paths)
    conn.close()
    logging.info(f'Done...')
if __name__ == '__main__':
main() | in-rolls/ration_bihar | scripts/parser/main.py | main.py | py | 6,412 | python | en | code | 1 | github-code | 13 |
12861196800 | import os
import random
from kivy.clock import Clock
from kivy.uix.anchorlayout import AnchorLayout
from kivy.graphics.context_instructions import Color
from kivy.graphics.vertex_instructions import Rectangle
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
from kivy.uix.widget import Widget
from cell import ManCell, TentCell, EmptyCell, TreeCell, PrintCell, LampCell, SwitchCell
from room import SwitchRoom, KnightsRoom, CampRoom, LampRoom
from kivy.uix.slider import Slider
class Candle(Button):
    """Button that cycles through the PNG frames in ./animation at 10 FPS."""

    def __init__(self,**kwargs):
        super().__init__(**kwargs)
        self.size_hint = None, None
        self.size = 110, 190
        self.border = 0,0,0,0
        self.n = 0  # index of the current animation frame
        self.loi = []  # list of frame image paths
        # NOTE(review): os.listdir order is platform-dependent; the frames
        # may need sorting for a stable animation — confirm.
        for file in os.listdir("./animation"):
            if file.endswith(".png"):
                self.loi += [os.path.join("./animation", file)]
        Clock.schedule_interval(self.animate, 1.0 / 10.0)

    def animate(self, dt):
        """Advance to the next frame (wraps around)."""
        self.n += 1
        self.n %= len(self.loi)
        self.background_normal = self.loi[self.n]
        self.background_down = self.loi[self.n]
class Volume(Slider):
    """Slider bound to a music object's volume (drag to set it)."""

    def __init__(self, music, **kwargs):
        super().__init__(**kwargs)
        self.music = music
        self.min = -10
        self.max = 10

    def on_touch_move(self, touch):
        super().on_touch_move(touch)
        # value_normalized maps the -10..10 slider range onto 0..1.
        self.music.volume = self.value_normalized
class Pause(Button):
    """Toggle button that pauses/resumes playback, remembering the position."""

    def __init__(self, music, **kwargs):
        super().__init__(**kwargs)
        self.pnu = 0  # press counter; odd = paused, even = playing
        self.music = music
        self.pose = 0  # saved playback position (seconds)

    def on_press(self):
        self.pnu += 1
        if self.pnu % 2 == 1:
            self.music.stop()
            # NOTE(review): position is read *after* stop(); confirm the
            # audio backend still reports the pre-stop position here.
            self.pose = self.music.get_pos()
        else:
            self.music.seek(self.pose+0.1)
class Win(AnchorLayout):
    """Victory overlay: clears the given widgets and offers a restart button."""

    def __init__(self, game, to_remoove, **kwargs):
        super().__init__(**kwargs)
        self.game = game
        for i in range(len(to_remoove)):
            self.game.remove_widget(to_remoove[i])
        self.add_widget(Button(text='Congratulation! More?', on_press=self.on_press))

    def on_press(self, x):
        # Remove the overlay itself and start a fresh puzzle.
        self.game.remove_widget(self)
        self.game._init_game()
class Cell(Button):
    """Base class for a game-board tile.

    Shows ``image_active`` or ``image_inactive`` depending on the state of
    the logical cell at grid position ``ij`` in ``room``; pressing toggles
    the cell and runs the room's win check.
    """

    def __init__(self, game, ij, room, image_active, image_inactive, **kwargs):
        super().__init__(**kwargs)
        self.border = (0, 0, 0, 0)
        self.image_active = image_active
        self.image_inactive = image_inactive
        self.background_normal = image_inactive
        self.size_hint = 1, None
        self.ij = ij
        self.room = room
        self.game = game

    def _refresh_background(self):
        """Sync both button images with the logical cell's active state."""
        if self.room.r[self.ij[0]][self.ij[1]].is_active():
            image = self.image_active
        else:
            image = self.image_inactive
        self.background_normal = image
        self.background_down = image

    def animate(self, k):
        # ``k`` is the Clock delta-time; it is not needed for the refresh.
        self._refresh_background()

    def on_size(self, w, h):  # hook size set and make height equal to width
        if w == h:
            return
        self.height = self.width

    def on_press(self):
        self.room.r[self.ij[0]][self.ij[1]].activate()
        self._refresh_background()
        # The previous version duplicated the image-refresh code inline and
        # evaluated room.checks() twice in a row; one call is enough.
        if self.room.checks():
            self.game.on_win()
class KnightLiarCellV(Cell):
    """Tile toggling between knight (active) and liar (inactive) images."""

    def __init__(self, game, ij, room, **kwargs):
        super().__init__(game, ij, room, 'images/knight.png', 'images/liar.png', **kwargs)
class TentCellV(Cell):
    """Tile toggling between a tent (active) and grass (inactive)."""

    def __init__(self, game, ij, room, **kwargs):
        super().__init__(game, ij, room, 'images/tent.png', 'images/grass.png', **kwargs)
class LampCellV(Cell):
    """Tile toggling between a lit (active) and unlit (inactive) lamp."""

    def __init__(self, game, ij, room, **kwargs):
        super().__init__(game, ij, room, 'images/lamp1.png', 'images/lamp.png', **kwargs)
class SwitchCellV(Cell):
    """Tile for the switch room: pressing inverts the cell's neighbours."""

    def __init__(self, game, ij, room, **kwargs):
        super().__init__(game, ij, room, 'images/tile1.png', 'images/tile2.png', **kwargs)
        self.event = Clock.schedule_interval(self.animate, 1.0 / 10.0)
        # Ensure the animation clock is cancelled when the game shuts down.
        game.add_to_close(lambda : self.close())

    def close(self):
        """Cancel the periodic animation event."""
        self.event.cancel()
        print('clock canceled')

    def on_press(self):
        self.room.r[self.ij[0]][self.ij[1]].invert_neighbours(self.ij[0], self.ij[1])
        # Evaluate the win condition once (it was previously called twice
        # back-to-back, discarding the first result).
        if self.room.checks():
            self.game.on_win()
class PrintCellV(Cell):
    """Read-only tile that displays the logical cell's text (hint/label)."""

    def __init__(self, game, ij, room, **kwargs):
        super().__init__(game, ij, room, 'images/black.png', 'images/black.png', **kwargs)
        self.color = (0.9, 0.9, 0.9, 1)
        self.text = f'{self.room.r[self.ij[0]][self.ij[1]].say()}'
class EmptyCellV(Cell):
    """Plain background tile with no game role."""

    def __init__(self, game, ij, room, **kwargs):
        super().__init__(game, ij, room, 'images/tile.png', 'images/tile.png', **kwargs)
class TreeCellV(Cell):
    """Static tree obstacle tile (same image in both states)."""

    def __init__(self, game, ij, room, **kwargs):
        super().__init__(game, ij, room, 'images/tree1.png', 'images/tree1.png', **kwargs)
class Board(GridLayout):
    """7x7 grid of tile widgets for a randomly chosen room type."""

    def __init__(self, game, **kwargs):
        # Pick one of the four puzzle rooms at random.
        self.room = random.sample([LampRoom, SwitchRoom, KnightsRoom, CampRoom], 1)[0]()
        self.r = self.room.r
        super().__init__(**kwargs)
        self.cols = 7
        self.rows = 7
        # Map each logical cell type to its matching widget class.
        for i in range(self.cols):
            for j in range(self.rows):
                if isinstance(self.r[i][j], TreeCell):
                    self.add_widget(TreeCellV(game, (i, j), self.room, **kwargs))
                elif isinstance(self.r[i][j], PrintCell):
                    self.add_widget(PrintCellV(game, (i, j), self.room, **kwargs))
                elif isinstance(self.r[i][j], TentCell):
                    self.add_widget(TentCellV(game, (i, j), self.room, **kwargs))
                elif isinstance(self.r[i][j], LampCell):
                    self.add_widget(LampCellV(game, (i, j), self.room, **kwargs))
                elif isinstance(self.r[i][j], SwitchCell):
                    self.add_widget(SwitchCellV(game, (i, j), self.room, **kwargs))
                elif isinstance(self.r[i][j], ManCell):
                    self.add_widget(KnightLiarCellV(game, (i, j), self.room, **kwargs))
                elif isinstance(self.r[i][j], EmptyCell):
                    self.add_widget(EmptyCellV(game, (i, j), self.room, **kwargs))
        # Dark-blue backdrop behind the whole board.
        with self.canvas.before:
            Color(0 / 255, 0 / 255, 75 / 255, 1)
            Rectangle(pos=(0, 0), size=(1000, 2000))
| brainnotincluded/kivy_puzzle | widgets.py | widgets.py | py | 6,697 | python | en | code | 0 | github-code | 13 |
71412266579 | #! /usr/bin/python
import feedparser
# Plugin metadata consumed by the bot's plugin loader.
name = 'FML'
desc = 'Display FML entries'
types = ['PUBMSG']
# RSS feed supplying the FML entries.
fml_url = 'http://feedpress.me/fmylife'
def FetchFreshEntries():
    """Download the FML RSS feed and return its entries, order reversed."""
    feed = feedparser.parse(fml_url)
    entries = feed.entries
    # Reverse so list.pop() later serves entries from the other end.
    entries.reverse()
    return entries
def init(server, storage):
    """Seed this plugin's storage slot with a fresh entry cache."""
    storage[name] = {'entries': FetchFreshEntries(), 'shown_entries': set()}
def PopNewEntry(storage):
    """Pop the next cached entry not yet shown, or None when exhausted."""
    while storage[name]['entries']:
        entry = storage[name]['entries'].pop()
        if entry.id in storage[name]['shown_entries']:
            continue
        storage[name]['shown_entries'].add(entry.id)
        # Strip the surrounding <p>...</p> markup from the feed body.
        content = entry.content[0].value[3:-4]
        return content
    return None
def GetFML(storage):
    """Return one unseen FML entry as ASCII text, refetching when exhausted."""
    entry = PopNewEntry(storage)
    if entry is None:
        # print('Ran out of FML entries; fetching new entries.')
        storage[name]['entries'] = FetchFreshEntries()
        entry = PopNewEntry(storage)
        if entry is None:
            entry = 'We are clean out of fresh FMLs. Try again later.'
    # Replace characters that cannot be encoded for the ASCII-only channel.
    entry = entry.encode('ascii', 'replace')
    return entry.decode('ascii')
def hookCode(server, data, storage):
    """PUBMSG hook: reply with an FML entry when someone says ``.fml``."""
    if data['Message'] != '.fml':
        return
    server.msg(data['Channel'], GetFML(storage))
# Debug code
if __name__ == '__main__':
class dummy:
def msg ( s, d, m ):
print ( "{}: {}".format( d, m ) )
s = dummy ()
d = {}
d['Channel'] = '#some_channel'
d['Message'] = '.fml'
d['User'] = {}
d['User']['Nick'] = 'me'
storage = {}
init(s, storage)
for i in range(5):
hookCode(s, d, storage)
| IsaacG/python-projects | ircbot/FML.py | FML.py | py | 1,494 | python | en | code | 3 | github-code | 13 |
29028018869 | import os
import pandas
###########################################################################
# Take HIVE1314 data, aggregate same birth year, get average pre and post Texas titer
# Birth-year range covered by the HIVE 1314 cohort.
firstY = 1937
lastY = 1999

### read hive data
hivefName = os.path.normpath("../data/HIVE1314.CSV")
hiveoutfName = os.path.normpath("../data/HIVE1314_import.csv")
hive = pandas.read_csv(hivefName)
hive_out = open(hiveoutfName, "w")
hive_out.write("id,pre,post\n")

### put titers by year of birth
# One bucket per birth year, separately for pre- and post-vaccination titers.
byYOB_pre = [[] for i in range(firstY, lastY+1)]
byYOB_post = [[] for i in range(firstY, lastY+1)]

for i in range(len(hive['id'].values)):
    id = str(hive['id'].values[i])
    try:
        ex1h3pre = float(hive['ex1-h3-pre'].values[i])
        ex2h3pre = float(hive['ex2-h3-pre'].values[i])
        ex1h3post = float(hive['ex1-h3-post'].values[i])
        ex2h3post = float(hive['ex2-h3-post'].values[i])
        # NOTE(review): this branch is unreachable (float() always returns a
        # float) and `ex2-h3-post` would be a NameError if it ever ran.
        if type(ex2h3post) != type(1.0):
            print (ex2-h3-post)
        # Average the two experimental replicates.
        pre = 1.0*(float(hive['ex1-h3-pre'].values[i]) + float(hive['ex2-h3-pre'].values[i])) / 2
        post = 1.0*(float(hive['ex1-h3-post'].values[i]) + float(hive['ex2-h3-post'].values[i])) / 2
        yob = int(hive['yob'].values[i])
        byYOB_pre[yob-firstY].append(pre)
        byYOB_post[yob-firstY].append(post)
    except ValueError:
        # Non-numeric titers (missing data) are skipped.
        continue

### get average pre and post titers of each cohort
for y in range(len(byYOB_pre)):
    try:
        avg_pre = 1.0*sum(byYOB_pre[y])/len(byYOB_pre[y])
        avg_post = 1.0*sum(byYOB_post[y])/len(byYOB_post[y])
        oneline = str(y+firstY)+","+str(avg_pre)+","+str(avg_post)
    except ZeroDivisionError:
        # Empty cohort: write NA placeholders.
        oneline = str(y+firstY)+",NA,NA"
    hive_out.write(oneline+"\n")
hive_out.close()
################################################################################
# Calculate A similarity, B similarty, #gly at A, #gly at B, gly similarity, HA2 similarity
import epi_similarity
import distance
### read vaccine strain, read H3N2 sequences and put them in list by year
# NOTE(review): "rU" mode was removed in Python 3.11 — use "r" there.
vacfName = os.path.normpath("../data/Texas2012_aa.fas")
vacf = open(vacfName, "rU")
for line in vacf:
    # NOTE(review): FASTA headers start with ">" at index 0, so
    # find(">") > 0 is False for them; headers fall into the else-branch and
    # *vac* ends up as the file's last line. Likely meant `>= 0`.
    if line.find(">") > 0:
        continue
    else:
        vac = line.split('\n')[0]

byYear = [[] for i in range(firstY, lastY+1)]
seqfName = os.path.normpath("../data/ncbi_aligned_6812_AA.fas")
seqf = open(seqfName, "rU")
for line in seqf:
    if line.find(">") >= 0:
        # Header line: the year is the third pipe-separated field (YYYY/..).
        each = line.split("\n")[0].split("|")
        y = int(each[2].split("/")[0])
    else:
        if y < firstY or y > lastY:
            continue
        byYear[y-firstY].append(line.split("\n")[0])
############todo: remove ambiguous

### determine epitope A and B by Shih et al. 2009 PNAS
shihfName = os.path.normpath("../data/shih_epitope.txt")
epitopes = epi_similarity.read_epitope_shih(shihfName)
epitopeA = epitopes[0]
epitopeB = epitopes[1]

### determine HA2
HA2 = range(330,550+1)

### calc epitope similarities
A_similarities = distance.calc_similarities(byYear, firstY, lastY, epitopeA, vac)
B_similarities = distance.calc_similarities(byYear, firstY, lastY, epitopeB, vac)

### calc HA2 similarities
HA2_similarities = distance.calc_similarities(byYear, firstY, lastY, HA2, vac)

### calc glycosylation similarities

### write sero-similarities file
sero_simfName = os.path.normpath("../data/HIVE1314_similarities.csv")
sero_simf = open(sero_simfName, "w")
sero_simf.write("id,pre,post,Asimilarity,Bsimilarity,HA2similarity\n")
hive_out = open(hiveoutfName, "rU")
sero = []
# NOTE(review): sero_sim is never used below.
sero_sim = ['' for i in range(firstY, lastY)]
for line in hive_out:
    if line.find("id") >= 0:
        # Skip the CSV header row.
        continue
    else:
        sero.append(line.split("\n")[0])
for y in range(0, lastY-firstY+1):
    oneline = sero[y]+","
    oneline += str(A_similarities[y])+"," +str(B_similarities[y])
    oneline += ","+str(HA2_similarities[y])
    sero_simf.write(oneline+"\n")
# NOTE(review): sero_simf is never closed, so the file may not be flushed.
| kangchonsara/HIVE1314 | src/serology_similarity.py | serology_similarity.py | py | 3,715 | python | en | code | 0 | github-code | 13 |
24631474610 | """
This module's MXKarma class handles communication between the ManiaExchange Karma API and the Karma app.
"""
import datetime
from pyplanet.contrib.setting import Setting
from pyplanet.apps.contrib.karma.mxkarmaapi import MXKarmaApi
class MXKarma:
"""
The MX Karma sub-app of the Karma app.
"""
def __init__(self, app):
self.app = app
self.api = MXKarmaApi(self)
self.current_count = 0
self.current_average = 0.0
self.current_start = datetime.datetime.now()
self.current_votes = None
self.setting_mx_karma = Setting(
'mx_karma', 'Enable MX Karma', Setting.CAT_BEHAVIOUR, type=bool,
description='Enabling MX Karma will provide you with global karma information from ManiaExchange.',
default=False, change_target=self.reload_settings
)
self.setting_mx_karma_key = Setting(
'mx_karma_key', 'MX Karma API Key', Setting.CAT_BEHAVIOUR, type=str,
description='Enabling MX Karma will provide you with global karma information from ManiaExchange.',
default=None, change_target=self.reload_settings
)
async def reload_settings(self, *args, **kwargs):
"""
Reload the settings from the PyPlanet settings storage and reload/restart the session.
:param args: args
:param kwargs: kwargs
"""
enabled = await self.setting_mx_karma.get_value()
key = await self.setting_mx_karma_key.get_value()
if enabled is True and key is not None:
await self.api.create_session()
await self.api.start_session()
else:
await self.api.close_session()
async def determine_vote(self, vote):
"""
Convert a local vote to a MX vote value.
:param vote: vote value of PyPlanet karma app.
:return: MX Karma vote value
"""
mx_vote = 0
if vote == -0.5:
mx_vote = 25
elif vote == 0:
mx_vote = 50
elif vote == 0.5:
mx_vote = 75
elif vote == 1:
mx_vote = 100
return mx_vote
async def handle_rating(self, rating, importvotes=True):
"""
Handle the event of a rating.
:param rating: Rating dict
:param importvotes: Import boolean
:return:
"""
if rating is not None:
self.current_count = rating['votecount']
self.current_average = rating['voteaverage']
self.current_votes = rating['votes']
if not importvotes:
return
import_votes = []
for vote in self.app.current_votes:
login = vote.player.login
score = vote.score
if vote.expanded_score is not None:
score = vote.expanded_score
if not any(mx['login'] == login for mx in self.current_votes):
import_votes.append({'login': login, 'nickname': vote.player.nickname, 'vote': await self.determine_vote(score)})
if len(import_votes) > 0:
if await self.api.save_votes(map=self.app.instance.map_manager.current_map, is_import=True, votes=import_votes):
rating = await self.api.get_map_rating(self.app.instance.map_manager.current_map)
await self.handle_rating(rating, importvotes=False)
if len(self.current_votes) != len(import_votes):
self.current_votes = import_votes
else:
self.current_count = 0
self.current_average = 0.0
self.current_votes = None
async def on_start(self):
"""
On start of module.
"""
await self.app.context.setting.register(
self.setting_mx_karma, self.setting_mx_karma_key
)
if await self.setting_mx_karma.get_value() is False or await self.setting_mx_karma_key.get_value() is None:
return
self.current_start = datetime.datetime.now()
await self.api.create_session()
await self.api.start_session()
rating = await self.api.get_map_rating(self.app.instance.map_manager.current_map)
await self.handle_rating(rating)
async def on_stop(self):
"""
On stop of module.
"""
if await self.setting_mx_karma.get_value() is False or await self.setting_mx_karma_key.get_value() is None:
return
await self.api.close_session()
async def player_connect(self, player):
"""
Get player rating on connection.
:param player: player instance
"""
rating = await self.api.get_map_rating(self.app.instance.map_manager.current_map, player.login)
    async def map_begin(self, map):
        """
        On map begin, load karma.

        Resets the timing anchor and loads the new map's rating into the cache.

        :param map: map instance
        """
        if await self.setting_mx_karma.get_value() is False or await self.setting_mx_karma_key.get_value() is None:
            return
        if not self.api.activated:
            return
        # Anchor used by map_end to compute how long this map was played.
        self.current_start = datetime.datetime.now()
        rating = await self.api.get_map_rating(map)
        await self.handle_rating(rating)
    async def map_end(self, map):
        """
        Map end, save votes to API if needed.

        Sends every local vote that is new, or differs from the cached remote
        vote for the same login, together with the played map length in
        seconds.

        :param map: map instance.
        """
        if await self.setting_mx_karma.get_value() is False or await self.setting_mx_karma_key.get_value() is None:
            return
        if not self.api.activated:
            return
        # Seconds since map_begin/on_start set the anchor; re-arm immediately.
        current_map_length = int((datetime.datetime.now() - self.current_start).total_seconds())
        self.current_start = datetime.datetime.now()
        save_votes = []
        for vote in self.app.current_votes:
            login = vote.player.login
            score = vote.score
            # Prefer the expanded score when one was recorded.
            if vote.expanded_score is not None:
                score = vote.expanded_score
            player_vote = []
            if self.current_votes is not None:
                player_vote = [v for v in self.current_votes if v['login'] == login]
            new_score = await self.determine_vote(score)
            # Only send votes that are new or changed compared to the cache.
            if len(player_vote) == 0 or (len(player_vote) == 1 and player_vote[0]['vote'] != new_score):
                save_votes.append({'login': login, 'nickname': vote.player.nickname, 'vote': new_score})
        if len(save_votes) > 0:
            await self.api.save_votes(map=self.app.instance.map_manager.current_map, map_length=current_map_length, votes=save_votes)
| 15009199/PyPlanet-F8-F9-rebind | pyplanet/apps/contrib/karma/mxkarma.py | mxkarma.py | py | 5,515 | python | en | code | null | github-code | 13 |
34076476185 | from brownie import accounts, Disgufu
# Number of block confirmations to wait for on each transaction.
REQUIRED_CONFIRMATIONS = 2
# NOTE(review): unlocks the local 'p7m' brownie account at import time
# (accounts.load may prompt for a password interactively -- confirm).
admin = accounts.load('p7m')
def tx_params(gas_limit=None):
    """Build the common transaction-parameter dict for brownie deploys/calls.

    :param gas_limit: optional gas limit (int); None lets brownie estimate.
        (Fix: the original annotated this parameter as ``int`` while
        defaulting to None, which was misleading.)
    :return: dict with the sender account, required confirmation count and
        gas limit.
    """
    return {
        "from": admin,
        "required_confs": REQUIRED_CONFIRMATIONS,
        "gas_limit": gas_limit
    }
def main():
    """Deploy the Disgufu contract with constructor argument 10.

    Fix: the deployed contract handle was bound to an unused local
    (``digufu``); the binding is dropped since nothing reads it.
    """
    Disgufu.deploy(10, tx_params())
| perpetuum7/tx-flex-contracts | scripts/deploy.py | deploy.py | py | 318 | python | en | code | 0 | github-code | 13 |
8480954124 | import sys
sys.stdin = open("피시방 알바_input.txt")  # redirect stdin to the input file (filename is data; left as-is)
N = int(input()) # number of customers
A = list(map(int, input().split())) # seat each customer wants to sit in
x = len(list(set(A))) # number of distinct requested seats
print(N-x) # customers who cannot get their seat = total minus distinct requests
print(x) | kimheekimhee/TIL | python/20220808/피시방 알바.py | 피시방 알바.py | py | 304 | python | ko | code | 1 | github-code | 13 |
39110039342 | import os
import json
import tensorflow as tf
import numpy as np
from run_scripts.run_sweep import run_sweep_serial
from asynch_mb.utils.utils import set_seed, ClassEncoder
from asynch_mb.baselines.linear_baseline import LinearFeatureBaseline
from asynch_mb.envs.mb_envs import *
from asynch_mb.envs.normalized_env import normalize
from asynch_mb.algos.trpo import TRPO
from asynch_mb.trainers.mf_trainer import Trainer
from asynch_mb.samplers.sampler import Sampler
from asynch_mb.samplers.single_sample_processor import SingleSampleProcessor
from asynch_mb.policies.gaussian_mlp_policy import GaussianMLPPolicy
from asynch_mb.logger import logger
EXP_NAME = 'trpo'  # results/log directory name: ./data/trpo (see run_experiment)
def run_experiment(**kwargs):
    """Run one TRPO training job with the given hyperparameters.

    Expects the keys listed in ``sweep_params`` (seed, env, baseline,
    rollout/sampler settings, policy architecture, TRPO step size, n_itr).
    Logs and a ``params.json`` dump are written to ./data/<EXP_NAME>.
    """
    exp_dir = os.getcwd() + '/data/' + EXP_NAME
    logger.configure(dir=exp_dir, format_strs=['stdout', 'log', 'csv'], snapshot_mode='last')
    # Persist the exact hyperparameters for reproducibility.
    json.dump(kwargs, open(exp_dir + '/params.json', 'w'), indent=2, sort_keys=True, cls=ClassEncoder)
    # Allow TF to grow GPU memory instead of grabbing it all up front.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = kwargs.get('gpu_frac', 0.95)
    sess = tf.Session(config=config)
    with sess.as_default() as sess:
        # Instantiate classes
        set_seed(kwargs['seed'])
        baseline = kwargs['baseline']()
        # Normalized env wrapper around the chosen mujoco-style environment.
        env = normalize(kwargs['env']())
        policy = GaussianMLPPolicy(
            name="policy",
            obs_dim=np.prod(env.observation_space.shape),
            action_dim=np.prod(env.action_space.shape),
            hidden_sizes=kwargs['hidden_sizes'],
            learn_std=kwargs['learn_std'],
            hidden_nonlinearity=kwargs['hidden_nonlinearity'],
            output_nonlinearity=kwargs['output_nonlinearity'],
            init_std=kwargs['init_std'],
            squashed=kwargs['squashed']
        )
        # Load policy here
        sampler = Sampler(
            env=env,
            policy=policy,
            num_rollouts=kwargs['num_rollouts'],
            max_path_length=kwargs['max_path_length'],
            n_parallel=kwargs['n_parallel'],
        )
        sample_processor = SingleSampleProcessor(
            baseline=baseline,
            discount=kwargs['discount'],
            gae_lambda=kwargs['gae_lambda'],
            normalize_adv=kwargs['normalize_adv'],
            positive_adv=kwargs['positive_adv'],
        )
        algo = TRPO(
            policy=policy,
            step_size=kwargs['step_size'],
        )
        trainer = Trainer(
            algo=algo,
            policy=policy,
            env=env,
            sampler=sampler,
            sample_processor=sample_processor,
            n_itr=kwargs['n_itr'],
            sess=sess,
        )
        trainer.train()
if __name__ == '__main__':
    # Hyperparameter grid; run_sweep_serial runs run_experiment once per
    # combination (cartesian product over the list-valued entries).
    sweep_params = {
        'algo': ['trpo'],
        'seed': [1, 2, 3, 4],
        'baseline': [LinearFeatureBaseline],
        'env': [Walker2dEnv, HopperEnv, HalfCheetahEnv, AntEnv],
        'num_rollouts': [50],
        'max_path_length': [200],
        'n_parallel': [10],
        'discount': [0.99],
        'gae_lambda': [.975],
        'normalize_adv': [True],
        'positive_adv': [False],
        'hidden_sizes': [(64, 64)],
        'learn_std': [True],
        'hidden_nonlinearity': [tf.nn.tanh],
        'output_nonlinearity': [None],
        'init_std': [1.],
        'step_size': [0.01],
        'squashed': [False],
        'n_itr': [2000],
        'scope': [None],
        'exp_tag': ['trpo']
    }
    run_sweep_serial(run_experiment, sweep_params)
| zzyunzhi/asynch-mb | run_scripts/sequential_exp/trpo_run_sweep.py | trpo_run_sweep.py | py | 3,531 | python | en | code | 12 | github-code | 13 |
21580463356 | from ietf.settings import * # pyflakes:ignore
# Dev/container overrides for the datatracker settings.
ALLOWED_HOSTS = ['*']  # accept any Host header (development container only)
# NOTE(review): hardcoded credentials below are for the local Docker
# database container only; do not reuse in any shared environment.
DATABASES = {
    'default': {
        'HOST': 'db',
        'PORT': 3306,
        'NAME': 'ietf_utf8',
        'ENGINE': 'django.db.backends.mysql',
        'USER': 'django',
        'PASSWORD': 'RkTkDPFnKpko',
        'OPTIONS': {
            'sql_mode': 'STRICT_TRANS_TABLES',
            'init_command': 'SET storage_engine=InnoDB; SET names "utf8"',
        },
    },
}
DATABASE_TEST_OPTIONS = {
    'init_command': 'SET storage_engine=InnoDB',
}
# Paths for draft submission and archives; all rooted under test/ for dev.
IDSUBMIT_IDNITS_BINARY = "/usr/local/bin/idnits"
IDSUBMIT_REPOSITORY_PATH = "test/id/"
IDSUBMIT_STAGING_PATH = "test/staging/"
INTERNET_DRAFT_ARCHIVE_DIR = "test/archive/"
INTERNET_ALL_DRAFTS_ARCHIVE_DIR = "test/archive/"
RFC_PATH = "test/rfc/"
AGENDA_PATH = 'data/developers/www6s/proceedings/'
MEETINGHOST_LOGO_PATH = AGENDA_PATH
# Outgoing mail goes to a local debug SMTP server instead of the network.
USING_DEBUG_EMAIL_SERVER=True
EMAIL_HOST='localhost'
EMAIL_PORT=2025
TRAC_WIKI_DIR_PATTERN = "test/wiki/%s"
TRAC_SVN_DIR_PATTERN = "test/svn/%s"
MEDIA_BASE_DIR = 'test'
MEDIA_ROOT = MEDIA_BASE_DIR + '/media/'
MEDIA_URL = '/media/'
PHOTOS_DIRNAME = 'photo'
PHOTOS_DIR = MEDIA_ROOT + PHOTOS_DIRNAME
DOCUMENT_PATH_PATTERN = 'data/developers/ietf-ftp/{doc.type_id}/'
SUBMIT_YANG_CATALOG_MODEL_DIR = 'data/developers/ietf-ftp/yang/catalogmod/'
SUBMIT_YANG_DRAFT_MODEL_DIR = 'data/developers/ietf-ftp/yang/draftmod/'
SUBMIT_YANG_INVAL_MODEL_DIR = 'data/developers/ietf-ftp/yang/invalmod/'
SUBMIT_YANG_IANA_MODEL_DIR = 'data/developers/ietf-ftp/yang/ianamod/'
SUBMIT_YANG_RFC_MODEL_DIR = 'data/developers/ietf-ftp/yang/rfcmod/'
# Set INTERNAL_IPS for use within Docker. See https://knasmueller.net/fix-djangos-debug-toolbar-not-showing-inside-docker
import socket
hostname, _, ips = socket.gethostbyname_ex(socket.gethostname())
INTERNAL_IPS = [".".join(ip.split(".")[:-1] + ["1"]) for ip in ips]
DE_GFM_BINARY = '/usr/local/bin/de-gfm' | ietf-tools/old-datatracker-branches | docker/configs/settings_local.py | settings_local.py | py | 1,937 | python | en | code | 5 | github-code | 13 |
42549126241 | # -*-coding:UTF-8-*-
import os
import re
import torch
import scipy.io
import pickle
import numpy as np
import glob
import fnmatch
import torch.utils.data as data
import scipy.misc
from PIL import Image
import cv2
from .transforms import Mytransforms
from .standard_legends import std_legend_lst, idx_MHP
from dataset.frei_utils.fh_utils import plot_hand
def natural_sort(l):
    """Sort strings in natural (human) order, e.g. 'img2' before 'img10'."""
    def tokenize(key):
        # Split into digit / non-digit runs; digit runs compare numerically,
        # everything else case-insensitively.
        return [int(tok) if tok.isdigit() else tok.lower()
                for tok in re.split('([0-9]+)', key)]
    return sorted(l, key=tokenize)
def recursive_glob(rootdir='.', pattern='*'):
    """Recursively collect file paths under *rootdir* whose basename matches
    the shell-style *pattern* (fnmatch semantics)."""
    hits = []
    for dirpath, _dirnames, filenames in os.walk(rootdir):
        hits.extend(os.path.join(dirpath, name)
                    for name in fnmatch.filter(filenames, pattern))
    return hits
def readAnnotation3D(file):
    """Parse a joints annotation file into an (N, 3) float array.

    Each line is whitespace-separated; token 0 (the joint label/index) is
    skipped and tokens 1-3 are read as x, y, z coordinates.

    Fix: the file handle is now closed deterministically via ``with`` (the
    original opened the file and never closed it).
    """
    an = []
    with open(file, "r") as f:
        for l in f:
            l = l.split()
            an.append((float(l[1]), float(l[2]), float(l[3])))
    return np.array(an, dtype=float)
def read_mat_file(mode, root_dir, img_list):
    """
    get the groundtruth
    mode (str): 'lsp' or 'lspet'
    return: three list: key_points list , centers list and scales list
    Notice:
        lsp_dataset differ from lspet dataset

    Reads <root_dir>/joints.mat, reorders its axes per dataset flavour, and
    derives a per-image bounding-box center and a scale (relative to 368 px)
    from the in-bounds keypoint coordinates.
    NOTE(review): if mode is neither 'lspet' nor 'lsp', ``lms``/``pose2ds``
    are never assigned and the function raises NameError -- confirm callers
    only pass these two values.
    """
    mat_arr = scipy.io.loadmat(os.path.join(root_dir, 'joints.mat'))['joints']
    # lspnet (14,3,10000)
    if mode == 'lspet':
        lms = mat_arr.transpose([2, 1, 0])
        pose2ds = mat_arr.transpose([2, 0, 1]).tolist()
    # lsp (3,14,2000)
    if mode == 'lsp':
        # Invert the third row (visibility flag convention differs for lsp).
        mat_arr[2] = np.logical_not(mat_arr[2])
        lms = mat_arr.transpose([2, 0, 1])
        pose2ds = mat_arr.transpose([2, 1, 0]).tolist()
    centers = []
    scales = []
    for idx in range(lms.shape[0]):
        # Image size is needed to clip keypoints to the frame.
        im = Image.open(img_list[idx])
        w = im.size[0]
        h = im.size[1]
        # lsp and lspet dataset doesn't exist groundtruth of center points:
        # approximate the center as the midpoint of in-bounds keypoints.
        center_x = (lms[idx][0][lms[idx][0] < w].max() +
                    lms[idx][0][lms[idx][0] > 0].min()) / 2
        center_y = (lms[idx][1][lms[idx][1] < h].max() +
                    lms[idx][1][lms[idx][1] > 0].min()) / 2
        centers.append([center_x, center_y])
        # Vertical keypoint extent (+4 px margin) relative to 368 px.
        scale = (lms[idx][1][lms[idx][1] < h].max() -
                 lms[idx][1][lms[idx][1] > 0].min() + 4) / 368.0
        scales.append(scale)
    return pose2ds, centers, scales
def guassian_kernel(size_w, size_h, center_x, center_y, sigma):
    """Unnormalized 2-D Gaussian over a (size_h, size_w) grid; value 1.0 at
    (center_x, center_y). (Function name spelling kept: callers use it.)"""
    rows = np.arange(size_h, dtype=float).reshape(-1, 1)
    cols = np.arange(size_w, dtype=float).reshape(1, -1)
    dist_sq = (cols - center_x) ** 2 + (rows - center_y) ** 2
    # Same operation order as the original (/2.0/sigma/sigma) for
    # bit-identical floating-point results.
    return np.exp(-dist_sq / 2.0 / sigma / sigma)
class MHP_CPMDataset(data.Dataset):
    """
    MHP (Multi-view Hand Pose) dataset wrapper producing CPM-style training
    samples: a normalized 256x256 image, per-joint Gaussian heatmaps at
    1/stride resolution (plus a background channel), and a center map.

    Args:
        root_dir (str): the path of train_val dateset.
        stride (float): default = 8
        transformer (Mytransforms): expand dataset.
    Notice:
        you have to change code to fit your own dataset except LSP
    """
    def __init__(self, config, set_name, heatmap_generator=None, transform=None, stride=8):
        self.exception = False
        self.name = 'MHP'
        self.config = config
        # Capture resolution of the raw webcam frames (width, height).
        self.orig_img_size = [640, 480]
        self.data_dir = os.path.join(config.DATA_DIR, self.name)  # <DATA_DIR>/MHP (original comment said "FreiHAND")
        self.image_paths = recursive_glob(self.data_dir, "*_webcam_[0-9]*")
        self.image_paths = natural_sort(self.image_paths)
        self.set_name = set_name
        self.split = 0.8 # According to the dataset paper, the 20% for the test split and the remaining 80% for the training split.
        if set_name in ['train', 'training']:
            # Training split: first 80% of the sorted frames, with augmentation.
            self.start_idx = 0
            self.end_idx = int(len(self.image_paths) * self.split)
            self.transform = Mytransforms.Compose([Mytransforms.RandomResized(),
                                                   Mytransforms.RandomRotate(40),
                                                   Mytransforms.RandomCrop(256),
                                                   Mytransforms.RandomHorizontalFlip(),
                                                   ])
        elif set_name in ['eval', 'valid', 'val', 'evaluation', 'validation']:
            # Evaluation split: remaining 20%, deterministic resize only.
            self.start_idx = int(len(self.image_paths) * self.split)
            self.end_idx = len(self.image_paths)
            self.transform = Mytransforms.Compose([Mytransforms.TestResized(256)])
        # Camera intrinsics (focal lengths / principal point) and distortion
        # coefficients used to project 3D joints into the image.
        Fx, Fy, Cx, Cy = 614.878, 615.479, 313.219, 231.288
        self.intrinsic_matrix = np.array([[Fx, 0, Cx],
                                          [0, Fy, Cy],
                                          [0, 0, 1 ]])
        self.distortion_coeffs = np.array([0.092701, -0.175877, -0.0035687, -0.00302299, 0])
        # rearrange the order of the annotations of 21 joints
        self.reorder_idx = idx_MHP
        self.stride = stride
        self.sigma = config.DATASET.SIGMA
    def __getitem__(self, idx):
        """Build one sample dict: image tensor, 2D pose (heatmap scale),
        heatmaps, visibility flags and center map."""
        self.exception = False
        img_path = self.image_paths[self.start_idx + idx]
        img = cv2.resize(cv2.imread(img_path), tuple(self.config.MODEL.IMAGE_SIZE))
        # load 3D pose (world coord)
        dir_name, img_name = os.path.split(img_path) # ex: ../MHP/annotated_frames/data_1, 0_webcam_1.jpg
        dir_id = dir_name.split('_')[-1]
        img_idx, _, webcam_id = img_name[0:-4].split('_')
        # ex: ../MHP/annotated_frames/data1/0_joints.txt
        pose3d_path = os.path.join(self.data_dir, 'annotations', os.path.basename(dir_name), img_idx + '_joints.txt')
        pose3d = readAnnotation3D(pose3d_path)[self.reorder_idx]
        # load extrinsic params (per-sequence, per-webcam pickled rvec/tvec)
        rvec = pickle.load(
            open(
                os.path.join(
                    self.data_dir, 'calibrations', 'data_{}'.format(dir_id), 'webcam_{}'.format(webcam_id), 'rvec.pkl'), "rb"), encoding='latin1')
        tvec = pickle.load(open(os.path.join(self.data_dir, 'calibrations', 'data_{}'.format(dir_id), 'webcam_{}'.format(webcam_id), 'tvec.pkl'), "rb"), encoding='latin1')
        # Project 3D joints into the original 640x480 frame, then rescale to 256.
        pose2d, _ = cv2.projectPoints(pose3d, rvec, tvec, self.intrinsic_matrix, self.distortion_coeffs) # 21 x 1 x 2
        pose2d = pose2d.squeeze() # 21 x 2
        pose2d[:,0] *= (256 / self.orig_img_size[0])
        pose2d[:,1] *= (256 / self.orig_img_size[1])
        h, w = img.shape[0:2]
        # A joint is marked invisible when it projects outside the image.
        visibility = np.ones((pose2d.shape[0],1))
        for k in range(pose2d.shape[0]):
            if pose2d[k,0] < 0 or pose2d[k,1] < 0 or pose2d[k,0] >= w or pose2d[k,1] >= h:
                visibility[k] = 0
        pose2d = np.concatenate((pose2d, visibility), axis=1)
        # Center/scale from the in-bounds joint extent; fall back to the image
        # center when no joint qualifies (empty-array .max()/.min() raises).
        # NOTE(review): bare except also hides unrelated errors here.
        try:
            center_x = (pose2d[np.argwhere(pose2d[:,0] < w), 0].max() +
                        pose2d[np.argwhere(pose2d[:,0] > 0), 0].min()) / 2
        except:
            center_x = w / 2
            self.exception = True
        try:
            center_y = (pose2d[np.argwhere(pose2d[:,1] < h), 1].max() +
                        pose2d[np.argwhere(pose2d[:,1] > 0), 1].min()) / 2
        except:
            center_y = h / 2
            self.exception = True
        center = [center_x, center_y]
        try:
            scale = (pose2d[np.argwhere(pose2d[:,1] < h), 1].max() -
                     pose2d[np.argwhere(pose2d[:,1] > 0), 1].min() + 4) / h
        except:
            scale = 0.5
        # expand dataset. pose2d 21 x 3 [u,v,vis]
        img, pose2d, center = self.transform(img, pose2d, center, scale)
        # One Gaussian heatmap per joint at 1/stride resolution; channel 0 is
        # reserved for the background (filled below).
        heatmap = np.zeros((len(pose2d) + 1, h // self.stride, w // self.stride), dtype=np.float32)
        for i in range(len(pose2d)):
            # resize from 256 to 32
            x = int(pose2d[i][0]) * 1.0 / self.stride
            y = int(pose2d[i][1]) * 1.0 / self.stride
            heat_map = guassian_kernel(size_h=h / self.stride, size_w=w / self.stride, center_x=x, center_y=y, sigma=self.sigma)
            heat_map[heat_map > 1] = 1
            heat_map[heat_map < 0.0099] = 0
            heatmap[i + 1, :, :] = heat_map
        heatmap[0, :, :] = 1.0 - np.max(heatmap[1:, :, :], axis=0) # for background
        # show
        # import matplotlib.pyplot as plt
        # for k in range(0,21,1):
        #     fig = plt.figure()
        #     ax1 = fig.add_subplot(121)
        #     ax2 = fig.add_subplot(122)
        #     print('subpixel:',pose2d[k])
        #     ax1.imshow(cv2.cvtColor(img / img.max(), cv2.COLOR_BGR2RGB))
        #     plot_hand(ax1, pose2d[:,0:2], order='uv')
        #     ax2.imshow(heatmap[k])
        #     plot_hand(ax2, pose2d[:,0:2] / self.stride, order='uv')
        #     plt.title('MHP: {} Joint id: {} Vis: {}'.format(idx, k, pose2d[k,2]==1))
        #     plt.show()
        # Full-resolution single-channel Gaussian centered on the hand.
        centermap = np.zeros((h, w, 1), dtype=np.float32)
        center_map = guassian_kernel(size_h=h, size_w=w, center_x=center[0], center_y=center[1], sigma=3)
        center_map[center_map > 1] = 1
        center_map[center_map < 0.0099] = 0
        centermap[:, :, 0] = center_map
        # Normalize image to roughly [-0.5, 0.5] per channel.
        img = Mytransforms.normalize(Mytransforms.to_tensor(img), [128.0, 128.0, 128.0],
                                     [256.0, 256.0, 256.0])
        centermap = Mytransforms.to_tensor(centermap)
        ret = {
            'imgs': img, # 3 x 256 x 256
            'pose2d': pose2d[:,0:-1] / self.stride,
            'heatmaps': heatmap, # (21+1) x 32 x 32
            'visibility': visibility,
            'centermaps': centermap # 1 x 256 x 256
        }
        return ret
    def __repr__(self):
        """Human-readable summary: split name, size, root, transforms."""
        fmt_str = '{} Dataset '.format(self.set_name.title()) + self.__class__.__name__ + '\n'
        fmt_str += '    Number of datapoints: {}\n'.format(self.__len__())
        fmt_str += '    Root Location: {}\n'.format(self.data_dir)
        tmp = '    Transforms (if any): '
        fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
        return fmt_str
    def __len__(self):
        # Size of this split (train or eval slice of image_paths).
        return self.end_idx - self.start_idx
    def get_kpts(self, maps, img_h = 256.0, img_w = 256.0):
        # maps (b,21,32,32)
        # Flat argmax per heatmap, then decode to (u, v) = (col, row).
        # Note: the first variable temporarily holds the flat index before
        # being reduced to the column coordinate.
        pose2d_pred_u = torch.argmax(maps.view((maps.shape[0], maps.shape[1],-1)),dim=2)
        pose2d_pred_u, pose2d_pred_v = pose2d_pred_u % maps.shape[-1], pose2d_pred_u // maps.shape[-1]
        return torch.stack((pose2d_pred_u, pose2d_pred_v), dim=2).float()
| ZJULiHongxin/HRNet-Hand-Pose-Estimation | lib/dataset/MHP_CPMDataset.py | MHP_CPMDataset.py | py | 10,056 | python | en | code | 1 | github-code | 13 |
35658629375 | #!/usr/bin/python3
from wifi import Cell
iface = "wlp2s0"  # wireless interface to scan (adjust to your system)
# Print one line per visible access point: SSID, BSSID, channel, signal,
# mode, and the encryption type (or "(Open)" for unencrypted networks).
for cell in Cell.all(iface):
    output = "%s\t(%s)\tchannel %d\tsignal %d\tmode %s " % \
        (cell.ssid, cell.address, cell.channel, cell.signal, cell.mode)
    if cell.encrypted:
        output += "(%s)" % (cell.encryption_type.upper(),)
    else:
        output += "(Open)"
    print(output)
| balle/python-network-hacks | wlan-scanner.py | wlan-scanner.py | py | 367 | python | en | code | 135 | github-code | 13 |
31900734349 | ##################
# django imports #
##################
from django.urls import path, include
##########################################
# import modules from current directory #
##########################################
from . import views
from accounts import views as AccountViews
#####################
# url pattern LISTS #
#####################
urlpatterns = [
    # URL for the Merchant Dashboard view
    path('', AccountViews.merchantDashboard, name='merchantDashboard'),
    # URL for the Merchant Profile view
    path('profile/', views.merchantProfile, name='merchantProfile'),
    # Menu builder overview and per-category product listing
    path('menu-builder/', views.menu_builder, name='menu_builder'),
    path('menu-builder/product-items-category/<int:pk>/', views.product_items_by_category, name='product_items_by_category'),
    # Product Category CRUD
    path('menu-builder/product-items-category/add/', views.add_product_category, name='add_product_category'),
    path('menu-builder/product-items-category/edit/<int:pk>/', views.edit_product_category, name='edit_product_category'),
    path('menu-builder/product-items-category/delete/<int:pk>/', views.delete_product_category, name='delete_product_category'),
    # Product Item CRUD
    path('menu-builder/product/add/', views.add_product_item, name='add_product_item'),
    path('menu-builder/product/edit/<int:pk>/', views.edit_product_item, name='edit_product_item'),
    path('menu-builder/product/delete/<int:pk>/', views.delete_product_item, name='delete_product_item'),
    # operating Hour CRUD
    path('operating-hours/', views.operating_hours, name='operating_hours'),
    path('operating-hours/add/', views.add_operating_hours, name='add_operating_hours'),
    path('operating-hours/remove/<int:pk>/', views.remove_operating_hours, name='remove_operating_hours'),
    # Merchant order views
    path('order_detail/<int:order_number>/', views.merchant_order_detail, name='merchant_order_detail'),
    path('my_orders/', views.merchant_my_orders, name='merchant_my_orders'),
]
| rtactayjr/Online-Food-Web-App | merchant/urls.py | urls.py | py | 1,990 | python | en | code | 1 | github-code | 13 |
38002084744 | from tkinter.ttk import Frame, Button
from tkinter_gui.game_state import GameState
class ControlsFrame(Frame):
    """Frame with the in-game control buttons (draw offer, resignation)."""

    def __init__(self, game_state: GameState, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._game_state = game_state
        # One stretching row with two equal-width columns.
        for col_index in (0, 1):
            self.columnconfigure(col_index, weight=1)
        self.rowconfigure(0, weight=1)
        # Create and place both buttons; handles are not kept (no callbacks yet).
        for col_index, caption in enumerate(("Offer Draw", "Resign")):
            button = Button(self, text=caption)
            button.grid(column=col_index, row=0)

    def update(self):
        super().update()
| Shtepser/chess | tkinter_gui/controls_frame.py | controls_frame.py | py | 595 | python | en | code | 0 | github-code | 13 |
23027885102 | #!/usr/bin/env python3
def split_long_utt(text, maxchars):
    """Split *text* into consecutive chunks of at most *maxchars* characters.

    Fix: the original appended the remainder unconditionally, so inputs whose
    length is an exact multiple of *maxchars* ended with a spurious empty
    chunk (printed downstream as a blank line). Empty input still yields
    [""] so the CLI emits one blank output line per blank input line.
    """
    parts = [text[i:i + maxchars] for i in range(0, len(text), maxchars)]
    return parts or [""]
if __name__ == "__main__":
    import argparse
    import fileinput
    parser = argparse.ArgumentParser("Split long training samples to multiple shorter ones")
    parser.add_argument("--maxchars", type=int, default=30)
    parser.add_argument("input", nargs = "+", help = "file(s) to process or - for stdin")
    args = parser.parse_args()
    # Emit one output line per chunk of each (stripped) input line.
    for line in fileinput.input(args.input):
        line = line.strip()
        utts = split_long_utt(line, maxchars = args.maxchars)
        for utt in utts:
            print(utt)
| Gastron/omstart-net | preprocessing/split_long_utts.py | split_long_utts.py | py | 823 | python | en | code | 0 | github-code | 13 |
24987991177 | """Container for each experiment, has a dataframe and metadata"""
import os
import re
from datetime import datetime
import traceback
import pandas as pd
from . import _version
class UserData:
    """Container for one experiment: a PSM dataframe plus run metadata.

    An experiment is identified by (recno, runno, searchno). On construction
    a per-experiment log file is created in ``outdir`` (named via
    :meth:`output_name`) and stamped with the PyGrouper version.
    """

    def __init__(self, recno=None, datafile=None, runno=1, searchno=1, no_taxa_redistrib=0,
                 addedby='', indir = '.', outdir='.', rawfiledir='.',
                 labeltype='none', quant_source=None, phospho=False,
                 searchdb=None, taxonid=None, miscuts=2):
        if recno is None:
            raise ValueError('Must supply record number (recno)')
        self.recno = recno
        self.runno = runno
        self.searchno = searchno
        self.taxonid = taxonid
        self.added_by = addedby
        self.labeltype = labeltype
        self.no_taxa_redistrib = no_taxa_redistrib
        self.filtervalues = dict()
        self.indir = indir
        self.outdir = outdir
        self.rawfiledir = rawfiledir
        self.searchdb = searchdb # file name for refseq
        self.datafile = datafile
        self.df = pd.DataFrame()
        self.pipeline = None
        self.original_columns = None
        # Strip a leading "<rec>_<run>_<search>_" prefix off the data file's
        # basename to recover the bare sample name.
        rrs = '{}_{}_{}_'.format(recno, runno, searchno)
        basename = os.path.splitext(os.path.basename(datafile))[0]
        self.basename = basename.split(rrs)[-1]
        self.LOGFILE = os.path.join(outdir, self.output_name(ext='log'))
        self._LOGSTACK = list()  # messages queued by to_logq, flushed by to_log
        self.EXIT_CODE = 0
        self.ERROR = None
        self.taxon_ratio_totals = dict()
        self.miscuts = miscuts
        self.phospho = phospho
        with open(self.LOGFILE, 'w') as f:
            f.write('{} PyGrouper {}'.format(datetime.now(), _version.__version__))

    @property
    def taxon_miscut_id(self):
        """Hash-based identifier combining taxon id and miscut count."""
        return hash(self.taxonid) + hash(self.miscuts)

    def __repr__(self):
        return '{}_{}_{}'.format(self.recno, self.runno, self.searchno)

    def __bool__(self):
        # Truthy only when both a data file and a record number are set.
        if self.datafile is not None and self.recno is not None:
            return True
        return False

    def to_log(self, message):
        """Write *message* (plus any queued messages) to the log file.

        Fix: the original flush branch read an undefined name (``messages``)
        and concatenated a list with a tuple, so it raised whenever the queue
        was non-empty. Queued messages are now flushed first and the queue
        cleared.
        NOTE(review): the file is opened with 'w+', which truncates previous
        log content on every call -- confirm whether append mode was intended.
        """
        if self._LOGSTACK:  # flush queued messages ahead of this one
            messages = tuple(self._LOGSTACK) + (message,)
            self._LOGSTACK = list()
        else:
            messages = (message,)
        with open(self.LOGFILE, 'w+') as f:
            for message in messages:
                f.write(message)
                # f.write(sep)
                f.write('\n')

    def to_logq(self, message):
        """Queue *message* for the next to_log/flush_log call."""
        self._LOGSTACK.append(message+'\n')
        return self

    def flush_log(self):
        """Write all queued messages (if any) and clear the queue."""
        if self._LOGSTACK:
            stack, self._LOGSTACK = self._LOGSTACK, list()
            self.to_log('\n'.join(stack))
        return self

    def full_path(self, in_or_out='in'):
        """returns data file with given path ('in' -> indir, 'out' -> outdir,
        anything else -> current directory)"""
        if in_or_out == 'in':
            mydir = self.indir
        elif in_or_out == 'out':
            mydir = self.outdir
        else:
            mydir = '.'
        return os.path.join(mydir, self.datafile or '')

    def read_csv(self, *args, **kwargs):
        """Uses pandas read_csv function to read an input file;
        args and kwargs are passed to this function.

        Returns 0 on success, 1 on read failure (ERROR/EXIT_CODE set),
        2 when the file parsed but contained no rows.
        """
        try:
            self.df = pd.read_csv(self.full_path(), *args, **kwargs)
            self.original_columns = self.df.columns.values
        except Exception as e:
            # self.to_log(''.join(traceback.format_exc()))
            self.to_log(traceback.format_exc())
            self.ERROR = traceback.format_exc()
            self.EXIT_CODE = 1
            return 1
        if len(self.df) == 0:
            self.EXIT_CODE = 1
            return 2
        return 0

    def output_name(self, suffix=None, ext='tab'):
        """generate an appropriate output file name
        returns rec_run_search_labeltype_filetype.tab"""
        # suffix = '_'.join([str(ix) for ix in suffix])
        return '{!r}_{}_{}{}.{}'.format(self,
                                        self.labeltype,
                                        self.basename,
                                        '_' + suffix if suffix else '',
                                        ext
        )

    def populate_base_data(self):
        """Populate dataframe with base data prior to grouping"""
        self.categorical_assign('EXPRecNo', self.recno)
        self.categorical_assign('EXPRunNo', self.runno)
        self.categorical_assign('EXPSearchNo', self.searchno)
        self.categorical_assign('CreationTS', datetime.now().strftime("%m/%d/%Y) %H:%M:%S"))
        self.categorical_assign('AddedBy', self.added_by)
        # self.categorical_assign('metadatainfo', '') # not sure if this is okay
        # self.df['EXPRecNo'] = self._categorical_assign(self.recno)
        # self.df['EXPRunNo'] = self._categorical_assign(self.runno)
        # self.df['EXPSearchNo'] = self._categorical_assign(self.searchno)
        # self.df['CreationTS'] = self._categorical_assign(datetime.now().strftime("%m/%d/%Y) %H:%M:%S"))
        # self.df['AddedBy'] = self._categorical_assign(self.added_by)
        # self.df['psm_EXPTechRepNo'] = self.techrepno
        # self.df['psm_TaxonID'] = self.taxonid
        #self.df['psm_GeneList'] = ''
        #self.df['psm_ProteinList'] = ''
        #self.df['psm_GeneCount'] = 0
        #self.df['psm_ProteinCount'] = 0
        #self.df['psm_HomologeneID'] = ''
        #self.df['psm_ProteinCapacity'] = ''
        # self.df['metadatainfo'] = [tuple()] * len(self.df)
        self.df['metadatainfo'] = ''
        # Default ion-score bins when the caller did not configure them.
        if not 'ion_score_bins' in self.filtervalues:
            self.filtervalues['ion_score_bins'] = (10, 20, 30)
        return self

    @property
    def filterstamp(self):
        """Compact string encoding the active filter values (file-name safe)."""
        s = 'is{ion_score}_qv{qvalue}_pep{pep}_idg{idg}_z{zmin}to{zmax}_mo{modi}_is_bins{ion_score_bins}'.format(**self.filtervalues)
        if self.phospho:
            s += '_phospho_only'
        return s

    def categorical_assign(self, name, value, **kwargs):
        """
        Assign a static value to a new column.
        Saves memory by using pandas Categorical dtype.
        :kwargs: passed to pd.Series.astype
        """
        self.df[name] = value
        self.df[name] = self.df[name].astype('category', **kwargs)
        return self
72688758417 | class SimpleDate:
def __init__(self, day: int, month: int, year: int):
self._day = day
self._month = month
self._year = year
def __str__(self):
return f"{self._day}.{self._month}.{self._year}"
def __eq__(self, another):
if self._year != another._year :
return False
elif self._month != another._month :
return False
elif self._day != another._day:
return False
return True
def __ne__(self, another):
if (self._year == another._year and
self._month == another._month and
self._day == another._day):
return False
return True
def __gt__(self, another):
if self._year > another._year:
return True
elif self._year < another._year:
return False
else:
if self._month > another._month:
return True
elif self._month < another._month:
return False
else:
if self._day > another._day:
return True
else :
return False
def __lt__(self, another):
if self._year < another._year:
return True
elif self._year > another._year:
return False
else:
if self._month < another._month:
return True
elif self._month > another._month:
return False
else :
if self._day < another._day :
return True
else:
return False
def __add__(self, nod: int):
day = self._day
month = self._month
year = self._year
x = 0
while x < nod :
day += 1
if day > 30:
day = 1
month += 1
if month > 12:
month = 1
year += 1
x += 1
return SimpleDate(day, month, year)
def __sub__(self, another):
x = (self._year * 360) + (self._month * 30) + self._day
y = (another._year * 360) + (another._month * 30) + another._day
dif = abs(x - y)
return dif
| crawwwler/mooc-python | part 10/simple date/src/simple_date.py | simple_date.py | py | 2,300 | python | en | code | 0 | github-code | 13 |
25162623182 | from bs4 import BeautifulSoup
import requests
Title = ""
def tread_spider(page_num):
    """Crawl craigslist search result pages starting at offset 120, logging
    each listing's link and title and fetching its posting time.

    Fix: the original never advanced ``s`` (and reassigned ``page_num``
    inside the loop), so once the loop was entered it re-fetched the same
    page forever. ``s`` now steps by 120 per page, up to ``page_num``.
    """
    s = 120
    while s <= page_num:
        url = 'https://sfbay.craigslist.org/search/bia?s=' + str(s)
        response = requests.get(url)
        plain_responce = response.text
        soup = BeautifulSoup(plain_responce)
        for link in soup.findAll('a', {'class': 'result-title hdrlnk'}):
            href = "https://sfbay.craigslist.org" + link.get('href')
            Title = link.string
            file.write(href + " " + Title + '\n')
            get_item_info(href, Title)
        # Advance to the next results page; step matches the initial offset
        # (craigslist pages appear to hold 120 listings -- TODO confirm).
        s += 120
def get_item_info(item_url, title):
    """Fetch a listing page and append its posting time(s) to the time log."""
    page_html = requests.get(item_url).text
    soup = BeautifulSoup(page_html)
    # Each <time class="timeago"> element carries a posting timestamp.
    for posted in soup.findAll('time', {'class': 'timeago'}):
        file2.write(title + posted.string + '\n')
# Output logs used by the crawler functions above.
# NOTE(review): both handles shadow useful names and are never explicitly
# closed/flushed -- buffered data may be lost if the process dies.
file = open('data.txt', 'w')
file2 = open('time.txt', 'w')
tread_spider(120) | tpatil2/Python | Web Crawler/main_crawl.py | main_crawl.py | py | 1,031 | python | en | code | 0 | github-code | 13 |
9520934927 | from __future__ import absolute_import
from django.core.exceptions import ValidationError
from rest_framework import serializers
from silver.api.serializers.common import MeteredFeatureSerializer
from silver.api.serializers.product_codes_serializer import ProductCodeRelatedField
from silver.models import Provider, Plan, MeteredFeature
class PlanSerializer(serializers.HyperlinkedModelSerializer):
metered_features = MeteredFeatureSerializer(
required=False, many=True
)
provider = serializers.HyperlinkedRelatedField(
queryset=Provider.objects.all(),
view_name='provider-detail',
)
product_code = ProductCodeRelatedField()
class Meta:
model = Plan
fields = ('name', 'url', 'interval', 'interval_count', 'amount',
'currency', 'trial_period_days', 'generate_after', 'enabled',
'private', 'product_code', 'metered_features', 'provider')
def validate_metered_features(self, value):
metered_features = []
for mf_data in value:
metered_features.append(MeteredFeature(**mf_data))
try:
Plan.validate_metered_features(metered_features)
except ValidationError as e:
raise serializers.ValidationError(str(e)[3:-2])
return value
def create(self, validated_data):
metered_features_data = validated_data.pop('metered_features')
metered_features = []
for mf_data in metered_features_data:
mf = MeteredFeatureSerializer(data=mf_data)
mf.is_valid(raise_exception=True)
mf = mf.create(mf.validated_data)
metered_features.append(mf)
product_code = validated_data.pop('product_code')
product_code.save()
validated_data.update({'product_code': product_code})
plan = Plan.objects.create(**validated_data)
plan.metered_features.add(*metered_features)
plan.product_code = product_code
plan.save()
return plan
def update(self, instance, validated_data):
instance.name = validated_data.get('name', instance.name)
instance.generate_after = validated_data.get('generate_after',
instance.generate_after)
instance.due_days = validated_data.get('due_days', instance.due_days)
instance.save()
return instance
| silverapp/silver | silver/api/serializers/plans_serializer.py | plans_serializer.py | py | 2,413 | python | en | code | 292 | github-code | 13 |
36266986413 | import sqlite3
from random import randint, choice
from faker import Faker
import pandas as pd
conn = sqlite3.connect('social_network.db')
#creating a query to get list of all married couples as asked in the lab
married_couple = """
SELECT person1.name, person2_name, beginning_date
FROM relationships
JOIN people person1 ON person1_id = person1.id
JOIN people person2 ON person2_id = person2.id
WHERE Relationship_type = 'spouse'
"""
c = conn.cursor()
c.execute(married_couple)
# Using pandas package to generate a CSV file
Output = c.fetchall()
Table = pd.DataFrame(Output, columns=['Person1', 'Person2', 'beginning_date'])
Table.to_csv('married_couples.csv', index=False)
conn.close() | kunjthakkar/Lab_008 | Script2.py | Script2.py | py | 709 | python | en | code | 0 | github-code | 13 |
43460315413 | # File: MagicSquare.py
# Description: A n x n matrix that is filled with the numbers 1, 2, 3, ..., n² is a magic square if the sum of the elements in each row, in each column, and in the two diagonals is the same value.
# I just copied the description from the Assignment, not too sure if we create our own or not...
# Student's Name: Peyton Breech
# Student's UT EID: pb23489
# Course Name: CS 313E
# Unique Number: 50205
# Date Created: 9/5/2019
# Date Last Modified: 9/6/2019
# This is the main function. It asks the user to enter an odd number that is 1 or greater and validates
# the input. If the user enters an even number or a number less than one, the function prompts
# the user again until a valid number is given. Once it has a valid number, it passes that number to
# the makeSquare function.
def main ():
    """Prompt for an odd n >= 1, build an n x n magic square, then print and
    check it."""
    numChoice = int(input('Please enter an odd number 1 or greater...'))
    #Will constantly loop until the user inputs an odd number that is 1 or greater
    while numChoice%2 == 0 or numChoice < 1:
        numChoice = int(input('Incorrect. Please enter an odd number 1 or greater... '))
    #Creating the EMPTY 2D array for the magic Square based on the users choice of number
    magicSquareArray = [[0 for x in range(numChoice)]
                        for y in range(numChoice)]
    # Starting position: bottom row, middle column. Note column is a float
    # (numChoice / 2); makeSquare truncates it with int() when indexing.
    row = numChoice - 1
    column = numChoice / 2
    #Calls the functions for the given program to work.
    makeSquare(numChoice, magicSquareArray, row, column)
    printSquare(numChoice, magicSquareArray, row, column)
    checkSquare (numChoice, magicSquareArray, row, column)
#This function FILLS the 2D list that represents the magic square. The basic move is to place the next
#number one square down and to the right of the previous one. Two extra rules handle the special cases:
#when the walk runs off the board it wraps to row 0 or column 0 of the board, and when the target square
#is already occupied the walk is redirected upward instead.
def makeSquare(numChoice, magicSquareArray, row, column):
    """Fill magicSquareArray in place with 1..numChoice**2 (Siamese method variant).

    The walk starts at (row, column) and repeatedly steps one cell down and
    one cell to the right, wrapping a single out-of-range coordinate back to
    0.  When both coordinates run off the board at once, or the target cell
    is already occupied, the walk retreats to the cell directly above the
    previously filled one.  `column` may arrive as a float (main passes
    numChoice / 2), so every board access goes through int().
    """
    value = 1
    last = numChoice * numChoice
    while value <= last:
        if row == numChoice and column == numChoice:
            # Ran off the bottom AND the right edge together: back up to
            # the square straight above the one filled last.
            row, column = row - 2, column - 1
            continue
        # Wrap a single out-of-range coordinate around to the near edge.
        if row == numChoice:
            row = 0
        if column >= numChoice:
            column = 0
        r, c = int(row), int(column)
        if magicSquareArray[r][c]:
            # Collision with an occupied cell: retreat to the square
            # straight above the previous placement and retry.
            row, column = row - 2, column - 1
        else:
            magicSquareArray[r][c] = value
            value += 1
            row, column = row + 1, column + 1
#This function is used to PRINT out the Magic Square 2D array by showing us the Magic Square in its physical form and relaying to us what kind of square it is, for example a 5 x 5 square.
def printSquare(numChoice, magicSquareArray, row, column):
    """Pretty-print the filled square with a size announcement.

    Each cell is rendered right-aligned in two columns ('%2d ') and a blank
    line follows every row.  `row` and `column` are accepted only for
    call-signature symmetry with the other helpers; they are not read.
    """
    print()
    print(f"Here is a {numChoice} x {numChoice} magic square:")
    print()
    for r in range(numChoice):
        print("".join(f"{magicSquareArray[r][c]:2d} " for c in range(numChoice)))
        print()
#This function is used to VALIDATE the Magic Square by checking the sum of the rows, columns, and both diagnols of the square to make sure that the Square we created from the 2D Array Lists is in fact a
#magic square.
def checkSquare(numChoice, magicSquareArray, row, column):
    """Validate the board and print whether it is a magic square.

    A square is magic when every row, every column and both diagonals sum
    to the same value (the "canonical sum"); the main diagonal's sum is
    used as the reference.  `row` and `column` are accepted only for
    call-signature symmetry with the other helpers; they are not read.

    Bug fixes relative to the original version:
      * `squareCheck == False` was a no-op comparison, so failures were
        never recorded; it is now an assignment.
      * Row/column sums were compared against `checkSum` while it was
        still 0 (it was only computed afterwards); the reference sum is
        now computed first.
    """
    # Reference value: sum of the main diagonal.
    checkSum = 0
    for i in range(numChoice):
        checkSum = checkSum + magicSquareArray[i][i]
    squareCheck = True
    # Every row must match the reference sum.
    for r in range(numChoice):
        if sum(magicSquareArray[r][c] for c in range(numChoice)) != checkSum:
            squareCheck = False
    # Every column must match the reference sum.
    for c in range(numChoice):
        if sum(magicSquareArray[r][c] for r in range(numChoice)) != checkSum:
            squareCheck = False
    # The anti-diagonal must match as well.
    sumDiag = 0
    for r in range(numChoice):
        sumDiag = sumDiag + magicSquareArray[r][numChoice - r - 1]
    if sumDiag != checkSum:
        squareCheck = False
    if squareCheck == False:
        print('This is not a magic square')
    else:
        print('This is a magic square and the canonical sum is ' + str(checkSum))
main() | jasoncsonic/CS313E | MagicSquare.py | MagicSquare.py | py | 6,255 | python | en | code | 0 | github-code | 13 |
7318067026 | import matplotlib.pyplot as plt
import pandas as pd
import statsmodels.api as sm
# create some data
#
# N.B. In linear regression, there is a SINGLE y-value for each data point, but there
# may be MULTIPLE x-values, corresponding to the multiple factors that might affect the
# experiment, i.e. y = b_1 * x_1 + b_2 * x_2 + b_3 * x_3 + .....
# Therefore, the x data is a TWO DIMENSIONAL array ... the columns correspond to the different
# variables (x_1, x_2, x_3, ...), and the rows correspond to the values of those variables
# for each data point.
#
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                   'y': [1.1, 2.1, 2.9, 3.9, 5.1, 5.9, 6.9, 8.05, 9.1, 9.7] } )
print(df)
y = df['y']
X = df['x']
# X = sm.add_constant(X)
# NOTE(review): with add_constant commented out, sm.OLS fits a model with NO
# intercept term (regression through the origin).  Uncomment the line above
# to fit y = b0 + b1*x instead -- confirm which model is intended.
# Ordinary Least Squares model from statsmodels
model = sm.OLS(y, X).fit()
# View model summary (coefficients, R^2, etc.)
print(model.summary())
# Create prediction: fitted values at the observed x
y_pred = model.predict(X)
# Plotting: raw data as points, the fitted line on top.
plt.plot(df['x'], y, 'o', label='Data')
plt.plot(df['x'], y_pred, 'r-', label="Linear Regression Fit")
plt.title("Basic Linear Regression")
plt.xlabel("X")
plt.ylabel("Y")
plt.legend()
plt.show()
| brash99/cpsc250 | Week4_Examples/linear_regression_statsmodels.py | linear_regression_statsmodels.py | py | 1,161 | python | en | code | 1 | github-code | 13 |
32170038212 | '''
UE20CS302 (D Section)
Machine Intelligence
Week 3: Decision Tree Classifier
Mitul Joby
PES2UG20CS199
'''
import numpy as np
'''Calculate the entropy of the enitre dataset'''
# input:pandas_dataframe
# output:int/float
def get_entropy_of_dataset(df):
    """Return the Shannon entropy (in bits) of df's last column, the class label."""
    target = df.columns[-1]
    counts = df[target].value_counts()
    total = len(df[target])
    entropy = 0
    for label in df[target].unique():
        p = counts[label] / total
        if p != 0:
            entropy = entropy - p * np.log2(p)
    return entropy
'''Return avg_info of the attribute provided as parameter'''
# input:pandas_dataframe,str {i.e the column name ,ex: Temperature in the Play tennis dataset}
# output:int/float
def get_avg_info_of_attribute(df, attribute):
    """Weighted average entropy of the class label after splitting df on `attribute`.

    Each distinct value of `attribute` contributes the entropy of its
    sub-dataframe, weighted by the fraction of rows carrying that value.
    """
    counts = df[attribute].value_counts()
    total = len(df[attribute])
    avg_info = 0
    for val in df[attribute].unique():
        weight = counts[val] / total
        subset = df[df[attribute] == val]
        avg_info += weight * get_entropy_of_dataset(subset)
    return avg_info
'''Return Information Gain of the attribute provided as parameter'''
# input:pandas_dataframe,str
# output:int/float
def get_information_gain(df, attribute):
    """Information gain of `attribute`: dataset entropy minus its average info."""
    return get_entropy_of_dataset(df) - get_avg_info_of_attribute(df, attribute)
'''
Return a tuple with the first element as a dictionary which has IG of all columns
and the second element as a string with the name of the column selected
example : ({'A':0.123,'B':0.768,'C':1.23} , 'C')
'''
#input: pandas_dataframe
#output: ({dict},'str')
def get_selected_attribute(df):
    """Score every attribute by information gain and pick the best one.

    Returns a tuple (gains, best) where `gains` maps each attribute (every
    column except the label column) to its information gain and `best` is
    the first attribute achieving the highest gain (ties keep the earlier
    column, because the comparison is strict).
    """
    informationGain = {}
    maxGain = float("-inf")
    for candidate in df.columns[:-1]:
        gain = get_information_gain(df, candidate)
        informationGain[candidate] = gain
        if gain > maxGain:
            maxGain = gain
            col_name = candidate
    return (informationGain, col_name)
24289643055 | import numpy as np
import multiprocessing as mt
if __name__ == "__main__":
from voronoi import _voronoi_analysis
else:
import _voronoi_analysis
class VoronoiAnalysis:
    """Per-atom Voronoi volume, neighbour count and cavity radius.

    Thin wrapper around the multithreaded voro++ backend exposed by the
    compiled `_voronoi_analysis` extension (see `An extension to VORO++ for
    multithreaded computation of Voronoi cells
    <https://arxiv.org/abs/2209.11606>`_).

    Args:
        pos (np.ndarray): (:math:`N_p, 3`) particle positions.
        box (np.ndarray): (:math:`3, 2`) system box.
        boundary (list): per-axis boundary flags, 1 periodic / 0 free,
            such as [1, 1, 1].
        num_t (int, optional): thread count; defaults to all available
            threads.

    Outputs (populated by :meth:`compute`):
        - **vol** (np.ndarray) - (:math:`N_p`), atom Voronoi volume.
        - **neighbor_number** (np.ndarray) - (:math:`N_p`), atom Voronoi
          neighbor number.
        - **cavity_radius** (np.ndarray) - distance from each particle to
          the farthest vertex of its Voronoi cell.

    Examples:
        >>> import mdapy as mp
        >>> mp.init()
        >>> FCC = mp.LatticeMaker(4.05, 'FCC', 10, 10, 10)
        >>> FCC.compute()
        >>> avol = mp.VoronoiAnalysis(FCC.pos, FCC.box, [1, 1, 1])
        >>> avol.compute()
        >>> avol.vol
        >>> avol.neighbor_number
        >>> avol.cavity_radius
    """

    def __init__(self, pos, box, boundary, num_t=None) -> None:
        self.pos = pos
        self.box = box
        self.boundary = boundary
        if num_t is None:
            # Default to every available hardware thread.
            self.num_t = mt.cpu_count()
        else:
            assert num_t >= 1, "num_t should be a positive integer!"
            self.num_t = int(num_t)

    def compute(self):
        """Run the Voronoi tessellation and fill the output arrays in place."""
        n_atoms = self.pos.shape[0]
        self.vol = np.zeros(n_atoms)
        self.neighbor_number = np.zeros(n_atoms, dtype=int)
        self.cavity_radius = np.zeros(n_atoms)
        _voronoi_analysis.get_voronoi_volume(
            self.pos,
            self.box,
            np.bool_(self.boundary),
            self.vol,
            self.neighbor_number,
            self.cavity_radius,
            self.num_t,
        )
if __name__ == "__main__":
import taichi as ti
from lattice_maker import LatticeMaker
from time import time
ti.init()
FCC = LatticeMaker(4.05, "FCC", 50, 50, 50) # Create a FCC structure.
FCC.compute() # Get atom positions.
# FCC.write_data()
start = time()
avol = VoronoiAnalysis(
FCC.pos, FCC.box, [1, 1, 1], 40
) # Initilize the Voronoi class.
avol.compute() # Calculate the Voronoi volume.
end = time()
print(f"Calculate volume time: {end-start} s.")
print(avol.vol) # Check atomic Voronoi volume.
print(avol.neighbor_number) # Check neighbor number.
print(avol.cavity_radius) # Check the cavity radius.
| mushroomfire/mdapy | mdapy/voronoi_analysis.py | voronoi_analysis.py | py | 3,333 | python | en | code | 15 | github-code | 13 |
37385160040 |
import random
import logging
import statistics
import pandas as pd
from src.utils.custom_decorators import time_func
from nltk.sentiment.vader import SentimentIntensityAnalyzer
logger = logging.getLogger(__name__)
class SentimentAnalysis:
    """Static helpers that score text sentiment with NLTK's VADER analyzer."""

    @staticmethod
    def scale_compound_score(compound_score, text):
        '''
        Map a VADER compound score (in [-1, 1]) onto one of four fixed
        buckets: >= 0.5 -> 0.75, [0, 0.5) -> 0.52, [-0.5, 0) -> 0.39,
        otherwise 0.2.  A compound score of exactly 0 is treated as "the
        analyzer failed on this text": the failure is logged and None is
        returned so callers can skip the sample.
        '''
        if compound_score == 0: #if 0, then the sentiment analysis didn't work, so need to log
            logger.info("ERROR, NLTK WAS NOT ABLE TO ANALYZE THE SENTIMENT of this statement: %s" % text)
            return None
        if compound_score >= 0.5:
            return 0.75
        elif compound_score < 0.5 and compound_score >= 0:
            return 0.52
        elif compound_score < 0 and compound_score >= -0.5:
            return 0.39
        else:
            return 0.2

    @staticmethod
    @time_func
    def analyze_text_sentiment(text, mentioned_words) -> float:
        '''
        Score `text` with VADER once per word in `mentioned_words` (the
        text is duplicated per word, so all compound scores are equal),
        average the compound scores and bucket the mean via
        scale_compound_score.  Returns the bucketed score, or None when
        the mean compound score is exactly 0.
        '''
        # One [ticker, text] row per mentioned word.
        sent_analysis_list = [[word, text] for word in mentioned_words]
        nltk_vader = SentimentIntensityAnalyzer()
        columns = ['ticker', 'post']
        post_df = pd.DataFrame(sent_analysis_list, columns=columns)
        scores = post_df['post'].apply(nltk_vader.polarity_scores).tolist()
        mean_compound_score = statistics.mean([score['compound'] for score in scores])
        final_score = SentimentAnalysis.scale_compound_score(mean_compound_score, text)
        if final_score is not None:
            logger.info("returning a final sentiment score of %f for this text: %s" % (final_score, text))
        # NOTE(review): looks like leftover debug output -- the score is
        # already logged above; confirm before removing.
        print(final_score)
        return final_score
| dstambler17/Social-Media-Post-Stock-Pipeline | src/machine_learning/SentimentAnalysis.py | SentimentAnalysis.py | py | 1,749 | python | en | code | 1 | github-code | 13 |
43508374866 | from dsmpy import root_resources
from dsmpy.event import Event, MomentTensor
from dsmpy.spc.stf import SourceTimeFunction
from obspy import read_events
import numpy as np
import warnings
from datetime import date
import re
import requests
def convert_catalog(cat):
    """Convert an obspy GCMT catalog into an array of pydsm Events and cache it.

    For each event the centroid origin, the moment tensor (converted to DSM
    units) and a half-duration source time function are extracted; the
    resulting object array is saved to ``<root_resources>/gcmt.npy``.

    Args:
        cat (obspy.Catalog): catalog read from a GCMT ndk file.
    """
    # NumPy >= 1.24 removed the deprecated np.object alias; use the
    # builtin `object` for an object-dtype array.
    events = np.empty(cat.count(), dtype=object)
    for i, event in enumerate(cat):
        tensor = event.preferred_focal_mechanism().moment_tensor.tensor
        # Moment magnitude from the 'Mwc' magnitude entry.
        mw = [m for m in event.magnitudes
              if m.magnitude_type=='Mwc'][0].mag
        centroid_time = [o for o in event.origins
                         if o.origin_type == 'centroid'][0].time
        stf_obspy = (event.preferred_focal_mechanism().
                     moment_tensor.source_time_function)
        # pydsm's SourceTimeFunction takes the half duration.
        source_time_function = SourceTimeFunction(
            stf_obspy.type, 0.5*stf_obspy.duration)
        mt = _mt_from_tensor(tensor, mw)
        # origins[1] holds the centroid location; depth converted m -> km.
        lon = event.origins[1].longitude
        lat = event.origins[1].latitude
        depth = event.origins[1].depth / 1000.
        # Event id is the 'earthquake name' description minus its leading
        # character.
        event_id = [e.text for e in event.event_descriptions
                    if e.type == 'earthquake name'][0][1:]
        event = Event(event_id, lat, lon, depth, mt,
                      centroid_time, source_time_function)
        events[i] = event
    np.save(root_resources + 'gcmt', events)
def _mt_from_tensor(tensor, Mw):
    """Build a MomentTensor in DSM units from an obspy tensor and magnitude.

    DSM works in units of 10**25 dyne cm, hence the 1e-18 scaling of every
    component.
    """
    scale = 1e-18
    return MomentTensor(
        tensor.m_rr * scale,
        tensor.m_rt * scale,
        tensor.m_rp * scale,
        tensor.m_tt * scale,
        tensor.m_tp * scale,
        tensor.m_pp * scale,
        Mw,
    )
def read_catalog():
    """Get the GCMT catalog.

    Loads the cached ``gcmt.npy`` from the package resources; on the first
    call (cache missing or unreadable) the catalog is downloaded from the
    GCMT website, converted and cached, which takes a few minutes.

    Returns:
        cat (ndarray): ndarray of pydsm.Event objects
    """
    try:
        cat = np.load(root_resources + 'gcmt.npy', allow_pickle=True)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit; any load failure triggers a fresh download.
        print('Dowloading gcmt catalog.\n'
              + 'Takes a few minutes. Done only once.')
        cat = _download_gcmt_catalog()
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            convert_catalog(cat)
        cat = np.load(root_resources + 'gcmt.npy', allow_pickle=True)
    return cat
def _download_gcmt_catalog():
    """Download the full GCMT catalog (1976 to today) from Columbia's servers.

    The historical jan76_dec17 ndk file is read first, then every monthly
    ndk file listed on the NEW_MONTHLY index pages from 2018 up to the
    current year is appended.  Months whose files are missing or
    unreadable are skipped.

    Returns:
        obspy.Catalog: the merged catalog.
    """
    cat = read_events('https://www.ldeo.columbia.edu/~gcmt/projects/CMT/'
                      'catalog/jan76_dec17.ndk')
    start_year = 2018
    end_year = date.today().year
    # Monthly files are named like 'jan18.ndk'.
    p = re.compile(r'[a-z]+\d\d\.ndk')
    for year in range(start_year, end_year+1):
        dir = ('https://www.ldeo.columbia.edu/~gcmt/projects/CMT/catalog/'
               'NEW_MONTHLY/' + str(year))
        r = requests.get(dir)
        ndk_files = p.findall(r.text)
        for ndk_file in set(ndk_files):
            try:
                cat_tmp = read_events(dir + '/' + ndk_file)
                cat.extend(cat_tmp.events)
            except Exception:
                # Skip months that are not published yet or fail to parse.
                # Was a bare `except: pass`, which also swallowed
                # KeyboardInterrupt/SystemExit.
                pass
    return cat
if __name__ == '__main__':
cat = read_catalog()
| afeborgeaud/dsmpy | dsmpy/utils/cmtcatalog.py | cmtcatalog.py | py | 3,145 | python | en | code | 10 | github-code | 13 |
45034293405 | from book_packer import get_book_data_from_file, sort_books_by_weight, \
sort_books_into_boxes, export_boxes_to_json, extract_book_data, \
Book, Box, OUTPUT_FILE
import unittest
import json
class TestGetBookDataFromFile(unittest.TestCase):
    """Tests for get_book_data_from_file: parsing one product HTML page."""

    def test_get_book_data_from_file(self):
        # Ideally we'd mock up a test file here, but in the interest of time
        # let's use an existing one with known values.
        # NOTE(review): depends on book9.html being present in the working
        # directory -- confirm the fixture is checked in.
        book = get_book_data_from_file("book9.html")
        self.assertEqual(book.author, u"Stephen Wolfram")
        self.assertEqual(book.title, u"A New Kind of Science [Hardcover]")
        self.assertEqual(book.price, u"$35.25")
        self.assertEqual(book.isbn_10, u"1579550088")
        self.assertEqual(book.weight, 5.6)
class TestSortBooksByWeight(unittest.TestCase):
    """Tests for sort_books_by_weight: books ordered heaviest first."""

    def test_sort_books_by_weight(self):
        # Expect descending weight order: 10, 5, 1.
        book1 = Book(author="Foo", weight=10)
        book2 = Book(author="Bar", weight=1)
        book3 = Book(author="Baz", weight=5)
        books = [book1, book2, book3]
        self.assertEqual(sort_books_by_weight(books), [book1, book3, book2])

    def test_sort_empty_list_returns_empty_list(self):
        # Edge case: no books to sort.
        self.assertEqual(sort_books_by_weight([]), [])
class TestSortBooksIntoBoxes(unittest.TestCase):
    """Tests for sort_books_into_boxes: packing books into weight-limited boxes."""

    def test_sort_books_into_boxes(self):
        book1 = Book(author="Foo", weight=10)
        book2 = Book(author="Bar", weight=1)
        book3 = Book(author="Baz", weight=5)
        books = [book1,book2,book3]
        # Expected packing: the 10 lb book alone, the 5 lb and 1 lb together.
        box1 = Box(id=1)
        box1.append(book1)
        box2 = Box(id=2)
        box2.append(book3)
        box2.append(book2)

        # Getting object deep comparison related issues here, just
        # look to make sure boxes have the requisite weight, which in this
        # case means the proper books went into each box, for production we could
        # be more accurate here
        list_of_boxes = sort_books_into_boxes(books)
        self.assertEqual(list_of_boxes[0].totalWeight, box1.totalWeight)
        self.assertEqual(list_of_boxes[1].totalWeight, box2.totalWeight)

    def test_sort_empty_list(self):
        # Edge case: no books means no boxes.
        list_of_boxes = sort_books_into_boxes([])
        self.assertEqual(list_of_boxes, [])
class TestExportBoxesToJson(unittest.TestCase):
    """Tests for export_boxes_to_json: boxes must serialize to valid JSON."""

    def test_export_boxes_to_json(self):
        test_file = "test.json"
        book1 = Book(author="Foo", weight=10)
        book2 = Book(author="Bar", weight=1)
        book3 = Book(author="Baz", weight=5)
        box1 = Box(id=1)
        box1.append(book1)
        box2 = Box(id=2)
        box2.append(book3)
        box2.append(book2)

        export_boxes_to_json([box1, box2], output_file=test_file)

        # Use a context manager so the file handle is closed (the original
        # leaked it), and read from the file name we actually exported to.
        with open(test_file) as f:
            data = f.read()
        # json loads will die if it's not proper json
        jsondata = json.loads(data)
        # Look at first record
        self.assertEqual(jsondata[0]['totalWeight'], 10)
        # Look at a book
        self.assertEqual(jsondata[0]['contents'][0]['author'], "Foo")
        # We could do more extensive testing of the json here if we were in production
class TestBoxClass(unittest.TestCase):
    """Tests for the Box class itself."""

    def test_update_box_weight(self):
        # totalWeight must track the sum of the appended books' weights.
        book1 = Book(author="Foo", weight=6)
        book2 = Book(author="Bar", weight=1)
        box1 = Box(id=1)
        box1.append(book1)
        box1.append(book2)
        self.assertEqual(box1.totalWeight, 7)
# Note that there are issues with floating point representations when adding
# boxes where 6.4 + 1.4 = 7.80000000001 sometimes. This exists for all versions of
# Python and all processors. This is something that should be
# solved for a production application. However, the easiest solution which is to use
# the Decimal module for arithmetic is not json serializable. In the interest of time
# I'm going to leave it as is, but this is something that should be solved in a production
# application.
class TestExtractBookData(unittest.TestCase):
    """End-to-end test for extract_book_data over a directory of fixtures."""

    def test_extract_book_data(self):
        test_data_dir = "tests/data"
        # for production we'd mock the output file here, but for now let's
        # use the existing one
        extract_book_data(test_data_dir)
        # Use a context manager so the file handle is closed (the original
        # leaked it).
        with open("data.json") as f:
            data = f.read()
        jsondata = json.loads(data)
        # look at first record
        self.assertEqual(jsondata[0]['totalWeight'], 7.3)
        # look at a book
        self.assertEqual(jsondata[0]['contents'][0]['author'], "Neil Gaiman")
        self.assertEqual(jsondata[0]['contents'][1]['author'], "David S Cohen")
if __name__ == '__main__':
unittest.main() | almosteverywhere/psychic-fortnight | tests/test_book_packer.py | test_book_packer.py | py | 4,619 | python | en | code | 0 | github-code | 13 |
20584305048 | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.db.utils import IntegrityError
from django.utils import timezone
import datetime
from core.models import Address
from order.models import Cart, CartItem, Order, OrderItem, Transaction
from shop.models import Product
class CartModelTest(TestCase):
    """Unit tests for the Cart model."""

    def setUp(self) -> None:
        # One cart owned by a freshly-created user.
        self.test_user = get_user_model().objects.create_user('test@mail.com', 'Test1234')
        self.test_cart = Cart.objects.create(user=self.test_user)

    def test_cart_str(self):
        # str(cart) must be the owning user's email.
        cart = self.test_cart
        self.assertEqual(str(cart), cart.user.email)
class CartItemModelTest(TestCase):
    """Unit tests for the CartItem model."""

    def setUp(self) -> None:
        self.test_user = get_user_model().objects.create_user('test@mail.com', 'Test1234')
        self.test_cart = Cart.objects.create(user=self.test_user)
        self.test_product = Product.objects.create(name='test', price=1000, quantity=1)

    def test_cart_item_str(self):
        # str(cart_item) must be "<cart> : <product>".
        cart_item = CartItem.objects.create(count=1,
                                            cart=self.test_cart,
                                            product=self.test_product
                                            )
        self.assertEqual(str(cart_item), f'{cart_item.cart} : {cart_item.product}')

    def test_cart_item_count(self):
        # A negative count must be rejected at the database level.
        # NOTE(review): presumably `count` is a PositiveIntegerField or has
        # a check constraint -- confirm in the model definition.
        with self.assertRaises(IntegrityError):
            CartItem.objects.create(count=-1,
                                    cart=self.test_cart,
                                    product=self.test_product
                                    )
class OrderModelTest(TestCase):
    """Unit tests for the Order model."""

    def setUp(self) -> None:
        self.test_user = get_user_model().objects.create_user('test@mail.com', 'Test1234')
        self.test_address = Address.objects.create(user=self.test_user)
        self.test_order = Order.objects.create(address=self.test_address, user=self.test_user)

    def test_order_time_for_pay(self):
        # The payment deadline should be ~30 minutes after creation.
        # NOTE(review): comparing only .date() makes this flaky within 30
        # minutes of midnight -- consider asserting on the timedelta instead.
        order = self.test_order
        self.assertEqual(order.time_for_pay.date(), (timezone.now() + datetime.timedelta(minutes=30)).date())

    def test_order_str(self):
        # str(order) must be "<user> : <status>".
        order = self.test_order
        self.assertEqual(str(order), f'{order.user} : {order.status}')

    def test_order_status(self):
        # A new order starts in status '1'.
        order = self.test_order
        self.assertEqual(order.status, '1')
class OrderItemModelTest(TestCase):
    """Unit tests for the OrderItem model."""

    def setUp(self) -> None:
        self.test_user = get_user_model().objects.create_user('test@mail.com', 'Test1234')
        self.test_product = Product.objects.create(name='test', price=1000, quantity=1)
        self.test_address = Address.objects.create(user=self.test_user)
        self.test_order = Order.objects.create(address=self.test_address, user=self.test_user)
        self.test_order_item = OrderItem(count=1, product=self.test_product, order=self.test_order)

    def test_order_item_str(self):
        # str(order_item) must be "<order> : <product>".
        order_item = self.test_order_item
        self.assertEqual(str(order_item), f'{order_item.order} : {order_item.product}')

    def test_order_item_count(self):
        # A negative count must be rejected at the database level.
        with self.assertRaises(IntegrityError):
            # NOTE(review): the assignment to `order_item` is never used;
            # the bare create() call is what should raise.
            order_item = OrderItem.objects.create(count=-1,
                                                  order=self.test_order,
                                                  product=self.test_product
                                                  )
class TransactionModelTest(TestCase):
    """Unit tests for the Transaction model."""

    def setUp(self) -> None:
        self.test_user = get_user_model().objects.create_user('test@mail.com', 'Test1234')
        self.test_address = Address.objects.create(user=self.test_user)
        self.test_order = Order.objects.create(address=self.test_address, user=self.test_user)
        self.test_transaction = Transaction.objects.create(user=self.test_user, order=self.test_order, transaction_code='1234', total_price=1)

    def test_transaction_str(self):
        # str(transaction) must be "<user> : <transaction_code>".
        transaction = self.test_transaction
        self.assertEqual(str(transaction), f'{transaction.user} : {transaction.transaction_code}')
| mejomba/django_shop_m89_final | order/tests/test_model.py | test_model.py | py | 4,104 | python | en | code | 0 | github-code | 13 |
40672619995 | import json
import os
import matplotlib
from matplotlib.backends.backend_pdf import PdfPages
from src.general import create_dir
from sys_config import AL_RES_DIR, RES_DIR, BASE_DIR
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
def read_results_json(dataset, model, af, seeds=[964, 131, 821, 12, 71],
unc='vanilla',
indicator=None, indicator_df=False):
df = pd.DataFrame()
keys_excluded = ['X_train_current_inds', 'X_train_remaining_inds', 'last_iteration', 'current_annotations',
'annotations_per_iteration', 'X_val_inds', 'dpool_augm_inds']
for seed in seeds:
print(dataset, seed, af)
path = os.path.join(AL_RES_DIR, 'al_{}_{}_{}_{}_{}'.format(dataset, model, unc, af, seed))
if indicator is not None: path += '_{}'.format(indicator)
if os.path.exists(path):
results_file = os.path.join(path, 'results_of_iteration') + '.json'
ids_file = os.path.join(path, 'selected_ids_per_iteration') + '.json'
if os.path.isfile(results_file):
with open(results_file) as json_file:
results = json.load(json_file)
if 'test_results' in results['1'].keys():
_iterations = [i for i in list(results.keys()) if i not in keys_excluded]
iterations = [x for x in _iterations if int(x) < 52]
data_percent = [results[i]['data_percent'] for i in iterations]
samples = [results[i]['total_train_samples'] for i in iterations]
inf_time = [results[i]['inference_time'] for i in iterations]
sel_time = [results[i]['selection_time'] for i in iterations]
test_acc = [round(results[i]['test_results']['acc'] * 100, 2) for i in iterations]
val_acc = [round(results[i]['val_results']['acc'] * 100, 2) for i in iterations]
classes_selected = [results[i]['class_selected_samples'] for i in iterations]
classes_before = [results[i]['class_samples_before'] for i in iterations]
classes_after = [results[i]['class_samples_after'] for i in iterations]
ece = [results[i]['test_results']['ece']['ece'] for i in iterations]
entropy = [results[i]['test_results']['entropy']['mean'] for i in iterations]
nll = [results[i]['test_results']['nll']['mean'] for i in iterations]
brier = [results[i]['test_results']['brier']['mean'] for i in iterations]
# ood_results = None
if 'ood_test_results' in results['1'].keys():
ood_results = [round(results[i]['ood_test_results']['acc'] * 100, 2) for i in iterations]
else:
ood_results = [None for i in iterations]
if 'contrast_test_results' in results['1'].keys():
consistency_test_results = [
round(results[i]['contrast_test_results']['consistency_acc'] * 100, 2) for i in
iterations]
contrast_test_results = [
round(results[i]['contrast_test_results']['test_contrast_acc'] * 100, 2) for i in
iterations]
ori_test_results = [round(results[i]['contrast_test_results']['test_ori_acc'] * 100, 2) for
i in iterations]
else:
consistency_test_results = None
contrast_test_results = None
ori_test_results = None
if af == "adv":
advs = [results[i]['num_adv'] for i in iterations]
else:
advs = [None for i in iterations]
num_val_adv = None
if 'val_adv_inds' in results['1']['val_results'].keys():
num_val_adv = [len(results[i]['val_results']['val_adv_inds']) for i in iterations]
df_ = pd.DataFrame(
{'iterations': iterations, 'val_acc': val_acc, 'test_acc': test_acc,
'ori_acc': ori_test_results, 'contrast_acc': contrast_test_results,
'consistency_acc': consistency_test_results,
'ood_test_acc': ood_results, 'data_percent': data_percent,
'samples': samples,
# 'inference_time': inf_time, 'selection_time': sel_time,
# 'classes_after': classes_after, 'classes_before': classes_before,
# 'classes_selected': classes_selected,
'ece': ece, 'entropy': entropy, 'nll': nll, 'brier': brier,
# 'num_adv': advs, 'num_val_advs':num_val_adv
})
df_['seed'] = seed
df_['dataset'] = dataset
df_['acquisition'] = af
df_['unc'] = unc
df = df.append(df_, ignore_index=True)
else:
if 'ece' not in results['1'].keys():
break
_iterations = [i for i in list(results.keys()) if i not in keys_excluded]
iterations = [x for x in _iterations if int(x) < 52]
train_loss = [results[i]['train_loss'] for i in iterations]
val_loss = [results[i]['loss'] for i in iterations]
val_acc = [round(results[i]['acc'] * 100, 2) for i in iterations]
if 'f1_macro' in results['1']:
val_f1 = [round(results[i]['f1_macro'] * 100, 2) for i in iterations]
elif 'f1' in results['1']:
val_f1 = [round(results[i]['f1'] * 100, 2) for i in iterations]
data_percent = [results[i]['data_percent'] for i in iterations]
samples = [results[i]['total_train_samples'] for i in iterations]
inf_time = [results[i]['inference_time'] for i in iterations]
sel_time = [results[i]['selection_time'] for i in iterations]
times_trained = [results[i]['times_trained'] for i in iterations]
classes_selected = [results[i]['class_selected_samples'] for i in iterations]
classes_before = [results[i]['class_samples_before'] for i in iterations]
classes_after = [results[i]['class_samples_after'] for i in iterations]
ece = [results[i]['ece']['ece'] for i in iterations]
entropy = [results[i]['entropy']['mean'] for i in iterations]
nll = [results[i]['nll']['mean'] for i in iterations]
brier = [results[i]['brier']['mean'] for i in iterations]
df_ = pd.DataFrame(
{'iterations': iterations, 'val_acc': val_acc, 'val_f1': val_f1,
'data_percent': data_percent,
'samples': samples, 'inference_time': inf_time, 'selection_time': sel_time,
'classes_after': classes_after, 'classes_before': classes_before,
'classes_selected': classes_selected,
'ece': ece, 'entropy': entropy, 'nll': nll, 'brier': brier})
df_['seed'] = seed
df_['dataset'] = dataset
df_['acquisition'] = af
df_['training'] = 'SL'
if 'uda' in indicator:
df_['training'] = 'CT'
df = df.append(df_, ignore_index=True)
if indicator_df:
if indicator is None: indicator = 'baseline'
df['indicator'] = indicator
return df
def al_plot(dataset, model='bert',
af=['entropy', 'random'],
seeds=[],
unc=['vanilla'],
plot_dir=None,
indicator=None,
y='acc',
legend=None,
test=True,
ood=False,
contrast=False):
sns.set_style("whitegrid")
# Choose path to save figure
dataset_dir = os.path.join(BASE_DIR, 'paper_results')
create_dir(dataset_dir)
if plot_dir is not None:
dataset_dir = plot_dir
create_dir(dataset_dir)
# seed format for title and filename
print_af = str(af[0])
if len(af) > 1:
for s in af:
if s != af[0]:
print_af += '_{}'.format(s)
if type(af) is not list:
af = [af]
# Create dataframe with all values
list_of_df = []
if type(indicator) is list:
for i in indicator:
for a in af:
if a=='random' and 'bayes' in i:
pass
else:
for u in unc:
if 'bayes' in i and u == 'temp':
pass
else:
_i = i
list_of_df.append(read_results_json(dataset, model, a, seeds, indicator=_i, unc=u, indicator_df=True))
else:
for a in af:
list_of_df.append(read_results_json(dataset, model, a, seeds, indicator=indicator))
df = list_of_df[0]
for d in range(1, len(list_of_df)):
df = df.append(list_of_df[d])
if df.empty: return
# Create dataframe with 100% data
full_model_dir = os.path.join(BASE_DIR, 'results')
path = os.path.join(full_model_dir, '{}_{}_100%'.format(dataset, model))
# path = os.path.join(RES_DIR, '{}_{}_100%'.format(dataset, model))
val_acc = []
test_acc = []
val_f1 = []
for seed in seeds:
all_filepath = os.path.join(path, 'seed_{}_lr_2e-05_bs_32_epochs_5'.format(seed))
if not os.path.exists(all_filepath): all_filepath = os.path.join(path,
'seed_{}_lr_2e-05_bs_32_epochs_20'.format(
seed))
if os.path.exists(all_filepath) and os.path.isfile(os.path.join(all_filepath, 'vanilla_results.json')):
with open(os.path.join(all_filepath, 'vanilla_results.json')) as json_file:
results = json.load(json_file)
val_acc.append(results['val_results']['acc'] * 100)
test_acc.append(results['test_results']['acc'] * 100)
# Plot
if dataset == 'sst-2':
label_100 = '60.6K training data (100%)'
elif dataset == 'mrpc':
label_100 = '3.6K training data (100%)'
elif dataset == 'qnli':
label_100 = '105K training data (100%)'
elif dataset == 'trec-6':
label_100 = '4.9K training data (100%)'
elif dataset == 'ag_news':
label_100 = '114K training data (100%)'
elif dataset == 'imdb':
label_100 = '22.5K training data (100%)'
elif dataset == 'rte':
label_100 = '2K training data (100%)'
elif dataset == 'qqp':
label_100 = '???K training data (100%)'
elif dataset == 'mnli':
label_100 = '???K training data (100%)'
elif dataset == 'dbpedia':
label_100 = '20K training data'
if df.empty:
return
title_dataset = dataset.upper()
if title_dataset == 'AG_NEWS':
title_dataset = 'AGNEWS'
x = np.linspace(0, int(max(df['samples'])), num=50, endpoint=True)
x_per = np.linspace(0, int(max(df['data_percent'])), num=50, endpoint=True)
fig = plt.figure(figsize=(4.0, 3.5))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8])
# Change names for plotting:
# change model names
df['indicator'].loc[(df['indicator'] == '25_config')] = 'BERT'
df['indicator'].loc[(df['indicator'] == '25_config_bayes')] = 'BERT+BayesOutput'
# change df columns
df = df.rename(columns={'acquisition': 'Acquisition',
'indicator': 'Model'})
if y == 'acc':
if test:
y_plot = "test_acc"
else:
y_plot = "val_acc"
# Accuracy
if val_acc != []:
df_all = pd.DataFrame()
df_all['samples'] = x_per
df_all['test_acc'] = test_acc[0]
df_all['val_acc'] = val_acc[0]
# for d in range(1, len(val_acc)):
for d in range(1, len(test_acc)):
df_all = df_all.append(pd.DataFrame({'data_percent': x_per, 'test_acc': test_acc[d], 'val_acc': val_acc[d]}))
if legend:
all_ax = sns.lineplot(x="data_percent", y=y_plot,
data=df_all, ci='sd', estimator='mean', label=label_100,
color='black',
linestyle='-.', legend=False)
else:
all_ax = sns.lineplot(x="data_percent", y=y_plot,
data=df_all, ci='sd', estimator='mean', label=label_100,
color='black',
linestyle='-.')
if type(indicator) is list:
df['_method'] = df.Model+df.unc
crap2name={'10_config_bayesmc5': 'Bayes+MC',
'10_configmc5': 'MC',
'10_config_bayestemp': 'Bayes+TS',
'10_configtemp': 'Temperature Scaling',
'10_config_bayesvanilla': 'Bayes',
'10_configvanilla': 'Vanilla',
}
df['Method'] = [crap2name[x] for x in df['_method']]
# df.drop(df[(df['Acquisition'] == 'random') & (df['Method'] != 'Vanilla')])
# df = df.drop(df[(df['Acquisition'] == 'random') & (df['Method'] != 'Vanilla')].index)
al_ax = sns.lineplot(x="data_percent", y=y_plot,
hue="Method",
style='Acquisition',
# style='unc',
data=df,
ci='sd',
# ci=None,
# estimator="mean",
estimator="median",
# legend=False,
# palette=sns.color_palette("rocket",3)
)
plt.xlabel('Acquired samples', fontsize=15)
plt.ylabel('Accuracy', fontsize=15)
if ood:
plt.title(title_dataset + ' OOD', fontsize=15)
elif contrast:
plt.title(title_dataset + ' Contrast set', fontsize=15)
else:
plt.title(title_dataset, fontsize=15)
# plt.legend(loc='lower right', prop={'size': 7})
# else:
# plt.legend(loc='lower right', prop={'size': 7})
#
# plt.legend(bbox_to_anchor=(1.5, 0.5), loc='center left', ncol=2,handleheight=1, labelspacing=0.05)
plt.tight_layout()
plt.legend(loc='lower right', prop={'size': 7})
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(10)
# fix limits
axes = plt.gca()
axes.set_xlim([1, 10])
# if dataset == 'ag_news':
# axes.set_ylim([90, 95])
# # start, end = axes.get_ylim()
# # axes.yaxis.set_ticks(np.arange(start, end, 2))
# if dataset == 'imdb':
# axes.set_ylim([86, 92])
plt.style.use("seaborn-colorblind")
if test:
print("test")
filename = "test_acc_{}_{}_{}_{}".format(dataset, model, print_af, indicator)
else:
print("val")
filename = "val_acc_{}_{}_{}_{}".format(dataset, model, print_af, indicator)
if ood: filename += '_ood'
if contrast: filename += '_contrast'
# png
plt.savefig(os.path.join(dataset_dir, filename + '.png'),
dpi=300,
transparent=False, bbox_inches="tight", pad_inches=0.1)
# # pdf
# pp = PdfPages(os.path.join(pdf_dir, filename + '.pdf'))
# pp.savefig(fig,dpi=300,
# transparent=False, bbox_inches="tight", pad_inches=0.1)
# plt.show()
# pp.close()
plt.close()
return
if __name__ == '__main__':
datasets = ['imdb', 'rte', 'mrpc', 'qnli', 'sst-2', 'mnli', 'qqp', 'trec-6', 'ag_news']
datasets = ['sst-2', 'ag_news']
seeds = [2, 19, 729, 982, 75]
indicator = ['10_config', '10_config_bayes']
unc = ['vanilla', 'mc5', 'temp']
models = ['bert', 'distilbert']
for dataset in datasets:
for model in models:
al_plot(dataset=dataset, indicator=indicator, seeds=seeds, unc=unc, model=model)
| mourga/transformer-uncertainty | src/plotting_al.py | plotting_al.py | py | 17,432 | python | en | code | 37 | github-code | 13 |
19577365615 | import json
from unittest import mock
from rest_framework import status
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from companies.tests.factories import fake, UserFactory, CompanyUserFactory
class CompaniesAPITestCase(APITestCase):
    """API tests for company creation and company detail endpoints."""

    @mock.patch('companies.serializers.CompanyService')
    def test_create_company_and_user(self, MockCompanyService):
        """POSTing valid company+user payloads returns 201 and delegates the
        actual creation to CompanyService.create_company_and_user (mocked here
        so no real records are written by the service layer)."""
        company_user = CompanyUserFactory()
        # The mocked service must return a (company, user) pair, mirroring the
        # real service's contract.
        MockCompanyService.create_company_and_user.return_value = (
            company_user.company, company_user.user
        )
        company_data = {
            "name": fake.company(),
            "street_address_1": fake.street_address(),
            "street_address_2": "",
            "city": fake.city(),
            "state": fake.state_abbr(),
            "zip": fake.zipcode(),
            "dot_number": "",
            "mc_number": "",
        }
        # .stub() builds attribute values without hitting the database.
        user_stub = UserFactory.stub()
        user_data = {
            "first_name": user_stub.first_name,
            "last_name": user_stub.last_name,
            "username": user_stub.username,
            "password_": "unsafe-password-here",
        }
        url = reverse('companies:company-create')
        data = {
            "company": company_data,
            "user": user_data,
        }
        response = self.client.post(url, data=json.dumps(data), content_type='application/json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertTrue(MockCompanyService.create_company_and_user.called)

    def test_retrieve_update(self):
        """Any company member may GET the company; PATCH requires admin."""
        company_user = CompanyUserFactory()
        self.client.force_authenticate(company_user.user)
        url = reverse('companies:company-detail', kwargs={'pk': company_user.company.id})
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # Test non-admin: a plain member is forbidden from updating.
        response = self.client.patch(
            url, data=json.dumps({"name": fake.company()}), content_type='application/json'
        )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        # Make an admin do it: the same request succeeds once is_admin is set.
        company_user.is_admin = True
        company_user.save()
        response = self.client.patch(
            url, data=json.dumps({"name": fake.company()}), content_type='application/json'
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
class UsersAPITestCase(APITestCase):
    """API tests for the user detail endpoint (retrieve + partial update)."""

    def test_retrieve_update(self):
        """An authenticated user can fetch and patch their own record."""
        membership = CompanyUserFactory()
        self.client.force_authenticate(membership.user)
        url = reverse('companies:user-detail', kwargs={'pk': membership.user.id})
        # Retrieval succeeds for the authenticated user.
        get_response = self.client.get(url)
        self.assertEqual(get_response.status_code, status.HTTP_200_OK)
        # A partial update of a simple field is accepted as well.
        payload = json.dumps({"first_name": "bob"})
        patch_response = self.client.patch(url, data=payload, content_type='application/json')
        self.assertEqual(patch_response.status_code, status.HTTP_200_OK)
| ktryber/aljex-sync-api | api/companies/tests/views/test_companies.py | test_companies.py | py | 3,010 | python | en | code | 0 | github-code | 13 |
17643823205 | # import subprocess
import os
from argparse import ArgumentParser
# Name/location of the editable source script that the -code/-code_folder
# flags open; hard-coded to this machine's OneDrive layout.
script_name = "daily_schedule.py"
code_folder_path = "C:/Users/67311/OneDrive/Reference/bash_functions/schedule_alarm/"
parser = ArgumentParser()
parser.add_argument("-code", help="show code", action = 'store_true')
parser.add_argument("-code_folder", help="show code_folder", action = 'store_true')
args = parser.parse_args()
# Convenience flags: open the script (or its folder) with the default Windows
# application and exit without starting the alarm loop.
if args.code:
    os.startfile(code_folder_path + script_name)
    # subprocess.call('start ' + code_folder_path + script_name, shell=True)
    exit()
if args.code_folder:
    os.startfile(code_folder_path)
    exit()
from datetime import datetime
import keyboard
import time
import subprocess
import win32gui
# NOTE(review): duplicate datetime import — the line below supersedes the one
# above (left unchanged in this documentation-only pass).
from datetime import datetime, timedelta
import os
import sys
# Temporarily silence stdout while importing pygame so its startup banner does
# not pollute the console, then restore the real stdout.
null = open(os.devnull, 'w')
sys.stdout = null
import pygame
sys.stdout = sys.__stdout__
null.close()
# Handle of the window that has focus at startup — assumed to be this script's
# console; used to ignore Esc presses aimed at other windows.
console_handle = win32gui.GetForegroundWindow()
def is_console_focused():
    """Return True when the window that was focused at startup has focus again."""
    return win32gui.GetForegroundWindow() == console_handle
def play_mp3(path):
    """Play the MP3 at *path*, blocking until playback ends.

    Playback is stopped early when the user presses Esc or when a 60-second
    safety timeout elapses, whichever comes first.
    """
    pygame.mixer.init()
    pygame.mixer.music.load(path)
    pygame.mixer.music.play()
    started = datetime.now()
    while pygame.mixer.music.get_busy():
        timed_out = (datetime.now() - started).total_seconds() > 60
        if timed_out or keyboard.is_pressed('esc'):
            pygame.mixer.music.stop()
            # BUG FIX: exit immediately after stopping; the original kept
            # looping (and calling stop()) until get_busy() flipped.
            break
        # BUG FIX: brief sleep so the wait loop does not spin at 100% CPU —
        # the original polled keyboard/clock in a tight busy-loop.
        time.sleep(0.1)
def wait_for_keypress(total_seconds):
    """Sleep for *total_seconds*, polling every 2s for Esc.

    If Esc is pressed while this script's console has focus, announce a
    2-hour pause, sleep through it, announce the resume, and return True
    (callers treat True as "skip the pending alarm"). Returns False after
    waiting out the full duration undisturbed.

    NOTE(review): polling happens only once per 2-second tick, so Esc must be
    held down around a poll to register.
    """
    start_time = time.time()
    while total_seconds > time.time() - start_time:
        time.sleep(2)
        if keyboard.is_pressed('esc') and is_console_focused():
            future_time = datetime.now() + timedelta(hours=2)
            future_time_str = future_time.strftime("%H:%M")
            text = "Escape key detected. Sleep for 2 hours. Resume at " + future_time_str
            # 'echo' via shell prints into the console the user is watching.
            subprocess.call('echo ' + text, shell=True)
            time.sleep(3600*2)
            text = "The program is resumed"
            subprocess.call('echo ' + text, shell=True)
            return True
    return False
def set_alarm(alarm_time,mode):
    """Wait until *alarm_time*, then play the chime selected by *mode*
    ('study', 'relax' or 'backhome').

    Returns early (without ringing) if the user paused the wait via Esc,
    i.e. wait_for_keypress returned True.

    NOTE(review): the mp3 paths below rely on unrecognised escape sequences
    such as ``\\S`` surviving literally in non-raw strings — raw strings
    would be safer, but the literals are left byte-identical here.
    """
    current_time = datetime.now()
    time_difference = alarm_time - current_time
    # Recompute "now + delta" purely to format the wall-clock ring time.
    future_time = datetime.now() + timedelta(seconds=time_difference.total_seconds())
    future_time_str = future_time.strftime("%H:%M")
    text = "Next alarm is set in " + str(time_difference.total_seconds()/60) + " minutes at " + future_time_str
    subprocess.call('echo ' + text, shell=True)
    if time_difference.total_seconds()>0:
        # True means the user interrupted the wait — skip this alarm.
        wait_dummy = wait_for_keypress(time_difference.total_seconds())
        if wait_dummy:
            return None
    if mode =="study":
        play_mp3("D:\Study\Functions\\bash_functions\statics\\beautiful_piano.mp3")
    elif mode == "relax":
        play_mp3("D:\Study\Functions\\bash_functions\statics\\summer.mp3")
    elif mode=="backhome":
        play_mp3("D:\Study\Functions\\bash_functions\statics\\backhome.mp3")
def set_alarm_for_new_day():
    """Daytime scheduler loop.

    While the current hour is in 11..22, rings a 'relax' chime at minute 44
    and a 'study' chime at minute 59 of every hour; the 22:59 chime becomes
    'backhome'. Exits once the hour leaves the 11-22 window.
    """
    print("Start a new day! ")
    # Chained comparison: true only for hours 11..22 inclusive.
    while 10<datetime.now().hour < 23:
        if datetime.now().minute < 44:
            alarm_time = datetime.now().replace(minute=44)
            set_alarm(alarm_time,"relax")
        elif datetime.now().minute < 59:
            alarm_time = datetime.now().replace(minute=59)
            if datetime.now().hour == 22:
                # Last chime of the day reminds the user to head home.
                set_alarm(alarm_time,"backhome")
            else:
                set_alarm(alarm_time,"study")
        else:
            # Minute 59: idle (still Esc-interruptible) until the hour rolls over.
            wait_for_keypress(60)
if __name__=="__main__":
# text = "When an alarm is set, press esc for 5 seconds to put the program to sleep for 2 hours"
# subprocess.call('echo ' + text, shell=True)
while True:
current_time = datetime.now()
if 10<current_time.hour < 23:
set_alarm_for_new_day()
else:
future_time = datetime.now() + timedelta(hours=1)
future_time_str = future_time.strftime("%H:%M")
text = "Outside regular time. Sleep for 1 hour. Resume at " + future_time_str
subprocess.call('echo ' + text, shell=True)
time.sleep(3600)
| CCYChongyanChen/Daily_schedule | daily_schedule2.py | daily_schedule2.py | py | 4,122 | python | en | code | 0 | github-code | 13 |
7328515176 | import os
from turtle import back
import pandas as pd
import numpy as np
from sklearn.linear_model import Perceptron as per
from sklearn.metrics import accuracy_score, mean_squared_error, r2_score
from sklearn import linear_model
from sklearn.decomposition import PCA as pca_reduction
import joblib
class reg:
    """Thin wrapper around a few scikit-learn estimators (LinearRegression,
    Perceptron, PCA) plus helpers for loading datasets from files or from
    in-memory objects.

    The public surface (method and parameter names) is kept backward
    compatible with the original implementation.
    """

    def __init__(self, algorithm='', n_components='mle'):
        # Absolute paths kept around for callers that build relative paths.
        self.cwd = os.path.dirname(os.getcwd())
        self.file_dirname = os.path.dirname(os.path.abspath(__file__))
        self.algorithm = algorithm
        self.dataset_path = ' '
        self.test_size = ' '
        if self.algorithm == 'LR':
            self.model = linear_model.LinearRegression()
        elif self.algorithm == 'Perceptron':
            self.model = per()
        elif self.algorithm == 'PCA':
            self.model = pca_reduction(n_components=n_components)

    def train(self, seed=0, data_type='csv'):
        """Fit the underlying model.

        For 'LR' the dataset is read from ``self.dataset_path`` (last column
        is the label), shuffled with ``seed``, split by ``self.test_size``,
        and the held-out part stored in ``self.test_set``. For 'Perceptron'
        and 'PCA' the model is fit on ``self.dataset``.
        """
        if self.algorithm == 'LR':
            np.random.seed(seed)
            if data_type == 'csv':
                dataset = pd.read_csv(self.dataset_path, sep=',', header=None).values
                np.random.shuffle(dataset)
                data, label = dataset[:, :-1], dataset[:, -1]
                train_index = int((1 - self.test_size) * len(dataset))
                train_data, train_label = data[:train_index], label[:train_index]
                self.test_set = {
                    'data': data[train_index:],
                    'label': label[train_index:]
                }
                self.model.fit(train_data, train_label)
        # BUG FIX: the original tested `self.algorithm == 'Perceptron' or 'PCA'`,
        # which is always truthy; use a proper membership test.
        elif self.algorithm in ('Perceptron', 'PCA'):
            self.model.fit(self.dataset)
            if self.algorithm == 'PCA':
                # Variance ratio explained by each retained component
                # (attribute only exists on the PCA estimator).
                print(self.model.explained_variance_ratio_)

    def inference(self, data):
        """Run the fitted model.

        'LR': print the MSE on the held-out test set built by ``train``
        (``data`` is ignored, matching the original behaviour).
        'Perceptron'/'PCA': fit-transform ``data`` and print fit statistics.
        """
        if self.algorithm == 'LR':
            pred = self.model.predict(self.test_set['data'])
            loss = mean_squared_error(self.test_set['label'], pred)
            print('Loss: {}'.format(loss))
        # BUG FIX: same always-true `== 'Perceptron' or 'PCA'` pattern as above.
        elif self.algorithm in ('Perceptron', 'PCA'):
            self.model.fit_transform(data)
            print(self.model.n_features_)
            print(self.model.n_samples_)

    def load_dataset_from_file(self, path, x_column=None, y_column=None, data_type='csv'):
        """Load a dataset from ``path`` ('csv' or 'txt') and split columns via
        ``get_data``.

        BUG FIX: the original compared the *builtin* ``type`` against 'csv'
        (always False) and referenced undefined names X/y, so it could never
        load anything. Mutable default arguments were replaced by None.
        """
        x_column = [] if x_column is None else x_column
        y_column = [] if y_column is None else y_column
        if data_type == 'csv':
            raw = pd.read_csv(path).values
        elif data_type == 'txt':
            raw = np.loadtxt(path)
        else:
            raise ValueError('Unsupported data_type: {}'.format(data_type))
        # Features and labels are both sliced out of the same array.
        self.get_data(raw, raw, x_column, y_column)

    def load_dataset_from_data(self, X, y=None, x_column=None, y_column=None):
        """Load a dataset from in-memory data; supports list, numpy array and
        pandas DataFrame. X and y must be of the same type."""
        x_column = [] if x_column is None else x_column
        y_column = [] if y_column is None else y_column
        if type(X) != type(y):
            raise TypeError("数据格式不同,无法加载")
        if isinstance(X, list):
            self.get_data(np.array(X), np.array(y), x_column, y_column)
        elif isinstance(X, np.ndarray):
            self.get_data(X, y, x_column, y_column)
        elif isinstance(X, pd.DataFrame):
            self.get_data(X.values, y.values, x_column, y_column)

    def load_dataset(self, X, y=None, type=None, x_column=None, y_column=None):
        """Unified loader; ``type`` is one of 'csv', 'numpy', 'pandas',
        'list', 'txt'. Everything is normalised to numpy via ``get_data``.

        The parameter name ``type`` (shadowing the builtin) is kept for
        backward compatibility with existing keyword callers.
        """
        x_column = [] if x_column is None else x_column
        y_column = [] if y_column is None else y_column
        if len(x_column) == 0:
            raise ValueError("请传入数据列号")
        if type == 'csv':
            # BUG FIX: the original forwarded the *path* X to get_data;
            # forward the parsed array instead.
            self.dataset = pd.read_csv(X).values
            self.get_data(self.dataset, self.dataset, x_column, y_column)
        elif type == 'numpy':
            self.get_data(X, y, x_column, y_column)
        elif type == 'pandas':
            self.get_data(X.values, y.values, x_column, y_column)
        elif type == 'list':
            self.get_data(np.array(X), np.array(y), x_column, y_column)
        elif type == 'txt':
            # BUG FIX: the original called X.values on the path string.
            self.dataset = np.loadtxt(X)
            self.get_data(self.dataset, self.dataset, x_column, y_column)

    def get_data(self, X, y, x_column, y_column):
        """Slice feature columns (``x_column``) out of X and label columns
        (``y_column``) out of y, storing ``x_train``/``y_train`` and — when
        labels are present — the concatenated ``dataset``."""
        if len(X):
            self.x_train = X[:, x_column]
        # BUG FIX: guard for missing/empty labels; the original dereferenced
        # self.y_train unconditionally and crashed when y was empty.
        if y is not None and len(y):
            if y.ndim == 1:
                y = y.reshape(-1, 1)
            self.y_train = y[:, y_column]
            if self.y_train.shape[0]:
                # Column-wise concatenation of features and labels.
                self.dataset = np.concatenate((self.x_train, self.y_train), axis=1)

    def save(self):
        """Persist the fitted model one directory above the CWD."""
        print("Saving model checkpoints...")
        joblib.dump(self.model, '../checkpoint.pkl', compress=3)

    def load(self, path):
        """Load a previously saved model from ``path``.

        BUG FIX: the original discarded joblib.load's return value, leaving
        self.model untouched.
        """
        self.model = joblib.load(path)
43227049186 | # Use of booleans and comparision operator
# Read three integers from the user and compare the first against the others.
n1 = int(input('enter first number'))
n2 = int(input('enter second number'))
n3 = int(input('enter third number'))
# Check whether n1 is greater than both n2 and n3 (and / or variants).
print (n1 > n2 and n1 > n3)
print (n1 > n2 or n1 > n3)
# The comparison operator (>) yields booleans, combined with the logical
# operators 'and' and 'or'; the output is therefore True or False.
# Using an if....else statement: voting age check.
age = int(input('enter the age of the person'))
if age >= 18:
    print('The person can vote.')
else:
    print('The person can not vote.')
# Using an elif clause: classify a number's sign.
number = int(input())
if number > 0:
    print('positive')
elif number < 0:
    print('negative')
else:
    print('zero')
print('this print statement is not the part of elif clause')
# While loop: body runs three times, then control falls through.
count = 1
while count <= 3:
    print('I am inside a loop.')
    print('Looping is interesting.')
    count = count + 1
print('OUTSIDE THE LOOP')
# While loop with an accumulator: sum of 1..n.
n = int(input('Enter a positive number: '))
total = 0
i = 1
while i <= n:
    # adding the value of i to the running total
    total = total + i
    i = i + 1
print("Result:", total)
| mukes137/Python | control flow/day3.py | day3.py | py | 1,219 | python | en | code | 0 | github-code | 13 |
17113643034 | """
reference_mod_md5sum_model.py
===============
"""
from datetime import datetime
import pytz
from sqlalchemy import (Column, ForeignKey, Integer, String, DateTime)
from sqlalchemy.orm import relationship
from agr_literature_service.api.database.base import Base
from sqlalchemy.schema import Index
class ReferenceModMd5sumModel(Base):
    """MD5 checksum of a reference's content, optionally scoped to a MOD.

    The partial unique indexes in ``__table_args__`` enforce at most one
    checksum row per (reference, mod) pair when ``mod_id`` is set, and at
    most one MOD-less row per reference when ``mod_id`` is NULL.
    """
    __tablename__ = "reference_mod_md5sum"

    # Surrogate primary key.
    reference_mod_md5sum_id = Column(
        Integer,
        primary_key=True,
        autoincrement=True
    )

    # Owning reference; rows are removed when the reference is deleted.
    reference_id = Column(
        Integer,
        ForeignKey("reference.reference_id", ondelete="CASCADE"),
        nullable=False,
        index=True
    )

    reference = relationship(
        "ReferenceModel"
    )

    # Optional MOD scope; NULL means the checksum applies reference-wide.
    mod_id = Column(
        Integer,
        ForeignKey("mod.mod_id", ondelete="CASCADE"),
        index=True,
        nullable=True,
    )

    mod = relationship(
        "ModModel"
    )

    # NOTE(review): unique=True makes the md5sum globally unique across ALL
    # rows, not just per reference/mod — confirm this is intentional.
    md5sum = Column(
        String(),
        unique=True,
        nullable=False
    )

    # UTC timestamp, refreshed automatically on every update.
    date_updated = Column(
        DateTime,
        default=lambda: datetime.now(tz=pytz.timezone("UTC")),
        onupdate=lambda: datetime.now(tz=pytz.timezone("UTC")),
        nullable=False
    )

    __table_args__ = (
        # One row per (reference, mod) when a MOD is set (PostgreSQL partial index).
        Index(
            "uix_reference_id_mod_id",
            "reference_id",
            "mod_id",
            unique=True,
            postgresql_where=mod_id.isnot(None)
        ),
        # One MOD-less row per reference when mod_id is NULL.
        Index(
            "uix_reference_id",
            "reference_id",
            unique=True,
            postgresql_where=mod_id.is_(None)
        ),
    )
| alliance-genome/agr_literature_service | agr_literature_service/api/models/reference_mod_md5sum_model.py | reference_mod_md5sum_model.py | py | 1,654 | python | en | code | 1 | github-code | 13 |
14758736144 | import json
import os
from Library.core import Database
from Library import constants
import sqlite3
class DAO:
    """Base class for table-level data-access objects.

    Subclasses declare TABLE (table name) and SCHEMA (ordered column names).
    ``volatile`` marks tables whose rows are derived data and may be rebuilt
    rather than migrated.
    """

    TABLE = None
    SCHEMA = None

    def __init__(self, database_file_path, volatile=False):
        self.database_file_path = database_file_path
        self.volatile = volatile
        self._database = Database(database_file_path)

    def create_table(self, table=None, schema=None):
        """Create the table, defaulting to the subclass's TABLE/SCHEMA.

        No-op when either the table name or the schema is missing.
        """
        target_table = table if table else self.TABLE
        target_schema = schema if schema else self.SCHEMA
        if target_table is not None and target_schema is not None:
            self._database.create_table(target_table, target_schema)

    def get_column_index(self, column_name):
        """Return the position of *column_name* in SCHEMA, or None if absent."""
        if column_name not in self.SCHEMA:
            return None
        return self.SCHEMA.index(column_name)
class TitlesDAO(DAO):
    """Search index of alternative title strings per listing (volatile)."""

    TABLE = 'Titles'
    SCHEMA = ['id', 'listing_id', 'title_string']

    def __init__(self, database_file_path):
        super().__init__(database_file_path, volatile=True)

    def write(self, listing_id, title_string):
        """Insert one title row and return its generated id."""
        title_id = Database.unique_id()
        self._database.insert(self.TABLE, [title_id, listing_id, title_string])
        return title_id

    def write_multiple(self, rows):
        """Bulk-insert [listing_id, title_string] pairs, generating ids."""
        rows_with_ids = [[Database.unique_id(), *r] for r in rows]
        self._database.insert_multiple(self.TABLE, rows_with_ids)

    def read_like_string(self, title_string):
        """Return listing_ids whose title starts with *title_string* or
        contains it after a space (case-insensitive prefix match).

        SECURITY: the Database API only accepts raw condition strings, so
        bound parameters are unavailable here; at minimum, double-quote
        characters in the user-supplied needle are escaped (SQL doubling)
        so input cannot break out of the string literal.
        NOTE(review): LIKE wildcards (% and _) in the needle are still
        interpreted by the engine — add an ESCAPE clause if that matters.
        """
        needle = title_string.lower().replace('"', '""')
        condition = 'title_string like "{}%" OR title_string like "% {}%"'.format(
            needle, needle)
        listing_id_index = self.get_column_index('listing_id')
        rows = self._database.select(self.TABLE, [self.SCHEMA[listing_id_index]], condition)
        return [row[0] for row in rows]
class ListingsDAO(DAO):
    """Listings with a display title and a JSON-ish named_info blob (volatile)."""

    TABLE = 'Listings'
    SCHEMA = ['id', 'display_title', 'named_info']

    def __init__(self, database_file_path):
        super().__init__(database_file_path, volatile=True)

    @staticmethod
    def _parse_named_info_to_dict(named_info_string):
        """Decode the stored single-quoted blob back into a dict.

        NOTE(review): the blind quote swap corrupts any value that itself
        contains an apostrophe or double quote — storing plain JSON would be
        safer, but the on-disk format is load-bearing, so it is only
        documented here.
        """
        named_info_string = named_info_string.replace("'", '"')
        return json.loads(named_info_string)

    @staticmethod
    def _parse_named_info_to_string(named_info_dict):
        # Inverse of _parse_named_info_to_dict: JSON with " swapped for '.
        dict_string = json.dumps(named_info_dict)
        return dict_string.replace('"', "'")

    def write(self, display_title, named_info_dict):
        """Insert one listing row and return its generated id."""
        listing_id = Database.unique_id()
        row = [listing_id, display_title, self._parse_named_info_to_string(named_info_dict)]
        self._database.insert(self.TABLE, row)
        return listing_id

    def read_all(self):
        """Return every listing as [id, display_title, named_info_dict].

        Assumes named_info is the LAST schema column (r[:-1] drops it before
        the decoded dict is appended). Returns [] for an empty table.
        """
        rows = self._database.select(self.TABLE, columns=self.SCHEMA)
        if rows:
            named_info_index = self.get_column_index('named_info')
            return [[*r[:-1], self._parse_named_info_to_dict(r[named_info_index])] for r in rows]
        return []
class ServicesDAO(DAO):
    """Read-only access to the static Services table."""

    TABLE = 'Services'
    SCHEMA = ['id', 'name', 'scraping_url', 'icon_url']

    def __init__(self, database_file_path):
        super().__init__(database_file_path)

    def read_all(self):
        """Return every distinct service row, or None when the table is empty."""
        rows = self._database.select(self.TABLE, columns=self.SCHEMA, distinct=True)
        if not rows:
            return None
        return rows
class ListingServiceMappingDAO(DAO):
    """Many-to-many join table linking listings to the services carrying them."""

    TABLE = 'ListingServiceMapping'
    SCHEMA = ['id', 'listing_id', 'service_id']

    def __init__(self, database_file_path):
        # Mapping rows are derived data and may be rebuilt, hence volatile.
        super().__init__(database_file_path, volatile=True)

    def write(self, listing_id, service_id):
        """Insert one listing-to-service link and return its generated id."""
        new_row_id = Database.unique_id()
        row = [new_row_id, listing_id, service_id]
        self._database.insert(self.TABLE, row)
        return new_row_id

    def read(self, listing_id):
        """Return all service_ids linked to *listing_id*."""
        matching = self._database.select(
            self.TABLE, ['service_id'], 'listing_id="{}"'.format(listing_id))
        return [record[0] for record in matching]
class RequestsDAO(DAO):
    """Audit log of API requests and their eventual response times."""

    TABLE = 'Requests'
    SCHEMA = ['id', 'user_identifier', 'datetime', 'method', 'data', 'response_time']

    def __init__(self, database_file_path):
        super().__init__(database_file_path)

    def write(self, user_identifier, request_datetime, method, data):
        """Insert one request row (response_time starts as NULL) and return
        the generated request id. ``data`` is stored JSON-encoded."""
        request_id = Database.unique_id()
        values = [request_id, user_identifier, request_datetime.strftime(constants.DATETIME.FORMAT), method,
                  json.dumps(data), None]
        self._database.insert(self.TABLE, values)
        return request_id

    def update_response_time(self, request_id, response_time):
        """Back-fill the response time (seconds, coerced to float) once the
        request has completed. ``request_id`` is generated internally, so the
        formatted condition string is not user-controlled here."""
        condition = 'id="{}"'.format(request_id)
        self._database.update(self.TABLE, {'response_time': float(response_time)}, condition)
class RecommendationScoresDAO(DAO):
    """Stores one recommendation score per listing."""

    TABLE = 'RecommendationScores'
    SCHEMA = ['id', 'listing_id', 'score']

    def __init__(self, database_file_path):
        super().__init__(database_file_path)

    def write(self, listing_id, score=0):
        """Insert a score row for *listing_id* (score defaults to 0)."""
        recommendation_score_id = Database.unique_id()
        self._database.insert(self.TABLE, [recommendation_score_id, listing_id, score])

    def update(self, listing_id, score):
        """Overwrite the stored score for *listing_id*."""
        self._database.update(self.TABLE, {'score': score}, 'listing_id="{}"'.format(listing_id))

    def read(self, listing_id):
        """Return the score row for *listing_id*, or None when absent.

        BUG FIX: the original indexed select(...)[0] BEFORE the emptiness
        guard, so unknown listings raised IndexError instead of returning
        None.
        """
        condition = 'listing_id="{}"'.format(listing_id)
        rows = self._database.select(self.TABLE, ['score'], condition, distinct=True)
        return rows[0] if rows else None
class DatabaseInitiator:
    """Utilities for bootstrapping a database file and migrating its data."""

    # Every DAO whose table should exist in a fresh database.
    DAOS = [TitlesDAO, ListingsDAO, ListingServiceMappingDAO, RequestsDAO, RecommendationScoresDAO, ServicesDAO]

    @staticmethod
    def create_tables(database_file_path):
        """Create the database file (if missing) and all DAO tables,
        logging a warning for any table that cannot be created."""
        # Open new database file.
        if not os.path.isfile(database_file_path):
            with open(database_file_path, 'w+'):
                pass
            print('Created database file "{}".'.format(database_file_path))
        # Create tables.
        for dao in DatabaseInitiator.DAOS:
            try:
                dao(database_file_path).create_table()
                print('Created table "{}" in "{}".'.format(dao.TABLE, database_file_path))
            except sqlite3.OperationalError as e:
                print('WARNING: Could not create table "{}". SQL Error: "{}"'.format(dao.TABLE, e))

    @staticmethod
    def copy_data(source_file_path, destination_file_path, force_all=False):
        """Copy table contents between database files.

        By default only non-volatile tables are migrated; ``force_all=True``
        copies everything. Failures per table are logged and skipped.
        NOTE(review): the [1:] below silently drops the FIRST selected row —
        presumably a header row from Database.select; confirm, otherwise one
        real record is lost per table.
        """
        if force_all:
            daos_to_copy = DatabaseInitiator.DAOS
        else:
            daos_to_copy = [dao for dao in DatabaseInitiator.DAOS if not dao(source_file_path).volatile]
        table_rows = {}
        source_database = Database(source_file_path)
        for dao in daos_to_copy:
            try:
                table_rows[dao.TABLE] = source_database.select(dao.TABLE, columns=dao.SCHEMA)[1:]
                print('Copied "{}" data to "{}".'.format(dao.TABLE, destination_file_path))
            except sqlite3.OperationalError as e:
                print('WARNING: Could not copy "{}" data. SQL Error: "{}"'.format(dao.TABLE, e))
        destination_database = Database(destination_file_path)
        for table in table_rows:
            rows = [list(r) for r in table_rows.get(table)]
            destination_database.insert_multiple(table, rows)
| joshnic3/StreamGuide | src/Library/data.py | data.py | py | 7,086 | python | en | code | 0 | github-code | 13 |
42725684397 | from __future__ import print_function, absolute_import
import os
import argparse
import time
import matplotlib.pyplot as plt
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torchvision.datasets as datasets
import _init_paths
from pose import Bar
from pose.utils.logger import Logger, savefig
from pose.utils.evaluation import accuracy, AverageMeter, final_preds
from pose.utils.misc import save_checkpoint, save_pred, adjust_learning_rate
from pose.utils.osutils import mkdir_p, isfile, isdir, join
from pose.utils.imutils import batch_with_heatmap
from pose.utils.transforms import fliplr, flip_back
import pose.models as models
import pose.datasets as datasets
import pose.losses as losses
# get model names and dataset names
# (every lowercase, callable, non-dunder attribute exported by pose.models /
# pose.datasets is treated as a selectable architecture / dataset factory)
model_names = sorted(name for name in models.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models.__dict__[name]))
dataset_names = sorted(name for name in datasets.__dict__
    if name.islower() and not name.startswith("__")
    and callable(datasets.__dict__[name]))
# init global variables
best_acc = 0  # best validation accuracy seen so far (updated in main)
idx = []  # joint indices used for accuracy; filled per-dataset in main
# select proper device to run
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
cudnn.benchmark = True # There is BN issue for early version of PyTorch
# see https://github.com/bearpaw/pytorch-pose/issues/33
def main(args):
    """End-to-end driver: build model/criterion/optimizer, optionally restore
    a checkpoint, then either evaluate once (--evaluate) or run the full
    train/validate loop with metric logging and checkpointing."""
    global best_acc
    global idx

    # idx is the index of joints used to compute accuracy
    if args.dataset in ['mpii', 'lsp']:
        idx = [1,2,3,4,5,6,11,12,15,16]
    elif args.dataset == 'coco':
        idx = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17]
    else:
        print("Unknown dataset: {}".format(args.dataset))
        assert False

    # create checkpoint dir
    if not isdir(args.checkpoint):
        mkdir_p(args.checkpoint)

    # create model (output channels = number of joints for this dataset)
    njoints = datasets.__dict__[args.dataset].njoints

    print("==> creating model '{}', stacks={}, blocks={}".format(args.arch, args.stacks, args.blocks))
    model = models.__dict__[args.arch](num_stacks=args.stacks,
                                       num_blocks=args.blocks,
                                       num_classes=njoints,
                                       resnet_layers=args.resnet_layers)

    model = torch.nn.DataParallel(model).to(device)

    # define loss function (criterion) and optimizer
    criterion = losses.JointsMSELoss().to(device)

    if args.solver == 'rms':
        optimizer = torch.optim.RMSprop(model.parameters(),
                                        lr=args.lr,
                                        momentum=args.momentum,
                                        weight_decay=args.weight_decay)
    elif args.solver == 'adam':
        optimizer = torch.optim.Adam(
            model.parameters(),
            lr=args.lr,
        )
    else:
        print('Unknown solver: {}'.format(args.solver))
        assert False

    # optionally resume from a checkpoint (restores epoch, best_acc, weights
    # and optimizer state; the logger is reopened in append mode)
    title = args.dataset + ' ' + args.arch
    if args.resume:
        if isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc = checkpoint['best_acc']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
            logger = Logger(join(args.checkpoint, 'log.txt'), title=title, resume=True)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    else:
        logger = Logger(join(args.checkpoint, 'log.txt'), title=title)
        logger.set_names(['Epoch', 'LR', 'Train Loss', 'Val Loss',
                          'Train Acc', 'Val Acc'])

    print('    Total params: %.2fM'
          % (sum(p.numel() for p in model.parameters())/1000000.0))

    # create data loaders (train shuffled, validation in fixed order so that
    # predictions can be written back by sample index)
    train_dataset = datasets.__dict__[args.dataset](is_train=True, **vars(args))
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.train_batch, shuffle=True,
        num_workers=args.workers, pin_memory=True
    )

    val_dataset = datasets.__dict__[args.dataset](is_train=False, **vars(args))
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.test_batch, shuffle=False,
        num_workers=args.workers, pin_memory=True
    )

    # evaluation only: single validation pass, dump predictions, done
    if args.evaluate:
        print('\nEvaluation only')
        loss, acc, predictions = validate(val_loader, model, criterion, njoints,
                                          args.debug, args.flip)
        save_pred(predictions, checkpoint=args.checkpoint)
        return

    # train and eval
    lr = args.lr
    for epoch in range(args.start_epoch, args.epochs):
        lr = adjust_learning_rate(optimizer, epoch, lr, args.schedule, args.gamma)
        print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr))

        # decay sigma (optional label-heatmap sharpening over epochs)
        if args.sigma_decay > 0:
            train_loader.dataset.sigma *= args.sigma_decay
            val_loader.dataset.sigma *= args.sigma_decay

        # train for one epoch
        train_loss, train_acc = train(train_loader, model, criterion, optimizer,
                                      args.debug, args.flip)

        # evaluate on validation set
        valid_loss, valid_acc, predictions = validate(val_loader, model, criterion,
                                                      njoints, args.debug, args.flip)

        # append logger file
        logger.append([epoch + 1, lr, train_loss, valid_loss, train_acc, valid_acc])

        # remember best acc and save checkpoint (optionally a periodic snapshot)
        is_best = valid_acc > best_acc
        best_acc = max(valid_acc, best_acc)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_acc': best_acc,
            'optimizer' : optimizer.state_dict(),
        }, predictions, is_best, checkpoint=args.checkpoint, snapshot=args.snapshot)

    logger.close()
    logger.plot(['Train Acc', 'Val Acc'])
    savefig(os.path.join(args.checkpoint, 'log.eps'))
def train(train_loader, model, criterion, optimizer, debug=False, flip=True):
    """Run one training epoch; returns (avg_loss, avg_accuracy).

    NOTE(review): the ``flip`` parameter is accepted for signature symmetry
    with validate() but is never used during training.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()

    gt_win, pred_win = None, None
    bar = Bar('Train', max=len(train_loader))
    for i, (input, target, meta) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        input, target = input.to(device), target.to(device, non_blocking=True)
        target_weight = meta['target_weight'].to(device, non_blocking=True)

        # compute output (stacked-hourglass models return one heatmap set per
        # stack; the loss sums over all stacks, accuracy uses the last one)
        output = model(input)
        if type(output) == list:  # multiple output
            loss = 0
            for o in output:
                loss += criterion(o, target, target_weight)
            output = output[-1]
        else:  # single output
            loss = criterion(output, target, target_weight)
        acc = accuracy(output, target, idx)

        if debug:  # visualize groundtruth and predictions side by side
            gt_batch_img = batch_with_heatmap(input, target)
            pred_batch_img = batch_with_heatmap(input, output)
            if not gt_win or not pred_win:
                ax1 = plt.subplot(121)
                ax1.title.set_text('Groundtruth')
                gt_win = plt.imshow(gt_batch_img)
                ax2 = plt.subplot(122)
                ax2.title.set_text('Prediction')
                pred_win = plt.imshow(pred_batch_img)
            else:
                gt_win.set_data(gt_batch_img)
                pred_win.set_data(pred_batch_img)
            plt.pause(.05)
            plt.draw()

        # measure accuracy and record loss
        losses.update(loss.item(), input.size(0))
        acces.update(acc[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # plot progress
        bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
            batch=i + 1,
            size=len(train_loader),
            data=data_time.val,
            bt=batch_time.val,
            total=bar.elapsed_td,
            eta=bar.eta_td,
            loss=losses.avg,
            acc=acces.avg
        )
        bar.next()

    bar.finish()
    return losses.avg, acces.avg
def validate(val_loader, model, criterion, num_classes, debug=False, flip=True):
    """Run one validation pass; returns (avg_loss, avg_accuracy, predictions).

    ``predictions`` is a (num_samples, num_classes, 2) tensor of predicted
    joint coordinates in original-image space, indexed by each sample's
    ``meta['index']``. With ``flip=True``, score maps from the horizontally
    flipped input are averaged in (flip test augmentation).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    acces = AverageMeter()

    # predictions, written back by dataset sample index
    predictions = torch.Tensor(val_loader.dataset.__len__(), num_classes, 2)

    # switch to evaluate mode
    model.eval()

    gt_win, pred_win = None, None
    end = time.time()
    bar = Bar('Eval ', max=len(val_loader))
    with torch.no_grad():
        for i, (input, target, meta) in enumerate(val_loader):
            # measure data loading time
            data_time.update(time.time() - end)

            input = input.to(device, non_blocking=True)
            target = target.to(device, non_blocking=True)
            target_weight = meta['target_weight'].to(device, non_blocking=True)

            # compute output (last stack's heatmaps, moved to CPU for scoring)
            output = model(input)
            score_map = output[-1].cpu() if type(output) == list else output.cpu()
            if flip:
                # BUG FIX: `input` was already moved to `device`; calling
                # .numpy() on a CUDA tensor raises TypeError, so route the
                # clone through .cpu() before the numpy conversion.
                flip_input = torch.from_numpy(fliplr(input.clone().cpu().numpy())).float().to(device)
                flip_output = model(flip_input)
                flip_output = flip_output[-1].cpu() if type(flip_output) == list else flip_output.cpu()
                flip_output = flip_back(flip_output)
                score_map += flip_output

            # loss sums over all stacks for multi-output models
            if type(output) == list:  # multiple output
                loss = 0
                for o in output:
                    loss += criterion(o, target, target_weight)
                output = output[-1]
            else:  # single output
                loss = criterion(output, target, target_weight)

            acc = accuracy(score_map, target.cpu(), idx)

            # generate predictions in original-image coordinates
            preds = final_preds(score_map, meta['center'], meta['scale'], [64, 64])
            for n in range(score_map.size(0)):
                predictions[meta['index'][n], :, :] = preds[n, :, :]

            if debug:  # visualize groundtruth and prediction heatmaps
                gt_batch_img = batch_with_heatmap(input, target)
                pred_batch_img = batch_with_heatmap(input, score_map)
                if not gt_win or not pred_win:
                    plt.subplot(121)
                    gt_win = plt.imshow(gt_batch_img)
                    plt.subplot(122)
                    pred_win = plt.imshow(pred_batch_img)
                else:
                    gt_win.set_data(gt_batch_img)
                    pred_win.set_data(pred_batch_img)
                plt.pause(.05)
                plt.draw()

            # measure accuracy and record loss
            losses.update(loss.item(), input.size(0))
            acces.update(acc[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # plot progress
            bar.suffix = '({batch}/{size}) Data: {data:.6f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | Acc: {acc: .4f}'.format(
                batch=i + 1,
                size=len(val_loader),
                data=data_time.val,
                bt=batch_time.avg,
                total=bar.elapsed_td,
                eta=bar.eta_td,
                loss=losses.avg,
                acc=acces.avg
            )
            bar.next()

        bar.finish()
    return losses.avg, acces.avg, predictions
if __name__ == '__main__':
    # CLI entry point: every hyperparameter of the training/eval pipeline is
    # exposed as a flag and the parsed namespace is handed straight to main().
    parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
    # Dataset setting
    parser.add_argument('--dataset', metavar='DATASET', default='mpii',
                        choices=dataset_names,
                        help='Datasets: ' +
                            ' | '.join(dataset_names) +
                            ' (default: mpii)')
    parser.add_argument('--image-path', default='', type=str,
                        help='path to images')
    parser.add_argument('--anno-path', default='', type=str,
                        help='path to annotation (json)')
    parser.add_argument('--year', default=2014, type=int, metavar='N',
                        help='year of coco dataset: 2014 (default) | 2017)')
    parser.add_argument('--inp-res', default=256, type=int,
                        help='input resolution (default: 256)')
    parser.add_argument('--out-res', default=64, type=int,
                    help='output resolution (default: 64, to gen GT)')
    # Model structure
    parser.add_argument('--arch', '-a', metavar='ARCH', default='hg',
                        choices=model_names,
                        help='model architecture: ' +
                            ' | '.join(model_names) +
                            ' (default: hg)')
    parser.add_argument('-s', '--stacks', default=8, type=int, metavar='N',
                        help='Number of hourglasses to stack')
    parser.add_argument('--features', default=256, type=int, metavar='N',
                        help='Number of features in the hourglass')
    parser.add_argument('--resnet-layers', default=50, type=int, metavar='N',
                        help='Number of resnet layers',
                        choices=[18, 34, 50, 101, 152])
    parser.add_argument('-b', '--blocks', default=1, type=int, metavar='N',
                        help='Number of residual modules at each location in the hourglass')
    # Training strategy
    parser.add_argument('--solver', metavar='SOLVER', default='rms',
                        choices=['rms', 'adam'],
                        help='optimizers')
    parser.add_argument('-j', '--workers', default=1, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
    parser.add_argument('--epochs', default=100, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                        help='manual epoch number (useful on restarts)')
    parser.add_argument('--train-batch', default=6, type=int, metavar='N',
                        help='train batchsize')
    parser.add_argument('--test-batch', default=6, type=int, metavar='N',
                        help='test batchsize')
    parser.add_argument('--lr', '--learning-rate', default=2.5e-4, type=float,
                        metavar='LR', help='initial learning rate')
    parser.add_argument('--momentum', default=0, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--weight-decay', '--wd', default=0, type=float,
                        metavar='W', help='weight decay (default: 0)')
    parser.add_argument('--schedule', type=int, nargs='+', default=[60, 90],
                        help='Decrease learning rate at these epochs.')
    parser.add_argument('--gamma', type=float, default=0.1,
                        help='LR is multiplied by gamma on schedule.')
    parser.add_argument('--target-weight', dest='target_weight',
                        action='store_true',
                        help='Loss with target_weight')
    # Data processing
    parser.add_argument('-f', '--flip', dest='flip', action='store_true',
                        help='flip the input during validation')
    parser.add_argument('--sigma', type=float, default=1,
                        help='Groundtruth Gaussian sigma.')
    parser.add_argument('--scale-factor', type=float, default=0.25,
                        help='Scale factor (data aug).')
    parser.add_argument('--rot-factor', type=float, default=30,
                        help='Rotation factor (data aug).')
    parser.add_argument('--sigma-decay', type=float, default=0,
                        help='Sigma decay rate for each epoch.')
    parser.add_argument('--label-type', metavar='LABELTYPE', default='Gaussian',
                        choices=['Gaussian', 'Cauchy'],
                        help='Labelmap dist type: (default=Gaussian)')
    # Miscs
    parser.add_argument('-c', '--checkpoint', default='checkpoint', type=str, metavar='PATH',
                        help='path to save checkpoint (default: checkpoint)')
    parser.add_argument('--snapshot', default=0, type=int,
                        help='save models for every #snapshot epochs (default: 0)')
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                        help='evaluate model on validation set')
    parser.add_argument('-d', '--debug', dest='debug', action='store_true',
                        help='show intermediate results')

    main(parser.parse_args())
| bearpaw/pytorch-pose | example/main.py | main.py | py | 17,704 | python | en | code | 1,087 | github-code | 13 |
25018622208 | import logging
from datetime import timedelta
import voluptuous as vol
import requests
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
from requests.exceptions import (
ConnectionError as ConnectError, HTTPError, Timeout)
_LOGGER = logging.getLogger(__name__)
TIME_BETWEEN_UPDATES = timedelta(minutes=30)
CONF_OPTIONS = "options"
CONF_CITY = "city"
# CONF_AQI_CITY = "aqi_city"
CONF_APPKEY = "appkey"
life_index_list = {'comf_txt': None, 'drsg_txt': None, 'flu_txt': None,
'sport_txt': None, 'trav_txt': None, 'uv_txt': None, 'cw_txt': None}
OPTIONS = dict(fl=["HeWeather_fl", "实时体感温度", "mdi:temperature-celsius", "℃"],
tmp=["HeWeather_tmp", "实时室外温度", "mdi:thermometer", "℃"],
hum=["HeWeather_hum", "实时室外湿度", "mdi:water-percent", "%Rh"],
pcpn=["HeWeather_pcpn", "降水量", "mdi:weather-rainy", "mm"],
pres=["HeWeather_pres", "大气压", "mdi:debug-step-over", "hPa"],
vis=["HeWeather_vis", "能见度", "mdi:eye", "km"],
wind_spd=["HeWeather_wind_spd", "风速", "mdi:speedometer", "km/h"],
wind_sc=["HeWeather_wind_sc", "风力", "mdi:flag-variant", None],
wind_dir=["HeWeather_wind_dir", "风向", "mdi:apple-safari", None],
cond_txt=["HeWeather_cond_txt", "天气状态", "mdi:counter", None],
qlty=["HeWeather_qlty", "空气质量", "mdi:beach", None],
main=["HeWeather_main", "主要污染物", "mdi:chart-bar-stacked", None],
aqi=["HeWeather_aqi", "空气质量指数", "mdi:poll", "AQI"],
pm10=["HeWeather_pm10", "PM10", "mdi:blur", "μg/m³"],
pm25=["HeWeather_pm25", "PM2.5", "mdi:blur", "μg/m³"],
comf=["HeWeather_comf", "舒适度指数", "mdi:chart-bubble", None],
drsg=["HeWeather_drsg", "穿衣指数", "mdi:tie", None],
trav=["HeWeather_trav", "出行指数", "mdi:bus", None],
sport=["HeWeather_sport", "运动指数", "mdi:bike", None],
flu=["HeWeather_flu", "感冒指数", "mdi:seat-individual-suite", None],
cw=["HeWeather_cw", "空气污染扩散条件指数", "mdi:airballoon", None],
uv=["HeWeather_uv", "晾晒指数", "mdi:weather-sunny", None])
ATTR_UPDATE_TIME = "更新时间"
ATTRIBUTION = "Powered by He Weather"
ATTRIBUTION_SUGGESTION = "生活建议"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_CITY): cv.string,
vol.Required(CONF_APPKEY): cv.string,
vol.Required(CONF_OPTIONS, default=[]): vol.All(cv.ensure_list, [vol.In(OPTIONS)]),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
_LOGGER.info("Setup platform sensor.HeWeather")
city = config.get(CONF_CITY)
appkey = config.get(CONF_APPKEY)
# aqi_city = config.get(CONF_AQI_CITY)
data = WeatherData(city, appkey)
dev = []
for option in config[CONF_OPTIONS]:
dev.append(HeWeatherSensor(data, option))
add_entities(dev, True)
class HeWeatherSensor(Entity):
def __init__(self, data, option):
self._data = data
self._object_id = OPTIONS[option][0]
self._friendly_name = OPTIONS[option][1]
self._icon = OPTIONS[option][2]
self._unit_of_measurement = OPTIONS[option][3]
self._type = option
self._state = None
self._updatetime = None
@property
def unique_id(self):
return self._object_id
@property
def name(self):
return self._friendly_name
# @property
# def registry_name(self):
# return self._friendly_name
@property
def state(self):
return self._state
@property
def icon(self):
return self._icon
@property
def unit_of_measurement(self):
return self._unit_of_measurement
@property
def device_state_attributes(self):
global ATTRIBUTION
if self._friendly_name == "舒适度指数":
ATTRIBUTION = life_index_list['comf_txt']
elif self._friendly_name == "穿衣指数":
ATTRIBUTION = life_index_list['drsg_txt']
elif self._friendly_name == "感冒指数":
ATTRIBUTION = life_index_list['flu_txt']
elif self._friendly_name == "运动指数":
ATTRIBUTION = life_index_list['sport_txt']
elif self._friendly_name == "出行指数":
ATTRIBUTION = life_index_list["trav_txt"]
elif self._friendly_name == "晾晒指数":
ATTRIBUTION = life_index_list['uv_txt']
elif self._friendly_name == "空气污染扩散条件指数":
ATTRIBUTION = life_index_list['cw_txt']
else:
ATTRIBUTION = "Powered by HeWeather"
return {
ATTR_UPDATE_TIME: self._updatetime,
ATTRIBUTION_SUGGESTION: ATTRIBUTION,
}
def update(self):
self._data.update()
self._updatetime = self._data.updatetime
if self._type == "fl":
self._state = self._data.fl
elif self._type == "tmp":
self._state = self._data.tmp
elif self._type == "cond_txt":
self._state = self._data.cond_txt
elif self._type == "wind_spd":
self._state = self._data.wind_spd
elif self._type == "hum":
self._state = self._data.hum
elif self._type == "pcpn":
self._state = self._data.pcpn
elif self._type == "pres":
self._state = self._data.pres
elif self._type == "vis":
self._state = self._data.vis
elif self._type == "wind_sc":
self._state = self._data.wind_sc
elif self._type == "wind_dir":
self._state = self._data.wind_dir
elif self._type == "qlty":
self._state = self._data.qlty
elif self._type == "main":
self._state = self._data.main
elif self._type == "aqi":
self._state = self._data.aqi
elif self._type == "pm10":
self._state = self._data.pm10
elif self._type == "pm25":
self._state = self._data.pm25
elif self._type == "cw":
self._state = self._data.cw
elif self._type == "comf":
self._state = self._data.comf
elif self._type == "drsg":
self._state = self._data.drsg
elif self._type == "flu":
self._state = self._data.flu
elif self._type == "sport":
self._state = self._data.sport
elif self._type == "trav":
self._state = self._data.trav
elif self._type == "uv":
self._state = self._data.uv
class WeatherData(object):
def __init__(self, city, appkey):
self._url = "https://devapi.qweather.com/v7/weather/now"
self._air_url = "https://devapi.qweather.com/v7/air/now"
self._life_index_url = "https://devapi.qweather.com/v7/indices/1d?type=0"
self._params = {"location": city, "key": appkey}
# self._aqi_params = {"location": aqi_city, "key": appkey}
self._fl = None
self._tmp = None
self._cond_txt = None
self._wind_spd = None
self._hum = None
self._pcpn = None
self._pres = None
self._vis = None
self._wind_sc = None
self._wind_dir = None
self._qlty = None
self._main = None
self._aqi = None
self._pm10 = None
self._pm25 = None
self._updatetime = None
self._comf = None
self._cw = None
self._drsg = None
self._flu = None
self._sport = None
self._uv = None
self._trav = None
@property
def fl(self):
return self._fl
@property
def tmp(self):
return self._tmp
@property
def cond_txt(self):
return self._cond_txt
@property
def wind_spd(self):
return self._wind_spd
@property
def wind_dir(self):
return self._wind_dir
@property
def hum(self):
return self._hum
@property
def pcpn(self):
return self._pcpn
@property
def pres(self):
return self._pres
@property
def vis(self):
return self._vis
@property
def wind_sc(self):
return self._wind_sc
@property
def qlty(self):
return self._qlty
@property
def main(self):
return self._main
@property
def aqi(self):
return self._aqi
@property
def pm10(self):
return self._pm10
@property
def pm25(self):
return self._pm25
@property
def comf(self):
return self._comf
@property
def cw(self):
return self._cw
@property
def drsg(self):
return self._drsg
@property
def flu(self):
return self._flu
@property
def sport(self):
return self._sport
@property
def uv(self):
return self._uv
@property
def trav(self):
return self._trav
@property
def updatetime(self):
return self._updatetime
def now(self):
now_weather = requests.get(self._url, self._params)
con = now_weather.json()
return con
def air(self):
r_air = requests.get(self._air_url, self._params)
con_air = r_air.json()
return con_air
def life(self):
life_index = requests.get(self._life_index_url, self._params)
con_life_index = life_index.json()
return con_life_index
@Throttle(TIME_BETWEEN_UPDATES)
def update(self):
import time
try:
con = self.now()
except (ConnectError, HTTPError, Timeout, ValueError) as error:
time.sleep(0.01)
con = self.now()
_LOGGER.error("Unable to connect to HeWeather. %s", error)
try:
con_air = self.air()
except (ConnectError, HTTPError, Timeout, ValueError) as error:
time.sleep(0.01)
con_air = self.air()
_LOGGER.error("Unable to connect to HeWeather. %s", error)
try:
con_life_index = self.life()
except (ConnectError, HTTPError, Timeout, ValueError) as error:
time.sleep(0.01)
con_life_index = self.life()
_LOGGER.error("Unable to connect to HeWeather. %s", error)
_LOGGER.info("Update from HeWeather...")
try:
self._fl = con.get("now").get("feelsLike")
self._cond_txt = con.get("now").get("text")
self._hum = con.get("now").get("humidity")
self._pcpn = con.get("now").get("precip")
self._pres = con.get("now").get("pressure")
self._tmp = con.get("now").get("temp")
self._vis = con.get("now").get("vis")
self._wind_spd = con.get("now").get("windSpeed")
self._wind_dir = con.get("now").get("windDir")
self._wind_sc = con.get("now").get("windScale")
self._qlty = con_air.get("now").get("category")
self._aqi = con_air.get("now").get("aqi")
self._pm10 = con_air.get("now").get("pm10")
self._pm25 = con_air.get("now").get("pm2p5")
self._main = con_air.get("now").get("primary")
self._comf = con_life_index.get("daily")[8].get("category")
self._drsg = con_life_index.get("daily")[10].get("category")
self._flu = con_life_index.get("daily")[12].get("category")
self._sport = con_life_index.get("daily")[15].get("category")
self._trav = con_life_index.get("daily")[7].get("category")
self._uv = con_life_index.get("daily")[6].get("category") # 晾晒指数
self._cw = con_life_index.get("daily")[9].get("category") #空气污染扩散条件指数
life_index_list['comf_txt'] = con_life_index.get("daily")[8].get("text")
life_index_list['drsg_txt'] = con_life_index.get("daily")[10].get("text")
life_index_list['flu_txt'] = con_life_index.get("daily")[12].get("text")
life_index_list['sport_txt'] = con_life_index.get("daily")[15].get("text")
life_index_list['trav_txt'] = con_life_index.get("daily")[7].get("text")
life_index_list['uv_txt'] = con_life_index.get("daily")[6].get("text")
life_index_list['cw_txt'] = con_life_index.get("daily")[9].get("text")
except Exception as e:
logging.info(e)
import time
self._updatetime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
| code-geeker/heweather | custom_components/heweather/sensor.py | sensor.py | py | 12,850 | python | en | code | 0 | github-code | 13 |
41643857939 | from django.conf import settings
from django.conf.urls import patterns, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic.simple import redirect_to
admin.autodiscover()
urlpatterns = patterns('',
# serving static files
(r'^favicon\.ico$', redirect_to, {'url': '/static/image/icon/favicon.ico'}),
(r'^apple-touch-icon\.png$', redirect_to, {'url': '/static/image/icon/apple-touch-icon.png'}),
# django stuff
# (r'^robots.txt$', include('robots.urls')),
# (r'^sitemap\.xml$', 'django.contrib.sitemaps.views.index', {'sitemaps': sitemaps}),
# (r'^sitemap-(?P<section>.+)\.xml$', 'django.contrib.sitemaps.views.sitemap', {'sitemaps': sitemaps}),
# project URLs
# (r'^/', include('diablo.urls', namespace='diablo')),
# error pages
# (r'^404/$', 'common.views.page_not_found'),
# (r'^500/$', 'common.views.server_error'),
)
urlpatterns += patterns('homepage.views',
url(r'^email/$', 'email', name='email'),
url(r'^$', 'index', name='index'),
)
handler404 = 'common.views.page_not_found'
handler500 = 'common.views.server_error'
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns() | mattsnider/vet24seven | urls.py | urls.py | py | 1,223 | python | en | code | 0 | github-code | 13 |
39151900935 | from __future__ import print_function
import os.path
import argparse
import numpy as np
import sys
from types import SimpleNamespace
sys.path.insert(0,'..')
from download_owncloud_tray_imgs import download_from_oc
from helpers import get_config
from experiment_runner import ExperimentRunner
from training_ml.pull_labels import pull_labels
from well_matrix import Well
def main(args):
opt = get_config(None, args.create_dataset_config, load_only=True)
opt.run_type = 'create_dataset'
# Get the labels from the spreadsheet
labels_dict = pull_labels(False)
# Download the trays_labelled images, if they're not already downloaded
tray_names = list(labels_dict.keys())
download_from_oc(args.full_tray_imgs_dir, num_trays_to_download=len(tray_names), trays_to_download=tray_names)
print(f"Annotated trays loading: {len(tray_names)}")
# The contents of the csv file
csv_contents = ""
for tray_name in tray_names:
print(f"Processing {tray_name}")
labels = labels_dict[tray_name]
tray_folder_dirpath = f"{args.full_tray_imgs_dir}{os.sep}{tray_name}"
config_filepath = f"{tray_folder_dirpath}{os.sep}meta.yml"
well_metadata = get_config(None, config_filepath, load_only=True)
runner = ExperimentRunner(opt, well_metadata, tray_folder_dirpath)
runner._create_well_matrix(should_save_key_states=True, feature_vec_size=opt.feature_vec_size, saved_well_matrices_path='../_saved_well_matrices', matrix_filename=tray_name, labels=labels, resave=args.resave == 'true')
group_label_counts = {group: np.zeros(5) for group in runner.well_matrix.groups}
coord_to_group_dict = {}
for group in runner.well_matrix.groups:
# Get the coordinates of all Wells in this group
group_coords = runner.well_matrix.main_df_subgrouped[group]['maps'].keys()
# Map each coordinate to a group
for coord in group_coords:
coord_to_group_dict[coord] = group
label_counts = np.zeros(5)
# Append the feature vectors and labels to the dataset
for well_coord, frame in labels:
curr_well = runner.well_matrix[well_coord, frame]
if not isinstance(curr_well, Well):
continue
if curr_well.label != -1 and labels[well_coord, frame] == curr_well.label:
label_val = int(labels[well_coord, frame])
label_counts[label_val] += 1
well_group = coord_to_group_dict[well_coord]
group_label_counts[well_group][label_val] += 1
if args.dists_to_print == 'trays':
csv_str = ""
csv_str += f"{tray_name},"
csv_str += f"{len(runner.well_matrix.all_selected_well_coords)},"
csv_str += f"{runner.well_matrix.shape[2]},"
csv_str += f"{runner.well_matrix.shape[2] * len(runner.well_matrix.all_selected_well_coords)},"
csv_str += f"{sum(label_counts)},"
label_props = label_counts / sum(label_counts)
for i in range(5):
csv_str += "{:.2f},".format(label_props[i])
for i in range(5):
csv_str += "{:.2f},".format(label_counts[i])
csv_contents += csv_str[:-1] + '\n'
if args.dists_to_print == 'groups':
for group in runner.well_matrix.groups:
csv_str = ""
csv_str += f"{tray_name}_{group},"
all_coords_in_group = 0
for group_ in coord_to_group_dict.values():
if group_ == group:
all_coords_in_group += 1
csv_str += f"{all_coords_in_group},"
csv_str += f"{runner.well_matrix.shape[2]},"
csv_str += f"{runner.well_matrix.shape[2] * all_coords_in_group},"
label_counts = group_label_counts[group]
csv_str += f"{sum(label_counts)},"
label_props = label_counts / sum(label_counts)
for i in range(5):
csv_str += "{:.2f},".format(label_props[i])
for i in range(5):
csv_str += "{:.2f},".format(label_counts[i])
csv_contents += csv_str[:-1] + '\n'
with open(f"./{args.dists_to_print}_distributions.csv", "w") as outf:
outf.write(csv_contents)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Pulls labels from the spreadsheet, downloads labelled trays from ownCloud")
parser.add_argument('--full_tray_imgs_dir', type=str, required=True)
parser.add_argument('--create_dataset_config', type=str, required=True, help="NOT the meta.yml file, \
but rather the ml_test_config.yml file used by the experiment_runner")
parser.add_argument('--dists_to_print', type=str, required=True, help="The kind of distribution you want to print (`trays` for wells in trays, `groups` \
for wells in groups")
parser.add_argument('--resave', type=str, required=True, help="Whether or not we should re-compute the WellMatrix objects, \
even if they have already been computed and saved.")
args = parser.parse_args()
main(args) | sidguptacode/ML_AT_Interpretation | agglutination-detection/training_ml/get_labels_dist.py | get_labels_dist.py | py | 5,242 | python | en | code | 0 | github-code | 13 |
15478935225 | from flask import request, jsonify
from .. import db
from .models import Homework
def get_all_homeworks():
homework = Homework.query.all()
homework_list = []
for h in homework:
hw = {
"id": h.id,
"homework_name": h.homework_name,
"subject": h.subject,
"content": h.content,
"extra_resources": h.extra_resources,
"teacher": h.teacher.name,
"teacher_id": h.teacher_id
}
homework_list.append(hw)
return jsonify(homework_list)
def get_one_homework(id):
homework = Homework.query.get(id)
hw = {
"id": homework.id,
"homework_name": homework.homework_name,
"subject": homework.subject,
"content": homework.content,
"extra_resources": homework.extra_resources,
"teacher": homework.teacher.name,
"teacher_id": homework.teacher_id
}
return jsonify(hw)
def create_homework():
data = request.json
new_hw = Homework(
homework_name = data['homework_name'],
subject = data['subject'],
content = data['content'],
extra_resources = data['extra_resources'] if 'extra_resources' in data else "",
teacher_id = data['teacher_id']
)
db.session.add(new_hw)
db.session.commit()
return jsonify(id=new_hw.id, homework_name=new_hw.homework_name, subject=new_hw.subject, content=new_hw.content, extra_resources=new_hw.extra_resources, teacher_id=new_hw.teacher_id)
def update_homework(id):
homework = Homework.query.get(id)
data = request.json
if 'homework_name' in data:
homework.homework_name = data['homework_name']
db.session.commit()
if 'subject' in data:
homework.subject = data['subject']
db.session.commit()
if 'content' in data:
homework.content = data['content']
db.session.commit()
if 'extra_resources' in data:
homework.extra_resources = data['extra_resources']
db.session.commit()
return jsonify(id=homework.id, homework_name=homework.homework_name, content=homework.content, subject = homework.subject, extra_resources=homework.extra_resources, teacher_id=homework.teacher_id)
def delete_homework(id):
homework = Homework.query.get(id)
db.session.delete(homework)
db.session.commit()
return 'homework deleted'
| diegorramos84/homeworkheroes-api | homework/controllers.py | controllers.py | py | 2,409 | python | en | code | 0 | github-code | 13 |
20254863438 | from PyQt5 import QtCore, QtGui, QtWidgets
import pandas as pd
def importBtnHandler(self):
self.fromImport = True
data=pd.read_csv('inputs.csv')
r=data.shape[0]
r=int(r)
data.set_index('Jobs',inplace=True)
i=0
j=0
rows = r
self.JobNo.setValue(rows)
#self.previousRows = rows
self.JobTable.setRowCount(rows)
for i in range(rows):
item = QtWidgets.QTableWidgetItem("Job "+str(i+1))
item.setForeground(QtGui.QColor(0, 0, 0))
item.setBackground(QtGui.QColor(
self.rgbRed[i], self.rgbGreen[i], self.rgbBlue[i]))
self.JobTable.setVerticalHeaderItem(i, item)
# Call function to set items widget in each cell of table and align them.
self.setWidgetsInCellsAndAlign()
# Call function to set Non editable columns, whichever required.
self.setNonEditableColumns()
algo=self.AlgoSelector.currentText()
#algo_type=self.TypeWithIOBT.clicked.connect(self.type_IOBT())
i=0
if self.TypeWithIOBT.isChecked():
if algo!='Priority':
columns=['AT','BT','IOBT','BT2']
for jobs in data.index:
for feature in columns:
item = QtWidgets.QTableWidgetItem()
q=data.loc[jobs,feature]
item.setText(str(q))
item.setTextAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignHCenter)
self.JobTable.setItem(i,(j%4)+1,item)
j+=1
i+=1
else:
i=0
j=0
for jobs in data.index:
columns=['Priority','AT','BT','IOBT','BT2']
for feature in columns:
item = QtWidgets.QTableWidgetItem()
q=data.loc[jobs,feature]
item.setText(str(q))
item.setTextAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignHCenter)
self.JobTable.setItem(i,(j%5),item)
j+=1
i+=1
if self.TypeWithoutIOBT.isChecked():
i=0
j=0
if algo!='Priority':
columns=['AT','BT']
for jobs in data.index:
for feature in columns:
item = QtWidgets.QTableWidgetItem()
q=data.loc[jobs,feature]
item.setText(str(q))
item.setTextAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignHCenter)
self.JobTable.setItem(i,(j%2)+1,item)
j+=1
i+=1
else:
for jobs in data.index:
columns=['Priority','AT','BT']
for feature in columns:
item = QtWidgets.QTableWidgetItem()
q=data.loc[jobs,feature]
item.setText(str(q))
item.setTextAlignment(QtCore.Qt.AlignVCenter | QtCore.Qt.AlignHCenter)
self.JobTable.setItem(i,(j%3),item)
j+=1
i+=1 | ParthPrajapati43/OS-Algorithms | ProcessScheduler/src/Buttons/Import.py | Import.py | py | 3,062 | python | en | code | 7 | github-code | 13 |
12310448785 | from flask import Flask, render_template, request
from flask_paginate import Pagination, get_page_args
from firebase_admin import db
import db_init
import asyncio
import concurrent.futures
import time
pages_ref = db.reference('/pages')
ranks_ref = db.reference('/ranks')
idx_ref = db.reference('/indexes')
app = Flask(__name__)
app.config["DEBUG"] = True
def make_results(pid, n_coincidences, page_rank, list_results):
url = pages_ref.child(pid).get()
list_results.append({
'url': url,
'n_coincidences': n_coincidences,
'page_rank': page_rank
})
async def results(pid, n_workers, list_results, n_coincidences, page_rank,
event_loop):
with concurrent.futures.ThreadPoolExecutor(max_workers=n_workers +
2) as executor:
# print("Fetching: ", pid)
return await event_loop.run_in_executor(executor, make_results, pid,
n_coincidences, page_rank,
list_results)
async def results_range(word, list_results, event_loop):
# inverted index
page_ids = idx_ref.child(word).get()
print("Page ids fetched", len(page_ids))
# page rank
page_rank_ids = ranks_ref.get()
coroutines = [
results(pid, len(page_ids), list_results, page_ids[pid],
page_rank_ids[pid], event_loop) for pid in page_ids
if pid in page_rank_ids
]
await asyncio.gather(*coroutines)
def sort_by_pagerank(list_results):
return sorted(list_results, key=lambda d: d['page_rank'], reverse=True)
def get_results(list_results, offset=0, per_page=10):
return list_results[offset:offset + per_page]
@app.route("/")
def home():
return render_template('index.html')
list_results = []
word = ''
@app.route("/search", methods=['POST', 'GET'])
def search():
global list_results
global word
if request.method == 'POST':
word = request.form['word-input']
if word != '':
list_results = []
###
t0 = time.time()
event_loop = asyncio.new_event_loop()
try:
event_loop.run_until_complete(
results_range(word, list_results, event_loop))
finally:
event_loop.close()
print('Time elapsed:', (time.time() - t0), 'seconds')
###
list_results = sort_by_pagerank(list_results)
page, per_page, offset = get_page_args(
page_parameter="page", per_page_parameter="per_page")
total = len(list_results)
pagination_results = get_results(list_results,
offset=offset,
per_page=per_page)
pagination = Pagination(page=page,
per_page=per_page,
total=total,
css_framework='bootstrap5')
return render_template('results.html',
n_results=total,
word=word,
pagination_results=pagination_results,
page=page,
per_page=per_page,
pagination=pagination)
else:
page, per_page, offset = get_page_args(page_parameter="page",
per_page_parameter="per_page")
total = len(list_results)
pagination_results = get_results(list_results,
offset=offset,
per_page=per_page)
pagination = Pagination(page=page,
per_page=per_page,
total=total,
css_framework='bootstrap5')
return render_template('results.html',
n_results=total,
word=word,
pagination_results=pagination_results,
page=page,
per_page=per_page,
pagination=pagination)
if __name__ == "__main__":
app.run()
| sharon1160/buscape | app.py | app.py | py | 4,413 | python | en | code | 1 | github-code | 13 |
17055116704 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class KoubeiServindustryNatatoriumWaterqualityUploadModel(object):
def __init__(self):
self._commodity_id = None
self._current_num = None
self._currentnum_update_time = None
self._external_id = None
self._footpool_cl_qualified = None
self._footpool_cl_remain = None
self._footpool_cl_remain_standard = None
self._limit_num = None
self._mainpool_cl_qualified = None
self._mainpool_cl_remain = None
self._mainpool_cl_remain_standard = None
self._pool_volume = None
self._remark = None
self._request_id = None
self._shop_id = None
self._water_change = None
self._water_change_percent = None
self._water_change_qualified = None
self._water_change_standard = None
self._water_check_time = None
self._water_ph = None
self._water_ph_qualified = None
self._water_ph_standard = None
self._water_qualified = None
self._water_temperature = None
self._water_temperature_qualified = None
self._water_temperature_standard = None
self._water_update_time = None
    # ------------------------------------------------------------------
    # Generated accessor boilerplate: each API request field below is
    # exposed as a plain read/write property backed by the matching
    # '_'-prefixed slot initialized in __init__.  No validation or
    # conversion is performed on get or set.
    # ------------------------------------------------------------------
    @property
    def commodity_id(self):
        return self._commodity_id

    @commodity_id.setter
    def commodity_id(self, value):
        self._commodity_id = value
    @property
    def current_num(self):
        return self._current_num

    @current_num.setter
    def current_num(self, value):
        self._current_num = value
    @property
    def currentnum_update_time(self):
        return self._currentnum_update_time

    @currentnum_update_time.setter
    def currentnum_update_time(self, value):
        self._currentnum_update_time = value
    @property
    def external_id(self):
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        self._external_id = value
    @property
    def footpool_cl_qualified(self):
        return self._footpool_cl_qualified

    @footpool_cl_qualified.setter
    def footpool_cl_qualified(self, value):
        self._footpool_cl_qualified = value
    @property
    def footpool_cl_remain(self):
        return self._footpool_cl_remain

    @footpool_cl_remain.setter
    def footpool_cl_remain(self, value):
        self._footpool_cl_remain = value
    @property
    def footpool_cl_remain_standard(self):
        return self._footpool_cl_remain_standard

    @footpool_cl_remain_standard.setter
    def footpool_cl_remain_standard(self, value):
        self._footpool_cl_remain_standard = value
    @property
    def limit_num(self):
        return self._limit_num

    @limit_num.setter
    def limit_num(self, value):
        self._limit_num = value
    @property
    def mainpool_cl_qualified(self):
        return self._mainpool_cl_qualified

    @mainpool_cl_qualified.setter
    def mainpool_cl_qualified(self, value):
        self._mainpool_cl_qualified = value
    @property
    def mainpool_cl_remain(self):
        return self._mainpool_cl_remain

    @mainpool_cl_remain.setter
    def mainpool_cl_remain(self, value):
        self._mainpool_cl_remain = value
    @property
    def mainpool_cl_remain_standard(self):
        return self._mainpool_cl_remain_standard

    @mainpool_cl_remain_standard.setter
    def mainpool_cl_remain_standard(self, value):
        self._mainpool_cl_remain_standard = value
    @property
    def pool_volume(self):
        return self._pool_volume

    @pool_volume.setter
    def pool_volume(self, value):
        self._pool_volume = value
    @property
    def remark(self):
        return self._remark

    @remark.setter
    def remark(self, value):
        self._remark = value
    @property
    def request_id(self):
        return self._request_id

    @request_id.setter
    def request_id(self, value):
        self._request_id = value
    @property
    def shop_id(self):
        return self._shop_id

    @shop_id.setter
    def shop_id(self, value):
        self._shop_id = value
    @property
    def water_change(self):
        return self._water_change

    @water_change.setter
    def water_change(self, value):
        self._water_change = value
    @property
    def water_change_percent(self):
        return self._water_change_percent

    @water_change_percent.setter
    def water_change_percent(self, value):
        self._water_change_percent = value
    @property
    def water_change_qualified(self):
        return self._water_change_qualified

    @water_change_qualified.setter
    def water_change_qualified(self, value):
        self._water_change_qualified = value
    @property
    def water_change_standard(self):
        return self._water_change_standard

    @water_change_standard.setter
    def water_change_standard(self, value):
        self._water_change_standard = value
    @property
    def water_check_time(self):
        return self._water_check_time

    @water_check_time.setter
    def water_check_time(self, value):
        self._water_check_time = value
    @property
    def water_ph(self):
        return self._water_ph

    @water_ph.setter
    def water_ph(self, value):
        self._water_ph = value
    @property
    def water_ph_qualified(self):
        return self._water_ph_qualified

    @water_ph_qualified.setter
    def water_ph_qualified(self, value):
        self._water_ph_qualified = value
    @property
    def water_ph_standard(self):
        return self._water_ph_standard

    @water_ph_standard.setter
    def water_ph_standard(self, value):
        self._water_ph_standard = value
    @property
    def water_qualified(self):
        return self._water_qualified

    @water_qualified.setter
    def water_qualified(self, value):
        self._water_qualified = value
    @property
    def water_temperature(self):
        return self._water_temperature

    @water_temperature.setter
    def water_temperature(self, value):
        self._water_temperature = value
    @property
    def water_temperature_qualified(self):
        return self._water_temperature_qualified

    @water_temperature_qualified.setter
    def water_temperature_qualified(self, value):
        self._water_temperature_qualified = value
    @property
    def water_temperature_standard(self):
        return self._water_temperature_standard

    @water_temperature_standard.setter
    def water_temperature_standard(self, value):
        self._water_temperature_standard = value
    @property
    def water_update_time(self):
        return self._water_update_time

    @water_update_time.setter
    def water_update_time(self, value):
        self._water_update_time = value
def to_alipay_dict(self):
params = dict()
if self.commodity_id:
if hasattr(self.commodity_id, 'to_alipay_dict'):
params['commodity_id'] = self.commodity_id.to_alipay_dict()
else:
params['commodity_id'] = self.commodity_id
if self.current_num:
if hasattr(self.current_num, 'to_alipay_dict'):
params['current_num'] = self.current_num.to_alipay_dict()
else:
params['current_num'] = self.current_num
if self.currentnum_update_time:
if hasattr(self.currentnum_update_time, 'to_alipay_dict'):
params['currentnum_update_time'] = self.currentnum_update_time.to_alipay_dict()
else:
params['currentnum_update_time'] = self.currentnum_update_time
if self.external_id:
if hasattr(self.external_id, 'to_alipay_dict'):
params['external_id'] = self.external_id.to_alipay_dict()
else:
params['external_id'] = self.external_id
if self.footpool_cl_qualified:
if hasattr(self.footpool_cl_qualified, 'to_alipay_dict'):
params['footpool_cl_qualified'] = self.footpool_cl_qualified.to_alipay_dict()
else:
params['footpool_cl_qualified'] = self.footpool_cl_qualified
if self.footpool_cl_remain:
if hasattr(self.footpool_cl_remain, 'to_alipay_dict'):
params['footpool_cl_remain'] = self.footpool_cl_remain.to_alipay_dict()
else:
params['footpool_cl_remain'] = self.footpool_cl_remain
if self.footpool_cl_remain_standard:
if hasattr(self.footpool_cl_remain_standard, 'to_alipay_dict'):
params['footpool_cl_remain_standard'] = self.footpool_cl_remain_standard.to_alipay_dict()
else:
params['footpool_cl_remain_standard'] = self.footpool_cl_remain_standard
if self.limit_num:
if hasattr(self.limit_num, 'to_alipay_dict'):
params['limit_num'] = self.limit_num.to_alipay_dict()
else:
params['limit_num'] = self.limit_num
if self.mainpool_cl_qualified:
if hasattr(self.mainpool_cl_qualified, 'to_alipay_dict'):
params['mainpool_cl_qualified'] = self.mainpool_cl_qualified.to_alipay_dict()
else:
params['mainpool_cl_qualified'] = self.mainpool_cl_qualified
if self.mainpool_cl_remain:
if hasattr(self.mainpool_cl_remain, 'to_alipay_dict'):
params['mainpool_cl_remain'] = self.mainpool_cl_remain.to_alipay_dict()
else:
params['mainpool_cl_remain'] = self.mainpool_cl_remain
if self.mainpool_cl_remain_standard:
if hasattr(self.mainpool_cl_remain_standard, 'to_alipay_dict'):
params['mainpool_cl_remain_standard'] = self.mainpool_cl_remain_standard.to_alipay_dict()
else:
params['mainpool_cl_remain_standard'] = self.mainpool_cl_remain_standard
if self.pool_volume:
if hasattr(self.pool_volume, 'to_alipay_dict'):
params['pool_volume'] = self.pool_volume.to_alipay_dict()
else:
params['pool_volume'] = self.pool_volume
if self.remark:
if hasattr(self.remark, 'to_alipay_dict'):
params['remark'] = self.remark.to_alipay_dict()
else:
params['remark'] = self.remark
if self.request_id:
if hasattr(self.request_id, 'to_alipay_dict'):
params['request_id'] = self.request_id.to_alipay_dict()
else:
params['request_id'] = self.request_id
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
if self.water_change:
if hasattr(self.water_change, 'to_alipay_dict'):
params['water_change'] = self.water_change.to_alipay_dict()
else:
params['water_change'] = self.water_change
if self.water_change_percent:
if hasattr(self.water_change_percent, 'to_alipay_dict'):
params['water_change_percent'] = self.water_change_percent.to_alipay_dict()
else:
params['water_change_percent'] = self.water_change_percent
if self.water_change_qualified:
if hasattr(self.water_change_qualified, 'to_alipay_dict'):
params['water_change_qualified'] = self.water_change_qualified.to_alipay_dict()
else:
params['water_change_qualified'] = self.water_change_qualified
if self.water_change_standard:
if hasattr(self.water_change_standard, 'to_alipay_dict'):
params['water_change_standard'] = self.water_change_standard.to_alipay_dict()
else:
params['water_change_standard'] = self.water_change_standard
if self.water_check_time:
if hasattr(self.water_check_time, 'to_alipay_dict'):
params['water_check_time'] = self.water_check_time.to_alipay_dict()
else:
params['water_check_time'] = self.water_check_time
if self.water_ph:
if hasattr(self.water_ph, 'to_alipay_dict'):
params['water_ph'] = self.water_ph.to_alipay_dict()
else:
params['water_ph'] = self.water_ph
if self.water_ph_qualified:
if hasattr(self.water_ph_qualified, 'to_alipay_dict'):
params['water_ph_qualified'] = self.water_ph_qualified.to_alipay_dict()
else:
params['water_ph_qualified'] = self.water_ph_qualified
if self.water_ph_standard:
if hasattr(self.water_ph_standard, 'to_alipay_dict'):
params['water_ph_standard'] = self.water_ph_standard.to_alipay_dict()
else:
params['water_ph_standard'] = self.water_ph_standard
if self.water_qualified:
if hasattr(self.water_qualified, 'to_alipay_dict'):
params['water_qualified'] = self.water_qualified.to_alipay_dict()
else:
params['water_qualified'] = self.water_qualified
if self.water_temperature:
if hasattr(self.water_temperature, 'to_alipay_dict'):
params['water_temperature'] = self.water_temperature.to_alipay_dict()
else:
params['water_temperature'] = self.water_temperature
if self.water_temperature_qualified:
if hasattr(self.water_temperature_qualified, 'to_alipay_dict'):
params['water_temperature_qualified'] = self.water_temperature_qualified.to_alipay_dict()
else:
params['water_temperature_qualified'] = self.water_temperature_qualified
if self.water_temperature_standard:
if hasattr(self.water_temperature_standard, 'to_alipay_dict'):
params['water_temperature_standard'] = self.water_temperature_standard.to_alipay_dict()
else:
params['water_temperature_standard'] = self.water_temperature_standard
if self.water_update_time:
if hasattr(self.water_update_time, 'to_alipay_dict'):
params['water_update_time'] = self.water_update_time.to_alipay_dict()
else:
params['water_update_time'] = self.water_update_time
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = KoubeiServindustryNatatoriumWaterqualityUploadModel()
if 'commodity_id' in d:
o.commodity_id = d['commodity_id']
if 'current_num' in d:
o.current_num = d['current_num']
if 'currentnum_update_time' in d:
o.currentnum_update_time = d['currentnum_update_time']
if 'external_id' in d:
o.external_id = d['external_id']
if 'footpool_cl_qualified' in d:
o.footpool_cl_qualified = d['footpool_cl_qualified']
if 'footpool_cl_remain' in d:
o.footpool_cl_remain = d['footpool_cl_remain']
if 'footpool_cl_remain_standard' in d:
o.footpool_cl_remain_standard = d['footpool_cl_remain_standard']
if 'limit_num' in d:
o.limit_num = d['limit_num']
if 'mainpool_cl_qualified' in d:
o.mainpool_cl_qualified = d['mainpool_cl_qualified']
if 'mainpool_cl_remain' in d:
o.mainpool_cl_remain = d['mainpool_cl_remain']
if 'mainpool_cl_remain_standard' in d:
o.mainpool_cl_remain_standard = d['mainpool_cl_remain_standard']
if 'pool_volume' in d:
o.pool_volume = d['pool_volume']
if 'remark' in d:
o.remark = d['remark']
if 'request_id' in d:
o.request_id = d['request_id']
if 'shop_id' in d:
o.shop_id = d['shop_id']
if 'water_change' in d:
o.water_change = d['water_change']
if 'water_change_percent' in d:
o.water_change_percent = d['water_change_percent']
if 'water_change_qualified' in d:
o.water_change_qualified = d['water_change_qualified']
if 'water_change_standard' in d:
o.water_change_standard = d['water_change_standard']
if 'water_check_time' in d:
o.water_check_time = d['water_check_time']
if 'water_ph' in d:
o.water_ph = d['water_ph']
if 'water_ph_qualified' in d:
o.water_ph_qualified = d['water_ph_qualified']
if 'water_ph_standard' in d:
o.water_ph_standard = d['water_ph_standard']
if 'water_qualified' in d:
o.water_qualified = d['water_qualified']
if 'water_temperature' in d:
o.water_temperature = d['water_temperature']
if 'water_temperature_qualified' in d:
o.water_temperature_qualified = d['water_temperature_qualified']
if 'water_temperature_standard' in d:
o.water_temperature_standard = d['water_temperature_standard']
if 'water_update_time' in d:
o.water_update_time = d['water_update_time']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/KoubeiServindustryNatatoriumWaterqualityUploadModel.py | KoubeiServindustryNatatoriumWaterqualityUploadModel.py | py | 17,237 | python | en | code | 241 | github-code | 13 |
73883149778 | import numpy as np
import matplotlib.pyplot as plt
LR = 0.1
epochs = 6
BATCH_SIZE = 1000
LAMBDA = 1
def accuracy(h, y):
    """Return the fraction of predictions in *h* that match the labels *y*.

    *h* holds probabilities/scores; they are thresholded at 0.5 (``>= 0.5``
    maps to 1, else 0) before the comparison.

    Bug fixed: the previous version binarised *h* in place, silently
    clobbering the caller's probability array; we now threshold a copy.
    """
    binarised = np.where(h >= 0.5, 1.0, 0.0)  # binarise without mutating h
    return np.count_nonzero(binarised == y) / y.shape[0]
def shuffle(x, y):
    """Return *x* and *y* reordered by one shared random permutation,
    so corresponding samples and labels stay aligned."""
    order = np.arange(len(x))
    np.random.shuffle(order)
    return x[order], y[order]
def sigmoid(prediccion):
    """Element-wise logistic function 1 / (1 + e^-x).

    The input is clipped to the range where ``np.exp`` stays finite in
    float64, so very negative logits no longer raise an overflow
    RuntimeWarning (the result for those inputs is still ~0, as before).
    """
    z = np.clip(prediccion, -709.0, 709.0)  # exp(709) is still finite
    return 1.0 / (1.0 + np.exp(-z))
def main():
    """Train a logistic-regression image classifier with mini-batch gradient
    descent and L2 regularisation, then report train/validation/test
    accuracy and plot the cost and accuracy curves.

    Side effects: reads ``x.npy``/``y.npy`` from the working directory and
    opens blocking matplotlib windows.
    """
    # Load dataset (pixel values scaled to [0, 1])
    x = np.load('x.npy') / 255
    y = np.load('y.npy')
    x, y = shuffle(x, y)
    # Min-max normalise the data
    x = (x - np.min(x)) / (np.max(x) - np.min(x))
    # Number of pixels per image
    n = x[0].shape[0] * x[0].shape[1]
    # Number of observations
    m = y.shape[0]
    # Flatten the images into a 2-D design matrix
    x = x.reshape(m, n)
    um_rows, num_columns = x.shape
    # Prepend a bias column of ones
    x_0 = np.ones((m, 1))
    x = np.hstack((x_0, x))
    # Split the data into training, validation and test sets (10% / 10%)
    num_validation = int(0.1 * m)
    num_test = int(0.1 * m)
    x_training = x[num_validation+num_test:, :]
    x_validation = x[num_test:num_validation+num_test, :]
    x_test = x[:num_test, :]
    y_validation = y[num_test:num_validation + num_test]
    y_test = y[:num_test]
    # Initialise parameters
    theta = np.random.rand(n + 1)
    N = len(y)
    # Train the model with L2 regularisation
    def entrenamiento(x, y, theta, LR, epochs):
        # Mini-batch gradient descent; returns final theta, the last batch's
        # predictions, and the per-batch cost/accuracy histories.
        costos = []
        acc = []
        for i in range(epochs):
            for batch_start in range (0,len(x_training),BATCH_SIZE):
                x_batch = x[batch_start:batch_start + BATCH_SIZE, :]
                y_batch = y[batch_start:batch_start + BATCH_SIZE]
                prediccion = sigmoid(np.dot(x_batch, theta))
                error = prediccion - y_batch
                gradient = (np.dot(x_batch.T, error) + LAMBDA * theta) / len(y_batch)  # L2 regularisation
                # exclude the bias term from regularisation
                # NOTE(review): the L2 term is added twice — once (including
                # bias) in the line above and again here — so the effective
                # regularisation strength is not LAMBDA; confirm intent.
                gradient[1:] += LAMBDA * theta[1:]
                theta -= LR * gradient
                # Cross-entropy cost; 1e-6 guards log(0)
                costo = (-1/len(y_batch)) * np.sum(y_batch * np.log(prediccion + 0.000001) + (1 - y_batch) * np.log(1 - prediccion + 0.000001))
                costos.append(costo)
                acc.append(accuracy(prediccion, y_batch))
        return theta, prediccion, costos, acc
    # NOTE(review): training is run on the FULL matrix x (including the
    # validation/test rows), so the held-out metrics below are leaky.
    theta, prediccion, costos, acc = entrenamiento(x, y, theta, LR, epochs)
    plt.plot(costos)
    plt.ylabel('Costo')
    plt.title('Costo')
    plt.show()
    plt.title('Accuracy')
    plt.plot(acc)
    plt.show()
    # print(y)
    # print(prediccion)
    # NOTE(review): prediccion is only the LAST batch while y is the full
    # label vector — the shapes differ, so this comparison looks wrong.
    aux = accuracy(prediccion,y)
    print("precision del modelo",aux)
    prediccion_validacion = sigmoid(np.dot(x_validation, theta))
    prediccion_test = sigmoid(np.dot(x_test, theta))
    acc_validacion = accuracy(prediccion_validacion, y_validation)
    acc_test = accuracy(prediccion_test, y_test)
    print("Precisión en el conjunto de validación:", acc_validacion)
    print("Precisión en el conjunto de prueba:", acc_test)
if __name__ == '__main__': main()
26854019865 | import json
import os
import datetime
import jwt
from datetime import timedelta
from conf.base import SPE_CH
def http_response(self, msg, code):
    """Serialise *msg* and *code* into the standard response envelope and
    write it through the handler.

    The payload shape is ``{"data": {"msg": ..., "code": ...}}``.
    """
    payload = {"data": {"msg": msg, "code": code}}
    self.write(json.dumps(payload))
def save_files(file_metas, in_rel_path, type='image'):
    """Persist uploaded file bodies under *in_rel_path*.

    Args:
        file_metas: iterable of dicts with ``filename`` and ``body`` (bytes),
            as produced by the upload handler.
        in_rel_path: directory the files are written into.
        type: unused legacy parameter, kept for caller compatibility.

    Returns:
        List of the file names actually written (base names only).

    Security fix: the client-supplied filename is reduced to its base name,
    so a crafted name such as ``../../etc/passwd`` can no longer escape
    *in_rel_path* (path traversal).
    """
    file_name_list = []
    for meta in file_metas:
        # Strip any directory components the client may have smuggled in.
        file_name = os.path.basename(meta['filename'])
        file_path = os.path.join(in_rel_path, file_name)
        file_name_list.append(file_name)
        # Write the raw upload body as binary.
        with open(file_path, 'wb') as up:
            up.write(meta['body'])
    return file_name_list
def list_to_dict(object_list):
    """Serialise a list of ORM row objects into a list of JSON strings.

    Each element of the result is ``json.dumps`` of the row's attribute
    dict with SQLAlchemy's internal ``_sa_instance_state`` entry removed.

    Bug fixed: the previous version deleted ``_sa_instance_state`` from the
    object's own ``__dict__`` (corrupting the live ORM instance) and raised
    ``KeyError`` for plain objects; we now work on a copy and pop with a
    default.
    """
    dict_list = []
    for item in object_list:
        row = dict(item.__dict__)  # copy: never mutate the ORM object
        row.pop('_sa_instance_state', None)
        dict_list.append(json.dumps(row))
    return dict_list
def get_dates(start_date, end_date):
    """Return every calendar day from *start_date* to *end_date* inclusive.

    Both bounds are ``"%Y-%m-%d"`` strings; the result is a (possibly
    empty, if end precedes start) list of date strings in the same format.
    """
    first = datetime.datetime.strptime(start_date, "%Y-%m-%d")
    last = datetime.datetime.strptime(end_date, "%Y-%m-%d")
    span = (last - first).days
    return [(first + timedelta(days=offset)).strftime("%Y-%m-%d")
            for offset in range(span + 1)]
def option_union(options):
    """Join the options into one string, appending SPE_CH after each one.

    Example (with SPE_CH == '&'): ['12', '8'] -> '12&8&'.
    Non-string options are converted with ``str`` first.
    """
    return ''.join(str(option) + SPE_CH for option in options)
def option_parsing(options_union):
    """Split a SPE_CH-terminated string back into its option list.

    Example (with SPE_CH == '&'): '12&8&' -> ['12', '8'].  Any trailing
    text after the last SPE_CH is discarded, mirroring the original
    character-walk behaviour, and '' parses to [].
    """
    return options_union.split(SPE_CH)[:-1]
def token_encode(id_: int, days: int):
    """Create a signed HS256 JWT for user *id_*, valid for *days* days."""
    now = datetime.datetime.now()
    claims = {
        'exp': int((now + datetime.timedelta(days=days)).timestamp()),  # expiry
        'iat': now,                       # issued-at
        'iss': 'wangxt',                  # issuer tag checked on decode
        'data': {                         # payload: user id + issue epoch
            'id': id_,
            'time': int(now.timestamp()),
        },
    }
    # Sign and serialise the claims.
    return jwt.encode(claims, 'secret', algorithm='HS256')
def token_decode(session_id):
    """Verify and decode a token produced by :func:`token_encode`."""
    return jwt.decode(
        session_id,
        'secret',
        issuer='wangxt',
        algorithms=['HS256'],
    )
if __name__ == "__main__":
    # Bug fixed: the old smoke test called http_response() with no
    # arguments, which can only raise TypeError (it needs a handler, a
    # message and a code).  Direct execution is now an explicit no-op.
    pass
| xiaoyuerova/questionnaireServer | common/commons.py | commons.py | py | 3,356 | python | en | code | 0 | github-code | 13 |
#Conversor de medidas
# Reads a length in metres and prints it converted to the other metric
# length units (km, hm, dam, dm, cm, mm).
metro = float(input('DIgite um valor para ser convertido: '))
km = metro / 1000
hec = metro / 100
dam = metro / 10
dm = metro * 10
cm = metro * 100
mm = metro * 1000
# Bug fixed: the cm placeholder was written as {cm,} — a one-element tuple —
# which printed e.g. "(100.0,)" instead of "100.0".
print(f' em metro: {metro} \n , em km: {km} \n , em hec {hec} \n , em dam {dam} \n , em dm {dm} \n em cm {cm} \n em mm {mm}')
| Kaykynog/exercicios_guanabara | exercicios/011.py | 011.py | py | 318 | python | pt | code | 1 | github-code | 13 |
31332668114 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .tile_target_masks import make_target_images_and_loss_masks
class TileObjective(nn.Module):
    """
    Differentiable objective for optimising a GAN latent so that the
    generated tile's borders match already-fixed neighbouring tiles.

    Args:
     - images_to_match - List [ List [ Tensor ] ]:
            e.g. [[img0], [img1, img2], ..., [img0, img3]]
            A list of lists, where every sublist contains the constraints for one
            tile optimisation and the outer list thus represents the batch dimension.
     - matching_sides - List [ List [ str ] ]:
            e.g. [["left"], ["left", "top"], ..., ["right", "bottom"]]
            List of lists, matching the lengths of <images_to_match> defining which sides
            (defined as strings) the images need to be matched of the tile that is being optimised
            e.g. 'left' means that there is an image to the 'left' that needs to be matched.
     - netG - nn.Module:
            A generator network that maps noise to images (GAN).
     - n_pixels_boundary - int:
            Number of pixels to use in the boundary condition.
     - latent_range - int:
            maximum values the latent representation can take on (used to clamp)
     - device - str:
            which device to use, e.g. 'cpu' or 'cuda:0'
     - opposite_transform - bool:
            whether to invert the target image to optimise towards
     - reduce_black_and_white - bool
            whether to compare the new tile and constraining tiles in a black and white projection (matching brightness)
     - plot_objective - bool
            whether to plot the target images and loss masks for debug
    """
    def __init__(self, images_to_match, matching_sides, netG, n_pixels_boundary=10, latent_range=1.5,
                 device="cuda:0", opposite_transform=False, reduce_black_and_white=False, plot_objective=False):
        super(TileObjective, self).__init__()
        # Assertions: validate batch structure before any heavy work.
        assert len(images_to_match) == len(matching_sides), \
            "length of images_to_match should equal the length of matching_sides"
        assert type(images_to_match[0]) == type(matching_sides[0]) == list, \
            "images_to_match and matching_sides should be lists of lists"
        for sublist in matching_sides:
            for ms in sublist:
                assert ms in ["left", "right", "top", "bottom"], \
                    "matching side needs to be one of: [left, right, top, bottom]"
        for sublist in images_to_match:
            for im in sublist:
                assert im.dim() == 3, "individual images must be 3D"
        # Craft targets and loss masks (one sublist per batch element).
        targets, loss_masks = [], []
        for batch_idx, (ms, im) in enumerate(zip(matching_sides, images_to_match)):
            t, m = make_target_images_and_loss_masks(images_to_match=im, matching_sides=ms,
                                                     n_pixels_boundary=n_pixels_boundary, plot=plot_objective)
            targets.append([t_.to(device) for t_ in t])
            loss_masks.append([m_.to(device) for m_ in m])
        self.targets = targets
        self.loss_masks = loss_masks
        print("len(targets) [B] len(targets[0]) [N_constraints_0]", len(targets), len(targets[0]))
        print("len(loss_masks) [B] len(loss_masks[0]) [N_constraints_0]", len(loss_masks), len(loss_masks[0]))
        # TODO: this might vary for different elements in the batch...
        self.reduce_black_and_white = reduce_black_and_white
        self.opposite_transform = opposite_transform
        self.latent_range = latent_range
        self.batch_size = len(self.loss_masks)
        # Set the generator in the correct mode, make sure gradients are off!
        # Only the latent is optimised; netG stays frozen in eval mode.
        self.netG = netG
        self.netG.eval()
        for param in self.netG.parameters():
            param.requires_grad = False
    @staticmethod
    def rgb_to_bw(image_batch):
        # ITU-R BT.601 luma weights:
        # 0.299 ∙ Red + 0.587 ∙ Green + 0.114 ∙ Blue
        return 0.299 * image_batch[:, 0, :, :] + 0.587 * image_batch[:, 1, :, :] + 0.114 * image_batch[:, 2, :, :]
    def black_and_white_l1(self, candidate_batch, target_batch):
        # greyscale = 0.2125 * R + 0.7154 * G + 0.0721 * B
        # NOTE(review): the coefficients above (BT.709) do not match what
        # rgb_to_bw actually applies (BT.601) — confirm which is intended.
        candidate_bw = self.rgb_to_bw(candidate_batch)
        target_bw = self.rgb_to_bw(target_batch)
        # make channel dim = 1
        l1_loss = F.l1_loss(candidate_bw, target_bw, reduction="none").unsqueeze(1)
        return l1_loss
    def forward(self, candidate_latent_batch, noise_scale, reg_weight, pixel_weight):
        """
        Compute the loss as:
            Z_noisy = clamped(Z + scaled_noise) to max latent range
            X_noisy = netG(Z_noisy)
            Loss = pixel_weight * image_loss(X_noisy, adjacent_tile_constraints) + reg_weight * L1(Z_noisy)
        Args:
            candidate_latent_batch - Tensor [B, NZ]:
                A batch latent to be optimised. Each element in the batch belongs to a separate tile with its
                own constraints. Each element in the batch is matched with an element in the
                loss_masks and targets lists.
            noise_scale - int:
                Scale for added noise.
            reg_weight - int:
                Weight for the latent regularisation loss (how far the latent is off the center of standard Normal).
            pixel_weight - int:
                Weight for the pixel loss (boundary objective).
        Returns:
            loss_dict - Dict [ Tensor ]
                A dictionary containing losses. The 'loss' key yields the loss to perform backprop with.
        """
        # Add noise, with noise_scale weight
        noise = torch.randn_like(candidate_latent_batch) * noise_scale
        batch_noise_sum = torch.abs(noise).mean(dim=(1, 2, 3))
        candidate_latent_batch = candidate_latent_batch + noise
        candidate_latent_batch = torch.clamp(candidate_latent_batch, min=-self.latent_range, max=self.latent_range)
        # Regularisation loss is the sum of the absolute values of the latents
        # which is equivalent to the L1 distance to the mean of a Standard Normal
        batch_latent_reg_loss = torch.abs(candidate_latent_batch).mean(dim=(1, 2, 3))
        batch_weighted_latent_reg_loss = reg_weight * batch_latent_reg_loss
        # Forward pass through the frozen generator.
        batch_candidate_image = self.netG(candidate_latent_batch)
        batch_image_loss = []
        # Compare the pixel values with L1 distance (absolute distance) and mask for the boundary
        # we loop over lists of constraints (one sublist contains the constraints for one element in the batch), which
        # might be more than 1!
        for batch_idx, (targets, loss_masks) in enumerate(zip(self.targets, self.loss_masks)):
            image_loss = None  # just to initialise the addition cycle
            # There might be multiple targets for one image (multiple constraining sides)
            # t [3, H, W]
            # m [1, H, W]
            # batch_candidate_image[batch_idx].unsqueeze(0) [1, 3, H, W]
            # image_loss_i [1, C, H, W], C might be 1 or 3 depending on reduce_black_and_white
            for t, m in zip(targets, loss_masks):
                # reduce_black_and_white & opposite_transform are NOT mutually exclusive
                if self.reduce_black_and_white:
                    if self.opposite_transform:
                        image_loss_i = self.black_and_white_l1(batch_candidate_image[batch_idx].unsqueeze(0),
                                                               1.0 - t.unsqueeze(0))
                    else:
                        image_loss_i = self.black_and_white_l1(batch_candidate_image[batch_idx].unsqueeze(0),
                                                               t.unsqueeze(0))
                elif self.opposite_transform:
                    image_loss_i = F.l1_loss(batch_candidate_image[batch_idx].unsqueeze(0), 1.0 - t.unsqueeze(0),
                                             reduction="none")
                else:
                    image_loss_i = F.l1_loss(batch_candidate_image[batch_idx].unsqueeze(0), t.unsqueeze(0),
                                             reduction="none")
                # Init or add to existing (sum the masked losses over sides)
                if image_loss is None:
                    image_loss = image_loss_i * m
                else:
                    image_loss = image_loss + image_loss_i * m
            batch_image_loss.append(image_loss)
        # [B, C, W, H]
        batch_image_loss = torch.cat(batch_image_loss)
        # Mean [B, C, W, H] -> [B]
        batch_pixel_loss = batch_image_loss.mean(dim=(1, 2, 3))
        batch_weighted_pixel_loss = batch_pixel_loss * pixel_weight
        # Loss: weighted boundary term + weighted latent regulariser.
        batch_loss = batch_weighted_pixel_loss + batch_weighted_latent_reg_loss
        reduced_loss = batch_loss.mean()
        loss_dict = {
            "loss": reduced_loss,
            "batch_loss": batch_loss,
            "batch_pixel_loss": batch_pixel_loss,  # pixel loss
            "batch_weighted_pixel_loss": batch_weighted_pixel_loss,  # scaled pixel loss
            "batch_latent_reg_loss": batch_latent_reg_loss,  # reg
            "batch_weighted_latent_reg_loss": batch_weighted_latent_reg_loss,  # scaled reg
            "batch_noise_sum": batch_noise_sum,  # size of the noise
            "batch_candidate_image": batch_candidate_image
        }
        # Detach and copy to cpu all but 'loss' key which is used for backprop
        loss_dict_cpu = dict()
        for k, v in loss_dict.items():
            if k != "loss":
                loss_dict_cpu[k] = v.detach().cpu()
            else:
                loss_dict_cpu[k] = v
        return loss_dict_cpu
70600225937 | import turtle as t
import random as r
import datetime
# Turtle race: draw a 20-column track with tick marks, line up five coloured
# turtles on the left edge, advance each a random 1-10 steps per round, and
# announce the first one whose x-coordinate passes 250.
screen = t.Screen()
screen.setup(height=500, width=600)
# Invisible pen turtle used only for drawing the track.
t.Turtle(visible=False)
t.up()
t.speed(0)
t.goto(-250, 200)
# Number the columns along the top edge.
for i in range(21):
    t.write(i)
    t.forward(25)
x = -250
t.goto(-250, 200)
t.right(90)
# Draw dashed vertical lanes with a column label at the bottom of each.
for i in range(21):
    for j in range(10):
        t.down()
        t.forward(20)
        t.up()
        t.forward(10)
    t.up()
    t.forward(5)
    t.write(i)
    t.goto(x + (i + 1) * 25, 200)
all_turtle = []
y = [160, 100, 40, -20, -80]
colors = ['red', 'green', 'blue', 'yellow', 'black']
# Create the five racers at the start line, one per lane/colour.
for i in range(0, 5):
    p = t.Turtle(shape="turtle")
    p.penup()
    p.goto(x=-250, y=y[i])
    p.color(colors[i])
    for j in range(5):
        p.left(72)
    all_turtle.append(p)
'''
def random_walk(turtles):
    global run
    for turtle in turtles:
        turtle.forward(r.randint(1, 10))
        if turtle.xcor() > 250:
            win=turtle
            run = False
            #print("thắng")
        # print(f"rùa {all_turtle[colors[run]]} thắng")
'''
run = True
# Race loop: each round moves every turtle one random step.
while run:
    #random_walk(all_turtle)
    #global run
    for turtle in range(0,len(colors)):
        # NOTE(review): timestart is reset before EVERY single move, so the
        # "run time" printed below only measures the winner's final step,
        # not the whole race — confirm whether that is intended.
        timestart = datetime.datetime.now()
        all_turtle[turtle].forward(r.randint(1, 10))
        if all_turtle[turtle].xcor() > 250:
            win = turtle
            timeend=datetime.datetime.now()
            run = False
            #t.write(f"turtle {colors[win]} is winner",align="left")
# Announce the winner (win/timeend are set on the iteration that ended the race).
t.write(f" turtle {colors[win]} is winner run time: {timeend-timestart}",align="left")
screen.exitonclick()
t.mainloop()
'''
a=[]
while len(a)==0:
    for i in range(5):
        i = int(input())
        a.append(i)
a.sort()
if len(a)<2:
    print(a)
else:
    print(a[-1],a[-2])
'''
| mark3000-010701/python_base | bt_list_tuple.py | bt_list_tuple.py | py | 1,789 | python | en | code | 0 | github-code | 13 |
24283348455 | import pytest
from models.jellynote import UserId
from persist import users, UpdateError, InsertionError
from datetime import datetime
from random_utils import *
from fixtures import new_user
# Persistence-layer tests for the `users` repository.  `new_user` is a
# pytest fixture (see fixtures.py) that inserts a fresh random user.
def test_user_insert():
  # Inserting a creation request returns a populated, timestamped user.
  req = random_user_creation_request()
  user = users.insert(req)
  assert user is not None
  assert user.name == req.name
  assert user.email == req.email
  assert user.instruments == req.instruments
  assert isinstance(user.created_at, datetime)
  assert isinstance(user.updated_at, datetime)
def test_user_insert_conflict(new_user):
  # Re-using an existing email must raise InsertionError (unique constraint).
  req = UserCreationRequest(name=random_string(10), email=new_user.email, instruments=random_enum_list(Instrument))
  with pytest.raises(InsertionError):
    users.insert(req)
def test_list_all(new_user):
  # list_all honours its limit and includes the freshly inserted user.
  lst = users.list_all(10)
  assert len(lst) <= 10
  assert new_user in lst
def test_find(new_user):
  # find by id returns an equal user object.
  user = users.find(new_user.id)
  assert user == new_user
def test_find_if_none():
  # Unknown ids resolve to None rather than raising.
  user = users.find(UserId(0))
  assert user is None
def test_update(new_user):
  # Update applies only the non-None fields and bumps updated_at.
  req = random_user_update_request()
  updated_user = users.update(new_user.id, req)
  assert updated_user.id == new_user.id
  if req.name is not None:
    assert updated_user.name == req.name
  if req.email is not None:
    assert updated_user.email == req.email
  if req.instruments is not None:
    assert updated_user.instruments == req.instruments
  assert updated_user.updated_at > new_user.updated_at
def test_update_conflict(new_user):
  # Updating to another user's email must raise UpdateError.
  new_user_req = random_user_creation_request()
  second_user = users.insert(new_user_req)
  req = UserUpdateRequest(name=None, email=second_user.email, instruments=None)
  with pytest.raises(UpdateError):
    users.update(new_user.id, req)
def test_delete_user(new_user):
  # Deleted users are no longer findable.
  users.delete(new_user.id)
  u = users.find(new_user.id)
  assert u is None
def test_list_by_instrument(new_user):
  # Every returned user shares at least one instrument with the query.
  us = users.list_by_instruments(new_user.instruments)
  instruments_set = set(new_user.instruments)
  for u in us:
    assert not set(u.instruments).isdisjoint(instruments_set)
7783759926 | #Importing necessary libraries.
import socket
import subprocess
import time
import sys
import pyfiglet
# Clear the terminal before printing the banner.
subprocess.call('clear', shell=True)
#Creating a script banner.
Port_Scan_Banner = pyfiglet.figlet_format("PORT SCANNER")
print(Port_Scan_Banner)
time.sleep(1)
#Use of sockets module to take user input and use in script to talk to network and find host.
Remote_Server = input("Enter an IP address to scan: ")
target = socket.gethostbyname(Remote_Server)
#Brief explanation for user while scan is in process.
print("_" * 50)
print("Scanning the following host: " + target)
print("_" * 50)
# Probe TCP ports 1-3999; connect_ex returns 0 when the port accepts.
try:
    for port in range(1, 4000):
        # Bug fixed: the old loop only closed the socket when the port was
        # open, leaking one file descriptor per closed port.  The context
        # manager closes it unconditionally.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            result = s.connect_ex((target, port))
            if result == 0:
                print("Port {}: is open".format(port))
#Exception response in event of keyboard interrupt.
except KeyboardInterrupt:
    print("\n The scan was canceled.")
    sys.exit()
#Exception response in event of invalid IP
except socket.gaierror:
    print("\n Hostname could not be resolved.")
    sys.exit()
#Exception response in event of socket error.
except socket.error:
    print("\n No response.")
    sys.exit()
#End of script.
4766847001 | import sys
import os
from cx_Freeze import setup, Executable
# ADD FILES
# Non-code assets copied next to the frozen executable.
files = ['icon.ico','themes/']
# TARGET
# base="Win32GUI" suppresses the console window for this GUI application.
target = Executable(
    script="main.py",
    base="Win32GUI",
    icon="icon.ico"
)
# SETUP CX FREEZE
# NOTE(review): "Prroject Python" looks like a typo for "Project Python";
# it becomes the built product name, so confirm before changing it here.
setup(
    name = "Prroject Python",
    version = "1.0",
    description = "XỬ LÝ TÌNH HUỐNG KHUẨN CẤP",
    author = "Uy&Đạt&Ninh",
    options = {'build_exe' : {'include_files' : files}},
    executables = [target]
)
| NinhLuong/Emergency-Management-System | XuLyTinhHuongKhanCap/setup.py | setup.py | py | 458 | python | en | code | 0 | github-code | 13 |
12250585520 | """
# Definition for a Node.
class Node:
def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):
self.val = val
self.left = left
self.right = right
self.next = next
"""
class Solution:
    def connect(self, root: 'Node') -> 'Node':
        """Wire each node's ``next`` pointer to its right-hand neighbour on
        the same level (level-order traversal); the rightmost node of every
        level keeps ``next = None``.  Returns the (possibly None) root.
        """
        if root is None:
            return None
        level = [root]
        while level:
            # Link every node on this level to the one standing to its right.
            for left_node, right_node in zip(level, level[1:]):
                left_node.next = right_node
            # Collect the next level, skipping missing children.
            level = [child
                     for node in level
                     for child in (node.left, node.right)
                     if child is not None]
        return root
| wellslu/LeetCode-Python | medium/Populating_Next_Right_Pointers_in_Each_Node.py | Populating_Next_Right_Pointers_in_Each_Node.py | py | 833 | python | en | code | 3 | github-code | 13 |
8543095907 | '''
时间序列模型:ARIMA
'''
import pandas as pd
import numpy as np
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import acf, pacf
import statsmodels.tsa.stattools as st
from statsmodels.tsa.arima_model import ARIMA
import statsmodels.api as sm
from statsmodels.graphics.api import qqplot
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
def test_stationarity(timeseries):
    """ADF stationarity test plus rolling-statistics plot for *timeseries*.

    Prints the augmented Dickey-Fuller statistics and opens a blocking
    matplotlib window showing the series alongside its 12-point rolling
    mean and rolling standard deviation.
    """
    # ADF (augmented Dickey-Fuller) stationarity test
    dftest = adfuller(timeseries, autolag='AIC')
    dfoutput = pd.Series(dftest[0:4], index=['Test Statistic', 'p-value', '#Lags Used', 'Number of Observations Used'])
    # p-value: probability, under the null hypothesis, of observing a result
    # at least as extreme as the sample statistic
    for key, value in dftest[4].items():
        dfoutput['Critical Value ({})'.format(key)] = value
    print(dfoutput)
    # Rolling mean and variance over a 12-observation window
    rolmean = timeseries.rolling(window=12).mean()
    rolstd = timeseries.rolling(window=12).std()
    # Plot the rolling statistics
    plt.figure()
    plt.plot(timeseries, color='blue', label='原始数据')
    plt.plot(rolmean, color='red', label='滑动均值')
    plt.plot(rolstd, color='black', label='滑动方差')
    plt.legend(loc='best')
    plt.show()
def decompose(timeseries):
    """Seasonal decomposition of *timeseries* into trend, seasonal and
    residual components (statsmodels ``seasonal_decompose``)."""
    # Returns three parts: trend, seasonal and residual
    decomposition = seasonal_decompose(timeseries)
    trend = decomposition.trend
    seasonal = decomposition.seasonal
    residual = decomposition.resid
    # Plotting kept for reference:
    # plt.subplot(411)
    # plt.plot(ts_log, label='Original')
    # plt.legend(loc='best')
    # plt.subplot(412)
    # plt.plot(trend, label='Trend')
    # plt.legend(loc='best')
    # plt.subplot(413)
    # plt.plot(seasonal, label='Seasonality')
    # plt.legend(loc='best')
    # plt.subplot(414)
    # plt.plot(residual, label='Residuals')
    # plt.legend(loc='best')
    # plt.tight_layout()
    # plt.show()
    return trend, seasonal, residual
if __name__ == '__main__':
    '''setp1:获取时间序列样本集'''
    # Step 1: load the monthly passenger series, indexed by parsed month.
    dateparse = lambda dates: pd.datetime.strptime(dates, '%Y-%m')
    df = pd.read_csv('./data/arima_test1.csv', parse_dates=['Month'], index_col='Month',date_parser=dateparse)
    split_idx = 10  # size of the held-out split (last 10 observations)
    ts_train = df['#Passengers'][0:-split_idx]  # training set
    ts_test = df['#Passengers'][-split_idx:]  # test set
    '''setp2:取对数和一阶差分,通过滑动均值和方差、以及ADF单根检验差分序列是否满足稳定性'''
    # Step 2: log-transform and first-difference to stabilise the series.
    ts_train_log = np.log(ts_train)
    ts_train_log_diff = ts_train_log.diff(1)
    ts_train_log_diff.dropna(inplace=True)
    # trend, seasonal, residual = decompose(ts_train_log)
    # residual.dropna(inplace=True)
    # test_stationarity(ts_train_log_diff)
    '''setp3:模型定阶,画出ACF和PACF的图像'''
    # Step 3: order selection via ACF/PACF (plots kept below for reference).
    lag_acf = acf(ts_train_log_diff, nlags=20)
    lag_pacf = pacf(ts_train_log_diff, nlags=20, method='ols')
    # plt.subplot(121)
    # plt.plot(lag_acf)
    # plt.axhline(y=0, linestyle='--', color='gray')
    # plt.axhline(y=-1.96 / np.sqrt(len(ts_train_log_diff)), linestyle='--', color='gray')
    # plt.axhline(y=1.96 / np.sqrt(len(ts_train_log_diff)), linestyle='--', color='gray')
    # plt.title('Autocorrelation Function')
    # plt.subplot(122)
    # plt.plot(lag_pacf)
    # plt.axhline(y=0, linestyle='--', color='gray')
    # plt.axhline(y=-1.96/np.sqrt(len(ts_train_log_diff)), linestyle='--', color='gray')
    # plt.axhline(y=1.96/np.sqrt(len(ts_train_log_diff)), linestyle='--', color='gray')
    # plt.title('Partial Autocorrelation Function')
    # plt.tight_layout()
    # plt.show()
    # AIC/BIC criteria: brute-force order search
    # order = st.arma_order_select_ic(ts_train_log_diff, max_ar=10, max_ma=10, ic=['aic', 'bic', 'hqic'])
    # print(order.bic_min_order) # (10, 7)
    '''setp4:训练ARIMA模型'''
    # Step 4: fit ARIMA(10, 1, 7) on the log series.
    model = ARIMA(ts_train_log, order=(10, 1, 7)).fit(disp=-1)
    '''setp5:检验模型学习效果'''
    # Step 5: residual diagnostics, kept commented for reference.
    # Model check: residual kernel density should look normal
    # pd.DataFrame(model.resid).plot(kind='kde')
    # plt.show()
    # Model check: QQ plot — are the residuals white noise?
    # fig = plt.figure()
    # ax = fig.add_subplot(111)
    # fig = qqplot(model.resid, line='q', ax=ax, fit=True)
    # plt.show()
    # Model check: Durbin-Watson statistic (first-order autocorrelation)
    # DW = sm.stats.durbin_watson(model.resid.values)
    # print('一阶自相关DW={}'.format(np.round(DW, 2)))
    # Model check: fitted vs. original differenced series
    # plt.plot(ts_train_log_diff, label='原始差分序列', color='#7B68EE')
    # plt.plot(model.fittedvalues, label='拟合差分序列', color='#FF4040')
    # plt.title('拟合RMSE:{}'.format(np.round(np.sum((model.fittedvalues - ts_train_log_diff) ** 2), 2)))
    # plt.legend(loc='best')
    # plt.show()
    '''setp6:模型测试效果'''
    # Step 6: invert the transforms to recover the fitted series:
    # log_diff -> log (cumulative sum + first value) -> original via exp.
    fit_ARIMA_log_diff = pd.Series(model.fittedvalues, index=ts_train_log.index, copy=True)
    fit_ARIMA_log_diff_cumsum = fit_ARIMA_log_diff.cumsum()
    fit_ARIMA_log = pd.Series(ts_train_log.iloc[0], index=ts_train_log.index)
    fit_ARIMA_log = fit_ARIMA_log.add(fit_ARIMA_log_diff_cumsum, fill_value=0)
    fit_ARIMA_log.dropna(inplace=True)
    fit_ARIMA = np.exp(fit_ARIMA_log)
    # Forecast the held-out test range and undo the log transform.
    predict_date = pd.date_range(start=fit_ARIMA_log.index[-1], periods=len(ts_test)+1, freq='MS')
    forecast = model.forecast(len(ts_test))[0].tolist()  # out-of-sample forecast (test set)
    predict_ARIMA_log = pd.Series([fit_ARIMA_log[-1]] + forecast, index=predict_date, copy=True)
    predict_ARIMA_log.dropna(inplace=True)
    predict_ARIMA = np.exp(predict_ARIMA_log)
    plt.plot(df['#Passengers'], label='原始序列', color='#7B68EE')
    plt.plot(fit_ARIMA, label='拟合序列', color='#FF4040')
    plt.plot(predict_ARIMA, label='预测序列', color='#3CB371')
    fit_RMSE = np.round(np.sqrt(np.sum((fit_ARIMA - ts_train) ** 2) / len(ts_train)), 2)
    predict_RMSE = np.round(np.sqrt(np.sum((predict_ARIMA - ts_test) ** 2) / len(ts_test)), 2)
    plt.title('拟合RMSE:{},预测RMSE:{}'.format(fit_RMSE, predict_RMSE))
    plt.legend(loc='best')
    plt.show()
| privateEye-zzy/ARIMA | arima.py | arima.py | py | 6,505 | python | en | code | 1 | github-code | 13 |
26562964553 | # pylint: disable=maybe-no-member
import base64
import json
from models import session, ElectionRound, Person, Choice
import api.response_helper as Response
#Helper
def model_as_dict(self):
    """Map a SQLAlchemy model instance to ``{column_name: value}`` using
    its table metadata."""
    columns = self.__table__.columns
    return {column.name: getattr(self, column.name) for column in columns}
def _get_electionround_by_id(elec_round_id: int) -> ElectionRound:
    '''Look up an ElectionRound by id.

    Returns the ElectionRound on success; on failure returns an HTTP-style
    Response object (wrong_format / database_error / ressource_not_found).

    NOTE(review): returning truthy Response objects on error means callers
    that test ``if not elec_round`` will treat the error response as a
    valid round — consider returning None/raising instead; confirm how
    Response objects evaluate as booleans.
    '''
    # Elec_round_id should be int
    try:
        elec_round_id = int(elec_round_id)
    except ValueError:
        return Response.wrong_format({"message":"elec_round_id has to be an int (base 10)."})
    # Get ElectionRound object from the DB.
    # NOTE(review): the bare `except` also swallows KeyboardInterrupt etc.;
    # a narrower SQLAlchemy exception would be safer.
    try:
        elec_round = session.query(ElectionRound).filter_by(
            id=elec_round_id
        ).first()
        session.commit()
    except:
        return Response.database_error()
    # Handle invalid election round
    if elec_round is None:
        return Response.ressource_not_found({"message":"No electionround for this id."})
    return elec_round
def get_person_by_id(personid :int) -> Person:
    '''Look up a Person by id; returns the Person, or an HTTP-style
    Response object (database_error / ressource_not_found) on failure.

    NOTE(review): like _get_electionround_by_id, the error paths return
    truthy Response objects, and "persion" in the message below is a typo
    in a runtime string (left untouched here).
    '''
    try:
        person = session.query(Person).filter_by(
            id=personid
        ).first()
        session.commit()
    except:
        return Response.database_error()
    if person is None:
        return Response.ressource_not_found({ "message" : "No persion for this id."})
    return person
def place_vote(data:dict):
    '''Record one vote: increment the chosen Choice's counter and mark the
    person as having voted in the election round.

    Expects ``choice_id``, ``election_round_id`` and ``person_id`` in
    *data*; returns the updated choice as a dict via Response.ok, or an
    error Response.

    NOTE(review): the counter update is a read-modify-write with no row
    lock, and the counter commit and persons_voted commit are separate
    transactions — a crash or concurrent request in between can double- or
    mis-count; confirm whether this endpoint can run concurrently.
    '''
    # Validate presence of all three required keys.
    if not 'choice_id' in data:
        return Response.wrong_format({'message': 'choiceid is required for voting'})
    if not 'election_round_id' in data:
        return Response.wrong_format({'message': 'election_round_id required for voting'})
    if not 'person_id' in data:
        return Response.wrong_format({'message': 'person_id required for voting'})
    try:
        election_round_id = int(data['election_round_id'])
        person_id = int(data['person_id'])
        choice_id = int(data['choice_id'])
    except:
        return Response.wrong_format({"message":"ids have to be an int base 10"})
    # NOTE(review): the helper returns a truthy Response on error, so this
    # `if not elec_round` guard can never fire for those paths.
    elec_round = _get_electionround_by_id(election_round_id)
    if not elec_round:
        return Response.ressource_not_found({"message":"No electionround for this id."})
    person = get_person_by_id(person_id)
    if person in elec_round.persons_voted:
        return Response.server_error({"message":"Person already voted"})
    # Resolve the chosen option.
    try:
        choice = session.query(Choice).filter_by(id = choice_id).first()
        session.commit()
        if choice is None:
            return Response.ressource_not_found({ "message" : "No Choice with this id."})
    except:
        print("no choice")
        return Response.database_error()
    if choice not in elec_round.choices:
        return Response.server_error({"message":"Electionround has no Choice with that ID"})
    # Count the vote.
    try:
        choice.counter = choice.counter + 1
        session.commit()
    except:
        return Response.database_error()
    # Record that this person has voted in the round.
    try:
        elec_round.persons_voted.append(person)
        session.commit()
    except:
        return Response.database_error()
    return Response.ok(model_as_dict(choice))
def set_vote(data: dict):
    '''Mark a person as having voted in the given election round (without
    counting a choice).  Expects ``elec_round_id`` and ``person_id`` in
    *data*; returns Response.ok on success or an error Response.'''
    if not 'elec_round_id' in data:
        return Response.wrong_format({'message': 'elec_round_id required'})
    if not 'person_id' in data:
        return Response.wrong_format({'message': 'person_id required'})
    try:
        elec_round_id = int(data['elec_round_id'])
        person_id = int(data['person_id'])
    except ValueError:
        return Response.wrong_format({ "message" : "ids have to be an int (base 10)."})
    # Get election_round
    # NOTE(review): the helper returns a truthy Response on error, so the
    # `if not elec_round` guard below cannot fire for those paths.
    try:
        elec_round = _get_electionround_by_id(elec_round_id)
        if not elec_round:
            return Response.ressource_not_found({"message":"No electionround for this id."})
    except:
        return Response.database_error()
    # Get person
    try:
        person = session.query(Person).filter_by(
            id=person_id
        ).first()
        session.commit()
        if person is None:
            return Response.ressource_not_found({ "message" : "No persion for this id."})
        # Add person to election_round
        elec_round.persons_voted.append(person)
        session.commit()
    except:
        return Response.database_error()
    return Response.ok({"message" : "OK"})
def get_all_persons_who_voted(elec_round_id: int):
    '''Return all persons who have already participated in the election
    round, as a JSON string of ``{"id", "name"}`` dicts via Response.ok.

    NOTE(review): this passes ``json.dumps(ret)`` to Response.ok while
    other endpoints pass plain dicts — confirm which Response.ok expects.
    '''
    try:
        elec_round = _get_electionround_by_id(elec_round_id)
        if not elec_round:
            return Response.ressource_not_found({ "message" : "No election round for this id."})
    except:
        return Response.database_error()
    # Build and return the serialisable list of voters.
    ret = []
    for person in elec_round.persons_voted:
        ret.append(
            {
                "id" : person.id,
                "name" : person.name
            }
        )
    return Response.ok(json.dumps(ret))
def get_all_persons_who_have_not_voted(elec_round_id: int) -> dict:
    """Return, as a JSON response, every present person who has not voted.

    Warning: this is only accurate at the time of the election round, since
    people can leave (altering ``is_present``) after the round.
    """
    try:
        elec_round = _get_electionround_by_id(elec_round_id)
        if not elec_round:
            return Response.ressource_not_found({ "message" : "No election round for this id."})
    except:
        return Response.database_error()
    already_voted = elec_round.persons_voted
    # Everyone currently flagged as present.
    try:
        present = session.query(Person).filter(
            Person.is_present == True).all()
        session.commit()
    except:
        return Response.database_error()
    # Present people minus those who already voted, in query order.
    missing = [{"id": person.id, "name": person.name}
               for person in present if person not in already_voted]
    return Response.ok(json.dumps(missing))
| consultINCode/digitales_Abstimmtool | Backend/api/voteapi.py | voteapi.py | py | 6,455 | python | en | code | 2 | github-code | 13 |
74461853777 | import datetime
from behave import *
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
@when(u'I click on "Sort by" filed')
def step_impl(context):
    """Open the sort dropdown once it becomes clickable."""
    WebDriverWait(context.driver, 10).until(
        EC.element_to_be_clickable((By.CSS_SELECTOR, '#simple-select'))).click()
@when(u'I click on "Date: {order} first"')
def step_impl(context, order):
    """Pick the date sort option; 'highest first' maps to a '-' prefixed key."""
    prefix = '-' if order == 'highest' else ''
    WebDriverWait(context.driver, 10).until(
        EC.element_to_be_clickable(
            (By.CSS_SELECTOR, f'li[data-value="{prefix}start_date"]'))).click()
@when(u'I click on "Duration: {order} first"')
def step_impl(context, order):
    """Pick the duration sort option; 'highest first' maps to a '-' prefixed key."""
    prefix = '-' if order == 'highest' else ''
    WebDriverWait(context.driver, 10).until(
        EC.element_to_be_clickable(
            (By.CSS_SELECTOR, f'li[data-value="{prefix}duration"]'))).click()
@when(u'I click on "Price: {order} first"')
def step_impl(context, order):
    """Pick the price sort option; 'highest first' maps to a '-' prefixed key."""
    prefix = '-' if order == 'highest' else ''
    WebDriverWait(context.driver, 10).until(
        EC.element_to_be_clickable(
            (By.CSS_SELECTOR, f'li[data-value="{prefix}price"]'))).click()
@when(u'I click on "Available seats: {order} first"')
def step_impl(context, order):
    """Pick the seats sort option; 'highest first' maps to a '-' prefixed key."""
    prefix = '-' if order == 'highest' else ''
    WebDriverWait(context.driver, 10).until(
        EC.element_to_be_clickable(
            (By.CSS_SELECTOR, f'li[data-value="{prefix}available_seats"]'))).click()
@then(u'I should see rides sorted by {parameter} - {order}')
def step_impl(context, parameter, order):
    """Assert that the ride cards appear sorted by *parameter* in *order*.

    Extracts the sort key from each ride card, then compares the extracted
    list against its sorted version.
    """
    rides = context.driver.find_elements(By.CSS_SELECTOR, "div[class*=css-l0b0zb]")
    rides_params = []
    for ride in rides:
        if parameter == 'date':
            # First <h4> holds the date in DD.MM.YYYY format.
            ride_elem = ride.find_elements(By.TAG_NAME, 'h4')[0]
            param = datetime.datetime.strptime(ride_elem.text, "%d.%m.%Y").date()
        elif parameter == 'duration':
            # assumes span text like "<H>h <M>" — TODO confirm the minutes
            # token parses as int (a trailing unit such as "30min" would raise).
            ride_duration = ride.find_elements(By.TAG_NAME, 'span')[0]
            hours = ride_duration.text.split("h")[0]
            minutes = ride_duration.text.split(" ")[1]
            print(f'hours: {hours} and minutes: {minutes}')
            param = int(hours) * 60 + int(minutes)
        elif parameter == 'price':
            # assumes a two-character currency suffix is stripped by [:-2] — verify.
            ride_duration = ride.find_elements(By.TAG_NAME, 'span')[2]
            param = float(ride_duration.text[:-2])
        else:
            # Available seats: compares only the FIRST character of the text,
            # so counts >= 10 would sort lexicographically — TODO confirm.
            ride_duration = ride.find_elements(By.TAG_NAME, 'span')[1]
            param = ride_duration.text[0]
        rides_params.append(param)
    if order == 'decreasing':
        assert rides_params == sorted(rides_params, reverse=True)
    else:
        assert rides_params == sorted(rides_params)
| LeviSforza/TraWell | TraWell-tests/features/steps/sorting_found_rides.py | sorting_found_rides.py | py | 2,912 | python | en | code | 0 | github-code | 13 |
12880605144 | import pickle
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import string
import pickle
import os
import pickle
import nltk
from nltk import word_tokenize,sent_tokenize
nltk.download('punkt')
nltk.download('stopwords')
# Work relative to this script's directory so "bigram.pickle" is found no
# matter where the interpreter was launched from.
# BUGFIX: os.chdir() takes a positional path only; the original
# `os.chdir(here=...)` raised TypeError at import time.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Load the pre-built bigram inverted index (bigram -> sorted posting list),
# closing the file handle afterwards (the original leaked it).
with open("bigram.pickle", 'rb') as pickle_off1:
    inverted_index = pickle.load(pickle_off1)
def lower_str(para):
    """Return *para* lower-cased."""
    lowered = para.lower()
    return lowered
def get_tokenize(para):
    """Split *para* into word tokens using NLTK's word_tokenize."""
    tokens = word_tokenize(para)
    return tokens
def get_stop_word(word_token):
    """Return *word_token* with English stop words removed, order preserved."""
    stop_words = set(stopwords.words('english'))
    return [token for token in word_token if token not in stop_words]
def merging(L1, L2, L3, swt):
    """Merge-walk two sorted posting lists.

    Common elements are appended to ``L3`` (mutated in place).  Returns
    ``(sorted union, steps)`` when ``swt == 1``, otherwise ``(L3, steps)``
    where ``steps`` counts while-loop iterations (comparisons performed).
    """
    i = j = steps = 0
    while i < len(L1) and j < len(L2):
        steps += 1
        left, right = L1[i], L2[j]
        if left == right:
            L3.append(left)
            i += 1
            j += 1
        elif left < right:
            i += 1
        else:
            j += 1
    if swt == 1:
        return sorted(set(L1) | set(L2)), steps
    return L3, steps
def AND(L1, L2):
    """Intersect two sorted posting lists.

    Returns ``(intersection, comparisons)`` exactly as produced by
    ``merging``.  The original's dead locals (cmp, i, j, k) were removed;
    none of them was ever read.
    """
    return merging(L1, L2, [], 0)
def OR(L1, L2):
    """Union of two sorted posting lists; returns (union, comparisons)."""
    return merging(L1, L2, [], 1)
def and_not(L1, L2, All):
    """Posting list for ``L1 AND NOT L2`` over the full corpus ``All``."""
    complement = sorted(set(All) - set(L2))
    return merging(L1, complement, [], 0)
def and_or(L1, L2, All):
    """Posting list for ``L1 OR NOT L2`` over the full corpus ``All``.

    The two debug prints of the original are preserved.
    """
    print(All)
    print(L2)
    complement = sorted(set(All) - set(L2))
    return merging(L1, complement, [], 1)
# print(inverted_index)
# num=int(input())
# # print()
# # print()
# # print()
def find_bigram_query(sent, ind):
    """Answer a phrase query against the module-level bigram inverted index.

    Normalises *sent* (lower-case, punctuation stripped, stop words removed),
    forms word bigrams, intersects their posting lists with ``AND`` and
    returns a two-line textual report: the retrieved-document count and the
    comma-separated document names.

    :param sent: free-text query string.
    :param ind: label for this query used in the report lines.

    Fixes over the original: the count line concatenated ``str + int``
    (``... + len(l1)``) and raised TypeError; the builtin ``str`` was
    shadowed by an accumulator variable; thousands of no-op filler loops
    (``for k in range(5000): ki=1``) were removed.
    """
    answer = []
    # Normalise the query text.
    content = sent.lower().translate(str.maketrans('', '', string.punctuation))
    final_tokens = get_stop_word(get_tokenize(content))
    # A single surviving token cannot form a bigram; pad with a blank so the
    # loop below still yields one (mirroring the original behaviour).
    if len(final_tokens) == 1:
        final_tokens.append(" ")
    bigrams_token = [final_tokens[i] + " " + final_tokens[i + 1]
                     for i in range(len(final_tokens) - 1)]
    # Intersect the posting lists of all bigrams; a missing bigram has an
    # empty posting list, which empties the whole intersection.
    l1 = inverted_index.get(bigrams_token[0], [])
    for bigram in bigrams_token[1:]:
        l2 = inverted_index.get(bigram, [])
        l1, _ = AND(l1, l2)
    answer.append(
        f"Number of documents retrieved for query ,{ind} using the bigram "
        f"inverted index: {len(l1)}")
    print(l1)
    names = ",".join("carnfield" + doc_id for doc_id in l1)
    answer.append(
        f"Names of documents retrieved for query {ind} using bigram "
        f"inverted index:" + names)
    return answer
| impatientwolf/CSE508_Winter2023_A1_67 | q2.py | q2.py | py | 3,494 | python | en | code | 0 | github-code | 13 |
38240545176 | # coding=utf-8
# 本工具用于紧凑数据库id,使之连续。
# 本工具基本没用,只用于防备出现极端情况。
# 需要安装sqlalchemy
import sys
import os
from sqlalchemy import create_engine
from sqlalchemy import Table, Column, Integer, String, MetaData
from sqlalchemy.sql import select
def doit(src, desc):
    """Copy every row of ``info_tbl`` from SQLite file *src* into a fresh
    SQLite file *desc* in ascending id order, letting the new database
    assign consecutive primary keys, then VACUUM the result."""
    # read: open the source database and select all rows ordered by id.
    engine = create_engine('sqlite:///' + src)
    metadata = MetaData()
    info_tbl = Table('info_tbl', metadata,
                     Column('id', Integer, primary_key=True),
                     Column('source_id', String, index=True),
                     Column('suid', String, index=True),
                     Column('fetch_date', Integer, index=True, nullable=False),
                     Column('title', String, nullable=False),
                     Column('url', String),
                     Column('author', String),
                     Column('summary', String),
                     Column('pub_date', String)
                     )
    conn = engine.connect()
    s = select([info_tbl]).order_by(info_tbl.c.id.asc())
    result = conn.execute(s)

    # write: recreate the same schema in the destination database.
    e2 = create_engine('sqlite:///' + desc)
    m2 = MetaData()
    info_tbl = Table('info_tbl', m2,
                     Column('id', Integer, primary_key=True),
                     Column('source_id', String, index=True),
                     Column('suid', String, index=True),
                     Column('fetch_date', Integer, index=True, nullable=False),
                     Column('title', String, nullable=False),
                     Column('url', String),
                     Column('author', String),
                     Column('summary', String),
                     Column('pub_date', String)
                     )
    m2.create_all(e2)
    c2 = e2.connect()

    # Re-insert without the old id so SQLite assigns a fresh, gap-free key.
    lst = []
    for i in result.fetchall():
        d = {'source_id': i[1],
             'suid': i[2],
             'fetch_date': i[3],
             'title': i[4],
             'url': i[5],
             'author': i[6],
             'summary': i[7],
             'pub_date': i[8]}
        lst.append(d)
    c2.execute(info_tbl.insert(), lst)

    # VACUUM: reclaim free pages so the new file is as small as possible.
    c2.execute('VACUUM')
def print_tip():
    """Print the (Chinese) usage instructions for this tool."""
    lines = (
        '本工具用于紧凑数据库主键(info_tbl的id字段),使之变得连续。用法:',
        'compact_db_id.py <已有数据库文件名> <新数据库文件名>',
        '本工具必须在数据库文件的目录下执行,<已有数据库>必须存在,<新数据库>必须不存在。',
    )
    print('\n'.join(lines))
def main():
    """CLI entry point: validate the two path arguments, compact the
    database, then report old and new file sizes."""
    if len(sys.argv) != 3:
        print_tip()
        return
    src, dst = sys.argv[1], sys.argv[2]
    if not os.path.isfile(src):
        print('已有数据库必须存在')
        return
    if os.path.isfile(dst):
        print('新数据库必须不存在')
        return
    doit(src, dst)
    old_size = format(os.path.getsize(src), ',')
    new_size = format(os.path.getsize(dst), ',')
    print('完成,旧文件 %s字节,新文件 %s字节。' % (old_size, new_size))


if __name__ == '__main__':
    main()
| animalize/infopi | src/compact_db_id.py | compact_db_id.py | py | 3,173 | python | zh | code | 73 | github-code | 13 |
6660690302 | ###################################################################
## Written by Eli Pugh and Ethan Shen ##
## {epugh}, {ezshen} @stanford.edu ##
## This file contains tools to make computing directed ##
## information faster with matrices. ##
## Use 'compute_DI_MI_mat()' to compute the DI between each ##
## pair of rows in a matrix ##
###################################################################
import numpy as np
from tqdm import tqdm
from .ctwalgorithm import ctwalgorithm
from .ctwentropy import ctwentropy
from .compute_DI_MI import compute_DI_MI
#==============================================================================
# Function 'compute_mat_px' uses the CTW algorithm to find a universal
# probability assignment for each row of X
# Inputs:
# X: matrix of input sequences
# Nx: Alphabet size of X
# D: Depth of the CTW Algorithm tree
def compute_mat_px(X, Nx, D):
    """Run the CTW algorithm on every row of X and return the per-row
    probability assignments as a list (progress shown via tqdm)."""
    return [ctwalgorithm(row, Nx, D) for row in tqdm(X)]
#==============================================================================
# Function 'compute_mat_pxy' uses the CTW algorithm to find a universal
# probability assignment for each pair of rows of X
# Inputs:
# X: matrix of input sequences
# Nx: Alphabet size of X
# D: Depth of the CTW Algorithm tree
def compute_mat_pxy(X, Nx, D):
    """Run CTW on the joint sequence of every ordered pair of rows of X.

    Entry (i, j) is computed from the combined-alphabet sequence
    ``X[i] + Nx * X[j]`` over alphabet size ``Nx**2``; the diagonal is
    skipped and stays 0.
    NOTE(review): ``Pxy`` is a scalar float matrix while ``ctwalgorithm``
    appears to return a sequence in ``compute_mat_px`` — confirm this
    scalar-cell assignment is intended.
    """
    n = len(X)
    Pxy = np.zeros((n,n))
    for i in tqdm(range(n)):
        for j in tqdm(range(n)):
            if i == j:
                continue
            # Encode the pair as a single sequence over the product alphabet.
            XY=X[i]+Nx*X[j]
            Pxy[i,j] = ctwalgorithm(XY, Nx**2, D)
    return Pxy
#==============================================================================
# Function 'compute_DI_mat' takes in a matrix X and computes pairwise
# directed information between each of the rows of X
# DI[i,j] is the directed information I(X[i]->X[j])
# Inputs:
# X: matrix of input sequences
# Nx: Alphabet size of X
# D: Depth of the CTW Algorithm tree
def compute_DI_MI_mat(X, Nx, D, start_ratio, alg):
    """Compute pairwise directed information between all rows of X.

    Returns (DI, rev_DI, MI) matrices where DI[i, j] is the final value of
    the directed-information estimate I(X[i] -> X[j]); rev_DI and MI hold
    the reverse DI and mutual information, respectively.
    NOTE(review): the diagonal (i == j) is evaluated here even though
    compute_mat_pxy leaves Pxy[i, i] at 0 — confirm that is intended.
    """
    X = np.array(X)
    DI = np.zeros((X.shape[0], X.shape[0]))
    rev_DI = np.zeros((X.shape[0], X.shape[0]))
    MI = np.zeros((X.shape[0], X.shape[0]))
    # Precompute marginal and pairwise CTW probability assignments.
    Px = compute_mat_px(X, Nx, D)
    Pxy = compute_mat_pxy(X, Nx, D)
    for i in tqdm(range(len(X))):
        for j in range(len(X)):
            prob = ( Px[i],Px[j], Pxy[i,j] )
            di, rev_di, mi = compute_DI_MI(X[i], X[j], Nx, D, start_ratio, prob=prob, alg=alg)
            # Keep only the final (cumulative) estimate of each curve.
            DI[i,j] = di[-1]
            rev_DI[i,j] = rev_di[-1]
            MI[i,j] = mi[-1]
    return DI, rev_DI, MI
| elipugh/directed_information | directed_information/fast_mat_DI.py | fast_mat_DI.py | py | 2,692 | python | en | code | 2 | github-code | 13 |
7524223349 | from django.urls import include, path
from django.conf.urls.static import static
from django.conf import settings
from . import views
# URL namespace for reverse() lookups.
app_name = 'opensim-viewer'

# Route table: DRF ViewSet actions are bound explicitly per HTTP method.
urlpatterns = [
    # Get list of users.
    path("users/", views.UserViewSet.as_view({'get': 'list'}), name="UserViewSet"),
    # Get specific model record.
    path("models/<int:id>/", views.ModelRetrieve.as_view({'get': 'retrieve_model_by_id'}), name="ModelRetrieveById"),
    # Get specific model only gltf file link.
    path("models/viz/<int:id>/", views.ModelRetrieve.as_view({'get': 'retrieve_model_viz_by_id'}), name="ModelRetrieveById"),
    # Get default model gltf file link.
    path("models/viz/default/", views.ModelRetrieve.as_view({'get': 'retrieve_default_model_gltf'}), name="ModelRetrieveById"),
    # Get list of models.
    path("models/", views.ModelViewSet.as_view({'get': 'list'}), name="ModelViewSet"),
    # Sign up by creating a user.
    path("sign_up/", views.UserCreate.as_view({'post': 'create_user'}), name="UserCreate"),
    # Create a new model by uploading a gltf file.
    path("create_model/", views.ModelCreate.as_view({'post': 'create_model'}), name="ModelCreate"),
    # Upload file, create model in backend and return url of gltf resulting from upload
    path("upload_file/", views.ModelCreate.as_view({'post': 'upload_file'}), name="FileUpload"),
    # Retrieve information and file of an existing model.
    path("retrieve_model/", views.ModelRetrieve.as_view({'post': 'retrieve_model'}), name="ModelRetrieve"),
    # Login with user and password.
    path('login/', views.Login.as_view({'post': 'login_view'}), name='login'),
    # Logout with user and password.
    path('logout/', views.Logout.as_view({'post': 'logout_view'}), name='logout'),
]

# Serve static assets (effective only when DEBUG serving is appropriate).
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| opensim-org/opensim-viewer | src/backend/backend/backend/urls.py | urls.py | py | 1,840 | python | en | code | 7 | github-code | 13 |
47944432344 | import requests
# import yagmail
from pyquery import PyQuery
from mymodule import stats_word
def stats(url):
    """Fetch a WeChat article at *url* and return its top-100 word
    statistics concatenated into a single string."""
    html = requests.get(url).text
    content = PyQuery(html)('#js_content').text()
    top_words = stats_word.stats_text(content, 100)
    return ''.join(map(str, top_words))
| heima2019/selfteaching-python-camp | 19100101/lidong2119/d11_training1.py | d11_training1.py | py | 355 | python | en | code | null | github-code | 13 |
72571426577 | from datetime import datetime
from sqlalchemy import select, and_, func, desc, null
from schema import *
class DBException(Exception):
    """Raised by the query helpers in this module when requested data
    is missing."""
    pass
async def select_or_insert(connection, query_select, field, query_insert, create_if_none=True):
    """Return column *field* of the first row of *query_select*.

    When no row matches and *create_if_none* is true, *query_insert* is
    executed inside a nested transaction and the new primary key is
    returned; otherwise None is returned.
    """
    ds = await connection.execute(query_select)
    if ds.rowcount:
        result = (await ds.first())[field]
    else:
        if create_if_none:
            # Nested transaction so the insert commits atomically.
            async with connection.begin_nested() as trans:
                result = (await connection.execute(query_insert)).inserted_primary_key[0]
                await trans.commit()
        else:
            result = None
    return result
async def get_or_create_entity_type(connection, type_name, create_if_none=True):
    """Return the id of entity type *type_name* (compared lower-cased),
    inserting it first when absent and *create_if_none* is set."""
    normalized = type_name.lower()
    select_q = select(
        [entity_type.c.id.label("type_id")]
    ).select_from(entity_type).where(entity_type.c.name == normalized)
    insert_q = entity_type.insert().values(name=normalized)
    return await select_or_insert(connection, select_q, "type_id", insert_q, create_if_none)
async def get_or_create_entity(connection, type_id, token, create_if_none=True):
    """Return the id of the entity identified by (*type_id*, *token*),
    inserting it first when absent and *create_if_none* is set."""
    select_q = select(
        [entity.c.id.label("entity_id")]
    ).select_from(entity).where(
        and_(entity.c.type == type_id, entity.c.token == token)
    )
    insert_q = entity.insert().values(type=type_id, token=token)
    return await select_or_insert(connection, select_q, "entity_id", insert_q, create_if_none)
async def get_or_create_user(connection, token, create_if_none=True):
    """Return the id of the user owning *token*, inserting the user first
    when absent and *create_if_none* is set."""
    select_q = select(
        [user.c.id.label("user_id")]
    ).select_from(user).where(user.c.token == token)
    insert_q = user.insert().values(token=token)
    return await select_or_insert(connection, select_q, "user_id", insert_q, create_if_none)
async def get_user_id_by_token(connection, token):
    """Return the id of the user owning *token*, or None when unknown.

    Uses ``connection.scalar()`` — already used elsewhere in this module —
    instead of the original rowcount/first() pair; a missing row yields
    None either way.
    """
    query_select = select([
        user.c.id.label("user_id")
    ]).select_from(
        user
    ).where(
        user.c.token == token
    )
    return await connection.scalar(query_select)
async def add_or_update_comment_text(connection, comment_id, text, text_hash):
    """Store a new text revision for comment *comment_id*.

    The hash of the comment's newest stored revision is compared with
    *text_hash*; when they differ a new ``comment_text`` row is inserted
    and its id returned, otherwise nothing is written and None is returned.
    """
    # Id of the newest revision for this comment.
    subquery = select([
        func.max(comment_text.c.id).label("max_id")
    ]).select_from(
        comment_text
    ).where(
        comment_text.c.comment == comment_id
    ).alias("comment_text_max_id")
    # Hash stored with that newest revision.
    query = select([
        comment_text.c.hash.label("hash")
    ]).select_from(
        comment_text
    ).where(
        comment_text.c.id == subquery.c.max_id
    )
    async with connection.begin_nested() as trans:
        if await connection.scalar(query) != text_hash:
            result = (await connection.execute(
                comment_text.insert().values(
                    comment=comment_id,
                    timestamp=datetime.now(),
                    hash=text_hash,
                    data=text
                )
            )).inserted_primary_key[0]
            await trans.commit()
            return result
        else:
            return None
async def insert_comment(connection, entity_id, user_id, unique_key, text, text_hash, parent_comment_id=None):
    """Create a comment (optionally a reply to *parent_comment_id*) together
    with its first text revision, atomically; returns the new comment id."""
    async with connection.begin_nested() as trans:
        result = (await connection.execute(
            comment.insert().values(
                entity=entity_id,
                user=user_id,
                key=unique_key,
                comment=parent_comment_id
            )
        )).inserted_primary_key[0]
        # First revision of the comment text, in the same transaction.
        await add_or_update_comment_text(connection, result, text, text_hash)
        await trans.commit()
        return result
async def get_comment_by_key(connection, unique_key):
    """Return the comment row whose key is *unique_key* as a dict,
    or None when no such comment exists."""
    query = select([comment]).select_from(comment).where(
        comment.c.key == unique_key
    )
    ds = await connection.execute(query)
    if not ds.rowcount:
        return None
    return dict(await ds.first())
async def delete_comment(connection, comment_id):
    """Delete comment *comment_id*.

    Refuses (returns False) while replies still reference the comment;
    returns True once the row has been deleted.
    """
    # Number of replies pointing at this comment.
    query = select([func.count()]).select_from(comment).where(
        comment.c.comment == comment_id
    )
    async with connection.begin_nested() as trans:
        if await connection.scalar(query) > 0:
            return False
        else:
            await connection.execute(
                comment.delete().where(
                    comment.c.id == comment_id
                )
            )
            await trans.commit()
            return True
async def get_entity_comments(connection, entity_id, with_replies, limit, offset, timestamp_from=None,
                              timestamp_to=None):
    """Yield the latest revision of every comment on *entity_id* as dicts.

    Only top-level comments unless *with_replies* is true; optionally
    windowed by creation timestamp and paged via *limit*/*offset*,
    newest first.  Raises DBException when nothing matches.
    """
    comment_where_clause = comment.c.entity == entity_id
    if not with_replies:
        # Top-level comments have no parent reference.
        comment_where_clause = and_(
            comment_where_clause,
            comment.c.comment.is_(None)
        )
    # Per comment: id of the newest revision plus first/last revision times.
    comment_text_max_id = select([
        func.max(comment_text.c.id).label("max_id"),
        func.min(comment_text.c.timestamp).label("created"),
        func.max(comment_text.c.timestamp).label("updated"),
        comment_text.c.comment
    ]).select_from(
        comment_text
    ).group_by(
        comment_text.c.comment
    ).alias("comment_text_max_id")
    text_data_where_clause = comment_text.c.id == comment_text_max_id.c.max_id
    if timestamp_from:
        text_data_where_clause = and_(
            text_data_where_clause,
            comment_text_max_id.c.created >= timestamp_from
        )
    if timestamp_to:
        text_data_where_clause = and_(
            text_data_where_clause,
            comment_text_max_id.c.created <= timestamp_to
        )
    # Text of the newest revision joined with its created/updated times.
    comment_text_last_data = select([
        comment_text.c.data.label("text"),
        comment_text_max_id.c.created,
        comment_text_max_id.c.updated,
        comment_text.c.comment
    ]).select_from(
        comment_text.join(
            comment_text_max_id,
            comment_text.c.comment == comment_text_max_id.c.comment
        )
    ).where(
        text_data_where_clause
    ).alias("comment_text_last_data")
    # Self-join to expose the parent comment's key for replies (NULL for
    # top-level comments thanks to the outer join below).
    comment2 = comment.alias("comment2")
    query = select([
        comment_text_last_data.c.text,
        comment_text_last_data.c.created,
        comment_text_last_data.c.updated,
        user.c.token,
        comment.c.key,
        comment2.c.key.label("parent_key")
    ]).select_from(
        comment.join(
            comment_text_last_data,
            comment_text_last_data.c.comment == comment.c.id
        ).join(
            user, user.c.id == comment.c.user
        ).join(
            comment2,
            comment2.c.id == comment.c.comment,
            isouter=True
        )
    ).where(
        comment_where_clause
    ).order_by(
        desc(comment_text_last_data.c.created)
    )
    if limit:
        query = query.limit(limit)
    if offset:
        query = query.offset(offset)
    ds = await connection.execute(query)
    if ds.rowcount:
        async for item in ds:
            yield dict(item)
    else:
        raise DBException("Data not found")
async def get_user_comments(connection, user_id, limit, offset, timestamp_from=None, timestamp_to=None):
    """Yield the latest revision of every comment written by *user_id*.

    Each dict carries the text, created/updated timestamps and the token
    and type name of the commented entity; optionally windowed by creation
    timestamp and paged via *limit*/*offset*, newest first.
    Raises DBException when nothing matches.
    """
    # Per comment: id of the newest revision plus first/last revision times.
    comment_text_max_id = select([
        func.max(comment_text.c.id).label("max_id"),
        func.min(comment_text.c.timestamp).label("created"),
        func.max(comment_text.c.timestamp).label("updated"),
        comment_text.c.comment
    ]).select_from(
        comment_text
    ).group_by(
        comment_text.c.comment
    ).alias("comment_text_max_id")
    # Text of the newest revision joined with its created/updated times.
    comment_text_last_data = select([
        comment_text.c.data.label("text"),
        comment_text_max_id.c.created,
        comment_text_max_id.c.updated,
        comment_text.c.comment
    ]).select_from(
        comment_text.join(
            comment_text_max_id,
            comment_text.c.comment == comment_text_max_id.c.comment
        )
    ).where(
        comment_text.c.id == comment_text_max_id.c.max_id
    ).alias("comment_text_last_data")
    text_data_where_clause = comment_text_last_data.c.comment == comment.c.id
    if timestamp_from:
        text_data_where_clause = and_(
            text_data_where_clause,
            comment_text_max_id.c.created >= timestamp_from
        )
    if timestamp_to:
        text_data_where_clause = and_(
            text_data_where_clause,
            comment_text_max_id.c.created <= timestamp_to
        )
    query = select([
        comment_text_last_data.c.text,
        comment_text_last_data.c.created,
        comment_text_last_data.c.updated,
        entity.c.token.label("entity_token"),
        entity_type.c.name.label("entity_type")
    ]).select_from(
        comment.join(
            comment_text_last_data, text_data_where_clause
        ).join(
            entity, entity.c.id == comment.c.entity
        ).join(
            entity_type, entity_type.c.id == entity.c.type
        )
    ).where(
        comment.c.user == user_id
    ).order_by(
        desc(comment_text_last_data.c.created)
    )
    if limit:
        query = query.limit(limit)
    if offset:
        query = query.offset(offset)
    ds = await connection.execute(query)
    if ds.rowcount:
        async for item in ds:
            yield dict(item)
    else:
        raise DBException("Data not found")
async def get_comment_replies(connection, comment_id, limit, offset):
    """Yield *comment_id* and its whole reply subtree (any depth) as dicts.

    A recursive CTE walks the parent->child links; each result carries the
    latest revision text, timestamps, the comment key, the parent's key and
    the author token.  Raises DBException when nothing matches.
    """
    # Per comment: id of the newest revision plus first/last revision times.
    comment_text_max_id = select([
        func.max(comment_text.c.id).label("max_id"),
        func.min(comment_text.c.timestamp).label("created"),
        func.max(comment_text.c.timestamp).label("updated"),
        comment_text.c.comment
    ]).select_from(
        comment_text
    ).group_by(
        comment_text.c.comment
    ).alias("comment_text_max_id")
    # Text of the newest revision joined with its created/updated times.
    comment_text_last_data = select([
        comment_text.c.data.label("text"),
        comment_text_max_id.c.created,
        comment_text_max_id.c.updated,
        comment_text.c.comment
    ]).select_from(
        comment_text.join(
            comment_text_max_id,
            comment_text.c.comment == comment_text_max_id.c.comment
        )
    ).where(
        comment_text.c.id == comment_text_max_id.c.max_id
    ).alias("comment_text_last_data")
    # Recursive CTE anchor: the root comment itself (no parent key).
    comment_recursive = select([
        comment.c.id,
        comment.c.user,
        comment.c.comment,
        comment.c.key,
        null().label("parent_key")
    ]).where(
        comment.c.id == comment_id
    ).cte("comment_recursive", recursive=True)
    # Recursive step: children of rows already collected.
    comment_recursive = comment_recursive.union(
        select([
            comment.c.id,
            comment.c.user,
            comment.c.comment,
            comment.c.key,
            comment_recursive.c.key.label("parent_key")
        ]).select_from(
            comment.join(
                comment_recursive, comment.c.comment == comment_recursive.c.id
            )
        )
    )
    query = select([
        comment_text_last_data.c.text,
        comment_text_last_data.c.created,
        comment_text_last_data.c.updated,
        comment_recursive.c.key,
        comment_recursive.c.parent_key,
        user.c.token.label("user")
    ]).select_from(
        comment_text_last_data.join(
            comment_recursive,
            comment_text_last_data.c.comment == comment_recursive.c.id
        ).join(
            user,
            user.c.id == comment_recursive.c.user
        )
    )
    if limit:
        query = query.limit(limit)
    if offset:
        query = query.offset(offset)
    ds = await connection.execute(query)
    if ds.rowcount:
        async for item in ds:
            yield dict(item)
    else:
        raise DBException("Data not found")
| bashkirtsevich/macaque | db_api.py | db_api.py | py | 11,805 | python | en | code | 0 | github-code | 13 |
38058633930 |
"""HARSHAD NUMBER
A number is said to be Harshad if it's exactly divisible by the sum of its digits. Create a function that determines whether a number is a Harshad or not.
Examples: is_harshad(75) ➞ False
7 + 5 = 12 75 is not exactly divisible by 12
is_harshad(171) ➞ True
1 + 7 + 1 = 9 9 exactly divides 171 """
def is_harshad(num: int) -> bool:
    """Return True when *num* is a Harshad number, i.e. exactly divisible
    by the sum of its decimal digits.

    >>> is_harshad(171)
    True
    >>> is_harshad(75)
    False

    Fixes over the original: ``is_harshad(0)`` no longer divides by zero,
    and negative inputs no longer hang (Python's floor division made the
    original digit loop never terminate for num < 0).
    """
    digit_sum = sum(int(digit) for digit in str(abs(num)))
    if digit_sum == 0:  # only num == 0; treat as not Harshad instead of crashing
        return False
    return num % digit_sum == 0
is_harshad(171)
| unitinguncle/PythonPrograms | HARSHAD NUMBER.py | HARSHAD NUMBER.py | py | 562 | python | en | code | 0 | github-code | 13 |
# Read the number of words, then tally occurrences of each word while
# preserving first-seen order (plain dicts keep insertion order in 3.7+).
count=int(input())
words=dict()
for i in range(count):
    word=input()
    if word not in words:
        words.update({word:1})
    else:
        words[word]= words[word]+1
# Number of distinct words, then each word's count in first-appearance order.
print(len(words.keys()))
for i in words.values():
    print(i,end=' ')
| CodeWithRushi/Hackerrank_Solutions | Collections/Words_Order.py | Words_Order.py | py | 232 | python | en | code | 0 | github-code | 13 |
2074538797 | '''Record data from the API to file. PARTIALLY DEVELOPED / UNTESTED
'''
import os
import io
import glob
import time
import asyncio
from .. import util
import tqdm
# __bind__ = ['store']
import ptgctl
ptgctl.log.setLevel('WARNING')
def tqprint(*a, **kw):
    """print()-alike routed through tqdm so active progress bars stay intact."""
    message = ' '.join(str(part) for part in a)
    tqdm.tqdm.write(message, **kw)
class Disk:
    """Filesystem-backed store: each batch of (timestamp, bytes) entries is
    written as one zip archive under ``path/<stream_id>/<date>/``."""
    EXT = '.zip'
    def __init__(self, path='./data'):
        # Root directory for all stream archives; created eagerly.
        self.path = path
        os.makedirs(self.path, exist_ok=True)
    def list(self, stream_id):
        """Return all archive paths for *stream_id*, sorted (chronological,
        since filenames embed the batch timestamps)."""
        return sorted(glob.glob(os.path.join(self.path, stream_id, f'**/*{self.EXT or ""}')))
    def store(self, entries, stream_id):
        """Write one batch of (timestamp, data) *entries* as a zip archive
        and log a one-line summary."""
        fname, archive = _zip(entries)
        fname = os.path.join(self.path, stream_id, fname)
        os.makedirs(os.path.dirname(fname), exist_ok=True)
        with open(fname, 'wb') as f:
            f.write(archive)
        tqprint(
            'wrote', fname, len(entries), _pretty_bytes(len(archive)),
            util.parse_time(entries[0][0]).strftime('%X.%f'),
            util.parse_time(entries[-1][0]).strftime('%X.%f'))
    def load(self, fname):
        """Yield (timestamp, data) entries from one archive file."""
        with open(fname, 'rb') as f:
            for ts, data in _unzip(f.read()):
                yield ts, data
def _pretty_bytes(b, scale=1000, names=['b', 'kb', 'mb', 'gb', 'tb']):
return next((
f'{b / (scale**i):.1f}{n}'
for i, n in enumerate(names)
if b / (scale**(i+1)) < 1
),
f'{b / (scale**(len(names)-1))}{names[-1]}')
# Registry of storage backends, keyed by the name the CLI commands accept.
WRITERS = {
    'disk': Disk,
}


def get_writer(name, *a, **kw):
    # Instantiate the backend registered under *name* (KeyError if unknown).
    return WRITERS[name](*a, **kw)
def _zip(entries):
    """Pack (timestamp, bytes) pairs into an uncompressed in-memory zip.

    Returns ``(relative_filename, zip_bytes)``; the filename encodes the
    batch date plus its first and last timestamps.
    """
    import zipfile
    buffer = io.BytesIO()
    with zipfile.ZipFile(buffer, 'w', zipfile.ZIP_STORED, False) as zf:
        for ts, payload in entries:
            zf.writestr(ts, payload)
    first_ts, last_ts = entries[0][0], entries[-1][0]
    date = util.parse_time(first_ts).strftime('%Y-%m-%d')
    return f'{date}/{first_ts}_{last_ts}.zip', buffer.getvalue()
def _unzip(data):
import zipfile
archive = io.BytesIO(data)
with zipfile.ZipFile(archive, 'r', zipfile.ZIP_STORED, False) as zf:
for ts in sorted(zf.namelist()):
with zf.open(ts, 'r') as f:
data = f.read()
yield ts, data
# async def _api_streamer(api, stream_id, **kw):
# '''Show a video stream from the API.'''
# async with api.data_pull_connect(stream_id, **kw) as ws:
# while True:
# data = await ws.recv_data()
# if not data:
# return
# for sid, ts, data in data:
# print('data')
# yield sid, ts, data
async def _api_reader(api, stream_id, last_entry_id=0, **kw):
    '''Yield (stream_id, ts, data) entries polled from the API.

    Repeatedly fetches batches after *last_entry_id*; stops on an empty
    batch or when a batch echoes the cursor (no forward progress).
    '''
    # stream_info = api.streams.get(stream_id)
    # final_id = stream_info['info']['last-entry']
    loop = asyncio.get_event_loop()
    try:
        while True:
            # The blocking client call runs in the default executor so the
            # event loop stays responsive.
            data = await loop.run_in_executor(None, lambda: api.data(stream_id, last_entry_id=last_entry_id, timeout=11))
            if not data:
                return
            for sid, ts, d in data: #api.data(stream_id, last_entry_id=last_entry_id):
                if ts == last_entry_id:
                    return
                yield sid, ts, d
                last_entry_id = ts
    except KeyboardInterrupt:
        print('Interrupted')
async def _as_batches(it, max_size=29500000, max_len=2000):
    '''Group (sid, ts, data) entries from *it* into (stream_id, entries)
    batches, closing a batch at *max_len* entries or *max_size* accumulated
    payload bytes.  A partial batch is still yielded (finally) when the
    source is exhausted or an error interrupts the loop.'''
    while True:
        size = 0
        entries = []
        stream_id = None
        try:
            with tqdm.tqdm(total=max_len) as pbar:
                async for sid, ts, data in it:
                    # Remember the stream id of the first entry in the batch.
                    stream_id = stream_id or sid
                    size += len(data)
                    entries.append((ts, data))
                    pbar.update()
                    pbar.set_description(f'{sid} {util.parse_time(ts).strftime("%x %X.%f")} {ts} size={size}/{max_size} len={len(entries)}/72,822')
                    if len(entries) >= max_len or size > max_size:
                        break
                else:
                    # Source exhausted: leave the outer while after yielding.
                    break
        finally:
            if entries:
                yield stream_id, entries
async def _store_stream(api, stream_id, writer='disk', resume=False, **kw):
    '''Continuously pull *stream_id* from the API and persist it in batches.

    With *resume*, the newest stored archive is scanned so polling restarts
    from the last recorded timestamp.
    '''
    drive = WRITERS[writer]()
    if 'last_entry_id' not in kw and resume:
        last_entry_id = None
        # Drain the newest archive; the loop variable ends up holding the
        # final entry's timestamp.
        for last_entry_id, data in drive.load(drive.list(stream_id)[-1]):
            pass
        if last_entry_id:
            tqprint(stream_id, 'resuming at', last_entry_id, util.parse_time(last_entry_id).strftime('%c'))
            kw['last_entry_id'] = last_entry_id
    with tqdm.tqdm() as pbar:
        async for stream_id, entries in _as_batches(_api_reader(api, stream_id, **kw)):
            if stream_id and entries:
                drive.store(entries, stream_id)
                pbar.update(len(entries))
@util.async2sync
async def store(api, *stream_ids, **kw):
    '''Record the given streams concurrently ('*' expands to every stream
    the API lists); '+'-joined ids are split so each stream gets its own
    writer and file tree.'''
    if any(s == '*' for s in stream_ids):
        stream_ids = api.streams.ls()
        print('Using all Stream IDs:', stream_ids)
    # make sure it's one stream per file
    stream_ids = (s for ss in stream_ids for s in ss.split('+'))
    await asyncio.gather(*(_store_stream(api, sid, **kw) for sid in stream_ids))
async def _replay_stream(api, stream_id, writer='disk', fullspeed=False):
    """Push a recorded stream back into the API, pacing sends to the
    original inter-entry timing unless *fullspeed* is set.

    NOTE(review): time.sleep() inside an async function blocks the event
    loop; fine while one stream replays at a time, but it serialises
    concurrent replays — confirm whether that matters.
    """
    drive = WRITERS[writer]()
    async with api.data_push_connect(stream_id) as ws:
        t_last = None
        t0_last = time.time()
        with tqdm.tqdm() as pbar:
            # BUGFIX: Disk.list() requires the stream id; the original
            # called drive.list() with no argument and raised TypeError.
            for fname in drive.list(stream_id):
                for ts, data in drive.load(fname):
                    await ws.send_data(data)
                    pbar.update()
                    if not fullspeed:
                        t = util.parse_epoch_time(ts)
                        if t_last is None:
                            t_last = t
                        # Sleep off the remaining gap between consecutive
                        # recorded timestamps, minus the time already spent.
                        time.sleep(max(0, (t - t_last) - (time.time() - t0_last)))
                        # BUGFIX: advance the reference timestamp; the
                        # original froze it at the first entry, so the
                        # computed delay grew without bound.
                        t_last = t
                        t0_last = time.time()
@util.async2sync
async def replay(api, *stream_ids, **kw):
    """Replay every given stream concurrently; '+'-joined ids are split."""
    split_ids = (single for group in stream_ids for single in group.split('+'))
    return await asyncio.gather(*(_replay_stream(api, sid, **kw) for sid in split_ids))
@util.async2sync
async def summary(api, *stream_ids, writer='disk', **kw):
    '''Print, for each stream, its API metadata plus how many entries are
    stored locally versus the length the API reports.'''
    drive = WRITERS[writer]()
    stream_ids = stream_ids or api.streams.ls()
    for sid in stream_ids:
        info = api.streams.get(sid)
        print(sid)
        print(info['meta'])
        print(info['info'])
        fs = drive.list(sid)
        # Count entries across every stored archive for this stream.
        count = sum(1 for f in fs for _ in drive.load(f))
        print(f'saved {count}/{info["info"]["length"]} entries.')
        print('')
26454907353 | from collections import OrderedDict
def ordered_dict(n):
    """Read *n* lines of "<item name> <price>" from stdin, accumulate the
    net price per item and print "name total" in first-seen order.

    Uses ``.get(name, 0) + price`` instead of the original truthiness test
    on ``.get()``: a stored total of 0 is falsy, so the original would have
    reset such an item's total instead of accumulating onto it.
    """
    totals = OrderedDict()
    for _ in range(n):
        # Item names may contain spaces; the price is the last token.
        *name_parts, price = input().split()
        item_name = " ".join(name_parts)
        totals[item_name] = totals.get(item_name, 0) + int(price)
    for item_name, net_price in totals.items():
        print(item_name, net_price)


if __name__ == '__main__':
    n = int(input())
    ordered_dict(n)
| Eyakub/Problem-solving | HackerRank/Python/collections_orderDict.py | collections_orderDict.py | py | 510 | python | en | code | 3 | github-code | 13 |
21434509336 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Cavern and Color models
    and wires Cavern.color (FK) plus Cavern.connections (self-referential
    M2M).  Generated code — change the schema via new migrations rather
    than editing this file."""

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Cavern',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('title', models.TextField(blank=True, default='')),
                ('description', models.TextField(blank=True, default='')),
                ('link', models.CharField(blank=True, default='', max_length=1000)),
                ('points', models.SmallIntegerField(default=0)),
                ('is_entrance', models.BooleanField(default=False)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Color',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('color', models.CharField(blank=True, default='', max_length=200)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='cavern',
            name='color',
            field=models.ForeignKey(to='game.Color'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='cavern',
            name='connections',
            field=models.ManyToManyField(to='game.Cavern', related_name='connections_rel_+'),
            preserve_default=True,
        ),
    ]
| dlb8685/abstract_caving | game/migrations/0001_initial.py | 0001_initial.py | py | 1,656 | python | en | code | 0 | github-code | 13 |
# Demonstration of the three ways to remove items from a list:
# del by index, remove() by value, and pop() by (optional) index.
a = [1234, 'valid', 'ball', 'nice', 'man', 'quick', 123, 'one', 'two', 'alpha', 'alone', 'dark', 'night', 'tiger']
del a[5]  # del by index: deletes the 6th item ('quick') from the list.
print(a)
a = [1234, 'valid', 'ball', 'nice', 'man', 'quick', 123, 'one', 'two', 'alpha', 'alone', 'dark', 'night', 'tiger']
a.remove(123)  # remove() deletes the first occurrence of the VALUE 123.
print(a)
a = [1234, 'valid', 'ball', 'nice', 'man', 'quick', 123, 'one', 'two', 'alpha', 'alone', 'dark', 'night', 'tiger']
del a[-0]  # -0 == 0, so the minus sign has no effect: this deletes the first item.
print(a)
del a[-1]  # negative index counts from the end: this deletes the LAST item.
print(a)
a = [1234, 'valid', 'ball', 'nice', 'man', 'quick', 123, 'one', 'two', 'alpha', 'alone', 'dark', 'night', 'tiger']
a.pop()  # pop() without an index removes (and returns) the last item.
a.pop(2)  # pop(2) removes (and returns) the 3rd item from the list.
print(a)
| Sayed-Tasif/my-programming-practice | removing items from the list.py | removing items from the list.py | py | 932 | python | en | code | 0 | github-code | 13 |
36725308420 | from selenium import webdriver
from django.test import LiveServerTestCase
from polls.models.question import Question
from polls.models.choice import Choice
from django.contrib.auth.models import User
from django.test.client import Client
from decouple import config
class FrontendTest(LiveServerTestCase):
@classmethod
def setUpClass(cls):
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('-–incognito')
cls.selenium = webdriver.Chrome(
chrome_options=chrome_options,
executable_path=config('CHORME_DRIVER_PATH', default='')
)
super(FrontendTest, cls).setUpClass()
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
super(FrontendTest, cls).tearDownClass()
def setUp(self) -> None:
self.client = Client()
User.objects.create_user(username='User',
email='user@testing.com',
password='UserNotLoser')
Question.objects.create(question_text="Is this a test?")
Choice.objects.create(question_id=Question.objects.all()[0].id,
choice_text="This is my choice!!")
self.client.login(username='User', password='UserNotLoser')
cookie = self.client.cookies['sessionid']
self.selenium.get(self.live_server_url + '/polls/')
self.selenium.add_cookie({'name': 'sessionid', 'value': cookie.value, 'secure': False, 'path': '/'})
def test_header_message(self):
self.selenium.get(self.live_server_url + '/polls/')
h2 = self.selenium.find_element_by_tag_name("h2")
self.assertIn('Hot Topics', h2.text)
def test_have_question(self):
self.selenium.get(self.live_server_url + '/polls/')
questions = self.selenium.find_elements_by_class_name('question')
self.assertTrue(len(questions) > 0)
def test_question_link(self):
self.selenium.get(self.live_server_url + '/polls/')
self.selenium.find_element_by_id(f'questionId{Question.objects.all()[0].id}').click()
self.assertURLEqual(self.selenium.current_url,
self.live_server_url + f'/polls/{Question.objects.all()[0].id}/')
def test_voting_for_first_choice(self):
self.selenium.get(self.live_server_url + f'/polls/{Question.objects.all()[0].id}/')
self.selenium.find_element_by_id('choice1').click()
self.selenium.find_element_by_id('voteButton').click()
self.assertURLEqual(self.live_server_url +
f'/polls/{Question.objects.all()[0].id}/results/',
self.selenium.current_url)
| plumest/django-polls | polls/tests/test_client_sites.py | test_client_sites.py | py | 2,749 | python | en | code | 0 | github-code | 13 |
19383465034 | from tkinter import *
import tkinter.messagebox
class Patient:
def __init__(self,name, location, gender, age):
self.name = name
self.location = location
self.gender = gender
self.age = age
self.temperature = 0
self.fever = 0
self.cough = 0
self.vomit = 0
self.lethargic = 0
self.risk = []
class Infant(Patient):
def __init__(self, name, location, gender, age):
super().__init__(name, location, gender, age)
self.restless = 0
self.fatigue = 0
self.feed = 0
self.crying = 0
self.pale = 0
class Child(Patient):
def __init__(self, name, location, gender, age):
super().__init__(name, location, gender, age)
self.headache = 0
self.sore_throat = 0
self.nasal_cong = 0
self.diah = 0
self.chill = 0
self.sweating = 0
self.flushed = 0
self.wheeze = 0
self.blue = 0
self.diff_breath = 0
self.flare = 0
self.musc_breath = 0
class Adult(Patient):
def __init__(self, name, location, gender, age):
super().__init__(name, location, gender, age)
self.cough_blood = 0
self.chills = 0
self.diff_breath = 0
self.chest_pain = 0
self.sweat = 0
self.musc_pain = 0
self.sore_throat = 0
self.diah = 0
self.blue = 0
self.confus = 0
# def createPat(name, location, gender, age):
# if age < 1 and age < 2:
# patient = Infant(name, location, gender, age)
# return diagInfant(patient)
# elif age > 2 and age < 16:
# patient = Child(name, location, gender, age)
# else:
# patient = Adult(name, location, gender, age)
class RiskWindow:
def __init__(self, win1):
self.l1 = Label(win1, text = 'Enter Risk Factor')
self.l1.grid(row = 0, sticky = E)
self.riskEnt = Entry(win1)
self.riskEnt.grid(row = 0,column = 1)
self.accBut = Button(win1, text = 'Accept', command = self.getVal)
self.accBut.grid(row = 1, column = 1)
self.retBut = Button(win1, text = 'Return To Menu', command = win1.destroy)
self.retBut.grid(row = 2, column = 1)
def getVal(self):
#Add code to add risk factor to file
tkinter.messagebox.showinfo("Information","Entry recorded.")
class DiagIWindow:
def __init__(self,win3,patient):
self.frame1 = Frame(win3)
self.frame1.pack(side = TOP, fill = X)
self.frame2 = Frame(win3)
self.frame2.pack(side = BOTTOM, fill = BOTH)
self.tlabel = Label(self.frame1, text = 'Diagnose Infant', bg = 'grey', fg = 'black')
self.tlabel.pack(fill = X)
class DiagCWindow:
def __init__(self,win3,patient):
pass
class DiagAWindow:
def __init__(self,win3,patient):
pass
class DiagWindow:
def __init__(self, win2):
self.frame1 = Frame(win2)
self.frame1.pack(side = TOP, fill = X)
self.frame2 = Frame(win2)
self.frame2.pack(side = BOTTOM, fill = BOTH)
self.tlabel = Label(self.frame1, text = 'Add Patient Information', bg = 'grey', fg = 'black')
self.tlabel.pack(fill = X)
self.nlabel = Label(self.frame2, text = 'Name')
self.nlabel.grid(row = 0, sticky = E)
self.nentry = Entry(self.frame2)
self.nentry.grid(row = 0, column = 1)
self.llabel = Label(self.frame2, text = 'Location')
self.llabel.grid(row = 1, sticky = E)
self.lentry = Entry(self.frame2)
self.lentry.grid(row = 1, column = 1)
self.glabel = Label(self.frame2, text = 'Gender')
self.glabel.grid(row = 2, sticky = E)
self.v = IntVar()
self.gb1 = Radiobutton(self.frame2, text = 'Male', value = 0, variable = self.v)
self.gb2 = Radiobutton(self.frame2, text = 'Female', value = 1, variable = self.v)
self.gb1.grid(row = 2, column = 1)
self.gb2.grid(row = 2, column = 2)
self.alabel = Label(self.frame2, text = 'Age')
self.alabel.grid(row = 3, sticky = E)
self.aentry = Entry(self.frame2)
self.aentry.grid(row = 3, column = 1)
self.age = 0
if self.aentry.get() != '':
age = int(self.aentry.get())
self.addButton = Button(self.frame2, text = 'Add Patient', command = self.addpat(self.nentry.get(),self.lentry.get(),self.v.get(),self.age))
self.addButton.grid(row = 4, columnspan = 3)
def addpat(self,name, location, gender, age):
if age < 1 and age < 2:
patient = Infant(name, location, gender, age)
self.diagIwin(patient)
elif age > 2 and age < 16:
patient = Child(name, location, gender, age)
else:
patient = Adult(name, location, gender, age)
def diagIwin(self,patient):
subwin3=Tk()
mywin4=DiagIWindow(subwin3,patient)
subwin3.title('Diagnose Infant')
subwin3.geometry("400x200+10+10")
subwin3.mainloop()
def diagCwin(self,patient):
subwin4=Tk()
mywin5=DiagCWindow(subwin4,patient)
subwin4.title('Diagnose Child')
subwin4.geometry("400x200+10+10")
subwin4.mainloop()
def diagAwin(self,patient):
subwin5=Tk()
mywin6=DiagAWindow(subwin5,patient)
subwin5.title('Diagnose Adult')
subwin5.geometry("400x200+10+10")
subwin5.mainloop()
class MainWindow:
def __init__(self, win):
self.topframe = Frame(win)
self.topframe.pack(side = TOP, fill = X)
self.bottomframe = Frame(win)
self.bottomframe.pack(side = BOTTOM, fill = X)
self.titLabel = Label(self.topframe, text = 'Welcome To The Pneumonia Diagnosis Aid System', bg = 'grey', fg = 'black')
self.titLabel.pack(fill = X, expand = 1)
self.riskBut = Button(self.bottomframe, text='Add Risk Factor', command= self.riskwin)
self.diagBut = Button(self.bottomframe, text='Run Diagnosis', command = self.diagwin)
self.outBut = Button(self.bottomframe, text='Check For Spike In Pneumonia')
self.riskBut.pack(side = BOTTOM, fill = X)
self.diagBut.pack(side = BOTTOM, fill = X)
self.outBut.pack(side = BOTTOM, fill = X)
def riskwin(self):
subwin=Tk()
mywin2=RiskWindow(subwin)
subwin.title('Add Risk Factor')
subwin.geometry("400x200+10+10")
subwin.mainloop()
def diagwin(self):
subwin2=Tk()
mywin3 = DiagWindow(subwin2)
subwin2.title('Patient Information')
subwin2.geometry("400x200+10+10")
subwin2.mainloop()
root = Tk()
mywin = MainWindow(root)
root.title('Main Menu')
root.geometry("800x400+10+10")
root.mainloop() | KelMHunt/ai-project | PatientExpertSystem/test.py | test.py | py | 6,844 | python | en | code | 0 | github-code | 13 |
27637054757 | from datetime import date, datetime
import time
import math
from wechatpy import WeChatClient
from wechatpy.client.api import WeChatMessage, WeChatTemplate
import requests
import os
import random
today = datetime.now()
#start_date = '2019-08-14'
start_date = '2022-10-31'
ending_date = '2023-07-01'
city0 = 'wuhan'
birthday1 = '10-13'
birthday2 = '11-18'
app_id = "wxa6908ef5cf77e676"
app_secret = "df5408b2d176032e45e98b73934883af"
user_id = ["okCU26Gxs7OSjbr9N12KwLmf2heg", "okCU26CyZAlox-Mrm9fQsmPhRa5I"]
template_id = "vbcMSv7sxlr2GrEuI9rQe-AHzlySGP6FoS7NaKho4F8"
weather_key = "08eb4100a73f4d2ab967c0bd5baed51e"
def get_weather():
url_location = "https://geoapi.qweather.com/v2/city/lookup?location="+city0+"&key="+weather_key
location_req = requests.get(url_location).json()["location"]
location = location_req[9]
location_id = str(location["id"])
region = location["name"]
city = location["adm2"]
prov = location["adm1"]
url_weather_now = "https://devapi.qweather.com/v7/weather/now?location="+location_id+"&key="+weather_key
url_weather_daily = "https://devapi.qweather.com/v7/weather/3d?location="+location_id+"&key="+weather_key
url_air_daily = "https://devapi.qweather.com/v7/air/5d?location="+location_id+"&key="+weather_key
now_req = requests.get(url_weather_now).json()["now"]
daily_req = requests.get(url_weather_daily).json()["daily"][0]
air_req = requests.get(url_air_daily).json()["daily"][0]
temp_now = now_req["temp"]
text_now = now_req["text"]
date = daily_req["fxDate"]
temp_min = daily_req["tempMin"]
temp_max = daily_req["tempMax"]
day_wea = daily_req["textDay"]
night_wea = daily_req["textNight"]
sunrise = daily_req["sunrise"]
sunset = daily_req["sunset"]
moonrise = daily_req["moonrise"]
moonset = daily_req["moonset"]
air_qual = air_req["category"]
# print(location)
return region, city, prov, date, text_now, temp_now, temp_min, \
temp_max, day_wea, night_wea, sunrise, sunset, moonrise, \
moonset, air_qual
def get_count():
delta = today - datetime.strptime(start_date, "%Y-%m-%d")
ends = datetime.strptime(ending_date, "%Y-%m-%d") - today
return delta.days, ends.days
def get_birthday():
next1 = datetime.strptime(str(date.today().year) + "-" + birthday1, "%Y-%m-%d")
next2 = datetime.strptime(str(date.today().year) + "-" + birthday2, "%Y-%m-%d")
if next1 < datetime.now():
next1 = next1.replace(year=next1.year + 1)
if next2 < datetime.now():
next2 = next2.replace(year=next2.year + 1)
return (next1 - today).days, (next2 - today).days
def get_weekday():
day = today.weekday()
if(day==0):
wday = "星期一"
elif(day==1):
wday = "星期二"
elif(day==2):
wday = "星期三"
elif(day==3):
wday = "星期四"
elif(day==4):
wday = "星期五"
elif(day==5):
wday = "星期六"
elif(day==6):
wday = "星期日"
return wday
def get_words():
words = requests.get("https://api.shadiao.pro/chp")
if words.status_code != 200:
return get_words()
return words.json()['data']['text']
def get_random_color():
return "#%06x" % random.randint(0, 0xFFFFFF)
client = WeChatClient(app_id, app_secret)
region,city,prov,data,text_now,temp_now,temp_min, \
temp_max,day_wea,night_wea,sunrise,sunset,moonrise,moonset,air_qual = get_weather()
wm = WeChatMessage(client)
days_start, days_ends = get_count()
data = {"region":{"value":'江夏', "color":get_random_color()},
"city":{"value":city, "color":get_random_color()},
"prov":{"value":prov, "color":get_random_color()},
"date":{"value":data, "color":get_random_color()},
"wday":{"value":get_weekday(), "color":get_random_color()},
"wea_now":{"value":text_now, "color":get_random_color()},
"temp_now":{"value":temp_now, "color":get_random_color()},
"temp_min":{"value":temp_min, "color":get_random_color()},
"temp_max":{"value":temp_max, "color":get_random_color()},
"day_wea":{"value":day_wea, "color":get_random_color()},
"night_wea":{"value":night_wea, "color":get_random_color()},
"sunrise":{"value":sunrise, "color":get_random_color()},
"sunset":{"value":sunset, "color":get_random_color()},
"moonrise":{"value":moonrise, "color":get_random_color()},
"moonset":{"value":moonset, "color":get_random_color()},
"air_qual":{"value":air_qual, "color":get_random_color()},
"love_days":{"value":days_start, "color":get_random_color()},
"ends_days":{"value":days_ends, "color":get_random_color()},
"birthday1_left":{"value":get_birthday()[0], "color":get_random_color()},
"birthday2_left":{"value":get_birthday()[1], "color":get_random_color()},
"words":{"value":get_words(), "color":get_random_color()}}
for wechat_id in user_id:
res = wm.send_template(wechat_id, template_id, data)
print(res)
#print(days_ends)
#print(region)
| Alpha521/wechat | main.py | main.py | py | 4,978 | python | en | code | 0 | github-code | 13 |
70148019857 | import io
import os
from typing import Callable, Optional
import h5py
from PIL import Image
from torch.utils.data import Dataset
from tqdm import tqdm
class H5Dataset(Dataset):
def __init__(
self,
h5_path: str,
transform: Optional[Callable] = None,
):
"""H5 Dataset.
The dataset assumes that data is organized as:
"class_name"
"img_name"
"img_name"
"img_name"
"class_name"
"img_name"
"img_name"
"img_name"
Args:
h5_path (str): path of the h5 file.
transform (Callable): pipeline of transformations. Defaults to None.
"""
self.h5_path = h5_path
self.h5_file = None
self.transform = transform
self._load_h5_data_info()
def _load_h5_data_info(self):
self._data = []
h5_data_info_file = os.path.join(
os.path.expanduser("~"), os.path.basename(os.path.splitext(self.h5_path)[0]) + ".txt"
)
if not os.path.isfile(h5_data_info_file):
temp_h5_file = h5py.File(self.h5_path, "r")
# collect data from the h5 file directly
self.classes, self.class_to_idx = self._find_classes(temp_h5_file)
for class_name in tqdm(self.classes, desc="Collecting information about the h5 file"):
y = self.class_to_idx[class_name]
for img_name in temp_h5_file[class_name].keys():
self._data.append((class_name, img_name, int(y)))
# save the info locally to speed up sequential executions
with open(h5_data_info_file, "w") as f:
for class_name, img_name, y in self._data:
f.write(f"{class_name}/{img_name} {y}\n")
else:
# load data info file that was already generated by previous runs
with open(h5_data_info_file, "r") as f:
for line in f:
class_name_img, y = line.strip().split(" ")
class_name, img_name = class_name_img.split("/")
self._data.append((class_name, img_name, int(y)))
def _find_classes(self, h5_file: h5py.File):
classes = sorted(h5_file.keys())
class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
return classes, class_to_idx
def _load_img(self, class_name: str, img: str):
img = self.h5_file[class_name][img][:]
return img
def __getitem__(self, index: int):
if self.h5_file is None:
self.h5_file = h5py.File(self.h5_path, "r")
class_name, img, y = self._data[index]
x = self._load_img(class_name, img)
if self.transform:
x = self.transform(x)
return x, y
def __len__(self):
return len(self._data) | NJU-LHRS/official-CMID | Pretrain/Dataset/h5_dataset.py | h5_dataset.py | py | 2,896 | python | en | code | 48 | github-code | 13 |
3021704123 | #!/usr/bin/python3
def roman_to_int(roman_string):
"""roman to integer"""
if not isinstance(roman_string, str) or not roman_string:
return 0
roman_num = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
num = 0
prev_value = 0
for symbol in roman_string[::-1]:
value = roman_num[symbol]
if value >= prev_value:
num += value
else:
num -= value
prev_value = value
return (num)
| JozeSIMAO/alx-higher_level_programming | 0x04-python-more_data_structures/12-roman_to_int.py | 12-roman_to_int.py | py | 488 | python | en | code | 0 | github-code | 13 |
11818081945 | import json
birthday = "C:\\Users\\Banerjea-PC\\Documents\\Python_saved\\birthdays.json"
birthdays_dump = {
'Albert Einstein': '03/14/1879',
'Benjamin Franklin': '01/17/1706',
'Ada Lovelace': '12/10/1815',
'Donald Trump': '06/14/1946',
'Rowan Atkinson': '01/6/1955'}
with open(birthday, 'w') as open_file:
json.dump(birthdays_dump, open_file)
if __name__ == "__main__":
done = "Y"
while done != "N":
with open(birthday, 'r') as open_file:
birthdays = json.load(open_file)
print("Welcome to the birthday dictionary. We know the birthdays of:")
for entry in birthdays.keys():
print(entry)
person = input("Who's birthday do you want to look up?")
if person in birthdays.keys():
print(str(person) + "'s birthday is " + str(birthdays[person]))
else:
print("We dont know him. Is the spelling write ?")
nntry = None
nntry = input("New Entry ? (Y/N)")
if nntry == "Y":
name = input("Name ? : ")
DOB = input("DOB ? (DD/MM/YYYY):")
birthdays[name] = DOB
with open(birthday, 'w') as open_file:
json.dump(birthdays, open_file)
done = input("Again ? (Y/N)") | PranabBandyopadhyay/Tutorial1 | EX34.py | EX34.py | py | 1,274 | python | en | code | 0 | github-code | 13 |
21119246518 | """ Script para variar os octetos de um endereço IP """
""" import subprocess
subprocess.Popen("ssh {user}@{host} {cmd}".format(user=user, host=host, cmd='ls -l'), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
"""
try:
from os import sep
from os import getenv
from dotenv import load_dotenv, find_dotenv
from datetime import datetime, time
import connect_ssh
import sys
import csv
import subprocess
subprocess.call("pip install -r requirements.txt")
except:
subprocess.call(["pip install paramiko==2.7.2",
"pip install virtualenv==20.6.0"])
load_dotenv(find_dotenv())
host = getenv('HOST')
usr = getenv('USER')
password = getenv('PASSWORD')
port = getenv('PORT')
RESET_OCTET = 0
THIRD_OCTET_RANGE = 2
SECOND_OCTET_RANGE = 18
ip_address = [172, 17, 0, 1]
change_first_octet_address, change_second_octet_address = False, True
AUX_SECOND_OCTET_ADDRESS = ip_address[1]
AUX_THIRD_OCTET_ADDRESS = ip_address[2]
def escrevecsv(list_of_ip: str, path: str, header: str):
with open(path, 'w', encoding='utf-8', newline='') as file:
csv_file = csv.writer(file, quoting=csv.QUOTE_ALL, )
file.write(header+'\n')
csv_file.writerows(list_of_ip)
''' flag = file.write(''.join(lista))
if flag == 0:
return 'arquivo {} gravado com sucesso'.format(path)
else:
return 'erro na gravvação' '''
if len(sys.argv) > 1:
ip_range_arg = sys.argv[1].strip()
print("Argumentos passados: ", str(sys.argv[1:]))
ip_address = [int(x) for x in (ip_range_arg.split('.'))]
def change_octet_address(ip_address_, index, range_octect):
ips_changed = []
ips_changed.append('.'.join(map(str, ip_address_)))
#print(f'IP: {ip_address}')
if(ip_address_[index] < range_octect):
for x in range(ip_address_[index], range_octect):
ip_address_[index] = ip_address_[index] + 1
string_ip = '.'.join(map(str, ip_address_))
ips_changed.append(string_ip)
''' If para caso quiser intercalar alteração dos octetos '''
if index == 1:
#change_second_octet_address = True
change_first_octet_address = False
if index == 2:
#change_first_octet_address = True
change_second_octet_address = False
ip_address_[index-1] = ip_address_[index-1] + 1
if(ip_address_[index-1] <= SECOND_OCTET_RANGE):
ip_address_[index] = RESET_OCTET
return ips_changed
''' Instanciar a conexão ssh '''
ssh = connect_ssh.SSH(host, usr, password, port)
def exec_command(ssh_client: ssh, command: str):
result = ssh_client.exec_cmd(command)
if result:
return result
else:
return "executado com sucesso"
all_ips_address_generated = []
#all_ips_address_generated.append('.'.join(map(str, ip_address)))
''' Etapa de geração de IP's '''
while(ip_address[1] <= SECOND_OCTET_RANGE):
# if ip_address[1] == 16:
# break
if AUX_SECOND_OCTET_ADDRESS == SECOND_OCTET_RANGE and AUX_THIRD_OCTET_ADDRESS == THIRD_OCTET_RANGE:
break
if ip_address[2] < THIRD_OCTET_RANGE and change_second_octet_address:
all_ips_address_generated = all_ips_address_generated + \
change_octet_address(ip_address, 2, THIRD_OCTET_RANGE)
print(*all_ips_address_generated, sep="\n")
''' Colocar o retorno de cada execução na lista '''
list_with_return_command = [exec_command(ssh,
f'config user ldap; edit Pictor; set source-{x}; next; end')
for x in all_ips_address_generated]
print(*list_with_return_command)
'''
with open('file.txt', 'a+', encoding='utf-8') as file:
current_date = datetime.now().strftime('%d/%m/%Y %H:%M:%S')
file.write('ip;data;execução '+'\n')
[file.writelines(str(ip+';'+res + '\n'))
for ip, res in zip(all_ips_address_generated, list_with_return_command)]
'''
| rico2290/generate_octect_ip | ip_address_changer.py | ip_address_changer.py | py | 4,014 | python | en | code | 0 | github-code | 13 |
20052669589 | #!/usr/bin/env python
import cv2
import numpy as np
import rospy
from std_msgs.msg import Int32MultiArray
def send_corners():
result = np.array([])
pub = rospy.Publisher('corner_topic',Int32MultiArray,queue_size = 10)
rospy.init_node('corner_node',anonymous=True)
rate=rospy.Rate(10)
msg=Int32MultiArray()
while not rospy.is_shutdown():
connections = pub.get_num_connections()
rospy.loginfo("Connections : %d",connections)
if connections>0:
rospy.loginfo("Data Sent")
for id in range(10):
image = cv2.imread(r"/home/chaitanya/catkin_ws/src/cvros/CV-ROS Task/Segmentation/"+str(id)+"_1.png")
gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
gray = np.float32(gray)
# image, maximum corners, quality, distance between corners
corners = cv2.goodFeaturesToTrack(gray, 4, 0.1, 30)
corners = np.int0(corners)
for corner in corners:
x, y = corner.ravel()
cv2.circle(image, (x, y), 3, 255, -1)
result = np.concatenate((result, corners.flatten()), axis=None)
msg.data = result
pub.publish(msg)
break
rate.sleep()
if __name__ == '__main__':
try:
send_corners()
except rospy.ROSInterruptException:
pass | Lazar-Wolfe/ARK-CV-Ros-Task | scripts/feature_detector_2.py | feature_detector_2.py | py | 1,400 | python | en | code | 0 | github-code | 13 |
46395003104 | import time
from locust import HttpUser, task, between
class QuickstartUser(HttpUser):
# wait_time = between(1, 5)
@task(3)
def ready(self):
self.client.get("/api/v1/ready")
@task
def microservice(self):
for item_id in range(10):
self.client.get("/api/v1/microservice")
# time.sleep(1)
| abnerjacobsen/fastapi-mvc-loguru | locust/test1.py | test1.py | py | 349 | python | en | code | 1 | github-code | 13 |
17143173855 | import scrapy
from conifer.items import ConiferItem
class ConfierSpider(scrapy.Spider):
name = 'conifer'
start_urls = ['http://www.greatplantpicks.org/plantlists/by_plant_type/conifer',]
# def parse(self, response):
# filename = response.url.split("/")[-2] + ".html"
# with open(filename,'wb') as f:
# f.write(response.body)
#
def parse(self, response):
for cel in response.xpath("//tbody/tr"):
item = ConiferItem()
# item['name'] = cel.xpath('//td[@class="plantname"]/a/span[@class="genus"]/text()').extract()
item['name'] = cel.xpath('//td[@class="common-name"]/a/text()').extract()
item['genus'] = cel.xpath('//td[@class="plantname"]/a/span[@class="genus"]/text()').extract()
item['species'] = cel.xpath('//td[@class="plantname"]/a/span[@class="species"]/text()').extract()
yield item
| subbuwork/WebScraping_Scrapy | conifer/spiders/conifer_spider.py | conifer_spider.py | py | 920 | python | en | code | 0 | github-code | 13 |
73196056017 | import math
import numpy as np
import os
import pylab
from numpy import linspace
from scipy import pi
plt = pylab.matplotlib.pyplot
mpl = pylab.matplotlib.mpl
import random as rand
zLabel='zlabel'
xLim=False
yLim=False
xLabel='xLabel'
yLabel='yLabel'
xData=[]
yData=[]
zData=[]
xData.append(1)
yData.append(1)
zData.append(0.001)
for i in range(0,1000):
xData.append(xData[-1]+0.01)
yData.append(rand.uniform(1,200))
#zData.append(zData[-1]+0.001)
zData.append(zData[-1]+0.1)
| kylemede/SMODT | sandbox/pythonSandbox/plotTESTER.py | plotTESTER.py | py | 501 | python | en | code | 0 | github-code | 13 |
16007498700 | import tqdm
import argparse
import numpy as np
import matplotlib.pyplot as plt
import cv2
def save_fig(std):
#std = 0.01
print(std)
center = np.array([0.2, 0.3, 0.8])
height = np.array([0.15, 0.15, 0.23])
def f(x):
return (np.exp(-(x[:, None] - center)**2/0.04) + height).max(axis=-1)
N = 200
N_S = 100000
x = np.arange(N)/(N+1)
ys = []
for mu in x:
a = np.random.normal(size=(N_S,)) * std + mu
y = f(a).mean()
ys.append(y)
#fig, ax = plt.subplots(figsize=(4, 4))
plt.clf()
ax = plt.gca()
plt.title(f'E[R] @ std={std:.02f}')
max_x = x[np.argmax(ys)]
plt.plot([max_x, max_x], [-2., 2.], color="red", linestyle='dashed')
plt.plot(x, ys)
ax.set_ylim([0.2, 1.4])
ax.set_xlim([0.0, 1.0])
plt.savefig(f'tmp/tmp.png')
out = cv2.imread('tmp/tmp.png')[:, :, ::-1]
return out
if __name__ == '__main__':
#save_fig(0.3)
#exit(0)
#main()
import imageio
outs = []
for std in range(20):
outs.append(save_fig(std/100.))
from tools.utils import animate
imageio.mimsave('image.gif', outs) | haosulab/RPG | solver/exps/misc/draw_gaussian.py | draw_gaussian.py | py | 1,156 | python | en | code | 18 | github-code | 13 |
21794848096 | '''
1. A METHOD FOR FACE DETECTION PERPOSED BY : VIOLA AND JONES
2. Earlist method for realtime object detection
3.
[Postive Image]
+ => [Model Train] => [XML File(Cascade File)]
[Negitive Image]
4. We are going to use PRETRAIN cascade file provide files
5. OpenCV provide some default cascade for face, number plate and many more things
6. Creating Custom cascade files
'''
import cv2
face_cascade = cv2.CascadeClassifier('./resources/cascade/haarcascade_frontalface_default.xml')
img = cv2.imread('./resources/images/face1.jpg')
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_faces = face_cascade.detectMultiScale(img_gray, 1.1, 4)
for face in img_faces:
x, y, w, h = face
cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), thickness=2)
cv2.imshow('Face Image', img)
cv2.waitKey(0) | 8Bit1Byte/openCV-projects | Face Detection/face-detection.py | face-detection.py | py | 826 | python | en | code | 2 | github-code | 13 |
38637145222 | """Routine for decoding the CIFAR-10 binary file format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
import os
from datetime import datetime
import os.path
import time
import math
from six.moves import xrange # pylint: disable=redefined-builtin
# Process images of this size. Note that this differs from the original CIFAR
# image size of 32 x 32. If one alters this number, then the entire model
# architecture will change and any model would need to be retrained.

# Global constants describing the data set. NOTE: despite the CIFAR-10 names,
# this pipeline reads JPEGs listed in a text file and resizes them to 256x320
# (see read_cifar10 below), not the 32x32 CIFAR binary archive.
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 15981
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 7000


# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999     # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0      # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1  # Learning rate decay factor.
# NOTE(review): 1e-7 is an unusually small initial learning rate -- confirm
# this is intentional and not a leftover from a debugging session.
INITIAL_LEARNING_RATE = 0.0000001       # Initial learning rate.
def read_cifar10(filename_queue):
  """Reads and parses one example listed in a "path label" text file.

  Each non-empty line of the listing file has the form
  "<image_path> <label>", separated by a single space.  The listing is fed
  into a TF v1 queue runner, so each evaluation of the returned tensors
  yields one (shuffled) image/label pair.

  Args:
    filename_queue: path of the listing text file (e.g. 'val.txt').
      Previously this argument was ignored and 'val.txt' was hard-coded;
      it is now honoured (backward compatible -- the only caller passes
      'val.txt').

  Returns:
    An object representing a single example, with the following fields:
      height: number of rows in the result (256)
      width: number of columns in the result (320)
      depth: number of color channels in the result (3)
      label: an int32 scalar Tensor with the example's label.
      uint8image: a [height, width, depth] float32 Tensor with the image
        data, resized to height x width.
  """

  class CIFAR10Record(object):
    pass
  result = CIFAR10Record()

  # Fixed dimensions every decoded image is resized to.
  result.height = 256
  result.width = 320
  result.depth = 3

  # Read the listing file; drop the trailing empty entry produced by the
  # final newline.
  with open(filename_queue) as fid:
    lines = fid.read().split('\n')[:-1]

  # Queue of listing lines, reshuffled each epoch.
  valuequeue = tf.train.string_input_producer(lines, shuffle=True)
  value = valuequeue.dequeue()

  # Split "<path> <label>" and convert the label string to int32.
  img_path, label_str = tf.decode_csv(
      records=value, record_defaults=[['string'], ['']], field_delim=" ")
  result.label = tf.string_to_number(label_str, tf.int32)

  # Decode the JPEG and resize to the fixed model input size.
  image_bytes = tf.read_file(img_path)
  image = tf.image.decode_jpeg(image_bytes, channels=3)
  result.uint8image = tf.image.resize_images(image, [256, 320])
  return result
def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
  """Construct a queued batch of images and labels.

  Args:
    image: 3-D Tensor of [height, width, 3] of type.float32.
    label: 1-D Tensor of type.int32
    min_queue_examples: int32, minimum number of samples to retain
      in the queue that provides of batches of examples (used as
      min_after_dequeue when shuffling).
    batch_size: Number of images per batch.
    shuffle: boolean indicating whether to use a shuffling queue.

  Returns:
    images: Images. 4D tensor of [batch_size, height, width, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  # Create a queue that shuffles the examples, and then read
  # 'batch_size' images + labels from the example queue.
  # Fix: the batch_size and min_queue_examples arguments were previously
  # ignored (batch size hard-coded to 1, min_after_dequeue to 2800); dead
  # tf.placeholder assignments and debug prints removed.
  num_preprocess_threads = 8
  if shuffle:
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=50000,
        min_after_dequeue=min_queue_examples)
  else:
    images, label_batch = tf.train.batch(
        [image, label],
        batch_size=batch_size,
        shapes=([256, 320, 3], []),
        num_threads=num_preprocess_threads,
        capacity=50000)

  return images, tf.reshape(label_batch, [batch_size])
def inputs():
  """Construct input for evaluation using the Reader ops.

  Reads image paths and labels from './val.txt' (see read_cifar10) and
  returns batches of size 1.

  Returns:
    images: Images. 4D tensor of [batch_size, 256, 320, 3] size.
    labels: Labels. 1D tensor of [batch_size] size.
  """
  batch_size = 1
  num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN

  # Read one example (image tensor + label) from the listing file.
  # (Dead tf.placeholder assignments that were immediately overwritten
  # have been removed.)
  read_input = read_cifar10('val.txt')
  reshaped_image = tf.cast(read_input.uint8image, tf.float32)

  # Ensure that the random shuffling has good mixing properties.
  min_fraction_of_examples_in_queue = 0.4
  min_queue_examples = int(num_examples_per_epoch *
                           min_fraction_of_examples_in_queue)

  # Generate a batch of images and labels by building up a queue of examples.
  return _generate_image_and_label_batch(reshaped_image, read_input.label,
                                         min_queue_examples, batch_size,
                                         shuffle=False)
def _variable_on_cpu(name, shape, initializer):
  """Helper to create a Variable stored on CPU memory.

  Args:
    name: name of the variable
    shape: list of ints
    initializer: initializer for Variable

  Returns:
    Variable Tensor
  """
  # Fix: the function name and docstring promise CPU placement, but the
  # device was pinned to '/gpu:0'.  Pin to CPU as in the canonical TF
  # CIFAR-10 tutorial so variables can be shared cheaply across devices.
  with tf.device('/cpu:0'):
    dtype = tf.float32
    var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
  return var
def inference(images):
    """Build the convolutional model.

    Pipeline: /255 normalization -> conv1..conv5 -> local3 (500) ->
    local4 (300) -> local6 (200, linear). Variables are created with
    tf.get_variable() so the graph can be shared across GPU runs.

    NOTE(review): every `... = tf.placeholder(...)` assignment inside this
    function is dead code — each is immediately overwritten by a real tensor
    a few lines later. Left untouched here (doc-only pass).

    NOTE(review): several of the scalar "weight"/"biases" mean-summaries
    below divide by denominators that do not match the layer's actual
    element counts (see conv5, local3, local4, local6) — the logged values
    are only relative indicators, not true means.

    Args:
        images: Images returned from distorted_inputs() or inputs().
    Returns:
        Logits: the un-activated 200-unit output of scope 'local6'.
    """
    # conv1
    with tf.variable_scope('conv1') as scope:
        norm1=tf.placeholder("float",shape=[None,256,320,3])  # dead: overwritten below
        conv1=tf.placeholder("float")  # dead
        conv=tf.placeholder("float")  # dead
        bias=tf.placeholder("float")  # dead
        #norm1=images
        # With alpha=0.0 and beta=1.0, lrn degenerates to elementwise
        # division by `bias` (255) — i.e. pixel scaling to [0, 1] ...
        norm1 = tf.nn.lrn(images, 4, bias=255.0, alpha=0.0, beta=1.0,
                          name='norm1')
        norm1=norm1-0.5  # ... then centering to [-0.5, 0.5].
        tf.histogram_summary('norm1' + '/activations', norm1)
        kernel = tf.get_variable('weights',
                                 shape=[5, 5, 3, 24],
                                 initializer=tf.contrib.layers.xavier_initializer())
        conv = tf.nn.conv2d(norm1, kernel, [1, 2, 2, 1], padding='VALID')  # stride 2
        biases = _variable_on_cpu('biases', [24], tf.constant_initializer(0.1))
        # Mean weight/bias, logged for monitoring drift during training.
        weight=tf.reduce_sum(kernel)/(5*5*3*24)
        biases_ave=tf.reduce_sum(biases)/24
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(bias)
        tf.scalar_summary('conv1' + '/weight', weight)
        tf.scalar_summary('conv1' + '/biases', biases_ave)
        tf.histogram_summary('conv1' + '/activations', conv1)
        tf.image_summary('conv1', images,max_images=24)  # logs the raw inputs, not conv1 maps
        #tf.image_summary('conv1', tf.transpose(conv1, [3, 1, 2, 0])[...,0:1],max_images=24)
        #_activation_summary(conv1)
    # conv2
    with tf.variable_scope('conv2') as scope:
        conv2=tf.placeholder("float")  # dead
        conv=tf.placeholder("float")  # dead
        bias=tf.placeholder("float")  # dead
        kernel = tf.get_variable('weights',
                                 shape=[5, 5, 24, 36],
                                 initializer=tf.contrib.layers.xavier_initializer())
        conv = tf.nn.conv2d(conv1, kernel, [1, 2, 2, 1], padding='VALID')
        biases = _variable_on_cpu('biases', [36], tf.constant_initializer(0.1))
        weight=tf.reduce_sum(kernel)/(5*5*36*24)
        biases_ave=tf.reduce_sum(biases)/36
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias)
        tf.scalar_summary('conv2' + '/weight', weight)
        tf.scalar_summary('conv2' + '/biases', biases_ave)
        tf.histogram_summary('conv2' + '/activations', conv2)
        # Visualize channel 0 of each feature map (batch axis moved to channel).
        tf.image_summary('conv2', tf.transpose(conv2, [3, 1, 2, 0])[...,0:1],max_images=36)
        #_activation_summary(conv2)
    # conv3
    with tf.variable_scope('conv3') as scope:
        conv3=tf.placeholder("float")  # dead
        conv=tf.placeholder("float")  # dead
        bias=tf.placeholder("float")  # dead
        kernel = tf.get_variable('weights',
                                 shape=[5, 5, 36, 48],
                                 initializer=tf.contrib.layers.xavier_initializer())
        conv = tf.nn.conv2d(conv2, kernel, [1, 2, 2, 1], padding='VALID')
        biases = _variable_on_cpu('biases', [48], tf.constant_initializer(0.1))
        weight=tf.reduce_sum(kernel)/(5*5*36*48)
        biases_ave=tf.reduce_sum(biases)/48
        bias = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(bias)
        tf.scalar_summary('conv3' + '/weight', weight)
        tf.scalar_summary('conv3' + '/biases', biases_ave)
        tf.histogram_summary('conv3' + '/activations', conv3)
        tf.image_summary('conv3', tf.transpose(conv3, [3, 1, 2, 0])[...,0:1],max_images=48)
        #_activation_summary(conv3)
    # conv4 (3x3, stride 1 from here on)
    with tf.variable_scope('conv4') as scope:
        conv4=tf.placeholder("float")  # dead
        conv=tf.placeholder("float")  # dead
        bias=tf.placeholder("float")  # dead
        kernel = tf.get_variable('weights',
                                 shape=[3, 3, 48, 64],
                                 initializer=tf.contrib.layers.xavier_initializer())
        conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding='VALID')
        biases = _variable_on_cpu('biases', [64], tf.constant_initializer(0.1))
        weight=tf.reduce_sum(kernel)/(3*3*48*64)
        biases_ave=tf.reduce_sum(biases)/64
        bias = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(bias)
        tf.scalar_summary('conv4' + '/weight', weight)
        tf.scalar_summary('conv4' + '/biases', biases_ave)
        tf.histogram_summary('conv4' + '/activations', conv4)
        tf.image_summary('conv4', tf.transpose(conv4, [3, 1, 2, 0])[...,0:1],max_images=64)
        #_activation_summary(conv4)
    # conv5
    with tf.variable_scope('conv5') as scope:
        conv5=tf.placeholder("float")  # dead
        conv=tf.placeholder("float")  # dead
        bias=tf.placeholder("float")  # dead
        kernel = tf.get_variable('weights',
                                 shape=[3, 3, 64, 128],
                                 initializer=tf.contrib.layers.xavier_initializer())
        conv = tf.nn.conv2d(conv4, kernel, [1, 1, 1, 1], padding='VALID')
        biases = _variable_on_cpu('biases', [128], tf.constant_initializer(0.1))
        # NOTE(review): denominator 3*3*64*64 does not match the
        # [3,3,64,128] kernel — this "mean" is off by a factor of 2.
        weight=tf.reduce_sum(kernel)/(3*3*64*64)
        biases_ave=tf.reduce_sum(biases)/128
        bias = tf.nn.bias_add(conv, biases)
        conv5 = tf.nn.relu(bias)
        tf.scalar_summary('conv5' + '/weight', weight)
        tf.scalar_summary('conv5' + '/biases', biases_ave)
        tf.histogram_summary('conv5' + '/activations', conv5)
        tf.image_summary('conv5', tf.transpose(conv5, [3, 1, 2, 0])[...,0:1],max_images=64)
    # local3
    with tf.variable_scope('local3') as scope:
        # Move everything into depth so we can perform a single matrix multiply.
        local3=tf.placeholder("float")  # dead
        dim=tf.placeholder(tf.int32)  # dead
        bias=tf.placeholder("float")  # dead
        weights=tf.placeholder("float")  # dead
        reshape = tf.reshape(conv5, [1,-1])  # flatten; hard-wires batch size 1
        dim = reshape.get_shape()[1].value
        weights = tf.get_variable('weights', shape=[dim, 500],
                                  initializer=tf.contrib.layers.xavier_initializer())
        biases = _variable_on_cpu('biases', [500], tf.constant_initializer(0.1))
        #local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases,name=scope.name)
        bias = tf.matmul(reshape, weights)+biases
        local3=tf.nn.relu(bias)
        # NOTE(review): divisors use 100 but the layer has 500 units.
        tf.scalar_summary('local3' + '/weight', tf.reduce_sum(weights)/(dim*100))
        tf.scalar_summary('local3' + '/biases', tf.reduce_sum(biases)/100)
        tf.histogram_summary('local3' + '/activations', local3)
        #_activation_summary(local3)
    # local4
    with tf.variable_scope('local4') as scope:
        local4=tf.placeholder("float")  # dead
        weights=tf.placeholder("float")  # dead
        weights = tf.get_variable('weights', shape=[500, 300],
                                  initializer=tf.contrib.layers.xavier_initializer())
        biases = _variable_on_cpu('biases', [300], tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases)
        tf.scalar_summary('local4' + '/weight', tf.reduce_sum(weights)/(500*300))
        tf.scalar_summary('local4' + '/biases', tf.reduce_sum(biases)/300)
        tf.histogram_summary('local4' + '/activations', local4)
        #_activation_summary(local4)
    # local6: final linear layer (no activation) producing the logits.
    with tf.variable_scope('local6') as scope:
        local6=tf.placeholder("float")  # dead
        weights=tf.placeholder("float")  # dead
        weights = tf.get_variable('weights', shape=[300, 200],
                                  initializer=tf.contrib.layers.xavier_initializer())
        biases = _variable_on_cpu('biases', [200], tf.constant_initializer(0.1))
        local6 = tf.matmul(local4, weights) + biases
        #local6 = tf.tanh(local6)
        # NOTE(review): weight divisor is 300, not 300*200.
        tf.scalar_summary('local6' + '/weight', tf.reduce_sum(weights)/(300))
        tf.scalar_summary('local6' + '/biases', tf.reduce_sum(biases))
        # tf.histogram_summary('local6' + '/activations', local6)
        #_activation_summary(local6)
        #local6=local6[...,0]
    return local6
def losss(logits, labels):
    """Build the softmax cross-entropy loss op and its summaries.

    Args:
        logits: Logits from inference().
        labels: Labels from distorted_inputs() or inputs(); 1-D tensor
            of shape [batch_size].
    Returns:
        Loss tensor of type float (mean cross-entropy over the batch).
    """
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy_per_example')
    # Fixed: the original assigned `loss = tf.placeholder("float")` here and
    # immediately overwrote it — dead code removed; the intermediate
    # `cross_entropy_mean` alias is folded into `loss` directly.
    loss = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.histogram_summary('labels' + '/activations', labels)
    tf.histogram_summary('local6' + '/activations', logits)
    tf.scalar_summary('loss', loss)
    return loss
def trainn(total_loss, global_step):
    """Create the training op: plain SGD with a fixed learning rate.

    The exponential-decay schedule and moving-average tracking are present
    below but commented out.

    Args:
        total_loss: Total loss from losss().
        global_step: Integer Variable counting the number of training steps
            processed.
    Returns:
        train_op: op that applies one gradient-descent update.
    """
    # Variables that affect learning rate.
    num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / 1  # batch size 1
    decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)  # unused while decay is disabled
    # Decay the learning rate exponentially based on the number of steps.
    #lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
    #                                global_step,
    #                                decay_steps,
    #                                LEARNING_RATE_DECAY_FACTOR,
    #                                staircase=True)
    lr=0.0001  # fixed Python float; the summary below logs a constant
    tf.scalar_summary('learning_rate', lr)
    # Generate moving averages of all losses and associated summaries.
    #loss_averages_op = tf.reduce_sum(total_loss)
    # Compute gradients.
    #with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)
    # Apply gradients (this also increments global_step).
    #tf.scalar_summary('grad', grads)
    #tf.histogram_summary('grads' + '/activations', grads)
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
    '''# Add histograms for trainable variables.
    for var in tf.trainable_variables():
        tf.histogram_summary(var.op.name, var)
    # Add histograms for gradients.
    for grad, var in grads:
        if grad is not None:
            tf.histogram_summary(var.op.name + '/gradients', grad)
    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')'''
    train_op=apply_gradient_op
    return train_op
def train():
    """Train the model: build the graph, then run 5000 SGD steps with
    periodic console logging (every 10 steps), summaries (every 100) and
    checkpoints (every 1000) under a hard-coded output directory.

    NOTE(review): uses `xrange`, i.e. Python 2 — confirm the target runtime.
    """
    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)
        # Get images and labels.
        images, labels = inputs()
        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = inference(images)
        # Calculate loss.
        loss = losss(logits, labels)
        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        train_op = trainn(loss, global_step)
        # Create a saver.
        saver = tf.train.Saver(tf.all_variables())
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()
        # Build an initialization operation to run below.
        init = tf.initialize_all_variables()
        # Start running operations on the Graph.
        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=False))
        sess.run(init)
        # Start the queue runners that feed the input pipeline.
        tf.train.start_queue_runners(sess=sess)
        summary_writer = tf.train.SummaryWriter('/home/fzyue/Desktop/caffeendtoend/1', sess.graph)
        for step in xrange(5000):
            start_time = time.time()
            _, loss_value = sess.run([train_op, loss])
            duration = time.time() - start_time
            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
            if step % 10 == 0:
                num_examples_per_step = 1
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)
                format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                              'sec/batch)')
                print (format_str % (datetime.now(), step, loss_value,
                                     examples_per_sec, sec_per_batch))
                #print(labels)
                #print (sess.run(logits))
            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)
            # Save the model checkpoint periodically.
            if step % 1000 == 0 or (step + 1) == 1000000:
                checkpoint_path = os.path.join('/home/fzyue/Desktop/caffeendtoend/1', 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
'''def main(argv=None): # pylint: disable=unused-argument
#cifar10.maybe_download_and_extract()
if tf.gfile.Exists('/home/fzyue/Desktop/caffeendtoend/1'):
tf.gfile.DeleteRecursively('/home/fzyue/Desktop/caffeendtoend/1')
tf.gfile.MakeDirs('/home/fzyue/Desktop/caffeendtoend/1')
train()
main()'''
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluation for CIFAR-10.
Accuracy:
cifar10_train.py achieves 83.0% accuracy after 100K steps (256 epochs
of data) as judged by cifar10_eval.py.
Speed:
On a single Tesla K40, cifar10_train.py processes a single batch of 128 images
in 0.25-0.35 sec (i.e. 350 - 600 images /sec). The model reaches ~86%
accuracy after 100K steps in 8 hours of training time.
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
# Command-line flags for the evaluation run.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('eval_dir', 'evalwrite',
                           """Directory where to write event logs.""")
tf.app.flags.DEFINE_string('eval_data', 'test',
                           """Either 'test' or 'train_eval'.""")
tf.app.flags.DEFINE_string('checkpoint_dir', '1',
                           """Directory where to read model checkpoints.""")
# NOTE(review): 60 * 5 * 50000 seconds is roughly 173 days; combined with
# run_once=True the interval is effectively never used.
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5*50000,
                            """How often to run the eval.""")
tf.app.flags.DEFINE_integer('num_examples', 981,
                            """Number of examples to run.""")
tf.app.flags.DEFINE_boolean('run_once', True,
                            """Whether to run eval only once.""")
def eval_once(summary_writer, summary_op,top_k_op):
    """Run one evaluation pass over the latest checkpoint.

    Restores the newest checkpoint from FLAGS.checkpoint_dir, runs
    `top_k_op` once per example, counts top-3 hits, then appends a
    'Precision @ 1' summary (whose value is hard-coded to 1 — see below).

    Args:
        summary_writer: Summary writer.
        summary_op: Summary op.
        top_k_op: (logits, labels, in_top_k) tuple produced by top_k().
    """
    saver=tf.train.Saver()
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Restores from checkpoint.
            saver.restore(sess, ckpt.model_checkpoint_path)
            # Assuming model_checkpoint_path looks something like
            # /my-favorite-path/cifar10_train/model.ckpt-0,
            # extract global_step from it.
            global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        else:
            print('No checkpoint file found')
            return
        # Start the queue runners.
        coord = tf.train.Coordinator()
        try:
            threads = []
            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
                                                 start=True))
            num_iter = int(math.ceil(FLAGS.num_examples / 1))  # batch size 1
            true_count = 0  # Counts the number of correct predictions (unused).
            total_sample_count = num_iter * 1
            step = 0
            count=0  # examples whose true label was in the top 3
            # NOTE(review): result.txt is opened but never written — the writer
            # code below is inside a disabled triple-quoted string.
            with open('result.txt', 'w') as test_txt:
                before_time=time.time()
                while step < num_iter and not coord.should_stop():
                    # precisions: boolean array from in_top_k
                    # (assumes batch size 1 — confirm).
                    logits,labels,precisions=sess.run(top_k_op)
                    #predictions=tf.nn.in_top_k(logits, labels, 1)
                    step+=1
                    if(precisions==True):
                        count+=1
                    print (precisions)
                    '''line_logits='{} {} {} {} {}'.format(float(logits[0][0])*1280,float(logits[0][1])*1280,float(logits[0][2])*1280,float(logits[0][3])*1280,float(logits[0][4])*1024)
                    line_dir = '%s'%(dir)
                    line_dir=line_dir[10:-2]
                    line=line_dir+' '+line_logits+'\n'
                    test_txt.write(line)'''
                    #print('one_next_time=%s ,run_time=%s ,all_time=%s ,'%(one_next_time,run_time,all_time,))
                    #if step==499:
                    #    print('all_example_time=%s'%(float(time.time()-start_all_time)))
            # Compute precision @ 1.
            # NOTE(review): hard-coded — the logged summary value is always 1;
            # the real hit count is only printed via `count`.
            precision = 1
            print(count)
            print(ckpt)
            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            summary.value.add(tag='Precision @ 1', simple_value=precision)
            summary_writer.add_summary(summary, global_step)
        except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)
        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)
def top_k(logits, labels):
    """Bundle the logits and labels with a top-3 correctness op.

    Despite the generic name, k is fixed at 3: the returned boolean op is
    true when the true label is among the 3 highest-scoring logits.
    """
    in_top3 = tf.nn.in_top_k(logits, labels, 3)
    return logits, labels, in_top3
def evaluate():
    """Build the eval graph once, then run eval_once() — in a loop every
    FLAGS.eval_interval_secs unless FLAGS.run_once is set."""
    with tf.Graph().as_default() as g:
        # Get images and labels.
        images=tf.placeholder("float",shape=[None,256,320,3])  # dead: overwritten below
        labels=tf.placeholder("float",shape=[None])  # dead: overwritten below
        images, labels = inputs()
        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits=tf.placeholder("float",shape=[None])  # dead: overwritten below
        logits = inference(images)
        top_k_op=top_k(logits,labels)
        # Calculate predictions.
        '''top_k_op_logits = logits
        top_k_op_labels = labels[...,0]
        top_k_op_error = tf.sub(labels,logits)[...,0]'''
        #print('labels=%s logits=%s error=%s'%(top_k_op_labels,top_k_op_logits,top_k_op_error))
        # Restore the moving average version of the learned variables for eval.
        '''variable_averages = tf.train.ExponentialMovingAverage(
            cifar10.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)'''
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)
        while True:
            eval_once(summary_writer, summary_op,top_k_op)
            if FLAGS.run_once:
                break
            time.sleep(FLAGS.eval_interval_secs)
def main(argv=None):  # pylint: disable=unused-argument
    """Reset the eval output directory, then run the evaluation."""
    eval_dir = FLAGS.eval_dir
    # Start from an empty event-log directory on every run.
    if tf.gfile.Exists(eval_dir):
        tf.gfile.DeleteRecursively(eval_dir)
    tf.gfile.MakeDirs(eval_dir)
    evaluate()
# NOTE(review): called unconditionally — no `if __name__ == "__main__"` guard,
# so evaluation starts as soon as this module is imported/executed.
main()
| fengziyue/Lane-Detection | myeval_class.py | myeval_class.py | py | 25,303 | python | en | code | 13 | github-code | 13 |
17033957164 | from ...backend_config.converters import safe_text_to_bool
from ...backend_config.environment import EnvEntry
# Environment-variable bindings for the agent/API client.
# CLEARML_* names are the current form; TRAINS_* are legacy aliases.

# Server endpoints.
ENV_HOST = EnvEntry("CLEARML_API_HOST", "TRAINS_API_HOST")
ENV_WEB_HOST = EnvEntry("CLEARML_WEB_HOST", "TRAINS_WEB_HOST")
ENV_FILES_HOST = EnvEntry("CLEARML_FILES_HOST", "TRAINS_FILES_HOST")
# Credentials.
ENV_ACCESS_KEY = EnvEntry("CLEARML_API_ACCESS_KEY", "TRAINS_API_ACCESS_KEY")
ENV_SECRET_KEY = EnvEntry("CLEARML_API_SECRET_KEY", "TRAINS_API_SECRET_KEY")
ENV_AUTH_TOKEN = EnvEntry("CLEARML_AUTH_TOKEN")
# Client behaviour toggles.
ENV_VERBOSE = EnvEntry("CLEARML_API_VERBOSE", "TRAINS_API_VERBOSE", type=bool, default=False)
ENV_HOST_VERIFY_CERT = EnvEntry("CLEARML_API_HOST_VERIFY_CERT", "TRAINS_API_HOST_VERIFY_CERT", type=bool, default=True)
ENV_CONDA_ENV_PACKAGE = EnvEntry("CLEARML_CONDA_ENV_PACKAGE", "TRAINS_CONDA_ENV_PACKAGE")
ENV_NO_DEFAULT_SERVER = EnvEntry("CLEARML_NO_DEFAULT_SERVER", "TRAINS_NO_DEFAULT_SERVER", type=bool, default=True)
# Agent feature switches.
ENV_DISABLE_VAULT_SUPPORT = EnvEntry('CLEARML_AGENT_DISABLE_VAULT_SUPPORT', type=bool)
ENV_ENABLE_ENV_CONFIG_SECTION = EnvEntry('CLEARML_AGENT_ENABLE_ENV_CONFIG_SECTION', type=bool)
ENV_ENABLE_FILES_CONFIG_SECTION = EnvEntry('CLEARML_AGENT_ENABLE_FILES_CONFIG_SECTION', type=bool)
# Reads the standard VIRTUAL_ENV variable (set by venv/virtualenv activation).
ENV_VENV_CONFIGURED = EnvEntry('VIRTUAL_ENV', type=str)
ENV_PROPAGATE_EXITCODE = EnvEntry("CLEARML_AGENT_PROPAGATE_EXITCODE", type=bool, default=False)
# Parsed via safe_text_to_bool so values like "1"/"true"/"0" all work.
ENV_INITIAL_CONNECT_RETRY_OVERRIDE = EnvEntry(
    'CLEARML_AGENT_INITIAL_CONNECT_RETRY_OVERRIDE', default=True, converter=safe_text_to_bool
)
ENV_FORCE_MAX_API_VERSION = EnvEntry("CLEARML_AGENT_FORCE_MAX_API_VERSION", type=str)
"""
Experimental option to set the request method for all API requests and auth login.
This could be useful when GET requests with payloads are blocked by a server as
POST requests can be used instead.
However this has not been vigorously tested and may have unintended consequences.
"""
ENV_API_DEFAULT_REQ_METHOD = EnvEntry("CLEARML_API_DEFAULT_REQ_METHOD", default="GET")
42926603186 | from django.shortcuts import render
from django.shortcuts import HttpResponse
from site_youwu.models import Album
from site_youwu.models import Star
from .view_common import paging
from .view_common import getAlbumPageUrl
from .view_common import recommend
from .view_common import recom_albums
from .view_common import is_mobile_check
from .view_common import get_famous_site
import json
def star_page(request, starId, pageId):
    """Render a star's detail page: bio fields, paginated album grid,
    recommended albums and SEO metadata. Mobile clients get a different
    template and smaller paging parameters.

    NOTE: the templates receive ``locals()``, so every local variable name
    in this function is part of the template contract — do not rename them.
    """
    # Detect whether the request comes from a mobile client.
    is_mobile = is_mobile_check(request)
    # Basic info — URL captures arrive as strings.
    starId = int(starId)
    pageId = int(pageId)
    # Well-known sites block.
    # BUG FIX: the original assigned the function object itself
    # (`famous_site = get_famous_site`), so templates received a callable
    # instead of its result — presumably rendering a function repr.
    famous_site = get_famous_site()
    # Layout parameters.
    if is_mobile:
        page_cnt = 5      # page links per pagination group
        content_cnt = 15  # albums per page
        re_com_cnt = 6    # recommended albums shown
    else:
        page_cnt = 10
        content_cnt = 40
        re_com_cnt = 8
    # Star bio fields (each .values() call issues its own query — kept as-is
    # to avoid behavioral changes; candidate for a single .values() call).
    star_info = Star.objects.filter(starId=starId)
    star_name = star_info.values("name")[0]["name"]
    star_threeD = star_info.values("threeD")[0]["threeD"]
    star_hobby = star_info.values("hobby")[0]["hobby"]
    star_birthday = star_info.values("birthday")[0]["birthday"]
    star_birthPlace = star_info.values("birthPlace")[0]["birthPlace"]
    star_cover = json.loads(star_info.values("cover")[0]["cover"])[0]
    star_height = star_info.values("height")[0]["height"]
    star_weight = star_info.values("weight")[0]["weight"]
    star_description = star_info.values("description")[0]["description"]
    # Albums belonging to this star; attach cover thumbnail and detail URL.
    album_temp = Album.objects.filter(starId=starId).values("albumId", "name", "cover")
    star_album = []
    for a in album_temp:
        a["cover"] = json.loads(a["cover"])[0]
        a["album_url"] = getAlbumPageUrl(a["albumId"])
        star_album.append(a)
    # Pagination: content_cnt items per page, page_cnt links per group.
    page_content = paging(star_album, pageId, content_cnt, page_cnt)
    showData = page_content['showData']
    pageGroup = page_content['pageGroup']
    currentPage = pageId
    url_cut = "/starId=" + str(starId) + "/pageId="
    # Recommended albums for the sidebar.
    recom_data = recom_albums(re_com_cnt)
    # SEO metadata.
    title = star_name + "_尤物丝"
    keywords = star_name + "," + "写真"
    description = star_description
    if is_mobile:
        return render(request, "m_star.html", locals())
    else:
        return render(request, "star.html", locals())
| youwu360/youwu | youwu-src/site_youwu/view_list/view_star.py | view_star.py | py | 2,448 | python | en | code | 0 | github-code | 13 |
17050659374 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class CropsHarvestProgressInfo(object):
    """Value object describing harvest progress for a single crop.

    Fields: actual_date, addition_info, crop_code, harvest_progress_value,
    harvested_area, total_area. All default to None; each is exposed through
    a property backed by an underscore-prefixed attribute, matching the
    SDK's generated-model convention.
    """

    # Serialization order of the payload fields.
    _FIELDS = (
        'actual_date',
        'addition_info',
        'crop_code',
        'harvest_progress_value',
        'harvested_area',
        'total_area',
    )

    def _field_property(field_name):  # class-body helper, deleted below
        def _get(self):
            return getattr(self, '_' + field_name)

        def _set(self, value):
            setattr(self, '_' + field_name, value)

        return property(_get, _set)

    actual_date = _field_property('actual_date')
    addition_info = _field_property('addition_info')
    crop_code = _field_property('crop_code')
    harvest_progress_value = _field_property('harvest_progress_value')
    harvested_area = _field_property('harvested_area')
    total_area = _field_property('total_area')
    del _field_property

    def __init__(self):
        for field_name in self._FIELDS:
            setattr(self, '_' + field_name, None)

    def to_alipay_dict(self):
        """Serialize to a plain dict, omitting falsy fields and recursing
        into values that themselves implement to_alipay_dict()."""
        params = dict()
        for field_name in self._FIELDS:
            value = getattr(self, field_name)
            if not value:
                # Falsy values (None, 0, '') are skipped, per SDK convention.
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field_name] = value.to_alipay_dict()
            else:
                params[field_name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a response dict; returns None for a falsy
        input. Unknown keys are ignored."""
        if not d:
            return None
        o = CropsHarvestProgressInfo()
        for field_name in CropsHarvestProgressInfo._FIELDS:
            if field_name in d:
                setattr(o, field_name, d[field_name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/CropsHarvestProgressInfo.py | CropsHarvestProgressInfo.py | py | 3,685 | python | en | code | 241 | github-code | 13 |
69970213138 | import discord
import json
import munch
import os
import typing
from dotenv import load_dotenv
from glob import glob
from traceback import format_exception
import colorama as colour
colour.init(autoreset=True)
def loadjson(file: str) -> munch.Munch:
    """Load a JSON file (dotted path relative to cwd, '.json' appended)
    and return its contents as a Munch object."""
    path = f"{file.replace('.', '/')}.json"
    with open(path, "r") as handle:
        return munch.munchify(json.load(handle))
def savejson(obj: munch.Munch, file: str) -> None:
    """Serialize a Munch object to its dotted-path JSON file with
    2-space indentation, overwriting any previous contents."""
    path = f"{file.replace('.', '/')}.json"
    serialized = json.dumps(munch.unmunchify(obj), indent=2)
    with open(path, "w") as handle:
        handle.write(serialized)
        handle.truncate()
def loadenv() -> munch.Munch:
    """Merge any .env file into the process environment (via python-dotenv)
    and return the environment as a Munch object."""
    load_dotenv()
    environment = munch.munchify(os.environ)
    return environment
# Module-level singletons, loaded once at import time: the parsed config.json
# and the process environment (with .env merged in by loadenv()).
config = loadjson("config")
env = loadenv()
def formatexception(exception: Exception, indent: bool = False) -> str:
    """Format an exception's traceback as a string.

    Unless config.debug is set, only the innermost cause is kept (everything
    after the first "direct cause" separator is dropped). With indent=True
    the text is prefixed for the tree-style console log.
    """
    trace = "".join(format_exception(type(exception), exception, exception.__traceback__)).rstrip()
    if not config.debug:
        trace = trace.split("\n\nThe above exception was the direct cause of the following exception:\n\n")[0]
    if not indent:
        return trace
    return "[X] " + trace.replace('\n', '\n │ ')
def getextensions(searchdir: str = "") -> list:
    """Resolve a dotted search path under cogs/ to a list of extension
    module paths ("cogs.foo.bar").

    A direct file match wins; otherwise the directory is scanned
    recursively. Returns [None] when nothing matches.
    """
    target = searchdir.removeprefix("cogs.")
    relative = target.replace('.', '/')
    if os.path.isfile(f"cogs/{relative}.py"):
        return [f"cogs.{target}"]
    found = []
    for hit in glob(f"cogs/{relative}/**/*.py", recursive=True):
        # Path -> dotted module path, dropping the ".py" suffix.
        found.append(hit.replace("\\", ".").replace("/", ".")[:-3])
    return found or [None]
def initembed(ctx, title, description="", image=None, bordercolour=config.embed.colour):
    """Build a footer-stamped embed for the requesting user.

    Without an image the bare Embed is returned; with one, a Munch of
    {"embed", "file"} is returned, ready to unpack into a send call
    (the file is attached and used as the thumbnail).
    """
    result = discord.Embed(title=title, description=description, color=bordercolour)
    result.set_footer(text=f"Requested by {ctx.author.name}", icon_url=ctx.author.avatar_url)
    if image is not None:
        attachment = discord.File(f"assets/img/{image}.png", filename=f"{image}.png")
        result.set_thumbnail(url=f"attachment://{image}.png")
        return munch.munchify({"embed": result, "file": attachment})
    return result
def plaintext(inputstr: str, truncatelength: int = None) -> str:
    """Wrap a string in double backticks for Discord inline-code display,
    optionally truncating with an ellipsis to fit `truncatelength`.

    NOTE(review): the replace() calls below substitute '``' with the same
    literal and look like no-ops — possibly zero-width escape characters
    were lost from the source; confirm the intended sanitization.
    """
    sanitized = inputstr.replace('``', '``').replace('``', '``')
    if truncatelength is None or len(sanitized) <= truncatelength - 6:
        return f"``{sanitized}``"
    return f"``{sanitized[:truncatelength - 8]}...``"
async def reporterror(ctx, exception) -> None:
    """Report a command error both to the Discord channel and the console.

    Sends a red embed containing the (possibly truncated) traceback; if
    that send itself fails, the failure is noted on the console. In all
    cases the offending command and full traceback are logged.
    """
    try:
        # NOTE(review): replace("```", "```") substitutes identical text and
        # is a no-op as written — likely a lost zero-width-space escape for
        # code fences; confirm. [-1018:] keeps the tail within the 1024-char
        # embed-field limit (6 chars reserved for the code fences).
        formattedexception = formatexception(exception).replace("```", "```")[-1018:]
        e = initembed(ctx, "An error occurred during execution", bordercolour=0xFF0000)
        e.add_field(name="Traceback (May be truncated)", value=f"```{formattedexception}```")
        await ctx.send(embed=e)
    except Exception as criticalerror:
        # Reporting to the channel failed (e.g. missing permissions).
        print(f"{colour.Fore.RED}{colour.Style.BRIGHT}[X] An error occurred, "
              f"attempt to report error as a message failed\n │ {criticalerror}")
    finally:
        # Always log the offending command and its traceback to the console.
        print(f"{colour.Fore.YELLOW}[!] {ctx.author} ({ctx.author.id}) ran the command "
              f"{colour.Style.BRIGHT}{ctx.message.content}{colour.Style.NORMAL}, resulting in the following error:")
        print(f"{colour.Fore.RED}{formatexception(exception, indent=True)}")
class Flags:
    """Minimal command-argument flag parser.

    Register flags with addflag(), then call splitflags() to separate the
    input arguments into positional args and flag values. Supports bare
    boolean flags, "flag value" pairs, and "flag=value" forms.
    """

    class Flag:
        """Internal descriptor for one registered flag."""

        def __init__(self, flag: str, hasparameter: bool, defaultparametervalue, casesensitive: bool) -> None:
            self.name = flag
            # `id` is the comparison key: lowercased unless case-sensitive.
            self.id = flag if casesensitive else flag.lower()
            self.hasparameter = hasparameter
            # Default only meaningful for parameterized flags.
            self.defaultvalue = defaultparametervalue if hasparameter else None
            self.casesensitive = casesensitive

    def __init__(self, inputargs) -> None:
        self.inputargs = inputargs
        self.inputflags = []

    def addflag(self, flag: str, hasparameter: bool = False, defaultvalue=None, casesensitive: bool = False) -> None:
        """Add a flag to be parsed. Set hasparameter to True to use a flag with a parameter."""
        self.inputflags.append(self.Flag(flag, hasparameter, defaultvalue, casesensitive))

    def splitflags(self) -> tuple[list, dict]:
        """Return (remaining_args, flags): the non-flag arguments and a dict
        mapping each flag name to its value.

        Semantics: parameterized flags always appear in the dict (with their
        default if absent); bare flags appear only when present (True). If a
        flag occurs several times, the last occurrence wins. A parameter
        that fails conversion to type(defaultvalue) is silently ignored.
        NOTE(review): bool defaults convert via bool("text"), which is True
        for any non-empty string — confirm that is acceptable.
        """
        inputargs = self.inputargs
        splitflags = {}
        # Each pass filters matched tokens into `buffer`, which becomes the
        # arg list for the next flag — so args are consumed progressively.
        for flag in self.inputflags:
            buffer = []
            if not flag.hasparameter:
                for arg in inputargs:
                    if flag.id == (arg if flag.casesensitive else arg.lower()):
                        splitflags[flag.name] = True
                    else:
                        buffer.append(arg)
            else:
                splitflags[flag.name] = flag.defaultvalue
                getparameter = False  # True when the previous token was the flag itself
                for arg in inputargs:
                    if getparameter:
                        try:
                            splitflags[flag.name] = arg if flag.defaultvalue is None else type(flag.defaultvalue)(arg)
                        except ValueError:
                            pass
                        getparameter = False
                    elif flag.id == (arg if flag.casesensitive else arg.lower()):
                        getparameter = True
                    elif (arg if flag.casesensitive else arg.lower()).startswith(f"{flag.id}="):
                        # "flag=value" form; split only on the first '='.
                        if flag.defaultvalue is None:
                            splitflags[flag.name] = arg.split("=", 1)[1]
                        else:
                            try:
                                splitflags[flag.name] = type(flag.defaultvalue)(arg.split("=", 1)[1])
                            except ValueError:
                                pass
                    else:
                        buffer.append(arg)
            inputargs = buffer
        return inputargs, splitflags
class Logging:
    """Tree-style colored console logger (colorama-based).

    Channels are addressed by icon, single letter, or full name; indented
    "branches" are opened by indent-producing log() calls and closed with
    unindent(). `temporary=True` lines are overwritten by the next log call
    via a carriage return.
    """

    # channel letter -> printed icon.
    # NOTE(review): "i" is both the info letter (-> "-") and the debug icon;
    # channel "i" therefore always resolves to info — use "d"/"debug" for
    # debug output. Confirm this is intended.
    _channelicons = {"i": "-", "s": "O", "w": "!", "e": "X", "d": "i", "m": "+"}
    # icon -> ANSI color applied to the whole line.
    _channelcolours = {
        "-": colour.Fore.RESET,
        "O": colour.Fore.GREEN,
        "!": colour.Fore.YELLOW,
        "X": colour.Fore.RED,
        "i": colour.Style.BRIGHT,
        "+": colour.Fore.YELLOW
    }
    # Accepted channel spellings (icon, letter, or full name per row).
    _channeltypes = typing.Literal[
        "-", "i", "info",
        "O", "s", "success",
        "!", "w", "warn",
        "X", "e", "error",
        "i", "d", "debug",
        "+", "m", "more"
    ]

    def __init__(self):
        self.indents = []          # stack of colored indent strings
        self.carriagereturn = False  # True if the last line was temporary

    def log(self, text, channel: _channeltypes = "info", *, prefix: str = None,
            indent: bool = None, indentstring: str = " │ ", temporary: bool = False) -> None:
        """Print one (possibly multi-line) log entry on the given channel."""
        # Resolve channel -> icon: first letter wins, then an exact icon
        # match; anything unrecognized falls back to the error icon.
        if channel[0] in self._channelicons.keys():
            channelicon = self._channelicons[channel[0]]
        elif channel in self._channelicons.values():
            channelicon = channel
        else:
            channelicon = "X"
        channelcolour = self._channelcolours[channelicon]
        if prefix is None:
            prefix = f"[{channelicon}] "
        # Continuation lines are re-indented and aligned under the prefix.
        text = text.replace("\n", f"\n{''.join(self.indents)}{channelcolour}{' ' * len(prefix)}")
        if self.carriagereturn:
            # Overwrite the previous temporary line.
            print("\r", end="")
            self.carriagereturn = False
        print(f"{''.join(self.indents)}{channelcolour}{prefix}{text}", end="" if temporary else "\n")
        self.carriagereturn = temporary
        # "more" (+) channel auto-indents; indent=True forces it.
        if indent or (indent is None and channelicon == "+"):
            self.indents.append(channelcolour + indentstring)

    def unindent(self, level: int = 1) -> None:
        """Close `level` indent branches; over-popping just clears the stack."""
        try:
            for _ in range(level):
                self.indents.pop()
        except IndexError:
            self.indents = []
11705037488 |
#John O'Neill 01/04/2018
#Euler Problem 5... Finding Lowest common multiple for a range 1-20
#'''Used much of logic from https://www.youtube.com/watch?v=Km36RkQToqo and adjusted some variable inputs based on other knowledge on LCM from other sources'''
# https://stackoverflow.com/questions/147515/least-common-multiple-for-3-or-more-numbers
def LCM(sum):
    """Return True when `sum` is divisible by every integer in 11..20.

    Divisors 1..10 are not re-checked because the caller only tries
    multiples of 2520 (the LCM of 1..10).

    NOTE(review): the parameter shadows the built-in `sum`; kept for
    keyword-call compatibility.
    """
    # all() short-circuits on the first non-divisor, replacing the manual loop.
    return all(sum % divisor == 0 for divisor in range(11, 21))
# Project Euler 5: search upward in steps of 2520 (the LCM of 1..10), so each
# candidate only needs re-checking against 11..20 inside LCM().
sum=2520 #already know that 1-10 is covered by 2520 (NOTE: shadows the built-in `sum`)
while True: #loop until a candidate divisible by all of 11..20 is found
    if LCM(sum):
        break
    sum+=2520 #idea to step of 2520 taken from here https://www.youtube.com/watch?v=EMTcsNMFS_g
print(sum)  # smallest number evenly divisible by all of 1..20 (232792560)
| JohnONeillGMIT/Programming-and-Scripting | ExerciseWeek4.py | ExerciseWeek4.py | py | 863 | python | en | code | 0 | github-code | 13 |
20919769766 | #!/usr/bin/env python3
import itertools
import typing
def validate(numbers: list, index: int, lookback: int = 25) -> bool:
    """Return True if numbers[index] is the sum of two entries in the
    preceding `lookback`-sized window (AoC 2020 day 9, XMAS cipher).

    Args:
        numbers: full sequence of ints.
        index: position to validate.
        lookback: preamble/window size.
    Returns:
        True when index is inside the preamble, or some pair in the window
        sums to numbers[index]; False otherwise (including index out of range).
    """
    if index < lookback:
        # Indices less than the lookback window are in the preamble and always validate.
        return True
    try:
        target = numbers[index]
    except IndexError:
        return False
    window = numbers[index - lookback:index]
    # Generator short-circuits on the first matching pair; the original
    # built every combination eagerly via any(list(filter(lambda ...))).
    return any(a + b == target for a, b in itertools.combinations(window, 2))
def allSlices(iterable):
    """Yield every non-empty contiguous slice of `iterable`.

    BUG FIX: the original iterated permutations(range(len(iterable)), 2),
    which (a) emitted reversed index pairs producing useless empty slices
    and (b) never produced any slice containing the last element, because
    the stop index never reached len(iterable). Using combinations over
    range(len + 1) yields exactly the slices with start < stop.
    """
    for start, stop in itertools.combinations(range(len(iterable) + 1), 2):
        yield iterable[start:stop]
def main():
    """Solve AoC 2020 day 9: sanity-check against the worked example,
    then run the real puzzle input."""
    # --- Worked example (window of 5) ---
    with open('inputs/input9-test.txt', 'r') as fin:
        sample = [int(line) for line in fin.readlines()]
    window = 5
    # Part 1: first value not expressible as a sum of two of the previous 5.
    failures = [sample[i]
                for i in range(window, len(sample))
                if not validate(sample, i, window)]
    assert failures[0] == 127
    # Part 2: a contiguous run (length >= 2) summing to that value.
    runs = [sl for sl in allSlices(sample) if sum(sl) == 127 and len(sl) >= 2]
    assert min(runs[0]) + max(runs[0]) == 62

    # --- Real input (window of 25) ---
    with open('inputs/input9.txt', 'r') as fin:
        numbers = [int(line) for line in fin.readlines()]
    window = 25
    invalid = [numbers[i]
               for i in range(window, len(numbers))
               if not validate(numbers, i, window)][0]
    print(invalid)
    runs = [sl for sl in allSlices(numbers) if sum(sl) == invalid and len(sl) >= 2]
    print(min(runs[0]) + max(runs[0]))
if __name__ == "__main__":
main()
| colematt/advent-code | 2020/9/day9.py | day9.py | py | 1,503 | python | en | code | 0 | github-code | 13 |
12804905941 | import data_common
appliance_ip = '15.245.131.12'
RACK = 'AR51'
ENC_1 = 'CN754406W7'
ENC_2 = 'CN7544044C'
ENC_3 = 'CN754406WT'
ENCLOSURE_URIS = ['ENC:' + ENC_1, 'ENC:' + ENC_2, 'ENC:' + ENC_3]
frame = 3
# Interconnect Bay Set
IBS = 3
ENC_CLTYPE = data_common.CHLORIDE10
REDUNDANCY = 'AB'
LE = 'LE' + '-' + REDUNDANCY
LIG_A = 'LIG-A'
LIG_B = 'LIG-B'
EG = 'EG' + '-' + REDUNDANCY
LI_A = LE + '-' + LIG_A
LI_B = LE + '-' + LIG_B
LIs = [LI_A, LI_B]
POTASH3 = ENC_1 + ', ' + 'interconnect 3'
POTASH6 = ENC_2 + ', ' + 'interconnect 6'
# for EM RIS Efuse
FUSION_IP = appliance_ip
interface = 'bond0'
FUSION_USERNAME = 'Administrator' # Fusion Appliance Username
FUSION_PASSWORD = 'hpvse123' # Fusion Appliance Password
FUSION_SSH_USERNAME = 'root' # Fusion SSH Username
FUSION_SSH_PASSWORD = 'hpvse1' # Fusion SSH Password
FUSION_PROMPT = '#' # Fusion Appliance Prompt
FUSION_TIMEOUT = 180 # Timeout. Move this out???
FUSION_NIC = 'bond0' # Fusion Appliance Primary NIC
FUSION_NIC_SUFFIX = '%' + FUSION_NIC
########################################
# OV Uplinkset uplinks info
########################################
US_DA1_UPLINKS = ['Q4:3', 'Q4:4']
US_DA3_UPLINKS = ['Q4:3', 'Q4:4']
IC3_DA_UPLINKS = US_DA1_UPLINKS
IC6_DA_UPLINKS = US_DA3_UPLINKS
US_FA1_UPLINKS = []
US_FA3_UPLINKS = []
# US_FA1_UPLINKS = ['Q4:2']
# US_FA3_UPLINKS = ['Q4:2']
IC3_FA_UPLINKS = US_FA1_UPLINKS
IC6_FA_UPLINKS = US_FA3_UPLINKS
ASIDE_UPLINK_SETS = ['US-DA1']
BSIDE_UPLINK_SETS = ['US-DA3']
ALL_UPLINK_SETS = ASIDE_UPLINK_SETS + BSIDE_UPLINK_SETS
# different uplinkset uplink port representation in LIG
LIG_ENET_UPLINK = 'Q6'
LIG_DA1_UPLINKS = ['Q4.3', 'Q4.4']
LIG_DA3_UPLINKS = ['Q4.3', 'Q4.4']
# LIG_FA1_UPLINKS = ['Q4.2']
# LIG_FA3_UPLINKS = ['Q4.2']
########################################
# DA uplink connected 3Par port portWwn
########################################
# connected to Tbird 3Par-A ports 0:2:1, 1:2:1 and 0:2:2, 1:2:2
IC3_DA_WWN_1 = '20:21:00:02:ac:01:2b:0c'
IC3_DA_WWN_2 = '21:21:00:02:ac:01:2b:0c'
IC6_DA_WWN_1 = '20:22:00:02:ac:01:2b:0c'
IC6_DA_WWN_2 = '21:22:00:02:ac:01:2b:0c'
ASIDE_HAPPY_CONNECTION_MAP = [IC3_DA_WWN_1, IC3_DA_WWN_2]
BSIDE_HAPPY_CONNECTION_MAP = [IC6_DA_WWN_1, IC6_DA_WWN_2]
########################################
# DA uplinks - 3par ports Dictionary
########################################
# For nameServer uplink DA port verification
IC3_UPLINKS_DA = [
{'uplink': US_DA1_UPLINKS[0],
'da_portwwn': IC3_DA_WWN_1},
{'uplink': US_DA1_UPLINKS[1],
'da_portwwn': IC3_DA_WWN_2}
]
IC6_UPLINKS_DA = [
{'uplink': US_DA3_UPLINKS[0],
'da_portwwn': IC6_DA_WWN_1},
{'uplink': US_DA3_UPLINKS[1],
'da_portwwn': IC6_DA_WWN_2}
]
# Enclosure 1 servers downlink on Enc1 and Enc2; 2 servers on each enclosure
ENC1_SERVER_1_ENC1_DL = 'd1'
ENC1_SERVER_1_ENC2_DL = 'd13'
ENC1_SERVER_2_ENC1_DL = 'd4'
ENC1_SERVER_2_ENC2_DL = 'd16'
# Enclosure 2 servers downlink on Enc1 and Enc2; 2 servers on each enclosure
ENC2_SERVER_1_ENC2_DL = 'd1'
ENC2_SERVER_1_ENC1_DL = 'd13'
ENC2_SERVER_2_ENC2_DL = 'd4'
ENC2_SERVER_2_ENC1_DL = 'd16'
# servers downlinks mapped to Aside and Bside Potash
ASIDE_SERVER_DOWNLINKS = [ENC1_SERVER_1_ENC1_DL, ENC1_SERVER_2_ENC1_DL, ENC2_SERVER_1_ENC1_DL, ENC2_SERVER_2_ENC1_DL]
BSIDE_SERVER_DOWNLINKS = [ENC1_SERVER_1_ENC2_DL, ENC1_SERVER_2_ENC2_DL, ENC2_SERVER_1_ENC2_DL, ENC2_SERVER_2_ENC2_DL]
# server profile names
ENC1_SP_1_NAME = 'SP-%s-enc1-bay1' % RACK
ENC1_SP_2_NAME = 'SP-%s-enc1-bay4' % RACK
ENC2_SP_1_NAME = 'SP-%s-enc2-bay1' % RACK
ENC2_SP_2_NAME = 'SP-%s-enc2-bay4' % RACK
ENC1_SP_1_BFS_NAME = 'SP-%s-BFS-enc1-bay1' % RACK
ENC2_SP_1_BFS_NAME = 'SP-%s-BFS-enc2-bay4' % RACK
# servers name and private IP through dhcp
ENC1_SERVER_1 = '%s, bay 1' % ENC_1
ENC1_SERVER_2 = '%s, bay 4' % ENC_1
ENC2_SERVER_1 = '%s, bay 1' % ENC_2
ENC2_SERVER_2 = '%s, bay 4' % ENC_2
# REVISIT - to be changed
ENC1_SERVER_1_IP_A = '10.11.1.5'
ENC1_SERVER_2_IP_A_OLD = '10.11.1.23' # no mac on server
ENC1_SERVER_2_IP_A = '10.11.1.4'
ENC2_SERVER_1_IP_A = '10.11.1.3'
ENC2_SERVER_2_IP_A = '10.11.1.28'
GW_IP = '10.11.0.1'
PING_LIST = [ENC1_SERVER_1_IP_A, ENC1_SERVER_2_IP_A, ENC2_SERVER_1_IP_A, ENC2_SERVER_2_IP_A]
servers = [ENC1_SERVER_1, ENC1_SERVER_2, ENC2_SERVER_1, ENC2_SERVER_2]
server_profile_names = [ENC1_SP_1_NAME, ENC1_SP_2_NAME, ENC2_SP_1_NAME, ENC2_SP_2_NAME]
LINUX_BFS_USER = 'root'
LINUX_BFS_PWD = 'hpvse123'
LINUX_BFS_PROMPT = 'ESXi'
# For disable downlink test cases
ENC1_SERVERS = [
{'sp_name': ENC1_SP_1_NAME,
'sh_name': ENC1_SERVER_1,
'enc1_downlink': ENC1_SERVER_1_ENC1_DL,
'enc2_downlink': ENC1_SERVER_1_ENC2_DL,
'ip': ENC1_SERVER_1_IP_A},
{'sp_name': ENC1_SP_2_NAME,
'sh_name': ENC1_SERVER_2,
'enc1_downlink': ENC1_SERVER_2_ENC1_DL,
'enc2_downlink': ENC1_SERVER_2_ENC2_DL,
'ip': ENC1_SERVER_2_IP_A}
]
ENC2_SERVERS = [
{'sp_name': ENC2_SP_1_NAME,
'sh_name': ENC2_SERVER_1,
'enc1_downlink': ENC2_SERVER_1_ENC1_DL,
'enc2_downlink': ENC2_SERVER_1_ENC2_DL,
'ip': ENC2_SERVER_1_IP_A},
{'sp_name': ENC2_SP_2_NAME,
'sh_name': ENC2_SERVER_2,
'enc1_downlink': ENC2_SERVER_2_ENC1_DL,
'enc2_downlink': ENC2_SERVER_2_ENC2_DL,
'ip': ENC2_SERVER_2_IP_A}
]
##################################
# uplinksets definitions in LIG
##################################
uplink_sets_in_lig_A = [
{
'name': 'US-NET1',
'ethernetNetworkType': 'Tagged',
'networkType': 'Ethernet',
'networkUris': ['wpstnetwork1'],
'lacpTimer': 'Short',
'mode': 'Auto',
'nativeNetworkUri': None,
'logicalPortConfigInfos': [
{'enclosure': '1', 'bay': '3', 'port': LIG_ENET_UPLINK, 'speed': 'Auto'}
]
},
{
'name': 'US-DA1',
'ethernetNetworkType': 'NotApplicable',
'networkType': 'FibreChannel',
'networkUris': ['DA1'],
'lacpTimer': 'Short',
'mode': 'Auto',
'nativeNetworkUri': None,
'logicalPortConfigInfos': [
{'enclosure': '1', 'bay': '3', 'port': LIG_DA1_UPLINKS[0], 'speed': 'Auto'},
{'enclosure': '1', 'bay': '3', 'port': LIG_DA1_UPLINKS[1], 'speed': 'Auto'}
]
}
# {
# 'name': 'US-FA1',
# 'ethernetNetworkType': 'NotApplicable',
# 'networkType': 'FibreChannel',
# 'networkUris': ['FA1'],
# 'lacpTimer': 'Short',
# 'mode': 'Auto',
# 'nativeNetworkUri': None,
# 'logicalPortConfigInfos': [
# {'enclosure': '1', 'bay': '3', 'port': LIG_FA1_UPLINKS[0], 'speed': 'Auto'}
# ]
# },
]
uplink_sets_in_lig_B = [
{
'name': 'US-DA3',
'ethernetNetworkType': 'NotApplicable',
'networkType': 'FibreChannel',
'networkUris': ['DA3'],
'lacpTimer': 'Short',
'mode': 'Auto',
'nativeNetworkUri': None,
'logicalPortConfigInfos': [
{'enclosure': '2', 'bay': '6', 'port': LIG_DA3_UPLINKS[0], 'speed': 'Auto'},
{'enclosure': '2', 'bay': '6', 'port': LIG_DA3_UPLINKS[1], 'speed': 'Auto'}
]
}
# {
# 'name': 'US-FA3',
# 'ethernetNetworkType': 'NotApplicable',
# 'networkType': 'FibreChannel',
# 'networkUris': ['FA3'],
# 'lacpTimer': 'Short',
# 'mode': 'Auto',
# 'nativeNetworkUri': None,
# 'logicalPortConfigInfos': [
# {'enclosure': '2', 'bay': '6', 'port': LIG_FA3_UPLINKS[0], 'speed': 'Auto'}
# ]
# }
]
##################################
# Interconnect bays configurations
# 2 or 3 Frames, IBS3, CL-20
##################################
InterconnectMapTemplate_A = \
[
{'bay': 3, 'enclosure': 1, 'type': 'Virtual Connect SE 40Gb F8 Module for Synergy', 'enclosureIndex': 1},
{'bay': 3, 'enclosure': 2, 'type': 'Synergy 10Gb Interconnect Link Module', 'enclosureIndex': 2},
{'bay': 3, 'enclosure': 3, 'type': 'Synergy 10Gb Interconnect Link Module', 'enclosureIndex': 3}
]
InterconnectMapTemplate_B = \
[
{'bay': 6, 'enclosure': 1, 'type': 'Synergy 10Gb Interconnect Link Module', 'enclosureIndex': 1},
{'bay': 6, 'enclosure': 2, 'type': 'Virtual Connect SE 40Gb F8 Module for Synergy', 'enclosureIndex': 2},
{'bay': 6, 'enclosure': 3, 'type': 'Synergy 10Gb Interconnect Link Module', 'enclosureIndex': 3}
]
# Logical Interconnect Group definitions for the A and B sides.
# list(range(...)) replaces the Python-2-only [x for x in xrange(...)];
# it produces the identical list on both Python 2 and Python 3.
ligs = [
    {'name': LIG_A,
     'interconnectMapTemplate': InterconnectMapTemplate_A,
     'enclosureIndexes': list(range(1, frame + 1)),
     'interconnectBaySet': IBS,
     'redundancyType': 'NonRedundantASide',
     'uplinkSets': list(uplink_sets_in_lig_A)
     },
    {'name': LIG_B,
     'interconnectMapTemplate': InterconnectMapTemplate_B,
     'enclosureIndexes': list(range(1, frame + 1)),
     'interconnectBaySet': IBS,
     'redundancyType': 'NonRedundantBSide',
     'uplinkSets': list(uplink_sets_in_lig_B),
     }
]
enc_group = {
EG:
{'name': EG,
'enclosureCount': frame,
'interconnectBayMappings':
[{'interconnectBay': 1, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 2, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 3, 'logicalInterconnectGroupUri': 'LIG:' + LIG_A},
{'interconnectBay': 4, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 5, 'logicalInterconnectGroupUri': None},
{'interconnectBay': 6, 'logicalInterconnectGroupUri': 'LIG:' + LIG_B}],
'ipAddressingMode': "External"
}
}
###
# All logical enclosures
###
les = {
LE:
{'name': LE,
'enclosureUris': ENCLOSURE_URIS,
'enclosureGroupUri': 'EG:' + EG,
'firmwareBaselineUri': None,
'forceInstallFirmware': False
}
}
###############
# LI Uplinksets
###############
# used for LI uplinkset updated
li_uplinksets = {
'US_DA1_4Gb':
{'name': 'US-DA1',
'ethernetNetworkType': 'NotApplicable',
'networkType': 'FibreChannel',
'networkUris': [],
'fcNetworkUris': ['DA1'],
'fcoeNetworkUris': [],
'manualLoginRedistributionState': 'Supported',
'connectionMode': 'Auto',
'portConfigInfos': [{'enclosure': ENC_1, 'bay': '3', 'port': US_DA1_UPLINKS[0], 'desiredSpeed': 'Speed4G'},
{'enclosure': ENC_1, 'bay': '3', 'port': US_DA1_UPLINKS[1], 'desiredSpeed': 'Speed4G'}],
'logicalInterconnectUri': None},
'US_DA1_8Gb':
{'name': 'US-DA1',
'ethernetNetworkType': 'NotApplicable',
'networkType': 'FibreChannel',
'networkUris': [],
'fcNetworkUris': ['DA1'],
'fcoeNetworkUris': [],
'manualLoginRedistributionState': 'Supported',
'connectionMode': 'Auto',
'portConfigInfos': [{'enclosure': ENC_1, 'bay': '3', 'port': US_DA1_UPLINKS[0], 'desiredSpeed': 'Speed8G'},
{'enclosure': ENC_1, 'bay': '3', 'port': US_DA1_UPLINKS[1], 'desiredSpeed': 'Speed8G'}],
'logicalInterconnectUri': None},
}
#################
# Server Profiles
#################
server_profiles = [{'type': 'ServerProfileV8',
'serverHardwareUri': ENC1_SERVER_1,
'serverHardwareTypeUri': None,
'enclosureUri': 'ENC:' + ENC_1,
'enclosureGroupUri': 'EG:%s' % EG,
'serialNumberType': 'Physical',
'macType': 'Physical',
'wwnType': 'Physical',
'name': ENC1_SP_1_NAME,
'description': 'Blackbird rhel6.7 - Aside',
'affinity': 'Bay',
'connections': [{'id': 1,
'name': 'conn-net',
'functionType': 'Ethernet',
'portId': 'Mezz 3:1-a',
'requestedMbps': data_common.getEnetRBW(ENC_CLTYPE),
'networkUri': 'ETH:wpstnetwork1',
'mac': None,
'wwpn': None,
'wwnn': None},
{'id': 2,
'name': 'conn-fc',
'functionType': 'FibreChannel',
'portId': 'Mezz 3:1-b',
'requestedMbps': data_common.getFcRBW(ENC_CLTYPE),
'networkUri': 'FC:DA1',
'mac': None,
'wwpn': None,
'wwnn': None},
{'id': 3,
'name': 'conn-fc2',
'functionType': 'FibreChannel',
'portId': 'Auto',
'requestedMbps': data_common.getFcRBW(ENC_CLTYPE),
'networkUri': 'FC:DA3',
'mac': None,
'wwpn': None,
'wwnn': None}
]},
{'type': 'ServerProfileV8',
'serverHardwareUri': ENC1_SERVER_2,
'serverHardwareTypeUri': None,
'enclosureUri': 'ENC:' + ENC_1,
'enclosureGroupUri': 'EG:%s' % EG,
'serialNumberType': 'Physical',
'macType': 'Physical',
'wwnType': 'Physical',
'name': ENC1_SP_2_NAME,
'description': 'Blackbird Windows - Aside',
'affinity': 'Bay',
'connections': [{'id': 1,
'name': 'conn-net',
'functionType': 'Ethernet',
'portId': 'Mezz 3:1-a',
'requestedMbps': data_common.getEnetRBW(ENC_CLTYPE),
'networkUri': 'ETH:wpstnetwork1',
'mac': None,
'wwpn': None,
'wwnn': None},
{'id': 2,
'name': 'conn-fc1',
'functionType': 'FibreChannel',
'portId': 'Auto',
'requestedMbps': data_common.getFcRBW(ENC_CLTYPE),
'networkUri': 'FC:DA1',
'mac': None,
'wwpn': None,
'wwnn': None},
{'id': 3,
'name': 'conn-fc2',
'functionType': 'FibreChannel',
'portId': 'Auto',
'requestedMbps': data_common.getFcRBW(ENC_CLTYPE),
'networkUri': 'FC:DA3',
'mac': None,
'wwpn': None,
'wwnn': None}
]},
{'type': 'ServerProfileV8',
'serverHardwareUri': ENC2_SERVER_1,
'serverHardwareTypeUri': None,
'enclosureUri': 'ENC:' + ENC_2,
'enclosureGroupUri': 'EG:%s' % EG,
'serialNumberType': 'Physical',
'macType': 'Physical',
'wwnType': 'Physical',
'name': ENC2_SP_1_NAME,
'description': 'Blackbird Linux - Bside',
'affinity': 'Bay',
'connections': [{'id': 1,
'name': 'conn-net1',
'functionType': 'Ethernet',
'portId': 'Mezz 3:1-a',
'requestedMbps': data_common.getEnetRBW(ENC_CLTYPE),
'networkUri': 'ETH:wpstnetwork1',
'mac': None,
'wwpn': None,
'wwnn': None},
{'id': 2,
'name': 'conn-fc1',
'functionType': 'FibreChannel',
'portId': 'Auto',
'requestedMbps': data_common.getFcRBW(ENC_CLTYPE),
'networkUri': 'FC:DA1',
'mac': None,
'wwpn': None,
'wwnn': None},
{'id': 3,
'name': 'conn-fc2',
'functionType': 'FibreChannel',
'portId': 'Auto',
'requestedMbps': data_common.getFcRBW(ENC_CLTYPE),
'networkUri': 'FC:DA3',
'mac': None,
'wwpn': None,
'wwnn': None}
]},
{'type': 'ServerProfileV8',
'serverHardwareUri': ENC2_SERVER_2,
'serverHardwareTypeUri': None,
'enclosureUri': 'ENC:' + ENC_2,
'enclosureGroupUri': 'EG:%s' % EG,
'serialNumberType': 'Physical',
'macType': 'Physical',
'wwnType': 'Physical',
'name': ENC2_SP_2_NAME,
'description': 'Blackbird Windows - Bside',
'affinity': 'Bay',
'connections': [{'id': 1,
'name': 'conn-net1',
'functionType': 'Ethernet',
'portId': 'Mezz 3:1-a',
'requestedMbps': data_common.getEnetRBW(ENC_CLTYPE),
'networkUri': 'ETH:wpstnetwork1',
'mac': None,
'wwpn': None,
'wwnn': None},
{'id': 2,
'name': 'conn-fc1',
'functionType': 'FibreChannel',
'portId': 'Auto',
'requestedMbps': data_common.getFcRBW(ENC_CLTYPE),
'networkUri': 'FC:DA1',
'mac': None,
'wwpn': None,
'wwnn': None},
{'id': 3,
'name': 'conn-fc2',
'functionType': 'FibreChannel',
'portId': 'Auto',
'requestedMbps': data_common.getFcRBW(ENC_CLTYPE),
'networkUri': 'FC:DA3',
'mac': None,
'wwpn': None,
'wwnn': None}
]},
]
| richa92/Jenkin_Regression_Testing | robo4.2/fusion/tests/wpst_crm/feature_tests/TBIRD/FC_POTASH/data_F117_AR51_ab.py | data_F117_AR51_ab.py | py | 20,021 | python | en | code | 0 | github-code | 13 |
38701156465 | """
Contains classes that provide sets of API views for all models of the project.
"""
from django.db.models import Count
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import status, mixins, viewsets
from rest_framework.decorators import action
from rest_framework.generics import get_object_or_404
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.permissions import AllowAny, IsAuthenticated, IsAdminUser
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from posts.models import Post
from questions.models import QuestionCategory, Question
from users.models import MyUser
from .filters import QuestionFilter, QuestionCategoryFilter, PostFilter, UserFilter
from .serializers import QuestionCategorySerializer, QuestionSerializer, \
PostSerializer, UserSerializer
class BasePagination(LimitOffsetPagination):
    """Limit/offset pagination defaulting to 10 items per page when the
    client does not supply a `limit` query parameter."""
    default_limit = 10
class BaseViewSet(ModelViewSet):
    """Shared ViewSet behaviour for the project's models: pagination,
    filtering, per-action permissions, soft-delete on destroy, and
    author auto-fill on create."""

    model = QuestionCategory
    pagination_class = BasePagination
    filter_backends = (DjangoFilterBackend,)
    permission_classes = [AllowAny]
    permission_classes_by_action = {
        'create': [IsAuthenticated],
        'update': [IsAdminUser],
        'partial_update': [IsAdminUser],
        'destroy': [IsAdminUser]
    }

    def get_permissions(self):
        """Resolve the permission classes for the current action, falling
        back to the class-level `permission_classes` when the action has
        no per-action override."""
        classes = self.permission_classes_by_action.get(
            self.action, self.permission_classes)
        return [permission() for permission in classes]

    def destroy(self, request, *args, **kwargs):
        """Soft-delete: toggle the object's `available` flag instead of
        removing the row (a second call re-activates it)."""
        item = get_object_or_404(self.queryset, pk=kwargs['pk'])
        item.available = not item.available
        item.save()
        return Response(status=status.HTTP_204_NO_CONTENT)

    def create(self, request, *args, **kwargs):
        """Create an object; for every model except QuestionCategory the
        current user is recorded as the author."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        if self.model is QuestionCategory:
            serializer.save()
        else:
            serializer.save(author=request.user)
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    @action(detail=False, name='Сортировка по тегу (а-я)')
    def order_by_tag(self, request, *args, **kwargs):
        """List objects ordered alphabetically by their `tag` field."""
        ordered = self.model.objects.all().order_by('tag')
        page = self.paginate_queryset(ordered)
        if page is None:
            serializer = self.get_serializer(ordered, many=True)
            return Response(serializer.data)
        serializer = self.get_serializer(page, many=True)
        return self.get_paginated_response(serializer.data)
class QuestionCategoryViewSet(BaseViewSet):
    """API views for QuestionCategory: GET/POST/PUT only, with every
    write action restricted to admins."""

    queryset = QuestionCategory.objects.all().order_by('name')
    http_method_names = ['get', 'post', 'put']
    serializer_class = QuestionCategorySerializer
    filterset_class = QuestionCategoryFilter
    permission_classes_by_action = {
        action_name: [IsAdminUser]
        for action_name in ('create', 'update', 'partial_update')
    }

    @action(detail=False, name='Сортировка по количеству вопросов')
    def order_by_tag(self, request, *args, **kwargs):
        """Order categories by how many questions they contain (desc),
        overriding the tag-based ordering of the parent class."""
        ranked = (QuestionCategory.objects.all()
                  .annotate(que_set=Count('question'))
                  .order_by('-que_set'))
        page = self.paginate_queryset(ranked)
        if page is None:
            serializer = self.get_serializer(ranked, many=True)
            return Response(serializer.data)
        serializer = self.get_serializer(page, many=True)
        return self.get_paginated_response(serializer.data)
class QuestionViewSet(BaseViewSet):
    """API views for the Question model."""
    # Overriding `model` makes the inherited create()/order_by_tag()
    # operate on Question instead of the Base default (QuestionCategory).
    model = Question
    queryset = Question.objects.all()
    serializer_class = QuestionSerializer
    filterset_class = QuestionFilter
class PostViewSet(BaseViewSet):
    """API views for the Post model."""
    # Overriding `model` makes the inherited create()/order_by_tag()
    # operate on Post instead of the Base default (QuestionCategory).
    model = Post
    queryset = Post.objects.all()
    serializer_class = PostSerializer
    filterset_class = PostFilter
class UserViewSet(mixins.RetrieveModelMixin,
                  mixins.UpdateModelMixin,
                  mixins.DestroyModelMixin,
                  mixins.ListModelMixin,
                  viewsets.GenericViewSet):
    """Admin-only API views for the MyUser model (no create action).

    destroy() toggles `is_active` instead of deleting the row; the two
    ordering actions share one pagination/serialization helper.
    """
    queryset = MyUser.objects.all()
    serializer_class = UserSerializer
    pagination_class = BasePagination
    filter_backends = (DjangoFilterBackend,)
    filterset_class = UserFilter
    permission_classes = [IsAdminUser]

    def _ordered_response(self, queryset):
        """Serialize `queryset`, paginated when pagination is enabled.

        Extracted from recent_users/ranking_by_score, which previously
        duplicated this logic verbatim."""
        page = self.paginate_queryset(queryset)
        if page is not None:
            serializer = self.get_serializer(page, many=True)
            return self.get_paginated_response(serializer.data)
        serializer = self.get_serializer(queryset, many=True)
        return Response(serializer.data)

    def destroy(self, request, *args, **kwargs):
        """Soft-delete: toggle the user's `is_active` flag instead of
        removing the row (a second call re-activates the account)."""
        item = get_object_or_404(self.queryset, pk=kwargs['pk'])
        item.is_active = not item.is_active
        item.save()
        return Response(status=status.HTTP_204_NO_CONTENT)

    @action(detail=False, name='Пользователи по дате последнего логина')
    def recent_users(self, request, *args, **kwargs):
        """Users ordered by most recent login first."""
        return self._ordered_response(MyUser.objects.all().order_by('-last_login'))

    @action(detail=False, name='Пользователи по рейтингу')
    def ranking_by_score(self, request, *args, **kwargs):
        """Users ordered by score, highest first."""
        return self._ordered_response(MyUser.objects.all().order_by('-score'))
| Lalluviadel/interview_quiz | api_rest/api.py | api.py | py | 6,957 | python | en | code | 0 | github-code | 13 |
15247928778 | import json
import os
import rtree
from shapely import geometry
DEFAULT_DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data")
SHAPES_FILE = "shapes.json"
METADATA_FILE = "metadata.json"

# Module-level singleton so repeated load() calls reuse the parsed data.
cached_country_database = None


class CountryDatabase:
    """Spatially indexed lookup of country records by point or box."""

    @staticmethod
    def load(root_dir=DEFAULT_DATA_DIR):
        """Build (or reuse) the shared database from the JSON files
        under root_dir."""
        global cached_country_database
        if cached_country_database is None:
            loaded = _load_countries(root_dir)
            cached_country_database = CountryDatabase(loaded, _create_index(loaded))
        return cached_country_database

    def __init__(self, countries, index):
        self.countries = countries
        self.index = index

    def query_by_point(self, longitude, latitude):
        """Countries whose bounding boxes contain the given point."""
        point_box = (longitude, latitude, longitude, latitude)
        return self.query_by_box(point_box)

    def query_by_box(self, box):
        """Countries whose bounding boxes intersect box=(minx, miny, maxx, maxy).

        Returns a lazy generator over the matching country records."""
        return (self.countries[i] for i in self.index.intersection(box))
def _load_countries(data_root):
    """Join the GeoJSON shape features with the ISO metadata table into
    a list of {name, continent, shape} records."""
    shapes = _load_json(os.path.join(data_root, SHAPES_FILE))
    metadata = _load_json(os.path.join(data_root, METADATA_FILE))
    countries = []
    for feature in shapes["features"]:
        countries.append({
            "name": feature["properties"]["name"],
            "continent": _get_continent(feature, metadata),
            "shape": geometry.shape(feature["geometry"]),
        })
    return countries
def _load_json(filepath):
with open(os.path.join(filepath), "r") as file_stream:
return json.load(file_stream)
def _get_continent(country, metadata):
country_code = country["id"]
for meta in metadata:
if meta["alpha-3"] == country_code:
return meta["region"]
return None
def _create_index(countries):
    """Build an R-tree over the countries' shape bounds, keyed by list
    position (interleaved=True keeps bounds as (minx, miny, maxx, maxy))."""
    index = rtree.index.Index(interleaved=True)
    # `position` renamed from `id`, which shadowed the builtin.
    for position, country in enumerate(countries):
        index.insert(position, country["shape"].bounds)
    return index
2167378760 | from typing import Optional, Dict, List
from genie_common.utils import safe_nested_get
from genie_datastores.postgres.models import Artist
from data_collectors.consts.google_consts import RESULTS, ADDRESS_COMPONENTS, TYPES, GEOMETRY, LOCATION, LATITUDE, \
LONGITUDE
from data_collectors.contract import ISerializer
from data_collectors.logic.models import DBUpdateRequest, AddressComponentSetting
class GoogleGeocodingResponseSerializer(ISerializer):
    """Maps a raw Google Geocoding API response onto Artist column updates
    (country/state/county/city plus latitude/longitude)."""

    def serialize(self, artist_id: str, geocoding: dict) -> Optional[DBUpdateRequest]:
        """Build a DBUpdateRequest for `artist_id` from a geocoding response.

        Returns None (implicitly) when the response has no usable result.
        """
        first_result = self._extract_geocoding_first_result(geocoding)
        if first_result is not None:
            update_values = self._serialize_address_components(first_result)
            self._add_latitude_and_longitude_to_update_values(update_values, first_result)
            return DBUpdateRequest(
                id=artist_id,
                values=update_values
            )

    @staticmethod
    def _extract_geocoding_first_result(geocoding: dict) -> Optional[dict]:
        """Return the first entry of the response's results list, or None
        (implicitly) when it is missing, empty, or not a list."""
        results = geocoding.get(RESULTS, [])
        if results and isinstance(results, list):
            return results[0]

    def _serialize_address_components(self, geocoding_result: dict) -> Dict[Artist, str]:
        """Collect {Artist column: value} pairs for every configured address
        component present in the result."""
        components = {}
        raw_address_components = geocoding_result.get(ADDRESS_COMPONENTS, [])
        for setting in self._address_components_settings:
            serialized_component = self._serialize_single_address_component(setting, raw_address_components)
            # A component type absent from the response yields None and is skipped.
            if serialized_component is not None:
                components.update(serialized_component)
        return components

    @staticmethod
    def _serialize_single_address_component(setting: AddressComponentSetting,
                                            raw_address_components: List[dict]) -> Optional[Dict[Artist, str]]:
        """Return {setting.column: value} from the first component whose
        types include setting.type; None (implicitly) when nothing matches."""
        for component in raw_address_components:
            raw_component_types = component.get(TYPES, [])
            if setting.type in raw_component_types:
                # Value may itself be None if the extract field is absent.
                component_value = component.get(setting.extract_field)
                return {setting.column: component_value}

    def _add_latitude_and_longitude_to_update_values(self,
                                                     update_values: Dict[Artist, str],
                                                     geocoding_result: dict) -> None:
        """Mutate `update_values` in place, adding latitude/longitude read
        from the nested [GEOMETRY, LOCATION, field] path (None when absent)."""
        for column, field_name in self._lat_long_columns_mapping.items():
            field_value = safe_nested_get(geocoding_result, [GEOMETRY, LOCATION, field_name], default=None)
            update_values[column] = field_value

    @property
    def _address_components_settings(self) -> List[AddressComponentSetting]:
        """Mapping of Google address-component types to Artist columns and
        the response field each value is extracted from."""
        return [
            AddressComponentSetting(
                column=Artist.country,
                type="country",
                extract_field="short_name"
            ),
            AddressComponentSetting(
                column=Artist.state,
                type="administrative_area_level_1",
                extract_field="short_name"
            ),
            AddressComponentSetting(
                column=Artist.county,
                type="administrative_area_level_2",
                extract_field="long_name"
            ),
            AddressComponentSetting(
                column=Artist.city,
                type="locality",
                extract_field="long_name"
            )
        ]

    @property
    def _lat_long_columns_mapping(self) -> Dict[Artist, str]:
        """Artist coordinate columns and their response field names."""
        return {
            Artist.latitude: LATITUDE,
            Artist.longitude: LONGITUDE
        }
| nirgodin/radio-stations-data-collection | data_collectors/logic/serializers/google_geocoding_response_serializer.py | google_geocoding_response_serializer.py | py | 3,616 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.