seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4242214840 | from predict_utility import load_checkpoint,get_input_args,predict,process_image
from utility_module import label_mapping
import warnings
warnings.filterwarnings("ignore")
from prettytable import PrettyTable

# Parse CLI arguments, restore the trained checkpoint and run inference.
table = PrettyTable()
args = get_input_args()
model = load_checkpoint(args.checkpoint)
top_ps, top_class = predict(image_path=args.path, model=model, topk=args.top_k)
print("\nprediction to the given image of the flower\n")

# Map predicted class ids to human-readable flower names.
flower_to_name = label_mapping()
probabilities = [round(p, 5) for p in top_ps]
top_class_name = [flower_to_name[c] for c in top_class]

# Render the ranked predictions as a two-column table.
table.add_column("flower name", top_class_name)
table.add_column("prediction probability", probabilities)
print(table)
| rkg-37/ImageClassifier | predict.py | predict.py | py | 654 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "prettytable.PrettyTable",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "predict_utility.get_input_args",
"line_number": 8,
"usage_type": "call"
},
{
"api_n... |
41996069005 | import logging
import platform
import sys
import json
from getpass import getpass
from pathlib import Path
from typing import Union, Dict, Tuple
from shapely.geometry import Polygon
import os
import couchdb
import xmltodict
from lxml import etree
from tqdm import tqdm
from geojson_rewind import rewind
logging.basicConfig(level="INFO")
log = logging.getLogger(__name__)
def getBbox(coordinateList):
    """Return the bounding box of a polygon ring.

    input: list of coordinate pairs for the CAP polygon
    output: the shapely ``bounds`` tuple as a list: [min_x, min_y, max_x, max_y]
    """
    bounds = Polygon(coordinateList).bounds
    return list(bounds)
class CapToDB:
    """Converts CAP v1.2 XML warning files into CouchDB documents.

    Each XML file is validated against the CAP schema, parsed into a dict,
    mapped onto the warning-document layout and uploaded, with the original
    XML attached to the document.
    """

    def __init__(self):
        # Schema text is read eagerly; the lxml validator is built lazily.
        self._xmlschema = Path("cap_v1.2_schema.xml").read_text()
        self._xml_validator = None  # set at first usage

    def run(self, path: Union[str, Path],
            db_warnings: couchdb.client.Database,
            db_incidents: couchdb.client.Database):
        """Run conversion and upload to db for all xml files in path
        Args:
            path: Path to xml files to be stored in DB
            db_warnings: CouchDB database to store warnings
            db_incidents: CouchDB database to store incident (number / names)
        """
        capdir = Path(path)
        for fn in tqdm(list(capdir.glob("**/METfare*.xml"))):
            dic = self.xml_to_dict(fn)
            warning, incident = self.map_dict(dic)
            try:
                log.debug("\n\nSaving to warnings DB for fn: %s", fn)
                log.debug("content %s", warning)
                doc_id = warning["_id"]  # renamed from `id` to avoid shadowing the builtin
                if doc_id in db_warnings:
                    # An earlier revision exists: delete it so save() below
                    # recreates the document from scratch.
                    print(doc_id, "**** already exists, deleting")
                    db_warnings.delete(db_warnings[doc_id])
                else:
                    # Best-effort local geojson dump for debugging; a failure
                    # here must not abort the upload.
                    try:
                        warning['source'] = f'lustre_archive,{fn.name}'
                        with open(f'{warning["_id"]}.geojson', 'w') as file:
                            file.write(json.dumps(warning))
                    except Exception as e:
                        print(e)
                db_warnings.save(warning)
                log.debug("upload attachment")
                db_warnings.put_attachment(warning, fn.read_bytes(),
                                           fn.name)
            except couchdb.http.ResourceConflict:
                log.exception("Could not update for %s. See log.", fn.name)
            # store incident number & update name, if available
            # (currently disabled; kept for reference)
            # if incident is None:
            #     log.debug("No incident info")
            #     continue
            # saved_entry = db_incidents.get(incident["_id"])
            # if saved_entry is None:
            #     log.debug("Creating incidents database")
            #     db_incidents.save(incident)
            # elif "name" not in saved_entry and "name" in incident:
            #     log.debug("Updating incidents database")
            #     saved_entry.update(incident)
            #     db_incidents.save(saved_entry)
            # else:
            #     log.debug("Entry in db_incident exists already. No changes.")

    def xml_to_dict(self, fn: Union[Path, str]) -> Dict:
        """Convert xml to dictionary.
        Args:
            fn: Input filename
        """
        string = Path(fn).read_text()
        try:
            self.validate(string)
        except etree.XMLSyntaxError as e:
            # Invalid files are still parsed; we only log a warning.
            log.warning("fn: %s is not a valid xml: %s.", fn, e)
        return xmltodict.parse(string)

    def validate(self, string: str) -> None:
        """Validates xml string against schema.
        Args:
            string: String to be validated.
        Raises:
            lxml.etree.XMLSyntaxError: If string is not a valid according to
                the provided schema
        """
        if self._xml_validator is None:
            # Build the schema-backed parser once and cache it.
            log.debug("Attempt to process xml schema")
            schema_root = etree.XML(self._xmlschema.encode())
            schema = etree.XMLSchema(schema_root)
            self._xml_validator = etree.XMLParser(schema=schema)
            log.info("Processed xml schema")
        etree.fromstring(string.encode(), self._xml_validator)

    def map_dict(self, event: Dict) -> Tuple[Dict, Union[None, Dict]]:
        """Maps xml-dict to DB keys
        Results:
            warning: Information for warnings DB
            incident: Information for incidents DB. None if no incident number.
        """
        warning = {}
        alert = event['alert']
        info = self.single_lang_evt_from_cap(alert)
        # Variable keys
        # format: "partition:name"
        # warning["_id"] = f'metfare:{alert["identifier"]}'
        warning["_id"] = f'{alert["identifier"]}'
        warning["saved_at"] = alert["sent"]
        warning["transmitted_at"] = alert["sent"]
        warning["onset"] = info["onset"]
        warning["expires"] = info["expires"]
        warning["phenomenon"] = info["eventCode"]["value"]
        # Info may not exist
        if "incidents" in alert:
            warning["incident"] = alert["incidents"]
        # Fixed keys:
        warning["archived"] = True
        warning["author"] = f"{os.path.basename(__file__)}@met.no"
        # warning["author"] = f"{__file__}@{platform.node()}"
        warning["transmission_state"] = "transmitted"
        warning["source"] = "lustre_archive"  # may be overridden with the file name in run()
        # new keys
        warning["status"] = alert["status"]
        # BUGFIX: references live in the source alert; the old check tested the
        # (still reference-free) warning dict and was therefore always False.
        if "references" in alert:
            warning["references"] = alert["references"]
        warning["certainty"] = info["certainty"]
        warning["severity"] = info["severity"]
        warning["msgType"] = alert["msgType"]
        warning["altitude"] = info["area"]["altitude"]
        warning["ceiling"] = info["area"]["ceiling"]
        warning["areaDesc"] = {
            "en": info["area"]["areaDesc"],
            "nb": info["area"]["areaDesc"],
        }
        warning["type"] = "FeatureCollection"
        orig_polygon = info["area"]["polygon"].split()
        polygon = []
        for coor in orig_polygon:
            # CAP polygons list "lat,lon" pairs; GeoJSON wants (lon, lat),
            # hence the swap on append.
            lat_str, lon_str = coor.split(",")
            polygon.append((float(lon_str), float(lat_str)))
        coordinates = [
            polygon,
        ]
        geometry = {
            "type": "Polygon",
            "coordinates": coordinates,
        }
        bbox = getBbox(coordinates[0])
        feature = {
            "geometry": geometry,
            "type": "Feature",
            "properties": {"customArea": False, "bbox": bbox},
        }
        # Enforce right-hand-rule winding order required by the GeoJSON spec.
        feature = rewind(feature)
        warning["features"] = [feature, ]
        # warning["color"]
        # warning["ref_by"]
        # keys that are not relevant:
        # "transmitted_at", "drafted_at", "author"
        # incident-info
        incident = None
        if "incidents" in alert:
            incident = {}
            # Zero-pad to ten digits so incident ids sort consistently.
            incident["_id"] = warning["incident"].zfill(10)
            for parameter in info["parameter"]:
                if parameter["valueName"] == "incidentName":
                    incident["name"] = parameter["value"]
        return warning, incident

    def single_lang_evt_from_cap(self, evt: Dict, lang="no") -> Dict:
        """Gets `info` block of one language from a multilang CAP file."""
        evt_no = evt["info"][0]
        if evt_no["language"].lower() != lang:
            raise ValueError("CAPs XML file scheme must have changed")
        for evt_other_lang in evt["info"][1:]:
            if evt_other_lang["language"] == lang:
                raise ValueError("CAPs XML file scheme must have changed")
        return evt_no

    def save_incident(self, event: Dict):
        # NOTE(review): incomplete/unused — computes `info` but stores nothing.
        # Confirm whether this method can be removed.
        alert = event['alert']
        info = self.single_lang_evt_from_cap(alert)
if __name__ == "__main__":
    # Prompt for CouchDB credentials instead of hard-coding them.
    user = input("user:")
    password = getpass("password:")
    couch = couchdb.Server("http://%s:%s@127.0.0.1:5984/" % (user, password))
    captodb = CapToDB()
    # Optional CLI argument selects the input directory (default: test_data).
    path = "test_data" if len(sys.argv) == 1 else sys.argv[1]
    # captodb.run(path, couch["archive_warnings"], couch["archive_incidents"])
    # captodb.run(path, couch["junk-warnings"], couch["junk-incidents"])
    # NOTE(review): "jjw"/"jji" look like scratch databases — confirm before production use.
    captodb.run(path, couch["jjw"], couch["jji"])
    # captodb.run(path, couch["warnings"], couch["incidents"])
| metno/weamyl-metcap | scripts/lustre_archive_importer.py | lustre_archive_importer.py | py | 8,431 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.basicConfig",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "shapely.geometry.Polygon",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pathli... |
9020012724 | from multiprocessing import Pool
import math
from functools import partial
import numpy as np
from pyfaidx import Fasta, Faidx
import subprocess
import pysam
from liftoff import aligned_seg, liftoff_utils
from os import path
def align_features_to_target(ref_chroms, target_chroms, args, feature_hierarchy, liftover_type, unmapped_features):
    """Align feature sequences to the target genome and parse the resulting SAM files.

    In "polish" mode an existing polish.sam is reused; otherwise one minimap2
    job per chromosome chunk is run in a process pool.

    Returns:
        Dict mapping feature names to their aligned blocks (see parse_all_sam_files).
    """
    if args.subcommand == "polish":
        sam_files = [args.dir + "/polish.sam"]
    else:
        target_fasta_dict = split_target_sequence(target_chroms, args.target, args.dir)
        genome_size = get_genome_size(target_fasta_dict)
        # Split the requested thread budget evenly across chromosome jobs.
        threads_per_alignment = max(1, math.floor(int(args.p) / len(ref_chroms)))
        sam_files = []
        pool = Pool(int(args.p))
        print("aligning features")
        func = partial(align_single_chroms, ref_chroms, target_chroms, threads_per_alignment, args, genome_size,
                       liftover_type)
        for result in pool.imap_unordered(func, np.arange(0, len(target_chroms))):
            sam_files.append(result)
        pool.close()
        pool.join()
    return parse_all_sam_files(feature_hierarchy, unmapped_features, liftover_type, sam_files)
def split_target_sequence(target_chroms, target_fasta_name, inter_files):
    """Write each requested target chromosome to its own FASTA file.

    Builds/refreshes the .fai index for the target FASTA, then writes one
    "<chrom>.fa" file per chromosome into *inter_files*.

    Args:
        target_chroms: Chromosome names to extract.
        target_fasta_name: Path to the target genome FASTA.
        inter_files: Directory for intermediate per-chromosome files.

    Returns:
        The pyfaidx.Fasta handle for the whole target genome.
    """
    Faidx(target_fasta_name)
    target_fasta_dict = Fasta(target_fasta_name, key_function=lambda x: x.split()[0])
    for chrm in target_chroms:
        if chrm != target_fasta_name:
            # Use a context manager so the file is flushed and closed before
            # minimap2 reads it (the original left the handle open — a leak).
            with open(inter_files + "/" + chrm + ".fa", 'w') as out:
                out.write(">" + chrm + "\n" + str(target_fasta_dict[chrm]))
    return target_fasta_dict
def get_genome_size(target_fasta_dict):
    """Return the total number of bases across all sequences in the FASTA dict."""
    return sum(len(sequence) for sequence in target_fasta_dict.values())
def align_single_chroms(ref_chroms, target_chroms, threads, args, genome_size, liftover_type, index):
    """Run minimap2 for one chromosome chunk and return the output SAM path.

    For very large genomes the single prebuilt .mmi index is skipped and
    minimap2's --split-prefix mode is used instead.
    """
    max_single_index_size = 4000000000  # bases; above this no single .mmi index is built
    features_file, features_name = get_features_file(ref_chroms, args, liftover_type, index)
    target_file, output_file = get_target_file_and_output_file(liftover_type, target_chroms, index, features_name, args)
    threads_arg = str(threads)
    minimap2_path = get_minimap_path(args)
    target_prefix = get_target_prefix_name(target_chroms, index, args, liftover_type)
    if genome_size > max_single_index_size:
        # Genome too big for one index: let minimap2 split its index on disk.
        split_prefix = args.dir + "/" + features_name + "_to_" + target_prefix + "_split"
        command = [minimap2_path, '-o', output_file, target_file, features_file] + args.mm2_options.split(" ") + [
            "--split-prefix", split_prefix, '-t', threads_arg]
        subprocess.run(command)
    else:
        minimap2_index = build_minimap2_index(target_file, args, threads_arg, minimap2_path)
        command = [minimap2_path, '-o', output_file, minimap2_index, features_file] + args.mm2_options.split(" ") + [
            '-t', threads_arg]
        subprocess.run(command)
    return output_file
def get_features_file(ref_chroms, args, liftover_type, index):
    """Resolve the FASTA file of feature sequences to align for this chunk.

    Returns a (path, base_name) tuple; the base name is later reused to build
    the output SAM file name.
    """
    whole_reference = ref_chroms[index] == args.reference
    if whole_reference and liftover_type in ("chrm_by_chrm", "copies"):
        features_name = 'reference_all'
    elif liftover_type == "unmapped":
        features_name = "unmapped_to_expected_chrom"
    elif liftover_type == "unplaced":
        features_name = "unplaced"
    else:
        features_name = ref_chroms[index]
    return args.dir + "/" + features_name + "_genes.fa", features_name
def get_target_file_and_output_file(liftover_type, target_chroms, index, features_name, args):
    """Pick the target sequence file and the SAM output path for this chunk."""
    whole_genome = liftover_type != "chrm_by_chrm" or target_chroms[0] == args.target
    if whole_genome:
        target_file, out_file_target = args.target, "target_all"
    else:
        out_file_target = target_chroms[index]
        target_file = args.dir + "/" + out_file_target + ".fa"
    output_file = args.dir + "/" + features_name + "_to_" + out_file_target + ".sam"
    return target_file, output_file
def get_minimap_path(args):
    """Return the minimap2 executable: the user-supplied path, else the one on $PATH."""
    return "minimap2" if args.m is None else args.m
def get_target_prefix_name(target_chroms, index, args, liftover_type):
    """Name of the target used in split-index prefixes and file names."""
    if liftover_type != "chrm_by_chrm" or target_chroms[0] == args.target:
        return "target_all"
    return target_chroms[index]
def build_minimap2_index(target_file, args, threads, minimap2_path):
    """Build (once) and return the path of the minimap2 .mmi index for target_file."""
    index_file = target_file + ".mmi"
    if not path.exists(index_file):
        command = [minimap2_path, '-d', index_file, target_file]
        command += args.mm2_options.split(" ") + ['-t', threads]
        subprocess.run(command)
    return index_file
def parse_all_sam_files(feature_hierarchy, unmapped_features, liftover_type, sam_files):
    """Parse every SAM file and merge the per-feature alignment blocks into one dict."""
    merged = {}
    for sam_file in sam_files:
        merged.update(parse_alignment(sam_file, feature_hierarchy, unmapped_features, liftover_type))
    return merged
def parse_alignment(file, feature_hierarchy, unmapped_features, search_type):
    """Parse one SAM file into per-query lists of aligned blocks.

    Mapped records are converted into aligned_seg blocks; unmapped records add
    their parent feature to *unmapped_features* (mutated in place).
    """
    all_aligned_blocks = {}
    # check_sq/check_header are disabled — presumably the minimap2 output lacks
    # a full header here; confirm against the alignment step.
    sam_file = pysam.AlignmentFile(file, 'r', check_sq=False, check_header=False)
    sam_file_iter = sam_file.fetch()
    aln_id = 0
    name_dict = {}
    align_count_dict = {}
    for ref_seq in sam_file_iter:
        if ref_seq.is_unmapped is False:
            aln_id = add_alignment(ref_seq, align_count_dict, search_type, name_dict,aln_id, feature_hierarchy,
                                   all_aligned_blocks)
        else:
            unmapped_features.append(feature_hierarchy.parents[ref_seq.query_name])
    # Queries whose alignments overlap no child feature are also unmapped.
    remove_alignments_without_children(all_aligned_blocks, unmapped_features, feature_hierarchy)
    return all_aligned_blocks
def add_alignment(ref_seq, align_count_dict, search_type, name_dict, aln_id, feature_hierarchy,
                  all_aligned_blocks):
    """Register one mapped SAM record: tag its name, count it, and collect its blocks."""
    ref_seq.query_name = edit_name(search_type, ref_seq, name_dict)
    aln_id += 1
    # Track how many alignments were already seen for this (renamed) query.
    previous = align_count_dict.get(ref_seq.query_name)
    align_count_dict[ref_seq.query_name] = 0 if previous is None else previous + 1
    blocks = get_aligned_blocks(ref_seq, aln_id, feature_hierarchy, search_type)
    all_aligned_blocks.setdefault(ref_seq.query_name, []).extend(blocks)
    return aln_id
def edit_name(search_type, ref_seq, name_dict):
    """Append a copy index to the query name; always "_0" outside copies mode."""
    if search_type != "copies":
        return ref_seq.query_name + "_0"
    # In copies mode each extra alignment of the same query gets 1, 2, 3, ...
    name_dict[ref_seq.query_name] = name_dict.get(ref_seq.query_name, 0) + 1
    return ref_seq.query_name + "_" + str(name_dict[ref_seq.query_name])
def get_aligned_blocks(alignment, aln_id, feature_hierarchy, search_type):
    """Walk the CIGAR string and cut the alignment into gap-free blocks.

    Blocks are closed at insertions/deletions and at the end of the aligned
    query span; only blocks overlapping at least one child feature are kept.
    In "copies" mode, partial (non end-to-end) alignments are discarded.
    """
    cigar_operations = get_cigar_operations()
    cigar = alignment.cigar
    parent = feature_hierarchy.parents[liftoff_utils.convert_id_to_original(alignment.query_name)]
    query_start, query_end = get_query_start_and_end(alignment, cigar, cigar_operations)
    children = feature_hierarchy.children[liftoff_utils.convert_id_to_original(alignment.query_name)]
    end_to_end = is_end_to_end_alignment(parent, query_start, query_end)
    if search_type == "copies" and end_to_end is False:
        return []
    reference_block_start, reference_block_pos = alignment.reference_start, alignment.reference_start
    query_block_start, query_block_pos = query_start, query_start
    new_blocks, mismatches = [], []
    merged_children_coords = liftoff_utils.merge_children_intervals(children)
    for operation, length in cigar:
        if base_is_aligned(operation, cigar_operations):
            # Aligned run: advance both coordinates, recording mismatch positions.
            query_block_pos, reference_block_pos = add_aligned_base(operation, query_block_pos, reference_block_pos,
                                                                    length, cigar_operations, mismatches)
            if query_block_pos == query_end:
                # Reached the end of the aligned query span: close the last block.
                add_block(query_block_pos, reference_block_pos, aln_id, alignment, query_block_start,
                          reference_block_start, mismatches, new_blocks, merged_children_coords, parent)
                break
        elif is_alignment_gap(operation, cigar_operations):
            # Gap: close the current block, then reset state past the gap.
            add_block(query_block_pos, reference_block_pos, aln_id, alignment, query_block_start, reference_block_start,
                      mismatches, new_blocks, merged_children_coords, parent)
            mismatches, query_block_start, reference_block_start, query_block_pos, reference_block_pos = \
                end_block_at_gap(
                    operation, query_block_pos, reference_block_pos, length, cigar_operations)
    return new_blocks
def get_cigar_operations():
    """Map CIGAR operation names to their pysam integer codes."""
    operations = {
        "insertion": 1,
        "deletion": 2,
        "hard_clip": 5,
        "match": 7,
        "mismatch": 8,
    }
    return operations
def get_query_start_and_end(alignment, cigar, cigar_operations):
    """Query-coordinate span of the alignment, shifted by any leading hard clip."""
    start = alignment.query_alignment_start
    end = alignment.query_alignment_end
    # pysam query coordinates exclude hard-clipped bases, so add them back.
    first_op, first_len = cigar[0]
    if first_op == cigar_operations["hard_clip"]:
        start += first_len
        end += first_len
    return start, end
def is_end_to_end_alignment(parent, query_start, query_end):
    """True when the aligned span covers the parent feature completely."""
    parent_length = parent.end - parent.start + 1
    return parent_length == query_end - query_start
def base_is_aligned(operation, cigar_operations):
    """True for CIGAR ops that consume both query and reference (match/mismatch)."""
    return operation in (cigar_operations["match"], cigar_operations["mismatch"])
def add_aligned_base(operation, query_block_pos, reference_block_pos, length, cigar_operations, mismatches):
    """Advance both coordinates over an aligned run, recording mismatch positions."""
    if operation == cigar_operations["mismatch"]:
        # Every query position in this run differs from the reference.
        mismatches.extend(range(query_block_pos, query_block_pos + length))
    return adjust_position(operation, query_block_pos, reference_block_pos,
                           length, cigar_operations)
def adjust_position(operation, query_block_pos, reference_block_pos, length, cigar_operations):
    """Advance query/reference positions according to which sequences the op consumes."""
    consumes_query = (cigar_operations["match"], cigar_operations["mismatch"],
                      cigar_operations["insertion"])
    consumes_reference = (cigar_operations["match"], cigar_operations["mismatch"],
                          cigar_operations["deletion"])
    if operation in consumes_query:
        query_block_pos += length
    if operation in consumes_reference:
        reference_block_pos += length
    return query_block_pos, reference_block_pos
def add_block(query_block_pos, reference_block_pos, aln_id, alignment, query_block_start, reference_block_start,
              mismatches, new_blocks, merged_children_coords, parent):
    """Close the current block; keep it only if it overlaps at least one child feature."""
    block = aligned_seg.aligned_seg(
        aln_id, alignment.query_name, alignment.reference_name, query_block_start,
        query_block_pos - 1,  # block end is one base before the current position
        reference_block_start, reference_block_pos - 1, alignment.is_reverse,
        np.array(mismatches).astype(int))
    if find_overlapping_children(block, merged_children_coords, parent):
        new_blocks.append(block)
def find_overlapping_children(aln, children_coords, parent):
    """Collect (start, end) bounds of child intervals that overlap this aligned block."""
    overlapping = []
    for child_interval in children_coords:
        # Convert absolute child coordinates to parent-relative ones; the pair
        # may come back reversed for minus-strand alignments.
        rel_a = liftoff_utils.get_relative_child_coord(parent, child_interval[0], aln.is_reverse)
        rel_b = liftoff_utils.get_relative_child_coord(parent, child_interval[1], aln.is_reverse)
        child_start, child_end = sorted((rel_a, rel_b))
        if liftoff_utils.count_overlap(child_start, child_end, aln.query_block_start, aln.query_block_end) > 0:
            overlapping.extend((child_start, child_end))
    return overlapping
def is_alignment_gap(operation, cigar_operations):
    """True for CIGAR ops that open a gap (insertion or deletion)."""
    return operation in (cigar_operations["insertion"], cigar_operations["deletion"])
def end_block_at_gap(operation, query_block_pos, reference_block_pos, length, cigar_operations):
    """Skip over a gap and reset the per-block state for the next block."""
    query_block_pos, reference_block_pos = adjust_position(operation, query_block_pos, reference_block_pos,
                                                           length, cigar_operations)
    # The next block starts right after the gap with an empty mismatch list.
    return [], query_block_pos, reference_block_pos, query_block_pos, reference_block_pos
def remove_alignments_without_children(all_aligned_blocks, unmapped_features, feature_hierarchy):
    """Drop queries whose alignments kept no blocks, flagging their parents as unmapped."""
    empty_queries = [name for name, blocks in all_aligned_blocks.items() if blocks == []]
    for name in empty_queries:
        unmapped_features.append(feature_hierarchy.parents[liftoff_utils.convert_id_to_original(name)])
        del all_aligned_blocks[name]
    return all_aligned_blocks
| agshumate/Liftoff | liftoff/align_features.py | align_features.py | py | 13,119 | python | en | code | 360 | github-code | 6 | [
{
"api_name": "math.floor",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "multiprocessing.Pool",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "functools.partial",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
... |
14894176437 | import asyncio
import os
import os.path
from flask import Flask, request, send_from_directory, flash, request
from werkzeug.utils import secure_filename
import getpass
import platform
from flask_cors import CORS
from scripts.datascript import Datascript
from scripts.CalcoloScostamentiSenzaIntermedi import ScostamentiSenzaIntermedi
# This file contains all the APIs required to upload the datasets and to get
# the graph images that will be displayed in the Flutter frontend.
app = Flask(__name__)
# CORS is required to allow other domains to access files and images on the webpage
CORS(app)
# Allowed dataset file extensions
ALLOWED_EXTENSIONS = {'csv', 'xlsx'}
# PATHS
# Raw uploaded datasets folder
# Output Graphs folder
# NOTE(review): the folders below are hard-coded per developer machine;
# consider moving them to configuration or environment variables.
username = getpass.getuser()
if platform.system() == "Windows":
    UPLOAD_FOLDER = r"C:\SCGProject\Datasets\RawDatasets"
    DATASET_FOLDER = r"C:\SCGProject\Datasets\CsvForGraphing"
if platform.system() == "Darwin" :
    if(username == "marcovinciguerra"):
        UPLOAD_FOLDER = "/Users/marcovinciguerra/Github/SCGProject/Datasets/RawDatasets"
        DATASET_FOLDER = "/Users/marcovinciguerra/Github/SCGProject/Datasets/CsvForGraphing"
    elif(username == "davidguzman"):
        UPLOAD_FOLDER = "/Users/davidguzman/documents/Github/SCGProject/Datasets/RawDatasets"
        DATASET_FOLDER = "/Users/davidguzman/documents/Github/SCGProject/Datasets/CsvForGraphing"
#Controllo che il file caricato abbia il formato corretto
def allowed_file(filename):
    """Return True when the name has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
# API endpoint to upload the raw dataset files
@app.route('/uploadDataset', methods = ['GET', 'POST'])
def uploadDataset():
    """Accept one or more dataset files via POST and store them in UPLOAD_FOLDER.

    Returns "OK" on success, "KO" when no file part/name is present.
    """
    if request.method == 'POST':
        # Check whether the POST request carries a file part at all
        if 'file' not in request.files:
            print("no file selezionato")
            flash('No file part')
            return "KO"
        # Get the files selected by the user
        files = request.files.getlist('file')
        print(files)
        # Iterate over the selected files and save them to the filesystem one by one
        for file in files:
            # Make sure the user actually selected a file to upload
            if file.filename == '':
                print('no file selezionato')
                return "KO"
            # Check that the file format is allowed
            if file and allowed_file(file.filename):
                # Save the file into the filesystem
                # NOTE(review): file.filename is user-controlled; consider
                # secure_filename() (already imported) to avoid path traversal.
                file.save(os.path.join(UPLOAD_FOLDER, file.filename))
        return "OK"
    # NOTE(review): GET requests fall through and return None (HTTP 500 in
    # Flask) — confirm whether GET should be supported at all.
# Serve generated CSV/graph files from the graphs folder
@app.route('/get-csvgraph/<filename>')
def get_csv_graph(filename):
    """Return the requested file from DATASET_FOLDER."""
    return send_from_directory(DATASET_FOLDER, filename)
# Get test data from python scripts. It awaits data from the script
@app.route('/get-scriptdata')
async def get_script_data():
    """Asynchronously return the data produced by ScostamentiSenzaIntermedi.getData()."""
    return await ScostamentiSenzaIntermedi.getData()
# Format Datasets Script
@app.route('/format-datasets')
async def fix_dataset():
    # NOTE(review): `FixDatas` is never imported in this module (only
    # Datascript / ScostamentiSenzaIntermedi are), so this endpoint raises
    # NameError at runtime — confirm the intended module and import it.
    return await FixDatas.runFixDatas()
if __name__ == "__main__":
    # Start the Flask development server (use a WSGI server in production).
    app.run()
| VinciGit00/SCGProject | Frontend/flask_code/app.py | app.py | py | 3,160 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "getpass.getuser",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "platform.system",
"li... |
26039884706 | from __future__ import annotations
from dataclasses import dataclass
from pants.backend.go.subsystems.golang import GolangSubsystem
from pants.core.util_rules.system_binaries import (
BinaryPath,
BinaryPathRequest,
BinaryPaths,
BinaryPathTest,
)
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.internals.selectors import Get
from pants.engine.rules import collect_rules, rule
@dataclass(frozen=True)
class CGoBinaryPathRequest(EngineAwareParameter):
    """Request to locate one external binary required by the CGo tooling."""

    # Name of the executable to search for on the configured search paths.
    binary_name: str
    # Optional invocation used to sanity-check/fingerprint candidate binaries.
    binary_path_test: BinaryPathTest | None

    def debug_hint(self) -> str | None:
        # Surface the binary name in engine-aware log/trace output.
        return self.binary_name
@rule
async def find_cgo_binary_path(
    request: CGoBinaryPathRequest, golang_env_aware: GolangSubsystem.EnvironmentAware
) -> BinaryPath:
    """Locate the requested CGo tool on the configured cgo search paths."""
    path_request = BinaryPathRequest(
        binary_name=request.binary_name,
        search_path=golang_env_aware.cgo_tool_search_paths,
        test=request.binary_path_test,
    )
    paths = await Get(BinaryPaths, BinaryPathRequest, path_request)
    # Raise with a user-facing rationale when no candidate binary was found.
    first_path = paths.first_path_or_raise(
        path_request, rationale=f"find the `{request.binary_name}` tool required by CGo"
    )
    return first_path
def rules():
    """Expose this module's @rule definitions to the Pants engine."""
    return collect_rules()
| pantsbuild/pants | src/python/pants/backend/go/util_rules/cgo_binaries.py | cgo_binaries.py | py | 1,235 | python | en | code | 2,896 | github-code | 6 | [
{
"api_name": "pants.engine.engine_aware.EngineAwareParameter",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pants.core.util_rules.system_binaries.BinaryPathTest",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 17... |
26485545082 | ######## Tensorflow Imaage Classifier #########
#
# Author: Erik Handeland Date: 12/12/2021
# Description: This program uses a TensorFlow Lite object detection model-metadata to
# perform object detection on an image. It creates a dict containing a
# list of detected objects and the count for each object. It also save a copy
# of the image with draws boxes and scores around the objects of interest for each image.
#
# This code is based off the TensorFlow Lite image classification example at:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py
# Add the following github repo by Evan Juras:
# https://github.com/EdjeElectronics/TensorFlow-Lite-Object-Detection-on-Android-and-Raspberry-Pi
#
# Import packages
import os
from os.path import exists
import cv2
import numpy as np
import importlib.util
from tflite_support import metadata
from from_root import from_root
# Import TensorFlow libraries
# If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow
# If using Coral Edge TPU, import the load_delegate library
pkg = importlib.util.find_spec('tflite_runtime')
if pkg:
from tflite_runtime.interpreter import Interpreter
else:
from tensorflow.lite.python.interpreter import Interpreter
# Extract metadata from the .tflite file
def load_metadata_labels(PATH_TO_MODEL):
    """Return the label list embedded in the .tflite model metadata, or [] if absent."""
    try:
        displayer = metadata.MetadataDisplayer.with_model_file(PATH_TO_MODEL)
        file_name = displayer.get_packed_associated_file_list()[0]
    except ValueError:
        # The model file carries no metadata at all.
        return []
    if not file_name:
        return []
    label_map_file = displayer.get_associated_file_buffer(file_name).decode()
    # Keep only non-empty lines.
    return [line for line in label_map_file.splitlines() if line]
def load_labels(PATH_TO_GRAPH, PATH_TO_LABELS):
    """Load class labels, preferring model metadata over a labelmap.txt file.

    NOTE(review): on failure this returns an error *dict* rather than a list;
    confirm that every caller checks for that shape before indexing.
    """
    # Load label list from metadata or from labelmap file
    label_list = load_metadata_labels(PATH_TO_GRAPH)
    if not label_list: # DEPRECATED this is the old way of loading labels, new ML models should have it as metadata
        if not exists(PATH_TO_LABELS):
            print("No labelmap in metadata and no labelmap.txt found! at path: " + PATH_TO_LABELS)
            return {
                "error": "No labelmap found",
                "vehicles": -1,
                "pedestrians": -1,
                "confidence-threshold": 0.50,
                "objects": [],
            }
        # Load the label map
        with open(PATH_TO_LABELS, 'r') as f:
            label_list = [line.strip() for line in f.readlines()]
    return label_list
# MODEL_NAME: should be the name of a directory in the models directory
# IMG_PATH: should be the path full path to your target image
# COORDS: Whether to return coordinates of detected objects
# MIN_CONF_LEVEL: is the minimum confidence level to be considered a detection 0-1
# PATH_TO_GRAPH & LABELMAP_NAME: Name of the .tflite file and the labelmap file. Defaults should work for most cases
# SAVED_IMG_PATH: Directory to save the image with boxes and scores. If not specified, no image will be saved
def objDetection(MODEL_NAME: str, IMG_PATH: str, MIN_CONF_LEVEL=0.50,
                 GRAPH_NAME="detect.tflite", LABELMAP_NAME="labelmap.txt", SAVED_IMG_PATH="", COORDS=False):
    """Run TFLite object detection on one image and summarize the detections.

    Args:
        MODEL_NAME: Name of a directory under models/ holding the model.
        IMG_PATH: Full path to the target image.
        MIN_CONF_LEVEL: Minimum confidence (0-1) for a detection to count.
        GRAPH_NAME: .tflite file name inside the model directory.
        LABELMAP_NAME: labelmap file name inside the model directory.
        SAVED_IMG_PATH: If non-empty, directory where an annotated copy is saved.
        COORDS: When True, include bounding-box corner coordinates per object.

    Returns:
        Dict with keys "error", "vehicles", "pedestrians",
        "confidence-threshold" and "objects" (one dict per detection).
    """
    objects = []
    # Get path to project root
    CWD_PATH = str(from_root())
    # Path to .tflite file, which contains the model-metadata that is used for object detection
    try: # running from pip install - pip install has a different path structure than source
        PATH_TO_MODEL = os.path.join(CWD_PATH, "models", MODEL_NAME)
        PATH_TO_GRAPH = os.path.join(PATH_TO_MODEL, GRAPH_NAME)
        PATH_TO_LABELS = os.path.join(PATH_TO_MODEL, LABELMAP_NAME)
        if not exists(PATH_TO_GRAPH):
            raise FileNotFoundError
    except FileNotFoundError: # running from source
        PATH_TO_MODEL = os.path.join(CWD_PATH, "obj_detection", "models", MODEL_NAME)
        PATH_TO_GRAPH = os.path.join(PATH_TO_MODEL, GRAPH_NAME)
        PATH_TO_LABELS = os.path.join(PATH_TO_MODEL, LABELMAP_NAME)
        if not exists(PATH_TO_GRAPH):
            print("detect.tflite not found! at path: " + PATH_TO_GRAPH)
            return {
                "error": "Invalid model-metadata path",
                "vehicles": -1,
                "pedestrians": -1,
                "confidence-threshold": MIN_CONF_LEVEL,
                "objects": objects,
            }
    # Load label list from metadata or from labelmap file
    # NOTE(review): load_labels may return an error *dict* on failure; that is
    # not checked here and would break the labels[...] lookup below — confirm.
    labels = load_labels(PATH_TO_GRAPH, PATH_TO_LABELS)
    # Load the Tensorflow Lite model-metadata.
    interpreter = Interpreter(model_path=PATH_TO_GRAPH)
    interpreter.allocate_tensors()
    # Get model-metadata details
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    height = input_details[0]['shape'][1]
    width = input_details[0]['shape'][2]
    floating_model = (input_details[0]['dtype'] == np.float32)
    input_mean = 127.5
    input_std = 127.5
    # Load image and resize to expected shape [1xHxWx3]
    image = cv2.imread(IMG_PATH)
    if image is None:
        print("Image not found, check path ", IMG_PATH)
        return {
            "error": "Image not found, check path",
            "vehicles": -1,
            "pedestrians": -1,
            "confidence-threshold": MIN_CONF_LEVEL,
            "objects": objects,
        }
    image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    imH, imW, _ = image.shape
    image_resized = cv2.resize(image_rgb, (width, height))
    input_data = np.expand_dims(image_resized, axis=0)
    # Normalize pixel values if using a floating model-metadata (i.e. if model-metadata is non-quantized)
    if floating_model:
        input_data = (np.float32(input_data) - input_mean) / input_std
    # Perform the actual detection by running the model-metadata with the image as input
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    # Retrieve detection results
    try:
        boxes = interpreter.get_tensor(output_details[0]['index'])[0] # Bounding box coordinates of detected objects
        classes = interpreter.get_tensor(output_details[1]['index'])[0] # Class index of detected objects
        scores = interpreter.get_tensor(output_details[2]['index'])[0] # Confidence of detected objects
    except:
        # NOTE(review): bare except also swallows unrelated errors — consider
        # narrowing to the specific exception the interpreter raises here.
        return {
            "error": "Invalid model-metadata output details, probably using model-metadata for JS or Dart",
            "vehicles": -1,
            "pedestrians": -1,
            "confidence-threshold": MIN_CONF_LEVEL,
            "objects": objects,
        }
    # Loop over all detections and draw detection box if confidence is above minimum threshold
    for i in range(len(scores)):
        if ((scores[i] > MIN_CONF_LEVEL) and (scores[i] <= 1.0)):
            # Get bounding box coordinates and draw box Interpreter can
            # return coordinates that are outside of image dimensions,
            # need to force them to be within image using max() and min()
            ymin = int(max(1, (boxes[i][0] * imH)))
            xmin = int(max(1, (boxes[i][1] * imW)))
            ymax = int(min(imH, (boxes[i][2] * imH)))
            xmax = int(min(imW, (boxes[i][3] * imW)))
            # Corners of the bounding box
            tr = (xmax, ymax)  # Top right
            bl = (xmin, ymin)  # Bottom left
            br = (xmax, ymin)
            tl = (xmin, ymax)
            # Draw detection box on image
            cv2.rectangle(image, bl, tr, (10, 255, 0), 2)
            # Draw label
            object_name = labels[int(classes[i])]  # Look up object name from "labels" array using class index
            object_score = int(scores[i] * 100)
            label = '%s: %d%%' % (object_name, object_score)  # Example: 'person: 72%'
            labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2)  # Get font size
            label_ymin = max(ymin, labelSize[1] + 10)  # Make sure not to draw label too close to top of window
            cv2.rectangle(image, (xmin, label_ymin - labelSize[1] - 10),
                          (xmin + labelSize[0], label_ymin + baseLine - 10),
                          (255, 255, 255), cv2.FILLED)  # Draw white box to put label text in
            cv2.putText(image, label, (xmin, label_ymin - 7),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2)  # Draw label text
            # Add object to objects list
            obj = {
                "name": object_name,
                "confidence": scores[i],
                "coord": {"top-left": tl, "top-right": tr, "bottom-right": br, "bottom-left": bl} if COORDS else {},
            }
            objects.append(obj)
    # count vehicles and pedestrians
    cars = 0
    people = 0
    for obj in objects:
        if obj["name"] == "car" or obj["name"] == "truck":
            cars += 1
        elif obj["name"] == "person":
            people += 1
    if SAVED_IMG_PATH:
        # Save an annotated copy next to the requested output directory.
        _, tail = os.path.split(IMG_PATH)
        SAVED_IMG_PATH = os.path.join(SAVED_IMG_PATH, tail[:-4] + "_box.jpg")
        cv2.imwrite(SAVED_IMG_PATH, image)
    return {
        "error": "",
        "vehicles": cars,
        "pedestrians": people,
        "confidence-threshold": MIN_CONF_LEVEL,
        "objects": objects,
    }
# Sample function for detecting if object is in a certain area, useful if some parking lots have handicapped or
# oversize parking spaces
# if inArea([tr, tl, br, bl], (100, 400), (800, 600)):
# print("Object detected in area")
def inArea(points, box_start, box_end):
    """Return True if any point in *points* lies strictly inside the
    axis-aligned rectangle with min corner *box_start* and max corner
    *box_end* (boundary points do not count)."""
    return any(
        box_start[0] < px < box_end[0] and box_start[1] < py < box_end[1]
        for px, py in points
    )
| KB4YG/ml | obj_detection/obj_detection.py | obj_detection.py | py | 9,890 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "importlib.util.util.find_spec",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "importlib.util.util",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "importlib.util",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": ... |
3539820868 | """
Name : portfolio_optimizer.py
Author : Yinsen Miao
Contact : yinsenm@gmail.com
Time : 7/21/2021
Desc: Solve mean-variance optimization
"""
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from gerber import gerber_cov_stat1, gerber_cov_stat2
from ledoit import ledoit
def set_eps_wgt_to_zeros(in_array, eps=1e-4):
    """Zero out weights whose magnitude is below ``eps`` and renormalize.

    :param in_array: array-like of portfolio weights
    :param eps: threshold below which a weight is treated as zero
    :return: np.ndarray of weights summing to 1 (NaNs if every entry is
             below ``eps``, since the normalizer is then 0)
    """
    out_array = np.array(in_array, dtype=float)  # copy, never mutate the input
    out_array[np.abs(out_array) < eps] = 0.0
    # renormalize so the surviving weights sum to one
    # (the original wrapped out_array in a redundant second np.array() call)
    return out_array / np.sum(out_array)
class portfolio_optimizer:
    """Mean-variance style portfolio optimizer.

    Supports several covariance estimators (historical, Ledoit-Wolf
    shrinkage, Gerber statistics) and objective functions (min variance,
    max return, max Sharpe/Sortino, mean-variance with a target, risk
    parity).  Weights are solved with scipy's SLSQP (falling back to
    trust-constr) under a fully-invested constraint.
    """
    def __init__(self, min_weight: float = 0., max_weight: float = 1.0,
                 cov_function: str = "HC",
                 freq: str = "monthly",
                 gs_threshold: float = 0.5):
        """
        :param min_weight: lower bound for each asset weight, in [0, 1)
        :param max_weight: upper bound for each asset weight, in (0, 1]
        :param cov_function: one of HC (historical covariance), SM (Ledoit shrinkage), GS1 (Gerber Stat1), GS2 (Gerber Stat2)
        :param freq: frequency of the returns series, either daily or monthly
        :param gs_threshold: threshold of Gerber statistics between 0 and 1
        """
        # check arguments
        assert cov_function in ['HC', 'GS1', 'GS2', 'SM'], "The covariance function must be one from HC, SM, GS1, and GS2"
        assert freq in ['daily', 'monthly'], "The return series can only be either daily or monthly"
        assert 1 > min_weight >= 0, "The minimal weight shall be in [0, 1)"
        assert 1 >= max_weight > 0, "The maximum weight shall be in (0, 1]"
        assert 1 >= gs_threshold > 0, "The Gerber shrinkage threshold shall be in (0, 1]"
        self.min_weight = min_weight
        self.max_weight = max_weight
        self.factor = 252 if freq == "daily" else 12  # periods per year, used to annualize
        self.cov_function = cov_function  # covariance function can be one of HC, SM, GS1, GS2
        self.freq = freq  # freq of return series can be either daily or monthly
        self.init_weights = None  # initial portfolio weights (set in optimize)
        self.covariance = None  # covariance matrix of the full return series
        self.returns_df = None  # historical returns (set via set_returns)
        self.negative_returns_df = None  # returns clipped to negatives only
        self.covariance_neg = None  # covariance matrix of only negative returns for sortino ratio
        self.obj_function = None  # objective chosen in the last optimize() call
        self.by_risk = None  # for meanVariance: True = risk-targeted, False = return-targeted
        self.gs_threshold = gs_threshold
    def set_returns(self, returns_df: pd.DataFrame):
        """
        pass the return series to the class
        :param returns_df: pd.DataFrame of historical daily or monthly returns
        """
        self.returns_df = returns_df.copy(deep=True)
        self.negative_returns_df = returns_df[returns_df < 0].fillna(0)  # keep only the negative returns
    def optimize(self, obj_function: str,
                 target_std: float = None,
                 target_return: float = None,
                 prev_weights: np.array = None,
                 init_weights: np.array = None,
                 cost: float = None) -> np.array:
        """
        Perform portfolio optimization given a series of returns.

        :param obj_function: one of equalWeighting, maxReturn, minVariance,
            meanVariance, maxSharpe, maxSortino, riskParity
        :param target_std: targeted annualized portfolio standard deviation (meanVariance only)
        :param target_return: targeted annualized portfolio return (meanVariance only)
        :param prev_weights: previous weights, used to charge turnover cost
        :param init_weights: optional warm-start weights for the solver
        :param cost: transaction fee and slippage in bps (0.01%)
        :return: an array of portfolio weights p x 1
        """
        n, p = self.returns_df.shape  # n is number of observations, p is number of assets
        if init_weights is None:
            self.init_weights = np.array(p * [1. / p])  # initialize weights: equal weighting
        else:
            self.init_weights = init_weights  # otherwise use the nearby weights as hot start for MVO
        self.obj_function = obj_function
        # get covariance matrix (full series and negative-only series)
        if self.cov_function == "HC":
            self.covariance = self.returns_df.cov().to_numpy()  # convert to numpy
            self.covariance_neg = self.negative_returns_df.cov().to_numpy()  # convert to numpy
        elif self.cov_function == "SM":
            self.covariance, _ = ledoit(self.returns_df.values)
            self.covariance_neg, _ = ledoit(self.negative_returns_df.values)
        elif self.cov_function == "GS1":
            self.covariance, _ = gerber_cov_stat1(self.returns_df.values, threshold=self.gs_threshold)
            self.covariance_neg, _ = gerber_cov_stat1(self.negative_returns_df.values, threshold=self.gs_threshold)
        elif self.cov_function == "GS2":
            self.covariance, _ = gerber_cov_stat2(self.returns_df.values, threshold=self.gs_threshold)
            self.covariance_neg, _ = gerber_cov_stat2(self.negative_returns_df.values, threshold=self.gs_threshold)
        # equal weighting needs no solver — return immediately
        if obj_function == "equalWeighting":
            self.init_weights = np.array(p * [1. / p])  # initialize weights: equal weighting
            return self.init_weights
        # per-asset holding bounds
        bounds = tuple((self.min_weight, self.max_weight) for k in range(p))
        constraints = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1.0}]  # fully invest
        if obj_function == 'meanVariance':
            if target_std is not None:
                self.by_risk = True
                # optimize under risk constraint
                constraints.append({'type': 'eq', 'fun': lambda weights: \
                    self.calc_annualized_portfolio_std(weights) - target_std})
            else:
                # optimize under return constraint
                self.by_risk = False
                constraints.append({'type': 'eq', 'fun': lambda weights: \
                    self.calc_annualized_portfolio_return(weights) - target_return})
        if prev_weights is not None and cost is not None:
            # cost function with transaction fee (L1 turnover penalty, cost in bps)
            cost_fun = lambda weights: self.object_function(weights) +\
                np.abs(weights - prev_weights).sum() * cost / 10000.
        else:
            # cost function without any transaction fee
            cost_fun = lambda weights: self.object_function(weights)
        # trust-constr, SLSQP, L-BFGS-B
        # NOTE(review): bare except below swallows every error from SLSQP,
        # not just solver failures — consider catching a narrower exception.
        try:
            opt = minimize(cost_fun, x0=self.init_weights, bounds=bounds, constraints=constraints, method="SLSQP")
        except:
            # if SLSQP fails then switch to trust-constr
            opt = minimize(cost_fun, x0=self.init_weights, bounds=bounds, constraints=constraints, method="trust-constr")
        return set_eps_wgt_to_zeros(opt['x'])  # pull small values to zeros
    def object_function(self, weights: np.array) -> float:
        """
        Evaluate the currently selected objective (negated for maximization
        objectives, since scipy minimizes).
        :param weights: current weights to be optimized
        """
        if self.obj_function == "maxReturn":
            f = self.calc_annualized_portfolio_return(weights)
            return -f
        elif self.obj_function == "minVariance":
            f = self.calc_annualized_portfolio_std(weights)
            return f
        elif self.obj_function == "meanVariance" and self.by_risk:
            f = self.calc_annualized_portfolio_return(weights)  # maximize return at the target risk level
            return -f
        elif self.obj_function == "meanVariance" and not self.by_risk:
            f = self.calc_annualized_portfolio_std(weights)  # minimize risk at the target return level
            return f
        elif self.obj_function == "maxSharpe":
            f = self.calc_annualized_portfolio_sharpe_ratio(weights)
            return -f
        elif self.obj_function == "maxSortino":
            f = self.calc_annualized_sortino_ratio(weights)
            return -f
        elif self.obj_function == 'riskParity':
            f = self.calc_risk_parity_func(weights)
            return f
        else:
            raise ValueError("Object function shall be one of the equalWeighting, maxReturn, minVariance, " +
                             "meanVariance, maxSharpe, maxSortino or riskParity")
    def calc_annualized_portfolio_return(self, weights: np.array) -> float:
        # annualized portfolio return: mean per-period return scaled by self.factor
        annualized_portfolio_return = float(np.sum(self.returns_df.mean() * self.factor * weights))
        # alternative (compounded) formula kept for reference:
        #float(np.sum(((1 + self.returns_df.mean()) ** self.factor - 1) * weights))
        return annualized_portfolio_return
    def calc_annualized_portfolio_std(self, weights: np.array) -> float:
        """Annualized portfolio standard deviation under the current covariance."""
        if self.obj_function == "equalWeighting":
            # if equal weight then set the off diagonal of covariance matrix to zero
            annualized_portfolio_std = np.sqrt(np.dot(weights.T, np.dot(np.diag(self.covariance.diagonal()) * self.factor, weights)))
        else:
            temp = np.dot(weights.T, np.dot(self.covariance * self.factor, weights))
            if temp <= 0:
                temp = 1e-20  # guard against non-PSD covariance; keep std a tiny positive number
            annualized_portfolio_std = np.sqrt(temp)
        if annualized_portfolio_std <= 0:
            # NOTE(review): literal is missing the f-prefix, so "{weights}" prints verbatim
            raise ValueError('annualized_portfolio_std cannot be zero. Weights: {weights}')
        return annualized_portfolio_std
    def calc_annualized_portfolio_neg_std(self, weights: np.array) -> float:
        """Annualized downside deviation (negative-returns covariance), for Sortino."""
        if self.obj_function == "equalWeighting":
            # if equal weight then set the off diagonal of covariance matrix to zero
            annualized_portfolio_neg_std = np.sqrt(np.dot(weights.T, np.dot(np.diag(self.covariance_neg.diagonal()) * self.factor, weights)))
        else:
            annualized_portfolio_neg_std = np.sqrt(np.dot(weights.T, np.dot(self.covariance_neg * self.factor, weights)))
        if annualized_portfolio_neg_std == 0:
            # NOTE(review): literal is missing the f-prefix, so "{weights}" prints verbatim
            raise ValueError('annualized_portfolio_std cannot be zero. Weights: {weights}')
        return annualized_portfolio_neg_std
    def calc_annualized_portfolio_moments(self, weights: np.array) -> tuple:
        # (annualized return, annualized std) pair for the given weights
        return self.calc_annualized_portfolio_return(weights), self.calc_annualized_portfolio_std(weights)
    def calc_annualized_portfolio_sharpe_ratio(self, weights: np.array) -> float:
        # annualized Sharpe Ratio (no risk-free rate subtracted)
        return self.calc_annualized_portfolio_return(weights) / self.calc_annualized_portfolio_std(weights)
    def calc_annualized_sortino_ratio(self, weights: np.array) -> float:
        # annualized Sortino Ratio (downside deviation in the denominator)
        return self.calc_annualized_portfolio_return(weights) / self.calc_annualized_portfolio_neg_std(weights)
    def calc_risk_parity_func(self, weights):
        # Spinu formulation of the risk parity objective:
        # 0.5 * x' Σ x  -  b' log(x), with x = weights / portfolio volatility
        assets_risk_budget = self.init_weights
        portfolio_volatility = self.calc_annualized_portfolio_std(weights)
        x = weights / portfolio_volatility
        risk_parity = (np.dot(x.T, np.dot(self.covariance * self.factor, x)) / 2.) - np.dot(assets_risk_budget.T, np.log(x + 1e-10))
        return risk_parity
    def calc_relative_risk_contributions(self, weights):
        # relative risk contribution of each asset: w ∘ (Σw) / (w' Σ w)
        rrc = weights * np.dot(weights.T, self.covariance) / np.dot(weights.T, np.dot(self.covariance, weights))
        return rrc
# unit test / smoke test: run every covariance estimator against every objective
if __name__ == "__main__":
    bgn_date = "2016-01-01"
    end_date = "2020-01-01"
    file_path = "../data/prcs.csv"
    # simple returns derived from the price file, restricted to the test window
    rets_df = pd.read_csv(file_path, parse_dates=['Date'], index_col=["Date"]).pct_change()[bgn_date: end_date]
    rets = rets_df.values
    # objectives and covariance estimators to exercise
    obj_function_list = ['equalWeighting', 'minVariance', 'maxReturn', 'maxSharpe', 'maxSortino', 'riskParity']
    cov_function_list = ["HC", "SM", "GS1", "GS2"]
    for cov_fun in cov_function_list:
        print("MVO based on %s covariance function ..." % cov_fun)
        port_opt = portfolio_optimizer(min_weight=0, max_weight=1, cov_function=cov_fun, freq="monthly")
        port_opt.set_returns(returns_df=rets_df)
        # run MVO under various optimization goals
        for obj_fun_str in obj_function_list:
            weights = port_opt.optimize(obj_fun_str)
            ret, std = port_opt.calc_annualized_portfolio_moments(weights=weights)
            sharpe = port_opt.calc_annualized_portfolio_sharpe_ratio(weights=weights)
            sortino = port_opt.calc_annualized_sortino_ratio(weights=weights)
            print("%20s: ret %.3f, std %.3f, Sharpe %.3f, Sortino %.3f" % (obj_fun_str, ret, std, sharpe, sortino))
        obj_fun_str = "meanVariance"
        # mean-variance frontier: optimize for a ladder of target std levels (in %)
        target_stds = [3, 6, 9, 12, 15]
        for target_std in target_stds:
            weights = port_opt.optimize(obj_fun_str, target_std / 100.)
            # print(weights)
            ret, std = port_opt.calc_annualized_portfolio_moments(weights=weights)
            sharpe = port_opt.calc_annualized_portfolio_sharpe_ratio(weights=weights)
            sortino = port_opt.calc_annualized_sortino_ratio(weights=weights)
            print("%20s (%02d%%): ret %.3f, std %.3f, Sharpe %.3f, Sortino %.3f" % (
                obj_fun_str, target_std, ret, std, sharpe, sortino))
| yinsenm/gerber | src/portfolio_optimizer.py | portfolio_optimizer.py | py | 13,193 | python | en | code | 49 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 20,
... |
70064084669 | from django.conf.urls import url
from tests import views, exceptions
# URL routes used by the test suite: two list views, one detail view, and a
# set of endpoints that deliberately raise each handled error condition.
urlpatterns = [
    url(r'^snippets/$', views.SnippetList.as_view(), name='snippet-list'),
    url(r'^snippets2/$', views.SnippetList.as_view(), name='snippet2-list'),
    url(r'^snippet/(?P<pk>\d+)/$', views.SnippetDetail.as_view(),
        name='snippet-detail'),
    url(r'^server_error/$', exceptions.server_error, name='server-error'),
    url(r'^not_found/$', exceptions.not_found, name='not-found'),
    url(r'^method_not_allowed/$', exceptions.method_not_allowed,
        name='not-allowed'),
    url(r'^not_authenticated/$', exceptions.not_authenticated,
        name='not-authenticated'),
]
| FutureMind/drf-friendly-errors | tests/urls.py | urls.py | py | 673 | python | en | code | 129 | github-code | 6 | [
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tests.views.SnippetList.as_view",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tests.views.SnippetList",
"line_number": 6,
"usage_type": "attribute"
},
{
"ap... |
17241327801 | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import requests
from bs4 import BeautifulSoup
import time
BASE_URL = 'https://www.nowcoder.com'
# launch Chrome and open the interview-experience listing page
driver = webdriver.Chrome(executable_path="P:/selenium/chromedriver.exe")
driver.get('https://www.nowcoder.com/discuss/experience?tagId=2656')  # the target listing page
# wait until the AJAX-loaded post links are present
wait = WebDriverWait(driver, 10)
wait.until(
    EC.presence_of_element_located((By.CLASS_NAME, "js-nc-wrap-link"))
)
def scrollPage(timeout, times):
    """Scroll the browser window to the bottom *times* times, pausing
    *timeout* seconds after each scroll so lazily-loaded content can
    appear before the next scroll."""
    remaining = times
    while remaining > 0:
        print('next')
        # one scroll-to-bottom triggers the page to fetch the next batch
        driver.execute_script("window.scrollTo(0,Math.max(document.documentElement.scrollHeight,document.body.scrollHeight,document.documentElement.clientHeight));")
        time.sleep(timeout)
        remaining -= 1
# scroll down 5 times at 3-second intervals to load more posts
scrollPage(3, 5)
# every element with class='js-nc-wrap-link' links to an interview-experience post
items = driver.find_elements_by_class_name('js-nc-wrap-link')
with open('content.txt', 'w', encoding='utf-8') as f:
    # fetch the text of each linked post and write it to the file
    for item in items:
        print(item.get_attribute('data-href'))
        response = requests.get(BASE_URL + item.get_attribute('data-href'))
        data = response.text
        soup = BeautifulSoup(data, 'html.parser')
        words = soup.find('div', {'class': 'post-topic-des nc-post-content'})
        f.write(words.get_text())
| Chunar5354/interview_note | experience/spider.py | spider.py | py | 1,566 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.support.ui.WebDriverWait",
"line_number": 15,
"usage_type": "call"
},
... |
74050593789 | from urllib.request import Request, urlopen
from bs4 import BeautifulSoup
import sqlite3
from time import sleep
# one-letter suffix per difficulty used in the source site's track URLs,
# and the matching difficulty column names in the TrackList table
imgchrList = ['n','a','e','m','i','g','h','v']
DiffList = ['nov','adv','exh','mxm','inf','grv','hvn','vvd']
conn = sqlite3.connect("SDVXRanking.db")
cur = conn.cursor()
# scrape every track page and fill TrackList with title + per-difficulty level
for tid in range(164,1412):
    print('Loading...' + str(tid))
    sql = "insert into TrackList (TrackID) VALUES (?);"
    cur.execute(sql,(str(tid+1000),))
    for i in range(0,8):
        # fetch the track page for this (track, difficulty) pair
        req = Request('http://anzuinfo.me/trackData.html?trackID='+str(tid).zfill(4)+imgchrList[i])
        res = urlopen(req)
        html = res.read().decode('utf8')
        bs = BeautifulSoup(html, 'html.parser')
        TrackData = bs.findAll('table', attrs={'class': 'trackData'})
        for tracks in TrackData:
            findlv = 'lv '+DiffList[i]
            TrackLevel = tracks.find('div', attrs={'class': findlv})
            if TrackLevel is None:
                continue  # this difficulty does not exist for the track
            TrackLevel = TrackLevel.text
            TrackDifficulty = DiffList[i].upper()
            TrackTitle = tracks.find('td', attrs={'class': 'title'}).text
            # column name is concatenated, not parameterized — safe here only
            # because TrackDifficulty comes from the fixed DiffList above
            sql = "update TrackList SET TrackTitle = :Title, "+TrackDifficulty+" = :Lv where TrackID = :ID;"
            cur.execute(sql,{'Title': TrackTitle, 'Lv': TrackLevel, 'ID':str(tid+1000)})
    conn.commit()
    sleep(0.02)  # be polite to the remote server
conn.close() | limjungho/SDVXRanking | ParsingTrackList.py | ParsingTrackList.py | py | 1,365 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "urllib.request.Request",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "bs4.Beau... |
37283667870 | # Importing modules
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import joblib
import findspark
findspark.init()
from pyspark.sql import SparkSession
from pyspark.ml import PipelineModel
from pyspark.sql.functions import *
# Configure spark session (local, 2 cores) used for the RF pipeline and maps
spark = SparkSession\
    .builder\
    .master('local[2]')\
    .appName('quake_etl')\
    .config('spark.jars.package', 'org.mongodb.spark:mongo-spark-connector 2.12:2.4.1')\
    .getOrCreate()
from bokeh.io import output_notebook, output_file
from bokeh.plotting import figure, show, ColumnDataSource
from bokeh.models.tools import HoverTool
import math
from math import pi
from bokeh.palettes import Category20c
from bokeh.transform import cumsum
from bokeh.tile_providers import CARTODBPOSITRON, get_provider, Vendors
from bokeh.themes import built_in_themes
from bokeh.io import curdoc
import warnings
warnings.filterwarnings('ignore')
from pyspark.sql.functions import desc
# pre-computed artifacts: raw quake dataframe, per-year frequency stats,
# and the random-forest predictions used by the map overlay
df=joblib.load('./joblibs/df.joblib')
data=spark.createDataFrame(df)
df_quake_freq=joblib.load('./visualization_files/df_quake_freq.joblib')
df_pred=joblib.load('./visualization_files/rffpred.joblib')
def svm(a):
    """Predict earthquake magnitude for one feature vector *a* using the
    pre-trained SVM model on disk; the raw model output is stored scaled
    by 10, so divide it back down before returning."""
    model = joblib.load('./joblibs/svmModel.joblib')
    features = np.array([a])
    prediction = model.predict(features)
    return prediction[0]/10
def dt(a):
    """Predict earthquake magnitude for one feature vector *a* using the
    pre-trained decision-tree model on disk."""
    model = joblib.load('./joblibs/dtModel.joblib')
    features = np.array([a])
    return model.predict(features)[0]
def rf(a):
    """Predict earthquake magnitude for one [Latitude, Longitude, Depth]
    vector with the saved Spark ML random-forest pipeline."""
    pipe=PipelineModel.load("./joblibs/rfmodel.model_v0")
    ip = pd.DataFrame(np.array([a]))
    ip.columns=['Latitude','Longitude','Depth']
    dip=spark.createDataFrame(ip)
    pred_results_RF = pipe.transform(dip)
    # column index 4 is presumably the pipeline's prediction column — TODO confirm
    return pred_results_RF.collect()[0][4]
def knn(a):
    """Predict magnitude for one [Year, Latitude, Longitude, Depth] vector
    with a 5-nearest-neighbour regressor fit on the module-level ``df``.

    NOTE(review): the model is re-fit from scratch on every call; consider
    fitting once at module load like the other models.
    """
    from sklearn.neighbors import KNeighborsRegressor
    # Separating X and y
    X=df[['Year','Latitude','Longitude','Depth']]
    y=df[['Magnitude']]
    kneigh=KNeighborsRegressor(n_neighbors = 5)
    kneigh.fit(X, y.values.ravel())
    a=np.array([a])
    y_pred_knn=kneigh.predict(a)
    return y_pred_knn[0]
def style(p):
    """Apply the shared look-and-feel (fonts, sizes, legend position) to a
    Bokeh figure and return it."""
    p.title.align='center'
    p.title.text_font_size = '20pt'
    p.title.text_font = 'serif'
    # axis labels
    p.xaxis.axis_label_text_font_size = '14pt'
    p.xaxis.axis_label_text_font_style= 'bold'
    p.yaxis.axis_label_text_font_size = '14pt'
    p.yaxis.axis_label_text_font_style= 'bold'
    # tick labels
    p.xaxis.major_label_text_font_size = '12pt'
    p.yaxis.major_label_text_font_size = '12pt'
    p.legend.location = 'top_left'
    return p
def showmap():
    """Render the 2016 earthquakes and the 2017 predicted quakes on a
    Bokeh tile map (Web Mercator projection)."""
    # Earthquakes in map representation: 2016 actuals + model predictions
    df_quakes_2016 = data[data['Year']==2016]
    df_quakes_2016=df_quakes_2016.toPandas()
    def plotMap():
        """Convert lon/lat to Web Mercator, build the data sources and draw the map."""
        lat = df_quakes_2016['Latitude'].values.tolist()
        lon = df_quakes_2016['Longitude'].values.tolist()
        pred_lat = df_pred['Latitude'].values.tolist()
        pred_lon = df_pred['Longitude'].values.tolist()
        lst_lat = []
        lst_lon = []
        lst_pred_lat = []
        lst_pred_lon = []
        i=0
        j=0
        # lon/lat -> Web Mercator (x, y) for the 2016 quakes;
        # r_major is the WGS84 equatorial radius in meters
        for i in range (len(lon)):
            r_major = 6378137.000
            x = r_major * math.radians(lon[i])
            scale = x/lon[i]
            y = 180.0/math.pi * math.log(math.tan(math.pi/4.0 +
                lat[i] * (math.pi/180.0)/2.0)) * scale
            lst_lon.append(x)
            lst_lat.append(y)
            i += 1  # NOTE(review): redundant — the for loop already advances i
        # same conversion for the predicted quakes
        for j in range (len(pred_lon)):
            r_major = 6378137.000
            x = r_major * math.radians(pred_lon[j])
            scale = x/pred_lon[j]
            y = 180.0/math.pi * math.log(math.tan(math.pi/4.0 +
                pred_lat[j] * (math.pi/180.0)/2.0)) * scale
            lst_pred_lon.append(x)
            lst_pred_lat.append(y)
            j += 1  # NOTE(review): redundant — the for loop already advances j
        df_quakes_2016['coords_x'] = lst_lat
        df_quakes_2016['coords_y'] = lst_lon
        df_pred['coords_x'] = lst_pred_lat
        df_pred['coords_y'] = lst_pred_lon
        # marker size proportional to magnitude
        df_quakes_2016['Mag_Size'] = df_quakes_2016['Magnitude'] * 4
        df_pred['Mag_Size'] = df_pred['Pred_Magnitude'] * 4
        lats = df_quakes_2016['coords_x'].tolist()
        longs = df_quakes_2016['coords_y'].tolist()
        mags = df_quakes_2016['Magnitude'].tolist()
        years = df_quakes_2016['Year'].tolist()
        mag_size = df_quakes_2016['Mag_Size'].tolist()
        pred_lats = df_pred['coords_x'].tolist()
        pred_longs = df_pred['coords_y'].tolist()
        pred_mags = df_pred['Pred_Magnitude'].tolist()
        pred_year = df_pred['Year'].tolist()
        pred_mag_size = df_pred['Mag_Size'].tolist()
        # Bokeh data source for the 2016 actual quakes
        cds = ColumnDataSource(
            data=dict(
                lat=lats,
                lon=longs,
                mag=mags,
                year=years,
                mag_s=mag_size
            )
        )
        # Bokeh data source for the predicted quakes
        pred_cds = ColumnDataSource(
            data=dict(
                pred_lat=pred_lats,
                pred_long=pred_longs,
                pred_mag=pred_mags,
                year=pred_year,
                pred_mag_s=pred_mag_size
            )
        )
        TOOLTIPS = [( "Year", "@year"), ("Magnitude", "@mag"),("Predicted Magnitude", "@pred_mag")
        ]
        p = figure(title = 'Earthquake Map',
            plot_width=2300, plot_height=450,
            x_range=(-2000000, 6000000),
            y_range=(-1000000, 7000000),
            tooltips=TOOLTIPS)
        p.circle(x='lon', y='lat', size='mag_s', fill_color='#cc0000', fill_alpha=0.7,
            source=cds, legend='Quakes 2016')
        # NOTE(review): fill_alpha=7.0 is outside the valid [0, 1] alpha range — likely meant 0.7
        p.circle(x='pred_long', y='pred_lat', size='pred_mag_s', fill_color='#ccff33', fill_alpha=7.0,
            source=pred_cds, legend='Predicted Quakes 2017')
        tile_provider = get_provider(Vendors.CARTODBPOSITRON)
        p.add_tile(tile_provider)
        # map-specific styling (axes hidden — only the tiles and markers show)
        p.title.align='center'
        p.title.text_font_size='20pt'
        p.title.text_font='serif'
        p.legend.location='bottom_right'
        p.legend.background_fill_color='black'
        p.legend.background_fill_alpha=0.8
        p.legend.click_policy='hide'
        p.legend.label_text_color='white'
        p.xaxis.visible=False
        p.yaxis.visible=False
        p.axis.axis_label=None
        p.axis.visible=False
        p.grid.grid_line_color=None
        show(p)
    df_quakes_2016['Magnitude']  # NOTE(review): no-op expression, safe to delete
    plotMap()
def freqgraph():
    """Render a bar chart of earthquake counts per year from df_quake_freq."""
    # Frequency of Earthquakes By Year
    def plotBar():
        cds = ColumnDataSource(data=dict(
            yrs= df_quake_freq[ 'Year'].values.tolist(),
            numQuakes = df_quake_freq['Counts'].values.tolist()))
        TOOLTIPS =[ ('Number of earthquakes','@numQuakes'),('Year','@yrs')]
        barChart = figure(title='Frequency of Earthquakes by Year',
            plot_height=400,
            plot_width=1150,
            x_axis_label='Years',
            y_axis_label='Number of Occurances',
            x_minor_ticks=2,
            y_range=(0, df_quake_freq['Counts'].max() +100),
            toolbar_location=None,
            tooltips=TOOLTIPS)
        print(cds)  # NOTE(review): leftover debug print
        barChart.vbar (x='yrs', bottom=0, top='numQuakes',
            color='#cc0000', width=0.75,
            legend='Year', source=cds)
        barChart = style(barChart)
        show(barChart)
        return barChart
    plotBar()
def maggraph():
    """Render a line chart of the maximum and average magnitude per year."""
    def plotMagnitude():
        cds= ColumnDataSource(data=dict(
            yrs = df_quake_freq[ 'Year'].sort_values().values.tolist(),
            avg_mag = df_quake_freq['avg(Magnitude)'].round(1).values.tolist(),
            max_mag= df_quake_freq [ 'max(Magnitude)'].values.tolist()))
        TOOLTIPS = [('Year', '@yrs'),('avg(Magnitude)', '@avg_mag'),('max(Magnitude)','@max_mag')]
        mp = figure(title='Maximum and Average Magnitude by Year',
            plot_width=1150,
            plot_height=400,
            x_axis_label='Years',
            y_axis_label='Magnitude', y_range=(5, df_quake_freq[ 'max(Magnitude)'].max() + 1),
            x_minor_ticks=2,
            toolbar_location=None,
            tooltips= TOOLTIPS)
        # max magnitude: red line + markers
        mp.line(x='yrs', y='max_mag', color='#cc0000', line_width=2, legend= 'Max Magnitude', source=cds)
        mp.circle(x='yrs', y='max_mag', color='#cc0000', size=8, fill_color='#cc0000', source=cds)
        # average magnitude: yellow line + markers
        mp.line(x='yrs', y='avg_mag', color='yellow', line_width=2, legend = 'Avg Magnitude', source=cds)
        mp.circle(x='yrs', y='avg_mag', color='yellow', size=8, fill_color='yellow', source=cds)
        mp =style(mp)
        show(mp)
        return mp
    plotMagnitude()
| ramsundar07/Earthquake-Detetection-Analysis-using-Machine-Learning-Algorithms | GUI/gui_algorithm.py | gui_algorithm.py | py | 8,670 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "findspark.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder.master",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 13,
"usage_type": "attribute"
}... |
3999827011 | import PySimpleGUI as sg # Simple GUI for Python
import core
import guiElements
import constants as const
# Setup the simple window
sg.theme("Black")
# one row per pre-built frame from guiElements
layout = [
    [guiElements.frameSelectJobAndZone],
    [guiElements.frameSelectResource],
    [guiElements.frameAssignAKey],
    [guiElements.layoutStatusAndStartStopBtns],
]
# Create the Window
window = sg.Window(
    title="Wakfu FarmBot 0.1",
    layout=layout,
    size=(400, 350),
    element_justification="c",
    element_padding=10,
)
# Event Loop to process "events" and get the "values" of the inputs;
# each widget key is dispatched to its handler in the core module
while True:
    event, values = window.read()
    if event == sg.WIN_CLOSED:  # if user closes window
        break
    if event == "button_start":
        core.onClick_Start_Button(event, values, window)
    if event == "button_stop":
        core.onClick_Stop_Button(event, values, window)
    if event == "combo_key":
        core.onChange_Key_Combo(event, values, window)
    if event == "combo_zone":
        core.onChange_Zone_Combo(event, values, window)
    if event == "combo_resource":
        core.onChange_Resource_Combo(event, values, window)
    if event == "combo_job":
        core.onChange_Job_Combo(event, values, window)
window.close()
| jhriverasa/wakfu-farmscript | FarmScriptGUI.py | FarmScriptGUI.py | py | 1,235 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "PySimpleGUI.theme",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "guiElements.frameSelectJobAndZone",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "guiElements.frameSelectResource",
"line_number": 14,
"usage_type": "attribute"
... |
75241434428 | __all__ = ["save", "load", "load_state_dict", "arange", "cat", "cos", "clamp", "Device", "from_numpy", "flatten",
"LongTensor", "matmul", "mm", "normal", "ones", "x2ms_pow", "sin", "tanh", "x2ms_tensor", "Tensor",
"split", 'as_tensor', 'argmax', 'Generator', 'sigmoid', 'rand', 'floor', 'bernoulli', 'equal', 'var_mean',
'randperm', 'sqrt', 'stack', 'log', 'exp', 'typename', 'is_tensor', 'randn', 'FloatTensor', 'x2ms_max',
'x2ms_min', 'bmm', 'x2ms_abs', 'square', 'squeeze', 'unsqueeze', 'transpose', 'repeat_interleave', 'div',
'ones_like', 'where', 'tensordot', 'meshgrid', 'roll', 'linspace', 'full', 'empty', 'x2ms_sum',
'multinomial', 'gather', 'sort', 'topk', 'x2ms_all', 'cumsum', 'einsum', 'full_like', 'masked_select',
'x2ms_mean', 'mul', 'isfinite', 'diag', 'acos', 'add', 'argsort', 'asin', 'atan2', 'bincount',
'broadcast_tensors', 'chunk', 'conj', 'cosh', 'cross', 'cumprod', 'diagflat', 'x2ms_diagonal', 'eq',
'zeros_like', 'atan', 'unique', 'triu', 'nonzero', 'log2', 'cdist', 'erf', 'softmax', 'eye', 'prod', 'norm',
'zeros', 'lt', 'ge', 'ne', 'le', 'reshape', 'reminder', 'result_type', 'real', 'reciprocal', 'neg', 'isinf',
'isnan', 'argmin', 'floor_divide', 'fmod', 'empty_like', 'erfc', 'erfinv', 'expm1', 'flip', 'gt',
'bitwise_and', 'bitwise_or', 'bitwise_xor', 'bartlett_window', 'blackman_window', 'hamming_window', 'histc',
'imag', 'ceil', 'lerp', 'log1p', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'var', 'unbind',
'trunc', 'true_divide', 'triu_indices', 'triu', 'tril', 'trapz', 'trapezoid', 'trace', 'tan', 'take', 'lt',
'ge', 'ne', 'le', 'reshape', 'reminder', 'result_type', 'real', 'reciprocal', 'neg',
'minimum', 'hann_window', 'dot', 'scatter', 'ger', 'addmm', 'BoolTensor', 'finfo', 'IntTensor',
'get_rng_state', 'set_rng_state', 'randint', 'randn_like', 'ByteTensor', 'index_select', 'allclose', 't',
'vstack', 'rsqrt', 'x2ms_round', 'acosh', 'addcmul', 'addcdiv', 'asinh', 'atanh', 'amax', 'amin',
'cummax', 'cummin', 'logsumexp', 'renorm', 'xlogy', 'sign', 'sinh', 'less', 'narrow', 'tensor_zeros_like',
'Size', 'DoubleTensor', 'cosine_similarity', 'block_diag', 'cholesky_solve', 'lu_solve', 'x2ms_any',
'greater', 'greater_equal', 'less_equal', 'not_equal', 'multiply', 'logspace', 'tril_indices', 'vander',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'column_stack', 'rad2deg', 'outer', 'negative', 'log10',
'count_nonzero', 'signbit', 'isposinf', 'isin', 'isneginf', 'copysign', 'deg2rad', 'diff', 'gcd',
'heaviside']
import numbers
import numpy as np
import mindspore
import mindspore.default_config
import mindspore.nn
import mindspore.numpy
import mindspore.dataset.transforms
import mindspore.ops.functional as F
from mindspore.nn import DistributedGradReducer
from mindspore.ops import composite
from mindspore.parallel._utils import (_get_device_num, _get_gradients_mean, _get_parallel_mode)
from mindspore.common.parameter import ParameterTuple
from mindspore.context import ParallelMode
from .core.decorator import x2ms_func_decorator
from .torch_api.save_load import save, load, load_state_dict
from .torch_api import tensor_api
from .torch_api import autograd
from .third_party_adapter import math_api
from .torch_api.nn_api import nn_cell
from .torch_api.nn_api import nn_init
from .torch_api.nn_api import nn_functional
from .core.context import x2ms_context
from .utils.util_api import logger
from .torch_api.torch_base_api import arange, cat, cos, clamp, Device, from_numpy, flatten, \
LongTensor, matmul, mm, normal, ones, x2ms_pow, sin, tanh, x2ms_tensor, Tensor, zeros, split, as_tensor, dot, \
x2ms_sum, argmax, Generator, sigmoid, rand, floor, bernoulli, equal, randperm, var_mean, sqrt, stack, log, exp, \
typename, is_tensor, randn, FloatTensor, x2ms_max, x2ms_min, bmm, x2ms_abs, square, squeeze, unsqueeze, \
transpose, repeat_interleave, div, ones_like, where, tensordot, meshgrid, roll, linspace, full, empty, \
multinomial, gather, sort, topk, x2ms_all, cumsum, einsum, full_like, masked_select, x2ms_mean, mul, isfinite, \
diag, acos, add, argsort, asin, atan2, bincount, broadcast_tensors, chunk, conj, cosh, cross, cumprod, \
diagflat, x2ms_diagonal, zeros_like, atan, unique, nonzero, log2, cdist, erf, softmax, eye, prod, norm, \
lt, ge, eq, ne, le, reshape, reminder, result_type, real, reciprocal, neg, isinf, isnan, argmin, floor_divide, \
fmod, empty_like, erfc, erfinv, expm1, flip, gt, bitwise_and, bitwise_or, bitwise_xor, bartlett_window, \
blackman_window, hamming_window, histc, imag, ceil, lerp, log1p, logical_and, logical_not, logical_or, \
logical_xor, var, unbind, trunc, true_divide, triu_indices, triu, tril, trapz, trapezoid, trace, tan, take, \
minimum, hann_window, scatter, ger, addmm, BoolTensor, finfo, IntTensor, get_rng_state, set_rng_state, randint, \
randn_like, ByteTensor, index_select, allclose, t, vstack, rsqrt, x2ms_round, acosh, addcmul, addcdiv, asinh, \
atanh, amax, amin, cummax, cummin, logsumexp, renorm, xlogy, sign, sinh, less, narrow, tensor_zeros_like, Size, \
DoubleTensor, cosine_similarity, block_diag, cholesky_solve, lu_solve, x2ms_any, greater, greater_equal, \
less_equal, not_equal, multiply, logspace, tril_indices, vander, atleast_1d, atleast_2d, atleast_3d, \
column_stack, rad2deg, outer, negative, log10, count_nonzero, signbit, isposinf, isin, isneginf, copysign, \
deg2rad, diff, gcd, heaviside
# overwrite magic methods so mindspore.Tensor mimics torch.Tensor semantics
mindspore.Tensor.__and__ = tensor_api.tensor_and
mindspore.Tensor.__or__ = tensor_api.tensor_or
mindspore.Tensor.__format__ = tensor_api.tensor_format
mindspore.Tensor.__getitem__ = tensor_api.tensor_getitem
mindspore.Tensor.__matmul__ = tensor_api.matmul
mindspore.Tensor.__setitem__ = tensor_api.tensor_setitem
mindspore.Tensor.T = tensor_api.transpose_
mindspore.Tensor.__float__ = lambda t: float(t.asnumpy())
mindspore.Tensor.__int__ = lambda t: int(t.asnumpy())
# in-place arithmetic on Parameter delegates to the tensor_api helpers
mindspore.Parameter.__iadd__ = tensor_api.parameter_iadd
mindspore.Parameter.__isub__ = tensor_api.parameter_isub
mindspore.Parameter.__imul__ = tensor_api.parameter_imul
mindspore.Parameter.__idiv__ = tensor_api.parameter_idiv
# overwrite properties to expose torch-style attributes
mindspore.Tensor.is_cuda = tensor_api.is_cuda
mindspore.Tensor.data = tensor_api.property_data
mindspore.Tensor.device = tensor_api.property_device
mindspore.Parameter.grad = tensor_api.grad
# NOTE(review): this immediately overwrites the assignment above — only
# tensor_api.set_grad ends up bound to Parameter.grad; verify intent.
mindspore.Parameter.grad = tensor_api.set_grad


@property
def parameter_data(self):
    # torch-style `.data`: for a Parameter, `.data` is the parameter itself
    return self


@parameter_data.setter
def set_data(self, new_data):
    # delegate to mindspore's own Parameter.set_data for assignment
    self.set_data(new_data)


# NOTE(review): the second assignment wins; `set_data` is the full property
# (getter + setter) returned by the decorator, so the first line is redundant.
mindspore.Parameter.data = parameter_data
mindspore.Parameter.data = set_data
def _get_calculate_shape(obj, other):
    """Shape of the elementwise result of ``obj <op> other``: the broadcast
    of both shapes when *other* is a Tensor, otherwise just *obj*'s shape."""
    if isinstance(other, mindspore.Tensor):
        return np.broadcast_shapes(obj.shape, other.shape)
    return obj.shape
def _replace_tensor_calculate_func(origin_func_name, output_type=None):
    """Wrap a Tensor binary dunder so it tolerates float64 operands, numpy
    arrays and empty tensors, approximating torch behaviour.

    :param origin_func_name: name of the dunder being replaced, e.g. '__add__'
    :param output_type: dtype of the zero result returned for empty inputs
        (set to bool for comparison operators); None keeps the operand dtype.
    """
    origin_func = getattr(mindspore.Tensor, origin_func_name)
    def new_func(obj, other):
        # MindSpore kernels generally do not support float64 -- downcast first.
        if obj.dtype == mindspore.float64:
            obj = obj.astype(mindspore.float32)
        if isinstance(other, np.ndarray):
            other = mindspore.Tensor(other, obj.dtype)
        # Empty operand: short-circuit with zeros of the broadcast shape,
        # since the underlying kernel would fail on size-0 tensors.
        if obj.size == 0 or (isinstance(other, mindspore.Tensor) and other.size == 0):
            if output_type is None:
                return mindspore.ops.Zeros()(_get_calculate_shape(obj, other), obj.dtype)
            else:
                return mindspore.ops.Zeros()(_get_calculate_shape(obj, other), output_type)
        return origin_func(obj, other)
    setattr(mindspore.Tensor, origin_func_name, new_func)
# Arithmetic operators keep the operand dtype for their empty-input result...
for func_name in ("__add__", "__sub__", "__mul__", "__truediv__", "__mod__", "__pow__"):
    _replace_tensor_calculate_func(func_name)
# ...comparison operators must always produce bool tensors.
for func_name in ("__lt__", "__gt__", "__le__", "__ge__", "__eq__", "__ne__"):
    _replace_tensor_calculate_func(func_name, mindspore.bool_)
class GraphTrainStep(mindspore.nn.TrainOneStepCell):
    """TrainOneStepCell variant for graph mode whose wrapped network returns
    (loss, *model_outputs) instead of only the loss."""
    def __init__(self, network, optimizer):
        super(GraphTrainStep, self).__init__(network, optimizer)
    def call_construct(self, *inputs):
        """Invoke the cell, coercing any non-Tensor input to a float32 Tensor."""
        new_input = list(mindspore.Tensor(value, dtype=mindspore.float32)
                         if not isinstance(value, mindspore.Tensor)
                         else value
                         for value in inputs)
        return self.__call__(*new_input)
    def construct(self, *inputs):
        """Run forward, backward and the optimizer step; return the full
        network output tuple (loss first)."""
        output = self.network(*inputs)
        loss = output[0]
        model_output = output[1:]
        # Gradient sensitivity: self.sens w.r.t. the loss, zeros for every
        # auxiliary model output so they contribute no gradients.
        sens = (F.fill(loss.dtype, loss.shape, self.sens),)
        for output_value in model_output:
            sens += self._get_sens(output_value)
        grads = self.grad(self.network, self.weights)(*inputs, sens)
        grads = self.grad_reducer(grads)
        # depend() forces the optimizer update to execute before loss is used.
        F.depend(loss, self.optimizer(grads))
        return output
    @staticmethod
    def _get_sens(value):
        """Build a zero sensitivity entry matching the structure of one
        model output (Tensor, list/tuple of Tensors, or scalar)."""
        if isinstance(value, mindspore.Tensor):
            return (F.fill(value.dtype, value.shape, 0),)
        if isinstance(value, list):
            sens = []
            for tensor in value:
                sens.append(F.fill(tensor.dtype, tensor.shape, 0))
            return (sens,)
        if isinstance(value, tuple):
            sens = ()
            for tensor in value:
                sens += (F.fill(tensor.dtype, tensor.shape, 0),)
            return (sens,)
        return (0,)
def add_module(obj, name, module):
    """torch.nn.Module.add_module equivalent: attach a child cell by name."""
    setattr(obj, name, module)
classic_cell_init = mindspore.nn.Cell.__init__
def new_cell_init(self, auto_prefix=True, flags=None):
    """Cell.__init__ replacement that adds torch's `training` flag."""
    classic_cell_init(self, auto_prefix, flags)
    self.training = True
# same name and inherit subclass api
mindspore.nn.Cell.add_module = add_module
mindspore.nn.Cell.__init__ = new_cell_init
mindspore.nn.Cell._modules = nn_cell._modules
@property
def is_floating_point(self):
    # torch's dtype.is_floating_point analogue for mindspore number types.
    return self in (mindspore.float16, mindspore.float32, mindspore.float64)
mindspore.dtype.typing.Number.is_floating_point = is_floating_point
def cuda_set_device(device):
    """Stub for torch.cuda.set_device; device placement is configured via
    mindspore.context, so this is a no-op."""
    pass
def is_cuda_available():
    """
    Stub for torch.cuda.is_available: always reports True so converted code
    follows its accelerator code path.
    """
    return True
def memory_cached():
    """Stub for torch.cuda.memory_cached; always 0.0."""
    return 0.0
def memory_reserved():
    """Stub for torch.cuda.memory_reserved; always 0.0."""
    return 0.0
def max_memory_reserved(device=None):
    """Stub for torch.cuda.max_memory_reserved; always 0.0."""
    return 0.0
def max_memory_allocated(device=None):
    """Stub for torch.cuda.max_memory_allocated; always 0.0."""
    return 0.0
def memory_allocated(device=None):
    """Stub for torch.cuda.memory_allocated; always 0.0."""
    return 0.0
def get_device():
    """Return the configured MindSpore device target (e.g. 'Ascend', 'GPU')."""
    return mindspore.context.get_context('device_target')
@x2ms_func_decorator(mindspore.nn.Cell)
def parameters(obj, *args, **kwargs):
    """torch Module.parameters equivalent."""
    return get_cell_params(obj, *args, **kwargs)
def get_cell_params(cell, recurse=True):
    """Iterator over both trainable and untrainable parameters of a cell."""
    return iter(cell.trainable_params(recurse) + cell.untrainable_params(recurse))
@x2ms_func_decorator(mindspore.nn.Cell)
def named_parameters(model, prefix='', recurse=True):
    """torch Module.named_parameters equivalent (list of (name, param))."""
    return list(param for param in model.parameters_and_names(prefix, recurse))
@x2ms_func_decorator(mindspore.nn.Cell)
def named_modules(model, prefix=''):
    """torch Module.named_modules equivalent."""
    return model.cells_and_names(prefix)
@x2ms_func_decorator(mindspore.nn.Cell)
def graph_forward(obj, *args, **kwargs):
    """Forward pass in graph mode: invoke the cell itself."""
    return obj(*args, **kwargs)
@x2ms_func_decorator(mindspore.nn.Cell)
def forward(obj, *args, **kwargs):
    """Forward pass in pynative mode: call construct() directly."""
    return obj.construct(*args, **kwargs)
@x2ms_func_decorator(mindspore.nn.Cell)
def x2ms_train(obj, *args, **kwargs):
    """torch Module.train equivalent; also registers the cell for AMP setup."""
    if len(obj.trainable_params()) > 0:
        if obj not in x2ms_context.amp_model:
            x2ms_context.amp_model.append(obj)
    return obj.set_train(*args, **kwargs)
@x2ms_func_decorator(mindspore.nn.Cell)
def x2ms_eval(obj, *args, **kwargs):
    """torch Module.eval equivalent."""
    return obj.set_train(False)
class TrainCellWithoutOptimizer(mindspore.nn.Cell):
    """One-step training cell that computes gradients and stores them on the
    parameters (torch-style p.grad) instead of applying an optimizer."""
    def __init__(self, network, sens=1.0):
        super(TrainCellWithoutOptimizer, self).__init__(auto_prefix=False)
        self.network = network
        self.network.set_grad()
        self.model_weights = ParameterTuple(parameters(network))
        self.grad = composite.GradOperation(get_by_list=True, sens_param=True)
        self.sens = sens
        # NOTE(review): this initial False is redundant -- parallel_flag is
        # recomputed unconditionally a few lines below.
        self.parallel_flag = False
        self.grad_reducer = F.identity
        self.parallel_mode = _get_parallel_mode()
        self.parallel_flag = self.parallel_mode in (ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL)
        if self.parallel_flag:
            # In data/hybrid parallel mode gradients must be averaged across
            # devices before being exposed.
            self.grads_mean = _get_gradients_mean()
            self.device_num = _get_device_num()
            self.grad_reducer = DistributedGradReducer(self.model_weights, self.grads_mean, self.device_num)
    def construct(self, *inputs):
        """Forward + backward; attach each gradient to its parameter and
        return the loss."""
        train_loss = self.network(*inputs)
        train_sens = F.fill(train_loss.dtype, train_loss.shape, self.sens)
        grads = self.grad(self.network, self.model_weights)(*inputs, train_sens)
        grads = self.grad_reducer(grads)
        for i, parameter in enumerate(self.model_weights):
            parameter.grad = grads[i]
        return train_loss
_wrapped_model_dict = {}  # cache: id(model) -> wrapped one-step train cell


def train_one_step_cell(model, optimizer=None):
    """Wrap `model` (and optionally `optimizer`) into a one-step train cell.

    Results are cached per model id so repeated calls return the same
    wrapper. Without an AMP configuration the plain wrappers are used;
    otherwise mindspore.amp builds the mixed-precision train network.
    """
    key = id(model)
    # Single dict lookup instead of `key in d.keys()` followed by d.get(key);
    # cached wrappers are never None, so the sentinel test is safe.
    cached = _wrapped_model_dict.get(key)
    if cached is not None:
        return cached
    if x2ms_context.amp_opt_level is None or x2ms_context.amp_model is None:
        if optimizer is None:
            wrapped_model = TrainCellWithoutOptimizer(model)
        else:
            wrapped_model = mindspore.nn.TrainOneStepCell(model, optimizer)
    elif isinstance(x2ms_context.loss_scale, numbers.Number) and x2ms_context.amp_opt_level != "O2":
        wrapped_model = mindspore.amp.build_train_network(
            model, optimizer, level=x2ms_context.amp_opt_level,
            loss_scale_manager=mindspore.FixedLossScaleManager(x2ms_context.loss_scale))
    else:
        wrapped_model = mindspore.amp.build_train_network(model, optimizer, level=x2ms_context.amp_opt_level)
    _wrapped_model_dict[key] = wrapped_model
    return wrapped_model
def graph_train_one_step_cell(model, optimizer):
    """Graph-mode counterpart of train_one_step_cell (same per-model cache).

    Raises NotImplementedError when an AMP configuration is active, since
    graph mode does not support mixed precision here.
    """
    key = id(model)
    # Single lookup instead of `key in d.keys()` + d.get(key).
    cached = _wrapped_model_dict.get(key)
    if cached is not None:
        return cached
    if x2ms_context.amp_opt_level is not None and x2ms_context.amp_model is not None:
        raise NotImplementedError("Graph mode does not currently support Mixed precision")
    wrapped_model = GraphTrainStep(model, optimizer)
    _wrapped_model_dict[key] = wrapped_model
    return wrapped_model
def load_state_dict_from_url(url, model_dir=None, map_location=None, progress=True, check_hash=False):
    """Stub for torch.hub.load_state_dict_from_url.

    Downloading is not supported; 'model_dir', 'map_location', 'progress'
    and 'check_hash' are ignored and an empty state dict is returned.
    """
    logger.warning("Not support load_state_dict_from_url now")
    return {}
def to(obj, *args, **kwargs):
    """torch-style .to() dispatcher for cells, tensors and other objects."""
    if isinstance(obj, mindspore.nn.Cell):
        return _cell_to(obj, *args, **kwargs)
    elif isinstance(obj, mindspore.Tensor):
        return _tensor_to(obj, *args, **kwargs)
    else:
        # Anything else is assumed to expose its own .to() implementation.
        return obj.to(*args, **kwargs)
def _cell_to(obj, *args, **kwargs):
    """Emulate torch Module.to() on a mindspore Cell.

    Only float16/float32 casts are meaningful for a Cell (via to_float);
    device arguments and every other request leave the cell unchanged.
    Supported forms: to(dtype), to(tensor), to(device, dtype),
    to(dtype=...), to(other=tensor).
    """
    if args:
        param = args[0]
        if isinstance(param, mindspore.Type) and param in (mindspore.float16, mindspore.float32):
            return obj.to_float(dst_type=param)
        if isinstance(param, mindspore.Tensor) and param.dtype in (mindspore.float16, mindspore.float32):
            return obj.to_float(dst_type=param.dtype)
        if len(args) > 1:
            # to(device, dtype): second positional is the dtype.
            param = args[1]
            if param in (mindspore.float16, mindspore.float32):
                return obj.to_float(dst_type=param)
    if 'dtype' in kwargs and kwargs['dtype'] in (mindspore.float16, mindspore.float32):
        return obj.to_float(dst_type=kwargs['dtype'])
    if 'other' in kwargs and kwargs['other'].dtype in (mindspore.float16, mindspore.float32):
        # Bug fix: to_float() expects a dtype; the original passed the whole
        # tensor (kwargs['other']) although the condition checks its .dtype.
        return obj.to_float(dst_type=kwargs['other'].dtype)
    return obj
def _tensor_to(obj, *args, **kwargs):
    """Emulate torch Tensor.to(): only dtype conversions are meaningful;
    device arguments are ignored and the tensor is returned unchanged."""
    if args:
        param = args[0]
        if isinstance(param, mindspore.common.Type):
            return obj.astype(dtype=param)
        if isinstance(param, mindspore.Tensor):
            # to(other): adopt the other tensor's dtype.
            return obj.astype(dtype=param.dtype)
        if len(args) > 1:
            # to(device, dtype): second positional is the dtype.
            return obj.astype(dtype=args[1])
    if 'dtype' in kwargs.keys():
        return obj.astype(dtype=kwargs['dtype'])
    if 'other' in kwargs.keys():
        return obj.astype(dtype=kwargs['other'].dtype)
    return obj
def get_device_properties(device):
    """Stub for torch.cuda.get_device_properties; `device` is ignored."""
    return CUDADeviceProperty()
def convert_sync_batchnorm(module, process_group=None):
    """Stub for torch.nn.SyncBatchNorm.convert_sync_batchnorm: no-op."""
    return module
class CUDADeviceProperty:
    """Minimal stand-in for torch.cuda device properties."""
    def __init__(self):
        device_target = mindspore.context.get_context('device_target')
        device_id = mindspore.context.get_context('device_id')
        # e.g. 'Ascend:0'; the real memory size is unknown, reported as 0.
        self.name = f'{device_target}:{device_id}'
        self.total_memory = 0
| Gufrannn/W-MAE | MindSpore/x2ms_adapter/__init__.py | __init__.py | py | 16,986 | python | en | code | 12 | github-code | 6 | [
{
"api_name": "mindspore.Tensor",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "torch_api.tensor_api.tensor_and",
"line_number": 71,
"usage_type": "attribute"
},
{
"api_name": "torch_api.tensor_api",
"line_number": 71,
"usage_type": "name"
},
{
"a... |
33648598241 | from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
import re
import string
@dataclass
class Equipment:
    """A single line of adventuring gear from the equipment table."""

    title: str                      # item name, e.g. "Chain"
    value: str                      # listed cost, e.g. "30gp"
    unit: Optional[str] = None      # unit the quantity is measured in, e.g. "'"
    quantity: Optional[int] = None  # numeric quantity parsed from the name


#: (name, cost) source rows; names may carry a ", <quantity>" suffix.
_equipment = [
    ("Backpack", "2gp"),
    ("Candle", "1cp"),
    ("Chain, 10'", "30gp"),
    ("Chalk, 1 piece", "1cp"),
    ("Chest, empty", "2gp"),
    ("Crowbar", "2gp"),
    ("Flask, empty", "3cp"),
    ("Flint & steel", "15cp"),
    ("Grappling hook", "1gp"),
    ("Hammer, small", "5sp"),
    ("Holy symbol", "25gp"),
    ("Holy water, 1 vial**", "25gp"),
    ("Ironspikes, each", "1sp"),
    ("Lantern", "10gp"),
    ("Mirror, hand-sized", "10gp"),
    ("Oil, 1 flask***", "2sp"),
    ("Pole, 10-foot", "15cp"),
    ("Rations, per day", "5cp"),
    ("Rope, 50'", "25cp"),
    ("Sack, large", "12cp"),
    ("Sack, small", "8cp"),
    ("Thieves' tools", "25gp"),
    ("Torch, each", "1cp"),
    ("Waterskin", "5sp"),
]

EQUIPMENT = {}

#: Process the initial list of equipment tuples into a dict of Equipment classes
for name, cost in _equipment:
    kwargs = {}
    parts = name.split(", ")
    title = parts[0]
    kwargs["title"] = title
    kwargs["value"] = cost
    if len(parts) > 1:
        quantity = parts[1]
        try:
            digits = int("".join(c for c in quantity if c.isdigit()))
        except ValueError:
            # Bug fix: suffixes without digits ("empty", "each", "per day")
            # make int("") raise ValueError; the original caught IndexError,
            # which int() never raises, so those rows crashed module import.
            digits = None
        if digits:
            kwargs["quantity"] = digits
            # Strip digits, '+' and whitespace to leave just the unit text.
            kwargs["unit"] = re.sub(r"[\d+\s]", "", quantity)
    EQUIPMENT[title] = Equipment(**kwargs)
| sethwoodworth/crawl-classic | crawl_classic/equipment.py | equipment.py | py | 1,581 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "typing.Optional",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "re.sub",
"li... |
1443436731 | from django import forms
from django.utils.translation import ugettext_lazy as _
from .models import Camera
class CameraAddForm(forms.ModelForm):
    """ModelForm for registering a Camera (title + network address)."""
    class Meta:
        model = Camera
        fields = ['title', 'ip']
        # German display labels, wrapped for lazy translation.
        labels = {
            'title': _('Titel'),
            'ip': _('Adresse')
        }
| Thorium0/Security-terminal | Terminal/camera/forms.py | forms.py | py | 324 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "models.Camera",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.utils.tran... |
12830949060 | # Definition for a Node.
from collections import deque
from typing import List
class Node:
    """N-ary tree node holding a value and an optional list of child nodes."""

    def __init__(self, val=None, children=None):
        # Store both fields verbatim; children stays None when not supplied.
        self.val, self.children = val, children
class Solution:
    """Preorder traversal of an N-ary tree, recursively and iteratively."""

    def preorder(self, root: Node) -> List[int]:
        """Recursive preorder: node first, then each child subtree in order."""
        if root is None:
            return []
        values = [root.val]
        for child in root.children or []:
            values.extend(self.preorder(child))
        return values

    def preorder_iter(self, root: Node) -> List[int]:
        """Iterative preorder using an explicit stack of pending nodes."""
        if root is None:
            return []
        values = []
        pending = [root]
        while pending:
            node = pending.pop()
            values.append(node.val)
            if node.children:
                # Push children reversed so the leftmost is popped first.
                pending.extend(reversed(node.children))
        return values
| theRobertSan/LeetCode-Solutions-Python | 589.py | 589.py | py | 996 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "collections.deque",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 30,
"usage_type": "name"
}
] |
30857567174 | import pyautogui
import webbrowser as web
import time
# Read the message and how many times to repeat it.
msg = input('enter message to send: ')
times = int(input('enter the number of times to send the message: '))
# win_chrome_path = 'C:\Program Files\Google\Chrome\Application\chrome.exe %s'
# web.get(win_chrome_path).open('web.whatsapp.com')
web.open('web.whatsapp.com')
# Wait for the user to scan the WhatsApp Web QR code and focus a chat.
time.sleep(30)
for i in range(times):
    # pyautogui types into whatever window has focus; a space must be sent
    # via the named 'space' key.
    for char in msg:
        pyautogui.press('space' if char==' ' else char)
    pyautogui.press('enter')
{
"api_name": "webbrowser.open",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pyautogui.press",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pyautogui.press",
"lin... |
41160046527 | from django import forms
from django.core.mail import EmailMessage
from .models import Householdaccountbook
# class HouseholdaccountbookForm(forms.ModelForm):
# class Meta:
# model = Householdaccountbook
# fields = ["pref","choice",]
class TableCreateForm(forms.ModelForm):
    """Entry form for one household account book record."""
    class Meta:
        model = Householdaccountbook
        # NOTE(review): 'quanity' looks misspelled but must match the model
        # field name exactly -- verify against Householdaccountbook.
        fields = ('title', 'choice', 'date', 'genre', 'quanity', 'money', 'content' )
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Apply Bootstrap styling to every widget.
        for field in self.fields.values():
            field.widget.attrs['class'] = 'form-control'
        # self.fields['date'].widget.attrs['class'] = 'form-control'
        # self.fields['date'].widget.attrs['id'] = "inputdate"
        # Placeholder shows the expected date format ("例" = "e.g.").
        self.fields['date'].widget.attrs['placeholder'] = "例 2000-12-05"
{
"api_name": "django.forms.ModelForm",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "models.Householdaccountbook",
"line_number": 12,
"usage_type": "name"
}
] |
5387953203 | from telegram.ext import Updater, MessageHandler,Filters
from Adafruit_IO import Client
import os
aio = Client('adeebsheriff', os.getenv('adeebsheriff'))
def demo1(update, context):
    """Reply to "how are you?" with text plus a matching image.

    With Updater(use_context=True) the dispatcher calls handlers as
    (update, context); the original parameter names (bot, update) were
    swapped relative to what they actually receive.
    """
    chat_id = update.message.chat_id
    path = 'https://cdn3.vectorstock.com/i/1000x1000/87/22/i-am-fine-lettering-typography-calligraphy-overlay-vector-15208722.jpg'
    update.message.reply_text('I am fine')
    context.bot.sendPhoto(chat_id=chat_id, photo=path)
def demo2(update, context):
    """Turn the bedroom light ON via Adafruit IO and confirm with a photo.

    Parameters renamed to (update, context) to match the actual
    use_context=True handler signature.
    """
    chat_id = update.message.chat_id
    path = 'https://static.scientificamerican.com/sciam/cache/file/2B38DE31-C1D3-4339-8808D61972976EE4.jpg'
    update.message.reply_text('Light is turned ON')
    aio.send('bedroom-light', 1)
    # Read the value back to confirm the feed update took effect.
    data1 = aio.receive('bedroom-light')
    print(f'Received value: {data1.value}')
    context.bot.sendPhoto(chat_id=chat_id, photo=path)
def demo3(update, context):
    """Turn the bedroom light OFF via Adafruit IO and confirm with a photo.

    Parameters renamed to (update, context) to match the actual
    use_context=True handler signature.
    """
    chat_id = update.message.chat_id
    path = 'https://image.shutterstock.com/image-photo/light-bulb-turned-off-over-260nw-320485652.jpg'
    update.message.reply_text('Light is turned OFF')
    aio.send('bedroom-light', 0)
    # Read the value back to confirm the feed update took effect.
    data1 = aio.receive('bedroom-light')
    print(f'Received value: {data1.value}')
    context.bot.sendPhoto(chat_id=chat_id, photo=path)
def demo4(update, context):
    """Turn the bedroom fan ON via Adafruit IO and confirm with a photo.

    Parameters renamed to (update, context) to match the actual
    use_context=True handler signature.
    """
    chat_id = update.message.chat_id
    path = 'https://cdn.frontdoorhome.com/ahs/blog/prod/static/cs/ahs/image/running-fan.jpg'
    update.message.reply_text('Fan is turned ON')
    aio.send('bedroom-fan', 1)
    # Read the value back to confirm the feed update took effect.
    data2 = aio.receive('bedroom-fan')
    print(f'Received value: {data2.value}')
    context.bot.sendPhoto(chat_id=chat_id, photo=path)
def demo5(update, context):
    """Turn the bedroom fan OFF via Adafruit IO and confirm with a photo.

    Parameters renamed to (update, context) to match the actual
    use_context=True handler signature.
    """
    chat_id = update.message.chat_id
    path = 'https://www.destinationlighting.com/fliptheswitch/wp-content/uploads/sites/2/2018/05/zudio-casablanca.jpg'
    update.message.reply_text('Fan is turned OFF')
    aio.send('bedroom-fan', 0)
    # Read the value back to confirm the feed update took effect.
    data2 = aio.receive('bedroom-fan')
    print(f'Received value: {data2.value}')
    context.bot.sendPhoto(chat_id=chat_id, photo=path)
def main(update, context):
    """Route an incoming text message to the matching action handler.

    Parameters renamed to (update, context) to match the use_context=True
    handler signature; equality chains replaced with membership tests.
    """
    text = update.message.text.lower()
    print(text)
    if text == "how are you?":
        demo1(update, context)
    elif text in ("light on", "turn on light"):
        demo2(update, context)
    elif text in ("light off", "turn off light"):
        demo3(update, context)
    elif text in ("switch on fan", "turn on fan"):
        demo4(update, context)
    elif text in ("switch off fan", "turn off fan"):
        demo5(update, context)
    else:
        update.message.reply_text('Invalid Text')
# Bot credentials come from the environment; the token is never hard-coded.
BOT_TOKEN = os.getenv('BOT_TOKEN')
u = Updater(BOT_TOKEN,use_context=True)
dp = u.dispatcher
# Every plain-text message is routed through main() for dispatch.
dp.add_handler(MessageHandler(Filters.text,main))
u.start_polling()
u.idle()
| adeebsheriff/telegramiotchatbot | app.py | app.py | py | 2,506 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "Adafruit_IO.Client",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "telegram.ext.Updater",
"line... |
7759954850 | '''
사업자등록번호로 업종 확인
'''
import csv
import json
from urllib.request import urlopen
from urllib import parse
import datetime as dt
import pandas as pd
import numpy as np
import re
# Sample business registration numbers for testing:
# Daesin Information & Communication 4088118945
# Daebo Information & Communication 1358119406
bizNo = '2118108009'
# Public Procurement Service - user information service (industry lookup)
'''
> parameter(조회조건)
 - bizno : 사업자등록번호
> numOfRows 는 totalCount 보다 작아야 함
'''
url = 'http://apis.data.go.kr/1230000/UsrInfoService/getPrcrmntCorpIndstrytyInfo'
# Build the query string; keys are URL-quoted as the OpenAPI gateway expects.
queryParams = '?' + parse.urlencode({ parse.quote_plus('serviceKey') : 'B1CsUiO26Y56VDOKIParM6z394FXvTQC0rafsREBzSnOl8Cc1PUFY98LOcqKq5OahD5s2AhvszA2AIIYj0KXvg==',
                                       parse.quote_plus('pageNo') : '1',
                                       parse.quote_plus('numOfRows') : 100,
                                       parse.quote_plus('type') : 'json' ,
                                       parse.quote_plus('bizno') : bizNo
                            })
# set_API & get_data -> openAPI & parameters
response = urlopen(url + queryParams)
data = response.read()
JSON_object = json.loads(data.decode('utf-8'))
'''
"bizno": "1048118820",
"indstrytyNm": "엔지니어링사업(프로젝트매니지먼트)",
"indstrytyCd": "7309",
"rgstDt": "2011-10-31 00:00:00",
"vldPrdExprtDt": "",
"systmRgstDt": "2014-06-30 16:12:45",
"chgDt": "",
"indstrytyStatsNm": "",
"rprsntIndstrytyYn": "N",
"systmChgDt": "2014-06-30 16:12:45"
'''
# Keep only the columns needed to judge each registered industry entry.
result = pd.DataFrame(JSON_object["response"]["body"]["items"], columns = ["bizno",
                             "indstrytyNm",
                             "rgstDt",
                             "vldPrdExprtDt"])
#result
# init Series
s = []
# Keep industries whose validity-expiry date is set; strip any
# parenthesised suffix from the industry name.
for index, item in result.iterrows():
    if len(item['vldPrdExprtDt']) > 0:
        print(item['vldPrdExprtDt'] + ' -> ' + str(len(item['vldPrdExprtDt'])))
        print(item['indstrytyNm'])
        print(re.sub('\(.*?\)', '', item['indstrytyNm']))
        print('\n')
        s.append(re.sub('\(.*?\)', '', item['indstrytyNm']))
#s
# using naive method
# to remove duplicated
# from list
res = []
for i in s:
    if i not in res:
        res.append(i)
#res
#
print('-'.join(res))
{
"api_name": "urllib.parse.urlencode",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "urllib.parse.quote_plus",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "urllib.pa... |
75316047546 | import wx
from ..form.general import GeneralDialog
from ..textbox import LayoutDimensions
from ..textbox.textbox import TextInputLayout, TextSmartBox
from ..textbox.floatbox import FloatInputLayout, FloatSmartBox
from ..controller import ChildController
from ..model.bind import BindOjbect
__author__ = 'Joeny'
class FigureSetting(object):
"""
Figure Setting model.
"""
def __init__(self, *args, **kwargs):
"""
Figure Setting Constructor
:param args:
:param kwargs:
:return:
"""
self.title = kwargs.get('title', 'Title')
self.x_title = kwargs.get('x_title', 'X Title')
self.x_subtitle = kwargs.get('x_subtitle', '')
self.y_title = kwargs.get('y_title', 'Y Title')
self.y_subtitle = kwargs.get('y_subtitle', '')
self.x_min = kwargs.get('x_min', None)
self.x_max = kwargs.get('x_max', None)
self.y_min = kwargs.get('y_min', None)
self.y_max = kwargs.get('y_max', None)
self.linewidth = kwargs.get('linewidth', 2)
self.legend = kwargs.get('legend', [])
class FigureSettingPanel(wx.Panel):
"""
Particular Figure Setting
"""
def __init__(self, parent, setting, *args, **kwargs):
"""
:param setting:
:param args:
:param kwargs:
:return:
"""
wx.Panel.__init__(self, parent, *args, **kwargs)
self.layouts = {}
self.bind_objects = {}
self.setting = setting
self.SetSizerAndFit(self.do_layout())
def do_layout(self):
"""
Layout form
:return:
"""
vsizer = wx.BoxSizer(wx.VERTICAL)
layout = LayoutDimensions(top=2, bottom=2, left=4, right=4, interior=2,
widths=(100, 200),
stretch_factor=(0, 1), height=24)
layout.calculate()
self.layouts['title'] = TextInputLayout(self,
name='Title',
layout=layout,
textbox=TextSmartBox(self))
self.layouts['x_title'] = TextInputLayout(self,
name='X Title',
layout=layout,
textbox=TextSmartBox(self))
self.layouts['y_title'] = TextInputLayout(self,
name='Y Title',
layout=layout,
textbox=TextSmartBox(self))
self.layouts['x_min'] = FloatInputLayout(self,
name='X Min',
layout=layout,
textbox=FloatSmartBox(self, signs=True))
self.layouts['x_max'] = FloatInputLayout(self,
name='X Max',
layout=layout,
textbox=FloatSmartBox(self, signs=True))
self.layouts['y_min'] = FloatInputLayout(self,
name='Y Min',
layout=layout,
textbox=FloatSmartBox(self, signs=True))
self.layouts['y_max'] = FloatInputLayout(self,
name='Y Max',
layout=layout,
textbox=FloatSmartBox(self, signs=True))
vsizer.AddSpacer(5)
vsizer.Add(self.layouts['title'], 1, wx.EXPAND | wx.ALL, 0)
vsizer.AddSpacer(5)
vsizer.Add(self.layouts['x_title'], 1, wx.EXPAND | wx.ALL, 0)
vsizer.AddSpacer(5)
vsizer.Add(self.layouts['y_title'], 1, wx.EXPAND | wx.ALL, 0)
vsizer.AddSpacer(5)
vsizer.Add(self.layouts['x_min'], 1, wx.EXPAND | wx.ALL, 0)
vsizer.AddSpacer(5)
vsizer.Add(self.layouts['x_max'], 1, wx.EXPAND | wx.ALL, 0)
vsizer.AddSpacer(5)
vsizer.Add(self.layouts['y_min'], 1, wx.EXPAND | wx.ALL, 0)
vsizer.AddSpacer(5)
vsizer.Add(self.layouts['y_max'], 1, wx.EXPAND | wx.ALL, 0)
vsizer.AddSpacer(5)
return vsizer
def sync_data(self):
"""
Sync textbox data
"""
self.bind_objects['title'] = BindOjbect(self.setting.__dict__,
self.layouts['title'].textbox,
'title')
self.bind_objects['x_title'] = BindOjbect(self.setting.__dict__,
self.layouts['x_title'].textbox,
'x_title')
self.bind_objects['y_title'] = BindOjbect(self.setting.__dict__,
self.layouts['y_title'].textbox,
'y_title')
self.bind_objects['x_min'] = BindOjbect(self.setting.__dict__,
self.layouts['x_min'].textbox,
'x_min')
self.bind_objects['x_max'] = BindOjbect(self.setting.__dict__,
self.layouts['x_max'].textbox,
'x_max')
self.bind_objects['y_min'] = BindOjbect(self.setting.__dict__,
self.layouts['y_min'].textbox,
'y_min')
self.bind_objects['y_max'] = BindOjbect(self.setting.__dict__,
self.layouts['y_max'].textbox,
'y_max')
class FigureSettingDialog(GeneralDialog):
"""
Modify figure setting.
"""
def __init__(self, parent, controller=None, setting=None, local=None, btn_flags=wx.OK | wx.CANCEL, **kwargs):
"""
Figure setting dialog.
:param parent:
:param controller:
:param setting:
:param btn_flags:
:param kwargs:
:return:
"""
self.nb = None
self.pages = {}
if local:
self.local = local
self.local.view = self
else:
self.local = FigureSettingController(parent, self, setting)
GeneralDialog.__init__(self,
parent,
title="Figure Setting",
style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER,
controller=controller,
local=self.local,
btn_flags=btn_flags,
**kwargs)
self.btnsizer.AffirmativeButton.Bind(wx.EVT_BUTTON, self.local.button_ok_click)
def do_layout(self):
"""
Draw layout
:return:
"""
self.nb = wx.Notebook(self)
for index, setting in enumerate(self.local.settings):
# Create Panel.
self.pages[index] = FigureSettingPanel(self.nb, setting)
# Add to tab page.
self.nb.AddPage(self.pages[index], "Plot %d" % (index + 1))
# Sync Data
self.pages[index].sync_data()
return self.nb
class FigureSettingController(ChildController):
"""
Figure Setting Controller
"""
def __init__(self, parent, view, settings):
"""
:param parent:
:param view:
:return:
"""
ChildController.__init__(self, parent, view, settings)
self.settings = settings
def sync_data(self):
"""
Sync Data
:return:
"""
pass
def do_layout(self):
pass
def refresh(self):
pass
def update_layout(self, state):
pass
def button_ok_click(self, event):
"""
Button ok click
:param event:
:return:
"""
error = False
#TODO: Need to bind the textbox with the data.
if error is False:
event.Skip()
else:
if not wx.Validator_IsSilent():
wx.Bell()
def delete_control(self):
pass
| JoenyBui/boa-gui | boaui/chart/dlg.py | dlg.py | py | 8,601 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "wx.Panel",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "wx.Panel.__init__",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "wx.Panel",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "wx.BoxSizer",
"line... |
9264213572 | import mne
import argparse
import numpy as np
from config import fname
# Handle command line arguments
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('subject', metavar='sub###', type=int, help='The subject to process')
args = parser.parse_args()
subject = args.subject
print('Processing subject:', subject)
# Append the ICA figures to the subject's existing HTML report.
report = mne.open_report(fname.report(subject=subject))
# Fit ICA to the continuous data
raw_detrended = mne.io.read_raw_fif(fname.raw_detrend(subject=subject))
ica = mne.preprocessing.ICA(n_components=100).fit(raw_detrended)
# Get ICA components that capture eye blinks and heart beats
eog_epochs = mne.preprocessing.create_eog_epochs(raw_detrended)
_, eog_scores = ica.find_bads_eog(eog_epochs)
# Components correlating with the EOG channel above |0.2| count as blinks.
eog_bads = list(np.flatnonzero(abs(eog_scores) > 0.2))
ecg_epochs = mne.preprocessing.create_ecg_epochs(raw_detrended)
ecg_bads, ecg_scores = ica.find_bads_ecg(ecg_epochs)
# Mark both component sets for exclusion when the ICA is later applied.
ica.exclude = eog_bads + ecg_bads
print(eog_bads)
print(ecg_bads)
if len(eog_bads) > 0:
    report.add_figs_to_section(ica.plot_scores(eog_scores), 'Correlation between ICA components and EOG channel', 'ICA', replace=True)
    report.add_figs_to_section(ica.plot_properties(eog_epochs, picks=eog_bads), ['Properties of EOG component %02d' % e for e in eog_bads], 'ICA', replace=True)
if len(ecg_bads) > 0:
    report.add_figs_to_section(ica.plot_scores(ecg_scores), 'Correlation between ICA components and ECG channel', 'ICA', replace=True)
    report.add_figs_to_section(ica.plot_properties(ecg_epochs, picks=ecg_bads), ['Properties of ECG component %02d' % e for e in ecg_bads], 'ICA', replace=True)
report.add_figs_to_section(ica.plot_overlay(eog_epochs.average()), 'EOG signal removed by ICA', 'ICA', replace=True)
report.add_figs_to_section(ica.plot_overlay(ecg_epochs.average()), 'ECG signal removed by ICA', 'ICA', replace=True)
# Persist the fitted ICA solution and both report formats.
ica.save(fname.ica(subject=subject))
report.save(fname.report(subject=subject), overwrite=True, open_browser=False)
report.save(fname.report_html(subject=subject), overwrite=True, open_browser=False)
{
"api_name": "argparse.ArgumentParser",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "mne.open_report",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "config.fname.report",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "config.fna... |
780574891 | from nltk.tokenize import TweetTokenizer
class LexiconFeatureExtractor:
    """Builds sentiment/emotion lexicon features for a tweet.

    NOTE(review): every scorer encodes the tokens to *bytes*
    (``t.encode("utf-8")``) while the lexicons are keyed by *str* read from
    file. Under Python 3 ``bytes`` never equals ``str``, so single-word
    matches would always fail -- this looks like Python 2 era code; confirm
    the intended runtime before reuse.
    """
    def __init__(self, afinn_lexicon_file_path="resources/lexicons/AFINN-en-165.txt",
                 afinn_emoticon_lexicon_file_path="resources/lexicons/AFINN-emoticon-8.txt",
                 bing_liu_lexicon_file_path="resources/lexicons/BingLiu.txt",
                 mpqa_lexicon_file_path="resources/lexicons/mpqa.txt"):
        # Load all lexicons eagerly; the NRC/SentiStrength paths are fixed.
        print("Loading AFINN lexicons...")
        self.afinn_lexicon = LexiconFeatureExtractor._read_standart_lexicon(afinn_lexicon_file_path)
        self.afinn_emoticon_lexicon = LexiconFeatureExtractor._read_standart_lexicon(afinn_emoticon_lexicon_file_path)
        print("Loading BingLiu lexicon...")
        self.bingliu_lexicon = LexiconFeatureExtractor._read_standart_lexicon(bing_liu_lexicon_file_path)
        print("Loading MPQA lexicon...")
        self.mpqa_lexicon = LexiconFeatureExtractor._read_standart_lexicon(mpqa_lexicon_file_path)
        print("Loading NRC - Hashtag - Emotion - Lexicon")
        self.nrc_hash_emo_lexicon = LexiconFeatureExtractor \
            ._read_labeled_lexicon("resources/lexicons/NRC-Hashtag-Emotion-Lexicon-v0.2.txt")
        print("Loading NRC - AffectIntensity - Lexicon")
        self.nrc_affect_intensity_lexicon = LexiconFeatureExtractor \
            ._read_labeled_lexicon("resources/lexicons/NRC-AffectIntensity-Lexicon.txt")
        print("Loading SentiStrength EmoticonLookupTable")
        self.emoticon_lookup_lexicon = LexiconFeatureExtractor \
            ._read_standart_lexicon("resources/lexicons/EmoticonLookupTable.txt")
        print("Loading SentiStrength EmotionLookupTable")
        self.emotion_lookup_lexicon = LexiconFeatureExtractor \
            ._read_standart_lexicon("resources/lexicons/EmotionLookupTable.txt")
    def extract_feature(self, input_txt):
        """Return the concatenated feature vector for one input text."""
        res = [LexiconFeatureExtractor.calculate_score_word_based(self.afinn_lexicon, input_txt),
               LexiconFeatureExtractor.calculate_score_word_based(self.afinn_emoticon_lexicon, input_txt),
               LexiconFeatureExtractor.calculate_score_word_based(self.bingliu_lexicon, input_txt),
               LexiconFeatureExtractor.calculate_score_word_based(self.mpqa_lexicon, input_txt)]
        # NRC - Hashtag - Emotion - Lexicon
        res += LexiconFeatureExtractor.calculate_score_labeled(self.nrc_hash_emo_lexicon, input_txt)
        # NRC - Affect intensity - Lexicon
        res += LexiconFeatureExtractor.calculate_multiscore(self.nrc_affect_intensity_lexicon, input_txt)
        # SentiStrength - Emoticon - Lexicon
        res.append(LexiconFeatureExtractor.calculate_score_word_based(self.emoticon_lookup_lexicon, input_txt))
        # SentiStrength - Emotion - Lexicon
        res.append(LexiconFeatureExtractor.calculate_score_word_based(self.emotion_lookup_lexicon, input_txt))
        return res
    @staticmethod
    def _read_standart_lexicon(file_path, delimeter="\t"):
        """Read a `phrase<TAB>score` lexicon into {phrase: float}."""
        res = {}
        with(open(file_path, "r")) as f:
            for line in f:
                columns = line.strip().split(delimeter)
                if len(columns) > 1:
                    # All but the last column form the (possibly multi-word) key.
                    res[" ".join(columns[:-1]).strip(" ")] = float(columns[-1])
        return res
    @staticmethod
    def _read_multi_score_lexicon(file_path, delimeter="\t", ):
        """Read a `word<TAB>score...` lexicon into {word: [floats]}.
        NOTE(review): currently unused by this class."""
        res = {}
        with(open(file_path, "r")) as f:
            for line in f:
                scores = []
                columns = line.strip().split(delimeter)
                for i in range(1, len(columns)):
                    scores.append(float(columns[i]))
                res[columns[0]] = scores
        return res
    @staticmethod
    def _read_labeled_lexicon(file_path, delimeter="\t",
                              label_index=0, feature_index=1, score_index=2):
        """Read a `label<TAB>word<TAB>score` lexicon into {label: {word: float}}."""
        res = {}
        with(open(file_path, "r")) as f:
            for line in f:
                columns = line.strip().split(delimeter)
                if len(columns) > 2:
                    if columns[label_index] not in res:
                        res[columns[label_index]] = {}
                    res[columns[label_index]][columns[feature_index]] = float(columns[score_index])
        return res
    @staticmethod
    def calculate_score_word_based(lexicon, input_txt):
        """Sum scores of lexicon entries found in the tokenized text
        (single-word keys by membership, multi-word keys via contains_all)."""
        score = 0.0
        input_words = [t.encode("utf-8") for t in TweetTokenizer().tokenize(input_txt)]
        for k, v in lexicon.items():
            if " " not in k and k in input_words:
                score += v
            elif " " in k and LexiconFeatureExtractor.contains_all(k, input_words):
                score += v
        return score
    @staticmethod
    def calculate_multiscore(lexicon, input_txt, score_count=4):
        """Accumulate matched scores into a fixed-length vector.
        NOTE(review): `scores` is rebuilt per entry and holds at most one
        element, so only res[0] ever accumulates -- likely not the intent
        for a multi-score feature; confirm against the original design."""
        res = [0.0 for _ in range(score_count)]
        input_words = [t.encode("utf-8") for t in TweetTokenizer().tokenize(input_txt)]
        for label, d in lexicon.items():
            for k, v in d.items():
                scores = []
                if " " not in k and k in input_words:
                    scores.append(v)
                elif " " in k and LexiconFeatureExtractor.contains_all(k, input_words):
                    scores.append(v)
                for i in range(len(scores)):
                    res[i] += scores[i]
        return res
    @staticmethod
    def calculate_score_labeled(lexicon, input_txt):
        """Return one summed match score per label in the labeled lexicon."""
        res = []
        score = 0.0
        input_words = [t.encode("utf-8") for t in TweetTokenizer().tokenize(input_txt)]
        for label, d in lexicon.items():
            for k, v in d.items():
                score = 0.0
                if " " not in k and k in input_words:
                    score += v
                elif " " in k and LexiconFeatureExtractor.contains_all(k, input_words):
                    score += v
                res.append(score)
        return res
    @staticmethod
    def contains_all(words1, words2):
        """True when every whitespace-separated token of words1 is in words2."""
        for w in words1.split():
            if w not in words2:
                return False
        return True
| erayyildiz/AffectInTweets | src/lexicon_features.py | lexicon_features.py | py | 5,989 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "nltk.tokenize.TweetTokenizer",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.TweetTokenizer",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.TweetTokenizer",
"line_number": 111,
"usage_type": "call"
}
] |
28412951714 | #!/usr/bin/env python
import sys, os, shlex
import multiprocessing as mp
import subprocess as sp
from pli.lib.util import log
def find_files(root):
    """Yield (protein_path, ligand_path, sym) triples found below *root*.

    Walks the tree looking for directories that contain both a
    ``*_protein.pdb`` and a ``*_ligand.mol2`` file.  ``sym`` is characters
    2-3 of the PDB basename (used to shard the output directory); pairs
    whose result file already exists under ``tmpout`` are skipped so a
    rerun only processes missing entries.
    """
    outdir = 'tmpout'
    for curdir, dirs, files in os.walk(root):
        protein_fname = None
        ligand_fname = None
        for f in files:
            if f.endswith('_protein.pdb'):
                protein_fname = f'{curdir}/{f}'
            elif f.endswith('_ligand.mol2'):
                ligand_fname = f'{curdir}/{f}'
        if protein_fname and ligand_fname:
            bname = os.path.basename(protein_fname).split('_')[0]
            sym = bname[1:3]
            # Skip pairs already processed in a previous run.
            if not os.path.exists(f'{outdir}/{sym}/{bname}_ALP.pkl.gz'):
                yield protein_fname, ligand_fname, sym
def worker(args):
    """Run plifinder on one (protein, ligand, sym) triple.

    Returns ``(returncode, protein_path, ligand_path, sym)``.
    """
    protein_path, ligand_path, sym = args
    command = f'python pli/bin/plifinder.py {protein_path} {ligand_path} -o tmpout/{sym}'
    log(command)
    child = sp.Popen(shlex.split(command), universal_newlines=True,
                     stdout=sp.PIPE, stderr=sp.STDOUT)
    return child.wait(), protein_path, ligand_path, sym
def main():
    """Process every unprocessed protein/ligand pair under ``v2015`` in parallel."""
    root = 'v2015'
    count = 0
    # Context manager guarantees the pool is terminated even if iteration
    # raises (the original never closed the pool).
    with mp.Pool(mp.cpu_count()) as pool:
        for ret, protein_iname, ligand_iname, sym in pool.imap(worker, find_files(root)):
            if ret != 0:
                log(f'!! Error {protein_iname} {ligand_iname}')
                continue
            count += 1
            log(f'{count} {protein_iname} {ligand_iname} -> {sym}')
if __name__ == '__main__':
main()
| rhara/plifinder | examples/plifinder_v2015.py | plifinder_v2015.py | py | 1,504 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.walk",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_numb... |
28026928602 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 22 11:31:56 2020
@author: admin
"""
from __future__ import print_function
import os, sys, time, argparse
from sensapex import SensapexDevice, UMP, UMPError
# Command-line interface: only the Sensapex device group is configurable.
parser = argparse.ArgumentParser(
    description="Test for sensapex devices; prints position and status updates continuously.")
parser.add_argument('--group', type=int, default=0, help="Device group number")
args = parser.parse_args()
# Connect to the uMp interface for the requested group and wrap each
# discovered device ID in a SensapexDevice handle.
ump = UMP.get_ump(group=args.group)
devids = ump.list_devices()
devs = {i:SensapexDevice(i) for i in devids}
print("SDK version:", ump.sdk_version())
print("Found device IDs:", devids)
def print_pos(timeout=None):
    """Print one line with the current position of every detected device.

    Positions are left-padded into 30-character columns; if a device fails
    to answer, the first exception argument is shown in its place.
    """
    parts = []
    for dev_id in devids:
        device = devs[dev_id]
        try:
            pos_text = str(device.get_pos(timeout=timeout))
        except Exception as err:
            pos_text = str(err.args[0])
        pos_text = pos_text + " " * (30 - len(pos_text))
        parts.append("%d: %s" % (dev_id, pos_text))
    print("".join(parts))
t = time.time()
# Status loop: every ~10 ms print the time elapsed since the previous
# iteration plus each device's position and busy flag, overwriting the same
# terminal line via the trailing carriage return.
# NOTE(review): "%3f" is a width-3 float (full precision); "%.3f" was
# probably intended -- confirm.
while True:
    t1 = time.time()
    dt = t1 - t
    t = t1
    line = "%3f" % dt
    for id in sorted(list(devs.keys())):
        line += " %d: %s busy: %s" % (id, devs[id].get_pos(timeout=0), devs[id].is_busy())
    line += " \r"
    print(line, end=" ")
    sys.stdout.flush()
    time.sleep(0.01)
| bsbrl/Amey_microinjection | Sensapex_Manipulator/test.py | test.py | py | 1,293 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sensapex.UMP.get_ump",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sensapex.UMP",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "sensapex.Se... |
74959980988 | """system URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from clubs import views
urlpatterns = [
    # Admin and landing page
    path('admin/', admin.site.urls),
    path('', views.home, name='home'),
    # User dashboard and profile management
    path('user_dashboard/', views.user_dashboard, name='user_dashboard'),
    path('user_profile/', views.user_profile, name='user_profile'),
    path('user_profile/edit', views.edit_user_profile, name='edit_user_profile'),
    path('user_profile/change_password', views.change_password, name='change_password'),
    path('user_profile/<int:user_id>/<int:membership_id>', views.user_profile, name='user_profile'),
    path('member_profile/<int:membership_id>', views.member_profile, name='member_profile'),
    # Authentication
    path('log_in/', views.log_in, name='log_in'),
    path('log_out/', views.log_out, name='log_out'),
    path('sign_up/', views.sign_up, name='sign_up'),
    # Club creation, discovery and membership
    path('membership_application/', views.membership_application, name='membership_application'),
    path('new_club/', views.club_creation, name='new_club'),
    path('available_clubs/', views.available_clubs, name='available_clubs'),
    path('club/<int:club_id>', views.club_dashboard, name='club_dashboard'),
    path('club_memberships/', views.club_memberships, name='club_memberships'),
    path('my_applications/', views.my_applications, name='my_applications'),
    # Club officer actions on members
    path('club/<int:club_id>/<int:user_id>/promote', views.promote_member, name='promote_member'),
    path('club/<int:club_id>/<int:user_id>/demote', views.demote_member, name='demote_member'),
    path('club/<int:club_id>/<int:user_id>/kick', views.kick_member, name='kick_member'),
    path('club/<int:club_id>/edit', views.edit_club, name='edit_club'),
    path('club/<int:club_id>/leave', views.leave_club, name='leave_club'),
    # Tournaments
    path('tournament/<int:tournament_id>', views.tournament_dashboard, name='tournament_dashboard'),
    path('club/<int:club_id>/transfer_ownership/<int:user_id>', views.transfer_ownership, name='transfer_ownership'),
    path('membership/<int:membership_id>/approve', views.accept_membership, name='accept_membership'),
    path('membership/<int:membership_id>/deny', views.reject_membership, name='reject_membership'),
    path('new_tournament/<int:club_id>', views.tournament_creation, name='new_tournament'),
    path('tournament/<int:tournament_id>/join', views.join_tournament, name='join_tournament'),
    path('tournament/<int:tournament_id>/leave', views.leave_tournament, name='leave_tournament'),
    path('tournament/<int:tournament_id>/cancel', views.cancel_tournament, name='cancel_tournament'),
    path('tournament/<int:tournament_id>/generate_matches', views.generate_matches, name='generate_matches')
]
| amir-rahim/ChessClubManagementSystem | system/urls.py | urls.py | py | 3,313 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "... |
25272817830 | import sys
from itertools import combinations
from collections import deque
def move(stage):
    """Shift every enemy one row down (toward the archers' row).

    Rows are copied bottom-up so each row receives the row above it.
    """
    for i in range(M):
        for j in range(N-1, 0, -1):
            enemy[j][i] = enemy[j-1][i]
    # NOTE(review): the freed top row is only cleared on the first turn
    # (stage == 0); on later turns row 0 keeps its old contents and is
    # re-copied downward, which looks unintended -- confirm against BOJ 17135.
    if stage == 0:
        for i in range(M):
            enemy[0][i] = 0
# BFS neighbor offsets: left, up, right (priority order of the problem).
dr = [0, -1, 0]
dc = [-1, 0, 1]
# N rows x M columns grid; archers hit enemies within distance D.
N, M, D = map(int, sys.stdin.readline().split())
enemy = [list(map(int, sys.stdin.readline().split())) for _ in range(N)]
# NOTE(review): archer columns should come from range(M); range(N) only
# works when N == M -- confirm.
comb = list(combinations(range(N), 3))
visited = [[0] * M for _ in ' '*N]  # NOTE(review): unused
shot = [[0] * M for _ in ' '*N]
q = deque()
shot_clear = deque()
result = 0
for stage in range(N):
    max_kill = 0
    # Try every placement of the three archers and keep the best kill count.
    # NOTE(review): ``enemy[r][c] = 0`` below permanently removes shot
    # enemies while still evaluating later combinations of the same stage,
    # which looks unintended -- confirm.
    for archers in comb:
        kill = 0
        for archer in archers:
            q.append((N, archer))  # archers stand on the row below the grid
        dist = 0
        len_q = len(q)
        cnt = 0
        # Level-order BFS; dist tracks the current search depth and the
        # search stops once depth D is reached or an enemy is found.
        while q:
            if len_q == cnt:
                len_q = len(q)
                cnt = 0
                dist += 1
                if dist == D:
                    q.clear()
                    break
            cnt += 1
            r, c = q.popleft()
            for d in range(3):
                nr = r + dr[d]
                nc = c + dc[d]
                if 0 <= nr < N and 0 <= nc < M:
                    if enemy[nr][nc]:
                        if not shot[nr][nc]:
                            kill += 1
                            shot[nr][nc] = 1
                            shot_clear.append((nr, nc))
                        q.clear()
                        break
                    q.append((nr, nc))
        # Remove the enemies shot by this combination and reset the marks.
        while shot_clear:
            r, c = shot_clear.popleft()
            enemy[r][c] = 0
            shot[r][c] = 0
        if kill > max_kill:
            max_kill = kill
    result += max_kill
    move(stage)
print(result)
| powerticket/algorithm | Baekjoon/17135.py | 17135.py | py | 1,842 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.stdin.readline",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.readline",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
... |
14975435890 | """DuerOS entity class."""
from __future__ import annotations
from dueros_smarthome.client import DeviceActionResponse
from dueros_smarthome.const import STATUS_OK, STATUS_NOT_LOGIN
from dueros_smarthome.models import Appliance, Connectivity
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryAuthFailed, IntegrationError
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, NAME, VERSION, LOGGER
from .coordinator import DuerOSDataUpdateCoordinator, get_unique_id
class DuerOSEntity(CoordinatorEntity):
    """Base Home Assistant entity wrapping one DuerOS smart-home appliance."""
    def _set_unique_id(self) -> None:
        """Derive and cache the entity's unique id from the wrapped appliance."""
        self._attr_unique_id = get_unique_id(self._appliance)
    def __init__(
        self, coordinator: DuerOSDataUpdateCoordinator, appliance: Appliance
    ) -> None:
        """Initialize."""
        super().__init__(coordinator)
        self._attr_available = False
        self._attr_has_entity_name = True
        # Populate name/availability from the appliance before registering
        # device info, which needs the unique id computed in between.
        self._update(appliance)
        self._set_unique_id()
        self._attr_device_info = DeviceInfo(
            identifiers={(DOMAIN, self.unique_id)},
            name=NAME,
            model=VERSION,
            manufacturer=self._appliance.bot_name,
        )
    def _update(self, appliance: Appliance) -> None:
        """Refresh cached appliance state: availability and friendly name."""
        self._appliance = appliance
        # Entity is available only while the cloud reports it reachable.
        self._attr_available = (
            self._appliance.state_settings.connectivity.value == Connectivity.REACHABLE
        )
        self._attr_name = self._appliance.friendly_name
    @callback
    def _handle_coordinator_update(self) -> None:
        """Pull this entity's fresh appliance data from the coordinator."""
        self._update(self.coordinator.data[self.unique_id])
        self.async_write_ha_state()
    @property
    def available(self) -> bool:
        """Return whether the appliance is currently reachable."""
        return self._attr_available
    @staticmethod
    def _check_response(rsp: DeviceActionResponse) -> None:
        """Raise on API errors: re-auth for login failures, IntegrationError otherwise."""
        if STATUS_NOT_LOGIN == rsp.status:
            LOGGER.error(rsp.msg)
            raise ConfigEntryAuthFailed(rsp.msg)
        if STATUS_OK != rsp.status:
            LOGGER.error(rsp.msg)
            raise IntegrationError(rsp.msg)
| zsy056/dueros-ha | custom_components/dueros/entity.py | entity.py | py | 2,131 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "homeassistant.helpers.update_coordinator.CoordinatorEntity",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "coordinator.get_unique_id",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "coordinator.DuerOSDataUpdateCoordinator",
"line_number":... |
39425077344 | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# -*- coding: utf-8 -*-
#
# Last modified: Fri, 14 Oct 2022 02:22:56 +0900
import numpy as np
import pandas as pd
import os
from .measurePhenotypes import measurePhenotypes
from ..util.isNotebook import isnotebook
if isnotebook():
from tqdm.notebook import tqdm
else:
from tqdm import tqdm
def annotateLineageIdx(**kwargs):
    """Dispatch to the dataset-specific lineage annotator.

    Exactly one input style is expected:
      * ``matFilePath``, ``segImgsPath`` and ``rawImgsPath`` (all three),
        with optional ``originFrame`` -> Schnitzcells data,
      * ``tanauchi=<dir>`` with optional ``cutOff``,
      * ``wang=<dir>`` with optional ``cutOff``,
      * ``wakamoto=<dir>`` with optional ``cutOff``.

    Returns the annotated cell DataFrame; exits the process when no
    recognized keyword combination is given.
    """
    if len({"matFilePath", "segImgsPath", "rawImgsPath"} & set(kwargs.keys())) > 2:
        return annotateSchnitz(
            kwargs["matFilePath"],
            kwargs["segImgsPath"],
            kwargs["rawImgsPath"],
            kwargs.get("originFrame", 0),
        )
    elif "tanauchi" in kwargs:
        return annotateTanauchi(kwargs["tanauchi"], kwargs.get("cutOff"))
    elif "wang" in kwargs:
        return annotateWang(kwargs["wang"], kwargs.get("cutOff"))
    elif "wakamoto" in kwargs:
        return annotateHashimoto(kwargs["wakamoto"], kwargs.get("cutOff"))
    else:
        # ``sys`` is not imported at module level, so the original call to
        # sys.exit raised NameError instead of exiting cleanly.
        import sys
        print("Error")
        sys.exit(-1)
def annotateWang(WangDir, cutOff=None, fileTag=".dat"):
    """Build the annotated cell DataFrame from Wang-style lineage files.

    Parameters
    ----------
    WangDir : str
        Directory containing ``xy##`` position subdirectories, each holding
        per-lineage data files (one cell per line).
    cutOff : tuple or None
        Optional ``(x, y)``: keep only positions with number <= x and at
        most y lineage files per position.
    fileTag : str
        Substring identifying lineage data files.
    """
    # Sorted list of position directories named like "...xyNN".
    coord = sorted(
        [
            os.path.join(WangDir, d)
            for d in os.listdir(WangDir)
            if os.path.isdir(os.path.join(WangDir, d)) and "xy" in d
        ]
    )
    linIdx = 0  # NOTE(review): immediately shadowed by the list below
    ID = []
    motherID = []
    uID = 0
    Z = []
    intensity = []
    area = []
    daughter1ID = []
    daughter2ID = []
    cenX = []
    cenY = []
    linIdx = []
    lineages = []
    if cutOff is not None:
        x, y = cutOff
        for dName in coord:
            xy = int(dName[-2:])
            if xy <= x:
                lins = sorted(
                    [
                        os.path.join(dName, f)
                        for f in os.listdir(dName)
                        if os.path.isfile(os.path.join(dName, f)) and fileTag in f
                    ]
                )
                lineages += lins[:y]
    else:
        for dName in coord:
            lineages += [
                os.path.join(dName, f)
                for f in os.listdir(dName)
                if os.path.isfile(os.path.join(dName, f)) and fileTag in f
            ]
    # Each file is one mother-machine lineage: consecutive lines are
    # consecutive cells, so each cell's mother is the previous cell.
    for lin in tqdm(lineages):
        cellNo = 0
        with open(lin, "r") as data:
            next(data)  # skip header line
            for line in data:
                if cellNo == 0:
                    motheruID = -1
                else:
                    daughter1ID.append(uID)
                    motheruID = uID - 1
                motherID.append(motheruID)
                ID.append(cellNo)
                aa = line.split(" ")
                cenX.append(float(aa[6]))
                cenY.append(float(aa[7]))
                Z.append(int(aa[0]))
                intensity.append(float(aa[5]))
                area.append(float(aa[4]))  # actually cell length
                # Column 1 == 1 marks a division event (-3), otherwise -1.
                if int(aa[1]) == 1:
                    daughter2ID.append(-3)
                else:
                    daughter2ID.append(-1)
                cellNo += 1
                uID += 1
                linIdx.append(lineages.index(lin))
        daughter1ID.append(-1)  # last cell of the lineage has no daughter
    cellDict = {
        "ID": np.array(ID),
        "uID": np.array(range(uID)),
        "motherID": np.array(motherID),
        "daughter1ID": np.array(daughter1ID),
        "daughter2ID": np.array(daughter2ID),
        "cenX": np.array(cenX),
        "cenY": np.array(cenY),
        "Z": np.array(Z),
        "cellNo": np.array(ID),
        "intensity": np.array(intensity),
        "area": np.array(area),
        "linIdx": np.array(linIdx),
    }
    # for key in cellDict.keys():
    #     print(key,cellDict[key])
    CellDfWP = pd.DataFrame(cellDict)
    return CellDfWP
def annotateTanauchi(TanauchiDir, cutOff=None):
    """Build the annotated cell DataFrame from Tanouchi-style lineage files.

    Each file in *TanauchiDir* is one lineage; its name encodes the stage
    position as ``??X_Y.*``.  ``cutOff=(x, y)`` optionally keeps only
    lineages with position coordinates below those bounds.
    """
    files = os.listdir(TanauchiDir)
    linIdx = 0  # NOTE(review): immediately shadowed by the list below
    ID = []
    motherID = []
    uID = 0
    Z = []
    intensity = []
    area = []
    daughter1ID = []
    daughter2ID = []
    cenX = []
    cenY = []
    linIdx = []
    lineages = []
    if cutOff is not None:
        for lin in files:
            x, y = cutOff
            # File names look like "??X_Y.ext"; coordinates are 1-based.
            coord = lin[2:].split(".")[0].split("_")
            if int(coord[0]) - 1 < x and int(coord[1]) - 1 < y:
                lineages.append(lin)
    else:
        lineages = files
    # One file per lineage: consecutive lines are consecutive cells, so
    # each cell's mother is the previous cell.
    for lin in tqdm(lineages):
        cellNo = 0
        coord = lin[2:].split(".")[0].split("_")
        with open(os.path.join(TanauchiDir, lin), "r") as data:
            for line in data:
                # Stage position doubles as the centroid for this dataset.
                cenX.append(int(coord[0]))
                cenY.append(int(coord[1]))
                if cellNo == 0:
                    motheruID = -1
                else:
                    daughter1ID.append(uID)
                    motheruID = uID - 1
                motherID.append(motheruID)
                ID.append(cellNo)
                aa = line.split(",")
                Z.append(int(aa[0]) - 1)
                intensity.append(float(aa[4]))
                area.append(float(aa[2]))  # actually cell length
                # Column 1 == 1 marks a division event (-3), otherwise -1.
                if int(aa[1]) == 1:
                    daughter2ID.append(-3)
                else:
                    daughter2ID.append(-1)
                cellNo += 1
                uID += 1
                linIdx.append(lineages.index(lin))
        daughter1ID.append(-1)  # last cell of the lineage has no daughter
    cellDict = {
        "ID": np.array(ID),
        "uID": np.array(range(uID)),
        "motherID": np.array(motherID),
        "daughter1ID": np.array(daughter1ID),
        "daughter2ID": np.array(daughter2ID),
        "cenX": np.array(cenX),
        "cenY": np.array(cenY),
        "Z": np.array(Z),
        "cellNo": np.array(ID),
        "intensity": np.array(intensity),
        "area": np.array(area),
        "linIdx": np.array(linIdx),
    }
    # for key in cellDict.keys():
    #     print(key,cellDict[key])
    CellDfWP = pd.DataFrame(cellDict)
    return CellDfWP
def annotateHashimoto(HashimotoDir, cutOff=None):
    """Build the annotated cell DataFrame from a Hashimoto/Wakamoto table.

    *HashimotoDir* is a tab-separated table where each row links to its
    predecessor via ``PreviousCell``; lineages are reconstructed backwards
    from rows flagged ``LastIndex == 1``.  ``cutOff`` optionally limits the
    number of terminal cells processed.

    NOTE(review): ``DataFrame.append`` used below was removed in pandas 2.0
    (use ``pd.concat``) -- this function requires pandas < 2.
    """
    linIdx = 0
    ID = []
    motherID = []
    uID = 0
    Z = []
    intensity = []
    area = []
    daughter1ID = []
    daughter2ID = []
    cenX = []
    cenY = []
    lineages = []
    columnName = [
        "ID",
        "uID",
        "motherID",
        "daughter1ID",
        "daughter2ID",
        "cenX",
        "cenY",
        "Z",
        "cellNo",
        "intensity",
        "area",
        "linIdx",
    ]
    CellDfWP = pd.DataFrame(columns=columnName)
    data = pd.read_table(HashimotoDir, index_col=0)
    lastCell = data[data["LastIndex"] == 1].sort_index()
    if cutOff is not None:
        lastCell = lastCell.iloc[: int(cutOff)]
    # Walk each lineage backwards from its terminal cell until the root
    # (PreviousCell < 0) or until a cell already present in the table is hit
    # (i.e. a branch point shared with an earlier lineage).
    for lCell in tqdm(lastCell.iterrows()):
        progenyId, lCell = lCell
        daughter1ID = -1
        motherId = int(lCell["PreviousCell"])
        while motherId >= 0:
            info = [
                progenyId,
                motherId,
                daughter1ID,
                -1,
                float(lCell["XM"]),
                float(lCell["YM"]),
                lCell["Slice"] - 1,
                float(lCell["Mean"]) - float(lCell["Background"]),
                float(lCell["Area"]),
                linIdx,
            ]  # Add row with this info
            columns = columnName[1:8] + columnName[-3:]  # skip over ID and cellNo
            df = dict(zip(columns, info))
            if motherId in list(CellDfWP["uID"]) and motherId != 0:
                # Branch point: register this cell as the mother's second
                # daughter and merge the lineage index with the earlier branch.
                mask = CellDfWP["uID"] == motherId
                CellDfWP.loc[mask, "daughter2ID"] = progenyId
                d1 = CellDfWP.loc[mask, "daughter1ID"]
                d2 = CellDfWP.loc[mask, "daughter2ID"]
                if int(d1) > int(d2):
                    tmp = int(d1)
                    d1 = int(d2)
                    d2 = tmp
                maskLin = CellDfWP["linIdx"] == linIdx
                linIdx -= 1
                CellDfWP.loc[maskLin, "linIdx"] = int(CellDfWP.loc[mask, "linIdx"])
                CellDfWP = CellDfWP.append(df, ignore_index=True)
                break
            # CellDfWP[CellDfWP['uID'] == motherId]['daughter2ID'] = progenyId
            daughter1ID = progenyId
            progenyId = motherId
            if motherId == 0:
                motherId = -1
                df["motherID"] = motherId
            else:
                lCell = data.iloc[motherId - 1]
                motherId = int(lCell["PreviousCell"])
            CellDfWP = CellDfWP.append(df, ignore_index=True)
        linIdx += 1
    # Chronological order; ID/cellNo are reassigned after sorting.
    CellDfWP = CellDfWP.sort_values(by="Z", ascending=True)
    CellDfWP["ID"] = list(range(len(CellDfWP)))
    CellDfWP["cellNo"] = list(range(len(CellDfWP)))
    # CellDfWP['uID'] = CellDfWP['uID'].astype(int)
    # CellDfWP['motherID'] = CellDfWP['motherID'].astype(int)
    # CellDfWP['daughter1ID'] = CellDfWP['daughter1ID'].astype(int)
    # CellDfWP['daughter2ID'] = CellDfWP['daughter2ID'].astype(int)
    # CellDfWP['Z'] = CellDfWP['Z'].astype(int)
    # CellDfWP['linIdx'] = CellDfWP['linIdx'].astype(int)
    return CellDfWP
def annotateSchnitz(matFilePath, segImgsPath, rawImgsPath, originFrame=0):
    """
    Annotate lineage indices according to the result of cell tracking.

    Parameters
    ----------
    matFilePath : string
        A path to MAT files which were created by Schnitzcells
    segImgsPath : string
        A path to directory which include segmentated images which
        were created by Schnitzcells
    rawImgsPath : string
        A path to direcotry which include raw images
        which were required for Schnitzcells

    Returns
    -------
    cellDfWPL : pandas.core.frame.DataFrame
        A pandas dataframe which includes tracking result and
        phenotypes of each cell, lineage indices.
        Its name is abbreviate for cell DataFrame
        With Phenotypes, Lineage indices.
        Column indices are like below
        - ID
        - uID
        - motherID
        - daughter1ID
        - daughter2ID
        - cenX
        - cenY
        - Z
        - cellNo
        - intensity
        - area
        - linIdx
    """
    cellDfWP = measurePhenotypes(matFilePath, segImgsPath, rawImgsPath, originFrame)
    # Assign a lineage index to every cell: a cell inherits its mother's
    # lineage; at a division the first daughter keeps the mother's lineage
    # and the second daughter starts a new one.  Root cells (motherID == -1)
    # each start their own lineage.  (The original ran this identical loop
    # twice and also counted terminal cells into an unused variable; both
    # redundancies are removed.)
    linIdx = 0
    linList = np.zeros(len(cellDfWP["uID"]))
    for uID in cellDfWP["uID"]:
        motherID = cellDfWP["motherID"][uID]
        if motherID == -1:
            linList[uID] = linIdx
            linIdx += 1
        else:
            sister1ID = cellDfWP["daughter1ID"][motherID]
            sister2ID = cellDfWP["daughter2ID"][motherID]
            if sister1ID > 0 and sister2ID > 0:  # mother divided
                if sister1ID == uID:
                    linList[uID] = linList[motherID]
                else:
                    linList[uID] = linIdx
                    linIdx += 1
            else:  # mother did not divide
                linList[uID] = linList[motherID]
    linList = linList.astype(np.int64)
    cellDfWP["linIdx"] = linList
    return cellDfWP
if __name__ == "__main__":
    # Smoke test with hard-coded example paths.
    matFilePath = (
        "/Users/itabashi/Research/Analysis/Schnitzcells/"
        "9999-99-99/488/data/488_lin.mat"
    )
    segImgsPath = (
        "/Users/itabashi/Research/Analysis/Schnitzcells/" "9999-99-99/488/segmentation/"
    )
    rawImgsPath = (
        "/Users/itabashi/Research/Experiment/microscope/"
        "2018/08/28/ECTC_8/Pos0/forAnalysis/488FS/"
    )
    # annotateLineageIdx accepts keyword arguments only; the previous
    # positional call raised a TypeError.
    cellDfWPL = annotateLineageIdx(
        matFilePath=matFilePath, segImgsPath=segImgsPath, rawImgsPath=rawImgsPath
    )
    print(cellDfWPL)
| funalab/pyCellLineage | lineageIO/annotateLineageIdx.py | annotateLineageIdx.py | py | 12,848 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "util.isNotebook.isnotebook",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
... |
25146242563 | import numpy as np
import netCDF4 as nd
import matplotlib.pyplot as plt
from calc_irr import *
def get_sample_points(origin, dir , ns=10):
    """Sample ``ns`` points along rays from *origin* down to the ground (z = 0).

    Each viewing angle in *dir* (degrees, measured in the x-z plane) is
    projected from the camera height ``origin[2]`` onto the ground plane and
    the camera-to-ground segment is sampled uniformly.

    Returns an array of shape ``(ns, 3, len(dir))``; index 0 is the camera
    position, index -1 the ground intersection.
    """
    n_dirs = dir.shape[0]
    # Ground intersection of every ray: x offset from the tangent of the
    # viewing angle scaled by camera height; y unchanged; z = 0.
    ground = np.zeros((origin.shape[0], n_dirs))
    ground[0] = np.tan(dir / 180. * np.pi) * origin[2] + origin[0]
    ground[1, :] = origin[1]
    ground[2, :] = 0
    # Interpolation parameters 0..1 (0 = camera, 1 = ground point).
    fractions = np.linspace(0, 1, ns)
    start = np.reshape(origin, (1, origin.shape[0], 1))
    offsets = ground - np.reshape(origin, (origin.shape[0], 1))
    return start + offsets * np.reshape(fractions, (ns, 1, 1))
def get_boxindex(p,grid):
    """Map world coordinates to integer grid-box indices with periodic wrap.

    ``grid`` is ``[Nx, Ny, Nz, dx, dy, dz]``.  Accepts either a single point
    (1-D, length 3) or a batch of shape (npoints, 3, nrays).  Indices that
    fall outside ``[0, N)`` are wrapped around once (periodic boundary).
    """
    dxi = np.array([grid[3],grid[4],grid[5]])
    bounds = np.array([grid[0],grid[1],grid[2]])
    index = np.zeros_like(p)
    # EAFP dispatch: the 1-D branch raises for batched input (grid[i+3] goes
    # out of range once i > 2), which routes execution to the batched branch.
    # NOTE(review): a batch with <= 3 points never raises, so it would be
    # mis-indexed by the 1-D branch -- confirm callers never pass that shape.
    try:
        for i in range(len(index)):
            index[i] = p[i]//grid[i+3]
            if index[i] < 0:
                index[i]+=grid[i]
            if index[i] > grid[i]-1:
                index[i] -= grid[i]
    except:
        for i in range(p.shape[0]):
            for xi in range(3):
                for k in range(p.shape[2]):
                    index[i,xi,k] = p[i,xi,k]//dxi[xi]
                    if index[i,xi,k] < 0:
                        index[i,xi,k] += bounds[xi]
                    if index[i,xi,k] > bounds[xi]-1:
                        index[i,xi,k] -= bounds[xi]
    return index.astype(int)
def get_e(p,grid,fpath="job_0.183435_36.600028/mc.flx.spc.nc"):
    """Look up (Edir, Edown, Eup) irradiances for each sample point.

    *p* has shape (3, npoints); each point is mapped to its grid box via
    get_boxindex and the fluxes are read from the netCDF file at *fpath*.
    Note the file is indexed (y, x, z), hence the [j, i, k] order.
    """
    edirs = nd.Dataset(fpath,'r')
    Edir = np.zeros(p.shape[1])
    Edown = np.zeros(p.shape[1])
    Eup = np.zeros(p.shape[1])
    for I in range(p.shape[1]):
        i,j,k = get_boxindex(p[:,I],grid)
        # assumes a single spectral band in the file (the trailing ":" must
        # collapse to one value) -- TODO confirm
        Edir[I] = edirs["Edir"][j,i,k,:]
        Edown[I] = edirs["Edown"][j,i,k,:]
        Eup[I] = edirs["Eup"][j,i,k,:]
    edirs.close()
    return Edir,Edown,Eup
def get_rad(p,grid):
    """Compute (Eup, Edown) at each sample point from angular radiances.

    Relies on the module-level UMUS/PHIS/wumu/wphi quadrature settings and
    integrates "mc.rad.spc.nc" via calc_irr.calc_Es.
    """
    Eup = np.zeros(p.shape[1])
    Edown = np.zeros_like(Eup)
    Eu,Ed = calc_Es(UMUS,PHIS,wumu,wphi,"mc.rad.spc.nc","radiance")
    for I in range(p.shape[1]):
        i,j,k = get_boxindex(p[:,I],grid)
        Eup[I] = Eu["radiance"][j,i,k,:]
        Edown[I] = Ed["radiance"][j,i,k,:]
    return Eup,Edown
# --- Scene and quadrature configuration read from the working directory ---
UMUS = np.loadtxt("input_params.txt",dtype=str, max_rows=1)
PHIS = np.loadtxt("input_params.txt",dtype=str,skiprows=1, max_rows=1)
wumu = np.loadtxt("numus.txt", skiprows=1, max_rows=1)
wphi = np.loadtxt("nphis.txt", skiprows=1, max_rows=1)
Nx,Ny,dx,dy = np.loadtxt("input_params.txt", skiprows = 6, max_rows=1)
Zlev = np.loadtxt("input_params.txt", skiprows = 4, max_rows=1)
Nz = 2 #Zlev.shape[0]
dz = 1
# Solar zenith angle and its cosine.
sza = np.loadtxt("input_params.txt", skiprows = 2, max_rows=1)
mu = np.cos(sza/180.*np.pi)
albedo = 0.2
grid = np.array([Nx,Ny,Nz,dx,dy,dz])
# Cloud box extent (grid indices).
cloudx = np.array([3,4])
cloudy = np.array([0])
cloudz = np.array([0,1])
# --- Virtual camera: position, field of view and pixel grid ---
camerapos = np.array([1.5,0.01,2])
camerafov = 90.
camerapixels = 90
pixeledges = np.linspace(-camerafov/2.,camerafov/2,camerapixels+1)
# Pixel centres are the midpoints of adjacent edges.
pixelangles = (pixeledges[0:-1] + pixeledges[1:])/2.
pixelvalues = np.zeros(pixelangles.shape)
# Ground intersection of every pixel's viewing ray.
pixelground = get_sample_points(camerapos,pixelangles)[-1]
Edir,Edown,Eup = get_e(pixelground,grid)
#Eu,Ed = get_rad(pixelground,grid)
#
#pixelvalues = Edir * albedo / np.pi + Ed*albedo/np.pi
#palt = Edir *albedo/np.pi + Edown*albedo/np.pi
#
#truth = nd.Dataset("job_panorama/mc.rad.spc.nc" , "r")
Eu,Ed = calc_Es(UMUS,PHIS,wumu,wphi,"mc.rad.spc.nc","radiance")
#fig,ax = plt.subplots(3,1)
#ax[0].plot(np.arange(pixelvalues.shape[0]),pixelvalues,label="with Edown from radiances")
#ax[1].plot(np.arange(palt.shape[0]),palt,label="with Edown from flx file")
#ax[2].plot(np.arange(truth["radiance"][0,:,0,0].shape[0]),truth["radiance"][0,:,0,0], label="from mystic panorama")
#ax[0].legend()
#ax[1].legend()
#ax[2].legend()
#
#plt.tight_layout()
#plt.show()
| MujkanovicMax/master-thesis | radiances/raytr.py | raytr.py | py | 3,788 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.tan",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": ... |
26818962506 | import json
import urllib
import requests
import types
class MovesAPIError(Exception):
    """Raised if the Moves API returns an error response."""
    pass
class MovesAPINotModifed(Exception):
    """Raised for an HTTP 304 (document unmodified); only occurs when an ETag header was sent."""
    pass
class MovesClient(object):
    """OAuth client for the Moves API.

    NOTE(review): this module targets Python 2 -- it relies on
    ``urllib.urlencode`` and on the ``func_*`` function attributes in
    ``__getattr__``, both of which were removed in Python 3.
    """
    api_url = "https://api.moves-app.com/api/1.1"
    app_auth_url = "moves://app/authorize"
    web_auth_uri = "https://api.moves-app.com/oauth/v1/authorize"
    token_url = "https://api.moves-app.com/oauth/v1/access_token"
    tokeninfo_url = "https://api.moves-app.com/oauth/v1/tokeninfo"
    refresh_url = "https://api.moves-app.com/oauth/v1/access_token"
    def __init__(self, client_id=None, client_secret=None,
                 access_token=None, use_app=False):
        """Store app credentials; *use_app* selects the moves:// app auth URL."""
        self.client_id = client_id
        self.client_secret = client_secret
        self.access_token = access_token
        self.auth_url = self.app_auth_url if use_app else self.web_auth_uri
        self.use_app = use_app
        self._last_headers = None
    def parse_response(self, response):
        """Parse JSON API responses."""
        return json.loads(response.text)
    def build_oauth_url(self, redirect_uri=None, scope="activity location"):
        """Return the URL the user must visit to authorize this client."""
        params = {
            'client_id': self.client_id,
            'scope': scope
        }
        if not self.use_app:
            params['response_type'] = 'code'
        if redirect_uri:
            params['redirect_uri'] = redirect_uri
        # Moves hates +s for spaces, so use %20 instead.
        encoded = urllib.urlencode(params).replace('+', '%20')
        return "%s?%s" % (self.auth_url, encoded)
    def get_oauth_token(self, code, **kwargs):
        """Exchange an authorization *code* for (access_token, refresh_token)."""
        params = {
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'code': code,
            'grant_type': kwargs.get('grant_type', 'authorization_code')
        }
        if 'redirect_uri' in kwargs:
            params['redirect_uri'] = kwargs['redirect_uri']
        response = requests.post(self.token_url, params=params)
        response = json.loads(response.content)
        try:
            return response['access_token'], response['refresh_token']
        except:
            error = "<%(error)s>: %(error_description)s" % response
            raise MovesAPIError(error)
    def refresh_oauth_token(self, refresh_token, **kwargs):
        """Trade a *refresh_token* for a fresh (access_token, refresh_token)."""
        params = {
            'client_id': self.client_id,
            'client_secret': self.client_secret,
            'refresh_token': refresh_token,
            'grant_type': kwargs.get('grant_type', 'refresh_token')
        }
        response = requests.post(self.refresh_url, params=params)
        response = json.loads(response.content)
        try:
            return response['access_token'], response['refresh_token']
        except:
            error = "<%(error)s>: %(error_description)s" % response
            raise MovesAPIError(error)
    def tokeninfo(self):
        """Return the token-info payload for the stored access token."""
        params = {
            'access_token': self.access_token
        }
        response = requests.get(self.tokeninfo_url, params=params)
        response = json.loads(response.content)
        try:
            return response
        except:
            error = "<%(error)s>: %(error_description)s" % response
            raise MovesAPIError(error)
    def api(self, path, method='GET', **kwargs):
        """Perform an authenticated request against ``api_url``/*path*.

        Supports per-call ``access_token`` and ``etag`` entries inside
        ``params`` (the latter is sent as If-None-Match and raises
        MovesAPINotModifed on HTTP 304).  Non-2xx/3xx responses raise
        MovesAPIError.  The last response headers are cached for the
        rate-limit properties attached below the class.
        """
        params = kwargs['params'] if 'params' in kwargs else {}
        data = kwargs['data'] if 'data' in kwargs else {}
        if not self.access_token and 'access_token' not in params:
            raise MovesAPIError("You must provide a valid access token.")
        url = "%s/%s" % (self.api_url, path)
        if 'access_token' in params:
            access_token = params['access_token']
            del(params['access_token'])
        else:
            access_token = self.access_token
        headers = {
            "Authorization": 'Bearer ' + access_token
        }
        if 'etag' in params:
            headers['If-None-Match'] = params['etag']
            del(params['etag'])
        resp = requests.request(method, url,
                                data=data,
                                params=params,
                                headers=headers)
        if str(resp.status_code)[0] not in ('2', '3'):
            raise MovesAPIError("Error returned via the API with status code (%s):" %
                                resp.status_code, resp.text)
        if resp.status_code == 304:
            raise MovesAPINotModifed("Unmodified")
        self._last_headers = resp.headers
        return resp
    def get(self, path, **params):
        """GET *path* and return the parsed JSON body."""
        return self.parse_response(
            self.api(path, 'GET', params=params))
    def post(self, path, **data):
        """POST *data* to *path* and return the parsed JSON body."""
        return self.parse_response(
            self.api(path, 'POST', data=data))
    def set_first_date(self):
        """Cache the user's first recorded date from the profile endpoint.

        NOTE(review): ``self.first_date`` is never initialized in __init__,
        so the first access raises AttributeError before the guard runs --
        confirm.
        """
        if not self.first_date:
            response = self.user_profile()
            self.first_date = response['profile']['firstDate']
    def __getattr__(self, name):
        '''\
        Turns method calls such as "moves.foo_bar(...)" into
        a call to "moves.api('/foo/bar', 'GET', params={...})"
        and then parses the response.
        '''
        base_path = name.replace('_', '/')
        # Define a function that does what we want.
        def closure(*path, **params):
            'Accesses the /%s API endpoints.'
            path = list(path)
            path.insert(0, base_path)
            return self.parse_response(
                self.api('/'.join(path), 'GET', params=params)
            )
        # Clone a new method with the correct name and doc string.
        # NOTE(review): func_code/func_globals/... are Python-2-only
        # attributes (renamed __code__ etc. in Python 3).
        retval = types.FunctionType(
            closure.func_code,
            closure.func_globals,
            name,
            closure.func_defaults,
            closure.func_closure)
        retval.func_doc = closure.func_doc % base_path
        # Cache it to avoid additional calls to __getattr__.
        setattr(self, name, retval)
        return retval
# Expose each rate-limit/etag response header of the most recent request as a
# read-only property on MovesClient (hyphens become underscores).  The
# ``att=att`` default binds the loop variable at definition time, avoiding the
# late-binding closure pitfall.
# NOTE(review): the property looks up the *underscored* name in the headers
# mapping, while HTTP headers use hyphens, and falls back to returning the
# attribute name itself when no request has been made yet -- both look
# unintended; confirm.
_move_client_status = ['etag', 'x-ratelimit-hourlimit', 'x-ratelimit-hourremaining',
                       'x-ratelimit-minutelimit', 'x-ratelimit-minuteremaining']
for att in _move_client_status:
    att = att.replace('-', '_')
    setattr(MovesClient, att, property(lambda self,att=att: self._last_headers.get(att, None)
                                       if self._last_headers else att))
| lysol/moves | moves/_moves.py | _moves.py | py | 6,546 | python | en | code | 58 | github-code | 6 | [
{
"api_name": "json.loads",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "urllib.urlencode",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_numb... |
71226682109 | import os, glob
from fpdf import FPDF
class Pdf_Tool:
    """Thin wrapper around :class:`fpdf.FPDF` that turns images into a PDF."""
    def __init__(self, format):
        # One FPDF document per tool instance; *format* is the page size.
        self.pdf = FPDF(format=format)
    def save(self, dir, pdf_name):
        """Write the document to ``dir/pdf_name``, creating *dir* if missing."""
        target_missing = not os.path.exists(dir)
        if target_missing:
            os.makedirs(dir)
        self.pdf.output(os.path.join(dir, pdf_name), "F")
    def create(self, img_path_list, dimen):
        """Add each image on its own page at ``dimen = (x, y, w, h)``."""
        x, y, w, h = dimen[0], dimen[1], dimen[2], dimen[3]
        for img_path in img_path_list:
            self.pdf.add_page()
            self.pdf.image(img_path, x, y, w, h)
if __name__ == "__main__":
    root = os.path.join(os.getcwd(), "output")
    # NOTE: the value above is immediately overridden by this
    # machine-specific path.
    root = "F:/E-Book/Temp/JPG"
    # Convert subdirectories "1".."3" of JPGs into one PDF each.
    for i in range(1, 4):
        no = str(i).zfill(1)  # zfill(1) is a no-op for single-digit numbers
        filepath_list = sorted(glob.glob(os.path.join(root, no + "/*.jpg")), key=os.path.basename)
        pdf = Pdf_Tool((2040, 1512))
        pdf.create(filepath_list, (0, 0, 2040, 1512))
        pdf.save(os.path.join(root, no), no + ".pdf")
| huangzf128/something | code/python/image/pdf_tool.py | pdf_tool.py | py | 888 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fpdf.FPDF",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number":... |
70508505789 | import os
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
from matplotlib import pyplot as plt
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
# progress bar
from tqdm.auto import tqdm
# Training hyper-parameters
train_batch_size = 64  # minibatch size for training
test_batch_size = 1000  # minibatch size for evaluation
epochs = 15  # number of training epochs
lr = 0.0003  # initial Adam learning rate
gamma = 0.7  # multiplicative LR decay per epoch (StepLR)
myseed = 1220  # global RNG seed for reproducibility
class Net(nn.Module):
    """Small CNN for 28x28 single-channel MNIST digits (10 classes)."""
    def __init__(self):
        super(Net, self).__init__()
        # Two 3x3 conv layers, two dropout layers, two fully-connected layers.
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(9216, 128)  # 64 channels x 12 x 12 after pooling
        self.fc2 = nn.Linear(128, 10)
    def forward(self, x):
        # Feature extraction: conv -> ReLU -> conv -> ReLU -> 2x2 max-pool.
        features = F.relu(self.conv1(x))
        features = F.relu(self.conv2(features))
        features = F.max_pool2d(features, 2)
        features = self.dropout1(features)
        # Classifier head on the flattened feature map.
        flat = torch.flatten(features, 1)
        hidden = self.dropout2(F.relu(self.fc1(flat)))
        logits = self.fc2(hidden)
        # flatten with end_dim=0 is a no-op kept for parity with the
        # original; the (batch, 10) logits pass through unchanged.
        return torch.flatten(logits, end_dim=0)
def run():
# CUDA, macOS GPU or CPU ?
if torch.cuda.is_available():
device = torch.device("cuda")
print('Using cuda')
elif torch.backends.mps.is_available():
device = torch.device("mps")
print('Using mps')
else:
device = torch.device("cpu")
print('Using cpu')
# add random seed
seed = myseed
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
# download data
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
dataset1 = datasets.MNIST('./data', train=True, download=True, transform=transform)
dataset2 = datasets.MNIST('./data', train=False, transform=transform)
train_loader = DataLoader(dataset1, batch_size=train_batch_size, shuffle=True, num_workers=1, pin_memory=True)
test_loader = DataLoader(dataset2, batch_size=test_batch_size, shuffle=True, num_workers=1, pin_memory=True)
model = Net().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=1e-5)
criterion = nn.CrossEntropyLoss()
scheduler = StepLR(optimizer, step_size=1, gamma=gamma)
# record the performance
epoches = []
training_accuracies = []
training_loss = []
testing_accuracies = []
testing_loss = []
training_accuracies.append('Training accuracies')
training_loss.append('Training loss')
testing_accuracies.append('Testing accuracies')
testing_loss.append('Testing loss')
# ---------- train and validate ----------
for epoch in range(1, epochs + 1):
total_train_loss, total_train_acc = 0.0, 0.0
total_test_loss, total_test_acc = 0.0, 0.0
train_batch_idx, test_batch_idx = 0, 0
tqdm.write("[ epoch " + str(epoch) + " ]")
epoches.append(epoch)
# ---------- train ----------
model.train()
for batch in tqdm(train_loader, file=sys.stdout):
train_batch_idx += 1
if train_batch_idx == 1:
tqdm.write("Training")
imgs, labels = batch
outputs = model(imgs.to(device))
loss = criterion(outputs, labels.to(device))
optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), max_norm=10)
optimizer.step()
acc = (outputs.argmax(dim=-1) == labels.to(device)).float().mean()
total_train_loss += loss.item()
total_train_acc += acc.item()
train_loss = total_train_loss / train_batch_idx
train_acc = total_train_acc / train_batch_idx
# ---------- validate ----------
model.eval()
for batch in tqdm(test_loader, file=sys.stdout):
if test_batch_idx == 1:
tqdm.write("Testing")
test_batch_idx += 1
imgs, labels = batch
with torch.no_grad():
outputs = model(imgs.to(device))
loss = criterion(outputs, labels.to(device))
acc = (outputs.argmax(dim=-1) == labels.to(device)).float().mean()
total_test_loss += loss.item()
total_test_acc += acc.item()
test_loss = total_test_loss / test_batch_idx
test_acc = total_test_acc / test_batch_idx
training_accuracies.append(train_acc)
training_loss.append(train_loss)
testing_accuracies.append(test_acc)
testing_loss.append(test_loss)
print('Training Loss:', training_loss[epoch], 'Training Accuracy:', (100 * training_accuracies[epoch]),
'%')
print('Testing Loss:', testing_loss[epoch], 'Testing Accuracy:', 100 * testing_accuracies[epoch], '%')
print('')
scheduler.step()
plot(epoches, training_loss)
plot(epoches, training_accuracies)
plot(epoches, testing_accuracies)
plot(epoches, testing_loss)
def plot(epoches, performance):
label = performance.pop(0)
plt.title(label)
plt.plot(epoches, performance, label=label)
plt.xlabel('epoches')
plt.legend()
plt.savefig(label + '.jpg')
plt.show()
if __name__ == '__main__':
run()
| WangShengqing122090536/CSC1004-image-classification-modified | main.py | main.py | py | 5,477 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
24252533109 | from phrase import Phrase
import random
class Game:
def __init__(self):
self.missed = 0
self.phrases = self.create_phrases()
self.active_phrase = self.get_random_phrase()
self.guesses = []
def create_phrases(self):
phrases = [Phrase("Most things that never get done never get done because they never get started"), Phrase("Discipline equals freedom"), Phrase("Do what you can with all you have wherever you are"), Phrase("Get after it"), Phrase("Up and Rock and Roll")]
return phrases
def get_random_phrase(self):
return random.choice(self.phrases)
def welcome(self):
print("="*60+"""
*** WELCOME TO NIKOLAI'S MOTIVATIONAL PHRASE HUNTER 2020 ***
"""+"="*60, "\nRULES ===> You've got 5 tries to guess the phrase.\nPlease enter 1 letter at a time.\n")
def get_guess(self):
while True:
user_guess = (input("Please enter a letter: ")).lower()
if not user_guess.isalpha():
print("That's not a valid selection. Please enter a letter.")
elif len(user_guess) != 1:
print("Please enter one letter at a time.")
else:
return user_guess
def start(self):
self.welcome()
self.active_phrase.display(self.guesses)
while not self.missed >= 5:
print(f"*** Number missed: {self.missed} *** \n")
user_guess = self.get_guess()
self.guesses.append(user_guess)
if self.active_phrase.check_guess(user_guess):
print("YAY!\n")
self.active_phrase.display(self.guesses)
if self.active_phrase.check_complete(self.guesses):
print("CONGRATS! You win!\n")
break
if not self.active_phrase.check_guess(user_guess):
self.missed += 1
print("\nBummer :(\n")
if self.missed == 5:
print("You lost. Please play again!\n")
| Nikolai-O/python-techdegree-project3 | game.py | game.py | py | 2,068 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "phrase.Phrase",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 17,
"usage_type": "call"
}
] |
32197951073 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
driver=webdriver.Chrome(executable_path='./driver/chromedriver')
driver.get('https://web.whatsapp.com/')
input("please scan qr code and press any key to continue:")
RM=driver.find_element_by_css_selector('span[title="Assignments & CT"]')
RM.click()
testinput=driver.find_element_by_xpath("/html/body/div/div[1]/div[1]/div[4]/div[1]/footer/div[1]/div[2]/div/div[1]/div/div[2]")
time.sleep(10)
testinput.send_keys("Hello friends")
testinput.send_keys(Keys.RETURN)
| AbhayPal005/Whatsaap-Automation-Using-Selenium | chrome_driver_windows.py | chrome_driver_windows.py | py | 575 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "time.sleep",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "selenium.webdri... |
30543622706 | import matplotlib.pyplot as plt
import numpy as np
plt.ion()
plt.rcParams['axes.labelsize'] = 18
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['font.size'] = 16
plt.rcParams['lines.linewidth'] = 2.0
plt.rcParams['lines.markersize'] = 8
plt.rcParams['legend.fontsize'] = 14
class Simulator:
eps = 1e-16
def __init__(self, num_agents = 15, max_iterations = 1000, step_size = None, \
convergence_tol = 0.001, x_bounds = (0,1), y_bounds = (0, 1)):
# convergence_tol : % of dimensions of the room
self.convergence_tol = convergence_tol
# Dimensions of the room
self.x_bounds = x_bounds
self.y_bounds = y_bounds
self.step_size = step_size
self.num_agents = num_agents
self.max_iterations = max_iterations
self.iteration = 0
self.converged_at_iteration = None
self.mean_step = []
self.__initialize_positions()
self.__choose_attractors()
def __find_third_vertex(self, first_vertex, second_vertex):
""" Returns both possible options for the third vertex that makes an
equilateral triangle with two given points"""
# Midpoint:
mid_x, mid_y = 0.5*(first_vertex[0] + second_vertex[0]), 0.5*(first_vertex[1] + second_vertex[1])
# Squared length of side of equilateral triangle:
D2 = (first_vertex[0] - second_vertex[0])**2 + (first_vertex[1] - second_vertex[1])**2
y_diff = first_vertex[1] - second_vertex[1]
if y_diff < Simulator.eps:
# avoid division by zero
y_diff += Simulator.eps
# Negative Reciprocal slope of line joining first and second vertex:
slope = -(first_vertex[0] - second_vertex[0]) / y_diff
# Intercept of perpendicular bisector line between first and second vertex:
intercept = mid_y - slope * mid_x
# For the quadratic formula:
A = 1
B = -2 * mid_x
C = mid_x**2 - (3/4) * D2 /(slope**2 + 1)
Rx = np.roots([A, B, C])
Ry = slope*Rx + intercept
vertex_options = (Rx, Ry)
return vertex_options
def __find_projections(self, target_location_x, target_location_y, current_x, current_y):
R_vect = np.array([target_location_x - current_x, target_location_y - current_y])
Rx_vect = np.array([target_location_x - current_x, 0])
Ry_vect = np.array([0, target_location_y - current_y])
# Make the distance travelled a proportion of R_vect
x_projection = self.step_size * np.dot(Rx_vect, R_vect) / (np.linalg.norm(Rx_vect) + Simulator.eps)
y_projection = self.step_size * np.dot(Ry_vect, R_vect) / (np.linalg.norm(Ry_vect) + Simulator.eps)
signed_projection = np.sign(R_vect) * np.array([x_projection, y_projection])
return (signed_projection[0], signed_projection[1])
def __initialize_positions(self):
# Container for the whole simulation:
self.X = np.zeros((self.num_agents, self.max_iterations + 1))
self.Y = np.zeros((self.num_agents, self.max_iterations + 1))
# Initialize first positions:
self.X[:,0] = np.random.rand(self.num_agents,)
self.Y[:,0] = np.random.rand(self.num_agents,)
def __choose_attractors(self):
if self.num_agents < 3:
raise Exception('The number of agents must be at least 3')
# Populate the options for each agent to follow, anyone but herself
options = np.arange(self.num_agents)
options = np.tile(options,(len(options),1))
options = options[~np.eye(options.shape[0],dtype=bool)].reshape(options.shape[0],-1)
# Pick two random indices to options for two people to follow
# (scale the random number by the range and round.)
# Actually will need to loop here for the second agent because have to make sure not
# choosing same two people:
# Initialize
follows = np.zeros((self.num_agents, 2))
# First attractor:
follows[:, 0, np.newaxis] = np.round( (options.shape[1] - 1) * np.random.rand(self.num_agents, 1) ).astype(int)
# Second attractor:
for agent in range(self.num_agents):
firstDraw = follows[agent,0]
# Initialize:
secondDraw = firstDraw
while secondDraw == firstDraw:
# Want a different random draw from the options
secondDraw = np.round( (options.shape[1] - 1) * np.random.rand() ).astype(int)
follows[agent,1] = secondDraw
follows=follows.astype(int)
self.first_attractor = options[np.arange(options.shape[0]), follows[:,0], np.newaxis]
self.second_attractor = options[np.arange(options.shape[0]), follows[:,1], np.newaxis]
def _update_positions(self):
"""
This allows each agent to jump directly to the third vertex that would create an equilateral triangle
with the agent and the agent's two targets. However, everyone is jumping at the same time so these
triangles are not likely to be formed until later in the simulation (if ever)
"""
if self.step_size is not None:
if self.step_size > 1:
raise Exception('The step size should be less than 1')
for agent in range(self.num_agents):
# Find the points where you want to go to complete the triangle
first_vertex = (self.X.item((self.first_attractor.item(agent), self.iteration)), \
self.Y.item(self.first_attractor.item(agent), self.iteration))
second_vertex = (self.X.item((self.second_attractor.item(agent), self.iteration)), \
self.Y.item(self.second_attractor.item(agent), self.iteration))
options_x, options_y = self.__find_third_vertex(first_vertex, second_vertex)
# Find the closest of the two vertices to your current position, or the one that is inside the room:
# For now, just don't update position if both are out of bounds
out_of_bounds = (options_x > self.x_bounds[1]) | (options_x < self.x_bounds[0]) | \
(options_y > self.y_bounds[1]) | (options_y < self.y_bounds[0])
options_x = options_x[~out_of_bounds]
options_y = options_y[~out_of_bounds]
current_x = self.X[agent, self.iteration]
current_y = self.Y[agent, self.iteration]
# Update the next position
if len(options_x) > 1:
# Distance to first & second options:
D1 = ( (options_x[0] - current_x)**2 + (options_y[0] - current_y)**2 )**0.5
D2 = ( (options_x[1] - current_x)**2 + (options_y[1] - current_y)**2 )**0.5
closest_ind = np.argmin([D1, D2])
if self.step_size is not None:
x_projection, y_projection = self.__find_projections(options_x.item(closest_ind), \
options_y.item(closest_ind), current_x, current_y)
self.X[agent, self.iteration + 1] = current_x + x_projection
self.Y[agent, self.iteration + 1] = current_y + y_projection
else:
self.X[agent, self.iteration + 1] = options_x[closest_ind]
self.Y[agent, self.iteration + 1] = options_y[closest_ind]
elif len(options_x) == 1:
if self.step_size is not None:
x_projection, y_projection = self.__find_projections(options_x.item(0), \
options_y.item(0), current_x, current_y)
self.X[agent, self.iteration + 1] = current_x + x_projection
self.Y[agent, self.iteration + 1] = current_y + y_projection
else:
self.X[agent, self.iteration + 1] = options_x
self.Y[agent, self.iteration + 1] = options_y
else: # Don't change position
self.X[agent, self.iteration + 1] = current_x
self.Y[agent, self.iteration + 1] = current_y
def plot_positions(self, initialize_plot, plot_sides = False, zoom = False):
if initialize_plot:
# Setting the x and y data explictly for dynamic plot update only works for plot, not scatter:
# Going to follow the first attractor with a different color
self.ax1.plot(self.X[0, self.iteration], self.Y[0, self.iteration], 'r.')
self.ax1.plot(self.X[self.first_attractor.item(0), self.iteration], \
self.Y[self.first_attractor.item(0), self.iteration],'r+')
self.ax1.plot(self.X[self.second_attractor.item(0), self.iteration], \
self.Y[self.second_attractor.item(0), self.iteration],'r+')
self.ax1.plot(self.X[1:, self.iteration], self.Y[1:, self.iteration],'b.')
self.ax1.set_aspect('equal')
self.ax1.set_xlim(self.x_bounds[0], self.x_bounds[1])
self.ax1.set_ylim(self.y_bounds[0], self.y_bounds[1])
self.ax1.set_ylabel("Y")
self.ax1.set_xlabel("X")
self.ax1.set_title("Position of Agents")
else:
# Plot the new position
self.ax1.set_title("Iteration = {}".format(self.iteration))
for lin_num, line in enumerate(self.ax1.lines):
if lin_num==0:
line.set_xdata(self.X[0, self.iteration])
line.set_ydata(self.Y[0, self.iteration])
elif lin_num==1:
line.set_xdata(self.X[self.first_attractor.item(0), self.iteration - 1])
line.set_ydata(self.Y[self.first_attractor.item(0), self.iteration - 1])
elif lin_num==2:
line.set_xdata(self.X[self.second_attractor.item(0), self.iteration - 1])
line.set_ydata(self.Y[self.second_attractor.item(0), self.iteration - 1])
else:
line.set_xdata(self.X[1:, self.iteration])
line.set_ydata(self.Y[1:, self.iteration])
self.fig.canvas.draw()
# This is crucial for viewing the plots from the command line:
try:
plt.pause(0.5)
except Exception:
pass
if plot_sides:
for agent in range(self.num_agents):
# Grab the positions for the attractors of each agent & plot the triangle in green at the end
X_triangle = np.hstack((self.X[agent, self.iteration], \
self.X[self.first_attractor.item(agent), self.iteration], \
self.X[self.second_attractor.item(agent), self.iteration], \
self.X[agent, self.iteration]))
Y_triangle = np.hstack((self.Y[agent, self.iteration], \
self.Y[self.first_attractor.item(agent), self.iteration], \
self.Y[self.second_attractor.item(agent), self.iteration], \
self.Y[agent, self.iteration]))
self.ax1.plot(X_triangle, Y_triangle, '-g')
if zoom:
# Zoom In on the final positions
self.ax1.set_xlim(0.9 * min(self.X[:, self.iteration]), 1.1 * max(self.X[:, self.iteration]))
self.ax1.set_ylim(0.9 * min(self.Y[:, self.iteration]), 1.1 * max(self.Y[:, self.iteration]))
self.ax1.set_aspect('equal')
def run(self, plot_trajectories = True, plot_convergence = True):
if plot_trajectories:
self.fig, self.ax1 = plt.subplots(nrows = 1, ncols = 1, figsize=(8, 8)) # two axes on figure
self.plot_positions(initialize_plot = True)
while self.iteration < self.max_iterations:
# Check for convergence using mean step size for all agents:
self.mean_step.append(np.mean( ( (self.X[:, self.iteration, np.newaxis] \
- self.X[:, self.iteration - 1, np.newaxis] )**2 \
+ (self.Y[:, self.iteration, np.newaxis] \
- self.Y[:, self.iteration - 1, np.newaxis] )**2 )**0.5 ) )
# Define convergence as once the mean step size has dropped below the threshold for 100 iterations
# Stop the simulation once converged.
if self.iteration > 100: # Don't bother with convergence rules unless dealing with a significant simulation
if all( ms <= self.convergence_tol for ms in self.mean_step[self.iteration - 100: self.iteration + 1] ):
self.converged_at_iteration = self.iteration
self.X = self.X[:, np.arange(self.iteration+1)]
self.Y = self.Y[:, np.arange(self.iteration+1)]
break
self._update_positions()
# Update
self.iteration += 1
if plot_trajectories:
self.plot_positions(initialize_plot = False)
if plot_convergence:
# Plot the end positions of the agents, even if we weren't plotting
# their trajectories throughout, along with the sides of the
# triangles and the convergence graph
plot_sides = True
if not plot_trajectories:
self.fig, self.ax1 = plt.subplots(nrows = 1, ncols = 1, figsize=(8, 8))
initialize = True
else:
initialize = False
#if self.step_size is not None:
# zoom = True
#else:
# zoom = False
self.plot_positions(initialize, plot_sides)
self.fig2, self.ax2 = plt.subplots(nrows = 1, ncols = 1, figsize=(8, 4))
self.ax2.plot(self.mean_step)
self.ax2.set_ylabel("Mean Step Size")
self.ax2.set_xlabel("Iteration")
self.ax2.set_title("Motion of Agents")
| liminal-learner/Chaos | Simulator.py | Simulator.py | py | 14,112 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.ion",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "... |
28624119358 | import cartopy
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import numpy as np
def plot_geodetic(location_to_geodetic, edxml_parser):
# get names and geodetics for plotting
locs = np.asarray(list(location_to_geodetic.keys()))
geodetic_coords = np.asarray(list(location_to_geodetic.values()))
geodetic_coords = geodetic_coords[:, [1, 0]]
# remove any locations from geodetic that was not in parser
loc_in_parser = np.isin(locs, edxml_parser.sorted_unique_locs)
locs = locs[loc_in_parser]
geodetic_coords = geodetic_coords[loc_in_parser]
# count occurences of each location in parser
loc_counts = np.asarray([edxml_parser.loc_to_count[loc] for loc in locs])
# set up figure and axes
fig = plt.figure()
ax = plt.axes(projection=ccrs.PlateCarree())
# zoom view around points
min_coord = np.min(geodetic_coords, axis=0) - 5
max_coord = np.max(geodetic_coords, axis=0) + 5
ax.set_extent([min_coord[0], max_coord[0], min_coord[1], max_coord[1]],
crs=ccrs.PlateCarree())
# add imagery
ax.add_feature(cartopy.feature.LAND)
ax.add_feature(cartopy.feature.OCEAN)
# plot points
sc = plt.scatter(geodetic_coords[:, 0], geodetic_coords[:, 1], color='#00000088', marker='o',
s=2*loc_counts, transform=ccrs.PlateCarree())
# create annotation
# code modified from:
# https://stackoverflow.com/questions/7908636/possible-to-make-labels-appear-when-hovering-over-a-point-in-matplotlib
annot = ax.annotate("", xy=(0, 0), xytext=(20, 20), textcoords="offset points",
bbox=dict(boxstyle="round", fc="w"),
arrowprops=dict(arrowstyle="->"))
annot.set_visible(False)
# define func to update annotations
def update_annot(ind):
# get position from first point
pos = sc.get_offsets()[ind["ind"][0]]
annot.xy = pos
# draw box with annotations from all points from event
text = "\n".join([locs[n] for n in ind["ind"]])
annot.set_text(text)
annot.get_bbox_patch().set_alpha(0.4)
# define func to handle clicking
def on_click(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = sc.contains(event)
# update annotation if point with data clicked
if cont:
update_annot(ind)
annot.set_visible(True)
fig.canvas.draw_idle()
# hide annotation if point without data clicked
else:
if vis:
annot.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect("button_release_event", on_click)
# display plot
plt.show()
| pnadelofficial/HistoricalLetters | plot.py | plot.py | py | 2,774 | python | en | code | null | github-code | 6 | [
{
"api_name": "numpy.asarray",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.isin",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_numbe... |
25687467876 | import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
def __init__(self, in_channel, out_channel, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(out_channel)
self.conv2 = nn.Conv2d(out_channel, out_channel, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(out_channel)
self.shortcut = nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_channel)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResFeatureNet(nn.Module):
def __init__(self):
super().__init__()
self.f1 = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=7, stride=1, padding=3),
nn.BatchNorm2d(32),
nn.ReLU())
self.res1 = BasicBlock(32, 64, stride=2)
self.res2 = BasicBlock(64, 128, stride=2)
self.res3 = BasicBlock(128, 128, stride=2)
self.res4 = BasicBlock(128, 256, stride=2)
# self.res5 = BasicBlock(512, 1024, stride=2)
self.flatten = nn.Flatten()
def forward(self, x):
o = self.f1(x)
o = self.res1(o)
o = self.res2(o)
o = self.res3(o)
o = self.res4(o)
# o = self.res5(o)
o = self.flatten(o)
return o
class FeatureNet(nn.Module):
def __init__(self):
super(FeatureNet, self).__init__()
self.f = nn.Sequential(
nn.Conv2d(1, 24, kernel_size=7, stride=1, padding=3),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
nn.Conv2d(24, 64, kernel_size=5, stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
nn.Conv2d(64, 96, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(96, 96, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(96, 64, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
nn.Flatten(), # flatten directly without bottleneck
)
def forward(self, x):
out = self.f(x)
return out
class MetricNet(nn.Module):
def __init__(self, in_dim=4096, hidden_size=512):
super(MetricNet, self).__init__()
self.fc = nn.Sequential(
nn.Linear(in_dim * 2, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, 2)
)
def forward(self, x):
return self.fc(x)
class Projection(nn.Module):
def __init__(self, in_dim=4096, hidden_size=1024):
super(MetricNet, self).__init__()
self.fc = nn.Sequential(
nn.Linear(in_dim, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, hidden_size)
)
def forward(self, x):
return self.fc(x)
if __name__ == "__main__":
x = torch.randn(1, 1, 64, 64)
# m = FeatureNet()
m = ResFeatureNet()
o = m(x)
print(o.size())
from utils import cal_parameters
print(cal_parameters(m))
| p3i0t/task2 | models.py | models.py | py | 3,506 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numbe... |
27103187909 | """Fedor's Upper Envelope algorithm.
Based on the original MATLAB code by Fedor Iskhakov:
https://github.com/fediskhakov/dcegm/blob/master/model_retirement.m
"""
from typing import Callable
from typing import Dict
from typing import List
from typing import Tuple
import numpy as np
from dcegm.interpolation import linear_interpolation_with_extrapolation
from scipy.optimize import brenth as root
eps = 2.2204e-16
def upper_envelope(
policy: np.ndarray,
value: np.ndarray,
exog_grid: np.ndarray,
state_choice_vec: np.ndarray,
params: Dict[str, float],
compute_utility: Callable,
) -> Tuple[np.ndarray, np.ndarray]:
"""Runs the Upper Envelope algorithm and drops sub-optimal points.
Calculates the upper envelope over the overlapping segments of the
decision-specific value functions, which in fact are value "correspondences"
in this case, where multiple solutions are detected. The dominated grid
points are then eliminated from the endogenous wealth grid.
Discrete choices introduce kinks and non-concave regions in the value
function that lead to discontinuities in the policy function of the
continuous (consumption) choice. In particular, the value function has a
non-concave region where the decision-specific values of the
alternative discrete choices (e.g. continued work or retirement) cross.
These are referred to as "primary" kinks.
As a result, multiple local optima for consumption emerge and the Euler
equation has multiple solutions.
Moreover, these "primary" kinks propagate back in time and manifest
themselves in an accumulation of "secondary" kinks in the choice-specific
value functions in earlier time periods, which, in turn, also produce an
increasing number of discontinuities in the consumption functions
in earlier periods of the life cycle.
These discontinuities in consumption rules in period t are caused by the
worker's anticipation of landing exactly at the kink points in the
subsequent periods t + 1, t + 2, ..., T under the optimal consumption policy.
Args:
policy (np.ndarray): Array of choice-specific consumption policy
of shape (2, n_grid_wealth).
Position [0, :] of the arrays contain the endogenous grid over wealth M,
and [1, :] stores the corresponding value of the (consumption) policy
function c(M, d), for each time period and each discrete choice.
value (np.ndarray): Array of choice-specific value function
of shape (2, n_grid_wealth).
Position [0, :] of the array contains the endogenous grid over wealth M,
and [1, :] stores the corresponding value of the value function v(M, d),
for each time period and each discrete choice.
exog_grid (np.ndarray): 1d array of exogenous savings grid of shape
(n_grid_wealth,).
choice (int): The current choice.
params (dict): Dictionary containing the model's parameters.
compute_value (callable): Function to compute the agent's value.
Returns:
(tuple) Tuple containing
- policy_refined (np.ndarray): Worker's *refined* (consumption) policy
function of the current period, where suboptimal points have been dropped
and the kink points along with the corresponding interpolated values of
the policy function have been added. Shape (2, 1.1 * n_grid_wealth).
- value_refined (np.ndarray): Worker's *refined* value function of the
current period, where suboptimal points have been dropped and the kink
points along with the corresponding interpolated values of the value
function have been added. Shape (2, 1.1 * n_grid_wealth).
"""
n_grid_wealth = len(exog_grid)
min_wealth_grid = np.min(value[0, 1:])
credit_constr = False
if value[0, 1] <= min_wealth_grid:
segments_non_mono = locate_non_concave_regions(value)
else:
# Non-concave region coincides with credit constraint.
# This happens when there is a non-monotonicity in the endogenous wealth grid
# that goes below the first point.
# Solution: Value function to the left of the first point is analytical,
# so we just need to add some points to the left of the first grid point.
credit_constr = True
expected_value_zero_wealth = value[1, 0]
policy, value = _augment_grid(
policy,
value,
state_choice_vec,
expected_value_zero_wealth,
min_wealth_grid,
n_grid_wealth,
params,
compute_utility=compute_utility,
)
segments_non_mono = locate_non_concave_regions(value)
if len(segments_non_mono) > 1:
_value_refined, points_to_add = compute_upper_envelope(segments_non_mono)
index_dominated_points = find_dominated_points(
value, _value_refined, significance=10
)
if credit_constr:
value_refined = np.hstack(
[np.array([[0], [expected_value_zero_wealth]]), _value_refined]
)
else:
value_refined = _value_refined
policy_refined = refine_policy(policy, index_dominated_points, points_to_add)
else:
value_refined = value
policy_refined = policy
# Fill array with nans to fit 10% extra grid points,
# as the true shape is unknown ex ante
policy_refined_with_nans = np.empty((2, int(1.1 * n_grid_wealth)))
value_refined_with_nans = np.empty((2, int(1.1 * n_grid_wealth)))
policy_refined_with_nans[:] = np.nan
value_refined_with_nans[:] = np.nan
policy_refined_with_nans[:, : policy_refined.shape[1]] = policy_refined
value_refined_with_nans[:, : value_refined.shape[1]] = value_refined
return policy_refined_with_nans, value_refined_with_nans
def locate_non_concave_regions(
value: np.ndarray,
) -> List[np.ndarray]:
"""Locates non-concave regions.
Find non-monotonicity in the endogenous wealth grid where a grid point
to the right is smaller than its preceding point. Put differently, the
value function bends "backwards".
Non-concave regions in the value function are reflected by non-monotonous
regions in the underlying endogenous wealth grid.
Multiple solutions to the Euler equation cause the standard EGM loop to
produce a "value correspondence" rather than a value function.
The elimination of suboptimal grid points converts this correspondence back
to a proper function.
Args:
value (np.ndarray): Array storing the choice-specific value function
"correspondences". Shape (2, *n_endog_wealth_grid*), where
*n_endog_wealth_grid* is of variable length depending on the number of
kinks and non-concave regions in the value function.
In the presence of kinks, the value function is a "correspondence"
rather than a function due to non-concavities.
Returns:
(tuple) Tuple containing:
- value_refined (np.ndarray): Array of shape (2, *n_grid_refined*)
containing the *refined* choice-specific value functions, which means that
suboptimal points have been removed from the endogenous wealth grid and
the value function "correspondence". Furthermore, kink points and the
corresponding interpolated values of the value function have been added.
- points_to_add (np.ndarray): Array of shape (*n_kink_points*,)
containing the kink points and corresponding interpolated values of the
*refined* value function that have been added to ``value_refined``.
*n_kink_points* is of variable length.
- index_dominated_points (np.ndarray): Array of shape (*n_dominated_points*,)
containing the indices of dominated points in the endogenous wealth grid,
where *n_dominated_points* is of variable length.
"""
segments_non_mono = []
is_monotonic = value[0, 1:] > value[0, :-1]
niter = 0
move_right = True
while move_right:
index_non_monotonic = np.where(is_monotonic != is_monotonic[0])[0]
# Check if we are beyond the starting (left-most) point
if len(index_non_monotonic) == 0:
if niter > 0:
segments_non_mono += [value]
move_right = False
break
else:
index_non_monotonic = min(index_non_monotonic) # left-most point
part_one, part_two = _partition_grid(value, index_non_monotonic)
segments_non_mono += [part_one]
value = part_two
# Move point of first non-monotonicity to the right
is_monotonic = is_monotonic[index_non_monotonic:]
niter += 1
return segments_non_mono
def compute_upper_envelope(
    segments: List[np.ndarray],
) -> Tuple[np.ndarray, np.ndarray]:
    """Compute upper envelope and refine the value function correspondence.

    The upper envelope algorithm detects suboptimal points in the value function
    correspondence. Consequently, (i) the suboptimal points are removed and
    (ii) kink points along with their corresponding interpolated values are
    included. The elimination of suboptimal grid points converts the value
    correspondence back to a proper function. Applying both (i) and (ii)
    yields the refined endogenous wealth grid and the *refined* value function.

    Args:
        segments (List[np.ndarray]): List of non-monotonous segments in the
            endogenous wealth grid, which results in non-concavities in the
            corresponding value function. The list contains n_non_monotonous
            np.ndarrays of shape (2, *len_non_monotonous*), where row 0 is the
            wealth grid and row 1 the value, and *len_non_monotonous* is the
            variable length of the given non-monotonous segment.

    Returns:
        (tuple) Tuple containing:
        - points_upper_env_refined (np.ndarray): Array containing the *refined*
            endogenous wealth grid and the corresponding value function, i.e.
            suboptimal points dropped and kink points (with interpolated
            values) added. Shape (2, *n_grid_refined*).
        - points_to_add (np.ndarray): Array containing the kink points and
            corresponding interpolated values added to
            ``points_upper_env_refined``. Shape (2, *n_intersect_points*),
            where *n_intersect_points* is the number of intersection points
            between the two uppermost segments.
    """
    # Pool all segments' wealth grids into one sorted, de-duplicated grid.
    endog_wealth_grid = np.unique(
        np.concatenate([segments[arr][0] for arr in range(len(segments))])
    )
    # Interpolate every segment onto the pooled grid; points outside a
    # segment's support get -inf so they never win the max below.
    values_interp = np.empty((len(segments), len(endog_wealth_grid)))
    for i, segment in enumerate(segments):
        values_interp[i, :] = linear_interpolation_with_inserting_missing_values(
            x=segment[0],
            y=segment[1],
            x_new=endog_wealth_grid,
            missing_value=-np.inf,
        )
    # values_interp has in each row the corresponding values of the upper curve
    # in the overlapping seg. Only row 0 of the (3, n) tile is ever read below.
    max_values_interp = np.tile(values_interp.max(axis=0), (3, 1)) # need this below
    # Boolean matrix: entry (s, i) is True when segment s attains the maximum
    # (is "on top") at grid point i.
    top_segments = values_interp == max_values_interp[0, :]
    grid_points_upper_env = [endog_wealth_grid[0]]
    values_upper_env = [values_interp[0, 0]]
    intersect_points_upper_env = []
    values_intersect_upper_env = []
    # NOTE(review): ``move_right`` is only ever set to False deep inside the
    # intersection branch below; if a full left-to-right sweep completes
    # without hitting that branch, this while-loop restarts the sweep —
    # confirm termination against the upstream implementation.
    move_right = True
    while move_right:
        # Index of top segment, starting at first (left-most) grid point
        index_first_segment = np.where(top_segments[:, 0])[0][0]
        for i in range(1, len(endog_wealth_grid)):
            index_second_segment = np.where(top_segments[:, i] == 1)[0][0]
            # The top segment changed between grid point i-1 and i: the two
            # segments must cross somewhere in between.
            if index_second_segment != index_first_segment:
                first_segment = index_first_segment
                second_segment = index_second_segment
                first_grid_point = endog_wealth_grid[i - 1]
                second_grid_point = endog_wealth_grid[i]
                values_first_segment = (
                    linear_interpolation_with_inserting_missing_values(
                        x=segments[first_segment][0],
                        y=segments[first_segment][1],
                        x_new=np.array([first_grid_point, second_grid_point]),
                        missing_value=np.nan,
                    )
                )
                values_second_segment = (
                    linear_interpolation_with_inserting_missing_values(
                        x=segments[second_segment][0],
                        y=segments[second_segment][1],
                        x_new=np.array([first_grid_point, second_grid_point]),
                        missing_value=np.nan,
                    )
                )
                # Only search for a crossing if both segments are defined on
                # the bracket and do not coincide there.
                if np.all(
                    np.isfinite(
                        np.vstack([values_first_segment, values_second_segment])
                    )
                ) and np.all(np.abs(values_first_segment - values_second_segment) > 0):
                    # Root of the value difference = intersection (kink) point.
                    intersect_point = root(
                        _subtract_values,
                        first_grid_point,
                        second_grid_point,
                        args=(
                            segments[first_segment],
                            segments[second_segment],
                        ),
                    )
                    value_intersect = (
                        linear_interpolation_with_inserting_missing_values(
                            x=segments[first_segment][0],
                            y=segments[first_segment][1],
                            x_new=np.array([intersect_point]),
                            missing_value=np.nan,
                        )[0]
                    )
                    # Evaluate every segment at the intersection to verify the
                    # crossing pair is actually on top there.
                    values_all_segments = np.empty((len(segments), 1))
                    for segment in range(len(segments)):
                        values_all_segments[
                            segment
                        ] = linear_interpolation_with_inserting_missing_values(
                            x=segments[segment][0],
                            y=segments[segment][1],
                            x_new=np.array([intersect_point]),
                            missing_value=-np.inf,
                        )[
                            0
                        ]
                    index_max_value_intersect = np.where(
                        values_all_segments == values_all_segments.max(axis=0)
                    )[0][0]
                    if (index_max_value_intersect == first_segment) | (
                        index_max_value_intersect == second_segment
                    ):
                        # There are no other functions above
                        grid_points_upper_env.append(intersect_point)
                        values_upper_env.append(value_intersect)
                        intersect_points_upper_env.append(intersect_point)
                        values_intersect_upper_env.append(value_intersect)
                        if second_segment == index_second_segment:
                            move_right = False
            # Add point if it lies currently on the highest segment.
            # NOTE(review): ``eps`` is not defined in this function; it is
            # presumably a module-level tolerance constant — confirm. Also,
            # ``any(...) is True`` is a redundant identity check on a bool.
            if (
                any(abs(segments[index_second_segment][0] - endog_wealth_grid[i]) < eps)
                is True
            ):
                grid_points_upper_env.append(endog_wealth_grid[i])
                values_upper_env.append(max_values_interp[0, i])
            index_first_segment = index_second_segment
    # Stack the collected envelope points / kink points into (2, n) arrays.
    points_upper_env_refined = np.empty((2, len(grid_points_upper_env)))
    points_upper_env_refined[0, :] = grid_points_upper_env
    points_upper_env_refined[1, :] = values_upper_env
    points_to_add = np.empty((2, len(intersect_points_upper_env)))
    points_to_add[0] = intersect_points_upper_env
    points_to_add[1] = values_intersect_upper_env
    return points_upper_env_refined, points_to_add
def find_dominated_points(
    value_correspondence: np.ndarray,
    value_refined: np.ndarray,
    significance: int = 10,
) -> np.ndarray:
    """Locate dominated (suboptimal) points in the value correspondence.

    A point of the correspondence is flagged as dominated when, after rounding
    to ``significance`` decimal digits, either its wealth-grid entry or its
    value entry no longer appears in the refined arrays.

    Args:
        value_correspondence (np.ndarray): Array of shape
            (2, n_endog_wealth_grid) storing the choice-specific value function
            correspondence; row 0 holds the endogenous wealth grid, row 1 the
            corresponding values.
        value_refined (np.ndarray): Array of shape (2, n_grid_refined) with the
            refined value function, i.e. suboptimal points dropped and kink
            points added.
        significance (int): Number of decimal digits used for the comparison;
            equality is measured up to 10**(-``significance``).

    Returns:
        np.ndarray: Array of shape (n_dominated_points,) with the indices of
        dominated points in the endogenous wealth grid.
    """
    scale = 10**significance

    def _snap(arr: np.ndarray) -> np.ndarray:
        # Round to ``significance`` decimals via scale -> round -> unscale.
        return np.round(arr * scale) * 10 ** (-significance)

    grid_candidates = _snap(value_correspondence[0, :])
    value_candidates = _snap(value_correspondence[1, :])
    grid_kept = _snap(value_refined[0, :])
    value_kept = _snap(value_refined[1, :])
    # Dominated: the grid entry OR the value entry is missing from the
    # refined arrays.
    dominated = ~np.isin(grid_candidates, grid_kept) | ~np.isin(
        value_candidates, value_kept
    )
    return np.flatnonzero(dominated)
def refine_policy(
    policy: np.ndarray, index_dominated_points: np.ndarray, points_to_add: np.ndarray
) -> np.ndarray:
    """Drop suboptimal points from policy correspondence and add new optimal ones.

    Args:
        policy (np.ndarray): Array of shape (2, *n_endog_wealth_grid*) storing
            the choice-specific policy correspondence; row 0 is the endogenous
            wealth grid, row 1 the consumption policy.
        index_dominated_points (np.ndarray): Array of shape (*n_dominated_points*,)
            containing the indices of dominated points in the endogenous wealth grid,
            where *n_dominated_points* is of variable length.
        points_to_add (np.ndarray): Array of shape (*n_kink_points*,),
            containing the kink points and corresponding interpolated values of
            the refined value function, where *n_kink_points* is of variable
            length.

    Returns:
        (np.ndarray): Array of shape (2, *n_grid_refined*)
        containing the *refined* choice-specific policy function, which means that
        suboptimal points have been removed from the endogenous wealth grid and
        the policy "correspondence". Furthermore, kink points and the
        corresponding interpolated values of the policy function have been added.
    """
    # Remove suboptimal consumption points
    endog_wealth_grid = np.delete(policy[0, :], index_dominated_points)
    optimal_consumption = np.delete(policy[1, :], index_dominated_points)
    # Add new optimal consumption points
    new_points_policy_interp = []
    for new_grid_point in range(len(points_to_add[0, :])):
        all_points_to_the_left = np.where(
            policy[0, :] < points_to_add[0, new_grid_point]
        )[0]
        all_points_to_the_right = np.where(
            policy[0, :] > points_to_add[0, new_grid_point]
        )[0]
        # Closest non-dominated neighbour on the left of the kink point.
        last_point_to_the_left = max(
            all_points_to_the_left[
                ~np.isin(all_points_to_the_left, index_dominated_points)
            ]
        )
        # Find (scalar) point interpolated from the left
        interp_from_the_left = linear_interpolation_with_extrapolation(
            x=policy[0, :][last_point_to_the_left : last_point_to_the_left + 2],
            y=policy[1, :][last_point_to_the_left : last_point_to_the_left + 2],
            x_new=points_to_add[0][new_grid_point],
        )
        # Closest non-dominated neighbour on the right of the kink point.
        first_point_to_the_right = min(
            all_points_to_the_right[
                ~np.isin(all_points_to_the_right, index_dominated_points)
            ]
        )
        # Find (scalar) point interpolated from the right
        interp_from_the_right = linear_interpolation_with_extrapolation(
            x=policy[0, :][first_point_to_the_right - 1 : first_point_to_the_right + 1],
            y=policy[1, :][first_point_to_the_right - 1 : first_point_to_the_right + 1],
            x_new=points_to_add[0, new_grid_point],
        )
        # Store (kink wealth, left-limit consumption, right-limit consumption).
        new_points_policy_interp += [
            np.array(
                [
                    points_to_add[0, new_grid_point],
                    interp_from_the_left,
                    interp_from_the_right,
                ]
            )
        ]
    # Insert new points into the endogenous wealth grid and consumption policy
    for to_add in range(len(new_points_policy_interp)):
        index_insert = np.where(
            endog_wealth_grid > new_points_policy_interp[to_add][0]
        )[0][0]
        # 1) Add new points to policy TWICE to accurately describe discontinuities
        endog_wealth_grid = np.insert(
            endog_wealth_grid,
            index_insert,
            new_points_policy_interp[to_add][0],
        )
        # The second copy sits an infinitesimal step to the left
        # (2.2204e-16 is the float64 machine epsilon), so the jump in
        # consumption at the kink is representable on a strictly
        # increasing grid.
        endog_wealth_grid = np.insert(
            endog_wealth_grid,
            index_insert + 1,
            new_points_policy_interp[to_add][0] - 0.001 * 2.2204e-16,
        )
        # 2a) Add new optimal consumption point, interpolated from the left
        optimal_consumption = np.insert(
            optimal_consumption,
            index_insert,
            new_points_policy_interp[to_add][1],
        )
        # 2b) Add new optimal consumption point, interpolated from the right
        optimal_consumption = np.insert(
            optimal_consumption,
            index_insert + 1,
            new_points_policy_interp[to_add][2],
        )
    policy_refined = np.stack([endog_wealth_grid, optimal_consumption])
    # Make sure first element in endogenous wealth grid and optimal consumption
    # policy are both 0.
    if policy_refined[0, 0] != 0.0:
        policy_refined = np.hstack([np.zeros((2, 1)), policy_refined])
    return policy_refined
def _augment_grid(
    policy: np.ndarray,
    value: np.ndarray,
    state_choice_vec: np.ndarray,
    expected_value_zero_wealth: np.ndarray,
    min_wealth_grid: float,
    n_grid_wealth: int,
    params,
    compute_utility: Callable,
) -> Tuple[np.ndarray, np.ndarray]:
    """Extend the endogenous wealth grid, value, and policy function to the left.

    Ancillary grid points are placed between ``min_wealth_grid`` and the second
    entry of the endogenous grid. In this credit-constrained region consumption
    equals wealth, and the value is current utility plus the discounted
    expected value of holding zero wealth.

    Args:
        policy (np.ndarray): Choice-specific policy correspondence of shape
            (2, *n_endog_wealth_grid*); row 0 is the wealth grid, row 1 the
            consumption policy.
        value (np.ndarray): Choice-specific value correspondence of shape
            (2, *n_endog_wealth_grid*); row 0 is the wealth grid, row 1 the
            value function.
        state_choice_vec (np.ndarray): Mapping of state/choice entries, unpacked
            as keyword arguments into ``compute_utility``.
        expected_value_zero_wealth (np.ndarray): The agent's expected value
            given that she has a wealth of zero.
        min_wealth_grid (float): Minimal wealth level in the endogenous grid.
        n_grid_wealth (int): Number of grid points in the exogenous wealth grid;
            one tenth of it is used for the ancillary points.
        params (dict): Model parameters; ``params["beta"]`` is the discount
            factor.
        compute_utility (Callable): Function computing the agent's utility from
            consumption, params, and the state-choice entries.

    Returns:
        policy_augmented (np.ndarray): Endogenous grid and policy function with
            ancillary points added to the left. Shape (2, *n_grid_augmented*).
        value_augmented (np.ndarray): Endogenous grid and value function with
            ancillary points added to the left. Shape (2, *n_grid_augmented*).
    """
    # Ancillary points strictly left of the second grid entry (drop the
    # endpoint, which coincides with value[0, 1]).
    left_grid = np.linspace(min_wealth_grid, value[0, 1], n_grid_wealth // 10)[:-1]
    utility_left = compute_utility(
        consumption=left_grid,
        params=params,
        **state_choice_vec,
    )
    value_left = utility_left + params["beta"] * expected_value_zero_wealth
    value_augmented = np.vstack(
        (
            np.append(left_grid, value[0, 1:]),
            np.append(value_left, value[1, 1:]),
        )
    )
    # In the constrained region, consumption equals wealth.
    policy_augmented = np.vstack(
        (
            np.append(left_grid, policy[0, 1:]),
            np.append(left_grid, policy[1, 1:]),
        )
    )
    return policy_augmented, value_augmented
def _partition_grid(
    value_correspondence: np.ndarray, j: int
) -> Tuple[np.ndarray, np.ndarray]:
    """Split the grid at column ``j`` into two partitions sharing that column.

    The first partition covers columns 0, ..., j and the second covers
    j, j+1, ..., J, i.e. the boundary column ``j`` appears in both.

    Args:
        value_correspondence (np.ndarray): Array of shape
            (2, *n_endog_wealth_grid*) storing the choice-specific value
            correspondence; row 0 is the endogenous wealth grid, row 1 the
            value function.
        j (int): Index where the endogenous wealth grid is separated; values
            beyond the number of columns are clipped.

    Returns:
        part_one (np.ndarray): Array of shape (2, j + 1) with the first
            partition.
        part_two (np.ndarray): Array of shape (2, J - j + 1) with the second
            partition.
    """
    # Clip so that a too-large index degenerates gracefully.
    cut = min(j, value_correspondence.shape[1])
    left_part = np.vstack(
        [
            value_correspondence[0, : cut + 1],  # endogenous wealth grid
            value_correspondence[1, : cut + 1],  # value function
        ]
    )
    # Include boundary point in both partitions.
    right_part = np.vstack(
        [value_correspondence[0, cut:], value_correspondence[1, cut:]]
    )
    return left_part, right_part
def _subtract_values(grid_point: float, first_segment, second_segment):
    """Return the gap between both segments' interpolated values at ``grid_point``.

    Used as the objective whose root is the intersection of the two uppermost
    segments.
    """
    value_on_first = linear_interpolation_with_extrapolation(
        x=first_segment[0], y=first_segment[1], x_new=grid_point
    )
    value_on_second = linear_interpolation_with_extrapolation(
        x=second_segment[0], y=second_segment[1], x_new=grid_point
    )
    return value_on_first - value_on_second
def linear_interpolation_with_inserting_missing_values(x, y, x_new, missing_value):
    """Linear interpolation with inserting missing values.

    Wraps ``linear_interpolation_with_extrapolation`` and overwrites every
    entry of ``x_new`` lying outside [x.min(), x.max()] with ``missing_value``
    instead of extrapolating.

    Args:
        x (np.ndarray): 1d array of shape (n,) containing the x-values.
        y (np.ndarray): 1d array of shape (n,) containing the y-values
            corresponding to the x-values.
        x_new (np.ndarray or float): 1d array of shape (m,) or float containing
            the new x-values at which to evaluate the interpolation function.
        missing_value (np.ndarray or float): Flat array of shape (1,) or float
            to set for values of x_new outside of the range of x.

    Returns:
        np.ndarray or float: 1d array of shape (m,) or float containing the
            new y-values corresponding to the new x-values.
            In case x_new contains values outside of the range of x, these
            values are set equal to missing_value.
    """
    interpol_res = linear_interpolation_with_extrapolation(x, y, x_new)
    # Mask the query points that fall outside the support of x.
    where_to_miss = (x_new < x.min()) | (x_new > x.max())
    # NOTE(review): this boolean-mask assignment assumes ``interpol_res`` is an
    # ndarray; a scalar float ``x_new`` (allowed per the docstring) would make
    # the assignment fail — confirm whether scalar inputs are actually used.
    interpol_res[where_to_miss] = missing_value
    return interpol_res
| OpenSourceEconomics/dcegm | tests/utils/upper_envelope_fedor.py | upper_envelope_fedor.py | py | 28,814 | python | en | code | 15 | github-code | 6 | [
{
"api_name": "numpy.ndarray",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarray",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "numpy.ndarra... |
46046555266 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.template import Template, Context
from django.utils.html import mark_safe
from hooks.templatehook import hook
from hooks.templatetags.hooks_tags import template_hook_collect
from . import utils_hooks
class HookTagTest(TestCase):
    """Tests for the ``{% hook %}`` template tag and ``template_hook_collect``."""
    def setUp(self):
        # Start every test from a clean registry for both hook mechanisms.
        self.hook_name = 'myhook'
        hook.unregister_all(self.hook_name)
        utils_hooks.myhook.unregister_all()
    def test_hook_tag(self):
        """The tag should pass positional args, kwargs and the template context."""
        def func(context, *args, **kwargs):
            self.assertEqual(args, ("foobar", ))
            self.assertEqual(kwargs, {'bar': "bar", })
            self.assertEqual(context['foo'], "foo")
            return "hello"
        hook.register(self.hook_name, func)
        out = Template(
            "{% load hooks_tags %}"
            "{% hook hook_name 'foobar' bar='bar' %}"
        ).render(Context({"hook_name": self.hook_name, "foo": "foo", }))
        self.assertEqual(out, u"hello")
    def test_hook_tag_many(self):
        """
        Should join multiple responses
        """
        def func_a(*args, **kwargs):
            return "hello"
        def func_b(*args, **kwargs):
            return "goodbye"
        # Registration order determines the order of the joined output.
        hook.register(self.hook_name, func_a)
        hook.register(self.hook_name, func_b)
        out = Template(
            "{% load hooks_tags %}"
            "{% hook hook_name 'foobar' %}"
        ).render(Context({"hook_name": self.hook_name, }))
        self.assertEqual(out, "hello\ngoodbye")
    def test_hook_tag_escaped(self):
        """
        Should escape responses (if they are not marked as safe)
        """
        def func(*args, **kwargs):
            return "<span>hello</span>"
        hook.register(self.hook_name, func)
        out = Template(
            "{% load hooks_tags %}"
            "{% hook hook_name 'foobar' %}"
        ).render(Context({"hook_name": self.hook_name, }))
        # NOTE(review): the expected value is *unescaped* although the
        # docstring says the response should be escaped — this looks like
        # HTML entities (&lt;span&gt;...) lost in transit; confirm upstream.
        self.assertEqual(out, "<span>hello</span>")
    def test_hook_tag_mark_safe(self):
        """
        Should not escape safe strings
        """
        def func(*args, **kwargs):
            return mark_safe("<span>hello</span>")
        hook.register(self.hook_name, func)
        out = Template(
            "{% load hooks_tags %}"
            "{% hook hook_name 'foobar' %}"
        ).render(Context({"hook_name": self.hook_name, }))
        self.assertEqual(out, "<span>hello</span>")
    def test_template_hook_collect(self):
        """Collect should forward context/args/kwargs and return the response."""
        def func(context, *args, **kwargs):
            self.assertEqual(context, "context")
            self.assertEqual(args, ("foo", ))
            self.assertEqual(kwargs, {'extra': "bar", })
            return "hello"
        utils_hooks.myhook.register(func)
        res = template_hook_collect(utils_hooks, 'myhook', "context", "foo", extra="bar")
        self.assertEqual(res, u"hello")
        # Unknown hook names collect to an empty string rather than raising.
        res = template_hook_collect(utils_hooks, 'badhook')
        self.assertEqual(res, u"")
    def test_template_hook_collect_escaped(self):
        """Collect should escape markup in responses."""
        def func(*args, **kwargs):
            return "<span>hello</span>"
        utils_hooks.myhook.register(func)
        res = template_hook_collect(utils_hooks, 'myhook', "context", "foo", extra="bar")
        # NOTE(review): same as above — expected value appears unescaped,
        # likely due to lost HTML entities; confirm against upstream source.
        self.assertEqual(res, "<span>hello</span>")
| nitely/django-hooks | hooks/tests/tests_templatetags.py | tests_templatetags.py | py | 3,337 | python | en | code | 16 | github-code | 6 | [
{
"api_name": "django.test.TestCase",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "hooks.templatehook.hook.unregister_all",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "hooks.templatehook.hook",
"line_number": 18,
"usage_type": "name"
},
{
... |
74606059388 | from __future__ import unicode_literals
try:
from urllib2 import Request
except ImportError:
from urllib.request import Request
from mock import MagicMock, patch
from requests_kerberos import HTTPKerberosAuth
from grafana_dashboards.client.connection import (KerberosConnection,
BearerAuthConnection,
BasicAuthConnection,
SSLAuthConnection)
__author__ = 'Jakub Plichta <jakub.plichta@gmail.com>'
class Capture(object):
    """
    Equality stand-in that records whatever it is compared against.

    Pass an instance where an argument is expected in a mock assertion: the
    comparison always "succeeds", and the compared-against object is kept on
    ``self.value`` so the test can inspect it afterwards.
    """
    def __eq__(self, other):
        """
        Record ``other`` and report equality so the assertion passes.

        :return: True
        """
        self.value = other
        return True
def test_connection():
    """BasicAuthConnection should POST JSON with a Basic-auth header."""
    connection = BasicAuthConnection('username', 'password', 'https://host')
    # Replace the urllib opener so no real network request is made.
    connection._opener = MagicMock()
    # noinspection PyProtectedMember
    connection._opener.open().read.return_value = '{"hello":"world"}'
    assert connection.make_request('/uri', {'it\'s': 'alive'}) == {'hello': 'world'}
    # Expected request: JSON body plus base64-encoded username:password.
    request = Request('https://host/uri',
                      '{"it\'s": "alive"}',
                      headers={
                          'Content-type': 'application/json',
                          'Accept': 'application/json',
                          'Authorization': b'Basic dXNlcm5hbWU6cGFzc3dvcmQ='
                      })
    # Capture the Request object actually passed to the opener.
    capture = Capture()
    # noinspection PyProtectedMember
    connection._opener.open.assert_called_with(capture)
    assert request.get_full_url() == capture.value.get_full_url()
    assert request.header_items() == capture.value.header_items()
    assert request.get_method() == capture.value.get_method()
    # The connection is expected to send the body as UTF-8 encoded bytes.
    assert request.data.encode('utf-8') == capture.value.data
def test_connection_with_token():
    """BearerAuthConnection should POST JSON with a Bearer token header."""
    connection = BearerAuthConnection('token', 'https://host')
    # Replace the urllib opener so no real network request is made.
    connection._opener = MagicMock()
    # noinspection PyProtectedMember
    connection._opener.open().read.return_value = '{"hello":"world"}'
    assert connection.make_request('/uri', {'it\'s': 'alive'}) == {'hello': 'world'}
    # Expected request: JSON body plus 'Bearer token' authorization header.
    request = Request('https://host/uri',
                      '{"it\'s": "alive"}',
                      headers={
                          'Content-type': 'application/json',
                          'Accept': 'application/json',
                          'Authorization': 'Bearer token'
                      })
    # Capture the Request object actually passed to the opener.
    capture = Capture()
    # noinspection PyProtectedMember
    connection._opener.open.assert_called_with(capture)
    assert request.get_full_url() == capture.value.get_full_url()
    assert request.header_items() == capture.value.header_items()
    assert request.get_method() == capture.value.get_method()
    # The connection is expected to send the body as UTF-8 encoded bytes.
    assert request.data.encode('utf-8') == capture.value.data
@patch('requests.post')
def test_connection_with_kerberos(post):
    """KerberosConnection should POST via requests with HTTPKerberosAuth."""
    connection = KerberosConnection('https://host')
    post().json.return_value = {'hello': 'world'}
    assert connection.make_request('/uri', {'it\'s': 'alive'}) == {'hello': 'world'}
    # Capture the auth object handed to requests.post and check its type.
    capture = Capture()
    post.assert_called_with('https://host/uri', auth=capture, json={"it's": 'alive'}, verify=False)
    assert isinstance(capture.value, HTTPKerberosAuth)
@patch('requests.post')
def test_connection_with_sslauth(post):
    """SSLAuthConnection should POST via requests with a client certificate."""
    # NOTE(review): ('/fake/cert') is a parenthesized string, not a tuple —
    # the assertion below confirms the connection receives it as a plain str.
    connection = SSLAuthConnection('https://host', ('/fake/cert'))
    post().json.return_value = {'hello': 'world'}
    assert connection.make_request('/uri', {'it\'s': 'alive'}) == {'hello': 'world'}
    post.assert_called_with('https://host/uri', json={"it's": 'alive'}, cert='/fake/cert')
| jakubplichta/grafana-dashboard-builder | tests/grafana_dashboards/client/test_connection.py | test_connection.py | py | 3,770 | python | en | code | 141 | github-code | 6 | [
{
"api_name": "grafana_dashboards.client.connection.BasicAuthConnection",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "mock.MagicMock",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "urllib.request.Request",
"line_number": 39,
"usage_type": "call"
... |
35841585660 | # from time import sleep
import os
# from reply1024 import postreply1024
# import time
from datetime import datetime, timedelta
# Current time shifted by +8 hours (presumably to convert to UTC+8 — confirm).
tday = datetime.now()+timedelta(hours = 8)
print(tday.hour)
# NOTE(review): the formatted string is discarded — strftime returns a new
# string and does not modify ``tday``; this line has no effect.
tday.strftime("%Y-%m-%d %H:%M:%S")
print(tday)
# Create the tmp directory if it does not exist yet.
if os.path.isdir("tmp")==0:
    os.mkdir("tmp")
# Read the counter, add 60, and write the new total back in place.
# NOTE(review): mode "r+" fails if tmp/test.txt does not exist, and
# int(con) fails if the file is empty — the file must be pre-seeded.
with open("./tmp/test.txt","r+") as f:
    # f.write(f"{tday} appended one line successfully\n")
    con=f.read()
    a=60
    f.seek(0)
    f.truncate()
    f.write(f"{int(con)+a}")
# print(f"is the file closed: {f.closed}")
# Re-open and read back the updated counter value.
with open("./tmp/test.txt") as f:
    con=f.read()
# print(f"is the file closed: {f.closed}")
print("读取内容:",con) | Mmingdev/reply-1024 | test.py | test.py | py | 630 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.isdir... |
2523947822 | import argparse
import torch
from model import Pretrain_SegmentationNet, DPRAN
import os
from data.dataloader import create_dataloader
from train import net_Pretrain, DPRAN_Train
import segmentation_models_pytorch as smp
def main():
    """Run the two-stage DPRAN training pipeline.

    Stage 1 pre-trains ``Pretrain_SegmentationNet`` on CEUS frames; stage 2
    transfers its encoder weights into ``DPRAN`` and trains the full model.
    The test results and both model states are saved to ``checkpoint/DpRAN.pt``.
    """
    parser = argparse.ArgumentParser(description='DPRAN')
    parser.add_argument('--num_classes', default=1, type=int, help='Number of output classes [2]')
    parser.add_argument('--num_channels', default=1, type=int, help='Dimension of the input CEUS frames')
    parser.add_argument('--lr_pre', default=0.0002, type=float, help='Initial learning rate [0.0002]')
    parser.add_argument('--lr', default=0.0002, type=float, help='Initial learning rate [0.0002]')
    parser.add_argument('--num_epochs', default=50, type=int, help='Number of total training epochs [40]')
    parser.add_argument('--num_epochs_pre', default=50, type=int, help='Number of total training epochs [40]')
    parser.add_argument('--dataset', default='data', type=str, help='Dataset folder name')
    args = parser.parse_args()
    save_path = os.path.join('checkpoint')
    os.makedirs(save_path, exist_ok=True)
    # Channel widths of the encoder stages, shared by both networks.
    layers = [32, 32, 64, 128]
    # data load and split
    train_loader, val_loader, test_loader = create_dataloader(dataset=args.dataset, batch_size=1, is_pretraining=True)
    # stage 1: pre-train the segmentation network with a binary Dice loss.
    net = Pretrain_SegmentationNet(n_channels=args.num_channels, n_classes=args.num_classes, layers=layers)
    net.cuda()
    criterion = smp.losses.DiceLoss('binary', classes=None, log_loss=False, from_logits=True, smooth=0.0,
                                    ignore_index=None, eps=1e-07)
    # Optimizer with exponentially decaying learning rate.
    optimizer = torch.optim.Adam(net.parameters(), lr=args.lr_pre)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9862, last_epoch=-1)
    # Parameters
    epoch_start = 0
    epoch_end = args.num_epochs_pre
    print("Start net Pre-Training...")
    net = net_Pretrain(net, criterion, optimizer, scheduler, epoch_start, epoch_end, train_loader, val_loader,
                       save_path)
    # stage 2: build DPRAN and warm-start its CEUS encoder from stage 1.
    print("Start DPRAN Training...")
    model = DPRAN(n_channels=args.num_channels, n_classes=args.num_classes, layers=layers)
    model.encoder_ceus.load_state_dict(net.encoder.state_dict())
    model.cuda()
    # Optimizer and loss (fresh optimizer/scheduler for the new model).
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9862, last_epoch=-1)
    epoch_end = args.num_epochs
    # Switch the shared datasets out of pre-training mode for stage 2.
    train_loader.dataset.is_pretraining = False
    val_loader.dataset.is_pretraining = False
    test_loader.dataset.is_pretraining = False
    test_result, trained_model = DPRAN_Train(model, net, criterion, optimizer, scheduler,
                                             epoch_start, epoch_end, train_loader, val_loader,
                                             test_loader)
    # Persist test metrics plus both model states in a single checkpoint.
    torch.save({'test_rec': test_result, 'DpRAN': trained_model, 'Pretrain_SegmentationNet': net.state_dict()},
               os.path.join(save_path, 'DpRAN' + '.pt'))
if __name__ == "__main__":
    # Pin training to the first GPU and seed the RNGs for reproducibility.
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    SEED = 0
    torch.manual_seed(SEED)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(SEED)
    main()
| wanpeng16/DpRAN | main.py | main.py | py | 3,242 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"... |
72143905788 | from typing import Dict, List, Union
import csv
import os
from unittest import result
def load_csv(
    file_path: str,
    delimiter: str = ',',
    has_header: bool = True,
    try_casting: bool = True
) -> List[Dict]:
    '''
    This function loads a csv file from the given path into row dictionaries.

    Args:
        file_path: (str) the path to the given csv file.
        delimiter: (str) the string delimiter between columns of the csv.
        has_header: (bool) flag to indicate if the file has headers. When
            False, rows are keyed by integer column position. [Default True]
        try_casting: (bool) when True, each cell is passed through
            ``cast_to_num`` so numeric-looking strings become int/float.
    Output:
        Returns a List of dictionaries representing each row. The keys of each
        dictionary are the column names (or column indices when
        ``has_header`` is False).
    Throws:
        - FileNotFoundError
    '''
    if not os.path.exists(file_path):
        print(f'The path {file_path} does not exists!')
        raise FileNotFoundError
    results = []
    # newline='' is the documented way to hand files to the csv module.
    with open(file_path, 'r', newline='') as f:
        csv_reader = csv.reader(f, delimiter=delimiter)
        # Bug fix: previously ``headers`` was only bound when has_header was
        # True, so has_header=False crashed with NameError on the first row.
        # The default also avoids StopIteration on a completely empty file.
        headers = next(csv_reader, None) if has_header else None
        for row in csv_reader:
            if try_casting:
                mapped_row = [cast_to_num(item) for item in row]
            else:
                mapped_row = row
            # Fall back to positional integer keys for header-less files.
            keys = headers if headers is not None else range(len(mapped_row))
            results.append({key: item for key, item in zip(keys, mapped_row)})
    return results
def cast_to_num(value: str) -> Union[str, int, float]:
    """Interpret ``value`` as an int, then a float; otherwise return it unchanged.

    Integer parsing is attempted first so that "3" becomes ``3`` rather than
    ``3.0``; anything that parses as neither is handed back verbatim.
    """
    for caster in (int, float):
        try:
            return caster(value)
        except ValueError:
            pass
    return value
| levensworth/udesa-pc-tutorial | mini-proyectos/song_recommendation/text_processing.py | text_processing.py | py | 1,864 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "os.path.exists",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number... |
9002769780 |
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 26 10:21:15 2019
This is the modl with Keras framework
@author: ago
"""
from __future__ import print_function
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
from IPython.display import display, HTML
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Reshape
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils
from keras.layers import LSTM
# Set some standard parameters upfront
pd.options.display.float_format = '{:.1f}'.format
sns.set() # Default seaborn look and feel
plt.style.use('ggplot')
print('keras version ', keras.__version__)
# Same labels will be reused throughout the program
LABELS = ['Frequency','V RMS','I ph.angle','V ph.angle','I RMS']
# The number of steps within one time segment
TIME_PERIODS = 80
# The steps to take from one segment to the next; if this value is equal to
# TIME_PERIODS, then there is no overlap between the segments
# NOTE(review): TIME_PERIODS and STEP_DISTANCE are not referenced anywhere
# below (the windowing loop uses N_TIME_STEPS/step instead) — possibly
# vestigial; confirm before removing.
STEP_DISTANCE = 40
def read_data(file_path):
    """Load a header-less four-column CSV and drop incomplete rows.

    Args:
        file_path: Path to a CSV file whose columns are, in order,
            Time, value a, value b, and label.

    Returns:
        pd.DataFrame with the four named columns; any row containing a
        missing value (which would show up as NaN) is removed.
    """
    column_names = ['Time', 'value a', 'value b', 'label']
    frame = pd.read_csv(file_path, header=None, names=column_names)
    return frame.dropna(axis=0, how='any')
def convert_to_float(x):
    """Cast ``x`` to float, returning NaN when the conversion fails.

    Fixes two defects in the original: ``np.float`` was a deprecated alias
    removed in NumPy 1.24 (plain ``float`` is the documented replacement),
    and the bare ``except:`` swallowed every exception; only the conversion
    errors (TypeError/ValueError) are caught now.
    """
    try:
        return float(x)
    except (TypeError, ValueError):
        return np.nan
def show_basic_dataframe_info(dataframe):
    """Print the column and row counts of ``dataframe``."""
    n_rows, n_cols = dataframe.shape
    print('Number of columns in the dataframe: %i' % n_cols)
    print('Number of rows in the dataframe: %i\n' % n_rows)
# Load data set containing all the data from csv
# NOTE(review): ``verticalStack`` is not defined anywhere in this file — this
# line raises NameError when the script is run standalone. It was presumably
# built in an interactive session (e.g. by concatenating per-file frames);
# confirm and replace with an explicit load, e.g. via read_data().
df = verticalStack
# Describe the data
show_basic_dataframe_info(df)
df.head(20)
# Define column name of the label vector
LABEL = 'label'
# Transform the labels from String to Integer via LabelEncoder
le = preprocessing.LabelEncoder()
# Add a new column to the existing DataFrame with the encoded values
df[LABEL] = le.fit_transform(df['label'].values.ravel())
RANDOM_SEED =50
N_TIME_STEPS = 200
N_FEATURES = 2
classes= 4
step = 1
# Slide a window of N_TIME_STEPS samples (stride ``step``) over the series,
# collecting the two feature channels and the window's modal label.
segments = []
labels = []
for i in range(1, len(df) - N_TIME_STEPS, step):
    x1 = df['value a'].values[i: i + N_TIME_STEPS]
    x2 = df['value b'].values[i: i + N_TIME_STEPS]
    label = stats.mode(df['label'][i: i + N_TIME_STEPS])[0][0]
    segments.append([x1,x2])
    labels.append(label)
# Reshape to (n_windows, N_TIME_STEPS, N_FEATURES) and one-hot encode labels.
reshaped_segments = np.asarray(segments, dtype= np.float32).reshape(-1, N_TIME_STEPS, N_FEATURES)
labels = np.asarray(pd.get_dummies(labels), dtype = np.float32)
# 80/20 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(reshaped_segments, labels, test_size=0.2, random_state=RANDOM_SEED)
print('x_train shape: ', X_train.shape)
print( X_train.shape[0], 'training samples')
print('y_train shape: ', y_train.shape)
# Single-layer LSTM classifier over the (N_TIME_STEPS, N_FEATURES) windows.
model = Sequential()
model.add(LSTM(200, input_shape=(X_train.shape[1], X_train.shape[2])))
model.add(Dropout(0.3))
# model.add(LSTM(70))
# model.add(Dropout(0.3))
# NOTE(review): Dense has no activation while the loss below is
# categorical_crossentropy, which expects class probabilities — confirm
# whether activation='softmax' is missing here.
model.add(Dense(classes))
# Save the best model on validation loss; stop early once training
# accuracy plateaus.
callbacks_list = [
    keras.callbacks.ModelCheckpoint(
        filepath='best_model.{epoch:02d}-{val_loss:.2f}.h5',
        monitor='val_loss', save_best_only=True),
    keras.callbacks.EarlyStopping(monitor='acc', patience=1)
]
model.compile(loss='categorical_crossentropy',
              optimizer='adam', metrics=['accuracy'])
print(model.summary())
# Hyper-parameters
BATCH_SIZE = 1024
EPOCHS =10
# Enable validation to use ModelCheckpoint and EarlyStopping callbacks.
history = model.fit(X_train,
                    y_train,
                    batch_size=BATCH_SIZE,
                    epochs=EPOCHS,
                    callbacks=callbacks_list,
                    validation_split=0.2,
                    verbose=1)
# Plot training/validation accuracy and loss curves on a single figure.
plt.figure(figsize=(6, 4))
plt.plot(history.history['acc'], 'r', label='Accuracy of training data')
plt.plot(history.history['val_acc'], 'b', label='Accuracy of validation data')
plt.plot(history.history['loss'], 'r--', label='Loss of training data')
plt.plot(history.history['val_loss'], 'b--', label='Loss of validation data')
plt.title('Model Accuracy and Loss')
plt.ylabel('Accuracy and Loss')
plt.xlabel('Training Epoch')
plt.ylim(0)
plt.legend()
plt.show()
def show_confusion_matrix(validations, predictions):
matrix = metrics.confusion_matrix(validations, predictions)
plt.figure(figsize=(6, 4))
sns.heatmap(matrix,
cmap='coolwarm',
linecolor='white',
linewidths=1,
xticklabels=LABELS,
yticklabels=LABELS,
annot=True,
fmt='d')
plt.title('Confusion Matrix')
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
plt.show()
y_pred_test = model.predict(X_test)
# Take the class with the highest probability from the test predictions
max_y_pred_test = np.argmax(y_pred_test, axis=1)
max_y_test = np.argmax(y_test, axis=1)
show_confusion_matrix(max_y_test, max_y_pred_test)
print(classification_report(max_y_test, max_y_pred_test)) | Dirbas/PMU_classifier | Keras_PMU.py | Keras_PMU.py | py | 5,451 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "pandas.options",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "seaborn.set",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotli... |
8855989929 | # Importamos Pillow
from PIL import Image
import glob
# Importamos Pandas
import pandas as pd
import csv
# TIME
import time
import datetime
from time import gmtime, strftime
# Importamos Pytesseract
import pytesseract
import os
path = "./output/media"
for root,dirs,files in os.walk(path):
for infile in [f for f in files if f.lower().endswith('.jpg')]:
file, ext = os.path.splitext(infile)
full_path = os.path.join(root,infile)
a = root[15:]
b = full_path[full_path.rfind("/")+1:]
print ("-------------------------------------------------------")
try:
img = Image.open(full_path)
texto = pytesseract.image_to_string(img)
if len(texto) is 0:
c = 'none'
else:
txt = texto.replace("\n"," ")
c = txt
row = [a,b,c]
except:
print ("Lo siento, no es una imagen legible")
c = 'No legible'
row = [a,b,c]
with open('./output/media/data_ocr.csv', 'a') as csvFile:
writer = csv.writer(csvFile)
print (row)
writer.writerow(row)
csvFile.close()
df = pd.read_csv("./output/media/data_ocr.csv", sep=',')
print (df) | bisite/Telegram-History-dump | telegram/img_ocr.py | img_ocr.py | py | 1,331 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.walk",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number... |
31066666255 | from django.urls import path
from . import views
urlpatterns = [
path('', views.loginView, name='login'),
path('register/', views.registerView, name='register'),
path('logout/', views.logoutView, name='logout'),
path('akun/', views.update_akunView, name='update_akun'),
path('register/berhasil', views.berhasilView.as_view(aksi='register_berhasil'), name='register_berhasil'),
path('akun/berhasil', views.berhasilView.as_view(aksi='update_akun_berhasil'), name='update_akun_berhasil'),
] | mugiwara35/smart-plant | akun/urls.py | urls.py | py | 512 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
887825257 | from django.shortcuts import render, redirect
from django.http import Http404, JsonResponse
from django.views import View
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
# Decorators
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from .decorators import allowerd_users
from .decorators import unauthenticated_user
from datetime import date
import pandas as pd
# Models and Forms
from backend.models import Info, Book, Student, Issue, Reservation, Class
from backend.fields import book_fields, class_fields, student_fields, reservation_fields, issue_fields
from .forms import BookForm, ClassForm, StudentForm, IssueForm, ReservationForm, LoginForm
from .custom import get_fields
# Excel to JSON parser
def parser_view(request):
info = Info.objects.all().first()
data = None
if request.method == "POST":
if 'file' in request.FILES:
dataFrame = pd.read_excel(request.FILES['file'], engine = "openpyxl")
data = dataFrame.to_json(indent = 4, orient = "records", force_ascii = False)
else:
return redirect("parser-view")
context = { "json": data, "school_name": info.school_name }
return render(request, "apis/index.html", context)
@unauthenticated_user
@csrf_exempt
def login_view(request):
form = LoginForm()
info = Info.objects.all().first()
if request.method == 'POST':
form = LoginForm(request, data = request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(request, username = username, password = password)
if user is not None:
login(request, user)
return redirect('home-view')
context = { "form": form, "messages": messages, "school_name": info.school_name }
return render(request, 'registration/login.html', context)
def logout_view(request):
logout(request)
return redirect('login-view')
def home_view(request):
info = Info.objects.all().first()
books = Book.objects.all()
tableFields = book_fields()
fields = []
# Model field list
for field in Book._meta.get_fields():
if field.name != "reservation":
fields.append(field.name)
context = {
"querySet": books,
"fields": fields,
"tfields": tableFields[0],
"tlength": len(fields),
"school_name": info.school_name,
}
return render(request, "home.html", context)
def error_view(request):
return render(request, "components/error.html")
def tutorial_view(request):
info = Info.objects.all().first()
context = {
"school_name": info.school_name,
}
return render(request, "tutorial/index.html", context)
class BookGPView(View):
@method_decorator(allowerd_users(["book-editing"]))
def get(self, request):
info = Info.objects.all().first()
books = Book.objects.all()
tableFields = book_fields()
form = BookForm()
fields = []
# Model field list
for field in Book._meta.get_fields():
if field.name != "reservation":
fields.append(field.name)
context = {
"fields": fields,
"querySet": books,
"form": form,
"tfields": tableFields[0],
"tlength": len(fields) + 1,
"school_name": info.school_name,
}
return render(request, "book/index.html", context)
@method_decorator(allowerd_users(["book-editing"]))
def post(self, request):
form = BookForm()
if request.method == "POST":
form = BookForm(request.POST)
if form.is_valid():
form.save()
return redirect("book-view")
else:
return form.errors
class BookPDView(View):
def get_object(self, pk):
try:
return Book.objects.get(id = pk)
except Book.DoesNotExist:
raise Http404
@method_decorator(allowerd_users(["book-editing"]))
def get(self, request, pk):
return self.get_object(pk)
@method_decorator(allowerd_users(["book-editing"]))
def delete(self, request, pk):
book = self.get_object(pk)
error = JsonResponse({"error": "Sve knjige nisu vraćene!"})
if len(Reservation.objects.all()) == 0: # If there is no book's at all
book.delete()
return JsonResponse(dict(code = 204, content = "Knjiga je izbrisana"))
elif not Reservation.objects.get(book = book): # If the selected book is not reservated
book.delete()
return JsonResponse(dict(code = 204, content = "Knjiga je izbrisana"))
else: # If the all books of this type are returned
reservation = Reservation.objects.get(book = book)
if reservation.issued == reservation.returned:
book.delete()
return JsonResponse(dict(code = 204, content = "Knjiga je izbrisana"))
error.status_code = 403
return error
class ClassGPView(View):
@method_decorator(allowerd_users(["class-editing"]))
def get(self, request):
info = Info.objects.all().first()
classes = Class.objects.all()
tableFields = class_fields()
form = ClassForm()
fields = []
# Model field list
for field in Class._meta.get_fields():
if field.name != "student":
fields.append(field.name)
context = {
"fields": fields,
"querySet": classes,
"form": form,
"tfields": tableFields[0],
"tlength": len(fields) + 1,
"school_name": info.school_name,
}
return render(request, "class/index.html", context)
@method_decorator(allowerd_users(["class-editing"]))
def post(self, request):
form = ClassForm()
if request.method == "POST":
form = ClassForm(request.POST)
if form.is_valid():
form.save()
return redirect("class-view")
else:
return form.errors
class ClassPDView(View):
def get_object(self, pk):
try:
return Class.objects.get(id = pk)
except Class.DoesNotExist:
raise Http404
@method_decorator(allowerd_users(["class-editing"]))
def get(self, request, pk):
return self.get_object(pk)
@method_decorator(allowerd_users(["class-editing"]))
def delete(self, request, pk):
classes = self.get_object(pk)
classes.delete()
return JsonResponse(dict(code = 204, content = "Odjeljenje je izbrisano!"))
class StudentGPView(View):
@method_decorator(allowerd_users(["student-editing"]))
def get(self, request):
info = Info.objects.all().first()
students = Student.objects.all()
tableFields = student_fields()
form = StudentForm()
fields = []
# Model fields
for field in Student._meta.get_fields():
if field.name != "issue":
fields.append(field.name)
context = {
"fields": fields,
"querySet": students,
"form": form,
"tfields": tableFields[0],
"tlength": len(fields) + 1,
"school_name": info.school_name,
}
return render(request, "student/index.html", context)
@method_decorator(allowerd_users(["student-editing"]))
def post(self, request):
form = StudentForm()
if request.method == "POST":
form = StudentForm(request.POST)
if form.is_valid():
form.save()
return redirect("student-view")
else:
return form.errors
class StudentPDView(View):
def get_object(self, pk):
try:
return Student.objects.get(id = pk)
except Student.DoesNotExist:
raise Http404
@method_decorator(allowerd_users(["student-editing"]))
def get(self, request, pk):
return self.get_object(pk)
@method_decorator(allowerd_users(["student-editing"]))
def delete(self, request, pk):
student = self.get_object(pk)
student.delete()
return JsonResponse(dict(code = 204, content = "Učenik je izbrisan!"))
class ReservationGPView(View):
@method_decorator(allowerd_users(["reservation-editing"]))
def get(self, request):
info = Info.objects.all().first()
reservation = Reservation.objects.filter(professor = request.user.get_full_name())
tableFields = reservation_fields()
form = ReservationForm()
fields = get_fields(Reservation, "issue")
context = {
"fields": fields,
"querySet": reservation,
"form": form,
"tfields": tableFields[0],
"tlength": len(fields) + 1,
"school_name": info.school_name,
}
return render(request, "reservation/index.html", context)
@method_decorator(allowerd_users(["reservation-editing"]))
def post(self, request):
info = Info.objects.all().first()
reservation = Reservation.objects.all()
form = ReservationForm()
fields = get_fields(Reservation, "issue")
if request.method == "POST":
form = ReservationForm(request.POST)
if form.is_valid():
# Updating the book DB
book = Book.objects.get(id = form.cleaned_data["book"].id)
book.quantity -= form.cleaned_data["quantity"]
book.save()
# Saving the user
data = form.save(commit = False)
data.professor = request.user.get_full_name()
data.save()
return redirect("reservation-view")
context = {
"fields": fields,
"querySet": reservation,
"form": form,
"school_name": info.school_name,
}
return render(request, "reservation/index.html", context)
class ReservationPDView(View):
def get_object(self, pk):
try:
return Reservation.objects.get(id = pk)
except Reservation.DoesNotExist:
raise Http404
@method_decorator(allowerd_users(["reservation-editing"]))
def get(self, request, pk):
return self.get_object(pk)
@method_decorator(allowerd_users(["reservation-editing"]))
def delete(self, request, pk):
reservation = self.get_object(pk)
error = JsonResponse({"error": "Sve knjige nisu vraćene!"})
if request.is_ajax():
if reservation.issued == reservation.returned:
book = Book.objects.get(id = reservation.book.id)
book.quantity += reservation.quantity
book.save()
reservation.delete()
return JsonResponse(dict(code = 204, content = "Rezervacija je izbrisana!"))
error.status_code = 403
return error
class IssueGPView(View):
@method_decorator(allowerd_users(["issue-editing"]))
def get(self, request):
info = Info.objects.all().first()
issues = Issue.objects.all()
tableFields = issue_fields()
form = IssueForm()
fields = [field.name for field in Issue._meta.get_fields()]
context = {
"fields": fields,
"querySet": issues,
"form": form,
"tfields": tableFields[0],
"tlength": len(fields) + 1,
"school_name": info.school_name,
}
return render(request, "issue/index.html", context)
@method_decorator(allowerd_users(["issue-editing"]))
def post(self, request):
info = Info.objects.all().first()
issues = Issue.objects.all()
form = IssueForm()
fields = [field.name for field in Issue._meta.get_fields()]
if request.method == "POST":
form = IssueForm(request.POST)
if form.is_valid():
issue = Reservation.objects.get(id = form.cleaned_data["reservation"].id)
issue.issued += 1
issue.save()
form.save()
return redirect("issue-view")
context = {
"fields": fields,
"querySet": issues,
"form": form,
"school_name": info.school_name,
}
return render(request, "issue/index.html", context)
class IssuePDView(View):
# Getting the Issue object
def get_object(self, pk):
try:
return Issue.objects.get(id = pk)
except Issue.DoesNotExist:
raise Http404
@method_decorator(allowerd_users(["issue-editing"]))
def get(self, request, pk):
return self.get_object(pk)
@method_decorator(allowerd_users(["issue-editing"]))
def put(self, request, pk):
issue = self.get_object(pk)
data = {}
if request.is_ajax():
reservation = issue.reservation
if issue.returnStatus:
# Updating the issues DB to the latest info
issue.returnStatus = False
issue.returnDate = None
issue.debt = 0
reservation.returned -= 1
else:
issue.returnStatus = True
issue.returnDate = date.today()
if date.today() > reservation.endDate:
delta = date.today() - reservation.endDate
issue.debt = delta.days * .5
reservation.returned += 1
# Saving the changes
issue.save()
reservation.save()
# Preparing the data for returning into template
data['id'] = issue.id
data['returnStatus'] = issue.returnStatus
data['returnDate'] = issue.returnDate
data['debt'] = issue.debt
data['content'] = "Uspješno ste izmjenili podatke o knjizi!"
return JsonResponse(data)
@method_decorator(allowerd_users(["issue-editing"]))
def delete(self, request, pk):
issue = self.get_object(pk)
reservation = issue.reservation
reservation.issued -= 1
reservation.returned -=1
reservation.save()
issue.delete()
return JsonResponse(dict(code = 204, content = "Učenik je izbrisan!"))
| analitika-tech/library | system/frontend/views.py | views.py | py | 14,674 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "backend.models.Info.objects.all",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "backend.models.Info.objects",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "backend.models.Info",
"line_number": 25,
"usage_type": "name"
},
{
... |
16439872003 | from flask import Flask
import dash
from dash import dcc
from dash import html
from dash.dependencies import Input, Output
import plotly.graph_objs as go
import pandas as pd
import os
inbodyDf = pd.read_csv(os.path.join(os.path.dirname(__file__), os.pardir, 'data', 'inbody.csv'))
courseDf = pd.read_csv(os.path.join(os.path.dirname(__file__), os.pardir, 'data', 'courses.csv'))
external_stylesheets=['https://codepen.io/chriddyp/pen/bWLwgP.cs']
server = Flask(__name__)
app = dash.Dash(__name__, server=server, external_stylesheets=external_stylesheets)
app.title='[Youwon Shin]'
app.layout = html.Div(className='body',children=[
html.Div(className='header',children=[
html.H1(className='h1',children='Welcome to YOUWON\'s WORLD!',style={'color':'white'})
]),
html.Div(className='firstDiv',children=[
html.Div(className='Intro',children=[
html.H1(className='h1',children='Youwon Shin',style={'color':'#8977ad'}),html.Br(),
html.P(className='IntroArticle',children=['Hello, I\'m youwon shin.',html.Br(),'I am currently a M.S. student in ',
html.B(children='Computer Science major'), ' at KAIST and supervised by Prof.Uchin Lee in ',
html.A(className='a',children='ICLab@KAIST', href="http://ic.kaist.ac.kr/wiki/wiki.cgi?Main"), '.', html.Br(),
'I received my B.S. degree in ', html.A(className='a',children='Mechanical and Biomedical Engineering', href="http://mbe.ewha.ac.kr/"),
' from Ewha Womans University in 2021.', html.Br(),html.Br(),html.Br(),
html.B(children='Contact: '),
html.A(className='email',children='youwon.shin@kaist.ac.kr',href="mailto:youwon.shin@kaist.ac.kr")])
]),
html.Div(className='Img', children=[
html.Img(className='profimg',src= app.get_asset_url('profile.jpg'), style={'alt':'Profile image'})
])
]),
html.Div(className='secondDiv',children=[
html.Div(className='leftDiv',children=[
html.H2(className='h2',children='My Personality Type'),
html.Div(className='leftChild',children=[
html.Img(className='mbtiImg',src=app.get_asset_url('ENFJ.png'), style={'alt':'ENFJ'}),
html.Span(className='MBTI',children=[
html.Br(),
html.B('E'), 'xtroverted', html.Br(),
'I', html.B('N'), 'tution', html.Br(),
html.B('F'), 'eelings', html.Br(),
html.B('J'), 'udgment'
])
])
]),
html.Div(className='rightDiv',children=[
html.H2(className='h2',children='Inbody Trend'),
html.Div(className='chartbox',children=[
dcc.Dropdown(
id="Value-selector",
options=[{
'label': i,
'value': i
} for i in inbodyDf['Type'].unique()],
value="All",
placeholder="Select Type",
),
dcc.Graph(id='inbody-graph')
]),
],
style={
'width' : '100%',
'min-width':'35rem'
})
]),
html.Div(className='thirdDiv',children=[
html.Div(className='leftDiv',children=[
html.H2(className='h2',children='Course Schedule (Fall, 2021)'),
html.Table(className='table1',children=[
html.Tbody([
html.Tr([
html.Th(style={'background-color':"#9283ad", 'width':'80px'}),
html.Th('MON', style={'background-color':"#9283ad", 'width':'80px'}),
html.Th('TUE', style={'background-color':"#9283ad", 'width':'80px'}),
html.Th('WED', style={'background-color':"#9283ad", 'width':'80px'}),
html.Th('THU', style={'background-color':"#9283ad", 'width':'80px'}),
html.Th('FRI', style={'background-color':"#9283ad", 'width':'80px'})
],style={'height':'35px'}),
html.Tr([
html.Td('9:00-10:30'),html.Td(),html.Td(),html.Td(),html.Td(),html.Td()
]),
html.Tr([
html.Td('10:30-12:00'),html.Td(),html.Td(['Data', html.Br(),'Visualization']),html.Td(),html.Td(['Data', html.Br(),'Visualization']),html.Td()
]),
html.Tr([
html.Td('12:00-13:00'),html.Td('~LUNCH TIME~', colSpan=5,style={'background-color': '#d5c9dd','font-weight':'bold'})
]),
html.Tr([
html.Td('13:00-14:30'),html.Td(['Advanced', html.Br(), 'Data Mining']),html.Td(),html.Td(['Advanced', html.Br(), 'Data Mining']),html.Td(),html.Td()
]),
html.Tr([
html.Td('14:30-16:00'),html.Td(),html.Td('HCI'),html.Td(),html.Td('HCI'),html.Td()
])
])
])
]),
html.Div(className='rightDiv',children=[
html.H2(className='h2',children='How many courses did I take?'),
html.Div(className='chartbox',children=[
dcc.Dropdown(
id="Year-selector",
options=[{
'label': i,
'value': i
} for i in courseDf['Year'].unique()],
value="Year",
placeholder="Select Year"
),
dcc.Graph(id='course-graph')
])
],
style={
'width' : '100%',
'min-width':'35rem'
})
]),
html.Div(className='fourthDiv',children=[
html.Div(className='DivChild',children=[
html.H2(className='h2',children=['Visitors for last 7 days']),
html.Table(className='table2',children=[
html.Tbody([
html.Tr([
html.Th('MON', style={'background-color':"#dbd4e7", 'width':'90px'}),
html.Th('TUE', style={'background-color':"#dbd4e7", 'width':'90px'}),
html.Th('WED', style={'background-color':"#dbd4e7", 'width':'90px'}),
html.Th('THU', style={'background-color':"#dbd4e7", 'width':'90px'}),
html.Th('FRI', style={'background-color':"#dbd4e7", 'width':'90px'}),
html.Th('SAT', style={'background-color':"#dbd4e7", 'width':'90px'}),
html.Th('SUN', style={'background-color':"#dbd4e7", 'width':'90px'})
]),
html.Tr([
html.Td('30', style={'width':"#dbd4e7"}),html.Td('12'),html.Td('23'),html.Td('43'),
html.Td('21'),html.Td('11'),html.Td('34')
])
])
])
])
]),
html.Div(className='footer',children=[
html.B('Interactive Computing Lab, School of Computing,KAIST'),
html.Br(),
html.I('291 Daehak-ro, Yuseong-gu, Daejeon 34141, Republic of Korea')
])
])
@app.callback(
Output(component_id='inbody-graph', component_property='figure'),
[Input(component_id='Value-selector', component_property='value')]
)
def update_inbody_graph(value):
days = ['2021-07-27', '2021-08-03', '2021-08-12', '2021-09-07']
if value == "All":
df = inbodyDf.copy()
else:
df = inbodyDf.loc[inbodyDf['Type']==value]
line1 = go.Scatter(name='Fat', x=days, y=df.loc[df['Type']=='Fat']['Figure'], mode='lines+markers')
line2 = go.Scatter(name='Skeletal muscles', x=days, y=df.loc[df['Type']=='Skeletal muscles']['Figure'],mode='lines+markers')
line3 = go.Scatter(name='BMI', x=days, y=df.loc[df['Type']=='BMI']['Figure'],mode='lines+markers')
line4 = go.Scatter(name='Fat Pect.', x=days, y=df.loc[df['Type']=='Fat Pect.']['Figure'],mode='lines+markers')
return {
'data': [line1,line2,line3,line4],
'layout':
go.Layout(
barmode='stack'
)
}
@app.callback(
Output(component_id='course-graph', component_property='figure'),
[Input(component_id='Year-selector', component_property='value')]
)
def update_course_graph(value):
if value == "Year":
df = courseDf[courseDf['Year']==2021]
else:
df = courseDf[courseDf['Year']==value]
grouped_Df = df.groupby(['Semester','Department']).count()
grouped_Df = grouped_Df.reset_index()
semesters = ['Spring', 'Fall']
bar1 = go.Bar(name='School of Computing', x=semesters, y=grouped_Df.loc[grouped_Df['Department']=='School of Computing']['Course'])
bar2 = go.Bar(name='General Required', x=semesters, y=grouped_Df.loc[grouped_Df['Department']=='General Required']['Course'])
bar3 = go.Bar(name='Electrical Engineering', x=semesters, y=grouped_Df.loc[grouped_Df['Department']=='Electrical Engineering']['Course'])
bar4 = go.Bar(name='Cyber Security', x=semesters, y=grouped_Df.loc[grouped_Df['Department']=='Cyber Security']['Course'])
bar5 = go.Bar(name='Computer Engineering', x=semesters, y=grouped_Df.loc[grouped_Df['Department']=='Computer Engineering']['Course'])
bar6 = go.Bar(name='Mech/BioMed Engineering', x=semesters, y=grouped_Df.loc[grouped_Df['Department']=='Mech/BioMed Engineering']['Course'])
return {
'data': [bar1,bar2,bar3,bar4,bar5,bar6],
'layout':
go.Layout(
barmode='stack'
)
}
if __name__ == '__main__':
app.run_server(debug=True) | yuwon-shin/Data_Visualization | PR/flask/useDash.py | useDash.py | py | 9,696 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line... |
12814349068 | import itertools
import matplotlib.pyplot as plt
import numpy as np
def get_mtot(event_jets):
all_px = sum([j.px ** 2 for j in event_jets])
all_py = sum([j.py ** 2 for j in event_jets])
all_pz = sum([j.pz ** 2 for j in event_jets])
all_e = sum([j.e for j in event_jets])
if all_e ** 2 - all_px - all_py - all_pz >= 0:
return (all_e ** 2 - all_px - all_py - all_pz) ** 0.5
else:
raise Exception('Bad MTot: all_e=%d, all_px=%d, all_py=%d, all_pz=%d'.format(all_e, all_px, all_py, all_pz))
def get_mjj(event_jets):
"""
The 2 first jets are the leading jets
:param event_jets:
:return: The mjj for the 2 leading jets
"""
e = event_jets[0].e + event_jets[1].e
px = event_jets[0].px + event_jets[1].px
py = event_jets[0].py + event_jets[1].py
pz = event_jets[0].pz + event_jets[1].pz
return (e ** 2 - px ** 2 - py ** 2 - pz ** 2) ** 0.5
def get_mjj_all_pairs(event_jets):
mjj_all_pairs = []
for pair in itertools.product(event_jets, repeat=2):
(jo, jt) = pair
e = jo.e + jt.e
px = jo.px + jt.px
py = jo.py + jt.py
pz = jo.pz + jt.pz
if (e ** 2 - px ** 2 - py ** 2 - pz ** 2) >= 0:
mjj_all_pairs += [(e ** 2 - px ** 2 - py ** 2 - pz ** 2) ** 0.5]
else:
raise Exception('Bad Mjj: e=%d, px=%d, py=%d, pz=%d'.format(e, px, py, pz))
return mjj_all_pairs
def get_lead_pt(event_jets):
return event_jets[0].pt
def get_nj(event_jets):
return len(event_jets)
def get_mht(event_jets, pt_cutoff=30, eta_cutoff=5):
all_px = np.array([jet.px for jet in event_jets if (jet.pt > pt_cutoff and jet.eta < eta_cutoff)])
all_py = np.array([jet.py for jet in event_jets if (jet.pt > pt_cutoff and jet.eta < eta_cutoff)])
return sum(np.square(all_px) + np.square(all_py)) ** 0.5
def get_ht(event_jets, pt_cutoff=30, eta_cutoff=2.5):
all_px = np.array([jet.px for jet in event_jets if (jet.pt > pt_cutoff and jet.eta < eta_cutoff)])
all_py = np.array([jet.py for jet in event_jets if (jet.pt > pt_cutoff and jet.eta < eta_cutoff)])
return sum(np.square(all_px) + np.square(all_py)) ** 0.5
def get_meff(event_jets):
all_px = np.array([jet.px for jet in event_jets])
all_py = np.array([jet.py for jet in event_jets])
return sum(jet.pt for jet in event_jets) + (sum(np.square(all_px) + np.square(all_py)))**0.5
def plot_histogram(data, x_label, y_label, color='b'):
plt.figure()
plt.hist(data, bins=50, facecolor=color, alpha=0.2)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.show()
def plot_scatter(x, y, x_label, y_label):
plt.figure()
plt.scatter(x, y)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.show()
def get_m1(event_jets):
jet1 = event_jets[0]
return (jet1.e ** 2 - jet1.px ** 2 - jet1.py ** 2 - jet1.pz ** 2) ** 0.5
def get_m2(event_jets):
jet2 = event_jets[1]
return (jet2.e ** 2 - jet2.px ** 2 - jet2.py ** 2 - jet2.pz ** 2) ** 0.5
def get_m1_sub_m2(event_jets):
return abs(get_m1(event_jets) - get_m2(event_jets))
| rotemov/ML4Jets-HUJI | jupyter_methods.py | jupyter_methods.py | py | 3,097 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "itertools.product",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "numpy.square",
"line_nu... |
26693503665 | import pytorch_lightning as pl
import pandas as pd
import torch
from torch import nn
from torch.utils.data import DataLoader
from sklearn.metrics import cohen_kappa_score
from transformers import AutoTokenizer, RobertaForSequenceClassification
from torch.utils.data import Dataset
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import (LearningRateMonitor,
EarlyStopping,
ModelCheckpoint,
TQDMProgressBar)
from pytorch_lightning import seed_everything
import wandb
import click
def kappa(y, yhat):
y = y.cpu().numpy()
yhat = yhat.cpu().numpy()
return cohen_kappa_score(y, yhat, weights="quadratic")
class SmilesDataset(Dataset):
def __init__(self,
filename,
load_labels=True
):
self.load_labels = load_labels
# Contains columns: Id, smiles, sol_category
self.df = pd.read_csv(filename)
self.smiles = (self.df["smiles"].values.tolist())
if self.load_labels:
self.labels = self.df["sol_category"].values
self.point_id = self.df["Id"].values
# Need to override methods __len__ and __getitem__
def __len__(self):
return self.df.shape[0]
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
smiles = self.smiles[idx]
if "Id" in self.df.columns:
ids = self.point_id[idx]
if self.load_labels:
labels = torch.as_tensor(self.labels[idx])
return smiles, labels, idx, ids
else:
return smiles, idx, ids
else:
if self.load_labels:
labels = torch.as_tensor(self.labels[idx])
return smiles, labels, idx
else:
return smiles, idx
class ChemBERTa(pl.LightningModule):
def __init__(self,
size,
num_classes,
data_dir,
learning_rate=1e-3,
batch_size=300,
dropout=0.3,
weights=True,
file_template="split_{}.csv",
):
super().__init__()
# Define loss function:
if weights:
print("*** training with weighted loss ***")
self.Loss = nn.CrossEntropyLoss(weight=torch.Tensor([0.9711, 0.9599, 0.068]),
reduction='mean')
else:
print("*** training WITHOUT weights ***")
self.Loss = nn.CrossEntropyLoss(reduction='mean')
# Data loading variables
self.num_workers = 4*torch.cuda.device_count() # 8
self.batch_size = batch_size
# Data paths
self.data_dir = data_dir
self.train_file = file_template.format("train")
self.valid_file = file_template.format("valid")
self.test_file = "test.csv"
# Model specific variables
self.learning_rate = learning_rate
# Define PyTorch model
self.pretrained = "DeepChem/ChemBERTa-10M-MTR" #DeepChem/ChemBERTa-77M-MTR
self.tokenizer = (AutoTokenizer.
from_pretrained(
self.pretrained
))
self.model = (RobertaForSequenceClassification
.from_pretrained(
self.pretrained,
num_labels=num_classes
))
def forward(self, x):
# define prediction/inference actions
x = self.tokenizer(list(x),
return_tensors="pt",
padding=True)
x = {key: x[key].to("cuda:0")
for key in x.keys()}
return self.model(**x).logits
def training_step(self, batch, batch_idx):
# define train loop
x, y, idxs, p_ids = batch
logits = self(x)
loss = self.Loss(logits, y)
self.log(f"train_loss", loss, on_epoch=True, on_step=False)
return loss
def validation_step(self, batch, batch_idx):
x, y, idxs, p_ids = batch
logits = self(x)
pred = nn.Softmax(dim=1)(logits)
pred = torch.argmax(pred, dim=1)
kap = kappa(y, pred)
self.log(f"valid_kappa", kap, on_epoch=True, on_step=False, prog_bar=True)
def test_step(self, batch, batch_idx):
x, idxs, p_ids = batch
logits = self(x)
pred = nn.Softmax(dim=1)(logits)
pred = torch.argmax(pred, dim=1).cpu().numpy()
return pd.DataFrame(list(zip(p_ids, pred)))
def test_epoch_end(self, outputs):
# Concat all test results
print(outputs)
all_outs = pd.concat(outputs)
print(all_outs)
all_outs.columns = ["Id", "pred"]
all_outs.to_csv(f"Chemberta_train.csv", index=False)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(),
lr=self.learning_rate)
lr_scheduler = {
"scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer=optimizer,
mode="max",
factor=0.3,
patience=1,
cooldown=0,
verbose=True
),
"monitor": "valid_kappa"
}
return [optimizer], [lr_scheduler]
def setup(self, stage=None):
# Assign train/val datasets for use in dataloaders
if stage == "fit" or stage is None:
self.train_data = SmilesDataset(self.data_dir + self.train_file)
self.val_data = SmilesDataset(self.data_dir + self.valid_file)
# Assign test dataset for use in dataloader(s)
if stage == "test" or stage is None:
self.test_data = SmilesDataset(self.data_dir + self.test_file,
load_labels=False)
def train_dataloader(self):
return DataLoader(self.train_data,
batch_size=self.batch_size,
num_workers=self.num_workers)
def test_dataloader(self):
return DataLoader(self.test_data,
batch_size=2000,
num_workers=self.num_workers)
def val_dataloader(self):
return DataLoader(self.val_data,
batch_size=2000,
num_workers=self.num_workers)
#############################################################
@click.command()
@click.option("--size", type=int, default=300)
@click.option("--num_classes", type=int, default=3)
@click.option("--max_epochs", type=int, default=50)
@click.option("--data_dir", type=str, default="../../data/")
@click.option("--learning_rate", type=float, default=1e-3)
@click.option("--batch_size", type=int, default=30)
@click.option("--weights", is_flag=True)
def main(size,
         num_classes,
         max_epochs,
         data_dir,
         learning_rate,
         batch_size,
         weights
         ):
    """
    Train and evaluate the ChemBERTa solubility model.

    BUG FIX: the ``--weights`` CLI flag used to be silently ignored — the
    model was always constructed with ``weights=True``. The flag value is
    now forwarded to the model.
    """
    # Fixed seed for reproducible runs (pairs with deterministic=True below).
    seed = 0
    seed_everything(seed, workers=True)
    wandb.init(project="solubility_prediction")
    model = ChemBERTa(
        size=size,
        num_classes=num_classes,
        data_dir=data_dir,
        learning_rate=learning_rate,
        batch_size=batch_size,
        weights=weights  # forward the CLI flag instead of hard-coding True
    )
    wandb_logger = WandbLogger()
    wandb.watch(model)
    # Keep only the single best checkpoint, selected on validation kappa.
    checkpoint_callback = ModelCheckpoint(dirpath="models/checkpoint/",
                                          filename="best",
                                          save_last=False,
                                          save_top_k=1,
                                          monitor="valid_kappa",
                                          mode="max")
    # Defined but intentionally left out of the callbacks list below.
    earlystop_callback = EarlyStopping(monitor="valid_kappa",
                                       mode="max",
                                       patience=3,
                                       min_delta=0.001,
                                       verbose=True)
    trainer = pl.Trainer(
        accelerator="auto",
        devices=1 if torch.cuda.is_available() else None,
        max_epochs=max_epochs,
        callbacks=[TQDMProgressBar(refresh_rate=5),
                   LearningRateMonitor(logging_interval="epoch"),
                   #earlystop_callback,
                   checkpoint_callback,
                   ],
        logger=wandb_logger,
        deterministic=True
    )
    # Train
    trainer.fit(model)
    # Save final (last-epoch) weights separately from the "best" checkpoint.
    torch.save(model.state_dict(), 'models/checkpoint/last_weights.pth')
    # Test model with the best validation-kappa checkpoint.
    trainer.test(ckpt_path="best")
if __name__ == "__main__":
    main()
| maltefranke/solubility_prediction | models/ChemBERTa/chemberta10M.py | chemberta10M.py | py | 8,935 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "sklearn.metrics.cohen_kappa_score",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 38,
"usage_type": "call"
},
{
"api_na... |
19386134805 | import argparse
import json
import os
import sys
import numpy as np
import torch
from plyfile import PlyData, PlyElement
from torch.utils.data import DataLoader
from tqdm import tqdm
sys.path.append(os.path.join(os.getcwd())) # HACK add the root folder
from utils.pc_utils import write_ply_rgb
from utils.box_util import get_3d_box
from data.scannet.model_util_scannet import ScannetDatasetConfig
from lib.config import CONF
from lib.scan2cap_dataset import Scan2CapDataset
from models.pointnet_extractor_module import PointNetExtractor
# constants
SCANNET_ROOT = "../data/scannet/scans/" # TODO point this to your scannet data
SCANNET_MESH = os.path.join(SCANNET_ROOT, "{}/{}_vh_clean_2.ply") # scene_id, scene_id
SCANNET_META = os.path.join(SCANNET_ROOT, "{}/{}.txt") # scene_id, scene_id
MEAN_COLOR_RGB = np.array([109.8, 97.2, 83.8])
DC = ScannetDatasetConfig()
SCANREFER_TRAIN = json.load(open(os.path.join(CONF.PATH.DATA, "ScanRefer_filtered_train.json")))
SCANREFER_VAL = json.load(open(os.path.join(CONF.PATH.DATA, "ScanRefer_filtered_val.json")))
VOCABULARY = json.load(open(os.path.join(CONF.PATH.DATA, "vocabulary.json"), "r"))
global_correct = 0
global_total = 0
def get_dataloader(args, scanrefer, all_scene_list, split, config, augment):
    """Construct the Scan2Cap dataset for `split` plus a matching DataLoader.

    Returns (dataset, dataloader). `drop_last=True`, so partial final
    batches are discarded.
    """
    caption_dataset = Scan2CapDataset(
        scanrefer=scanrefer,
        scanrefer_all_scene=all_scene_list,
        vocabulary=VOCABULARY,
        split=split,
        num_points=args.num_points,
        use_height=(not args.no_height),
        use_color=args.use_color,
        use_normal=args.use_normal,
        use_multiview=args.use_multiview,
        augment=augment
    )
    loader = DataLoader(caption_dataset, batch_size=args.batch_size,
                        shuffle=False, num_workers=4, drop_last=True)
    return caption_dataset, loader
def get_model(args):
    """Load the pretrained PointNet extractor checkpoint onto the GPU (eval mode)."""
    # Input channel count mirrors the feature flags:
    # 128 multiview features + 3 normals + 3 colors + 1 height channel.
    feat_channels = (
        int(args.use_multiview) * 128
        + int(args.use_normal) * 3
        + int(args.use_color) * 3
        + int(not args.no_height)
    )
    model = PointNetExtractor(pretrain_mode=True, feature_channels=feat_channels).cuda()
    checkpoint_path = os.path.join(CONF.PATH.OUTPUT, args.folder, "model.pth")
    model.load_state_dict(torch.load(checkpoint_path), strict=False)
    model.eval()
    return model
def get_scanrefer(args):
    """Select the ScanRefer split requested on the CLI and the scene ids to visit.

    If --scene_id is given, restrict to that single scene (it must exist in
    the split); otherwise keep every scene. Returns (filtered_records,
    scene_id_list).
    """
    scanrefer = SCANREFER_TRAIN if args.use_train else SCANREFER_VAL
    all_scene_list = sorted({record["scene_id"] for record in scanrefer})
    if args.scene_id:
        assert args.scene_id in all_scene_list, "The scene_id is not found"
        scene_list = [args.scene_id]
    else:
        scene_list = list(all_scene_list)
    scanrefer = [record for record in scanrefer if record["scene_id"] in scene_list]
    return scanrefer, scene_list
def write_ply(verts, colors, indices, output_file):
    """Write a simple ascii PLY mesh.

    verts: iterable of xyz triples.
    colors: per-vertex rgb values in [0, 1], or None for all-black.
    indices: iterable of triangle index triples, or None for a point cloud.
    output_file: destination path.
    """
    if colors is None:
        colors = np.zeros_like(verts)
    if indices is None:
        indices = []
    # BUG FIX: use a context manager so the handle is closed even if a
    # write raises (the original manual open/close leaked it on error).
    with open(output_file, 'w') as file:
        file.write('ply \n')
        file.write('format ascii 1.0\n')
        file.write('element vertex {:d}\n'.format(len(verts)))
        file.write('property float x\n')
        file.write('property float y\n')
        file.write('property float z\n')
        file.write('property uchar red\n')
        file.write('property uchar green\n')
        file.write('property uchar blue\n')
        file.write('element face {:d}\n'.format(len(indices)))
        file.write('property list uchar uint vertex_indices\n')
        file.write('end_header\n')
        # Colors are scaled from [0, 1] floats to 0-255 uchar values.
        for vert, color in zip(verts, colors):
            file.write("{:f} {:f} {:f} {:d} {:d} {:d}\n".format(
                vert[0], vert[1], vert[2],
                int(color[0]*255), int(color[1]*255), int(color[2]*255)))
        for ind in indices:
            file.write('3 {:d} {:d} {:d}\n'.format(ind[0], ind[1], ind[2]))
def write_bbox(bbox, mode, output_file):
    """
    Write an axis-aligned bounding box as a wireframe mesh (cylinders along the
    12 edges) to an ascii PLY file.

    bbox: (cx, cy, cz, lx, ly, lz, r), center and length in three axis, the last is the rotation
          NOTE(review): the rotation entry is never read below — the box is
          always written axis-aligned; confirm callers rely on that.
    mode: color selector — 0 = green (ground truth), 1 = blue (prediction)
    output_file: string
    """
    def create_cylinder_mesh(radius, p0, p1, stacks=10, slices=10):
        # Build a cylinder mesh of the given radius between points p0 and p1.
        import math
        def compute_length_vec3(vec3):
            # Euclidean length of a 3-vector.
            return math.sqrt(vec3[0]*vec3[0] + vec3[1]*vec3[1] + vec3[2]*vec3[2])
        def rotation(axis, angle):
            # 4x4 homogeneous rotation matrix about `axis` by `angle` radians.
            rot = np.eye(4)
            c = np.cos(-angle)
            s = np.sin(-angle)
            t = 1.0 - c
            axis /= compute_length_vec3(axis)
            x = axis[0]
            y = axis[1]
            z = axis[2]
            rot[0,0] = 1 + t*(x*x-1)
            rot[0,1] = z*s+t*x*y
            rot[0,2] = -y*s+t*x*z
            rot[1,0] = -z*s+t*x*y
            rot[1,1] = 1+t*(y*y-1)
            rot[1,2] = x*s+t*y*z
            rot[2,0] = y*s+t*x*z
            rot[2,1] = -x*s+t*y*z
            rot[2,2] = 1+t*(z*z-1)
            return rot
        verts = []
        indices = []
        diff = (p1 - p0).astype(np.float32)
        height = compute_length_vec3(diff)
        # Canonical cylinder along +z: rings of `slices` vertices, `stacks`+1 rings.
        for i in range(stacks+1):
            for i2 in range(slices):
                theta = i2 * 2.0 * math.pi / slices
                pos = np.array([radius*math.cos(theta), radius*math.sin(theta), height*i/stacks])
                verts.append(pos)
        # Two triangles per quad between consecutive rings (wrap with fmod).
        for i in range(stacks):
            for i2 in range(slices):
                i2p1 = math.fmod(i2 + 1, slices)
                indices.append( np.array([(i + 1)*slices + i2, i*slices + i2, i*slices + i2p1], dtype=np.uint32) )
                indices.append( np.array([(i + 1)*slices + i2, i*slices + i2p1, (i + 1)*slices + i2p1], dtype=np.uint32) )
        # Rotate the canonical +z cylinder onto the p0->p1 direction.
        transform = np.eye(4)
        va = np.array([0, 0, 1], dtype=np.float32)
        vb = diff
        vb /= compute_length_vec3(vb)
        axis = np.cross(vb, va)
        angle = np.arccos(np.clip(np.dot(va, vb), -1, 1))
        if angle != 0:
            if compute_length_vec3(axis) == 0:
                # Direction is (anti)parallel to +z: pick any perpendicular axis.
                dotx = va[0]
                if (math.fabs(dotx) != 1.0):
                    axis = np.array([1,0,0]) - dotx * va
                else:
                    axis = np.array([0,1,0]) - va[1] * va
                axis /= compute_length_vec3(axis)
            transform = rotation(axis, -angle)
        transform[:3,3] += p0
        # Apply the homogeneous transform, then de-homogenize.
        verts = [np.dot(transform, np.array([v[0], v[1], v[2], 1.0])) for v in verts]
        verts = [np.array([v[0], v[1], v[2]]) / v[3] for v in verts]
        return verts, indices
    def get_bbox_edges(bbox_min, bbox_max):
        # Return the 12 edges of the box as pairs of corner points.
        def get_bbox_verts(bbox_min, bbox_max):
            verts = [
                np.array([bbox_min[0], bbox_min[1], bbox_min[2]]),
                np.array([bbox_max[0], bbox_min[1], bbox_min[2]]),
                np.array([bbox_max[0], bbox_max[1], bbox_min[2]]),
                np.array([bbox_min[0], bbox_max[1], bbox_min[2]]),
                np.array([bbox_min[0], bbox_min[1], bbox_max[2]]),
                np.array([bbox_max[0], bbox_min[1], bbox_max[2]]),
                np.array([bbox_max[0], bbox_max[1], bbox_max[2]]),
                np.array([bbox_min[0], bbox_max[1], bbox_max[2]])
            ]
            return verts
        box_verts = get_bbox_verts(bbox_min, bbox_max)
        edges = [
            (box_verts[0], box_verts[1]),
            (box_verts[1], box_verts[2]),
            (box_verts[2], box_verts[3]),
            (box_verts[3], box_verts[0]),
            (box_verts[4], box_verts[5]),
            (box_verts[5], box_verts[6]),
            (box_verts[6], box_verts[7]),
            (box_verts[7], box_verts[4]),
            (box_verts[0], box_verts[4]),
            (box_verts[1], box_verts[5]),
            (box_verts[2], box_verts[6]),
            (box_verts[3], box_verts[7])
        ]
        return edges
    def get_bbox_corners(bbox):
        # Expand (center, lengths) into the 8 corner coordinates, shape (8, 3).
        centers, lengths = bbox[:3], bbox[3:6]
        xmin, xmax = centers[0] - lengths[0] / 2, centers[0] + lengths[0] / 2
        ymin, ymax = centers[1] - lengths[1] / 2, centers[1] + lengths[1] / 2
        zmin, zmax = centers[2] - lengths[2] / 2, centers[2] + lengths[2] / 2
        corners = []
        corners.append(np.array([xmax, ymax, zmax]).reshape(1, 3))
        corners.append(np.array([xmax, ymax, zmin]).reshape(1, 3))
        corners.append(np.array([xmin, ymax, zmin]).reshape(1, 3))
        corners.append(np.array([xmin, ymax, zmax]).reshape(1, 3))
        corners.append(np.array([xmax, ymin, zmax]).reshape(1, 3))
        corners.append(np.array([xmax, ymin, zmin]).reshape(1, 3))
        corners.append(np.array([xmin, ymin, zmin]).reshape(1, 3))
        corners.append(np.array([xmin, ymin, zmax]).reshape(1, 3))
        corners = np.concatenate(corners, axis=0) # 8 x 3
        return corners
    radius = 0.03  # cylinder radius of each wireframe edge
    offset = [0,0,0]
    verts = []
    indices = []
    colors = []
    corners = get_bbox_corners(bbox)
    box_min = np.min(corners, axis=0)
    box_max = np.max(corners, axis=0)
    # RGB color per mode: green for ground truth, blue for predictions.
    palette = {
        0: [0, 255, 0], # gt
        1: [0, 0, 255] # pred
    }
    chosen_color = palette[mode]
    edges = get_bbox_edges(box_min, box_max)
    # One cylinder mesh per edge; vertex indices are shifted into the
    # combined vertex list before appending.
    for k in range(len(edges)):
        cyl_verts, cyl_ind = create_cylinder_mesh(radius, edges[k][0], edges[k][1])
        cur_num_verts = len(verts)
        cyl_color = [[c / 255 for c in chosen_color] for _ in cyl_verts]
        cyl_verts = [x + offset for x in cyl_verts]
        cyl_ind = [x + cur_num_verts for x in cyl_ind]
        verts.extend(cyl_verts)
        indices.extend(cyl_ind)
        colors.extend(cyl_color)
    write_ply(verts, colors, indices, output_file)
def read_mesh(filename):
    """Read per-vertex XYZ + RGB and the face element from a PLY file.

    Returns (vertices, faces) where vertices is an (N, 6) float32 array
    laid out as [x, y, z, red, green, blue].
    """
    assert os.path.isfile(filename)
    with open(filename, 'rb') as f:
        plydata = PlyData.read(f)
        num_verts = plydata['vertex'].count
        vertices = np.zeros(shape=[num_verts, 6], dtype=np.float32)
        # Copy each named property into its column of the flat array.
        for column, prop in enumerate(('x', 'y', 'z', 'red', 'green', 'blue')):
            vertices[:, column] = plydata['vertex'].data[prop]
    return vertices, plydata['face']
def export_mesh(vertices, faces):
    """Pack an (N, 6) xyz+rgb vertex array and a face element into PlyData."""
    # Convert each row into a tuple so numpy can build a structured array.
    structured = np.array(
        [tuple(vertices[i][:6]) for i in range(vertices.shape[0])],
        dtype=[
            ("x", np.dtype("float32")),
            ("y", np.dtype("float32")),
            ("z", np.dtype("float32")),
            ("red", np.dtype("uint8")),
            ("green", np.dtype("uint8")),
            ("blue", np.dtype("uint8"))
        ]
    )
    vertex_element = PlyElement.describe(structured, "vertex")
    return PlyData([vertex_element, faces])
def align_mesh(scene_id):
    """Load a ScanNet scene mesh and transform it into the axis-aligned frame.

    Reads the 4x4 axisAlignment matrix from the scene's meta file and applies
    it to the mesh vertices. Returns a PlyData object ready to write.
    """
    vertices, faces = read_mesh(SCANNET_MESH.format(scene_id, scene_id))
    axis_align_matrix = None
    for line in open(SCANNET_META.format(scene_id, scene_id)).readlines():
        if 'axisAlignment' in line:
            axis_align_matrix = np.array([float(x) for x in line.rstrip().strip('axisAlignment = ').split(' ')]).reshape((4, 4))
            break
    # BUG FIX: the original raised NameError when the meta file contained no
    # axisAlignment entry; fall back to the identity transform instead.
    if axis_align_matrix is None:
        axis_align_matrix = np.eye(4)
    # align: apply the homogeneous transform to the xyz columns only.
    pts = np.ones((vertices.shape[0], 4))
    pts[:, :3] = vertices[:, :3]
    pts = np.dot(pts, axis_align_matrix.T)
    vertices[:, :3] = pts[:, :3]
    mesh = export_mesh(vertices, faces)
    return mesh
def dump_results(args, scanrefer, data, config):
    """Dump per-sample visualization artifacts (aligned scene mesh, colored
    point cloud, ground-truth box PLY) and print the running classification
    accuracy of the reference object classifier.

    NOTE(review): mutates module globals `global_correct` / `global_total`
    to accumulate accuracy across batches.
    """
    dump_dir = os.path.join(CONF.PATH.OUTPUT, args.folder, "vis")
    os.makedirs(dump_dir, exist_ok=True)
    # from inputs
    ids = data['scan_idx'].detach().cpu().numpy()
    point_clouds = data['point_clouds'].cpu().numpy()
    batch_size = point_clouds.shape[0]
    pcl_color = data["pcl_color"].detach().cpu().numpy()
    if args.use_color:
        # Undo dataset color normalization (assumes the dataset divided by 256
        # after subtracting MEAN_COLOR_RGB — TODO confirm against Scan2CapDataset).
        pcl_color = (pcl_color * 256 + MEAN_COLOR_RGB).astype(np.int64)
    # from network outputs
    # detection
    # ground truth
    gt_center = data['ref_center_label'].cpu().numpy() # (B,MAX_NUM_OBJ,3)
    gt_size_residual = data['ref_size_residual_label'].cpu().numpy() # B,K2,3
    # reference
    nyu40_label = data["ref_nyu40_label"].detach().cpu().numpy()
    # +1 shifts argmax class indices back to NYU40 ids (labels are 1-based).
    prediction = torch.argmax(data["ref_obj_cls_scores"], dim=1).detach().cpu().numpy() + 1
    global global_correct
    global global_total
    global_correct += np.sum(nyu40_label == prediction)
    global_total += batch_size
    print("NYU40_LABEL", [DC.nyu40id2label[i] for i in list(nyu40_label)])
    print("PREDICTION", [DC.nyu40id2label[i] for i in list(prediction)])
    print("ACC", global_correct / global_total)
    for i in range(batch_size):
        # basic info
        idx = ids[i]
        scene_id = scanrefer[idx]["scene_id"]
        object_id = scanrefer[idx]["object_id"]
        object_name = scanrefer[idx]["object_name"]
        ann_id = scanrefer[idx]["ann_id"]
        # scene_output
        scene_dump_dir = os.path.join(dump_dir, scene_id)
        if not os.path.exists(scene_dump_dir):
            os.mkdir(scene_dump_dir)
        # # Dump the original scene point clouds
        mesh = align_mesh(scene_id)
        mesh.write(os.path.join(scene_dump_dir, 'mesh.ply'))
        write_ply_rgb(point_clouds[i], pcl_color[i], os.path.join(scene_dump_dir, 'pc.ply'))
        # visualize the gt reference box
        # NOTE: for each object there should be only one gt reference box
        object_dump_dir = os.path.join(scene_dump_dir, "gt_{}_{}_{}_{}_{}.ply".format(scene_id, object_id, ann_id, DC.nyu40id2label[nyu40_label[i]], DC.nyu40id2label[prediction[i]]))
        # Box as (cx, cy, cz, lx, ly, lz, rot=0); written only once per object.
        gt_obb = np.zeros((7,))
        gt_obb[0:3] = gt_center[i]
        gt_obb[3:6] = gt_size_residual[i]
        gt_bbox = get_3d_box(gt_size_residual[i], 0, gt_center[i])
        if not os.path.exists(object_dump_dir):
            write_bbox(gt_obb, 0, os.path.join(object_dump_dir))
def visualize(args):
    """Run the pretrained extractor over the chosen split and dump visualizations."""
    # Build the data pipeline for the validation split (no augmentation).
    print("preparing data...")
    scanrefer, scene_list = get_scanrefer(args)
    _, dataloader = get_dataloader(args, scanrefer, scene_list, "val", DC, False)
    # Load the checkpointed model in eval mode.
    model = get_model(args)
    model.eval()
    print("visualizing...")
    for batch in tqdm(dataloader):
        # Move every tensor in the batch onto the GPU before the forward pass.
        batch = {key: value.cuda() for key, value in batch.items()}
        with torch.no_grad():
            batch = model(batch)
        dump_results(args, scanrefer, batch, DC)
    print("done!")
if __name__ == "__main__":
    # CLI for the visualization script.
    parser = argparse.ArgumentParser()
    parser.add_argument("--folder", type=str, help="Folder containing the model", required=True)
    parser.add_argument("--gpu", type=str, help="gpu", default="0")
    parser.add_argument("--scene_id", type=str, help="scene id", default="")
    parser.add_argument("--batch_size", type=int, help="batch size", default=2)
    parser.add_argument('--num_points', type=int, default=40000, help='Point Number [default: 40000]')
    parser.add_argument('--num_proposals', type=int, default=256, help='Proposal number [default: 256]')
    parser.add_argument('--num_scenes', type=int, default=-1, help='Number of scenes [default: -1]')
    parser.add_argument('--no_height', action='store_true', help='Do NOT use height signal in input.')
    parser.add_argument('--no_nms', action='store_true', help='do NOT use non-maximum suppression for post-processing.')
    parser.add_argument('--use_train', action='store_true', help='Use the training set.')
    parser.add_argument('--use_color', action='store_true', help='Use RGB color in input.')
    parser.add_argument('--use_normal', action='store_true', help='Use RGB color in input.')
    parser.add_argument('--use_multiview', action='store_true', help='Use multiview images.')
    args = parser.parse_args()
    # setting
    # Pin the process to the requested GPU and make CUDA errors synchronous.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
    visualize(args)
| nseppi/scan2cap | scan2cap/scripts/visualize_pretrain.py | visualize_pretrain.py | py | 15,949 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
12464441939 | import argparse
import os
import shutil
import socket
import time
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torchvision.utils as vutils
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
from torch.autograd import Variable
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from data import MyImageFolder
from model import UnetGenerator
from model import RevealNet
from text_data import *
from utils import *
import nltk
# Hard-coded dataset roots (the original author's machine paths).
DATA_DIR = '/media/changmin/mini_hard/ImageNet/'
TEXT_DATA_DIR = "/home/changmin/research/steganography/data/"
# CLI options for training/testing the hiding (Hnet) and reveal (Rnet) networks.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default="train",
                    help='train | val | test')
parser.add_argument('--workers', type=int, default=8,
                    help='number of data loading workers')
parser.add_argument('--batchsize', type=int, default=4,
                    help='input batch size')
parser.add_argument('--imagesize', type=int, default=256,
                    help='the number of frames')
parser.add_argument('--epochs', type=int, default=100,
                    help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.001,
                    help='learning rate, default=0.001')
parser.add_argument('--decay_round', type=int, default=10,
                    help='learning rate decay 0.5 each decay_round')
parser.add_argument('--beta1', type=float, default=0.5,
                    help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', type=bool, default=True,
                    help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1,
                    help='number of GPUs to use')
parser.add_argument('--Hnet', default='',
                    help="path to Hidingnet (to continue training)")
parser.add_argument('--Rnet', default='',
                    help="path to Revealnet (to continue training)")
parser.add_argument('--embedding', default='',
                    help="path to embedding (to continue training)")
parser.add_argument('--trainpics', default='./training/',
                    help='folder to output training images')
parser.add_argument('--validationpics', default='./training/',
                    help='folder to output validation images')
parser.add_argument('--testpics', default='./training/',
                    help='folder to output test images')
parser.add_argument('--outckpts', default='./training/',
                    help='folder to output checkpoints')
parser.add_argument('--traintexts', default='./training/',
                    help='folder to output training texts')
parser.add_argument('--outlogs', default='./training/',
                    help='folder to output images')
parser.add_argument('--outcodes', default='./training/',
                    help='folder to save the experiment codes')
# Loss weight: total loss = H_loss + beta * R_loss.
parser.add_argument('--beta', type=float, default=0.01,
                    help='hyper parameter of beta')
parser.add_argument('--remark', default='', help='comment')
parser.add_argument('--test', default='', help='test mode, you need give the test pics dirs in this param')
parser.add_argument('--hostname', default=socket.gethostname(), help='the host name of the running server')
parser.add_argument('--debug', type=bool, default=False, help='debug mode do not create folders')
parser.add_argument('--logfrequency', type=int, default=10, help='the frequency of print the log on the console')
parser.add_argument('--resultpicfrequency', type=int, default=100, help='the frequency of save the resultpic')
parser.add_argument('--savefrequency', type=int, default=1000, help='the frequency of save the checkpoint')
def main():
    """Entry point: build models/datasets from CLI options, then train or test.

    NOTE(review): trains on `val_loader` because the ImageNet train loader is
    commented out below — confirm this is intentional before relying on it.
    NOTE(review): `embedding` is a local here but `validation()` references a
    name `embedding` that is not in its scope and not declared global — likely
    a NameError at runtime; confirm.
    """
    global writer, smallestLoss, optimizerH, optimizerR, schedulerH, schedulerR
    opt = parser.parse_args()
    if torch.cuda.is_available() and not opt.cuda:
        print("WARNING: You have a CUDA device, "
              "so you should porbably run with --cuda")
    cudnn.benchmark = True
    create_dir_to_save_result(opt)
    logpath = opt.outlogs + '%s_%d_log.txt' % (opt.dataset, opt.batchsize)
    print_log(opt, str(opt), logpath)
    save_current_codes(opt.outcodes)
    if opt.test == '':
        # Vocabulary built over the whole dialogue corpus (max length 768 tokens).
        voc, _ = loadPrepareData(None, "all", os.path.join(TEXT_DATA_DIR, "dialogues_text.txt"), 768)
        # tensorboardX writer
        writer = SummaryWriter(comment='**' + opt.remark)
        # Get the dataset
        #traindir = os.path.join(DATA_DIR, 'train')
        texttraindir = os.path.join(TEXT_DATA_DIR, "train/dialogues_train.txt")
        valdir = os.path.join(DATA_DIR, 'val')
        textvaldir = os.path.join(TEXT_DATA_DIR, "validation/dialogues_validation.txt")
        """
        train_dataset = MyImageFolder(
            traindir, # Preprocessing the data
            transforms.Compose([
                transforms.Resize([opt.imagesize, opt.imagesize]), # Randomly cut and resize the data to a given size
                transforms.ToTensor(),
                # Convert a numpy.ndarray with a value range of [0,255] or a shape of (H,W,C) to
                # a torch.FloatTensor with a shape of [C,H,W] and a value of [0, 1.0] torch.FloatTensor
            ]),
            True)
        """
        _, text_train_dataset = loadPrepareData(None, "train", texttraindir, 768)
        val_dataset = MyImageFolder(
            valdir, # Preprocessing the data
            transforms.Compose([ # Combine several transforms together
                transforms.Resize([opt.imagesize, opt.imagesize]), # Randomly cut and resize the data to a given size
                transforms.ToTensor(),
                # Convert a numpy.ndarray with a value range of [0, 255] or a shpae of (H,W,C) to
                # a torch.FloatTensor with a shape of [C,H,W] and a value of [0, 1.0] torch.FloatTensor
            ]))
        _, text_val_dataset = loadPrepareData(None, "val", textvaldir, 768)
        #assert train_dataset
        assert val_dataset
        assert text_train_dataset
        assert text_val_dataset
    else:
        testdir = opt.test
        texttestdir = os.path.join(TEXT_DATA_DIR, "test/dialogues_test.txt")
        test_dataset = MyImageFolder(
            testdir, # Preprocessing the data
            transforms.Compose([ # Combine several transfroms together
                transforms.Resize([opt.imagesize, opt.imagesize]),
                transforms.ToTensor(),
            ]))
        _, text_test_dataset = loadPrepareData(None, "test", texttestdir, 768)
        assert test_dataset
        assert text_test_dataset
    # Create word embedding layer
    embedding = nn.Embedding(voc.num_words, 256)
    embedding.cuda()
    embedding.weight.data.uniform_(-1, 1)
    if opt.embedding != '':
        embedding.load_state_dict(torch.load(opt.embedding))
    if opt.ngpu > 1:
        embedding = torch.nn.DataParallel(embedding).cuda()
    # Create Hiding network objects
    # 6 input channels: 3 cover-image channels + 3 secret-text channels.
    Hnet = UnetGenerator(input_nc=6, output_nc=3, num_downs=7, output_function=nn.Sigmoid)
    Hnet.cuda()
    Hnet.apply(weights_init)
    # Determine whether to continue the previous training
    if opt.Hnet != "":
        Hnet.load_state_dict(torch.load(opt.Hnet))
    if opt.ngpu > 1:
        Hnet = torch.nn.DataParallel(Hnet).cuda()
    print_network(opt, Hnet, logpath)
    # Create Reveal network objects
    Rnet = RevealNet(output_function=nn.Sigmoid)
    Rnet.cuda()
    Rnet.apply(weights_init)
    if opt.Rnet != '':
        Rnet.load_state_dict(torch.load(opt.Rnet))
    if opt.ngpu > 1:
        Rnet = torch.nn.DataParallel(Rnet).cuda()
    print_network(opt, Rnet, logpath)
    # LogSoftmax
    logsoftmax = nn.LogSoftmax(dim=-1).cuda()
    # Mean Square Error loss
    criterion = nn.MSELoss().cuda()
    # training mode
    if opt.test == '':
        # setup optimizer
        optimizerH = optim.Adam(Hnet.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
        schedulerH = ReduceLROnPlateau(optimizerH, mode='min', factor=0.2, patience=5, verbose=True)
        optimizerR = optim.Adam(Rnet.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
        schedulerR = ReduceLROnPlateau(optimizerR, mode='min', factor=0.2, patience=8, verbose=True)
        #train_loader = DataLoader(train_dataset, batch_size=opt.batchsize,
        #                          shuffle=True, num_workers=int(opt.workers))
        val_loader = DataLoader(val_dataset, batch_size=opt.batchsize,
                                shuffle=True, num_workers=int(opt.workers))
        smallestLoss = 10000
        print_log(opt, "-------------------Starts Training----------------------", logpath)
        for epoch in range(opt.epochs):
            # train (NOTE(review): val_loader is passed as the training loader)
            train(opt, val_loader, epoch, voc, embedding, text_train_dataset, Hnet=Hnet, Rnet=Rnet,
                  criterion=criterion, logsoftmax=logsoftmax, logpath=logpath)
            # validation
            val_hloss, val_rloss, val_sumloss = validation(opt, val_loader, epoch, voc, text_val_dataset,
                                                           Hnet=Hnet, Rnet=Rnet, criterion=criterion, logsoftmax=logsoftmax, logpath=logpath)
            # adjust learning rate
            schedulerH.step(val_sumloss)
            schedulerR.step(val_rloss)
            # save the best model parameters
            if val_sumloss < globals()["smallestLoss"]:
                globals()["smallestLoss"] = val_sumloss
                # do check pointing
                torch.save(Hnet.state_dict(),
                           '%s/netH_epoch_%d,sumloss=%.f,Hloss=%.6f.pth' % (
                               opt.outckpts, epoch, val_sumloss, val_hloss))
                torch.save(Rnet.state_dict(),
                           '%s/netR_epoch_%d,sumloss=%.6f,Rloss=%.6f.pth' % (
                               opt.outckpts, epoch, val_sumloss, val_rloss))
        writer.close()
    # test mode
    else:
        test_loader = DataLoader(test_dataset, batch_size=opt.batchsize,
                                 shuffle=False, num_workers=int(opt.workers))
        test(opt, test_loader, 0, Hnet=Hnet, Rnet=Rnet, criterion=criterion, logpath=logpath)
        print("-------------------Test is completed-------------------")
def train(opt, train_loader, epoch, voc, embedding, text_train_dataset, Hnet, Rnet, criterion, logsoftmax, logpath):
    """Run one training epoch: hide a text batch inside cover images (Hnet),
    reveal it back (Rnet), and optimize both with loss = errH + beta * errR.

    NOTE(review): relies on module globals `optimizerH`, `optimizerR` and
    `writer` being set by main() before this is called.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    Hlosses = AverageMeter() # record the loss of each epoch Hnet
    Rlosses = AverageMeter() # record the loss of each epoch Rnet
    SumLosses = AverageMeter() # record the each epoch Hloss + β*Rloss
    # switch to train mode
    Hnet.train()
    Rnet.train()
    start_time = time.time()
    for i, data in enumerate(train_loader, 0):
        data_time.update(time.time() - start_time)
        Hnet.zero_grad()
        Rnet.zero_grad()
        all_pics = data # all pics contains coverImg and secretImg, no label needed
        this_batch_size = int(all_pics.size()[0])
        #--------------------------------------------------------------------------------------------------------------------------------
        # The first half of the picture is used as coverImg, and the second half of the picture is used as secretImg
        #cover_img = all_pics[0:this_batch_size, :, :, :] # batch_size, 3, ,256, 256
        cover_img = all_pics
        #--------------------------------------------------------------------------------------------------------------------------------
        # should change secret_img -> secret_text and secret_text has the same size with cover_img
        #secret_img = all_pics[this_batch_size:this_batch_size * 2, :, :, :]
        # Random dialogue sentences, padded/indexed to length 768 per sample.
        text_batches = batch2TrainData(voc, [random.choice(text_train_dataset) for _ in range(this_batch_size)])
        secret_text, text_lengths, target_text, mask, max_target_len = text_batches
        org_text = secret_text
        secret_text = secret_text.cuda()
        secret_text = embedding(secret_text)
        # Reshape 768 x 256 embeddings into an image-shaped 3 x 256 x 256 tensor.
        secret_text = secret_text.view(this_batch_size, 3, 256, 256)
        #--------------------------------------------------------------------------------------------------------------------------------
        # Concat the pictures together to get six-channel pictures as input to the Hnet
        concat_img_text = torch.cat([cover_img.cuda(), secret_text], dim=1)
        # Data into gpu
        if opt.cuda:
            cover_img = cover_img.cuda()
            #secret_img = secret_img.cuda()
            #concat_img = concat_img.cuda()
            secret_text = secret_text.cuda()
            concat_img_text = concat_img_text.cuda()
        #concat_imgv = Variable(concat_img) # concat_img as input to the Hnet
        concat_img_textv = Variable(concat_img_text)
        cover_imgv = Variable(cover_img) # cover_img as label of Hnet
        #container_img = Hnet(concat_imgv) # Get container_img with secret_img
        container_img = Hnet(concat_img_textv)
        errH = criterion(container_img, cover_imgv) # Hnet reconstruction error
        Hlosses.update(errH, this_batch_size) # record H_loss value
        rev_secret_img = Rnet(container_img) # container_img is used as input to the Rnet to get rev_secret_img
        #import pdb
        #pdb.set_trace()
        #secret_imgv = Variable(secret_img) # secret_img as the label of the Rnet
        secret_textv = Variable(secret_text)
        #errR = criterion(rev_secret_img, secret_imgv) # Rnet reconstruction error
        #errR = criterion(rev_secret_img, secret_textv)
        #-----------------------------------------------------------------------------------------------------------------------------
        #import pdb
        #pdb.set_trace()
        # Decode the revealed image back to token probabilities: project onto
        # the (normalized) embedding matrix and take a log-softmax over vocab.
        rec_text = rev_secret_img.view(this_batch_size, 768, 256)
        rec_text_norm = normalizing(rec_text, 2)
        W_norm = normalizing(embedding.weight, 1)
        text_org = org_text.view(-1)
        prob_logits = torch.tensordot(torch.squeeze(rec_text_norm), W_norm, dims=[[2], [1]])
        prob = logsoftmax(prob_logits * 100)
        rec_sent = torch.squeeze(torch.argmax(prob, dim=2))
        prob = prob.view(-1, voc.num_words)
        idx = torch.arange(this_batch_size * 768)
        all_idx = torch.t(torch.stack([idx, text_org]))
        all_prob = gather_nd(prob, all_idx)
        gen_temp = rec_sent.view(-1)
        gen_idx = torch.t(torch.stack([idx, gen_temp.cpu()]))
        gen_prob = gather_nd(prob, gen_idx)
        # Negative log-likelihood of the original tokens under the decoding.
        errR = -torch.mean(all_prob)
        #-----------------------------------------------------------------------------------------------------------------------------
        Rlosses.update(errR, this_batch_size) # record R_loss value
        betaerrR_secret = opt.beta * errR
        err_sum = errH + betaerrR_secret
        SumLosses.update(err_sum, this_batch_size)
        # Calculate the gradient
        err_sum.backward()
        # Optimize the parameters of both networks
        optimizerH.step()
        optimizerR.step()
        # Update the time of a batch
        batch_time.update(time.time() - start_time)
        start_time = time.time()
        # log information
        log = '[%d/%d][%d/%d]\tLoss_H: %.4f Loss_R: %.4f Loss_sum: %.4f \tdatatime: %.4f \tbatchtime: %.4f' % (
            epoch, opt.epochs, i, len(train_loader),
            Hlosses.val, Rlosses.val, SumLosses.val, data_time.val, batch_time.val)
        # print log information
        if i % opt.logfrequency == 0:
            print_log(opt, log, logpath)
        else:
            print_log(opt, log, logpath, console=False)
        # Related operations such as storing records
        # Generate a picture in 100 steps
        if epoch % 1 == 0 and i % opt.resultpicfrequency == 0:
            APD = save_result_pic(opt, this_batch_size, cover_img, container_img.data, epoch, i, opt.trainpics)
            save_text_path = opt.traintexts + '/ResultTexts_epoch%03d_batch%04d.txt' % (epoch, i)
            #import pdb
            #pdb.set_trace()
            # Write original vs reconstructed sentences plus per-sample BLEU.
            avg_bleu = 0
            with open(save_text_path, 'a') as text_file:
                for b in range(this_batch_size):
                    ori = [voc.index2word[x] for x in org_text[b].tolist() if x != 0]
                    recon = [voc.index2word[x] for x in rec_sent[b].tolist() if x != 0]
                    original_text = "{}_Original :".format(b) + " ".join([voc.index2word[x] for x in org_text[b].tolist() if x != 0])
                    recons_text = "{}_Reconstructed:".format(b) + " ".join([voc.index2word[x] for x in rec_sent[b].tolist() if x != 0])
                    text_file.write(original_text + "\n")
                    text_file.write(recons_text + "\n")
                    bleu_score = nltk.translate.bleu_score.sentence_bleu([ori], recon)
                    text_file.write(str(bleu_score) + "\n")
                    avg_bleu += bleu_score
                apd_text = "APD: {}".format(APD) + "\n"
                text_file.write(apd_text)
            avg_bleu = avg_bleu / float(this_batch_size)
            print("Original :" + " ".join([voc.index2word[x] for x in org_text[0].tolist() if x != 0]))
            print()
            print("Reconstructed:" + " ".join([voc.index2word[x] for x in rec_sent[0].tolist() if x != 0]))
            print("Bleu score :{}".format(avg_bleu))
        if i % opt.savefrequency == 0 and i != 0:
            torch.save({
                'epoch': epoch,
                'iteration': i,
                'Hnet': Hnet.state_dict(),
                'Rnet': Rnet.state_dict(),
                'optimizerH': optimizerH.state_dict(),
                'optimizerR': optimizerR.state_dict(),
                'sum_loss': err_sum,
                'H_loss': errH,
                'R_loss': errR,
                'voc_dict': voc.__dict__,
                'embedding': embedding.state_dict()
            }, opt.outckpts + '/{}_{}_{}.tar'.format(epoch, i, 'checkpoint'))
    # Time taken to output an epoch
    """
    epoch_log = "one epoch time is %.4f==================================================" % (
        batch_time.sum) + "\n"
    epoch_log = epoch_log + "epoch learning rate: optimizerH_lr = %.8f optimizerR_lr = %.8f" % (
        Hlosses.avg, Rlosses.avg, SumLosses.avg)
    print_log(opt, epoch_log, logpath)
    """
    if not opt.debug:
        # record learning rate
        writer.add_scalar("lr/H_lr", optimizerH.param_groups[0]['lr'], epoch)
        writer.add_scalar("lr/R_lr", optimizerR.param_groups[0]['lr'], epoch)
        writer.add_scalar("lr/beta", opt.beta, epoch)
        # Every epoch records an average loss on tensorboard display
        writer.add_scalar("train/R_loss", Rlosses.avg, epoch)
        writer.add_scalar("train/H_loss", Hlosses.avg, epoch)
        writer.add_scalar("train/sum_loss", SumLosses.avg, epoch)
def validation(opt, val_loader, epoch, voc, text_val_dataset, Hnet, Rnet, criterion, logsoftmax, logpath):
    """Evaluate Hnet/Rnet on up to 100 validation batches without gradients.

    Returns (val_hloss, val_rloss, val_sumloss).
    NOTE(review): this function references `embedding`, which is a local
    variable of main() and is not declared global — as written this should
    raise NameError at runtime; confirm and pass it in (as train() does).
    NOTE(review): also relies on module global `writer` set by main().
    """
    print("--------------------------------------------------validation begin--------------------------------------------------")
    start_time = time.time()
    Hnet.eval()
    Rnet.eval()
    Hlosses = AverageMeter() # record the loss of each epoch Hnet
    Rlosses = AverageMeter() # record the loss of each epoch Rnet
    count = 0
    for i, data in enumerate(val_loader, 0):
        # Cap validation at 100 batches for speed.
        if count >= 100:
            break
        Hnet.zero_grad()
        Rnet.zero_grad()
        with torch.no_grad():
            all_pics = data # allpics contains coverImg and secretImg, no label needed
            this_batch_size = int(all_pics.size()[0]) # Processing the last batch of each epoch may be insufficient for opt.batchsize
            # The first half of the picture is used as coverImg, and the second half of the picture is used as secretImg
            cover_img = all_pics # batchsize, 3, 256, 256
            #secret_img = all_pics[this_batch_size:this_batch_size * 2, :, :, :]
            text_batches = batch2TrainData(voc, [random.choice(text_val_dataset) for _ in range(this_batch_size)])
            secret_text, text_lengths, target_text, mask, max_target_len = text_batches
            org_text = secret_text
            secret_text = secret_text.cuda()
            secret_text = embedding(secret_text)
            secret_text = secret_text.view(this_batch_size, 3, 256, 256)
            # Concat the pictures together to get six-channel pictures as input to the Hnet
            #concat_img = torch.cat([cover_img, secret_img], dim=1)
            concat_img_text = torch.cat([cover_img.cuda(), secret_text], dim=1)
            # Data into gpu
            if opt.cuda:
                cover_img = cover_img.cuda()
                #secret_img = secret_img.cuda()
                #concat_img = concat_img.cuda()
                concat_img_text = concat_img_text.cuda()
            #concat_imgv = Variable(concat_img) # concat_img as input to the Hnet
            concat_img_textv = Variable(concat_img_text)
            cover_imgv = Variable(cover_img) # cover_img as label of Hnet
            container_img = Hnet(concat_img_textv) # Get container_img with secret_img
            errH = criterion(container_img, cover_imgv) # Hnet reconstruction error
            Hlosses.update(errH, this_batch_size) # record H_loss value
            rev_secret_img = Rnet(container_img) # container_img is used as input to the Rnet to get rev_secret_img
            secret_textv = Variable(secret_text)
            # Decode the revealed image against the normalized embedding matrix
            # (same projection as in train()).
            rec_text = rev_secret_img.view(this_batch_size, 768, 256)
            rec_text_norm = normalizing(rec_text, 2)
            W_norm = normalizing(embedding.weight, 1)
            text_org = org_text.view(-1)
            prob_logits = torch.tensordot(torch.squeeze(rec_text_norm), W_norm, dims=[[2], [1]])
            prob = logsoftmax(prob_logits * 100)
            prob = prob.view(-1, voc.num_words)
            idx = torch.arange(this_batch_size * 768)
            all_idx = torch.t(torch.stack([idx, text_org]))
            all_prob = gather_nd(prob, all_idx)
            errR = -torch.mean(all_prob) # Rnet reconstruction error
            Rlosses.update(errR, this_batch_size) # record R_loss value
            """
            if i % 50 == 0:
                save_result_pic(opt, this_batch_size, cover_img, container_img.data, secret_img, rev_secret_img.data, epoch, i,
                                opt.validationpics)
            """
            count += 1
    val_hloss = Hlosses.avg
    val_rloss = Rlosses.avg
    val_sumloss = val_hloss + opt.beta * val_rloss
    val_time = time.time() - start_time
    val_log = "validation[%d] val_Hloss = %.6f\t val_Rloss = %.6f\t val_Sumloss = %.6f\t validation time=%.2f" % (
        epoch, val_hloss, val_rloss, val_sumloss, val_time)
    print_log(opt, val_log, logpath)
    if not opt.debug:
        writer.add_scalar('validation/H_loss_avg', Hlosses.avg, epoch)
        writer.add_scalar('validation/R_loss_avg', Rlosses.avg, epoch)
        writer.add_scalar('validation/sum_loss_avg', val_sumloss, epoch)
    print("--------------------------------------------------validation end--------------------------------------------------")
    return val_hloss, val_rloss, val_sumloss
def test(opt, test_loader, epoch, Hnet, Rnet, criterion, logpath):
    """Evaluate the image-in-image hiding/reveal pair on the test set.

    Each batch is split in half: the first half are cover images, the second
    half secret images. Hnet hides the secret inside the cover, Rnet recovers
    it, and result pictures are saved for every batch.

    Returns:
        (test_hloss, test_rloss, test_sumloss): average Hnet loss, average
        Rnet loss and their weighted sum (weight ``opt.beta``).
    """
    print("--------------------------------------------------test begin--------------------------------------------------")
    start_time = time.time()
    Hnet.eval()
    Rnet.eval()
    Hlosses = AverageMeter()  # to record the Hloss in one epoch
    Rlosses = AverageMeter()  # to record the Rloss in one epoch
    for i, data in enumerate(test_loader, 0):
        Hnet.zero_grad()
        Rnet.zero_grad()
        with torch.no_grad():
            all_pics = data  # all_pics contain cover_img and secret_img, label is not needed
            # Half the batch; also handles a short final batch.
            this_batch_size = int(all_pics.size()[0] / 2)
            # Front half is the cover, back half is the secret.
            cover_img = all_pics[0:this_batch_size, :, :, :]  # batchsize,3,256,256
            secret_img = all_pics[this_batch_size:this_batch_size * 2, :, :, :]
            # Concat cover and secret into a six-channel input for Hnet.
            concat_img = torch.cat([cover_img, secret_img], dim=1)
            # Move data onto the GPU if requested.
            if opt.cuda:
                cover_img = cover_img.cuda()
                secret_img = secret_img.cuda()
                concat_img = concat_img.cuda()
            concat_imgv = Variable(concat_img)  # concat_img is the input of Hnet
            cover_imgv = Variable(cover_img)  # cover_img is the label of Hnet
            container_img = Hnet(concat_imgv)  # container image carrying the secret
            errH = criterion(container_img, cover_imgv)  # Hnet reconstruction error
            Hlosses.update(errH, this_batch_size)  # record the H loss value
            rev_secret_img = Rnet(container_img)  # recover the secret from the container
            secret_imgv = Variable(secret_img)  # secret_imgv is the label of Rnet
            errR = criterion(rev_secret_img, secret_imgv)  # Rnet reconstruction error
            Rlosses.update(errR, this_batch_size)  # record the R_loss value
            save_result_pic(opt, this_batch_size, cover_img, container_img.data, secret_img, rev_secret_img.data, epoch, i,
                            opt.testpics)
    test_hloss = Hlosses.avg
    test_rloss = Rlosses.avg
    test_sumloss = test_hloss + opt.beta * test_rloss
    test_time = time.time() - start_time
    # Fixed copy-paste defect: this log line previously reported "validation[...]".
    test_log = "test[%d] test_Hloss = %.6f\t test_Rloss = %.6f\t test_Sumloss = %.6f\t test time=%.2f" % (
        epoch, test_hloss, test_rloss, test_sumloss, test_time)
    print_log(opt, test_log, logpath)
    print("--------------------------------------------------test end--------------------------------------------------")
    return test_hloss, test_rloss, test_sumloss
if __name__ == '__main__':
    # Script entry point: delegate to main() defined earlier in this file.
    main()
| changminL/stegano | main.py | main.py | py | 26,162 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "socket.gethostname",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "to... |
73733113787 | from schedule import Scheduler
from session.manager import SessionManager
class GlobalObjectClass:
    """Container for process-wide singletons shared across the server.

    The optional attributes stay ``None`` until they are installed elsewhere
    (presumably during application startup — not visible in this file).
    """
    def __init__(self):
        self.text: str = ""  # NOTE(review): purpose not evident from this file
        self.database: str = ""  # database location/identifier (set elsewhere)
        self.session_manager: SessionManager | None = None  # installed at startup
        self.scheduler: Scheduler | None = None  # installed at startup
# Module-level singleton shared by importers of this module.
globalObject = GlobalObjectClass()
| fkxxyz/chatgpt-session | server/common.py | common.py | py | 329 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "session.manager.SessionManager",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "schedule.Scheduler",
"line_number": 10,
"usage_type": "name"
}
] |
27973693497 | import json
"""Submit a GEMtractor extraction job to the JSON web API and print the result."""
import json
import urllib.error
import urllib.request

# Read the model into a variable.
with open("../src/test/gene-filter-example-2.xml", "r") as f:
    model = f.read()

# Encode the job: which entities to filter out and which format to export.
job = {
    "export": {
        "network_type": "en",
        "network_format": "sbml"
    },
    "filter": {
        "species": ["h2o", "atp"],
        "reactions": [],
        "enzymes": ["gene_abc"],
        "enzyme_complexes": ["a + b + c", "x + Y", "b_098 + r_abc"],
    },
    "file": model
}

# Set up the request with an explicit JSON content type and length.
req = urllib.request.Request("https://gemtractor.bio.informatik.uni-rostock.de/api/execute")
req.add_header('Content-Type', 'application/json; charset=utf-8')
job_bytes = json.dumps(job).encode('utf-8')
req.add_header('Content-Length', len(job_bytes))

# Fire the job; the context manager guarantees the response is closed
# (the original leaked the response object).
try:
    with urllib.request.urlopen(req, job_bytes) as response:
        # do whatever you want with the returned file:
        print(response.read())
except urllib.error.HTTPError as e:
    # There was a problem; HTTPError exposes `code` and `reason`
    # (the original read the nonexistent Python-2 attribute `message`).
    print("bad request: " + str(getattr(e, 'code', repr(e))) + " " + str(getattr(e, 'reason', repr(e))))
    print(e.readlines())
| binfalse/GEMtractor | clients/PythonClient.py | PythonClient.py | py | 1,081 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "urllib.request.request.Request",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 24,
"usage_type": "name"
},
{
"api_nam... |
19875373112 | import sys
import os
from Model.Pairwise.Embedding import RelationEmbedding
from typing import List, Dict, Tuple
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASE_DIR)
class InputExample(object):
    """A single training/test example for simple sequence classification."""

    # Shared relation-embedding vocabulary, loaded once per process.
    relationEmbedding = RelationEmbedding()
    # Class-level registries of relations found / not found in the vocabulary.
    haveRels = {}
    noRels = {}

    def __init__(self, guid, text_a, text_b=None, label=None, entitys=None, rels=None,
                 answerType: str = '', answerStr: str = ''):
        """
        Args:
            guid: Unique id for the example.
            text_a: First text sequence.
            text_b: (Optional) second text sequence.
            label: (Optional) gold label.
            entitys: (Optional) linked freebase entities.
            rels: (Optional) freebase relations; defaults to an empty list.
            answerType: Answer type string for the query graph.
            answerStr: Answer string for the query graph.
        """
        self.guid = guid
        self.text_a = text_a
        self.text_b = text_b
        self.label = label
        self.freebaseEntity = entitys
        # Fixed the shared-mutable-default pitfall (`rels=[]`): build a fresh
        # list per instance instead of sharing one list across all examples.
        self.freebaseRels = [] if rels is None else rels
        self.relsId = InputExample.relation2id(self.freebaseRels)
        self.answerType = answerType
        self.answerStr = answerStr

    @staticmethod
    def relation2id(freebaseRels):
        """Map relation names to ids, padding/truncating to exactly two ids."""
        relsId: List[int] = []
        for rel in freebaseRels:
            if rel in InputExample.relationEmbedding.rel2id:
                relsId.append(InputExample.relationEmbedding.rel2id[rel])
                InputExample.haveRels[rel] = 1
            else:
                # Unknown relation: fall back to the UNK id and remember it.
                relsId.append(InputExample.relationEmbedding.rel2id['UNK'])
                # print(rel)
                InputExample.noRels[rel] = 1
        while len(relsId) < 2:
            relsId.append(InputExample.relationEmbedding.rel2id['UNK'])
        return relsId[0:2]
| EnernityTwinkle/KBQA-QueryGraphSelection | RankingQueryGraphs/Model/common/InputExample.py | InputExample.py | py | 1,494 | python | en | code | 5 | github-code | 6 | [
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line... |
19147556064 | '''PROGRAM ANALISIS VARIANSI (Rata-Rata n Populasi -- Kalo Variansi sama dari uji
Levene test)'''
import scipy.stats as st
# One-way ANOVA over k populations; equal variances are assumed (the header
# notes they should be verified beforehand, e.g. with a Levene test).
print("      H0 : miu sampe n sama semua")
print("      H1 : Ada miu yang tidak sama\n")
alfa = input("Tingkat Signifikansi : ")
jumlah_populasi = int(input("Jumlah Populasi : "))
# Hard-coded sample data: one list of observations per population.
data_populasi = [[17.5,16.9,15.8,18.6],[16.4,19.2,17.7,15.4],
                 [20.3,15.7,17.8,18.9],[14.6,16.7,20.8,18.9],
                 [17.5,19.2,16.5,20.5],[18.3,16.2,17.5,20.1]]
#data_populasi = [[1.06,0.79,0.82,0.89,1.05,0.95,0.65,1.15,1.12],
#                 [1.58,1.45,0.57,1.16,1.12,0.91,0.83,0.43],
#                 [0.29,0.06,0.44,0.55,0.61,0.43,0.51,0.10,0.53,
#                  0.34,0.06,0.09,0.17,0.17,0.60]]
#data_populasi = [[42.5,39.3,39.6,39.9,42.9,43.6],
#                 [39.8,40.1,40.5,42.3,42.5,43.1],
#                 [40.2,40.5,41.3,43.4,44.9,45.1],
#                 [41.3,42.2,43.5,44.2,45.9,42.3]]
#data_populasi = []
ukuran_sampel = input("Ukuran sampel sama (Ya/Tidak) : ")
# (Disabled interactive entry of the population data.)
#for i in range(1,jumlah_populasi+1):
#    populasi_i = []
#    jumlah_data_i = int(input("Jumlah data populasi ke {0} : ".format(i)))
#    for j in range(1,jumlah_data_i+1):
#        data_j = float(input("Data ke {0} : ".format(j)))
#        populasi_i.append(data_j)
#    print("\n")
#    data_populasi.append(populasi_i)
# JKT (total), JKK (between-groups) and JKG (error) sums of squares are used
# to build the F statistic.
#=============================================
x_kuadrat = 0
jumlah_nilai_T = []
k = jumlah_populasi
nilai_T_masing_kuadrat = []
N = 0
if ukuran_sampel == "Ya" :
    # Equal sample sizes: n observations in every one of the k populations.
    n = len(data_populasi[0])
    for i in range (0,jumlah_populasi):
        for j in range (0,len(data_populasi[i])):
            x_kuadrat += (data_populasi[i][j])**2
#        print(x_kuadrat)
        jumlah_nilai_i = sum (data_populasi[i])
        jumlah_nilai_T.append(jumlah_nilai_i)
        nilai_T_masing_kuadrat.append((jumlah_nilai_i)**2)
    print("Jumlah nilai Ti. =",jumlah_nilai_T)
    jumlah_nilai_T_kuadrat = (sum(jumlah_nilai_T))**2
    JKT = x_kuadrat-((jumlah_nilai_T_kuadrat)/(n*k))
    print("JKT = :",round(JKT,4))
    JKK = (sum(nilai_T_masing_kuadrat)/n) - ((jumlah_nilai_T_kuadrat)/(n*k))
    print("JKK = :",round(JKK,4))
    print("JKT - JKK = :",round(JKT-JKK,4))
    # Mean squares: between-groups (s1_2) and within-groups/error (s2_2).
    s1_2 = JKK/(k-1)
    s2_2 = (JKT-JKK)/(k*(n-1))
    print("\nRata-rata jumlah Kuadrat")
    print("s1^2 = {0}        s^2 = {1}".format(round(s1_2,4),round(s2_2,4)))
    F = s1_2 / s2_2
    print("F hitung =",round(F,4))
    # Critical F value at dfn = k-1, dfd = k(n-1).
    distribusi_f = st.f(k-1,k*(n-1))
    x = distribusi_f.ppf(1-float(alfa))
    print("F dengan alfa =",str(alfa),",dfn =",str(k-1),",dan dfd =",str(k*(n-1)),"adalah"
          ,str(round(x,4)))
    print("\n")
    print("  {0}   {1}   {2}      {3}     {4}"
          .format("Sumber Variansi","Derajat Bebas",
                  "Jumlah Kuadrat", "RJK", "Statistik F"))
    print("{0}       {1}       {2}     {3}    {4}"
          .format("AntarMesin(Kolom)",k-1,round(JKK,4),
                  round(s1_2,4),round(F,4)))
    print("      {0}           {1}       {2}      {3}"
          .format("Galat",k*(n-1),round(JKT-JKK,4),
                  round(s2_2,4)))
    print("      {0}           {1}       {2}"
          .format("Total",k*(n-1)+k-1,round(JKT,4)))
elif ukuran_sampel == "Tidak" :
    # Unequal sample sizes: N is the total observation count over all groups.
    for i in range (0,jumlah_populasi):
        for j in range (0,len(data_populasi[i])):
            x_kuadrat += (data_populasi[i][j])**2
#        print(x_kuadrat)
        jumlah_nilai_i = sum (data_populasi[i])
        jumlah_nilai_T.append(jumlah_nilai_i)
        nilai_T_masing_kuadrat.append(((jumlah_nilai_i)**2)/len(data_populasi[i]))
        N_jumlah = len(data_populasi[i])
        N += N_jumlah
    print("Jumlah nilai Ti. =",jumlah_nilai_T)
    jumlah_nilai_T_kuadrat = (sum(jumlah_nilai_T))**2
    JKT = x_kuadrat-((jumlah_nilai_T_kuadrat)/(N))
    print("JKT = :",round(JKT,4))
    JKK = sum(nilai_T_masing_kuadrat) - ((jumlah_nilai_T_kuadrat)/(N))
    print("JKK = :",round(JKK,4))
    print("JKT - JKK = :",round(JKT-JKK,4))
    # Mean squares: between-groups (s1_2) and within-groups/error (s2_2).
    s1_2 = JKK/(k-1)
    s2_2 = (JKT-JKK)/(N-k)
    print("\nRata-rata jumlah Kuadrat")
    print("s1^2 = {0}        s^2 = {1}".format(round(s1_2,4),round(s2_2,4)))
    F = s1_2 / s2_2
    print("F hitung =",round(F,4))
    # Critical F value at dfn = k-1, dfd = N-k.
    distribusi_f = st.f(k-1,N-k)
    x = distribusi_f.ppf(1-float(alfa))
    print("F dengan alfa =",str(alfa),",dfn =",str(k-1),",dan dfd =",str(N-k),"adalah"
          ,str(round(x,4)))
    print("\n")
    print("  {0}   {1}   {2}      {3}     {4}"
          .format("Sumber Variansi","Derajat Bebas",
                  "Jumlah Kuadrat", "RJK", "Statistik F"))
    print("{0}       {1}       {2}     {3}    {4}"
          .format("AntarMesin(Kolom)",k-1,round(JKK,4),
                  round(s1_2,4),round(F,4)))
    print("      {0}           {1}       {2}      {3}"
          .format("Galat",N-k,round(JKT-JKK,4),
                  round(s2_2,4)))
    print("      {0}           {1}       {2}"
          .format("Total",N-1,round(JKT,4)))
#print(st.f.sf(2,2,12)) # this returns the p-value directly
print("\n")
# Decision rule: reject H0 when the computed F exceeds the critical value.
if F > float(round(x,4)) :
    print("Karena F hitung > F tabel, H0 ditolak")
else :
    print("Karena F hitung < F tabel, H0 tidak dapat ditolak")
'''RUMUS CEPAT'''
#print(st.f_oneway(data_populasi[0],data_populasi[1],data_populasi[2])) | fstevenm/Project-Statistics | Statistic Method/Analisis Variansi Satu Arah.py | Analisis Variansi Satu Arah.py | py | 5,809 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scipy.stats.f",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_number": 75,
"usage_type": "name"
},
{
"api_name": "scipy.stats.f",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "scipy.stats",
"line_numb... |
35694943116 | from enum import Enum, unique
@unique
class Move(Enum):
    """The nine possible one-step grid moves (including staying in place)."""
    # Plain int values throughout: the original had stray trailing commas
    # (e.g. ``THE_SAME = 0,``) which silently turned most values into
    # 1-tuples like ``(0,)`` while RIGHT_TOP/RIGHT_DOWN stayed plain ints.
    THE_SAME = 0
    LEFT = 1
    TOP = 2
    RIGHT = 3
    DOWN = 4
    LEFT_TOP = 5
    RIGHT_TOP = 6
    LEFT_DOWN = 7
    RIGHT_DOWN = 8
class P:
    # Elementary step probabilities; the horizontal and vertical components
    # are treated as independent in calculate_move_probability().
    # Horizontal: Left + Right + Horizontal_Same = 0.2 + 0.4 + 0.4 = 1.
    # Vertical:   Top + Down + Vertical_Same    = 0.3 + 0.2 + 0.5 = 1.
    Left = 0.2
    Right = 0.4
    Top = 0.3
    Down = 0.2
    Vertical_Same = 0.5
    Horizontal_Same = 0.4
def classify_move(x_diff, y_diff):
    """Translate a (x_diff, y_diff) displacement into a Move.

    Returns None for displacements outside the nine one-step moves.
    """
    displacement_to_move = {
        (0, 0): Move.THE_SAME,
        (-1, 0): Move.LEFT,
        (0, 1): Move.TOP,
        (1, 0): Move.RIGHT,
        (0, -1): Move.DOWN,
        (-1, 1): Move.LEFT_TOP,
        (1, 1): Move.RIGHT_TOP,
        (-1, -1): Move.LEFT_DOWN,
        (1, -1): Move.RIGHT_DOWN,
    }
    return displacement_to_move.get((x_diff, y_diff))
def calculate_move_probability(move):
    """Probability of a single elementary move (independent axes)."""
    probabilities = {
        Move.THE_SAME: P.Vertical_Same * P.Horizontal_Same,
        Move.LEFT: P.Left * P.Vertical_Same,
        Move.RIGHT: P.Right * P.Vertical_Same,
        Move.TOP: P.Top * P.Horizontal_Same,
        Move.DOWN: P.Down * P.Horizontal_Same,
        Move.LEFT_TOP: P.Left * P.Top,
        Move.LEFT_DOWN: P.Left * P.Down,
        Move.RIGHT_TOP: P.Right * P.Top,
        Move.RIGHT_DOWN: P.Right * P.Down,
    }
    return probabilities[move]
def calculate_common_direction_probability(path):
    """Sum of the individual move probabilities over *path*."""
    return sum(calculate_move_probability(move) for move in path)
def calculate_single_path_probability(path):
    """Probability of taking exactly this sequence of moves (product rule)."""
    probability = 1
    for step in path:
        probability = probability * calculate_move_probability(step)
    return probability
def calculate_paths(paths_success, paths_fail, moves, x0, y0, step, max_steps=5):
    """Enumerate every path of ``max_steps`` elementary moves from (x0, y0).

    Paths that end back at the origin are appended (as copies of the move
    list) to ``paths_success``; all other paths go to ``paths_fail``.

    This recursion could compute probabilities directly without copying moves
    into path lists, but this form is clearer, easily testable and debuggable.

    Args:
        paths_success: output list of successful Move sequences.
        paths_fail: output list of unsuccessful Move sequences.
        moves: scratch list of moves taken so far (mutated in place).
        x0, y0: current position.
        step: number of moves taken so far.
        max_steps: total path length (default 5, the original hard-coded depth).
    """
    if step == max_steps:
        if x0 == 0 and y0 == 0:
            paths_success.append(moves.copy())
        else:
            paths_fail.append(moves.copy())
    elif step < max_steps:
        for x in range(-1, 2):
            for y in range(-1, 2):
                moves.append(classify_move(x, y))
                calculate_paths(paths_success, paths_fail, moves,
                                x0 + x, y0 + y, step + 1, max_steps)
                del moves[-1]
def calculate_multiple_paths_probability(paths):
    """Total probability of a collection of mutually exclusive paths."""
    return sum(calculate_single_path_probability(path) for path in paths)
def test1():
    """All nine one-step moves together must cover the probability space."""
    all_moves = [Move.THE_SAME,
                 Move.LEFT,
                 Move.LEFT_DOWN,
                 Move.DOWN,
                 Move.RIGHT_DOWN,
                 Move.RIGHT,
                 Move.RIGHT_TOP,
                 Move.TOP,
                 Move.LEFT_TOP]
    total = calculate_common_direction_probability(all_moves)
    print(round(total, 0) == 1)
def test2():
    """A two-move path multiplies the per-move probabilities (0.1 * 0.2)."""
    probability = calculate_single_path_probability([Move.LEFT, Move.RIGHT])
    print(round(probability, 2) == 0.02)
def test3():
    """Success and failure path probabilities must sum to one."""
    successes = []
    failures = []
    scratch = []
    calculate_paths(successes, failures, scratch, 0, 0, 0)
    p_success = calculate_multiple_paths_probability(successes)
    p_fail = calculate_multiple_paths_probability(failures)
    print(round(p_success + p_fail, 0) == 1)
def test():
    """Run the whole print-based self-check suite."""
    for check in (test1, test2, test3):
        check()
def main():
    """Print the probability of returning to the origin after five moves,
    followed by the probability of ending anywhere else."""
    successes = []
    failures = []
    scratch = []
    calculate_paths(successes, failures, scratch, 0, 0, 0)
    print(calculate_multiple_paths_probability(successes))
    print(calculate_multiple_paths_probability(failures))
#test()
# Entry point: run the full path enumeration and print both probabilities.
main()
{
"api_name": "enum.Enum",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "enum.unique",
"line_number": 4,
"usage_type": "name"
}
] |
764666346 | # Django初期化
import os, django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")
django.setup()
# views.py
from datetime import date
from django.db.models import Count, Q
from app.models import Staff
today = date(2023, 2, 14)  # fixed date for behavior verification
# Per-staff delivery counts for `today`, split into delivered (receiver set)
# and unknown (receiver missing), via two filtered Count aggregates.
qs = (
    Staff.objects
    .values("pk", "name")  # group-by key
    .annotate(
        delivery_num=Count(
            "delivery",
            filter=Q(
                delivery__date=today,
                delivery__receiver__isnull=False,
            ),
        ),
        unknown_num=Count(
            "delivery",
            filter=Q(
                delivery__date=today,
                delivery__receiver__isnull=True,
            ),
        ),
    )
).values("pk", "name", "delivery_num", "unknown_num")
## Expand the queryset into a list of dicts; no manual repacking is needed.
values = list(qs)
# index.html
## `today` does not need to be attached to each row; pass it separately.
print(today)
for staff in values:
    print(staff)
# ----------------------------
# Inspect the generated SQL.
print("### valuesに変更したSQL")
def printsql(query):
    # Pretty-print the compiled SQL of a Django query.
    from sqlparse import format as sfmt
    print(sfmt(str(query), reindent_aligned=True))
printsql(qs.query)
# ----------------------------
print("### SQLを観察してORMを組みなおした改善版")
from django.db.models import FilteredRelation, F
# ORM query implementation: a single FilteredRelation replaces the two
# per-aggregate date filters above.
qs = (
    Staff.objects
    .values("pk", "name")  # group-by key
    .annotate(
        dlist=FilteredRelation("delivery", condition=Q(delivery__date=today)),
        delivery_num=Count("dlist__receiver"),
        unknown_num=Count(
            "dlist",
            filter=Q(dlist__receiver__isnull=True),
        ),
    ).values("pk", "name", "delivery_num", "unknown_num")
)
printsql(qs.query)
| shimizukawa/pycon-apac-2023-django-orm-dojo | src/try/try2-after.py | try2-after.py | py | 1,811 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.environ.setdefault",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "django.setup",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"... |
11812609282 | """empty message
Revision ID: 08084a992d8b
Revises:
Create Date: 2018-03-23 09:28:07.017990
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '08084a992d8b'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: create the users table, the businesses table
    (with indexes on name/location/category), and the reviews table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(), nullable=False),
    sa.Column('email', sa.String(), nullable=False),
    sa.Column('password', sa.String(), nullable=False),
    sa.Column('hash_key', sa.String(), nullable=False),
    sa.Column('activate', sa.String(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email'),
    sa.UniqueConstraint('hash_key'),
    sa.UniqueConstraint('username')
    )
    op.create_table('businesses',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('logo', sa.String(), nullable=True),
    sa.Column('location', sa.String(), nullable=True),
    sa.Column('category', sa.String(), nullable=True),
    sa.Column('bio', sa.String(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_businesses_category'), 'businesses', ['category'], unique=False)
    op.create_index(op.f('ix_businesses_location'), 'businesses', ['location'], unique=False)
    op.create_index(op.f('ix_businesses_name'), 'businesses', ['name'], unique=False)
    op.create_table('reviews',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(), nullable=True),
    sa.Column('desc', sa.String(), nullable=True),
    sa.Column('user_id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('business_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['business_id'], ['businesses.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop reviews, the businesses indexes and table,
    then users (reverse dependency order)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('reviews')
    op.drop_index(op.f('ix_businesses_name'), table_name='businesses')
    op.drop_index(op.f('ix_businesses_location'), table_name='businesses')
    op.drop_index(op.f('ix_businesses_category'), table_name='businesses')
    op.drop_table('businesses')
    op.drop_table('users')
    # ### end Alembic commands ###
| victorjambo/WeConnect | migrations/versions/08084a992d8b_.py | 08084a992d8b_.py | py | 2,922 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
10414559833 | import collections
from typing import Any, List
import torch
from executorch.exir.dialects.edge.arg.model import BaseArg
from executorch.exir.dialects.edge.arg.type import ArgType
def extract_return_dtype(
    returns: Any, sample_returns: List[BaseArg]
) -> List[torch.dtype]:
    """Extract the dtype from a return value."""
    if not isinstance(returns, collections.abc.Sequence):
        returns = [returns]
    dtypes = []
    for value, spec in zip(returns, sample_returns):
        if spec.type in (ArgType.TensorList, ArgType.TensorOptList):
            # All tensors in a tensor list are assumed to share one dtype,
            # so a single entry is recorded and the scan stops here.
            assert (
                value is not None
            ), f"Expecting non-None return value for {spec} but got None"
            dtypes.append(value.dtype)
            break
        if spec.type in (ArgType.Tensor, ArgType.TensorOpt):
            assert (
                value is not None
            ), f"Expecting non-None return value for {spec} but got None"
            dtypes.append(value.dtype)
    return dtypes
| pytorch/executorch | exir/dialects/edge/dtype/utils.py | utils.py | py | 1,125 | python | en | code | 479 | github-code | 6 | [
{
"api_name": "typing.Any",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "executorch.exir.dialects.edge.arg.model.BaseArg",
"line_number": 11,
"usage_type": "name"
},
{
"api_name":... |
41383190209 | import math
from abc import ABC, abstractmethod
from dataclasses import dataclass
from hvac import Quantity
from hvac.fluids import Fluid
from hvac.refrigerant_piping.copper_tubing import CopperTube
from hvac.fluid_flow import Pipe, Circular
Q_ = Quantity
@dataclass
class RefrigerantCycleData:
    """Specification of a vapor-compression refrigerant cycle.

    ``__post_init__`` derives the pressures and state points needed for
    refrigerant-line sizing from the saturation temperatures plus the
    superheat/subcooling settings.
    """
    rfg: Fluid  # refrigerant
    T_eva: Quantity  # saturated suction temperature = evaporating temperature
    T_con: Quantity  # saturated condensing temperature
    ESH: Quantity  # evaporator superheat
    CSC: Quantity  # condensor subcooling
    CSH: Quantity  # compressor superheat
    def __post_init__(self):
        # Terminal temperatures: evaporator outlet (superheated) and
        # condenser inlet (superheated) / outlet (subcooled).
        self.T_eva_o = self.T_eva + self.ESH
        self.T_con_i = self.T_con + self.CSH
        self.T_con_o = self.T_con - self.CSC
        # Saturation pressures at the evaporating and condensing temperatures.
        self.P_eva = self.rfg(T=self.T_eva, x=Q_(0, 'frac')).P
        self.P_con = self.rfg(T=self.T_con, x=Q_(0, 'frac')).P
        # Refrigerant states at condenser inlet/outlet.
        self.rfg_con_i = self.rfg(P=self.P_con, T=self.T_con_i)
        self.rfg_con_o = self.rfg(P=self.P_con, T=self.T_con_o)
        # Evaporator inlet keeps the condenser-outlet enthalpy (isenthalpic expansion).
        self.rfg_eva_i = self.rfg(P=self.P_eva, h=self.rfg_con_o.h)
        self.rfg_eva_o = self.rfg(P=self.P_eva, T=self.T_eva_o)
        # Enthalpy rise across the evaporator (refrigeration effect per unit mass).
        self.q_eva = self.rfg_eva_o.h - self.rfg_eva_i.h
class RefrigerantLine(ABC):
    """Abstract base class for sizing one refrigerant line of the cycle."""
    # Upper flow-velocity limit; set by concrete subclasses.
    vr_allow_max: Quantity = None
    # Per-diameter minimum velocities (oil return); only vapor lines set this.
    vr_allow_min: dict[str, Quantity] | None = None
    def __init__(
        self,
        rfg_cycle_data: RefrigerantCycleData,
        Q_eva_max: Quantity,
        Q_eva_min: Quantity | None = None,
    ):
        """
        Parameters
        ----------
        rfg_cycle_data:
            Instance of dataclass RefrigerantCycleData containing the
            specifications of the refrigerant cycle.
        Q_eva_max:
            Maximum evaporator capacity. This will determine the maximum
            flow velocity of the refrigerant.
        Q_eva_min: optional, default None
            Minimum evaporator capacity. This will determine the minimum
            flow velocity of the refrigerant. Leave to default `None` in case of
            an ON/OFF-controlled compressor.
        """
        self.rcd = rfg_cycle_data
        self.Q_eva_max = Q_eva_max
        self.Q_eva_min = Q_eva_min or Q_eva_max
        # if Q_eva_min is None, set self.Q_eva_min equal to Q_eva_max
        # Mass and volume flow rates at the capacity extremes.
        self.mr_max = self._get_mr(self.Q_eva_max)
        self.Vr_max = self._get_Vr(self.mr_max)
        self.mr_min = self._get_mr(self.Q_eva_min)
        self.Vr_min = self._get_Vr(self.mr_min)
    def _get_mr(self, Q) -> Quantity:
        """Get mass flow rate of refrigerant in the system."""
        return Q / self.rcd.q_eva
    @abstractmethod
    def _get_Vr(self, mr) -> Quantity:
        """Get volume flow rate of refrigerant."""
        ...
    @abstractmethod
    def get_vr(self, *args, **kwargs) -> tuple:
        """Get refrigerant velocity."""
        ...
    def _check_vr_max(self, vr_max):
        # 'OK' while below the noise/erosion limit, 'TOO HIGH' otherwise.
        if vr_max.to('feet / min') < self.vr_allow_max:
            r = 'OK'
        else:
            r = 'TOO HIGH'
        return r
    def _check_vr_min(self, vr_min, copper_tube, riser):
        # Placeholder (no-op); VaporLine overrides this with the real check.
        ...
    @abstractmethod
    def get_dp(self, copper_tube: CopperTube, Leq: Quantity) -> Quantity:
        """Get pressure drop across refrigeration line."""
        ...
class VaporLine(RefrigerantLine):
    """Common behavior for vapor lines (suction and discharge)."""
    @abstractmethod
    def _get_Vr(self, mr) -> Quantity:
        ...
    def get_vr(self, copper_tube: CopperTube, riser: bool = True) -> tuple:
        """
        Get maximum and minimum flow velocity of refrigerant.
        This method also checks if maximum and minimum velocity are within the
        allowable upper and lower limits to avoid noise on one hand and to
        ensure proper oil return on the other hand.
        Parameters
        ----------
        copper_tube:
            Instance of dataclass CopperTube containing copper tube specs.
        riser: default True
            Indicate if the pipe is a vertical riser (True) or not (False).
        Returns
        -------
        Tuple with 4 elements:
        1. the flow velocity at maximum capacity
        2. string that indicates if maximum flow velocity is 'OK' or 'TOO HIGH'
        3. the flow velocity at minimum capacity
        4. string that indicates if minimum flow velocity is 'OK', 'TOO LOW', or
        'TOO HIGH'
        """
        # Inner cross-sectional area of the tube.
        A = math.pi * (copper_tube.di ** 2) / 4
        vr_max = self.Vr_max / A
        r_max = self._check_vr_max(vr_max)
        vr_min = self.Vr_min / A
        r_min = self._check_vr_min(vr_min, copper_tube, riser)
        return vr_max, r_max, vr_min, r_min
    def _check_vr_max(self, vr_max):
        # NOTE(review): identical to RefrigerantLine._check_vr_max; the
        # override could be removed without changing behavior.
        if vr_max.to('feet / min') < self.vr_allow_max:
            r = 'OK'
        else:
            r = 'TOO HIGH'
        return r
    def _check_vr_min(self, vr_min, copper_tube, riser):
        vr_min = vr_min.to('feet / min')
        vr_allow_min = self.vr_allow_min[copper_tube.dn]
        # minimum allowable flow velocity to ensure oil return
        if not riser:
            # Horizontal runs tolerate a lower velocity than vertical risers.
            vr_allow_min *= 0.75
        if vr_allow_min <= vr_min < self.vr_allow_max:
            r = 'OK'
        elif vr_min < vr_allow_min:
            r = 'TOO LOW'
        else:  # vr_min >= self.vr_allow_max:
            r = 'TOO HIGH'
        return r
    @abstractmethod
    def get_dp(self, copper_tube: CopperTube, Leq: Quantity) -> Quantity:
        ...
class SuctionLine(VaporLine):
    """Suction line (evaporator outlet -> compressor inlet) sizing."""
    vr_allow_max = Q_(4000, 'ft / min')
    vr_allow_min = {
        # minimum allowable flow velocity for proper oil return in riser @
        # saturated suction temperature of 20 °F
        k: Q_(v, 'feet / min') for k, v in
        [
            ('3/8', 370), ('1/2', 460),
            ('5/8', 520), ('3/4', 560),
            ('7/8', 600), ('1 1/8', 700),
            ('1 3/8', 780), ('1 5/8', 840),
            ('2 1/8', 980), ('2 5/8', 1080),
            ('3 1/8', 1180), ('3 5/8', 1270),
            ('4 1/8', 1360)
        ]
    }
    def _get_Vr(self, mr):
        # volume flow rate of refrigerant at evaporator outlet
        return mr / self.rcd.rfg_eva_o.rho
    def get_dp(self, copper_tube: CopperTube, Leq: Quantity) -> Quantity:
        """
        Get pressure drop across suction line.
        Parameters
        ----------
        copper_tube:
            Instance of dataclass CopperTube containing copper tube specs.
        Leq:
            Equivalent length of suction line (including equivalent length
            of fittings).
        """
        # Fluid properties taken at saturated vapor (x=1) and evaporating pressure.
        pipe = Pipe.create(
            length=Leq,
            wall_roughness=Q_(0.0015, 'mm'),
            fluid=self.rcd.rfg(P=self.rcd.P_eva, x=Q_(1, 'frac')),
            cross_section=Circular.create(copper_tube.di),
            volume_flow_rate=self.Vr_max
        )
        return pipe.pressure_drop
class DischargeLine(VaporLine):
    """Discharge line (compressor outlet -> condenser inlet) sizing."""
    vr_allow_max = Q_(3500, 'ft / min')
    vr_allow_min = {
        # minimum allowable flow velocity for proper oil return in riser @
        # saturated condensing temperature of 80 °F
        k: Q_(v, 'feet / min') for k, v in
        [
            ('5/16', 220), ('3/8', 250),
            ('1/2', 285), ('5/8', 315),
            ('3/4', 345), ('7/8', 375),
            ('1 1/8', 430), ('1 3/8', 480),
            ('1 5/8', 520), ('2 1/8', 600),
            ('2 5/8', 665), ('3 1/8', 730)
        ]
    }
    def _get_Vr(self, mr):
        # volume flow rate of refrigerant at condenser inlet
        return mr / self.rcd.rfg_con_i.rho
    def get_dp(self, copper_tube: CopperTube, Leq: Quantity) -> Quantity:
        """
        Get pressure drop across discharge line.
        Parameters
        ----------
        copper_tube:
            Instance of dataclass CopperTube containing copper tube specs.
        Leq:
            Equivalent length of discharge line (including equivalent length
            of fittings).
        """
        # Fluid properties taken at saturated vapor (x=1) and condensing pressure.
        pipe = Pipe.create(
            length=Leq,
            wall_roughness=Q_(0.0015, 'mm'),
            fluid=self.rcd.rfg(P=self.rcd.P_con, x=Q_(1, 'frac')),
            cross_section=Circular.create(copper_tube.di),
            volume_flow_rate=self.Vr_max
        )
        return pipe.pressure_drop
class LiquidLine(RefrigerantLine):
    """Liquid line (condenser outlet -> expansion device) sizing.

    Unlike the vapor lines there is no minimum-velocity (oil return) check,
    only the upper velocity limit.
    """
    vr_allow_max = Q_(600, 'ft / min')
    def _get_Vr(self, mr) -> Quantity:
        # Volume flow rate at the condenser-outlet (subcooled liquid) density.
        return mr / self.rcd.rfg_con_o.rho
    def get_vr(self, copper_tube: CopperTube) -> tuple:
        # Maximum velocity and its verdict ('OK' / 'TOO HIGH') only.
        A = math.pi * (copper_tube.di ** 2) / 4
        vr_max = self.Vr_max / A
        r_max = self._check_vr_max(vr_max)
        return vr_max, r_max
    def get_dp(self, copper_tube: CopperTube, Leq: Quantity) -> Quantity:
        """
        Get pressure drop across liquid line.
        Parameters
        ----------
        copper_tube:
            Instance of dataclass CopperTube containing copper tube specs.
        Leq:
            Equivalent length of discharge line (including equivalent length
            of fittings).
        """
        # Fluid properties taken at saturated liquid (x=0) and condensing pressure.
        pipe = Pipe.create(
            length=Leq,
            wall_roughness=Q_(0.0015, 'mm'),
            fluid=self.rcd.rfg(P=self.rcd.P_con, x=Q_(0, 'frac')),
            cross_section=Circular.create(copper_tube.di),
            volume_flow_rate=self.Vr_max
        )
        return pipe.pressure_drop
    def get_dT(self, dP: Quantity, H: Quantity) -> Quantity:
        """
        Get equivalent change in saturated temperature.
        Parameters
        ----------
        dP:
            Total pressure drop across liquid line including fittings
            and accessoires.
        H:
            The elevation height between the outlet and inlet of the liquid
            line (if the outlet is below the inlet, `H` is negative).
        """
        rho = self.rcd.rfg_con_o.rho
        g = Q_(9.81, 'm / s ** 2')
        # Hydrostatic contribution of the elevation difference.
        dP_elev = rho * g * H
        dP_tot = dP + dP_elev
        P_out = self.rcd.P_con - dP_tot # pressure at liquid line outlet = TXV inlet
        T_sat = self.rcd.rfg(P=P_out, x=Q_(0, 'frac')).T # saturation temperature @ P_out
        dT = self.rcd.T_con.to('K') - T_sat.to('K')
        return dT
| TomLXXVI/HVAC | hvac/refrigerant_piping/sizing.py | sizing.py | py | 10,022 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "hvac.Quantity",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "hvac.fluids.Fluid",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "hvac.Quantity",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "hvac.Quantity",
"li... |
19325547874 | from pandas import read_csv
from sklearn.metrics import mean_absolute_percentage_error
from math import sqrt
from matplotlib import pyplot as plt
from pandas import concat
import numpy as np
import scipy.stats as stats
import pandas as pd
def persistence_one_step_ln(train_log, teste_log,
                            show_results=False, plot_result=False):
    """One-step walk-forward persistence forecast on a log-transformed series.

    The persistence (naive) model predicts each test value as the most
    recently observed value. Reports MAPE on the back-transformed (exp)
    scale and optionally plots predictions against observations.

    Parameters:
        train_log: iterable of log-transformed training values.
        teste_log: pandas Series of log-transformed test values.
        show_results: print each predicted/expected pair when True.
        plot_result: show a matplotlib plot of the forecasts when True.
    """
    actuals = teste_log.values
    history = list(train_log)
    predictions = list()
    # Walk-forward validation: forecast one step, then absorb the truth.
    for obs in actuals:
        # persistence model: next value = last observed value
        yhat = history[-1]
        predictions.append(yhat)
        history.append(obs)
        if show_results:
            print('>Predicted=%.3f, Expected=%.3f' % (yhat, obs))
    # Report performance on the original (exponentiated) scale.
    mape = mean_absolute_percentage_error(np.exp(actuals), np.exp(predictions))
    print('MAPE: %.3f' % mape)
    # Plot predicted vs expected values.
    if plot_result:
        plt.plot(np.exp(actuals))
        plt.plot(np.exp(predictions), color='red')
        plt.show()
# Load the log-transformed train/test series from disk.
# NOTE(review): `squeeze=True` in read_csv is deprecated (removed in pandas
# 2.x; use `.squeeze("columns")` instead) -- confirm the pinned pandas version.
train_log = pd.read_csv('../timeserie_log_train.csv',
                        header=0, index_col=0, parse_dates=True, squeeze=True)
teste_log = pd.read_csv('../timeserie_log_test.csv',
                        header=0, index_col=0, parse_dates=True, squeeze=True)
# Run the persistence baseline and plot predictions vs. observations.
persistence_one_step_ln(train_log, teste_log, plot_result=True)
{
"api_name": "sklearn.metrics.mean_absolute_percentage_error",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 37,
"usage_type": "call"
},
{
"a... |
73859053626 | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objects as go
import pandas as pd
import plotly.express as px
# Read elevation data from a csv hosted in the plotly datasets repository.
z_data = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/api_docs/mt_bruno_elevation.csv')
# 3D surface plot of the elevation grid, with z-contours projected down.
fig = go.Figure(data=[go.Surface(z=z_data.values)])
fig.update_traces(contours_z=dict(show=True, usecolormap=True,
                                  highlightcolor="limegreen", project_z=True))
fig.update_layout(title='Mt Bruno Elevation', autosize=False,
                  scene_camera_eye=dict(x=1.87, y=0.88, z=-0.64),
                  width=500, height=500,
                  margin=dict(l=65, r=50, b=65, t=90))
# Small synthetic checkerboard surface demonstrating explicit x/z contours.
cont = go.Figure(go.Surface(
    contours = {
        "x": {"show": True, "start": 1.5, "end": 2, "size": 0.04, "color":"white"},
        "z": {"show": True, "start": 0.5, "end": 0.8, "size": 0.05}
    },
    x = [1,2,3,4,5],
    y = [1,2,3,4,5],
    z = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0]
    ]))
cont.update_layout(
        scene = {
            "xaxis": {"nticks": 20},
            "zaxis": {"nticks": 4},
            'camera_eye': {"x": 0, "y": -1, "z": 0.5},
            "aspectratio": {"x": 1, "y": 1, "z": 0.2}
        })
# 3D scatter of the iris dataset, coloured by species.
df = px.data.iris()
sepal = px.scatter_3d(df, x='sepal_length', y='sepal_width', z='petal_width',color='species')
# Dash layout: the two surfaces side by side, the scatter plot below.
app = dash.Dash(__name__)
app.layout = html.Div(
    children = [
        html.Div([
            dcc.Graph(id="3d-surface-plot", figure=fig),
            dcc.Graph(id="contour-surface-plot", figure=cont)
        ],style={'columnCount': 2}),
        html.Div([
            dcc.Graph(id="3d-scatter-plot", figure=sepal)
        ], style={'columnCount': 1})
    ])
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects.Figure",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "plot... |
25068498925 | from uuid import uuid4
from typing import Tuple, List
from asendia_us_lib.shipping_request import ShippingRequest, Item
from asendia_us_lib.shipping_response import PackageLabel
from purplship.core.units import CustomsInfo, Packages, Options, Weight
from purplship.core.utils import Serializable, DP
from purplship.core.models import (
Documents,
ShipmentRequest,
ShipmentDetails,
Message,
Customs,
)
from purplship.providers.asendia_us.units import (
Service,
Option,
LabelType,
ProcessingLocation,
)
from purplship.providers.asendia_us.error import parse_error_response
from purplship.providers.asendia_us.utils import Settings
def parse_shipment_response(
    responses: Tuple[str, dict], settings: Settings
) -> Tuple[ShipmentDetails, List[Message]]:
    """Parse the carrier's (label, details) response pair into shipment
    details plus any error messages reported by Asendia US."""
    _, payload = responses
    errors = parse_error_response(payload, settings)
    details = None
    # A "packageLabel" entry in the payload signals a successful creation.
    if payload.get("packageLabel") is not None:
        details = _extract_details(responses, settings)
    return details, errors
def _extract_details(response: Tuple[str, dict], settings: Settings) -> ShipmentDetails:
    """Build a ShipmentDetails from the raw (base64 label, details dict) pair."""
    label, details = response
    package = DP.to_object(PackageLabel, details)
    return ShipmentDetails(
        carrier_name=settings.carrier_name,
        carrier_id=settings.carrier_id,
        tracking_number=package.trackingNumber,
        shipment_identifier=package.packageId,
        docs=Documents(label=label),
    )
def shipment_request(
    payload: ShipmentRequest, settings: Settings
) -> Serializable[ShippingRequest]:
    """Map a purplship ShipmentRequest onto the Asendia US ShippingRequest payload.

    Args:
        payload: carrier-agnostic shipment request (parcel, parties, options,
            customs).
        settings: Asendia US connection settings (account / sub-account).

    Returns:
        A Serializable wrapping the populated ShippingRequest.
    """
    package = Packages(payload.parcels).single
    options = Options(payload.options, Option)
    product_code = Service.map(payload.service).value_or_key
    # Reuse the payload's id when present so retries keep stable identifiers.
    unique_id = getattr(payload, "id", uuid4().hex)
    # Fall back to an empty customs declaration when none is provided.
    customs = CustomsInfo(payload.customs or Customs(commodities=[]))
    request = ShippingRequest(
        accountNumber=settings.account_number,
        subAccountNumber=options.asendia_sub_account_number,
        processingLocation=ProcessingLocation.map(
            options.asendia_processing_location or "SFO"
        ).name,
        includeRate=True,
        labelType=LabelType.map(payload.label_type or "PDF").name_or_key,
        orderNumber=unique_id,
        dispatchNumber=unique_id,
        packageID=unique_id,
        recipientTaxID=payload.recipient.state_tax_id,
        # return address (the shipper)
        returnFirstName=payload.shipper.person_name,
        returnLastName=payload.shipper.person_name,
        returnCompanyName=payload.shipper.company_name,
        returnAddressLine1=payload.shipper.address_line1,
        returnAddressLine2=payload.shipper.address_line2,
        returnAddressLine3=None,
        returnProvince=payload.shipper.state_code,
        returnPostalCode=payload.shipper.postal_code,
        returnCountryCode=payload.shipper.country_code,
        returnPhone=payload.shipper.phone_number,
        returnEmail=payload.shipper.email,
        # recipient address
        recipientFirstName=payload.recipient.person_name,
        recipientLastName=payload.recipient.person_name,
        recipientBusinessName=payload.recipient.company_name,
        recipientAddressLine1=payload.recipient.address_line1,
        recipientAddressLine2=payload.recipient.address_line2,
        recipientAddressLine3=None,
        recipientCity=payload.recipient.city,
        recipientProvince=payload.recipient.state_code,
        recipientPostalCode=payload.recipient.postal_code,
        recipientPhone=payload.recipient.phone_number,
        recipientEmail=payload.recipient.email,
        # parcel dimensions and declared value
        totalPackageWeight=package.weight.value,
        weightUnit=package.weight_unit.value.lower(),
        dimLength=package.length.value,
        dimWidth=package.width.value,
        dimHeight=package.height.value,
        dimUnit=package.dimension_unit.value,
        totalPackageValue=options.declared_value,
        currencyType=options.currency,
        productCode=product_code,
        customerReferenceNumber1=payload.reference,
        customerReferenceNumber2=None,
        customerReferenceNumber3=None,
        contentType=("D" if package.parcel.is_document else "M"),
        packageContentDescription=package.parcel.description,
        vatNumber=None,
        # seller defaults to the shipper
        sellerName=payload.shipper.person_name,
        sellerAddressLine1=payload.shipper.address_line1,
        sellerAddressLine2=payload.shipper.address_line2,
        sellerAddressLine3=None,
        sellerProvince=payload.shipper.state_code,
        sellerPostalCode=payload.shipper.postal_code,
        sellerPhone=payload.shipper.phone_number,
        sellerEmail=payload.shipper.email,
        # customs commodity lines
        items=[
            Item(
                sku=item.sku,
                itemDescription=item.description,
                unitPrice=item.value_amount,
                quantity=item.quantity,
                unitWeight=Weight(item.weight, package.weight_unit).value,
                countryOfOrigin=item.origin_country,
                htsNumber=None,
            )
            for item in customs.commodities
        ],
    )
    return Serializable(request)
| danh91/purplship | sdk/extensions/asendia_us/purplship/providers/asendia_us/shipment/create.py | create.py | py | 5,068 | python | en | code | null | github-code | 6 | [
{
"api_name": "typing.Tuple",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "purplship.providers.asendia_us.utils.Settings",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "purplship.providers.asendia_us.error.parse_error_response",
"line_number": 29,
... |
5960535728 | #!/usr/bin/env python3
#
# File: tidal_perturbation_in_circular_binary.py
# Author: Timothy Van Reeth <timothy.vanreeth@kuleuven.be>
# License: GPL-3+
# Description: Calculating the observed flux variations of a tidally
# distorted g-mode pulsation in a circular, synchronised
# binary system
import os
import sys
import glob
import yaml
import numpy as np
import subprocess as sp
import astropy.units as au
from progress.bar import Bar
from binary import circular_binary
from pulsations import pmode_pulsation
def distort_pulsation(star, orb_phase=0., distortion_factor=1.):
    """Scale the mode visibility as a function of the location on the
    stellar surface, following the tidal distortion towards the companion.

    Parameters:
        star: stellar_model object
              the star in which the pulsation is distorted
        orb_phase: float; optional
              current phase of the orbital cycle (default = 0.)
        distortion_factor: float; optional
              scaling factor of the mode visibility across the stellar
              surface (default = 1, i.e. no distortion -> all ones)

    Returns:
        numpy array with the per-cell visibility scaling
    """
    # Second-order Legendre polynomial in the projected angle towards the
    # companion at the current orbital phase.
    projection = np.sin(star.theta) * np.cos(star.phi - 2. * np.pi * orb_phase)
    basefun = 0.5 * (3. * projection ** 2. - 1.)
    # Rescale the base function to [0, 1] across the surface.
    normalised = (basefun - np.nanmin(basefun)) / (np.nanmax(basefun) - np.nanmin(basefun))
    # Blend between a uniform baseline and the tidal pattern such that the
    # surface-averaged behaviour reduces to unity when distortion_factor = 1.
    baseline = 2. * np.ones(star.theta.shape) / (distortion_factor + 1.)
    amplitude = 2. * (distortion_factor - 1.) / (distortion_factor + 1.)
    return baseline + normalised * amplitude
def calculate_flux(binary, pulsation, distortion_factor=1., puls_phase=0., orb_phase=0.):
    """
    Calculate the observed flux of a binary star with a perturbed g-mode pulsation at a
    given pulsation and orbital phase, and indicate if the data point was simulated
    during an eclipse.

    NOTE(review): the tidal distortion is currently disabled -- the
    `distort_pulsation` calls are commented out and `puls_scaling` is fixed
    to 1, so `distortion_factor` has no effect in this version.

    Parameters:
        binary: circular_binary object
                the studied circular, synchronised binary system
        pulsation: gmode_pulsation object
                the g-mode pulsation that will be perturbed and evaluated
        distortion_factor: float; optional
                the scaling factor of the mode visibility
                across the stellar surface (default = 1)
        puls_phase: float; optional
                current phase of the pulsation cycle (in the inertial frame)
                (default = 0.)
        orb_phase: float; optional
                current phase of the orbital cycle
                (default = 0.)

    Returns:
        bin_iflux: float
                the simulated flux caused by (solely) the binarity
        tot_iflux: float
                the total simulated flux at the provided pulsational and
                orbital phases
        puls_iflux: float
                the simulated flux caused by (solely) the distorted pulsation
        ecl_iflag: int
                flag indicating if the calculated data point occurs during an
                eclipse
    """
    ecl_maps = binary.eclipse_masks(orb_phase)
    # surface cells on the observer-facing hemisphere of each component
    primary_vissurf_bool = binary.primary.theta_incl < np.pi/2.
    secondary_vissurf_bool = binary.secondary.theta_incl < np.pi/2.
    # visible AND not eclipsed (as float weights for the flux sums)
    primary_mask = np.array(ecl_maps[0] & primary_vissurf_bool, dtype=float)
    secondary_mask = np.array(ecl_maps[1] & secondary_vissurf_bool, dtype=float)
    primary_vissurf = np.array(primary_vissurf_bool, dtype=float)
    secondary_vissurf = np.array(secondary_vissurf_bool, dtype=float)
    if(binary.primary.pulsating):
        norm_puls = calculate_normalised_pulsations(binary.primary, pulsation, puls_phase)
        # puls_scaling = distort_pulsation(binary.primary, orb_phase=orb_phase, distortion_factor=distortion_factor)
        puls_scaling = 1.
        # convert the mmag-scale pulsation amplitude into a flux factor
        primary_puls = 10.**(-0.0004 * pulsation.amplitude * norm_puls * puls_scaling)
        secondary_puls = np.ones(binary.secondary.theta.shape)
    else:
        norm_puls = calculate_normalised_pulsations(binary.secondary, pulsation, puls_phase)
        # puls_scaling = distort_pulsation(binary.secondary, orb_phase=orb_phase, distortion_factor=distortion_factor)
        puls_scaling = 1.
        secondary_puls = 10.**(-0.0004 * pulsation.amplitude * norm_puls * puls_scaling)
        primary_puls = np.ones(binary.primary.theta.shape)
    # integrate limb-darkened, projected cell fluxes: with pulsation ...
    primary_totflux = np.nansum(2. * np.cos(binary.primary.theta_incl) * binary.primary.cell_weight * binary.primary.limb_darkening() * primary_puls * primary_mask)
    secondary_totflux = np.nansum(2. * np.cos(binary.secondary.theta_incl) * binary.secondary.cell_weight * binary.secondary.limb_darkening() * secondary_puls * secondary_mask)
    # ... without pulsation (binary signal only) ...
    primary_binflux = np.nansum(2. * np.cos(binary.primary.theta_incl) * binary.primary.cell_weight * binary.primary.limb_darkening() * primary_mask)
    secondary_binflux = np.nansum(2. * np.cos(binary.secondary.theta_incl) * binary.secondary.cell_weight * binary.secondary.limb_darkening() * secondary_mask)
    # ... and the eclipse-free reference flux used for normalisation.
    primary_refflux = np.nansum(2. * np.cos(binary.primary.theta_incl) * binary.primary.cell_weight * binary.primary.limb_darkening() * primary_vissurf)
    secondary_refflux = np.nansum(2. * np.cos(binary.secondary.theta_incl) * binary.secondary.cell_weight * binary.secondary.limb_darkening() * secondary_vissurf)
    # convert the light-contribution-weighted flux ratios to mmag
    tot_iflux = -2500.*np.log10( (binary.light_contribution1*primary_totflux/primary_refflux) + (binary.light_contribution2*secondary_totflux/secondary_refflux))
    bin_iflux = -2500.*np.log10( (binary.light_contribution1*primary_binflux/primary_refflux) + (binary.light_contribution2*secondary_binflux/secondary_refflux))
    puls_iflux = tot_iflux - bin_iflux
    # eclipse flag: any masked cell on either component means an eclipse
    if(ecl_maps[0].all() & ecl_maps[1].all()):
        ecl_iflag = 0
    else:
        ecl_iflag = 1
    return bin_iflux, tot_iflux, puls_iflux, ecl_iflag
def calculate_normalised_pulsations(star, pulsation, puls_phase):
    """Dispatch the normalised-pulsation computation based on the mode type.

    Only pressure modes ('p') are implemented; any other mode type aborts
    the program.
    """
    if pulsation.mode_type != 'p':
        print('g modes are currently unsupported')
        sys.exit()
    return calculate_normalised_pmode_pulsations(star, pulsation, puls_phase)
def calculate_normalised_pmode_pulsations(star, pulsation, puls_phase):
    """Evaluate the normalised p-mode flux pattern on the stellar surface.

    Parameters:
        star: stellar_model object
              the pulsating star
        pulsation: pmode_pulsation object
              the simulated p-mode pulsation (uses its azimuthal order `m`
              and luminosity perturbation `Lr`)
        puls_phase: float
              the current phase of the studied pulsation (as seen by the
              observer)

    Returns:
        numpy array with the pulsation pattern, normalised to a surface
        maximum of one, at the phase puls_phase
    """
    # Prograde and retrograde modes (sign of m) travel in opposite
    # directions, so the phase advances with opposite sign.
    sign = 1. if pulsation.m < 0. else -1.
    pattern = pulsation.Lr * np.cos(pulsation.m * star.phi + 2. * np.pi * sign * puls_phase)
    # Normalise by the maximum over the surface.
    return pattern / np.nanmax(pattern)
def read_inlist(inlist_filename='./inlist.yaml'):
    """
    Read in the required variables to calculate r-mode visibilities,
    following the methodology from Saio et al. (2018).

    Parameters:
        inlist_filename: string; optional (default: ./inlist.yaml)
                the path to the YAML inlist

    Returns:
        maindir: string
                main work directory
        binary: circular binary object
                the circular synchronised binary system that will be simulated
        pulsation: pmode_pulsation object
                the pulsation that will be simulated
        distortion_factor: float
                distortion factor of the simulated pulsation
        N_forb_cycles: int
                number of orbital cycles to be simulated
        Nsample_per_cycle: int
                number of simulated data points per orbital cycle
    """
    # NOTE(review): yaml.Loader executes arbitrary YAML tags; prefer
    # yaml.SafeLoader unless custom tags are required -- confirm.
    # The explicit f.close() is redundant inside the `with` block.
    with open(inlist_filename,'r') as f:
        cfg = yaml.load(f, Loader=yaml.Loader)
        f.close()
    # collecting the given parameter values within the appropriate objects
    binary = circular_binary(cfg)
    if cfg['pulsation']['mode_type'] == 'p':
        pulsation = pmode_pulsation(cfg, binary.freq_orb)
    elif cfg['pulsation']['mode_type'] == 'g':
        print('g modes are currently not supported')
        sys.exit()
    else:
        print('Mode type {} not recognised. Exiting.'.format(cfg['pulsation']['mode_type']))
        sys.exit()
    # flag the pulsating component and precompute its pulsation geometry
    if(cfg['pulsation']['pulsating_star'] == 'primary'):
        binary.primary.pulsating = True
        pulsation.calculate_puls_geometry(binary.primary)
    elif(cfg['pulsation']['pulsating_star'] == 'secondary'):
        binary.secondary.pulsating = True
        pulsation.calculate_puls_geometry(binary.secondary)
    return cfg['general']['main_dir'], binary, pulsation, \
           cfg['pulsation']['distortion_factor'], \
           cfg['simulation']['N_forb_cycles'], \
           cfg['simulation']['Nsample_per_cycle']
def save_results(result_filename, time, orb_phases, puls_phases, total_flux, binary_flux, pulsation_flux, eclipse_flags):
    """
    Saving the calculated visibilities of the distorted pulsations.

    Parameters:
        result_filename: string
                absolute path to the results output filename
        time: astropy quantity array
                the time stamps of the simulated data points
        orb_phases: numpy array
                orbital phases corresponding to the different time stamps
        puls_phases: numpy array
                pulsation phases at the different time stamps
        total_flux: numpy array
                the total observed flux variations at the different time stamps (unit: mmag)
        binary_flux: numpy array
                the flux variations from the binary motion at the different time stamps (unit: mmag)
        pulsation_flux: numpy array
                the flux variations from the pulsations at the different time stamps (unit: mmag)
        eclipse_flags: numpy array of integers
                flags indicating if the data point was taken during an eclipse (yes = 1; no = 0)
    """
    # Use a context manager so the file handle is also released when one of
    # the unit conversions or formats below raises (the original leaked it).
    with open(result_filename, 'w') as file:
        headline = ' '*16 + 'time' + ' '*11 + 'orb_phase' + ' '*10 + 'puls_phase' + ' '*10 + 'total_flux' + ' '*14 + 'binary' + ' '*11 + 'pulsation' + ' '*13 + 'eclipse'
        file.write(f'{headline}\n')
        # one fixed-width row per simulated data point (times in days)
        for itime, iorbphase, ipulsphase, iflux, ibin, ipuls, iflag in zip(time.to(au.d).value, orb_phases, puls_phases, total_flux, binary_flux, pulsation_flux, eclipse_flags):
            data = f' {itime:18e} {iorbphase:18e} {ipulsphase:18e} {iflux:18e} {ibin:18e} {ipuls:18e} {iflag}\n'
            file.write(data)
if __name__ == "__main__":
    # Reading the input parameters / variables from the inlist given on the CLI
    maindir, binary, pulsation, distortion_factor, \
    N_forb_cycles, Nsample_per_cycle = read_inlist(sys.argv[1])
    print('Inlist read')
    # Setting the output directory (encodes masses, orbital frequency and
    # inclination) and copying the used inlist for reproducibility
    mass1 = binary.primary.Mstar.to(au.Msun).value
    mass2 = binary.secondary.Mstar.to(au.Msun).value
    freq_orb = binary.freq_orb.to(1./au.d).value
    outdir = f'{maindir}binary_M{int(round(100.*mass1))}M{int(round(100.*mass2))}_forb{int(round(1000.*freq_orb))}_i{int(binary.incl_deg)}/'
    if not os.path.exists(outdir):
        os.makedirs(outdir)
        print('Directory created')
    print('Directory set')
    # sequential computation number, derived from the inlists already saved
    computation_nr = f"{len(glob.glob(f'{outdir}inlist*.yaml')) + 1}".zfill(3)
    sp.call(f'cp ./inlist.yaml {outdir}inlist{computation_nr}.yaml', shell=True) # warning: do not forget to adapt this line if the inlist filename changes!
    # setting the time domain for tidal perturbation simulations
    time = np.linspace(0., float(N_forb_cycles), N_forb_cycles*Nsample_per_cycle+1) * binary.period.to(au.d)
    orb_phases = np.linspace(0.,float(N_forb_cycles),N_forb_cycles*Nsample_per_cycle+1) % 1.
    puls_phases = np.array(pulsation.puls_freq.to(1/au.d)*time, dtype=float) % 1.
    # time = np.loadtxt('tess_sector01_times.dat').T
    # time *= au.day
    # orb_phases = np.array( time * freq_orb, dtype=float) % 1.
    # puls_phases = np.array(pulsation.puls_freq.to(1/au.d)*time, dtype=float) % 1.
    print('Phase arrays constructed.')
    # Calculating the mode visibilities at every sampled phase pair
    binary_flux = []
    total_flux = []
    pulsation_flux = []
    eclipse_flags = []
    pbar = Bar('Calculating...', max=len(puls_phases))
    # NOTE(review): the loop index `iph` is never used in the loop body.
    for iph,puls_phase,orb_phase in zip(np.arange(len(puls_phases)), puls_phases, orb_phases):
        # calculating the observed fluxes for (1) the binary + pulsation, (2) the binary, and (3) the pulsation, and provide (4) eclipse flags
        bin_iflux, tot_iflux, puls_iflux, ecl_iflag = calculate_flux(binary, pulsation, puls_phase=puls_phase, orb_phase=orb_phase, distortion_factor=distortion_factor)
        binary_flux.append(bin_iflux)
        total_flux.append(tot_iflux)
        pulsation_flux.append(puls_iflux)
        eclipse_flags.append(int(ecl_iflag))
        pbar.next()
    pbar.finish()
    binary_flux = np.array(binary_flux)
    total_flux = np.array(total_flux)
    pulsation_flux = np.array(pulsation_flux)
    eclipse_flags = np.array(eclipse_flags, dtype=int)
    # Saving the results
    save_results(f'{outdir}pmode_f{int(np.round(pulsation.puls_freq.value*1000000))}_perturbed-visibilities_{computation_nr}.dat', time, orb_phases, puls_phases, total_flux, binary_flux, pulsation_flux, eclipse_flags)
| colej/eb_mapping | run_model.py | run_model.py | py | 14,587 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.sin",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 46,... |
9484548824 | import pygame
import sys
#define bird class
class Bird(object):
    """The player-controlled bird: position, sprite frames and simple physics."""
    def __init__(self):
        # collision rectangle used by checkDead()
        self.birdRect = pygame.Rect(65,50,50,50)
        # sprite frames: index 0 = gliding, 1 = flapping, 2 = dead
        self.birdStatus = [pygame.image.load("flappybirdassets/assets/1.png"),
                           pygame.image.load("flappybirdassets/assets/2.png"),
                           pygame.image.load("flappybirdassets/assets/dead.png")]
        self.status = 0
        self.birdX = 120
        self.birdY = 350
        self.jump = False
        self.jumpSpeed = 10
        self.gravity = 5
        self.dead = False
    def birdUpdate(self):
        # movement: rise (decelerating) while jumping, otherwise fall with
        # ever-increasing gravity; both are reset on the next key press
        if self.jump:
            self.jumpSpeed -= 1
            self.birdY -= self.jumpSpeed
        else:
            self.gravity += 0.2
            self.birdY += self.gravity
        # keep the collision rect in sync with the vertical position
        self.birdRect[1] = self.birdY
def createMap():
    """Draw one frame: background, pipes, bird and score, then refresh.

    Relies on the module-level globals created in the main block
    (screen, background, Pipeline, Bird, font, score).
    """
    screen.blit(background, (0, 0))
    # display pipes -- bug fix: the bottom pipe was drawn with the top-pipe
    # sprite (pineUp); checkDead() already sizes its bottom hitbox from
    # pineDown, so draw pineDown here to match the collision geometry.
    screen.blit(Pipeline.pineUp, (Pipeline.wallx, -300))
    screen.blit(Pipeline.pineDown, (Pipeline.wallx, 500))
    Pipeline.PipelineUpdate()
    # display bird with the sprite matching its current state
    if Bird.dead:
        Bird.status = 2
    elif Bird.jump:
        Bird.status = 1
    screen.blit(Bird.birdStatus[Bird.status], (Bird.birdX, Bird.birdY))
    Bird.birdUpdate()
    # score overlay
    screen.blit(font.render('Score:' + str(score), 1, (255, 255, 255)), (100, 50))
    pygame.display.update()
#define pipeline class
class Pipeline(object):
def __init__(self):
self.wallx = 400
self.pineUp = pygame.image.load("flappybirdassets/assets/top.png")
self.pineDown = pygame.image.load("flappybirdassets/assets/bottom.png")
def PipelineUpdate(self):
#movement
self.wallx -= 5
if self.wallx < -80:
global score
score += 1
self.wallx = 400
def checkDead():
    """Collision and out-of-screen test.

    A pipe collision marks the bird dead, but the function only returns
    True once the bird has fallen below the bottom of the window, so the
    dead bird keeps animating (falling) after a pipe hit.
    """
    upRect = pygame.Rect(Pipeline.wallx,-300,Pipeline.pineUp.get_width(),Pipeline.pineUp.get_height())
    downRect = pygame.Rect(Pipeline.wallx,500,Pipeline.pineDown.get_width(),Pipeline.pineDown.get_height())
    if upRect.colliderect(Bird.birdRect) or downRect.colliderect(Bird.birdRect):
        Bird.dead = True
    # `height` is the window height defined in the main block
    if not Bird.birdRect[1] < height:
        Bird.dead = True
        return True
    else:
        return False
def getResult():
    """Draw the game-over overlay (title + final score) and refresh the display."""
    final_text1 = "GAME OVER"
    final_text2 = "Your final score is :" + str(score)
    # Bug fix: dedicated 70pt/50pt fonts were created but never used -- both
    # lines were rendered with the global `font`, so the title never appeared
    # at its intended size. Render with the fonts created here instead.
    ft1_font = pygame.font.SysFont("Arial", 70)
    ft1_surf = ft1_font.render(final_text1, 1, (242, 3, 36))
    ft2_font = pygame.font.SysFont("Arial", 50)
    ft2_surf = ft2_font.render(final_text2, 1, (253, 177, 6))
    # centre each line horizontally
    screen.blit(ft1_surf, [screen.get_width() / 2 - ft1_surf.get_width() / 2, 100])
    screen.blit(ft2_surf, [screen.get_width() / 2 - ft2_surf.get_width() / 2, 200])
    pygame.display.update()
if __name__ == '__main__':
    pygame.init()
    font = pygame.font.SysFont(None,50)
    size = width, height = 400,650
    screen = pygame.display.set_mode(size) #set the window size
    clock = pygame.time.Clock()#used to cap the frame rate below
    color = (255,255,255)
    # NOTE(review): these instances shadow the Bird/Pipeline classes, so no
    # further objects of either class can be created after this point.
    Bird = Bird()
    Pipeline = Pipeline()
    score = 0
    while True:
        clock.tick(60)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            # a click or key press makes the bird jump (ignored once dead)
            if (event.type == pygame.MOUSEBUTTONDOWN or event.type == pygame.KEYDOWN) and not Bird.dead :
                Bird.jump = True
                Bird.gravity = 5
                Bird.jumpSpeed = 10
        # screen.fill(color)
        # NOTE(review): the background image is re-loaded from disk every
        # frame -- consider loading it once before the loop.
        background = pygame.image.load("flappybirdassets/assets/background.png")
        if checkDead():
            getResult()
        else:
            createMap()
    pygame.quit()
| hxg10636/flappygame | flappybird.py | flappybird.py | py | 3,693 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.Rect",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.image.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
... |
39914923603 | import cv2
import pickle
import numpy as np
import random
import threading
import warnings
from ..utils.image import read_image_bgr
import numpy as np
from PIL import Image
from six import raise_from
import csv
import sys
import os.path
import keras
from ..utils.anchors import (
anchor_targets_bbox_centers,
anchor_targets_bbox,
anchors_for_shape,
guess_shapes
)
from ..utils.image import (
TransformParameters,
adjust_transform_for_image,
apply_transform,
preprocess_image,
resize_image,
)
from ..utils.transform import transform_aabb, random_transform_generator
class Centers_Generator(object):
def __init__(
self,
pairs,
BCS_path,
BoxCars_dataset,
BoxCars_images,
BCS_sessions=range(4),
no_centers=False,
fake_centers=False,
split_exclusion_function=None,
batch_size=1,
group_method='random', # one of 'none', 'random', 'ratio'
shuffle_groups=True,
image_min_side=400,
image_max_side=600,
transform_list=None,
transform_parameters=None,
compute_anchor_targets=anchor_targets_bbox_centers,
compute_shapes=guess_shapes,
preprocess_image=preprocess_image
):
""" Initialize Generator object.
Args
transform_generator : A generator used to randomly transform images and annotations.
batch_size : The size of the batches to generate.
group_method : Determines how images are grouped together (defaults to 'ratio', one of ('none', 'random', 'ratio')).
shuffle_groups : If True, shuffles the groups each epoch.
image_min_side : After resizing the minimum side of an image is equal to image_min_side.
image_max_side : If after resizing the maximum side is larger than image_max_side, scales down further so that the max side is equal to image_max_side.
transform_parameters : The transform parameters used for data augmentation.
compute_anchor_targets : Function handler for computing the targets of anchors for an image and its annotations.
compute_shapes : Function handler for computing the shapes of the pyramid for a given input.
preprocess_image : Function handler for preprocessing an image (scaling / normalizing) for passing through a network.
"""
self.image_names = []
self.image_data = {}
self.classes = {'car': 0}
# Take base_dir from annotations file if not explicitly specified.
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
self.fake_centers = fake_centers
self.no_centers = no_centers
self.split_exclusion_function = split_exclusion_function
self.image_data = {}
self.transform_indices = []
for pair in pairs:
if BoxCars_dataset is not None:
self.dataset_name = 'BoxCars'
self.parse_BoxCars(BoxCars_dataset.format(pair), BoxCars_images.format(pair))
for i in BCS_sessions:
self.dataset_name = 'BCS'
ds_path = os.path.join(BCS_path.format(pair), 'dataset_{}.pkl'.format(i))
im_path = os.path.join(BCS_path.format(pair), 'images_{}'.format(i))
self.parse_BCS(dataset_path=ds_path, images_path=im_path)
print("Generator size: {}".format(self.size()))
# self.transform_generator = transform_generator
self.batch_size = int(batch_size)
self.group_method = group_method
self.shuffle_groups = shuffle_groups
self.image_min_side = image_min_side
self.image_max_side = image_max_side
if transform_parameters is not None:
self.transform_list = transform_list
else:
self.transform_list = [
random_transform_generator(
min_translation=(-0.4, -0.4),
max_translation=(0.4, 0.4),
min_scaling=(0.9, 0.9),
max_scaling=(2.0, 2.0),
flip_x_chance=0.5
),
random_transform_generator(
min_translation=(-0.5, -0.5),
max_translation=(0.5, 0.5),
min_scaling=(0.03, 0.03),
max_scaling=(1.0, 1.0),
flip_x_chance=0.5
),
]
self.transform_parameters = transform_parameters or TransformParameters(fill_mode='constant')
if self.no_centers:
self.compute_anchor_targets = anchor_targets_bbox
else:
self.compute_anchor_targets = compute_anchor_targets
self.compute_shapes = compute_shapes
self.preprocess_image = preprocess_image
self.group_index = 0
self.lock = threading.Lock()
self.group_images()
def size(self):
""" Size of the dataset.
"""
return len(self.image_names)
def num_classes(self):
""" Number of classes in the dataset.
"""
return max(self.classes.values()) + 1
def name_to_label(self, name):
""" Map name to label.
"""
return self.classes[name]
def label_to_name(self, label):
""" Map label to name.
"""
return self.labels[label]
def image_path(self, image_index):
""" Returns the image path for image_index.
"""
return self.image_names[image_index]
def image_aspect_ratio(self, image_index):
""" Compute the aspect ratio for an image with image_index.
"""
# PIL is fast for metadata
image = Image.open(self.image_path(image_index))
return float(image.width) / float(image.height)
def load_image(self, image_index):
""" Load an image at the image_index.
"""
return read_image_bgr(self.image_path(image_index))
def load_annotations(self, image_index):
""" Load annotations for an image_index.
"""
path = self.image_names[image_index]
annots = self.image_data[path]
if self.no_centers:
boxes = np.zeros((len(annots), 5))
else:
boxes = np.zeros((len(annots), 6))
for idx, annot in enumerate(annots):
class_name = annot['class']
boxes[idx, 0] = float(annot['x1'])
boxes[idx, 1] = float(annot['y1'])
boxes[idx, 2] = float(annot['x2'])
boxes[idx, 3] = float(annot['y2'])
boxes[idx, 4] = self.name_to_label(class_name)
if not self.no_centers:
boxes[idx, 5] = float(annot['c'])
return boxes
def load_transform_indices(self, group):
return [self.transform_indices[index] for index in group]
def load_annotations_group(self, group):
""" Load annotations for all images in group.
"""
return [self.load_annotations(image_index) for image_index in group]
    def filter_annotations(self, image_group, annotations_group, group):
        """ Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.

        Invalid boxes are silently dropped here (the diagnostic warnings
        below were deliberately commented out); the group lists are mutated
        in place and also returned.
        """
        # test all annotations
        for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
            assert (isinstance(annotations,
                               np.ndarray)), '\'load_annotations\' should return a list of numpy arrays, received: {}'.format(
                type(annotations))
            # test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]
            invalid_indices = np.where(
                (annotations[:, 2] <= annotations[:, 0]) |
                (annotations[:, 3] <= annotations[:, 1]) |
                (annotations[:, 0] < 0) |
                (annotations[:, 1] < 0) |
                (annotations[:, 2] > image.shape[1]) |
                (annotations[:, 3] > image.shape[0])
            )[0]
            # delete invalid indices
            if len(invalid_indices):
                # cv2.imwrite("ID_.png".format(group[index]), image)
                # warnings.warn("Following warning happens in:{}".format(self.dataset_name))
                # warnings.warn('Image with id {} (shape {}) contains the following invalid boxes: {}.'.format(
                #     group[index],
                #     image.shape,
                #     [annotations[invalid_index, :] for invalid_index in invalid_indices]
                # ))
                annotations_group[index] = np.delete(annotations, invalid_indices, axis=0)
        return image_group, annotations_group
def load_image_group(self, group):
""" Load images for all images in a group.
"""
return [self.load_image(image_index) for image_index in group]
    def random_transform_group_entry(self, image, annotations, transform_index):
        """ Randomly transforms image and annotation.

        Draws the next transform from the generator selected by
        transform_index; when that entry is falsy, the pair is returned
        unchanged. Returns the (possibly transformed) image and a copy of
        the annotations with transformed boxes.
        """
        # randomly transform both image and annotations
        transform_generator = self.transform_list[transform_index]
        if transform_generator:
            transform = adjust_transform_for_image(next(transform_generator), image,
                                                   self.transform_parameters.relative_translation)
            image = apply_transform(transform, image, self.transform_parameters)
            # Transform the bounding boxes in the annotations.
            annotations = annotations.copy()
            for index in range(annotations.shape[0]):
                annotations[index, :4] = transform_aabb(transform, annotations[index, :4])
        return image, annotations
    def resize_image(self, image):
        """ Resize an image using image_min_side and image_max_side.

        Delegates to the module-level `resize_image` helper (this method
        intentionally shadows that name inside the class namespace) and
        returns the resized image together with the applied scale factor.
        """
        return resize_image(image, min_side=self.image_min_side, max_side=self.image_max_side)
    def preprocess_group_entry(self, image, annotations, transform_index):
        """ Preprocess image and its annotations.

        Pipeline: normalise the image, apply the random augmentation
        selected by transform_index, resize, and rescale the box
        coordinates by the same factor. Returns the processed pair.
        """
        # preprocess the image
        image = self.preprocess_image(image)
        # randomly transform image and annotations
        image, annotations = self.random_transform_group_entry(image, annotations, transform_index)
        # resize image
        image, image_scale = self.resize_image(image)
        # apply resizing to annotations too
        annotations[:, :4] *= image_scale
        return image, annotations
def preprocess_group(self, image_group, annotations_group, transform_indices):
""" Preprocess each image and its annotations in its group.
"""
for index, (image, annotations, transform_index) in enumerate(
zip(image_group, annotations_group, transform_indices)):
# preprocess a single group entry
image, annotations = self.preprocess_group_entry(image, annotations, transform_index)
# copy processed data back to group
image_group[index] = image
annotations_group[index] = annotations
return image_group, annotations_group
def group_images(self):
""" Order the images according to self.order and makes groups of self.batch_size.
"""
# determine the order of the images
order = list(range(self.size()))
if self.group_method == 'random':
random.shuffle(order)
elif self.group_method == 'ratio':
order.sort(key=lambda x: self.image_aspect_ratio(x))
# divide into groups, one group = one batch
self.groups = [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in
range(0, len(order), self.batch_size)]
    def compute_inputs(self, image_group):
        """ Compute inputs for the network using an image_group.

        Images are padded into a single zero-filled batch array sized to the
        largest image in the group, each placed at the top-left corner.
        """
        # get the max image shape
        max_shape = tuple(max(image.shape[x] for image in image_group) for x in range(3))
        # construct an image batch object
        image_batch = np.zeros((self.batch_size,) + max_shape, dtype=keras.backend.floatx())
        # copy all images to the upper left part of the image batch object
        for image_index, image in enumerate(image_group):
            image_batch[image_index, :image.shape[0], :image.shape[1], :image.shape[2]] = image
        return image_batch
    def generate_anchors(self, image_shape):
        # Build the anchor boxes for an image of the given shape, using the
        # configured shapes callback to derive per-level feature-map sizes.
        return anchors_for_shape(image_shape, shapes_callback=self.compute_shapes)
def compute_targets(self, image_group, annotations_group):
""" Compute target outputs for the network using images and their annotations.
"""
# get the max image shape
max_shape = tuple(max(image.shape[x] for image in image_group) for x in range(3))
anchors = self.generate_anchors(max_shape)
if self.no_centers:
labels_batch, regression_batch, _ = self.compute_anchor_targets(
anchors,
image_group,
annotations_group,
self.num_classes()
)
return [regression_batch, labels_batch]
else:
labels_batch, regression_batch, centers_batch, _ = self.compute_anchor_targets(
anchors,
image_group,
annotations_group,
self.num_classes()
)
return [regression_batch, labels_batch, centers_batch]
    def compute_input_output(self, group):
        """ Compute inputs and target outputs for the network.

        ``group`` is one batch worth of image indices; returns the
        (inputs, targets) pair consumed by the training loop.
        """
        # load images and annotations
        image_group = self.load_image_group(group)
        annotations_group = self.load_annotations_group(group)
        # check validity of annotations
        image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
        # perform preprocessing steps
        transform_indices = self.load_transform_indices(group)
        image_group, annotations_group = self.preprocess_group(image_group, annotations_group, transform_indices)
        # compute network inputs
        inputs = self.compute_inputs(image_group)
        # compute network targets
        targets = self.compute_targets(image_group, annotations_group)
        return inputs, targets
    def __next__(self):
        # Python 3 iterator protocol; delegates to next().
        return self.next()
    def next(self):
        """ Produce the next batch of (inputs, targets).

        Group selection happens under self.lock, so multiple workers can
        advance the generator concurrently; the heavy per-batch work in
        compute_input_output runs outside the lock.
        """
        # advance the group index
        with self.lock:
            if self.group_index == 0 and self.shuffle_groups:
                # shuffle groups at start of epoch
                random.shuffle(self.groups)
            group = self.groups[self.group_index]
            self.group_index = (self.group_index + 1) % len(self.groups)
        return self.compute_input_output(group)
def parse_BCS(self, dataset_path, images_path):
with open(dataset_path, "rb") as f:
ds = pickle.load(f, encoding='latin-1', fix_imports=True)
for i, entry in enumerate(ds):
filename = os.path.join(images_path, entry['filename'])
if self.split_exclusion_function is not None:
if self.split_exclusion_function(filename):
continue
if filename not in self.image_data:
self.image_data[filename] = []
self.image_names.append(filename)
self.transform_indices.append(0)
if self.no_centers:
for label in entry['labels']:
dict = {'x1': label['x_min'], 'x2': label['x_max'],
'y1': label['y_min'], 'y2': label['y_max'],
'class': 'car'}
self.image_data[filename].append(dict)
elif self.fake_centers:
for label in entry['labels']:
dict = {'x1': label['x_min'], 'x2': label['x_max'],
'y1': label['y_min'], 'y2': label['y_max'],
'c': 0.0, 'class': 'car'}
self.image_data[filename].append(dict)
else:
for label in entry['labels']:
dict = {'x1': label['x_min'], 'x2': label['x_max'],
'y1': label['y_min'], 'y2': label['y_max'],
'c': label['centery'], 'class': 'car'}
self.image_data[filename].append(dict)
def parse_BoxCars(self, dataset_path, images_path):
with open(dataset_path, "rb") as f:
ds = pickle.load(f, encoding='latin-1', fix_imports=True)
for sample in ds['samples']:
# to_camera = sample['to_camera']
for i_id, instance in enumerate(sample['instances']):
filename = os.path.join(images_path, instance['filename'])
if filename not in self.image_data:
self.image_data[filename] = []
self.image_names.append(filename)
self.transform_indices.append(1)
if self.no_centers:
dict = {'x1': instance['bb_out']['x_min'], 'x2': instance['bb_out']['x_max'],
'y1': instance['bb_out']['y_min'], 'y2': instance['bb_out']['y_max'],
'class': 'car'}
else:
if self.fake_centers:
centery = 0.0
else:
centery = (instance['bb_in']['y_min'] - instance['bb_out']['y_min']) / \
(instance['bb_out']['y_max'] - instance['bb_out']['y_min'])
dict = {'x1': instance['bb_out']['x_min'], 'x2': instance['bb_out']['x_max'],
'y1': instance['bb_out']['y_min'], 'y2': instance['bb_out']['y_max'],
'c': centery, 'class': 'car'}
self.image_data[filename].append(dict)
| kocurvik/retinanet_traffic_3D | keras_retinanet/preprocessing/centers_generator.py | centers_generator.py | py | 18,123 | python | en | code | 24 | github-code | 6 | [
{
"api_name": "utils.anchors.anchor_targets_bbox_centers",
"line_number": 56,
"usage_type": "name"
},
{
"api_name": "utils.anchors.guess_shapes",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "utils.image.preprocess_image",
"line_number": 58,
"usage_type": "nam... |
4737844765 | from pathlib import Path # Pathlib - Working with file paths
p = Path('.')  # Creates a path object in found OS (Windows Path)
test = [x for x in p.iterdir() if x.is_dir()]  # immediate subdirectories
print(p.resolve())  # Show file dir in your OS format
new_p = p / 'Test dir'  # Navigating into Test dir folder
# exist_ok=True keeps the script re-runnable: a plain mkdir() raises
# FileExistsError the second time it is executed.
new_p.mkdir(exist_ok=True)
for file_name in new_p.iterdir():
    if file_name.match('*.txt') or file_name.match('*.py'):  # Check for specific file types when iterating through files in path
        print(file_name)
new_p /= 'test.txt'
print(new_p)
# Nothing above creates test.txt, so guard against FileNotFoundError.
if new_p.exists():
    with new_p.open() as f:
        print(f.readline())
        print(f.readline())
| danlhennessy/Learn | Python/fundamentals/Built_In_Modules/pathlib/main.py | main.py | py | 716 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 3,
"usage_type": "call"
}
] |
1375960664 | # -*- coding: utf-8 -*-
import numpy as np
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
from deap import gp
from deap.algorithms import varAnd
from adan.aiem.genetics.evaluators import *
import array
import random as traditional_random
#import pathos
import pathos
import operator
#from adan import functions
from adan.functions import *
from adan.aidc.feature_selection import *
import time
def eaSimple_island(population,toolbox, cxpb, mutpb, ngen,halloffame=None,
                    verbose=__debug__,allowed_time=np.inf, stats=None,FREQ=None,
                    percentage_migration=0.1):
    """
    Island-model GA: runs eaSimple_timed on each island, migrating the best
    individuals between islands every FREQ outer generations.

    ngen is used both for the total generations and for the within island generatins.
    So, the total number of gens will be ngen**2.
    FREQ: How often migration takes place. If FREQ=None, then it is set to ngen/3
    allowed_time: wall-clock budget (seconds) for one outer generation; the
    loop stops once an iteration exceeds it.
    """
    # FREQ is how often migration takes place
    if FREQ is None:
        FREQ = int(ngen/3)
    if FREQ < 1:
        # Bug fix: the old guard was ``FREQ < 0`` which let FREQ == 0 through
        # (e.g. for ngen < 3), making ``i % FREQ`` below raise ZeroDivisionError.
        FREQ = 1
    toolbox.register("algorithm", eaSimple_timed, toolbox=toolbox,
                     cxpb=cxpb, mutpb=mutpb, ngen=ngen,
                     verbose=verbose,stats=stats,halloffame=halloffame)
    islands = population
    # The GA runs each time for ngen, and then it runs for a total number equal to ngen/FREQ
    for i in range(0, ngen):
        start = time.time()
        results = toolbox.map(toolbox.algorithm, islands)
        islands = [pop for pop, logbook in results]
        if i % FREQ == 0:
            print('******MIGRATION TAKING PLACE******')
            tools.migRing(islands, int(percentage_migration*len(islands[0])), tools.selBest)
        end = time.time()
        if (end-start) > allowed_time:
            if verbose:
                print('Time-out. Maximum allowed time exceeded.')
            break
    return islands
def eaSimple_timed(population, toolbox, cxpb, mutpb, ngen, stats=None,
                   halloffame=None, verbose=__debug__,allowed_time=np.inf):
    """This is a copy of the eaSimple() method from DEAP, but adjusted
    to support time-out. In case of timeout, the most recent generation is
    returned.

    allowed_time is a wall-clock budget in seconds measured from the end of
    the initial evaluation; the generational loop stops once exceeded.
    Individuals whose evaluator returns None get the sentinel fitness (-2,).
    Returns (population, logbook).
    """
    logbook = tools.Logbook()
    logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])
    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in population if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        #-2 is the 'fail' value (e.g. the fitness function couldn't be computed)
        if fit is None:
            fit=(-2,)
        ind.fitness.values = fit
    if halloffame is not None:
        halloffame.update(population)
    record = stats.compile(population) if stats else {}
    logbook.record(gen=0, nevals=len(invalid_ind), **record)
    if verbose:
        print(logbook.stream)
    # The time budget starts here, after the initial evaluation.
    start = time.time()
    # Begin the generational process
    for gen in range(1, ngen+1):
        # Select the next generation individuals
        offspring = toolbox.select(population, len(population))
        # Vary the pool of individuals
        offspring = varAnd(offspring, toolbox, cxpb, mutpb)
        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            if fit is None:
                fit=(-2,)
            ind.fitness.values = fit
        # Update the hall of fame with the generated individuals
        if halloffame is not None:
            halloffame.update(offspring)
        # Replace the current population by the offspring
        population[:] = offspring
        # Append the current generation statistics to the logbook
        record = stats.compile(population) if stats else {}
        logbook.record(gen=gen, nevals=len(invalid_ind), **record)
        if verbose:
            print(logbook.stream)
        end = time.time()
        if (end-start)>allowed_time:
            if verbose:
                print('Time-out. Maximum allowed time exceeded.')
            break
    return population, logbook
def calcNewFeatures(result_set,df,features='best'):
    """
    returns the best features alongside the variables participating in the complex variables

    result_set: dictionary returned by findFeaturesGP.
    features: 'best' to recompile result_set['best_individuals_object'],
    'all' to recompile result_set['all_features_individuals_object'].
    Returns three DataFrames: (all features, complex GP features only,
    simple original columns only).

    NOTE(review): any other ``features`` value leaves ``dummy`` unbound and
    raises NameError on the loop below - confirm whether that is intended.
    """
    all_features=[]
    complex_features=[]
    # Rebuild a compiler over the same primitive set used during evolution.
    pset=setPset(df)
    toolbox = base.Toolbox()
    toolbox.register("compile", gp.compile, pset=pset)
    complex_columns=[]
    all_columns=[]
    simple_columns=[]
    if features=='best':
        dummy='best_individuals_object'
    elif features=='all':
        dummy='all_features_individuals_object'
    # Compile each stored GP program back into a feature vector.
    for feat in result_set[dummy]:
        complex_features.append(toolbox.compile(feat))
        all_features.append(toolbox.compile(feat))
        complex_columns.append(str(feat))
        all_columns.append(str(feat))
    simple_features=[]
    # Also carry along the raw columns that participate in the features.
    for feat in result_set['variables']:
        simple_features.append(df[feat])
        simple_columns.append(str(feat))
        all_features.append(df[feat])
        all_columns.append(str(feat))
    return pd.DataFrame(np.column_stack(all_features),columns=all_columns),pd.DataFrame(np.column_stack(complex_features),columns=complex_columns),pd.DataFrame(np.column_stack(simple_features),columns=simple_columns)
def setPset(df):
    """Build the DEAP primitive set used for GP feature construction.

    Registers the four binary arithmetic primitives, every unary function in
    ``singlefunctions``, and one terminal per dataframe column (the column's
    value array, named after the column).
    """
    pset = gp.PrimitiveSet("MAIN", 0,prefix="coef")
    pset.addPrimitive(add,2)
    pset.addPrimitive(sub, 2)
    pset.addPrimitive(mul, 2)
    pset.addPrimitive(div, 2)
    for fun in singlefunctions:
        pset.addPrimitive(fun,1)
    for col in df.columns.values:
        #we must use strings for column names otherwise the functions interpret the
        #column names as numbers
        pset.addTerminal(df[col].values,name=col)
    return pset
def findFeaturesGP(df,target,population=300,ngen=50,cxpb=0.9,features=-1,
                   max_tree=3,evaluator=evalPearsonCorNumba,
                   task="regression",n_processes=1,allowed_time=None,target_sampling=0.8):
    """
    This function calculates complex features that correlate with the response variable.
    Output:
    A dictionary with the following fields:
    best_features: a list of lists, where every element is a feature selected by the best n features as defined by the cbf method
    best_features_plus_cols: a list of lists, where every element is a feature selected by the best n features as defined by the cbf method plus
    any original features participating in the creation of the individuals
    best_individuals_equations: the equations used to compute the best_features (this is the string version of best_individuals_object)
    best_individuals_plus_columns: like the previous, plus the column names of the individual features
    best_individuals_object: the programs used to compute the best_features
    scores: the score of each individual produced during the genetic programming
    scores_cbf: the cbf score of each feature (all features not just the best ones)
    variables: the names of the original variables that participate in the creation of the features in the best_features
    all_features: a list of lists with all the features produced by the genetic algorithm
    all_features_individuals: the programs used to compute all_features
    features: if features<1, then the algorithm simply defaults to 1
    target_sampling: When the features are evaluated, we can sample a % of the targets, and evaluate
    the performace on this subset. This should help with overfitting and finding better solutions.

    A list for ``population`` activates the island model (one island per
    deme size); a scalar runs a single timed eaSimple.
    """
    if features<1:
        features=1
    # Pick a default evaluator appropriate for the task when none was given.
    if task=='regression' and evaluator==None:
        evaluator=evalPearsonCorNumba
    elif task=='classification' and evaluator==None:
        evaluator=evalANOVANumba
    mutpb=1-cxpb
    # for col in df.columns:
    #     df[col]=df[col].astype('float64')
    pset=setPset(df)
    creator.create("FitnessMax", base.Fitness, weights=(1,))
    creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMax)
    toolbox = base.Toolbox()
    toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=1, max_=max_tree)
    toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    toolbox.register("compile", gp.compile, pset=pset)
    #need to do that because multithreading does not support functions with more than one arguments
    def evaluate(x):
        return evaluator(x,toolbox=toolbox,targets=target,sampling=target_sampling)
    #toolbox.register("evaluate", evaluator,toolbox=toolbox, targets=targets)
    toolbox.register("evaluate", evaluate)
    #toolbox.register("select", tools.selTournament, tournsize=3)
    #toolbox.register("select", tools.selNSGA2)
    toolbox.register("select", tools.selDoubleTournament,fitness_size=3,parsimony_size=1.4,fitness_first=True)
    toolbox.register("mate", gp.cxOnePoint)
    toolbox.register("expr_mut", gp.genFull, min_=0, max_=max_tree)
    toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)
    # Cap tree height on both crossover and mutation to control bloat.
    toolbox.decorate("mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=max_tree))
    toolbox.decorate("mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=max_tree))
    if type(population)==type([]):
        # Island model: one deme per entry of the population list.
        toolbox.register("deme", tools.initRepeat, list, toolbox.individual)
        DEME_SIZES = population
        pop = [toolbox.deme(n=i) for i in DEME_SIZES]
        hof = tools.HallOfFame(sum(population))
    else:
        pop = toolbox.population(n=population)
        hof = tools.HallOfFame(population)
    stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(len)
    mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register("avg", np.mean)
    mstats.register("std", np.std)
    mstats.register("min", np.min)
    mstats.register("max", np.max)
    if n_processes>1:
        pool = pathos.multiprocessing.ProcessingPool(n_processes)
        toolbox.register("map", pool.map)
    # pop, log = algorithms.eaMuPlusLambda(pop, toolbox, mu,lamb, cxpb,mutpb,ngen=ngen, stats=mstats,
    #                                      halloffame=hof, verbose=True)
    # if allowed_time is None:
    #     pop, log = algorithms.eaSimple(pop, toolbox, cxpb,mutpb, ngen=ngen, stats=mstats,
    #                                    halloffame=hof, verbose=True)
    if type(population)==type([]):
        pop = eaSimple_island(pop, toolbox, cxpb,mutpb, ngen=ngen, stats=mstats,
                              halloffame=hof, verbose=True, allowed_time=allowed_time)
    else:
        pop, log = eaSimple_timed(pop, toolbox, cxpb,mutpb, ngen=ngen, stats=mstats,
                                  halloffame=hof, verbose=True, allowed_time=allowed_time)
    allfeatures=[]
    allfeatures_individuals_object=[]
    scores=[]
    feature_names=[]
    best_individuals_object=[]
    # Compile every hall-of-fame program, keeping only finite,
    # non-constant feature vectors.
    for i in range(0,len(hof.items)):
        # print(hof.items[i])
        feature=toolbox.compile(hof.items[i])
        if not np.isnan(feature).any():
            #need to guard against zero variance features
            if np.var(feature)>0.0:
                allfeatures.append(feature)
                allfeatures_individuals_object.append(hof.items[i])
                feature_names.append(str(hof.items[i]))
                best_individuals_object.append(hof.items[i])
    #for some reason in DEAP the key in the hall-of-fame is the score
    # if features>0:
    #     cbfscores=cbfSelectionNumba(allfeatures,target,task=task)
    #     bestindices=sorted(range(len(cbfscores)), key=lambda x: cbfscores[x],reverse=True)
    # else:
    #     cbfscores=np.ones(len(allfeatures))
    #     bestindices=range(len(allfeatures))
    cbfscores=cbfSelectionNumba(allfeatures,target,task=task)
    bestindices=sorted(range(len(cbfscores)), key=lambda x: cbfscores[x],reverse=True)
    bestfeatures=[]
    bestindividuals=[]
    bestindividuals_plus_cols=[]
    scorescbf=[]
    best_features_plus_cols=[]
    best_individuals_object_final=[]
    for i in range(0,int(features)):
        index=bestindices[i]
        bestfeatures.append(allfeatures[index])
        best_features_plus_cols.append(allfeatures[index])
        bestindividuals.append(feature_names[index])
        bestindividuals_plus_cols.append(feature_names[index])
        # NOTE(review): indexes with the loop counter ``i`` while every other
        # list uses the cbf-ranked ``index`` - likely should be
        # best_individuals_object[index]; confirm intent.
        best_individuals_object_final.append(best_individuals_object[i])
        # scores.append(eval(str(hof.keys[index])))
        # scorescbf.append(cbfscores[index])
    # NOTE(review): ``scores`` and ``scorescbf`` stay empty (the appends above
    # are commented out) yet are still returned in the result dictionary.
    #all features includes the best variables, plus any single variables which might participate in the creation of the complex variables
    final_vars=[]
    str_individuals=str(bestindividuals)
    for col in df.columns:
        if str_individuals.find(col)>-1:
            final_vars.append(col)
            #append the original variable to bestfeatures if it exists in a complex feature
            best_features_plus_cols.append(df[col].values)
            bestindividuals_plus_cols.append(col)
    #combine all features (individual and composite) into one df
    best_all_feats_df=pd.DataFrame(np.column_stack(best_features_plus_cols),columns=bestindividuals_plus_cols)
    return {'best_features':bestfeatures,'best_features_plus_cols':best_features_plus_cols,
            'best_individuals_equations':bestindividuals,'best_individuals_object':best_individuals_object_final,
            'scores':scores,'scores_cbf':scorescbf,'variables':final_vars,
            'all_features':allfeatures,'all_features_individuals_object':allfeatures_individuals_object,'best_all_feats_df':best_all_feats_df}
def findEquationFeatures(features_to_be_used,task,target,ngen=10,population=10,crossover_prob=0.5,mut_prob=0.1,individual_mut=0.1,tournsize=3):
    """
    Performs feature selection over the set of features before doing the
    symbolic modelling
    individual_mut: If a mutation occurs, then each item might be flipped according to this probability

    Evolves a boolean mask over the columns of ``features_to_be_used`` and
    returns the best mask found (numpy bool array).
    """
    creator.create("FitnessMax", base.Fitness, weights=(1.0,))
    # Local import shadows the module-level ``import array`` (same module;
    # harmless, kept for compatibility).
    import array
    creator.create("Individual", array.array, typecode='b', fitness=creator.FitnessMax)
    toolbox = base.Toolbox()
    # Attribute generator
    toolbox.register("attr_bool", traditional_random.getrandbits,1)
    # Structure initializers
    toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, features_to_be_used.shape[1])
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    #we import here to avoid a cyclical import
    from adan.aiem.symbolic_modelling import findSymbolicExpressionL1_regression_helper, findSymbolicExpressionL1_classification_helper
    def evalOneMax(individual):
        # Heavily penalize the empty mask so at least one feature is selected.
        if sum(individual)==0:
            return -100,
        else:
            ind=np.array(individual,bool)
            if task=='regression':
                models=findSymbolicExpressionL1_regression_helper(features_to_be_used.loc[:,ind].values,target)
            elif task=='classification':
                models=findSymbolicExpressionL1_classification_helper(features_to_be_used.loc[:,ind].values,target)
            # Fitness is the best model performance over the selected columns.
            performances=[perf[1] for perf in models]
            maximum=max(performances)
            return maximum,
    toolbox.register("evaluate", evalOneMax)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register("mutate", tools.mutFlipBit, indpb=individual_mut)
    toolbox.register("select", tools.selTournament, tournsize=tournsize)
    pop = toolbox.population(n=population)
    hof = tools.HallOfFame(1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    pop, log = algorithms.eaSimple(pop, toolbox, cxpb=crossover_prob,
                                   mutpb=mut_prob, ngen=ngen,
                                   stats=stats, halloffame=hof, verbose=True)
    final_choice=hof.items[0]
    final_choice=np.array(final_choice,bool)
    #return pop, log, hof
    return final_choice
{
"api_name": "numpy.inf",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "deap.tools.migRing",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "deap.tools",
"line_nu... |
6604263556 | #!/usr/bin/env python3
'''conda create -n pytorch-env python=3.9 shap pandas optuna=2.10.1 xgboost scikit-learn sklearn-pandas rdkit pytorch torchvision torchaudio pytorch-cuda=11.6 cairosvg dgllife dgl=0.9.1 dgl-cuda11.6 ipython -c pytorch -c nvidia -c dglteam'''
import pandas as pd
import numpy as np
import datetime,time,joblib
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder
from sklearn_pandas import DataFrameMapper
############### Set required parameters and load data here ###############
'''basic parameters'''
filename_pkl = 'HistGradientBoosting_Optuna_best' # load target model from the *.pkl file
split_dataset = False # whether to split the dataset into training and test sets
model_name = "HistGradientBoostingClassifier"
# Supported models are as follows:
# (1) AdaBoostRegressor / AdaBoostClassifier
# (2) XGBRegressor / XGBClassifier
# (3) GradientBoostingRegressor / GradientBoostingClassifier
# (4) HistGradientBoostingRegressor / HistGradientBoostingClassifier
# (5) RandomForestRegressor / RandomForestClassifier
# (6) SVR / SVC
# (7) MLPRegressor / MLPClassifier
# (8) ElasticNet / LogisticRegression
'''load the dataset'''
selected_features = ['MolWt','NumRotatableBonds','AromaticProportion']
# NOTE(review): path is relative to the process working directory - confirm.
df = pd.read_csv('../../MolLogP_dataset.csv')
data_X = df[selected_features]
data_y = df['MolLogP<2'] # label column; name suggests a binary logP<2 flag - confirm
# print(data_y)
# exit()
############### Some user-defined functions ###############
def total_running_time(end_time, start_time):
    """Print the elapsed wall-clock time between two time.time() stamps,
    broken down into days / hours / minutes / seconds."""
    elapsed = round(end_time - start_time, 2)
    days, remainder = divmod(elapsed, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes = remainder // 60
    seconds = elapsed % 60
    print(">> Elapsed time: {0:2d} day(s) {1:2d} hour(s) {2:2d} minute(s) {3:5.2f} second(s) <<".format(int(days), int(hours), int(minutes), seconds))
def load_model(model_name, filename_pkl):
    """Load a fitted estimator from ``<filename_pkl>.pkl`` and report its task type.

    Parameters:
        model_name: one of the supported estimator class names below.
        filename_pkl: pickle file path without the ``.pkl`` extension.

    Returns:
        (model, ML_type) where ML_type is "Regression" or "Classification".

    Prints the supported-model help and exits with status 1 when
    ``model_name`` is not recognised.
    """
    import importlib
    # Estimator name -> (task type, module that defines the estimator class).
    # This replaces the previous 16-branch elif chain of duplicated imports.
    supported_models = {
        "XGBRegressor": ("Regression", "xgboost"),
        "AdaBoostRegressor": ("Regression", "sklearn.ensemble"),
        "GradientBoostingRegressor": ("Regression", "sklearn.ensemble"),
        "HistGradientBoostingRegressor": ("Regression", "sklearn.ensemble"),
        "MLPRegressor": ("Regression", "sklearn.neural_network"),
        "RandomForestRegressor": ("Regression", "sklearn.ensemble"),
        "SVR": ("Regression", "sklearn.svm"),
        "ElasticNet": ("Regression", "sklearn.linear_model"),
        "XGBClassifier": ("Classification", "xgboost"),
        "AdaBoostClassifier": ("Classification", "sklearn.ensemble"),
        "GradientBoostingClassifier": ("Classification", "sklearn.ensemble"),
        "HistGradientBoostingClassifier": ("Classification", "sklearn.ensemble"),
        "MLPClassifier": ("Classification", "sklearn.neural_network"),
        "RandomForestClassifier": ("Classification", "sklearn.ensemble"),
        "SVC": ("Classification", "sklearn.svm"),
        "LogisticRegression": ("Classification", "sklearn.linear_model"),
    }
    if model_name not in supported_models:
        print('** Please rechoose a model **\n-> Supported models are as follows:')
        print(' (1) AdaBoostRegressor / AdaBoostClassifier\n (2) XGBRegressor / XGBClassifier')
        print(' (3) GradientBoostingRegressor / GradientBoostingClassifier\n (4) HistGradientBoostingRegressor / HistGradientBoostingClassifier')
        print(' (5) RandomForestRegressor / RandomForestClassifier\n (6) SVR / SVC')
        print(' (7) MLPRegressor / MLPClassifier\n (8) ElasticNet / LogisticRegression')
        exit(1)
    ML_type, module_name = supported_models[model_name]
    # Importing the defining module reproduces the original side effect and
    # ensures the estimator class is importable when joblib unpickles it.
    importlib.import_module(module_name)
    model = joblib.load(filename_pkl + ".pkl")
    print('---------- Results based on the current loaded model ----------')
    print('> Current parameters:\n {}\n'.format(model.get_params()))
    return model, ML_type
def show_metrics(model, ML_type, y_test_pred, y_test_pred_proba, y_test, X_test):
    """Print evaluation metrics for the loaded model on the test set.

    Classification: accuracy, score, classification report, and per-class
    ROC-AUC / average precision. Regression: MSE, MAE, and R^2.
    ``y_test_pred_proba`` is only used in the classification branch.
    """
    print(" >>>> Metrics based on the best model <<<<\n")
    if ML_type == "Classification":
        from sklearn.metrics import accuracy_score, classification_report, roc_auc_score, average_precision_score
        accuracy_test = accuracy_score(y_test, y_test_pred)
        print('> Accuracy on the test set: {:.2%}'.format(accuracy_test))
        print('> Score on the test set: {:.2%}'.format(model.score(X_test, y_test)))
        print('> Classification report on the test set:')
        print(classification_report(y_test, y_test_pred))
        roc_auc_test, average_precision_test = [], []
        # NOTE(review): assumes the columns of y_test_pred_proba align with the
        # distinct labels in y_test (sorted class order) - confirm.
        for i in range(len(set(y_test))):
            roc_auc_test.append(roc_auc_score(y_test, y_test_pred_proba[:,i], multi_class='ovr'))
            average_precision_test.append(average_precision_score(y_test, y_test_pred_proba[:,i]))
        pd.set_option('display.float_format','{:12.6f}'.format)
        pd.set_option('display.colheader_justify', 'center')
        test_reports = pd.DataFrame(np.vstack((roc_auc_test, average_precision_test)).T, columns=['ROC-AUC','AP(PR-AUC)'])
        print('> Area under the receiver operating characteristic curve (ROC-AUC) and\n average precision (AP) which summarizes a precision-recall curve as the weighted mean\n of precisions achieved at each threshold on the test set:\n {}\n'.format(test_reports))
    elif ML_type == "Regression":
        from sklearn.metrics import mean_squared_error, mean_absolute_error
        mse_test = mean_squared_error(y_test, y_test_pred)
        mae_test = mean_absolute_error(y_test, y_test_pred)
        print('> Mean squared error (MSE) on the test set: {:.6f}'.format(mse_test))
        print('> Mean absolute error (MAE) on the test set: {:.6f}'.format(mae_test))
        print('> R-squared (R^2) value on the test set: {:.6f}\n'.format(model.score(X_test, y_test)))
############### The ML training script starts from here ###############
# Record wall-clock start for the elapsed-time report printed at the end.
start_time = time.time()
start_date = datetime.datetime.now()
print('*** Scikit-learn evaluation ({0}) started at {1} ***\n'.format(model_name, start_date.strftime("%Y-%m-%d %H:%M:%S")))
'''split training/test sets'''
if split_dataset:
    # Hold out 20% of the data; only X_test/y_test are used below.
    print('The dataset is splited into training and test sets, and therefore the target model will be evaluated on the test set...\n')
    X_train, X_test, y_train, y_test = train_test_split(data_X, data_y, test_size=0.2, random_state=0)
else:
    print('The whole dataset will be used to evaluate the target model...\n')
    X_test, y_test = data_X, data_y
target_model, ML_type = load_model(model_name, filename_pkl)
y_test_pred = target_model.predict(X_test)
# Probabilities are only needed (and only available) for classifiers.
y_test_pred_proba = target_model.predict_proba(X_test) if ML_type == "Classification" else None
show_metrics(target_model, ML_type, y_test_pred, y_test_pred_proba, y_test, X_test)
end_time = time.time()
end_date = datetime.datetime.now()
print('*** Scikit-learn evaluation ({0}) terminated at {1} ***\n'.format(model_name, end_date.strftime("%Y-%m-%d %H:%M:%S")))
total_running_time(end_time, start_time)
| JianyongYuan/sklearn-scripts | Scikit-learn/Predictions/sklearn_evaluation.py | sklearn_evaluation.py | py | 8,232 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "joblib.load",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 112,
"usage_type": "call"
},
{
"api_name": "sklearn.... |
35395933423 | import functools
import ipaddress
import re
import socket
from pathlib import Path, PurePath
from random import SystemRandom
from types import TracebackType
from typing import Any, AsyncContextManager, Awaitable, Callable, Dict
from typing import Generator, Generic, IO, Mapping, Optional, Sequence
from typing import Tuple, Type, TypeVar, Union, cast, overload
from typing_extensions import Literal, Protocol
from .constants import DEFAULT_LANG
from .constants import DISC_COMPRESSION_ERROR, DISC_CONNECTION_LOST
from .constants import DISC_HOST_KEY_NOT_VERIFIABLE, DISC_ILLEGAL_USER_NAME
from .constants import DISC_KEY_EXCHANGE_FAILED, DISC_MAC_ERROR
from .constants import DISC_NO_MORE_AUTH_METHODS_AVAILABLE
from .constants import DISC_PROTOCOL_ERROR, DISC_PROTOCOL_VERSION_NOT_SUPPORTED
from .constants import DISC_SERVICE_NOT_AVAILABLE
class _Hash(Protocol):
    """Protocol for hashing data

       Structural interface matching hashlib-style hash objects; only the
       members this module relies on are declared, as documentation-only stubs.
    """

    @property
    def digest_size(self) -> int:
        """Return the hash digest size"""

    @property
    def block_size(self) -> int:
        """Return the hash block size"""

    @property
    def name(self) -> str:
        """Return the hash name"""

    def digest(self) -> bytes:
        """Return the digest value as a bytes object"""

    def hexdigest(self) -> str:
        """Return the digest value as a string of hexadecimal digits"""

    def update(self, __data: bytes) -> None:
        """Update this hash object's state with the provided bytes"""
class HashType(Protocol):
    """Protocol for returning the type of a hash function

       A callable (such as hashlib.sha256) that constructs a new _Hash
       object, optionally seeded with initial data.
    """

    def __call__(self, __data: bytes = ...) -> _Hash:
        """Create a new hash object"""
class _SupportsWaitClosed(Protocol):
    """A class that supports async wait_closed

       Structural type for transport-like objects exposing an awaitable
       wait_closed() method.
    """

    async def wait_closed(self) -> None:
        """Wait for transport to close"""
_T = TypeVar('_T')
# Either an explicit value of type _T or the empty tuple () - presumably a
# "use the default" marker; confirm against callers outside this chunk.
DefTuple = Union[Tuple[()], _T]
# A plain value or an awaitable producing it
MaybeAwait = Union[_T, Awaitable[_T]]
# sys.exc_info()-style tuples, with and without an active exception
ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
OptExcInfo = Union[ExcInfo, Tuple[None, None, None]]
BytesOrStr = Union[bytes, str]
FilePath = Union[str, PurePath]
HostPort = Tuple[str, int]
IPAddress = Union[ipaddress.IPv4Address, ipaddress.IPv6Address]
IPNetwork = Union[ipaddress.IPv4Network, ipaddress.IPv6Network]
# 2-tuple for IPv4 sockaddrs, 4-tuple (with flowinfo/scope id) for IPv6
SockAddr = Union[Tuple[str, int], Tuple[str, int, int, int]]
# Define a version of randrange which is based on SystemRandom(), so that
# we get back numbers suitable for cryptographic use.
_random = SystemRandom()
randrange = _random.randrange
# Captures a single letter - likely used to split "<number><unit>" strings;
# usage is not shown in this chunk.
_unit_pattern = re.compile(r'([A-Za-z])')
# Multipliers for byte-size suffixes (k/m/g) and time suffixes (s/m/h/d/w)
_byte_units = {'': 1, 'k': 1024, 'm': 1024*1024, 'g': 1024*1024*1024}
_time_units = {'': 1, 's': 1, 'm': 60, 'h': 60*60,
               'd': 24*60*60, 'w': 7*24*60*60}
def hide_empty(value: object, prefix: str = ', ') -> str:
    """Return str(value) with *prefix* prepended, or '' if value is empty"""

    text = str(value)

    if not text:
        return ''

    return prefix + text
def plural(length: int, label: str, suffix: str = 's') -> str:
    """Return '<length> <label>', pluralized with *suffix* unless length is 1"""

    ending = '' if length == 1 else suffix
    return '%d %s%s' % (length, label, ending)
def all_ints(seq: Sequence[object]) -> bool:
    """Return whether every element of *seq* is an integer"""

    for item in seq:
        if not isinstance(item, int):
            return False

    return True
def get_symbol_names(symbols: Mapping[str, int], prefix: str,
                     strip_leading: int = 0) -> Mapping[int, str]:
    """Return a value -> name mapping for symbols used in logging

    Only names starting with *prefix* are included; the first
    *strip_leading* characters are removed from each reported name.
    """

    names: Dict[int, str] = {}

    for symbol, value in symbols.items():
        if symbol.startswith(prefix):
            names[value] = symbol[strip_leading:]

    return names
# Punctuation to map when creating handler names
_HANDLER_PUNCTUATION = (('@', '_at_'), ('.', '_dot_'), ('-', '_'))


def map_handler_name(name: str) -> str:
    """Map punctuation so a string can be used as a handler name"""

    result = name

    for original, replacement in _HANDLER_PUNCTUATION:
        result = result.replace(original, replacement)

    return result
def _normalize_scoped_ip(addr: str) -> str:
    """Normalize scoped IP address

    The ipaddress module doesn't handle scoped addresses properly,
    so we normalize scoped IP addresses using socket.getaddrinfo
    before we pass them into ip_address/ip_network.
    """

    try:
        # AI_NUMERICHOST: only accept literal addresses, never do a
        # DNS lookup; returns the first (and only) resolution entry
        addrinfo = socket.getaddrinfo(addr, None, family=socket.AF_UNSPEC,
                                      type=socket.SOCK_STREAM,
                                      flags=socket.AI_NUMERICHOST)[0]
    except socket.gaierror:
        # not a literal address -- return it unchanged
        return addr

    if addrinfo[0] == socket.AF_INET6:
        sa = addrinfo[4]
        addr = sa[0]

        # strip a textual '%zone' suffix if getaddrinfo left one in
        idx = addr.find('%')

        if idx >= 0: # pragma: no cover
            addr = addr[:idx]

        ip = ipaddress.ip_address(addr)

        if ip.is_link_local:
            # embed the numeric scope id into the (otherwise unused)
            # upper bits of the link-local address so ipaddress can
            # carry it around as part of the address value
            scope_id = cast(Tuple[str, int, int, int], sa)[3]
            addr = str(ipaddress.ip_address(int(ip) | (scope_id << 96)))

    return addr
def ip_address(addr: str) -> IPAddress:
"""Wrapper for ipaddress.ip_address which supports scoped addresses"""
return ipaddress.ip_address(_normalize_scoped_ip(addr))
def ip_network(addr: str) -> IPNetwork:
    """Wrapper for ipaddress.ip_network which supports scoped addresses"""

    slash = addr.find('/')

    if slash >= 0:
        host, mask = addr[:slash], addr[slash:]
    else:
        host, mask = addr, ''

    return ipaddress.ip_network(_normalize_scoped_ip(host) + mask)
def open_file(filename: FilePath, mode: str, buffering: int = -1) -> IO[bytes]:
"""Open a file with home directory expansion"""
return open(Path(filename).expanduser(), mode, buffering=buffering)
@overload
def read_file(filename: FilePath) -> bytes:
"""Read from a binary file with home directory expansion"""
@overload
def read_file(filename: FilePath, mode: Literal['rb']) -> bytes:
"""Read from a binary file with home directory expansion"""
@overload
def read_file(filename: FilePath, mode: Literal['r']) -> str:
"""Read from a text file with home directory expansion"""
def read_file(filename, mode = 'rb'):
"""Read from a file with home directory expansion"""
with open_file(filename, mode) as f:
return f.read()
def write_file(filename: FilePath, data: bytes, mode: str = 'wb') -> int:
"""Write or append to a file with home directory expansion"""
with open_file(filename, mode) as f:
return f.write(data)
def _parse_units(value: str, suffixes: Mapping[str, int], label: str) -> float:
    """Parse a series of integers followed by unit suffixes

    Splits *value* on single letters into alternating number/suffix
    tokens and sums number * multiplier for each pair. Raises
    ValueError ('Invalid <label>') for an unknown suffix.
    """

    matches = _unit_pattern.split(value)

    # Normalize the token list to an even length: a trailing number with
    # no suffix gets an implicit '' (multiplier 1); otherwise drop the
    # empty string re.split leaves after a trailing suffix letter.
    if matches[-1]:
        matches.append('')
    else:
        matches.pop()

    try:
        # pairs are (number, suffix); suffix lookup is case-insensitive
        return sum(float(matches[i]) * suffixes[matches[i+1].lower()]
                   for i in range(0, len(matches), 2))
    except KeyError:
        raise ValueError('Invalid ' + label) from None
def parse_byte_count(value: str) -> int:
"""Parse a byte count with optional k, m, or g suffixes"""
return int(_parse_units(value, _byte_units, 'byte count'))
def parse_time_interval(value: str) -> float:
"""Parse a time interval with optional s, m, h, d, or w suffixes"""
return _parse_units(value, _time_units, 'time interval')
_ACM = TypeVar('_ACM', bound=AsyncContextManager, covariant=True)
class _ACMWrapper(Generic[_ACM]):
"""Async context manager wrapper"""
def __init__(self, coro: Awaitable[_ACM]):
self._coro = coro
self._coro_result: Optional[_ACM] = None
def __await__(self) -> Generator[Any, None, _ACM]:
return self._coro.__await__()
async def __aenter__(self) -> _ACM:
self._coro_result = await self._coro
return await self._coro_result.__aenter__()
async def __aexit__(self, exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType]) -> Optional[bool]:
assert self._coro_result is not None
exit_result = await self._coro_result.__aexit__(
exc_type, exc_value, traceback)
self._coro_result = None
return exit_result
_ACMCoro = Callable[..., Awaitable[_ACM]]
_ACMWrapperFunc = Callable[..., _ACMWrapper[_ACM]]
def async_context_manager(coro: _ACMCoro[_ACM]) -> _ACMWrapperFunc[_ACM]:
"""Decorator for functions returning asynchronous context managers
This decorator can be used on functions which return objects
intended to be async context managers. The object returned by
the function should implement __aenter__ and __aexit__ methods
to run when the async context is entered and exited.
This wrapper also allows the use of "await" on the function being
decorated, to return the context manager without entering it.
"""
@functools.wraps(coro)
def context_wrapper(*args, **kwargs) -> _ACMWrapper[_ACM]:
"""Return an async context manager wrapper for this coroutine"""
return _ACMWrapper(coro(*args, **kwargs))
return context_wrapper
async def maybe_wait_closed(writer: '_SupportsWaitClosed') -> None:
"""Wait for a StreamWriter to close, if Python version supports it
Python 3.8 triggers a false error report about garbage collecting
an open stream if a close is in progress when a StreamWriter is
garbage collected. This can be avoided by calling wait_closed(),
but that method is not available in Python releases prior to 3.7.
This function wraps this call, ignoring the error if the method
is not available.
"""
try:
await writer.wait_closed()
except AttributeError: # pragma: no cover
pass
class Options:
    """Container for configuration options

    Keyword arguments accumulate into :attr:`kwargs`; an existing
    Options instance of the same type may be supplied as a base
    configuration whose values are then overridden.
    """

    kwargs: Dict[str, object]

    def __init__(self, options: Optional['Options'] = None, **kwargs: object):
        if options:
            if not isinstance(options, type(self)):
                raise TypeError('Invalid %s, got %s' %
                                (type(self).__name__, type(options).__name__))

            merged = options.kwargs.copy()
        else:
            merged = {}

        merged.update(kwargs)
        self.kwargs = merged
        self.prepare(**self.kwargs)

    def prepare(self, **kwargs: object) -> None:
        """Pre-process configuration options (hook for subclasses)"""

    def update(self, kwargs: Dict[str, object]) -> None:
        """Merge *kwargs* into the stored options and re-run prepare"""

        self.kwargs.update(kwargs)
        self.prepare(**self.kwargs)
class _RecordMeta(type):
    """Metaclass for general-purpose record type

    Collects the annotated names of each Record subclass into an
    ordered name -> default mapping stored as __slots__, removing the
    class attributes that supplied the defaults.
    """

    def __new__(mcs: Type['_RecordMeta'], name: str, bases: Tuple[type, ...],
                ns: Dict[str, object]) -> '_RecordMeta':
        if name != 'Record':
            # annotated names define the record's fields, in order
            fields = cast(Mapping[str, str],
                          ns.get('__annotations__', {})).keys()
            # class-level values (or None when absent) become defaults
            defaults = {k: ns.get(k) for k in fields}

            # drop the default-bearing attributes from the namespace;
            # instance storage goes through __slots__ instead
            ns = {k: v for k, v in ns.items() if k not in fields}
            ns['__slots__'] = defaults

        return cast(_RecordMeta, super().__new__(mcs, name, bases, ns))
class Record(metaclass=_RecordMeta):
    """Generic Record class

    Subclasses declare fields via annotations; instances can be filled
    from positional args (in field order), keyword args, or both, with
    later assignments overriding earlier ones.
    """

    __slots__: Mapping[str, object] = {}

    def __init__(self, *args: object, **kwargs: object):
        # start from the declared defaults...
        for k, v in self.__slots__.items():
            setattr(self, k, v)

        # ...then apply positional values in declaration order...
        for k, v in zip(self.__slots__, args):
            setattr(self, k, v)

        # ...and finally keyword overrides
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __repr__(self) -> str:
        return '%s(%s)' % (type(self).__name__,
                           ', '.join('%s=%r' % (k, getattr(self, k))
                                     for k in self.__slots__))

    def __str__(self) -> str:
        # fields whose _format returns None are omitted from str()
        values = ((k, self._format(k, getattr(self, k)))
                  for k in self.__slots__)

        return ', '.join('%s: %s' % (k, v) for k, v in values if v is not None)

    def _format(self, k: str, v: object) -> Optional[str]:
        """Format a field as a string; return None to hide the field"""

        # pylint: disable=no-self-use,unused-argument

        return str(v)
class Error(Exception):
"""General SSH error"""
def __init__(self, code: int, reason: str, lang: str = DEFAULT_LANG):
super().__init__(reason)
self.code = code
self.reason = reason
self.lang = lang
class DisconnectError(Error):
"""SSH disconnect error
This exception is raised when a serious error occurs which causes
the SSH connection to be disconnected. Exception codes should be
taken from :ref:`disconnect reason codes <DisconnectReasons>`.
See below for exception subclasses tied to specific disconnect
reasons if you want to customize your handling by reason.
:param code:
Disconnect reason, taken from :ref:`disconnect reason
codes <DisconnectReasons>`
:param reason:
A human-readable reason for the disconnect
:param lang: (optional)
The language the reason is in
:type code: `int`
:type reason: `str`
:type lang: `str`
"""
class CompressionError(DisconnectError):
"""SSH compression error
This exception is raised when an error occurs while compressing
or decompressing data sent on the SSH connection.
:param reason:
Details about the compression error
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_COMPRESSION_ERROR, reason, lang)
class ConnectionLost(DisconnectError):
"""SSH connection lost
This exception is raised when the SSH connection to the remote
system is unexpectedly lost. It can also occur as a result of
the remote system failing to respond to keepalive messages or
as a result of a login timeout, when those features are enabled.
:param reason:
Details about the connection failure
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_CONNECTION_LOST, reason, lang)
class HostKeyNotVerifiable(DisconnectError):
"""SSH host key not verifiable
This exception is raised when the SSH server's host key or
certificate is not verifiable.
:param reason:
Details about the host key verification failure
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_HOST_KEY_NOT_VERIFIABLE, reason, lang)
class IllegalUserName(DisconnectError):
"""SSH illegal user name
This exception is raised when an error occurs while processing
the username sent during the SSL handshake.
:param reason:
Details about the illegal username
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_ILLEGAL_USER_NAME, reason, lang)
class KeyExchangeFailed(DisconnectError):
"""SSH key exchange failed
This exception is raised when the SSH key exchange fails.
:param reason:
Details about the connection failure
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_KEY_EXCHANGE_FAILED, reason, lang)
class MACError(DisconnectError):
"""SSH MAC error
This exception is raised when an error occurs while processing
the message authentication code (MAC) of a message on the SSH
connection.
:param reason:
Details about the MAC error
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_MAC_ERROR, reason, lang)
class PermissionDenied(DisconnectError):
"""SSH permission denied
This exception is raised when there are no authentication methods
remaining to complete SSH client authentication.
:param reason:
Details about the SSH protocol error detected
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_NO_MORE_AUTH_METHODS_AVAILABLE, reason, lang)
class ProtocolError(DisconnectError):
"""SSH protocol error
This exception is raised when the SSH connection is disconnected
due to an SSH protocol error being detected.
:param reason:
Details about the SSH protocol error detected
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_PROTOCOL_ERROR, reason, lang)
class ProtocolNotSupported(DisconnectError):
"""SSH protocol not supported
This exception is raised when the remote system sends an SSH
protocol version which is not supported.
:param reason:
Details about the unsupported SSH protocol version
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_PROTOCOL_ERROR, reason, lang)
class ServiceNotAvailable(DisconnectError):
"""SSH service not available
This exception is raised when an unexpected service name is
received during the SSH handshake.
:param reason:
Details about the unexpected SSH service
:param lang: (optional)
The language the reason is in
:type reason: `str`
:type lang: `str`
"""
def __init__(self, reason: str, lang: str = DEFAULT_LANG):
super().__init__(DISC_SERVICE_NOT_AVAILABLE, reason, lang)
class ChannelOpenError(Error):
"""SSH channel open error
This exception is raised by connection handlers to report
channel open failures.
:param code:
Channel open failure reason, taken from :ref:`channel open
failure reason codes <ChannelOpenFailureReasons>`
:param reason:
A human-readable reason for the channel open failure
:param lang:
The language the reason is in
:type code: `int`
:type reason: `str`
:type lang: `str`
"""
class ChannelListenError(Exception):
"""SSH channel listen error
This exception is raised to report failures in setting up
remote SSH connection listeners.
:param details:
Details of the listen failure
:type details: `str`
"""
class PasswordChangeRequired(Exception):
"""SSH password change required
This exception is raised during password validation on the
server to indicate that a password change is required. It
should be raised when the password provided is valid but
expired, to trigger the client to provide a new password.
:param prompt:
The prompt requesting that the user enter a new password
:param lang:
The language that the prompt is in
:type prompt: `str`
:type lang: `str`
"""
def __init__(self, prompt: str, lang: str = DEFAULT_LANG):
super().__init__('Password change required: %s' % prompt)
self.prompt = prompt
self.lang = lang
class BreakReceived(Exception):
"""SSH break request received
This exception is raised on an SSH server stdin stream when the
client sends a break on the channel.
:param msec:
The duration of the break in milliseconds
:type msec: `int`
"""
def __init__(self, msec: int):
super().__init__('Break for %s msec' % msec)
self.msec = msec
class SignalReceived(Exception):
"""SSH signal request received
This exception is raised on an SSH server stdin stream when the
client sends a signal on the channel.
:param signal:
The name of the signal sent by the client
:type signal: `str`
"""
def __init__(self, signal: str):
super().__init__('Signal: %s' % signal)
self.signal = signal
class SoftEOFReceived(Exception):
"""SSH soft EOF request received
This exception is raised on an SSH server stdin stream when the
client sends an EOF from within the line editor on the channel.
"""
def __init__(self) -> None:
super().__init__('Soft EOF')
class TerminalSizeChanged(Exception):
"""SSH terminal size change notification received
This exception is raised on an SSH server stdin stream when the
client sends a terminal size change on the channel.
:param width:
The new terminal width
:param height:
The new terminal height
:param pixwidth:
The new terminal width in pixels
:param pixheight:
The new terminal height in pixels
:type width: `int`
:type height: `int`
:type pixwidth: `int`
:type pixheight: `int`
"""
def __init__(self, width: int, height: int, pixwidth: int, pixheight: int):
super().__init__('Terminal size change: (%s, %s, %s, %s)' %
(width, height, pixwidth, pixheight))
self.width = width
self.height = height
self.pixwidth = pixwidth
self.pixheight = pixheight
_disc_error_map = {
DISC_PROTOCOL_ERROR: ProtocolError,
DISC_KEY_EXCHANGE_FAILED: KeyExchangeFailed,
DISC_MAC_ERROR: MACError,
DISC_COMPRESSION_ERROR: CompressionError,
DISC_SERVICE_NOT_AVAILABLE: ServiceNotAvailable,
DISC_PROTOCOL_VERSION_NOT_SUPPORTED: ProtocolNotSupported,
DISC_HOST_KEY_NOT_VERIFIABLE: HostKeyNotVerifiable,
DISC_CONNECTION_LOST: ConnectionLost,
DISC_NO_MORE_AUTH_METHODS_AVAILABLE: PermissionDenied,
DISC_ILLEGAL_USER_NAME: IllegalUserName
}
def construct_disc_error(code: int, reason: str, lang: str) -> DisconnectError:
    """Map disconnect error code to appropriate DisconnectError exception

    Unknown codes fall back to a generic DisconnectError with the
    numeric code appended to the reason text.
    """

    try:
        return _disc_error_map[code](reason, lang)
    except KeyError:
        return DisconnectError(code, '%s (error %d)' % (reason, code), lang)
| ronf/asyncssh | asyncssh/misc.py | misc.py | py | 22,888 | python | en | code | 1,408 | github-code | 6 | [
{
"api_name": "typing_extensions.Protocol",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing_extensions.Protocol",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "typing_extensions.Protocol",
"line_number": 55,
"usage_type": "name"
},
{
"... |
32410264814 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
    # Auto-generated migration: constrain Object.background_transparency
    # to the 0-100 percent range.

    dependencies = [
        ('app', '0016_auto_20150828_0735'),
    ]

    operations = [
        migrations.AlterField(
            model_name='object',
            name='background_transparency',
            # Optional integer percentage; validators enforce 0 <= value <= 100.
            field=models.IntegerField(default=100, null=True, blank=True, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),
            preserve_default=True,
        ),
    ]
| jacyn/burst | webapp/app/migrations/0017_auto_20150828_0747.py | 0017_auto_20150828_0747.py | py | 609 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 15,
"usage_type": "call"
},
{... |
15999128955 | # -*- coding: utf-8 -*-
"""
无签名版本
"""
import re
import json
from scrapy import Spider
from scrapy.http import Request
from douyin_app.docs.conf import HEADER
class DouyinIdolVideoSpider(Spider):
    """Scrapy spider that downloads every video posted by one Douyin user.

    Given a share URL containing ``user/<uid>``, it pages through the
    user's post-list API (20 items per page, cursor-based) and saves each
    video under ``./douyin_app/videos/<description>.mp4``.
    """

    name = "idol_douyin_video"
    idol_url = ''
    # post-list API; paged via max_cursor
    video_list_url = 'https://api.amemv.com/aweme/v1/aweme/post/?user_id={}&max_cursor={}&count=20&device_id=39681429254&ac=wifi&channel=xiaomi&aid=1128&app_name=aweme'
    max_cursor = 0
    uid = None

    def __init__(self, url):
        super(DouyinIdolVideoSpider, self).__init__()
        self.idol_url = url

    def start_requests(self):
        """Extract the user id from the share URL and start paging."""
        try:
            self.uid = re.findall(r'user/(\d+)', self.idol_url)[0]
            self.logger.info('解析到idol信息{}(•‾̑⌣‾̑•)✧˖°'.format(self.uid))
            yield self.start_get_video_list(self.uid)
        except Exception:
            self.logger.error('解析不到视频信息,,Ծ‸Ծ,,')

    def start_get_video_list(self, uid):
        """Build the request for one page of the user's post list."""
        url = self.video_list_url.format(uid, self.max_cursor)
        return Request(url=url, headers=HEADER, callback=self.get_video_list)

    def start_get_video(self, url, desc):
        """Build the download request for a single video."""
        return Request(url=url, headers=HEADER, callback=self.get_video,
                       meta={'desc': desc})

    def get_video_list(self, response):
        """Parse one API page, yield download requests, follow pagination."""
        content = json.loads(response.body.decode('utf-8'))
        video_list = content.get('aweme_list')
        if video_list:
            for video in video_list:
                download_url = video.get('video').get('play_addr_lowbr').get('url_list')[0]
                desc = video.get('desc')
                # Fixed: the URL must be interpolated via str.format; the
                # original passed format(download_url) as a stray positional
                # argument to logger.info, so it was never rendered.
                self.logger.info('解析到下载链接{}(•‾̑⌣‾̑•)✧˖°'.format(download_url))
                yield self.start_get_video(download_url, desc)
            if content.get('has_more'):
                self.max_cursor = content.get('max_cursor')
                yield self.start_get_video_list(self.uid)

    def get_video(self, response):
        """Write a downloaded video body to disk, named by its description."""
        desc = response.meta.get('desc')
        with open('./douyin_app/videos/{}.mp4'.format(desc), 'wb') as f:
            f.write(response.body)
        self.logger.info('下载完成๑乛◡乛๑')
| iamxuwenjin/videos_download | douyin_app/spiders/douyin_idol_video_download.py | douyin_idol_video_download.py | py | 2,271 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "scrapy.Spider",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "douyin_app.docs.conf.HEADER",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "scrapy.http.Requ... |
11415064866 | """
Original multirc format:
{
data: [
{
id: str,
paragraph: {
text: {
},
questions: [
{
question: str,
sentences_used: [ int, ],
idx: int,
multisent: bool // don't know what this is
answers: [
{
text: str,
isAnswer: bool,
scores: {} //empty
},
...
]
},
...
]
}
},
...
]
}
"""
import json
import argparse
from pathlib import Path
from typing import Dict, List, Mapping, Generator, Optional, Union
from copy import deepcopy
import itertools
import re
import logging
from .reader import DatasetReader
from .types import (Sample, SingleQuestionSample,
SingleQuestionSingleOptionSample, NLIWithOptionsSample,
PureNLISample)
from dataclasses import dataclass, asdict
logger = logging.getLogger(__name__)
@dataclass
class OriginalMultircSample(Sample):
    # raw "paragraph" object from the original MultiRC JSON:
    # contains the passage text plus its list of questions/answers
    paragraph: Dict
html_tags = re.compile(r'<[^>]+>')
setence_tags = re.compile(r'Sent\s+\d+:')
html_plus_sentence_tags = re.compile(r"<[^>]+>|Sent\s+\d+:")
class MultircReader(DatasetReader):
    """Reads the original MultiRC JSON format and flattens each
    (paragraph, question, answer-option) triple into one sample.

    Supported formats:
      * input_type  -- 'OriginalMultircSample'
      * output_type -- 'SingleQuestionSingleOptionSample'
    """

    def __init__(self,
                 input_type: str = 'OriginalMultircSample',
                 output_type: str = 'SingleQuestionSingleOptionSample'):
        self.input_type = input_type
        self.output_type = output_type

    def _read_data(self, path: Path) -> Dict:
        """Load and return the raw JSON file at *path*."""
        with open(path) as f:
            samples = json.load(f)

        return samples

    def read(self, path: Path,
             return_dict: bool = False) -> List[Union[Sample, Dict]]:
        """Read *path* and return the converted samples.

        :param path: location of the original MultiRC JSON file
        :param return_dict: if True, return plain dicts instead of
            dataclass instances
        :raises ValueError: if input_type or output_type is unsupported
        """
        if self.input_type == 'OriginalMultircSample':

            def reader_func(p: Path) -> List[Sample]:
                samples = self._read_data(p)['data']
                # strip html markup and "Sent N:" markers from each passage
                for s in samples:
                    s['paragraph']['text'] = html_plus_sentence_tags.sub(
                        '', s['paragraph']['text'])

                return [OriginalMultircSample(**x) for x in samples]
        else:
            raise ValueError(f"input_type {self.input_type} not supported")

        if self.output_type == 'SingleQuestionSingleOptionSample':

            def sample_converter(
                    x: OriginalMultircSample) -> OriginalMultircSample:
                return x  # identity: no per-sample preprocessing needed

            def aggregate_converter(
                    x: List[OriginalMultircSample]
            ) -> List[SingleQuestionSingleOptionSample]:
                # flatten: one output sample per answer option, with a
                # composite id of paragraph id, question idx, answer idx
                all_res = []

                for s in x:
                    para = s.paragraph['text']

                    for q in s.paragraph['questions']:
                        for ans_i, a in enumerate(q['answers']):
                            all_res.append(
                                SingleQuestionSingleOptionSample(
                                    id=s.id + f"_{q['idx']}" + f"_{ans_i}",
                                    article=para,
                                    question=q['question'],
                                    option=a['text'],
                                    label=int(a['isAnswer'])))

                return all_res
        else:
            # fixed: error message previously read "outpu_type"
            raise ValueError(f"output_type {self.output_type} not supported")

        input_samples = [sample_converter(s) for s in reader_func(path)]
        output_samples = aggregate_converter(input_samples)

        if return_dict:
            return [s.__dict__ for s in output_samples]

        return output_samples
| nli-for-qa/conversion | qa2nli/qa_readers/multirc.py | multirc.py | py | 3,998 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "types.Sample",
"line_number": 52,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "dataclasses.dataclass",
... |
4243718619 | from fastapi import APIRouter, HTTPException, status, Query
from datetime import timedelta
from datetime import datetime
from github import Github
import random
router = APIRouter()
github = Github()
@router.get("/repo/health")
def repo_health_check():
    """Liveness probe: report that the repo router is up."""
    return {"status": "OK"}
# return each contributor with their number of commits in the last week
# return each contributor with their number of commits in the last week
@router.get("/repo/contributors")
def return_individual_contributions(repo_name: str):
    """Return up to six contributors with their latest weekly commit count.

    Iterates the contributor stats list in reverse (most active last in
    the GitHub stats ordering) and reports the commit count (`.c`) of
    each contributor's most recent statistics week.
    """
    try:
        repository = github.get_repo(repo_name)
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate instead of being reported as a missing repo
        raise HTTPException(status_code=404, detail="Repository not found")

    contributor_list = repository.get_stats_contributors()
    contributors_info = {}
    counter = 0

    for contributor in reversed(contributor_list):
        if counter == 6:
            break
        weekly_contribution = contributor.weeks
        # `.c` is the commit count of the most recent stats week
        contributors_info[contributor.author.name] = weekly_contribution[-1].c
        counter = counter + 1

    return contributors_info
# return total number of commits so for in the week (commits since the most recent monday)
# return total number of commits so far in the week (commits since the most recent monday)
@router.get("/repo/totalweeklycommits")
def return_weekly_commits(repo_name: str):
    """Return the number of commits made since the most recent Monday."""
    try:
        repository = github.get_repo(repo_name)
    except Exception:
        raise HTTPException(status_code=404, detail="Repository not found")

    # fixed: `datetime` is the class (from `from datetime import datetime`),
    # so the previous `datetime.datetime.now()` raised AttributeError
    today = datetime.now()
    most_recent_monday = today - timedelta(days=today.weekday())
    commits = repository.get_commits(since=most_recent_monday)

    return {
        "Commits in the last week": commits.totalCount,
        "Commits since": str(most_recent_monday.date())
    }
# return total number of commits in the past week
# return total number of commits between two dates
@router.get("/repo/commits")
def return_weekly_commits(repo_name : str, start : datetime = Query(None), end : datetime = Query(None)):
    """Return the total number of commits between *start* and *end*.

    Expected query format: ?start=2022-01-01&end=2022-01-31

    NOTE(review): this redefines `return_weekly_commits` from the
    /repo/totalweeklycommits route above; both routes stay registered,
    but the name collision is confusing and worth renaming.
    """
    # fixed: FastAPI already parses the query strings into datetime
    # objects, so the previous datetime.datetime.strptime() call was
    # broken twice over (wrong attribute, and a datetime argument
    # where strptime expects a str)
    try:
        repository = github.get_repo(repo_name)
    except Exception:
        raise HTTPException(status_code=404, detail="Repository not found")

    commits = repository.get_commits(since=start, until=end)
    return {"commits": commits.totalCount}
# return percentage increase for commits in the past week
# return percentage increase for commits in the past week
@router.get("/repo/commitsincrease")
def calculate_commits_increase(repo_name: str):
    """Return the week-over-week percentage change in commit count."""
    # week boundaries: most recent Monday and the Monday before it
    # fixed: `datetime` is the class imported from the datetime module,
    # so the correct call is datetime.now(), not datetime.datetime.now()
    today = datetime.now()
    most_recent_monday = today - timedelta(days=today.weekday())
    last_weeks_monday = most_recent_monday - timedelta(days=7)

    try:
        repository = github.get_repo(repo_name)
    except Exception:
        raise HTTPException(status_code=404, detail="Repository not found")

    commits_this_week = repository.get_commits(
        since=most_recent_monday).totalCount
    commits_last_week = repository.get_commits(
        since=last_weeks_monday, until=most_recent_monday).totalCount

    if commits_last_week == 0:
        # fixed: the `commits_this_week * 100` result was previously
        # computed and discarded; treat growth from zero as count * 100%
        percentage = str(commits_this_week * 100) + "%"
    else:
        difference = (commits_this_week - commits_last_week) / commits_last_week
        percentage = str(round(difference * 100, 1)) + "%"

    return {
        "Increase in commits in the past week": percentage,
        "Last week's date": str(last_weeks_monday.date())
    }
# @router.get("/issues")
# def get_repo_issues(repo_name : str):
# repository = github.get_repo(repo_name)
# issue_list = {}
# for issue in repository.get_issues():
# currentIssue = {
# "Assignee": issue.assignee.assignee,
# "Id": issue.id,
# "Commit Id": issue.commit_id,
# "Event": issue.event,
# "Date created": issue.created_at,
# }
# issue_list[issue.assignee.assignee] = currentIssue
# return issue_list
@router.get("/dummy/repo/commits")
def dummy_repo_commits(start: str, end: str):
    """Return randomized dummy commit statistics for the given date range.

    Test-only endpoint. NOTE(review): results are intentionally random
    (no fixed seed), so repeated calls with the same range differ.
    """
    # number of days between the two ISO dates bounds the random counts
    start_date = datetime.strptime(start, "%Y-%m-%d")
    end_date = datetime.strptime(end, "%Y-%m-%d")
    delta = end_date - start_date

    num_commits = random.randint(0, delta.days)
    authors = ['John', 'Jane', 'Bob', 'Alice', 'Joe', 'Mary', 'Tom', 'Sally', 'Sue', 'Sam']
    last_num_commits = random.randint(0, delta.days)

    if last_num_commits == 0:
        # fixed: `num_commits * 100` was previously computed and discarded
        percentage = num_commits * 100
    else:
        difference = (num_commits - last_num_commits) / last_num_commits
        percentage = round(difference * 100, 1)

    return {
        "num_commits": num_commits,
        "percent_change": percentage,
        "authors": authors,
        "start_date": start,
        "end_date": end,
    }
@router.get("/dummy/repo/bugs")
def dummy_repo_commits(start: str, end: str):
    """Return a deterministic dummy cumulative bug series for the range.

    NOTE(review): reuses the name `dummy_repo_commits`, shadowing the
    /dummy/repo/commits handler above at module level; both routes stay
    registered, but the collision is worth renaming.
    """
    start_date = datetime.strptime(start, "%Y-%m-%d")
    end_date = datetime.strptime(end, "%Y-%m-%d")
    delta = end_date - start_date
    monthly_bug_rate = []
    # fixed seed makes the series reproducible across calls
    random.seed(0)
    x = 0
    # cumulative sum of non-negative Gaussian steps, one per day
    for i in range(delta.days):
        # values have to be positive
        dx = random.gauss(0, 1)
        if dx < 0:
            dx = 0
        x += dx
        monthly_bug_rate.append(int(x))
    print(monthly_bug_rate)
    return {
        "num_bugs": len(monthly_bug_rate),
        "num_bugs_prev": 10,
        "percent_change": 20,
        "start_date": start,
        "end_date": end,
    }
| sweng-project-tcd/dashboard-back | router/repo/repo.py | repo.py | py | 6,032 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "github.Github",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "github.get_repo",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",... |
4463507689 | from django.shortcuts import render
from django.views.generic import View
from .models import *
# Create your views here.
class ProjectsList(View):
    """List the projects owned by the currently logged-in user."""

    def get(self, request):
        # Anonymous visitors have no matching User row; fall back to
        # None so the filter below returns an empty queryset instead of
        # crashing. Narrowed from a bare `except:` so unrelated errors
        # (database outages, etc.) are no longer silently swallowed.
        try:
            us = User.objects.get(username=request.user)
        except User.DoesNotExist:
            us = None
        projects = Project.objects.filter(user=us)
        return render(request, 'taskmain/projects_list.html', {'projects': projects, 'us': us})
class ProjectDetail(View):
    """Render the detail page for a single project selected by slug."""

    def get(self,request,slug):
        # NOTE(review): Project.objects.get raises DoesNotExist for an
        # unknown slug, surfacing as a 500 -- consider get_object_or_404.
        project = Project.objects.get(slug=slug)
        return render(request,'taskmain/project_detail.html',{'project':project})
| virasium/TM | taskmain/views.py | views.py | py | 618 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.views.generic.View",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.shortcuts.render",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.views.generic.View",
"line_number": 16,
"usage_type": "name"
},
{
"api_na... |
29961826770 | from selenium import webdriver
import json
import traceback
import urllib.request
def parse_page(driver):
    """Extract the talk title and subtitled-download URL from a TED page.

    Scans the page's <script> tags for the one that initializes the talk
    page, strips the JS wrapper around its JSON payload, and returns
    (title, download_url) for the high-quality English subtitled video.

    Exits the process if the page cannot be parsed.
    """
    script_clue = "q(\"talkPage.init\","
    try:
        for script in driver.find_elements_by_tag_name("script"):
            content = script.get_attribute("innerHTML")
            if content.startswith(script_clue):
                # Fixed: slice off the exact prefix instead of
                # str.lstrip(), which strips any run of *characters*
                # from the clue set and could eat into the payload.
                json_text = content[len(script_clue):].rstrip(")")
                json_content = json.loads(json_text)
                title = json_content["__INITIAL_DATA__"]["name"]
                download_url = json_content["__INITIAL_DATA__"]["talks"][0][
                    "downloads"]["subtitledDownloads"]["en"]["high"]
                return title, download_url
    except Exception:
        print(traceback.format_exc())
    # Fixed: previously a page with no matching script fell through and
    # returned None, crashing the caller on tuple unpacking.
    print("Unable to parse page. Stopping.")
    exit(-1)
def main(url):
    """Download the TED talk at *url* into the current directory.

    Drives a headless Firefox to load the page, extracts the title and
    download URL via parse_page, then fetches the video as
    '<title>.mp4'. Exits the process if the download fails.
    """
    print("Processing URL %s..." % url)
    options = webdriver.FirefoxOptions()
    options.add_argument("--headless")
    driver = webdriver.Firefox(firefox_options=options)
    driver.get(url)
    # NOTE(review): the driver is never quit, leaking the browser
    # process on every run -- confirm and consider driver.quit().
    title, download_url = parse_page(driver)
    print("TED talk: {}\nDownload URL: {}".format(title, download_url))
    file_title = title + ".mp4"
    print("Downloading file {}...".format(file_title))
    try:
        urllib.request.urlretrieve(download_url, file_title)
        print("Download completed.")
    except Exception:
        print(traceback.format_exc())
        print("Unable to download video. Stopping.")
        exit(-1)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="A simple tool to download TED talks via CLI.")
parser.add_argument("-t", "--talk", type=str, required=True,
help="Link to TED talk")
args = parser.parse_args()
main(args.talk)
| ShadowTemplate/ted-downloader | ted.py | ted.py | py | 1,781 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.loads",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "traceback.format_exc",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.FirefoxOptions",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sel... |
15536903566 | import pandas as pd
from sklearn import model_selection
import numpy as np
from sklearn import datasets
def create_k_folds():
    """Read train.csv, attach a plain 5-fold split in a `kfold` column and
    write the result to train_folds.csv."""
    frame = pd.read_csv("train.csv")
    # mark every row as unassigned, then shuffle before splitting
    frame["kfold"] = -1
    frame = frame.sample(frac=1).reset_index(drop=True)
    splitter = model_selection.KFold(n_splits=5)
    # each validation index set gets tagged with its fold number
    for fold_number, (_, valid_idx) in enumerate(splitter.split(X=frame)):
        frame.loc[valid_idx, "kfold"] = fold_number
    frame.to_csv("train_folds.csv", index=False)
def create_stratified_k_folds():
    """Read train.csv, attach a stratified 5-fold split (stratified on the
    `target` column) and write the result to train_folds.csv."""
    frame = pd.read_csv("train.csv")
    # mark every row as unassigned, then shuffle before splitting
    frame["kfold"] = -1
    frame = frame.sample(frac=1).reset_index(drop=True)
    targets = frame.target.values
    splitter = model_selection.StratifiedKFold(n_splits=5)
    # stratification keeps the class balance identical across folds
    for fold_number, (_, valid_idx) in enumerate(splitter.split(X=frame, y=targets)):
        frame.loc[valid_idx, "kfold"] = fold_number
    frame.to_csv("train_folds.csv", index=False)
def create_stratified_k_fold_for_regression(data):
    """Return *data* with a `kfold` column (values 0-4) assigned by
    stratifying the continuous `target` column into Sturges-rule bins."""
    # mark every row as unassigned, then shuffle before splitting
    data["kfold"] = -1
    data = data.sample(frac=1).reset_index(drop=True)
    # number of bins by Sturge's rule, floored to an int
    num_bins = int(np.floor(1 + np.log2(len(data))))
    # bin the continuous targets so StratifiedKFold has discrete labels
    data.loc[:, "bins"] = pd.cut(data["target"], bins=num_bins, labels=False)
    splitter = model_selection.StratifiedKFold(n_splits=5)
    # stratify on the bins rather than the raw targets
    for fold_number, (_, valid_idx) in enumerate(
            splitter.split(X=data, y=data.bins.values)):
        data.loc[valid_idx, "kfold"] = fold_number
    # the helper column is internal only — drop it before returning
    return data.drop("bins", axis=1)
if __name__ == '__main__':
    # Demo: build a synthetic regression set (1500 samples, 100 features,
    # 1 target) and attach stratified regression folds to it.
    X, y = datasets.make_regression(n_samples=1500, n_features=100, n_targets=1)
    # create a data frame out of our numpy arrays
    df = pd.DataFrame(X, columns=[f"f_{i}" for i in range(X.shape[1])])
    df.loc[:, "target"] = y
    # create folds (removed the leftover debug assignment `a = 0`)
    df = create_stratified_k_fold_for_regression(data=df)
| Vuong02011996/Book-Approaching-any-machine-learning-problem | B_Cross validation/cross-validation.py | cross-validation.py | py | 2,781 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.KFold",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "... |
7792745843 | #!/usr/bin/env python3
"""Resolve docker container's name into IPv4 address"""
import docker
from ipaddress import ip_address, IPv4Address, IPv6Address
from threading import Thread
from twisted.internet import reactor, defer
from twisted.names import client, dns, server
LISTEN_ADDRESS = "127.0.0.1"
DNS_PORT = 53
class DockerResolver(client.Resolver):
    """Resolve container names into IP addresses.

    Keeps an in-memory index of running containers (name -> addresses) and
    answers A/AAAA/PTR queries for them, delegating everything else to the
    upstream servers of twisted's client.Resolver.
    """

    def __init__(self, dockerClient, servers=None):
        """Index the currently running containers.

        dockerClient -- a docker client with a .containers.list() API
        servers -- upstream DNS servers forwarded to client.Resolver
        """
        super().__init__(resolv=None, servers=servers)
        self.dockerClient = dockerClient
        # container name -> list of address strings (IPv4 and/or IPv6)
        self.runningContainers = {}
        for c in dockerClient.containers.list():
            # docker reports names with a leading slash
            containerName = c.attrs["Name"][1:]
            containerNetworks = c.attrs["NetworkSettings"]["Networks"]
            for k, v in containerNetworks.items():
                containerIPv4 = v["IPAddress"]
                containerIPv6 = v["GlobalIPv6Address"]
                if not containerIPv6:
                    containerIPv6 = None
                # only register containers that are healthy or have no
                # healthcheck configured at all
                shouldAddContainer = False
                if (("Health" in c.attrs["State"] and
                     c.attrs["State"]["Health"]["Status"] == "healthy") or
                        "Health" not in c.attrs["State"]):
                    shouldAddContainer = True
                if shouldAddContainer:
                    self.addContainer(containerName,
                                      containerIPv4,
                                      containerIPv6)

    def addContainer(self, containerName, containerIPv4, containerIPv6=None):
        """Register a container's addresses under its name."""
        if containerName not in self.runningContainers:
            self.runningContainers[containerName] = []
        self.runningContainers[containerName].append(containerIPv4)
        if containerIPv6:
            self.runningContainers[containerName].append(containerIPv6)

    def removeContainer(self, containerName):
        """Forget a container; no-op if it was never registered."""
        self.runningContainers.pop(containerName, None)

    def __lookup(self, query, timeout=None, record_type="A"):
        """Answer an A or AAAA query from the container index.

        Returns a deferred (answers, authority, additional) triple, or
        None when the name is not a known container.
        """
        allowedTypes = ("A", "AAAA")
        if record_type not in allowedTypes:
            raise ValueError
        domain = query.decode()
        if domain in self.runningContainers:
            answers = []
            authority = []
            additional = []
            for address in self.runningContainers[domain]:
                # only include addresses of the requested family
                if ((record_type == "A"
                        and not isinstance(ip_address(address), IPv4Address))
                        or (record_type == "AAAA"
                            and not isinstance(ip_address(address), IPv6Address))):
                    continue
                record = getattr(dns, "Record_%s" % record_type)
                p = record(address=address.encode())
                dnsType = getattr(dns, "%s" % record_type)
                answer = dns.RRHeader(name=query, payload=p, type=dnsType)
                answers.append(answer)
            return defer.succeed((answers, authority, additional))
        else:
            return None

    def lookupAddress(self, query, timeout=None):
        response = self.__lookup(query, timeout, "A")
        if response:
            return response
        else:
            return super().lookupAddress(query, timeout)

    def lookupIPV6Address(self, query, timeout=None):
        response = self.__lookup(query, timeout, "AAAA")
        if response:
            return response
        else:
            return super().lookupIPV6Address(query, timeout)

    def __findContainerByPTRQuery(self, PTRQuery):
        """Map a reverse-DNS (PTR) name back to a container name.

        Returns the container name owning the queried address, or None
        when the query is not a reverse name or no container matches.
        """
        query = PTRQuery.decode().rstrip(".")
        # NOTE: the previous implementation used rstrip(".in-addr.arpa") /
        # rstrip(".ip6.arpa"), which strips a *character set* and could eat
        # trailing hex nibbles of the address itself (e.g. "a" or "6" in an
        # IPv6 reverse name). Slice the exact suffix off instead.
        if query.endswith(".in-addr.arpa"):
            ip_list = query[:-len(".in-addr.arpa")].split(".")
            # octets are listed least-significant first; rebuild in order
            i = 0
            ipQuery = ""
            while i < len(ip_list):
                i += 1
                ipQuery += ip_list[-i]
                if i != len(ip_list):
                    ipQuery += "."
            ipQuery = ip_address(ipQuery)
        elif query.endswith(".ip6.arpa"):
            # nibbles are single characters, so reversing the string
            # restores most-significant-first order
            ip_list = query[:-len(".ip6.arpa")][::-1].split(".")
            i = 0
            ipQuery = ""
            while i < len(ip_list):
                ipQuery += ip_list[i]
                i += 1
                if i % 4 == 0 and i != len(ip_list):
                    ipQuery += ":"
            ipQuery = ip_address(ipQuery)
        else:
            return None
        for containerName, IPs in self.runningContainers.items():
            for ip in IPs:
                if ipQuery == ip_address(ip):
                    return containerName
        return None

    def lookupPointer(self, query, timeout=None):
        answers = []
        authority = []
        additional = []
        containerName = self.__findContainerByPTRQuery(query)
        if containerName is None:
            return super().lookupPointer(query, timeout)
        p = dns.Record_PTR(name=containerName)
        answer = dns.RRHeader(name=query, payload=p, type=dns.PTR)
        answers.append(answer)
        return defer.succeed((answers, authority, additional))
class EventsListener(Thread):
    """Listen on start and die events.

    Background thread that keeps a DockerResolver's container index in
    sync with docker engine events (connect / disconnect / health_status).
    """

    def __init__(self, resolver):
        # resolver: the DockerResolver whose index this thread maintains
        super().__init__()
        self.resolver = resolver
        self.eventListener = None

    def run(self):
        # Blocking generator over the docker events stream; each event's
        # Action selects a <action>Callback method on this class.
        self.eventListener = self.resolver.dockerClient.events(
            filters={"event": ["connect",
                               "disconnect",
                               "health_status"]},
            decode=True)
        for e in self.eventListener:
            callback_prefix = e["Action"]
            # health events arrive as "health_status: healthy" — keep only
            # the part before the colon to build the callback name
            if "health_status:" in e["Action"]:
                callback_prefix = e["Action"][:(e["Action"].index(':'))]
            callback = getattr(self, callback_prefix + "Callback")
            callback(e)

    def join(self, timeout=None):
        # Closing the event stream unblocks run() so the thread can exit.
        self.eventListener.close()
        super().join(timeout)

    def __add_container(self, container):
        # container: a dict from docker's inspect_container API
        containerName = container["Name"].lstrip('/')
        containerNetworks = container["NetworkSettings"]["Networks"]
        for k, v in containerNetworks.items():
            containerIPv4 = v["IPAddress"]
            containerIPv6 = v["GlobalIPv6Address"]
            shouldAddContainer = True
            # ContainerNetworks contains all the networks. So if we connect a
            # second (or third) network after container started, we fire
            # connect event several times. This means we should ensure that
            # containerName appears once in resolver.runningContainers list.
            if containerName in self.resolver.runningContainers:
                thisContainer = self.resolver.runningContainers[containerName]
                if containerIPv4 in thisContainer:
                    shouldAddContainer = False
            if not containerIPv6:
                containerIPv6 = None
            if shouldAddContainer:
                self.resolver.addContainer(containerName,
                                           containerIPv4,
                                           containerIPv6)

    def connectCallback(self, event):
        # A container joined a network: register it unless it has a
        # healthcheck that is not (yet) reporting healthy.
        containerID = event["Actor"]["Attributes"]["container"]
        api = self.resolver.dockerClient.api
        container = api.inspect_container(containerID)
        if ("Health" not in container["State"] or
                container["State"]["Health"]["Status"] == "healthy"):
            self.__add_container(container)

    def disconnectCallback(self, event):
        # A container left a network: drop it from the index. The container
        # may already be gone entirely, hence the NotFound guard.
        containerID = event["Actor"]["Attributes"]["container"]
        api = self.resolver.dockerClient.api
        try:
            container = api.inspect_container(containerID)
            containerName = container["Name"].lstrip('/')
            self.resolver.removeContainer(containerName)
        except docker.errors.NotFound:
            pass

    def health_statusCallback(self, event):
        # A container's healthcheck flipped: (re-)register it once healthy.
        api = self.resolver.dockerClient.api
        container = api.inspect_container(event["id"])
        if ("Health" in container["State"] and
                container["State"]["Health"]["Status"] == "healthy"):
            self.__add_container(container)
class DockerDNS():
    """Start and stop the DockerDNS service (UDP + TCP listeners plus a
    docker events listener thread)."""

    def __init__(self, port=None, listenAddress=None, forwarders=None):
        # forwarders is mandatory and must be a list of upstream servers
        if not isinstance(forwarders, list):
            raise TypeError
        self.port = DNS_PORT if port is None else port
        self.listenAddress = LISTEN_ADDRESS if listenAddress is None else listenAddress
        self.forwarders = forwarders
        self.eventsListener = None
        self.udp_listener = None
        self.tcp_listener = None

    def start(self):
        """Configure and execute the DNS server (blocks in the reactor)."""
        resolver = DockerResolver(dockerClient=docker.from_env(),
                                  servers=self.forwarders)
        self.eventsListener = EventsListener(resolver)
        self.eventsListener.start()
        factory = server.DNSServerFactory(clients=[resolver])
        datagram_protocol = dns.DNSDatagramProtocol(controller=factory)
        self.udp_listener = reactor.listenUDP(port=self.port,
                                              protocol=datagram_protocol,
                                              interface=self.listenAddress)
        self.tcp_listener = reactor.listenTCP(port=self.port,
                                              factory=factory,
                                              interface=self.listenAddress)
        reactor.run()

    def clean(self):
        """Release every resource: stop the reactor, join the listener."""
        self.stop()
        self.eventsListener.join()

    def stop(self):
        """Stop the listeners and the reactor, if it is running."""
        if not reactor.running:
            return
        self.udp_listener.stopListening()
        self.tcp_listener.stopListening()
        reactor.stop()
| dangoncalves/docker-dns | dockerDNS/dockerDNS.py | dockerDNS.py | py | 9,886 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "twisted.names.client.Resolver",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "twisted.names.client",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "ipaddress.IPv4Address",
"line_number": 60,
"usage_type": "argument"
},
{
... |
21375968156 | from django.db import models
# Create your models here.
class Movie(models.Model):
    """A movie entry with a poster image stored under media/gallery."""

    id = models.BigAutoField(
        primary_key=True
    )
    # fixed typo in the user-facing label: "Moive name" -> "Movie name"
    name = models.CharField(
        max_length=250,
        verbose_name="Movie name"
    )
    desc = models.TextField(
        verbose_name="Description"
    )
    year = models.PositiveBigIntegerField(
        verbose_name="release year"
    )
    image = models.ImageField(
        upload_to='gallery',
        verbose_name="Images"
    )
    director = models.CharField(
        max_length=100
    )

    def __str__(self):
        # readable representation in the admin and shell
        return self.name
| adhilshaw/moviedemoapp | movieapp/models.py | models.py | py | 558 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.models.Model",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.BigAutoField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name"... |
14200766996 | import json
import os
import traceback
from discord import AllowedMentions, Embed, Forbidden
from discord.ext import commands
class Core(commands.Cog):
    """Invite-tracking cog.

    Keeps a per-guild JSON snapshot of invite use counts and, when a member
    joins, diffs the counts to find which invite was used and grants the
    role configured for that invite link.
    """

    def __init__(self, bot):
        self.bot = bot
        self.db = self.bot.db

    async def push_link_json(self, guild) -> None:
        """Snapshot the current use count of every invite of *guild*."""
        data = {}
        for invite in (await guild.invites()):
            data[f'{invite.code}'] = f'{invite.uses}'
        # context manager guarantees the file is flushed and closed —
        # the original leaked the file descriptor
        with open(f'./data/{guild.id}.json', 'w') as file:
            json.dump(data, file, indent=4)

    @commands.Cog.listener()
    async def on_invite_create(self, invite):
        await self.push_link_json(invite.guild)

    @commands.Cog.listener()
    async def on_invite_remove(self, invite):
        await self.push_link_json(invite.guild)

    @commands.Cog.listener()
    async def on_guild_join(self, guild):
        await self.push_link_json(guild)

    @commands.Cog.listener()
    async def on_member_join(self, member):
        """Detect the used invite by diffing counts and assign its role."""
        guild_data = self.db.list_invite_link(member.guild.id)
        if not guild_data:
            return
        data = {}
        for invite in (await member.guild.invites()):
            data[f'{invite.code}'] = f'{invite.uses}'
        if os.path.exists(f'./data/{member.guild.id}.json'):
            with open(f'./data/{member.guild.id}.json', 'r', encoding='UTF-8') as config:
                g_data = json.load(config)
        else:
            return
        # entries whose (code, uses) pair changed since the last snapshot
        changed = list(dict(data.items() - g_data.items()).items())
        if not changed:
            # no invite counter moved (e.g. vanity URL join) — the original
            # raised IndexError here; just refresh the snapshot instead
            await self.push_link_json(member.guild)
            return
        code = changed[0]
        link_role = self.db.fetch_invite_role(member.guild.id, code[0])
        if not link_role:
            return
        role = member.guild.get_role(link_role[0])
        if role:
            try:
                await member.add_roles(role)
            except Forbidden:
                return
        await self.push_link_json(member.guild)
async def setup(bot):
    # discord.py 2.x extension entry point: register the Core cog.
    await bot.add_cog(Core(bot))
| yutarou12/ChIn-RoleBot | cogs/Core.py | Core.py | py | 1,875 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "json.dump",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "discord.ex... |
35417197808 | import csv
from dataclasses import dataclass, field
from itertools import count
from ..configs import Configs
from ..utils import add_bytes, stringify
from .actions import MONSTER_ACTIONS, Action
from .autoabilities import AUTOABILITIES
from .characters import CHARACTERS, Character
from .constants import (
Element,
ElementalAffinity,
EquipmentSlots,
EquipmentType,
GameVersion,
Rarity,
Stat,
Status,
)
from .file_functions import get_resource_path
from .items import ITEMS, ItemDrop
from .text_characters import TEXT_CHARACTERS
@dataclass
class Monster:
    """Static data sheet for one enemy, decoded from its prize struct."""

    name: str
    # base stats plus the "overkill_threshold" pseudo-stat
    stats: dict[Stat, int]
    elemental_affinities: dict[Element, ElementalAffinity]
    # status -> resistance value read straight from the prize struct bytes
    status_resistances: dict[Status, int]
    poison_tick_damage: int
    zanmato_level: int
    armored: bool
    undead: bool
    auto_statuses: list[Status]
    gil: int
    # keys: "normal" and "overkill"
    ap: dict[str, int]
    item_1: dict[str, int | dict[Rarity, ItemDrop | None]]
    item_2: dict[str, int | dict[Rarity, ItemDrop | None]]
    steal: dict[str | Rarity, int | ItemDrop | None]
    bribe: dict[str, int | ItemDrop | None]
    equipment: dict[str, int | list | dict[Character, list[int]]]
    actions: dict[str, Action]
    # not populated by _get_monster_data — presumably filled elsewhere
    zones: list[str] = field(default_factory=list)

    def __str__(self) -> str:
        return self.name
def _get_prize_structs(file_path: str) -> dict[str, list[int]]:
    """Retrieves the prize structs for enemies.

    Each csv row is a prize struct of hex byte values; the mapping key is
    the monster's own embedded name, with "_2", "_3", ... appended for
    duplicates.
    """
    resolved_path = get_resource_path(file_path)
    monsters_data: dict[str, list[int]] = {}
    with open(resolved_path) as data_file:
        for row in csv.reader(data_file, delimiter=","):
            prize_struct = [int(cell, 16) for cell in row]
            # the monster name is embedded in the struct itself,
            # null (0x00) terminated
            raw_name = ""
            for char_id in prize_struct[408:430]:
                if char_id == 0:
                    break
                raw_name += TEXT_CHARACTERS[char_id]
            key = stringify(raw_name)
            if key not in monsters_data:
                monsters_data[key] = prize_struct
            else:
                # duplicate name: probe "_2", "_3", ... until unique
                for suffix in count(2):
                    candidate = f"{key}_{suffix}"
                    if candidate not in monsters_data:
                        monsters_data[candidate] = prize_struct
                        break
    return monsters_data
def _patch_prize_structs_for_hd(
    prize_structs: dict[str, list[int]],
) -> dict[str, list[int]]:
    """Apply changes made in the HD version to the prize structs.

    Mutates *prize_structs* in place (and also returns it): bumps the
    equipment droprate byte for a fixed set of enemies and rewrites their
    weapon/armor ability arrays.
    """

    def patch_abilities(
        monster_name: str,
        abilities: tuple[int, int, int, int, int, int, int],
        equipment_type: EquipmentType = EquipmentType.WEAPON,
    ) -> None:
        """Modifies ability values 1-7 of every character's weapon
        or armor ability array.
        """
        # base address for abilities in the prize struct
        base_address = 178
        type_offset = 0 if equipment_type == EquipmentType.WEAPON else 1
        # place the abilities values at the correct offsets
        for owner_index in range(7):
            offset = (type_offset + (owner_index * 2)) * 16
            for slot in range(7):
                slot_offset = (slot + 1) * 2
                address = base_address + offset + slot_offset
                prize_structs[monster_name][address] = abilities[slot]

    # in the HD version equipment droprates were modified
    # from 8/255 to 12/255 for these enemies
    # (removed the duplicated "flame_flan"/"nebiros"/"shred"/"skoll"
    # entries the original tuple listed twice — the patch is idempotent,
    # so behavior is unchanged)
    monster_names = (
        "condor",
        "dingo",
        "water_flan",
        "condor_2",
        "dingo_2",
        "water_flan_2",
        "dinonix",
        "killer_bee",
        "yellow_element",
        "worker",
        "vouivre_2",
        "raldo_2",
        "floating_eye",
        "ipiria",
        "mi'ihen_fang",
        "raldo",
        "vouivre",
        "white_element",
        "funguar",
        "gandarewa",
        "lamashtu",
        "raptor",
        "red_element",
        "thunder_flan",
        "bite_bug",
        "bunyip",
        "garm",
        "simurgh",
        "snow_flan",
        "bunyip_2",
        "aerouge",
        "buer",
        "gold_element",
        "kusariqqu",
        "melusine",
        "blue_element",
        "iguion",
        "murussu",
        "wasp",
        "evil_eye",
        "ice_flan",
        "mafdet",
        "snow_wolf",
        "guado_guardian_2",
        "alcyone",
        "mech_guard",
        "mushussu",
        "sand_wolf",
        "bomb_2",
        "evil_eye_2",
        "guado_guardian_3",
        "warrior_monk",
        "warrior_monk_2",
        "aqua_flan",
        "bat_eye",
        "cave_iguion",
        "sahagin_2",
        "swamp_mafdet",
        "sahagin_3",
        "flame_flan",
        "mech_scouter",
        "mech_scouter_2",
        "nebiros",
        "shred",
        "skoll",
        "dark_element",
        "imp",
        "nidhogg",
        "yowie",
    )
    for monster_name in monster_names:
        prize_structs[monster_name][139] = 12

    # all the enemies that have ability arrays modified in the HD version
    # besaid
    patch_abilities("dingo", (38, 42, 34, 30, 124, 124, 124))
    patch_abilities("condor", (0, 0, 0, 0, 126, 126, 126))
    patch_abilities("water_flan", (42, 42, 42, 42, 125, 125, 125))
    patch_abilities("dingo_2", (38, 42, 34, 30, 124, 124, 124))
    patch_abilities("condor_2", (0, 0, 0, 0, 126, 126, 126))
    patch_abilities("water_flan_2", (42, 42, 42, 42, 125, 125, 125))
    # kilika
    patch_abilities("dinonix", (38, 42, 38, 30, 126, 126, 126))
    patch_abilities("killer_bee", (38, 42, 34, 30, 126, 126, 126))
    patch_abilities("yellow_element", (38, 38, 38, 38, 125, 125, 125))
    # luca
    patch_abilities("vouivre_2", (38, 42, 34, 30, 124, 124, 124))
    # mi'ihen
    patch_abilities("raldo_2", (38, 42, 34, 30, 124, 124, 124))
    patch_abilities("bomb", (30, 30, 30, 30, 30, 30, 125))
    patch_abilities("dual_horn", (67, 30, 30, 30, 30, 127, 127))
    patch_abilities("floating_eye", (38, 42, 34, 30, 99, 126, 126))
    patch_abilities("ipiria", (38, 42, 38, 30, 126, 126, 126))
    patch_abilities("mi'ihen_fang", (38, 42, 34, 30, 124, 124, 124))
    patch_abilities("raldo", (38, 42, 34, 30, 124, 124, 124))
    patch_abilities("vouivre", (38, 42, 34, 30, 124, 124, 124))
    patch_abilities("white_element", (34, 34, 34, 34, 125, 125, 125))
    # mushroom rock road
    patch_abilities("gandarewa", (38, 38, 38, 38, 125, 125, 125))
    patch_abilities("lamashtu", (38, 42, 34, 30, 124, 124, 124))
    patch_abilities("raptor", (38, 42, 38, 30, 126, 126, 126))
    patch_abilities("red_element", (30, 30, 30, 30, 125, 125, 125))
    patch_abilities("thunder_flan", (38, 38, 38, 38, 125, 125, 125))
    # djose highroad
    patch_abilities("bite_bug", (38, 42, 34, 30, 126, 126, 126))
    patch_abilities("bunyip", (38, 42, 34, 30, 124, 124, 124))
    patch_abilities("garm", (38, 42, 34, 30, 124, 124, 124))
    patch_abilities("simurgh", (0, 0, 0, 0, 126, 126, 126))
    patch_abilities("snow_flan", (34, 34, 34, 34, 125, 125, 125))
    # moonflow
    patch_abilities("bunyip_2", (38, 42, 34, 30, 124, 124, 124))
    # thunder plains
    patch_abilities("aerouge", (38, 38, 38, 38, 125, 125, 125))
    patch_abilities("buer", (38, 42, 34, 30, 99, 126, 126))
    patch_abilities("gold_element", (38, 38, 38, 38, 125, 125, 125))
    patch_abilities("kusariqqu", (38, 42, 34, 30, 124, 124, 124))
    patch_abilities("melusine", (38, 42, 38, 30, 126, 126, 126))
    # macalania woods
    patch_abilities("blue_element", (42, 42, 42, 42, 125, 125, 125))
    patch_abilities("chimera", (104, 104, 103, 103, 103, 103, 125))
    patch_abilities("iguion", (38, 42, 38, 30, 126, 126, 126))
    patch_abilities("murussu", (38, 42, 34, 30, 124, 124, 124))
    patch_abilities("wasp", (38, 42, 34, 30, 126, 126, 126))
    # lake macalania
    patch_abilities("evil_eye", (38, 42, 34, 30, 99, 126, 126))
    patch_abilities("ice_flan", (34, 34, 34, 34, 125, 125, 125))
    patch_abilities("mafdet", (38, 42, 34, 30, 124, 124, 124))
    patch_abilities("snow_wolf", (38, 42, 34, 30, 124, 124, 124))
    # bikanel
    patch_abilities("alcyone", (0, 0, 0, 0, 126, 126, 126))
    patch_abilities("mushussu", (38, 42, 34, 30, 124, 124, 124))
    patch_abilities("sand_wolf", (38, 42, 34, 30, 124, 124, 124))
    # home
    patch_abilities("bomb_2", (30, 30, 30, 30, 30, 30, 125))
    patch_abilities("chimera_2", (104, 104, 103, 103, 103, 103, 125))
    patch_abilities("dual_horn_2", (67, 67, 67, 30, 30, 127, 127))
    patch_abilities("evil_eye_2", (38, 42, 34, 30, 99, 126, 126))
    # via purifico
    patch_abilities("aqua_flan", (42, 42, 42, 42, 125, 125, 125))
    patch_abilities("bat_eye", (38, 42, 34, 30, 99, 126, 126))
    patch_abilities("cave_iguion", (38, 42, 38, 30, 126, 126, 126))
    patch_abilities("swamp_mafdet", (38, 42, 34, 30, 124, 124, 124))
    # calm lands
    patch_abilities("chimera_brain", (104, 104, 104, 104, 103, 103, 125))
    patch_abilities("flame_flan", (30, 30, 30, 30, 125, 125, 125))
    patch_abilities("nebiros", (38, 42, 34, 30, 126, 126, 126))
    patch_abilities("shred", (38, 42, 34, 30, 124, 124, 124))
    patch_abilities("skoll", (38, 42, 34, 30, 124, 124, 124))
    patch_abilities("defender_x", (100, 99, 99, 99, 99, 99, 124))
    # cavern of the stolen fayth
    patch_abilities("dark_element", (42, 30, 30, 34, 125, 125, 125))
    patch_abilities("defender", (99, 99, 99, 99, 98, 98, 124))
    patch_abilities("ghost", (104, 104, 104, 103, 103, 103, 125))
    patch_abilities("imp", (38, 38, 38, 38, 125, 125, 125))
    patch_abilities("nidhogg", (38, 42, 34, 30, 124, 124, 124))
    patch_abilities("valaha", (67, 67, 67, 30, 30, 127, 127))
    patch_abilities("yowie", (38, 42, 38, 30, 126, 126, 126))

    return prize_structs
def get_raw_data_string(prize_struct: list[int]) -> str:
    """Format a prize struct as a hex-dump-style table.

    Every 16 values, a header row with the hex byte offsets is emitted,
    followed by the next 16 decimal byte values.
    (Fixed the annotation: the values are ints, not strs — they are
    formatted with integer format specs below.)
    """
    string = ""
    for index, byte in enumerate(prize_struct):
        # every 16 bytes start a new line with an offset header
        if index % 16 == 0:
            string += "\n"
            string += " ".join([f"[{hex(index + i)[2:]:>3}]" for i in range(16)])
            string += "\n"
        # print the bytes' value
        string += f" {byte:>3} "
    return string
def _get_monster_data(monster_id: str, prize_struct: list[int]) -> Monster:
    """Decode a single enemy's prize struct bytes into a Monster."""

    def get_elements() -> dict[str, str]:
        # bytes 43-46 are bitmasks: absorb / immune / resist / weak,
        # checked in that priority order; unset in all four means neutral
        elements = {
            Element.FIRE: 0b00001,
            Element.ICE: 0b00010,
            Element.THUNDER: 0b00100,
            Element.WATER: 0b01000,
            Element.HOLY: 0b10000,
        }
        affinities = {}
        for element, value in elements.items():
            if prize_struct[43] & value:
                affinities[element] = ElementalAffinity.ABSORBS
            elif prize_struct[44] & value:
                affinities[element] = ElementalAffinity.IMMUNE
            elif prize_struct[45] & value:
                affinities[element] = ElementalAffinity.RESISTS
            elif prize_struct[46] & value:
                affinities[element] = ElementalAffinity.WEAK
            else:
                affinities[element] = ElementalAffinity.NEUTRAL
        return affinities

    def get_abilities(address: int) -> dict[str, list[str | None]]:
        # reads 8 (ability, flag) byte pairs for weapons, then 8 for armor
        # (armor starts 16 bytes later); flag byte 128 marks a filled slot
        abilities = {}
        equipment_types = (EquipmentType.WEAPON, 0), (EquipmentType.ARMOR, 16)
        for equipment_type, offset in equipment_types:
            abilities[equipment_type] = []
            for i in range(address + offset, address + 16 + offset, 2):
                if prize_struct[i + 1] == 128:
                    ability_name = AUTOABILITIES[prize_struct[i]]
                else:
                    ability_name = None
                abilities[equipment_type].append(ability_name)
        return abilities

    # the display name is embedded null-terminated at bytes 408-429
    monster_name = ""
    for character_id in prize_struct[408:430]:
        if character_id == 0:
            break
        monster_name += TEXT_CHARACTERS[character_id]
    # duplicate ids carry a "_<n>" suffix (n < 16): surface it as "#<n>"
    for i in range(16):
        if monster_id.endswith(f"_{i}"):
            monster_name += f"#{i}"
            break
    stats = {
        Stat.HP: add_bytes(*prize_struct[20:24]),
        Stat.MP: add_bytes(*prize_struct[24:28]),
        "overkill_threshold": add_bytes(*prize_struct[28:32]),
        Stat.STRENGTH: prize_struct[32],
        Stat.DEFENSE: prize_struct[33],
        Stat.MAGIC: prize_struct[34],
        Stat.MAGIC_DEFENSE: prize_struct[35],
        Stat.AGILITY: prize_struct[36],
        Stat.LUCK: prize_struct[37],
        Stat.EVASION: prize_struct[38],
        Stat.ACCURACY: prize_struct[39],
    }
    gil = add_bytes(*prize_struct[128:130])
    ap = {
        "normal": add_bytes(*prize_struct[130:132]),
        "overkill": add_bytes(*prize_struct[132:134]),
    }
    # item drops: a flag byte of 32 marks the corresponding slot as filled
    item_1 = {
        "drop_chance": prize_struct[136],
        "normal": {Rarity.COMMON: None, Rarity.RARE: None},
        "overkill": {Rarity.COMMON: None, Rarity.RARE: None},
    }
    if prize_struct[141] == 32:
        item_1["normal"][Rarity.COMMON] = ItemDrop(
            ITEMS[prize_struct[140]], prize_struct[148], False
        )
    if prize_struct[143] == 32:
        item_1["normal"][Rarity.RARE] = ItemDrop(
            ITEMS[prize_struct[142]], prize_struct[149], True
        )
    if prize_struct[153] == 32:
        item_1["overkill"][Rarity.COMMON] = ItemDrop(
            ITEMS[prize_struct[152]], prize_struct[160], False
        )
    if prize_struct[155] == 32:
        item_1["overkill"][Rarity.RARE] = ItemDrop(
            ITEMS[prize_struct[154]], prize_struct[161], True
        )
    item_2 = {
        "drop_chance": prize_struct[137],
        "normal": {Rarity.COMMON: None, Rarity.RARE: None},
        "overkill": {Rarity.COMMON: None, Rarity.RARE: None},
    }
    if prize_struct[145] == 32:
        item_2["normal"][Rarity.COMMON] = ItemDrop(
            ITEMS[prize_struct[144]], prize_struct[150], False
        )
    if prize_struct[147] == 32:
        item_2["normal"][Rarity.RARE] = ItemDrop(
            ITEMS[prize_struct[146]], prize_struct[151], True
        )
    if prize_struct[157] == 32:
        item_2["overkill"][Rarity.COMMON] = ItemDrop(
            ITEMS[prize_struct[156]], prize_struct[162], False
        )
    if prize_struct[159] == 32:
        item_2["overkill"][Rarity.RARE] = ItemDrop(
            ITEMS[prize_struct[158]], prize_struct[163], True
        )
    steal = {
        "base_chance": prize_struct[138],
        Rarity.COMMON: None,
        Rarity.RARE: None,
    }
    if prize_struct[165] == 32:
        steal[Rarity.COMMON] = ItemDrop(
            ITEMS[prize_struct[164]], prize_struct[168], False
        )
    if prize_struct[167] == 32:
        steal[Rarity.RARE] = ItemDrop(ITEMS[prize_struct[166]], prize_struct[169], True)
    # bribe cost is NaN here — presumably computed elsewhere; verify
    bribe = {
        "cost": float("nan"),
        "item": None,
    }
    if prize_struct[171] == 32:
        bribe["item"] = ItemDrop(ITEMS[prize_struct[170]], prize_struct[172], False)
    elemental_affinities = get_elements()
    # one resistance byte per status, at fixed offsets 47-71
    status_resistances = {
        Status.DEATH: prize_struct[47],
        Status.ZOMBIE: prize_struct[48],
        Status.PETRIFY: prize_struct[49],
        Status.POISON: prize_struct[50],
        Status.POWER_BREAK: prize_struct[51],
        Status.MAGIC_BREAK: prize_struct[52],
        Status.ARMOR_BREAK: prize_struct[53],
        Status.MENTAL_BREAK: prize_struct[54],
        Status.CONFUSE: prize_struct[55],
        Status.BERSERK: prize_struct[56],
        Status.PROVOKE: prize_struct[57],
        Status.THREATEN: prize_struct[58],
        Status.SLEEP: prize_struct[59],
        Status.SILENCE: prize_struct[60],
        Status.DARK: prize_struct[61],
        Status.PROTECT: prize_struct[62],
        Status.SHELL: prize_struct[63],
        Status.REFLECT: prize_struct[64],
        Status.NULBLAZE: prize_struct[65],
        Status.NULFROST: prize_struct[66],
        Status.NULSHOCK: prize_struct[67],
        Status.NULTIDE: prize_struct[68],
        Status.REGEN: prize_struct[69],
        Status.HASTE: prize_struct[70],
        Status.SLOW: prize_struct[71],
    }
    # byte 42 is the poison damage percentage of max HP per tick
    poison_tick_damage = stats[Stat.HP] * prize_struct[42] // 100
    undead = prize_struct[72] == 2
    # auto statuses are bit flags spread over bytes 74-75
    auto_statuses = []
    if prize_struct[74] & 0b00100000:
        auto_statuses.append(Status.REFLECT)
    if prize_struct[75] & 0b00000011 and prize_struct[74] & 0b11000000:
        auto_statuses.append(Status.NULALL)
    if prize_struct[75] & 0b00000100:
        auto_statuses.append(Status.REGEN)
    equipment = {
        "drop_chance": prize_struct[139],
        "bonus_critical_chance": prize_struct[175],
        "base_weapon_damage": prize_struct[176],
        "slots_modifier": prize_struct[173],
        "slots_range": [],
        "max_ability_rolls_modifier": prize_struct[177],
        "max_ability_rolls_range": [],
        "added_to_inventory": bool(prize_struct[174]),
    }
    # precompute the slot count and max ability rolls produced by each of
    # the 8 possible random rolls (i in 0..7); slot count is clamped to
    # the allowed EquipmentSlots range
    for i in range(8):
        slots_mod = equipment["slots_modifier"] + i - 4
        slots = (slots_mod + ((slots_mod >> 31) & 3)) >> 2
        if slots < EquipmentSlots.MIN:
            slots = EquipmentSlots.MIN.value
        elif slots > EquipmentSlots.MAX:
            slots = EquipmentSlots.MAX.value
        equipment["slots_range"].append(slots)
        ab_mod = equipment["max_ability_rolls_modifier"] + i - 4
        ab_rolls = (ab_mod + ((ab_mod >> 31) & 7)) >> 3
        equipment["max_ability_rolls_range"].append(ab_rolls)
    # one 32-byte ability array block per character, starting at byte 178
    equipment["ability_arrays"] = {}
    for c, i in zip(CHARACTERS.values(), range(178, 371, 32)):
        equipment["ability_arrays"][c.name] = get_abilities(i)
    armored = bool(prize_struct[40] & 0b00000001)
    zanmato_level = prize_struct[402]
    actions = MONSTER_ACTIONS[monster_id]
    # fall back to the shared generic action set when none are defined
    if not actions:
        actions.update(MONSTER_ACTIONS["generic_actions"])
    monster = Monster(
        name=monster_name,
        stats=stats,
        elemental_affinities=elemental_affinities,
        status_resistances=status_resistances,
        poison_tick_damage=poison_tick_damage,
        zanmato_level=zanmato_level,
        armored=armored,
        undead=undead,
        auto_statuses=auto_statuses,
        gil=gil,
        ap=ap,
        item_1=item_1,
        item_2=item_2,
        steal=steal,
        bribe=bribe,
        equipment=equipment,
        actions=actions,
    )
    return monster
# Load every enemy's raw prize struct from the bundled csv at import time.
# NOTE(review): the backslash separator is Windows-specific — confirm that
# get_resource_path normalises it on other platforms.
PRIZE_STRUCTS = _get_prize_structs("tracker\\data\\ffx_mon_data.csv")
# the HD release changed droprates and ability arrays for some enemies
if Configs.game_version is GameVersion.HD:
    PRIZE_STRUCTS = _patch_prize_structs_for_hd(PRIZE_STRUCTS)
# decoded Monster objects, keyed by the same ids as PRIZE_STRUCTS
MONSTERS = {k: _get_monster_data(k, v) for k, v in PRIZE_STRUCTS.items()}
| coderwilson/FFX_TAS_Python | tracker/ffx_rng_tracker/data/monsters.py | monsters.py | py | 18,996 | python | en | code | 14 | github-code | 6 | [
{
"api_name": "constants.Stat",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "constants.Element",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "constants.ElementalAffinity",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "constant... |
1924258051 | """Regridding operator."""
# Standard library
import dataclasses as dc
import typing
# Third-party
import numpy as np
import xarray as xr
from rasterio import transform, warp
from rasterio.crs import CRS
Resampling: typing.TypeAlias = warp.Resampling
# For more information: check https://epsg.io/<id>
CRS_ALIASES = {
"geolatlon": "epsg:4326", # WGS84
"swiss": "epsg:21781", # Swiss CH1903 / LV03
"swiss03": "epsg:21781", # Swiss CH1903 / LV03
"swiss95": "epsg:2056", # Swiss CH1903+ / LV95
"boaga-west": "epsg:3003", # Monte Mario / Italy zone 1
"boaga-east": "epsg:3004", # Monte Mario / Italy zone 2
}
def _get_crs(geo):
    """Build the rotated-pole CRS described by grib geography metadata.

    Only the "rotated_ll" grid type is supported.
    """
    if geo["gridType"] != "rotated_ll":
        raise NotImplementedError("Unsupported grid type")
    pole_lon = geo["longitudeOfSouthernPoleInDegrees"]
    pole_lat = -geo["latitudeOfSouthernPoleInDegrees"]
    proj_string = (
        f"+proj=ob_tran +o_proj=longlat +o_lat_p={pole_lat} "
        f"+lon_0={pole_lon} +datum=WGS84"
    )
    return CRS.from_string(proj_string)
def _normalise(angle: float) -> float:
return np.fmod(angle + 180, 360) - 180
@dc.dataclass
class RegularGrid:
    """Class defining a regular grid.

    Attributes
    ----------
    crs : CRS
        Coordinate reference system.
    nx : int
        Number of grid points in the x direction.
    ny : int
        Number of grid points in the y direction.
    xmin : float
        Coordinate of the first grid point in the x direction.
    xmax : float
        Coordinate of the last grid point in the x direction.
    ymin : float
        Coordinate of the first grid point in the y direction.
    ymax : float
        Coordinate of the last grid point in the y direction.
    """

    crs: CRS
    nx: int
    ny: int
    xmin: float
    xmax: float
    ymin: float
    ymax: float

    @classmethod
    def from_field(cls, field: xr.DataArray):
        """Extract grid parameters from grib metadata.

        Parameters
        ----------
        field : xarray.DataArray
            field containing the relevant metadata.
        """
        geo = field.geography
        obj = cls(
            crs=_get_crs(geo),
            nx=geo["Ni"],
            ny=geo["Nj"],
            xmin=_normalise(geo["longitudeOfFirstGridPointInDegrees"]),
            xmax=_normalise(geo["longitudeOfLastGridPointInDegrees"]),
            ymin=geo["latitudeOfFirstGridPointInDegrees"],
            ymax=geo["latitudeOfLastGridPointInDegrees"],
        )
        # the spacing implied by the extent and point counts must agree
        # with the increments declared in the metadata
        if abs(obj.dx - geo["iDirectionIncrementInDegrees"]) > 1e-5:
            raise ValueError("Inconsistent grid parameters")
        if abs(obj.dy - geo["jDirectionIncrementInDegrees"]) > 1e-5:
            raise ValueError("Inconsistent grid parameters")
        return obj

    @classmethod
    def parse_regrid_operator(cls, op: str):
        """Parse fieldextra out_regrid_target string.

        Parameters
        ----------
        op : str
            fieldextra out_regrid_target definition
            i.e. crs,xmin,ymin,xmay,ymax,dx,dy.
        """
        crs_str, *grid_params = op.split(",")
        crs = CRS.from_string(CRS_ALIASES[crs_str])
        xmin, ymin, xmax, ymax, dx, dy = map(float, grid_params)
        if abs(dx) < 1e-10 or abs(dy) < 1e-10:
            raise ValueError("Inconsistent regrid parameters")
        # the extent must be an exact whole number of grid steps
        # (note: exact float comparison — no rounding tolerance)
        nx = (xmax - xmin) / dx + 1
        ny = (ymax - ymin) / dy + 1
        if nx != int(nx) or ny != int(ny):
            raise ValueError("Inconsistent regrid parameters")
        return cls(crs, int(nx), int(ny), xmin, xmax, ymin, ymax)

    @property
    def dx(self) -> float:
        """Grid spacing in the x direction."""
        return (self.xmax - self.xmin) / (self.nx - 1)

    @property
    def dy(self) -> float:
        """Grid spacing in the y direction."""
        return (self.ymax - self.ymin) / (self.ny - 1)

    @property
    def transform(self) -> transform.Affine:
        """Affine transform from pixel indices to CRS coordinates.

        The origin sits half a cell beyond (xmin, ymax) so that grid
        points fall at cell centers.
        """
        return transform.from_origin(
            west=self.xmin - self.dx / 2,
            north=self.ymax + self.dy / 2,
            xsize=self.dx,
            ysize=self.dy,
        )
def regrid(field: xr.DataArray, dst: RegularGrid, resampling: Resampling):
    """Project *field* onto the regular grid *dst*.

    Parameters
    ----------
    field : xarray.DataArray
        Input field defined on a regular grid in rotated latlon coordinates.
    dst : RegularGrid
        Destination grid onto which to project the field.
    resampling : Resampling
        Resampling method, alias of rasterio.warp.Resampling.

    Raises
    ------
    ValueError
        If the input field is not defined on a regular grid in rotated latlon
        or if its geography metadata has inconsistent grid parameters.

    Returns
    -------
    xarray.DataArray
        Field regridded onto the destination grid.
    """
    src = RegularGrid.from_field(field)

    def _warp_single(layer):
        # rasterio expects row 0 at the top (north), hence the flips.
        out = np.zeros((dst.ny, dst.nx))
        warp.reproject(
            source=layer[::-1],
            destination=out,
            src_crs=src.crs,
            src_transform=src.transform,
            dst_crs=dst.crs,
            dst_transform=dst.transform,
            resampling=resampling,
        )
        return out[::-1]

    # Output dims are renamed to work around the limitation that overlapping
    # dims in the input must not change in size.
    regridded = xr.apply_ufunc(
        _warp_single,
        field,
        input_core_dims=[["y", "x"]],
        output_core_dims=[["y1", "x1"]],
        vectorize=True,
    )
    return regridded.rename({"x1": "x", "y1": "y"})
| MeteoSwiss-APN/icon_data_processing_incubator | src/idpi/operators/regrid.py | regrid.py | py | 5,416 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.TypeAlias",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "rasterio.warp.Resampling",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "rasterio.warp",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "ras... |
14540936446 | """Escea Fireplace UDP messaging module
Implements simple UDP messages to Fireplace and receiving responses
"""
import asyncio
import logging
from asyncio import Lock
from asyncio.base_events import BaseEventLoop
from async_timeout import timeout
from typing import Any, Dict
# Pescea imports:
from .message import Message, CommandID, expected_response
from .udp_endpoints import open_local_endpoint, open_remote_endpoint
_LOG = logging.getLogger(__name__)

# Port used for discovery and integration
# (same port is used for replies)
CONTROLLER_PORT = 3300

# Time to wait for results from server (seconds)
REQUEST_TIMEOUT = 5.0

# Maps responder IP address -> parsed response Message
Responses = Dict[str, Message]
class Datagram:
    """Send UDP Datagrams to fireplace and receive responses"""

    def __init__(
        self, event_loop: BaseEventLoop, device_ip: str, sending_lock: Lock
    ) -> None:
        """Create a simple datagram client interface.

        Args:
            event_loop: loop to use for coroutines
            device_addr: Device network address. Usually specified as IP
                address (can be a broadcast address in the case of fireplace search)
            sending_lock: Provided to attempt to make thread safe

        Raises:
            ConnectionRefusedError: If no Escea fireplace is discovered, or no
                device discovered at the given IP address, or the UID does not match
        """
        self._ip = device_ip
        self._event_loop = event_loop
        # Lock shared with the caller so only one exchange is in flight at a time.
        self.sending_lock = sending_lock

    @property
    def ip(self) -> str:
        """Target IP address"""
        return self._ip

    def set_ip(self, ip_addr: str) -> None:
        """Change the Target IP address"""
        self._ip = ip_addr

    async def send_command(self, command: CommandID, data: Any = None) -> Responses:
        """Send command via UDP

        Returns received response(s) and IP addresses they come from

        Args:
            - command: Fireplace command (refer Message)
            - data: ignored except for setting desired temperature

        Raises ConnectionError if unable to send command
        """
        message = Message(command=command, set_temp=data)
        responses = dict()  # type: Responses
        # A fireplace search goes to a broadcast address, so several replies
        # (one per fireplace) may arrive; any other command expects one reply.
        broadcast = command == CommandID.SEARCH_FOR_FIRES
        local = None
        remote = None
        # set up receiver before we send anything
        async with self.sending_lock:
            try:
                local = await open_local_endpoint(
                    port=CONTROLLER_PORT,
                    loop=self._event_loop,
                    reuse_port=True,
                )
                remote = await open_remote_endpoint(
                    host=self._ip,
                    port=CONTROLLER_PORT,
                    loop=self._event_loop,
                    allow_broadcast=broadcast,
                )
                # Fire-and-forget: replies arrive on the local endpoint, so
                # the sending endpoint can be closed immediately.
                remote.send(message.bytearray_)
                remote.close()
                # Collect replies until the first valid one (unicast) or until
                # the timeout expires (broadcast collects as many as it can).
                async with timeout(REQUEST_TIMEOUT):
                    while True:
                        data, (addr, _) = await local.receive()
                        response = Message(incoming=data)
                        if response.is_command:
                            if not broadcast:
                                _LOG.error(
                                    "Unexpected command id: %s", response.command_id
                                )
                        else:  # response
                            if response.response_id != expected_response(command):
                                _LOG.debug(
                                    "Message response id: %s does not match command id: %s",
                                    response.response_id,
                                    command,
                                )
                            else:
                                responses[addr] = response
                                if not broadcast:
                                    break
                local.close()
            except (asyncio.TimeoutError, ValueError):
                # Timeout simply ends collection; ValueError covers malformed
                # payloads. Failure is detected below via an empty result set.
                pass
            finally:
                # Always release both endpoints, whatever happened above.
                if remote is not None and not remote.closed:
                    remote.close()
                if local is not None and not local.closed:
                    local.close()

        if len(responses) == 0:
            _LOG.debug(
                "Unable to send UDP message - Local endpoint closed:%s, Remote endpoint closed:%s",
                "None" if local is None else local.closed,
                "None" if remote is None else remote.closed,
            )
            raise ConnectionError("Unable to send/receive UDP message")

        return responses
| lazdavila/pescea | pescea/datagram.py | datagram.py | py | 4,657 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "message.Message",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "asyncio.base_events.Bas... |
35327101756 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 14 15:00:34 2017
@author: rsotoc
"""
import numpy as np
import pandas as pd
import time
import re
import nltk
from nltk.corpus import stopwords
from nltk.util import ngrams
from sklearn.model_selection import train_test_split
from bs4 import BeautifulSoup
from sklearn.naive_bayes import MultinomialNB
def document_features_ngrams(document, global_features):
    """Build a tf-idf feature vector for one document.

    *document* is a frequency distribution (e.g. nltk.FreqDist) and
    *global_features* maps each term to a ``(vector_index, idf)`` pair.
    Terms missing from *global_features*, or whose idf is not positive,
    leave their slot at 0.
    """
    vector = [0] * len(global_features)
    n_distinct = len(document)
    for term in document:
        entry = global_features.get(term)
        if entry is None:
            continue
        slot, idf = entry
        if idf > 0:
            # term frequency scaled by the number of distinct terms
            vector[slot] = (document.freq(term) / n_distinct) * idf
    return vector
# ------------------------------------------------------------------
movies_reviews = pd.read_csv("Movies Reviews/labeledTrainData.tsv", sep='\t')

# Clean the documents: keep only alphabetic words and lowercase everything
movies_reviews.review = list(map(lambda row: re.sub("[^a-zA-Z]", " ",
                          BeautifulSoup(row, "lxml").get_text().lower()),
                          movies_reviews.review))

# Add a column converting each message to a list of words
# (stop words NOT removed yet)
movies_reviews["words"] = list(map(lambda row: row.split(), movies_reviews.review))
corpus_len = len(movies_reviews.words)

# Add a column converting each message to a list of words
# (this time stop words are removed)
stop_words = set(stopwords.words("english"))
most_common_words = nltk.FreqDist(w for wl in movies_reviews.words for w in wl)
#my_stop_words = [ w for (w,f) in most_common_words.most_common(15)]
movies_reviews["words"] = list(map(lambda row: [w for w in row.split() if not w in stop_words],
                               movies_reviews.review))
movies_reviews["bigrams"] = list(map(lambda row: list(ngrams(row,2)),
                               movies_reviews.words))
# Build an array with the classification labels
Sentiments = np.array([int(x) for x in movies_reviews.sentiment])

#movies_reviews["trigrams"] = list(map(lambda row: list(ngrams(row,3)),
#                               movies_reviews.words))
#words_frq = nltk.FreqDist(w.lower() for wl in movies_reviews.words for w in wl
#                ).most_common(4000)
# Keep only the 4000 most frequent bigrams as the global feature set
bigrams_frq = nltk.FreqDist(w for wl in movies_reviews.bigrams for w in wl
                ).most_common(4000)

print("empezando")
start_time = time.time()
# Compute idf for each selected bigram: log(#documents / #documents containing it)
bag_idf = {}
for i, (elem, f) in zip (range(len(bigrams_frq)), bigrams_frq):
    nt = 0
    for row in movies_reviews.bigrams:
        if elem in row:
            nt += 1
    bag_idf[elem] = (i, np.log(corpus_len / nt))
# tf-idf feature vector per document
featuresets_bigrams = [
        document_features_ngrams(nltk.FreqDist(w for w in d),
                                 bag_idf) for d in movies_reviews["bigrams"]]
#trigrams_frq = nltk.FreqDist(w for wl in movies_reviews.trigrams for w in wl
#                ).most_common(4000)
#featuresets_words = [
#    document_features_ngrams(d, words_frq) for d in movies_reviews["words"]]
#
#bag_dict = {}
#for i, (elem, f) in zip (range(len(bigrams_frq)), bigrams_frq):
#    bag_dict[elem] = (i, float(f)/word_bag_len)
#featuresets_bigrams = [
#    document_features_ngrams(nltk.FreqDist(d), bigrams_frq)
#    for d in movies_reviews["bigrams"]]
#featuresets_trigrams = [
#    document_features_ngrams(nltk.FreqDist(d), trigrams_frq)
#    for d in movies_reviews["trigrams"]]
elapsed_time = time.time() - start_time

#for i in range(100):
#    print(sum(x > 0 for x in featuresets_bigrams[i]))
# 90/10 train/test split
bigrams_train, bigrams_test, biy_train, biy_test = train_test_split(
        featuresets_bigrams, Sentiments, test_size=0.1)

# Training a Multinomial Naive Bayes classifier
clfM = MultinomialNB()
clfM.fit(bigrams_train, biy_train)
print(elapsed_time)

# Classifier evaluation (train and test misclassification rates)
predictions_train = clfM.predict(bigrams_train)
fails_train = sum(biy_train != predictions_train)
print("Puntos mal clasificados en el conjunto de entrenamiento: {} de {} ({}%)\n"
      .format(fails_train, len(bigrams_train), 100*fails_train/len(bigrams_train)))
predictions_test = clfM.predict(bigrams_test)
fails_test = sum(biy_test != predictions_test)
print("Puntos mal clasificados en el conjunto de prueba: {} de {} ({}%)\n"
      .format(fails_test, len(bigrams_test), 100*fails_test/len(bigrams_test)))
{
"api_name": "pandas.read_csv",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words"... |
32111244416 | import os
import cv2
import sys
import math
import time
import numpy as np
import matplotlib.pyplot as plt
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5 import uic
# 이미지를 읽어서 pyqt로 보여주는 함수
def cvtPixmap(frame, img_size):
    """Resize a BGR image and convert it to a QPixmap for display in Qt."""
    resized = cv2.resize(frame, img_size)
    h, w, _channels = resized.shape
    stride = 3 * w  # bytes per row for a 3-channel 8-bit image
    qimage = QImage(resized.data,
                    w,
                    h,
                    stride,
                    QImage.Format_RGB888).rgbSwapped()  # BGR -> RGB
    return QPixmap.fromImage(qimage)
# 동공 주변 반사광 채우는 함수
def fill_reflected_light(ori_img, min_thr, iteration=2, add_inter_idx=1):
    """Fill specular reflections near the pupil by horizontal interpolation.

    Pixels brighter than *min_thr* are flagged as reflections; each flagged
    pixel is replaced by the mean of the nearest non-reflection pixels to its
    left and right on the same row.
    """
    if len(ori_img.shape) == 3:
        ori_img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2GRAY)
    ret, img_thresh = cv2.threshold(ori_img, min_thr, 255, cv2.THRESH_BINARY)
    kernel = np.ones((3, 3), np.uint8)
    img_thresh = cv2.dilate(img_thresh, kernel, iterations=iteration)  # dilation: widen the mask so small holes get filled too
    draw_img = ori_img.copy()  # copy of the original image to draw into
    reflection_points = np.where(img_thresh == 255)  # indices of pixels flagged as reflection
    for y, x in zip(reflection_points[0], reflection_points[1]):
        # l_x is the pixel left of x, r_x the pixel to its right;
        # l_x must stay >= 0 and r_x inside the image width
        l_x, r_x = x - 1, x + 1
        l_x = l_x if l_x >= 0 else 0
        r_x = r_x if r_x < img_thresh.shape[1] else img_thresh.shape[1] - 1
        # step outward while still inside the image and on reflection pixels
        while l_x >= 0 and img_thresh[y][l_x] == 255:
            l_x -= 1
        while r_x < (img_thresh.shape[1] - 1) and img_thresh[y][r_x] == 255:
            r_x += 1
        # sample slightly beyond the reflection border rather than right at it;
        # the dilation above already widened the border, so add_inter_idx has
        # little additional effect
        l_x -= add_inter_idx
        r_x += add_inter_idx
        l_x = l_x if l_x >= 0 else 0
        r_x = r_x if r_x < img_thresh.shape[1] else img_thresh.shape[1] - 1
        l_val = int(ori_img[y][l_x])
        r_val = int(ori_img[y][r_x])
        draw_img[y][x] = int((l_val + r_val) / 2)  # fill with the left/right mean
    return draw_img
# 동공 검출 함수
def getPupil(img, thresh, area_val, symmetry_val, fill_cond_val):
    '''
    :param img: input pupil image
    :param thresh: (low, high) pair passed to cv2.threshold
    :param area_val: minimum contour area
    :param symmetry_val: aspect-ratio tolerance, divided by 10 before use
    :param fill_cond_val: fill-ratio tolerance, divided by 10 before use
    :return: list of ((pupil centre x, y), radius, (bounding rect x, y, w, h))
             plus the thresholded image

    All three *_condition flags must be true for a contour to be appended:
    area_condition : area enclosed by the contour must be at least area_val
    symmetry_condition : 1 - aspect ratio (width/height of the bounding rect)
        must be below symmetry_val/10 -> the box must be close to a square
    fill_condition : compares the contour area against the area of the
        inscribed circle computed above, i.e. how well the circle is filled
    '''
    res = []
    if len(img.shape) == 3:
        gray = cv2.cvtColor(~img, cv2.COLOR_BGR2GRAY)
    else:
        gray = img
    ret, thresh_gray = cv2.threshold(gray, thresh[0], thresh[1], cv2.THRESH_BINARY)
    contours, hierarchy = cv2.findContours(thresh_gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    draw_img = img.copy()
    for i in range(len(contours)):
        # # visualise each contour individually (debug)
        # cv2.drawContours(draw_img, [contours[i]], 0, (0, 0, 255), 2)
        # cv2.putText(draw_img, str(i), tuple(contours[i][0][0]), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 1)
        # print(i, hierarchy[0][i])
        # cv2.imshow('contour detection', draw_img)
        # cv2.waitKey(0)

        # for contour in contours:
        area = cv2.contourArea(contours[i])
        rect = cv2.boundingRect(contours[i])
        x, y, width, height = rect  # rectangular bounding-box coords, width, height
        radius = 0.25 * (width + height)  # radius of the circle inscribed in the bounding rect
        area_condition = (area_val <= area)
        symmetry_condition = (abs(1 - float(width) / float(height)) <= symmetry_val/10)
        fill_condition = (abs(1 - (area / (math.pi * math.pow(radius, 2.0)))) <= fill_cond_val/10)
        # all three conditions must hold for a pupil region
        if area_condition and symmetry_condition and fill_condition:
            res.append(((int(x + radius), int(y + radius)), int(1 * radius), rect))  # pupil centre x, y, radius, rect (bounding box)
    return res, thresh_gray
# 동공 지름 구하기
def get_pupil_size(roi, binary_eye, pupil_info, add_radius):
    """Measure the pupil diameter inside a padded bounding box and draw it on *roi*.

    Returns (annotated roi, diameter in pixels), where the diameter is the
    largest per-row count of pupil pixels in the padded box.
    """
    info = pupil_info[0]  # (pupil centre x, y), radius, (bounding rect x, y, w, h)
    # rect_roi = info[2]
    rect_roi = pupil_info[0][2]
    # pad the bounding box by add_radius on each side, clamped to the image
    box_x, box_y, width, height = rect_roi
    box_x = box_x - add_radius if box_x - add_radius >= 0 else 0
    box_y = box_y - add_radius if box_y - add_radius >= 0 else 0
    width = width + (2 * add_radius) if width + (2 * add_radius) <= roi.shape[1] else roi.shape[1]
    height = height + (2 * add_radius) if height + (2 * add_radius) <= roi.shape[0] else roi.shape[0]
    img_eye_only = binary_eye[box_y:box_y + height, box_x:box_x + width].copy()
    img_eye_only = np.where(img_eye_only == 255, 1, img_eye_only)  # 255 -> 1 so row sums count pixels
    cv2.rectangle(roi, (box_x, box_y), ((box_x + width), (box_y + height)), (0, 255, 255), 2)  # yellow box around the pupil
    # find the widest row of the pupil region (max pixel count = diameter)
    max_idx, max_val = 0, 0
    for col_idx in range(img_eye_only.shape[0]):
        col_val = sum(img_eye_only[col_idx])
        if max_val < col_val:
            max_idx = col_idx
            max_val = col_val
    # find the left/right columns where the pupil region starts
    # NOTE(review): these loops keep overwriting, so l_row ends at the LAST
    # non-empty column scanned (rightmost) and r_row at the leftmost — the
    # names look swapped; harmless for cv2.line but confirm the intent.
    l_row, r_row = 0, img_eye_only.shape[1]
    for row_idx in range(img_eye_only.shape[1] - 1):
        row_val = sum(img_eye_only[:, row_idx])
        if row_val != 0:
            l_row = row_idx
    for row_idx in range(img_eye_only.shape[1] - 1, 0, -1):
        row_val = sum(img_eye_only[:, row_idx])
        if row_val != 0:
            r_row = row_idx
    # draw the measured diameter horizontally at the widest row
    cv2.line(roi,
             (box_x + l_row, box_y + max_idx),
             (box_x + r_row, box_y + max_idx),
             (0, 0, 255), 2)  # draw the pupil diameter
    return roi, max_val
def frames_to_timecode(total_frames, frame_rate=30, drop=False):
    """
    Method that converts frames to SMPTE timecode.

    :param total_frames: Number of frames
    :param frame_rate: frames per second
    :param drop: true if time code should drop frames, false if not
    :returns: SMPTE timecode as string, e.g. '01:02:12:32' or '01:02:12;32'
    :raises NotImplementedError: if drop is requested for an unsupported rate
    """
    if drop and frame_rate not in [29.97, 59.94]:
        raise NotImplementedError("Time code calculation logic only supports drop frame "
                                  "calculations for 29.97 and 59.94 fps.")

    # For a good discussion around time codes and sample code, see
    # http://andrewduncan.net/timecodes/
    #
    # Frame rates such as 29.97 or 59.94 are treated as 30 and 60 when
    # converting to time code, then compensated by adding 'drop frames'
    # (jumps in the time code at minute marks) so the result stays roughly
    # aligned with wall-clock time.
    fps_int = int(round(frame_rate))

    if drop:
        # Drop-frame mode: two frame NUMBERS are skipped every minute,
        # except every tenth minute.  Reference points (29.97 fps):
        #   frame  1799 -> 00:00:59;29
        #   frame  1800 -> 00:01:00;02   (two numbers dropped)
        #   frame 17982 -> 00:10:00;00   (tenth minute: no drop)
        #
        # All of this arithmetic must be INTEGRAL.  The previous version used
        # true division (`/`), which produced fractional frame counts and
        # timecodes that contradicted the table above (e.g. frame 1800 came
        # out as 00:01:00;03).
        FRAMES_IN_ONE_MINUTE = 1800 - 2
        FRAMES_IN_TEN_MINUTES = (FRAMES_IN_ONE_MINUTE * 10) - 2

        ten_minute_chunks = total_frames // FRAMES_IN_TEN_MINUTES
        one_minute_chunks = total_frames % FRAMES_IN_TEN_MINUTES

        ten_minute_part = 18 * ten_minute_chunks
        one_minute_part = 2 * ((one_minute_chunks - 2) // FRAMES_IN_ONE_MINUTE)

        # the floor division above can go negative just after a ten-minute mark
        if one_minute_part < 0:
            one_minute_part = 0

        # add the skipped (dropped) frame numbers
        total_frames += ten_minute_part + one_minute_part

        # for 60 fps drop frame calculations, we add twice the number of frames
        if fps_int == 60:
            total_frames = total_frames * 2

        # time codes are on the form 12:12:12;12
        smpte_token = ";"
    else:
        # time codes are on the form 12:12:12:12
        smpte_token = ":"

    # now split our frames into time code components
    hours = total_frames // (3600 * fps_int)
    minutes = total_frames // (60 * fps_int) % 60
    seconds = total_frames // fps_int % 60
    frames = total_frames % fps_int
    return "%02d:%02d:%02d%s%02d" % (hours, minutes, seconds, smpte_token, frames)
| HanNayeoniee/visual-fatigue-analysis | pupil_detection/utils.py | utils.py | py | 11,187 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "cv2.resize",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"... |
71874843708 | # Import necessary modules and libraries
from dotenv import load_dotenv
import os
import base64
from requests import post, get
import json
# Load environment variables from .env file
load_dotenv()

# Import CLIENT_ID and CLIENT_SECRET from environment variables.
# NOTE: os.getenv returns None when a variable is missing; token retrieval
# will then fail, so both must be set in .env.
client_id = os.getenv("CLIENT_ID")
client_secret = os.getenv("CLIENT_SECRET")
# Function to get Spotify API token using client credentials
def get_token():
# Combine client_id and client_secret and encode in base64
auth_string = client_id + ":" + client_secret
auth_bytes = auth_string.encode("utf-8")
auth_base64 = str(base64.b64encode(auth_bytes), "utf-8")
# API endpoint for token retrieval
url = "https://accounts.spotify.com/api/token"
# Headers for token request
headers = {
"Authorization": "Basic " + auth_base64,
"Content-Type": "application/x-www-form-urlencoded"
}
# Data for token request
data = {"grant_type": "client_credentials"}
# Send a POST request to retrieve the token
result = post(url, headers=headers, data=data)
# Parse the JSON response to extract the access token
json_result = json.loads(result.content)
token = json_result["access_token"]
return token
# Function to generate the authorization header with the provided token
def get_auth_header(token):
return {"Authorization": "Bearer " + token}
# Function to search for an artist by name
def search_for_artist(token, artist_name):
url = "https://api.spotify.com/v1/search"
headers = get_auth_header(token)
query = f"?q={artist_name}&type=artist&limit=1"
# Build the query URL and send a GET request to search for the artist
query_url = url + query
result = get(query_url, headers=headers)
# Parse the JSON response to extract the artist information
json_result = json.loads(result.content)["artists"]["items"]
if len(json_result) == 0:
print("No artist found...")
return None
else:
return json_result[0]
# Function to get top tracks of an artist in a specific country
def get_songs_by_artist(token, artist_id, country):
    """Return the artist's top tracks for the given ISO country code."""
    url = f"https://api.spotify.com/v1/artists/{artist_id}/top-tracks?country={country}"
    result = get(url, headers=get_auth_header(token))
    return json.loads(result.content)["tracks"]
def get_artist_name():
    """Prompt the user for an artist name on stdin and return it."""
    return input("Enter an artist name: ")
# Get the API token
token = get_token()
artist_name = get_artist_name()

# Search for an artist by name and get their ID
result = search_for_artist(token, artist_name)
if result is None:
    # search_for_artist already printed a message; stop before indexing None
    raise SystemExit(1)
artist_id = result["id"]

# Get top tracks of the artist in different countries
songsTR = get_songs_by_artist(token, artist_id, "TR")
songsUS = get_songs_by_artist(token, artist_id, "US")

# Write the top US tracks to text.txt. Mode "w" truncates any previous
# content, and the context manager closes the file even if writing fails
# (replaces the previous clear-then-append-then-close sequence).
with open("text.txt", "w") as file:
    for idx, song in enumerate(songsUS):
        file.write(f"{idx + 1}. {song['name']}\n")

# Print the top tracks for each country
for idx, song in enumerate(songsTR):
    print(f"{idx + 1}. {song['name']}")
print("\n")
for idx, song in enumerate(songsUS):
    print(f"{idx + 1}. {song['name']}")
| linnathoncode/SpotifyApiApp | main.py | main.py | py | 3,588 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "base64.b64encode",
"line_n... |
35729296994 | import klepto
import shelve
import pickle
import numpy as np
from scipy.sparse import *
from pyspark.mllib.recommendation import ALS
from pyspark.sql import SparkSession
############### Load Data ##################
rating_matrix_csc = load_npz('netflix/sparse_matrix_100%.npz').tocsc()  # training ratings; columns index users (sliced as [:, start:end] below)
rating_matrix_val_csc = load_npz('netflix/sparse_matrix_validation_75%.npz').tocsc()  # held-out validation ratings
print("file load DONE")
############################################

''' Save to file 'tree.pkl' '''
# Use the first 75% of the user columns for training — presumably the
# remaining 25% are held out elsewhere; TODO confirm.
start = 0
end = int(rating_matrix_csc.shape[1] * 0.75)
from pyspark.mllib.recommendation import ALS
from pyspark.sql import SparkSession
from pyspark import SparkConf
from pyspark import SparkContext
class MatrixFactorization:
    """Thin wrapper around Spark MLlib ALS matrix factorization.

    Constructing an instance starts a local SparkContext (a heavyweight side
    effect — only one may run per process); call end() to stop it.
    """

    def __init__(self, maxIter=15, regParam=0.01, rank=10):
        # ALS hyper-parameters: iteration count, regularization, latent rank
        self.maxIter = maxIter
        self.regParam = regParam
        self.rank = rank
        conf = SparkConf().setAppName("appName").setMaster("local[*]")
        conf.set("spark.driver.memory","8g")
        conf.set("spark.executor.memory","8g")
        self.spark = SparkContext(conf=conf)
        print("New SparkSession started...")

    def change_parameter(self, regParam):
        # update the regularization weight for the next training run
        self.regParam = regParam

    def matrix_factorization(self, train_lst):
        """Train ALS on (user, item, rating) triples.

        Returns (userProfile, itemProfile): dicts mapping user id / item id
        to their latent-factor vectors (as plain lists), built in id order.
        """
        ratings = self.spark.parallelize(train_lst)
        print('create dataframe!')
        # fixed seed keeps the factorization reproducible across runs
        model = ALS.train(ratings, self.rank, seed=10, \
                          iterations=self.maxIter, \
                          lambda_=self.regParam)
        print("MF DONE")
        userFeatures = sorted(model.userFeatures().collect(), key=lambda d: d[0], reverse=False)
        productFeatures = sorted(model.productFeatures().collect(), key=lambda d: d[0], reverse=False)
        userProfile = {each[0]: each[1].tolist() for each in userFeatures}
        itemProfile = {each[0]: each[1].tolist() for each in productFeatures}
        return userProfile, itemProfile

    def end(self):
        # stop the SparkContext so a new instance can be created later
        self.spark.stop()
        print("SparkSession stopped.")
from scipy.sparse import find

# total number of validation ratings; used only for progress reporting
val_num = rating_matrix_val_csc.getnnz(axis=None)
########################################## For Validation #############################################
def calculate_avg_rating_for_pesudo_user(pseudo_user_lst, sMatrix):
    """Average the observed (non-zero) ratings of a group of users, per item.

    Parameters
    ----------
    pseudo_user_lst : sequence of int
        Column indices (user ids) that together form the pseudo user.
    sMatrix : scipy.sparse matrix
        Item x user rating matrix.

    Returns
    -------
    numpy.ndarray
        Per-item mean rating over the selected users; items nobody rated
        yield ~0 (the 1e-9 term avoids division by zero).

    Notes
    -----
    The original version created a throwaway zeros array and sliced the
    selected columns twice; the slice is now taken once.
    """
    cols = sMatrix[:, pseudo_user_lst]
    totals = np.array(cols.sum(axis=1))[:, 0]
    counts = cols.getnnz(axis=1)
    return totals / (counts + 1e-9)
def pred_RMSE_for_validate_user(user_node_ind, user_profile, item_profile, val_user_list, val_item_list, sMatrix):
    """RMSE of dot-product rating predictions over all validation pairs.

    user_node_ind maps each user id to the tree-node (pseudo-user) index
    whose latent profile stands in for that user.

    NOTE(review): the progress print reads the module-level ``val_num``
    (total validation rating count), so this function depends on that
    global being initialised first.
    """
    print("RMSE calculation on valset qstarted.")
    RMSE = 0
    i = 0
    for userid, itemid in zip(val_user_list, val_item_list):
        if i % 50000 == 0:
            print("%.2f%%" % (100 * i / val_num))  # progress in percent
        i += 1
        # squared error of the latent-factor prediction vs. the true rating
        RMSE += (sMatrix[itemid, userid] - np.dot(user_profile[user_node_ind[userid]], item_profile[itemid]))**2
    return (RMSE / len(val_user_list))**0.5
def generate_prediction_model(lr_bound, tree, rI, sMatrix, plambda_candidates, validation_set):
    ''' lr_bound: dict {
                level 0: [[left_bound, right_bound]], users' bound for one level, each ele in dictionary represents one node
                level 1: [[left_bound, right_bound], [left_bound, right_bound], [left_bound, right_bound]], 3
                level 2: ..., 9
        } (bound means index)
        plambda_candidates: {
            level 0: [clambda1, clambda2, clambda3, ...]
            level 1: [clambda1, clambda2, clambda3, ...]
            level 2: [clambda1, clambda2, clambda3, ...]
        }
        prediction_model: dict {
            level 0: { 'best_lambda': x, 'user_profile': ..., 'item_profile': ...}
            level 1: { 'best_lambda': x, 'user_profile': ..., 'item_profile': ...}
            level 2: { 'best_lambda': x, 'user_profile': ..., 'item_profile': ...}
        }

        For each tree level: build pseudo-user training triples, train ALS
        once per candidate lambda, keep the profiles with the lowest
        validation RMSE, and persist that level's result via shelve.
    '''
    # MF = MatrixFactorization()
    # print("MF session started.")
    prediction_model = {}
    # validation (item, user) coordinate lists of all non-zero ratings
    val_item_list = find(validation_set)[0]
    val_user_list = find(validation_set)[1]
    user_node_ind = np.zeros(sMatrix.shape[1])  #### notice that index is not id
    for level in lr_bound:
        # level = "10"
        print("level:", level)
        prediction_model.setdefault(level, {})
        train_lst = []
        rmse_for_level = []
        for pseudo_user_bound, userid in zip(lr_bound[level], range(len(lr_bound[level]))):
            # print(str(userid) + "/" + str(pow(3,int(level))))
            if pseudo_user_bound[0] > pseudo_user_bound[1]:
                continue  # empty node: no users fall in this bound
            pseudo_user_lst = tree[pseudo_user_bound[0]:(pseudo_user_bound[1] + 1)]
            pseudo_user_for_item = calculate_avg_rating_for_pesudo_user(pseudo_user_lst, sMatrix)
            # one (pseudo-user, item, avg rating) triple per rated item
            train_lst += [(userid, itemid, float(pseudo_user_for_item[itemid])) \
                        for itemid in range(pseudo_user_for_item.shape[0]) if pseudo_user_for_item[itemid]]
            #### find node index for each validation user ####
            user_node_ind[pseudo_user_lst] = userid
        print("Rating Number of level " + level + ": " + str(len(train_lst)))
        #### Train MF and Do validation ####
        min_RMSE = -1
        for plambda in plambda_candidates[level]:
            MF = MatrixFactorization(regParam=plambda)
            user_profile, item_profile = MF.matrix_factorization(train_lst)
            MF.end()  #### close MF spark session
            del MF
            RMSE = pred_RMSE_for_validate_user(user_node_ind, user_profile, item_profile, val_user_list, val_item_list, validation_set)
            rmse_for_level.append(RMSE)
            # NOTE(review): `is -1` only works because CPython caches small
            # ints; this should be `min_RMSE == -1` (or a None sentinel).
            if min_RMSE is -1 or RMSE < min_RMSE:
                min_RMSE = RMSE
                min_user_profile, min_item_profile, min_lambda = user_profile, item_profile, plambda
        print("rmse_for_level: ", rmse_for_level)
        prediction_model[level]['upro'], prediction_model[level]['ipro'], prediction_model[level]['plambda'] \
                    = min_user_profile, min_item_profile, min_lambda
        # persist this level's winning profiles immediately
        d = shelve.open("./prediction_model/"+level, protocol=pickle.HIGHEST_PROTOCOL)
        d["content"] = prediction_model[level]
        d.close()
        print("level " + level + " training DONE")

    return prediction_model
import klepto
import numpy as np
# Load the decision tree (user partition per level) produced earlier
Tree = klepto.archives.dir_archive('treeFile', {}, serialized=True)
Tree.load()

# Regularization candidates per tree level. The candidate ranges grow with
# depth — presumably because deeper nodes have fewer users and need stronger
# regularization; TODO confirm the tuning rationale.
plambda_candidates = {"0":[0.0005, 0.0006, 0.0007, 0.0008, 0.0009, 0.001, 0.002, 0.003, 0.004, 0.005],
                      "1":[0.0005, 0.0006, 0.0007, 0.0008, 0.0009, 0.001, 0.002, 0.003, 0.004, 0.005],
                      "2":[0.0005, 0.0006, 0.0007, 0.0008, 0.0009, 0.001, 0.002, 0.003, 0.004, 0.005],
                      "3":[0.0005, 0.0006, 0.0007, 0.0008, 0.0009, 0.001, 0.002, 0.003, 0.004, 0.005],
                      "4":[0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.010],
                      "5":[0.001, 0.002, 0.003, 0.004, 0.005, 0.006, 0.007, 0.008, 0.009, 0.010],
                      "6":[0.006, 0.007, 0.008, 0.009, 0.01, 0.011, 0.012, 0.013, 0.014, 0.015],
                      "7":[0.006, 0.007, 0.008, 0.009, 0.01, 0.011, 0.012, 0.013, 0.014, 0.015],
                      "8":[0.007, 0.008, 0.009, 0.010, 0.014, 0.018, 0.02, 0.022, 0.024, 0.026],
                      "9":[0.007, 0.008, 0.009, 0.010, 0.014, 0.018, 0.02, 0.022, 0.024, 0.026],
                      "10":[0.007, 0.008, 0.009, 0.010, 0.014, 0.018, 0.02, 0.022, 0.024, 0.026]}
# for level in Tree["lr_bound"]:
#     plambda_candidates[level] = list(np.arange(0.001, 0.05, 0.005))
# Train on the first 75% of the user columns only (see start/end above)
prediction_model = generate_prediction_model \
                    (Tree['lr_bound'], \
                     Tree['tree'], \
                     Tree['rI'], \
                     rating_matrix_csc[:, start:end],
                     plambda_candidates,
                     rating_matrix_val_csc)

import pickle
import shelve
# Persist the combined result for all levels in one shelve file
d = shelve.open("prediction_model", protocol=pickle.HIGHEST_PROTOCOL)
d["content"] = prediction_model
d.close()
print("Write DONE!")
{
"api_name": "pyspark.SparkConf",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pyspark.SparkContext",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "pyspark.mllib.recommendation.ALS.train",
"line_number": 49,
"usage_type": "call"
},
{
"api... |
2710846372 | # just a seperate file for handling the logging
# of sanic to use with logging
from sanic.log import DefaultFilter
import sys
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        # numeric log levels: 0/10/20 = NOTSET/DEBUG/INFO
        'accessFilter': {
            '()': DefaultFilter,
            'param': [0, 10, 20]
        },
        # numeric log levels: 30/40/50 = WARNING/ERROR/CRITICAL
        'errorFilter': {
            '()': DefaultFilter,
            'param': [30, 40, 50]
        }
    },
    'formatters': {
        'simple': {
            'format': '%(asctime)s - (%(name)s)[%(levelname)s]: %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        },
        # access format includes Sanic request fields (host, request, status, byte)
        'access': {
            'format': '%(asctime)s - (%(name)s)[%(levelname)s][%(host)s]: ' +
                      '%(request)s %(message)s %(status)d %(byte)d',
            'datefmt': '%Y-%m-%d %H:%M:%S'
        }
    },
    'handlers': {
        # each stream handler below has a file-based twin writing to temp/
        'internalFile': {
            'class': 'logging.FileHandler',
            'filters': ['accessFilter'],
            'formatter': 'simple',
            'filename': "temp/clickinternal.log"
        },
        'accessFile': {
            'class': 'logging.FileHandler',
            'filters': ['accessFilter'],
            'formatter': 'access',
            'filename': "temp/clickaccess.log"
        },
        'errorFile': {
            'class': 'logging.FileHandler',
            'filters': ['errorFilter'],
            'formatter': 'simple',
            'filename': "temp/clickerr.log"
        },
        'internal': {
            'class': 'logging.StreamHandler',
            'filters': ['accessFilter'],
            'formatter': 'simple',
            'stream': sys.stderr
        },
        'accessStream': {
            'class': 'logging.StreamHandler',
            'filters': ['accessFilter'],
            'formatter': 'access',
            'stream': sys.stderr
        },
        'errorStream': {
            'class': 'logging.StreamHandler',
            'filters': ['errorFilter'],
            'formatter': 'simple',
            'stream': sys.stderr
        }
    },
    'loggers': {
        # 'sanic' gets plain formatting; 'network' gets the access format
        'sanic': {
            'level': 'DEBUG',
            'handlers': ['internal','errorStream','internalFile', 'errorFile']
        },
        'network': {
            'level': 'DEBUG',
            'handlers': ['accessStream','errorStream','accessFile', 'errorFile']
        }
    }
}
{
"api_name": "sanic.log.DefaultFilter",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sanic.log.DefaultFilter",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "sys.stderr",
"line_number": 53,
"usage_type": "attribute"
},
{
"api_name": "sys.s... |
25961872136 | # -*- coding: utf-8 -*-
"""
This Python file is been made by the project group Mattek5 C4-202
This is a test of how much packetloss the prediction of a sound file
can have and still be intelligibly
"""
from __future__ import division
import os
import sys
lib_path = '\\Scripts\\libs'
data_path = '\\Lydfiler\\Sound'
export_path = '\\Lydfiler\Predict'
cwd = os.getcwd()[:-8]
sys.path.insert(0, cwd + lib_path)
import scipy.io.wavfile as wav
import sounddevice as sd
import numpy as np
import matplotlib.pyplot as plt
import LP_speech as lps
import scipy.signal as sig
""" Import data """
filename = 'Laura_en_saet'
fs, data= wav.read(cwd + data_path + "/Saetning/" + filename + ".wav")
data = np.array(data,dtype = "float64")
""" Function for packetloss """
def packetlooser(parameters, P):
    """Randomly drop LP packets from *parameters* with probability *P*.

    Each dropped packet is replaced in place by a "lost" marker dict
    (``voi=4`` with no coefficient/gain/pitch data). The last three
    packets are never dropped.

    :param parameters: list of per-packet LP parameter dicts (mutated in place)
    :param P: probability in [0, 1] that any given packet is lost
    :return: the same (mutated) list
    """
    # Bug fix: the original signature misspelled the parameter ("paramters")
    # and never used it — the body silently operated on the module-level
    # `parameters` global. It now works on its own argument.
    count = 0
    for packet in range(len(parameters) - 3):
        if np.random.random() <= P:
            count += 1
            parameters[packet] = \
                {"coef": None, "gain": None, "voi": 4, "pitch": None, "first_imp": None}
    print("Number of packet losses: %d" %count)
    print("Packet losses precent : %.1f %%" %((100*count)/(len(parameters)-3)))
    return parameters
""" Predict signal with packetloss """
N = 160
p = 12
P_packetloss = .9 # The probability of packet loss
parameters = lps.LP_parameters(data, N, p, .5)
parameters_lossy = packetlooser(parameters, P_packetloss)
predict = lps.LP_predict(parameters_lossy)
""" Plot of data and predicted data """
plt.subplot(211)
plt.plot(data)
plt.subplot(212)
plt.plot(predict)
plt.show()
""" Save and play the predict packetloss file """
#wav.write(cwd + export_path + "/Packetloss/packetloss_90_" + filename + ".wav",\
# fs,np.int16(predict))
#sd.play(np.int16(data),fs)
#sd.play(np.int16(predict),fs)
| AalauraaA/P5 | Supplerende_materiale/Scripts/packetloss.py | packetloss.py | py | 1,773 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.getcwd",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path.insert",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "scipy.io.wavfile.read",
"... |
26929642562 | import plotly.graph_objects as go
import plotly.io as pio
from PIL import Image
# to render in jupyterlab
#pio.renderers.default = "plotly_mimetype"
# Create figure
fig = go.Figure()
# Local screenshot of the VSM (value-stream map) visualization, overlaid on the plot.
pyLogo = Image.open(r'C:\Users\l.trouw\Documents\Pycharm\Lean_simulation\VSMvisualizationMatrasses.png')
# Add trace
fig.add_trace(
    go.Scatter(x=[0, 0.5, 1, 2, 2.2], y=[1.23, 2.5, 0.42, 3, 1])
)
# Stretch the image over data coordinates (x: 0..2, y: 1..3); it is drawn
# above the trace (layer="above") at 50% opacity so the scatter stays visible.
fig.add_layout_image(
    dict(
        source=pyLogo,
        xref="x",
        yref="y",
        x=0,
        y=3,
        sizex=2,
        sizey=2,
        sizing="stretch",
        opacity=0.5,
        layer="above")
)
fig.show()
{
"api_name": "plotly.graph_objects.Figure",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.Ima... |
14837403984 | from django.urls import path
from . import views
# URL routes for the post app. Note: Django matches routes top-down, so a
# duplicated pattern's second entry is dead code; the duplicate
# 'post/new/' route has been removed (behavior-preserving).
urlpatterns = [
    path('post/<int:comment_pk>/comment_edit/', views.comment_edit, name='comment_edit'),
    path('post/new/', views.post_new, name='post_new'),
    path('post/list', views.post_list, name='post_list'),
    path('post/<int:post_pk>/', views.post_detail, name='post_detail'),
    path('post/<int:post_pk>/edit/', views.post_edit, name='post_edit'),
    path('delete/<int:post_pk>', views.delete_post, name='delete_post'),
    path('delete/comment/<int:comment_pk>', views.delete_comment, name='delete_comment'),
    path('post/<int:post_pk>/comment_new', views.comment_new, name='comment_new'),
    path('post/<int:post_pk>/post_like_or_dislike/', views.post_like_or_dislike, name='post_like_or_dislike'),
    path('post/draft/list/', views.draft_list, name='draft_list'),
    path('post/publish/<int:post_pk>/', views.publish, name='publish'),
    path('post/tag_list/<int:tag_pk>/', views.tag_list, name='tag_list'),
    path('post/category_posts/<int:category_pk>/', views.category_posts, name='category_posts'),
    path('post/recommendations/', views.recommendations, name='recommendations'),
    path('post/add_to_favorite/<int:post_pk>/', views.add_to_favorite, name='add_to_favorite'),
    path('post/favorites/', views.favorites, name='favorites'),
]
| meeeeeeeh/djangoblog | post/urls.py | urls.py | py | 1,384 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
15271938454 | """
Creating keyspaces in Cassandra:
CREATE KEYSPACE my_keyspace WITH replication = {'class':'SimpleStrategy', 'replication_factor':1};
"""
from faker import Faker
faker = Faker()
def get_registered_user():
    """Return a fake user record as a '$'-delimited string: name$address$year.

    Uses the module-level ``faker`` instance; the '$' delimiter is what the
    downstream consumer splits on.
    """
    # (Dead commented-out dict-returning variant removed.)
    return faker.name() + "$" + faker.address() + "$" + faker.year()
if __name__ == "__main__":
print(get_registered_user())
| imnikhilanand/Real-Time-ETL-with-Kafka-Spark-Cassandra | src/produce_data/generate_data.py | generate_data.py | py | 431 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "faker.Faker",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "faker.name",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "faker.address",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "faker.year",
"line_number": ... |
37562209258 | import os
from modules.computation.Dataset import Dataset
def _main():
    """Build one Dataset image-list file per partition directory (train/test)."""
    # Default locations, anchored at the user's home directory.
    work_dir = os.path.join(os.path.expanduser('~'), 'Desktop/ProyectoGDSA')
    images_dir = os.path.join(work_dir, '1_images')
    datasets_dir = os.path.join(work_dir, '2_datasets')
    # One pass per data partition (train & test).
    for partition in os.listdir(images_dir):
        partition_dir = os.path.join(images_dir, partition)
        # Skip stray files; only partition directories are processed.
        if not os.path.isdir(partition_dir):
            continue
        # The output file listing the images of this partition.
        list_file = os.path.join(datasets_dir, partition + '.txt')
        dataset = Dataset(list_file,
                          flagSaveInMemory=False,
                          flagVerbose=True)
        dataset.build(partition_dir, '.jpg')
#def run():
_main()
| aamcgdsa21/GDSA | Descriptor/tools/2_datasets.py | 2_datasets.py | py | 965 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.path.expanduser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number"... |
29792375641 | #!/usr/bin/env python3
import sys
import numpy as np
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import interp1d
import rospy
import moveit_commander
import actionlib
from franka_gripper.msg import MoveGoal, MoveAction
from geometry_msgs.msg import Point, Pose, PoseStamped
from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal
from trajectory_msgs.msg import JointTrajectoryPoint
from sensor_msgs.msg import JointState
from moveit_commander.conversions import pose_to_list
from std_msgs.msg import Bool
CONTROLLER_TOPIC = "/position_joint_trajectory_controller/follow_joint_trajectory"
DESIRED_JOINT_STATE_TOPIC = "/joint_states_desired"
VELOCITY_MULTIPLIER = 0.2
MAX_COMMAND_POINT_DIFF = 0.05
START_JOINT_VALUES = [0, -0.785, 0, -2.356, 0, 1.571, 0.785]
# Size of goal box
GOAL_SIZE = (0.05, 0.06, 0.055)
# Offset to account for undetected object depth as the camera detects
# a point on the front surface of the goal box
(GOAL_OFFSET_X, GOAL_OFFSET_Y, GOAL_OFFSET_Z) = (-0.03, 0, 0)
DEFAULT_PLANNING_TIME = 0.5
class DemoInterface(object):
    """Demo Interface.

    Wrapper around MoveIt and ROS for commanding a Franka Panda arm:
    planning/executing joint and Cartesian motions, gripper control via
    franka_gripper, publishing collision objects, and reacting to a
    streamed goal-object position topic.
    """
    def __init__(self, node_initialized=False):
        # Only create a ROS node if the caller has not already done so.
        if not node_initialized:
            rospy.init_node('demo_interface', anonymous=True)
        self.set_parameters()
        self.setup_moveit()
        self.set_ee_approach_dict()
        # Last goal point seen on the goal-object topic (noise filtering baseline).
        self.prev_goal_point = None
        # self.return_first_solution_pub = rospy.Publisher('/return_first_solution', Bool,
        #                                                  queue_size=1, latch=True)
        # self.return_first_solution_pub.publish(Bool(False))
        rospy.set_param("return_first_solution", False)
        if self.simulation:
            rospy.logwarn("Running demo in simulation")
        else:
            rospy.logwarn("Running demo on hardware")
            # Action clients are only needed when driving real hardware.
            self.create_hardware_controller_clients()

    def set_parameters(self):
        """Read configuration from the ROS parameter server (with defaults)."""
        self.group_name = rospy.get_param('/group_name', "panda_arm")
        self.planner_id = rospy.get_param('/planner_id', "RRTstarkConfigDefault")
        self.simulation = rospy.get_param('/simulation', False)
        self.planning_time = rospy.get_param('/planning_time', DEFAULT_PLANNING_TIME)
        self.end_effector_link = rospy.get_param('/end_effector_link', "panda_hand")
        self.goal_object_topic = rospy.get_param('/goal_object_topic', '/goal_object_position')
        self.start_joint_values = START_JOINT_VALUES
        self.goal_size = GOAL_SIZE
        self.goal_offset = Point(GOAL_OFFSET_X, GOAL_OFFSET_Y, GOAL_OFFSET_Z)

    def setup_moveit(self):
        """Create the MoveIt commander objects and apply planner settings."""
        moveit_commander.roscpp_initialize(sys.argv)
        self.scene = moveit_commander.PlanningSceneInterface()
        self.move_group = moveit_commander.MoveGroupCommander(self.group_name)
        self.set_planner_id(self.planner_id)
        self.set_planning_time(self.planning_time)
        self.move_group.set_end_effector_link(self.end_effector_link)

    def set_planner_id(self, planner_id):
        self.move_group.set_planner_id(planner_id)

    def set_planning_time(self, planning_time):
        self.move_group.set_planning_time(planning_time)

    @property
    def get_planning_time(self):
        # NOTE(review): a property named `get_...` is unconventional — callers
        # access it as an attribute, without parentheses.
        return self.move_group.get_planning_time()

    def set_ee_approach_dict(self):
        """Precompute end-effector orientations for each named approach direction."""
        rpy_rot_y90 = R.from_euler('y', 90, degrees=True)
        self.ee_approach_dict = {
            "top": R.from_euler('y', 0, degrees=True) * R.from_euler('x', 180, degrees=True),
            "front": rpy_rot_y90,
            "back": rpy_rot_y90 * R.from_euler('x', 180, degrees=True),
            "left": rpy_rot_y90 * R.from_euler('x', 90, degrees=True),
            "right": rpy_rot_y90 * R.from_euler('x', -90, degrees=True)
        }

    def create_hardware_controller_clients(self):
        """Connect action clients for the arm trajectory controller and the gripper."""
        self.trajectory_client = actionlib.SimpleActionClient(CONTROLLER_TOPIC,
                                                              FollowJointTrajectoryAction)
        while not self.trajectory_client.wait_for_server(rospy.Duration(2.0)):
            rospy.loginfo(f"Waiting for the {CONTROLLER_TOPIC} action server")
        self.gripper_client = actionlib.SimpleActionClient('/franka_gripper/move',
                                                           MoveAction)
        self.gripper_client.wait_for_server()
        # Pre-built gripper goals: 0.054 m aperture grips the goal box,
        # 0.08 m is fully open.
        self.close_goal = MoveGoal(width=0.054, speed=0.08)
        self.open_goal = MoveGoal(width=0.08, speed=0.08)

    def open_gripper(self, wait=True):
        # NOTE(review): `wait` is currently ignored; this always blocks up to 5 s.
        self.gripper_client.send_goal(self.open_goal)
        self.gripper_client.wait_for_result(rospy.Duration.from_sec(5.0))

    def close_gripper(self, wait=True):
        # NOTE(review): `wait` is currently ignored (see open_gripper).
        self.gripper_client.send_goal(self.close_goal)
        self.gripper_client.wait_for_result(rospy.Duration.from_sec(5.0))

    def plan_to_start(self):
        """Plan (without executing) to the canonical start joint configuration."""
        return self.plan_to_joint_goal(self.start_joint_values)

    def plan_to_joint_goal(self, joint_values, return_first_solution=False):
        """Plan to joint goal.

        Returns:
            Result tuple (bool, RobotTrajectory, float, MoveItErrorCode):
                (success, path, planning time, error code)
        """
        # self.return_first_solution_pub.publish(Bool(return_first_solution))
        rospy.set_param("return_first_solution", return_first_solution)
        self.move_group.set_joint_value_target(joint_values)
        return self.move_group.plan()

    def plan_to_point(self, point, approach="top"):
        """Plan to point goal with specified end-effector approach.

        Returns:
            Result tuple (bool, RobotTrajectory, float, MoveItErrorCode):
                (success, path, planning time, error code)

        Raises:
            KeyError: If invalid approach arg provided.
        """
        # Adding goal object to scene so we plan around it
        self.publish_goal_object(point)
        pose_goal = self.create_grasp_pose_msg(point, approach=approach)
        self.move_group.set_pose_target(pose_goal)
        return self.move_group.plan()

    def create_grasp_pose_msg(self, point, approach="top"):
        """Create pose msg based on object point and desired approach.

        Args:
            point (Point): Point in space of the object
            approach (str): Descriptor of the desired approach
                orientation of the end effector

        Returns:
            Pose: End effector pose to grasp object at given position and
                desired approach
        """
        pose = Pose()
        if approach == "interpolated":
            # Approach angle varies with object height (see get_angle).
            theta = self.get_angle(float(point.z))
            rpy_rot = (R.from_euler('y', theta, degrees=True)
                       * R.from_euler('x', 180, degrees=True))
        else:
            rpy_rot = self.ee_approach_dict[approach]
        # Move x point back since we want to focus on the center of the
        # box, but we are given the position of the center of the front
        # side (The box in use has a depth of about 6 cm)
        # NOTE(review): the x offset described above is actually applied by the
        # caller (filter_detection_noise -> offset_point with goal_offset);
        # here the point is used as given.
        pose.position = point
        quat = rpy_rot.as_quat()
        pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w = (
            quat[0], quat[1], quat[2], quat[3])
        return pose

    def get_angle(self, height):
        """Linearly map a height in [0, 1] to an approach angle: 0 -> -45°, 1 -> -135°."""
        heights = np.linspace(0, 1, num=20, endpoint=True)
        angles = [-(45 + 90*h) for h in heights]
        f = interp1d(heights, angles)
        return float(f(height))

    def go_to_start(self, wait=True):
        """Stop any current motion and move to the start joint configuration."""
        self.go_to_joint_goal(self.start_joint_values, wait=wait)

    def go_to_joint_goal(self, joint_values, wait=True):
        # Stop the robot before planning & executing new path
        self.smooth_stop()
        self.move_group.go(joint_values, wait)

    def go_to_point(self, point, approach="top", wait=True):
        """Stop, add the goal object to the scene, then plan & execute to *point*."""
        # Stop the robot before planning & executing new path
        self.smooth_stop()
        # Adding goal object to scene so we plan around it
        self.publish_goal_object(point)
        pose_goal = self.create_grasp_pose_msg(point, approach=approach)
        self.move_group.set_pose_target(pose_goal)
        self.move_group.go(wait)

    def publish_goal_object(self, point):
        """Publish the goal box collision object at *point*."""
        self.publish_object("goal", point, size=self.goal_size)

    def publish_object_xyz(self, name, x, y, z, size, primitive='box', remove=False):
        """Convenience wrapper around publish_object taking raw coordinates."""
        point = Point(x, y, z)
        self.publish_object(name, point, size, primitive=primitive, remove=remove)

    def publish_object(self, name, point, size, primitive='box', remove=False):
        """Add a named box (size tuple) or sphere (radius) to the planning scene."""
        if remove:
            self.remove_object(name)
        object_pose = PoseStamped()
        object_pose.header.frame_id = self.move_group.get_planning_frame()
        object_pose.pose.position = point
        if primitive == 'box':
            self.scene.add_box(name, object_pose, size=size)
        else:
            self.scene.add_sphere(name, object_pose, radius=size)

    def remove_object(self, name):
        """Remove a named collision object from the planning scene."""
        self.scene.remove_world_object(name)

    def listen_for_goal(self):
        """Subscribe to the goal-object topic; each message is noise-filtered."""
        rospy.Subscriber(self.goal_object_topic, Point, callback=self.filter_detection_noise,
                         queue_size=1)
        # rospy.spin()

    def filter_detection_noise(self, goal_point):
        """
        TODO 1: if the planning fails, and the goal point doesn't move above the threshold, then
        the robot will not attempt to replan. Should incorporate a method of evaluating whether or
        not planning was successful, and if it wasn't then replan to the previous goal point.
        TODO 2: Also looks like the demo interface can get behind the camera data stream
        occasianally, where the camera is able to recognize the position of the box and publishes
        it, but the rviz planning scene shows an old position of the box. Still need to diagnose
        why this happens.
        """
        rospy.loginfo_throttle(3.0, "filtering detection noise")
        if self.prev_goal_point:
            diff = self.euclidean_distance(self.prev_goal_point, goal_point)
            # Only replan when the detection moved more than the noise threshold.
            if diff > MAX_COMMAND_POINT_DIFF:
                rospy.loginfo("Goal point movement detected, attempting new plan")
                goal_point = self.offset_point(goal_point, self.goal_offset)
                self.go_to_point(goal_point, wait=False)
        else:
            rospy.loginfo("First goal point received, attempting to plan")
            goal_point = self.offset_point(goal_point, self.goal_offset)
            self.go_to_point(goal_point, wait=False)
        # NOTE(review): after a replan, goal_point here is the *offset* point,
        # but when the movement stayed under the threshold it is the raw
        # detection — the stored comparison baseline is inconsistent; verify.
        self.prev_goal_point = goal_point

    def euclidean_distance(self, point1, point2):
        """Return the Euclidean distance between two geometry_msgs Points."""
        arr1 = np.array((point1.x, point1.y, point1.z))
        arr2 = np.array((point2.x, point2.y, point2.z))
        return np.linalg.norm(arr2 - arr1)

    def offset_point(self, point, offset):
        """Return a new Point equal to *point* translated by *offset* (component-wise)."""
        point_offset = Point()
        point_offset.x = point.x + offset.x
        point_offset.y = point.y + offset.y
        point_offset.z = point.z + offset.z
        return point_offset

    def smooth_stop(self):
        """Stop current motion: MoveIt stop in simulation, decel trajectory on hardware."""
        if self.simulation:
            rospy.logwarn("Stopping execution (SIMULATION)")
            self.move_group.stop()
        else:
            # might be a good idea to check if we're already stopped
            # before taking the effort to create a stopping msg
            # if self.trajectory_client.simple_state == 2:
            # return
            rospy.logwarn("Stopping execution (HARDWARE)")
            stop_goal = self.get_stop_goal()
            self.trajectory_client.send_goal_and_wait(stop_goal)

    def get_stop_goal(self):
        """Build a one-point stop trajectory from the current desired joint state.

        Each joint targets its desired position extrapolated slightly along its
        current velocity (scaled by VELOCITY_MULTIPLIER), with zero target
        velocity, producing a smooth deceleration over 0.5 s.
        """
        goal = FollowJointTrajectoryGoal()
        trajectory_point = JointTrajectoryPoint()
        desired_joint_state = rospy.wait_for_message(DESIRED_JOINT_STATE_TOPIC, JointState)
        positions = desired_joint_state.position
        velocities = desired_joint_state.velocity
        trajectory_point.time_from_start = rospy.Duration(0.5)
        # Fill msg vectors
        for i in range(7):
            # Add joint names
            goal.trajectory.joint_names.append(f"panda_joint{i+1}")
            # Add positions
            trajectory_point.positions.append(positions[i] + (velocities[i] * VELOCITY_MULTIPLIER))
            # Add velocities (ALL 0)
            trajectory_point.velocities.append(0.0)
        goal.trajectory.points.append(trajectory_point)
        return goal

    def all_close(self, goal, actual, tolerance):
        """
        Convenience method for testing if a list of values are within a
        tolerance of their counterparts in another list.

        Args:
            goal (list): A list of goal floats, a Pose or a PoseStamped
            actual (list): A list of floats, a Pose or a PoseStamped
            tolerance (float): Allowed difference between goal and actual
                values

        Returns:
            Bool: Successful if true
        """
        if type(goal) is list:
            for index in range(len(goal)):
                if abs(actual[index] - float(goal[index])) > tolerance:
                    return False
        elif type(goal) is PoseStamped:
            return self.all_close(goal.pose, actual.pose, tolerance)
        elif type(goal) is Pose:
            return self.all_close(pose_to_list(goal), pose_to_list(actual),
                                  tolerance)
        return True
| dwya222/end_effector_control | scripts/demo_interface.py | demo_interface.py | py | 13,316 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rospy.init_node",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "rospy.set_param",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "rospy.logwarn",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "rospy.logwarn",
"li... |
13240819097 | #
# A framework for messaging between programs
# and visualising the signaling
#
import zmq
import time
import random
import json
import sys
import protobuf_examples.example
ctx = zmq.Context()
class Stub:
    """Base unit that reports its signaling over a ZeroMQ PUSH socket."""

    def __init__(self, name):
        self.name = name
        self.socket = ctx.socket(zmq.PUSH)
        # Bounded linger so close() does not block forever if the
        # visualiser at :5556 is down.
        timeout_milliseconds = 200
        self.socket.setsockopt(zmq.LINGER, timeout_milliseconds)
        self.socket.connect("tcp://localhost:5556")

    def log_signaling(self, source, target, payload=None):
        """Send a 'source|target|payload' log entry to the visualiser.

        Bug fix: the default ``payload=None`` previously crashed
        ``str.join`` with a TypeError; None is now sent as an empty field.
        """
        log_entry = (source, target, payload if payload is not None else "")
        log_entry_string = "|".join(log_entry)
        # NOTE(review): pyzmq >= 3 requires bytes for send(); if str entries
        # fail here, switch to send_string().
        self.socket.send(log_entry_string)
class SpaceShip(Stub):
    """A ship unit; signals other units and announces each send on stdout."""

    def send_to_station(self, target, msg):
        """Log and announce a message sent from this ship to *target*."""
        self.log_signaling(source=self.name, target=target.name, payload=msg)
        # Announce the send locally as well.
        announcement = "Sending from %s to %s" % (self.name, target.name)
        print(announcement)
class SpaceStation(Stub):
    """A station unit; signals other units and announces each send on stdout."""

    def send_to_station(self, target, msg):
        """Log and announce a message sent from this station to *target*."""
        sender, receiver = self.name, target.name
        self.log_signaling(source=sender, target=receiver, payload=msg)
        # Announce the send locally as well.
        print("Sending from %s to %s" % (sender, receiver))
if __name__ == '__main__':
    # Create units
    space_station_1 = SpaceStation(name='Earth Station')
    space_station_2 = SpaceStation(name='Mars Station')
    space_ship_v = SpaceShip(name='Starship Voyager')
    # Start signaling: one random message between random units per second.
    # Note: source and target are drawn independently, so a unit may
    # occasionally "send" to itself.
    while True:
        msg = random.choice(['Hello Space!', 'Hello Earth', 'Where are you guys?', 'We are at the final fronter', 'We are done, stop!', 'No can do', protobuf_examples.example.serialized_object])
        source = random.choice([space_station_1, space_station_2, space_ship_v])
        target = random.choice([space_station_1, space_station_2, space_ship_v])
        source.send_to_station(target, msg)
        time.sleep(1)
    #space_station_1.send_to_station(space_station_2, 'Hello Space!')
    #space_station_2.send_to_station(space_station_1, 'Hello Earth')
    #space_station_2.send_to_station(space_ship_v, 'Where are you guys?')
    #space_ship_v.send_to_station(space_station_1, )
    #space_ship_v.send_to_station(space_station_1, 'We are done, stop!')
    #space_station_2.send_to_station(space_ship_v, )
| magnuswahlstrand/home-automation | stub_world/send_sequence.py | send_sequence.py | py | 2,322 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "zmq.Context",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "zmq.PUSH",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "zmq.LINGER",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "random.choice",
"line_n... |
826135486 | # -*- coding: utf-8 -*-
"""
Created on Sat May 7 17:36:23 2022
@author: ThinkPad
"""
from __future__ import print_function
import argparse
import os
import numpy as np
import random
import torch
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
from PartialScan import PartialScans,unpickle
from model import feature_transform_regularizer
from pointnetCls import PointNetCls
import torch.nn.functional as F
from tqdm import tqdm
import random
parser = argparse.ArgumentParser()
parser.add_argument(
'--batchSize', type=int, default=3, help='input batch size')
parser.add_argument(
'--num_points', type=int, default=2500, help='input batch size')
parser.add_argument(
'--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument(
'--nepoch', type=int, default=250, help='number of epochs to train for')
parser.add_argument('--outf', type=str, default='cls', help='output folder')
parser.add_argument('--model', type=str, default='', help='model path')
parser.add_argument('--checkpoint', type=str, default='/gpfs/data/ssrinath/ychen485/TextCondRobotFetch/pointnet/cls/cls_model_5.pth', help="checkpoint dir")
parser.add_argument('--feature_transform', action='store_true', help="use feature transform")
opt = parser.parse_args()
print(opt)
blue = lambda x: '\033[94m' + x + '\033[0m'
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
latent_code = "/gpfs/data/ssrinath/ychen485/TextCondRobotFetch/pointnet/03001627/ocnet_shapefeature_pc/embed_feats_train.pickle"
latent_code_test = "/gpfs/data/ssrinath/ychen485/TextCondRobotFetch/pointnet/03001627/ocnet_shapefeature_pc/embed_feats_test.pickle"
shape_folder = "/gpfs/data/ssrinath/ychen485/partialPointCloud/03001627"
latent_dim = 512
dataset = PartialScans(latentcode_dir = latent_code, shapes_dir = shape_folder)
test_dataset = PartialScans(latentcode_dir = latent_code_test, shapes_dir = shape_folder)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=opt.batchSize,
shuffle=True,
num_workers=int(opt.workers))
testdataloader = torch.utils.data.DataLoader(
test_dataset,
batch_size=opt.batchSize,
shuffle=True,
num_workers=int(opt.workers))
latent_dict = unpickle(latent_code)
keylist = list(latent_dict.keys())
latent_dict_test = unpickle(latent_code_test)
keylist_test = list(latent_dict_test.keys())
print("train set lenth: "+ str(len(dataset)) +", test set length: "+ str(len(test_dataset)))
try:
os.makedirs(opt.outf)
except OSError:
pass
classifier = PointNetCls(k=2, feature_transform=opt.feature_transform)
if opt.checkpoint != " ":
checkpoint = torch.load(opt.checkpoint)
classifier.load_state_dict(checkpoint)
optimizer = optim.Adam(classifier.parameters(), lr=0.001, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
classifier.cuda()
num_batch = len(dataset) / opt.batchSize
total_correct = 0
for epoch in range(opt.nepoch):
for i, data in enumerate(dataloader, 0):
points_o, label = data
points = points_o[:,0:1024,:].to(torch.float32)
points.to(torch.float32)
points = points.transpose(2, 1)
target_np = np.zeros((len(label),))
t_idx = random.randint(0,len(label)-1)
target_np[t_idx] = 1
target = torch.from_numpy(target_np).to(torch.int64)
latents = np.zeros((1, latent_dim))
latents[0] = latent_dict[label[t_idx]]
# for j in range(opt.batchSize):
# if target[j] == 1:
# latents[j] = latent_dict[label[j]]
# else:
# idx = random.randint(0,len(keylist))
# name = keylist[idx]
# while(name == label[j]):
# idx = random.randint(0,len(keylist))
# name = keylist[idx]
# latents[j] = latent_dict[name]
z = torch.from_numpy(latents).to(torch.float32)
points, target, z = points.cuda(), target.cuda(), z.cuda()
optimizer.zero_grad()
classifier = classifier.train()
pred, trans, trans_feat = classifier(points, z)
# print(pred.shape)
pred = pred[0]
loss = F.nll_loss(pred, target)
if opt.feature_transform:
loss += feature_transform_regularizer(trans_feat) * 0.001
loss.backward()
optimizer.step()
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.data).cpu().sum()
total_correct = total_correct + correct.item()
if i%100 == 0:
print('[%d: %d/%d] train loss: %f accuracy: %f' % (epoch, i, num_batch, loss.item(), total_correct / (100* opt.batchSize)))
total_correct = 0
scheduler.step()
test_correct = 0
for j, data in enumerate(testdataloader, 0):
points_o, label = data
points = points_o[:,0:1024,:].to(torch.float32)
points.to(torch.float32)
points = points.transpose(2, 1)
target_np = np.zeros((len(label),))
t_idx = random.randint(0,len(label)-1)
target_np[t_idx] = 1
target = torch.from_numpy(target_np).to(torch.int64)
latents = np.zeros((1, latent_dim))
latents[0] = latent_dict_test[label[t_idx]]
z = torch.from_numpy(latents).to(torch.float32)
points, target, z = points.cuda(), target.cuda(), z.cuda()
# optimizer.zero_grad()
classifier = classifier.train()
pred, trans, trans_feat = classifier(points, z)
# print(pred.shape)
pred = pred[0]
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.data).cpu().sum()
test_correct = test_correct + correct.item()
print('[%d: %d/%d] %s loss: %f accuracy: %f' % (epoch, i, num_batch,
blue('test'), loss.item(), test_correct/float(len(test_dataset))))
torch.save(classifier.state_dict(), '%s/cls_model_%d.pth' %
(opt.outf, epoch))
| FreddieRao/TextCondRobotFetch | pointnet/train.py | train.py | py | 6,147 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "torch.manual_seed"... |
8466448273 | #!usr/bin/env python
# -*- coding: utf-8 -*-
import itertools
import math

import alphabet
import fasta_parser
from get_kmers import get_kmers_from_sequence
from sequence_utils import get_reverse_complement
def iter_kmers(alphabet, k):
    """Generator function that yields every kmer (substring of length k) over an
    alphabet, which may be any iterable of single characters (e.g. a set)."""
    # itertools.product's `repeat` argument replaces building k copies of
    # the alphabet list by hand; yield order is unchanged.
    for kmer in itertools.product(alphabet, repeat=k):
        yield ''.join(kmer)
def iter_palindromes(k, allowed_middle_characters, alphabet):
    """Yield every DNA reverse-complement palindrome of length *k*.

    Odd-length palindromes take each character from
    *allowed_middle_characters* (e.g. ['G', 'T']) as their center.
    """
    is_odd = k % 2 != 0
    for half in iter_kmers(alphabet, k // 2):
        tail = get_reverse_complement(half)
        if is_odd:
            for middle in allowed_middle_characters:
                yield half + middle + tail
        else:
            yield half + tail
def iter_palindrome_range(k1, k2, allowed_middle_characters, alphabet):
    """Yield all DNA reverse-complement palindromes of lengths k1..k2 inclusive."""
    for length in range(k1, k2 + 1):
        yield from iter_palindromes(length, allowed_middle_characters, alphabet)
def gen_kmers(kmin, kmax, alphabet):
    """
    generates possible k-mers of range(kmin, kmax + 1)
    :param kmin: int, minimum kmer length
    :param kmax: int, maximum kmer length
    :param alphabet: str, accepted sequence alphabet for DNA, RNA, Amino Acids
    :return: list of str, all kmers of every length in [kmin, kmax]
    """
    # Bug fix: the original returned inside the loop, so only kmers of
    # length kmin were ever produced despite the docstring's claim.
    kmers = []
    for n in range(kmin, kmax + 1):
        kmers.extend(''.join(mer) for mer in itertools.product(alphabet, repeat=n))
    return kmers
def gen_rev_palindromes(kmin, kmax, alphabet):
    """
    generate list of palindromes of length n,
    when kmin<=n<=kmax identical to their reverse complements
    :param kmin: int, min length of tested palindrome
    :param kmax: int, max length of tested palindrome
    :param alphabet: str, bases used to build the halves and inserted in the
        middle of odd-length palindromes
    :return: list, palindromes seqs identical to their reverse complements
    """
    dromes = []
    for n in range(kmin, kmax + 1):
        # Build only the left half; the right half is its reverse complement.
        for left_mer in gen_kmers(n // 2, n // 2, alphabet):
            if n % 2 == 0:  # even palindrome
                dromes.append(left_mer + get_reverse_complement(left_mer))
            else:  # odd palindrome: the center base is unconstrained
                for midmer in alphabet:
                    dromes.append(left_mer + midmer + get_reverse_complement(left_mer))
    return dromes
def compute_stats(kmer_list, counts, N, max_e):
    """
    compute_stats computes the e-values for the supplied data.

    Pre-conditions:
    'kmer_list' - a list of kmers (for which stats will be produced)
    'counts' - any dictionary-type with k-mers as keys (min_k - 2 <= k <= max_k,
    where min_k and max_k are the bounds on the k-mer lengths in 'kmer_list')
    and counts as values.
    'N' - the total length of the sequence(s) read to produce 'counts'.
    'max_e' - the upper bound on e-values reported.

    Post-conditions:
    Returns a list of lists ('results') where results[i] is of the form
    [k-mer, observed count, expected count, z-score, e-value]
    """
    # Bug fix: this function uses `math`, which the module never imported
    # (NameError at runtime); the import is now present at file level.
    # results is the list of lists described in the docstring.
    results = []
    # number of tests, used to convert p-value to e-value (Bonferroni-style).
    n = len(kmer_list)
    for kmer in kmer_list:
        k = len(kmer)
        observed = counts[kmer]
        # Maximal-order Markov estimate: E[w] = c(prefix) * c(suffix) / c(overlap).
        expected = counts[kmer[:-1]] * counts[kmer[1:]] / counts[kmer[1:-1]]
        sigma = math.sqrt(expected * (1 - expected / (N - k + 1)))
        Z_score = (observed - expected) / sigma
        # One-sided normal tail probabilities scaled by the number of tests.
        E_value_under = n * math.erfc(-Z_score / math.sqrt(2)) / 2  # E-value for under-rep
        E_value_over = n * math.erfc(Z_score / math.sqrt(2)) / 2  # E-value for over-rep
        if (E_value_under <= max_e):
            results.append([kmer, observed, expected, Z_score, E_value_under])
        elif (E_value_over <= max_e):
            results.append([kmer, observed, expected, Z_score, E_value_over])
    return results
def get_palindromes(alphabet, min_k, max_k):
    """Yield all DNA palindromes with lengths from min_k to max_k (inclusive).

    A palindrome here is a sequence equal to its reverse complement. For
    odd lengths the middle base is unconstrained (any base from *alphabet*),
    so e.g. AAT counts even though its reverse complement is ATT.
    """
    for length in range(min_k, max_k + 1):
        half_len = int(length / 2)
        odd = length % 2 != 0
        for prefix_tuple in itertools.product(alphabet, repeat=half_len):
            prefix = ''.join(prefix_tuple)
            suffix = get_reverse_complement(prefix)
            if odd:
                # odd palindrome: every allowed center base
                for middle in alphabet:
                    yield prefix + middle + suffix
            else:
                # even palindrome
                yield prefix + suffix
if __name__ == '__main__':
    # Use a distinct name instead of rebinding the imported `alphabet`
    # module (which shadowed it for the rest of the block).
    dna_alphabet = alphabet.iupac_dna
    filename = "Data/test/Acidithiobacillus/chromosomes/NC_011206.1_Acidithiobacillus_ferrooxidans_ATCC_53993_complete_genome.fna.gz"
    # The palindrome list does not depend on the FASTA record, so compute
    # it once instead of once per sequence; output per record is unchanged.
    pal_list = list(get_palindromes(dna_alphabet, 4, 6))
    for name, seq in fasta_parser.parse_fasta(filename):
        print(len(pal_list))
        print(pal_list)
| schlogl2017/PauloSSchlogl | get_palindromes.py | get_palindromes.py | py | 5,625 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "itertools.product",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sequence_utils.get_reverse_complement",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 52,
"usage_type": "call"
},
{
"api_nam... |
72532058109 | import json
import arrow
import requests
from monitor_release.models import RunningSidecar
from monitor_release.settings import Settings
def get_bearer_token(settings: Settings):
    """Authenticate against the Portainer API and return a JWT bearer token."""
    auth_payload = json.dumps(
        {
            "Username": settings.portainer_username,
            "Password": settings.portainer_password,
        }
    )
    auth_headers = {"accept": "application/json", "Content-Type": "application/json"}
    auth_response = requests.post(
        f"{settings.portainer_url}/portainer/api/auth",
        headers=auth_headers,
        data=auth_payload,
    )
    return auth_response.json()["jwt"]
def get_services(settings: Settings, bearer_token):
    """Fetch every Docker service known to the configured Portainer endpoint."""
    url = (
        f"{settings.portainer_url}/portainer/api/endpoints/"
        f"{settings.portainer_endpoint_version}/docker/services"
    )
    auth_headers = {
        "Authorization": "Bearer " + bearer_token,
        "Content-Type": "application/json",
    }
    return requests.get(url, headers=auth_headers).json()
def get_tasks(settings: Settings, bearer_token):
    """Fetch every Docker task known to the configured Portainer endpoint."""
    url = (
        f"{settings.portainer_url}/portainer/api/endpoints/"
        f"{settings.portainer_endpoint_version}/docker/tasks"
    )
    auth_headers = {
        "Authorization": "Bearer " + bearer_token,
        "Content-Type": "application/json",
    }
    return requests.get(url, headers=auth_headers).json()
def get_containers(settings: Settings, bearer_token):
    """Fetch all containers (including stopped ones) from the Portainer endpoint.

    NOTE(review): the previous version immediately shadowed the bearer_token
    argument with a freshly fetched token, making the parameter dead. The
    sibling fetchers (get_services, get_tasks) trust the caller's token, so
    this one now does too for consistency.
    """
    containers_url = f"{settings.portainer_url}/portainer/api/endpoints/{settings.portainer_endpoint_version}/docker/containers/json?all=true"
    response = requests.get(
        containers_url,
        headers={
            "Authorization": "Bearer " + bearer_token,
            "Content-Type": "application/json",
        },
    )
    containers = response.json()
    return containers
def check_simcore_running_sidecars(settings: Settings, services):
    """Collect a RunningSidecar entry for each dy-sidecar service in our swarm stack."""
    sidecars: list[RunningSidecar] = []
    for svc in services:
        spec = svc["Spec"]
        labels = spec["Labels"]
        # Guard clause: skip anything that is not a dy-sidecar of this stack.
        if not spec["Name"].startswith("dy-sidecar"):
            continue
        if labels["io.simcore.runtime.swarm-stack-name"] != settings.swarm_stack_name:
            continue
        sidecars.append(
            RunningSidecar(
                name=spec["Name"],
                created_at=arrow.get(svc["CreatedAt"]).datetime,
                user_id=labels["io.simcore.runtime.user-id"],
                project_id=labels["io.simcore.runtime.project-id"],
                service_key=labels["io.simcore.runtime.service-key"],
                service_version=labels["io.simcore.runtime.service-version"],
            )
        )
    return sidecars
def _generate_containers_map(containers):
    """Map container id -> {"git_sha": ...} resolved from image labels.

    Prefers the OCI "org.opencontainers.image.revision" label and falls back
    to the legacy "org.label-schema.vcs-ref" label; git_sha is None when
    neither label is present (or both are empty).
    """
    container_map = {}
    for container in containers:
        # `or {}` guards against containers reported without a Labels mapping,
        # which previously raised AttributeError on the chained .get().
        labels = container.get("Labels") or {}
        git_sha = labels.get("org.opencontainers.image.revision") or labels.get(
            "org.label-schema.vcs-ref"
        )
        container_map[container["Id"]] = {"git_sha": git_sha}
    return container_map
def check_simcore_deployed_services(settings: Settings, services, tasks, containers):
    """Group each matching service's tasks (status, timestamps, git sha) by service ID."""
    git_sha_by_container = _generate_containers_map(containers)

    # First pass: register every service whose name matches the configured prefix.
    service_task_map = {}
    for service in services:
        spec = service["Spec"]
        if spec["Name"].startswith(settings.starts_with):
            service_task_map[service["ID"]] = {
                "service_name": spec["Name"],
                "tasks": [],
            }

    # Second pass: attach each task to its service entry.
    for task in tasks:
        entry = service_task_map.get(task["ServiceID"])
        if entry is None:
            continue
        container_status = task["Status"].get("ContainerStatus")
        if container_status is None:
            continue
        container_id = container_status["ContainerID"]
        entry["tasks"].append(
            {
                "created_at": arrow.get(task["CreatedAt"]).datetime,
                "status": task["Status"]["State"],
                "timestamp": arrow.get(task["Status"]["Timestamp"]).datetime,
                "git_sha": git_sha_by_container.get(container_id, {}).get("git_sha"),
            }
        )
    return service_task_map
| ITISFoundation/osparc-simcore | scripts/release/monitor/monitor_release/portainer_utils.py | portainer_utils.py | py | 4,679 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "monitor_release.settings.Settings",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "json.dumps",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "monitor_rel... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.