seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
20914400525 | import sqlite3
import requests
from xml.dom import minidom
from prettytable import PrettyTable
def create_kns_factions_db():
    """
    Create an SQLite database with information on all factions that ran for
    the Israeli Knesset.

    Fetches the KNS_Faction table from the Knesset OData service and
    (re)builds the KNS_Faction table in 'my_database.db'.

    Fixes over the original: per-row ``executemany`` replaced with
    ``execute``, a single commit after the loop instead of one per row,
    the connection is always closed, and empty (null) XML properties no
    longer crash the import.
    """
    # request data
    database_url = 'http://knesset.gov.il/Odata/ParliamentInfo.svc/KNS_Faction()'
    response = requests.get(database_url)
    # create database
    db = sqlite3.connect('my_database.db')
    try:
        cursor = db.cursor()
        cursor.execute("DROP TABLE IF EXISTS KNS_Faction")
        cursor.execute('''
                    CREATE TABLE KNS_Faction(
                    Id INTEGER PRIMARY KEY,
                    Name VARCHAR(50),
                    KnessetNum INTEGER,
                    StartDate DATETIME2,
                    FinishDate DATETIME2,
                    IsCurrent BIT,
                    LastUpdatedDate DATETIME2)
                    ''')
        # iterate on every faction in the OData XML response
        data = minidom.parseString(response.content)
        factions = data.getElementsByTagName('m:properties')
        for faction in factions:
            # NOTE(review): assumes the service returns compact XML with no
            # whitespace text nodes between property elements — confirm.
            # A property with no text child (null value) is stored as NULL
            # instead of raising AttributeError.
            params = [elem.firstChild.nodeValue if elem.firstChild is not None else None
                      for elem in faction.childNodes]
            # One row per iteration, so execute() is the right call here.
            cursor.execute('''
            INSERT INTO KNS_Faction(Id, Name, KnessetNum, StartDate, FinishDate, IsCurrent, LastUpdatedDate)
            VALUES(?,?,?,?,?,?,?)
            ''', params)
        # Commit once after all rows are inserted.
        db.commit()
    finally:
        db.close()
if __name__ == '__main__':
    # Rebuild the factions database, then report on the 16th Knesset.
    create_kns_factions_db()
    db = sqlite3.connect('my_database.db')
    cursor = db.cursor()
    # select all the factions that ran for the 16th Knesset
    cursor.execute('''SELECT * FROM KNS_Faction WHERE KnessetNum = 16''')
    rows = cursor.fetchall()
    # Column headers come straight from the cursor description.
    table = PrettyTable([column[0] for column in cursor.description])
    table.add_rows(rows)
    print('all the factions that ran for the 16th Knesset')
    print(table)
    db.close()
| MeirNizri/Python-Assignments | Python Exercise 5/create_kns_db.py | create_kns_db.py | py | 1,847 | python | en | code | 0 | github-code | 13 |
29217854861 | import base64
from typing import List, Optional, cast, Dict, Any
class StackPrinterConfig:
    """Options controlling how dryrun stacks/scratch values are rendered."""

    # Default truncation width for rendered values.
    DEFAULT_MAX_VALUE_WIDTH: int = 30

    def __init__(
        self, max_value_width=DEFAULT_MAX_VALUE_WIDTH, top_of_stack_first=True
    ):
        # Width at which rendered values are cut off with "...".
        self.max_value_width = max_value_width
        # When True the stack is printed top-first.
        self.top_of_stack_first = top_of_stack_first
class DryrunResponse:
    """Parsed dryrun response: error text, protocol version, and per-txn results."""

    def __init__(self, drrjson: dict):
        # All three keys are mandatory in a well-formed response.
        for key in ("error", "protocol-version", "txns"):
            assert (
                key in drrjson
            ), f"expecting dryrun response object to have key '{key}' but it is missing"

        self.error = drrjson["error"]
        self.protocol = drrjson["protocol-version"]
        self.txns = [DryrunTransactionResult(t) for t in drrjson["txns"]]
class DryrunTransactionResult:
    """Wrapper around a single transaction's dryrun result.

    Only ``disassembly`` is required. The optional response fields are
    attached dynamically in ``__init__`` (dashes replaced by underscores),
    which is why the accessors below carry ``type: ignore`` comments.

    Fix over the original: ``app_trace`` used ``spc == None`` (plus a
    redundant ``cast``) where ``lsig_trace`` already used ``is None``.
    """

    def __init__(self, dr: Dict[str, Any]):
        assert (
            "disassembly" in dr
        ), "expecting dryrun transaction result to have key 'disassembly' but its missing"

        self.disassembly = dr["disassembly"]

        # cost is separated into 2 fields: `budget-added` and `budget-consumed`
        optionals: List[str] = [
            "app-call-messages",
            "local-deltas",
            "global-delta",
            "budget-added",
            "budget-consumed",
            "logic-sig-messages",
            "logic-sig-disassembly",
            "logs",
        ]

        def attrname(field: str):
            # "app-call-messages" -> "app_call_messages"
            return field.replace("-", "_")

        # Missing optional fields become attributes set to None.
        for field in optionals:
            setattr(self, attrname(field), dr.get(field))

        # Traces are wrapped in DryrunTrace; absent traces leave no attribute.
        traces = ["app-call-trace", "logic-sig-trace"]
        for trace_field in traces:
            if trace_field in dr:
                setattr(
                    self,
                    attrname(trace_field),
                    DryrunTrace(dr[trace_field]),
                )

    def app_call_rejected(self) -> bool:
        """True iff the app call produced a REJECT message."""
        return (
            False
            if self.app_call_messages is None  # type: ignore[attr-defined] # dynamic attribute
            else "REJECT" in self.app_call_messages  # type: ignore[attr-defined] # dynamic attribute
        )

    def logic_sig_rejected(self) -> bool:
        """True iff the logic signature produced a REJECT message."""
        if self.logic_sig_messages is not None:  # type: ignore[attr-defined] # dynamic attribute
            return "REJECT" in self.logic_sig_messages  # type: ignore[attr-defined] # dynamic attribute
        return False

    @classmethod
    def trace(
        cls,
        dr_trace: "DryrunTrace",
        disassembly: List[str],
        spc: StackPrinterConfig,
    ) -> str:
        """Render a dryrun trace as an aligned, pipe-separated text table."""
        # Header row, then one row per executed trace line.
        lines = [["pc#", "ln#", "source", "scratch", "stack"]]
        for idx in range(len(dr_trace.trace)):
            trace_line = dr_trace.trace[idx]

            src = disassembly[trace_line.line]
            if trace_line.error != "":
                src = "!! {} !!".format(trace_line.error)

            # Previous line's scratch is needed to show what changed.
            prev_scratch = []
            if idx > 0:
                prev_scratch = dr_trace.trace[idx - 1].scratch

            scratch = scratch_to_string(prev_scratch, trace_line.scratch)
            stack = stack_to_string(trace_line.stack, spc.top_of_stack_first)
            lines.append(
                [
                    "{}".format(trace_line.pc),
                    "{}".format(trace_line.line),
                    truncate(src, spc.max_value_width),
                    truncate(scratch, spc.max_value_width),
                    truncate(stack, spc.max_value_width),
                ]
            )

        # Pad every column to its widest entry so the table lines up.
        cols = len(lines[0])
        max_widths = [0] * cols
        for line in lines:
            for i in range(cols):
                if len(line[i]) > max_widths[i]:
                    max_widths[i] = len(line[i])

        trace = []
        for line in lines:
            trace.append(
                " |".join(
                    [str(line[i]).ljust(max_widths[i]) for i in range(cols)]
                ).strip()
            )

        return "\n".join(trace) + "\n"

    def app_trace(self, spc: Optional[StackPrinterConfig] = None) -> str:
        """Format the app call trace; '' when the transaction has none."""
        if not hasattr(self, "app_call_trace"):
            return ""

        if spc is None:  # fixed: was `spc == None` (plus a redundant cast)
            spc = StackPrinterConfig(top_of_stack_first=False)

        return self.trace(self.app_call_trace, self.disassembly, spc=spc)  # type: ignore[attr-defined] # dynamic attribute

    def lsig_trace(self, spc: Optional[StackPrinterConfig] = None) -> str:
        """Format the logic-sig trace; '' when absent or without disassembly."""
        if not hasattr(self, "logic_sig_trace"):
            return ""

        if getattr(self, "logic_sig_disassembly", None) is None:
            return ""

        if spc is None:
            spc = StackPrinterConfig(top_of_stack_first=False)

        return self.trace(
            self.logic_sig_trace, self.logic_sig_disassembly, spc=spc  # type: ignore[attr-defined] # dynamic attribute
        )
class DryrunTrace:
    """Ordered sequence of DryrunTraceLine objects from one execution."""

    def __init__(self, trace: List[dict]):
        self.trace = [DryrunTraceLine(entry) for entry in trace]
class DryrunTraceLine:
    """One trace step: source line, pc, optional error, scratch and stack."""

    def __init__(self, tl):
        self.line = tl["line"]
        self.pc = tl["pc"]
        # Missing "error" / "scratch" keys default to "" / [].
        self.error = tl.get("error", "")
        self.scratch = [DryrunStackValue(sv) for sv in tl.get("scratch", [])]
        self.stack = [DryrunStackValue(sv) for sv in tl["stack"]]
class DryrunStackValue:
    """A TEAL stack/scratch value: type tag plus base64 bytes and a uint."""

    def __init__(self, v):
        self.type = v["type"]
        self.bytes = v["bytes"]
        self.int = v["uint"]

    def __str__(self) -> str:
        # Non-empty bytes take precedence; otherwise show the uint.
        if self.bytes:
            return "0x" + base64.b64decode(self.bytes).hex()
        return str(self.int)

    def __eq__(self, other: object):
        peer = cast(DryrunStackValue, other)
        # Attribute-by-attribute comparison; missing attributes mean "not equal".
        for attr in ("type", "bytes", "int"):
            if not hasattr(peer, attr):
                return False
            if getattr(self, attr) != getattr(peer, attr):
                return False
        return True
def truncate(s: str, max_width: int) -> str:
    """Cut s to max_width chars and append '...'; no-op if it fits or width <= 0."""
    needs_cut = max_width > 0 and len(s) > max_width
    return s[:max_width] + "..." if needs_cut else s
def scratch_to_string(
    prev_scratch: List[DryrunStackValue], curr_scratch: List[DryrunStackValue]
) -> str:
    """Describe the scratch slot that changed between two trace lines.

    Returns "" when the current scratch is empty or nothing changed. When
    several slots differ, the highest-indexed one wins (matching the
    original scan order).

    Fix over the original: ``== None`` replaced with ``is None`` and the
    now-unnecessary ``cast`` removed.
    """
    if not curr_scratch:
        return ""

    changed: Optional[int] = None
    for slot in range(len(curr_scratch)):
        # A slot is "changed" if it is new (beyond the previous scratch)
        # or its value differs from the previous line's value.
        if slot >= len(prev_scratch) or curr_scratch[slot] != prev_scratch[slot]:
            changed = slot

    if changed is None:
        return ""

    return "{} = {}".format(changed, curr_scratch[changed])
def stack_to_string(stack: List[DryrunStackValue], reverse: bool) -> str:
    """Render a stack as "[a, b, ...]"; top-of-stack first when `reverse`.

    Fix over the original: no longer reverses the caller's list in place
    (the old ``stack.reverse()`` mutated the trace line's stored stack).
    """
    ordered = list(reversed(stack)) if reverse else stack
    return "[{}]".format(", ".join(str(sv) for sv in ordered))
| algorand/py-algorand-sdk | algosdk/dryrun_results.py | dryrun_results.py | py | 6,981 | python | en | code | 242 | github-code | 13 |
32544639538 | import argparse
import numpy as np
import cv2
from PIL import Image
from pathlib import Path
from tensorflow import keras
from utils import read_helpers as rh
def main():
    """Parse CLI options and run single-prototype selection for one image."""
    cli = argparse.ArgumentParser()
    cli.add_argument('-i', '--input_directory',
                     default='../../data/still_images',
                     help='Directory with still images.')
    cli.add_argument('-o', '--output_directory',
                     default='../../data/results',
                     help='Directory to save prototypes and evaluations in.')
    cli.add_argument('-f', '--file', default='0X1A2C60147AF9FDAE_62.png',
                     help='Image whose prototype should be calculated')
    cli.add_argument('-p', '--prototypes_filename', default='prototypes_esv.txt',
                     help='Name of file containing prototypes.')
    cli.add_argument('-fv', '--frame_volumes_filename',
                     default='FrameVolumes.csv',
                     help='Name of the file containing frame volumes.')
    cli.add_argument('-cb', '--volume_cluster_borders_file',
                     default='../../data/clustering_volume/cluster_upper_borders_esv.txt',
                     help='Path to file containing volume cluster upper borders.')
    cli.add_argument('-vt', '--volume_type', default='ESV',
                     help='ESV, EDV or None')
    cli.add_argument('-mp', '--model_path', required=True)
    cli.add_argument('-l', '--hidden_layer_index', default=86, type=int)
    opts = cli.parse_args()

    out_dir = (Path(opts.input_directory, 'results')
               if opts.output_directory is None
               else Path(opts.output_directory))
    out_dir.mkdir(parents=True, exist_ok=True)

    # get volume cluster borders
    borders = rh.read_volume_cluster_centers(opts.volume_cluster_borders_file)

    # get prototypes
    prototypes = rh.read_prototypes(Path(out_dir, opts.prototypes_filename))

    # validate clustering -> by validating prototypes
    calculate_prototype(
        borders,
        prototypes,
        opts.model_path, opts.hidden_layer_index,
        opts.input_directory, opts.file)
def calculate_prototype(volume_cluster_borders,
                        prototypes,
                        model_path, hidden_layer_index,
                        input_directory, file):
    """Select the most similar prototype to the given still image file
    when for similarity measuring only the feature distance is considered.

    Fix over the original: it built two identical keras.Model objects
    (predictor and extractor) and ran inference twice; one sub-model and a
    single forward pass give the same results. The unused loop counter was
    also removed.
    """
    # load model
    print('Start loading model')
    model = keras.models.load_model(model_path)
    print('End loading model')
    # One sub-model serves both the volume prediction and feature extraction.
    feature_model = keras.Model(inputs=[model.input],
                                outputs=model.layers[hidden_layer_index].output)

    # Load and normalize the grayscale still image.
    image = Image.open(Path(input_directory, file))
    frame = np.asarray(image)
    frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
    frame = frame / 255.0
    instance = np.expand_dims(frame, axis=0)

    # Extract features once; the first scalar doubles as the predicted volume.
    extracted_features = feature_model(instance)
    prediction = float(extracted_features.numpy()[0][0])
    print('Image:', file)
    print('Predicted Volume:', prediction)

    # get volume cluster of image by choosing corresponding volume-range
    volume_cluster_index = len(volume_cluster_borders)
    for j, upper_border in enumerate(volume_cluster_borders):
        if prediction <= upper_border:
            volume_cluster_index = j
            break
    print('Volume cluster index:', volume_cluster_index)
    current_prototypes = prototypes[volume_cluster_index]

    # feature similarity using Euclidean distance to each prototype
    euc_feature_diff = [
        np.linalg.norm([np.array(extracted_features[0]) - np.array(prototype.features)])
        for prototype in current_prototypes
    ]
    # get index of prototype with minimum difference
    most_similar_index = euc_feature_diff.index(min(euc_feature_diff))
    print('Most similar prototype:', current_prototypes[most_similar_index].file_name)
    print('Euclidean distance of features:', euc_feature_diff[most_similar_index])
if __name__ == '__main__':
main()
| kiarastempel/explaining-echo-prototypes | src/utils/single_prototype_selection.py | single_prototype_selection.py | py | 4,620 | python | en | code | 2 | github-code | 13 |
15479952150 | from peewee import *
# Shared peewee MySQL connection used by all campaign models.
# NOTE(review): host and credentials are hard-coded and committed to the
# repository — move them to environment variables or a secrets store and
# rotate the exposed password.
database = MySQLDatabase(
    'telethon',
    host="ls-05e88db820c0415c07abb89eec7d3f57e39a3e64.cu5yrqifxqtg.us-east-2.rds.amazonaws.com",
    user='dbmasteruser',
    password='[jbMhN029vJ6:bf2=M+{f&;89p7HaU`Z'
)
class Campaign(Model):
    """Peewee model for one messaging campaign in the `telethon` database."""
    # Auto-incrementing surrogate key.
    id = AutoField(primary_key=True, unique=True, null=False)
    # Human-readable campaign identifier; must be unique.
    name = CharField(unique=True)
    startDate = DateTimeField()
    # NULL while the campaign is still running.
    endDate = DateTimeField(null=True)
    totalMessages = IntegerField(default=0)
    # NOTE(review): IntegerField with a string default 'A' looks inconsistent —
    # presumably a one-letter status code that should be a CharField; confirm
    # the intended column type before changing the schema.
    status = IntegerField(default='A')
    class Meta:
        # Bind the model to the shared connection defined above.
        database = database
    def __str__(self):
        return f'{self.name}'
def create_all_campaign_table():
    """Create the Campaign table (peewee's create_tables defaults to
    safe=True, i.e. CREATE TABLE IF NOT EXISTS)."""
    database.create_tables([Campaign])
| Cragser/tel-graham-52 | src/application/campaign/create_all_campaign_table.py | create_all_campaign_table.py | py | 684 | python | en | code | 0 | github-code | 13 |
72672548498 | #!/usr/bin/python env
#python 3 standard library
import argparse
from argparse import HelpFormatter
import sys
def main():
    """Build the SPARKLE CLI (index/align subcommands) and dispatch."""
    parser = argparse.ArgumentParser(prog='SPARKLE', description='''Proof of concept: alignment of FASTQ sequences to a refernece genome using clustered SPARK''', epilog='''This program was developed by Daniele Bellini (https://github.com/Daniele-db2)''', formatter_class=CustomFormat)
    subparsers = parser.add_subparsers(title='modules', dest='command', metavar='index,align')

    ## index ##
    index_cmd = subparsers.add_parser('index', help='Create hash table with user-defined k-mer size for a reference genome')
    index_required = index_cmd.add_argument_group('Required I/O arguments')
    index_required.add_argument('-g', '--genome', help='reference genome', metavar='FASTA', required=True)
    index_additional = index_cmd.add_argument_group('Additional parameters')
    index_additional.add_argument('-k','--kmer', help='k-mer size for indexing [10]', default=10, type=int)
    index_cmd.set_defaults(func=run_subtool)

    ## align ##
    align_cmd = subparsers.add_parser('align', help='Align sequences in FASTQ format to an indexed reference genome')
    align_required = align_cmd.add_argument_group('Required I/O arguments')
    align_required.add_argument('-g', '--genome', help='reference genome', metavar='FASTA', required=True)
    align_required.add_argument('-r', '--reads', help='sequencing reads', metavar='FASTQ', required=True)
    align_required.add_argument('-o', '--output', help='output compressed alignment in text format', metavar='TXT.GZ', required=True)
    align_additional = align_cmd.add_argument_group('Additional parameters')
    align_additional.add_argument('-k','--kmer', help='k-mer size for indexing [10]', default=10, type=int)
    align_additional.add_argument('--match', help='match reward (global/local alignment) [5]', default=5, type=int)
    align_additional.add_argument('--mismatch', help='mismatch penalty (global/local alignment) [-4]', default=-4, type=int)
    align_additional.add_argument('--gapopen', help='gap opening penalty (global/local alignment) [-8]', default=-8, type=int)
    align_additional.add_argument('--gapextend', help='gap extending penalty (global alignment) [-6]', default=-6, type=int)
    align_additional.add_argument('--alignment', help='which alignment to use. Choose between global, local and guess [guess]', choices=['global', 'local', 'guess'], default='guess', type=str)
    align_additional.add_argument('--distance', help='minium distance for chaining [50]', type=int, default=50)
    align_cmd.set_defaults(func=run_subtool)

    # No arguments at all: show help and exit with an error status.
    if len(sys.argv)==1:
        parser.print_help(sys.stderr)
        sys.exit(1)

    # Accept the subcommand in any letter case by normalizing argv[1].
    subcommand = sys.argv[1].lower()
    if subcommand == 'index':
        sys.argv[1] = 'index'
    elif subcommand == 'align':
        sys.argv[1] = 'align'

    args = parser.parse_args()
    args.func(parser, args)
class CustomFormat(HelpFormatter):
    """argparse help formatter showing "-g, --genome FASTA" instead of
    repeating the metavar after every alias.

    Fix over the original: the final ``return`` sat *inside* the loop over
    ``action.option_strings``, so only the first alias was ever shown for
    options that take arguments. It now joins all aliases before returning.
    """

    def _format_action_invocation(self, action):
        if not action.option_strings:
            # Positional argument: just show its metavar.
            default = self._get_default_metavar_for_positional(action)
            metavar, = self._metavar_formatter(action, default)(1)
            return metavar
        parts = []
        if action.nargs == 0:
            # Flag (e.g. store_true): aliases only, no argument placeholder.
            parts.extend(action.option_strings)
            return ', '.join(parts)
        # Option with an argument: all aliases, then the placeholder once.
        default = self._get_default_metavar_for_optional(action)
        args_string = self._format_args(action, default)
        parts.extend(action.option_strings)
        return '%s %s' % (', '.join(parts), args_string)

    def _get_default_metavar_for_optional(self, action):
        # Upper-case the destination name as the default placeholder.
        return action.dest.upper()
def run_subtool(parser, args):
    """Import the submodule selected by args.command and execute its run().

    Fix over the original: on an unknown/missing subcommand it printed the
    help and then fell through to ``submodule.run(...)`` with ``submodule``
    undefined, raising NameError. It now returns after printing help.
    """
    if args.command == 'index': #index
        from .index import index as submodule
    elif args.command == 'align': #align
        from .align import align as submodule
    else:
        parser.print_help()
        return
    submodule.run(parser,args)
if __name__ =='__main__':
main() | Daniele-db2/SPARKLE | SPARKLE/SPARKLE.py | SPARKLE.py | py | 3,879 | python | en | code | 0 | github-code | 13 |
74564480018 | #!/usr/bin/env python
"""
_TaskSummary_
List the summary of job numbers by task given a workflow
"""
from WMCore.Database.DBFormatter import DBFormatter
from WMCore.JobStateMachine.Transitions import Transitions
from future.utils import listvalues
class TaskSummaryByWorkflow(DBFormatter):
    """Summarize job counts per task, grouped by job state, for one workflow."""

    sql = """SELECT wmbs_workflow.id, wmbs_workflow.name AS wmspec,
                    wmbs_workflow.task,
                    COUNT(wmbs_job.id) AS num_job, wmbs_job_state.name AS state,
                    SUM(wmbs_job.outcome) AS success
             FROM wmbs_workflow
             INNER JOIN wmbs_subscription ON
               wmbs_workflow.id = wmbs_subscription.workflow
             INNER JOIN wmbs_jobgroup ON
               wmbs_subscription.id = wmbs_jobgroup.subscription
             INNER JOIN wmbs_job ON
               wmbs_jobgroup.id = wmbs_job.jobgroup
             INNER JOIN wmbs_job_state ON
               wmbs_job.state = wmbs_job_state.id
             WHERE wmbs_workflow.name = :workflow_name
             GROUP BY wmbs_workflow.task, wmbs_job_state.name
             ORDER BY wmbs_workflow.id ASC"""

    def failCount(self, result):
        """Finished jobs (success/cleanout/exhausted) that did not succeed."""
        if result["state"] in ("success", "cleanout", "exhausted"):
            return result["num_job"] - int(result["success"])
        return 0

    def pendingCount(self, result):
        """Jobs not yet picked up (state 'none' or 'new')."""
        if result["state"] in ("none", "new"):
            return result["num_job"] - int(result["success"])
        return 0

    def processingCount(self, result):
        """Jobs currently in flight (neither finished nor pending)."""
        if result["state"] in ("success", "cleanout", "exhausted", "none", "new"):
            return 0
        return result["num_job"]

    def formatWorkflow(self, results):
        """Fold per-(task, state) rows into one summary dict per task."""
        summary = {}
        transitions = Transitions()
        for row in results:
            task = row["task"]
            if task not in summary:
                # First row for this task: start every known state at 0.
                entry = {}
                for state in transitions.states():
                    entry[state] = 0
                entry[row["state"]] = row["num_job"]
                entry['total_jobs'] = row["num_job"]
                entry["real_success"] = int(row["success"])
                entry["id"] = row["id"]
                entry["wmspec"] = row["wmspec"]
                entry["task"] = task
                entry["real_fail"] = self.failCount(row)
                entry['processing'] = self.processingCount(row)
                summary[task] = entry
            else:
                entry = summary[task]
                entry[row["state"]] = row["num_job"]
                entry['total_jobs'] += row["num_job"]
                entry["real_success"] += int(row["success"])
                entry["real_fail"] += self.failCount(row)
                entry['processing'] += self.processingCount(row)
        # need to order by id (client side)
        return listvalues(summary)

    def execute(self, workflowName, conn = None, transaction = False):
        """Run the summary query for `workflowName` and format the rows."""
        rows = self.dbi.processData(self.sql, {'workflow_name': workflowName},
                                    conn = conn, transaction = transaction)
        return self.formatWorkflow(self.formatDict(rows))
| dmwm/WMCore | src/python/WMCore/WMBS/MySQL/Monitoring/TaskSummaryByWorkflow.py | TaskSummaryByWorkflow.py | py | 3,581 | python | en | code | 44 | github-code | 13 |
17745906012 | # Algorithms and Uncertainty (2019) - PUC-Rio
#
# MWU Classifier to distinguish two digits based on one pixel (MNIST)
#
# Last updated: 27/04/2019
#
# Authors: Ítalo G. Santana & Rafael Azevedo M. S. Cruz
from __future__ import print_function
import random
import numpy as np
import time
import sys
import pandas as pd
import keras
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# Chosen numbers to be distinguished.
number_a = 4
number_b = 6
def predict_best_singlepixel(x, expect_y, experts, posval=number_a, negval=number_b):
    """Return (best accuracy, best expert) over all single-pixel experts."""
    best_acc = -1
    best_expert = None
    for expert in experts:
        predictions = expert.predict(x)
        # Map the raw +1/-1 predictions onto the actual digit labels.
        positive = (predictions > 0)
        predictions[positive] = posval
        predictions[~positive] = negval
        score = accuracy(predictions, expect_y)
        if score > best_acc:
            best_acc = score
            best_expert = expert
    return best_acc, best_expert
# Computes the accuracy of a prediction given a true output y_true.
def accuracy(y_pred, y_true):
if ( len(y_pred.shape) == 3 ):
acc = np.zeros((y_pred.shape[1],y_pred.shape[2]))
for i in range(acc.shape[0]):
for j in range(acc.shape[1]):
acc[i,j] = np.sum(y_pred[:,i,j]==y_true).astype(float)
else:
acc = np.sum(y_pred==y_true).astype(float)
acc = acc / y_true.shape[0]
return acc
def loadDataset():
    """Download MNIST via keras and scale pixel intensities into [0, 1]."""
    # download from an online repository
    (x_tr, y_tr), (x_te, y_te) = keras.datasets.mnist.load_data()
    # Normalize: raw pixels are integers in [0, 255].
    x_tr = x_tr.astype(float) / 255.0
    x_te = x_te.astype(float) / 255.0
    return x_tr, y_tr, x_te, y_te
# Single classifier for one digit which distinguishes two digits a and b based on one pixel.
class single_clf:
    """Single-pixel weak learner for the two module-level digits.

    Decides between number_a and number_b by thresholding exactly one pixel.
    Training picks the pixel position (and polarity) that maximizes the
    example-weighted accuracy.
    """

    def __init__(self, x, y, p=None, idx=None):
        # (row, col) of the decision pixel; searched during training if None.
        self.idx = idx
        # +1: an "on" pixel predicts number_a; -1: it predicts number_b.
        self.sign = 0
        # Per-example weights used in the weighted accuracy.
        self.p = p
        self.train(x, y, p)

    def predict_unit(self, x, sign, i, j, posval=number_a, negval=number_b):
        """Predict every digit in x from pixel (i, j) with the given polarity."""
        off_value = negval if sign >= 0 else posval
        on_value = posval if sign > 0 else negval
        predictions = np.full(x.shape[0], off_value, dtype=float)
        # Digits whose pixel (i, j) is "on" get the on_value label.
        predictions[x[:, i, j] > 0] = on_value
        return predictions

    def train(self, x, y, p=None):
        """Choose the pixel/polarity maximizing the p-weighted accuracy on (x, y)."""
        self.p = np.ones(y.shape[0]) if p is None else p

        def weighted_hits(sign, i, j):
            # p-weighted count of correct predictions for this pixel/polarity.
            return np.sum((self.predict_unit(x, sign, i, j) == y).astype(float) * self.p)

        if self.idx is not None:
            # Pixel was fixed by the caller: only the polarity is learned.
            row, col = self.idx
            self.sign = 1 if weighted_hits(1, row, col) >= weighted_hits(-1, row, col) else -1
            return

        # Exhaustive search over every pixel position and both polarities.
        best_score = 0
        for row in range(x.shape[1]):
            for col in range(x.shape[2]):
                score_a = weighted_hits(1, row, col)
                score_b = weighted_hits(-1, row, col)
                if score_a > best_score:
                    best_score, self.sign, self.idx = score_a, 1, (row, col)
                if score_b > best_score:
                    best_score, self.sign, self.idx = score_b, -1, (row, col)

    def predict(self, x, posval=1, negval=-1):
        """Predict every digit in x from the learned pixel and polarity."""
        off_value = negval if self.sign >= 0 else posval
        on_value = posval if self.sign > 0 else negval
        predictions = np.full(x.shape[0], off_value, dtype=float)
        predictions[x[:, self.idx[0], self.idx[1]] > 0] = on_value
        return predictions
# MWU Classifier for distinguishing two digits A and B.
class MWU:
    """Multiplicative-weights (boosting-style) classifier built from
    single-pixel weak learners, distinguishing number_a from number_b."""

    def __init__(self, gamma):
        # Minimum edge over 1/2 that a weak learner must reach to be kept.
        self.gamma = gamma

    # MWU algorithm to compute the final weight w_i of each expert i in an horizon T.
    def train(self, train, test, T=100, w=None):
        """Run up to T boosting rounds on train=(x, y); `test` is used only
        for per-round accuracy logging. Returns the final per-example
        weight vector P.

        Side effects: fills self.learners / self.w, writes train_log.txt,
        and shows a matplotlib plot.
        NOTE(review): the parameter `w` is accepted but never used.
        """
        x_train, y_train = train
        x_test, y_test = test
        # Binarize pixels: a pixel is "on" iff intensity > 0.
        x_train = (x_train>0).astype(float)
        self.learners = []
        self.t_hist = []
        self.test_accuracy = []
        self.train_accuracy = []
        # Standard MWU learning-rate choice for horizon T.
        eps = np.sqrt( np.log(x_train.size) / T )
        # Initializing the weight for each digit instance in x_train as 1/x_train.shape[0]
        P = np.ones(x_train.shape[0]) / x_train.shape[0]
        # w[i, j] counts how many rounds selected pixel (i, j) as a weak learner.
        self.w = np.zeros( (x_train.shape[1],x_train.shape[2]) )
        train_file = open("train_log.txt", "w")
        for it in range(T):
            # Create and train a classifier ci with weights P
            ci = single_clf(x_train, y_train, p=P)
            # Predicts the digits a and b from x_train
            y_p = ci.predict(x_train, posval=number_a, negval=number_b)
            # Computes the weighted (P) sum of predictions y_p that are correct (equal to digit label in y_train).
            acc = np.sum((y_p==y_train).astype(float)*P)
            if acc < 0.5 + self.gamma:
                train_file.write("There is no more {}-weak-learners".format(0.5 + self.gamma))
                print ("\n\tThere is no more {}-weak-learners".format(0.5 + self.gamma))
                break
            # Increments 1 to the pixel position (ci.idx[0], ci.idx[1]) that is a 0.5+gamma-weak-learner.
            # Note: the same pixel position might be selected in different rounds.
            self.w[ci.idx[0],ci.idx[1]] += 1
            # Stores the current weak-learner.
            self.learners.append(ci)
            # Computes all the digits for which many predictions were wrong (misses).
            miss = (y_p!=y_train)
            # Boosts the weight of the misclassified instances by a factor exp(eps).
            P[miss] *= np.exp(eps)
            # Updates weights P such that their sum is exactly 1.
            P = P/np.sum(P)
            ############# history log....############
            # Predicts and computes the validation accuracy.
            y_p = self.predict(x_test)
            v_acc = accuracy(y_p,y_test)
            # Predicts and computes the test accuracy.
            y_p = self.predict(x_train)
            t_acc = accuracy(y_p,y_train)
            self.test_accuracy.append(v_acc)
            self.train_accuracy.append(t_acc)
            self.t_hist.append(it)
            ##########################################
            train_file.write("\niteration {}: Validation accuracy: {}".format(it, v_acc))
            print("\niteration {}: Validation accuracy: {}".format(it, v_acc))
        ############# Retrieving the best single-pixel prediction ############
        # Predicts and computes the validation accuracy.
        experts_y_v = self.predict_best_singlepixel(x_test)
        # Predicts and computes the test accuracy.
        experts_y_t = self.predict_best_singlepixel(x_train)
        print("\n\n{} : Number of learners = {}".format(it,len(self.learners)))
        # NOTE(review): the next line prints the literal "{} : Learners: "
        # (no .format call) — presumably a leftover; confirm before changing.
        print("\n\n{} : Learners: ")
        for i in range(len(self.learners)):
            nmbr = "A" if self.learners[i].sign > 0 else "B"
            print("\n\nLearner {} ==> (pixel_i, pixel_j) = {} ; predicted number = {} ; p = {} ".format(i, self.learners[i].idx, nmbr, self.learners[i].p))
            train_file.write("\n\nLearner {} ==> (pixel_i, pixel_j) = {} ; predicted number = {} ; p = {} ".format(i, self.learners[i].idx, nmbr, self.learners[i].p))
        # Dump the pixel-selection count matrix to stdout and the log file.
        for i in range(self.w.shape[0]):
            print("\n")
            train_file.write("\n")
            for j in range(self.w.shape[1]):
                print("{} ".format(self.w[i, j]), end="")
                train_file.write("{} ".format(self.w[i, j]))
        print("\n")
        # Best/worst single-learner accuracy on the validation (test) split.
        acc_max = -1.0
        acc_min = 1.1
        for y_e in experts_y_v:
            acc = accuracy(y_e, y_test)
            if acc > acc_max:
                acc_max = acc
            if acc < acc_min:
                acc_min = acc
        train_file.write("\niteration {}: Best weighted single-pixel validation accuracy (test set): {}".format(it, acc_max))
        print("\niteration {}: Best weighted single-pixel validation accuracy (test set): {}".format(it, acc_max))
        train_file.write("\niteration {}: Worst weighted single-pixel validation accuracy (test set): {}".format(it, acc_min))
        print("\niteration {}: Worst weighted single-pixel validation accuracy (test set): {}".format(it, acc_min))
        # Best/worst single-learner accuracy on the training split.
        acc_max = -1.0
        acc_min = 1.1
        for y_e in experts_y_t:
            acc = accuracy(y_e, y_train)
            if acc > acc_max:
                acc_max = acc
            if acc < acc_min:
                acc_min = acc
        train_file.write("\niteration {}: Best weighted single-pixel validation accuracy (train set): {}".format(it, acc_max))
        print("\niteration {}: Best weighted single-pixel validation accuracy (train set): {}".format(it, acc_max))
        train_file.write("\niteration {}: Worst weighted single-pixel validation accuracy (train set): {}".format(it, acc_min))
        print("\niteration {}: Worst weighted single-pixel validation accuracy (train set): {}".format(it, acc_min))
        train_file.write("\n\n\t{} : Final validation accuracy (test set): {}".format(it,v_acc))
        train_file.write("\n\n\t{} : Final test accuracy (train set): {}\n\n".format(it,t_acc))
        print("\n\n{} : Final validation accuracy (test set): {}".format(it,v_acc))
        print("\n\n{} : Final test accuracy (train set): {}\n\n".format(it,t_acc))
        train_file.close()
        # Plotting classfifier over iterations
        train_acc = np.array(self.train_accuracy)
        test_acc = np.array(self.test_accuracy)
        x = np.array(self.t_hist)
        plt.suptitle('Classificadores Finais')
        plt.plot(x,train_acc,label="Treino")
        plt.plot(x,test_acc,label="Teste")
        plt.legend()
        plt.xlabel('Iterações')
        plt.ylabel('Qualidade')
        plt.axis([0,it + 5,0.8,1])
        plt.show()
        return P

    # Considers the prediction done by all the learners added (already weighted).
    def predict(self, x, posval=number_a, negval=number_b):
        """Majority vote of all accepted weak learners over the digits in x."""
        y = np.zeros(x.shape[0])
        # Predicts the digits value based on the contribution of each learner.
        # e.predict returns an array y with values 1 and -1 (1 stands for number_a and -1 stands for number_b).
        for e in self.learners:
            y += e.predict(x)
        pos = (y>0)
        y[pos] = posval
        y[~pos] = negval
        return y

    def predict_best_singlepixel(self, x, posval=number_a, negval=number_b):
        """Per-learner predictions: one row of digit labels per weak learner."""
        experts_y = np.zeros((len(self.learners), x.shape[0]))
        # Predicts the digits value based on the contribution of one single learner (one single pixel).
        # e.predict returns an array y with values 1 and -1 (1 stands for number_a and -1 stands for number_b).
        counter = 0
        for e in self.learners:
            experts_y[counter] += e.predict(x)
            pos = (experts_y[counter]>0)
            experts_y[counter][pos] = posval
            experts_y[counter][~pos] = negval
            counter = counter + 1
        return experts_y
if __name__ == "__main__":
print("Loading dataset...")
X_train, y_train, X_test, y_test = loadDataset()
print("Filtering data based on a ={} b = {}".format(number_a,number_b))
df_train = pd.DataFrame( data={'y' : y_train } )
df_train = df_train[ (df_train.y==number_a) | (df_train.y==number_b) ]
X_train = X_train[df_train.index,...]
y_train = y_train[df_train.index,...]
X_test = []
y_test = []
X_train, X_test, y_train, y_test = train_test_split(X_train,y_train, test_size=0.2, random_state=1)
print("Size of data for training and testing. Format (elements, dimension, dimension)")
print("Training: {}".format(X_train.shape))
print("Testing: {}".format(X_test.shape))
T = 150
GAMMA = 0.05
print("T = {}, GAMMA = {}".format(T,GAMMA))
# Creates and trains a mwu classifier.
mwu = MWU(GAMMA)
P = mwu.train( train=(X_train, y_train), test=(X_test,y_test), T=T)
x_train = (X_train>0).astype(float)
x_test = (X_test>0).astype(float)
print("opop {} ioio {}".format(x_train.shape[1], x_train.shape[2]))
experts = []
for idx_row in range(x_train.shape[1]):
for idx_column in range(x_train.shape[2]):
experts.append(single_clf(X_train, y_train, idx=(idx_row, idx_column)))
acc_train_singlepixel, e_sp_train = predict_best_singlepixel(x_train, y_train, experts)
acc_test_singlepixel, e_sp_test = predict_best_singlepixel(x_test, y_test, experts)
print("\nBest single-pixel accuracy (train set): {}, pixel= {}".format(acc_train_singlepixel, e_sp_train.idx))
print("\nBest single-pixel accuracy (test set): {}, pixel= {}".format(acc_test_singlepixel, e_sp_test.idx))
| italogs/boosting-mwu-character-recognition | main.py | main.py | py | 14,805 | python | en | code | 1 | github-code | 13 |
# Maximum Multiple: print the largest number in [1, boundary] that is
# divisible by divisor (0 when no such number exists).
divisor = int(input())
boundary = int(input())

# Walk down from the boundary; the first divisible number is the answer.
# Fixes over the original: the scan no longer stops before reaching
# `divisor` itself (range went down to `divisor` exclusive, so the case
# answer == divisor was missed), and the found multiple is printed instead
# of the loop variable. The unused `previous_number` was removed.
largest_multiple = 0
for candidate in range(boundary, 0, -1):
    if candidate % divisor == 0:
        largest_multiple = candidate
        break

print(largest_multiple)
| TinaZhelyazova/02.-Python-Fundamentals | 02. Exercise Basic Syntax, Conditional Statements and Loops/04. Maximum Multiple.py | 04. Maximum Multiple.py | py | 214 | python | en | code | 0 | github-code | 13 |
70135986577 | import tqdm
import unicodedata, json
import torch
from torch.utils.data import DataLoader as DataLoader, TensorDataset
class DataProcessor:
    """Turn a CoNLL-style NER file into a PyTorch DataLoader.

    Pipeline: load documents -> build sentence-level examples (padded with
    document context up to the subword budget) -> align BIO tags with the
    tokenizer's subwords -> pack tensors into a TensorDataset/DataLoader.
    """

    def __init__(self, config_dir):
        """Read max_seq_length / batch_size / num_workers from a JSON config."""
        with open(config_dir, 'r') as openfile:
            json_object = json.load(openfile)

        self.max_seq_length = json_object["max_seq_length"]
        self.batch_size = json_object["batch_size"]
        self.num_workers = json_object["num_workers"]

    def dataloader(self, tokenizer, dataset_file):
        """Run the full preprocessing pipeline and return a DataLoader."""
        # Label scheme: CoNLL-2003 BIO tags; id 9 ("UNK") doubles as padding.
        custom_label2id = {"O": 0,
                           "B-PER": 1,
                           "I-PER": 2,
                           "B-MISC": 3,
                           "I-MISC": 4,
                           "B-LOC": 5,
                           "B-ORG": 6,
                           "I-ORG": 7,
                           "I-LOC": 8,
                           "UNK": 9
                           }

        documents = self.load_documents(dataset_file=dataset_file)
        examples = self.load_examples(documents=documents, tokenizer=tokenizer)
        final_tag_list = self.create_tag_list(documents=documents, examples=examples)
        params_list = self.create_list_params(examples=examples,
                                              max_seq_length=self.max_seq_length,
                                              tokenizer=tokenizer
                                              )
        label_id_list = self.create_label_id(examples=examples,
                                             tag_list=final_tag_list,
                                             custom_label2id=custom_label2id,
                                             tokenizer=tokenizer,
                                             max_seq_length=self.max_seq_length
                                             )
        dataloader = self.create_dataloader(params_list=params_list,
                                            label_id_tensor=label_id_list,
                                            batch_size=self.batch_size,
                                            num_workers=self.num_workers
                                            )
        return dataloader

    def load_documents(self, dataset_file):
        """Parse a CoNLL file into documents.

        Each document is a dict with parallel ``words``/``labels`` lists and
        ``sentence_boundaries`` (word offsets where sentences end).
        """
        print("Loading document")
        documents = []
        words = []
        labels = []
        sentence_boundaries = []
        with open(dataset_file) as f:
            for line in f:
                line = line.rstrip()
                if line.startswith("-DOCSTART-"):
                    # A new document begins: flush the one collected so far.
                    if words:
                        documents.append(dict(
                            words=words,
                            labels=labels,
                            sentence_boundaries=sentence_boundaries
                        ))
                        words = []
                        labels = []
                        sentence_boundaries = []
                    continue
                if not line:
                    # Blank line marks a sentence boundary
                    # (consecutive blanks are deduplicated).
                    if not sentence_boundaries or len(words) != sentence_boundaries[-1]:
                        sentence_boundaries.append(len(words))
                else:
                    # Regular token line: "<word>\t...\t<label>".
                    items = line.split("\t")
                    words.append(items[0])
                    labels.append(items[-1])

        if words:
            documents.append(dict(
                words=words,
                labels=labels,
                sentence_boundaries=sentence_boundaries
            ))

        return documents

    def load_examples(self, documents, tokenizer):
        """Split documents into sentence-level examples.

        Each example carries the sentence plus as much surrounding document
        text as fits in ``max_token_length`` subwords, together with every
        candidate entity span of at most ``max_mention_length`` subwords.
        """
        print("Loading example")
        examples = []
        max_token_length = 510
        max_mention_length = 30

        for ind, document in enumerate(documents):
            words = document["words"]
            subword_lengths = [len(tokenizer.tokenize(w)) for w in words]
            total_subword_length = sum(subword_lengths)
            sentence_boundaries = document["sentence_boundaries"]

            for i in range(len(sentence_boundaries) - 1):
                sentence_start, sentence_end = sentence_boundaries[i:i + 2]
                if total_subword_length <= max_token_length:
                    # if the total sequence length of the document is shorter than the
                    # maximum token length, we simply use all words to build the sequence
                    context_start = 0
                    context_end = len(words)
                else:
                    # if the total sequence length is longer than the maximum length, we add
                    # the surrounding words of the target sentence to the sequence until it
                    # reaches the maximum length
                    context_start = sentence_start
                    context_end = sentence_end
                    cur_length = sum(subword_lengths[context_start:context_end])

                    while True:
                        if context_start > 0:
                            if cur_length + subword_lengths[context_start - 1] <= max_token_length:
                                cur_length += subword_lengths[context_start - 1]
                                context_start -= 1
                            else:
                                break
                        if context_end < len(words):
                            if cur_length + subword_lengths[context_end] <= max_token_length:
                                cur_length += subword_lengths[context_end]
                                context_end += 1
                            else:
                                break

                # Left context; apostrophes/punctuation glue to the previous word.
                text = ""
                for word in words[context_start:sentence_start]:
                    if word[0] == "'" or (len(word) == 1 and self.is_punctuation(word)):
                        text = text.rstrip()
                    text += word
                    text += " "

                sentence_words = words[sentence_start:sentence_end]
                sentence_subword_lengths = subword_lengths[sentence_start:sentence_end]

                # Sentence itself, recording each word's character offsets.
                word_start_char_positions = []
                word_end_char_positions = []
                for word in sentence_words:
                    if word[0] == "'" or (len(word) == 1 and self.is_punctuation(word)):
                        text = text.rstrip()
                    word_start_char_positions.append(len(text))
                    text += word
                    word_end_char_positions.append(len(text))
                    text += " "

                # Right context.
                for word in words[sentence_end:context_end]:
                    if word[0] == "'" or (len(word) == 1 and self.is_punctuation(word)):
                        text = text.rstrip()
                    text += word
                    text += " "
                text = text.rstrip()

                # Every contiguous word span short enough (in subwords) is a
                # candidate entity mention.
                entity_spans = []
                original_word_spans = []
                for word_start in range(len(sentence_words)):
                    for word_end in range(word_start, len(sentence_words)):
                        if sum(sentence_subword_lengths[word_start:word_end]) <= max_mention_length:
                            entity_spans.append(
                                (word_start_char_positions[word_start], word_end_char_positions[word_end])
                            )
                            original_word_spans.append(
                                (word_start, word_end + 1)
                            )

                examples.append(dict(
                    text=text,
                    words=sentence_words,
                    entity_spans=entity_spans,
                    original_word_spans=original_word_spans,
                ))

        return examples

    def is_punctuation(self, char):
        """True when *char* is ASCII punctuation or any Unicode 'P*' category."""
        cp = ord(char)
        if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
            return True
        cat = unicodedata.category(char)
        if cat.startswith("P"):
            return True
        return False

    def create_tag_list(self, documents, examples):
        """Re-slice the document-level label stream into per-example tag lists."""
        print("Creating list of label")
        final_word_list = [example['words'] for example in examples]

        # Flatten every document's labels into one stream ...
        list_all_tag = []
        for document in documents:
            for label in document['labels']:
                list_all_tag.append(label)

        # ... then cut it back into chunks matching each example's word count.
        final_tag_list = []
        ind = 0
        for word_list in final_word_list:
            final_tag_list.append(list_all_tag[ind:ind + len(word_list)])
            ind += len(word_list)

        return final_tag_list

    def create_list_params(self, examples, max_seq_length, tokenizer):
        """Tokenize every example; return [input_ids, attention_mask] tensors."""
        print("Creating list of parameters")
        list_input_ids, list_attention_mask = [], []

        for example in examples:
            source_encoding = tokenizer(
                text=' '.join(example["words"]),
                max_length=max_seq_length,
                padding='max_length',
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True
            )
            list_input_ids.append(source_encoding["input_ids"])
            list_attention_mask.append(source_encoding["attention_mask"])

        return [torch.tensor(list_input_ids), torch.tensor(list_attention_mask)]

    def create_label_id(self, examples, tag_list, custom_label2id, max_seq_length, tokenizer):
        """Expand word-level BIO tags to subword-level label ids.

        Only the first subword of each word keeps the word's tag;
        continuation subwords get 0 ("O").  Sequences are truncated to
        max_seq_length - 2, then padded with 9 ("UNK") to max_seq_length.
        """
        label_id = []
        final_word_list = [example['words'] for example in examples]

        for i in range(len(final_word_list)):
            tmp_label_id = []
            for j in range(len(final_word_list[i])):
                token = tokenizer.tokenize(final_word_list[i][j])
                for t in range(len(token)):
                    if t == 0:
                        tmp_label_id.append(custom_label2id[tag_list[i][j]])
                    else:
                        tmp_label_id.append(0)

            if len(tmp_label_id) > max_seq_length - 2:
                tmp_label_id = tmp_label_id[:max_seq_length - 2]

            # NOTE(review): inserting label 0 at index 1 and appending label 1
            # looks like special-token (CLS/SEP) accounting -- confirm against
            # the tokenizer's actual special-token placement.
            tmp_label_id.insert(1, 0)
            tmp_label_id.append(1)
            while len(tmp_label_id) < max_seq_length:
                tmp_label_id.append(9)
            label_id.append(tmp_label_id)

        return torch.tensor(label_id)

    def create_dataloader(self, params_list, label_id_tensor, batch_size, num_workers):
        """Bundle the tensors into a TensorDataset wrapped in a DataLoader."""
        print("Creating DataLoader")
        input_ids_tensor, attention_mask_tensor = params_list
        dataset = TensorDataset(input_ids_tensor,
                                attention_mask_tensor,
                                label_id_tensor
                                )
        return DataLoader(dataset, batch_size=batch_size, num_workers=num_workers)
| XuanLoc2578/ner_mluke_vnese | ner_mluke/mydataset.py | mydataset.py | py | 14,889 | python | en | code | 0 | github-code | 13 |
6757339317 | import os
import pickle
import numpy as np
from torch.utils.data import Dataset
from e2edet.utils.det3d.general import read_from_file, read_pc_annotations
class PointDetection(Dataset):
    """Base pytorch-style dataset for 3D point-cloud detection.

    Loads per-sample metadata ("infos") from a pickled file at *info_path*
    and reads point clouds relative to *root_path*.  Subclasses override
    ``__len__`` / ``__getitem__`` behaviour as needed.
    """

    def __init__(
        self,
        root_path,
        info_path,
        num_point_features,
        test_mode=False,
        nsweeps=1,
        load_interval=1,
    ):
        super(PointDetection, self).__init__()
        self.info_path = info_path
        self.root_path = root_path
        # Number of lidar sweeps aggregated per sample.
        self.nsweeps = nsweeps
        # Keep every load_interval-th frame (1 = keep all).
        self.load_interval = load_interval
        self.num_point_features = num_point_features
        self.test_mode = test_mode

        # NOTE: _set_group_flag() calls len(self), whose hasattr guard in
        # __len__ triggers a first lazy _load_infos(); the explicit call
        # below then reloads the infos.
        self._set_group_flag()
        self._load_infos()

    def _load_infos(self):
        """Load the pickled info list, subsampled by ``load_interval``."""
        with open(self.info_path, "rb") as f:
            infos_all = pickle.load(f)

        self.infos = infos_all[:: self.load_interval]

    def _set_group_flag(self):
        """Assign every sample to group 1.

        NOTE(review): the original intent (grouping by image aspect ratio)
        is not implemented -- the flag is uniformly 1 for all samples.
        """
        self.flag = np.ones(len(self), dtype=np.uint8)

    def __getitem__(self, index):
        """Return the ``(res, points, annos)`` triple for sample *index*."""
        return self.get_sensor_data(index)

    def __len__(self):
        # Lazily load infos: __len__ can be reached (via _set_group_flag)
        # before __init__ has executed its explicit _load_infos() call.
        if not hasattr(self, "infos"):
            self._load_infos()

        return len(self.infos)

    def get_sensor_data(self, idx):
        """Build the raw, unprocessed sample for index *idx*.

        Returns:
            res: dict skeleton containing lidar settings ('type',
                'nsweeps', empty 'points'/'annotations' slots), metadata
                ('image_prefix', 'num_point_features', 'token'), empty
                'calib'/'cam' entries and 'mode' ("val" in test mode,
                "train" otherwise).
            points: point cloud loaded from the info's 'path' (which is
                first made absolute against ``root_path``).
            annos: annotations read from the same info record.
        """
        info = self.infos[idx]

        res = {
            "lidar": {
                "type": "lidar",
                "points": None,
                "annotations": None,
                "nsweeps": self.nsweeps,
            },
            "metadata": {
                "image_prefix": self.root_path,
                "num_point_features": self.num_point_features,
                "token": info["token"],
            },
            "calib": None,
            "cam": {},
            "mode": "val" if self.test_mode else "train",
        }

        # Rewrite relative paths in place so subsequent accesses are absolute.
        if not os.path.isabs(self.infos[idx]["path"]):
            self.infos[idx]["path"] = os.path.join(
                self.root_path, self.infos[idx]["path"]
            )

        points = read_from_file(self.infos[idx], self.nsweeps)
        annos = read_pc_annotations(self.infos[idx])
        return res, points, annos

    def _filter_imgs(self, min_size=32):
        """Filter images too small.

        NOTE(review): relies on ``self.img_infos``, which this class never
        defines -- confirm that subclasses set it before calling.
        """
        valid_inds = []
        for i, img_info in enumerate(self.img_infos):
            if min(img_info["width"], img_info["height"]) >= min_size:
                valid_inds.append(i)
        return valid_inds
| kienduynguyen/BoxeR | e2edet/dataset/helper/point_detection.py | point_detection.py | py | 5,338 | python | en | code | 126 | github-code | 13 |
220033965 | from pathlib import Path
from qtpy.QtCore import Qt, QItemSelection, Signal, QModelIndex
from qtpy.QtGui import QIcon, QStandardItemModel, QStandardItem
from qtpy.QtWidgets import QVBoxLayout, QWidget, QTreeView, QAbstractItemView
from happi import Client, Device, HappiItem, from_container
from happi.backends.mongo_db import MongoBackend
from happi.backends.json_db import JSONBackend
from typhos.display import TyphosDeviceDisplay
import os
from xicam.core import msg
from xicam.core.paths import site_config_dir, user_config_dir
from xicam.plugins import SettingsPlugin, manager as pluginmanager
from xicam.gui import static
# Directories scanned for happi JSON device databases.
happi_site_dir = str(Path(site_config_dir) / "happi")
happi_user_dir = str(Path(user_config_dir) / "happi")
# MongoDB credentials are taken from the environment, never hard-coded.
USER_MONGO = os.getenv("USER_MONGO")
PW_MONGO = os.getenv("PASSWD_MONGO")
class HappiClientTreeView(QTreeView):
    """Tree view listing happi clients with their devices as children.

    Emits ``sigShowControl`` with a ready control widget whenever a device
    row is selected.
    """
    sigShowControl = Signal(QWidget)
    """Tree view that displays happi clients with any associated devices as their children."""

    def __init__(self, *args, **kwargs):
        super(HappiClientTreeView, self).__init__(*args, **kwargs)
        self.setHeaderHidden(True)
        # Rows are read-only; device names cannot be edited in the view.
        self.setEditTriggers(QAbstractItemView.NoEditTriggers)

    def selectionChanged(self, selected: QItemSelection, deselected: QItemSelection) -> None:
        # Qt hook: activate only the first newly-selected index.
        selected_indexes = selected.indexes()
        if not selected_indexes:
            return
        index = selected_indexes[0]
        self._activate(index)

    def _activate(self, index: QModelIndex):
        """Show (building and caching if necessary) the control display for *index*."""
        display = index.data(HappiClientModel.displayRole)  # try to get display from model
        if not display:
            happi_item = index.data(HappiClientModel.happiItemRole)
            device = from_container(happi_item)
            try:
                device.wait_for_connection()
            except TimeoutError as ex:
                # Connection failures are only logged; the display is built anyway.
                msg.logError(ex)
            # The happi record may name a custom controller class; default to typhos.
            controller_name = happi_item.extraneous.get("controller_class", "typhos")
            controller = pluginmanager.get_plugin_by_name(controller_name, 'ControllerPlugin')
            display = controller(device)
            # Stash display back on the model
            self.model().setData(index, display, HappiClientModel.displayRole)

        self.sigShowControl.emit(display)
class HappiClientModel(QStandardItemModel):
    """Qt standard model that stores happi clients."""
    # Custom item-data roles: the raw happi item and its cached control widget.
    happiItemRole = Qt.UserRole + 1
    displayRole = Qt.UserRole + 2

    def __init__(self, *args, **kwargs):
        super(HappiClientModel, self).__init__(*args, **kwargs)
        self._clients = []

    def add_client(self, client: Client):
        """Add a happi client as a top-level row with one child row per device.

        NOTE(review): only JSON and Mongo backends are handled; any other
        backend type would leave ``client_item`` unbound (NameError) --
        confirm callers never pass one.
        """
        self._clients.append(client)
        if isinstance(client.backend, JSONBackend):
            client_item = QStandardItem(client.backend.path)
        elif isinstance(client.backend, MongoBackend):
            client_item = QStandardItem(f"{client.backend._client.HOST}/{client.backend._collection.full_name}")
        self.appendRow(client_item)
        for result in client.search():
            # add an OphydItem
            self.add_device(client_item, result.item)
        client_item.setData(client)

    def add_device(self, client_item: QStandardItem, device: Device):
        """Append *device* as a child row carrying the happi item data."""
        device_item = QStandardItem(device.name)
        device_item.setData(device, self.happiItemRole)
        client_item.appendRow(device_item)
class HappiSettingsPlugin(SettingsPlugin):
    """Settings page that discovers happi device databases and shows them
    in a tree of clients/devices."""

    def __init__(self):
        self._happi_db_dirs = [happi_site_dir, happi_user_dir]
        self._device_view = HappiClientTreeView()
        self._client_model = HappiClientModel()
        self._device_view.setModel(self._client_model)
        # Register every JSON database found in the site/user config dirs.
        for db_dir in self._happi_db_dirs:
            for db_file in Path(db_dir).glob('*.json'):
                client = Client(path=str(db_file))
                self._client_model.add_client(client)
        # Best-effort connection to a local Mongo-backed happi database;
        # credentials come from USER_MONGO / PASSWD_MONGO env vars.
        try:
            mongo_client = Client(MongoBackend(host='127.0.0.1',
                                               db='happi',
                                               collection='labview_static',
                                               user=USER_MONGO,
                                               pw=PW_MONGO,
                                               timeout=None))
            self._client_model.add_client(mongo_client)
        except Exception as e: #TODO catch exception properly
            msg.logError(e)
        widget = QWidget()
        layout = QVBoxLayout()
        layout.addWidget(self._device_view)
        widget.setLayout(layout)
        icon = QIcon(str(static.path('icons/calibrate.png')))
        name = "Devices"
        # Register the page with the settings framework before restoring state.
        super(HappiSettingsPlugin, self).__init__(icon, name, widget)
        self._device_view.expandAll()
        self.restore()

    @property
    def devices_model(self):
        """The HappiClientModel backing the tree view."""
        return self._client_model

    @property
    def devices_view(self):
        """The HappiClientTreeView widget shown on the settings page."""
        return self._device_view

    def search(self, **kwargs):
        """
        Searches all happi clients (see happi.client.Client.search)
        """
        results = []
        for client in self._client_model._clients:
            results += client.search(**kwargs)
        return results
| ihumphrey/Xi-cam.plugins.Acquire | xicam/Acquire/devices/happi.py | happi.py | py | 5,153 | python | en | code | null | github-code | 13 |
import numpy as np
import torch
import trimesh
import os

source_dir = "bosphorusReg3DMM/"
export_dir = "bosphorus_mesh/"

# os.mkdir raised FileExistsError on reruns; makedirs(exist_ok=True) is idempotent.
os.makedirs(export_dir, exist_ok=True)

# Shared triangulation: every registered face mesh uses the same topology.
faces = np.load("tri.npy")

# Each .pt file holds one subject's vertex tensor.
files = [f for f in os.listdir(source_dir) if f.endswith('.pt')]

for f in files:
    # splitext is robust to extra dots inside the basename.
    name = os.path.splitext(f)[0]
    vertices = torch.load(source_dir + f).numpy()
    mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
    mesh.export(export_dir + "{}.obj".format(name))
| w00zie/3d_face_class | data/create_mesh.py | create_mesh.py | py | 435 | python | en | code | 8 | github-code | 13 |
71082098257 | import random
import numpy as np
from activation import linear_function
class perceptron:
    """Single-feature perceptron trained with a simple delta rule.

    NOTE(review): the note inside __init__ describes a (3, 1) weight
    matrix, but ``self.weight`` here is a single scalar (one input
    feature, one output).
    """

    def __init__(self):
        # Seed for reproducible initial weight.
        random.seed(1)
        """
        the weight matrix with random values between -1 and 1.
        The weight matrix has shape (3, 1) as it corresponds to 3 input
        features and 1 output.
        """
        # Scalar weight drawn uniformly from [-1, 1); bias starts at zero.
        self.weight = 2 * random.random() - 1
        self.bias = 0
        self.learning_rate = 0.001

    # Forward pass through the perceptron.
    def forward(self, x, activation=linear_function):
        """Return ``activation(x . weight + bias)``."""
        y_ = np.dot(x, self.weight) + self.bias
        return activation(y_)

    # Update rule for the perceptron during training.
    def update_rule(self, y, y_, X):
        """Nudge weight and bias toward the target.

        The step is ``learning_rate * (y - y_)``, additionally scaled by
        the input ``X`` for the weight.
        """
        change = y - y_
        self.weight = self.weight + self.learning_rate * (change) * X
        self.bias = self.bias + self.learning_rate * (change)

    # Training function for the perceptron.
    def train(
        self,
        train_inputs,
        train_outputs,
        epochs,
        activation=linear_function,
    ):
        """Run *epochs* passes over the training pairs, printing each
        prediction and applying the update rule after every sample."""
        for i in range(epochs):
            print(f"taining {i}")
            for X, y in zip(train_inputs, train_outputs):
                y_ = self.forward(X, activation)
                print(f"y = {y}, predict:{y_}")
                # Update the weights and bias using the update rule.
                self.update_rule(y, y_, X)

    # Prediction function for the perceptron.
    def predict(self, X):
        # Forward pass with the default (linear) activation.
        return self.forward(X)
| wayneotemah/ML-from-scratch | perceptron/perceptron.py | perceptron.py | py | 1,926 | python | en | code | 0 | github-code | 13 |
12696829725 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import Crypto.Cipher.PKCS1_v1_5
import Crypto.Hash.SHA
import Crypto.PublicKey.RSA
import Crypto.Random
import Crypto.Signature.PKCS1_v1_5
def _to_string(val):
if isinstance(val, str):
return val
if isinstance(val, unicode):
return val.encode('utf8')
if val is None:
return ""
return str(val)
class RSAUtil(object):
    """
    RSA helper collection (Python 2 / PyCrypto).

    Provides PKCS#1 v1.5 chunked encryption/decryption, SHA-1 based
    signing/verification, and key-pair generation.  The `/` divisions
    below rely on Python 2 integer-division semantics.
    """
    # Concrete key class, fetched via getattr because the name is private.
    _rsa_obj = getattr(Crypto.PublicKey.RSA, '_RSAobj')

    @classmethod
    def load_key(cls, str_key):
        """
        Load a public or private key from its string representation.
        :param str_key: PEM/DER key material (str or unicode)
        :return: loaded RSA key object
        """
        return Crypto.PublicKey.RSA.importKey(_to_string(str_key))

    @classmethod
    def _pretreat_key(cls, key):
        # Accept either an already-loaded key object or its string form.
        if isinstance(key, (bytes, str, unicode)):
            return cls.load_key(key)
        return key

    @classmethod
    def pub_encrypt(cls, text, key):
        """
        Encrypt *text* with the public key (PKCS#1 v1.5), chunk by chunk.
        :param text: plaintext
        :param key: public key object or string
        :return: base64-encoded ciphertext
        """
        key = cls._pretreat_key(key)
        assert isinstance(key, cls._rsa_obj)

        # Chunk size: modulus bytes minus 28 bytes of padding headroom
        # (PKCS#1 v1.5 needs at least 11) -- Python 2 integer division.
        encrypt_size = (key.size() + 1) / 8 - 28
        crypt_text = _to_string(text)
        out_text = ""
        pkcs_obj = Crypto.Cipher.PKCS1_v1_5.new(key)
        while crypt_text:
            tmp_text = crypt_text[:encrypt_size]
            crypt_text = crypt_text[encrypt_size:]
            out_text += pkcs_obj.encrypt(tmp_text)
            pass
        return base64.b64encode(out_text)

    @classmethod
    def pri_decrypt(cls, text, key):
        """
        Decrypt base64-encoded ciphertext with the private key.
        :param text: base64 ciphertext
        :param key: private key object or string
        :return: recovered plaintext
        """
        key = cls._pretreat_key(key)
        assert isinstance(key, cls._rsa_obj)

        # Each ciphertext block is exactly one modulus wide.
        decrypt_size = (key.size() + 1) / 8
        crypt_text = base64.b64decode(_to_string(text))
        out_text = ""
        pcks_obj = Crypto.Cipher.PKCS1_v1_5.new(key)
        while crypt_text:
            tmp_text = crypt_text[:decrypt_size]
            crypt_text = crypt_text[decrypt_size:]
            out_text += pcks_obj.decrypt(tmp_text, "")
        return out_text

    @classmethod
    def calc_signature(cls, app_id, biz_params, pri_key):
        """
        Sign "<app_id> <biz_params>" with the private key (SHA-1 + PKCS#1 v1.5).
        :param app_id: integer application id
        :param biz_params: business payload
        :param pri_key: private key object or string
        :return: base64-encoded signature
        """
        pri_key = cls._pretreat_key(pri_key)
        signer = Crypto.Signature.PKCS1_v1_5.new(pri_key)
        r = signer.sign(Crypto.Hash.SHA.new("%d %s" % (app_id, _to_string(biz_params))))
        return base64.b64encode(r)

    @classmethod
    def verify_signature(cls, app_id, biz_params, sign, pub_key):
        """
        Verify a signature produced by calc_signature with the public key.
        :param app_id: integer application id
        :param biz_params: business payload
        :param sign: base64-encoded signature
        :param pub_key: public key object or string
        :return: True when the signature matches
        """
        pub_key = cls._pretreat_key(pub_key)
        signer = Crypto.Signature.PKCS1_v1_5.new(pub_key)
        return signer.verify(
            Crypto.Hash.SHA.new("%d %s" % (app_id, _to_string(biz_params))),
            base64.b64decode(sign)
        )

    @classmethod
    def generate_key_pair(cls, size=None):
        """
        Generate an RSA key pair.
        :param size: modulus size in bits, defaults to 1024
        :return: (public_pem, private_pem)
        """
        random_generator = Crypto.Random.new().read
        r = Crypto.PublicKey.RSA.generate(size or 1024, random_generator)
        return r.publickey().exportKey(), r.exportKey()

    pass
| plusplus1/rsademo | python/RSAUtil.py | RSAUtil.py | py | 3,459 | python | en | code | 0 | github-code | 13 |
#https://leetcode.com/problems/reverse-only-letters/submissions/
class reveseCharacters(object):

    def main(self):
        print(self.reverseCharacters("a-bC-dEf-ghIj"))

    def reverseCharacters(self, S):
        """Reverse only the alphabetic characters of S, leaving every
        non-letter at its original index."""
        letters = [ch for ch in S if ch.isalpha()]
        # Wherever a letter belongs, take the next one from the end of the
        # collected letters; non-letters stay put.
        out = [letters.pop() if ch.isalpha() else ch for ch in S]
        return ''.join(out)


if __name__=="__main__":
    reveseCharacters().main()
| soniaarora/Algorithms-Practice | Solved in Python/LeetCode/String/reverseonlyCharacters.py | reverseonlyCharacters.py | py | 690 | python | en | code | 0 | github-code | 13 |
74792261778 | from django.shortcuts import render
from .models import Product, ProductImages
# get all products
def productlist(request):
    """Render the product list page with every Product."""
    products = Product.objects.all()
    return render(request, 'Product/product_list.html', {'product_list': products})
# get all the data of one product
def productdetail(request, product_slug):
    """Render the detail page for the product matching *product_slug*."""
    product = Product.objects.get(slug=product_slug)
    images = ProductImages.objects.filter(product=product)
    # Context key 'prodcut_images' is kept as-is (misspelled) because the
    # template presumably references it by that exact name.
    context = {'product_detail': product, 'prodcut_images': images}
    return render(request, 'Product/product_detail.html', context)
| LawrenceDavy13/resaleshop | venv/src/product/views.py | views.py | py | 683 | python | en | code | 0 | github-code | 13 |
38661183060 | #!/usr/bin/python
import Adafruit_DHT
import datetime
import sqlite3
from sqlite3 import Error
def create_connection(db):
    """Open a SQLite connection to *db*; print the error and return None on failure."""
    try:
        return sqlite3.connect(db)
    except Error as err:
        print(err)
        return None
def create_table(con, create_sql):
    """Run a DDL statement on *con*; sqlite errors are printed, not raised."""
    try:
        cur = con.cursor()
        cur.execute(create_sql)
        cur.close()
    except Error as err:
        print(err)
def insert2db(con, val):
    """Insert one (temp, hum, date) tuple into measurements and commit."""
    cur = con.cursor()
    cur.execute(
        "INSERT INTO measurements(temp, hum, date) VALUES(?,?,?)",
        val,
    )
    con.commit()
    cur.close()
def return_table(con):
    """Print every row currently stored in the measurements table."""
    cur = con.cursor()
    cur.execute("SELECT * from measurements")
    for row in cur.fetchall():
        print(row)
    cur.close()
# Open (or create) the local SQLite database and ensure the measurements
# table exists.
conn = create_connection('pysqlite.db')

sql_create = "CREATE TABLE IF NOT EXISTS measurements (id INTEGER PRIMARY KEY AUTOINCREMENT, temp REAL, hum REAL, date TIMESTAMP);"
create_table(conn, sql_create)

# DHT11 sensor wired to BeagleBone pin P8_10.
sensor = Adafruit_DHT.DHT11
pin = 'P8_10'
temp = []
hum = []

read = True # change to False to display the stored table instead of sampling

if read:
    while True:
        humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
        if humidity is not None and temperature is not None:
            temp.append(temperature)
            hum.append(humidity)
        if len(temp) == 18:
            # Trimmed mean of 18 samples: drop the extremes, average the rest.
            temp.remove(max(temp)); temp.remove(min(temp))
            t = sum(temp)/len(temp)
            hum.remove(max(hum)); hum.remove(min(hum))
            h = sum(hum)/len(hum)
            d = datetime.datetime.now()
            values = (t, h, d)
            insert2db(conn, values)
            temp.clear()
            hum.clear()
        else:
            # NOTE(review): this else pairs with the len(temp) == 18 check,
            # so the message prints on every loop until the buffer fills,
            # not only when a sensor read fails -- confirm intent.
            print('Failed to get reading. Try again!')
else:
    return_table(conn)
| kamil271e/embedded-systems-lab | lab5/src/main.py | main.py | py | 1,830 | python | en | code | 0 | github-code | 13 |
28432568546 | import uuid
from datetime import datetime
from src.common.database import Database
class Installment(object):
    """Mongo-backed record of one production installment of an indent.

    Date arguments arrive as 'YYYY-MM-DD' strings and are stored combined
    with the current wall-clock time; falsy dates are stored unchanged.
    """

    @staticmethod
    def _combine_with_now(date_str):
        """Parse a 'YYYY-MM-DD' string and attach the current time.

        Falsy values (None, '') are returned unchanged so optional dates
        survive the round trip through the database.
        """
        if not date_str:
            return date_str
        return datetime.combine(datetime.strptime(date_str, '%Y-%m-%d').date(),
                                datetime.now().time())

    def __init__(self, installment_num, intent_id, district, center, units_required, garment_type, uploaded_date,
                 deadline, total_wages, units_pm, user_id, units_received=None, units_assigned=None, eo=None,
                 garment_size=None, _id=None, set_id=None, material_received_date=None, cut_piece_units=None,
                 status=None, units_sanctioned=None):
        self.intent_id = intent_id
        self.installment_num = installment_num
        self.district = district
        self.center = center
        self.garment_type = garment_type
        self.garment_size = garment_size
        self.set_id = set_id
        self.uploaded_date = self._combine_with_now(uploaded_date)
        self.material_received_date = self._combine_with_now(material_received_date)
        self.units_required = units_required
        # Counters default to 0 so arithmetic updates never see None.
        self.cut_piece_units = 0 if cut_piece_units is None else cut_piece_units
        self.units_assigned = 0 if units_assigned is None else units_assigned
        self.units_received = 0 if units_received is None else units_received
        self.units_sanctioned = 0 if units_sanctioned is None else units_sanctioned
        self.deadline = self._combine_with_now(deadline)
        self.total_wages = total_wages
        # Bug fix: the status argument used to be ignored, so every instance
        # (including ones rebuilt from the database) was reset to "Ongoing".
        self.status = "Ongoing" if status is None else status
        self.units_pm = units_pm
        self.eo = eo
        self.user_id = user_id
        self._id = uuid.uuid4().hex if _id is None else _id

    def save_to_mongo(self):
        """Persist this installment in the 'installments' collection."""
        Database.insert(collection='installments', data=self.json())

    @classmethod
    def update_installments(cls, _id, district, center, received_date, garment_type, units_required,
                            deadline, total_wages, units_pm, set_id, eo, installment_num):
        """Overwrite the editable fields of an existing installment."""
        deadline = cls._combine_with_now(deadline)
        received_date = cls._combine_with_now(received_date)
        Database.update_installment(collection='installments', query={'_id': _id}, district=district,
                                    installment_num=installment_num, center=center, garment_type=garment_type,
                                    received_date=received_date, units_required=units_required,
                                    deadline=deadline, total_wages=total_wages, units_pm=units_pm, set_id=set_id, eo=eo)

    @classmethod
    def update_received(cls, _id, cut_piece_units, material_received_date):
        """Record cut-piece material received against an installment."""
        material_received_date = cls._combine_with_now(material_received_date)
        Database.update_received_units(collection='installments', query={'_id': _id}, cut_piece_units=cut_piece_units,
                                       material_received_date=material_received_date)

    @classmethod
    def update_assigned(cls, _id, units_assigned, units_received):
        """Update the assigned/received unit counters."""
        Database.update_assigned_units(collection='installments', query={'_id': _id}, units_assigned=units_assigned,
                                       units_received=units_received)

    @classmethod
    def update_status(cls, _id):
        """Advance the installment's status via the Database helper."""
        Database.update_status(collection='installments', query={'_id': _id})

    @classmethod
    def update_delivery(cls, _id, units_delivered):
        """Record delivered units."""
        Database.update_delivery(collection='installments', query={'_id': _id},
                                 units_delivered=units_delivered)

    @classmethod
    def update_status_reverse(cls, _id):
        """Undo a status change."""
        Database.update_status_reverse(collection='installments', query={'_id': _id})

    @classmethod
    def update_installment_transaction_delete(cls, _id, units_assigned_new):
        """Adjust assigned units after a transaction is deleted."""
        Database.update_installment_transaction_delete(collection='installments', query={'_id': _id},
                                                       units_assigned_new=units_assigned_new)

    @classmethod
    def delete_installments_indent_removed(cls, indent_id):
        """Delete every installment belonging to a removed indent."""
        Database.installments_indent_delete(collection='installments', query={'intent_id': indent_id})

    def json(self):
        """Serializable dict representation used as the Mongo document."""
        return {
            'intent_id': self.intent_id,
            'installment_num': self.installment_num,
            'district': self.district,
            'center': self.center,
            'garment_type': self.garment_type,
            'garment_size': self.garment_size,
            'set_id': self.set_id,
            'uploaded_date': self.uploaded_date,
            'cut_piece_units': self.cut_piece_units,
            'material_received_date': self.material_received_date,
            'units_required': self.units_required,
            'units_assigned': self.units_assigned,
            'units_received': self.units_received,
            'units_sanctioned': self.units_sanctioned,
            'deadline': self.deadline,
            'total_wages': self.total_wages,
            'status': self.status,
            'units_pm': self.units_pm,
            'user_id': self.user_id,
            'eo': self.eo,
            '_id': self._id,
        }
| karthigeyankalyan/CooperativeSocieties | src/models/installment.py | installment.py | py | 6,193 | python | en | code | 0 | github-code | 13 |
17332218981 | import re
from MarriageValidation import create_date, at_least_age
#############################################
#### US 12
#### Check that the parents aren't too old
#############################################
def old_parents_too_old(tags, ged_file):
    """US12: return False (printing which parent) when a mother is more than
    60 years, or a father more than 80 years, older than any of their
    children; True otherwise."""
    for family in ged_file:
        if not family['CHIL']:
            continue
        # GEDCOM pointers like '@I3@' -> zero-based index into tags.
        husband_idx = int(re.sub(r'\D', '', str(family['HUSB']))) - 1
        wife_idx = int(re.sub(r'\D', '', str(family['WIFE']))) - 1
        for child_ref in family['CHIL']:
            child_idx = int(re.sub(r'\D', '', child_ref)) - 1
            child_age = int(tags[child_idx]['AGE'])
            if int(tags[wife_idx]['AGE']) - child_age > 60:
                print('Mother is too old.')
                return False
            if int(tags[husband_idx]['AGE']) - child_age > 80:
                print('Father is too old')
                return False
    return True
# retains the date comparison logic from above, but receives input during file data read
def check_parent_age(parent_age, old_age):
if old_age > parent_age:
return False
return True
def parent_age_check(max_age, parent_birth, child_birth):
p_birth = create_date(parent_birth.split(' ', 2)[2], parent_birth.split(' ', 2)[1], parent_birth.split(' ', 2)[0])
c_birth = create_date(child_birth.split(' ', 2)[2], child_birth.split(' ', 2)[1], child_birth.split(' ', 2)[0])
if at_least_age(max_age, p_birth, c_birth) is True:
return False
return True
| EricLin24/SSW555-DriverlessCar | ParentsNotTooOld.py | ParentsNotTooOld.py | py | 1,784 | python | en | code | 0 | github-code | 13 |
9069122628 | import crawl as crawler
import crawl_from_files as crawler_files
import os
from os import path
import preprocessor as pre
import measure as mea
print('* '*10+'MENU'+'* '*10)
print('* 1.crawler from default directory *')
print('* 2.crawler from your website *')
print('* '*10+'* * '+'* '*10)
choose = int(input('what do you want to do ? '))
dir = ''
if(choose == 1):
crawler_files.run('topic')
elif(choose == 2):
result_list = []
url = input('input your url: ')
if('http://' not in url and 'https://' not in url ):
dir = url
url = 'http://' + url
else:
dir = url.split('/')[2] #get domain name to create dir
if(not path.exists(dir)):
os.mkdir(dir)
_path = dir+'/data.txt'
f = open(_path,'w',encoding='utf-8')
# create dir TF_IDF
if(not path.exists('TF_IDF')):
os.mkdir('TF_IDF')
# create dir bag of word
if(not path.exists('BOW')):
os.mkdir('BOW')
tf_file = open('TF_IDF/data.txt','w+')
bow_file = open('BOW/data.txt','w+')
f.write(crawler.crawl(url))
result_list.append(pre.preprocessor(_path))
print('done!')
continous = int(input("do you want to get some topic from your website? "))
if(continous):
ls_url = crawler.getUrl(url)
[print(str(i+1)+":"+ls_url[i]) for i in range(len(ls_url))]
check = 1
while check:
option = int(input("choose your option? "))
url_option = ''
temp=ls_url[option-1].split('/')[2:]
for string in temp:
url_option+=string
if('.html' in url_option):
url_option = url_option[:-4]
if(not path.exists(dir+'/'+url_option)):
os.mkdir(dir+'/'+url_option)
_path = dir+'/'+url_option+'/data.txt'
f = open(_path,'w',encoding='utf-8')
f.write(crawler.crawl(ls_url[option-1]))
result_list.append(pre.preprocessor(_path))
check=int(input('do you want to contious!'))
tf_idf = mea.TF_IDF(result_list)
[tf_file.write(i) for i in tf_idf] # run solve tf_idf
bow = mea.BoW(result_list)
bow_file.write(str(bow)) # run solve bow
mea.cosin(bow) # run solve cosin
else:
print('somthing went wrong !!!') | nvtuehcmus/datamining | main.py | main.py | py | 2,300 | python | en | code | 0 | github-code | 13 |
24724836441 | # The Weather app
# Write a console application which takes as an input a city name and returns current weather in the format of your choice.
# For the current task, you can choose any weather API or website or use openweathermap.org
import requests
KEY = "5fdd472908385dc9e4ee0706958a2a6e"
def get_weather(lat, lon):
api_call = f"https://api.openweathermap.org/data/2.5/weather?lat={lat}&lon={lon}&appid={KEY}"
request = requests.get(api_call).json()
weather = request["weather"][0]["description"]
temp = request["main"]["temp"]
temp_cels = round((int(temp) - 273.15), 2)
humidity = request["main"]["humidity"]
wind = request["wind"]["speed"]
city, country = request["name"], request["sys"]["country"]
print(f'{city}, {country}\n'
f'weather: {weather}\n'
f'temp C: {temp_cels}\n'
f'humidity: {humidity}\n'
f'wind: {wind}\n')
def coordinates_by_name(name, country="UA", limit=1):
api_call = f"http://api.openweathermap.org/geo/1.0/direct?q={name},{country}&limit={limit}&appid={KEY}"
request = requests.get(api_call).json()
lat = request[0]["lat"]
lon = request[0]["lon"]
return lat, lon
def main():
city = input('Enter <city>: ')
country = input('Enter <country code>: ')
try:
lat, lon = coordinates_by_name(city, country)
return get_weather(lat, lon)
except IndexError:
print(f'Not found\n')
return main()
if __name__ == '__main__':
while True:
main()
| alex-raspopov/python_group_01.11.2022 | Homework/Oleksandr Raspopov/lesson_36_http/les36_tsk3_weather.py | les36_tsk3_weather.py | py | 1,520 | python | en | code | null | github-code | 13 |
24846814594 | from random import Random
import numpy as np
import pandas as pd
def split_dataset(data, val_perc=0.2):
val_size = int(len(data) * val_perc)
TR, TS = data[:-val_size], data[-val_size:]
features = 1 if len(data.shape) == 1 else data.shape[-1]
return \
TR[:-1].reshape(-1, features), \
TS[:-1].reshape(-1, features), \
TR[1:].reshape(-1, features), \
TS[1:].reshape(-1, features)
def noisy_sin(rows=2000, val_perc=0.2):
data = np.array([[np.sin(y) + np.random.uniform(high=0.01)] for y in range(rows)])
return split_dataset(data, val_perc)
def cardano(val_perc=0.2):
df = pd.read_csv("./dataset/cardano_dataset.csv")
df.drop(['Date', 'Vol.'], axis=1, inplace=True)
df = df.iloc[::-1] # reverse
data = df.to_numpy()
return split_dataset(data, val_perc)
def mg17(val_perc=0.2):
with open(f'./dataset/MG17.csv') as file:
data = file.read().split('\n')[:-1][0]
data = np.array([float(r) for r in data.split(',')])
return split_dataset(data, val_perc)
def sincode(val_perc=0.2):
df = pd.read_csv("./dataset/sincode.csv")
df.drop(['timestamp', 'id', 'sub_id', 'numero_seriale'], axis=1, inplace=True)
df = df.iloc[::-1] # reverse
data = df.to_numpy()[:, :1]
return split_dataset(data, val_perc)
| GeremiaPompei/esn | src/dataloader/dataloader.py | dataloader.py | py | 1,320 | python | en | code | 0 | github-code | 13 |
69975976019 | """
const int ARRAY_SIZE = 10;
int intArray[ARRAY_SIZE] = {87, 28, 100, 78, 84, 98, 75, 70, 81, 68};
int start = 0;
int end = ARRAY_SIZE - 1;
for (int i = start + 1; i <= end; i++) {
for (int j = i; j > start && intArray[j-1] > intArray[j]; j--) {
int temp = intArray[j-1];
intArray[j-1] = intArray[j];
intArray[j] = temp;
}
}
An insertion sort is not the most efficient sort for most circumstances,
and to tell the truth, the previous code is not even the most efficient way to
perform an insertion sort. It is reasonably efficient for small to moderately
sized arrays, however, and it is simple enough that it can be memorized.
It’s not enough to have access to someone else’s sorting code
that you don’t fully understand.
"""
lst = [87, 28, 100, 78, 84, 98, 75, 70, 81, 68]
print(lst)
start = 1 # start from second element
end = len(lst)
# INSERTION SORT ALGORITHM
for i in range(start, end):
current = lst[i]
j = i - 1
# while index is in range (positive number) and previous element is greater than current,
# swap the current value down one position in the array
left = lst[j]
while j >= 0 and left > current:
lst[j + 1] = left
j -= 1
lst[j + 1] = current
print(lst)
# input [87, 28, 100, 78, 84, 98, 75, 70, 81, 68]
# output [28, 68, 70, 75, 78, 81, 84, 87, 98, 100]
"""
first round:
j = 0
element = 28
lst[0] = 87
lst[0] > element (87 > 28)? yes?
swaps 28 for 87
lower j by 1
did we reach the end on the left? yes?
if the order changed, set the last swapped value
if the order did not change, it does nothing visible (overwrite with the same value)
""" | chicocheco/tlp-python | insertion-sort.py | insertion-sort.py | py | 1,654 | python | en | code | 0 | github-code | 13 |
40980097668 | from django.urls import path
from .views import List_medico, Update_medico, Create_medico
url_patterns = [
path('', List_medico, name='list_medico'),
path('new', Create_medico, name='create_medico'),
path('Update', Update_medico, name='update_medico'),
]
#Crude de médicos | Pachequim/MedicalSys | MedicalSys/main/projeto/urls.py | urls.py | py | 288 | python | pt | code | 0 | github-code | 13 |
16914609932 | #!/usr/bin/python
import os
import argparse as ap
from mailman.interfaces.messages import IMessageStore
from github3 import login
def main():
description = """Turn a mailing list discussion into a Github issue"""
parser = ap.ArgumentParser(description=description)
disc = 'the url of the list discussion'
parser.add_argument('disc', help=disc)
repo = 'the url of the github repository'
parser.add_argument('repo', help=repo)
ghcred = 'the github credential file'
parser.add_argument('--ghcred', default=os.path.expanduser('~/.ghcred'),
help=ghcred)
parser.parse_args()
if __name__ == "__main__":
main()
| gidden/list2issue | list2issue.py | list2issue.py | py | 677 | python | en | code | 0 | github-code | 13 |
73685125777 | import sys, io
from PIL import Image
def convert_to_png(infile):
im = Image.open(infile)
xsize, ysize = im.size
size = xsize if xsize > ysize else ysize
im_res = Image.new('RGBA', (size, size), (255, 255, 255, 0))
im_res.paste(im, (int((size-xsize)/2), int((size-ysize)/2) ))
if xsize > ysize:
im_res = im.resize((512, int(ysize/xsize*512)))
else:
im_res = im.resize((int(xsize/ysize*512), 512))
print('all is cool')
out = io.BytesIO()
im_res.save(out, 'PNG')
out.seek(0)
return out
if __name__ == '__main__':
with open('photo1.jpg', 'rb') as fil1:
myfile = convert_to_png(fil1)
with open('photo2.png', 'wb') as fil2:
fil2.write(myfile.read())
| Graftiger/Lab4_XLA | ImageToSticker_converter/src/ToPng.py | ToPng.py | py | 755 | python | en | code | 0 | github-code | 13 |
6396271465 | '''
WAP to sort the given URLs based on their frequency.
When two or more URLs have same frequency count then print the lexicographically smaller URL first.
'''
from collections import Counter
from collections import OrderedDict
url_list = list(input("Enter URL: ").split())
frequency = Counter(url_list)
result = [item for items, c in Counter(sorted(url_list)).most_common() for item in [items] * c]
result = list(OrderedDict.fromkeys(result))
print("\n\nResult: "+str(result))
#www.google.com www.twitter.com www.google.com www.fb.com | miral25/SIMPLE-PYTHON-PROGRAM | PYTHON SIMPLE PROGRAM/45.py | 45.py | py | 562 | python | en | code | 0 | github-code | 13 |
38258474111 | # find best model
import numpy as np
import tensorflow as tf
import autokeras as ak
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train[:6000].reshape(-1, 28, 28, 1)/255.
x_test = x_test[:1000].reshape(-1, 28, 28, 1)/255.
y_train = y_train[:6000]
y_test = y_test[:1000]
model = ak.ImageClassifier(
overwrite=True,
max_trials=1,
loss = 'mse',
metrics = ['acc']
)
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
es = EarlyStopping(monitor = 'val_loss', patience = 4)
lr = ReduceLROnPlateau(patience = 2, factor = 0.5, verbose = 1)
filepath = 'C:/data/h5/autokeras/'
mc = ModelCheckpoint(filepath, save_best_only=True, verbose =1)
model.fit(x_train, y_train, epochs = 1, validation_split=0.2, callbacks=[es,lr,mc])
results = model.evaluate(x_test, y_test)
print(results)
model2 = model.export_model()
model2.save('C:/data/h5/autokeras/keras103.h5')
best_model = model.tuner.get_best_model()
best_model.save('C:/data/h5/autokeras/keras105.h5')
print('=========Done=========') | dongjaeseo/study | keras3/keras105_ak_best_model.py | keras105_ak_best_model.py | py | 1,107 | python | en | code | 2 | github-code | 13 |
30141748620 | from tkinter import * # Импорт библиотеки для создания графического интерфейса
import random # подключение модуля случайных чисел random
import Task_One
import Task_Two
import Task_Three
# Создание графического интерфейса
# Создаем окно
root = Tk()
root.title("Лабораторная работа 1")
root.geometry('720x480')
theLabel = Label(root, text="Лабораторная работа 1.")
theLabel.pack()
# Создание объекта класса
LOC = Task_One.LinkedList()
LOC2 = Task_One.LinkedList()
for i in range(20):
LOC.add(round(random.random(), 1))
textt = "ЛОС, составленный из случайных чисел:"
textt_Label = Label(root, text=textt)
LOC_Label = Label(root, text=LOC.__str__(), wraplength=700)
texxxt = "ЛОС без повторяющихся значений:"
texxxt_Label = Label(root, text=texxxt)
Rand_Label = Label(root)
# Создание объекта класса
Q = Task_Two.Queue()
Q2 = Task_Two.Queue()
for i in range(20):
Q.enqueue(round(random.randint(0, 100)))
Qt = "Очередь, составленная из случайных чисел:"
Qt_Label = Label(root, text=Qt)
Qtt_Label = Label(root, text=Q.items, wraplength=700)
Qttt = "Количество простых чисел, содержащихся в очереди:"
Qttt_Label = Label(root, text=Qttt)
Simple_Label = Label(root)
# Создание объекта класса
Stack = Task_Three.Stack()
St2 = Task_Three.Stack()
for i in range(20):
Stack.push(round(random.randint(0, 100)))
St = "Стек, составленный из случайных чисел:"
St_Label = Label(root, text=St)
Stt_Label = Label(root, text=Stack.items, wraplength=700)
Sttt = "Количество простых чисел, содержащихся в стеке:"
Sttt_Label = Label(root, text=Sttt)
Prime_Label = Label(root)
Entry_text = Label(root, text="Введите значение:")
message = StringVar()
EntryA = Entry(root, width=10, font='Arial 14', textvariable=message) # создаем текстовое поле ввода
Entry_btn = Button(root, text="Добавить значение")
Res_btn = Button(root, text="Завершить ввод")
Res_Label = Label(root)
Ress_Label = Label(root)
def Add_One():
if message.get() != '':
r = float(message.get())
LOC2.add(r)
EntryA.delete(0, END)
def Add_Two():
if message.get() != '':
Q2.enqueue(int(message.get()))
EntryA.delete(0, END)
def Add_Three():
if message.get() != '':
r = int(message.get())
St2.push(r)
EntryA.delete(0, END)
def Res_One():
Res_Label.config(text=LOC2.__str__())
Res_Label.place(x=20, y=370)
Ress_Label.config(text=Task_One.LinkedList.RemoveDuplicates(LOC2))
Ress_Label.place(x=20, y=400)
def Res_Two():
Res_Label.config(text=Q2.items)
Res_Label.place(x=20, y=370)
Ress_Label.config(text=Task_Two.Queue.isPrime(Q2))
Ress_Label.place(x=20, y=400)
def Res_Three():
Res_Label.config(text=St2.items)
Res_Label.place(x=20, y=370)
Ress_Label.config(text=Task_Three.Stack.isPrime(St2))
Ress_Label.place(x=20, y=400)
# Метод обработки нажатия на кнопку
def button_clicked():
taskOneButton.pack_forget()
taskTwoButton.pack_forget()
taskThreeButton.pack_forget()
# Функция реализации задания 1
def task_one():
button_clicked()
taskLabel1.pack()
backButton.pack()
textt_Label.place(x=20, y=160)
LOC_Label.place(x=20, y=180)
texxxt_Label.place(x=20, y=200)
Rand_Label['text'] = Task_One.LinkedList.RemoveDuplicates(LOC)
Rand_Label.place(x=20, y=220)
Entry_text.place(x=20, y=260)
EntryA.place(x=20, y=280)
Entry_btn['command'] = Add_One
Entry_btn.place(x=20, y=310)
Res_btn['command'] = Res_One
Res_btn.place(x=20, y=340)
# Функция реализации задания 2
def task_two():
button_clicked()
taskLabel2.pack()
backButton.pack()
Qt_Label.place(x=20, y=160)
Qtt_Label.place(x=20, y=180)
Qttt_Label.place(x=20, y=200)
Simple_Label['text'] = Task_Two.Queue.isPrime(Q)
Simple_Label.place(x=20, y=220)
Entry_text.place(x=20, y=260)
EntryA.place(x=20, y=280)
Entry_btn['command'] = Add_Two
Entry_btn.place(x=20, y=310)
Res_btn['command'] = Res_Two
Res_btn.place(x=20, y=340)
# Функция реализации задания 3
def task_three():
button_clicked()
taskLabel3.pack()
backButton.pack()
St_Label.place(x=20, y=160)
Stt_Label.place(x=20, y=180)
Sttt_Label.place(x=20, y=200)
Prime_Label['text'] = Task_Three.Stack.isPrime(Stack)
Prime_Label.place(x=20, y=220)
Entry_text.place(x=20, y=260)
EntryA.place(x=20, y=280)
Entry_btn['command'] = Add_Three
Entry_btn.place(x=20, y=310)
Res_btn['command'] = Res_Three
Res_btn.place(x=20, y=340)
# Метод обработки кнопки "Назад"
def button_back():
taskOneButton.pack()
taskTwoButton.pack()
taskThreeButton.pack()
taskLabel1.pack_forget()
taskLabel2.pack_forget()
taskLabel3.pack_forget()
backButton.pack_forget()
EntryA.place_forget()
Entry_text.place_forget()
Entry_btn.place_forget()
Res_btn.place_forget()
Res_Label.place_forget()
Ress_Label.place_forget()
textt_Label.place_forget()
LOC_Label.place_forget()
texxxt_Label.place_forget()
Rand_Label.place_forget()
Qt_Label.place_forget()
Qtt_Label.place_forget()
Qttt_Label.place_forget()
Simple_Label.place_forget()
St_Label.place_forget()
Stt_Label.place_forget()
Sttt_Label.place_forget()
Prime_Label.place_forget()
taskOneButton = Button(root, text="Задание 1", command=task_one, fg='red')
taskTwoButton = Button(root, text="Задание 2", command=task_two, fg='blue')
taskThreeButton = Button(root, text="Задание 3", command=task_three, fg='green')
taskLabel1 = Label(root, wraplength=700,
text="Сформировать ЛОС, элементами которого являются вещественные числа (среди которых есть "
"повторяющиеся значения). Составить программу, которая по списку строит новый список, "
"в котором отсутствуют повторяющиеся значения.")
taskLabel2 = Label(root, wraplength=700,
text="Составить программу построения очереди, содержащей целые числа. Вычислить количество "
"простых чисел, содержащихся в очереди.")
taskLabel3 = Label(root, wraplength=700,
text="Сформируйте исходный стек, элементами которого являются целые числа. Составить программу, "
"которая находит количество простых чисел в стеке.")
taskOneButton.pack()
taskTwoButton.pack()
taskThreeButton.pack()
backButton = Button(root, text='Назад', command=button_back)
t1oL = Label(root)
# Задержка окна
root.mainloop()
| xtrdnrmnd/python_practice_2 | Main.py | Main.py | py | 7,561 | python | ru | code | 0 | github-code | 13 |
29746689844 | from datetime import datetime
from . import lib
class CenteredTextWidget(lib.CenterWidgetMixin, lib.TextWidget):
def __init__(self, **kwargs):
super(CenteredTextWidget, self).__init__(**kwargs)
class TimeWidget(CenteredTextWidget):
@property
def text(self):
return datetime.now().strftime("%H:%M:%S")
class IconValueWidget(lib.CompositeWidget):
def __init__(self, icon, value, icon_font, text_font, draw,
icon_offset=None, text_offset=None, spacing=0.25, **kwargs):
icon = lib.TextWidget(font=icon_font, text=icon, draw=draw)
if icon_offset is not None:
icon.add_offset(*icon_offset)
text = lib.TextWidget(font=text_font, text=str(value), draw=draw)
if text_offset is not None:
text.add_offset(*text_offset)
text.add_offset(spacing * icon.width, 0)
widgets = [icon, text]
super(IconValueWidget, self).__init__(
widgets=widgets,
reflow=lib.CompositeWidget.reflow_horizontal,
draw=draw,
**kwargs
)
| insertjokehere/ntpbox-display | ntpbox_display/widgets.py | widgets.py | py | 1,096 | python | en | code | 0 | github-code | 13 |
9616803935 | # %%
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import scipy
from scipy import ndimage
from sklearn.decomposition import PCA
from augumentation import image_augumentation
from parts import imshow, number_of_layer
# 中間層をある固定倍率でゆらす
# class shake(nn.Module):
# def __init__(self, prob):
# super().__init__(self, prob)
# def forward(self):
# return
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
torch.manual_seed(31415926535)
transform_withoutrotate = transforms.Compose([transforms.ToTensor()])
batch_size = 4
dataset = "cifar10"
if dataset == "mnist":
transform = transforms.Compose(
[
transforms.ToTensor(),
# transforms.RandomRotation(degrees=(-20, 20)),
]
)
trainset = torchvision.datasets.MNIST(
root="./data", train=True, download=True, transform=transform
)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=batch_size, shuffle=True, num_workers=0
)
testset = torchvision.datasets.MNIST(
root="./data", train=False, download=True, transform=transform
)
testloader = torch.utils.data.DataLoader(
testset, batch_size=batch_size, shuffle=False, num_workers=0
)
elif dataset == "cifar10":
transform = transforms.Compose(
[
transforms.ToTensor(),
# transforms.RandomRotation(degrees=(-20, 20)),
]
)
trainset = torchvision.datasets.CIFAR10(
root="./data", train=True, download=True, transform=transform
)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=batch_size, shuffle=True, num_workers=2
)
testset = torchvision.datasets.CIFAR10(
root="./data", train=False, download=True, transform=transform
)
testloader = torch.utils.data.DataLoader(
testset, batch_size=batch_size, shuffle=False, num_workers=2
)
def initialize_weights(m):
if isinstance(m, nn.Conv2d):
nn.init.constant_(m.weight.data, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias.data, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight.data, 0.01)
nn.init.constant_(m.bias.data, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.weight.data, 0.01)
nn.init.constant_(m.bias.data, 0)
for change in range(2):
for shake in range(0, 13):
accu = []
class Net(nn.Module):
def __init__(self, dataset):
super().__init__()
if dataset == "cifar10":
first_dim = 3
first_padding = 5
elif dataset == "mnist":
first_dim = 1
first_padding = 5
self.conv1 = nn.Conv2d(first_dim, 6, first_padding)
self.pool = nn.MaxPool2d(2, 2)
self.bn1 = nn.BatchNorm2d(6)
self.conv2 = nn.Conv2d(6, 16, 5)
self.bn2 = nn.BatchNorm2d(16)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x, epoch, end=None, testflag=False):
# (6,32,32) -> (6,28,28) -> (6,14,14) -> (16,10,10) -> (16,5,5) -> (120) -> (84) -> (10)
layer = 0
x = self.conv1(x)
# 逆微分はどのようにして行われる?おそらくここに入れるだけでは逆微分が計算できなくなってしまう可能性あり
# 逆微分がうまくいっているのかどうか確かめたいが、どのようにして確かめる?
if layer == shake and epoch < 20:
rand = torch.rand(x.shape) + 0.5
x = x * rand
if end == layer:
return x
layer += 1
x = F.relu(x)
if layer == shake and testflag == False:
if change == 0 or epoch < 20:
rand = torch.rand(x.shape) + 0.5
x = x * rand
if end == layer:
return x
layer += 1
x = self.bn1(x)
if layer == shake and testflag == False:
if change == 0 or epoch < 20:
rand = torch.rand(x.shape) + 0.5
x = x * rand
if end == layer:
return x
layer += 1
x = self.pool(x)
if layer == shake and testflag == False:
if change == 0 or epoch < 20:
rand = torch.rand(x.shape) + 0.5
x = x * rand
if end == layer:
return x
layer += 1
x = self.conv2(x)
if layer == shake and testflag == False:
if change == 0 or epoch < 20:
rand = torch.rand(x.shape) + 0.5
x = x * rand
if end == layer:
return x
layer += 1
x = F.relu(x)
if layer == shake and testflag == False:
if change == 0 or epoch < 20:
rand = torch.rand(x.shape) + 0.5
x = x * rand
if end == layer:
return x
layer += 1
x = self.bn2(x)
if layer == shake and testflag == False:
if change == 0 or epoch < 20:
rand = torch.rand(x.shape) + 0.5
x = x * rand
if end == layer:
return x
layer += 1
x = self.pool(x)
if layer == shake and testflag == False:
if change == 0 or epoch < 20:
rand = torch.rand(x.shape) + 0.5
x = x * rand
if end == layer:
return x
layer += 1
x = torch.flatten(x, 1) # flatten all dimensions except batch
if layer == shake and testflag == False:
if change == 0 or epoch < 20:
rand = torch.rand(x.shape) + 0.5
x = x * rand
if end == layer:
return x
layer += 1
x = self.fc1(x)
if layer == shake and testflag == False:
if change == 0 or epoch < 20:
rand = torch.rand(x.shape) + 0.5
x = x * rand
if end == layer:
return x
layer += 1
x = F.relu(x)
if layer == shake and testflag == False:
if change == 0 or epoch < 20:
rand = torch.rand(x.shape) + 0.5
x = x * rand
if end == layer:
return x
layer += 1
x = self.fc2(x)
if layer == shake and testflag == False:
if change == 0 or epoch < 20:
rand = torch.rand(x.shape) + 0.5
x = x * rand
if end == layer:
return x
layer += 1
x = F.relu(x)
if layer == shake and testflag == False:
if change == 0 or epoch < 20:
rand = torch.rand(x.shape) + 0.5
x = x * rand
if end == layer:
return x
layer += 1
x = self.fc3(x)
return x
net = Net(dataset)
net.to(device)
# net.apply(initialize_weights)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
# with torch.no_grad():
# for dim in range(0, 13):
# number = number_of_layer(dim)
# if number == -1:
# image_augumentation(net, 4, dim, number, "rotate", dataset)
# else:
# for i in range(0, number):
# image_augumentation(net, 4, dim, i, "rotate", dataset)
# number = number_of_layer(0)
# for i in range(0,10):
# for j in range(0,number):
# image_augumentation(net,i,0,j,"rotate",dataset)
for epoch in range(30): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
inputs, labels = data[0].to(device), data[1].to(device)
# imshow(torchvision.utils.make_grid(inputs))
optimizer.zero_grad()
outputs = net(inputs, epoch)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
# print("[%d, %5d] loss: %.3f" %
# (epoch + 1, i + 1, running_loss / 2000))
# running_loss = 0.0
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data[0].to(
device), data[1].to(device)
outputs = net(images, epoch, testflag=True)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
# print(
# "Accuracy of the network on the 10000 test images: %f %%"
# % (100 * correct / total)
# )
accu.append((100 * correct / total))
# with torch.no_grad():
# for dim in range(0, 13):
# number = number_of_layer(dim)
# if number == -1:
# image_augumentation(net, 4, dim, number, "rotate", dataset)
# else:
# for i in range(0, number):
# image_augumentation(net, 4, dim, i, "rotate", dataset)
# number = number_of_layer(0)
# for i in range(0,10):
# for j in range(0,number):
# image_augumentation(net,i,0,j,"rotate",dataset)
# print("Finished Training")
print(f"accu_{shake} = {accu}")
PATH = "./CIFARtrainset_" + dataset + \
" + dataset = torchvision.datasets." + dataset + "_net.pth"
torch.save(net.state_dict(), PATH)
# %%
| teruko9126/ML-ubuntu | augumentation/main.py | main.py | py | 11,213 | python | en | code | 0 | github-code | 13 |
20774679041 | class LR:
def __init__(self, g):
self.g = g
def goto(self, s, symbol):
# s = [(S0, .S), (S, .aA)]
if (symbol not in self.g.E) and (symbol not in self.g.N) and (symbol != self.g.S):
raise ValueError("Symbol " + symbol + " is neither a terminal nor a nonterminal")
result = []
for item in s:
point_pos = item[1].find('.')
if symbol in item[1] and point_pos != len(item[1]) - 1 and point_pos != -1:
if item[1][point_pos + 1] == symbol:
cur_item = list(item[1])
cur_item.remove('.')
cur_item.insert(point_pos + 1, '.')
result.append((item[0], ''.join(cur_item)))
return result
def closure(self, l):
processed_nonterminals = []
for item in l:
point_pos = item[1].find('.')
if point_pos != -1 and point_pos != len(item[1]) - 1:
symbol = item[1][point_pos + 1]
if symbol in self.g.N and symbol not in processed_nonterminals:
prods = self.g.prod_for_nonterminal(symbol)
for i in prods:
l.append((symbol, '.' + (i[0].replace(' ', ''))))
processed_nonterminals.append(symbol)
return l
def canonical_collection(self):
set_of_states = []
s0 = self.closure([('T', '.' + self.g.S)])
self.g.enrich_grammar()
set_of_states.append(s0)
for s in set_of_states:
for p in s:
for i in range(len(p[1])):
if p[1][i] == '.' and i != len(p[1]) - 1:
new_s = self.closure(self.goto(s, p[1][i+1]))
if new_s not in set_of_states:
set_of_states.append(new_s)
return set_of_states
def print_canonical_collection(self, can_col):
for i in range(len(can_col)):
print("s" + str(i) + ":" + str(can_col[i]))
| alexandra-murariu/FLCD | lab2+scanner/parser/LR.py | LR.py | py | 2,031 | python | en | code | 0 | github-code | 13 |
34607272555 | import logging
import random
from django.http import HttpResponse
from testapp.models import EagleTails
logger = logging.getLogger(__name__)
logging.basicConfig(
level=logging.INFO,
filename="log/testapp.log",
filemode="a",
format="%(levelname)s %(message)s",
)
def testapp(request):
logger.info("Used index")
return HttpResponse("Seminar2 page")
def eagle_or_tails(request):
logger.info("Used eagle_or_tails")
n = request.GET.get("n", "5")
res = random.choice(["Орёл", "Решка"])
res_w = EagleTails(res=res)
res_w.save()
data = EagleTails.statistic_eagle_or_tails(n)
return HttpResponse(f"Последние пять значений: {data.items()}")
| e6ton1set/specialization | django/seminars/project1/testapp/views.py | views.py | py | 718 | python | en | code | 0 | github-code | 13 |
1333634784 | from urllib.parse import urljoin
from django.conf import settings
from django.urls import reverse
from templated_email import send_templated_mail
def send_battle_result(battle):
battle_detail_path = reverse("battles:battle-detail", args=(battle.pk,))
battle_details_url = urljoin(settings.HOST, battle_detail_path)
send_templated_mail(
template_name="battle_result",
from_email=settings.EMAIL_ADDRESS,
recipient_list=[battle.creator.email, battle.opponent.email],
context={
"battle_creator": battle.creator.email.split("@")[0],
"battle_opponent": battle.opponent.email.split("@")[0],
"battle_winner": battle.winner.email.split("@")[0],
"battle_id": battle.id,
"creator_team": battle.creator.teams.filter(battle=battle.id).first(),
"opponent_team": battle.opponent.teams.filter(battle=battle.id).first(),
"battle_details_url": battle_details_url,
},
)
def send_opponent_battle_invitation_email(battle):
select_battle_team_path = reverse("battles:select-team", args=(battle.pk,))
select_battle_team_url = urljoin(settings.HOST, select_battle_team_path)
send_templated_mail(
template_name="battle_invite",
from_email=settings.EMAIL_ADDRESS,
recipient_list=[battle.opponent.email],
context={
"battle_id": battle.id,
"battle_creator": battle.creator.email.split("@")[0],
"battle_opponent": battle.opponent.email.split("@")[0],
"select_battle_team_url": select_battle_team_url,
},
)
def send_user_invite_to_pokebattle(user_invited_email, user_invitee_email):
signup_path = reverse("signup")
signup_url = urljoin(settings.HOST, signup_path)
send_templated_mail(
template_name="new_user_invite",
from_email=settings.EMAIL_ADDRESS,
recipient_list=[user_invited_email],
context={
"user_who_invited": user_invitee_email.split("@")[0],
"user_invited": user_invited_email.split("@")[0],
"signup_url": signup_url,
},
)
| gabrielaleal/pokebattle | backend/battles/utils/email.py | email.py | py | 2,149 | python | en | code | 1 | github-code | 13 |
1838378502 | import collections
import functools
import numbers
import os
def coroutine(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
generator = function(*args, **kwargs)
next(generator)
return generator
return wrapper
@coroutine
def sender(receiver=None, maximum=None):
somma=0
lista=list()
while True:
number=(yield )
if maximum is not None and receiver is not None:
if isinstance(number,numbers.Number):
somma+=number
lista.append(number)
if somma>= maximum:
receiver.send(lista)
somma=0
lista.clear()
else:
receiver.close()
break
@coroutine
def writer(NameFile=None):
while True:
lista=(yield)
if isinstance(lista,collections.abc.Collection):
if NameFile is not None:
fp=open(NameFile,"a")
for element in lista:
fp.write(str(element)+" ")
fp.write("\n")
fp.close()
else:
break
def main():
s = sender(writer("file"), 100.5)
for i in range(3, 10):
for x in range(1, 30, i):
try:
s.send(x)
except StopIteration:
print("Il sender {} non accetta piu` richieste perche' e` stato inviato un oggetto non numerico".format(s))
print()
o = open("file", 'r')
print("Questo e` il contenuto del file:")
for line in o:
print(line)
#---------------------------------------
for i in range(6, 10):
for x in range(7, 60, i):
try:
s.send(x)
except StopIteration:
print("Il sender {} non accetta piu` richieste perche' e` stato inviato un oggetto non numerico".format(s))
print()
o = open("file", 'r')
print("Questo e` il contenuto del file:")
for line in o:
print(line)
for i in range(6, 8):
for x in range(7, 15, i):
try:
s.send(x)
except StopIteration:
print("Il sender {} non accetta piu` richieste perche' e` stato inviato un oggetto non numerico".format(s))
print()
try:
s.send('pop')
except StopIteration:
print("Il sender {} non accetta piu` richieste perche' e` stato inviato un oggetto non numerico".format(s))
print()
o = open("file", 'r')
print("Questo e` il contenuto del file:")
for line in o:
print(line)
s.close()
os.remove("file")
if __name__ == "__main__":
main()
"""
Ricordatevi di cancellare il file ogni volta che eseguite il programma!
Il programma deve stampare:
Questo e` il contenuto del file:
1 4 7 10 13 16 19 22 25
28 1 5 9 13 17 21 25
29 1 6 11 16 21 26
1 7 13 19 25 1 8 15 22
29 1 9 17 25 1 10 19
Questo e` il contenuto del file:
1 4 7 10 13 16 19 22 25
28 1 5 9 13 17 21 25
29 1 6 11 16 21 26
1 7 13 19 25 1 8 15 22
29 1 9 17 25 1 10 19
28 7 13 19 25 31
37 43 49
55 7 14 21 28
35 42 49
56 7 15 23
31 39 47
55 7 16 25
34 43 52
Il sender {} non accetta piu` richieste perche' e` stato inviato un oggetto non numerico
Questo e` il contenuto del file:
1 4 7 10 13 16 19 22 25
28 1 5 9 13 17 21 25
29 1 6 11 16 21 26
1 7 13 19 25 1 8 15 22
29 1 9 17 25 1 10 19
28 7 13 19 25 31
37 43 49
55 7 14 21 28
35 42 49
56 7 15 23
31 39 47
55 7 16 25
34 43 52
"""
| DanyR2001/Codice-Percorso-Universitario | Terzo anno/Programmazione Avanzata/Primi esercizi/ripasso/Esercizio 6-12-2022/Es1.py | Es1.py | py | 3,581 | python | it | code | 0 | github-code | 13 |
35012222378 | import os
import csv
import sqlite3
import pandas as pd
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtSql import QSqlDatabase, QSqlQuery, QSqlTableModel
class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        """Create and lay out every widget, connect all signals, and
        initialize the (initially empty) model state for the main window.

        Called once with the top-level QMainWindow by the generated-UI
        convention; after it returns the window is ready to show.
        """
        ################################################
        #    MAIN WINDOW AND NON-FUNCTIONAL WIDGETS    #
        ################################################
        # Fixed 800x600 window: min and max sizes equal, so not resizable.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 600)
        MainWindow.setMinimumSize(QtCore.QSize(800, 600))
        MainWindow.setMaximumSize(QtCore.QSize(800, 600))
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        MainWindow.setCentralWidget(self.centralwidget)
        self.nameLabel = QtWidgets.QLabel(self.centralwidget)
        self.nameLabel.setGeometry(QtCore.QRect(30, 10, 191, 71))
        font = QtGui.QFont()
        font.setFamily("Maiandra GD")
        font.setPointSize(26)
        self.nameLabel.setFont(font)
        self.nameLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.nameLabel.setObjectName("nameLabel")
        self.line = QtWidgets.QFrame(self.centralwidget)
        self.line.setGeometry(QtCore.QRect(420, 30, 20, 41))
        self.line.setFrameShape(QtWidgets.QFrame.VLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.line_2 = QtWidgets.QFrame(self.centralwidget)
        self.line_2.setGeometry(QtCore.QRect(620, 30, 20, 41))
        self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
        self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_2.setObjectName("line_2")
        ###################################
        #             BUTTONS             #
        ###################################
        self.openButton = QtWidgets.QPushButton(self.centralwidget)
        self.openButton.setGeometry(QtCore.QRect(250, 30, 161, 41))
        self.openButton.setObjectName("openButton")
        self.openButton.clicked.connect(self.open_file_dialog)
        self.saveButton = QtWidgets.QPushButton(self.centralwidget)
        self.saveButton.setGeometry(QtCore.QRect(450, 30, 161, 41))
        self.saveButton.setObjectName("saveButton")
        self.saveButton.clicked.connect(self.save_file_dialog)
        self.inputLine = QtWidgets.QLineEdit(self.centralwidget)
        self.inputLine.setGeometry(QtCore.QRect(10, 100, 581, 41))
        self.inputLine.setObjectName("inputLine")
        self.filterButton = QtWidgets.QPushButton(self.centralwidget)
        self.filterButton.setGeometry(QtCore.QRect(600, 100, 91, 41))
        self.filterButton.setObjectName("filterButton")
        self.filterButton.clicked.connect(self.filter_table)
        self.exitButton = QtWidgets.QPushButton(self.centralwidget)
        self.exitButton.setGeometry(QtCore.QRect(650, 30, 131, 41))
        self.exitButton.setObjectName("exitButton")
        self.exitButton.clicked.connect(MainWindow.close)
        # queryButton shares its geometry with filterButton: only one of the
        # two is visible at a time (CSV mode shows Filter, DB mode shows Query).
        self.queryButton = QtWidgets.QPushButton(self.centralwidget)
        self.queryButton.setEnabled(True)
        self.queryButton.setGeometry(QtCore.QRect(600, 100, 91, 41))
        self.queryButton.setObjectName("queryButton")
        self.queryButton.setVisible(False)
        self.queryButton.clicked.connect(self.execute_query_from_input_line)
        self.databaseTableComboBox = QtWidgets.QComboBox(self.centralwidget)
        self.databaseTableComboBox.setGeometry(QtCore.QRect(10, 100, 131, 41))
        self.databaseTableComboBox.setObjectName("databaseTableComboBox")
        self.databaseTableComboBox.setVisible(False)
        self.databaseTableComboBox.currentIndexChanged.connect(self.select_table)
        self.resetButton = QtWidgets.QPushButton(self.centralwidget)
        self.resetButton.setEnabled(True)
        self.resetButton.setGeometry(QtCore.QRect(700, 100, 91, 41))
        self.resetButton.setObjectName("resetButton")
        self.resetButton.clicked.connect(self.reset_table)
        ###################################
        #           TABLE VIEW            #
        ###################################
        self.tableView = QtWidgets.QTableView(self.centralwidget)
        self.tableView.setGeometry(QtCore.QRect(10, 150, 781, 391))
        self.tableView.setObjectName("tableView")
        ###################################
        #            MENU BAR             #
        ###################################
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 21))
        self.menubar.setObjectName("menubar")
        self.menu = QtWidgets.QMenu(self.menubar)
        self.menu.setObjectName("menu")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # Menu actions mirror the Open/Save/Exit buttons.
        self.importAction = QtWidgets.QAction(MainWindow)
        self.importAction.setObjectName("importAction")
        self.importAction.triggered.connect(self.open_file_dialog)
        self.saveMenuAction = QtWidgets.QAction(MainWindow)
        self.saveMenuAction.setObjectName("saveMenuAction")
        self.saveMenuAction.triggered.connect(self.save_file_dialog)
        self.exitMenuAction = QtWidgets.QAction(MainWindow)
        self.exitMenuAction.setObjectName("exitMenuAction")
        self.exitMenuAction.triggered.connect(MainWindow.close)
        self.menu.addAction(self.importAction)
        self.menu.addSeparator()
        self.menu.addAction(self.saveMenuAction)
        self.menu.addAction(self.exitMenuAction)
        self.menubar.addAction(self.menu.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        ###################################
        #            DATABASE             #
        ###################################
        self.db = None
        ###################################
        #             MODELS              #
        ###################################
        # Models are used to manage the data and the data is displayed using the TableView widget
        #
        # model is default set to None since there are two models that
        # can be used in the program. (QStandardItemModel and QSqlTableModel)
        # QSortFilterProxyModel allows for filtered data to be edited while maintaining the original model using a proxy model
        # The proxy model can be filtered and the data edited in the filtered table is reflected on the original model without
        # having to rewrite the whole model.
        self.model = None
        self.filter_proxy_model = QtCore.QSortFilterProxyModel()
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "KLM Editor"))
self.openButton.setText(_translate("MainWindow", "Open"))
self.saveButton.setText(_translate("MainWindow", "Save"))
self.filterButton.setText(_translate("MainWindow", "Filter"))
self.exitButton.setText(_translate("MainWindow", "Exit"))
self.queryButton.setText(_translate("MainWindow", "Query"))
self.resetButton.setText(_translate("MainWindow", "Reset"))
self.nameLabel.setText(_translate("MainWindow", "KLM Editor"))
self.menu.setTitle(_translate("MainWindow", "File"))
self.importAction.setText(_translate("MainWindow", "Import"))
self.saveMenuAction.setText(_translate("MainWindow", "Save"))
self.exitMenuAction.setText(_translate("MainWindow", "Quit"))
################################################
# FILE DIALOGS #
################################################
    def open_file_dialog(self):
        """Prompt for a .csv or .db file and load it into the table view.

        CSV files are loaded into a QStandardItemModel displayed through a
        QSortFilterProxyModel; SQLite .db files open a Qt database
        connection and display the first table via QSqlTableModel.
        """
        # NOTE(review): getOpenFileName returns a (path, filter) tuple, so
        # `if filename:` below is always truthy even on cancel — confirm
        # whether cancel should be a no-op here.
        filename = QFileDialog.getOpenFileName(filter="CSV File (*.csv);;Database File (*.db)")
        split_filename = os.path.splitext(filename[0])
        file_name = split_filename[0]
        file_extension = split_filename[1]
        if filename:
            if file_extension == ".csv":
                with open(file_name + file_extension, newline='') as f:
                    # Disable database query related UI elements
                    self.filterButton.setVisible(True)
                    self.queryButton.setVisible(False)
                    self.databaseTableComboBox.setVisible(False)
                    self.inputLine.setGeometry(10, 100, 581, 41)
                    self.inputLine.clear()
                    # Close old connection and reset UI if connection exists
                    if self.db != None:
                        self.db.close()
                        self.reset_combobox()
                    # Load data from csv file into a list to be added to the model
                    reader = csv.reader(f)
                    data = list(reader)
                    data_header = list(data[0]) # for the header row
                    data.pop(0)
                    # Creation of the base model and proxy model
                    self.model = QtGui.QStandardItemModel(len(data) - 1, len(data[0]))
                    self.model.setHorizontalHeaderLabels(data_header)
                    self.fill_model_from_data(data)
                    self.filter_proxy_model.setSourceModel(self.model)
                    # Model is displayed using the proxy model
                    self.tableView.setModel(self.filter_proxy_model)
                    self.reset_table()
            elif file_extension == ".db":
                # Disable csv related UI elements
                self.filterButton.setVisible(False)
                self.queryButton.setVisible(True)
                self.databaseTableComboBox.setVisible(True)
                self.inputLine.setGeometry(150, 100, 441, 41)
                self.inputLine.clear()
                # Close old connection and reset UI if connection exists
                if self.db != None:
                    self.db.close()
                    self.reset_combobox()
                self.db = QSqlDatabase.addDatabase("QSQLITE")
                self.db.setDatabaseName(file_name + file_extension)
                if self.db.open():
                    self.model = QSqlTableModel()
                    # First table in database is open and displayed by default
                    statement = "SELECT * FROM " + self.db.tables()[0]
                    query = QSqlQuery(statement)
                    self.populate_combobox(self.db)
                    self.model.setQuery(query)
                    self.tableView.setModel(self.model)
                else:
                    # Connection failed: tell the user instead of crashing later.
                    msg = QMessageBox()
                    msg.setIcon(QMessageBox.Critical)
                    msg.setText("Connection to database failed.")
                    msg.setInformativeText("Please try again.")
                    msg.setWindowTitle("Error")
                    msg.exec_()
    def save_file_dialog(self):
        """Prompt for a destination and export the current model.

        Delegates to export_model_to_csv / export_model_to_db based on the
        extension the user picked; refuses to save when no data is loaded.
        """
        if self.model != None:
            filename = QFileDialog.getSaveFileName(filter="CSV File (*.csv);;Database File (*.db)")
            split_filename = os.path.splitext(filename[0])
            file_name = split_filename[0]
            file_extension = split_filename[1]
            if file_extension == ".csv":
                self.export_model_to_csv(file_name, file_extension)
            elif file_extension == ".db":
                self.export_model_to_db(file_name, file_extension)
        # Prevent user from saving if no data is loaded
        else:
            msg = QMessageBox()
            msg.setIcon(QMessageBox.Critical)
            msg.setText("Cannot save to file when no file is loaded.")
            msg.setInformativeText("Please open file and try again.")
            msg.setWindowTitle("Error")
            msg.exec_()
def fill_model_from_data(self, data):
for x in range(0, len(data)):
for y in range(0, len(data[x])):
item = QtGui.QStandardItem(data[x][y])
self.model.setItem(x, y, item)
################################################
# EXPORT #
################################################
def export_model_to_csv(self, file_name, file_extension):
if type(self.model) == type(QtGui.QStandardItemModel(0,0)):
data = []
row = []
row_count = self.model.rowCount()
column_count = self.model.columnCount()
# Appending header row of the table
for x in range(0, column_count):
row.append(self.model.horizontalHeaderItem(x).text())
data.append(row)
row = []
# Appending rest of the data rows in the table
for x in range(0, row_count):
for y in range(0, column_count):
row.append(self.model.item(x,y).text())
data.append(row)
row = []
# Writing the nested list of data to a .csv spreadsheet
with open(file_name + file_extension, 'w', newline='', encoding='utf-8') as f:
wr = csv.writer(f)
wr.writerows(data)
elif type(self.model) == type(QSqlTableModel()):
# The query shown in the Table View will be lost during the process of saving
# Saving the original query will allow for the table to keep its original state after saving
original_query = self.model.query()
# Each table will be exported to its own csv file
for table_num in range(0, len(self.db.tables())):
data = []
row = []
row_count = self.model.rowCount()
column_count = self.model.columnCount()
table_name = self.db.tables()[table_num]
query = QSqlQuery("SELECT * FROM " + self.db.tables()[table_num])
self.model.setQuery(query)
# Header
for x in range(0, column_count):
row.append(self.model.headerData(x, QtCore.Qt.Horizontal))
data.append(row)
row = []
# Data
for x in range(0, row_count):
for y in range(0, column_count):
row.append(self.model.record(x).value(y))
data.append(row)
row = []
# Saving current table to .csv
with open(file_name + "_" + table_name + file_extension, 'w', newline='', encoding='utf-8') as f:
wr = csv.writer(f)
wr.writerows(data)
# Display the query that was shown before saving
self.model.setQuery(original_query)
    def export_model_to_db(self, file_name, file_extension):
        """Export the current model to a SQLite database file.

        A CSV-backed model becomes one table named after the file; a
        database-backed model is copied table by table. Existing tables
        in the destination are replaced.
        """
        # Similar to export_model_to_csv in retrieving information
        # Used both sqlite3 and pandas modules to export because sqlite3 makes it easier
        # to write to a database file
        connection = sqlite3.connect(file_name + file_extension)
        if type(self.model) == type(QtGui.QStandardItemModel(0,0)):
            headers = []
            for x in range(0, self.model.columnCount()):
                headers.append(self.model.horizontalHeaderItem(x).text())
            data = []
            row = []
            for x in range(0, self.model.rowCount()):
                for y in range(0, self.model.columnCount()):
                    row.append(self.model.item(x,y).text())
                data.append(row)
                row = []
            df = pd.DataFrame(data, columns = headers)
            df.to_sql(QtCore.QFileInfo(file_name).fileName(), connection, if_exists='replace', index=False)
        elif type(self.model) == type(QSqlTableModel()):
            for t in range(0, len(self.db.tables())):
                headers = []
                data = []
                row = []
                query = QSqlQuery("SELECT * FROM " + self.db.tables()[t])
                self.model.setQuery(query)
                for x in range(0, self.model.columnCount()):
                    headers.append(self.model.headerData(x, QtCore.Qt.Horizontal))
                for x in range(0, self.model.rowCount()):
                    for y in range(0, self.model.columnCount()):
                        row.append(self.model.record(x).value(y))
                    data.append(row)
                    row = []
                df = pd.DataFrame(data, columns = headers)
                df.to_sql(self.db.tables()[t], connection, if_exists='replace', index=False)
        connection.close()
################################################
# UI MANIPULATION #
################################################
def populate_combobox(self, db):
if type(self.model) == type(QSqlTableModel()):
for x in range(0, len(db.tables())):
self.databaseTableComboBox.addItem(db.tables()[x])
    def reset_combobox(self):
        """Remove all table names from the database table selector."""
        self.databaseTableComboBox.clear()
    def select_table(self):
        """Display the database table currently chosen in the combo box."""
        current_index = self.databaseTableComboBox.currentIndex()
        if type(self.model) == type(QSqlTableModel()):
            # Default index of a QComboBox widget is -1
            # Set conditional so that it's not automatically called when filled
            if self.databaseTableComboBox.currentIndex() != -1:
                statement = "SELECT * FROM " + self.db.tables()[current_index]
                self.model.setTable(self.db.tables()[current_index])
                self.execute_query_from_statement(statement)
    def reset_table(self):
        """Clear the filter/query input and restore the unfiltered view."""
        self.inputLine.clear()
        if type(self.model) == type(QtGui.QStandardItemModel(0,0)):
            self.filter_proxy_model.setFilterKeyColumn(-1) # -1 selects all columns
            self.filter_proxy_model.setFilterRegExp("") # Empty reg expression resets model
        elif type(self.model) == type(QSqlTableModel()):
            self.select_table()
def filter_table(self):
if type(self.model) == type(QtGui.QStandardItemModel(0,0)):
# Split into: column name and filter value
split_filter = self.inputLine.text().split('=')
header_column = self.get_header_column(split_filter[0])
if header_column != None:
self.filter_proxy_model.setFilterKeyColumn(header_column)
self.filter_proxy_model.setFilterRegExp(split_filter[1])
def get_header_column(self, header_label):
# Returns index of column specified in the filter
for x in range(0, self.model.columnCount()):
if header_label == self.model.horizontalHeaderItem(x).text():
return x
# User may input a column name that doesn't exist
return None
################################################
# QUERY FUNCTIONS #
################################################
    def execute_query_from_input_line(self):
        """Run the SQL typed into the input line against the open database."""
        if type(self.model) == type(QSqlTableModel()):
            # Executes query from input line
            query = QSqlQuery(self.inputLine.text())
            self.model.setQuery(query)
def execute_query_from_statement(self, statement):
if type(self.model) == type(QSqlTableModel()):
query = QSqlQuery(statement)
self.model.setQuery(query) | JGelotin/klm-editor | klmeditor/MainWindow.py | MainWindow.py | py | 19,752 | python | en | code | 1 | github-code | 13 |
import pandas as pd
import numpy as np
import os
import xlsxwriter
from Retrieve_emails import *
import shutil
from datetime import datetime
import statistics
import matplotlib
import matplotlib.pyplot as plt
from io import BytesIO
import sys
# Ask which year to report on (input() already returns str; the explicit
# conversion is kept for backward compatibility and is harmless).
year = str(input("Enter year:"))
# year = '2020'
path = os.path.abspath(os.path.dirname(__file__))
# Recreate a clean .\data working directory. ignore_errors=True replaces the
# previous bare "except: pass", which also swallowed unrelated exceptions
# (including KeyboardInterrupt).
shutil.rmtree(path + '\\data', ignore_errors=True)
os.mkdir(path + '\\data\\')
# Download this year's messages and load them into a DataFrame.
retrieve_emails(path, year)
df = pd.read_csv(path + "\\data\\" + "outlook_emails_" + year + ".csv")
# Drop anything filed under a Spam/spam folder (both casings occur in data).
df = df[~df["Full path"].str.contains('Spam')].reset_index(drop=True)
df = df[~df["Full path"].str.contains('spam')].reset_index(drop=True)
def build_new_id(x):
    """Return a usable identifier for an email row.

    Falls back to the conversation ID whenever the explicit ID is missing:
    either a genuine NaN, or any string spelling of a missing value (such
    as 'nan'/'NaN') left behind by the earlier ``df['ID'].apply(str)`` cast.

    Parameters
    ----------
    x : mapping with 'ID' and 'conversationID' keys (a DataFrame row).
    """
    raw_id = x['ID']
    # pd.isna handles the real-NaN case robustly (the old `is np.nan`
    # identity test only matched the np.nan singleton); the case-insensitive
    # comparison also covers 'NAN' etc., which the old code missed.
    if pd.isna(raw_id) or str(raw_id).lower() == 'nan':
        return(x['conversationID'])
    return(x['ID'])
# Stringify IDs, then derive a unified identifier per row.
df['ID'] = df['ID'].apply(str)
df['new_ID'] = df.apply(lambda row: build_new_id(row), axis = 1)
# Number of queries
num_distinct_queries = df['ID'].drop_duplicates().count()
# + df[df.ID.isna()]['conversationID'].drop_duplicates().count()
# Number of media
num_media = df[df.is_media == True]['new_ID'].drop_duplicates().count()
# Number of final replies
num_final_replies = len(df[df.is_final_reply == True]) + num_media
# Number of direct replies
num_direct_replies = len(df.loc[(df.is_final_reply == True) & (df.is_direct_reply == True)])
# Number of queries from CRM
num_crm = len(df.loc[(df.is_final_reply == True) & (df.is_crm == True)]['ID'].drop_duplicates())
# Number of follow-ups
followups = df[df.is_final_reply == True].groupby(['Recipients', 'ID']).size().reset_index(name='counts')
num_followups = (followups[followups.counts > 1]['counts']-1).sum()
# Parse raw timestamp strings once; reused by every duration statistic below.
df['time_format'] = df.apply(lambda row: datetime.strptime(row['Received Time'], '%Y/%m/%d %H:%M:%S'), axis=1)
# Compute average time of answer
# Per-query first/last message times, excluding Additional/Media folders.
new_df = df[~df["Full path"].str.contains('Additional')]
new_df = new_df[~new_df["Full path"].str.contains('Media')]
new_df = new_df.groupby(['new_ID'])['time_format'].agg([max, min])
def weekend_excl(x):
    """Return x['max'] - x['min'] with weekend days excluded.

    When the raw difference spans more calendar days than the number of
    business days between the two dates (np.busday_count), the day
    component is reduced to the business-day count; the intra-day part
    (hours/minutes) is kept unchanged.
    """
    elapsed = x['max'] - x['min']
    business_days = np.busday_count(x['min'].date(), x['max'].date())
    calendar_days = int(elapsed.days)
    if calendar_days <= business_days:
        return elapsed
    # Algebraically identical to the original
    # "elapsed - <calendar days> + <business days>" adjustment.
    return elapsed - pd.Timedelta(days=calendar_days - business_days)
# Overall time-to-close: mean of positive deltas under 30 days, formatted
# as a short "H hours" string.
new_df['delta'] = new_df.apply(lambda x: weekend_excl(x), axis = 1)
answer_time = new_df['delta']
answer_time = answer_time[(answer_time < pd.Timedelta("30 days"))]
answer_time = str(np.mean(answer_time[(answer_time > pd.Timedelta("0 days"))]))[:9] + ' hours'
# Compute average time to answer media
new_df = df[df.is_media == True].groupby(['new_ID'])['time_format'].agg([max, min])
new_df['delta'] = new_df.apply(lambda x: weekend_excl(x), axis = 1)
answer_time_media = new_df['delta']
all_media = answer_time_media.apply(lambda x: str(x))
# Restrict to plausible media turnaround (0..15 days) for the stats/plot.
answer_time_media = answer_time_media[(answer_time_media < pd.Timedelta("15 days"))]
media_stat = answer_time_media[(answer_time_media > pd.Timedelta("0 days"))].describe()
media_stat = media_stat.apply(lambda x: str(x))[1:]
# Histogram of media turnaround in hours (2-hour bins, 0..60); the
# matplotlib hist return value is reshaped into a DataFrame for Excel.
plot_media = (answer_time_media[(answer_time_media > pd.Timedelta("0 days"))] / \
    pd.Timedelta(hours=1))
plot_media = plt.hist(x=plot_media, bins=range(0, 60, 2), color='#0504aa',
                    alpha=0.7, rwidth=0.85)
plot_media = pd.DataFrame({'Hours': plot_media[1][:-1], \
            'Number of queries': plot_media[0]}, \
                columns = list(['Hours', 'Number of queries']))
plot_media = plot_media.set_index(plot_media.columns[0])
# answer_time_media = str(
#     statistics.median(answer_time_media[(answer_time_media > pd.Timedelta("0 days"))])
#     )[:9] + ' hours'
# answer_time_media = str(
#     np.mean(answer_time_media[(answer_time_media > pd.Timedelta("0 days"))])
#     )[:9] + ' hours'
# answer_time_media.to_csv('checkmedia.csv')
# Summary sheet: one column per metric, indexed by the reporting year.
d = {'N. of emails (internal exchanges included)': len(df),
    'N. of distinct queries (queries without ID not counted)': num_distinct_queries,
    'N. of queries from CRM': num_crm,
    'N. of follow-ups': num_followups,
    'N. of final replies (follow-ups included)': num_final_replies,
    'N. of direct replies to user (without help of BA)': num_direct_replies,
    'Relative n. of direct replies': str(round(100*num_direct_replies/num_final_replies,2)) + ' %',
    'N. of media queries': num_media,
    'Average time needed to close a query': answer_time}
general_stat = pd.DataFrame(d, index = [year]).transpose()
# Add BA
# The BA (analyst) name is taken from the last component of the folder path.
BAs = pd.DataFrame(list(df['Full path'].apply(lambda x: x.split("\\")[-1:])),\
    columns = list(['BA']))
df = pd.concat([df, BAs], axis = 1)
# Stat on BAs
final_replies_BA = df[df.is_final_reply == True].groupby(['BA']).size()
final_replies_BA_help = df[df.is_media == False][df.is_final_reply == True][df.is_direct_reply == False]\
    .groupby(['BA']).size()
final_replies_BA_direct = df[df.is_final_reply == True][df.is_direct_reply == True]\
    .groupby(['BA']).size()
# Compute average time of answer
# Per-BA mean turnaround over positive deltas under 40 days.
new_df = df[df.is_direct_reply == False].groupby(['BA','new_ID'])['time_format'].agg([max, min])
new_df[1] = new_df.apply(lambda x: weekend_excl(x), axis = 1)
answer_time = new_df[1]
answer_time = answer_time[(answer_time < pd.Timedelta("40 days"))]
answer_time = answer_time[(answer_time > pd.Timedelta("0 days"))]
answer_time = answer_time.groupby(level=0).agg(np.mean)
final_replies_BA_help = pd.concat([final_replies_BA_help, answer_time], axis=1, join="inner")
final_replies_BA_help[1] = final_replies_BA_help[1].apply(lambda x: str(x)[:9] + ' hours')
final_replies_BA_help = final_replies_BA_help.rename(columns={0: "Number of queries", 1: "Avg time needed"})
final_replies_BA_direct = pd.DataFrame(final_replies_BA_direct, columns = list(['Direct replies']))
# Number of queries over time per BA
count_BA = df[df.is_final_reply == True][['BA', 'months']]\
    .groupby(['BA', 'months']).size()\
    .reset_index()
count_BA.columns = list(['BA', 'month', 'counts'])
count_BA = count_BA.pivot_table(index=['BA'],columns='month', values='counts')
# Direct replies over time
DR_over_time = pd.DataFrame(df[df.is_direct_reply == True][df.is_final_reply == True]\
    .groupby(['months']).size())
DR_over_time.columns = list(['N. replies'])
# list of dataframes and sheet names
# dfs[i] is written to the sheet named sheets[i] (paired positionally).
dfs = [general_stat,
    plot_media, all_media,
    final_replies_BA_help,
    final_replies_BA_direct,
    count_BA,
    DR_over_time]
sheets = ['General Statistics', 'Media Statistics',
    'All media queries', 'Queries per BA', \
    'Queries answered directly', 'Queries BA per month',
    'Direct replies over time']
desktop = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')
# run function to write in different sheets in Excel
def dfs_tabs(df_list, sheet_list, file_name):
    """Write each DataFrame in *df_list* to its own sheet of one Excel file.

    Parameters
    ----------
    df_list : sequence of DataFrames, paired positionally with sheet_list.
    sheet_list : sequence of sheet names.
    file_name : destination .xlsx path.
    """
    # Using the context manager closes (and saves) the workbook even when a
    # write fails; the explicit ExcelWriter.save() call used previously was
    # deprecated in pandas 1.5 and removed in pandas 2.0.
    with pd.ExcelWriter(file_name, engine='xlsxwriter') as writer:
        for dataframe, sheet in zip(df_list, sheet_list):
            dataframe.to_excel(writer, sheet_name=sheet, startrow=0, startcol=0)
# Write the workbook to the user's desktop and clean up the temp data dir.
dfs_tabs(dfs, sheets, desktop + '\\report_statistics_' + year + '.xlsx')
print("Annual report " + year + ' built. You can find it in your desktop!')
input('Click Enter to close')
shutil.rmtree(path + '\\data')
# --- dataset metadata separator row removed (extraction artifact) ---
from datetime import date, timedelta, datetime, time
from sqlalchemy.sql.expression import text
from sqlalchemy import create_engine
import os
from util import config, logger, only_allow_one_instance
triggers_sql = text("""
SELECT
id,
from_host_trigger,
sys_log_tag_trigger,
message_trigger
FROM
`trigger`
WHERE
status = 'DELETE'
""")
def triggers(con):
    """Yield all DELETE triggers.

    Each yielded row is (id, from_host_trigger, sys_log_tag_trigger,
    message_trigger) as selected by triggers_sql.
    """
    result = con.execute(triggers_sql)
    # try/finally guarantees the cursor is closed even when the consumer
    # abandons the generator mid-iteration (the old code leaked it then).
    try:
        for row in result:
            yield row
    finally:
        result.close()
clean_system_events_sql = text("""
DELETE
FROM
SystemEvents
WHERE
SystemEvents.FromHost LIKE :from_host AND
SystemEvents.SysLogTag LIKE :sys_log_tag AND
SystemEvents.Message LIKE :message AND
SystemEvents.DeviceReportedTime BETWEEN :yesterday AND :today
""")
def clean_system_events(from_host, sys_log_tag, message):
    """Delete yesterday-through-today SystemEvents rows matching the given
    SQL LIKE patterns; return the number of rows deleted."""
    # NOTE(review): uses the module-global `con` bound in __main__, not a
    # parameter — confirm this module is only ever run as a script.
    result = con.execute(
        clean_system_events_sql,
        from_host=from_host, sys_log_tag=sys_log_tag, message=message,
        # Window: midnight yesterday through 23:59 today.
        yesterday=datetime.combine(date.today() - timedelta(1), time(0, 0)),
        today=datetime.combine(date.today(), time(23, 59))
    )
    # rowcount of a DELETE is the number of rows removed.
    num = result.rowcount
    result.close()
    return num
update_trigger_with_delete_stats_sql = text("""
UPDATE `trigger`
SET
total_deleted=total_deleted+:matched,
deleted_since_changed=deleted_since_changed+:matched,
last_delete=NOW()
WHERE
id = :id
""")
def update_trigger_with_delete_stats(id, num_of_rows):
    """Record *num_of_rows* deletions against trigger row *id*."""
    # NOTE(review): relies on the module-global `con` bound in __main__.
    result = con.execute(
        update_trigger_with_delete_stats_sql, id=id, matched=num_of_rows
    )
    result.close()
def clean_mysql(con):
    """Remove SystemEvents matching regexps in trigger table"""
    deleted_total = 0
    for trigger_id, host_pattern, tag_pattern, msg_pattern in triggers(con):
        # Delete matching rows, then record the count on the trigger row.
        matched = clean_system_events(host_pattern, tag_pattern, msg_pattern)
        update_trigger_with_delete_stats(trigger_id, matched)
        deleted_total += matched
    logger('Deleted %s SystemEvents matched by triggers.' % deleted_total)
#
# Main
#
if __name__ == "__main__":
    """Entry point: acquire the single-instance lock, connect, clean."""
    only_allow_one_instance('signer_trigger_clean.pid')
    cnf = config('signer.cfg', os.path.dirname(os.path.abspath(__file__)))
    engine = create_engine(
        cnf.DATABASE,
        convert_unicode=True, pool_size=50, pool_recycle=3600
    )
    con = engine.connect()
    try:
        clean_mysql(con)
    finally:
        # Always release the pooled connection, even if cleanup raised
        # (previously an exception left it open).
        con.close()
"""Show the homepage."""
import os
import uuid
import copy
import flask
import equations
from equations.data import rooms_info, user_info, MapsLock
from equations.models import Game
@equations.app.route("/favicon.ico")
def show_favicon():
    """Deliver the favicon asset."""
    images_dir = os.path.join(equations.app.root_path, 'static', 'images')
    return flask.send_from_directory(images_dir, 'favicon.ico')
@equations.app.route("/", methods=['GET'])
def show_index():
    """Render the homepage, listing started-but-unfinished games the
    logged-in user is playing in."""
    context = {
        "logged_in": False,
        "username": '',
        "gamerooms": [],
    }
    if "username" in flask.session:
        context['logged_in'] = True
        context['username'] = flask.session['username']
    # Generate a list of the rooms a user is currently in.
    # NOTE(review): MapsLock() presumably guards the rooms_info/user_info
    # maps read below — confirm the lock is actually held for these reads.
    MapsLock()
    if context['username'] in user_info:
        gamerooms = list(user_info[context['username']]["latest_socketids"].keys())
        print(f"Rooms user {context['username']} is currently in: ", gamerooms)
        for gameroom in gamerooms:
            # Only list rooms whose game is in progress and where this
            # user is one of the players.
            if gameroom in rooms_info and rooms_info[gameroom]["game_started"] \
                    and not rooms_info[gameroom]["game_finished"] \
                    and context['username'] in rooms_info[gameroom]['players']:
                context["gamerooms"].append(gameroom)
    return flask.render_template("index.html", **context)
def generate_gameid():
    """Return a fresh 4-character game ID not present in the Game table.

    Warning/Notice: only 36^4 (about 1.68 million) unique game nonces
    exist under this scheme.
    """
    while True:
        candidate = str(uuid.uuid4()).replace('-', '')[:4].upper()
        if not Game.query.filter_by(nonce=candidate).all():
            assert candidate not in rooms_info
            return candidate
@equations.app.route("/create/", methods=['POST'])
def create_game():
    """Create a new game from the homepage form and redirect into it.

    The submit button's caption selects the game type: Equations ('eq')
    or On-Sets ('os', with or without the 'Basic' division restriction).
    """
    if 'username' not in flask.session:
        flask.flash("Please log in before creating a game.")
        return flask.redirect(flask.url_for('show_index'))
    name = flask.session['username']
    game_nonce = generate_gameid()
    # TODO: These if-elif statements are kind of ugly, but not sure how else to
    # encode info from the forms
    gametype = None
    division = None
    if flask.request.form['create'] == "Create Equations Game":
        gametype = 'eq'
    elif flask.request.form['create'] == "Without Restrictions":
        gametype = 'os'
        division = 'Basic'
    elif flask.request.form['create'] == "With Restrictions":
        gametype = 'os'
    else:
        flask.flash("Tried to create an invalid type of game (type must be Equations or On-Sets)")
        return flask.redirect(flask.url_for('show_index'))
    # Commit the game to the database
    print(f"ADDING GAME {game_nonce} TO THE DATABASE")
    new_game = Game(nonce=game_nonce, gametype=gametype, division=division, ended=False, players=[name])
    equations.db.session.add(new_game)
    equations.db.session.commit()
    return flask.redirect(flask.url_for('show_game', nonce=game_nonce))
@equations.app.route("/join/", methods=['POST'])
def join_game():
    """Join an existing game by room ID, as player or spectator.

    Joining as a player is refused for tournament rooms, started or
    ended games, and rooms that already have 3 players; in every case
    the user is still redirected to the game page (players who are
    already in the room, and spectators, pass straight through).
    """
    if 'username' not in flask.session:
        flask.flash("Please log in before joining a game.")
        return flask.redirect(flask.url_for('show_index'))
    name = flask.session['username']
    # Room IDs are stored uppercase; normalize the user's input.
    room = flask.request.form['room'].upper()
    game_info = Game.query.filter_by(nonce=room).first()
    if game_info is None:
        flask.flash(f"A room with the ID you entered ({room}) does not exist!")
        return flask.redirect(flask.url_for('show_index'))
    # Perform checks to see if it is possible to join as a player
    if flask.request.form['join'] == "Join as Player":
        if name not in game_info.players:
            if game_info.tournament is not None:
                flask.flash(f"The room you tried to join ({room}) is part of a tournament, "
                            " so you cannot join as player!")
                return flask.redirect(flask.url_for('show_index'))
            # A non-empty cube_index means pieces were dealt, i.e. started.
            game_started = len(game_info.cube_index) > 0
            if game_started or game_info.ended or len(game_info.players) >= 3:
                flask.flash(f"You cannot join as a player in that room ({room}) "
                            "because either the game has started, the game has ended, "
                            "or there are already 3 players in it.")
                return flask.redirect(flask.url_for('show_index'))
            # At this point, it is possible to join as a player.
            # Add this user as a player in this game, and commit to the database.
            player_list = copy.deepcopy(game_info.players)
            player_list.append(name)
            game_info.players = player_list
            equations.db.session.commit()
    return flask.redirect(flask.url_for('show_game', nonce=room))
@equations.app.route("/game/<nonce>/", methods=['GET'])
def show_game(nonce):
    """Render the game page for room *nonce*.

    Chooses the Equations or On-Sets template based on the game's
    stored type; unknown types bounce back to the homepage.
    """
    if 'username' not in flask.session:
        flask.flash("Please log in before joining a game.")
        return flask.redirect(flask.url_for('show_index'))
    name = flask.session['username']
    room = nonce
    # Ensure that the game exists
    game_info = Game.query.filter_by(nonce=room).first()
    if game_info is None:
        flask.flash(f"The Room you tried to visit (ID of {room}) does not exist!")
        return flask.redirect(flask.url_for('show_index'))
    context = {
        "nonce": nonce,
        "name": name,
        "division": game_info.division if game_info.division else '',
    }
    # A missing gametype defaults to Equations for older rows.
    if game_info.gametype is None or game_info.gametype == 'eq':
        return flask.render_template("game.html", **context)
    if game_info.gametype == 'os':
        return flask.render_template("game_onsets.html", **context)
    flask.flash("Tried to visit a game of invalid type (type must be eq or os)")
    return flask.redirect(flask.url_for('show_index'))
# --- dataset metadata separator row removed (extraction artifact) ---
#!/usr/bin/python3
#
# Part of RedELK
#
# Authors:
# - Outflank B.V. / Mark Bergman (@xychix)
# - Lorenzo Bernardi (@fastlorenzo)
#
from modules.helpers import *
from config import interval, alarms
from iocsources import ioc_vt as vt
from iocsources import ioc_ibm as ibm
from iocsources import ioc_hybridanalysis as ha
import traceback
import logging
# Module metadata consumed by the RedELK framework when loading this check.
info = {
    'version': 0.1,
    'name': 'Test file hash against public sources',
    'alarmmsg': 'MD5 HASH SEEN ONLINE',
    'description': 'This check queries public sources given a list of md5 hashes.',
    'type': 'redelk_alarm', # Could also contain redelk_enrich if it was an enrichment module
    'submodule': 'alarm_filehash'
}
class Module():
def __init__(self):
self.logger = logging.getLogger(info['submodule'])
pass
    def run(self):
        """Execute the check and return a RedELK result dict.

        On failure the traceback is stored under ret['error'] and the
        (empty) base result is returned instead of raising.
        """
        # NOTE(review): initial_alarm_result comes from helpers' star import
        # and is mutated in place here — confirm it is a fresh dict per call.
        ret = initial_alarm_result
        ret['info'] = info
        ret['fields'] = ['@timestamp', 'host.name', 'user.name', 'ioc.type', 'file.name', 'file.hash.md5', 'c2.message', 'alarm.alarm_filehash']
        ret['groupby'] = ['file.hash.md5']
        try:
            report = self.alarm_check()
            ret['hits']['hits'] = report['hits']
            ret['mutations'] = report['mutations']
            ret['hits']['total'] = len(report['hits'])
        except Exception as e:
            stackTrace = traceback.format_exc()
            ret['error'] = stackTrace
            self.logger.exception(e)
            pass
        self.logger.info('finished running module. result: %s hits' % ret['hits']['total'])
        return(ret)
def alarm_check(self):
# This check queries public sources given a list of md5 hashes. If a hash was seen we set an alarm
q = 'c2.log.type:ioc AND NOT tags:alarm_filehash AND ioc.type:file'
alarmed_md5_q = {
"aggs": {
"interval_filter": {
"filter": {
"range": {
"alarm.last_checked": {
"gte":"now-%ds" % interval,
"lt":"now"
}
}
},
"aggs": {
"md5_interval": {
"terms": {
"field": "file.hash.md5"
}
}
}
},
"alarmed_filter": {
"filter": {
"terms": {
"tags": ["alarm_filehash"]
}
},
"aggs": {
"md5_alarmed": {
"terms": {
"field": "file.hash.md5"
}
}
}
}
}
}
report = {}
iocs = []
self.logger.debug('Running query %s' % q)
# FIRST WE GET ALL IOC's
i = countQuery(q, index='rtops-*')
if i >= 10000:
i = 10000
iocs = getQuery(q, i, index='rtops-*')
if type(iocs) != type([]):
iocs = []
self.logger.debug('found ioc: %s' % iocs)
# Then we get an aggregation of all md5 alarmed within the last 'interval' time
self.logger.debug('Running query %s' % alarmed_md5_q)
omd5 = rawSearch(alarmed_md5_q, index='rtops-*')
self.logger.debug(omd5['aggregations'])
already_checked = []
already_alarmed = []
# add md5 hashes that have been checked within the 'interval' in 'already_checked'
for h in omd5['aggregations']['interval_filter']['md5_interval']['buckets']:
already_checked.append(h['key'])
# add md5 hashes that have been alarmed previously in 'already_alarmed'
for h in omd5['aggregations']['alarmed_filter']['md5_alarmed']['buckets']:
already_alarmed.append(h['key'])
md5d = {}
md5s = []
md5ShouldCheck = {}
ival = timedelta(seconds=interval)
last_checked_max = (datetime.utcnow() - ival)
# Group all hits per md5 hash value
for ioc in iocs:
h = getValue('_source.file.hash.md5', ioc)
if h in md5d:
md5d[h].append(ioc)
else:
md5d[h] = [ioc]
should_check = True
# Check if the IOC has already been alarmed
if h in already_alarmed:
# Skip it
should_check = False
# Set the last checked date
addAlarmData(ioc, {}, info['submodule'], False)
# Tag the doc as alarmed
setTags(info['submodule'], [ioc])
# Check if the IOC has already been checked within 'interval'
if h in already_checked:
# Skip if for now
should_check = False
if h in md5ShouldCheck:
md5ShouldCheck[h] = should_check & md5ShouldCheck[h]
else:
md5ShouldCheck[h] = should_check
# self.logger.debug('Should check: %s' % md5ShouldCheck[h])
for hash in dict.copy(md5d):
# If we should not check the hash, remove it from the list
if hash in md5ShouldCheck and md5ShouldCheck[hash] == False:
self.logger.debug('[%s] md5 hash already checked within interval or already alarmed previously, skipping' % hash)
del md5d[hash]
# Create an array with all md5 hashes to send to the different providers
# we now have an aray with unique md5's to go test
for hash in md5d:
md5s.append(hash)
self.logger.debug('md5 hashes to check: %s' % md5s)
reportI = {}
# ioc VirusTotal
self.logger.debug('Checking IOC against VirusTotal')
t = vt.VT(alarms[info['submodule']]['vt_api_key'])
t.test(md5s)
reportI['VirusTotal'] = t.report
self.logger.debug('Results from VirusTotal: %s' % t.report)
# ioc IBM x-force
self.logger.debug('Checking IOC against IBM X-Force')
i = ibm.IBM(alarms[info['submodule']]['ibm_basic_auth'])
i.test(md5s)
reportI['IBM X-Force'] = i.report
# ioc Hybrid Analysis
self.logger.debug('Checking IOC against Hybrid Analysis')
h = ha.HA(alarms[info['submodule']]['ha_api_key'])
h.test(md5s)
reportI['Hybrid Analysis'] = h.report
# Will store mutations per hash (temporarily)
alarmedHashes = {}
# Loop through the engines
for engine in reportI.keys():
# Loop through the hashes results
for hash in reportI[engine].keys():
if type(reportI[engine][hash]) == type({}):
if reportI[engine][hash]['result'] == 'newAlarm':
# If hash was already alarmed by an engine
if hash in alarmedHashes:
alarmedHashes[hash][engine] = reportI[engine][hash]
else:
alarmedHashes[hash] = {
engine: reportI[engine][hash]
}
# Prepare the object to be returned
report = {
'mutations': {},
'hits': []
}
# Loop through all hashes
for hash in md5d:
# Loop through all related ES docs
for ioc in md5d[hash]:
# Hash has been found in one of the engines and should be alarmed
if hash in alarmedHashes.keys():
report['mutations'][ioc['_id']] = alarmedHashes[hash]
report['hits'].append(ioc)
# Hash was not found so we update the last_checked date
else:
self.logger.debug('md5 hash not alarmed, updating last_checked date: [%s]' % hash)
addAlarmData(ioc, {}, info['submodule'], False)
return(report)
| qx-775/redelk | elkserver/docker/redelk-base/redelkinstalldata/scripts/modules/alarm_filehash/module.py | module.py | py | 8,144 | python | en | code | 1 | github-code | 13 |
14322784088 | # Standard libs
import datetime, os, sqlite3
class Database():
    """
    Class representing the database and its methods to interact with it.

    Opens (creating the schema on first run) the SQLite file at
    data/boomerang.sqlite3 and keeps one connection and cursor around.
    """
    def __init__(self):
        # Checking if the DB already exists; if not, create the schema
        if not os.path.exists("data/boomerang.sqlite3"):
            self.create_db()
        self.base = sqlite3.connect("data/boomerang.sqlite3")
        self.base.row_factory = sqlite3.Row # having column names! cf https://stackoverflow.com/a/18788347
        self.cursor = self.base.cursor()

    def create_db(self):
        """
        Function creating the DB if it does not exist
        """
        base = sqlite3.connect("data/boomerang.sqlite3")
        cursor = base.cursor()
        cursor.execute('CREATE TABLE "people" ("id" INTEGER NOT NULL UNIQUE, "name" TEXT, "timezone" TEXT, PRIMARY KEY("id"))')
        cursor.execute('CREATE TABLE "reminders" ("id" INTEGER NOT NULL UNIQUE, "author" INTEGER NOT NULL, "date_creation" INTEGER NOT NULL, "date_next" INTEGER NOT NULL, "recurrence" TEXT, "recurrence_limit" INTEGER, "text" TEXT, "color" INTEGER, PRIMARY KEY("id" AUTOINCREMENT))')
        cursor.execute('CREATE INDEX "i_people_id" ON "people" ("id" ASC)')
        cursor.execute('CREATE INDEX "i_reminders_id" ON "reminders" ("id" ASC)')
        base.commit()
        base.close()

    def select_reminder(self, id):
        """
        Returns a specific reminder.

        Args:
            id (int): Reminder ID

        Returns:
            sqlite3.Row: Reminder

        Raises:
            IndexError: if no reminder has this ID
        """
        self.cursor.execute("SELECT * FROM reminders WHERE id=?", (id,))
        r = self.cursor.fetchone()
        if r is None:
            raise IndexError("Reminder does not exist in database")
        else:
            return r

    def count_reminders(self):
        """
        Counts reminders.

        Returns:
            int: Reminder count
        """
        self.cursor.execute("SELECT COUNT(*) AS len FROM reminders")
        return self.cursor.fetchone()["len"]

    def select_reminders_user(self, author):
        """
        Returns all the reminders belonging to someone.

        Args:
            author (discord.User): User requesting the select

        Returns:
            list: All reminders belonging to someone. Can be empty.
        """
        self.cursor.execute("SELECT * FROM reminders WHERE author=?", (author.id,))
        return self.cursor.fetchall()

    def select_user_timezone(self, author):
        """
        Returns the timezone of someone.

        Args:
            author (discord.User): User requesting the select

        Returns:
            str: IANA timezone

        Raises:
            IndexError: if the user is unknown
        """
        self.cursor.execute("SELECT * FROM people WHERE id=?", (author.id,))
        try:
            return self.cursor.fetchone()["timezone"]
        except TypeError:
            # fetchone() returned None (not subscriptable): unknown user
            raise IndexError("User does not exist in database")

    def select_reminders_now(self):
        """
        Returns all the reminders that must be fired now.

        Returns:
            list: List of reminders. Can be empty.
        """
        # Compare against the end of the current minute so every reminder
        # due during this minute fires.
        now = datetime.datetime.now(datetime.timezone.utc).replace(second=59, microsecond=0)
        self.cursor.execute("SELECT * FROM reminders WHERE date_next < ?", (int(datetime.datetime.timestamp(now)),))
        reminders = []
        for reminder in self.cursor.fetchall():
            reminders.append({
                "id": reminder["id"],
                "author": reminder["author"],
                "date_creation": reminder["date_creation"],
                "text": reminder["text"],
                "color": reminder["color"]
            })
        return reminders

    def insert_reminder(self, author, date_creation, date_next, text, color, recurrence=None, recurrence_limit=None):
        """
        Insert a new reminder in database

        Args:
            author (discord.User): Reminder author
            date_creation (datetime): When the reminder was created
            date_next (datetime): When is the next occurrence of the reminder
            text (str): Text of the reminder
            color (Discord Color object): Color associated with the reminder
            recurrence (str, optional): Expression ("4d", "3h"...) telling how often the reminder should be fired. If None, no recurrence.
            recurrence_limit (int, optional): For recurrence-enabled events: how many times the reminder should be fired. If None, no limit. (Not implemented yet)
        """
        # Store integer UNIX timestamps, consistent with
        # update_reminder_recurrence() (previously floats were stored here).
        self.cursor.execute("INSERT INTO reminders (author, date_creation, date_next, recurrence, recurrence_limit, text, color) VALUES (?, ?, ?, ?, ?, ?, ?)", (author.id, int(datetime.datetime.timestamp(date_creation)), int(datetime.datetime.timestamp(date_next)), recurrence, recurrence_limit, text, color))
        self.base.commit()

    def update_reminder_recurrence(self, id, date_next, recurrence_limit):
        """
        Updates a reminder to set its next occurrence.

        Args:
            id (int): Reminder ID
            date_next (datetime.datetime): Datetime object of the next reminder datetime
            recurrence_limit (int): How many times should the reminder be fired again. (Not implemented yet)
        """
        self.cursor.execute("UPDATE reminders SET date_next=?, recurrence_limit=? WHERE id=?", (int(date_next.timestamp()), recurrence_limit, id))
        self.base.commit()

    def update_user_timezone(self, author, tz):
        """
        Updates the timezone of someone in the database.
        If user does not exist, creates it as well

        Args:
            author (Discord User/Member object): Requestor
            tz (pytz timezone): pytz timezone
        """
        self.cursor.execute("SELECT * FROM people WHERE id=?", (author.id,))
        if self.cursor.fetchone() is not None:
            self.cursor.execute("UPDATE people SET name=?, timezone=? WHERE id=?", ((author.name+'#'+str(author.discriminator)), str(tz), author.id))
        else:
            self.cursor.execute("INSERT INTO people VALUES (?, ?, ?)", (author.id, author.name+'#'+str(author.discriminator), str(tz)))
        self.base.commit()

    def delete_reminder(self, id):
        """
        Delete a reminder from the database.

        Args:
            id (int): Reminder ID
        """
        self.cursor.execute("DELETE FROM reminders WHERE id=?", (id,))
        self.base.commit()
| Ailothaen/boomerang | models.py | models.py | py | 6,420 | python | en | code | 1 | github-code | 13 |
16867454231 | import socket
import sys
# Create a UDP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

server_address = ('localhost', 10000)
# sendto() requires bytes on Python 3, so keep the payload as a bytes literal.
message = b'This is the message. It will be repeated.'

try:

    # Send data (Python 3 print(); the original used Python-2-only `print >>`)
    print('sending "%s"' % message, file=sys.stderr)
    sent = sock.sendto(message, server_address)

    # Receive response
    print('waiting to receive', file=sys.stderr)
    data, server = sock.recvfrom(4096)
    print('received "%s"' % data, file=sys.stderr)

finally:
    # Always release the socket, even if send/receive failed.
    print('closing socket', file=sys.stderr)
    sock.close()
| utra-robosoccer/soccer-embedded | Development/Ethernet/f7ethtut/chuck_data.py | chuck_data.py | py | 541 | python | en | code | 32 | github-code | 13 |
40787810982 | vals = {}
for line in open('Day 05.input'):
x0, y0, x1, y1 = [int(c) for p in line.split(' -> ') for c in p.split(',')]
if x0 == x1:
for i in range(min(y0, y1), max(y0, y1)+1):
vals[x0, i] = vals.get((x0, i), 0) + 1
elif y0 == y1:
for i in range(min(x0, x1), max(x0, x1)+1):
vals[i, y0] = vals.get((i, y0), 0) + 1
print(sum(x > 1 for x in vals.values()))
| Mraedis/AoC2021 | Day 05/Day 05.1.py | Day 05.1.py | py | 408 | python | en | code | 1 | github-code | 13 |
42257091778 | import trainer, Course, student, assigment, Student_per_course, Trainer_per_course, Assignments_per_course, Assignments_per_student
# Module-level record collections shared by all menu functions below.
trainer_list = trainer.Trainer_records()
assigment_list = assigment.assigment_record()
course_list = Course.Course_records()
student_list = student.Student_records()
# course menu!
def course_menu():
    """Interactive course sub-menu: list records or add a new course."""
    while True:
        print("Course Menu")
        print("1.Get")
        print("2.Set")
        print("3.Exit")
        choice = int(input("Enter your choice:"))
        if choice == 3:
            break
        if choice == 1:
            course_list.print_course_records()
        elif choice == 2:
            new_title = input("Enter a title:")
            new_stream = input("Enter a stream:")
            new_type = input("Enter a type:")
            start = input("Enter start date")
            end = input("Enter end date:")
            course_list.add_course_record(new_title, new_stream, new_type, start, end)
        else:
            print("Wrong Choice! Select between 1 and 3")
# trainer menu!
def trainer_menu():
    """Interactive trainer sub-menu: list records or add a new trainer."""
    while True:
        print("Trainer Menu")
        print("1.Get")
        print("2.Set")
        print("3.Exit")
        choice = int(input("Enter your choice:"))
        if choice == 3:
            break
        if choice == 1:
            trainer_list.print_trainer_records()
        elif choice == 2:
            fname = input("Enter a first name:")
            lname = input("Enter a last name:")
            subject_name = input("Enter a subject:")
            trainer_list.add_trainer_record(fname, lname, subject_name)
        else:
            print("Wrong Choice! Select between 1 and 3")
# student menu!
def student_menu():
    """Interactive student sub-menu: list records or add a new student."""
    while True:
        print("Student Menu")
        print("1.Get")
        print("2.Set")
        print("3.Exit")
        choice = int(input("Enter your choice:"))
        if choice == 3:
            break
        if choice == 1:
            student_list.print_student_records()
        elif choice == 2:
            fname = input("Enter a first name:")
            lname = input("Enter a last name:")
            born = input("Enter a date of birth:")
            fees = input("Enter tuition fees:")
            student_list.add_student_record(fname, lname, born, fees)
        else:
            print("Wrong Choice! Select between 1 and 3")
# assigment menu!
def assigment_menu():
    """Interactive assignment sub-menu: list records or add a new assignment."""
    while True:
        print("Assigment Menu")
        print("1.Get")
        print("2.Set")
        print("3.Exit")
        choice = int(input("Enter your choice:"))
        if choice == 3:
            break
        if choice == 1:
            assigment_list.print_assigment_records()
        elif choice == 2:
            new_title = input("Enter a title:")
            new_description = input("Enter a description")
            submitted = input("Enter the submitted date:")
            oral = input("Enter the oral mark:")
            total = input("Enter the total mark:")
            assigment_list.add_assigment_record(new_title, new_description, submitted, oral, total)
        else:
            print("Wrong Choice! Select between 1 and 3")
#Student_per_cource_menu
# Courses with their enrolled students, built interactively at runtime.
student_per_course_list = []

def student_per_course_menu():
    """Sub-menu for attaching students to a course and listing the result."""
    while True:
        print("Student per course menu")
        print("1.Get")
        print("2.Set")
        print("3.Exit")
        choice = int(input("Enter your choice:"))
        if choice == 3:
            break
        if choice == 1:
            for course in student_per_course_list:
                print(f"The course with title:{course.title},stream: {course.stream} and type: {course.type} has this students:\n")
                course.print_students()
        elif choice == 2:
            new_course = Student_per_course.Course.add_course()
            answer = input("Do you like to add student per course? 'y'/'n'")
            while answer in ('y', 'Y'):
                new_course.add_student(Student_per_course.Course.add_Student())
                answer = input("Do you like to add student per course? 'y'/'n'")
            student_per_course_list.append(new_course)
        else:
            print("Wrong Choice! Select between 1 and 3")
#trainer per course menu
# Courses with their assigned trainers, built interactively at runtime.
trainer_per_course_list = []

def trainer_per_course_menu():
    """Sub-menu for attaching trainers to a course and listing the result."""
    while True:
        print("Trainer per course menu")
        print("1.Get")
        print("2.Set")
        print("3.Exit")
        choice = int(input("Enter your choice:"))
        if choice == 3:
            break
        if choice == 1:
            for course in trainer_per_course_list:
                print(f"The course with title:{course.title},stream: {course.stream} and type: {course.type} has this trainers:\n")
                course.print_trainer()
        elif choice == 2:
            new_course = Trainer_per_course.Course.add_course()
            answer = input("Do you like to add trainer per course? 'y'/'n'")
            while answer in ('y', 'Y'):
                new_course.add_trainer(Trainer_per_course.Course.add_Trainer())
                answer = input("Do you like to add trainer per course? 'y'/'n'")
            trainer_per_course_list.append(new_course)
        else:
            print("Wrong Choice! Select between 1 and 3")
#assignments per course menu
# Courses with their assignments, built interactively at runtime.
assignments_per_course_list = []

def assignments_per_course_menu():
    """Sub-menu for attaching assignments to a course and listing the result."""
    while True:
        print("Assignments per course menu")
        print("1.Get")
        print("2.Set")
        print("3.Exit")
        choice = int(input("Enter your choice:"))
        if choice == 3:
            break
        if choice == 1:
            for course in assignments_per_course_list:
                print(f"The course with title:{course.title},stream: {course.stream} and type: {course.type} has this assignments:\n")
                course.print_assignments()
        elif choice == 2:
            new_course = Assignments_per_course.Course.add_course()
            answer = input("Do you like to add trainer per course? 'y'/'n'")
            while answer in ('y', 'Y'):
                new_course.add_assignments(Assignments_per_course.Course.add_Assignments())
                answer = input("Do you like to add trainer per course? 'y'/'n'")
            assignments_per_course_list.append(new_course)
        else:
            print("Wrong Choice! Select between 1 and 3")
#assignments per student menu
# Students with their assignments, built interactively at runtime.
assignments_per_student_list = []

def assignments_per_student_menu():
    """Sub-menu for attaching assignments to a student and listing the result."""
    while True:
        print("Assignments per student menu")
        print("1.Get")
        print("2.Set")
        print("3.Exit")
        choice = int(input("Enter your choice:"))
        if choice == 3:
            break
        if choice == 1:
            for pupil in assignments_per_student_list:
                print(f"The student:{pupil.first_name} {pupil.last_name} has this assignments:\n")
                pupil.print_assignments()
        elif choice == 2:
            new_student = Assignments_per_student.Students.add_student()
            answer = input("Do you like to add assignment per student? 'y'/'n'")
            while answer in ('y', 'Y'):
                new_student.add_assignments(Assignments_per_student.Students.add_Assignments())
                answer = input("Do you like to add assignment per student? 'y'/'n'")
            assignments_per_student_list.append(new_student)
        else:
            print("Wrong Choice! Select between 1 and 3")
#main menu!
# Top-level menu loop: dispatch to the sub-menu matching the user's choice.
while True:
    print("Main menu")
    print("1.Course")
    print("2.Trainer")
    print("3.Student")
    print("4.Assigment")
    print("5.Student per course")
    print("6.Trainer per course")
    print("7.Assignments per course")
    print("8.Assignments per student")
    print("9.Exit")
    choice = int(input("Enter your choice:"))
    if choice == 9:
        break
    handlers = {
        1: course_menu,
        2: trainer_menu,
        3: student_menu,
        4: assigment_menu,
        5: student_per_course_menu,
        6: trainer_per_course_menu,
        7: assignments_per_course_menu,
        8: assignments_per_student_menu,
    }
    if choice in handlers:
        handlers[choice]()
    else:
        print("Wrong Choice! Select between 1 and 5")
| tatoulis/CRUD_python_term_menu | crud_term_menu/origin.py | origin.py | py | 8,684 | python | en | code | 0 | github-code | 13 |
14819758793 | import os
import subprocess
from fastapi.templating import Jinja2Templates
from fastapi.staticfiles import StaticFiles
from fastapi.responses import HTMLResponse, RedirectResponse
from fastapi.requests import Request
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
# from gradio_client import Client
import gradio as gr
from gradioapp import demo
# # Connect
# client = Client("abidlabs/en2fr")
# # client = Client("https://bec81a83-5b5c-471e.gradio.live")
# # Predict
# result = client.predict("Hello")
# # def acapellify(audio_path):
# # result = client.predict(audio_path, api_name="/predict")
# # return result[0]
# print(result)
# Route under which the Gradio demo app is mounted onto this FastAPI app.
CUSTOM_PATH = "/gradioapp"

app = FastAPI()

# Origins allowed to make cross-origin requests (local React dev server).
origins = [
    "http://localhost:3000",
    "localhost:3000"
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"]
)

# Sets the templates directory to the `build` folder from `npm run build`
# this is where you'll find the index.html file.
templates = Jinja2Templates(directory="../frontend/build")
class SPAStaticFiles(StaticFiles):
    """Static-file app that serves the root document for unknown paths,
    so client-side SPA routes do not 404."""

    async def get_response(self, path: str, scope):
        resp = await super().get_response(path, scope)
        if resp.status_code == 404:
            # Fall back to the directory root and let the SPA router
            # handle the requested path.
            resp = await super().get_response('.', scope)
        return resp
# Mounts the `static` folder within the `build` folder to the `/static` route.
app.mount('/static', SPAStaticFiles(directory="../frontend/build/static", html=True), 'static')

@app.get("/")
async def react_app(req: Request):
    # Serve the compiled React index page for the root route.
    return templates.TemplateResponse('index.html', { 'request': req })

# Attach the Gradio demo under CUSTOM_PATH; returns the combined ASGI app.
app = gr.mount_gradio_app(app, demo, path=CUSTOM_PATH)
| Myangsun/Streetview-app | backend/app/api.py | api.py | py | 1,746 | python | en | code | 0 | github-code | 13 |
23966520169 | # template for "Stopwatch: The Game"
import simplegui
# define global variables
tick_interval = 0  # elapsed time in tenths of a second
stop_count = 0  # number of times "Stop" was pressed while the watch ran
win_count = 0  # stops that landed exactly on a whole second
stopwatch_running = False  # True while the timer is running
# define helper function format that converts time
# in tenths of seconds into formatted string A:BC.D
def format(t):
    """Render t, a time in tenths of seconds, as A:BC.D
    (minutes, zero-padded seconds, tenths)."""
    minutes, rest = divmod(t, 600)
    seconds, tenths = divmod(rest, 10)
    if seconds < 10:
        return str(minutes) + ":0" + str(seconds) + "." + str(tenths)
    return str(minutes) + ":" + str(seconds) + "." + str(tenths)
# define event handlers for buttons; "Start", "Stop", "Reset"
def start_handler():
    """Start the timer and mark the stopwatch as running ("Start" button)."""
    global stopwatch_running
    timer.start()
    stopwatch_running = True
def stop_handler():
    """Stop the timer ("Stop" button); if the watch was running, count the
    stop and award a win when it stopped exactly on a whole second."""
    global stop_count, win_count, stopwatch_running
    timer.stop()
    if stopwatch_running:
        stop_count += 1
        stopwatch_running = False
        if (tick_interval % 10 == 0):
            # tenths digit is zero -> stopped on a whole second
            win_count += 1
    frame.set_draw_handler(draw)
def reset_handler():
    """Reset elapsed time and both counters ("Reset" button).

    NOTE(review): does not stop the timer, so the watch keeps ticking if
    Reset is pressed while running.
    """
    global tick_interval, stop_count, win_count
    tick_interval = 0
    stop_count = 0
    win_count = 0
# define event handler for timer with 0.1 sec interval
def tick():
    """Timer callback (every 0.1 s): advance the elapsed time and redraw."""
    global tick_interval
    tick_interval += 1
    frame.set_draw_handler(draw)
# define draw handler
def draw(canvas):
    """Draw the formatted elapsed time and the wins/stops score."""
    canvas.draw_text(format(tick_interval), [40, 110], 50, "White")
    canvas.draw_text(str(win_count)+"/"+str(stop_count), [125, 40], 40, "Green")
# create frame
frame = simplegui.create_frame("Stopwatch Game", 200, 200)
# register event handlers
frame.add_button("Start", start_handler)
frame.add_button("Stop", stop_handler)
frame.add_button("Reset", reset_handler)
timer = simplegui.create_timer(100, tick)
# start frame
frame.start()
# Please remember to review the grading rubric
| segolily04/Intro_to_Interactive_Programming_With_Python | stopwatch_mini_project_4.py | stopwatch_mini_project_4.py | py | 1,804 | python | en | code | 0 | github-code | 13 |
41619179886 | import pygame
from settings import *
from button import Button
class UI:
    """Heads-up display: health bar, stamina bar, coin counter and score."""
    def __init__(self, surface) -> None:
        #setup
        self.display_surface = surface  # target surface the HUD is drawn onto

        #health
        self.health_bar = pygame.image.load(".//JUEGO 2//graphics//ui//health_bar.png").convert_alpha()
        self.health_bar_topleft = (54,39)  # where the red fill starts inside the bar image
        self.bar_max_width = 152  # fill width in pixels at full health
        self.bar_heigth = 4

        #stamina
        self.stamina_bar = pygame.image.load(".//JUEGO 2//graphics//ui//stamina_bar.png").convert_alpha()
        self.stamina_bar = pygame.transform.scale(self.stamina_bar, (215,30))
        self.stamina_bar_topleft = (54,69)  # where the blue fill starts inside the bar image
        self.stamina_max_width = 152  # fill width in pixels at full stamina
        self.stamina_height = 12

        #coins
        self.coin = pygame.image.load(".//JUEGO 2//graphics//ui//coin.png").convert_alpha()
        self.coin_rect = self.coin.get_rect(topleft = (50,111))

        #font
        self.font = pygame.font.Font(".//JUEGO 2//graphics//ui//ARCADEPI.TTF", 30)

        #score
        self.score = pygame.image.load(".//JUEGO 2//graphics//ui//score.png")
        self.score = pygame.transform.scale(self.score, (200, 90))
        self.score_rect = self.score.get_rect(topleft = (700,0))

        #volume
        self.volume_button = Button((1560,10), self.display_surface,".//JUEGO 2//graphics//ui//volume_on.png")

    def show_health(self, current_health, max_health):
        """Draw the health bar with a red fill proportional to current_health."""
        self.display_surface.blit(self.health_bar, (20,10))
        current_health_ratio = current_health / max_health
        current_bar_width = self.bar_max_width * current_health_ratio
        health_bar_rect = pygame.Rect((self.health_bar_topleft), (current_bar_width,self.bar_heigth))
        pygame.draw.rect(self.display_surface, C_DEEP_RED, health_bar_rect)

    def show_stamin(self, current_stamina, max_stamina):
        """Draw the stamina bar with a blue fill proportional to current_stamina.

        NOTE(review): method name has a typo ("stamin"); kept for existing callers.
        """
        self.display_surface.blit(self.stamina_bar, (5,60))
        current_stamina_ratio = current_stamina / max_stamina
        current_stamina_width = self.stamina_max_width * current_stamina_ratio
        stamina_bar_rect = pygame.Rect((self.stamina_bar_topleft),(current_stamina_width, self.stamina_height))
        pygame.draw.rect(self.display_surface, C_BLUE_2, stamina_bar_rect)

    def show_coins(self, amount):
        """Draw the coin icon with the collected amount next to it."""
        self.display_surface.blit(self.coin, self.coin_rect)
        coin_amount_surface = self.font.render(str(amount), False, C_PURPLE)
        coin_amount_rect = coin_amount_surface.get_rect(midleft = (self.coin_rect.right + 4,self.coin_rect.centery))
        self.display_surface.blit(coin_amount_surface, coin_amount_rect)

    def show_score(self, amount):
        """Draw the score panel with the current score rendered onto it."""
        self.display_surface.blit(self.score, self.score_rect)
        score_amount_surface = self.font.render(str(amount), False, C_PURPLE)
        score_amount_rect = score_amount_surface.get_rect(midleft = (self.score_rect.right -110, self.score_rect.centery))
        self.display_surface.blit(score_amount_surface, score_amount_rect)
| AgustinSande/sandeAgustin-pygame-tp-final | codefiles/ui.py | ui.py | py | 3,077 | python | en | code | 0 | github-code | 13 |
44464470701 | #!/usr/bin/env python3
import itertools
import json
import neptune
import yaml
from transformer import *
from reader import *
def load_list_of_params(path):
    """Load a JSON parameter file and return its names and items lists."""
    with open(path, 'r') as f:
        data = json.load(f)
    return data['parameters'], data['items']
def load_params(path):
    """ Get input for parameter search and return grid-search inputs

    Returns:
        List with parameter names and iterator over all combinations of parameters.
    """
    with open(path, 'r') as f:
        conf = yaml.safe_load(f)
    names = conf['parameters']
    combos = itertools.product(*(conf[name] for name in names))
    # Lazily turn each value combination into a {name: value} dict.
    return names, (dict(zip(names, values)) for values in combos)
def use_param(p):
    """ Should this set of parameters be used (has it not been used before)? """
    with open('log/used_params.txt') as f:
        seen = {line.strip() for line in f}
    return str(p) not in seen
def log_param(p):
    """ Log this set of parameters as used """
    with open('log/used_params.txt', 'a') as f:
        f.write(str(p) + '\n')
if __name__ == "__main__":
    # def __init__(self, vocab_size, num_layers=3, d_model=50, dff=512, num_heads=5, dropout_rate=0.1, reader=None):
    parameters, it = load_params('params.yaml')
    neptune.init('dialogue-transformer-e2e/runs')

    # Configure the CamRest676 dataset before building models.
    ds = "tsdf-camrest"
    cfg.init_handler(ds)
    cfg.dataset = ds.split('-')[-1]
    reader = CamRest676Reader()

    for params in it: # iterate over all possible parameter combinations
        # Skip combinations already recorded in log/used_params.txt,
        # so interrupted grid searches can be resumed.
        if not use_param(params):
            continue
        experiment = neptune.create_experiment(name='parameter_search', params=params)
        print(params)
        model = SeqModel(vocab_size=cfg.vocab_size, reader=reader, num_layers=params['num_layers'], dff=params['dim_ff'], num_heads=params['num_heads'] )
        model.train_model(log=True, max_sent=30, max_turns=30)
        model.evaluation(verbose=True, log=True, max_sent=300, max_turns=30, use_metric=True) # the dev set has 135 dialogs
        log_param(params)
        experiment.stop()
| pixelneo/dialogue-transformer-e2e | implementation/tf/runner.py | runner.py | py | 2,179 | python | en | code | 5 | github-code | 13 |
22853548802 | import sys
import random
DEBUG = True # False when you submit to kattis

# function which queries the next set of neighbors from kattis
if DEBUG:
    N = 21000000 # the number of nodes
    eps = 0.1 # desired accuracy
    maxWeight = 3 # largest weight in our graph
    # we will simulate a graph that is just one large cycle
    # you could add some other types of graphs for debugging/testing your program
    def getNeighbors(node):
        """Simulated neighborhood query: one big cycle with unit weights."""
        leftNeighbor = (node-1) % N
        rightNeighbor = (node+1) % N
        weight = 1
        return [( leftNeighbor, weight), ( rightNeighbor, weight)]
else:
    N = int(sys.stdin.readline()) # read number of nodes from the input
    eps = float(sys.stdin.readline()) - 1 # we read the desired approximation
    maxWeight = int(sys.stdin.readline()) # read the largest weight of the graph
    def getNeighbors(node):
        """Query kattis for the neighbors of `node` via stdin/stdout."""
        # ask kattis for the next node
        print(node)
        sys.stdout.flush()
        # read the answer we get from kattis
        line = sys.stdin.readline().split()
        # the answer has the form 'numNeighbors neighbor1 weight1 neighbor2 weight2 ...'
        # we want to have a list of the form:
        #[ (neighbor1, weight1), (neighbor2, weight2) , ...]
        return [ (int(line[i]), int(line[i+1]) ) for i in range(1, len(line), 2)]

# Now we try to estimate the size of a minimum spanning forest.
# Note that the example below is completely wrong and will not give a correct result!
# The example is just here to show how to use the function 'getNeighbors'

# We now compute the average edge weight of 100 random neighborhoods
# NOTE(review): range(1, 100) actually samples 99 nodes, not 100.
sumOfWeights = 0
numEdges = 0
for i in range(1, 100):
    node = random.randint(0, N-1) # sample a random node
    neighbors = getNeighbors(node) # get the list of neighbors and the corresponding weights
    for neighbor, weight in neighbors:
        sumOfWeights += weight
        numEdges += 1
averageEdgeWeight = 1.0 * sumOfWeights / numEdges

# A spanning tree always consists of N-1 edges,
# so one could think a minimum spanning tree would have roughly the following weight.
# (Note: This idea is wrong because a MINIMUM spanning tree will try to use only small edges)
weightOfSpanningTree = averageEdgeWeight * (N-1)

# print the answer
print('end ' + str(weightOfSpanningTree))
sys.stdout.flush()
| pedroggbcampos/ADA-proj | src/spanningforest-example.py | spanningforest-example.py | py | 2,242 | python | en | code | 1 | github-code | 13 |
22738478576 |
from keras.preprocessing.sequence import pad_sequences as pad
def tokenize(
    dataset,
    sent,
    anno,
):
    """Sub-tokenize each word of a sentence and align its label ids.

    A word's label id is attached to its first sub-token; every following
    sub-token receives dataset.criterion_ignored_la so the loss skips it.
    """
    token_ids = []
    label_ids = []
    for i, word in enumerate(sent):
        pieces = dataset.tokenizer.convert_tokens_to_ids(dataset.tokenizer.tokenize(word))
        token_ids += pieces
        label_ids += [dataset.tag_names.index(anno[i])]
        label_ids += [dataset.criterion_ignored_la] * (len(pieces) - 1)
    return {
        "tokenized_sent": token_ids,
        "tokenized_anno": label_ids,
    }
def pad_and_add_special_tokens(
    dataset,
    sent,
    anno,
):
    """Pad/truncate token and label ids to max_length-1, then wrap them
    with the CLS/SEP special tokens (and their label counterparts)."""
    specials = dataset.specials
    limit = dataset.max_length - 1
    sent = pad([sent], limit, truncating="post", padding="post", value=specials["pad_token_id"])[0].tolist()
    anno = pad([anno], limit, truncating="post", padding="post", value=specials["pad_token_la"])[0].tolist()
    # SEP goes into the first padding slot; with no padding it overwrites
    # the last position instead.
    if specials["pad_token_id"] in sent:
        eos_index = sent.index(specials["pad_token_id"]) + 1
    else:
        eos_index = -1
    sent = [specials["cls_token_id"]] + sent
    anno = [specials["cls_token_la"]] + anno
    sent[eos_index] = specials["sep_token_id"]
    anno[eos_index] = specials["sep_token_la"]
    return {
        "sent": sent,
        "anno": anno,
    }
39807063985 | import io
from django.test import TestCase
import unittest
from django.utils import timezone
from model_mommy import mommy
from rest_framework.test import APIClient
from api.models import MusicalWork
from api.reconcile import get_iswc_index, obj_params_count, perform_each_line
# These are unit tests no db operations are carried out within the test so are faster
class UnitTestCase(unittest.TestCase):
    """Pure unit tests for the reconcile helpers (no DB operations, fast)."""

    def test_get_iswc_index(self):
        """get_iswc_index returns the position of 'iswc' in a header row."""
        headers = ('iswc', 'title', 'contributors', )
        idx = get_iswc_index(headers)
        self.assertEqual(idx, 0)

        headers = ('title', '', 'iswc', 'contributors')
        idx = get_iswc_index(headers)
        self.assertEqual(idx, 2)

    def test_obj_params_count(self):
        """obj_params_count counts the non-empty fields of a MusicalWork."""
        obj = MusicalWork(source='abc', iswc='xd56uy', item_id=3)
        count = obj_params_count(obj)
        self.assertEqual(count, 3)

        obj = MusicalWork(item_id='')
        count = obj_params_count(obj)
        self.assertEqual(count, 0)

        obj = MusicalWork(source='', iswc='xd56uy', title='something', item_id='', )
        count = obj_params_count(obj)
        self.assertEqual(count, 2)

    def test_perform_each_line_with_existing_db_value(self):
        """A line whose ISWC already exists in the DB is skipped (None)."""
        iswc_db_check = True # to make db value exist
        obj = MusicalWork(source='abc', contributors='jpt', iswc='axbycz')
        key_item, batch = 'axbycz', 'rtyuxv'
        # Fixed: a stray trailing comma previously wrapped this tuple in
        # another tuple; perform_each_line expects the flat header row.
        headers = ('iswc', 'source', 'contributors', 'id', )
        created_on = timezone.now()
        values = ['axbycz', '', '', 4]
        rows = {'axbycz': obj}
        ret = perform_each_line(key_item, iswc_db_check, headers, batch, created_on, values, rows)
        self.assertIsNone(ret)

    def test_perform_each_line_with_existing_row_value(self):
        """A line whose ISWC already exists in the row cache is skipped."""
        obj = MusicalWork(source='abc', contributors='jpt', iswc='axbycz')
        key_item, batch = 'axbycz', 'rtyuxv'
        iswc_db_check = True
        # Fixed: removed the stray trailing comma (accidental nested tuple).
        headers = ('iswc', 'source', 'contributors', 'id', )
        created_on = timezone.now()
        values = ['axbycz', '', '', 4]
        rows = {'axbycz': obj} # to make row value exist for that iswc
        ret = perform_each_line(key_item, iswc_db_check, headers, batch, created_on, values, rows)
        self.assertIsNone(ret)

    def test_perform_each_line_with_no_row_no_db(self):
        """A brand-new ISWC produces a MusicalWork built from the values."""
        key_item, batch = 'axbycz', 'rtyuxv'
        iswc_db_check = False
        headers = ('iswc', 'source', 'contributors', 'id', )
        created_on = timezone.now()
        values = ['axbycz', '', '', 4]
        rows = {}
        ret = perform_each_line(key_item, iswc_db_check, headers, batch, created_on, values, rows)
        self.assertEqual(type(ret), MusicalWork)
        self.assertEqual(ret.iswc, 'axbycz')
        self.assertEqual(ret.contributors, '')
        self.assertEqual(ret.item_id, 4)

    def test_perform_each_line_with_less_value_and_previous_row(self):
        """Existing row values win over sparser values from the new line."""
        obj = MusicalWork(source='abc', contributors='jpt', iswc='axbycz', title='some title')
        key_item, batch = 'axbycz', 'rtyuxv'
        iswc_db_check = False
        headers = ('iswc', 'source', 'contributors', 'id', )
        created_on = timezone.now()
        values = ['axbycz', '', 'shyam', '4']
        rows = {'axbycz': obj}
        ret = perform_each_line(key_item, iswc_db_check, headers, batch, created_on, values, rows)
        self.assertEqual(type(ret), MusicalWork)
        self.assertEqual(ret.iswc, 'axbycz')
        self.assertEqual(ret.contributors, 'jpt')
        self.assertEqual(ret.item_id, None)

    def test_perform_each_line_with_no_previous_value(self):
        """Without a cached row, the new line's values are used as-is."""
        key_item, batch = 'axbycz', 'rtyuxv'
        iswc_db_check = False
        headers = ('iswc', 'source', 'contributors', 'id', )
        created_on = timezone.now()
        values = ['axbycz', '', 'shyam', 4]
        rows = {}
        ret = perform_each_line(key_item, iswc_db_check, headers, batch, created_on, values, rows)
        self.assertEqual(type(ret), MusicalWork)
        self.assertEqual(ret.iswc, 'axbycz')
        self.assertEqual(ret.contributors, 'shyam')
        self.assertEqual(ret.item_id, 4)
        self.assertEqual(ret.source, '')
# functional tests, these test cases are carried out with db operations and are slower than unit test
class APITestCase(TestCase):
    """Functional tests — these exercise real db operations, so they are
    slower than the unit tests above."""

    client = APIClient()

    def test_get_api(self):
        mommy.make(MusicalWork, _quantity=5)
        response = self.client.get('/work-single/')
        self.assertEqual(200, response.status_code)
        self.assertEqual(5, response.json().get('count'))

    def test_download_api(self):
        response = self.client.get('/work-single/?download=download')
        self.assertEqual(200, response.status_code)
| bishnusyangja/single_view | app/api/tests.py | tests.py | py | 4,651 | python | en | code | 0 | github-code | 13 |
72778108498 | '''
Created on 2013-6-7
@author: Yubin Bai
'''
if __name__ == '__main__':
N = 1000000
sieve = [True] * (N + 1)
sieve[0] = sieve[1] = False
results = []
for i in range(2, N):
if sieve[i] == True:
results.append(i)
for j in range(i * 2, N, i):
sieve[j] = False
print(len(results)) | yubinbai/Codejam | round1B 2008/numberSet/prototype.py | prototype.py | py | 352 | python | en | code | 8 | github-code | 13 |
7784924696 | """Univercidad interamericana de Panama
Sistemas de encuestas
Proyecto Final de Programacion de Computadoras 4
Integrantes:
Omar Gonzalez
Franklin Vanegas
Vladimir Batista
Grimaldo Castro
"""
from flask import Flask, flash, url_for, redirect, render_template, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
"""Creacion de base de datos"""
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///encuestas.sqlite3'
app.config['SECRET_KEY'] = 'uippc3'
db = SQLAlchemy(app)
"""Creacion de tabla en base de datos"""
class encuesta(db.Model):
    """Poll model: one question with two answer options and vote counters."""
    id = db.Column('id', db.Integer, primary_key=True)
    pregunta = db.Column(db.String(100))  # poll question
    opcion1 = db.Column(db.String(50))    # first answer option
    opcion2 = db.Column(db.String(50))    # second answer option
    voto1 = db.Column(db.Integer)         # votes for option 1
    voto2 = db.Column(db.Integer)         # votes for option 2
    cantidad = db.Column(db.Integer)      # total number of votes cast

    # Initialize a poll row; counters start at 0 (set by the caller).
    def __init__(self, pregunta, opcion1, opcion2, voto1,voto2,cantidad):
        self.pregunta = pregunta
        self.opcion1 = opcion1
        self.opcion2 = opcion2
        self.voto1 = voto1
        self.voto2 = voto2
        self.cantidad = cantidad
"""Llamado de pagina principal"""
@app.route('/')
def principal():
    """Render the main page listing every poll."""
    # BUG FIX: removed a leftover debug print that ran an extra
    # `encuesta.query.all()` on every request just to log it.
    return render_template('principal.html', encuesta=encuesta.query.all())
"""Llamado de pagina nueva encuesta
Modulo de nueva encuesta"""
@app.route('/nueva_encuesta/', methods=['GET', 'POST'])
def nueva_encuesta():
    """Create a new poll from the submitted form (question + two options)."""
    if request.method == 'POST':
        if not request.form['pregunta']:
            flash('Por favor debe introducir una pregunta', 'error')
        elif not request.form['opcion1']:
            # BUG FIX: the original checked `not opcion1 or not opcion2` here,
            # which made the dedicated "opcion 2" branch below unreachable.
            flash('Debes colocar dos opciones,Error')
        elif not request.form['opcion2']:
            flash('Debe introducir la opcion 2!')
        else:
            # New polls start with zero votes on both options.
            data_ = encuesta(request.form['pregunta'], request.form['opcion1'],
                             request.form['opcion2'], 0, 0, 0)
            db.session.add(data_)
            db.session.commit()
            flash('Se crea encuesta exitosamente!')
            return redirect(url_for('principal'))
    return render_template('nueva_encuesta.html')
"""Llamado de pagina votacion"""
@app.route('/votar/', methods=['GET', 'POST'])
def votar():
    """Render the voting page for the poll selected on the main page."""
    if request.method == 'POST':
        poll_id = request.form['id']
        # BUG FIX: pass the already-fetched row instead of querying twice,
        # and keep the render inside the POST branch — on a plain GET the
        # original raised UnboundLocalError on `opc`.
        return render_template('votacion.html', u=encuesta.query.get(poll_id))
    # A GET request carries no selected poll; send the visitor back home.
    return redirect(url_for('principal'))
"""Modulo de contador de las encuestas"""
@app.route('/conteo/', methods=['GET', 'POST'])
def conteo():
    """Register one vote for option 'a' or 'b' of a poll and update totals."""
    if request.method == 'POST':
        opc = request.form['opc']
        # Renamed from `id` (shadowed the builtin); one lookup instead of
        # duplicating the whole increment block per option.
        poll = encuesta.query.get(request.form['id'])
        if opc == 'a':
            poll.voto1 = int(poll.voto1) + 1
        else:
            poll.voto2 = int(poll.voto2) + 1
        poll.cantidad = int(poll.cantidad) + 1
        db.session.commit()
    return redirect(url_for('principal'))
"""Inicio de programa"""
if __name__ == '__main__':
    # BUG FIX: create_all() must run BEFORE the blocking app.run() call;
    # the original placed it after, so tables were never created while the
    # server was actually serving requests.
    db.create_all()
    app.run()
| grimaldom/Mini-encuestas | __init__.py | __init__.py | py | 3,424 | python | es | code | 0 | github-code | 13 |
70852488018 | from aocd import data, submit
for line in data.splitlines():
line = line.strip()
sum1 = 0
sum2 = 0
for i in range(len(line)):
if line[i] == line[(i + 1) % len(line)]:
sum1 += int(line[i])
if line[i] == line[(i + int(len(line) / 2)) % len(line)]:
sum2 += int(line[i])
submit(sum1, part='a')
submit(sum2, part='b')
| charvey/advent-of-code | 2017/01.py | 01.py | py | 378 | python | en | code | 0 | github-code | 13 |
class Solution:
    def largestWordCount(self, messages: List[str], senders: List[str]) -> str:
        """Return the sender with the largest total word count; ties are
        broken by the lexicographically largest sender name.

        messages[i] was sent by senders[i].
        """
        totals = {}
        for message, sender in zip(messages, senders):
            totals[sender] = totals.get(sender, 0) + len(message.split())
        best = max(totals.values())
        # max() on strings already picks the lexicographically largest name,
        # which is exactly the required tie-break.  The original performed a
        # redundant sort-by-length that its final sort overwrote anyway.
        return max(name for name, count in totals.items() if count == best)
| KillerStrike17/CP-Journey | LeetCode/BiWeekly Contest/Contest 79/2284.py | 2284.py | py | 688 | python | en | code | 0 | github-code | 13 |
74970636818 | from typing import Tuple
from typing import List
from typing import Optional
from typing import Dict
from typing import Union
import pandas as pd
import geopandas as gpd
from osmgt.helpers.logger import Logger
from osmgt.apis.nominatim import NominatimApi
from osmgt.apis.overpass import OverpassApi
from osmgt.helpers.global_values import network_queries
from osmgt.helpers.global_values import epsg_4326
from osmgt.helpers.global_values import out_geom_query
from shapely.geometry import Point
from shapely.geometry import LineString
from shapely.geometry import Polygon
from shapely.geometry import box
from osmgt.helpers.global_values import osm_url
from osmgt.helpers.misc import chunker
class ErrorOsmGtCore(Exception):
    """Generic OsmGt core error (unknown location, unsupported geometry...)."""
    pass


class IncompatibleFormat(Exception):
    """Raised when input data does not match the expected format."""
    pass


class EmptyData(Exception):
    """Raised when a query or a build step yields no features."""
    pass
class OsmGtCore(Logger):
    """Shared base for OsmGt components.

    Resolves a study area either from a location name (via Nominatim) or
    from a bounding box, runs Overpass queries, and converts the returned
    OSM elements into a GeoDataFrame (EPSG:4326).
    """

    __slots__ = (
        "_study_area_geom",  # duplicate entry removed (was listed twice)
        "_output_data",
        "_bbox_value",
        "_bbox_mode",
        "_location_id",
    )

    _QUERY_ELEMENTS_FIELD: str = "elements"

    # NOTE(review): this double-underscore name mangles to
    # _OsmGtCore__USELESS_COLUMNS, so a subclass defining its own
    # __USELESS_COLUMNS will NOT override it here — confirm this is intended.
    __USELESS_COLUMNS: List = []

    # Nominatim returns relation ids; Overpass area ids are offset by 3.6e9.
    _NOMINATIM_DEFAULT_ID: int = 3600000000
    _NOMINATIM_OSM_ID_FIELD: str = "osm_id"
    _NOMINATIM_NUMBER_RESULT: int = 1  # FIX: was wrongly annotated as str
    _NOMINATIM_GEOJSON_FIELD: str = "geojson"

    _DEFAULT_NAN_VALUE_TO_USE: str = "None"

    _GEOMETRY_FIELD: str = "geometry"
    _LAT_FIELD: str = "lat"
    _LNG_FIELD: str = "lon"

    _TOPO_FIELD: str = "topo_uuid"

    _FEATURE_TYPE_OSM_FIELD: str = "type"
    _PROPERTIES_OSM_FIELD: str = "tags"
    _ID_OSM_FIELD: str = "id"
    _OSM_URL_FIELD: str = "osm_url"
    _ID_DEFAULT_FIELD: str = "id"

    # Overridden by subclasses (e.g. "way"/"node", "LineString"/"Point").
    _FEATURE_OSM_TYPE: Optional[str] = None
    _OUTPUT_EXPECTED_GEOM_TYPE: Optional[str] = None

    def __init__(self) -> None:
        super().__init__()

        self._study_area_geom: Optional[Polygon] = None
        self._output_data: Optional[Union[gpd.geodataframe, List[Dict]]] = None
        self._bbox_value: Optional[Tuple[float, float, float, float]] = None
        self._bbox_mode: bool = False
        # FIX: previously never initialized; accessing it before
        # from_location() raised AttributeError.
        self._location_id: Optional[int] = None

    def from_location(self, location_name: str, *args) -> None:
        """Resolve the study area from a location name via Nominatim.

        Raises ErrorOsmGtCore when no location is found; if several are
        found, the first is used (a warning is logged).
        """
        self.logger.info(f"From location: {location_name}")
        self.logger.info("Loading data...")

        location_found = list(
            NominatimApi(
                self.logger, q=location_name, limit=self._NOMINATIM_NUMBER_RESULT
            ).data()
        )
        if len(location_found) == 0:
            raise ErrorOsmGtCore("Location not found!")
        elif len(location_found) > 1:
            self.logger.warning(
                f"Multiple locations found for {location_name} ; the first will be used"
            )

        location_id = location_found[0][self._NOMINATIM_OSM_ID_FIELD]
        self._study_area_geom = Polygon(
            location_found[0][self._NOMINATIM_GEOJSON_FIELD]["coordinates"][0]
        )
        self._location_id = self._location_osm_default_id_computing(location_id)

    def from_bbox(self, bbox_value: Tuple[float, float, float, float]) -> None:
        """Define the study area from a (min_x, min_y, max_x, max_y) bbox."""
        self._bbox_mode = True
        self.logger.info(f"From bbox: {bbox_value}")
        self.logger.info("Loading data...")

        self._study_area_geom = box(*bbox_value, ccw=True)
        # reordered because Nominatim/Overpass expect (south, west, north, east)
        self._bbox_value = (bbox_value[1], bbox_value[0], bbox_value[3], bbox_value[2])

    @property
    def study_area(self) -> Polygon:
        """
        return the shapely geometry of the study area (data area)

        :return: the shapely geometry of the study area. If None, it means that nothing has been loaded or run
        :rtype: shapely.geometry.Polygon
        """
        return self._study_area_geom

    def _query_on_overpass_api(self, request: str) -> List[Dict]:
        """Run an Overpass query and return its raw 'elements' list."""
        return OverpassApi(self.logger).query(request)[self._QUERY_ELEMENTS_FIELD]

    @staticmethod
    def _from_location_name_query_builder(location_osm_id: int, query: str) -> str:
        """Wrap *query* in an Overpass area filter for *location_osm_id*."""
        geo_tag_query: str = "area.searchArea"
        query = query.format(geo_filter=geo_tag_query)
        return f"area({location_osm_id})->.searchArea;({query});{out_geom_query};"

    @staticmethod
    def _from_bbox_query_builder(
        bbox_value: Tuple[float, float, float, float], query: str
    ) -> str:
        """Wrap *query* in an Overpass bbox filter."""
        assert isinstance(bbox_value, tuple)
        assert len(bbox_value) == 4
        bbox_value_str = ", ".join(map(str, bbox_value))
        query = query.format(geo_filter=bbox_value_str)
        return f"({query});{out_geom_query};"

    def _check_topology_field(self, input_gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
        """Ensure the topology-uuid column exists and normalize NaN values."""
        if self._TOPO_FIELD not in input_gdf.columns.tolist():
            # BUG FIX: pd.Index has no .apply(); .map() is the Index method
            # (the original line raised AttributeError whenever it ran).
            input_gdf[self._TOPO_FIELD] = input_gdf.index.map(int)

        input_gdf = input_gdf.fillna(self._DEFAULT_NAN_VALUE_TO_USE)
        return input_gdf

    def get_gdf(self, verbose: bool = True) -> gpd.GeoDataFrame:
        """
        Return a GeoDataframe

        :param verbose: to activate log messages
        :return: geopandas.GeoDataframe
        """
        if verbose:
            self.logger.info("Prepare GeoDataframe")

        if len(self._output_data) == 0:
            raise EmptyData(
                "GeoDataframe creation is impossible, because no data has been found"
            )

        if not isinstance(self._output_data, gpd.GeoDataFrame):
            # Building the frame chunk by chunk outperforms
            # gpd.GeoDataFrame.from_features() on large feature lists.
            # BUG FIX: the original rebuilt `df` from self._output_data right
            # after this loop, silently discarding the chunked concatenation.
            df = pd.DataFrame()
            for chunk in chunker(self._output_data, 100000):
                df = pd.concat((df, pd.DataFrame(chunk)), axis=0)

            geometry = df[self._GEOMETRY_FIELD]
            output_gdf: gpd.GeoDataFrame = gpd.GeoDataFrame(
                df.drop([self._GEOMETRY_FIELD], axis=1),
                crs=f"EPSG:{epsg_4326}",
                geometry=geometry.to_list(),
            )
        else:
            output_gdf = self._output_data

        self._check_build_input_data(output_gdf)
        output_gdf = self._clean_attributes(output_gdf)
        self.logger.info("GeoDataframe Ready")

        return output_gdf

    def _check_build_input_data(self, output_gdf) -> None:
        """Reject empty frames and geometry types other than the expected one."""
        if output_gdf.shape[0] == 0:
            raise EmptyData("Data is empty!")

        geom_types_found = set(output_gdf[self._GEOMETRY_FIELD].geom_type.to_list())
        if geom_types_found != {self._OUTPUT_EXPECTED_GEOM_TYPE}:
            raise ErrorOsmGtCore(
                f"Geom type not supported! Only {self._OUTPUT_EXPECTED_GEOM_TYPE} supported ; {geom_types_found} found"
            )

    def _clean_attributes(self, input_gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
        """Drop useless columns and guarantee an 'id' column exists."""
        # FIX: collect first, drop once — the original dropped columns
        # in-place while iterating over input_gdf.columns.
        to_drop = [col for col in input_gdf.columns if col in self.__USELESS_COLUMNS]
        if to_drop:
            input_gdf.drop(columns=to_drop, inplace=True)

        if self._ID_DEFAULT_FIELD not in input_gdf.columns:
            input_gdf.loc[:, self._ID_DEFAULT_FIELD] = input_gdf.index.astype(str)

        return input_gdf

    def _location_osm_default_id_computing(self, osm_location_id: int) -> int:
        """Convert a Nominatim relation id to an Overpass area id."""
        return osm_location_id + self._NOMINATIM_DEFAULT_ID

    def _build_feature_from_osm(
        self, uuid_enum: int, geometry: Union[Point, LineString], properties: Dict
    ) -> Dict:
        """Flatten one raw OSM element into a feature dict (tags + id + url
        + topology uuid + shapely geometry)."""
        properties_found: Dict = properties.get(self._PROPERTIES_OSM_FIELD, {})
        properties_found[self._ID_OSM_FIELD] = str(properties[self._ID_OSM_FIELD])
        properties_found[
            self._OSM_URL_FIELD
        ] = f"{osm_url}/{self._FEATURE_OSM_TYPE}/{properties_found[self._ID_OSM_FIELD]}"

        # used for topology
        properties_found[
            self._TOPO_FIELD
        ] = uuid_enum  # do not cast to str, because topology processing need an int..
        properties_found[self._GEOMETRY_FIELD] = geometry

        feature_build: Dict = properties_found

        return feature_build

    @staticmethod
    def _check_transport_mode(mode: str) -> None:
        """Assert that *mode* is one of the supported network query modes."""
        assert (
            mode in network_queries.keys()
        ), f"'{mode}' not found in {', '.join(network_queries.keys())}"
| amauryval/OsmGT | osmgt/compoments/core.py | core.py | py | 8,212 | python | en | code | 4 | github-code | 13 |
21793578416 | import unittest
from modules.csv_to_db import DataBase
class MyTestCase(unittest.TestCase):
    """Checks that rows imported from the CSV-backed db expose the expected
    location, difficulty and style fields for two sample countries."""

    def setUp(self):
        self.db = DataBase("../data/first9000.db")
        self.austria = self.db.execute_selection_by_country("Austria")
        self.france = self.db.execute_selection_by_country("France")

    def test_locations_table(self):
        expected_at = "Europe, Austria, Ost, Industrieviertel, Wienerwald(IV), Thalhofergrat, ObereOstwand, ★★ Osterhasi"
        expected_fr = "Europe, France, Île-de-France, Fontainebleau, Cuvier, BasCuvier, RedcircuitTD+(nº6), ★★★ La Marie Rose"
        self.assertEqual(self.austria[0].location, expected_at)
        self.assertEqual(self.france[0].location, expected_fr)

    def test_difficulty(self):
        self.assertEqual(self.austria[0].category, "Intermediate")
        self.assertEqual(self.france[0].category, "Experienced")

    def test_style(self):
        self.assertEqual(self.austria[0].style, "Sport")
        self.assertEqual(self.france[0].style, "Boulder")
# Allow running this test module directly: `python csv_to_db_test.py`.
if __name__ == '__main__':
    unittest.main()
| 8bit-number/coursework-project | tests/csv_to_db_test.py | csv_to_db_test.py | py | 1,202 | python | en | code | 0 | github-code | 13 |
22283779806 | """
How do you find all pairs of an integer array whose sum is equal to a given number
"""
def printpairs(arr, arr_size, sum):
    """Print every pair among the first *arr_size* items of *arr* whose
    elements add up to *sum*, in a single O(n) pass using a seen-set.

    Note: the parameter name ``sum`` shadows the builtin; it is kept
    unchanged for backward compatibility with keyword callers.
    """
    seen = set()
    for value in arr[:arr_size]:
        complement = sum - value
        if complement in seen:
            # Output format kept byte-identical to the original.
            print('Pair with given sum ' + str(sum) + " is : (" + str(value) + ',' + str(complement) + ")")
        seen.add(value)
# run with test
if __name__ == '__main__':
    a = [1, 4, 45, 6, 10, 8]
    n = 16
    # BUG FIX: printpairs() prints its findings and returns None, so the
    # original `print(printpairs(...))` emitted a spurious "None" line.
    printpairs(a, len(a), n)
123175327 | import os
import re
from typing import Dict, List, Union # for type hinting
from db import db
from datetime import datetime
from sqlalchemy.sql import (
func,
) # 'sqlalchemy' is being installed together with 'flask-sqlalchemy'
from services.models.pairs import PairModel
from services.models.tickers import TickerModel
from app import configs
# JSON-serializable view of a signal row — custom type hint.
SignalJSON = Dict[str, Union[str, float, int]]

# Passphrase is required to register webhooks (& to update account positions & PNL);
# read from the environment first, falling back to the config file.
PASSPHRASE = os.environ.get("WEBHOOK_PASSPHRASE", configs.get("SECRET", "WEBHOOK_PASSPHRASE"))
class SignalModel(db.Model):
    """ORM row for one incoming TradingView webhook signal and the broker
    order lifecycle derived from it (single tickers and "T1-T2" pairs)."""

    __tablename__ = "signals"

    rowid = db.Column(
        db.Integer, primary_key=True, autoincrement=True
    )  # using 'rowid' as the default key
    timestamp = db.Column(
        db.DateTime(timezone=False),
        server_default=func.current_timestamp()  # TODO: check for sqlite3 and postgres
        # db.DateTime(timezone=False), server_default = func.now()
    )  # DATETIME DEFAULT (CURRENT_TIMESTAMP) for sqlite3
    # Raw fields as delivered by the webhook payload
    ticker = db.Column(db.String)
    order_action = db.Column(db.String)
    order_contracts = db.Column(db.Integer)
    order_price = db.Column(db.Float)
    mar_pos = db.Column(db.String)
    mar_pos_size = db.Column(db.Integer)
    pre_mar_pos = db.Column(db.String)
    pre_mar_pos_size = db.Column(db.Integer)
    order_comment = db.Column(db.String)
    order_status = db.Column(db.String)
    # Columns needed for order creation
    ticker_type = db.Column(db.String)  # "single" or "pair" (see splitticker)
    ticker1 = db.Column(db.String)
    ticker2 = db.Column(db.String)
    hedge_param = db.Column(db.Float)
    order_id1 = db.Column(db.Integer)
    order_id2 = db.Column(db.Integer)
    price1 = db.Column(db.Float)
    price2 = db.Column(db.Float)
    fill_price = db.Column(db.Float)
    slip = db.Column(db.Float)
    error_msg = db.Column(db.String)
    status_msg = db.Column(db.String)
    def __init__(
        self,
        timestamp: datetime,
        ticker: str,
        order_action: str,
        order_contracts: int,
        order_price: float,
        mar_pos: str,
        mar_pos_size: int,
        pre_mar_pos: str,
        pre_mar_pos_size: int,
        order_comment: str,
        order_status: str,
        ticker_type: str,
        ticker1: str,
        ticker2: str,
        hedge_param: float,
        order_id1: int,
        order_id2: int,
        price1: float,
        price2: float,
        fill_price: float,
        slip: float,
        error_msg: str,
        status_msg: str,
    ):
        """Field-for-field constructor; every column except the
        auto-incremented ``rowid`` must be supplied by the caller."""
        self.timestamp = timestamp
        self.ticker = ticker
        self.order_action = order_action
        self.order_contracts = order_contracts
        self.order_price = order_price
        self.mar_pos = mar_pos
        self.mar_pos_size = mar_pos_size
        self.pre_mar_pos = pre_mar_pos
        self.pre_mar_pos_size = pre_mar_pos_size
        self.order_comment = order_comment
        self.order_status = order_status
        self.ticker_type = ticker_type
        self.ticker1 = ticker1
        self.ticker2 = ticker2
        self.hedge_param = hedge_param
        self.order_id1 = order_id1
        self.order_id2 = order_id2
        self.price1 = price1
        self.price2 = price2
        self.fill_price = fill_price
        self.slip = slip
        self.error_msg = error_msg
        self.status_msg = status_msg
def json(self) -> SignalJSON:
return {
"rowid": self.rowid,
"timestamp": str(self.timestamp),
"ticker": self.ticker,
"order_action": self.order_action,
"order_contracts": self.order_contracts,
"order_price": self.order_price,
"mar_pos": self.mar_pos,
"mar_pos_size": self.mar_pos_size,
"pre_mar_pos": self.pre_mar_pos,
"pre_mar_pos_size": self.pre_mar_pos_size,
"order_comment": self.order_comment,
"order_status": self.order_status,
"ticker_type": self.ticker_type,
"ticker1": self.ticker1,
"ticker2": self.ticker2,
"hedge_param": self.hedge_param,
"order_id1": self.order_id1,
"order_id2": self.order_id2,
"price1": self.price1,
"price2": self.price2,
"fill_price": self.fill_price,
"slip": self.slip,
"error_msg": self.error_msg,
"status_msg": self.status_msg,
}
@staticmethod
def passphrase_wrong(passphrase) -> bool:
if passphrase == PASSPHRASE:
return False
return True
    @classmethod
    def find_by_rowid(cls, rowid) -> "SignalModel":
        """Return the signal with primary key *rowid*, or None if absent."""
        return cls.query.filter_by(rowid=rowid).first()

        # KEEPING THE SQL CODE THAT FUNCTIONS THE SAME FOR COMPARISON PURPOSES:
        # if rowid == 0:
        #     return None
        #
        # connection = sqlite3.connect('data.db', timeout=10)
        #
        # try:
        #     cursor = connection.cursor()
        #
        #     query = "SELECT rowid, * FROM {table} WHERE rowid=?".format(table=TABLE_SIGNALS)
        #     cursor.execute(query, (rowid,))
        #     row = cursor.fetchone()
        #
        # except sqlite3.Error as e:
        #     print('Database error occurred - ', e)
        #     raise
        #
        # finally:
        #     if connection:
        #         connection.close()
        #
        # if row:
        #     return cls(*row)
        #
        # return None
    def insert(self) -> None:
        """Persist this instance as a new row and commit the session."""
        db.session.add(self)
        db.session.commit()

        # KEEPING THE SQL CODE THAT FUNCTIONS THE SAME FOR COMPARISON PURPOSES:
        # connection = sqlite3.connect('data.db', timeout=10)
        #
        # try:
        #     cursor = connection.cursor()
        #
        #     query = "INSERT INTO {table} (ticker, order_action, order_contracts, order_price," \
        #             "mar_pos, mar_pos_size, pre_mar_pos, pre_mar_pos_size, order_comment, order_status) " \
        #             "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)".format(table=TABLE_SIGNALS)
        #
        #     cursor.execute(query, (self.ticker, self.order_action, self.order_contracts, self.order_price,
        #                            self.mar_pos, self.mar_pos_size, self.pre_mar_pos, self.pre_mar_pos_size,
        #                            self.order_comment, self.order_status))
        #
        #     connection.commit()
        #
        # except sqlite3.Error as e:  # handling the exception with generic SQL error code
        #     print('Database error occurred - ', e)  # better to log the error
        #     raise
        #
        # finally:
        #     if connection:
        #         connection.close()  # disconnect the database even if exception occurs
    def update(self, rowid) -> None:
        """Overwrite every mutable column of the row identified by *rowid*
        with this instance's values, then commit.

        NOTE(review): assumes the row exists — if the lookup returns None,
        the attribute assignments below raise AttributeError; confirm that
        callers always pass a valid rowid.
        """
        item_to_update = self.query.filter_by(rowid=rowid).first()
        item_to_update.ticker = self.ticker
        item_to_update.timestamp = self.timestamp
        item_to_update.order_action = self.order_action
        item_to_update.order_contracts = self.order_contracts
        item_to_update.order_price = self.order_price
        item_to_update.mar_pos = self.mar_pos
        item_to_update.mar_pos_size = self.mar_pos_size
        item_to_update.pre_mar_pos = self.pre_mar_pos
        item_to_update.pre_mar_pos_size = self.pre_mar_pos_size
        item_to_update.order_comment = self.order_comment
        item_to_update.order_status = self.order_status
        item_to_update.ticker_type = self.ticker_type
        item_to_update.ticker1 = self.ticker1
        item_to_update.ticker2 = self.ticker2
        item_to_update.hedge_param = self.hedge_param
        item_to_update.order_id1 = self.order_id1
        item_to_update.order_id2 = self.order_id2
        item_to_update.price1 = self.price1
        item_to_update.price2 = self.price2
        item_to_update.fill_price = self.fill_price
        item_to_update.slip = self.slip
        item_to_update.error_msg = self.error_msg
        item_to_update.status_msg = self.status_msg
        db.session.commit()

        # KEEPING THE SQL CODE THAT FUNCTIONS THE SAME FOR COMPARISON PURPOSES:
        # connection = sqlite3.connect('data.db')
        #
        # try:
        #     cursor = connection.cursor()
        #
        #     query = "UPDATE {table} SET ticker=?, order_action=?, order_contracts=?,order_price=?," \
        #             "mar_pos=?, mar_pos_size=?, pre_mar_pos=?, pre_mar_pos_size=?, order_comment=?, order_status=? " \
        #             "WHERE rowid=?".format(table=TABLE_SIGNALS)
        #
        #     cursor.execute(query, (self.ticker, self.order_action, self.order_contracts, self.order_price,
        #                            self.mar_pos, self.mar_pos_size, self.pre_mar_pos, self.pre_mar_pos_size,
        #                            self.order_comment, self.order_status, rowid))
        #
        #     connection.commit()
        #
        # except sqlite3.Error as e:
        #     print('Database error occurred - ', e)
        #     raise
        #
        # finally:
        #     if connection:
        #         connection.close()
    @classmethod
    def get_rows(cls, number_of_items) -> List:
        """Return signals newest-first.

        *number_of_items* is a string; "0" means "all rows".
        NOTE(review): both branches return a Query object (the first has no
        .all()), which is iterable but not a list — confirm callers only
        iterate the result.
        """
        if number_of_items == "0":
            # return cls.query.order_by(desc("rowid")).all() # needs from sqlalchemy import desc
            return cls.query.order_by(cls.rowid.desc())  # better, no need to import
        else:
            return cls.query.order_by(cls.rowid.desc()).limit(number_of_items)

        # KEEPING THE SQL CODE THAT FUNCTIONS THE SAME FOR COMPARISON PURPOSES:
        # if number_of_items == "0":
        #     query = "SELECT rowid, * FROM {table} ORDER BY rowid DESC".format(table=TABLE_SIGNALS)
        # else:
        #     query = "SELECT rowid, * FROM {table} ORDER BY rowid DESC " \
        #             "LIMIT {number}".format(table=TABLE_SIGNALS, number=number_of_items)
        #
        # connection = sqlite3.connect('data.db', timeout=10)
        #
        # try:
        #     cursor = connection.cursor()
        #
        #     cursor.execute(query)
        #
        #     result = cursor.fetchall()  # Keep the result in memory after closing the database
        #
        # except sqlite3.Error as e:
        #     print('Database error occurred - ', e)
        #     raise
        #
        # finally:
        #     if connection:
        #         connection.close()
        #
        # items = []
        #
        # for row in result:
        #     items.append(cls(*row))
        #
        # return items
    def delete(self) -> None:
        """Delete this row from the database and commit the session."""
        db.session.delete(self)
        db.session.commit()

        # KEEPING THE SQL CODE THAT FUNCTIONS THE SAME FOR COMPARISON PURPOSES:
        # connection = sqlite3.connect('data.db', timeout=10)
        #
        # try:
        #     cursor = connection.cursor()
        #
        #     query = "DELETE FROM {table} WHERE rowid=?".format(table=TABLE_SIGNALS)
        #     cursor.execute(query, (rowid,))
        #
        #     connection.commit()
        #
        # except sqlite3.Error as e:
        #     print('Database error occurred - ', e)
        #     raise
        #
        # finally:
        #     if connection:
        #         connection.close()
@classmethod
def get_list_ticker(cls, ticker_name, number_of_items) -> List:
pair = False
tickers = ticker_name.split("-") # check if pair or single
ticker1 = tickers[0]
ticker2 = ""
if len(tickers) == 2:
ticker2 = tickers[1]
pair = True
if number_of_items == "0":
if pair:
return (
cls.query.filter(
(cls.ticker1 == ticker1) & (cls.ticker2 == ticker2)
)
.order_by(cls.rowid.desc())
.all()
)
else:
return (
cls.query.filter(cls.ticker1 == ticker1)
.filter(cls.ticker_type == "single")
.order_by(cls.rowid.desc())
.all()
)
else:
if pair:
return (
cls.query.filter(
(cls.ticker1 == ticker1) & (cls.ticker2 == ticker2)
)
.order_by(cls.rowid.desc())
.limit(number_of_items)
)
else:
return (
cls.query.filter(cls.ticker1 == ticker1)
.filter(cls.ticker_type == "single")
.order_by(cls.rowid.desc())
.limit(number_of_items)
)
@classmethod
def get_list_ticker_dates(
cls, ticker_name, number_of_items, start_date, end_date
) -> List:
pair = False
tickers = ticker_name.split("-") # check if pair or single
ticker1 = tickers[0]
ticker2 = ""
if len(tickers) == 2:
ticker2 = tickers[1]
pair = True
if number_of_items == "0":
if pair:
return (
cls.query.filter(
(cls.ticker1 == ticker1)
& (cls.ticker2 == ticker2)
& (cls.timestamp <= end_date)
& (cls.timestamp >= start_date)
)
.order_by(cls.rowid.desc())
.all()
)
else:
return (
cls.query.filter(
(cls.ticker1 == ticker1)
& (cls.timestamp <= end_date)
& (cls.timestamp >= start_date)
)
.filter(cls.ticker_type == "single")
.order_by(cls.rowid.desc())
.all()
)
else:
if pair:
return (
cls.query.filter(
(cls.ticker1 == ticker1)
& (cls.ticker2 == ticker2)
& (cls.timestamp <= end_date)
& (cls.timestamp >= start_date)
)
.order_by(cls.rowid.desc())
.limit(number_of_items)
)
else:
return (
cls.query.filter(
(cls.ticker1 == ticker1)
& (cls.timestamp <= end_date)
& (cls.timestamp >= start_date)
)
.filter(cls.ticker_type == "single")
.order_by(cls.rowid.desc())
.limit(number_of_items)
)
@classmethod
def get_list_status(cls, order_status, number_of_items) -> List:
if number_of_items == "0":
if order_status == "waiting":
return (
cls.query.filter(
(cls.order_status == "waiting")
| (cls.order_status == "rerouted")
)
.order_by(cls.rowid.desc())
.all()
)
else:
return (
cls.query.filter_by(order_status=order_status)
.order_by(cls.rowid.desc())
.all()
)
else:
if order_status == "waiting":
return (
cls.query.filter(
(cls.order_status == "waiting")
| (cls.order_status == "rerouted")
)
.order_by(cls.rowid.desc())
.limit(number_of_items)
)
else:
return (
cls.query.filter_by(order_status=order_status)
.order_by(cls.rowid.desc())
.limit(number_of_items)
)
    def check_ticker_status(self) -> bool:
        """Validate this signal's ticker(s) against the registered tickers.

        Returns True when the ticker/pair is registered and active.
        Otherwise sets ``order_status`` / ``status_msg`` on the instance
        and returns False.
        """
        # check if ticker is registered and trade status is active
        if self.ticker_type == "pair":
            pair_name = self.ticker1 + "-" + self.ticker2
            pair = PairModel.find_by_name(pair_name)
            # check if pair exists
            if pair:
                # check trade status
                # if pair is not active set a static pair status (not possible to update)
                if pair.status != 1:
                    self.order_status = "canceled"
                    self.status_msg = "passive ticker"
                    return False
                # reject the signal when its hedge does not match the
                # registered pair's hedge parameter
                if float(self.hedge_param) != float(pair.hedge):
                    self.order_status = "canceled"
                    self.status_msg = "wrong hedge"
                    return False
                # if registered and active ticker
                else:
                    return True
            else:
                self.order_status = "error"
                self.status_msg = "unknown ticker"
                return False
        else:
            ticker = TickerModel.find_by_symbol(self.ticker1)
            # check if ticker exists
            if ticker:
                # check trade status
                # if ticker is not active set a static pair status (not possible to update)
                if not ticker.active:
                    self.order_status = "canceled"
                    self.status_msg = "passive ticker"
                    return False
                else:
                    # if registered and active ticker
                    return True
            else:
                self.order_status = "error"
                self.status_msg = "unknown ticker"
                return False
    def splitticker(self,) -> bool:
        """Parse ``self.ticker`` (e.g. ``"LNT-1.25*NYSE:FTS"``) into
        ``ticker1``/``ticker2``, ``ticker_type`` and ``hedge_param``,
        converting TradingView notation into the broker (IB) format.

        Returns False (and sets ``order_status``/``status_msg``) on any
        problem: unknown ticker, currency mismatch, a non-1 hedge on the
        first leg, or more than two legs.
        """
        success_flag = True
        currency_match = True
        ticker_pair1 = ""
        ticker_pair2 = ""
        eq12 = self.ticker.split("-")  # check if pair or single
        # print(eq12) # ['LNT', '1.25*NYSE:FTS']
        if len(eq12) <= 2:
            eq1_hedge = re.findall(
                r"[-+]?\d*\.\d+|\d+", eq12[0]
            )  # hedge constant for the 1st ticker
            # print("eq1_hedge: ", eq1_hedge) # []
            if len(eq1_hedge) > 0:
                eq1 = eq12[0].replace(eq1_hedge[0], "")
            else:
                eq1 = eq12[0]  # LNT
            eq1 = eq1.replace("*", "")
            # print("eq1: ", eq1) # LNT
            eq1_split = eq1.rsplit(":", maxsplit=1)
            eq1_ticker_almost = eq1_split[len(eq1_split) - 1]
            # print("eq1_split: ", eq1_split) # ['LNT']
            # print("eq1_ticker_almost: ", eq1_ticker_almost) # LNT
            # check if the ticker security type is CASH or CRYPTO
            item = TickerModel.find_by_symbol(eq1_ticker_almost)
            if item:
                # print("item found")
                # refer to test cases
                if item.sectype == "CASH":
                    fx1 = eq1_ticker_almost[0:3]  # get the first 3 char # USD
                    fx2 = eq1_ticker_almost[-3:]  # get the last 3 char # CAD
                    ticker_pair1 = fx1 + "." + fx2
                    # check for currency mismatch
                    if fx2 != item.currency:
                        currency_match = False
                        success_flag = False
                elif item.sectype == "CRYPTO":
                    cry2 = eq1_ticker_almost[-3:]  # get last 3 char
                    ticker_pair1 = eq1_ticker_almost.replace(".", "")
                    cry1 = eq1_ticker_almost[
                        0 : (len(ticker_pair1) - 3)
                    ]  # get the first 3 char
                    ticker_pair1 = cry1 + "." + cry2
                    # TODO: improve validity check
                    # check if valid crypto pair, accepts only USD pairs
                    if cry2 != item.currency:
                        currency_match = False
                        success_flag = False
                else:
                    if (
                        "." in eq1_ticker_almost
                    ):  # For Class A,B type tickers EXP: BF.A BF.B
                        ticker_pair1 = eq1_ticker_almost.replace(
                            ".", " "
                        )  # convert Tradingview -> IB format
                    else:
                        ticker_pair1 = "".join(
                            char for char in eq1_ticker_almost if char.isalnum()
                        )
            else:
                success_flag = False
                self.status_msg = "unknown ticker!"
            # print("ticker_pair1: ", ticker_pair1) # LNT
            # the first leg may not carry a hedge constant other than 1
            if len(eq1_hedge) != 0:
                if eq1_hedge[0] != 1:
                    success_flag = False
            # print("problem_flag_first: ", success_flag)
            self.ticker_type = "single"
            self.ticker1 = ticker_pair1
        if len(eq12) == 2:
            eq2_hedge = re.findall(
                r"[-+]?\d*\.\d+|\d+", eq12[1]
            )  # hedge constant fot the 2nd ticker
            # print("eq2_hedge: ", eq2_hedge) # ['1.25']
            if len(eq2_hedge) > 0:
                eq2 = eq12[1].replace(eq2_hedge[0], "")
            else:
                eq2 = eq12[1]  # *NYSE:FTS
            eq2 = eq2.replace("*", "")
            # print("eq2: ", eq2) # NYSE:FTS
            eq2_split = eq2.rsplit(":", maxsplit=1)
            eq2_ticker_almost = eq2_split[len(eq2_split) - 1]
            # print("eq2_split: ", eq2_split) # ['NYSE', 'FTS']
            # print("eq2_ticker_almost: ", eq2_ticker_almost) # FTS
            # check if the ticker security type is CASH or CRYPTO
            item = TickerModel.find_by_symbol(eq2_ticker_almost)
            if item:
                # print("item found")
                if item.sectype == "CASH":
                    fx1 = eq2_ticker_almost[0:3]  # get the first 3 char # USD
                    fx2 = eq2_ticker_almost[-3:]  # get the last 3 char # CAD
                    ticker_pair2 = fx1 + "." + fx2
                    # check for currency mismatch
                    if fx2 != item.currency:
                        currency_match = False
                        success_flag = False
                elif item.sectype == "CRYPTO":
                    cry2 = eq2_ticker_almost[-3:]  # get last 3 char
                    ticker_pair2 = eq2_ticker_almost.replace(".", "")
                    cry1 = eq2_ticker_almost[
                        0 : (len(ticker_pair2) - 3)
                    ]  # get the first 3 char
                    ticker_pair2 = cry1 + "." + cry2
                    # TODO: improve validity check
                    # check if valid cryptopair, accepts only USD pairs
                    if cry2 != item.currency:
                        currency_match = False
                        success_flag = False
                else:
                    if (
                        "." in eq2_ticker_almost
                    ):  # For Class A,B type tickers EXP: BF.A BF.B
                        ticker_pair2 = eq2_ticker_almost.replace(
                            ".", " "
                        )  # convert Tradingview -> IB format
                    else:
                        ticker_pair2 = "".join(
                            char for char in eq2_ticker_almost if char.isalnum()
                        )
            else:
                success_flag = False
                self.status_msg = "unknown ticker!"
            # print("ticker_pair2: ", ticker_pair2) # FTS
            # a missing hedge constant on the second leg defaults to 1
            if len(eq2_hedge) == 0:
                hedge_const = 1
            else:
                hedge_const = eq2_hedge[0]
            # print("hedge_const: ", hedge_const) # False
            # print("problem_flag_final: ", success_flag)
            # print("ticker_type: ", self.ticker_type)
            self.ticker_type = "pair"
            self.ticker2 = ticker_pair2
            self.hedge_param = hedge_const
        if len(eq12) > 2:
            success_flag = False
        if not success_flag:
            self.order_status = "error"
            self.status_msg = "problematic ticker!"
            if not currency_match:
                self.status_msg = "currency mismatch!"
        return success_flag
@classmethod
def get_avg_slip(cls, ticker_name, start_date, end_date) -> dict:
slip_dic = {}
pair = False
tickers = ticker_name.split("-") # check if pair or single
ticker1 = tickers[0]
ticker2 = ""
if len(tickers) == 2:
ticker2 = tickers[1]
pair = True
if pair:
slip_dic["buy"] = (
db.session.query(db.func.avg(cls.slip))
.filter(
(cls.ticker1 == ticker1)
& (cls.ticker2 == ticker2)
& (cls.timestamp <= end_date)
& (cls.timestamp >= start_date)
& (cls.order_action == "buy")
)
.scalar()
)
slip_dic["sell"] = (
db.session.query(db.func.avg(cls.slip))
.filter(
(cls.ticker1 == ticker1)
& (cls.ticker2 == ticker2)
& (cls.timestamp <= end_date)
& (cls.timestamp >= start_date)
& (cls.order_action == "sell")
)
.scalar()
)
slip_dic["avg"] = (
db.session.query(db.func.avg(cls.slip))
.filter(
(cls.ticker1 == ticker1)
& (cls.ticker2 == ticker2)
& (cls.timestamp <= end_date)
& (cls.timestamp >= start_date)
)
.scalar()
)
else:
slip_dic["buy"] = (
db.session.query(db.func.avg(cls.slip))
.filter(
(cls.ticker1 == ticker1)
& (cls.timestamp <= end_date)
& (cls.timestamp >= start_date)
& (cls.order_action == "buy")
)
.filter(cls.ticker_type == "single")
.scalar()
)
slip_dic["sell"] = (
db.session.query(db.func.avg(cls.slip))
.filter(
(cls.ticker1 == ticker1)
& (cls.timestamp <= end_date)
& (cls.timestamp >= start_date)
& (cls.order_action == "sell")
)
.filter(cls.ticker_type == "single")
.scalar()
)
slip_dic["avg"] = (
db.session.query(db.func.avg(cls.slip))
.filter(
(cls.ticker1 == ticker1)
& (cls.timestamp <= end_date)
& (cls.timestamp >= start_date)
)
.filter(cls.ticker_type == "single")
.scalar()
)
return slip_dic
@classmethod
def find_by_orderid(cls, orderid) -> "SignalModel":
return (
cls.query.filter((cls.order_id1 == orderid) | (cls.order_id2 == orderid))
.order_by(cls.rowid.desc())
.first()
) # get the most recent order in case of a multiple order id situation
@classmethod
# multiple order id situation happens a lot, better to double-check the ticker
def find_by_orderid_ticker(cls, orderid, ticker) -> "SignalModel":
return (
cls.query.filter(
((cls.ticker1 == ticker) | (cls.ticker2 == ticker))
& ((cls.order_id1 == orderid) | (cls.order_id2 == orderid))
)
.order_by(cls.rowid.desc())
.first()
) # get the most recent order in case of a multiple order id situation
@classmethod
def check_latest(cls) -> "SignalModel":
return (
cls.query.filter(
(
(cls.order_status == "waiting")
| (cls.order_status == "rerouted")
| (cls.order_status == "error")
| (cls.order_status == "critical err")
)
)
.order_by(cls.rowid.desc())
.first()
)
# to split stocks only
# def splitticker_stocks(
# self,
# ) -> bool:
#
# # Split the received webhook equation into tickers and hedge parameters
# # Tested with Tradingview webhooks and Interactive Brokers ticker format
# # TESTED FOR THESE EQUATIONS:
# # pair_equation = "TEST 123"
# # pair_equation = "NYSE:LNT"
# # pair_equation = "0.7*NYSE:BF.A"
# # pair_equation = "NYSE:BF.A"
# # pair_equation = "NYSE:LNT-NYSE:FTS*2.2"
# # pair_equation = "NYSE:LNT*2-NYSE:FTS"
# # pair_equation = "NYSE:LNT-NYSE:FTS/3"
# # pair_equation = "1.3*NYSE:LNT-NYSE:FTS*2.2"
# # pair_equation = "NYSE:LNT-1.25*NYSE:FTS"
# # pair_equation = "LNT-1.25*NYSE:FTS"
# # pair_equation = "NYSE:LNT-NYSE:FTS"
# # pair_equation = "BF.A-0.7*NYSE:BF.B"
#
# success_flag = True
#
# eq12 = self.ticker.split("-") # check if pair or single
# # print(eq12) # ['LNT', '1.25*NYSE:FTS']
#
# if len(eq12) <= 2:
#
# eq1_hedge = re.findall(
# r"[-+]?\d*\.\d+|\d+", eq12[0]
# ) # hedge constant fot the 1st ticker
# # print("eq1_hedge: ", eq1_hedge) # []
#
# if len(eq1_hedge) > 0:
# eq1 = eq12[0].replace(eq1_hedge[0], "")
# else:
# eq1 = eq12[0] # LNT
#
# eq1 = eq1.replace("*", "")
# # print("eq1: ", eq1) # LNT
#
# eq1_split = eq1.rsplit(":", maxsplit=1)
# eq1_ticker_almost = eq1_split[len(eq1_split) - 1]
#
# # print("eq1_split: ", eq1_split) # ['LNT']
# # print("eq1_ticker_almost: ", eq1_ticker_almost) # LNT
#
# if "." in eq1_ticker_almost: # For Class A,B type tickers EXP: BF.A BF.B
# ticker_pair1 = eq1_ticker_almost.replace(
# ".", " "
# ) # convert Tradingview -> IB format
# else:
# ticker_pair1 = "".join(
# char for char in eq1_ticker_almost if char.isalnum()
# )
#
# if eq1_ticker_almost != ticker_pair1:
# success_flag = False
#
# # print("ticker_pair1: ", ticker_pair1) # LNT
#
# if len(eq1_hedge) != 0:
# if eq1_hedge[0] != 1:
# success_flag = False
#
# # print("problem_flag_first: ", success_flag)
#
# self.ticker_type = "single"
# self.ticker1 = ticker_pair1
#
# if len(eq12) == 2:
#
# eq2_hedge = re.findall(
# r"[-+]?\d*\.\d+|\d+", eq12[1]
# ) # hedge constant fot the 2nd ticker
# # print("eq2_hedge: ", eq2_hedge) # ['1.25']
#
# if len(eq2_hedge) > 0:
# eq2 = eq12[1].replace(eq2_hedge[0], "")
# else:
# eq2 = eq12[1] # *NYSE:FTS
#
# eq2 = eq2.replace("*", "")
#
# # print("eq2: ", eq2) # NYSE:FTS
#
# eq2_split = eq2.rsplit(":", maxsplit=1)
# eq2_ticker_almost = eq2_split[len(eq2_split) - 1]
#
# # print("eq2_split: ", eq2_split) # ['NYSE', 'FTS']
# # print("eq2_ticker_almost: ", eq2_ticker_almost) # FTS
#
# if "." in eq2_ticker_almost: # For Class A,B type tickers EXP: BF.A BF.B
# ticker_pair2 = eq2_ticker_almost.replace(
# ".", " "
# ) # convert Tradingview -> IB format
# else:
# ticker_pair2 = "".join(
# char for char in eq2_ticker_almost if char.isalnum()
# )
#
# if eq2_ticker_almost != ticker_pair2:
# success_flag = False
#
# # print("ticker_pair2: ", ticker_pair2) # FTS
#
# if len(eq2_hedge) == 0:
# hedge_const = 1
# else:
# hedge_const = eq2_hedge[0]
#
# # print("hedge_const: ", hedge_const) # False
# # print("problem_flag_final: ", success_flag)
# # print("ticker_type: ", self.ticker_type)
#
# self.ticker_type = "pair"
# self.ticker2 = ticker_pair2
# self.hedge_param = hedge_const
#
# if len(eq12) > 2:
# success_flag = False
#
# if not success_flag:
# self.order_status = "error"
# self.status_msg = "problematic ticker!"
#
# return success_flag
| ozdemirozcelik/pairs-api | services/models/signals.py | signals.py | py | 33,255 | python | en | code | 9 | github-code | 13 |
import pygame
########################################################### (required setup)
pygame.init() # initialize all imported pygame modules (must run first)
# Screen size
screen_width= 480
screen_height = 640
screen = pygame.display.set_mode((screen_width,screen_height)) # create the display surface
# Window title
pygame.display.set_caption("Nado Game") # set the window caption
# FPS clock
clock =pygame.time.Clock()
#########################################################################
# 1. Game setup (background, sprites, coordinates, speed, fonts, ...)
# Load the background image
background = pygame.image.load("C:/testpy/python_ex/pyton_game_background.png")
# Load the player character
character = pygame.image.load("C:/testpy/python_ex/pyton_game_character.png")
character_size = character.get_rect().size # image size as (width, height)
character_width = character_size[0] # character width in pixels
character_height = character_size[1] # character height in pixels
character_x_pos = (screen_width - character_width) /2 # horizontally centered
character_y_pos = screen_height - character_height # placed on the bottom edge
# Per-frame movement deltas
to_x=0
to_y=0
# Movement speed (pixels per millisecond; multiplied by dt below)
character_speed = 1
# Enemy sprite
enemy = pygame.image.load("C:/testpy/python_ex/pyton_game_enemy.png")
enemy_size = enemy.get_rect().size # image size as (width, height)
enemy_width = enemy_size[0] # enemy width in pixels
enemy_height = enemy_size[1] # enemy height in pixels
enemy_x_pos = (screen_width - enemy_width) /2 # horizontally centered
enemy_y_pos = (screen_height - enemy_height) /2 # vertically centered
# NOTE(review): the next three assignments duplicate the ones above verbatim.
to_x=0
to_y=0
# Movement speed (duplicate)
character_speed = 1
# Font for the on-screen countdown timer
game_font = pygame.font.Font(None, 40) # default font, size 40
# Total play time in seconds
total_time = 10
# Remember the start time
start_ticks = pygame.time.get_ticks() # milliseconds since pygame.init()
#########################################################################
# Event loop: keeps the game window alive until quit/collision/timeout
running = True # is the game still running?
while running:
    dt = clock.tick(60) # cap at 60 FPS; dt = milliseconds since last tick
    # Multiplying movement by dt keeps the speed frame-rate independent.
    # 2. Event handling (keyboard, mouse, ...)
    for event in pygame.event.get(): # poll all pending events
        if event.type == pygame.QUIT: # window close button pressed
            running=False # stop the game loop
        if event.type == pygame.KEYDOWN: # a key was pressed
            if event.key == pygame.K_LEFT: # move character left
                to_x-=character_speed
            elif event.key == pygame.K_RIGHT: # move character right
                to_x+=character_speed
            elif event.key == pygame.K_UP: # move character up
                to_y-=character_speed
            elif event.key == pygame.K_DOWN: # move character down
                to_y+=character_speed
        if event.type == pygame.KEYUP: # a key was released
            if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT: # stop horizontal motion
                to_x = 0
            if event.key == pygame.K_UP or event.key == pygame.K_DOWN: # stop vertical motion
                to_y = 0
    ###################################################################################################
    # 3. Update the character position
    character_x_pos+=to_x * dt # dt factor keeps speed independent of FPS
    character_y_pos+=to_y * dt
    # Clamp to the horizontal screen bounds
    if character_x_pos<0:
        character_x_pos=0
    elif character_x_pos> screen_width - character_width:
        character_x_pos= screen_width - character_width
    # Clamp to the vertical screen bounds
    if character_y_pos<0:
        character_y_pos=0
    elif character_y_pos>screen_height - character_height:
        character_y_pos=screen_height - character_height
    ######################################################################
    # 4. Collision handling
    # Refresh the rects used for collision testing with the latest positions
    character_rect = character.get_rect()
    character_rect.left = character_x_pos
    character_rect.top = character_y_pos
    enemy_rect = enemy.get_rect()
    enemy_rect.left = enemy_x_pos
    enemy_rect.top = enemy_y_pos
    # Collision check (message text is Korean: "Collision!")
    if character_rect.colliderect(enemy_rect):
        print("충돌했어요!")
        running = False
    ###############################################################
    # 5. Draw the frame
    # screen.fill((127,127,127)) # alternative: fill with a solid color
    screen.blit(background, (0,0)) # draw the background
    screen.blit(character, (character_x_pos,character_y_pos)) # draw the character
    screen.blit(enemy, (enemy_x_pos, enemy_y_pos)) # draw the enemy
    # Countdown timer display
    # Elapsed time, divided by 1000 to convert milliseconds to seconds
    elapsed_time = (pygame.time.get_ticks() - start_ticks) /1000
    # render(text, antialias, color)
    timer = game_font.render(str(int(total_time -elapsed_time)),True,(255,255,255))
    # Draw the timer near the top-left corner
    screen.blit(timer,(10,10))
    # End the game when the countdown reaches zero
    if total_time< elapsed_time:
        print("Time Out!")
        running = False
    ######################################################################################
    # 6. Refresh the display (required every frame)
    pygame.display.update() # redraw the game screen
###########################################################
# 7. Pause before exiting (optional)
pygame.time.delay(2000) # wait about 2 seconds (unit: ms)
###########################################################
# 8. Shut down pygame (required)
pygame.quit()
########################################### | zookeeper464/py_game_ex | game.py | game.py | py | 6,384 | python | ko | code | 0 | github-code | 13 |
10173308345 | import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from math import sqrt
def mse_(y, y_hat):
    """
    Description:
        Calculate the MSE between the predicted output and the real output.
    Args:
        y: has to be a numpy.array, a vector of dimension m * 1.
        y_hat: has to be a numpy.array, a vector of dimension m * 1.
    Returns:
        mse: has to be a float.
        None if there is a matching dimension problem.
    Raises:
        This function should not raise any Exceptions.
    """
    for v in [y, y_hat]:
        if not isinstance(v, np.ndarray):
            print(f"Invalid input: argument {v} of ndarray type required")
            return None
    v = [y, y_hat]
    for i in range(len(v)):
        if v[i].ndim == 1:
            v[i] = v[i].reshape(v[i].size, 1)
        elif not (v[i].ndim == 2 and v[i].shape[1] == 1):
            print(f"Invalid input: wrong shape of {v[i]}", v[i].shape)
            return None
    y, y_hat = v
    # Bug fix: the docstring promises None on a dimension mismatch, but the
    # original either raised a broadcasting ValueError or silently broadcast
    # (e.g. a (1, 1) y_hat against an (m, 1) y). Reject mismatches explicitly.
    if y.shape != y_hat.shape:
        print(f"Invalid input: shape mismatch {y.shape} vs {y_hat.shape}")
        return None
    J_elem = (y_hat - y) ** 2
    float_sum = float(np.sum(J_elem))
    mse = float_sum / len(y)
    return mse
def rmse_(y, y_hat):
    """
    Description:
        Calculate the RMSE between the predicted output and the real output.
    Args:
        y: has to be a numpy.array, a vector of dimension m * 1.
        y_hat: has to be a numpy.array, a vector of dimension m * 1.
    Returns:
        rmse: has to be a float.
        None if there is a matching dimension problem.
    Raises:
        This function should not raise any Exceptions.
    """
    for v in [y, y_hat]:
        if not isinstance(v, np.ndarray):
            print(f"Invalid input: argument {v} of ndarray type required")
            return None
    v = [y, y_hat]
    for i in range(len(v)):
        if v[i].ndim == 1:
            v[i] = v[i].reshape(v[i].size, 1)
        elif not (v[i].ndim == 2 and v[i].shape[1] == 1):
            print(f"Invalid input: wrong shape of {v[i]}", v[i].shape)
            return None
    y, y_hat = v
    # Bug fix: reject length mismatches (docstring promises None) instead of
    # raising or silently broadcasting.
    if y.shape != y_hat.shape:
        print(f"Invalid input: shape mismatch {y.shape} vs {y_hat.shape}")
        return None
    # Inlined MSE (the original delegated to mse_, which re-ran the exact
    # same validation a second time).
    mse = float(np.sum((y_hat - y) ** 2)) / len(y)
    return mse ** 0.5
def mae_(y, y_hat):
    """
    Description:
        Calculate the MAE between the predicted output and the real output.
    Args:
        y: has to be a numpy.array, a vector of dimension m * 1.
        y_hat: has to be a numpy.array, a vector of dimension m * 1.
    Returns:
        mae: has to be a float.
        None if there is a matching dimension problem.
    Raises:
        This function should not raise any Exceptions.
    """
    for v in [y, y_hat]:
        if not isinstance(v, np.ndarray):
            print(f"Invalid input: argument {v} of ndarray type required")
            return None
    v = [y, y_hat]
    for i in range(len(v)):
        if v[i].ndim == 1:
            v[i] = v[i].reshape(v[i].size, 1)
        elif not (v[i].ndim == 2 and v[i].shape[1] == 1):
            print(f"Invalid input: wrong shape of {v[i]}", v[i].shape)
            return None
    y, y_hat = v
    # Bug fix: reject length mismatches (docstring promises None) instead of
    # raising or silently broadcasting.
    if y.shape != y_hat.shape:
        print(f"Invalid input: shape mismatch {y.shape} vs {y_hat.shape}")
        return None
    J_elem = np.abs(y_hat - y)
    float_sum = float(np.sum(J_elem))
    mae = float_sum / len(y)
    return mae
def r2score_(y, y_hat):
    """
    Description:
        Calculate the R2score between the predicted output and the output.
    Args:
        y: has to be a numpy.array, a vector of dimension m * 1.
        y_hat: has to be a numpy.array, a vector of dimension m * 1.
    Returns:
        r2score: has to be a float (0.0 when y has zero variance).
        None if there is a matching dimension problem.
    Raises:
        This function should not raise any Exceptions.
    """
    for v in [y, y_hat]:
        if not isinstance(v, np.ndarray):
            print(f"Invalid input: argument {v} of ndarray type required")
            return None
    v = [y, y_hat]
    for i in range(len(v)):
        if v[i].ndim == 1:
            v[i] = v[i].reshape(v[i].size, 1)
        elif not (v[i].ndim == 2 and v[i].shape[1] == 1):
            print(f"Invalid input: wrong shape of {v[i]}", v[i].shape)
            return None
    y, y_hat = v
    # Bug fix: reject length mismatches (docstring promises None) instead of
    # raising or silently broadcasting.
    if y.shape != y_hat.shape:
        print(f"Invalid input: shape mismatch {y.shape} vs {y_hat.shape}")
        return None
    J_elem = (y_hat - y) ** 2        # residual sum of squares (per element)
    M_elem = (y - np.mean(y)) ** 2   # total sum of squares (per element)
    float_J_sum = float(np.sum(J_elem))
    float_M_sum = float(np.sum(M_elem))
    if float_M_sum == 0:
        # Constant y: R2 is undefined; keep the original 0.0 convention.
        r2score = 0.0
    else:
        r2score = 1 - (float_J_sum / float_M_sum)
    return r2score
def ex1():
    """Compare the hand-rolled metrics with their sklearn counterparts."""
    x = np.array([0, 15, -9, 7, 12, 3, -21])
    y = np.array([2, 14, -13, 5, 12, 4, -19])
    # Mean squared error
    print("\nmy mse:", mse_(x, y))  ## Output: 4.285714285714286
    print("sklearn mse:", mean_squared_error(x, y))  ## Output: 4.285714285714286
    # Root mean squared error (sklearn has no direct RMSE helper:
    # take the square root of its MSE instead)
    print("\nmy rmse:", rmse_(x, y))  ## Output: 2.0701966780270626
    print("sklearn rmse:", sqrt(mean_squared_error(x, y)))  ## Output: 2.0701966780270626
    # Mean absolute error
    print("\nmy mae:", mae_(x, y))  # Output: 1.7142857142857142
    print("sklearn mae:", mean_absolute_error(x, y))  # Output: 1.7142857142857142
    # R2-score
    print("\nmy r2score:", r2score_(x, y))  ## Output: 0.9681721733858745
    print("sklearn r2score:", r2_score(x, y))  ## Output: 0.9681721733858745
def ex2():
    """Exercise the metrics on edge cases: identical vectors and column vectors."""
    x = np.array([0, 15, -9, 7, 12, 3, -21]).reshape(-1, 1)
    y_hat = np.array([[1], [2], [3], [4]])
    y = np.array([[0], [0], [0], [0]])
    print("\nmy mse:", mse_(x, x))
    print("\nmy mse:", mse_(y_hat, y))
    print("\nmy rmse:", rmse_(y_hat, y))
    print("\nmy mae:", mae_(x, x))
    print("\nmy mae:", mae_(y_hat, y))
    print("\nmy r2score:", r2score_(y_hat, y))
if __name__ == "__main__":
ex1()
| jmcheon/ml_module | 00/ex09/other_losses.py | other_losses.py | py | 5,110 | python | en | code | 0 | github-code | 13 |
73498084497 | import os, re, json
from flask import Flask
from datetime import datetime
app = Flask(__name__)
def read_data():
    """Load and return the resume data from ./data.json."""
    with open('./data.json') as source:
        return json.load(source)
def write_data(data):
    """Write the rendered LaTeX bytes to ./resume.tex."""
    # 'wb' instead of 'wb+': the file is only written, never read back here,
    # and the with-block already closes it — the explicit close() inside the
    # original with-block was redundant.
    with open('./resume.tex', 'wb') as target:
        target.write(data)
# Ordered (pattern, replacement) pairs for LaTeX escaping; the backslash
# rule must run first so later substitutions do not double-escape it.
LATEX_SUBS = (
    (re.compile(r'\\'), r'\\textbackslash'),
    (re.compile(r'([{}_#%&$])'), r'\\\1'),
    (re.compile(r'~'), r'\~{}'),
    (re.compile(r'\^'), r'\^{}'),
    (re.compile(r'"'), r"''"),
    (re.compile(r'\.\.\.+'), r'\\ldots'),
)


def escape_tex(value):
    """Return *value* with LaTeX special characters escaped."""
    escaped = value
    for pattern, replacement in LATEX_SUBS:
        escaped = pattern.sub(replacement, escaped)
    return escaped
def compute_time_in_double(value):
    """Convert a 'YYYY-MM' date (or any 'present' marker) to a fractional year.

    'Present' maps to next year so ongoing entries sort after dated ones.
    """
    if "present" in value.lower():
        return round(int(datetime.now().strftime("%Y")) + 1, 2)
    parsed = datetime.strptime(value, '%Y-%m')
    fraction = round(int(parsed.strftime("%m")) / 12.00, 2)
    return int(parsed.strftime("%Y")) + fraction
def compute_time_in_text(value):
    """Convert a 'YYYY-MM' date to 'Mon, YYYY'; any 'present' marker stays 'Present'."""
    if "present" in value.lower():
        return "Present"
    return datetime.strptime(value, '%Y-%m').strftime("%b, %Y")
def get_sorted_keys(value):
    """Return the elements of *value* in sorted order (for a dict, its keys)."""
    return sorted(value)
def create_jinja_environment():
    """Build a Jinja environment with LaTeX-safe delimiters and custom filters.

    The default {{ }} / {% %} delimiters clash with LaTeX braces, so they are
    replaced with ((* *)), <(( ))> and ((= =)).
    """
    env = app.create_jinja_environment()
    env.block_start_string = '((*'
    env.block_end_string = '*))'
    env.variable_start_string = '<(('
    env.variable_end_string = '))>'
    env.comment_start_string = '((='
    env.comment_end_string = '=))'
    custom_filters = {
        'escape_tex': escape_tex,
        'compute_time_in_double': compute_time_in_double,
        'get_sorted_keys': get_sorted_keys,
        'compute_time_in_text': compute_time_in_text,
    }
    env.filters.update(custom_filters)
    return env
def render():
    """Render template.tex with the data from data.json and compile it to PDF."""
    environment = create_jinja_environment()
    template = environment.get_template('template.tex')
    payload = read_data()
    next_year = int(datetime.now().strftime("%Y")) + 1
    tex_source = template.render(data=payload, last_year=next_year).encode('utf-8')
    write_data(tex_source)
    # NOTE(review): assumes pdflatex is available on PATH.
    os.system("pdflatex resume.tex")


render()
| xuchen81/XNemo | generate_pdf.py | generate_pdf.py | py | 2,103 | python | en | code | 0 | github-code | 13 |
8842716085 | ## Modules
import numpy as np
import math as m
import matplotlib.pyplot as plt
## Part imports
import TeamProject_Part1 as p1
import TeamProject_Part2 as p2
import TeamProject_Part3 as p3
## UDFs
# Main function
def gkern(sig, x, y):
    """Evaluate the 2-D isotropic Gaussian density with std *sig* at offset (x, y)."""
    variance = m.pow(sig, 2)
    norm = 1 / (2 * m.pi * variance)
    return norm * m.exp(-(m.pow(x, 2) + m.pow(y, 2)) / (2 * variance))
def main():
    """Run the full pipeline: import the encrypted image, decrypt it,
    convert to greyscale, Gaussian-blur, edge-detect, then locate Earth
    and save the cropped results as TIFF files."""
    ## Part 1: load the encrypted image
    img_encrypted = p1.image_importation()
    ## Part 2: XOR decryption with a generated key
    print("Starting decryption...")
    key = p2.key_generator(img_encrypted)
    img_decrypted = p2.xor_decryption(img_encrypted, key)
    plt.imsave('file_original.tiff', img_decrypted)
    plt.imshow(img_decrypted)
    print("Finished decryption...\n")
    ## Part 3: image processing (float conversion avoids integer clipping)
    img_float = img_decrypted.astype(np.float64)
    print("Starting greyscale operation...")
    img_greyscale = p3.greyscale(img_float)
    plt.imshow(img_greyscale, cmap = plt.get_cmap('gray'))
    print("Finished greyscale operation...\n")
    # Build a 3x3 Gaussian blur kernel from gkern; a hard-coded alternative:
    #[[0.0625, 0.125, 0.0625], [0.125, 0.25, 0.125], [0.0625, 0.125, 0.0625]]
    sig = 10
    kernel = [[gkern(sig, 1, 1), gkern(sig, 0, 1), gkern(sig, 1, 1)], \
              [gkern(sig, 1, 0), gkern(sig, 0, 0), gkern(sig, 1, 0)], \
              [gkern(sig, 1, 1), gkern(sig, 0, 1), gkern(sig, 1, 1)]]
    # Normalize so the kernel sums to 1 (numpy converts the list to an array).
    fraction = np.sum(kernel)
    kernel /= fraction
    print('Starting gaussian filtering...')
    img_gaussian = p3.gaussian_filter(img_greyscale, kernel)
    plt.imshow(img_gaussian, cmap = plt.get_cmap('gray'))
    print("Finished gaussian filtering...\n")
    # Sobel operators for horizontal (gx) and vertical (gy) gradients.
    gx = [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]
    gy = [[-1, -2, -1], [0, 0, 0], [1, 2, 1]]
    print('Starting edge detection...')
    img_edge = p3.edge_detection(img_gaussian, gx, gy)
    plt.imshow(img_edge, cmap = plt.get_cmap('gray'))
    print("Finished edge detection...\n")
    print('Finding Earth...')
    earth_coord = p3.find_earth(img_edge)
    print('Found Earth...')
    print(f'Earth is @{earth_coord}')
    # Crop both the decrypted and edge images around the detected location.
    img_crop = p3.img_crop(img_decrypted, earth_coord)
    img_crop_edge = p3.img_crop(img_edge, earth_coord)
    plt.imshow(img_crop, cmap = plt.get_cmap('gray'))
    plt.imsave('crop.tiff', img_crop)
    plt.imsave('crop_edge.tiff', img_crop_edge, cmap = plt.get_cmap('gray'))
    plt.imsave('img_final.tiff', img_edge, cmap = plt.get_cmap('gray'))
## Calls main function
if __name__ == '__main__':
    main()
| AviDube/Engr133Docs | TeamProject.py | TeamProject.py | py | 2,590 | python | en | code | 0 | github-code | 13 |
70971937938 | from flask import render_template
from project import app, db
from project.routes import dictionaryOfProjects
@app.errorhandler(404)
def not_found_error(error):
    """Render the custom 404 page with the shared project listing."""
    context = {
        'the_title': '404',
        'dictionaryOfProjects': dictionaryOfProjects,
    }
    return render_template('404.html', **context), 404
@app.errorhandler(500)
def internal_error(error):
    """Roll back any broken DB session, then render the custom 500 page."""
    db.session.rollback()  # discard the failed transaction before rendering
    context = {
        'the_title': 'Обработка файлов',
        'dictionaryOfProjects': dictionaryOfProjects,
    }
    return render_template('500.html', **context), 500
| GlennMiller1991/PythonPortfolio | project/errors.py | errors.py | py | 584 | python | en | code | 0 | github-code | 13 |
22906240614 | from django.contrib.gis.db import models
from django.contrib.gis.geos import Point
class Venue(models.Model):
    """A Foursquare venue cached locally, with its geographic location."""

    foursquare_id = models.CharField(
        max_length=64, unique=True
    )
    name = models.CharField(max_length=256)
    location = models.PointField()
    categories = models.ManyToManyField(
        'venues.FoursquareCategory', through='venues.VenueCategory'
    )
    checkins_count = models.IntegerField()
    users_count = models.IntegerField()
    tip_count = models.IntegerField()
    url = models.URLField()
    formatted_address = models.TextField()

    @classmethod
    def get_or_create_from_api(cls, venue_response):
        """Fetch or create a Venue row from a Foursquare API venue payload.

        Bug fix: a stray ``def get_or_create_from_api(cls, venue_response):
        return`` stub followed this method in the class body and silently
        replaced it (later definitions with the same name win), so the real
        implementation was unreachable. The stub has been removed.
        """
        # GeoDjango Point takes (lng, lat) — x before y.
        location = Point(
            venue_response['location']['lng'],
            venue_response['location']['lat'],
        )
        return cls.objects.get_or_create(
            foursquare_id=venue_response['id'],
            defaults=dict(
                name=venue_response['name'],
                location=location,
                checkins_count=venue_response['stats']['checkinsCount'],
                users_count=venue_response['stats']['usersCount'],
                tip_count=venue_response['stats']['tipCount'],
                url=venue_response['url'],
                formatted_address=(
                    venue_response['location'].get('formattedAddress')
                )
            )
        )
class FoursquareCategory(models.Model):
    """A Foursquare venue category, cached locally."""

    foursquare_id = models.CharField(max_length=64, unique=True)
    name = models.CharField(max_length=256)
    plural_name = models.CharField(max_length=256)
    short_name = models.CharField(max_length=256)
    icon = models.URLField()

    @classmethod
    def get_or_create_from_api(cls, category_response):
        """Fetch or create a category row from a Foursquare category payload."""
        # The API splits the icon URL into prefix + suffix.
        icon_url = (
            category_response['icon']['prefix'] +
            category_response['icon']['suffix']
        )
        defaults = {
            'name': category_response['name'],
            'plural_name': category_response['pluralName'],
            'short_name': category_response['shortName'],
            'icon': icon_url,
        }
        return cls.objects.get_or_create(
            foursquare_id=category_response['id'], defaults=defaults
        )
class VenueCategory(models.Model):
    """Join table linking a Venue to a FoursquareCategory."""

    venue = models.ForeignKey(Venue, on_delete=models.PROTECT)
    category = models.ForeignKey(FoursquareCategory, on_delete=models.PROTECT)
    is_primary = models.BooleanField()

    class Meta:
        unique_together = ('venue', 'category')

    @classmethod
    def get_or_create_from_api(cls, category_response, venue):
        """Ensure the category exists, then link it to *venue*."""
        category, _ = FoursquareCategory.get_or_create_from_api(
            category_response
        )
        primary = category_response.get('primary', False)
        return cls.objects.get_or_create(
            category=category, venue=venue, is_primary=primary
        )
| gareth-lloyd/python-geodata-talk | api/venues/models.py | models.py | py | 2,898 | python | en | code | 3 | github-code | 13 |
29041939538 | import collections
# time complexity: n
# space complexity: n
def two_sum(array):
    """Find two 1-based indices p < q with array[p-1] + array[q-1] == 0.

    Returns them as the string "p q", or "-1" when no such pair exists.
    Single pass: O(n) time, O(n) space.
    """
    # A plain dict replaces the original defaultdict(lambda: -1): the default
    # factory was never exercised because membership was always tested first.
    seen = {}
    for i, value in enumerate(array):
        if -value in seen:
            # `seen` keeps the most recent index of each value (matching the
            # original's overwrite-on-duplicate behavior).
            return "{} {}".format(seen[-value] + 1, i + 1)
        seen[value] = i
    return "-1"
# Driver: read the Rosalind input file, run two_sum on each array, and write
# one answer per line to the output file.
filein = open('rosalind_2sum.txt')
data = filein.read()
filein.close()
# Tokenize every line into a list of strings.
linesin = [ sublist.strip().split() for sublist in data.splitlines() ]
# The first token of the header line is the number of arrays that follow.
arrayCount = int(linesin[0][0])
fileout = open("rosalind_2sum_output.txt", "w")
for x in range(1, arrayCount+1):
    # NOTE(review): the comprehension variable `x` shadows the loop's `x`;
    # linesin[x] is evaluated with the outer value before shadowing begins,
    # so this works, but the name reuse is confusing.
    array = [int(x) for x in linesin[x]]
    output = two_sum(array)
    print(output)
    fileout.write("{}\n".format(output))
fileout.close() | egavett/CS473Algorithms | 2Sum/rosalind_2sum.py | rosalind_2sum.py | py | 772 | python | en | code | 0 | github-code | 13 |
6482114659 | import cv2
import os
import runlength
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
from sklearn.svm import SVR, LinearSVR
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.linear_model import PassiveAggressiveRegressor, RANSACRegressor, LinearRegression, BayesianRidge, Ridge, Lars, LogisticRegression, Lasso, HuberRegressor, TheilSenRegressor, OrthogonalMatchingPursuit,ElasticNet,SGDRegressor
from sklearn.kernel_ridge import KernelRidge
import glob
import math
import zlib
import pickle
import sys
def poly_compress(arr, k, deg, regressor):
    """Fit `regressor` to each of k consecutive chunks of `arr` and return
    (prediction residuals, list of pickled fitted regressors).

    NOTE(review): `deg` is adjusted but never passed to the regressor, so it
    has no effect on the fit — presumably a leftover from a numpy.polyfit
    version; confirm before removing. Relies on the module-level global
    `img_dtype` to pick the residual dtype (uint8 vs uint16).
    """
    differences = []
    polynomial_coefficients = []
    n_poly = len(arr)//k
    for i in range(0, len(arr), n_poly):
        inc = n_poly
        # The final chunk may be shorter than n_poly.
        if i+n_poly > len(arr):
            inc = len(arr)-i
        x = np.arange(inc)
        y = arr[i:i+inc]
        if(deg > inc):
            deg = 0
        x_reshaped = x.reshape(-1, 1)
        regressor = regressor.fit(x_reshaped, y)
        #print(regressor.coef_)
        # The serialized fitted model stands in for the "coefficients".
        z = pickle.dumps(regressor)
        polynomial_coefficients.append(z)
        # Residual = prediction - actual, in the image's integer dtype
        # (unsigned wrap-around is undone identically on decompression).
        if (str(img_dtype) == "uint8"):
            diff = regressor.predict(x_reshaped).astype(np.uint8) - y
        else:
            diff = regressor.predict(x_reshaped).astype(np.uint16) - y
        differences += list(diff)
        #xp = np.linspace(0, len(x_reshaped), len(x_reshaped)*100)
        #plt.figure(figsize=(6.5,4))
        #plt.hist(x=differences, bins='auto')
        #plt.plot(x,y,'o',label='data')
        #plt.plot(x, regressor.predict(x_reshaped),label='polyfit')
        #plt.xlabel("x")
        #plt.ylabel("f(x)")
        #plt.show()
    return (np.asarray(differences), polynomial_coefficients)
def poly_decompress(differences, coefficients, k):
    """Invert poly_compress: re-predict each chunk with its pickled regressor
    and subtract the stored residuals to recover the original values.

    Relies on the module-level global `img_dtype`. NOTE(review):
    pickle.loads is only safe because the coefficients were produced locally
    by poly_compress; never feed it untrusted data.
    """
    # Revert differences.
    n_poly = len(differences)//k
    decoded_values = []
    for i in range(0, len(differences), n_poly):
        inc = n_poly
        if i+n_poly > len(differences):
            inc = len(differences)-i
        z = coefficients[i//n_poly]
        regressor = pickle.loads(z)
        diff = differences[i:i+inc]
        x = np.arange(inc)
        x_reshaped = x.reshape(-1, 1)
        if (str(img_dtype) == "uint8"):
            decoded_values += list(regressor.predict(x_reshaped).astype(np.uint8)-diff)
        else:
            # Bug fix: 16-bit images must be reconstructed with uint16, the
            # same cast poly_compress and poly_decompress_grid use; the
            # original's uint8 here truncated predictions and broke the
            # lossless round-trip for uint16 images.
            decoded_values += list(regressor.predict(x_reshaped).astype(np.uint16)-diff)
    return np.asarray(decoded_values)
def split(array, nrows, ncols):
    """Split a matrix into nrows x ncols sub-matrices, row-major order.

    Bug fix: the original iterated over and indexed the module-level global
    ``img`` instead of the ``array`` argument, so poly_decompress_grid's call
    with the decoded image silently re-split the original image instead.
    """
    windows = []
    for r in range(0, array.shape[0], nrows):
        for c in range(0, array.shape[1], ncols):
            windows.append(array[r:r + nrows, c:c + ncols])
    return np.asarray(windows)
def merge(array, nrows, ncols):
    """Reassemble the windows produced by split() back into a full image.

    Relies on the module-level global ``img_shape`` for the padded image
    size — TODO confirm it is always set before this runs.

    Cleanup: the original allocated an uninitialized ``np.empty`` row buffer
    and hstack-ed every window into it, then discarded that work by
    rebuilding the row from ``h2``; that dead accumulation (and the unused
    ``result`` list) is removed — the returned values are identical.
    """
    height, width = img_shape
    rows = []
    for i in range(0, height // nrows):
        row_windows = []
        for j in range(0, width // ncols):
            window = array[i * (width // ncols) + j].reshape(nrows, ncols)
            row_windows.append(window)
        rows.append(np.hstack(row_windows))
    return np.vstack(rows)
def poly_compress_grid(arr, n, m, deg, regressor):
    """Split `arr` into an n x m grid of windows and poly_compress each
    window as a single flat chunk (k=1).

    Returns (array of per-window residual lists, array of per-window pickled
    regressors).
    """
    splitted = split(arr, n, m)
    differences = []
    coeffs = []
    counter = 0
    for i in range(len(splitted)):
        grid_arr = splitted[i]
        # k=1: fit one regressor to the whole flattened window.
        diff, coeff = poly_compress(grid_arr.ravel(), 1, deg, regressor)
        differences.append(list(diff))
        coeffs.append(np.asarray(coeff).ravel())
        counter += 1
        #print(counter, len(splitted))
    return (np.asarray(differences), np.asarray(coeffs))
def poly_decompress_grid(differences, coefficients, n, m):
    """Invert poly_compress_grid: rebuild each window from its pickled
    regressor and residuals, then stitch the windows back into an image.

    Relies on the module-level globals `img_shape` and `img_dtype`.
    NOTE(review): pickle.loads is only safe for locally produced data.
    """
    decoded_values = []
    for i in range(len(differences)):
        diff = np.asarray(differences[i]).ravel()
        coeff = coefficients[i]
        z = coeff
        regressor = pickle.loads(z)
        x = np.arange(len(diff))
        x_reshaped = x.reshape(-1, 1)
        # Same dtype-dependent cast as poly_compress, undoing the residuals.
        if (str(img_dtype) == "uint8"):
            decoded_values += list(regressor.predict(x_reshaped).astype(np.uint8)-diff)
        else:
            decoded_values += list(regressor.predict(x_reshaped).astype(np.uint16)-diff)
    # Reshape the flat stream to the padded image size, then round-trip
    # through split/merge to restore the window ordering.
    decoded_values = np.reshape(decoded_values, img_shape)
    decoded_values = split(decoded_values, n, m)
    decoded_values = merge(decoded_values,n ,m)
    return np.asarray(decoded_values)
# Benchmark: for every TIFF in the data directory, compare plain zlib
# compression against regression-residual compression (fit a regressor per
# grid window, zlib-compress the residuals + pickled models), then verify
# the round-trip and tally which method wins.
## Define the regressor.
regressor = OrthogonalMatchingPursuit()
## Choose grid length
n1 = 30
n2 = 30
print("n1=%d, n2=%d, model=%s" % (n1,n2,regressor))
file_paths = glob.glob("../data/nonastro/*.tif")
z_lib = 0
improved_zlib = 0
z_lib_win_count = 0
improved_zlib_win_count = 0
total_count = 0
two_channel_images_count = 0
for path in file_paths:
    # add test image path
    #path = os.path.abspath("../data/heic1509a.tif")
    ## Read image (cv2.imread with -1 keeps the original bit depth).
    img_path = path
    print(img_path)
    img = cv2.imread(img_path,-1)
    if (len(img.shape) == 3):
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        print(img.dtype)
    else:
        print("WARNING:this image has " + str(len(img.shape)) + " channels")
        two_channel_images_count += 1
    # These two globals are consumed by the helper functions above.
    img_shape = img.shape
    img_dtype = img.dtype
    ## Compress with Zlib (baseline).
    if (str(img_dtype) == "uint8"):
        x = np.asarray(img.ravel()).astype(np.uint8)
    else:
        x = np.asarray(img.ravel()).astype(np.uint16)
    original_image = pickle.dumps(x)
    y = zlib.compress(original_image)
    compress_ratio_zlib = (float(len(original_image)) - float(len(y))) / float(len(original_image))
    compress_ratio_percent_zlib = 100.0 * compress_ratio_zlib
    print('Compressed zlib: %f%%' % (100.0 * compress_ratio_zlib))
    ## Compress with fitting.
    if (str(img_dtype) == "uint8"):
        values = img.ravel().astype(np.uint8)
    else:
        values = img.ravel().astype(np.uint16)
    k = 10000
    deg = 4
    ## Compute grid shape: pad the image with zeros so both dimensions are
    ## exact multiples of the n1 x n2 window size.
    r1 = img_shape[0] % n1
    c1 = img_shape[1] % n2
    if r1 != 0:
        zeros = np.zeros((n1-r1,img_shape[1]))
        img = np.vstack((img, zeros))
    if c1 != 0:
        zeros = np.zeros((img_shape[0]+n1-r1,n2-c1))
        img = np.hstack((img, zeros))
    if r1 == 0:
        if c1 != 0:
            zeros = np.zeros((img_shape[0],n2-c1))
            img = np.hstack((img, zeros))
    img_shape = img.shape
    n = img.shape[0]//n1
    m = img.shape[1]//n2
    ## Compute differences and coefficients..
    differences, polynomial_coefficients = poly_compress_grid(img, n, m, deg, regressor)
    if (str(img_dtype) == "uint8"):
        differences = differences.astype(np.uint8)
        x = np.asarray(img.ravel()).astype(np.uint8)
    else:
        differences = differences.astype(np.uint16)
        x = np.asarray(img.ravel()).astype(np.uint16)
    original_image = pickle.dumps(x)
    # Compress differences
    if (str(img_dtype) == "uint8"):
        x = np.asarray(differences).astype(np.uint8)
    else:
        x = np.asarray(differences).astype(np.uint16)
    buffer = pickle.dumps(x)
    y = zlib.compress(buffer)
    # Compress coefficients (the pickled regressors)
    x = np.asarray(polynomial_coefficients)
    buffer = pickle.dumps(x)
    y2 = zlib.compress(buffer)
    compress_ratio_reg = (float(len(original_image)) - float(len(y)+len(y2))) / float(len(original_image))
    compress_ratio_percent_reg = 100.0 * compress_ratio_reg
    # NOTE(review): "zlib_regressoion" is a typo in the output label.
    print('Compressed zlib_regressoion: %f%%' % (100.0 * compress_ratio_reg))
    # Decompress
    differences = pickle.loads(zlib.decompress(y))
    #print(len(differences))
    #print(len(polynomial_coefficients))
    polynomial_coefficients = pickle.loads(zlib.decompress(y2))
    #print(len(polynomial_coefficients))
    ## Check losssless.
    decoded_values = poly_decompress_grid(differences, polynomial_coefficients, n1, n2)
    if (str(img_dtype) == "uint8"):
        decoded_values = np.asarray(decoded_values).astype(np.uint8)
    else:
        decoded_values = np.asarray(decoded_values).astype(np.uint16)
    ## Compare with zlib.
    z_lib += compress_ratio_zlib
    improved_zlib += compress_ratio_reg
    if compress_ratio_percent_zlib > compress_ratio_percent_reg:
        z_lib_win_count += 1
        print('WARNING: zlib won!' + 'this is the image: ' + path)
    elif compress_ratio_percent_zlib < compress_ratio_percent_reg:
        improved_zlib_win_count += 1
    total_count += 1
    # NOTE(review): this check casts to uint8 even for uint16 images, so the
    # "Decode error" report may be misleading for 16-bit inputs — confirm.
    print ("Decode error: "+str(np.array_equal(decoded_values.ravel(), img.ravel().astype(np.uint8))))
print("average zlib percent: "+ str(z_lib/float(total_count)))
print("average improved zlib percent: "+ str(improved_zlib/float(total_count)))
print('zlib wins:' + str(z_lib_win_count))
print('improved zlib wins:' + str(improved_zlib_win_count))
print('two_channel_images_count: ' + str(two_channel_images_count)) | mfkaradeniz/astronomy-compression | src/compression_with_linear_models.py | compression_with_linear_models.py | py | 9,174 | python | en | code | 0 | github-code | 13 |
74614901456 | import cv2
import numpy as np
from RiskMapRegion import RiskMapRegion
def func(x):
    """Sort key: order regions by descending risk, then descending area."""
    return -x.risk, -x.area
def getWatershed(data, w):
    """Segment a terrain grid into flood-risk regions via OpenCV watershed.

    ``data`` is a 2-D grid of location objects exposing ``.elevation`` and
    ``.getRisk(minimum, maximum)``; ``w`` is the ground width represented by
    one pixel (used for the area estimate).  Returns a list of
    ``RiskMapRegion`` objects sorted by descending risk, then descending area
    (see ``func``).
    """
    height = np.array(data)
    image = np.zeros(height.shape)
    # Copy per-cell elevations into a numeric array.
    for i in range(len(data)):
        for j in range(len(data[0])):
            height[i][j] = data[i][j].elevation
    maximum = np.amax(height)
    minimum = np.amin(height)
    numberOfBits = 16
    # NOTE(review): 'interval' is computed but never used afterwards.
    interval = (maximum - minimum) / (pow(2, numberOfBits) - 1)
    # Render each cell's risk as a 16-bit intensity value.
    for i in range(len(height)):
        for j in range(len(height[0])):
            image[i][j] = int( data[i][j].getRisk(minimum, maximum) * (pow(2, numberOfBits) - 1))
    # NOTE(review): astype('uint8') truncates the 16-bit values and the *255
    # then wraps around under uint8 arithmetic -- confirm this is intended.
    image = image.astype('uint8') * 255
    cv2.imshow("w", resizeimage(image, 1000))
    cv2.waitKey(0)
    # image = cv2.bitwise_not(image)
    # image = resizeimage(image, 1000)
    image3 = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
    ret, thresh = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    # noise removal
    kernel = np.ones((3, 3), np.uint8)
    # sure background area
    sure_bg = cv2.dilate(thresh, kernel, iterations=3)
    # Finding sure foreground area
    dist_transform = cv2.distanceTransform(thresh, cv2.DIST_L2, 5)
    ret, sure_fg = cv2.threshold(dist_transform, 0.3 * dist_transform.max(), 255, 0)
    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg, sure_fg)
    # Marker labelling
    ret, markers = cv2.connectedComponents(sure_fg)
    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1
    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0
    markers = cv2.watershed(image3, markers)
    # NOTE(review): 'diff' collects the distinct labels but is never used.
    diff = []
    for m in markers:
        for a in m:
            if not a in diff:
                diff.append(a)
    # Paint watershed ridges (label -1) blue in the RGB debug image.
    image3[markers == -1] = [255, 0, 0]
    # Map to store region wise location objects
    regions = getRegions(markers, data)
    # Map to store region boundary location objects
    regionBoundary = findboundary2(markers, data)
    # Map to store regions and their relative sizes
    sizeFactor = {}
    # Size list
    sizeList = {}
    # Map to Store Average Depths
    avgDepth = {}
    # Map to store the centers of the regions
    centers = {}
    maxSize = 0
    for k in regions:
        # Find maximum size region
        if k not in sizeFactor:
            sizeFactor[k] = float(len(regions[k]))
            maxSize = max(maxSize, sizeFactor[k])
            sizeList[k] = len(regions[k])
        # Find the center of the region i,e, the minimum depth region
        minDepth = regions[k][0]
        # Depth wise classification
        depthSum = 0
        for val in regions[k]:
            if minDepth.elevation > val.elevation:
                minDepth = val
            depthSum = depthSum + val.elevation
        centers[k] = minDepth
        # Normalise the depth
        avgDepth[k] = depthSum/len(regions[k])
    # Normalize the size factors with respect to greatest size region
    for k in sizeFactor:
        sizeFactor[k] = sizeFactor[k] / maxSize
    maxDepth = avgDepth[list(avgDepth.keys())[0]]
    for k in avgDepth:
        maxDepth = max(maxDepth, avgDepth[k])
    for k in avgDepth:
        avgDepth[k] = avgDepth[k] / maxDepth
    # Risk is an even blend of shallowness (1 - depth) and relative size.
    factor1 = 0.5
    factor2 = 0.5
    riskFactor = {}
    for k in regions:
        riskFactor[k] = factor1*(1-avgDepth[k]) + factor2*sizeFactor[k]
    riskObjects = []
    rs = []
    for k in regions:
        if k == -1:
            continue
        risk = riskFactor[k]
        # Approximate region area in km^2 from the pixel count and pixel width w.
        area = pow((pow(sizeList[k], 0.5) * w)/1000, 2)
        riskObjects.append(RiskMapRegion(centers[k],regionBoundary[k],risk,area))
    # rs.append((-risk, -area, k))
    # rs.sort()
    # rss = rs[:w]
    # rb = {}
    # for r in rss:
    #     rb[r[2]] = regionBoundary[r[2]]
    # print("YO")
    # sb = sort_boundaries(rb, data)
    # for r in rss:
    #     riskObjects.append(RiskMapRegion(centers[r[2]],sb[r[2]],risk,area))
    return sorted(riskObjects, key=func)
def getRegions(markers, locations):
    """Group location objects by watershed label, skipping ridge pixels (-1).

    Returns a dict mapping each region label to the list of location objects
    whose pixels carry that label, in row-major scan order.
    """
    maps = {}
    for row_idx in range(len(markers)):
        for col_idx in range(len(markers[0])):
            label = markers[row_idx][col_idx]
            if label == -1:
                continue
            maps.setdefault(label, []).append(locations[row_idx][col_idx])
    return maps
def findboundary2(markers, location):
    """Collect the ridge pixels (label -1) adjacent to each region and order
    them by greedy nearest-neighbour chaining.

    Returns a dict mapping region label -> ordered list of location objects;
    a ridge pixel touching a region in several directions is credited to that
    region once per direction, as in the original implementation.
    """
    neighbours = [(0, 1), (1, 0), (0, -1), (-1, 0),
                  (1, 1), (-1, -1), (1, -1), (-1, 1)]
    rows = len(markers)
    cols = len(markers[0])
    boundaries = {}
    for r in range(rows):
        for c in range(cols):
            if markers[r][c] != -1:
                continue
            for dr, dc in neighbours:
                if isValid(r + dr, c + dc, rows, cols) and markers[r + dr][c + dc] != -1:
                    label = markers[r + dr][c + dc]
                    boundaries.setdefault(label, []).append((r, c))
    locs = {}
    for idx, label in enumerate(boundaries):
        print(idx)
        remaining = boundaries[label]
        ordered = [remaining[0]]
        remaining.remove(remaining[0])
        # Greedily chain to the closest not-yet-ordered pixel (first index
        # wins on ties, matching the original linear scan).
        while len(remaining) > 0:
            last = ordered[-1]
            best = min(range(len(remaining)), key=lambda t: dist(last, remaining[t]))
            ordered.append(remaining[best])
            remaining.remove(remaining[best])
        locs[label] = [location[p[0]][p[1]] for p in ordered]
    return locs
def sort_boundaries(boundaries, location):
    """Order each region's boundary pixel list by greedy nearest-neighbour
    chaining and translate the pixels into location objects.

    Note: the lists inside *boundaries* are drained (mutated) while ordering,
    as in the original implementation.
    """
    locs = {}
    for idx, label in enumerate(boundaries):
        print(idx)
        remaining = boundaries[label]
        ordered = [remaining[0]]
        remaining.remove(remaining[0])
        # Repeatedly pull the pixel closest to the last ordered one; on ties
        # the earliest index wins, matching the original linear scan.
        while len(remaining) > 0:
            last = ordered[-1]
            best = min(range(len(remaining)), key=lambda t: dist(last, remaining[t]))
            ordered.append(remaining[best])
            remaining.remove(remaining[best])
        locs[label] = [location[p[0]][p[1]] for p in ordered]
    return locs
def dist(a, b):
    """Euclidean distance between two 2-D points given as (row, col) pairs."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return (dx * dx + dy * dy) ** 0.5
def isValid(x, y, n, m):
    """Return True when coordinate (x, y) lies inside an n-by-m grid."""
    inside_rows = 0 <= x and x < n
    inside_cols = 0 <= y and y < m
    return inside_rows and inside_cols
def findboundary(markers, location):
    """For each region, trace its boundary into location objects.

    Scans the grid row-major; the first boundary pixel met for a region
    seeds a walk via findRegionBound, and that region is then marked done.
    """
    maps = {}
    seen = {-1}  # ridge label is never traced
    rows = len(markers)
    cols = len(markers[0])
    for r in range(rows):
        for c in range(cols):
            label = markers[r][c]
            # Short-circuit: isBoundaryPixel is only consulted for new labels.
            if label in seen or not isBoundaryPixel(markers, r, c, rows, cols):
                continue
            maps[label] = findRegionBound(markers, r, c, location)
            seen.add(label)
    return maps
def findRegionBound(markers, i, j, location):
    """Walk along connected boundary pixels of the region containing (i, j).

    Starting from a known boundary pixel, repeatedly steps to the first
    unvisited same-region neighbour that is also a boundary pixel; stops
    when no such neighbour remains.  Returns the location objects visited,
    in walk order.
    """
    # Neighbour offsets: the four axis directions first, then diagonals.
    dir = [[1, 0, -1, 0, 1, -1, -1, 1],
           [0, 1, 0, -1, 1, 1, -1, -1]]
    n = len(markers)
    m = len(markers[0])
    ret = [location[i][j]]
    visited = [(i, j)]
    while True:
        a = True  # assume the walk is finished until a neighbour is found
        for k in range(len(dir[0])):
            x = i + dir[0][k]
            y = j + dir[1][k]
            # NOTE(review): x/y are not bounds-checked before indexing
            # markers, so regions touching the grid edge may index out of
            # range (or wrap via negative indices) -- confirm callers only
            # reach interior pixels.
            if (x, y) not in visited and markers[x][y] == markers[i][j] and isBoundaryPixel(markers, x, y, n, m):
                visited.append((x, y))
                ret.append(location[x][y])
                i = x
                j = y
                a = False
                break
        if a:
            return ret
def isBoundaryPixel(markers, i, j, n, m):
    """Return True if pixel (i, j) has an 8-connected neighbour from a
    different region.

    :param markers: 2-D grid of region labels
    :param i, j: pixel coordinates
    :param n, m: grid dimensions (rows, columns)
    """
    dir = [[0, 1, 1, 1, 0, -1, -1, -1],
           [1, 1, 0, -1, -1, -1, 0, 1]]
    for k in range(len(dir[0])):
        x = i + dir[0][k]
        y = j + dir[1][k]
        # Skip neighbours outside the grid.  The original never used n and m,
        # so edge pixels either wrapped around via Python's negative indexing
        # (silently wrong answer) or raised IndexError past the last row/col.
        if x < 0 or x >= n or y < 0 or y >= m:
            continue
        if markers[x][y] != markers[i][j]:
            return True
    return False
def resizeimage(image, W):
    """Scale a single-channel image so its width becomes W pixels, keeping
    the aspect ratio."""
    rows, cols = image.shape
    scale = W / cols
    new_width = int(image.shape[1] * scale)
    new_height = int(image.shape[0] * scale)
    return cv2.resize(image, (new_width, new_height))
if __name__ == '__main__':
    # getWatershed requires a second argument (the ground width per pixel);
    # the original call omitted it and raised TypeError immediately.
    # NOTE(review): getWatershed reads .elevation / .getRisk() off each cell,
    # so a raw float array still will not run end-to-end -- confirm the
    # intended demo input type.
    getWatershed(np.random.random((100, 100)), 10)
17047781074 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AntMerchantExpandProductionOrderSyncModel(object):
    """Request model for the merchant-expand production order sync API.

    All six fields are optional; ``to_alipay_dict`` serialises only the
    fields that are set (truthy), and ``from_alipay_dict`` rebuilds an
    instance from a gateway dict.
    """

    # Field order shared by the serialisation helpers below; it matches the
    # key-insertion order of the original hand-written code.
    _FIELD_NAMES = ('amount', 'batch_no', 'item_id', 'project_no',
                    'regional_warehouse', 'value')

    def __init__(self):
        self._amount = None
        self._batch_no = None
        self._item_id = None
        self._project_no = None
        self._regional_warehouse = None
        self._value = None

    @property
    def amount(self):
        return self._amount

    @amount.setter
    def amount(self, value):
        self._amount = value

    @property
    def batch_no(self):
        return self._batch_no

    @batch_no.setter
    def batch_no(self, value):
        self._batch_no = value

    @property
    def item_id(self):
        return self._item_id

    @item_id.setter
    def item_id(self, value):
        self._item_id = value

    @property
    def project_no(self):
        return self._project_no

    @project_no.setter
    def project_no(self, value):
        self._project_no = value

    @property
    def regional_warehouse(self):
        return self._regional_warehouse

    @regional_warehouse.setter
    def regional_warehouse(self, value):
        self._regional_warehouse = value

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        self._value = value

    def to_alipay_dict(self):
        """Serialise the set (truthy) fields into a plain dict."""
        params = dict()
        for name in self._FIELD_NAMES:
            field = getattr(self, name)
            if not field:
                continue
            # Nested API objects know how to serialise themselves.
            if hasattr(field, 'to_alipay_dict'):
                params[name] = field.to_alipay_dict()
            else:
                params[name] = field
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a gateway dict; empty input yields None."""
        if not d:
            return None
        o = AntMerchantExpandProductionOrderSyncModel()
        for name in AntMerchantExpandProductionOrderSyncModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AntMerchantExpandProductionOrderSyncModel.py | AntMerchantExpandProductionOrderSyncModel.py | py | 3,344 | python | en | code | 241 | github-code | 13 |
32227445689 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 06 11:01:57 2018
@author: deepe
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Train a logistic-regression classifier on the affairs dataset: last CSV
# column is the label, the rest are features.
dataset = pd.read_csv('affairs.csv')
features = dataset.iloc[:,:-1].values
labels = dataset.iloc[:,-1].values
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
labelencoder = LabelEncoder()
# Label-encode the two categorical columns at indices 6 and 7.
for i in range(6,8):
    features[:,i] = labelencoder.fit_transform(features[:,i])
# One-hot encode column 6, then drop the first dummy to avoid collinearity.
# NOTE(review): OneHotEncoder's 'categorical_features' argument was removed
# in scikit-learn 0.22; this script requires an older sklearn -- confirm the
# pinned version (ColumnTransformer is the modern replacement).
onehotencoder1 = OneHotEncoder(categorical_features = [6])
features = onehotencoder1.fit_transform(features).toarray()
features = features[:,1:]
# One-hot encode the (now last) second categorical column the same way.
onehotencoder1 = OneHotEncoder(categorical_features = [-1])
features = onehotencoder1.fit_transform(features).toarray()
features = features[:,1:]
from sklearn.model_selection import train_test_split
features_train,features_test,labels_train,labels_test = train_test_split(features,labels,test_size = 0.25, random_state = 0)
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(features_train,labels_train)
labels_pred = classifier.predict(features_test)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(labels_test,labels_pred)
score = classifier.score(features_test,labels_test)
# Predict a single hand-crafted sample (already in encoded feature order).
new_pred = classifier.predict(np.array([1,0,0,0,0,0,0,1,0,0,3,25,3,1,4,16]).reshape(1,-1))
| sherwaldeepesh/Forsk-Python_Machine_Learning | Day 19/code1.py | code1.py | py | 1,353 | python | en | code | 1 | github-code | 13 |
40336485475 | from base.base_train import BaseTrain
from tqdm import tqdm
import numpy as np
from time import sleep
from time import time
from utils.evaluations import do_roc, save_results
class BIGANTrainer(BaseTrain):
    """Training/evaluation loop for a BiGAN anomaly-detection model.

    Drives the TensorFlow session: per-epoch adversarial updates for the
    discriminator, generator and encoder, periodic validation with early
    stopping, and a test pass that writes anomaly-score results to disk.
    """

    def __init__(self, sess, model, data, config, summarizer):
        """Initialise iterators and early-stopping bookkeeping.

        :param sess: TensorFlow session
        :param model: model object exposing the ops/tensors used below
        :param data: data loader exposing train/test/valid iterators
        :param config: nested configuration object
        :param summarizer: TensorBoard summary writer wrapper
        """
        super(BIGANTrainer, self).__init__(sess, model, data, config, summarizer)
        self.batch_size = self.config.data_loader.batch_size
        self.noise_dim = self.config.trainer.noise_dim
        self.img_dims = self.config.trainer.image_dims
        # Initialize the train Dataset Iterator
        self.sess.run(self.data.iterator.initializer)
        # Initialize the test Dataset Iterator
        self.sess.run(self.data.test_iterator.initializer)
        if self.config.data_loader.validation:
            self.sess.run(self.data.valid_iterator.initializer)
        # NOTE(review): best_valid_loss starts at 0, so 'valid_loss <
        # best_valid_loss' can only trigger after the first forced save at
        # the first evaluation -- confirm this is the intended behaviour.
        self.best_valid_loss = 0
        self.nb_without_improvements = 0

    def train_epoch(self):
        """Run one full training epoch and optional validation.

        Iterates num_iter_per_epoch batches, logs mean losses, periodically
        writes reconstruction summaries, saves the model, and applies early
        stopping on the validation reconstruction error.
        """
        begin = time()
        # Attach the epoch loop to a variable
        loop = tqdm(range(self.config.data_loader.num_iter_per_epoch))
        # Define the lists for summaries and losses
        gen_losses = []
        disc_losses = []
        enc_losses = []
        summaries = []
        # Get the current epoch counter
        cur_epoch = self.model.cur_epoch_tensor.eval(self.sess)
        image = self.data.image
        for _ in loop:
            loop.set_description("Epoch:{}".format(cur_epoch + 1))
            loop.refresh()  # to show immediately the update
            sleep(0.01)
            gen, dis, enc, sum_g, sum_d = self.train_step(image, cur_epoch)
            gen_losses.append(gen)
            disc_losses.append(dis)
            enc_losses.append(enc)
            summaries.append(sum_g)
            summaries.append(sum_d)
        self.logger.info("Epoch {} terminated".format(cur_epoch))
        self.summarizer.add_tensorboard(step=cur_epoch, summaries=summaries)
        # Check for reconstruction
        if cur_epoch % self.config.log.frequency_test == 0:
            noise = np.random.normal(
                loc=0.0, scale=1.0, size=[self.config.data_loader.test_batch, self.noise_dim]
            )
            image_eval = self.sess.run(image)
            feed_dict = {
                self.model.image_input: image_eval,
                self.model.noise_tensor: noise,
                self.model.is_training: False,
            }
            reconstruction = self.sess.run(self.model.sum_op_im, feed_dict=feed_dict)
            self.summarizer.add_tensorboard(step=cur_epoch, summaries=[reconstruction])
        # Get the means of the loss values to display
        gen_m = np.mean(gen_losses)
        dis_m = np.mean(disc_losses)
        enc_m = np.mean(enc_losses)
        self.logger.info(
            "Epoch: {} | time = {} s | loss gen= {:4f} | loss dis = {:4f} | loss enc = {:4f}".format(
                cur_epoch, time() - begin, gen_m, dis_m, enc_m
            )
        )
        # Save the model state
        self.model.save(self.sess)
        if (
            cur_epoch + 1
        ) % self.config.trainer.frequency_eval == 0 and self.config.trainer.enable_early_stop:
            valid_loss = 0
            image_valid = self.sess.run(self.data.valid_image)
            noise = np.random.normal(
                loc=0.0, scale=1.0, size=[self.config.data_loader.test_batch, self.noise_dim]
            )
            feed_dict = {
                self.model.noise_tensor: noise,
                self.model.image_input: image_valid,
                self.model.is_training: False,
            }
            vl = self.sess.run([self.model.rec_error_valid], feed_dict=feed_dict)
            valid_loss += vl[0]
            if self.config.log.enable_summary:
                sm = self.sess.run(self.model.sum_op_valid, feed_dict=feed_dict)
                self.summarizer.add_tensorboard(step=cur_epoch, summaries=[sm], summarizer="valid")

            self.logger.info("Validation: valid loss {:.4f}".format(valid_loss))
            # Save whenever validation improves, and unconditionally at the
            # very first evaluation (to seed best_valid_loss).
            if (
                valid_loss < self.best_valid_loss
                or cur_epoch == self.config.trainer.frequency_eval - 1
            ):
                self.best_valid_loss = valid_loss
                self.logger.info(
                    "Best model - valid loss = {:.4f} - saving...".format(self.best_valid_loss)
                )
                # Save the model state
                self.model.save(self.sess)
                self.nb_without_improvements = 0
            else:
                self.nb_without_improvements += self.config.trainer.frequency_eval
            if self.nb_without_improvements > self.config.trainer.patience:
                self.patience_lost = True
                self.logger.warning(
                    "Early stopping at epoch {} with weights from epoch {}".format(
                        cur_epoch, cur_epoch - self.nb_without_improvements
                    )
                )

    def test_epoch(self):
        """Evaluate anomaly scores on the test set and persist the results.

        Computes two score lists (feature-matching variants 1 and 2) per
        batch, records mean inference time, and writes both score sets via
        save_results.
        """
        self.logger.warn("Testing evaluation...")
        scores_1 = []
        scores_2 = []
        inference_time = []
        true_labels = []
        summaries = []
        # Create the scores
        test_loop = tqdm(range(self.config.data_loader.num_iter_per_test))
        cur_epoch = self.model.cur_epoch_tensor.eval(self.sess)
        for _ in test_loop:
            test_batch_begin = time()
            test_batch, test_labels = self.sess.run([self.data.test_image, self.data.test_label])
            test_loop.refresh()  # to show immediately the update
            sleep(0.01)
            noise = np.random.normal(
                loc=0.0, scale=1.0, size=[self.config.data_loader.test_batch, self.noise_dim]
            )
            feed_dict = {
                self.model.image_input: test_batch,
                self.model.noise_tensor: noise,
                self.model.is_training: False,
            }
            scores_1 += self.sess.run(self.model.list_scores_1, feed_dict=feed_dict).tolist()
            scores_2 += self.sess.run(self.model.list_scores_2, feed_dict=feed_dict).tolist()
            summaries += self.sess.run([self.model.sum_op_im_test], feed_dict=feed_dict)
            inference_time.append(time() - test_batch_begin)
            true_labels += test_labels.tolist()
        # Since the higher anomaly score indicates the anomalous one, and we inverted the labels to show that
        # normal images are 0 meaning that contains no anomaly and anomalous images are 1 meaning that it contains
        # an anomalous region, we first scale the scores and then invert them to match the scores
        scores_1 = np.asarray(scores_1)
        scores_2 = np.asarray(scores_2)
        true_labels = np.asarray(true_labels)
        inference_time = np.mean(inference_time)
        self.summarizer.add_tensorboard(step=cur_epoch, summaries=summaries, summarizer="test")
        self.logger.info("Testing: Mean inference time is {:4f}".format(inference_time))
        step = self.sess.run(self.model.global_step_tensor)
        percentiles = np.asarray(self.config.trainer.percentiles)
        save_results(
            self.config.log.result_dir,
            scores_1,
            true_labels,
            self.config.model.name,
            self.config.data_loader.dataset_name,
            "fm_1",
            "paper",
            self.config.trainer.label,
            self.config.data_loader.random_seed,
            self.logger,
            step,
            percentile=percentiles,
        )
        save_results(
            self.config.log.result_dir,
            scores_2,
            true_labels,
            self.config.model.name,
            self.config.data_loader.dataset_name,
            "fm_2",
            "paper",
            self.config.trainer.label,
            self.config.data_loader.random_seed,
            self.logger,
            step,
            percentile=percentiles,
        )

    def train_step(self, image, cur_epoch):
        """Run one adversarial update: discriminator first, then generator
        and encoder together.

        :param image: input image tensor (evaluated here per sub-step)
        :param cur_epoch: current epoch index (controls instance-noise decay)
        :returns: (gen loss, mean disc loss, enc loss, gen summary, disc summary)
        """
        image_eval = self.sess.run(image)
        # Train the discriminator
        ld, sm_d = 0, None
        # WGAN-style modes train the critic several times per generator step.
        if self.config.trainer.mode == "standard":
            disc_iters = 1
        else:
            disc_iters = self.config.trainer.critic_iters
        for _ in range(disc_iters):
            noise = np.random.normal(loc=0.0, scale=1.0, size=[self.batch_size, self.noise_dim])
            true_labels, generated_labels = self.generate_labels(
                self.config.trainer.soft_labels, self.config.trainer.flip_labels
            )
            real_noise, fake_noise = self.generate_noise(
                self.config.trainer.include_noise, cur_epoch
            )
            feed_dict = {
                self.model.image_input: image_eval,
                self.model.noise_tensor: noise,
                self.model.generated_labels: generated_labels,
                self.model.true_labels: true_labels,
                self.model.real_noise: real_noise,
                self.model.fake_noise: fake_noise,
                self.model.is_training: True,
            }
            # Train Discriminator
            _, ld, sm_d = self.sess.run(
                [self.model.train_dis_op, self.model.loss_discriminator, self.model.sum_op_dis],
                feed_dict=feed_dict,
            )
            if self.config.trainer.mode == "wgan":
                _ = self.sess.run(self.model.clip_disc_weights)
        # Train Generator and Encoder
        noise = np.random.normal(loc=0.0, scale=1.0, size=[self.batch_size, self.noise_dim])
        true_labels, generated_labels = self.generate_labels(
            self.config.trainer.soft_labels, self.config.trainer.flip_labels
        )
        real_noise, fake_noise = self.generate_noise(self.config.trainer.include_noise, cur_epoch)
        feed_dict = {
            self.model.image_input: image_eval,
            self.model.noise_tensor: noise,
            self.model.generated_labels: generated_labels,
            self.model.true_labels: true_labels,
            self.model.real_noise: real_noise,
            self.model.fake_noise: fake_noise,
            self.model.is_training: True,
        }
        _, _, le, lg, sm_g = self.sess.run(
            [
                self.model.train_gen_op,
                self.model.train_enc_op,
                self.model.loss_encoder,
                self.model.loss_generator,
                self.model.sum_op_gen,
            ],
            feed_dict=feed_dict,
        )
        return lg, np.mean(ld), le, sm_g, sm_d

    def generate_labels(self, soft_labels, flip_labels):
        """Build the real/fake label batches for the discriminator.

        With soft labels, values are jittered into [0, 0.1] / [0.9, 1.0] and
        a noise_probability fraction of each batch is flipped; with
        flip_labels the two batches are swapped entirely.
        """
        if not soft_labels:
            true_labels = np.ones((self.config.data_loader.batch_size, 1))
            generated_labels = np.zeros((self.config.data_loader.batch_size, 1))
        else:
            generated_labels = np.zeros(
                (self.config.data_loader.batch_size, 1)
            ) + np.random.uniform(low=0.0, high=0.1, size=[self.config.data_loader.batch_size, 1])
            flipped_idx = np.random.choice(
                np.arange(len(generated_labels)),
                size=int(self.config.trainer.noise_probability * len(generated_labels)),
            )
            generated_labels[flipped_idx] = 1 - generated_labels[flipped_idx]
            true_labels = np.ones((self.config.data_loader.batch_size, 1)) - np.random.uniform(
                low=0.0, high=0.1, size=[self.config.data_loader.batch_size, 1]
            )
            flipped_idx = np.random.choice(
                np.arange(len(true_labels)),
                size=int(self.config.trainer.noise_probability * len(true_labels)),
            )
            true_labels[flipped_idx] = 1 - true_labels[flipped_idx]
        if flip_labels:
            return generated_labels, true_labels
        else:
            return true_labels, generated_labels

    def generate_noise(self, include_noise, cur_epoch):
        """Build instance-noise batches for real and fake images.

        Noise std decays linearly from 0.75 toward a 0.05 floor over the
        first ten epochs; when disabled, zero arrays of the same shape are
        returned so the feed_dict stays uniform.
        """
        sigma = max(0.75 * (10.0 - cur_epoch) / (10), 0.05)
        if include_noise:
            # If we want to add this is will add the noises
            real_noise = np.random.normal(
                scale=sigma,
                size=[self.config.data_loader.batch_size] + self.config.trainer.image_dims,
            )
            fake_noise = np.random.normal(
                scale=sigma,
                size=[self.config.data_loader.batch_size] + self.config.trainer.image_dims,
            )
        else:
            # Otherwise we are just going to add zeros which will not break anything
            real_noise = np.zeros(
                ([self.config.data_loader.batch_size] + self.config.trainer.image_dims)
            )
            fake_noise = np.zeros(
                ([self.config.data_loader.batch_size] + self.config.trainer.image_dims)
            )
        return real_noise, fake_noise
| yigitozgumus/Polimi_Thesis | trainers/bigan_trainer.py | bigan_trainer.py | py | 12,797 | python | en | code | 5 | github-code | 13 |
38167691902 | #!/usr/bin/python
# -*- coding:utf-8 -*-
from PIL import Image
import matplotlib.pyplot as plt
img = Image.open(fp="niuniu.jpg")
# Print the basic image attributes (container format, pixel size, colour mode)
print('format:', img.format, '\n', 'size:', img.size, '\n', 'mode:', img.mode)
# Split the image into its R, G and B channel images
r, g, b = Image.Image.split(img)
image = [img, r, g, b]
fig = plt.figure(figsize=(11, 5))
# Position the subplots within the figure: left/right/bottom/top take values
# in (0, 1) where 0 means the left/bottom edge and 1 the right/top edge
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i in range(4):
    # Add a subplot in a 1x4 grid with the axis tick marks removed
    axis = fig.add_subplot(1, 4, i + 1, xticks=[], yticks=[])
    axis.imshow(image[i])
plt.show()
'''
要求:
1.读入图片 输出图片属性
2.分 R G B 三通道显示
3.注意绘图的格式
提示:
from PIL import Image
import matplotlib.pyplot as plt
''' | Funail/webdriver | python_study/test16.py | test16.py | py | 913 | python | zh | code | 0 | github-code | 13 |
27208537651 | from rest_framework.decorators import api_view, permission_classes
from rest_framework.request import Request
from rest_framework.response import Response
from leaderboard.models import UserStats, UserStatsSummary
from rest_framework import status
from main.models import Country
from rest_framework.permissions import IsAuthenticated
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def get_leaderboard_by_country(request: Request, country_code):
    """Return the top-5 donors for one country plus the caller's rank.

    Response content holds ``rank`` (1-based position of the requesting
    user, 0 when absent) and ``lederboards`` (the top five rows, with the
    caller's own row appended when found).
    """
    try:
        # All donor rows for this country, largest donation first.
        user_stats = UserStats.objects.filter(country_code = country_code).order_by('-donation_amount').values()
        user_stats_list = list(user_stats)
        # Slicing copies the list, so appending below won't grow user_stats_list.
        leaderboards = user_stats_list[:5]
        rank = 0
        # NOTE(review): .first() may return None for a user without stats;
        # the attribute access below would then raise and surface as a 500
        # -- confirm every authenticated user has a UserStatsSummary row.
        user_stats_summary = UserStatsSummary.objects.filter(user=request.user).first()
        for i in range(len((user_stats_list))):
            if user_stats_summary.pk == user_stats_list[i]['user_stats_summary_id']:
                rank = i + 1
                leaderboards.append(user_stats_list[i])
        response = {
            "success": True,
            "content": {
                "rank": rank,
                "lederboards": leaderboards,
            },
            "message": "Leaderboard successfully retrieved!"
        }
        return Response(data=response, status=status.HTTP_200_OK)
    except Exception as e:
        # Any failure is reported as a 500 carrying the exception text.
        response = {
            "success": False,
            "content": None,
            "message": str(e)
        }
        return Response(data=response, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def get_leaderboard(request: Request):
    """Return the global top-5 donors, the caller's rank, the country list
    and the caller's most-donated country (computed lazily when unsynced).
    """
    try:
        countries = Country.objects.all().values()
        # All summaries, highest total donated trees first.
        user_stats_summary_list = UserStatsSummary.objects.all().order_by('-total_donated_tree').values()
        leaderboards = list(user_stats_summary_list)[:5]
        rank = 0
        user_stats_summary = None
        for i in range(len((user_stats_summary_list))):
            if request.user.pk == user_stats_summary_list[i]['user_id']:
                rank = i + 1
                leaderboards.append(user_stats_summary_list[i])
                user_stats_summary = user_stats_summary_list[i]
        # NOTE(review): if the caller has no UserStatsSummary row,
        # user_stats_summary stays None and .get() below raises (returned
        # as a 500) -- confirm that cannot happen for authenticated users.
        most_donated_country = user_stats_summary.get("most_donated_country")
        if not user_stats_summary.get('synced'):
            # Lazily recompute the caller's most-donated country from the
            # per-country stats and persist it on the summary row.
            user_stats = UserStats.objects.filter(user_stats_summary_id=user_stats_summary.get('id'))
            user_stats_summary = UserStatsSummary.objects.filter(id=user_stats_summary.get('id')).first()
            most_donated_amount = 0
            most_donated_country_code = ""
            for user_stat in user_stats:
                if user_stat.donation_amount > most_donated_amount:
                    most_donated_amount = user_stat.donation_amount
                    most_donated_country_code = user_stat.country_code
            country = Country.objects.filter(code=most_donated_country_code).first()
            most_donated_country = country.name
            user_stats_summary.most_donated_country = country.name
            user_stats_summary.synced = True
            user_stats_summary.save()
        response = {
            "success": True,
            "content": {
                "rank": rank,
                "countries": countries,
                "lederboards": leaderboards,
                "most_donated_country": most_donated_country
            },
            "message": "Leaderboard successfully retrieved!"
        }
        return Response(data=response, status=status.HTTP_200_OK)
    except Exception as e:
        # Any failure is reported as a 500 carrying the exception text.
        response = {
            "success": False,
            "content": None,
            "message": str(e)
        }
        return Response(data=response, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
39947264182 | import io
import typing
import socket
def as_bytes(s: str) -> bytes:
    """UTF-8 encode a string into bytes."""
    return s.encode(encoding='utf-8')
def from_bytes(b: bytes) -> str:
    """UTF-8 decode a byte sequence into a string."""
    return b.decode(encoding='utf-8')
def annotate(src: typing.Any, *ansi_escape_codes: int) -> str:
    """Wrap str(src) in an ANSI SGR escape sequence built from the given
    codes; with no codes, return str(src) unchanged."""
    if not ansi_escape_codes:
        return str(src)
    joined = ";".join(str(code) for code in ansi_escape_codes)
    return "\033[" + joined + "m" + str(src) + "\033[0m"
def read_reliably(s: socket.socket, size: int) -> bytes:
    """Read up to *size* bytes from the socket, retrying short reads.

    Stops early when the peer closes the connection; in that case the
    unfilled tail of the result stays zero-padded (the buffer is always
    returned at full *size*).
    """
    data = bytearray(size)
    mv = memoryview(data)
    received = 0
    while received < size:
        n = s.recv_into(mv[received:], size - received)
        if not n:
            break  # EOF: peer closed before the full size arrived
        received += n
    return bytes(data)
def write_reliably(s: socket.socket, buffer: typing.Union[bytearray, bytes], size: int) -> int:
    """Send at least the first *size* bytes of *buffer*, retrying partial
    sends; returns the number of bytes actually sent (0 on immediate close).

    Note: each send offers the whole remaining buffer tail, so slightly more
    than *size* bytes may go out when the buffer is longer (original
    behaviour, preserved).
    """
    mv = memoryview(buffer)
    sent = 0
    while sent < size:
        chunk = s.send(mv[sent:])
        if chunk == 0:
            break
        sent += chunk
    return sent
| jtmr05/spln-2223 | TP2/utils.py | utils.py | py | 1,174 | python | en | code | 0 | github-code | 13 |
32507202912 | import psycopg2
from pprint import pprint
def create_db(cur):
    '''Create the client and phone tables if they do not exist.

    :param cur: open database cursor

    The original DDL appended a second, SQLite-style
    'PRIMARY KEY("id" AUTOINCREMENT)' clause (with a missing comma before
    it), which PostgreSQL rejects; SERIAL PRIMARY KEY already provides an
    auto-incrementing key, so that clause is simply dropped.
    '''
    cur.execute("""
        CREATE TABLE IF NOT EXISTS client(
            id SERIAL PRIMARY KEY,
            name VARCHAR(100) NOT NULL,
            surname VARCHAR(100) NOT NULL,
            email VARCHAR(100) NOT NULL
        );
    """)
    cur.execute("""
        CREATE TABLE IF NOT EXISTS phone(
            id SERIAL PRIMARY KEY,
            client_id INTEGER NOT NULL REFERENCES client(id),
            phone VARCHAR(20) DEFAULT NULL UNIQUE
        );
    """)
def add_client(cur, name, surname, email):
    '''Insert a new client row.

    :param cur: open database cursor
    :param name: client first name
    :param surname: client last name
    :param email: client e-mail address
    '''
    # %s is the psycopg2 placeholder; the original used SQLite-style '?',
    # which PostgreSQL rejects with a syntax error.
    cur.execute("""
    INSERT INTO client(name, surname, email) VALUES(%s, %s, %s);
    """, (name, surname, email))
def add_phone(cur, client_id, phone):
    '''Insert a phone number for an existing client.

    :param cur: open database cursor
    :param client_id: id of the owning client row
    :param phone: phone number string (must be unique)
    '''
    # %s is the psycopg2 placeholder; '?' is SQLite syntax and fails
    # under PostgreSQL.
    cur.execute("""
    INSERT INTO phone(client_id, phone) VALUES(%s, %s);
    """, (client_id, phone))
def clent_update():
    '''Interactively update one client field (name, surname, e-mail or phone).

    Reads a menu choice from stdin and issues a single UPDATE through the
    module-level cursor ``cur``.  NOTE: the function name keeps its
    historical typo ("clent") so existing callers continue to work.
    '''
    while True:
        # Re-prompt on every pass: the original read the command once before
        # the loop, so an invalid choice spun forever printing the error.
        command = int(input("Для изменения информации о клиенте, пожалуйста, введите нужную Вам команду.\n "
                            "1 - изменить имя; 2 - изменить фамилию; 3 - изменить e-mail; 4 - изменить номер телефона"))
        if command == 1:
            name = input("Введите id клиента имя которого хотите изменить: ")
            new_name = input("Введите имя для изменения: ")
            # psycopg2 uses %s placeholders; '?' is SQLite syntax and raises
            # a syntax error under PostgreSQL (same fix in every branch).
            cur.execute("""
            UPDATE client SET name=%s WHERE id=%s;
            """, (new_name, name))
            break
        elif command == 2:
            surname = input("Введите id клиента фамилию которого хотите изменить: ")
            new_surname = input("Введите фамилию для изменения: ")
            cur.execute("""
            UPDATE client SET surname=%s WHERE id=%s;
            """, (new_surname, surname))
            break
        elif command == 3:
            email = input("Введите id клиента e-mail которого хотите изменить: ")
            new_email = input("Введите e-mail для изменения: ")
            cur.execute("""
            UPDATE client SET email=%s WHERE id=%s;
            """, (new_email, email))
            break
        elif command == 4:
            phone = input("Введите номер телефона который Вы хотите изменить: ")
            new_phone = input("Введите новый номер телефона, который заменит собой старый: ")
            cur.execute("""
            UPDATE phone SET phone=%s WHERE phone=%s;
            """, (new_phone, phone))
            break
        else:
            print("К сожалению, Вы ввели неправильную команду, пожалуйста, повторите ввод")
def delete_phone():
    '''Interactively delete one phone number belonging to a client.

    Uses the module-level connection ``conn``.
    '''
    id_client = input("Введите id клиента номер телефона которого хотите удалить: ")
    phone = input("Введите номер телефона который хотите удалить: ")
    with conn.cursor() as cur:
        # %s is the psycopg2 placeholder; '?' is SQLite syntax and fails
        # under PostgreSQL.
        cur.execute("""
        DELETE FROM phone WHERE client_id=%s AND phone=%s
        """, (id_client, phone))
def delete_client():
    '''Interactively delete a client together with all their phone numbers.

    Uses the module-level connection ``conn``.
    '''
    id_client = input("Введите id клиента которого хотите удалить: ")
    surname = input("Введите фамилию клиента которого хотите удалить: ")
    with conn.cursor() as cur:
        # Remove dependent phone rows first to satisfy the FK constraint.
        # %s is the psycopg2 placeholder ('?' is SQLite syntax).
        cur.execute("""
        DELETE FROM phone WHERE client_id=%s
        """, (id_client,))
        # Then remove the client row itself.
        cur.execute("""
        DELETE FROM client WHERE id=%s AND surname=%s
        """, (id_client, surname))
def find_client():
    '''Interactively look up clients by name, surname, e-mail or phone and
    print the matching rows (with their phone numbers).

    Uses the module-level cursor ``cur``.
    '''
    while True:
        # Re-prompt each pass: the original read the command once and had no
        # break in any branch, so it looped forever re-running one query.
        command = int(input("Для поиска информации о клиенте, пожалуйста, введите команду, где:\n "
                            "1 - найти по имени; 2 - найти по фамилии; 3 - найти по e-mail; 4 - найти по номеру телефона"))
        if command == 1:
            name = input("Введите имя для поиска информации о клиенте: ")
            # Columns are qualified to avoid PostgreSQL's "ambiguous column"
            # error on id, and the join now matches phone.client_id to
            # client.id (the original wrongly joined the two primary keys);
            # %s replaces the SQLite-style '?' placeholders.
            cur.execute("""
            SELECT ch5.id, ch5.name, ch5.surname, ch5.email, cp.phone
            FROM client AS ch5
            LEFT JOIN phone AS cp ON cp.client_id = ch5.id
            WHERE ch5.name=%s
            """, (name,))
            print(cur.fetchall())
            break
        elif command == 2:
            surname = input("Введите фамилию для поиска информации о клиенте: ")
            cur.execute("""
            SELECT ch5.id, ch5.name, ch5.surname, ch5.email, cp.phone
            FROM client AS ch5
            LEFT JOIN phone AS cp ON cp.client_id = ch5.id
            WHERE ch5.surname=%s
            """, (surname,))
            print(cur.fetchall())
            break
        elif command == 3:
            email = input("Введите email для поиска информации о клиенте: ")
            cur.execute("""
            SELECT ch5.id, ch5.name, ch5.surname, ch5.email, cp.phone
            FROM client AS ch5
            LEFT JOIN phone AS cp ON cp.client_id = ch5.id
            WHERE ch5.email=%s
            """, (email,))
            print(cur.fetchall())
            break
        elif command == 4:
            phone = input("Введите номер телефона для поиска информации о клиенте: ")
            cur.execute("""
            SELECT ch5.id, ch5.name, ch5.surname, ch5.email, cp.phone
            FROM client AS ch5
            LEFT JOIN phone AS cp ON cp.client_id = ch5.id
            WHERE cp.phone=%s
            """, (phone,))
            print(cur.fetchall())
            break
        else:
            print("К сожалению, Вы ввели неправильную команду, пожалуйста, повторите ввод")
def check_function(cur):
    '''Debug helper: pretty-print every row of the client and phone tables.'''
    client_query = """
    SELECT * FROM client;
    """
    phone_query = """
    SELECT * FROM phone;
    """
    for query in (client_query, phone_query):
        cur.execute(query)
        pprint(cur.fetchall())
#check_function()
if __name__ == '__main__':
    # psycopg2's connection context manager commits the transaction on clean
    # exit (and rolls back on exception); note it does NOT close the
    # connection itself, only the cursor context does its own cleanup.
    with psycopg2.connect(host="127.0.0.1", user="postgres", password="123456", database="h_w_5", port="5432") as conn:
        with conn.cursor() as cur:
            create_db(cur)
            check_function(cur)
            # Seed a few demo clients and their phone numbers.
            add_client(cur, "Гендальф", "Серый", "g_gray@com.com")
            add_client(cur, "Винни", "Пух", "br@com.com")
            add_client(cur, "Сталкер", "Меченный", "stalker@com.com")
            add_client(cur, "Снежная", "Королева", "snow_queen@com.com")
            add_client(cur, "Гудвин", "Ужасный", "gudvin@com.com")
            add_phone(cur, 1, "123456789")
            add_phone(cur, 2, "987654321")
            add_phone(cur, 3, "1111111111")
            add_phone(cur, 4, "77777777777")
            add_phone(cur, 5, "112233445566")
            # Interactive demo flows; these rely on the module-level names
            # ``cur`` and ``conn`` bound by the with-statements above.
            clent_update()
            delete_phone()
            delete_client()
            find_client()
| s-evg/netology_SQLPY | hm_5/main.py | main.py | py | 7,862 | python | ru | code | 0 | github-code | 13 |
43085210862 | #
# @lc app=leetcode.cn id=83 lang=python3
#
# [83] 删除排序链表中的重复元素
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def deleteDuplicates(self, head: ListNode) -> ListNode:
        """Collapse consecutive duplicate values in a sorted linked list,
        returning the (unchanged) head node."""
        if not head:
            return head
        anchor = head          # original head, returned at the end
        prev = head            # last node kept in the result
        cur = head.next        # node currently being examined
        while cur:
            if cur.val == prev.val:
                # Duplicate of the kept node: splice it out.
                prev.next = cur.next
            else:
                prev = cur
            cur = cur.next
        return anchor
# @lc code=end
| Guo-xuejian/leetcode-practice | 83.删除排序链表中的重复元素.py | 83.删除排序链表中的重复元素.py | py | 679 | python | en | code | 1 | github-code | 13 |
71995469459 | import pandas as pd
import matplotlib.pyplot as plt
# Plot confirmed COVID-19 case counts over time for a few countries.
data = pd.read_csv('covid_19_data.csv')
# Total confirmed cases per country per observation date.
daily_country_cases = data.groupby(
    ['ObservationDate', 'Country/Region'])['Confirmed'].sum().reset_index()
# NOTE(review): confirm these names match the CSV values exactly (e.g. some
# releases of this dataset use 'US' or 'Mainland China').
countries_of_interest = ['China', 'Italy', 'United States', 'India']
plt.figure(figsize=(10, 6))
# One line per selected country.
for country in countries_of_interest:
    country_data = daily_country_cases[daily_country_cases['Country/Region'] == country]
    plt.plot(country_data['ObservationDate'],
             country_data['Confirmed'], label=country)
plt.xlabel('Date')
plt.ylabel('Confirmed Cases')
plt.title('Comparative Analysis of Confirmed Cases by Country')
plt.legend()
plt.xticks(rotation=45)
plt.grid()
plt.tight_layout()
plt.show()
| tyron200/capstone | comparative.py | comparative.py | py | 728 | python | en | code | 0 | github-code | 13 |
14042408077 | import logging
import os
import sys
from transformers import AutoConfig, AutoTokenizer
from transformers import (
HfArgumentParser,
set_seed,
)
from arguments import ModelArguments, DataArguments, BiEncoderTrainingArguments
from dataloader import BiEncoderDataset, BiEncoderCollator, GenericDataLoader, NoisyBiEncoderDataset
from model import BiEncoder
from trainer import CropSentenceTrainer
logger = logging.getLogger(__name__)
def main():
    """Entry point: parse arguments, build the bi-encoder model, load the
    BEIR-style training data for every task, and run training.

    Arguments may be supplied either as a single JSON config file path or as
    normal command-line flags (HfArgumentParser handles both).
    """
    parser = HfArgumentParser((ModelArguments, DataArguments, BiEncoderTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # A lone .json argument is treated as a full config file.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    model_args: ModelArguments
    data_args: DataArguments
    training_args: BiEncoderTrainingArguments

    # Refuse to clobber an existing, non-empty output directory unless the
    # user explicitly allows it.
    if (
            os.path.exists(training_args.output_dir)
            and os.listdir(training_args.output_dir)
            and training_args.do_train
            and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )

    # Setup logging: INFO on the main process, WARN on distributed workers.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    logger.info("Training/evaluation parameters %s", training_args)
    logger.info("MODEL parameters %s", model_args)

    set_seed(training_args.seed)

    # Single relevance label (retrieval scoring).
    num_labels = 1
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir
    )
    model = BiEncoder.build(
        model_args,
        data_args,
        training_args,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    train_datasets = []
    tasks = ['fiqa', 'scifact', 'arguana', 'climate-fever', 'dbpedia-entity', 'cqadupstack', 'quora', 'scidocs',
             'nfcorpus', 'signal1m', 'trec-covid', 'webis-touche2020', 'hotpotqa', 'nq', 'robust04', 'trec-news',
             'bioasq']
    pretokenized = True
    use_mmap = True
    for task in tasks:
        # BUGFIX: join each task onto the *root* train dir. The original code
        # re-assigned ``data_dir = os.path.join(data_dir, task)`` every
        # iteration, so from the second task onward the paths accumulated
        # (train_dir/fiqa/scifact/...) and pointed at non-existent folders.
        task_data_dir = os.path.join(data_args.train_dir, task)
        corpus, queries, qrels = GenericDataLoader(task_data_dir, corpus_file=data_args.corpus_file,
                                                   query_file=data_args.query_file, qrel_file=data_args.train_path,
                                                   use_mmap=use_mmap).load(split="train")
        if training_args.noisy:
            train_dataset = NoisyBiEncoderDataset(corpus, queries, qrels, tokenizer, data_args,
                                                  pretokenized=pretokenized, noise_type=training_args.noise_type,
                                                  noise_prob=training_args.noise_prob)
        else:
            train_dataset = BiEncoderDataset(corpus, queries, qrels, tokenizer, data_args, pretokenized=pretokenized)
        train_datasets.append(train_dataset)

    data_collator = BiEncoderCollator(
        tokenizer,
        max_passage_length=data_args.max_passage_length,
        max_query_length=data_args.max_query_length
    )
    trainer = CropSentenceTrainer(
        model=model,
        train_dataset=train_datasets,
        data_collator=data_collator,
        training_args=training_args,
        data_args=data_args,
        tokenizer=tokenizer
    )
    # Give each dataset a back-reference to the trainer (used internally).
    for dataset in train_datasets:
        dataset.trainer = trainer

    trainer.train()  # TODO: resume training
    trainer.save_model()
    if trainer.is_world_process_zero():
        tokenizer.save_pretrained(training_args.output_dir)
if __name__ == "__main__":
main()
| Fantabulous-J/BootSwitch | train_crop_sent.py | train_crop_sent.py | py | 4,513 | python | en | code | 0 | github-code | 13 |
7553329038 |
from numpy import random
import pandas as pd
# Column accumulators for the 20000 synthetic subjects generated below.
Age=[]
height=[]
weight=[]
BMI=[] # body mass index = weight / height^2
Gender=[]
athlete=[]
smoker=[]
AnotherChronicIllness=[]
riskGroup=[] # 0 = no risk, 1 = medium risk, 2 = high risk
Sex=["Male","Female"]
YesNo=["Yes","No"] # NOTE(review): defined but never used below
# Draw random subjects; the binary flags are Bernoulli draws with the
# probabilities given by p=[...] (athlete 40%, smoker 30%, chronic 10%).
for i in range(20000):
    Age.append(random.randint(10,120))
    height.append(random.randint(140, 210))
    weight.append(random.randint(40, 130))
    Gender.append(random.choice(Sex))
    athlete.append(random.choice([1,0],p=[0.4,0.6]))
    smoker.append(random.choice([1,0],p=[0.3,0.7]))
    AnotherChronicIllness.append(random.choice([1,0],p=[0.1,0.9]))
# Compute the body mass index for every subject (height is in cm).
for i in range(20000):
    BMI.append(weight[i] / (height[i]/100)**2)
data = {'Age': Age,
        'Height':height,
        'Weight':weight,
        'BMI':BMI,
        'Gender':Gender,
        'Athlete':athlete,
        'Smoker':smoker,
        'AnotherChronic':AnotherChronicIllness,
        }
Dataset=pd.DataFrame(data)
# Assign a risk group per row; the row layout is
# i = [Age, Height, Weight, BMI, Gender, Athlete, Smoker, AnotherChronic].
for i in Dataset.values:
    #************************* High-risk group (2) *******************************
    if (i[0]>=75): # age 75+ is high risk regardless of every other criterion
        riskGroup.append(2)
    # ages 60-74 who do not exercise, or who smoke, are high risk
    elif ((i[0]>=60 and i[0]<75) and i[5]==0 ) or ((i[0]>=60 and i[0]<75) and i[6]==1):
        riskGroup.append(2)
    elif (i[3]<20 or i[3]>=40): # BMI outside [20, 40) is high risk
        riskGroup.append(2)
    # a chronic illness is high risk, independent of age
    elif (i[7]==1):
        riskGroup.append(2)
    #************************* Medium-risk group (1) *****************************
    # the remaining 60-74 year olds are medium risk
    elif (i[0]>=60 and i[0]<75):
        riskGroup.append(1)
    # everyone else is no-risk (the two elif branches below are redundant
    # with the final else; kept for readability of the original rule set)
    elif i[0]<=16:
        riskGroup.append(0)
    elif i[5]==1:
        riskGroup.append(0)
    else:
        riskGroup.append(0)
Dataset['RiskGroup']=riskGroup
Dataset.to_csv (r'C:/Users/Kutay/Desktop/Bitirme projesi/Pythpn Codes/datasetCovit19.csv', index = False, header=True)
| kutayAlaaeddin/Covid19_risk_group_classification | CreateDataset.py | CreateDataset.py | py | 2,218 | python | en | code | 0 | github-code | 13 |
36052585149 | # build a neural network that classifies images
# 1. Build a neural network that classifies images
# 2. Train the neural network
# 3. And, finally, evaluate the accuracy of the model
# import libraries
import tensorflow as tf
# load data
def loadData():
    """Load MNIST and scale pixel values from [0, 255] into [0, 1].

    Returns:
        Tuple (xTrain, yTrain, xTest, yTest) of numpy arrays.
    """
    mnist = tf.keras.datasets.mnist
    (xTrain, yTrain), (xTest, yTest) = mnist.load_data()
    xTrain, xTest = xTrain / 255, xTest / 255
    return xTrain, yTrain, xTest, yTest
# build model
def buildModel():
    """Build a small feed-forward classifier for 28x28 grayscale digits.

    Architecture: Flatten -> Dense(128, relu) -> Dropout(0.2) -> Dense(10).
    The final layer emits raw logits (no softmax), matching the
    from_logits=True loss used by the caller.
    """
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10)
    ])
    return model
# load data
xTrain, yTrain, xTest, yTest = loadData()
# build model
model = buildModel()
# define loss function
lossFn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# compile model
model.compile(optimizer='adam', loss=lossFn, metrics=['accuracy'])
# fit model to training dataset
model.fit(xTrain, yTrain, epochs=5)
# evaluate model
model.evaluate(xTest, yTest, verbose=2)
# probability model for probabilities
probabilityModel = tf.keras.Sequential([
model,
tf.keras.layers.Softmax()
])
probabilityModel(xTest[:5])
| SanchitJain123/ML-Implementations | Basics/TFBasics.py | TFBasics.py | py | 1,270 | python | en | code | 0 | github-code | 13 |
22790201993 | from typing import List, Sequence
import dezero as dz
from dezero import functions as F
from dezero import layers as L
from dezero import utils
class Model(dz.Layer):
    """Base class for dezero models: a Layer plus graph visualization."""

    def plot(self, *inputs: dz.Variable, to_file='model.png'):
        """Run a forward pass on *inputs* and write the resulting
        computation graph to *to_file* via utils.plot_dot_graph."""
        y = self.forward(*inputs)
        return utils.plot_dot_graph(
            y, verbose=True, to_file=to_file)
class MLP(Model):
    """Fully connected multi-layer perceptron.

    One ``L.Linear`` layer is created per entry of ``fc_output_sizes``;
    ``activation`` is applied between consecutive layers but not after
    the final one.
    """

    def __init__(
            self,
            fc_output_sizes: Sequence[int],
            activation=F.sigmoid) -> None:
        super().__init__()
        self.activation = activation
        self.layers: List[dz.Layer] = []
        # Register each layer as an attribute (l0, l1, ...) so dezero
        # tracks its parameters, and keep the ordered list for forward().
        for idx, out_size in enumerate(fc_output_sizes):
            fc = L.Linear(out_size)
            setattr(self, f'l{idx}', fc)
            self.layers.append(fc)

    def forward(self, x: dz.Variable) -> dz.Variable:
        """Apply every layer in order, activating all but the last output."""
        for fc in self.layers[:-1]:
            x = self.activation(fc(x))
return self.layers[-1](x) | teijeong/deeplearning-from-scratch | dezero/models.py | models.py | py | 921 | python | en | code | 0 | github-code | 13 |
7390476439 | from django.contrib.auth.models import AbstractUser
from django.db import models
# Create your models here.
class User(AbstractUser):
def serialize(self):
return {
"id": self.id,
}
class Quiz(models.Model):
    """A quiz created by one user and answerable by many others."""

    # Creator of the quiz; deleting the user cascades to their quizzes.
    owner = models.ForeignKey("User", on_delete=models.CASCADE, related_name="owner")
    title = models.CharField(max_length=50)
    # Creation time, set automatically on first save.
    timestamp = models.DateTimeField(auto_now_add=True)
    # Users who have already taken this quiz.
    answered = models.ManyToManyField("User", blank=True, related_name="answered")

    def serialize(self):
        """Return a JSON-friendly dict representation of the quiz."""
        return {
            "id": self.id,
            "owner": self.owner.username,
            "title": self.title,
            "timestamp": self.timestamp.strftime("%b %d %Y, %I:%M %p"),
            "answered": [user.username for user in self.answered.all()]
        }
class Question(models.Model):
quiz = models.ForeignKey("Quiz", on_delete=models.CASCADE, related_name="quiz")
question_description = models.CharField(max_length=300)
class Option(models.Model):
question = models.ForeignKey("Question", on_delete=models.CASCADE, related_name="question")
option_description = models.CharField(max_length=200)
correct = models.BooleanField(default=False)
chosen_user = models.ManyToManyField("User", blank=True, related_name="chosen_user")
class Score(models.Model):
quiz = models.ForeignKey("Quiz", on_delete=models.CASCADE, related_name="quiz_score")
answerer = models.ForeignKey("User", on_delete=models.CASCADE, related_name="answerer")
result = models.IntegerField() | wallace9320/CS50W-Project | quiz/models.py | models.py | py | 1,558 | python | en | code | 0 | github-code | 13 |
1954949464 | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 21 20:30:52 2021
Problem 57: Square root convergents
https://projecteuler.net/problem=57
@author: kuba
"""
import time
def solution():
    """Project Euler 57: count, among the first 1000 continued-fraction
    expansions of sqrt(2), those whose numerator has more digits than
    its denominator.

    Prints the count (kept for backward compatibility) and also returns it.

    Fixes the original off-by-one: the old loop examined expansions 3..1001,
    i.e. one expansion past the requested first thousand, and it stored every
    fraction in a growing list although only the previous one is needed.
    """
    score = 0
    # First expansion is 1 + 1/2 = 3/2.
    numerator, denominator = 3, 2
    for _ in range(1000):
        if len(str(numerator)) > len(str(denominator)):
            score += 1
        # Next expansion: n/d -> (n + 2d) / (n + d).
        numerator, denominator = numerator + 2 * denominator, numerator + denominator
    print(score)
    return score
start_time = time.time()
solution()
print("--- %s seconds ---" % (time.time() - start_time))
| KubiakJakub01/ProjectEuler | src/Problem57.py | Problem57.py | py | 790 | python | en | code | 0 | github-code | 13 |
71979272659 | #!/usr/bin/env python
#CHRIS SEQUEIRA CAS8903
import socket
# Fuzzing stage: send the vulnserver "TRUN" command with an "A"-filler
# payload growing from 1000 to 2999 bytes; the first length at which the
# server stops replying marks the approximate crash offset.
#prints output for each iteration may be more than 2-3 000 character
for i in range (1000,3000):
    ip = "172.16.237.128"
    s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    s.settimeout(5)
    s.connect((ip, 9999))
    s.recv(2048)
    badstr = "A"*i
    print(i)
    command = "TRUN ."+badstr
    s.send(command.encode())
    print(s.recv(2048))
    s.close()
#manual
# Manual proof-of-concept: 2006 filler bytes position "BBBB" (0x42424242)
# so that it overwrites the saved return address (EIP).
#make client socket
ip = "172.16.237.128"
s=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.settimeout(5)
s.connect((ip, 9999))
s.recv(2048)
badstr = "A"*2006
command = "TRUN ."+badstr+"BBBB"
s.send(command.encode())
s.close()
#eip = "BBBB" #replaces next address with "42424242"
#buf = "C"*20 #replaes shell code, if we find CCCC add more As
#badstr = "TRUN ." + "A"*i + eip + "\x90"*20 + buf + "\r\n"
#socket.connect((ip, 9999))
#socket.recv(2048)
#socket.send(badstr)
#socket.close()
| apotheosik/SvrExploits | VulnServer/VulnServerFuzzer.py | VulnServerFuzzer.py | py | 949 | python | en | code | 1 | github-code | 13 |
30803286868 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
np.random.seed(1)
print('Loading train and test data...')
df1=pd.read_csv('exoTrain.csv')
#print(df1)
df2=pd.read_csv('exoTest.csv')
#print(df2)
"""
In the traning data, We have light intensites of stars measured at 3198 time instances.
The training data has the flux sequenc for 5087 stars while the test data has the flux sequences for 570 stars.
If the value in LABEL column is 2, it is an exoplanet host star and if it is 1, it is not an exoplanet host star.
"""
train_data=np.array(df1,dtype=np.float32)
#print(train_data)
test_data=np.array(df2,dtype=np.float32)
#print(test_data)
ytrain=train_data[:,0]
Xtrain=train_data[:,1:]
ytest=test_data[:,0]
Xtest=test_data[:,1:]
# print(ytrain,'\n',Xtrain)
# print(ytest,'\n',Xtest)
m=0 # A chosen exoplanet host star's index for plott
n=100 # A chosen non-exoplanet host star's index
#print('Shape of Xtrain:',np.shape(Xtrain),'\nShape of ytrain:',np.shape(ytrain))
plt.plot(Xtrain[m],'r')
plt.title('Light intensity vs time (for an exoplanet star)')
plt.xlabel('Time index')
plt.ylabel('Light intensity')
plt.show()
plt.plot(Xtrain[n],'b')
plt.title('Light intensity vs time (for a non exoplanet star)')
plt.xlabel('Time')
plt.ylabel('Light intensity')
plt.show()
### Applying Fourier Transform
from scipy.fftpack import fft
print('Applying Fourier Transform...')
Xtrain=np.abs(fft(Xtrain,n=len(Xtrain[0]),axis=1))
Xtest=np.abs(fft(Xtest,n=len(Xtest[0]),axis=1))
# print(Xtrain,Xtrain.shape)
Xtrain=Xtrain[:,:1+int((len(Xtrain[0])-1)/2)]
# print('\n\n',Xtrain,Xtrain.shape)
#print('Shape of Xtrain:',np.shape(Xtrain),'\nShape of ytrain:',np.shape(ytrain))
Xtest=Xtest[:,:1+int((len(Xtest[0])-1)/2)]
plt.plot(Xtrain[m],'r')
plt.title('After FFT (for an exoplanet star)')
plt.xlabel('Frequency')
plt.ylabel('Feature value')
plt.show()
plt.plot(Xtrain[n],'b')
plt.title('After FFT (for a non exoplanet star)')
plt.xlabel('Frequency')
plt.ylabel('Feature value')
plt.show()
#### Normalizing
from sklearn.preprocessing import normalize
print('Normalizing...')
Xtrain=normalize(Xtrain)
Xtest=normalize(Xtest)
plt.plot(Xtrain[m],'r')
plt.title('After FFT,Normalization (for an exoplanet star)')
plt.xlabel('Frequency')
plt.ylabel('Feature value')
plt.show()
plt.plot(Xtrain[n],'b')
plt.title('After FFT,Normalization (for a non exoplanet star)')
plt.xlabel('Frequency')
plt.ylabel('Feature value')
plt.show()
#### Applying Gaussian Filter
from scipy import ndimage
print('Applying Gaussian filter...')
Xtrain=ndimage.filters.gaussian_filter(Xtrain,sigma=10)
Xtest=ndimage.filters.gaussian_filter(Xtest,sigma=10)
plt.plot(Xtrain[m],'r')
plt.title('After FFT,Normalization and Gaussian filtering (for an exoplanet star)')
plt.xlabel('Frequency')
plt.ylabel('Feature value')
plt.show()
plt.plot(Xtrain[n],'b')
plt.title('After FFT,Normalization and Gaussian filtering (for a non exoplanet star)')
plt.xlabel('Frequency')
plt.ylabel('Feature value')
plt.show()
#### Scaling down the data
from sklearn.preprocessing import MinMaxScaler
print('Applying MinMaxScaler...')
scaler=MinMaxScaler(feature_range=(0,1))
Xtrain=scaler.fit_transform(Xtrain)
Xtest=scaler.fit_transform(Xtest)
plt.plot(Xtrain[m],'r')
plt.title('After FFT,Normalization, Gaussian filtering and scaling (for an exoplanet star)')
plt.xlabel('Frequency')
plt.ylabel('Feature value')
plt.show()
plt.plot(Xtrain[n],'b')
plt.title('After FFT,Normalization, Gaussian filtering and scaling (for a non exoplanet star)')
plt.xlabel('Frequency')
plt.ylabel('Feature value')
plt.show()
print("By looking at the last 2 curves, we can say that fourier transform has helped us in seeing that, for an exoplanet star, the curve has a sudden dip.\
\n And, for the non-exoplanet star, the curve is almost on the same level with high fluctuations.")
#### LSTM RNN Model and Training
# reshaping to give as input to the RNN:
Xtrain = np.reshape(Xtrain,(Xtrain.shape[0],1,Xtrain.shape[1]))
Xtest = np.reshape(Xtest,(Xtest.shape[0],1,Xtest.shape[1]))
#print('Shape of Xtrain:',np.shape(Xtrain),'\nShape of ytrain:',np.shape(ytrain))
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.wrappers.scikit_learn import KerasClassifier
# LSTM RNN Model:
def LSTM_RNN():
model = Sequential()
model.add(LSTM(32,input_shape=(1,Xtrain.shape[2])))
model.add(Dropout(0.2))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',metrics=['acc'])
return model
estimator=KerasClassifier(build_fn=LSTM_RNN,epochs=10,batch_size=64,verbose=1)
# Training:
print('The model is being trained...')
history=estimator.fit(Xtrain,ytrain)
#### Training and Testing results
loss=history.history['loss']
acc=history.history['acc']
epochs=range(1,len(loss)+1)
plt.title('Training error with epochs')
plt.plot(epochs,loss,'bo',label='training loss')
plt.xlabel('epochs')
plt.ylabel('training error')
plt.show()
plt.plot(epochs,acc,'b',label='accuracy')
plt.title('Accuracy of prediction with epochs')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.show()
# make predictions
trainPredict = estimator.predict(Xtrain,verbose=0)
testPredict = estimator.predict(Xtest,verbose=0)
plt.title('Training results')
plt.plot(trainPredict,'*',label='Predicted')
plt.plot(ytrain,'o',label='ground truth')
plt.xlabel('Train data sample index')
plt.ylabel('Predicted class (1 or 2)')
plt.legend()
plt.show()
print('We can see that the model is well trained on training data as it is predicting correctly')
plt.title('Performance of the model on testing data')
plt.plot(testPredict,'*',label='Predicted')
plt.plot(ytest,'o',label='ground truth')
plt.xlabel('Test data sample index')
plt.ylabel('Predicted class (1 or 2)')
plt.legend()
plt.show()
#### Accuracy, Precision and recall of the model
from sklearn import metrics as sk_met
accuracy_train=sk_met.accuracy_score(ytrain,trainPredict)
accuracy_test= sk_met.accuracy_score(ytest,testPredict)
print('\t\t train data \t test data')
print('accuracy: ',accuracy_train,'\t',accuracy_test)
precision_train=sk_met.precision_score(ytrain,trainPredict)
precision_test=sk_met.precision_score(ytest,testPredict)
print('precision: ',precision_train,'\t',precision_test)
recall_train=sk_met.recall_score(ytrain,trainPredict)
recall_test=sk_met.recall_score(ytest,testPredict)
print('recall: ',recall_train,'\t\t',recall_test)
| anandnwarrier/Exoplane_host_star_detection | exoplanet_prediction_using_RNN_with_fourier_transformed_data.py | exoplanet_prediction_using_RNN_with_fourier_transformed_data.py | py | 6,509 | python | en | code | 0 | github-code | 13 |
42148539511 | animals = input('Heyvanlari girin: ').split(', ')
prices = { 'inek': 500, 'toyuq': 50, 'qoyun': 120, 'at': 900, 'keci': 210 }
try:
if not 3<len(animals)<10:
raise ValueError('3-den cox ve 10-dan az sayda heyvan yazmalisiniz')
print('Umumi qiymet:', sum(map(lambda animal: prices[animal], animals)))
except KeyError:
print('Heyvanin adini sehv girmisiniz. Sehviniz:')
except ValueError:
print('3-den cox ve 10-dan az sayda heyvan yazmalisiniz')
except Exception:
print('Bir xeta bas verdi')
| elminazmirzelizade/python | homework33/index.py | index.py | py | 520 | python | tr | code | 0 | github-code | 13 |
def has_double(num):
    """Return True if the decimal digits of *num* contain a maximal run of
    exactly two equal adjacent digits (runs of three or more do not count)."""
    text = str(num)
    run_length = 1
    # Scan adjacent digit pairs, tracking the length of the current run.
    for previous, current in zip(text, text[1:]):
        if current == previous:
            run_length += 1
        else:
            if run_length == 2:
                return True
            run_length = 1
    # Account for a double that ends at the last digit.
    return run_length == 2
def no_dec(num):
    """Return True if the digits of *num* never decrease left to right."""
    # Comparing digit characters is equivalent to comparing their int values.
    text = str(num)
    return all(earlier <= later for earlier, later in zip(text, text[1:]))
lower = 171309
upper = 643603
correct = 0
for num in range(lower, upper):
if(has_double(num) and no_dec(num)):
correct += 1
print(correct)
| Mraedis/AoC2019 | Day4.2.py | Day4.2.py | py | 754 | python | en | code | 0 | github-code | 13 |
31283683405 | import jsonpath
import requests
import json
from comm.write_log import log
'''
获取魔镜token
验证码获取
↓
验证码图像识别
↓
token获取
'''
def get_token():
    """Log into the MoJing monitor gateway and return the auth token.

    Flow: fetch the captcha text from a local OCR service (retrying until a
    4-character result comes back), then POST the fixed test credentials and
    extract ``token`` from the JSON response with jsonpath.
    """
    # Ask the local captcha-recognizer for the OCR result; a valid captcha
    # is exactly 4 characters, so keep retrying until we get one.
    url = "http://localhost:8081/"
    captcha = ""
    while len(captcha) != 4:
        response = requests.request("GET", url, data={})
        captcha = response.text
    url = "http://gateway.mj.cn/monitor/terminal/login"
    headers = {
        'User-Agent': 'Apifox/1.0.0 (https://www.apifox.cn)',
        'Content-Type': 'application/json'
    }
    # Fixed test account; password and uuid are pre-encrypted constants.
    payload = json.dumps({
        "username": "test123",
        "password": "axz+tev5OXSGM+sNATCulSx6N1t5Z9qDD33PGaq01ZBop745IVCcP5+t9Ua1bCMC",
        "uuid": "6cb13e75-971f-4cd6-8fa4-752e8074e4cc",
        "captcha": captcha
    })
    try:
        response = requests.post(url=url, data=payload, headers=headers)
    except Exception as e:
        # Runtime message kept verbatim (Chinese): "network connection timed
        # out, please check whether the device is online".
        log.error("网络连接超时, 请检查设备是否在线.")
        raise e
    data = response.json()
    # jsonpath returns a list of matches, or False when nothing matched --
    # NOTE(review): a failed login would make the following index raise.
    token = jsonpath.jsonpath(data, "$..token")
return(token[0]) | storeview/LEARN__HTTP-Interface-Automate-Test | task1/comm/get_token.py | get_token.py | py | 1,144 | python | en | code | 0 | github-code | 13 |
34527274449 | import time
from ui import read_input, print_output, should_exit
from solver import GreedySolver
# Interactive driver: read a puzzle, solve it with the greedy solver, time
# the solve, print the result, and repeat until the user chooses to quit.
# (The original bound the builtin name `input`; renamed to `puzzle`.)
while True:
    puzzle = read_input()
    greedy_solver = GreedySolver(puzzle)
    start = time.perf_counter()
    result = greedy_solver.solve()
    end = time.perf_counter()
    print_output(result, start, end)
    if should_exit():
        break
| martinbudinsky3/N-puzzle-greedy-solver | n_puzzle_greedy_algo.py | n_puzzle_greedy_algo.py | py | 349 | python | en | code | 0 | github-code | 13 |
22137785429 | import Augmentor
from invoke import run
from PIL import Image
from random import randint
import glob
import os
def augmentor():
WrinklePath="./Wrinkle_templates"
p = Augmentor.Pipeline(WrinklePath)
p.rotate(probability=1, max_left_rotation=20, max_right_rotation=20)
p.process()
p = Augmentor.Pipeline(WrinklePath)
p.zoom(probability=1, min_factor=1.1, max_factor=1.5)
p.process()
p = Augmentor.Pipeline(WrinklePath)
p.skew_tilt(probability=1)
p.process()
p = Augmentor.Pipeline(WrinklePath)
p.skew_left_right(probability=1)
p.process()
p = Augmentor.Pipeline(WrinklePath)
p.skew_top_bottom(probability=1)
p.process()
p = Augmentor.Pipeline(WrinklePath)
p.skew_corner(probability=1)
p.process()
p = Augmentor.Pipeline(WrinklePath)
p.skew(probability=1)
p.process()
p = Augmentor.Pipeline(WrinklePath)
p.random_distortion(probability=1, grid_width=16, grid_height=16, magnitude=8)
p.process()
p = Augmentor.Pipeline(WrinklePath)
p.shear(probability=1, max_shear_left=20, max_shear_right=20)
p.process()
p = Augmentor.Pipeline(WrinklePath)
p.crop_random(probability=1, percentage_area=0.7)
p.process()
p = Augmentor.Pipeline(WrinklePath)
p.flip_random(probability=1)
p.process()
#p.status
#p.sample(100)
def augmentor_test():
# =============================================================================
# def Augment(inputpath, cmdspath):
# cmds = open(cmdspath).readlines()
# for i in range(len(cmds)):
# p = Augmentor.Pipeline(inputpath)
# run(cmds[i])
# p.process()
#
# inpath="/home/aeroclub/Abtin/Augmentor/figure_skating_templates"
# cmpath="/home/aeroclub/Abtin/Augmentor/cmds.txt"
# Augment(inpath, cmpath)
# =============================================================================
p = Augmentor.Pipeline("./Augmentor/figure_skating_templates")
cmds = "p.rotate(probability=1, max_left_rotation=20, max_right_rotation=20)"
run(cmds, hide=True, warn=True)
p.process()
def copy():
temp_dir = "./Wrinkle_templates/output"
bckg_dir = "./Fabric_templates"
dtst_dir = "./Generated_files/Images/"
labls_dir = "./Generated_files/Labels/xywh/"
num = 0
for bckg_img in glob.glob(bckg_dir + "/*.JPG"):
bckg = Image.open(bckg_img)
bckg_w, bckg_h = bckg.size
for tmp_img in glob.glob(temp_dir + "/*.JPG"):
tmp = Image.open(tmp_img)
tmp_w, tmp_h = tmp.size
# print(bckg_w, bckg_h, tmp_w, tmp_h)
for i in range(2):
new = bckg.copy()
if bckg_w - tmp_w > 1 and bckg_h - tmp_h > 1:
num += 1
x = randint(1, bckg_w - tmp_w)
y = randint(1, bckg_h - tmp_h)
offset = (x, y)
new.paste(tmp, offset)
# new.show()
new.save(dtst_dir + str(num).zfill(6) + ".jpg")
f = open(labls_dir + str(num).zfill(6) + ".txt", "w+")
f.write("1\n")
# 1 is the number of class
f.write(str(x) + " " + str(y) + " " + str(tmp_w) + " " + str(tmp_h))
f.close()
def train_test_list_creator():
    """Split the generated wrinkle images into train/test list files.

    Walks ./Generated_files/Images/wrinkle/ and writes each image path to
    train.txt or test.txt, sending roughly every 11th image (10%) to the
    test set.
    """
    dataset_path = "./Generated_files/Images/wrinkle/"
    # Percentage of images to be used for the test set
    percentage_test = 10
    index_test = round(100 / percentage_test)
    # Context managers guarantee the list files are flushed and closed even
    # if globbing fails part-way (the original never closed them).
    with open('train.txt', 'w+') as file_train, open('test.txt', 'w+') as file_test:
        counter = 1
        for pathAndFilename in glob.iglob(os.path.join(dataset_path, "*.jpg")):
            title, ext = os.path.splitext(os.path.basename(pathAndFilename))
            if counter == index_test + 1:
                counter = 1
                file_test.write(dataset_path + title + '.jpg' + "\n")
            else:
                file_train.write(dataset_path + title + '.jpg' + "\n")
            # NOTE: increment happens on test iterations too, matching the
            # original behavior (the first cycle is 10:1, later ones 9:1).
            counter = counter + 1
def YOLO3_format_converter():
classes = ["wrinkle"]
def convert(size, box):
dw = 1. / size[0]
dh = 1. / size[1]
x = (box[0] + box[1]) / 2.0
y = (box[2] + box[3]) / 2.0
w = box[1] - box[0]
h = box[3] - box[2]
print(x)
x = x * dw
w = w * dw
y = y * dh
h = h * dh
print(dw)
print(dh)
return (x, y, w, h)
""" Configure Paths"""
mypath = "./Generated_files/Labels/xywh/"
outpath = "./Generated_files/Labels/YOLO/"
cls = "wrinkle"
if cls not in classes:
exit(0)
cls_id = classes.index(cls)
wd = os.getcwd()
list_file = open('%s/%s_list.txt' % (wd, cls), 'w')
""" Get input text file list """
txt_name_list = []
for (dirpath, dirnames, filenames) in os.walk(mypath):
txt_name_list.extend(filenames)
break
print(txt_name_list)
""" Process """
for txt_name in txt_name_list:
# txt_file = open("Labels/stop_sign/001.txt", "r")
""" Open input text files """
txt_path = mypath + txt_name
print("Input:" + txt_path)
txt_file = open(txt_path, "r")
lines = txt_file.read().split('\n') # for ubuntu, use "\r\n" instead of "\n"
""" Open output text files """
txt_outpath = outpath + txt_name
print("Output:" + txt_outpath)
txt_outfile = open(txt_outpath, "w")
""" Convert the data to YOLO format """
ct = 0
for line in lines:
# print('lenth of line is: ')
# print(len(line))
# print('\n')
if (len(line) >= 2):
ct = ct + 1
print(line + "\n")
elems = line.split(' ')
print(elems)
xmin = float(elems[0])
xmax = float(elems[2]) + float(elems[0])
ymin = float(elems[1])
ymax = float(elems[3]) + float(elems[1])
#
img_path = str('%s/Generated_files/Images/%s/%s.jpg' % (wd, cls, os.path.splitext(txt_name)[0]))
# t = magic.from_file(img_path)
# wh= re.search('(\d+) x (\d+)', t).groups()
im = Image.open(img_path)
w = int(im.size[0])
h = int(im.size[1])
# w = int(xmax) - int(xmin)
# h = int(ymax) - int(ymin)
# print(xmin)
print(w, h)
b = (xmin, xmax, ymin, ymax)
bb = convert((w, h), b)
print(bb)
txt_outfile.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
""" Save those images with bb into list"""
if (ct != 0):
list_file.write('%s/images/%s/%s.JPEG\n' % (wd, cls, os.path.splitext(txt_name)[0]))
list_file.close() | AbtinDjavadifar/SimpleNet | Augmentor/utils.py | utils.py | py | 7,030 | python | en | code | 2 | github-code | 13 |
40640210681 | #!/usr/bin/env python3
"""
Given a rasterized DEM in Cartesian coordinates (e.g., UTM),
compute the slope and slope-aspect maps.
The DEM should be provided as a gdal-readable file, preferably
in GeoTiff format.
"""
### IMPORT MODULES ---
import numpy as np
import matplotlib.pyplot as plt
from osgeo import gdal
# InsarToolkit modules
from viewingFunctions import mapPlot
from geoFormatting import GDALtransform
from slopeFunctions import *
### PARSER ---
def createParser():
    """Build the argparse parser for the slope/slope-aspect DEM tool."""
    import argparse

    parser = argparse.ArgumentParser(
        description='Given a rasterized DEM in Cartesian coordinates (e.g., UTM), compute the slope and slope-aspect maps. The DEM should be provided as a gdal-readable file, preferably in GeoTiff format. Alternatively, provide the slope and slope aspect maps.')

    # Positional input DEM.
    parser.add_argument(dest='DEMname', type=str,
                        help='Name of DEM in Cartesian coordinates.')

    # Output controls.
    parser.add_argument('-o', '--outName', dest='outName', type=str, required=True,
                        help='Output name base')
    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
                        help='Verbose mode')
    parser.add_argument('-p', '--plot', dest='plot', action='store_true',
                        help='Plot outputs')

    # Plotting options.
    parser.add_argument('--nVectors', dest='nVectors', type=int, default=30,
                        help='Number of vectors to plot if \'plot outputs\' option is selected.')

    return parser
def cmdParser(iargs = None):
    """Parse command-line arguments; *iargs* overrides sys.argv for testing."""
    parser = createParser()
    return parser.parse_args(args=iargs)
### ANCILLARY FUNCTIONS ---
## Plot gradient vectors
def plotGradientVectors(px,py,mapVals,T,n=30):
    """
    Quiver-plot an approximately n x n subsample of the downhill
    pointing-vector field on top of a background map.
    INPUTS
        px are the x/east vector components
        py are the y/north vector components
        mapVals is the background map to draw (e.g., elevation or slope)
        T is the GDALtransform giving the geographic extent
        n is the approximate number of vectors per axis (default 30)
    """
    # Parameters
    M,N=px.shape
    mSkip=int(M/n)
    nSkip=int(N/n)
    east=np.arange(T.xstart,T.xend,T.xstep)
    north=np.arange(T.ystart,T.yend,T.ystep)
    E,N=np.meshgrid(east,north)
    # Resample to give nxn points (note: N is re-bound here from the raster
    # width to the northing grid; nSkip was already computed above)
    E=E[1:-1:mSkip,1:-1:nSkip]
    N=N[1:-1:mSkip,1:-1:nSkip]
    vx=px[1:-1:mSkip,1:-1:nSkip]
    vy=py[1:-1:mSkip,1:-1:nSkip]
    # Plot the background map, then overlay arrows scaled to the map extent
    Fig,ax=mapPlot(mapVals,cmap='viridis',pctmin=1,pctmax=99,background='auto',
        extent=T.extent,showExtent=True,cbar_orientation='horizontal',title='Vector plot')
    scale=np.abs(T.xend-T.xstart)/n+np.abs(T.yend-T.ystart)/n
    ax.quiver(E,N,scale*vx,scale*vy,
        color='r',units='xy',scale=2)
## Save georeferenced map
def saveMap(templateDS,bands,savename):
    """
    Write *bands* to a GeoTIFF at *savename*.
    templateDS is a GDAL dataset with the same spatial extent, resolution,
    and projection as the images to be saved; bands is a sequence of 2D
    arrays, each written as a Float32 raster band in order.
    """
    # Input parameters
    nBands=len(bands)
    # Gather parameters from template dataset
    N=templateDS.RasterXSize; M=templateDS.RasterYSize
    Proj=templateDS.GetProjection()
    Tnsf=templateDS.GetGeoTransform()
    # Save image to geotiff (one raster band per entry of bands)
    driver=gdal.GetDriverByName('GTiff')
    DSout=driver.Create(savename,N,M,nBands,gdal.GDT_Float32)
    for n,band in enumerate(bands):
        DSout.GetRasterBand(n+1).WriteArray(band)
    DSout.SetProjection(Proj)
    DSout.SetGeoTransform(Tnsf)
    DSout.FlushCache()
### MAIN ---
if __name__=='__main__':
inpt=cmdParser()
## Load DEM
DEM=gdal.Open(inpt.DEMname,gdal.GA_ReadOnly)
elev=DEM.GetRasterBand(1).ReadAsArray()
# Determine extent
T=GDALtransform(DEM)
# Report if requested
if inpt.verbose is True:
print('Loaded DEM: {}'.format(inpt.DEMname))
print('\tdimensions: {}'.format(elev.shape))
## Compute gradients
gradients=computeGradients(elev,dx=T.xstep,dy=T.ystep)
slope=grad2slope(gradients)
aspect=grad2aspect(gradients)
## Construct pointing vectors
# This vector points directly downhill at a given point
px,py,pz=makePointingVectors(gradients)
## Sanity checks
# Check that slope recomputed from vectors matches original computation
if inpt.verbose is True:
slopeCalc=pointing2slope(px,py,pz)
slopeDiff=slope-slopeCalc
print('Slope difference: {} +/- {}'.format(np.nansum(slopeDiff),np.nanstd(slopeDiff)))
# Check that aspect recomputed from vectors matches original computation
if inpt.verbose is True:
aspectCalc=pointing2aspect(px,py,pz)
aspectDiff=aspect-aspectCalc
print('Aspect difference: {} +/- {}'.format(np.nansum(aspectDiff),np.nanstd(aspectDiff)))
# Check that all vectors are unit length
if inpt.verbose is True:
unitVectors=np.sqrt(px**2+py**2+pz**2)
print('Unit vectors: {} +/- {}'.format(np.nanmean(unitVectors),np.nanstd(unitVectors)))
## Save maps
# Save slope map
slopeName='{}_slope.tif'.format(inpt.outName)
saveMap(templateDS=DEM,bands=[slope],savename=slopeName)
# Save aspect map
aspectName='{}_aspect.tif'.format(inpt.outName)
saveMap(templateDS=DEM,bands=[aspect],savename=aspectName)
# Save vector component map
vectorName='{}_xyz_pointing_vectors.tif'.format(inpt.outName)
saveMap(templateDS=DEM,bands=[px,py,pz],savename=vectorName)
## Plot maps
if inpt.plot is True:
# Plot DEM
mapPlot(elev,cmap='viridis',pctmin=1,pctmax=99,background='auto',
extent=T.extent,showExtent=True,cbar_orientation='horizontal',title='DEM')
# Plot Slope
mapPlot(slope,cmap='viridis',pctmin=1,pctmax=99,background='auto',
extent=T.extent,showExtent=True,cbar_orientation='horizontal',title='Slope')
# Plot Aspect
mapPlot(aspect,cmap='viridis',pctmin=1,pctmax=99,background='auto',
extent=T.extent,showExtent=True,cbar_orientation='horizontal',title='Aspect')
# Plot Vectors
plotGradientVectors(px,py,elev,T,n=inpt.nVectors)
plt.show() | EJFielding/InsarToolkit | SlopeAnalysis/ComputeTopoVectors.py | ComputeTopoVectors.py | py | 5,434 | python | en | code | 4 | github-code | 13 |
1375913621 | from classes import DBManager, HH
import utils as ut

# Entry-point script: builds the PostgreSQL vacancies database for the
# employers listed in ./data/selected_employers_id.json, then serves an
# interactive query menu until the user enters '0'.

data = DBManager()   # database access layer
hh_agent = HH()      # hh.ru API client
user_input = None

print('Перед вами программа для работы с базой данных PostgreSQL.\n'
      'После подтверждения, будет создана база данных вакансий компаний, id которых хранятся'
      ' в файле ./data/selected_employers_id.json\n')
input('Для продолжения нажмите Enter')

# --- build phase: create tables and load vacancies per employer ---
data.create_table_selected_employers_id()
data.insert_table_selected_employers_id()
data.create_table_all_vacancies()
for employer_id in ut.get_selected_employers_id():
    print(f'\nПоиск вакансий компании: {hh_agent.get_name_employer_per_id(employer_id)}')
    data_vac = hh_agent.get_vacancies(keyword=None, employer_id=employer_id)
    data.insert_data_to_all_vacancies(data_vac)

print(f'База данных с вакансиями успешно создана.\n'
      f'В настоящий момент в базе хранится таблица all_vacancies с {data.get_count_of_all()} вакансиями\n')
print('Вам доступны следующие функции для работы с базой:\n'
      '1. Получить список всех компаний с количеством вакансий\n'
      '2. Получить список всех вакансий с краткой информацией\n'
      '3. Получить среднюю зарплату по всем вакансиям\n'
      '4. Получить список всех вакансий, у которых зарплата выше средней\n'
      '5. Поиск по ключевому слову\n'
      '6. Получить список выбранных компаний с их ID')

# --- interactive query loop: '0' exits ---
while user_input != '0':
    user_input = input('\nВведите число от 1 до 6 или 0 для выхода: ').strip().lower()
    if user_input == '1':
        print()
        print('Название - Вакансий')
        for name, count in data.get_companies_and_vacancies_count():
            print(f'{name} - {count}')
    elif user_input == '2':
        print()
        print('Компания - Название вакансии - Зарплата от - Зарплата до - Ссылка на вакансию')
        for company, name, salary_from, salary_to, url in data.get_all_vacancies():
            print(f'{company} - {name} - {salary_from} - {salary_to} - {url}')
    elif user_input == '3':
        print()
        print(f'Средняя зарплата по всем вакансиям составляет {data.get_avg_salary()} рублей')
    elif user_input == '4':
        print()
        print('ID вакансии - Название вакансии - Компания - Зарплата от - Зарплата до - Валюта - Город - '
              'Описание вакансии - Ссылка на вакансию')
        # run the query once and reuse the result (the old code re-queried
        # the database just to print the row count)
        higher_paid = data.get_vacancies_with_higher_salary()
        for vac in higher_paid:
            print(f'{vac[0]} - {vac[1]} - {vac[3]} - {vac[4]} - {vac[5]} - {vac[6]} - {vac[7]} - {vac[8]} - {vac[9]}')
        print(f'Всего найдено {len(higher_paid)} вакансий')
    elif user_input == '5':
        print()
        keyword = input('Введите ключевое слово: ').strip().lower()
        found = data.get_vacancies_with_keyword(keyword)  # query once, reuse for the count
        for vac in found:
            print(f'{vac[0]} - {vac[1]} - {vac[3]} - {vac[4]} - {vac[5]} - {vac[6]} - {vac[7]} - {vac[8]} - {vac[9]}')
        print(f'Всего найдено {len(found)} вакансий')
    elif user_input == '6':
        print()
        print('ID компании - Название')
        # enumerate replaces the old manual i = 0 / i += 1 counter
        for idx, (company_id, company_name) in enumerate(data.get_selected_employers(), start=1):
            print(f'{idx} - {company_id} - {company_name}')
    else:
        if user_input != '0':
            print('Некорректный ввод\n')
| perf-il/vacancy-analysis | main.py | main.py | py | 4,158 | python | ru | code | 0 | github-code | 13 |
25106620173 | import pytest
from subprocess import check_output, check_call
from os.path import dirname, join
import json
def test_split_combine_plaintext(sim_hocr_file):
    """Round-trip check: splitting a hOCR file into per-page files and then
    recombining them must preserve the extracted plain text.

    The ``@pytest.mark.usefixtures('sim_hocr_file')`` decorator was removed:
    requesting the fixture as a parameter already applies it, so the marker
    was redundant.

    NOTE(review): relies on the hocr-* CLI tools being on PATH.
    """
    sim_hocr_file = str(sim_hocr_file)
    basedir = dirname(sim_hocr_file)
    split_pages = join(basedir, 'split-%06d.html')      # output pattern for split pages
    split_pages_g = join(basedir, 'split-*.html')       # glob matching the split pages
    combined_file = join(basedir, 'combined.html')

    check_call(['hocr-split-pages', '-f', sim_hocr_file, '-o', split_pages])
    combined_text = check_output(['hocr-combine-stream', '-g', split_pages_g])
    # 'wb' instead of 'wb+': the file is only written here, never read back
    # through this handle
    with open(combined_file, 'wb') as f:
        f.write(combined_text)

    plaintext = check_output(['hocr-text', '-f', sim_hocr_file])
    new_plaintext = check_output(['hocr-text', '-f', combined_file])
    assert plaintext == new_plaintext
| internetarchive/archive-hocr-tools | tests/test_hocr_split_recombine.py | test_hocr_split_recombine.py | py | 837 | python | en | code | 24 | github-code | 13 |
22224137314 | import base64
import time
import requests
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5
def rsa_decode(data):
    """RSA-encrypt *data* with the site's hard-coded public key.

    NOTE: despite the name, this function ENCRYPTS (PKCS#1 v1.5) and returns
    the ciphertext base64-encoded, which is what the login endpoint expects.
    """
    public_key_b64 = 'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsgDq4OqxuEisnk2F0EJFmw4xKa5IrcqEYHvqxPs2CHEg2kolhfWA2SjNuGAHxyDDE5MLtOvzuXjBx/5YJtc9zj2xR/0moesS+Vi/xtG1tkVaTCba+TV+Y5C61iyr3FGqr+KOD4/XECu0Xky1W9ZmmaFADmZi7+6gO9wjgVpU9aLcBcw/loHOeJrCqjp7pA98hRJRY+MML8MK15mnC4ebooOva+mJlstW6t/1lghR8WNV8cocxgcHHuXBxgns2MlACQbSdJ8c6Z3RQeRZBzyjfey6JCCfbEKouVrWIUuPphBL3OANfgp0B+QG31bapvePTfXU48TYK0M5kE+8LgbbWQIDAQAB'
    public_key = RSA.import_key(base64.b64decode(public_key_b64))
    encryptor = PKCS1_v1_5.new(public_key)
    encrypted = encryptor.encrypt(data.encode(encoding="utf-8"))
    return base64.b64encode(encrypted).decode("utf-8")
# Headers for the XHR endpoints (captcha verification + login POST); values
# were captured from a browser session.
headers = {
    "Accept": "application/json, text/javascript, */*; q=0.01",
    "Accept-Language": "zh-CN,zh;q=0.9",
    "Connection": "keep-alive",
    "Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
    "Origin": "https://login.10086.cn",
    "Referer": "https://login.10086.cn/html/login/email_login.html",
    "Sec-Fetch-Dest": "empty",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Site": "same-origin",
    "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Mobile Safari/537.36",
    "X-Requested-With": "XMLHttpRequest",
    "sec-ch-ua": "^\\^Not.A/Brand^^;v=^\\^8^^, ^\\^Chromium^^;v=^\\^114^^, ^\\^Google",
    "sec-ch-ua-mobile": "?1",
    "sec-ch-ua-platform": "^\\^Android^^"
}
# Session cookies captured from the browser.  NOTE(review): CaptchaCode and
# rdmdmd5 look session-specific -- confirm they can be reused across runs.
cookies = {
    "sendflag": "20230609163511711776",
    "CaptchaCode": "BuNZBB",
    "rdmdmd5": "63C7A1126B4CEB07EE780791556FAADA"
}
# Millisecond timestamp; reused for both the captcha URL and the login form.
aa_time = str(int(time.time() * 1000))
# Download the captcha image to aa.png so the user can read it and type it in.
with open("aa.png", "wb+") as f:
    aa = requests.get(f'https://login.10086.cn/captchazh.htm?type=03&timestamp={aa_time}',
                      headers={
                          "GET": "/captchazh.htm?type=03&timestamp=1686300262296 HTTP/1.1",
                          "Accept": "image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
                          "Accept-Encoding": "gzip, deflate, br",
                          "Accept-Language": "zh-CN,zh;q=0.9",
                          "Connection": "keep-alive",
                          "Cookie": "sendflag=20230609163511711776; CaptchaCode=IGQyXY",
                          "Host": "login.10086.cn",
                          "Referer": "https://login.10086.cn/html/login/email_login.html",
                          "Sec-Fetch-Dest": "image",
                          "Sec-Fetch-Mode": "no-cors",
                          "Sec-Fetch-Site": "same-origin",
                          "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Mobile Safari/537.36",
                          "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
                          "sec-ch-ua-mobile": "?1",
                          "sec-ch-ua-platform": "\"Android\""
                      }).content
    f.write(aa)
# Captcha text typed in by the user after inspecting aa.png.
code = input("")
# Call /verifyCaptcha once per prefix of the typed code (starting with the
# empty prefix) -- presumably mimicking the page's per-keystroke verification;
# confirm against the site's JavaScript before changing this behavior.
for i in range(len(code)+1):
    print(code[0:i])
    requests.get(f'https://login.10086.cn/verifyCaptcha?inputCode={code[0:i]}', headers={
        "GET": "/verifyCaptcha?inputCode=11 HTTP/1.1",
        "Accept": "application/json, text/javascript, */*; q=0.01",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        "Cookie": "sendflag=20230609163511711776; CaptchaCode=BuNZBB; rdmdmd5=63C7A1126B4CEB07EE780791556FAADA",
        "Host": "login.10086.cn",
        "Referer": "https://login.10086.cn/html/login/email_login.html",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "same-origin",
        "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Mobile Safari/537.36",
        "X-Requested-With": "XMLHttpRequest",
        "sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
        "sec-ch-ua-mobile": "?1",
        "sec-ch-ua-platform": "\"Android\""
    })
# Final login request: account and password are RSA-encrypted with the
# server's public key (see rsa_decode above); the captcha code and the same
# millisecond timestamp go into the form body.
url = "https://login.10086.cn/login.htm"
data = {
    "accountType": "02",
    "pwdType": "03",
    "account": rsa_decode("2737454073@qq.com"),
    "password": rsa_decode("123456"),
    "inputCode": code,
    "backUrl": "https://touch.10086.cn/i/",
    "rememberMe": "0",
    "channelID": "12014",
    "protocol": "https:",
    "loginMode": "03",
    "timestamp": aa_time
}
response = requests.post(url, headers=headers, cookies=cookies, data=data)
print(response.text)
print(response)
| qifiqi/codebase | python_codebase/爬虫/爬虫逆向进阶实战-案例/login_10086_cn.py | login_10086_cn.py | py | 4,718 | python | en | code | 3 | github-code | 13 |
13103767544 | # https://school.programmers.co.kr/learn/courses/30/lessons/84512
def solution(word):
    """Return the 1-based position of *word* in the dictionary of all words
    of length 1..5 over the vowels A, E, I, O, U, listed in lexicographic
    order (programmers.co.kr problem 84512).

    The old version shadowed the builtin ``dict`` with its lookup table;
    renamed to ``vowel_index``.
    """
    vowel_index = {'A': 0, 'E': 1, 'I': 2, 'O': 3, 'U': 4}
    answer = 0
    # weight of a change at a given position: how many dictionary entries a
    # one-step vowel change there skips; built up as 1, 6, 31, 156, 781
    # via weight = weight * 5 + 1, walking positions from last to first
    weight = 1
    for position in range(4, -1, -1):
        if position < len(word):
            answer += weight * vowel_index[word[position]] + 1
        weight = weight * 5 + 1
    return answer
15304098669 | """ Tower of Hanoi Interactive
"""
import sys
# Number of disks in the puzzle.
N = 3
# Towers keyed by name; each list holds disk sizes bottom-to-top, so tower A
# starts as [N, ..., 2, 1].
towers = {
    'A': list(reversed(range(1, N + 1))),
    'B': [],
    'C': []}
def move(start, end):
    """Move the top disk from tower *start* onto tower *end* (no legality
    checking here -- play() validates moves before calling)."""
    towers[end].append(towers[start].pop())
def show_board():
    """Print one bracketed column per tower on a single tab-separated line,
    disks listed bottom-to-top."""
    rows = []
    # the tower names are not displayed, only their contents, so iterate
    # values() instead of items() (the old loop never used the key)
    for disks in towers.values():
        rows.append('[' + ''.join(str(d) for d in disks) + ']')
    print('\t'.join(rows))
def play():
    """Main interactive loop: read two-letter moves (e.g. 'AB') until the
    puzzle is solved or the user quits with 'Q'.

    Input is now validated BEFORE indexing into it or into the towers dict:
    the old code crashed with IndexError on a single-character entry and
    with KeyError on letters outside A-C.
    """
    while True:
        show_board()
        m = input(":").upper()
        if m == 'Q':
            sys.exit()
        # format check first: we need at least two characters to unpack
        if len(m) < 2:
            print('Illegal move')
            continue
        start, end = m[0], m[1]
        # both towers must exist and differ
        if start not in 'ABC' or end not in 'ABC' or start == end:
            print('Illegal move')
            continue
        # cannot move from an empty tower
        if len(towers[start]) == 0:
            print('Illegal move')
            continue
        # cannot place a larger disk on a smaller one
        if towers[end] and towers[start][-1] > towers[end][-1]:
            print('Illegal move')
            continue
        move(start, end)
        # solved when all N disks are stacked on tower B, largest first
        if towers['B'] == list(reversed(range(1, N + 1))):
            show_board()
            print('Solved!')
            sys.exit()
print("--------------")
print('Tower of Hanoi')
# grammar fix in the user-facing instructions ("two letter from abc")
print('Type two letters from ABC (start and end tower), type Q to quit ...')
print("--------------")
play()
20296055415 | #!/usr/bin/env python
from copy import copy
import game_assets
from utils import get_logger
from string import capwords
class Model:
    """ This is the base class for all model classes. It provides the basic
    methods that all model objects (e.g. Innovations, Resources, etc.) have to
    support. Subclasses set self.game_assets (a dict of asset dicts) and
    self.name in their __init__. """

    def __init__(self):
        self.logger = get_logger()

    def get_asset(self, game_asset_key):
        """ Return the raw asset dictionary for 'game_asset_key'. Raises
        KeyError if the key is not part of this model. """
        return self.game_assets[game_asset_key]

    def get_keys(self, exclude_if_attrib_exists=None, Settlement=None):
        """ Return a list of asset keys for this model.

        Assets whose dict contains 'exclude_if_attrib_exists' are omitted.
        If a 'Settlement' object is supplied, assets belonging to expansions
        the settlement does not use are also omitted. """
        keys = []
        for asset_key in self.game_assets.keys():
            if exclude_if_attrib_exists not in self.game_assets[asset_key]:
                keys.append(asset_key)

        if Settlement is not None:
            # iterate over a copy: removing items from the list being
            # iterated silently skips the element after each removal
            for asset_key in list(keys):
                asset_dict = self.get_asset(asset_key)
                if "expansion" in asset_dict.keys() and asset_dict["expansion"] not in Settlement.get_expansions():
                    keys.remove(asset_key)

        return keys

    def get_pretty_name(self):
        """ Set and return self.pretty_name, a human-friendly version of
        self.name (e.g. 'fighting_art' -> 'Fighting Art'). """
        self.pretty_name = self.name.replace("_", " ").title()
        if self.name == "ability":
            self.pretty_name = "Ability or Impairment"
        return self.pretty_name

    def get_forbidden(self, settlement_object=None):
        """ Checks all assets for whether they are forbidden by settlement
        attributes, i.e. whether they're on a 'forbidden' list. Returns a
        set of asset keys. """
        campaign = settlement_object.get_campaign()
        c_dict = game_assets.campaigns[campaign]    # loop-invariant lookup
        forbidden = set()
        for game_asset in self.get_keys():
            if "forbidden" in c_dict.keys() and game_asset in c_dict["forbidden"]:
                forbidden.add(game_asset)
        return forbidden

    def get_always_available(self, settlement_object=None):
        """ Checks all assets in the model against the settlement attributes
        and their own attributes to see if they're on an 'always_available'
        list or have the 'always_available' attrib. Returns a set of the
        ones that do. """
        campaign = settlement_object.get_campaign()
        expansions = settlement_object.get_expansions()
        c_dict = game_assets.campaigns[campaign]    # loop-invariant lookup

        always_available = set()
        for game_asset in self.get_keys():
            # first check the campaign
            if "always_available" in c_dict and game_asset in c_dict["always_available"]:
                always_available.add(game_asset)
            # then check the expansions
            for e in expansions:
                e_dict = game_assets.expansions[e]
                if "always_available" in e_dict.keys() and game_asset in e_dict["always_available"]:
                    always_available.add(game_asset)
            # finally, check the asset itself
            if "always_available" in self.get_asset(game_asset).keys():
                always_available.add(game_asset)

        return always_available

    def render_as_html_toggle_dropdown(self, selected=None, submit_on_change=True, expansions=[]):
        """ Creates a single dropdown for the model where '-' is selected by
        default, but the user can toggle to something else from the list of
        asset keys. ('expansions' is only read, never mutated, so the mutable
        default is safe here.) """
        self.get_pretty_name()

        options = self.get_keys()
        # iterate over a copy: removing from the list being iterated would
        # skip the element that follows each removed one
        for o in list(options):
            if "expansion" in self.get_asset(o) and self.get_asset(o)["expansion"] not in expansions:
                options.remove(o)

        soc = ""
        if submit_on_change:
            soc = "this.form.submit()"

        if selected is None or selected == "":
            selected = "-"

        output = '\n\t<select name="add_%s" onchange="%s" class="min_width">' % (self.name, soc)
        options.append("-")
        for o in sorted(options):
            s = ""
            if o == selected:
                s = "selected"
            output += '\t\t<option %s>%s</option>\n' % (s, o)
        output += '</select>\n'

        return output

    def render_as_html_dropdown(self, submit_on_change=True, exclude=[], disable=[], excluded_type=None, Settlement=None):
        """ Renders the model as an HTML dropdown and returns a string.

        'submit_on_change' controls whether the select submits on change.
        'exclude' keeps certain keys out of the render; 'disable' greys out
        certain options; 'excluded_type' drops all assets of one type; a
        'Settlement' object additionally applies expansion/campaign rules.
        """
        self.get_pretty_name()

        options = self.get_keys()

        def remove_option(key):
            # guarded removal: the same key can qualify for exclusion more
            # than once, and a bare list.remove() would raise ValueError
            if key in options:
                options.remove(key)

        # caller-specified exclusions
        for excluded_key in exclude:
            remove_option(excluded_key)

        # exclude if the asset wants to be excluded
        for self_ex_asset in self.get_keys():
            if "exclude_from_picker" in self.get_asset(self_ex_asset):
                remove_option(self_ex_asset)

        # exclude by type
        if excluded_type is not None:
            for asset in self.get_keys():
                if "type" in self.get_asset(asset).keys() and self.get_asset(asset)["type"] == excluded_type:
                    remove_option(asset)

        # exclude by expansion and campaign rules if we've got a Settlement obj
        if Settlement is not None:
            settlement_expansions = Settlement.get_expansions()
            forbidden = Settlement.get_campaign("dict")["forbidden"]    # loop-invariant
            for asset in list(options):
                asset_dict = self.get_asset(asset)
                if "expansion" in asset_dict.keys() and asset_dict["expansion"] not in settlement_expansions:
                    remove_option(asset)
                elif asset in forbidden:
                    remove_option(asset)

        if options == []:
            # stop here if we've got no options to return
            return "<!-- no available options for '%s' -->\n" % self.name

        if submit_on_change:
            submit_on_change = "this.form.submit()"

        output = '\n\t<select name="add_%s" onchange="%s">' % (self.name, submit_on_change)
        output += '\t<option selected disabled hidden value=''>Add %s</option>' % self.pretty_name
        if self.name in ["disorder","fighting_art"]:
            output += '\t\t<option value="RANDOM_%s">* Random %s</option>' % (self.name.upper(), self.pretty_name)
            output += ' <option disabled>&ensp; &ensp; --- </option>'
        for o in sorted(options):
            disabled = ""
            if o in disable:
                disabled = "disabled"
            pretty_o = o
            if self.name == "ability":
                a = Abilities.get_asset(o)
                pretty_o = "%s (%s)" % (o, capwords(a["type"].replace("_"," ")))
            output += '\t\t<option value="%s" %s>%s</option>\n' % (o, disabled, pretty_o)
        output += '</select>\n'

        return output
#
# Define and initialize all models below here ONLY!
# All of these have to have a self.game_assets dictionary that includes all of
# of the game assets associated with the model class.
#
# self.name, by the bye, should be the singular appelation used in forms to
# add/remove the game asset from one of our application assets, e.g.
# add_item/remove_item, add_disorder/remove_disorder, etc.
#
class abilitiesModel(Model):
    """ Model wrapper for the abilities_and_impairments asset dictionary. """

    def __init__(self):
        Model.__init__(self)
        self.game_assets = game_assets.abilities_and_impairments
        self.name = "ability"

    def get_maxed_out_abilities(self, survivor_abilities):
        """ Pass this a survivor["abilities_and_impairments"] list and it will
        return a sorted list of ability/impairment keys for which the survivor
        is ineligible (already at the asset's 'max'). """
        maxed_out = set()
        for key, a_dict in self.game_assets.items():
            if "max" in a_dict and key in survivor_abilities:
                if survivor_abilities.count(key) == a_dict["max"]:
                    maxed_out.add(key)
        return sorted(list(maxed_out))
class disordersModel(Model):
    """ Model wrapper for the disorders asset dictionary. """

    def __init__(self):
        Model.__init__(self)
        self.game_assets = game_assets.disorders
        self.name = "disorder"

    def build_asset_deck(self, Settlement):
        """ Build the disorder deck for 'Settlement': base-game disorders
        plus those of any enabled expansion, minus anything the settlement's
        campaign forbids. Returns a sorted list of keys. """
        expansions = Settlement.get_expansions()

        deck = []
        for disorder in game_assets.disorders.keys():
            d_dict = self.get_asset(disorder)
            if "expansion" not in d_dict.keys():
                deck.append(disorder)
            elif d_dict["expansion"] in expansions:
                deck.append(disorder)

        # filter out campaign-forbidden disorders: build a new list rather
        # than calling deck.remove() while iterating deck (the old loop
        # skipped the element after each removal) and look the campaign dict
        # up once instead of once per disorder
        campaign_dict = Settlement.get_campaign("dict")
        deck = [d for d in deck if d not in campaign_dict["forbidden"]]

        return sorted(deck)
class epithetsModel(Model):
    """ Model wrapper for the epithets asset dictionary. """
    def __init__(self):
        Model.__init__(self)
        self.game_assets = game_assets.epithets
        self.name = "epithet"
class fightingArtsModel(Model):
    """ Model wrapper for the fighting_arts asset dictionary. """

    def __init__(self):
        Model.__init__(self)
        self.game_assets = game_assets.fighting_arts
        self.name = "fighting_art"

    def build_survivor_deck(self, Survivor=None, Settlement=None):
        """ Builds a survivor's personal fighting arts deck: all non-secret
        fighting arts, minus the ones the survivor already has, minus
        non-enabled expansion content, plus campaign 'always available'
        items, minus campaign-forbidden items. Returns a sorted, uniquified
        list of keys. """
        fa_deck = self.get_keys(exclude_if_attrib_exists="secret")

        # remove survivor's current arts from the deck
        for fa in Survivor.survivor["fighting_arts"]:
            if fa in fa_deck:
                fa_deck.remove(fa)

        # remove non-enabled expansion content from the deck (iterate over a
        # sorted copy so removals don't disturb the iteration)
        for fa in sorted(fa_deck):
            if "expansion" in self.get_asset(fa):
                if "expansions" not in Settlement.settlement.keys():
                    fa_deck.remove(fa)
                elif self.get_asset(fa)["expansion"] not in Settlement.settlement["expansions"]:
                    fa_deck.remove(fa)

        # add always_available items / remove forbidden items
        fa_deck.extend(self.get_always_available(Settlement))
        for forbidden_asset in self.get_forbidden(Settlement):
            # guard: a forbidden asset may already be absent (e.g. secret
            # arts, or arts the survivor already has), and a bare remove()
            # on a missing element raises ValueError
            if forbidden_asset in fa_deck:
                fa_deck.remove(forbidden_asset)

        # uniquify and sort before returning
        fa_deck = sorted(list(set(fa_deck)))
        return fa_deck
class locationsModel(Model):
    """ Model wrapper for the locations asset dictionary. """
    def __init__(self):
        Model.__init__(self)
        self.game_assets = game_assets.locations
        # presumably read by list-rendering code elsewhere -- TODO confirm
        self.sort_alpha = True
        self.uniquify = True
        self.name = "location"
class itemsModel(Model):
    """ Model wrapper for the items asset dictionary. Renders the settlement
    storage pickers; output strings feed templates directly, so the markup
    here must stay byte-exact. """
    def __init__(self):
        Model.__init__(self)
        self.game_assets = game_assets.items
        self.name = "item"
    def render_as_html_multiple_dropdowns(self, recently_added=[], expansions=[]):
        """ New storage UI. Returns one <select> per item location (plus an
        optional 'Recently Added' one), filtered by enabled expansions.
        NOTE: the mutable defaults are only read, never mutated. """
        output = ""
        def render_location(output, pretty_location_name=None, item_list=[]):
            """ Helper function for programmatically generating item drop-down
            lists. This should be refactored to be buttons one day. """
            output += '\n<select name="add_item" onchange="this.form.submit()">\n'
            output += ' <option disabled selected> %s </option>\n' % pretty_location_name
            for item in item_list:
                output += ' <option value="%s">%s</option>\n' % (item, item)
            output += '\n</select><br><hr class="invisible">'
            return output
        # start creating output
        if recently_added != []:
            output = render_location(output, pretty_location_name="Recently Added", item_list=recently_added)
        # get locations based on location attributes of items
        locations = set()
        for item_key in self.get_keys():
            item_asset = self.get_asset(item_key)
            if "expansion" in item_asset.keys() and item_asset["expansion"] not in expansions:
                pass
            else:
                locations.add(item_asset["location"])
        # bucket the (expansion-filtered) item keys by location
        location_dict = {}
        for location in locations:
            location_dict[location] = set()
        for item_key in self.get_keys():
            item = self.get_asset(item_key)
            if "expansion" in item.keys() and item["expansion"] not in expansions:
                pass
            else:
                location_dict[item["location"]].add(item_key)
        # finally, use the location list to start creating html
        locations = sorted(list(locations))
        for location_key in locations:
            # skip dropdowns for locations that belong to disabled expansions
            if location_key in Locations.get_keys():
                loc_asset = Locations.get_asset(location_key)
                if "expansion" in loc_asset and loc_asset["expansion"] not in expansions:
                    pass
                else:
                    output = render_location(output, pretty_location_name=location_key, item_list=sorted(location_dict[location_key]))
            else:
                output = render_location(output, pretty_location_name=location_key, item_list=sorted(location_dict[location_key]))
        return output
    def render_as_html_dropdown_with_divisions(self, recently_added=[]):
        """ Old storage UI. Deprecated. Renders a single <select> with
        location headings as disabled options. """
        locations = set()
        for item_key in self.get_keys():
            locations.add(self.get_asset(item_key)["location"])
        location_dict = {}
        for location in locations:
            location_dict[location] = set()
        for item_key in self.get_keys():
            item = self.get_asset(item_key)
            location_dict[item["location"]].add(item_key)
        locations = sorted(list(locations))
        output = '\n<select name="add_item" onchange="this.form.submit()">\n'
        output += '<option selected disabled hidden value=''>Add Item</option>\n'
        if recently_added != []:
            output += ' <option disabled>&ensp; &ensp; --- Recently Added --- </option>\n'
            for item in recently_added:
                output += ' <option value="%s">%s</option>\n' % (item, item)
        for location_key in locations:
            output += ' <option disabled>&ensp; &ensp; --- %s --- </option>\n' % location_key
            for item in sorted(location_dict[location_key]):
                output += ' <option value="%s">%s</option>\n' % (item, item)
        output += '</select>\n'
        return output
class innovationsModel(Model):
    """ Model wrapper for the innovations asset dictionary. """
    def __init__(self):
        Model.__init__(self)
        self.game_assets = game_assets.innovations
        self.sort_alpha = True
        self.uniquify = True
        self.name = "innovation"
class nemesesModel(Model):
    """ Model wrapper for the nemeses asset dictionary. """
    def __init__(self):
        Model.__init__(self)
        self.game_assets = game_assets.nemeses
        self.name = "nemesis"
class quarriesModel(Model):
    """ Model wrapper for the quarries asset dictionary. """
    def __init__(self):
        Model.__init__(self)
        self.sort_alpha = True
        self.game_assets = game_assets.quarries
        self.name = "quarry"
class nemesisMonstersModel(Model):
    """ This is a pseudo model: it starts with an empty game_assets dict
    instead of wrapping anything from game_assets.py. """
    def __init__(self):
        Model.__init__(self)
        self.game_assets = {}
        self.sort_alpha = True
        self.name = "nemesis_monster"
class defeatedMonstersModel(Model):
    """ This is a pseudo model, which basically means that it is created with
    no references to game_assets.py because it depends totally on the actual
    settlement. Its methods are mostly private/unique to it. """

    def __init__(self):
        Model.__init__(self)
        self.game_assets = game_assets.defeated_monsters
        self.name = "defeated_monster"
        self.sort_alpha = True
        self.stack = True

    def build_asset_deck(self, settlement, base_options=None):
        """ Call this method with the settlement mdb object/dict to build an
        asset deck for this model.

        'base_options', if provided, seeds the deck; a copy is taken so the
        caller's list is never mutated. (The old signature used the mutable
        default base_options=[], which was appended to in place and therefore
        accumulated entries across calls.) """
        deck = [] if base_options is None else list(base_options)
        deck.append("White Lion (First Story)")
        deck.extend(settlement.get_quarries("list_of_options"))
        deck.extend(settlement.get_nemeses("list_of_options"))
        return deck
class resourcesModel(Model):
    """ Model wrapper for the resources asset dictionary.
    NOTE(review): unlike the other models, this one never sets self.name, so
    Model methods that read it would fail -- confirm it is only used for
    key/asset lookups. """
    def __init__(self):
        Model.__init__(self)
        self.game_assets = game_assets.resources
class survivalActionsModel(Model):
    """ Model wrapper for the survival_actions asset dictionary.
    NOTE(review): self.name is never set here; see resourcesModel. """
    def __init__(self):
        Model.__init__(self)
        self.game_assets = game_assets.survival_actions
class weaponProficienciesModel(Model):
    """ Model wrapper for the weapon_proficiencies asset dictionary. """
    def __init__(self):
        Model.__init__(self)
        self.game_assets = game_assets.weapon_proficiencies
        self.name = "weapon_proficiency_type"
class weaponMasteriesModel(Model):
    """ Pseudo model: synthesizes one 'Mastery - X' asset for every weapon
    proficiency type instead of reading a prebuilt dict from game_assets. """
    def __init__(self):
        Model.__init__(self)
        self.game_assets = {}
        for weapon in game_assets.weapon_proficiencies:
            self.game_assets["Mastery - %s" % weapon] = {
                "type": "weapon proficiency",
                "all_survivors": "Specialization - %s" % weapon,
                "settlement_buff": "All survivors gain <i>Specialization - %s</i>." % weapon,
            }
        self.name = "weapon_mastery_type"
# initialize all of our classes above when this module is imported
# (these module-level singletons are what the rest of the app imports)
Abilities = abilitiesModel()
Disorders = disordersModel()
Epithets = epithetsModel()
FightingArts = fightingArtsModel()
Locations = locationsModel()
Items = itemsModel()
Innovations = innovationsModel()
Nemeses = nemesesModel()
Quarries = quarriesModel()
Resources = resourcesModel()
WeaponMasteries = weaponMasteriesModel()
WeaponProficiencies = weaponProficienciesModel()
DefeatedMonsters = defeatedMonstersModel() # this is like...a pseudo model
NemesisMonsters = nemesisMonstersModel() # another pseudo model
SurvivalActions = survivalActionsModel()
#
# mutually exclusive principles
#
# Maps a principle name to the pair of innovations of which a settlement may
# only ever choose one.
mutually_exclusive_principles = {
    "Death": ("Graves", "Cannibalize"),
    "New Life": ("Protect the Young", "Survival of the Fittest"),
    "Society": ("Collective Toil", "Accept Darkness"),
    "Conviction": ("Romantic", "Barbaric"),
}
#
# The User Preferences Model
#
# Each entry: key -> {"desc": question shown to the user,
#                     "affirmative"/"negative": labels for the two choices,
#                     "type" (optional): category heading; defaults to
#                     "General" in userPreferences.get_category_dict()}.
preferences_dict = {
    "preserve_sessions": {
        "desc": "Preserve Sessions?",
        "affirmative": "Keep me logged in",
        "negative": "Remove sessions after 24 hours",
    },
    "comma_delimited_lists": {
        "desc": "How should Location, Innovation, Innovation Deck, etc. lists be displayed?",
        "affirmative": "Comma-delimited lists",
        "negative": "Line item, bulleted lists",
    },
    "apply_new_survivor_buffs": {
        "type": "Automation",
        "desc": "Automatically apply settlement bonuses to new survivors?",
        "affirmative": "Automatically apply",
        "negative": "Do not apply",
    },
    "apply_weapon_specialization": {
        "type": "Automation",
        "desc": "Automatically add weapon specializations if Innovations include the mastery?",
        "affirmative": "Add",
        "negative": "Do Not Add",
    },
    "hide_timeline": {
        "type": "Timeline",
        "desc": "Automatically hide the Settlement Sheet Timeline controls?",
        "affirmative": "Hide",
        "negative": "Always Show",
    },
    "show_future_timeline": {
        "type": "Timeline",
        "desc": "How many Lantern Years should be visible when viewing the Timeline?",
        "affirmative": "Show the next four Lantern Years",
        "negative": "Only show the current Lantern Year",
    },
    "confirm_on_return": {
        "type": "Campaign Summary",
        "desc": "Confirm Departing Survivors return?",
        "affirmative": "Confirm",
        "negative": "Do not confirm",
    },
    "update_timeline": {
        "type": "Automation",
        "desc": "Automatically Update Timeline with Milestone Story Events?",
        "affirmative": "Update settlement timelines when milestone conditions are met",
        "negative": "Do not automatically update settlement timelines",
    },
    "show_epithet_controls": {
        "type": "Survivor Sheet",
        "desc": "Use survivor epithets?",
        "affirmative": "Show controls on Survivor Sheets",
        "negative": "Hide controls and survivor epithets on Survivor Sheets",
    },
    "show_remove_button": {
        "type": "Settlement Sheet",
        "desc": "Show controls for removing Settlements?",
        "affirmative": "Show controls on Settlement Sheet",
        "negative": "Hide controls on Settlement Sheet",
    },
    "dynamic_innovation_deck": {
        "type": "Settlement Sheet",
        "desc": "What Innovations should be selectable?",
        "affirmative": "Innovation Deck only",
        "negative": "All Innovations (not recommended)",
    },
    "hide_principle_controls": {
        "type": "Settlement Sheet",
        "desc": "Use settlement milestones to hide unavailable principles?",
        "affirmative": "Dynamically hide Principle controls",
        "negative": "Always show all Principle controls",
    },
    "confirm_on_remove_from_storage": {
        "type": "Settlement Sheet",
        "desc": "Confirm before removing items from Settlement Storage?",
        "affirmative": "Confirm with Pop-up",
        "negative": "Do not confirm",
    },
}
class userPreferences(Model):
    """ Pseudo model: wraps preferences_dict instead of a game_assets dict. """
    def __init__(self):
        Model.__init__(self)
        self.preferences_dict = preferences_dict
        self.game_assets = preferences_dict
    def get_keys(self):
        # returns the preference keys (a dict view in Python 3, not a list)
        return self.preferences_dict.keys()
    def pref(self, user_object, pref_key):
        # Return the preference dict for 'pref_key' with 'checked' markers set
        # according to the user's current choice.
        # NOTE(review): this mutates the shared preferences_dict entry in
        # place, so the flags reflect the most recent caller only.
        pref_dict = self.preferences_dict[pref_key]
        if user_object.get_preference(pref_key):
            pref_dict["affirmative_selected"] = "checked"
            pref_dict["negative_selected"] = ""
        else:
            pref_dict["affirmative_selected"] = ""
            pref_dict["negative_selected"] = "checked"
        return pref_dict
    def get_categories(self):
        """ Creates a list of available/extant headings for category. """
        categories = set(["General"])
        for k in self.get_keys():
            asset_dict = self.get_asset(k)
            if "type" in asset_dict.keys():
                categories.add(asset_dict["type"])
        return sorted(list(categories))
    def get_category_dict(self):
        """ Uses self.get_categories() to create a dict where each key is a
        category and the value is a list of preference keys. """
        d = {}
        categories = self.get_categories()
        for c in categories:
            d[c] = []
        for a in self.get_keys():
            asset_dict = self.get_asset(a)
            if "type" in asset_dict:
                d[asset_dict["type"]].append(a)
            else:
                d["General"].append(a)
        return d
Preferences = userPreferences()
| chummer5a/kdm-manager | v1/models.py | models.py | py | 23,466 | python | en | code | null | github-code | 13 |
70270411857 | '''
calc.py Version: 1.0 by Affe_130
Important:
This program should be run in a console with the py command!
Help:
Modes: -add, -sub, -mul, -div, -exp, -root, -stat, Example: py calc.py -add 1 2 3
'''
import sys

# Raw CLI arguments; arg[1] is the mode flag, arg[2:] are the operands.
arg = sys.argv
# Collect all operand values in one slice (the old code copied them one by
# one with an index loop).
values = arg[2:]
def add(list): #Adds all values
    """ Sum every value in *list*, converting each to float.
    Raises IndexError on an empty list (intentional: a mode needs operands). """
    total = float(list[0])
    for value in list[1:]:
        total = total + float(value)
    return total
def sub(list): #Takes the first value and subtracts it with all the others
    """ Subtract every following value from the first one. """
    total = float(list[0])
    for value in list[1:]:
        total = total - float(value)
    return total
def mul(list): #Multiplicates all values
    """ Multiply all values together. """
    total = float(list[0])
    for value in list[1:]:
        total = total * float(value)
    return total
def div(list): #Takes the first value and divides it with all the others
    """ Divide the first value by each of the following ones in turn. """
    total = float(list[0])
    for value in list[1:]:
        total = total / float(value)
    return total
def exp(list): #Takes the first value to the power of the second value
    """ Raise the first value to the power of the second. """
    return float(list[0]) ** float(list[1])
def root(list): #Takes the root from the first value of the second value
    """ Take the n-th root of the first value, where n is the second value.

    The local that holds the reciprocal exponent was renamed from 'exp' to
    'exponent': the old name shadowed the exp() function defined above.
    """
    exponent = 1 / float(list[1])
    return float(list[0]) ** exponent
#All these ifs check for which argument has been given
if len(arg) <= 1: #No argument has been given
    print("-help to get help with commands")
if len(arg) > 1:
    if arg[1] == "-add":
        print("Answer: ", add(values))
    if arg[1] == "-sub":
        print("Answer: ", sub(values))
    if arg[1] == "-mul":
        print("Answer: ", mul(values))
    if arg[1] == "-div":
        print("Answer: ", div(values))
    if arg[1] == "-exp":
        print("Answer: ", exp(values))
    if arg[1] == "-root":
        print("Answer: ", root(values))
    if arg[1] == "-stat":
        # sort numerically: a plain sort() orders the string operands
        # lexicographically (e.g. "10" < "9"), which made the highest/lowest
        # values wrong for multi-digit numbers
        values.sort(key=float)
        print("Number of values: ", len(values))
        print("Total value: ", add(values))
        print("Highest value: ", values[len(values) - 1])
        print("Lowest value: ", values[0])
        print("Average value: ", add(values) / len(values))
    if arg[1] == "-help":
        print("Modes: -add, -sub, -mul, -div, -exp, -root, -stat, Example: py calc.py -add 1 2 3")
| Affe130/Calculator | calc.py | calc.py | py | 2,387 | python | en | code | 0 | github-code | 13 |
5232720034 | '''
*****************************************************************************************
*
* ===============================================
* Nirikshak Bot (NB) Theme (eYRC 2020-21)
* ===============================================
*
* This script is to implement Task 1B of Nirikshak Bot (NB) Theme (eYRC 2020-21).
*
* This software is made available on an "AS IS WHERE IS BASIS".
* Licensee/end user indemnifies and will keep e-Yantra indemnified from
* any and all claim(s) that emanate from the use of the Software or
* breach of the terms of this agreement.
*
* e-Yantra - An MHRD project under National Mission on Education using ICT (NMEICT)
*
*****************************************************************************************
'''
# Team ID: [ 1756 ]
# Author List: [ Serena Raju ]
# Filename: task_1b.py
# Functions: applyPerspectiveTransform, detectMaze, writeToCsv
# [ order_points, four_point_transform, sort_contours ]
# Global variables:
# [ List of global variables defined in this file ]
####################### IMPORT MODULES #######################
## You are not allowed to make any changes in this section. ##
## You have to implement this task with the three available ##
## modules for this task (numpy, opencv, csv) ##
##############################################################
import numpy as np
import cv2
import csv
##############################################################
################# ADD UTILITY FUNCTIONS HERE #################
## You can define any utility functions for your code. ##
## Please add proper comments to ensure that your code is ##
## readable and easy to understand. ##
##############################################################
def order_points(pts):
    """Order four corner points as [top-left, top-right, bottom-right, bottom-left].

    The top-left corner has the smallest x+y sum and the bottom-right the
    largest; the top-right has the smallest y-x difference and the
    bottom-left the largest.
    """
    coord_sum = pts.sum(axis=1)
    coord_diff = np.diff(pts, axis=1)

    ordered = np.zeros((4, 2), dtype="float32")
    ordered[0] = pts[np.argmin(coord_sum)]   # top-left: min(x + y)
    ordered[1] = pts[np.argmin(coord_diff)]  # top-right: min(y - x)
    ordered[2] = pts[np.argmax(coord_sum)]   # bottom-right: max(x + y)
    ordered[3] = pts[np.argmax(coord_diff)]  # bottom-left: max(y - x)

    # return the ordered coordinates
    return ordered
def four_point_transform(image, pts):
    """Warp the quadrilateral given by *pts* in *image* to a top-down view.

    The output rectangle's width is the larger of the two horizontal edge
    lengths of the quadrilateral, and its height the larger of the two
    vertical edge lengths.
    """
    # obtain a consistent corner ordering: tl, tr, br, bl
    quad = order_points(pts)
    (tl, tr, br, bl) = quad

    def _edge_len(p, q):
        # Euclidean distance between two corner points
        return np.sqrt(((p[0] - q[0]) ** 2) + ((p[1] - q[1]) ** 2))

    # output width: maximum of the bottom and top edge lengths
    max_width = max(int(_edge_len(br, bl)), int(_edge_len(tr, tl)))
    # output height: maximum of the right and left edge lengths
    max_height = max(int(_edge_len(tr, br)), int(_edge_len(tl, bl)))

    # destination corners of the top-down view, in the same tl/tr/br/bl order
    dst = np.array(
        [[0, 0],
         [max_width - 1, 0],
         [max_width - 1, max_height - 1],
         [0, max_height - 1]],
        dtype="float32")

    # compute the perspective transform matrix and apply it
    matrix = cv2.getPerspectiveTransform(quad, dst)
    return cv2.warpPerspective(image, matrix, (max_width, max_height))
def sort_contours(cnts, method="left-to-right"):
    """Sort contours spatially by their bounding boxes.

    Input Arguments:
    ---
    `cnts` : sequence of contours (as returned by cv2.findContours)
    `method` : [ str ]
        one of "left-to-right" (default), "right-to-left",
        "top-to-bottom", "bottom-to-top"

    Returns:
    ---
    tuple of the contours in sorted order (bounding boxes are discarded)
    """
    # reversed orderings sort descending on the chosen coordinate
    reverse = method in ("right-to-left", "bottom-to-top")
    # sort on x (index 0) for horizontal orderings, on y (index 1) for vertical
    i = 1 if method in ("top-to-bottom", "bottom-to-top") else 0
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    (cnts, boundingBoxes) = zip(*sorted(zip(cnts, boundingBoxes),
        key=lambda b: b[1][i], reverse=reverse))
    # only the sorted contours are needed by the callers
    return cnts
##############################################################
def applyPerspectiveTransform(input_img):
    """
    Purpose:
    ---
    takes a maze test case image as input and applies a Perspective Transform on it to isolate the maze

    Input Arguments:
    ---
    `input_img` : [ numpy array ]
        maze image in the form of a numpy array (BGR, as read by cv2.imread)

    Returns:
    ---
    `warped_img` : [ numpy array ]
        resultant warped maze image after applying Perspective Transform
        (grayscale, resized to 1280x1280), or None when no quadrilateral
        maze outline could be detected

    Example call:
    ---
    warped_img = applyPerspectiveTransform(input_img)
    """

    warped_img = None

    ############## ADD YOUR CODE HERE ##############

    # pre-processing: grayscale -> blur (noise suppression) -> edge map
    gray = cv2.cvtColor(np.asarray(input_img), cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 200, 450)

    # external contours only; sorted left-to-right and converted to float32
    # so cv2.approxPolyDP accepts them
    (cnts, _) = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = np.array(sort_contours(cnts), dtype=np.float32)

    # the maze border is the first contour whose polygonal approximation
    # has exactly 4 vertices
    screenCnt = None
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.02 * peri, True)
        if len(approx) == 4:
            screenCnt = approx
            break
    if screenCnt is None:
        # no quadrilateral outline found: signal failure to the caller
        # instead of raising NameError on an unbound screenCnt
        return None

    # apply the four point transform to obtain a top-down view of the maze,
    # then grayscale it and normalise the size to 1280x1280
    warp = four_point_transform(input_img, screenCnt.astype(float).reshape(4, 2))
    warped_img = cv2.resize(cv2.cvtColor(warp, cv2.COLOR_BGR2GRAY), (1280, 1280))

    ##################################################

    return warped_img
def detectMaze(warped_img):

    """
    Purpose:
    ---
    takes the warped maze image as input and returns the maze encoded in form of a 2D array

    Input Arguments:
    ---
    `warped_img` : [ numpy array ]
        resultant warped maze image after applying Perspective Transform
        (assumed single-channel grayscale -- `.shape` is unpacked into
        exactly two values below; TODO confirm against the caller)

    Returns:
    ---
    `maze_array` : [ nested list of lists ]
        encoded 10x10 maze; each cell is a bitmask built from the walls
        detected around it: +1 left, +2 top, +4 right, +8 bottom

    Example call:
    ---
    maze_array = detectMaze(warped_img)

    NOTE(review): this function writes 'warp.jpg' and 'dilate.jpg' to the
    working directory as debugging artifacts -- confirm these side effects
    are intended before shipping.
    """

    maze_array = []

    ############## ADD YOUR CODE HERE ##############

    r,c = warped_img.shape
    # keep the original shape so the image can be resized to a square
    warp = cv2.resize(warped_img, (r, r), interpolation = cv2.INTER_AREA)
    # force the aspect ratio to 1:1 so each maze cell spans r/10 pixels
    cv2.imwrite('warp.jpg', warp)
    # invert-threshold: dark maze lines become 255, background becomes 0
    ret, thresh = cv2.threshold(warp, 100, 255, cv2.THRESH_BINARY_INV)
    # NOTE(review): this overwrites the 'warp.jpg' written two lines above
    cv2.imwrite('warp.jpg', thresh)
    if (r>410):
        ke = 21 # kernel size 21 for large images
        kernel1 = np.ones((ke, ke), np.uint8)
        ke = 21
        kernel2 = np.ones((ke, ke), np.uint8)
    else:
        ke = 10 # kernel size 10 for small images
        kernel1 = np.ones((ke, ke), np.uint8)
        ke = 10
        kernel2 = np.ones((ke, ke), np.uint8)
    # dilate the walls so a wall reliably covers a full cell-border row/column
    dilation1 = cv2.dilate(thresh, kernel1, iterations=1)
    dilation2 = cv2.dilate(thresh, kernel2, iterations=1)
    cv2.imwrite('dilate.jpg', dilation1)
    r,c = dilation1.shape
    mat = np.empty((0,), dtype=int)
    # flat accumulator for the 100 cell values; reshaped to 10x10 at the end
    for ro in range(0, r - int(r/10)+1, int(r/10)):
        row = np.empty((0,1), dtype= int)
        for co in range(0, c - int(c/10)+1, int(c/10)):
            sum_t = 0
            # slice out one cell (r/10 x c/10 pixels) of the dilated image
            x = dilation1[ro:ro + int(r/10),co :co + int(c/10)]
            # collapse rows: result[j] is True when column j is all 255,
            # i.e. a vertical wall spans the cell at that column
            result = np.all((x == 255), axis=0)
            if result[0]:
                sum_t += 1  # wall on the left edge of the cell
            if result[int(c/10)-1]:
                sum_t += 4  # wall on the right edge of the cell
            x = dilation2[ro:ro + int(r/10),co :co + int(c/10)]
            # collapse columns: result[j] is True when row j is all 255,
            # i.e. a horizontal wall spans the cell at that row
            result = np.all((x == 255), axis=1)
            if result[0]:
                sum_t += 2  # wall on the top edge of the cell
            if result[int(r/10)-1]:
                sum_t += 8  # wall on the bottom edge of the cell
            row = np.append(row, [sum_t])
        mat = np.append(mat, row, axis = 0)
    mat = mat.reshape(10,10)
    # convert the numpy matrix to the expected nested-list format
    maze_array = mat.tolist()

    ##################################################

    return maze_array
# NOTE: YOU ARE NOT ALLOWED TO MAKE ANY CHANGE TO THIS FUNCTION
def writeToCsv(csv_file_path, maze_array):

    """
    Purpose:
    ---
    takes the encoded maze array and csv file name as input and writes the encoded maze array to the csv file

    Input Arguments:
    ---
    `csv_file_path` : [ str ]
        file path with name for csv file to write

    `maze_array` : [ nested list of lists ]
        encoded maze in the form of a 2D array

    Example call:
    ---
    warped_img = writeToCsv('test_cases/maze00.csv', maze_array)
    """

    # newline='' is required by the csv module to avoid blank rows on Windows
    with open(csv_file_path, 'w', newline='') as file:
        writer = csv.writer(file)
        # one CSV row per maze row
        writer.writerows(maze_array)
# NOTE: YOU ARE NOT ALLOWED TO MAKE ANY CHANGE TO THIS FUNCTION
#
# Function Name: main
# Inputs: None
# Outputs: None
# Purpose: This part of the code is only for testing your solution. The function first takes 'maze00.jpg'
# as input, applies Perspective Transform by calling applyPerspectiveTransform function,
# encodes the maze input in form of 2D array by calling detectMaze function and writes this data to csv file
# by calling writeToCsv function, it then asks the user whether to repeat the same on all maze images
# present in 'test_cases' folder or not. Write your solution ONLY in the space provided in the above
# applyPerspectiveTransform and detectMaze functions.
if __name__ == "__main__":

    # path directory of images in 'test_cases' folder
    img_dir_path = 'test_cases/'

    def _process_maze(file_num):
        """Run the full pipeline on test_cases/maze0<file_num>.jpg.

        Warps the maze image, encodes it as a 2D array, writes the array to
        the matching csv file and shows the warped image. Prints an error
        and terminates the process when either stage returns an unexpected
        result (same behavior as the original duplicated code).
        """
        # path to image file
        img_file_path = img_dir_path + 'maze0' + str(file_num) + '.jpg'
        print('\n============================================')
        print('\nFor maze0' + str(file_num) + '.jpg')
        # path for csv output file
        csv_file_path = img_dir_path + 'maze0' + str(file_num) + '.csv'
        # read the image file
        input_img = cv2.imread(img_file_path)
        # get the resultant warped maze image after applying Perspective Transform
        warped_img = applyPerspectiveTransform(input_img)
        if type(warped_img) is np.ndarray:
            # get the encoded maze in the form of a 2D array
            maze_array = detectMaze(warped_img)
            if (type(maze_array) is list) and (len(maze_array) == 10):
                print('\nEncoded Maze Array = %s' % (maze_array))
                print('\n============================================')
                # writes the encoded maze array to the csv file
                writeToCsv(csv_file_path, maze_array)
                cv2.imshow('warped_img_0' + str(file_num), warped_img)
                cv2.waitKey(0)
                cv2.destroyAllWindows()
            else:
                print('\n[ERROR] maze_array returned by detectMaze function is not complete. Check the function in code.\n')
                exit()
        else:
            print('\n[ERROR] applyPerspectiveTransform function is not returning the warped maze image in expected format! Check the function in code.\n')
            exit()

    # always process 'maze00.jpg' first
    _process_maze(0)

    choice = input('\nDo you want to run your script on all maze images ? => "y" or "n": ')

    if choice == 'y':
        # process the remaining maze images in 'test_cases' folder
        for file_num in range(1, 10):
            _process_maze(file_num)
    else:
        print('')
| serenaraju/Pragmatic-Implementation-Reinforcement-Learning-Path-Planning-Raspberry-Pi-Bot | task_1b_detect_and_encode_maze/task_1b.py | task_1b.py | py | 12,704 | python | en | code | 1 | github-code | 13 |
15428823317 | from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the Compras (purchases) and Vendas (sales) tables."""

    # must run after the stock-movement migration of the same app
    dependencies = [
        ('app', '0002_movimentodeestoque'),
    ]

    operations = [
        migrations.CreateModel(
            name='Compras',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('data', models.DateField()),
                # monetary amount, up to 10 digits with 2 decimal places
                ('valor', models.DecimalField(decimal_places=2, max_digits=10)),
                ('descricao', models.CharField(max_length=120)),
            ],
        ),
        migrations.CreateModel(
            name='Vendas',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('data', models.DateField()),
                # monetary amount, up to 10 digits with 2 decimal places
                ('valor', models.DecimalField(decimal_places=2, max_digits=10)),
                ('forma_de_pagamento', models.CharField(max_length=50)),
                ('descricao', models.CharField(max_length=120)),
            ],
        ),
    ]
| CarolPera/OPE-Vulcano | app/migrations/0003_compras_vendas.py | 0003_compras_vendas.py | py | 1,086 | python | en | code | 2 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.