seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 โ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k โ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
11507295486 | # import general python packages
import unittest
import json
from datetime import datetime
import pytz
import logging
# import lusid specific packages
import lusid
import lusid.models as models
from lusid import ApiException
from utilities import InstrumentLoader, IdGenerator
from utilities import TestDataUtilities
from utilities.id_generator_utilities import delete_entities
class TransactionProperty(unittest.TestCase):
    """Tutorial test: attach a custom JSON-valued property to a transaction
    and read it back from the portfolio."""

    @classmethod
    def setUpClass(cls):
        # Logging configuration for the test run.
        cls.root_logger = logging.getLogger(__name__)
        cls.root_logger.setLevel(logging.INFO)

        # Build one configured API client and the API facades used below.
        api_client = TestDataUtilities.api_client()
        cls.property_definitions_api = lusid.PropertyDefinitionsApi(api_client)
        cls.instruments_api = lusid.InstrumentsApi(api_client)
        cls.transaction_portfolios_api = lusid.TransactionPortfoliosApi(api_client)
        cls.portfolios_api = lusid.PortfoliosApi(api_client)

        # Seed the instrument universe used by the transactions.
        instrument_loader = InstrumentLoader(cls.instruments_api)
        cls.instrument_ids = instrument_loader.load_instruments()

        # Scope/code shared by every request in this test.
        cls.scope = "TransactionProperty"
        cls.code = "TransactionTaxDetail"
        cls.id_generator = IdGenerator(scope=TestDataUtilities.tutorials_scope)

    @classmethod
    def tearDownClass(cls):
        # Remove every entity registered with the id generator during the test.
        delete_entities(cls.id_generator)

    def create_transaction_property(self):
        """Ensure the custom string-typed transaction property definition exists."""
        property_definition = models.CreatePropertyDefinitionRequest(
            domain="Transaction",
            scope=self.scope,
            code=self.code,
            display_name=self.code,
            data_type_id=lusid.ResourceId(scope="system", code="string"),
        )
        try:
            self.property_definitions_api.create_property_definition(
                create_property_definition_request=property_definition
            )
        except lusid.ApiException as e:
            # A pre-existing definition is fine; it is simply logged.
            if json.loads(e.body)["name"] == "PropertyAlreadyExists":
                self.root_logger.info(
                    f"Property {property_definition.domain}/{property_definition.scope}/{property_definition.code} already exists"
                )
        finally:
            # Register the property for clean-up regardless of the outcome.
            self.id_generator.add_scope_and_code("property_definition", property_definition.scope,
                                                 property_definition.code, ["Transaction"])

    def create_portfolio(self):
        """Ensure the test transaction portfolio exists."""
        effective_date = datetime(2020, 12, 1, 0, 0, tzinfo=pytz.utc)
        create_portfolio_request = models.CreateTransactionPortfolioRequest(
            code=self.code,
            display_name=self.code,
            base_currency="GBP",
            created=effective_date,
        )
        try:
            self.transaction_portfolios_api.create_portfolio(
                scope=self.scope,
                create_transaction_portfolio_request=create_portfolio_request,
            )
        except lusid.ApiException as e:
            # A pre-existing portfolio is fine; it is simply logged.
            if json.loads(e.body)["name"] == "PortfolioWithIdAlreadyExists":
                self.root_logger.info(
                    f"Portfolio {create_portfolio_request.code} already exists"
                )
        finally:
            # Register the portfolio for clean-up regardless of the outcome.
            self.id_generator.add_scope_and_code("portfolio", self.scope, self.code)

    def create_txn_with_property(self, instrument_id, property_value):
        """Upsert one Buy transaction carrying the custom property value."""
        effective_date = datetime(2020, 12, 1, 0, 0, tzinfo=pytz.utc)
        txn = models.TransactionRequest(
            transaction_id="TXN001",
            type="Buy",
            instrument_identifiers={"Instrument/default/Figi": instrument_id},
            transaction_date=effective_date,
            settlement_date=effective_date,
            units=1000,
            transaction_price=models.TransactionPrice(price=100, type="Price"),
            total_consideration=models.CurrencyAndAmount(amount=1, currency="GBP"),
            exchange_rate=1,
            transaction_currency="GBP",
            properties={
                f"Transaction/{self.scope}/{self.code}": lusid.PerpetualProperty(
                    key=f"Transaction/{self.scope}/{self.code}",
                    value=lusid.PropertyValue(label_value=property_value),
                )
            },
        )
        return self.transaction_portfolios_api.upsert_transactions(
            scope=self.scope, code=self.code, transaction_request=[txn]
        )

    def get_transaction(self, scope, code):
        """Fetch all transactions of the given portfolio."""
        return self.transaction_portfolios_api.get_transactions(scope=scope, code=code)

    def test_transaction_property(self):
        """Round-trip a JSON-encoded tax detail through a transaction property."""
        transaction_tax_data = {"Tax": 1.0, "Rate": 0.01, "Schedule": "A"}
        transaction_tax_string = json.dumps(transaction_tax_data)

        # Set up the property definition and the portfolio.
        self.create_transaction_property()
        self.create_portfolio()

        # Upsert a transaction whose property value is the serialised tax detail.
        response = self.create_txn_with_property("BBG00KTDTF73", transaction_tax_string)
        self.assertIsNotNone(response)

        # Read the transaction (with its properties) back.
        txn_response = self.get_transaction(scope=self.scope, code=self.code)
        self.assertIsNotNone(txn_response)

        # The stored label value must equal the original serialised string.
        queried_property_string = (
            txn_response.values[0]
            .properties[f"Transaction/{self.scope}/{self.code}"]
            .value.label_value
        )
        self.assertIsNotNone(queried_property_string)
        self.assertEqual(queried_property_string, transaction_tax_string)

        # And each individual key/value must survive the round trip.
        queried_property_dict = json.loads(queried_property_string)
        self.assertEqual(transaction_tax_data["Tax"], queried_property_dict["Tax"])
        self.assertEqual(transaction_tax_data["Rate"], queried_property_dict["Rate"])
        self.assertEqual(
            transaction_tax_data["Schedule"], queried_property_dict["Schedule"]
        )
# Run the test suite when executed directly (no effect on import).
if __name__ == "__main__":
    unittest.main()
| finbourne/lusid-sdk-examples-python | src/tutorials/properties/test_transaction_properties.py | test_transaction_properties.py | py | 6,393 | python | en | code | 2 | github-code | 13 |
8460636455 |
# Start Fibonacci
def fibonacci(quantidade):
    """Return the first `quantidade` Fibonacci numbers: 0, 1, 1, 2, 3, 5, ...

    :param quantidade: how many terms to produce; 0 or negative yields [].
    :return: list of the requested Fibonacci numbers.
    """
    sequencia = []
    numero1, numero2 = 0, 1
    for _ in range(quantidade):
        sequencia.append(numero1)
        numero1, numero2 = numero2, numero1 + numero2
    return sequencia


def main():
    """Repeatedly prompt for a count and print that many Fibonacci numbers.

    A zero or negative count ends the loop, as in the original script.
    """
    quantidade = 1
    while quantidade > 0:
        quantidade = int(input("Digite um numero inteiro para o calculo do FIBONACCI: "))
        # Bug fix: the original printed two numbers per iteration (numero1 and
        # numero3), emitting 0, 1, 1, 2, 1, 3, 2, 5, ... instead of the
        # Fibonacci sequence, and printed 2x the requested count of numbers.
        for numero in fibonacci(quantidade):
            print(numero)


if __name__ == "__main__":
    main()
| Priscillajessika/Python | Fibonacciwhile.py | Fibonacciwhile.py | py | 445 | python | pt | code | 0 | github-code | 13 |
29487780136 | import sys
import warnings
from collections import Counter
import numpy as np
from sklearn.cluster import KMeans
warnings.filterwarnings("ignore")
class GranularBall:
    """A granular ball: a set of labelled samples with a centre, a majority
    label, a purity and a radius."""

    def __init__(self, data, attribute):
        """
        :param data: labelled data set; column "-2" holds the class label, the
            last column holds each row's index, the preceding columns are features
        :param attribute: indices of the condition attributes (feature columns)
        """
        self.data = data[:, :]
        self.attribute = attribute
        self.data_no_label = data[:, attribute]
        # number of samples and feature dimension
        self.num, self.dim = self.data_no_label.shape
        self.center = self.data_no_label.mean(0)
        self.label, self.purity, self.r = self.__get_label_and_purity_and_r()

    def __get_label_and_purity_and_r(self):
        """Return (majority label, purity, radius) of this granular ball."""
        count = Counter(self.data[:, -2])
        majority = max(count, key=count.get)
        purity = count[majority] / self.num
        # Radius = largest Euclidean distance from any sample to the centre.
        distances = np.sqrt(np.sum(np.square(np.array(self.data_no_label) - self.center), 1))
        return majority, purity, max(distances)

    def split_2balls(self):
        """
        Split this granular ball into two via 2-means, seeding one cluster per
        class; returns the resulting list of granular balls (possibly just self).
        """
        class_labels = set(self.data[:, -2].tolist())
        seed_pos = -1            # row index of the seed whose label is 1
        seed_neg = -1            # row index of the seed whose label is not 1
        best_neg = sys.maxsize   # smallest feature sum seen among non-1 labels
        best_pos = sys.maxsize   # smallest feature sum seen among label-1 rows
        balls = []
        feature_sums = np.sum(self.data_no_label, axis=1)
        if len(class_labels) > 1:
            # For each class, pick the row with the smallest feature sum as seed.
            for i in range(len(self.data)):
                if self.data[i, -2] == 1 and feature_sums[i] < best_pos:
                    best_pos = feature_sums[i]
                    seed_pos = i
                elif self.data[i, -2] != 1 and feature_sums[i] < best_neg:
                    best_neg = feature_sums[i]
                    seed_neg = i
            seeds = self.data_no_label[[seed_neg, seed_pos], :]
            clustering = KMeans(n_clusters=2, init=seeds).fit(self.data_no_label)
            membership = clustering.labels_
            if len(set(membership)) > 1:
                # Two non-empty clusters: wrap each one in a new ball.
                balls.append(GranularBall(self.data[membership == 0, :], self.attribute))
                balls.append(GranularBall(self.data[membership == 1, :], self.attribute))
            else:
                balls.append(self)
        else:
            balls.append(self)
        return balls
| wcAreYouOk/GBRS | Code/tolls/GranularBall.py | GranularBall.py | py | 2,659 | python | en | code | 0 | github-code | 13 |
39211398454 | import solveutils
import scramble
import cube.vectors as vc
import numpy as np
# Buffer pieces in the order they are consumed when the previous buffer
# becomes solved mid-solve (floating buffers), per piece type.
BUFFERORDER = {
"corner": ["UFR", "UFL", "UBL", "UBR", "DFR", "DFL", "DBR"],
"edge": ["UF", "UB", "UR", "UL", "DF", "DB", "FR", "FL", "DR", "DL", "BR"],
}
# For each corner buffer, the pair of edge positions used by the parity
# pseudo-swap in solve().
PSEUDOS = {
"UFR": ("UF", "UR"),
"UFL": ("UF", "UL"),
"UBR": ("UB", "UR"),
"UBL": ("UB", "UL"),
"DFR": ("DF", "DR"),
"DFL": ("DF", "DL"),
"DBR": ("DB", "DR"),
}
# Simulate a 3BLD (blindfolded) solve of scramble `scram` and return summary
# statistics: (algs, corner_parity, cyclebreaks, flips, twists, corner buffer count).
# NOTE(review): original indentation was lost in extraction; nesting shown here
# is flattened. Bare `except:` clauses below swallow all exceptions — presumably
# cycle_break raises when no unsolved target remains; confirm in solveutils.
def solve(scram):
cube = solveutils.Solver()
cube = scramble.scramble(cube, scram)
edge_buffers = iter(BUFFERORDER["edge"])
corner_buffers = iter(BUFFERORDER["corner"])
edge_buffer = vc.get_vector(next(edge_buffers))
corner_buffer = vc.get_vector(next(corner_buffers))
cyclebreaks = 0
flips = 0
twists = 0
numcornerbuffers = 1
numedgebuffers = 1
# Trace corner cycles until all 8 corners are solved.
while cube.count_solved("corner") < 8:
if cube.is_permuted(corner_buffer):
if cube.is_solved(corner_buffer):
if not cube.corner_parity:
corner_buffer = vc.get_vector(next(corner_buffers))
continue
try:
cube.cycle_break(corner_buffer)
cyclebreaks += 1
except:
cube.flip_or_twist(corner_buffer)
twists += 1
else:
cube.solve_piece(corner_buffer)
# Corner parity: swap the buffer's two associated edges (pseudo-swap).
if cube.corner_parity:
a, b = PSEUDOS[vc.get_name(corner_buffer)]
a, b = vc.get_vector(a), vc.get_vector(b)
try:
if cube.log['UFR'][-1] == 'BUL':
a = vc.get_name('UB')
b = vc.get_name('UL')
except:
pass
cube.pseudoswap(a, b)
# AFTER THIS DONT EDIT SO WE CAN SAVE
# Trace edge cycles until all 12 edges are solved.
while cube.count_solved("edge") < 12:
if cube.is_permuted(edge_buffer):
if cube.is_solved(edge_buffer):
if not cube.edge_parity:
edge_buffer = vc.get_vector(next(edge_buffers))
continue
try:
cube.cycle_break(edge_buffer)
cyclebreaks += 1
except:
cube.flip_or_twist(edge_buffer)
flips += 1
else:
cube.solve_piece(edge_buffer)
# print(scram)
# print(cube.log)
# Two targets per algorithm; an odd target count adds one extra algorithm.
values = list(map(list, (ele for ele in cube.log.values())))
values = sum(values, [])
algs = int(len(values) / 2) + len(list(values)) % 2
corner_parity = 1 if cube.corner_parity else 0
# Buffers are the log keys, excluding the special 'twist'/'flip' entries.
buffers = [x for x in cube.log.keys() if x not in {'twist', 'flip'}]
cornerbuffers = [x for x in buffers if len(x) == 3]
edgebuffers = [x for x in buffers if len(x) == 2]
# print(algs)
return algs, corner_parity, cyclebreaks, flips, twists, len(cornerbuffers)#, len(edgebuffers)
def test():
    """Solve one fixed scramble and print the returned statistics."""
    result = solve("U2 D F U B R2 L2 D B2 L2 B2 L' F2 D2 R2 U2 R' D2 L F2")
    print(result)
def main():
    """Solve every scramble listed in scrambles.txt and return the stats list."""
    with open("scrambles.txt") as f:
        scrams = f.read().splitlines()
    data = []
    for scram in scrams:
        data.append(list(solve(scram)))
    return data
# Entry point: run the single fixed-scramble check when executed directly.
if __name__ == "__main__":
    test()
| elliottkobelansky/3BLD-Analyzer | 3BLD-Analyzer/solve.py | solve.py | py | 3,144 | python | en | code | 1 | github-code | 13 |
25130614980 | #
# Milking the Goat
# Author: Marcelo Martins
# Source: https://github.com/mmartins000/milkingthegoat
#
# Pre-requisites:
# Python modules GitPython, Docker
# Internet access to download container images and clone Git repos
import time
import datetime
import argparse
import sys
import os
from pathlib import Path
import json
from git import Repo
import docker
from docker import errors
# Hardcoded variables:
__version__ = '0.1.1'
# Default exit signals can be changed using config file
# (presumably scan-outcome codes by highest severity found — confirm against
# the exit logic later in the file)
EXIT_OK = 0
EXIT_FAIL_LOW = 1
EXIT_FAIL_MEDIUM = 2
EXIT_FAIL_HIGH = 3
# Hardcoded signals
# Operational/usage error codes; these are raised by the helper functions
# below (running_as_root, dest_not_writable, invalid_source, ...).
EXIT_NOOP = 10
EXIT_ROOT = 11
EXIT_NOT_WRITABLE = 12
EXIT_FILE_NOT_FOUND = 13
EXIT_INVALID_SOURCE = 14
EXIT_INVALID_CONFIG = 15
EXIT_FILE_FORMAT = 16
EXIT_JSON_FILE = 17
def print_version():
    """Log the application banner with the current version; always True."""
    banner = "\nMilking the Goat v{}\n".format(__version__)
    log_and_print("info", banner)
    return True  # Do not remove, used in main scope, in-line condition
def running_as_root():
    """Warn that running as root is risky, then abort with EXIT_ROOT."""
    message = "To reduce risks, do not run as root. To ignore this warning, use flag --ignore-root."
    log_and_print("critical", message)
    sys.exit(EXIT_ROOT)
def dest_not_writable(dest):
    """Abort with EXIT_NOT_WRITABLE because *dest* cannot be written to."""
    message = "Error: Destination is not writable: {}".format(dest)
    log_and_print("critical", message)
    sys.exit(EXIT_NOT_WRITABLE)
def invalid_source(source):
    """Abort with EXIT_INVALID_SOURCE because *source* is not a directory."""
    message = "Error: {} is not a valid directory.".format(source)
    log_and_print("critical", message)
    sys.exit(EXIT_INVALID_SOURCE)
def missing_config(config_file):
    """Abort with EXIT_INVALID_CONFIG because the config file was not found."""
    message = "Error: Could not locate config file: {}".format(config_file)
    log_and_print("critical", message)
    sys.exit(EXIT_INVALID_CONFIG)
def noop():
    """Abort with EXIT_NOOP: every scan is skipped and no clean-up requested."""
    message = "All scans skipped and clean up is not set. Nothing to do here."
    log_and_print("info", message)
    sys.exit(EXIT_NOOP)
def sanity_checks():
    """
    Run a few basic checks before scanning, so the user's time is not wasted.
    Called by main().
    :return:
    """
    # Refuse to run as root unless the user explicitly opted in.
    if not args.ignoreRoot and os.geteuid() == 0:
        running_as_root()
    # Bail out when every scan is skipped and no clean-up was requested.
    if (args.skipKics and args.skipCheckov and args.skipTfsec
            and args.skipTerrascan and args.skipTrivy and not args.cleanup):
        noop()
def is_url(str_url):
    """
    If this is a valid URL, we will try to clone it locally.
    The user is supposed to use a Git clone URL. This is 'best effort'.
    Called by run_scan().
    :param str_url: Destination string, supposedly a URL
    :return: True if it has a scheme, a host and a path; otherwise, False
    """
    from urllib.parse import urlparse
    try:
        o = urlparse(str_url)
        return bool(o.scheme and o.netloc and o.path)
    except ValueError:
        # Bug fix: urlparse raises ValueError (e.g. on an invalid IPv6
        # literal), not urllib.error.URLError as the original caught, so
        # malformed input used to crash instead of returning False.
        return False
def clone_repo(source_dir):
    """
    Clone a Git repository into the local clones directory.
    Called by run_scan().
    :param source_dir: Comes from args.source
    :return: the local folder name if successful, or exits with an error code
    """
    log_and_print("info", "Cloning repository in " + dest_clones)
    # Derive the local folder name from the clone URL, e.g.
    # https://github.com/user/repo.git -> repo. Works for any Git host as long
    # as the user supplied a ".git" clone URL.
    local_folder = os.path.basename(os.path.normpath(args.source)).replace(".git", "")
    try:
        repo_to_clone = dest_clones + "/" + local_folder
        if os.path.isdir(repo_to_clone):
            # Avoid cloning an already cloned repo.
            log_and_print("debug", "Repo {} has already been cloned.".format(repo_to_clone))
        else:
            log_and_print("debug", "Cloning repository {}.".format(repo_to_clone))
            Repo.clone_from(source_dir, repo_to_clone)
        return local_folder
    except PermissionError:
        dest_not_writable(dest_clones)
def clone_goats():
    """
    Clone every Goat target listed in the goats file into dest_clones.
    Called by run_scan().
    :return: True if successful or exits with error code
    """
    log_and_print("info", "Cloning the Goats in " + dest_clones)
    from git import exc
    try:
        json_goats = get_goats(goats_file)
        for target in json_goats["targets"]:
            local_folder = target["local_folder"]
            repo_to_clone = dest_clones + "/" + local_folder
            if os.path.isdir(repo_to_clone):
                # Skip repositories already cloned by a previous run.
                log_and_print("debug", "Repo {} has already been cloned.".format(repo_to_clone))
            else:
                log_and_print("debug", "Cloning repository {}.".format(repo_to_clone))
                Repo.clone_from(target["source"], repo_to_clone)
    except (PermissionError, exc.GitCommandError):
        dest_not_writable(dest_clones)
def docker_save_log(str_log, output_file):
    """
    Parse a container's stdout as JSON and pretty-print it to output_file.
    The JSON is validated BEFORE the file is opened, so invalid stdout no
    longer leaves a truncated/empty report file behind (the original opened
    the file in 'w' mode first, truncating it even when parsing failed).
    :param str_log: raw stdout captured from the container
    :param output_file: path of the JSON report to write
    :return: True on success, False otherwise (the original returned None on
        success; callers ignore the return value, so this is compatible)
    """
    try:
        parsed = json.loads(str_log)
    except json.decoder.JSONDecodeError:
        log_and_print("error", "Error: Could not understand stdout data.")
        return False
    try:
        with open(output_file, 'w') as f:
            json.dump(parsed, f, indent=4)
    except FileNotFoundError:
        log_and_print("error", "Error: Could not write to file {}.".format(output_file))
        return False
    return True
def run_docker_checkov(source_directory, force_docker_pull):
    """
    Run the Checkov container against source_directory and process the report.
    Called by run_scan().
    :param source_directory: The directory where the apps will perform their security assessment.
    :param force_docker_pull: If Docker should always download an image or not.
    :return: True
    """
    if args.skipCheckov:
        return
    log_and_print("info", "Running Checkov on {}...".format(source_directory))
    docker_pull_image(docker_image_checkov, force_docker_pull)
    start = time.time()
    # Checkov: If Docker runs with flag --tty, it will generate non-printable
    # characters at the end of JSON output, so no tty is requested here.
    # Ref: https://www.checkov.io/4.Integrations/Docker.html
    client = docker.from_env()
    container = client.api.create_container(
        image=docker_image_checkov,
        command="--quiet -d /src --output {}".format(output_format_checkov),
        volumes=['/src'],
        host_config=client.api.create_host_config(binds={
            source_directory: {'bind': '/src', 'mode': 'ro'},
        })
    )
    client.api.start(container)
    client.api.wait(container)
    # Checkov has no output-file flag here, so capture stdout into a JSON file.
    docker_save_log(client.api.logs(container).decode('utf-8'), results_checkov)
    elapsed = str(round(time.time() - start, 2)) + " seconds"
    log_and_print("info", "Docker ran Checkov container in {}{} and the report was saved in {}."
                  .format(source_directory, ", it took " + elapsed, results_checkov))
    process_checkov(load_from_json(results_checkov), results_checkov,
                    os.path.basename(os.path.normpath(source_directory)), elapsed)
def run_docker_tfsec(source_directory, force_docker_pull):
    """
    Run the tfsec container against source_directory and process the report.
    Called by run_scan().
    :param source_directory: The directory where the apps will perform their security assessment.
    :param force_docker_pull: If Docker should always download an image or not.
    :return: True
    """
    # Ref: https://aquasecurity.github.io/tfsec/v1.4.2/getting-started/usage/
    if args.skipTfsec:
        return
    log_and_print("info", "Running tfsec on {}...".format(source_directory))
    docker_pull_image(docker_image_tfsec, force_docker_pull)
    start = time.time()
    # tfsec writes its own report, so the results directory is mounted rw.
    results_tfsec_output_dir = results_tfsec.rstrip("/").rsplit('/', 1)[0]
    client = docker.from_env()
    container = client.api.create_container(
        image=docker_image_tfsec,
        command="/src --no-colour --format {} --out {}".format(output_format_tfsec, results_tfsec_filename),
        volumes=['/src'],
        host_config=client.api.create_host_config(binds={
            source_directory: {'bind': '/src', 'mode': 'ro'},
            results_tfsec_output_dir: {'bind': '/results', 'mode': 'rw'},
        })
    )
    client.api.start(container)
    client.api.wait(container)
    # No stdout capture needed: the report lands in the mounted results volume.
    elapsed = str(round(time.time() - start, 2)) + " seconds"
    log_and_print("info", "Docker ran tfsec container in {}{} and the report was saved in {}."
                  .format(source_directory, ", it took " + elapsed, results_tfsec))
    process_tfsec(load_from_json(results_tfsec), results_tfsec,
                  os.path.basename(os.path.normpath(source_directory)), elapsed)
def run_docker_kics(source_directory, force_docker_pull):
    """
    Run the KICS container against source_directory and process the report.
    Called by run_scan().
    :param source_directory: The directory where the apps will perform their security assessment.
    :param force_docker_pull: If Docker should always download an image or not.
    :return: True
    """
    if args.skipKics:
        return
    log_and_print("info", "Running KICS on {}...".format(source_directory))
    docker_pull_image(docker_image_kics, force_docker_pull)
    start = time.time()
    # KICS writes its own report (named results.json for the JSON format) into
    # the mounted /results volume; its stdout differs from the JSON object, so
    # a second rw volume is required or nothing is saved.
    # Ref: https://github.com/Checkmarx/kics/blob/master/docs/commands.md
    results_kics_output_dir = results_kics.rstrip("/").rsplit('/', 1)[0]
    client = docker.from_env()
    container = client.api.create_container(
        image=docker_image_kics,
        command="scan -p \"/src\" --silent --no-color --report-formats {} --output-path \"/results\""
                .format(output_format_kics),
        volumes=['/src', '/results'],
        host_config=client.api.create_host_config(binds={
            source_directory: {'bind': '/src', 'mode': 'ro'},
            results_kics_output_dir: {'bind': '/results', 'mode': 'rw'},
        })
    )
    client.api.start(container)
    client.api.wait(container)
    elapsed = str(round(time.time() - start, 2)) + " seconds"
    log_and_print("info", "Docker ran KICS container in {}{} and the report was saved in {}."
                  .format(source_directory, ", it took " + elapsed, results_kics.rsplit('/', 1)[0] + "/results.json"))
    process_kics(load_from_json(results_kics), results_kics,
                 os.path.basename(os.path.normpath(source_directory)), elapsed)
def run_docker_terrascan(source_directory, force_docker_pull):
    """
    Run the Terrascan container against source_directory and process the report.
    Called by run_scan().
    :param source_directory: The directory where the apps will perform their security assessment.
    :param force_docker_pull: If Docker should always download an image or not.
    :return: True
    """
    if args.skipTerrascan:
        return
    log_and_print("info", "Running Terrascan on {}...".format(source_directory))
    docker_pull_image(docker_image_terrascan, force_docker_pull)
    start = time.time()
    # No output flag was found, so stdout is captured into a JSON file.
    # '-x console' (default) sends errors to the console and
    # '--log-level fatal' keeps log noise out of the JSON output.
    # Ref: https://runterrascan.io/docs/usage/command_line_mode/
    client = docker.from_env()
    container = client.api.create_container(
        image=docker_image_terrascan,
        command="scan -d /src -x console --use-colors f --log-level fatal -o {}"
                .format(output_format_terrascan),
        volumes=['/src'],
        host_config=client.api.create_host_config(binds={
            source_directory: {'bind': '/src', 'mode': 'ro'},
        })
    )
    client.api.start(container)
    client.api.wait(container)
    docker_save_log(client.api.logs(container).decode('utf-8'), results_terrascan)
    elapsed = str(round(time.time() - start, 2)) + " seconds"
    log_and_print("info", "Docker ran Terrascan container in {}{} and the report was saved in {}."
                  .format(source_directory, ", it took " + elapsed, results_terrascan))
    process_terrascan(load_from_json(results_terrascan), results_terrascan,
                      os.path.basename(os.path.normpath(source_directory)), elapsed)
def run_docker_trivy(source_directory, force_docker_pull):
    """
    Run the Trivy container against source_directory and process the report.
    Called by run_scan().
    :param source_directory: The directory where the apps will perform their security assessment.
    :param force_docker_pull: If Docker should always download an image or not.
    :return: True
    """
    if args.skipTrivy:
        return
    log_and_print("info", "Running Trivy on {}...".format(source_directory))
    docker_pull_image(docker_image_trivy, force_docker_pull)
    start = time.time()
    # Trivy writes its own report into the mounted /results volume; its stdout
    # differs from the JSON object, so a second rw volume is required.
    # Ref: https://aquasecurity.github.io/trivy/v0.24.1/getting-started/cli/config/
    results_trivy_output_dir = results_trivy.rstrip("/").rsplit('/', 1)[0]
    results_volume_filename = "/results/{}".format(results_trivy_filename)
    results_trivy_output_dir_file = results_trivy_output_dir + "/" + results_trivy_filename
    client = docker.from_env()
    container = client.api.create_container(
        image=docker_image_trivy,
        command="-q config -f {} -o {} \"/src\"".format(output_format_trivy, results_volume_filename),
        volumes=['/src', '/results'],
        host_config=client.api.create_host_config(binds={
            source_directory: {'bind': '/src', 'mode': 'ro'},
            results_trivy_output_dir: {'bind': '/results', 'mode': 'rw'},
        })
    )
    client.api.start(container)
    client.api.wait(container)
    elapsed = str(round(time.time() - start, 2)) + " seconds"
    log_and_print("info", "Docker ran Trivy container in {}{} and the report was saved in {}."
                  .format(source_directory, ", it took " + elapsed, results_trivy_output_dir_file))
    process_trivy(load_from_json(results_trivy_output_dir_file), results_trivy_output_dir_file,
                  os.path.basename(os.path.normpath(source_directory)), elapsed)
# NOTE(review): original indentation was lost in extraction; the flattened
# control-flow lines below are kept byte-identical.
def run_scan():
"""
Decides how the app is going to run (Goats or local project), calls clone functions, calls Docker functions.
Called by main().
:return: True if cloning is successful; False if PermissionError exception raised
"""
# The module-level results_* paths are rebound per target below, then read by
# the run_docker_* functions.
global results_checkov, results_tfsec, results_kics, results_terrascan, results_trivy
args.verbose and print_version()
if args.forceDockerPull:
force_docker_pull = "always"
else:
force_docker_pull = "missing"
# If the user did not provide a source directory, we will check for a benchmark against the Goats or Locals
if not args.source:
# Goats or Locals?
if args.locals: # Domestic goats
json_iterable = get_goats(local_file)
else: # args.goats == True, Foreign Goats
json_iterable = get_goats(goats_file)
clone_goats()
log_and_print("info", "Starting IaC code scans...")
for target in json_iterable["targets"]:
local_folder = target["local_folder"]
source = str(dest_clones + "/" + local_folder).replace("//", "/")
results_dir = str(results_destination + "/" + local_folder).replace("//", "/")
# Create results directories for each Goat
try:
Path(results_dir).mkdir(parents=True, exist_ok=True)
except FileExistsError:
# Should not be called due to 'exist_ok=True'
log_and_print("debug", "Folder {} already exists.".format(results_dir))
except PermissionError:
dest_not_writable(results_dir)
# Declared outside; here for the 'for goat in goats' loop
results_checkov = \
str(results_destination + "/" + local_folder + "/" + results_checkov_filename).replace("//", "/")
results_tfsec = \
str(results_destination + "/" + local_folder + "/" + results_tfsec_filename).replace("//", "/")
results_kics = \
str(results_destination + "/" + local_folder + "/" + results_kics_filename).replace("//", "/")
results_terrascan = \
str(results_destination + "/" + local_folder + "/" + results_terrascan_filename).replace("//", "/")
results_trivy = \
str(results_destination + "/" + local_folder + "/" + results_trivy_filename).replace("//", "/")
# Run the tools
run_docker_checkov(source, force_docker_pull)
run_docker_tfsec(source, force_docker_pull)
run_docker_kics(source, force_docker_pull)
run_docker_terrascan(source, force_docker_pull)
run_docker_trivy(source, force_docker_pull)
log_and_print("info", "Finished scanning IaC code.")
else:
# The user provided a URL or a source directory.
if is_url(args.source):
# The user provided a URL.
local_folder = clone_repo(args.source)
source = str(dest_clones + "/" + local_folder).replace("//", "/")
# Create results directory for this assessment
# Declared outside; results_destination is the default folder for reports
results_local_folder = results_destination + "/" + local_folder
else:
# The user provided a source directory
os.path.isdir(args.source) or invalid_source(args.source)
# dest_clones is the default destination for Goat clones
local_folder = args.source # Here, the user is picking a full path
source = str(local_folder).replace("//", "/")
# Declared outside; results_destination is the default folder for reports
results_local_folder = results_destination + "/" + str(local_folder).rstrip("/").rsplit("/", 1)[1]
log_and_print("info", "Starting IaC code scans...")
try:
if not os.path.isdir(results_local_folder):
log_and_print("debug", "Creating folder {}.".format(results_local_folder))
Path(results_local_folder).mkdir(parents=True, exist_ok=True)
except FileExistsError:
# If the folder exists, we will use it
log_and_print("debug", "Folder {} already exists.".format(results_local_folder))
except PermissionError:
dest_not_writable(results_local_folder)
results_checkov = results_local_folder + "/" + results_checkov_filename
results_tfsec = results_local_folder + "/" + results_tfsec_filename
results_kics = results_local_folder + "/" + results_kics_filename
results_terrascan = results_local_folder + "/" + results_terrascan_filename
results_trivy = results_local_folder + "/" + results_trivy_filename
run_docker_checkov(source, force_docker_pull)
run_docker_tfsec(source, force_docker_pull)
run_docker_kics(source, force_docker_pull)
run_docker_terrascan(source, force_docker_pull)
run_docker_trivy(source, force_docker_pull)
log_and_print("info", "Finished scanning IaC code.")
# NOTE(review): original indentation was lost in extraction; the flattened
# lines below are kept byte-identical. The docstring says True/False is
# returned, but only bare `return` paths exist — confirm intended contract.
def process_checkov(json_object, json_filename, source, total_time):
"""
Processes Checkov JSON output and collects summary data
:param json_object: JSON data loaded from file
:param json_filename: The name of the file that contained the data
:param source: The last directory in the path
:param total_time: The time it took to run Checkov
:return: True if JSON structure was readable (and parsed), False otherwise
"""
# If we failed to load the file as a JSON object (in function load_from_json()), stop
if not json_object:
return
global json_milk
# Now, does the file have the structure we're looking for?
try:
for item in json_object:
str_check = item["check_type"]
str_passed = item["summary"]["passed"]
str_failed = item["summary"]["failed"]
str_skipped = item["summary"]["skipped"]
str_version = item["summary"]["checkov_version"]
# Scan-specific data to be written
json_checkov_check = {
"passed": str_passed,
"failed": str_failed,
"skipped": str_skipped
}
# If we're here, we were able to collect the info for that check_type into 'data' variable
# Let's append it to the main 'stash' and look for other check_types
# (EAFP: a KeyError means the "checkov" summary entry does not exist yet,
# so it is created before storing the per-check_type counters)
try:
json_milk[source]["checkov"][str_check] = {}
except KeyError:
try:
json_milk[source]["checkov"] = {
"json_file": json_filename,
"version": str_version,
"running_time": total_time
}
json_milk[source]["checkov"][str_check] = {}
except KeyError:
pass
finally:
json_milk[source]["checkov"][str_check].update(json_checkov_check)
except TypeError:
# It doesn't, but maybe Checkov doesn't have policies for that type of file
# So, we won't find 'check_type' there. Let's search for 'passed', 'failed', 'skipped'
try:
if json_object["passed"] == 0 and json_object["failed"] == 0 and json_object["skipped"] == 0:
json_checkov = {
"check_type": "none"
}
json_milk[source]["checkov"] = {}
json_milk[source]["checkov"]["none"] = {}
json_milk[source]["checkov"]["none"].update(json_checkov)
log_and_print("warn", "Warning: Checkov could not process this application.")
except (KeyError, TypeError):
# No, there is something wrong with this file
log_and_print("error", "Error: could not process this file: {}".format(json_filename))
def process_tfsec(json_object, json_filename, source, total_time):
    """
    Processes tfsec JSON output and collects summary data into the global
    json_milk dict under json_milk[source]["tfsec"].
    :param json_object: JSON data loaded from file
    :param json_filename: The name of the file that contained the data
    :param source: The last directory in the path
    :param total_time: The time it took to run tfsec
    :return: None (results are stored in the global json_milk)
    """
    # If we failed to load the file as a JSON object (in function load_from_json()), stop
    if not json_object:
        return
    global json_milk
    failed, high, medium, low = 0, 0, 0, 0
    try:
        # tfsec lists one entry per finding; count them and bucket by severity
        for item in json_object["results"]:
            failed += 1
            if item["severity"] == "HIGH":
                high += 1
            elif item["severity"] == "MEDIUM":
                medium += 1
            elif item["severity"] == "LOW":
                low += 1
        json_tfsec = {
            "json_file": json_filename,
            "version": get_version_tfsec(),
            "running_time": total_time,
            "check_type": "terraform",  # Hardcoded because tfsec scans Terraform code only
            "passed": 0,
            "skipped": 0,
            "failed": failed,
            "failed_by_severity": {
                "high": high,
                "medium": medium,
                "low": low
            }
        }
        json_milk[source]["tfsec"] = {}
        json_milk[source]["tfsec"].update(json_tfsec)
    except TypeError:
        # "results" was not iterable: tfsec emits {"results": null} when it has
        # nothing to report, and json.loads() turns that null into None.
        try:
            # BUG FIX: JSON null becomes Python None, not the string "null";
            # the original comparison ('== "null"') could never be true, so the
            # warning below was unreachable.
            if json_object["results"] is None or json_object["results"] == "null":
                json_tfsec = {
                    "json_file": json_filename,
                    "version": get_version_tfsec(),
                    "running_time": total_time,
                    "check_type": "none",
                    "passed": 0,
                    "failed": 0,
                    "skipped": 0
                }
                json_milk[source]["tfsec"] = {}
                json_milk[source]["tfsec"].update(json_tfsec)
                log_and_print("warn", "Warning: tfsec did not process this application.")
        except KeyError:
            log_and_print("error", "Error: could not process this file: {}".format(json_filename))
def process_kics(json_object, json_filename, source, total_time):
    """
    Processes KICS JSON output and collects summary data into the global
    json_milk dict under json_milk[source]["kics"].
    :param json_object: JSON data loaded from file
    :param json_filename: The name of the file that contained the data
    :param source: The last directory in the path
    :param total_time: The time it took to run KICS
    :return: None (results are stored in the global json_milk)
    """
    # If we failed to load the file as a JSON object (in function load_from_json()), stop
    if not json_object:
        return
    global json_milk
    # Does the file have the structure we're looking for?
    try:
        # KICS flags INFO severity checks like "Variables are not snake case";
        # INFO and TRACE severities are excluded to make comparison easier.
        high = json_object["severity_counters"]["HIGH"]
        medium = json_object["severity_counters"]["MEDIUM"]
        low = json_object["severity_counters"]["LOW"]
        failed = int(high) + int(medium) + int(low)
        # INFO is also excluded from the total (passed), like it never happened
        passed = json_object["queries_total"] - failed - json_object["severity_counters"]["INFO"]
        skipped = json_object["queries_failed_to_execute"]
        version = json_object["kics_version"]
        # Append the collected summary to the main stash
        json_milk[source]["kics"] = {
            "json_file": json_filename,
            "version": version,
            "running_time": total_time,
            "skipped": skipped,
            "passed": passed,
            "failed": failed,
            "failed_by_severity": {
                "high": high,
                "medium": medium,
                "low": low
            }
        }
    except (KeyError, TypeError):
        # Expected keys were missing; maybe KICS has no policies for this type of file.
        # Search for 'passed', 'failed', 'skipped' instead.
        try:
            if json_object["passed"] == 0 and json_object["failed"] == 0 and json_object["skipped"] == 0:
                json_milk[source]["kics"] = {
                    "json_file": json_filename,
                    "check_type": "none"
                }
                log_and_print("warn", "Warning: KICS could not process this application.")
        except (KeyError, TypeError):
            # No, there is something wrong with this file
            log_and_print("error", "Error: could not process this file: {}".format(json_filename))
def process_terrascan(json_object, json_filename, source, total_time):
    """
    Processes Terrascan JSON output and collects summary data into the global
    json_milk dict under json_milk[source]["terrascan"].
    :param json_object: JSON data loaded from file
    :param json_filename: The name of the file that contained the data
    :param source: The last directory in the path
    :param total_time: The time it took to run Terrascan
    :return: None (results are stored in the global json_milk)
    """
    # If we failed to load the file as a JSON object (in function load_from_json()), stop
    if not json_object:
        return
    global json_milk
    try:
        # Read every field first so a missing key raises before any Docker call
        skipped = json_object["results"]["skipped_violations"]
        check = json_object["results"]["scan_summary"]["iac_type"]
        passed = json_object["results"]["scan_summary"]["policies_validated"]
        failed = json_object["results"]["scan_summary"]["violated_policies"]
        low = json_object["results"]["scan_summary"]["low"]
        medium = json_object["results"]["scan_summary"]["medium"]
        high = json_object["results"]["scan_summary"]["high"]
        json_milk[source]["terrascan"] = {
            "json_file": json_filename,
            "version": get_version_terrascan(),
            "check_type": check,
            "running_time": total_time,
            "skipped": skipped,
            "passed": passed,
            "failed": failed,
            "failed_by_severity": {
                "high": high,
                "medium": medium,
                "low": low
            }
        }
    except TypeError:
        log_and_print("warn", "Warning: Terrascan could not process this application.")
    except KeyError:
        log_and_print("error", "Error: could not process this file: {}".format(json_filename))
def process_trivy(json_object, json_filename, source, total_time):
    """
    Processes Trivy JSON output and collects summary data into the global
    json_milk dict under json_milk[source]["trivy"].
    Tested with Trivy v0.24.1, which is very buggy.
    ---> Raised fatal errors with Terragoat and CfnGoat.
    :param json_object: JSON data loaded from file
    :param json_filename: The name of the file that contained the data
    :param source: The last directory in the path
    :param total_time: The time it took to run Trivy
    :return: None (results are stored in the global json_milk)
    """
    # If we failed to load the file as a JSON object (in function load_from_json()), stop
    if not json_object:
        return
    global json_milk
    passed, failed, skipped, high, medium, low = 0, 0, 0, 0, 0, 0
    check_type = []
    try:
        for assessed_file in json_object["Results"]:
            check_type.append(assessed_file["Type"])
            passed += assessed_file["MisconfSummary"]["Successes"]
            failed += assessed_file["MisconfSummary"]["Failures"]
            skipped += assessed_file["MisconfSummary"]["Exceptions"]
            # "Misconfigurations" is only present when there are failures
            if assessed_file["MisconfSummary"]["Failures"] > 0:
                for misconfiguration in assessed_file["Misconfigurations"]:
                    if misconfiguration["Severity"] == "HIGH":
                        high += 1
                    elif misconfiguration["Severity"] == "MEDIUM":
                        medium += 1
                    elif misconfiguration["Severity"] == "LOW":
                        low += 1
        # Select unique values then sort
        check_type = sorted(set(check_type))
        json_trivy = {
            "json_file": json_filename,
            "version": get_version_trivy(),
            "running_time": total_time,
            "check_type": check_type,
            "passed": passed,
            "skipped": skipped,
            "failed": failed,
            "failed_by_severity": {
                "high": high,
                "medium": medium,
                "low": low
            }
        }
        json_milk[source]["trivy"] = {}
        json_milk[source]["trivy"].update(json_trivy)
    except TypeError:
        # "Results" was not iterable: Trivy emits {"Results": null} when it has
        # nothing to report, and json.loads() turns that null into None.
        try:
            # BUG FIX: the original checked json_object["results"] (lowercase),
            # which never exists in Trivy output and so raised KeyError, and it
            # compared against the string "null" instead of None. The warning
            # below was unreachable and an error was always logged instead.
            if json_object["Results"] is None:
                json_trivy = {
                    "json_file": json_filename,
                    "version": get_version_trivy(),
                    "running_time": total_time,
                    "check_type": "none",
                    "passed": 0,
                    "failed": 0,
                    "skipped": 0
                }
                json_milk[source]["trivy"] = {}
                json_milk[source]["trivy"].update(json_trivy)
                log_and_print("warn", "Warning: Trivy did not process this application.")
        except KeyError:
            log_and_print("error", "Error: could not process this file: {}".format(json_filename))
def load_from_json(json_filename):
    """
    Loads a text file into a JSON object.
    Called by process_checkov(), process_tfsec(), process_kics(), process_terrascan() and process_trivy().
    :param json_filename: JSON file to be processed
    :return: parsed JSON object on success; False if the file is missing or not valid JSON
    """
    try:
        with open(json_filename, 'r', encoding='utf-8') as handle:
            try:
                return json.load(handle)
            except json.decoder.JSONDecodeError:
                log_and_print("error", "Error: Could not decode {} as JSON data file.".format(json_filename))
                return False
    except FileNotFoundError:
        log_and_print("error", "Error: Could not find file {}.".format(json_filename))
        return False
def write_json_to_file(data, filename):
    """
    Writes the collected JSON dict to a file under results_destination.
    Called by main().
    :param data: JSON object (the global json_milk)
    :param filename: Destination file name (joined with results_destination)
    :return: None; dest_not_writable() is invoked on PermissionError
    """
    # Clean up destination path: strip quote characters and collapse double slashes
    full_dest = results_destination + '/' + filename
    for char in "\'\"":
        full_dest = full_dest.replace(char, "")
    full_dest = full_dest.replace("//", "/")
    # Stamp the end time and derive the run duration from the start/end strings
    date_format = "%Y-%m-%d %H:%M:%S"
    meta = data["milking_the_goat"]
    meta["end_time"] = str(datetime.datetime.now().strftime(date_format))
    elapsed = datetime.datetime.strptime(meta["end_time"], date_format) \
        - datetime.datetime.strptime(meta["start_time"], date_format)
    meta["duration_seconds"] = int(elapsed.total_seconds())
    meta["extended_duration"] = display_time(int(elapsed.total_seconds()))
    try:
        Path(results_destination).mkdir(parents=True, exist_ok=True)
        with open(full_dest, 'w') as out:
            json.dump(data, out, indent=4)
        log_and_print("info", "Milking the Goat JSON saved to {}".format(full_dest))
    except PermissionError:
        dest_not_writable(full_dest)
def display_time(seconds, granularity=5):
    """
    Converts seconds into a human-readable string with weeks, days, hours, etc.
    :param seconds: The number of seconds to convert (non-negative int)
    :param granularity: Maximum number of units to include in the result
    :return: String such as "1 day, 2 hours, 3 minutes" ("" for 0 seconds)
    """
    # Modified from:
    # https://stackoverflow.com/questions/4048651/python-function-to-convert-seconds-into-minutes-hours-and-days
    intervals = (
        ('weeks', 604800),    # 60 * 60 * 24 * 7
        ('days', 86400),      # 60 * 60 * 24
        ('hours', 3600),      # 60 * 60
        ('minutes', 60),
        ('seconds', 1),
    )
    result = []
    for name, count in intervals:
        value = seconds // count
        # FIX: the original had a redundant nested 'if value >= 1' inside
        # 'if value' -- for non-negative ints they are the same condition.
        if value:
            seconds -= value * count
            if value == 1:
                name = name.rstrip('s')  # singular form, e.g. "1 hour"
            result.append("{} {}".format(value, name))
    return ', '.join(result[:granularity])
def create_json_structure():
    """
    Create the basic JSON result object in the global json_milk dict.
    Adds a "milking_the_goat" metadata section plus one entry per target
    (either the Goat pack, local projects, or a single --source project).
    Called by main().
    :return: None on success; exits with EXIT_JSON_FILE if the Goats config
             cannot be read
    """
    # Only embed the command line in the report when the config allows it
    if bool_command_line_args:
        str_command_line_args = str(args)
    else:
        str_command_line_args = None
    try:
        dc = docker.from_env()
        if bool_docker_version:
            str_docker_version = dc.version()
        else:
            str_docker_version = None
    except docker.errors.APIError as e:
        # 'e' is intentionally unused; the message below is enough for the log
        log_and_print("error", "Error: Could not read version from Docker client.")
        str_docker_version = None
    finally:
        # JSON result file, schema version 1.
        # "end_time" and "duration" are placeholders; write_json_to_file()
        # later fills end_time and adds duration_seconds/extended_duration.
        data = {
            "milking_the_goat": {
                "script_version": __version__,
                "json_schema_version": "1",
                "url": "https://github.com/mmartins000/milkingthegoat",
                "command_line_args": str_command_line_args,
                "docker_version": str_docker_version,
                "start_time": str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
                "end_time": "",
                "duration": ""
            }
        }
    try:
        if not args.source:
            # Milking the well-known Goats from Github
            # Goats or Locals?
            if args.locals: # Domestic goats
                json_iterable = get_goats(local_file)
            else: # args.goats == True, Foreign goats
                json_iterable = get_goats(goats_file)
            # One top-level key per target, named after its local folder
            for target in json_iterable["targets"]:
                json_goat = {
                    target["local_folder"]: {
                        "git_http_url": target["source"]
                    }
                }
                data.update(json_goat)
        else:
            # Milking a local Goat (local project); key is the folder name
            # with any trailing ".git" removed
            json_local = {
                str(os.path.basename(os.path.normpath(args.source)).replace(".git", "")): {
                    "source": args.source
                }
            }
            data.update(json_local)
        json_milk.update(data)
    except (TypeError, KeyError, IndexError):
        # get_goats() may return False, making json_iterable["targets"] raise
        log_and_print("error", "Error: Could not read Goats from config file.")
        exit(EXIT_JSON_FILE)
def get_version_trivy():
    """
    Runs Docker to get the Trivy version. Trivy JSON output doesn't contain the executable version.
    Called by process_trivy().
    :return: String with version, or None when Trivy is skipped
    """
    if args.skipTrivy:
        return None
    # Trivy prints its version to stdout as: "Version: vn.n.n"
    output = get_version(container_image=docker_image_trivy, command_to_run='--version')
    return output.split(" ")[1]
def get_version_tfsec():
    """
    Runs Docker to get the tfsec version. tfsec JSON output doesn't contain the executable version.
    Called by process_tfsec().
    :return: String with version, or None when tfsec is skipped
    """
    if args.skipTfsec:
        return None
    # tfsec prints its version to stdout as: "vn.n.n"
    return get_version(container_image=docker_image_tfsec, command_to_run='--version')
def get_version_terrascan():
    """
    Runs Docker to get the Terrascan version. Terrascan JSON output doesn't contain the executable version.
    Called by process_terrascan().
    :return: String with version, or None when Terrascan is skipped
    """
    if args.skipTerrascan:
        return None
    # Terrascan prints its version to stdout as: "version: vn.n.n"
    output = get_version(container_image=docker_image_terrascan, command_to_run='version')
    return output.split(" ")[1]
def get_version(container_image, command_to_run):
    """
    Runs Docker to get a container image version from its stdout.
    Called by get_version_tfsec(), get_version_terrascan() and get_version_trivy().
    :param container_image: Image to run, format 'repo/image:tag'
    :param command_to_run: Command passed to the container (e.g. '--version')
    :return: String containing Container version or "None"
    """
    docker_pull_image(container_image, args.forceDockerPull)
    client = docker.from_env()
    try:
        raw = client.containers.run(image=container_image,
                                    command=command_to_run,
                                    remove=True, tty=False, detach=False)
    except docker.errors.ContainerError:
        return "None"
    # Container stdout is returned as bytes; strip newlines after decoding
    return raw.decode("utf-8").replace('\n', '')
def docker_pull_image(image_name, force_docker_pull):
    """
    Pulls the image described in config file.
    docker.containers.run() and docker.api.create_container() don't include a --pull flag
    :param image_name: Container image to be downloaded, format: 'repo/image:tag'
    :param force_docker_pull: If Docker should always download an image or not.
    :return: True if succeeded, False if exception raised
    """
    dc = docker.from_env()
    if force_docker_pull == "always":
        # We will download if --pull "always"
        log_and_print("info", "Downloading Docker image for {}.".format(image_name))
        try:
            dc.api.pull(image_name)
        except docker.errors.APIError as e:
            log_and_print("error", "{}".format(e.args))
            return False
        else:
            log_and_print("info", "Docker just downloaded image {}.".format(image_name))
            # FIX: return True as the docstring promises (was implicit None)
            return True
    # We will check if we have the image because --pull "missing"
    found_image = False
    for image in dc.images.list(all=True):
        # BUG FIX: locally built/untagged images have an empty RepoDigests
        # list; unconditional [0] indexing raised IndexError here.
        digests = image.attrs.get('RepoDigests') or []
        if digests and digests[0].split("@")[0] == image_name.split(":")[0]:
            found_image = True
            log_and_print("debug", "Docker image {} found and will not be downloaded.".format(image_name))
            break
    if found_image:
        return True
    # We don't have the image and --pull "missing"
    try:
        dc.api.pull(image_name)
    except docker.errors.APIError as e:
        log_and_print("error", "{}".format(e.args))
        return False
    else:
        log_and_print("info", "Docker just downloaded image {}.".format(image_name))
        return True
def prune_images():
    """
    Prunes dangling (untagged) images if they are described in config file.
    :return: None
    """
    dc = docker.from_env()
    for image in dc.images.list(all=True, filters={'dangling': True}):
        # BUG FIX: locally built images have an empty RepoDigests list;
        # unconditional [0] indexing raised IndexError and aborted the prune.
        digests = image.attrs.get('RepoDigests') or []
        if not digests:
            continue
        repo = digests[0].split("@")[0]
        # NOTE(review): this reads config["images"], but the main scope only
        # shows a "scanners" section being used -- confirm the config file
        # actually has an "images" section, otherwise this raises KeyError.
        for key, value in config["images"].items():
            if repo in value:
                try:
                    dc.images.remove(image.id)
                except docker.errors.ContainerError as e:
                    log_and_print("error", "{}".format(e.stderr))
                except docker.errors.ImageNotFound as e:
                    log_and_print("error", "{}".format(e.args))
                else:
                    # images.remove() does not return anything
                    log_and_print("debug", "Removed {}, image.short_id={}"
                                  .format(image.attrs['Config']['Image'], image.short_id))
def prune_containers():
    """
    Prunes exited containers if they were run using the Docker module.
    :return: None
    """
    client = docker.from_env()
    # One filter per configured scanner, matched on the untagged image name
    for scanner in config["scanners"]:
        ancestor = config["scanners"][scanner]["image"].split(":")[0]
        for container in client.containers.list(all=True, filters={'status': 'exited', 'ancestor': ancestor}):
            try:
                container.remove()
            except docker.errors.ContainerError as e:
                log_and_print("error", "{}".format(e.stderr))
            else:
                # container.remove() does not return anything
                log_and_print("debug", "Removed {}, container.short_id={}"
                              .format(container.attrs['Config']['Image'], container.short_id))
def clean_up():
    """
    Removes Git cloned repos and Docker images.
    Called by main().
    :return: True in case of success; exits with error code if failed to remove Goats
    """
    import shutil
    if not args.cleanup and not args.onlyCleanup:
        return
    log_and_print("info", "Starting clean up...")
    dc = docker.from_env()
    # Remove Docker images: $ docker rmi [image]
    # There shall be containers to be pruned because I can't use flag remove=True using create_container() function
    for scanner in config["scanners"]:
        try:
            dc.images.remove(config["scanners"][scanner]["image"].split(":")[0])
        except docker.errors.ImageNotFound as e:
            # The image should be there.
            log_and_print("error", "{}".format(e.args))
        except docker.errors.ContainerError:
            log_and_print("error", "Error: Could not remove Docker image {}.".format(
                config["scanners"][scanner]["image"].split(":")[0]
            ))
    # Remove goat clones (only dest_clones/results folder will remain):
    if not args.source:
        # BUG FIX: honour --locals the same way create_json_structure() does;
        # the original always read goats_file, so --locals clones were never removed.
        json_goats = get_goats(local_file if args.locals else goats_file)
        for target in json_goats["targets"]:
            # This will remove directories; the reason for having running_as_root()
            target_dir = dest_clones + "/" + target["local_folder"]
            if os.path.isdir(target_dir):
                # We know the directory is there
                try:
                    shutil.rmtree(target_dir)
                except PermissionError:
                    dest_not_writable(target_dir)
                else:
                    log_and_print("debug", "Directory {} was removed.".format(target_dir))
    # Do not send this message to log
    args.verbose and print("Done.\n")
    return True  # Do not remove
def get_goats(goats):
    """
    Reads the goats file specified in milking_goat.json (default: goats.json) into a JSON object.
    Called by clone_goats(), run_scan(), create_json_structure() and clean_up().
    :param goats: Filename
    :return: parsed JSON object (or False from load_from_json); False if the file cannot be read
    """
    try:
        result = load_from_json(goats)
    except (FileNotFoundError, PermissionError):
        return False
    return result
def log_and_print(msg_loglevel, message):
    """
    Logs a message to stdout and/or file.
    :param msg_loglevel: Log level defined for that message ("debug".."critical")
    :param message: The message itself
    :return: None
    """
    # Numeric ranking of the supported log levels
    levels = {
        "debug": 0,
        "info": 1,
        "warn": 2,
        "error": 3,
        "critical": 4
    }
    rank = levels.get(msg_loglevel)
    # Print to stdout when verbose and the message is above debug level
    if rank > 0 and verbose_mode:
        print(message)
    # Write to the log when the message level reaches the configured threshold
    if rank >= levels.get(log_level, 1):
        if logging_enabled:
            write_to_log(msg_level=msg_loglevel, log_dest=log_output, message_to_log=message)
def write_to_log(msg_level, log_dest, message_to_log):
    """
    Appends one log record (plain text or JSON line) to the log file.
    Called by log_and_print().
    :param msg_level: Log level before the message
    :param log_dest: Destination where data will be written
    :param message_to_log: Text to be logged
    :return: None; dest_not_writable() is invoked on PermissionError
    """
    stamp = str(datetime.datetime.now().strftime(log_datetime_format))
    try:
        # Creates recursive path if it doesn't exist (should have been created by start_logging())
        Path(results_destination).mkdir(parents=True, exist_ok=True)
        with open(log_dest, 'a') as log_file:
            # Drop any trailing newline the caller may have included
            record = message_to_log.strip('\n')
            if logging_as_json:
                log_file.write(json.dumps({
                    "datetime": str(stamp),
                    "level": str(msg_level),
                    "message": str(record)
                }) + "\n")
            else:
                log_file.write(stamp + log_sep + msg_level + log_sep + record + '\n')
    except PermissionError:
        dest_not_writable(log_dest)
def prepare_signal_stats(json_object):
    """
    Reads vulnerability numbers from statistics to format a JSON that will be
    used to choose exit signals.
    :param json_object: JSON containing all reports (the global json_milk)
    :return: JSON dict with number of vulnerabilities by severity
    """
    int_high, int_medium, int_low = 0, 0, 0
    # Hoisted out of the loops: the set of configured scanner names is invariant
    scanner_names = set(config["scanners"])
    for target in json_object:
        if target == "milking_the_goat":
            continue
        try:
            for scanner in json_object[target]:
                if scanner not in scanner_names:
                    continue
                try:
                    by_severity = json_object[target][scanner]["failed_by_severity"]
                    int_high += int(by_severity["high"])
                    int_medium += int(by_severity["medium"])
                    int_low += int(by_severity["low"])
                # BUG FIX: 'except (KeyError or TypeError)' evaluates the
                # boolean expression first and only caught KeyError; a tuple
                # is required to catch both exception types.
                except (KeyError, TypeError):
                    # Could not find "failed_by_severity" key
                    pass
        except (KeyError, TypeError):
            pass
    signal_stats = {
        "high": int_high,
        "medium": int_medium,
        "low": int_low
    }
    return signal_stats
def choose_exit_signal(json_object):
    """
    Chooses the exit signal when used in CI.
    Makes more sense when only one image is assessed in each run, because many images may be assessed but
    only one signal will be returned.
    :param json_object: JSON object containing the summary for each target image
    :return: never returns normally; calls sys.exit() with a signal depending on config and findings
    """
    high = json_object["high"]
    medium = json_object["medium"]
    low = json_object["low"]
    log_and_print("debug", "Found: {} high, {} medium and {} low vulnerabilities."
                  .format(high, medium, low))
    # Most severe configured threshold wins; each threshold also counts
    # everything more severe than itself.
    if bool_fail_on_high and high > 0:
        log_and_print("debug", "Exiting with signal EXIT_FAIL_HIGH: {}".format(EXIT_FAIL_HIGH))
        sys.exit(EXIT_FAIL_HIGH)
    if bool_fail_on_medium and (high > 0 or medium > 0):
        log_and_print("debug", "Exiting with signal EXIT_FAIL_MEDIUM: {}".format(EXIT_FAIL_MEDIUM))
        sys.exit(EXIT_FAIL_MEDIUM)
    if bool_fail_on_low and (high > 0 or medium > 0 or low > 0):
        log_and_print("debug", "Exiting with signal EXIT_FAIL_LOW: {}".format(EXIT_FAIL_LOW))
        sys.exit(EXIT_FAIL_LOW)
    log_and_print("debug", "Exiting with signal EXIT_OK: {}".format(EXIT_OK))
    sys.exit(EXIT_OK)
def start_logging(log_dest):
    """
    Removes and recreates the logfile, according to config.
    :param log_dest: Full path to log file
    :return: None; dest_not_writable() is invoked on PermissionError
    """
    try:
        # Truncate by deleting when the config asks for a fresh log
        if logging_overwrite_file_if_exists:
            os.remove(log_dest)
        Path(log_dest).touch(exist_ok=True)
    except FileNotFoundError:
        # Raised either by os.remove() (no old log) or by touch() (missing
        # parent directory): create the results path, then the empty file.
        Path(results_destination).mkdir(parents=True, exist_ok=True)
        Path(log_dest).touch(exist_ok=True)
    except PermissionError:
        dest_not_writable(log_dest)
def main():
    """
    Top-level driver: set up logging, build the result skeleton, run all
    scanners, persist the report, prune Docker leftovers, clean up clones,
    and exit with a CI signal derived from the findings.
    """
    start_logging(log_dest=log_output)
    sanity_checks()
    create_json_structure()
    run_scan()
    # json_milk was populated by run_scan() via the process_*() functions
    write_json_to_file(json_milk, results_filename)
    prune_containers()
    prune_images()
    clean_up()
    # choose_exit_signal() calls sys.exit(), so nothing runs after this line
    choose_exit_signal(prepare_signal_stats(json_milk))
# Main scope: Argument Parser
parser = argparse.ArgumentParser()
# --source, --goats and --locals are mutually exclusive target selectors
target_group = parser.add_mutually_exclusive_group()
target_group.add_argument("-s", "--source", help="Run against a source directory", dest='source')
target_group.add_argument("-g", "--goats", help="Run against the Goat pack (default)", dest='goats',
                          action='store_true', default=True)
target_group.add_argument("-l", "--locals", help="Run against a list of local projects", dest='locals',
                          action='store_true')
parser.add_argument("--version", help="Print current version and exit", dest='version', action='store_true')
parser.add_argument("-f", "--config", help="Config file", dest='config', default="milking_goat.json")
parser.add_argument("-k", "--skip-kics", help="Skip KICS execution", dest='skipKics', action='store_true')
parser.add_argument("-c", "--skip-checkov", help="Skip Checkov execution", dest='skipCheckov', action='store_true')
parser.add_argument("-t", "--skip-tfsec", help="Skip tfsec execution", dest='skipTfsec', action='store_true')
parser.add_argument("-e", "--skip-terrascan", help="Skip Terrascan execution", dest='skipTerrascan',
                    action='store_true')
parser.add_argument("-y", "--skip-trivy", help="Skip Trivy execution", dest='skipTrivy', action='store_true')
parser.add_argument("--force-docker-pull", help="Make Docker pull the image on every run",
                    dest='forceDockerPull', action='store_true')
parser.add_argument("-v", "--verbose", help="Verbose mode", dest='verbose', action='store_true')
parser.add_argument("-o", "--output", help="Override output_folder parameter in config file", dest='output')
parser.add_argument("-i", "--ignore-root", help="Ignore being executed as root", dest='ignoreRoot', action='store_true')
parser.add_argument("-x", "--cleanup", help="Enable clean up after execution", dest='cleanup', action='store_true')
parser.add_argument("--only-cleanup", help="Execute a clean up and exit", dest='onlyCleanup', action='store_true')
args = parser.parse_args()
# Main scope: config file.
# Every value below becomes a module-level global used by the functions above.
try:
    config = load_from_json(args.config)
    # Scanners, Image
    docker_image_checkov = config["scanners"]["checkov"]["image"]
    docker_image_tfsec = config["scanners"]["tfsec"]["image"]
    docker_image_kics = config["scanners"]["kics"]["image"]
    docker_image_terrascan = config["scanners"]["terrascan"]["image"]
    docker_image_trivy = config["scanners"]["trivy"]["image"]
    # Scanners, Output format
    output_format_checkov = config["scanners"]["checkov"]["output_format"]
    output_format_tfsec = config["scanners"]["tfsec"]["output_format"]
    output_format_kics = config["scanners"]["kics"]["output_format"]
    output_format_terrascan = config["scanners"]["terrascan"]["output_format"]
    output_format_trivy = config["scanners"]["trivy"]["output_format"]
    # Scanners, Output filename
    results_checkov_filename = config["scanners"]["checkov"]["output_filename"]
    results_tfsec_filename = config["scanners"]["tfsec"]["output_filename"]
    results_kics_filename = config["scanners"]["kics"]["output_filename"]
    results_terrascan_filename = config["scanners"]["terrascan"]["output_filename"]
    results_trivy_filename = config["scanners"]["trivy"]["output_filename"]
    # Input, Sources
    goats_file = config["input"]["goats_source"]
    local_file = config["input"]["local_source"]
    # Output, Destinations
    dest_clones = config["output"]["clones_destination"]
    results_destination = config["output"]["results_destination"]
    goats_destination = config["output"]["goats_destination"]
    results_filename = config["output"]["results_filename"]
    bool_command_line_args = config["output"]["command_line_args"]
    bool_docker_version = config["output"]["docker_version"]
    # Logging
    verbose_mode = config["logging"]["verbose_stdout"]
    logging_enabled = config["logging"]["logging_enabled"]
    logging_as_json = config["logging"]["logging_as_json"]
    logging_overwrite_file_if_exists = config["logging"]["logging_overwrite_file_if_exists"]
    log_level = config["logging"]["logging_level"]
    log_output = config["logging"]["logging_file"]
    log_sep = config["logging"]["logging_separator"]
    log_datetime_format = config["logging"]["logging_datetime_format"]
    # Settings
    bool_ignore_running_as_root = config["settings"]["ignore_running_as_root"]
    # CI, fail on findings
    bool_fail_on_high = config["ci"]["fail_on_findings"]["fail_on_high"]
    bool_fail_on_medium = config["ci"]["fail_on_findings"]["fail_on_medium"]
    bool_fail_on_low = config["ci"]["fail_on_findings"]["fail_on_low"]
    # CI, exit signals
    EXIT_OK = int(config["ci"]["exit_signals"]["exit_ok"])
    EXIT_FAIL_LOW = int(config["ci"]["exit_signals"]["exit_fail_low"])
    EXIT_FAIL_MEDIUM = int(config["ci"]["exit_signals"]["exit_fail_medium"])
    EXIT_FAIL_HIGH = int(config["ci"]["exit_signals"]["exit_fail_high"])
    # About output formats:
    # https://github.com/bridgecrewio/checkov/blob/master/docs/2.Basics/Reviewing%20Scan%20Results.md
    # https://aquasecurity.github.io/tfsec/v1.0.11/getting-started/usage/
    # https://github.com/Checkmarx/kics/blob/master/docs/results.md
    # https://runterrascan.io/docs/usage/command_line_mode/#configuring-the-output-format-for-a-scan
    # Variables declared in main scope
    results_checkov = ""
    results_tfsec = ""
    results_kics = ""
    results_terrascan = ""
    results_trivy = ""
    # Overrides sinker_output_folder from config file
    if args.output:
        results_destination = args.output
    # Overrides ignore_running_as_root from config file
    if args.ignoreRoot:
        bool_ignore_running_as_root = True
    # Overrides verbose_mode from config file
    if args.verbose:
        verbose_mode = True
    # Main JSON for the app
    json_milk = {}
except (PermissionError, FileNotFoundError):
    # NOTE: load_from_json() itself catches FileNotFoundError and returns
    # False; this handler mainly covers PermissionError on open()
    missing_config(args.config)
else:
    # clean_up() depends on config[] and config section above depends on ArgParser (defined before)
    # NOTE(review): these chained 'and' expressions only call sys.exit() when
    # print_version()/clean_up() return a truthy value -- confirm they do.
    args.version and print_version() and sys.exit(EXIT_OK)
    args.onlyCleanup and clean_up() and sys.exit(EXIT_OK)
if __name__ == "__main__":
    main()
| mmartins000/milkingthegoat | milking_goat.py | milking_goat.py | py | 59,803 | python | en | code | 0 | github-code | 13 |
33565502718 | import re, urllib, urllib2
arguments = ["self", "info", "args"]
helpstring = "wra <math stuff>"
minlevel = 1
def main(connection, info, args):
    """
    Queries Wolfram|Alpha with the joined command arguments and messages back
    any parseable input/result pairs scraped from the HTML response.
    :param connection: IRC connection object (must provide .msg())
    :param info: dict with at least a "channel" key
    :param args: command tokens; args[1:] form the query
    """
    encoded = urllib.urlencode({"i": " ".join(args[1:])})
    request = urllib2.Request("http://www.wolframalpha.com/input/", encoded)
    request.add_header('User-Agent', 'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.2.15 Version/10.00')
    html = urllib2.urlopen(request)
    html2 = html.read()
    html.close()
    html = html2
    x = re.findall('<hr class="top" /><h2>(.*)</h2>.*<div class="output pnt".*alt="(.*)" title=".*"\s*/>', html)
    send = []
    for y in x:
        # Skip entries that still contain markup entities/tags or a literal "\n".
        # BUG FIX: the original tested '">" in y[0]' twice and never checked
        # for "<" in y[0]; mirror the pair of checks applied to y[1].
        if "<" in y[0] or ">" in y[0] or ">" in y[1] or "<" in y[1] or "\\n" in y[0] or "\\n" in y[1]:
            pass
        else:
            send.append("%s %s" % (y[0], y[1]))
    if send != []:
        connection.msg(info["channel"], "\n".join(send))
    else:
        connection.msg(info["channel"], _("No (parseable) results found."))
| sonicrules1234/sonicbot | oldplugins/wra.py | wra.py | py | 965 | python | en | code | 10 | github-code | 13 |
36569740341 | numeros = [1,2,3,4,5,6]
# Product of every element except self, via prefix/suffix running products.
saida = [1] * len(numeros)
running = 1
# Forward pass: saida[i] holds the product of everything before index i
for i in range(len(numeros)):
    saida[i] = running
    running = running * numeros[i]
running = 1
# Backward pass: multiply in the product of everything after index i
for i in reversed(range(len(numeros))):
    saida[i] = saida[i] * running
    running = running * numeros[i]
for valor in saida:
    print(valor)
12269757258 | import sys
sys.stdin = open("Ladder1_input.txt", "r")
for testcase in range(1, 11):
    # Discard the test-case number line; the 100x100 grid follows.
    input()
    matrix = [input().split() for _ in range(100)]
    # Locate the '2' marker on the bottom row: that is where the climb starts.
    x = 0
    for col in range(100):
        if matrix[99][col] == '2':
            x = col
            break
    y = 99
    direction = 0  # 0: moving up, -1: moving left, 1: moving right
    # Walk the ladder upward, following each horizontal rung to its end
    # without reversing back along the rung just traversed.
    while y != 0:
        if direction != 1 and x != 0 and matrix[y][x - 1] == '1':
            direction = -1
            x -= 1
        elif direction != -1 and x != 99 and matrix[y][x + 1] == '1':
            direction = 1
            x += 1
        else:
            direction = 0
            y -= 1
    print(f"#{testcase} {x}")
25207618914 | import os, pytest, yaml
import blazon
from blazon import Schematic, json
def test_from_file():
    """Exercise schema loading from YAML files and Schematic validation.

    NOTE: 'json' here is blazon.json (imported at module top), not the
    stdlib module; from_file()/schemas are blazon APIs.
    """
    # Build a Schematic class from the swagger.yaml schema on disk
    class Swagger(Schematic):
        __schema__ = json.from_file(
            os.path.join(os.path.dirname(__file__), "schemas", "swagger.yaml"), name="Swagger"
        )
    # An empty instance must fail validation (required fields missing)
    s = Swagger()
    assert not s.validate()
    # Partial validation accepts a document with only some required fields
    assert Swagger.__schema__({"openapi": "3.0.0"}, partial=True)
    # Fill in the required fields; the instance should now validate
    s.info = {"title": "Swagger Petstore", "version": "0.0.1"}
    s.openapi = "3.0.0"
    s.paths = []
    assert s.validate()
    # Load a real document (petstore.yaml) into a Swagger instance
    petstore_path = os.path.join(os.path.dirname(__file__), "data", "petstore.yaml")
    with open(petstore_path) as o:
        data = yaml.safe_load(o)
    petstore = Swagger(**data)
    assert petstore.info["title"] == "Swagger Petstore"
    assert petstore.validate()
    old_value = petstore.get_value()
    # A sub-schema registered by the swagger.yaml load is available by name
    class Info(Schematic):
        __schema__ = json.schemas["Info"]
    petstore.info = Info(title="Swagger Petstore", version="0.0.1")
    # Still validates
    assert petstore.validate()
    # .info is now an Info object
    assert isinstance(petstore.info, Info)
    # Now we access it with dots because info isn't a dict, it's an Info Schematic
    assert petstore.info.version == "0.0.1"
    # Still the same 'value' as before
    assert old_value == petstore.get_value()
    # We can assign it as a dict, shows up as Info object
    petstore.info = {"title": "Swagger Petstore", "version": "0.0.1"}
    assert old_value == petstore.get_value()
    assert isinstance(petstore.info, Info)
20424029473 | from vedo import Mesh
from math import fabs
import numpy as np
import Sofa.SofaBaseTopology
def find_fixed_box(source_file, scale):
    """
    Find the fixed box of the model: a thin box at the "bottom" of the mesh
    along its largest extent, used to pin part of the object.

    :param str source_file: Mesh file
    :param float scale: Scale to apply
    :return: Flat list [xmin, ymin, zmin, xmax, ymax, zmax] of the fixed box.
    """
    # Get the bounding box (b_min / b_max are numpy arrays from define_bbox)
    b_min, b_max, _ = define_bbox(source_file, 0, scale)
    # Fix along the largest dimension
    sizes = b_max - b_min
    # Index (0, 1 or 2) of the axis with the largest extent
    dim = sizes.tolist().index(sizes.max(0))
    # Fix the bottom of the Armadillo: extend 5% below the mesh, then keep a
    # slab whose thickness is 10% of the largest extent
    b_min[dim] -= 0.05 * sizes[dim]
    b_max[dim] = b_min[dim] + 0.1 * sizes[dim]
    return b_min.tolist() + b_max.tolist()
def find_extremities(source_file, scale):
    """
    Find the different extremities of the model (tail, hands, ears, muzzle).

    NOTE(review): the axis conventions below (tail at max z, ears at max y,
    hands/muzzle at min z) assume the Armadillo mesh's specific orientation —
    confirm before reusing with other meshes.

    :param str source_file: Mesh file
    :param float scale: Scale to apply
    :return: Key points of the mesh as a list of six [x, y, z] coordinates:
             [tail, right hand, left hand, right ear, left ear, muzzle].
    """
    # Get the coordinates of the mesh
    mesh = Mesh(source_file).scale(scale)
    coords = mesh.points().copy()
    # Get the size of the bounding box
    b_min, b_max, _ = define_bbox(source_file, 0, scale)
    sizes = b_max - b_min
    # Find the tail: vertex with the largest z coordinate
    tail = coords[coords[:, 2].argmax()].tolist()
    # Find the hands: split the outer thirds along x, take the min-z vertex
    right = coords[coords[:, 0] >= sizes[0] / 3]
    left = coords[coords[:, 0] <= -sizes[0] / 3]
    r_hand = right[right[:, 2].argmin()].tolist()
    l_hand = left[left[:, 2].argmin()].tolist()
    # Find the ears: per side (x >= 0 / x <= 0), take the max-y vertex
    right = coords[coords[:, 0] >= 0]
    left = coords[coords[:, 0] <= 0]
    r_ear = right[right[:, 1].argmax()].tolist()
    l_ear = left[left[:, 1].argmax()].tolist()
    # Find the muzzle: min-z vertex within the central third along x
    middle = coords[coords[:, 0] >= -sizes[0] / 3]
    middle = middle[middle[:, 0] <= sizes[0] / 3]
    muzzle = middle[middle[:, 2].argmin()].tolist()
    return [tail, r_hand, l_hand, r_ear, l_ear, muzzle]
def define_bbox(source_file, margin_scale, scale):
    """
    Find the bounding box of the model.

    :param str source_file: Mesh file
    :param float margin_scale: Margin in percents of the bounding box
    :param float scale: Scale to apply
    :return: Tuple (bbox_min, bbox_max, flat_list) where bbox_min / bbox_max
             are numpy arrays of the corner coordinates and flat_list is
             [xmin, ymin, zmin, xmax, ymax, zmax].
    """
    # Find min and max corners of the bounding box
    mesh = Mesh(source_file).scale(scale)
    bbox_min = mesh.points().min(0)
    bbox_max = mesh.points().max(0)
    # Apply a margin scale to the bounding box
    # NOTE(review): bbox_max uses the already-shifted bbox_min, so the upper
    # margin is slightly larger than the lower one — confirm this asymmetry
    # is intended (all visible callers pass margin_scale=0).
    bbox_min -= margin_scale * (bbox_max - bbox_min)
    bbox_max += margin_scale * (bbox_max - bbox_min)
    return bbox_min, bbox_max, bbox_min.tolist() + bbox_max.tolist()
def compute_grid_resolution(max_bbox, min_bbox, cell_size, print_log=False):
    """
    Compute the grid resolution from the desired cell size and the grid dimensions.

    :param list max_bbox: Max upper corner of the grid
    :param list min_bbox: Min lower corner of the grid
    :param float cell_size: Desired cell size, as a fraction of the smallest extent
    :param bool print_log: Print info
    :return: List with the number of grid nodes along each dimension
    """
    # Absolute grid extents along x, y and z
    extents = [fabs(hi - lo) for hi, lo in zip(max_bbox[:3], min_bbox[:3])]
    # Cells need to be hexahedra: scale the requested size by the smallest extent
    size = cell_size * min(extents)
    # Number of cells along each dimension
    cells = [int(extent / size) for extent in extents]
    # Print grid infos
    if print_log:
        print(f"Cell size = {size}x{size}x{size}")
        print(f"Nx = {cells[0]}, Ny = {cells[1]}, Nz = {cells[2]}")
        total_nodes = (cells[0] + 1) * (cells[1] + 1) * (cells[2] + 1)
        print(f"Number of nodes in regular grid = {total_nodes}")
    # Nodes per dimension = cells per dimension + 1
    return [n + 1 for n in cells]
def get_nb_nodes(source_file):
    """
    Get the number of nodes of a mesh.

    :param str source_file: Mesh file
    :return: Number of nodes (vertices) in the mesh, as reported by vedo's Mesh.N()
    """
    return Mesh(source_file).N()
def get_object_max_size(source_file, scale):
    """
    Get the max size of the object along x, y, z axis.

    :param str source_file: Mesh file
    :param float scale: Scale to apply to the mesh
    :return: Max size of the object
    """
    # bounds() is indexed as [xmin, xmax, ymin, ymax, zmin, zmax]
    b = Mesh(source_file).scale(scale).bounds()
    return max(b[1]-b[0], b[3]-b[2], b[5]-b[4])
def from_sparse_to_regular_grid(nb_nodes_regular_grid, sparse_grid, sparse_grid_mo):
    """
    Map the indices of nodes in the sparse grid with the indices of nodes in the regular grid.

    :param int nb_nodes_regular_grid: Total number of nodes in the regular grid
    :param sparse_grid: SparseGridTopology containing the sparse grid topology
    :param sparse_grid_mo: MechanicalObject containing the positions of the nodes in the sparse grid
    :return: Mapped indices from sparse to regular grids, Mapped indices from regular to sparse regular grid,
    Rest shape positions of the regular grid
    """
    # Initialize mapping between sparse grid and regular grid
    positions_sparse_grid = sparse_grid_mo.position.array()
    # sparse->regular index map, one entry per sparse-grid node
    indices_sparse_to_regular = np.zeros(positions_sparse_grid.shape[0], dtype=np.int32)
    # regular->sparse index map; -1 marks regular-grid nodes absent from the sparse grid
    indices_regular_to_sparse = np.full(nb_nodes_regular_grid, -1, dtype=np.int32)
    # Map the indices of each node iteratively
    for i in range(positions_sparse_grid.shape[0]):
        # In Sofa, a SparseGrid in computed from a RegularGrid, just use the dedicated method to retrieve their link
        idx = sparse_grid.getRegularGridNodeIndex(i)
        indices_sparse_to_regular[i] = idx  # Node i in SparseGrid corresponds to node idx in RegularGrid
        indices_regular_to_sparse[idx] = i  # Node idx in RegularGrid corresponds to node i in SparseGrid
    # Recover rest shape positions of sparse grid nodes in the regular grid
    # (positions of regular-grid nodes not covered by the sparse grid stay zero)
    regular_grid_rest_shape_positions = np.zeros((nb_nodes_regular_grid, 3), dtype=np.double)
    regular_grid_rest_shape_positions[indices_sparse_to_regular] = sparse_grid_mo.rest_position.array()
    return indices_sparse_to_regular, indices_regular_to_sparse, regular_grid_rest_shape_positions
| mimesis-inria/DeepPhysX.Sofa | examples/demos/Armadillo/UNet/Environment/utils.py | utils.py | py | 6,015 | python | en | code | 3 | github-code | 13 |
2222927371 | """
practice_sequences.py
Get more practice with sequences
Snehitha Mamidi
February 17, 2020
"""
class Practice(object):
    """
    Illustrate methods that transform input sequences
    into something else
    """

    def months_and_days(self, month_names, month_days):
        """
        Create a string with info from the month_names and month_days lists.

        month_names: list of words naming the 12 months of the year,
            in lower case. Example: ['january', 'february', ...]
        month_days: list of numbers corresponding to the number of days in
            each month, from January to December, and February having 28 days
            Example: [31, 28, 31, 30, 31, ...]
        Returns: list of words, where each word has the following:
            First three letters of a month name, with first letter capitalized
            Followed by '-' and then the number of days in that month
            Example: ['Jan-31', 'Feb-28', 'Mar-31', ...]
        """
        # One pass: pair each name with its day count (zip also guards against
        # unequal list lengths by truncating to the shorter, instead of the
        # original IndexError), abbreviate, capitalize and join with '-'.
        return [f"{name[:3].capitalize()}-{days}"
                for name, days in zip(month_names, month_days)]
if __name__ == '__main__':
    # Demo 1: two months only
    p = Practice()
    input1 = ['january', 'february']
    input2 = [31, 28]
    result = p.months_and_days(input1, input2)
    print(f'months_and_days({input1}, {input2}) returns {result}')
    # Demo 2: all twelve months
    p = Practice()
    input1 = ['january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december']
    input2 = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    result = p.months_and_days(input1, input2)
    print(f'months_and_days({input1}, {input2}) returns {result}')
| Snehitha98/COMP525-lab3 | problems/practice_sequences.py | practice_sequences.py | py | 2,230 | python | en | code | 0 | github-code | 13 |
2274771630 |
import attr
from ndk.definitions import contactgroup
@attr.s
class ContactGroup(contactgroup.ContactGroupDirective):
    """Contact-group directive whose alias defaults to the group's name."""
    # Keyword-only attribute, coerced and validated as str.
    alias = attr.ib(type=str,
                    converter=str,
                    validator=attr.validators.instance_of(str),
                    kw_only=True)

    @alias.default
    def _set_alias_as_contactgroup_name(self):
        # When no alias is supplied, reuse contactgroup_name (inherited from
        # ContactGroupDirective) as the alias.
        return self.contactgroup_name
| VunkLai/ndk | ndk/objects/contactgroup.py | contactgroup.py | py | 388 | python | en | code | 0 | github-code | 13 |
31258069222 | import torch
import torch.nn as nn
from layers import *
class GNN_JK(nn.Module):
    """ GNN with JK design as a node classification model """
    def __init__(self, dim_feats, dim_h, n_classes, n_layers, activation, dropout, gnnlayer_type='gcn'):
        # dim_feats: input feature size; dim_h: hidden size; n_classes: output
        # classes; n_layers: number of GNN layers; gnnlayer_type selects the
        # message-passing layer ('gcn', 'gsage' or 'gat').
        # NOTE(review): GCNLayer/SAGELayer/GATLayer and F are assumed to be
        # provided by the `from layers import *` star import — confirm.
        super(GNN_JK, self).__init__()
        # One attention-head count per layer; only GAT uses more than 1 head.
        heads = [1] * (n_layers + 1)
        if gnnlayer_type == 'gcn':
            gnnlayer = GCNLayer
        elif gnnlayer_type == 'gsage':
            gnnlayer = SAGELayer
        elif gnnlayer_type == 'gat':
            gnnlayer = GATLayer
            heads = [8] * n_layers + [1]
            # Keep the total hidden width constant across the 8 heads.
            dim_h = int(dim_h / 8)
            activation = F.elu
        self.layers = nn.ModuleList()
        # input layer
        self.layers.append(gnnlayer(dim_feats, dim_h, heads[0], activation, 0))
        # hidden layers
        for i in range(n_layers - 1):
            self.layers.append(gnnlayer(dim_h*heads[i], dim_h, heads[i+1], activation, dropout))
        # output layer: JK concatenates all n_layers hidden states, hence the
        # widened linear input (dim_h * n_layers * heads).
        self.layer_output = nn.Linear(dim_h*n_layers*heads[-2], n_classes)
    def forward(self, adj, features):
        # adj: graph structure given to every layer; features: node features.
        h = features
        hs = []
        for layer in self.layers:
            h = layer(adj, h)
            hs.append(h)
        # JK-concat design: concatenate every layer's output along features.
        h = torch.cat(hs, 1)
        h = self.layer_output(h)
        return h
| worldinmyfist/Graph_Augmentation | GAugO/model/GNN_JK.py | GNN_JK.py | py | 1,308 | python | en | code | 0 | github-code | 13 |
25811066704 | """
TCP NULL, FIN, and Xmas scans
nmap flags: -sN; -sF; -sX
"""
import socket
from impacket import ImpactPacket, ImpactDecoder
from impacket.ImpactPacket import TCP
src = '10.0.2.15'
dst = '10.0.2.4'
sport = 12345 # Random source port
dport = 81 # Port that we want to probe
# Create a new IP packet and set its source and destination addresses.
# Construct the IP Packet
ip = ImpactPacket.IP()
ip.set_ip_src(src)
ip.set_ip_dst(dst)
# Construct the TCP Segment
tcp = ImpactPacket.TCP()
tcp.set_th_sport(sport) # Set the source port in TCP header
tcp.set_th_dport(dport) # Set the destination in TCP header
tcp.auto_checksum = 1
# NULL
#######
# FIN
# tcp.set_FIN()
# Light up the Xmas tree
tcp.set_URG()
tcp.set_PSH()
tcp.set_FIN()
# Put the TCP Segment into the IP Packet
ip.contains(tcp)
# Create a Raw Socket to send the above constructed Packet
# socket(<domain>, <type>, <protocol>)
s = socket.socket(socket.AF_INET, socket.SOCK_RAW, 6) # protocol value can also be fetched like this: socket.getprotobyname('tcp')
s.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
# calls sendto() syscall
s.sendto(ip.get_packet(), (dst, 0))
s.settimeout(3)
# UNCOMMENT BELOW LINE IF src AND dst ARE 127.0.0.1
# packet = s.recvfrom(4096)[0] # This will show the packet sent from sender to receiver.
try:
'''
If the port is open and if SYN, RST or ACK bits are not set,
then the packet will be dropped. Hence, if we don't set SYN, RST or ACK bits
and don't receive any response, then port is open. (Or it can be filtered as well)
If port is closed, then we'll receive RST/ACK.
'''
packet = s.recvfrom(4096)[0] # This is the packet we're interested in. Receiver to sender packet
except socket.timeout:
print('%d is open|filtered' % dport)
exit(0)
# Decode the received Packet
res_ip = ImpactDecoder.IPDecoder().decode(packet)
res_tcp: TCP = res_ip.child() # Get the response TCP Segment from the IP Packet
print("Pretty print the IP Packet:")
print(res_ip)
print("Flag bit format: URG-ACK-PSH-RST-SYN-FIN")
print("Request Flag bits: " + bin(tcp.get_th_flags())[2:].zfill(6))
flag_bits = bin(res_tcp.get_th_flags())[2:].zfill(6)
print("Response Flag bits: " + flag_bits)
# Flag format: URG-ACK-PSH-RST-SYN-FIN
# if RST/ACK are set
if flag_bits == '010100':
print('%d is closed' % dport)
s.close()
'''
This scan doesn't work on Windows as it returns RST regardless of whether the port is open or closed during this scan.
(This can be one factor when we want to find out the type of the OS of the target).
If we get RST for all ports of the target during this scan, then there is a high chance that the target is running Windows.
Also note that many Cisco devices, BSDI, IBM OS/400 behave the same way as Windows during this scan.
Note: In my personal test, Windows 10 with port 80 open, didn't send any response for this scan neither for open nor closed ports
Might be due to Windows Firewall.
Linux worked as expected.
''' | nandan-desai-extras/nmap-port-scan-works | tcp_null_fin_xmas.py | tcp_null_fin_xmas.py | py | 2,992 | python | en | code | 0 | github-code | 13 |
41182544464 | import os
import pickle
from distutils.util import strtobool
from core import constants
def query_yes_no(question, default="yes"):
    """Print *question* and read a yes/no answer from stdin in a loop.

    Returns strtobool's result (1 for yes-like, 0 for no-like input) and
    re-prompts on anything it cannot parse.

    NOTE(review): the *default* parameter is currently unused — an empty
    answer raises ValueError and re-prompts instead of returning the default.
    """
    print('{question} [y/n]'.format(question=question))
    while True:
        try:
            return strtobool(input().lower())
        except ValueError:
            print("Please respond with 'y' or 'n'.")
# Load the previously scraped tweet chains from disk.
with open(os.path.join(constants.DATA_DIR, 'tweets.pickle'), 'rb') as f:
    chains = pickle.load(f)
keepers = []
# Show every chain to the user and keep only the approved ones.
for chain in chains:
    for status in chain:
        print('{author}: {status}'.format(author=status.author.screen_name, status=status.text))
    is_keeper = query_yes_no('Do you want to keep this chain?')
    if is_keeper:
        keepers.append(chain)
# Persist the curated selection for later processing.
with open(os.path.join(constants.DATA_DIR, 'selected_tweets.pickle'), 'wb') as f:
    pickle.dump(keepers, f)
| knyghty/linguistic-analysis | la/twtr/select_tweets.py | select_tweets.py | py | 830 | python | en | code | 0 | github-code | 13 |
41903703680 | #A program that counts up to a certain number, and
#only prints out the primes between 1 and that number.
def primes_below(limit):
    """Return all prime numbers p with 2 <= p < limit, in increasing order.

    Fixes the original script, which shadowed the builtin ``list``, reused
    the loop variable ``n`` inside its generator expression, and relied on
    accidental control flow to produce correct results.

    :param int limit: exclusive upper bound
    :return: list of primes below limit
    """
    primes = []
    for candidate in range(2, limit):
        # Trial division by the primes found so far; a composite number must
        # have a prime divisor no larger than its square root.
        if all(candidate % p != 0 for p in primes if p * p <= candidate):
            primes.append(candidate)
    return primes


if __name__ == '__main__':
    # Same output as the original script: primes between 1 and 10000.
    print(primes_below(10000))
16299277620 | from django.conf.urls import url
from tastypie import fields
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from tastypie.utils import trailing_slash
from .models import GitHubUser
class GitHubUserResource(ModelResource):
    """Tastypie REST resource exposing GitHubUser records.

    Follower/following relations are embedded only in detail views
    (use_in='detail') to keep list responses small.
    """
    followers = fields.ToManyField('self', 'followers', use_in='detail')
    following = fields.ToManyField('self', 'following', use_in='detail')
    class Meta:
        queryset = GitHubUser.objects.all()
        resource_name = 'user'
        fields = ['id', 'github_id', 'login', 'num_following',
                  'num_followers', 'location', 'company']
        # 'distance' has no filter types: it is handled by the custom
        # /within/<distance>/ endpoint below, not by standard filtering.
        filtering = {
            'id': ALL,
            'github_id': ALL,
            'login': ALL,
            'num_followers': ALL,
            'num_following': ALL,
            'location': ALL,
            'company': ALL,
            'followers': ALL_WITH_RELATIONS,
            'following': ALL_WITH_RELATIONS,
            'distance': []
        }
        ordering = ['id', 'num_followers', 'num_following', 'login', 'github_id', 'location',
                    'company']
    def prepend_urls(self):
        # Register /user/<pk>/within/<distance>/ routed to self.within().
        return [
            url(r"^(?P<resource_name>%s)/(?P<%s>.*?)/within/(?P<distance>\d+)%s$" % (
                self._meta.resource_name,
                self._meta.detail_uri_name,
                trailing_slash()
            ), self.wrap_view('within'), name="api_user_within"),
        ]
    def within(self, request, **kwargs):
        """
        Expose the distance method on the custom manager via an instance.
        There is almost definitely a better way to do this. This method is
        duplicating a bunch of code in the get_list() method.
        """
        self.method_check(request, allowed=['get'])
        basic_bundle = self.build_bundle(request=request)
        # 'distance' must be removed before resolving the user from kwargs.
        distance = kwargs.pop('distance')
        user = self.cached_obj_get(bundle=basic_bundle,
                                   **self.remove_api_resource_names(kwargs))
        # Access our custom manager method to get the appropriate queryset
        objects = user.users_within_distance(int(distance))
        sorted_objects = self.apply_sorting(objects, options=request.GET)
        paginator = self._meta.paginator_class(request.GET, sorted_objects,
                                               resource_uri=self.get_resource_uri(),
                                               limit=self._meta.limit,
                                               max_limit=self._meta.max_limit,
                                               collection_name=self._meta.collection_name)
        to_be_serialized = paginator.page()
        # Dehydrate the bundles in preparation for serialization.
        bundles = []
        for obj in to_be_serialized[self._meta.collection_name]:
            bundle = self.build_bundle(obj=obj, request=request)
            bundles.append(self.full_dehydrate(bundle, for_list=True))
        to_be_serialized[self._meta.collection_name] = bundles
        to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
        return self.create_response(request, to_be_serialized)
| matthewcburke/github_users | github_users/github_users/api.py | api.py | py | 3,139 | python | en | code | 0 | github-code | 13 |
33536076698 | #!/usr/bin/env python3
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# NAME: ti.py #
# #
# VERSION: 20230403 #
# #
# SYNOPSIS: Track IP-IDs for patterns and exploit analysis #
# #
# DESCRIPTION: Analyze the IP-IDs of an IP address to determine order of IDs. #
# Different patterns can help determine exploits or corroborate #
# any idle network scan that only scans for sequential IP-IDs. #
# This was based on the paper *A closer look at IP-ID behavior #
# in the Wild* by Flavia Salutari, Danilo Cicalese, and Dario J. #
# Rossi. #
# #
# INPUT: Runtime user input #
# #
# OUTPUT: 1.) STDOUT #
# 2.) ./sample.pcap #
# 3.) ./ipids.txt #
# #
# PRE-RUNTIME NOTES: 1.) You will need to open another terminal and ping your #
# target when prompted. #
# 2.) At the time of writing, 68.173.202.37, was an #
# example of sequential incremental IP-IDs. I do not #
# own this IP address. Ping at your own risk. #
# 3.) The dir tree needs to be kept intact for this script #
# to work correctly. Otherwise you will need to set #
# TARGETDIR yourself. #
# #
# AUTHORS: @southwickio #
# #
# LICENSE: GPLv3 #
# #
# DISCLAIMER: All work produced by Authors is provided โAS ISโ. Authors make #
# no warranties, express or implied, and hereby disclaims any and #
# all warranties, including, but not limited to, any warranty of #
# fitness, application, et cetera, for any particular purpose, #
# use case, or application of this script. #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#import dependencies
import scapy.all as scapy #sudo pip3 install scapy
import os
import shutil
#set constants
TARGETDIR = os.path.join(os.path.dirname(__file__), "..", "res", "output")
if not os.path.exists(TARGETDIR):
os.makedirs(TARGETDIR)
#delete the target files if they exists
if os.path.exists(f"{TARGETDIR}/sample.pcap"):
os.remove(f"{TARGETDIR}/sample.pcap")
if os.path.exists(f"{TARGETDIR}/ipids.txt"):
os.remove(f"{TARGETDIR}/ipids.txt")
#get IP address
ip = input("\nEnter the IP address to capture packets from: ")
packetcount = int(input("Enter the number of packets to capture: "))
#STDOUT instructions and parameters
print("\n\n\nNow, open another terminal and enter the following command:")
print(f"\nping -c {int(packetcount * 1.1)} {ip}")
print("\nRepeat the command if there is a packet loss of more than 10%")
print("Find another IP address in the event of 100% packet loss.\n\n\n")
print(f"Sniffing for {packetcount} packets from {ip}...")
#sniff packets and provide status
packets = []
for i in range(packetcount):
packet = scapy.sniff(filter=f"src host {ip}", count=1)[0]
packets.append(packet)
print(packet)
if (i + 1) % 5 == 0:
print(f"\n{i+1} packets captured.\n")
#extract IP-IDs
ipids = []
for packet in packets:
if packet.haslayer('IP'):
ipids.append(packet['IP'].id)
#determine patterned behavior
if all(x == ipids[0] for x in ipids):
print("\nIP-ID values are constant.")
elif all(x == ipids[0]+i for i, x in enumerate(ipids)):
print("\nIP-ID values are incremented sequentially.")
else:
print("\nIP-ID values are random.")
#check for duplicates
if len(ipids) == len(set(ipids)):
print("\nNo duplicate IP-ID values found.")
else:
print("\nDuplicate IP-ID values found.")
#print IP-IDs
#print("\nIP-ID values:")
#
#for ipid in ipids:
# print(ipid)
#output to sample.pcap file and ipids.txt for further analysis
scapy.wrpcap(f"{TARGETDIR}/sample.pcap", packets)
with open(f"{TARGETDIR}/ipids.txt", 'w') as file:
for ip_id in ipids:
file.write(str(ip_id) + '\n')
print("\nCaptured packets saved in ../res/output/sample.pcap file.")
print("\nIP-ID values saved in ../res/output/ipids.txt file.")
print("\nExiting...\n") | southwickIO/cloudy-rabbit | src/ti.py | ti.py | py | 5,516 | python | en | code | 0 | github-code | 13 |
4168297606 | import logging, sys, os
def find():
    """Return the module-level battery probe matching the current platform.

    Strips trailing digits from sys.platform (e.g. 'freebsd12' -> 'freebsd')
    and looks up a function of that name in this module's globals; exits
    with status 1 when no such function exists.
    """
    platform = ''
    for char in sys.platform:
        if char in '1234567890': break
        platform += char
    fun = globals().get(platform, None)
    if fun is None:
        print('Error: unable to get platform for %s' % platform, file=sys.stderr)
        sys.exit(1)
    logging.info('Using platform {}'.format(fun.__name__))
    return fun
def freebsd():
""" FreeBSD, should work at least for 8 and newer """
import subprocess
o = subprocess.Popen(['acpiconf', '-i0'], stdout=subprocess.PIPE).communicate()[0].decode()
for line in o.split('\n'):
if line.find(':') == -1:
continue
(key, value) = line.split(':', 1)
if key.strip() == 'Remaining capacity':
percent = int(value.strip().replace('%', ''))
elif key.strip() == 'Remaining time':
if value.strip() == 'unknown':
lifetime = -1
else:
lifetime = value.strip().replace(':', '.')
lifetime = int(int(lifetime[0]) * 60 + int(lifetime[2]) * 10)
elif key.strip() == 'State':
if value.strip() == 'charging':
charging = True
else:
charging = False
elif key.strip() == 'Present rate':
if value.strip() == '0 mW' or value.strip().endswith('(0 mW)'):
ac = True
else:
ac = False
elif key.strip() == 'State' and value.strip() == 'not present':
ac = None
if charging:
ac = True
return (1, ac, charging, percent, lifetime)
def openbsd():
""" OpenBSD; should work with all versions since 4 (possibly earlier) """
import subprocess
ac = charging = None
percent = lifetime = 999
def sysctl(name):
o = subprocess.Popen(['/sbin/sysctl', name], stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()[0].decode()
try:
(name, value) = o.split('=')
except ValueError:
value = None
return value
o = subprocess.Popen(['/usr/sbin/apm', '-balm'], stdout=subprocess.PIPE).communicate()[0]
(bstat, percent, lifetime, ac) = o.decode().split()
if bstat == '4':
return (0, 1, 0, 0, 0)
if ac == '0':
ac = False
elif ac == '1':
ac = True
else:
ac = None
# apm output is not always reliable ...
b = sysctl('hw.sensors.acpibat0.raw0')
if b and b[:1] == '2':
charging = True
else:
charging = False
percent = int(percent)
if lifetime == 'unknown':
lifetime = -1
else:
lifetime = float(lifetime) * 60
return (1, ac, charging, percent, lifetime)
def linux():
""" Linux, being Linux, has several incompatible ways of doing this. """
# Disabled because getting it through /sys/ doesn't seem to work very
# reliably any more :-/
for linux_sucks in ['linux_upower', 'linux_sys_new', 'linux_sys_old']:
try:
result = globals().get(linux_sucks)()
except Exception as exc:
logging.warning(exc)
continue
if result != False:
logging.info('Using {}'.format(linux_sucks))
return result
return False
def linux_sys_new():
""" """
path = None
# Some systems seem to have BAT1 but not BAT0, so use the first one we
# encounter.
for i in range(0, 4):
p = '/sys/class/power_supply/BAT{}'.format(i)
if os.path.exists(p):
path = p
break
if path is None:
return False
if not os.path.exists('{}/energy_now'.format(path)):
return False
r = lambda f: open('{}/{}'.format(path, f), 'r').read().strip()
ri = lambda f: int(r(f))
status = r('status')
if status == 'Charging':
ac = True
charging = True
elif status == 'Discharging':
ac = False
charging = False
elif status == 'Full':
ac = True
charging = False
else:
ac = False
charging = False
percent = ri('capacity')
drain_rate = ri('power_now')
full_capacity = ri('energy_full')
remaining = ri('energy_now')
if charging:
lifetime = (full_capacity - remaining) / drain_rate * 60
elif drain_rate > 0:
lifetime = remaining / drain_rate * 60
else:
lifetime = -1
return (1, ac, charging, percent, lifetime)
def linux_sys_old():
""" """
path = None
# Some systems seem to have BAT1 but not BAT0, so use the first one we
# encounter.
for i in range(0, 4):
p = '/sys/class/power_supply/BAT{}'.format(i)
if os.path.exists(p):
path = p
break
if path is None:
return False
if not os.path.exists('{}/current_now'.format(path)):
return False
r = lambda f: open('{}/{}'.format(path, f), 'r').read().strip()
ri = lambda f: int(r(f))
status = r('status')
if status == 'Charging':
ac = True
charging = True
elif status == 'Discharging':
ac = False
charging = False
elif status == 'Full':
ac = True
charging = False
else:
ac = False
charging = False
percent = ri('capacity')
drain_rate = ri('current_now')
remaining = ri('charge_now')
full_capacity = ri('charge_full')
if charging:
lifetime = (full_capacity - remaining) / drain_rate * 60
elif drain_rate > 0:
lifetime = remaining / drain_rate * 60
else:
lifetime = -1
return (1, ac, charging, percent, lifetime)
def linux_upower():
""" Linux with UPower; http://upower.freedesktop.org/docs/Device.html """
try:
import dbus
except ImportError:
#print('battray: "import dbus" failed; not trying UPower', file=sys.stderr)
#print('battray: if you would like to use UPower then install dbus-python:', file=sys.stderr)
print('battray: "import dbus" failed; this is required for Linux; install it with:', file=sys.stderr)
print('battray: pip install dbus-python', file=sys.stderr)
return False
bus = dbus.SystemBus()
upower = bus.get_object('org.freedesktop.UPower', '/org/freedesktop/UPower/devices/battery_BAT0')
#upower = bus.get_object('org.freedesktop.UPower', '/org/freedesktop/UPower/devices/battery_BAT1')
iface = dbus.Interface(upower, 'org.freedesktop.DBus.Properties')
info = iface.GetAll('org.freedesktop.UPower.Device')
percent = float(info['Percentage'])
state = int(info['State'])
charging = False
if state == 1:
ac = True
charging = True
elif state == 2:
ac = False
elif state == 4:
ac = True
else:
ac = None
if charging:
lifetime = int(info['TimeToFull']) / 60
else:
lifetime = int(info['TimeToEmpty']) / 60
return (1, ac, charging, percent, lifetime)
# The MIT License (MIT)
#
# Copyright ยฉ 2008-2017 Martin Tournoij
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# The software is provided "as is", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall the
# authors or copyright holders be liable for any claim, damages or other
# liability, whether in an action of contract, tort or otherwise, arising
# from, out of or in connection with the software or the use or other dealings
# in the software.
| arp242/battray | battray/platforms.py | platforms.py | py | 8,123 | python | en | code | 5 | github-code | 13 |
16295369240 | '''
*args,**kwargs(kwargs IS OUT OF CONTEXT RN)(ONLY ARGS)
Exception Handling :
Try Except Else Finally
File Handling :
'''
###EXCEPTION HANDLING
##print(1/0)
try:
div = 1/0
except Exception :
print('you cannot divide by 0')
### this loops infinitely unless user enters a number
while True:
try:
x = int(input("Please enter a number: "))
break
except ValueError:
print("Oops! That was no valid number. Try again...")
## TRY EXCEPT ELSE FINALLY
def divides(x, y):
    """Divide x by y and report the outcome on stdout.

    Demonstrates the full try/except/else/finally flow: a specific handler
    prints a message on error, ``else`` runs only on success, and
    ``finally`` always runs. Returns None in every case.
    """
    try:
        quotient = x / y
    except ZeroDivisionError:
        print("division by zero!")
    except NameError:
        print("please input a number")
    except Exception as err:
        print(f'something went wrong : {err}')
    else:
        # Reached only when no exception was raised.
        print("result is", quotient)
    finally:
        # Runs regardless of success or failure.
        print("executing finally clause")
##divides(1,dsi)
| shyamgupta196/old-code | BootCamp/day7.py | day7.py | py | 896 | python | en | code | 0 | github-code | 13 |
15551317935 | from pwn import *
from pwn import p64
debug = 1
gdb_is = 1
# context(arch='i386',os = 'linux', log_level='DEBUG')
context(arch='amd64',os = 'linux', log_level='DEBUG')
if debug:
context.terminal = ['/mnt/c/Users/sagiriking/AppData/Local/Microsoft/WindowsApps/wt.exe','nt','Ubuntu','-c']
if not gdb_is:
r = process("./shellcode")
else:
host = "192.168.0.111:62338"
r = connect(host.split(':')[0],host.split(':')[1])#่ฟ็จ่ฟๆฅ
gdb_is =0
if gdb_is:
r = gdb.debug("./shellcode",'b main')
pass
# 0x55d3bfc04438 0x55d3bfc044f2 distance 0xba
r.send(asm('syscall'))
time.sleep(1)
pause()
# r.send(b'S' +b'X'+ b'Z'*2 + b'Y'+ b'S'+b'_' + b'Z'*6 + b'Q' + b'Z'* 10)
r.send(b'Z'*2 +b'Y'+b'Z'*7 + b'QY'*3 + b'Q')
# syscall -> rcx Y
# elf = ELF('./pwn')
r.interactive()
| Sagiring/Sagiring_pwn | ็พๅๆฏ/shellcode/pwn_exp.py | pwn_exp.py | py | 847 | python | en | code | 1 | github-code | 13 |
10477087540 | # LALR(1) parser for grammars. This version is based on the LR(1)-automaton
# states merging. If the states share the same LR(0)-items they get merged in a
# unique state with the lookaheads merged together. The future implementation will be
# based on the recursive equatios algorithm invented by Paola Quaglia. The process
# follows the base LR(0)-automaton construction, while at the same time keeping track
# of a set of recursive equations containing the lookaheads. At the end of the
# LR(0)-automaton computation, the recursive equations get solved, the lookahead lists
# get populated and the resulting automaton is a LALR(1)-automaton as it was created
# through LR(1)-automaton state merging.
# Check readme.txt in order to see input format of the grammar and eventual
# output format.
#
# Author: Matteo Amatori
import csv
import numpy
from utils import first_and_follow_calculation as ffc
from prettytable import PrettyTable
#------------------------------------------------------------------------------
class nonTerminal:
    """A grammar non-terminal together with its FIRST and FOLLOW sets."""
    # Class-level defaults; every field is re-bound per instance in __init__.
    name = ''
    first_l = []
    follow_l = []
    isStartSymbol = False

    def __init__(self, non_terminal_name):
        # Fresh per-instance lists so instances never share mutable state.
        self.name = non_terminal_name
        self.first_l = []
        self.follow_l = []
        self.isStartSymbol = False

    def add_first(self, element):
        # Record one terminal in FIRST(self).
        self.first_l.append(element)

    def add_follow(self, element):
        # Record one terminal in FOLLOW(self).
        self.follow_l.append(element)
class lr0Item:
    """An LR(0) item: a dotted production with kernel/closure bookkeeping."""
    # Class-level defaults; instances overwrite all of them in __init__.
    production = []
    type = ""
    dot = 0
    isReduceItem = False

    def __init__(self, production, type, dot, reduct):
        self.production = production
        self.type = type              # "Kernel" or "Closure"
        self.dot = dot                # dot position inside the production
        self.isReduceItem = reduct    # reduce-item marker

    def __eq__(self, other):
        # Items are equal exactly when all four fields coincide.
        return (self.production == other.production
                and self.type == other.type
                and self.dot == other.dot
                and self.isReduceItem == other.isReduceItem)

    def __hash__(self):
        # NOTE(review): assumes self.production is hashable (e.g. a tuple);
        # a list-valued production would raise TypeError here — confirm.
        return hash((self.production, self.type, self.dot, self.isReduceItem))
def create_new_lr0_item (production, type, dot, reduct):
    """Factory: build and return a fresh lr0Item with the given fields."""
    new_state = lr0Item(production, type, dot, reduct)
    return new_state
#------------------------------------------------------------------------------
class lr1Item:
    """An LR(1) item: a dotted production plus a list of lookahead symbols."""
    # Class-level defaults; every field is overwritten per instance.
    production = []
    lookAhead = []
    dot = 0
    type = ""
    isReduceItem = False

    def __init__(self, production, dot, type, reduct):
        self.production = production
        self.lookAhead = []           # populated later via set_lookaheads()
        self.dot = dot                # dot position inside the production
        self.type = type              # "Kernel" or "Closure"
        self.isReduceItem = reduct    # reduce-item marker

    def __eq__(self, other):
        # The four core fields must match before lookaheads are compared.
        if (self.production != other.production or self.dot != other.dot
                or self.type != other.type
                or self.isReduceItem != other.isReduceItem):
            return False
        mine = set(self.lookAhead)
        theirs = set(other.lookAhead)
        # Lookahead lists compare as sets (order/duplicates ignored).
        # NOTE: two items whose lookahead lists are both empty compare
        # unequal — the original element-by-element scan never set its
        # success flag in that case, and that quirk is preserved here.
        return bool(mine) and mine == theirs

    def __hash__(self):
        # Lookaheads are left out of the hash on purpose: items that compare
        # equal necessarily share these four fields, so equal items hash alike.
        return hash((self.production, self.dot, self.type, self.isReduceItem))
def create_new_lr1_item (production, dot, type, reduct):
    """Factory: build and return a fresh lr1Item (lookaheads start empty)."""
    new_item = lr1Item(production, dot, type, reduct)
    return new_item
def set_lookaheads (item, lookahead_l):
    """Replace the item's lookahead list with lookahead_l (no copy is made)."""
    item.lookAhead = lookahead_l
def print_item(item):
    """Debug helper: dump an lr1Item's fields on one stdout line."""
    print(item.production, item.lookAhead, item.dot, item.type, item.isReduceItem)
#------------------------------------------------------------------------------
class lr1State:
    """A state of the LR(1)/LALR(1) automaton: a named collection of items.

    Attributes:
        name: state label (string; merged LALR states concatenate names).
        index: numeric position of the state in its automaton's state list.
        item_l: the lr1Item objects belonging to this state.
        isInitialState: True only for the automaton's start state.
        gotMerged: set when this state is folded into a merged LALR state.
    """

    def __init__(self, name, state_count):
        # The class-level mutable default item_l = [] was removed: it was a
        # latent shared-state bug, always shadowed by this instance list.
        self.name = name
        self.index = state_count
        self.item_l = []
        self.isInitialState = False
        self.gotMerged = False

    def add_item(self, item):
        """Append *item* to this state's item list (no de-duplication here)."""
        self.item_l.append(item)
def create_new_state(name, state_count):
    """Factory wrapper: build and return a fresh lr1State."""
    return lr1State(name, state_count)
def check_kernel_equality(new_kernel, state_n):
    """Return True iff *new_kernel* set-equals the Kernel items of *state_n*."""
    kernel_items = [it for it in state_n.item_l if it.type == "Kernel"]
    return set(new_kernel) == set(kernel_items)
def check_states_equality_for_merge(set_1, set_2):
    """Return True iff the two item collections are equal when viewed as sets."""
    return set(set_1) == set(set_2)
def apply_closure(state, my_item, recursion):
    """Expand *state* in place with the LR(1) closure items generated by *my_item*.

    For an item A -> alpha . B beta with lookahead L, every production
    B -> gamma is added as a "Closure" item whose lookahead approximates
    FIRST(beta L).  Recursion depth is capped at 2; callers re-apply closure
    in a loop to approach a fixed point.  Relies on module globals
    ``ffc``, ``grammar`` and ``non_terminals``.
    """
    if (my_item.isReduceItem == "Not-Reduce"):
        if (ffc.isNonTerminal(my_item.production[my_item.dot])):
            for production in grammar:
                # Only close over productions whose driver is the
                # non-terminal immediately after the dot.
                if (production[0][0] == my_item.production[my_item.dot]):
                    temp_lookAhead_l = []
                    if (my_item.dot == len(my_item.production)-1):
                        # Dot sits on the last symbol: nothing follows it,
                        # so the item's own lookaheads are inherited.
                        for element in my_item.lookAhead:
                            temp_lookAhead_l.append(element)
                    else:
                        # Scan the symbols after the dot, accumulating
                        # FIRST of the remaining suffix.
                        p_prog = my_item.dot
                        stopped = False
                        while (p_prog+1 <= len(my_item.production)-1 and not stopped):
                            if (ffc.isTerminal(my_item.production[p_prog+1])):
                                if (my_item.production[p_prog+1] not in temp_lookAhead_l):
                                    temp_lookAhead_l.append(my_item.production[p_prog+1])
                                stopped = True
                            else:
                                for nT in non_terminals:
                                    if (nT.name == my_item.production[p_prog+1]):
                                        for first_nT in nT.first_l:
                                            if (first_nT != "#"):
                                                if (first_nT not in temp_lookAhead_l):
                                                    temp_lookAhead_l.append(first_nT)
                                            else:
                                                # Nullable tail at the end of the
                                                # production: inherit the item's
                                                # own lookaheads.
                                                if (p_prog+1 == len(my_item.production)-1):
                                                    for item_clos_LA in my_item.lookAhead:
                                                        if (item_clos_LA not in temp_lookAhead_l):
                                                            temp_lookAhead_l.append(item_clos_LA)
                            p_prog += 1
                    temp_type = ""
                    # Epsilon productions ("X->#", body at index 3) are born
                    # as reduce items.
                    if (production[0][3] == "#"):
                        new_temp_item = create_new_lr0_item(production[0], 3, "Closure", "Reduce")
                        temp_type = "Reduce"
                    else:
                        new_temp_item = create_new_lr0_item(production[0], 3, "Closure", "Not-Reduce")
                        temp_type = "Not-Reduce"
                    found = False
                    # If an LR(0)-equal item already exists in the state,
                    # merge the freshly computed lookaheads into it.
                    for item_for_la_merge in state.item_l:
                        temp_item = create_new_lr0_item(item_for_la_merge.production, item_for_la_merge.dot, item_for_la_merge.type, item_for_la_merge.isReduceItem)
                        if (temp_item == new_temp_item):
                            for la_to_merge in temp_lookAhead_l:
                                if (la_to_merge not in item_for_la_merge.lookAhead):
                                    item_for_la_merge.lookAhead.append(la_to_merge)
                            found = True
                    if (not found):
                        new_item = create_new_lr1_item(production[0], 3, "Closure", temp_type)
                        set_lookaheads(new_item, temp_lookAhead_l)
                        if (new_item not in state.item_l):
                            state.item_l.append(new_item)
                            #print("Adding " + new_item.production + " to state " + str(state.name))
                            if (recursion < 2):
                                if (ffc.isNonTerminal(new_item.production[new_item.dot])):
                                    #print("recurring for " + new_item.production, recursion)
                                    apply_closure(state, new_item, recursion+1)
#------------------------------------------------------------------------------
class lr1transition:
    """A labelled automaton edge: *element* moves *starting_state* to *ending_state*."""

    def __init__(self, transition_count, elem, s_state, e_state):
        self.name = transition_count
        self.element = elem
        self.starting_state = s_state
        self.ending_state = e_state

    def __eq__(self, other):
        # Equal only when every field (including the numeric name) matches.
        mine = (self.name, self.element, self.starting_state, self.ending_state)
        theirs = (other.name, other.element, other.starting_state, other.ending_state)
        return mine == theirs
def create_new_transition(name, element, s_state, e_state):
    """Factory wrapper: build and return a fresh lr1transition."""
    return lr1transition(name, element, s_state, e_state)
def print_transition(transition):
    """Debug helper: dump a transition's fields, space-separated, to stdout."""
    fields = (transition.name, transition.element, transition.starting_state, transition.ending_state)
    print(*fields)
#------------------------------------------------------------------------------
# variables declaration section
terminal_names = [] # strings of terminals
non_terminal_names = [] # just strings
non_terminals = [] # actual non-terminals
lr1_states = [] # array that will contain the LR(1)-automaton states
lr1_transitions = [] # array of transitions between the LR(1)-automaton states
lr1_state_counter = 0
lr1_transition_counter = 0
lalr1_states = [] # array that will contain the LALR(1)-automaton states
lalr1_transitions = [] # array of transitions between the LALR(1)-automaton states
lalr1_state_counter = 0
lalr1_transition_counter = 0
# input section
# Each non-empty CSV row holds one production string; throughout this file
# index 0 of the string is the driver non-terminal and index 3 is the first
# body symbol (presumably the format is "X->body" -- confirm in grammar.txt).
with open("utils/grammar.txt", 'r', encoding = 'ISO-8859-1') as f:
    input_file = csv.reader(f)
    grammar = []
    for row in input_file:
        if (len(row) != 0):
            grammar = grammar + [row]
    # redundant: the with-statement already closes the file on exit
    f.close()
# collecting non-terminals
for index in range(len(grammar)):
    driver = grammar[index][0][0]
    if driver not in non_terminal_names:
        non_terminal_names.append(driver)
        non_terminals.append(nonTerminal(driver))
# collecting terminals
terminal_names.append(" ")
for production in grammar:
    for index in range(len(production[0])):
        if (production[0][index] != '#' and index >= 3):
            if (ffc.isTerminal(production[0][index])):
                if (production[0][index] not in terminal_names):
                    terminal_names.append(production[0][index])
terminal_names.append("$")
# the driver of the first production acts as the grammar's start symbol
non_terminals[0].isStartSymbol = True
print("Grammar:")
for element in grammar:
    print(element[0])
# first computation
print("------------------------- First Computation --------------------------")
# Two passes so FIRST sets can propagate through chained productions
# (presumably sufficient for this grammar -- a fixed-point loop would be safer).
for i in range(0, 2):
    for element in reversed(non_terminals):
        for row in grammar:
            ffc.compute_first(element, row, non_terminals, 3)
for element in non_terminals:
    print("First(" + element.name + "):")
    print(element.first_l)
# follow computation
print("------------------------- Follow Computation -------------------------")
for i in range(0, 1):
    for element in non_terminals:
        for row in grammar:
            ffc.compute_follow(element, row, non_terminals, 3)
for element in non_terminals:
    print("Follow(" + element.name + "):")
    print(element.follow_l)
# creation of augmented grammar
# Prepends the fresh start production "Q->S" ahead of the originals.
a_grammar = []
prev_starting_symb = ''
for element in non_terminals:
    if element.isStartSymbol:
        prev_starting_symb = element.name
starting_prod = "Q->" + prev_starting_symb
a_grammar.append(starting_prod)
for prod in grammar:
    a_grammar.append(prod[0])
# computation of the LALR(1)-automaton
print("------------------- LALR(1)-automaton Computation --------------------")
# starting state
initial_state = create_new_state(str(lr1_state_counter), lr1_state_counter)
lr1_state_counter += 1
initial_state.isInitialState = True
s_item = create_new_lr1_item(a_grammar[0], 3, "Kernel", "Not-Reduce")
set_lookaheads(s_item, ['$'])
initial_state.add_item(s_item)
apply_closure(initial_state, s_item, 0)
lr1_states.append(initial_state)
# rest of automaton computation
# NOTE: lr1_states is intentionally extended while being iterated -- the
# for-loop acts as a worklist over newly discovered states.
for state in lr1_states:
    for i in range(3): # temporary solution to recursive closure applications
        for clos_item in state.item_l:
            apply_closure(state, clos_item, 0)
    new_symb_transitions = []
    for item in state.item_l:
        if (item.isReduceItem == "Not-Reduce"):
            if (item.production[item.dot] not in new_symb_transitions):
                new_symb_transitions.append(item.production[item.dot])
    # For each outgoing symbol, build the GOTO kernel and either reuse an
    # existing state with the same kernel or create a new one.
    for element in new_symb_transitions:
        require_new_state = False
        destination_state = 0
        new_state_items = []
        for item in state.item_l:
            if (item.isReduceItem != "Reduce"):
                if (item.production[item.dot] == element):
                    new_item = create_new_lr1_item(item.production, item.dot+1, "Kernel", "Reduce" if (item.dot+1 == len(item.production)) else "Not-Reduce")
                    set_lookaheads(new_item, item.lookAhead)
                    new_state_items.append(new_item)
        for state_n in lr1_states:
            if (check_kernel_equality(new_state_items, state_n)):
                require_new_state = False
                destination_state = state_n.name
                break
            else:
                require_new_state = True
        if (require_new_state):
            new_state = create_new_state(str(lr1_state_counter), lr1_state_counter)
            lr1_state_counter += 1
            lr1_states.append(new_state)
            for new_state_item in new_state_items:
                if (new_state_item not in new_state.item_l):
                    new_state.add_item(new_state_item)
                    apply_closure(new_state, new_state_item, 0)
            new_transition = create_new_transition(lr1_transition_counter, element, state.index, new_state.index)
            lr1_transition_counter += 1
            if (new_transition not in lr1_transitions):
                lr1_transitions.append(new_transition)
        else:
            new_transition = create_new_transition(lr1_transition_counter, element, state.index, destination_state)
            lr1_transition_counter += 1
            if (new_transition not in lr1_transitions):
                lr1_transitions.append(new_transition)
# merging of the states that share the same LR(0)-items with different lookaheads using the lookahead merging technique
check_merge_matrix = numpy.zeros(shape = (lr1_state_counter, lr1_state_counter))
new_lalr1_states = []
# Diagonal set to 1 so a state is never compared against itself.
for i in range(lr1_state_counter):
    for j in range(lr1_state_counter):
        if (i == j):
            check_merge_matrix[i][j] = 1
for state in lr1_states:
    for state_check in lr1_states:
        equal = False
        if (check_merge_matrix[state.index][state_check.index] != 1 and check_merge_matrix[state_check.index][state.index] != 1):
            # Compare the two states by their LR(0) projections (i.e. with
            # lookaheads stripped off).
            first_item_l = []
            second_item_l = []
            for lr1_item in state.item_l:
                new_tmp_lr0_item = create_new_lr0_item (lr1_item.production, lr1_item.dot, lr1_item.type, lr1_item.isReduceItem)
                first_item_l.append(new_tmp_lr0_item)
            for lr1_item in state_check.item_l:
                new_tmp_lr0_item = create_new_lr0_item (lr1_item.production, lr1_item.dot, lr1_item.type, lr1_item.isReduceItem)
                second_item_l.append(new_tmp_lr0_item)
            if (check_states_equality_for_merge(first_item_l, second_item_l)):
                equal = True
                state_1 = state
                state_2 = state_check
            else:
                equal = False
        if (equal):
            # Build the merged state: same items, lookahead lists unioned.
            new_name = state.name + state_check.name
            new_state = create_new_state(new_name, int(state.name))
            #print("\nmerging "+state.name+" and "+state_check.name)
            for item_1 in state_1.item_l:
                temp_lookaheads = []
                for item_2 in state_2.item_l:
                    if (item_1.production == item_2.production and item_1.dot == item_2.dot and item_1.type == item_2.type and item_1.isReduceItem == item_2.isReduceItem):
                        for LA_1 in item_1.lookAhead:
                            temp_lookaheads.append(LA_1)
                        for LA_2 in item_2.lookAhead:
                            if (LA_2 not in item_1.lookAhead):
                                #print("adding "+LA_2+" to "+item_1.production)
                                temp_lookaheads.append(LA_2)
                        new_item = create_new_lr1_item(item_1.production, item_1.dot, item_1.type, item_1.isReduceItem)
                        set_lookaheads(new_item, temp_lookaheads)
                        new_state.add_item(new_item)
            check_merge_matrix[state.index][state_check.index] = 1
            check_merge_matrix[state_check.index][state.index] = 1
            state.gotMerged = True
            state_check.gotMerged = True
            new_lalr1_states.append(new_state)
            new_state.gotMerged = True
# Assemble the final LALR(1) state list: untouched states pass through,
# merged states are replaced by their merged counterpart (once).
for state in lr1_states:
    if (not state.gotMerged):
        lalr1_states.append(state)
        lalr1_state_counter += 1
    else:
        for new_state in new_lalr1_states:
            if (str(state.name) in new_state.name and new_state not in lalr1_states):
                lalr1_states.append(new_state)
                lalr1_state_counter += 1
# Re-index the LALR states to their position in the new list.
for idx, state in enumerate(lalr1_states):
    if (idx != state.index):
        state.index = idx
# transition update
# Rewrite every LR(1) transition so that endpoints of merged states point to
# the merged state's (concatenated) name, then de-duplicate.
for transition in lr1_transitions:
    new_transition = create_new_transition(lalr1_transition_counter, transition.element, transition.starting_state, transition.ending_state)
    s_state_mod = False
    s_state_name = ""
    e_state_mod = False
    e_state_name = ""
    alreadyIn = False
    # NOTE(review): the substring test `in str(state.name)` can match the
    # wrong state once indices reach two digits -- confirm intended.
    for state in lalr1_states:
        if (str(transition.starting_state) in str(state.name)):
            if (state.gotMerged):
                s_state_mod = True
                s_state_name = state.name
            break
    for state in lalr1_states:
        if (str(transition.ending_state) in str(state.name)):
            if (state.gotMerged):
                e_state_mod = True
                e_state_name = state.name
            break
    if (s_state_mod):
        new_transition.starting_state = s_state_name
    if (e_state_mod):
        new_transition.ending_state = e_state_name
    if (lalr1_transition_counter == 0):
        lalr1_transitions.append(new_transition)
        lalr1_transition_counter += 1
    for lalr_transition in lalr1_transitions:
        if (new_transition.element == lalr_transition.element and new_transition.starting_state == lalr_transition.starting_state and new_transition.ending_state == lalr_transition.ending_state):
            alreadyIn = True
            break
        else:
            alreadyIn = False
    if (not alreadyIn):
        lalr1_transitions.append(new_transition)
        lalr1_transition_counter += 1
print("LALR(1)-states:")
for state in lalr1_states:
    print("\nState " + str(state.name) + ":")
    for element in state.item_l:
        # Rebuild a human-readable "X->a.b" string, inserting the dot at
        # element.dot (or at the end for reduce items).
        prod_to_print = ""
        prod_to_print += element.production[:3]
        if (element.isReduceItem == "Reduce"):
            if (element.production[3] == "#"):
                prod_to_print += "."
            else:
                prod_to_print += element.production[3:]
                prod_to_print += "."
        else:
            idx = 3
            dot_added = False
            while (idx < len(element.production)):
                if (idx != element.dot):
                    prod_to_print += element.production[idx]
                    idx += 1
                elif (idx == element.dot and not dot_added):
                    prod_to_print += "."
                    prod_to_print += element.production[idx]
                    dot_added = True
                else:
                    idx += 1
        print(prod_to_print + ", " + element.type + ", " + element.isReduceItem + ",", element.lookAhead)
print("\nLALR(1)-transitions:")
for transition in lalr1_transitions:
    print(transition.name, transition.element, transition.starting_state, transition.ending_state)
# table Computation
# Header = all terminals followed by all non-terminals; column 0 is the
# blank terminal " " and doubles as the state-name column below.
header = []
for element in terminal_names:
    if element not in header:
        header.append(element)
for element in non_terminal_names:
    if element not in header:
        header.append(element)
lalr1_table = PrettyTable(header)
total_lenght = len(non_terminal_names) + len(terminal_names)
table = [["" for x in range(total_lenght)] for y in range(lalr1_state_counter)]
# LALR(1)-parsing table computation
for idx_row in range(lalr1_state_counter):
    for idx_col in range(total_lenght):
        if (idx_col == 0):
            table[idx_row][idx_col] = lalr1_states[idx_row].name
        else:
            table[idx_row][idx_col] = []
# NOTE(review): the Accept action is hard-wired into row 1 / column "$" --
# confirm state 1 is always the accepting state for this construction.
for idx, element in enumerate(header):
    if (element == "$"):
        table[1][idx].append("Accept")
# Shift ("S<n>") and Goto entries from the transitions.
for transition in lalr1_transitions:
    new_entry = ""
    if (ffc.isNonTerminal(transition.element)):
        new_entry = "Goto " + str(transition.ending_state)
        for idx, element in enumerate(header):
            if (element == transition.element):
                for state in lalr1_states:
                    if (str(transition.starting_state) == str(state.name)):
                        table[state.index][idx].append(new_entry)
    elif (ffc.isTerminal(transition.element)):
        new_entry = "S" + str(transition.ending_state)
        for idx, element in enumerate(header):
            if (element == transition.element):
                for state in lalr1_states:
                    if (str(transition.starting_state) == str(state.name)):
                        table[state.index][idx].append(new_entry)
# Reduce ("R<n>") entries in the lookahead columns of each reduce item.
for state_idx, state in enumerate(lalr1_states):
    for item in state.item_l:
        if ("Q->" not in item.production):
            new_entry = ""
            if (item.isReduceItem == "Reduce"):
                for idx1, production in enumerate(grammar):
                    if (item.production == production[0]):
                        new_entry = "R" + str(idx1+1)
                for idx2, element in enumerate(header):
                    for LA in item.lookAhead:
                        if (element == LA):
                            if (len(new_entry) > 0):
                                table[state_idx][idx2].append(new_entry)
for i in range(lalr1_state_counter):
    lalr1_table.add_row(table[i])
print("\nLALR(1) parsing table of the grammar G:")
print(lalr1_table)
# verify_grammar returns truthy when a table cell holds a conflict,
# i.e. when the grammar is NOT LALR(1).
if (ffc.verify_grammar(table, lalr1_state_counter, total_lenght)):
    print("\nThe grammar G is not LALR(1).")
else:
    print("\nThe grammar G is LALR(1).")
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
#
| amathebest/Parser_Project | OLD_lalr1_parser.py | OLD_lalr1_parser.py | py | 23,754 | python | en | code | 3 | github-code | 13 |
import cv2
import numpy as np
import argparse
def infec_region(img_path):
    """Estimate the percentage of a leaf image showing infected tissue.

    Pipeline: downscale -> mean-shift smoothing -> HSV mask to isolate the
    leaf -> contour-based crop -> hue threshold to isolate diseased pixels.
    Prints the total leaf area, the infected area and their ratio, and
    returns the infection percentage (0 when the leaf area is zero).
    """
    img1 = cv2.imread(img_path)
    # '.\\0d2e2971-f1c9-4278-b35c-91dd8a22a64d___RS_Early.B_7581.JPG')
    # Halve both dimensions to speed up the later filtering steps.
    img = cv2.resize(img1, (0, 0), fx=0.5, fy=0.5)
    original = img.copy()
    neworiginal = img.copy()
    blur1 = cv2.GaussianBlur(img, (3, 3), 1)
    newimg = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    # Mean-shift filtering flattens colour regions before edge detection.
    img = cv2.pyrMeanShiftFiltering(blur1, 20, 30, newimg, 0, criteria)
    blur = cv2.GaussianBlur(img, (11, 11), 1)
    kernel = np.ones((5, 5), np.uint8)
    canny = cv2.Canny(blur, 200, 290)
    res = cv2.morphologyEx(canny, cv2.MORPH_CLOSE, kernel)
    canny = cv2.cvtColor(canny, cv2.COLOR_GRAY2BGR)
    # cv2.imshow('Canny', res)
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Hue 5-70 roughly covers green/yellow leaf colours -- tuned empirically.
    lower = np.array([5, 25, 25])
    upper = np.array([70, 255, 255])
    mask = cv2.inRange(hsv, lower, upper)
    res = cv2.bitwise_and(hsv, hsv, mask=mask)
    gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(
        gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    contours, hierarchy = cv2.findContours(
        thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # Outline sufficiently large contours (area > 1000 px) on the image.
    for i in contours:
        cnt = cv2.contourArea(i)
        #M = cv2.momens(i)
        #cx = int(M['m10']/M['m00'])
        if cnt > 1000:
            cv2.drawContours(img, [i], 0, (0, 0, 255), 2)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(
        gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    contours, hierarchy = cv2.findContours(
        thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # The largest contour is taken to be the whole leaf.
    cnt = max(contours, key=cv2.contourArea)
    Tarea = cv2.contourArea(cnt)
    # cv2.imshow('img', img)
    height, width, _ = canny.shape
    min_x, min_y = width, height
    max_x = max_y = 0
    frame = canny.copy()
    # NOTE(review): cv2 returns hierarchy with shape (1, N, 4), so this zip
    # pairs only its first element and the loop runs once -- confirm intended.
    # Also, roi/originalroi stay unbound if no contour is visited.
    for contour, hier in zip(contours, hierarchy):
        (x, y, w, h) = cv2.boundingRect(contour)
        min_x, max_x = min(x, min_x), max(x+w, max_x)
        min_y, max_y = min(y, min_y), max(y+h, max_y)
        if w >= 0 and h >= 0:
            cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
            roi = img[y:y+h, x:x+w]
            originalroi = original[y:y+h, x:x+w]
    if max_x - min_x > 0 and max_y - min_y > 0:
        cv2.rectangle(frame, (min_x, min_y), (max_x, max_y), (255, 0, 0), 2)
    img = roi
    imghls = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    imghls[np.where((imghls == [30, 200, 2]).all(axis=2))] = [0, 200, 0]
    huehls = imghls[:, :, 0]
    # Remap hue 0 to 35 so black background pixels don't count as infected.
    huehls[np.where(huehls == [0])] = [35]
    # Thresholding on hue image
    # Hues below 28 (brown/red tones) are treated as infected tissue.
    ret, thresh = cv2.threshold(huehls, 28, 255, cv2.THRESH_BINARY_INV)
    # cv2.imshow('thresh', thresh)
    mask = cv2.bitwise_and(originalroi, originalroi, mask=thresh)
    contours, heirarchy = cv2.findContours(
        thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    Infarea = 0
    for x in range(len(contours)):
        cv2.drawContours(originalroi, contours[x], -1, (0, 0, 255), 2)
        # cv2.imshow('Contour masked', originalroi)
        # Calculating area of infected region
        Infarea += cv2.contourArea(contours[x])
    # if Infarea > Tarea:
    #Tarea = img.shape[0]*img.shape[1]
    print('______________________\n| Total area: ' +
          str(Tarea) + ' |\n|____________________|')
    # Finding the percentage of infection in the leaf
    print('\n__________________________\n| Infected area: ' +
          str(Infarea) + ' |\n|________________________|')
    try:
        per = 100 * Infarea/Tarea
    except ZeroDivisionError:
        # Degenerate leaf contour: report 0% rather than crash.
        per = 0
    print('\n_________________________________________________\n| Percentage of infected region: ' +
          str(per) + ' |\n|_______________________________________________|')
    # cv2.imshow('orig', original)
    return per
# per = infec_region(
# '.\\0d2e2971-f1c9-4278-b35c-91dd8a22a64d___RS_Early.B_7581.JPG')
# print(per)
| jayeshk-21/Plant-Disease-Detection-And-Cure | PDD/plant_infection.py | plant_infection.py | py | 4,019 | python | en | code | 0 | github-code | 13 |
"""
้ฎ้ข51
ๅไธไธชๆนๆณ็จไบ่ฎก็ฎ5 / 0, ๅนถ็จtry/exceptๆ่ทๅผๅธธ
"""
def devide(x, y):
    """Print x / y; report the exception instead when the division fails."""
    try:
        quotient = x / y
        print(quotient)
        print('Division ok.')
    except ZeroDivisionError as zero_err:
        print(zero_err)
    except Exception as unexpected_err:
        print(unexpected_err)
    finally:
        print('Division finally.')
# Simple REPL: read two integers per round and show the division outcome.
# NOTE: runs forever (no exit condition), and int() raises ValueError on
# non-numeric input -- acceptable for this exercise script.
while True:
    x = int(input())
    y = int(input())
    devide(x, y)
import logging
from hashlib import sha256
import structlog
from flask import current_app
from google.cloud import storage
from google.cloud.exceptions import NotFound
from application.exceptions import GCPBucketException, RasError
log = structlog.wrap_logger(logging.getLogger(__name__))
class GoogleCloudSEFTCIBucket:
    """Wrapper around a GCS bucket used to store SEFT collection instruments.

    Blobs are written/read with a customer-supplied encryption key (CSEK)
    derived from the ONS_CRYPTOKEY Flask config value, and are optionally
    namespaced under a configured path prefix.
    """

    def __init__(self, config):
        self.project_id = config["GOOGLE_CLOUD_PROJECT"]
        self.bucket_name = config["SEFT_DOWNLOAD_BUCKET_NAME"]
        self.client = storage.Client(project=self.project_id)
        self.bucket = self.client.bucket(self.bucket_name)
        self.prefix = config["SEFT_DOWNLOAD_BUCKET_FILE_PREFIX"]

    def _full_path(self, file_location):
        """Return *file_location* under the configured bucket prefix (if any)."""
        return self.prefix + "/" + file_location if self.prefix != "" else file_location

    def _encryption_key(self):
        """Derive the 32-byte customer-supplied encryption key from Flask config.

        Raises:
            RasError: if ONS_CRYPTOKEY is not configured.
        """
        key = current_app.config.get("ONS_CRYPTOKEY", None)
        if key is None:
            log.error("Customer defined encryption key is missing.")
            raise RasError("can't find customer defined encryption, hence can't perform this task", 500)
        return sha256(key.encode("utf-8")).digest()

    def upload_file_to_bucket(self, file):
        """Encrypt and upload *file* (an object with .filename/.stream) to the bucket."""
        path = self._full_path(file.filename)
        log.info("Uploading SEFT CI to GCP bucket: " + path)
        blob = self.bucket.blob(blob_name=path, encryption_key=self._encryption_key())
        blob.upload_from_file(file_obj=file.stream, rewind=True)
        log.info("Successfully put SEFT CI in bucket")
        return

    def download_file_from_bucket(self, file_location: str):
        """Download and decrypt the blob at *file_location*; return its bytes."""
        path = self._full_path(file_location)
        log.info("Downloading SEFT CI from GCP bucket: " + path)
        blob = self.bucket.blob(blob_name=path, encryption_key=self._encryption_key())
        file = blob.download_as_bytes()
        log.info("Successfully downloaded SEFT CI from GCP bucket")
        return file

    def delete_file_from_bucket(self, file_location: str):
        """Delete a single blob; a missing blob is logged, not raised."""
        path = self._full_path(file_location)
        log.info("Deleting SEFT CI from GCP bucket: " + path)
        try:
            self.bucket.delete_blob(path)
            log.info("Successfully deleted SEFT CI file")
        except NotFound:
            log.error("SEFT CI file not found when attempting to delete")
        return

    def delete_files_by_prefix(self, prefix: str):
        """Delete every blob under *prefix*.

        Raises:
            GCPBucketException: if no blobs match the prefix.
        """
        prefix = self._full_path(prefix)
        try:
            self.bucket.delete_blobs(blobs=list(self.bucket.list_blobs(prefix=prefix)))
        except NotFound:
            raise GCPBucketException(f"No files were found with prefix {prefix} ", 404)
        return
| ONSdigital/ras-collection-instrument | application/models/google_cloud_bucket.py | google_cloud_bucket.py | py | 3,049 | python | en | code | 2 | github-code | 13 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""""
paste.py
~~~~~~~~~~~~~~~~~~~~
:author: wongxinjie
:date created: 2019-08-14 01:04
"""
from flask import request, jsonify
import configs
from api import api
from service.paste import (
srv_create_paste,
srv_get_short_url_content
)
from service.ext import limiter
from service.session import get_user_id
from common.http import abort_400, abort_404
@api.route("/paste", methods=['POST'])
def api_create_paste():
    """Create a paste from the JSON body and return its short URL.

    Body fields: paste_content (required), expiration (optional),
    visible_range (only honoured for logged-in users).
    """
    ip = request.remote_addr
    body = request.get_json()
    paste_content = body.get('paste_content')
    if not paste_content:
        abort_400('content should not be empty')
    expiration = body.get('expiration')
    if expiration:
        # NOTE(review): int() raises ValueError on a non-numeric string --
        # confirm upstream validation, otherwise this becomes a 500.
        if not isinstance(expiration, int):
            expiration = int(expiration)
    user_id = get_user_id()
    if user_id:
        visible_range = body.get('visible_range', 0)
    else:
        # Anonymous pastes carry no visibility setting.
        visible_range = None
    s_url = srv_create_paste(
        ip, paste_content, expiration, user_id, visible_range
    )
    if not s_url:
        abort_400('could not generate short url for content', 10)
    url = '{}/api/paste?u={}'.format(configs.domain, s_url)
    return {'short_url': url, 'u': s_url}
@api.route("/paste", methods=['GET'])
@limiter.limit('1000/hour;100/minute')
def api_get_paste():
    """Resolve a short-url token (?u=...) to its paste; 404 when unknown or not visible."""
    url = request.args.get('u', '')
    user_id = get_user_id()
    payload = srv_get_short_url_content(url, user_id)
    if not payload:
        abort_404('content not found')
    return jsonify(**payload)
| wongxinjie/bitly | api/paste.py | paste.py | py | 1,552 | python | en | code | 0 | github-code | 13 |
import os
# Directories resolved relative to this file so the code runs from any CWD.
input_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'input')
cache_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'cache')
# (train, test) click-file pairs: the full split plus two CV splits.
full_split = (os.path.join(input_dir, 'clicks_train.csv.gz'), os.path.join(input_dir, 'clicks_test.csv.gz'))
cv1_split = (os.path.join(cache_dir, 'clicks_cv1_train.csv.gz'), os.path.join(cache_dir, 'clicks_cv1_test.csv.gz'))
cv1_split_idx = (os.path.join(cache_dir, 'clicks_cv1_train_idx.csv.gz'), os.path.join(cache_dir, 'clicks_cv1_test_idx.csv.gz'))
cv2_split = (os.path.join(cache_dir, 'clicks_cv2_train.csv.gz'), os.path.join(cache_dir, 'clicks_cv2_test.csv.gz'))
# Timestamp cut-offs for the time-based splits (units per the dataset's
# timestamp column -- presumably milliseconds; confirm against the data).
cv1_split_time = 950400000
test_split_time = 1123200000
# Precomputed number of rows in each split file.
row_counts = {
    'cv2_train': 14164401,
    'cv2_test': 6484938,
    'cv1_train': 62252998,
    'cv1_test': 24888733,
    'full_train': 87141731,
    'full_test': 32225162
}
| alno/kaggle-outbrain-click-prediction | util/meta.py | meta.py | py | 892 | python | en | code | 77 | github-code | 13 |
from fnmatch import fnmatch
from os import listdir
from unittest import TestCase
from pylegos.core import FileUtils
class TestFileUtils(TestCase):
    """Unit tests for FileUtils path, directory and file-matching helpers.

    NOTE(review): these tests hard-code absolute paths under
    /Users/gchristiansen and write into /tmp, so they only pass on the
    original author's machine -- consider parameterising the base directory.
    """

    # Shared system-under-test instance used by every test method.
    Sut = FileUtils()

    def test_pathUtils(self):
        # Parent-dir / app-base resolution against this test file's location.
        pd = self.Sut.getParentDir(filePath=__file__)
        self.assertEqual('/Users/gchristiansen/projects/pyLegos/tests/velexio/pylegos/core', pd)
        pd = self.Sut.getAppBase()
        self.assertEqual('/Users/gchristiansen/projects/pyLegos/tests/velexio/pylegos',pd)
        # dirExists: true dir, missing dir, and a file (must not count as a dir).
        self.assertTrue(self.Sut.dirExists('/Users/gchristiansen/projects/pyLegos'),'Method dirExists determined existing dir does not exist')
        self.assertFalse(self.Sut.dirExists('/Users/gchristiansen/projects/pyLegos/xxxpylegos'),'Method dirExists returned True on a non-existent directory')
        self.assertFalse(self.Sut.dirExists('/Users/gchristiansen/projects/pyLegos/pylegos/tests/test_FileUtils.py'),'Method dirExists returned True on a check against a file')
        # fileExists: true file, a dir, and a missing file.
        self.assertTrue(self.Sut.fileExists(__file__), 'Method fileExists returned false file that DOES exist')
        self.assertFalse(self.Sut.fileExists('/Users/gchristiansen/projects/pyLegos/tests'),'Method fileExists returned true on dir')
        self.assertFalse(self.Sut.fileExists('/Users/gchristiansen/projects/pyLegos/tests/xxxx.py'),'Method fileExists returned true file that DOES NOT exist')
        # Create some tmp dirs
        self.Sut.removeDirMatch(dirPath='/tmp', pattern='conf*')
        self.Sut.createDirectory('/tmp/conf')
        self.Sut.createDirectory('/tmp/config')
        self.assertEqual(len(self.Sut.getDirMatches(baseDir='/tmp',pattern='conf*')),2,'Method getDirMatches returns more than expected')
        self.assertEqual(self.Sut.getDirMatches(baseDir='/tmp',pattern='conf')[0],'conf','Method getDirMatches does not return full path')

    def test_DeleteFiles(self):
        # deleteFiles must remove every /tmp file matching the glob.
        testFiles = ['/tmp/app_test1', '/tmp/app_test2']
        for tf in testFiles:
            self.Sut.touchFile(tf)
        self.Sut.deleteFiles(baseDir='/tmp', pattern='app*')
        for file in listdir('/tmp'):
            if fnmatch(file, 'app*'):
                self.fail()

    def test_GetFileMatches(self):
        # The 'app*' glob must match exactly the two files it created
        # (and not 'vapp_test1').
        testFiles = ['/tmp/app_test1', '/tmp/app_test2', '/tmp/vapp_test1']
        for tf in testFiles:
            self.Sut.touchFile(tf)
        fileList = self.Sut.getFileMatches(baseDir='/tmp', pattern='app*')
        self.assertEqual(len(fileList), 2)
| velexio/pyLegos | tests/test_FileUtils.py | test_FileUtils.py | py | 2,450 | python | en | code | 0 | github-code | 13 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Created on 20170528
@author: JohnHuiWB
'''
import numpy as np
class kMeans(object):
    """Plain k-means (Lloyd's algorithm) over a numeric 2-D data set.

    After ``analyze`` runs, ``center()`` returns the (k, n) centroid matrix
    and ``assassment()`` an (m, 2) matrix holding, per sample, the assigned
    cluster index and the squared distance to that centroid.
    """

    def __init__(self):
        self._assassment = 0  # (m, 2): [cluster index, squared distance]
        self._center = 0      # (k, n) centroid matrix
        self._k = 0
        self._data_set = 0

    def _cal_euclidean_distance(self, vec1, vec2):
        # Euclidean distance between two vectors.
        return (np.sum((vec1 - vec2)**2))**0.5

    def _create_center(self):
        # Draw k random centroids, each coordinate uniform within the
        # data set's per-column [min, max] range.
        n = self._data_set.shape[1]
        self._center = np.zeros((self._k, n))
        for j in range(n):
            minJ = self._data_set[:,j].min()
            rangeJ = self._data_set[:,j].max() - minJ
            self._center[:,j] = (minJ + rangeJ * np.random.rand(self._k, 1)).T

    def analyze(self, data_set, k):
        """Cluster *data_set* into *k* groups, iterating until assignments stop changing."""
        self._data_set = np.array(data_set)
        self._k = k
        m, n = self._data_set.shape
        self._assassment = np.zeros((m, 2))
        self._create_center()  # random initial centroids
        center_change = True
        while center_change:
            center_change = False
            for i in range(m):
                # Find the nearest centroid for sample i.
                min_dist = np.inf
                min_indx = -1
                for j in range(self._k):
                    dist = self._cal_euclidean_distance(self._data_set[i,:], self._center[j,:])
                    if dist < min_dist:
                        min_dist = dist
                        min_indx = j
                if int(self._assassment[i, 0]) != min_indx:
                    center_change = True
                self._assassment[i,:] = min_indx, min_dist**2
            # Recompute each centroid as the mean of its assigned samples.
            for c in range(k):
                count = 0
                sum_dist = np.zeros((1, n))
                for i in range(m):
                    if self._assassment[i,0] == c:
                        sum_dist += self._data_set[i,:]
                        count += 1
                # BUGFIX: an empty cluster used to divide by zero, turning the
                # centroid into NaN/inf; keep its previous centroid instead.
                if count > 0:
                    self._center[c,:] = sum_dist / count

    def assassment(self):
        """Return the (m, 2) assignment matrix produced by analyze()."""
        return self._assassment

    def center(self):
        """Return the (k, n) centroid matrix."""
        return self._center

    def predict(self, dataX):
        """Return the index of the centroid closest to *dataX*."""
        dataX = np.array(dataX)
        min_dist = np.inf
        min_indx = -1
        for i in range(self._k):
            dist = self._cal_euclidean_distance(dataX, self._center[i,:])
            if dist < min_dist:
                min_dist = dist
                min_indx = i
        return min_indx
return min_indx
class biKMeans(kMeans):
    """Bisecting k-means: repeatedly 2-splits the cluster whose split
    yields the lowest total SSE, until k clusters exist.

    NOTE: analyze() calls the module-level plot_graph() each round, which
    opens a blocking matplotlib window.
    """

    def __init__(self):
        super().__init__()

    def _create_center(self):
        # Start with a single centroid: the mean of the whole data set.
        self._center = [np.mean(self._data_set, axis = 0).tolist()]

    def analyze(self, data_set, k):
        # bisecting K-means
        self._data_set = np.array(data_set)
        self._k = k
        m, n = self._data_set.shape
        self._assassment = np.zeros((m, 2))
        self._create_center() # build the centroid list (global mean)
        center0 = np.array(self._center[0])
        # Seed column 1 with every sample's squared distance to the mean.
        for i in range(m):
            self._assassment[i, 1] = self._cal_euclidean_distance(center0, self._data_set[i,:])**2
        km = kMeans()
        while self._k > len(self._center):
            lowest_SSE = np.inf
            plot_graph(self._data_set.tolist(), self._assassment[:,0], np.array(self._center))
            # Trial-split every current cluster into 2 and keep the split
            # that minimises total SSE (split part + untouched part).
            for i in range(len(self._center)):
                km.analyze(self._data_set[np.nonzero(self._assassment[:,0]==i)[0], :], 2)
                new_center, new_assassment = km.center(), km.assassment()
                SSE_of_split = np.sum(new_assassment[:,1])
                SSE_of_not_split = np.sum(self._assassment[np.nonzero(self._assassment[:,0]!=i)[0], 1])
                new_SSE = SSE_of_split + SSE_of_not_split
                if new_SSE < lowest_SSE:
                    lowest_SSE = new_SSE
                    best_cluster_to_split = i
                    best_new_center = new_center
                    best_new_assassment = new_assassment.copy()
            # Relabel the winning split: sub-cluster 1 becomes a brand-new
            # cluster index, sub-cluster 0 keeps the split cluster's index.
            best_new_assassment[np.nonzero(best_new_assassment[:,0]==1)[0], 0] = len(self._center)
            best_new_assassment[np.nonzero(best_new_assassment[:,0]==0)[0], 0] = best_cluster_to_split
            self._assassment[np.nonzero(self._assassment[:,0]==best_cluster_to_split)[0], :] = best_new_assassment
            self._center[best_cluster_to_split] = best_new_center[0, :].tolist()
            self._center.append(best_new_center[1, :].tolist())
        plot_graph(self._data_set.tolist(), self._assassment[:,0], np.array(self._center))
def plot_graph(data, labels, center):
    """Scatter-plot 2-D samples coloured by cluster label, with centroids as stars.

    Blocks until the matplotlib window is closed.
    NOTE(review): colors.pop() raises IndexError for more than 5 clusters,
    and only the first two data columns are plotted -- confirm acceptable.
    """
    import matplotlib.pyplot as plt
    import numpy as np
    colors = ['b', 'c', 'm', 'r', 'y']
    data_arr = np.array(data)
    n = np.shape(data_arr)[0]
    # Group the x/y coordinates of each sample under its (int) cluster label.
    allplot = {}
    for i in range(n):
        if labels[i] in allplot:
            allplot[int(labels[i])]['xcord'].append(data_arr[i, 0])
            allplot[int(labels[i])]['ycord'].append(data_arr[i, 1])
        else:
            allplot[int(labels[i])] = {}
            allplot[int(labels[i])]['xcord'] = [data_arr[i, 0]]
            allplot[int(labels[i])]['ycord'] = [data_arr[i, 1]]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    for label in allplot:
        color = colors.pop()
        ax.scatter(allplot[label]['xcord'], allplot[label]['ycord'], s = 30, c = color)
    # Centroids drawn as large black stars on top of the samples.
    plt.scatter(center[:, 0], center[:, 1], c = 'k', marker = '*', s = 600)
    plt.xlabel('X1')
    plt.ylabel('X2')
    plt.show()
import functools as fn
import re
# -- Importing/parsing ---------------------------------------------------------
# The puzzle input is split into blank-line-separated blocks: the seed list
# first, then one block per almanac map.
# NOTE: `input` and `map` shadow builtins -- kept for byte-compatibility.
with open("2023-Python/day-05/input.txt") as input_file:
    input = input_file.read().split("\n\n")
# All integers on the "seeds:" line.
seeds = [int(x) for x in re.findall(r"\d+", input[0])]
# Each map becomes a list of [destination-start, source-start, length] rows
# (the block's first line is its "x-to-y map:" header and is skipped).
maps = [
    [[int(x) for x in map.split()] for map in block.split("\n")[1:]]
    for block in input[1:]
]
# -- Helper functions ----------------------------------------------------------
def range_intersect(x, y):
    """Intersect two ranges; returns [overlap] or [] when they are disjoint."""
    overlap = range(max(x.start, y.start), min(x.stop, y.stop))
    return [overlap] if len(overlap) > 0 else []
def range_difference(x, y):
    """Return the parts of x not covered by y (a list of 0, 1 or 2 ranges)."""
    left = range(x.start, min(x.stop, y.start))
    right = range(max(y.stop, x.start), x.stop)
    return [part for part in (left, right) if len(part) > 0]
def range_shift(x, y):
    """Shift every range in list *x* by offset *y* (empty input passes through)."""
    return [range(r.start + y, r.stop + y, r.step) for r in x] if x else x
def remap(ranges, map):
    """Push *ranges* through one almanac *map*.

    *map* is a list of [dest_start, src_start, length] rows.  Pieces of a
    range covered by a row are shifted by (dest_start - src_start); the
    uncovered pieces are tried against the remaining rows and finally pass
    through unchanged.

    BUG FIX: the original raised NameError on an empty *map* because
    ``static_parts`` was only bound inside the loop; it also concatenated
    ``ranges + static_parts`` while ``static_parts`` was still empty (dead
    code).  (``map`` shadows the builtin but is kept for interface
    compatibility.)
    """
    moved = []
    unmapped = list(ranges)  # pieces not (yet) claimed by any row
    for dest, src, length in map:
        overlap = range(src, src + length)
        remaining = []
        for r in unmapped:
            moved += range_shift(range_intersect(r, overlap), dest - src)
            remaining += range_difference(r, overlap)
        unmapped = remaining
    return moved + unmapped
# -- Part 1 --------------------------------------------------------------------
# Part 1: each seed is a single value, modelled as a length-1 range so that
# both parts can share ``remap``.
seed_ranges = [range(x, x + 1) for x in seeds]
result1 = min(r.start for r in fn.reduce(remap, maps, seed_ranges))
# -- Part 2 --------------------------------------------------------------------
# Part 2: the seed numbers are (start, length) pairs.
seed_ranges2 = [
    range(seeds[i], seeds[i] + seeds[i + 1])
    for i in range(0, len(seeds), 2)
]
result2 = min(r.start for r in fn.reduce(remap, maps, seed_ranges2))
# end of record: wurli/advent-of-code -- 2023-Python/day-05/code.py (python, 1773 bytes)
# record 11166631296
from annoying.decorators import render_to
from .models import Record
from .forms import AddRecordForm
from common import common
# Module authorship metadata (conventional dunder attributes).
__author__ = 'Anna Bomersbach'
__credits__ = ['Anna Bomersbach', 'Tomasz Kubik']
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = 'Anna Bomersbach'
__email__ = "184779@student.pwr.wroc.pl"
__status__ = 'Production'
@render_to('records/user_records.html')
def user_records(request):
    """Render the rental history of the logged-in user.

    Executes the raw SQL produced by ``common.user_items`` to obtain the
    user's rental records.
    """
    # Renamed from ``id`` to avoid shadowing the builtin.
    user_id = request.user.id
    records = Record.objects.raw(common.user_items(user_id))
    return {'records': records}
@render_to('records/add_new.html')
def add_record(request):
    """Handle creation of a new rental record.

    On a valid POST, saves the record only when the user does not already
    rent that item, then executes a raw UPDATE (``common.update_record``)
    to populate the tuple's temporal-data arrays.  A fresh empty form is
    rendered after a valid submission; the bound form is re-rendered when
    validation fails.
    """
    form = AddRecordForm(request.POST or None)
    if form.is_valid():
        obj = form.save(commit=False)
        period = obj.item.period
        # ``exists()`` avoids counting every matching row just to test emptiness.
        already_rented = Record.objects.filter(
            user=request.user.id, item=obj.item.id).exists()
        if not already_rented:
            obj.save()
            common.update_record(request.user.id, obj.item.id, period)
        return {'form': AddRecordForm()}
    return {'form': form}
# record 13155092748
""" Python script to process the output of a set of batch jobs, extracting key information to dump to csv
@author Peter Heywood <p.heywood@sheffield.ac.uk>
"""
import argparse
import re
import os
import sys
import csv
import math
import datetime
import subprocess
import pathlib
from distutils.util import strtobool
from collections import OrderedDict
def user_yes_no_query(question):
    """Prompt on stdout until the user answers yes or no; return True/False.

    Accepts the same tokens as ``distutils.util.strtobool`` (removed in
    Python 3.12, hence the inline reimplementation): y/yes/t/true/on/1
    and n/no/f/false/off/0, case-insensitively.
    """
    # http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input
    sys.stdout.write('%s [y/n]\n' % question)
    truthy = {'y', 'yes', 't', 'true', 'on', '1'}
    falsy = {'n', 'no', 'f', 'false', 'off', '0'}
    while True:
        answer = input().lower()
        if answer in truthy:
            return True
        if answer in falsy:
            return False
        sys.stdout.write('Please respond with \'y\' or \'n\'.\n')
def create_directory(directory):
    """Create *directory* (and missing parents) if needed.

    Returns True when the directory exists afterwards, False (after
    printing an error) when it could not be created.
    """
    if not os.path.isdir(directory):
        try:
            # exist_ok=True also fixes the original's NameError: its handler
            # referenced the ``errno`` module, which this script never imports.
            os.makedirs(directory, exist_ok=True)
        except OSError:
            print("ERROR: Directory `{:}` could not be created. Aborting.".format(directory))
            return False
    return True
class InstrumentationExtractor:
    """Parses FLAMEGPU console-output files and writes the extracted
    instrumentation timings and agent populations to csv, one csv file
    per input file."""

    def __init__(self, args):
        """Initialise from a dict of parsed command-line options."""
        # Set defaults
        self.verbose = False
        self.force = False
        self.pretty = False
        self.input = []
        self.output = None
        # Parse input arguments
        self.process_flags(args)
        self.process_args(args)
        # Prepare other class variables
        self.input_files = []
        self.data = OrderedDict()
        # Validate (expands directories, prepares the output dir)
        self.validate()

    def process_flags(self, args):
        """Copy the boolean flags (verbose/force/pretty) out of *args*."""
        if "verbose" in args:
            self.verbose = args["verbose"]
        if "force" in args:
            self.force = args["force"]
        if "pretty" in args:
            self.pretty = args["pretty"]

    def process_args(self, args):
        """Copy the valued arguments (input list, output directory) out of *args*."""
        if "input" in args and args["input"] is not None:
            self.input = args["input"]
        if "output" in args and args["output"] is not None and len(args["output"]) > 0:
            self.output = args["output"][0]

    def get_num_input_files(self):
        """Return the number of files queued for parsing."""
        return len(self.input_files)

    def validate(self):
        """Expand the input arguments into a flat file list; create the
        output directory when there is anything to process."""
        # For each input argument ensure it exists; otherwise warn and skip it.
        for i in self.input:
            if os.path.exists(i):
                if os.path.isdir(i):
                    # Directories contribute every file below them (recursively).
                    for root, subdirs, files in os.walk(i):
                        for file in files:
                            self.input_files.append(os.path.join(root, file))
                elif os.path.isfile(i):
                    self.input_files.append(i)
            else:
                print("WARNING: Input argument {:} is not a valid file or directory. Ignoring.".format(i))
        # Create the output directory only when there are valid input file(s).
        if len(self.input_files) > 0:
            create_directory(self.output)

    def parse_results(self):
        """Parse every queued file, collecting per-file dicts into self.data."""
        self.data = []
        print("Processing {:} input files".format(self.get_num_input_files()))
        for input_file in self.input_files:
            file_data = self.parse_file(input_file)
            if file_data is not None:
                self.data.append(file_data)

    def parse_file(self, input_file):
        """Parse a single FLAMEGPU console log.

        Returns an OrderedDict of extracted values, or None when the file
        is not FLAMEGPU output.
        """
        is_flamegpu_file = False
        data = OrderedDict()
        data["input_file"] = input_file
        with open(input_file, "r") as f:
            for line in f:
                line = line.rstrip()
                if line.startswith("FLAMEGPU Console mode"):
                    is_flamegpu_file = True
                if line.startswith("Initial states: "):
                    data["initial_states"] = line.replace("Initial states: ", "")
                elif line.startswith("Output dir: "):
                    data["output_dir"] = line.replace("Output dir: ", "")
                elif line.startswith("Device "):
                    data["device_string"] = line.replace("Device ", "")
                elif line.startswith("Total Processing time: "):
                    data["total_processing_time"] = line.replace("Total Processing time: ", "").replace(" (ms)", "")
                if "instrumentation" not in data:
                    data["instrumentation"] = OrderedDict()
                if "population" not in data:
                    data["population"] = OrderedDict()
                if line.startswith("Instrumentation: "):
                    # "Instrumentation: <name> = <millis> (ms)" -> per-name list
                    string = line.replace("Instrumentation: ", "").replace(" (ms)", "")
                    split_string = string.split(" = ")
                    if len(split_string) == 2:
                        k, v = split_string
                        if k not in data["instrumentation"]:
                            data["instrumentation"][k] = []
                        data["instrumentation"][k].append(float(v))
                if line.startswith("agent_") and "_count:" in line:
                    # "agent_<type_state>_count: <n>" -- the latest value wins
                    # (the original dead-initialised a list first).
                    split_line = line.split(": ")
                    agent_type_state = split_line[0].replace("agent_", "").replace("_count", "")
                    data["population"][agent_type_state] = int(split_line[-1])
        if is_flamegpu_file:
            return data
        print("ERROR: File {:} is not flamegpu output".format(input_file))
        return None

    def output_data(self):
        """Dispatch output; currently only csv output is supported."""
        if self.output is not None:
            return self.output_data_csv()
        print("Error: No output file specified")

    def output_data_csv(self):
        """Write one csv per parsed file.

        Returns True only when every file was written successfully
        (the original reset the flag per file, masking earlier failures).
        """
        success = True
        for i, file_data in enumerate(self.data):
            csv_data = []
            fname = "{:}__{:}.csv".format(i, os.path.split(file_data["input_file"])[-1])
            output_file = os.path.normpath(os.path.join(self.output, fname))
            if os.path.isfile(output_file) and not self.force:
                if not user_yes_no_query("Do you wish to overwrite output file {:}".format(output_file)):
                    success = False
                    continue
            try:
                with open(output_file, 'w', newline='') as f:
                    # Prepare the header then one row per iteration.
                    fieldnames = [
                        "filename",
                        "total processing time (ms)",
                        "iteration"
                    ]
                    for agent_type_state in file_data["population"]:
                        fieldnames.append(agent_type_state)
                    for fn in file_data["instrumentation"]:
                        fieldnames.append(fn + " (ms)")
                    # default=0 guards against logs with no instrumentation
                    # lines (max() of an empty sequence raises ValueError).
                    num_iterations = max(
                        (len(file_data["instrumentation"][d]) for d in file_data["instrumentation"]),
                        default=0)
                    for iteration in range(num_iterations):
                        row = []
                        row.append(file_data["input_file"])
                        row.append(file_data["total_processing_time"])
                        row.append(iteration)
                        for agent_type_state in file_data["population"]:
                            row.append(file_data["population"][agent_type_state])
                        for fn in file_data["instrumentation"]:
                            # Pad with None when a function ran fewer iterations.
                            millis = None
                            if iteration < len(file_data["instrumentation"][fn]):
                                millis = file_data["instrumentation"][fn][iteration]
                            row.append(millis)
                        csv_data.append(row)
                    writer = csv.writer(f, delimiter=",")
                    if len(fieldnames) > 0:
                        writer.writerow(fieldnames)
                    for row in csv_data:
                        writer.writerow(row)
            except Exception:
                print("ERROR: File `{:}` could not be opened for writing.".format(output_file))
                success = False
        return success
def main():
    """Parse the CLI arguments, run the extractor, and write the csv output."""
    arg_parser = argparse.ArgumentParser(
        description="Result extractor for benchmarks."
    )
    arg_parser.add_argument(
        "-i",
        "--input",
        type=str,
        nargs="+",
        help="Input files or directories to parse.",
        required=True
    )
    arg_parser.add_argument(
        "-o",
        "--output",
        type=str,
        nargs=1,
        help="Directory for Output, produces one csv file per input file.",
        required=True
    )
    arg_parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        help="increase verbosity of output"
    )
    arg_parser.add_argument(
        "-f",
        "--force",
        action="store_true",
        help="force overwriting of files"
    )
    arg_parser.add_argument(
        "-p",
        "--pretty",
        action="store_true",
        help="Produce pretty-printed output (newlines)"
    )
    args = arg_parser.parse_args()
    # The original rebound ``parser`` to the extractor, shadowing the
    # ArgumentParser above; use a distinct name for clarity.
    extractor = InstrumentationExtractor(vars(args))
    extractor.parse_results()
    extractor.output_data()


if __name__ == "__main__":
    main()
# end of record: ptheywood/flamegpu-instrumentation-extractor -- flamegpu_instrumentation_extractor.py (python, 9966 bytes)
# record 71645247699
"""
Provides Widgets to be used in dialogues and settings, with a standardized function to return the data
"""
from PySide2 import QtWidgets, QtCore
from .widgets import ColorSelectWidget
from utils import style_selector_widgets as styles
class BaseFieldWidget():
    """Mixin defining the common ``data()`` accessor shared by all fields."""

    def data(self):
        """Return the field's current value (``None`` for the base class)."""
        return None
class LineEditField(QtWidgets.QLineEdit, BaseFieldWidget):
    """Single-line text input field."""

    def data(self):
        """Return the current text of the line edit."""
        return self.text()
class TextEditField(QtWidgets.QTextEdit, BaseFieldWidget):
    """Multi-line text input field."""

    def data(self):
        """Return the plain-text contents of the editor."""
        return self.document().toPlainText()
class ComboBoxField(QtWidgets.QComboBox, BaseFieldWidget):
    """Drop-down selection field pre-populated with *options*."""

    def __init__(self, options=(), *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.addItems(options)

    def data(self):
        """Return the text of the currently selected option."""
        return self.currentText()
class ColorSelectField(ColorSelectWidget, BaseFieldWidget):
    """Colour-picker field."""

    def data(self):
        """Return the currently selected colour."""
        return self.selected_color
class SelectMultipleField():
    """Placeholder for a multi-select field.

    TODO: consider moving the label-select widget to this kind of field.
    """
class PasswordField(LineEditField, BaseFieldWidget):
    """Line-edit field that masks the typed characters."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setEchoMode(QtWidgets.QLineEdit.Password)
class TimeField(QtWidgets.QFrame, BaseFieldWidget):
    """hh:mm:ss entry built from three integer spin boxes.

    ``data()`` returns the time as an ``"h:m:s"`` string.
    """

    def __init__(self, hours=0, minutes=0, seconds=0):
        super().__init__()
        self.setLayout(QtWidgets.QHBoxLayout())
        # The three spin boxes are configured identically except for their
        # initial value, so build them with one helper instead of the
        # original triplicated code.
        self.hours_select = self._add_spinbox(hours)
        self.layout().addWidget(QtWidgets.QLabel(":"))
        self.minutes_select = self._add_spinbox(minutes)
        self.layout().addWidget(QtWidgets.QLabel(":"))
        self.seconds_select = self._add_spinbox(seconds)
        self.layout().addWidget(QtWidgets.QLabel("hh:mm:ss"))
        self.setFixedWidth(220)

    def _add_spinbox(self, value):
        """Create, configure and add one zero-decimal spin box; return it."""
        spinbox = QtWidgets.QDoubleSpinBox()
        spinbox.setValue(float(value))
        spinbox.setDecimals(0)
        self.layout().addWidget(spinbox, alignment=QtCore.Qt.AlignLeft)
        return spinbox

    def data(self):
        """Return the entered time as ``"h:m:s"``."""
        return "{}:{}:{}".format(
            int(self.hours_select.value()),
            int(self.minutes_select.value()),
            int(self.seconds_select.value()),
        )
class FilePathField(styles.PrimaryColorWidget, BaseFieldWidget):
    """Clickable widget that opens a file dialog and reports the chosen path.

    ``data()`` returns the selected file, or the initial *file_path* when
    nothing has been picked.
    """

    def __init__(self, file_path=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.file_path = file_path
        self.file_label = QtWidgets.QLabel("Select a File" if file_path is None else file_path)
        self.file_dialog = QtWidgets.QFileDialog()
        self.file_dialog.setFileMode(QtWidgets.QFileDialog.ExistingFile)
        # Keep the label in sync with the dialog's current selection.
        self.file_dialog.currentChanged.connect(lambda path: self.file_label.setText(path))
        self.setLayout(QtWidgets.QHBoxLayout())
        self.layout().addWidget(self.file_label)

    def mousePressEvent(self, event):
        """Open the file dialog on a left click."""
        if event.button() == QtCore.Qt.LeftButton:
            self.file_dialog.exec_()

    def data(self):
        """Return the selected path, falling back to the initial one."""
        selected = self.file_dialog.selectedFiles()
        if selected:
            # (leftover debug ``print`` of the selection removed)
            return selected[0]
        return self.file_path
class SliderField(QtWidgets.QFrame, BaseFieldWidget):
    """Slider with a live "value/maximum" label beside it."""

    def __init__(self, init_value=0, _range=(0, 100), *sliders_args, **slider_kwargs):
        super().__init__()
        self.slider = QtWidgets.QSlider(*sliders_args, **slider_kwargs)
        self.slider.setRange(*_range)
        self.slider.setValue(int(init_value))
        self.label = QtWidgets.QLabel(
            "{}/{}".format(self.slider.value(), self.slider.maximum()))
        # Refresh the label whenever the slider moves.
        self.slider.valueChanged.connect(
            lambda new: self.label.setText("{}/{}".format(new, self.slider.maximum())))
        self.setLayout(QtWidgets.QHBoxLayout())
        self.layout().addWidget(self.label)
        self.layout().addWidget(self.slider)

    def data(self):
        """Return the slider's integer value."""
        return self.slider.value()
class BooleanField(QtWidgets.QCheckBox, BaseFieldWidget):
    """Checkbox whose value round-trips through the strings "True"/"False".

    ``is_checked`` may be a bool or a string.  String parsing is now
    case-insensitive: the original treated any string other than the exact
    literal "False" (including "false"/"FALSE") as truthy.
    """

    def __init__(self, is_checked=False, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if isinstance(is_checked, str):
            is_checked = is_checked.strip().lower() != "false"
        self.setChecked(is_checked)

    def data(self):
        """Return the checked state as the string "True" or "False"."""
        return str(self.isChecked())
# end of record: BoredlyGit/CongressionalAppChallengeEntry -- utils/field_widgets.py (python, 4465 bytes)
# record 38036688798
import os
import uuid
import ROOT
from DCubeUtils import DCubeObject, DCubeException
import unittest
##
# @class DCubePlotter
# @author Krzysztof Daniel Ciba (Krzysztof.Ciba@NOSPAMagh.edu.pl)
# @brief root plotter for DCubeClient package
class DCubePlotter( DCubeObject ):
## DCubePlotter TCanvas
canvas = None
## TPaveText with configuration
configPave = None
## c'tor
# @param self "Me, myself and Irene"
# @param xmldoc DOM XML Document instance
# @param parsed tuple with args and opts from option parser
def __init__( self, xmldoc, parsed):
super( DCubePlotter, self).__init__( self )
self.debug( "constructing DCubePlotter...")
self.xmldoc = xmldoc
self.opts, self.args = parsed
self.outputDir = os.path.join( os.path.dirname( self.opts.output ), "plots" )
self.info("will store all plots in directory %s" % self.outputDir )
try:
if (not os.path.exists( self.outputDir ) ):
os.mkdir( self.outputDir )
except:
self.error("cannot create output plot directory, plot creation will be disabled")
self.opts._update_careful( { "makeplots": False } )
self.__applyTStyle()
## prepare/return ROOT.TCanvas instance
# @param self "Me, myself and Irene"
def __getCanvas( self ):
if ( not self.canvas ):
self.canvas = ROOT.TCanvas( "dcube", "dcube" , 600, 600 )
return self.canvas
    ## set DCube plot draw style
    # @param self "Me, myself and Irene"
    # @param name style name
    def __applyTStyle( self, name="DCube" ):
        """Configure the global ROOT.gStyle used for every DCube plot.

        NOTE(review): the ``name`` parameter is never used in the body.
        """
        self.debug("setting up DCube plot style...")
        # stat/fit/title boxes off; solid fill style
        ROOT.gStyle.SetOptTitle(0)
        ROOT.gStyle.SetOptStat(0)
        ROOT.gStyle.SetOptFit(0)
        ROOT.gStyle.SetFillStyle(1001)
        # markers, fonts and label/title sizes
        ROOT.gStyle.SetMarkerSize( 0.9 )
        ROOT.gStyle.SetMarkerStyle( ROOT.kFullCircle )
        ROOT.gStyle.SetLabelFont(42, "xyz")
        ROOT.gStyle.SetLabelSize(0.03, "xyz")
        ROOT.gStyle.SetTitleFont(42, "xyz")
        ROOT.gStyle.SetTitleFontSize(0.04)
        ROOT.gStyle.SetTitleFont(42, "a")
        ROOT.gStyle.SetFuncStyle(1)
        ROOT.gStyle.SetFuncWidth(2)
        ROOT.gStyle.SetHistLineColor( ROOT.kBlack )
        ROOT.gStyle.SetHistFillColor( ROOT.kRed )
        ROOT.gStyle.SetHistLineWidth(1)
        # pad/canvas borders, margins and grid
        ROOT.gStyle.SetPadBorderSize(1)
        ROOT.gStyle.SetPadBorderMode(0)
        ROOT.gStyle.SetPadLeftMargin( 0.1 )
        ROOT.gStyle.SetPadBottomMargin( 0.1 )
        ROOT.gStyle.SetPadRightMargin( 0.1 )
        ROOT.gStyle.SetPadTopMargin( 0.1 )
        ROOT.gStyle.SetCanvasBorderSize(1)
        ROOT.gStyle.SetCanvasBorderMode(0)
        ROOT.gStyle.SetGridStyle(3)
        ROOT.gStyle.SetGridWidth(1)
        # date stamp drawn on every plot
        ROOT.gStyle.SetOptDate(21)
        ROOT.gStyle.GetAttDate().SetTextFont(42)
        ROOT.gStyle.GetAttDate().SetTextSize(0.025)
        ROOT.gStyle.SetLegendBorderSize(1)
        # colours
        ROOT.gStyle.SetTextColor( ROOT.kBlack )
        ROOT.gStyle.GetAttDate().SetTextColor( ROOT.kBlack )
        ROOT.gStyle.SetLabelColor(ROOT.kBlack, "xyz")
        ROOT.gStyle.SetTitleColor(ROOT.kBlack, "xyz")
        ROOT.gStyle.SetFillColor( ROOT.kWhite )
        ROOT.gStyle.SetMarkerColor( ROOT.kBlue )
        ROOT.gStyle.SetCanvasColor( ROOT.kWhite )
        ROOT.gStyle.SetFrameFillColor(ROOT.kWhite)
        ROOT.gStyle.SetPadColor( ROOT.kGray )
        ROOT.gStyle.SetTitleColor(390, "a")
        ROOT.gStyle.SetFuncColor(ROOT.kOrange)
        ROOT.gStyle.SetGridColor(1)
        # default canvas size; ForceStyle propagates the style to objects
        # subsequently read from files
        ROOT.gStyle.SetCanvasDefH( 800 )
        ROOT.gStyle.SetCanvasDefW( 800 )
        ROOT.gROOT.ForceStyle(1)
        self.debug("done!")
## get unique plot name
# @param self "Me, myself and Irene"
def __plotName( self, what="" ):
return what + "_" + str(uuid.uuid4()) + ".png"
## plot dispatcher
# @param self "Me, myself and Irene"
# @param node DOM XML element
# @param mon monitored object
# @param ref reference object
def plot( self, node, mon=None, ref=None ):
self.mon = self.ref = self.node = None
self.debug("plot called for object of name " + node.getAttribute("name") )
self.node = node
self.mon = mon
self.ref = ref
if ( not self.mon ):
self.error("monitored object not found, creation of plots skipped!")
return "FAIL"
self.name = self.mon.GetName()
self.title = self.mon.GetTitle()
if ( not self.ref ):
self.warn("reference object not found, not all plots will be produced")
self.className = mon.Class().GetName()
status = "OK"
if ( "TGraph" in self.className ):
status = self.__plotGraph()
elif ( "TH1" in self.className ):
status = self.__plotTH1()
elif ( "TH2" in self.className ):
status = self.__plotTH2()
elif ( "TProfile" == self.className ):
status = self.__plotProf1D()
elif ( "TProfile2D" == self.className ):
status = self.__plotProf2D()
else:
self.error( "unsupported object name=%s class=%s" % ( self.name, self.className ) )
status = "FAIL"
return status
## save TCanvas to png file
# @param self "Me, myself and Irene"
# @param type string with type attribute
# @param plot plot file naem prefix
# @return status string
def __saveAs( self, canvas, type, plot ):
try:
plotName = self.__plotName( plot )
absPath = os.path.join( self.outputDir, plotName )
canvas.Update()
#canvas.SaveAs( absPath )
canvas.SaveAs( absPath )
imgNode = self.xmldoc.createElement( "img" )
imgNode.setAttribute( "src" , os.path.join( "plots", plotName ) )
imgNode.setAttribute( "type", type )
if ( os.path.exists(absPath) ):
self.debug("plot '%s' has been created" % absPath )
self.node.appendChild( imgNode )
return "OK"
else:
self.warn("problem when saving plot '%s' to file" % absPath )
return "WARN"
except:
self.warn("unknown error when plotting %s for %s" % ( type, self.name ) )
return "WARN"
## plotter for TGraph, TGraphError, TGraphAsymmError etc.
# @param self "Me, myself and Irene"
def __plotGraph( self ):
status = [ "OK" ]
self.debug("plotting %s of type %s" % ( self.name, self.className ) )
graph = ROOT.TMultiGraph()
canvas = self.__getCanvas()
canvas.cd()
xmin, xmax, ymin, ymax = self.__getMinMaxTGraph( self.mon, self.ref )
dx = abs(xmax - xmin) * 0.15
dy = abs(ymax - ymin)
ROOT.gPad.DrawFrame( xmin - dx, ymin - dy * 0.4, xmax + dx, ymax + dy*1.4)
ROOT.gStyle.SetPadBottomMargin(0.2)
#self.mon.SetMarkerStyle( ROOT.kFullCircle )
self.mon.SetMarkerColor( ROOT.kRed )
self.mon.SetMarkerSize( 0.9 )
self.mon.SetFillColor( ROOT.kRed )
self.mon.SetFillStyle( 3005 )
self.mon.SetLineColor( ROOT.kRed )
graph.Add( self.mon, "P")
if ( self.ref ):
#self.ref.SetMarkerStyle( ROOT.kFullCircle )
self.ref.SetMarkerColor( ROOT.kBlue )
self.ref.SetMarkerSize( 0.9 )
self.ref.SetFillColor( ROOT.kBlue )
self.ref.SetFillStyle( 3004 )
self.ref.SetLineColor( ROOT.kBlue )
graph.Add( self.ref, "P")
graph.Draw("P")
legend = self.__legend()
legend.Draw()
titlePave = self.__titlePave()
titlePave.Draw()
configPave = self.__configPave()
configPave.Draw()
status.append( self.__saveAs( canvas, "reg", "gr" ) )
if ( None not in ( self.ref, self.mon ) ):
canvas.Clear()
diffGraph = ROOT.TGraph()
diffGraph.SetMarkerColor( ROOT.kRed )
monN = self.mon.GetN()
refN = self.ref.GetN()
monXraw = self.mon.GetX()
monYraw = self.mon.GetY()
refXraw = self.ref.GetX()
refYraw = self.ref.GetY()
points = {}
for i in range( monN ):
if ( monXraw[i] not in points ):
points [ monXraw[i] ] = monYraw[i]
for i in range( refN ):
if ( refXraw[i] not in points ):
points[ refXraw[i] ] = refYraw[i]
else:
points[ refXraw[i] ] = points[ refXraw[i] ] - refYraw[i]
i = 0
for x in sorted( points.keys() ):
diffGraph.SetPoint(i, x, points[x] )
i += 1
xl = xmin - dx
xr = xmax + dx
xmin, xmax, ymin, ymax = self.__getMinMaxTGraph( diffGraph )
dy = abs(ymax - ymin)
if ( ymin > ymax ):
a = ymin
ymin = ymax
ymax = ymin
ROOT.gPad.DrawFrame( xl, ymin - (dy * 0.4), xr, ymax + (dy*1.4) )
diffGraph.SetTitle( self.mon.GetTitle() )
diffGraph.SetName( self.mon.GetName() )
diffGraph.Draw("P")
titlePave = self.__titlePave("diff (mon - ref)")
titlePave.Draw()
configPave = self.__configPave()
configPave.Draw()
status.append( self.__saveAs( canvas, "dif", "gd" ) )
return self.__getStatus( status )
    ## get xmin, xmax, ymin, ymax for monitored and reference TGraphs
    # @param self "Me, myself and Irene"
    # @param mon monitored graph
    # @param ref optional reference graph of the same type
    def __getMinMaxTGraph( self, mon=None, ref=None ):
        """Return (xmin, xmax, ymin, ymax) over both graphs.

        For a plain TGraph only the point coordinates are scanned; for the
        error-bar variants the asymmetric x/y errors are folded in.
        NOTE(review): ROOT.Double is deprecated in modern ROOT (replaced by
        ctypes.c_double) -- confirm against the ROOT version in use.
        """
        xmin = xmax = ymin= ymax = None
        if ( mon ):
            # out-parameters filled by TGraph.GetPoint / error getters
            x = ROOT.Double(0)
            y = ROOT.Double(0)
            exl = ROOT.Double(0)
            exr = ROOT.Double(0)
            eyl = ROOT.Double(0)
            eyh = ROOT.Double(0)
            if ( self.className == "TGraph" ):
                for i in range( mon.GetN() ):
                    mon.GetPoint(i, x, y)
                    if ( xmin == None ): xmin = ROOT.Double(x)
                    if ( xmax == None ): xmax = ROOT.Double(x)
                    if ( ymin == None ): ymin = ROOT.Double(y)
                    if ( ymax == None ): ymax = ROOT.Double(y)
                    if ( x < xmin ): xmin = x
                    if ( x > xmax ): xmax = x
                    if ( y < ymin ): ymin = y
                    if ( y > ymax ): ymax = y
                if ( ref ):
                    for i in range( ref.GetN() ):
                        ref.GetPoint(i, x, y)
                        if ( xmin == None ): xmin = ROOT.Double(x)
                        if ( xmax == None ): xmax = ROOT.Double(x)
                        if ( ymin == None ): ymin = ROOT.Double(y)
                        if ( ymax == None ): ymax = ROOT.Double(y)
                        if ( x < xmin ): xmin = x
                        if ( x > xmax ): xmax = x
                        if ( y < ymin ): ymin = y
                        if ( y > ymax ): ymax = y
            else:
                # error-bar graphs: extend the extrema by the error bars
                # NOTE(review): exl/exr are named "low/high" swapped relative
                # to the getters, but the arithmetic below uses them correctly.
                for i in range( mon.GetN() ):
                    mon.GetPoint(i, x, y)
                    exl = mon.GetErrorXhigh( i )
                    exr = mon.GetErrorXlow( i )
                    eyh = mon.GetErrorYhigh( i )
                    eyl = mon.GetErrorYlow( i )
                    xl = x - exr
                    xr = x + exl
                    yl = y - eyl
                    yh = y + eyh
                    if ( xmin == None ): xmin = ROOT.Double(xl)
                    if ( xmax == None ): xmax = ROOT.Double(xr)
                    if ( ymin == None ): ymin = ROOT.Double(yl)
                    if ( ymax == None ): ymax = ROOT.Double(yh)
                    if ( xl < xmin ): xmin = xl
                    if ( xr > xmax ): xmax = xr
                    if ( yl < ymin ): ymin = yl
                    if ( yh > ymax ): ymax = yh
                if ( ref ):
                    for i in range( ref.GetN() ):
                        ref.GetPoint(i, x, y)
                        exl = ref.GetErrorXhigh( i )
                        exr = ref.GetErrorXlow( i )
                        eyh = ref.GetErrorYhigh( i )
                        eyl = ref.GetErrorYlow( i )
                        xl = x - exr
                        xr = x + exl
                        yl = y - eyl
                        yh = y + eyh
                        if ( xmin == None ): xmin = ROOT.Double(xl)
                        if ( xmax == None ): xmax = ROOT.Double(xr)
                        if ( ymin == None ): ymin = ROOT.Double(yl)
                        if ( ymax == None ): ymax = ROOT.Double(yh)
                        if ( xl < xmin ): xmin = xl
                        if ( xr > xmax ): xmax = xr
                        if ( yl < ymin ): ymin = yl
                        if ( yh > ymax ): ymax = yh
        return ( xmin, xmax, ymin, ymax)
    ## plotter for TH1 histograms
    # @param self "Me, myself and Irene"
    def __plotTH1( self ):
        """Overlay mon and ref 1D histograms, then draw a difference plot.

        NOTE(review): the difference is Add(ref, mon, 1, -1), i.e. ref - mon,
        while __plotGraph labels its diff "mon - ref" -- confirm the intended
        sign convention.
        """
        status = [ "OK" ]
        self.debug("plotting %s of type %s" % ( self.name, self.className ) )
        canvas = self.__getCanvas()
        titlePave = self.__titlePave( )
        title = "%s;%s;%s" % ( self.mon.GetTitle(),
                               self.mon.GetXaxis().GetTitle(),
                               self.mon.GetYaxis().GetTitle() )
        stack = ROOT.THStack( "dcube stack", title )
        self.mon.SetFillStyle( 3004 )
        self.mon.SetLineColor( ROOT.kRed )
        stack.Add( self.mon )
        ymax = self.mon.GetMaximum()
        if ( self.ref ):
            self.ref.SetFillColor( ROOT.kBlue )
            self.ref.SetLineColor( ROOT.kBlue )
            self.ref.SetFillStyle( 3005 )
            stack.Add( self.ref )
            if ( ymax < self.ref.GetMaximum() ): ymax = self.ref.GetMaximum()
        # head-room so the legend/paves do not overlap the histograms
        stack.SetMaximum( 1.4 * ymax )
        config = self.__configPave()
        legend = self.__legend()
        canvas.cd()
        stack.Draw( "NOSTACK" )
        titlePave.Draw()
        config.Draw()
        legend.Draw()
        pvaluePave = self.__pvaluePave()
        if ( pvaluePave ): pvaluePave.Draw()
        status.append( self.__saveAs( canvas, "reg", "h1r" ) )
        # difference histogram (all-zero when no reference is available)
        diffHist = self.mon.Clone()
        diffHist.Reset()
        diffHist.SetTitle( "diff %s" % title )
        diffHist.SetLineColor( ROOT.kRed )
        if ( self.ref ):
            diffHist.Add( self.ref, self.mon, 1.0, -1.0 )
        else:
            diffHist.Add( self.mon, self.mon, 1.0, -1.0 )
        canvas.Clear()
        canvas.cd()
        titlePave = self.__titlePave( "diff" )
        configPave = self.__configPave()
        ymax = diffHist.GetMaximum()
        if ( ymax ) : diffHist.SetMaximum( 1.4 * ymax )
        diffHist.Draw()
        titlePave.Draw()
        configPave.Draw()
        status.append( self.__saveAs( canvas, "dif", "h1d" ) )
        return self.__getStatus( status )
## plotter for TH2 histograms
# @param self "Me, myself and Irene"
def __plotTH2( self ):
status = [ "OK" ]
self.debug( "plotting %s of type %s" % ( self.name, self.className) )
canvas = self.__getCanvas()
title = "%s;%s;%s;%s" % ( self.mon.GetTitle(),
self.mon.GetXaxis().GetTitle(),
self.mon.GetYaxis().GetTitle(),
self.mon.GetZaxis().GetTitle() )
configPave = self.__configPave( )
titlePave = self.__titlePave()
pvaluePave = self.__pvaluePave()
if ( self.ref ):
bottom = self.mon.Clone()
top = self.mon.Clone()
same = self.mon.Clone()
bottom.Reset()
bottom.SetLineColor( ROOT.kBlack )
bottom.SetFillColor( ROOT.kBlue )
bottom.SetLineWidth( 2 )
top.Reset()
top.SetLineColor( ROOT.kBlack )
top.SetFillColor( ROOT.kRed )
top.SetLineWidth(2)
same.Reset()
same.SetLineColor( ROOT.kBlack )
same.SetFillColor( ROOT.kGreen )
same.SetLineWidth( 2)
for i in range(self.ref.GetNbinsX()+1):
for j in range(self.ref.GetNbinsY()+1 ) :
refBin = self.ref.GetBinContent(i, j)
monBin = self.mon.GetBinContent(i, j)
if ( refBin < monBin ):
top.SetBinContent(i, j, monBin - refBin)
bottom.SetBinContent( i, j, refBin )
elif ( refBin > monBin ):
bottom.SetBinContent( i, j, refBin - monBin)
top.SetBinContent(i, j, monBin)
else:
same.SetBinContent(i, j, refBin )
stack = ROOT.THStack( "dcube stack", title )
stack.Add( same )
stack.Add( bottom )
stack.Add( top )
#stack.UseCurrentStyle()
legend = ROOT.TLegend(0.1, 0.80, 0.45, 0.72, "", "NDC")
legend.SetTextFont(102)
legend.SetTextSize(0.02)
legend.SetTextColor(1)
legend.SetBorderSize(1)
legend.AddEntry( top, "monitored", "F" )
legend.AddEntry( bottom, "reference", "F" )
legend.AddEntry( same, "same", "F")
canvas.cd()
stack.Draw( "lego1 nostack" )
titlePave.Draw()
configPave.Draw()
legend.Draw()
if ( pvaluePave ): pvaluePave.Draw()
canvas.Draw()
else:
canvas.cd()
self.mon.Draw()
configPave.Draw()
titlePave.Draw()
if ( pvaluePave ): pvaluePave.Draw()
canvas.Draw()
status.append( self.__saveAs( canvas, "reg", "h2r" ) )
# make diff plot
diffHist = self.mon.Clone()
diffHist.Reset()
diffHist.SetFillColor( ROOT.kRed )
diffHist.SetLineColor( ROOT.kBlack )
if ( self.ref ):
diffHist.Add( self.ref, self.mon, 1.0, -1.0 )
else:
diffHist.Add( self.mon, self.mon, 1.0, -1.0 )
canvas.Clear()
diffHist.Draw("LEGO1 0")
titlePave = self.__titlePave( "diff" )
titlePave.Draw()
configPave.Draw()
if ( pvaluePave ): pvaluePave.Draw()
status.append( self.__saveAs( canvas, "dif", "h2d" ) )
# make projection x plot
canvas.Clear()
projX = ROOT.THStack("projectionX", "projectionX")
monPX = self.mon.ProjectionX( "mon px" )
monPX.SetFillStyle( 3004 )
monPX.SetLineColor( ROOT.kRed )
projX.Add( monPX )
ymax = monPX.GetMaximum()
if ( self.ref ):
refPX = self.ref.ProjectionX( "ref px")
refPX.SetFillStyle( 3005 )
refPX.SetFillColor( ROOT.kBlue )
refPX.SetLineColor( ROOT.kBlue )
if ( ymax < refPX.GetMaximum() ):
ymax = self.ref.GetMaximum()
projX.Add( refPX )
projX.SetMaximum( ymax * 1.4 )
canvas.cd()
projX.Draw("NOSTACK")
titlePave = self.__titlePave( "proj X")
titlePave.Draw()
if ( pvaluePave ): pvaluePave.Draw()
configPave.Draw()
legend = ROOT.TLegend(0.1, 0.80, 0.45, 0.74, "", "NDC")
legend.SetTextFont(102)
legend.SetTextSize(0.02)
legend.SetTextColor(1)
legend.SetBorderSize(1)
legend.AddEntry( monPX, "mon projection X", "F" )
legend.AddEntry( refPX, "ref projection X", "F" )
legend.Draw()
status.append( self.__saveAs( canvas, "prx", "h2x" ) )
# make projection y plot
canvas.Clear()
projY = ROOT.THStack("projectionY", "projectionY")
monPY = self.mon.ProjectionY( "mon py" )
monPY.SetFillStyle( 3004 )
monPY.SetLineColor( ROOT.kRed )
projY.Add( monPX )
ymax = monPY.GetMaximum()
if ( self.ref ):
refPY = self.ref.ProjectionX( "ref py")
refPY.SetFillStyle( 3005 )
refPY.SetFillColor( ROOT.kBlue )
refPY.SetLineColor( ROOT.kBlue )
if ( ymax < refPY.GetMaximum() ):
ymax = refPY.GetMaximum()
projY.Add( refPY )
projY.SetMaximum( ymax * 1.4 )
canvas.cd()
projY.Draw("NOSTACK")
titlePave = self.__titlePave( "proj Y")
titlePave.Draw()
configPave.Draw()
if ( pvaluePave ): pvaluePave.Draw()
legend = ROOT.TLegend(0.1, 0.80, 0.45, 0.74, "", "NDC")
legend.SetTextFont(42)
legend.SetTextSize(0.02)
legend.SetTextColor(1)
legend.SetBorderSize(1)
legend.AddEntry( monPX, "mon projection Y", "F" )
legend.AddEntry( refPX, "ref projection Y", "F" )
legend.Draw()
status.append( self.__saveAs( canvas, "pry", "h2y" ) )
return self.__getStatus( status )
    ## plotter for 1D TProfile
    # @param self "Me, myself and Irene"
    def __plotProf1D( self ):
        """Overlay mon (red) and ref (blue) TProfiles, then draw a
        ref - mon difference profile."""
        status = [ ]
        self.debug("plotting %s of type %s" % ( self.name, self.className ) )
        canvas = self.__getCanvas()
        titlePave = self.__titlePave()
        configPave = self.__configPave()
        legend = self.__legend()
        self.mon.SetMarkerColor( ROOT.kRed )
        self.mon.SetMarkerSize( 0.9 )
        self.mon.SetFillColor( ROOT.kRed )
        self.mon.SetFillStyle( 3005 )
        self.mon.SetLineColor( ROOT.kRed )
        ymax = self.mon.GetMaximum()
        if ( self.ref ):
            self.ref.SetMarkerColor( ROOT.kBlue )
            self.ref.SetMarkerSize( 0.9 )
            self.ref.SetFillColor( ROOT.kBlue )
            self.ref.SetFillStyle( 3004 )
            self.ref.SetLineColor( ROOT.kBlue )
            if ( ymax < self.ref.GetMaximum() ):
                ymax = self.ref.GetMaximum()
        # head-room so the legend/paves do not overlap the profiles
        self.mon.SetMaximum( 1.4 * ymax )
        self.mon.Draw()
        if ( self.ref ): self.ref.Draw( "same" )
        titlePave.Draw()
        configPave.Draw()
        legend.Draw()
        status.append( self.__saveAs( canvas, "reg", "p1r" ) )
        canvas.Clear()
        canvas.cd()
        # difference profile (all-zero when no reference is available)
        diffProfile = self.mon.Clone()
        diffProfile.Reset()
        if ( self.ref ):
            diffProfile.Add( self.ref, self.mon, 1.0, -1.0 )
        else:
            diffProfile.Add( self.mon, self.mon, 1.0, -1.0 )
        titlePave = self.__titlePave( "diff" )
        diffProfile.Draw()
        titlePave.Draw()
        configPave.Draw()
        status.append( self.__saveAs( canvas, "dif", "p1d" ) )
        return self.__getStatus( status )
    ## plotter for TProfile2D
    # @param self "Me, myself and Irene"
    def __plotProf2D( self ):
        """Plot TProfile2D mon/ref (converted to TH2D) as a stacked lego,
        plus a ref - mon difference plot."""
        status = [ ]
        self.debug("plotting %s of type %s" % ( self.name, self.className ) )
        canvas = self.__getCanvas()
        titlePave = self.__titlePave()
        configPave = self.__configPave()
        # NOTE(review): this legend is replaced below when a reference exists,
        # and never drawn otherwise.
        legend = self.__legend()
        self.mon.SetMarkerColor( ROOT.kRed )
        self.mon.SetMarkerSize( 0.9 )
        self.mon.SetFillColor( ROOT.kRed )
        self.mon.SetFillStyle( 3005 )
        self.mon.SetLineColor( ROOT.kBlack )
        # convert the profile to a plain TH2D so it can be stacked/added
        self.mon = ROOT.TH2D( self.mon )
        title = "%s;%s;%s;%s" % ( self.mon.GetTitle(),
                                  self.mon.GetXaxis().GetTitle(),
                                  self.mon.GetYaxis().GetTitle(),
                                  self.mon.GetZaxis().GetTitle() )
        # NOTE(review): ymax is computed but never used in this method
        ymax = self.mon.GetMaximum()
        if ( self.ref ):
            self.ref = ROOT.TH2D( self.ref )
            # split each bin into mon-excess / ref-excess / identical parts
            bottom = self.ref.Clone()
            top = self.ref.Clone()
            same = self.ref.Clone()
            bottom.Reset()
            bottom.SetLineColor( ROOT.kBlack )
            bottom.SetFillColor( ROOT.kBlue )
            bottom.SetLineWidth( 2 )
            top.Reset()
            top.SetLineColor( ROOT.kBlack )
            top.SetFillColor( ROOT.kRed )
            top.SetLineWidth(2)
            same.Reset()
            same.SetLineColor( ROOT.kBlack )
            same.SetFillColor( ROOT.kGreen )
            same.SetLineWidth( 2 )
            for i in range(self.ref.GetNbinsX()+1):
                for j in range(self.ref.GetNbinsY()+1 ) :
                    refBin = self.ref.GetBinContent(i, j)
                    monBin = self.mon.GetBinContent(i, j)
                    if ( refBin < monBin ):
                        top.SetBinContent(i, j, monBin - refBin)
                        bottom.SetBinContent( i, j, refBin )
                    elif ( refBin > monBin ):
                        bottom.SetBinContent( i, j, refBin - monBin)
                        top.SetBinContent(i, j, monBin)
                    else:
                        same.SetBinContent(i, j, refBin )
            stack = ROOT.THStack( "dcube stack", title )
            stack.Add( same )
            stack.Add( bottom )
            stack.Add( top )
            legend = ROOT.TLegend(0.1, 0.80, 0.45, 0.72, "", "NDC")
            legend.SetTextFont(102)
            legend.SetTextSize(0.02)
            legend.SetTextColor(1)
            legend.SetBorderSize(1)
            legend.AddEntry( top, "monitored", "F" )
            legend.AddEntry( bottom, "reference", "F" )
            legend.AddEntry( same, "same", "F")
            canvas.cd()
            stack.Draw( "lego1 0 nostack" )
            titlePave.Draw()
            configPave.Draw()
            legend.Draw()
            pvaluePave = self.__pvaluePave()
            if ( pvaluePave ): pvaluePave.Draw()
            canvas.Draw()
        else:
            canvas.cd()
            self.mon.Draw()
            configPave.Draw()
            titlePave.Draw()
            canvas.Draw()
        status.append( self.__saveAs( canvas, "reg", "p2r" ) )
        canvas.Clear()
        canvas.cd()
        # difference plot (all-zero when no reference is available)
        diffProfile = self.mon.Clone()
        diffProfile.Reset()
        diffProfile.SetFillColor( ROOT.kRed )
        diffProfile.SetLineColor( ROOT.kBlack )
        if ( self.ref ):
            diffProfile.Add( self.ref, self.mon, 1.0, -1.0 )
        else:
            diffProfile.Add( self.mon, self.mon, 1.0, -1.0 )
        titlePave = self.__titlePave( "diff" )
        diffProfile.Draw( "lego1 0")
        titlePave.Draw()
        configPave.Draw()
        status.append( self.__saveAs( canvas, "dif", "p2d" ) )
        return self.__getStatus( status )
## get status string from a list of strings
# @param self "Me, myself and Irene"
# @param sl list of strings
def __getStatus( self, sl ):
if ( "FAIL" in sl ): return "FAIL"
if ( "WARN" in sl ): return "WARN"
return "OK"
## runtime configuration pave
# @param self "Me, myself and Irene"
    def __configPave( self ):
        """Return (lazily building and caching) the TPaveText that shows the
        runtime configuration (branch, install, cmtconfig, project, jobId)
        taken from self.opts.
        """
        # Build only once; subsequent calls reuse the cached pave.
        if ( not self.configPave ):
            self.configPave = ROOT.TPaveText( 0.1, 0.9, 0.45, 0.8, "NDC" )
            self.configPave.SetBorderSize( 1 )
            self.configPave.SetTextColor( 1 )
            self.configPave.SetTextSize( 0.02 )
            self.configPave.SetTextFont( 102 )
            self.configPave.SetTextAlign( 12 )
            self.configPave.AddText( "branch: " + self.opts.branch )
            self.configPave.AddText( "install: " + self.opts.install )
            self.configPave.AddText( "cmt: " + self.opts.cmtconfig )
            self.configPave.AddText( "project: " + self.opts.project )
            self.configPave.AddText( "jobId: " + self.opts.jobId )
        return self.configPave
## p-value pave
# @param self "Me, myself and Irene"
def __pvaluePave( self ):
pvaluePave = ROOT.TPaveText( )
pvaluePave.SetBorderSize( 1 )
pvaluePave.SetTextColor( 1 )
pvaluePave.SetTextSize( 0.02 )
pvaluePave.SetTextFont( 42 )
pvaluePave.SetTextAlign( 22 )
lines = 0
pvalues = self.node.getElementsByTagName( "pvalue" )
for pvalue in pvalues:
test = pvalue.getAttribute( "test" )
status = pvalue.getAttribute( "status" )
pval = self.__getCData( pvalue.childNodes )
text = "p-value = %s" % pval
if test == "chi2": text = "#chi^{2} %s" % text
else:
text = "%-3s %s" % (test, text)
text = pvaluePave.AddText( text )
if ( status == "FAIL" ):
text.SetTextColor( ROOT.kRed )
if ( status == "WARN" ):
text.SetTextColor( ROOT.kOrange )
lines += 1
if ( lines ):
Y2NDC = 0.9 - (lines*0.021)
pvaluePave.SetX1NDC( 0.69 )
pvaluePave.SetX2NDC( 0.9 )
pvaluePave.SetY1NDC( 0.9 )
pvaluePave.SetY2NDC( 0.9 - (lines*0.021) )
return pvaluePave
## title pave
# @param self "Me, myself and Irene"
    def __titlePave( self, what="" ):
        """Build the grey title banner across the top of the canvas.

        :param what: label of the plot flavour ("reg", "dif", ...);
                     defaults to "normal" when empty.
        """
        titlePave = ROOT.TPaveText( 0.0, 1.0, 1.0, 0.93 , "NDC")
        titlePave.SetBorderSize( 0 )
        titlePave.SetTextColor( ROOT.kBlack )
        titlePave.SetTextSize( 0.028 )
        titlePave.SetTextFont( 42 )
        titlePave.SetFillColor( ROOT.kGray )
        titlePave.SetTextAlign( 12 )
        titlePave.AddText( "title: %s" % self.title )
        #titlePave.AddText( "name: %s" % self.name )
        if ( what == "" ): what = "normal"
        titlePave.AddText( "%s plot" % what )
        return titlePave
## plot legend
# @param self "Me, myself and Irene"
    def __legend( self ):
        """Build the monitored/reference legend for the current plot.

        Histograms ("TH*" classes) use filled boxes ("f"); any other class
        (e.g. graphs) uses point+error-bar markers ("pe").  The reference
        entry is added only when a reference object exists.
        """
        legend = ROOT.TLegend(0.1, 0.80, 0.45, 0.75, "", "NDC")
        legend.SetTextFont(42)
        legend.SetTextSize(0.02)
        legend.SetTextColor( ROOT.kBlack )
        legend.SetBorderSize(1)
        if ( "TH" in self.className ):
            legend.AddEntry( self.mon, "monitored", "f" )
            if ( self.ref ): legend.AddEntry( self.ref, "reference", "f" )
        else:
            legend.AddEntry( self.mon, "monitored", "pe")
            if ( self.ref ): legend.AddEntry( self.ref, "reference", "pe")
        return legend
## CDATA getter
# @param self "Me, myself and Irene"
# @param nodeList list of DOM nodes
def __getCData( self, nodeList ):
cdata = ""
for node in nodeList:
if ( node.nodeType == node.TEXT_NODE ):
cdata += node.data
return cdata
##
# @class test_DCubePlotter
# @author Krzysztof Daniel Ciba (Krzysztof.Ciba@NOSPAMagh.edu.pl)
# @brief test case for DCubePlotter
class test_DCubePlotter( unittest.TestCase ):
    """Test case for DCubePlotter construction."""
    ## test case setup
    # @param self "Me, myself and Irene"
    def setUp( self ):
        # Initialise the attribute so the assertion in test_01_ctor reports
        # a clean failure instead of raising AttributeError when the
        # constructor throws (previously the attribute was never set).
        self.plotter = None
    ## c'tor
    # @param self "Me, myself and Irene"
    def test_01_ctor( self ):
        try:
            self.plotter = DCubePlotter()
        except Exception:
            # Narrowed from a bare except; failure is reported below.
            pass
        self.assertEqual( isinstance( self.plotter, DCubePlotter), True )
# test suite execution
if __name__ == "__main__":
    # Build the suite from the test case above and run it verbosely.
    testLoader = unittest.TestLoader()
    suite = testLoader.loadTestsFromTestCase(test_DCubePlotter)
    unittest.TextTestRunner(verbosity=3).run(suite)
| rushioda/PIXELVALID_athena | athena/Tools/RunTimeTester/testsuite/DCubeClient-00-00-21/python/DCubePlotter.py | DCubePlotter.py | py | 32,574 | python | en | code | 1 | github-code | 13 |
70549487378 | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy import Column, Integer, String, Numeric, DateTime, Boolean, ForeignKey, func
from sqlalchemy.orm import relationship
Base = declarative_base()  # shared declarative base for the ORM models below
# Concrete Transaction class; expected to be rebound by the application to a
# subclass of TransactionBase before make_transaction() is used.
Transaction = None
class TransactionBase(Base):
    """Abstract base of a money transfer between chains of accounts.

    Each transaction owns a set of Destination rows; rows with
    direction=False are the source side, direction=True the target side.
    """
    __abstract__ = True
    @declared_attr
    def __tablename__(self):
        return 'transactions'
    id = Column(Integer, primary_key=True)
    amount = Column(Numeric(18, 2))
    date = Column(DateTime)
    canceled = Column(Boolean)
    cancel_desc = Column(String(500))
    @declared_attr
    def accounts(self):
        return relationship('Destination', cascade="all, delete, delete-orphan")
    def cancel(self, desc=None):
        """Mark the transaction as canceled, storing an optional reason."""
        self.canceled = True
        # BUG FIX: the reason was previously written to a non-existent
        # attribute `self.desc`; the mapped column is `cancel_desc`.
        self.cancel_desc = desc
    def __repr__(self):
        return "<%s -> %s: %f at %s>" % (repr(self.from_accs), repr(self.to_accs),
                                         self.amount, self.date)
    @property
    def from_accs(self):
        """Source accounts, intermediate hops first, end account last."""
        accs = [r for r in self.accounts if not r.direction]
        return [r.account for r in sorted(accs, key=lambda r: r.is_end)]
    @property
    def to_accs(self):
        """Target accounts, intermediate hops first, end account last."""
        accs = [r for r in self.accounts if r.direction]
        return [r.account for r in sorted(accs, key=lambda r: r.is_end)]
class Destination(Base):
    """Association row linking a transaction to one account on one side."""
    __tablename__ = 'destinations'
    tid = Column(Integer, ForeignKey('transactions.id'), primary_key=True)
    direction = Column(Boolean)  # False = source ("from"), True = target ("to")
    account = Column(String(20), primary_key=True)
    is_end = Column(Boolean)  # True for the final account of the chain
    def __repr__(self):
        # e.g. "[-> 1234!]" — arrow encodes direction, "!" marks the end account
        return "[%s %s%s]" % (['<-', '->'][self.direction], self.account, '!' if self.is_end else '')
def _build_destinations(accounts, direction):
    """Build Destination rows for one side of a transaction.

    All accounts are intermediate hops except the last one, which is
    flagged with is_end=True.
    """
    dests = [Destination(direction=direction, account=a, is_end=False)
             for a in accounts[:-1]]
    dests.append(Destination(direction=direction, account=accounts[-1], is_end=True))
    return dests
def make_transaction(acc_from, acc_to, amount, date=None):
    """Create (but do not persist) a Transaction moving *amount* between
    the account chains *acc_from* and *acc_to*.

    :param acc_from: non-empty list of source account ids
    :param acc_to: non-empty list of target account ids
    :param date: transaction timestamp; defaults to now
    :raises ValueError: if either account list is empty (previously this
        crashed with an opaque IndexError)
    """
    if not acc_from or not acc_to:
        raise ValueError("acc_from and acc_to must each contain at least one account")
    date = date or datetime.now()
    t = Transaction()
    t.amount = amount
    t.canceled = False
    t.date = date
    t.accounts = _build_destinations(acc_from, False) + _build_destinations(acc_to, True)
    return t
class Balance(object):
    """Summary of turnover on an account: debit, credit and their difference."""
    def __init__(self, debet, kredit):
        self.debet = debet
        self.kredit = kredit
        self.balance = self.debet - self.kredit
    def __repr__(self):
        return "<+: %f -: %f =: %f>" % (self.debet, self.kredit, self.balance)
def month_report(session, account_id_list, dt=None):
    """Monthly summary per account: opening balance ('before'), month debit
    and credit turnover, and closing balance ('after').

    :param session: SQLAlchemy session
    :param account_id_list: account ids to report on
    :param dt: any datetime inside the month of interest; defaults to now
    :return: {account_id: {'before', 'debet', 'kredit', 'after'}}
    """
    dt = dt or datetime.now()
    # [dt_before, dt_after) bounds the calendar month containing dt;
    # adding 32 days always lands in the next month.
    dt_before = datetime(dt.year, dt.month, 1)
    dt_after = (dt_before + timedelta(days=32)).replace(day=1)
    # Turnover strictly before the month (for the opening balance).
    rbefore = session.query(Destination.account, Destination.direction, func.sum(Transaction.amount))\
        .join(Transaction.accounts)\
        .filter(Destination.account.in_(account_id_list))\
        .filter(Transaction.canceled == False)\
        .filter(Transaction.date < dt_before)\
        .group_by(Destination.account, Destination.direction)
    # Turnover inside the month.
    rafter = session.query(Destination.account, Destination.direction, func.sum(Transaction.amount))\
        .join(Transaction.accounts)\
        .filter(Transaction.canceled == False)\
        .filter(Destination.account.in_(account_id_list))\
        .filter(Transaction.date >= dt_before)\
        .filter(Transaction.date < dt_after)\
        .group_by(Destination.account, Destination.direction)
    def get_debet_kredit(seq):
        # Fold (account, direction, sum) rows into {account: [debet, kredit]}.
        result = {}
        for aid, dir, amount in seq:
            if aid not in result:
                result[aid] = [0, 0]
            if dir:
                result[aid][0] = amount
            else:
                result[aid][1] = amount
        return result
    result = {}
    for aid in account_id_list:
        result[aid] = {'kredit': 0, 'debet': 0, 'after': 0, 'before': 0}
    # NOTE: .iteritems() — this module is Python 2 code.
    for aid, (d, k) in get_debet_kredit(rbefore).iteritems():
        rr = result[aid]
        rr['before'] = rr['after'] = d - k
    for aid, (d, k) in get_debet_kredit(rafter).iteritems():
        rr = result[aid]
        rr['debet'] = d
        rr['kredit'] = k
        rr['after'] = rr['before'] + d - k
    return result
def balance(session, aid, date_from=None, date_to=None):
    '''
    Return the balance of account *aid*, optionally restricted to the
    half-open period [date_from, date_to).  Canceled transactions are
    excluded.
    @return: Balance
    '''
    q = session.query(Destination.direction, func.sum(Transaction.amount))\
        .join(Transaction.accounts)\
        .filter(Destination.account == aid)\
        .filter(Transaction.canceled == False)
    if date_from:
        q = q.filter(Transaction.date >= date_from)
    if date_to:
        q = q.filter(Transaction.date < date_to)
    result = q.group_by(Destination.direction).all()
    kredit = debet = 0
    # At most one row per direction thanks to the GROUP BY.
    for r in result:
        if r[0]:
            debet = r[1]
        else:
            kredit = r[1]
    return Balance(debet, kredit)
def balances(id_list):
    '''
    Return balances for the given account ids as {id: Balance}.
    NOTE(review): this still calls Transaction.get_db().view(...), a
    CouchDB-style API, while the rest of the module uses SQLAlchemy —
    looks like leftover code from a previous backend; verify before use.
    @return: list of Balance
    '''
    return dict((r['key'], Balance(r['value']['debet'], r['value']['kredit']))
                for r in Transaction.get_db().view('transactions/balance', keys=id_list, group=True))
def day_report(session, aid, date_from=None, date_to=None):
    """Per-day debit/credit totals for account *aid*.

    :param session: SQLAlchemy session
    :param aid: account id
    :param date_from, date_to: optional [from, to) bounds
    :return: list of (day, Balance) tuples, where *day* is the
        date_trunc('day', ...) value returned by the database
    """
    q = session.query(func.date_trunc('day', Transaction.date), Destination.direction,
                      func.sum(Transaction.amount))\
        .join(Transaction.accounts)\
        .filter(Destination.account == aid)\
        .filter(Transaction.canceled == False)
    if date_from:
        q = q.filter(Transaction.date >= date_from)
    if date_to:
        q = q.filter(Transaction.date < date_to)
    result = q.group_by(func.date_trunc('day', Transaction.date), Destination.direction)
    data = []
    kredit = debet = 0
    last_data = None
    for r in result:
        # New day encountered: flush the accumulated totals of the previous one.
        if last_data is not None and last_data != r[0]:
            data.append((last_data, Balance(debet, kredit)))
            kredit = debet = 0
        last_data = r[0]
        if r[1]:
            debet = r[2]
        else:
            kredit = r[2]
    # BUG FIX: only flush the trailing day when at least one row was seen;
    # the original unconditionally appended (None, Balance(0, 0)) for an
    # empty result set.
    if last_data is not None:
        data.append((last_data, Balance(debet, kredit)))
    return data
def transactions(session, aid, date_from=None, date_to=None, income=False, outcome=False):
    """Build (but do not execute) a query for transactions touching *aid*.

    :param date_from, date_to: optional [from, to) bounds
    :param income: restrict to incoming transactions (direction=True)
    :param outcome: restrict to outgoing transactions (direction=False)
        When both flags are set (or both unset) no direction filter is applied.
    :return: SQLAlchemy query object
    """
    q = session.query(Transaction)\
        .join(Transaction.accounts)\
        .filter(Destination.account == aid)
    if date_from:
        q = q.filter(Transaction.date >= date_from)
    if date_to:
        q = q.filter(Transaction.date < date_to)
    if income and not outcome:
        q = q.filter(Destination.direction == True)
    elif outcome and not income:
        q = q.filter(Destination.direction == False)
    return q
| baverman/taburet | taburet/transactions/model.py | model.py | py | 6,801 | python | en | code | 1 | github-code | 13 |
17042301544 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.DeliveryAgencyMerchantInfo import DeliveryAgencyMerchantInfo
class AlipayMarketingActivityDeliverychannelQueryModel(object):
    """Auto-generated request model for the Alipay
    ``alipay.marketing.activity.deliverychannel.query`` API.

    Follows the SDK-wide convention: private backing fields, property
    accessors, and dict conversion via to_alipay_dict/from_alipay_dict.
    """
    def __init__(self):
        self._belong_merchant_info = None
        self._booth_code = None
        self._page_num = None
        self._page_size = None
    @property
    def belong_merchant_info(self):
        return self._belong_merchant_info
    @belong_merchant_info.setter
    def belong_merchant_info(self, value):
        # Accept either a ready model object or a plain dict to convert.
        if isinstance(value, DeliveryAgencyMerchantInfo):
            self._belong_merchant_info = value
        else:
            self._belong_merchant_info = DeliveryAgencyMerchantInfo.from_alipay_dict(value)
    @property
    def booth_code(self):
        return self._booth_code
    @booth_code.setter
    def booth_code(self, value):
        self._booth_code = value
    @property
    def page_num(self):
        return self._page_num
    @page_num.setter
    def page_num(self, value):
        self._page_num = value
    @property
    def page_size(self):
        return self._page_size
    @page_size.setter
    def page_size(self, value):
        self._page_size = value
    def to_alipay_dict(self):
        """Serialize the populated fields into a plain dict.

        NOTE(review): truthiness tests mean falsy values (e.g. page_num=0)
        are silently omitted — this matches the SDK-wide generated pattern.
        """
        params = dict()
        if self.belong_merchant_info:
            if hasattr(self.belong_merchant_info, 'to_alipay_dict'):
                params['belong_merchant_info'] = self.belong_merchant_info.to_alipay_dict()
            else:
                params['belong_merchant_info'] = self.belong_merchant_info
        if self.booth_code:
            if hasattr(self.booth_code, 'to_alipay_dict'):
                params['booth_code'] = self.booth_code.to_alipay_dict()
            else:
                params['booth_code'] = self.booth_code
        if self.page_num:
            if hasattr(self.page_num, 'to_alipay_dict'):
                params['page_num'] = self.page_num.to_alipay_dict()
            else:
                params['page_num'] = self.page_num
        if self.page_size:
            if hasattr(self.page_size, 'to_alipay_dict'):
                params['page_size'] = self.page_size.to_alipay_dict()
            else:
                params['page_size'] = self.page_size
        return params
    @staticmethod
    def from_alipay_dict(d):
        """Deserialize a dict into a model instance; None for empty input."""
        if not d:
            return None
        o = AlipayMarketingActivityDeliverychannelQueryModel()
        if 'belong_merchant_info' in d:
            o.belong_merchant_info = d['belong_merchant_info']
        if 'booth_code' in d:
            o.booth_code = d['booth_code']
        if 'page_num' in d:
            o.page_num = d['page_num']
        if 'page_size' in d:
            o.page_size = d['page_size']
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AlipayMarketingActivityDeliverychannelQueryModel.py | AlipayMarketingActivityDeliverychannelQueryModel.py | py | 2,811 | python | en | code | 241 | github-code | 13 |
20806875648 | from pulp import *
# Giapetto LP: maximize profit from chairs (x1) and tables (x2) subject to
# finishing and carpentry hour limits, then visualise the feasible region.
prob = LpProblem("Giapetto", LpMaximize) # Create a LP maximization problem
x1 = LpVariable("x1", lowBound=0) # Create a variable x1 >= 0
x2 = LpVariable("x2", lowBound=0) # Create another variable x2 >= 0
prob += 20*x1 + 30*x2 # Objective function: profit per chair/table
prob += 1*x1 + 2*x2 <= 100 # Finishing hours
prob += 2*x1 + 1*x2 <= 100 # Carpentry hours
print(prob) # Display the LP problem
status = prob.solve() # Solve with the default solver
print(LpStatus[status]) # Print the solution status
print(value(x1), value(x2), value(prob.objective)) # Show the solution
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.path import Path
from matplotlib.patches import PathPatch
# Use seaborn to change the default graphics to something nicer
import seaborn as sns
# And set a nice color palette
sns.set_color_codes('deep')
# Create the plot object
fig, ax = plt.subplots(figsize=(5, 4))
# NOTE: x1 is rebound here from an LpVariable to a numpy array of x-values.
x1 = np.linspace(0, 100)
# Add finishing constraint: x2 <= 100/2 - x1/2
plt.plot(x1, 100/2 - x1/2, linewidth=3, label='Finishing constraint')
plt.fill_between(x1, 0, 100/2 - x1/2, alpha=0.1)
# Add carpentry constraint: x2 <= 100 - 2*x1 (comment fixed; it previously
# said "x2 <= 80 - x1", which did not match the plotted expression)
plt.plot(x1, 100 - 2*x1, linewidth=3, label='Carpentry constraint')
plt.fill_between(x1, 0, 100 - 2*x1, alpha=0.1)
# Add non-negativity constraints
plt.plot(np.zeros_like(x1), x1, linewidth=3, label='$x_1$ Sign restriction')
plt.plot(x1, np.zeros_like(x1), linewidth=3, label='$x_2$ Sign restriction')
#====================================================
# This part is different from giapetto_feasible.py
# Plot the possible (x1, x2) pairs
pairs = [(x1, x2) for x1 in np.arange(101)
         for x2 in np.arange(101)
         if (x1 + 2*x2) <= 100
         and (2*x1 + x2) <= 100]
# Split these into our variables
chairs, tables = np.hsplit(np.array(pairs), 2)
# Caculate the objective function at each pair
z = 20*chairs + 30*tables
# Plot the results
plt.scatter(chairs, tables, c=z, cmap='jet', edgecolor='gray', alpha=0.5, label='Profit at each point', zorder=3)
# Colorbar
cb = plt.colorbar()
cb.set_label('Profit Colormap ($)')
#====================================================
# Labels and stuff
plt.xlabel('Chairs')
plt.ylabel('Tables')
plt.xlim(-0.5, 100)
plt.ylim(-0.5, 100)
plt.legend()
plt.show()
| DigvijayRed/LPP-Problems | LPP1.py | LPP1.py | py | 2,371 | python | en | code | 0 | github-code | 13 |
26789807061 | # -*- coding: UTF-8 -*-
from pyecharts import Map
# Values and the matching Guangdong city names to render on the map.
value =[20, 190, 253, 77, 65]
attr =['ๆฑๅคดๅธ', 'ๆฑๅฐพๅธ', 'ๆญ้ณๅธ', '้ณๆฑๅธ', '่ๅบๅธ']
# Renamed from `map` to avoid shadowing the builtin.
gd_map = Map("ๅนฟไธๅฐๅพ็คบไพ", width=1200, height=600)
# BUG FIX: the original passed empty attr/value lists to add(), so the data
# prepared above was never rendered on the map.
gd_map.add("", attr, value, maptype=u'ๅนฟไธ', is_visualmap=True, visual_text_color='#000')
gd_map.show_config()
gd_map.render()
23319881726 | from flask import Flask,Response
import logging
log = logging.getLogger('werkzeug')
log.disabled = True
app = Flask(__name__)
@app.route('/')
def hello():
return Response('hello world', mimetype='text/plain')
if __name__ == "__main__":
app.run(port=8080, debug=False) | siimp/benchmarks | simple/flask-simple/app.py | app.py | py | 285 | python | en | code | 0 | github-code | 13 |
23392019450 | import glob
import os
import setuptools
# Read the long description from README.rst next to this setup.py,
# regardless of the current working directory.
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'README.rst'), 'r') as f:
    readme = f.read()
setuptools.setup(
    name='rpn_calculator',
    version='1.2.1.3',
    description='RPN calculator for CLI',
    long_description=readme,
    url='https://github.com/massongit/rpn-calculator',
    author='Masaya SUZUKI',
    author_email='suzukimasaya428@gmail.com',
    license='MIT',
    keywords='RPN Calculator',
    packages=setuptools.find_packages(),
    # Install every executable found under bin/ as a console script.
    scripts=glob.glob('bin/*'),
    install_requires=[
        'six==1.12.0',
        'zenhan==0.5.2'
    ],
    classifiers=[
        'Environment :: Console',
        'Programming Language :: Python :: 3.7'
    ]
)
| massongit/rpn-calculator | setup.py | setup.py | py | 734 | python | en | code | 0 | github-code | 13 |
3027907072 | #!/usr/bin/python
import sys
import re
path = "input.txt"  # default puzzle-input location (Advent of Code 2018, day 3)
def day3(input_path="input.txt"):
    """Advent of Code 2018 day 3, part 1.

    Count the square inches of fabric claimed by two or more claims.
    Each input line looks like ``#id @ x,y: WxH``; the five integers are
    extracted in order (id, x, y, width, height).

    :param input_path: claims file (new optional parameter; defaults to
        the original hard-coded "input.txt")
    :return: the overlap count (also printed, preserving the original
        script output)
    """
    grid = [[' ' for _ in range(1000)] for _ in range(1000)]
    # Use a context manager: the original left the file handle open.
    with open(input_path) as claims:
        for line in claims:
            _claim_id, x, y, lenx, leny = [int(s) for s in re.findall(r'\d+', line)]
            for i in range(leny):
                for j in range(lenx):
                    if grid[x+j][y+i] == ' ':
                        grid[x+j][y+i] = '.'      # claimed once
                    elif grid[x+j][y+i] == '.':
                        grid[x+j][y+i] = 'X'      # claimed twice or more
    overlaps = sum(row.count('X') for row in grid)
    print(overlaps)
    return overlaps
def day3_2(input_path="input.txt"):
    """Advent of Code 2018 day 3, part 2.

    Find the claim id(s) that overlap no other claim.

    :param input_path: claims file (new optional parameter; defaults to
        the original hard-coded "input.txt")
    :return: list of non-overlapping claim ids (also printed, preserving
        the original script output)
    """
    grid = [[[] for _ in range(1000)] for _ in range(1000)]
    control = []
    with open(input_path) as claims:
        for line in claims:
            # Renamed from `id`, which shadowed the builtin.
            claim_id, x, y, lenx, leny = [int(s) for s in re.findall(r'\d+', line)]
            for i in range(leny):
                for j in range(lenx):
                    grid[x+j][y+i].append(claim_id)
            control.append(claim_id)
    # Remove every claim that shares a cell with another claim.
    for y in range(1000):
        for x in range(1000):
            if len(grid[x][y]) > 1:
                for item in grid[x][y]:
                    if item in control:
                        control.remove(item)
    print(control)
    return control
if __name__ == "__main__":
    # Solve both parts against the default input file.
    day3()
    day3_2()
| MartinsGabrielC/AdventOfCode2018 | 3/day3.py | day3.py | py | 1,103 | python | en | code | 0 | github-code | 13 |
21786347690 | # builtin
import importlib
# site
import colorama
class VoluxDemo:
    """A named, runnable demo with an optional list of import requirements."""

    def __init__(
        self, demo_name, demo_method, alias, requirements=None, *args, **kwargs
    ):
        """
        :param demo_name: human-readable demo name
        :param demo_method: zero-argument callable executed by run()
        :param alias: short identifier used in repr()
        :param requirements: module names to verify in _check_reqs();
            BUG FIX: previously defaulted to a mutable list shared
            between all instances.
        """
        self._name = demo_name
        self._method = demo_method
        self._alias = alias
        self._requirements = requirements if requirements is not None else []

    def run(self):
        """Execute the demo callable."""
        self._method()

    def __repr__(self):
        return "<VoluxDemo '{}'>".format(self._alias)

    def _check_reqs(self):
        """Verify every requirement is importable; print hints and exit if not."""
        failed_imports = []
        for req in self._requirements:
            try:
                importlib.import_module(req)
            except ImportError:
                failed_imports.append(req)
        if len(failed_imports) > 0:
            print(
                "{}Error: unable to start demo, you're missing some requirements: {}{}".format(
                    colorama.Fore.RED,
                    ", ".join(failed_imports),
                    colorama.Style.RESET_ALL,
                )
            )
            print(
                "{}Tip: try seeing if the package/s are available in pip ('pip search {}'){}".format(
                    colorama.Fore.YELLOW,
                    " ".join(failed_imports),
                    colorama.Style.RESET_ALL,
                )
            )
            exit()
| DrTexx/Volux | volux/demo.py | demo.py | py | 1,683 | python | en | code | 7 | github-code | 13 |
70728396818 | from tkinter import *
class Window(Frame):
    """Main application frame: sets the window title and adds a Quit button."""
    def __init__(self, master = None):
        Frame.__init__(self, master)
        self.master = master  # keep a handle on the Tk root
        # build the widgets
        self.init_window()
    def init_window(self):
        """Lay out the frame's widgets."""
        # set the title of the enclosing window
        self.master.title("GUI")
        # fill the whole window in both directions
        self.pack(fill = BOTH, expand = 1)
        # create a button instance
        # NOTE(review): the button has no command bound, so clicking it
        # does nothing — presumably a later tutorial step wires it up.
        quitButton = Button(self, text = "Quit")
        # place it at the top-left corner
        quitButton.place(x = 0, y = 0)
root = Tk()
# fixed initial window size (width x height)
root.geometry("400x300")
app = Window(root)
root.mainloop()
32970544554 | from __future__ import unicode_literals
from django.db import models
from django.core.urlresolvers import reverse
# Create your models here.
class Article(models.Model):
    """A blog article, ordered newest-first."""
    title = models.CharField(max_length = 100) #title
    category = models.CharField(max_length = 50, blank = True) #category
    date_time = models.DateTimeField(auto_now_add = True) #creation timestamp
    content = models.TextField(blank = True, null = True) #body text
    # python2 __unicode__
    # python3 __str__
    def __unicode__(self):
        return self.title
    class Meta: # sort by time in descending order
        ordering = ['-date_time']
    # build the absolute URL of the article's detail page
    # NOTE(review): the host/port is hard-coded to the dev server.
    def get_absolute_url(self):
        path = reverse('detail', kwargs={'id':self.id})
        return "http://127.0.0.1:8000%s" % path
| 392110851/django_blog | django_blog/blog/models.py | models.py | py | 772 | python | en | code | 0 | github-code | 13 |
31478111029 | from aiogram import types, Dispatcher
from aiogram.types import ParseMode, InlineKeyboardMarkup, InlineKeyboardButton
from config import bot, dp
#from keyboards import client_kb
#@dp.message_handler(commands=['start'])
async def start_handler(message: types.Message):
    """Handle /start: greet the user by full name."""
    await bot.send_message(message.from_user.id,
                           f"ะะพะฑัะพ ะฟะพะถะฐะปะพะฒะฐัั ะฝะฐ ัะพั {message.from_user.full_name}")
#@dp.message_handler(commands=['help'])
async def help_handler(message: types.Message):
    """Handle /help: list all available bot commands."""
    await bot.send_message(message.from_user.id,
                           f"/start - ะกัะฐัั ะฑะพัะฐ\n "
                           f"/mem - ะพัะฟัะฐะฒะปัะตั mem\n "
                           f"/menu - ะพะฟัะพั ะณััะผะฐะฝะฐ\n "
                           f"/quiz - ะฒะธะบัะพัะธะฝะฐ\n "
                           f"/help - ะฟะพะผะพัั ะฒ ัััะดะธั\n "
                           f"/game - ะธะณัะฐ ")
#@dp.message_handler(commands=['mem'])
async def mem_handler(message: types.Message):
    """Handle /mem: reply with the sticker stored at media/1.png."""
    # Use a context manager so the file handle is closed after sending
    # (the original opened the file and never closed it).
    with open('media/1.png', 'rb') as stick:
        await bot.send_sticker(message.chat.id, sticker=stick)
#@dp.message_handler(commands=['quiz'])
async def quest_handler(message: types.Message):
    """Handle /quiz: send a single-question quiz poll with a NEXT button."""
    markup = InlineKeyboardMarkup()
    button_call_1 = InlineKeyboardButton("NEXT", callback_data='button_call_1')
    markup.add(button_call_1)
    question = "ะะฐะบะพะน ะฟัะตะฒะดะพะฝะธะผ ั Marshall Marshall Bruce Mathers III?"
    answers = [
        'Snoop Dog', "50 CENT", "Jay-Z", "Eminem"
    ]
    await bot.send_poll(
        chat_id=message.chat.id,
        question=question,
        options=answers,
        is_anonymous=False,
        type='quiz',
        correct_option_id=3,
        explanation="ะขั ะฝะต ัะฐัะธัั ะฒ ะผัะทัะบะต",
        explanation_parse_mode=ParseMode.MARKDOWN_V2,
        reply_markup=markup
    )
def register_handlers_client(dp: Dispatcher):
    """Register all client command handlers on the dispatcher."""
    dp.register_message_handler(start_handler, commands=['start'])
    dp.register_message_handler(help_handler, commands=['help'])
    dp.register_message_handler(mem_handler, commands=['mem'])
    dp.register_message_handler(quest_handler, commands=['quiz'])
| kairatnurmakhan/bot-19 | handlers/client.py | client.py | py | 2,164 | python | en | code | 0 | github-code | 13 |
3114869587 | import logging
import re
from bs4 import BeautifulSoup
from oxint.utils.TimeUtils import TimeUtils
from oxint.utils.URLUtils import URLUtils
class ScrapInfocif:
    """Scraper for company data published on www.infocif.es."""

    INFOCIF_URL_BASE = "http://www.infocif.es"

    # Mapping from the Spanish labels shown on the page to the normalized
    # keys used in the returned dictionary (replaces a long if/elif chain).
    _FIELD_NAMES = {
        'CIF': 'cif',
        'Antigรผedad': 'since',
        'Domicilio': 'address',
        'Telรฉfono': 'phone',
        'Web': 'web',
        'Registro': 'register',
        'Sector': 'sector',
        'Nยบ de empleados': 'num_employees',
        'Matriz': 'parent_company',
        'Estado:': 'state',
        'Informaciรณn Crediticia:': 'credit_information',
    }

    def search_company_by_cif(self, cif: str):
        """Look up a company by CIF and scrape its information page.

        :param cif: company CIF; None returns None without any request
        :return: dict with the normalized company fields plus a
            "positions" list and a "last_update" timestamp, or None
        """
        company_info = None
        if cif is not None:
            url = self.INFOCIF_URL_BASE + f"/general/empresas-informacion-listado-empresas.asp?Buscar={cif}"
            logging.debug(f"URL: {url}")
            html = URLUtils.get_html_from_url(url)
            soup = BeautifulSoup(html, "html.parser")
            # Field labels and values of the "Company information" section.
            company_info_field_names = ScrapInfocif.__get_company_information_field_names(soup)
            company_info_field_values = ScrapInfocif.__get_company_information_field_values(soup)
            company_info = ScrapInfocif.__mix_field_names_with_values(company_info_field_names,
                                                                      company_info_field_values)
            company_info["positions"] = ScrapInfocif.__get_company_positions(soup)
        return company_info

    @staticmethod
    def __get_company_information_field_names(soup):
        """Collect the normalized field names of the company-info section.

        Labels live in <strong> tags of two different column classes;
        the first-column labels are collected before the second-column ones.
        """
        headers = []
        for css_class in ("col-md-2 col-sm-3 col-xs-12 cb fwb",
                          "col-md-4 col-sm-4 col-xs-12 cb fwb"):
            for header in soup.find_all("strong", {"class": css_class}):
                headers.append(ScrapInfocif.__standardize_field_names(header.contents[0]))
        return headers

    @staticmethod
    def __standardize_field_names(field_name):
        """Map a page label to its normalized key ("unknown" if unmapped)."""
        field = ScrapInfocif._FIELD_NAMES.get(field_name)
        if field is None:
            field = "unknown"
            logging.warning(f"Unknown field {field_name}")
        return field

    @staticmethod
    def __get_company_information_field_values(soup):
        """Collect the field values of the company-info section.

        The CIF value sits in an <h2>; the remaining values in <p> tags of
        two column classes, collected in the same order as the field names.
        """
        values = []
        values_cif = soup.find_all("h2", class_="editable col-md-10 col-sm-9 col-xs-12 mb10 text-right")
        if values_cif is not None and len(values_cif) == 1:
            values.append(ScrapInfocif.__trim(values_cif[0].contents[0]))
        for css_class in ("editable col-md-10 col-sm-9 col-xs-12 mb10 text-right",
                          "editable col-md-8 col-sm-8 col-xs-12 mb10 text-right"):
            values += ScrapInfocif.__manage_company_info_values(soup.find_all("p", class_=css_class))
        return values

    @staticmethod
    def __manage_company_info_values(values_in_column):
        """Extract the text of each value cell, preferring link text if present."""
        values = []
        for value in values_in_column:
            links = value.find_all("a")
            if links:
                values.append(ScrapInfocif.__trim(links[0].contents[0]))
            else:
                values.append(ScrapInfocif.__trim(value.contents[0]))
        return values

    @staticmethod
    def __mix_field_names_with_values(company_info_field_names, company_info_field_values):
        """Zip field names with values into the result dictionary.

        Only runs when both lists exist and have equal length; 'since' is
        converted to a creation date and 'credit_information' is dropped.
        """
        mix = {}
        if company_info_field_names is not None and \
                company_info_field_values is not None and \
                len(company_info_field_names) == len(company_info_field_values):
            for name, value in zip(company_info_field_names, company_info_field_values):
                if name == 'since':
                    mix[name] = ScrapInfocif.__get_creation_date_from_antiquity(value)
                elif name == 'credit_information':
                    # Field 'credit_information' intentionally ignored
                    pass
                else:
                    mix[name] = value
            mix["last_update"] = TimeUtils.now()
        return mix

    @staticmethod
    def __trim(string: str) -> str:
        """Normalize scraped text.

        NOTE(review): the first replace removes ALL plain spaces (legacy
        behaviour preserved verbatim) — confirm it should not target a
        different whitespace character.
        """
        if string is not None:
            string = string.replace(" ", "").replace("\r\n", " ").replace(u'\xa0', '').strip()
        return string

    @staticmethod
    def __get_creation_date_from_antiquity(antiquity: str) -> str:
        """Extract the dd/mm/yyyy date from e.g. "44 aรฑos (24/05/1977)".

        :return: the date string, or None if not exactly one date is found
        """
        date = None
        if antiquity is not None:
            match = re.findall(r"\d+\/\d+\/\d+", antiquity)
            if len(match) == 1:
                date = match[0]
        return date

    @staticmethod
    def __get_company_positions(soup):
        """Parse the positions table into [{'position', 'name', 'linkages'}].

        The table has three columns per row; a dict is appended once its
        third cell (linkages) has been read.
        """
        positions = []
        positions_table = soup.find_all("table", {"class": "table table-hover"})
        if positions_table:
            position = None
            for index, cell in enumerate(positions_table[0].find_all("td")):
                column = index % 3
                if column == 0:
                    position = {"position": cell.text}
                elif column == 1:
                    position["name"] = cell.text
                else:
                    position["linkages"] = cell.text
                    positions.append(position)
        return positions
| joaquinOnSoft/oxint | src/oxint/scraping/ScrapInfocif.py | ScrapInfocif.py | py | 7,074 | python | en | code | 0 | github-code | 13 |
29181534483 | '''
Created on Oct 20, 2015
@author: bardya
'''
import os
import argparse
import re
import sys
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(description='Get the ancestral consensus sequence from a hmm file')
parser.add_argument('-i', dest='infilepath', metavar='<hmm_file_path>', type=argparse.FileType('rt'),
help='path to an hmm file')
parser.add_argument('--version', action='version', version='0.1')
return parser.parse_args()
def getBitscores(inputfile):
    """Extract the full-sequence bit scores from an HMMER tblout-style file.

    Comment lines (starting with '#') and blank lines are skipped (the
    original crashed on blank/trailing lines).  Each data row must have at
    least 19 whitespace-separated columns; the bit score is addressed from
    the end (index -14) so target descriptions containing spaces do not
    shift it.

    :param inputfile: open text file (or any iterable of lines)
    :return: list of bit scores as strings, in file order
    :raises ValueError: on a malformed (short) data row
    """
    bitscores = []
    for line in inputfile:
        line = line.strip()
        if not line or line.startswith('#'):
            continue
        fields = re.split(r"\s+", line)
        if len(fields) < 19:
            # ValueError is still caught by callers expecting Exception.
            raise ValueError("Error while reading bitscore from " + inputfile.name)
        bitscores.append(fields[-14])
    return bitscores
def getSubjectName(inputname):
    """Return the part of a file name before its first dot.

    Generalized: the original returned None (implicitly) for names that
    did not contain exactly one dot; now "a.b.c" -> "a" and a name with
    no dot is returned unchanged.

    :param inputname: base name of the input file
    """
    return inputname.split('.')[0]
def getCutoff(bitscore_list, limit=0.9):
    """Return limit * (numerically smallest bit score).

    BUG FIX: the scores arrive as strings and the original took min() over
    a numpy array of strings, which compares lexicographically — e.g.
    min("10.2", "9.5") is "10.2".  Converting to float before taking the
    minimum gives the numerically correct cutoff.

    :param bitscore_list: non-empty list of bit scores as strings
    :param limit: fraction of the minimum score to use as cutoff
    :return: the cutoff value as a float
    """
    min_score = min(float(score) for score in bitscore_list)
    return min_score * limit
if __name__ == '__main__':
    args = parse_args()
    # Re-open the already-opened argparse FileType handle by name.
    # NOTE(review): the bare except only prints; a failed open still leaves
    # `inputfile` unbound and getBitscores() will then raise NameError.
    try:
        inputfile = open(args.infilepath.name, 'r')
    except:
        print('IOError occured')
    seqname = getSubjectName(os.path.basename(args.infilepath.name))
    bitscores = getBitscores(inputfile)
    cutoff = getCutoff(bitscores)
    # Emit "<name>\t<cutoff>" on stdout.
    sys.stdout.write('{}\t{}\n'.format(seqname,cutoff))
25502547911 | import numpy as np
import glob
import pickle
import juliet
import matplotlib.pyplot as plt
import ray
ray.shutdown()
ray.init()
def fit_transit_by_transit(P, P_err, t0, t0_err, ecc, omega, GPmodel = 'QP', outpath = 'planetfit', in_transit_length = 0.):
    """For every sector with a finished out-of-transit GP fit, refit the GP,
    tighten the GP/instrument priors from its posteriors, and dispatch a
    juliet fit (via ray) for each injected-transit file of that sector.

    :param P, P_err: orbital period and its uncertainty (prior mean/sigma)
    :param t0, t0_err: mid-transit time and uncertainty (t0_err unused here)
    :param ecc, omega: fixed eccentricity and argument of periastron
    :param GPmodel: 'QP', 'ExpMatern' or 'Matern' kernel choice
    :param outpath: root folder holding the per-sector fit directories
    :param in_transit_length: unused placeholder parameter
    """
    # First, extract both sectors and folders of those sectors which have out-of-transit fits already done:
    oot_folders = glob.glob(outpath+'/TESS*_'+GPmodel+'_out_of_transit')
    for oot_folder in oot_folders:
        print('Working on',oot_folder)
        # Define priors:
        priors = {}
        # First define parameter names, distributions and hyperparameters for GP-independant parameters:
        params1 = ['P_p1', 't0_p1', 'p_p1', 'b_p1', 'q1_TESS', 'q2_TESS', \
                   'ecc_p1', 'omega_p1', 'a_p1']
        params1_instrument = ['mdilution_TESS', 'mflux_TESS', 'sigma_w_TESS']
        dists1 = ['normal', 'normal', 'uniform', 'uniform', 'uniform', 'uniform', \
                  'fixed','fixed','loguniform']
        dists1_instrument = ['fixed','normal','loguniform']
        hyperps1 = [[P,P_err], [t0, 0.1], [0., 1.], [0., 1.], [0., 1.], [0., 1.], \
                    ecc, omega, [1., 100.]]
        hyperps1_instrument = [1., [0., 0.1], [0.1, 10000.]]
        # Now define hyperparameters of the GP depending on the chosen kernel:
        if GPmodel == 'ExpMatern':
            params2 = ['GP_sigma_TESS', 'GP_timescale_TESS', 'GP_rho_TESS']
            dists2 = ['loguniform', 'loguniform', 'loguniform']
            hyperps2 = [[1e-5, 10000.], [1e-3,1e2], [1e-3,1e2]]
        elif GPmodel == 'Matern':
            params2 = ['GP_sigma_TESS', 'GP_rho_TESS']
            dists2 = ['loguniform', 'loguniform']
            hyperps2 = [[1e-5, 10000.], [1e-3,1e2]]
        elif GPmodel == 'QP':
            params2 = ['GP_B_TESS', 'GP_C_TESS', 'GP_L_TESS', 'GP_Prot_TESS']
            dists2 = ['loguniform', 'loguniform', 'loguniform','loguniform']
            hyperps2 = [[1e-5,1e3], [1e-5,1e4], [1e-3, 1e3], [1.,1e2]]
        # Extract posteriors from out-of-transit GP fit first:
        params = params1_instrument + params2
        dists = dists1_instrument + dists2
        hyperps = hyperps1_instrument + hyperps2
        # Populate priors dict:
        for param, dist, hyperp in zip(params, dists, hyperps):
            priors[param] = {}
            priors[param]['distribution'], priors[param]['hyperparameters'] = dist, hyperp
        dataset = juliet.load(input_folder = oot_folder)
        results = dataset.fit(sampler = 'dynesty', nthreads = 12)
        # Shrink each GP prior to a truncated normal around its posterior.
        for i in range(len(params2)):
            posterior = results.posteriors['posterior_samples'][params2[i]]
            mu, sigma = np.median(posterior), np.sqrt(np.var(posterior))
            dists2[i] = 'truncatednormal'
            hyperps2[i] = [mu, sigma, hyperps2[i][0], hyperps2[i][1]]
        # Same for sigma_w and mflux:
        dists1_instrument[2] = 'truncatednormal'
        posterior = results.posteriors['posterior_samples']['sigma_w_TESS']
        mu, sigma = np.median(posterior), np.sqrt(np.var(posterior))
        hyperps1_instrument[2] = [mu, sigma, hyperps1_instrument[2][0], hyperps1_instrument[2][1]]
        # Normal for mflux:
        dists1_instrument[1] = 'normal'
        posterior = results.posteriors['posterior_samples']['mflux_TESS']
        mu, sigma = np.median(posterior), np.sqrt(np.var(posterior))
        hyperps1_instrument[1] = [mu, sigma]
        # Populate prior dict:
        params = params1 + params1_instrument + params2
        dists = dists1 + dists1_instrument + dists2
        hyperps = hyperps1 + hyperps1_instrument + hyperps2
        # Populate the priors dictionary:
        for param, dist, hyperp in zip(params, dists, hyperps):
            priors[param] = {}
            priors[param]['distribution'], priors[param]['hyperparameters'] = dist, hyperp
        it_folders = glob.glob(outpath+'/TESS*'+'/run_*/')
        #if len(it_folders) / len(oot_folders) < 10.: # Not enough successful injections -- don't fit
        #    continue
        for it_folder in it_folders:
            # NOTE(review): run_id is reset to 0 every iteration and only
            # incremented after the remote call, so every call receives
            # run_id=0 — confirm whether the reset belongs outside the loop.
            run_id = 0
            it_files = glob.glob(it_folder+'/transit_*'+'.dat')
            fit_injected_transit.remote(P, t0, priors, it_files, run_id) # Call the remote function
            run_id += 1
@ray.remote
def fit_injected_transit(P, t0, priors, it_files, run_id):
    """Ray remote task: fit every injected-transit lightcurve in ``it_files`` with juliet.

    Parameters
    ----------
    P, t0, run_id :
        Unused in the body; kept so the caller's interface is unchanged.
    priors : dict
        juliet prior dictionary; ``priors['t0_p1']['hyperparameters'][0]`` is
        overwritten with each lightcurve's mid-time before fitting.
    it_files : list of str
        Paths to ``transit_*.dat`` files with columns (time, flux, flux_err).
    """
    for it_file in it_files:
        tt, ff, fferr = {}, {}, {}
        tt['TESS'], ff['TESS'], fferr['TESS'] = np.genfromtxt(it_file, usecols = (0, 1, 2), unpack = True)
        # Use the central timestamp of the series as the prior mean for t0:
        mid_idx = int(len(tt['TESS'])*0.5)
        tmid = tt['TESS'][mid_idx]
        # Check if there is any time-datapoint that covers, at least, an hour around mid-transit:
        n_onehour = len(np.where(np.abs(tt['TESS']-tmid)<1./24.)[0])
        # If there are datapoints, fit the dataset. Use that central time as the t0 mean on the prior:
        if n_onehour > 0:
            priors['t0_p1']['hyperparameters'][0] = tmid
            print(it_file[:-4])
            # Run fit:
            transit_dataset = juliet.load(priors=priors, t_lc = tt, y_lc = ff, yerr_lc = fferr, GP_regressors_lc = tt, \
                                          out_folder = it_file[:-4])
            results = transit_dataset.fit(sampler = 'dynesty')
        else:
            # BUGFIX: the original referenced an undefined name ``tc`` here
            # (NameError on this branch); the mid-time variable is ``tmid``.
            print('Transit at', tmid, ' doesnt have n_onehour apparently:', np.abs(tt['TESS']-tmid))
def read_data(fname):
    """Parse a tab-separated target list into ``{name: {'ticid': ticid}}``.

    Reading stops at the first empty line (which includes EOF, because
    ``readline`` then returns ``''``); lines starting with ``#`` are treated
    as comments and skipped.  Only the first two tab-separated columns
    (target name, TIC id) are used.
    """
    data = {}
    # BUGFIX: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(fname, 'r') as fin:
        while True:
            line = fin.readline()
            line = line[:-1]  # Remove the trailing "\n"
            if line == '':
                # Blank line or EOF terminates the read, as before.
                break
            if line[0] != '#':
                lv = line.split("\t")
                name, ticid = lv[0], lv[1]
                data[name] = {'ticid': ticid}
    return data
| gavinxwang/variable-depths | Scripts/utils.py | utils.py | py | 5,873 | python | en | code | 0 | github-code | 13 |
17059859174 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class SignRecordLogVO(object):
    """Alipay OpenAPI domain value object holding one sign-record log entry."""

    # Attribute names that participate in dict (de)serialization.
    _KEYS = ('s_version', 'status', 'trans_date')

    def __init__(self):
        self._s_version = None
        self._status = None
        self._trans_date = None

    @property
    def s_version(self):
        return self._s_version

    @s_version.setter
    def s_version(self, value):
        self._s_version = value

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        self._status = value

    @property
    def trans_date(self):
        return self._trans_date

    @trans_date.setter
    def trans_date(self, value):
        self._trans_date = value

    def to_alipay_dict(self):
        """Serialize truthy attributes into a plain dict, delegating to
        ``to_alipay_dict`` on values that provide it."""
        params = dict()
        for key in self._KEYS:
            value = getattr(self, key)
            if not value:
                # Falsy values (None, '') are omitted, as in the generated SDK code.
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a SignRecordLogVO from a dict; returns None for falsy input."""
        if not d:
            return None
        o = SignRecordLogVO()
        for key in SignRecordLogVO._KEYS:
            if key in d:
                setattr(o, key, d[key])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/SignRecordLogVO.py | SignRecordLogVO.py | py | 1,810 | python | en | code | 241 | github-code | 13 |
40397707284 | """
ๆนๆกไธ๏ผๅบไบ่ง่ง่ฏๆฑ็่ฑๅ่ฏๅซๆนๆณ
"""
import glob
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import matplotlib.mlab as mlab
from sklearn.cluster import KMeans,MiniBatchKMeans
import kNN
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold,datasets
import utils
import kNN
#import afkmc2.afkmc2 as afk
#import MR8
from skimage.transform import rotate
from skimage.feature import local_binary_pattern
from skimage import data,io,data_dir,filters,feature
from skimage.color import label2rgb
import skimage
from PIL import Image
import dataLoad
CHOSEN_TRAIN=40  # number of training samples chosen per class
RADIUS=4  # radius of the LBP operator's sampling region
N_POINTS=8*RADIUS  # number of LBP sampling points
ALPHA=6.0  # weight of the color feature
BETA=4.5  # weight of the shape feature
GAMMA=3.1  # weight of the texture feature
randomIndex,resIndex=dataLoad.randomSample(80,CHOSEN_TRAIN)  # randomly split indices into training and test samples
filenames=[]
filenames.append(glob.glob(r'F:/seven/prDesign/fdata/buttercup/*.jpg'))
filenames.append(glob.glob(r'F:/seven/prDesign/fdata/daisy/*.jpg'))
filenames.append(glob.glob(r'F:/seven/prDesign/fdata/iris/*.jpg'))
filenames.append(glob.glob(r'F:/seven/prDesign/fdata/lilyvalley/*.jpg'))
filenames.append(glob.glob(r'F:/seven/prDesign/fdata/sunflower/*.jpg'))
filenames.append(glob.glob(r'F:/seven/prDesign/fdata/windflower/*.jpg'))
# Extract the raw (color / shape / texture) features
dataX,numX=dataLoad.load(filenames,randomIndex,'color',CHOSEN_TRAIN,N_POINTS,RADIUS)
print('color completed')
dataY,numY=dataLoad.load(filenames,randomIndex,'shape',CHOSEN_TRAIN,N_POINTS,RADIUS)
print('shape completed')
dataZ,numZ=dataLoad.load(filenames,randomIndex,'texture',CHOSEN_TRAIN,N_POINTS,RADIUS)
print('texture completed')
# Build the class labels (1..6, CHOSEN_TRAIN samples per class)
y=None
for i in range(6):
    tempY=(i+1)*np.ones(CHOSEN_TRAIN,dtype=int)
    if y is None:
        y=tempY
    else:
        y=np.concatenate((y,tempY),axis=0)
# Cluster each feature space to build the visual vocabularies (200 words each)
kmeansX=MiniBatchKMeans(n_clusters=200)
kmeansX.fit(dataX)
print('color k-means completed')
kmeansY=MiniBatchKMeans(n_clusters=200)
kmeansY.fit(dataY)
print('shape k-means completed')
kmeansZ=MiniBatchKMeans(n_clusters=200)
kmeansZ.fit(dataZ)
print('texture k-means completed')
print('k-means complete!')
centersX,labelsX=kmeansX.cluster_centers_,kmeansX.labels_
centersY,labelsY=kmeansY.cluster_centers_,kmeansY.labels_
centersZ,labelsZ=kmeansZ.cluster_centers_,kmeansZ.labels_
# Build the final feature representation: per-image histograms over visual words
# NOTE(review): this assumes every image contributed exactly m+n samples to
# dataX/dataZ (and numY[...] samples to dataY), in file order -- confirm
# against dataLoad.load.
histDataX=np.zeros((6*CHOSEN_TRAIN,200),dtype=int)
histDataY=np.zeros((6*CHOSEN_TRAIN,200),dtype=int)
histDataZ=np.zeros((6*CHOSEN_TRAIN,200),dtype=int)
k=0
sumXZ=0
sumY=0
for filename in filenames:
    for i in range(CHOSEN_TRAIN):
        tempImg=cv2.cvtColor(cv2.imread(filename[i],cv2.IMREAD_COLOR),cv2.COLOR_BGR2HSV) #m,n,_=tempImg.shape
        m,n,_=tempImg.shape
        for j in range(m+n):
            histDataX[k*CHOSEN_TRAIN+i,labelsX[j+sumXZ]]=histDataX[k*CHOSEN_TRAIN+i,labelsX[j+sumXZ]]+1
            histDataZ[k*CHOSEN_TRAIN+i,labelsZ[j+sumXZ]]=histDataZ[k*CHOSEN_TRAIN+i,labelsZ[j+sumXZ]]+1
        for j in range(numY[k*CHOSEN_TRAIN+i]):
            histDataY[k * CHOSEN_TRAIN + i, labelsY[j + sumY]] = histDataY[k * CHOSEN_TRAIN + i, labelsY[j + sumY]] + 1
        sumXZ=sumXZ+m+n
        sumY=sumY+numY[k*CHOSEN_TRAIN+i]
    k=k+1
#"""
# Visualize the shape-feature histograms with t-SNE
tsne=manifold.TSNE(n_components=2,init='pca',random_state=0)
Y=tsne.fit_transform(histDataY)
print('tSNE completed!')
fig=utils.plot_embedding(Y,y,'t-SNE embedding of the digits')
plt.show()
#"""
# Evaluate on the test split and compute accuracy (disabled)
"""
sum=0
t=0
for filename in filenames:
    t=t+1
    for i in range(CHOSEN_TRAIN,80):
        resultTT=np.zeros(7,dtype=float)
        testFile=filename[resIndex[i-CHOSEN_TRAIN]]
        histTestX=kNN.featureCOV(centersX,testFile)
        histTestY=kNN.featureSIFT(centersY,testFile)
        histTestZ=kNN.featureLBP(centersZ,testFile,N_POINTS,RADIUS)
        print('featuren construction completed!')
        resultC,probC=kNN.xkNN(histDataX,y,histTestX)
        resultS,probS=kNN.xkNN(histDataY,y,histTestY)
        resultT,probT=kNN.xkNN(histDataZ,y,histTestZ)
        resultTT[resultC]=resultTT[resultC]+ALPHA*probC
        resultTT[resultS]=resultTT[resultS]+BETA*probS
        resultTT[resultT]=resultTT[resultT]+GAMMA*probT
        result=np.where(resultTT==np.max(resultTT))
        result=result[0]
        print(resultTT,result,t)
        if result==t:
            sum=sum+1
print(sum*100/240)
"""
# Single-image test: classify one file by weighted vote of the three kNNs
testFilename='F:/seven/prDesign/test6.jpg'
resultTT=np.zeros(7,dtype=float)
histTestX=kNN.featureCOV(centersX,testFilename)
histTestY=kNN.featureSIFT(centersY,testFilename)
histTestZ=kNN.featureLBP(centersZ,testFilename,N_POINTS,RADIUS)
print('featuren construction completed!')
resultC,probC=kNN.xkNN(histDataX,y,histTestX)
resultS,probS=kNN.xkNN(histDataY,y,histTestY)
resultT,probT=kNN.xkNN(histDataZ,y,histTestZ)
resultTT[resultC]=resultTT[resultC]+ALPHA*probC
resultTT[resultS]=resultTT[resultS]+BETA*probS
resultTT[resultT]=resultTT[resultT]+GAMMA*probT
result=np.where(resultTT==np.max(resultTT))
result=result[0]
print(resultTT,result)
73705961937 | #!python3
# desafio \/
# def dia_semana(dia):
# dias = {
# 1: 'Domingo',
# 2: 'Segunda',
# 3: 'Terรงa',
# 4: 'Quarta',
# 5: 'Quinta',
# 6: 'Sexta',
# 7: 'Sรกbado',
# }
# return dias.get(dia)
# if __name__ == '__main__':
#
# dia_informado = int(input('Informe o dia: '))
#
# if dia_informado < 1 or dia_informado > 7:
# print('Dia invalido! ')
# else:
# dia_semana(dia_informado)
# if dia_informado >= 2 and dia_informado <= 6:
# print(f'{dia_semana(dia_informado)} รฉ um dia util')
# else:
# print(f'{dia_semana(dia_informado)} รฉ fim de semana')
# resposta \/
def get_tipo_dia(dia):
    """Return the day category for a 1-based weekday number.

    1 and 7 (Sunday/Saturday) are weekend days, 2-6 are weekdays, and any
    other value yields the "invalid" marker string.
    """
    dias = {
        1: 'Fim de semana',
        2: 'Dia de semana',
        3: 'Dia de semana',
        4: 'Dia de semana',
        5: 'Dia de semana',
        6: 'Dia de semana',
        7: 'Fim de semana',
    }
    # BUGFIX: the fallback string was garbled mojibake ('invรกlido');
    # restored the intended Portuguese word.
    return dias.get(dia, '** inválido **')
if __name__ == '__main__':
    # Demo: print the category for day numbers 0..7 (0 exercises the
    # invalid-input branch).
    for numero_dia in range(8):
        etiqueta = get_tipo_dia(numero_dia)
        print(f'{numero_dia}: {etiqueta}')
34518432616 | import numpy as np
import itertools
from scipy import stats
class Dynamic_features:
    """Statistical feature helpers computed over packet-size streams."""

    def dynamic_calculation(self, ethsize):
        """Return (sum, min, max, mean, std) of the packet-size list ``ethsize``.

        ``ethsize`` must be non-empty (min/max/mean are undefined otherwise).
        """
        sum_packets = sum(ethsize)
        min_packets = min(ethsize)
        max_packets = max(ethsize)
        mean_packets = sum_packets / len(ethsize)
        std_packets = np.std(ethsize)
        return sum_packets, min_packets, max_packets, mean_packets, std_packets

    def dynamic_count(self, protcols_count):
        """Return the total packet count across all protocol buckets
        (the "Number" feature)."""
        # Idiom: summing the dict values replaces the manual accumulator loop.
        return sum(protcols_count.values())

    def dynamic_two_streams(self, incoming, outgoing):
        """Compute pairwise statistics between the incoming and outgoing streams.

        :return: (magnitude, radius, correlation, covariance, variance_ratio,
            weight).  Both streams must be non-empty.
        """
        inco_ave = sum(incoming) / len(incoming)
        outgoing_ave = sum(outgoing) / len(outgoing)
        magnite = (inco_ave + outgoing_ave) ** 0.5
        inco_var = np.var(incoming)
        outgo_var = np.var(outgoing)
        radius = (inco_var + outgo_var) ** 0.5
        # BUGFIX: the original condition was
        # `len(incoming) and len(outgoing) >= 2`, which (by precedence) only
        # required `incoming` to be non-empty; pearsonr needs at least two
        # points in *both* streams.
        if len(incoming) >= 2 and len(outgoing) >= 2:
            correlation, p_value = stats.pearsonr(incoming, outgoing)
        else:
            correlation = 0
        # NOTE(review): zip() truncates to the shorter stream but the divisor
        # is len(incoming); presumably both streams are expected to have equal
        # length -- confirm against the callers.
        covaraince = sum((a - inco_ave) * (b - outgoing_ave) for (a, b) in zip(incoming, outgoing)) / len(incoming)
        var_ratio = 0
        if outgo_var != 0:
            var_ratio = inco_var / outgo_var
        weight = len(incoming) * len(outgoing)
        return magnite, radius, correlation, covaraince, var_ratio, weight
| Madhav-Malhotra/cicIoT | iot_scripts/feat_extract/Dynamic_features.py | Dynamic_features.py | py | 1,438 | python | en | code | 1 | github-code | 13 |
41503437065 | """Perform JOIN queries on models with relationships."""
from sqlalchemy.orm import Session
from logger import LOGGER
from sqlalchemy_tutorial.part3_relationships.models import Comment, Post, User
def get_all_posts(session: Session, admin_user: User):
    """
    Fetch every post authored by the given user, joining Post with User.

    :param Session session: SQLAlchemy database session.
    :param User admin_user: Author of blog posts.

    :return: None
    """
    LOGGER.info("Fetching posts with child comments...")
    author_posts_query = (
        session.query(Post)
        .join(User, Post.author_id == User.id)
        .filter_by(username=admin_user.username)
    )
    for fetched_post in author_posts_query.all():
        LOGGER.success(f"Fetched posts by user: {fetched_post}")
def get_all_comments(session: Session):
    """
    Fetch all comments joined with their parent posts.

    :param session: SQLAlchemy database session.
    :type session: Session

    :return: None
    """
    LOGGER.info("Joining comments with parent posts...")
    joined_comments = (
        session.query(Comment).join(Post, Post.id == Comment.post_id).all()
    )
    for fetched_comment in joined_comments:
        LOGGER.success(f"Joined comments with parent posts: {fetched_comment}")
| hackersandslackers/sqlalchemy-tutorial | sqlalchemy_tutorial/part3_relationships/joins.py | joins.py | py | 1,152 | python | en | code | 67 | github-code | 13 |
70680312979 | import tensorflow as tf
import keras as k
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import load_model
import numpy as np
import matplotlib.pyplot as plt
import h5py
import os
from PIL import Image
# Base name of the persisted Keras model file (expected next to this script).
MODEL_FILENAME = 'model_fruit_fresh_rotten'
# PATH_TO_TRAINED_MODEL_FILE = 'model/'+MODEL_FILENAME+'.h5'
PATH_TO_TRAINED_MODEL_FILE = MODEL_FILENAME+'.h5'
# PATH_TO_TRAINED_MODEL_FILE = MODEL_FILENAME + '.h5'
# test_dir_path = os.path.join('/data/','test')
# Model input geometry: 224x224 RGB images (the vgg16_input note below
# suggests a VGG16-based model -- confirm against the training script).
INPUT_SHAPE = (224, 224, 3)
TARGET_SIZE = INPUT_SHAPE[:2]  # (height, width) used when resizing images
# Fruits/model/model_fruit_fresh_rotten.h5
# def predictFruitClass(ImagePath, trainedModel, class_dict):
def predictFruitClass(img, trainedModel, class_dict):
    """Run the trained model on one (already resized) PIL image.

    :param img: PIL image, expected to match the model input size.
    :param trainedModel: loaded Keras Sequential model.
    :param class_dict: accepted but never used here -- NOTE(review): confirm
        whether label mapping was meant to happen in this function.
    :return: (predicted_class_indices, class_probabilities).

    NOTE(review): ``predict_classes``/``predict_proba`` were removed from
    tf.keras Sequential in TF 2.6+; this code assumes an older TensorFlow.
    """
    x = img
    # included below to resolve ValueError: Error when checking input: expected vgg16_input to have 4 dimensions, but got array with shape (1, 1)
    # x = x.reshape((-1, 100, 100, 1))
    x = image.img_to_array(x) #ValueError: could not convert string to float:
    plt.imshow((x).astype(np.uint16)) #AttributeError: 'bytes' object has no attribute 'astype'
    # plt.imshow(x) TypeError: Image data of dtype |S5151 cannot be converted to float
    # Add the batch dimension expected by Keras predict APIs.
    x = np.expand_dims(x, axis=0)
    prediction_class = trainedModel.predict_classes(x, batch_size=1)
    prediction_probs = trainedModel.predict_proba(x, batch_size=1)
    return prediction_class, prediction_probs
def getTrainedModel(PATH_TO_TRAINED_MODEL_FILE):
    """Load and return a persisted Keras model from the given .h5 path."""
    return load_model(PATH_TO_TRAINED_MODEL_FILE)
# def results():
def results(img):
    """Classify a PIL image with the persisted fresh/rotten-fruit model.

    :param img: PIL.Image to classify; it is resized to the model's 224x224
        input here.
    :return: ((predicted_class, class_probabilities), class_dict) where
        ``class_dict`` maps class indices to labels (from 'class_dict.npy').
    """
    # IMPROVED: removed the dead `result = {}` initializer and the needless
    # path temporary from the original.
    trained_model = getTrainedModel(PATH_TO_TRAINED_MODEL_FILE)
    class_dict = np.load('class_dict.npy', allow_pickle=True).item()
    # image_path = 'data/test/test/ro5.jpg' im.resize((width, height))
    img_resize = img.resize((224, 224))
    result = predictFruitClass(img_resize, trained_model, class_dict)
    return result, class_dict
| trigus00/Fruits | fresh_rotten_fruits-Final /model_test.py | model_test.py | py | 2,001 | python | en | code | 0 | github-code | 13 |
37526401375 | """===============================================================================
FILE: _gstasks/html_formatter.py
USAGE: (not intended to be directly executed)
DESCRIPTION:
OPTIONS: ---
REQUIREMENTS: ---
BUGS: ---
NOTES: ---
AUTHOR: Alex Leontiev (alozz1991@gmail.com)
ORGANIZATION:
VERSION: ---
CREATED: 2022-12-16T20:37:31.068331
REVISION: ---
TODO:
1. states filter (sql?)
2. (states) order (?sql)
3. color based on state (only stateonly state??)
4. text formatting
a. contract links
5. text styling
b. bold overdue tasks
==============================================================================="""
import itertools
import json
import logging
from os import path
from datetime import datetime, timedelta
import json5
import typing
import pandas as pd
from jinja2 import Template
from string import Template as string_template
import typing
import functools
import hashlib
import importlib.util
# copycat to omit dependency on `alex_leontiev_toolbox_python`
from _gstasks._pandas_sql import pandas_sql
def ifnull(x, y, method: typing.Literal["isna"] = "isna", is_loud: bool = False):
    """Return ``x`` unless it is NA (per ``pd.isna``), otherwise ``y``.

    ``method`` currently supports only "isna" and is kept for interface
    compatibility; ``is_loud`` logs the inputs and the chosen value.
    """
    if pd.isna(x):
        result = y
    else:
        result = x
    if is_loud:
        logging.warning(("ifnull", (x, y), result))
    return result
class _get_task_by_uuid:
    """Callable that fetches task documents from the task list's collection
    by uuid, memoizing results for the lifetime of the instance."""

    def __init__(self, task_list):
        # Backing collection of the task list (Mongo-style: exposes find_one).
        self._coll = task_list.get_coll()
        # uuid -> task document cache (a missing task caches as None too).
        self._cache = {}
        self._logger = logging.getLogger(self.__class__.__name__)

    def __call__(self, uuid_: str, is_report_cache_hitmisses: bool = False):
        """Return the task document for ``uuid_`` (cached after first lookup).

        :param uuid_: task uuid to look up.
        :param is_report_cache_hitmisses: when True, log cache hits/misses.
        """
        if uuid_ not in self._cache:
            if is_report_cache_hitmisses:
                self._logger.warning(f'cache miss with "{uuid_}"')
            self._cache[uuid_] = self._coll.find_one({"uuid": uuid_})
        elif is_report_cache_hitmisses:
            self._logger.warning(f'cache hit with "{uuid_}"')
        res = self._cache[uuid_]
        # self._logger.warning(
        #     (
        #         "_get_task_by_uuid",
        #         uuid_,
        #         res,
        #     )
        # )
        return res
def _df_env(df):
df = df.copy()
df.reset_index(inplace=True)
tags = df.pop("tags")
tags_df = pd.DataFrame(
data=itertools.chain(
*[
[{"uuid": uuid, "tag": tag} for tag in tags_]
for uuid, tags_ in zip(df["uuid"], tags)
]
),
columns=["uuid", "tag"],
)
if "label" in df.columns:
labels_df = pd.DataFrame(
data=[({} if pd.isna(label) else label) for label in df.pop("label")]
)
else:
labels_df = pd.DataFrame()
labels_df["uuid"] = df["uuid"]
res = dict(tasks=df, tags=tags_df, labels=labels_df)
# for k, df in res.items():
# logging.warning(f"{k}:\n{df.head().to_string()}")
return res
def get_last_engaged_task_uuid(task_list, mark="engage"):
    """Return the task uuid of the most recent engagement record carrying the
    given ``mark``, or None when no such record exists."""
    latest = list(
        task_list.get_coll("engage").find({"mark": mark}).sort("dt", -1).limit(1)
    )
    return latest[0]["task_uuid"] if latest else None
def format_html(
    df, html_out_config=None, task_list=None, print_callback=print, out_file=None
):
    """Render the tasks dataframe to HTML according to a json5 config file.

    The config may provide: jinja2 SQL templates for sorting/filtering
    (``sorting_sql``) and per-row CSS classes (``row_styling_sql``), custom
    SQL UDFs (``sql_udfs_file``), per-column jinja templates
    (``output_columns``), and output options (``is_use_style``, ``template``,
    ``out_file``).

    :param df: tasks dataframe; ``uuid`` values must be unique.
    :param html_out_config: path to the json5 config (required despite the
        None default -- see the assert below).
    :param task_list: task-list object used for uuid lookups and the
        last-engaged-task marker (required).
    :param print_callback: callable used to echo the rendered HTML.
    :param out_file: output path; overrides the config's ``out_file``.
    """
    # logging.warning(html_out_config)
    assert task_list is not None
    assert html_out_config is not None
    # NOTE(review): this branch is unreachable -- the assert above already
    # rejects a None config.
    if html_out_config is None:
        print_callback(df.to_html())
        return
    with open(html_out_config) as f:
        config = json5.load(f)
    logging.warning(f"config: {config}")

    # index set
    df = df.copy()
    df.set_index("uuid", inplace=True)
    assert df.index.is_unique

    # filtering
    df.drop(columns=["_id"], inplace=True)

    # Shared template environment exposed to every jinja2 render below.
    env = {
        "now": datetime.now(),
        "last_engaged_task_uuid": get_last_engaged_task_uuid(task_list),
        "utils": {
            "pd": pd,
            "custom": {
                "ifnull": ifnull,
                "get_task_by_uuid": _get_task_by_uuid(task_list),
            },
        },
    }

    ## load UDFs
    udfs = []
    if "sql_udfs_file" in config:
        udfs_fn = path.abspath(config["sql_udfs_file"])
        logging.warning(f"udfs_fn: `{udfs_fn}`")

        ## adapted from https://stackoverflow.com/a/67692
        spec = importlib.util.spec_from_file_location("gstasks_sql_udfs", udfs_fn)
        foo = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(foo)
        # logging.info(dir(foo))
        # logging.warning(foo.export_udfs)
        udfs.extend(foo.export_udfs)
    logging.warning(f"udfs: {udfs}")

    # sorting/filtering
    if "sorting_sql" in config:
        with open(config["sorting_sql"]) as f:
            tpl = f.read()
        logging.info(tpl)
        sql = Template(tpl).render(env)
        logging.info(sql)
        res = pandas_sql(sql, _df_env(df), utils=udfs)
        logging.info("\n" + res.to_csv(index=None))
        # Reorder/filter the frame to exactly the uuids the SQL returned.
        df = df.loc[res["uuid"].to_list()]

    # row styling
    if "row_styling_sql" in config:
        with open(config["row_styling_sql"]) as f:
            tpl = f.read()
        logging.info(tpl)
        sql = Template(tpl).render(env)
        logging.info(sql)
        res_df = pandas_sql(sql, _df_env(df))
        res_df.set_index("uuid", inplace=True)
        # One CSS class per row, aligned with df's row order.
        classes = res_df.loc[df.index, "class"].to_list()
    else:
        classes = None

    # _date_cols = ["_insertion_date", "_last_modification_date"]
    # for cn in _date_cols:
    #     df[cn] = df[cn].apply(
    #         lambda dt: "" if pd.isna(dt) else dt.strftime("%Y-%m-%d %H:%M")
    #     )
    logging.warning(df.dtypes)

    # TODO: col order via `config`
    if "output_columns" in config:
        logging.warning(f'output_columns: {config["output_columns"]}')
        rs = list(df.reset_index().to_dict(orient="records"))
        # logging.warning(f"rs: {rs[:5]}")
        df = pd.DataFrame(
            {
                output_column["column_name"]: _render_column(output_column, rs, env)
                for output_column in config["output_columns"]
            },
            index=df.index,
        )
        # df = df[[r["column_name"] for r in config["output_columns"]]]
        # for r in config["output_columns"]:
        #     jinja_tpl = r.get("jinja_tpl")
        #     if "jinja_tpl" in r:
        #         df[r["column_name"]] = df[r["column_name"]].apply(
        #             lambda x: Template(r["jinja_tpl"])
        #             .render(
        #                 {
        #                     **env,
        #                     "x": x,
        #                 }
        #             )
        #             .strip()
        #         )

    out_file = config.get("out_file") if out_file is None else out_file
    is_use_style = config.get("is_use_style", False)
    s = (
        _style_to_buf(buf=out_file, config=config, df=df, classes=classes)
        if is_use_style
        else df.to_html(buf=out_file, render_links=True)
    )
    logging.warning(f'html saved to "{out_file}"')
    if s is not None:
        print_callback(s)
def _render_column(output_column, rs, env):
    """Render one output column: apply the column's jinja template (default:
    the raw cell value) to every record in ``rs`` and return the list of
    stripped strings."""
    template = Template(output_column.get("jinja_tpl", "{{r[column_name]}}"))
    column_name = output_column["column_name"]
    rendered = [
        template.render(
            {
                **env,
                "r": record,
                "column_name": column_name,
                "x": record.get(column_name),
            }
        ).strip()
        for record in rs
    ]
    return rendered
def _style_to_buf(
    buf: typing.Optional[str],
    config: dict,
    df: pd.DataFrame,
    classes: typing.Optional[list[str]],
):
    """Render ``df`` through the pandas Styler, optionally attaching per-row
    CSS classes and wrapping the table in a ``string.Template`` HTML shell,
    then write the result to ``buf`` when given.

    NOTE(review): this function has no return statement, so it always yields
    None -- yet the caller checks ``if s is not None`` as with
    ``df.to_html(buf=None)``; a ``return html`` for the ``buf is None`` case
    looks intended -- confirm.

    :param buf: output path, or None to skip writing.
    :param config: json5 config dict; ``template`` selects the HTML shell.
    :param df: the (already column-rendered) dataframe to emit.
    :param classes: one CSS class per row of ``df``, or None for no classes.
    """
    # formatting
    # formatting via SQL? via CSS?
    # TODO(done): next -- optional formatting via class assignment
    # TODO(done): add optional css via `config`
    html_template = config.get("template")
    logging.warning(f"html tpl: {html_template}")
    style = df.style
    if classes is not None:
        assert len(classes) == len(df), (len(classes), len(df))
        # Broadcast each row's class to every cell of that row.
        style = style.set_td_classes(
            pd.DataFrame(
                data=[[cls] * len(df.columns) for cls in classes],
                columns=df.columns,
                index=df.index,
            )
        )
    to_html_kwargs = {}
    if html_template is not None:
        to_html_kwargs["doctype_html"] = True
    html = style.to_html(**to_html_kwargs)
    if html_template is not None:
        # FIXME: solve `pandas` html escape problem and switch to `jinja2`
        with open(html_template) as f:
            tpl = string_template(f.read())
        row_num, col_num = df.shape
        html = tpl.substitute(table_html=html, row_num=row_num, col_num=col_num)
    if buf is not None:
        with open(buf, "w") as f:
            f.write(html)
        logging.warning(f'save to "{buf}"')
| nailbiter/pyassistantbot2 | _gstasks/html_formatter.py | html_formatter.py | py | 8,883 | python | en | code | 0 | github-code | 13 |
21676200262 | """
์ ์ผ์ฑ์ ๋ง์กฑํ๋ ํค๋ค์ ๋ชจ๋ ๊ตฌํ๋ค.
์ ์ผ์ฑ์ ๋ง์กฑํ๋ ํค ์ค ์ต์์ฑ์ ๋ง์กฑํ๋ ํค๋ฅผ ๊ตฌํ๋ ค๋ฉด ๋ถ๋ถ์งํฉ์ ํฌํจ๋๋์ง๋ฅผ ํ์ธํ๋ฉด ๋๋ค.
๋ถ๋ถ์งํฉ์ ํ์ธํ๋๋ฐ ์์ด, ๊ฐ์ฅ ์ ์ ํค๋ฅผ ํฌํจํ๋ ๋ถ๋ถ๋ถํฐ ์งํํด์ผํ๋ค.
์๋๋ผ๋ฉด ์ต์์ฑ์ ๋ง์กฑ์ํค์ง ๋ชปํ๋ ๊ฒฝ์ฐ๋ ๋ฐ์ํ๋ค.
"""
from itertools import combinations
def isCandidate(answer_set, cb):
    """Return True when no already-accepted key is contained in ``cb``.

    Every sub-combination of ``cb`` (including ``cb`` itself) is tested
    against ``answer_set``; a hit means ``cb`` is a superset of an existing
    candidate key and therefore not minimal."""
    return not any(
        subset in answer_set
        for size in range(1, len(cb) + 1)
        for subset in combinations(cb, size)
    )
def solution(relation):
    """Count the candidate keys of ``relation`` (a list of equal-length rows).

    Column combinations are examined smallest-first, so minimality can be
    verified simply by checking (via ``isCandidate``) that no previously
    accepted key is contained in the current combination.
    """
    candidate_keys = set()
    n_cols = len(relation[0])
    n_rows = len(relation)
    for size in range(1, n_cols + 1):
        for cols in combinations(range(n_cols), size):
            # Project every row onto the chosen columns.
            projections = {tuple(row[c] for c in cols) for row in relation}
            # All projections distinct => uniqueness; isCandidate => minimality.
            if len(projections) == n_rows and isCandidate(candidate_keys, cols):
                candidate_keys.add(tuple(cols))
    return len(candidate_keys)
41504311313 | import re, hoshino, os, json
from . import RSS_class, rsshub
from hoshino import Service, priv
from hoshino.typing import CQEvent
from .config import *
# Help text shown to users (the garbled characters below are mojibake of the
# original Chinese command names; the string is runtime data and is kept
# byte-identical).
sv_help = '''
- [ๆทปๅ ่ฎข้
่ฎข้
ๅ RSSๅฐๅ(/twitter/user/username)]
- [ๅ ้ค่ฎข้
่ฎข้
ๅ]
- [ๆฅ็ๆๆ่ฎข้
]
'''.strip()
sv = Service(
    name = 'ๆจ็น่ฎข้
    ', # service name
    use_priv = priv.NORMAL, # permission required to use the service
    manage_priv = priv.ADMIN, # permission required to manage the service
    visible = True, # False hides the service
    enable_on_default = False, # whether the service is enabled by default
    bundle = '่ฎข้
    ', # category bundle this service belongs to
    help_ = sv_help # help text
    )
@sv.on_fullmatch(["ๅธฎๅฉๆจ็น่ฎข้
"])
async def bangzhu(bot, ev):
    """Reply with the module help text when the help command is matched."""
    await bot.send(ev, sv_help, at_sender=True)
def load_config():
    """Load the twitter-subscription config JSON; return {} when the file is
    missing or unreadable."""
    try:
        config_path = hoshino_path + 'twitter_config.json'
        if os.path.exists(config_path):
            with open(config_path, 'r', encoding='utf8') as config_file:
                return json.load(config_file)
        else:
            return {}
    # BUGFIX: a bare `except:` also swallows SystemExit/KeyboardInterrupt;
    # catch Exception instead.
    except Exception:
        return {}
def save_config(config):
    """Persist the subscription config as pretty-printed JSON; return whether
    the write succeeded."""
    try:
        with open(hoshino_path + 'twitter_config.json', 'w', encoding='utf8') as config_file:
            json.dump(config, config_file, ensure_ascii=False, indent=4)
        return True
    # BUGFIX: bare `except:` replaced with Exception so interpreter-exit
    # signals are not swallowed.
    except Exception:
        return False
async def spider_work(rss, bot, sv:Service):
    """Fetch new tweets for one RSS subscription and push them to every
    subscribed group."""
    updates = await rsshub.getRSS(rss)
    if not updates:
        sv.logger.info(f'{rss.url}ๆชๆฃ็ดขๅฐๆฐๆจๆ')
        return
    sv.logger.info(f'{rss.url}ๆฃ็ดขๅฐ{len(updates)}ไธชๆฐๆจๆ๏ผ')
    for msg in updates:
        for gid in rss.gid:
            await bot.send_group_msg(group_id=int(gid), message=msg)
@sv.on_prefix('ๆทปๅ ่ฎข้
')
async def handle_RssAdd(bot, ev: CQEvent):
    """Admin command: add an RSS subscription (args: name, URL) for this group."""
    if not priv.check_priv(ev, priv.ADMIN):
        await bot.finish(ev, 'ๆฑๆญ๏ผๆจ้็ฎก็ๅ๏ผๆ ๆญคๆไปคไฝฟ็จๆ้')
    s = ev.message.extract_plain_text().split(' ')
    try:
        name = s[0]
        url = s[1]
    # NOTE(review): bare except reports any error as "missing argument".
    except:
        await bot.send(ev, '่พๅ
ฅๅๆฐ็ผบๅคฑ๏ผ')
        return
    config = load_config()
    gid = str(ev.group_id)
    if url in config.keys():
        # URL already tracked: only append this group if it is not yet listed.
        gidList = []
        for item in config[url]:
            gidList.append(item[0])
        if gid not in gidList:
            config[url].append([gid,name])
        else:
            await bot.finish(ev, 'ๆญค็พคๅทฒ็ปๆทปๅ ่ฟ่ฏฅ่ฎข้
๏ผ่ฏทๅฟ้ๅคๆทปๅ ')
    else:
        config[url] = []
        config[url].append([gid,name])
    if save_config(config):
        # NOTE(review): the f-string interpolates the whole argument list
        # ``s`` rather than ``name`` -- possibly unintended.
        await bot.send(ev, f'ๆทปๅ ่ฎข้
"{s}"ๆๅ!')
        # Re-run the crawler immediately to refresh the cache.
        await twitter_search_spider()
    else:
        await bot.send(ev, 'ๆทปๅ ่ฎข้
ๅคฑ่ดฅ๏ผ่ฏท้่ฏ')
@sv.on_prefix('ๅ ้ค่ฎข้
')
async def handle_RssDel(bot, ev: CQEvent):
    """Remove the subscription with the given name from this group's config."""
    config = load_config()
    s = ev.message.extract_plain_text()
    gid = str(ev.group_id)
    for url in config.keys():
        for item in config[url]:
            # item is [group_id, subscription_name]; match both group and name.
            if item[0] == gid and s == item[1]:
                config[url].remove(item)
                msg = f'ๅ ้ค่ฎข้
"{s}"ๆๅ'
                if not save_config(config):
                    await bot.finish(ev, 'ๅ ้ค่ฎข้
ๅคฑ่ดฅ๏ผ่ฏท้่ฏ')
                await bot.send(ev, msg)
                return
    msg = f'ๅ ้คๅคฑ่ดฅ, ๆญค็พคๆช่ฎพ็ฝฎ่ฎข้
"{s}"'
    await bot.send(ev, msg)
@sv.on_fullmatch('ๆฅ็ๆๆ่ฎข้
')
async def handle_RssLook(bot, ev: CQEvent):
    """List every subscription configured for the current group."""
    config = load_config()
    gid = str(ev.group_id)
    msg = ''
    for url in config.keys():
        for item in config[url]:
            if item[0] == gid:
                msg = msg + '\n' + item[1] + ': ' + url
    if msg == '':
        msg = 'ๆญค็พค่ฟๆชๆทปๅ twitter่ฎข้
'
    else:
        msg = 'twitter็ฌ่ซๅทฒๅผๅฏ!\nๆญค็พค่ฎพ็ฝฎ็่ฎข้
ไธบ:' + msg
    await bot.send(ev, msg)
@sv.scheduled_job('interval',minutes=5)
async def twitter_search_spider():
    """Scheduled job (every 5 minutes): crawl each configured RSS feed, push
    any updates, then delete downloaded .jpg files under hoshino_path."""
    bot = hoshino.get_bot()
    config = load_config()
    for url in config.keys():
        # Collect the group ids subscribed to this URL.
        gid = []
        for item in config[url]:
            gid.append(item[0])
        if gid:
            rss = RSS_class.rss()
            rss.url = url
            rss.gid = gid
            await spider_work(rss, bot, sv)
    # Clean up image files the crawl may have produced.
    for root, dirs, files in os.walk(hoshino_path):
        for name in files:
            if name.endswith('.jpg'):
                os.remove(os.path.join(root, name))
| sanshanya/hoshino_xcw | XCW/Hoshino/hoshino/modules/CQTwitter/CQTwitter.py | CQTwitter.py | py | 4,387 | python | en | code | 231 | github-code | 13 |
36872760771 | """A module containing tools that a discord user might need."""
from enum import Enum
from typing import Union
from discord.ext import commands
from discord import Role, Embed
from src.cogs.base import ConfiguredCog
class RequestAction(Enum):
    """An enumeration class containing all the possible role request actions
    that can be taken."""

    # Values are compared against the lower-cased user input in the `role`
    # command handler.
    ADD = 'add'  # add a role to the user
    REMOVE = 'remove'  # remove a role from the user
    LIST = 'list'  # list all the possible roles
class RoleRequestCog(ConfiguredCog):
    """A Cog class meant to add and remove roles from users that request
    them."""

    # Key of this cog's section in the bot configuration.
    config_name = 'role'

    @commands.command()
    async def role(self,
                   ctx: commands.Context,
                   action: str,
                   *target_role_list: str):
        """The origin point for the `role` command.

        :param ctx:              The command context.
        :param action:           The string action to execute. Should
                                 correlate to an action in the
                                 `RequestAction` enumeration.
        :param target_role_list: A list of strings, denoting the desired
                                 role to perform the action (or ignored,
                                 depending on the action). This list will
                                 be joined by spaces.
        """

        # Role names may contain spaces, so the variadic words are re-joined.
        role_query = ' '.join(target_role_list)
        action = action.lower()

        if ctx.guild is None:
            message = ('This command must be used from a guild. Please go to '
                       'the guild you wish to use the command on '
                       'and try again.')
            await ctx.send(message)
            return

        # Dispatch on the requested action; unknown actions fall through to
        # an error message.
        if action == RequestAction.ADD.value:
            message = await self._add_role(ctx, role_query)
        elif action == RequestAction.REMOVE.value:
            message = await self._remove_role(ctx, role_query)
        elif action == RequestAction.LIST.value:
            message = self._build_role_list_message(ctx)
        else:
            message = (f'Unknown role command `{action}`, please re-enter '
                       f'your command and try again.')

        await ctx.send(message)

    async def _add_role(self, ctx: commands.Context, role_query: str) -> str:
        """Adds the role requested to the user, if possible.

        :param ctx:        The command context.
        :param role_query: The role query the user inputted.

        :return: The resulting message to send back to the user.
        """

        # find role
        role = self.find_role_in_guild(role_query, ctx.guild)
        if not role:
            return (f'No role by the name of `{role_query}` exists in this '
                    f'guild. Please check your spelling and try again.')

        # make sure it's allowed to be manipulated
        if not self._validate_role_against_whitelist(role):
            return 'You are not allowed to interact with this role.'

        if self.member_contains_role(role.name, ctx.author):
            return 'You already have that role.'

        # add role to user
        reason = 'Role added via Manageable bot instance.'
        await ctx.author.add_roles(role, reason=reason)
        return f'You now have the `{role.name}` role.'

    async def _remove_role(self,
                           ctx: commands.Context,
                           role_query: str) -> str:
        """Removes the role requested from the user, if possible.

        :param ctx:        The command context.
        :param role_query: The role query the user inputted.

        :return: The resulting message to send back to the user.
        """

        # find role
        role = self.find_role_in_guild(role_query, ctx.guild)
        if not role:
            return (f'No role by the name of `{role_query}` exists in this '
                    f'guild. Please check your spelling and try again.')

        # make sure it's allowed to be manipulated
        if not self._validate_role_against_whitelist(role):
            return 'You are not allowed to interact with this role.'

        if not self.member_contains_role(role.name, ctx.author):
            return 'You do not have that role.'

        # remove role from user
        reason = 'Role removed via Manageable bot instance.'
        await ctx.author.remove_roles(role, reason=reason)
        return f'You no longer have the `{role.name}` role.'

    def _build_role_list_message(self, ctx: commands.Context) -> str:
        """ Builds a human-readable list of all the roles available to
        manipulate with the `role` command.

        :param ctx: The command context.

        :return: A human-readable message listing the roles available.
        """

        # Only whitelisted roles that actually exist in this guild are shown.
        message = '__**Available roles to add/remove:**__'
        for role_name in self.config['content']['role_whitelist']:
            if self.find_role_in_guild(role_name, ctx.guild):
                message += f'\n{role_name}'

        return message

    def _validate_role_against_whitelist(self, role: Role) -> bool:
        """Validates that the given role is in the config whitelist for allowed
        role interactions.

        :param role: The role to validate against the whitelist
                     configuration.

        :return: True if the case-sensitive role name is listed in the
                 config, False otherwise.
        """

        # Check the whitelist to make sure we are allowed to add this role
        if role.name not in self.config["content"]["role_whitelist"]:
            return False
        return True
class TagCog(ConfiguredCog):
    """A class supporting the `tag` command functionality."""

    # Key of this cog's section in the bot configuration.
    config_name = 'tag'

    @commands.command()
    async def tag(self,
                  ctx: commands.Context,
                  tag_name: Union[str, None] = None):
        """The origin point for the `tag` command.

        With a tag name, sends an embed built from that tag's config entry
        (case-insensitive lookup); without one, sends a list of all tags.

        :param ctx:      The command context.
        :param tag_name: The key string of the tag to query the config for.
        """

        if tag_name is not None:
            tag_data = None
            tag_list = ConfiguredCog.config['content']['tags']
            for tag in tag_list:
                # Check the tag, agnostic of case.
                if tag.lower() == tag_name.lower():
                    tag_name = tag
                    tag_data = tag_list[tag_name]
                    break

            # Throw an error since we didn't find a tag
            if tag_data is None:
                await ctx.send(f'The tag `{tag_name}` was not found.')
                return

            # Build tag data
            tag_color = self._get_tag_data_safe(tag_data, 'color')
            color = ConfiguredCog.convert_color(tag_color)
            title = self._get_tag_data_safe(tag_data, 'title')
            if title is None:
                # Tag title isn't set, but is required, set it to the tag name
                title = tag_name
            url = self._get_tag_data_safe(tag_data, 'url')
            description = self._get_tag_data_safe(tag_data, 'description')

            # Send embed
            message = Embed(color=color,
                            title=title,
                            url=url,
                            description=description)
        else:
            # Send list of tags
            description = ('Please do `tag <tag_name>` '
                           'to display the tag contents.')
            message = Embed(title='Available Tags',
                            description=description)

            tag_list = ConfiguredCog.config['content']['tags']
            for tag_id in tag_list.keys():
                title = self._get_tag_data_safe(tag_list[tag_id], 'title')
                if title is None:
                    # Tag title isn't set, but is required,
                    # so set it to the tag name
                    title = tag_id

                message.add_field(name=tag_id, value=title)

        await ctx.send(embed=message)

    @staticmethod
    def _get_tag_data_safe(tag_data: dict[str, str],
                           tag_name: str) -> Union[str, None]:
        """Looks up the tag name from a dictionary of tag data and fail safely
        if it can't be found.

        :param tag_data: A dictionary of tags and their data, where the keys
                         are strings referencing the tag's name,
                         and the values are dictionaries denoting the data
                         to build the tag.
        :param tag_name: The key to query in the provided data.

        :return: If the tag name is found in the data's keys, return the
                 corresponding dictionary value. If the tag's name was not
                 found in the data's keys, return `None`.
        """
        # IMPROVED: dict.get already returns None for a missing key; the
        # try/except KeyError of the original is unnecessary.
        return tag_data.get(tag_name)
| scytail/Manageable | src/cogs/user_tools.py | user_tools.py | py | 9,035 | python | en | code | 1 | github-code | 13 |
31803030853 | from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Union
from psycopg2 import sql
# --------------------------------------------------------------------------- #
# SQL COMMAND #
# --------------------------------------------------------------------------- #
@dataclass
class Sequel:
    """Class that encapsulates a sql sequel, its name and parameters."""
    # Short identifier for the query (e.g. "create_database"), used in logs.
    name: str
    # Composed psycopg2 command; None for file-based DDL batches whose SQL
    # is loaded from the path carried in ``params``.
    cmd: Union[sql.SQL, None]
    # Human-readable description of what the command did / will do.
    description: Union[str, None] = field(default=None)
    # 'admin' for DDL/role management, 'access' for data CRUD.
    query_context: Union[str, None] = field(default=None)
    # Kind of object targeted: 'database', 'table', 'user', 'transaction', ...
    object_type: Union[str, None] = field(default=None)
    # Name of the targeted object.
    object_name: Union[str, None] = field(default=None)
    # Bind parameters passed to cursor.execute; () when none are needed.
    params: Union[tuple, None] = field(default=())
# --------------------------------------------------------------------------- #
# ADMIN SEQUEL BASE #
# --------------------------------------------------------------------------- #
class AdminSequelBase(ABC):
    """Defines the API for Administrative SQL queries."""
    @abstractmethod
    def create(self, name: str, *args, **kwargs) -> Sequel:
        """Return a Sequel that creates the named object."""
        pass
    @abstractmethod
    def delete(self, name: str) -> Sequel:
        """Return a Sequel that drops the named object."""
        pass
    @abstractmethod
    def exists(self, name: str) -> Sequel:
        """Return a Sequel that checks whether the named object exists."""
        pass
# --------------------------------------------------------------------------- #
# ACCESS SEQUEL BASE #
# --------------------------------------------------------------------------- #
class AccessSequelBase(ABC):
    """Defines the API for Access related SQL queries."""
    @abstractmethod
    def read(self, name: str, *args, **kwargs) -> Sequel:
        """Return a SELECT Sequel for the named table."""
        pass
    @abstractmethod
    def create(self, name: str, *args, **kwargs) -> Sequel:
        """Return an INSERT Sequel for the named table."""
        pass
    @abstractmethod
    def update(self, name: str, *args, **kwargs) -> Sequel:
        """Return an UPDATE Sequel for the named table."""
        pass
    @abstractmethod
    def delete(self, name: str, *args, **kwargs) -> Sequel:
        """Return a DELETE Sequel for the named table."""
        pass
# =========================================================================== #
# ADMINISTRATIVE QUERIES #
# =========================================================================== #
# --------------------------------------------------------------------------- #
# DATABASE SEQUEL #
# --------------------------------------------------------------------------- #
class DatabaseSequel(AdminSequelBase):
    """Builds Sequel objects for database-level administrative commands."""
    def create(self, name: str) -> Sequel:
        """Return a Sequel that creates the database ``name``."""
        sequel = Sequel(
            name="create_database",
            description="Created {} database".format(name),
            query_context='admin',
            object_type='database',
            object_name=name,
            # Identifier quoting protects the name from SQL injection.
            cmd=sql.SQL("CREATE DATABASE {} ;").format(
                sql.Identifier(name))
        )
        return sequel
    def exists(self, name: str) -> Sequel:
        """Return a Sequel that checks (case-insensitively) whether the
        database ``name`` exists in the server catalog."""
        sequel = Sequel(
            name="database exists",
            description="Checked existence of {} database.".format(name),
            query_context='admin',
            object_type='database',
            object_name=name,
            cmd=sql.SQL("""SELECT EXISTS(
                    SELECT datname FROM pg_catalog.pg_database
                    WHERE lower(datname) = lower(%s));"""),
            params=tuple((name,))
        )
        return sequel
    def delete(self, name: str) -> Sequel:
        """Return a Sequel that drops the database ``name`` if present."""
        sequel = Sequel(
            name="drop database",
            description="Dropped {} database if it exists.".format(name),
            query_context='admin',
            object_type='database',
            object_name=name,
            cmd=sql.SQL("DROP DATABASE IF EXISTS {};").format(
                sql.Identifier(name))
        )
        return sequel
    def terminate_database(self, name: str) -> Sequel:
        """Return a Sequel that kills all other backends connected to the
        database ``name`` (required before dropping a busy database)."""
        sequel = Sequel(
            name="terminate_database_processes",
            description="Terminated processes on {} database if it exists."
            .format(name),
            query_context='admin',
            object_type='database',
            object_name=name,
            cmd=sql.SQL("""SELECT pg_terminate_backend(pg_stat_activity.pid)
                        FROM pg_stat_activity
                        WHERE
                        pg_stat_activity.pid <> pg_backend_pid() AND
                        pg_stat_activity.datname = {};""").format(
                sql.Placeholder()
            ),
            params=tuple((name,))
        )
        return sequel
    def activity(self) -> Sequel:
        """Return a Sequel that dumps pg_stat_activity.

        NOTE(review): params is None here while the other methods use a
        tuple; confirm the executor accepts both.
        """
        sequel = Sequel(
            name="activity",
            description="Get activity from pg_stat_activity.",
            query_context='admin',
            object_type='database',
            object_name="activity",
            cmd=sql.SQL("""SELECT * FROM pg_stat_activity;"""),
            params=None
        )
        return sequel
# --------------------------------------------------------------------------- #
# DATABASE SCHEMA #
# --------------------------------------------------------------------------- #
class SchemaSequel(AdminSequelBase):
    """Builds Sequel objects for schema-level DDL operations."""
    def create(self, name: str) -> Sequel:
        """Return a Sequel that creates the schema ``name``.

        The command does NOT use IF NOT EXISTS and will fail if the schema
        already exists; the description now states what actually runs
        (the old text wrongly claimed "SCHEMA IF NOT EXISTS").
        """
        sequel = Sequel(
            name="create_schema",
            description="Created schema {}".format(name),
            query_context='admin',
            # NOTE(review): object_type 'database' is kept for backward
            # compatibility with existing consumers, though 'schema' would
            # be more accurate.
            object_type='database',
            object_name=name,
            cmd=sql.SQL("CREATE SCHEMA {};").format(
                sql.Identifier(name)
            )
        )
        return sequel
    def exists(self, name: str) -> Sequel:
        """Return a Sequel that checks (case-insensitively) whether the
        schema ``name`` exists."""
        sequel = Sequel(
            # Fixed: name/description were copy-pasted from DatabaseSequel
            # and claimed to check a database.
            name="schema_exists",
            description="Checked existence of {} schema.".format(name),
            query_context='admin',
            object_type='database',
            object_name=name,
            cmd=sql.SQL("""SELECT EXISTS(
                SELECT schema_name FROM information_schema.schemata
                WHERE lower(schema_name) = lower(%s));"""),
            params=tuple((name,))
        )
        return sequel
    def delete(self, name: str) -> Sequel:
        """Return a Sequel that drops the schema ``name``."""
        sequel = Sequel(
            name="drop_schema",
            description="Dropped schema {}.".format(name),
            query_context='admin',
            object_type='database',
            object_name=name,
            cmd=sql.SQL("DROP SCHEMA {};").format(
                sql.Identifier(name))
        )
        return sequel
# --------------------------------------------------------------------------- #
# TABLES SEQUEL #
# --------------------------------------------------------------------------- #
class TableSequel(AdminSequelBase):
    """Builds Sequel objects for table DDL and table-metadata queries."""
    def create(self, name: str, filepath: str) -> Sequel:
        """Return a Sequel whose DDL is loaded from ``filepath``.

        cmd is None on purpose: the executor reads the SQL from the file
        path carried in params.
        """
        sequel = Sequel(
            name="create_table",
            description="Created table {} from SQL ddl in {}".format(
                name, filepath),
            query_context='admin',
            object_type='table',
            object_name=name,
            cmd=None,
            params=filepath
        )
        return sequel
    def batch_create(self, filepath: str) -> Sequel:
        """Return a Sequel creating several tables from a DDL file."""
        sequel = Sequel(
            name="batch_create_tables",
            description="Create tables from SQL ddl in {}".format(filepath),
            query_context='admin',
            object_type='table',
            object_name="batch",
            cmd=None,
            params=filepath
        )
        return sequel
    def exists(self, name: str, schema: str) -> Sequel:
        """Return a Sequel checking for ``schema.name`` in the catalog."""
        sequel = Sequel(
            name="table_exists",
            description="Checked existence of table {}".format(name),
            query_context='admin',
            object_type='table',
            object_name=name,
            cmd=sql.SQL("""SELECT 1 FROM information_schema.tables
                        WHERE table_schema = {}
                        AND table_name = {}""").format(
                sql.Placeholder(),
                sql.Placeholder()
            ),
            params=(schema, name,)
        )
        return sequel
    def delete(self, name: str, schema: str) -> Sequel:
        """Return a Sequel dropping ``schema.name`` if it exists."""
        sequel = Sequel(
            name="delete_table",
            description="Drop table {}.{}".format(schema, name),
            query_context='admin',
            object_type='table',
            object_name=name,
            cmd=sql.SQL("DROP TABLE IF EXISTS {}.{};").format(
                sql.Identifier(schema),
                sql.Identifier(name)
            )
        )
        return sequel
    def batch_delete(self, filepath) -> Sequel:
        """Return a Sequel dropping several tables from a DDL file."""
        sequel = Sequel(
            name="delete_tables",
            description="Drop tables from SQL ddl in {}".format(filepath),
            query_context='admin',
            object_type='table',
            object_name="batch",
            cmd=None,
            params=filepath
        )
        return sequel
    def column_exists(self, name: str, schema: str, column: str) -> Sequel:
        """Return a Sequel checking that ``column`` exists on the table."""
        sequel = Sequel(
            name="column_exists",
            description="Checked existence of column {} in {} table".format(
                column, name),
            query_context='admin',
            object_type='table',
            object_name=name,
            cmd=sql.SQL("""SELECT 1 FROM information_schema.columns
                        WHERE table_schema = {}
                        AND table_name = {}
                        AND column_name = {}""").format(
                sql.Placeholder(),
                sql.Placeholder(),
                sql.Placeholder()
            ),
            params=(schema, name, column,)
        )
        return sequel
    def get_columns(self, name: str, schema: str) -> Sequel:
        """Return a Sequel listing the column names of ``schema.name``."""
        sequel = Sequel(
            name="get_columns",
            description="Obtained columns for {}.{} table".format(
                schema, name),
            query_context='admin',
            object_type='table',
            object_name=name,
            cmd=sql.SQL("""SELECT column_name FROM information_schema.columns
                        WHERE table_schema = {}
                        AND table_name = {}""").format(
                sql.Placeholder(),
                sql.Placeholder()
            ),
            params=(schema, name)
        )
        return sequel
    def create_column(self, name: str, schema: str, column: str,
                      datatype: str) -> Sequel:
        """Return a Sequel adding ``column`` of ``datatype`` to the table.

        Fix: the data type used to be passed as a bind Placeholder, which
        psycopg2 would render as a quoted string literal ('integer') --
        invalid DDL.  Type names cannot be bind parameters, so the type is
        spliced in as raw SQL; callers must supply a trusted datatype
        string.
        """
        sequel = Sequel(
            name="create_column",
            description="Add column {} to {}.{} table".format(
                column, schema, name),
            query_context='admin',
            object_type='table',
            object_name=name,
            cmd=sql.SQL("""ALTER TABLE {}.{} ADD {} {};""").format(
                sql.Identifier(schema),
                sql.Identifier(name),
                sql.Identifier(column),
                sql.SQL(datatype)
            ),
            params=()
        )
        return sequel
    def tables(self, schema: str = 'public') -> Sequel:
        """Return a Sequel listing all table names in ``schema``."""
        sequel = Sequel(
            name="tables",
            description="Selected table names in {} schema.".format(schema),
            query_context='admin',
            object_type='database',
            object_name=schema,
            cmd=sql.SQL("""SELECT table_name FROM information_schema.tables
                        WHERE table_schema = {}""").format(
                sql.Placeholder()
            ),
            params=(schema,)
        )
        return sequel
# --------------------------------------------------------------------------- #
# USER SEQUEL #
# --------------------------------------------------------------------------- #
class UserSequel(AdminSequelBase):
    """Builds Sequel objects for PostgreSQL user (role) management."""
    def create(self, name: str, password: str) -> Sequel:
        """Return a Sequel creating user ``name`` with CREATEDB rights.

        The password travels as a bind parameter, never as literal SQL.
        """
        sequel = Sequel(
            name="create_user",
            description="Created user {}".format(name),
            query_context='admin',
            object_type='user',
            object_name=name,
            cmd=sql.SQL("CREATE USER {} WITH PASSWORD {} CREATEDB;").format(
                sql.Identifier(name),
                sql.Placeholder()
            ),
            params=(password,)
        )
        return sequel
    def delete(self, name: str) -> Sequel:
        """Return a Sequel dropping user ``name`` if it exists."""
        sequel = Sequel(
            name="drop_user",
            description="Dropped user {}".format(name),
            query_context='admin',
            object_type='user',
            object_name=name,
            cmd=sql.SQL("DROP USER IF EXISTS {};").format(
                sql.Identifier(name))
        )
        return sequel
    def exists(self, name: str) -> Sequel:
        """Return a Sequel checking whether role ``name`` exists."""
        sequel = Sequel(
            name="user_exists",
            description="Checked existence of user {}".format(name),
            query_context='admin',
            object_type='user',
            object_name=name,
            cmd=sql.SQL("SELECT 1 FROM pg_roles WHERE rolname ={};").format(
                sql.Placeholder()),
            params=tuple((name,))
        )
        return sequel
    def grant(self, name: str, dbname: str) -> Sequel:
        """Return a Sequel granting all privileges on ``dbname`` to user."""
        sequel = Sequel(
            name="grant",
            description="Granted privileges on database {} to {}"
            .format(dbname, name),
            query_context='admin',
            object_type='user',
            object_name=name,
            cmd=sql.SQL("GRANT ALL PRIVILEGES ON DATABASE {} TO {} ;")
            .format(
                sql.Identifier(dbname),
                sql.Identifier(name))
        )
        return sequel
    def revoke(self, name: str, dbname: str) -> Sequel:
        """Return a Sequel revoking all database privileges from a user.

        Fix: the two-slot template used to receive five Identifier
        arguments (three silently unused), and the description claimed to
        also revoke on tables and sequences, which the command does not do.
        """
        sequel = Sequel(
            name="revoke",
            description="Revoked privileges on database {} from {}"
            .format(dbname, name),
            query_context='admin',
            object_type='user',
            object_name=name,
            cmd=sql.SQL(
                """REVOKE ALL PRIVILEGES ON DATABASE {} FROM {} CASCADE;""")
            .format(
                sql.Identifier(dbname),
                sql.Identifier(name)
            )
        )
        return sequel
# =========================================================================== #
# ACCESS QUERIES #
# =========================================================================== #
class AccessSequel(AccessSequelBase):
    """Builds parameterized CRUD Sequel objects for table data access."""
    def _get(self, name: str, schema: str, columns: list = None,
             filter_key: str = None,
             filter_value: Union[str, int, float] = None) -> Sequel:
        """SELECT chosen columns from rows matching filter_key = value."""
        sequel = Sequel(
            name="select",
            description="Selected {} from {}.{} where {} = {}".format(
                columns, schema, name, filter_key, filter_value
            ),
            query_context='access',
            object_type='table',
            object_name=name,
            cmd=sql.SQL("SELECT {} FROM {}.{} WHERE {} = {};").format(
                sql.SQL(", ").join(map(sql.Identifier, columns)),
                sql.Identifier(schema),
                sql.Identifier(name),
                sql.Identifier(filter_key),
                sql.Placeholder()),
            params=(filter_value,)
        )
        return sequel
    def _get_all_columns_all_rows(self, name: str, schema: str,
                                  columns: list = None,
                                  filter_key: str = None,
                                  filter_value: Union[str, int, float] = None) -> Sequel:
        """SELECT * with no WHERE clause.

        The columns/filter parameters are unused; they are kept so all
        four _get* helpers share one signature for read()'s dispatch.
        """
        sequel = Sequel(
            name="select",
            description="Selected * from {}.{}".format(
                schema, name
            ),
            query_context='access',
            object_type='table',
            object_name=name,
            cmd=sql.SQL("SELECT * FROM {}.{};").format(
                sql.Identifier(schema),
                sql.Identifier(name)
            )
        )
        return sequel
    def _get_all_rows(self, name: str, schema: str, columns: list = None,
                      filter_key: str = None,
                      filter_value: Union[str, int, float] = None) -> Sequel:
        """SELECT chosen columns with no WHERE clause."""
        sequel = Sequel(
            name="select",
            description="Selected {} from {}.{}".format(
                columns, schema, name
            ),
            query_context='access',
            object_type='table',
            object_name=name,
            cmd=sql.SQL("SELECT {} FROM {}.{};").format(
                sql.SQL(", ").join(map(sql.Identifier, columns)),
                sql.Identifier(schema),
                sql.Identifier(name)
            )
        )
        return sequel
    def _get_all_columns(self, name: str, schema: str,
                         columns: list = None,
                         filter_key: str = None,
                         filter_value: Union[str, int, float] = None) -> Sequel:
        """SELECT * from rows matching filter_key = filter_value."""
        sequel = Sequel(
            name="select",
            description="Selected * from {}.{} where {} = {}".format(
                schema, name, filter_key, filter_value
            ),
            query_context='access',
            object_type='table',
            object_name=name,
            cmd=sql.SQL("SELECT * FROM {}.{} WHERE {} = {};").format(
                sql.Identifier(schema),
                sql.Identifier(name),
                sql.Identifier(filter_key),
                sql.Placeholder()
            ),
            params=(filter_value,)
        )
        return sequel
    def read(self, name: str, schema: str, columns: list = None,
             filter_key: str = None,
             filter_value: Union[str, int, float] = None) -> Sequel:
        """Return a SELECT Sequel, dispatching on which options are given.

        Raises ValueError when exactly one of filter_key / filter_value is
        supplied (a WHERE clause needs both).
        """
        # Equivalent to the original '(a and b) != (a or b)' construction,
        # which is true precisely when one operand is None and the other
        # is not -- but stated directly.
        if (filter_key is None) != (filter_value is None):
            raise ValueError("where values not completely specified.")
        if (columns is not None and filter_key is not None):
            # Returns selected columns from selected rows
            return self._get(name=name, schema=schema, columns=columns,
                             filter_key=filter_key, filter_value=filter_value)
        elif (columns is not None):
            # Returns all rows, selected columns
            return self._get_all_rows(name=name, schema=schema,
                                      columns=columns,
                                      filter_key=filter_key,
                                      filter_value=filter_value)
        elif (filter_key is not None):
            # Returns all columns, selected rows
            return self._get_all_columns(name=name, schema=schema,
                                         columns=columns,
                                         filter_key=filter_key,
                                         filter_value=filter_value)
        else:
            return self._get_all_columns_all_rows(name=name, schema=schema,
                                                  columns=columns,
                                                  filter_key=filter_key,
                                                  filter_value=filter_value)
    def create(self, name: str, schema: str, columns: list,
               values: list) -> Sequel:
        """Return an INSERT Sequel; values travel as bind parameters.

        Raises ValueError when columns and values differ in length.
        """
        if (len(columns) != len(values)):
            raise ValueError(
                "Number of columns doesn't match number of values")
        sequel = Sequel(
            name="insert",
            # Fixed: the last format argument was 'name', so the log line
            # reported the table name instead of the inserted values.
            description="Inserted into {}.{} {} values {}".format(
                schema, name, columns, values
            ),
            query_context='access',
            object_type='table',
            object_name=name,
            cmd=sql.SQL("INSERT into {}.{} ({}) values ({});")
            .format(
                sql.Identifier(schema),
                sql.Identifier(name),
                sql.SQL(', ').join(map(sql.Identifier, tuple((*columns,)))),
                sql.SQL(', ').join(sql.Placeholder() * len(columns))
            ),
            params=(*values,)
        )
        return sequel
    def update(self, name: str, schema: str, column: str,
               value: Union[str, float, int], filter_key: str,
               filter_value: Union[str, float, int]) -> Sequel:
        """Return an UPDATE Sequel setting one column on matching rows."""
        sequel = Sequel(
            name="update",
            description="Updated {}.{} setting {} = {} where {} = {}".format(
                schema, name, column, value, filter_key, filter_value
            ),
            query_context='access',
            object_type='table',
            object_name=name,
            cmd=sql.SQL("UPDATE {}.{} SET {} = {} WHERE {} = {}").format(
                sql.Identifier(schema),
                sql.Identifier(name),
                sql.Identifier(column),
                sql.Placeholder(),
                sql.Identifier(filter_key),
                sql.Placeholder()
            ),
            params=(value, filter_value,)
        )
        return sequel
    def delete(self, name: str, schema: str, filter_key: str,
               filter_value: Union[str, float, int]) -> Sequel:
        """Return a DELETE Sequel for rows matching filter_key = value.

        NOTE(review): unlike the other commands, the DELETE template is not
        schema-qualified even though ``schema`` is accepted -- confirm
        whether that is intentional before changing it.
        """
        sequel = Sequel(
            name="delete",
            description="Deleted from {}.{} where {} = {}".format(
                schema, name, filter_key, filter_value
            ),
            query_context='access',
            object_type='table',
            object_name=name,
            cmd=sql.SQL("DELETE FROM {} WHERE {} = {}").format(
                sql.Identifier(name),
                sql.Identifier(filter_key),
                sql.Placeholder()
            ),
            params=(filter_value,)
        )
        return sequel
    def begin(self) -> Sequel:
        """Return a Sequel that starts an explicit transaction."""
        sequel = Sequel(
            name="begin",
            description="Started transaction.",
            query_context='access',
            object_type='transaction',
            object_name='connection',
            cmd=sql.SQL("START TRANSACTION;")
        )
        return sequel
| john-james-ai/drug-approval-analytics | src/infrastructure/data/sequel.py | sequel.py | py | 22,331 | python | en | code | 0 | github-code | 13 |
30578887074 | # -*- coding: utf-8 -*-
import sys
import appmodel
import cargardata
import os
import pickle
import model
import Noticia
import shutil
from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog,QMessageBox,QTableWidgetItem,QWidget,QHeaderView
from PyQt5.QtGui import QImage, QPalette, QBrush
from PyQt5.QtCore import QSize
from PyQt5 import uic
from PyQt5 import QtCore
def pallet(x, y):
    """Build a QPalette whose window background is the stock image scaled
    to ``x`` by ``y`` pixels.

    :param x: target width in pixels.
    :param y: target height in pixels.
    :return: a QPalette with the scaled image set as the window brush.
    """
    # os.path.join replaces the manual '.'+os.path.sep+... concatenation.
    oImage = QImage(os.path.join('.', 'utiliti', "copy.jpg"))
    sImage = oImage.scaled(QSize(x, y))  # resize image to the widget's size
    palette = QPalette()
    palette.setBrush(10, QBrush(sImage))  # 10 = Window role
    return palette
#from Controlador_Clasificacion import Ventana_Clasifica
#Clase heredada de QMainWindow (Constructor de ventanas)
class Ventana_Principal(QMainWindow):
    """Main window: pick folders, create models and launch classification."""
    def __init__(self):
        """Load the .ui layout, populate the model combo and wire buttons."""
        # Initialise the QMainWindow base object.
        QMainWindow.__init__(self)
        # Selected folders (classification / training); empty until chosen.
        self.path_clasificar=""
        self.path_entrenar=""
        # Load the window layout from the Qt Designer file.
        uic.loadUi("Ventana_Index.ui", self)
        self.setWindowTitle("Clasificador de noticias")
        self.setPalette(pallet(892,675))
        # Populate the combo box with the model names found on disk.
        nam_model=appmodel.obtener_modelos('.'+os.path.sep+'modelos')
        self.cbx_Modelos.addItems(nam_model)
        # Wire the buttons to their handlers.
        self.btt_Desc.clicked.connect(self.selec_carpeta_Desc)
        self.btt_Clasificar_2.clicked.connect(self.clasificar)
        self.btt_Entreno.clicked.connect(self.selec_carpeta_Entre)
        self.btt_Crear_Model.clicked.connect(self.Crear_Modelo)
    def Crear_Modelo(self):
        """Create and persist a new model from the selected training folder,
        then archive the training category folders."""
        path='.'+os.path.sep+'modelos'+os.path.sep+self.txt_Nombre.text()+'.txt'
        dst='.'+os.path.sep+'Entrenamiento'+os.path.sep+self.txt_Nombre.text()
        categorias=cargardata.obtener_path_noticias_entrenamiento(self.path_entrenar)
        if(self.txt_Nombre.text()!=""):
            if not os.path.exists(path):
                # NOTE(review): when no training folder was selected
                # (path_entrenar == "") this branch silently does nothing.
                if(self.path_entrenar!=""):
                    data,target_names,target=cargardata.load_files(self.path_entrenar)
                    if(len(data)>0):
                        modelo = model.crearModelo(data,target,target_names, outpath=path)
                        if(modelo):
                            # Move both category folders into the archive dir.
                            os.makedirs(dst)
                            pa=self.path_entrenar+os.path.sep+categorias[0]
                            shutil.move(pa,dst)
                            pa=self.path_entrenar+os.path.sep+categorias[1]
                            shutil.move(self.path_entrenar+os.path.sep+categorias[1],dst)
                            QMessageBox.about(self, "CREACION", "Modelo Creado")
                            self.cbx_Modelos.addItem(self.txt_Nombre.text())
                        else:
                            QMessageBox.about(self, "ERROR", "ERROR AL CREAR EL MODELO")
                    else :
                        QMessageBox.about(self, "ERROR", "Datos mal cargado")
            else:
                QMessageBox.about(self, "ERROR", "El nombre introducido ya existe")
        else:
            QMessageBox.about(self, "ERROR", "Introduce un nombre")
    def selec_carpeta_Entre(self):
        """Ask the user for the training folder."""
        file = str(QFileDialog.getExistingDirectory(self, "Select Directory"))
        if file:
            self.path_entrenar=file
    def selec_carpeta_Desc(self):
        """Ask the user for the folder with news to classify."""
        file = str(QFileDialog.getExistingDirectory(self, "Select Directory"))
        if file:
            self.path_clasificar=file
    def clasificar(self):
        """Classify the news in the selected folder with the chosen model
        and open the results window."""
        if(self.path_clasificar!=""):
            nombre_modelo=self.cbx_Modelos.currentText()
            path_modelo='.'+os.path.sep+'modelos'+os.path.sep+nombre_modelo+".txt"
            dirnoticias=self.path_clasificar
            data=cargardata.load_noticias(dirnoticias)
            if (data.size>0):
                # NOTE(review): pickle.load executes arbitrary code if the
                # model file is untrusted; also, local 'model' shadows the
                # imported 'model' module inside this method.
                with open(path_modelo, 'rb') as f:
                    model = pickle.load(f)
                violencia,genericas=appmodel.evaluar(data,model)
                if violencia and genericas:
                    _ventanaclasificar=Ventana_Clasifica(nombre_modelo)
                    _ventanaclasificar.con_tabla(violencia,genericas)
                    _ventanaclasificar.show_win()
                else :
                    QMessageBox.about(self, "ERROR", "Error al clasificar las noticias")
            else:
                QMessageBox.about(self, "ERROR", "Error al cargar los archivos a clasificar")
        else:
            QMessageBox.about(self, "ERROR", "No has seleccionado ninguna carpeta para clasificar")
class Ventana_Clasifica(QWidget):
    """Results window showing classified news in two tables
    (violence-related vs. generic), with preview / reclassify / retrain."""
    def __init__(self,nombre):
        """Load the layout and wire the retrain and save buttons.

        :param nombre: name of the model used for the classification.
        """
        QWidget.__init__(self)
        # NOTE(review): bare 'self.window' is a no-op expression -- it
        # evaluates the inherited QWidget.window method without calling it.
        self.window
        self.nombre_modelo=nombre
        # Classified item lists; filled later by con_tabla().
        self.genericas=[]
        self.violencia=[]
        self.table_model_violencia=0
        self.setPalette(pallet(902,683))
        self.path_clasificar=""
        self.window =uic.loadUi("Ventana_Clasificacion.ui", self)
        self.setWindowTitle("Clasificado por el modelo "+ self.nombre_modelo)
        self.btt_Reentreno.clicked.connect(self.reentrenar)
        self.btt_Guardar_Noticias.clicked.connect(self.guardar)
    def show_win(self):
        """Show the loaded .ui window."""
        self.window.show()
    def guardar(self):
        """Save both groups of news under a user-chosen directory."""
        file = str(QFileDialog.getExistingDirectory(self, "Select Directory"))
        if file:
            # NOTE(review): os.makedirs raises if these subfolders exist.
            os.makedirs(file+os.path.sep+"Violencia")
            vio=cargardata.guardar_Noticias(file+os.path.sep+"Violencia"+os.path.sep+"VG",appmodel.CargarNoticias(self.violencia))
            os.makedirs(file+os.path.sep+"Genericas")
            gen=cargardata.guardar_Noticias(file+os.path.sep+"Genericas"+os.path.sep+"Gen",appmodel.CargarNoticias(self.genericas),)
            if(vio and gen):
                QMessageBox.about(self, "Informacion", "Noticias guardadas")
            else :
                QMessageBox.about(self, "ERROR", "Error al guardar las noticias")
    def reentrenar(self):
        """Append the current classification to the model's training data
        and rebuild the model file in place."""
        noticias=appmodel.CargarNoticias(self.violencia)
        for i in noticias:
            if(not cargardata.Agregar_TXT(self.nombre_modelo, "VG", i)):
                QMessageBox.about(self, "ERROR", "Error al cargar los archivos a clasificar")
        noticias=appmodel.CargarNoticias(self.genericas)
        for i in noticias:
            if(not cargardata.Agregar_TXT(self.nombre_modelo, "General", i)):
                QMessageBox.about(self, "ERROR", "Error al cargar los archivos a clasificar")
        PATH='.'+os.path.sep+'modelos'+os.path.sep+self.nombre_modelo+'.txt'
        dirname0=cargardata.obtener_path_reentrenamiento(self.nombre_modelo)
        data,target_names,target=cargardata.load_files(dirname0)
        modelo = model.crearModelo(data,target,target_names, outpath=PATH)
        if(modelo):
            QMessageBox.about(self, "Correcto", "Reentrenamiento correcto")
    def cellClick_Vg(self,row,col):
        """Handle clicks on the violence table: column 2 offers a preview,
        any other column offers reclassification to the generic table."""
        if(col==2):
            elec=QMessageBox.question(self, "Informacion", "Quiere previsualizar la noticia",QMessageBox.Yes |QMessageBox.No)
            if elec==QMessageBox.Yes:
                # Columns: 0 = date, 1 = title, 2 = description.
                titulo=self.tbl_Vg.item(row,1).text()
                desc=self.tbl_Vg.item(row,col).text()
                fecha=self.tbl_Vg.item(row,0).text()
                noticia=Noticia.Noticia(titulo,desc,fecha)
                _ventanapreviw=Ventana_Previuw(noticia)
                _ventanapreviw.show_win()
        else:
            elec=QMessageBox.question(self, "Informacion", "Quiere reclasificar la noticia ",QMessageBox.Yes |QMessageBox.No)
            if elec==QMessageBox.Yes:
                genericas=[]
                violencia=[]
                violencia,genericas=appmodel.reclasificar(self.violencia,self.genericas,row)
                # Reset the table before rebuilding both from scratch.
                self.tbl_Vg.setColumnCount(0)
                self.tbl_Vg.setRowCount(0)
                self.con_tabla(violencia,genericas)
    def cellClick_Nvg(self,row,col):
        """Same as cellClick_Vg but for the generic-news table."""
        if(col==2):
            elec=QMessageBox.question(self, "Informacion", "Quiere previsualizar la noticia",QMessageBox.Yes |QMessageBox.No)
            if elec==QMessageBox.Yes:
                titulo=self.tbl_Nvg.item(row,1).text()
                desc=self.tbl_Nvg.item(row,col).text()
                fecha=self.tbl_Nvg.item(row,0).text()
                noticia=Noticia.Noticia(titulo,desc,fecha)
                _ventanapreviw=Ventana_Previuw(noticia)
                _ventanapreviw.show_win()
        else:
            elec=QMessageBox.question(self, "Informacion", "Quiere reclasificar la noticia ",QMessageBox.Yes |QMessageBox.No)
            if elec==QMessageBox.Yes:
                genericas=[]
                violencia=[]
                genericas, violencia=appmodel.reclasificar(self.genericas,self.violencia,row)
                self.tbl_Vg.setColumnCount(0)
                self.tbl_Vg.setRowCount(0)
                self.con_tabla(violencia,genericas)
    def con_tabla(self,violencia,genericas):
        """Fill both tables from the classified news lists."""
        self.violencia=violencia
        self.genericas=genericas
        # Column headers shared by both tables.
        header = ["Fecha","Titulo","Descripcion"]
        # Violence table.
        noticias=appmodel.CargarNoticias(self.violencia)
        data=appmodel.contenido_tabla(noticias)
        self.tbl_Vg.setColumnCount(3)
        self.tbl_Vg.setHorizontalHeaderLabels(header)
        self.tbl_Vg.setRowCount(len(data))
        # NOTE(review): connect() runs on every refresh, so after a
        # reclassification the click handlers stack up and fire repeatedly.
        self.tbl_Vg.cellClicked.connect(self.cellClick_Vg)
        r=0
        for i in data:
            c=0
            for e in i:
                item=QTableWidgetItem(e)
                # Cells are selectable but not editable.
                item.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
                self.tbl_Vg.setItem(r,c, item)
                c=c+1
            r=r+1
        head = self.tbl_Vg.horizontalHeader()
        head.setSectionResizeMode(QHeaderView.Stretch)
        head.setStretchLastSection(True)
        # Generic-news table.
        noticias=appmodel.CargarNoticias(self.genericas)
        data=appmodel.contenido_tabla(noticias)
        self.tbl_Nvg.setColumnCount(3)
        self.tbl_Nvg.setHorizontalHeaderLabels(header)
        self.tbl_Nvg.setRowCount(len(data))
        self.tbl_Nvg.cellClicked.connect(self.cellClick_Nvg)
        r=0
        for i in data:
            c=0
            for e in i:
                item=QTableWidgetItem(e)
                item.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
                self.tbl_Nvg.setItem(r,c,item )
                c=c+1
            r=r+1
        head = self.tbl_Nvg.horizontalHeader()
        head.setSectionResizeMode(QHeaderView.Stretch)
        head.setStretchLastSection(True)
class Ventana_Previuw(QWidget):
    """Preview window that displays a single news item."""
    def __init__(self,noticia):
        """Load the layout and fill the labels from ``noticia``.

        :param noticia: object exposing .titulo, .descripcion and .fecha.
        """
        QWidget.__init__(self)
        self.noticia=noticia
        self.path_clasificar=""
        tit=""
        desc=""
        window=uic.loadUi("Preview.ui", self)
        self.setWindowTitle("Visualizar una noticia")
        self.window=window
        # Wrap description and title to their label widths.
        descrip=self.splitStringMax(self.noticia.descripcion,120)
        for i in descrip:
            desc=desc+"\n"+i
        # BUG FIX: the title used to be wrapped from noticia.descripcion,
        # so the title label displayed the description text.
        titulo=self.splitStringMax(self.noticia.titulo,100)
        for i in titulo:
            tit=tit+"\n"+i
        self.window.lbl_Titulo.setText(tit)
        self.window.lbl_Descripcion.setText(desc)
        self.window.lbl_Fecha.setText(self.noticia.fecha)
    def show_win(self):
        """Show the loaded .ui window."""
        self.window.show()
    def splitStringMax(self,si, limit):
        """Greedy word-wrap: split ``si`` into lines of at most roughly
        ``limit`` characters; a single-word string is returned unchanged."""
        ls = si.split()
        lo=[]
        st=''
        ln=len(ls)
        if ln==1:
            return [si]
        i=0
        for l in ls:
            st+=l
            i+=1
            if i <ln:
                # Keep appending while the next word still fits.
                lk=len(ls[i])
                if (len(st))+1+lk < limit:
                    st+=' '
                    continue
            lo.append(st)
            st=''
        return lo
# Create the Qt application instance.
app = QApplication(sys.argv)
# Instantiate the main window.
_ventana = Ventana_Principal()
# Show the window.
_ventana.show()
# Run the application event loop until the window is closed.
app.exec_()
| Eufalo/Machine-Learning | controlador_Index.py | controlador_Index.py | py | 12,511 | python | es | code | 0 | github-code | 13 |
23248327916 | #!/usr/bin/env python3
# encoding: utf-8
class ListNode:
    """Singly linked list node holding a value and a next pointer."""
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
def get_len(head: ListNode):
    """Return the number of nodes in the list starting at ``head``."""
    count = 0
    node = head
    while node is not None:
        count += 1
        node = node.next
    return count
def move_to_last_k(head: ListNode, k: int):
    """Locate the window of the last ``k`` nodes with two pointers.

    Returns ``(pre_first, first, last)``: the node just before the
    k-th-from-end node, that node itself, and the list tail.  A dummy
    node in front of ``head`` keeps the result well-defined even when
    k equals the list length.
    """
    init_last = pre_first = ListNode(next=head)
    # Give the leading pointer a head start of k nodes.
    for _ in range(k):
        init_last = init_last.next
    # Advance both until the leader sits on the tail.
    while init_last.next is not None:
        pre_first = pre_first.next
        init_last = init_last.next
    return pre_first, pre_first.next, init_last
class Solution:
    def rotateRight(self, head: ListNode, k: int) -> ListNode:
        """Rotate the list to the right by ``k`` places (LeetCode 61)."""
        n = get_len(head)
        if n == 0:
            return None
        # Rotating by a multiple of the length is a no-op.
        k = k % n
        if k == 0:
            return head
        init_first = head
        # first becomes the new head; init_last is the old tail.
        pre_first, first, init_last = move_to_last_k(head, k)
        # Detach the last k nodes and reattach the old head after the tail.
        pre_first.next, init_last.next = None, init_first
        return first
| misaka-10032/leetcode | coding/00061-rotate-list/solution.py | solution.py | py | 971 | python | en | code | 1 | github-code | 13 |
26971581032 | # for loops are used when you know the exact number of iterations going to be happend
sum_ = 0
# for number in range(1, 101):
# sum_ = sum_ + number
# print("Sum of first 100 number:", sum_)
def is_prime(number):
    """Return True if ``number`` is prime, False otherwise.

    Fixes two bugs in the original:
    - the trial-division bound ``range(2, int(number ** 0.5))`` excluded
      the square root itself, so perfect squares (4, 9, 25, ...) were
      wrongly reported as prime;
    - numbers below 2 (including 0, 1 and negatives) were reported prime.
    """
    if number < 2:
        return False
    # Inclusive bound: a composite n has a divisor <= sqrt(n).
    for x in range(2, int(number ** 0.5) + 1):
        if number % x == 0:
            return False
    return True
# Read how many primes to accumulate into sum_.
LIMIT = int(input("Enter any limit of the prime number: "))
counter = 0  # number of primes found so far
num_counter = 1  # candidate number currently being tested
while counter < LIMIT:
    if is_prime(num_counter):
        counter = counter + 1
        sum_ += num_counter
    num_counter += 1
# NOTE(review): the message hard-codes "27th" although LIMIT is
# user-supplied -- it probably should interpolate LIMIT instead.
print("Sum of number to 27th prime number is", sum_)
34487225926 | # 31 - Faรงa um programa que receba a altura e o peso de uma pessoa. De acordo com a tabela a seguir, verifique e mostre qual a clissificaรงรฃo dessa pessoa
# [Altura ] [Peso ]
# 60 | Entre 60 e 90 | +90
# Menor que 1,20 | A | D | G
# De 1,20 a 1,70 | B | E | H
# Maior que 1,70 | C | F | I
print('# EX 031')
# [+] START OF CODE
altura = float(input('Digite sua altura: '))
peso = float(input('Digite o seu peso: '))
# Pick the table row from the height and the column from the weight,
# then read the classification letter from the lookup table.
if altura < 1.20:
    linha = 0
elif altura <= 1.70:
    linha = 1
else:
    linha = 2
if peso < 60:
    coluna = 0
elif peso <= 90:
    coluna = 1
else:
    coluna = 2
tabela = [['A', 'D', 'G'],
          ['B', 'E', 'H'],
          ['C', 'F', 'I']]
classificacao = tabela[linha][coluna]
print(f'Você tem {altura} de altura e {peso} de peso, portanto está na classificação {classificacao}')
# [-] END OF CODE
print('===+===\n\n')
# 32 - Escrever um programa que leia o cรณdigo do produto escolhido do cardรกpio de uma lanchonete e a quantidade. O programa deve calcular o valor a ser pago por aquele lanche. Considere que a cada execuรงรฃo somente serรก calculado um pedido. O cardรกpio da lanchonete segue o padrรฃo abaixo:
# [Especificaรงรฃo] [Cรณdigo] [Preรงo]
# Cachorro quente 100 1.20
# Bauru Simples 101 1.30
# Bauro com Ovo 102 1.50
# Hamburguer 103 1.20
# Cheeseburguer 104 1.70
# Suco 105 2.20
# Refrigerante 106 1.00
print('# EX 032')
# [+] START OF CODE
codigo = int(input('''>>> Digite o código do produto seguindo a tabela abaixo.
# [Especificação]  [Código] [Preço]
#  Cachorro quente   100     1.20
#  Bauru Simples     101     1.30
#  Bauro com Ovo     102     1.50
#  Hamburguer        103     1.20
#  Cheeseburguer     104     1.70
#  Suco              105     2.20
#  Refrigerante      106     1.00
===> '''))
# BUG FIX: 'if 100 > codigo < 106' is a chained comparison meaning
# 'codigo < 100 and codigo < 106', so it only rejected codes below 100;
# e.g. 107 slipped through and was billed as a soda.
if codigo < 100 or codigo > 106:
    print('Código errado. Verifique se o código consta na tabela.')
else:
    quantidade = int(input(f'Digite a quantidade que vai pedir do produto {codigo}: '))
    if codigo == 100:
        print(f'Você terá que pagar {1.20 * quantidade:.2f}R$ para o(s) x{quantidade} Cachorro(s) Quente(s)!')
    elif codigo == 101:
        print(f'Você terá que pagar {1.30 * quantidade:.2f}R$ para o(s) x{quantidade} Bauru(s) Simples!')
    elif codigo == 102:
        print(f'Você terá que pagar {1.50 * quantidade:.2f}R$ para o(s) x{quantidade} Bauru(s) com ovo!')
    elif codigo == 103:
        print(f'Você terá que pagar {1.20 * quantidade:.2f}R$ para o(s) x{quantidade} Hamburguer(es)!')
    elif codigo == 104:
        print(f'Você terá que pagar {1.70 * quantidade:.2f}R$ para o(s) x{quantidade} Cheeseburguer(es)!')
    elif codigo == 105:
        print(f'Você terá que pagar {2.20 * quantidade:.2f}R$ para o(s) x{quantidade} Suco(s)!')
    else:
        print(f'Você terá que pagar {1.00 * quantidade:.2f}R$ para o(s) x{quantidade} Refrigetante(s)!')
# [-] END OF CODE
print('===+===\n\n')
# 33 - Um produto vai sofrer aumento de acordo com a tabela abaixo. Leia o preço antigo, calcule e escreva o preço novo, em seguida escreva uma mensagem em função do preço novo (de acordo com a segunda tabela)
# TABELA 1
# [Preรงo antigo ] [Percentual de aumento]
# atรฉ R$ 50 5%
# entre 50 e 100 10%
# acima de 100 15%
# TABELA 2
# [Preรงo novo ] [Mensagem]
# atรฉ 80 Barato
# entre 80 e 120 Normal
# entre 120 e 200 Caro
# acima de 200 Muito caro
print('# EX 033')
# [+] START OF CODE
preco = float(input('''[Digite o preço de um produto e reajustaremos pra você, seguindo a tabela abaixo]
 [Preço antigo  ] [Percentual de aumento]
  até R$ 50                5%
  entre 50 e 100          10%
  acima de 100            15%
==> '''))
# Table 1: percentage increase by old price.
# BUG FIX: 'elif 50 >= preco <= 100' meant 'preco <= 50 and preco <= 100',
# so the 10% band was unreachable; after the first branch preco >= 50
# already holds, making the simple upper bound sufficient.
if preco < 50:
    preco = preco + (preco * (5/100))
elif preco <= 100:
    preco = preco + (preco * (10/100))
else:
    preco = preco + (preco * (15/100))
print(f'O reajuste ficou em {preco}R$ para o preço do produto,', end='')
# Table 2: message by new price (same chained-comparison fix applied).
if preco < 80:
    print(' e é considerado um produto BARATO')
elif preco <= 120:
    print(' e é considerado um produto NORMAL')
elif preco <= 200:
    print(' e é considerado um produto CARO')
else:
    print(' e é considerado um produto MUITO CARO')
# [-] END OF CODE
print('===+===\n\n')
# 34 - Leia a nota e o número de faltas de um aluno e escreva seu conceito. De acordo com a tabela abaixo, quando o aluno tem mais de 20 faltas, ocorre uma redução de conceito.
#[Nota ] [Faltas -20] [Faltas +20]
# 9.0 a 10.0 A B
# 7.5 a 8.9 B C
# 5.0 a 7.4 C D
# 4.0 a 4.9 D E
# 0.0 a 3.9 E E
print('# EX 034')
# [+] START OF CODE
nota = float(input('Digite a nota do aluno: '))
faltas = int(input('Digite a quantidade de faltas: '))
# BUG FIX: every band used chains like '0.0 >= nota <= 3.9', which means
# 'nota <= 0.0 and nota <= 3.9' -- almost every grade fell through to
# 'Nota inválida'.  The intended closed intervals are used below.
# Also, the spec reduces the grade for MORE than 20 absences, so the
# no-reduction test is 'faltas <= 20' (was 'faltas < 20').
if 0.0 <= nota <= 3.9:
    print(f'Com a nota {nota} e com {faltas} faltas, seu conceito é: E.')
elif 4.0 <= nota <= 4.9:
    if faltas <= 20:
        print(f'Com a nota {nota} e com {faltas} faltas, seu conceito é: D.')
    else:
        print(f'Com a nota {nota} e com {faltas} faltas, seu conceito é: E.')
elif 5.0 <= nota <= 7.4:
    if faltas <= 20:
        print(f'Com a nota {nota} e com {faltas} faltas, seu conceito é: C.')
    else:
        print(f'Com a nota {nota} e com {faltas} faltas, seu conceito é: D.')
elif 7.5 <= nota <= 8.9:
    if faltas <= 20:
        print(f'Com a nota {nota} e com {faltas} faltas, seu conceito é: B.')
    else:
        print(f'Com a nota {nota} e com {faltas} faltas, seu conceito é: C.')
elif 9.0 <= nota <= 10.0:
    if faltas <= 20:
        print(f'Com a nota {nota} e com {faltas} faltas, seu conceito é: A.')
    else:
        print(f'Com a nota {nota} e com {faltas} faltas, seu conceito é: B.')
else:
    print('Nota inválida.')
# [-] END OF CODE
print('===+===\n\n')
print('===+===\n\n')
# 35 - Leia uma data e determine se ela รฉ vรกlida. Ou seja, verifique se o mรชs estรก entre 1 e 12, e se o dia existe naquele mรชs. Note que Fevereiro tem 29 dias em anos bissextos, e 28 dias em anos nรฃo bissextos.
print('# EX 035')
# [+] START OF CODE
valido = True
dia = int(input('Digite o dia: '))
mes = int(input('Digite o mês: '))
ano = int(input('Digite o ano: '))
# BUG FIX: 'not 1 >= mes <= 12' meant 'not (mes <= 1 and mes <= 12)',
# so e.g. month 13 was accepted; the day checks had the same problem.
if not 1 <= mes <= 12:
    print('Mês inválido!')
    valido = False
else:
    # BUG FIX: the leap test 'ano % 4 == 0 or ano % 100 == 0 and
    # ano % 400 == 0' accepted years like 1900; the Gregorian rule is:
    # divisible by 4 and (not by 100, or by 400).
    bissexto = ano % 4 == 0 and (ano % 100 != 0 or ano % 400 == 0)
    if mes == 2:
        ultimo_dia = 29 if bissexto else 28
    elif mes in (4, 6, 9, 11):
        # April, June, September and November have 30 days; the original
        # treated every month except February as having 31.
        ultimo_dia = 30
    else:
        ultimo_dia = 31
    if not 1 <= dia <= ultimo_dia:
        print('Dia inválido!')
        valido = False
if valido:
    print(f'A data {dia}/{mes}/{ano} é válida!')
else:
    print(f'A data {dia}/{mes}/{ano} não é válida!')
# [-] END OF CODE
print('===+===\n\n')
31702072468 | #############################################################
#Date: 10.02.22 #
#Programmed by: Luka Henig (luka.henig@gmail.com) #
#Curse: 100 Days of Code(udemy) #
#Description: Litle pong game to learn and understand #
#python and the Turtle Library #
#############################################################
#imports
from turtle import Turtle
#scoardboard class
class Scoreboard(Turtle):
    """Invisible white turtle that renders both players' scores at the top
    of the Pong screen."""

    def __init__(self):
        super().__init__()
        self.color("white")
        self.penup()
        self.hideturtle()
        self.l_score = 0
        self.r_score = 0
        self.update_scoreboard()

    def update_scoreboard(self):
        """Updates the score on screen, no parameters"""
        self.clear()
        # Left score first, then right, mirrored around the centre line.
        for x_pos, score in ((-100, self.l_score), (100, self.r_score)):
            self.goto(x_pos, 200)
            self.write(score, align="center", font=("Courier", 40, "normal"))

    def l_point(self):
        """increments score for left player by one, no parameters"""
        self.l_score += 1
        self.update_scoreboard()

    def r_point(self):
        """increments score for right player by one, no parameters"""
        self.r_score += 1
        self.update_scoreboard()
1382011846 | import serial
from typing import Type
import tkinter as tk
from tkinter import ttk
from controller import Controller
class ManualFrame(tk.Frame):
    """Tk frame with the manual (jog) controls of the CNC UI.

    Three widget groups are built: feed-rate / step-size configuration,
    a six-button directional "joystick", and absolute X/Y/Z coordinate
    spinboxes.  The spinbox widgets are shared through *controller* so
    other frames can read the values entered here.
    """
    def __init__(
        self,
        master: Type[tk.Frame],
        controller: Type[Controller],
        ser: Type[serial.Serial] = None,
    ):
        # ser: open serial connection to the machine (may be None until a
        # port is configured elsewhere — __move_joystick assumes it is set).
        self.master = master
        super().__init__(self.master)
        self.ser = ser
        self.controller = controller
        # Snapshot the spinbox values onto the controller whenever a
        # "set_coord" event is published.
        self.controller.subscribe(
            event="set_coord", observer=self, callback=self.set_manual_coord
        )
        self.create_widgets()
    def create_widgets(self):
        """Build and lay out all widgets, then expose them via the controller."""
        # Setup: feed rate and per-click jog step size.
        self.labelframe_setup = ttk.Labelframe(master=self.master, text="Configuração")
        self.labelframe_setup.pack(anchor="w")
        var_feed_rate = tk.IntVar(master=self.labelframe_setup, value=1)
        var_step_size = tk.DoubleVar(master=self.labelframe_setup, value=1)
        self.label_feedrate = ttk.Label(master=self.labelframe_setup, text="Feed rate:")
        self.label_stepsize = ttk.Label(
            master=self.labelframe_setup, text="Tamanho do passo:"
        )
        self.spinbox_feedrate = tk.Spinbox(
            master=self.labelframe_setup,
            from_=0,
            to=999999,
            textvariable=var_feed_rate,
        )
        self.spinbox_stepsize = tk.Spinbox(
            master=self.labelframe_setup,
            from_=0,
            to=999999,
            format="%.2f",
            textvariable=var_step_size,
        )
        self.label_feedrate.grid(row=0, column=0, sticky=tk.W)
        self.label_stepsize.grid(row=0, column=1, sticky=tk.W)
        self.spinbox_feedrate.grid(row=1, column=0)
        self.spinbox_stepsize.grid(row=1, column=1)
        # Joystick: one button per axis/direction; each click sends a
        # single relative jog using the current feed-rate/step-size values.
        self.labelframe_direction = ttk.Labelframe(
            master=self.master, text="Por direção"
        )
        self.labelframe_direction.pack(anchor="w")
        self.button_y_up = ttk.Button(
            master=self.labelframe_direction,
            text="Y +",
            command=lambda: self.__move_joystick(
                axis="Y",
                direction=1,
                feedrate=self.controller.w_feed_rate.get(),
                step=self.controller.w_step_size.get(),
            ),
        )
        self.button_y_down = ttk.Button(
            master=self.labelframe_direction,
            text="Y -",
            command=lambda: self.__move_joystick(
                axis="Y",
                direction=0,
                feedrate=self.controller.w_feed_rate.get(),
                step=self.controller.w_step_size.get(),
            ),
        )
        self.button_x_up = ttk.Button(
            master=self.labelframe_direction,
            text="X +",
            command=lambda: self.__move_joystick(
                axis="X",
                direction=1,
                feedrate=self.controller.w_feed_rate.get(),
                step=self.controller.w_step_size.get(),
            ),
        )
        self.button_x_down = ttk.Button(
            master=self.labelframe_direction,
            text="X -",
            command=lambda: self.__move_joystick(
                axis="X",
                direction=0,
                feedrate=self.controller.w_feed_rate.get(),
                step=self.controller.w_step_size.get(),
            ),
        )
        self.button_z_up = ttk.Button(
            master=self.labelframe_direction,
            text="Z +",
            command=lambda: self.__move_joystick(
                axis="Z",
                direction=1,
                feedrate=self.controller.w_feed_rate.get(),
                step=self.controller.w_step_size.get(),
            ),
        )
        self.button_z_down = ttk.Button(
            master=self.labelframe_direction,
            text="Z -",
            command=lambda: self.__move_joystick(
                axis="Z",
                direction=0,
                feedrate=self.controller.w_feed_rate.get(),
                step=self.controller.w_step_size.get(),
            ),
        )
        self.button_y_up.grid(row=0, column=1)
        self.button_y_down.grid(row=2, column=1)
        self.button_x_up.grid(row=1, column=2)
        self.button_x_down.grid(row=1, column=0)
        self.button_z_up.grid(row=0, column=2)
        self.button_z_down.grid(row=2, column=2)
        # By coordinate: absolute X/Y/Z target entry.
        self.labelframe_coord = ttk.Labelframe(
            master=self.master, text="Por coordenada"
        )
        self.labelframe_coord.pack(anchor="w")
        var_x_coord = tk.DoubleVar(master=self.labelframe_coord, value=0)
        var_y_coord = tk.DoubleVar(master=self.labelframe_coord, value=0)
        var_z_coord = tk.DoubleVar(master=self.labelframe_coord, value=0)
        self.label_x = ttk.Label(master=self.labelframe_coord, text="X:")
        self.label_y = ttk.Label(master=self.labelframe_coord, text="Y:")
        self.label_z = ttk.Label(master=self.labelframe_coord, text="Z:")
        self.spinbox_x = tk.Spinbox(
            master=self.labelframe_coord,
            from_=0,
            to=999999,
            format="%.2f",
            increment=0.1,
            textvariable=var_x_coord,
        )
        self.spinbox_y = tk.Spinbox(
            master=self.labelframe_coord,
            from_=0,
            to=999999,
            format="%.2f",
            increment=0.1,
            textvariable=var_y_coord,
        )
        self.spinbox_z = tk.Spinbox(
            master=self.labelframe_coord,
            from_=0,
            to=999999,
            format="%.2f",
            increment=0.1,
            textvariable=var_z_coord,
        )
        self.label_x.grid(row=0, column=0)
        self.spinbox_x.grid(row=0, column=1)
        self.label_y.grid(row=1, column=0)
        self.spinbox_y.grid(row=1, column=1)
        self.label_z.grid(row=2, column=0)
        self.spinbox_z.grid(row=2, column=1)
        # Controller: publish the widgets so other frames can read them.
        self.controller.w_feed_rate = self.spinbox_feedrate
        self.controller.w_step_size = self.spinbox_stepsize
        self.controller.w_x_coord = self.spinbox_x
        self.controller.w_y_coord = self.spinbox_y
        self.controller.w_z_coord = self.spinbox_z
    def __move_joystick(self, axis: str, direction: int, feedrate: int, step: float):
        """Send one relative jog step along *axis* (direction 1 = '+', else '-')
        and wait for the machine's reply line.

        NOTE(review): "$J=G21G91..." looks like a GRBL jog command
        (G21 = millimetres, G91 = relative move) — confirm against the
        firmware actually in use.
        """
        axis = axis.upper()
        if direction == 1:
            direction = ""
        else:
            direction = "-"
        feedrate = str(feedrate)
        step = str(step)
        command = "$J=G21G91" + axis + direction + step + "F" + feedrate + "\n"
        self.ser.write(str.encode(command))
        self.ser.readline()
    def set_manual_coord(self):
        """Copy the current spinbox values onto the controller's ``*_set`` attributes."""
        self.controller.feed_rate_set = self.spinbox_feedrate.get()
        self.controller.stepsize_set = self.spinbox_stepsize.get()
        self.controller.x_coord_set = self.spinbox_x.get()
        self.controller.y_coord_set = self.spinbox_y.get()
        self.controller.z_coord_set = self.spinbox_z.get()
| miqueiasmiguel/cnc_controller | frame_tab_manual.py | frame_tab_manual.py | py | 6,980 | python | en | code | 0 | github-code | 13 |
14693770141 | #!/usr/bin/python3
def safe_print_list_integers(my_list=[], x=0):
    """Print the first x elements of my_list that can be formatted as
    integers, all on one line, followed by a newline.

    Elements that cannot be rendered with "{:d}" are silently skipped.
    Returns the number of integers actually printed.
    """
    printed = 0
    for item in my_list[:x]:
        try:
            print("{:d}".format(item), end='')
        except (ValueError, TypeError):
            continue
        printed += 1
    print()
    return printed
| Ninolincy/alx-higher_level_programming | 0x05-python-exceptions/2-safe_print_list_integers.py | 2-safe_print_list_integers.py | py | 284 | python | en | code | 1 | github-code | 13 |
12214205963 |
# gen code for:
# - Table Record C++ header file
# - C++ Class header file
# - C++ Class source file
from helper import get_field_by_col, get_macro_name
import record_header_codegen
import class_header_codegen
import class_source_codegen
class CRUDCodeGen:
    """Generate C++ CRUD boilerplate (record header, class header, class
    source) from a SQL Server table's schema.

    The schema is introspected through the ``sp_columns`` and
    ``sp_primary_keys_rowset`` stored procedures on *conn* and cached in
    ``self.schema``, which the three generator modules consume.
    """
    def __init__(self, conn, table_name, class_name=None):
        # conn: DB-API connection whose cursor supports as_dict=True
        # (e.g. pymssql).  class_name defaults to the table name.
        self.conn = conn
        self.schema = {}
        self.schema['table_name'] = table_name
        if class_name is None:
            class_name = table_name
        self.set_class_name(class_name)
        self._query_schema()
        self._query_pks()
        self._parse_columns()
    def set_class_name(self, class_name, record_class_name=None):
        """Set the generated class name; the record class defaults to
        ``<class_name>Record``."""
        self.schema['class_name'] = class_name
        if record_class_name is None:
            record_class_name = class_name + 'Record'
        self.schema['record_class_name'] = record_class_name
    def gen_record_header_code(self):
        """Render the C++ header for the table-record struct."""
        generator = record_header_codegen.Generator(self.schema)
        return generator.gen_code()
    def gen_class_header_code(self):
        """Render the C++ class header file."""
        generator = class_header_codegen.Generator(self.schema)
        return generator.gen_code()
    def gen_class_source_code(self):
        """Render the C++ class source file."""
        generator = class_source_codegen.Generator(self.schema)
        return generator.gen_code()
    def _query_schema(self):
        """Fetch column metadata into schema['columns'] / ['columns_map'].

        NOTE(review): the table name is %-interpolated into the SQL text —
        assumed to come from trusted input only.
        """
        sql = 'sp_columns "%s"' % self.schema['table_name']
        cursor = self.conn.cursor(as_dict=True)
        cursor.execute(sql)
        self.schema['columns'] = []
        self.schema['columns_map'] = {}
        for col in cursor:
            self.schema['columns'].append(col)
            self.schema['columns_map'][col['COLUMN_NAME']] = col
    def _query_pks(self):
        """Fetch the primary-key column names into schema['pks']."""
        sql = 'sp_primary_keys_rowset "%s"' % self.schema['table_name']
        cursor = self.conn.cursor(as_dict=True)
        cursor.execute(sql)
        self.schema['pks'] = []
        for pk in cursor:
            self.schema['pks'].append(pk['COLUMN_NAME'])
    def _parse_columns(self):
        """Turn raw column metadata into generator-ready field descriptors.

        TCHAR fields additionally get a length macro and a companion
        SQLLEN size field (appended by _parse_tchar_field), and both
        TCHAR and SQL_TIMESTAMP_STRUCT fields are registered in
        schema['fill_fields'].  Field order is preserved for the code
        generators.
        """
        self.schema['fields'] = []
        self.schema['fill_fields'] = {}
        self.schema['macros'] = []
        for col in self.schema['columns']:
            field = get_field_by_col(col)
            field_name = field['name']
            self.schema['fields'].append(field)
            if field['type'] == 'TCHAR':
                self._parse_tchar_field(field)
                self.schema['fill_fields'][field_name] = field
            elif field['type'] == 'SQL_TIMESTAMP_STRUCT':
                self.schema['fill_fields'][field_name] = field
        self.schema['fields_map'] = {}
        for field in self.schema['fields']:
            if 'col_name' in field:
                self.schema['fields_map'][field['col_name']] = field
    def _parse_tchar_field(self, field):
        """Augment a TCHAR field with its buffer-length macro and append a
        matching SQLLEN size field to schema['fields'] / ['macros']."""
        col_name = field['col_name']
        m_name = get_macro_name(self.schema['table_name'] + col_name) + '_LEN'
        size_field_name = 'n' + col_name
        field['size_field_name'] = size_field_name
        field['macro'] = m_name
        # Declared as e.g. "TCHAR name[MACRO + 1]" (room for the NUL).
        field['declare_more'] = '[%s + 1]' % m_name
        macro = {
            'name': m_name,
            'val': field['length'],
        }
        size_field = {
            'type': 'SQLLEN',
            'type_prefix': 'n',
            'name': size_field_name,
            'default_val': '%s + 1' % m_name,
            'declare_more': '',
        }
        self.schema['fields'].append(size_field)
        self.schema['macros'].append(macro)
| dualface/cpp_activerecord | codegen/__init__.py | __init__.py | py | 3,573 | python | en | code | 2 | github-code | 13 |
34199298894 | """Main module."""
import typing
from dataclasses import asdict
import requests
from pydantic import BaseModel
import paddle_api.type_defs as td
T = typing.TypeVar("T", bound=BaseModel)
T_CREATE = typing.TypeVar("T_CREATE", bound=BaseModel)
T_UPDATE = typing.TypeVar("T_UPDATE", bound=BaseModel)
T_INSTANCE = typing.TypeVar("T_INSTANCE", bound=BaseModel)
class CRUDP(typing.Generic[T_CREATE, T_UPDATE, T_INSTANCE]):
    """Create, Retrieve, Update, Delete, Paginate.

    Generic REST helper bound to one collection *path* (e.g. "/products")
    of the Paddle API; *t* is the pydantic model used to parse instances.
    """
    # probably good to make CRUDP class singleton per each path (/products, /prices, ...)
    def __init__(self, client: "Paddle", path: str, t: typing.Type[T_INSTANCE]):
        self.path = path
        self.client = client
        self.t: typing.Type[T_INSTANCE] = t
    def _get_url(self, pk: typing.Optional[str] = None):
        """Collection URL, or the detail URL when *pk* is given."""
        url = f"{self.client.base_url}{self.path}"
        if not pk:
            return url
        return f"{url}/{pk}"
    def _get(self, pk: typing.Optional[str] = None, query_params: typing.Optional[dict] = None) -> dict:
        """GET the collection (or one item) and return the decoded JSON body.

        NOTE(review): no raise_for_status() here, so HTTP errors surface
        later as unexpected JSON shapes.
        """
        url = self._get_url(pk)
        response = requests.get(
            url=url,
            headers=self.client.headers,
            params=query_params,
        )
        return response.json()
    def _create_or_update(self, data: typing.Union[T_CREATE, T_UPDATE], pk: typing.Optional[str] = None) -> dict:
        """POST a new item, or PATCH the existing one when *pk* is given.

        Raises BadRequest on HTTP 400 and requests.HTTPError on any other
        error status; returns the decoded JSON body on success.
        """
        request_f = requests.patch if pk else requests.post
        response = request_f(  # type: ignore
            url=self._get_url(pk),
            json=asdict(data),
            headers=self.client.headers,
        )
        if response.status_code == 400:
            raise BadRequest(response=response)
        response.raise_for_status()
        return response.json()
    def create(self, item: T_CREATE) -> T_INSTANCE:
        """Create *item* remotely and return the parsed server instance."""
        response = self._create_or_update(item)
        return self.t.parse_obj(response["data"])
    def update(self, item: T_UPDATE) -> T_INSTANCE:
        """Persist changes to *item* (identified by ``item.id``)."""
        response = self._create_or_update(item, pk=item.id)  # type: ignore[attr-defined]  # id
        return self.t.parse_obj(response["data"])
    def retrieve(self, pk: str) -> T_INSTANCE:
        """Fetch and parse the single item identified by *pk*."""
        response = self._get(pk=pk)
        return self.t.parse_obj(response["data"])
    def paginator(self, per_page: typing.Optional[int] = None) -> typing.Generator[td.Page[T_INSTANCE], None, None]:
        """Yield successive result pages using cursor pagination.

        The id of each page's last item is passed as the ``after`` cursor
        for the next request, until ``meta.pagination.has_more`` is false
        (or pagination metadata is absent).
        """
        response = td.Page[self.t].parse_obj(  # type: ignore[name-defined]  # self.t
            self._get(self.path, query_params={"per_page": per_page})
        )
        yield response
        if not response.meta.pagination:
            return None
        while response.meta.pagination.has_more:
            after = response.data[-1].id
            response = td.Page[self.t].parse_obj(  # type: ignore[name-defined]  # self.t
                self._get(self.path, query_params={"per_page": response.meta.pagination.per_page, "after": after})
            )
            yield response
    def delete(self, pk: str):
        """Not implemented yet."""
        raise NotImplementedError
class BadRequest(requests.HTTPError):
    """Raised for HTTP 400 responses; exposes Paddle's error ``code``."""

    def __init__(self, *args, **kwargs):
        # Paddle wraps failures as {"error": {"code": ..., "detail": ...}}.
        payload = kwargs.pop("response", None).json()["error"]
        self.code = payload["code"]
        super().__init__(payload["detail"], *args, **kwargs)
def item_paginator(page_paginator: typing.Generator[td.Page[T], None, None]) -> typing.Generator[T, None, None]:
    """Flatten a page generator, yielding every data item in order."""
    for page in page_paginator:
        yield from page.data
class Paddle:
    """Paddle API client.

    Holds credentials and the base URL; CRUD access to each collection is
    exposed through the ``product`` and ``price`` properties.
    """

    def __init__(self, api_key: str, test_mode: bool = True, api_version: int = 3):
        self.api_key = api_key
        self.test_mode = test_mode
        # Sandbox and live share the same API surface on different hosts.
        self.base_url = (
            "https://sandbox-api.paddle.com" if test_mode else "https://api.paddle.com"
        )
        self.headers = {
            'Authorization': f"Bearer {self.api_key}",
            "Content-Type": "application/json",
            "Paddle-Version": str(api_version),
        }

    def event_types(self) -> dict:
        """Get webhook event types."""
        response = requests.get(url=f"{self.base_url}/event-types", headers=self.headers)
        return response.json()["data"]

    @property
    def product(self) -> CRUDP[td.ProductCreate, td.ProductCreate, td.Product]:
        """Product crud and p (paginate)."""
        return CRUDP(self, path="/products", t=td.Product)

    @property
    def price(self) -> CRUDP[td.PriceCreate, td.PriceCreate, td.Price]:
        """Price crud and p (paginate)."""
        return CRUDP(self, path="/prices", t=td.Price)
| Korben11/paddle_api | paddle_api/paddle_api.py | paddle_api.py | py | 4,688 | python | en | code | 0 | github-code | 13 |
# Advent of Code 2020 day 5, part 2.
# Each boarding pass is a binary seat ID: F/L are 0 bits, B/R are 1 bits.
# Fixes vs. the original: membership tests used a list inside a 9000-pass
# loop (O(n) per test -> O(n^2) overall); a set makes each test O(1).
# str.translate replaces the chain of .replace() calls in one pass.
seat_ids = {
    int(line.translate(str.maketrans('FBLR', '0101')), base=2)
    for line in iter(input, '')
}
# My seat is the only unoccupied ID whose neighbours are both occupied.
for seat in range(9000):
    if seat not in seat_ids and seat - 1 in seat_ids and seat + 1 in seat_ids:
        print(seat)
| UnrelatedString/advent-of-code-2020 | aoc5-2.py | aoc5-2.py | py | 241 | python | en | code | 1 | github-code | 13 |
33340605164 | import pymysql
import codecs
import csv
def conn_mysql():
    """Open a connection to the local ``Hot_News`` MySQL database.

    autocommit is enabled, so callers never need to commit explicitly.
    NOTE(review): credentials are hard-coded here — consider moving them
    into configuration or environment variables.
    """
    conn = pymysql.connect(host='localhost', user='root', password='330324zhs', db='Hot_News', port=3306, autocommit=True)
    return conn
def query_all(cur, sql, args):
    """Execute *sql* with parameter tuple *args* on cursor *cur* and
    return every resulting row."""
    cur.execute(sql, args)
    rows = cur.fetchall()
    return rows
def write_into_db(datas, TotalNum):
    """Insert up to *TotalNum* news rows from *datas* into the ``news`` table.

    *datas* maps arbitrary keys to dicts with the columns
    id/title/txt/datetime/source/web/batch.  Rows whose INSERT fails
    (e.g. duplicate primary key) are skipped.  Returns the number of rows
    actually inserted.
    """
    db = conn_mysql()
    cursor = db.cursor()
    sql_create_table = 'CREATE TABLE IF NOT EXISTS news (id varchar(50) PRIMARY KEY, title varchar(50) not null, txt LONGTEXT not null, datetime varchar(30) not null, source varchar(50) not null, web varchar(5) not null, batch varchar(50) not null)DEFAULT CHARSET=utf8;'
    cursor.execute(sql_create_table)
    # Parameterized INSERT: the driver quotes/escapes every value.  This
    # fixes the old broken manual escaping (txt.replace('"', '\"') was a
    # no-op, since '\"' == '"') and removes the SQL-injection risk of
    # building statements with %-formatting.
    sql_insertdb = 'INSERT INTO news(id,title,txt,datetime,source,web,batch) VALUES(%s,%s,%s,%s,%s,%s,%s);'
    count = 0
    for key in datas.keys():
        if count == TotalNum:
            break
        data = datas[key]
        values = (data['id'], data['title'], data['txt'], data['datetime'],
                  data['source'], data['web'], data['batch'])
        try:
            cursor.execute(sql_insertdb, values)
        except Exception:  # e.g. duplicate key — skip this row
            continue
        count += 1
    db.close()
    return count
def write_into_db_cluster(datas, TotalNum):
    """Insert up to *TotalNum* clustering rows from *datas* into ``clustering``.

    Values are bound as query parameters instead of %-formatted into the
    SQL text, eliminating quoting bugs and SQL injection.  Rows whose
    INSERT fails are skipped.  Returns the number of rows inserted.
    """
    db = conn_mysql()
    cursor = db.cursor()
    sql_create_table = 'CREATE TABLE IF NOT EXISTS clustering(clusterid varchar(50) PRIMARY KEY, categoryid int(5) not null, clusternum int(11) not null, batch varchar(50) not null, vectors LONGTEXT not null, clusterbatch varchar(30) not null, web varchar(5) not null)DEFAULT CHARSET=utf8;'
    cursor.execute(sql_create_table)
    sql_insertdb = 'INSERT INTO clustering(clusterid, categoryid, vectors, clusternum, batch, clusterbatch, web) VALUES(%s,%s,%s,%s,%s,%s,%s);'
    count = 0
    for key in datas.keys():
        if count == TotalNum:
            break
        data = datas[key]
        values = (data['clusterid'], data['categoryid'], data['vectors'],
                  data['clusternum'], data['batch'], data['clusterbatch'], data['web'])
        try:
            cursor.execute(sql_insertdb, values)
        except Exception:  # e.g. duplicate key — skip this row
            continue
        count += 1
    db.close()
    return count
def write_into_db_keyhots(datas, TotalNum):
    """Insert up to *TotalNum* keyword/hot-value rows from *datas* into
    ``keyhots``.

    Values are bound as query parameters instead of %-formatted into the
    SQL text, eliminating quoting bugs and SQL injection.  Rows whose
    INSERT fails are skipped.  Returns the number of rows inserted.
    """
    db = conn_mysql()
    cursor = db.cursor()
    sql_create_table = 'CREATE TABLE IF NOT EXISTS keyhots(keyhotsid varchar(50) PRIMARY KEY, categoryid int(5) not null, batch varchar(50) not null, keywords LONGTEXT not null, keywords_num int(5) not null, hotvalues varchar(20) not null, keyhotsbatch varchar(30) not null, web varchar(5) not null)DEFAULT CHARSET=utf8;'
    cursor.execute(sql_create_table)
    sql_insertdb = 'INSERT INTO keyhots(keyhotsid, categoryid, batch, keywords, keywords_num, hotvalues, keyhotsbatch, web) VALUES(%s,%s,%s,%s,%s,%s,%s,%s);'
    count = 0
    for key in datas.keys():
        if count == TotalNum:
            break
        data = datas[key]
        values = (data['keyhotsid'], data['categoryid'], data['batch'],
                  data['keywords'], data['keywords_num'], data['hotvalues'],
                  data['keyhotsbatch'], data['web'])
        try:
            cursor.execute(sql_insertdb, values)
        except Exception:  # e.g. duplicate key — skip this row
            continue
        count += 1
    db.close()
    return count
def read_mysql_to_csv(filename):
    """Dump every row of table ``tb_csv`` into *filename* as UTF-8 CSV.

    Fixes vs. the original: the csv module requires the file to be opened
    with newline='' so it can control line endings (codecs.open cannot
    pass that option and produced blank lines on Windows); the per-row
    debug print is removed; the DB connection is now always closed.
    """
    conn = conn_mysql()
    try:
        cur = conn.cursor()
        results = query_all(cur=cur, sql='select * from tb_csv', args=None)
        with open(filename, mode='w', encoding='utf-8', newline='') as f:
            writer = csv.writer(f, dialect='excel')
            writer.writerows(results)
    finally:
        conn.close()
| Houzss1/News-hots-discovery-system | NewsWeb/mypackage/db_operate.py | db_operate.py | py | 4,616 | python | en | code | 1 | github-code | 13 |
42208077284 | import pandas as pd
import numpy as np
# Series basics: a list, then the same list with a missing value.
animals = ["Tiger", "Bear", "Moose"]
animals = ["Tiger", "Bear", None]

# Querying a Series by label (.loc) and by position (.iloc).
sports = {"Archery": "Bhutan", "Golf": "Scotland", "Sumo": "Japan", "Taekwondo": "South Korea"}
s = pd.Series(sports)

# Numeric Series and a NumPy reduction over it.
s = pd.Series([100.0, 120, 101])
total = np.sum(s)

# Small purchases DataFrame; note the duplicated "Store 1" index label.
purchase_1 = pd.Series({'Name': 'Chris', 'Item Purchased': 'Dog Food', 'Cost': 22.50})
purchase_2 = pd.Series({'Name': 'Kevyn', 'Item Purchased': 'Kitty Litter', 'Cost': 2.50})
purchase_3 = pd.Series({'Name': 'Vinod', 'Item Purchased': 'Bird Seed', 'Cost': 5.00})
df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=["Store 1", "Store 1", "Store 2"])

# Apply a 20% discount to every cost.
df["Cost"] = df["Cost"] - df["Cost"] * 0.2

matrix = np.array([[11, 12, 13, 14], [21, 22, 23, 24], [31, 32, 33, 34]])
print(matrix)

# Transpose demo using a plain nested list through a DataFrame.
matrix = [[11, 12, 13, 14],
          [21, 22, 23, 24],
          [31, 32, 33, 34]]
transponded_matrix = pd.DataFrame(matrix).T
print(transponded_matrix.to_string(index=False, header=False))
print(transponded_matrix)
print(transponded_matrix[2][2])
from die import Die

# Roll a single six-sided die 1000 times and record every result.
die = Die()
results = [die.roll() for _ in range(1000)]

# Count how often each possible face value came up.
frequences = [results.count(value) for value in range(1, die.num_sides + 1)]
print(frequences)

# Render the counts as a bar chart and write it to d6.html.
from plotly.graph_objs import Bar, Layout
from plotly import offline

x_values = list(range(1, die.num_sides + 1))
data = [Bar(x=x_values, y=frequences)]
x_axis_config = {'title': 'Result'}
y_axis_config = {'title': 'Frequency of Result'}
my_layout = Layout(title='Results of rolling one D6 1000 times.', xaxis=x_axis_config, yaxis=y_axis_config)
offline.plot({'data': data, 'layout': my_layout}, filename='d6.html')
| fadiabji/data_science | die_visual.py | die_visual.py | py | 871 | python | en | code | 0 | github-code | 13 |
16132473603 | """
Purpose: Synchronization with Barriers
- for use by a fixed number of threads that need to wait for each other.
- Each thread tries to pass a barrier by calling the wait() method, which will
block until all of threads have made that call.
As soon as that happens, the threads are released simultaneously.
- The barrier can be reused any number of times for the same number of threads.
- UseCase: synchronizing a server and a client
โ as the server has to wait for the client after initializing itself.
Following snippet demonstrates the use of Barriers.
"""
from random import randrange
from threading import Barrier, Thread
from time import ctime, sleep

num = 4
# Every one of the four threads must call wait() on this barrier before
# any of them is released.
b = Barrier(num)
names = ["Ramesh", "Ganesh", "Mahesh", "Suresh"]


def player():
    # Each runner takes a unique name, "runs" for a random time, then
    # blocks at the barrier until everyone has arrived.
    name = names.pop()
    sleep(randrange(2, 10))
    print("%s reached the barrier at: %s" % (name, ctime()))
    b.wait()


threads = []
print("Race starts now…")
for _ in range(num):
    t = Thread(target=player)
    threads.append(t)
    t.start()

# Wait for every runner to finish before declaring the race over.
for thread in threads:
    thread.join()
print()
print("Race over!")
| udhayprakash/PythonMaterial | python3/19_Concurrency_and_Parallel_Programming/01_MultiThreading/g_barriers/a_barriers.py | a_barriers.py | py | 1,257 | python | en | code | 7 | github-code | 13 |
38380839333 | from typing import List, Tuple
def parse(input_data: str) -> List[Tuple[str, int]]:
    """Turn the raw puzzle text into (direction, distance) pairs."""
    course = []
    for line in input_data.strip().split('\n'):
        direction, dist = line.split()
        course.append((direction, int(dist)))
    return course
def underway(x, z, instruction):
    """Apply one (direction, amount) instruction to the position.

    x is horizontal distance, z is depth; unknown directions leave the
    position unchanged.
    """
    direction, amount = instruction
    if direction == 'forward':
        return x + amount, z
    if direction == 'up':
        return x, z - amount
    if direction == 'down':
        return x, z + amount
    return x, z
def underway_2(x, z, aim, instruction):
    """Part-2 movement: up/down adjust the aim; forward moves x and dives
    by aim * amount.  Unknown directions change nothing."""
    direction, amount = instruction
    if direction == 'forward':
        return x + amount, z + aim * amount, aim
    if direction == 'up':
        return x, z, aim - amount
    if direction == 'down':
        return x, z, aim + amount
    return x, z, aim
def solve1(input_data):
    """Part 1: final horizontal position multiplied by final depth."""
    x = z = 0
    for instruction in parse(input_data):
        x, z = underway(x, z, instruction)
    return x * z
def solve2(input_data):
    """Part 2: same product, but movement is aim-based (underway_2)."""
    x = z = aim = 0
    for instruction in parse(input_data):
        x, z, aim = underway_2(x, z, aim, instruction)
    return x * z
if __name__ == '__main__':
    # Sanity check against the worked example from the puzzle statement.
    test_data = """forward 5
down 5
forward 8
up 3
down 8
forward 2"""
    assert solve1(test_data) == 150
    assert solve2(test_data) == 900
    # aocd fetches the personal puzzle input over the network; assigning
    # answer_a / answer_b submits the answers to adventofcode.com.
    from aocd.models import Puzzle
    puzzle = Puzzle(2021, 2)
    answer_1 = solve1(puzzle.input_data)
    print(answer_1)
    puzzle.answer_a = answer_1
    answer_2 = solve2(puzzle.input_data)
    puzzle.answer_b = answer_2
| mharty3/advent_of_code | 2021/day-02.py | day-02.py | py | 1,557 | python | en | code | 0 | github-code | 13 |
14541325069 | from PyQt5.QtCore import pyqtSlot
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QApplication, QGraphicsScene, QGraphicsPixmapItem, QGraphicsView
from PyQt5.QtGui import QImage, QPixmap
import cv2
class GraphicsView(QGraphicsView):
    """
    Zoomable image viewer.

    Displays a NumPy image inside a QGraphicsScene and lets the user zoom
    with the mouse wheel; scrollbar positions can be restored across
    reloads via load_img's scroll arguments.
    """
    def __init__(self, parent=None):
        super(GraphicsView, self).__init__(parent)
        # Current zoom factor applied to the pixmap item (1 = 100%).
        self.scale = 1
    def load_img(self, QIm, x_scroball, y_scroball, x_scroball_max, y_scroball_max):
        """Show image *QIm* and restore the scrollbar state.

        NOTE(review): assumes QIm is an 8-bit colour array of shape
        (height, width, depth) laid out as RGB (Format_RGB888) — e.g. a
        cv2 image already converted from BGR; confirm with the caller.
        """
        image_height, image_width, image_depth = QIm.shape
        QIm = QImage(QIm.data, image_width, image_height,
                     image_width * image_depth,
                     QImage.Format_RGB888)
        pix = QPixmap.fromImage(QIm)
        # A fresh scene/item is created on every load; the previous zoom
        # factor is re-applied so reloads keep the user's zoom level.
        self.item = QGraphicsPixmapItem(pix)
        self.scene = QGraphicsScene()
        self.scene.addItem(self.item)
        self.setScene(self.scene)
        self.item.setScale(self.scale)
        if x_scroball_max != 0:
            self.horizontalScrollBar().setMaximum(x_scroball_max)
            self.verticalScrollBar().setMaximum(y_scroball_max)
            self.horizontalScrollBar().setValue(x_scroball)
            self.verticalScrollBar().setValue(y_scroball)
    def get_scroball(self):
        """Return the current (horizontal, vertical) scrollbar values."""
        return self.horizontalScrollBar().value(), self.verticalScrollBar().value()
    def get_max_scroball(self):
        """Return the (horizontal, vertical) scrollbar maxima."""
        return self.horizontalScrollBar().maximum(), self.verticalScrollBar().maximum()
    # NOTE(review): superseded by wheelEvent zooming; kept for reference.
    # def mousePressEvent(self, event):
    #     if event.buttons() == QtCore.Qt.LeftButton:
    #         self.scale = self.scale + 0.05
    #         if self.scale > 1.2:
    #             self.scale = 1.2
    #     elif event.buttons() == QtCore.Qt.RightButton:
    #         if self.scale <= 0:
    #             self.scale = 0.2
    #         else:
    #             self.scale = self.scale - 0.05
    #     self.item.setScale(self.scale)
    def wheelEvent(self, event):
        """Zoom with the wheel: up = +0.05 (capped at 10), down = -0.1
        (reset to 0.2 once the factor reaches zero or below)."""
        angle = event.angleDelta() / 8
        if angle.y() > 0:
            self.scale = self.scale + 0.05
            if self.scale > 10:
                self.scale = 10
        else:
            if self.scale <= 0:
                self.scale = 0.2
            else:
                self.scale = self.scale - 0.1
        self.item.setScale(self.scale)
def main():
    """Open a standalone window showing a zoomable GraphicsView."""
    import sys
    app = QApplication(sys.argv)
    # Bug fix: the original instantiated ``picturezoom``, a name defined
    # nowhere in this module (NameError at runtime); the widget defined
    # here is GraphicsView.
    view = GraphicsView()
    view.show()
    app.exec_()


if __name__ == '__main__':
    main()
6127507994 | import tensorflow as tf
from tframe import console
import core
import model_lib as models
def main(_):
  """Configure and launch the U-Net run on the MEMBRANE task.

  All settings are written to the shared tframe config object ``core.th``
  before ``core.activate()`` builds and runs the model.
  """
  console.start('Task CNN (MEMBRANE)')
  th = core.th
  th.job_dir = './records_unet_alpha'
  th.model = models.unet
  th.suffix = '01'
  # Training hyper-parameters.
  th.batch_size = 2
  th.learning_rate = 1e-4
  th.epoch = 3
  th.early_stop = True
  th.patience = 5
  # Logging / validation cadence.
  th.print_cycle = 1
  th.validation_per_round = 4
  th.val_batch_size = 10
  th.validate_train_set = True
  th.export_tensors_upon_validation = True
  # th.probe_cycle = 1
  th.warm_up = False
  th.save_model = True
  th.overwrite = True
  th.gather_note = True
  th.summary = False
  th.warm_up_thres = 0
  # train=False: run in evaluation mode with the saved model.
  th.train = False
  th.mark = 'unet_{}'.format('x')
  core.activate()
if __name__ == '__main__':
  tf.app.run()
| Wuyou98/Image_unet | unet/task_cnn.py | task_cnn.py | py | 820 | python | en | code | 0 | github-code | 13 |
16189388540 | import BitVector
# Problem 1.1
# Is Unique: Implement and algorithm to determine if a string has all unique characters.
# What if you couldn't use additional data structures
def isunique(uniquestring):
    """Return True if every character in *uniquestring* is distinct.

    Idiom fix: the original used a dict as a set and called the
    ``__contains__`` dunder directly; a plain set with ``in`` is the
    standard form.  O(n) time, O(n) extra space.
    """
    seen = set()
    for letter in uniquestring:
        if letter in seen:
            return False
        seen.add(letter)
    return True
def isuniqueinplace(uniquestring):
    """Uniqueness check without an auxiliary hash structure: sort the
    characters, then look for any pair of equal neighbours."""
    ordered = sorted(uniquestring)
    return all(a != b for a, b in zip(ordered, ordered[1:]))
# Problem 1.2
# Check Permutation: given two strings, write a method to decide if one is
# a permutation of the other
def ispermutation(string1, string2):
    """Case-insensitive test that the two strings are permutations of
    each other.

    Idiom fix: return the comparison directly instead of an
    ``if cond: return True else: return False`` block.
    """
    return sorted(string1.lower()) == sorted(string2.lower())
# Problem 1.3
# URLify: write a method to replace all spaces in a string with '%20'. You may assume that the string has sufficient
# space at the end to hold the additional characters, and that you are given the "true" length of the string.
def urlify(urlstring, strlen):
    """Return the first *strlen* characters of *urlstring* with every
    space replaced by '%20'.

    Fixes vs. the original: a leftover debug ``print(index)`` ran on
    every loop iteration, and the string was built with O(n^2) ``+=``
    concatenation; a slice plus str.replace does both jobs in one pass.
    """
    return urlstring[:strlen].replace(' ', '%20')
def urlifyinplace(urlstring, trulen):
    """In-place URLify of a mutable character buffer.

    *urlstring* is a list of characters whose first *trulen* entries hold
    the real text, followed by enough trailing slack to absorb the '%20'
    expansions.  Characters are copied backwards so nothing is clobbered
    before it is read.  Returns the same list.
    """
    spaces = sum(1 for i in range(trulen) if urlstring[i] == ' ')
    write = trulen + spaces * 2
    for read in range(trulen - 1, -1, -1):
        if urlstring[read] == ' ':
            urlstring[write - 1] = '0'
            urlstring[write - 2] = '2'
            urlstring[write - 3] = '%'
            write -= 3
        else:
            urlstring[write - 1] = urlstring[read]
            write -= 1
    return urlstring
# Problem 1.4
# Palindrome Permutation: Given a string, write a function to check if it is a permutation of a palindrome.
def permpalindrome(inputstr):
    """Return True if some permutation of *inputstr* is a palindrome.

    A string can be permuted into a palindrome iff at most one character
    occurs an odd number of times.  Non-letter characters (spaces,
    punctuation, digits) are ignored — this implements the TODO the
    original left in a comment, which only skipped spaces.  The toggle
    set holds exactly the characters seen an odd number of times so far.
    """
    odd = set()
    for ch in inputstr:
        if not ch.isalpha():
            continue
        # XOR-style toggle: second occurrence removes the first.
        odd ^= {ch}
    return len(odd) <= 1
# Palindrome Permutation - with bit vectors
def lettertoint(letter):
    """Map the single characters 'a'..'z' to 1..26; anything else
    (uppercase, punctuation, multi-character strings) maps to 0."""
    if len(letter) == 1 and 'a' <= letter <= 'z':
        return ord(letter) - ord('a') + 1
    return 0
def toggle(bitvector, index):
    """Flip the bit selected by *index* in the 26-bit *bitvector* and
    return it; index 0 (lettertoint's "not a lowercase letter" sentinel)
    is a no-op.

    NOTE(review): BitVector's ``<<`` is assumed to be the package's
    in-place circular shift; with indices 1..26 on a 26-bit vector every
    letter still gets a distinct mask — confirm against the BitVector
    package documentation.
    """
    if index == 0:
        return bitvector
    mask = BitVector.BitVector(size=26, intVal=1)
    mask = mask << index
    if int(bitvector & mask) == 0:
        bitvector |= mask
    else:
        bitvector &= ~mask
    return bitvector
def createbitvector(phrase):
    """Fold *phrase* into a 26-bit parity vector: a letter's bit is set
    iff that letter occurs an odd number of times (non-letters map to
    index 0 and are ignored by toggle)."""
    bitvector = BitVector.BitVector(size=26)
    for ch in phrase:
        bitvector = toggle(bitvector, lettertoint(ch))
    return bitvector
def checkbitvector(bitvector):
    """True if exactly one bit is set, via the n & (n - 1) power-of-two
    trick (callers must ensure the vector is non-zero)."""
    value = int(bitvector)
    return int(bitvector & BitVector.BitVector(size=26, intVal=value - 1)) == 0
def ispalindromepermutation(phrase):
    """Palindrome-permutation test via the parity vector: valid iff no
    letter has an odd count, or exactly one does."""
    vec = createbitvector(phrase)
    return int(vec) == 0 or checkbitvector(vec)
# Problem 1.5
# One Away - There are three types of edits that can be performed on strings: insert a character, remove a character,
# or replace a character. Given two strings, write a function to check if they are one edit (or zero edits) away.
def oneaway(string1, string2):
    """True if the strings are zero or one edit (insert, remove, or
    replace) apart."""
    if len(string1) == len(string2):
        # Same length: at most one mismatched position (a replacement).
        mismatches = 0
        for a, b in zip(string1, string2):
            if a != b:
                mismatches += 1
                if mismatches > 1:
                    return False
        return True
    # Different lengths: check a single deletion from the longer string.
    if len(string1) > len(string2):
        return extracharoneaway(string1, string2)
    return extracharoneaway(string2, string1)
def extracharoneaway(lstring, sstring):
    """True if deleting one character from the longer string *lstring*
    yields *sstring*.  The offset records whether the single allowed
    skip has already been consumed."""
    offset = 0
    for i in range(len(sstring)):
        if lstring[i + offset: i + 1 + offset] != sstring[i: i + 1]:
            if offset == 1:
                return False
            offset = 1
            if lstring[i + offset: i + 1 + offset] != sstring[i: i + 1]:
                return False
    return True
# Problem 1.6
# String Compression: Implement a method to perform basic string compression using the counts of repeated characters.
# If the compressed string would not be smaller than the original string, return the original string.
def compresstring(inputstring):
    """CtCI 1.6 basic string compression: collapse runs of repeated
    characters into "<char><count>" (e.g. "aabcccccaaa" -> "a2b1c5a3").

    If the compressed form is longer than the original, the original is
    returned unchanged (equal length keeps the compressed form, matching
    the previous behavior).  Rewritten with itertools.groupby plus a
    single join instead of O(n^2) ``+=`` string concatenation.
    """
    from itertools import groupby
    compressed = ''.join(
        ch + str(sum(1 for _ in run)) for ch, run in groupby(inputstring)
    )
    return inputstring if len(compressed) > len(inputstring) else compressed
| SamRosentel/cracking-the-coding-interview | chapter1problems.py | chapter1problems.py | py | 5,533 | python | en | code | 0 | github-code | 13 |
1110472490 | #!/usr/bin/env python3
import pickle
import re
from collections import OrderedDict
from time import sleep
import zmq
import node
class FuzzyClient:
    """Interactive CLI client for a fuzzy-glossary server reached over ZeroMQ.

    Requests are pickled dicts sent through a REQ socket.  A local glossary of
    ``node.Node`` objects is kept in an OrderedDict, with a parallel list of
    word vectors (same insertion order); both are shipped to the server for
    path-finding and search queries.
    """

    def __init__(self, ip='localhost', port=5555):
        # REQ/REP socket towards the server.
        self.ip = ip
        self.port = port
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.REQ)
        self.socket.connect('tcp://%s:%d' % (self.ip, self.port))
        # name -> node.Node, plus a vector list kept parallel to it.
        self.glossary = OrderedDict()
        self.glossary_vector = []
        # Default parameters for path/neighbor queries.
        self.setting = {'depth_limit': 9, 'jump_limit': 1, 'num': 30, 'sim_th': 0.39}

    @staticmethod
    def req(rep_name, glossary=None, glossary_vector=None, name1=None, name2=None, setting=None):
        """Build the request dict understood by the server (req code + payload)."""
        req_json = {
            'req': rep_name,
            'glossary': glossary,
            'glossary_vector': glossary_vector,
            'name1': name1,
            'name2': name2,
            'setting': setting
        }
        return req_json

    @staticmethod
    def preprocess_input():
        """Read one line from stdin, stripping trailing blanks and leading whitespace."""
        in_text = input()
        in_text = re.sub(r'[ \t]+$', '', in_text)
        re_res = re.search(r'\s*(.*)', in_text)
        if re_res:
            in_text = re_res.group(1)
        return in_text

    def list_names(self):
        """Print every name currently in the local glossary."""
        print('// list names')
        for i in self.glossary:
            print(self.glossary[i].name)

    def add_name(self, name):
        """Add a new node for *name* (vector fetched from the server).

        Returns False when the name already exists, True otherwise.
        """
        if name in self.glossary:
            return False
        neo = node.Node(name, self.get_word_vector(name))
        self.glossary[name] = neo
        self.glossary_vector.append(neo.vector)
        print(f'\n//{name} added')
        return True

    def delete_name(self, name):
        """Remove *name* and its vector; returns False when it is absent.

        NOTE(review): returns None (not True) on successful deletion.
        """
        print('// delete name')
        if name not in self.glossary:
            return False
        # Vectors are stored positionally, so locate the matching index first.
        rm_idx = list(self.glossary.keys()).index(name)
        del self.glossary[name]
        del self.glossary_vector[rm_idx]
        print(f'{name} deleted')

    def add_implication(self, source_name, target_name, probability):
        """Record a directed implication edge source -> target with *probability*."""
        print('// add implication')
        # Both endpoints are created on demand.
        self.add_name(source_name)
        self.add_name(target_name)
        res_prob = self.glossary[source_name].add_implication(target_name, self.get_word_vector(target_name), probability)
        self.glossary[source_name].sort_reason()
        print(f'\n//{source_name} -> {target_name}; {res_prob[0]}, count: {res_prob[1]}')

    def add_belief(self, source_name, target_name, probability):
        """Record a directed belief edge source -> target with *probability*."""
        print('// add belief')
        self.add_name(source_name)
        self.add_name(target_name)
        res_prob = self.glossary[source_name].add_belief(target_name, self.get_word_vector(target_name), probability)
        self.glossary[source_name].sort_reason()
        print(f'\n//{source_name} -> {target_name}; {res_prob[0]}, count: {res_prob[1]}')

    def add_membership(self, source_name, target_name, target_prob, source_prob):
        """Record membership in both directions, with per-direction probabilities."""
        print('// add membership')
        self.add_name(source_name)
        self.add_name(target_name)
        res_prob = self.glossary[source_name].add_membership(target_name, self.get_word_vector(target_name), target_prob)
        self.glossary[source_name].sort_reason()
        print(f'\n//{source_name} -> {target_name}; {res_prob[0]}, count: {res_prob[1]}')
        res_prob = self.glossary[target_name].add_membership(source_name, self.get_word_vector(source_name), source_prob)
        self.glossary[target_name].sort_reason()
        print(f'\n//{target_name} -> {source_name}; {res_prob[0]}, count: {res_prob[1]}')

    def show_name(self, name):
        """Print the membership/implication/belief tables of one node."""
        if len(self.glossary) == 0 or name not in self.glossary:
            print('empty glossary or not exist name')
            return
        print(f'//// {name}')
        print('=== membership ===')
        for i in self.glossary[name].membership:
            print(f'->{i}; prob {self.glossary[name].membership[i][0]}, count {self.glossary[name].membership[i][1]}')
        print('\n=== implication ===')
        for i in self.glossary[name].implication:
            print(f'->{i}; prob {self.glossary[name].implication[i][0]}, count {self.glossary[name].implication[i][1]}')
        print('\n=== belief ===')
        for i in self.glossary[name].belief:
            print(f'->{i}; prob {self.glossary[name].belief[i][0]}, count {self.glossary[name].belief[i][1]}')

    def get_word_vector(self, name):
        """Ask the server ('gw') for the embedding vector of *name*."""
        req_x = self.req('gw', name1=name)
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        return loaded_res['res_data']

    def get_glossary_list(self):
        """Print the list of glossary files available on the server ('sl')."""
        req_x = self.req('sl')
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        print(loaded_res['res_data'])

    def load_glossary(self, file_name):
        """Replace the local glossary with one loaded server-side ('lg')."""
        req_x = self.req('lg', name1=file_name)
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        if loaded_res['res_data']:
            self.glossary, self.glossary_vector = loaded_res['res_data']
            print(f'{file_name} loaded!')
        else:
            print(f'\'{file_name}\' file not found in \'save\' folder ;)\n')

    def save_glossary(self, file_name):
        """Ship the local glossary to the server for saving (req code 'x')."""
        req_x = self.req('x', glossary=self.glossary, glossary_vector=self.glossary_vector, name1=file_name)
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        if loaded_res['res_data']:
            print(loaded_res['res_data'])

    def find_path(self, source, dest):
        """Ask the server ('fp') for a path from source to dest and print it."""
        if not len(self.glossary):
            print('empty glossary')
            return
        req_x = self.req('fp', glossary=self.glossary, glossary_vector=self.glossary_vector,
                         name1=source, name2=dest, setting=self.setting)
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        print(loaded_res['res_data'])

    def across_space(self, source, dest):
        """Query the server ('cr') for a cross-vector-space path and print it."""
        req_x = self.req('cr', name1=source, name2=dest)
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        print(loaded_res['res_data'])

    def search_possible_path(self, source, length):
        """Server-side search ('sp') for paths from *source* up to *length* deep."""
        if not len(self.glossary):
            print('empty glossary')
            return
        # depth_limit is overridden by the requested length; other knobs reused.
        setting = {'depth_limit': length, 'jump_limit': self.setting['jump_limit'], 'sim_th': self.setting['sim_th']}
        req_x = self.req('sp', glossary=self.glossary, glossary_vector=self.glossary_vector,
                         name1=source, name2=None, setting=setting)
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        print(loaded_res['res_data'])

    def show_nearest_neighbor(self, name, num=30, sim_th=0.39):
        """Print the server's ('nn') nearest embedding neighbors of *name*."""
        setting = {'num': num, 'sim_th': sim_th}
        req_x = self.req('nn', name1=name, name2=None, setting=setting)
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        print(loaded_res['res_data'])

    def show_similarity(self, text1, text2):
        """Print the server-computed ('ss') similarity between two texts."""
        req_x = self.req('ss', name1=text1, name2=text2)
        self.socket.send(pickle.dumps(req_x))
        res = self.socket.recv()
        loaded_res = pickle.loads(res)
        print(loaded_res['res_data'])

    def show_all_names(self):
        """Print the relation tables of every node in the glossary."""
        for name in self.glossary:
            print('\n')
            self.show_name(name)

    def clear_glossary(self):
        """Drop the local glossary and its vector list."""
        self.glossary = OrderedDict()
        self.glossary_vector = []

    def user_select(self):
        """Run the interactive menu loop until the user saves+exits or exits."""
        print('~ welcome ~')
        print('load file name? (example \'bug.p\')')
        save_filename = self.preprocess_input()
        self.load_glossary(save_filename)
        self.list_names()
        while True:
            print('===== select ===== \nsl: server glossaries list \nlg: load server glossary\
                  \nln; list names\na; add name\ndn: delete name\nsa: show all names\nai; add implication\
                  \nab: add belief \nam: add membership \nnn: show nearest neighbor \nst: find path setting\
                  \nsn: show name \nss: show similarity \n\n-=-=- paths -=-=-\
                  \nfp; find path \ncr: cross vector space \ncg: clear current glossary\
                  \nsp: search possible path \n \
                  \n----- exit -----\
                  \nx; save &exit \nxx; exit without save')
            try:
                sel = self.preprocess_input()
                if sel == 'sl':
                    self.get_glossary_list()
                elif sel == 'lg':
                    self.get_glossary_list()
                    print('input; load file name')
                    name = self.preprocess_input()
                    # add name
                    self.load_glossary(name)
                    self.list_names()
                elif sel == 'ln':
                    # list glossary
                    self.list_names()
                elif sel == 'a':
                    print('input; name')
                    name = self.preprocess_input()
                    # add name
                    res = self.add_name(name)
                    if not res:
                        print('already exist')
                elif sel == 'dn':
                    print('input; name to delete')
                    name = self.preprocess_input()
                    # delete name
                    self.delete_name(name)
                elif sel == 'sa':
                    # show all names
                    self.show_all_names()
                elif sel == 'ai':
                    print('input; source_name ')
                    source_name = self.preprocess_input()
                    print('input; target_name ')
                    target_name = self.preprocess_input()
                    print('input; probability')
                    probability = float(self.preprocess_input())
                    # add implication
                    self.add_implication(source_name, target_name, probability)
                elif sel == 'ab':
                    print('input; source_name ')
                    source_name = self.preprocess_input()
                    print('input; target_name ')
                    target_name = self.preprocess_input()
                    print('input; probability')
                    probability = float(self.preprocess_input())
                    # add belief
                    self.add_belief(source_name, target_name, probability)
                elif sel == 'am':
                    print('input; source_name ')
                    source_name = self.preprocess_input()
                    print('input; target_name ')
                    target_name = self.preprocess_input()
                    print(f'input; {source_name}->{target_name} similarity')
                    target_prob = float(self.preprocess_input())
                    print(f'input; {target_name}->{source_name} similarity')
                    source_prob = float(self.preprocess_input())
                    # add membership
                    self.add_membership(source_name, target_name, target_prob, source_prob)
                elif sel == 'sn':
                    print('input; name')
                    name = self.preprocess_input()
                    # show relations
                    self.show_name(name)
                elif sel == 'st':
                    print('input; depth limit')
                    self.setting['depth_limit'] = int(self.preprocess_input())
                    print('input; jump limit')
                    self.setting['jump_limit'] = int(self.preprocess_input())
                elif sel == 'fp':
                    print('input; source ')
                    source = self.preprocess_input()
                    print('input; dest ')
                    dest = self.preprocess_input()
                    # find path
                    self.find_path(source, dest)
                elif sel == 'cr':
                    print('input; source ')
                    source = self.preprocess_input()
                    print('input; dest ')
                    dest = self.preprocess_input()
                    # find path
                    self.across_space(source, dest)
                elif sel == 'sp':
                    print('input; source')
                    source = self.preprocess_input()
                    print('input; length')
                    length = int(self.preprocess_input())
                    # search possible paths with length
                    self.search_possible_path(source, length)
                elif sel == 'nn':
                    print('input; name')
                    name = self.preprocess_input()
                    # show nearest neighbor
                    self.show_nearest_neighbor(name)
                elif sel == 'ss':
                    print('input; word1')
                    word1 = self.preprocess_input()
                    print('input; word2')
                    word2 = self.preprocess_input()
                    # show word distance
                    self.show_similarity(word1, word2)
                elif sel == 'cg':
                    # clear current glossary
                    self.clear_glossary()
                elif sel == 'x':
                    print('save file name?')
                    save_filename = self.preprocess_input()
                    # save
                    self.save_glossary(save_filename)
                    break
                elif sel == 'xx':
                    print('exit without save')
                    print('see ya')
                    break
                print('\nok\n')
            except KeyboardInterrupt:
                # Ctrl-C does not kill the loop; force a clean exit via x/xx.
                print(' \n\n### Plz Enter \'x\' or \'xx\' to exit ###\n')
                sleep(0.33)
def main():
    """Entry point: connect to the remote fuzzy server and run the CLI loop."""
    client = FuzzyClient(ip='35.200.11.163', port=8888)
    client.user_select()
if __name__ == '__main__':
main()
| sngjuk/fuzzy-flow | src/client.py | client.py | py | 14,114 | python | en | code | 0 | github-code | 13 |
40262924923 | from datetime import datetime
from typing import Optional, Union
import phonenumbers
from aiogram import Bot, types
from aiogram.dispatcher.filters.state import State, StatesGroup
from aiogram.types import InlineKeyboardMarkup, ReplyKeyboardMarkup, InlineKeyboardButton
from app import config
bot = Bot(token=config.TOKEN)
class CustomState(State):
    """aiogram FSM state that carries the text/keyboard/picture it renders."""

    def __init__(self, message_text: str, keyboard: Optional[InlineKeyboardMarkup] = None,
                 state: Optional[str] = None, group_name: Optional[str] = None, picture: Optional[str] = None):
        super().__init__(state, group_name)
        self.message_text = message_text
        self.keyboard = keyboard
        # File name under media/ to send as a photo, or None.
        self.picture = picture

    async def apply(self, message: types.Message = None, callback_query: types.CallbackQuery = None,
                    message_text: Optional[str] = None,
                    keyboard: Optional[Union[InlineKeyboardMarkup, ReplyKeyboardMarkup]] = None):
        """Activate this state and render it via a callback query or a message.

        Explicit message_text/keyboard arguments override the ones stored on
        the state instance.
        """
        await self.set()
        if callback_query:
            await callback_query.message.edit_text(text=message_text if message_text else self.message_text)
            if self.picture:
                with open(f'media/{self.picture}', 'rb') as file:
                    return await callback_query.message.answer_photo(file, reply_markup=self.keyboard)
            elif keyboard:
                return await callback_query.message.edit_reply_markup(reply_markup=keyboard)
            elif self.keyboard:
                return await callback_query.message.edit_reply_markup(reply_markup=self.keyboard)
        elif message:
            if self.picture:
                # Bug fix: in this branch callback_query is None, so the photo
                # must be sent through *message* (the original dereferenced
                # callback_query.message and would raise AttributeError).
                with open(f'media/{self.picture}', 'rb') as file:
                    await message.answer_photo(file)
            return await message.answer(
                text=message_text if message_text else self.message_text,
                reply_markup=keyboard if keyboard else self.keyboard
            )
# Keyboard shown on the start state: registration and about buttons.
start_keyboard = InlineKeyboardMarkup()
registration_button = InlineKeyboardButton('ะ ะตะณะธัััะฐัะธั', callback_data='registration')
about_button = InlineKeyboardButton('About', callback_data='about')
start_keyboard.add(registration_button, about_button)
class StartStatesGroup(StatesGroup):
    """FSM group holding the single entry state that presents the start menu."""
    start = CustomState(message_text='Start bot', keyboard=start_keyboard)
# Shared inline buttons reused across the bot's dialogs.
back_button = InlineKeyboardButton('ะะฐะทะฐะด', callback_data='back')
cancel_button = InlineKeyboardButton('ะัะผะตะฝะฐ', callback_data='cancel')
create_button = InlineKeyboardButton('ะกะพะทะดะฐัั', callback_data='create')
delete_button = InlineKeyboardButton('ะฃะดะฐะปะธัั', callback_data='delete')
# Bug fix: this label was split by a stray line break inside the string
# literal (a syntax error); rejoined here.
# NOTE(review): confirm the intended caption bytes against the source repo.
save_button = InlineKeyboardButton('ะกะพััะฐะฝะธัั', callback_data='save')
pay_button = InlineKeyboardButton('ะะฟะปะฐัะธัั', callback_data='pay')
previous_button = InlineKeyboardButton('ะัะตะดัะดััะฐั ัััะฐะฝะธัะฐ', callback_data='previous')
next_button = InlineKeyboardButton('ะกะปะตะดัััะฐั ัััะฐะฝะธัะฐ', callback_data='next')
chosen_surname_button = InlineKeyboardButton('ะคะฐะผะธะปะธั', callback_data='chosen_surname')
chosen_name_button = InlineKeyboardButton('ะะผั', callback_data='chosen_name')
chosen_patronymic_button = InlineKeyboardButton('ะััะตััะฒะพ', callback_data='chosen_patronymic')
chosen_phone_number_button = InlineKeyboardButton('ะะพะผะตั ัะตะปะตัะพะฝะฐ', callback_data='chosen_phone_number')
chosen_date_of_birth_button = InlineKeyboardButton('ะะฐัั ัะพะถะดะตะฝะธั', callback_data='chosen_date_of_birth')
def check_phone_number_is_valid(message_text):
    """Return True when the text parses as a valid phone number (RU region)."""
    try:
        parsed = phonenumbers.parse(message_text, 'RU')
    except phonenumbers.phonenumberutil.NumberParseException:
        return False
    return phonenumbers.is_valid_number(parsed)
def check_date_of_birth_is_valid(message_text):
    """Return True when the text is a well-formed DD.MM.YYYY date."""
    try:
        datetime.strptime(message_text, '%d.%m.%Y')
        return True
    except ValueError:
        return False
| Msakhibullin24/bot | app/states/common.py | common.py | py | 3,978 | python | en | code | 1 | github-code | 13 |
72393855698 | #!/usr/bin/env python
"""
Sum square difference
Problem 6
The sum of the squares of the first ten natural numbers is,
1^2 + 2^2 + ... + 10^2 = 385
The square of the sum of the first ten natural numbers is,
(1 + 2 + ... + 10)^2 = 55^2 = 3025
Hence the difference between the sum of the squares of the first ten natural
numbers and the square of the sum is 3025 - 385 = 2640.
Find the difference between the sum of the squares of the first one hundred
natural numbers and the square of the sum.
"""
# Upper bound of the natural-number range (inclusive).
maxNums = 100
# Sum of the squares: 1^2 + 2^2 + ... + maxNums^2.
sumsquare = sum(i ** 2 for i in range(1, maxNums + 1))
# Square of the sum: (1 + 2 + ... + maxNums)^2.
squaresum = sum(range(1, maxNums + 1)) ** 2
print(squaresum - sumsquare)
| cynful/project-euler | p006.py | p006.py | py | 682 | python | en | code | 0 | github-code | 13 |
12645603953 | #!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')
import numpy as np
from matplotlib import pyplot as plt
import pandas as pd
import matplotlib.tri as tri
# Load the free-energy surface: whitespace-separated columns are the two
# collective variables (beta, alpha), the PMF value, and the probability.
b = pd.read_csv('pmf-mask-t6.dat', header=None, delim_whitespace=True, comment='#')
b.columns = ['beta', 'alpha', 'pmf', 'prob']
# Rows with a finite PMF (values >= 9999 mark masked bins); kept for parity
# with the original script although it is no longer read below.
a = b[b.pmf < 9999]
# Clamp the PMF at 20 so masked/huge bins do not blow out the colour scale.
# Bug fix: use .loc instead of chained indexing (b.pmf[b.pmf > 20] = 20),
# which raises SettingWithCopyWarning and may fail to write through.
b.loc[b.pmf > 20, 'pmf'] = 20
levels = np.arange(0.0, 20, 0.5)

plt.figure(figsize=(9, 7.5))
ax = plt.gca()
# Filled, triangulated contour plot of the PMF over the (beta, alpha) plane.
CS = ax.tricontourf(b.beta, b.alpha, b.pmf, levels, cmap=plt.get_cmap('jet'))
cbar = plt.colorbar(CS, ax=ax)
cbar.ax.tick_params(labelsize=18)
plt.tick_params(axis='x', labelsize=20)
plt.tick_params(axis='y', labelsize=20)
plt.xlabel("Q_beta", fontsize=24)
plt.ylabel("Q_alpha", fontsize=24)
plt.savefig('pmf-p.png')
| xinyugu1997/CPEB3_Actin | AWSEM_simulations/zipper/draw.py | draw.py | py | 914 | python | en | code | 0 | github-code | 13 |
15155947110 | # import
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
from dotenv import dotenv_values
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from datetime import datetime
import sys
def waitElement(xpath, browser, second):
    """Block up to *second* seconds until the element at *xpath* is present."""
    locator = (By.XPATH, xpath)
    condition = EC.presence_of_element_located(locator)
    return WebDriverWait(browser, second).until(condition)
def writeToFile(text):
    """Append *text* plus a trailing newline to the local res.txt log."""
    with open('res.txt', 'a', encoding="utf-8") as handle:
        handle.write(text + '\n')
# PEP 8 (E731): use def instead of assigning lambdas, so tracebacks carry the
# function names.  Parameter names kept from the original lambdas.
def split(str):
    """Split a comma+space separated .env value into a list of items."""
    return str.split(', ')


# formatTime = lambda time: time.strftime("%m/%d/%Y %H:%M:%S")
def formatTime(time):
    """Format a datetime as 'YYYY/MM/DD HH:MM:SS'."""
    return time.strftime("%Y/%m/%d %H:%M:%S")
# Chrome driver options: ignore TLS/certificate errors and run incognito.
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument('--ignore-ssl-errors')
options.add_argument('--ignore-certificate-errors-spki-list')
options.add_argument('incognito')
# options.add_argument('headless')
if __name__ == '__main__':
    # CLI argument 1 selects which credential set from .env to use.
    # NOTE(review): bare except also swallows unrelated errors; the fallback
    # index is an int while argv values are strings.
    try:
        index = sys.argv[1]
    except:
        index = 1
    # declare & init: credentials and target user id/name lists from .env.
    config = dotenv_values(".env")
    username = config[f"username{index}"]
    password = config[f"password{index}"]
    lUId = split(config[f"lUId{index}"])
    lUName = split(config[f"lUName{index}"])
    # start the browser session.
    browser = webdriver.Chrome(options=options)
    browser.maximize_window()
    # login to Facebook with the selected credentials.
    browser.get('https://www.facebook.com')
    waitElement('//button[@name="login"]', browser, 8)
    browser.find_element_by_xpath('//*[@id="email"]').send_keys(username)
    browser.find_element_by_xpath('//*[@id="pass"]').send_keys(password)
    browser.find_element_by_xpath('//button[@name="login"]').click()
    sleep(2)
    # NOTE(review): res and dictUser are built but never read afterwards.
    res = {uid:[(0, formatTime(datetime.now()))] for uid in lUId}
    dictUser = {id:name for id, name in zip(lUId, lUName)}
    # Poll the target site; when it responds, react in each Messenger thread.
    while(True):
        browser.get('http://daotao.hnue.edu.vn/')
        sleep(5)
        try:
            bodyElement = browser.find_element_by_tag_name('body')
            # Log a timestamped prefix of the page body to res.txt.
            writeToFile(formatTime(datetime.now()) + " ------ " + bodyElement.text[:len("ฤฤng nhแบญp")])
            if (bodyElement.text != "The service is unavailable."):
                for uid in lUId:
                    browser.get(f'https://www.facebook.com/messages/t/{uid}')
                    waitElement('//div[@aria-label="Gแปญi lฦฐแปฃt thรญch"]', browser, 5).click()
                    sleep(3)
        except Exception as e:
            print('error: ', e)
sleep(20) | daotiennamhl/save_something | relevance/auto_tool/hneu/a.py | a.py | py | 2,465 | python | en | code | 0 | github-code | 13 |
7400359499 | from toy_diffusion_models.state_diffusion_model import main
if __name__ == '__main__':
    # Launch the diffusion-model training remotely via jaynes, instrumented
    # with ml_logger.  Imports are local so the module can be imported without
    # these launch-only dependencies.
    import jaynes
    from ml_logger import logger, instr
    from dd_launch import RUN
    # Target the lab GPU cluster profile and pin a single visible device.
    jaynes.config('tjlab-gpu')
    RUN.CUDA_VISIBLE_DEVICES = "3"
    thunk = instr(main)
    # Dashboard chart spec written alongside the run's logs.
    logger.log_text("""
    charts:
    - yKey: loss/mean
      xKey: epoch
    - type: image
      glob: samples/ep_20/frame_*.png
    - type: image
      glob: samples/ep_40/frame_*.png
    """, ".charts.yml", dedent=True, overwrite=True)
    jaynes.run(thunk)
    jaynes.listen()
| geyang/urop-playground | toy_analysis/state_model/train.py | train.py | py | 552 | python | en | code | 0 | github-code | 13 |
5450488892 | from django.urls import path
from . import views
app_name = "archive"
urlpatterns = [
    # Archive landing page and global search.
    path('', views.archive_home, name="archive_home"),
    path('search/', views.archive_search, name="search"),
    # Reference entities: each has an index page and a detail page by pk.
    path('instrument/', views.instrument_home, name="instrument_home"),
    path('instrument/<int:instr_id>/', views.instrument_detail, name="instrument_detail"),
    path('maqam/', views.maqam_home, name="maqam_home"),
    path('maqam/<int:maqam_id>/', views.maqam_detail, name="maqam_detail"),
    path('rythm/', views.rythm_home, name="rythm_home"),
    path('rythm/<int:rythm_id>/', views.rythm_detail, name="rythm_detail"),
    path('jins/', views.jins_home, name="jins_home"),
    path('jins/<int:jins_id>/', views.jins_detail, name="jins_detail"),
    path('musicform/', views.musicForm_home, name="musicform_home"),
    path('musicform/<int:musicForm_id>/', views.musicForm_detail, name="musicform_detail"),
    # Artists, including add/edit forms and a first-letter filter.
    # NOTE(review): 'artist/<str:key_l>/' shadows any non-numeric artist URL;
    # it must stay after the int-pk route (as it does here).
    path('artist/', views.artist_home, name="artist_home"),
    path('artist/add/', views.artist_add, name="artist_add"),
    path('artist/<int:artist_id>/', views.artist_detail, name="artist_detail"),
    path('artist/<str:key_l>/', views.artist_fl, name="artist_fl"),
    path('artist/edit/<int:artist_id>/', views.artist_edit, name="artist_edit"),
    # Songs with search/add/edit.
    path('song/', views.song_home, name="song_home"),
    path('song/<int:song_id>/', views.song_detail, name="song_detail"),
    path('song/search/', views.song_search, name="song_search"),
    path('song/add/', views.song_add, name="song_add"),
    path('song/edit/<int:song_id>/', views.song_edit, name="song_edit"),
    # Lyrics and scores attached to songs.
    path('lyrics/', views.lyric_home, name="lyric_home"),
    path('lyrics/<int:lyric_id>/', views.lyric_detail, name="lyric_detail"),
    path('lyric/add/<int:song_id>/', views.lyric_add, name="lyric_add"),
    path('scores/', views.score_home, name="score_home"),
    path('scores/<int:score_id>/', views.score_detail, name="score_detail"),
    path('score/add/<int:song_id>/', views.score_add, name="score_add"),
    path('score/download/<int:score_id>/', views.score_download, name="score_download"),
    # Wiki and miscellaneous pages.
    path('wiki/', views.wiki_home, name="wiki_home"),
    path('no_element/',views.wiki_no_element, name="wiki_no_element"),
    path('contact/',views.contact, name="contact"),
    path('report_error/', views.report_error, name="report_error"),
] | Mazen21/musikji | archive/urls.py | urls.py | py | 2,358 | python | en | code | 0 | github-code | 13 |
6623258854 | print('DESAFIO 80'.center(44))
'''
Digitar cinco valores e cadastrar numa lista, jรก na posiรงรฃo correta de inserรงรฃo. sem usar sort
no final, exibir a lista ordenada 5 2 4 0 1
'''
print(f" {' ORGANIZADOR DE NรMEROS ':_^44} ")
numbers = []
# Read five numbers, inserting each at its sorted position (no sort()).
for X in range(1, 5+1):
    number = int(input(f'\033[1mDigite o {X}ยบ nรบmero: \033[m'))
    # Fast path: empty list or new maximum goes at the end.
    if not numbers or number > numbers[-1]:
        numbers.append(number)
        print('\033[37mNรบmero adicionado ao final da lista.\033[m')
    else:
        # Linear scan for the first element >= number and insert before it.
        pos = 0
        while pos < len(numbers):
            if number <= numbers[pos]:
                numbers.insert(pos, number)
                print(f"\033[37mNรบmero adicionado na posiรงรฃo {pos}. \033[m")
                break
            pos += 1
print(f"\n\033[40;1mOs valores digitados foram: {numbers}\033[m") | aa-abnerandrade/cev-desafios-python | des080.py | des080.py | py | 810 | python | pt | code | 0 | github-code | 13 |
30480099620 |
# import the necessary packages
import numpy as np
import argparse
import imutils
import glob
import cv2
# construct the argument parser and parse the arguments
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-t", "--template", required=True, help="Path to template image")
ap.add_argument("-i", "--images", required=True,
	help="Path to images where template will be matched")
ap.add_argument("-v", "--visualize",
	help="Flag indicating whether or not to visualize each iteration")
args = vars(ap.parse_args())
# load the image image, convert it to grayscale, and detect edges
template = cv2.imread(args["template"])
template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
template = cv2.Canny(template, 50, 200)
(tH, tW) = template.shape[:2]
cv2.imshow("Template", template)
# loop over the images to find the template in
for imagePath in glob.glob(args["images"] + "/*.jpg"):
	# load the image, convert it to grayscale, and initialize the
	# bookkeeping variable to keep track of the matched region
	image = cv2.imread(imagePath)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	found = None  # NOTE(review): never updated or read below
	rects = []  # NOTE(review): never used
	# loop over the scales of the image
	for scale in np.linspace(0.2, 1.0, 20)[::-1]:
		# resize the image according to the scale, and keep track
		# of the ratio of the resizing
		resized = imutils.resize(gray, width = int(gray.shape[1] * scale))
		w, h = resized.shape[::-1]
		r = gray.shape[1] / float(resized.shape[1])  # NOTE(review): computed but unused
		# if the resized image is smaller than the template, then break
		# from the loop
		if resized.shape[0] < tH or resized.shape[1] < tW:
			break
		# detect edges in the resized, grayscale image and apply template
		# matching to find the template in the image
		edged = cv2.Canny(resized, 50, 200)
		result = cv2.matchTemplate(edged, template, cv2.TM_CCOEFF_NORMED)
		threshold = .3
		loc = np.where( result >= threshold)
		# NOTE(review): on Python 3 this prints an opaque zip object.
		print(zip(*loc))
		for pt in zip(*loc[::-1]):
			# NOTE(review): pt/w/h are in *resized* coordinates but drawn on
			# the full-size image -- likely needs scaling by r to align.
			cv2.rectangle(image, pt, (pt[0] + w, pt[1] + h), (0,0,255), 2)
	cv2.imwrite('res.png',image)
#run with python match.py --template %.png --images images
| TeamMoodGitHub/Jonah-Term-1 | match.py | match.py | py | 2,070 | python | en | code | 0 | github-code | 13 |
def max_heapify(A, i, size):
    """Sift A[i] down so the subtree rooted at i satisfies the max-heap property.

    Assumes both child subtrees are already max-heaps; only the first *size*
    elements of A are treated as part of the heap.  Mutates A in place.
    """
    left = 2 * i + 1
    right = 2 * i + 2
    largest = i
    if left < size and A[left] > A[largest]:
        largest = left
    if right < size and A[right] > A[largest]:
        largest = right
    if largest != i:
        A[i], A[largest] = A[largest], A[i]
        max_heapify(A, largest, size)


def build_heap(A):
    """Rearrange A in place into a max-heap."""
    # Leaves (indices >= len(A)//2) are trivially heaps, so start sifting
    # from the last internal node.
    for j in range(len(A) // 2 - 1, -1, -1):
        max_heapify(A, j, len(A))


def heapsort(A):
    """Sort A in place (ascending) via heapsort and return it."""
    build_heap(A)
    size = len(A)
    for i in range(len(A) - 1):
        # Move the current maximum to the end of the unsorted region, then
        # restore the heap property over the shrunken prefix.
        A[-(i + 1)], A[0] = A[0], A[-(i + 1)]
        size -= 1
        max_heapify(A, 0, size)
    return A
# Read one line of space-separated integers from stdin and print them sorted.
print(heapsort(list(map(int,input().split()))))
| Quasar0007/Competitive_Programming | Heap_sort.py | Heap_sort.py | py | 578 | python | en | code | 0 | github-code | 13 |
73570791696 | t = int(input())
# One permutation per test case: find the cheapest way (via prefix/suffix
# removals) to eliminate both the minimum (1) and the maximum (n).
for _ in range(0, t):
    n = int(input())
    nums = list(map(int, input().split()))
    # Zero-based positions of the extremes.
    A = nums.index(1)
    B = nums.index(n)
    # Elements removed when deleting through each extreme from either side.
    A_left = A+1
    A_right = n-A_left
    B_left = B+1
    B_right = n-B_left
    # Cost of taking each extreme from the side away from the other extreme;
    # *_open is the complementary cost from the opposite side.
    # NOTE(review): semantics inferred from structure -- confirm against the
    # original Codeforces 725 A editorial.
    A_closed = A_left if B < A else A_right + 1
    A_open = n - A_closed + 1
    B_closed = B_left if A < B else B_right + 1
    B_open = n - B_closed + 1
print(min(A_closed, B_closed, A_open+B_open)) | JDSeiler/programming-problems | codeforces/round-725/a-stone-game.py | a-stone-game.py | py | 438 | python | en | code | 0 | github-code | 13 |
31313214139 | import matplotlib.pyplot as plt
from scipy import signal
from scipy.io import wavfile
import numpy as np
import csv
# Extract time samples from wavfile (take single channel)
# Extract time samples from wavfile (take single channel)
sample_rate, samples = wavfile.read("data\\piano2_dual.wav")
samples = samples[:,0]
# Plot time series
plt.figure(0)
plt.plot(np.arange(1,len(samples)+1)/sample_rate,samples)
plt.title("Wav file samples")
plt.xlabel("Time [s]")
# Create spectrogram
frequencies, times, spectrogram = signal.spectrogram(samples, sample_rate, scaling='density')
print('max', np.max(spectrogram))
plt.figure(1)
# Log scale so quiet frequency content remains visible.
plt.pcolormesh(times, frequencies, np.log10(spectrogram))
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.title('Log10(spectrogram)')
plt.colorbar()
plt.show()
#plt.figure(2)
#plt.specgram(samples, Fs=sample_rate)
#plt.show()
# Write spectrogram with time samples for processing:
# first row holds the time axis, remaining rows the per-frequency power.
# NOTE(review): consider open(..., newline='') to avoid blank rows on Windows.
with open("data\\spectrogram.csv","w+") as my_csv:
    out_data = np.append(np.reshape(times,(1,-1)),spectrogram,axis=0)
    csvWriter = csv.writer(my_csv,delimiter=',')
csvWriter.writerows(out_data) | aahmad-p/reservoir-networks | reservoir-networks/create_spectrogram.py | create_spectrogram.py | py | 1,068 | python | en | code | 0 | github-code | 13 |
36974300303 | import sys
sys.path.append('/root/.snap/snap-python')
import os
os.environ.update({"LD_LIBRARY_PATH":"."})
import snappy
from snappy import ProductIO
# ReprojectOp = snappy.jpy.get_type('org.esa.snap.core.gpf.common.reproject.ReprojectionOp')
# in_file = '/home/zy/data_pool/U-TMP/S1A_IW_SLC__1SSV_20150109T112521_20150109T112553_004094_004F43_7041_Cal_deb_Spk_TC.dim'
# out_file = '/home/zy/data_pool/U-TMP/S1A_IW_SLC__1SSV_20150109T112521_20150109T112553_004094_004F43_7041_Cal_deb_Spk_TC_reproj.dim'
#
# product = ProductIO.readProduct(in_file)
#
# op = ReprojectOp()
# op.setSourceProduct(product)
# op.setParameter('crs', 'AUTO:42001')
# op.setParameter('resampling', 'Nearest')
#
# sub_product = op.getTargetProduct()
# # ProductIO.writeProduct(sub_product, out_file, 'BEAM-DIMAP')
#
# # ไบง็ไบ็ฎๆ ๆไปถ.dimๅๆไปถๅคน.data,ไฝๅๅ
ฅๅฎๆๅ,็จๅบไธ่ฝ่ชๅจ็ปๆ
# Calibrate the Sentinel-1 SLC product via the SNAP CalibrationOp.
CalibrationOp = snappy.jpy.get_type('org.esa.s1tbx.calibration.CalibrationOp')
in_file = '/home/zy/data_pool/U-TMP/S1A_IW_SLC__1SSV_20150109T112521_20150109T112553_004094_004F43_7041.dim'
product_orig = ProductIO.readProduct(in_file)
op = CalibrationOp()
op.setSourceProduct(product_orig)
# op.seParameter('Polarisations', 'VV')
# op.seParameter('Output band', 'sigma0')
# cal_product = op.getTargetProduct()
# NOTE(review): HashMap and GPF are never imported in this file, so the
# lines below raise NameError at runtime (presumably `from snappy import
# HashMap, GPF` is missing -- confirm).
parameters = HashMap()
parameters.put('Polarisations', 'VV')
parameters.put('Output', 'sigma0')
# NOTE(review): operator name 'Reproject' does not match the calibration
# parameters being passed -- confirm the intended GPF operator.
cal_product = GPF.createProduct('Reproject', parameters, product_orig)
| Aaayue/Hello-World | sentinel1_prepro.py | sentinel1_prepro.py | py | 1,466 | python | en | code | 0 | github-code | 13 |
12786412191 | from enum import Enum
from FusionLibrary.libs.cli.cli_base import local_actions
import os
import re
from RoboGalaxyLibrary.utilitylib import logging as logger
from robot.libraries.BuiltIn import BuiltIn
import requests
import string
import threading
import urllib2
import urllib3
import yaml
class StorageSize(Enum):
    """Common storage sizes in bytes (binary multiples, i.e. MB here = MiB)."""
    OneMB = 1048576
    FourMB = 4194304
    TenMB = 10485760
    OneGB = 1073741824
    TwoGB = 2147483648
    FiveGB = 5368709120
    TenGB = 10737418240
    ThirtyGB = 32212254720
    # NOTE(review): 17592186044416 is 2**44 bytes (16 TiB), not 140 TB --
    # confirm whether the name or the value is the intended one.
    OneFortyTB = 17592186044416
def cleanup_dir(dir):
    """
    Cleanup local dir: delete firmware/dump artifacts (.bin, .sdmp, .gz).
    :param dir: directory to scan (non-recursive)
    """
    for file in os.listdir(dir):
        # str.endswith accepts a tuple, collapsing the three original checks.
        if file.endswith((".bin", ".sdmp", ".gz")):
            os.remove(os.path.join(dir, file))
def download_updatebin_file(dir, url, chunk_size=StorageSize.OneMB.value):
    """
    Download update.bin file from url into dir, streaming in chunks.
    :param dir: destination directory
    :param url: source URL; the last path segment becomes the local file name
    :param chunk_size: bytes per streamed chunk (default 1 MiB)
    """
    urllib3.disable_warnings()
    filename = url.split('/')[-1]
    localfile = os.path.join(dir, filename)
    try:
        logger._log_to_console_and_log_file("Downloading file from %s..." % url)
        # verify=False: warnings for the unverified TLS request are
        # suppressed above.
        resp = requests.get(url, stream=True, verify=False)
        with open(localfile, 'wb') as f:
            for chunk in resp.iter_content(chunk_size=chunk_size):
                if chunk:  # skip keep-alive chunks
                    f.write(chunk)
                    f.flush()
        logger._log_to_console_and_log_file("Downloaded file from %s" % url)
        return True
    except Exception as e:
        # NOTE(review): the original cause is discarded; `raise ... from e`
        # would preserve the traceback chain.
        raise AssertionError("Exceptions occurred while download %s" % url)
def download_updatebin_files(dir, hops, updatebin_urls):
    """
    Download update.bin files concurrently (one thread per hop), blocking
    until every download has finished.
    :param dir: destination directory
    :param hops: keys to download from updatebin_urls
    :param updatebin_urls: mapping hop -> URL
    """
    ts = []
    for hop in hops:
        filename = updatebin_urls[hop].split('/')[-1]
        tn = 'download_%s' % filename
        t = threading.Thread(name=tn, target=download_updatebin_file, args=(dir, updatebin_urls[hop],))
        logger._log_to_console_and_log_file("Starting thread %s..." % tn)
        t.start()
        ts.append(t)
    # Wait for all downloads before returning.
    for t in ts:
        t.join()
def get_ova_build(url):
    """
    Get OVA build
    :param url: OVA download URL containing "<version>[-_]<6-7 digit build>"
    :return build: normalised build string (zero-padded, '-'-separated)
    """
    # Extract the version/build token from the URL.
    raw = re.sub(r'.*_([\d.]*[-_]\d{6,7}).*', r'\1', url)
    # Pad an extra 0 into the build number so it matches the OneView software
    # version format, then normalise the separator to '-'.
    build = re.sub(r'(.*)_(.*)', r'\1_0\2', raw).replace('_', '-')
    logger._debug("The OVA build number is %s" % build)
    return build
def get_updatebin_build(updatebin_file):
    """
    Get updatebin file build
    :param updatebin_file: update.bin file name
    :return build: "<version>-<7-digit build>" token
    """
    # Drop any SNAPSHOT marker before extracting the build token.
    stripped = updatebin_file.replace('SNAPSHOT-', '')
    build = re.sub(r'.*-([\d.]*-\d{7}).*', r'\1', stripped)
    logger._debug("The updatebin build number is %s" % build)
    return build
def remove_local_updatebin_files(dir):
    """
    Remove local update.bin files (every *.bin in *dir*, non-recursive).
    :param dir: directory to clean
    """
    for file in os.listdir(dir):
        if file.endswith(".bin"):
            os.remove(os.path.join(dir, file))
def suppress_warnings_during_appliance_reboot():
    """
    Suppress warnings from urllib3 via the running FusionLibrary Robot
    Framework library instance.
    """
    fusion_lib = BuiltIn().get_library_instance('FusionLibrary')
    fusion_lib.fusion_client.suppress_urllib3_warnings()
| richa92/Jenkin_Regression_Testing | robo4.2/fusion/tests/RIST/BTI/BTIHelpers.py | BTIHelpers.py | py | 3,470 | python | en | code | 0 | github-code | 13 |
21536452172 | from django.urls import path
from . import views
app_name = 'portfolio'
urlpatterns = [
path('', views.index, name='index'),
# individual projects: lowest priority since they're a catchall
path('<path:project_url>/assets/<str:res_url>', views.project_res),
path('<path:project_url>/', views.project, name='project'),
]
| ckuhl/ckuhl.com | ckuhl/portfolio/urls.py | urls.py | py | 340 | python | en | code | 0 | github-code | 13 |
21271793837 | from core.common import get_logger
from contracts.opensea import OpenseaContract
from core.chain_account import ChainAccount
from core.chain_network import ChainNetwork
from contracts.erc721 import ERC721Contract
from thirdparty.open_sea_v1.endpoints.client import ClientParams
from thirdparty.open_sea_v1.endpoints.orders import OrdersEndpoint
from thirdparty.open_sea_v1.endpoints.assets import AssetsEndpoint
from thirdparty.blocknative.gas import get_gasPrice
from wconfig import OpenseaKey
logger = get_logger('openseas.log')
def get_assets(asset_contract_address):
    """Fetch up to one page (50) of a collection's assets, cheapest sale
    price first; returns the parsed pages, or None when the result is empty."""
    logger.info(f"get assets")
    time_range = 30  # last 30 seconds orders (NOTE(review): unused here)
    client_params = ClientParams(
        api_key=OpenseaKey[1],
        offset=0,
        max_pages=1,
        limit=50
    )
    endpoint = AssetsEndpoint(
        client_params=client_params,
        asset_contract_address=asset_contract_address,
        order_by='sale_price',
        order_direction='asc'
    )
    flattened_events_pages: list = endpoint.get_parsed_pages()
    if len(flattened_events_pages) > 0:
        return flattened_events_pages
    else:
        logger.error('zero assets')
        return None
def get_order(asset_contract_address, token_id):
    """Fetch the cheapest active fixed-price sell order for one token.

    :param asset_contract_address: NFT contract address
    :param token_id: token id to look up
    :return: the lowest-priced order, or None when no order exists
    """
    logger.info(f'get order for token id {token_id}')
    client_params = ClientParams(
        api_key=OpenseaKey[0],
        limit=50,
        max_pages=1)
    endpoint = OrdersEndpoint(
        sale_kind=0,             # fixed-price sales only
        side=1,                  # sell orders
        is_english=False,        # exclude English auctions
        include_bundled=False,
        client_params=client_params,
        asset_contract_address=asset_contract_address,
        token_ids=[str(token_id)],
        order_by='eth_price',
        order_direction='asc',
    )
    orders = endpoint.get_parsed_pages(flat=True)
    if orders:
        # Ascending price sort, so the first entry is the cheapest order.
        return orders[0]
    logger.error(f'no order for token id {token_id} ')
    return None
def verify_sale_balance(order, chain):
    """Check that the order's maker still owns the token being sold.

    :param order: parsed OpenSea order; ``order.target`` is the NFT
        contract and ``order.maker['address']`` the seller address
    :param chain: chain network used to query the ERC721 contract
    :return: True when the on-chain owner matches the order maker
    """
    erc721 = ERC721Contract(chain=chain, address=order.target)
    # Addresses are hex strings with inconsistent casing; compare lowercased.
    return erc721.ownerOf(order.asset.token_id).lower() == order.maker['address'].lower()
def putorder(chain_name, order, max_price, account, gasRate: float = None, gas_price: int = None):
    """Try to fill an OpenSea sell order, guarded by an on-chain ownership
    check and a maximum acceptable price.

    :param chain_name: name of the chain network to connect to
    :param order: parsed sell order to fill
    :param max_price: maximum acceptable price, in whole ETH
    :param account: buyer account used to sign the transaction
    :param gasRate: optional multiplier applied to the live gas price
    :param gas_price: explicit gas price, used when ``gasRate`` is not given
    :return: True when the buy transaction was signed and pushed, else False
    """
    chain = ChainNetwork(chain_name)
    if not verify_sale_balance(order, chain):
        # BUG FIX: the original only logged here and then still attempted
        # the buy; abort when the maker no longer owns the token.
        logger.error(f'owner {order.maker["address"]} balance is zero, have sold.')
        return False
    order_price = int(order.base_price) / 10**18  # wei -> ETH
    if order_price > max_price:
        logger.error(f' price change or high than max price {max_price} !')
        return False
    logger.info(f"order price : {order_price}")
    gas_price = get_gasPrice() * gasRate if gasRate else gas_price
    chain_account = ChainAccount(account, chain)
    opensea = OpenseaContract(chain=chain)
    # NOTE(review): the ordersCanMatch_ pre-check was bypassed in the
    # original (`if True:`); kept bypassed to preserve behaviour -- confirm
    # whether the on-chain match check should be re-enabled.
    tx_params = opensea.atomicMatch_(from_addr=account.address, Order=order)
    print(tx_params)
    chain_account.sign_and_push(txn=tx_params, gas_price=gas_price)
    return True
return False | xiaoxiaoleo/Opensea-Sniper | thirdparty/opensea/utils.py | utils.py | py | 3,305 | python | en | code | 9 | github-code | 13 |
19282041078 | # -*- coding: utf-8 -*-
# web2py IDE helper: this block never executes (``if 0:``); it only makes
# the framework-injected globals (request, db, auth, T, ...) visible to
# code completion and static-analysis tools.
if 0:
    from gluon import *
    request, session, response, T, cache = current.request, current.session, current.response, current.t, current.cache
    from gluon.dal import DAL
    from gluon.tools import Auth, Service, Crud, Storage
    db = DAL()
    auth = Auth()
    service = Service()
    crud = Crud()
    settings = Storage()
import uuid
auth.signature.created_by.label = T( auth.signature.created_by.label )
auth.signature.created_on.label = T( auth.signature.created_on.label )
auth.signature.modified_by.label = T( auth.signature.modified_by.label )
auth.signature.modified_on.label = T( auth.signature.modified_on.label )
db.define_table( 'plugin_mailcaptcha_queue',
Field( 'uuid', 'string',
label = T( 'UUID' ),
requires = IS_NOT_EMPTY(),
writable = False,
default = lambda: str( uuid.uuid4() ),
),
Field( 'email', 'string',
label = T( 'E-mail' ),
requires = IS_NOT_EMPTY(),
),
Field( 'client_address', 'string',
label = T( 'Client address' ),
),
Field( 'client_name', 'string',
label = T( 'Client name' ),
),
Field( 'helo_name', 'string',
label = T( 'Helo name' ),
),
Field( 'recipient', 'string',
label = T( 'Recipient' ),
),
auth.signature
)
db.plugin_mailcaptcha_queue.created_on.readable = True
db.define_table( 'plugin_mailcaptcha_whitelist',
Field( 'email', 'string',
label = T( 'E-mail / Domain' ),
requires = IS_NOT_EMPTY(),
),
auth.signature
)
db.plugin_mailcaptcha_whitelist.created_by.readable = True
db.plugin_mailcaptcha_whitelist.created_on.readable = True
db.plugin_mailcaptcha_whitelist.modified_by.readable = True
db.plugin_mailcaptcha_whitelist.modified_on.readable = True
db.define_table( 'plugin_mailcaptcha_blacklist',
Field( 'email', 'string',
label = T( 'E-mail / Domain' ),
requires = IS_NOT_EMPTY(),
),
auth.signature
)
db.plugin_mailcaptcha_blacklist.created_by.readable = True
db.plugin_mailcaptcha_blacklist.created_on.readable = True
db.plugin_mailcaptcha_blacklist.modified_by.readable = True
db.plugin_mailcaptcha_blacklist.modified_on.readable = True
db.define_table( 'plugin_mailcaptcha_apply_on',
Field( 'email', 'string',
label = T( 'E-mail' ),
requires = IS_NOT_EMPTY(),
),
Field( 'is_honeypot', 'boolean',
label = T( 'Honeypot' ),
comment = T( 'If this e-mail address receives mail than the sender goes to the blacklist.' )
),
auth.signature
)
db.plugin_mailcaptcha_apply_on.created_by.readable = True
db.plugin_mailcaptcha_apply_on.created_on.readable = True
db.plugin_mailcaptcha_apply_on.modified_by.readable = True
db.plugin_mailcaptcha_apply_on.modified_on.readable = True
db.define_table( 'plugin_mailcaptcha_settings',
Field( 'webserver_url', 'string',
label = T( 'Webserver url' ),
requires = IS_NOT_EMPTY(),
default = 'https://',
comment = T( 'Like: "https://mailcaptcha.domain.com/", don\'t put application name at the end' )
),
Field( 'greetings', 'text',
label = T( 'Web page message' ),
comment = XML( T( 'It\'s a markmin!<br/>%(sender)s - sender email address' ) ),
),
Field( 'successful', 'text',
label = T( 'Web page successful message' ),
comment = XML( T( 'It\'s a markmin!<br/>%(sender)s - sender email address' ) ),
),
Field( 'mail_server', 'string',
label = T( 'Mail server' ),
requires = IS_NOT_EMPTY(),
default = 'localhost:25'
),
Field( 'mail_sender', 'string',
label = T( 'Mail sender' ),
requires = IS_EMAIL(),
),
Field( 'mail_login', 'string',
label = T( 'Mail login' ),
),
Field( 'mail_subject', 'string',
label = T( 'Mail subject' ),
),
Field( 'mail_txt', 'text',
label = T( 'Mail text' ),
requires = IS_NOT_EMPTY(),
comment = T( '%(url)s - URL, %(from)s - from email address' ),
),
Field( 'mail_admin_approval_subject', 'string',
label = T( 'Admin approval subject' ),
),
Field( 'mail_admin_approval_txt', 'text',
label = T( 'Admin approval text' ),
requires = IS_NOT_EMPTY(),
comment = T( '%(from)s - from email address' ),
),
Field( 'mail_notify_recipient', 'string',
label = T( 'Mail notify recipient' ),
comment = T( 'Comma separated list. If it\'s empty no notify will be sent.' )
),
Field( 'mail_notify_subject', 'string',
label = T( 'Mail notify subject' ),
),
Field( 'mail_notify_txt', 'text',
label = T( 'Mail notify text' ),
comment = T( '%(from)s - from email address, %(client_address)s - client address, %(client_name)s - client name, %(helo_name)s - helo name, %(recipient)s - recipient, %(created_on)s - created on, %(url)s - admin url' ),
),
Field( 'x509_sign_keyfile', 'string',
label = T( 'X.509 sign keyfile' ),
),
Field( 'x509_sign_certfile', 'string',
label = T( 'X.509 certificate file' ),
),
Field( 'x509_sign_passphrase', 'string',
label = T( 'X.509 passphrase' ),
),
Field( 'defer_if_permit', 'string',
label = T( 'defer_if_permit reply text' ),
requires = IS_NOT_EMPTY(),
default = 'You should answer a captcha first'
),
Field( 'reject', 'string',
label = T( 'reject reply text' ),
requires = IS_NOT_EMPTY(),
default = 'You are in our blacklist'
),
Field( 'recaptcha_private_key', 'string',
label = T( 'Recaptcha private key' ),
requires = IS_NOT_EMPTY(),
),
Field( 'recaptcha_public_key', 'string',
label = T( 'Recaptcha public key' ),
requires = IS_NOT_EMPTY(),
),
Field( 'recaptcha_options', 'string',
label = T( 'Recaptcha options' ),
),
Field( 'listen_host', 'string',
label = T( 'Listening hostname' ),
default = 'localhost',
),
Field( 'listen_port', 'integer',
label = T( 'Listening port' ),
default = 9999
),
Field( 'queue_timeout', 'integer',
label = T( 'Queue entry timeout' ),
default = 1440,
requires = IS_NOT_EMPTY(),
comment = T( 'Time the e-mail entry is valid in the queue in minutes' ) ),
auth.signature
)
db.plugin_mailcaptcha_settings.created_by.readable = True
db.plugin_mailcaptcha_settings.created_on.readable = True
db.plugin_mailcaptcha_settings.modified_by.readable = True
db.plugin_mailcaptcha_settings.modified_on.readable = True
plugin_mailcaptcha_config = db( db.plugin_mailcaptcha_settings.id > 0 ).select( orderby = db.plugin_mailcaptcha_settings.id ).first()
# Register the plugin's admin pages in the application menu; shown only to
# authenticated users.
if auth.user:
    response.menu += [
        ( T( 'Mailcaptcha' ), False, False,
          [
            ( T( 'Whitelist' ), True, URL( 'plugin_mailcaptcha', 'whitelist' ), [] ),
            ( T( 'Blacklist' ), True, URL( 'plugin_mailcaptcha', 'blacklist' ), [] ),
            ( T( 'Apply on' ), True, URL( 'plugin_mailcaptcha', 'apply_on' ), [] ),
            ( T( 'Settings' ), True, URL( 'plugin_mailcaptcha', 'settings' ), [] ),
            ( T( 'Queue' ), True, URL( 'plugin_mailcaptcha', 'queue' ), [] ),
            ( T( 'Task' ), True, URL( 'plugin_mailcaptcha', 'scheduler_task' ), [] ),
            ( T( 'Run' ), True, URL( 'plugin_mailcaptcha', 'scheduler_run' ), [] ),
          ] )
    ]
| szimszon/web2py_mailcaptcha | models/plugin_mailcaptcha.py | plugin_mailcaptcha.py | py | 7,764 | python | en | code | 4 | github-code | 13 |
42219953720 | from __future__ import print_function
import unittest
import os
from shutil import copy2
import olefile
class TestOlefile(unittest.TestCase):
    """Unit tests for basic olefile operations against sample fixtures."""

    def setUp(self):
        # A JPEG (not an OLE file) and a known-good OLE2 .doc fixture.
        self.non_ole_file = "tests/images/flower.jpg"
        self.ole_file = "tests/images/test-ole-file.doc"

    def test_isOleFile_false(self):
        self.assertFalse(olefile.isOleFile(self.non_ole_file))

    def test_isOleFile_true(self):
        self.assertTrue(olefile.isOleFile(self.ole_file))

    def test_context_manager(self):
        with olefile.OleFileIO(self.ole_file) as ole:
            self.assertTrue(ole.exists('worddocument'))

    def test_exists_worddocument(self):
        ole = olefile.OleFileIO(self.ole_file)
        self.assertTrue(ole.exists('worddocument'))
        ole.close()

    def test_exists_no_vba_macros(self):
        ole = olefile.OleFileIO(self.ole_file)
        self.assertFalse(ole.exists('macros/vba'))
        ole.close()

    def test_get_type(self):
        ole = olefile.OleFileIO(self.ole_file)
        self.assertEqual(ole.get_type('worddocument'), olefile.STGTY_STREAM)
        ole.close()

    def test_get_size(self):
        ole = olefile.OleFileIO(self.ole_file)
        self.assertGreater(ole.get_size('worddocument'), 0)
        ole.close()

    def test_get_rootentry_name(self):
        ole = olefile.OleFileIO(self.ole_file)
        self.assertEqual(ole.get_rootentry_name(), "Root Entry")
        ole.close()

    def test_meta(self):
        ole = olefile.OleFileIO(self.ole_file)
        meta = ole.get_metadata()
        self.assertEqual(meta.author, b"Laurence Ipsum")
        self.assertEqual(meta.num_pages, 1)
        ole.close()

    def test_minifat_writing(self):
        """Overwrite a MiniFAT stream in place and verify the round-trip."""
        ole_file_copy = "tests/images/test-ole-file-copy.doc"
        minifat_stream_name = "\x01compobj"
        if os.path.isfile(ole_file_copy):
            os.remove(ole_file_copy)
        copy2(self.ole_file, ole_file_copy)
        # BUG FIX: remove the working copy even when an assertion fails
        # (the original only deleted it at the end of a passing run).
        self.addCleanup(os.remove, ole_file_copy)

        ole = olefile.OleFileIO(ole_file_copy, write_mode=True)
        stream = ole.openstream(minifat_stream_name)
        # Stream must be small enough to live in the MiniFAT.
        self.assertLess(stream.size, ole.minisectorcutoff)
        str_read = stream.read()
        self.assertEqual(len(str_read), stream.size)
        self.assertNotEqual(str_read, b'\x00' * stream.size)
        stream.close()
        ole.write_stream(minifat_stream_name, b'\x00' * stream.size)
        ole.close()

        # Re-open and verify the zeroed bytes were written through.
        ole = olefile.OleFileIO(ole_file_copy)
        stream = ole.openstream(minifat_stream_name)
        self.assertLess(stream.size, ole.minisectorcutoff)
        str_read_replaced = stream.read()
        self.assertEqual(len(str_read_replaced), stream.size)
        self.assertNotEqual(str_read_replaced, str_read)
        self.assertEqual(str_read_replaced, b'\x00' * len(str_read))
        stream.close()
        ole.close()
class FileHandleCloseTest(unittest.TestCase):
    """Test file handles are closed correctly."""

    def setUp(self):
        # A non-OLE file (JPEG) and a valid OLE2 fixture.
        self.non_ole_file = "tests/images/flower.jpg"
        self.ole_file = "tests/images/test-ole-file.doc"

    def leaking_test_function(self):
        """Function that leaks an open OleFileIO."""
        # Deliberately no close(): used to provoke the not-closed warning.
        ole = olefile.OleFileIO(self.ole_file)

    @unittest.skip('Cannot predict when __del__ is run, so cannot test that '
                   'it issues a warning')
    # requires python version 3.2 or higher
    def test_warning(self):
        """Test that warning is issued when ole file leaks open fp."""
        with self.assertWarns(olefile.OleFileIONotClosed):
            self.leaking_test_function()

    @unittest.skip('Cannot test attribute fp of OleFileIO instance that '
                   'failed to construct')
    def test_init_fail(self):
        """Test that file handle is closed if open() from __init__ fails."""
        ole = None
        try:
            ole = olefile.OleFileIO(self.non_ole_file)
            self.fail('Should have raised an exception')
        except Exception as exc:
            self.assertEqual(str(exc), 'not an OLE2 structured storage file')
        self.assertTrue(ole.fp.closed)  # ole is still None

    def test_context_manager(self):
        """Test that file handle is closed by context manager."""
        file_handle = None
        with olefile.OleFileIO(self.ole_file) as ole:
            file_handle = ole.fp
            self.assertFalse(file_handle.closed)
        # Leaving the with-block must close the self-created handle.
        self.assertTrue(file_handle.closed)

    def test_manual(self):
        """Test that simple manual close always closes self-created handle."""
        ole = olefile.OleFileIO(self.ole_file)
        self.assertFalse(ole.fp.closed)
        _ = ole.listdir()
        self.assertFalse(ole.fp.closed)
        ole.close()
        self.assertTrue(ole.fp.closed)

    def test_fp_stays_open(self):
        """Test that fp is not automatically closed if provided by caller."""
        with open(self.ole_file, 'rb') as file_handle:
            self.assertFalse(file_handle.closed)
            with olefile.OleFileIO(file_handle) as ole:
                self.assertFalse(file_handle.closed)
                self.assertEqual(file_handle, ole.fp)
            # check that handle is still open, although ole is now closed
            self.assertFalse(file_handle.closed)
            # do something with it
            file_handle.seek(0)
            self.assertTrue(olefile.isOleFile(file_handle))
        # now should be closed
        self.assertTrue(file_handle.closed)
if __name__ == '__main__':
unittest.main()
| decalage2/olefile | tests/test_olefile.py | test_olefile.py | py | 5,695 | python | en | code | 201 | github-code | 13 |
5473329771 | import re
from app import app
from entities.tutor import Tutor
from googleapi.googleapi import GoogleApi
config = app.config['config']
logger = config.get_logger()
all_subjects = [
{'name': 'ุฑูุงุถูุงุช', 'id': 1},
{'name': 'ููุฒูุงุก', 'id': 2},
{'name': 'ูุบุฉ ุนุจุฑููุฉ', 'id': 3},
{'name': 'ูุบุฉ ุนุฑุจููุฉ', 'id': 4},
{'name': 'ูุบุฉ ุงูุฌููุฒููุฉ', 'id': 5},
{'name': 'ุฏูู ุงุณูุงู
ู', 'id': 6},
{'name': 'ุจุณูุฎูู
ุชุฑู', 'id': 7},
{'name': 'ู
ุฏููุงุช', 'id': 8},
{'name': 'ุนููู
ุญุงุณูุจ', 'id': 9},
{'name': 'ุงููุชุฑููููุง', 'id': 10},
{'name': 'ููู
ูุงุก', 'id': 11},
{'name': 'ุจููููุฌูุง', 'id': 12},
{'name': 'ุฅุนูุงู
', 'id': 13},
{'name': 'ุชุงุฑูุฎ ูุงููุชุงุจุฉ ุงูุงุจุฏุงุนูุฉ ูุงูููุฏ ูุงูุงุฏุจ', 'id': 14},
]
def get_tutors() -> list:
    """Returns a list of objects holding all tutors.

    Note: If the Google form changes, the existing rows WILL NOT be updated, so their row (represented by a list) will
    have less elements. In order to have a list with a fixed size, we pad such results with the empty string.

    :return: List of tutors.
    """
    sample_spreadsheet_id = config.MENTORS_SHEET_ID
    sample_range_name = 'A:Q'

    # used for padding missing columns
    # NOTE(review): despite the name, rows_count is the COLUMN count of the
    # range (A..Q == 17 columns), derived from the range string.
    cols = re.search('([A-Z]):([A-Z])', sample_range_name)
    rows_count = ord(cols[2]) - ord(cols[1]) + 1

    service = GoogleApi.get_service()
    result = service.spreadsheets().values().get(spreadsheetId=sample_spreadsheet_id,
                                                range=sample_range_name).execute()
    values = result.get('values', [])

    if not values:
        logger.info('got an empty list of rows for document %s', config.MENTORS_SHEET_ID)
        return []

    # remove header column
    values.pop(0)

    # Pad short rows (older form revisions) with '' up to the column count.
    values = list(map(lambda v: v if len(v) == rows_count else v + [''] * (rows_count - len(v)), values))
    # Columns map positionally into Tutor; phone (col 4) is converted from a
    # local number to international by replacing the leading digit with
    # "+972" -- assumes the sheet stores numbers as 0XXXXXXXXX, TODO confirm.
    return [Tutor(value[1], value[2], value[3], "+972" + str(value[4])[1:], _get_id_to_subject_list(value[5]),
                  value[6].split(','), value[7].split(','), value[8], value[9], value[10], value[11], value[12],
                  value[13].split(','), value[14], value[15], value[16])
            for value in values]
def get_subjects() -> list:
    """Return the static list of supported subjects.

    Each entry is a dict with a ``name`` (subject name) and an ``id``.
    """
    return all_subjects
def _get_id_to_subject_list(subjects) -> list:
    """Returns a list of objects. Each object has subject's name, mapped by its ID.

    :param subjects: List of subjects (each is a coma-separated string).
    :return: List of dictionaries {id -> name}; empty list when any subject
        name is not found in ``all_subjects``.
    """
    try:
        return [next(item for item in all_subjects if item['name'] == subject.strip())
                for subject in subjects.split(',')]
    except StopIteration as si:
        # BUG FIX: the original called logger.error("msg", si), passing the
        # exception as a %-format argument with no placeholder, which makes
        # the logging module raise a formatting error and drop the message.
        logger.error("couldn't generate subjects dict: %s", si)
        return []
| nadrus-online/nadrus-backend | app/contoller.py | contoller.py | py | 2,935 | python | en | code | 0 | github-code | 13 |
27802177712 | ##๋ฌธ์ 1 ์ํ์ข์ฐ
# n ์ ๊ณต๊ฐ์ ํฌ๊ธฐ
# n = int(input())
# plan = list(input().split())
# x = 1
# y = 1
# for word in plan:
# if word == 'L':
# if y > 1:
# y -= 1
# elif word == 'R':
# if y < n:
# y += 1
# elif word == 'U':
# if x > 1:
# x -= 1
# elif word == 'D':
# if x < n:
# x += 1
# print(x, y)
# ๋ง๋๋ฐ ๋ ์ข์ ๋ฐฉ๋ฒ์ด ์๋ค
# n = int(input())
# plans = input().split()
# move_types = ['L', 'R', 'U', 'D']
# move_x = [0,0,-1,1]
# move_y = [-1,1,0,0]
# x,y = 1,1
# # ์ด๋ ๊ณํ์ ํ๋์ฉ ํ์ธ
# for plan in plans:
# # ์ด๋ ํ ์ขํ๊ตฌํ๊ธฐ
# for move_type in move_types:
# if plan == move_type:
# temp_x, temp_y = x + move_x[move_type], y + move_y[move_type]
# # ๋ฒ์๋ฅผ ๋์ด๊ฐ๋๊ฒฝ์ฐ ๋ฌด์
# if temp_x < 1 or temp_x > n or temp_y < 1 or temp_y > n:
# continue
# x, y = temp_x, temp_y
# print(x,y)
## ์ฐ์ต๋ฌธ์ ์๊ฐ
# n = int(input())
# ์คํจ...
# ์ํ์ ๊ท์น์ ์ฐพ๊ธฐ ์ด๋ ค์ด๊ฒฝ์ฐ ๋นจ๋ฆฌ ๋ค๋ฅธ๋ฐฉ๋ฒ์ ์๊ฐํ์
# ํ์ด ๋ณด๊ณ ๋ค์ ์๋
# n = int(input())
# count = 0
# for i in range(n+1):
# for j in range(60):
# for k in range(60):
# res = str(i) + str(j) + str(k)
# if '3' in res:
# count += 1
# print(count)
## ๋ฌธ์ 2 ์ค์ ๋ฌธ์ ์์ค์ ๋์ดํธ
# ๋ค์ ํ์ด
# x_list = [None,a,b,c,d,e,f,g,h]
# s = str(input())
# x,y = x_list.index(s[0]), int(s[1])
# minimum = 1
# maximum = 8
# ๋ค์ํ์
# val = input()
# # x ์ขํ์ ๊ฒฝ์ฐ a ~ h ๋ฅผ ์ซ์๋ก ๋ณํ
# x = ord(val[0]) - (ord('a') + 1)
# y = int(val[1])
# moves = [(2,1),(2,-1),(1,2),(-1,2),(-2,1),(-2,-1),(1,-2),(-1,-2)]
# x,y = 1,1
# count = 0
# for move in moves:
# nx, ny = x + move[0], y + move[1]
# if nx < 1 or ny < 1 or nx > 8 or ny > 8:
# continue
# count+=1
# print(count)
## ๋ฌธ์ 3 ๊ฒ์ ๊ฐ๋ฐ
#์๊ฐ ์ด๊ณผ๋จ.. ์ผ๋จ ๊ตณ์ด move ๋ sides ๋ฐฐ์ด ๋ฐ๋ก์ฐ์ง๋ง๊ณ ์ข๋ก ํ์ ์ํฌ ๋ ๋ฐ๋ ๋ฐฉํฅ์ผ๋ก ๋๊ฒ ๋ณ๊ฒฝํ๋ฉด๋จ
#๋๋จธ์ง ์ ํ์ ๋ณ์์ด๋ฆ ์ข ๋ ์ ์ ํ๊ฒ ๋ฐ๊พธ๊ณ
def rotate(move):
    """Return the next direction index after one left turn, wrapping 3 -> 0.

    :param move: current index into the direction table (0-3)
    :return: index of the next direction (counter-clockwise in the table)
    """
    return (move + 1) % 4
# Game-dev simulation. The board is n rows by m columns, 0-indexed.
# Direction tables are ordered U, L, D, R so that index + 1 == turn left.
move = ['U', 'L', 'D', 'R']
move_x = [-1, 0, 1, 0]
move_y = [0, -1, 0, 1]
n, m = map(int, input().split())
x, y, cur_side = map(int, input().split())
# Input encodes direction as 0=N, 1=E, 2=S, 3=W; remap into our CCW table.
sides = ['U', 'R', 'D', 'L']
cur_side = move.index(sides[cur_side])
# Read the map: 0 = land, 1 = sea (visited cells are also marked 1 below).
cmap = [list(map(int, input().split())) for _ in range(n)]
count = 1
side_maximum_count = 0
while True:
    cur_side = rotate(cur_side)
    nx, ny = x + move_x[cur_side], y + move_y[cur_side]
    # BUG FIX: the original used `nx > n or ny > m`, which lets nx == n
    # (or ny == m) through and indexes past the end of the 0-indexed map.
    if nx < 0 or ny < 0 or nx >= n or ny >= m:
        continue
    if cmap[nx][ny] == 0:
        # Unvisited land ahead: mark the current cell visited and advance.
        cmap[x][y] = 1
        x, y = nx, ny
        count += 1
        side_maximum_count = 0
        continue
    side_maximum_count += 1
    if side_maximum_count == 4:
        # All four directions blocked: look one step backwards; stop when
        # that cell is sea (or already visited).
        nx, ny = x - move_x[cur_side], y - move_y[cur_side]
        if cmap[nx][ny] == 1:
            break
        # NOTE(review): the position is not actually moved to the backward
        # cell here -- confirm against the problem statement.
        side_maximum_count = 0
print(count)
# ์
๋ ฅ
# 4 4
# 1 1 0
# 1 1 1 1
# 1 0 0 1
# 1 1 0 1
# 1 1 1 1
# ์ถ๋ ฅ: 2 | tkdgns8234/DataStructure-Algorithm | Algorithm/CodingTest_Study_Book/3_impl.py | 3_impl.py | py | 3,300 | python | ko | code | 0 | github-code | 13 |
41324753815 | """ ShowOspfv3SummaryPrefix.py
IOSXE parser for the following show command:
* show ospfv3 summary-prefix
"""
# python
import re
# metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, Any, Or, Optional, Use, Default
# ===============================================
# Schema for 'show ospfv3 summary-prefix'
# Optional: allowing either ipv4 or ipv6 or both
# ===============================================
class ShowOspfv3SummaryPrefixSchema(MetaParser):
    """Schema for 'show ospfv3 summary-prefix'."""

    schema = {
        # Keyed by OSPFv3 process id, as captured from the header line.
        'process_id': {
            Any(): {
                'address_family': str,
                'router_id': str,
                # Prefixes whose metric is '<unreachable>'.
                'null_route': {
                    Any(): {
                        'null_metric': str,
                    },
                },
                # Summarised prefixes with external metric, type and tag.
                'summary': {
                    Any(): {
                        'sum_type': str,
                        'sum_tag': int,
                        'sum_metric': int
                    },
                },
            },
        },
    }
# ====================================
# Parser for 'ShowOspfv3SummaryPrefix'
# ====================================
class ShowOspfv3SummaryPrefix(ShowOspfv3SummaryPrefixSchema):
    """
    Router#sh ospfv3 summary-prefix
    OSPFv3 10000 address-family ipv6 (router-id 10.2.2.21)
    10:2::/96 Metric <unreachable>
    10:2:2::/96 Metric 111, External metric type 2, Tag 111
    Router#
    """
    cli_command = 'show ospfv3 summary-prefix'

    def cli(self, output=None):
        """Parse 'show ospfv3 summary-prefix' output into the schema dict.

        :param output: pre-collected device output; when None the command
            is executed on the connected device.
        """
        if output is None:
            out = self.device.execute(self.cli_command)
        else:
            out = output

        # init var
        ret_dict = {}
        ospf_id = ""  # current process id; set when the header line matches

        # OSPFv3 10000 address-family ipv6 (router-id 10.2.2.21)
        p1 = re.compile(
            r'^OSPFv3 +(?P<ospf_id>(\d+)) +address-family +(?P<address_family>(\S+)) +\(router-id +(?P<router_id>(\S+))\)')

        # 10:2::/96 Metric <unreachable>
        p2 = re.compile(r'^(?P<null_prefix>(\S+)) +.* Metric\s+(?P<null_metric>(\S+$))')

        # 10:2:2::/96 Metric 111, External metric type 2, Tag 111
        p3 = re.compile(
            r'^(?P<sum_prefix>(\S+)) +.* Metric\s+(?P<sum_metric>(\d+)),.* +type +(?P<sum_type>(\d)),\s+Tag +(?P<sum_tag>(\S+))')

        for line in out.splitlines():
            line = line.strip()

            # Header: starts a new process entry; later lines attach to it.
            m = p1.match(line)
            if m:
                group = m.groupdict()
                ret_dict['process_id'] = {}
                ospf_id = group['ospf_id']
                ret_dict['process_id'][ospf_id] = {}
                ret_dict['process_id'][ospf_id]['null_route'] = {}
                ret_dict['process_id'][ospf_id]['summary'] = {}
                ret_dict['process_id'][ospf_id]['address_family'] = group['address_family']
                ret_dict['process_id'][ospf_id]['router_id'] = group['router_id']
                continue

            # Unreachable (null-route) prefix line.
            m = p2.match(line)
            if m:
                group = m.groupdict()
                if group['null_prefix']:
                    n_prefix = group['null_prefix']
                    ret_dict['process_id'][ospf_id]['null_route'][n_prefix] = {}
                    ret_dict['process_id'][ospf_id]['null_route'][n_prefix]['null_metric'] = group['null_metric']
                continue

            # Summarised prefix with metric/type/tag.
            m = p3.match(line)
            if m:
                group = m.groupdict()
                if group['sum_prefix']:
                    prefix = group['sum_prefix']
                    ret_dict['process_id'][ospf_id]['summary'][prefix] = {}
                    ret_dict['process_id'][ospf_id]['summary'][prefix]['sum_metric'] = int(group['sum_metric'])
                    ret_dict['process_id'][ospf_id]['summary'][prefix]['sum_type'] = group['sum_type']
                    ret_dict['process_id'][ospf_id]['summary'][prefix]['sum_tag'] = int(group['sum_tag'])
                continue

        return ret_dict
# ===============================================
# schema for:
# * show ospfv3 vrf {vrf_id} neighbor
# ======================================================
class ShowOspfv3vrfNeighborSchema(MetaParser):
    """Schema detail for:
        * show ospfv3 vrf {vrf_id} neighbor
    """

    schema = {
        'process_id': int,
        'address_family': str,
        'router_id': str,
        'vrfs': {
            int: {
                'neighbor_id': {
                    str: {
                        'priority': int,
                        'state': str,
                        'dead_time': str,
                        # BUG FIX: neighbor addresses are dotted IPs such as
                        # '100.10.0.2' (see the sample output in the parser
                        # docstring); they can never be ints.
                        'address': str,
                        'interface': str
                    },
                },
            },
        },
    }
# ======================================================
# parser for:
# * show ospfv3 vrf {vrf_id} neighbor
# ======================================================
class ShowOspfv3vrfNeighbor(ShowOspfv3vrfNeighborSchema):
    """parser details for:
        * show ospfv3 vrf {vrf_id} neighbor
    """

    cli_command = 'show ospfv3 vrf {vrf_id} neighbor'

    def cli(self, vrf_id='', output=None):
        """Parse 'show ospfv3 vrf {vrf_id} neighbor' into the schema dict.

        :param vrf_id: vrf id substituted into the command template
        :param output: pre-collected output; when None the command is run
        """
        cmd = self.cli_command.format(vrf_id=vrf_id)
        if output is None:
            # BUG FIX: the original executed self.cli_command, i.e. the raw
            # template still containing the literal '{vrf_id}' placeholder;
            # execute the formatted command instead.
            output = self.device.execute(cmd)

        ret_dict = {}

        # OSPFv3 2 address-family ipv6 vrf 2 (router-id 173.19.2.2)
        p1 = re.compile(
            r'^OSPFv3\s+(?P<process_id>\d+)+\s+address-family\s(?P<address_family>\w+)\s+vrf\s(?P<vrf_id>\d+)'
            r'\s+\(router-id\s+(?P<router_id>[\d\.\/]+)\)$')

        # Neighbor ID     Pri   State           Dead Time   Address         Interface
        # 100.100.100.100   1   FULL/  -        00:00:38    100.10.0.2      GigabitEthernet0/0/0/0
        # 95.95.95.95       1   FULL/  -        00:00:38    100.20.0.2      GigabitEthernet0/0/0/1
        # 192.168.199.137   1   FULL/DR         0:00:31     172.31.80.37    GigabitEthernet 0/3/0/2
        p2 = re.compile(r'^(?P<neighbor_id>\S+)\s+(?P<priority>\d+) +(?P<state>[A-Z]+/\s*[A-Z-]*)'
                        r' +(?P<dead_time>(\d+:){2}\d+) +(?P<address>[\d\.\/]+) +(?P<interface>\w+\s*\S+)$')

        for line in output.splitlines():
            line = line.strip()

            # Header line: identifies the process/AF/vrf block.
            m = p1.match(line)
            if m:
                group = m.groupdict()
                vrf_id = int(group['vrf_id'])
                ret_dict['process_id'] = int(group['process_id'])
                ret_dict['address_family'] = group['address_family']
                ret_dict['router_id'] = group['router_id']
                vrf_dict = ret_dict.setdefault('vrfs', {}).setdefault(vrf_id, {})

            # Neighbor rows following the header.
            m = p2.match(line)
            if m:
                group = m.groupdict()
                neighbor_id = group['neighbor_id']
                neighbor_dict = vrf_dict.setdefault('neighbor_id', {}).setdefault(neighbor_id, {})
                neighbor_dict['priority'] = int(group['priority'])
                neighbor_dict['state'] = group['state']
                neighbor_dict['dead_time'] = group['dead_time']
                # BUG FIX: the original applied int() to a dotted IP string
                # (e.g. '100.10.0.2'), which raises ValueError on the first
                # neighbor row; keep the address as a string.
                neighbor_dict['address'] = group['address']
                neighbor_dict['interface'] = group['interface']
                continue

        return ret_dict
| hsaljuboori/ansible-aljuboori | ansible-aljuboori/.venv/.venv/lib/python3.8/site-packages/genie/libs/parser/iosxe/show_ospfv3.py | show_ospfv3.py | py | 7,779 | python | en | code | 0 | github-code | 13 |
27300274725 | import intake
import xesmf
import xarray
import functools
import pandas
import numpy
import logging
from pkg_resources import resource_filename
@functools.lru_cache
def paramdb():
    """Return the ECMWF GRIB parameter table, cached after the first read."""
    return intake.cat.nci.ecmwf.grib_parameters.read()
def load_var(cat, chunks=None, **kwargs):
    """
    Load a single variable from intake

    Args:
        cat: intake-esm catalogue
        chunks: dask chunking passed to xarray (default ``{"time": 12}``)
        **kwargs: catalogue search terms

    Returns:
        xarray.DataArray with a single variable, annotated with GRIB
        parameter metadata from :func:`paramdb` when available
    """
    # BUG FIX: the original used a mutable default argument
    # (chunks={"time": 12}); use a None sentinel instead.
    if chunks is None:
        chunks = {"time": 12}
    result = cat.search(**kwargs)
    if len(result.df) == 0:
        raise Exception(f"No matches: {cat.name} {kwargs}")
    # Prefer era5-1
    # This fails as now get subcollection returns a list not a dictionary
    #if "era5-1" in result.unique().get("sub_collection", {"values": []})["values"]:
    unique_values = result.unique()
    if "era5-1" in unique_values.get("sub_collection", []):
        result = result.search(sub_collection="era5-1")
    logging.debug(f"Opening {result.df.path.values}")
    d = result.to_dataset_dict(xarray_open_kwargs={"chunks": chunks}, progressbar=False)
    if len(d) > 1:
        raise Exception(f"Multiple matches: {kwargs} {d.keys()}")
    ds = list(d.values())[0]
    # intake-esm records the variable name in the dataset attributes.
    da = ds[ds.attrs["intake_esm_vars"][0]]
    # Attach GRIB table/code/name/units metadata when the parameter is known.
    params = paramdb()
    params = params[params.cfVarName == da.name]
    if len(params) == 0:
        return da
    params = params.iloc[0]
    da.attrs["table"] = numpy.int32(params["table2Version"])
    da.attrs["code"] = numpy.int32(params["indicatorOfParameter"])
    da.attrs["standard_name"] = params["cfName"]
    da.attrs["ecmwf_name"] = params["name"]
    da.attrs["ecmwf_shortname"] = params["shortName"]
    da.attrs["units"] = params["units"]

    return da
def read_era5_land(var, year, month):
    """Read one month of an ERA5-Land variable.

    The result is on the land grid and must be interpolated onto the ERA5
    grid before merging; the variable is renamed with a ``_land`` suffix.
    """
    field = load_var(
        intake.cat.nci.era5_land,
        parameter=var,
        year=year,
        month=month,
        product_type="reanalysis",
    )
    field.name = f"{field.name}_land"
    return field
def read_era5_surface(var, year, month):
    """Read one month of an ERA5 single-level (surface) variable.

    The variable is renamed with a ``_surf`` suffix.
    """
    field = load_var(
        intake.cat.nci.era5,
        parameter=var,
        year=year,
        month=month,
        product_type="reanalysis",
        dataset="single-levels",
    )
    field.name = f"{field.name}_surf"
    return field
def read_era5_pressure(var, year, month):
    """Read one month of an ERA5 pressure-level variable.

    Chunked along both time and level; renamed with a ``_pl`` suffix.
    """
    field = load_var(
        intake.cat.nci.era5,
        parameter=var,
        year=year,
        month=month,
        product_type="reanalysis",
        dataset="pressure-levels",
        chunks={"time": 12, "level": 1},
    )
    field.name = f"{field.name}_pl"
    return field
@functools.lru_cache
def regrid():
    """
    Create regridding from land to surface

    Builds (and caches) a bilinear xESMF regridder from the ERA5-Land grid
    to the ERA5 surface grid, using a sample 2t field from each dataset to
    define the grids and pre-computed weights shipped with the package.
    """
    land = read_era5_land("2t", 2000, 1)
    surf = read_era5_surface("2t", 2000, 1)
    # to solve issue #10 Regridder accept a xarray Dataset not DataArray
    land = land.isel(time=0).squeeze(drop=True)
    surf = surf.isel(time=0).squeeze(drop=True)
    # in theory using drop=True should remove the time scalar variable but it's not working
    landds = land.drop('time').to_dataset()
    surfds = surf.drop('time').to_dataset()
    return xesmf.Regridder(
        landds,
        surfds,
        "bilinear",
        filename=resource_filename(__name__, "nci_regrid_weights.nc"),
        reuse_weights=True,
    )
def merged_land_surf(var, year, month):
    """
    Read the land and surface values for a variable, composing them so that
    over land the land values are used, and over ocean the surface values are
    used

    Args:
        var: ERA5 short variable name
        year, month: month to read
    """
    # Land-sea mask (1 over land, 0 over ocean) used as the blend weight.
    lsm = read_era5_surface("lsm", 2000, 1)[0, :, :]
    surf = read_era5_surface(var, year, month)
    # Different names in era5 and era5-land
    renamed_vars = {"10u": "u10", "10v": "v10"}
    if var in ["ci", "msl", "sst"] or year < 1981:
        # No land value (ERA5-Land starts in 1981 and has no ocean fields).
        return surf
    if var.startswith("stl") or var.startswith("swvl"):
        # Soil temperature/moisture only exist over land: don't merge with surf
        land = read_era5_land(var, year, month)
        land_on_surf = regrid()(land)
    else:
        # Merge surf over ocean with land
        land = read_era5_land(renamed_vars.get(var, var), year, month)
        if (year, month) == (1981, 1):
            # Fill in 19810101T0000 with values from 19810101T0100
            surf, land = xarray.align(
                surf, land, join="left", exclude=["latitude", "longitude"]
            )
            land = land.bfill("time", limit=None)
        # Weighted blend: land values where lsm==1, surface where lsm==0.
        land_on_surf = regrid()(land) * lsm + surf * (1 - lsm)
        land_on_surf.name = surf.name
        land_on_surf.attrs = surf.attrs
    return land_on_surf
def read_era5_month(surface_vars, pressure_vars, year, month, era5land: bool = True):
    """Read one month of surface and pressure-level variables, merged into
    a single dataset.

    When ``era5land`` is True, surface fields are blended with ERA5-Land
    values over land; otherwise plain ERA5 surface fields are used.
    """
    surface_reader = merged_land_surf if era5land else read_era5_surface
    surface = [surface_reader(name, year, month) for name in surface_vars]
    pressure = [read_era5_pressure(name, year, month) for name in pressure_vars]
    return xarray.merge(surface + pressure)
def read_era5(surface_vars, pressure_vars, start, end, era5land: bool = True):
    """
    Read a collection of surface and pressure level values between start and end

    Whole months are read and concatenated, then trimmed back to the
    requested [start, end] window. The static land-sea mask and surface
    geopotential are attached from a reference month (2000-01).
    """
    if start < pandas.Timestamp("1979-01-01T00:00"):
        raise ValueError(
            f"Start time {start} is before ERA5 start date 1979-01-01T00:00"
        )
    # Round the window out to whole months.
    t0 = pandas.offsets.MonthBegin().rollback(start.date())
    t1 = pandas.offsets.MonthEnd().rollforward(end.date())
    result = []
    for t in pandas.date_range(t0, t1, freq="M"):
        result.append(
            read_era5_month(
                surface_vars, pressure_vars, t.year, t.month, era5land=era5land
            )
        )
    ds = xarray.concat(result, dim="time")
    # Time-invariant fields taken from an arbitrary reference month.
    ds["lsm"] = read_era5_surface("lsm", 2000, 1).isel(time=0)  # .squeeze('time')
    ds["z"] = read_era5_surface("z", 2000, 1).isel(time=0)  # .squeeze('time')
    return ds.sel(time=slice(start, end))
def read_um(time, era5land: bool = True):
    """Read the single-time-step field set needed to initialise the UM.

    Returns a dataset of surface/soil fields and pressure-level winds,
    temperature and humidity at the hour containing ``time``, with NaNs
    filled and soil-level metadata attached.
    """
    # Make sure the time includes an hour
    start = pandas.offsets.Hour().rollback(time)
    ds = read_era5(
        [
            "skt",
            "sp",
            "ci",
            "sd",
            "stl1",
            "stl2",
            "stl3",
            "stl4",
            "swvl1",
            "swvl2",
            "swvl3",
            "swvl4",
        ],
        ["u", "v", "t", "q"],
        start,
        start,
        era5land=era5land,
    )
    # Replace missing values with the field mean so the UM gets no NaNs.
    for k, v in ds.items():
        ds[k] = v.fillna(v.mean())
    ds = soil_level_metadata(ds)
    return ds
def read_wrf(start, end, era5land: bool = True):
    """Read the field set needed to initialise WRF between start and end.

    Returns surface/soil fields plus pressure-level geopotential, winds,
    temperature and relative humidity, with soil-level metadata attached
    and 64-bit variables marked for 32-bit on-disk encoding.
    """
    ds = read_era5(
        [
            "10u",
            "10v",
            "2t",
            "2d",
            "sp",
            "msl",
            "skt",
            "ci",
            "sst",
            "rsn",
            "sd",
            "stl1",
            "stl2",
            "stl3",
            "stl4",
            "swvl1",
            "swvl2",
            "swvl3",
            "swvl4",
        ],
        ["z", "u", "v", "t", "r"],
        start,
        end,
        era5land=era5land,
    )
    ds = soil_level_metadata(ds)
    # Downcast on write: WRF inputs don't need 64-bit precision.
    for v in ds.values():
        if v.dtype == "float64":
            v.encoding["dtype"] = "float32"
        if v.dtype == "int64":
            v.encoding["dtype"] = "int32"
    return ds
def soil_level_metadata(ds):
    """Attach CF-style depth coordinates and bounds to the four ECMWF soil
    levels of ``stl{1..4}`` and ``swvl{1..4}``.

    Each soil variable gets its own singleton ``depth{l}`` dimension with
    mid-point depth and bounds in cm. Index 0 of the lookup lists is a None
    pad so level number ``l`` indexes directly.
    """
    # Mid-point depths (cm) of ECMWF soil layers 1-4; index 0 unused.
    depth = [None, 3.5, 17.5, 64, 177.5]
    # Layer boundaries (cm); bounds of layer l are depth_bnds[l:l+2].
    depth_bnds = [None, 0, 7, 28, 100, 255]
    depth_attrs = {
        "long_name": "depth_below_land",
        "units": "cm",
        "positive": "down",
        "axis": "Z",
    }
    for l in range(1, 5):
        ds[f"stl{l}_surf"] = ds[f"stl{l}_surf"].expand_dims(f"depth{l}", axis=1)
        ds[f"swvl{l}_surf"] = ds[f"swvl{l}_surf"].expand_dims(f"depth{l}", axis=1)
        ds.coords[f"depth{l}"] = xarray.DataArray(
            depth[l : (l + 1)], dims=[f"depth{l}"], attrs=depth_attrs
        )
        ds.coords[f"depth{l}_bnds"] = xarray.DataArray(
            [depth_bnds[l : (l + 2)]], dims=[f"depth{l}", "bnds"], attrs=depth_attrs
        )
        ds.coords[f"depth{l}"].attrs["bounds"] = f"depth{l}_bnds"
    return ds
| coecms/era5grib | era5grib/nci.py | nci.py | py | 8,380 | python | en | code | 4 | github-code | 13 |
7042745623 | #paquete utilizados
import requests
import json
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
#funcion main para generar las graficas
def main():
    """Query the prediction service for every test pattern and plot results.

    Reads the test patterns from ``test_tmp.csv``, asks the local
    prediction API for the upward illuminance of each pattern, derives
    the upward flux and FHSI (light-pollution percentage) for both the
    expected and predicted values, and saves comparison plots to
    ``testIluminacion.png`` and ``testContaminacionLuminica.png``.

    Raises:
        Exception: if the prediction API returns a non-200 status code.
    """
    # Load the patterns previously exported to CSV into a DataFrame
    dataset = read_data("test_tmp.csv")
    if dataset is None:
        print("Error al hacer el dataset a partir del fichero")
        return
    expected_upward = []   # IluminanciaSuperior from the test file
    predicted_upward = []  # IluminanciaSuperior returned by the API
    expected_flux = []
    predicted_flux = []
    total_flux = []        # FlujoLuminicoTotal per pattern
    downward = []          # IluminanciaAbajo per pattern
    expected_fhsi = []
    predicted_fhsi = []
    # Ask the prediction service for each pattern in the test set
    for _, row in dataset.iterrows():
        get_parameters = {
            "base_de_datos": "db",
            "ColorSuelo": row["ColorSuelo"],
            "AlturaLuminaria": row["AlturaLuminaria"],
            "FlujoLuminicoTotal": row["FlujoLuminicoTotal"],
            "TCC": row["TCC"],
            "IluminanciaAbajo": row["IluminanciaAbajo"],
            "Espectro": row["Espectro"],
            "ReflectanciaSuelo": row["ReflectanciaSuelo"],
            "IluminanciaSuperior": 0,
        }
        # GET request to the prediction module for this pattern
        response = requests.get(
            "http://127.0.0.1:8000/predecir", params=get_parameters
        )
        if response.status_code != 200:
            # NOTE: the raise makes any code after it unreachable, so no
            # fallback return value is needed here.
            raise Exception(
                "Incorrect reply from prediction API. Status code: {}. Text: {}".format(
                    response.status_code, response.text
                )
            )
        prediction = response.json()["IluminanciaSuperior"]
        # Keep the inputs needed later to derive upward flux and FHSI
        downward.append(row["IluminanciaAbajo"])
        total_flux.append(row["FlujoLuminicoTotal"])
        expected_upward.append(row["IluminanciaSuperior"])
        predicted_upward.append(prediction)
    # Derive the upward flux and FHSI (% of the total flux emitted upwards)
    for i in range(len(downward)):
        expected_flux.append(total_flux[i] * expected_upward[i] / downward[i])
        predicted_flux.append(total_flux[i] * predicted_upward[i] / downward[i])
        expected_fhsi.append(round((expected_flux[i] / total_flux[i]) * 100, 3))
        predicted_fhsi.append(round((predicted_flux[i] / total_flux[i]) * 100, 3))
    # Plot: upward illuminance, predicted vs real
    fig, ax = plt.subplots(figsize=(20, 10))
    ax.autoscale(enable=None, axis="x", tight=True)
    ax.set_ylim(bottom=0, top=300)
    ax.plot(predicted_upward, 'r-', label='predicho')
    ax.plot(expected_upward, 'b-', label='real')
    plt.ylabel('IluminanciaSuperior')
    plt.xlabel('patron')
    plt.xticks(rotation=90)
    plt.legend()
    plt.savefig("testIluminacion.png", bbox_inches='tight', dpi=100)
    # Plot: light pollution (FHSI), predicted vs real
    fig, ax = plt.subplots(figsize=(20, 10))
    ax.autoscale(enable=None, axis="x", tight=True)
    ax.set_ylim(bottom=0, top=30)
    ax.plot(predicted_fhsi, 'r-', label='predicho')
    ax.plot(expected_fhsi, 'b-', label='real')
    plt.ylabel('ContaminacionLuminica')
    plt.xlabel('patron')
    plt.xticks(rotation=90)
    plt.legend()
    plt.savefig("testContaminacionLuminica.png", bbox_inches='tight', dpi=100)
#funcion para leer datos del fichero
def read_data(fichero_datos):
    """Load the test patterns from a CSV file into a DataFrame.

    Parameters
    ----------
    fichero_datos : str or file-like
        Path (or open handle) of the CSV file with the test patterns.

    Returns
    -------
    pandas.DataFrame
        The patterns with columns reordered so the categorical fields come
        first and the target column last, as the downstream
        ColumnTransformer expects positionally.
    """
    dataset = pd.read_csv(fichero_datos)
    # Reorder the columns: categorical fields first, target last
    column_order = [
        "ColorSuelo",
        "AlturaLuminaria",
        "FlujoLuminicoTotal",
        "TCC",
        "IluminanciaAbajo",
        "Espectro",
        "ReflectanciaSuelo",
        "IluminanciaSuperior",
    ]
    return dataset.reindex(columns=column_order)
# Script entry point: generate the comparison plots when run directly.
if __name__ == "__main__":
    main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.